From 6c4032071f275a9faca3f2150998c0f039c3006a Mon Sep 17 00:00:00 2001
From: Vitalii Solodilov
Date: Tue, 19 Jun 2018 11:19:23 +0400
Subject: [PATCH 0001/1815] Improvement of RabbitMQ plugin #3025 #3252

* new metrics:
  * unroutable messages
  * node uptime
  * gc metrics
  * mnesia metrics
  * node healthcheck
  * IO metrics
* refactoring tests:
  * moved the json examples to separate files
  * check metric values

Signed-off-by: Vitalii Solodilov
---
 plugins/inputs/rabbitmq/README.md            |  25 +
 plugins/inputs/rabbitmq/rabbitmq.go          | 209 ++++--
 plugins/inputs/rabbitmq/rabbitmq_test.go     | 638 ++++--------
 .../inputs/rabbitmq/testdata/exchanges.json  |  22 +
 .../rabbitmq/testdata/healthchecks.json      |   1 +
 plugins/inputs/rabbitmq/testdata/nodes.json  |  87 +++
 .../inputs/rabbitmq/testdata/overview.json   |  63 ++
 plugins/inputs/rabbitmq/testdata/queues.json | 114 ++++
 8 files changed, 579 insertions(+), 580 deletions(-)
 create mode 100644 plugins/inputs/rabbitmq/testdata/exchanges.json
 create mode 100644 plugins/inputs/rabbitmq/testdata/healthchecks.json
 create mode 100644 plugins/inputs/rabbitmq/testdata/nodes.json
 create mode 100644 plugins/inputs/rabbitmq/testdata/overview.json
 create mode 100644 plugins/inputs/rabbitmq/testdata/queues.json

diff --git a/plugins/inputs/rabbitmq/README.md b/plugins/inputs/rabbitmq/README.md
index ae6dac6f1..0406df700 100644
--- a/plugins/inputs/rabbitmq/README.md
+++ b/plugins/inputs/rabbitmq/README.md
@@ -68,20 +68,42 @@ For additional details reference the [RabbitMQ Management HTTP Stats](https://cd
   - queues (int, queues)
   - clustering_listeners (int, cluster nodes)
   - amqp_listeners (int, amqp nodes up)
+  - return_unroutable (int, number of unroutable messages)
+  - return_unroutable_rate (float, number of unroutable messages per second)
 
 - rabbitmq_node
   - disk_free (int, bytes)
   - disk_free_limit (int, bytes)
+  - disk_free_alarm (int, disk alarm)
   - fd_total (int, file descriptors)
   - fd_used (int, file descriptors)
   - mem_limit (int, bytes)
   - mem_used (int, bytes)
+  - mem_alarm (int, memory alarm)
   - proc_total (int, erlang processes)
   - proc_used (int, erlang processes)
   - run_queue (int, erlang processes)
   - sockets_total (int, sockets)
   - sockets_used (int, sockets)
   - running (int, node up)
+  - uptime (int, milliseconds)
+  - health_check_status (int, 1 or 0)
+  - mnesia_disk_tx_count (int, number of disk transactions)
+  - mnesia_ram_tx_count (int, number of ram transactions)
+  - mnesia_disk_tx_count_rate (float, number of disk transactions per second)
+  - mnesia_ram_tx_count_rate (float, number of ram transactions per second)
+  - gc_num (int, number of garbage collections)
+  - gc_bytes_reclaimed (int, bytes)
+  - gc_num_rate (float, number of garbage collections per second)
+  - gc_bytes_reclaimed_rate (float, bytes per second)
+  - io_read_avg_time (int, milliseconds)
+  - io_read_avg_time_rate (float, milliseconds per second)
+  - io_read_bytes (int, bytes)
+  - io_read_bytes_rate (float, bytes per second)
+  - io_write_avg_time (int, milliseconds)
+  - io_write_avg_time_rate (float, milliseconds per second)
+  - io_write_bytes (int, bytes)
+  - io_write_bytes_rate (float, bytes per second)
 
 - rabbitmq_queue
   - consumer_utilisation (float, percent)
@@ -109,7 +131,9 @@ For additional details reference the [RabbitMQ Management HTTP Stats](https://cd
 
 - rabbitmq_exchange
   - messages_publish_in (int, count)
+  - messages_publish_in_rate (float, messages per second)
   - messages_publish_out (int, count)
+  - messages_publish_out_rate (float, messages per second)
 
 ### Tags:
 
@@ -121,6 +145,7 @@ For
additional details reference the [RabbitMQ Management HTTP Stats](https://cd - rabbitmq_node - node + - url - rabbitmq_queue - url diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 49dabe1b5..4e7e918da 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -72,23 +72,27 @@ type Listeners struct { // Details ... type Details struct { - Rate float64 + Rate float64 `json:"rate"` } // MessageStats ... type MessageStats struct { - Ack int64 - AckDetails Details `json:"ack_details"` - Deliver int64 - DeliverDetails Details `json:"deliver_details"` - DeliverGet int64 `json:"deliver_get"` - DeliverGetDetails Details `json:"deliver_get_details"` - Publish int64 - PublishDetails Details `json:"publish_details"` - Redeliver int64 - RedeliverDetails Details `json:"redeliver_details"` - PublishIn int64 `json:"publish_in"` - PublishOut int64 `json:"publish_out"` + Ack int64 + AckDetails Details `json:"ack_details"` + Deliver int64 + DeliverDetails Details `json:"deliver_details"` + DeliverGet int64 `json:"deliver_get"` + DeliverGetDetails Details `json:"deliver_get_details"` + Publish int64 + PublishDetails Details `json:"publish_details"` + Redeliver int64 + RedeliverDetails Details `json:"redeliver_details"` + PublishIn int64 `json:"publish_in"` + PublishInDetails Details `json:"publish_in_details"` + PublishOut int64 `json:"publish_out"` + PublishOutDetails Details `json:"publish_out_details"` + ReturnUnroutable int64 `json:"return_unroutable"` + ReturnUnroutableDetails Details `json:"return_unroutable_details"` } // ObjectTotals ... @@ -131,18 +135,37 @@ type Queue struct { type Node struct { Name string - DiskFree int64 `json:"disk_free"` - DiskFreeLimit int64 `json:"disk_free_limit"` - FdTotal int64 `json:"fd_total"` - FdUsed int64 `json:"fd_used"` - MemLimit int64 `json:"mem_limit"` - MemUsed int64 `json:"mem_used"` - ProcTotal int64 `json:"proc_total"` - ProcUsed int64 `json:"proc_used"` - RunQueue int64 `json:"run_queue"` - SocketsTotal int64 `json:"sockets_total"` - SocketsUsed int64 `json:"sockets_used"` - Running bool `json:"running"` + DiskFree int64 `json:"disk_free"` + DiskFreeLimit int64 `json:"disk_free_limit"` + DiskFreeAlarm bool `json:"disk_free_alarm"` + FdTotal int64 `json:"fd_total"` + FdUsed int64 `json:"fd_used"` + MemLimit int64 `json:"mem_limit"` + MemUsed int64 `json:"mem_used"` + MemAlarm bool `json:"mem_alarm"` + ProcTotal int64 `json:"proc_total"` + ProcUsed int64 `json:"proc_used"` + RunQueue int64 `json:"run_queue"` + SocketsTotal int64 `json:"sockets_total"` + SocketsUsed int64 `json:"sockets_used"` + Running bool `json:"running"` + Uptime int64 `json:"uptime"` + MnesiaDiskTxCount int64 `json:"mnesia_disk_tx_count"` + MnesiaDiskTxCountDetails Details `json:"mnesia_disk_tx_count_details"` + MnesiaRamTxCount int64 `json:"mnesia_ram_tx_count"` + MnesiaRamTxCountDetails Details `json:"mnesia_ram_tx_count_details"` + GcNum int64 `json:"gc_num"` + GcNumDetails Details `json:"gc_num_details"` + GcBytesReclaimed int64 `json:"gc_bytes_reclaimed"` + GcBytesReclaimedDetails Details `json:"gc_bytes_reclaimed_details"` + IoReadAvgTime int64 `json:"io_read_avg_time"` + IoReadAvgTimeDetails Details `json:"io_read_avg_time_details"` + IoReadBytes int64 `json:"io_read_bytes"` + IoReadBytesDetails Details `json:"io_read_bytes_details"` + IoWriteAvgTime int64 `json:"io_write_avg_time"` + IoWriteAvgTimeDetails Details `json:"io_write_avg_time_details"` + IoWriteBytes int64 `json:"io_write_bytes"` + 
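// each *_details struct carries the management API's per-second rate for the counter above it; the plugin reports it as the matching _rate field +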
IoWriteBytesDetails Details `json:"io_write_bytes_details"` } type Exchange struct { @@ -155,6 +178,10 @@ type Exchange struct { AutoDelete bool `json:"auto_delete"` } +type HealthCheck struct { + Status string `json:"status"` +} + // gatherFunc ... type gatherFunc func(r *RabbitMQ, acc telegraf.Accumulator) @@ -204,6 +231,13 @@ var sampleConfig = ` queue_name_exclude = [] ` +func boolToInt(b bool) int64 { + if b { + return 1 + } + return 0 +} + // SampleConfig ... func (r *RabbitMQ) SampleConfig() string { return sampleConfig @@ -302,12 +336,12 @@ func gatherOverview(r *RabbitMQ, acc telegraf.Accumulator) { return } - var clustering_listeners, amqp_listeners int64 = 0, 0 + var clusteringListeners, amqpListeners int64 = 0, 0 for _, listener := range overview.Listeners { if listener.Protocol == "clustering" { - clustering_listeners++ + clusteringListeners++ } else if listener.Protocol == "amqp" { - amqp_listeners++ + amqpListeners++ } } @@ -328,48 +362,109 @@ func gatherOverview(r *RabbitMQ, acc telegraf.Accumulator) { "messages_delivered": overview.MessageStats.Deliver, "messages_delivered_get": overview.MessageStats.DeliverGet, "messages_published": overview.MessageStats.Publish, - "clustering_listeners": clustering_listeners, - "amqp_listeners": amqp_listeners, + "clustering_listeners": clusteringListeners, + "amqp_listeners": amqpListeners, + "return_unroutable": overview.MessageStats.ReturnUnroutable, + "return_unroutable_rate": overview.MessageStats.ReturnUnroutableDetails.Rate, } acc.AddFields("rabbitmq_overview", fields, tags) } func gatherNodes(r *RabbitMQ, acc telegraf.Accumulator) { - nodes := make([]Node, 0) + allNodes := make([]Node, 0) // Gather information about nodes - err := r.requestJSON("/api/nodes", &nodes) + err := r.requestJSON("/api/nodes", &allNodes) if err != nil { acc.AddError(err) return } - now := time.Now() + + nodes := make(map[string]Node) + for _, node := range allNodes { + if r.shouldGatherNode(node) { + nodes[node.Name] = node + } + } + + numberNodes := len(nodes) + if numberNodes == 0 { + return + } + + type NodeHealthCheck struct { + NodeName string + HealthCheck HealthCheck + Error error + } + + healthChecksChannel := make(chan NodeHealthCheck, numberNodes) for _, node := range nodes { - if !r.shouldGatherNode(node) { - continue + go func(nodeName string, healthChecksChannel chan NodeHealthCheck) { + var healthCheck HealthCheck + + err := r.requestJSON("/api/healthchecks/node/"+nodeName, &healthCheck) + nodeHealthCheck := NodeHealthCheck{ + NodeName: nodeName, + Error: err, + HealthCheck: healthCheck, + } + + healthChecksChannel <- nodeHealthCheck + }(node.Name, healthChecksChannel) + } + + now := time.Now() + + for i := 0; i < len(nodes); i++ { + nodeHealthCheck := <-healthChecksChannel + + var healthCheckStatus int64 = 0 + + if nodeHealthCheck.Error != nil { + acc.AddError(nodeHealthCheck.Error) + } else if nodeHealthCheck.HealthCheck.Status == "ok" { + healthCheckStatus = 1 } + node := nodes[nodeHealthCheck.NodeName] + tags := map[string]string{"url": r.URL} tags["node"] = node.Name - var running int64 = 0 - if node.Running { - running = 1 - } - fields := map[string]interface{}{ - "disk_free": node.DiskFree, - "disk_free_limit": node.DiskFreeLimit, - "fd_total": node.FdTotal, - "fd_used": node.FdUsed, - "mem_limit": node.MemLimit, - "mem_used": node.MemUsed, - "proc_total": node.ProcTotal, - "proc_used": node.ProcUsed, - "run_queue": node.RunQueue, - "sockets_total": node.SocketsTotal, - "sockets_used": node.SocketsUsed, - "running": running, + 
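// alarm and running flags are flattened to 0/1 integers via boolToInt; health_check_status is 1 when the node health check reported "ok" +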
"disk_free": node.DiskFree, + "disk_free_limit": node.DiskFreeLimit, + "disk_free_alarm": boolToInt(node.DiskFreeAlarm), + "fd_total": node.FdTotal, + "fd_used": node.FdUsed, + "mem_limit": node.MemLimit, + "mem_used": node.MemUsed, + "mem_alarm": boolToInt(node.MemAlarm), + "proc_total": node.ProcTotal, + "proc_used": node.ProcUsed, + "run_queue": node.RunQueue, + "sockets_total": node.SocketsTotal, + "sockets_used": node.SocketsUsed, + "uptime": node.Uptime, + "mnesia_disk_tx_count": node.MnesiaDiskTxCount, + "mnesia_disk_tx_count_rate": node.MnesiaDiskTxCountDetails.Rate, + "mnesia_ram_tx_count": node.MnesiaRamTxCount, + "mnesia_ram_tx_count_rate": node.MnesiaRamTxCountDetails.Rate, + "gc_num": node.GcNum, + "gc_num_rate": node.GcNumDetails.Rate, + "gc_bytes_reclaimed": node.GcBytesReclaimed, + "gc_bytes_reclaimed_rate": node.GcBytesReclaimedDetails.Rate, + "io_read_avg_time": node.IoReadAvgTime, + "io_read_avg_time_rate": node.IoReadAvgTimeDetails.Rate, + "io_read_bytes": node.IoReadBytes, + "io_read_bytes_rate": node.IoReadBytesDetails.Rate, + "io_write_avg_time": node.IoWriteAvgTime, + "io_write_avg_time_rate": node.IoWriteAvgTimeDetails.Rate, + "io_write_bytes": node.IoWriteBytes, + "io_write_bytes_rate": node.IoWriteBytesDetails.Rate, + "running": boolToInt(node.Running), + "health_check_status": healthCheckStatus, } acc.AddFields("rabbitmq_node", fields, tags, now) } @@ -459,8 +554,10 @@ func gatherExchanges(r *RabbitMQ, acc telegraf.Accumulator) { acc.AddFields( "rabbitmq_exchange", map[string]interface{}{ - "messages_publish_in": exchange.MessageStats.PublishIn, - "messages_publish_out": exchange.MessageStats.PublishOut, + "messages_publish_in": exchange.MessageStats.PublishIn, + "messages_publish_in_rate": exchange.MessageStats.PublishInDetails.Rate, + "messages_publish_out": exchange.MessageStats.PublishOut, + "messages_publish_out_rate": exchange.MessageStats.PublishOutDetails.Rate, }, tags, ) @@ -487,11 +584,11 @@ func (r *RabbitMQ) createQueueFilter() error { r.QueueInclude = append(r.QueueInclude, r.Queues...) 
} - filter, err := filter.NewIncludeExcludeFilter(r.QueueInclude, r.QueueExclude) + queueFilter, err := filter.NewIncludeExcludeFilter(r.QueueInclude, r.QueueExclude) if err != nil { return err } - r.queueFilter = filter + r.queueFilter = queueFilter for _, q := range r.QueueExclude { if q == "*" { diff --git a/plugins/inputs/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go index 5e9829cc0..0f98f95ce 100644 --- a/plugins/inputs/rabbitmq/rabbitmq_test.go +++ b/plugins/inputs/rabbitmq/rabbitmq_test.go @@ -9,503 +9,35 @@ import ( "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "io/ioutil" ) -const sampleOverviewResponse = ` -{ - "message_stats": { - "ack": 5246, - "ack_details": { - "rate": 0.0 - }, - "deliver": 5246, - "deliver_details": { - "rate": 0.0 - }, - "deliver_get": 5246, - "deliver_get_details": { - "rate": 0.0 - }, - "publish": 5258, - "publish_details": { - "rate": 0.0 - } - }, - "object_totals": { - "channels": 44, - "connections": 44, - "consumers": 65, - "exchanges": 43, - "queues": 62 - }, - "queue_totals": { - "messages": 0, - "messages_details": { - "rate": 0.0 - }, - "messages_ready": 0, - "messages_ready_details": { - "rate": 0.0 - }, - "messages_unacknowledged": 0, - "messages_unacknowledged_details": { - "rate": 0.0 - } - }, - "listeners": [ - { - "name": "rabbit@node-a", - "protocol": "amqp" - }, - { - "name": "rabbit@node-b", - "protocol": "amqp" - }, - { - "name": "rabbit@node-a", - "protocol": "clustering" - }, - { - "name": "rabbit@node-b", - "protocol": "clustering" - } - ] -} -` - -const sampleNodesResponse = ` -[ - { - "db_dir": "/var/lib/rabbitmq/mnesia/rabbit@vagrant-ubuntu-trusty-64", - "disk_free": 37768282112, - "disk_free_alarm": false, - "disk_free_details": { - "rate": 0.0 - }, - "disk_free_limit": 50000000, - "enabled_plugins": [ - "rabbitmq_management" - ], - "fd_total": 1024, - "fd_used": 63, - "fd_used_details": { - "rate": 0.0 - }, - "io_read_avg_time": 0, - "io_read_avg_time_details": { - "rate": 0.0 - }, - "io_read_bytes": 1, - "io_read_bytes_details": { - "rate": 0.0 - }, - "io_read_count": 1, - "io_read_count_details": { - "rate": 0.0 - }, - "io_sync_avg_time": 0, - "io_sync_avg_time_details": { - "rate": 0.0 - }, - "io_write_avg_time": 0, - "io_write_avg_time_details": { - "rate": 0.0 - }, - "log_file": "/var/log/rabbitmq/rabbit@vagrant-ubuntu-trusty-64.log", - "mem_alarm": false, - "mem_limit": 2503771750, - "mem_used": 159707080, - "mem_used_details": { - "rate": 15185.6 - }, - "mnesia_disk_tx_count": 16, - "mnesia_disk_tx_count_details": { - "rate": 0.0 - }, - "mnesia_ram_tx_count": 296, - "mnesia_ram_tx_count_details": { - "rate": 0.0 - }, - "name": "rabbit@vagrant-ubuntu-trusty-64", - "net_ticktime": 60, - "os_pid": "14244", - "partitions": [], - "proc_total": 1048576, - "proc_used": 783, - "proc_used_details": { - "rate": 0.0 - }, - "processors": 1, - "rates_mode": "basic", - "run_queue": 0, - "running": true, - "sasl_log_file": "/var/log/rabbitmq/rabbit@vagrant-ubuntu-trusty-64-sasl.log", - "sockets_total": 829, - "sockets_used": 45, - "sockets_used_details": { - "rate": 0.0 - }, - "type": "disc", - "uptime": 7464827 - } -] -` -const sampleQueuesResponse = ` -[ - { - "memory": 21960, - "messages": 0, - "messages_details": { - "rate": 0 - }, - "messages_ready": 0, - "messages_ready_details": { - "rate": 0 - }, - "messages_unacknowledged": 0, - "messages_unacknowledged_details": { - "rate": 0 - }, - "idle_since": "2015-11-01 8:22:15", - 
"consumer_utilisation": "", - "policy": "federator", - "exclusive_consumer_tag": "", - "consumers": 0, - "recoverable_slaves": "", - "state": "running", - "messages_ram": 0, - "messages_ready_ram": 0, - "messages_unacknowledged_ram": 0, - "messages_persistent": 0, - "message_bytes": 0, - "message_bytes_ready": 0, - "message_bytes_unacknowledged": 0, - "message_bytes_ram": 0, - "message_bytes_persistent": 0, - "disk_reads": 0, - "disk_writes": 0, - "backing_queue_status": { - "q1": 0, - "q2": 0, - "delta": [ - "delta", - "undefined", - 0, - "undefined" - ], - "q3": 0, - "q4": 0, - "len": 0, - "target_ram_count": "infinity", - "next_seq_id": 0, - "avg_ingress_rate": 0, - "avg_egress_rate": 0, - "avg_ack_ingress_rate": 0, - "avg_ack_egress_rate": 0 - }, - "name": "collectd-queue", - "vhost": "collectd", - "durable": true, - "auto_delete": false, - "arguments": {}, - "node": "rabbit@testhost" - }, - { - "memory": 55528, - "message_stats": { - "ack": 223654927, - "ack_details": { - "rate": 0 - }, - "deliver": 224518745, - "deliver_details": { - "rate": 0 - }, - "deliver_get": 224518829, - "deliver_get_details": { - "rate": 0 - }, - "get": 19, - "get_details": { - "rate": 0 - }, - "get_no_ack": 65, - "get_no_ack_details": { - "rate": 0 - }, - "publish": 223883765, - "publish_details": { - "rate": 0 - }, - "redeliver": 863805, - "redeliver_details": { - "rate": 0 - } - }, - "messages": 24, - "messages_details": { - "rate": 0 - }, - "messages_ready": 24, - "messages_ready_details": { - "rate": 0 - }, - "messages_unacknowledged": 0, - "messages_unacknowledged_details": { - "rate": 0 - }, - "idle_since": "2015-11-01 8:22:14", - "consumer_utilisation": "", - "policy": "", - "exclusive_consumer_tag": "", - "consumers": 0, - "recoverable_slaves": "", - "state": "running", - "messages_ram": 24, - "messages_ready_ram": 24, - "messages_unacknowledged_ram": 0, - "messages_persistent": 0, - "message_bytes": 149220, - "message_bytes_ready": 149220, - "message_bytes_unacknowledged": 0, - "message_bytes_ram": 149220, - "message_bytes_persistent": 0, - "disk_reads": 0, - "disk_writes": 0, - "backing_queue_status": { - "q1": 0, - "q2": 0, - "delta": [ - "delta", - "undefined", - 0, - "undefined" - ], - "q3": 0, - "q4": 24, - "len": 24, - "target_ram_count": "infinity", - "next_seq_id": 223883765, - "avg_ingress_rate": 0, - "avg_egress_rate": 0, - "avg_ack_ingress_rate": 0, - "avg_ack_egress_rate": 0 - }, - "name": "telegraf", - "vhost": "collectd", - "durable": true, - "auto_delete": false, - "arguments": {}, - "node": "rabbit@testhost" - }, - { - "message_stats": { - "ack": 1296077, - "ack_details": { - "rate": 0 - }, - "deliver": 1513176, - "deliver_details": { - "rate": 0.4 - }, - "deliver_get": 1513239, - "deliver_get_details": { - "rate": 0.4 - }, - "disk_writes": 7976, - "disk_writes_details": { - "rate": 0 - }, - "get": 40, - "get_details": { - "rate": 0 - }, - "get_no_ack": 23, - "get_no_ack_details": { - "rate": 0 - }, - "publish": 1325628, - "publish_details": { - "rate": 0.4 - }, - "redeliver": 216034, - "redeliver_details": { - "rate": 0 - } - }, - "messages": 5, - "messages_details": { - "rate": 0.4 - }, - "messages_ready": 0, - "messages_ready_details": { - "rate": 0 - }, - "messages_unacknowledged": 5, - "messages_unacknowledged_details": { - "rate": 0.4 - }, - "policy": "federator", - "exclusive_consumer_tag": "", - "consumers": 1, - "consumer_utilisation": 1, - "memory": 122856, - "recoverable_slaves": "", - "state": "running", - "messages_ram": 5, - "messages_ready_ram": 0, - 
"messages_unacknowledged_ram": 5, - "messages_persistent": 0, - "message_bytes": 150096, - "message_bytes_ready": 0, - "message_bytes_unacknowledged": 150096, - "message_bytes_ram": 150096, - "message_bytes_persistent": 0, - "disk_reads": 0, - "disk_writes": 7976, - "backing_queue_status": { - "q1": 0, - "q2": 0, - "delta": [ - "delta", - "undefined", - 0, - "undefined" - ], - "q3": 0, - "q4": 0, - "len": 0, - "target_ram_count": "infinity", - "next_seq_id": 1325628, - "avg_ingress_rate": 0.19115840579934168, - "avg_egress_rate": 0.19115840579934168, - "avg_ack_ingress_rate": 0.19115840579934168, - "avg_ack_egress_rate": 0.1492766485341716 - }, - "name": "telegraf", - "vhost": "metrics", - "durable": true, - "auto_delete": false, - "arguments": {}, - "node": "rabbit@testhost" - } -] -` - -const sampleExchangesResponse = ` -[ - { - "arguments": { }, - "internal": false, - "auto_delete": false, - "durable": true, - "type": "direct", - "vhost": "\/", - "name": "" - }, - { - "message_stats": { - "publish_in_details": { - "rate": 0 - }, - "publish_in": 2, - "publish_out_details": { - "rate": 0 - }, - "publish_out": 1 - }, - "arguments": { }, - "internal": false, - "auto_delete": false, - "durable": true, - "type": "fanout", - "vhost": "\/", - "name": "telegraf" - }, - { - "arguments": { }, - "internal": false, - "auto_delete": false, - "durable": true, - "type": "direct", - "vhost": "\/", - "name": "amq.direct" - }, - { - "arguments": { }, - "internal": false, - "auto_delete": false, - "durable": true, - "type": "fanout", - "vhost": "\/", - "name": "amq.fanout" - }, - { - "arguments": { }, - "internal": false, - "auto_delete": false, - "durable": true, - "type": "headers", - "vhost": "\/", - "name": "amq.headers" - }, - { - "arguments": { }, - "internal": false, - "auto_delete": false, - "durable": true, - "type": "headers", - "vhost": "\/", - "name": "amq.match" - }, - { - "arguments": { }, - "internal": true, - "auto_delete": false, - "durable": true, - "type": "topic", - "vhost": "\/", - "name": "amq.rabbitmq.log" - }, - { - "arguments": { }, - "internal": true, - "auto_delete": false, - "durable": true, - "type": "topic", - "vhost": "\/", - "name": "amq.rabbitmq.trace" - }, - { - "arguments": { }, - "internal": false, - "auto_delete": false, - "durable": true, - "type": "topic", - "vhost": "\/", - "name": "amq.topic" - } -] -` - func TestRabbitMQGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var rsp string + var jsonFilePath string switch r.URL.Path { case "/api/overview": - rsp = sampleOverviewResponse + jsonFilePath = "testdata/overview.json" case "/api/nodes": - rsp = sampleNodesResponse + jsonFilePath = "testdata/nodes.json" case "/api/queues": - rsp = sampleQueuesResponse + jsonFilePath = "testdata/queues.json" case "/api/exchanges": - rsp = sampleExchangesResponse + jsonFilePath = "testdata/exchanges.json" + case "/api/healthchecks/node/rabbit@vagrant-ubuntu-trusty-64": + jsonFilePath = "testdata/healthchecks.json" default: panic("Cannot handle request") } - fmt.Fprintln(w, rsp) + data, err := ioutil.ReadFile(jsonFilePath) + + if err != nil { + panic(fmt.Sprintf("could not read from data file %s", jsonFilePath)) + } + + w.Write(data) })) defer ts.Close() @@ -513,60 +45,118 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) { URL: ts.URL, } - var acc testutil.Accumulator + acc := &testutil.Accumulator{} err := acc.GatherError(r.Gather) require.NoError(t, err) - intMetrics := []string{ - "messages", - 
"messages_ready", - "messages_unacked", - - "messages_acked", - "messages_delivered", - "messages_published", - - "channels", - "connections", - "consumers", - "exchanges", - "queues", - "clustering_listeners", - "amqp_listeners", + overviewMetrics := map[string]interface{}{ + "messages": 5, + "messages_ready": 32, + "messages_unacked": 27, + "messages_acked": 5246, + "messages_delivered": 5234, + "messages_delivered_get": 3333, + "messages_published": 5258, + "channels": 44, + "connections": 44, + "consumers": 65, + "exchanges": 43, + "queues": 62, + "clustering_listeners": 2, + "amqp_listeners": 2, + "return_unroutable": 10, + "return_unroutable_rate": 3.3, } + compareMetrics(t, overviewMetrics, acc, "rabbitmq_overview") - for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("rabbitmq_overview", metric)) + queuesMetrics := map[string]interface{}{ + "consumers": 3, + "consumer_utilisation": 1.0, + "memory": 143776, + "message_bytes": 3, + "message_bytes_ready": 4, + "message_bytes_unacked": 5, + "message_bytes_ram": 6, + "message_bytes_persist": 7, + "messages": 44, + "messages_ready": 32, + "messages_unack": 44, + "messages_ack": 3457, + "messages_ack_rate": 9.9, + "messages_deliver": 22222, + "messages_deliver_rate": 333.4, + "messages_deliver_get": 3457, + "messages_deliver_get_rate": 0.2, + "messages_publish": 3457, + "messages_publish_rate": 11.2, + "messages_redeliver": 33, + "messages_redeliver_rate": 2.5, + "idle_since": "2015-11-01 8:22:14", } + compareMetrics(t, queuesMetrics, acc, "rabbitmq_queue") - nodeIntMetrics := []string{ - "disk_free", - "disk_free_limit", - "fd_total", - "fd_used", - "mem_limit", - "mem_used", - "proc_total", - "proc_used", - "run_queue", - "sockets_total", - "sockets_used", - "running", + nodeMetrics := map[string]interface{}{ + "disk_free": 3776, + "disk_free_limit": 50000000, + "disk_free_alarm": 0, + "fd_total": 1024, + "fd_used": 63, + "mem_limit": 2503, + "mem_used": 159707080, + "mem_alarm": 1, + "proc_total": 1048576, + "proc_used": 783, + "run_queue": 0, + "sockets_total": 829, + "sockets_used": 45, + "uptime": 7464827, + "running": 1, + "health_check_status": 1, + "mnesia_disk_tx_count": 16, + "mnesia_ram_tx_count": 296, + "mnesia_disk_tx_count_rate": 1.1, + "mnesia_ram_tx_count_rate": 2.2, + "gc_num": 57280132, + "gc_bytes_reclaimed": 2533, + "gc_num_rate": 274.2, + "gc_bytes_reclaimed_rate": 16490856.3, + "io_read_avg_time": 983, + "io_read_avg_time_rate": 88.77, + "io_read_bytes": 1111, + "io_read_bytes_rate": 99.99, + "io_write_avg_time": 134, + "io_write_avg_time_rate": 4.32, + "io_write_bytes": 823, + "io_write_bytes_rate": 32.8, } + compareMetrics(t, nodeMetrics, acc, "rabbitmq_node") - for _, metric := range nodeIntMetrics { - assert.True(t, acc.HasInt64Field("rabbitmq_node", metric)) + exchangeMetrics := map[string]interface{}{ + "messages_publish_in": 3678, + "messages_publish_in_rate": 3.2, + "messages_publish_out": 3677, + "messages_publish_out_rate": 5.1, } + compareMetrics(t, exchangeMetrics, acc, "rabbitmq_exchange") +} - assert.True(t, acc.HasMeasurement("rabbitmq_queue")) +func compareMetrics(t *testing.T, expectedMetrics map[string]interface{}, + accumulator *testutil.Accumulator, measurementKey string) { + measurement, exist := accumulator.Get(measurementKey) - exchangeIntMetrics := []string{ - "messages_publish_in", - "messages_publish_out", - } + assert.True(t, exist, "There is measurement %s", measurementKey) + assert.Equal(t, len(expectedMetrics), len(measurement.Fields)) - for _, metric := range 
exchangeIntMetrics { - assert.True(t, acc.HasInt64Field("rabbitmq_exchange", metric)) + for metricName, metricValue := range expectedMetrics { + actualMetricValue := measurement.Fields[metricName] + + if accumulator.HasStringField(measurementKey, metricName) { + assert.Equal(t, metricValue, actualMetricValue, + "Metric name: %s", metricName) + } else { + assert.InDelta(t, metricValue, actualMetricValue, 0e5, + "Metric name: %s", metricName) + } } } diff --git a/plugins/inputs/rabbitmq/testdata/exchanges.json b/plugins/inputs/rabbitmq/testdata/exchanges.json new file mode 100644 index 000000000..203c29a59 --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/exchanges.json @@ -0,0 +1,22 @@ +[ + { + "message_stats": { + "publish_in_details": { + "rate": 3.2 + }, + "publish_in": 3678, + "publish_out_details": { + "rate": 5.1 + }, + "publish_out": 3677 + }, + "user_who_performed_action": "mistral_testuser_1", + "arguments": {}, + "internal": false, + "auto_delete": true, + "durable": false, + "type": "direct", + "vhost": "sorandomsorandom", + "name": "reply_a716f0523cd44941ad2ea6ce4a3869c3" + } +] \ No newline at end of file diff --git a/plugins/inputs/rabbitmq/testdata/healthchecks.json b/plugins/inputs/rabbitmq/testdata/healthchecks.json new file mode 100644 index 000000000..1a36cf5fc --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/healthchecks.json @@ -0,0 +1 @@ +{"status":"ok"} \ No newline at end of file diff --git a/plugins/inputs/rabbitmq/testdata/nodes.json b/plugins/inputs/rabbitmq/testdata/nodes.json new file mode 100644 index 000000000..42b7a4b7a --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/nodes.json @@ -0,0 +1,87 @@ +[ + { + "db_dir": "/var/lib/rabbitmq/mnesia/rabbit@vagrant-ubuntu-trusty-64", + "disk_free": 3776, + "disk_free_alarm": false, + "disk_free_details": { + "rate": 0.0 + }, + "disk_free_limit": 50000000, + "enabled_plugins": [ + "rabbitmq_management" + ], + "gc_num": 57280132, + "gc_num_details": { + "rate": 274.2 + }, + "gc_bytes_reclaimed": 2533, + "gc_bytes_reclaimed_details": { + "rate": 16490856.3 + }, + "fd_total": 1024, + "fd_used": 63, + "fd_used_details": { + "rate": 0.0 + }, + "io_read_avg_time": 983, + "io_read_avg_time_details": { + "rate": 88.77 + }, + "io_read_bytes": 1111, + "io_read_bytes_details": { + "rate": 99.99 + }, + "io_read_count": 1, + "io_read_count_details": { + "rate": 0.0 + }, + "io_sync_avg_time": 0, + "io_sync_avg_time_details": { + "rate": 0.0 + }, + "io_write_avg_time": 134, + "io_write_avg_time_details": { + "rate": 4.32 + }, + "io_write_bytes": 823, + "io_write_bytes_details": { + "rate": 32.8 + }, + "log_file": "/var/log/rabbitmq/rabbit@vagrant-ubuntu-trusty-64.log", + "mem_alarm": true, + "mem_limit": 2503, + "mem_used": 159707080, + "mem_used_details": { + "rate": 15185.6 + }, + "mnesia_disk_tx_count": 16, + "mnesia_disk_tx_count_details": { + "rate": 1.1 + }, + "mnesia_ram_tx_count": 296, + "mnesia_ram_tx_count_details": { + "rate": 2.2 + }, + "name": "rabbit@vagrant-ubuntu-trusty-64", + "net_ticktime": 60, + "os_pid": "14244", + "partitions": [], + "proc_total": 1048576, + "proc_used": 783, + "proc_used_details": { + "rate": 0.0 + }, + "processors": 1, + "rates_mode": "basic", + "run_queue": 0, + "running": true, + "sasl_log_file": "/var/log/rabbitmq/rabbit@vagrant-ubuntu-trusty-64-sasl.log", + "sockets_total": 829, + "sockets_used": 45, + "sockets_used_details": { + "rate": 0.0 + }, + "type": "disc", + "uptime": 7464827 + } +] \ No newline at end of file diff --git a/plugins/inputs/rabbitmq/testdata/overview.json 
b/plugins/inputs/rabbitmq/testdata/overview.json new file mode 100644 index 000000000..a4cbb2ad6 --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/overview.json @@ -0,0 +1,63 @@ +{ + "message_stats": { + "ack": 5246, + "ack_details": { + "rate": 0.0 + }, + "deliver": 5234, + "deliver_details": { + "rate": 0.0 + }, + "deliver_get": 3333, + "deliver_get_details": { + "rate": 0.0 + }, + "publish": 5258, + "publish_details": { + "rate": 0.0 + }, + "return_unroutable": 10, + "return_unroutable_details": { + "rate": 3.3 + } + }, + "object_totals": { + "channels": 44, + "connections": 44, + "consumers": 65, + "exchanges": 43, + "queues": 62 + }, + "queue_totals": { + "messages": 5, + "messages_details": { + "rate": 0.0 + }, + "messages_ready": 32, + "messages_ready_details": { + "rate": 0.0 + }, + "messages_unacknowledged": 27, + "messages_unacknowledged_details": { + "rate": 0.0 + } + }, + "listeners": [ + { + "name": "rabbit@node-a", + "protocol": "amqp" + }, + { + "name": "rabbit@node-b", + "protocol": "amqp" + }, + { + "name": "rabbit@node-a", + "protocol": "clustering" + }, + { + "name": "rabbit@node-b", + "protocol": "clustering" + } + ] +} \ No newline at end of file diff --git a/plugins/inputs/rabbitmq/testdata/queues.json b/plugins/inputs/rabbitmq/testdata/queues.json new file mode 100644 index 000000000..356e1a466 --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/queues.json @@ -0,0 +1,114 @@ +[ + { + "messages_details": { + "rate": 0.0 + }, + "messages": 44, + "messages_unacknowledged_details": { + "rate": 0.0 + }, + "messages_unacknowledged": 44, + "messages_ready_details": { + "rate": 0.0 + }, + "messages_ready": 32, + "reductions_details": { + "rate": 223.0 + }, + "reductions": 15875433, + "message_stats": { + "deliver_get_details": { + "rate": 0.2 + }, + "deliver_get": 3457, + "ack_details": { + "rate": 9.9 + }, + "ack": 3457, + "redeliver_details": { + "rate": 2.5 + }, + "redeliver": 33, + "deliver_no_ack_details": { + "rate": 0.0 + }, + "deliver_no_ack": 0, + "deliver_details": { + "rate": 333.4 + }, + "deliver": 22222, + "get_no_ack_details": { + "rate": 0.0 + }, + "get_no_ack": 0, + "get_details": { + "rate": 0.0 + }, + "get": 0, + "publish_details": { + "rate": 11.2 + }, + "publish": 3457 + }, + "node": "rabbit@rmqlocal-0.rmqlocal.ankorabbitstatefulset3.svc.cluster.local", + "arguments": { + "x-expires": 1800000, + "x-ha-policy": "all" + }, + "exclusive": false, + "auto_delete": false, + "durable": false, + "vhost": "sorandomsorandom", + "name": "reply_a716f0523cd44941ad2ea6ce4a3869c3", + "message_bytes_paged_out": 0, + "messages_paged_out": 0, + "idle_since": "2015-11-01 8:22:14", + "backing_queue_status": { + "avg_ack_egress_rate": 0.2374460025857711, + "avg_ack_ingress_rate": 0.2374460025857711, + "avg_egress_rate": 0.2374460025857711, + "avg_ingress_rate": 0.2374460025857711, + "delta": [ + "delta", + "undefined", + 0, + 0, + "undefined" + ], + "len": 0, + "mode": "default", + "next_seq_id": 3457, + "q1": 0, + "q2": 0, + "q3": 0, + "q4": 0, + "target_ram_count": 0 + }, + "head_message_timestamp": null, + "message_bytes_persistent": 7, + "message_bytes_ram": 6, + "message_bytes_unacknowledged": 5, + "message_bytes_ready": 4, + "message_bytes": 3, + "messages_persistent": 0, + "messages_unacknowledged_ram": 0, + "messages_ready_ram": 0, + "messages_ram": 0, + "garbage_collection": { + "minor_gcs": 314, + "fullsweep_after": 65535, + "min_heap_size": 233, + "min_bin_vheap_size": 46422, + "max_heap_size": 0 + }, + "state": "running", + "recoverable_slaves": null, + 
"memory": 143776, + "consumer_utilisation": 1.0, + "consumers": 3, + "exclusive_consumer_tag": null, + "effective_policy_definition": [], + "operator_policy": null, + "policy": null + } +] \ No newline at end of file From 7591a50d526abbed2f408ebb605fc4e66d4840c7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 9 Jul 2018 17:39:51 -0700 Subject: [PATCH 0002/1815] Add path tag to tail tests --- plugins/inputs/tail/tail_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index e8a16cc5c..23df0d0b8 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -43,6 +43,7 @@ func TestTailFromBeginning(t *testing.T) { }, map[string]string{ "mytag": "foo", + "path": tmpfile.Name(), }) } @@ -84,6 +85,7 @@ func TestTailFromEnd(t *testing.T) { }, map[string]string{ "othertag": "foo", + "path": tmpfile.Name(), }) assert.Len(t, acc.Metrics, 1) } From 9a14d1f0743d025c3d0f0cfc02d8a88e547a107f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 10 Jul 2018 20:20:44 -0700 Subject: [PATCH 0003/1815] Fix quoting in nvidia_smi input --- plugins/inputs/nvidia_smi/README.md | 8 ++++---- plugins/inputs/nvidia_smi/nvidia_smi.go | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/nvidia_smi/README.md b/plugins/inputs/nvidia_smi/README.md index 5d0cc99c6..97fd9ff04 100644 --- a/plugins/inputs/nvidia_smi/README.md +++ b/plugins/inputs/nvidia_smi/README.md @@ -7,11 +7,11 @@ This plugin uses a query on the [`nvidia-smi`](https://developer.nvidia.com/nvid ```toml # Pulls statistics from nvidia GPUs attached to the host [[inputs.nvidia_smi]] -## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath -# bin_path = /usr/bin/nvidia-smi + ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath + # bin_path = "/usr/bin/nvidia-smi" -## Optional: timeout for GPU polling -# timeout = 5s + ## Optional: timeout for GPU polling + # timeout = "5s" ``` ### Metrics diff --git a/plugins/inputs/nvidia_smi/nvidia_smi.go b/plugins/inputs/nvidia_smi/nvidia_smi.go index 84784b765..abce80dcb 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi.go @@ -50,11 +50,11 @@ func (smi *NvidiaSMI) Description() string { // SampleConfig returns the sample configuration for the NvidiaSMI plugin func (smi *NvidiaSMI) SampleConfig() string { return ` -## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath -# bin_path = /usr/bin/nvidia-smi + ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath + # bin_path = "/usr/bin/nvidia-smi" -## Optional: timeout for GPU polling -# timeout = 5s + ## Optional: timeout for GPU polling + # timeout = "5s" ` } From 7b73b0db3a6bb6299c6201547bc77744e2107af0 Mon Sep 17 00:00:00 2001 From: Steve Domino Date: Wed, 11 Jul 2018 17:43:49 -0600 Subject: [PATCH 0004/1815] Moved system package inputs out to top level (#4406) --- plugins/inputs/all/all.go | 10 ++++++ .../{system/CPU_README.md => cpu/README.md} | 0 plugins/inputs/{system => cpu}/cpu.go | 9 +++--- plugins/inputs/{system => cpu}/cpu_test.go | 17 +++++----- .../{system/DISK_README.md => disk/README.md} | 0 plugins/inputs/{system => disk}/disk.go | 7 ++-- plugins/inputs/{system => disk}/disk_test.go | 9 +++--- .../DISKIO_README.md => diskio/README.md} | 0 plugins/inputs/{system => diskio}/diskio.go | 7 ++-- .../inputs/{system => diskio}/diskio_linux.go | 2 +- .../{system => 
diskio}/diskio_linux_test.go | 2 +- .../inputs/{system => diskio}/diskio_other.go | 2 +- .../inputs/{system => diskio}/diskio_test.go | 5 +-- .../KERNEL_README.md => kernel/README.md} | 0 plugins/inputs/{system => kernel}/kernel.go | 2 +- .../{system => kernel}/kernel_notlinux.go | 2 +- .../inputs/{system => kernel}/kernel_test.go | 4 +-- .../README.md} | 0 .../kernel_vmstat.go | 2 +- .../kernel_vmstat/kernel_vmstat_notlinux.go | 3 ++ .../kernel_vmstat_test.go | 10 +++--- .../README.md} | 0 .../linux_sysctl_fs.go | 2 +- .../linux_sysctl_fs_test.go | 2 +- .../{system/MEM_README.md => mem/README.md} | 0 plugins/inputs/{system => mem}/memory.go | 7 ++-- plugins/inputs/{system => mem}/memory_test.go | 5 +-- .../inputs/{system => net}/NETSTAT_README.md | 19 +++++++---- plugins/inputs/{system => net}/NET_README.md | 0 plugins/inputs/{system => net}/net.go | 7 ++-- plugins/inputs/{system => net}/net_test.go | 5 +-- plugins/inputs/{system => net}/netstat.go | 7 ++-- .../README.md} | 0 .../inputs/{system => processes}/processes.go | 5 +-- .../{system => processes}/processes_test.go | 4 ++- plugins/inputs/processes/processes_windows.go | 3 ++ .../{system/SWAP_README.md => swap/README.md} | 0 plugins/inputs/{system => swap}/swap.go | 7 ++-- plugins/inputs/{system => swap}/swap_test.go | 5 +-- .../system/{SYSTEM_README.md => README.md} | 0 plugins/inputs/system/mock_PS.go | 12 +++---- plugins/inputs/system/ps.go | 32 +++++++++---------- 42 files changed, 126 insertions(+), 89 deletions(-) rename plugins/inputs/{system/CPU_README.md => cpu/README.md} (100%) rename plugins/inputs/{system => cpu}/cpu.go (95%) rename plugins/inputs/{system => cpu}/cpu_test.go (97%) rename plugins/inputs/{system/DISK_README.md => disk/README.md} (100%) rename plugins/inputs/{system => disk}/disk.go (95%) rename plugins/inputs/{system => disk}/disk_test.go (96%) rename plugins/inputs/{system/DISKIO_README.md => diskio/README.md} (100%) rename plugins/inputs/{system => diskio}/diskio.go (97%) rename plugins/inputs/{system => diskio}/diskio_linux.go (98%) rename plugins/inputs/{system => diskio}/diskio_linux_test.go (99%) rename plugins/inputs/{system => diskio}/diskio_other.go (90%) rename plugins/inputs/{system => diskio}/diskio_test.go (96%) rename plugins/inputs/{system/KERNEL_README.md => kernel/README.md} (100%) rename plugins/inputs/{system => kernel}/kernel.go (99%) rename plugins/inputs/{system => kernel}/kernel_notlinux.go (96%) rename plugins/inputs/{system => kernel}/kernel_test.go (98%) rename plugins/inputs/{system/KERNEL_VMSTAT_README.md => kernel_vmstat/README.md} (100%) rename plugins/inputs/{system => kernel_vmstat}/kernel_vmstat.go (98%) create mode 100644 plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go rename plugins/inputs/{system => kernel_vmstat}/kernel_vmstat_test.go (97%) rename plugins/inputs/{system/LINUX_SYSCTL_FS_README.md => linux_sysctl_fs/README.md} (100%) rename plugins/inputs/{system => linux_sysctl_fs}/linux_sysctl_fs.go (98%) rename plugins/inputs/{system => linux_sysctl_fs}/linux_sysctl_fs_test.go (98%) rename plugins/inputs/{system/MEM_README.md => mem/README.md} (100%) rename plugins/inputs/{system => mem}/memory.go (90%) rename plugins/inputs/{system => mem}/memory_test.go (93%) rename plugins/inputs/{system => net}/NETSTAT_README.md (67%) rename plugins/inputs/{system => net}/NET_README.md (100%) rename plugins/inputs/{system => net}/net.go (95%) rename plugins/inputs/{system => net}/net_test.go (96%) rename plugins/inputs/{system => net}/netstat.go (92%) rename 
plugins/inputs/{system/PROCESSES_README.md => processes/README.md} (100%) rename plugins/inputs/{system => processes}/processes.go (97%) rename plugins/inputs/{system => processes}/processes_test.go (98%) create mode 100644 plugins/inputs/processes/processes_windows.go rename plugins/inputs/{system/SWAP_README.md => swap/README.md} (100%) rename plugins/inputs/{system => swap}/swap.go (88%) rename plugins/inputs/{system => swap}/swap_test.go (89%) rename plugins/inputs/system/{SYSTEM_README.md => README.md} (100%) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index b2be2be5a..594e0ea42 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -17,7 +17,10 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/consul" _ "github.com/influxdata/telegraf/plugins/inputs/couchbase" _ "github.com/influxdata/telegraf/plugins/inputs/couchdb" + _ "github.com/influxdata/telegraf/plugins/inputs/cpu" _ "github.com/influxdata/telegraf/plugins/inputs/dcos" + _ "github.com/influxdata/telegraf/plugins/inputs/disk" + _ "github.com/influxdata/telegraf/plugins/inputs/diskio" _ "github.com/influxdata/telegraf/plugins/inputs/disque" _ "github.com/influxdata/telegraf/plugins/inputs/dmcache" _ "github.com/influxdata/telegraf/plugins/inputs/dns_query" @@ -48,12 +51,16 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer_legacy" _ "github.com/influxdata/telegraf/plugins/inputs/kapacitor" + _ "github.com/influxdata/telegraf/plugins/inputs/kernel" + _ "github.com/influxdata/telegraf/plugins/inputs/kernel_vmstat" _ "github.com/influxdata/telegraf/plugins/inputs/kubernetes" _ "github.com/influxdata/telegraf/plugins/inputs/leofs" + _ "github.com/influxdata/telegraf/plugins/inputs/linux_sysctl_fs" _ "github.com/influxdata/telegraf/plugins/inputs/logparser" _ "github.com/influxdata/telegraf/plugins/inputs/lustre2" _ "github.com/influxdata/telegraf/plugins/inputs/mailchimp" _ "github.com/influxdata/telegraf/plugins/inputs/mcrouter" + _ "github.com/influxdata/telegraf/plugins/inputs/mem" _ "github.com/influxdata/telegraf/plugins/inputs/memcached" _ "github.com/influxdata/telegraf/plugins/inputs/mesos" _ "github.com/influxdata/telegraf/plugins/inputs/minecraft" @@ -62,6 +69,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/mysql" _ "github.com/influxdata/telegraf/plugins/inputs/nats" _ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer" + _ "github.com/influxdata/telegraf/plugins/inputs/net" _ "github.com/influxdata/telegraf/plugins/inputs/net_response" _ "github.com/influxdata/telegraf/plugins/inputs/nginx" _ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus" @@ -80,6 +88,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/postgresql" _ "github.com/influxdata/telegraf/plugins/inputs/postgresql_extensible" _ "github.com/influxdata/telegraf/plugins/inputs/powerdns" + _ "github.com/influxdata/telegraf/plugins/inputs/processes" _ "github.com/influxdata/telegraf/plugins/inputs/procstat" _ "github.com/influxdata/telegraf/plugins/inputs/prometheus" _ "github.com/influxdata/telegraf/plugins/inputs/puppetagent" @@ -97,6 +106,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/solr" _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver" _ "github.com/influxdata/telegraf/plugins/inputs/statsd" + _ "github.com/influxdata/telegraf/plugins/inputs/swap" _ "github.com/influxdata/telegraf/plugins/inputs/syslog" _ 
"github.com/influxdata/telegraf/plugins/inputs/sysstat" _ "github.com/influxdata/telegraf/plugins/inputs/system" diff --git a/plugins/inputs/system/CPU_README.md b/plugins/inputs/cpu/README.md similarity index 100% rename from plugins/inputs/system/CPU_README.md rename to plugins/inputs/cpu/README.md diff --git a/plugins/inputs/system/cpu.go b/plugins/inputs/cpu/cpu.go similarity index 95% rename from plugins/inputs/system/cpu.go rename to plugins/inputs/cpu/cpu.go index 99fa451b3..e073309e4 100644 --- a/plugins/inputs/system/cpu.go +++ b/plugins/inputs/cpu/cpu.go @@ -1,4 +1,4 @@ -package system +package cpu import ( "fmt" @@ -6,11 +6,12 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/shirou/gopsutil/cpu" ) type CPUStats struct { - ps PS + ps system.PS lastStats map[string]cpu.TimesStat PerCPU bool `toml:"percpu"` @@ -19,7 +20,7 @@ type CPUStats struct { ReportActive bool `toml:"report_active"` } -func NewCPUStats(ps PS) *CPUStats { +func NewCPUStats(ps system.PS) *CPUStats { return &CPUStats{ ps: ps, CollectCPUTime: true, @@ -146,7 +147,7 @@ func init() { return &CPUStats{ PerCPU: true, TotalCPU: true, - ps: newSystemPS(), + ps: system.NewSystemPS(), } }) } diff --git a/plugins/inputs/system/cpu_test.go b/plugins/inputs/cpu/cpu_test.go similarity index 97% rename from plugins/inputs/system/cpu_test.go rename to plugins/inputs/cpu/cpu_test.go index 43825fca7..b4a6f87ff 100644 --- a/plugins/inputs/system/cpu_test.go +++ b/plugins/inputs/cpu/cpu_test.go @@ -1,9 +1,10 @@ -package system +package cpu import ( "fmt" "testing" + "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" "github.com/shirou/gopsutil/cpu" "github.com/stretchr/testify/assert" @@ -11,7 +12,7 @@ import ( ) func TestCPUStats(t *testing.T) { - var mps MockPS + var mps system.MockPS defer mps.AssertExpectations(t) var acc testutil.Accumulator @@ -68,7 +69,7 @@ func TestCPUStats(t *testing.T) { assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 3.1, 0, cputags) assertContainsTaggedFloat(t, &acc, "cpu", "time_guest_nice", 0.324, 0, cputags) - mps2 := MockPS{} + mps2 := system.MockPS{} mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil) cs.ps = &mps2 @@ -153,8 +154,8 @@ func assertContainsTaggedFloat( // TestCPUCountChange tests that no errors are encountered if the number of // CPUs increases as reported with LXC. func TestCPUCountIncrease(t *testing.T) { - var mps MockPS - var mps2 MockPS + var mps system.MockPS + var mps2 system.MockPS var acc testutil.Accumulator var err error @@ -188,7 +189,7 @@ func TestCPUCountIncrease(t *testing.T) { // TestCPUTimesDecrease tests that telegraf continue to works after // CPU times decrease, which seems to occur when Linux system is suspended. 
func TestCPUTimesDecrease(t *testing.T) { - var mps MockPS + var mps system.MockPS defer mps.AssertExpectations(t) var acc testutil.Accumulator @@ -230,7 +231,7 @@ func TestCPUTimesDecrease(t *testing.T) { assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80, 0, cputags) assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 2, 0, cputags) - mps2 := MockPS{} + mps2 := system.MockPS{} mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil) cs.ps = &mps2 @@ -238,7 +239,7 @@ func TestCPUTimesDecrease(t *testing.T) { err = cs.Gather(&acc) require.Error(t, err) - mps3 := MockPS{} + mps3 := system.MockPS{} mps3.On("CPUTimes").Return([]cpu.TimesStat{cts3}, nil) cs.ps = &mps3 diff --git a/plugins/inputs/system/DISK_README.md b/plugins/inputs/disk/README.md similarity index 100% rename from plugins/inputs/system/DISK_README.md rename to plugins/inputs/disk/README.md diff --git a/plugins/inputs/system/disk.go b/plugins/inputs/disk/disk.go similarity index 95% rename from plugins/inputs/system/disk.go rename to plugins/inputs/disk/disk.go index 172261560..5a30dbecf 100644 --- a/plugins/inputs/system/disk.go +++ b/plugins/inputs/disk/disk.go @@ -1,4 +1,4 @@ -package system +package disk import ( "fmt" @@ -6,10 +6,11 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/system" ) type DiskStats struct { - ps PS + ps system.PS // Legacy support Mountpoints []string @@ -105,7 +106,7 @@ func parseOptions(opts string) MountOptions { } func init() { - ps := newSystemPS() + ps := system.NewSystemPS() inputs.Add("disk", func() telegraf.Input { return &DiskStats{ps: ps} }) diff --git a/plugins/inputs/system/disk_test.go b/plugins/inputs/disk/disk_test.go similarity index 96% rename from plugins/inputs/system/disk_test.go rename to plugins/inputs/disk/disk_test.go index 938ca1b06..c20df41db 100644 --- a/plugins/inputs/system/disk_test.go +++ b/plugins/inputs/disk/disk_test.go @@ -1,9 +1,10 @@ -package system +package disk import ( "os" "testing" + "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" "github.com/shirou/gopsutil/disk" "github.com/stretchr/testify/assert" @@ -17,7 +18,7 @@ type MockFileInfo struct { func TestDiskUsage(t *testing.T) { mck := &mock.Mock{} - mps := MockPSDisk{&systemPS{&mockDiskUsage{mck}}, mck} + mps := system.MockPSDisk{SystemPS: &system.SystemPS{PSDiskDeps: &system.MockDiskUsage{Mock: mck}}, Mock: mck} defer mps.AssertExpectations(t) var acc testutil.Accumulator @@ -229,7 +230,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mck := &mock.Mock{} - mps := MockPSDisk{&systemPS{&mockDiskUsage{mck}}, mck} + mps := system.MockPSDisk{SystemPS: &system.SystemPS{PSDiskDeps: &system.MockDiskUsage{Mock: mck}}, Mock: mck} defer mps.AssertExpectations(t) var acc testutil.Accumulator @@ -252,7 +253,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { } func TestDiskStats(t *testing.T) { - var mps MockPS + var mps system.MockPS defer mps.AssertExpectations(t) var acc testutil.Accumulator var err error diff --git a/plugins/inputs/system/DISKIO_README.md b/plugins/inputs/diskio/README.md similarity index 100% rename from plugins/inputs/system/DISKIO_README.md rename to plugins/inputs/diskio/README.md diff --git a/plugins/inputs/system/diskio.go b/plugins/inputs/diskio/diskio.go similarity index 97% rename from plugins/inputs/system/diskio.go rename to plugins/inputs/diskio/diskio.go index 
21e70d5eb..54e74d518 100644 --- a/plugins/inputs/system/diskio.go +++ b/plugins/inputs/diskio/diskio.go @@ -1,4 +1,4 @@ -package system +package diskio import ( "fmt" @@ -9,6 +9,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/system" ) var ( @@ -16,7 +17,7 @@ var ( ) type DiskIO struct { - ps PS + ps system.PS Devices []string DeviceTags []string @@ -189,7 +190,7 @@ func (s *DiskIO) diskTags(devName string) map[string]string { } func init() { - ps := newSystemPS() + ps := system.NewSystemPS() inputs.Add("diskio", func() telegraf.Input { return &DiskIO{ps: ps, SkipSerialNumber: true} }) diff --git a/plugins/inputs/system/diskio_linux.go b/plugins/inputs/diskio/diskio_linux.go similarity index 98% rename from plugins/inputs/system/diskio_linux.go rename to plugins/inputs/diskio/diskio_linux.go index b15f74383..38240a0a1 100644 --- a/plugins/inputs/system/diskio_linux.go +++ b/plugins/inputs/diskio/diskio_linux.go @@ -1,4 +1,4 @@ -package system +package diskio import ( "bufio" diff --git a/plugins/inputs/system/diskio_linux_test.go b/plugins/inputs/diskio/diskio_linux_test.go similarity index 99% rename from plugins/inputs/system/diskio_linux_test.go rename to plugins/inputs/diskio/diskio_linux_test.go index 96aed211b..b18bb67a8 100644 --- a/plugins/inputs/system/diskio_linux_test.go +++ b/plugins/inputs/diskio/diskio_linux_test.go @@ -1,6 +1,6 @@ // +build linux -package system +package diskio import ( "io/ioutil" diff --git a/plugins/inputs/system/diskio_other.go b/plugins/inputs/diskio/diskio_other.go similarity index 90% rename from plugins/inputs/system/diskio_other.go rename to plugins/inputs/diskio/diskio_other.go index 0a3abb686..07fb8c3b8 100644 --- a/plugins/inputs/system/diskio_other.go +++ b/plugins/inputs/diskio/diskio_other.go @@ -1,6 +1,6 @@ // +build !linux -package system +package diskio type diskInfoCache struct{} diff --git a/plugins/inputs/system/diskio_test.go b/plugins/inputs/diskio/diskio_test.go similarity index 96% rename from plugins/inputs/system/diskio_test.go rename to plugins/inputs/diskio/diskio_test.go index d8b908c3e..ac5833165 100644 --- a/plugins/inputs/system/diskio_test.go +++ b/plugins/inputs/diskio/diskio_test.go @@ -1,8 +1,9 @@ -package system +package diskio import ( "testing" + "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" "github.com/shirou/gopsutil/disk" "github.com/stretchr/testify/require" @@ -96,7 +97,7 @@ func TestDiskIO(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var mps MockPS + var mps system.MockPS mps.On("DiskIO").Return(tt.result.stats, tt.result.err) var acc testutil.Accumulator diff --git a/plugins/inputs/system/KERNEL_README.md b/plugins/inputs/kernel/README.md similarity index 100% rename from plugins/inputs/system/KERNEL_README.md rename to plugins/inputs/kernel/README.md diff --git a/plugins/inputs/system/kernel.go b/plugins/inputs/kernel/kernel.go similarity index 99% rename from plugins/inputs/system/kernel.go rename to plugins/inputs/kernel/kernel.go index 1b3bc1dfa..461c9564a 100644 --- a/plugins/inputs/system/kernel.go +++ b/plugins/inputs/kernel/kernel.go @@ -1,6 +1,6 @@ // +build linux -package system +package kernel import ( "bytes" diff --git a/plugins/inputs/system/kernel_notlinux.go b/plugins/inputs/kernel/kernel_notlinux.go similarity index 96% rename from plugins/inputs/system/kernel_notlinux.go 
rename to plugins/inputs/kernel/kernel_notlinux.go index 9053b5c04..05f6e55c4 100644 --- a/plugins/inputs/system/kernel_notlinux.go +++ b/plugins/inputs/kernel/kernel_notlinux.go @@ -1,6 +1,6 @@ // +build !linux -package system +package kernel import ( "github.com/influxdata/telegraf" diff --git a/plugins/inputs/system/kernel_test.go b/plugins/inputs/kernel/kernel_test.go similarity index 98% rename from plugins/inputs/system/kernel_test.go rename to plugins/inputs/kernel/kernel_test.go index bf090eb88..d356f4380 100644 --- a/plugins/inputs/system/kernel_test.go +++ b/plugins/inputs/kernel/kernel_test.go @@ -1,6 +1,6 @@ // +build linux -package system +package kernel import ( "io/ioutil" @@ -168,7 +168,7 @@ const entropyStatFile_Partial = `1024` const entropyStatFile_Invalid = `` func makeFakeStatFile(content []byte) string { - tmpfile, err := ioutil.TempFile("", "kerneltest") + tmpfile, err := ioutil.TempFile("", "kernel_test") if err != nil { panic(err) } diff --git a/plugins/inputs/system/KERNEL_VMSTAT_README.md b/plugins/inputs/kernel_vmstat/README.md similarity index 100% rename from plugins/inputs/system/KERNEL_VMSTAT_README.md rename to plugins/inputs/kernel_vmstat/README.md diff --git a/plugins/inputs/system/kernel_vmstat.go b/plugins/inputs/kernel_vmstat/kernel_vmstat.go similarity index 98% rename from plugins/inputs/system/kernel_vmstat.go rename to plugins/inputs/kernel_vmstat/kernel_vmstat.go index 197d81185..ffc56d97d 100644 --- a/plugins/inputs/system/kernel_vmstat.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat.go @@ -1,6 +1,6 @@ // +build linux -package system +package kernel_vmstat import ( "bytes" diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go b/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go new file mode 100644 index 000000000..11a5d2e55 --- /dev/null +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go @@ -0,0 +1,3 @@ +// +build !linux + +package kernel_vmstat diff --git a/plugins/inputs/system/kernel_vmstat_test.go b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go similarity index 97% rename from plugins/inputs/system/kernel_vmstat_test.go rename to plugins/inputs/kernel_vmstat/kernel_vmstat_test.go index ed0c03e28..bba615a74 100644 --- a/plugins/inputs/system/kernel_vmstat_test.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go @@ -1,6 +1,6 @@ // +build linux -package system +package kernel_vmstat import ( "io/ioutil" @@ -13,7 +13,7 @@ import ( ) func TestFullVmStatProcFile(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(vmStatFile_Full)) + tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Full)) defer os.Remove(tmpfile) k := KernelVmstat{ @@ -121,7 +121,7 @@ func TestFullVmStatProcFile(t *testing.T) { } func TestPartialVmStatProcFile(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(vmStatFile_Partial)) + tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Partial)) defer os.Remove(tmpfile) k := KernelVmstat{ @@ -151,7 +151,7 @@ func TestPartialVmStatProcFile(t *testing.T) { } func TestInvalidVmStatProcFile1(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(vmStatFile_Invalid)) + tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Invalid)) defer os.Remove(tmpfile) k := KernelVmstat{ @@ -164,7 +164,7 @@ func TestInvalidVmStatProcFile1(t *testing.T) { } func TestNoVmStatProcFile(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(vmStatFile_Invalid)) + tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Invalid)) os.Remove(tmpfile) k := KernelVmstat{ diff --git a/plugins/inputs/system/LINUX_SYSCTL_FS_README.md 
b/plugins/inputs/linux_sysctl_fs/README.md similarity index 100% rename from plugins/inputs/system/LINUX_SYSCTL_FS_README.md rename to plugins/inputs/linux_sysctl_fs/README.md diff --git a/plugins/inputs/system/linux_sysctl_fs.go b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go similarity index 98% rename from plugins/inputs/system/linux_sysctl_fs.go rename to plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go index 55ebcb668..ed2496340 100644 --- a/plugins/inputs/system/linux_sysctl_fs.go +++ b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go @@ -1,4 +1,4 @@ -package system +package linux_sysctl_fs import ( "bytes" diff --git a/plugins/inputs/system/linux_sysctl_fs_test.go b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go similarity index 98% rename from plugins/inputs/system/linux_sysctl_fs_test.go rename to plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go index 6561465cb..78011e288 100644 --- a/plugins/inputs/system/linux_sysctl_fs_test.go +++ b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go @@ -1,4 +1,4 @@ -package system +package linux_sysctl_fs import ( "io/ioutil" diff --git a/plugins/inputs/system/MEM_README.md b/plugins/inputs/mem/README.md similarity index 100% rename from plugins/inputs/system/MEM_README.md rename to plugins/inputs/mem/README.md diff --git a/plugins/inputs/system/memory.go b/plugins/inputs/mem/memory.go similarity index 90% rename from plugins/inputs/system/memory.go rename to plugins/inputs/mem/memory.go index b44fabc49..f664dd3f4 100644 --- a/plugins/inputs/system/memory.go +++ b/plugins/inputs/mem/memory.go @@ -1,14 +1,15 @@ -package system +package mem import ( "fmt" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/system" ) type MemStats struct { - ps PS + ps system.PS } func (_ *MemStats) Description() string { @@ -43,7 +44,7 @@ func (s *MemStats) Gather(acc telegraf.Accumulator) error { } func init() { - ps := newSystemPS() + ps := system.NewSystemPS() inputs.Add("mem", func() telegraf.Input { return &MemStats{ps: ps} }) diff --git a/plugins/inputs/system/memory_test.go b/plugins/inputs/mem/memory_test.go similarity index 93% rename from plugins/inputs/system/memory_test.go rename to plugins/inputs/mem/memory_test.go index 34914db9c..ef9af8d22 100644 --- a/plugins/inputs/system/memory_test.go +++ b/plugins/inputs/mem/memory_test.go @@ -1,15 +1,16 @@ -package system +package mem import ( "testing" + "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" "github.com/shirou/gopsutil/mem" "github.com/stretchr/testify/require" ) func TestMemStats(t *testing.T) { - var mps MockPS + var mps system.MockPS var err error defer mps.AssertExpectations(t) var acc testutil.Accumulator diff --git a/plugins/inputs/system/NETSTAT_README.md b/plugins/inputs/net/NETSTAT_README.md similarity index 67% rename from plugins/inputs/system/NETSTAT_README.md rename to plugins/inputs/net/NETSTAT_README.md index 636a7e3af..d0f39f5e4 100644 --- a/plugins/inputs/system/NETSTAT_README.md +++ b/plugins/inputs/net/NETSTAT_README.md @@ -1,10 +1,18 @@ -Telegraf plugin: NETSTAT +# Netstat Input Plugin -#### Description +This plugin collects TCP connections state and UDP socket counts by using `lsof`. -The NETSTAT plugin collects TCP connections state and UDP socket counts by using `lsof`. +### Configuration: -Supported TCP Connection states are follows. 
+``` toml +# Collect TCP connections state and UDP socket counts +[[inputs.netstat]] + # no configuration +``` + +# Measurements: + +Supported TCP Connection states are follows. - established - syn_sent @@ -19,8 +27,6 @@ Supported TCP Connection states are follows. - closing - none - -# Measurements: ### TCP Connection State measurements: Meta: @@ -49,4 +55,3 @@ Meta: Measurement names: - udp_socket - diff --git a/plugins/inputs/system/NET_README.md b/plugins/inputs/net/NET_README.md similarity index 100% rename from plugins/inputs/system/NET_README.md rename to plugins/inputs/net/NET_README.md diff --git a/plugins/inputs/system/net.go b/plugins/inputs/net/net.go similarity index 95% rename from plugins/inputs/system/net.go rename to plugins/inputs/net/net.go index a7ba5c63d..35d4a2448 100644 --- a/plugins/inputs/system/net.go +++ b/plugins/inputs/net/net.go @@ -1,4 +1,4 @@ -package system +package net import ( "fmt" @@ -8,11 +8,12 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/system" ) type NetIOStats struct { filter filter.Filter - ps PS + ps system.PS skipChecks bool IgnoreProtocolStats bool @@ -119,6 +120,6 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("net", func() telegraf.Input { - return &NetIOStats{ps: newSystemPS()} + return &NetIOStats{ps: system.NewSystemPS()} }) } diff --git a/plugins/inputs/system/net_test.go b/plugins/inputs/net/net_test.go similarity index 96% rename from plugins/inputs/system/net_test.go rename to plugins/inputs/net/net_test.go index 83b9bd460..035dbaecd 100644 --- a/plugins/inputs/system/net_test.go +++ b/plugins/inputs/net/net_test.go @@ -1,16 +1,17 @@ -package system +package net import ( "syscall" "testing" + "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" "github.com/shirou/gopsutil/net" "github.com/stretchr/testify/require" ) func TestNetStats(t *testing.T) { - var mps MockPS + var mps system.MockPS var err error defer mps.AssertExpectations(t) var acc testutil.Accumulator diff --git a/plugins/inputs/system/netstat.go b/plugins/inputs/net/netstat.go similarity index 92% rename from plugins/inputs/system/netstat.go rename to plugins/inputs/net/netstat.go index 1699e0808..555b396af 100644 --- a/plugins/inputs/system/netstat.go +++ b/plugins/inputs/net/netstat.go @@ -1,4 +1,4 @@ -package system +package net import ( "fmt" @@ -6,10 +6,11 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/system" ) type NetStats struct { - ps PS + ps system.PS } func (_ *NetStats) Description() string { @@ -66,6 +67,6 @@ func (s *NetStats) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("netstat", func() telegraf.Input { - return &NetStats{ps: newSystemPS()} + return &NetStats{ps: system.NewSystemPS()} }) } diff --git a/plugins/inputs/system/PROCESSES_README.md b/plugins/inputs/processes/README.md similarity index 100% rename from plugins/inputs/system/PROCESSES_README.md rename to plugins/inputs/processes/README.md diff --git a/plugins/inputs/system/processes.go b/plugins/inputs/processes/processes.go similarity index 97% rename from plugins/inputs/system/processes.go rename to plugins/inputs/processes/processes.go index 9258bc417..c71d72f50 100644 --- a/plugins/inputs/system/processes.go +++ b/plugins/inputs/processes/processes.go @@ -1,6 
+1,6 @@ // +build !windows -package system +package processes import ( "bytes" @@ -16,6 +16,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/linux_sysctl_fs" ) type Processes struct { @@ -133,7 +134,7 @@ func (p *Processes) gatherFromPS(fields map[string]interface{}) error { // get process states from /proc/(pid)/stat files func (p *Processes) gatherFromProc(fields map[string]interface{}) error { - filenames, err := filepath.Glob(GetHostProc() + "/[0-9]*/stat") + filenames, err := filepath.Glob(linux_sysctl_fs.GetHostProc() + "/[0-9]*/stat") if err != nil { return err diff --git a/plugins/inputs/system/processes_test.go b/plugins/inputs/processes/processes_test.go similarity index 98% rename from plugins/inputs/system/processes_test.go rename to plugins/inputs/processes/processes_test.go index 5401e1a70..27fdf76a1 100644 --- a/plugins/inputs/system/processes_test.go +++ b/plugins/inputs/processes/processes_test.go @@ -1,4 +1,6 @@ -package system +// +build !windows + +package processes import ( "fmt" diff --git a/plugins/inputs/processes/processes_windows.go b/plugins/inputs/processes/processes_windows.go new file mode 100644 index 000000000..32c73f918 --- /dev/null +++ b/plugins/inputs/processes/processes_windows.go @@ -0,0 +1,3 @@ +// +build windows + +package processes diff --git a/plugins/inputs/system/SWAP_README.md b/plugins/inputs/swap/README.md similarity index 100% rename from plugins/inputs/system/SWAP_README.md rename to plugins/inputs/swap/README.md diff --git a/plugins/inputs/system/swap.go b/plugins/inputs/swap/swap.go similarity index 88% rename from plugins/inputs/system/swap.go rename to plugins/inputs/swap/swap.go index f1f7c8e23..eabb40a03 100644 --- a/plugins/inputs/system/swap.go +++ b/plugins/inputs/swap/swap.go @@ -1,14 +1,15 @@ -package system +package swap import ( "fmt" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/system" ) type SwapStats struct { - ps PS + ps system.PS } func (_ *SwapStats) Description() string { @@ -40,7 +41,7 @@ func (s *SwapStats) Gather(acc telegraf.Accumulator) error { } func init() { - ps := newSystemPS() + ps := system.NewSystemPS() inputs.Add("swap", func() telegraf.Input { return &SwapStats{ps: ps} }) diff --git a/plugins/inputs/system/swap_test.go b/plugins/inputs/swap/swap_test.go similarity index 89% rename from plugins/inputs/system/swap_test.go rename to plugins/inputs/swap/swap_test.go index ec9a0fe54..3f97b354e 100644 --- a/plugins/inputs/system/swap_test.go +++ b/plugins/inputs/swap/swap_test.go @@ -1,15 +1,16 @@ -package system +package swap import ( "testing" + "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" "github.com/shirou/gopsutil/mem" "github.com/stretchr/testify/require" ) func TestSwapStats(t *testing.T) { - var mps MockPS + var mps system.MockPS var err error defer mps.AssertExpectations(t) var acc testutil.Accumulator diff --git a/plugins/inputs/system/SYSTEM_README.md b/plugins/inputs/system/README.md similarity index 100% rename from plugins/inputs/system/SYSTEM_README.md rename to plugins/inputs/system/README.md diff --git a/plugins/inputs/system/mock_PS.go b/plugins/inputs/system/mock_PS.go index d5093f031..323332f3e 100644 --- a/plugins/inputs/system/mock_PS.go +++ b/plugins/inputs/system/mock_PS.go @@ -19,11 +19,11 @@ type MockPS struct { } type MockPSDisk struct { - *systemPS + 
*SystemPS *mock.Mock } -type mockDiskUsage struct { +type MockDiskUsage struct { *mock.Mock } @@ -109,7 +109,7 @@ func (m *MockPS) NetConnections() ([]net.ConnectionStat, error) { return r0, r1 } -func (m *mockDiskUsage) Partitions(all bool) ([]disk.PartitionStat, error) { +func (m *MockDiskUsage) Partitions(all bool) ([]disk.PartitionStat, error) { ret := m.Called(all) r0 := ret.Get(0).([]disk.PartitionStat) @@ -118,12 +118,12 @@ func (m *mockDiskUsage) Partitions(all bool) ([]disk.PartitionStat, error) { return r0, r1 } -func (m *mockDiskUsage) OSGetenv(key string) string { +func (m *MockDiskUsage) OSGetenv(key string) string { ret := m.Called(key) return ret.Get(0).(string) } -func (m *mockDiskUsage) OSStat(name string) (os.FileInfo, error) { +func (m *MockDiskUsage) OSStat(name string) (os.FileInfo, error) { ret := m.Called(name) r0 := ret.Get(0).(os.FileInfo) @@ -132,7 +132,7 @@ func (m *mockDiskUsage) OSStat(name string) (os.FileInfo, error) { return r0, r1 } -func (m *mockDiskUsage) PSDiskUsage(path string) (*disk.UsageStat, error) { +func (m *MockDiskUsage) PSDiskUsage(path string) (*disk.UsageStat, error) { ret := m.Called(path) r0 := ret.Get(0).(*disk.UsageStat) diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go index 81161ae68..038e2a0a8 100644 --- a/plugins/inputs/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -39,17 +39,17 @@ func add(acc telegraf.Accumulator, } } -func newSystemPS() *systemPS { - return &systemPS{&systemPSDisk{}} +func NewSystemPS() *SystemPS { + return &SystemPS{&SystemPSDisk{}} } -type systemPS struct { +type SystemPS struct { PSDiskDeps } -type systemPSDisk struct{} +type SystemPSDisk struct{} -func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.TimesStat, error) { +func (s *SystemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.TimesStat, error) { var cpuTimes []cpu.TimesStat if perCPU { if perCPUTimes, err := cpu.Times(true); err == nil { @@ -68,7 +68,7 @@ func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.TimesStat, error) { return cpuTimes, nil } -func (s *systemPS) DiskUsage( +func (s *SystemPS) DiskUsage( mountPointFilter []string, fstypeExclude []string, ) ([]*disk.UsageStat, []*disk.PartitionStat, error) { @@ -139,19 +139,19 @@ func (s *systemPS) DiskUsage( return usage, partitions, nil } -func (s *systemPS) NetProto() ([]net.ProtoCountersStat, error) { +func (s *SystemPS) NetProto() ([]net.ProtoCountersStat, error) { return net.ProtoCounters(nil) } -func (s *systemPS) NetIO() ([]net.IOCountersStat, error) { +func (s *SystemPS) NetIO() ([]net.IOCountersStat, error) { return net.IOCounters(true) } -func (s *systemPS) NetConnections() ([]net.ConnectionStat, error) { +func (s *SystemPS) NetConnections() ([]net.ConnectionStat, error) { return net.Connections("all") } -func (s *systemPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error) { +func (s *SystemPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error) { m, err := disk.IOCounters(names...) 
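 	// gopsutil returns NotImplementedError on platforms without per-disk
 	// counters; treat that as "no data" rather than a gather error.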
if err == internal.NotImplementedError { return nil, nil @@ -160,26 +160,26 @@ func (s *systemPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error return m, err } -func (s *systemPS) VMStat() (*mem.VirtualMemoryStat, error) { +func (s *SystemPS) VMStat() (*mem.VirtualMemoryStat, error) { return mem.VirtualMemory() } -func (s *systemPS) SwapStat() (*mem.SwapMemoryStat, error) { +func (s *SystemPS) SwapStat() (*mem.SwapMemoryStat, error) { return mem.SwapMemory() } -func (s *systemPSDisk) Partitions(all bool) ([]disk.PartitionStat, error) { +func (s *SystemPSDisk) Partitions(all bool) ([]disk.PartitionStat, error) { return disk.Partitions(all) } -func (s *systemPSDisk) OSGetenv(key string) string { +func (s *SystemPSDisk) OSGetenv(key string) string { return os.Getenv(key) } -func (s *systemPSDisk) OSStat(name string) (os.FileInfo, error) { +func (s *SystemPSDisk) OSStat(name string) (os.FileInfo, error) { return os.Stat(name) } -func (s *systemPSDisk) PSDiskUsage(path string) (*disk.UsageStat, error) { +func (s *SystemPSDisk) PSDiskUsage(path string) (*disk.UsageStat, error) { return disk.Usage(path) } From 9ebf16636d42a5bd26888eb284763e70c8daefaa Mon Sep 17 00:00:00 2001 From: maxunt Date: Wed, 11 Jul 2018 17:29:23 -0700 Subject: [PATCH 0005/1815] Add parse_multivalue to collectd parser (#4403) --- docs/DATA_FORMATS_INPUT.md | 6 ++ internal/config/config.go | 9 ++ plugins/parsers/collectd/parser.go | 109 +++++++++++++++++------- plugins/parsers/collectd/parser_test.go | 20 ++++- plugins/parsers/registry.go | 8 +- 5 files changed, 117 insertions(+), 35 deletions(-) diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index c1192e72b..88282c846 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -479,6 +479,12 @@ You can also change the path to the typesdb or add additional typesdb using collectd_security_level = "encrypt" ## Path of to TypesDB specifications collectd_typesdb = ["/usr/share/collectd/types.db"] + + # Multi-value plugins can be handled two ways. + # "split" will parse and store the multi-value plugin data into separate measurements + # "join" will parse and store the multi-value plugin as a single multi-value measurement. + # "split" is the default behavior for backward compatability with previous versions of influxdb. 
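+  #
+  # For example (values are illustrative), a collectd "load" value list with
+  # data sources shortterm/midterm/longterm is parsed as:
+  #   split: load_shortterm value=0.1, load_midterm value=0.2, load_longterm value=0.3
+  #   join:  load shortterm=0.1,midterm=0.2,longterm=0.3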
+ collectd_parse_multivalue = "split" ``` # Dropwizard: diff --git a/internal/config/config.go b/internal/config/config.go index 8a31c271e..5b3e53457 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1285,6 +1285,14 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { } } + if node, ok := tbl.Fields["collectd_parse_multivalue"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.CollectdSplit = str.Value + } + } + } + if node, ok := tbl.Fields["collectd_typesdb"]; ok { if kv, ok := node.(*ast.KeyValue); ok { if ary, ok := kv.Value.(*ast.Array); ok { @@ -1348,6 +1356,7 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { delete(tbl.Fields, "collectd_auth_file") delete(tbl.Fields, "collectd_security_level") delete(tbl.Fields, "collectd_typesdb") + delete(tbl.Fields, "collectd_parse_multivalue") delete(tbl.Fields, "dropwizard_metric_registry_path") delete(tbl.Fields, "dropwizard_time_path") delete(tbl.Fields, "dropwizard_time_format") diff --git a/plugins/parsers/collectd/parser.go b/plugins/parsers/collectd/parser.go index 20525610c..6b7fbd756 100644 --- a/plugins/parsers/collectd/parser.go +++ b/plugins/parsers/collectd/parser.go @@ -21,7 +21,10 @@ type CollectdParser struct { // DefaultTags will be added to every parsed metric DefaultTags map[string]string - popts network.ParseOpts + //whether or not to split multi value metric into multiple metrics + //default value is split + ParseMultiValue string + popts network.ParseOpts } func (p *CollectdParser) SetParseOpts(popts *network.ParseOpts) { @@ -32,6 +35,7 @@ func NewCollectdParser( authFile string, securityLevel string, typesDB []string, + split string, ) (*CollectdParser, error) { popts := network.ParseOpts{} @@ -64,7 +68,8 @@ func NewCollectdParser( } } - parser := CollectdParser{popts: popts} + parser := CollectdParser{popts: popts, + ParseMultiValue: split} return &parser, nil } @@ -76,7 +81,7 @@ func (p *CollectdParser) Parse(buf []byte) ([]telegraf.Metric, error) { metrics := []telegraf.Metric{} for _, valueList := range valueLists { - metrics = append(metrics, UnmarshalValueList(valueList)...) + metrics = append(metrics, UnmarshalValueList(valueList, p.ParseMultiValue)...) } if len(p.DefaultTags) > 0 { @@ -111,47 +116,91 @@ func (p *CollectdParser) SetDefaultTags(tags map[string]string) { } // UnmarshalValueList translates a ValueList into a Telegraf metric. 
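 // With multiValue set to "split" (the default), each data source becomes its
 // own metric named <plugin>_<dsname> carrying a single "value" field; with
 // "join", all data sources become fields of one metric named after the plugin.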
-func UnmarshalValueList(vl *api.ValueList) []telegraf.Metric { +func UnmarshalValueList(vl *api.ValueList, multiValue string) []telegraf.Metric { timestamp := vl.Time.UTC() var metrics []telegraf.Metric - for i := range vl.Values { - var name string - name = fmt.Sprintf("%s_%s", vl.Identifier.Plugin, vl.DSName(i)) + + //set multiValue to default "split" if nothing is specified + if multiValue == "" { + multiValue = "split" + } + switch multiValue { + case "split": + for i := range vl.Values { + var name string + name = fmt.Sprintf("%s_%s", vl.Identifier.Plugin, vl.DSName(i)) + tags := make(map[string]string) + fields := make(map[string]interface{}) + + // Convert interface back to actual type, then to float64 + switch value := vl.Values[i].(type) { + case api.Gauge: + fields["value"] = float64(value) + case api.Derive: + fields["value"] = float64(value) + case api.Counter: + fields["value"] = float64(value) + } + + if vl.Identifier.Host != "" { + tags["host"] = vl.Identifier.Host + } + if vl.Identifier.PluginInstance != "" { + tags["instance"] = vl.Identifier.PluginInstance + } + if vl.Identifier.Type != "" { + tags["type"] = vl.Identifier.Type + } + if vl.Identifier.TypeInstance != "" { + tags["type_instance"] = vl.Identifier.TypeInstance + } + + // Drop invalid points + m, err := metric.New(name, tags, fields, timestamp) + if err != nil { + log.Printf("E! Dropping metric %v: %v", name, err) + continue + } + + metrics = append(metrics, m) + } + case "join": + name := vl.Identifier.Plugin tags := make(map[string]string) fields := make(map[string]interface{}) + for i := range vl.Values { + switch value := vl.Values[i].(type) { + case api.Gauge: + fields[vl.DSName(i)] = float64(value) + case api.Derive: + fields[vl.DSName(i)] = float64(value) + case api.Counter: + fields[vl.DSName(i)] = float64(value) + } - // Convert interface back to actual type, then to float64 - switch value := vl.Values[i].(type) { - case api.Gauge: - fields["value"] = float64(value) - case api.Derive: - fields["value"] = float64(value) - case api.Counter: - fields["value"] = float64(value) + if vl.Identifier.Host != "" { + tags["host"] = vl.Identifier.Host + } + if vl.Identifier.PluginInstance != "" { + tags["instance"] = vl.Identifier.PluginInstance + } + if vl.Identifier.Type != "" { + tags["type"] = vl.Identifier.Type + } + if vl.Identifier.TypeInstance != "" { + tags["type_instance"] = vl.Identifier.TypeInstance + } } - if vl.Identifier.Host != "" { - tags["host"] = vl.Identifier.Host - } - if vl.Identifier.PluginInstance != "" { - tags["instance"] = vl.Identifier.PluginInstance - } - if vl.Identifier.Type != "" { - tags["type"] = vl.Identifier.Type - } - if vl.Identifier.TypeInstance != "" { - tags["type_instance"] = vl.Identifier.TypeInstance - } - - // Drop invalid points m, err := metric.New(name, tags, fields, timestamp) if err != nil { log.Printf("E! 
Dropping metric %v: %v", name, err) - continue } metrics = append(metrics, m) + default: + log.Printf("parse-multi-value config can only be 'split' or 'join'") } return metrics } diff --git a/plugins/parsers/collectd/parser_test.go b/plugins/parsers/collectd/parser_test.go index 3aad04013..afd58ec72 100644 --- a/plugins/parsers/collectd/parser_test.go +++ b/plugins/parsers/collectd/parser_test.go @@ -6,6 +6,7 @@ import ( "collectd.org/api" "collectd.org/network" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -76,7 +77,7 @@ var multiMetric = testCase{ api.Derive(42), api.Gauge(42), }, - DSNames: []string(nil), + DSNames: []string{"t1", "t2"}, }, }, []metricData{ @@ -108,7 +109,7 @@ var multiMetric = testCase{ } func TestNewCollectdParser(t *testing.T) { - parser, err := NewCollectdParser("", "", []string{}) + parser, err := NewCollectdParser("", "", []string{}, "join") require.Nil(t, err) require.Equal(t, parser.popts.SecurityLevel, network.None) require.NotNil(t, parser.popts.PasswordLookup) @@ -133,6 +134,19 @@ func TestParse(t *testing.T) { } } +func TestParseMultiValueSplit(t *testing.T) { + buf, err := writeValueList(multiMetric.vl) + require.Nil(t, err) + bytes, err := buf.Bytes() + require.Nil(t, err) + + parser := &CollectdParser{ParseMultiValue: "split"} + metrics, err := parser.Parse(bytes) + require.Nil(t, err) + + assert.Equal(t, 2, len(metrics)) +} + func TestParse_DefaultTags(t *testing.T) { buf, err := writeValueList(singleMetric.vl) require.Nil(t, err) @@ -266,7 +280,7 @@ func TestParseLine(t *testing.T) { bytes, err := buf.Bytes() require.Nil(t, err) - parser, err := NewCollectdParser("", "", []string{}) + parser, err := NewCollectdParser("", "", []string{}, "split") require.Nil(t, err) metric, err := parser.ParseLine(string(bytes)) require.Nil(t, err) diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 58fce1722..ac6bbbda8 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -66,6 +66,9 @@ type Config struct { // Dataset specification for collectd CollectdTypesDB []string + // whether to split or join multivalue metrics + CollectdSplit string + // DataType only applies to value, this will be the type to parse value to DataType string @@ -109,7 +112,7 @@ func NewParser(config *Config) (Parser, error) { config.Templates, config.DefaultTags) case "collectd": parser, err = NewCollectdParser(config.CollectdAuthFile, - config.CollectdSecurityLevel, config.CollectdTypesDB) + config.CollectdSecurityLevel, config.CollectdTypesDB, config.CollectdSplit) case "dropwizard": parser, err = NewDropwizardParser( config.DropwizardMetricRegistryPath, @@ -172,8 +175,9 @@ func NewCollectdParser( authFile string, securityLevel string, typesDB []string, + split string, ) (Parser, error) { - return collectd.NewCollectdParser(authFile, securityLevel, typesDB) + return collectd.NewCollectdParser(authFile, securityLevel, typesDB, split) } func NewDropwizardParser( From fb7c1d775b7e530d03f33bf1ae7fbb1ea731918c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 11 Jul 2018 17:31:11 -0700 Subject: [PATCH 0006/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bd0c262ab..58626e093 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,7 @@ - [#4362](https://github.com/influxdata/telegraf/pull/4362): Add mongo document and connection metrics. 
- [#3772](https://github.com/influxdata/telegraf/pull/3772): Add Enum Processor. - [#4386](https://github.com/influxdata/telegraf/pull/4386): Add user tag to procstat input. +- [#4403](https://github.com/influxdata/telegraf/pull/4403): Add support for multivalue metrics to collectd parser. ## v1.7.2 [unreleased] From 9e77bfc3edcedc67bbab7b66ed978ba9d085db08 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 11 Jul 2018 17:33:27 -0700 Subject: [PATCH 0007/1815] Fix potential deadlock by not calling AddMetric concurrently (#4404) --- agent/agent.go | 100 +++++++++++++++++++++++++------------------------ 1 file changed, 52 insertions(+), 48 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 6eb9505e2..6f7b540f2 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -241,14 +241,12 @@ func (a *Agent) flush() { } // flusher monitors the metrics input channel and flushes on the minimum interval -func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, aggC chan telegraf.Metric) error { - // Inelegant, but this sleep is to allow the Gather threads to run, so that - // the flusher will flush after metrics are collected. - time.Sleep(time.Millisecond * 300) - - // create an output metric channel and a gorouting that continuously passes - // each metric onto the output plugins & aggregators. - outMetricC := make(chan telegraf.Metric, 100) +func (a *Agent) flusher( + shutdown chan struct{}, + metricC chan telegraf.Metric, + aggMetricC chan telegraf.Metric, + outMetricC chan telegraf.Metric, +) error { var wg sync.WaitGroup wg.Add(1) go func() { @@ -257,56 +255,67 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, ag select { case <-shutdown: if len(outMetricC) > 0 { - // keep going until outMetricC is flushed + // keep going until channel is empty continue } return - case m := <-outMetricC: - // if dropOriginal is set to true, then we will only send this - // metric to the aggregators, not the outputs. - var dropOriginal bool - for _, agg := range a.Config.Aggregators { - if ok := agg.Add(m.Copy()); ok { - dropOriginal = true - } - } - if !dropOriginal { - for i, o := range a.Config.Outputs { - if i == len(a.Config.Outputs)-1 { - o.AddMetric(m) - } else { - o.AddMetric(m.Copy()) - } + case metric := <-outMetricC: + for i, o := range a.Config.Outputs { + if i == len(a.Config.Outputs)-1 { + o.AddMetric(metric) + } else { + o.AddMetric(metric.Copy()) } } } } }() + wg.Add(1) + go func() { + defer wg.Done() + for metric := range aggMetricC { + // Apply Processors + metrics := []telegraf.Metric{metric} + for _, processor := range a.Config.Processors { + metrics = processor.Apply(metrics...) + } + outMetricC <- metric + } + }() + wg.Add(1) go func() { defer wg.Done() for { select { case <-shutdown: - if len(aggC) > 0 { - // keep going until aggC is flushed + if len(metricC) > 0 { + // keep going until channel is empty continue } + close(aggMetricC) return - case metric := <-aggC: + case metric := <-metricC: + // Apply Processors metrics := []telegraf.Metric{metric} for _, processor := range a.Config.Processors { metrics = processor.Apply(metrics...) 
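 				// Processors run serially in this single goroutine, so each
 				// output's AddMetric is never called concurrently.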
} - for _, m := range metrics { - for i, o := range a.Config.Outputs { - if i == len(a.Config.Outputs)-1 { - o.AddMetric(m) - } else { - o.AddMetric(m.Copy()) + + for _, metric := range metrics { + // Apply Aggregators + var dropOriginal bool + for _, agg := range a.Config.Aggregators { + if ok := agg.Add(metric.Copy()); ok { + dropOriginal = true } } + + // Forward metric to Outputs + if !dropOriginal { + outMetricC <- metric + } } } } @@ -335,16 +344,6 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, ag " already a flush ongoing.") } }() - case metric := <-metricC: - // NOTE potential bottleneck here as we put each metric through the - // processors serially. - mS := []telegraf.Metric{metric} - for _, processor := range a.Config.Processors { - mS = processor.Apply(mS...) - } - for _, m := range mS { - outMetricC <- m - } } } } @@ -358,9 +357,14 @@ func (a *Agent) Run(shutdown chan struct{}) error { a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet, a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration) - // channel shared between all input threads for accumulating metrics + // Channel shared between all input threads for accumulating metrics metricC := make(chan telegraf.Metric, 100) - aggC := make(chan telegraf.Metric, 100) + + // Channel for metrics ready to be output + outMetricC := make(chan telegraf.Metric, 100) + + // Channel for aggregated metrics + aggMetricC := make(chan telegraf.Metric, 100) // Round collection to nearest interval by sleeping if a.Config.Agent.RoundInterval { @@ -371,7 +375,7 @@ func (a *Agent) Run(shutdown chan struct{}) error { wg.Add(1) go func() { defer wg.Done() - if err := a.flusher(shutdown, metricC, aggC); err != nil { + if err := a.flusher(shutdown, metricC, aggMetricC, outMetricC); err != nil { log.Printf("E! Flusher routine failed, exiting: %s\n", err.Error()) close(shutdown) } @@ -381,7 +385,7 @@ func (a *Agent) Run(shutdown chan struct{}) error { for _, aggregator := range a.Config.Aggregators { go func(agg *models.RunningAggregator) { defer wg.Done() - acc := NewAccumulator(agg, aggC) + acc := NewAccumulator(agg, aggMetricC) acc.SetPrecision(a.Config.Agent.Precision.Duration, a.Config.Agent.Interval.Duration) agg.Run(acc, shutdown) From 5150d565d7d6bb268a49560825851cb433a65f10 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 11 Jul 2018 22:57:46 -0700 Subject: [PATCH 0008/1815] Fix several build issues (#4412) --- CONTRIBUTING.md | 2 +- Makefile | 17 ++++++++++++++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9a89e3cbf..66aa92cc3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -31,7 +31,7 @@ which can be found [on our website](http://influxdb.com/community/cla.html) Assuming you can already build the project, run these in the telegraf directory: 1. `go get -u github.com/golang/dep/cmd/dep` -2. `dep ensure` +2. `dep ensure -vendor-only` 3. 
`dep ensure -add github.com/[dependency]/[new-package]` ## Input Plugins diff --git a/Makefile b/Makefile index 2f3fcecea..b4f89c799 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,14 @@ +ifeq ($(SHELL), cmd) + VERSION := $(shell git describe --exact-match --tags 2>nil) + HOME := $(HOMEPATH) +else ifeq ($(SHELL), sh.exe) + VERSION := $(shell git describe --exact-match --tags 2>nil) + HOME := $(HOMEPATH) +else + VERSION := $(shell git describe --exact-match --tags 2>/dev/null) +endif + PREFIX := /usr/local -VERSION := $(shell git describe --exact-match --tags 2>/dev/null) BRANCH := $(shell git rev-parse --abbrev-ref HEAD) COMMIT := $(shell git rev-parse --short HEAD) GOFILES ?= $(shell git ls-files '*.go') @@ -8,8 +17,10 @@ BUILDFLAGS ?= ifdef GOBIN PATH := $(GOBIN):$(PATH) -else +else ifdef GOPATH PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH) +else +PATH := $(HOME)/go/bin:$(PATH) endif LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH) @@ -24,7 +35,7 @@ all: deps: go get -u github.com/golang/lint/golint go get -u github.com/golang/dep/cmd/dep - dep ensure + dep ensure -vendor-only telegraf: go build -ldflags "$(LDFLAGS)" ./cmd/telegraf From 0812ffdace46c00a5d370b68001f86b9164f9460 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 12 Jul 2018 12:25:04 -0700 Subject: [PATCH 0009/1815] Update links to system plugins --- README.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 679e2847f..414ab2da7 100644 --- a/README.md +++ b/README.md @@ -140,7 +140,10 @@ configuration options. * [conntrack](./plugins/inputs/conntrack) * [couchbase](./plugins/inputs/couchbase) * [couchdb](./plugins/inputs/couchdb) +* [cpu](./plugins/inputs/cpu) * [DC/OS](./plugins/inputs/dcos) +* [diskio](./plugins/inputs/diskio) +* [disk](./plugins/inputs/disk) * [disque](./plugins/inputs/disque) * [dmcache](./plugins/inputs/dmcache) * [dns query time](./plugins/inputs/dns_query) @@ -168,18 +171,24 @@ configuration options. * [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka) - [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry) * [kapacitor](./plugins/inputs/kapacitor) +* [kernel](./plugins/inputs/kernel) +* [kernel_vmstat](./plugins/inputs/kernel_vmstat) * [kubernetes](./plugins/inputs/kubernetes) * [leofs](./plugins/inputs/leofs) +* [linux_sysctl_fs](./plugins/inputs/linux_sysctl_fs) * [lustre2](./plugins/inputs/lustre2) * [mailchimp](./plugins/inputs/mailchimp) * [mcrouter](./plugins/inputs/mcrouter) * [memcached](./plugins/inputs/memcached) +* [mem](./plugins/inputs/mem) * [mesos](./plugins/inputs/mesos) * [minecraft](./plugins/inputs/minecraft) * [mongodb](./plugins/inputs/mongodb) * [mysql](./plugins/inputs/mysql) * [nats](./plugins/inputs/nats) +* [net](./plugins/inputs/net) * [net_response](./plugins/inputs/net_response) +* [netstat](./plugins/inputs/netstat) * [nginx](./plugins/inputs/nginx) * [nginx_plus](./plugins/inputs/nginx_plus) * [nsq](./plugins/inputs/nsq) @@ -196,6 +205,7 @@ configuration options. * [postgresql_extensible](./plugins/inputs/postgresql_extensible) * [postgresql](./plugins/inputs/postgresql) * [powerdns](./plugins/inputs/powerdns) +* [processes](./plugins/inputs/processes) * [procstat](./plugins/inputs/procstat) * [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server)) * [puppetagent](./plugins/inputs/puppetagent) @@ -211,7 +221,9 @@ configuration options. 
* [snmp_legacy](./plugins/inputs/snmp_legacy) * [solr](./plugins/inputs/solr) * [sql server](./plugins/inputs/sqlserver) (microsoft) +* [swap](./plugins/inputs/swap) * [syslog](./plugins/inputs/syslog) +* [system](./plugins/inputs/system) * [teamspeak](./plugins/inputs/teamspeak) * [tengine](./plugins/inputs/tengine) * [tomcat](./plugins/inputs/tomcat) @@ -223,18 +235,6 @@ configuration options. * [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters) * [win_services](./plugins/inputs/win_services) * [sysstat](./plugins/inputs/sysstat) -* [system](./plugins/inputs/system) - * cpu - * mem - * net - * netstat - * disk - * diskio - * swap - * processes - * kernel (/proc/stat) - * kernel (/proc/vmstat) - * linux_sysctl_fs (/proc/sys/fs) Telegraf can also collect metrics via the following service plugins: From a5c4cac8f3ef4b9ba0c5005073c0c3ced1179bfb Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 12 Jul 2018 12:25:27 -0700 Subject: [PATCH 0010/1815] Fix typesetting issue in README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 414ab2da7..0f4e5656a 100644 --- a/README.md +++ b/README.md @@ -169,7 +169,7 @@ configuration options. * [ipset](./plugins/inputs/ipset) * [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) * [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka) -- [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry) +* [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry) * [kapacitor](./plugins/inputs/kapacitor) * [kernel](./plugins/inputs/kernel) * [kernel_vmstat](./plugins/inputs/kernel_vmstat) From 8ff63a4b79d84df74bb8d47cdd956f743422634a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 12 Jul 2018 12:26:46 -0700 Subject: [PATCH 0011/1815] Merge service inputs with main input listing --- README.md | 49 +++++++++++++++++++++++-------------------------- 1 file changed, 23 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index 0f4e5656a..4fc4db323 100644 --- a/README.md +++ b/README.md @@ -131,13 +131,13 @@ configuration options. * [aws cloudwatch](./plugins/inputs/cloudwatch) * [bcache](./plugins/inputs/bcache) * [bond](./plugins/inputs/bond) -* [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) * [burrow](./plugins/inputs/burrow) +* [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) * [ceph](./plugins/inputs/ceph) * [cgroup](./plugins/inputs/cgroup) * [chrony](./plugins/inputs/chrony) -* [consul](./plugins/inputs/consul) * [conntrack](./plugins/inputs/conntrack) +* [consul](./plugins/inputs/consul) * [couchbase](./plugins/inputs/couchbase) * [couchdb](./plugins/inputs/couchdb) * [cpu](./plugins/inputs/cpu) @@ -158,24 +158,27 @@ configuration options. 
* [graylog](./plugins/inputs/graylog) * [haproxy](./plugins/inputs/haproxy) * [hddtemp](./plugins/inputs/hddtemp) +* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin) +* [http_listener](./plugins/inputs/http_listener) * [http](./plugins/inputs/http) (generic HTTP plugin, supports using input data formats) * [http_response](./plugins/inputs/http_response) -* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin) -* [internal](./plugins/inputs/internal) * [influxdb](./plugins/inputs/influxdb) +* [internal](./plugins/inputs/internal) * [interrupts](./plugins/inputs/interrupts) * [ipmi_sensor](./plugins/inputs/ipmi_sensor) -* [iptables](./plugins/inputs/iptables) * [ipset](./plugins/inputs/ipset) -* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) +* [iptables](./plugins/inputs/iptables) * [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka) +* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) * [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry) +* [kafka_consumer](./plugins/inputs/kafka_consumer) * [kapacitor](./plugins/inputs/kapacitor) * [kernel](./plugins/inputs/kernel) * [kernel_vmstat](./plugins/inputs/kernel_vmstat) * [kubernetes](./plugins/inputs/kubernetes) * [leofs](./plugins/inputs/leofs) * [linux_sysctl_fs](./plugins/inputs/linux_sysctl_fs) +* [logparser](./plugins/inputs/logparser) * [lustre2](./plugins/inputs/lustre2) * [mailchimp](./plugins/inputs/mailchimp) * [mcrouter](./plugins/inputs/mcrouter) @@ -184,13 +187,16 @@ configuration options. * [mesos](./plugins/inputs/mesos) * [minecraft](./plugins/inputs/minecraft) * [mongodb](./plugins/inputs/mongodb) +* [mqtt_consumer](./plugins/inputs/mqtt_consumer) * [mysql](./plugins/inputs/mysql) +* [nats_consumer](./plugins/inputs/nats_consumer) * [nats](./plugins/inputs/nats) * [net](./plugins/inputs/net) * [net_response](./plugins/inputs/net_response) * [netstat](./plugins/inputs/netstat) * [nginx](./plugins/inputs/nginx) * [nginx_plus](./plugins/inputs/nginx_plus) +* [nsq_consumer](./plugins/inputs/nsq_consumer) * [nsq](./plugins/inputs/nsq) * [nstat](./plugins/inputs/nstat) * [ntpq](./plugins/inputs/ntpq) @@ -217,38 +223,25 @@ configuration options. 
* [salesforce](./plugins/inputs/salesforce) * [sensors](./plugins/inputs/sensors) * [smart](./plugins/inputs/smart) -* [snmp](./plugins/inputs/snmp) * [snmp_legacy](./plugins/inputs/snmp_legacy) +* [snmp](./plugins/inputs/snmp) +* [socket_listener](./plugins/inputs/socket_listener) * [solr](./plugins/inputs/solr) * [sql server](./plugins/inputs/sqlserver) (microsoft) +* [statsd](./plugins/inputs/statsd) * [swap](./plugins/inputs/swap) * [syslog](./plugins/inputs/syslog) +* [sysstat](./plugins/inputs/sysstat) * [system](./plugins/inputs/system) +* [tail](./plugins/inputs/tail) +* [tcp_listener](./plugins/inputs/socket_listener) * [teamspeak](./plugins/inputs/teamspeak) * [tengine](./plugins/inputs/tengine) * [tomcat](./plugins/inputs/tomcat) * [twemproxy](./plugins/inputs/twemproxy) +* [udp_listener](./plugins/inputs/socket_listener) * [unbound](./plugins/inputs/unbound) * [varnish](./plugins/inputs/varnish) -* [zfs](./plugins/inputs/zfs) -* [zookeeper](./plugins/inputs/zookeeper) -* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters) -* [win_services](./plugins/inputs/win_services) -* [sysstat](./plugins/inputs/sysstat) - -Telegraf can also collect metrics via the following service plugins: - -* [http_listener](./plugins/inputs/http_listener) -* [kafka_consumer](./plugins/inputs/kafka_consumer) -* [mqtt_consumer](./plugins/inputs/mqtt_consumer) -* [nats_consumer](./plugins/inputs/nats_consumer) -* [nsq_consumer](./plugins/inputs/nsq_consumer) -* [logparser](./plugins/inputs/logparser) -* [statsd](./plugins/inputs/statsd) -* [socket_listener](./plugins/inputs/socket_listener) -* [tail](./plugins/inputs/tail) -* [tcp_listener](./plugins/inputs/socket_listener) -* [udp_listener](./plugins/inputs/socket_listener) * [webhooks](./plugins/inputs/webhooks) * [filestack](./plugins/inputs/webhooks/filestack) * [github](./plugins/inputs/webhooks/github) @@ -256,7 +249,11 @@ Telegraf can also collect metrics via the following service plugins: * [papertrail](./plugins/inputs/webhooks/papertrail) * [particle](./plugins/inputs/webhooks/particle) * [rollbar](./plugins/inputs/webhooks/rollbar) +* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters) +* [win_services](./plugins/inputs/win_services) +* [zfs](./plugins/inputs/zfs) * [zipkin](./plugins/inputs/zipkin) +* [zookeeper](./plugins/inputs/zookeeper) Telegraf is able to parse the following input data formats into metrics, these formats may be used with input plugins supporting the `data_format` option: From 0da94a1b3c21b92e3e50f91c3de97d8175f5a77d Mon Sep 17 00:00:00 2001 From: Greg Date: Thu, 12 Jul 2018 19:41:49 -0600 Subject: [PATCH 0012/1815] Fix incorrect container name gathered in docker input (#4391) --- plugins/inputs/docker/docker.go | 27 ++-- plugins/inputs/docker/docker_test.go | 173 ++++++++--------------- plugins/inputs/docker/docker_testdata.go | 34 ++++- 3 files changed, 107 insertions(+), 127 deletions(-) diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index aa1de7479..5d4cb0de8 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -365,10 +365,18 @@ func (d *Docker) gatherContainer( ) error { var v *types.StatsJSON // Parse container name - cname := "unknown" - if len(container.Names) > 0 { - // Not sure what to do with other names, just take the first. 
- cname = strings.TrimPrefix(container.Names[0], "/") + var cname string + for _, name := range container.Names { + trimmedName := strings.TrimPrefix(name, "/") + match := d.containerFilter.Match(trimmedName) + if match { + cname = trimmedName + break + } + } + + if cname == "" { + return nil } // the image name sometimes has a version part, or a private repo @@ -391,10 +399,6 @@ func (d *Docker) gatherContainer( "container_version": imageVersion, } - if !d.containerFilter.Match(cname) { - return nil - } - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) defer cancel() r, err := d.client.ContainerStats(ctx, container.ID, false) @@ -411,6 +415,9 @@ func (d *Docker) gatherContainer( } daemonOSType := r.OSType + // use common (printed at `docker ps`) name for container + tags["container_name"] = strings.TrimPrefix(v.Name, "/") + // Add labels to tags for k, label := range container.Labels { if d.labelFilter.Match(k) { @@ -461,12 +468,12 @@ func (d *Docker) gatherContainer( acc.AddFields("docker_container_health", healthfields, tags, time.Now()) } - gatherContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total, daemonOSType) + parseContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total, daemonOSType) return nil } -func gatherContainerStats( +func parseContainerStats( stat *types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index d51c61c00..b97c34b6b 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -77,8 +77,8 @@ var baseClient = MockClient{ ContainerListF: func(context.Context, types.ContainerListOptions) ([]types.Container, error) { return containerList, nil }, - ContainerStatsF: func(context.Context, string, bool) (types.ContainerStats, error) { - return containerStats(), nil + ContainerStatsF: func(c context.Context, s string, b bool) (types.ContainerStats, error) { + return containerStats(s), nil }, ContainerInspectF: func(context.Context, string) (types.ContainerJSON, error) { return containerInspect, nil @@ -107,7 +107,7 @@ func TestDockerGatherContainerStats(t *testing.T) { "container_image": "redis/image", } - gatherContainerStats(stats, &acc, tags, "123456789", true, true, "linux") + parseContainerStats(stats, &acc, tags, "123456789", true, true, "linux") // test docker_container_net measurement netfields := map[string]interface{}{ @@ -290,11 +290,9 @@ func TestContainerLabels(t *testing.T) { }{ { name: "Nil filters matches all", - container: types.Container{ - Labels: map[string]string{ - "a": "x", - }, - }, + container: genContainerLabeled(map[string]string{ + "a": "x", + }), include: nil, exclude: nil, expected: map[string]string{ @@ -303,11 +301,9 @@ func TestContainerLabels(t *testing.T) { }, { name: "Empty filters matches all", - container: types.Container{ - Labels: map[string]string{ - "a": "x", - }, - }, + container: genContainerLabeled(map[string]string{ + "a": "x", + }), include: []string{}, exclude: []string{}, expected: map[string]string{ @@ -316,12 +312,10 @@ func TestContainerLabels(t *testing.T) { }, { name: "Must match include", - container: types.Container{ - Labels: map[string]string{ - "a": "x", - "b": "y", - }, - }, + container: genContainerLabeled(map[string]string{ + "a": "x", + "b": "y", + }), include: []string{"a"}, exclude: []string{}, expected: map[string]string{ @@ -330,12 +324,10 @@ func TestContainerLabels(t *testing.T) { }, { name: "Must not match exclude", - 
container: types.Container{ - Labels: map[string]string{ - "a": "x", - "b": "y", - }, - }, + container: genContainerLabeled(map[string]string{ + "a": "x", + "b": "y", + }), include: []string{}, exclude: []string{"b"}, expected: map[string]string{ @@ -344,13 +336,11 @@ func TestContainerLabels(t *testing.T) { }, { name: "Include Glob", - container: types.Container{ - Labels: map[string]string{ - "aa": "x", - "ab": "y", - "bb": "z", - }, - }, + container: genContainerLabeled(map[string]string{ + "aa": "x", + "ab": "y", + "bb": "z", + }), include: []string{"a*"}, exclude: []string{}, expected: map[string]string{ @@ -360,13 +350,11 @@ func TestContainerLabels(t *testing.T) { }, { name: "Exclude Glob", - container: types.Container{ - Labels: map[string]string{ - "aa": "x", - "ab": "y", - "bb": "z", - }, - }, + container: genContainerLabeled(map[string]string{ + "aa": "x", + "ab": "y", + "bb": "z", + }), include: []string{}, exclude: []string{"a*"}, expected: map[string]string{ @@ -375,13 +363,11 @@ func TestContainerLabels(t *testing.T) { }, { name: "Excluded Includes", - container: types.Container{ - Labels: map[string]string{ - "aa": "x", - "ab": "y", - "bb": "z", - }, - }, + container: genContainerLabeled(map[string]string{ + "aa": "x", + "ab": "y", + "bb": "z", + }), include: []string{"a*"}, exclude: []string{"*b"}, expected: map[string]string{ @@ -425,6 +411,12 @@ func TestContainerLabels(t *testing.T) { } } +func genContainerLabeled(labels map[string]string) types.Container { + c := containerList[0] + c.Labels = labels + return c +} + func TestContainerNames(t *testing.T) { var tests = []struct { name string @@ -434,112 +426,67 @@ func TestContainerNames(t *testing.T) { expected []string }{ { - name: "Nil filters matches all", - containers: [][]string{ - {"/etcd"}, - {"/etcd2"}, - }, + name: "Nil filters matches all", include: nil, exclude: nil, - expected: []string{"etcd", "etcd2"}, + expected: []string{"etcd", "etcd2", "acme", "acme-test", "foo"}, }, { - name: "Empty filters matches all", - containers: [][]string{ - {"/etcd"}, - {"/etcd2"}, - }, + name: "Empty filters matches all", include: []string{}, exclude: []string{}, - expected: []string{"etcd", "etcd2"}, + expected: []string{"etcd", "etcd2", "acme", "acme-test", "foo"}, }, { - name: "Match all containers", - containers: [][]string{ - {"/etcd"}, - {"/etcd2"}, - }, + name: "Match all containers", include: []string{"*"}, exclude: []string{}, - expected: []string{"etcd", "etcd2"}, + expected: []string{"etcd", "etcd2", "acme", "acme-test", "foo"}, }, { - name: "Include prefix match", - containers: [][]string{ - {"/etcd"}, - {"/etcd2"}, - }, + name: "Include prefix match", include: []string{"etc*"}, exclude: []string{}, expected: []string{"etcd", "etcd2"}, }, { - name: "Exact match", - containers: [][]string{ - {"/etcd"}, - {"/etcd2"}, - }, + name: "Exact match", include: []string{"etcd"}, exclude: []string{}, expected: []string{"etcd"}, }, { - name: "Star matches zero length", - containers: [][]string{ - {"/etcd"}, - {"/etcd2"}, - }, + name: "Star matches zero length", include: []string{"etcd2*"}, exclude: []string{}, expected: []string{"etcd2"}, }, { - name: "Exclude matches all", - containers: [][]string{ - {"/etcd"}, - {"/etcd2"}, - }, + name: "Exclude matches all", include: []string{}, exclude: []string{"etc*"}, - expected: []string{}, + expected: []string{"acme", "acme-test", "foo"}, }, { - name: "Exclude single", - containers: [][]string{ - {"/etcd"}, - {"/etcd2"}, - }, + name: "Exclude single", include: []string{}, exclude: 
[]string{"etcd"}, - expected: []string{"etcd2"}, + expected: []string{"etcd2", "acme", "acme-test", "foo"}, }, { - name: "Exclude all", - containers: [][]string{ - {"/etcd"}, - {"/etcd2"}, - }, + name: "Exclude all", include: []string{"*"}, exclude: []string{"*"}, expected: []string{}, }, { - name: "Exclude item matching include", - containers: [][]string{ - {"acme"}, - {"foo"}, - {"acme-test"}, - }, + name: "Exclude item matching include", include: []string{"acme*"}, exclude: []string{"*test*"}, expected: []string{"acme"}, }, { - name: "Exclude item no wildcards", - containers: [][]string{ - {"acme"}, - {"acme-test"}, - }, + name: "Exclude item no wildcards", include: []string{"acme*"}, exclude: []string{"test"}, expected: []string{"acme", "acme-test"}, @@ -552,14 +499,12 @@ func TestContainerNames(t *testing.T) { newClientFunc := func(host string, tlsConfig *tls.Config) (Client, error) { client := baseClient client.ContainerListF = func(context.Context, types.ContainerListOptions) ([]types.Container, error) { - var containers []types.Container - for _, names := range tt.containers { - containers = append(containers, types.Container{ - Names: names, - }) - } - return containers, nil + return containerList, nil } + client.ContainerStatsF = func(c context.Context, s string, b bool) (types.ContainerStats, error) { + return containerStats(s), nil + } + return &client, nil } diff --git a/plugins/inputs/docker/docker_testdata.go b/plugins/inputs/docker/docker_testdata.go index 1168048a2..bb275a1cc 100644 --- a/plugins/inputs/docker/docker_testdata.go +++ b/plugins/inputs/docker/docker_testdata.go @@ -1,6 +1,7 @@ package docker import ( + "fmt" "io/ioutil" "strings" "time" @@ -133,6 +134,18 @@ var containerList = []types.Container{ SizeRw: 0, SizeRootFs: 0, }, + types.Container{ + ID: "e8a713dd90604f5a257b97c15945e047ab60ed5b2c4397c5a6b5bf40e1bd2791", + Names: []string{"/acme"}, + }, + types.Container{ + ID: "9bc6faf9ba8106fae32e8faafd38a1dd6f6d262bec172398cc10bc03c0d6841a", + Names: []string{"/acme-test"}, + }, + types.Container{ + ID: "d4ccced494a1d5fe8ebdb0a86335a0dab069319912221e5838a132ab18a8bc84", + Names: []string{"/foo"}, + }, } var two = uint64(2) @@ -208,10 +221,25 @@ var NodeList = []swarm.Node{ }, } -func containerStats() types.ContainerStats { +func containerStats(s string) types.ContainerStats { var stat types.ContainerStats - jsonStat := ` + var name string + switch s { + case "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb": + name = "etcd" + case "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173": + name = "etcd2" + case "e8a713dd90604f5a257b97c15945e047ab60ed5b2c4397c5a6b5bf40e1bd2791": + name = "/acme" + case "9bc6faf9ba8106fae32e8faafd38a1dd6f6d262bec172398cc10bc03c0d6841a": + name = "/acme-test" + case "d4ccced494a1d5fe8ebdb0a86335a0dab069319912221e5838a132ab18a8bc84": + name = "/foo" + } + + jsonStat := fmt.Sprintf(` { + "name": "%s", "blkio_stats": { "io_service_bytes_recursive": [ { @@ -315,7 +343,7 @@ func containerStats() types.ContainerStats { "throttling_data": {} }, "read": "2016-02-24T11:42:27.472459608-05:00" -}` +}`, name) stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat)) return stat } From c8f00030dd94155e0a94c9f6684e635f2c0ec0a8 Mon Sep 17 00:00:00 2001 From: Rion Date: Fri, 13 Jul 2018 13:53:56 -0700 Subject: [PATCH 0013/1815] Add support for setting kafka client id (#4418) --- plugins/inputs/kafka_consumer/README.md | 3 +++ plugins/inputs/kafka_consumer/kafka_consumer.go | 10 ++++++++++ plugins/outputs/kafka/README.md 
| 3 +++ plugins/outputs/kafka/kafka.go | 11 +++++++++++ 4 files changed, 27 insertions(+) diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index 67dbb539e..d794377fa 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -22,6 +22,9 @@ and use the old zookeeper connection method. ## Offset (must be either "oldest" or "newest") offset = "oldest" + ## Optional client id + # client_id = "my_client" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index bf74dd5ab..72172bcb6 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -17,6 +17,7 @@ import ( type Kafka struct { ConsumerGroup string + ClientID string `toml:"client_id"` Topics []string Brokers []string MaxMessageLen int @@ -59,6 +60,9 @@ var sampleConfig = ` brokers = ["localhost:9092"] ## topic(s) to consume topics = ["telegraf"] + + ## Optional Client id + # client_id = "Telegraf" ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" @@ -114,6 +118,12 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { return err } + if k.ClientID != "" { + config.ClientID = k.ClientID + } else { + config.ClientID = "Telegraf" + } + if tlsConfig != nil { log.Printf("D! TLS Enabled") config.Net.TLS.Config = tlsConfig diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md index 196e2e914..00544e99c 100644 --- a/plugins/outputs/kafka/README.md +++ b/plugins/outputs/kafka/README.md @@ -10,6 +10,9 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm ## Kafka topic for producer messages topic = "telegraf" + ## Optional client id + # client_id = "my_client" + ## Optional topic suffix configuration. ## If the section is omitted, no suffix is used. ## Following topic suffix methods are supported: diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 716e06c44..d61aaadd8 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -25,6 +25,8 @@ type ( Brokers []string // Kafka topic Topic string + // Kafka client id + ClientID string `toml:"client_id"` // Kafka topic suffix option TopicSuffix TopicSuffix `toml:"topic_suffix"` // Routing Key Tag @@ -68,6 +70,9 @@ var sampleConfig = ` brokers = ["localhost:9092"] ## Kafka topic for producer messages topic = "telegraf" + + ## Optional Client id + # client_id = "Telegraf" ## Optional topic suffix configuration. ## If the section is omitted, no suffix is used. @@ -186,6 +191,12 @@ func (k *Kafka) Connect() error { } config := sarama.NewConfig() + if k.ClientID != "" { + config.ClientID = k.ClientID + } else { + config.ClientID = "Telegraf" + } + config.Producer.RequiredAcks = sarama.RequiredAcks(k.RequiredAcks) config.Producer.Compression = sarama.CompressionCodec(k.CompressionCodec) config.Producer.Retry.Max = k.MaxRetry From 257e715b4252bbf03b485511982bd50bd6e90ed7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 13 Jul 2018 13:58:39 -0700 Subject: [PATCH 0014/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 58626e093..f4a689557 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ - [#3772](https://github.com/influxdata/telegraf/pull/3772): Add Enum Processor. 
- [#4386](https://github.com/influxdata/telegraf/pull/4386): Add user tag to procstat input. - [#4403](https://github.com/influxdata/telegraf/pull/4403): Add support for multivalue metrics to collectd parser. +- [#4418](https://github.com/influxdata/telegraf/pull/4418): Add support for setting kafka client id. ## v1.7.2 [unreleased] From 49a5dea536a42b682d6d8581c20d0bc7174561d6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 13 Jul 2018 13:59:45 -0700 Subject: [PATCH 0015/1815] Update client_id in kafka input and output readme --- plugins/inputs/kafka_consumer/README.md | 2 +- plugins/inputs/kafka_consumer/kafka_consumer.go | 2 +- plugins/outputs/kafka/README.md | 2 +- plugins/outputs/kafka/kafka.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index d794377fa..24a0efc0f 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -23,7 +23,7 @@ and use the old zookeeper connection method. offset = "oldest" ## Optional client id - # client_id = "my_client" + # client_id = "Telegraf" ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 72172bcb6..d3791b224 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -60,7 +60,7 @@ var sampleConfig = ` brokers = ["localhost:9092"] ## topic(s) to consume topics = ["telegraf"] - + ## Optional Client id # client_id = "Telegraf" diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md index 00544e99c..562f3fd5d 100644 --- a/plugins/outputs/kafka/README.md +++ b/plugins/outputs/kafka/README.md @@ -11,7 +11,7 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm topic = "telegraf" ## Optional client id - # client_id = "my_client" + # client_id = "Telegraf" ## Optional topic suffix configuration. ## If the section is omitted, no suffix is used. 
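Both kafka plugins in this pair of patches apply the same fallback: an explicit `client_id` wins, and an empty one defaults to `"Telegraf"`. A minimal sketch of that defaulting, with the helper name ours rather than the patch's (both plugins inline the if/else instead):

```go
// clientID returns the configured kafka client id, falling back to the
// plugin default when the user leaves client_id unset.
func clientID(configured string) string {
	if configured != "" {
		return configured
	}
	return "Telegraf"
}
```

With such a helper, both `Start` (consumer input) and `Connect` (producer output) would reduce to `config.ClientID = clientID(k.ClientID)`.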
diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index d61aaadd8..a45e2a4e9 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -70,7 +70,7 @@ var sampleConfig = ` brokers = ["localhost:9092"] ## Kafka topic for producer messages topic = "telegraf" - + ## Optional Client id # client_id = "Telegraf" From 411b26bb1faad02b6b581689099d9587737145cc Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 13 Jul 2018 14:14:18 -0700 Subject: [PATCH 0016/1815] Fix output format of printer processor (#4417) --- plugins/processors/printer/printer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/processors/printer/printer.go b/plugins/processors/printer/printer.go index 363e9a21d..ead3e8ece 100644 --- a/plugins/processors/printer/printer.go +++ b/plugins/processors/printer/printer.go @@ -30,7 +30,7 @@ func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric { if err != nil { continue } - fmt.Println(octets) + fmt.Printf("%s", octets) } return in } From af98d070f5ca88251fc5c7a060701f123e223647 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 13 Jul 2018 14:15:21 -0700 Subject: [PATCH 0017/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f4a689557..ed631a674 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,6 +43,7 @@ - [#4381](https://github.com/influxdata/telegraf/issues/4381): Use localhost as default server tag in zookeeper input. - [#4374](https://github.com/influxdata/telegraf/issues/4374): Don't set values when pattern doesn't match in regex processor. +- [#4416](https://github.com/influxdata/telegraf/issues/4416): Fix output format of printer processor. ## v1.7.1 [2018-07-03] From 6d876c18e0297689a13d31c72c82d859f89d95cd Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 13 Jul 2018 22:54:34 -0700 Subject: [PATCH 0018/1815] Fix metric can have duplicate field (#4422) --- metric/metric.go | 1 + metric/metric_test.go | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/metric/metric.go b/metric/metric.go index 077b3a314..9f1a42ccb 100644 --- a/metric/metric.go +++ b/metric/metric.go @@ -168,6 +168,7 @@ func (m *metric) AddField(key string, value interface{}) { for i, field := range m.fields { if key == field.Key { m.fields[i] = &telegraf.Field{Key: key, Value: convertField(value)} + return } } m.fields = append(m.fields, &telegraf.Field{Key: key, Value: convertField(value)}) diff --git a/metric/metric_test.go b/metric/metric_test.go index 47d44f3ef..004fa5915 100644 --- a/metric/metric_test.go +++ b/metric/metric_test.go @@ -30,6 +30,7 @@ func TestNewMetric(t *testing.T) { require.Equal(t, now, m.Time()) } +// cpu value=1 func baseMetric() telegraf.Metric { tags := map[string]string{} fields := map[string]interface{}{ @@ -111,6 +112,8 @@ func TestAddFieldOverwrites(t *testing.T) { m.AddField("value", 1.0) m.AddField("value", 42.0) + require.Equal(t, 1, len(m.FieldList())) + value, ok := m.GetField("value") require.True(t, ok) require.Equal(t, 42.0, value) @@ -122,6 +125,8 @@ func TestAddFieldChangesType(t *testing.T) { m.AddField("value", 1.0) m.AddField("value", "xyzzy") + require.Equal(t, 1, len(m.FieldList())) + value, ok := m.GetField("value") require.True(t, ok) require.Equal(t, "xyzzy", value) From 3f87e5bf573d36ad71e9b64c1fca9094253d2100 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 13 Jul 2018 22:56:14 -0700 Subject: [PATCH 0019/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 
insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ed631a674..327648db1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ - [#4381](https://github.com/influxdata/telegraf/issues/4381): Use localhost as default server tag in zookeeper input. - [#4374](https://github.com/influxdata/telegraf/issues/4374): Don't set values when pattern doesn't match in regex processor. - [#4416](https://github.com/influxdata/telegraf/issues/4416): Fix output format of printer processor. +- [#4422](https://github.com/influxdata/telegraf/issues/4422): Fix metric can have duplicate field. ## v1.7.1 [2018-07-03] From 774a9f0492bd1611478220611883073eef8cab74 Mon Sep 17 00:00:00 2001 From: maxunt Date: Fri, 13 Jul 2018 23:22:59 -0700 Subject: [PATCH 0020/1815] Add file input plugin and grok parser (#4332) --- README.md | 1 + docs/DATA_FORMATS_INPUT.md | 103 ++++++++++++++++++ internal/config/config.go | 58 ++++++++++ plugins/inputs/all/all.go | 1 + plugins/inputs/file/README.md | 25 +++++ plugins/inputs/file/dev/docker-compose.yml | 13 +++ plugins/inputs/file/dev/json_a.log | 14 +++ plugins/inputs/file/dev/telegraf.conf | 7 ++ plugins/inputs/file/file.go | 102 +++++++++++++++++ plugins/inputs/file/file_test.go | 61 +++++++++++ plugins/inputs/file/testfiles/grok_a.log | 2 + plugins/inputs/file/testfiles/json_a.log | 14 +++ plugins/inputs/logparser/README.md | 7 ++ .../inputs/logparser/grok/testdata/.DS_Store | Bin 0 -> 6148 bytes plugins/inputs/logparser/logparser.go | 83 +++++++------- plugins/inputs/logparser/logparser_test.go | 43 ++++---- .../grok/influx_patterns.go | 0 .../grok/grok.go => parsers/grok/parser.go} | 46 ++++++-- .../grok/parser_test.go} | 82 ++------------ plugins/parsers/grok/testdata/.DS_Store | Bin 0 -> 6148 bytes plugins/parsers/grok/testdata/test-patterns | 14 +++ plugins/parsers/grok/testdata/test_a.log | 1 + plugins/parsers/grok/testdata/test_b.log | 1 + plugins/parsers/registry.go | 34 ++++++ 24 files changed, 558 insertions(+), 154 deletions(-) create mode 100644 plugins/inputs/file/README.md create mode 100644 plugins/inputs/file/dev/docker-compose.yml create mode 100644 plugins/inputs/file/dev/json_a.log create mode 100644 plugins/inputs/file/dev/telegraf.conf create mode 100644 plugins/inputs/file/file.go create mode 100644 plugins/inputs/file/file_test.go create mode 100644 plugins/inputs/file/testfiles/grok_a.log create mode 100644 plugins/inputs/file/testfiles/json_a.log create mode 100644 plugins/inputs/logparser/grok/testdata/.DS_Store rename plugins/{inputs/logparser => parsers}/grok/influx_patterns.go (100%) rename plugins/{inputs/logparser/grok/grok.go => parsers/grok/parser.go} (94%) rename plugins/{inputs/logparser/grok/grok_test.go => parsers/grok/parser_test.go} (94%) create mode 100644 plugins/parsers/grok/testdata/.DS_Store create mode 100644 plugins/parsers/grok/testdata/test-patterns create mode 100644 plugins/parsers/grok/testdata/test_a.log create mode 100644 plugins/parsers/grok/testdata/test_b.log diff --git a/README.md b/README.md index 4fc4db323..75ac9de1e 100644 --- a/README.md +++ b/README.md @@ -153,6 +153,7 @@ configuration options. 
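The missing `return` fixed in the metric commit above means `AddField` now stops after overwriting an existing key instead of falling through and appending a duplicate entry. A short sketch of the guaranteed behaviour, using the same `metric` package API the new tests exercise:

```go
m, _ := metric.New("cpu",
	map[string]string{},
	map[string]interface{}{"value": 1.0},
	time.Now())

m.AddField("value", 42.0) // overwrites in place, no duplicate field

fmt.Println(len(m.FieldList())) // 1
v, _ := m.GetField("value")
fmt.Println(v) // 42
```

Before the fix, the overwrite happened but a new entry was appended as well, leaving two fields with the same key.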
* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios) * [fail2ban](./plugins/inputs/fail2ban) * [fibaro](./plugins/inputs/fibaro) +* [file](./plugins/inputs/file) * [filestat](./plugins/inputs/filestat) * [fluentd](./plugins/inputs/fluentd) * [graylog](./plugins/inputs/graylog) diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index 88282c846..24335a453 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -9,6 +9,7 @@ Telegraf is able to parse the following input data formats into metrics: 1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only) 1. [Collectd](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#collectd) 1. [Dropwizard](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#dropwizard) +1. [Grok](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#grok) Telegraf metrics, like InfluxDB [points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/), @@ -657,5 +658,107 @@ For more information about the dropwizard json format see # [inputs.exec.dropwizard_tag_paths] # tag1 = "tags.tag1" # tag2 = "tags.tag2" +``` +#### Grok +Parse logstash-style "grok" patterns. Patterns can be added to patterns, or custom patterns read from custom_pattern_files. + +# View logstash grok pattern docs here: +# https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html +# All default logstash patterns are supported, these can be viewed here: +# https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns + +# Available modifiers: +# string (default if nothing is specified) +# int +# float +# duration (ie, 5.23ms gets converted to int nanoseconds) +# tag (converts the field into a tag) +# drop (drops the field completely) +# Timestamp modifiers: +# ts-ansic ("Mon Jan _2 15:04:05 2006") +# ts-unix ("Mon Jan _2 15:04:05 MST 2006") +# ts-ruby ("Mon Jan 02 15:04:05 -0700 2006") +# ts-rfc822 ("02 Jan 06 15:04 MST") +# ts-rfc822z ("02 Jan 06 15:04 -0700") +# ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST") +# ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST") +# ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700") +# ts-rfc3339 ("2006-01-02T15:04:05Z07:00") +# ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00") +# ts-httpd ("02/Jan/2006:15:04:05 -0700") +# ts-epoch (seconds since unix epoch) +# ts-epochnano (nanoseconds since unix epoch) +# ts-"CUSTOM" +# CUSTOM time layouts must be within quotes and be the representation of the +# "reference time", which is Mon Jan 2 15:04:05 -0700 MST 2006 +# See https://golang.org/pkg/time/#Parse for more details. + +# Example log file pattern, example log looks like this: +# [04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs +# Breakdown of the DURATION pattern below: +# NUMBER is a builtin logstash grok pattern matching float & int numbers. +# [nuµm]? is a regex specifying 0 or 1 of the characters within brackets. +# s is also regex, this pattern must end in "s". +# so DURATION will match something like '5.324ms' or '6.1µs' or '10s' +DURATION %{NUMBER}[nuµm]?s +RESPONSE_CODE %{NUMBER:response_code:tag} +RESPONSE_TIME %{DURATION:response_time_ns:duration} +EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME} + +# Wider-ranging username matching vs. 
logstash built-in %{USER} +NGUSERNAME [a-zA-Z0-9\.\@\-\+_%]+ +NGUSER %{NGUSERNAME} +# Wider-ranging client IP matching +CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1) + +## +## COMMON LOG PATTERNS +## + +# apache & nginx logs, this is also known as the "common log format" +# see https://en.wikipedia.org/wiki/Common_Log_Format +COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NOTSPACE:ident} %{NOTSPACE:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-) + +# Combined log format is the same as the common log format but with the addition +# of two quoted strings at the end for "referrer" and "agent" +# See Examples at http://httpd.apache.org/docs/current/mod/mod_log_config.html +COMBINED_LOG_FORMAT %{COMMON_LOG_FORMAT} %{QS:referrer} %{QS:agent} + +# HTTPD log formats +HTTPD20_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{LOGLEVEL:loglevel:tag}\] (?:\[client %{IPORHOST:clientip}\] ){0,1}%{GREEDYDATA:errormsg} +HTTPD24_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{WORD:module}:%{LOGLEVEL:loglevel:tag}\] \[pid %{POSINT:pid:int}:tid %{NUMBER:tid:int}\]( \(%{POSINT:proxy_errorcode:int}\)%{DATA:proxy_errormessage}:)?( \[client %{IPORHOST:client}:%{POSINT:clientport}\])? %{DATA:errorcode}: %{GREEDYDATA:message} +HTTPD_ERRORLOG %{HTTPD20_ERRORLOG}|%{HTTPD24_ERRORLOG} + +#### Grok Configuration: +```toml +[[inputs.reader]] + ## This is a list of patterns to check the given log file(s) for. + ## Note that adding patterns here increases processing time. The most + ## efficient configuration is to have one pattern per logparser. + ## Other common built-in patterns are: + ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) + ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) + grok_patterns = ["%{COMBINED_LOG_FORMAT}"] + + ## Name of the outputted measurement name. + grok_name_override = "apache_access_log" + + ## Full path(s) to custom pattern files. + grok_custom_pattern_files = [] + + ## Custom patterns can also be defined here. Put one pattern per line. + grok_custom_patterns = ''' + ''' + + ## Timezone allows you to provide an override for timestamps that + ## don't already include an offset + ## e.g. 04/06/2016 12:41:45 data one two 5.43µs + ## + ## Default: "" which renders UTC + ## Options are as follows: + ## 1. Local -- interpret based on machine localtime + ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones + ## 3. 
UTC -- or blank/unspecified, will return timestamp in UTC + grok_timezone = "Canada/Eastern" ``` \ No newline at end of file diff --git a/internal/config/config.go b/internal/config/config.go index 5b3e53457..21c71d946 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1346,6 +1346,59 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { } } + //for grok data_format + if node, ok := tbl.Fields["grok_named_patterns"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + c.GrokNamedPatterns = append(c.GrokNamedPatterns, str.Value) + } + } + } + } + } + + if node, ok := tbl.Fields["grok_patterns"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + c.GrokPatterns = append(c.GrokPatterns, str.Value) + } + } + } + } + } + + if node, ok := tbl.Fields["grok_custom_patterns"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.GrokCustomPatterns = str.Value + } + } + } + + if node, ok := tbl.Fields["grok_custom_pattern_files"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + c.GrokCustomPatternFiles = append(c.GrokCustomPatternFiles, str.Value) + } + } + } + } + } + + if node, ok := tbl.Fields["grok_timezone"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.GrokTimeZone = str.Value + } + } + } + c.MetricName = name delete(tbl.Fields, "data_format") @@ -1362,6 +1415,11 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { delete(tbl.Fields, "dropwizard_time_format") delete(tbl.Fields, "dropwizard_tags_path") delete(tbl.Fields, "dropwizard_tag_paths") + delete(tbl.Fields, "grok_named_patterns") + delete(tbl.Fields, "grok_patterns") + delete(tbl.Fields, "grok_custom_patterns") + delete(tbl.Fields, "grok_custom_pattern_files") + delete(tbl.Fields, "grok_timezone") return parsers.NewParser(c) } diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 594e0ea42..8594db0a9 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -30,6 +30,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/exec" _ "github.com/influxdata/telegraf/plugins/inputs/fail2ban" _ "github.com/influxdata/telegraf/plugins/inputs/fibaro" + _ "github.com/influxdata/telegraf/plugins/inputs/file" _ "github.com/influxdata/telegraf/plugins/inputs/filestat" _ "github.com/influxdata/telegraf/plugins/inputs/fluentd" _ "github.com/influxdata/telegraf/plugins/inputs/graylog" diff --git a/plugins/inputs/file/README.md b/plugins/inputs/file/README.md new file mode 100644 index 000000000..73a3a2362 --- /dev/null +++ b/plugins/inputs/file/README.md @@ -0,0 +1,25 @@ +# File Input Plugin + +The file plugin updates a list of files every interval and parses the contents +using the selected [input data format](/docs/DATA_FORMATS_INPUT.md). + +Files will always be read in their entirety, if you wish to tail/follow a file +use the [tail input plugin](/plugins/inputs/tail) instead. + +### Configuration: +```toml +[[inputs.file]] + ## Files to parse each interval. + ## These accept standard unix glob matching rules, but with the addition of + ## ** as a "super asterisk". 
ie: + ## /var/log/**.log -> recursively find all .log files in /var/log + ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log + ## /var/log/apache.log -> only tail the apache log file + files = ["/var/log/apache/access.log"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +``` diff --git a/plugins/inputs/file/dev/docker-compose.yml b/plugins/inputs/file/dev/docker-compose.yml new file mode 100644 index 000000000..3c16fca90 --- /dev/null +++ b/plugins/inputs/file/dev/docker-compose.yml @@ -0,0 +1,13 @@ +version: '3' + +services: + telegraf: + image: glinton/scratch + volumes: + - ./telegraf.conf:/telegraf.conf + - ../../../../telegraf:/telegraf + - ./json_a.log:/var/log/test.log + entrypoint: + - /telegraf + - --config + - /telegraf.conf diff --git a/plugins/inputs/file/dev/json_a.log b/plugins/inputs/file/dev/json_a.log new file mode 100644 index 000000000..0f52e9d1e --- /dev/null +++ b/plugins/inputs/file/dev/json_a.log @@ -0,0 +1,14 @@ +{ +"parent": { + "child": 3.0, + "ignored_child": "hi" +}, +"ignored_null": null, +"integer": 4, +"list": [3, 4], +"ignored_parent": { + "another_ignored_null": null, + "ignored_string": "hello, world!" +}, +"another_list": [4] +} diff --git a/plugins/inputs/file/dev/telegraf.conf b/plugins/inputs/file/dev/telegraf.conf new file mode 100644 index 000000000..8cc0fb85d --- /dev/null +++ b/plugins/inputs/file/dev/telegraf.conf @@ -0,0 +1,7 @@ +[[inputs.file]] + files = ["/var/log/test.log"] + data_format = "json" + name_override = "json_file" + +[[outputs.file]] + files = ["stdout"] diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go new file mode 100644 index 000000000..2779561fc --- /dev/null +++ b/plugins/inputs/file/file.go @@ -0,0 +1,102 @@ +package file + +import ( + "fmt" + "io/ioutil" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/globpath" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" +) + +type File struct { + Files []string `toml:"files"` + FromBeginning bool + parser parsers.Parser + + filenames []string +} + +const sampleConfig = ` + ## Files to parse each interval. + ## These accept standard unix glob matching rules, but with the addition of + ## ** as a "super asterisk". 
ie: + ## /var/log/**.log -> recursively find all .log files in /var/log + ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log + ## /var/log/apache.log -> only tail the apache log file + files = ["/var/log/apache/access.log"] + + ## The dataformat to be read from files + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +` + +// SampleConfig returns the default configuration of the Input +func (f *File) SampleConfig() string { + return sampleConfig +} + +func (f *File) Description() string { + return "reload and gather from file[s] on telegraf's interval" +} + +func (f *File) Gather(acc telegraf.Accumulator) error { + err := f.refreshFilePaths() + if err != nil { + return err + } + for _, k := range f.filenames { + metrics, err := f.readMetric(k) + if err != nil { + return err + } + + for _, m := range metrics { + acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) + } + } + return nil +} + +func (f *File) SetParser(p parsers.Parser) { + f.parser = p +} + +func (f *File) refreshFilePaths() error { + var allFiles []string + for _, file := range f.Files { + g, err := globpath.Compile(file) + if err != nil { + return fmt.Errorf("could not compile glob %v: %v", file, err) + } + files := g.Match() + if len(files) <= 0 { + return fmt.Errorf("could not find file: %v", file) + } + + for k := range files { + allFiles = append(allFiles, k) + } + } + + f.filenames = allFiles + return nil +} + +func (f *File) readMetric(filename string) ([]telegraf.Metric, error) { + fileContents, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("E! Error file: %v could not be read, %s", filename, err) + } + return f.parser.Parse(fileContents) + +} + +func init() { + inputs.Add("file", func() telegraf.Input { + return &File{} + }) +} diff --git a/plugins/inputs/file/file_test.go b/plugins/inputs/file/file_test.go new file mode 100644 index 000000000..281056646 --- /dev/null +++ b/plugins/inputs/file/file_test.go @@ -0,0 +1,61 @@ +package file + +import ( + "os" + "path/filepath" + "testing" + + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRefreshFilePaths(t *testing.T) { + wd, err := os.Getwd() + r := File{ + Files: []string{filepath.Join(wd, "testfiles/**.log")}, + } + + err = r.refreshFilePaths() + require.NoError(t, err) + assert.Equal(t, len(r.filenames), 2) +} +func TestJSONParserCompile(t *testing.T) { + var acc testutil.Accumulator + wd, _ := os.Getwd() + r := File{ + Files: []string{filepath.Join(wd, "testfiles/json_a.log")}, + } + parserConfig := parsers.Config{ + DataFormat: "json", + TagKeys: []string{"parent_ignored_child"}, + } + nParser, err := parsers.NewParser(&parserConfig) + r.parser = nParser + assert.NoError(t, err) + + r.Gather(&acc) + assert.Equal(t, map[string]string{"parent_ignored_child": "hi"}, acc.Metrics[0].Tags) + assert.Equal(t, 5, len(acc.Metrics[0].Fields)) +} + +func TestGrokParser(t *testing.T) { + wd, _ := os.Getwd() + var acc testutil.Accumulator + r := File{ + Files: []string{filepath.Join(wd, "testfiles/grok_a.log")}, + } + + parserConfig := parsers.Config{ + DataFormat: "grok", + GrokPatterns: []string{"%{COMMON_LOG_FORMAT}"}, + } + + nParser, err := parsers.NewParser(&parserConfig) + r.parser = nParser + assert.NoError(t, err) + + 
err = r.Gather(&acc) + assert.Equal(t, 2, len(acc.Metrics)) +} diff --git a/plugins/inputs/file/testfiles/grok_a.log b/plugins/inputs/file/testfiles/grok_a.log new file mode 100644 index 000000000..5295fcb75 --- /dev/null +++ b/plugins/inputs/file/testfiles/grok_a.log @@ -0,0 +1,2 @@ +127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 +128.0.0.1 user-identifier tony [10/Oct/2000:13:55:36 -0800] "GET /apache_pb.gif HTTP/1.0" 300 45 \ No newline at end of file diff --git a/plugins/inputs/file/testfiles/json_a.log b/plugins/inputs/file/testfiles/json_a.log new file mode 100644 index 000000000..609c40a09 --- /dev/null +++ b/plugins/inputs/file/testfiles/json_a.log @@ -0,0 +1,14 @@ +{ + "parent": { + "child": 3.0, + "ignored_child": "hi" + }, + "ignored_null": null, + "integer": 4, + "list": [3, 4], + "ignored_parent": { + "another_ignored_null": null, + "ignored_string": "hello, world!" + }, + "another_list": [4] +} diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index 1caa3830c..69aedc4b7 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -1,5 +1,9 @@ # Logparser Input Plugin +### **Deprecated in version 1.8**: Please use the +[tail](/plugins/inputs/tail) plugin with the `grok` +[data format](/docs/DATA_FORMATS_INPUT.md). + The `logparser` plugin streams and parses the given logfiles. Currently it has the capability of parsing "grok" patterns from logfiles, which also supports regex patterns. @@ -8,6 +12,9 @@ regex patterns. ```toml [[inputs.logparser]] + ## DEPRECATED: The `logparser` plugin is deprecated in 1.8. Please use the + ## `tail` plugin with the grok data_format instead. + ## Log files to parse. ## These accept standard unix glob matching rules, but with the addition of ## ** as a "super asterisk". 
ie: diff --git a/plugins/inputs/logparser/grok/testdata/.DS_Store b/plugins/inputs/logparser/grok/testdata/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..15d123ef0efe733881b859864ca23e55a26587a3 GIT binary patch literal 6148 zcmeHKyH3PF47A~j6DOqWpxlxMBx;)IbQIKl01!Yl5jkbEMnQdlg!@R)0kED}c?uHyW`G(Ve!Fm3`vB}b<0;xbM zkP4&%KcWEcY}$0@m_8Lq1yX^H0z4lI3b6^Sj&^jwSqcE0pxF#{<`U$Tz$UOdq6el< z1v*tD#ZafCy+m9SSRI`%8p(&o%x}qy+Uyv=Sh#58m_8Lq1%?VR_l3=Q|DWQM8BOvn zBwZ?y3j9|Ebh12OF0i|}TR-iScWptrM4>URvI0YW@DqR+o+JC#>HURu#5IA{k)yEP Q=)n9DFhSC#0>7ZZD@y@41poj5 literal 0 HcmV?d00001 diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index 8eb866084..b6ce72546 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -3,9 +3,7 @@ package logparser import ( - "fmt" "log" - "reflect" "strings" "sync" @@ -14,9 +12,8 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/inputs" - + "github.com/influxdata/telegraf/plugins/parsers" // Parsers - "github.com/influxdata/telegraf/plugins/inputs/logparser/grok" ) const ( @@ -24,9 +21,13 @@ const ( ) // LogParser in the primary interface for the plugin -type LogParser interface { - ParseLine(line string) (telegraf.Metric, error) - Compile() error +type GrokConfig struct { + MeasurementName string `toml:"measurement"` + Patterns []string + NamedPatterns []string + CustomPatterns string + CustomPatternFiles []string + TimeZone string } type logEntry struct { @@ -45,14 +46,17 @@ type LogParserPlugin struct { done chan struct{} wg sync.WaitGroup acc telegraf.Accumulator - parsers []LogParser sync.Mutex - GrokParser *grok.Parser `toml:"grok"` + GrokParser parsers.Parser + GrokConfig GrokConfig `toml:"grok"` } const sampleConfig = ` + ## DEPRECATED: The 'logparser' plugin is deprecated in 1.8. Please use the + ## 'tail' plugin with the grok data_format as a replacement. + ## Log files to parse. ## These accept standard unix glob matching rules, but with the addition of ## ** as a "super asterisk". ie: @@ -122,6 +126,9 @@ func (l *LogParserPlugin) Gather(acc telegraf.Accumulator) error { // Start kicks off collection of stats for the plugin func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error { + log.Println("W! DEPRECATED: The logparser plugin is deprecated in 1.8. 
" + + "Please use the tail plugin with the grok data_format as a replacement.") + l.Lock() defer l.Unlock() @@ -131,32 +138,19 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error { l.tailers = make(map[string]*tail.Tail) // Looks for fields which implement LogParser interface - l.parsers = []LogParser{} - s := reflect.ValueOf(l).Elem() - for i := 0; i < s.NumField(); i++ { - f := s.Field(i) - - if !f.CanInterface() { - continue - } - - if lpPlugin, ok := f.Interface().(LogParser); ok { - if reflect.ValueOf(lpPlugin).IsNil() { - continue - } - l.parsers = append(l.parsers, lpPlugin) - } + config := &parsers.Config{ + GrokPatterns: l.GrokConfig.Patterns, + GrokNamedPatterns: l.GrokConfig.NamedPatterns, + GrokCustomPatterns: l.GrokConfig.CustomPatterns, + GrokCustomPatternFiles: l.GrokConfig.CustomPatternFiles, + GrokTimeZone: l.GrokConfig.TimeZone, + DataFormat: "grok", } - if len(l.parsers) == 0 { - return fmt.Errorf("logparser input plugin: no parser defined") - } - - // compile log parser patterns: - for _, parser := range l.parsers { - if err := parser.Compile(); err != nil { - return err - } + var err error + l.GrokParser, err = parsers.NewParser(config) + if err != nil { + return err } l.wg.Add(1) @@ -251,8 +245,8 @@ func (l *LogParserPlugin) receiver(tailer *tail.Tail) { } } -// parser is launched as a goroutine to watch the l.lines channel. -// when a line is available, parser parses it and adds the metric(s) to the +// parse is launched as a goroutine to watch the l.lines channel. +// when a line is available, parse parses it and adds the metric(s) to the // accumulator. func (l *LogParserPlugin) parser() { defer l.wg.Done() @@ -269,18 +263,17 @@ func (l *LogParserPlugin) parser() { continue } } - for _, parser := range l.parsers { - m, err = parser.ParseLine(entry.line) - if err == nil { - if m != nil { - tags := m.Tags() - tags["path"] = entry.path - l.acc.AddFields(m.Name(), m.Fields(), tags, m.Time()) - } - } else { - log.Println("E! Error parsing log line: " + err.Error()) + m, err = l.GrokParser.ParseLine(entry.line) + if err == nil { + if m != nil { + tags := m.Tags() + tags["path"] = entry.path + l.acc.AddFields(l.GrokConfig.MeasurementName, m.Fields(), tags, m.Time()) } + } else { + log.Println("E! 
Error parsing log line: " + err.Error()) } + } } diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go index 98567b4c2..3f0ab4daa 100644 --- a/plugins/inputs/logparser/logparser_test.go +++ b/plugins/inputs/logparser/logparser_test.go @@ -9,8 +9,6 @@ import ( "github.com/influxdata/telegraf/testutil" - "github.com/influxdata/telegraf/plugins/inputs/logparser/grok" - "github.com/stretchr/testify/assert" ) @@ -26,15 +24,14 @@ func TestStartNoParsers(t *testing.T) { func TestGrokParseLogFilesNonExistPattern(t *testing.T) { thisdir := getCurrentDir() - p := &grok.Parser{ - Patterns: []string{"%{FOOBAR}"}, - CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"}, - } logparser := &LogParserPlugin{ FromBeginning: true, Files: []string{thisdir + "grok/testdata/*.log"}, - GrokParser: p, + GrokConfig: GrokConfig{ + Patterns: []string{"%{FOOBAR}"}, + CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"}, + }, } acc := testutil.Accumulator{} @@ -44,20 +41,19 @@ func TestGrokParseLogFilesNonExistPattern(t *testing.T) { func TestGrokParseLogFiles(t *testing.T) { thisdir := getCurrentDir() - p := &grok.Parser{ - Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, - CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"}, - } logparser := &LogParserPlugin{ + GrokConfig: GrokConfig{ + MeasurementName: "logparser_grok", + Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, + CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"}, + }, FromBeginning: true, Files: []string{thisdir + "grok/testdata/*.log"}, - GrokParser: p, } acc := testutil.Accumulator{} assert.NoError(t, logparser.Start(&acc)) - acc.Wait(2) logparser.Stop() @@ -91,15 +87,15 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { assert.NoError(t, err) thisdir := getCurrentDir() - p := &grok.Parser{ - Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, - CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"}, - } logparser := &LogParserPlugin{ FromBeginning: true, Files: []string{emptydir + "/*.log"}, - GrokParser: p, + GrokConfig: GrokConfig{ + MeasurementName: "logparser_grok", + Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, + CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"}, + }, } acc := testutil.Accumulator{} @@ -130,16 +126,15 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { // pattern available for test_b.log func TestGrokParseLogFilesOneBad(t *testing.T) { thisdir := getCurrentDir() - p := &grok.Parser{ - Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_BAD}"}, - CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"}, - } - assert.NoError(t, p.Compile()) logparser := &LogParserPlugin{ FromBeginning: true, Files: []string{thisdir + "grok/testdata/test_a.log"}, - GrokParser: p, + GrokConfig: GrokConfig{ + MeasurementName: "logparser_grok", + Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_BAD}"}, + CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"}, + }, } acc := testutil.Accumulator{} diff --git a/plugins/inputs/logparser/grok/influx_patterns.go b/plugins/parsers/grok/influx_patterns.go similarity index 100% rename from plugins/inputs/logparser/grok/influx_patterns.go rename to plugins/parsers/grok/influx_patterns.go diff --git a/plugins/inputs/logparser/grok/grok.go b/plugins/parsers/grok/parser.go similarity index 94% rename from plugins/inputs/logparser/grok/grok.go rename to plugins/parsers/grok/parser.go index 4e6efc2c7..e17f127fc 
100644 --- a/plugins/inputs/logparser/grok/grok.go +++ b/plugins/parsers/grok/parser.go @@ -68,10 +68,11 @@ type Parser struct { // specified by the user in Patterns. // They will look like: // GROK_INTERNAL_PATTERN_0, GROK_INTERNAL_PATTERN_1, etc. - namedPatterns []string + NamedPatterns []string CustomPatterns string CustomPatternFiles []string Measurement string + DefaultTags map[string]string // Timezone is an optional component to help render log dates to // your chosen zone. @@ -133,7 +134,7 @@ func (p *Parser) Compile() error { // Give Patterns fake names so that they can be treated as named // "custom patterns" - p.namedPatterns = make([]string, 0, len(p.Patterns)) + p.NamedPatterns = make([]string, 0, len(p.Patterns)) for i, pattern := range p.Patterns { pattern = strings.TrimSpace(pattern) if pattern == "" { @@ -141,10 +142,10 @@ func (p *Parser) Compile() error { } name := fmt.Sprintf("GROK_INTERNAL_PATTERN_%d", i) p.CustomPatterns += "\n" + name + " " + pattern + "\n" - p.namedPatterns = append(p.namedPatterns, "%{"+name+"}") + p.NamedPatterns = append(p.NamedPatterns, "%{"+name+"}") } - if len(p.namedPatterns) == 0 { + if len(p.NamedPatterns) == 0 { return fmt.Errorf("pattern required") } @@ -167,10 +168,6 @@ func (p *Parser) Compile() error { p.addCustomPatterns(scanner) } - if p.Measurement == "" { - p.Measurement = "logparser_grok" - } - p.loc, err = time.LoadLocation(p.Timezone) if err != nil { log.Printf("W! improper timezone supplied (%s), setting loc to UTC", p.Timezone) @@ -191,7 +188,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { var values map[string]string // the matching pattern string var patternName string - for _, pattern := range p.namedPatterns { + for _, pattern := range p.NamedPatterns { if values, err = p.g.Parse(pattern, line); err != nil { return nil, err } @@ -208,6 +205,12 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { fields := make(map[string]interface{}) tags := make(map[string]string) + + //add default tags + for k, v := range p.DefaultTags { + tags[k] = v + } + timestamp := time.Now() for k, v := range values { if k == "" || v == "" { @@ -335,9 +338,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { case DROP: // goodbye! 
default: - // Replace commas with dot character v = strings.Replace(v, ",", ".", -1) - ts, err := time.ParseInLocation(t, v, p.loc) if err == nil { timestamp = ts @@ -354,6 +355,29 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { return metric.New(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp)) } +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { + scanner := bufio.NewScanner(strings.NewReader(string(buf))) + var lines []string + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + var metrics []telegraf.Metric + + for _, line := range lines { + m, err := p.ParseLine(line) + if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, nil +} + +func (p *Parser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} + func (p *Parser) addCustomPatterns(scanner *bufio.Scanner) { for scanner.Scan() { line := strings.TrimSpace(scanner.Text()) diff --git a/plugins/inputs/logparser/grok/grok_test.go b/plugins/parsers/grok/parser_test.go similarity index 94% rename from plugins/inputs/logparser/grok/grok_test.go rename to plugins/parsers/grok/parser_test.go index 075c00ca4..09f8fa16d 100644 --- a/plugins/inputs/logparser/grok/grok_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -4,79 +4,18 @@ import ( "testing" "time" - "github.com/influxdata/telegraf" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -var benchM telegraf.Metric - -func Benchmark_ParseLine_CommonLogFormat(b *testing.B) { - p := &Parser{ - Patterns: []string{"%{COMMON_LOG_FORMAT}"}, +func TestGrokParse(t *testing.T) { + parser := Parser{ + Measurement: "t_met", + Patterns: []string{"%{COMMON_LOG_FORMAT}"}, } - _ = p.Compile() - - var m telegraf.Metric - for n := 0; n < b.N; n++ { - m, _ = p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`) - } - benchM = m -} - -func Benchmark_ParseLine_CombinedLogFormat(b *testing.B) { - p := &Parser{ - Patterns: []string{"%{COMBINED_LOG_FORMAT}"}, - } - _ = p.Compile() - - var m telegraf.Metric - for n := 0; n < b.N; n++ { - m, _ = p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "-" "Mozilla"`) - } - benchM = m -} - -func Benchmark_ParseLine_CustomPattern(b *testing.B) { - p := &Parser{ - Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, - CustomPatterns: ` - DURATION %{NUMBER}[nuµm]?s - RESPONSE_CODE %{NUMBER:response_code:tag} - RESPONSE_TIME %{DURATION:response_time:duration} - TEST_LOG_A %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME} - `, - } - _ = p.Compile() - - var m telegraf.Metric - for n := 0; n < b.N; n++ { - m, _ = p.ParseLine(`[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101`) - } - benchM = m -} - -// Test a very simple parse pattern. -func TestSimpleParse(t *testing.T) { - p := &Parser{ - Patterns: []string{"%{TESTLOG}"}, - CustomPatterns: ` - TESTLOG %{NUMBER:num:int} %{WORD:client} - `, - } - assert.NoError(t, p.Compile()) - - m, err := p.ParseLine(`142 bot`) + parser.Compile() + _, err := parser.Parse([]byte(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)) assert.NoError(t, err) - require.NotNil(t, m) - - assert.Equal(t, - map[string]interface{}{ - "num": int64(142), - "client": "bot", - }, - m.Fields()) } // Verify that patterns with a regex lookahead fail at compile time. 
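Since the move, the grok parser is constructed through the parsers registry like any other `data_format`. A minimal sketch using only the `Config` fields this patch adds (pattern and input line borrowed from the tests; error handling abbreviated):

```go
parser, err := parsers.NewParser(&parsers.Config{
	DataFormat:   "grok",
	MetricName:   "apache_access_log",
	GrokPatterns: []string{"%{COMMON_LOG_FORMAT}"},
})
if err != nil {
	log.Fatal(err)
}

metrics, err := parser.Parse([]byte(
	`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`))
```

The new `Parse` method splits the buffer into lines and delegates to `ParseLine`, so whole files parse the same way as single records.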
@@ -96,8 +35,7 @@ func TestParsePatternsWithLookahead(t *testing.T) { func TestMeasurementName(t *testing.T) { p := &Parser{ - Measurement: "my_web_log", - Patterns: []string{"%{COMMON_LOG_FORMAT}"}, + Patterns: []string{"%{COMMON_LOG_FORMAT}"}, } assert.NoError(t, p.Compile()) @@ -116,13 +54,11 @@ func TestMeasurementName(t *testing.T) { }, m.Fields()) assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) - assert.Equal(t, "my_web_log", m.Name()) } func TestCLF_IPv6(t *testing.T) { p := &Parser{ - Measurement: "my_web_log", - Patterns: []string{"%{COMMON_LOG_FORMAT}"}, + Patterns: []string{"%{COMMON_LOG_FORMAT}"}, } assert.NoError(t, p.Compile()) @@ -140,7 +76,6 @@ func TestCLF_IPv6(t *testing.T) { }, m.Fields()) assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) - assert.Equal(t, "my_web_log", m.Name()) m, err = p.ParseLine(`::1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`) require.NotNil(t, m) @@ -156,7 +91,6 @@ func TestCLF_IPv6(t *testing.T) { }, m.Fields()) assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) - assert.Equal(t, "my_web_log", m.Name()) } func TestCustomInfluxdbHttpd(t *testing.T) { diff --git a/plugins/parsers/grok/testdata/.DS_Store b/plugins/parsers/grok/testdata/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..15d123ef0efe733881b859864ca23e55a26587a3 GIT binary patch literal 6148 zcmeHKyH3PF47A~j6DOqWpxlxMBx;)IbQIKl01!Yl5jkbEMnQdlg!@R)0kED}c?uHyW`G(Ve!Fm3`vB}b<0;xbM zkP4&%KcWEcY}$0@m_8Lq1yX^H0z4lI3b6^Sj&^jwSqcE0pxF#{<`U$Tz$UOdq6el< z1v*tD#ZafCy+m9SSRI`%8p(&o%x}qy+Uyv=Sh#58m_8Lq1%?VR_l3=Q|DWQM8BOvn zBwZ?y3j9|Ebh12OF0i|}TR-iScWptrM4>URvI0YW@DqR+o+JC#>HURu#5IA{k)yEP Q=)n9DFhSC#0>7ZZD@y@41poj5 literal 0 HcmV?d00001 diff --git a/plugins/parsers/grok/testdata/test-patterns b/plugins/parsers/grok/testdata/test-patterns new file mode 100644 index 000000000..ba995fbd1 --- /dev/null +++ b/plugins/parsers/grok/testdata/test-patterns @@ -0,0 +1,14 @@ +# Test A log line: +# [04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101 +DURATION %{NUMBER}[nuµm]?s +RESPONSE_CODE %{NUMBER:response_code:tag} +RESPONSE_TIME %{DURATION:response_time:duration} +TEST_LOG_A \[%{HTTPDATE:timestamp:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME} %{NUMBER:myint:int} + +# Test B log line: +# [04/06/2016--12:41:45] 1.25 mystring dropme nomodifier +TEST_TIMESTAMP %{MONTHDAY}/%{MONTHNUM}/%{YEAR}--%{TIME} +TEST_LOG_B \[%{TEST_TIMESTAMP:timestamp:ts-"02/01/2006--15:04:05"}\] %{NUMBER:myfloat:float} %{WORD:mystring:string} %{WORD:dropme:drop} %{WORD:nomodifier} + +TEST_TIMESTAMP %{MONTHDAY}/%{MONTHNUM}/%{YEAR}--%{TIME} +TEST_LOG_BAD \[%{TEST_TIMESTAMP:timestamp:ts-"02/01/2006--15:04:05"}\] %{NUMBER:myfloat:float} %{WORD:mystring:int} %{WORD:dropme:drop} %{WORD:nomodifier} diff --git a/plugins/parsers/grok/testdata/test_a.log b/plugins/parsers/grok/testdata/test_a.log new file mode 100644 index 000000000..a44d72fdf --- /dev/null +++ b/plugins/parsers/grok/testdata/test_a.log @@ -0,0 +1 @@ +[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101 diff --git a/plugins/parsers/grok/testdata/test_b.log b/plugins/parsers/grok/testdata/test_b.log new file mode 100644 index 000000000..49e2983e8 --- /dev/null +++ b/plugins/parsers/grok/testdata/test_b.log @@ -0,0 +1 @@ +[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 
ac6bbbda8..24e73d4b6 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -8,6 +8,7 @@ import ( "github.com/influxdata/telegraf/plugins/parsers/collectd" "github.com/influxdata/telegraf/plugins/parsers/dropwizard" "github.com/influxdata/telegraf/plugins/parsers/graphite" + "github.com/influxdata/telegraf/plugins/parsers/grok" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/parsers/json" "github.com/influxdata/telegraf/plugins/parsers/nagios" @@ -90,6 +91,13 @@ type Config struct { // an optional map containing tag names as keys and json paths to retrieve the tag values from as values // used if TagsPath is empty or doesn't return any tags DropwizardTagPathsMap map[string]string + + //grok patterns + GrokPatterns []string + GrokNamedPatterns []string + GrokCustomPatterns string + GrokCustomPatternFiles []string + GrokTimeZone string } // NewParser returns a Parser interface based on the given config. @@ -123,12 +131,38 @@ func NewParser(config *Config) (Parser, error) { config.DefaultTags, config.Separator, config.Templates) + case "grok": + parser, err = newGrokParser( + config.MetricName, + config.GrokPatterns, + config.GrokNamedPatterns, + config.GrokCustomPatterns, + config.GrokCustomPatternFiles, + config.GrokTimeZone) default: err = fmt.Errorf("Invalid data format: %s", config.DataFormat) } return parser, err } +func newGrokParser(metricName string, + patterns []string, + nPatterns []string, + cPatterns string, + cPatternFiles []string, tZone string) (Parser, error) { + parser := grok.Parser{ + Measurement: metricName, + Patterns: patterns, + NamedPatterns: nPatterns, + CustomPatterns: cPatterns, + CustomPatternFiles: cPatternFiles, + Timezone: tZone, + } + + err := parser.Compile() + return &parser, err +} + func NewJSONParser( metricName string, tagKeys []string, From 14d25af4d4e49dc94fa895a05b3772b8b90ed0c7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 13 Jul 2018 23:23:47 -0700 Subject: [PATCH 0021/1815] Remove DS_Store files --- plugins/inputs/logparser/grok/testdata/.DS_Store | Bin 6148 -> 0 bytes plugins/parsers/grok/testdata/.DS_Store | Bin 6148 -> 0 bytes 2 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 plugins/inputs/logparser/grok/testdata/.DS_Store delete mode 100644 plugins/parsers/grok/testdata/.DS_Store diff --git a/plugins/inputs/logparser/grok/testdata/.DS_Store b/plugins/inputs/logparser/grok/testdata/.DS_Store deleted file mode 100644 index 15d123ef0efe733881b859864ca23e55a26587a3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHKyH3PF47A~j6DOqWpxlxMBx;)IbQIKl01!Yl5jkbEMnQdlg!@R)0kED}c?uHyW`G(Ve!Fm3`vB}b<0;xbM zkP4&%KcWEcY}$0@m_8Lq1yX^H0z4lI3b6^Sj&^jwSqcE0pxF#{<`U$Tz$UOdq6el< z1v*tD#ZafCy+m9SSRI`%8p(&o%x}qy+Uyv=Sh#58m_8Lq1%?VR_l3=Q|DWQM8BOvn zBwZ?y3j9|Ebh12OF0i|}TR-iScWptrM4>URvI0YW@DqR+o+JC#>HURu#5IA{k)yEP Q=)n9DFhSC#0>7ZZD@y@41poj5 diff --git a/plugins/parsers/grok/testdata/.DS_Store b/plugins/parsers/grok/testdata/.DS_Store deleted file mode 100644 index 15d123ef0efe733881b859864ca23e55a26587a3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHKyH3PF47A~j6DOqWpxlxMBx;)IbQIKl01!Yl5jkbEMnQdlg!@R)0kED}c?uHyW`G(Ve!Fm3`vB}b<0;xbM zkP4&%KcWEcY}$0@m_8Lq1yX^H0z4lI3b6^Sj&^jwSqcE0pxF#{<`U$Tz$UOdq6el< z1v*tD#ZafCy+m9SSRI`%8p(&o%x}qy+Uyv=Sh#58m_8Lq1%?VR_l3=Q|DWQM8BOvn zBwZ?y3j9|Ebh12OF0i|}TR-iScWptrM4>URvI0YW@DqR+o+JC#>HURu#5IA{k)yEP Q=)n9DFhSC#0>7ZZD@y@41poj5 From 
5f8c9838acffa01d398884265c81f82c589d0747 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 13 Jul 2018 23:25:35 -0700 Subject: [PATCH 0022/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 327648db1..779e461e0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ ### New Inputs +- [file](./plugins/inputs/file/README.md) - Contributed by @maxunt - [tengine](./plugins/inputs/tengine/README.md) - Contributed by @ertaoxu ### New Processors @@ -36,6 +37,7 @@ - [#4386](https://github.com/influxdata/telegraf/pull/4386): Add user tag to procstat input. - [#4403](https://github.com/influxdata/telegraf/pull/4403): Add support for multivalue metrics to collectd parser. - [#4418](https://github.com/influxdata/telegraf/pull/4418): Add support for setting kafka client id. +- [#4332](https://github.com/influxdata/telegraf/pull/4332): Add file input plugin and grok parser. ## v1.7.2 [unreleased] From 3218ed7e0d2b320d6a7d8e2d9a1c7a277b49beca Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 16 Jul 2018 15:10:11 -0700 Subject: [PATCH 0023/1815] Add logparser deprecation notice to release notes --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 779e461e0..705598c76 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,10 @@ ### Release Notes +- With the addition of the standalone `grok` input data format, the + `logparser` input plugin has been deprecated in favor of using the `tail` + input plugin combined with `data_format="grok"` . + ### New Inputs - [file](./plugins/inputs/file/README.md) - Contributed by @maxunt From b75d66ff41a130f37b8e8499a51117b309e484cd Mon Sep 17 00:00:00 2001 From: Greg Date: Tue, 17 Jul 2018 15:54:10 -0600 Subject: [PATCH 0024/1815] Return error if NewRequest fails in http output (#4429) --- plugins/outputs/http/http.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 198aefe07..e36460ac8 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -127,6 +127,9 @@ func (h *HTTP) Write(metrics []telegraf.Metric) error { func (h *HTTP) write(reqBody []byte) error { req, err := http.NewRequest(h.Method, h.URL, bytes.NewBuffer(reqBody)) + if err != nil { + return err + } req.Header.Set("Content-Type", defaultContentType) for k, v := range h.Headers { From 1cc300710c6e89607db9f81faa2a762d7bf22076 Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Tue, 17 Jul 2018 16:00:47 -0600 Subject: [PATCH 0025/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 705598c76..3679d3942 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,6 +51,7 @@ - [#4374](https://github.com/influxdata/telegraf/issues/4374): Don't set values when pattern doesn't match in regex processor. - [#4416](https://github.com/influxdata/telegraf/issues/4416): Fix output format of printer processor. - [#4422](https://github.com/influxdata/telegraf/issues/4422): Fix metric can have duplicate field. +- [#4389](https://github.com/influxdata/telegraf/issues/4389): Return error if NewRequest fails in http output. 
## v1.7.1 [2018-07-03] From 69d22afcc270e148532eb8965f9db5618391d0f9 Mon Sep 17 00:00:00 2001 From: Greg Date: Tue, 17 Jul 2018 16:47:09 -0600 Subject: [PATCH 0026/1815] Reset read deadline for syslog input (#4369) --- plugins/inputs/syslog/syslog.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go index 21f6a770f..6f8d959ec 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -19,7 +19,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -const defaultReadTimeout = time.Millisecond * 500 +const defaultReadTimeout = time.Second * 5 const ipMaxPacketSize = 64 * 1024 // Syslog is a syslog plugin @@ -279,19 +279,23 @@ func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) { conn.Close() }() - if s.ReadTimeout != nil && s.ReadTimeout.Duration > 0 { - conn.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration)) - } - var p *rfc5425.Parser + if s.BestEffort { p = rfc5425.NewParser(conn, rfc5425.WithBestEffort()) } else { p = rfc5425.NewParser(conn) } + if s.ReadTimeout != nil && s.ReadTimeout.Duration > 0 { + conn.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration)) + } + p.ParseExecuting(func(r *rfc5425.Result) { s.store(*r, acc) + if s.ReadTimeout != nil && s.ReadTimeout.Duration > 0 { + conn.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration)) + } }) } @@ -361,7 +365,7 @@ func fields(msg rfc5424.SyslogMessage, s *Syslog) map[string]interface{} { } if msg.Message() != nil { - flds["message"] = *msg.Message() + flds["message"] = strings.TrimSpace(*msg.Message()) } if msg.StructuredData() != nil { From f363e70f33ff9bec695dfaba2600282f0c3c5fef Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Tue, 17 Jul 2018 16:48:41 -0600 Subject: [PATCH 0027/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3679d3942..5fb05e188 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,6 +52,7 @@ - [#4416](https://github.com/influxdata/telegraf/issues/4416): Fix output format of printer processor. - [#4422](https://github.com/influxdata/telegraf/issues/4422): Fix metric can have duplicate field. - [#4389](https://github.com/influxdata/telegraf/issues/4389): Return error if NewRequest fails in http output. +- [#4335](https://github.com/influxdata/telegraf/issues/4335): Reset read deadline for syslog input. 
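The syslog deadline handling above changes from arming the read timeout once per connection to re-arming it after every parsed message, so only connections that go genuinely idle expire. The general shape of that pattern on any `net.Conn` (a sketch; `readOneMessage` and `handle` are placeholders, not plugin code):

```go
for {
	// Re-arm the deadline before each read so an active connection is
	// never cut off mid-stream; only idle peers hit the timeout.
	conn.SetReadDeadline(time.Now().Add(readTimeout))

	msg, err := readOneMessage(conn) // placeholder for the RFC 5425 parser
	if err != nil {
		return err
	}
	handle(msg)
}
```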
## v1.7.1 [2018-07-03] From 4c2786298d02f6b35b1240f7e1816c1b3a01f36f Mon Sep 17 00:00:00 2001 From: Mathevet julien Date: Wed, 18 Jul 2018 01:02:03 +0200 Subject: [PATCH 0028/1815] Exclude cached memory on docker input plugin (#4383) --- plugins/inputs/docker/docker.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 5d4cb0de8..c98f1f845 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -534,11 +534,11 @@ func parseContainerStats( if daemonOSType != "windows" { memfields["limit"] = stat.MemoryStats.Limit - memfields["usage"] = stat.MemoryStats.Usage memfields["max_usage"] = stat.MemoryStats.MaxUsage mem := calculateMemUsageUnixNoCache(stat.MemoryStats) memLimit := float64(stat.MemoryStats.Limit) + memfields["usage"] = uint64(mem) memfields["usage_percent"] = calculateMemPercentUnixNoCache(memLimit, mem) } else { memfields["commit_bytes"] = stat.MemoryStats.Commit From eb64617e376474c815a9dc3bc954aca4ac55972d Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Tue, 17 Jul 2018 17:03:15 -0600 Subject: [PATCH 0029/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5fb05e188..2b3cd1b4f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -53,6 +53,7 @@ - [#4422](https://github.com/influxdata/telegraf/issues/4422): Fix metric can have duplicate field. - [#4389](https://github.com/influxdata/telegraf/issues/4389): Return error if NewRequest fails in http output. - [#4335](https://github.com/influxdata/telegraf/issues/4335): Reset read deadline for syslog input. +- [#4375](https://github.com/influxdata/telegraf/issues/4375): Exclude cached memory on docker input plugin. ## v1.7.1 [2018-07-03] From f37b503f686cbeb985c24c90c96d622da32712b3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 18 Jul 2018 13:52:49 -0700 Subject: [PATCH 0030/1815] Set 1.7.2 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2b3cd1b4f..5ba790613 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,7 +43,7 @@ - [#4418](https://github.com/influxdata/telegraf/pull/4418): Add support for setting kafka client id. - [#4332](https://github.com/influxdata/telegraf/pull/4332): Add file input plugin and grok parser. -## v1.7.2 [unreleased] +## v1.7.2 [2018-07-18] ### Bugfixes From 9657870258a3efb8e6ab2f77e87e2aa869927566 Mon Sep 17 00:00:00 2001 From: david7482 Date: Tue, 24 Jul 2018 02:00:35 +0800 Subject: [PATCH 0031/1815] Improve cloudwatch output performance (#4320) --- plugins/outputs/cloudwatch/cloudwatch.go | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index 39b13cf29..f7ccc1fee 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -93,27 +93,17 @@ func (c *CloudWatch) Close() error { } func (c *CloudWatch) Write(metrics []telegraf.Metric) error { + + var datums []*cloudwatch.MetricDatum for _, m := range metrics { - err := c.WriteSinglePoint(m) - if err != nil { - return err - } + d := BuildMetricDatum(m) + datums = append(datums, d...) } - return nil -} - -// Write data for a single point. A point can have many fields and one field -// is equal to one MetricDatum. There is a limit on how many MetricDatums a -// request can have so we process one Point at a time. 
-func (c *CloudWatch) WriteSinglePoint(point telegraf.Metric) error { - datums := BuildMetricDatum(point) - const maxDatumsPerCall = 20 // PutMetricData only supports up to 20 data metrics per call for _, partition := range PartitionDatums(maxDatumsPerCall, datums) { err := c.WriteToCloudWatch(partition) - if err != nil { return err } From be2ea90503ecae992d5b080536a0b1cac36bf927 Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Mon, 23 Jul 2018 12:03:21 -0600 Subject: [PATCH 0032/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5ba790613..49bc3a04b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,6 +42,7 @@ - [#4403](https://github.com/influxdata/telegraf/pull/4403): Add support for multivalue metrics to collectd parser. - [#4418](https://github.com/influxdata/telegraf/pull/4418): Add support for setting kafka client id. - [#4332](https://github.com/influxdata/telegraf/pull/4332): Add file input plugin and grok parser. +- [#4320](https://github.com/influxdata/telegraf/pull/4320): Improve cloudwatch output performance. ## v1.7.2 [2018-07-18] From d95824a9c18b9824cccd8ba74a51790272b04684 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 24 Jul 2018 10:43:18 -0700 Subject: [PATCH 0033/1815] Downgrade max aerospike client version to 1.27.0 (#4462) This is currently the most recent version without the memory leak issue. --- Gopkg.lock | 6 +++--- Gopkg.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 194bb61e6..600c7673b 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -59,8 +59,8 @@ "types/rand", "utils/buffer" ] - revision = "c10b5393e43bd60125aca6289c7b24879edb1787" - version = "v1.33.0" + revision = "1dc8cf203d24cd454e71ce40ab4cd0bf3112df90" + version = "v1.27.0" [[projects]] branch = "master" @@ -968,6 +968,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "024194b983d91b9500fe97e0aa0ddb5fe725030cb51ddfb034e386cae1098370" + inputs-digest = "726abf0a241126b415293c203dddc516e4d8be9b0f2913fb3ab2c4eb332e3ce2" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 78d3749a9..abf1b4a06 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -4,7 +4,7 @@ [[constraint]] name = "github.com/aerospike/aerospike-client-go" - version = "^1.33.0" + version = "<=1.27.0" [[constraint]] name = "github.com/amir/raidman" From 9051ea9dc0f2c13f6e806beefea771ece29fe077 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 24 Jul 2018 14:57:24 -0700 Subject: [PATCH 0034/1815] Require dep to be installed before building (#4461) --- .circleci/config.yml | 75 +++++++++++++++---- CONTRIBUTING.md | 5 +- Makefile | 55 ++++++++++---- README.md | 19 +++-- appveyor.yml | 15 ++-- .../inputs/win_services/win_services_test.go | 24 +++++- scripts/build.py | 6 +- scripts/ci-1.10.docker | 28 +++++++ scripts/ci-1.9.docker | 28 +++++++ scripts/release.sh | 35 --------- 10 files changed, 204 insertions(+), 86 deletions(-) create mode 100644 scripts/ci-1.10.docker create mode 100644 scripts/ci-1.9.docker delete mode 100755 scripts/release.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 578e0bcbd..2ddcce1e6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -4,10 +4,10 @@ defaults: working_directory: '/go/src/github.com/influxdata/telegraf' go-1_9: &go-1_9 docker: - - image: 'circleci/golang:1.9.7' + - image: 'quay.io/influxdb/telegraf-ci:1.9.7' go-1_10: &go-1_10 docker: - - image: 'circleci/golang:1.10.3' + - image: 
'quay.io/influxdb/telegraf-ci:1.10.3' version: 2 jobs: @@ -27,53 +27,98 @@ jobs: root: '/go/src' paths: - '*' + test-go-1.9: <<: [ *defaults, *go-1_9 ] steps: - attach_workspace: at: '/go/src' - - run: 'make test-ci' + - run: 'make check' + - run: 'make test' test-go-1.10: <<: [ *defaults, *go-1_10 ] steps: - attach_workspace: at: '/go/src' - - run: 'make test-ci' - - run: 'GOARCH=386 make test-ci' + - run: 'make check' + - run: 'make test' + test-go-1.10-386: + <<: [ *defaults, *go-1_10 ] + steps: + - attach_workspace: + at: '/go/src' + - run: 'GOARCH=386 make check' + - run: 'GOARCH=386 make test' + + package: + <<: [ *defaults, *go-1_10 ] + steps: + - attach_workspace: + at: '/go/src' + - run: 'make package' + - store_artifacts: + path: './build' + destination: 'build' release: <<: [ *defaults, *go-1_10 ] steps: - attach_workspace: at: '/go/src' - - run: './scripts/release.sh' + - run: 'make package-release' - store_artifacts: - path: './artifacts' - destination: '.' + path: './build' + destination: 'build' nightly: <<: [ *defaults, *go-1_10 ] steps: - attach_workspace: at: '/go/src' - - run: './scripts/release.sh' + - run: 'make package-nightly' - store_artifacts: - path: './artifacts' - destination: '.' + path: './build' + destination: 'build' workflows: version: 2 - build_and_release: + check: jobs: - - 'deps' + - 'deps': + filters: + tags: + only: /.*/ - 'test-go-1.9': requires: - 'deps' + filters: + tags: + only: /.*/ - 'test-go-1.10': requires: - 'deps' + filters: + tags: + only: /.*/ + - 'test-go-1.10-386': + requires: + - 'deps' + filters: + tags: + only: /.*/ + - 'package': + requires: + - 'test-go-1.9' + - 'test-go-1.10' + - 'test-go-1.10-386' - 'release': requires: - 'test-go-1.9' - 'test-go-1.10' + - 'test-go-1.10-386' + filters: + tags: + only: /.*/ + branches: + ignore: /.*/ nightly: jobs: - 'deps' @@ -83,10 +128,14 @@ workflows: - 'test-go-1.10': requires: - 'deps' + - 'test-go-1.10-386': + requires: + - 'deps' - 'nightly': requires: - 'test-go-1.9' - 'test-go-1.10' + - 'test-go-1.10-386' triggers: - schedule: cron: "0 7 * * *" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 66aa92cc3..1a6ace7fc 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -30,9 +30,8 @@ which can be found [on our website](http://influxdb.com/community/cla.html) Assuming you can already build the project, run these in the telegraf directory: -1. `go get -u github.com/golang/dep/cmd/dep` -2. `dep ensure -vendor-only` -3. `dep ensure -add github.com/[dependency]/[new-package]` +1. `dep ensure -vendor-only` +2. 
`dep ensure -add github.com/[dependency]/[new-package]` ## Input Plugins diff --git a/Makefile b/Makefile index b4f89c799..4521c5d7f 100644 --- a/Makefile +++ b/Makefile @@ -17,10 +17,8 @@ BUILDFLAGS ?= ifdef GOBIN PATH := $(GOBIN):$(PATH) -else ifdef GOPATH -PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH) else -PATH := $(HOME)/go/bin:$(PATH) +PATH := $(subst :,/bin:,$(shell go env GOPATH))/bin:$(PATH) endif LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH) @@ -28,33 +26,39 @@ ifdef VERSION LDFLAGS += -X main.version=$(VERSION) endif +.PHONY: all all: - $(MAKE) deps - $(MAKE) telegraf + @$(MAKE) --no-print-directory deps + @$(MAKE) --no-print-directory telegraf +.PHONY: deps deps: - go get -u github.com/golang/lint/golint - go get -u github.com/golang/dep/cmd/dep dep ensure -vendor-only +.PHONY: telegraf telegraf: go build -ldflags "$(LDFLAGS)" ./cmd/telegraf +.PHONY: go-install go-install: go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf +.PHONY: install install: telegraf mkdir -p $(DESTDIR)$(PREFIX)/bin/ cp telegraf $(DESTDIR)$(PREFIX)/bin/ + +.PHONY: test test: go test -short ./... +.PHONY: fmt fmt: @gofmt -w $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)) +.PHONY: fmtcheck fmtcheck: - @echo '[INFO] running gofmt to identify incorrectly formatted code...' @if [ ! -z "$(GOFMT)" ]; then \ echo "[ERROR] gofmt has found errors in the following files:" ; \ echo "$(GOFMT)" ; \ @@ -62,8 +66,8 @@ fmtcheck: echo "Run make fmt to fix them." ; \ exit 1 ;\ fi - @echo '[INFO] done.' +.PHONY: test-windows test-windows: go test -short ./plugins/inputs/ping/... go test -short ./plugins/inputs/win_perf_counters/... @@ -71,8 +75,7 @@ test-windows: go test -short ./plugins/inputs/procstat/... go test -short ./plugins/inputs/ntpq/... -# vet runs the Go source code static analysis tool `vet` to find -# any common errors. +.PHONY: vet vet: @echo 'go vet $$(go list ./... | grep -v ./plugins/parsers/influx)' @go vet $$(go list ./... | grep -v ./plugins/parsers/influx) ; if [ $$? -ne 0 ]; then \ @@ -82,19 +85,33 @@ vet: exit 1; \ fi -test-ci: fmtcheck vet - go test -short ./... +.PHONY: check +check: fmtcheck vet +.PHONY: test-all test-all: fmtcheck vet go test ./... +.PHONY: package package: ./scripts/build.py --package --platform=all --arch=all +.PHONY: package-release +package-release: + ./scripts/build.py --release --package --platform=all --arch=all \ + --upload --bucket=dl.influxdata.com/telegraf/releases + +.PHONY: package-nightly +package-nightly: + ./scripts/build.py --nightly --package --platform=all --arch=all \ + --upload --bucket=dl.influxdata.com/telegraf/nightlies + +.PHONY: clean clean: rm -f telegraf rm -f telegraf.exe +.PHONY: docker-image docker-image: ./scripts/build.py --package --platform=linux --arch=amd64 cp build/telegraf*$(COMMIT)*.deb . @@ -103,6 +120,7 @@ docker-image: plugins/parsers/influx/machine.go: plugins/parsers/influx/machine.go.rl ragel -Z -G2 $^ -o $@ +.PHONY: static static: @echo "Building static linux binary..." @CGO_ENABLED=0 \ @@ -110,8 +128,17 @@ static: GOARCH=amd64 \ go build -ldflags "$(LDFLAGS)" ./cmd/telegraf +.PHONY: plugin-% plugin-%: @echo "Starting dev environment for $${$(@)} input plugin..." 
@docker-compose -f plugins/inputs/$${$(@)}/dev/docker-compose.yml up -.PHONY: deps telegraf install test test-windows lint vet test-all package clean docker-image fmtcheck uint64 static +.PHONY: ci-1.10 +ci-1.10: + docker build -t quay.io/influxdb/telegraf-ci:1.10.3 - < scripts/ci-1.10.docker + docker push quay.io/influxdb/telegraf-ci:1.10.3 + +.PHONY: ci-1.9 +ci-1.9: + docker build -t quay.io/influxdb/telegraf-ci:1.9.7 - < scripts/ci-1.9.docker + docker push quay.io/influxdb/telegraf-ci:1.9.7 diff --git a/README.md b/README.md index 75ac9de1e..03e3a8f58 100644 --- a/README.md +++ b/README.md @@ -42,14 +42,17 @@ Ansible role: https://github.com/rossmcdonald/telegraf Telegraf requires golang version 1.9 or newer, the Makefile requires GNU make. -Dependencies are managed with [dep](https://github.com/golang/dep), -which is installed by the Makefile if you don't have it already. - -1. [Install Go](https://golang.org/doc/install) -2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH) -3. Run `go get -d github.com/influxdata/telegraf` -4. Run `cd $GOPATH/src/github.com/influxdata/telegraf` -5. Run `make` +1. [Install Go](https://golang.org/doc/install) >=1.9 +2. [Install dep](https://golang.github.io/dep/docs/installation.html) ==v0.4.1 +3. Download Telegraf source: + ``` + go get -d github.com/influxdata/telegraf + ``` +4. Run make from the source directory + ``` + cd "$HOME/go/src/github.com/influxdata/telegraf" + make + ``` ### Nightly Builds diff --git a/appveyor.yml b/appveyor.yml index 76a5ab067..cd8938ff7 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -12,22 +12,27 @@ platform: x64 install: - IF NOT EXIST "C:\Cache" mkdir C:\Cache - - IF NOT EXIST "C:\Cache\go1.10.1.msi" curl -o "C:\Cache\go1.10.1.msi" https://storage.googleapis.com/golang/go1.10.1.windows-amd64.msi + - IF NOT EXIST "C:\Cache\go1.10.3.msi" curl -o "C:\Cache\go1.10.3.msi" https://storage.googleapis.com/golang/go1.10.3.windows-amd64.msi - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip - IF EXIST "C:\Go" rmdir /S /Q C:\Go - - msiexec.exe /i "C:\Cache\go1.10.1.msi" /quiet + - msiexec.exe /i "C:\Cache\go1.10.3.msi" /quiet - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y + - go get -d github.com/golang/dep + - cd "%GOPATH%\src\github.com\golang\dep" + - git checkout -q v0.4.1 + - go install -ldflags="-X main.version=v0.4.1" ./cmd/dep + - cd "%GOPATH%\src\github.com\influxdata\telegraf" + - git config --system core.longpaths true - go version - go env - - git config --system core.longpaths true build_script: - - cmd: C:\GnuWin32\bin\make deps - - cmd: C:\GnuWin32\bin\make telegraf + - cmd: C:\GnuWin32\bin\make test_script: + - cmd: C:\GnuWin32\bin\make check - cmd: C:\GnuWin32\bin\make test-windows artifacts: diff --git a/plugins/inputs/win_services/win_services_test.go b/plugins/inputs/win_services/win_services_test.go index 3c05e85c5..37dc3f08c 100644 --- a/plugins/inputs/win_services/win_services_test.go +++ b/plugins/inputs/win_services/win_services_test.go @@ -5,12 +5,13 @@ package win_services import ( "errors" "fmt" + "testing" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/sys/windows/svc" "golang.org/x/sys/windows/svc/mgr" - "testing" ) //testData is DD 
wrapper for unit testing of WinServices @@ -84,14 +85,31 @@ func (m *FakeWinSvc) Config() (mgr.Config, error) { if m.testData.serviceConfigError != nil { return mgr.Config{}, m.testData.serviceConfigError } else { - return mgr.Config{0, uint32(m.testData.startUpMode), 0, "", "", 0, nil, m.testData.serviceName, m.testData.displayName, "", ""}, nil + return mgr.Config{ + ServiceType: 0, + StartType: uint32(m.testData.startUpMode), + ErrorControl: 0, + BinaryPathName: "", + LoadOrderGroup: "", + TagId: 0, + Dependencies: nil, + ServiceStartName: m.testData.serviceName, + DisplayName: m.testData.displayName, + Password: "", + Description: "", + }, nil } } func (m *FakeWinSvc) Query() (svc.Status, error) { if m.testData.serviceQueryError != nil { return svc.Status{}, m.testData.serviceQueryError } else { - return svc.Status{svc.State(m.testData.state), 0, 0, 0}, nil + return svc.Status{ + State: svc.State(m.testData.state), + Accepts: 0, + CheckPoint: 0, + WaitHint: 0, + }, nil } } diff --git a/scripts/build.py b/scripts/build.py index 344ee48a8..27f47f42f 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -155,12 +155,8 @@ def go_get(branch, update=False, no_uncommitted=False): if local_changes() and no_uncommitted: logging.error("There are uncommitted changes in the current directory.") return False - if not check_path_for("dep"): - logging.info("Downloading `dep`...") - get_command = "go get -u github.com/golang/dep/cmd/dep" - run(get_command) logging.info("Retrieving dependencies with `dep`...") - run("{}/bin/dep ensure -v".format(os.environ.get("GOPATH", + run("{}/bin/dep ensure -v -vendor-only".format(os.environ.get("GOPATH", os.path.expanduser("~/go")))) return True diff --git a/scripts/ci-1.10.docker b/scripts/ci-1.10.docker new file mode 100644 index 000000000..1cfe4c27d --- /dev/null +++ b/scripts/ci-1.10.docker @@ -0,0 +1,28 @@ +FROM golang:1.10.3 + +RUN chmod -R 755 "$GOPATH" + +RUN DEBIAN_FRONTEND=noninteractive \ + apt update && apt install -y --no-install-recommends \ + autoconf \ + git \ + libtool \ + locales \ + make \ + python-boto \ + rpm \ + ruby \ + ruby-dev \ + zip && \ + rm -rf /var/lib/apt/lists/* + +RUN ln -sf /usr/share/zoneinfo/Etc/UTC /etc/localtime +RUN locale-gen C.UTF-8 || true +ENV LANG=C.UTF-8 + +RUN gem install fpm + +RUN go get -d github.com/golang/dep && \ + cd src/github.com/golang/dep && \ + git checkout -q v0.4.1 && \ + go install -ldflags="-X main.version=v0.4.1" ./cmd/dep diff --git a/scripts/ci-1.9.docker b/scripts/ci-1.9.docker new file mode 100644 index 000000000..d1ac5f839 --- /dev/null +++ b/scripts/ci-1.9.docker @@ -0,0 +1,28 @@ +FROM golang:1.9.7 + +RUN chmod -R 755 "$GOPATH" + +RUN DEBIAN_FRONTEND=noninteractive \ + apt update && apt install -y --no-install-recommends \ + autoconf \ + git \ + libtool \ + locales \ + make \ + python-boto \ + rpm \ + ruby \ + ruby-dev \ + zip && \ + rm -rf /var/lib/apt/lists/* + +RUN ln -sf /usr/share/zoneinfo/Etc/UTC /etc/localtime +RUN locale-gen C.UTF-8 || true +ENV LANG=C.UTF-8 + +RUN gem install fpm + +RUN go get -d github.com/golang/dep && \ + cd src/github.com/golang/dep && \ + git checkout -q v0.4.1 && \ + go install -ldflags="-X main.version=v0.4.1" ./cmd/dep diff --git a/scripts/release.sh b/scripts/release.sh deleted file mode 100755 index 41b95db01..000000000 --- a/scripts/release.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -ARTIFACT_DIR='artifacts' -run() -{ - "$@" - ret=$? 
- if [[ $ret -eq 0 ]] - then - echo "[INFO] [ $@ ]" - else - echo "[ERROR] [ $@ ] returned $ret" - exit $ret - fi -} - -run make -run mkdir -p ${ARTIFACT_DIR} -run gzip telegraf -c > "$ARTIFACT_DIR/telegraf.gz" - -# RPM is used to build packages for Enterprise Linux hosts. -# Boto is used to upload packages to S3. -run sudo apt-get update -run sudo apt-get install -y rpm python-boto ruby ruby-dev autoconf libtool -run sudo gem install fpm - -if git describe --exact-match HEAD 2>&1 >/dev/null; then - run ./scripts/build.py --release --package --platform=all --arch=all --upload --bucket=dl.influxdata.com/telegraf/releases -elif [ "${CIRCLE_STAGE}" = nightly ]; then - run ./scripts/build.py --nightly --package --platform=all --arch=all --upload --bucket=dl.influxdata.com/telegraf/nightlies -else - run ./scripts/build.py --package --platform=all --arch=all -fi - -run mv build $ARTIFACT_DIR From 0a4f827f9b79fda6b240f5d3d8765027cf5638e6 Mon Sep 17 00:00:00 2001 From: Chris Goller Date: Tue, 24 Jul 2018 21:29:00 -0500 Subject: [PATCH 0035/1815] Provide function to test metric equality (#4464) --- testutil/metric.go | 16 ++++++++++++ testutil/metric_test.go | 57 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+) create mode 100644 testutil/metric.go create mode 100644 testutil/metric_test.go diff --git a/testutil/metric.go b/testutil/metric.go new file mode 100644 index 000000000..9620fea15 --- /dev/null +++ b/testutil/metric.go @@ -0,0 +1,16 @@ +package testutil + +import ( + "testing" + + "github.com/influxdata/telegraf" + "github.com/stretchr/testify/require" +) + +// MustEqual requires a and b to be identical. +func MustEqual(t *testing.T, got telegraf.Metric, want Metric) { + require.Equal(t, want.Measurement, got.Name()) + require.Equal(t, want.Fields, got.Fields()) + require.Equal(t, want.Tags, got.Tags()) + require.Equal(t, want.Time, got.Time()) +} diff --git a/testutil/metric_test.go b/testutil/metric_test.go new file mode 100644 index 000000000..7295227ce --- /dev/null +++ b/testutil/metric_test.go @@ -0,0 +1,57 @@ +package testutil + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +func TestMustEqual(t *testing.T) { + type args struct { + } + tests := []struct { + name string + got telegraf.Metric + want Metric + }{ + { + name: "telegraf and testutil metrics should be equal", + got: func() telegraf.Metric { + m, _ := metric.New( + "test", + map[string]string{ + "t1": "v1", + "t2": "v2", + }, + map[string]interface{}{ + "f1": 1, + "f2": 3.14, + "f3": "v3", + }, + time.Unix(0, 0), + ) + return m + }(), + want: Metric{ + Measurement: "test", + Tags: map[string]string{ + "t1": "v1", + "t2": "v2", + }, + Fields: map[string]interface{}{ + "f1": int64(1), + "f2": 3.14, + "f3": "v3", + }, + Time: time.Unix(0, 0), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + MustEqual(t, tt.got, tt.want) + }) + } +} From 6e245b5483a5a8e78fdf55607bff55483a59eceb Mon Sep 17 00:00:00 2001 From: Greg Date: Wed, 25 Jul 2018 17:10:28 -0600 Subject: [PATCH 0036/1815] Update docker input plugin to use new library (#4440) --- Gopkg.lock | 418 +++++++++++++++++++++++++------- Gopkg.toml | 28 +-- plugins/inputs/docker/client.go | 18 +- 3 files changed, 345 insertions(+), 119 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 600c7673b..4a70b057d 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -3,48 +3,61 @@ [[projects]] branch = "master" + digest = 
"1:d7582b4af1b0b953ff2bb9573a50f787c7e1669cb148fb086a3d1c670a1ac955" name = "code.cloudfoundry.org/clock" packages = ["."] + pruneopts = "" revision = "02e53af36e6c978af692887ed449b74026d76fec" [[projects]] + digest = "1:ce7dc0f1ffcd9a2aacc50ae6d322eebff8f4faa2d6c5f445c874cd0b77a63de7" name = "collectd.org" packages = [ "api", "cdtime", - "network" + "network", ] + pruneopts = "" revision = "2ce144541b8903101fb8f1483cc0497a68798122" version = "v0.3.0" [[projects]] + branch = "master" + digest = "1:c1269bfaddefd090935401c291ad5df6c03de605a440e941ecc568e19f0f9e3b" name = "github.com/Microsoft/ApplicationInsights-Go" packages = [ "appinsights", - "appinsights/contracts" + "appinsights/contracts", ] + pruneopts = "" revision = "d2df5d440eda5372f24fcac03839a64d6cb5f7e5" - version = "v0.4.2" [[projects]] + digest = "1:ec6a42cd98d70f0916216d8f6df8ca61145edeaad041014aa9c003068de7364c" name = "github.com/Microsoft/go-winio" packages = ["."] - revision = "7da180ee92d8bd8bb8c37fc560e673e6557c392f" - version = "v0.4.7" + pruneopts = "" + revision = "a6d595ae73cf27a1b8fc32930668708f45ce1c85" + version = "v0.4.9" [[projects]] + digest = "1:14af5ba5ac88efec490fb59734df34e1bd973198caefa7b0cceed0900ef6164c" name = "github.com/Shopify/sarama" packages = ["."] + pruneopts = "" revision = "35324cf48e33d8260e1c7c18854465a904ade249" version = "v1.17.0" [[projects]] + digest = "1:f82b8ac36058904227087141017bb82f4b0fc58272990a4cdae3e2d6d222644e" name = "github.com/StackExchange/wmi" packages = ["."] + pruneopts = "" revision = "5d049714c4a64225c3c79a7cf7d02f7fb5b96338" version = "1.0.0" [[projects]] + digest = "1:855af787df6b733016849082d66ffda5e0e00856513fcac08a7cf199a23515c2" name = "github.com/aerospike/aerospike-client-go" packages = [ ".", @@ -57,42 +70,52 @@ "types/atomic", "types/particle_type", "types/rand", - "utils/buffer" + "utils/buffer", ] + pruneopts = "" revision = "1dc8cf203d24cd454e71ce40ab4cd0bf3112df90" version = "v1.27.0" [[projects]] branch = "master" + digest = "1:1399282ad03ac819f0e8a747c888407c5c98bb497d33821a7047c7bae667ede0" name = "github.com/alecthomas/template" packages = [ ".", - "parse" + "parse", ] + pruneopts = "" revision = "a0175ee3bccc567396460bf5acd36800cb10c49c" [[projects]] branch = "master" + digest = "1:8483994d21404c8a1d489f6be756e25bfccd3b45d65821f25695577791a08e68" name = "github.com/alecthomas/units" packages = ["."] + pruneopts = "" revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a" [[projects]] branch = "master" + digest = "1:072692f8d76356228f31f64ca3140041a140011c7dea26e746206e8649c71b31" name = "github.com/amir/raidman" packages = [ ".", - "proto" + "proto", ] + pruneopts = "" revision = "1ccc43bfb9c93cb401a4025e49c64ba71e5e668b" [[projects]] branch = "master" + digest = "1:83a67d925714169fa5121021abef0276605c6e4d51c467dd1f0c04344abad1ff" name = "github.com/apache/thrift" packages = ["lib/go/thrift"] - revision = "f5f430df56871bc937950274b2c86681d3db6e59" + pruneopts = "" + revision = "f2867c24984aa53edec54a138c03db934221bdea" [[projects]] + digest = "1:ca172b51bfe0a1ae7725dc782339fed4ba697dcd44e29a0a1c765fffdbf05ddc" name = "github.com/aws/aws-sdk-go" packages = [ "aws", @@ -114,6 +137,7 @@ "aws/signer/v4", "internal/sdkio", "internal/sdkrand", + "internal/sdkuri", "internal/shareddefaults", "private/protocol", "private/protocol/json/jsonutil", @@ -124,162 +148,209 @@ "private/protocol/xml/xmlutil", "service/cloudwatch", "service/kinesis", - "service/sts" + "service/sts", ] - revision = "bfc1a07cf158c30c41a3eefba8aae043d0bb5bff" - version = "v1.14.8" + 
pruneopts = "" + revision = "8cf662a972fa7fba8f2c1ec57648cf840e2bb401" + version = "v1.14.30" [[projects]] branch = "master" + digest = "1:fca298802a2ab834d6eb0e284788ae037ebc324c0f325ff92c5eea592d189cc5" name = "github.com/beorn7/perks" packages = ["quantile"] + pruneopts = "" revision = "3a771d992973f24aa725d07868b467d1ddfceafb" [[projects]] + digest = "1:0edb96edcfeee9aeba92e605536fbb1542b0bf6a10cea9d0b5a2227d5a703eae" name = "github.com/bsm/sarama-cluster" packages = ["."] + pruneopts = "" revision = "cf455bc755fe41ac9bb2861e7a961833d9c2ecc3" version = "v2.1.13" [[projects]] + digest = "1:f619cb9b07aebe5416262cdd8b86082e8d5bdc5264cb3b615ff858df0b645f97" name = "github.com/cenkalti/backoff" packages = ["."] + pruneopts = "" revision = "2ea60e5f094469f9e65adb9cd103795b73ae743e" version = "v2.0.0" [[projects]] branch = "master" + digest = "1:65ae2d1625584ba8d16d1e15b25db1fc62334e2040f22dbbbdc7531c909843b2" name = "github.com/couchbase/go-couchbase" packages = ["."] + pruneopts = "" revision = "16db1f1fe037412f12738fa4d8448c549c4edd77" [[projects]] branch = "master" + digest = "1:5db54de7054c072f47806c91ef7625ffa00489ca2da5fbc6ca1c78e08018f6bf" name = "github.com/couchbase/gomemcached" packages = [ ".", - "client" + "client", ] + pruneopts = "" revision = "0da75df145308b9a4e6704d762ca9d9b77752efc" [[projects]] branch = "master" + digest = "1:0deaa0f28c823119725c8308703f019797bc077e251d1ed3f2b8eae2cc7791d7" name = "github.com/couchbase/goutils" packages = [ "logging", - "scramsha" + "scramsha", ] + pruneopts = "" revision = "e865a1461c8ac0032bd37e2d4dab3289faea3873" [[projects]] + branch = "master" + digest = "1:4c015b7445aa37becc220fde9bdbc4d4329f75af72ca1c98f9b0bd698d6068cb" + name = "github.com/crewjam/rfc5424" + packages = ["."] + pruneopts = "" + revision = "6ae4b209c3f0d5071494be6b883a1970acadda94" + +[[projects]] + digest = "1:0a39ec8bf5629610a4bc7873a92039ee509246da3cef1a0ea60f1ed7e5f9cea5" name = "github.com/davecgh/go-spew" packages = ["spew"] + pruneopts = "" revision = "346938d642f2ec3594ed81d874461961cd0faa76" version = "v1.1.0" [[projects]] + digest = "1:2426da75f49e5b8507a6ed5d4c49b06b2ff795f4aec401c106b7db8fb2625cd7" name = "github.com/dgrijalva/jwt-go" packages = ["."] + pruneopts = "" revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" version = "v3.2.0" [[projects]] + digest = "1:68df19ee476d93359596377b7437bbe49d233fe014becd060ded757aeed531cd" name = "github.com/docker/distribution" packages = [ - "digest", - "reference" + "digestset", + "reference", ] - revision = "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" - version = "v2.6.2" + pruneopts = "" + revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" [[projects]] + digest = "1:a21509491bfd5bd1f99abe1d38430fddd16c8c8dc0092f954e224b93ad87f06b" name = "github.com/docker/docker" packages = [ + "api", "api/types", "api/types/blkiodev", "api/types/container", "api/types/events", "api/types/filters", + "api/types/image", "api/types/mount", "api/types/network", - "api/types/reference", "api/types/registry", "api/types/strslice", "api/types/swarm", + "api/types/swarm/runtime", "api/types/time", "api/types/versions", "api/types/volume", "client", - "pkg/tlsconfig" ] - revision = "eef6495eddab52828327aade186443681ed71a4e" - version = "v17.03.2-ce-rc1" + pruneopts = "" + revision = "ed7b6428c133e7c59404251a09b7d6b02fa83cc2" [[projects]] + digest = "1:5b20afc76a36d3994194e2612e83b51bc2b12db3d4d2a722b24474b2d0e3a890" name = "github.com/docker/go-connections" packages = [ "nat", "sockets", - "tlsconfig" + "tlsconfig", ] + pruneopts = 
"" revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d" version = "v0.3.0" [[projects]] + digest = "1:582d54fcb7233da8dde1dfd2210a5b9675d0685f84246a8d317b07d680c18b1b" name = "github.com/docker/go-units" packages = ["."] + pruneopts = "" revision = "47565b4f722fb6ceae66b95f853feed578a4a51c" version = "v0.3.3" [[projects]] + digest = "1:7bbb118aeef9a6b9fef3d57b6cc5378f7cd6e915cabf4dea695e318e1a1bd4e6" name = "github.com/eapache/go-resiliency" packages = ["breaker"] + pruneopts = "" revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce" version = "v1.1.0" [[projects]] branch = "master" + digest = "1:7b28f7f7c9fb914b30dff111fb910d49bd61d275101f665aea79409bb3ba2ae2" name = "github.com/eapache/go-xerial-snappy" packages = ["."] - revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c" + pruneopts = "" + revision = "040cc1a32f578808623071247fdbd5cc43f37f5f" [[projects]] + digest = "1:d8d46d21073d0f65daf1740ebf4629c65e04bf92e14ce93c2201e8624843c3d3" name = "github.com/eapache/queue" packages = ["."] + pruneopts = "" revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98" version = "v1.1.0" [[projects]] + digest = "1:d2e2aebcb8e8027345e16f9d0be8cdee3bb470ba406c7a54cb7457ae3ad4ace5" name = "github.com/eclipse/paho.mqtt.golang" packages = [ ".", - "packets" + "packets", ] + pruneopts = "" revision = "36d01c2b4cbeb3d2a12063e4880ce30800af9560" version = "v1.1.1" [[projects]] + digest = "1:d19c78214e03e297e9e30d2eb11892f731358b2951f2a5c7374658a156373e4c" name = "github.com/go-ini/ini" packages = ["."] - revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5" - version = "v1.37.0" + pruneopts = "" + revision = "358ee7663966325963d4e8b2e1fbd570c5195153" + version = "v1.38.1" [[projects]] + digest = "1:6a4a01d58b227c4b6b11111b9f172ec5c17682b82724e58e6daf3f19f4faccd8" name = "github.com/go-logfmt/logfmt" packages = ["."] + pruneopts = "" revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5" version = "v0.3.0" [[projects]] + digest = "1:c3a5ae14424a38c244439732c31a08b5f956c46c4acdc159fc285a52dbf11de0" name = "github.com/go-ole/go-ole" packages = [ ".", - "oleutil" + "oleutil", ] + pruneopts = "" revision = "a41e3c4b706f6ae8dfbff342b06e40fa4d2d0506" version = "v1.2.1" [[projects]] + digest = "1:f2f6a616a1ca8aed667d956c98f7f6178efe72bbe0a419bd33b9d99841c7de69" name = "github.com/go-redis/redis" packages = [ ".", @@ -289,18 +360,22 @@ "internal/pool", "internal/proto", "internal/singleflight", - "internal/util" + "internal/util", ] + pruneopts = "" revision = "83fb42932f6145ce52df09860384a4653d2d332a" version = "v6.12.0" [[projects]] + digest = "1:dc876ae7727280d95f97af5320308131278b93d6c6f5cf953065e18cb8c88fd2" name = "github.com/go-sql-driver/mysql" packages = ["."] + pruneopts = "" revision = "d523deb1b23d913de5bdada721a6071e71283618" version = "v1.4.0" [[projects]] + digest = "1:b7a7e17513aeee6492d93015c7bf29c86a0c1c91210ea56b21e36c1a40958cba" name = "github.com/gobwas/glob" packages = [ ".", @@ -310,125 +385,157 @@ "syntax/ast", "syntax/lexer", "util/runes", - "util/strings" + "util/strings", ] + pruneopts = "" revision = "5ccd90ef52e1e632236f7326478d4faa74f99438" version = "v0.2.3" [[projects]] + digest = "1:673df1d02ca0c6f51458fe94bbb6fae0b05e54084a31db2288f1c4321255c2da" name = "github.com/gogo/protobuf" packages = ["proto"] - revision = "1adfc126b41513cc696b209667c8656ea7aac67c" - version = "v1.0.0" + pruneopts = "" + revision = "636bf0302bc95575d69441b25a2603156ffdddf1" + version = "v1.1.1" [[projects]] + digest = "1:b1d3041d568e065ab4d76f7477844458e9209c0bb241eaccdc0770bf0a13b120" name = 
"github.com/golang/protobuf" packages = [ "proto", "ptypes", "ptypes/any", "ptypes/duration", - "ptypes/timestamp" + "ptypes/timestamp", ] + pruneopts = "" revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" version = "v1.1.0" [[projects]] branch = "master" + digest = "1:075128b9fc42e6d99067da1a2e6c0a634a6043b5a60abe6909c51f5ecad37b6d" name = "github.com/golang/snappy" packages = ["."] + pruneopts = "" revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" [[projects]] + digest = "1:cc082d7b9cc3f832f2aed9d06d1cbb33b6984a61d8ec403535b086415c181607" name = "github.com/google/go-cmp" packages = [ "cmp", "cmp/internal/diff", "cmp/internal/function", - "cmp/internal/value" + "cmp/internal/value", ] + pruneopts = "" revision = "3af367b6b30c263d47e8895973edcca9a49cf029" version = "v0.2.0" [[projects]] + digest = "1:dbbeb8ddb0be949954c8157ee8439c2adfd8dc1c9510eb44a6e58cb68c3dce28" name = "github.com/gorilla/context" packages = ["."] + pruneopts = "" revision = "08b5f424b9271eedf6f9f0ce86cb9396ed337a42" version = "v1.1.1" [[projects]] + digest = "1:c2c8666b4836c81a1d247bdf21c6a6fc1ab586538ab56f74437c2e0df5c375e1" name = "github.com/gorilla/mux" packages = ["."] + pruneopts = "" revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf" version = "v1.6.2" [[projects]] branch = "master" + digest = "1:60b7bc5e043a11213472ae05252527287d20e0a6ccc18f6ae67fad88e41004de" name = "github.com/hailocab/go-hostpool" packages = ["."] + pruneopts = "" revision = "e80d13ce29ede4452c43dea11e79b9bc8a15b478" [[projects]] + digest = "1:db58383b43f583c44fb47c3331de943a11bb73ea951c2def55d29a454a57f4ee" name = "github.com/hashicorp/consul" packages = ["api"] - revision = "5174058f0d2bda63fa5198ab96c33d9a909c58ed" - version = "v1.1.0" + pruneopts = "" + revision = "39f93f011e591c842acc8053a7f5972aa6e592fd" + version = "v1.2.1" [[projects]] branch = "master" + digest = "1:f5d25fd7bdda08e39e01193ef94a1ebf7547b1b931bcdec785d08050598f306c" name = "github.com/hashicorp/go-cleanhttp" packages = ["."] + pruneopts = "" revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d" [[projects]] branch = "master" + digest = "1:cd5813053beac0114f96a7da3924fc8a15e0cd2b139f079e0fcce5d3244ae304" name = "github.com/hashicorp/go-rootcerts" packages = ["."] + pruneopts = "" revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00" [[projects]] + digest = "1:d2b2cff454cb23a9769ef3c9075741f5985773a998584b3b3ce203fe4b1abbea" name = "github.com/hashicorp/serf" packages = ["coordinate"] + pruneopts = "" revision = "d6574a5bb1226678d7010325fb6c985db20ee458" version = "v0.8.1" [[projects]] + digest = "1:cc0cf2e12280074e5c6dc0f15a4bb3d6c43509e6091cdcdcc83eea491577257b" name = "github.com/influxdata/go-syslog" packages = [ "rfc5424", - "rfc5425" + "rfc5425", ] + pruneopts = "" revision = "eecd51df3ad85464a2bab9b7d3a45bc1e299059e" version = "v1.0.1" [[projects]] branch = "master" + digest = "1:effc58ad45323ad15159bbca533be4870eaddb2d9a513d3488d8bfe822c83532" name = "github.com/influxdata/tail" packages = [ ".", "ratelimiter", "util", "watch", - "winfile" + "winfile", ] + pruneopts = "" revision = "c43482518d410361b6c383d7aebce33d0471d7bc" [[projects]] branch = "master" + digest = "1:d31edcf33a3b36218de96e43f3fec18ea96deb2a28b838a3a01a4df856ded345" name = "github.com/influxdata/toml" packages = [ ".", - "ast" + "ast", ] + pruneopts = "" revision = "2a2e3012f7cfbef64091cc79776311e65dfa211b" [[projects]] branch = "master" + digest = "1:a0c157916be0b4de1d4565b1f094b8d746109f94968140dff40a42780fa6ccef" name = "github.com/influxdata/wlog" packages = ["."] + pruneopts = 
"" revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec" [[projects]] + digest = "1:4197871f269749786aa2406557dba15f10cf79161cdc3998180614c62c8b6351" name = "github.com/jackc/pgx" packages = [ ".", @@ -437,138 +544,195 @@ "pgio", "pgproto3", "pgtype", - "stdlib" + "stdlib", ] + pruneopts = "" revision = "da3231b0b66e2e74cdb779f1d46c5e958ba8be27" version = "v3.1.0" [[projects]] + digest = "1:4f767a115bc8e08576f6d38ab73c376fc1b1cd3bb5041171c9e8668cc7739b52" name = "github.com/jmespath/go-jmespath" packages = ["."] + pruneopts = "" revision = "0b12d6b5" [[projects]] branch = "master" + digest = "1:2c5ad58492804c40bdaf5d92039b0cde8b5becd2b7feeb37d7d1cc36a8aa8dbe" name = "github.com/kardianos/osext" packages = ["."] + pruneopts = "" revision = "ae77be60afb1dcacde03767a8c37337fad28ac14" [[projects]] branch = "master" + digest = "1:2df59f23f11c5c59982f737c98c5523b276bfc85a4773a04b411190402bb30fd" name = "github.com/kardianos/service" packages = ["."] + pruneopts = "" revision = "615a14ed75099c9eaac6949e22ac2341bf9d3197" [[projects]] branch = "master" + digest = "1:63e7368fcf6b54804076eaec26fd9cf0c4466166b272393db4b93102e1e962df" name = "github.com/kballard/go-shellquote" packages = ["."] + pruneopts = "" revision = "95032a82bc518f77982ea72343cc1ade730072f0" [[projects]] branch = "master" + digest = "1:1ed9eeebdf24aadfbca57eb50e6455bd1d2474525e0f0d4454de8c8e9bc7ee9a" name = "github.com/kr/logfmt" packages = ["."] + pruneopts = "" revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" [[projects]] branch = "master" + digest = "1:28ca57775f285ae87cbdc7280aad91c5f2ed3c2af98d9f035d75956d1ca97fe6" name = "github.com/mailru/easyjson" packages = [ ".", "buffer", "jlexer", - "jwriter" + "jwriter", ] - revision = "3fdea8d05856a0c8df22ed4bc71b3219245e4485" + pruneopts = "" + revision = "efc7eb8984d6655c26b5c9d2e65c024e5767c37c" [[projects]] + digest = "1:49a8b01a6cd6558d504b65608214ca40a78000e1b343ed0da5c6a9ccd83d6d30" name = "github.com/matttproud/golang_protobuf_extensions" packages = ["pbutil"] + pruneopts = "" revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" version = "v1.0.1" [[projects]] + digest = "1:f0bad0fece0fb73c6ea249c18d8e80ffbe86be0457715b04463068f04686cf39" name = "github.com/miekg/dns" packages = ["."] + pruneopts = "" revision = "5a2b9fab83ff0f8bfc99684bd5f43a37abe560f1" version = "v1.0.8" [[projects]] branch = "master" + digest = "1:99651e95333755cbe5c9768c1b80031300acca64a80870b40309202b32585a5a" name = "github.com/mitchellh/go-homedir" packages = ["."] + pruneopts = "" revision = "3864e76763d94a6df2f9960b16a20a33da9f9a66" [[projects]] branch = "master" + digest = "1:f43ed2c836208c14f45158fd01577c985688a4d11cf9fd475a939819fef3b321" name = "github.com/mitchellh/mapstructure" packages = ["."] - revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b" + pruneopts = "" + revision = "f15292f7a699fcc1a38a80977f80a046874ba8ac" [[projects]] + digest = "1:ee2e62b00a9ccc2dba1525f93396e35c847f90f87939df6f361b86315ea5f69a" name = "github.com/multiplay/go-ts3" packages = ["."] + pruneopts = "" revision = "d0d44555495c8776880a17e439399e715a4ef319" version = "v1.0.0" [[projects]] + digest = "1:ccd0def9f0b82b61c5e54fcbfccf528eabb13b489d008e46dc16b808c2e1f765" name = "github.com/naoina/go-stringutil" packages = ["."] + pruneopts = "" revision = "6b638e95a32d0c1131db0e7fe83775cbea4a0d0b" version = "v0.1.0" [[projects]] + digest = "1:e5894541d6ceec5dd283e24e3530aadf59c06449695d19189a7a27bb4c15840d" name = "github.com/nats-io/gnatsd" packages = [ "conf", "logger", "server", "server/pse", - "util" + "util", 
] - revision = "add6d7930ae6d4bff8823b28999ea87bf1bfd23d" - version = "v1.1.0" + pruneopts = "" + revision = "6608e9ac3be979dcb0614b772cc86a87b71acaa3" + version = "v1.2.0" [[projects]] + digest = "1:88f1bde4c172e27b05ed46adfbd0e79dc1663a6281e4b39fa3e39d71ead9621d" name = "github.com/nats-io/go-nats" packages = [ ".", "encoders/builtin", - "util" + "util", ] + pruneopts = "" revision = "062418ea1c2181f52dc0f954f6204370519a868b" version = "v1.5.0" [[projects]] + digest = "1:be61e8224b84064109eaba8157cbb4bbe6ca12443e182b6624fdfa1c0dcf53d9" name = "github.com/nats-io/nuid" packages = ["."] + pruneopts = "" revision = "289cccf02c178dc782430d534e3c1f5b72af807f" version = "v1.0.0" [[projects]] + digest = "1:501cce26a54c785458b0dd54a08ddd984d4ad0c198255430d5d37cd2efe23149" name = "github.com/nsqio/go-nsq" packages = ["."] + pruneopts = "" revision = "eee57a3ac4174c55924125bb15eeeda8cffb6e6f" version = "v1.0.7" +[[projects]] + digest = "1:5d9b668b0b4581a978f07e7d2e3314af18eb27b3fb5d19b70185b7c575723d11" + name = "github.com/opencontainers/go-digest" + packages = ["."] + pruneopts = "" + revision = "279bed98673dd5bef374d3b6e4b09e2af76183bf" + version = "v1.0.0-rc1" + +[[projects]] + digest = "1:0d08f7224705b1df80beee92ffbdc63ab13fd6f6eb80bf287735f9bc7e8b83eb" + name = "github.com/opencontainers/image-spec" + packages = [ + "specs-go", + "specs-go/v1", + ] + pruneopts = "" + revision = "d60099175f88c47cd379c4738d158884749ed235" + version = "v1.0.1" + [[projects]] branch = "master" + digest = "1:2da0e5077ed40453dc281b9a2428d84cf6ad14063aed189f6296ca5dd25cf13d" name = "github.com/opentracing-contrib/go-observer" packages = ["."] + pruneopts = "" revision = "a52f2342449246d5bcc273e65cbdcfa5f7d6c63c" [[projects]] + digest = "1:bba12aa4747b212f75db3e7fee73fe1b66d303cb3ff0c1984b7f2ad20e8bd2bc" name = "github.com/opentracing/opentracing-go" packages = [ ".", "ext", - "log" + "log", ] + pruneopts = "" revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38" version = "v1.0.2" [[projects]] + digest = "1:c6c0db6294924072f98a0de090d200bae4b7102b12a443ba9569c4ba7df52aa1" name = "github.com/openzipkin/zipkin-go-opentracing" packages = [ ".", @@ -576,88 +740,110 @@ "thrift/gen-go/scribe", "thrift/gen-go/zipkincore", "types", - "wire" + "wire", ] + pruneopts = "" revision = "26cf9707480e6b90e5eff22cf0bbf05319154232" version = "v0.3.4" [[projects]] + digest = "1:41de12a4684237dd55a11260c941c2c58a055951985e9473ba1661175a13fea7" name = "github.com/pierrec/lz4" packages = [ ".", - "internal/xxh32" + "internal/xxh32", ] - revision = "6b9367c9ff401dbc54fabce3fb8d972e799b702d" - version = "v2.0.2" + pruneopts = "" + revision = "1958fd8fff7f115e79725b1288e0b878b3e06b00" + version = "v2.0.3" [[projects]] + digest = "1:7365acd48986e205ccb8652cc746f09c8b7876030d53710ea6ef7d0bd0dcd7ca" name = "github.com/pkg/errors" packages = ["."] + pruneopts = "" revision = "645ef00459ed84a119197bfb8d8205042c6df63d" version = "v0.8.0" [[projects]] + digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411" name = "github.com/pmezard/go-difflib" packages = ["difflib"] + pruneopts = "" revision = "792786c7400a136282c1664665ae0a8db921c6c2" version = "v1.0.0" [[projects]] + digest = "1:981835985f655d1d380cc6aa7d9fa9ad7abfaf40c75da200fd40d864cd05a7c3" name = "github.com/prometheus/client_golang" packages = [ "prometheus", - "prometheus/promhttp" + "prometheus/promhttp", ] + pruneopts = "" revision = "c5b7fccd204277076155f10851dad72b76a49317" version = "v0.8.0" [[projects]] branch = "master" + digest = 
"1:562d53e436b244a9bb5c1ff43bcaf4882e007575d34ec37717b15751c65cc63a" name = "github.com/prometheus/client_model" packages = ["go"] - revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" + pruneopts = "" + revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" [[projects]] branch = "master" + digest = "1:6a8420870eb2935977da1fff0f3afca9bdb3f1e66258c9e91a8a7ce0b5417c3b" name = "github.com/prometheus/common" packages = [ "expfmt", "internal/bitbucket.org/ww/goautoneg", "log", - "model" + "model", ] + pruneopts = "" revision = "7600349dcfe1abd18d72d3a1770870d9800a7801" [[projects]] branch = "master" + digest = "1:00fca823dfcdd8107226f67215afd948b001525223ed955a05b33a4c885c9591" name = "github.com/prometheus/procfs" packages = [ ".", "internal/util", "nfs", - "xfs" + "xfs", ] - revision = "7d6f385de8bea29190f15ba9931442a0eaef9af7" + pruneopts = "" + revision = "ae68e2d4c00fed4943b5f6698d504a5fe083da8a" [[projects]] branch = "master" + digest = "1:1b65925989a4dfb6d98ef1d530cda33ab1ff25945b14a22a8b8bb27cc282af70" name = "github.com/rcrowley/go-metrics" packages = ["."] + pruneopts = "" revision = "e2704e165165ec55d062f5919b4b29494e9fa790" [[projects]] branch = "master" + digest = "1:d8fe9f454582e04b5693b59cdebe3f0bd9dc29ad9651bfb1633cba4658b66c65" name = "github.com/samuel/go-zookeeper" packages = ["zk"] + pruneopts = "" revision = "c4fab1ac1bec58281ad0667dc3f0907a9476ac47" [[projects]] + digest = "1:7f569d906bdd20d906b606415b7d794f798f91a62fcfb6a4daa6d50690fb7a3f" name = "github.com/satori/go.uuid" packages = ["."] + pruneopts = "" revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3" version = "v1.2.0" [[projects]] + digest = "1:987ce58e999676c2e209831390f2d56621ff98def2ecca4928e73fe1e2569954" name = "github.com/shirou/gopsutil" packages = [ "cpu", @@ -667,100 +853,128 @@ "load", "mem", "net", - "process" + "process", ] - revision = "eeb1d38d69593f121e060d24d17f7b1f0936b203" - version = "v2.18.05" + pruneopts = "" + revision = "4a180b209f5f494e5923cfce81ea30ba23915877" + version = "v2.18.06" [[projects]] branch = "master" + digest = "1:99c6a6dab47067c9b898e8c8b13d130c6ab4ffbcc4b7cc6236c2cd0b1e344f5b" name = "github.com/shirou/w32" packages = ["."] + pruneopts = "" revision = "bb4de0191aa41b5507caa14b0650cdbddcd9280b" [[projects]] + digest = "1:f2cc92b78b2f3b76ab0f9daddddd28627bcfcc6cacf119029aa3850082d95079" name = "github.com/sirupsen/logrus" packages = ["."] + pruneopts = "" revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" version = "v1.0.5" [[projects]] branch = "master" + digest = "1:79e73b87cb07e380d1a3aaa14fbcc418e0d42eede5f971e7ee2f4a6e6d531deb" name = "github.com/soniah/gosnmp" packages = ["."] - revision = "bcf840db66be7d64bf96c3c0e075c92e3d98f793" + pruneopts = "" + revision = "96b86229e9b3ffb4b954144cdc7f98fe3ee1003f" [[projects]] branch = "master" + digest = "1:0a1f8d01a0191f558910bcbfd7e1dc11a53ac374473d13b68b8fe520f21efb07" name = "github.com/streadway/amqp" packages = ["."] + pruneopts = "" revision = "e5adc2ada8b8efff032bf61173a233d143e9318e" [[projects]] + digest = "1:34062a2274daa6ec4d2f50d6070cc51cf4674d6d553ed76b406cb3425b9528e8" name = "github.com/stretchr/objx" packages = ["."] + pruneopts = "" revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c" version = "v0.1.1" [[projects]] + digest = "1:bc2a12c8863e1080226b7bc69192efd6c37aaa9b85cec508b0a8f54fabb9bd9f" name = "github.com/stretchr/testify" packages = [ "assert", "mock", - "require" + "require", ] + pruneopts = "" revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" version = "v1.2.2" [[projects]] + digest = 
"1:e139a0dfe24e723193005b291ed82a975041718cfcab9136aa6c9540df70a4ff" name = "github.com/tidwall/gjson" packages = ["."] - revision = "afaeb9562041a8018c74e006551143666aed08bf" - version = "v1.1.1" + pruneopts = "" + revision = "f123b340873a0084cb27267eddd8ff615115fbff" + version = "v1.1.2" [[projects]] branch = "master" + digest = "1:4db4f92bb9cb04cfc4fccb36aba2598b02a988008c4cc0692b241214ad8ac96e" name = "github.com/tidwall/match" packages = ["."] + pruneopts = "" revision = "1731857f09b1f38450e2c12409748407822dc6be" [[projects]] + digest = "1:23e2b9f3a20cd4a6427147377255ec2f6237e8606fa6ef0707ed79b7bfbe3a83" name = "github.com/vjeantet/grok" packages = ["."] + pruneopts = "" revision = "ce01e59abcf6fbc9833b7deb5e4b8ee1769bcc53" version = "v1.0.0" [[projects]] branch = "master" + digest = "1:5383edd40c7f6c95a7dc46a47bf0c83de4bf40a4252f12fa803f790037addffc" name = "github.com/wvanbergen/kafka" packages = ["consumergroup"] + pruneopts = "" revision = "e2edea948ddfee841ea9a263b32ccca15f7d6c2f" [[projects]] branch = "master" + digest = "1:f936b4936e1b092cc41c9b33fdc990ad78386545f1ffeca8427c72b2605bca85" name = "github.com/wvanbergen/kazoo-go" packages = ["."] + pruneopts = "" revision = "f72d8611297a7cf105da904c04198ad701a60101" [[projects]] branch = "master" + digest = "1:9946d558a909f63e31332c77b82649522da97ae7f7cfbfebc6f53549ab6b3e0f" name = "github.com/yuin/gopher-lua" packages = [ ".", "ast", "parse", - "pm" + "pm", ] - revision = "ca850f594eaafa5468da2bd53b865e4ee55be18b" + pruneopts = "" + revision = "46796da1b0b4794e1e341883a399f12cc7574b55" [[projects]] branch = "master" + digest = "1:84e9087a94f336c204887281046891769d2ed7bf1d2b31c21ff6fb5e1743abce" name = "github.com/zensqlmonitor/go-mssqldb" packages = ["."] + pruneopts = "" revision = "e8fbf836e44e86764eba398361d1825651709547" [[projects]] branch = "master" + digest = "1:21100b2e8b6922303dd109da81b3134ed0eff05cb3402881eabde9cce8f4e5e6" name = "golang.org/x/crypto" packages = [ "bcrypt", @@ -769,12 +983,14 @@ "ed25519/internal/edwards25519", "md4", "pbkdf2", - "ssh/terminal" + "ssh/terminal", ] - revision = "027cca12c2d63e3d62b670d901e8a2c95854feec" + pruneopts = "" + revision = "a2144134853fc9a27a7b1e3eb4f19f1a76df13c9" [[projects]] branch = "master" + digest = "1:58d8f8f3ad415b10d2145316519e5b7995b7cf9e663b33a1e9e0c2ddd96c1d58" name = "golang.org/x/net" packages = [ "bpf", @@ -795,12 +1011,14 @@ "ipv6", "proxy", "trace", - "websocket" + "websocket", ] - revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196" + pruneopts = "" + revision = "a680a1efc54dd51c040b3b5ce4939ea3cf2ea0d1" [[projects]] branch = "master" + digest = "1:a8944db88149e7ecbea4b760c625b9ccf455fceae21387bc8890c3589d28b623" name = "golang.org/x/sys" packages = [ "unix", @@ -809,11 +1027,13 @@ "windows/svc", "windows/svc/debug", "windows/svc/eventlog", - "windows/svc/mgr" + "windows/svc/mgr", ] - revision = "6c888cc515d3ed83fc103cf1d84468aad274b0a7" + pruneopts = "" + revision = "ac767d655b305d4e9612f5f6e33120b9176c4ad4" [[projects]] + digest = "1:af9bfca4298ef7502c52b1459df274eed401a4f5498b900e9a92d28d3d87ac5a" name = "golang.org/x/text" packages = [ "collate", @@ -841,39 +1061,46 @@ "unicode/bidi", "unicode/cldr", "unicode/norm", - "unicode/rangetable" + "unicode/rangetable", ] + pruneopts = "" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" [[projects]] + digest = "1:eede11c81b63c8f6fd06ef24ba0a640dc077196ec9b7a58ecde03c82eee2f151" name = "google.golang.org/appengine" packages = ["cloudsql"] + pruneopts = "" revision = 
"b1f26356af11148e710935ed1ac8a7f5702c7612" version = "v1.1.0" [[projects]] branch = "master" + digest = "1:8d093c040b734e160cbe8291c7b539c36d2c6dd4581c4bb37cff56078c65bd07" name = "google.golang.org/genproto" packages = ["googleapis/rpc/status"] - revision = "32ee49c4dd805befd833990acba36cb75042378c" + pruneopts = "" + revision = "fedd2861243fd1a8152376292b921b394c7bef7e" [[projects]] + digest = "1:05f2028524c4eada11e3f46d23139f23e9e0a40b2552207a5af278e8063ce782" name = "google.golang.org/grpc" packages = [ ".", "balancer", "balancer/base", "balancer/roundrobin", - "channelz", "codes", "connectivity", "credentials", "encoding", "encoding/proto", - "grpclb/grpc_lb_v1/messages", "grpclog", "internal", + "internal/backoff", + "internal/channelz", + "internal/grpcrand", "keepalive", "metadata", "naming", @@ -884,84 +1111,105 @@ "stats", "status", "tap", - "transport" + "transport", ] - revision = "7a6a684ca69eb4cae85ad0a484f2e531598c047b" - version = "v1.12.2" + pruneopts = "" + revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" + version = "v1.13.0" [[projects]] + digest = "1:2840683aa0e9980689f85bf48b2a56ec7a108fd089f12af8ea7d98c172819589" name = "gopkg.in/alecthomas/kingpin.v2" packages = ["."] + pruneopts = "" revision = "947dcec5ba9c011838740e680966fd7087a71d0d" version = "v2.2.6" [[projects]] + digest = "1:a8f8c1725195c4324d4350fae001524ca7489e40d9b6bb47598772e3faa103ba" name = "gopkg.in/asn1-ber.v1" packages = ["."] + pruneopts = "" revision = "379148ca0225df7a432012b8df0355c2a2063ac0" version = "v1.2" [[projects]] + digest = "1:581450ae66d7970d91ef9132459fa583e937c6e502f1b96e4ee7783a56fa0b44" name = "gopkg.in/fatih/pool.v2" packages = ["."] + pruneopts = "" revision = "010e0b745d12eaf8426c95f9c3924d81dd0b668f" version = "v2.0.0" [[projects]] + digest = "1:b2106f1668ea5efc1ecc480f7e922a093adb9563fd9ce58585292871f0d0f229" name = "gopkg.in/fsnotify.v1" packages = ["."] + pruneopts = "" revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz" version = "v1.4.7" [[projects]] + digest = "1:5fa5df18f3bd9cad28ed7f263b15da217945735110898fa2b9af25cdafb9cbf3" name = "gopkg.in/gorethink/gorethink.v3" packages = [ ".", "encoding", "ql2", - "types" + "types", ] + pruneopts = "" revision = "7f5bdfd858bb064d80559b2a32b86669c5de5d3b" version = "v3.0.5" [[projects]] + digest = "1:74163d1887c0821951e6f1795a1d10338f45f09d9067cb4a8edcf7ee481724ee" name = "gopkg.in/ldap.v2" packages = ["."] + pruneopts = "" revision = "bb7a9ca6e4fbc2129e3db588a34bc970ffe811a9" version = "v2.5.1" [[projects]] branch = "v2" + digest = "1:f799e95918890212dcf4ce5951291061d318f689977ec9cea0417b08433c2a9d" name = "gopkg.in/mgo.v2" packages = [ ".", "bson", "internal/json", "internal/sasl", - "internal/scram" + "internal/scram", ] - revision = "3f83fa5005286a7fe593b055f0d7771a7dce4655" + pruneopts = "" + revision = "9856a29383ce1c59f308dd1cf0363a79b5bef6b5" [[projects]] + digest = "1:427414c304a47b497759094220ce42dd2e838ab7d52de197c633b800c6ff84b5" name = "gopkg.in/olivere/elastic.v5" packages = [ ".", "config", - "uritemplates" + "uritemplates", ] - revision = "b708306d715bea9b983685e94ab4602cdc9f988b" - version = "v5.0.69" + pruneopts = "" + revision = "52741dc2ce53629cbe1e673869040d886cba2cd5" + version = "v5.0.70" [[projects]] branch = "v1" + digest = "1:a96d16bd088460f2e0685d46c39bcf1208ba46e0a977be2df49864ec7da447dd" name = "gopkg.in/tomb.v1" packages = ["."] + pruneopts = "" revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8" [[projects]] + digest = 
"1:f0620375dd1f6251d9973b5f2596228cc8042e887cd7f827e4220bc1ce8c30e2" name = "gopkg.in/yaml.v2" packages = ["."] + pruneopts = "" revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" version = "v2.2.1" diff --git a/Gopkg.toml b/Gopkg.toml index abf1b4a06..799b5243c 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -17,12 +17,10 @@ [[constraint]] name = "github.com/aws/aws-sdk-go" version = "1.14.8" -# version = "1.8.39" [[constraint]] name = "github.com/bsm/sarama-cluster" version = "2.1.13" -# version = "2.1.10" [[constraint]] name = "github.com/couchbase/go-couchbase" @@ -31,46 +29,38 @@ [[constraint]] name = "github.com/dgrijalva/jwt-go" version = "3.2.0" -# version = "3.1.0" [[constraint]] name = "github.com/docker/docker" - version = "~17.03.2-ce" + revision = "ed7b6428c133e7c59404251a09b7d6b02fa83cc2" # v18.05.0-ce -[[constraint]] - name = "github.com/docker/go-connections" - version = "0.3.0" -# version = "0.2.1" +[[override]] + name = "github.com/docker/distribution" + revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" # v18.05.0-ce [[constraint]] name = "github.com/eclipse/paho.mqtt.golang" version = "~1.1.1" -# version = "1.1.0" [[constraint]] name = "github.com/go-sql-driver/mysql" version = "1.4.0" -# version = "1.3.0" [[constraint]] name = "github.com/gobwas/glob" version = "0.2.3" -# version = "0.2.2" [[constraint]] name = "github.com/golang/protobuf" version = "1.1.0" -# version = "1.0.0" [[constraint]] name = "github.com/google/go-cmp" version = "0.2.0" -# version = "0.1.0" [[constraint]] name = "github.com/gorilla/mux" version = "1.6.2" -# version = "1.6.1" [[constraint]] name = "github.com/go-redis/redis" @@ -119,7 +109,6 @@ [[constraint]] name = "github.com/miekg/dns" version = "1.0.8" -# version = "1.0.0" [[constraint]] name = "github.com/multiplay/go-ts3" @@ -128,12 +117,10 @@ [[constraint]] name = "github.com/nats-io/gnatsd" version = "1.1.0" -# version = "1.0.4" [[constraint]] name = "github.com/nats-io/go-nats" version = "1.5.0" -# version = "1.3.0" [[constraint]] name = "github.com/nsqio/go-nsq" @@ -142,7 +129,6 @@ [[constraint]] name = "github.com/openzipkin/zipkin-go-opentracing" version = "0.3.4" -# version = "0.3.0" [[constraint]] name = "github.com/prometheus/client_golang" @@ -163,12 +149,10 @@ [[constraint]] name = "github.com/shirou/gopsutil" version = "2.18.05" -# version = "2.18.04" [[constraint]] name = "github.com/Shopify/sarama" version = "1.17.0" -# version = "1.15.0" [[constraint]] name = "github.com/soniah/gosnmp" @@ -185,12 +169,10 @@ [[constraint]] name = "github.com/stretchr/testify" version = "1.2.2" -# version = "1.2.1" [[constraint]] name = "github.com/tidwall/gjson" version = "1.1.1" -# version = "1.0.0" [[constraint]] name = "github.com/vjeantet/grok" @@ -215,7 +197,6 @@ [[constraint]] name = "google.golang.org/grpc" version = "1.12.2" -# version = "1.8.0" [[constraint]] name = "gopkg.in/gorethink/gorethink.v3" @@ -232,7 +213,6 @@ [[constraint]] name = "gopkg.in/olivere/elastic.v5" version = "^5.0.69" -# version = "^6.1.23" [[constraint]] name = "gopkg.in/yaml.v2" diff --git a/plugins/inputs/docker/client.go b/plugins/inputs/docker/client.go index b66ad009d..3ea24ea74 100644 --- a/plugins/inputs/docker/client.go +++ b/plugins/inputs/docker/client.go @@ -8,11 +8,10 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" docker "github.com/docker/docker/client" - "github.com/docker/go-connections/sockets" ) var ( - version = "1.24" + version = "1.21" // 1.24 is when server first started returning its version 
defaultHeaders = map[string]string{"User-Agent": "engine-api-cli-1.0"} ) @@ -27,7 +26,7 @@ type Client interface { } func NewEnvClient() (Client, error) { - client, err := docker.NewEnvClient() + client, err := docker.NewClientWithOpts(docker.FromEnv) if err != nil { return nil, err } @@ -35,21 +34,20 @@ func NewEnvClient() (Client, error) { } func NewClient(host string, tlsConfig *tls.Config) (Client, error) { - proto, addr, _, err := docker.ParseHost(host) - if err != nil { - return nil, err - } - transport := &http.Transport{ TLSClientConfig: tlsConfig, } - sockets.ConfigureTransport(transport, proto, addr) httpClient := &http.Client{Transport: transport} - client, err := docker.NewClient(host, version, httpClient, defaultHeaders) + client, err := docker.NewClientWithOpts( + docker.WithHTTPHeaders(defaultHeaders), + docker.WithHTTPClient(httpClient), + docker.WithVersion(version), + docker.WithHost(host)) if err != nil { return nil, err } + return &SocketClient{client}, nil } From 4e1a2536334e4b11979fdc288a55c81a3d16d634 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 26 Jul 2018 15:51:21 -0700 Subject: [PATCH 0037/1815] Link to SampleConfig documentation in contributing guide. --- CONTRIBUTING.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1a6ace7fc..55cc7f118 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -51,7 +51,9 @@ See below for a quick example. * Input Plugins must be added to the `github.com/influxdata/telegraf/plugins/inputs/all/all.go` file. * The `SampleConfig` function should return valid toml that describes how the -plugin can be configured. This is include in `telegraf config`. +plugin can be configured. This is included in `telegraf config`. Please +consult the [SampleConfig](https://github.com/influxdata/telegraf/wiki/SampleConfig) +page for the latest style guidelines. * The `Description` function should say in one line what this plugin does. Let's say you've written a plugin that emits metrics about processes on the @@ -192,7 +194,9 @@ See below for a quick example. * To be available within Telegraf itself, plugins must add themselves to the `github.com/influxdata/telegraf/plugins/outputs/all/all.go` file. * The `SampleConfig` function should return valid toml that describes how the -output can be configured. This is include in `telegraf config`. +plugin can be configured. This is included in `telegraf config`. Please +consult the [SampleConfig](https://github.com/influxdata/telegraf/wiki/SampleConfig) +page for the latest style guidelines. * The `Description` function should say in one line what this output does. ### Output Example From 83c4b81abe796699f12dd66d2ffbbd87ff29b71f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 27 Jul 2018 18:28:33 -0700 Subject: [PATCH 0038/1815] Copy grok documentation from logparser to data format doc (#4475) --- docs/DATA_FORMATS_INPUT.md | 301 ++++++++++++++++++++--------- plugins/inputs/logparser/README.md | 4 +- 2 files changed, 214 insertions(+), 91 deletions(-) diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index 24335a453..5a63e9d83 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -2,14 +2,14 @@ Telegraf is able to parse the following input data formats into metrics: -1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx) -1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#json) -1. 
[Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite) -1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah" -1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only) -1. [Collectd](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#collectd) -1. [Dropwizard](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#dropwizard) -1. [Grok](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#grok) +1. [InfluxDB Line Protocol](#influx) +1. [JSON](#json) +1. [Graphite](#graphite) +1. [Value](#value), ie: 45 or "booyah" +1. [Nagios](#nagios) (exec input only) +1. [Collectd](#collectd) +1. [Dropwizard](#dropwizard) +1. [Grok](#grok) Telegraf metrics, like InfluxDB [points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/), @@ -481,9 +481,9 @@ You can also change the path to the typesdb or add additional typesdb using ## Path of to TypesDB specifications collectd_typesdb = ["/usr/share/collectd/types.db"] - # Multi-value plugins can be handled two ways. + # Multi-value plugins can be handled two ways. # "split" will parse and store the multi-value plugin data into separate measurements - # "join" will parse and store the multi-value plugin as a single multi-value measurement. + # "join" will parse and store the multi-value plugin as a single multi-value measurement. # "split" is the default behavior for backward compatability with previous versions of influxdb. collectd_parse_multivalue = "split" ``` @@ -566,7 +566,7 @@ measurement,metric_type=histogram count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1 measurement,metric_type=timer count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0,stddev=1.0,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0 ``` -You may also parse a dropwizard registry from any JSON document which contains a dropwizard registry in some inner field. +You may also parse a dropwizard registry from any JSON document which contains a dropwizard registry in some inner field. Eg. to parse the following JSON document: ```json @@ -577,7 +577,7 @@ Eg. 
to parse the following JSON document: "tag2" : "yellow" }, "metrics" : { - "counters" : { + "counters" : { "measurement" : { "count" : 1 } @@ -641,16 +641,16 @@ For more information about the dropwizard json format see ## By providing an empty template array, templating is disabled and measurements are parsed as influxdb line protocol keys (measurement<,tag_set>) templates = [] - ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) + ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) ## to locate the metric registry within the JSON document # dropwizard_metric_registry_path = "metrics" - - ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) + + ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) ## to locate the default time of the measurements within the JSON document # dropwizard_time_path = "time" # dropwizard_time_format = "2006-01-02T15:04:05Z07:00" - - ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) + + ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) ## to locate the tags map within the JSON document # dropwizard_tags_path = "tags" @@ -660,79 +660,32 @@ For more information about the dropwizard json format see # tag2 = "tags.tag2" ``` -#### Grok -Parse logstash-style "grok" patterns. Patterns can be added to patterns, or custom patterns read from custom_pattern_files. +# Grok -# View logstash grok pattern docs here: -# https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html -# All default logstash patterns are supported, these can be viewed here: -# https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns +The grok data format parses line delimited data using a regular expression like +language. -# Available modifiers: -# string (default if nothing is specified) -# int -# float -# duration (ie, 5.23ms gets converted to int nanoseconds) -# tag (converts the field into a tag) -# drop (drops the field completely) -# Timestamp modifiers: -# ts-ansic ("Mon Jan _2 15:04:05 2006") -# ts-unix ("Mon Jan _2 15:04:05 MST 2006") -# ts-ruby ("Mon Jan 02 15:04:05 -0700 2006") -# ts-rfc822 ("02 Jan 06 15:04 MST") -# ts-rfc822z ("02 Jan 06 15:04 -0700") -# ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST") -# ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST") -# ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700") -# ts-rfc3339 ("2006-01-02T15:04:05Z07:00") -# ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00") -# ts-httpd ("02/Jan/2006:15:04:05 -0700") -# ts-epoch (seconds since unix epoch) -# ts-epochnano (nanoseconds since unix epoch) -# ts-"CUSTOM" -# CUSTOM time layouts must be within quotes and be the representation of the -# "reference time", which is Mon Jan 2 15:04:05 -0700 MST 2006 -# See https://golang.org/pkg/time/#Parse for more details. - -# Example log file pattern, example log looks like this: -# [04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs -# Breakdown of the DURATION pattern below: -# NUMBER is a builtin logstash grok pattern matching float & int numbers. -# [nuµm]? is a regex specifying 0 or 1 of the characters within brackets. -# s is also regex, this pattern must end in "s". 
-# so DURATION will match something like '5.324ms' or '6.1µs' or '10s' -DURATION %{NUMBER}[nuµm]?s -RESPONSE_CODE %{NUMBER:response_code:tag} -RESPONSE_TIME %{DURATION:response_time_ns:duration} -EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME} - -# Wider-ranging username matching vs. logstash built-in %{USER} -NGUSERNAME [a-zA-Z0-9\.\@\-\+_%]+ -NGUSER %{NGUSERNAME} -# Wider-ranging client IP matching -CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1) - -## -## COMMON LOG PATTERNS -## - -# apache & nginx logs, this is also known as the "common log format" -# see https://en.wikipedia.org/wiki/Common_Log_Format -COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NOTSPACE:ident} %{NOTSPACE:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-) - -# Combined log format is the same as the common log format but with the addition -# of two quoted strings at the end for "referrer" and "agent" -# See Examples at http://httpd.apache.org/docs/current/mod/mod_log_config.html -COMBINED_LOG_FORMAT %{COMMON_LOG_FORMAT} %{QS:referrer} %{QS:agent} - -# HTTPD log formats -HTTPD20_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{LOGLEVEL:loglevel:tag}\] (?:\[client %{IPORHOST:clientip}\] ){0,1}%{GREEDYDATA:errormsg} -HTTPD24_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{WORD:module}:%{LOGLEVEL:loglevel:tag}\] \[pid %{POSINT:pid:int}:tid %{NUMBER:tid:int}\]( \(%{POSINT:proxy_errorcode:int}\)%{DATA:proxy_errormessage}:)?( \[client %{IPORHOST:client}:%{POSINT:clientport}\])? %{DATA:errorcode}: %{GREEDYDATA:message} -HTTPD_ERRORLOG %{HTTPD20_ERRORLOG}|%{HTTPD24_ERRORLOG} +The best way to get acquainted with grok patterns is to read the logstash docs, +which are available here: + https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html #### Grok Configuration: ```toml [[inputs.reader]] + ## Files to parse each interval. + ## These accept standard unix glob matching rules, but with the addition of + ## ** as a "super asterisk". ie: + ## /var/log/**.log -> recursively find all .log files in /var/log + ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log + ## /var/log/apache.log -> only tail the apache log file + files = ["/var/log/apache/access.log"] + + ## The dataformat to be read from files + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "grok" + ## This is a list of patterns to check the given log file(s) for. ## Note that adding patterns here increases processing time. The most ## efficient configuration is to have one pattern per logparser. @@ -741,9 +694,6 @@ HTTPD_ERRORLOG %{HTTPD20_ERRORLOG}|%{HTTPD24_ERRORLOG} ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) grok_patterns = ["%{COMBINED_LOG_FORMAT}"] - ## Name of the outputted measurement name. - grok_name_override = "apache_access_log" - ## Full path(s) to custom pattern files. grok_custom_pattern_files = [] @@ -761,4 +711,179 @@ HTTPD_ERRORLOG %{HTTPD20_ERRORLOG}|%{HTTPD24_ERRORLOG} ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones ## 3. 
UTC -- or blank/unspecified, will return timestamp in UTC
   grok_timezone = "Canada/Eastern"
-```
\ No newline at end of file
+```
+
+The Telegraf grok parser uses a slightly modified version of logstash "grok"
+patterns, with the format
+
+```
+%{<capture_syntax>[:<semantic_name>][:<modifier>]}
+```
+
+The `capture_syntax` defines the grok pattern that's used to parse the input
+line and the `semantic_name` is used to name the field or tag. The extension
+`modifier` controls the data type that the parsed item is converted to or
+other special handling.
+
+By default all named captures are converted into string fields.
+Timestamp modifiers can be used to convert captures to the timestamp of the
+parsed metric. If no timestamp is parsed the metric will be created using the
+current time.
+
+You must capture at least one field per line.
+
+- Available modifiers:
+  - string (default if nothing is specified)
+  - int
+  - float
+  - duration (ie, 5.23ms gets converted to int nanoseconds)
+  - tag (converts the field into a tag)
+  - drop (drops the field completely)
+- Timestamp modifiers:
+  - ts (This will auto-learn the timestamp format)
+  - ts-ansic ("Mon Jan _2 15:04:05 2006")
+  - ts-unix ("Mon Jan _2 15:04:05 MST 2006")
+  - ts-ruby ("Mon Jan 02 15:04:05 -0700 2006")
+  - ts-rfc822 ("02 Jan 06 15:04 MST")
+  - ts-rfc822z ("02 Jan 06 15:04 -0700")
+  - ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST")
+  - ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST")
+  - ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700")
+  - ts-rfc3339 ("2006-01-02T15:04:05Z07:00")
+  - ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00")
+  - ts-httpd ("02/Jan/2006:15:04:05 -0700")
+  - ts-epoch (seconds since unix epoch, may contain decimal)
+  - ts-epochnano (nanoseconds since unix epoch)
+  - ts-syslog ("Jan 02 15:04:05", parsed time is set to the current year)
+  - ts-"CUSTOM"
+
+CUSTOM time layouts must be within quotes and be the representation of the
+"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`.
+To match a comma decimal point you can use a period in the pattern string; for
+example, `%{TIMESTAMP:timestamp:ts-"2006-01-02 15:04:05.000"}` can be used to
+match `"2018-01-02 15:04:05,000"`.
+See https://golang.org/pkg/time/#Parse for more details.
+
+Telegraf has many of its own [built-in patterns](./grok/patterns/influx-patterns),
+as well as support for most of
+[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns).
+_Golang regular expressions do not support lookahead or lookbehind.
+Logstash patterns that depend on these are not supported._
+
+If you need help building patterns to match your logs,
+you will find the https://grokdebug.herokuapp.com application quite useful!
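+
+As a quick illustration of the capture syntax and modifiers described above,
+the following pattern (the field and tag names are purely illustrative, not
+built-in names) parses a line like `POST /search 42 0.412`:
+
+```
+%{WORD:verb:tag} %{NOTSPACE:request} %{NUMBER:hits:int} %{NUMBER:elapsed:float}
+```
+
+With this pattern `verb` is stored as a tag, `hits` becomes an integer field,
+`elapsed` becomes a float field, and `request`, having no modifier, remains a
+string field.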
+
+#### Timestamp Examples
+
+This example input and config parses a file using a custom timestamp conversion:
+
+```
+2017-02-21 13:10:34 value=42
+```
+
+```toml
+[[inputs.logparser]]
+  [inputs.logparser.grok]
+    patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} value=%{NUMBER:value:int}']
+```
+
+This example input and config parses a file using a timestamp in unix time:
+
+```
+1466004605 value=42
+1466004605.123456789 value=42
+```
+
+```toml
+[[inputs.logparser]]
+  [inputs.logparser.grok]
+    patterns = ['%{NUMBER:timestamp:ts-epoch} value=%{NUMBER:value:int}']
+```
+
+This example parses a file using a built-in conversion and a custom pattern:
+
+```
+Wed Apr 12 13:10:34 PST 2017 value=42
+```
+
+```toml
+[[inputs.logparser]]
+  [inputs.logparser.grok]
+    patterns = ["%{TS_UNIX:timestamp:ts-unix} value=%{NUMBER:value:int}"]
+    custom_patterns = '''
+      TS_UNIX %{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ} %{YEAR}
+    '''
+```
+
+For cases where the timestamp itself carries no offset, the `timezone` config var is available
+to denote an offset. By default (with `timezone` either omitted, blank, or set to `"UTC"`), the times
+are processed as if in the UTC timezone. If specified as `timezone = "Local"`, the timestamp
+will be processed based on the current machine timezone configuration. Lastly, if using a
+timezone from the list of Unix [timezones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), the logparser grok will attempt to offset
+the timestamp accordingly. See test cases for more detailed examples.
+
+#### TOML Escaping
+
+When saving patterns to the configuration file, keep in mind the different TOML
+[string](https://github.com/toml-lang/toml#string) types and the escaping
+rules for each. These escaping rules must be applied in addition to the
+escaping required by the grok syntax. Using the multi-line literal
+syntax with `'''` may be useful.
+
+The following config examples will parse this input file:
+
+```
+|42|\uD83D\uDC2F|'telegraf'|
+```
+
+Since `|` is a special character in the grok language, we must escape it to
+get a literal `|`. With a basic TOML string, special characters such as
+backslash must be escaped, requiring us to escape the backslash a second time.
+
+```toml
+[[inputs.logparser]]
+  [inputs.logparser.grok]
+    patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"]
+    custom_patterns = "UNICODE_ESCAPE (?:\\\\u[0-9A-F]{4})+"
+```
+
+We cannot use a literal TOML string for the pattern, because we cannot match a
+`'` within it. However, it works well for the custom pattern.
+```toml
+[[inputs.logparser]]
+  [inputs.logparser.grok]
+    patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"]
+    custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
+```
+
+A multi-line literal string allows us to encode the pattern:
+```toml
+[[inputs.logparser]]
+  [inputs.logparser.grok]
+    patterns = ['''
+      \|%{NUMBER:value:int}\|%{UNICODE_ESCAPE:escape}\|'%{WORD:name}'\|
+    ''']
+    custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
+```
+
+#### Tips for creating patterns
+
+Writing complex patterns can be difficult; here is some advice for writing a
+new pattern or testing a pattern developed [online](https://grokdebug.herokuapp.com).
+
+Create a file output that writes to stdout, and disable other outputs while
+testing. This will allow you to see the captured metrics. Keep in mind that
+the file output will only print once per `flush_interval`.
+
+```toml
+[[outputs.file]]
+  files = ["stdout"]
+```
+
+- Start with a file containing only a single line of your input.
+- Remove all but the first token or piece of the line.
+- Add the section of your pattern to match this piece to your configuration file.
+- Verify that the metric is parsed successfully by running Telegraf.
+- If successful, add the next token, update the pattern and retest.
+- Continue one token at a time until the entire line is successfully parsed.
+
+
diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md
index 69aedc4b7..d35a94a70 100644
--- a/plugins/inputs/logparser/README.md
+++ b/plugins/inputs/logparser/README.md
@@ -1,8 +1,6 @@
 # Logparser Input Plugin
 
-### **Deprecated in version 1.8**: Please use the
-[tail](/plugins/inputs/tail) plugin with the `grok`
-[data format](/docs/DATA_FORMATS_INPUT.md).
+### **Deprecated in version 1.8**: Please use the [tail](/plugins/inputs/tail) plugin with the `grok` [data format](/docs/DATA_FORMATS_INPUT.md).
 
 The `logparser` plugin streams and parses the given logfiles. Currently it
 has the capability of parsing "grok" patterns from logfiles, which also supports

From 96cb0aaea02d4647e684c34cdeebbc72636c270c Mon Sep 17 00:00:00 2001
From: maxunt 
Date: Fri, 27 Jul 2018 18:29:54 -0700
Subject: [PATCH 0039/1815] Fix unit tests on Darwin (#4458)

---
 .../http_listener/http_listener_test.go      |  2 +-
 .../http_response/http_response_test.go      |  4 +-
 plugins/inputs/ping/ping.go                  | 10 +--
 plugins/inputs/ping/ping_test.go             | 79 +++++--------------
 .../socket_listener/socket_listener_test.go  |  6 +-
 plugins/inputs/syslog/rfc5425_test.go        | 10 ++-
 plugins/inputs/syslog/rfc5426_test.go        | 12 ++-
 plugins/outputs/influxdb/udp_test.go         |  2 +-
 .../socket_writer/socket_writer_test.go      |  4 +-
 9 files changed, 50 insertions(+), 79 deletions(-)

diff --git a/plugins/inputs/http_listener/http_listener_test.go b/plugins/inputs/http_listener/http_listener_test.go
index 7c6cdf728..3277e5344 100644
--- a/plugins/inputs/http_listener/http_listener_test.go
+++ b/plugins/inputs/http_listener/http_listener_test.go
@@ -336,7 +336,7 @@ func TestWriteHTTPGzippedData(t *testing.T) {
 
 // writes 25,000 metrics to the listener with 10 different writers
 func TestWriteHTTPHighTraffic(t *testing.T) {
-	if runtime.GOOS != "darwin" {
+	if runtime.GOOS == "darwin" {
 		t.Skip("Skipping due to hang on darwin")
 	}
 	listener := newTestHTTPListener()
diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go
index 7d3780cec..a33805db3 100644
--- a/plugins/inputs/http_response/http_response_test.go
+++ b/plugins/inputs/http_response/http_response_test.go
@@ -662,10 +662,10 @@ func TestNetworkErrors(t *testing.T) {
 
 	// Connection failed
 	h = &HTTPResponse{
-		Address:         "https://127.127.127.127", // Any non-routable IP works here
+		Address:         "https:/nonexistent.nonexistent", // Any unresolvable address works here
 		Body:            "",
 		Method:          "GET",
-		ResponseTimeout: internal.Duration{Duration: time.Second * 20},
+		ResponseTimeout: internal.Duration{Duration: time.Second * 5},
 		FollowRedirects: false,
 	}
 
diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go
index 492474786..7ddbf275e 100644
--- a/plugins/inputs/ping/ping.go
+++ b/plugins/inputs/ping/ping.go
@@ -94,7 +94,7 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
 				return
 			}
 
-			args := p.args(u)
+			args := p.args(u, runtime.GOOS)
 			totalTimeout := float64(p.Count)*p.Timeout + float64(p.Count-1)*p.PingInterval
 
 			out, err := p.pingHost(totalTimeout, 
args...) @@ -167,14 +167,14 @@ func hostPinger(timeout float64, args ...string) (string, error) { } // args returns the arguments for the 'ping' executable -func (p *Ping) args(url string) []string { +func (p *Ping) args(url string, system string) []string { // Build the ping command args based on toml config args := []string{"-c", strconv.Itoa(p.Count), "-n", "-s", "16"} if p.PingInterval > 0 { args = append(args, "-i", strconv.FormatFloat(p.PingInterval, 'f', -1, 64)) } if p.Timeout > 0 { - switch runtime.GOOS { + switch system { case "darwin", "freebsd", "netbsd", "openbsd": args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64)) case "linux": @@ -185,7 +185,7 @@ func (p *Ping) args(url string) []string { } } if p.Deadline > 0 { - switch runtime.GOOS { + switch system { case "darwin", "freebsd", "netbsd", "openbsd": args = append(args, "-t", strconv.Itoa(p.Deadline)) case "linux": @@ -196,7 +196,7 @@ func (p *Ping) args(url string) []string { } } if p.Interface != "" { - switch runtime.GOOS { + switch system { case "darwin", "freebsd", "netbsd", "openbsd": args = append(args, "-S", p.Interface) case "linux": diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 9817d07c6..d5b82608a 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -5,12 +5,12 @@ package ping import ( "errors" "reflect" - "runtime" "sort" "testing" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // BSD/Darwin ping output @@ -99,68 +99,29 @@ func TestErrorProcessPingOutput(t *testing.T) { // Test that arg lists and created correctly func TestArgs(t *testing.T) { p := Ping{ - Count: 2, + Count: 2, + Interface: "eth0", + Timeout: 12.0, + Deadline: 24, + PingInterval: 1.2, } - // Actual and Expected arg lists must be sorted for reflect.DeepEqual - - actual := p.args("www.google.com") - expected := []string{"-c", "2", "-n", "-s", "16", "www.google.com"} - sort.Strings(actual) - sort.Strings(expected) - assert.True(t, reflect.DeepEqual(expected, actual), - "Expected: %s Actual: %s", expected, actual) - - p.Interface = "eth0" - actual = p.args("www.google.com") - expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", - "www.google.com"} - sort.Strings(actual) - sort.Strings(expected) - assert.True(t, reflect.DeepEqual(expected, actual), - "Expected: %s Actual: %s", expected, actual) - - p.Timeout = 12.0 - actual = p.args("www.google.com") - switch runtime.GOOS { - case "darwin": - expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W", - "12000.0", "www.google.com"} - default: - expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W", - "12", "www.google.com"} + var systemCases = []struct { + system string + output []string + }{ + {"darwin", []string{"-c", "2", "-n", "-s", "16", "-i", "1.2", "-W", "12000", "-t", "24", "-S", "eth0", "www.google.com"}}, + {"linux", []string{"-c", "2", "-n", "-s", "16", "-i", "1.2", "-W", "12", "-w", "24", "-I", "eth0", "www.google.com"}}, + {"anything else", []string{"-c", "2", "-n", "-s", "16", "-i", "1.2", "-W", "12", "-w", "24", "-I", "eth0", "www.google.com"}}, } - - p.Deadline = 24 - actual = p.args("www.google.com") - switch runtime.GOOS { - case "darwin": - expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W", - "12000.0", "-t", "24", "www.google.com"} - default: - expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W", - "12", "-w", "24", "www.google.com"} + for i := 
range systemCases { + actual := p.args("www.google.com", systemCases[i].system) + expected := systemCases[i].output + sort.Strings(actual) + sort.Strings(expected) + require.True(t, reflect.DeepEqual(expected, actual), + "Expected: %s Actual: %s", expected, actual) } - - sort.Strings(actual) - sort.Strings(expected) - assert.True(t, reflect.DeepEqual(expected, actual), - "Expected: %s Actual: %s", expected, actual) - - p.PingInterval = 1.2 - actual = p.args("www.google.com") - switch runtime.GOOS { - case "darwin": - expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W", - "12000.0", "-t", "24", "-i", "1.2", "www.google.com"} - default: - expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W", - "12", "-w", "24", "-i", "1.2", "www.google.com"} - } - sort.Strings(actual) - sort.Strings(expected) - assert.True(t, reflect.DeepEqual(expected, actual), - "Expected: %s Actual: %s", expected, actual) } func mockHostPinger(timeout float64, args ...string) (string, error) { diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index 4370ac577..26691ef54 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -55,7 +55,7 @@ func TestSocketListener_unix_tls(t *testing.T) { tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "socket_listener.TestSocketListener_unix_tls.sock") + sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix_tls.sock") sl := newSocketListener() sl.ServiceAddress = "unix://" + sock @@ -116,7 +116,7 @@ func TestSocketListener_unix(t *testing.T) { tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "socket_listener.TestSocketListener_unix.sock") + sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix.sock") defer testEmptyLog(t)() @@ -140,7 +140,7 @@ func TestSocketListener_unixgram(t *testing.T) { tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "socket_listener.TestSocketListener_unixgram.sock") + sock := filepath.Join(tmpdir, "sl.TestSocketListener_unixgram.sock") defer testEmptyLog(t)() diff --git a/plugins/inputs/syslog/rfc5425_test.go b/plugins/inputs/syslog/rfc5425_test.go index 1b69e6023..de5835e6f 100644 --- a/plugins/inputs/syslog/rfc5425_test.go +++ b/plugins/inputs/syslog/rfc5425_test.go @@ -403,13 +403,16 @@ func testStrictRFC5425(t *testing.T, protocol string, address string, wantTLS bo acc.Errors = make([]error, 0) // Write - conn.Write(tc.data) + _, err = conn.Write(tc.data) + conn.Close() + require.NoError(t, err) // Wait that the the number of data points is accumulated // Since the receiver is running concurrently if tc.wantStrict != nil { acc.Wait(len(tc.wantStrict)) } + // Wait the parsing error acc.WaitError(tc.werr) @@ -452,7 +455,6 @@ func testBestEffortRFC5425(t *testing.T, protocol string, address string, wantTL conn, err = tls.Dial(protocol, address, config) } else { conn, err = net.Dial(protocol, address) - defer conn.Close() } require.NotNil(t, conn) require.NoError(t, err) @@ -462,7 +464,9 @@ func testBestEffortRFC5425(t *testing.T, protocol string, address string, wantTL acc.Errors = make([]error, 0) // Write - conn.Write(tc.data) + _, err = conn.Write(tc.data) + require.NoError(t, err) + conn.Close() // Wait that the the number of data points 
is accumulated // Since the receiver is running concurrently diff --git a/plugins/inputs/syslog/rfc5426_test.go b/plugins/inputs/syslog/rfc5426_test.go index cae465189..8304a5406 100644 --- a/plugins/inputs/syslog/rfc5426_test.go +++ b/plugins/inputs/syslog/rfc5426_test.go @@ -234,12 +234,18 @@ func testRFC5426(t *testing.T, protocol string, address string, bestEffort bool) // Connect conn, err := net.Dial(protocol, address) require.NotNil(t, conn) - defer conn.Close() require.Nil(t, err) // Write - _, e := conn.Write(tc.data) - require.Nil(t, e) + _, err = conn.Write(tc.data) + conn.Close() + if err != nil { + if err, ok := err.(*net.OpError); ok { + if err.Err.Error() == "write: message too long" { + return + } + } + } // Waiting ... if tc.wantStrict == nil && tc.werr || bestEffort && tc.werr { diff --git a/plugins/outputs/influxdb/udp_test.go b/plugins/outputs/influxdb/udp_test.go index 017ee0be9..9bced4262 100644 --- a/plugins/outputs/influxdb/udp_test.go +++ b/plugins/outputs/influxdb/udp_test.go @@ -202,7 +202,7 @@ func TestUDP_SerializeError(t *testing.T) { } func TestUDP_WriteWithRealConn(t *testing.T) { - conn, err := net.ListenPacket("udp", "127.0.0.0:0") + conn, err := net.ListenPacket("udp", "127.0.0.1:0") require.NoError(t, err) metrics := []telegraf.Metric{ diff --git a/plugins/outputs/socket_writer/socket_writer_test.go b/plugins/outputs/socket_writer/socket_writer_test.go index 4d93469fa..f7eb159ea 100644 --- a/plugins/outputs/socket_writer/socket_writer_test.go +++ b/plugins/outputs/socket_writer/socket_writer_test.go @@ -49,7 +49,7 @@ func TestSocketWriter_unix(t *testing.T) { tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "socket_writer.TestSocketWriter_unix.sock") + sock := filepath.Join(tmpdir, "sw.TestSocketWriter_unix.sock") listener, err := net.Listen("unix", sock) require.NoError(t, err) @@ -70,7 +70,7 @@ func TestSocketWriter_unixgram(t *testing.T) { tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "socket_writer.TestSocketWriter_unixgram.sock") + sock := filepath.Join(tmpdir, "sw.TSW_unixgram.sock") listener, err := net.ListenPacket("unixgram", sock) require.NoError(t, err) From 019d265167eac47ed858766d77cc153f92129763 Mon Sep 17 00:00:00 2001 From: Greg Date: Fri, 27 Jul 2018 19:39:37 -0600 Subject: [PATCH 0040/1815] Add dev/telegraf.conf for docker, exec, and procstat input (#4460) --- plugins/inputs/docker/dev/telegraf.conf | 13 ++++++++++++ plugins/inputs/exec/dev/telegraf.conf | 26 +++++++++++++++++++++++ plugins/inputs/procstat/dev/telegraf.conf | 9 ++++++++ 3 files changed, 48 insertions(+) create mode 100644 plugins/inputs/docker/dev/telegraf.conf create mode 100644 plugins/inputs/exec/dev/telegraf.conf create mode 100644 plugins/inputs/procstat/dev/telegraf.conf diff --git a/plugins/inputs/docker/dev/telegraf.conf b/plugins/inputs/docker/dev/telegraf.conf new file mode 100644 index 000000000..06bbb46ae --- /dev/null +++ b/plugins/inputs/docker/dev/telegraf.conf @@ -0,0 +1,13 @@ +[agent] + interval="1s" + flush_interval="1s" + +[[inputs.docker]] + endpoint = "unix:///var/run/docker.sock" + timeout = "5s" + perdevice = true + total = false + container_names = [] + +[[outputs.file]] + files = ["stdout"] diff --git a/plugins/inputs/exec/dev/telegraf.conf b/plugins/inputs/exec/dev/telegraf.conf new file mode 100644 index 000000000..04433410e --- /dev/null +++ b/plugins/inputs/exec/dev/telegraf.conf 
@@ -0,0 +1,26 @@
+[agent]
+  interval="1s"
+  flush_interval="1s"
+
+[[inputs.exec]]
+  timeout = "1s"
+  data_format = "influx"
+  commands = [
+    "echo 'deal,computer_name=hosta message=\"stuff\" 1530654676316265790'",
+    "echo 'deal,computer_name=hostb message=\"stuff\" 1530654676316265790'",
+  ]
+
+[[processors.regex]]
+  [[processors.regex.tags]]
+    key = "computer_name"
+    pattern = "^(.*?)a$"
+    replacement = "${1}"
+    result_key = "server_name"
+  [[processors.regex.tags]]
+    key = "computer_name"
+    pattern = "^(.*?)b$"
+    replacement = "${1}"
+    result_key = "server_name"
+
+[[outputs.file]]
+  files = ["stdout"]
diff --git a/plugins/inputs/procstat/dev/telegraf.conf b/plugins/inputs/procstat/dev/telegraf.conf
new file mode 100644
index 000000000..63b150d7c
--- /dev/null
+++ b/plugins/inputs/procstat/dev/telegraf.conf
@@ -0,0 +1,9 @@
+[agent]
+  interval="1s"
+  flush_interval="1s"
+
+[[inputs.procstat]]
+  exe = "telegraf"
+
+[[outputs.file]]
+  files = ["stdout"]

From a897b84049567c6eda0dfee214e670fb700f6dd0 Mon Sep 17 00:00:00 2001
From: Jiri Tyr 
Date: Mon, 30 Jul 2018 20:12:45 +0100
Subject: [PATCH 0041/1815] Adding x509_cert input plugin (#3768)

---
 plugins/inputs/all/all.go                  |   1 +
 plugins/inputs/x509_cert/README.md         |  45 +++++
 plugins/inputs/x509_cert/x509_cert.go      | 163 ++++++++++++++++
 plugins/inputs/x509_cert/x509_cert_test.go | 205 +++++++++++++++++++++
 4 files changed, 414 insertions(+)
 create mode 100644 plugins/inputs/x509_cert/README.md
 create mode 100644 plugins/inputs/x509_cert/x509_cert.go
 create mode 100644 plugins/inputs/x509_cert/x509_cert_test.go

diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 8594db0a9..8989684e4 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -124,6 +124,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/webhooks"
 	_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
 	_ "github.com/influxdata/telegraf/plugins/inputs/win_services"
+	_ "github.com/influxdata/telegraf/plugins/inputs/x509_cert"
 	_ "github.com/influxdata/telegraf/plugins/inputs/zfs"
 	_ "github.com/influxdata/telegraf/plugins/inputs/zipkin"
 	_ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
diff --git a/plugins/inputs/x509_cert/README.md b/plugins/inputs/x509_cert/README.md
new file mode 100644
index 000000000..6b922f0e1
--- /dev/null
+++ b/plugins/inputs/x509_cert/README.md
@@ -0,0 +1,45 @@
+# X509 Cert Input Plugin
+
+This plugin provides information about X509 certificates accessible via local
+file or network connection. 
+ + +### Configuration + +```toml +# Reads metrics from a SSL certificate +[[inputs.x509_cert]] + ## List certificate sources + sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "https://example.org"] + + ## Timeout for SSL connection + # timeout = 5s + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` + + +### Metrics + +- `x509_cert` + - tags: + - `source` - source of the certificate + - fields: + - `expiry` (int, seconds) + - `age` (int, seconds) + - `startdate` (int, seconds) + - `enddate` (int, seconds) + + +### Example output + +``` +x509_cert,host=myhost,source=https://example.org age=1753627i,expiry=5503972i,startdate=1516092060i,enddate=1523349660i 1517845687000000000 +x509_cert,host=myhost,source=/etc/ssl/certs/ssl-cert-snakeoil.pem age=7522207i,expiry=308002732i,startdate=1510323480i,enddate=1825848420i 1517845687000000000 +``` diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go new file mode 100644 index 000000000..2e5d26996 --- /dev/null +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -0,0 +1,163 @@ +// Package x509_cert reports metrics from an SSL certificate. +package x509_cert + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "net" + "net/url" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + _tls "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const sampleConfig = ` + ## List certificate sources + sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"] + + ## Timeout for SSL connection + # timeout = 5s + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +` +const description = "Reads metrics from a SSL certificate" + +// X509Cert holds the configuration of the plugin. +type X509Cert struct { + Sources []string `toml:"sources"` + Timeout internal.Duration `toml:"timeout"` + _tls.ClientConfig +} + +// Description returns description of the plugin. +func (c *X509Cert) Description() string { + return description +} + +// SampleConfig returns configuration sample for the plugin. 
+func (c *X509Cert) SampleConfig() string {
+	return sampleConfig
+}
+
+func (c *X509Cert) getCert(location string, timeout time.Duration) ([]*x509.Certificate, error) {
+	if strings.HasPrefix(location, "/") {
+		location = "file://" + location
+	}
+
+	u, err := url.Parse(location)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse cert location - %s", err.Error())
+	}
+
+	switch u.Scheme {
+	case "https":
+		u.Scheme = "tcp"
+		fallthrough
+	case "udp", "udp4", "udp6":
+		fallthrough
+	case "tcp", "tcp4", "tcp6":
+		tlsCfg, err := c.ClientConfig.TLSConfig()
+		if err != nil {
+			return nil, err
+		}
+
+		ipConn, err := net.DialTimeout(u.Scheme, u.Host, timeout)
+		if err != nil {
+			return nil, err
+		}
+		defer ipConn.Close()
+
+		conn := tls.Client(ipConn, tlsCfg)
+		defer conn.Close()
+
+		hsErr := conn.Handshake()
+		if hsErr != nil {
+			return nil, hsErr
+		}
+
+		certs := conn.ConnectionState().PeerCertificates
+
+		return certs, nil
+	case "file":
+		content, err := ioutil.ReadFile(u.Path)
+		if err != nil {
+			return nil, err
+		}
+
+		block, _ := pem.Decode(content)
+		if block == nil {
+			return nil, fmt.Errorf("failed to parse certificate PEM")
+		}
+
+		cert, err := x509.ParseCertificate(block.Bytes)
+		if err != nil {
+			return nil, err
+		}
+
+		return []*x509.Certificate{cert}, nil
+	default:
+		return nil, fmt.Errorf("unsupported scheme '%s' in location %s", u.Scheme, location)
+	}
+}
+
+func getFields(cert *x509.Certificate, now time.Time) map[string]interface{} {
+	age := int(now.Sub(cert.NotBefore).Seconds())
+	expiry := int(cert.NotAfter.Sub(now).Seconds())
+	startdate := cert.NotBefore.Unix()
+	enddate := cert.NotAfter.Unix()
+
+	fields := map[string]interface{}{
+		"age":       age,
+		"expiry":    expiry,
+		"startdate": startdate,
+		"enddate":   enddate,
+	}
+
+	return fields
+}
+
+// Gather adds metrics into the accumulator. 
+func (c *X509Cert) Gather(acc telegraf.Accumulator) error { + now := time.Now() + + for _, location := range c.Sources { + certs, err := c.getCert(location, c.Timeout.Duration*time.Second) + if err != nil { + return fmt.Errorf("cannot get SSL cert '%s': %s", location, err.Error()) + } + + tags := map[string]string{ + "source": location, + } + + for _, cert := range certs { + fields := getFields(cert, now) + + acc.AddFields("x509_cert", fields, tags) + } + } + + return nil +} + +func init() { + inputs.Add("x509_cert", func() telegraf.Input { + return &X509Cert{ + Sources: []string{}, + Timeout: internal.Duration{Duration: 5}, + } + }) +} diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go new file mode 100644 index 000000000..f4c6c8738 --- /dev/null +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -0,0 +1,205 @@ +package x509_cert + +import ( + "crypto/tls" + "encoding/base64" + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" +) + +var pki = testutil.NewPKI("../../../testutil/pki") + +// Make sure X509Cert implements telegraf.Input +var _ telegraf.Input = &X509Cert{} + +func TestGatherRemote(t *testing.T) { + if testing.Short() { + t.Skip("Skipping network-dependent test in short mode.") + } + + tmpfile, err := ioutil.TempFile("", "example") + if err != nil { + t.Fatal(err) + } + + defer os.Remove(tmpfile.Name()) + + if _, err := tmpfile.Write([]byte(pki.ReadServerCert())); err != nil { + t.Fatal(err) + } + + tests := []struct { + name string + server string + timeout time.Duration + close bool + unset bool + noshake bool + error bool + }{ + {name: "wrong port", server: ":99999", error: true}, + {name: "no server", timeout: 5}, + {name: "successful https", server: "https://example.org:443", timeout: 5}, + {name: "successful file", server: "file://" + tmpfile.Name(), timeout: 5}, + {name: "unsupported scheme", server: "foo://", timeout: 5, error: true}, + {name: "no certificate", timeout: 5, unset: true, error: true}, + {name: "closed connection", close: true, error: true}, + {name: "no handshake", timeout: 5, noshake: true, error: true}, + } + + pair, err := tls.X509KeyPair([]byte(pki.ReadServerCert()), []byte(pki.ReadServerKey())) + if err != nil { + t.Fatal(err) + } + + config := &tls.Config{ + InsecureSkipVerify: true, + Certificates: []tls.Certificate{pair}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.unset { + config.Certificates = nil + config.GetCertificate = func(i *tls.ClientHelloInfo) (*tls.Certificate, error) { + return nil, nil + } + } + + ln, err := tls.Listen("tcp", ":0", config) + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + go func() { + sconn, err := ln.Accept() + if err != nil { + return + } + if test.close { + sconn.Close() + } + + serverConfig := config.Clone() + + srv := tls.Server(sconn, serverConfig) + if test.noshake { + srv.Close() + } + if err := srv.Handshake(); err != nil { + return + } + }() + + if test.server == "" { + test.server = "tcp://" + ln.Addr().String() + } + + sc := X509Cert{ + Sources: []string{test.server}, + Timeout: internal.Duration{Duration: test.timeout}, + } + + sc.InsecureSkipVerify = true + testErr := false + + acc := testutil.Accumulator{} + err = sc.Gather(&acc) + if err != nil { + testErr = true + } + + if testErr != test.error { + t.Errorf("%s", err) + } + }) + } +} + +func TestGatherLocal(t *testing.T) { + 
wrongCert := fmt.Sprintf("-----BEGIN CERTIFICATE-----\n%s\n-----END CERTIFICATE-----\n", base64.StdEncoding.EncodeToString([]byte("test"))) + + tests := []struct { + name string + mode os.FileMode + content string + error bool + }{ + {name: "permission denied", mode: 0001, error: true}, + {name: "not a certificate", mode: 0640, content: "test", error: true}, + {name: "wrong certificate", mode: 0640, content: wrongCert, error: true}, + {name: "correct certificate", mode: 0640, content: pki.ReadServerCert()}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + f, err := ioutil.TempFile("", "x509_cert") + if err != nil { + t.Fatal(err) + } + + _, err = f.Write([]byte(test.content)) + if err != nil { + t.Fatal(err) + } + + err = f.Chmod(test.mode) + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } + + defer os.Remove(f.Name()) + + sc := X509Cert{ + Sources: []string{f.Name()}, + } + + error := false + + acc := testutil.Accumulator{} + err = sc.Gather(&acc) + if err != nil { + error = true + } + + if error != test.error { + t.Errorf("%s", err) + } + }) + } +} + +func TestStrings(t *testing.T) { + sc := X509Cert{} + + tests := []struct { + name string + method string + returned string + expected string + }{ + {name: "description", method: "Description", returned: sc.Description(), expected: description}, + {name: "sample config", method: "SampleConfig", returned: sc.SampleConfig(), expected: sampleConfig}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.returned != test.expected { + t.Errorf("Expected method %s to return '%s', found '%s'.", test.method, test.expected, test.returned) + } + }) + } +} From 3d1c650c54b9cce279c9d7d8fc77bcab9bb34588 Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Mon, 30 Jul 2018 13:14:55 -0600 Subject: [PATCH 0042/1815] Update changelog --- CHANGELOG.md | 1 + plugins/inputs/x509_cert/README.md | 2 +- plugins/inputs/x509_cert/x509_cert.go | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 49bc3a04b..de1b5abc7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,6 +43,7 @@ - [#4418](https://github.com/influxdata/telegraf/pull/4418): Add support for setting kafka client id. - [#4332](https://github.com/influxdata/telegraf/pull/4332): Add file input plugin and grok parser. - [#4320](https://github.com/influxdata/telegraf/pull/4320): Improve cloudwatch output performance. +- [#3768](https://github.com/influxdata/telegraf/pull/3768): Add x509_cert input plugin. ## v1.7.2 [2018-07-18] diff --git a/plugins/inputs/x509_cert/README.md b/plugins/inputs/x509_cert/README.md index 6b922f0e1..781b9332a 100644 --- a/plugins/inputs/x509_cert/README.md +++ b/plugins/inputs/x509_cert/README.md @@ -13,7 +13,7 @@ file or network connection. 
sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "https://example.org"] ## Timeout for SSL connection - # timeout = 5s + # timeout = "5s" ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index 2e5d26996..252b60e1f 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -23,7 +23,7 @@ const sampleConfig = ` sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"] ## Timeout for SSL connection - # timeout = 5s + # timeout = "5s" ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" From 06d5501d928ec79c6fc40849472e52dbf799928f Mon Sep 17 00:00:00 2001 From: wegel Date: Mon, 30 Jul 2018 15:31:23 -0400 Subject: [PATCH 0043/1815] Add IPSIpAddress syntax to ipaddr conversion in snmp plugin (#4471) --- plugins/inputs/snmp/snmp.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 50a3cb0ae..112e85c7c 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -1010,7 +1010,7 @@ func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText strin switch tc { case "MacAddress", "PhysAddress": conversion = "hwaddr" - case "InetAddressIPv4", "InetAddressIPv6", "InetAddress": + case "InetAddressIPv4", "InetAddressIPv6", "InetAddress", "IPSIpAddress": conversion = "ipaddr" } } else if strings.HasPrefix(line, "::= { ") { From 5c1ba5e377093e69dd4c77e1063c63ec1fcc9235 Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Mon, 30 Jul 2018 13:32:16 -0600 Subject: [PATCH 0044/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index de1b5abc7..c9f2c3750 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ - [#4332](https://github.com/influxdata/telegraf/pull/4332): Add file input plugin and grok parser. - [#4320](https://github.com/influxdata/telegraf/pull/4320): Improve cloudwatch output performance. - [#3768](https://github.com/influxdata/telegraf/pull/3768): Add x509_cert input plugin. +- [#4471](https://github.com/influxdata/telegraf/pull/4471): Add IPSIpAddress syntax to ipaddr conversion in snmp plugin. 
## v1.7.2 [2018-07-18] From 228efe9a1d832f4251efa864b19a53e9ecb3869d Mon Sep 17 00:00:00 2001 From: Sebastian Boehm Date: Wed, 1 Aug 2018 00:05:55 +0200 Subject: [PATCH 0045/1815] Add filecount input plugin (#4363) --- README.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/filecount/README.md | 49 ++++ plugins/inputs/filecount/filecount.go | 215 ++++++++++++++++++ plugins/inputs/filecount/filecount_test.go | 99 ++++++++ plugins/inputs/filecount/testdata/bar | 0 plugins/inputs/filecount/testdata/baz | 0 plugins/inputs/filecount/testdata/foo | 0 plugins/inputs/filecount/testdata/qux | 7 + plugins/inputs/filecount/testdata/subdir/quux | 0 plugins/inputs/filecount/testdata/subdir/quuz | 0 11 files changed, 372 insertions(+) create mode 100644 plugins/inputs/filecount/README.md create mode 100644 plugins/inputs/filecount/filecount.go create mode 100644 plugins/inputs/filecount/filecount_test.go create mode 100644 plugins/inputs/filecount/testdata/bar create mode 100644 plugins/inputs/filecount/testdata/baz create mode 100644 plugins/inputs/filecount/testdata/foo create mode 100644 plugins/inputs/filecount/testdata/qux create mode 100644 plugins/inputs/filecount/testdata/subdir/quux create mode 100644 plugins/inputs/filecount/testdata/subdir/quuz diff --git a/README.md b/README.md index 03e3a8f58..a0f396cad 100644 --- a/README.md +++ b/README.md @@ -158,6 +158,7 @@ configuration options. * [fibaro](./plugins/inputs/fibaro) * [file](./plugins/inputs/file) * [filestat](./plugins/inputs/filestat) +* [filecount](./plugins/inputs/filecount) * [fluentd](./plugins/inputs/fluentd) * [graylog](./plugins/inputs/graylog) * [haproxy](./plugins/inputs/haproxy) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 8989684e4..4d46a5490 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -31,6 +31,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/fail2ban" _ "github.com/influxdata/telegraf/plugins/inputs/fibaro" _ "github.com/influxdata/telegraf/plugins/inputs/file" + _ "github.com/influxdata/telegraf/plugins/inputs/filecount" _ "github.com/influxdata/telegraf/plugins/inputs/filestat" _ "github.com/influxdata/telegraf/plugins/inputs/fluentd" _ "github.com/influxdata/telegraf/plugins/inputs/graylog" diff --git a/plugins/inputs/filecount/README.md b/plugins/inputs/filecount/README.md new file mode 100644 index 000000000..ccec532aa --- /dev/null +++ b/plugins/inputs/filecount/README.md @@ -0,0 +1,49 @@ +# filecount Input Plugin + +Counts files in directories that match certain criteria. + +### Configuration: + +```toml +# Count files in a directory +[[inputs.filecount]] + ## Directory to gather stats about. + directory = "/var/cache/apt/archives" + + ## Only count files that match the name pattern. Defaults to "*". + name = "*.deb" + + ## Count files in subdirectories. Defaults to true. + recursive = false + + ## Only count regular files. Defaults to true. + regular_only = true + + ## Only count files that are at least this size in bytes. If size is + ## a negative number, only count files that are smaller than the + ## absolute value of size. Defaults to 0. + size = 0 + + ## Only count files that have not been touched for at least this + ## duration. If mtime is negative, only count files that have been + ## touched in this duration. Defaults to "0s". 
+ mtime = "0s" +``` + +### Measurements & Fields: + +- filecount + - count (int) + +### Tags: + +- All measurements have the following tags: + - directory (the directory path, as specified in the config) + +### Example Output: + +``` +$ telegraf --config /etc/telegraf/telegraf.conf --input-filter filecount --test +> filecount,directory=/var/cache/apt,host=czernobog count=7i 1530034445000000000 +> filecount,directory=/tmp,host=czernobog count=17i 1530034445000000000 +``` diff --git a/plugins/inputs/filecount/filecount.go b/plugins/inputs/filecount/filecount.go new file mode 100644 index 000000000..6041ec7b5 --- /dev/null +++ b/plugins/inputs/filecount/filecount.go @@ -0,0 +1,215 @@ +package filecount + +import ( + "os" + "path/filepath" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const sampleConfig = ` + ## Directory to gather stats about. + directory = "/var/cache/apt/archives" + + ## Only count files that match the name pattern. Defaults to "*". + name = "*.deb" + + ## Count files in subdirectories. Defaults to true. + recursive = false + + ## Only count regular files. Defaults to true. + regular_only = true + + ## Only count files that are at least this size in bytes. If size is + ## a negative number, only count files that are smaller than the + ## absolute value of size. Defaults to 0. + size = 0 + + ## Only count files that have not been touched for at least this + ## duration. If mtime is negative, only count files that have been + ## touched in this duration. Defaults to "0s". + mtime = "0s" +` + +type FileCount struct { + Directory string + Name string + Recursive bool + RegularOnly bool + Size int64 + MTime internal.Duration `toml:"mtime"` + fileFilters []fileFilterFunc +} + +type countFunc func(os.FileInfo) +type fileFilterFunc func(os.FileInfo) (bool, error) + +func (_ *FileCount) Description() string { + return "Count files in a directory" +} + +func (_ *FileCount) SampleConfig() string { return sampleConfig } + +func rejectNilFilters(filters []fileFilterFunc) []fileFilterFunc { + filtered := make([]fileFilterFunc, 0, len(filters)) + for _, f := range filters { + if f != nil { + filtered = append(filtered, f) + } + } + return filtered +} + +func (fc *FileCount) nameFilter() fileFilterFunc { + if fc.Name == "*" { + return nil + } + + return func(f os.FileInfo) (bool, error) { + match, err := filepath.Match(fc.Name, f.Name()) + if err != nil { + return false, err + } + return match, nil + } +} + +func (fc *FileCount) regularOnlyFilter() fileFilterFunc { + if !fc.RegularOnly { + return nil + } + + return func(f os.FileInfo) (bool, error) { + return f.Mode().IsRegular(), nil + } +} + +func (fc *FileCount) sizeFilter() fileFilterFunc { + if fc.Size == 0 { + return nil + } + + return func(f os.FileInfo) (bool, error) { + if !f.Mode().IsRegular() { + return false, nil + } + if fc.Size < 0 { + return f.Size() < -fc.Size, nil + } + return f.Size() >= fc.Size, nil + } +} + +func (fc *FileCount) mtimeFilter() fileFilterFunc { + if fc.MTime.Duration == 0 { + return nil + } + + return func(f os.FileInfo) (bool, error) { + age := absDuration(fc.MTime.Duration) + mtime := time.Now().Add(-age) + if fc.MTime.Duration < 0 { + return f.ModTime().After(mtime), nil + } + return f.ModTime().Before(mtime), nil + } +} + +func absDuration(x time.Duration) time.Duration { + if x < 0 { + return -x + } + return x +} + +func count(basedir string, recursive bool, countFn countFunc) error { + walkFn := 
func(path string, file os.FileInfo, err error) error { + if path == basedir { + return nil + } + countFn(file) + if !recursive && file.IsDir() { + return filepath.SkipDir + } + return nil + } + return filepath.Walk(basedir, walkFn) +} + +func (fc *FileCount) initFileFilters() { + filters := []fileFilterFunc{ + fc.nameFilter(), + fc.regularOnlyFilter(), + fc.sizeFilter(), + fc.mtimeFilter(), + } + fc.fileFilters = rejectNilFilters(filters) +} + +func (fc *FileCount) filter(file os.FileInfo) (bool, error) { + if fc.fileFilters == nil { + fc.initFileFilters() + } + + for _, fileFilter := range fc.fileFilters { + match, err := fileFilter(file) + if err != nil { + return false, err + } + if !match { + return false, nil + } + } + + return true, nil +} + +func (fc *FileCount) Gather(acc telegraf.Accumulator) error { + numFiles := int64(0) + countFn := func(f os.FileInfo) { + match, err := fc.filter(f) + if err != nil { + acc.AddError(err) + return + } + if !match { + return + } + numFiles++ + } + err := count(fc.Directory, fc.Recursive, countFn) + if err != nil { + acc.AddError(err) + } + + acc.AddFields("filecount", + map[string]interface{}{ + "count": numFiles, + }, + map[string]string{ + "directory": fc.Directory, + }) + + return nil +} + +func NewFileCount() *FileCount { + return &FileCount{ + Directory: "", + Name: "*", + Recursive: true, + RegularOnly: true, + Size: 0, + MTime: internal.Duration{Duration: 0}, + fileFilters: nil, + } +} + +func init() { + inputs.Add("filecount", func() telegraf.Input { + return NewFileCount() + }) +} diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go new file mode 100644 index 000000000..294a8b965 --- /dev/null +++ b/plugins/inputs/filecount/filecount_test.go @@ -0,0 +1,99 @@ +package filecount + +import ( + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestNoFilters(t *testing.T) { + fc := getNoFilterFileCount() + matches := []string{"foo", "bar", "baz", "qux", + "subdir/", "subdir/quux", "subdir/quuz"} + require.True(t, fileCountEquals(fc, len(matches))) +} + +func TestNameFilter(t *testing.T) { + fc := getNoFilterFileCount() + fc.Name = "ba*" + matches := []string{"bar", "baz"} + require.True(t, fileCountEquals(fc, len(matches))) +} + +func TestNonRecursive(t *testing.T) { + fc := getNoFilterFileCount() + fc.Recursive = false + matches := []string{"foo", "bar", "baz", "qux", "subdir"} + require.True(t, fileCountEquals(fc, len(matches))) +} + +func TestRegularOnlyFilter(t *testing.T) { + fc := getNoFilterFileCount() + fc.RegularOnly = true + matches := []string{ + "foo", "bar", "baz", "qux", "subdir/quux", "subdir/quuz", + } + require.True(t, fileCountEquals(fc, len(matches))) +} + +func TestSizeFilter(t *testing.T) { + fc := getNoFilterFileCount() + fc.Size = -100 + matches := []string{"foo", "bar", "baz", + "subdir/quux", "subdir/quuz"} + require.True(t, fileCountEquals(fc, len(matches))) + + fc.Size = 100 + matches = []string{"qux"} + require.True(t, fileCountEquals(fc, len(matches))) +} + +func TestMTimeFilter(t *testing.T) { + oldFile := filepath.Join(getTestdataDir(), "baz") + mtime := time.Date(1979, time.December, 14, 18, 25, 5, 0, time.UTC) + if err := os.Chtimes(oldFile, mtime, mtime); err != nil { + t.Skip("skipping mtime filter test.") + } + fileAge := time.Since(mtime) - (60 * time.Second) + + fc := getNoFilterFileCount() + fc.MTime = 
internal.Duration{Duration: -fileAge} + matches := []string{"foo", "bar", "qux", + "subdir/", "subdir/quux", "subdir/quuz"} + require.True(t, fileCountEquals(fc, len(matches))) + + fc.MTime = internal.Duration{Duration: fileAge} + matches = []string{"baz"} + require.True(t, fileCountEquals(fc, len(matches))) +} + +func getNoFilterFileCount() FileCount { + return FileCount{ + Directory: getTestdataDir(), + Name: "*", + Recursive: true, + RegularOnly: false, + Size: 0, + MTime: internal.Duration{Duration: 0}, + fileFilters: nil, + } +} + +func getTestdataDir() string { + _, filename, _, _ := runtime.Caller(1) + return strings.Replace(filename, "filecount_test.go", "testdata/", 1) +} + +func fileCountEquals(fc FileCount, expectedCount int) bool { + tags := map[string]string{"directory": getTestdataDir()} + acc := testutil.Accumulator{} + acc.GatherError(fc.Gather) + return acc.HasPoint("filecount", tags, "count", int64(expectedCount)) +} diff --git a/plugins/inputs/filecount/testdata/bar b/plugins/inputs/filecount/testdata/bar new file mode 100644 index 000000000..e69de29bb diff --git a/plugins/inputs/filecount/testdata/baz b/plugins/inputs/filecount/testdata/baz new file mode 100644 index 000000000..e69de29bb diff --git a/plugins/inputs/filecount/testdata/foo b/plugins/inputs/filecount/testdata/foo new file mode 100644 index 000000000..e69de29bb diff --git a/plugins/inputs/filecount/testdata/qux b/plugins/inputs/filecount/testdata/qux new file mode 100644 index 000000000..c7288f23d --- /dev/null +++ b/plugins/inputs/filecount/testdata/qux @@ -0,0 +1,7 @@ +Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do +eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad +minim veniam, quis nostrud exercitation ullamco laboris nisi ut +aliquip ex ea commodo consequat. Duis aute irure dolor in +reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla +pariatur. Excepteur sint occaecat cupidatat non proident, sunt in +culpa qui officia deserunt mollit anim id est laborum. 
diff --git a/plugins/inputs/filecount/testdata/subdir/quux b/plugins/inputs/filecount/testdata/subdir/quux new file mode 100644 index 000000000..e69de29bb diff --git a/plugins/inputs/filecount/testdata/subdir/quuz b/plugins/inputs/filecount/testdata/subdir/quuz new file mode 100644 index 000000000..e69de29bb From 93ed28e7453f6b7ab00d1544e1b6eb88022570b1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 31 Jul 2018 15:07:21 -0700 Subject: [PATCH 0046/1815] Add support for configuring an AWS endpoint_url (#4485) --- internal/config/aws/credentials.go | 21 ++++++++------ plugins/inputs/cloudwatch/README.md | 6 ++++ plugins/inputs/cloudwatch/cloudwatch.go | 36 +++++++++++++++--------- plugins/outputs/cloudwatch/cloudwatch.go | 36 +++++++++++++++--------- plugins/outputs/kinesis/kinesis.go | 36 +++++++++++++++--------- 5 files changed, 84 insertions(+), 51 deletions(-) diff --git a/internal/config/aws/credentials.go b/internal/config/aws/credentials.go index b1f57fceb..1e4f91b13 100644 --- a/internal/config/aws/credentials.go +++ b/internal/config/aws/credentials.go @@ -9,13 +9,14 @@ import ( ) type CredentialConfig struct { - Region string - AccessKey string - SecretKey string - RoleARN string - Profile string - Filename string - Token string + Region string + AccessKey string + SecretKey string + RoleARN string + Profile string + Filename string + Token string + EndpointURL string } func (c *CredentialConfig) Credentials() client.ConfigProvider { @@ -28,7 +29,8 @@ func (c *CredentialConfig) Credentials() client.ConfigProvider { func (c *CredentialConfig) rootCredentials() client.ConfigProvider { config := &aws.Config{ - Region: aws.String(c.Region), + Region: aws.String(c.Region), + Endpoint: &c.EndpointURL, } if c.AccessKey != "" || c.SecretKey != "" { config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token) @@ -42,7 +44,8 @@ func (c *CredentialConfig) rootCredentials() client.ConfigProvider { func (c *CredentialConfig) assumeCredentials() client.ConfigProvider { rootCredentials := c.rootCredentials() config := &aws.Config{ - Region: aws.String(c.Region), + Region: aws.String(c.Region), + Endpoint: &c.EndpointURL, } config.Credentials = stscreds.NewCredentials(rootCredentials, c.RoleARN) return session.New(config) diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md index 88a5b098f..dfb5bf95d 100644 --- a/plugins/inputs/cloudwatch/README.md +++ b/plugins/inputs/cloudwatch/README.md @@ -35,6 +35,12 @@ API endpoint. In the following order the plugin will attempt to authenticate. #profile = "" #shared_credential_file = "" + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all # metrics are made available to the 1 minute period. Some are collected at # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. 
diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index b4f91f745..9ba15b6ac 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -19,13 +19,14 @@ import ( type ( CloudWatch struct { - Region string `toml:"region"` - AccessKey string `toml:"access_key"` - SecretKey string `toml:"secret_key"` - RoleARN string `toml:"role_arn"` - Profile string `toml:"profile"` - Filename string `toml:"shared_credential_file"` - Token string `toml:"token"` + Region string `toml:"region"` + AccessKey string `toml:"access_key"` + SecretKey string `toml:"secret_key"` + RoleARN string `toml:"role_arn"` + Profile string `toml:"profile"` + Filename string `toml:"shared_credential_file"` + Token string `toml:"token"` + EndpointURL string `toml:"endpoint_url"` Period internal.Duration `toml:"period"` Delay internal.Duration `toml:"delay"` @@ -79,6 +80,12 @@ func (c *CloudWatch) SampleConfig() string { #profile = "" #shared_credential_file = "" + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all # metrics are made available to the 1 minute period. Some are collected at # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. @@ -224,13 +231,14 @@ func init() { */ func (c *CloudWatch) initializeCloudWatch() error { credentialConfig := &internalaws.CredentialConfig{ - Region: c.Region, - AccessKey: c.AccessKey, - SecretKey: c.SecretKey, - RoleARN: c.RoleARN, - Profile: c.Profile, - Filename: c.Filename, - Token: c.Token, + Region: c.Region, + AccessKey: c.AccessKey, + SecretKey: c.SecretKey, + RoleARN: c.RoleARN, + Profile: c.Profile, + Filename: c.Filename, + Token: c.Token, + EndpointURL: c.EndpointURL, } configProvider := credentialConfig.Credentials() diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index f7ccc1fee..52ab41a28 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -17,13 +17,14 @@ import ( ) type CloudWatch struct { - Region string `toml:"region"` - AccessKey string `toml:"access_key"` - SecretKey string `toml:"secret_key"` - RoleARN string `toml:"role_arn"` - Profile string `toml:"profile"` - Filename string `toml:"shared_credential_file"` - Token string `toml:"token"` + Region string `toml:"region"` + AccessKey string `toml:"access_key"` + SecretKey string `toml:"secret_key"` + RoleARN string `toml:"role_arn"` + Profile string `toml:"profile"` + Filename string `toml:"shared_credential_file"` + Token string `toml:"token"` + EndpointURL string `toml:"endpoint_url"` Namespace string `toml:"namespace"` // CloudWatch Metrics Namespace svc *cloudwatch.CloudWatch @@ -48,6 +49,12 @@ var sampleConfig = ` #profile = "" #shared_credential_file = "" + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. 
+ ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + ## Namespace for the CloudWatch MetricDatums namespace = "InfluxData/Telegraf" ` @@ -62,13 +69,14 @@ func (c *CloudWatch) Description() string { func (c *CloudWatch) Connect() error { credentialConfig := &internalaws.CredentialConfig{ - Region: c.Region, - AccessKey: c.AccessKey, - SecretKey: c.SecretKey, - RoleARN: c.RoleARN, - Profile: c.Profile, - Filename: c.Filename, - Token: c.Token, + Region: c.Region, + AccessKey: c.AccessKey, + SecretKey: c.SecretKey, + RoleARN: c.RoleARN, + Profile: c.Profile, + Filename: c.Filename, + Token: c.Token, + EndpointURL: c.EndpointURL, } configProvider := credentialConfig.Credentials() diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index d77ff08a5..014379146 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -17,13 +17,14 @@ import ( type ( KinesisOutput struct { - Region string `toml:"region"` - AccessKey string `toml:"access_key"` - SecretKey string `toml:"secret_key"` - RoleARN string `toml:"role_arn"` - Profile string `toml:"profile"` - Filename string `toml:"shared_credential_file"` - Token string `toml:"token"` + Region string `toml:"region"` + AccessKey string `toml:"access_key"` + SecretKey string `toml:"secret_key"` + RoleARN string `toml:"role_arn"` + Profile string `toml:"profile"` + Filename string `toml:"shared_credential_file"` + Token string `toml:"token"` + EndpointURL string `toml:"endpoint_url"` StreamName string `toml:"streamname"` PartitionKey string `toml:"partitionkey"` @@ -60,6 +61,12 @@ var sampleConfig = ` #profile = "" #shared_credential_file = "" + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + ## Kinesis StreamName must exist prior to starting telegraf. streamname = "StreamName" ## DEPRECATED: PartitionKey as used for sharding data. 
@@ -126,13 +133,14 @@ func (k *KinesisOutput) Connect() error { } credentialConfig := &internalaws.CredentialConfig{ - Region: k.Region, - AccessKey: k.AccessKey, - SecretKey: k.SecretKey, - RoleARN: k.RoleARN, - Profile: k.Profile, - Filename: k.Filename, - Token: k.Token, + Region: k.Region, + AccessKey: k.AccessKey, + SecretKey: k.SecretKey, + RoleARN: k.RoleARN, + Profile: k.Profile, + Filename: k.Filename, + Token: k.Token, + EndpointURL: k.EndpointURL, } configProvider := credentialConfig.Credentials() svc := kinesis.New(configProvider) From 943dcc0c49c3223e78482ce5e6c29e72957b8af1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 31 Jul 2018 15:08:04 -0700 Subject: [PATCH 0047/1815] Send all messages before waiting for results in kafka output (#4491) --- plugins/outputs/kafka/kafka.go | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index a45e2a4e9..a99c8e1c2 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -246,32 +246,34 @@ func (k *Kafka) Description() string { } func (k *Kafka) Write(metrics []telegraf.Metric) error { - if len(metrics) == 0 { - return nil - } - + msgs := make([]*sarama.ProducerMessage, 0, len(metrics)) for _, metric := range metrics { buf, err := k.serializer.Serialize(metric) if err != nil { return err } - topicName := k.GetTopicName(metric) - m := &sarama.ProducerMessage{ - Topic: topicName, + Topic: k.GetTopicName(metric), Value: sarama.ByteEncoder(buf), } - if h, ok := metric.Tags()[k.RoutingTag]; ok { + if h, ok := metric.GetTag(k.RoutingTag); ok { m.Key = sarama.StringEncoder(h) } - - _, _, err = k.producer.SendMessage(m) - - if err != nil { - return fmt.Errorf("FAILED to send kafka message: %s\n", err) - } + msgs = append(msgs, m) } + + err := k.producer.SendMessages(msgs) + if err != nil { + // We could have many errors, return only the first encountered. + if errs, ok := err.(sarama.ProducerErrors); ok { + for _, prodErr := range errs { + return prodErr + } + } + return err + } + return nil } From f4032fc78de27fb49e3f026f349c1e2f63f8f74b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 31 Jul 2018 15:09:30 -0700 Subject: [PATCH 0048/1815] Add support for lz4 compression to kafka output (#4492) --- plugins/outputs/kafka/README.md | 10 ++++++++-- plugins/outputs/kafka/kafka.go | 16 ++++++++++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md index 562f3fd5d..bb410a1d5 100644 --- a/plugins/outputs/kafka/README.md +++ b/plugins/outputs/kafka/README.md @@ -10,9 +10,15 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm ## Kafka topic for producer messages topic = "telegraf" - ## Optional client id + ## Optional Client id # client_id = "Telegraf" + ## Set the minimal supported Kafka version. Setting this enables the use of new + ## Kafka features and APIs. Of particular interest, lz4 compression + ## requires at least version 0.10.0.0. + ## ex: version = "1.1.0" + # version = "" + ## Optional topic suffix configuration. ## If the section is omitted, no suffix is used.
## Following topic suffix methods are supported: @@ -20,7 +26,7 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm ## tags - suffix equals to separator + specified tags' values ## interleaved with separator - ## Suffix equals to "_" + measurement's name + ## Suffix equals to "_" + measurement name # [outputs.kafka.topic_suffix] # method = "measurement" # separator = "_" diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index a99c8e1c2..5fdb8d857 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -38,6 +38,8 @@ type ( // MaxRetry Tag MaxRetry int + Version string `toml:"version"` + // Legacy TLS config options // TLS client certificate Certificate string @@ -74,6 +76,12 @@ var sampleConfig = ` ## Optional Client id # client_id = "Telegraf" + ## Set the minimal supported Kafka version. Setting this enables the use of new + ## Kafka features and APIs. Of particular interest, lz4 compression + ## requires at least version 0.10.0.0. + ## ex: version = "1.1.0" + # version = "" + ## Optional topic suffix configuration. ## If the section is omitted, no suffix is used. ## Following topic suffix methods are supported: @@ -191,6 +199,14 @@ func (k *Kafka) Connect() error { } config := sarama.NewConfig() + if k.Version != "" { + version, err := sarama.ParseKafkaVersion(k.Version) + if err != nil { + return err + } + config.Version = version + } + if k.ClientID != "" { config.ClientID = k.ClientID } else { From efe61eeb73868dbdbb29a0d3fdb0b412e8c31d55 Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Tue, 31 Jul 2018 16:10:01 -0600 Subject: [PATCH 0049/1815] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c9f2c3750..b3f0a6289 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,10 @@ - [#4320](https://github.com/influxdata/telegraf/pull/4320): Improve cloudwatch output performance. - [#3768](https://github.com/influxdata/telegraf/pull/3768): Add x509_cert input plugin. - [#4471](https://github.com/influxdata/telegraf/pull/4471): Add IPSIpAddress syntax to ipaddr conversion in snmp plugin. +- [#4363](https://github.com/influxdata/telegraf/pull/4363): Add filecount input plugin. +- [#4485](https://github.com/influxdata/telegraf/pull/4485): Add support for configuring an AWS endpoint_url. +- [#4491](https://github.com/influxdata/telegraf/pull/4491): Send all messages before waiting for results in kafka output. +- [#4492](https://github.com/influxdata/telegraf/pull/4492): Add support for lz4 compression to kafka output.
## v1.7.2 [2018-07-18] From b93460dd06e733f32f3c49f4f6c45c68fd4b18e3 Mon Sep 17 00:00:00 2001 From: Jonathan G Date: Tue, 31 Jul 2018 17:56:03 -0600 Subject: [PATCH 0050/1815] Split multiple sensor keys in ipmi input (#4450) --- etc/telegraf.conf | 3 + plugins/inputs/ipmi_sensor/README.md | 63 ++++++-- plugins/inputs/ipmi_sensor/ipmi.go | 137 ++++++++++++++--- plugins/inputs/ipmi_sensor/ipmi_test.go | 196 +++++++++++++++++++++++- 4 files changed, 361 insertions(+), 38 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 38942adee..912a93d10 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -1976,6 +1976,9 @@ # ## Timeout for the ipmitool command to complete # timeout = "20s" +# ## Schema Version: (Optional, defaults to version 1) +# metric_version = 2 + # # Gather packets and bytes counters from Linux ipsets # [[inputs.ipset]] diff --git a/plugins/inputs/ipmi_sensor/README.md b/plugins/inputs/ipmi_sensor/README.md index 74cfe3bc5..fb2e8f26e 100644 --- a/plugins/inputs/ipmi_sensor/README.md +++ b/plugins/inputs/ipmi_sensor/README.md @@ -8,6 +8,10 @@ If no servers are specified, the plugin will query the local machine sensor stat ``` ipmitool sdr ``` +or with the version 2 schema: +``` +ipmitool sdr elist +``` When one or more servers are specified, the plugin will use the following command to collect remote host sensor stats: ``` ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr ``` @@ -41,19 +45,36 @@ ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr ## Timeout for the ipmitool command to complete. Default is 20 seconds. timeout = "20s" + + ## Schema Version: (Optional, defaults to version 1) + metric_version = 2 ``` ### Measurements +Version 1 schema: - ipmi_sensor: - tags: - name - unit + - host - server (only when retrieving stats from remote servers) - fields: - - status (int) + - status (int, 1 if status_code is ok, 0 otherwise) - value (float) +Version 2 schema: +- ipmi_sensor: + - tags: + - name + - entity_id (can help disambiguate duplicate names) + - status_code (two letter code from IPMI documentation) + - status_desc (extended status description field) + - unit (only on analog values) + - host + - server (only when retrieving stats from remote) + - fields: + - value (float) #### Permissions @@ -68,24 +89,36 @@ KERNEL=="ipmi*", MODE="660", GROUP="telegraf" ### Example Output +#### Version 1 Schema When retrieving stats from a remote server: ``` -ipmi_sensor,server=10.20.2.203,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455 -ipmi_sensor,server=10.20.2.203,unit=feet,name=altitude status=1i,value=80 1458488465012688613 -ipmi_sensor,server=10.20.2.203,unit=watts,name=avg_power status=1i,value=220 1458488465012776511 -ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875 -ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_vbat status=1i,value=3.04 1458488465013072508 -ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932 -ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896 +ipmi_sensor,server=10.20.2.203,name=uid_light value=0,status=1i 1517125513000000000 +ipmi_sensor,server=10.20.2.203,name=sys._health_led status=1i,value=0 1517125513000000000 +ipmi_sensor,server=10.20.2.203,name=power_supply_1,unit=watts status=1i,value=110 1517125513000000000 +ipmi_sensor,server=10.20.2.203,name=power_supply_2,unit=watts status=1i,value=120 1517125513000000000 +ipmi_sensor,server=10.20.2.203,name=power_supplies value=0,status=1i 1517125513000000000
+ipmi_sensor,server=10.20.2.203,name=fan_1,unit=percent status=1i,value=43.12 1517125513000000000 ``` + When retrieving stats from the local machine (no server specified): ``` -ipmi_sensor,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455 -ipmi_sensor,unit=feet,name=altitude status=1i,value=80 1458488465012688613 -ipmi_sensor,unit=watts,name=avg_power status=1i,value=220 1458488465012776511 -ipmi_sensor,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875 -ipmi_sensor,unit=volts,name=planar_vbat status=1i,value=3.04 1458488465013072508 -ipmi_sensor,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932 -ipmi_sensor,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896 +ipmi_sensor,name=uid_light value=0,status=1i 1517125513000000000 +ipmi_sensor,name=sys._health_led status=1i,value=0 1517125513000000000 +ipmi_sensor,name=power_supply_1,unit=watts status=1i,value=110 1517125513000000000 +ipmi_sensor,name=power_supply_2,unit=watts status=1i,value=120 1517125513000000000 +ipmi_sensor,name=power_supplies value=0,status=1i 1517125513000000000 +ipmi_sensor,name=fan_1,unit=percent status=1i,value=43.12 1517125513000000000 +``` + +#### Version 2 Schema + +When retrieving stats from the local machine (no server specified): +``` +ipmi_sensor,name=uid_light,entity_id=23.1,status_code=ok,status_desc=ok value=0 1517125474000000000 +ipmi_sensor,name=sys._health_led,entity_id=23.2,status_code=ok,status_desc=ok value=0 1517125474000000000 +ipmi_sensor,entity_id=10.1,name=power_supply_1,status_code=ok,status_desc=presence_detected,unit=watts value=110 1517125474000000000 +ipmi_sensor,name=power_supply_2,entity_id=10.2,status_code=ok,unit=watts,status_desc=presence_detected value=125 1517125474000000000 +ipmi_sensor,name=power_supplies,entity_id=10.3,status_code=ok,status_desc=fully_redundant value=0 1517125474000000000 +ipmi_sensor,entity_id=7.1,name=fan_1,status_code=ok,status_desc=transition_to_running,unit=percent value=43.12 1517125474000000000 ``` diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go index ee99b0a3d..65506e118 100644 --- a/plugins/inputs/ipmi_sensor/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi.go @@ -1,8 +1,11 @@ package ipmi_sensor import ( + "bufio" + "bytes" "fmt" "os/exec" + "regexp" "strconv" "strings" "sync" @@ -14,14 +17,20 @@ import ( ) var ( - execCommand = exec.Command // execCommand is used to mock commands in tests. + execCommand = exec.Command // execCommand is used to mock commands in tests. 
+ re_v1_parse_line = regexp.MustCompile(`^(?P<name>[^|]*)\|(?P<description>[^|]*)\|(?P<status_code>.*)`) + re_v2_parse_line = regexp.MustCompile(`^(?P<name>[^|]*)\|[^|]+\|(?P<status_code>[^|]*)\|(?P<entity_id>[^|]*)\|(?:(?P<description>[^|]+))?`) + re_v2_parse_description = regexp.MustCompile(`^(?P<analogValue>[0-9.]+)\s(?P<analogUnit>.*)|(?P<status>.+)|^$`) + re_v2_parse_unit = regexp.MustCompile(`^(?P<realAnalogUnit>[^,]+)(?:,\s*(?P<statusDesc>.*))?`) ) +// Ipmi stores the configuration values for the ipmi_sensor input plugin type Ipmi struct { - Path string - Privilege string - Servers []string - Timeout internal.Duration + Path string + Privilege string + Servers []string + Timeout internal.Duration + MetricVersion int } var sampleConfig = ` @@ -46,16 +55,22 @@ var sampleConfig = ` ## Timeout for the ipmitool command to complete timeout = "20s" + + ## Schema Version: (Optional, defaults to version 1) + metric_version = 2 ` +// SampleConfig returns the documentation about the sample configuration func (m *Ipmi) SampleConfig() string { return sampleConfig } +// Description returns a basic description for the plugin functions func (m *Ipmi) Description() string { return "Read metrics from the bare metal servers via IPMI" } +// Gather is the main execution function for the plugin func (m *Ipmi) Gather(acc telegraf.Accumulator) error { if len(m.Path) == 0 { return fmt.Errorf("ipmitool not found: verify that ipmitool is installed and that ipmitool is in your PATH") @@ -93,23 +108,33 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { opts = conn.options() } opts = append(opts, "sdr") + if m.MetricVersion == 2 { + opts = append(opts, "elist") + } cmd := execCommand(m.Path, opts...) out, err := internal.CombinedOutputTimeout(cmd, m.Timeout.Duration) + timestamp := time.Now() if err != nil { return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) } + if m.MetricVersion == 2 { + return parseV2(acc, hostname, out, timestamp) + } + return parseV1(acc, hostname, out, timestamp) +} +func parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_at time.Time) error { // each line will look something like // Planar VBAT | 3.05 Volts | ok - lines := strings.Split(string(out), "\n") - for i := 0; i < len(lines); i++ { - vals := strings.Split(lines[i], "|") - if len(vals) != 3 { + scanner := bufio.NewScanner(bytes.NewReader(cmdOut)) + for scanner.Scan() { + ipmiFields := extractFieldsFromRegex(re_v1_parse_line, scanner.Text()) + if len(ipmiFields) != 3 { continue } tags := map[string]string{ - "name": transform(vals[0]), + "name": transform(ipmiFields["name"]), } // tag the server if we have one @@ -118,18 +143,20 @@ } fields := make(map[string]interface{}) - if strings.EqualFold("ok", trim(vals[2])) { + if strings.EqualFold("ok", trim(ipmiFields["status_code"])) { fields["status"] = 1 } else { fields["status"] = 0 } - val1 := trim(vals[1]) - - if strings.Index(val1, " ") > 0 { + if strings.Index(ipmiFields["description"], " ") > 0 { // split middle column into value and unit - valunit := strings.SplitN(val1, " ", 2) - fields["value"] = Atofloat(valunit[0]) + valunit := strings.SplitN(ipmiFields["description"], " ", 2) + var err error + fields["value"], err = aToFloat(valunit[0]) + if err != nil { + continue + } if len(valunit) > 1 { tags["unit"] = transform(valunit[1]) } @@ -137,19 +164,85 @@ fields["value"] = 0.0 } - acc.AddFields("ipmi_sensor", fields, tags, time.Now()) + acc.AddFields("ipmi_sensor", fields, tags, 
measured_at) } - return nil + return scanner.Err() } -func Atofloat(val string) float64 { +func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_at time.Time) error { + // each line will look something like + // CMOS Battery | 65h | ok | 7.1 | + // Temp | 0Eh | ok | 3.1 | 55 degrees C + // Drive 0 | A0h | ok | 7.1 | Drive Present + scanner := bufio.NewScanner(bytes.NewReader(cmdOut)) + for scanner.Scan() { + ipmiFields := extractFieldsFromRegex(re_v2_parse_line, scanner.Text()) + if len(ipmiFields) < 3 || len(ipmiFields) > 4 { + continue + } + + tags := map[string]string{ + "name": transform(ipmiFields["name"]), + } + + // tag the server if we have one + if hostname != "" { + tags["server"] = hostname + } + tags["entity_id"] = transform(ipmiFields["entity_id"]) + tags["status_code"] = trim(ipmiFields["status_code"]) + fields := make(map[string]interface{}) + descriptionResults := extractFieldsFromRegex(re_v2_parse_description, trim(ipmiFields["description"])) + // This is an analog value with a unit + if descriptionResults["analogValue"] != "" && len(descriptionResults["analogUnit"]) >= 1 { + var err error + fields["value"], err = aToFloat(descriptionResults["analogValue"]) + if err != nil { + continue + } + // Some implementations add an extra status to their analog units + unitResults := extractFieldsFromRegex(re_v2_parse_unit, descriptionResults["analogUnit"]) + tags["unit"] = transform(unitResults["realAnalogUnit"]) + if unitResults["statusDesc"] != "" { + tags["status_desc"] = transform(unitResults["statusDesc"]) + } + } else { + // This is a status value + fields["value"] = 0.0 + // Extended status descriptions aren't required, in which case for consistency re-use the status code + if descriptionResults["status"] != "" { + tags["status_desc"] = transform(descriptionResults["status"]) + } else { + tags["status_desc"] = transform(ipmiFields["status_code"]) + } + } + + acc.AddFields("ipmi_sensor", fields, tags, measured_at) + } + + return scanner.Err() +} + +// extractFieldsFromRegex consumes a regex with named capture groups and returns a kvp map of strings with the results +func extractFieldsFromRegex(re *regexp.Regexp, input string) map[string]string { + submatches := re.FindStringSubmatch(input) + results := make(map[string]string) + for i, name := range re.SubexpNames() { + if name != input && name != "" && input != "" { + results[name] = trim(submatches[i]) + } + } + return results +} + +// aToFloat converts string representations of numbers to float64 values +func aToFloat(val string) (float64, error) { f, err := strconv.ParseFloat(val, 64) if err != nil { - return 0.0 - } else { - return f + return 0.0, err } + return f, nil } func trim(s string) string { diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go index 3d45f2fa8..d781ce7b5 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -28,7 +28,7 @@ func TestGather(t *testing.T) { require.NoError(t, err) - assert.Equal(t, acc.NFields(), 266, "non-numeric measurements should be ignored") + assert.Equal(t, acc.NFields(), 262, "non-numeric measurements should be ignored") conn := NewConnection(i.Servers[0], i.Privilege) assert.Equal(t, "USERID", conn.Username) @@ -127,6 +127,7 @@ func TestGather(t *testing.T) { } err = acc.GatherError(i.Gather) + require.NoError(t, err) var testsWithoutServer = []struct { fields map[string]interface{} @@ -378,3 +379,196 @@ OS RealTime Mod | 0x00 | ok } os.Exit(0) } + +func 
TestGatherV2(t *testing.T) { + i := &Ipmi{ + Servers: []string{"USERID:PASSW0RD@lan(192.168.1.1)"}, + Path: "ipmitool", + Privilege: "USER", + Timeout: internal.Duration{Duration: time.Second * 5}, + MetricVersion: 2, + } + // overwriting exec commands with mock commands + execCommand = fakeExecCommandV2 + var acc testutil.Accumulator + + err := acc.GatherError(i.Gather) + + require.NoError(t, err) + + conn := NewConnection(i.Servers[0], i.Privilege) + assert.Equal(t, "USERID", conn.Username) + assert.Equal(t, "lan", conn.Interface) + + var testsWithServer = []struct { + fields map[string]interface{} + tags map[string]string + }{ + //SEL | 72h | ns | 7.1 | No Reading + { + map[string]interface{}{ + "value": float64(0), + }, + map[string]string{ + "name": "sel", + "entity_id": "7.1", + "status_code": "ns", + "status_desc": "no_reading", + "server": "192.168.1.1", + }, + }, + } + + for _, test := range testsWithServer { + acc.AssertContainsTaggedFields(t, "ipmi_sensor", test.fields, test.tags) + } + + i = &Ipmi{ + Path: "ipmitool", + Timeout: internal.Duration{Duration: time.Second * 5}, + MetricVersion: 2, + } + + err = acc.GatherError(i.Gather) + require.NoError(t, err) + + var testsWithoutServer = []struct { + fields map[string]interface{} + tags map[string]string + }{ + //SEL | 72h | ns | 7.1 | No Reading + { + map[string]interface{}{ + "value": float64(0), + }, + map[string]string{ + "name": "sel", + "entity_id": "7.1", + "status_code": "ns", + "status_desc": "no_reading", + }, + }, + //Intrusion | 73h | ok | 7.1 | + { + map[string]interface{}{ + "value": float64(0), + }, + map[string]string{ + "name": "intrusion", + "entity_id": "7.1", + "status_code": "ok", + "status_desc": "ok", + }, + }, + //Fan1 | 30h | ok | 7.1 | 5040 RPM + { + map[string]interface{}{ + "value": float64(5040), + }, + map[string]string{ + "name": "fan1", + "entity_id": "7.1", + "status_code": "ok", + "unit": "rpm", + }, + }, + //Inlet Temp | 04h | ok | 7.1 | 25 degrees C + { + map[string]interface{}{ + "value": float64(25), + }, + map[string]string{ + "name": "inlet_temp", + "entity_id": "7.1", + "status_code": "ok", + "unit": "degrees_c", + }, + }, + //USB Cable Pres | 50h | ok | 7.1 | Connected + { + map[string]interface{}{ + "value": float64(0), + }, + map[string]string{ + "name": "usb_cable_pres", + "entity_id": "7.1", + "status_code": "ok", + "status_desc": "connected", + }, + }, + //Current 1 | 6Ah | ok | 10.1 | 7.20 Amps + { + map[string]interface{}{ + "value": float64(7.2), + }, + map[string]string{ + "name": "current_1", + "entity_id": "10.1", + "status_code": "ok", + "unit": "amps", + }, + }, + //Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected + { + map[string]interface{}{ + "value": float64(110), + }, + map[string]string{ + "name": "power_supply_1", + "entity_id": "10.1", + "status_code": "ok", + "unit": "watts", + "status_desc": "presence_detected", + }, + }, + } + + for _, test := range testsWithoutServer { + acc.AssertContainsTaggedFields(t, "ipmi_sensor", test.fields, test.tags) + } +} + +// fakeExecCommandV2 is a helper function that mocks +// the exec.Command call (and calls the test binary) +func fakeExecCommandV2(command string, args ...string) *exec.Cmd { + cs := []string{"-test.run=TestHelperProcessV2", "--", command} + cs = append(cs, args...) + cmd := exec.Command(os.Args[0], cs...) + cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} + return cmd +} + +// TestHelperProcessV2 isn't a real test. 
It's used to mock exec.Command +// For example, if you run: +// GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcessV2 -- chrony tracking +// it returns below mockData. +func TestHelperProcessV2(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + + // Curated list of use cases instead of full dumps + mockData := `SEL | 72h | ns | 7.1 | No Reading +Intrusion | 73h | ok | 7.1 | +Fan1 | 30h | ok | 7.1 | 5040 RPM +Inlet Temp | 04h | ok | 7.1 | 25 degrees C +USB Cable Pres | 50h | ok | 7.1 | Connected +Current 1 | 6Ah | ok | 10.1 | 7.20 Amps +Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected +` + + args := os.Args + + // Previous arguments are test stuff that looks like: + // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- + cmd, args := args[3], args[4:] + + if cmd == "ipmitool" { + fmt.Fprint(os.Stdout, mockData) + } else { + fmt.Fprint(os.Stdout, "command not found") + os.Exit(1) + + } + os.Exit(0) +} From ddf2d691e96c5705091c65fa55beecc7283bd27f Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Tue, 31 Jul 2018 17:57:54 -0600 Subject: [PATCH 0051/1815] Update changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b3f0a6289..6af318eed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,8 @@ - [file](./plugins/inputs/file/README.md) - Contributed by @maxunt - [tengine](./plugins/inputs/tengine/README.md) - Contributed by @ertaoxu +- [x509_cert](./plugins/inputs/x509_cert/README.md) - Contributed by @jtyr +- [filecount](./plugins/inputs/filecount/README.md) - Contributed by @sometimesfood ### New Processors @@ -49,6 +51,7 @@ - [#4485](https://github.com/influxdata/telegraf/pull/4485): Add support for configuring an AWS endpoint_url. - [#4491](https://github.com/influxdata/telegraf/pull/4491): Send all messages before waiting for results in kafka output. - [#4492](https://github.com/influxdata/telegraf/pull/4492): Add support for lz4 compression to kafka output. +- [#4450](https://github.com/influxdata/telegraf/pull/4450): Split multiple sensor keys in ipmi input. ## v1.7.2 [2018-07-18] ### Bugfixes From 6a32a7d85b9fab3b2c628dad480dddb302282fc6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 31 Jul 2018 17:15:42 -0700 Subject: [PATCH 0052/1815] Keep leading whitespace for messages in syslog input (#4498) --- plugins/inputs/syslog/README.md | 4 ++-- plugins/inputs/syslog/rfc5426_test.go | 32 +++++++++++++++++++++++++++ plugins/inputs/syslog/syslog.go | 9 +++++--- 3 files changed, 40 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/syslog/README.md b/plugins/inputs/syslog/README.md index 107727947..e57d28dd2 100644 --- a/plugins/inputs/syslog/README.md +++ b/plugins/inputs/syslog/README.md @@ -33,9 +33,9 @@ Syslog messages should be formatted according to ``` ## Only applies to stream sockets (e.g. TCP). # max_connections = 1024 - ## Read timeout (default = 500ms). + ## Read timeout is the maximum time allowed for reading a single message (default = 5s). ## 0 means unlimited. - # read_timeout = 500ms + # read_timeout = "5s" ## Whether to parse in best effort mode or not (default = false). ## By default best effort parsing is off. 
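The behavior change in this patch is easiest to see side by side. A minimal standard-library sketch comparing the old full trim (`strings.TrimSpace`) with the new right-only trim, matching the `\tA` test case added in the test file below:

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
)

func main() {
	msg := "\tA\n"
	// Old behavior: trims both ends, so the leading tab is lost.
	fmt.Printf("%q\n", strings.TrimSpace(msg)) // "A"
	// New behavior: trims only trailing whitespace, keeping the leading tab.
	fmt.Printf("%q\n", strings.TrimRightFunc(msg, unicode.IsSpace)) // "\tA"
}
```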
diff --git a/plugins/inputs/syslog/rfc5426_test.go b/plugins/inputs/syslog/rfc5426_test.go index 8304a5406..67966ed1d 100644 --- a/plugins/inputs/syslog/rfc5426_test.go +++ b/plugins/inputs/syslog/rfc5426_test.go @@ -202,6 +202,38 @@ func getTestCasesForRFC5426() []testCase5426 { }, werr: true, }, + { + name: "trim message", + data: []byte("<1>1 - - - - - - \tA\n"), + wantBestEffort: &testutil.Metric{ + Measurement: "syslog", + Fields: map[string]interface{}{ + "version": uint16(1), + "message": "\tA", + "facility_code": 0, + "severity_code": 1, + }, + Tags: map[string]string{ + "severity": "alert", + "facility": "kern", + }, + Time: defaultTime, + }, + wantStrict: &testutil.Metric{ + Measurement: "syslog", + Fields: map[string]interface{}{ + "version": uint16(1), + "message": "\tA", + "facility_code": 0, + "severity_code": 1, + }, + Tags: map[string]string{ + "severity": "alert", + "facility": "kern", + }, + Time: defaultTime, + }, + }, } return testCases diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go index 6f8d959ec..5b22cbcad 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -10,6 +10,7 @@ import ( "strings" "sync" "time" + "unicode" "github.com/influxdata/go-syslog/rfc5424" "github.com/influxdata/go-syslog/rfc5425" @@ -71,9 +72,9 @@ var sampleConfig = ` ## Only applies to stream sockets (e.g. TCP). # max_connections = 1024 - ## Read timeout (default = 500ms). + ## Read timeout is the maximum time allowed for reading a single message (default = 5s). ## 0 means unlimited. - # read_timeout = 500ms + # read_timeout = "5s" ## Whether to parse in best effort mode or not (default = false). ## By default best effort parsing is off. @@ -365,7 +366,9 @@ func fields(msg rfc5424.SyslogMessage, s *Syslog) map[string]interface{} { } if msg.Message() != nil { - flds["message"] = strings.TrimSpace(*msg.Message()) + flds["message"] = strings.TrimRightFunc(*msg.Message(), func(r rune) bool { + return unicode.IsSpace(r) + }) } if msg.StructuredData() != nil { From d7db4be5ef96d67615aa5f3f9a26207fe7ec8655 Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Tue, 31 Jul 2018 18:16:51 -0600 Subject: [PATCH 0053/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6af318eed..f321c6960 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -53,6 +53,12 @@ - [#4492](https://github.com/influxdata/telegraf/pull/4492): Add support for lz4 compression to kafka output. - [#4450](https://github.com/influxdata/telegraf/pull/4450): Split multiple sensor keys in ipmi input. +## v1.7.3 [unreleased] + +### Bugfixes + +- [#4498](https://github.com/influxdata/telegraf/pull/4498): Keep leading whitespace for messages in syslog input. + ## v1.7.2 [2018-07-18] ### Bugfixes From 2adfccc97527d3ab17fbc083b785ae5576e0ff2e Mon Sep 17 00:00:00 2001 From: EthanHur Date: Thu, 2 Aug 2018 07:37:52 +0900 Subject: [PATCH 0054/1815] Update netstat link in README.md (#4494) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a0f396cad..ae5750ddf 100644 --- a/README.md +++ b/README.md @@ -198,7 +198,7 @@ configuration options. 
* [nats](./plugins/inputs/nats) * [net](./plugins/inputs/net) * [net_response](./plugins/inputs/net_response) -* [netstat](./plugins/inputs/netstat) +* [netstat](./plugins/inputs/net) * [nginx](./plugins/inputs/nginx) * [nginx_plus](./plugins/inputs/nginx_plus) * [nsq_consumer](./plugins/inputs/nsq_consumer) From 4fff507ad697456aad63b83cfd32272144aa23f6 Mon Sep 17 00:00:00 2001 From: Greg Date: Wed, 1 Aug 2018 16:39:19 -0600 Subject: [PATCH 0055/1815] Skip bad entries on interrupt input (#4497) --- plugins/inputs/interrupts/interrupts.go | 9 +- plugins/inputs/interrupts/interrupts_test.go | 132 ++++++++++++++++++- 2 files changed, 137 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/interrupts/interrupts.go b/plugins/inputs/interrupts/interrupts.go index 75cbf3be1..30b7ee182 100644 --- a/plugins/inputs/interrupts/interrupts.go +++ b/plugins/inputs/interrupts/interrupts.go @@ -3,12 +3,13 @@ package interrupts import ( "bufio" "fmt" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs" "io" "os" "strconv" "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" ) type Interrupts struct{} @@ -50,6 +51,8 @@ func parseInterrupts(r io.Reader) ([]IRQ, error) { } cpucount = len(cpus) } + +scan: for scanner.Scan() { fields := strings.Fields(scanner.Text()) if !strings.HasSuffix(fields[0], ":") { @@ -62,7 +65,7 @@ func parseInterrupts(r io.Reader) ([]IRQ, error) { if i < len(irqvals) { irqval, err := strconv.ParseInt(irqvals[i], 10, 64) if err != nil { - return irqs, fmt.Errorf("Unable to parse %q from %q: %s", irqvals[i], scanner.Text(), err) + continue scan } irq.Cpus = append(irq.Cpus, irqval) } diff --git a/plugins/inputs/interrupts/interrupts_test.go b/plugins/inputs/interrupts/interrupts_test.go index 6c76c8504..cf1dc949e 100644 --- a/plugins/inputs/interrupts/interrupts_test.go +++ b/plugins/inputs/interrupts/interrupts_test.go @@ -2,9 +2,10 @@ package interrupts import ( "bytes" + "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "testing" ) func TestParseInterrupts(t *testing.T) { @@ -58,3 +59,132 @@ TASKLET: 205 0` } } } + +// Tests #4470 +func TestParseInterruptsBad(t *testing.T) { + interruptStr := ` CPU0 CPU1 CPU2 CPU3 + 16: 0 0 0 0 bcm2836-timer 0 Edge arch_timer + 17: 127224250 118424219 127224437 117885416 bcm2836-timer 1 Edge arch_timer + 21: 0 0 0 0 bcm2836-pmu 9 Edge arm-pmu + 23: 1549514 0 0 0 ARMCTRL-level 1 Edge 3f00b880.mailbox + 24: 2 0 0 0 ARMCTRL-level 2 Edge VCHIQ doorbell + 46: 0 0 0 0 ARMCTRL-level 48 Edge bcm2708_fb dma + 48: 0 0 0 0 ARMCTRL-level 50 Edge DMA IRQ + 50: 0 0 0 0 ARMCTRL-level 52 Edge DMA IRQ + 51: 208 0 0 0 ARMCTRL-level 53 Edge DMA IRQ + 54: 883002 0 0 0 ARMCTRL-level 56 Edge DMA IRQ + 59: 0 0 0 0 ARMCTRL-level 61 Edge bcm2835-auxirq + 62: 521451447 0 0 0 ARMCTRL-level 64 Edge dwc_otg, dwc_otg_pcd, dwc_otg_hcd:usb1 + 86: 857597 0 0 0 ARMCTRL-level 88 Edge mmc0 + 87: 4938 0 0 0 ARMCTRL-level 89 Edge uart-pl011 + 92: 5669 0 0 0 ARMCTRL-level 94 Edge mmc1 + FIQ: usb_fiq + IPI0: 0 0 0 0 CPU wakeup interrupts + IPI1: 0 0 0 0 Timer broadcast interrupts + IPI2: 23564958 23464876 23531165 23040826 Rescheduling interrupts + IPI3: 148438 639704 644266 588150 Function call interrupts + IPI4: 0 0 0 0 CPU stop interrupts + IPI5: 4348149 1843985 3819457 1822877 IRQ work interrupts + IPI6: 0 0 0 0 completion interrupts` + f := bytes.NewBufferString(interruptStr) + parsed := []IRQ{ + IRQ{ + ID: "16", Type: "bcm2836-timer", Device: "0 Edge arch_timer", + 
Cpus: []int64{0, 0, 0, 0}, + }, + IRQ{ + ID: "17", Type: "bcm2836-timer", Device: "1 Edge arch_timer", + Cpus: []int64{127224250, 118424219, 127224437, 117885416}, Total: 490758322, + }, + IRQ{ + ID: "21", Type: "bcm2836-pmu", Device: "9 Edge arm-pmu", + Cpus: []int64{0, 0, 0, 0}, + }, + IRQ{ + ID: "23", Type: "ARMCTRL-level", Device: "1 Edge 3f00b880.mailbox", + Cpus: []int64{1549514, 0, 0, 0}, Total: 1549514, + }, + IRQ{ + ID: "24", Type: "ARMCTRL-level", Device: "2 Edge VCHIQ doorbell", + Cpus: []int64{2, 0, 0, 0}, Total: 2, + }, + IRQ{ + ID: "46", Type: "ARMCTRL-level", Device: "48 Edge bcm2708_fb dma", + Cpus: []int64{0, 0, 0, 0}, + }, + IRQ{ + ID: "48", Type: "ARMCTRL-level", Device: "50 Edge DMA IRQ", + Cpus: []int64{0, 0, 0, 0}, + }, + IRQ{ + ID: "50", Type: "ARMCTRL-level", Device: "52 Edge DMA IRQ", + Cpus: []int64{0, 0, 0, 0}, + }, + IRQ{ + ID: "51", Type: "ARMCTRL-level", Device: "53 Edge DMA IRQ", + Cpus: []int64{208, 0, 0, 0}, Total: 208, + }, + IRQ{ + ID: "54", Type: "ARMCTRL-level", Device: "56 Edge DMA IRQ", + Cpus: []int64{883002, 0, 0, 0}, Total: 883002, + }, + IRQ{ + ID: "59", Type: "ARMCTRL-level", Device: "61 Edge bcm2835-auxirq", + Cpus: []int64{0, 0, 0, 0}, + }, + IRQ{ + ID: "62", Type: "ARMCTRL-level", Device: "64 Edge dwc_otg, dwc_otg_pcd, dwc_otg_hcd:usb1", + Cpus: []int64{521451447, 0, 0, 0}, Total: 521451447, + }, + IRQ{ + ID: "86", Type: "ARMCTRL-level", Device: "88 Edge mmc0", + Cpus: []int64{857597, 0, 0, 0}, Total: 857597, + }, + IRQ{ + ID: "87", Type: "ARMCTRL-level", Device: "89 Edge uart-pl011", + Cpus: []int64{4938, 0, 0, 0}, Total: 4938, + }, + IRQ{ + ID: "92", Type: "ARMCTRL-level", Device: "94 Edge mmc1", + Cpus: []int64{5669, 0, 0, 0}, Total: 5669, + }, + IRQ{ + ID: "IPI0", Type: "CPU wakeup interrupts", + Cpus: []int64{0, 0, 0, 0}, + }, + IRQ{ + ID: "IPI1", Type: "Timer broadcast interrupts", + Cpus: []int64{0, 0, 0, 0}, + }, + IRQ{ + ID: "IPI2", Type: "Rescheduling interrupts", + Cpus: []int64{23564958, 23464876, 23531165, 23040826}, Total: 93601825, + }, + IRQ{ + ID: "IPI3", Type: "Function call interrupts", + Cpus: []int64{148438, 639704, 644266, 588150}, Total: 2020558, + }, + IRQ{ + ID: "IPI4", Type: "CPU stop interrupts", + Cpus: []int64{0, 0, 0, 0}, + }, + IRQ{ + ID: "IPI5", Type: "IRQ work interrupts", + Cpus: []int64{4348149, 1843985, 3819457, 1822877}, Total: 11834468, + }, + IRQ{ + ID: "IPI6", Type: "completion interrupts", + Cpus: []int64{0, 0, 0, 0}, + }, + } + got, err := parseInterrupts(f) + require.Equal(t, nil, err) + require.NotEqual(t, 0, len(got)) + require.Equal(t, len(got), len(parsed)) + for i := 0; i < len(parsed); i++ { + assert.Equal(t, parsed[i], got[i]) + for k := 0; k < len(parsed[i].Cpus); k++ { + assert.Equal(t, parsed[i].Cpus[k], got[i].Cpus[k]) + } + } +} From e5384339599161e48b8d32770cbb32a2c8e9df9a Mon Sep 17 00:00:00 2001 From: Shanshi Shi Date: Thu, 2 Aug 2018 06:39:54 +0800 Subject: [PATCH 0056/1815] Preserve metric type when using filters in output plugins (#4481) --- internal/models/running_output.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 713c28cce..a0d1f6b03 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -105,12 +105,13 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) { tags := m.Tags() fields := m.Fields() t := m.Time() + tp := m.Type() if ok := ro.Config.Filter.Apply(name, fields, tags); !ok { ro.MetricsFiltered.Incr(1) return } // error is not possible 
if creating from another metric, so ignore. - m, _ = metric.New(name, tags, fields, t) + m, _ = metric.New(name, tags, fields, t, tp) } ro.metrics.Add(m) From 66528354a585a2de30513c16e02bbe1e75aa1264 Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Wed, 1 Aug 2018 16:40:55 -0600 Subject: [PATCH 0057/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f321c6960..3431ed3cd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,6 +58,8 @@ ### Bugfixes - [#4498](https://github.com/influxdata/telegraf/pull/4498): Keep leading whitespace for messages in syslog input. +- [#4497](https://github.com/influxdata/telegraf/pull/4497): Skip bad entries on interrupt input. +- [#4481](https://github.com/influxdata/telegraf/pull/4481): Preserve metric type when using filters in output plugins. ## v1.7.2 [2018-07-18] ### Bugfixes From 199841a8205c38ea8198db1902df0759eaa9f128 Mon Sep 17 00:00:00 2001 From: david7482 Date: Thu, 2 Aug 2018 06:43:17 +0800 Subject: [PATCH 0058/1815] Support StatisticValues in cloudwatch output plugin (#4364) --- plugins/outputs/cloudwatch/README.md | 10 + plugins/outputs/cloudwatch/cloudwatch.go | 293 ++++++++++++++---- plugins/outputs/cloudwatch/cloudwatch_test.go | 38 ++- 3 files changed, 286 insertions(+), 55 deletions(-) diff --git a/plugins/outputs/cloudwatch/README.md b/plugins/outputs/cloudwatch/README.md index c44ac4ead..31619263f 100644 --- a/plugins/outputs/cloudwatch/README.md +++ b/plugins/outputs/cloudwatch/README.md @@ -36,3 +36,13 @@ Examples include but are not limited to: ### namespace The namespace used for AWS CloudWatch metrics. + +### write_statistics + +If you have a large number of metrics, consider sending statistic +values instead of raw metrics, which can both improve performance and +reduce AWS API cost. If you enable this flag, the plugin parses the required +[CloudWatch statistic fields](https://docs.aws.amazon.com/sdk-for-go/api/service/cloudwatch/#StatisticSet) +(count, min, max, and sum) and sends them to CloudWatch. You can use the `basicstats` +aggregator to calculate those fields. If not all statistic fields are available, +the fields are still sent as raw metrics. 
\ No newline at end of file diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index 52ab41a28..d3bd66303 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -28,6 +28,128 @@ type CloudWatch struct { Namespace string `toml:"namespace"` // CloudWatch Metrics Namespace svc *cloudwatch.CloudWatch + + WriteStatistics bool `toml:"write_statistics"` +} + +type statisticType int + +const ( + statisticTypeNone statisticType = iota + statisticTypeMax + statisticTypeMin + statisticTypeSum + statisticTypeCount +) + +type cloudwatchField interface { + addValue(sType statisticType, value float64) + buildDatum() []*cloudwatch.MetricDatum +} + +type statisticField struct { + metricName string + fieldName string + tags map[string]string + values map[statisticType]float64 + timestamp time.Time +} + +func (f *statisticField) addValue(sType statisticType, value float64) { + if sType != statisticTypeNone { + f.values[sType] = value + } +} + +func (f *statisticField) buildDatum() []*cloudwatch.MetricDatum { + + var datums []*cloudwatch.MetricDatum + + if f.hasAllFields() { + // If we have all required fields, we build datum with StatisticValues + min, _ := f.values[statisticTypeMin] + max, _ := f.values[statisticTypeMax] + sum, _ := f.values[statisticTypeSum] + count, _ := f.values[statisticTypeCount] + + datum := &cloudwatch.MetricDatum{ + MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")), + Dimensions: BuildDimensions(f.tags), + Timestamp: aws.Time(f.timestamp), + StatisticValues: &cloudwatch.StatisticSet{ + Minimum: aws.Float64(min), + Maximum: aws.Float64(max), + Sum: aws.Float64(sum), + SampleCount: aws.Float64(count), + }, + } + + datums = append(datums, datum) + + } else { + // If we don't have all required fields, we build each field as independent datum + for sType, value := range f.values { + datum := &cloudwatch.MetricDatum{ + Value: aws.Float64(value), + Dimensions: BuildDimensions(f.tags), + Timestamp: aws.Time(f.timestamp), + } + + switch sType { + case statisticTypeMin: + datum.MetricName = aws.String(strings.Join([]string{f.metricName, f.fieldName, "min"}, "_")) + case statisticTypeMax: + datum.MetricName = aws.String(strings.Join([]string{f.metricName, f.fieldName, "max"}, "_")) + case statisticTypeSum: + datum.MetricName = aws.String(strings.Join([]string{f.metricName, f.fieldName, "sum"}, "_")) + case statisticTypeCount: + datum.MetricName = aws.String(strings.Join([]string{f.metricName, f.fieldName, "count"}, "_")) + default: + // should not be here + continue + } + + datums = append(datums, datum) + } + } + + return datums +} + +func (f *statisticField) hasAllFields() bool { + + _, hasMin := f.values[statisticTypeMin] + _, hasMax := f.values[statisticTypeMax] + _, hasSum := f.values[statisticTypeSum] + _, hasCount := f.values[statisticTypeCount] + + return hasMin && hasMax && hasSum && hasCount +} + +type valueField struct { + metricName string + fieldName string + tags map[string]string + value float64 + timestamp time.Time +} + +func (f *valueField) addValue(sType statisticType, value float64) { + if sType == statisticTypeNone { + f.value = value + } +} + +func (f *valueField) buildDatum() []*cloudwatch.MetricDatum { + + return []*cloudwatch.MetricDatum{ + { + MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")), + Value: aws.Float64(f.value), + Dimensions: BuildDimensions(f.tags), + Timestamp: aws.Time(f.timestamp), + }, + } } var 
sampleConfig = ` @@ -57,6 +179,14 @@ var sampleConfig = ` ## Namespace for the CloudWatch MetricDatums namespace = "InfluxData/Telegraf" + + ## If you have a large number of metrics, consider sending statistic + ## values instead of raw metrics, which can both improve performance and + ## reduce AWS API cost. If you enable this flag, the plugin parses the required + ## CloudWatch statistic fields (count, min, max, and sum) and sends them to CloudWatch. + ## You can use the basicstats aggregator to calculate those fields. If not all statistic + ## fields are available, the fields are still sent as raw metrics. + # write_statistics = false ` func (c *CloudWatch) SampleConfig() string { @@ -104,7 +234,7 @@ func (c *CloudWatch) Write(metrics []telegraf.Metric) error { var datums []*cloudwatch.MetricDatum for _, m := range metrics { - d := BuildMetricDatum(m) + d := BuildMetricDatum(c.WriteStatistics, m) datums = append(datums, d...) } @@ -159,67 +289,58 @@ PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch return partitions } -// Make a MetricDatum for each field in a Point. Only fields with values that can be -// converted to float64 are supported. Non-supported fields are skipped. -func BuildMetricDatum(point telegraf.Metric) []*cloudwatch.MetricDatum { - datums := make([]*cloudwatch.MetricDatum, len(point.Fields())) - i := 0 +// Make a MetricDatum from a telegraf.Metric. It checks whether all required fields of +// cloudwatch.StatisticSet are available. If so, it builds the MetricDatum from statistic values. +// Otherwise, the fields are still built independently. +func BuildMetricDatum(buildStatistic bool, point telegraf.Metric) []*cloudwatch.MetricDatum { - var value float64 + fields := make(map[string]cloudwatchField) for k, v := range point.Fields() { - switch t := v.(type) { - case int: - value = float64(t) - case int32: - value = float64(t) - case int64: - value = float64(t) - case uint64: - value = float64(t) - case float64: - value = t - case bool: - if t { - value = 1 - } else { - value = 0 + + val, ok := convert(v) + if !ok { + // Only fields with values that can be converted to float64 (and within CloudWatch boundary) are supported. + // Non-supported fields are skipped. + continue + } + + sType, fieldName := getStatisticType(k) + + // If statistic metrics are not enabled, or this is not a statistic type, just take the current field as a value field. + if !buildStatistic || sType == statisticTypeNone { + fields[k] = &valueField{ + metricName: point.Name(), + fieldName: k, + tags: point.Tags(), + timestamp: point.Time(), + value: val, } - case time.Time: - value = float64(t.Unix()) - default: - // Skip unsupported type. - datums = datums[:len(datums)-1] continue } - // Do CloudWatch boundary checking - // Constraints at: http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html - if math.IsNaN(value) { - datums = datums[:len(datums)-1] - continue - } - if math.IsInf(value, 0) { - datums = datums[:len(datums)-1] - continue - } - if value > 0 && value < float64(8.515920e-109) { - datums = datums[:len(datums)-1] - continue - } - if value > float64(1.174271e+108) { - datums = datums[:len(datums)-1] - continue + // Otherwise, it is a statistic field. 
+ if _, ok := fields[fieldName]; !ok { + // Hit an uncached field, create statisticField for first time + fields[fieldName] = &statisticField{ + metricName: point.Name(), + fieldName: fieldName, + tags: point.Tags(), + timestamp: point.Time(), + values: map[statisticType]float64{ + sType: val, + }, + } + } else { + // Add new statistic value to this field + fields[fieldName].addValue(sType, val) } + } - datums[i] = &cloudwatch.MetricDatum{ - MetricName: aws.String(strings.Join([]string{point.Name(), k}, "_")), - Value: aws.Float64(value), - Dimensions: BuildDimensions(point.Tags()), - Timestamp: aws.Time(point.Time()), - } - - i += 1 + var datums []*cloudwatch.MetricDatum + for _, f := range fields { + d := f.buildDatum() + datums = append(datums, d...) } return datums @@ -268,6 +389,72 @@ func BuildDimensions(mTags map[string]string) []*cloudwatch.Dimension { return dimensions } +func getStatisticType(name string) (sType statisticType, fieldName string) { + switch { + case strings.HasSuffix(name, "_max"): + sType = statisticTypeMax + fieldName = strings.TrimSuffix(name, "_max") + case strings.HasSuffix(name, "_min"): + sType = statisticTypeMin + fieldName = strings.TrimSuffix(name, "_min") + case strings.HasSuffix(name, "_sum"): + sType = statisticTypeSum + fieldName = strings.TrimSuffix(name, "_sum") + case strings.HasSuffix(name, "_count"): + sType = statisticTypeCount + fieldName = strings.TrimSuffix(name, "_count") + default: + sType = statisticTypeNone + fieldName = name + } + return +} + +func convert(v interface{}) (value float64, ok bool) { + + ok = true + + switch t := v.(type) { + case int: + value = float64(t) + case int32: + value = float64(t) + case int64: + value = float64(t) + case uint64: + value = float64(t) + case float64: + value = t + case bool: + if t { + value = 1 + } else { + value = 0 + } + case time.Time: + value = float64(t.Unix()) + default: + // Skip unsupported type. 
+ ok = false + return + } + + // Do CloudWatch boundary checking + // Constraints at: http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html + switch { + case math.IsNaN(value): + return 0, false + case math.IsInf(value, 0): + return 0, false + case value > 0 && value < float64(8.515920e-109): + return 0, false + case value > float64(1.174271e+108): + return 0, false + } + + return +} + func init() { outputs.Add("cloudwatch", func() telegraf.Output { return &CloudWatch{} diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go index 8ab60de2f..c91c30e0c 100644 --- a/plugins/outputs/cloudwatch/cloudwatch_test.go +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -5,11 +5,13 @@ import ( "math" "sort" "testing" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatch" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" @@ -72,13 +74,45 @@ func TestBuildMetricDatums(t *testing.T) { testutil.TestMetric(float64(1.174272e+108)), // largest should be 1.174271e+108 } for _, point := range validMetrics { - datums := BuildMetricDatum(point) + datums := BuildMetricDatum(false, point) assert.Equal(1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", point)) } for _, point := range invalidMetrics { - datums := BuildMetricDatum(point) + datums := BuildMetricDatum(false, point) assert.Equal(0, len(datums), fmt.Sprintf("Valid point should not create a Datum {value: %v}", point)) } + + statisticMetric, _ := metric.New( + "test1", + map[string]string{"tag1": "value1"}, + map[string]interface{}{"value_max": float64(10), "value_min": float64(0), "value_sum": float64(100), "value_count": float64(20)}, + time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + datums := BuildMetricDatum(true, statisticMetric) + assert.Equal(1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", statisticMetric)) + + multiFieldsMetric, _ := metric.New( + "test1", + map[string]string{"tag1": "value1"}, + map[string]interface{}{"valueA": float64(10), "valueB": float64(0), "valueC": float64(100), "valueD": float64(20)}, + time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + datums = BuildMetricDatum(true, multiFieldsMetric) + assert.Equal(4, len(datums), fmt.Sprintf("Each field should create a Datum {value: %v}", multiFieldsMetric)) + + multiStatisticMetric, _ := metric.New( + "test1", + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "valueA_max": float64(10), "valueA_min": float64(0), "valueA_sum": float64(100), "valueA_count": float64(20), + "valueB_max": float64(10), "valueB_min": float64(0), "valueB_sum": float64(100), "valueB_count": float64(20), + "valueC_max": float64(10), "valueC_min": float64(0), "valueC_sum": float64(100), + "valueD": float64(10), "valueE": float64(0), + }, + time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + datums = BuildMetricDatum(true, multiStatisticMetric) + assert.Equal(7, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", multiStatisticMetric)) } func TestPartitionDatums(t *testing.T) { From e1160c26bceab81e150a34c6f6366915ad045828 Mon Sep 17 00:00:00 2001 From: dupondje Date: Thu, 2 Aug 2018 00:43:34 +0200 Subject: [PATCH 0059/1815] Add ip restriction for the prometheus_client output (#4431) --- etc/telegraf.conf | 3 ++ plugins/outputs/prometheus_client/README.md | 3 
++ .../prometheus_client/prometheus_client.go | 30 +++++++++++++++++-- 3 files changed, 34 insertions(+), 2 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 912a93d10..c3a84569d 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -747,6 +747,9 @@ # #basic_username = "Foo" # #basic_password = "Bar" # +# ## IP Ranges which are allowed to access metrics +# #ip_range = ["192.168.0.0/24", "192.168.1.0/30"] +# # ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration # # expiration_interval = "60s" # diff --git a/plugins/outputs/prometheus_client/README.md b/plugins/outputs/prometheus_client/README.md index 6cb0cc59e..d68cafe9d 100644 --- a/plugins/outputs/prometheus_client/README.md +++ b/plugins/outputs/prometheus_client/README.md @@ -18,6 +18,9 @@ This plugin starts a [Prometheus](https://prometheus.io/) Client, it exposes all basic_username = "Foo" basic_password = "Bar" + # IP Ranges which are allowed to access metrics + ip_range = ["192.168.0.0/24", "192.168.1.0/30"] + # Path to publish the metrics on, defaults to /metrics path = "/metrics" diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index b82c72cf0..9634e9227 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -5,6 +5,7 @@ import ( "crypto/subtle" "fmt" "log" + "net" "net/http" "os" "regexp" @@ -58,6 +59,7 @@ type PrometheusClient struct { TLSKey string `toml:"tls_key"` BasicUsername string `toml:"basic_username"` BasicPassword string `toml:"basic_password"` + IPRange []string `toml:"ip_range"` ExpirationInterval internal.Duration `toml:"expiration_interval"` Path string `toml:"path"` CollectorsExclude []string `toml:"collectors_exclude"` @@ -84,6 +86,9 @@ var sampleConfig = ` #basic_username = "Foo" #basic_password = "Bar" + ## IP Ranges which are allowed to access metrics + #ip_range = ["192.168.0.0/24", "192.168.1.0/30"] + ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration # expiration_interval = "60s" @@ -96,7 +101,7 @@ var sampleConfig = ` string_as_label = true ` -func (p *PrometheusClient) basicAuth(h http.Handler) http.Handler { +func (p *PrometheusClient) auth(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if p.BasicUsername != "" && p.BasicPassword != "" { w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`) @@ -110,6 +115,27 @@ func (p *PrometheusClient) basicAuth(h http.Handler) http.Handler { } } + if len(p.IPRange) > 0 { + matched := false + remoteIPs, _, _ := net.SplitHostPort(r.RemoteAddr) + remoteIP := net.ParseIP(remoteIPs) + for _, iprange := range p.IPRange { + _, ipNet, err := net.ParseCIDR(iprange) + if err != nil { + http.Error(w, "Config Error in ip_range setting", 500) + return + } + if ipNet.Contains(remoteIP) { + matched = true + break + } + } + if !matched { + http.Error(w, "Not authorized", 401) + return + } + } + h.ServeHTTP(w, r) }) } @@ -146,7 +172,7 @@ func (p *PrometheusClient) Start() error { } mux := http.NewServeMux() - mux.Handle(p.Path, p.basicAuth(promhttp.HandlerFor( + mux.Handle(p.Path, p.auth(promhttp.HandlerFor( registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}))) p.server = &http.Server{ From 429d14101a7310d41695bfcddc70e1559cacb521 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Blot?= Date: Thu, 2 Aug 2018 00:44:10 +0200 Subject: [PATCH 0060/1815] Add pgbouncer 
input plugin (#3918)

---
 README.md                                    |   1 +
 docker-compose.yml                           |   7 +
 plugins/inputs/all/all.go                    |   1 +
 plugins/inputs/pgbouncer/README.md           |  21 ++
 plugins/inputs/pgbouncer/pgbouncer.go        | 179 ++++++++++++++++++
 plugins/inputs/pgbouncer/pgbouncer_test.go   |  66 +++++++
 plugins/inputs/postgresql/postgresql.go      |   1 +
 plugins/inputs/postgresql/postgresql_test.go |   1 +
 plugins/inputs/postgresql/service.go         |  33 +++-
 .../postgresql_extensible.go                 |   1 +
 .../postgresql_extensible_test.go            |   1 +
 11 files changed, 311 insertions(+), 1 deletion(-)
 create mode 100644 plugins/inputs/pgbouncer/README.md
 create mode 100644 plugins/inputs/pgbouncer/pgbouncer.go
 create mode 100644 plugins/inputs/pgbouncer/pgbouncer_test.go

diff --git a/README.md b/README.md
index ae5750ddf..6307b5356 100644
--- a/README.md
+++ b/README.md
@@ -209,6 +209,7 @@ configuration options.
 * [openldap](./plugins/inputs/openldap)
 * [opensmtpd](./plugins/inputs/opensmtpd)
 * [pf](./plugins/inputs/pf)
+* [pgbouncer](./plugins/inputs/pgbouncer)
 * [phpfpm](./plugins/inputs/phpfpm)
 * [phusion passenger](./plugins/inputs/passenger)
 * [ping](./plugins/inputs/ping)
diff --git a/docker-compose.yml b/docker-compose.yml
index 822d7fff1..5ac47089d 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -40,6 +40,13 @@ services:
     image: memcached
     ports:
       - "11211:11211"
+  pgbouncer:
+    image: mbed/pgbouncer
+    environment:
+      PG_ENV_POSTGRESQL_USER: pgbouncer
+      PG_ENV_POSTGRESQL_PASS: pgbouncer
+    ports:
+      - "6432:6432"
   postgres:
     image: postgres:alpine
     ports:
diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 4d46a5490..bbca99521 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -84,6 +84,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd"
 	_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
 	_ "github.com/influxdata/telegraf/plugins/inputs/pf"
+	_ "github.com/influxdata/telegraf/plugins/inputs/pgbouncer"
 	_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
 	_ "github.com/influxdata/telegraf/plugins/inputs/ping"
 	_ "github.com/influxdata/telegraf/plugins/inputs/postfix"
diff --git a/plugins/inputs/pgbouncer/README.md b/plugins/inputs/pgbouncer/README.md
new file mode 100644
index 000000000..2a841c45a
--- /dev/null
+++ b/plugins/inputs/pgbouncer/README.md
@@ -0,0 +1,21 @@
+# PgBouncer plugin
+
+This plugin provides metrics for your PgBouncer connection pooler.
+
+More information about the meaning of these metrics can be found in the [PgBouncer Documentation](https://pgbouncer.github.io/usage.html).
+
+## Configuration
+Specify the address via a URL matching:
+
+  `postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]`
+
+All connection parameters are optional.
+
+Without the dbname parameter, the driver will default to a database with the same name as the user.
+This dbname is only used to instantiate a connection with the server; it does not restrict the databases that metrics are gathered for.
+
+### Configuration example
+```
+[[inputs.pgbouncer]]
+  address = "postgres://telegraf@localhost/pgbouncer"
+```
diff --git a/plugins/inputs/pgbouncer/pgbouncer.go b/plugins/inputs/pgbouncer/pgbouncer.go
new file mode 100644
index 000000000..722648c48
--- /dev/null
+++ b/plugins/inputs/pgbouncer/pgbouncer.go
@@ -0,0 +1,179 @@
+package pgbouncer
+
+import (
+	"bytes"
+	"github.com/influxdata/telegraf/plugins/inputs/postgresql"
+
+	// register the pgx sql driver
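+	// (blank import: only the driver's init side effects are needed, so
+	// that the shared postgresql.Service can sql.Open("pgx", ...))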
+ _ "github.com/jackc/pgx/stdlib" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type PgBouncer struct { + postgresql.Service +} + +var ignoredColumns = map[string]bool{"user": true, "database": true, "pool_mode": true, + "avg_req": true, "avg_recv": true, "avg_sent": true, "avg_query": true, +} + +var sampleConfig = ` + ## specify address via a url matching: + ## postgres://[pqgotest[:password]]@localhost[/dbname]\ + ## ?sslmode=[disable|verify-ca|verify-full] + ## or a simple string: + ## host=localhost user=pqotest password=... sslmode=... dbname=app_production + ## + ## All connection parameters are optional. + ## + address = "host=localhost user=pgbouncer sslmode=disable" +` + +func (p *PgBouncer) SampleConfig() string { + return sampleConfig +} + +func (p *PgBouncer) Description() string { + return "Read metrics from one or many pgbouncer servers" +} + +func (p *PgBouncer) Gather(acc telegraf.Accumulator) error { + var ( + err error + query string + columns []string + ) + + query = `SHOW STATS` + + rows, err := p.DB.Query(query) + if err != nil { + return err + } + + defer rows.Close() + + // grab the column information from the result + if columns, err = rows.Columns(); err != nil { + return err + } + + for rows.Next() { + tags, columnMap, err := p.accRow(rows, acc, columns) + + if err != nil { + return err + } + + fields := make(map[string]interface{}) + for col, val := range columnMap { + _, ignore := ignoredColumns[col] + if !ignore { + fields[col] = *val + } + } + acc.AddFields("pgbouncer", fields, tags) + } + + query = `SHOW POOLS` + + poolRows, err := p.DB.Query(query) + if err != nil { + return err + } + + defer poolRows.Close() + + // grab the column information from the result + if columns, err = poolRows.Columns(); err != nil { + return err + } + + for poolRows.Next() { + tags, columnMap, err := p.accRow(poolRows, acc, columns) + if err != nil { + return err + } + + if s, ok := (*columnMap["user"]).(string); ok && s != "" { + tags["user"] = s + } + + if s, ok := (*columnMap["pool_mode"]).(string); ok && s != "" { + tags["pool_mode"] = s + } + + fields := make(map[string]interface{}) + for col, val := range columnMap { + _, ignore := ignoredColumns[col] + if !ignore { + fields[col] = *val + } + } + acc.AddFields("pgbouncer_pools", fields, tags) + } + + return poolRows.Err() +} + +type scanner interface { + Scan(dest ...interface{}) error +} + +func (p *PgBouncer) accRow(row scanner, acc telegraf.Accumulator, columns []string) (map[string]string, + map[string]*interface{}, error) { + var columnVars []interface{} + var dbname bytes.Buffer + + // this is where we'll store the column name with its *interface{} + columnMap := make(map[string]*interface{}) + + for _, column := range columns { + columnMap[column] = new(interface{}) + } + + // populate the array of interface{} with the pointers in the right order + for i := 0; i < len(columnMap); i++ { + columnVars = append(columnVars, columnMap[columns[i]]) + } + + // deconstruct array of variables and send to Scan + err := row.Scan(columnVars...) 
+ + if err != nil { + return nil, nil, err + } + if columnMap["database"] != nil { + // extract the database name from the column map + dbname.WriteString((*columnMap["database"]).(string)) + } else { + dbname.WriteString("postgres") + } + + var tagAddress string + tagAddress, err = p.SanitizedAddress() + if err != nil { + return nil, nil, err + } + + // Return basic tags and the mapped columns + return map[string]string{"server": tagAddress, "db": dbname.String()}, columnMap, nil +} + +func init() { + inputs.Add("pgbouncer", func() telegraf.Input { + return &PgBouncer{ + Service: postgresql.Service{ + MaxIdle: 1, + MaxOpen: 1, + MaxLifetime: internal.Duration{ + Duration: 0, + }, + IsPgBouncer: true, + }, + } + }) +} diff --git a/plugins/inputs/pgbouncer/pgbouncer_test.go b/plugins/inputs/pgbouncer/pgbouncer_test.go new file mode 100644 index 000000000..44e28c7f3 --- /dev/null +++ b/plugins/inputs/pgbouncer/pgbouncer_test.go @@ -0,0 +1,66 @@ +package pgbouncer + +import ( + "fmt" + "github.com/influxdata/telegraf/plugins/inputs/postgresql" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "testing" +) + +func TestPgBouncerGeneratesMetrics(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + p := &PgBouncer{ + Service: postgresql.Service{ + Address: fmt.Sprintf( + "host=%s user=pgbouncer password=pgbouncer dbname=pgbouncer port=6432 sslmode=disable", + testutil.GetLocalHost(), + ), + IsPgBouncer: true, + }, + } + + var acc testutil.Accumulator + require.NoError(t, p.Start(&acc)) + require.NoError(t, p.Gather(&acc)) + + intMetrics := []string{ + "total_requests", + "total_received", + "total_sent", + "total_query_time", + "avg_req", + "avg_recv", + "avg_sent", + "avg_query", + "cl_active", + "cl_waiting", + "sv_active", + "sv_idle", + "sv_used", + "sv_tested", + "sv_login", + "maxwait", + } + + int32Metrics := []string{} + + metricsCounted := 0 + + for _, metric := range intMetrics { + assert.True(t, acc.HasInt64Field("pgbouncer", metric)) + metricsCounted++ + } + + for _, metric := range int32Metrics { + assert.True(t, acc.HasInt32Field("pgbouncer", metric)) + metricsCounted++ + } + + assert.True(t, metricsCounted > 0) + assert.Equal(t, len(intMetrics)+len(int32Metrics), metricsCounted) +} diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index 19c9db9ce..e136098f4 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -189,6 +189,7 @@ func init() { MaxLifetime: internal.Duration{ Duration: 0, }, + IsPgBouncer: false, }, } }) diff --git a/plugins/inputs/postgresql/postgresql_test.go b/plugins/inputs/postgresql/postgresql_test.go index 306dca3b6..b23321019 100644 --- a/plugins/inputs/postgresql/postgresql_test.go +++ b/plugins/inputs/postgresql/postgresql_test.go @@ -20,6 +20,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { "host=%s user=postgres sslmode=disable", testutil.GetLocalHost(), ), + IsPgBouncer: false, }, Databases: []string{"postgres"}, } diff --git a/plugins/inputs/postgresql/service.go b/plugins/inputs/postgresql/service.go index 4f7b21e54..9d3ab3963 100644 --- a/plugins/inputs/postgresql/service.go +++ b/plugins/inputs/postgresql/service.go @@ -3,6 +3,9 @@ package postgresql import ( "database/sql" "fmt" + "github.com/jackc/pgx" + "github.com/jackc/pgx/pgtype" + "github.com/jackc/pgx/stdlib" "net" "net/url" "regexp" @@ -90,6 +93,7 @@ type Service struct { 
MaxOpen int MaxLifetime internal.Duration DB *sql.DB + IsPgBouncer bool } // Start starts the ServiceInput's service, whatever that may be @@ -100,7 +104,34 @@ func (p *Service) Start(telegraf.Accumulator) (err error) { p.Address = localhost } - if p.DB, err = sql.Open("pgx", p.Address); err != nil { + connectionString := p.Address + + // Specific support to make it work with PgBouncer too + // See https://github.com/influxdata/telegraf/issues/3253#issuecomment-357505343 + if p.IsPgBouncer { + d := &stdlib.DriverConfig{ + ConnConfig: pgx.ConnConfig{ + PreferSimpleProtocol: true, + RuntimeParams: map[string]string{ + "client_encoding": "UTF8", + }, + CustomConnInfo: func(c *pgx.Conn) (*pgtype.ConnInfo, error) { + info := c.ConnInfo.DeepCopy() + info.RegisterDataType(pgtype.DataType{ + Value: &pgtype.OIDValue{}, + Name: "int8OID", + OID: pgtype.Int8OID, + }) + + return info, nil + }, + }, + } + stdlib.RegisterDriverConfig(d) + connectionString = d.ConnectionString(p.Address) + } + + if p.DB, err = sql.Open("pgx", connectionString); err != nil { return err } diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 056f4afc8..a04382888 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -283,6 +283,7 @@ func init() { MaxLifetime: internal.Duration{ Duration: 0, }, + IsPgBouncer: false, }, } }) diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go index 77db5feb5..0f9358da6 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go @@ -17,6 +17,7 @@ func queryRunner(t *testing.T, q query) *testutil.Accumulator { "host=%s user=postgres sslmode=disable", testutil.GetLocalHost(), ), + IsPgBouncer: false, }, Databases: []string{"postgres"}, Query: q, From 0759c8b22b922226fbe5cabf19896c4796babea5 Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Wed, 1 Aug 2018 16:45:52 -0600 Subject: [PATCH 0061/1815] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3431ed3cd..aab301c40 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ - [tengine](./plugins/inputs/tengine/README.md) - Contributed by @ertaoxu - [x509_cert](./plugins/inputs/x509_cert/README.md) - Contributed by @jtyr - [filecount](./plugins/inputs/filecount/README.md) - Contributed by @sometimesfood +- [pgbouncer](./plugins/inputs/pgbouncer/README.md) - Contributed by @nerzhul ### New Processors @@ -52,6 +53,9 @@ - [#4491](https://github.com/influxdata/telegraf/pull/4491): Send all messages before waiting for results in kafka output. - [#4492](https://github.com/influxdata/telegraf/pull/4492): Add support for lz4 compression to kafka output. - [#4450](https://github.com/influxdata/telegraf/pull/4450): Split multiple sensor keys in ipmi input. +- [#4364](https://github.com/influxdata/telegraf/pull/4364): Support StatisticValues in cloudwatch output plugin. +- [#4431](https://github.com/influxdata/telegraf/pull/4431): Add ip restriction for the prometheus_client output. +- [#3918](https://github.com/influxdata/telegraf/pull/3918): Add pgbouncer input plugin. 
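The pgbouncer input referenced in the entry above works because `service.go` forces pgx's simple query protocol; PgBouncer's admin console does not implement the extended (prepared-statement) protocol. A minimal standalone sketch of that registration, using only APIs that appear in the patch and a placeholder DSN:

```go
package main

import (
	"database/sql"
	"log"

	"github.com/jackc/pgx"
	"github.com/jackc/pgx/stdlib"
)

func main() {
	// Force the simple query protocol; PgBouncer's admin console rejects
	// the extended protocol that pgx would otherwise use.
	d := &stdlib.DriverConfig{
		ConnConfig: pgx.ConnConfig{PreferSimpleProtocol: true},
	}
	stdlib.RegisterDriverConfig(d)

	// d.ConnectionString wraps the DSN so database/sql resolves the
	// driver config registered above (address below is a placeholder).
	db, err := sql.Open("pgx", d.ConnectionString("host=localhost port=6432 user=pgbouncer sslmode=disable"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// SHOW STATS is the same admin-console query the plugin issues.
	rows, err := db.Query("SHOW STATS")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
}
```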
## v1.7.3 [unreleased] From a5409d7cf227769da124bd6c93bc700d92a31fa5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 7 Aug 2018 11:07:07 -0700 Subject: [PATCH 0062/1815] Use explicit zpool properties to fix parse error on FreeBSD 11.2 (#4510) --- plugins/inputs/zfs/zfs_freebsd.go | 14 +++++++++----- plugins/inputs/zfs/zfs_freebsd_test.go | 14 +++++++------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/plugins/inputs/zfs/zfs_freebsd.go b/plugins/inputs/zfs/zfs_freebsd.go index 63bbdd6e6..51c20682e 100644 --- a/plugins/inputs/zfs/zfs_freebsd.go +++ b/plugins/inputs/zfs/zfs_freebsd.go @@ -30,7 +30,11 @@ func (z *Zfs) gatherPoolStats(acc telegraf.Accumulator) (string, error) { if z.PoolMetrics { for _, line := range lines { col := strings.Split(line, "\t") - tags := map[string]string{"pool": col[0], "health": col[8]} + if len(col) != 8 { + continue + } + + tags := map[string]string{"pool": col[0], "health": col[1]} fields := map[string]interface{}{} if tags["health"] == "UNAVAIL" { @@ -39,19 +43,19 @@ func (z *Zfs) gatherPoolStats(acc telegraf.Accumulator) (string, error) { } else { - size, err := strconv.ParseInt(col[1], 10, 64) + size, err := strconv.ParseInt(col[2], 10, 64) if err != nil { return "", fmt.Errorf("Error parsing size: %s", err) } fields["size"] = size - alloc, err := strconv.ParseInt(col[2], 10, 64) + alloc, err := strconv.ParseInt(col[3], 10, 64) if err != nil { return "", fmt.Errorf("Error parsing allocation: %s", err) } fields["allocated"] = alloc - free, err := strconv.ParseInt(col[3], 10, 64) + free, err := strconv.ParseInt(col[4], 10, 64) if err != nil { return "", fmt.Errorf("Error parsing free: %s", err) } @@ -130,7 +134,7 @@ func run(command string, args ...string) ([]string, error) { } func zpool() ([]string, error) { - return run("zpool", []string{"list", "-Hp"}...) + return run("zpool", []string{"list", "-Hp", "-o", "name,health,size,alloc,free,fragmentation,capacity,dedupratio"}...) 
} func sysctl(metric string) ([]string, error) { diff --git a/plugins/inputs/zfs/zfs_freebsd_test.go b/plugins/inputs/zfs/zfs_freebsd_test.go index 60b95a39d..dba135cfd 100644 --- a/plugins/inputs/zfs/zfs_freebsd_test.go +++ b/plugins/inputs/zfs/zfs_freebsd_test.go @@ -10,21 +10,21 @@ import ( "github.com/stretchr/testify/require" ) -// $ zpool list -Hp +// $ zpool list -Hp -o name,health,size,alloc,free,fragmentation,capacity,dedupratio var zpool_output = []string{ - "freenas-boot 30601641984 2022177280 28579464704 - - 6 1.00x ONLINE -", - "red1 8933531975680 1126164848640 7807367127040 - 8% 12 1.83x ONLINE /mnt", - "temp1 2989297238016 1626309320704 1362987917312 - 38% 54 1.28x ONLINE /mnt", - "temp2 2989297238016 626958278656 2362338959360 - 12% 20 1.00x ONLINE /mnt", + "freenas-boot ONLINE 30601641984 2022177280 28579464704 - 6 1.00x", + "red1 ONLINE 8933531975680 1126164848640 7807367127040 8% 12 1.83x", + "temp1 ONLINE 2989297238016 1626309320704 1362987917312 38% 54 1.28x", + "temp2 ONLINE 2989297238016 626958278656 2362338959360 12% 20 1.00x", } func mock_zpool() ([]string, error) { return zpool_output, nil } -// $ zpool list -Hp +// $ zpool list -Hp -o name,health,size,alloc,free,fragmentation,capacity,dedupratio var zpool_output_unavail = []string{ - "temp2 - - - - - - - UNAVAIL -", + "temp2 UNAVAIL - - - - - -", } func mock_zpool_unavail() ([]string, error) { From 4dfb80d0fca1746d31b87c3a7398edd987b945e3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 7 Aug 2018 11:07:46 -0700 Subject: [PATCH 0063/1815] Fix error message if URL is unparseable in influxdb output (#4511) --- plugins/outputs/influxdb/influxdb.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index f80722bc3..bd53d4ed4 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -141,36 +141,36 @@ func (i *InfluxDB) Connect() error { } for _, u := range urls { - u, err := url.Parse(u) + parts, err := url.Parse(u) if err != nil { - return fmt.Errorf("error parsing url [%s]: %v", u, err) + return fmt.Errorf("error parsing url [%q]: %v", u, err) } var proxy *url.URL if len(i.HTTPProxy) > 0 { proxy, err = url.Parse(i.HTTPProxy) if err != nil { - return fmt.Errorf("error parsing proxy_url [%s]: %v", proxy, err) + return fmt.Errorf("error parsing proxy_url [%s]: %v", i.HTTPProxy, err) } } - switch u.Scheme { + switch parts.Scheme { case "udp", "udp4", "udp6": - c, err := i.udpClient(u) + c, err := i.udpClient(parts) if err != nil { return err } i.clients = append(i.clients, c) case "http", "https", "unix": - c, err := i.httpClient(ctx, u, proxy) + c, err := i.httpClient(ctx, parts, proxy) if err != nil { return err } i.clients = append(i.clients, c) default: - return fmt.Errorf("unsupported scheme [%s]: %q", u, u.Scheme) + return fmt.Errorf("unsupported scheme [%q]: %q", u, parts.Scheme) } } From feb75d493ac8c78c41e9f2346d17f528be6f1dfd Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 7 Aug 2018 11:22:10 -0700 Subject: [PATCH 0064/1815] Lock buffer when adding metrics (#4514) This function is not thread-safe but is currently used by multiple goroutines in RunningOutput --- internal/buffer/buffer.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/buffer/buffer.go b/internal/buffer/buffer.go index cdc81fed3..04835e042 100644 --- a/internal/buffer/buffer.go +++ b/internal/buffer/buffer.go @@ -40,18 +40,18 @@ func (b *Buffer) Len() int { 
// Add adds metrics to the buffer. func (b *Buffer) Add(metrics ...telegraf.Metric) { + b.mu.Lock() for i, _ := range metrics { MetricsWritten.Incr(1) select { case b.buf <- metrics[i]: default: - b.mu.Lock() MetricsDropped.Incr(1) <-b.buf b.buf <- metrics[i] - b.mu.Unlock() } } + b.mu.Unlock() } // Batch returns a batch of metrics of size batchSize. From b0b52692e93d8d6670d4313badf0876939cc8084 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 7 Aug 2018 11:26:28 -0700 Subject: [PATCH 0065/1815] Update changelog --- CHANGELOG.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aab301c40..e47c8c9d8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -61,9 +61,13 @@ ### Bugfixes +- [#4434](https://github.com/influxdata/telegraf/issues/4434): Reduce required docker API version. - [#4498](https://github.com/influxdata/telegraf/pull/4498): Keep leading whitespace for messages in syslog input. -- [#4497](https://github.com/influxdata/telegraf/pull/4497): Skip bad entries on interrupt input. -- [#4481](https://github.com/influxdata/telegraf/pull/4481): Preserve metric type when using filters in output plugins. +- [#4470](https://github.com/influxdata/telegraf/issues/4470): Skip bad entries on interrupt input. +- [#4501](https://github.com/influxdata/telegraf/issues/4501): Preserve metric type when using filters in output plugins. +- [#3794](https://github.com/influxdata/telegraf/issues/3794): Fix error message if URL is unparseable in influxdb output. +- [#4059](https://github.com/influxdata/telegraf/issues/4059): Use explicit zpool properties to fix parse error on FreeBSD 11.2. +- [#4514](https://github.com/influxdata/telegraf/pull/4514): Lock buffer when adding metrics. ## v1.7.2 [2018-07-18] From 2a4267ed7273e1bfb7169259c0f13e6435dbf603 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 7 Aug 2018 17:17:09 -0700 Subject: [PATCH 0066/1815] Set 1.7.3 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e47c8c9d8..d53c2576b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,7 +57,7 @@ - [#4431](https://github.com/influxdata/telegraf/pull/4431): Add ip restriction for the prometheus_client output. - [#3918](https://github.com/influxdata/telegraf/pull/3918): Add pgbouncer input plugin. -## v1.7.3 [unreleased] +## v1.7.3 [2018-08-07] ### Bugfixes From 22b3bc4f8e73c1d767e82ede9d5e3df69130323d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 10 Aug 2018 18:11:12 -0700 Subject: [PATCH 0067/1815] Remove references to logparser from the grok documentation --- docs/DATA_FORMATS_INPUT.md | 58 +++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 32 deletions(-) diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index 5a63e9d83..417238ec3 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -671,7 +671,7 @@ which are available here: #### Grok Configuration: ```toml -[[inputs.reader]] +[[inputs.file]] ## Files to parse each interval. ## These accept standard unix glob matching rules, but with the addition of ## ** as a "super asterisk". ie: @@ -688,7 +688,7 @@ which are available here: ## This is a list of patterns to check the given log file(s) for. ## Note that adding patterns here increases processing time. The most - ## efficient configuration is to have one pattern per logparser. + ## efficient configuration is to have one pattern. 
## Other common built-in patterns are: ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) @@ -713,8 +713,8 @@ which are available here: grok_timezone = "Canada/Eastern" ``` -The Telegraf grok parser uses a slightly modified version of logstash "grok" -patterns, with the format +The grok parser uses a slightly modified version of logstash "grok" +patterns, with the format: ``` %{[:][:]} @@ -781,9 +781,8 @@ This example input and config parses a file using a custom timestamp conversion: ``` ```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} value=%{NUMBER:value:int}'] +[[inputs.file]] + grok_patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} value=%{NUMBER:value:int}'] ``` This example input and config parses a file using a timestamp in unix time: @@ -794,9 +793,8 @@ This example input and config parses a file using a timestamp in unix time: ``` ```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = ['%{NUMBER:timestamp:ts-epoch} value=%{NUMBER:value:int}'] +[[inputs.file]] + grok_patterns = ['%{NUMBER:timestamp:ts-epoch} value=%{NUMBER:value:int}'] ``` This example parses a file using a built-in conversion and a custom pattern: @@ -806,20 +804,19 @@ Wed Apr 12 13:10:34 PST 2017 value=42 ``` ```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = ["%{TS_UNIX:timestamp:ts-unix} value=%{NUMBER:value:int}"] - custom_patterns = ''' - TS_UNIX %{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ} %{YEAR} - ''' +[[inputs.file]] + grok_patterns = ["%{TS_UNIX:timestamp:ts-unix} value=%{NUMBER:value:int}"] + grok_custom_patterns = ''' + TS_UNIX %{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ} %{YEAR} + ''' ``` For cases where the timestamp itself is without offset, the `timezone` config var is available to denote an offset. By default (with `timezone` either omit, blank or set to `"UTC"`), the times are processed as if in the UTC timezone. If specified as `timezone = "Local"`, the timestamp will be processed based on the current machine timezone configuration. Lastly, if using a -timezone from the list of Unix [timezones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), the logparser grok will attempt to offset -the timestamp accordingly. See test cases for more detailed examples. +timezone from the list of Unix [timezones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), +grok will offset the timestamp accordingly. #### TOML Escaping @@ -840,29 +837,26 @@ get a literal `|`. With a basic TOML string, special characters such as backslash must be escaped, requiring us to escape the backslash a second time. ```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"] - custom_patterns = "UNICODE_ESCAPE (?:\\\\u[0-9A-F]{4})+" +[[inputs.file]] + grok_patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"] + grok_custom_patterns = "UNICODE_ESCAPE (?:\\\\u[0-9A-F]{4})+" ``` We cannot use a literal TOML string for the pattern, because we cannot match a `'` within it. However, it works well for the custom pattern. 
```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"] - custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+' +[[inputs.file]] + grok_patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"] + grok_custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+' ``` A multi-line literal string allows us to encode the pattern: ```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = [''' - \|%{NUMBER:value:int}\|%{UNICODE_ESCAPE:escape}\|'%{WORD:name}'\| - '''] - custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+' +[[inputs.file]] + grok_patterns = [''' + \|%{NUMBER:value:int}\|%{UNICODE_ESCAPE:escape}\|'%{WORD:name}'\| + '''] + grok_custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+' ``` #### Tips for creating patterns From 035e6a6f78c9ba222a9f44c58aea46c0a29f4d29 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 10 Aug 2018 18:11:32 -0700 Subject: [PATCH 0068/1815] Skip lines that do not match in grok parser --- plugins/parsers/grok/parser.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/plugins/parsers/grok/parser.go b/plugins/parsers/grok/parser.go index e17f127fc..096cb8ed8 100644 --- a/plugins/parsers/grok/parser.go +++ b/plugins/parsers/grok/parser.go @@ -2,6 +2,7 @@ package grok import ( "bufio" + "bytes" "fmt" "log" "os" @@ -356,18 +357,20 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { - scanner := bufio.NewScanner(strings.NewReader(string(buf))) - var lines []string - for scanner.Scan() { - lines = append(lines, scanner.Text()) - } - var metrics []telegraf.Metric - for _, line := range lines { + metrics := make([]telegraf.Metric, 0) + + scanner := bufio.NewScanner(bytes.NewReader(buf)) + for scanner.Scan() { + line := scanner.Text() m, err := p.ParseLine(line) if err != nil { return nil, err } + + if m == nil { + continue + } metrics = append(metrics, m) } From 98a785b0778818ee40a79f1ca014de669f3fc0ec Mon Sep 17 00:00:00 2001 From: Lee Jaeyong Date: Mon, 13 Aug 2018 08:32:07 +0900 Subject: [PATCH 0069/1815] Remove duplicate "Network Interface" section from win_perf_counters config (#4547) --- etc/telegraf_windows.conf | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index 54b7ee0e1..220944d42 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -205,20 +205,6 @@ Instances = ["_Total"] Measurement = "win_swap" - [[inputs.win_perf_counters.object]] - ObjectName = "Network Interface" - Instances = ["*"] - Counters = [ - "Bytes Sent/sec", - "Bytes Received/sec", - "Packets Sent/sec", - "Packets Received/sec", - "Packets Received Discarded", - "Packets Received Errors", - "Packets Outbound Discarded", - "Packets Outbound Errors", - ] - # Windows system plugins using WMI (disabled by default, using From 31e1c04ed0e344cc4876c41281aecac1d51785a0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Sun, 12 Aug 2018 16:52:35 -0700 Subject: [PATCH 0070/1815] Set version to unknown if not set (#4521) --- cmd/telegraf/telegraf.go | 47 ++++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 57ff846cf..e8ac7e660 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -58,22 +58,11 @@ var fService = flag.String("service", "", var 
fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)")
 
 var (
-	nextVersion = "1.8.0"
-	version     string
-	commit      string
-	branch      string
+	version string
+	commit  string
+	branch  string
 )
 
-func init() {
-	// If commit or branch are not set, make that clear.
-	if commit == "" {
-		commit = "unknown"
-	}
-	if branch == "" {
-		branch = "unknown"
-	}
-}
-
 var stop chan struct{}
 
 func reloadLoop(
@@ -165,7 +154,7 @@ func reloadLoop(
 		}
 	}()
 
-	log.Printf("I! Starting Telegraf %s\n", displayVersion())
+	log.Printf("I! Starting Telegraf %s\n", version)
 	log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
 	log.Printf("I! Loaded aggregators: %s", strings.Join(c.AggregatorNames(), " "))
 	log.Printf("I! Loaded processors: %s", strings.Join(c.ProcessorNames(), " "))
@@ -225,11 +214,27 @@ func (p *program) Stop(s service.Service) error {
 	return nil
 }
 
-func displayVersion() string {
-	if version == "" {
-		return fmt.Sprintf("v%s~%s", nextVersion, commit)
+func formatFullVersion() string {
+	var parts = []string{"Telegraf"}
+
+	if version != "" {
+		parts = append(parts, version)
+	} else {
+		parts = append(parts, "unknown")
 	}
-	return "v" + version
+
+	if branch != "" || commit != "" {
+		if branch == "" {
+			branch = "unknown"
+		}
+		if commit == "" {
+			commit = "unknown"
+		}
+		git := fmt.Sprintf("(git: %s %s)", branch, commit)
+		parts = append(parts, git)
+	}
+
+	return strings.Join(parts, " ")
 }
 
 func main() {
@@ -273,7 +278,7 @@ func main() {
 	if len(args) > 0 {
 		switch args[0] {
 		case "version":
-			fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
+			fmt.Println(formatFullVersion())
 			return
 		case "config":
 			config.PrintSampleConfig(
@@ -301,7 +306,7 @@ func main() {
 		}
 		return
 	case *fVersion:
-		fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
+		fmt.Println(formatFullVersion())
 		return
 	case *fSampleConfig:
 		config.PrintSampleConfig(
From b9ff1d042b157d057ae574d9f04a64b8ccc2400b Mon Sep 17 00:00:00 2001
From: LABOUARDY Mohamed
Date: Tue, 14 Aug 2018 01:34:59 +0200
Subject: [PATCH 0071/1815] Add ActiveMQ input plugin (#2689)

---
 README.md                                |   1 +
 plugins/inputs/activemq/README.md        |  86 ++++++++
 plugins/inputs/activemq/activemq.go      | 261 +++++++++++++++++++++++
 plugins/inputs/activemq/activemq_test.go | 139 ++++++++++++
 plugins/inputs/all/all.go                |   1 +
 5 files changed, 488 insertions(+)
 create mode 100644 plugins/inputs/activemq/README.md
 create mode 100644 plugins/inputs/activemq/activemq.go
 create mode 100644 plugins/inputs/activemq/activemq_test.go

diff --git a/README.md b/README.md
index 6307b5356..700f0dd2a 100644
--- a/README.md
+++ b/README.md
@@ -127,6 +127,7 @@ configuration options.
 
 ## Input Plugins
 
+* [activemq](./plugins/inputs/activemq)
 * [aerospike](./plugins/inputs/aerospike)
 * [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq)
 * [apache](./plugins/inputs/apache)
diff --git a/plugins/inputs/activemq/README.md b/plugins/inputs/activemq/README.md
new file mode 100644
index 000000000..b44d12d22
--- /dev/null
+++ b/plugins/inputs/activemq/README.md
@@ -0,0 +1,86 @@
+# Telegraf Input Plugin: ActiveMQ
+
+This plugin gathers queues, topics & subscribers metrics using the ActiveMQ Console API.
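For a quick sanity check of the XML this plugin consumes, the console endpoints can be queried directly. The sketch below mirrors the URL the plugin builds in `GetMetrics` (`http://<server>:<port>/<webadmin>/xml/<section>.jsp`) and a trimmed-down version of its queue structs; the broker address and `admin`/`admin` credentials are placeholders:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"log"
	"net/http"
)

// Minimal mirror of the plugin's Queues/Queue/Stats types.
type Queues struct {
	QueueItems []struct {
		Name  string `xml:"name,attr"`
		Stats struct {
			Size          int `xml:"size,attr"`
			ConsumerCount int `xml:"consumerCount,attr"`
			EnqueueCount  int `xml:"enqueueCount,attr"`
			DequeueCount  int `xml:"dequeueCount,attr"`
		} `xml:"stats"`
	} `xml:"queue"`
}

func main() {
	// Placeholder broker location and default console credentials.
	req, err := http.NewRequest("GET", "http://localhost:8161/admin/xml/queues.jsp", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.SetBasicAuth("admin", "admin")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var queues Queues
	if err := xml.NewDecoder(resp.Body).Decode(&queues); err != nil {
		log.Fatal(err)
	}
	for _, q := range queues.QueueItems {
		fmt.Printf("%s size=%d consumers=%d\n", q.Name, q.Stats.Size, q.Stats.ConsumerCount)
	}
}
```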
+
+### Configuration:
+
+```toml
+# Description
+[[inputs.activemq]]
+  ## Required ActiveMQ Endpoint
+  # server = "192.168.50.10"
+
+  ## Required ActiveMQ port
+  # port = 8161
+
+  ## Credentials for basic HTTP authentication
+  # username = "admin"
+  # password = "admin"
+
+  ## Required ActiveMQ webadmin root path
+  # webadmin = "admin"
+
+  ## Maximum time to receive response.
+  # response_timeout = "5s"
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+```
+
+### Measurements & Fields:
+
+Every effort was made to preserve the names based on the XML response from the ActiveMQ Console API.
+
+- activemq_queues:
+    - size
+    - consumer_count
+    - enqueue_count
+    - dequeue_count
+- activemq_topics:
+    - size
+    - consumer_count
+    - enqueue_count
+    - dequeue_count
+- activemq_subscribers:
+    - pending_queue_size
+    - dispatched_queue_size
+    - dispatched_counter
+    - enqueue_counter
+    - dequeue_counter
+
+### Tags:
+
+- activemq_queues:
+    - name
+    - source
+    - port
+- activemq_topics:
+    - name
+    - source
+    - port
+- activemq_subscribers:
+    - client_id
+    - subscription_name
+    - connection_id
+    - destination_name
+    - selector
+    - active
+    - source
+    - port
+
+### Example Output:
+
+```
+$ ./telegraf -config telegraf.conf -input-filter activemq -test
+activemq_queues,name=sandra,host=88284b2fe51b,source=localhost,port=8161 consumer_count=0i,enqueue_count=0i,dequeue_count=0i,size=0i 1492610703000000000
+activemq_queues,name=Test,host=88284b2fe51b,source=localhost,port=8161 dequeue_count=0i,size=0i,consumer_count=0i,enqueue_count=0i 1492610703000000000
+activemq_topics,name=ActiveMQ.Advisory.MasterBroker\ ,host=88284b2fe51b,source=localhost,port=8161 size=0i,consumer_count=0i,enqueue_count=1i,dequeue_count=0i 1492610703000000000
+activemq_topics,host=88284b2fe51b,name=AAA\,source=localhost,port=8161 size=0i,consumer_count=1i,enqueue_count=0i,dequeue_count=0i 1492610703000000000
+activemq_topics,name=ActiveMQ.Advisory.Topic\,source=localhost,port=8161 ,host=88284b2fe51b enqueue_count=1i,dequeue_count=0i,size=0i,consumer_count=0i 1492610703000000000
+activemq_topics,name=ActiveMQ.Advisory.Queue\,source=localhost,port=8161 ,host=88284b2fe51b size=0i,consumer_count=0i,enqueue_count=2i,dequeue_count=0i 1492610703000000000
+activemq_topics,name=AAAA\ ,host=88284b2fe51b,source=localhost,port=8161 consumer_count=0i,enqueue_count=0i,dequeue_count=0i,size=0i 1492610703000000000
+activemq_subscribers,connection_id=NOTSET,destination_name=AAA,,source=localhost,port=8161,selector=AA,active=no,host=88284b2fe51b,client_id=AAA,subscription_name=AAA pending_queue_size=0i,dispatched_queue_size=0i,dispatched_counter=0i,enqueue_counter=0i,dequeue_counter=0i 1492610703000000000
+```
diff --git a/plugins/inputs/activemq/activemq.go b/plugins/inputs/activemq/activemq.go
new file mode 100644
index 000000000..5b59730d2
--- /dev/null
+++ b/plugins/inputs/activemq/activemq.go
@@ -0,0 +1,261 @@
+package activemq
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strconv"
+	"time"
+
+	"strings"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/internal/tls"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+type ActiveMQ struct {
+	Server          string `json:"server"`
+	Port            int    `json:"port"`
+	Username        string `json:"username"`
+	Password        string `json:"password"`
+	Webadmin        string `json:"webadmin"`
+	ResponseTimeout
internal.Duration + tls.ClientConfig + + client *http.Client +} + +type Topics struct { + XMLName xml.Name `xml:"topics"` + TopicItems []Topic `xml:"topic"` +} + +type Topic struct { + XMLName xml.Name `xml:"topic"` + Name string `xml:"name,attr"` + Stats Stats `xml:"stats"` +} + +type Subscribers struct { + XMLName xml.Name `xml:"subscribers"` + SubscriberItems []Subscriber `xml:"subscriber"` +} + +type Subscriber struct { + XMLName xml.Name `xml:"subscriber"` + ClientId string `xml:"clientId,attr"` + SubscriptionName string `xml:"subscriptionName,attr"` + ConnectionId string `xml:"connectionId,attr"` + DestinationName string `xml:"destinationName,attr"` + Selector string `xml:"selector,attr"` + Active string `xml:"active,attr"` + Stats Stats `xml:"stats"` +} + +type Queues struct { + XMLName xml.Name `xml:"queues"` + QueueItems []Queue `xml:"queue"` +} + +type Queue struct { + XMLName xml.Name `xml:"queue"` + Name string `xml:"name,attr"` + Stats Stats `xml:"stats"` +} + +type Stats struct { + XMLName xml.Name `xml:"stats"` + Size int `xml:"size,attr"` + ConsumerCount int `xml:"consumerCount,attr"` + EnqueueCount int `xml:"enqueueCount,attr"` + DequeueCount int `xml:"dequeueCount,attr"` + PendingQueueSize int `xml:"pendingQueueSize,attr"` + DispatchedQueueSize int `xml:"dispatchedQueueSize,attr"` + DispatchedCounter int `xml:"dispatchedCounter,attr"` + EnqueueCounter int `xml:"enqueueCounter,attr"` + DequeueCounter int `xml:"dequeueCounter,attr"` +} + +const ( + QUEUES_STATS = "queues" + TOPICS_STATS = "topics" + SUBSCRIBERS_STATS = "subscribers" +) + +var sampleConfig = ` + ## Required ActiveMQ Endpoint + # server = "192.168.50.10" + + ## Required ActiveMQ port + # port = 8161 + + ## Credentials for basic HTTP authentication + # username = "admin" + # password = "admin" + + ## Required ActiveMQ webadmin root path + # webadmin = "admin" + + ## Maximum time to receive response. 
+ # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + ` + +func (a *ActiveMQ) Description() string { + return "Gather ActiveMQ metrics" +} + +func (a *ActiveMQ) SampleConfig() string { + return sampleConfig +} + +func (a *ActiveMQ) createHttpClient() (*http.Client, error) { + tlsCfg, err := a.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + }, + Timeout: a.ResponseTimeout.Duration, + } + + return client, nil +} + +func (a *ActiveMQ) GetMetrics(keyword string) ([]byte, error) { + if a.ResponseTimeout.Duration < time.Second { + a.ResponseTimeout.Duration = time.Second * 5 + } + + if a.client == nil { + client, err := a.createHttpClient() + if err != nil { + return nil, err + } + a.client = client + } + url := fmt.Sprintf("http://%s:%d/%s/xml/%s.jsp", a.Server, a.Port, a.Webadmin, keyword) + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + + req.SetBasicAuth(a.Username, a.Password) + resp, err := a.client.Do(req) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + return ioutil.ReadAll(resp.Body) +} + +func (a *ActiveMQ) GatherQueuesMetrics(acc telegraf.Accumulator, queues Queues) { + for _, queue := range queues.QueueItems { + records := make(map[string]interface{}) + tags := make(map[string]string) + + tags["name"] = strings.TrimSpace(queue.Name) + tags["source"] = a.Server + tags["port"] = strconv.Itoa(a.Port) + + records["size"] = queue.Stats.Size + records["consumer_count"] = queue.Stats.ConsumerCount + records["enqueue_count"] = queue.Stats.EnqueueCount + records["dequeue_count"] = queue.Stats.DequeueCount + + acc.AddFields("activemq_queues", records, tags) + } +} + +func (a *ActiveMQ) GatherTopicsMetrics(acc telegraf.Accumulator, topics Topics) { + for _, topic := range topics.TopicItems { + records := make(map[string]interface{}) + tags := make(map[string]string) + + tags["name"] = topic.Name + tags["source"] = a.Server + tags["port"] = strconv.Itoa(a.Port) + + records["size"] = topic.Stats.Size + records["consumer_count"] = topic.Stats.ConsumerCount + records["enqueue_count"] = topic.Stats.EnqueueCount + records["dequeue_count"] = topic.Stats.DequeueCount + + acc.AddFields("activemq_topics", records, tags) + } +} + +func (a *ActiveMQ) GatherSubscribersMetrics(acc telegraf.Accumulator, subscribers Subscribers) { + for _, subscriber := range subscribers.SubscriberItems { + records := make(map[string]interface{}) + tags := make(map[string]string) + + tags["client_id"] = subscriber.ClientId + tags["subscription_name"] = subscriber.SubscriptionName + tags["connection_id"] = subscriber.ConnectionId + tags["destination_name"] = subscriber.DestinationName + tags["selector"] = subscriber.Selector + tags["active"] = subscriber.Active + tags["source"] = a.Server + tags["port"] = strconv.Itoa(a.Port) + + records["pending_queue_size"] = subscriber.Stats.PendingQueueSize + records["dispatched_queue_size"] = subscriber.Stats.DispatchedQueueSize + records["dispatched_counter"] = subscriber.Stats.DispatchedCounter + records["enqueue_counter"] = subscriber.Stats.EnqueueCounter + records["dequeue_counter"] = subscriber.Stats.DequeueCounter + + acc.AddFields("activemq_subscribers", records, tags) + } +} + +func (a *ActiveMQ) Gather(acc telegraf.Accumulator) error { + dataQueues, err 
:= a.GetMetrics(QUEUES_STATS)
+	if err != nil {
+		return err
+	}
+	queues := Queues{}
+	err = xml.Unmarshal(dataQueues, &queues)
+	if err != nil {
+		return err
+	}
+
+	dataTopics, err := a.GetMetrics(TOPICS_STATS)
+	if err != nil {
+		return err
+	}
+	topics := Topics{}
+	err = xml.Unmarshal(dataTopics, &topics)
+	if err != nil {
+		return err
+	}
+
+	dataSubscribers, err := a.GetMetrics(SUBSCRIBERS_STATS)
+	if err != nil {
+		return err
+	}
+	subscribers := Subscribers{}
+	err = xml.Unmarshal(dataSubscribers, &subscribers)
+	if err != nil {
+		return err
+	}
+
+	a.GatherQueuesMetrics(acc, queues)
+	a.GatherTopicsMetrics(acc, topics)
+	a.GatherSubscribersMetrics(acc, subscribers)
+
+	return nil
+}
+
+func init() {
+	inputs.Add("activemq", func() telegraf.Input {
+		return &ActiveMQ{
+			Server: "localhost",
+			Port:   8161,
+		}
+	})
+}
diff --git a/plugins/inputs/activemq/activemq_test.go b/plugins/inputs/activemq/activemq_test.go
new file mode 100644
index 000000000..c277af3c5
--- /dev/null
+++ b/plugins/inputs/activemq/activemq_test.go
@@ -0,0 +1,139 @@
+package activemq
+
+import (
+	"encoding/xml"
+	"testing"
+
+	"github.com/influxdata/telegraf/testutil"
+)
+
+func TestGatherQueuesMetrics(t *testing.T) {
+
+	s := `<queues>
+<queue name="sandra">
+<stats size="0" consumerCount="0" enqueueCount="0" dequeueCount="0"/>
+<feed>
+<atom>queueBrowse/sandra?view=rss&amp;feedType=atom_1.0</atom>
+<rss>queueBrowse/sandra?view=rss&amp;feedType=rss_2.0</rss>
+</feed>
+</queue>
+<queue name="Test">
+<stats size="0" consumerCount="0" enqueueCount="0" dequeueCount="0"/>
+<feed>
+<atom>queueBrowse/Test?view=rss&amp;feedType=atom_1.0</atom>
+<rss>queueBrowse/Test?view=rss&amp;feedType=rss_2.0</rss>
+</feed>
+</queue>
+</queues>`
+
+	queues := Queues{}
+
+	xml.Unmarshal([]byte(s), &queues)
+
+	records := make(map[string]interface{})
+	tags := make(map[string]string)
+
+	tags["name"] = "Test"
+	tags["source"] = "localhost"
+	tags["port"] = "8161"
+
+	records["size"] = 0
+	records["consumer_count"] = 0
+	records["enqueue_count"] = 0
+	records["dequeue_count"] = 0
+
+	var acc testutil.Accumulator
+
+	activeMQ := new(ActiveMQ)
+	activeMQ.Server = "localhost"
+	activeMQ.Port = 8161
+
+	activeMQ.GatherQueuesMetrics(&acc, queues)
+	acc.AssertContainsTaggedFields(t, "activemq_queues", records, tags)
+}
+
+func TestGatherTopicsMetrics(t *testing.T) {
+
+	s := `<topics>
+<topic name="ActiveMQ.Advisory.MasterBroker ">
+<stats size="0" consumerCount="0" enqueueCount="1" dequeueCount="0"/>
+</topic>
+<topic name="AAA ">
+<stats size="0" consumerCount="1" enqueueCount="0" dequeueCount="0"/>
+</topic>
+<topic name="ActiveMQ.Advisory.Topic ">
+<stats size="0" consumerCount="0" enqueueCount="1" dequeueCount="0"/>
+</topic>
+<topic name="ActiveMQ.Advisory.Queue ">
+<stats size="0" consumerCount="0" enqueueCount="2" dequeueCount="0"/>
+</topic>
+<topic name="AAAA ">
+<stats size="0" consumerCount="0" enqueueCount="0" dequeueCount="0"/>
+</topic>
+</topics>`
+
+	topics := Topics{}
+
+	xml.Unmarshal([]byte(s), &topics)
+
+	records := make(map[string]interface{})
+	tags := make(map[string]string)
+
+	tags["name"] = "ActiveMQ.Advisory.MasterBroker "
+	tags["source"] = "localhost"
+	tags["port"] = "8161"
+
+	records["size"] = 0
+	records["consumer_count"] = 0
+	records["enqueue_count"] = 1
+	records["dequeue_count"] = 0
+
+	var acc testutil.Accumulator
+
+	activeMQ := new(ActiveMQ)
+	activeMQ.Server = "localhost"
+	activeMQ.Port = 8161
+
+	activeMQ.GatherTopicsMetrics(&acc, topics)
+	acc.AssertContainsTaggedFields(t, "activemq_topics", records, tags)
+}
+
+func TestGatherSubscribersMetrics(t *testing.T) {
+
+	s := `<subscribers>
+<subscriber clientId="AAA" subscriptionName="AAA" connectionId="NOTSET" destinationName="AAA" selector="AA" active="no">
+<stats pendingQueueSize="0" dispatchedQueueSize="0" dispatchedCounter="0" enqueueCounter="0" dequeueCounter="0"/>
+</subscriber>
+</subscribers>`
+
+	subscribers := Subscribers{}
+
+	xml.Unmarshal([]byte(s), &subscribers)
+
+	records := make(map[string]interface{})
+	tags := make(map[string]string)
+
+	tags["client_id"] = "AAA"
+	tags["subscription_name"] = "AAA"
+	tags["connection_id"] = "NOTSET"
+	tags["destination_name"] = "AAA"
+	tags["selector"] = "AA"
+	tags["active"] = "no"
+	tags["source"] = "localhost"
+	tags["port"] = "8161"
+
+	records["pending_queue_size"] = 0
+	records["dispatched_queue_size"] = 0
+	records["dispatched_counter"] = 0
+	records["enqueue_counter"] = 0
+	records["dequeue_counter"] = 0
+
+	var acc testutil.Accumulator
+
+	activeMQ := new(ActiveMQ)
+	activeMQ.Server = "localhost"
+	activeMQ.Port = 8161
+
+	activeMQ.GatherSubscribersMetrics(&acc, subscribers)
+	acc.AssertContainsTaggedFields(t, "activemq_subscribers", records, tags)
+}
diff --git a/plugins/inputs/all/all.go
b/plugins/inputs/all/all.go
index bbca99521..ac86fb879 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -1,6 +1,7 @@
 package all
 
 import (
+	_ "github.com/influxdata/telegraf/plugins/inputs/activemq"
 	_ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
 	_ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
 	_ "github.com/influxdata/telegraf/plugins/inputs/apache"
From 6454319062e20eaaf48370f4ccb451733f58b83d Mon Sep 17 00:00:00 2001
From: Pierre Tessier
Date: Mon, 13 Aug 2018 19:37:06 -0400
Subject: [PATCH 0072/1815] Add Wavefront parser (#4402)

---
 docs/DATA_FORMATS_INPUT.md               |  27 +++
 plugins/outputs/wavefront/wavefront.go   |  32 +--
 plugins/parsers/registry.go              |   7 +
 plugins/parsers/wavefront/element.go     | 238 +++++++++++++++++++++++
 plugins/parsers/wavefront/parser.go      | 203 +++++++++++++++++++
 plugins/parsers/wavefront/parser_test.go | 204 +++++++++++++++++++
 plugins/parsers/wavefront/scanner.go     |  69 +++++++
 plugins/parsers/wavefront/token.go       |  41 ++++
 8 files changed, 808 insertions(+), 13 deletions(-)
 create mode 100644 plugins/parsers/wavefront/element.go
 create mode 100644 plugins/parsers/wavefront/parser.go
 create mode 100644 plugins/parsers/wavefront/parser_test.go
 create mode 100644 plugins/parsers/wavefront/scanner.go
 create mode 100644 plugins/parsers/wavefront/token.go

diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md
index 417238ec3..00ead6e38 100644
--- a/docs/DATA_FORMATS_INPUT.md
+++ b/docs/DATA_FORMATS_INPUT.md
@@ -10,6 +10,7 @@ Telegraf is able to parse the following input data formats into metrics:
 1. [Collectd](#collectd)
 1. [Dropwizard](#dropwizard)
 1. [Grok](#grok)
+1. [Wavefront](#wavefront)
 
 Telegraf metrics, like InfluxDB
 [points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
@@ -881,3 +882,29 @@ the file output will only print once per `flush_interval`.
 
 - Continue one token at a time until the entire line is successfully parsed.
+```
+
+# Wavefront:
+
+Metrics in the Wavefront Data Format are parsed directly into Telegraf metrics.
+For more information about the Wavefront Data Format see
+[here](https://docs.wavefront.com/wavefront_data_format.html).
+
+There are no additional configuration options for the Wavefront Data Format.
+
+#### Wavefront Configuration:
+
+```toml
+[[inputs.exec]]
+  ## Commands array
+  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
+
+  ## measurement name suffix (for separating different commands)
+  name_suffix = "_mycollector"
+
+  ## Data format to consume.
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "wavefront" +``` diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index 18c5a6495..ef36d1804 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -189,26 +189,32 @@ func buildTags(mTags map[string]string, w *Wavefront) (string, map[string]string } var source string - sourceTagFound := false - for _, s := range w.SourceOverride { - for k, v := range mTags { - if k == s { - source = v - mTags["telegraf_host"] = mTags["host"] - sourceTagFound = true - delete(mTags, k) + if s, ok := mTags["source"]; ok { + source = s + delete(mTags, "source") + } else { + sourceTagFound := false + for _, s := range w.SourceOverride { + for k, v := range mTags { + if k == s { + source = v + mTags["telegraf_host"] = mTags["host"] + sourceTagFound = true + delete(mTags, k) + break + } + } + if sourceTagFound { break } } - if sourceTagFound { - break + + if !sourceTagFound { + source = mTags["host"] } } - if !sourceTagFound { - source = mTags["host"] - } delete(mTags, "host") return tagValueReplacer.Replace(source), mTags diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 24e73d4b6..1e395047a 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -13,6 +13,7 @@ import ( "github.com/influxdata/telegraf/plugins/parsers/json" "github.com/influxdata/telegraf/plugins/parsers/nagios" "github.com/influxdata/telegraf/plugins/parsers/value" + "github.com/influxdata/telegraf/plugins/parsers/wavefront" ) // ParserInput is an interface for input plugins that are able to parse @@ -131,6 +132,8 @@ func NewParser(config *Config) (Parser, error) { config.DefaultTags, config.Separator, config.Templates) + case "wavefront": + parser, err = NewWavefrontParser(config.DefaultTags) case "grok": parser, err = newGrokParser( config.MetricName, @@ -238,3 +241,7 @@ func NewDropwizardParser( } return parser, err } + +func NewWavefrontParser(defaultTags map[string]string) (Parser, error) { + return wavefront.NewWavefrontParser(defaultTags), nil +} diff --git a/plugins/parsers/wavefront/element.go b/plugins/parsers/wavefront/element.go new file mode 100644 index 000000000..4e40238e7 --- /dev/null +++ b/plugins/parsers/wavefront/element.go @@ -0,0 +1,238 @@ +package wavefront + +import ( + "errors" + "fmt" + "strconv" + "time" +) + +var ( + ErrEOF = errors.New("EOF") + ErrInvalidTimestamp = errors.New("Invalid timestamp") +) + +// Interface for parsing line elements. +type ElementParser interface { + parse(p *PointParser, pt *Point) error +} + +type NameParser struct{} +type ValueParser struct{} +type TimestampParser struct { + optional bool +} +type WhiteSpaceParser struct { + nextOptional bool +} +type TagParser struct{} +type LoopedParser struct { + wrappedParser ElementParser + wsPaser *WhiteSpaceParser +} +type LiteralParser struct { + literal string +} + +func (ep *NameParser) parse(p *PointParser, pt *Point) error { + //Valid characters are: a-z, A-Z, 0-9, hyphen ("-"), underscore ("_"), dot ("."). + // Forward slash ("/") and comma (",") are allowed if metricName is enclosed in double quotes. 
+ name, err := parseLiteral(p) + if err != nil { + return err + } + pt.Name = name + return nil +} + +func (ep *ValueParser) parse(p *PointParser, pt *Point) error { + tok, lit := p.scan() + if tok == EOF { + return fmt.Errorf("found %q, expected number", lit) + } + + p.writeBuf.Reset() + if tok == MINUS_SIGN { + p.writeBuf.WriteString(lit) + tok, lit = p.scan() + } + + for tok != EOF && (tok == LETTER || tok == NUMBER || tok == DOT) { + p.writeBuf.WriteString(lit) + tok, lit = p.scan() + } + p.unscan() + + pt.Value = p.writeBuf.String() + _, err := strconv.ParseFloat(pt.Value, 64) + if err != nil { + return fmt.Errorf("invalid metric value %s", pt.Value) + } + return nil +} + +func (ep *TimestampParser) parse(p *PointParser, pt *Point) error { + tok, lit := p.scan() + if tok == EOF { + if ep.optional { + p.unscanTokens(2) + return setTimestamp(pt, 0, 1) + } + return fmt.Errorf("found %q, expected number", lit) + } + + if tok != NUMBER { + if ep.optional { + p.unscanTokens(2) + return setTimestamp(pt, 0, 1) + } + return ErrInvalidTimestamp + } + + p.writeBuf.Reset() + for tok != EOF && tok == NUMBER { + p.writeBuf.WriteString(lit) + tok, lit = p.scan() + } + p.unscan() + + tsStr := p.writeBuf.String() + ts, err := strconv.ParseInt(tsStr, 10, 64) + if err != nil { + return err + } + return setTimestamp(pt, ts, len(tsStr)) +} + +func setTimestamp(pt *Point, ts int64, numDigits int) error { + + if numDigits == 19 { + // nanoseconds + ts = ts / 1e9 + } else if numDigits == 16 { + // microseconds + ts = ts / 1e6 + } else if numDigits == 13 { + // milliseconds + ts = ts / 1e3 + } else if numDigits != 10 { + // must be in seconds, return error if not 0 + if ts == 0 { + ts = getCurrentTime() + } else { + return ErrInvalidTimestamp + } + } + pt.Timestamp = ts + return nil +} + +func (ep *LoopedParser) parse(p *PointParser, pt *Point) error { + for { + err := ep.wrappedParser.parse(p, pt) + if err != nil { + return err + } + err = ep.wsPaser.parse(p, pt) + if err == ErrEOF { + break + } + } + return nil +} + +func (ep *TagParser) parse(p *PointParser, pt *Point) error { + k, err := parseLiteral(p) + if err != nil { + if k == "" { + return nil + } + return err + } + + next, lit := p.scan() + if next != EQUALS { + return fmt.Errorf("found %q, expected equals", lit) + } + + v, err := parseLiteral(p) + if err != nil { + return err + } + if len(pt.Tags) == 0 { + pt.Tags = make(map[string]string) + } + pt.Tags[k] = v + return nil +} + +func (ep *WhiteSpaceParser) parse(p *PointParser, pt *Point) error { + tok := WS + for tok != EOF && tok == WS { + tok, _ = p.scan() + } + + if tok == EOF { + if !ep.nextOptional { + return ErrEOF + } + return nil + } + p.unscan() + return nil +} + +func (ep *LiteralParser) parse(p *PointParser, pt *Point) error { + l, err := parseLiteral(p) + if err != nil { + return err + } + + if l != ep.literal { + return fmt.Errorf("found %s, expected %s", l, ep.literal) + } + return nil +} + +func parseQuotedLiteral(p *PointParser) (string, error) { + p.writeBuf.Reset() + + escaped := false + tok, lit := p.scan() + for tok != EOF && (tok != QUOTES || (tok == QUOTES && escaped)) { + // let everything through + escaped = tok == BACKSLASH + p.writeBuf.WriteString(lit) + tok, lit = p.scan() + } + if tok == EOF { + return "", fmt.Errorf("found %q, expected quotes", lit) + } + return p.writeBuf.String(), nil +} + +func parseLiteral(p *PointParser) (string, error) { + tok, lit := p.scan() + if tok == EOF { + return "", fmt.Errorf("found %q, expected literal", lit) + } + + if tok == QUOTES { 
+		return parseQuotedLiteral(p)
+	}
+
+	p.writeBuf.Reset()
+	for tok != EOF && tok > literal_beg && tok < literal_end {
+		p.writeBuf.WriteString(lit)
+		tok, lit = p.scan()
+	}
+	if tok == QUOTES {
+		return "", errors.New("found quote inside unquoted literal")
+	}
+	p.unscan()
+	return p.writeBuf.String(), nil
+}
+
+func getCurrentTime() int64 {
+	return time.Now().UnixNano() / 1e9
+}
diff --git a/plugins/parsers/wavefront/parser.go b/plugins/parsers/wavefront/parser.go
new file mode 100644
index 000000000..f5fc88dbf
--- /dev/null
+++ b/plugins/parsers/wavefront/parser.go
@@ -0,0 +1,203 @@
+package wavefront
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"log"
+	"strconv"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
+)
+
+const MAX_BUFFER_SIZE = 2
+
+type Point struct {
+	Name      string
+	Value     string
+	Timestamp int64
+	Source    string
+	Tags      map[string]string
+}
+
+// PointParser represents a parser for Wavefront points.
+type PointParser struct {
+	s   *PointScanner
+	buf struct {
+		tok []Token  // last read n tokens
+		lit []string // last read n literals
+		n   int      // unscanned buffer size (max=2)
+	}
+	scanBuf     bytes.Buffer // buffer reused for scanning tokens
+	writeBuf    bytes.Buffer // buffer reused for parsing elements
+	Elements    []ElementParser
+	defaultTags map[string]string
+}
+
+// Returns a slice of ElementParsers for the Wavefront format
+func NewWavefrontElements() []ElementParser {
+	var elements []ElementParser
+	wsParser := WhiteSpaceParser{}
+	wsParserNextOpt := WhiteSpaceParser{nextOptional: true}
+	repeatParser := LoopedParser{wrappedParser: &TagParser{}, wsPaser: &wsParser}
+	elements = append(elements, &NameParser{}, &wsParser, &ValueParser{}, &wsParserNextOpt,
+		&TimestampParser{optional: true}, &wsParserNextOpt, &repeatParser)
+	return elements
+}
+
+func NewWavefrontParser(defaultTags map[string]string) *PointParser {
+	elements := NewWavefrontElements()
+	return &PointParser{Elements: elements, defaultTags: defaultTags}
+}
+
+func (p *PointParser) Parse(buf []byte) ([]telegraf.Metric, error) {
+
+	// parse even if the buffer begins with a newline
+	buf = bytes.TrimPrefix(buf, []byte("\n"))
+	// add newline to end if not exists:
+	if len(buf) > 0 && !bytes.HasSuffix(buf, []byte("\n")) {
+		buf = append(buf, []byte("\n")...)
+	}
+
+	points := make([]Point, 0)
+
+	buffer := bytes.NewBuffer(buf)
+	reader := bufio.NewReader(buffer)
+	for {
+		// Read up to the next newline.
+ buf, err := reader.ReadBytes('\n') + if err == io.EOF { + break + } + + p.reset(buf) + point := Point{} + for _, element := range p.Elements { + err := element.parse(p, &point) + if err != nil { + return nil, err + } + } + + points = append(points, point) + } + + metrics, err := p.convertPointToTelegrafMetric(points) + if err != nil { + return nil, err + } + return metrics, nil +} + +func (p *PointParser) ParseLine(line string) (telegraf.Metric, error) { + buf := []byte(line) + metrics, err := p.Parse(buf) + if err != nil { + return nil, err + } + + if len(metrics) > 0 { + return metrics[0], nil + } + + return nil, nil +} + +func (p *PointParser) SetDefaultTags(tags map[string]string) { + p.defaultTags = tags +} + +func (p *PointParser) convertPointToTelegrafMetric(points []Point) ([]telegraf.Metric, error) { + + metrics := make([]telegraf.Metric, 0) + + for _, point := range points { + tags := make(map[string]string) + for k, v := range point.Tags { + tags[k] = v + } + // apply default tags after parsed tags + for k, v := range p.defaultTags { + tags[k] = v + } + + // single field for value + fields := make(map[string]interface{}) + v, err := strconv.ParseFloat(point.Value, 64) + if err != nil { + return nil, err + } + fields["value"] = v + + m, err := metric.New(point.Name, tags, fields, time.Unix(point.Timestamp, 0)) + if err != nil { + return nil, err + } + + metrics = append(metrics, m) + } + + return metrics, nil +} + +// scan returns the next token from the underlying scanner. +// If a token has been unscanned then read that from the internal buffer instead. +func (p *PointParser) scan() (Token, string) { + // If we have a token on the buffer, then return it. + if p.buf.n != 0 { + idx := p.buf.n % MAX_BUFFER_SIZE + tok, lit := p.buf.tok[idx], p.buf.lit[idx] + p.buf.n -= 1 + return tok, lit + } + + // Otherwise read the next token from the scanner. + tok, lit := p.s.Scan() + + // Save it to the buffer in case we unscan later. + p.buffer(tok, lit) + + return tok, lit +} + +func (p *PointParser) buffer(tok Token, lit string) { + // create the buffer if it is empty + if len(p.buf.tok) == 0 { + p.buf.tok = make([]Token, MAX_BUFFER_SIZE) + p.buf.lit = make([]string, MAX_BUFFER_SIZE) + } + + // for now assume a simple circular buffer of length two + p.buf.tok[0], p.buf.lit[0] = p.buf.tok[1], p.buf.lit[1] + p.buf.tok[1], p.buf.lit[1] = tok, lit +} + +// unscan pushes the previously read token back onto the buffer. 
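+// At most MAX_BUFFER_SIZE (2) tokens can be unread at a time; unscanTokens
+// below logs a message if more are requested.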
+func (p *PointParser) unscan() { + p.unscanTokens(1) +} + +func (p *PointParser) unscanTokens(n int) { + if n > MAX_BUFFER_SIZE { + // just log for now + log.Printf("cannot unscan more than %d tokens", MAX_BUFFER_SIZE) + } + p.buf.n += n +} + +func (p *PointParser) reset(buf []byte) { + + // reset the scan buffer and write new byte + p.scanBuf.Reset() + p.scanBuf.Write(buf) + + if p.s == nil { + p.s = NewScanner(&p.scanBuf) + } else { + // reset p.s.r passing in the buffer as the reader + p.s.r.Reset(&p.scanBuf) + } + p.buf.n = 0 +} diff --git a/plugins/parsers/wavefront/parser_test.go b/plugins/parsers/wavefront/parser_test.go new file mode 100644 index 000000000..85367fa1a --- /dev/null +++ b/plugins/parsers/wavefront/parser_test.go @@ -0,0 +1,204 @@ +package wavefront + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/assert" +) + +func TestParse(t *testing.T) { + parser := NewWavefrontParser(nil) + + parsedMetrics, err := parser.Parse([]byte("test.metric 1")) + assert.NoError(t, err) + testMetric, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) + assert.NoError(t, err) + assert.Equal(t, parsedMetrics[0].Name(), testMetric.Name()) + assert.Equal(t, parsedMetrics[0].Fields(), testMetric.Fields()) + + parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 source=\"mysource\"")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + +} + +func TestParseLine(t *testing.T) { + parser := NewWavefrontParser(nil) + + parsedMetric, err := parser.ParseLine("test.metric 1") + assert.NoError(t, err) + testMetric, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) + assert.NoError(t, err) + assert.Equal(t, parsedMetric.Name(), testMetric.Name()) + assert.Equal(t, 
parsedMetric.Fields(), testMetric.Fields()) + + parsedMetric, err = parser.ParseLine("test.metric 1 1530939936") + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetric, testMetric) + + parsedMetric, err = parser.ParseLine("test.metric 1 1530939936 source=mysource") + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetric, testMetric) + + parsedMetric, err = parser.ParseLine("\"test.metric\" 1.1234 1530939936 source=\"mysource\"") + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetric, testMetric) + + parsedMetric, err = parser.ParseLine("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2") + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetric, testMetric) + + parsedMetric, err = parser.ParseLine("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ") + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetric, testMetric) +} + +func TestParseMultiple(t *testing.T) { + parser := NewWavefrontParser(nil) + + parsedMetrics, err := parser.Parse([]byte("test.metric 1\ntest.metric2 2 1530939936")) + assert.NoError(t, err) + testMetric1, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) + assert.NoError(t, err) + testMetric2, err := metric.New("test.metric2", map[string]string{}, map[string]interface{}{"value": 2.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + testMetrics := []telegraf.Metric{testMetric1, testMetric2} + assert.Equal(t, parsedMetrics[0].Name(), testMetrics[0].Name()) + assert.Equal(t, parsedMetrics[0].Fields(), testMetrics[0].Fields()) + assert.EqualValues(t, parsedMetrics[1], testMetrics[1]) + + parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\"")) + assert.NoError(t, err) + testMetric1, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + testMetric2, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + testMetrics = []telegraf.Metric{testMetric1, testMetric2} + assert.EqualValues(t, parsedMetrics, testMetrics) + + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2\ntest.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ")) + assert.NoError(t, err) + testMetric1, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, 
time.Unix(1530939936, 0)) + assert.NoError(t, err) + testMetric2, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + testMetrics = []telegraf.Metric{testMetric1, testMetric2} + assert.EqualValues(t, parsedMetrics, testMetrics) + + parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\"\ntest.metric3 333 1530939936 tagit=valueit")) + assert.NoError(t, err) + testMetric1, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + testMetric2, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + testMetric3, err := metric.New("test.metric3", map[string]string{"tagit": "valueit"}, map[string]interface{}{"value": 333.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + testMetrics = []telegraf.Metric{testMetric1, testMetric2, testMetric3} + assert.EqualValues(t, parsedMetrics, testMetrics) + +} + +func TestParseSpecial(t *testing.T) { + parser := NewWavefrontParser(nil) + + parsedMetric, err := parser.ParseLine("\"test.metric\" 1 1530939936") + assert.NoError(t, err) + testMetric, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetric, testMetric) + + parsedMetric, err = parser.ParseLine("test.metric 1 1530939936 tag1=\"val\\\"ue1\"") + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"tag1": "val\\\"ue1"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetric, testMetric) + +} + +func TestParseInvalid(t *testing.T) { + parser := NewWavefrontParser(nil) + + _, err := parser.Parse([]byte("test.metric")) + assert.Error(t, err) + + _, err = parser.Parse([]byte("test.metric string")) + assert.Error(t, err) + + _, err = parser.Parse([]byte("test.metric 1 string")) + assert.Error(t, err) + + _, err = parser.Parse([]byte("test.metric 1 1530939936 tag_no_pair")) + assert.Error(t, err) + + _, err = parser.Parse([]byte("test.metric 1 1530939936 tag_broken_value=\"")) + assert.Error(t, err) + + _, err = parser.Parse([]byte("\"test.metric 1 1530939936")) + assert.Error(t, err) + + _, err = parser.Parse([]byte("test.metric 1 1530939936 tag1=val\\\"ue1")) + assert.Error(t, err) + +} + +func TestParseDefaultTags(t *testing.T) { + parser := NewWavefrontParser(map[string]string{"myDefault": "value1", "another": "test2"}) + + parsedMetrics, err := parser.Parse([]byte("test.metric 1 1530939936")) + assert.NoError(t, err) + testMetric, err := metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2", "source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + 
parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 another=\"test3\"")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + +} diff --git a/plugins/parsers/wavefront/scanner.go b/plugins/parsers/wavefront/scanner.go new file mode 100644 index 000000000..e64516f54 --- /dev/null +++ b/plugins/parsers/wavefront/scanner.go @@ -0,0 +1,69 @@ +package wavefront + +import ( + "bufio" + "io" +) + +// Lexical Point Scanner +type PointScanner struct { + r *bufio.Reader +} + +func NewScanner(r io.Reader) *PointScanner { + return &PointScanner{r: bufio.NewReader(r)} +} + +// read reads the next rune from the buffered reader. +// Returns rune(0) if an error occurs (or io.EOF is returned). +func (s *PointScanner) read() rune { + ch, _, err := s.r.ReadRune() + if err != nil { + return eof + } + return ch +} + +// unread places the previously read rune back on the reader. +func (s *PointScanner) unread() { + _ = s.r.UnreadRune() +} + +// Scan returns the next token and literal value. +func (s *PointScanner) Scan() (Token, string) { + + // Read the next rune + ch := s.read() + if isWhitespace(ch) { + return WS, string(ch) + } else if isLetter(ch) { + return LETTER, string(ch) + } else if isNumber(ch) { + return NUMBER, string(ch) + } + + // Otherwise read the individual character. + switch ch { + case eof: + return EOF, "" + case '\n': + return NEWLINE, string(ch) + case '.': + return DOT, string(ch) + case '-': + return MINUS_SIGN, string(ch) + case '_': + return UNDERSCORE, string(ch) + case '/': + return SLASH, string(ch) + case '\\': + return BACKSLASH, string(ch) + case ',': + return COMMA, string(ch) + case '"': + return QUOTES, string(ch) + case '=': + return EQUALS, string(ch) + } + return ILLEGAL, string(ch) +} diff --git a/plugins/parsers/wavefront/token.go b/plugins/parsers/wavefront/token.go new file mode 100644 index 000000000..bbcbf4e76 --- /dev/null +++ b/plugins/parsers/wavefront/token.go @@ -0,0 +1,41 @@ +package wavefront + +type Token int + +const ( + // Special tokens + ILLEGAL Token = iota + EOF + WS + + // Literals + literal_beg + LETTER // metric name, source/point tags + NUMBER + MINUS_SIGN + UNDERSCORE + DOT + SLASH + BACKSLASH + COMMA + literal_end + + // Misc characters + QUOTES + EQUALS + NEWLINE +) + +func isWhitespace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' +} + +func isLetter(ch rune) bool { + return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') +} + +func isNumber(ch rune) bool { + return ch >= '0' && ch <= '9' +} + +var eof = rune(0) From 7ca7f22e500567e979f69e43b57256ead5e22026 Mon Sep 17 00:00:00 2001 From: Harry Schmidt Date: Mon, 13 Aug 2018 16:38:46 -0700 Subject: [PATCH 0073/1815] Add rename processor (#4528) --- README.md | 1 + plugins/processors/all/all.go | 1 + plugins/processors/rename/README.md | 41 ++++++++++++ plugins/processors/rename/rename.go | 82 ++++++++++++++++++++++++ plugins/processors/rename/rename_test.go | 58 +++++++++++++++++ 5 files changed, 183 insertions(+) create mode 100644 plugins/processors/rename/README.md create mode 100644 plugins/processors/rename/rename.go create mode 100644 plugins/processors/rename/rename_test.go diff --git a/README.md b/README.md index 700f0dd2a..5f7cb31c9 100644 --- a/README.md +++ b/README.md @@ -279,6 +279,7 @@ formats may be used with 
input plugins supporting the `data_format` option: * [override](./plugins/processors/override) * [printer](./plugins/processors/printer) * [regex](./plugins/processors/regex) +* [rename](./plugins/processors/rename) * [topk](./plugins/processors/topk) ## Aggregator Plugins diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go index c06bbd426..f581ea602 100644 --- a/plugins/processors/all/all.go +++ b/plugins/processors/all/all.go @@ -6,5 +6,6 @@ import ( _ "github.com/influxdata/telegraf/plugins/processors/override" _ "github.com/influxdata/telegraf/plugins/processors/printer" _ "github.com/influxdata/telegraf/plugins/processors/regex" + _ "github.com/influxdata/telegraf/plugins/processors/rename" _ "github.com/influxdata/telegraf/plugins/processors/topk" ) diff --git a/plugins/processors/rename/README.md b/plugins/processors/rename/README.md new file mode 100644 index 000000000..dbd31490e --- /dev/null +++ b/plugins/processors/rename/README.md @@ -0,0 +1,41 @@ +# Rename Processor Plugin + +The `rename` processor renames measurements, fields, and tags. + +### Configuration: + +```toml +## Measurement, tag, and field renamings are stored in separate sub-tables. +## Specify one sub-table per rename operation. +[[processors.rename]] +[[processors.rename.measurement]] + ## measurement to change + from = "network_interface_throughput" + to = "throughput" + +[[processors.rename.tag]] + ## tag to change + from = "hostname" + to = "host" + +[[processors.rename.field]] + ## field to change + from = "lower" + to = "min" + +[[processors.rename.field]] + ## field to change + from = "upper" + to = "max" +``` + +### Tags: + +No tags are applied by this processor, though it can alter them by renaming. + +### Example processing: + +```diff +- network_interface_throughput,hostname=backend.example.com,units=kbps lower=10i,upper=1000i,mean=500i 1502489900000000000 ++ throughput,host=backend.example.com,units=kbps min=10i,max=1000i,mean=500i 1502489900000000000 +``` diff --git a/plugins/processors/rename/rename.go b/plugins/processors/rename/rename.go new file mode 100644 index 000000000..2da787a35 --- /dev/null +++ b/plugins/processors/rename/rename.go @@ -0,0 +1,82 @@ +package rename + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" +) + +const sampleConfig = ` + ## Measurement, tag, and field renamings are stored in separate sub-tables. + ## Specify one sub-table per rename operation. + # [[processors.rename.measurement]] + # ## measurement to change + # from = "kilobytes_per_second" + # to = "kbps" + + # [[processors.rename.tag]] + # ## tag to change + # from = "host" + # to = "hostname" + + # [[processors.rename.field]] + # ## field to change + # from = "lower" + # to = "min" + + # [[processors.rename.field]] + # ## field to change + # from = "upper" + # to = "max" +` + +type renamer struct { + From string + To string +} + +type Rename struct { + Measurement []renamer + Tag []renamer + Field []renamer +} + +func (r *Rename) SampleConfig() string { + return sampleConfig +} + +func (r *Rename) Description() string { + return "Rename measurements, tags, and fields that pass through this filter." 
+}
+
+func (r *Rename) Apply(in ...telegraf.Metric) []telegraf.Metric {
+	for _, point := range in {
+		for _, measurementRenamer := range r.Measurement {
+			if point.Name() == measurementRenamer.From {
+				point.SetName(measurementRenamer.To)
+				break
+			}
+		}
+
+		for _, tagRenamer := range r.Tag {
+			if value, ok := point.GetTag(tagRenamer.From); ok {
+				point.RemoveTag(tagRenamer.From)
+				point.AddTag(tagRenamer.To, value)
+			}
+		}
+
+		for _, fieldRenamer := range r.Field {
+			if value, ok := point.GetField(fieldRenamer.From); ok {
+				point.RemoveField(fieldRenamer.From)
+				point.AddField(fieldRenamer.To, value)
+			}
+		}
+	}
+
+	return in
+}
+
+func init() {
+	processors.Add("rename", func() telegraf.Processor {
+		return &Rename{}
+	})
+}
diff --git a/plugins/processors/rename/rename_test.go b/plugins/processors/rename/rename_test.go
new file mode 100644
index 000000000..43f7fcc30
--- /dev/null
+++ b/plugins/processors/rename/rename_test.go
@@ -0,0 +1,58 @@
+package rename
+
+import (
+	"testing"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
+	"github.com/stretchr/testify/assert"
+)
+
+func newMetric(name string, tags map[string]string, fields map[string]interface{}) telegraf.Metric {
+	if tags == nil {
+		tags = map[string]string{}
+	}
+	if fields == nil {
+		fields = map[string]interface{}{}
+	}
+	m, _ := metric.New(name, tags, fields, time.Now())
+	return m
+}
+
+func TestMeasurementRename(t *testing.T) {
+	r := Rename{}
+	r.Measurement = []renamer{
+		{From: "foo", To: "bar"},
+		{From: "baz", To: "quux"},
+	}
+	m1 := newMetric("foo", nil, nil)
+	m2 := newMetric("bar", nil, nil)
+	m3 := newMetric("baz", nil, nil)
+	results := r.Apply(m1, m2, m3)
+	assert.Equal(t, "bar", results[0].Name(), "Should change name from 'foo' to 'bar'")
+	assert.Equal(t, "bar", results[1].Name(), "Should not change the name 'bar'")
+	assert.Equal(t, "quux", results[2].Name(), "Should change name from 'baz' to 'quux'")
+}
+
+func TestTagRename(t *testing.T) {
+	r := Rename{}
+	r.Tag = []renamer{
+		{From: "hostname", To: "host"},
+	}
+	m := newMetric("foo", map[string]string{"hostname": "localhost", "region": "east-1"}, nil)
+	results := r.Apply(m)
+
+	assert.Equal(t, map[string]string{"host": "localhost", "region": "east-1"}, results[0].Tags(), "Should change tag 'hostname' to 'host'")
+}
+
+func TestFieldRename(t *testing.T) {
+	r := Rename{}
+	r.Field = []renamer{
+		{From: "time_msec", To: "time"},
+	}
+	m := newMetric("foo", nil, map[string]interface{}{"time_msec": int64(1250), "snakes": true})
+	results := r.Apply(m)
+
+	assert.Equal(t, map[string]interface{}{"time": int64(1250), "snakes": true}, results[0].Fields(), "Should change field 'time_msec' to 'time'")
+}
From 6ad50893612886d7c75127844461fb05b6706b2f Mon Sep 17 00:00:00 2001
From: shrug42 <40281011+shrug42@users.noreply.github.com>
Date: Mon, 13 Aug 2018 16:41:23 -0700
Subject: [PATCH 0074/1815] Add gopsutil meminfo fields to mem plugin (#4546)

---
 Gopkg.lock                        |  4 +--
 Gopkg.toml                        |  2 +-
 plugins/inputs/mem/memory.go      | 21 ++++++++++++++++
 plugins/inputs/mem/memory_test.go | 42 +++++++++++++++++++++++++++++++
 4 files changed, 66 insertions(+), 3 deletions(-)

diff --git a/Gopkg.lock b/Gopkg.lock
index 4a70b057d..ce842f3e1 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -856,8 +856,8 @@
     "process",
   ]
   pruneopts = ""
-  revision = "4a180b209f5f494e5923cfce81ea30ba23915877"
-  version = "v2.18.06"
+  revision = "8048a2e9c5773235122027dd585cf821b2af1249"
+  version = "v2.18.07"
 
 [[projects]]
   branch = "master"
diff --git a/Gopkg.toml b/Gopkg.toml
index 799b5243c..d282e1ebd 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -148,7 +148,7 @@ [[constraint]] name = "github.com/shirou/gopsutil" - version = "2.18.05" + version = "2.18.07" [[constraint]] name = "github.com/Shopify/sarama" diff --git a/plugins/inputs/mem/memory.go b/plugins/inputs/mem/memory.go index f664dd3f4..a7d887cbe 100644 --- a/plugins/inputs/mem/memory.go +++ b/plugins/inputs/mem/memory.go @@ -37,6 +37,27 @@ func (s *MemStats) Gather(acc telegraf.Accumulator) error { "slab": vm.Slab, "used_percent": 100 * float64(vm.Used) / float64(vm.Total), "available_percent": 100 * float64(vm.Available) / float64(vm.Total), + "commit_limit": vm.CommitLimit, + "committed_as": vm.CommittedAS, + "dirty": vm.Dirty, + "high_free": vm.HighFree, + "high_total": vm.HighTotal, + "huge_page_size": vm.HugePageSize, + "huge_pages_free": vm.HugePagesFree, + "huge_pages_total": vm.HugePagesTotal, + "low_free": vm.LowFree, + "low_total": vm.LowTotal, + "mapped": vm.Mapped, + "page_tables": vm.PageTables, + "shared": vm.Shared, + "swap_cached": vm.SwapCached, + "swap_free": vm.SwapFree, + "swap_total": vm.SwapTotal, + "vmalloc_chunk": vm.VMallocChunk, + "vmalloc_total": vm.VMallocTotal, + "vmalloc_used": vm.VMallocUsed, + "write_back": vm.Writeback, + "write_back_tmp": vm.WritebackTmp, } acc.AddGauge("mem", fields, nil) diff --git a/plugins/inputs/mem/memory_test.go b/plugins/inputs/mem/memory_test.go index ef9af8d22..06f2f6ea9 100644 --- a/plugins/inputs/mem/memory_test.go +++ b/plugins/inputs/mem/memory_test.go @@ -27,6 +27,27 @@ func TestMemStats(t *testing.T) { // Buffers: 771, // Cached: 4312, // Shared: 2142, + CommitLimit: 1, + CommittedAS: 118680, + Dirty: 4, + HighFree: 0, + HighTotal: 0, + HugePageSize: 4096, + HugePagesFree: 0, + HugePagesTotal: 0, + LowFree: 69936, + LowTotal: 255908, + Mapped: 42236, + PageTables: 1236, + Shared: 0, + SwapCached: 0, + SwapFree: 524280, + SwapTotal: 524280, + VMallocChunk: 3872908, + VMallocTotal: 3874808, + VMallocUsed: 1416, + Writeback: 0, + WritebackTmp: 0, } mps.On("VMStat").Return(vms, nil) @@ -47,6 +68,27 @@ func TestMemStats(t *testing.T) { "inactive": uint64(1124), "wired": uint64(134), "slab": uint64(1234), + "commit_limit": uint64(1), + "committed_as": uint64(118680), + "dirty": uint64(4), + "high_free": uint64(0), + "high_total": uint64(0), + "huge_page_size": uint64(4096), + "huge_pages_free": uint64(0), + "huge_pages_total": uint64(0), + "low_free": uint64(69936), + "low_total": uint64(255908), + "mapped": uint64(42236), + "page_tables": uint64(1236), + "shared": uint64(0), + "swap_cached": uint64(0), + "swap_free": uint64(524280), + "swap_total": uint64(524280), + "vmalloc_chunk": uint64(3872908), + "vmalloc_total": uint64(3874808), + "vmalloc_used": uint64(1416), + "write_back": uint64(0), + "write_back_tmp": uint64(0), } acc.AssertContainsTaggedFields(t, "mem", memfields, make(map[string]string)) From 3268937c4743ac4d977f74f757eca0f1a46c94b8 Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Mon, 13 Aug 2018 17:47:09 -0600 Subject: [PATCH 0075/1815] Update changelog --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d53c2576b..461f4425d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,10 +13,12 @@ - [x509_cert](./plugins/inputs/x509_cert/README.md) - Contributed by @jtyr - [filecount](./plugins/inputs/filecount/README.md) - Contributed by @sometimesfood - [pgbouncer](./plugins/inputs/pgbouncer/README.md) - Contributed by @nerzhul +- [activemq](./plugins/inputs/activemq/README.md) - 
Contributed by @mlabouardy ### New Processors - [enum](./plugins/processors/enum/README.md) - Contributed by @KarstenSchnitter +- [rename](./plugins/processors/rename/README.md) - Contributed by @goldibex ### New Aggregators @@ -56,6 +58,11 @@ - [#4364](https://github.com/influxdata/telegraf/pull/4364): Support StatisticValues in cloudwatch output plugin. - [#4431](https://github.com/influxdata/telegraf/pull/4431): Add ip restriction for the prometheus_client output. - [#3918](https://github.com/influxdata/telegraf/pull/3918): Add pgbouncer input plugin. +- [#2689](https://github.com/influxdata/telegraf/pull/2689): Add ActiveMQ input plugin. +- [#4402](https://github.com/influxdata/telegraf/pull/4402): Add wavefront parser. +- [#4528](https://github.com/influxdata/telegraf/pull/4528): Add rename processor. +- [#4537](https://github.com/influxdata/telegraf/pull/4537): Add message 'max_bytes' configuration to kafka input. +- [#4546](https://github.com/influxdata/telegraf/pull/4546): Add gopsutil meminfo fields to mem plugin. ## v1.7.3 [2018-08-07] From 763dc6990c9a89cdd2278ef27f7933a3ac7288c3 Mon Sep 17 00:00:00 2001 From: dupondje Date: Tue, 14 Aug 2018 22:35:39 +0200 Subject: [PATCH 0076/1815] Fix powerdns input test (#4554) --- plugins/inputs/powerdns/powerdns_test.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/powerdns/powerdns_test.go b/plugins/inputs/powerdns/powerdns_test.go index 56666a886..fe64be5db 100644 --- a/plugins/inputs/powerdns/powerdns_test.go +++ b/plugins/inputs/powerdns/powerdns_test.go @@ -1,8 +1,6 @@ package powerdns import ( - "crypto/rand" - "encoding/binary" "fmt" "net" "testing" @@ -70,10 +68,9 @@ func (s statServer) serverSocket(l net.Listener) { } } -func TestMemcachedGeneratesMetrics(t *testing.T) { +func TestPowerdnsGeneratesMetrics(t *testing.T) { // We create a fake server to return test data - var randomNumber int64 - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + randomNumber := int64(5239846799706671610) socket, err := net.Listen("unix", fmt.Sprintf("/tmp/pdns%d.controlsocket", randomNumber)) if err != nil { t.Fatal("Cannot initialize server on port ") From fa30f568ecd224ee64baf4399c22fe2aa9d5d532 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 14 Aug 2018 13:36:29 -0700 Subject: [PATCH 0077/1815] Skip unserializable metric in influxDB UDP output (#4534) --- plugins/outputs/influxdb/udp.go | 12 ++-- plugins/outputs/influxdb/udp_test.go | 88 +++++++++++++++-------- plugins/serializers/influx/influx.go | 73 ++++++++++--------- plugins/serializers/influx/influx_test.go | 15 ++-- plugins/serializers/influx/reader.go | 13 +--- 5 files changed, 119 insertions(+), 82 deletions(-) diff --git a/plugins/outputs/influxdb/udp.go b/plugins/outputs/influxdb/udp.go index 5b3f5ce51..62f2a6ab7 100644 --- a/plugins/outputs/influxdb/udp.go +++ b/plugins/outputs/influxdb/udp.go @@ -3,11 +3,11 @@ package influxdb import ( "context" "fmt" + "log" "net" "net/url" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/influx" ) @@ -28,7 +28,7 @@ type Conn interface { type UDPConfig struct { MaxPayloadSize int URL *url.URL - Serializer serializers.Serializer + Serializer *influx.Serializer Dialer Dialer } @@ -65,7 +65,7 @@ func NewUDPClient(config *UDPConfig) (*udpClient, error) { type udpClient struct { conn Conn dialer Dialer - serializer serializers.Serializer + serializer *influx.Serializer url *url.URL } @@ -89,7 +89,11 @@ 
func (c *udpClient) Write(ctx context.Context, metrics []telegraf.Metric) error for _, metric := range metrics { octets, err := c.serializer.Serialize(metric) if err != nil { - return fmt.Errorf("could not serialize metric: %v", err) + // Since we are serializing multiple metrics, don't fail the + // entire batch just because of one unserializable metric. + log.Printf("E! [outputs.influxdb] when writing to [%s] could not serialize metric: %v", + c.URL(), err) + continue } _, err = c.conn.Write(octets) diff --git a/plugins/outputs/influxdb/udp_test.go b/plugins/outputs/influxdb/udp_test.go index 9bced4262..61b3f1ded 100644 --- a/plugins/outputs/influxdb/udp_test.go +++ b/plugins/outputs/influxdb/udp_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "log" "net" "net/url" "sync" @@ -13,7 +14,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs/influxdb" - "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/stretchr/testify/require" ) @@ -65,19 +65,6 @@ func (d *MockDialer) DialContext(ctx context.Context, network string, address st return d.DialContextF(network, address) } -type MockSerializer struct { - SerializeF func(metric telegraf.Metric) ([]byte, error) - SerializeBatchF func(metrics []telegraf.Metric) ([]byte, error) -} - -func (s *MockSerializer) Serialize(metric telegraf.Metric) ([]byte, error) { - return s.SerializeF(metric) -} - -func (s *MockSerializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { - return s.SerializeBatchF(metrics) -} - func TestUDP_NewUDPClientNoURL(t *testing.T) { config := &influxdb.UDPConfig{} _, err := influxdb.NewUDPClient(config) @@ -177,28 +164,69 @@ func TestUDP_WriteError(t *testing.T) { require.True(t, closed) } -func TestUDP_SerializeError(t *testing.T) { - config := &influxdb.UDPConfig{ - URL: getURL(), - Dialer: &MockDialer{ - DialContextF: func(network, address string) (influxdb.Conn, error) { - conn := &MockConn{} - return conn, nil +func TestUDP_ErrorLogging(t *testing.T) { + tests := []struct { + name string + config *influxdb.UDPConfig + metrics []telegraf.Metric + logContains string + }{ + { + name: "logs need more space", + config: &influxdb.UDPConfig{ + MaxPayloadSize: 1, + URL: getURL(), + Dialer: &MockDialer{ + DialContextF: func(network, address string) (influxdb.Conn, error) { + conn := &MockConn{} + return conn, nil + }, + }, }, + metrics: []telegraf.Metric{getMetric()}, + logContains: `could not serialize metric: "cpu": need more space`, }, - Serializer: &MockSerializer{ - SerializeF: func(metric telegraf.Metric) ([]byte, error) { - return nil, influx.ErrNeedMoreSpace + { + name: "logs series name", + config: &influxdb.UDPConfig{ + URL: getURL(), + Dialer: &MockDialer{ + DialContextF: func(network, address string) (influxdb.Conn, error) { + conn := &MockConn{} + return conn, nil + }, + }, }, + metrics: []telegraf.Metric{ + func() telegraf.Metric { + metric, _ := metric.New( + "cpu", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ) + return metric + }(), + }, + logContains: `could not serialize metric: "cpu,host=example.org": no serializable fields`, }, } - client, err := influxdb.NewUDPClient(config) - require.NoError(t, err) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var b bytes.Buffer + log.SetOutput(&b) - ctx := context.Background() - err = client.Write(ctx, []telegraf.Metric{getMetric()}) - require.Error(t, err) - 
require.Contains(t, err.Error(), influx.ErrNeedMoreSpace.Error()) + client, err := influxdb.NewUDPClient(tt.config) + require.NoError(t, err) + + ctx := context.Background() + err = client.Write(ctx, tt.metrics) + require.NoError(t, err) + require.Contains(t, b.String(), tt.logContains) + }) + } } func TestUDP_WriteWithRealConn(t *testing.T) { diff --git a/plugins/serializers/influx/influx.go b/plugins/serializers/influx/influx.go index f052c9c93..2989e44e9 100644 --- a/plugins/serializers/influx/influx.go +++ b/plugins/serializers/influx/influx.go @@ -27,30 +27,34 @@ const ( UintSupport FieldTypeSupport = 1 << iota ) -// MetricError is an error causing a metric to be unserializable. +var ( + NeedMoreSpace = "need more space" + InvalidName = "invalid name" + NoFields = "no serializable fields" +) + +// MetricError is an error causing an entire metric to be unserializable. type MetricError struct { - s string + series string + reason string } func (e MetricError) Error() string { - return e.s + if e.series != "" { + return fmt.Sprintf("%q: %s", e.series, e.reason) + } + return e.reason } // FieldError is an error causing a field to be unserializable. type FieldError struct { - s string + reason string } func (e FieldError) Error() string { - return e.s + return e.reason } -var ( - ErrNeedMoreSpace = &MetricError{"need more space"} - ErrInvalidName = &MetricError{"invalid name"} - ErrNoFields = &MetricError{"no serializable fields"} -) - // Serializer is a serializer for line protocol. type Serializer struct { maxLineBytes int @@ -102,17 +106,20 @@ func (s *Serializer) Serialize(m telegraf.Metric) ([]byte, error) { return out, nil } +// SerializeBatch writes the slice of metrics and returns a byte slice of the +// results. The returned byte slice may contain multiple lines of data. func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { - var batch bytes.Buffer + s.buf.Reset() for _, m := range metrics { - _, err := s.Write(&batch, m) + _, err := s.Write(&s.buf, m) if err != nil { return nil, err } } - return batch.Bytes(), nil + out := make([]byte, s.buf.Len()) + copy(out, s.buf.Bytes()) + return out, nil } - func (s *Serializer) Write(w io.Writer, m telegraf.Metric) (int, error) { err := s.writeMetric(w, m) return s.bytesWritten, err @@ -135,7 +142,7 @@ func (s *Serializer) buildHeader(m telegraf.Metric) error { name := nameEscape(m.Name()) if name == "" { - return ErrInvalidName + return s.newMetricError(InvalidName) } s.header = append(s.header, name...) @@ -222,9 +229,10 @@ func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error { } if s.maxLineBytes > 0 && bytesNeeded > s.maxLineBytes { - // Need at least one field per line + // Need at least one field per line, this metric cannot be fit + // into the max line bytes. 
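+			// If no field has fit on the line yet, splitting onto a new
+			// line cannot help, so the metric is rejected below.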
 			if firstField {
-				return ErrNeedMoreSpace
+				return s.newMetricError(NeedMoreSpace)
 			}
 
 			err = s.write(w, s.footer)
@@ -232,21 +240,12 @@ func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error {
 				return err
 			}
 
+			firstField = true
 			bytesNeeded = len(s.header) + len(s.pair) + len(s.footer)
 
-			if s.maxLineBytes > 0 && bytesNeeded > s.maxLineBytes {
-				return ErrNeedMoreSpace
+			if bytesNeeded > s.maxLineBytes {
+				return s.newMetricError(NeedMoreSpace)
 			}
-
-			err = s.write(w, s.header)
-			if err != nil {
-				return err
-			}
-
-			s.write(w, s.pair)
-			pairsLen += len(s.pair)
-			firstField = false
-			continue
 		}
 
 		if firstField {
@@ -261,18 +260,28 @@ func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error {
 			}
 		}
 
-		s.write(w, s.pair)
+		err = s.write(w, s.pair)
+		if err != nil {
+			return err
+		}
 		pairsLen += len(s.pair)
 		firstField = false
 	}
 
 	if firstField {
-		return ErrNoFields
+		return s.newMetricError(NoFields)
 	}
 
 	return s.write(w, s.footer)
+}
 
+func (s *Serializer) newMetricError(reason string) *MetricError {
+	if len(s.header) != 0 {
+		series := bytes.TrimRight(s.header, " ")
+		return &MetricError{series: string(series), reason: reason}
+	}
+	return &MetricError{reason: reason}
 }
 
 func (s *Serializer) appendFieldValue(buf []byte, value interface{}) ([]byte, error) {
diff --git a/plugins/serializers/influx/influx_test.go b/plugins/serializers/influx/influx_test.go
index 74bffe5e4..2c1cbd587 100644
--- a/plugins/serializers/influx/influx_test.go
+++ b/plugins/serializers/influx/influx_test.go
@@ -23,7 +23,7 @@ var tests = []struct {
 	typeSupport FieldTypeSupport
 	input       telegraf.Metric
 	output      []byte
-	err         error
+	errReason   string
 }{
 	{
 		name: "minimal",
@@ -98,7 +98,7 @@ var tests = []struct {
 				time.Unix(0, 0),
 			),
 		),
-		err: ErrNoFields,
+		errReason: NoFields,
 	},
 	{
 		name: "float Inf",
@@ -333,8 +333,8 @@ var tests = []struct {
 				time.Unix(1519194109, 42),
 			),
 		),
-		output: nil,
-		err:    ErrNeedMoreSpace,
+		output:    nil,
+		errReason: NeedMoreSpace,
 	},
 	{
 		name: "no fields",
@@ -346,7 +346,7 @@ var tests = []struct {
 				time.Unix(0, 0),
 			),
 		),
-		err: ErrNoFields,
+		errReason: NoFields,
 	},
 	{
 		name: "procstat",
@@ -427,7 +427,10 @@ func TestSerializer(t *testing.T) {
 			serializer.SetFieldSortOrder(SortFields)
 			serializer.SetFieldTypeSupport(tt.typeSupport)
 			output, err := serializer.Serialize(tt.input)
-			require.Equal(t, tt.err, err)
+			if tt.errReason != "" {
+				require.Error(t, err)
+				require.Contains(t, err.Error(), tt.errReason)
+			}
 			require.Equal(t, string(tt.output), string(output))
 		})
 	}
diff --git a/plugins/serializers/influx/reader.go b/plugins/serializers/influx/reader.go
index 4a755c88d..d0dad8eeb 100644
--- a/plugins/serializers/influx/reader.go
+++ b/plugins/serializers/influx/reader.go
@@ -2,7 +2,6 @@ package influx
 
 import (
 	"bytes"
-	"fmt"
 	"io"
 	"log"
 
@@ -54,17 +53,11 @@ func (r *reader) Read(p []byte) (int, error) {
 		r.offset += 1
 		if err != nil {
 			r.buf.Reset()
-			switch err.(type) {
-			case *MetricError:
-				// Since we are serializing an array of metrics, don't fail
+			if err != nil {
+				// Since we are serializing multiple metrics, don't fail
 				// the entire batch just because of one unserializable metric.
-				log.Printf(
-					"D! [serializers.influx] could not serialize metric %q: %v; discarding metric",
-					metric.Name(), err)
+				log.Printf("E! [serializers.influx] could not serialize metric: %v; discarding metric", err)
 				continue
-			default:
-				fmt.Println(err)
-				return 0, err
 			}
 		}
 		break
From 027016aea2cd65254cdf11d70413fb74dbd763a0 Mon Sep 17 00:00:00 2001
From: Greg Linton
Date: Tue, 14 Aug 2018 14:41:24 -0600
Subject: [PATCH 0078/1815] Update changelog

---
 CHANGELOG.md               | 7 +++++++
 docs/DATA_FORMATS_INPUT.md | 7 ++-----
 etc/telegraf.conf          | 2 +-
 3 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 461f4425d..89bbfe90e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -64,6 +64,13 @@
 - [#4537](https://github.com/influxdata/telegraf/pull/4537): Add message 'max_bytes' configuration to kafka input.
 - [#4546](https://github.com/influxdata/telegraf/pull/4546): Add gopsutil meminfo fields to mem plugin.
 
+## v1.7.4 [unreleased]
+
+### Bugfixes
+
+- [#4534](https://github.com/influxdata/telegraf/pull/4534): Skip unserializable metric in InfluxDB UDP output.
+- [#4554](https://github.com/influxdata/telegraf/pull/4554): Fix powerdns input tests.
+
 ## v1.7.3 [2018-08-07]
 
 ### Bugfixes
diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md
index 00ead6e38..753523843 100644
--- a/docs/DATA_FORMATS_INPUT.md
+++ b/docs/DATA_FORMATS_INPUT.md
@@ -10,7 +10,7 @@ Telegraf is able to parse the following input data formats into metrics:
 1. [Collectd](#collectd)
 1. [Dropwizard](#dropwizard)
 1. [Grok](#grok)
-1. [Wavefront](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#wavefront)
+1. [Wavefront](#wavefront)
 
 Telegraf metrics, like InfluxDB
 [points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
@@ -661,7 +661,7 @@ For more information about the dropwizard json format see
 #   tag2 = "tags.tag2"
 ```
 
-# Grok
+# Grok:
 The grok data format parses line delimited data using a regular expression like
 language.
@@ -881,9 +881,6 @@ the file output will only print once per `flush_interval`.
   - If successful, add the next token, update the pattern and retest.
   - Continue one token at a time until the entire line is successfully
     parsed.
-
-```
-
 # Wavefront:
 Wavefront Data Format metrics are parsed directly into Telegraf metrics.
diff --git a/etc/telegraf.conf b/etc/telegraf.conf
index c3a84569d..9315aa457 100644
--- a/etc/telegraf.conf
+++ b/etc/telegraf.conf
@@ -1980,7 +1980,7 @@
 #   timeout = "20s"
 
 #   ## Schema Version: (Optional, defaults to version 1)
-#   schemaVersion = 2
+#   metric_version = 2
 
 
 # # Gather packets and bytes counters from Linux ipsets
From e50b0c17adb20adf7aaae116e2fd0688d7f6f2c8 Mon Sep 17 00:00:00 2001
From: Adrián López
Date: Wed, 15 Aug 2018 04:53:25 +0800
Subject: [PATCH 0079/1815] Document how to parse telegraf logs (#4285)

---
 plugins/inputs/logparser/README.md | 42 ++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md
index d35a94a70..27cbc3cf4 100644
--- a/plugins/inputs/logparser/README.md
+++ b/plugins/inputs/logparser/README.md
@@ -220,6 +220,48 @@ A multi-line literal string allows us to encode the pattern:
   custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
 ```
 
+#### Parsing Telegraf log file
+We can use logparser to convert the log lines generated by Telegraf into metrics.
+
+To do this we need to configure Telegraf to write logs to a file.
+This can be done using the `agent.logfile` parameter or by configuring syslog.
+```toml +[agent] + logfile = "/var/log/telegraf/telegraf.log" +``` + +Logparser configuration: +```toml +[[inputs.logparser]] + files = ["/var/log/telegraf/telegraf.log"] + + [inputs.logparser.grok] + measurement = "telegraf_log" + patterns = ['^%{TIMESTAMP_ISO8601:timestamp:ts-rfc3339} %{TELEGRAF_LOG_LEVEL:level:tag}! %{GREEDYDATA:msg}'] + custom_patterns = ''' +TELEGRAF_LOG_LEVEL (?:[DIWE]+) +''' +``` + +Example log lines: +``` +2018-06-14T06:41:35Z I! Starting Telegraf v1.6.4 +2018-06-14T06:41:35Z I! Agent Config: Interval:3s, Quiet:false, Hostname:"archer", Flush Interval:3s +2018-02-20T22:39:20Z E! Error in plugin [inputs.docker]: took longer to collect than collection interval (10s) +2018-06-01T10:34:05Z W! Skipping a scheduled flush because there is already a flush ongoing. +2018-06-14T07:33:33Z D! Output [file] buffer fullness: 0 / 10000 metrics. +``` + +Generated metrics: +``` +telegraf_log,host=somehostname,level=I msg="Starting Telegraf v1.6.4" 1528958495000000000 +telegraf_log,host=somehostname,level=I msg="Agent Config: Interval:3s, Quiet:false, Hostname:\"somehostname\", Flush Interval:3s" 1528958495001000000 +telegraf_log,host=somehostname,level=E msg="Error in plugin [inputs.docker]: took longer to collect than collection interval (10s)" 1519166360000000000 +telegraf_log,host=somehostname,level=W msg="Skipping a scheduled flush because there is already a flush ongoing." 1527849245000000000 +telegraf_log,host=somehostname,level=D msg="Output [file] buffer fullness: 0 / 10000 metrics." 1528961613000000000 +``` + + ### Tips for creating patterns Writing complex patterns can be difficult, here is some advice for writing a From b29dd260b2f23d41b75c1e92c1bc6db3defd3c09 Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Tue, 14 Aug 2018 14:54:19 -0600 Subject: [PATCH 0080/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 89bbfe90e..520a4fb43 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -63,6 +63,7 @@ - [#4528](https://github.com/influxdata/telegraf/pull/4528): Add rename processor. - [#4537](https://github.com/influxdata/telegraf/pull/4537): Add message 'max_bytes' configuration to kafka input. - [#4546](https://github.com/influxdata/telegraf/pull/4546): Add gopsutil meminfo fields to mem plugin. +- [#4285](https://github.com/influxdata/telegraf/pull/4285): Document how to parse telegraf logs. 
## v1.7.4 [unreleased] From 6e92df45e74acf563760a004e80eff5a4a6b80fa Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 14 Aug 2018 14:55:38 -0700 Subject: [PATCH 0081/1815] Use dep v0.5.0 (#4542) --- Gopkg.lock | 265 +++++++++++++++++++++++++++-------------- README.md | 2 +- appveyor.yml | 4 +- scripts/ci-1.10.docker | 4 +- scripts/ci-1.9.docker | 4 +- 5 files changed, 181 insertions(+), 98 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index ce842f3e1..ef76419ba 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -3,14 +3,14 @@ [[projects]] branch = "master" - digest = "1:d7582b4af1b0b953ff2bb9573a50f787c7e1669cb148fb086a3d1c670a1ac955" + digest = "1:fc0802104acded1f48e4860a9f2db85b82b4a754fca9eae750ff4e8b8cdf2116" name = "code.cloudfoundry.org/clock" packages = ["."] pruneopts = "" revision = "02e53af36e6c978af692887ed449b74026d76fec" [[projects]] - digest = "1:ce7dc0f1ffcd9a2aacc50ae6d322eebff8f4faa2d6c5f445c874cd0b77a63de7" + digest = "1:ca3acef20fd660d4df327accbf3ca2df9a12213d914f3113305dcd56579324b9" name = "collectd.org" packages = [ "api", @@ -23,7 +23,7 @@ [[projects]] branch = "master" - digest = "1:c1269bfaddefd090935401c291ad5df6c03de605a440e941ecc568e19f0f9e3b" + digest = "1:298712a3ee36b59c3ca91f4183bd75d174d5eaa8b4aed5072831f126e2e752f6" name = "github.com/Microsoft/ApplicationInsights-Go" packages = [ "appinsights", @@ -33,7 +33,7 @@ revision = "d2df5d440eda5372f24fcac03839a64d6cb5f7e5" [[projects]] - digest = "1:ec6a42cd98d70f0916216d8f6df8ca61145edeaad041014aa9c003068de7364c" + digest = "1:45ec6eb579713a01991ad07f538fed3b576ee55f5ce9f248320152a9270d9258" name = "github.com/Microsoft/go-winio" packages = ["."] pruneopts = "" @@ -41,7 +41,7 @@ version = "v0.4.9" [[projects]] - digest = "1:14af5ba5ac88efec490fb59734df34e1bd973198caefa7b0cceed0900ef6164c" + digest = "1:9362b2212139b7821f73a86169bf80ce6b0264956f87d82ab3aeedb2b5c08fea" name = "github.com/Shopify/sarama" packages = ["."] pruneopts = "" @@ -57,7 +57,7 @@ version = "1.0.0" [[projects]] - digest = "1:855af787df6b733016849082d66ffda5e0e00856513fcac08a7cf199a23515c2" + digest = "1:f296e8b29c60c94efed3b8cfae08d793cb95149cdd7343e6a9834b4ac7136475" name = "github.com/aerospike/aerospike-client-go" packages = [ ".", @@ -78,7 +78,7 @@ [[projects]] branch = "master" - digest = "1:1399282ad03ac819f0e8a747c888407c5c98bb497d33821a7047c7bae667ede0" + digest = "1:a74730e052a45a3fab1d310fdef2ec17ae3d6af16228421e238320846f2aaec8" name = "github.com/alecthomas/template" packages = [ ".", @@ -97,7 +97,7 @@ [[projects]] branch = "master" - digest = "1:072692f8d76356228f31f64ca3140041a140011c7dea26e746206e8649c71b31" + digest = "1:7f21a8f175ee7f91c659f919c61032e11889fba5dc25c0cec555087cbb87435a" name = "github.com/amir/raidman" packages = [ ".", @@ -108,14 +108,14 @@ [[projects]] branch = "master" - digest = "1:83a67d925714169fa5121021abef0276605c6e4d51c467dd1f0c04344abad1ff" + digest = "1:0828d8c0f95689f832cf348fe23827feb7640cd698d612ef59e2f9d041f54c68" name = "github.com/apache/thrift" packages = ["lib/go/thrift"] pruneopts = "" revision = "f2867c24984aa53edec54a138c03db934221bdea" [[projects]] - digest = "1:ca172b51bfe0a1ae7725dc782339fed4ba697dcd44e29a0a1c765fffdbf05ddc" + digest = "1:65a05bde9b02f645c73afa61c9f6af92d94d726c81a268f45cc70218bd58de65" name = "github.com/aws/aws-sdk-go" packages = [ "aws", @@ -156,14 +156,14 @@ [[projects]] branch = "master" - digest = "1:fca298802a2ab834d6eb0e284788ae037ebc324c0f325ff92c5eea592d189cc5" + digest = "1:c0bec5f9b98d0bc872ff5e834fac186b807b656683bd29cb82fb207a1513fabb" name = 
"github.com/beorn7/perks" packages = ["quantile"] pruneopts = "" revision = "3a771d992973f24aa725d07868b467d1ddfceafb" [[projects]] - digest = "1:0edb96edcfeee9aeba92e605536fbb1542b0bf6a10cea9d0b5a2227d5a703eae" + digest = "1:c5978131c797af795972c27c25396c81d1bf53b7b6e8e3e0259e58375765c071" name = "github.com/bsm/sarama-cluster" packages = ["."] pruneopts = "" @@ -180,7 +180,7 @@ [[projects]] branch = "master" - digest = "1:65ae2d1625584ba8d16d1e15b25db1fc62334e2040f22dbbbdc7531c909843b2" + digest = "1:298e42868718da06fc0899ae8fdb99c48a14477045234c9274d81caa79af6a8f" name = "github.com/couchbase/go-couchbase" packages = ["."] pruneopts = "" @@ -188,7 +188,7 @@ [[projects]] branch = "master" - digest = "1:5db54de7054c072f47806c91ef7625ffa00489ca2da5fbc6ca1c78e08018f6bf" + digest = "1:c734658274a6be88870a36742fdea96a3fce4fc99a7b90946c9e84335ceae71a" name = "github.com/couchbase/gomemcached" packages = [ ".", @@ -199,7 +199,7 @@ [[projects]] branch = "master" - digest = "1:0deaa0f28c823119725c8308703f019797bc077e251d1ed3f2b8eae2cc7791d7" + digest = "1:c1195c02bc8fbf5307cfb95bc79eddaa1351ee3587cc4a7bbe6932e2fb966ff2" name = "github.com/couchbase/goutils" packages = [ "logging", @@ -209,15 +209,7 @@ revision = "e865a1461c8ac0032bd37e2d4dab3289faea3873" [[projects]] - branch = "master" - digest = "1:4c015b7445aa37becc220fde9bdbc4d4329f75af72ca1c98f9b0bd698d6068cb" - name = "github.com/crewjam/rfc5424" - packages = ["."] - pruneopts = "" - revision = "6ae4b209c3f0d5071494be6b883a1970acadda94" - -[[projects]] - digest = "1:0a39ec8bf5629610a4bc7873a92039ee509246da3cef1a0ea60f1ed7e5f9cea5" + digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b" name = "github.com/davecgh/go-spew" packages = ["spew"] pruneopts = "" @@ -225,7 +217,7 @@ version = "v1.1.0" [[projects]] - digest = "1:2426da75f49e5b8507a6ed5d4c49b06b2ff795f4aec401c106b7db8fb2625cd7" + digest = "1:6098222470fe0172157ce9bbef5d2200df4edde17ee649c5d6e48330e4afa4c6" name = "github.com/dgrijalva/jwt-go" packages = ["."] pruneopts = "" @@ -233,7 +225,7 @@ version = "v3.2.0" [[projects]] - digest = "1:68df19ee476d93359596377b7437bbe49d233fe014becd060ded757aeed531cd" + digest = "1:522eff2a1f014a64fb403db60fc0110653e4dc5b59779894d208e697b0708ddc" name = "github.com/docker/distribution" packages = [ "digestset", @@ -243,7 +235,7 @@ revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" [[projects]] - digest = "1:a21509491bfd5bd1f99abe1d38430fddd16c8c8dc0092f954e224b93ad87f06b" + digest = "1:d149605f1b00713fdc48150122892d77d49d30c825f690dd92f497aeb6cf18f5" name = "github.com/docker/docker" packages = [ "api", @@ -268,7 +260,7 @@ revision = "ed7b6428c133e7c59404251a09b7d6b02fa83cc2" [[projects]] - digest = "1:5b20afc76a36d3994194e2612e83b51bc2b12db3d4d2a722b24474b2d0e3a890" + digest = "1:a5ecc2e70260a87aa263811281465a5effcfae8a54bac319cee87c4625f04d63" name = "github.com/docker/go-connections" packages = [ "nat", @@ -288,7 +280,7 @@ version = "v0.3.3" [[projects]] - digest = "1:7bbb118aeef9a6b9fef3d57b6cc5378f7cd6e915cabf4dea695e318e1a1bd4e6" + digest = "1:6d6672f85a84411509885eaa32f597577873de00e30729b9bb0eb1e1faa49c12" name = "github.com/eapache/go-resiliency" packages = ["breaker"] pruneopts = "" @@ -297,7 +289,7 @@ [[projects]] branch = "master" - digest = "1:7b28f7f7c9fb914b30dff111fb910d49bd61d275101f665aea79409bb3ba2ae2" + digest = "1:7b12ea8b50040c6c2378ec5b5a1ab722730b2bfb46e8724ded57f2c3905431fa" name = "github.com/eapache/go-xerial-snappy" packages = ["."] pruneopts = "" @@ -312,7 +304,7 @@ version = "v1.1.0" 
[[projects]] - digest = "1:d2e2aebcb8e8027345e16f9d0be8cdee3bb470ba406c7a54cb7457ae3ad4ace5" + digest = "1:3fa846cb3feb4e65371fe3c347c299de9b5bc3e71e256c0d940cd19b767a6ba0" name = "github.com/eclipse/paho.mqtt.golang" packages = [ ".", @@ -323,7 +315,7 @@ version = "v1.1.1" [[projects]] - digest = "1:d19c78214e03e297e9e30d2eb11892f731358b2951f2a5c7374658a156373e4c" + digest = "1:858b7fe7b0f4bc7ef9953926828f2816ea52d01a88d72d1c45bc8c108f23c356" name = "github.com/go-ini/ini" packages = ["."] pruneopts = "" @@ -339,7 +331,7 @@ version = "v0.3.0" [[projects]] - digest = "1:c3a5ae14424a38c244439732c31a08b5f956c46c4acdc159fc285a52dbf11de0" + digest = "1:96c4a6ff4206086347bfe28e96e092642882128f45ecb8dc8f15f3e6f6703af0" name = "github.com/go-ole/go-ole" packages = [ ".", @@ -350,7 +342,7 @@ version = "v1.2.1" [[projects]] - digest = "1:f2f6a616a1ca8aed667d956c98f7f6178efe72bbe0a419bd33b9d99841c7de69" + digest = "1:3dfd659219b6f63dc0677a62b8d4e8f10b5cf53900aef40858db10a19407e41d" name = "github.com/go-redis/redis" packages = [ ".", @@ -367,7 +359,7 @@ version = "v6.12.0" [[projects]] - digest = "1:dc876ae7727280d95f97af5320308131278b93d6c6f5cf953065e18cb8c88fd2" + digest = "1:c07de423ca37dc2765396d6971599ab652a339538084b9b58c9f7fc533b28525" name = "github.com/go-sql-driver/mysql" packages = ["."] pruneopts = "" @@ -375,7 +367,7 @@ version = "v1.4.0" [[projects]] - digest = "1:b7a7e17513aeee6492d93015c7bf29c86a0c1c91210ea56b21e36c1a40958cba" + digest = "1:9ab1b1c637d7c8f49e39d8538a650d7eb2137b076790cff69d160823b505964c" name = "github.com/gobwas/glob" packages = [ ".", @@ -392,7 +384,7 @@ version = "v0.2.3" [[projects]] - digest = "1:673df1d02ca0c6f51458fe94bbb6fae0b05e54084a31db2288f1c4321255c2da" + digest = "1:6e73003ecd35f4487a5e88270d3ca0a81bc80dc88053ac7e4dcfec5fba30d918" name = "github.com/gogo/protobuf" packages = ["proto"] pruneopts = "" @@ -400,7 +392,7 @@ version = "v1.1.1" [[projects]] - digest = "1:b1d3041d568e065ab4d76f7477844458e9209c0bb241eaccdc0770bf0a13b120" + digest = "1:f958a1c137db276e52f0b50efee41a1a389dcdded59a69711f3e872757dab34b" name = "github.com/golang/protobuf" packages = [ "proto", @@ -415,14 +407,14 @@ [[projects]] branch = "master" - digest = "1:075128b9fc42e6d99067da1a2e6c0a634a6043b5a60abe6909c51f5ecad37b6d" + digest = "1:2a5888946cdbc8aa360fd43301f9fc7869d663f60d5eedae7d4e6e5e4f06f2bf" name = "github.com/golang/snappy" packages = ["."] pruneopts = "" revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" [[projects]] - digest = "1:cc082d7b9cc3f832f2aed9d06d1cbb33b6984a61d8ec403535b086415c181607" + digest = "1:f9f45f75f332e03fc7e9fe9188ea4e1ce4d14779ef34fa1b023da67518e36327" name = "github.com/google/go-cmp" packages = [ "cmp", @@ -459,7 +451,7 @@ revision = "e80d13ce29ede4452c43dea11e79b9bc8a15b478" [[projects]] - digest = "1:db58383b43f583c44fb47c3331de943a11bb73ea951c2def55d29a454a57f4ee" + digest = "1:e7224669901bab4094e6d6697c136557b7177db6ceb01b7fc8b20d08f4b5aacd" name = "github.com/hashicorp/consul" packages = ["api"] pruneopts = "" @@ -476,14 +468,14 @@ [[projects]] branch = "master" - digest = "1:cd5813053beac0114f96a7da3924fc8a15e0cd2b139f079e0fcce5d3244ae304" + digest = "1:ff65bf6fc4d1116f94ac305342725c21b55c16819c2606adc8f527755716937f" name = "github.com/hashicorp/go-rootcerts" packages = ["."] pruneopts = "" revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00" [[projects]] - digest = "1:d2b2cff454cb23a9769ef3c9075741f5985773a998584b3b3ce203fe4b1abbea" + digest = "1:f72168ea995f398bab88e84bd1ff58a983466ba162fb8d50d47420666cd57fad" name = 
"github.com/hashicorp/serf" packages = ["coordinate"] pruneopts = "" @@ -491,7 +483,7 @@ version = "v0.8.1" [[projects]] - digest = "1:cc0cf2e12280074e5c6dc0f15a4bb3d6c43509e6091cdcdcc83eea491577257b" + digest = "1:a39ef049cdeee03a57b132e7d60e32711b9d949c78458da78e702d9864c54369" name = "github.com/influxdata/go-syslog" packages = [ "rfc5424", @@ -503,7 +495,7 @@ [[projects]] branch = "master" - digest = "1:effc58ad45323ad15159bbca533be4870eaddb2d9a513d3488d8bfe822c83532" + digest = "1:bc3eb5ddfd59781ea1183f2b3d1eb105a1495d421f09b2ccd360c7fced0b612d" name = "github.com/influxdata/tail" packages = [ ".", @@ -517,7 +509,7 @@ [[projects]] branch = "master" - digest = "1:d31edcf33a3b36218de96e43f3fec18ea96deb2a28b838a3a01a4df856ded345" + digest = "1:7fb6cc9607eaa6ef309edebc42b57f704244bd4b9ab23bff128829c4ad09b95d" name = "github.com/influxdata/toml" packages = [ ".", @@ -535,7 +527,7 @@ revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec" [[projects]] - digest = "1:4197871f269749786aa2406557dba15f10cf79161cdc3998180614c62c8b6351" + digest = "1:2de1791b9e43f26c696e36950e42676565e7da7499a870bc02213da4b59b1d14" name = "github.com/jackc/pgx" packages = [ ".", @@ -551,7 +543,7 @@ version = "v3.1.0" [[projects]] - digest = "1:4f767a115bc8e08576f6d38ab73c376fc1b1cd3bb5041171c9e8668cc7739b52" + digest = "1:6f49eae0c1e5dab1dafafee34b207aeb7a42303105960944828c2079b92fc88e" name = "github.com/jmespath/go-jmespath" packages = ["."] pruneopts = "" @@ -567,7 +559,7 @@ [[projects]] branch = "master" - digest = "1:2df59f23f11c5c59982f737c98c5523b276bfc85a4773a04b411190402bb30fd" + digest = "1:fed90fa725d3b1bac0a760de64426834dfef4546474cf182f2ec94285afa74a8" name = "github.com/kardianos/service" packages = ["."] pruneopts = "" @@ -591,7 +583,7 @@ [[projects]] branch = "master" - digest = "1:28ca57775f285ae87cbdc7280aad91c5f2ed3c2af98d9f035d75956d1ca97fe6" + digest = "1:7e9956922e349af0190afa0b6621befcd201072679d8e51a9047ff149f2afe93" name = "github.com/mailru/easyjson" packages = [ ".", @@ -603,7 +595,7 @@ revision = "efc7eb8984d6655c26b5c9d2e65c024e5767c37c" [[projects]] - digest = "1:49a8b01a6cd6558d504b65608214ca40a78000e1b343ed0da5c6a9ccd83d6d30" + digest = "1:63722a4b1e1717be7b98fc686e0b30d5e7f734b9e93d7dee86293b6deab7ea28" name = "github.com/matttproud/golang_protobuf_extensions" packages = ["pbutil"] pruneopts = "" @@ -611,7 +603,7 @@ version = "v1.0.1" [[projects]] - digest = "1:f0bad0fece0fb73c6ea249c18d8e80ffbe86be0457715b04463068f04686cf39" + digest = "1:4c8d8358c45ba11ab7bb15df749d4df8664ff1582daead28bae58cf8cbe49890" name = "github.com/miekg/dns" packages = ["."] pruneopts = "" @@ -651,7 +643,7 @@ version = "v0.1.0" [[projects]] - digest = "1:e5894541d6ceec5dd283e24e3530aadf59c06449695d19189a7a27bb4c15840d" + digest = "1:e5ec850ce66beb0014fc40d8e64b7482172eee71d86d734d66def5e9eac16797" name = "github.com/nats-io/gnatsd" packages = [ "conf", @@ -665,7 +657,7 @@ version = "v1.2.0" [[projects]] - digest = "1:88f1bde4c172e27b05ed46adfbd0e79dc1663a6281e4b39fa3e39d71ead9621d" + digest = "1:665af347df4c5d1ae4c3eacd0754f5337a301f6a3f2444c9993b996605c8c02b" name = "github.com/nats-io/go-nats" packages = [ ".", @@ -685,7 +677,7 @@ version = "v1.0.0" [[projects]] - digest = "1:501cce26a54c785458b0dd54a08ddd984d4ad0c198255430d5d37cd2efe23149" + digest = "1:7a69f6a3a33929f8b66aa39c93868ad1698f06417fe627ae067559beb94504bd" name = "github.com/nsqio/go-nsq" packages = ["."] pruneopts = "" @@ -701,7 +693,7 @@ version = "v1.0.0-rc1" [[projects]] - digest = 
"1:0d08f7224705b1df80beee92ffbdc63ab13fd6f6eb80bf287735f9bc7e8b83eb" + digest = "1:f26c8670b11e29a49c8e45f7ec7f2d5bac62e8fd4e3c0ae1662baa4a697f984a" name = "github.com/opencontainers/image-spec" packages = [ "specs-go", @@ -720,7 +712,7 @@ revision = "a52f2342449246d5bcc273e65cbdcfa5f7d6c63c" [[projects]] - digest = "1:bba12aa4747b212f75db3e7fee73fe1b66d303cb3ff0c1984b7f2ad20e8bd2bc" + digest = "1:78fb99d6011c2ae6c72f3293a83951311147b12b06a5ffa43abf750c4fab6ac5" name = "github.com/opentracing/opentracing-go" packages = [ ".", @@ -732,7 +724,7 @@ version = "v1.0.2" [[projects]] - digest = "1:c6c0db6294924072f98a0de090d200bae4b7102b12a443ba9569c4ba7df52aa1" + digest = "1:fea0e67285d900e5a0a7ec19ff4b4c82865a28dddbee8454c5360ad908f7069c" name = "github.com/openzipkin/zipkin-go-opentracing" packages = [ ".", @@ -747,7 +739,7 @@ version = "v0.3.4" [[projects]] - digest = "1:41de12a4684237dd55a11260c941c2c58a055951985e9473ba1661175a13fea7" + digest = "1:29e34e58f26655c4d73135cdfc0517ea2ff1483eff34e5d5ef4b6fddbb81e31b" name = "github.com/pierrec/lz4" packages = [ ".", @@ -774,7 +766,7 @@ version = "v1.0.0" [[projects]] - digest = "1:981835985f655d1d380cc6aa7d9fa9ad7abfaf40c75da200fd40d864cd05a7c3" + digest = "1:4142d94383572e74b42352273652c62afec5b23f325222ed09198f46009022d1" name = "github.com/prometheus/client_golang" packages = [ "prometheus", @@ -786,7 +778,7 @@ [[projects]] branch = "master" - digest = "1:562d53e436b244a9bb5c1ff43bcaf4882e007575d34ec37717b15751c65cc63a" + digest = "1:185cf55b1f44a1bf243558901c3f06efa5c64ba62cfdcbb1bf7bbe8c3fb68561" name = "github.com/prometheus/client_model" packages = ["go"] pruneopts = "" @@ -794,7 +786,7 @@ [[projects]] branch = "master" - digest = "1:6a8420870eb2935977da1fff0f3afca9bdb3f1e66258c9e91a8a7ce0b5417c3b" + digest = "1:bfbc121ef802d245ef67421cff206615357d9202337a3d492b8f668906b485a8" name = "github.com/prometheus/common" packages = [ "expfmt", @@ -807,7 +799,7 @@ [[projects]] branch = "master" - digest = "1:00fca823dfcdd8107226f67215afd948b001525223ed955a05b33a4c885c9591" + digest = "1:b694a6bdecdace488f507cff872b30f6f490fdaf988abd74d87ea56406b23b6e" name = "github.com/prometheus/procfs" packages = [ ".", @@ -820,7 +812,7 @@ [[projects]] branch = "master" - digest = "1:1b65925989a4dfb6d98ef1d530cda33ab1ff25945b14a22a8b8bb27cc282af70" + digest = "1:15bcdc717654ef21128e8af3a63eec39a6d08a830e297f93d65163f87c8eb523" name = "github.com/rcrowley/go-metrics" packages = ["."] pruneopts = "" @@ -828,7 +820,7 @@ [[projects]] branch = "master" - digest = "1:d8fe9f454582e04b5693b59cdebe3f0bd9dc29ad9651bfb1633cba4658b66c65" + digest = "1:7fc2f428767a2521abc63f1a663d981f61610524275d6c0ea645defadd4e916f" name = "github.com/samuel/go-zookeeper" packages = ["zk"] pruneopts = "" @@ -843,7 +835,7 @@ version = "v1.2.0" [[projects]] - digest = "1:987ce58e999676c2e209831390f2d56621ff98def2ecca4928e73fe1e2569954" + digest = "1:fce9909f20bc6a6363a6d589e478bdcf8111044b41566d37d7552bf92d955540" name = "github.com/shirou/gopsutil" packages = [ "cpu", @@ -868,7 +860,7 @@ revision = "bb4de0191aa41b5507caa14b0650cdbddcd9280b" [[projects]] - digest = "1:f2cc92b78b2f3b76ab0f9daddddd28627bcfcc6cacf119029aa3850082d95079" + digest = "1:8cf46b6c18a91068d446e26b67512cf16f1540b45d90b28b9533706a127f0ca6" name = "github.com/sirupsen/logrus" packages = ["."] pruneopts = "" @@ -877,7 +869,7 @@ [[projects]] branch = "master" - digest = "1:79e73b87cb07e380d1a3aaa14fbcc418e0d42eede5f971e7ee2f4a6e6d531deb" + digest = "1:4b0cabe65ca903a7b2a3e6272c5304eb788ce196d35ecb901c6563e5e7582443" name 
= "github.com/soniah/gosnmp" packages = ["."] pruneopts = "" @@ -885,14 +877,14 @@ [[projects]] branch = "master" - digest = "1:0a1f8d01a0191f558910bcbfd7e1dc11a53ac374473d13b68b8fe520f21efb07" + digest = "1:4e8f1cae8e6d83af9000d82566efb8823907dae77ba4f1d76ff28fdd197c3c90" name = "github.com/streadway/amqp" packages = ["."] pruneopts = "" revision = "e5adc2ada8b8efff032bf61173a233d143e9318e" [[projects]] - digest = "1:34062a2274daa6ec4d2f50d6070cc51cf4674d6d553ed76b406cb3425b9528e8" + digest = "1:711eebe744c0151a9d09af2315f0bb729b2ec7637ef4c410fa90a18ef74b65b6" name = "github.com/stretchr/objx" packages = ["."] pruneopts = "" @@ -900,7 +892,7 @@ version = "v0.1.1" [[projects]] - digest = "1:bc2a12c8863e1080226b7bc69192efd6c37aaa9b85cec508b0a8f54fabb9bd9f" + digest = "1:c587772fb8ad29ad4db67575dad25ba17a51f072ff18a22b4f0257a4d9c24f75" name = "github.com/stretchr/testify" packages = [ "assert", @@ -928,7 +920,7 @@ revision = "1731857f09b1f38450e2c12409748407822dc6be" [[projects]] - digest = "1:23e2b9f3a20cd4a6427147377255ec2f6237e8606fa6ef0707ed79b7bfbe3a83" + digest = "1:343f20460c11a0d0529fe532553bfef9446918d1a1fda6d8661eb27d5b1a68b8" name = "github.com/vjeantet/grok" packages = ["."] pruneopts = "" @@ -937,7 +929,7 @@ [[projects]] branch = "master" - digest = "1:5383edd40c7f6c95a7dc46a47bf0c83de4bf40a4252f12fa803f790037addffc" + digest = "1:98ed05e9796df287b90c1d96854e3913c8e349dbc546412d3cabb472ecf4b417" name = "github.com/wvanbergen/kafka" packages = ["consumergroup"] pruneopts = "" @@ -945,7 +937,7 @@ [[projects]] branch = "master" - digest = "1:f936b4936e1b092cc41c9b33fdc990ad78386545f1ffeca8427c72b2605bca85" + digest = "1:12aff3cc417907bf9f683a6bf1dc78ffb08e41bc69f829491e593ea9b951a3cf" name = "github.com/wvanbergen/kazoo-go" packages = ["."] pruneopts = "" @@ -953,7 +945,7 @@ [[projects]] branch = "master" - digest = "1:9946d558a909f63e31332c77b82649522da97ae7f7cfbfebc6f53549ab6b3e0f" + digest = "1:c5918689b7e187382cc1066bf0260de54ba9d1b323105f46ed2551d2fb4a17c7" name = "github.com/yuin/gopher-lua" packages = [ ".", @@ -966,7 +958,7 @@ [[projects]] branch = "master" - digest = "1:84e9087a94f336c204887281046891769d2ed7bf1d2b31c21ff6fb5e1743abce" + digest = "1:2fcfc6c3fb8dfe0d80d7789272230d3ac7db15022b66817113f98d9fff880225" name = "github.com/zensqlmonitor/go-mssqldb" packages = ["."] pruneopts = "" @@ -974,7 +966,7 @@ [[projects]] branch = "master" - digest = "1:21100b2e8b6922303dd109da81b3134ed0eff05cb3402881eabde9cce8f4e5e6" + digest = "1:0773b5c3be42874166670a20aa177872edb450cd9fc70b1df97303d977702a50" name = "golang.org/x/crypto" packages = [ "bcrypt", @@ -990,7 +982,7 @@ [[projects]] branch = "master" - digest = "1:58d8f8f3ad415b10d2145316519e5b7995b7cf9e663b33a1e9e0c2ddd96c1d58" + digest = "1:00ff990baae4665bb0a8174af5ff78228574227ed96c89671247a56852a50e21" name = "golang.org/x/net" packages = [ "bpf", @@ -1018,7 +1010,7 @@ [[projects]] branch = "master" - digest = "1:a8944db88149e7ecbea4b760c625b9ccf455fceae21387bc8890c3589d28b623" + digest = "1:677e38cad6833ad266ec843739d167755eda1e6f2d8af1c63102b0426ad820db" name = "golang.org/x/sys" packages = [ "unix", @@ -1033,7 +1025,7 @@ revision = "ac767d655b305d4e9612f5f6e33120b9176c4ad4" [[projects]] - digest = "1:af9bfca4298ef7502c52b1459df274eed401a4f5498b900e9a92d28d3d87ac5a" + digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4" name = "golang.org/x/text" packages = [ "collate", @@ -1068,7 +1060,7 @@ version = "v0.3.0" [[projects]] - digest = 
"1:eede11c81b63c8f6fd06ef24ba0a640dc077196ec9b7a58ecde03c82eee2f151" + digest = "1:c1771ca6060335f9768dff6558108bc5ef6c58506821ad43377ee23ff059e472" name = "google.golang.org/appengine" packages = ["cloudsql"] pruneopts = "" @@ -1077,14 +1069,14 @@ [[projects]] branch = "master" - digest = "1:8d093c040b734e160cbe8291c7b539c36d2c6dd4581c4bb37cff56078c65bd07" + digest = "1:b1443b4e3cc990c84d27fcdece9d3302158c67dba870e33a6937a2c0076388c2" name = "google.golang.org/genproto" packages = ["googleapis/rpc/status"] pruneopts = "" revision = "fedd2861243fd1a8152376292b921b394c7bef7e" [[projects]] - digest = "1:05f2028524c4eada11e3f46d23139f23e9e0a40b2552207a5af278e8063ce782" + digest = "1:5f31b45ee9da7a87f140bef3ed0a7ca34ea2a6d38eb888123b8e28170e8aa4f2" name = "google.golang.org/grpc" packages = [ ".", @@ -1118,7 +1110,7 @@ version = "v1.13.0" [[projects]] - digest = "1:2840683aa0e9980689f85bf48b2a56ec7a108fd089f12af8ea7d98c172819589" + digest = "1:15d017551627c8bb091bde628215b2861bed128855343fdd570c62d08871f6e1" name = "gopkg.in/alecthomas/kingpin.v2" packages = ["."] pruneopts = "" @@ -1126,7 +1118,7 @@ version = "v2.2.6" [[projects]] - digest = "1:a8f8c1725195c4324d4350fae001524ca7489e40d9b6bb47598772e3faa103ba" + digest = "1:3cad99e0d1f94b8c162787c12e59d0a0b9df1ef75590eb145cdd625479091efe" name = "gopkg.in/asn1-ber.v1" packages = ["."] pruneopts = "" @@ -1142,7 +1134,7 @@ version = "v2.0.0" [[projects]] - digest = "1:b2106f1668ea5efc1ecc480f7e922a093adb9563fd9ce58585292871f0d0f229" + digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd" name = "gopkg.in/fsnotify.v1" packages = ["."] pruneopts = "" @@ -1151,7 +1143,7 @@ version = "v1.4.7" [[projects]] - digest = "1:5fa5df18f3bd9cad28ed7f263b15da217945735110898fa2b9af25cdafb9cbf3" + digest = "1:960720207d3d0992995f4576e1366fd9e9b1483473b07fb7243144f75f5b1546" name = "gopkg.in/gorethink/gorethink.v3" packages = [ ".", @@ -1164,7 +1156,7 @@ version = "v3.0.5" [[projects]] - digest = "1:74163d1887c0821951e6f1795a1d10338f45f09d9067cb4a8edcf7ee481724ee" + digest = "1:367baf06b7dbd0ef0bbdd785f6a79f929c96b0c18e9d3b29c0eed1ac3f5db133" name = "gopkg.in/ldap.v2" packages = ["."] pruneopts = "" @@ -1173,7 +1165,7 @@ [[projects]] branch = "v2" - digest = "1:f799e95918890212dcf4ce5951291061d318f689977ec9cea0417b08433c2a9d" + digest = "1:f54ba71a035aac92ced3e902d2bff3734a15d1891daff73ec0f90ef236750139" name = "gopkg.in/mgo.v2" packages = [ ".", @@ -1186,7 +1178,7 @@ revision = "9856a29383ce1c59f308dd1cf0363a79b5bef6b5" [[projects]] - digest = "1:427414c304a47b497759094220ce42dd2e838ab7d52de197c633b800c6ff84b5" + digest = "1:b49c4d3115800eace659c9a6a5c384a922f5b210178b24a01abb10731f404ea2" name = "gopkg.in/olivere/elastic.v5" packages = [ ".", @@ -1216,6 +1208,97 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "726abf0a241126b415293c203dddc516e4d8be9b0f2913fb3ab2c4eb332e3ce2" + input-imports = [ + "collectd.org/api", + "collectd.org/network", + "github.com/Microsoft/ApplicationInsights-Go/appinsights", + "github.com/Shopify/sarama", + "github.com/StackExchange/wmi", + "github.com/aerospike/aerospike-client-go", + "github.com/amir/raidman", + "github.com/apache/thrift/lib/go/thrift", + "github.com/aws/aws-sdk-go/aws", + "github.com/aws/aws-sdk-go/aws/client", + "github.com/aws/aws-sdk-go/aws/credentials", + "github.com/aws/aws-sdk-go/aws/credentials/stscreds", + "github.com/aws/aws-sdk-go/aws/session", + "github.com/aws/aws-sdk-go/service/cloudwatch", + "github.com/aws/aws-sdk-go/service/kinesis", + 
"github.com/aws/aws-sdk-go/service/sts", + "github.com/bsm/sarama-cluster", + "github.com/couchbase/go-couchbase", + "github.com/dgrijalva/jwt-go", + "github.com/docker/docker/api/types", + "github.com/docker/docker/api/types/container", + "github.com/docker/docker/api/types/filters", + "github.com/docker/docker/api/types/registry", + "github.com/docker/docker/api/types/swarm", + "github.com/docker/docker/client", + "github.com/eclipse/paho.mqtt.golang", + "github.com/go-redis/redis", + "github.com/go-sql-driver/mysql", + "github.com/gobwas/glob", + "github.com/golang/protobuf/proto", + "github.com/google/go-cmp/cmp", + "github.com/gorilla/mux", + "github.com/hashicorp/consul/api", + "github.com/influxdata/go-syslog/rfc5424", + "github.com/influxdata/go-syslog/rfc5425", + "github.com/influxdata/tail", + "github.com/influxdata/toml", + "github.com/influxdata/toml/ast", + "github.com/influxdata/wlog", + "github.com/jackc/pgx", + "github.com/jackc/pgx/pgtype", + "github.com/jackc/pgx/stdlib", + "github.com/kardianos/service", + "github.com/kballard/go-shellquote", + "github.com/matttproud/golang_protobuf_extensions/pbutil", + "github.com/miekg/dns", + "github.com/multiplay/go-ts3", + "github.com/nats-io/gnatsd/server", + "github.com/nats-io/go-nats", + "github.com/nsqio/go-nsq", + "github.com/openzipkin/zipkin-go-opentracing", + "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore", + "github.com/prometheus/client_golang/prometheus", + "github.com/prometheus/client_golang/prometheus/promhttp", + "github.com/prometheus/client_model/go", + "github.com/prometheus/common/expfmt", + "github.com/prometheus/common/log", + "github.com/satori/go.uuid", + "github.com/shirou/gopsutil/cpu", + "github.com/shirou/gopsutil/disk", + "github.com/shirou/gopsutil/host", + "github.com/shirou/gopsutil/load", + "github.com/shirou/gopsutil/mem", + "github.com/shirou/gopsutil/net", + "github.com/shirou/gopsutil/process", + "github.com/soniah/gosnmp", + "github.com/streadway/amqp", + "github.com/stretchr/testify/assert", + "github.com/stretchr/testify/mock", + "github.com/stretchr/testify/require", + "github.com/tidwall/gjson", + "github.com/vjeantet/grok", + "github.com/wvanbergen/kafka/consumergroup", + "github.com/zensqlmonitor/go-mssqldb", + "golang.org/x/net/context", + "golang.org/x/net/html/charset", + "golang.org/x/sys/unix", + "golang.org/x/sys/windows", + "golang.org/x/sys/windows/svc", + "golang.org/x/sys/windows/svc/mgr", + "google.golang.org/grpc", + "google.golang.org/grpc/codes", + "google.golang.org/grpc/credentials", + "google.golang.org/grpc/status", + "gopkg.in/gorethink/gorethink.v3", + "gopkg.in/ldap.v2", + "gopkg.in/mgo.v2", + "gopkg.in/mgo.v2/bson", + "gopkg.in/olivere/elastic.v5", + "gopkg.in/yaml.v2", + ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/README.md b/README.md index 5f7cb31c9..e0d88e414 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ Ansible role: https://github.com/rossmcdonald/telegraf Telegraf requires golang version 1.9 or newer, the Makefile requires GNU make. 1. [Install Go](https://golang.org/doc/install) >=1.9 -2. [Install dep](https://golang.github.io/dep/docs/installation.html) ==v0.4.1 +2. [Install dep](https://golang.github.io/dep/docs/installation.html) ==v0.5.0 3. 
Download Telegraf source: ``` go get -d github.com/influxdata/telegraf diff --git a/appveyor.yml b/appveyor.yml index cd8938ff7..a1af84d6c 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -21,8 +21,8 @@ install: - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y - go get -d github.com/golang/dep - cd "%GOPATH%\src\github.com\golang\dep" - - git checkout -q v0.4.1 - - go install -ldflags="-X main.version=v0.4.1" ./cmd/dep + - git checkout -q v0.5.0 + - go install -ldflags="-X main.version=v0.5.0" ./cmd/dep - cd "%GOPATH%\src\github.com\influxdata\telegraf" - git config --system core.longpaths true - go version diff --git a/scripts/ci-1.10.docker b/scripts/ci-1.10.docker index 1cfe4c27d..33075adfc 100644 --- a/scripts/ci-1.10.docker +++ b/scripts/ci-1.10.docker @@ -24,5 +24,5 @@ RUN gem install fpm RUN go get -d github.com/golang/dep && \ cd src/github.com/golang/dep && \ - git checkout -q v0.4.1 && \ - go install -ldflags="-X main.version=v0.4.1" ./cmd/dep + git checkout -q v0.5.0 && \ + go install -ldflags="-X main.version=v0.5.0" ./cmd/dep diff --git a/scripts/ci-1.9.docker b/scripts/ci-1.9.docker index d1ac5f839..0a931c817 100644 --- a/scripts/ci-1.9.docker +++ b/scripts/ci-1.9.docker @@ -24,5 +24,5 @@ RUN gem install fpm RUN go get -d github.com/golang/dep && \ cd src/github.com/golang/dep && \ - git checkout -q v0.4.1 && \ - go install -ldflags="-X main.version=v0.4.1" ./cmd/dep + git checkout -q v0.5.0 && \ + go install -ldflags="-X main.version=v0.5.0" ./cmd/dep From 2395413cc8ec93f2d6368d636e7d46fcf3ea6f2d Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Tue, 14 Aug 2018 15:56:48 -0600 Subject: [PATCH 0082/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 520a4fb43..38ddd3d95 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -64,6 +64,7 @@ - [#4537](https://github.com/influxdata/telegraf/pull/4537): Add message 'max_bytes' configuration to kafka input. - [#4546](https://github.com/influxdata/telegraf/pull/4546): Add gopsutil meminfo fields to mem plugin. - [#4285](https://github.com/influxdata/telegraf/pull/4285): Document how to parse telegraf logs. +- [#4542](https://github.com/influxdata/telegraf/pull/4542): Use dep v0.5.0. ## v1.7.4 [unreleased] From 61513c64b85b2cb325a49b930f5ae6ff44af4091 Mon Sep 17 00:00:00 2001 From: Mauro Murari Date: Mon, 13 Aug 2018 20:40:18 -0300 Subject: [PATCH 0083/1815] Add message 'max_bytes' configuration (#4537) --- plugins/outputs/kafka/kafka.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 5fdb8d857..dc8d846da 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -37,6 +37,8 @@ type ( RequiredAcks int // MaxRetry Tag MaxRetry int + // Max Message Bytes + MaxMessageBytes int Version string `toml:"version"` @@ -140,6 +142,9 @@ var sampleConfig = ` ## until the next flush. 
# max_retry = 3 + ## Max message bytes, should be lower than server message.max.bytes config + # MaxMessageBytes = 0 + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -218,6 +223,10 @@ func (k *Kafka) Connect() error { config.Producer.Retry.Max = k.MaxRetry config.Producer.Return.Successes = true + if k.MaxMessageBytes > 0 { + config.Producer.MaxMessageBytes = k.MaxMessageBytes + } + // Legacy support ssl config if k.Certificate != "" { k.TLSCert = k.Certificate From 34614582a7820ed59e05b64a7aef7cdfe9eb5e79 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 15 Aug 2018 14:12:22 -0700 Subject: [PATCH 0084/1815] Use snake case in kafka output config --- plugins/outputs/kafka/kafka.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index dc8d846da..0bb5ca4a3 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -38,7 +38,7 @@ type ( // MaxRetry Tag MaxRetry int // Max Message Bytes - MaxMessageBytes int + MaxMessageBytes int `toml:"max_message_bytes"` Version string `toml:"version"` @@ -142,8 +142,9 @@ var sampleConfig = ` ## until the next flush. # max_retry = 3 - ## Max message bytes, should be lower than server message.max.bytes config - # MaxMessageBytes = 0 + ## The maximum permitted size of a message. Should be set equal to or + ## smaller than the broker's 'message.max.bytes'. + # max_message_bytes = 1000000 ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -218,6 +223,10 @@ func (k *Kafka) Connect() error { config.Producer.Retry.Max = k.MaxRetry config.Producer.Return.Successes = true + if k.MaxMessageBytes > 0 { + config.Producer.MaxMessageBytes = k.MaxMessageBytes + } + // Legacy support ssl config if k.Certificate != "" { k.TLSCert = k.Certificate From 9e0eb0c0e0cdd2a45a7629b494b914a59bf51b82 Mon Sep 17 00:00:00 2001 From: maxunt Date: Fri, 17 Aug 2018 13:45:22 -0700 Subject: [PATCH 0085/1815] Add ability to set measurement from matched text in grok parser (#4433) --- docs/DATA_FORMATS_INPUT.md | 119 +++++++++--------- plugins/inputs/file/README.md | 2 +- plugins/inputs/file/dev/docker-compose.yml | 2 +- plugins/inputs/file/dev/json_a.log | 14 --- .../file/{ => dev}/testfiles/grok_a.log | 0 .../file/{ => dev}/testfiles/json_a.log | 0 plugins/inputs/file/file.go | 9 +- plugins/inputs/file/file_test.go | 12 +- plugins/parsers/grok/parser.go | 6 +- plugins/parsers/grok/parser_test.go | 50 ++++++++ 10 files changed, 126 insertions(+), 88 deletions(-) delete mode 100644 plugins/inputs/file/dev/json_a.log rename plugins/inputs/file/{ => dev}/testfiles/grok_a.log (100%) rename plugins/inputs/file/{ => dev}/testfiles/json_a.log (100%) diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index 753523843..ded0170ec 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -670,6 +670,66 @@ The best way to get acquainted with grok patterns is to read the logstash docs, which are available here: https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html +The grok parser uses a slightly modified version of logstash "grok" +patterns, with the format: + +``` +%{<capture_syntax>[:<semantic_name>][:<modifier>]} +``` + +The `capture_syntax` defines the grok pattern that's used to parse the input +line and the `semantic_name` is used to name the field or tag. The extension +`modifier` controls the data type that the parsed item is converted to or +other special handling. + +By default all named captures are converted into string fields. +Timestamp modifiers can be used to convert captures to the timestamp of the +parsed metric. If no timestamp is parsed the metric will be created using the +current time. + +You must capture at least one field per line. 
+ +- Available modifiers: + - string (default if nothing is specified) + - int + - float + - duration (ie, 5.23ms gets converted to int nanoseconds) + - tag (converts the field into a tag) + - drop (drops the field completely) + - measurement (use the matched text as the measurement name) +- Timestamp modifiers: + - ts (This will auto-learn the timestamp format) + - ts-ansic ("Mon Jan _2 15:04:05 2006") + - ts-unix ("Mon Jan _2 15:04:05 MST 2006") + - ts-ruby ("Mon Jan 02 15:04:05 -0700 2006") + - ts-rfc822 ("02 Jan 06 15:04 MST") + - ts-rfc822z ("02 Jan 06 15:04 -0700") + - ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST") + - ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST") + - ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700") + - ts-rfc3339 ("2006-01-02T15:04:05Z07:00") + - ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00") + - ts-httpd ("02/Jan/2006:15:04:05 -0700") + - ts-epoch (seconds since unix epoch, may contain decimal) + - ts-epochnano (nanoseconds since unix epoch) + - ts-syslog ("Jan 02 15:04:05", parsed time is set to the current year) + - ts-"CUSTOM" + +CUSTOM time layouts must be within quotes and be the representation of the +"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`. +To match a comma decimal point you can use a period in the pattern string. For example, `%{TIMESTAMP:timestamp:ts-"2006-01-02 15:04:05.000"}` can be used to match `"2018-01-02 15:04:05,000"`. +See https://golang.org/pkg/time/#Parse for more details. + +Telegraf has many of its own [built-in patterns](./grok/patterns/influx-patterns), +as well as support for most of +[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns). +_Golang regular expressions do not support lookahead or lookbehind. +logstash patterns that depend on these are not supported._ + +If you need help building patterns to match your logs, +you will find the https://grokdebug.herokuapp.com application quite useful! + #### Grok Configuration: ```toml [[inputs.file]] @@ -714,65 +774,6 @@ which are available here: grok_timezone = "Canada/Eastern" ``` -The grok parser uses a slightly modified version of logstash "grok" -patterns, with the format: - -``` -%{<capture_syntax>[:<semantic_name>][:<modifier>]} -``` - -The `capture_syntax` defines the grok pattern that's used to parse the input -line and the `semantic_name` is used to name the field or tag. The extension -`modifier` controls the data type that the parsed item is converted to or -other special handling. - -By default all named captures are converted into string fields. -Timestamp modifiers can be used to convert captures to the timestamp of the -parsed metric. If no timestamp is parsed the metric will be created using the -current time. - -You must capture at least one field per line. 
- -- Available modifiers: - - string (default if nothing is specified) - - int - - float - - duration (ie, 5.23ms gets converted to int nanoseconds) - - tag (converts the field into a tag) - - drop (drops the field completely) -- Timestamp modifiers: - - ts (This will auto-learn the timestamp format) - - ts-ansic ("Mon Jan _2 15:04:05 2006") - - ts-unix ("Mon Jan _2 15:04:05 MST 2006") - - ts-ruby ("Mon Jan 02 15:04:05 -0700 2006") - - ts-rfc822 ("02 Jan 06 15:04 MST") - - ts-rfc822z ("02 Jan 06 15:04 -0700") - - ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST") - - ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST") - - ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700") - - ts-rfc3339 ("2006-01-02T15:04:05Z07:00") - - ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00") - - ts-httpd ("02/Jan/2006:15:04:05 -0700") - - ts-epoch (seconds since unix epoch, may contain decimal) - - ts-epochnano (nanoseconds since unix epoch) - - ts-syslog ("Jan 02 15:04:05", parsed time is set to the current year) - - ts-"CUSTOM" - -CUSTOM time layouts must be within quotes and be the representation of the -"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`. -To match a comma decimal point you can use a period. For example `%{TIMESTAMP:timestamp:ts-"2006-01-02 15:04:05.000"}` can be used to match `"2018-01-02 15:04:05,000"` -To match a comma decimal point you can use a period in the pattern string. -See https://golang.org/pkg/time/#Parse for more details. - -Telegraf has many of its own [built-in patterns](./grok/patterns/influx-patterns), -as well as support for most of -[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns). -_Golang regular expressions do not support lookahead or lookbehind. -logstash patterns that depend on these are not supported._ - -If you need help building patterns to match your logs, -you will find the https://grokdebug.herokuapp.com application quite useful! - #### Timestamp Examples This example input and config parses a file using a custom timestamp conversion: diff --git a/plugins/inputs/file/README.md b/plugins/inputs/file/README.md index 73a3a2362..4358b67ad 100644 --- a/plugins/inputs/file/README.md +++ b/plugins/inputs/file/README.md @@ -14,7 +14,7 @@ use the [tail input plugin](/plugins/inputs/tail) instead. ## ** as a "super asterisk". ie: ## /var/log/**.log -> recursively find all .log files in /var/log ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log - ## /var/log/apache.log -> only tail the apache log file + ## /var/log/apache.log -> only read the apache log file files = ["/var/log/apache/access.log"] ## Data format to consume. diff --git a/plugins/inputs/file/dev/docker-compose.yml b/plugins/inputs/file/dev/docker-compose.yml index 3c16fca90..efce389f7 100644 --- a/plugins/inputs/file/dev/docker-compose.yml +++ b/plugins/inputs/file/dev/docker-compose.yml @@ -6,7 +6,7 @@ services: volumes: - ./telegraf.conf:/telegraf.conf - ../../../../telegraf:/telegraf - - ./json_a.log:/var/log/test.log + - ./dev/json_a.log:/var/log/test.log entrypoint: - /telegraf - --config diff --git a/plugins/inputs/file/dev/json_a.log b/plugins/inputs/file/dev/json_a.log deleted file mode 100644 index 0f52e9d1e..000000000 --- a/plugins/inputs/file/dev/json_a.log +++ /dev/null @@ -1,14 +0,0 @@ -{ -"parent": { - "child": 3.0, - "ignored_child": "hi" -}, -"ignored_null": null, -"integer": 4, -"list": [3, 4], -"ignored_parent": { - "another_ignored_null": null, - "ignored_string": "hello, world!" 
-}, -"another_list": [4] -} diff --git a/plugins/inputs/file/testfiles/grok_a.log b/plugins/inputs/file/dev/testfiles/grok_a.log similarity index 100% rename from plugins/inputs/file/testfiles/grok_a.log rename to plugins/inputs/file/dev/testfiles/grok_a.log diff --git a/plugins/inputs/file/testfiles/json_a.log b/plugins/inputs/file/dev/testfiles/json_a.log similarity index 100% rename from plugins/inputs/file/testfiles/json_a.log rename to plugins/inputs/file/dev/testfiles/json_a.log diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go index 2779561fc..d6714301e 100644 --- a/plugins/inputs/file/file.go +++ b/plugins/inputs/file/file.go @@ -11,9 +11,8 @@ import ( ) type File struct { - Files []string `toml:"files"` - FromBeginning bool - parser parsers.Parser + Files []string `toml:"files"` + parser parsers.Parser filenames []string } @@ -24,7 +23,7 @@ const sampleConfig = ` ## ** as a "super asterisk". ie: ## /var/log/**.log -> recursively find all .log files in /var/log ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log - ## /var/log/apache.log -> only tail the apache log file + ## /var/log/apache.log -> only read the apache log file files = ["/var/log/apache/access.log"] ## The dataformat to be read from files @@ -40,7 +39,7 @@ func (f *File) SampleConfig() string { } func (f *File) Description() string { - return "reload and gather from file[s] on telegraf's interval" + return "Reload and gather from file[s] on telegraf's interval." } func (f *File) Gather(acc telegraf.Accumulator) error { diff --git a/plugins/inputs/file/file_test.go b/plugins/inputs/file/file_test.go index 281056646..43322c2e8 100644 --- a/plugins/inputs/file/file_test.go +++ b/plugins/inputs/file/file_test.go @@ -14,26 +14,26 @@ import ( func TestRefreshFilePaths(t *testing.T) { wd, err := os.Getwd() r := File{ - Files: []string{filepath.Join(wd, "testfiles/**.log")}, + Files: []string{filepath.Join(wd, "dev/testfiles/**.log")}, } err = r.refreshFilePaths() require.NoError(t, err) - assert.Equal(t, len(r.filenames), 2) + assert.Equal(t, 2, len(r.filenames)) } func TestJSONParserCompile(t *testing.T) { var acc testutil.Accumulator wd, _ := os.Getwd() r := File{ - Files: []string{filepath.Join(wd, "testfiles/json_a.log")}, + Files: []string{filepath.Join(wd, "dev/testfiles/json_a.log")}, } parserConfig := parsers.Config{ DataFormat: "json", TagKeys: []string{"parent_ignored_child"}, } nParser, err := parsers.NewParser(&parserConfig) - r.parser = nParser assert.NoError(t, err) + r.parser = nParser r.Gather(&acc) assert.Equal(t, map[string]string{"parent_ignored_child": "hi"}, acc.Metrics[0].Tags) @@ -44,7 +44,7 @@ func TestGrokParser(t *testing.T) { wd, _ := os.Getwd() var acc testutil.Accumulator r := File{ - Files: []string{filepath.Join(wd, "testfiles/grok_a.log")}, + Files: []string{filepath.Join(wd, "dev/testfiles/grok_a.log")}, } parserConfig := parsers.Config{ @@ -57,5 +57,5 @@ func TestGrokParser(t *testing.T) { assert.NoError(t, err) err = r.Gather(&acc) - assert.Equal(t, 2, len(acc.Metrics)) + assert.Equal(t, len(acc.Metrics), 2) } diff --git a/plugins/parsers/grok/parser.go b/plugins/parsers/grok/parser.go index 096cb8ed8..bc65588eb 100644 --- a/plugins/parsers/grok/parser.go +++ b/plugins/parsers/grok/parser.go @@ -38,6 +38,7 @@ var timeLayouts = map[string]string{ } const ( + MEASUREMENT = "measurement" INT = "int" TAG = "tag" FLOAT = "float" @@ -217,7 +218,6 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { if k == "" || v == "" { continue } - // 
t is the modifier of the field var t string // check if pattern has some modifiers @@ -239,6 +239,8 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } switch t { + case MEASUREMENT: + p.Measurement = v case INT: iv, err := strconv.ParseInt(v, 10, 64) if err != nil { @@ -350,7 +352,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } if len(fields) == 0 { - return nil, fmt.Errorf("logparser_grok: must have one or more fields") + return nil, fmt.Errorf("grok: must have one or more fields") } return metric.New(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp)) diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go index 09f8fa16d..8133d3021 100644 --- a/plugins/parsers/grok/parser_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -1,6 +1,7 @@ package grok import ( + "log" "testing" "time" @@ -959,3 +960,52 @@ func TestReplaceTimestampComma(t *testing.T) { //Convert Nanosecond to milisecond for compare require.Equal(t, 555, m.Time().Nanosecond()/1000000) } + +func TestDynamicMeasurementModifier(t *testing.T) { + p := &Parser{ + Patterns: []string{"%{TEST}"}, + CustomPatterns: "TEST %{NUMBER:var1:tag} %{NUMBER:var2:float} %{WORD:test:measurement}", + } + + require.NoError(t, p.Compile()) + m, err := p.ParseLine("4 5 hello") + require.NoError(t, err) + require.Equal(t, m.Name(), "hello") +} + +func TestStaticMeasurementModifier(t *testing.T) { + p := &Parser{ + Patterns: []string{"%{WORD:hi:measurement} %{NUMBER:num:string}"}, + } + + require.NoError(t, p.Compile()) + m, err := p.ParseLine("test_name 42") + log.Printf("%v", m) + require.NoError(t, err) + require.Equal(t, "test_name", m.Name()) +} + +// tests that the top level measurement name is used +func TestTwoMeasurementModifier(t *testing.T) { + p := &Parser{ + Patterns: []string{"%{TEST:test_name:measurement}"}, + CustomPatterns: "TEST %{NUMBER:var1:tag} %{NUMBER:var2:measurement} %{WORD:var3:measurement}", + } + + require.NoError(t, p.Compile()) + m, err := p.ParseLine("4 5 hello") + require.NoError(t, err) + require.Equal(t, m.Name(), "4 5 hello") +} + +func TestMeasurementModifierNoName(t *testing.T) { + p := &Parser{ + Patterns: []string{"%{TEST}"}, + CustomPatterns: "TEST %{NUMBER:var1:tag} %{NUMBER:var2:float} %{WORD:hi:measurement}", + } + + require.NoError(t, p.Compile()) + m, err := p.ParseLine("4 5 hello") + require.NoError(t, err) + require.Equal(t, m.Name(), "hello") +} From 1fafa616d74231517577defdeb61ecc4e3eb76f0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 17 Aug 2018 13:46:35 -0700 Subject: [PATCH 0086/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 38ddd3d95..f86fd0310 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -65,6 +65,7 @@ - [#4546](https://github.com/influxdata/telegraf/pull/4546): Add gopsutil meminfo fields to mem plugin. - [#4285](https://github.com/influxdata/telegraf/pull/4285): Document how to parse telegraf logs. - [#4542](https://github.com/influxdata/telegraf/pull/4542): Use dep v0.5.0. +- [#4433](https://github.com/influxdata/telegraf/pull/4433): Add ability to set measurement from matched text in grok parser. 
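A minimal sketch of how the new `measurement` modifier might be used in practice. The file path and pattern below are hypothetical, invented for this example; `data_format` and `grok_patterns` are the options shown in the grok documentation above:

```toml
[[inputs.file]]
  files = ["/var/log/app/status.log"]
  data_format = "grok"
  ## A line such as "service_a 4 0.95" produces a metric named "service_a"
  ## with fields errors=4i and load=0.95; the "name" capture is consumed
  ## by the measurement modifier instead of becoming a field.
  grok_patterns = ["%{WORD:name:measurement} %{NUMBER:errors:int} %{NUMBER:load:float}"]
```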
## v1.7.4 [unreleased] From 886d8cc84030451e054d0b6f74cc7a3354d257a3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 17 Aug 2018 13:51:21 -0700 Subject: [PATCH 0087/1815] Drop message batches in kafka output if too large (#4565) --- internal/models/running_output.go | 1 + plugins/inputs/kafka_consumer/README.md | 2 +- .../inputs/kafka_consumer/kafka_consumer.go | 18 +++++++++++++++++- plugins/outputs/kafka/README.md | 1 + plugins/outputs/kafka/kafka.go | 8 +++++++- 5 files changed, 27 insertions(+), 3 deletions(-) diff --git a/internal/models/running_output.go b/internal/models/running_output.go index a0d1f6b03..25576d745 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -120,6 +120,7 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) { err := ro.write(batch) if err != nil { ro.failMetrics.Add(batch...) + log.Printf("E! Error writing to output [%s]: %v", ro.Name, err) } } } diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index 24a0efc0f..b6fc8fc89 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -44,7 +44,7 @@ and use the old zookeeper connection method. ## Maximum length of a message to consume, in bytes (default 0/unlimited); ## larger messages are dropped - max_message_len = 65536 + max_message_len = 1000000 ``` ## Testing diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index d3791b224..eba9b68ac 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -21,6 +21,7 @@ type Kafka struct { Topics []string Brokers []string MaxMessageLen int + Version string `toml:"version"` Cluster *cluster.Consumer @@ -64,6 +65,12 @@ var sampleConfig = ` ## Optional Client id # client_id = "Telegraf" + ## Set the minimal supported Kafka version. Setting this enables the use of new + ## Kafka features and APIs. Of particular interest, lz4 compression + ## requires at least version 0.10.0.0. 
+ ## ex: version = "1.1.0" + # version = "" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -88,7 +95,7 @@ var sampleConfig = ` ## Maximum length of a message to consume, in bytes (default 0/unlimited); ## larger messages are dropped - max_message_len = 65536 + max_message_len = 1000000 ` func (k *Kafka) SampleConfig() string { @@ -111,6 +118,15 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { k.acc = acc config := cluster.NewConfig() + + if k.Version != "" { + version, err := sarama.ParseKafkaVersion(k.Version) + if err != nil { + return err + } + config.Version = version + } + config.Consumer.Return.Errors = true tlsConfig, err := k.ClientConfig.TLSConfig() diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md index bb410a1d5..5f4758baa 100644 --- a/plugins/outputs/kafka/README.md +++ b/plugins/outputs/kafka/README.md @@ -55,6 +55,7 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm ## 0 : No compression ## 1 : Gzip compression ## 2 : Snappy compression + ## 3 : LZ4 compression # compression_codec = 0 ## RequiredAcks is used in Produce Requests to tell the broker how many diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 0bb5ca4a3..b9ae35396 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -3,6 +3,7 @@ package kafka import ( "crypto/tls" "fmt" + "log" "strings" "github.com/influxdata/telegraf" @@ -79,7 +80,7 @@ var sampleConfig = ` # client_id = "Telegraf" ## Set the minimal supported Kafka version. Setting this enables the use of new - ## Kafka features and APIs. Of particular interested, lz4 compression + ## Kafka features and APIs. Of particular interest, lz4 compression ## requires at least version 0.10.0.0. ## ex: version = "1.1.0" # version = "" @@ -120,6 +121,7 @@ var sampleConfig = ` ## 0 : No compression ## 1 : Gzip compression ## 2 : Snappy compression + ## 3 : LZ4 compression # compression_codec = 0 ## RequiredAcks is used in Produce Requests to tell the broker how many @@ -294,6 +296,10 @@ func (k *Kafka) Write(metrics []telegraf.Metric) error { // We could have many errors, return only the first encountered. if errs, ok := err.(sarama.ProducerErrors); ok { for _, prodErr := range errs { + if prodErr.Err == sarama.ErrMessageSizeTooLarge { + log.Printf("E! Error writing to output [kafka]: Message too large, consider increasing `max_message_bytes`; dropping batch") + return nil + } return prodErr } } From 036981c3b53aaa2caca3a5b16f8b8174531ad879 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 17 Aug 2018 13:52:10 -0700 Subject: [PATCH 0088/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f86fd0310..a4d680f9b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -66,6 +66,8 @@ - [#4285](https://github.com/influxdata/telegraf/pull/4285): Document how to parse telegraf logs. - [#4542](https://github.com/influxdata/telegraf/pull/4542): Use dep v0.5.0. - [#4433](https://github.com/influxdata/telegraf/pull/4433): Add ability to set measurement from matched text in grok parser. +- [#4565](https://github.com/influxdata/telegraf/pull/4565): Drop message batches in kafka output if too large. + ## v1.7.4 [unreleased] From edb6e1f655b9d72517dfa76ee76d6e6ab1e1db78 Mon Sep 17 00:00:00 2001 From: estk Date: Mon, 20 Aug 2018 14:47:48 -0600 Subject: [PATCH 0089/1815] Implement a lock based ring buffer for internal/buffer. 
(#3377) --- internal/buffer/buffer.go | 98 ++++++++++++++++++++++++------- internal/buffer/buffer_test.go | 103 +++++++++++++++++++++++++++++++++ 2 files changed, 179 insertions(+), 22 deletions(-) diff --git a/internal/buffer/buffer.go b/internal/buffer/buffer.go index 04835e042..6a460eccb 100644 --- a/internal/buffer/buffer.go +++ b/internal/buffer/buffer.go @@ -14,9 +14,12 @@ var ( // Buffer is an object for storing metrics in a circular buffer. type Buffer struct { - buf chan telegraf.Metric - - mu sync.Mutex + sync.Mutex + buf []telegraf.Metric + first int + last int + size int + empty bool } // NewBuffer returns a Buffer @@ -24,47 +27,98 @@ type Buffer struct { // called when the buffer is full, then the oldest metric(s) will be dropped. func NewBuffer(size int) *Buffer { return &Buffer{ - buf: make(chan telegraf.Metric, size), + buf: make([]telegraf.Metric, size), + first: 0, + last: 0, + size: size, + empty: true, } } // IsEmpty returns true if Buffer is empty. func (b *Buffer) IsEmpty() bool { - return len(b.buf) == 0 + return b.empty } // Len returns the current length of the buffer. func (b *Buffer) Len() int { - return len(b.buf) + if b.empty { + return 0 + } else if b.first <= b.last { + return b.last - b.first + 1 + } + // Spans the end of array. + // size - gap in the middle + return b.size - (b.first - b.last - 1) // size - gap +} + +func (b *Buffer) push(m telegraf.Metric) { + // Empty + if b.empty { + b.last = b.first // Reset + b.buf[b.last] = m + b.empty = false + return + } + + b.last++ + b.last %= b.size + + // Full + if b.first == b.last { + MetricsDropped.Incr(1) + b.first = (b.first + 1) % b.size + } + b.buf[b.last] = m } // Add adds metrics to the buffer. func (b *Buffer) Add(metrics ...telegraf.Metric) { - b.mu.Lock() - for i, _ := range metrics { + b.Lock() + defer b.Unlock() + for i := range metrics { MetricsWritten.Incr(1) - select { - case b.buf <- metrics[i]: - default: - MetricsDropped.Incr(1) - <-b.buf - b.buf <- metrics[i] - } + b.push(metrics[i]) } - b.mu.Unlock() } // Batch returns a batch of metrics of size batchSize. // the batch will be of maximum length batchSize. It can be less than batchSize, // if the length of Buffer is less than batchSize. 
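// Draining wraps around the ring: at most one contiguous copy from `first` to the end of the backing array, then, if the batch wraps, a second copy starting at index 0 and ending no later than `last`.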
func (b *Buffer) Batch(batchSize int) []telegraf.Metric { - b.mu.Lock() - n := min(len(b.buf), batchSize) - out := make([]telegraf.Metric, n) - for i := 0; i < n; i++ { - out[i] = <-b.buf + b.Lock() + defer b.Unlock() + outLen := min(b.Len(), batchSize) + out := make([]telegraf.Metric, outLen) + if outLen == 0 { + return out + } + + // We copy everything right of first up to last, count or end + // b.last >= rightInd || b.last < b.first + // therefore wont copy past b.last + rightInd := min(b.size, b.first+outLen) - 1 + + copyCount := copy(out, b.buf[b.first:rightInd+1]) + + // We've emptied the ring + if rightInd == b.last { + b.empty = true + } + b.first = rightInd + 1 + b.first %= b.size + + // We circle back for the rest + if copyCount < outLen { + right := min(b.last, outLen-copyCount) + copy(out[copyCount:], b.buf[b.first:right+1]) + // We've emptied the ring + if right == b.last { + b.empty = true + } + b.first = right + 1 + b.first %= b.size } - b.mu.Unlock() return out } diff --git a/internal/buffer/buffer_test.go b/internal/buffer/buffer_test.go index f84d8c66d..b3f666fd0 100644 --- a/internal/buffer/buffer_test.go +++ b/internal/buffer/buffer_test.go @@ -1,6 +1,8 @@ package buffer import ( + "sync" + "sync/atomic" "testing" "github.com/influxdata/telegraf" @@ -17,6 +19,107 @@ var metricList = []telegraf.Metric{ testutil.TestMetric(8, "mymetric5"), } +func makeBench5(b *testing.B, freq, batchSize int) { + const k = 1000 + var wg sync.WaitGroup + buf := NewBuffer(10000) + m := testutil.TestMetric(1, "mymetric") + + for i := 0; i < b.N; i++ { + buf.Add(m, m, m, m, m) + if i%(freq*k) == 0 { + wg.Add(1) + go func() { + buf.Batch(batchSize * k) + wg.Done() + }() + } + } + // Flush + buf.Batch(b.N) + wg.Wait() + +} +func makeBenchStrict(b *testing.B, freq, batchSize int) { + const k = 1000 + var count uint64 + var wg sync.WaitGroup + buf := NewBuffer(10000) + m := testutil.TestMetric(1, "mymetric") + + for i := 0; i < b.N; i++ { + buf.Add(m) + if i%(freq*k) == 0 { + wg.Add(1) + go func() { + defer wg.Done() + l := len(buf.Batch(batchSize * k)) + atomic.AddUint64(&count, uint64(l)) + }() + } + } + // Flush + wg.Add(1) + go func() { + l := len(buf.Batch(b.N)) + atomic.AddUint64(&count, uint64(l)) + wg.Done() + }() + + wg.Wait() + if count != uint64(b.N) { + b.Errorf("not all metrics came out. 
%d of %d", count, b.N) + } +} +func makeBench(b *testing.B, freq, batchSize int) { + const k = 1000 + var wg sync.WaitGroup + buf := NewBuffer(10000) + m := testutil.TestMetric(1, "mymetric") + + for i := 0; i < b.N; i++ { + buf.Add(m) + if i%(freq*k) == 0 { + wg.Add(1) + go func() { + buf.Batch(batchSize * k) + wg.Done() + }() + } + } + wg.Wait() + // Flush + buf.Batch(b.N) +} + +func BenchmarkBufferBatch5Add(b *testing.B) { + makeBench5(b, 100, 101) +} +func BenchmarkBufferBigInfrequentBatchCatchup(b *testing.B) { + makeBench(b, 100, 101) +} +func BenchmarkBufferOftenBatch(b *testing.B) { + makeBench(b, 1, 1) +} +func BenchmarkBufferAlmostBatch(b *testing.B) { + makeBench(b, 10, 9) +} +func BenchmarkBufferSlowBatch(b *testing.B) { + makeBench(b, 10, 1) +} +func BenchmarkBufferBatchNoDrop(b *testing.B) { + makeBenchStrict(b, 1, 4) +} +func BenchmarkBufferCatchup(b *testing.B) { + buf := NewBuffer(10000) + m := testutil.TestMetric(1, "mymetric") + + for i := 0; i < b.N; i++ { + buf.Add(m) + } + buf.Batch(b.N) +} + func BenchmarkAddMetrics(b *testing.B) { buf := NewBuffer(10000) m := testutil.TestMetric(1, "mymetric") From d2cf9a715755657b06ffde37f4509fb1f5df1262 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 21 Aug 2018 12:44:10 -0700 Subject: [PATCH 0090/1815] Add support for static and random routing keys in kafka output (#4579) --- plugins/outputs/kafka/README.md | 7 ++++ plugins/outputs/kafka/kafka.go | 55 +++++++++++++++++---------- plugins/outputs/kafka/kafka_test.go | 59 +++++++++++++++++++++++++++++ 3 files changed, 102 insertions(+), 19 deletions(-) diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md index 5f4758baa..25b173a02 100644 --- a/plugins/outputs/kafka/README.md +++ b/plugins/outputs/kafka/README.md @@ -50,6 +50,13 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm ## ie, if this tag exists, its value will be used as the routing key routing_tag = "host" + ## Static routing key. Used when no routing_tag is set or as a fallback + ## when the tag specified in routing tag is not found. If set to "random", + ## a random value will be generated for each message. + ## ex: routing_key = "random" + ## routing_key = "telegraf" + # routing_key = "" + ## CompressionCodec represents the various compression codecs recognized by ## Kafka in messages. 
## 0 : No compression diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index b9ae35396..f2951e6d5 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -10,6 +10,7 @@ import ( tlsint "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" + uuid "github.com/satori/go.uuid" "github.com/Shopify/sarama" ) @@ -22,24 +23,16 @@ var ValidTopicSuffixMethods = []string{ type ( Kafka struct { - // Kafka brokers to send metrics to - Brokers []string - // Kafka topic - Topic string - // Kafka client id - ClientID string `toml:"client_id"` - // Kafka topic suffix option - TopicSuffix TopicSuffix `toml:"topic_suffix"` - // Routing Key Tag - RoutingTag string `toml:"routing_tag"` - // Compression Codec Tag + Brokers []string + Topic string + ClientID string `toml:"client_id"` + TopicSuffix TopicSuffix `toml:"topic_suffix"` + RoutingTag string `toml:"routing_tag"` + RoutingKey string `toml:"routing_key"` CompressionCodec int - // RequiredAcks Tag - RequiredAcks int - // MaxRetry Tag - MaxRetry int - // Max Message Bytes - MaxMessageBytes int `toml:"max_message_bytes"` + RequiredAcks int + MaxRetry int + MaxMessageBytes int `toml:"max_message_bytes"` Version string `toml:"version"` @@ -116,6 +109,13 @@ var sampleConfig = ` ## ie, if this tag exists, its value will be used as the routing key routing_tag = "host" + ## Static routing key. Used when no routing_tag is set or as a fallback + ## when the tag specified in routing tag is not found. If set to "random", + ## a random value will be generated for each message. + ## ex: routing_key = "random" + ## routing_key = "telegraf" + # routing_key = "" + ## CompressionCodec represents the various compression codecs recognized by ## Kafka in messages. 
## 0 : No compression @@ -273,6 +273,22 @@ func (k *Kafka) Description() string { return "Configuration for the Kafka server to send metrics to" } +func (k *Kafka) routingKey(metric telegraf.Metric) string { + if k.RoutingTag != "" { + key, ok := metric.GetTag(k.RoutingTag) + if ok { + return key + } + } + + if k.RoutingKey == "random" { + u := uuid.NewV4() + return u.String() + } + + return k.RoutingKey +} + func (k *Kafka) Write(metrics []telegraf.Metric) error { msgs := make([]*sarama.ProducerMessage, 0, len(metrics)) for _, metric := range metrics { @@ -285,8 +301,9 @@ func (k *Kafka) Write(metrics []telegraf.Metric) error { Topic: k.GetTopicName(metric), Value: sarama.ByteEncoder(buf), } - if h, ok := metric.GetTag(k.RoutingTag); ok { - m.Key = sarama.StringEncoder(h) + key := k.routingKey(metric) + if key != "" { + m.Key = sarama.StringEncoder(key) } msgs = append(msgs, m) } diff --git a/plugins/outputs/kafka/kafka_test.go b/plugins/outputs/kafka/kafka_test.go index b18d9f15d..ba900e32c 100644 --- a/plugins/outputs/kafka/kafka_test.go +++ b/plugins/outputs/kafka/kafka_test.go @@ -2,7 +2,10 @@ package kafka import ( "testing" + "time" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -96,3 +99,59 @@ func TestValidateTopicSuffixMethod(t *testing.T) { require.NoError(t, err, "Topic suffix method used should be valid.") } } + +func TestRoutingKey(t *testing.T) { + tests := []struct { + name string + kafka *Kafka + metric telegraf.Metric + check func(t *testing.T, routingKey string) + }{ + { + name: "static routing key", + kafka: &Kafka{ + RoutingKey: "static", + }, + metric: func() telegraf.Metric { + m, _ := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ) + return m + }(), + check: func(t *testing.T, routingKey string) { + require.Equal(t, "static", routingKey) + }, + }, + { + name: "random routing key", + kafka: &Kafka{ + RoutingKey: "random", + }, + metric: func() telegraf.Metric { + m, _ := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ) + return m + }(), + check: func(t *testing.T, routingKey string) { + require.Equal(t, 36, len(routingKey)) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + key := tt.kafka.routingKey(tt.metric) + tt.check(t, key) + }) + } +} From 430d7103da6eee0f078f786652bc51c6b9cea125 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 21 Aug 2018 12:45:13 -0700 Subject: [PATCH 0091/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a4d680f9b..348eb145e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,6 +67,7 @@ - [#4542](https://github.com/influxdata/telegraf/pull/4542): Use dep v0.5.0. - [#4433](https://github.com/influxdata/telegraf/pull/4433): Add ability to set measurement from matched text in grok parser. - [#4565](https://github.com/influxdata/telegraf/pull/4465): Drop message batches in kafka output if too large. +- [#4579](https://github.com/influxdata/telegraf/pull/4579): Add support for static and random routing keys in kafka output. 
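As a usage sketch (the broker address and topic below are placeholders, not from the patch), the new option can be combined with the existing tag-based routing so that the tag wins when present and the configured key is the fallback:

```toml
[[outputs.kafka]]
  brokers = ["localhost:9092"]
  topic = "telegraf"
  ## Use the value of the "host" tag as the routing key when it exists...
  routing_tag = "host"
  ## ...otherwise generate a random key so messages spread across partitions.
  routing_key = "random"
```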
## v1.7.4 [unreleased] From e893dc38a2ecbdf1387e937fc7561612c940002b Mon Sep 17 00:00:00 2001 From: Ayrdrie Date: Wed, 22 Aug 2018 14:55:41 -0600 Subject: [PATCH 0092/1815] Add logfmt parser (#4539) --- docs/DATA_FORMATS_INPUT.md | 17 ++ plugins/parsers/logfmt/parser.go | 111 +++++++++++++ plugins/parsers/logfmt/parser_test.go | 231 ++++++++++++++++++++++++++ plugins/parsers/registry.go | 8 + 4 files changed, 367 insertions(+) create mode 100644 plugins/parsers/logfmt/parser.go create mode 100644 plugins/parsers/logfmt/parser_test.go diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index ded0170ec..7f7c94930 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -10,6 +10,7 @@ Telegraf is able to parse the following input data formats into metrics: 1. [Collectd](#collectd) 1. [Dropwizard](#dropwizard) 1. [Grok](#grok) +1. [Logfmt](#logfmt) 1. [Wavefront](#wavefront) Telegraf metrics, like InfluxDB @@ -882,6 +883,22 @@ the file output will only print once per `flush_interval`. - If successful, add the next token, update the pattern and retest. - Continue one token at a time until the entire line is successfully parsed. +# Logfmt +This parser implements the logfmt format by extracting and converting key-value pairs from log text in the form `<key>=<value>`. +At the moment, the plugin will produce one metric per line and all keys +are added as fields. +A typical log +``` +method=GET host=influxdata.org ts=2018-07-24T19:43:40.275Z +connect=4ms service=8ms status=200 bytes=1653 +``` +will be converted into +``` +logfmt method="GET",host="influxdata.org",ts="2018-07-24T19:43:40.275Z",connect="4ms",service="8ms",status=200i,bytes=1653i + +``` +Additional information about the logfmt format can be found [here](https://brandur.org/logfmt). + # Wavefront: Wavefront Data Format metrics are parsed directly into Telegraf metrics. diff --git a/plugins/parsers/logfmt/parser.go b/plugins/parsers/logfmt/parser.go new file mode 100644 index 000000000..603dbbae8 --- /dev/null +++ b/plugins/parsers/logfmt/parser.go @@ -0,0 +1,111 @@ +package logfmt + +import ( + "bytes" + "fmt" + "strconv" + "time" + + "github.com/go-logfmt/logfmt" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +var ( + ErrNoMetric = fmt.Errorf("no metric in line") +) + +// Parser decodes logfmt formatted messages into metrics. +type Parser struct { + MetricName string + DefaultTags map[string]string + Now func() time.Time +} + +// NewParser creates a parser. +func NewParser(metricName string, defaultTags map[string]string) *Parser { + return &Parser{ + MetricName: metricName, + DefaultTags: defaultTags, + Now: time.Now, + } +} + +// Parse converts a slice of bytes in logfmt format to metrics. 
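+// Values that parse as integers, floats, or booleans are converted to the matching field type; all other values are kept as strings, and empty values are skipped.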
+func (p *Parser) Parse(b []byte) ([]telegraf.Metric, error) { + reader := bytes.NewReader(b) + decoder := logfmt.NewDecoder(reader) + metrics := make([]telegraf.Metric, 0) + for { + ok := decoder.ScanRecord() + if !ok { + err := decoder.Err() + if err != nil { + return nil, err + } + break + } + fields := make(map[string]interface{}) + for decoder.ScanKeyval() { + if string(decoder.Value()) == "" { + continue + } + + //type conversions + value := string(decoder.Value()) + if iValue, err := strconv.ParseInt(value, 10, 64); err == nil { + fields[string(decoder.Key())] = iValue + } else if fValue, err := strconv.ParseFloat(value, 64); err == nil { + fields[string(decoder.Key())] = fValue + } else if bValue, err := strconv.ParseBool(value); err == nil { + fields[string(decoder.Key())] = bValue + } else { + fields[string(decoder.Key())] = value + } + } + if len(fields) == 0 { + continue + } + + m, err := metric.New(p.MetricName, map[string]string{}, fields, p.Now()) + if err != nil { + return nil, err + } + + metrics = append(metrics, m) + } + p.applyDefaultTags(metrics) + return metrics, nil +} + +// ParseLine converts a single line of text in logfmt format to metrics. +func (p *Parser) ParseLine(s string) (telegraf.Metric, error) { + metrics, err := p.Parse([]byte(s)) + if err != nil { + return nil, err + } + + if len(metrics) < 1 { + return nil, ErrNoMetric + } + return metrics[0], nil +} + +// SetDefaultTags adds tags to the metrics outputs of Parse and ParseLine. +func (p *Parser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} + +func (p *Parser) applyDefaultTags(metrics []telegraf.Metric) { + if len(p.DefaultTags) == 0 { + return + } + + for _, m := range metrics { + for k, v := range p.DefaultTags { + if !m.HasTag(k) { + m.AddTag(k, v) + } + } + } +} diff --git a/plugins/parsers/logfmt/parser_test.go b/plugins/parsers/logfmt/parser_test.go new file mode 100644 index 000000000..c90964684 --- /dev/null +++ b/plugins/parsers/logfmt/parser_test.go @@ -0,0 +1,231 @@ +package logfmt + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func MustMetric(t *testing.T, m *testutil.Metric) telegraf.Metric { + t.Helper() + v, err := metric.New(m.Measurement, m.Tags, m.Fields, m.Time) + if err != nil { + t.Fatal(err) + } + return v +} + +func TestParse(t *testing.T) { + tests := []struct { + name string + measurement string + now func() time.Time + bytes []byte + want []testutil.Metric + wantErr bool + }{ + { + name: "no bytes returns no metrics", + now: func() time.Time { return time.Unix(0, 0) }, + want: []testutil.Metric{}, + }, + { + name: "test without trailing end", + bytes: []byte("foo=\"bar\""), + now: func() time.Time { return time.Unix(0, 0) }, + measurement: "testlog", + want: []testutil.Metric{ + testutil.Metric{ + Measurement: "testlog", + Tags: map[string]string{}, + Fields: map[string]interface{}{ + "foo": "bar", + }, + Time: time.Unix(0, 0), + }, + }, + }, + { + name: "test with trailing end", + bytes: []byte("foo=\"bar\"\n"), + now: func() time.Time { return time.Unix(0, 0) }, + measurement: "testlog", + want: []testutil.Metric{ + testutil.Metric{ + Measurement: "testlog", + Tags: map[string]string{}, + Fields: map[string]interface{}{ + "foo": "bar", + }, + Time: time.Unix(0, 0), + }, + }, + }, + { + name: "logfmt parser returns all the fields", + bytes: []byte(`ts=2018-07-24T19:43:40.275Z lvl=info msg="http request" 
method=POST`), + now: func() time.Time { return time.Unix(0, 0) }, + measurement: "testlog", + want: []testutil.Metric{ + testutil.Metric{ + Measurement: "testlog", + Tags: map[string]string{}, + Fields: map[string]interface{}{ + "lvl": "info", + "msg": "http request", + "method": "POST", + "ts": "2018-07-24T19:43:40.275Z", + }, + Time: time.Unix(0, 0), + }, + }, + }, + { + name: "logfmt parser parses every line", + bytes: []byte("ts=2018-07-24T19:43:40.275Z lvl=info msg=\"http request\" method=POST\nparent_id=088876RL000 duration=7.45 log_id=09R4e4Rl000"), + now: func() time.Time { return time.Unix(0, 0) }, + measurement: "testlog", + want: []testutil.Metric{ + testutil.Metric{ + Measurement: "testlog", + Tags: map[string]string{}, + Fields: map[string]interface{}{ + "lvl": "info", + "msg": "http request", + "method": "POST", + "ts": "2018-07-24T19:43:40.275Z", + }, + Time: time.Unix(0, 0), + }, + testutil.Metric{ + Measurement: "testlog", + Tags: map[string]string{}, + Fields: map[string]interface{}{ + "parent_id": "088876RL000", + "duration": 7.45, + "log_id": "09R4e4Rl000", + }, + Time: time.Unix(0, 0), + }, + }, + }, + { + name: "keys without = or values are ignored", + now: func() time.Time { return time.Unix(0, 0) }, + bytes: []byte(`i am no data.`), + want: []testutil.Metric{}, + wantErr: false, + }, + { + name: "keys without values are ignored", + now: func() time.Time { return time.Unix(0, 0) }, + bytes: []byte(`foo="" bar=`), + want: []testutil.Metric{}, + wantErr: false, + }, + { + name: "unterminated quote produces error", + now: func() time.Time { return time.Unix(0, 0) }, + measurement: "testlog", + bytes: []byte(`bar=baz foo="bar`), + want: []testutil.Metric{}, + wantErr: true, + }, + { + name: "malformed key", + now: func() time.Time { return time.Unix(0, 0) }, + measurement: "testlog", + bytes: []byte(`"foo=" bar=baz`), + want: []testutil.Metric{}, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + l := Parser{ + MetricName: tt.measurement, + Now: tt.now, + } + got, err := l.Parse(tt.bytes) + if (err != nil) != tt.wantErr { + t.Errorf("Logfmt.Parse error = %v, wantErr %v", err, tt.wantErr) + return + } + require.Equal(t, len(tt.want), len(got)) + for i, m := range got { + testutil.MustEqual(t, m, tt.want[i]) + } + }) + } +} + +func TestParseLine(t *testing.T) { + tests := []struct { + name string + s string + measurement string + now func() time.Time + want testutil.Metric + wantErr bool + }{ + { + name: "No Metric In line", + now: func() time.Time { return time.Unix(0, 0) }, + want: testutil.Metric{}, + wantErr: true, + }, + { + name: "Log parser fmt returns all fields", + now: func() time.Time { return time.Unix(0, 0) }, + measurement: "testlog", + s: `ts=2018-07-24T19:43:35.207268Z lvl=5 msg="Write failed" log_id=09R4e4Rl000`, + want: testutil.Metric{ + Measurement: "testlog", + Fields: map[string]interface{}{ + "ts": "2018-07-24T19:43:35.207268Z", + "lvl": int64(5), + "msg": "Write failed", + "log_id": "09R4e4Rl000", + }, + Tags: map[string]string{}, + Time: time.Unix(0, 0), + }, + }, + { + name: "ParseLine only returns metrics from first string", + now: func() time.Time { return time.Unix(0, 0) }, + measurement: "testlog", + s: "ts=2018-07-24T19:43:35.207268Z lvl=5 msg=\"Write failed\" log_id=09R4e4Rl000\nmethod=POST parent_id=088876RL000 duration=7.45 log_id=09R4e4Rl000", + want: testutil.Metric{ + Measurement: "testlog", + Fields: map[string]interface{}{ + "ts": "2018-07-24T19:43:35.207268Z", + "lvl": int64(5), + "msg": 
"Write failed", + "log_id": "09R4e4Rl000", + }, + Tags: map[string]string{}, + Time: time.Unix(0, 0), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + l := Parser{ + MetricName: tt.measurement, + Now: tt.now, + } + got, err := l.ParseLine(tt.s) + if (err != nil) != tt.wantErr { + t.Fatalf("Logfmt.Parse error = %v, wantErr %v", err, tt.wantErr) + } + if got != nil { + testutil.MustEqual(t, got, tt.want) + } + }) + } +} diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 1e395047a..e198cb2cb 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -11,6 +11,7 @@ import ( "github.com/influxdata/telegraf/plugins/parsers/grok" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/parsers/json" + "github.com/influxdata/telegraf/plugins/parsers/logfmt" "github.com/influxdata/telegraf/plugins/parsers/nagios" "github.com/influxdata/telegraf/plugins/parsers/value" "github.com/influxdata/telegraf/plugins/parsers/wavefront" @@ -142,6 +143,8 @@ func NewParser(config *Config) (Parser, error) { config.GrokCustomPatterns, config.GrokCustomPatternFiles, config.GrokTimeZone) + case "logfmt": + parser, err = NewLogFmtParser(config.MetricName, config.DefaultTags) default: err = fmt.Errorf("Invalid data format: %s", config.DataFormat) } @@ -242,6 +245,11 @@ func NewDropwizardParser( return parser, err } +// NewLogFmtParser returns a logfmt parser with the default options. +func NewLogFmtParser(metricName string, defaultTags map[string]string) (Parser, error) { + return logfmt.NewParser(metricName, defaultTags), nil +} + func NewWavefrontParser(defaultTags map[string]string) (Parser, error) { return wavefront.NewWavefrontParser(defaultTags), nil } From 9f8de25e0ebd921dd7b0a8071b412499563900ee Mon Sep 17 00:00:00 2001 From: Ayrdrie Date: Wed, 22 Aug 2018 16:28:50 -0700 Subject: [PATCH 0093/1815] Add parser processor (#4551) --- metric/builder.go | 4 +- plugins/processors/all/all.go | 1 + plugins/processors/parser/README.md | 43 ++ plugins/processors/parser/parser.go | 124 +++++ plugins/processors/parser/parser_test.go | 670 +++++++++++++++++++++++ 5 files changed, 841 insertions(+), 1 deletion(-) create mode 100644 plugins/processors/parser/README.md create mode 100644 plugins/processors/parser/parser.go create mode 100644 plugins/processors/parser/parser_test.go diff --git a/metric/builder.go b/metric/builder.go index c579046df..9a331b9a4 100644 --- a/metric/builder.go +++ b/metric/builder.go @@ -41,7 +41,9 @@ func (b *Builder) SetTime(tm time.Time) { } func (b *Builder) Reset() { - b.metric = &metric{} + b.metric = &metric{ + tp: telegraf.Untyped, + } } func (b *Builder) Metric() (telegraf.Metric, error) { diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go index f581ea602..5c2e2549e 100644 --- a/plugins/processors/all/all.go +++ b/plugins/processors/all/all.go @@ -4,6 +4,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/processors/converter" _ "github.com/influxdata/telegraf/plugins/processors/enum" _ "github.com/influxdata/telegraf/plugins/processors/override" + _ "github.com/influxdata/telegraf/plugins/processors/parser" _ "github.com/influxdata/telegraf/plugins/processors/printer" _ "github.com/influxdata/telegraf/plugins/processors/regex" _ "github.com/influxdata/telegraf/plugins/processors/rename" diff --git a/plugins/processors/parser/README.md b/plugins/processors/parser/README.md new file mode 100644 index 000000000..7564a75ea --- /dev/null +++ 
b/plugins/processors/parser/README.md
@@ -0,0 +1,43 @@
+# Parser Processor Plugin
+
+This plugin parses defined fields containing the specified data format and
+creates new metrics based on the contents of the field.
+
+## Configuration
+```toml
+[[processors.parser]]
+  ## The names of the fields whose values will be parsed.
+  parse_fields = ["message"]
+
+  ## If true, incoming metrics are not emitted.
+  drop_original = false
+
+  ## If set to override, emitted metrics will be merged by overriding the
+  ## original metric using the newly parsed metrics.
+  merge = "override"
+
+  ## The data format of the fields to be parsed.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "influx"
+```
+
+### Example:
+
+```toml
+[[processors.parser]]
+  parse_fields = ["message"]
+  merge = "override"
+  data_format = "logfmt"
+```
+
+**Input**:
+```
+syslog,appname=influxd,facility=daemon,hostname=http://influxdb.example.org (influxdb.example.org),severity=info version=1i,severity_code=6i,facility_code=3i,timestamp=1533848508138040000i,procid="6629",message=" ts=2018-08-09T21:01:48.137963Z lvl=info msg=\"Executing query\" log_id=09p7QbOG000 service=query query=\"SHOW DATABASES\""
+```
+
+**Output**:
+```
+syslog,appname=influxd,facility=daemon,hostname=http://influxdb.example.org (influxdb.example.org),severity=info version=1i,severity_code=6i,facility_code=3i,timestamp=1533848508138040000i,procid="6629",ts="2018-08-09T21:01:48.137963Z",lvl=info msg="Executing query",log_id="09p7QbOG000",service="query",query="SHOW DATABASES"
+```
diff --git a/plugins/processors/parser/parser.go b/plugins/processors/parser/parser.go
new file mode 100644
index 000000000..63230763a
--- /dev/null
+++ b/plugins/processors/parser/parser.go
@@ -0,0 +1,124 @@
+package parser
+
+import (
+	"log"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/parsers"
+	"github.com/influxdata/telegraf/plugins/processors"
+)
+
+type Parser struct {
+	parsers.Config
+	DropOriginal bool     `toml:"drop_original"`
+	Merge        string   `toml:"merge"`
+	ParseFields  []string `toml:"parse_fields"`
+	Parser       parsers.Parser
+}
+
+var SampleConfig = `
+  ## The names of the fields whose values will be parsed.
+  parse_fields = []
+
+  ## If true, incoming metrics are not emitted.
+  drop_original = false
+
+  ## If set to override, emitted metrics will be merged by overriding the
+  ## original metric using the newly parsed metrics.
+  merge = "override"
+
+  ## The data format of the fields to be parsed.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "influx"
+`
+
+func (p *Parser) SampleConfig() string {
+	return SampleConfig
+}
+
+func (p *Parser) Description() string {
+	return "Parse a value in a specified field/tag(s) and add the result as a new metric"
+}
+
+func (p *Parser) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
+	if p.Parser == nil {
+		var err error
+		p.Parser, err = parsers.NewParser(&p.Config)
+		if err != nil {
+			log.Printf("E! 
[processors.parser] could not create parser: %v", err) + return metrics + } + } + + results := []telegraf.Metric{} + + for _, metric := range metrics { + newMetrics := []telegraf.Metric{} + if !p.DropOriginal { + newMetrics = append(newMetrics, metric) + } + + for _, key := range p.ParseFields { + for _, field := range metric.FieldList() { + if field.Key == key { + switch value := field.Value.(type) { + case string: + fromFieldMetric, err := p.parseField(value) + if err != nil { + log.Printf("E! [processors.parser] could not parse field %s: %v", key, err) + } + + for _, m := range fromFieldMetric { + if m.Name() == "" { + m.SetName(metric.Name()) + } + } + + // multiple parsed fields shouldn't create multiple + // metrics so we'll merge tags/fields down into one + // prior to returning. + newMetrics = append(newMetrics, fromFieldMetric...) + default: + log.Printf("E! [processors.parser] field '%s' not a string, skipping", key) + } + } + } + } + + if len(newMetrics) == 0 { + continue + } + + if p.Merge == "override" { + results = append(results, merge(newMetrics[0], newMetrics[1:])) + } else { + results = append(results, newMetrics...) + } + } + return results +} + +func merge(base telegraf.Metric, metrics []telegraf.Metric) telegraf.Metric { + for _, metric := range metrics { + for _, field := range metric.FieldList() { + base.AddField(field.Key, field.Value) + } + for _, tag := range metric.TagList() { + base.AddTag(tag.Key, tag.Value) + } + base.SetName(metric.Name()) + } + return base +} + +func (p *Parser) parseField(value string) ([]telegraf.Metric, error) { + return p.Parser.Parse([]byte(value)) +} + +func init() { + processors.Add("parser", func() telegraf.Processor { + return &Parser{DropOriginal: false} + }) +} diff --git a/plugins/processors/parser/parser_test.go b/plugins/processors/parser/parser_test.go new file mode 100644 index 000000000..ac042848f --- /dev/null +++ b/plugins/processors/parser/parser_test.go @@ -0,0 +1,670 @@ +package parser + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +//compares metrics without comparing time +func compareMetrics(t *testing.T, expected, actual []telegraf.Metric) { + assert.Equal(t, len(expected), len(actual)) + for i, metric := range actual { + require.Equal(t, expected[i].Name(), metric.Name()) + require.Equal(t, expected[i].Fields(), metric.Fields()) + require.Equal(t, expected[i].Tags(), metric.Tags()) + } +} + +func Metric(v telegraf.Metric, err error) telegraf.Metric { + if err != nil { + panic(err) + } + return v +} + +func TestApply(t *testing.T) { + tests := []struct { + name string + parseFields []string + config parsers.Config + dropOriginal bool + merge string + input telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "parse one field drop original", + parseFields: []string{"sample"}, + dropOriginal: true, + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{ + "ts", + "lvl", + "msg", + "method", + }, + }, + input: Metric( + metric.New( + "singleField", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "singleField", + map[string]string{ + "ts": "2018-07-24T19:43:40.275Z", + "lvl": "info", + "msg": "http 
request", + "method": "POST", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + }, + }, + { + name: "parse one field with merge", + parseFields: []string{"sample"}, + dropOriginal: false, + merge: "override", + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{ + "ts", + "lvl", + "msg", + "method", + }, + }, + input: Metric( + metric.New( + "singleField", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "singleField", + map[string]string{ + "some": "tag", + "ts": "2018-07-24T19:43:40.275Z", + "lvl": "info", + "msg": "http request", + "method": "POST", + }, + map[string]interface{}{ + "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0))), + }, + }, + { + name: "parse one field keep", + parseFields: []string{"sample"}, + dropOriginal: false, + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{ + "ts", + "lvl", + "msg", + "method", + }, + }, + input: Metric( + metric.New( + "singleField", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "singleField", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0))), + Metric(metric.New( + "singleField", + map[string]string{ + "ts": "2018-07-24T19:43:40.275Z", + "lvl": "info", + "msg": "http request", + "method": "POST", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + }, + }, + { + name: "parse one field keep with measurement name", + parseFields: []string{"message"}, + config: parsers.Config{ + DataFormat: "influx", + }, + dropOriginal: false, + input: Metric( + metric.New( + "influxField", + map[string]string{}, + map[string]interface{}{ + "message": "deal,computer_name=hosta message=\"stuff\" 1530654676316265790", + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "influxField", + map[string]string{}, + map[string]interface{}{ + "message": "deal,computer_name=hosta message=\"stuff\" 1530654676316265790", + }, + time.Unix(0, 0))), + Metric(metric.New( + "deal", + map[string]string{ + "computer_name": "hosta", + }, + map[string]interface{}{ + "message": "stuff", + }, + time.Unix(0, 0))), + }, + }, + { + name: "parse one field override replaces name", + parseFields: []string{"message"}, + dropOriginal: false, + merge: "override", + config: parsers.Config{ + DataFormat: "influx", + }, + input: Metric( + metric.New( + "influxField", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "message": "deal,computer_name=hosta message=\"stuff\" 1530654676316265790", + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "deal", + map[string]string{ + "computer_name": "hosta", + "some": "tag", + }, + map[string]interface{}{ + "message": "stuff", + }, + time.Unix(0, 0))), + }, + }, + { + name: "parse grok field", + parseFields: []string{"grokSample"}, + dropOriginal: true, + config: parsers.Config{ + DataFormat: "grok", + GrokPatterns: []string{"%{COMBINED_LOG_FORMAT}"}, + }, + input: Metric( + metric.New( + "success", + map[string]string{}, + 
map[string]interface{}{ + "grokSample": "127.0.0.1 - - [11/Dec/2013:00:01:45 -0800] \"GET /xampp/status.php HTTP/1.1\" 200 3891 \"http://cadenza/xampp/navi.php\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0) Gecko/20100101 Firefox/25.0\"", + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "success", + map[string]string{ + "resp_code": "200", + "verb": "GET", + }, + map[string]interface{}{ + "resp_bytes": int64(3891), + "auth": "-", + "request": "/xampp/status.php", + "referrer": "http://cadenza/xampp/navi.php", + "agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0) Gecko/20100101 Firefox/25.0", + "client_ip": "127.0.0.1", + "ident": "-", + "http_version": float64(1.1), + }, + time.Unix(0, 0))), + }, + }, + { + name: "parse two fields [replace]", + parseFields: []string{"field_1", "field_2"}, + dropOriginal: true, + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{"lvl", "err"}, + }, + input: Metric( + metric.New( + "bigMeasure", + map[string]string{}, + map[string]interface{}{ + "field_1": `{"lvl":"info","msg":"http request"}`, + "field_2": `{"err":"fatal","fatal":"security threat"}`, + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "bigMeasure", + map[string]string{ + "lvl": "info", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + Metric(metric.New( + "bigMeasure", + map[string]string{ + "err": "fatal", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + }, + }, + { + name: "parse two fields [merge]", + parseFields: []string{"field_1", "field_2"}, + dropOriginal: false, + merge: "override", + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{"lvl", "msg", "err", "fatal"}, + }, + input: Metric( + metric.New( + "bigMeasure", + map[string]string{}, + map[string]interface{}{ + "field_1": `{"lvl":"info","msg":"http request"}`, + "field_2": `{"err":"fatal","fatal":"security threat"}`, + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "bigMeasure", + map[string]string{ + "lvl": "info", + "msg": "http request", + "err": "fatal", + "fatal": "security threat", + }, + map[string]interface{}{ + "field_1": `{"lvl":"info","msg":"http request"}`, + "field_2": `{"err":"fatal","fatal":"security threat"}`, + }, + time.Unix(0, 0))), + }, + }, + { + name: "parse two fields [keep]", + parseFields: []string{"field_1", "field_2"}, + dropOriginal: false, + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{"lvl", "msg", "err", "fatal"}, + }, + input: Metric( + metric.New( + "bigMeasure", + map[string]string{}, + map[string]interface{}{ + "field_1": `{"lvl":"info","msg":"http request"}`, + "field_2": `{"err":"fatal","fatal":"security threat"}`, + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "bigMeasure", + map[string]string{}, + map[string]interface{}{ + "field_1": `{"lvl":"info","msg":"http request"}`, + "field_2": `{"err":"fatal","fatal":"security threat"}`, + }, + time.Unix(0, 0))), + Metric(metric.New( + "bigMeasure", + map[string]string{ + "lvl": "info", + "msg": "http request", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + Metric(metric.New( + "bigMeasure", + map[string]string{ + "err": "fatal", + "fatal": "security threat", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + }, + }, + { + name: "Fail to parse one field but parses other [keep]", + parseFields: []string{"good", "bad"}, + dropOriginal: false, + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{"lvl"}, + }, + 
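/* the "bad" field below is not valid JSON; Apply logs the parse error and only "good" yields a parsed metric */ 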
input: Metric( + metric.New( + "success", + map[string]string{}, + map[string]interface{}{ + "good": `{"lvl":"info"}`, + "bad": "why", + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "success", + map[string]string{}, + map[string]interface{}{ + "good": `{"lvl":"info"}`, + "bad": "why", + }, + time.Unix(0, 0))), + Metric(metric.New( + "success", + map[string]string{ + "lvl": "info", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + }, + }, + { + name: "Fail to parse one field but parses other [keep] v2", + parseFields: []string{"bad", "good", "ok"}, + dropOriginal: false, + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{"lvl", "thing"}, + }, + input: Metric( + metric.New( + "success", + map[string]string{}, + map[string]interface{}{ + "bad": "why", + "good": `{"lvl":"info"}`, + "ok": `{"thing":"thang"}`, + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "success", + map[string]string{}, + map[string]interface{}{ + "bad": "why", + "good": `{"lvl":"info"}`, + "ok": `{"thing":"thang"}`, + }, + time.Unix(0, 0))), + Metric(metric.New( + "success", + map[string]string{ + "lvl": "info", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + Metric(metric.New( + "success", + map[string]string{ + "thing": "thang", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + }, + }, + { + name: "Fail to parse one field but parses other [merge]", + parseFields: []string{"good", "bad"}, + dropOriginal: false, + merge: "override", + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{"lvl"}, + }, + input: Metric( + metric.New( + "success", + map[string]string{ + "a": "tag", + }, + map[string]interface{}{ + "good": `{"lvl":"info"}`, + "bad": "why", + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "success", + map[string]string{ + "a": "tag", + "lvl": "info", + }, + map[string]interface{}{ + "good": `{"lvl":"info"}`, + "bad": "why", + }, + time.Unix(0, 0))), + }, + }, + { + name: "Fail to parse one field but parses other [replace]", + parseFields: []string{"good", "bad"}, + dropOriginal: true, + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{"lvl"}, + }, + input: Metric( + metric.New( + "success", + map[string]string{ + "thing": "tag", + }, + map[string]interface{}{ + "good": `{"lvl":"info"}`, + "bad": "why", + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "success", + map[string]string{ + "lvl": "info", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := Parser{ + Config: tt.config, + ParseFields: tt.parseFields, + DropOriginal: tt.dropOriginal, + Merge: tt.merge, + } + + output := parser.Apply(tt.input) + t.Logf("Testing: %s", tt.name) + compareMetrics(t, tt.expected, output) + }) + } +} + +func TestBadApply(t *testing.T) { + tests := []struct { + name string + parseFields []string + config parsers.Config + input telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "field not found", + parseFields: []string{"bad_field"}, + config: parsers.Config{ + DataFormat: "json", + }, + input: Metric( + metric.New( + "bad", + map[string]string{}, + map[string]interface{}{ + "some_field": 5, + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "bad", + map[string]string{}, + map[string]interface{}{ + "some_field": 5, + }, + time.Unix(0, 0))), + }, + }, + { + name: "non string field", + parseFields: 
[]string{"some_field"}, + config: parsers.Config{ + DataFormat: "json", + }, + input: Metric( + metric.New( + "bad", + map[string]string{}, + map[string]interface{}{ + "some_field": 5, + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "bad", + map[string]string{}, + map[string]interface{}{ + "some_field": 5, + }, + time.Unix(0, 0))), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := Parser{ + Config: tt.config, + ParseFields: tt.parseFields, + } + + output := parser.Apply(tt.input) + + compareMetrics(t, output, tt.expected) + }) + } +} + +// Benchmarks + +func getMetricFields(metric telegraf.Metric) interface{} { + key := "field3" + if value, ok := metric.Fields()[key]; ok { + return value + } + return nil +} + +func getMetricFieldList(metric telegraf.Metric) interface{} { + key := "field3" + fields := metric.FieldList() + for _, field := range fields { + if field.Key == key { + return field.Value + } + } + return nil +} + +func BenchmarkFieldListing(b *testing.B) { + metric := Metric(metric.New( + "test", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "field0": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field1": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field2": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field3": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field4": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field5": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field6": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0))) + + for n := 0; n < b.N; n++ { + getMetricFieldList(metric) + } +} + +func BenchmarkFields(b *testing.B) { + metric := Metric(metric.New( + "test", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "field0": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field1": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field2": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field3": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field4": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field5": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field6": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0))) + + for n := 0; n < b.N; n++ { + getMetricFields(metric) + } +} From 2ac06f5df0154e684f5b16f42c788e2f61623d5c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 22 Aug 2018 13:57:33 -0700 Subject: [PATCH 0094/1815] Update changelog --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 348eb145e..a88358a40 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ - [enum](./plugins/processors/enum/README.md) - Contributed by @KarstenSchnitter - [rename](./plugins/processors/rename/README.md) - Contributed by @goldibex +- [parser](./plugins/processors/parser/README.md) - Contributed by @maxunt & @Ayrdrie ### New Aggregators @@ -68,7 +69,8 @@ - 
[#4433](https://github.com/influxdata/telegraf/pull/4433): Add ability to set measurement from matched text in grok parser.
 - [#4565](https://github.com/influxdata/telegraf/pull/4565): Drop message batches in kafka output if too large.
 - [#4579](https://github.com/influxdata/telegraf/pull/4579): Add support for static and random routing keys in kafka output.
-
+- [#4539](https://github.com/influxdata/telegraf/pull/4539): Add logfmt parser.
+- [#4551](https://github.com/influxdata/telegraf/pull/4551): Add parser processor.
 
 ## v1.7.4 [unreleased]
 
From a8496f87b26a9afad6107592d5b676a7ea2e8019 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 22 Aug 2018 18:38:07 -0700
Subject: [PATCH 0095/1815] Fix example input and output in parser processor
 docs

---
 plugins/processors/parser/README.md | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/plugins/processors/parser/README.md b/plugins/processors/parser/README.md
index 7564a75ea..134bbb59e 100644
--- a/plugins/processors/parser/README.md
+++ b/plugins/processors/parser/README.md
@@ -34,10 +34,12 @@ creates new metrics based on the contents of the field.
 
 **Input**:
 ```
-syslog,appname=influxd,facility=daemon,hostname=http://influxdb.example.org (influxdb.example.org),severity=info version=1i,severity_code=6i,facility_code=3i,timestamp=1533848508138040000i,procid="6629",message=" ts=2018-08-09T21:01:48.137963Z lvl=info msg=\"Executing query\" log_id=09p7QbOG000 service=query query=\"SHOW DATABASES\""
+syslog,appname=influxd,facility=daemon,hostname=http://influxdb.example.org\ (influxdb.example.org),severity=info facility_code=3i,message=" ts=2018-08-09T21:01:48.137963Z lvl=info msg=\"Executing query\" log_id=09p7QbOG000 service=query query=\"SHOW DATABASES\"",procid="6629",severity_code=6i,timestamp=1533848508138040000i,version=1i
 ```
 
 **Output**:
 ```
-syslog,appname=influxd,facility=daemon,hostname=http://influxdb.example.org (influxdb.example.org),severity=info version=1i,severity_code=6i,facility_code=3i,timestamp=1533848508138040000i,procid="6629",ts="2018-08-09T21:01:48.137963Z",lvl=info msg="Executing query",log_id="09p7QbOG000",service="query",query="SHOW DATABASES"
+syslog,appname=influxd,facility=daemon,hostname=http://influxdb.example.org\ (influxdb.example.org),severity=info facility_code=3i,log_id="09p7QbOG000",lvl="info",message=" ts=2018-08-09T21:01:48.137963Z lvl=info msg=\"Executing query\" log_id=09p7QbOG000 service=query query=\"SHOW DATABASES\"",msg="Executing query",procid="6629",query="SHOW DATABASES",service="query",severity_code=6i,timestamp=1533848508138040000i,ts="2018-08-09T21:01:48.137963Z",version=1i
 ```
+
+
From e72fab7cbecf24696645ccff1f0b7952d0f8d140 Mon Sep 17 00:00:00 2001
From: LABOUARDY Mohamed
Date: Thu, 23 Aug 2018 03:10:40 +0100
Subject: [PATCH 0096/1815] Add Icinga2 input plugin (#4559)

---
 README.md                              |   1 +
 plugins/inputs/all/all.go              |   1 +
 plugins/inputs/icinga2/README.md       |  65 ++++++++++
 plugins/inputs/icinga2/icinga2.go      | 170 +++++++++++++++++++++++++
 plugins/inputs/icinga2/icinga2_test.go |  91 +++++++++++++
 5 files changed, 328 insertions(+)
 create mode 100644 plugins/inputs/icinga2/README.md
 create mode 100644 plugins/inputs/icinga2/icinga2.go
 create mode 100644 plugins/inputs/icinga2/icinga2_test.go

diff --git a/README.md b/README.md
index e0d88e414..867aa10f7 100644
--- a/README.md
+++ b/README.md
@@ -168,6 +168,7 @@ configuration options.
 * [http_listener](./plugins/inputs/http_listener)
 * [http](./plugins/inputs/http) (generic HTTP plugin, supports using input data formats)
 * [http_response](./plugins/inputs/http_response)
+* [icinga2](./plugins/inputs/icinga2)
 * [influxdb](./plugins/inputs/influxdb)
 * [internal](./plugins/inputs/internal)
 * [interrupts](./plugins/inputs/interrupts)
diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index ac86fb879..17762d1c7 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -42,6 +42,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/http_listener"
 	_ "github.com/influxdata/telegraf/plugins/inputs/http_response"
 	_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
+	_ "github.com/influxdata/telegraf/plugins/inputs/icinga2"
 	_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
 	_ "github.com/influxdata/telegraf/plugins/inputs/internal"
 	_ "github.com/influxdata/telegraf/plugins/inputs/interrupts"
diff --git a/plugins/inputs/icinga2/README.md b/plugins/inputs/icinga2/README.md
new file mode 100644
index 000000000..697c6c59c
--- /dev/null
+++ b/plugins/inputs/icinga2/README.md
@@ -0,0 +1,65 @@
+# Icinga2 Input Plugin
+
+This plugin gathers the status of services and hosts using the Icinga2 remote API.
+
+The icinga2 plugin uses the icinga2 remote API to gather status on running
+services and hosts. You can read Icinga2's documentation for their remote API
+[here](https://docs.icinga.com/icinga2/latest/doc/module/icinga2/chapter/icinga2-api).
+
+### Configuration:
+
+```toml
+# Description
+[[inputs.icinga2]]
+  ## Required Icinga2 server address (default: "https://localhost:5665")
+  # server = "https://localhost:5665"
+
+  ## Required Icinga2 object type ("services" or "hosts", default "services")
+  # object_type = "services"
+
+  ## Credentials for basic HTTP authentication
+  # username = "admin"
+  # password = "admin"
+
+  ## Maximum time to receive response.
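+  ## Values below 1s are raised to the default of 5s.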
+  # response_timeout = "5s"
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = true
+```
+
+### Measurements & Fields:
+
+- All measurements have the following fields:
+    - name (string)
+    - state_code (int)
+
+### Tags:
+
+- All measurements have the following tags:
+    - check_command
+    - display_name
+    - state
+    - source
+    - port
+    - scheme
+
+### Sample Queries:
+
+```
+SELECT * FROM "icinga2_services" WHERE state_code = 0 AND time > now() - 24h // Service with OK status
+SELECT * FROM "icinga2_services" WHERE state_code = 1 AND time > now() - 24h // Service with WARNING status
+SELECT * FROM "icinga2_services" WHERE state_code = 2 AND time > now() - 24h // Service with CRITICAL status
+SELECT * FROM "icinga2_services" WHERE state_code = 3 AND time > now() - 24h // Service with UNKNOWN status
+```
+
+### Example Output:
+
+```
+$ ./telegraf -config telegraf.conf -input-filter icinga2 -test
+icinga2_hosts,display_name=router-fr.eqx.fr,check_command=hostalive-custom,host=test-vm,source=localhost,port=5665,scheme=https,state=ok name="router-fr.eqx.fr",state_code=0 1492021603000000000
+```
diff --git a/plugins/inputs/icinga2/icinga2.go b/plugins/inputs/icinga2/icinga2.go
new file mode 100644
index 000000000..37590ab8b
--- /dev/null
+++ b/plugins/inputs/icinga2/icinga2.go
@@ -0,0 +1,170 @@
+package icinga2
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/internal/tls"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+type Icinga2 struct {
+	Server          string
+	ObjectType      string
+	Username        string
+	Password        string
+	ResponseTimeout internal.Duration
+	tls.ClientConfig
+
+	client *http.Client
+}
+
+type Result struct {
+	Results []Object `json:"results"`
+}
+
+type Object struct {
+	Attrs Attribute  `json:"attrs"`
+	Name  string     `json:"name"`
+	Joins struct{}   `json:"joins"`
+	Meta  struct{}   `json:"meta"`
+	Type  ObjectType `json:"type"`
+}
+
+type Attribute struct {
+	CheckCommand string `json:"check_command"`
+	DisplayName  string `json:"display_name"`
+	Name         string `json:"name"`
+	State        int    `json:"state"`
+}
+
+var levels = []string{"ok", "warning", "critical", "unknown"}
+
+type ObjectType string
+
+var sampleConfig = `
+  ## Required Icinga2 server address (default: "https://localhost:5665")
+  # server = "https://localhost:5665"
+
+  ## Required Icinga2 object type ("services" or "hosts", default "services")
+  # object_type = "services"
+
+  ## Credentials for basic HTTP authentication
+  # username = "admin"
+  # password = "admin"
+
+  ## Maximum time to receive response.
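+  ## Values below 1s are raised to the default of 5s.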
+  # response_timeout = "5s"
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = true
+  `
+
+func (i *Icinga2) Description() string {
+	return "Gather Icinga2 status"
+}
+
+func (i *Icinga2) SampleConfig() string {
+	return sampleConfig
+}
+
+func (i *Icinga2) GatherStatus(acc telegraf.Accumulator, checks []Object) {
+	for _, check := range checks {
+		fields := make(map[string]interface{})
+		tags := make(map[string]string)
+
+		url, err := url.Parse(i.Server)
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		fields["name"] = check.Attrs.Name
+		fields["state_code"] = check.Attrs.State
+
+		tags["display_name"] = check.Attrs.DisplayName
+		tags["check_command"] = check.Attrs.CheckCommand
+		tags["state"] = levels[check.Attrs.State]
+		tags["source"] = url.Hostname()
+		tags["scheme"] = url.Scheme
+		tags["port"] = url.Port()
+
+		acc.AddFields(fmt.Sprintf("icinga2_%s", i.ObjectType), fields, tags)
+	}
+}
+
+func (i *Icinga2) createHttpClient() (*http.Client, error) {
+	tlsCfg, err := i.ClientConfig.TLSConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	client := &http.Client{
+		Transport: &http.Transport{
+			TLSClientConfig: tlsCfg,
+		},
+		Timeout: i.ResponseTimeout.Duration,
+	}
+
+	return client, nil
+}
+
+func (i *Icinga2) Gather(acc telegraf.Accumulator) error {
+	if i.ResponseTimeout.Duration < time.Second {
+		i.ResponseTimeout.Duration = time.Second * 5
+	}
+
+	if i.client == nil {
+		client, err := i.createHttpClient()
+		if err != nil {
+			return err
+		}
+		i.client = client
+	}
+
+	url := fmt.Sprintf("%s/v1/objects/%s?attrs=name&attrs=display_name&attrs=state&attrs=check_command", i.Server, i.ObjectType)
+
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return err
+	}
+
+	if i.Username != "" {
+		req.SetBasicAuth(i.Username, i.Password)
+	}
+
+	resp, err := i.client.Do(req)
+	if err != nil {
+		return err
+	}
+
+	defer resp.Body.Close()
+
+	result := Result{}
+	err = json.NewDecoder(resp.Body).Decode(&result)
+	if err != nil {
+		return err
+	}
+
+	i.GatherStatus(acc, result.Results)
+
+	return nil
+}
+
+func init() {
+	inputs.Add("icinga2", func() telegraf.Input {
+		return &Icinga2{
+			Server:     "https://localhost:5665",
+			ObjectType: "services",
+		}
+	})
+}
diff --git a/plugins/inputs/icinga2/icinga2_test.go b/plugins/inputs/icinga2/icinga2_test.go
new file mode 100644
index 000000000..ad9268347
--- /dev/null
+++ b/plugins/inputs/icinga2/icinga2_test.go
@@ -0,0 +1,91 @@
+package icinga2
+
+import (
+	"encoding/json"
+	"testing"
+
+	"github.com/influxdata/telegraf/testutil"
+)
+
+func TestGatherServicesStatus(t *testing.T) {
+
+	s := `{"results":[
+    {
+      "attrs": {
+        "check_command": "check-bgp-juniper-netconf",
+        "display_name": "eq-par.dc2.fr",
+        "name": "ef017af8-c684-4f3f-bb20-0dfe9fcd3dbe",
+        "state": 0
+      },
+      "joins": {},
+      "meta": {},
+      "name": "eq-par.dc2.fr!ef017af8-c684-4f3f-bb20-0dfe9fcd3dbe",
+      "type": "Service"
+    }
+  ]}`
+
+	checks := Result{}
+	json.Unmarshal([]byte(s), &checks)
+	fields := map[string]interface{}{
+		"name":       "ef017af8-c684-4f3f-bb20-0dfe9fcd3dbe",
+		"state_code": 0,
+	}
+	tags := map[string]string{
+		"display_name":  "eq-par.dc2.fr",
+		"check_command": "check-bgp-juniper-netconf",
+		"state":         "ok",
+		"source":        "localhost",
+		"port":          "5665",
+		"scheme":        "https",
+	}
+
+	var acc testutil.Accumulator
+
+	icinga2 := new(Icinga2)
+	icinga2.ObjectType = "services"
+	icinga2.Server = "https://localhost:5665"
+	
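/* GatherStatus derives the source, scheme, and port tags from the configured Server URL */ 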
icinga2.GatherStatus(&acc, checks.Results) + acc.AssertContainsTaggedFields(t, "icinga2_services", fields, tags) +} + +func TestGatherHostsStatus(t *testing.T) { + + s := `{"results":[ + { + "attrs": { + "name": "webserver", + "address": "192.168.1.1", + "check_command": "ping", + "display_name": "apache", + "state": 2 + }, + "joins": {}, + "meta": {}, + "name": "webserver", + "type": "Host" + } + ]}` + + checks := Result{} + json.Unmarshal([]byte(s), &checks) + fields := map[string]interface{}{ + "name": "webserver", + "state_code": 2, + } + tags := map[string]string{ + "display_name": "apache", + "check_command": "ping", + "state": "critical", + "source": "localhost", + "port": "5665", + "scheme": "https", + } + + var acc testutil.Accumulator + + icinga2 := new(Icinga2) + icinga2.ObjectType = "hosts" + icinga2.Server = "https://localhost:5665" + icinga2.GatherStatus(&acc, checks.Results) + acc.AssertContainsTaggedFields(t, "icinga2_hosts", fields, tags) +} From d6d6539e26696ac41c8c5de1fe87b18fa0f408fa Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 22 Aug 2018 19:13:46 -0700 Subject: [PATCH 0097/1815] Update changelog --- CHANGELOG.md | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a88358a40..bc2998d4b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ - [filecount](./plugins/inputs/filecount/README.md) - Contributed by @sometimesfood - [pgbouncer](./plugins/inputs/pgbouncer/README.md) - Contributed by @nerzhul - [activemq](./plugins/inputs/activemq/README.md) - Contributed by @mlabouardy +- [icinga2](./plugins/inputs/icinga2/README.md) - Contributed by @mlabouardy ### New Processors @@ -43,7 +44,7 @@ - [#4347](https://github.com/influxdata/telegraf/pull/4347): Add http path configuration for OpenTSDB output. - [#4352](https://github.com/influxdata/telegraf/pull/4352): Gather IPMI metrics concurrently. - [#4362](https://github.com/influxdata/telegraf/pull/4362): Add mongo document and connection metrics. -- [#3772](https://github.com/influxdata/telegraf/pull/3772): Add Enum Processor. +- [#3772](https://github.com/influxdata/telegraf/pull/3772): Add enum processor plugin. - [#4386](https://github.com/influxdata/telegraf/pull/4386): Add user tag to procstat input. - [#4403](https://github.com/influxdata/telegraf/pull/4403): Add support for multivalue metrics to collectd parser. - [#4418](https://github.com/influxdata/telegraf/pull/4418): Add support for setting kafka client id. @@ -60,8 +61,8 @@ - [#4431](https://github.com/influxdata/telegraf/pull/4431): Add ip restriction for the prometheus_client output. - [#3918](https://github.com/influxdata/telegraf/pull/3918): Add pgbouncer input plugin. - [#2689](https://github.com/influxdata/telegraf/pull/2689): Add ActiveMQ input plugin. -- [#4402](https://github.com/influxdata/telegraf/pull/4402): Add wavefront parser. -- [#4528](https://github.com/influxdata/telegraf/pull/4528): Add rename processor. +- [#4402](https://github.com/influxdata/telegraf/pull/4402): Add wavefront parser plugin. +- [#4528](https://github.com/influxdata/telegraf/pull/4528): Add rename processor plugin. - [#4537](https://github.com/influxdata/telegraf/pull/4537): Add message 'max_bytes' configuration to kafka input. - [#4546](https://github.com/influxdata/telegraf/pull/4546): Add gopsutil meminfo fields to mem plugin. - [#4285](https://github.com/influxdata/telegraf/pull/4285): Document how to parse telegraf logs. 
@@ -69,8 +70,10 @@
 - [#4433](https://github.com/influxdata/telegraf/pull/4433): Add ability to set measurement from matched text in grok parser.
 - [#4565](https://github.com/influxdata/telegraf/pull/4565): Drop message batches in kafka output if too large.
 - [#4579](https://github.com/influxdata/telegraf/pull/4579): Add support for static and random routing keys in kafka output.
-- [#4539](https://github.com/influxdata/telegraf/pull/4539): Add logfmt parser.
-- [#4551](https://github.com/influxdata/telegraf/pull/4551): Add parser processor.
+- [#4539](https://github.com/influxdata/telegraf/pull/4539): Add logfmt parser plugin.
+- [#4551](https://github.com/influxdata/telegraf/pull/4551): Add parser processor plugin.
+- [#4559](https://github.com/influxdata/telegraf/pull/4559): Add Icinga2 input plugin.
+
 
 ## v1.7.4 [unreleased]
 
From 2729378b7f461a55e5e2d473e2a57ddf3977b5fe Mon Sep 17 00:00:00 2001
From: maxunt
Date: Wed, 22 Aug 2018 19:26:48 -0700
Subject: [PATCH 0098/1815] Add name, time, path and string field options to
 JSON parser (#4351)

---
 docs/DATA_FORMATS_INPUT.md                    | 137 +++++-
 internal/config/config.go                     |  49 ++
 internal/config/config_test.go                |   5 +-
 plugins/inputs/exec/exec_test.go              |  15 +-
 plugins/inputs/http/http_test.go              |  24 +-
 plugins/inputs/httpjson/httpjson.go           |   7 +-
 .../kafka_consumer/kafka_consumer_test.go     |   5 +-
 .../kafka_consumer_legacy_test.go             |   5 +-
 .../mqtt_consumer/mqtt_consumer_test.go       |   5 +-
 .../nats_consumer/nats_consumer_test.go       |   5 +-
 .../inputs/tcp_listener/tcp_listener_test.go  |   5 +-
 .../inputs/udp_listener/udp_listener_test.go  |   5 +-
 plugins/parsers/json/parser.go                |  75 ++-
 plugins/parsers/json/parser_test.go           | 449 +++++++++++-----
 plugins/parsers/registry.go                   |  47 +-
 15 files changed, 672 insertions(+), 166 deletions(-)

diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md
index 7f7c94930..6e1b6a751 100644
--- a/docs/DATA_FORMATS_INPUT.md
+++ b/docs/DATA_FORMATS_INPUT.md
@@ -107,9 +107,31 @@ but can be overridden using the `name_override` config option.
 
 #### JSON Configuration:
 
-The JSON data format supports specifying "tag keys". If specified, keys
-will be searched for in the root-level of the JSON blob. If the key(s) exist,
-they will be applied as tags to the Telegraf metrics.
+The JSON data format supports specifying "tag_keys", "json_string_fields", and "json_query".
+If specified, keys in "tag_keys" and "json_string_fields" will be searched for in the root level
+and any nested lists of the JSON blob. All int and float values are added to fields by default.
+If the key(s) exist, they will be applied as tags or fields to the Telegraf metrics.
+If "json_string_fields" is specified, the string will be added as a field.
+
+The "json_query" configuration is a gjson path to a JSON object or
+list of JSON objects. If this path leads to an array of values or to a
+single data point, an error will be thrown. If this configuration
+is specified, only the result of the query will be parsed and returned as metrics.
+
+The "json_name_key" configuration specifies the key of the field whose value will be
+added as the metric name.
+
+Object paths are specified using gjson path format, which is denoted by object keys
+concatenated with "." to go deeper in nested JSON objects.
+Additional information on gjson paths can be found here: https://github.com/tidwall/gjson#path-syntax
+
+The JSON data format also supports extracting time values through the
+config "json_time_key" and "json_time_format". If "json_time_key" is set,
+"json_time_format" must be specified. The "json_time_key" describes the
+
The "json_time_key" describes the +name of the field containing time information. The "json_time_format" +must be a recognized Go time format. +If there is no year provided, the metrics will have the current year. +More info on time formats can be found here: https://golang.org/pkg/time/#Parse For example, if you had this configuration: @@ -127,11 +149,28 @@ For example, if you had this configuration: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "json" - ## List of tag names to extract from top-level of JSON server response + ## List of tag names to extract from JSON server response tag_keys = [ "my_tag_1", "my_tag_2" ] + + ## The json path specifying where to extract the metric name from + # json_name_key = "" + + ## List of field names to extract from JSON and add as string fields + # json_string_fields = [] + + ## gjson query path to specify a specific chunk of JSON to be parsed with + ## the above configuration. If not specified, the whole file will be parsed. + ## gjson query paths are described here: https://github.com/tidwall/gjson#path-syntax + # json_query = "" + + ## holds the name of the tag of timestamp + # json_time_key = "" + + ## holds the format of timestamp to be parsed + # json_time_format = "" ``` with this JSON output from a command: @@ -152,8 +191,9 @@ Your Telegraf metrics would get tagged with "my_tag_1" exec_mycollector,my_tag_1=foo a=5,b_c=6 ``` -If the JSON data is an array, then each element of the array is parsed with the configured settings. -Each resulting metric will be output with the same timestamp. +If the JSON data is an array, then each element of the array is +parsed with the configured settings. Each resulting metric will +be output with the same timestamp. For example, if the following configuration: @@ -176,6 +216,19 @@ For example, if the following configuration: "my_tag_1", "my_tag_2" ] + + ## List of field names to extract from JSON and add as string fields + # string_fields = [] + + ## gjson query path to specify a specific chunk of JSON to be parsed with + ## the above configuration. If not specified, the whole file will be parsed + # json_query = "" + + ## holds the name of the tag of timestamp + json_time_key = "b_time" + + ## holds the format of timestamp to be parsed + json_time_format = "02 Jan 06 15:04 MST" ``` with this JSON output from a command: @@ -185,7 +238,8 @@ with this JSON output from a command: { "a": 5, "b": { - "c": 6 + "c": 6, + "time":"04 Jan 06 15:04 MST" }, "my_tag_1": "foo", "my_tag_2": "baz" @@ -193,7 +247,8 @@ with this JSON output from a command: { "a": 7, "b": { - "c": 8 + "c": 8, + "time":"11 Jan 07 15:04 MST" }, "my_tag_1": "bar", "my_tag_2": "baz" @@ -201,11 +256,71 @@ with this JSON output from a command: ] ``` -Your Telegraf metrics would get tagged with "my_tag_1" and "my_tag_2" +Your Telegraf metrics would get tagged with "my_tag_1" and "my_tag_2" and fielded with "b_c" +The metric's time will be a time.Time object, as specified by "b_time" ``` -exec_mycollector,my_tag_1=foo,my_tag_2=baz a=5,b_c=6 -exec_mycollector,my_tag_1=bar,my_tag_2=baz a=7,b_c=8 +exec_mycollector,my_tag_1=foo,my_tag_2=baz b_c=6 1136387040000000000 +exec_mycollector,my_tag_1=bar,my_tag_2=baz b_c=8 1168527840000000000 +``` + +If you want to only use a specific portion of your JSON, use the "json_query" +configuration to specify a path to a JSON object. 
+
+For example, with the following config:
+```toml
+[[inputs.exec]]
+  ## Commands array
+  commands = ["/usr/bin/mycollector --foo=bar"]
+
+  ## measurement name suffix (for separating different commands)
+  name_suffix = "_mycollector"
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "json"
+
+  ## List of tag names to extract from top-level of JSON server response
+  tag_keys = ["first"]
+
+  ## List of field names to extract from JSON and add as string fields
+  json_string_fields = ["last"]
+
+  ## gjson query path to specify a specific chunk of JSON to be parsed with
+  ## the above configuration. If not specified, the whole file will be parsed
+  json_query = "obj.friends"
+
+  ## Name of the field containing the timestamp
+  # json_time_key = ""
+
+  ## Go time layout used to parse the timestamp
+  # json_time_format = ""
+```
+
+with this JSON as input:
+```json
+{
+  "obj": {
+    "name": {"first": "Tom", "last": "Anderson"},
+    "age":37,
+    "children": ["Sara","Alex","Jack"],
+    "fav.movie": "Deer Hunter",
+    "friends": [
+      {"first": "Dale", "last": "Murphy", "age": 44},
+      {"first": "Roger", "last": "Craig", "age": 68},
+      {"first": "Jane", "last": "Murphy", "age": 47}
+    ]
+  }
+}
+```
+You would receive 3 metrics tagged with "first" and fielded with "last" and "age"
+
+```
+exec_mycollector, "first":"Dale" "last":"Murphy","age":44
+exec_mycollector, "first":"Roger" "last":"Craig","age":68
+exec_mycollector, "first":"Jane" "last":"Murphy","age":47
+```
 
 # Value:
 
diff --git a/internal/config/config.go b/internal/config/config.go
index 21c71d946..5926f6132 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -1261,6 +1261,50 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
 		}
 	}
 
+	if node, ok := tbl.Fields["json_string_fields"]; ok {
+		if kv, ok := node.(*ast.KeyValue); ok {
+			if ary, ok := kv.Value.(*ast.Array); ok {
+				for _, elem := range ary.Value {
+					if str, ok := elem.(*ast.String); ok {
+						c.JSONStringFields = append(c.JSONStringFields, str.Value)
+					}
+				}
+			}
+		}
+	}
+
+	if node, ok := tbl.Fields["json_name_key"]; ok {
+		if kv, ok := node.(*ast.KeyValue); ok {
+			if str, ok := kv.Value.(*ast.String); ok {
+				c.JSONNameKey = str.Value
+			}
+		}
+	}
+
+	if node, ok := tbl.Fields["json_query"]; ok {
+		if kv, ok := node.(*ast.KeyValue); ok {
+			if str, ok := kv.Value.(*ast.String); ok {
+				c.JSONQuery = str.Value
+			}
+		}
+	}
+
+	if node, ok := tbl.Fields["json_time_key"]; ok {
+		if kv, ok := node.(*ast.KeyValue); ok {
+			if str, ok := kv.Value.(*ast.String); ok {
+				c.JSONTimeKey = str.Value
+			}
+		}
+	}
+
+	if node, ok := tbl.Fields["json_time_format"]; ok {
+		if kv, ok := node.(*ast.KeyValue); ok {
+			if str, ok := kv.Value.(*ast.String); ok {
+				c.JSONTimeFormat = str.Value
+			}
+		}
+	}
+
 	if node, ok := tbl.Fields["data_type"]; ok {
 		if kv, ok := node.(*ast.KeyValue); ok {
 			if str, ok := kv.Value.(*ast.String); ok {
@@ -1405,6 +1449,11 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
 	delete(tbl.Fields, "separator")
 	delete(tbl.Fields, "templates")
 	delete(tbl.Fields, "tag_keys")
+	delete(tbl.Fields, "json_string_fields")
+	delete(tbl.Fields, "json_query")
+	delete(tbl.Fields, "json_name_key")
+	delete(tbl.Fields, "json_time_key")
+	delete(tbl.Fields, "json_time_format")
 	delete(tbl.Fields, "data_type")
 	delete(tbl.Fields, "collectd_auth_file")
 	delete(tbl.Fields, 
"collectd_security_level") diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 3498d815d..b136fec8c 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -143,7 +143,10 @@ func TestConfig_LoadDirectory(t *testing.T) { "Testdata did not produce correct memcached metadata.") ex := inputs.Inputs["exec"]().(*exec.Exec) - p, err := parsers.NewJSONParser("exec", nil, nil) + p, err := parsers.NewParser(&parsers.Config{ + MetricName: "exec", + DataFormat: "json", + }) assert.NoError(t, err) ex.SetParser(p) ex.Command = "/usr/bin/myothercollector --foo=bar" diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index c7c181b17..0bfeece54 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -93,7 +93,10 @@ func (r runnerMock) Run(e *Exec, command string, acc telegraf.Accumulator) ([]by } func TestExec(t *testing.T) { - parser, _ := parsers.NewJSONParser("exec", []string{}, nil) + parser, _ := parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "exec", + }) e := &Exec{ runner: newRunnerMock([]byte(validJson), nil), Commands: []string{"testcommand arg1"}, @@ -119,7 +122,10 @@ func TestExec(t *testing.T) { } func TestExecMalformed(t *testing.T) { - parser, _ := parsers.NewJSONParser("exec", []string{}, nil) + parser, _ := parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "exec", + }) e := &Exec{ runner: newRunnerMock([]byte(malformedJson), nil), Commands: []string{"badcommand arg1"}, @@ -132,7 +138,10 @@ func TestExecMalformed(t *testing.T) { } func TestCommandError(t *testing.T) { - parser, _ := parsers.NewJSONParser("exec", []string{}, nil) + parser, _ := parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "exec", + }) e := &Exec{ runner: newRunnerMock(nil, fmt.Errorf("exit status code 1")), Commands: []string{"badcommand"}, diff --git a/plugins/inputs/http/http_test.go b/plugins/inputs/http/http_test.go index 486edabc9..4cd465bce 100644 --- a/plugins/inputs/http/http_test.go +++ b/plugins/inputs/http/http_test.go @@ -26,7 +26,11 @@ func TestHTTPwithJSONFormat(t *testing.T) { URLs: []string{url}, } metricName := "metricName" - p, _ := parsers.NewJSONParser(metricName, nil, nil) + + p, _ := parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "metricName", + }) plugin.SetParser(p) var acc testutil.Accumulator @@ -63,8 +67,11 @@ func TestHTTPHeaders(t *testing.T) { URLs: []string{url}, Headers: map[string]string{header: headerValue}, } - metricName := "metricName" - p, _ := parsers.NewJSONParser(metricName, nil, nil) + + p, _ := parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "metricName", + }) plugin.SetParser(p) var acc testutil.Accumulator @@ -83,7 +90,10 @@ func TestInvalidStatusCode(t *testing.T) { } metricName := "metricName" - p, _ := parsers.NewJSONParser(metricName, nil, nil) + p, _ := parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: metricName, + }) plugin.SetParser(p) var acc testutil.Accumulator @@ -105,8 +115,10 @@ func TestMethod(t *testing.T) { Method: "POST", } - metricName := "metricName" - p, _ := parsers.NewJSONParser(metricName, nil, nil) + p, _ := parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "metricName", + }) plugin.SetParser(p) var acc testutil.Accumulator diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index c7324dee4..e09eafc94 100644 --- 
a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -181,7 +181,12 @@ func (h *HttpJson) gatherServer( "server": serverURL, } - parser, err := parsers.NewJSONParser(msrmnt_name, h.TagKeys, tags) + parser, err := parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: msrmnt_name, + TagKeys: h.TagKeys, + DefaultTags: tags, + }) if err != nil { return err } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index 9a585d6ed..18f7f80be 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -125,7 +125,10 @@ func TestRunParserAndGatherJSON(t *testing.T) { k.acc = &acc defer close(k.done) - k.parser, _ = parsers.NewJSONParser("kafka_json_test", []string{}, nil) + k.parser, _ = parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "kafka_json_test", + }) go k.receiver() in <- saramaMsg(testMsgJSON) acc.Wait(1) diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go index 630aca163..38bc48290 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go @@ -125,7 +125,10 @@ func TestRunParserAndGatherJSON(t *testing.T) { k.acc = &acc defer close(k.done) - k.parser, _ = parsers.NewJSONParser("kafka_json_test", []string{}, nil) + k.parser, _ = parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "kafka_json_test", + }) go k.receiver() in <- saramaMsg(testMsgJSON) acc.Wait(1) diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index eb5e3048c..a2e5deaa8 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -172,7 +172,10 @@ func TestRunParserAndGatherJSON(t *testing.T) { n.acc = &acc defer close(n.done) - n.parser, _ = parsers.NewJSONParser("nats_json_test", []string{}, nil) + n.parser, _ = parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "nats_json_test", + }) go n.receiver() in <- mqttMsg(testMsgJSON) diff --git a/plugins/inputs/nats_consumer/nats_consumer_test.go b/plugins/inputs/nats_consumer/nats_consumer_test.go index a0b84ff2e..a1f499554 100644 --- a/plugins/inputs/nats_consumer/nats_consumer_test.go +++ b/plugins/inputs/nats_consumer/nats_consumer_test.go @@ -108,7 +108,10 @@ func TestRunParserAndGatherJSON(t *testing.T) { n.acc = &acc defer close(n.done) - n.parser, _ = parsers.NewJSONParser("nats_json_test", []string{}, nil) + n.parser, _ = parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "nats_json_test", + }) n.wg.Add(1) go n.receiver() in <- natsMsg(testMsgJSON) diff --git a/plugins/inputs/tcp_listener/tcp_listener_test.go b/plugins/inputs/tcp_listener/tcp_listener_test.go index 1063cb5c1..6ff40ad87 100644 --- a/plugins/inputs/tcp_listener/tcp_listener_test.go +++ b/plugins/inputs/tcp_listener/tcp_listener_test.go @@ -300,7 +300,10 @@ func TestRunParserJSONMsg(t *testing.T) { listener.acc = &acc defer close(listener.done) - listener.parser, _ = parsers.NewJSONParser("udp_json_test", []string{}, nil) + listener.parser, _ = parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "udp_json_test", + }) listener.wg.Add(1) go listener.tcpParser() diff --git 
a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go index e0e0e862e..49115434a 100644 --- a/plugins/inputs/udp_listener/udp_listener_test.go +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -193,7 +193,10 @@ func TestRunParserJSONMsg(t *testing.T) { listener.acc = &acc defer close(listener.done) - listener.parser, _ = parsers.NewJSONParser("udp_json_test", []string{}, nil) + listener.parser, _ = parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "udp_json_test", + }) listener.wg.Add(1) go listener.udpParser() diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index 62d17c76a..9fb0816fe 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -11,6 +11,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/tidwall/gjson" ) var ( @@ -18,9 +19,14 @@ var ( ) type JSONParser struct { - MetricName string - TagKeys []string - DefaultTags map[string]string + MetricName string + TagKeys []string + StringFields []string + JSONNameKey string + JSONQuery string + JSONTimeKey string + JSONTimeFormat string + DefaultTags map[string]string } func (p *JSONParser) parseArray(buf []byte) ([]telegraf.Metric, error) { @@ -34,6 +40,9 @@ func (p *JSONParser) parseArray(buf []byte) ([]telegraf.Metric, error) { } for _, item := range jsonOut { metrics, err = p.parseObject(metrics, item) + if err != nil { + return nil, err + } } return metrics, nil } @@ -51,10 +60,42 @@ func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]i return nil, err } + //checks if json_name_key is set + if p.JSONNameKey != "" { + p.MetricName = f.Fields[p.JSONNameKey].(string) + } + + //if time key is specified, set it to nTime + nTime := time.Now().UTC() + if p.JSONTimeKey != "" { + if p.JSONTimeFormat == "" { + err := fmt.Errorf("use of 'json_time_key' requires 'json_time_format'") + return nil, err + } + + if f.Fields[p.JSONTimeKey] == nil { + err := fmt.Errorf("JSON time key could not be found") + return nil, err + } + + timeStr, ok := f.Fields[p.JSONTimeKey].(string) + if !ok { + err := fmt.Errorf("time: %v could not be converted to string", f.Fields[p.JSONTimeKey]) + return nil, err + } + nTime, err = time.Parse(p.JSONTimeFormat, timeStr) + if err != nil { + return nil, err + } + + //if the year is 0, set to current year + if nTime.Year() == 0 { + nTime = nTime.AddDate(time.Now().Year(), 0, 0) + } + } + tags, nFields := p.switchFieldToTag(tags, f.Fields) - - metric, err := metric.New(p.MetricName, tags, nFields, time.Now().UTC()) - + metric, err := metric.New(p.MetricName, tags, nFields, nTime) if err != nil { return nil, err } @@ -88,6 +129,17 @@ func (p *JSONParser) switchFieldToTag(tags map[string]string, fields map[string] //remove any additional string/bool values from fields for k := range fields { + //check if field is in StringFields + sField := false + for _, v := range p.StringFields { + if v == k { + sField = true + } + } + if sField { + continue + } + switch fields[k].(type) { case string: delete(fields, k) @@ -99,6 +151,15 @@ func (p *JSONParser) switchFieldToTag(tags map[string]string, fields map[string] } func (p *JSONParser) Parse(buf []byte) ([]telegraf.Metric, error) { + if p.JSONQuery != "" { + result := gjson.GetBytes(buf, p.JSONQuery) + buf = []byte(result.Raw) + if !result.IsArray() && !result.IsObject() { + err := fmt.Errorf("E! 
Query path must lead to a JSON object or array of objects, but lead to: %v", result.Type) + return nil, err + } + } + buf = bytes.TrimSpace(buf) buf = bytes.TrimPrefix(buf, utf8BOM) if len(buf) == 0 { @@ -126,7 +187,7 @@ func (p *JSONParser) ParseLine(line string) (telegraf.Metric, error) { } if len(metrics) < 1 { - return nil, fmt.Errorf("Can not parse the line: %s, for data format: influx ", line) + return nil, fmt.Errorf("can not parse the line: %s, for data format: json ", line) } return metrics[0], nil diff --git a/plugins/parsers/json/parser_test.go b/plugins/parsers/json/parser_test.go index c26b209a2..39e43bece 100644 --- a/plugins/parsers/json/parser_test.go +++ b/plugins/parsers/json/parser_test.go @@ -1,9 +1,10 @@ package json import ( + "fmt" + "log" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -55,46 +56,46 @@ func TestParseValidJSON(t *testing.T) { // Most basic vanilla test metrics, err := parser.Parse([]byte(validJSON)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) // Test that newlines are fine metrics, err = parser.Parse([]byte(validJSONNewline)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "d": float64(7), "b_d": float64(8), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) // Test that strings without TagKeys defined are ignored metrics, err = parser.Parse([]byte(validJSONTags)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) // Test that whitespace only will parse as an empty list of metrics metrics, err = parser.Parse([]byte("\n\t")) - assert.NoError(t, err) - assert.Len(t, metrics, 0) + require.NoError(t, err) + require.Len(t, metrics, 0) // Test that an empty string will parse as an empty list of metrics metrics, err = parser.Parse([]byte("")) - assert.NoError(t, err) - assert.Len(t, metrics, 0) + require.NoError(t, err) + require.Len(t, metrics, 0) } func TestParseLineValidJSON(t *testing.T) { @@ -104,33 +105,33 @@ func TestParseLineValidJSON(t *testing.T) { // Most basic vanilla test metric, err := parser.ParseLine(validJSON) - assert.NoError(t, err) - assert.Equal(t, "json_test", metric.Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Equal(t, "json_test", metric.Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metric.Fields()) - assert.Equal(t, map[string]string{}, 
metric.Tags()) + require.Equal(t, map[string]string{}, metric.Tags()) // Test that newlines are fine metric, err = parser.ParseLine(validJSONNewline) - assert.NoError(t, err) - assert.Equal(t, "json_test", metric.Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Equal(t, "json_test", metric.Name()) + require.Equal(t, map[string]interface{}{ "d": float64(7), "b_d": float64(8), }, metric.Fields()) - assert.Equal(t, map[string]string{}, metric.Tags()) + require.Equal(t, map[string]string{}, metric.Tags()) // Test that strings without TagKeys defined are ignored metric, err = parser.ParseLine(validJSONTags) - assert.NoError(t, err) - assert.Equal(t, "json_test", metric.Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Equal(t, "json_test", metric.Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metric.Fields()) - assert.Equal(t, map[string]string{}, metric.Tags()) + require.Equal(t, map[string]string{}, metric.Tags()) } func TestParseInvalidJSON(t *testing.T) { @@ -139,11 +140,11 @@ func TestParseInvalidJSON(t *testing.T) { } _, err := parser.Parse([]byte(invalidJSON)) - assert.Error(t, err) + require.Error(t, err) _, err = parser.Parse([]byte(invalidJSON2)) - assert.Error(t, err) + require.Error(t, err) _, err = parser.ParseLine(invalidJSON) - assert.Error(t, err) + require.Error(t, err) } func TestParseWithTagKeys(t *testing.T) { @@ -153,14 +154,14 @@ func TestParseWithTagKeys(t *testing.T) { TagKeys: []string{"wrongtagkey"}, } metrics, err := parser.Parse([]byte(validJSONTags)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) // Test that single tag key is found and applied parser = JSONParser{ @@ -168,14 +169,14 @@ func TestParseWithTagKeys(t *testing.T) { TagKeys: []string{"mytag"}, } metrics, err = parser.Parse([]byte(validJSONTags)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "mytag": "foobar", }, metrics[0].Tags()) @@ -185,14 +186,14 @@ func TestParseWithTagKeys(t *testing.T) { TagKeys: []string{"mytag", "othertag"}, } metrics, err = parser.Parse([]byte(validJSONTags)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "mytag": "foobar", "othertag": "baz", }, metrics[0].Tags()) @@ -205,13 +206,13 @@ func TestParseLineWithTagKeys(t *testing.T) { TagKeys: []string{"wrongtagkey"}, } metric, err := 
parser.ParseLine(validJSONTags) - assert.NoError(t, err) - assert.Equal(t, "json_test", metric.Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Equal(t, "json_test", metric.Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metric.Fields()) - assert.Equal(t, map[string]string{}, metric.Tags()) + require.Equal(t, map[string]string{}, metric.Tags()) // Test that single tag key is found and applied parser = JSONParser{ @@ -219,13 +220,13 @@ func TestParseLineWithTagKeys(t *testing.T) { TagKeys: []string{"mytag"}, } metric, err = parser.ParseLine(validJSONTags) - assert.NoError(t, err) - assert.Equal(t, "json_test", metric.Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Equal(t, "json_test", metric.Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metric.Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "mytag": "foobar", }, metric.Tags()) @@ -235,13 +236,13 @@ func TestParseLineWithTagKeys(t *testing.T) { TagKeys: []string{"mytag", "othertag"}, } metric, err = parser.ParseLine(validJSONTags) - assert.NoError(t, err) - assert.Equal(t, "json_test", metric.Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Equal(t, "json_test", metric.Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metric.Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "mytag": "foobar", "othertag": "baz", }, metric.Tags()) @@ -258,25 +259,25 @@ func TestParseValidJSONDefaultTags(t *testing.T) { // Most basic vanilla test metrics, err := parser.Parse([]byte(validJSON)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"t4g": "default"}, metrics[0].Tags()) + require.Equal(t, map[string]string{"t4g": "default"}, metrics[0].Tags()) // Test that tagkeys and default tags are applied metrics, err = parser.Parse([]byte(validJSONTags)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "t4g": "default", "mytag": "foobar", }, metrics[0].Tags()) @@ -294,25 +295,25 @@ func TestParseValidJSONDefaultTagsOverride(t *testing.T) { // Most basic vanilla test metrics, err := parser.Parse([]byte(validJSON)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"mytag": "default"}, metrics[0].Tags()) + require.Equal(t, map[string]string{"mytag": "default"}, metrics[0].Tags()) // Test that tagkeys override default tags 
metrics, err = parser.Parse([]byte(validJSONTags)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "mytag": "foobar", }, metrics[0].Tags()) } @@ -325,31 +326,31 @@ func TestParseValidJSONArray(t *testing.T) { // Most basic vanilla test metrics, err := parser.Parse([]byte(validJSONArray)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_array_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_array_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) // Basic multiple datapoints metrics, err = parser.Parse([]byte(validJSONArrayMultiple)) - assert.NoError(t, err) - assert.Len(t, metrics, 2) - assert.Equal(t, "json_array_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 2) + require.Equal(t, "json_array_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[1].Tags()) - assert.Equal(t, "json_array_test", metrics[1].Name()) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]string{}, metrics[1].Tags()) + require.Equal(t, "json_array_test", metrics[1].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(7), "b_c": float64(8), }, metrics[1].Fields()) - assert.Equal(t, map[string]string{}, metrics[1].Tags()) + require.Equal(t, map[string]string{}, metrics[1].Tags()) } func TestParseArrayWithTagKeys(t *testing.T) { @@ -359,21 +360,21 @@ func TestParseArrayWithTagKeys(t *testing.T) { TagKeys: []string{"wrongtagkey"}, } metrics, err := parser.Parse([]byte(validJSONArrayTags)) - assert.NoError(t, err) - assert.Len(t, metrics, 2) - assert.Equal(t, "json_array_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 2) + require.Equal(t, "json_array_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) - assert.Equal(t, "json_array_test", metrics[1].Name()) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, "json_array_test", metrics[1].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(7), "b_c": float64(8), }, metrics[1].Fields()) - assert.Equal(t, map[string]string{}, metrics[1].Tags()) + require.Equal(t, map[string]string{}, metrics[1].Tags()) // Test that single tag key is found and applied parser = JSONParser{ @@ -381,23 +382,23 @@ func TestParseArrayWithTagKeys(t *testing.T) { TagKeys: []string{"mytag"}, } metrics, err = parser.Parse([]byte(validJSONArrayTags)) - assert.NoError(t, err) - assert.Len(t, metrics, 2) - assert.Equal(t, "json_array_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + 
require.NoError(t, err) + require.Len(t, metrics, 2) + require.Equal(t, "json_array_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "mytag": "foo", }, metrics[0].Tags()) - assert.Equal(t, "json_array_test", metrics[1].Name()) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, "json_array_test", metrics[1].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(7), "b_c": float64(8), }, metrics[1].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "mytag": "bar", }, metrics[1].Tags()) @@ -407,24 +408,24 @@ func TestParseArrayWithTagKeys(t *testing.T) { TagKeys: []string{"mytag", "othertag"}, } metrics, err = parser.Parse([]byte(validJSONArrayTags)) - assert.NoError(t, err) - assert.Len(t, metrics, 2) - assert.Equal(t, "json_array_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 2) + require.Equal(t, "json_array_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "mytag": "foo", "othertag": "baz", }, metrics[0].Tags()) - assert.Equal(t, "json_array_test", metrics[1].Name()) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, "json_array_test", metrics[1].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(7), "b_c": float64(8), }, metrics[1].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "mytag": "bar", "othertag": "baz", }, metrics[1].Tags()) @@ -439,7 +440,7 @@ func TestHttpJsonBOM(t *testing.T) { // Most basic vanilla test _, err := parser.Parse(jsonBOM) - assert.NoError(t, err) + require.NoError(t, err) } //for testing issue #4260 @@ -448,22 +449,212 @@ func TestJSONParseNestedArray(t *testing.T) { "total_devices": 5, "total_threads": 10, "shares": { - "total": 5, - "accepted": 5, - "rejected": 0, - "avg_find_time": 4, - "tester": "work", - "tester2": "don't want this", - "tester3": 7.93 + "total": 5, + "accepted": 5, + "rejected": 0, + "avg_find_time": 4, + "tester": "work", + "tester2": "don't want this", + "tester3": { + "hello":"sup", + "fun":"money", + "break":9 + } } }` parser := JSONParser{ MetricName: "json_test", - TagKeys: []string{"total_devices", "total_threads", "shares_tester", "shares_tester3"}, + TagKeys: []string{"total_devices", "total_threads", "shares_tester3_fun"}, + } + + metrics, err := parser.Parse([]byte(testString)) + log.Printf("m[0] name: %v, tags: %v, fields: %v", metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields()) + require.NoError(t, err) + require.Equal(t, len(parser.TagKeys), len(metrics[0].Tags())) +} + +func TestJSONQueryErrorOnArray(t *testing.T) { + testString := `{ + "total_devices": 5, + "total_threads": 10, + "shares": { + "total": 5, + "accepted": 6, + "test_string": "don't want this", + "test_obj": { + "hello":"sup", + "fun":"money", + "break":9 + }, + "myArr":[4,5,6] + } + }` + + parser := JSONParser{ + MetricName: "json_test", + TagKeys: []string{}, + JSONQuery: "shares.myArr", + } + + _, err := parser.Parse([]byte(testString)) + require.Error(t, err) +} + +func TestArrayOfObjects(t *testing.T) { + testString := `{ + "meta": { + "info":9, + "shares": [{ + "channel": 6, + "time": 1130, + "ice":"man" + }, + { + "channel": 5, + "time": 1030, + "ice":"bucket" + 
}, + { + "channel": 10, + "time": 330, + "ice":"cream" + }] + }, + "more_stuff":"junk" + }` + + parser := JSONParser{ + MetricName: "json_test", + TagKeys: []string{"ice"}, + JSONQuery: "meta.shares", } metrics, err := parser.Parse([]byte(testString)) require.NoError(t, err) - require.Equal(t, len(parser.TagKeys), len(metrics[0].Tags())) + require.Equal(t, 3, len(metrics)) +} + +func TestUseCaseJSONQuery(t *testing.T) { + testString := `{ + "obj": { + "name": {"first": "Tom", "last": "Anderson"}, + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"first": "Dale", "last": "Murphy", "age": 44}, + {"first": "Roger", "last": "Craig", "age": 68}, + {"first": "Jane", "last": "Murphy", "age": 47} + ] + } + }` + + parser := JSONParser{ + MetricName: "json_test", + StringFields: []string{"last"}, + TagKeys: []string{"first"}, + JSONQuery: "obj.friends", + } + + metrics, err := parser.Parse([]byte(testString)) + require.NoError(t, err) + require.Equal(t, 3, len(metrics)) + require.Equal(t, metrics[0].Fields()["last"], "Murphy") +} + +func TestTimeParser(t *testing.T) { + testString := `[ + { + "a": 5, + "b": { + "c": 6, + "time":"04 Jan 06 15:04 MST" + }, + "my_tag_1": "foo", + "my_tag_2": "baz" + }, + { + "a": 7, + "b": { + "c": 8, + "time":"11 Jan 07 15:04 MST" + }, + "my_tag_1": "bar", + "my_tag_2": "baz" + } + ]` + + parser := JSONParser{ + MetricName: "json_test", + JSONTimeKey: "b_time", + JSONTimeFormat: "02 Jan 06 15:04 MST", + } + metrics, err := parser.Parse([]byte(testString)) + require.NoError(t, err) + require.Equal(t, 2, len(metrics)) + require.Equal(t, false, metrics[0].Time() == metrics[1].Time()) +} + +func TestTimeErrors(t *testing.T) { + testString := `{ + "a": 5, + "b": { + "c": 6, + "time":"04 Jan 06 15:04 MST" + }, + "my_tag_1": "foo", + "my_tag_2": "baz" + }` + + parser := JSONParser{ + MetricName: "json_test", + JSONTimeKey: "b_time", + JSONTimeFormat: "02 January 06 15:04 MST", + } + + metrics, err := parser.Parse([]byte(testString)) + require.Error(t, err) + require.Equal(t, 0, len(metrics)) + + testString2 := `{ + "a": 5, + "b": { + "c": 6 + }, + "my_tag_1": "foo", + "my_tag_2": "baz" + }` + + parser = JSONParser{ + MetricName: "json_test", + JSONTimeKey: "b_time", + JSONTimeFormat: "02 January 06 15:04 MST", + } + + metrics, err = parser.Parse([]byte(testString2)) + log.Printf("err: %v", err) + require.Error(t, err) + require.Equal(t, 0, len(metrics)) + require.Equal(t, fmt.Errorf("JSON time key could not be found"), err) +} + +func TestNameKey(t *testing.T) { + testString := `{ + "a": 5, + "b": { + "c": "this is my name", + "time":"04 Jan 06 15:04 MST" + }, + "my_tag_1": "foo", + "my_tag_2": "baz" + }` + + parser := JSONParser{ + JSONNameKey: "b_c", + } + + metrics, err := parser.Parse([]byte(testString)) + require.NoError(t, err) + require.Equal(t, "this is my name", metrics[0].Name()) } diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index e198cb2cb..89fdc9a10 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -59,9 +59,22 @@ type Config struct { // TagKeys only apply to JSON data TagKeys []string + // FieldKeys only apply to JSON + JSONStringFields []string + + JSONNameKey string // MetricName applies to JSON & value. This will be the name of the measurement. 
MetricName string + // holds a gjson path for json parser + JSONQuery string + + // key of time + JSONTimeKey string + + // time format + JSONTimeFormat string + // Authentication file for collectd CollectdAuthFile string // One of none (default), sign, or encrypt @@ -108,8 +121,14 @@ func NewParser(config *Config) (Parser, error) { var parser Parser switch config.DataFormat { case "json": - parser, err = NewJSONParser(config.MetricName, - config.TagKeys, config.DefaultTags) + parser = newJSONParser(config.MetricName, + config.TagKeys, + config.JSONNameKey, + config.JSONStringFields, + config.JSONQuery, + config.JSONTimeKey, + config.JSONTimeFormat, + config.DefaultTags) case "value": parser, err = NewValueParser(config.MetricName, config.DataType, config.DefaultTags) @@ -151,6 +170,30 @@ func NewParser(config *Config) (Parser, error) { return parser, err } +func newJSONParser( + metricName string, + tagKeys []string, + jsonNameKey string, + stringFields []string, + jsonQuery string, + timeKey string, + timeFormat string, + defaultTags map[string]string, +) Parser { + parser := &json.JSONParser{ + MetricName: metricName, + TagKeys: tagKeys, + StringFields: stringFields, + JSONNameKey: jsonNameKey, + JSONQuery: jsonQuery, + JSONTimeKey: timeKey, + JSONTimeFormat: timeFormat, + DefaultTags: defaultTags, + } + return parser +} + +//Deprecated: Use NewParser to get a JSONParser object func newGrokParser(metricName string, patterns []string, nPatterns []string, From 545b59f12e90cf0d936581b8869529d5896d0cbc Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 22 Aug 2018 19:28:47 -0700 Subject: [PATCH 0099/1815] Update changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bc2998d4b..b03ba6a9d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -73,7 +73,7 @@ - [#4539](https://github.com/influxdata/telegraf/pull/4539): Add logfmt parser plugin. - [#4551](https://github.com/influxdata/telegraf/pull/4551): Add parser processor plugin. - [#4559](https://github.com/influxdata/telegraf/pull/4559): Add Icinga2 input plugin. - +- [#4351](https://github.com/influxdata/telegraf/pull/4351): Add name, time, path and string field options to JSON parser. ## v1.7.4 [unreleased] From 2b026374ec474eea4ecc87cbd67e1ff095fa381e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 23 Aug 2018 11:46:41 -0700 Subject: [PATCH 0100/1815] Update kafka_consumer sample config in README --- plugins/inputs/kafka_consumer/README.md | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index b6fc8fc89..2bc290c6b 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -14,17 +14,20 @@ and use the old zookeeper connection method. ```toml # Read metrics from Kafka topic(s) [[inputs.kafka_consumer]] + ## kafka servers + brokers = ["localhost:9092"] ## topic(s) to consume topics = ["telegraf"] - brokers = ["localhost:9092"] - ## the name of the consumer group - consumer_group = "telegraf_metrics_consumers" - ## Offset (must be either "oldest" or "newest") - offset = "oldest" - ## Optional client id + ## Optional Client id # client_id = "Telegraf" + ## Set the minimal supported Kafka version. Setting this enables the use of new + ## Kafka features and APIs. Of particular interest, lz4 compression + ## requires at least version 0.10.0.0. 
+ ## ex: version = "1.1.0" + # version = "" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -36,6 +39,11 @@ and use the old zookeeper connection method. # sasl_username = "kafka" # sasl_password = "secret" + ## the name of the consumer group + consumer_group = "telegraf_metrics_consumers" + ## Offset (must be either "oldest" or "newest") + offset = "oldest" + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: From 36959abce9b2874dfd32c87376260d26419f6eb8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 23 Aug 2018 13:11:39 -0700 Subject: [PATCH 0101/1815] Fix toml error in converter processor README --- plugins/processors/converter/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/processors/converter/README.md b/plugins/processors/converter/README.md index f69c8728e..939b44ce8 100644 --- a/plugins/processors/converter/README.md +++ b/plugins/processors/converter/README.md @@ -12,7 +12,7 @@ will overwrite one another. ### Configuration: ```toml # Convert values to another metric value type -[processors.converter] +[[processors.converter]] ## Tags to convert ## ## The table key determines the target type, and the array of key-values @@ -42,7 +42,7 @@ will overwrite one another. ### Examples: ```toml -[processors.converter] +[[processors.converter]] [processors.converter.tags] string = ["port"] From 16a6feda4ab55672efe6709ab707c04eb47f176a Mon Sep 17 00:00:00 2001 From: rbrendler <41023594+rbrendler@users.noreply.github.com> Date: Thu, 23 Aug 2018 15:30:59 -0500 Subject: [PATCH 0102/1815] Fix burrow_group.offset calculation for Burrow plugin (#4584) --- plugins/inputs/burrow/README.md | 6 ++++-- plugins/inputs/burrow/burrow.go | 6 ++---- plugins/inputs/burrow/burrow_test.go | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/burrow/README.md b/plugins/inputs/burrow/README.md index 19073a6ef..d30a054d6 100644 --- a/plugins/inputs/burrow/README.md +++ b/plugins/inputs/burrow/README.md @@ -50,7 +50,7 @@ Supported Burrow version: `1.x` # insecure_skip_verify = false ``` -### Partition Status mappings +### Group/Partition Status mappings * `OK` = 1 * `NOT_FOUND` = 2 @@ -66,9 +66,11 @@ Supported Burrow version: `1.x` * `burrow_group` (one event per each consumer group) - status (string, see Partition Status mappings) - status_code (int, `1..6`, see Partition status mappings) - - parition_count (int, `number of partitions`) + - partition_count (int, `number of partitions`) + - offset (int64, `total offset of all partitions`) - total_lag (int64, `totallag`) - lag (int64, `maxlag.current_lag || 0`) + - timestamp (int64, `end.timestamp`) * `burrow_partition` (one event per each topic partition) - status (string, see Partition Status mappings) diff --git a/plugins/inputs/burrow/burrow.go b/plugins/inputs/burrow/burrow.go index 91e3ffe15..9c532e3be 100644 --- a/plugins/inputs/burrow/burrow.go +++ b/plugins/inputs/burrow/burrow.go @@ -397,13 +397,11 @@ func (b *burrow) genGroupStatusMetrics(r *apiResponse, cluster, group string, ac partitionCount = len(r.Status.Partitions) } - // get max timestamp and offset from partitions list + // get max timestamp and total offset from partitions list offset := int64(0) timestamp := int64(0) for _, partition := range r.Status.Partitions { - if partition.End.Offset > offset { - offset = partition.End.Offset - } + offset += partition.End.Offset if 
partition.End.Timestamp > timestamp { timestamp = partition.End.Timestamp } diff --git a/plugins/inputs/burrow/burrow_test.go b/plugins/inputs/burrow/burrow_test.go index 5ea85798a..3847a5d7c 100644 --- a/plugins/inputs/burrow/burrow_test.go +++ b/plugins/inputs/burrow/burrow_test.go @@ -160,7 +160,7 @@ func TestBurrowGroup(t *testing.T) { "partition_count": 3, "total_lag": int64(0), "lag": int64(0), - "offset": int64(431323195), + "offset": int64(431323195 + 431322962 + 428636563), "timestamp": int64(1515609490008), }, } From d676381a0493533ca6855374e4ec9a45fedde44b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 23 Aug 2018 13:32:18 -0700 Subject: [PATCH 0103/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b03ba6a9d..c385146e7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -81,6 +81,7 @@ - [#4534](https://github.com/influxdata/telegraf/pull/4534): Skip unserializable metric in influxDB UDP output. - [#4554](https://github.com/influxdata/telegraf/pull/4554): Fix powerdns input tests. +- [#4584](https://github.com/influxdata/telegraf/pull/4584): Fix burrow_group offset calculation for burrow input. ## v1.7.3 [2018-08-07] From 14d9ef4f0c677387e08739254b670a970689487e Mon Sep 17 00:00:00 2001 From: prashanthjbabu Date: Fri, 24 Aug 2018 02:20:19 +0530 Subject: [PATCH 0104/1815] Add result_code value for errors running ping command (#4550) --- plugins/inputs/ping/README.md | 1 + plugins/inputs/ping/ping.go | 2 ++ 2 files changed, 3 insertions(+) diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index eadc60ab7..846430a44 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -39,6 +39,7 @@ urls = ["www.google.com"] # required - result_code - 0: success - 1: no such host + - 2: ping error ### Tags: diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 7ddbf275e..46afb206a 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -117,6 +117,7 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error { } else { acc.AddError(fmt.Errorf("host %s: %s", u, err)) } + fields["result_code"] = 2 acc.AddFields("ping", fields, tags) return } @@ -126,6 +127,7 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error { if err != nil { // fatal error acc.AddError(fmt.Errorf("%s: %s", err, u)) + fields["result_code"] = 2 acc.AddFields("ping", fields, tags) return } From c82701771151a1489376d7421be932f086398bf6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 23 Aug 2018 13:52:55 -0700 Subject: [PATCH 0105/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c385146e7..6ee8d88c5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -82,6 +82,7 @@ - [#4534](https://github.com/influxdata/telegraf/pull/4534): Skip unserializable metric in influxDB UDP output. - [#4554](https://github.com/influxdata/telegraf/pull/4554): Fix powerdns input tests. - [#4584](https://github.com/influxdata/telegraf/pull/4584): Fix burrow_group offset calculation for burrow input. +- [#4550](https://github.com/influxdata/telegraf/pull/4550): Add result_code value for errors running ping command. 
## v1.7.3 [2018-08-07] From 0785821a802d801519e47a4774c473553f76967d Mon Sep 17 00:00:00 2001 From: Tracy Boggiano Date: Thu, 23 Aug 2018 19:59:21 -0400 Subject: [PATCH 0106/1815] Add forwarded records to sqlserver input (#4571) --- plugins/inputs/sqlserver/sqlserver.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 41a8b7ec7..1ca29eca0 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -451,7 +451,8 @@ WHERE ( 'Buffer cache hit ratio base', 'Backup/Restore Throughput/sec', 'Total Server Memory (KB)', - 'Target Server Memory (KB)' + 'Target Server Memory (KB)', + 'Forwarded Recs/sec' ) ) OR ( instance_name IN ('_Total','Column store object pool') From 1beb3e73e68b99098583d13e911fcd5d206d134c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 24 Aug 2018 10:23:15 -0700 Subject: [PATCH 0107/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6ee8d88c5..8d061b6d4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -74,6 +74,7 @@ - [#4551](https://github.com/influxdata/telegraf/pull/4551): Add parser processor plugin. - [#4559](https://github.com/influxdata/telegraf/pull/4559): Add Icinga2 input plugin. - [#4351](https://github.com/influxdata/telegraf/pull/4351): Add name, time, path and string field options to JSON parser. +- [#4571](https://github.com/influxdata/telegraf/pull/4571): Add forwarded records to sqlserver input. ## v1.7.4 [unreleased] From 3d84cee872aafb4267907d0281039dfd27ef5297 Mon Sep 17 00:00:00 2001 From: Leandro Piccilli Date: Fri, 24 Aug 2018 23:58:41 +0200 Subject: [PATCH 0108/1815] Add Kibana input plugin (#4585) --- plugins/inputs/all/all.go | 1 + plugins/inputs/kibana/README.md | 63 +++++++ plugins/inputs/kibana/kibana.go | 230 +++++++++++++++++++++++++ plugins/inputs/kibana/kibana_test.go | 66 +++++++ plugins/inputs/kibana/testdata_test.go | 199 +++++++++++++++++++++ 5 files changed, 559 insertions(+) create mode 100644 plugins/inputs/kibana/README.md create mode 100644 plugins/inputs/kibana/kibana.go create mode 100644 plugins/inputs/kibana/kibana_test.go create mode 100644 plugins/inputs/kibana/testdata_test.go diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 17762d1c7..feb462368 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -57,6 +57,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/kapacitor" _ "github.com/influxdata/telegraf/plugins/inputs/kernel" _ "github.com/influxdata/telegraf/plugins/inputs/kernel_vmstat" + _ "github.com/influxdata/telegraf/plugins/inputs/kibana" _ "github.com/influxdata/telegraf/plugins/inputs/kubernetes" _ "github.com/influxdata/telegraf/plugins/inputs/leofs" _ "github.com/influxdata/telegraf/plugins/inputs/linux_sysctl_fs" diff --git a/plugins/inputs/kibana/README.md b/plugins/inputs/kibana/README.md new file mode 100644 index 000000000..7d885aed1 --- /dev/null +++ b/plugins/inputs/kibana/README.md @@ -0,0 +1,63 @@ +# Kibana input plugin + +The [kibana](https://www.elastic.co/) plugin queries Kibana status API to +obtain the health status of Kibana and some useful metrics. + +This plugin has been tested and works on Kibana 6.x versions. 
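+ +The plugin polls Kibana's status endpoint (`/api/status` on each configured server). As a quick sanity check you can query the same endpoint by hand before enabling the plugin (a minimal sketch, assuming Kibana listens on the default address shown below; add `-u username:password` to curl if basic auth is enabled): + +```bash +curl http://localhost:5601/api/status +```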
+ +### Configuration + +```toml +[[inputs.kibana]] + ## specify a list of one or more Kibana servers + servers = ["http://localhost:5601"] + + ## Timeout for HTTP requests + timeout = "5s" + + ## HTTP Basic Auth credentials + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` + +### Status mappings + +When reporting health (green/yellow/red), additional field `status_code` +is reported. Field contains mapping from status:string to status_code:int +with following rules: + +- `green` - 1 +- `yellow` - 2 +- `red` - 3 +- `unknown` - 0 + +### Measurements & Fields + +- kibana + - status_code: integer (1, 2, 3, 0) + - heap_max_bytes: integer + - heap_used_bytes: integer + - uptime_ms: integer + - response_time_avg_ms: float + - response_time_max_ms: integer + - concurrent_connections: integer + - requests_per_sec: float + +### Tags + +- status (Kibana health: green, yellow, red) +- name (Kibana reported name) +- uuid (Kibana reported UUID) +- version (Kibana version) +- source (Kibana server hostname or IP) + +### Example Output + +kibana,host=myhost,name=my-kibana,source=localhost:5601,version=6.3.2 concurrent_connections=0i,heap_max_bytes=136478720i,heap_used_bytes=119231088i,response_time_avg_ms=0i,response_time_max_ms=0i,status="green",status_code=1i,uptime_ms=2187428019i 1534864502000000000 diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go new file mode 100644 index 000000000..0e21ad800 --- /dev/null +++ b/plugins/inputs/kibana/kibana.go @@ -0,0 +1,230 @@ +package kibana + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const statusPath = "/api/status" + +type kibanaStatus struct { + Name string `json:"name"` + Version version `json:"version"` + Status status `json:"status"` + Metrics metrics `json:"metrics"` +} + +type version struct { + Number string `json:"number"` + BuildHash string `json:"build_hash"` + BuildNumber int `json:"build_number"` + BuildSnapshot bool `json:"build_snapshot"` +} + +type status struct { + Overall overallStatus `json:"overall"` + Statuses interface{} `json:"statuses"` +} + +type overallStatus struct { + State string `json:"state"` +} + +type metrics struct { + UptimeInMillis int64 `json:"uptime_in_millis"` + ConcurrentConnections int64 `json:"concurrent_connections"` + CollectionIntervalInMilles int64 `json:"collection_interval_in_millis"` + ResponseTimes responseTimes `json:"response_times"` + Process process `json:"process"` + Requests requests `json:"requests"` +} + +type responseTimes struct { + AvgInMillis float64 `json:"avg_in_millis"` + MaxInMillis int64 `json:"max_in_millis"` +} + +type process struct { + Mem mem `json:"mem"` +} + +type requests struct { + Total int64 `json:"total"` +} + +type mem struct { + HeapMaxInBytes int64 `json:"heap_max_in_bytes"` + HeapUsedInBytes int64 `json:"heap_used_in_bytes"` +} + +const sampleConfig = ` + ## specify a list of one or more Kibana servers + servers = ["http://localhost:5601"] + + ## Timeout for HTTP requests + timeout = "5s" + + ## HTTP Basic Auth credentials + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config + # tls_ca 
= "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +` + +type Kibana struct { + Local bool + Servers []string + Username string + Password string + Timeout internal.Duration + tls.ClientConfig + + client *http.Client +} + +func NewKibana() *Kibana { + return &Kibana{ + Timeout: internal.Duration{Duration: time.Second * 5}, + } +} + +// perform status mapping +func mapHealthStatusToCode(s string) int { + switch strings.ToLower(s) { + case "green": + return 1 + case "yellow": + return 2 + case "red": + return 3 + } + return 0 +} + +// SampleConfig returns sample configuration for this plugin. +func (k *Kibana) SampleConfig() string { + return sampleConfig +} + +// Description returns the plugin description. +func (k *Kibana) Description() string { + return "Read status information from one or more Kibana servers" +} + +func (k *Kibana) Gather(acc telegraf.Accumulator) error { + if k.client == nil { + client, err := k.createHttpClient() + + if err != nil { + return err + } + k.client = client + } + + var wg sync.WaitGroup + wg.Add(len(k.Servers)) + + for _, serv := range k.Servers { + go func(baseUrl string, acc telegraf.Accumulator) { + defer wg.Done() + if err := k.gatherKibanaStatus(baseUrl, acc); err != nil { + acc.AddError(fmt.Errorf("[url=%s]: %s", baseUrl, err)) + return + } + }(serv, acc) + } + + wg.Wait() + return nil +} + +func (k *Kibana) createHttpClient() (*http.Client, error) { + tlsCfg, err := k.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + }, + Timeout: k.Timeout.Duration, + } + + return client, nil +} + +func (k *Kibana) gatherKibanaStatus(baseUrl string, acc telegraf.Accumulator) error { + + kibanaStatus := &kibanaStatus{} + url := baseUrl + statusPath + + host, err := k.gatherJsonData(url, kibanaStatus) + if err != nil { + return err + } + + fields := make(map[string]interface{}) + tags := make(map[string]string) + + tags["name"] = kibanaStatus.Name + tags["source"] = host + tags["version"] = kibanaStatus.Version.Number + tags["status"] = kibanaStatus.Status.Overall.State + + fields["status_code"] = mapHealthStatusToCode(kibanaStatus.Status.Overall.State) + + fields["uptime_ms"] = kibanaStatus.Metrics.UptimeInMillis + fields["concurrent_connections"] = kibanaStatus.Metrics.ConcurrentConnections + fields["heap_max_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes + fields["heap_used_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapUsedInBytes + fields["response_time_avg_ms"] = kibanaStatus.Metrics.ResponseTimes.AvgInMillis + fields["response_time_max_ms"] = kibanaStatus.Metrics.ResponseTimes.MaxInMillis + fields["requests_per_sec"] = float64(kibanaStatus.Metrics.Requests.Total) / float64(kibanaStatus.Metrics.CollectionIntervalInMilles) * 1000 + + acc.AddFields("kibana", fields, tags) + + return nil +} + +func (k *Kibana) gatherJsonData(url string, v interface{}) (host string, err error) { + + request, err := http.NewRequest("GET", url, nil) + + if (k.Username != "") || (k.Password != "") { + request.SetBasicAuth(k.Username, k.Password) + } + + response, err := k.client.Do(request) + if err != nil { + return "", err + } + + defer response.Body.Close() + + if err = json.NewDecoder(response.Body).Decode(v); err != nil { + return request.Host, err + } + + return request.Host, nil +} + +func init() { + inputs.Add("kibana", func() 
telegraf.Input { + return NewKibana() + }) +} diff --git a/plugins/inputs/kibana/kibana_test.go b/plugins/inputs/kibana/kibana_test.go new file mode 100644 index 000000000..ad5e32d29 --- /dev/null +++ b/plugins/inputs/kibana/kibana_test.go @@ -0,0 +1,66 @@ +package kibana + +import ( + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/influxdata/telegraf/testutil" +) + +func defaultTags() map[string]string { + return map[string]string{ + "name": "my-kibana", + "source": "example.com:5601", + "version": "6.3.2", + "status": "green", + } +} + +type transportMock struct { + statusCode int + body string +} + +func newTransportMock(statusCode int, body string) http.RoundTripper { + return &transportMock{ + statusCode: statusCode, + body: body, + } +} + +func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { + res := &http.Response{ + Header: make(http.Header), + Request: r, + StatusCode: t.statusCode, + } + res.Header.Set("Content-Type", "application/json") + res.Body = ioutil.NopCloser(strings.NewReader(t.body)) + return res, nil +} + +func checkKibanaStatusResult(t *testing.T, acc *testutil.Accumulator) { + tags := defaultTags() + acc.AssertContainsTaggedFields(t, "kibana", kibanaStatusExpected, tags) +} + +func TestGather(t *testing.T) { + ks := newKibanahWithClient() + ks.Servers = []string{"http://example.com:5601"} + ks.client.Transport = newTransportMock(http.StatusOK, kibanaStatusResponse) + + var acc testutil.Accumulator + if err := acc.GatherError(ks.Gather); err != nil { + t.Fatal(err) + } + + checkKibanaStatusResult(t, &acc) +} + +func newKibanahWithClient() *Kibana { + ks := NewKibana() + ks.client = &http.Client{} + return ks +} diff --git a/plugins/inputs/kibana/testdata_test.go b/plugins/inputs/kibana/testdata_test.go new file mode 100644 index 000000000..ec393bb19 --- /dev/null +++ b/plugins/inputs/kibana/testdata_test.go @@ -0,0 +1,199 @@ +package kibana + +const kibanaStatusResponse = ` +{ + "name": "my-kibana", + "uuid": "00000000-0000-0000-0000-000000000000", + "version": { + "number": "6.3.2", + "build_hash": "53d0c6758ac3fb38a3a1df198c1d4c87765e63f7", + "build_number": 17307, + "build_snapshot": false + }, + "status": { + "overall": { + "state": "green", + "title": "Green", + "nickname": "Looking good", + "icon": "success", + "since": "2018-07-27T07:37:42.567Z" + }, + "statuses": [{ + "id": "plugin:kibana@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.567Z" + }, + { + "id": "plugin:elasticsearch@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:04.920Z" + }, + { + "id": "plugin:xpack_main@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.393Z" + }, + { + "id": "plugin:searchprofiler@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.395Z" + }, + { + "id": "plugin:tilemap@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.396Z" + }, + { + "id": "plugin:watcher@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.397Z" + }, + { + "id": "plugin:license_management@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.668Z" + }, + { + "id": "plugin:index_management@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.399Z" + }, + { + 
"id": "plugin:timelion@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.912Z" + }, + { + "id": "plugin:logtrail@0.1.29", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.919Z" + }, + { + "id": "plugin:monitoring@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.922Z" + }, + { + "id": "plugin:grokdebugger@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.400Z" + }, + { + "id": "plugin:dashboard_mode@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.928Z" + }, + { + "id": "plugin:logstash@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.401Z" + }, + { + "id": "plugin:apm@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.950Z" + }, + { + "id": "plugin:console@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.958Z" + }, + { + "id": "plugin:console_extensions@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.961Z" + }, + { + "id": "plugin:metrics@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.965Z" + }, + { + "id": "plugin:reporting@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.402Z" + }] + }, + "metrics": { + "last_updated": "2018-08-21T11:24:25.823Z", + "collection_interval_in_millis": 5000, + "uptime_in_millis": 2173595336, + "process": { + "mem": { + "heap_max_in_bytes": 149954560, + "heap_used_in_bytes": 126274392 + } + }, + "os": { + "cpu": { + "load_average": { + "1m": 0.1806640625, + "5m": 0.49658203125, + "15m": 0.458984375 + } + } + }, + "response_times": { + "avg_in_millis": 12.5, + "max_in_millis": 123 + }, + "requests": { + "total": 2, + "disconnects": 0, + "status_codes": { + "200": 2 + } + }, + "concurrent_connections": 10 + } +} +` + +var kibanaStatusExpected = map[string]interface{}{ + "status_code": 1, + "heap_max_bytes": int64(149954560), + "heap_used_bytes": int64(126274392), + "uptime_ms": int64(2173595336), + "response_time_avg_ms": float64(12.5), + "response_time_max_ms": int64(123), + "concurrent_connections": int64(10), + "requests_per_sec": float64(0.4), +} From ff66a9de69ef689cc1316b1510d29cbf6f2c057e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 24 Aug 2018 14:59:51 -0700 Subject: [PATCH 0109/1815] Update changelog --- CHANGELOG.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d061b6d4..dd8cf460b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,19 +8,20 @@ ### New Inputs +- [activemq](./plugins/inputs/activemq/README.md) - Contributed by @mlabouardy +- [filecount](./plugins/inputs/filecount/README.md) - Contributed by @sometimesfood - [file](./plugins/inputs/file/README.md) - Contributed by @maxunt +- [icinga2](./plugins/inputs/icinga2/README.md) - Contributed by @mlabouardy +- [kibana](./plugins/inputs/icinga2/README.md) - Contributed by @lpic10 +- [pgbouncer](./plugins/inputs/pgbouncer/README.md) - Contributed by @nerzhul - [tengine](./plugins/inputs/tengine/README.md) - Contributed by @ertaoxu - [x509_cert](./plugins/inputs/x509_cert/README.md) - Contributed by @jtyr -- 
[filecount](./plugins/inputs/filecount/README.md) - Contributed by @sometimesfood -- [pgbouncer](./plugins/inputs/pgbouncer/README.md) - Contributed by @nerzhul -- [activemq](./plugins/inputs/activemq/README.md) - Contributed by @mlabouardy -- [icinga2](./plugins/inputs/icinga2/README.md) - Contributed by @mlabouardy ### New Processors - [enum](./plugins/processors/enum/README.md) - Contributed by @KarstenSchnitter -- [rename](./plugins/processors/rename/README.md) - Contributed by @goldibex - [parser](./plugins/processors/parser/README.md) - Contributed by @maxunt & @Ayrdrie +- [rename](./plugins/processors/rename/README.md) - Contributed by @goldibex ### New Aggregators @@ -75,6 +76,7 @@ - [#4559](https://github.com/influxdata/telegraf/pull/4559): Add Icinga2 input plugin. - [#4351](https://github.com/influxdata/telegraf/pull/4351): Add name, time, path and string field options to JSON parser. - [#4571](https://github.com/influxdata/telegraf/pull/4571): Add forwarded records to sqlserver input. +- [#4585](https://github.com/influxdata/telegraf/pull/4585): Add Kibana input plugin. ## v1.7.4 [unreleased] From 80346b2e932af94f93ea8f10a3ad76d248e3fb29 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 24 Aug 2018 16:37:11 -0700 Subject: [PATCH 0110/1815] Update prometheus output sample config and README --- plugins/outputs/prometheus_client/README.md | 39 +++++++++++-------- .../prometheus_client/prometheus_client.go | 32 ++++++++------- 2 files changed, 40 insertions(+), 31 deletions(-) diff --git a/plugins/outputs/prometheus_client/README.md b/plugins/outputs/prometheus_client/README.md index d68cafe9d..d4de4894a 100644 --- a/plugins/outputs/prometheus_client/README.md +++ b/plugins/outputs/prometheus_client/README.md @@ -4,30 +4,35 @@ This plugin starts a [Prometheus](https://prometheus.io/) Client, it exposes all ## Configuration -``` +```toml # Publish all metrics to /metrics for Prometheus to scrape [[outputs.prometheus_client]] - # Address to listen on + ## Address to listen on. listen = ":9273" - # Use TLS - tls_cert = "/etc/ssl/telegraf.crt" - tls_key = "/etc/ssl/telegraf.key" + ## Use HTTP Basic Authentication. + # basic_username = "Foo" + # basic_password = "Bar" - # Use http basic authentication - basic_username = "Foo" - basic_password = "Bar" + ## If set, the IP Ranges which are allowed to access metrics. + ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"] + # ip_range = [] - # IP Ranges which are allowed to access metrics - ip_range = ["192.168.0.0/24", "192.168.1.0/30"] + ## Path to publish the metrics on. + # path = "/metrics" - # Path to publish the metrics on, defaults to /metrics - path = "/metrics" + ## Expiration interval for each metric. 0 == no expiration + # expiration_interval = "60s" - # Expiration interval for each metric. 0 == no expiration - expiration_interval = "60s" + ## Collectors to enable, valid entries are "gocollector" and "process". + ## If unset, both are enabled. + # collectors_exclude = ["gocollector", "process"] - # Send string metrics as Prometheus labels. - # Unless set to false all string metrics will be sent as labels. - string_as_label = true + ## Send string metrics as Prometheus labels. + ## Unless set to false all string metrics will be sent as labels. + # string_as_label = true + + ## If set, enable TLS with the given certificate. 
+ # tls_cert = "/etc/ssl/telegraf.crt" + # tls_key = "/etc/ssl/telegraf.key" ``` diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 9634e9227..c038eba66 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -76,29 +76,33 @@ type PrometheusClient struct { var sampleConfig = ` ## Address to listen on - # listen = ":9273" + listen = ":9273" - ## Use TLS - #tls_cert = "/etc/ssl/telegraf.crt" - #tls_key = "/etc/ssl/telegraf.key" + ## Use HTTP Basic Authentication. + # basic_username = "Foo" + # basic_password = "Bar" - ## Use http basic authentication - #basic_username = "Foo" - #basic_password = "Bar" + ## If set, the IP Ranges which are allowed to access metrics. + ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"] + # ip_range = [] - ## IP Ranges which are allowed to access metrics - #ip_range = ["192.168.0.0/24", "192.168.1.0/30"] + ## Path to publish the metrics on. + # path = "/metrics" - ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration + ## Expiration interval for each metric. 0 == no expiration # expiration_interval = "60s" ## Collectors to enable, valid entries are "gocollector" and "process". ## If unset, both are enabled. - collectors_exclude = ["gocollector", "process"] + # collectors_exclude = ["gocollector", "process"] - # Send string metrics as Prometheus labels. - # Unless set to false all string metrics will be sent as labels. - string_as_label = true + ## Send string metrics as Prometheus labels. + ## Unless set to false all string metrics will be sent as labels. + # string_as_label = true + + ## If set, enable TLS with the given certificate. + # tls_cert = "/etc/ssl/telegraf.crt" + # tls_key = "/etc/ssl/telegraf.key" ` func (p *PrometheusClient) auth(h http.Handler) http.Handler { From 889745a1120f49da32abd574ab3b5ec841797798 Mon Sep 17 00:00:00 2001 From: maxunt Date: Fri, 24 Aug 2018 16:40:41 -0700 Subject: [PATCH 0111/1815] Add csv parser (#4439) --- docs/DATA_FORMATS_INPUT.md | 122 ++++++++++++--- internal/config/config.go | 122 +++++++++++++++ plugins/parsers/csv/parser.go | 196 ++++++++++++++++++++++++ plugins/parsers/csv/parser_test.go | 231 +++++++++++++++++++++++++++++ plugins/parsers/registry.go | 82 ++++++++++ 5 files changed, 733 insertions(+), 20 deletions(-) create mode 100644 plugins/parsers/csv/parser.go create mode 100644 plugins/parsers/csv/parser_test.go diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index 6e1b6a751..7e57d9657 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -12,6 +12,7 @@ Telegraf is able to parse the following input data formats into metrics: 1. [Grok](#grok) 1. [Logfmt](#logfmt) 1. [Wavefront](#wavefront) +1. [CSV](#csv) Telegraf metrics, like InfluxDB [points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/), @@ -107,28 +108,28 @@ but can be overridden using the `name_override` config option. #### JSON Configuration: -The JSON data format supports specifying "tag_keys", "string_keys", and "json_query". -If specified, keys in "tag_keys" and "string_keys" will be searched for in the root-level -and any nested lists of the JSON blob. All int and float values are added to fields by default. -If the key(s) exist, they will be applied as tags or fields to the Telegraf metrics. +The JSON data format supports specifying "tag_keys", "string_keys", and "json_query". 
+If specified, keys in "tag_keys" and "string_keys" will be searched for in the root-level
+and any nested lists of the JSON blob. All int and float values are added to fields by default.
+If the key(s) exist, they will be applied as tags or fields to the Telegraf metrics.
If "string_keys" is specified, the string will be added as a field.

-The "json_query" configuration is a gjson path to a JSON object or
-list of JSON objects. If this path leads to an array of values or
-single data point an error will be thrown. If this configuration
is specified, only the result of the query will be parsed and returned as metrics.

The "json_name_key" configuration specifies the key of the field whose value
will be added as the metric name.

Object paths are specified using gjson path format, which is denoted by object keys
concatenated with "." to go deeper in nested JSON objects.
Additional information on gjson paths can be found here:
https://github.com/tidwall/gjson#path-syntax

The JSON data format also supports extracting time values through the
config "json_time_key" and "json_time_format". If "json_time_key" is set,
"json_time_format" must be specified. The "json_time_key" describes the
name of the field containing time information. The "json_time_format"
must be a recognized Go time format. If there is no year provided, the
metrics will have the current year.
More info on time formats can be found here: https://golang.org/pkg/time/#Parse

@@ -161,8 +162,8 @@ For example, if you had this configuration:
   ## List of field names to extract from JSON and add as string fields
   # json_string_fields = []

-  ## gjson query path to specify a specific chunk of JSON to be parsed with
-  ## the above configuration. If not specified, the whole file will be parsed.
+  ## gjson query path to specify a specific chunk of JSON to be parsed with
+  ## the above configuration. If not specified, the whole file will be parsed.
   ## gjson query paths are described here: https://github.com/tidwall/gjson#path-syntax
   # json_query = ""

@@ -191,8 +192,8 @@ Your Telegraf metrics would get tagged with "my_tag_1"

 exec_mycollector,my_tag_1=foo a=5,b_c=6
 ```

-If the JSON data is an array, then each element of the array is
-parsed with the configured settings. Each resulting metric will
+If the JSON data is an array, then each element of the array is
+parsed with the configured settings. Each resulting metric will
 be output with the same timestamp.

 For example, if the following configuration:

@@ -220,7 +221,7 @@ For example, if the following configuration:
   ## List of field names to extract from JSON and add as string fields
   # string_fields = []

-  ## gjson query path to specify a specific chunk of JSON to be parsed with
+  ## gjson query path to specify a specific chunk of JSON to be parsed with
   ## the above configuration. 
If not specified, the whole file will be parsed
   # json_query = ""

@@ -264,7 +265,7 @@
 exec_mycollector,my_tag_1=foo,my_tag_2=baz b_c=6 1136387040000000000
 exec_mycollector,my_tag_1=bar,my_tag_2=baz b_c=8 1168527840000000000
 ```

-If you want to use only a specific portion of your JSON, use the "json_query"
configuration to specify a path to a JSON object.

For example, with the following config:
@@ -288,7 +289,7 @@ For example, with the following config:
   ## List of field names to extract from JSON and add as string fields
   string_fields = ["last"]

-  ## gjson query path to specify a specific chunk of JSON to be parsed with
+  ## gjson query path to specify a specific chunk of JSON to be parsed with
   ## the above configuration. If not specified, the whole file will be parsed
   json_query = "obj.friends"

@@ -1038,3 +1039,84 @@ There are no additional configuration options for Wavefront Data Format line-protocol.
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
   data_format = "wavefront"
 ```
+
+# CSV
+Parse out metrics from a CSV formatted table. By default, the parser assumes there is no header and
+will read data from the first line. If `csv_header_row_count` is set to anything besides 0, the parser
+will extract column names from that many rows. Headers spanning more than one row will have their
+names concatenated together. Any unnamed columns will be ignored by the parser.
+
+The `csv_skip_rows` config indicates the number of rows to skip before looking for header information or data
+to parse. By default, no rows will be skipped.
+
+The `csv_skip_columns` config indicates the number of columns to be skipped before parsing data. These
+columns will not be read out of the header. Naming with the `csv_column_names` will begin at the first
+parsed column after skipping the indicated columns. By default, no columns are skipped.
+
+To assign custom column names, the `csv_column_names` config is available. If the `csv_column_names`
+config is used, all columns must be named, as additional columns will be ignored. If `csv_header_row_count`
+is set to 0, `csv_column_names` must be specified. Names listed in `csv_column_names` will override names extracted
+from the header.
+
+The `csv_tag_columns` config marks columns as tags. The name used to specify a column is the name in
+the header, or, if specified, the corresponding name assigned in `csv_column_names`. Columns not listed
+in `csv_tag_columns` will be added to the metric as fields.
+
+Additional configs are available to dynamically name metrics and set custom timestamps. If the
+`csv_measurement_column` config is specified, the parser will assign the metric name to the value found
+in that column. If the `csv_timestamp_column` is specified, the parser will extract the timestamp from
+that column; in that case `csv_timestamp_format` must also be specified
+or an error will be thrown.
+
+#### CSV Configuration
+```toml
+ data_format = "csv"
+
+ ## Indicates how many rows to treat as a header. By default, the parser assumes
+ ## there is no header and will parse the first row as data. If set to anything more
+ ## than 1, column names will be concatenated with the name listed in the next header row.
+ ## If `csv_column_names` is specified, the column names in header will be overridden. 
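+ ## For example (illustrative values): given two header rows "a,b" and "1,2",
+ ## the concatenated column names become "a1" and "b2".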
+ # csv_header_row_count = 0
+
+ ## Indicates the number of rows to skip before looking for header information.
+ # csv_skip_rows = 0
+
+ ## Indicates the number of columns to skip before looking for data to parse.
+ ## These columns will be skipped in the header as well.
+ # csv_skip_columns = 0
+
+ ## The separator between csv fields
+ ## By default, the parser assumes a comma (",")
+ # csv_delimiter = ","
+
+ ## The character reserved for marking a row as a comment row
+ ## Commented rows are skipped and not parsed
+ # csv_comment = ""
+
+ ## If set to true, the parser will remove leading whitespace from fields
+ ## By default, this is false
+ # csv_trim_space = false
+
+ ## For assigning custom names to columns
+ ## If this is specified, all columns should have a name
+ ## Unnamed columns will be ignored by the parser.
+ ## If `csv_header_row_count` is set to 0, this config must be used
+ csv_column_names = []
+
+ ## Columns listed here will be added as tags. Any other columns
+ ## will be added as fields.
+ csv_tag_columns = []
+
+ ## The column to extract the name of the metric from
+ ## By default, this is the name of the plugin
+ ## the `name_override` config overrides this
+ # csv_measurement_column = ""
+
+ ## The column to extract time information for the metric
+ ## `csv_timestamp_format` must be specified if this is used
+ # csv_timestamp_column = ""
+
+ ## The format of time data extracted from `csv_timestamp_column`
+ ## this must be specified if `csv_timestamp_column` is specified
+ # csv_timestamp_format = ""
+ ```
diff --git a/internal/config/config.go b/internal/config/config.go
index 5926f6132..c712af85e 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -1443,6 +1443,120 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
 }
 }

+ //for csv parser
+ if node, ok := tbl.Fields["csv_column_names"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if ary, ok := kv.Value.(*ast.Array); ok {
+ for _, elem := range ary.Value {
+ if str, ok := elem.(*ast.String); ok {
+ c.CSVColumnNames = append(c.CSVColumnNames, str.Value)
+ }
+ }
+ }
+ }
+ }
+
+ if node, ok := tbl.Fields["csv_tag_columns"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if ary, ok := kv.Value.(*ast.Array); ok {
+ for _, elem := range ary.Value {
+ if str, ok := elem.(*ast.String); ok {
+ c.CSVTagColumns = append(c.CSVTagColumns, str.Value)
+ }
+ }
+ }
+ }
+ }
+
+ if node, ok := tbl.Fields["csv_delimiter"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if str, ok := kv.Value.(*ast.String); ok {
+ c.CSVDelimiter = str.Value
+ }
+ }
+ }
+
+ if node, ok := tbl.Fields["csv_comment"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if str, ok := kv.Value.(*ast.String); ok {
+ c.CSVComment = str.Value
+ }
+ }
+ }
+
+ if node, ok := tbl.Fields["csv_measurement_column"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if str, ok := kv.Value.(*ast.String); ok {
+ c.CSVMeasurementColumn = str.Value
+ }
+ }
+ }
+
+ if node, ok := tbl.Fields["csv_timestamp_column"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if str, ok := kv.Value.(*ast.String); ok {
+ c.CSVTimestampColumn = str.Value
+ }
+ }
+ }
+
+ if node, ok := tbl.Fields["csv_timestamp_format"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if str, ok := kv.Value.(*ast.String); ok {
+ c.CSVTimestampFormat = str.Value
+ }
+ }
+ }
+
+ if node, ok := tbl.Fields["csv_header_row_count"]; ok {
+ if kv, ok := node.(*ast.KeyValue); ok {
+ if str, ok := kv.Value.(*ast.String); ok {
+ iVal, err := 
strconv.Atoi(str.Value) + c.CSVHeaderRowCount = iVal + if err != nil { + return nil, fmt.Errorf("E! parsing to int: %v", err) + } + } + } + } + + if node, ok := tbl.Fields["csv_skip_rows"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + iVal, err := strconv.Atoi(str.Value) + c.CSVSkipRows = iVal + if err != nil { + return nil, fmt.Errorf("E! parsing to int: %v", err) + } + } + } + } + + if node, ok := tbl.Fields["csv_skip_columns"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + iVal, err := strconv.Atoi(str.Value) + c.CSVSkipColumns = iVal + if err != nil { + return nil, fmt.Errorf("E! parsing to int: %v", err) + } + } + } + } + + if node, ok := tbl.Fields["csv_trim_space"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.Boolean); ok { + //for config with no quotes + val, err := strconv.ParseBool(str.Value) + c.CSVTrimSpace = val + if err != nil { + return nil, fmt.Errorf("E! parsing to bool: %v", err) + } + } + } + } + c.MetricName = name delete(tbl.Fields, "data_format") @@ -1469,6 +1583,14 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { delete(tbl.Fields, "grok_custom_patterns") delete(tbl.Fields, "grok_custom_pattern_files") delete(tbl.Fields, "grok_timezone") + delete(tbl.Fields, "csv_data_columns") + delete(tbl.Fields, "csv_tag_columns") + delete(tbl.Fields, "csv_field_columns") + delete(tbl.Fields, "csv_name_column") + delete(tbl.Fields, "csv_timestamp_column") + delete(tbl.Fields, "csv_timestamp_format") + delete(tbl.Fields, "csv_delimiter") + delete(tbl.Fields, "csv_header") return parsers.NewParser(c) } diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go new file mode 100644 index 000000000..9193fbf5b --- /dev/null +++ b/plugins/parsers/csv/parser.go @@ -0,0 +1,196 @@ +package csv + +import ( + "bytes" + "encoding/csv" + "fmt" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +type Parser struct { + MetricName string + HeaderRowCount int + SkipRows int + SkipColumns int + Delimiter string + Comment string + TrimSpace bool + ColumnNames []string + TagColumns []string + MeasurementColumn string + TimestampColumn string + TimestampFormat string + DefaultTags map[string]string +} + +func (p *Parser) compile(r *bytes.Reader) (*csv.Reader, error) { + csvReader := csv.NewReader(r) + // ensures that the reader reads records of different lengths without an error + csvReader.FieldsPerRecord = -1 + if p.Delimiter != "" { + csvReader.Comma = []rune(p.Delimiter)[0] + } + if p.Comment != "" { + csvReader.Comment = []rune(p.Comment)[0] + } + return csvReader, nil +} + +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { + r := bytes.NewReader(buf) + csvReader, err := p.compile(r) + if err != nil { + return nil, err + } + // skip first rows + for i := 0; i < p.SkipRows; i++ { + csvReader.Read() + } + // if there is a header and nothing in DataColumns + // set DataColumns to names extracted from the header + headerNames := make([]string, 0) + if len(p.ColumnNames) == 0 { + for i := 0; i < p.HeaderRowCount; i++ { + header, err := csvReader.Read() + if err != nil { + return nil, err + } + //concatenate header names + for i := range header { + name := header[i] + if p.TrimSpace { + name = strings.Trim(name, " ") + } + if len(headerNames) <= i { + headerNames = append(headerNames, name) + } else { + headerNames[i] = headerNames[i] + name + } + } + } 
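+ // headerNames was built for every column, including skipped ones; dropping
+ // the first SkipColumns names below keeps ColumnNames aligned with each
+ // record, which parseRecord trims by the same number of leading columns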
+ p.ColumnNames = headerNames[p.SkipColumns:] + } else { + // if columns are named, just skip header rows + for i := 0; i < p.HeaderRowCount; i++ { + csvReader.Read() + } + } + + table, err := csvReader.ReadAll() + if err != nil { + return nil, err + } + + metrics := make([]telegraf.Metric, 0) + for _, record := range table { + m, err := p.parseRecord(record) + if err != nil { + return metrics, err + } + metrics = append(metrics, m) + } + return metrics, nil +} + +// ParseLine does not use any information in header and assumes DataColumns is set +// it will also not skip any rows +func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { + r := bytes.NewReader([]byte(line)) + csvReader, err := p.compile(r) + if err != nil { + return nil, err + } + + // if there is nothing in DataColumns, ParseLine will fail + if len(p.ColumnNames) == 0 { + return nil, fmt.Errorf("[parsers.csv] data columns must be specified") + } + + record, err := csvReader.Read() + if err != nil { + return nil, err + } + m, err := p.parseRecord(record) + if err != nil { + return nil, err + } + return m, nil +} + +func (p *Parser) parseRecord(record []string) (telegraf.Metric, error) { + recordFields := make(map[string]interface{}) + tags := make(map[string]string) + + // skip columns in record + record = record[p.SkipColumns:] +outer: + for i, fieldName := range p.ColumnNames { + if i < len(record) { + value := record[i] + if p.TrimSpace { + value = strings.Trim(value, " ") + } + + for _, tagName := range p.TagColumns { + if tagName == fieldName { + tags[tagName] = value + continue outer + } + } + + // attempt type conversions + if iValue, err := strconv.ParseInt(value, 10, 64); err == nil { + recordFields[fieldName] = iValue + } else if fValue, err := strconv.ParseFloat(value, 64); err == nil { + recordFields[fieldName] = fValue + } else if bValue, err := strconv.ParseBool(value); err == nil { + recordFields[fieldName] = bValue + } else { + recordFields[fieldName] = value + } + } + } + + // add default tags + for k, v := range p.DefaultTags { + tags[k] = v + } + + // will default to plugin name + measurementName := p.MetricName + if recordFields[p.MeasurementColumn] != nil { + measurementName = fmt.Sprintf("%v", recordFields[p.MeasurementColumn]) + } + + metricTime := time.Now() + if p.TimestampColumn != "" { + if recordFields[p.TimestampColumn] == nil { + return nil, fmt.Errorf("timestamp column: %v could not be found", p.TimestampColumn) + } + tStr := fmt.Sprintf("%v", recordFields[p.TimestampColumn]) + if p.TimestampFormat == "" { + return nil, fmt.Errorf("timestamp format must be specified") + } + + var err error + metricTime, err = time.Parse(p.TimestampFormat, tStr) + if err != nil { + return nil, err + } + } + + m, err := metric.New(measurementName, tags, recordFields, metricTime) + if err != nil { + return nil, err + } + return m, nil +} + +func (p *Parser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go new file mode 100644 index 000000000..b488a1f16 --- /dev/null +++ b/plugins/parsers/csv/parser_test.go @@ -0,0 +1,231 @@ +package csv + +import ( + "fmt" + "testing" + "time" + + "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/require" +) + +func TestBasicCSV(t *testing.T) { + p := Parser{ + ColumnNames: []string{"first", "second", "third"}, + TagColumns: []string{"third"}, + } + + _, err := p.ParseLine("1.4,true,hi") + require.NoError(t, err) +} + +func 
TestHeaderConcatenationCSV(t *testing.T) { + p := Parser{ + HeaderRowCount: 2, + MeasurementColumn: "3", + } + testCSV := `first,second +1,2,3 +3.4,70,test_name` + + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, "test_name", metrics[0].Name()) +} + +func TestHeaderOverride(t *testing.T) { + p := Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + } + testCSV := `line1,line2,line3 +3.4,70,test_name` + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, "test_name", metrics[0].Name()) +} + +func TestTimestamp(t *testing.T) { + p := Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimestampColumn: "first", + TimestampFormat: "02/01/06 03:04:05 PM", + } + testCSV := `line1,line2,line3 +23/05/09 04:05:06 PM,70,test_name +07/11/09 04:05:06 PM,80,test_name2` + metrics, err := p.Parse([]byte(testCSV)) + + require.NoError(t, err) + require.Equal(t, metrics[0].Time().UnixNano(), int64(1243094706000000000)) + require.Equal(t, metrics[1].Time().UnixNano(), int64(1257609906000000000)) +} + +func TestTimestampError(t *testing.T) { + p := Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimestampColumn: "first", + } + testCSV := `line1,line2,line3 +23/05/09 04:05:06 PM,70,test_name +07/11/09 04:05:06 PM,80,test_name2` + _, err := p.Parse([]byte(testCSV)) + require.Equal(t, fmt.Errorf("timestamp format must be specified"), err) +} + +func TestQuotedCharacter(t *testing.T) { + p := Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + } + + testCSV := `line1,line2,line3 +"3,4",70,test_name` + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, "3,4", metrics[0].Fields()["first"]) +} + +func TestDelimiter(t *testing.T) { + p := Parser{ + HeaderRowCount: 1, + Delimiter: "%", + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + } + + testCSV := `line1%line2%line3 +3,4%70%test_name` + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, "3,4", metrics[0].Fields()["first"]) +} + +func TestValueConversion(t *testing.T) { + p := Parser{ + HeaderRowCount: 0, + Delimiter: ",", + ColumnNames: []string{"first", "second", "third", "fourth"}, + MetricName: "test_value", + } + testCSV := `3.3,4,true,hello` + + expectedTags := make(map[string]string) + expectedFields := map[string]interface{}{ + "first": 3.3, + "second": 4, + "third": true, + "fourth": "hello", + } + + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + + expectedMetric, err1 := metric.New("test_value", expectedTags, expectedFields, time.Unix(0, 0)) + returnedMetric, err2 := metric.New(metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields(), time.Unix(0, 0)) + require.NoError(t, err1) + require.NoError(t, err2) + + //deep equal fields + require.Equal(t, expectedMetric.Fields(), returnedMetric.Fields()) +} + +func TestSkipComment(t *testing.T) { + p := Parser{ + HeaderRowCount: 0, + Comment: "#", + ColumnNames: []string{"first", "second", "third", "fourth"}, + MetricName: "test_value", + } + testCSV := `#3.3,4,true,hello +4,9.9,true,name_this` + + expectedFields := map[string]interface{}{ + "first": int64(4), + "second": 9.9, + "third": true, + "fourth": "name_this", + } + + metrics, err := p.Parse([]byte(testCSV)) + 
require.NoError(t, err) + require.Equal(t, expectedFields, metrics[0].Fields()) +} + +func TestTrimSpace(t *testing.T) { + p := Parser{ + HeaderRowCount: 0, + TrimSpace: true, + ColumnNames: []string{"first", "second", "third", "fourth"}, + MetricName: "test_value", + } + testCSV := ` 3.3, 4, true,hello` + + expectedFields := map[string]interface{}{ + "first": 3.3, + "second": int64(4), + "third": true, + "fourth": "hello", + } + + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, expectedFields, metrics[0].Fields()) +} + +func TestSkipRows(t *testing.T) { + p := Parser{ + HeaderRowCount: 1, + SkipRows: 1, + TagColumns: []string{"line1"}, + MeasurementColumn: "line3", + } + testCSV := `garbage nonsense +line1,line2,line3 +hello,80,test_name2` + + expectedFields := map[string]interface{}{ + "line2": int64(80), + "line3": "test_name2", + } + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, expectedFields, metrics[0].Fields()) +} + +func TestSkipColumns(t *testing.T) { + p := Parser{ + SkipColumns: 1, + ColumnNames: []string{"line1", "line2"}, + } + testCSV := `hello,80,test_name` + + expectedFields := map[string]interface{}{ + "line1": int64(80), + "line2": "test_name", + } + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, expectedFields, metrics[0].Fields()) +} + +func TestSkipColumnsWithHeader(t *testing.T) { + p := Parser{ + SkipColumns: 1, + HeaderRowCount: 2, + } + testCSV := `col,col,col + 1,2,3 + trash,80,test_name` + + // we should expect an error if we try to get col1 + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, map[string]interface{}{"col2": int64(80), "col3": "test_name"}, metrics[0].Fields()) +} diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 89fdc9a10..32027e417 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -6,6 +6,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers/collectd" + "github.com/influxdata/telegraf/plugins/parsers/csv" "github.com/influxdata/telegraf/plugins/parsers/dropwizard" "github.com/influxdata/telegraf/plugins/parsers/graphite" "github.com/influxdata/telegraf/plugins/parsers/grok" @@ -113,6 +114,19 @@ type Config struct { GrokCustomPatterns string GrokCustomPatternFiles []string GrokTimeZone string + + //csv configuration + CSVDelimiter string + CSVComment string + CSVTrimSpace bool + CSVColumnNames []string + CSVTagColumns []string + CSVMeasurementColumn string + CSVTimestampColumn string + CSVTimestampFormat string + CSVHeaderRowCount int + CSVSkipRows int + CSVSkipColumns int } // NewParser returns a Parser interface based on the given config. 
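// The config's DataFormat field selects which parser is built (for example
// the "csv" and "logfmt" cases in the switch below).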
@@ -162,6 +176,20 @@ func NewParser(config *Config) (Parser, error) {
 config.GrokCustomPatterns,
 config.GrokCustomPatternFiles,
 config.GrokTimeZone)
+ case "csv":
+ parser, err = newCSVParser(config.MetricName,
+ config.CSVHeaderRowCount,
+ config.CSVSkipRows,
+ config.CSVSkipColumns,
+ config.CSVDelimiter,
+ config.CSVComment,
+ config.CSVTrimSpace,
+ config.CSVColumnNames,
+ config.CSVTagColumns,
+ config.CSVMeasurementColumn,
+ config.CSVTimestampColumn,
+ config.CSVTimestampFormat,
+ config.DefaultTags)
 case "logfmt":
 parser, err = NewLogFmtParser(config.MetricName, config.DefaultTags)
 default:
@@ -170,6 +198,60 @@
 }
 return parser, err
}

+func newCSVParser(metricName string,
+ header int,
+ skipRows int,
+ skipColumns int,
+ delimiter string,
+ comment string,
+ trimSpace bool,
+ dataColumns []string,
+ tagColumns []string,
+ nameColumn string,
+ timestampColumn string,
+ timestampFormat string,
+ defaultTags map[string]string) (Parser, error) {
+
+ if header == 0 && len(dataColumns) == 0 {
+ // if there is no header and no column names, that's an error
+ return nil, fmt.Errorf("there must be a header if `csv_column_names` is not specified")
+ }
+
+ if delimiter != "" {
+ runeStr := []rune(delimiter)
+ if len(runeStr) > 1 {
+ return nil, fmt.Errorf("delimiter must be a single character, got: %s", delimiter)
+ }
+ delimiter = fmt.Sprintf("%v", runeStr[0])
+ }
+
+ if comment != "" {
+ runeStr := []rune(comment)
+ if len(runeStr) > 1 {
+ return nil, fmt.Errorf("comment must be a single character, got: %s", comment)
+ }
+ comment = fmt.Sprintf("%v", runeStr[0])
+ }
+
+ parser := &csv.Parser{
+ MetricName: metricName,
+ HeaderRowCount: header,
+ SkipRows: skipRows,
+ SkipColumns: skipColumns,
+ Delimiter: delimiter,
+ Comment: comment,
+ TrimSpace: trimSpace,
+ ColumnNames: dataColumns,
+ TagColumns: tagColumns,
+ MeasurementColumn: nameColumn,
+ TimestampColumn: timestampColumn,
+ TimestampFormat: timestampFormat,
+ DefaultTags: defaultTags,
+ }
+
+ return parser, nil
+}
+
 func newJSONParser(
 metricName string,
 tagKeys []string,
From d3061520f4f94cdd74d979305fa5de76731cdd03 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Fri, 24 Aug 2018 16:41:51 -0700
Subject: [PATCH 0112/1815] Update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index dd8cf460b..a7b7743ec 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -77,6 +77,7 @@
 - [#4351](https://github.com/influxdata/telegraf/pull/4351): Add name, time, path and string field options to JSON parser.
 - [#4571](https://github.com/influxdata/telegraf/pull/4571): Add forwarded records to sqlserver input.
 - [#4585](https://github.com/influxdata/telegraf/pull/4585): Add Kibana input plugin.
+- [#4439](https://github.com/influxdata/telegraf/pull/4439): Add csv parser plugin.

 ## v1.7.4 [unreleased]
From 61e5d500baeefc32c51e449969584d6581ee4e1e Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 27 Aug 2018 13:05:41 -0700
Subject: [PATCH 0113/1815] Add metric requirements to docker README

---
 plugins/inputs/docker/README.md | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md
index a95fb61e9..b7e64af33 100644
--- a/plugins/inputs/docker/README.md
+++ b/plugins/inputs/docker/README.md
@@ -77,9 +77,6 @@ may prefer to exclude them:

 ### Metrics:

-Every effort was made to preserve the names based on the
-docker API. 
- docker
  - tags:
    - unit
  - fields:
    - n_used_file_descriptors
    - n_cpus
    - n_goroutines
    - n_listener_events
    - memory_total
-   - pool_blocksize
+   - pool_blocksize (requires devicemapper storage driver)
+
+The `docker_data` and `docker_metadata` measurements are available only for
+some storage drivers such as devicemapper.

 - docker_data
   - tags:
@@ -224,7 +224,11 @@ docker API.
   - io_serviced_recursive_write
   - container_id

-- docker_container_health
+The `docker_container_health` measurements report on a container's
+[HEALTHCHECK](https://docs.docker.com/engine/reference/builder/#healthcheck)
+status, if one is configured.
+
+- docker_container_health (container must use the HEALTHCHECK instruction)
  - tags:
    - engine_host
    - server_version
From e2b1a6bc891bb32431091b8c9f6407d5ba437b92 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 27 Aug 2018 14:47:04 -0700
Subject: [PATCH 0114/1815] Add read_buffer_size option to statsd input (#4598)

---
 plugins/inputs/statsd/README.md | 4 ++++
 plugins/inputs/statsd/statsd.go | 6 ++++++
 2 files changed, 10 insertions(+)

diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md
index 648fa72ac..85cb4a46e 100644
--- a/plugins/inputs/statsd/README.md
+++ b/plugins/inputs/statsd/README.md
@@ -58,6 +58,10 @@
   ## calculation of percentiles. Raising this limit increases the accuracy
   ## of percentiles but also increases the memory usage and cpu time.
   percentile_limit = 1000
+
+  ## Maximum socket buffer size in bytes; once the buffer fills up, metrics
+  ## will start dropping. Defaults to the OS default.
+  # read_buffer_size = 65535
 ```

 ### Description
diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go
index 3e5a73aa3..60b55887e 100644
--- a/plugins/inputs/statsd/statsd.go
+++ b/plugins/inputs/statsd/statsd.go
@@ -76,6 +76,8 @@ type Statsd struct {
 // see https://github.com/influxdata/telegraf/pull/992
 UDPPacketSize int `toml:"udp_packet_size"`

+ ReadBufferSize int `toml:"read_buffer_size"`
+
 sync.Mutex
 // Lock for preventing a data race during resource cleanup
 cleanup sync.Mutex
@@ -411,6 +413,10 @@ func (s *Statsd) udpListen() error {
 }
 log.Println("I! Statsd UDP listener listening on: ", s.UDPlistener.LocalAddr().String())

+ if s.ReadBufferSize > 0 {
+ s.UDPlistener.SetReadBuffer(s.ReadBufferSize)
+ }
+
 buf := make([]byte, UDP_MAX_PACKET_SIZE)
 for {
 select {
From 3adcc3a93d9a7cd27f07dfb5b5e4c2b6476533ba Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 27 Aug 2018 14:48:09 -0700
Subject: [PATCH 0115/1815] Update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a7b7743ec..5f5e1898a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -78,6 +78,7 @@
 - [#4571](https://github.com/influxdata/telegraf/pull/4571): Add forwarded records to sqlserver input.
 - [#4585](https://github.com/influxdata/telegraf/pull/4585): Add Kibana input plugin.
 - [#4439](https://github.com/influxdata/telegraf/pull/4439): Add csv parser plugin.
+- [#4598](https://github.com/influxdata/telegraf/pull/4598): Add read_buffer_size option to statsd input. 
## v1.7.4 [unreleased] From 1e3edbc55dc1fab5f7530f078384cfbdf72236d8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 29 Aug 2018 11:38:40 -0700 Subject: [PATCH 0116/1815] Ensure channel closed if an error occurs in cgroup input (#4606) --- plugins/inputs/cgroup/cgroup_linux.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/cgroup/cgroup_linux.go b/plugins/inputs/cgroup/cgroup_linux.go index 0765416af..80c15c963 100644 --- a/plugins/inputs/cgroup/cgroup_linux.go +++ b/plugins/inputs/cgroup/cgroup_linux.go @@ -81,6 +81,7 @@ func isDir(path string) (bool, error) { } func (g *CGroup) generateDirs(list chan<- pathInfo) { + defer close(list) for _, dir := range g.Paths { // getting all dirs that match the pattern 'dir' items, err := filepath.Glob(dir) @@ -101,10 +102,10 @@ func (g *CGroup) generateDirs(list chan<- pathInfo) { } } } - close(list) } func (g *CGroup) generateFiles(dir string, list chan<- pathInfo) { + defer close(list) for _, file := range g.Files { // getting all file paths that match the pattern 'dir + file' // path.Base make sure that file variable does not contains part of path @@ -126,7 +127,6 @@ func (g *CGroup) generateFiles(dir string, list chan<- pathInfo) { } } } - close(list) } // ====================================================================== From fed959531ce74a12299b65c7abaec082e7769136 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 29 Aug 2018 11:39:10 -0700 Subject: [PATCH 0117/1815] Remove timeout deadline for udp syslog input. (#4605) --- plugins/inputs/syslog/syslog.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go index 5b22cbcad..034e03df2 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -213,10 +213,6 @@ func (s *Syslog) listenPacket(acc telegraf.Accumulator) { break } - if s.ReadTimeout != nil && s.ReadTimeout.Duration > 0 { - s.udpListener.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration)) - } - message, err := p.Parse(b[:n], &s.BestEffort) if message != nil { acc.AddFields("syslog", fields(*message, s), tags(*message), s.time()) From 59a64651f1d5c1f02d4b2e618d8fd8b2a40a2340 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 29 Aug 2018 11:43:14 -0700 Subject: [PATCH 0118/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5f5e1898a..552be8021 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -88,6 +88,8 @@ - [#4554](https://github.com/influxdata/telegraf/pull/4554): Fix powerdns input tests. - [#4584](https://github.com/influxdata/telegraf/pull/4584): Fix burrow_group offset calculation for burrow input. - [#4550](https://github.com/influxdata/telegraf/pull/4550): Add result_code value for errors running ping command. +- [#4605](https://github.com/influxdata/telegraf/pull/4605): Remove timeout deadline for udp syslog input. +- [#4601](https://github.com/influxdata/telegraf/pull/4601): Ensure channel closed if an error occurs in cgroup input. 
## v1.7.3 [2018-08-07] From 8b2d64585d8a39d9c18c659d73f959870b6952f0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 29 Aug 2018 12:28:00 -0700 Subject: [PATCH 0119/1815] Use the correct GOARM value in the armel package (#4608) --- scripts/build.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/scripts/build.py b/scripts/build.py index 27f47f42f..bfaba5a8a 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -448,13 +448,14 @@ def build(version=None, build_command += "CGO_ENABLED=0 " # Handle variations in architecture output + goarch = arch if arch == "i386" or arch == "i686": - arch = "386" + goarch = "386" elif "arm64" in arch: - arch = "arm64" + goarch = "arm64" elif "arm" in arch: - arch = "arm" - build_command += "GOOS={} GOARCH={} ".format(platform, arch) + goarch = "arm" + build_command += "GOOS={} GOARCH={} ".format(platform, goarch) if "arm" in arch: if arch == "armel": From 7b05993a6e522e3761d7cbceb57387211a1ca73d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 29 Aug 2018 12:28:29 -0700 Subject: [PATCH 0120/1815] Fix sending of basic auth credentials in http output (#4609) --- plugins/outputs/http/http.go | 4 ++++ plugins/outputs/http/http_test.go | 10 ++++------ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index e36460ac8..91c2954cd 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -131,6 +131,10 @@ func (h *HTTP) write(reqBody []byte) error { return err } + if h.Username != "" || h.Password != "" { + req.SetBasicAuth(h.Username, h.Password) + } + req.Header.Set("Content-Type", defaultContentType) for k, v := range h.Headers { req.Header.Set(k, v) diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index 1d511d85b..daec176be 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -235,10 +235,8 @@ func TestBasicAuth(t *testing.T) { require.NoError(t, err) tests := []struct { - name string - plugin *HTTP - username string - password string + name string + plugin *HTTP }{ { name: "default", @@ -274,8 +272,8 @@ func TestBasicAuth(t *testing.T) { t.Run(tt.name, func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { username, password, _ := r.BasicAuth() - require.Equal(t, tt.username, username) - require.Equal(t, tt.password, password) + require.Equal(t, tt.plugin.Username, username) + require.Equal(t, tt.plugin.Password, password) w.WriteHeader(http.StatusOK) }) From 687ef23596a9a9c62a7ea1732743cb19c7df3a32 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 29 Aug 2018 12:31:43 -0700 Subject: [PATCH 0121/1815] Update changelog --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 552be8021..fcb1e3c6e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -89,7 +89,9 @@ - [#4584](https://github.com/influxdata/telegraf/pull/4584): Fix burrow_group offset calculation for burrow input. - [#4550](https://github.com/influxdata/telegraf/pull/4550): Add result_code value for errors running ping command. - [#4605](https://github.com/influxdata/telegraf/pull/4605): Remove timeout deadline for udp syslog input. -- [#4601](https://github.com/influxdata/telegraf/pull/4601): Ensure channel closed if an error occurs in cgroup input. +- [#4601](https://github.com/influxdata/telegraf/issues/4601): Ensure channel closed if an error occurs in cgroup input. 
+- [#4544](https://github.com/influxdata/telegraf/issues/4544): Fix sending of basic auth credentials in http output. +- [#4526](https://github.com/influxdata/telegraf/issues/4526): Use the correct GOARM value in the armel package. ## v1.7.3 [2018-08-07] From f6b08df163ad833dea8281c72b3c5a2e68d216b0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 29 Aug 2018 13:20:49 -0700 Subject: [PATCH 0122/1815] Set 1.7.4 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fcb1e3c6e..5d27b826b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -80,7 +80,7 @@ - [#4439](https://github.com/influxdata/telegraf/pull/4439): Add csv parser plugin. - [#4598](https://github.com/influxdata/telegraf/pull/4598): Add read_buffer_size option to statsd input. -## v1.7.4 [unreleased] +## v1.7.4 [2018-08-29] ### Bugfixes From 5420e13f1417634d30ee4cfa044eb82301d9dfc5 Mon Sep 17 00:00:00 2001 From: Ayrdrie Date: Wed, 29 Aug 2018 19:11:13 -0700 Subject: [PATCH 0123/1815] Fix divide by zero in logparser input (#4338) --- plugins/parsers/grok/parser.go | 7 ++++++- plugins/parsers/grok/parser_test.go | 16 ++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/plugins/parsers/grok/parser.go b/plugins/parsers/grok/parser.go index bc65588eb..c1ebf9003 100644 --- a/plugins/parsers/grok/parser.go +++ b/plugins/parsers/grok/parser.go @@ -344,6 +344,9 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { v = strings.Replace(v, ",", ".", -1) ts, err := time.ParseInLocation(t, v, p.loc) if err == nil { + if ts.Year() == 0 { + ts = ts.AddDate(timestamp.Year(), 0, 0) + } timestamp = ts } else { log.Printf("E! Error parsing %s to time layout [%s]: %s", v, t, err) @@ -485,6 +488,9 @@ type tsModder struct { // most significant time unit of ts. // ie, if the input is at ms precision, it will increment it 1µs. 
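// For example (illustrative values): two metrics arriving with the identical
// ms-precision timestamp 12:00:00.001 leave as .001000 and .001001, so
// neither point is later dropped as a duplicate.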
func (t *tsModder) tsMod(ts time.Time) time.Time {
+ if ts.IsZero() {
+ return ts
+ }
 defer func() { t.last = ts }()
 // don't mod the time if we don't need to
 if t.last.IsZero() || ts.IsZero() {
@@ -498,7 +504,6 @@
 t.rollover = 0
 return ts
 }
-
 if ts.Equal(t.last) {
 t.dupe = ts
 }
diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go
index 8133d3021..60348fc63 100644
--- a/plugins/parsers/grok/parser_test.go
+++ b/plugins/parsers/grok/parser_test.go
@@ -1009,3 +1009,19 @@ func TestMeasurementModifierNoName(t *testing.T) {
 require.NoError(t, err)
 require.Equal(t, m.Name(), "hello")
}
+
+func TestEmptyYearInTimestamp(t *testing.T) {
+ p := &Parser{
+ Patterns: []string{`%{APPLE_SYSLOG_TIME_SHORT:timestamp:ts-"Jan 2 15:04:05"} %{HOSTNAME} %{APP_NAME:app_name}\[%{NUMBER:pid:int}\]%{GREEDYDATA:message}`},
+ CustomPatterns: `
+ APPLE_SYSLOG_TIME_SHORT %{MONTH} +%{MONTHDAY} %{TIME}
+ APP_NAME [a-zA-Z0-9\.]+
+ `,
+ }
+ require.NoError(t, p.Compile())
+ p.ParseLine("Nov 6 13:57:03 generic iTunes[6504]: info> Scale factor of main display = 2.0")
+ m, err := p.ParseLine("Nov 6 13:57:03 generic iTunes[6504]: objc[6504]: Object descriptor was null.")
+ require.NoError(t, err)
+ require.NotNil(t, m)
+ require.Equal(t, 2018, m.Time().Year())
+}
From e7b50384cf13781ca2e0c332b08f656d23ed2bda Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 29 Aug 2018 19:12:38 -0700
Subject: [PATCH 0124/1815] Update changelog

---
 CHANGELOG.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5d27b826b..42ee8fd6d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -80,6 +80,10 @@
 - [#4439](https://github.com/influxdata/telegraf/pull/4439): Add csv parser plugin.
 - [#4598](https://github.com/influxdata/telegraf/pull/4598): Add read_buffer_size option to statsd input.

+### Bugfixes
+
+- [#3438](https://github.com/influxdata/telegraf/issues/3438): Fix divide by zero in logparser input.
+
 ## v1.7.4 [2018-08-29]
From ca9505a3b19220e8685a4ee0bf985ba194b7e11b Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Fri, 31 Aug 2018 13:59:30 -0700
Subject: [PATCH 0125/1815] Update ping input readme

---
 plugins/inputs/ping/README.md | 93 ++++++++++++++++++++---------------
 plugins/inputs/ping/ping.go | 21 ++++----
 2 files changed, 65 insertions(+), 49 deletions(-)

diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md
index 846430a44..4996fdc37 100644
--- a/plugins/inputs/ping/README.md
+++ b/plugins/inputs/ping/README.md
@@ -1,55 +1,68 @@
-# Ping Input plugin
+# Ping Input Plugin

-This input plugin will measures the round-trip
+Sends a ping message by executing the system ping command and reports the results.
+
+Currently there is no support for GNU Inetutils; use iputils-ping
+instead:
+```
+apt-get install iputils-ping
+```

 ### Configuration:

-```
-# NOTE: this plugin forks the ping command. You may need to set capabilities
-# via setcap cap_net_raw+p /bin/ping
+```toml
 [[inputs.ping]]
-## List of urls to ping
-urls = ["www.google.com"] # required
-## number of pings to send per collection (ping -c )
-# count = 1
-## interval, in s, at which to ping. 0 == default (ping -i )
-## Not available in Windows.
-# ping_interval = 1.0
-## per-ping timeout, in s. 0 == no timeout (ping -W )
-# timeout = 1.0
-## total-ping deadline, in s. 
0 == no deadline (ping -w ) -# deadline = 10 -## interface or source address to send ping from (ping -I ) -## on Darwin and Freebsd only source address possible: (ping -S ) -# interface = "" + ## List of urls to ping + urls = ["example.org"] + + ## Number of pings to send per collection (ping -c ) + # count = 1 + + ## Interval, in s, at which to ping. 0 == default (ping -i ) + ## Not available in Windows. + # ping_interval = 1.0 + + ## Per-ping timeout, in s. 0 == no timeout (ping -W ) + # timeout = 1.0 + + ## Total-ping deadline, in s. 0 == no deadline (ping -w ) + # deadline = 10 + + ## Interface or source address to send ping from (ping -I ) + ## on Darwin and Freebsd only source address possible: (ping -S ) + # interface = "" ``` -### Measurements & Fields: +### Metrics: -- packets_transmitted ( from ping output ) -- reply_received ( increasing only on valid metric from echo replay, eg. 'Destination net unreachable' reply will increment packets_received but not reply_received ) -- packets_received ( from ping output ) -- percent_reply_loss ( compute from packets_transmitted and reply_received ) -- percent_packets_loss ( compute from packets_transmitted and packets_received ) -- errors ( when host can not be found or wrong parameters is passed to application ) -- response time - - average_response_ms ( compute from minimum_response_ms and maximum_response_ms ) - - minimum_response_ms ( from ping output ) - - maximum_response_ms ( from ping output ) -- result_code - - 0: success - - 1: no such host - - 2: ping error +- ping + - tags: + - url + - fields: + - packets_transmitted (integer) + - packets_received (integer) + - percent_packets_loss (float) + - average_response_ms (integer) + - minimum_response_ms (integer) + - maximum_response_ms (integer) + - standard_deviation_ms (integer, Not available on Windows) + - errors (float, Windows only) + - reply_received (integer, Windows only) + - percent_reply_loss (float, Windows only) + - result_code (int, success = 0, no such host = 1, ping error = 2) -### Tags: +##### reply_received vs packets_received -- host -- url +On Windows systems, "Destination net unreachable" reply will increment `packets_received` but not `reply_received`. ### Example Output: +**Windows:** ``` -$ ./telegraf --config telegraf.conf --input-filter ping --test -* Plugin: ping, Collection 1 -ping,host=WIN-PBAPLP511R7,url=www.google.com result_code=0i,average_response_ms=7i,maximum_response_ms=9i,minimum_response_ms=7i,packets_received=4i,packets_transmitted=4i,percent_packet_loss=0,percent_reply_loss=0,reply_received=4i 1469879119000000000 +ping,url=example.org result_code=0i,average_response_ms=7i,maximum_response_ms=9i,minimum_response_ms=7i,packets_received=4i,packets_transmitted=4i,percent_packet_loss=0,percent_reply_loss=0,reply_received=4i 1469879119000000000 +``` + +**Linux:** +``` +ping,url=example.org average_response_ms=23.066,maximum_response_ms=24.64,minimum_response_ms=22.451,packets_received=5i,packets_transmitted=5i,percent_packet_loss=0,result_code=0i,standard_deviation_ms=0.809 1535747258000000000 ``` diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 46afb206a..430cbe6d4 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -52,20 +52,23 @@ func (_ *Ping) Description() string { } const sampleConfig = ` - ## NOTE: this plugin forks the ping command. 
You may need to set capabilities - ## via setcap cap_net_raw+p /bin/ping - # ## List of urls to ping - urls = ["www.google.com"] # required - ## number of pings to send per collection (ping -c ) + urls = ["example.org"] + + ## Number of pings to send per collection (ping -c ) # count = 1 - ## interval, in s, at which to ping. 0 == default (ping -i ) + + ## Interval, in s, at which to ping. 0 == default (ping -i ) + ## Not available in Windows. # ping_interval = 1.0 - ## per-ping timeout, in s. 0 == no timeout (ping -W ) + + ## Per-ping timeout, in s. 0 == no timeout (ping -W ) # timeout = 1.0 - ## total-ping deadline, in s. 0 == no deadline (ping -w ) + + ## Total-ping deadline, in s. 0 == no deadline (ping -w ) # deadline = 10 - ## interface or source address to send ping from (ping -I ) + + ## Interface or source address to send ping from (ping -I ) ## on Darwin and Freebsd only source address possible: (ping -S ) # interface = "" ` From 90b4a1e4358fc82af4aec10b795f49690e223179 Mon Sep 17 00:00:00 2001 From: Vlasta Hajek Date: Sun, 2 Sep 2018 03:59:03 +0200 Subject: [PATCH 0126/1815] Fix instance and object name in performance counters with backslashes (#4572) --- .../win_perf_counters/win_perf_counters.go | 66 +++++++++++++------ .../win_perf_counters_test.go | 46 ++++++++++++- 2 files changed, 92 insertions(+), 20 deletions(-) diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index d2ace5231..06a1a333c 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "log" - "regexp" "strings" "time" @@ -119,28 +118,57 @@ type instanceGrouping struct { var sanitizedChars = strings.NewReplacer("/sec", "_persec", "/Sec", "_persec", " ", "_", "%", "Percent", `\`, "") -//General Counter path pattern is: \\computer\object(parent/instance#index)\counter -//parent/instance#index part is skipped in single instance objects (e.g. Memory): \\computer\object\counter +// extractCounterInfoFromCounterPath gets object name, instance name (if available) and counter name from counter path +// General Counter path pattern is: \\computer\object(parent/instance#index)\counter +// parent/instance#index part is skipped in single instance objects (e.g. 
Memory): \\computer\object\counter +func extractCounterInfoFromCounterPath(counterPath string) (object string, instance string, counter string, err error) { -var counterPathRE = regexp.MustCompile(`.*\\(.*)\\(.*)`) -var objectInstanceRE = regexp.MustCompile(`(.*)\((.*)\)`) + rightObjectBorderIndex := -1 + leftObjectBorderIndex := -1 + leftCounterBorderIndex := -1 + rightInstanceBorderIndex := -1 + leftInstanceBorderIndex := -1 + bracketLevel := 0 -//extractObjectInstanceCounterFromQuery gets object name, instance name (if available) and counter name from counter path -func extractObjectInstanceCounterFromQuery(query string) (object string, instance string, counter string, err error) { - pathParts := counterPathRE.FindAllStringSubmatch(query, -1) - if pathParts == nil || len(pathParts[0]) != 3 { - err = errors.New("Could not extract counter info from: " + query) + for i := len(counterPath) - 1; i >= 0; i-- { + switch counterPath[i] { + case '\\': + if bracketLevel == 0 { + if leftCounterBorderIndex == -1 { + leftCounterBorderIndex = i + } else if leftObjectBorderIndex == -1 { + leftObjectBorderIndex = i + } + } + case '(': + bracketLevel-- + if leftInstanceBorderIndex == -1 && bracketLevel == 0 && leftObjectBorderIndex == -1 && leftCounterBorderIndex > -1 { + leftInstanceBorderIndex = i + rightObjectBorderIndex = i + } + case ')': + if rightInstanceBorderIndex == -1 && bracketLevel == 0 && leftCounterBorderIndex > -1 { + rightInstanceBorderIndex = i + } + bracketLevel++ + } + } + if rightObjectBorderIndex == -1 { + rightObjectBorderIndex = leftCounterBorderIndex + } + if rightObjectBorderIndex == -1 || leftObjectBorderIndex == -1 { + err = errors.New("cannot parse object from: " + counterPath) return } - counter = pathParts[0][2] - //try to get instance name - objectInstanceParts := objectInstanceRE.FindAllStringSubmatch(pathParts[0][1], -1) - if objectInstanceParts == nil || len(objectInstanceParts[0]) != 3 { - object = pathParts[0][1] - } else { - object = objectInstanceParts[0][1] - instance = objectInstanceParts[0][2] + + if leftInstanceBorderIndex > -1 && rightInstanceBorderIndex > -1 { + instance = counterPath[leftInstanceBorderIndex+1 : rightInstanceBorderIndex] + } else if (leftInstanceBorderIndex == -1 && rightInstanceBorderIndex > -1) || (leftInstanceBorderIndex > -1 && rightInstanceBorderIndex == -1) { + err = errors.New("cannot parse instance from: " + counterPath) + return } + object = counterPath[leftObjectBorderIndex+1 : rightObjectBorderIndex] + counter = counterPath[leftCounterBorderIndex+1:] return } @@ -184,7 +212,7 @@ func (m *Win_PerfCounters) AddItem(counterPath string, objectName string, instan var err error counterHandle, err := m.query.AddCounterToQuery(counterPath) - objectName, instance, counterName, err = extractObjectInstanceCounterFromQuery(counterPath) + objectName, instance, counterName, err = extractCounterInfoFromCounterPath(counterPath) if err != nil { return err } diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_test.go index 07e1941a9..81959ef8c 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_test.go @@ -28,7 +28,7 @@ type FakePerformanceQuery struct { var MetricTime = time.Date(2018, 5, 28, 12, 0, 0, 0, time.UTC) func (m *testCounter) ToCounterValue() *CounterValue { - _, inst, _, _ := extractObjectInstanceCounterFromQuery(m.path) + _, inst, _, _ := extractCounterInfoFromCounterPath(m.path) if inst 
== "" { inst = "--" } @@ -211,6 +211,50 @@ func createCounterMap(counterPaths []string, values []float64) map[string]testCo return counters } +var counterPathsAndRes = map[string][]string{ + "\\O\\CT": {"O", "", "CT"}, + "\\O\\CT(i)": {"O", "", "CT(i)"}, + "\\O\\CT(d:\\f\\i)": {"O", "", "CT(d:\\f\\i)"}, + "\\\\CM\\O\\CT": {"O", "", "CT"}, + "\\O(I)\\CT": {"O", "I", "CT"}, + "\\O(I)\\CT(i)": {"O", "I", "CT(i)"}, + "\\O(I)\\CT(i)x": {"O", "I", "CT(i)x"}, + "\\O(I)\\CT(d:\\f\\i)": {"O", "I", "CT(d:\\f\\i)"}, + "\\\\CM\\O(I)\\CT": {"O", "I", "CT"}, + "\\O(d:\\f\\I)\\CT": {"O", "d:\\f\\I", "CT"}, + "\\O(d:\\f\\I(d))\\CT": {"O", "d:\\f\\I(d)", "CT"}, + "\\O(d:\\f\\I(d)x)\\CT": {"O", "d:\\f\\I(d)x", "CT"}, + "\\O(d:\\f\\I)\\CT(i)": {"O", "d:\\f\\I", "CT(i)"}, + "\\O(d:\\f\\I)\\CT(d:\\f\\i)": {"O", "d:\\f\\I", "CT(d:\\f\\i)"}, + "\\\\CM\\O(d:\\f\\I)\\CT": {"O", "d:\\f\\I", "CT"}, + "\\\\CM\\O(d:\\f\\I)\\CT(d:\\f\\i)": {"O", "d:\\f\\I", "CT(d:\\f\\i)"}, + "\\O(I(info))\\CT": {"O", "I(info)", "CT"}, + "\\\\CM\\O(I(info))\\CT": {"O", "I(info)", "CT"}, +} + +var invalidCounterPaths = []string{ + "\\O(I\\C", + "\\OI)\\C", + "\\O(I\\C", + "\\O/C", + "\\O(I/C", + "\\O(I/C)", + "\\O(I\\)C", + "\\O(I\\C)", +} + +func TestCounterPathParsing(t *testing.T) { + for path, vals := range counterPathsAndRes { + o, i, c, err := extractCounterInfoFromCounterPath(path) + require.NoError(t, err) + require.True(t, assert.ObjectsAreEqual(vals, []string{o, i, c}), "arrays: %#v and %#v are not equal", vals, []string{o, i, c}) + } + for _, path := range invalidCounterPaths { + _, _, _, err := extractCounterInfoFromCounterPath(path) + require.Error(t, err) + } +} + func TestAddItemSimple(t *testing.T) { var err error cps1 := []string{"\\O(I)\\C"} From ab058b396f19279f63a7bfa0fb7a724eb501065d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Sat, 1 Sep 2018 19:01:02 -0700 Subject: [PATCH 0127/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 42ee8fd6d..fbe42f2d9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -83,6 +83,7 @@ ### Bugfixes - [#3438](https://github.com/influxdata/telegraf/issues/3438): Fix divide by zero in logparser input. +- [#4499](https://github.com/influxdata/telegraf/issues/4499): Fix instance and object name in performance counters with backslashes. ## v1.7.4 [2018-08-29] From 87b8141d1376ce2c90cbaf8e5520b0b8bcfcf1f9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 4 Sep 2018 13:19:54 -0700 Subject: [PATCH 0128/1815] Make influxdb output log message style more consistent --- plugins/outputs/influxdb/influxdb.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index bd53d4ed4..06079dfc5 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -216,7 +216,7 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { } } - log.Printf("E! [outputs.influxdb]: when writing to [%s]: %v", client.URL(), err) + log.Printf("E! 
[outputs.influxdb] when writing to [%s]: %v", client.URL(), err) } return errors.New("could not write any address") From c7e2945a468230e21de8448aff5313bf6a4a8d46 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 4 Sep 2018 13:21:58 -0700 Subject: [PATCH 0129/1815] Fix exchange_durability sample config in amqp output --- plugins/outputs/amqp/README.md | 4 ++-- plugins/outputs/amqp/amqp.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/amqp/README.md b/plugins/outputs/amqp/README.md index 6002311f6..fe44ea4ed 100644 --- a/plugins/outputs/amqp/README.md +++ b/plugins/outputs/amqp/README.md @@ -35,8 +35,8 @@ For an introduction to AMQP see: ## If true, exchange will be passively declared. # exchange_declare_passive = false - ## If true, exchange will be created as a durable exchange. - # exchange_durable = true + ## Exchange durability can be either "transient" or "durable". + # exchange_durability = "durable" ## Additional exchange arguments. # exchange_arguments = { } diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index a69db1e6d..a41f0a1fe 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -93,8 +93,8 @@ var sampleConfig = ` ## If true, exchange will be passively declared. # exchange_declare_passive = false - ## If true, exchange will be created as a durable exchange. - # exchange_durable = true + ## Exchange durability can be either "transient" or "durable". + # exchange_durability = "durable" ## Additional exchange arguments. # exchange_arguments = { } From 13029a1fa4c5220bf9f5778bdadb9a4bd3c85974 Mon Sep 17 00:00:00 2001 From: Olli Janatuinen Date: Thu, 6 Sep 2018 00:19:56 +0300 Subject: [PATCH 0130/1815] Corrected application insights example config (#4635) --- plugins/outputs/application_insights/README.md | 4 ++-- plugins/outputs/application_insights/application_insights.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/outputs/application_insights/README.md b/plugins/outputs/application_insights/README.md index 08850a3e6..c64e84488 100644 --- a/plugins/outputs/application_insights/README.md +++ b/plugins/outputs/application_insights/README.md @@ -12,7 +12,7 @@ This plugin writes telegraf metrics to [Azure Application Insights](https://azur # timeout = "5s" ## Enable additional diagnostic logging. - # enable_diagnosic_logging = false + # enable_diagnostic_logging = false ## Context Tag Sources add Application Insights context tags to a tag value. ## @@ -37,7 +37,7 @@ foo,host=a first=42,second=43 1525293034000000000 In the special case of a single field named `value`, a single telemetry record is created named using only the measurement name -**Example:** Create a telemetry record `foo`: +**Example:** Create a telemetry record `bar`: ``` bar,host=a value=42 1525293034000000000 ``` diff --git a/plugins/outputs/application_insights/application_insights.go b/plugins/outputs/application_insights/application_insights.go index 26f3f8dc0..3da420218 100644 --- a/plugins/outputs/application_insights/application_insights.go +++ b/plugins/outputs/application_insights/application_insights.go @@ -48,7 +48,7 @@ var ( # timeout = "5s" ## Enable additional diagnostic logging. - # enable_diagnosic_logging = false + # enable_diagnostic_logging = false ## Context Tag Sources add Application Insights context tags to a tag value. 
## From a47149765eb10de681d6ee640f8a44936b73210c Mon Sep 17 00:00:00 2001 From: Andrew Date: Wed, 5 Sep 2018 23:27:52 +0200 Subject: [PATCH 0131/1815] Add queue_durability parameter to amqp_consumer input (#4628) --- plugins/inputs/amqp_consumer/README.md | 4 +++ plugins/inputs/amqp_consumer/amqp_consumer.go | 34 ++++++++++++++----- 2 files changed, 29 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/amqp_consumer/README.md b/plugins/inputs/amqp_consumer/README.md index bc42f9107..133531421 100644 --- a/plugins/inputs/amqp_consumer/README.md +++ b/plugins/inputs/amqp_consumer/README.md @@ -46,6 +46,10 @@ The following defaults are known to work with RabbitMQ: ## AMQP queue name queue = "telegraf" + + ## AMQP queue durability can be "transient" or "durable". + queue_durability = "durable" + ## Binding Key binding_key = "#" diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go index 739ed76e4..33cd9971b 100644 --- a/plugins/inputs/amqp_consumer/amqp_consumer.go +++ b/plugins/inputs/amqp_consumer/amqp_consumer.go @@ -30,7 +30,9 @@ type AMQPConsumer struct { ExchangeArguments map[string]string `toml:"exchange_arguments"` // Queue Name - Queue string + Queue string `toml:"queue"` + QueueDurability string `toml:"queue_durability"` + // Binding Key BindingKey string `toml:"binding_key"` @@ -64,6 +66,8 @@ const ( DefaultExchangeType = "topic" DefaultExchangeDurability = "durable" + DefaultQueueDurability = "durable" + DefaultPrefetchCount = 50 ) @@ -98,10 +102,13 @@ func (a *AMQPConsumer) SampleConfig() string { # exchange_arguments = { } # exchange_arguments = {"hash_propery" = "timestamp"} - ## AMQP queue name + ## AMQP queue name. queue = "telegraf" - ## Binding Key + ## AMQP queue durability can be "transient" or "durable". + queue_durability = "durable" + + ## Binding Key. binding_key = "#" ## Maximum number of messages server should give to the worker. 
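The hunk that follows wires this new setting into the queue declaration. As a minimal sketch of the intended semantics (the `queueDurable` helper name is illustrative, not part of the patch): only the literal value `"transient"` opts out of durability, while any other value, including the default `"durable"`, keeps the queue durable, so existing configurations are unaffected.

```go
package main

import "fmt"

// queueDurable maps the queue_durability setting onto AMQP's durable flag.
// Only the literal "transient" yields a non-durable queue; any other value
// (including the default "durable") keeps the queue durable.
func queueDurable(durability string) bool {
	return durability != "transient"
}

func main() {
	fmt.Println(queueDurable("transient")) // false: queue is dropped on broker restart
	fmt.Println(queueDurable("durable"))   // true: queue survives a broker restart
}
```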
@@ -260,13 +267,21 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err return nil, err } + var queueDurable = true + switch a.QueueDurability { + case "transient": + queueDurable = false + default: + queueDurable = true + } + q, err := ch.QueueDeclare( - a.Queue, // queue - true, // durable - false, // delete when unused - false, // exclusive - false, // no-wait - nil, // arguments + a.Queue, // queue + queueDurable, // durable + false, // delete when unused + false, // exclusive + false, // no-wait + nil, // arguments ) if err != nil { return nil, fmt.Errorf("Failed to declare a queue: %s", err) @@ -380,6 +395,7 @@ func init() { AuthMethod: DefaultAuthMethod, ExchangeType: DefaultExchangeType, ExchangeDurability: DefaultExchangeDurability, + QueueDurability: DefaultQueueDurability, PrefetchCount: DefaultPrefetchCount, } }) From f70d6519e77d71c761262391a1418277cfa3d600 Mon Sep 17 00:00:00 2001 From: Gunnar <628831+gunnaraasen@users.noreply.github.com> Date: Wed, 5 Sep 2018 14:50:32 -0700 Subject: [PATCH 0132/1815] Add Azure Monitor output plugin (#4089) --- Gopkg.lock | 22 +- Gopkg.toml | 4 + docs/LICENSE_OF_DEPENDENCIES.md | 2 + internal/models/running_output.go | 13 + output.go | 6 + plugins/outputs/all/all.go | 1 + plugins/outputs/azure_monitor/README.md | 139 ++++ .../outputs/azure_monitor/azure_monitor.go | 615 ++++++++++++++++++ .../azure_monitor/azure_monitor_test.go | 361 ++++++++++ plugins/parsers/logfmt/parser_test.go | 119 ++-- testutil/metric.go | 87 ++- testutil/metric_test.go | 37 +- 12 files changed, 1318 insertions(+), 88 deletions(-) create mode 100644 plugins/outputs/azure_monitor/README.md create mode 100644 plugins/outputs/azure_monitor/azure_monitor.go create mode 100644 plugins/outputs/azure_monitor/azure_monitor_test.go diff --git a/Gopkg.lock b/Gopkg.lock index ef76419ba..0f29ab59a 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -21,6 +21,18 @@ revision = "2ce144541b8903101fb8f1483cc0497a68798122" version = "v0.3.0" +[[projects]] + name = "github.com/Azure/go-autorest" + packages = [ + "autorest", + "autorest/adal", + "autorest/azure", + "autorest/azure/auth", + "autorest/date" + ] + revision = "1f7cd6cfe0adea687ad44a512dfe76140f804318" + version = "v10.12.0" + [[projects]] branch = "master" digest = "1:298712a3ee36b59c3ca91f4183bd75d174d5eaa8b4aed5072831f126e2e752f6" @@ -224,6 +236,12 @@ revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" version = "v3.2.0" +[[projects]] + branch = "master" + name = "github.com/dimchansky/utfbom" + packages = ["."] + revision = "6c6132ff69f0f6c088739067407b5d32c52e1d0f" + [[projects]] digest = "1:522eff2a1f014a64fb403db60fc0110653e4dc5b59779894d208e697b0708ddc" name = "github.com/docker/distribution" @@ -975,7 +993,9 @@ "ed25519/internal/edwards25519", "md4", "pbkdf2", - "ssh/terminal", + "pkcs12", + "pkcs12/internal/rc2", + "ssh/terminal" ] pruneopts = "" revision = "a2144134853fc9a27a7b1e3eb4f19f1a76df13c9" diff --git a/Gopkg.toml b/Gopkg.toml index d282e1ebd..f942f3401 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -221,3 +221,7 @@ [[override]] source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz" name = "gopkg.in/fsnotify.v1" + +[[constraint]] + name = "github.com/Azure/go-autorest" + version = "10.12.0" diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 2d215984b..36f038994 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -8,6 +8,7 @@ following works: - github.com/aerospike/aerospike-client-go 
[APACHE](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE)
 - github.com/amir/raidman [PUBLIC DOMAIN](https://github.com/amir/raidman/blob/master/UNLICENSE)
 - github.com/armon/go-metrics [MIT](https://github.com/armon/go-metrics/blob/master/LICENSE)
+- github.com/Azure/go-autorest [APACHE](https://github.com/Azure/go-autorest/blob/master/LICENSE)
 - github.com/aws/aws-sdk-go [APACHE](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
 - github.com/beorn7/perks [MIT](https://github.com/beorn7/perks/blob/master/LICENSE)
 - github.com/boltdb/bolt [MIT](https://github.com/boltdb/bolt/blob/master/LICENSE)
@@ -19,6 +20,7 @@ following works:
 - github.com/couchbase/goutils [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
 - github.com/dancannon/gorethink [APACHE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
 - github.com/davecgh/go-spew [ISC](https://github.com/davecgh/go-spew/blob/master/LICENSE)
+- github.com/dimchansky/utfbom [APACHE](https://github.com/dimchansky/utfbom/blob/master/LICENSE)
 - github.com/docker/docker [APACHE](https://github.com/docker/docker/blob/master/LICENSE)
 - github.com/docker/cli [APACHE](https://github.com/docker/cli/blob/master/LICENSE)
 - github.com/eapache/go-resiliency [MIT](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
diff --git a/internal/models/running_output.go b/internal/models/running_output.go
index 25576d745..c926917d6 100644
--- a/internal/models/running_output.go
+++ b/internal/models/running_output.go
@@ -114,6 +114,13 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
 		m, _ = metric.New(name, tags, fields, t, tp)
 	}
 
+	if output, ok := ro.Output.(telegraf.AggregatingOutput); ok {
+		ro.Lock()
+		defer ro.Unlock()
+		output.Add(m)
+		return
+	}
+
 	ro.metrics.Add(m)
 	if ro.metrics.Len() == ro.MetricBatchSize {
 		batch := ro.metrics.Batch(ro.MetricBatchSize)
@@ -127,6 +134,12 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
 
 // Write writes all cached points to this output.
 func (ro *RunningOutput) Write() error {
+	if output, ok := ro.Output.(telegraf.AggregatingOutput); ok {
+		metrics := output.Push()
+		ro.metrics.Add(metrics...)
+		output.Reset()
+	}
+
 	nFails, nMetrics := ro.failMetrics.Len(), ro.metrics.Len()
 	ro.BufferSize.Set(int64(nFails + nMetrics))
 	log.Printf("D! Output [%s] buffer fullness: %d / %d metrics. ",
diff --git a/output.go b/output.go
index d66ea4556..39b371ac4 100644
--- a/output.go
+++ b/output.go
@@ -13,6 +13,12 @@ type Output interface {
 	Write(metrics []Metric) error
 }
 
+type AggregatingOutput interface {
+	Add(in Metric)
+	Push() []Metric
+	Reset()
+}
+
 type ServiceOutput interface {
 	// Connect to the Output
 	Connect() error
diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go
index 037807c22..4d49c0c6e 100644
--- a/plugins/outputs/all/all.go
+++ b/plugins/outputs/all/all.go
@@ -4,6 +4,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/outputs/amon"
 	_ "github.com/influxdata/telegraf/plugins/outputs/amqp"
 	_ "github.com/influxdata/telegraf/plugins/outputs/application_insights"
+	_ "github.com/influxdata/telegraf/plugins/outputs/azure_monitor"
 	_ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch"
 	_ "github.com/influxdata/telegraf/plugins/outputs/cratedb"
 	_ "github.com/influxdata/telegraf/plugins/outputs/datadog"
diff --git a/plugins/outputs/azure_monitor/README.md b/plugins/outputs/azure_monitor/README.md
new file mode 100644
index 000000000..cd1386136
--- /dev/null
+++ b/plugins/outputs/azure_monitor/README.md
@@ -0,0 +1,139 @@
+# Azure Monitor
+
+This plugin will send custom metrics to Azure Monitor. Azure Monitor has a
+metric resolution of one minute. To handle this in Telegraf, the Azure Monitor
+output plugin will automatically aggregate metrics into one-minute buckets,
+which are then sent to Azure Monitor on every flush interval.
+
+The metrics from each input plugin will be written to a separate Azure Monitor
+namespace, prefixed with `Telegraf/` by default. The field name for each
+metric is written as the Azure Monitor metric name. All field values are
+written as a summarized set that includes: min, max, sum, count. Tags are
+written as a dimension on each Azure Monitor metric.
+
+Since Azure Monitor only accepts numeric values, string-typed fields are
+dropped by default. There is a configuration option (`strings_as_dimensions`)
+to retain fields that contain strings as extra dimensions. Azure Monitor
+allows a maximum of 10 dimensions per metric, so any dimensions over that
+amount will be deterministically dropped.
+
+### Configuration:
+
+```toml
+[[outputs.azure_monitor]]
+  ## Timeout for HTTP writes.
+  # timeout = "20s"
+
+  ## Set the namespace prefix, defaults to "Telegraf/".
+  # namespace_prefix = "Telegraf/"
+
+  ## Azure Monitor doesn't have a string value type, so convert string
+  ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows
+  ## a maximum of 10 dimensions so Telegraf will only send the first 10
+  ## alphanumeric dimensions.
+  # strings_as_dimensions = false
+
+  ## Both region and resource_id must be set or be available via the
+  ## Instance Metadata service on Azure Virtual Machines.
+  #
+  ## Azure Region to publish metrics against.
+  ## ex: region = "southcentralus"
+  # region = ""
+  #
+  ## The Azure Resource ID against which metric will be logged, e.g.
+  ## ex: resource_id = "/subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>"
+  # resource_id = ""
+```
+
+### Setup
+
+1. [Register the `microsoft.insights` resource provider in your Azure subscription][resource provider].
+2. If using Managed Service Identities to authenticate an Azure VM,
+   [enable system-assigned managed identity][enable msi].
+3. Use a region that supports Azure Monitor Custom Metrics.
+   For regions with Custom Metrics support, an endpoint will be available with
+   the format `https://<region>.monitoring.azure.com`.
+   The following regions are currently known to be supported:
+   - East US (eastus)
+   - West US 2 (westus2)
+   - South Central US (southcentralus)
+   - West Central US (westcentralus)
+   - North Europe (northeurope)
+   - West Europe (westeurope)
+   - Southeast Asia (southeastasia)
+
+[resource provider]: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services
+[enable msi]: https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/qs-configure-portal-windows-vm
+
+### Region and Resource ID
+
+The plugin will attempt to discover the region and resource ID using the Azure
+VM Instance Metadata service. If Telegraf is not running on a virtual machine
+or the VM Instance Metadata service is not available, the following variables
+are required for the output to function.
+
+* region
+* resource_id
+
+### Authentication
+
+This plugin uses one of several different authentication methods. Note that
+the order of preference below differs from the *order* in which each method
+is checked. Here are the preferred authentication methods:
+
+1. Managed Service Identity (MSI) token
+   - This is the preferred authentication method. Telegraf will automatically
+     authenticate using this method when running on Azure VMs.
+2. AAD Application Tokens (Service Principals)
+   - Primarily useful if Telegraf is writing metrics for other resources.
+     [More information][principal].
+   - A Service Principal or User Principal needs to be assigned the `Monitoring
+     Contributor` role.
+3. AAD User Tokens (User Principals)
+   - Allows Telegraf to authenticate like a user. It is best to use this method
+     for development.
+
+[principal]: https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-application-objects
+
+The plugin will authenticate using the first available of the
+following configurations:
+
+1. **Client Credentials**: Azure AD Application ID and Secret.
+
+   Set the following Telegraf configuration variables:
+
+   - `azure_tenant_id`: Specifies the Tenant to which to authenticate.
+   - `azure_client_id`: Specifies the app client ID to use.
+   - `azure_client_secret`: Specifies the app secret to use.
+
+   Or set the following environment variables:
+
+   - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
+   - `AZURE_CLIENT_ID`: Specifies the app client ID to use.
+   - `AZURE_CLIENT_SECRET`: Specifies the app secret to use.
+
+2. **Client Certificate**: Azure AD Application ID and X.509 Certificate.
+
+   - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
+   - `AZURE_CLIENT_ID`: Specifies the app client ID to use.
+   - `AZURE_CERTIFICATE_PATH`: Specifies the certificate path to use.
+   - `AZURE_CERTIFICATE_PASSWORD`: Specifies the certificate password to use.
+
+3. **Resource Owner Password**: Azure AD User and Password. This grant type is
+   *not recommended*; use device login instead if you need interactive login.
+
+   - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
+   - `AZURE_CLIENT_ID`: Specifies the app client ID to use.
+   - `AZURE_USERNAME`: Specifies the username to use.
+   - `AZURE_PASSWORD`: Specifies the password to use.
+
+4. **Azure Managed Service Identity**: Delegate credential management to the
+   platform. Requires that code is running in Azure, e.g. on a VM. All
+   configuration is handled by Azure. See [Azure Managed Service Identity][msi]
+   for more details. Only available when using the [Azure Resource Manager][arm].
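As a minimal sketch of how this environment-driven chain is consumed (it relies on the same `auth.NewAuthorizerFromEnvironmentWithResource` helper that the plugin code below calls, with the plugin's default resource URI; error handling is simplified):

```go
package main

import (
	"log"

	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Checks, in order: client credentials, client certificate,
	// resource-owner password, then MSI, driven by which AZURE_*
	// environment variables are set.
	authorizer, err := auth.NewAuthorizerFromEnvironmentWithResource("https://monitoring.azure.com/")
	if err != nil {
		log.Fatalf("unable to build authorizer: %v", err)
	}
	_ = authorizer // its WithAuthorization() decorator is applied to each outgoing request
}
```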
+ +[msi]: https://docs.microsoft.com/en-us/azure/active-directory/msi-overview +[arm]: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-overview + +**Note: As shown above, the last option (#4) is the preferred way to +authenticate when running Telegraf on Azure VMs. diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go new file mode 100644 index 000000000..afc3a20ed --- /dev/null +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -0,0 +1,615 @@ +package azure_monitor + +import ( + "bytes" + "compress/gzip" + "encoding/binary" + "encoding/json" + "fmt" + "hash/fnv" + "io/ioutil" + "log" + "net/http" + "regexp" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure/auth" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/selfstat" +) + +// AzureMonitor allows publishing of metrics to the Azure Monitor custom metrics +// service +type AzureMonitor struct { + Timeout internal.Duration + NamespacePrefix string `toml:"namespace_prefix"` + StringsAsDimensions bool `toml:"strings_as_dimensions"` + Region string + ResourceID string `toml:"resource_id"` + + url string + auth autorest.Authorizer + client *http.Client + + cache map[time.Time]map[uint64]*aggregate + timeFunc func() time.Time + + MetricOutsideWindow selfstat.Stat +} + +type dimension struct { + name string + value string +} + +type aggregate struct { + name string + min float64 + max float64 + sum float64 + count int64 + dimensions []dimension + updated bool +} + +const ( + defaultRequestTimeout = time.Second * 5 + defaultNamespacePrefix = "Telegraf/" + defaultAuthResource = "https://monitoring.azure.com/" + + vmInstanceMetadataURL = "http://169.254.169.254/metadata/instance?api-version=2017-12-01" + resourceIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s" + urlTemplate = "https://%s.monitoring.azure.com%s/metrics" + maxRequestBodySize = 4000000 +) + +var sampleConfig = ` + ## Timeout for HTTP writes. + # timeout = "20s" + + ## Set the namespace prefix, defaults to "Telegraf/". + # namespace_prefix = "Telegraf/" + + ## Azure Monitor doesn't have a string value type, so convert string + ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows + ## a maximum of 10 dimensions so Telegraf will only send the first 10 + ## alphanumeric dimensions. + # strings_as_dimensions = false + + ## Both region and resource_id must be set or be available via the + ## Instance Metadata service on Azure Virtual Machines. + # + ## Azure Region to publish metrics against. + ## ex: region = "southcentralus" + # region = "" + # + ## The Azure Resource ID against which metric will be logged, e.g. 
+  ## ex: resource_id = "/subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>"
+  # resource_id = ""
+`
+
+// Description provides a description of the plugin
+func (a *AzureMonitor) Description() string {
+	return "Send aggregate metrics to Azure Monitor"
+}
+
+// SampleConfig provides a sample configuration for the plugin
+func (a *AzureMonitor) SampleConfig() string {
+	return sampleConfig
+}
+
+// Connect initializes the plugin and validates connectivity
+func (a *AzureMonitor) Connect() error {
+	a.cache = make(map[time.Time]map[uint64]*aggregate, 36)
+
+	if a.Timeout.Duration == 0 {
+		a.Timeout.Duration = defaultRequestTimeout
+	}
+
+	a.client = &http.Client{
+		Transport: &http.Transport{
+			Proxy: http.ProxyFromEnvironment,
+		},
+		Timeout: a.Timeout.Duration,
+	}
+
+	if a.NamespacePrefix == "" {
+		a.NamespacePrefix = defaultNamespacePrefix
+	}
+
+	var err error
+	var region string
+	var resourceID string
+	if a.Region == "" || a.ResourceID == "" {
+		// Pull region and resource identifier
+		region, resourceID, err = vmInstanceMetadata(a.client)
+		if err != nil {
+			return err
+		}
+	}
+	if a.Region != "" {
+		region = a.Region
+	}
+	if a.ResourceID != "" {
+		resourceID = a.ResourceID
+	}
+
+	if resourceID == "" {
+		return fmt.Errorf("no resource ID configured or available via VM instance metadata")
+	} else if region == "" {
+		return fmt.Errorf("no region configured or available via VM instance metadata")
+	}
+	a.url = fmt.Sprintf(urlTemplate, region, resourceID)
+
+	log.Printf("D! Writing to Azure Monitor URL: %s", a.url)
+
+	// Surface authorizer construction failures instead of silently continuing.
+	a.auth, err = auth.NewAuthorizerFromEnvironmentWithResource(defaultAuthResource)
+	if err != nil {
+		return err
+	}
+
+	a.Reset()
+
+	tags := map[string]string{
+		"region":      region,
+		"resource_id": resourceID,
+	}
+	a.MetricOutsideWindow = selfstat.Register("azure_monitor", "metric_outside_window", tags)
+
+	return nil
+}
+
+// vmInstanceMetadata retrieves metadata about the current Azure VM
+func vmInstanceMetadata(c *http.Client) (string, string, error) {
+	req, err := http.NewRequest("GET", vmInstanceMetadataURL, nil)
+	if err != nil {
+		return "", "", fmt.Errorf("error creating request: %v", err)
+	}
+	req.Header.Set("Metadata", "true")
+
+	resp, err := c.Do(req)
+	if err != nil {
+		return "", "", err
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", "", err
+	}
+	if resp.StatusCode >= 300 || resp.StatusCode < 200 {
+		return "", "", fmt.Errorf("unable to fetch instance metadata: [%v] %s", resp.StatusCode, body)
+	}
+
+	// VirtualMachineMetadata contains information about a VM from the metadata service
+	type VirtualMachineMetadata struct {
+		Compute struct {
+			Location          string `json:"location"`
+			Name              string `json:"name"`
+			ResourceGroupName string `json:"resourceGroupName"`
+			SubscriptionID    string `json:"subscriptionId"`
+		} `json:"compute"`
+	}
+
+	var metadata VirtualMachineMetadata
+	if err := json.Unmarshal(body, &metadata); err != nil {
+		return "", "", err
+	}
+
+	region := metadata.Compute.Location
+	resourceID := fmt.Sprintf(
+		resourceIDTemplate,
+		metadata.Compute.SubscriptionID,
+		metadata.Compute.ResourceGroupName,
+		metadata.Compute.Name,
+	)
+
+	return region, resourceID, nil
+}
+
+// Close shuts down any active connections
+func (a *AzureMonitor) Close() error {
+	a.client = nil
+	return nil
+}
+
+type azureMonitorMetric struct {
+	Time time.Time         `json:"time"`
+	Data *azureMonitorData `json:"data"`
+}
+
+type azureMonitorData struct {
+	BaseData *azureMonitorBaseData
`json:"baseData"` +} + +type azureMonitorBaseData struct { + Metric string `json:"metric"` + Namespace string `json:"namespace"` + DimensionNames []string `json:"dimNames"` + Series []*azureMonitorSeries `json:"series"` +} + +type azureMonitorSeries struct { + DimensionValues []string `json:"dimValues"` + Min float64 `json:"min"` + Max float64 `json:"max"` + Sum float64 `json:"sum"` + Count int64 `json:"count"` +} + +// Write writes metrics to the remote endpoint +func (a *AzureMonitor) Write(metrics []telegraf.Metric) error { + azmetrics := make(map[uint64]*azureMonitorMetric, len(metrics)) + for _, m := range metrics { + id := hashIDWithTagKeysOnly(m) + if azm, ok := azmetrics[id]; !ok { + amm, err := translate(m, a.NamespacePrefix) + if err != nil { + log.Printf("E! [outputs.azure_monitor]: could not create azure metric for %q; discarding point", m.Name()) + continue + } + azmetrics[id] = amm + } else { + amm, err := translate(m, a.NamespacePrefix) + if err != nil { + log.Printf("E! [outputs.azure_monitor]: could not create azure metric for %q; discarding point", m.Name()) + continue + } + + azmetrics[id].Data.BaseData.Series = append( + azm.Data.BaseData.Series, + amm.Data.BaseData.Series..., + ) + } + } + + if len(azmetrics) == 0 { + return nil + } + + var body []byte + for _, m := range azmetrics { + // Azure Monitor accepts new batches of points in new-line delimited + // JSON, following RFC 4288 (see https://github.com/ndjson/ndjson-spec). + jsonBytes, err := json.Marshal(&m) + if err != nil { + return err + } + // Azure Monitor's maximum request body size of 4MB. Send batches that + // exceed this size via separate write requests. + if (len(body) + len(jsonBytes) + 1) > maxRequestBodySize { + err := a.send(body) + if err != nil { + return err + } + body = nil + } + body = append(body, jsonBytes...) + body = append(body, '\n') + } + + return a.send(body) +} + +func (a *AzureMonitor) send(body []byte) error { + var buf bytes.Buffer + g := gzip.NewWriter(&buf) + if _, err := g.Write(body); err != nil { + return err + } + if err := g.Close(); err != nil { + return err + } + + req, err := http.NewRequest("POST", a.url, &buf) + if err != nil { + return err + } + + req.Header.Set("Content-Encoding", "gzip") + req.Header.Set("Content-Type", "application/x-ndjson") + + // Add the authorization header. WithAuthorization will automatically + // refresh the token if needed. 
+ req, err = autorest.CreatePreparer(a.auth.WithAuthorization()).Prepare(req) + if err != nil { + return fmt.Errorf("unable to fetch authentication credentials: %v", err) + } + + resp, err := a.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = ioutil.ReadAll(resp.Body) + if err != nil || resp.StatusCode < 200 || resp.StatusCode > 299 { + return fmt.Errorf("failed to write batch: [%v] %s", resp.StatusCode, resp.Status) + } + + return nil +} + +func hashIDWithTagKeysOnly(m telegraf.Metric) uint64 { + h := fnv.New64a() + h.Write([]byte(m.Name())) + h.Write([]byte("\n")) + for _, tag := range m.TagList() { + h.Write([]byte(tag.Key)) + h.Write([]byte("\n")) + } + b := make([]byte, binary.MaxVarintLen64) + n := binary.PutUvarint(b, uint64(m.Time().UnixNano())) + h.Write(b[:n]) + h.Write([]byte("\n")) + return h.Sum64() +} + +func translate(m telegraf.Metric, prefix string) (*azureMonitorMetric, error) { + var dimensionNames []string + var dimensionValues []string + for _, tag := range m.TagList() { + // Azure custom metrics service supports up to 10 dimensions + if len(dimensionNames) > 10 { + continue + } + + if tag.Key == "" || tag.Value == "" { + continue + } + + dimensionNames = append(dimensionNames, tag.Key) + dimensionValues = append(dimensionValues, tag.Value) + } + + min, err := getFloatField(m, "min") + if err != nil { + return nil, err + } + max, err := getFloatField(m, "max") + if err != nil { + return nil, err + } + sum, err := getFloatField(m, "sum") + if err != nil { + return nil, err + } + count, err := getIntField(m, "count") + if err != nil { + return nil, err + } + + mn, ns := "Missing", "Missing" + names := strings.SplitN(m.Name(), "-", 2) + if len(names) > 1 { + mn = names[1] + } + if len(names) > 0 { + ns = names[0] + } + ns = prefix + ns + + return &azureMonitorMetric{ + Time: m.Time(), + Data: &azureMonitorData{ + BaseData: &azureMonitorBaseData{ + Metric: mn, + Namespace: ns, + DimensionNames: dimensionNames, + Series: []*azureMonitorSeries{ + &azureMonitorSeries{ + DimensionValues: dimensionValues, + Min: min, + Max: max, + Sum: sum, + Count: count, + }, + }, + }, + }, + }, nil +} + +func getFloatField(m telegraf.Metric, key string) (float64, error) { + fv, ok := m.GetField(key) + if !ok { + return 0, fmt.Errorf("missing field: %s", key) + } + + if value, ok := fv.(float64); ok { + return value, nil + } + return 0, fmt.Errorf("unexpected type: %s: %T", key, fv) +} + +func getIntField(m telegraf.Metric, key string) (int64, error) { + fv, ok := m.GetField(key) + if !ok { + return 0, fmt.Errorf("missing field: %s", key) + } + + if value, ok := fv.(int64); ok { + return value, nil + } + return 0, fmt.Errorf("unexpected type: %s: %T", key, fv) +} + +// Add will append a metric to the output aggregate +func (a *AzureMonitor) Add(m telegraf.Metric) { + // Azure Monitor only supports aggregates 30 minutes into the past and 4 + // minutes into the future. Future metrics are dropped when pushed. + t := m.Time() + tbucket := time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), 0, 0, t.Location()) + if tbucket.Before(a.timeFunc().Add(-time.Minute * 30)) { + a.MetricOutsideWindow.Incr(1) + return + } + + // Azure Monitor doesn't have a string value type, so convert string fields + // to dimensions (a.k.a. tags) if enabled. 
+ if a.StringsAsDimensions { + for _, f := range m.FieldList() { + if v, ok := f.Value.(string); ok { + m.AddTag(f.Key, v) + } + } + } + + for _, f := range m.FieldList() { + fv, ok := convert(f.Value) + if !ok { + continue + } + + // Azure Monitor does not support fields so the field name is appended + // to the metric name. + name := m.Name() + "-" + sanitize(f.Key) + id := hashIDWithField(m.HashID(), f.Key) + + _, ok = a.cache[tbucket] + if !ok { + // Time bucket does not exist and needs to be created. + a.cache[tbucket] = make(map[uint64]*aggregate) + } + + // Fetch existing aggregate + var agg *aggregate + agg, ok = a.cache[tbucket][id] + if !ok { + agg := &aggregate{ + name: name, + min: fv, + max: fv, + sum: fv, + count: 1, + } + for _, tag := range m.TagList() { + dim := dimension{ + name: tag.Key, + value: tag.Value, + } + agg.dimensions = append(agg.dimensions, dim) + } + agg.updated = true + a.cache[tbucket][id] = agg + continue + } + + if fv < agg.min { + agg.min = fv + } + if fv > agg.max { + agg.max = fv + } + agg.sum += fv + agg.count++ + agg.updated = true + } +} + +func convert(in interface{}) (float64, bool) { + switch v := in.(type) { + case int64: + return float64(v), true + case uint64: + return float64(v), true + case float64: + return v, true + case bool: + if v { + return 1, true + } + return 0, true + default: + return 0, false + } +} + +var invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) + +func sanitize(value string) string { + return invalidNameCharRE.ReplaceAllString(value, "_") +} + +func hashIDWithField(id uint64, fk string) uint64 { + h := fnv.New64a() + b := make([]byte, binary.MaxVarintLen64) + n := binary.PutUvarint(b, id) + h.Write(b[:n]) + h.Write([]byte("\n")) + h.Write([]byte(fk)) + h.Write([]byte("\n")) + return h.Sum64() +} + +// Push sends metrics to the output metric buffer +func (a *AzureMonitor) Push() []telegraf.Metric { + var metrics []telegraf.Metric + for tbucket, aggs := range a.cache { + // Do not send metrics early + if tbucket.After(a.timeFunc().Add(-time.Minute)) { + continue + } + for _, agg := range aggs { + // Only send aggregates that have had an update since the last push. + if !agg.updated { + continue + } + + tags := make(map[string]string, len(agg.dimensions)) + for _, tag := range agg.dimensions { + tags[tag.name] = tag.value + } + + m, err := metric.New(agg.name, + tags, + map[string]interface{}{ + "min": agg.min, + "max": agg.max, + "sum": agg.sum, + "count": agg.count, + }, + tbucket, + ) + + if err != nil { + log.Printf("E! [outputs.azure_monitor]: could not create metric for aggregation %q; discarding point", agg.name) + } + + metrics = append(metrics, m) + } + } + return metrics +} + +// Reset clears the cache of aggregate metrics +func (a *AzureMonitor) Reset() { + for tbucket := range a.cache { + // Remove aggregates older than 30 minutes + if tbucket.Before(a.timeFunc().Add(-time.Minute * 30)) { + delete(a.cache, tbucket) + continue + } + // Metrics updated within the latest 1m have not been pushed and should + // not be cleared. 
+ if tbucket.After(a.timeFunc().Add(-time.Minute)) { + continue + } + for id := range a.cache[tbucket] { + a.cache[tbucket][id].updated = false + } + } +} + +func init() { + outputs.Add("azure_monitor", func() telegraf.Output { + return &AzureMonitor{ + timeFunc: time.Now, + } + }) +} diff --git a/plugins/outputs/azure_monitor/azure_monitor_test.go b/plugins/outputs/azure_monitor/azure_monitor_test.go new file mode 100644 index 000000000..6fb40805e --- /dev/null +++ b/plugins/outputs/azure_monitor/azure_monitor_test.go @@ -0,0 +1,361 @@ +package azure_monitor + +import ( + "bufio" + "compress/gzip" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestAggregate(t *testing.T) { + tests := []struct { + name string + plugin *AzureMonitor + metrics []telegraf.Metric + addTime time.Time + pushTime time.Time + check func(t *testing.T, plugin *AzureMonitor, metrics []telegraf.Metric) + }{ + { + name: "add metric outside window is dropped", + plugin: &AzureMonitor{ + Region: "test", + ResourceID: "/test", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + addTime: time.Unix(3600, 0), + pushTime: time.Unix(3600, 0), + check: func(t *testing.T, plugin *AzureMonitor, metrics []telegraf.Metric) { + require.Equal(t, int64(1), plugin.MetricOutsideWindow.Get()) + require.Len(t, metrics, 0) + }, + }, + { + name: "metric not sent until period expires", + plugin: &AzureMonitor{ + Region: "test", + ResourceID: "/test", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + addTime: time.Unix(0, 0), + pushTime: time.Unix(0, 0), + check: func(t *testing.T, plugin *AzureMonitor, metrics []telegraf.Metric) { + require.Len(t, metrics, 0) + }, + }, + { + name: "add strings as dimensions", + plugin: &AzureMonitor{ + Region: "test", + ResourceID: "/test", + StringsAsDimensions: true, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "localhost", + }, + map[string]interface{}{ + "value": 42, + "message": "howdy", + }, + time.Unix(0, 0), + ), + }, + addTime: time.Unix(0, 0), + pushTime: time.Unix(3600, 0), + check: func(t *testing.T, plugin *AzureMonitor, metrics []telegraf.Metric) { + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu-value", + map[string]string{ + "host": "localhost", + "message": "howdy", + }, + map[string]interface{}{ + "min": 42.0, + "max": 42.0, + "sum": 42.0, + "count": 1, + }, + time.Unix(0, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, metrics) + }, + }, + { + name: "add metric to cache and push", + plugin: &AzureMonitor{ + Region: "test", + ResourceID: "/test", + cache: make(map[time.Time]map[uint64]*aggregate, 36), + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + addTime: time.Unix(0, 0), + pushTime: time.Unix(3600, 0), + check: func(t *testing.T, plugin *AzureMonitor, metrics []telegraf.Metric) { + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu-value", + map[string]string{}, + map[string]interface{}{ + "min": 42.0, + "max": 42.0, + "sum": 42.0, + "count": 1, + }, + 
time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, metrics) + }, + }, + { + name: "added metric are aggregated", + plugin: &AzureMonitor{ + Region: "test", + ResourceID: "/test", + cache: make(map[time.Time]map[uint64]*aggregate, 36), + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 84, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 2, + }, + time.Unix(0, 0), + ), + }, + addTime: time.Unix(0, 0), + pushTime: time.Unix(3600, 0), + check: func(t *testing.T, plugin *AzureMonitor, metrics []telegraf.Metric) { + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu-value", + map[string]string{}, + map[string]interface{}{ + "min": 2.0, + "max": 84.0, + "sum": 128.0, + "count": 3, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, metrics) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.plugin.Connect() + require.NoError(t, err) + + // Reset globals + tt.plugin.MetricOutsideWindow.Set(0) + + tt.plugin.timeFunc = func() time.Time { return tt.addTime } + for _, m := range tt.metrics { + tt.plugin.Add(m) + } + + tt.plugin.timeFunc = func() time.Time { return tt.pushTime } + metrics := tt.plugin.Push() + tt.plugin.Reset() + + tt.check(t, tt.plugin, metrics) + }) + } +} + +func TestWrite(t *testing.T) { + readBody := func(r *http.Request) ([]*azureMonitorMetric, error) { + gz, err := gzip.NewReader(r.Body) + if err != nil { + return nil, err + } + scanner := bufio.NewScanner(gz) + + azmetrics := make([]*azureMonitorMetric, 0) + for scanner.Scan() { + line := scanner.Text() + var amm azureMonitorMetric + err = json.Unmarshal([]byte(line), &amm) + if err != nil { + return nil, err + } + azmetrics = append(azmetrics, &amm) + } + + return azmetrics, nil + } + + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + url := "http://" + ts.Listener.Addr().String() + "/metrics" + + tests := []struct { + name string + plugin *AzureMonitor + metrics []telegraf.Metric + handler func(t *testing.T, w http.ResponseWriter, r *http.Request) + }{ + { + name: "if not an azure metric nothing is sent", + plugin: &AzureMonitor{ + Region: "test", + ResourceID: "/test", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + t.Fatal("should not call") + }, + }, + { + name: "single azure metric", + plugin: &AzureMonitor{ + Region: "test", + ResourceID: "/test", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu-value", + map[string]string{}, + map[string]interface{}{ + "min": float64(42), + "max": float64(42), + "sum": float64(42), + "count": int64(1), + }, + time.Unix(0, 0), + ), + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + azmetrics, err := readBody(r) + require.NoError(t, err) + require.Len(t, azmetrics, 1) + w.WriteHeader(http.StatusOK) + }, + }, + { + name: "multiple azure metric", + plugin: &AzureMonitor{ + Region: "test", + ResourceID: "/test", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu-value", + map[string]string{}, + map[string]interface{}{ + "min": float64(42), + "max": 
float64(42), + "sum": float64(42), + "count": int64(1), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu-value", + map[string]string{}, + map[string]interface{}{ + "min": float64(42), + "max": float64(42), + "sum": float64(42), + "count": int64(1), + }, + time.Unix(60, 0), + ), + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + azmetrics, err := readBody(r) + require.NoError(t, err) + require.Len(t, azmetrics, 2) + w.WriteHeader(http.StatusOK) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + tt.handler(t, w, r) + }) + + err := tt.plugin.Connect() + require.NoError(t, err) + + // override real authorizer and write url + tt.plugin.auth = autorest.NullAuthorizer{} + tt.plugin.url = url + + err = tt.plugin.Write(tt.metrics) + require.NoError(t, err) + }) + } +} diff --git a/plugins/parsers/logfmt/parser_test.go b/plugins/parsers/logfmt/parser_test.go index c90964684..dfacd8c8f 100644 --- a/plugins/parsers/logfmt/parser_test.go +++ b/plugins/parsers/logfmt/parser_test.go @@ -7,7 +7,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func MustMetric(t *testing.T, m *testutil.Metric) telegraf.Metric { @@ -25,28 +24,28 @@ func TestParse(t *testing.T) { measurement string now func() time.Time bytes []byte - want []testutil.Metric + want []telegraf.Metric wantErr bool }{ { name: "no bytes returns no metrics", now: func() time.Time { return time.Unix(0, 0) }, - want: []testutil.Metric{}, + want: []telegraf.Metric{}, }, { name: "test without trailing end", bytes: []byte("foo=\"bar\""), now: func() time.Time { return time.Unix(0, 0) }, measurement: "testlog", - want: []testutil.Metric{ - testutil.Metric{ - Measurement: "testlog", - Tags: map[string]string{}, - Fields: map[string]interface{}{ + want: []telegraf.Metric{ + testutil.MustMetric( + "testlog", + map[string]string{}, + map[string]interface{}{ "foo": "bar", }, - Time: time.Unix(0, 0), - }, + time.Unix(0, 0), + ), }, }, { @@ -54,15 +53,15 @@ func TestParse(t *testing.T) { bytes: []byte("foo=\"bar\"\n"), now: func() time.Time { return time.Unix(0, 0) }, measurement: "testlog", - want: []testutil.Metric{ - testutil.Metric{ - Measurement: "testlog", - Tags: map[string]string{}, - Fields: map[string]interface{}{ + want: []telegraf.Metric{ + testutil.MustMetric( + "testlog", + map[string]string{}, + map[string]interface{}{ "foo": "bar", }, - Time: time.Unix(0, 0), - }, + time.Unix(0, 0), + ), }, }, { @@ -70,18 +69,18 @@ func TestParse(t *testing.T) { bytes: []byte(`ts=2018-07-24T19:43:40.275Z lvl=info msg="http request" method=POST`), now: func() time.Time { return time.Unix(0, 0) }, measurement: "testlog", - want: []testutil.Metric{ - testutil.Metric{ - Measurement: "testlog", - Tags: map[string]string{}, - Fields: map[string]interface{}{ + want: []telegraf.Metric{ + testutil.MustMetric( + "testlog", + map[string]string{}, + map[string]interface{}{ "lvl": "info", "msg": "http request", "method": "POST", "ts": "2018-07-24T19:43:40.275Z", }, - Time: time.Unix(0, 0), - }, + time.Unix(0, 0), + ), }, }, { @@ -89,42 +88,42 @@ func TestParse(t *testing.T) { bytes: []byte("ts=2018-07-24T19:43:40.275Z lvl=info msg=\"http request\" method=POST\nparent_id=088876RL000 duration=7.45 log_id=09R4e4Rl000"), now: func() time.Time { return time.Unix(0, 0) }, measurement: "testlog", - want: 
[]testutil.Metric{ - testutil.Metric{ - Measurement: "testlog", - Tags: map[string]string{}, - Fields: map[string]interface{}{ + want: []telegraf.Metric{ + testutil.MustMetric( + "testlog", + map[string]string{}, + map[string]interface{}{ "lvl": "info", "msg": "http request", "method": "POST", "ts": "2018-07-24T19:43:40.275Z", }, - Time: time.Unix(0, 0), - }, - testutil.Metric{ - Measurement: "testlog", - Tags: map[string]string{}, - Fields: map[string]interface{}{ + time.Unix(0, 0), + ), + testutil.MustMetric( + "testlog", + map[string]string{}, + map[string]interface{}{ "parent_id": "088876RL000", "duration": 7.45, "log_id": "09R4e4Rl000", }, - Time: time.Unix(0, 0), - }, + time.Unix(0, 0), + ), }, }, { name: "keys without = or values are ignored", now: func() time.Time { return time.Unix(0, 0) }, bytes: []byte(`i am no data.`), - want: []testutil.Metric{}, + want: []telegraf.Metric{}, wantErr: false, }, { name: "keys without values are ignored", now: func() time.Time { return time.Unix(0, 0) }, bytes: []byte(`foo="" bar=`), - want: []testutil.Metric{}, + want: []telegraf.Metric{}, wantErr: false, }, { @@ -132,7 +131,7 @@ func TestParse(t *testing.T) { now: func() time.Time { return time.Unix(0, 0) }, measurement: "testlog", bytes: []byte(`bar=baz foo="bar`), - want: []testutil.Metric{}, + want: []telegraf.Metric{}, wantErr: true, }, { @@ -140,7 +139,7 @@ func TestParse(t *testing.T) { now: func() time.Time { return time.Unix(0, 0) }, measurement: "testlog", bytes: []byte(`"foo=" bar=baz`), - want: []testutil.Metric{}, + want: []telegraf.Metric{}, wantErr: true, }, } @@ -155,10 +154,8 @@ func TestParse(t *testing.T) { t.Errorf("Logfmt.Parse error = %v, wantErr %v", err, tt.wantErr) return } - require.Equal(t, len(tt.want), len(got)) - for i, m := range got { - testutil.MustEqual(t, m, tt.want[i]) - } + + testutil.RequireMetricsEqual(t, tt.want, got) }) } } @@ -169,13 +166,13 @@ func TestParseLine(t *testing.T) { s string measurement string now func() time.Time - want testutil.Metric + want telegraf.Metric wantErr bool }{ { name: "No Metric In line", now: func() time.Time { return time.Unix(0, 0) }, - want: testutil.Metric{}, + want: nil, wantErr: true, }, { @@ -183,34 +180,34 @@ func TestParseLine(t *testing.T) { now: func() time.Time { return time.Unix(0, 0) }, measurement: "testlog", s: `ts=2018-07-24T19:43:35.207268Z lvl=5 msg="Write failed" log_id=09R4e4Rl000`, - want: testutil.Metric{ - Measurement: "testlog", - Fields: map[string]interface{}{ + want: testutil.MustMetric( + "testlog", + map[string]string{}, + map[string]interface{}{ "ts": "2018-07-24T19:43:35.207268Z", "lvl": int64(5), "msg": "Write failed", "log_id": "09R4e4Rl000", }, - Tags: map[string]string{}, - Time: time.Unix(0, 0), - }, + time.Unix(0, 0), + ), }, { name: "ParseLine only returns metrics from first string", now: func() time.Time { return time.Unix(0, 0) }, measurement: "testlog", s: "ts=2018-07-24T19:43:35.207268Z lvl=5 msg=\"Write failed\" log_id=09R4e4Rl000\nmethod=POST parent_id=088876RL000 duration=7.45 log_id=09R4e4Rl000", - want: testutil.Metric{ - Measurement: "testlog", - Fields: map[string]interface{}{ + want: testutil.MustMetric( + "testlog", + map[string]string{}, + map[string]interface{}{ "ts": "2018-07-24T19:43:35.207268Z", "lvl": int64(5), "msg": "Write failed", "log_id": "09R4e4Rl000", }, - Tags: map[string]string{}, - Time: time.Unix(0, 0), - }, + time.Unix(0, 0), + ), }, } for _, tt := range tests { @@ -223,9 +220,7 @@ func TestParseLine(t *testing.T) { if (err != nil) != tt.wantErr { 
t.Fatalf("Logfmt.Parse error = %v, wantErr %v", err, tt.wantErr) } - if got != nil { - testutil.MustEqual(t, got, tt.want) - } + testutil.RequireMetricEqual(t, tt.want, got) }) } } diff --git a/testutil/metric.go b/testutil/metric.go index 9620fea15..56debd093 100644 --- a/testutil/metric.go +++ b/testutil/metric.go @@ -1,16 +1,89 @@ package testutil import ( + "sort" "testing" + "time" + "github.com/google/go-cmp/cmp" "github.com/influxdata/telegraf" - "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/metric" ) -// MustEqual requires a and b to be identical. -func MustEqual(t *testing.T, got telegraf.Metric, want Metric) { - require.Equal(t, want.Measurement, got.Name()) - require.Equal(t, want.Fields, got.Fields()) - require.Equal(t, want.Tags, got.Tags()) - require.Equal(t, want.Time, got.Time()) +type metricDiff struct { + Measurement string + Tags []*telegraf.Tag + Fields []*telegraf.Field + Type telegraf.ValueType + Time time.Time +} + +func newMetricDiff(metric telegraf.Metric) *metricDiff { + m := &metricDiff{} + m.Measurement = metric.Name() + + for _, tag := range metric.TagList() { + m.Tags = append(m.Tags, tag) + } + sort.Slice(m.Tags, func(i, j int) bool { + return m.Tags[i].Key < m.Tags[j].Key + }) + + for _, field := range metric.FieldList() { + m.Fields = append(m.Fields, field) + } + sort.Slice(m.Fields, func(i, j int) bool { + return m.Fields[i].Key < m.Fields[j].Key + }) + + m.Type = metric.Type() + m.Time = metric.Time() + return m +} + +func RequireMetricEqual(t *testing.T, expected, actual telegraf.Metric) { + t.Helper() + + var lhs, rhs *metricDiff + if expected != nil { + lhs = newMetricDiff(expected) + } + if actual != nil { + rhs = newMetricDiff(actual) + } + + if diff := cmp.Diff(lhs, rhs); diff != "" { + t.Fatalf("telegraf.Metric\n--- expected\n+++ actual\n%s", diff) + } +} + +func RequireMetricsEqual(t *testing.T, expected, actual []telegraf.Metric) { + t.Helper() + + lhs := make([]*metricDiff, len(expected)) + for _, m := range expected { + lhs = append(lhs, newMetricDiff(m)) + } + rhs := make([]*metricDiff, len(actual)) + for _, m := range actual { + rhs = append(rhs, newMetricDiff(m)) + } + if diff := cmp.Diff(lhs, rhs); diff != "" { + t.Fatalf("[]telegraf.Metric\n--- expected\n+++ actual\n%s", diff) + } +} + +// Metric creates a new metric or panics on error. +func MustMetric( + name string, + tags map[string]string, + fields map[string]interface{}, + tm time.Time, + tp ...telegraf.ValueType, +) telegraf.Metric { + m, err := metric.New(name, tags, fields, tm, tp...) 
+ if err != nil { + panic("MustMetric") + } + return m } diff --git a/testutil/metric_test.go b/testutil/metric_test.go index 7295227ce..5b5ef01f4 100644 --- a/testutil/metric_test.go +++ b/testutil/metric_test.go @@ -8,13 +8,11 @@ import ( "github.com/influxdata/telegraf/metric" ) -func TestMustEqual(t *testing.T) { - type args struct { - } +func TestRequireMetricsEqual(t *testing.T) { tests := []struct { name string got telegraf.Metric - want Metric + want telegraf.Metric }{ { name: "telegraf and testutil metrics should be equal", @@ -34,24 +32,27 @@ func TestMustEqual(t *testing.T) { ) return m }(), - want: Metric{ - Measurement: "test", - Tags: map[string]string{ - "t1": "v1", - "t2": "v2", - }, - Fields: map[string]interface{}{ - "f1": int64(1), - "f2": 3.14, - "f3": "v3", - }, - Time: time.Unix(0, 0), - }, + want: func() telegraf.Metric { + m, _ := metric.New( + "test", + map[string]string{ + "t1": "v1", + "t2": "v2", + }, + map[string]interface{}{ + "f1": int64(1), + "f2": 3.14, + "f3": "v3", + }, + time.Unix(0, 0), + ) + return m + }(), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - MustEqual(t, tt.got, tt.want) + RequireMetricEqual(t, tt.want, tt.got) }) } } From 9ec7f749aa9cf24899464662cc20407aea85f373 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 5 Sep 2018 14:57:20 -0700 Subject: [PATCH 0133/1815] Update changelog and readme --- CHANGELOG.md | 6 ++++++ README.md | 1 + 2 files changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fbe42f2d9..f7a3ccd6c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,10 @@ - [valuecounter](./plugins/aggregators/valuecounter/README.md) - Contributed by @piotr1212 +### New Outputs + +- [azure_monitor](./plugins/outputs/azure_monitor/README.md) - Contributed by @influxdata + ### Features - [#4236](https://github.com/influxdata/telegraf/pull/4236): Add SSL/TLS support to redis input. @@ -79,6 +83,8 @@ - [#4585](https://github.com/influxdata/telegraf/pull/4585): Add Kibana input plugin. - [#4439](https://github.com/influxdata/telegraf/pull/4439): Add csv parser plugin. - [#4598](https://github.com/influxdata/telegraf/pull/4598): Add read_buffer_size option to statsd input. +- [#4089](https://github.com/influxdata/telegraf/pull/4089): Add azure_monitor output plugin. +- [#4628](https://github.com/influxdata/telegraf/pull/4628): Add queue_durability parameter to amqp_consumer input. 
 ### Bugfixes
 
diff --git a/README.md b/README.md
index 867aa10f7..22d31556c 100644
--- a/README.md
+++ b/README.md
@@ -298,6 +298,7 @@ formats may be used with input plugins supporting the `data_format` option:
 * [application_insights](./plugins/outputs/application_insights)
 * [aws kinesis](./plugins/outputs/kinesis)
 * [aws cloudwatch](./plugins/outputs/cloudwatch)
+* [azure_monitor](./plugins/inputs/azure_monitor)
 * [cratedb](./plugins/outputs/cratedb)
 * [datadog](./plugins/outputs/datadog)
 * [discard](./plugins/outputs/discard)

From 12ff8bb5e031d44bfcb1c79bfa20b35f207c6a3e Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 5 Sep 2018 14:58:13 -0700
Subject: [PATCH 0134/1815] Fix link in readme

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 22d31556c..00dc8ecf6 100644
--- a/README.md
+++ b/README.md
@@ -298,7 +298,7 @@ formats may be used with input plugins supporting the `data_format` option:
 * [application_insights](./plugins/outputs/application_insights)
 * [aws kinesis](./plugins/outputs/kinesis)
 * [aws cloudwatch](./plugins/outputs/cloudwatch)
-* [azure_monitor](./plugins/inputs/azure_monitor)
+* [azure_monitor](./plugins/outputs/azure_monitor)
 * [cratedb](./plugins/outputs/cratedb)
 * [datadog](./plugins/outputs/datadog)
 * [discard](./plugins/outputs/discard)

From d6467e966f4ef84f25f08ec694adb4dffe06e593 Mon Sep 17 00:00:00 2001
From: bsmaldon
Date: Wed, 5 Sep 2018 23:13:29 +0100
Subject: [PATCH 0135/1815] Add strings processor (#4476)

---
 plugins/processors/all/all.go              |   1 +
 plugins/processors/strings/README.md       |  83 ++++
 plugins/processors/strings/strings.go      | 199 +++++++++
 plugins/processors/strings/strings_test.go | 483 +++++++++++++++++++++
 4 files changed, 766 insertions(+)
 create mode 100644 plugins/processors/strings/README.md
 create mode 100644 plugins/processors/strings/strings.go
 create mode 100644 plugins/processors/strings/strings_test.go

diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go
index 5c2e2549e..a8386a608 100644
--- a/plugins/processors/all/all.go
+++ b/plugins/processors/all/all.go
@@ -7,6 +7,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/processors/parser"
 	_ "github.com/influxdata/telegraf/plugins/processors/printer"
 	_ "github.com/influxdata/telegraf/plugins/processors/regex"
+	_ "github.com/influxdata/telegraf/plugins/processors/strings"
 	_ "github.com/influxdata/telegraf/plugins/processors/rename"
 	_ "github.com/influxdata/telegraf/plugins/processors/topk"
 )
diff --git a/plugins/processors/strings/README.md b/plugins/processors/strings/README.md
new file mode 100644
index 000000000..f1e7361fe
--- /dev/null
+++ b/plugins/processors/strings/README.md
@@ -0,0 +1,83 @@
+# Strings Processor Plugin
+
+The `strings` plugin maps certain Go string functions onto measurement, tag, and field values. Values can be modified in place or stored in another key.
+
+Implemented functions are:
+- lowercase
+- uppercase
+- trim
+- trim_left
+- trim_right
+- trim_prefix
+- trim_suffix
+
+Please note that in this implementation these are processed in the order that they appear above, not in the order they are listed in the configuration.
+
+Specify the `measurement`, `tag` or `field` that you want processed in each section and optionally a `dest` if you want the result stored in a new tag or field. You can specify many transformations on data with a single strings processor.
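Because that order is fixed in code, the order of tables in the configuration file does not matter. A hypothetical snippet (the field name `counter` is made up for illustration) in which `lowercase` still runs before `trim_prefix`, even though it is written after it:

```toml
[[processors.strings]]
  # trim_prefix is written first, but lowercase always executes before it,
  # so the prefix match sees "my_value" rather than "MY_value".
  [[processors.strings.trim_prefix]]
    field = "counter"
    prefix = "my_"

  [[processors.strings.lowercase]]
    field = "counter"
```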
+
+### Configuration:
+
+```toml
+[[processors.strings]]
+  ## Convert a tag value to uppercase
+  # [[processors.strings.uppercase]]
+  #   tag = "method"
+
+  ## Convert a field value to lowercase and store in a new field
+  # [[processors.strings.lowercase]]
+  #   field = "uri_stem"
+  #   dest = "uri_stem_normalised"
+
+  ## Trim leading and trailing whitespace using the default cutset
+  # [[processors.strings.trim]]
+  #   field = "message"
+
+  ## Trim leading characters in cutset
+  # [[processors.strings.trim_left]]
+  #   field = "message"
+  #   cutset = "\t"
+
+  ## Trim trailing characters in cutset
+  # [[processors.strings.trim_right]]
+  #   field = "message"
+  #   cutset = "\r\n"
+
+  ## Trim the given prefix from the field
+  # [[processors.strings.trim_prefix]]
+  #   field = "my_value"
+  #   prefix = "my_"
+
+  ## Trim the given suffix from the field
+  # [[processors.strings.trim_suffix]]
+  #   field = "read_count"
+  #   suffix = "_count"
+```
+
+#### Trim, TrimLeft, TrimRight
+
+The `trim`, `trim_left`, and `trim_right` functions take an optional parameter: `cutset`. This value is a string containing the characters to remove from the value.
+
+#### TrimPrefix, TrimSuffix
+
+The `trim_prefix` and `trim_suffix` functions remove the given `prefix` or `suffix`
+respectively from the string.
+
+### Example
+**Config**
+```toml
+[[processors.strings]]
+  [[processors.strings.lowercase]]
+    tag = "uri_stem"
+
+  [[processors.strings.trim_prefix]]
+    tag = "uri_stem"
+    prefix = "/api/"
+
+  [[processors.strings.uppercase]]
+    field = "cs-host"
+    dest = "cs-host_normalised"
+```
+
+**Input**
+```
+iis_log,method=get,uri_stem=/API/HealthCheck cs-host="MIXEDCASE_host",referrer="-",ident="-",http_version=1.1,agent="UserAgent",resp_bytes=270i 1519652321000000000
+```
+
+**Output**
+```
+iis_log,method=get,uri_stem=healthcheck cs-host="MIXEDCASE_host",cs-host_normalised="MIXEDCASE_HOST",referrer="-",ident="-",http_version=1.1,agent="UserAgent",resp_bytes=270i 1519652321000000000
+```
diff --git a/plugins/processors/strings/strings.go b/plugins/processors/strings/strings.go
new file mode 100644
index 000000000..8e68dbc52
--- /dev/null
+++ b/plugins/processors/strings/strings.go
@@ -0,0 +1,199 @@
+package strings
+
+import (
+	"strings"
+	"unicode"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/processors"
+)
+
+type Strings struct {
+	Lowercase  []converter `toml:"lowercase"`
+	Uppercase  []converter `toml:"uppercase"`
+	Trim       []converter `toml:"trim"`
+	TrimLeft   []converter `toml:"trim_left"`
+	TrimRight  []converter `toml:"trim_right"`
+	TrimPrefix []converter `toml:"trim_prefix"`
+	TrimSuffix []converter `toml:"trim_suffix"`
+
+	converters []converter
+	init       bool
+}
+
+type ConvertFunc func(s string) string
+
+type converter struct {
+	Field       string
+	Tag         string
+	Measurement string
+	Dest        string
+	Cutset      string
+	Suffix      string
+	Prefix      string
+
+	fn ConvertFunc
+}
+
+const sampleConfig = `
+  ## Convert a tag value to uppercase
+  # [[processors.strings.uppercase]]
+  #   tag = "method"
+
+  ## Convert a field value to lowercase and store in a new field
+  # [[processors.strings.lowercase]]
+  #   field = "uri_stem"
+  #   dest = "uri_stem_normalised"
+
+  ## Trim leading and trailing whitespace using the default cutset
+  # [[processors.strings.trim]]
+  #   field = "message"
+
+  ## Trim leading characters in cutset
+  # [[processors.strings.trim_left]]
+  #   field = "message"
+  #   cutset = "\t"
+
+  ## Trim trailing characters in cutset
+  # [[processors.strings.trim_right]]
+  #   field = "message"
+  #   cutset = "\r\n"
+
+  ## Trim the given prefix from the field
+  # [[processors.strings.trim_prefix]]
+  #   field = "my_value"
+  #   prefix = "my_"
+
+  ## Trim the given suffix from the field
+  # [[processors.strings.trim_suffix]]
+  #   field = "read_count"
+  #   suffix = "_count"
+`
+
+func (s *Strings) SampleConfig()
string { + return sampleConfig +} + +func (s *Strings) Description() string { + return "Perform string processing on tags, fields, and measurements" +} + +func (c *converter) convertTag(metric telegraf.Metric) { + tv, ok := metric.GetTag(c.Tag) + if !ok { + return + } + + dest := c.Tag + if c.Dest != "" { + dest = c.Dest + } + + metric.AddTag(dest, c.fn(tv)) +} + +func (c *converter) convertField(metric telegraf.Metric) { + fv, ok := metric.GetField(c.Field) + if !ok { + return + } + + dest := c.Field + if c.Dest != "" { + dest = c.Dest + } + + if fv, ok := fv.(string); ok { + metric.AddField(dest, c.fn(fv)) + } +} + +func (c *converter) convertMeasurement(metric telegraf.Metric) { + if metric.Name() != c.Measurement { + return + } + + metric.SetName(c.fn(metric.Name())) +} + +func (c *converter) convert(metric telegraf.Metric) { + if c.Field != "" { + c.convertField(metric) + } + + if c.Tag != "" { + c.convertTag(metric) + } + + if c.Measurement != "" { + c.convertMeasurement(metric) + } +} + +func (s *Strings) initOnce() { + if s.init { + return + } + + s.converters = make([]converter, 0) + for _, c := range s.Lowercase { + c.fn = strings.ToLower + s.converters = append(s.converters, c) + } + for _, c := range s.Uppercase { + c.fn = strings.ToUpper + s.converters = append(s.converters, c) + } + for _, c := range s.Trim { + if c.Cutset != "" { + c.fn = func(s string) string { return strings.Trim(s, c.Cutset) } + } else { + c.fn = func(s string) string { return strings.TrimFunc(s, unicode.IsSpace) } + } + s.converters = append(s.converters, c) + } + for _, c := range s.TrimLeft { + if c.Cutset != "" { + c.fn = func(s string) string { return strings.TrimLeft(s, c.Cutset) } + } else { + c.fn = func(s string) string { return strings.TrimLeftFunc(s, unicode.IsSpace) } + } + s.converters = append(s.converters, c) + } + for _, c := range s.TrimRight { + if c.Cutset != "" { + c.fn = func(s string) string { return strings.TrimRight(s, c.Cutset) } + } else { + c.fn = func(s string) string { return strings.TrimRightFunc(s, unicode.IsSpace) } + } + s.converters = append(s.converters, c) + } + for _, c := range s.TrimPrefix { + c.fn = func(s string) string { return strings.TrimPrefix(s, c.Prefix) } + s.converters = append(s.converters, c) + } + for _, c := range s.TrimSuffix { + c.fn = func(s string) string { return strings.TrimSuffix(s, c.Suffix) } + s.converters = append(s.converters, c) + } + + s.init = true +} + +func (s *Strings) Apply(in ...telegraf.Metric) []telegraf.Metric { + s.initOnce() + + for _, metric := range in { + for _, converter := range s.converters { + converter.convert(metric) + } + } + + return in +} + +func init() { + processors.Add("strings", func() telegraf.Processor { + return &Strings{} + }) +} diff --git a/plugins/processors/strings/strings_test.go b/plugins/processors/strings/strings_test.go new file mode 100644 index 000000000..2097ac5a8 --- /dev/null +++ b/plugins/processors/strings/strings_test.go @@ -0,0 +1,483 @@ +package strings + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func newM1() telegraf.Metric { + m1, _ := metric.New("IIS_log", + map[string]string{ + "verb": "GET", + "s-computername": "MIXEDCASE_hostname", + }, + map[string]interface{}{ + "request": "/mixed/CASE/paTH/?from=-1D&to=now", + "whitespace": " whitespace\t", + }, + time.Now(), + ) + return m1 +} + +func newM2() telegraf.Metric { + m2, _ := 
metric.New("IIS_log", + map[string]string{ + "verb": "GET", + "resp_code": "200", + "s-computername": "MIXEDCASE_hostname", + }, + map[string]interface{}{ + "request": "/mixed/CASE/paTH/?from=-1D&to=now", + "cs-host": "AAAbbb", + "ignore_number": int64(200), + "ignore_bool": true, + }, + time.Now(), + ) + return m2 +} + +func TestFieldConversions(t *testing.T) { + tests := []struct { + name string + plugin *Strings + check func(t *testing.T, actual telegraf.Metric) + }{ + { + name: "Should change existing field to lowercase", + plugin: &Strings{ + Lowercase: []converter{ + converter{ + Field: "request", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "/mixed/case/path/?from=-1d&to=now", fv) + }, + }, + { + name: "Should change existing field to uppercase", + plugin: &Strings{ + Uppercase: []converter{ + converter{ + Field: "request", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "/MIXED/CASE/PATH/?FROM=-1D&TO=NOW", fv) + }, + }, + { + name: "Should add new lowercase field", + plugin: &Strings{ + Lowercase: []converter{ + converter{ + Field: "request", + Dest: "lowercase_request", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv) + + fv, ok = actual.GetField("lowercase_request") + require.True(t, ok) + require.Equal(t, "/mixed/case/path/?from=-1d&to=now", fv) + }, + }, + { + name: "Should trim from both sides", + plugin: &Strings{ + Trim: []converter{ + converter{ + Field: "request", + Cutset: "/w", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "mixed/CASE/paTH/?from=-1D&to=no", fv) + }, + }, + { + name: "Should trim from both sides and make lowercase", + plugin: &Strings{ + Trim: []converter{ + converter{ + Field: "request", + Cutset: "/w", + }, + }, + Lowercase: []converter{ + converter{ + Field: "request", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "mixed/case/path/?from=-1d&to=no", fv) + }, + }, + { + name: "Should trim from left side", + plugin: &Strings{ + TrimLeft: []converter{ + converter{ + Field: "request", + Cutset: "/w", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "mixed/CASE/paTH/?from=-1D&to=now", fv) + }, + }, + { + name: "Should trim from right side", + plugin: &Strings{ + TrimRight: []converter{ + converter{ + Field: "request", + Cutset: "/w", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=no", fv) + }, + }, + { + name: "Should trim prefix '/mixed'", + plugin: &Strings{ + TrimPrefix: []converter{ + converter{ + Field: "request", + Prefix: "/mixed", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "/CASE/paTH/?from=-1D&to=now", fv) + }, + }, + { + name: "Should trim suffix '-1D&to=now'", + plugin: &Strings{ + TrimSuffix: []converter{ + converter{ + Field: "request", + Suffix: "-1D&to=now", + }, + }, + 
}, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=", fv) + }, + }, + { + name: "Trim without cutset removes whitespace", + plugin: &Strings{ + Trim: []converter{ + converter{ + Field: "whitespace", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("whitespace") + require.True(t, ok) + require.Equal(t, "whitespace", fv) + }, + }, + { + name: "Trim left without cutset removes whitespace", + plugin: &Strings{ + TrimLeft: []converter{ + converter{ + Field: "whitespace", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("whitespace") + require.True(t, ok) + require.Equal(t, "whitespace\t", fv) + }, + }, + { + name: "Trim right without cutset removes whitespace", + plugin: &Strings{ + TrimRight: []converter{ + converter{ + Field: "whitespace", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("whitespace") + require.True(t, ok) + require.Equal(t, " whitespace", fv) + }, + }, + { + name: "No change if field missing", + plugin: &Strings{ + Lowercase: []converter{ + converter{ + Field: "xyzzy", + Suffix: "-1D&to=now", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metrics := tt.plugin.Apply(newM1()) + require.Len(t, metrics, 1) + tt.check(t, metrics[0]) + }) + } +} + +func TestTagConversions(t *testing.T) { + tests := []struct { + name string + plugin *Strings + check func(t *testing.T, actual telegraf.Metric) + }{ + { + name: "Should change existing tag to lowercase", + plugin: &Strings{ + Lowercase: []converter{ + converter{ + Tag: "s-computername", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + tv, ok := actual.GetTag("verb") + require.True(t, ok) + require.Equal(t, "GET", tv) + + tv, ok = actual.GetTag("s-computername") + require.True(t, ok) + require.Equal(t, "mixedcase_hostname", tv) + }, + }, + { + name: "Should add new lowercase tag", + plugin: &Strings{ + Lowercase: []converter{ + converter{ + Tag: "s-computername", + Dest: "s-computername_lowercase", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + tv, ok := actual.GetTag("verb") + require.True(t, ok) + require.Equal(t, "GET", tv) + + tv, ok = actual.GetTag("s-computername") + require.True(t, ok) + require.Equal(t, "MIXEDCASE_hostname", tv) + + tv, ok = actual.GetTag("s-computername_lowercase") + require.True(t, ok) + require.Equal(t, "mixedcase_hostname", tv) + }, + }, + { + name: "Should add new uppercase tag", + plugin: &Strings{ + Uppercase: []converter{ + converter{ + Tag: "s-computername", + Dest: "s-computername_uppercase", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + tv, ok := actual.GetTag("verb") + require.True(t, ok) + require.Equal(t, "GET", tv) + + tv, ok = actual.GetTag("s-computername") + require.True(t, ok) + require.Equal(t, "MIXEDCASE_hostname", tv) + + tv, ok = actual.GetTag("s-computername_uppercase") + require.True(t, ok) + require.Equal(t, "MIXEDCASE_HOSTNAME", tv) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metrics := tt.plugin.Apply(newM1()) + require.Len(t, metrics, 1) + tt.check(t, metrics[0]) + }) + } +} + +func 
TestMeasurementConversions(t *testing.T) { + tests := []struct { + name string + plugin *Strings + check func(t *testing.T, actual telegraf.Metric) + }{ + { + name: "lowercase measurement", + plugin: &Strings{ + Lowercase: []converter{ + converter{ + Measurement: "IIS_log", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + name := actual.Name() + require.Equal(t, "iis_log", name) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metrics := tt.plugin.Apply(newM1()) + require.Len(t, metrics, 1) + tt.check(t, metrics[0]) + }) + } +} + +func TestMultipleConversions(t *testing.T) { + plugin := &Strings{ + Lowercase: []converter{ + converter{ + Tag: "s-computername", + }, + converter{ + Field: "request", + }, + converter{ + Field: "cs-host", + Dest: "cs-host_lowercase", + }, + }, + Uppercase: []converter{ + converter{ + Tag: "verb", + }, + }, + } + + processed := plugin.Apply(newM2()) + + expectedFields := map[string]interface{}{ + "request": "/mixed/case/path/?from=-1d&to=now", + "ignore_number": int64(200), + "ignore_bool": true, + "cs-host": "AAAbbb", + "cs-host_lowercase": "aaabbb", + } + expectedTags := map[string]string{ + "verb": "GET", + "resp_code": "200", + "s-computername": "mixedcase_hostname", + } + + assert.Equal(t, expectedFields, processed[0].Fields()) + assert.Equal(t, expectedTags, processed[0].Tags()) +} + +func TestReadmeExample(t *testing.T) { + plugin := &Strings{ + Lowercase: []converter{ + converter{ + Tag: "uri_stem", + }, + }, + TrimPrefix: []converter{ + converter{ + Tag: "uri_stem", + Prefix: "/api/", + }, + }, + Uppercase: []converter{ + converter{ + Field: "cs-host", + Dest: "cs-host_normalised", + }, + }, + } + + m, _ := metric.New("iis_log", + map[string]string{ + "verb": "get", + "uri_stem": "/API/HealthCheck", + }, + map[string]interface{}{ + "cs-host": "MIXEDCASE_host", + "referrer": "-", + "ident": "-", + "http_version": "1.1", + "agent": "UserAgent", + "resp_bytes": int64(270), + }, + time.Now(), + ) + + processed := plugin.Apply(m) + + expectedTags := map[string]string{ + "verb": "get", + "uri_stem": "healthcheck", + } + expectedFields := map[string]interface{}{ + "cs-host": "MIXEDCASE_host", + "cs-host_normalised": "MIXEDCASE_HOST", + "referrer": "-", + "ident": "-", + "http_version": "1.1", + "agent": "UserAgent", + "resp_bytes": int64(270), + } + + assert.Equal(t, expectedFields, processed[0].Fields()) + assert.Equal(t, expectedTags, processed[0].Tags()) +} From 433454aa3ca16e2c2c789aa36f50fc1a1591b4c0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 5 Sep 2018 15:15:54 -0700 Subject: [PATCH 0136/1815] Fix go fmt issue --- plugins/processors/all/all.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go index a8386a608..41e2707d3 100644 --- a/plugins/processors/all/all.go +++ b/plugins/processors/all/all.go @@ -7,7 +7,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/processors/parser" _ "github.com/influxdata/telegraf/plugins/processors/printer" _ "github.com/influxdata/telegraf/plugins/processors/regex" - _ "github.com/influxdata/telegraf/plugins/processors/strings" _ "github.com/influxdata/telegraf/plugins/processors/rename" + _ "github.com/influxdata/telegraf/plugins/processors/strings" _ "github.com/influxdata/telegraf/plugins/processors/topk" ) From d73fe7bcedcac2ca01a9e19d4ce9502549339f4b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 5 Sep 2018 15:17:50 -0700 Subject: [PATCH 0137/1815] Update 
changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f7a3ccd6c..43579ee8b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ - [enum](./plugins/processors/enum/README.md) - Contributed by @KarstenSchnitter - [parser](./plugins/processors/parser/README.md) - Contributed by @maxunt & @Ayrdrie - [rename](./plugins/processors/rename/README.md) - Contributed by @goldibex +- [strings](./plugins/processors/strings/README.md) - Contributed by @bsmaldon ### New Aggregators @@ -85,6 +86,7 @@ - [#4598](https://github.com/influxdata/telegraf/pull/4598): Add read_buffer_size option to statsd input. - [#4089](https://github.com/influxdata/telegraf/pull/4089): Add azure_monitor output plugin. - [#4628](https://github.com/influxdata/telegraf/pull/4628): Add queue_durability parameter to amqp_consumer input. +- [#4476](https://github.com/influxdata/telegraf/pull/4476): Add strings processor. ### Bugfixes From 54f28eefa941d063620fe23c1dd46db4d185ade4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 5 Sep 2018 18:47:13 -0700 Subject: [PATCH 0138/1815] Make dep check happy --- Gopkg.lock | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 0f29ab59a..76c273c58 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -22,14 +22,16 @@ version = "v0.3.0" [[projects]] + digest = "1:5923e22a060ab818a015593422f9e8a35b9d881d4cfcfed0669a82959b11c7ee" name = "github.com/Azure/go-autorest" packages = [ "autorest", "autorest/adal", "autorest/azure", "autorest/azure/auth", - "autorest/date" + "autorest/date", ] + pruneopts = "" revision = "1f7cd6cfe0adea687ad44a512dfe76140f804318" version = "v10.12.0" @@ -238,8 +240,10 @@ [[projects]] branch = "master" + digest = "1:654ac9799e7a8a586d8690bb2229a4f3408bbfe2c5494bf4dfe043053eeb5496" name = "github.com/dimchansky/utfbom" packages = ["."] + pruneopts = "" revision = "6c6132ff69f0f6c088739067407b5d32c52e1d0f" [[projects]] @@ -853,7 +857,7 @@ version = "v1.2.0" [[projects]] - digest = "1:fce9909f20bc6a6363a6d589e478bdcf8111044b41566d37d7552bf92d955540" + digest = "1:02715a2fb4b9279af36651a59a51dd4164eb689bd6785874811899f43eeb2a54" name = "github.com/shirou/gopsutil" packages = [ "cpu", @@ -995,7 +999,7 @@ "pbkdf2", "pkcs12", "pkcs12/internal/rc2", - "ssh/terminal" + "ssh/terminal", ] pruneopts = "" revision = "a2144134853fc9a27a7b1e3eb4f19f1a76df13c9" @@ -1231,6 +1235,8 @@ input-imports = [ "collectd.org/api", "collectd.org/network", + "github.com/Azure/go-autorest/autorest", + "github.com/Azure/go-autorest/autorest/azure/auth", "github.com/Microsoft/ApplicationInsights-Go/appinsights", "github.com/Shopify/sarama", "github.com/StackExchange/wmi", @@ -1255,6 +1261,7 @@ "github.com/docker/docker/api/types/swarm", "github.com/docker/docker/client", "github.com/eclipse/paho.mqtt.golang", + "github.com/go-logfmt/logfmt", "github.com/go-redis/redis", "github.com/go-sql-driver/mysql", "github.com/gobwas/glob", From 091af7e645e45a1d12f32da1f95fb99ad4165a66 Mon Sep 17 00:00:00 2001 From: Vikrant Date: Thu, 6 Sep 2018 10:54:05 -0700 Subject: [PATCH 0139/1815] Add OAuth2 support to HTTP output plugin (#4536) --- Gopkg.lock | 25 ++++++++++- Gopkg.toml | 4 ++ docs/LICENSE_OF_DEPENDENCIES.md | 1 + plugins/outputs/http/README.md | 8 +++- plugins/outputs/http/http.go | 66 +++++++++++++++++++++------- plugins/outputs/http/http_test.go | 73 +++++++++++++++++++++++++++++++ 6 files changed, 160 insertions(+), 17 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 
76c273c58..b592346a8 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -1032,6 +1032,18 @@
   pruneopts = ""
   revision = "a680a1efc54dd51c040b3b5ce4939ea3cf2ea0d1"
 
+[[projects]]
+  branch = "master"
+  digest = "1:b697592485cb412be4188c08ca0beed9aab87f36b86418e21acc4a3998f63734"
+  name = "golang.org/x/oauth2"
+  packages = [
+    ".",
+    "clientcredentials",
+    "internal",
+  ]
+  pruneopts = ""
+  revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9"
+
 [[projects]]
   branch = "master"
   digest = "1:677e38cad6833ad266ec843739d167755eda1e6f2d8af1c63102b0426ad820db"
@@ -1086,7 +1098,16 @@
 [[projects]]
   digest = "1:c1771ca6060335f9768dff6558108bc5ef6c58506821ad43377ee23ff059e472"
   name = "google.golang.org/appengine"
-  packages = ["cloudsql"]
+  packages = [
+    "cloudsql",
+    "internal",
+    "internal/base",
+    "internal/datastore",
+    "internal/log",
+    "internal/remote_api",
+    "internal/urlfetch",
+    "urlfetch",
+  ]
   pruneopts = ""
   revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
   version = "v1.1.0"
@@ -1312,6 +1333,8 @@
     "github.com/zensqlmonitor/go-mssqldb",
     "golang.org/x/net/context",
     "golang.org/x/net/html/charset",
+    "golang.org/x/oauth2",
+    "golang.org/x/oauth2/clientcredentials",
     "golang.org/x/sys/unix",
     "golang.org/x/sys/windows",
     "golang.org/x/sys/windows/svc",
diff --git a/Gopkg.toml b/Gopkg.toml
index f942f3401..b4576ed6f 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -225,3 +225,7 @@
 [[constraint]]
   name = "github.com/Azure/go-autorest"
   version = "10.12.0"
+
+[[constraint]]
+  branch = "master"
+  name = "golang.org/x/oauth2"
diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md
index 36f038994..f5496fc2e 100644
--- a/docs/LICENSE_OF_DEPENDENCIES.md
+++ b/docs/LICENSE_OF_DEPENDENCIES.md
@@ -100,6 +100,7 @@ following works:
 - github.com/zensqlmonitor/go-mssqldb [BSD](https://github.com/zensqlmonitor/go-mssqldb/blob/master/LICENSE.txt)
 - golang.org/x/crypto [BSD](https://github.com/golang/crypto/blob/master/LICENSE)
 - golang.org/x/net [BSD](https://go.googlesource.com/net/+/master/LICENSE)
+- golang.org/x/oauth2 [BSD](https://go.googlesource.com/oauth2/+/master/LICENSE)
 - golang.org/x/text [BSD](https://go.googlesource.com/text/+/master/LICENSE)
 - golang.org/x/sys [BSD](https://go.googlesource.com/sys/+/master/LICENSE)
 - google.golang.org/grpc [APACHE](https://github.com/google/grpc-go/blob/master/LICENSE)
diff --git a/plugins/outputs/http/README.md b/plugins/outputs/http/README.md
index 5005e9f02..0c11896f9 100644
--- a/plugins/outputs/http/README.md
+++ b/plugins/outputs/http/README.md
@@ -21,6 +21,12 @@ data formats. For data_formats that support batching, metrics are sent in batch
   # username = "username"
   # password = "pa$$word"
 
+  ## OAuth2 Client Credentials Grant
+  # client_id = "clientid"
+  # client_secret = "secret"
+  # token_url = "https://identityprovider/oauth2/v1/token"
+  # scopes = ["urn:opc:idm:__myscopes__"]
+
   ## Optional TLS Config
   # tls_ca = "/etc/telegraf/ca.pem"
   # tls_cert = "/etc/telegraf/cert.pem"
@@ -33,7 +39,7 @@ data formats.
For data_formats that support batching, metrics are sent in batch
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
   # data_format = "influx"
-  
+
   ## Additional HTTP headers
   # [outputs.http.headers]
   #   # Should be set manually to "application/json" for json data_format
diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go
index 91c2954cd..ccb8f8949 100644
--- a/plugins/outputs/http/http.go
+++ b/plugins/outputs/http/http.go
@@ -2,6 +2,7 @@ package http
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"io/ioutil"
 	"net/http"
@@ -13,6 +14,8 @@ import (
 	"github.com/influxdata/telegraf/internal/tls"
 	"github.com/influxdata/telegraf/plugins/outputs"
 	"github.com/influxdata/telegraf/plugins/serializers"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/clientcredentials"
 )
 
 var sampleConfig = `
@@ -29,6 +32,12 @@ var sampleConfig = `
   # username = "username"
   # password = "pa$$word"
 
+  ## OAuth2 Client Credentials Grant
+  # client_id = "clientid"
+  # client_secret = "secret"
+  # token_url = "https://identityprovider/oauth2/v1/token"
+  # scopes = ["urn:opc:idm:__myscopes__"]
+
   ## Optional TLS Config
   # tls_ca = "/etc/telegraf/ca.pem"
   # tls_cert = "/etc/telegraf/cert.pem"
@@ -41,7 +50,7 @@ var sampleConfig = `
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
   # data_format = "influx"
-  
+
   ## Additional HTTP headers
   # [outputs.http.headers]
   #   # Should be set manually to "application/json" for json data_format
@@ -55,12 +64,16 @@ const (
 )
 
 type HTTP struct {
-	URL      string            `toml:"url"`
-	Timeout  internal.Duration `toml:"timeout"`
-	Method   string            `toml:"method"`
-	Username string            `toml:"username"`
-	Password string            `toml:"password"`
-	Headers  map[string]string `toml:"headers"`
+	URL          string            `toml:"url"`
+	Timeout      internal.Duration `toml:"timeout"`
+	Method       string            `toml:"method"`
+	Username     string            `toml:"username"`
+	Password     string            `toml:"password"`
+	Headers      map[string]string `toml:"headers"`
+	ClientID     string            `toml:"client_id"`
+	ClientSecret string            `toml:"client_secret"`
+	TokenURL     string            `toml:"token_url"`
+	Scopes       []string          `toml:"scopes"`
 	tls.ClientConfig
 
 	client *http.Client
@@ -71,6 +84,34 @@ func (h *HTTP) SetSerializer(serializer serializers.Serializer) {
 	h.serializer = serializer
 }
 
+func (h *HTTP) createClient(ctx context.Context) (*http.Client, error) {
+	tlsCfg, err := h.ClientConfig.TLSConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	client := &http.Client{
+		Transport: &http.Transport{
+			TLSClientConfig: tlsCfg,
+			Proxy:           http.ProxyFromEnvironment,
+		},
+		Timeout: h.Timeout.Duration,
+	}
+
+	if h.ClientID != "" && h.ClientSecret != "" && h.TokenURL != "" {
+		oauthConfig := clientcredentials.Config{
+			ClientID:     h.ClientID,
+			ClientSecret: h.ClientSecret,
+			TokenURL:     h.TokenURL,
+			Scopes:       h.Scopes,
+		}
+		ctx = context.WithValue(ctx, oauth2.HTTPClient, client)
+		client = oauthConfig.Client(ctx)
+	}
+
+	return client, nil
+}
+
 func (h *HTTP) Connect() error {
 	if h.Method == "" {
 		h.Method = http.MethodPost
@@ -84,18 +125,13 @@ func (h *HTTP) Connect() error {
 		h.Timeout.Duration = defaultClientTimeout
 	}
 
-	tlsCfg, err := h.ClientConfig.TLSConfig()
+	ctx := context.Background()
+	client, err := h.createClient(ctx)
 	if err != nil {
 		return err
 	}
 
-	h.client = &http.Client{
-		Transport: &http.Transport{
-			TLSClientConfig: tlsCfg,
-			Proxy:           http.ProxyFromEnvironment,
-		},
-		Timeout: h.Timeout.Duration,
-	}
+	h.client = client
 
 	return nil
 }
diff --git a/plugins/outputs/http/http_test.go
b/plugins/outputs/http/http_test.go index daec176be..0b6c78455 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -287,3 +287,76 @@ func TestBasicAuth(t *testing.T) { }) } } + +type TestHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) + +func TestOAuthClientCredentialsGrant(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + var token = "2YotnFZFEjr1zCsicMWpAA" + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + tests := []struct { + name string + plugin *HTTP + tokenHandler TestHandlerFunc + handler TestHandlerFunc + }{ + { + name: "no credentials", + plugin: &HTTP{ + URL: u.String(), + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Len(t, r.Header["Authorization"], 0) + w.WriteHeader(http.StatusOK) + }, + }, + { + name: "success", + plugin: &HTTP{ + URL: u.String() + "/write", + ClientID: "howdy", + ClientSecret: "secret", + TokenURL: u.String() + "/token", + Scopes: []string{"urn:opc:idm:__myscopes__"}, + }, + tokenHandler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + values := url.Values{} + values.Add("access_token", token) + values.Add("token_type", "bearer") + values.Add("expires_in", "3600") + w.Write([]byte(values.Encode())) + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, []string{"Bearer " + token}, r.Header["Authorization"]) + w.WriteHeader(http.StatusOK) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + tt.handler(t, w, r) + case "/token": + tt.tokenHandler(t, w, r) + } + }) + + serializer := influx.NewSerializer() + tt.plugin.SetSerializer(serializer) + err = tt.plugin.Connect() + require.NoError(t, err) + + err = tt.plugin.Write([]telegraf.Metric{getMetric()}) + require.NoError(t, err) + }) + } +} From 50a82c695722e624ffde42d7068f04064470a544 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 6 Sep 2018 10:55:05 -0700 Subject: [PATCH 0140/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 43579ee8b..e206f7072 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -87,6 +87,8 @@ - [#4089](https://github.com/influxdata/telegraf/pull/4089): Add azure_monitor output plugin. - [#4628](https://github.com/influxdata/telegraf/pull/4628): Add queue_durability parameter to amqp_consumer input. - [#4476](https://github.com/influxdata/telegraf/pull/4476): Add strings processor. +- [#4536](https://github.com/influxdata/telegraf/pull/4536): Add OAuth2 support to HTTP output plugin. + ### Bugfixes From cd4c4e7fbda1bca1812f92d248daf6dc0718278f Mon Sep 17 00:00:00 2001 From: David Reniz Date: Thu, 6 Sep 2018 19:44:33 -0500 Subject: [PATCH 0141/1815] Added Unix epoch timestamp support for JSON parser (#4633) --- docs/DATA_FORMATS_INPUT.md | 6 +++ plugins/parsers/json/parser.go | 69 +++++++++++++++++++++++++---- plugins/parsers/json/parser_test.go | 66 +++++++++++++++++++++++++++ 3 files changed, 133 insertions(+), 8 deletions(-) diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index 7e57d9657..235e3b308 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -131,6 +131,12 @@ config "json_time_key" and "json_time_format". 
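For orientation, a short hypothetical Go sketch contrasts a reference-layout format with the `unix` and `unix_ms` options that the documentation below introduces; all values are made up, and the epoch arithmetic simply mirrors the parser change further down:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	// A "recognized Go time format" is a reference layout written in terms of
	// the magic instant Mon Jan 2 15:04:05 MST 2006.
	t, err := time.Parse("2006-01-02T15:04:05Z07:00", "2018-09-04T18:23:31Z")
	fmt.Println(t, err)

	// "unix": seconds since the epoch; a string such as "1536092344.1" keeps
	// full precision when its integer and fractional parts are split.
	sec, _ := strconv.ParseInt("1536092344", 10, 64)
	fmt.Println(time.Unix(sec, 100000000).UTC()) // .1 s = 100000000 ns

	// "unix_ms": integer milliseconds since the epoch.
	ms := int64(1536092344100)
	fmt.Println(time.Unix(ms/1000, (ms%1000)*1000000).UTC())
}
```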
If "json_time_key" is set, "json_time_format" must be specified. The "json_time_key" describes the name of the field containing time information. The "json_time_format" must be a recognized Go time format. +If parsing a Unix epoch timestamp in seconds, e.g. 1536092344.1, this config must be set to "unix" (case insensitive); +corresponding JSON value can have a decimal part and can be a string or a number JSON representation. +If value is in number representation, it'll be treated as a double precision float, and could have some precision loss. +If value is in string representation, there'll be no precision loss up to nanosecond precision. Decimal positions beyond that will be dropped. +If parsing a Unix epoch timestamp in milliseconds, e.g. 1536092344100, this config must be set to "unix_ms" (case insensitive); +corresponding JSON value must be a (long) integer and be in number JSON representation. If there is no year provided, the metrics will have the current year. More info on time formats can be found here: https://golang.org/pkg/time/#Parse diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index 9fb0816fe..697296a12 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -11,7 +11,10 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/pkg/errors" "github.com/tidwall/gjson" + "math" + "regexp" ) var ( @@ -47,6 +50,49 @@ func (p *JSONParser) parseArray(buf []byte) ([]telegraf.Metric, error) { return metrics, nil } +// format = "unix": epoch is assumed to be in seconds and can come as number or string. Can have a decimal part. +// format = "unix_ms": epoch is assumed to be in milliseconds and can come as number or string. Cannot have a decimal part. 
+func parseUnixTimestamp(jsonValue interface{}, format string) (time.Time, error) { + timeInt, timeFractional := int64(0), int64(0) + timeEpochStr, ok := jsonValue.(string) + var err error + + if !ok { + timeEpochFloat, ok := jsonValue.(float64) + if !ok { + err := fmt.Errorf("time: %v could not be converted to string nor float64", jsonValue) + return time.Time{}, err + } + intPart, frac := math.Modf(timeEpochFloat) + timeInt, timeFractional = int64(intPart), int64(frac*1e9) + } else { + splitted := regexp.MustCompile("[.,]").Split(timeEpochStr, 2) + timeInt, err = strconv.ParseInt(splitted[0], 10, 64) + if err != nil { + return time.Time{}, err + } + + if len(splitted) == 2 { + if len(splitted[1]) > 9 { + splitted[1] = splitted[1][:9] //truncates decimal part to nanoseconds precision + } + nanosecStr := splitted[1] + strings.Repeat("0", 9-len(splitted[1])) //adds 0's to the right to obtain a valid number of nanoseconds + + timeFractional, err = strconv.ParseInt(nanosecStr, 10, 64) + if err != nil { + return time.Time{}, err + } + } + } + if strings.EqualFold(format, "unix") { + return time.Unix(timeInt, timeFractional).UTC(), nil + } else if strings.EqualFold(format, "unix_ms") { + return time.Unix(timeInt/1000, (timeInt%1000)*1e6).UTC(), nil + } else { + return time.Time{}, errors.New("Invalid unix format") + } +} + func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]interface{}) ([]telegraf.Metric, error) { tags := make(map[string]string) @@ -78,14 +124,21 @@ func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]i return nil, err } - timeStr, ok := f.Fields[p.JSONTimeKey].(string) - if !ok { - err := fmt.Errorf("time: %v could not be converted to string", f.Fields[p.JSONTimeKey]) - return nil, err - } - nTime, err = time.Parse(p.JSONTimeFormat, timeStr) - if err != nil { - return nil, err + if strings.EqualFold(p.JSONTimeFormat, "unix") || strings.EqualFold(p.JSONTimeFormat, "unix_ms") { + nTime, err = parseUnixTimestamp(f.Fields[p.JSONTimeKey], p.JSONTimeFormat) + if err != nil { + return nil, err + } + } else { + timeStr, ok := f.Fields[p.JSONTimeKey].(string) + if !ok { + err := fmt.Errorf("time: %v could not be converted to string", f.Fields[p.JSONTimeKey]) + return nil, err + } + nTime, err = time.Parse(p.JSONTimeFormat, timeStr) + if err != nil { + return nil, err + } } //if the year is 0, set to current year diff --git a/plugins/parsers/json/parser_test.go b/plugins/parsers/json/parser_test.go index 39e43bece..ec9ade251 100644 --- a/plugins/parsers/json/parser_test.go +++ b/plugins/parsers/json/parser_test.go @@ -596,6 +596,72 @@ func TestTimeParser(t *testing.T) { require.Equal(t, false, metrics[0].Time() == metrics[1].Time()) } +func TestUnixTimeParser(t *testing.T) { + testString := `[ + { + "a": 5, + "b": { + "c": 6, + "time": "1536001411.1234567890" + }, + "my_tag_1": "foo", + "my_tag_2": "baz" + }, + { + "a": 7, + "b": { + "c": 8, + "time": 1536002769.123 + }, + "my_tag_1": "bar", + "my_tag_2": "baz" + } + ]` + + parser := JSONParser{ + MetricName: "json_test", + JSONTimeKey: "b_time", + JSONTimeFormat: "unix", + } + metrics, err := parser.Parse([]byte(testString)) + require.NoError(t, err) + require.Equal(t, 2, len(metrics)) + require.Equal(t, false, metrics[0].Time() == metrics[1].Time()) +} + +func TestUnixMsTimeParser(t *testing.T) { + testString := `[ + { + "a": 5, + "b": { + "c": 6, + "time": "1536001411100" + }, + "my_tag_1": "foo", + "my_tag_2": "baz" + }, + { + "a": 7, + "b": { + "c": 8, + "time": 1536002769123 + 
}, + "my_tag_1": "bar", + "my_tag_2": "baz" + } + ]` + + parser := JSONParser{ + MetricName: "json_test", + JSONTimeKey: "b_time", + JSONTimeFormat: "unix_ms", + } + metrics, err := parser.Parse([]byte(testString)) + require.NoError(t, err) + require.Equal(t, 2, len(metrics)) + require.Equal(t, false, metrics[0].Time() == metrics[1].Time()) +} + func TestTimeErrors(t *testing.T) { testString := `{ "a": 5, From e9eeda555ec1a0ade90d953dd4f5dd599e8541d7 Mon Sep 17 00:00:00 2001 From: Greg Date: Thu, 6 Sep 2018 18:45:40 -0600 Subject: [PATCH 0142/1815] Reset/flush saved contents from bad metric (#4646) --- plugins/parsers/influx/parser.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/parsers/influx/parser.go b/plugins/parsers/influx/parser.go index 0b16a2a39..b236a6f10 100644 --- a/plugins/parsers/influx/parser.go +++ b/plugins/parsers/influx/parser.go @@ -63,6 +63,7 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { for p.machine.ParseLine() { err := p.machine.Err() if err != nil { + p.handler.Reset() return nil, &ParseError{ Offset: p.machine.Position(), msg: err.Error(), From 5b5b2e3b39c66498573ada11f8d69021e9a08e2b Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Thu, 6 Sep 2018 18:47:07 -0600 Subject: [PATCH 0143/1815] Update changelog --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e206f7072..d672e6719 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -88,12 +88,13 @@ - [#4628](https://github.com/influxdata/telegraf/pull/4628): Add queue_durability parameter to amqp_consumer input. - [#4476](https://github.com/influxdata/telegraf/pull/4476): Add strings processor. - [#4536](https://github.com/influxdata/telegraf/pull/4536): Add OAuth2 support to HTTP output plugin. - +- [#4633](https://github.com/influxdata/telegraf/pull/4633): Add Unix epoch timestamp support for JSON parser. ### Bugfixes - [#3438](https://github.com/influxdata/telegraf/issues/3438): Fix divide by zero in logparser input. - [#4499](https://github.com/influxdata/telegraf/issues/4499): Fix instance and object name in performance counters with backslashes. +- [#4646](https://github.com/influxdata/telegraf/issues/4646): Reset/flush saved contents from bad metric. ## v1.7.4 [2018-08-29] From 710c101fe0c76856347474a8c74cba0134e792ba Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 7 Sep 2018 16:13:46 -0700 Subject: [PATCH 0144/1815] Undeprecate logparser Until dynamic file tailing can be added to the tail plugin. --- CHANGELOG.md | 6 ------ plugins/inputs/logparser/README.md | 5 ----- plugins/inputs/logparser/logparser.go | 6 ------ 3 files changed, 17 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d672e6719..247ca4a58 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,11 +1,5 @@ ## v1.8 [unreleased] -### Release Notes - -- With the addition of the standalone `grok` input data format, the - `logparser` input plugin has been deprecated in favor of using the `tail` - input plugin combined with `data_format="grok"` . - ### New Inputs - [activemq](./plugins/inputs/activemq/README.md) - Contributed by @mlabouardy diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index 27cbc3cf4..94e37f4c2 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -1,7 +1,5 @@ # Logparser Input Plugin -### **Deprecated in version 1.8**: Please use the [tail](/plugins/inputs/tail) plugin with the `grok` [data format](/docs/DATA_FORMATS_INPUT.md). 
-
 The `logparser` plugin streams and parses the given logfiles. Currently it
 has the capability of parsing "grok" patterns from logfiles, which also supports
 regex patterns.
@@ -10,9 +8,6 @@ regex patterns.
 
 ```toml
 [[inputs.logparser]]
-  ## DEPRECATED: The `logparser` plugin is deprecated in 1.8. Please use the
-  ## `tail` plugin with the grok data_format instead.
-
   ## Log files to parse.
   ## These accept standard unix glob matching rules, but with the addition of
   ## ** as a "super asterisk". ie:
diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go
index b6ce72546..bdfa4bacc 100644
--- a/plugins/inputs/logparser/logparser.go
+++ b/plugins/inputs/logparser/logparser.go
@@ -54,9 +54,6 @@ type LogParserPlugin struct {
 }
 
 const sampleConfig = `
-  ## DEPRECATED: The 'logparser' plugin is deprecated in 1.8. Please use the
-  ## 'tail' plugin with the grok data_format as a replacement.
-
   ## Log files to parse.
   ## These accept standard unix glob matching rules, but with the addition of
   ## ** as a "super asterisk". ie:
@@ -126,9 +123,6 @@ func (l *LogParserPlugin) Gather(acc telegraf.Accumulator) error {
 
 // Start kicks off collection of stats for the plugin
 func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error {
-	log.Println("W! DEPRECATED: The logparser plugin is deprecated in 1.8. " +
-		"Please use the tail plugin with the grok data_format as a replacement.")
-
 	l.Lock()
 	defer l.Unlock()

From 69100f60b8676048e30572cac00db16aaf1c27b3 Mon Sep 17 00:00:00 2001
From: Alexander Shepelin
Date: Mon, 10 Sep 2018 21:51:03 +0300
Subject: [PATCH 0145/1815] Add Beanstalkd input plugin (#4272)

---
 plugins/inputs/all/all.go                    |   1 +
 plugins/inputs/beanstalkd/README.md          |  98 ++++
 plugins/inputs/beanstalkd/beanstalkd.go      | 270 +++++++++++++
 plugins/inputs/beanstalkd/beanstalkd_test.go | 332 +++++++++++++++++++
 4 files changed, 701 insertions(+)
 create mode 100644 plugins/inputs/beanstalkd/README.md
 create mode 100644 plugins/inputs/beanstalkd/beanstalkd.go
 create mode 100644 plugins/inputs/beanstalkd/beanstalkd_test.go

diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index feb462368..ef49f6538 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -7,6 +7,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/apache"
 	_ "github.com/influxdata/telegraf/plugins/inputs/aurora"
 	_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
+	_ "github.com/influxdata/telegraf/plugins/inputs/beanstalkd"
 	_ "github.com/influxdata/telegraf/plugins/inputs/bond"
 	_ "github.com/influxdata/telegraf/plugins/inputs/burrow"
 	_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
diff --git a/plugins/inputs/beanstalkd/README.md b/plugins/inputs/beanstalkd/README.md
new file mode 100644
index 000000000..e4fe2203d
--- /dev/null
+++ b/plugins/inputs/beanstalkd/README.md
@@ -0,0 +1,98 @@
+# Beanstalkd Input Plugin
+
+The `beanstalkd` plugin collects server stats as well as tube stats (reported by the `stats` and `stats-tube` commands respectively).
+
+### Configuration:
+
+```toml
+[[inputs.beanstalkd]]
+  ## Server to collect data from
+  server = "localhost:11300"
+
+  ## List of tubes to gather stats about.
+  ## If no tubes are specified, stats are gathered for every tube reported by the list-tubes command.
+  tubes = ["notifications"]
+```
+
+### Metrics:
+
+Please see the [Beanstalk Protocol doc](https://raw.githubusercontent.com/kr/beanstalkd/master/doc/protocol.txt) for a detailed explanation of the `stats` and `stats-tube` commands' output.
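For context, both commands are line-oriented text requests answered by an `OK <bytes>` status line followed by a YAML body. A minimal sketch of that round trip, assuming a beanstalkd on its default port and mirroring the plugin's `runQuery` helper shown further down:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/textproto"
)

func main() {
	conn, err := textproto.Dial("tcp", "localhost:11300")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	id, err := conn.Cmd("stats") // same wire command the plugin issues
	if err != nil {
		log.Fatal(err)
	}
	conn.StartResponse(id)
	defer conn.EndResponse(id)

	status, err := conn.ReadLine() // e.g. "OK 940"
	if err != nil {
		log.Fatal(err)
	}
	var size int
	if _, err := fmt.Sscanf(status, "OK %d", &size); err != nil {
		log.Fatal(err)
	}

	body := make([]byte, size+2) // YAML payload plus trailing \r\n
	if _, err := io.ReadFull(conn.R, body); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", body) // the YAML the plugin unmarshals into fields
}
```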
+
+`beanstalkd_overview` – statistical information about the system as a whole
+- fields
+    - binlog_current_index
+    - binlog_max_size
+    - binlog_oldest_index
+    - binlog_records_migrated
+    - binlog_records_written
+    - cmd_bury
+    - cmd_delete
+    - cmd_ignore
+    - cmd_kick
+    - cmd_list_tube_used
+    - cmd_list_tubes
+    - cmd_list_tubes_watched
+    - cmd_pause_tube
+    - cmd_peek
+    - cmd_peek_buried
+    - cmd_peek_delayed
+    - cmd_peek_ready
+    - cmd_put
+    - cmd_release
+    - cmd_reserve
+    - cmd_reserve_with_timeout
+    - cmd_stats
+    - cmd_stats_job
+    - cmd_stats_tube
+    - cmd_touch
+    - cmd_use
+    - cmd_watch
+    - current_connections
+    - current_jobs_buried
+    - current_jobs_delayed
+    - current_jobs_ready
+    - current_jobs_reserved
+    - current_jobs_urgent
+    - current_producers
+    - current_tubes
+    - current_waiting
+    - current_workers
+    - job_timeouts
+    - max_job_size
+    - pid
+    - rusage_stime
+    - rusage_utime
+    - total_connections
+    - total_jobs
+    - uptime
+- tags
+    - hostname
+    - id
+    - server (address taken from config)
+    - version
+
+`beanstalkd_tube` – statistical information about the specified tube
+- fields
+    - cmd_delete
+    - cmd_pause_tube
+    - current_jobs_buried
+    - current_jobs_delayed
+    - current_jobs_ready
+    - current_jobs_reserved
+    - current_jobs_urgent
+    - current_using
+    - current_waiting
+    - current_watching
+    - pause
+    - pause_time_left
+    - total_jobs
+- tags
+    - name
+    - server (address taken from config)
+
+### Example Output:
+```
+beanstalkd_overview,host=server.local,hostname=a2ab22ed12e0,id=232485800aa11b24,server=localhost:11300,version=1.10 cmd_stats_tube=29482i,current_jobs_delayed=0i,current_jobs_urgent=6i,cmd_kick=0i,cmd_stats=7378i,cmd_stats_job=0i,current_waiting=0i,max_job_size=65535i,pid=6i,cmd_bury=0i,cmd_reserve_with_timeout=0i,cmd_touch=0i,current_connections=1i,current_jobs_ready=6i,current_producers=0i,cmd_delete=0i,cmd_list_tubes=7369i,cmd_peek_ready=0i,cmd_put=6i,cmd_use=3i,cmd_watch=0i,current_jobs_reserved=0i,rusage_stime=6.07,cmd_list_tubes_watched=0i,cmd_pause_tube=0i,total_jobs=6i,binlog_records_migrated=0i,cmd_list_tube_used=0i,cmd_peek_delayed=0i,cmd_release=0i,current_jobs_buried=0i,job_timeouts=0i,binlog_current_index=0i,binlog_max_size=10485760i,total_connections=7378i,cmd_peek_buried=0i,cmd_reserve=0i,current_tubes=4i,binlog_records_written=0i,cmd_peek=0i,rusage_utime=1.13,uptime=7099i,binlog_oldest_index=0i,current_workers=0i,cmd_ignore=0i 1528801650000000000
+
+beanstalkd_tube,host=server.local,name=notifications,server=localhost:11300 pause_time_left=0i,current_jobs_buried=0i,current_jobs_delayed=0i,current_jobs_reserved=0i,current_using=0i,current_waiting=0i,pause=0i,total_jobs=3i,cmd_delete=0i,cmd_pause_tube=0i,current_jobs_ready=3i,current_jobs_urgent=3i,current_watching=0i 1528801650000000000
+```
diff --git a/plugins/inputs/beanstalkd/beanstalkd.go b/plugins/inputs/beanstalkd/beanstalkd.go
new file mode 100644
index 000000000..932edd301
--- /dev/null
+++ b/plugins/inputs/beanstalkd/beanstalkd.go
@@ -0,0 +1,270 @@
+package beanstalkd
+
+import (
+	"fmt"
+	"io"
+	"net/textproto"
+	"sync"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"gopkg.in/yaml.v2"
+)
+
+const sampleConfig = `
+  ## Server to collect data from
+  server = "localhost:11300"
+
+  ## List of tubes to gather stats about.
+  ## If no tubes are specified, stats are gathered for every tube reported by the list-tubes command.
+  tubes = ["notifications"]
+`
+
+type Beanstalkd struct {
+	Server string   `toml:"server"`
+	Tubes  []string `toml:"tubes"`
+}
+
+func (b *Beanstalkd) Description() string {
+	return "Collects Beanstalkd server and tube stats"
+}
+
+func (b *Beanstalkd) SampleConfig() string {
+	return sampleConfig
+}
+
+func (b *Beanstalkd) Gather(acc telegraf.Accumulator) error {
+	connection, err := textproto.Dial("tcp", b.Server)
+	if err != nil {
+		return err
+	}
+	defer connection.Close()
+
+	tubes := b.Tubes
+	if len(tubes) == 0 {
+		err = runQuery(connection, "list-tubes", &tubes)
+		if err != nil {
+			acc.AddError(err)
+		}
+	}
+
+	var wg sync.WaitGroup
+
+	wg.Add(1)
+	go func() {
+		err := b.gatherServerStats(connection, acc)
+		if err != nil {
+			acc.AddError(err)
+		}
+		wg.Done()
+	}()
+
+	for _, tube := range tubes {
+		wg.Add(1)
+		go func(tube string) {
+			b.gatherTubeStats(connection, tube, acc)
+			wg.Done()
+		}(tube)
+	}
+
+	wg.Wait()
+
+	return nil
+}
+
+func (b *Beanstalkd) gatherServerStats(connection *textproto.Conn, acc telegraf.Accumulator) error {
+	stats := new(statsResponse)
+	if err := runQuery(connection, "stats", stats); err != nil {
+		return err
+	}
+
+	acc.AddFields("beanstalkd_overview",
+		map[string]interface{}{
+			"binlog_current_index":     stats.BinlogCurrentIndex,
+			"binlog_max_size":          stats.BinlogMaxSize,
+			"binlog_oldest_index":      stats.BinlogOldestIndex,
+			"binlog_records_migrated":  stats.BinlogRecordsMigrated,
+			"binlog_records_written":   stats.BinlogRecordsWritten,
+			"cmd_bury":                 stats.CmdBury,
+			"cmd_delete":               stats.CmdDelete,
+			"cmd_ignore":               stats.CmdIgnore,
+			"cmd_kick":                 stats.CmdKick,
+			"cmd_list_tube_used":       stats.CmdListTubeUsed,
+			"cmd_list_tubes":           stats.CmdListTubes,
+			"cmd_list_tubes_watched":   stats.CmdListTubesWatched,
+			"cmd_pause_tube":           stats.CmdPauseTube,
+			"cmd_peek":                 stats.CmdPeek,
+			"cmd_peek_buried":          stats.CmdPeekBuried,
+			"cmd_peek_delayed":         stats.CmdPeekDelayed,
+			"cmd_peek_ready":           stats.CmdPeekReady,
+			"cmd_put":                  stats.CmdPut,
+			"cmd_release":              stats.CmdRelease,
+			"cmd_reserve":              stats.CmdReserve,
+			"cmd_reserve_with_timeout": stats.CmdReserveWithTimeout,
+			"cmd_stats":                stats.CmdStats,
+			"cmd_stats_job":            stats.CmdStatsJob,
+			"cmd_stats_tube":           stats.CmdStatsTube,
+			"cmd_touch":                stats.CmdTouch,
+			"cmd_use":                  stats.CmdUse,
+			"cmd_watch":                stats.CmdWatch,
+			"current_connections":      stats.CurrentConnections,
+			"current_jobs_buried":      stats.CurrentJobsBuried,
+			"current_jobs_delayed":     stats.CurrentJobsDelayed,
+			"current_jobs_ready":       stats.CurrentJobsReady,
+			"current_jobs_reserved":    stats.CurrentJobsReserved,
+			"current_jobs_urgent":      stats.CurrentJobsUrgent,
+			"current_producers":        stats.CurrentProducers,
+			"current_tubes":            stats.CurrentTubes,
+			"current_waiting":          stats.CurrentWaiting,
+			"current_workers":          stats.CurrentWorkers,
+			"job_timeouts":             stats.JobTimeouts,
+			"max_job_size":             stats.MaxJobSize,
+			"pid":                      stats.Pid,
+			"rusage_stime":             stats.RusageStime,
+			"rusage_utime":             stats.RusageUtime,
+			"total_connections":        stats.TotalConnections,
+			"total_jobs":               stats.TotalJobs,
+			"uptime":                   stats.Uptime,
+		},
+		map[string]string{
+			"hostname": stats.Hostname,
+			"id":       stats.Id,
+			"server":   b.Server,
+			"version":  stats.Version,
+		},
+	)
+
+	return nil
+}
+
+func (b *Beanstalkd) gatherTubeStats(connection *textproto.Conn, tube string, acc telegraf.Accumulator) error {
+	stats := new(statsTubeResponse)
+	if err := runQuery(connection, "stats-tube "+tube, stats); err != nil {
+		return err
+	}
+
+	
acc.AddFields("beanstalkd_tube", + map[string]interface{}{ + "cmd_delete": stats.CmdDelete, + "cmd_pause_tube": stats.CmdPauseTube, + "current_jobs_buried": stats.CurrentJobsBuried, + "current_jobs_delayed": stats.CurrentJobsDelayed, + "current_jobs_ready": stats.CurrentJobsReady, + "current_jobs_reserved": stats.CurrentJobsReserved, + "current_jobs_urgent": stats.CurrentJobsUrgent, + "current_using": stats.CurrentUsing, + "current_waiting": stats.CurrentWaiting, + "current_watching": stats.CurrentWatching, + "pause": stats.Pause, + "pause_time_left": stats.PauseTimeLeft, + "total_jobs": stats.TotalJobs, + }, + map[string]string{ + "name": stats.Name, + "server": b.Server, + }, + ) + + return nil +} + +func runQuery(connection *textproto.Conn, cmd string, result interface{}) error { + requestId, err := connection.Cmd(cmd) + if err != nil { + return err + } + + connection.StartResponse(requestId) + defer connection.EndResponse(requestId) + + status, err := connection.ReadLine() + if err != nil { + return err + } + + size := 0 + if _, err = fmt.Sscanf(status, "OK %d", &size); err != nil { + return err + } + + body := make([]byte, size+2) + if _, err = io.ReadFull(connection.R, body); err != nil { + return err + } + + return yaml.Unmarshal(body, result) +} + +func init() { + inputs.Add("beanstalkd", func() telegraf.Input { + return &Beanstalkd{} + }) +} + +type statsResponse struct { + BinlogCurrentIndex int `yaml:"binlog-current-index"` + BinlogMaxSize int `yaml:"binlog-max-size"` + BinlogOldestIndex int `yaml:"binlog-oldest-index"` + BinlogRecordsMigrated int `yaml:"binlog-records-migrated"` + BinlogRecordsWritten int `yaml:"binlog-records-written"` + CmdBury int `yaml:"cmd-bury"` + CmdDelete int `yaml:"cmd-delete"` + CmdIgnore int `yaml:"cmd-ignore"` + CmdKick int `yaml:"cmd-kick"` + CmdListTubeUsed int `yaml:"cmd-list-tube-used"` + CmdListTubes int `yaml:"cmd-list-tubes"` + CmdListTubesWatched int `yaml:"cmd-list-tubes-watched"` + CmdPauseTube int `yaml:"cmd-pause-tube"` + CmdPeek int `yaml:"cmd-peek"` + CmdPeekBuried int `yaml:"cmd-peek-buried"` + CmdPeekDelayed int `yaml:"cmd-peek-delayed"` + CmdPeekReady int `yaml:"cmd-peek-ready"` + CmdPut int `yaml:"cmd-put"` + CmdRelease int `yaml:"cmd-release"` + CmdReserve int `yaml:"cmd-reserve"` + CmdReserveWithTimeout int `yaml:"cmd-reserve-with-timeout"` + CmdStats int `yaml:"cmd-stats"` + CmdStatsJob int `yaml:"cmd-stats-job"` + CmdStatsTube int `yaml:"cmd-stats-tube"` + CmdTouch int `yaml:"cmd-touch"` + CmdUse int `yaml:"cmd-use"` + CmdWatch int `yaml:"cmd-watch"` + CurrentConnections int `yaml:"current-connections"` + CurrentJobsBuried int `yaml:"current-jobs-buried"` + CurrentJobsDelayed int `yaml:"current-jobs-delayed"` + CurrentJobsReady int `yaml:"current-jobs-ready"` + CurrentJobsReserved int `yaml:"current-jobs-reserved"` + CurrentJobsUrgent int `yaml:"current-jobs-urgent"` + CurrentProducers int `yaml:"current-producers"` + CurrentTubes int `yaml:"current-tubes"` + CurrentWaiting int `yaml:"current-waiting"` + CurrentWorkers int `yaml:"current-workers"` + Hostname string `yaml:"hostname"` + Id string `yaml:"id"` + JobTimeouts int `yaml:"job-timeouts"` + MaxJobSize int `yaml:"max-job-size"` + Pid int `yaml:"pid"` + RusageStime float64 `yaml:"rusage-stime"` + RusageUtime float64 `yaml:"rusage-utime"` + TotalConnections int `yaml:"total-connections"` + TotalJobs int `yaml:"total-jobs"` + Uptime int `yaml:"uptime"` + Version string `yaml:"version"` +} + +type statsTubeResponse struct { + CmdDelete int `yaml:"cmd-delete"` + CmdPauseTube 
int `yaml:"cmd-pause-tube"` + CurrentJobsBuried int `yaml:"current-jobs-buried"` + CurrentJobsDelayed int `yaml:"current-jobs-delayed"` + CurrentJobsReady int `yaml:"current-jobs-ready"` + CurrentJobsReserved int `yaml:"current-jobs-reserved"` + CurrentJobsUrgent int `yaml:"current-jobs-urgent"` + CurrentUsing int `yaml:"current-using"` + CurrentWaiting int `yaml:"current-waiting"` + CurrentWatching int `yaml:"current-watching"` + Name string `yaml:"name"` + Pause int `yaml:"pause"` + PauseTimeLeft int `yaml:"pause-time-left"` + TotalJobs int `yaml:"total-jobs"` +} diff --git a/plugins/inputs/beanstalkd/beanstalkd_test.go b/plugins/inputs/beanstalkd/beanstalkd_test.go new file mode 100644 index 000000000..92c108e06 --- /dev/null +++ b/plugins/inputs/beanstalkd/beanstalkd_test.go @@ -0,0 +1,332 @@ +package beanstalkd_test + +import ( + "io" + "net" + "net/textproto" + "testing" + + "github.com/influxdata/telegraf/plugins/inputs/beanstalkd" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestBeanstalkd(t *testing.T) { + type tubeStats struct { + name string + fields map[string]interface{} + } + + tests := []struct { + name string + tubesConfig []string + expectedTubes []tubeStats + notExpectedTubes []tubeStats + }{ + { + name: "All tubes stats", + tubesConfig: []string{}, + expectedTubes: []tubeStats{ + {name: "default", fields: defaultTubeFields}, + {name: "test", fields: testTubeFields}, + }, + notExpectedTubes: []tubeStats{}, + }, + { + name: "Specified tubes stats", + tubesConfig: []string{"test"}, + expectedTubes: []tubeStats{ + {name: "test", fields: testTubeFields}, + }, + notExpectedTubes: []tubeStats{ + {name: "default", fields: defaultTubeFields}, + }, + }, + { + name: "Unknown tube stats", + tubesConfig: []string{"unknown"}, + expectedTubes: []tubeStats{}, + notExpectedTubes: []tubeStats{ + {name: "default", fields: defaultTubeFields}, + {name: "test", fields: testTubeFields}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + server, err := startTestServer(t) + if err != nil { + t.Fatalf("Unable to create test server") + } + defer server.Close() + + serverAddress := server.Addr().String() + plugin := beanstalkd.Beanstalkd{ + Server: serverAddress, + Tubes: test.tubesConfig, + } + + var acc testutil.Accumulator + require.NoError(t, acc.GatherError(plugin.Gather)) + + acc.AssertContainsTaggedFields(t, "beanstalkd_overview", + overviewFields, + getOverviewTags(serverAddress), + ) + + for _, expectedTube := range test.expectedTubes { + acc.AssertContainsTaggedFields(t, "beanstalkd_tube", + expectedTube.fields, + getTubeTags(serverAddress, expectedTube.name), + ) + } + + for _, notExpectedTube := range test.notExpectedTubes { + acc.AssertDoesNotContainsTaggedFields(t, "beanstalkd_tube", + notExpectedTube.fields, + getTubeTags(serverAddress, notExpectedTube.name), + ) + } + }) + } +} + +func startTestServer(t *testing.T) (net.Listener, error) { + server, err := net.Listen("tcp", "localhost:0") + if err != nil { + return nil, err + } + + go func() { + defer server.Close() + + connection, err := server.Accept() + if err != nil { + t.Log("Test server: failed to accept connection. Error: ", err) + return + } + + tp := textproto.NewConn(connection) + defer tp.Close() + + sendSuccessResponse := func(body string) { + tp.PrintfLine("OK %d\r\n%s", len(body), body) + } + + for { + cmd, err := tp.ReadLine() + if err == io.EOF { + return + } else if err != nil { + t.Log("Test server: failed read command. 
Error: ", err) + return + } + + switch cmd { + case "list-tubes": + sendSuccessResponse(listTubesResponse) + case "stats": + sendSuccessResponse(statsResponse) + case "stats-tube default": + sendSuccessResponse(statsTubeDefaultResponse) + case "stats-tube test": + sendSuccessResponse(statsTubeTestResponse) + case "stats-tube unknown": + tp.PrintfLine("NOT_FOUND") + default: + t.Log("Test server: unknown command") + } + } + }() + + return server, nil +} + +const ( + listTubesResponse = `--- +- default +- test +` + statsResponse = `--- +current-jobs-urgent: 5 +current-jobs-ready: 5 +current-jobs-reserved: 0 +current-jobs-delayed: 1 +current-jobs-buried: 0 +cmd-put: 6 +cmd-peek: 0 +cmd-peek-ready: 1 +cmd-peek-delayed: 0 +cmd-peek-buried: 0 +cmd-reserve: 0 +cmd-reserve-with-timeout: 1 +cmd-delete: 1 +cmd-release: 0 +cmd-use: 2 +cmd-watch: 0 +cmd-ignore: 0 +cmd-bury: 1 +cmd-kick: 1 +cmd-touch: 0 +cmd-stats: 1 +cmd-stats-job: 0 +cmd-stats-tube: 2 +cmd-list-tubes: 1 +cmd-list-tube-used: 0 +cmd-list-tubes-watched: 0 +cmd-pause-tube: 0 +job-timeouts: 0 +total-jobs: 6 +max-job-size: 65535 +current-tubes: 2 +current-connections: 2 +current-producers: 1 +current-workers: 1 +current-waiting: 0 +total-connections: 2 +pid: 6 +version: 1.10 +rusage-utime: 0.000000 +rusage-stime: 0.000000 +uptime: 20 +binlog-oldest-index: 0 +binlog-current-index: 0 +binlog-records-migrated: 0 +binlog-records-written: 0 +binlog-max-size: 10485760 +id: bba7546657efdd4c +hostname: 2873efd3e88c +` + statsTubeDefaultResponse = `--- +name: default +current-jobs-urgent: 0 +current-jobs-ready: 0 +current-jobs-reserved: 0 +current-jobs-delayed: 0 +current-jobs-buried: 0 +total-jobs: 0 +current-using: 2 +current-watching: 2 +current-waiting: 0 +cmd-delete: 0 +cmd-pause-tube: 0 +pause: 0 +pause-time-left: 0 +` + statsTubeTestResponse = `--- +name: test +current-jobs-urgent: 5 +current-jobs-ready: 5 +current-jobs-reserved: 0 +current-jobs-delayed: 1 +current-jobs-buried: 0 +total-jobs: 6 +current-using: 0 +current-watching: 0 +current-waiting: 0 +cmd-delete: 0 +cmd-pause-tube: 0 +pause: 0 +pause-time-left: 0 +` +) + +var ( + // Default tube without stats + defaultTubeFields = map[string]interface{}{ + "cmd_delete": 0, + "cmd_pause_tube": 0, + "current_jobs_buried": 0, + "current_jobs_delayed": 0, + "current_jobs_ready": 0, + "current_jobs_reserved": 0, + "current_jobs_urgent": 0, + "current_using": 2, + "current_waiting": 0, + "current_watching": 2, + "pause": 0, + "pause_time_left": 0, + "total_jobs": 0, + } + // Test tube with stats + testTubeFields = map[string]interface{}{ + "cmd_delete": 0, + "cmd_pause_tube": 0, + "current_jobs_buried": 0, + "current_jobs_delayed": 1, + "current_jobs_ready": 5, + "current_jobs_reserved": 0, + "current_jobs_urgent": 5, + "current_using": 0, + "current_waiting": 0, + "current_watching": 0, + "pause": 0, + "pause_time_left": 0, + "total_jobs": 6, + } + // Server stats + overviewFields = map[string]interface{}{ + "binlog_current_index": 0, + "binlog_max_size": 10485760, + "binlog_oldest_index": 0, + "binlog_records_migrated": 0, + "binlog_records_written": 0, + "cmd_bury": 1, + "cmd_delete": 1, + "cmd_ignore": 0, + "cmd_kick": 1, + "cmd_list_tube_used": 0, + "cmd_list_tubes": 1, + "cmd_list_tubes_watched": 0, + "cmd_pause_tube": 0, + "cmd_peek": 0, + "cmd_peek_buried": 0, + "cmd_peek_delayed": 0, + "cmd_peek_ready": 1, + "cmd_put": 6, + "cmd_release": 0, + "cmd_reserve": 0, + "cmd_reserve_with_timeout": 1, + "cmd_stats": 1, + "cmd_stats_job": 0, + "cmd_stats_tube": 2, + "cmd_touch": 0, + "cmd_use": 
2, + "cmd_watch": 0, + "current_connections": 2, + "current_jobs_buried": 0, + "current_jobs_delayed": 1, + "current_jobs_ready": 5, + "current_jobs_reserved": 0, + "current_jobs_urgent": 5, + "current_producers": 1, + "current_tubes": 2, + "current_waiting": 0, + "current_workers": 1, + "job_timeouts": 0, + "max_job_size": 65535, + "pid": 6, + "rusage_stime": 0.0, + "rusage_utime": 0.0, + "total_connections": 2, + "total_jobs": 6, + "uptime": 20, + } +) + +func getOverviewTags(server string) map[string]string { + return map[string]string{ + "hostname": "2873efd3e88c", + "id": "bba7546657efdd4c", + "server": server, + "version": "1.10", + } +} + +func getTubeTags(server string, tube string) map[string]string { + return map[string]string{ + "name": tube, + "server": server, + } +} From 25f9cc0b8dedac6a6acd89fdc4188c345536343d Mon Sep 17 00:00:00 2001 From: pytimer Date: Tue, 11 Sep 2018 02:52:15 +0800 Subject: [PATCH 0146/1815] Add temp input plugin (#4411) --- plugins/inputs/all/all.go | 1 + plugins/inputs/system/mock_PS.go | 10 +++++++ plugins/inputs/system/ps.go | 6 +++++ plugins/inputs/temp/README.md | 32 ++++++++++++++++++++++ plugins/inputs/temp/temp.go | 46 ++++++++++++++++++++++++++++++++ plugins/inputs/temp/temp_test.go | 38 ++++++++++++++++++++++++++ 6 files changed, 133 insertions(+) create mode 100644 plugins/inputs/temp/README.md create mode 100644 plugins/inputs/temp/temp.go create mode 100644 plugins/inputs/temp/temp_test.go diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index ef49f6538..9dcb0dbd3 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -120,6 +120,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/tail" _ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener" _ "github.com/influxdata/telegraf/plugins/inputs/teamspeak" + _ "github.com/influxdata/telegraf/plugins/inputs/temp" _ "github.com/influxdata/telegraf/plugins/inputs/tengine" _ "github.com/influxdata/telegraf/plugins/inputs/tomcat" _ "github.com/influxdata/telegraf/plugins/inputs/trig" diff --git a/plugins/inputs/system/mock_PS.go b/plugins/inputs/system/mock_PS.go index 323332f3e..b3cf2c170 100644 --- a/plugins/inputs/system/mock_PS.go +++ b/plugins/inputs/system/mock_PS.go @@ -7,6 +7,7 @@ import ( "github.com/shirou/gopsutil/cpu" "github.com/shirou/gopsutil/disk" + "github.com/shirou/gopsutil/host" "github.com/shirou/gopsutil/load" "github.com/shirou/gopsutil/mem" @@ -100,6 +101,15 @@ func (m *MockPS) SwapStat() (*mem.SwapMemoryStat, error) { return r0, r1 } +func (m *MockPS) Temperature() ([]host.TemperatureStat, error) { + ret := m.Called() + + r0 := ret.Get(0).([]host.TemperatureStat) + r1 := ret.Error(1) + + return r0, r1 +} + func (m *MockPS) NetConnections() ([]net.ConnectionStat, error) { ret := m.Called() diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go index 038e2a0a8..256aca059 100644 --- a/plugins/inputs/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -10,6 +10,7 @@ import ( "github.com/shirou/gopsutil/cpu" "github.com/shirou/gopsutil/disk" + "github.com/shirou/gopsutil/host" "github.com/shirou/gopsutil/mem" "github.com/shirou/gopsutil/net" ) @@ -23,6 +24,7 @@ type PS interface { VMStat() (*mem.VirtualMemoryStat, error) SwapStat() (*mem.SwapMemoryStat, error) NetConnections() ([]net.ConnectionStat, error) + Temperature() ([]host.TemperatureStat, error) } type PSDiskDeps interface { @@ -168,6 +170,10 @@ func (s *SystemPS) SwapStat() (*mem.SwapMemoryStat, error) { return mem.SwapMemory() } +func (s *SystemPS) 
Temperature() ([]host.TemperatureStat, error) {
+    return host.SensorsTemperatures()
+}
+
 func (s *SystemPSDisk) Partitions(all bool) ([]disk.PartitionStat, error) {
     return disk.Partitions(all)
 }
diff --git a/plugins/inputs/temp/README.md b/plugins/inputs/temp/README.md
new file mode 100644
index 000000000..87f365ca0
--- /dev/null
+++ b/plugins/inputs/temp/README.md
@@ -0,0 +1,32 @@
+# Temp Input Plugin
+
+This input plugin collects temperature readings.
+
+### Configuration:
+
+```
+[[inputs.temp]]
+```
+
+### Measurements & Fields:
+
+All fields are float64.
+
+- temp (unit: °Celsius)
+
+### Tags:
+
+- All measurements have the following tags:
+  - host
+  - sensor
+
+### Example Output:
+
+```
+$ ./telegraf --config telegraf.conf --input-filter temp --test
+* Plugin: temp, Collection 1
+> temp,host=localhost,sensor=coretemp_physicalid0_crit temp=100 1531298763000000000
+> temp,host=localhost,sensor=coretemp_physicalid0_critalarm temp=0 1531298763000000000
+> temp,host=localhost,sensor=coretemp_physicalid0_input temp=100 1531298763000000000
+> temp,host=localhost,sensor=coretemp_physicalid0_max temp=100 1531298763000000000
+```
diff --git a/plugins/inputs/temp/temp.go b/plugins/inputs/temp/temp.go
new file mode 100644
index 000000000..10e61673d
--- /dev/null
+++ b/plugins/inputs/temp/temp.go
@@ -0,0 +1,46 @@
+package temp
+
+import (
+    "fmt"
+
+    "github.com/influxdata/telegraf"
+    "github.com/influxdata/telegraf/plugins/inputs"
+    "github.com/influxdata/telegraf/plugins/inputs/system"
+)
+
+type Temperature struct {
+    ps system.PS
+}
+
+func (t *Temperature) Description() string {
+    return "Read metrics about temperature"
+}
+
+const sampleConfig = ""
+
+func (t *Temperature) SampleConfig() string {
+    return sampleConfig
+}
+
+func (t *Temperature) Gather(acc telegraf.Accumulator) error {
+    temps, err := t.ps.Temperature()
+    if err != nil {
+        return fmt.Errorf("error getting temperatures info: %s", err)
+    }
+    for _, temp := range temps {
+        tags := map[string]string{
+            "sensor": temp.SensorKey,
+        }
+        fields := map[string]interface{}{
+            "temp": temp.Temperature,
+        }
+        acc.AddFields("temp", fields, tags)
+    }
+    return nil
+}
+
+func init() {
+    inputs.Add("temp", func() telegraf.Input {
+        return &Temperature{ps: system.NewSystemPS()}
+    })
+}
diff --git a/plugins/inputs/temp/temp_test.go b/plugins/inputs/temp/temp_test.go
new file mode 100644
index 000000000..080ff66ac
--- /dev/null
+++ b/plugins/inputs/temp/temp_test.go
@@ -0,0 +1,38 @@
+package temp
+
+import (
+    "testing"
+
+    "github.com/shirou/gopsutil/host"
+    "github.com/stretchr/testify/require"
+
+    "github.com/influxdata/telegraf/plugins/inputs/system"
+    "github.com/influxdata/telegraf/testutil"
+)
+
+func TestTemperature(t *testing.T) {
+    var mps system.MockPS
+    var err error
+    defer mps.AssertExpectations(t)
+    var acc testutil.Accumulator
+
+    ts := host.TemperatureStat{
+        SensorKey:   "coretemp_sensor1_crit",
+        Temperature: 60.5,
+    }
+
+    mps.On("Temperature").Return([]host.TemperatureStat{ts}, nil)
+
+    err = (&Temperature{ps: &mps}).Gather(&acc)
+    require.NoError(t, err)
+
+    expectedFields := map[string]interface{}{
+        "temp": float64(60.5),
+    }
+
+    expectedTags := map[string]string{
+        "sensor": "coretemp_sensor1_crit",
+    }
+    acc.AssertContainsTaggedFields(t, "temp", expectedFields, expectedTags)
+}
From 4ef058c1202e7099d88eb16cb0587c0c7f04f7d5 Mon Sep 17 00:00:00 2001
From: Greg
Date: Mon, 10 Sep 2018 12:53:04 -0600
Subject: [PATCH 0147/1815] Document all supported cli arguments (#4655)

---
 cmd/telegraf/telegraf.go | 4 ++--
 internal/usage.go | 25 
++++++++++++++++--------- internal/usage_windows.go | 29 ++++++++++++++++++----------- 3 files changed, 36 insertions(+), 22 deletions(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index e8ac7e660..5b7295d6d 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -35,7 +35,7 @@ var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit") var fConfig = flag.String("config", "", "configuration file to load") var fConfigDirectory = flag.String("config-directory", "", "directory containing additional *.conf files") -var fVersion = flag.Bool("version", false, "display the version") +var fVersion = flag.Bool("version", false, "display the version and exit") var fSampleConfig = flag.Bool("sample-config", false, "print out full sample configuration") var fPidfile = flag.String("pidfile", "", "file to write our pid to") @@ -54,7 +54,7 @@ var fProcessorFilters = flag.String("processor-filter", "", var fUsage = flag.String("usage", "", "print usage for a plugin, ie, 'telegraf --usage mysql'") var fService = flag.String("service", "", - "operate on the service") + "operate on the service (windows only)") var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)") var ( diff --git a/internal/usage.go b/internal/usage.go index 4c00d8f83..99db5aebb 100644 --- a/internal/usage.go +++ b/internal/usage.go @@ -13,15 +13,22 @@ The commands & flags are: config print out full sample configuration to stdout version print the version to stdout - --config configuration file to load - --test gather metrics once, print them to stdout, and exit - --config-directory directory containing additional *.conf files - --input-filter filter the input plugins to enable, separator is : - --output-filter filter the output plugins to enable, separator is : - --usage print usage for a plugin, ie, 'telegraf --usage mysql' - --debug print metrics as they're generated to stdout - --pprof-addr pprof address to listen on, format: localhost:6060 or :6060 - --quiet run in quiet mode + --aggregator-filter filter the aggregators to enable, separator is : + --config configuration file to load + --config-directory directory containing additional *.conf files + --debug turn on debug logging + --input-filter filter the inputs to enable, separator is : + --input-list print available input plugins. + --output-filter filter the outputs to enable, separator is : + --output-list print available output plugins. + --pidfile file to write our pid to + --pprof-addr
pprof address to listen on, don't activate pprof if empty + --processor-filter filter the processors to enable, separator is : + --quiet run in quiet mode + --sample-config print out full sample configuration + --test gather metrics, print them out, and exit + --usage print usage for a plugin, ie, 'telegraf --usage mysql' + --version display the version and exit Examples: diff --git a/internal/usage_windows.go b/internal/usage_windows.go index 109d309ed..585c2996f 100644 --- a/internal/usage_windows.go +++ b/internal/usage_windows.go @@ -13,18 +13,25 @@ The commands & flags are: config print out full sample configuration to stdout version print the version to stdout - --config configuration file to load - --test gather metrics once, print them to stdout, and exit - --config-directory directory containing additional *.conf files - --input-filter filter the input plugins to enable, separator is : - --output-filter filter the output plugins to enable, separator is : - --usage print usage for a plugin, ie, 'telegraf --usage mysql' - --debug print metrics as they're generated to stdout - --pprof-addr pprof address to listen on, format: localhost:6060 or :6060 - --quiet run in quiet mode + --aggregator-filter filter the aggregators to enable, separator is : + --config configuration file to load + --config-directory directory containing additional *.conf files + --debug turn on debug logging + --input-filter filter the inputs to enable, separator is : + --input-list print available input plugins. + --output-filter filter the outputs to enable, separator is : + --output-list print available output plugins. + --pidfile file to write our pid to + --pprof-addr
pprof address to listen on, don't activate pprof if empty
+  --processor-filter      filter the processors to enable, separator is :
+  --quiet                 run in quiet mode
+  --sample-config         print out full sample configuration
+  --test                  gather metrics, print them out, and exit
+  --usage                 print usage for a plugin, ie, 'telegraf --usage mysql'
+  --version               display the version and exit
 
-  --console               run as console application
-  --service               operate on service, one of: install, uninstall, start, stop
+  --console               run as console application (windows only)
+  --service               operate on the service (windows only)
 
 Examples:
From 1ca17652cda94b731473e52315a311653f3a200b Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 10 Sep 2018 11:55:08 -0700
Subject: [PATCH 0148/1815] Fix parsing and documentation for json_string_fields (#4656)

---
 docs/DATA_FORMATS_INPUT.md | 10 +++++-----
 internal/config/config.go  |  6 +++---
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md
index 235e3b308..e0358e95c 100644
--- a/docs/DATA_FORMATS_INPUT.md
+++ b/docs/DATA_FORMATS_INPUT.md
@@ -108,11 +108,11 @@ but can be overridden using the `name_override` config option.
 
 #### JSON Configuration:
 
-The JSON data format supports specifying "tag_keys", "string_keys", and "json_query".
-If specified, keys in "tag_keys" and "string_keys" will be searched for in the root-level
+The JSON data format supports specifying "tag_keys", "json_string_fields", and "json_query".
+If specified, keys in "tag_keys" and "json_string_fields" will be searched for in the root-level
 and any nested lists of the JSON blob. All int and float values are added to fields by default.
 If the key(s) exist, they will be applied as tags or fields to the Telegraf metrics.
-If "string_keys" is specified, the string will be added as a field.
+If "json_string_fields" is specified, the string will be added as a field.
 
 The "json_query" configuration is a gjson path to a JSON object or
 list of JSON objects. If this path leads to an array of values or
@@ -225,7 +225,7 @@ For example, if the following configuration:
   ]
 
   ## List of field names to extract from JSON and add as string fields
-  # string_fields = []
+  # json_string_fields = []
 
   ## gjson query path to specify a specific chunk of JSON to be parsed with
   ## the above configuration. If not specified, the whole file will be parsed
@@ -293,7 +293,7 @@ For example, with the following config:
   tag_keys = ["first"]
 
   ## List of field names to extract from JSON and add as string fields
-  string_fields = ["last"]
+  json_string_fields = ["last"]
 
   ## gjson query path to specify a specific chunk of JSON to be parsed with
   ## the above configuration. 
If not specified, the whole file will be parsed diff --git a/internal/config/config.go b/internal/config/config.go index c712af85e..e10a5fc4c 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1563,11 +1563,11 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { delete(tbl.Fields, "separator") delete(tbl.Fields, "templates") delete(tbl.Fields, "tag_keys") - delete(tbl.Fields, "string_fields") - delete(tbl.Fields, "json_query") delete(tbl.Fields, "json_name_key") - delete(tbl.Fields, "json_time_key") + delete(tbl.Fields, "json_query") + delete(tbl.Fields, "json_string_fields") delete(tbl.Fields, "json_time_format") + delete(tbl.Fields, "json_time_key") delete(tbl.Fields, "data_type") delete(tbl.Fields, "collectd_auth_file") delete(tbl.Fields, "collectd_security_level") From eb36e8f4964b68859f420503232bcab4f65eb76f Mon Sep 17 00:00:00 2001 From: Greg Date: Mon, 10 Sep 2018 12:56:42 -0600 Subject: [PATCH 0149/1815] Add options for basic auth to haproxy input (#4657) --- plugins/inputs/haproxy/haproxy.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/plugins/inputs/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go index 19087a978..9c22acad9 100644 --- a/plugins/inputs/haproxy/haproxy.go +++ b/plugins/inputs/haproxy/haproxy.go @@ -23,6 +23,8 @@ import ( type haproxy struct { Servers []string KeepFieldNames bool + Username string + Password string tls.ClientConfig client *http.Client @@ -37,6 +39,10 @@ var sampleConfig = ` ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats servers = ["http://myhaproxy.com:1936/haproxy?stats"] + ## Credentials for basic HTTP authentication + # username = "admin" + # password = "admin" + ## You can also use local socket with standard wildcard globbing. ## Server address not starting with 'http' will be treated as a possible ## socket, so both examples below are valid. @@ -163,6 +169,12 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { if u.User != nil { p, _ := u.User.Password() req.SetBasicAuth(u.User.Username(), p) + u.User = &url.Userinfo{} + addr = u.String() + } + + if g.Username != "" || g.Password != "" { + req.SetBasicAuth(g.Username, g.Password) } res, err := g.client.Do(req) From 3618f5dc984e65e79e14851fdc17620377159aa4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 10 Sep 2018 12:34:05 -0700 Subject: [PATCH 0150/1815] Update changelog --- CHANGELOG.md | 6 ++++++ README.md | 2 ++ 2 files changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 247ca4a58..d30531eb1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,11 +3,13 @@ ### New Inputs - [activemq](./plugins/inputs/activemq/README.md) - Contributed by @mlabouardy +- [beanstalkd](./plugins/inputs/beanstalkd/README.md) - Contributed by @44px - [filecount](./plugins/inputs/filecount/README.md) - Contributed by @sometimesfood - [file](./plugins/inputs/file/README.md) - Contributed by @maxunt - [icinga2](./plugins/inputs/icinga2/README.md) - Contributed by @mlabouardy - [kibana](./plugins/inputs/icinga2/README.md) - Contributed by @lpic10 - [pgbouncer](./plugins/inputs/pgbouncer/README.md) - Contributed by @nerzhul +- [temp](./plugins/inputs/temp/README.md) - Contributed by @pytimer - [tengine](./plugins/inputs/tengine/README.md) - Contributed by @ertaoxu - [x509_cert](./plugins/inputs/x509_cert/README.md) - Contributed by @jtyr @@ -83,12 +85,16 @@ - [#4476](https://github.com/influxdata/telegraf/pull/4476): Add strings processor. 
- [#4536](https://github.com/influxdata/telegraf/pull/4536): Add OAuth2 support to HTTP output plugin. - [#4633](https://github.com/influxdata/telegraf/pull/4633): Add Unix epoch timestamp support for JSON parser. +- [#4657](https://github.com/influxdata/telegraf/pull/4657): Add options for basic auth to haproxy input. +- [#4411](https://github.com/influxdata/telegraf/pull/4411): Add temp input plugin. +- [#4272](https://github.com/influxdata/telegraf/pull/4272): Add Beanstalkd input plugin. ### Bugfixes - [#3438](https://github.com/influxdata/telegraf/issues/3438): Fix divide by zero in logparser input. - [#4499](https://github.com/influxdata/telegraf/issues/4499): Fix instance and object name in performance counters with backslashes. - [#4646](https://github.com/influxdata/telegraf/issues/4646): Reset/flush saved contents from bad metric. +- [#4520](https://github.com/influxdata/telegraf/issues/4520): Document all supported cli arguments. ## v1.7.4 [2018-08-29] diff --git a/README.md b/README.md index 00dc8ecf6..7ac997ff6 100644 --- a/README.md +++ b/README.md @@ -134,6 +134,7 @@ configuration options. * [aurora](./plugins/inputs/aurora) * [aws cloudwatch](./plugins/inputs/cloudwatch) * [bcache](./plugins/inputs/bcache) +* [beanstalkd](./plugins/inputs/beanstalkd) * [bond](./plugins/inputs/bond) * [burrow](./plugins/inputs/burrow) * [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) @@ -242,6 +243,7 @@ configuration options. * [sysstat](./plugins/inputs/sysstat) * [system](./plugins/inputs/system) * [tail](./plugins/inputs/tail) +* [temp](./plugins/inputs/temp) * [tcp_listener](./plugins/inputs/socket_listener) * [teamspeak](./plugins/inputs/teamspeak) * [tengine](./plugins/inputs/tengine) From 23a8498963f7c06afb2ed6fd9389e882d70ee8a3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 10 Sep 2018 15:14:14 -0700 Subject: [PATCH 0151/1815] Fix locking if output is an AggregatingOutput --- internal/models/running_output.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/internal/models/running_output.go b/internal/models/running_output.go index c926917d6..014202454 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -94,6 +94,9 @@ func NewRunningOutput( // AddMetric adds a metric to the output. This function can also write cached // points if FlushBufferWhenFull is true. func (ro *RunningOutput) AddMetric(m telegraf.Metric) { + ro.Lock() + defer ro.Unlock() + if m == nil { return } @@ -115,8 +118,6 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) { } if output, ok := ro.Output.(telegraf.AggregatingOutput); ok { - ro.Lock() - defer ro.Unlock() output.Add(m) return } @@ -134,6 +135,9 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) { // Write writes all cached points to this output. func (ro *RunningOutput) Write() error { + ro.Lock() + defer ro.Unlock() + if output, ok := ro.Output.(telegraf.AggregatingOutput); ok { metrics := output.Push() ro.metrics.Add(metrics...) 
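+    // Note: ro's lock is now held for the whole of Write (taken at the top),
+    // so this Push and buffer handoff cannot interleave with concurrent
+    // AddMetric calls, which take the same lock.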
@@ -188,8 +192,6 @@ func (ro *RunningOutput) write(metrics []telegraf.Metric) error { if nMetrics == 0 { return nil } - ro.Lock() - defer ro.Unlock() start := time.Now() err := ro.Output.Write(metrics) elapsed := time.Since(start) From ed28cfb9f6baddbeaa53a52dce75be099fb6e1ef Mon Sep 17 00:00:00 2001 From: Greg Date: Mon, 10 Sep 2018 17:45:36 -0600 Subject: [PATCH 0152/1815] Add means to specify server password for redis input (#4669) --- plugins/inputs/redis/redis.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index 766463cfd..cd438397c 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -18,7 +18,8 @@ import ( ) type Redis struct { - Servers []string + Servers []string + Password string tls.ClientConfig clients []Client @@ -59,6 +60,9 @@ var sampleConfig = ` ## If no port is specified, 6379 is used servers = ["tcp://localhost:6379"] + ## specify server password + # password = "s#cr@t%" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -110,6 +114,9 @@ func (r *Redis) init(acc telegraf.Accumulator) error { password = pw } } + if len(r.Password) > 0 { + password = r.Password + } var address string if u.Scheme == "unix" { From c0485a50eedfc7c5b6af94bdd4a83c48fc5697c0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 10 Sep 2018 16:46:39 -0700 Subject: [PATCH 0153/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d30531eb1..a49c22db7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -88,6 +88,7 @@ - [#4657](https://github.com/influxdata/telegraf/pull/4657): Add options for basic auth to haproxy input. - [#4411](https://github.com/influxdata/telegraf/pull/4411): Add temp input plugin. - [#4272](https://github.com/influxdata/telegraf/pull/4272): Add Beanstalkd input plugin. +- [#4669](https://github.com/influxdata/telegraf/pull/4669): Add means to specify server password for redis input. ### Bugfixes From e85a9e09565949c204ef522b4dc3f9a27bfc7a6d Mon Sep 17 00:00:00 2001 From: Jesse Date: Tue, 11 Sep 2018 12:32:12 +1000 Subject: [PATCH 0154/1815] Add Zookeeper Jolokia2 example config (#4659) --- plugins/inputs/jolokia2/README.md | 1 + .../inputs/jolokia2/examples/zookeeper.conf | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+) create mode 100644 plugins/inputs/jolokia2/examples/zookeeper.conf diff --git a/plugins/inputs/jolokia2/README.md b/plugins/inputs/jolokia2/README.md index 1efc59f1f..190e6627d 100644 --- a/plugins/inputs/jolokia2/README.md +++ b/plugins/inputs/jolokia2/README.md @@ -181,5 +181,6 @@ Both `jolokia2_agent` and `jolokia2_proxy` plugins support default configuration - [Kafka](/plugins/inputs/jolokia2/examples/kafka.conf) - [Tomcat](/plugins/inputs/jolokia2/examples/tomcat.conf) - [Weblogic](/plugins/inputs/jolokia2/examples/weblogic.conf) +- [ZooKeeper](/plugins/inputs/jolokia2/examples/zookeeper.conf) Please help improve this list and contribute new configuration files by opening an issue or pull request. 
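+
+A quick way to confirm that an MBean pattern from one of these example files actually
+resolves is to query the Jolokia agent's `read` endpoint directly. The sketch below is
+a minimal illustration only: the agent URL and the ZooKeeper MBean pattern are
+assumptions mirroring the ZooKeeper example config added below, not something the
+plugin requires.
+
+```go
+package main
+
+import (
+    "encoding/json"
+    "fmt"
+    "net/http"
+)
+
+func main() {
+    // Assumed agent address; matches the urls setting in the example configs.
+    // Jolokia exposes JMX reads as GET /jolokia/read/<mbean pattern>.
+    url := "http://localhost:8080/jolokia/read/org.apache.ZooKeeperService:name0=*"
+
+    resp, err := http.Get(url)
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+
+    // Jolokia wraps results in an envelope with "status", "request" and "value" keys.
+    var envelope map[string]interface{}
+    if err := json.NewDecoder(resp.Body).Decode(&envelope); err != nil {
+        panic(err)
+    }
+    fmt.Println("status:", envelope["status"])
+    fmt.Println("value:", envelope["value"])
+}
+```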
diff --git a/plugins/inputs/jolokia2/examples/zookeeper.conf b/plugins/inputs/jolokia2/examples/zookeeper.conf new file mode 100644 index 000000000..eac29c284 --- /dev/null +++ b/plugins/inputs/jolokia2/examples/zookeeper.conf @@ -0,0 +1,18 @@ +[[inputs.jolokia2_agent]] + urls = ["http://localhost:8080/jolokia"] + name_prefix = "zk_" + + [[inputs.jolokia2_agent.metrics]] + name = "quorum" + mbean = "org.apache.ZooKeeperService:name0=*" + tag_keys = ["name0"] + + [[inputs.jolokia2_agent.metrics]] + name = "leader" + mbean = "org.apache.ZooKeeperService:name0=*,name1=*,name2=Leader" + tag_keys = ["name1"] + + [[inputs.jolokia2_agent.metrics]] + name = "follower" + mbean = "org.apache.ZooKeeperService:name0=*,name1=*,name2=Follower" + tag_keys = ["name1"] From c80aab0445446ea9b7791e15abab8b6c863e4c1d Mon Sep 17 00:00:00 2001 From: Lance O'Connor Date: Tue, 11 Sep 2018 13:01:08 -0700 Subject: [PATCH 0155/1815] Add Splunk Metrics serializer (#4339) --- docs/DATA_FORMATS_OUTPUT.md | 1 + internal/config/config.go | 13 ++ plugins/serializers/registry.go | 10 + plugins/serializers/splunkmetric/README.md | 139 +++++++++++++ .../serializers/splunkmetric/splunkmetric.go | 126 ++++++++++++ .../splunkmetric/splunkmetric_test.go | 182 ++++++++++++++++++ 6 files changed, 471 insertions(+) create mode 100644 plugins/serializers/splunkmetric/README.md create mode 100644 plugins/serializers/splunkmetric/splunkmetric.go create mode 100644 plugins/serializers/splunkmetric/splunkmetric_test.go diff --git a/docs/DATA_FORMATS_OUTPUT.md b/docs/DATA_FORMATS_OUTPUT.md index f4e41c254..609021656 100644 --- a/docs/DATA_FORMATS_OUTPUT.md +++ b/docs/DATA_FORMATS_OUTPUT.md @@ -7,6 +7,7 @@ plugins. 1. [InfluxDB Line Protocol](#influx) 1. [JSON](#json) 1. [Graphite](#graphite) +1. [SplunkMetric](../plugins/serializers/splunkmetric/README.md) You will be able to identify the plugins with support by the presence of a `data_format` config option, for example, in the `file` output plugin: diff --git a/internal/config/config.go b/internal/config/config.go index e10a5fc4c..2208268d2 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1693,6 +1693,18 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error } } + if node, ok := tbl.Fields["splunkmetric_hec_routing"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if b, ok := kv.Value.(*ast.Boolean); ok { + var err error + c.HecRouting, err = b.Boolean() + if err != nil { + return nil, err + } + } + } + } + delete(tbl.Fields, "influx_max_line_bytes") delete(tbl.Fields, "influx_sort_fields") delete(tbl.Fields, "influx_uint_support") @@ -1701,6 +1713,7 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error delete(tbl.Fields, "prefix") delete(tbl.Fields, "template") delete(tbl.Fields, "json_timestamp_units") + delete(tbl.Fields, "splunkmetric_hec_routing") return serializers.NewSerializer(c) } diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index 277d33206..b8a0aef07 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -9,6 +9,7 @@ import ( "github.com/influxdata/telegraf/plugins/serializers/graphite" "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/plugins/serializers/json" + "github.com/influxdata/telegraf/plugins/serializers/splunkmetric" ) // SerializerOutput is an interface for output plugins that are able to @@ -60,6 +61,9 @@ type Config struct { // Timestamp units to use for JSON formatted 
output
     TimestampUnits time.Duration
+
+    // Include HEC routing fields for splunkmetric output
+    HecRouting bool
 }
 
 // NewSerializer a Serializer interface based on the given config.
@@ -73,6 +77,8 @@ func NewSerializer(config *Config) (Serializer, error) {
         serializer, err = NewGraphiteSerializer(config.Prefix, config.Template, config.GraphiteTagSupport)
     case "json":
         serializer, err = NewJsonSerializer(config.TimestampUnits)
+    case "splunkmetric":
+        serializer, err = NewSplunkmetricSerializer(config.HecRouting)
     default:
         err = fmt.Errorf("Invalid data format: %s", config.DataFormat)
     }
@@ -83,6 +89,10 @@ func NewJsonSerializer(timestampUnits time.Duration) (Serializer, error) {
     return json.NewSerializer(timestampUnits)
 }
 
+func NewSplunkmetricSerializer(splunkmetric_hec_routing bool) (Serializer, error) {
+    return splunkmetric.NewSerializer(splunkmetric_hec_routing)
+}
+
 func NewInfluxSerializerConfig(config *Config) (Serializer, error) {
     var sort influx.FieldSortOrder
     if config.InfluxSortFields {
diff --git a/plugins/serializers/splunkmetric/README.md b/plugins/serializers/splunkmetric/README.md
new file mode 100644
index 000000000..02d69db66
--- /dev/null
+++ b/plugins/serializers/splunkmetric/README.md
@@ -0,0 +1,139 @@
+# Splunk Metrics serializer
+
+This serializer formats and outputs the metric data in a format that can be consumed by a Splunk metrics index.
+It can be used to write to a file using the file output, or to send metrics to a HEC using the standard Telegraf HTTP output.
+
+If you're using the HTTP output, this serializer knows how to batch the metrics so you don't end up with an HTTP POST per metric.
+
+The data is output in a format that conforms to the specified Splunk HEC JSON format as found here:
+[Send metrics in JSON format](http://dev.splunk.com/view/event-collector/SP-CAAAFDN).
+
+An example event looks like:
+```javascript
+{
+  "time": 1529708430,
+  "event": "metric",
+  "host": "patas-mbp",
+  "fields": {
+    "_value": 0.6,
+    "cpu": "cpu0",
+    "dc": "mobile",
+    "metric_name": "cpu.usage_user",
+    "user": "ronnocol"
+  }
+}
+```
+In the above snippet, the following keys are dimensions:
+* cpu
+* dc
+* user
+
+## Using with the HTTP output
+
+To send this data to a Splunk HEC, you can use the HTTP output. There are some custom headers that you need to add
+to manage the HEC authorization; here's a sample config for an HTTP output:
+
+```toml
+[[outputs.http]]
+   ## URL is the address to send metrics to
+   url = "https://localhost:8088/services/collector"
+
+   ## Timeout for HTTP message
+   # timeout = "5s"
+
+   ## HTTP method, one of: "POST" or "PUT"
+   # method = "POST"
+
+   ## HTTP Basic Auth credentials
+   # username = "username"
+   # password = "pa$$word"
+
+   ## Optional TLS Config
+   # tls_ca = "/etc/telegraf/ca.pem"
+   # tls_cert = "/etc/telegraf/cert.pem"
+   # tls_key = "/etc/telegraf/key.pem"
+   ## Use TLS but skip chain & host verification
+   # insecure_skip_verify = false
+
+   ## Data format to output.
+   ## Each data format has its own unique set of configuration options, read
+   ## more about them here:
+   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+   data_format = "splunkmetric"
+   ## Provides time, index, source overrides for the HEC
+   splunkmetric_hec_routing = true
+
+   ## Additional HTTP headers
+   [outputs.http.headers]
+   # Should be set manually to "application/json" for json data_format
+   Content-Type = "application/json"
+   Authorization = "Splunk xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+   X-Splunk-Request-Channel = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+```
+
+## Overrides
+You can override the default values for the HEC token you are using by adding additional tags to the config file.
+
+The following aspects of the token can be overridden with tags:
+* index
+* source
+
+You can either use `[global_tags]` or use a more advanced configuration as documented [here](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md).
+
+For example, this configuration overrides the index just on the cpu metric:
+```toml
+[[inputs.cpu]]
+  percpu = false
+  totalcpu = true
+  [inputs.cpu.tags]
+    index = "cpu_metrics"
+```
+
+## Using with the File output
+
+You can use the file output when running telegraf on a machine with a Splunk forwarder.
+
+A sample event when `splunkmetric_hec_routing` is false (or unset) looks like:
+```javascript
+{
+    "_value": 0.6,
+    "cpu": "cpu0",
+    "dc": "mobile",
+    "metric_name": "cpu.usage_user",
+    "user": "ronnocol",
+    "time": 1529708430
+}
+```
+Data formatted in this manner can be ingested with a simple `props.conf` file that
+looks like this:
+
+```ini
+[telegraf]
+category = Metrics
+description = Telegraf Metrics
+pulldown_type = 1
+DATETIME_CONFIG =
+NO_BINARY_CHECK = true
+SHOULD_LINEMERGE = true
+disabled = false
+INDEXED_EXTRACTIONS = json
+KV_MODE = none
+TIMESTAMP_FIELDS = time
+TIME_FORMAT = %s.%3N
+```
+
+An example configuration of a file-based output is:
+
+```toml
+ # Send telegraf metrics to file(s)
+[[outputs.file]]
+   ## Files to write to, "stdout" is a specially handled file.
+   files = ["/tmp/metrics.out"]
+
+   ## Data format to output.
+   ## Each data format has its own unique set of configuration options, read
+   ## more about them here:
+   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+   data_format = "splunkmetric"
+   splunkmetric_hec_routing = false
+```
diff --git a/plugins/serializers/splunkmetric/splunkmetric.go b/plugins/serializers/splunkmetric/splunkmetric.go
new file mode 100644
index 000000000..77de49ee0
--- /dev/null
+++ b/plugins/serializers/splunkmetric/splunkmetric.go
@@ -0,0 +1,126 @@
+package splunkmetric
+
+import (
+    "encoding/json"
+    "fmt"
+    "log"
+
+    "github.com/influxdata/telegraf"
+)
+
+type serializer struct {
+    HecRouting bool
+}
+
+func NewSerializer(splunkmetric_hec_routing bool) (*serializer, error) {
+    s := &serializer{
+        HecRouting: splunkmetric_hec_routing,
+    }
+    return s, nil
+}
+
+func (s *serializer) Serialize(metric telegraf.Metric) ([]byte, error) {
+
+    m, err := s.createObject(metric)
+    if err != nil {
+        return nil, fmt.Errorf("D! [serializer.splunkmetric] Dropping invalid metric: %s", metric.Name())
+    }
+
+    return m, nil
+}
+
+func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
+
+    var serialized []byte
+
+    for _, metric := range metrics {
+        m, err := s.createObject(metric)
+        if err != nil {
+            return nil, fmt.Errorf("D! [serializer.splunkmetric] Dropping invalid metric: %s", metric.Name())
+        } else if m != nil {
+            serialized = append(serialized, m...)
+        }
+    }
+
+    return serialized, nil
+}
+
+func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte, err error) {
+
+    /* Splunk supports one metric JSON object, and does _not_ support an array of JSON objects.
+    ** Splunk has the following required names for the metric store:
+    ** metric_name: The name of the metric
+    ** _value: The value for the metric
+    ** time: The timestamp for the metric
+    ** All other index fields become dimensions.
+    */
+    type HECTimeSeries struct {
+        Time   float64                `json:"time"`
+        Event  string                 `json:"event"`
+        Host   string                 `json:"host,omitempty"`
+        Index  string                 `json:"index,omitempty"`
+        Source string                 `json:"source,omitempty"`
+        Fields map[string]interface{} `json:"fields"`
+    }
+
+    dataGroup := HECTimeSeries{}
+    var metricJson []byte
+
+    for _, field := range metric.FieldList() {
+
+        if !verifyValue(field.Value) {
+            log.Printf("D! Cannot parse value: %v for key: %v", field.Value, field.Key)
+            continue
+        }
+
+        obj := map[string]interface{}{}
+        obj["metric_name"] = metric.Name() + "." + field.Key
+        obj["_value"] = field.Value
+
+        dataGroup.Event = "metric"
+        // Convert ns to float seconds since epoch.
+        dataGroup.Time = float64(metric.Time().UnixNano()) / float64(1000000000)
+        dataGroup.Fields = obj
+
+        // Break tags out into key(n)=value(t) pairs
+        for n, t := range metric.Tags() {
+            if n == "host" {
+                dataGroup.Host = t
+            } else if n == "index" {
+                dataGroup.Index = t
+            } else if n == "source" {
+                dataGroup.Source = t
+            } else {
+                dataGroup.Fields[n] = t
+            }
+        }
+        dataGroup.Fields["metric_name"] = metric.Name() + "." + field.Key
+        dataGroup.Fields["_value"] = field.Value
+
+        switch s.HecRouting {
+        case true:
+            // Output the data as a fields array and host,index,time,source overrides for the HEC.
+            metricJson, err = json.Marshal(dataGroup)
+        default:
+            // Just output the data and the time, useful for file-based outputs
+            dataGroup.Fields["time"] = dataGroup.Time
+            metricJson, err = json.Marshal(dataGroup.Fields)
+        }
+
+        metricGroup = append(metricGroup, metricJson...)
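+        // json.Marshal's error is checked just below, after the append; on error
+        // the whole batch is abandoned, so the appended bytes are simply discarded.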
+ + if err != nil { + return nil, err + } + } + + return metricGroup, nil +} + +func verifyValue(v interface{}) bool { + switch v.(type) { + case string: + return false + } + return true +} diff --git a/plugins/serializers/splunkmetric/splunkmetric_test.go b/plugins/serializers/splunkmetric/splunkmetric_test.go new file mode 100644 index 000000000..f3825d803 --- /dev/null +++ b/plugins/serializers/splunkmetric/splunkmetric_test.go @@ -0,0 +1,182 @@ +package splunkmetric + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +func MustMetric(v telegraf.Metric, err error) telegraf.Metric { + if err != nil { + panic(err) + } + return v +} + +func TestSerializeMetricFloat(t *testing.T) { + // Test sub-second time + now := time.Unix(1529875740, 819000000) + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer(false) + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + expS := `{"_value":91.5,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":1529875740.819}` + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricFloatHec(t *testing.T) { + // Test sub-second time + now := time.Unix(1529875740, 819000000) + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer(true) + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + expS := `{"time":1529875740.819,"event":"metric","fields":{"_value":91.5,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricInt(t *testing.T) { + now := time.Unix(0, 0) + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": int64(90), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer(false) + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + expS := `{"_value":90,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricIntHec(t *testing.T) { + now := time.Unix(0, 0) + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": int64(90), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer(true) + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + expS := `{"time":0,"event":"metric","fields":{"_value":90,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricString(t *testing.T) { + now := time.Unix(0, 0) + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "processorType": "ARMv7 Processor rev 4 (v7l)", + "usage_idle": int64(5), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer(false) + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + expS := `{"_value":5,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` + assert.Equal(t, string(expS), string(buf)) + assert.NoError(t, err) +} + +func TestSerializeBatch(t *testing.T) { + m := MustMetric( + 
metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + ) + n := MustMetric( + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 92.0, + }, + time.Unix(0, 0), + ), + ) + + metrics := []telegraf.Metric{m, n} + s, _ := NewSerializer(false) + buf, err := s.SerializeBatch(metrics) + assert.NoError(t, err) + + expS := `{"_value":42,"metric_name":"cpu.value","time":0}` + `{"_value":92,"metric_name":"cpu.value","time":0}` + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeBatchHec(t *testing.T) { + m := MustMetric( + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + ) + n := MustMetric( + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 92.0, + }, + time.Unix(0, 0), + ), + ) + + metrics := []telegraf.Metric{m, n} + s, _ := NewSerializer(true) + buf, err := s.SerializeBatch(metrics) + assert.NoError(t, err) + + expS := `{"time":0,"event":"metric","fields":{"_value":42,"metric_name":"cpu.value"}}` + `{"time":0,"event":"metric","fields":{"_value":92,"metric_name":"cpu.value"}}` + assert.Equal(t, string(expS), string(buf)) +} From 54446132288182b561697714178963b7e5fab3cc Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 11 Sep 2018 13:22:07 -0700 Subject: [PATCH 0156/1815] Update changelog --- CHANGELOG.md | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a49c22db7..37885ec00 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,7 +16,7 @@ ### New Processors - [enum](./plugins/processors/enum/README.md) - Contributed by @KarstenSchnitter -- [parser](./plugins/processors/parser/README.md) - Contributed by @maxunt & @Ayrdrie +- [parser](./plugins/processors/parser/README.md) - Contributed by @Ayrdrie & @maxunt - [rename](./plugins/processors/rename/README.md) - Contributed by @goldibex - [strings](./plugins/processors/strings/README.md) - Contributed by @bsmaldon @@ -28,6 +28,16 @@ - [azure_monitor](./plugins/outputs/azure_monitor/README.md) - Contributed by @influxdata +### New Parsers + +- [csv](/docs/DATA_FORMATS_INPUT.md#csv) - Contributed by @maxunt +- [grok](/docs/DATA_FORMATS_INPUT.md#grok) - Contributed by @maxunt +- [logfmt](/docs/DATA_FORMATS_INPUT.md#logfmt) - Contributed by @Ayrdrie & @maxunt + +### New Serializers + +- [splunkmetric](/plugins/serializer/splunkmetric/README.md) - Contributed by @ronnocol + ### Features - [#4236](https://github.com/influxdata/telegraf/pull/4236): Add SSL/TLS support to redis input. @@ -89,6 +99,7 @@ - [#4411](https://github.com/influxdata/telegraf/pull/4411): Add temp input plugin. - [#4272](https://github.com/influxdata/telegraf/pull/4272): Add Beanstalkd input plugin. - [#4669](https://github.com/influxdata/telegraf/pull/4669): Add means to specify server password for redis input. 
+- [#4339](https://github.com/influxdata/telegraf/pull/4339): Add Splunk Metrics serializer.
From 85c004bb056c078ab1216163488fed5d967159a7 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 11 Sep 2018 13:38:57 -0700
Subject: [PATCH 0157/1815] Fix documentation link

---
 CHANGELOG.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 37885ec00..cf05e9311 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -36,7 +36,7 @@
 
 ### New Serializers
 
-- [splunkmetric](/plugins/serializer/splunkmetric/README.md) - Contributed by @ronnocol
+- [splunkmetric](/plugins/serializers/splunkmetric/README.md) - Contributed by @ronnocol
 
 ### Features
From fa1c572096376727ca46b7ae5620ee890af93c01 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 11 Sep 2018 13:52:13 -0700
Subject: [PATCH 0158/1815] Add link to line protocol in data format docs

---
 docs/DATA_FORMATS_INPUT.md | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md
index e0358e95c..ff9160812 100644
--- a/docs/DATA_FORMATS_INPUT.md
+++ b/docs/DATA_FORMATS_INPUT.md
@@ -14,9 +14,10 @@ Telegraf is able to parse the following input data formats into metrics:
 1. [Wavefront](#wavefront)
 1. [CSV](#csv)
 
-Telegraf metrics, like InfluxDB
-[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
-are a combination of four basic parts:
+Telegraf metrics, similar to InfluxDB's [points][influxdb key concepts], are a
+combination of four basic parts:
+
+[influxdb key concepts]: https://docs.influxdata.com/influxdb/v1.6/concepts/key_concepts/
 
 1. Measurement Name
 1. Tags
@@ -59,9 +60,11 @@ I'll go over below.
 
 # Influx:
 
-There are no additional configuration options for InfluxDB line-protocol. The
+There are no additional configuration options for InfluxDB [line protocol][]. The
 metrics are parsed directly into Telegraf metrics. 
+[line protocol]: https://docs.influxdata.com/influxdb/latest/write_protocols/line/ + #### Influx Configuration: ```toml From 5f3c331f79924514ad83f6f8ed4f44250c38b877 Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Tue, 11 Sep 2018 17:53:46 -0400 Subject: [PATCH 0159/1815] Add input plugin for VMware vSphere (#4141) --- Gopkg.lock | 45 ++ Gopkg.toml | 4 + plugins/inputs/all/all.go | 1 + plugins/inputs/vsphere/METRICS.MD | 287 +++++++++ plugins/inputs/vsphere/README.MD | 354 ++++++++++ plugins/inputs/vsphere/client.go | 175 +++++ plugins/inputs/vsphere/endpoint.go | 852 +++++++++++++++++++++++++ plugins/inputs/vsphere/selfhealth.go | 53 ++ plugins/inputs/vsphere/vsphere.go | 312 +++++++++ plugins/inputs/vsphere/vsphere_test.go | 246 +++++++ plugins/inputs/vsphere/workerpool.go | 119 ++++ 11 files changed, 2448 insertions(+) create mode 100644 plugins/inputs/vsphere/METRICS.MD create mode 100644 plugins/inputs/vsphere/README.MD create mode 100644 plugins/inputs/vsphere/client.go create mode 100644 plugins/inputs/vsphere/endpoint.go create mode 100644 plugins/inputs/vsphere/selfhealth.go create mode 100644 plugins/inputs/vsphere/vsphere.go create mode 100644 plugins/inputs/vsphere/vsphere_test.go create mode 100644 plugins/inputs/vsphere/workerpool.go diff --git a/Gopkg.lock b/Gopkg.lock index b592346a8..ed161e69b 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -448,6 +448,14 @@ revision = "3af367b6b30c263d47e8895973edcca9a49cf029" version = "v0.2.0" +[[projects]] + digest = "1:c1d7e883c50a26ea34019320d8ae40fad86c9e5d56e63a1ba2cb618cef43e986" + name = "github.com/google/uuid" + packages = ["."] + pruneopts = "" + revision = "064e2069ce9c359c118179501254f67d7d37ba24" + version = "0.2" + [[projects]] digest = "1:dbbeb8ddb0be949954c8157ee8439c2adfd8dc1c9510eb44a6e58cb68c3dce28" name = "github.com/gorilla/context" @@ -949,6 +957,36 @@ revision = "ce01e59abcf6fbc9833b7deb5e4b8ee1769bcc53" version = "v1.0.0" +[[projects]] + digest = "1:f9fe29bf856d49f9a51d6001588cb5ee5d65c8a7ff5e8b0dd5423c3a510f0833" + name = "github.com/vmware/govmomi" + packages = [ + ".", + "find", + "list", + "nfc", + "object", + "performance", + "property", + "session", + "simulator", + "simulator/esx", + "simulator/vpx", + "task", + "view", + "vim25", + "vim25/debug", + "vim25/methods", + "vim25/mo", + "vim25/progress", + "vim25/soap", + "vim25/types", + "vim25/xml", + ] + pruneopts = "" + revision = "e3a01f9611c32b2362366434bcd671516e78955d" + version = "v0.18.0" + [[projects]] branch = "master" digest = "1:98ed05e9796df287b90c1d96854e3913c8e349dbc546412d3cabb472ecf4b417" @@ -1329,6 +1367,13 @@ "github.com/stretchr/testify/require", "github.com/tidwall/gjson", "github.com/vjeantet/grok", + "github.com/vmware/govmomi", + "github.com/vmware/govmomi/performance", + "github.com/vmware/govmomi/simulator", + "github.com/vmware/govmomi/view", + "github.com/vmware/govmomi/vim25/mo", + "github.com/vmware/govmomi/vim25/soap", + "github.com/vmware/govmomi/vim25/types", "github.com/wvanbergen/kafka/consumergroup", "github.com/zensqlmonitor/go-mssqldb", "golang.org/x/net/context", diff --git a/Gopkg.toml b/Gopkg.toml index b4576ed6f..c89578397 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -223,6 +223,10 @@ name = "gopkg.in/fsnotify.v1" [[constraint]] + name = "github.com/vmware/govmomi" + version = "0.18.0" + + [[constraint]] name = "github.com/Azure/go-autorest" version = "10.12.0" diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 9dcb0dbd3..02008ffd5 100644 --- a/plugins/inputs/all/all.go +++ 
b/plugins/inputs/all/all.go
@@ -128,6 +128,7 @@ import (
     _ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
     _ "github.com/influxdata/telegraf/plugins/inputs/unbound"
     _ "github.com/influxdata/telegraf/plugins/inputs/varnish"
+    _ "github.com/influxdata/telegraf/plugins/inputs/vsphere"
     _ "github.com/influxdata/telegraf/plugins/inputs/webhooks"
     _ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
     _ "github.com/influxdata/telegraf/plugins/inputs/win_services"
diff --git a/plugins/inputs/vsphere/METRICS.MD b/plugins/inputs/vsphere/METRICS.MD
new file mode 100644
index 000000000..0b9e0482f
--- /dev/null
+++ b/plugins/inputs/vsphere/METRICS.MD
@@ -0,0 +1,287 @@
+# Common vSphere Performance Metrics
+The set of performance metrics in vSphere is open-ended. Metrics may be added or removed in new releases,
+and the set of available metrics may vary depending on hardware, as well as on what plugins and add-on products
+are installed. Therefore, providing a definitive list of available metrics is difficult. The metrics listed
+below are the most commonly available as of vSphere 6.5.
+
+To list the exact set in your environment, please use the govc tool available [here](https://github.com/vmware/govmomi/tree/master/govc).
+
+To obtain the set of metrics for, e.g., a VM, you may use the following command:
+```
+govc metric.ls vm/*
+```
+
+## Virtual Machine Metrics
+```
+cpu.demandEntitlementRatio.latest
+cpu.usage.average
+cpu.ready.summation
+cpu.run.summation
+cpu.system.summation
+cpu.swapwait.summation
+cpu.costop.summation
+cpu.demand.average
+cpu.readiness.average
+cpu.maxlimited.summation
+cpu.wait.summation
+cpu.usagemhz.average
+cpu.latency.average
+cpu.used.summation
+cpu.overlap.summation
+cpu.idle.summation
+cpu.entitlement.latest
+datastore.maxTotalLatency.latest
+disk.usage.average
+disk.read.average
+disk.write.average
+disk.maxTotalLatency.latest
+mem.llSwapUsed.average
+mem.swapin.average
+mem.vmmemctltarget.average
+mem.activewrite.average
+mem.overhead.average
+mem.vmmemctl.average
+mem.zero.average
+mem.swapoutRate.average
+mem.active.average
+mem.llSwapOutRate.average
+mem.swapout.average
+mem.llSwapInRate.average
+mem.swapinRate.average
+mem.granted.average
+mem.latency.average
+mem.overheadMax.average
+mem.swapped.average
+mem.compressionRate.average
+mem.swaptarget.average
+mem.shared.average
+mem.zipSaved.latest
+mem.overheadTouched.average
+mem.zipped.latest
+mem.consumed.average
+mem.entitlement.average
+mem.usage.average
+mem.decompressionRate.average
+mem.compressed.average
+net.multicastRx.summation
+net.transmitted.average
+net.received.average
+net.usage.average
+net.broadcastTx.summation
+net.broadcastRx.summation
+net.packetsRx.summation
+net.pnicBytesRx.average
+net.multicastTx.summation
+net.bytesTx.average
+net.bytesRx.average
+net.droppedRx.summation
+net.pnicBytesTx.average
+net.droppedTx.summation
+net.packetsTx.summation
+power.power.average
+power.energy.summation
+rescpu.runpk1.latest
+rescpu.runpk15.latest
+rescpu.maxLimited5.latest
+rescpu.actpk5.latest
+rescpu.samplePeriod.latest
+rescpu.runav1.latest
+rescpu.runav15.latest
+rescpu.sampleCount.latest
+rescpu.actpk1.latest
+rescpu.runpk5.latest
+rescpu.runav5.latest
+rescpu.actav15.latest
+rescpu.actav1.latest
+rescpu.actpk15.latest
+rescpu.actav5.latest
+rescpu.maxLimited1.latest
+rescpu.maxLimited15.latest
+sys.osUptime.latest
+sys.uptime.latest
+sys.heartbeat.latest
+virtualDisk.write.average
+virtualDisk.read.average
+```
+
+## Host System Metrics
+```
+cpu.corecount.contention.average
+cpu.usage.average +cpu.reservedCapacity.average +cpu.usagemhz.minimum +cpu.usagemhz.maximum +cpu.usage.minimum +cpu.usage.maximum +cpu.capacity.provisioned.average +cpu.capacity.usage.average +cpu.capacity.demand.average +cpu.capacity.contention.average +cpu.corecount.provisioned.average +cpu.corecount.usage.average +cpu.usagemhz.average +disk.throughput.contention.average +disk.throughput.usage.average +mem.decompressionRate.average +mem.granted.average +mem.active.average +mem.shared.average +mem.zero.average +mem.swapused.average +mem.vmmemctl.average +mem.compressed.average +mem.compressionRate.average +mem.reservedCapacity.average +mem.capacity.provisioned.average +mem.capacity.usable.average +mem.capacity.usage.average +mem.capacity.entitlement.average +mem.capacity.contention.average +mem.usage.minimum +mem.overhead.minimum +mem.consumed.minimum +mem.granted.minimum +mem.active.minimum +mem.shared.minimum +mem.zero.minimum +mem.swapused.minimum +mem.consumed.average +mem.usage.maximum +mem.overhead.maximum +mem.consumed.maximum +mem.granted.maximum +mem.overhead.average +mem.shared.maximum +mem.zero.maximum +mem.swapused.maximum +mem.vmmemctl.maximum +mem.usage.average +mem.active.maximum +mem.vmmemctl.minimum +net.throughput.contention.summation +net.throughput.usage.average +net.throughput.usable.average +net.throughput.provisioned.average +power.power.average +power.powerCap.average +power.energy.summation +vmop.numShutdownGuest.latest +vmop.numPoweroff.latest +vmop.numSuspend.latest +vmop.numReset.latest +vmop.numRebootGuest.latest +vmop.numStandbyGuest.latest +vmop.numPoweron.latest +vmop.numCreate.latest +vmop.numDestroy.latest +vmop.numRegister.latest +vmop.numUnregister.latest +vmop.numReconfigure.latest +vmop.numClone.latest +vmop.numDeploy.latest +vmop.numChangeHost.latest +vmop.numChangeDS.latest +vmop.numChangeHostDS.latest +vmop.numVMotion.latest +vmop.numSVMotion.latest +vmop.numXVMotion.latest +``` + +## Cluster Metrics +``` +cpu.corecount.contention.average +cpu.usage.average +cpu.reservedCapacity.average +cpu.usagemhz.minimum +cpu.usagemhz.maximum +cpu.usage.minimum +cpu.usage.maximum +cpu.capacity.provisioned.average +cpu.capacity.usage.average +cpu.capacity.demand.average +cpu.capacity.contention.average +cpu.corecount.provisioned.average +cpu.corecount.usage.average +cpu.usagemhz.average +disk.throughput.contention.average +disk.throughput.usage.average +mem.decompressionRate.average +mem.granted.average +mem.active.average +mem.shared.average +mem.zero.average +mem.swapused.average +mem.vmmemctl.average +mem.compressed.average +mem.compressionRate.average +mem.reservedCapacity.average +mem.capacity.provisioned.average +mem.capacity.usable.average +mem.capacity.usage.average +mem.capacity.entitlement.average +mem.capacity.contention.average +mem.usage.minimum +mem.overhead.minimum +mem.consumed.minimum +mem.granted.minimum +mem.active.minimum +mem.shared.minimum +mem.zero.minimum +mem.swapused.minimum +mem.consumed.average +mem.usage.maximum +mem.overhead.maximum +mem.consumed.maximum +mem.granted.maximum +mem.overhead.average +mem.shared.maximum +mem.zero.maximum +mem.swapused.maximum +mem.vmmemctl.maximum +mem.usage.average +mem.active.maximum +mem.vmmemctl.minimum +net.throughput.contention.summation +net.throughput.usage.average +net.throughput.usable.average +net.throughput.provisioned.average +power.power.average +power.powerCap.average +power.energy.summation +vmop.numShutdownGuest.latest +vmop.numPoweroff.latest +vmop.numSuspend.latest 
+vmop.numReset.latest +vmop.numRebootGuest.latest +vmop.numStandbyGuest.latest +vmop.numPoweron.latest +vmop.numCreate.latest +vmop.numDestroy.latest +vmop.numRegister.latest +vmop.numUnregister.latest +vmop.numReconfigure.latest +vmop.numClone.latest +vmop.numDeploy.latest +vmop.numChangeHost.latest +vmop.numChangeDS.latest +vmop.numChangeHostDS.latest +vmop.numVMotion.latest +vmop.numSVMotion.latest +vmop.numXVMotion.latest +``` + +## Datastore Metrics +``` +datastore.numberReadAveraged.average +datastore.throughput.contention.average +datastore.throughput.usage.average +datastore.write.average +datastore.read.average +datastore.numberWriteAveraged.average +disk.used.latest +disk.provisioned.latest +disk.capacity.latest +disk.capacity.contention.average +disk.capacity.provisioned.average +disk.capacity.usage.average +``` \ No newline at end of file diff --git a/plugins/inputs/vsphere/README.MD b/plugins/inputs/vsphere/README.MD new file mode 100644 index 000000000..12332ea66 --- /dev/null +++ b/plugins/inputs/vsphere/README.MD @@ -0,0 +1,354 @@ +# VMware vSphere Input Plugin + +The VMware vSphere plugin uses the vSphere API to gather metrics from multiple vCenter servers. + +* Clusters +* Hosts +* VMs +* Data stores + +## Configuration + +NOTE: To disable collection of a specific resource type, simply exclude all metrics using the XX_metric_exclude. +For example, to disable collection of VMs, add this: +```vm_metric_exclude = [ "*" ]``` + +``` +# Read metrics from one or many vCenters +[[inputs.vsphere]] + ## List of vCenter URLs to be monitored. These three lines must be uncommented + ## and edited for the plugin to work. + vcenters = [ "https://vcenter.local/sdk" ] + username = "user@corp.local" + password = "secret" + + ## VMs + ## Typical VM metrics (if omitted or empty, all metrics are collected) + vm_metric_include = [ + "cpu.demand.average", + "cpu.idle.summation", + "cpu.latency.average", + "cpu.readiness.average", + "cpu.ready.summation", + "cpu.run.summation", + "cpu.usagemhz.average", + "cpu.used.summation", + "cpu.wait.summation", + "mem.active.average", + "mem.granted.average", + "mem.latency.average", + "mem.swapin.average", + "mem.swapinRate.average", + "mem.swapout.average", + "mem.swapoutRate.average", + "mem.usage.average", + "mem.vmmemctl.average", + "net.bytesRx.average", + "net.bytesTx.average", + "net.droppedRx.summation", + "net.droppedTx.summation", + "net.usage.average", + "power.power.average", + "virtualDisk.numberReadAveraged.average", + "virtualDisk.numberWriteAveraged.average", + "virtualDisk.read.average", + "virtualDisk.readOIO.latest", + "virtualDisk.throughput.usage.average", + "virtualDisk.totalReadLatency.average", + "virtualDisk.totalWriteLatency.average", + "virtualDisk.write.average", + "virtualDisk.writeOIO.latest", + "sys.uptime.latest", + ] + # vm_metric_exclude = [] ## Nothing is excluded by default + # vm_instances = true ## true by default + + ## Hosts + ## Typical host metrics (if omitted or empty, all metrics are collected) + host_metric_include = [ + "cpu.coreUtilization.average", + "cpu.costop.summation", + "cpu.demand.average", + "cpu.idle.summation", + "cpu.latency.average", + "cpu.readiness.average", + "cpu.ready.summation", + "cpu.swapwait.summation", + "cpu.usage.average", + "cpu.usagemhz.average", + "cpu.used.summation", + "cpu.utilization.average", + "cpu.wait.summation", + "disk.deviceReadLatency.average", + "disk.deviceWriteLatency.average", + "disk.kernelReadLatency.average", + "disk.kernelWriteLatency.average", + 
"disk.numberReadAveraged.average", + "disk.numberWriteAveraged.average", + "disk.read.average", + "disk.totalReadLatency.average", + "disk.totalWriteLatency.average", + "disk.write.average", + "mem.active.average", + "mem.latency.average", + "mem.state.latest", + "mem.swapin.average", + "mem.swapinRate.average", + "mem.swapout.average", + "mem.swapoutRate.average", + "mem.totalCapacity.average", + "mem.usage.average", + "mem.vmmemctl.average", + "net.bytesRx.average", + "net.bytesTx.average", + "net.droppedRx.summation", + "net.droppedTx.summation", + "net.errorsRx.summation", + "net.errorsTx.summation", + "net.usage.average", + "power.power.average", + "storageAdapter.numberReadAveraged.average", + "storageAdapter.numberWriteAveraged.average", + "storageAdapter.read.average", + "storageAdapter.write.average", + "sys.uptime.latest", + ] + # host_metric_exclude = [] ## Nothing excluded by default + # host_instances = true ## true by default + + ## Clusters + # cluster_metric_include = [] ## if omitted or empty, all metrics are collected + # cluster_metric_exclude = [] ## Nothing excluded by default + # cluster_instances = true ## true by default + + ## Datastores + # datastore_metric_include = [] ## if omitted or empty, all metrics are collected + # datastore_metric_exclude = [] ## Nothing excluded by default + # datastore_instances = false ## false by default for Datastores only + + ## Datacenters + datacenter_metric_include = [] ## if omitted or empty, all metrics are collected + datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. + # datacenter_instances = false ## false by default for Datastores only + + ## Plugin Settings + ## separator character to use for measurement and field names (default: "_") + # separator = "_" + + ## number of objects to retreive per query for realtime resources (vms and hosts) + ## set to 64 for vCenter 5.5 and 6.0 (default: 256) + # max_query_objects = 256 + + ## number of metrics to retreive per query for non-realtime resources (clusters and datastores) + ## set to 64 for vCenter 5.5 and 6.0 (default: 256) + # max_query_metrics = 256 + + ## number of go routines to use for collection and discovery of objects and metrics + # collect_concurrency = 1 + # discover_concurrency = 1 + + ## whether or not to force discovery of new objects on initial gather call before collecting metrics + ## when true for large environments this may cause errors for time elapsed while collecting metrics + ## when false (default) the first collection cycle may result in no or limited metrics while objects are discovered + # force_discover_on_init = false + + ## the interval before (re)discovering objects subject to metrics collection (default: 300s) + # object_discovery_interval = "300s" + + ## timeout applies to any of the api request made to vcenter + # timeout = "20s" + + ## Optional SSL Config + # ssl_ca = "/path/to/cafile" + # ssl_cert = "/path/to/certfile" + # ssl_key = "/path/to/keyfile" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false +``` + +### Objects and Metrics Per Query + +Default settings for vCenter 6.5 and above is 256. Prior versions of vCenter have this set to 64. A vCenter administrator +can change this setting, which should be reflected in this plugin. See this [VMware KB article](https://kb.vmware.com/s/article/2107096) +for more information. 
+
+### Collection and Discovery Concurrency
+
+On large vCenter setups it may be prudent to have multiple concurrent goroutines collect performance metrics,
+in order to avoid errors caused by the time elapsed during a collection cycle. These settings should never be
+greater than 8, though the default of 1 (no concurrency) should be sufficient for most configurations.
+
+## Measurements & Fields
+
+- Cluster Stats:
+  - Cluster services: CPU, memory, failover
+  - CPU: total, usage
+  - Memory: consumed, total, vmmemctl
+  - VM operations: # changes, clone, create, deploy, destroy, power, reboot, reconfigure, register, reset, shutdown, standby, vmotion
+- Host Stats:
+  - CPU: total, usage, cost, mhz
+  - Datastore: iops, latency, read/write bytes, # reads/writes
+  - Disk: commands, latency, kernel reads/writes, # reads/writes, queues
+  - Memory: total, usage, active, latency, swap, shared, vmmemctl
+  - Network: broadcast, bytes, dropped, errors, multicast, packets, usage
+  - Power: energy, usage, capacity
+  - Res CPU: active, max, running
+  - Storage Adapter: commands, latency, # reads/writes
+  - Storage Path: commands, latency, # reads/writes
+  - System Resources: cpu active, cpu max, cpu running, cpu usage, mem allocated, mem consumed, mem shared, swap
+  - System: uptime
+  - Flash Module: active VMDKs
+- VM Stats:
+  - CPU: demand, usage, readiness, cost, mhz
+  - Datastore: latency, # reads/writes
+  - Disk: commands, latency, # reads/writes, provisioned, usage
+  - Memory: granted, usage, active, swap, vmmemctl
+  - Network: broadcast, bytes, dropped, multicast, packets, usage
+  - Power: energy, usage
+  - Res CPU: active, max, running
+  - System: operating system uptime, uptime
+  - Virtual Disk: seeks, # reads/writes, latency, load
+- Datastore Stats:
+  - Disk: capacity, provisioned, used
+
+For a detailed list of commonly available metrics, please refer to [METRICS.MD](METRICS.MD)
+
+## Tags
+
+- all metrics
+  - vcenter (vcenter url)
+- all host metrics
+  - cluster (vcenter cluster)
+- all vm metrics
+  - cluster (vcenter cluster)
+  - esxhost (name of ESXi host)
+  - guest (guest operating system id)
+- cpu stats for Host and VM
+  - cpu (cpu core - not all CPU fields will have this tag)
+- datastore stats for Host and VM
+  - datastore (id of datastore)
+- disk stats for Host and VM
+  - disk (name of disk)
+- disk.used.capacity for Datastore
+  - disk (type of disk)
+- net stats for Host and VM
+  - interface (name of network interface)
+- storageAdapter stats for Host
+  - adapter (name of storage adapter)
+- storagePath stats for Host
+  - path (id of storage path)
+- sys.resource* stats for Host
+  - resource (resource type)
+- vflashModule stats for Host
+  - module (name of flash module)
+- virtualDisk stats for VM
+  - disk (name of virtual disk)
+
+## Sample output
+
+```
+vsphere_vm_cpu,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 run_summation=2608i,ready_summation=129i,usage_average=5.01,used_summation=2134i,demand_average=326i 1535660299000000000
+vsphere_vm_net,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 bytesRx_average=321i,bytesTx_average=335i 1535660299000000000
+vsphere_vm_virtualDisk,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 write_average=144i,read_average=4i 1535660299000000000
+vsphere_vm_net,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 bytesRx_average=242i,bytesTx_average=308i 1535660299000000000 +vsphere_vm_virtualDisk,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 write_average=232i,read_average=4i 1535660299000000000 +vsphere_vm_cpu,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 usage_average=5.49,used_summation=1804i,demand_average=308i,run_summation=2001i,ready_summation=120i 1535660299000000000 +vsphere_vm_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 usage_average=4.19,used_summation=2108i,demand_average=285i,run_summation=1793i,ready_summation=93i 1535660299000000000 +vsphere_vm_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 bytesRx_average=272i,bytesTx_average=419i 1535660299000000000 +vsphere_vm_virtualDisk,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 write_average=229i,read_average=4i 1535660299000000000 +vsphere_vm_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 run_summation=2277i,ready_summation=118i,usage_average=4.67,used_summation=2546i,demand_average=289i 1535660299000000000 +vsphere_vm_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 bytesRx_average=243i,bytesTx_average=296i 1535660299000000000 +vsphere_vm_virtualDisk,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 write_average=158i,read_average=4i 1535660299000000000 +vsphere_host_net,esxhostname=DC0_H0,host=host.example.com,interface=vmnic0,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 usage_average=1042i,bytesTx_average=753i,bytesRx_average=660i 1535660299000000000 +vsphere_host_cpu,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 utilization_average=10.46,usage_average=22.4,readiness_average=0.4,costop_summation=2i,coreUtilization_average=19.61,wait_summation=5148518i,idle_summation=58581i,latency_average=0.6,ready_summation=13370i,used_summation=19219i 1535660299000000000 +vsphere_host_cpu,cpu=0,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 coreUtilization_average=25.6,utilization_average=11.58,used_summation=24306i,usage_average=24.26,idle_summation=86688i 1535660299000000000 +vsphere_host_cpu,cpu=1,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 coreUtilization_average=12.29,utilization_average=8.32,used_summation=31312i,usage_average=22.47,idle_summation=94934i 1535660299000000000 +vsphere_host_disk,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 read_average=331i,write_average=2800i 1535660299000000000 
+vsphere_host_disk,disk=/var/folders/rf/txwdm4pj409f70wnkdlp7sz80000gq/T/govcsim-DC0-LocalDS_0-367088371@folder-5,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 write_average=2701i,read_average=258i 1535660299000000000 +vsphere_host_mem,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 usage_average=93.27 1535660299000000000 +vsphere_host_net,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 bytesTx_average=650i,usage_average=1414i,bytesRx_average=569i 1535660299000000000 +vsphere_host_cpu,clustername=DC0_C0,cpu=1,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 utilization_average=12.6,used_summation=25775i,usage_average=24.44,idle_summation=68886i,coreUtilization_average=17.59 1535660299000000000 +vsphere_host_disk,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 read_average=340i,write_average=2340i 1535660299000000000 +vsphere_host_disk,clustername=DC0_C0,disk=/var/folders/rf/txwdm4pj409f70wnkdlp7sz80000gq/T/govcsim-DC0-LocalDS_0-367088371@folder-5,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 write_average=2277i,read_average=282i 1535660299000000000 +vsphere_host_mem,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 usage_average=104.78 1535660299000000000 +vsphere_host_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 bytesTx_average=463i,usage_average=1131i,bytesRx_average=719i 1535660299000000000 +vsphere_host_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,interface=vmnic0,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 usage_average=1668i,bytesTx_average=838i,bytesRx_average=921i 1535660299000000000 +vsphere_host_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 used_summation=28952i,utilization_average=11.36,idle_summation=93261i,latency_average=0.46,ready_summation=12837i,usage_average=21.56,readiness_average=0.39,costop_summation=2i,coreUtilization_average=27.19,wait_summation=3820829i 1535660299000000000 +vsphere_host_cpu,clustername=DC0_C0,cpu=0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 coreUtilization_average=24.12,utilization_average=13.83,used_summation=22462i,usage_average=24.69,idle_summation=96993i 1535660299000000000 +internal_vsphere,host=host.example.com,os=Mac,vcenter=localhost:8989 connect_ns=4727607i,discover_ns=65389011i,discovered_objects=8i 1535660309000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=datastore,vcenter=localhost:8989 gather_duration_ns=296223i,gather_count=0i 1535660309000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=vm,vcenter=192.168.1.151 gather_duration_ns=136050i,gather_count=0i 1535660309000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=host,vcenter=localhost:8989 gather_count=62i,gather_duration_ns=8788033i 1535660309000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=host,vcenter=192.168.1.151 gather_count=0i,gather_duration_ns=162002i 1535660309000000000 +internal_gather,host=host.example.com,input=vsphere,os=Mac 
gather_time_ns=17483653i,metrics_gathered=28i 1535660309000000000 +internal_vsphere,host=host.example.com,os=Mac,vcenter=192.168.1.151 connect_ns=0i 1535660309000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=vm,vcenter=localhost:8989 gather_duration_ns=7291897i,gather_count=36i 1535660309000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=datastore,vcenter=192.168.1.151 gather_duration_ns=958474i,gather_count=0i 1535660309000000000 +vsphere_vm_cpu,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 usage_average=8.82,used_summation=3192i,demand_average=283i,run_summation=2419i,ready_summation=115i 1535660319000000000 +vsphere_vm_net,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 bytesRx_average=277i,bytesTx_average=343i 1535660319000000000 +vsphere_vm_virtualDisk,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 read_average=1i,write_average=741i 1535660319000000000 +vsphere_vm_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 bytesRx_average=386i,bytesTx_average=369i 1535660319000000000 +vsphere_vm_virtualDisk,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 write_average=814i,read_average=1i 1535660319000000000 +vsphere_vm_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 run_summation=1778i,ready_summation=111i,usage_average=7.54,used_summation=2339i,demand_average=297i 1535660319000000000 +vsphere_vm_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 usage_average=6.98,used_summation=2125i,demand_average=211i,run_summation=2990i,ready_summation=141i 1535660319000000000 +vsphere_vm_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 bytesRx_average=357i,bytesTx_average=268i 1535660319000000000 +vsphere_vm_virtualDisk,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 write_average=528i,read_average=1i 1535660319000000000 +vsphere_vm_cpu,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 used_summation=2374i,demand_average=195i,run_summation=3454i,ready_summation=110i,usage_average=7.34 1535660319000000000 +vsphere_vm_net,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 bytesRx_average=308i,bytesTx_average=246i 1535660319000000000 +vsphere_vm_virtualDisk,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 write_average=1178i,read_average=1i 1535660319000000000 +vsphere_host_net,esxhostname=DC0_H0,host=host.example.com,interface=vmnic0,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 
bytesRx_average=773i,usage_average=1521i,bytesTx_average=890i 1535660319000000000 +vsphere_host_cpu,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 wait_summation=3421258i,idle_summation=67994i,latency_average=0.36,usage_average=29.86,readiness_average=0.37,used_summation=25244i,costop_summation=2i,coreUtilization_average=21.94,utilization_average=17.19,ready_summation=15897i 1535660319000000000 +vsphere_host_cpu,cpu=0,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 utilization_average=11.32,used_summation=19333i,usage_average=14.29,idle_summation=92708i,coreUtilization_average=27.68 1535660319000000000 +vsphere_host_cpu,cpu=1,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 used_summation=28596i,usage_average=25.32,idle_summation=79553i,coreUtilization_average=28.01,utilization_average=11.33 1535660319000000000 +vsphere_host_disk,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 read_average=86i,write_average=1659i 1535660319000000000 +vsphere_host_disk,disk=/var/folders/rf/txwdm4pj409f70wnkdlp7sz80000gq/T/govcsim-DC0-LocalDS_0-367088371@folder-5,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 write_average=1997i,read_average=58i 1535660319000000000 +vsphere_host_mem,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 usage_average=68.45 1535660319000000000 +vsphere_host_net,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 bytesTx_average=679i,usage_average=2286i,bytesRx_average=719i 1535660319000000000 +vsphere_host_cpu,clustername=DC0_C0,cpu=1,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 utilization_average=10.52,used_summation=21693i,usage_average=23.09,idle_summation=84590i,coreUtilization_average=29.92 1535660319000000000 +vsphere_host_disk,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 read_average=113i,write_average=1236i 1535660319000000000 +vsphere_host_disk,clustername=DC0_C0,disk=/var/folders/rf/txwdm4pj409f70wnkdlp7sz80000gq/T/govcsim-DC0-LocalDS_0-367088371@folder-5,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 write_average=1708i,read_average=110i 1535660319000000000 +vsphere_host_mem,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 usage_average=111.46 1535660319000000000 +vsphere_host_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 bytesTx_average=998i,usage_average=2000i,bytesRx_average=881i 1535660319000000000 +vsphere_host_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,interface=vmnic0,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 usage_average=1683i,bytesTx_average=675i,bytesRx_average=1078i 1535660319000000000 +vsphere_host_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 
used_summation=28531i,wait_summation=3139129i,utilization_average=9.99,idle_summation=98579i,latency_average=0.51,costop_summation=2i,coreUtilization_average=14.35,ready_summation=16121i,usage_average=34.19,readiness_average=0.4 1535660319000000000 +vsphere_host_cpu,clustername=DC0_C0,cpu=0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 utilization_average=12.2,used_summation=22750i,usage_average=18.84,idle_summation=99539i,coreUtilization_average=23.05 1535660319000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=host,vcenter=localhost:8989 gather_duration_ns=7076543i,gather_count=62i 1535660339000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=host,vcenter=192.168.1.151 gather_duration_ns=4051303i,gather_count=0i 1535660339000000000 +internal_gather,host=host.example.com,input=vsphere,os=Mac metrics_gathered=56i,gather_time_ns=13555029i 1535660339000000000 +internal_vsphere,host=host.example.com,os=Mac,vcenter=192.168.1.151 connect_ns=0i 1535660339000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=vm,vcenter=localhost:8989 gather_duration_ns=6335467i,gather_count=36i 1535660339000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=datastore,vcenter=192.168.1.151 gather_duration_ns=958474i,gather_count=0i 1535660339000000000 +internal_vsphere,host=host.example.com,os=Mac,vcenter=localhost:8989 discover_ns=65389011i,discovered_objects=8i,connect_ns=4727607i 1535660339000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=datastore,vcenter=localhost:8989 gather_duration_ns=296223i,gather_count=0i 1535660339000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=vm,vcenter=192.168.1.151 gather_count=0i,gather_duration_ns=1540920i 1535660339000000000 +vsphere_vm_virtualDisk,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 write_average=302i,read_average=11i 1535660339000000000 +vsphere_vm_cpu,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 usage_average=5.58,used_summation=2941i,demand_average=298i,run_summation=3255i,ready_summation=96i 1535660339000000000 +vsphere_vm_net,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 bytesRx_average=155i,bytesTx_average=241i 1535660339000000000 +vsphere_vm_cpu,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 usage_average=10.3,used_summation=3053i,demand_average=346i,run_summation=3289i,ready_summation=122i 1535660339000000000 +vsphere_vm_net,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 bytesRx_average=215i,bytesTx_average=275i 1535660339000000000 +vsphere_vm_virtualDisk,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 write_average=252i,read_average=14i 1535660339000000000 +vsphere_vm_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 usage_average=8,used_summation=2183i,demand_average=354i,run_summation=3542i,ready_summation=128i 1535660339000000000 
+vsphere_vm_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 bytesRx_average=178i,bytesTx_average=200i 1535660339000000000 +vsphere_vm_virtualDisk,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 write_average=283i,read_average=12i 1535660339000000000 +vsphere_vm_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 demand_average=328i,run_summation=3481i,ready_summation=122i,usage_average=7.95,used_summation=2167i 1535660339000000000 +vsphere_vm_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 bytesTx_average=282i,bytesRx_average=196i 1535660339000000000 +vsphere_vm_virtualDisk,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 write_average=321i,read_average=13i 1535660339000000000 +vsphere_host_disk,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 read_average=39i,write_average=2635i 1535660339000000000 +vsphere_host_disk,disk=/var/folders/rf/txwdm4pj409f70wnkdlp7sz80000gq/T/govcsim-DC0-LocalDS_0-367088371@folder-5,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 write_average=2635i,read_average=30i 1535660339000000000 +vsphere_host_mem,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 usage_average=98.5 1535660339000000000 +vsphere_host_net,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 usage_average=1887i,bytesRx_average=662i,bytesTx_average=251i 1535660339000000000 +vsphere_host_net,esxhostname=DC0_H0,host=host.example.com,interface=vmnic0,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 usage_average=1481i,bytesTx_average=899i,bytesRx_average=992i 1535660339000000000 +vsphere_host_cpu,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 used_summation=50405i,costop_summation=2i,utilization_average=17.32,latency_average=0.61,ready_summation=14843i,usage_average=27.94,coreUtilization_average=32.12,wait_summation=3058787i,idle_summation=56600i,readiness_average=0.36 1535660339000000000 +vsphere_host_cpu,cpu=0,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 coreUtilization_average=37.61,utilization_average=17.05,used_summation=38013i,usage_average=32.66,idle_summation=89575i 1535660339000000000 +vsphere_host_cpu,cpu=1,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 coreUtilization_average=25.92,utilization_average=18.72,used_summation=39790i,usage_average=40.42,idle_summation=69457i 1535660339000000000 +vsphere_host_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,interface=vmnic0,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 usage_average=1246i,bytesTx_average=673i,bytesRx_average=781i 1535660339000000000 +vsphere_host_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 
coreUtilization_average=33.8,idle_summation=77121i,ready_summation=15857i,readiness_average=0.39,used_summation=29554i,costop_summation=2i,wait_summation=4338417i,utilization_average=17.87,latency_average=0.44,usage_average=28.78 1535660339000000000
+vsphere_host_cpu,clustername=DC0_C0,cpu=0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 idle_summation=86610i,coreUtilization_average=34.36,utilization_average=19.03,used_summation=28766i,usage_average=23.72 1535660339000000000
+vsphere_host_cpu,clustername=DC0_C0,cpu=1,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 coreUtilization_average=33.15,utilization_average=16.8,used_summation=44282i,usage_average=30.08,idle_summation=93490i 1535660339000000000
+vsphere_host_disk,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 read_average=56i,write_average=1672i 1535660339000000000
+vsphere_host_disk,clustername=DC0_C0,disk=/var/folders/rf/txwdm4pj409f70wnkdlp7sz80000gq/T/govcsim-DC0-LocalDS_0-367088371@folder-5,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 write_average=2110i,read_average=48i 1535660339000000000
+vsphere_host_mem,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 usage_average=116.21 1535660339000000000
+vsphere_host_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 bytesRx_average=726i,bytesTx_average=643i,usage_average=1504i 1535660339000000000
+```
diff --git a/plugins/inputs/vsphere/client.go b/plugins/inputs/vsphere/client.go
new file mode 100644
index 000000000..b9547b179
--- /dev/null
+++ b/plugins/inputs/vsphere/client.go
@@ -0,0 +1,175 @@
+package vsphere
+
+import (
+    "context"
+    "crypto/tls"
+    "log"
+    "net/url"
+    "sync"
+
+    "github.com/vmware/govmomi"
+    "github.com/vmware/govmomi/performance"
+    "github.com/vmware/govmomi/session"
+    "github.com/vmware/govmomi/view"
+    "github.com/vmware/govmomi/vim25"
+    "github.com/vmware/govmomi/vim25/methods"
+    "github.com/vmware/govmomi/vim25/soap"
+)
+
+// ClientFactory is used to obtain Clients to be used throughout the plugin. Typically,
+// a single Client is reused across all functions and goroutines, but the client
+// is periodically recycled to avoid authentication expiration issues.
+type ClientFactory struct {
+    client *Client
+    mux    sync.Mutex
+    url    *url.URL
+    parent *VSphere
+}
+
+// Client represents a connection to vSphere and is backed by a govmomi connection
+type Client struct {
+    Client    *govmomi.Client
+    Views     *view.Manager
+    Root      *view.ContainerView
+    Perf      *performance.Manager
+    Valid     bool
+    closeGate sync.Once
+}
+
+// NewClientFactory creates a new ClientFactory and prepares it for use.
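+// The intended usage, as a sketch (all names are defined in this file):
+//
+//    cf := NewClientFactory(ctx, u, vs) // one factory per vCenter endpoint
+//    c, err := cf.GetClient(ctx)        // returns a cached, revalidated client
+//    // ... use c.Perf, c.Root, etc. ...
+//    cf.Close()                         // tears the connection down on shutdown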
+func NewClientFactory(ctx context.Context, url *url.URL, parent *VSphere) *ClientFactory {
+    return &ClientFactory{
+        client: nil,
+        parent: parent,
+        url:    url,
+    }
+}
+
+// GetClient returns a client. The caller is responsible for calling Release()
+// on the client once it's done using it.
+func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) {
+    cf.mux.Lock()
+    defer cf.mux.Unlock()
+    if cf.client == nil {
+        var err error
+        if cf.client, err = NewClient(cf.url, cf.parent); err != nil {
+            return nil, err
+        }
+    }
+
+    // Execute a dummy call against the server to make sure the client is
+    // still functional. If not, try to log back in. If that doesn't work,
+    // we give up.
+    if _, err := methods.GetCurrentTime(ctx, cf.client.Client); err != nil {
+        log.Printf("I! [input.vsphere]: Client session seems to have timed out. Reauthenticating!")
+        if cf.client.Client.SessionManager.Login(ctx, url.UserPassword(cf.parent.Username, cf.parent.Password)) != nil {
+            return nil, err
+        }
+    }
+
+    return cf.client, nil
+}
+
+// NewClient creates a new vSphere client based on the URL and settings passed as parameters.
+func NewClient(u *url.URL, vs *VSphere) (*Client, error) {
+    sw := NewStopwatch("connect", u.Host)
+    tlsCfg, err := vs.ClientConfig.TLSConfig()
+    if err != nil {
+        return nil, err
+    }
+    // Use a default TLS config if it's missing
+    if tlsCfg == nil {
+        tlsCfg = &tls.Config{}
+    }
+    if vs.Username != "" {
+        u.User = url.UserPassword(vs.Username, vs.Password)
+    }
+    ctx := context.Background()
+
+    log.Printf("D! [input.vsphere]: Creating client: %s", u.Host)
+    soapClient := soap.NewClient(u, tlsCfg.InsecureSkipVerify)
+
+    // Add certificate if we have it. Use it to log us in.
+    if tlsCfg != nil && len(tlsCfg.Certificates) > 0 {
+        soapClient.SetCertificate(tlsCfg.Certificates[0])
+    }
+
+    // Set up custom CA chain if specified. We need to do this before we create the vim25 client,
+    // since it might fail on missing CA chains otherwise.
+    if vs.TLSCA != "" {
+        if err := soapClient.SetRootCAs(vs.TLSCA); err != nil {
+            return nil, err
+        }
+    }
+
+    vimClient, err := vim25.NewClient(ctx, soapClient)
+    if err != nil {
+        return nil, err
+    }
+    sm := session.NewManager(vimClient)
+
+    // If TLSKey is specified, try to log in as an extension using a cert.
+    if vs.TLSKey != "" {
+        if err := sm.LoginExtensionByCertificate(ctx, vs.TLSKey); err != nil {
+            return nil, err
+        }
+    }
+
+    // Create the govmomi client.
+    c := &govmomi.Client{
+        Client:         vimClient,
+        SessionManager: sm,
+    }
+
+    // Only login if the URL contains user information.
+    if u.User != nil {
+        if err := c.Login(ctx, u.User); err != nil {
+            return nil, err
+        }
+    }
+
+    c.Timeout = vs.Timeout.Duration
+    m := view.NewManager(c.Client)
+
+    v, err := m.CreateContainerView(ctx, c.ServiceContent.RootFolder, []string{}, true)
+    if err != nil {
+        return nil, err
+    }
+
+    p := performance.NewManager(c.Client)
+
+    sw.Stop()
+
+    return &Client{
+        Client: c,
+        Views:  m,
+        Root:   v,
+        Perf:   p,
+        Valid:  true,
+    }, nil
+}
+
+// Close shuts down a ClientFactory and releases any resources associated with it.
+func (cf *ClientFactory) Close() {
+    cf.mux.Lock()
+    defer cf.mux.Unlock()
+    if cf.client != nil {
+        cf.client.close()
+    }
+}
+
+func (c *Client) close() {
+    // Use a Once to prevent panics from trying to close it
+    // multiple times.
+ c.closeGate.Do(func() { + ctx := context.Background() + if c.Views != nil { + c.Views.Destroy(ctx) + + } + if c.Client != nil { + c.Client.Logout(ctx) + } + }) +} diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go new file mode 100644 index 000000000..cad4dec00 --- /dev/null +++ b/plugins/inputs/vsphere/endpoint.go @@ -0,0 +1,852 @@ +package vsphere + +import ( + "context" + "fmt" + "log" + "net/url" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/influxdata/telegraf/filter" + + "github.com/influxdata/telegraf" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/performance" + "github.com/vmware/govmomi/view" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +// Endpoint is a high-level representation of a connected vCenter endpoint. It is backed by the lower +// level Client type. +type Endpoint struct { + Parent *VSphere + URL *url.URL + lastColls map[string]time.Time + instanceInfo map[string]resourceInfo + resourceKinds map[string]resourceKind + discoveryTicker *time.Ticker + collectMux sync.RWMutex + initialized bool + clientFactory *ClientFactory + busy sync.Mutex +} + +type resourceKind struct { + name string + pKey string + parentTag string + enabled bool + realTime bool + sampling int32 + objects objectMap + filters filter.Filter + collectInstances bool + getObjects func(context.Context, *view.ContainerView) (objectMap, error) +} + +type metricEntry struct { + tags map[string]string + name string + ts time.Time + fields map[string]interface{} +} + +type objectMap map[string]objectRef + +type objectRef struct { + name string + ref types.ManagedObjectReference + parentRef *types.ManagedObjectReference //Pointer because it must be nillable + guest string + dcname string +} + +type resourceInfo struct { + name string + metrics performance.MetricList + parentRef *types.ManagedObjectReference +} + +type metricQRequest struct { + res *resourceKind + obj objectRef +} + +type metricQResponse struct { + obj objectRef + metrics *performance.MetricList +} + +type multiError []error + +// NewEndpoint returns a new connection to a vCenter based on the URL and configuration passed +// as parameters. 
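+// Note (added for clarity): the resource kinds registered below use vSphere's
+// real-time stats (20s sampling interval) for hosts and VMs, and historical
+// rollup stats (300s sampling interval) for datacenters, clusters and datastores.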
+func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, error) {
+    e := Endpoint{
+        URL:           url,
+        Parent:        parent,
+        lastColls:     make(map[string]time.Time),
+        instanceInfo:  make(map[string]resourceInfo),
+        initialized:   false,
+        clientFactory: NewClientFactory(ctx, url, parent),
+    }
+
+    e.resourceKinds = map[string]resourceKind{
+        "datacenter": {
+            name:             "datacenter",
+            pKey:             "dcname",
+            parentTag:        "",
+            enabled:          anythingEnabled(parent.DatacenterMetricExclude),
+            realTime:         false,
+            sampling:         300,
+            objects:          make(objectMap),
+            filters:          newFilterOrPanic(parent.DatacenterMetricInclude, parent.DatacenterMetricExclude),
+            collectInstances: parent.DatacenterInstances,
+            getObjects:       getDatacenters,
+        },
+        "cluster": {
+            name:             "cluster",
+            pKey:             "clustername",
+            parentTag:        "dcname",
+            enabled:          anythingEnabled(parent.ClusterMetricExclude),
+            realTime:         false,
+            sampling:         300,
+            objects:          make(objectMap),
+            filters:          newFilterOrPanic(parent.ClusterMetricInclude, parent.ClusterMetricExclude),
+            collectInstances: parent.ClusterInstances,
+            getObjects:       getClusters,
+        },
+        "host": {
+            name:             "host",
+            pKey:             "esxhostname",
+            parentTag:        "clustername",
+            enabled:          anythingEnabled(parent.HostMetricExclude),
+            realTime:         true,
+            sampling:         20,
+            objects:          make(objectMap),
+            filters:          newFilterOrPanic(parent.HostMetricInclude, parent.HostMetricExclude),
+            collectInstances: parent.HostInstances,
+            getObjects:       getHosts,
+        },
+        "vm": {
+            name:             "vm",
+            pKey:             "vmname",
+            parentTag:        "esxhostname",
+            enabled:          anythingEnabled(parent.VMMetricExclude),
+            realTime:         true,
+            sampling:         20,
+            objects:          make(objectMap),
+            filters:          newFilterOrPanic(parent.VMMetricInclude, parent.VMMetricExclude),
+            collectInstances: parent.VMInstances,
+            getObjects:       getVMs,
+        },
+        "datastore": {
+            name:             "datastore",
+            pKey:             "dsname",
+            enabled:          anythingEnabled(parent.DatastoreMetricExclude),
+            realTime:         false,
+            sampling:         300,
+            objects:          make(objectMap),
+            filters:          newFilterOrPanic(parent.DatastoreMetricInclude, parent.DatastoreMetricExclude),
+            collectInstances: parent.DatastoreInstances,
+            getObjects:       getDatastores,
+        },
+    }
+
+    // Start discovery and the rest of the initialization
+    err := e.init(ctx)
+
+    return &e, err
+}
+
+func (m multiError) Error() string {
+    switch len(m) {
+    case 0:
+        return "No error recorded. Something is wrong!"
+    case 1:
+        return m[0].Error()
+    default:
+        s := "Multiple errors detected concurrently: "
+        for i, e := range m {
+            if i != 0 {
+                s += ", "
+            }
+            s += e.Error()
+        }
+        return s
+    }
+}
+
+func anythingEnabled(ex []string) bool {
+    for _, s := range ex {
+        if s == "*" {
+            return false
+        }
+    }
+    return true
+}
+
+func newFilterOrPanic(include []string, exclude []string) filter.Filter {
+    f, err := filter.NewIncludeExcludeFilter(include, exclude)
+    if err != nil {
+        panic(fmt.Sprintf("Include/exclude filters are invalid: %s", err))
+    }
+    return f
+}
+
+func (e *Endpoint) startDiscovery(ctx context.Context) {
+    e.discoveryTicker = time.NewTicker(e.Parent.ObjectDiscoveryInterval.Duration)
+    go func() {
+        for {
+            select {
+            case <-e.discoveryTicker.C:
+                err := e.discover(ctx)
+                if err != nil && err != context.Canceled {
+                    log.Printf("E! [input.vsphere]: Error in discovery for %s: %v", e.URL.Host, err)
+                }
+            case <-ctx.Done():
+                log.Printf("D! [input.vsphere]: Exiting discovery goroutine for %s", e.URL.Host)
+                e.discoveryTicker.Stop()
+                return
+            }
+        }
+    }()
+}
+
+func (e *Endpoint) initalDiscovery(ctx context.Context) {
+    err := e.discover(ctx)
+    if err != nil && err != context.Canceled {
+        log.Printf("E! [input.vsphere]: Error in discovery for %s: %v", e.URL.Host, err)
+    }
+    e.startDiscovery(ctx)
+}
+
+func (e *Endpoint) init(ctx context.Context) error {
+    if e.Parent.ObjectDiscoveryInterval.Duration > 0 {
+        // Run an initial discovery. If force_discover_on_init isn't set, we kick it off as a
+        // goroutine without waiting for it. This will probably cause us to report an empty
+        // dataset on the first collection, but it solves the issue of the first collection timing out.
+        if e.Parent.ForceDiscoverOnInit {
+            log.Printf("D! [input.vsphere]: Running initial discovery and waiting for it to finish")
+            e.initalDiscovery(ctx)
+        } else {
+            // Otherwise, just run it in the background. We'll probably have an incomplete first metric
+            // collection this way.
+            go e.initalDiscovery(ctx)
+        }
+    }
+    e.initialized = true
+    return nil
+}
+
+func (e *Endpoint) getMetricNameMap(ctx context.Context) (map[int32]string, error) {
+    client, err := e.clientFactory.GetClient(ctx)
+    if err != nil {
+        return nil, err
+    }
+
+    mn, err := client.Perf.CounterInfoByName(ctx)
+    if err != nil {
+        return nil, err
+    }
+    names := make(map[int32]string)
+    for name, m := range mn {
+        names[m.Key] = name
+    }
+    return names, nil
+}
+
+func (e *Endpoint) getMetadata(ctx context.Context, in interface{}) interface{} {
+    client, err := e.clientFactory.GetClient(ctx)
+    if err != nil {
+        return err
+    }
+
+    rq := in.(*metricQRequest)
+    metrics, err := client.Perf.AvailableMetric(ctx, rq.obj.ref.Reference(), rq.res.sampling)
+    if err != nil && err != context.Canceled {
+        log.Printf("E! [input.vsphere]: Error while getting metric metadata. Discovery will be incomplete. Error: %s", err)
+    }
+    return &metricQResponse{metrics: &metrics, obj: rq.obj}
+}
+
+func (e *Endpoint) getDatacenterName(ctx context.Context, client *Client, cache map[string]string, r types.ManagedObjectReference) string {
+    path := make([]string, 0)
+    returnVal := ""
+    here := r
+    for {
+        if name, ok := cache[here.Reference().String()]; ok {
+            // We found a cached name. Use it and stop ascending.
+            returnVal = name
+            break
+        }
+        path = append(path, here.Reference().String())
+        o := object.NewCommon(client.Client.Client, r)
+        var result mo.ManagedEntity
+        err := o.Properties(ctx, here, []string{"parent", "name"}, &result)
+        if err != nil {
+            log.Printf("W! [input.vsphere]: Error while resolving parent. Assuming no parent exists. Error: %s", err)
+            break
+        }
+        if result.Reference().Type == "Datacenter" {
+            // We reached the datacenter. Its name is what we're after.
+            returnVal = result.Name
+            break
+        }
+        if result.Parent == nil {
+            log.Printf("D! [input.vsphere]: No parent found for %s (ascending from %s)", here.Reference(), r.Reference())
+            break
+        }
+        here = result.Parent.Reference()
+    }
+    // Populate the cache for the entire chain of objects leading here.
+    for _, s := range path {
+        cache[s] = returnVal
+    }
+    return returnVal
+}
+
+func (e *Endpoint) discover(ctx context.Context) error {
+    e.busy.Lock()
+    defer e.busy.Unlock()
+    if ctx.Err() != nil {
+        return ctx.Err()
+    }
+
+    metricNames, err := e.getMetricNameMap(ctx)
+    if err != nil {
+        return err
+    }
+
+    sw := NewStopwatch("discover", e.URL.Host)
+
+    client, err := e.clientFactory.GetClient(ctx)
+    if err != nil {
+        return err
+    }
+
+    log.Printf("D! [input.vsphere]: Discover new objects for %s", e.URL.Host)
+
+    instInfo := make(map[string]resourceInfo)
+    resourceKinds := make(map[string]resourceKind)
+    dcNameCache := make(map[string]string)
+
+    // Populate resource objects, and endpoint instance info.
+    for k, res := range e.resourceKinds {
+        log.Printf("D! [input.vsphere] Discovering resources for %s", res.name)
+        // Need to do this for all resource types, even if they are not enabled
+        // (except datastores and VMs).
+        if res.enabled || (k != "datastore" && k != "vm") {
+            objects, err := res.getObjects(ctx, client.Root)
+            if err != nil {
+                return err
+            }
+
+            // Fill in datacenter names where available (no need to do it for datacenters)
+            if res.name != "datacenter" {
+                for k, obj := range objects {
+                    if obj.parentRef != nil {
+                        obj.dcname = e.getDatacenterName(ctx, client, dcNameCache, *obj.parentRef)
+                        objects[k] = obj
+                    }
+                }
+            }
+
+            // Set up a worker pool for processing metadata queries concurrently
+            wp := NewWorkerPool(10)
+            wp.Run(ctx, e.getMetadata, e.Parent.DiscoverConcurrency)
+
+            // Fill the input channels with resources that need to be queried
+            // for metadata.
+            wp.Fill(ctx, func(ctx context.Context, f PushFunc) {
+                for _, obj := range objects {
+                    f(ctx, &metricQRequest{obj: obj, res: &res})
+                }
+            })
+
+            // Drain the resulting metadata and build instance infos.
+            wp.Drain(ctx, func(ctx context.Context, in interface{}) bool {
+                switch resp := in.(type) {
+                case *metricQResponse:
+                    mList := make(performance.MetricList, 0)
+                    if res.enabled {
+                        for _, m := range *resp.metrics {
+                            if m.Instance != "" && !res.collectInstances {
+                                continue
+                            }
+                            if res.filters.Match(metricNames[m.CounterId]) {
+                                mList = append(mList, m)
+                            }
+                        }
+                    }
+                    instInfo[resp.obj.ref.Value] = resourceInfo{name: resp.obj.name, metrics: mList, parentRef: resp.obj.parentRef}
+                case error:
+                    log.Printf("W! [input.vsphere]: Error while discovering resources: %s", resp)
+                    return false
+                }
+                return true
+            })
+            res.objects = objects
+            resourceKinds[k] = res
+        }
+    }
+
+    // Atomically swap maps
+    e.collectMux.Lock()
+    defer e.collectMux.Unlock()
+
+    e.instanceInfo = instInfo
+    e.resourceKinds = resourceKinds
+
+    sw.Stop()
+    SendInternalCounter("discovered_objects", e.URL.Host, int64(len(instInfo)))
+    return nil
+}
+
+func getDatacenters(ctx context.Context, root *view.ContainerView) (objectMap, error) {
+    var resources []mo.Datacenter
+    err := root.Retrieve(ctx, []string{"Datacenter"}, []string{"name", "parent"}, &resources)
+    if err != nil {
+        return nil, err
+    }
+    m := make(objectMap, len(resources))
+    for _, r := range resources {
+        m[r.ExtensibleManagedObject.Reference().Value] = objectRef{
+            name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Parent, dcname: r.Name}
+    }
+    return m, nil
+}
+
+func getClusters(ctx context.Context, root *view.ContainerView) (objectMap, error) {
+    var resources []mo.ClusterComputeResource
+    err := root.Retrieve(ctx, []string{"ClusterComputeResource"}, []string{"name", "parent"}, &resources)
+    if err != nil {
+        return nil, err
+    }
+    cache := make(map[string]*types.ManagedObjectReference)
+    m := make(objectMap, len(resources))
+    for _, r := range resources {
+        // We're not interested in the immediate parent (a folder), but the data center.
+        p, ok := cache[r.Parent.Value]
+        if !ok {
+            o := object.NewFolder(root.Client(), *r.Parent)
+            var folder mo.Folder
+            err := o.Properties(ctx, *r.Parent, []string{"parent"}, &folder)
+            if err != nil {
+                log.Printf("W! [input.vsphere] Error while getting folder parent: %v", err)
+                p = nil
+            } else {
+                pp := folder.Parent.Reference()
+                p = &pp
+                cache[r.Parent.Value] = p
+            }
+        }
+        m[r.ExtensibleManagedObject.Reference().Value] = objectRef{
+            name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: p}
+    }
+    return m, nil
+}
+
+func getHosts(ctx context.Context, root *view.ContainerView) (objectMap, error) {
+    var resources []mo.HostSystem
+    err := root.Retrieve(ctx, []string{"HostSystem"}, []string{"name", "parent"}, &resources)
+    if err != nil {
+        return nil, err
+    }
+    m := make(objectMap)
+    for _, r := range resources {
+        m[r.ExtensibleManagedObject.Reference().Value] = objectRef{
+            name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Parent}
+    }
+    return m, nil
+}
+
+func getVMs(ctx context.Context, root *view.ContainerView) (objectMap, error) {
+    var resources []mo.VirtualMachine
+    err := root.Retrieve(ctx, []string{"VirtualMachine"}, []string{"name", "runtime.host", "config.guestId"}, &resources)
+    if err != nil {
+        return nil, err
+    }
+    m := make(objectMap)
+    for _, r := range resources {
+        var guest string
+        // Sometimes Config is unknown and returns a nil pointer
+        if r.Config != nil {
+            guest = cleanGuestID(r.Config.GuestId)
+        } else {
+            guest = "unknown"
+        }
+        m[r.ExtensibleManagedObject.Reference().Value] = objectRef{
+            name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Runtime.Host, guest: guest}
+    }
+    return m, nil
+}
+
+func getDatastores(ctx context.Context, root *view.ContainerView) (objectMap, error) {
+    var resources []mo.Datastore
+    err := root.Retrieve(ctx, []string{"Datastore"}, []string{"name", "parent"}, &resources)
+    if err != nil {
+        return nil, err
+    }
+    m := make(objectMap)
+    for _, r := range resources {
+        m[r.ExtensibleManagedObject.Reference().Value] = objectRef{
+            name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Parent}
+    }
+    return m, nil
+}
+
+// Close shuts down an Endpoint and releases any resources associated with it.
+func (e *Endpoint) Close() {
+    e.clientFactory.Close()
+}
+
+// Collect runs a round of data collections as specified in the configuration.
+func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error {
+    // If we never managed to do a discovery, collection will be a no-op. Therefore,
+    // we need to check that a connection is available, or the collection will
+    // silently fail.
+    if _, err := e.clientFactory.GetClient(ctx); err != nil {
+        return err
+    }
+
+    e.collectMux.RLock()
+    defer e.collectMux.RUnlock()
+
+    if ctx.Err() != nil {
+        return ctx.Err()
+    }
+
+    // If the discovery interval is disabled (0), discover on each collection cycle
+    if e.Parent.ObjectDiscoveryInterval.Duration == 0 {
+        err := e.discover(ctx)
+        if err != nil {
+            return err
+        }
+    }
+    for k, res := range e.resourceKinds {
+        if res.enabled {
+            err := e.collectResource(ctx, k, acc)
+            if err != nil {
+                return err
+            }
+        }
+    }
+    return nil
+}
+
+func (e *Endpoint) chunker(ctx context.Context, f PushFunc, res *resourceKind, now time.Time, latest time.Time) {
+    pqs := make([]types.PerfQuerySpec, 0, e.Parent.MaxQueryObjects)
+    metrics := 0
+    total := 0
+    nRes := 0
+    for _, object := range res.objects {
+        info, found := e.instanceInfo[object.ref.Value]
+        if !found {
+            log.Printf("E! [input.vsphere]: Internal error: Instance info not found for MOID %s", object.ref)
+        }
+        mr := len(info.metrics)
+        for mr > 0 {
+            mc := mr
+            headroom := e.Parent.MaxQueryMetrics - metrics
+            if !res.realTime && mc > headroom { // Metric query limit only applies to non-realtime metrics
+                mc = headroom
+            }
+            fm := len(info.metrics) - mr
+            pq := types.PerfQuerySpec{
+                Entity:     object.ref,
+                MaxSample:  1,
+                MetricId:   info.metrics[fm : fm+mc],
+                IntervalId: res.sampling,
+            }
+
+            if !res.realTime {
+                pq.StartTime = &latest
+                pq.EndTime = &now
+            }
+            pqs = append(pqs, pq)
+            mr -= mc
+            metrics += mc
+
+            // We need to dump the current chunk of metrics for one of two reasons:
+            // 1) We filled up the metric quota while processing the current resource
+            // 2) We are at the last resource and have no more data to process.
+            if mr > 0 || (!res.realTime && metrics >= e.Parent.MaxQueryMetrics) || nRes >= e.Parent.MaxQueryObjects {
+                log.Printf("D! [input.vsphere]: Querying %d objects, %d metrics (%d remaining) of type %s for %s. Processed objects: %d. Total objects %d",
+                    len(pqs), metrics, mr, res.name, e.URL.Host, total+1, len(res.objects))
+
+                // To prevent deadlocks, don't send work items if the context has been cancelled.
+                if ctx.Err() == context.Canceled {
+                    return
+                }
+
+                // Call push function
+                f(ctx, pqs)
+                pqs = make([]types.PerfQuerySpec, 0, e.Parent.MaxQueryObjects)
+                metrics = 0
+                nRes = 0
+            }
+        }
+        total++
+        nRes++
+    }
+    // There may be dangling stuff in the queue. Handle it.
+    if len(pqs) > 0 {
+        // Call push function
+        f(ctx, pqs)
+    }
+}
+
+func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc telegraf.Accumulator) error {
+    // Do we have new data yet?
+    res := e.resourceKinds[resourceType]
+    now := time.Now()
+    latest, hasLatest := e.lastColls[resourceType]
+    if hasLatest {
+        elapsed := time.Now().Sub(latest).Seconds() + 5.0 // Allow 5 second jitter.
+        log.Printf("D! [input.vsphere]: Latest: %s, elapsed: %f, resource: %s", latest, elapsed, resourceType)
+        if !res.realTime && elapsed < float64(res.sampling) {
+            // No new data would be available. We're outta here!
+            log.Printf("D! [input.vsphere]: Sampling period for %s of %d has not elapsed for %s",
+                resourceType, res.sampling, e.URL.Host)
+            return nil
+        }
+    } else {
+        latest = time.Now().Add(time.Duration(-res.sampling) * time.Second)
+    }
+
+    internalTags := map[string]string{"resourcetype": resourceType}
+    sw := NewStopwatchWithTags("gather_duration", e.URL.Host, internalTags)
+
+    log.Printf("D! [input.vsphere]: Start of sample period deemed to be %s", latest)
+    log.Printf("D! [input.vsphere]: Collecting metrics for %d objects of type %s for %s",
+        len(res.objects), resourceType, e.URL.Host)
+
+    count := int64(0)
+
+    // Set up a worker pool for collecting chunk metrics
+    wp := NewWorkerPool(10)
+    wp.Run(ctx, func(ctx context.Context, in interface{}) interface{} {
+        chunk := in.([]types.PerfQuerySpec)
+        n, err := e.collectChunk(ctx, chunk, resourceType, res, acc)
+        log.Printf("D! [input.vsphere]: Query returned %d metrics", n)
+        if err != nil {
+            return err
+        }
+        atomic.AddInt64(&count, int64(n))
+        return nil
+    }, e.Parent.CollectConcurrency)
+
+    // Fill the input channel of the worker queue by running the chunking
+    // logic implemented in chunker()
+    wp.Fill(ctx, func(ctx context.Context, f PushFunc) {
+        e.chunker(ctx, f, &res, now, latest)
+    })
+
+    // Drain the pool. We're getting errors back; they should all be nil.
+    var mux sync.Mutex
+    err := make(multiError, 0)
+    wp.Drain(ctx, func(ctx context.Context, in interface{}) bool {
+        if in != nil {
+            mux.Lock()
+            defer mux.Unlock()
+            err = append(err, in.(error))
+            return false
+        }
+        return true
+    })
+    e.lastColls[resourceType] = now // Use value captured at the beginning to avoid blind spots.
+
+    sw.Stop()
+    SendInternalCounterWithTags("gather_count", e.URL.Host, internalTags, count)
+    if len(err) > 0 {
+        return err
+    }
+    return nil
+}
+
+func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, resourceType string,
+    res resourceKind, acc telegraf.Accumulator) (int, error) {
+    count := 0
+    prefix := "vsphere" + e.Parent.Separator + resourceType
+
+    client, err := e.clientFactory.GetClient(ctx)
+    if err != nil {
+        return 0, err
+    }
+
+    metricInfo, err := client.Perf.CounterInfoByName(ctx)
+    if err != nil {
+        return count, err
+    }
+
+    metrics, err := client.Perf.Query(ctx, pqs)
+    if err != nil {
+        return count, err
+    }
+
+    ems, err := client.Perf.ToMetricSeries(ctx, metrics)
+    if err != nil {
+        return count, err
+    }
+
+    // Iterate through results
+    for _, em := range ems {
+        moid := em.Entity.Reference().Value
+        instInfo, found := e.instanceInfo[moid]
+        if !found {
+            log.Printf("E! [input.vsphere]: MOID %s not found in cache. Skipping! (This should not happen!)", moid)
+            continue
+        }
+        buckets := make(map[string]metricEntry)
+        for _, v := range em.Value {
+            name := v.Name
+            t := map[string]string{
+                "vcenter": e.URL.Host,
+                "source":  instInfo.name,
+                "moid":    moid,
+            }
+
+            // Populate tags
+            objectRef, ok := res.objects[moid]
+            if !ok {
+                log.Printf("E! [input.vsphere]: MOID %s not found in cache. Skipping", moid)
+                continue
+            }
+            e.populateTags(&objectRef, resourceType, &res, t, &v)
+
+            // Now deal with the values
+            for idx, value := range v.Value {
+                ts := em.SampleInfo[idx].Timestamp
+
+                // Organize the metrics into a bucket per measurement.
+                // Data SHOULD be presented to us with the same timestamp for all samples, but in case
+                // it isn't, we use the measurement name + timestamp as the key for the bucket.
+                mn, fn := e.makeMetricIdentifier(prefix, name)
+                bKey := mn + " " + v.Instance + " " + strconv.FormatInt(ts.UnixNano(), 10)
+                bucket, found := buckets[bKey]
+                if !found {
+                    bucket = metricEntry{name: mn, ts: ts, fields: make(map[string]interface{}), tags: t}
+                    buckets[bKey] = bucket
+                }
+                if value < 0 {
+                    log.Printf("D! [input.vsphere]: Negative value for %s on %s. Indicates missing samples", name, objectRef.name)
+                    continue
+                }
+
+                // Percentage values must be scaled down by 100.
+                info, ok := metricInfo[name]
+                if !ok {
+                    log.Printf("E! [input.vsphere]: Could not determine unit for %s. Skipping", name)
+                    continue
+                }
+                if info.UnitInfo.GetElementDescription().Key == "percent" {
+                    bucket.fields[fn] = float64(value) / 100.0
+                } else {
+                    bucket.fields[fn] = value
+                }
+                count++
+            }
+        }
+        // We've iterated through all the metrics and collected buckets for each
+        // measurement name. Now emit them!
+        for _, bucket := range buckets {
+            acc.AddFields(bucket.name, bucket.fields, bucket.tags, bucket.ts)
+        }
+    }
+    return count, nil
+}
+
+func (e *Endpoint) getParent(obj resourceInfo) (resourceInfo, bool) {
+    p := obj.parentRef
+    if p == nil {
+        log.Printf("D! [input.vsphere] No parent found for %s", obj.name)
[input.vsphere] No parent found for %s", obj.name) + return resourceInfo{}, false + } + r, ok := e.instanceInfo[p.Value] + return r, ok +} + +func (e *Endpoint) populateTags(objectRef *objectRef, resourceType string, resource *resourceKind, t map[string]string, v *performance.MetricSeries) { + // Map name of object. + if resource.pKey != "" { + t[resource.pKey] = objectRef.name + } + + // Map parent reference + parent, found := e.instanceInfo[objectRef.parentRef.Value] + if found { + t[resource.parentTag] = parent.name + if resourceType == "vm" { + if objectRef.guest != "" { + t["guest"] = objectRef.guest + } + if c, ok := e.getParent(parent); ok { + t["clustername"] = c.name + } + } + } + + // Fill in Datacenter name + if objectRef.dcname != "" { + t["dcname"] = objectRef.dcname + } + + // Determine which point tag to map to the instance + name := v.Name + instance := "instance-total" + if v.Instance != "" { + instance = v.Instance + } + if strings.HasPrefix(name, "cpu.") { + t["cpu"] = instance + } else if strings.HasPrefix(name, "datastore.") { + t["lun"] = instance + } else if strings.HasPrefix(name, "disk.") { + t["disk"] = cleanDiskTag(instance) + } else if strings.HasPrefix(name, "net.") { + t["interface"] = instance + } else if strings.HasPrefix(name, "storageAdapter.") { + t["adapter"] = instance + } else if strings.HasPrefix(name, "storagePath.") { + t["path"] = instance + } else if strings.HasPrefix(name, "sys.resource") { + t["resource"] = instance + } else if strings.HasPrefix(name, "vflashModule.") { + t["module"] = instance + } else if strings.HasPrefix(name, "virtualDisk.") { + t["disk"] = instance + } else if v.Instance != "" { + // default + t["instance"] = v.Instance + } +} + +func (e *Endpoint) makeMetricIdentifier(prefix, metric string) (string, string) { + parts := strings.Split(metric, ".") + if len(parts) == 1 { + return prefix, parts[0] + } + return prefix + e.Parent.Separator + parts[0], strings.Join(parts[1:], e.Parent.Separator) +} + +func cleanGuestID(id string) string { + return strings.TrimSuffix(id, "Guest") +} + +func cleanDiskTag(disk string) string { + // Remove enclosing "<>" + return strings.TrimSuffix(strings.TrimPrefix(disk, "<"), ">") +} diff --git a/plugins/inputs/vsphere/selfhealth.go b/plugins/inputs/vsphere/selfhealth.go new file mode 100644 index 000000000..66069ca75 --- /dev/null +++ b/plugins/inputs/vsphere/selfhealth.go @@ -0,0 +1,53 @@ +package vsphere + +import ( + "time" + + "github.com/influxdata/telegraf/selfstat" +) + +// Stopwatch is a simple helper for recording timing information, +// such as gather times and discovery times. +type Stopwatch struct { + stat selfstat.Stat + start time.Time +} + +// NewStopwatch creates a new StopWatch and starts measuring time +// its creation. +func NewStopwatch(name, vCenter string) *Stopwatch { + return &Stopwatch{ + stat: selfstat.RegisterTiming("vsphere", name+"_ns", map[string]string{"vcenter": vCenter}), + start: time.Now(), + } +} + +// NewStopwatchWithTags creates a new StopWatch and starts measuring time +// its creation. Allows additional tags. +func NewStopwatchWithTags(name, vCenter string, tags map[string]string) *Stopwatch { + tags["vcenter"] = vCenter + return &Stopwatch{ + stat: selfstat.RegisterTiming("vsphere", name+"_ns", tags), + start: time.Now(), + } +} + +// Stop stops a Stopwatch and records the time. +func (s *Stopwatch) Stop() { + s.stat.Set(time.Since(s.start).Nanoseconds()) +} + +// SendInternalCounter is a convenience method for sending +// non-timing internal metrics. 
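+//
+// A minimal usage sketch (the vCenter host and stat names here are
+// illustrative, not part of this patch):
+//
+//    sw := NewStopwatch("discovery_duration", "vcenter.example.com")
+//    // ... timed work ...
+//    sw.Stop()
+//    SendInternalCounter("discovered_objects", "vcenter.example.com", 42)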
+func SendInternalCounter(name, vCenter string, value int64) { + s := selfstat.Register("vsphere", name, map[string]string{"vcenter": vCenter}) + s.Set(value) +} + +// SendInternalCounterWithTags is a convenience method for sending +// non-timing internal metrics. Allows additional tags +func SendInternalCounterWithTags(name, vCenter string, tags map[string]string, value int64) { + tags["vcenter"] = vCenter + s := selfstat.Register("vsphere", name, tags) + s.Set(value) +} diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go new file mode 100644 index 000000000..26af1e8cc --- /dev/null +++ b/plugins/inputs/vsphere/vsphere.go @@ -0,0 +1,312 @@ +package vsphere + +import ( + "context" + "log" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/vmware/govmomi/vim25/soap" +) + +// VSphere is the top level type for the vSphere input plugin. It contains all the configuration +// and a list of connected vSphere endpoints +type VSphere struct { + Vcenters []string + Username string + Password string + DatacenterInstances bool + DatacenterMetricInclude []string + DatacenterMetricExclude []string + ClusterInstances bool + ClusterMetricInclude []string + ClusterMetricExclude []string + HostInstances bool + HostMetricInclude []string + HostMetricExclude []string + VMInstances bool `toml:"vm_instances"` + VMMetricInclude []string `toml:"vm_metric_include"` + VMMetricExclude []string `toml:"vm_metric_exclude"` + DatastoreInstances bool + DatastoreMetricInclude []string + DatastoreMetricExclude []string + Separator string + + MaxQueryObjects int + MaxQueryMetrics int + CollectConcurrency int + DiscoverConcurrency int + ForceDiscoverOnInit bool + ObjectDiscoveryInterval internal.Duration + Timeout internal.Duration + + endpoints []*Endpoint + cancel context.CancelFunc + + // Mix in the TLS/SSL goodness from core + tls.ClientConfig +} + +var sampleConfig = ` + ## List of vCenter URLs to be monitored. These three lines must be uncommented + ## and edited for the plugin to work. 
+ vcenters = [ "https://vcenter.local/sdk" ] + username = "user@corp.local" + password = "secret" + + ## VMs + ## Typical VM metrics (if omitted or empty, all metrics are collected) + vm_metric_include = [ + "cpu.demand.average", + "cpu.idle.summation", + "cpu.latency.average", + "cpu.readiness.average", + "cpu.ready.summation", + "cpu.run.summation", + "cpu.usagemhz.average", + "cpu.used.summation", + "cpu.wait.summation", + "mem.active.average", + "mem.granted.average", + "mem.latency.average", + "mem.swapin.average", + "mem.swapinRate.average", + "mem.swapout.average", + "mem.swapoutRate.average", + "mem.usage.average", + "mem.vmmemctl.average", + "net.bytesRx.average", + "net.bytesTx.average", + "net.droppedRx.summation", + "net.droppedTx.summation", + "net.usage.average", + "power.power.average", + "virtualDisk.numberReadAveraged.average", + "virtualDisk.numberWriteAveraged.average", + "virtualDisk.read.average", + "virtualDisk.readOIO.latest", + "virtualDisk.throughput.usage.average", + "virtualDisk.totalReadLatency.average", + "virtualDisk.totalWriteLatency.average", + "virtualDisk.write.average", + "virtualDisk.writeOIO.latest", + "sys.uptime.latest", + ] + # vm_metric_exclude = [] ## Nothing is excluded by default + # vm_instances = true ## true by default + + ## Hosts + ## Typical host metrics (if omitted or empty, all metrics are collected) + host_metric_include = [ + "cpu.coreUtilization.average", + "cpu.costop.summation", + "cpu.demand.average", + "cpu.idle.summation", + "cpu.latency.average", + "cpu.readiness.average", + "cpu.ready.summation", + "cpu.swapwait.summation", + "cpu.usage.average", + "cpu.usagemhz.average", + "cpu.used.summation", + "cpu.utilization.average", + "cpu.wait.summation", + "disk.deviceReadLatency.average", + "disk.deviceWriteLatency.average", + "disk.kernelReadLatency.average", + "disk.kernelWriteLatency.average", + "disk.numberReadAveraged.average", + "disk.numberWriteAveraged.average", + "disk.read.average", + "disk.totalReadLatency.average", + "disk.totalWriteLatency.average", + "disk.write.average", + "mem.active.average", + "mem.latency.average", + "mem.state.latest", + "mem.swapin.average", + "mem.swapinRate.average", + "mem.swapout.average", + "mem.swapoutRate.average", + "mem.totalCapacity.average", + "mem.usage.average", + "mem.vmmemctl.average", + "net.bytesRx.average", + "net.bytesTx.average", + "net.droppedRx.summation", + "net.droppedTx.summation", + "net.errorsRx.summation", + "net.errorsTx.summation", + "net.usage.average", + "power.power.average", + "storageAdapter.numberReadAveraged.average", + "storageAdapter.numberWriteAveraged.average", + "storageAdapter.read.average", + "storageAdapter.write.average", + "sys.uptime.latest", + ] + # host_metric_exclude = [] ## Nothing excluded by default + # host_instances = true ## true by default + + ## Clusters + # cluster_metric_include = [] ## if omitted or empty, all metrics are collected + # cluster_metric_exclude = [] ## Nothing excluded by default + # cluster_instances = true ## true by default + + ## Datastores + # datastore_metric_include = [] ## if omitted or empty, all metrics are collected + # datastore_metric_exclude = [] ## Nothing excluded by default + # datastore_instances = false ## false by default for Datastores only + + ## Datacenters + datacenter_metric_include = [] ## if omitted or empty, all metrics are collected + datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. 
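+  ## The "*" entry above matches every metric name, so datacenter metrics
+  ## are skipped entirely unless this exclude list is overridden.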
+  # datacenter_instances = false ## false by default for Datacenters only
+
+  ## Plugin Settings
+  ## separator character to use for measurement and field names (default: "_")
+  # separator = "_"
+
+  ## number of objects to retrieve per query for realtime resources (vms and hosts)
+  ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
+  # max_query_objects = 256
+
+  ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores)
+  ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
+  # max_query_metrics = 256
+
+  ## number of goroutines to use for collection and discovery of objects and metrics
+  # collect_concurrency = 1
+  # discover_concurrency = 1
+
+  ## whether or not to force discovery of new objects on initial gather call before collecting metrics
+  ## when true, for large environments this may cause errors for time elapsed while collecting metrics
+  ## when false (default) the first collection cycle may result in no or limited metrics while objects are discovered
+  # force_discover_on_init = false
+
+  ## the interval before (re)discovering objects subject to metrics collection (default: 300s)
+  # object_discovery_interval = "300s"
+
+  ## timeout applies to any of the API requests made to vCenter
+  # timeout = "20s"
+
+  ## Optional SSL Config
+  # ssl_ca = "/path/to/cafile"
+  # ssl_cert = "/path/to/certfile"
+  # ssl_key = "/path/to/keyfile"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
+`
+
+// SampleConfig returns a set of default configuration to be used as a boilerplate when setting up
+// Telegraf.
+func (v *VSphere) SampleConfig() string {
+	return sampleConfig
+}
+
+// Description returns a short textual description of the plugin
+func (v *VSphere) Description() string {
+	return "Read metrics from VMware vCenter"
+}
+
+// Start is called from telegraf core when a plugin is started and allows it to
+// perform initialization tasks.
+func (v *VSphere) Start(acc telegraf.Accumulator) error {
+	log.Println("D! [input.vsphere]: Starting plugin")
+	ctx, cancel := context.WithCancel(context.Background())
+	v.cancel = cancel
+
+	// Create endpoints, one for each vCenter we're monitoring
+	v.endpoints = make([]*Endpoint, len(v.Vcenters))
+	for i, rawURL := range v.Vcenters {
+		u, err := soap.ParseURL(rawURL)
+		if err != nil {
+			return err
+		}
+		ep, err := NewEndpoint(ctx, v, u)
+		if err != nil {
+			return err
+		}
+		v.endpoints[i] = ep
+	}
+	return nil
+}
+
+// Stop is called from telegraf core when a plugin is stopped and allows it to
+// perform shutdown tasks.
+func (v *VSphere) Stop() {
+	log.Println("D! [input.vsphere]: Stopping plugin")
+	v.cancel()
+
+	// Wait for all endpoints to finish. No need to wait for
+	// Gather() to finish here, since Stop() will only be called
+	// after the last Gather() has finished. We do, however, need to
+	// wait for any discovery to complete by trying to grab the
+	// "busy" mutex.
+	for _, ep := range v.endpoints {
+		log.Printf("D! [input.vsphere]: Waiting for endpoint %s to finish", ep.URL.Host)
+		func() {
+			ep.busy.Lock() // Wait until discovery is finished
+			defer ep.busy.Unlock()
+			ep.Close()
+		}()
+	}
+}
+
+// Gather is the main data collection function called by the Telegraf core. It performs all
+// the data collection and writes all metrics into the Accumulator passed as an argument.
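+//
+// Collection fans out with one goroutine per configured endpoint. A sketch
+// of driving the plugin by hand (the testutil accumulator is illustrative):
+//
+//    v := &VSphere{Vcenters: []string{"https://vcenter.local/sdk"}}
+//    if err := v.Start(nil); err != nil {
+//        // handle error
+//    }
+//    defer v.Stop()
+//    var acc testutil.Accumulator
+//    err := v.Gather(&acc)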
+func (v *VSphere) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + for _, ep := range v.endpoints { + wg.Add(1) + go func(endpoint *Endpoint) { + defer wg.Done() + err := endpoint.Collect(context.Background(), acc) + if err == context.Canceled { + + // No need to signal errors if we were merely canceled. + err = nil + } + if err != nil { + acc.AddError(err) + } + }(ep) + } + + wg.Wait() + return nil +} + +func init() { + inputs.Add("vsphere", func() telegraf.Input { + return &VSphere{ + Vcenters: []string{}, + + ClusterInstances: true, + ClusterMetricInclude: nil, + ClusterMetricExclude: nil, + HostInstances: true, + HostMetricInclude: nil, + HostMetricExclude: nil, + VMInstances: true, + VMMetricInclude: nil, + VMMetricExclude: nil, + DatastoreInstances: false, + DatastoreMetricInclude: nil, + DatastoreMetricExclude: nil, + Separator: "_", + + MaxQueryObjects: 256, + MaxQueryMetrics: 256, + CollectConcurrency: 1, + DiscoverConcurrency: 1, + ForceDiscoverOnInit: false, + ObjectDiscoveryInterval: internal.Duration{Duration: time.Second * 300}, + Timeout: internal.Duration{Duration: time.Second * 20}, + } + }) +} diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go new file mode 100644 index 000000000..20c61d92b --- /dev/null +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -0,0 +1,246 @@ +package vsphere + +import ( + "context" + "crypto/tls" + "fmt" + "regexp" + "sort" + "testing" + "time" + + "github.com/influxdata/telegraf/internal" + itls "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/testutil" + "github.com/influxdata/toml" + "github.com/stretchr/testify/require" + "github.com/vmware/govmomi/simulator" +) + +var configHeader = ` +# Telegraf Configuration +# +# Telegraf is entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. +# +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. +# +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. +# +# Environment variables can be used anywhere in this config file, simply prepend +# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), +# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) + + +# Global tags can be specified here in key="value" format. +[global_tags] + # dc = "us-east-1" # will tag all metrics with dc=us-east-1 + # rack = "1a" + ## Environment variables can be used as tags, and throughout the config file + # user = "$USER" + + +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "10s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 1000 + + ## For failed writes, telegraf will cache metric_buffer_limit metrics for each + ## output, and will flush this buffer on a successful write. Oldest metrics + ## are dropped first when this buffer fills. + ## This buffer only fills when writes fail to output plugin(s). + metric_buffer_limit = 10000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. 
+ ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Default flushing interval for all outputs. You shouldn't set this below + ## interval. Maximum flush_interval will be flush_interval + flush_jitter + flush_interval = "10s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s. + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + precision = "" + + ## Logging configuration: + ## Run telegraf with debug log messages. + debug = false + ## Run telegraf in quiet mode (error log messages only). + quiet = false + ## Specify the log file name. The empty string means to log to stderr. + logfile = "" + + ## Override default hostname, if empty use os.Hostname() + hostname = "" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = false +` + +func defaultVSphere() *VSphere { + return &VSphere{ + ClusterMetricInclude: []string{ + "cpu.usage.*", + "cpu.usagemhz.*", + "mem.usage.*", + "mem.active.*"}, + ClusterMetricExclude: nil, + HostMetricInclude: []string{ + "cpu.ready.summation.delta.millisecond", + "cpu.latency.average.rate.percent", + "cpu.coreUtilization.average.rate.percent", + "mem.usage.average.absolute.percent", + "mem.swapinRate.average.rate.kiloBytesPerSecond", + "mem.state.latest.absolute.number", + "mem.latency.average.absolute.percent", + "mem.vmmemctl.average.absolute.kiloBytes", + "disk.read.average.rate.kiloBytesPerSecond", + "disk.write.average.rate.kiloBytesPerSecond", + "disk.numberReadAveraged.average.rate.number", + "disk.numberWriteAveraged.average.rate.number", + "disk.deviceReadLatency.average.absolute.millisecond", + "disk.deviceWriteLatency.average.absolute.millisecond", + "disk.totalReadLatency.average.absolute.millisecond", + "disk.totalWriteLatency.average.absolute.millisecond", + "storageAdapter.read.average.rate.kiloBytesPerSecond", + "storageAdapter.write.average.rate.kiloBytesPerSecond", + "storageAdapter.numberReadAveraged.average.rate.number", + "storageAdapter.numberWriteAveraged.average.rate.number", + "net.errorsRx.summation.delta.number", + "net.errorsTx.summation.delta.number", + "net.bytesRx.average.rate.kiloBytesPerSecond", + "net.bytesTx.average.rate.kiloBytesPerSecond", + "cpu.used.summation.delta.millisecond", + "cpu.usage.average.rate.percent", + "cpu.utilization.average.rate.percent", + "cpu.wait.summation.delta.millisecond", + "cpu.idle.summation.delta.millisecond", + "cpu.readiness.average.rate.percent", + "cpu.costop.summation.delta.millisecond", + "cpu.swapwait.summation.delta.millisecond", + "mem.swapoutRate.average.rate.kiloBytesPerSecond", + "disk.kernelReadLatency.average.absolute.millisecond", + "disk.kernelWriteLatency.average.absolute.millisecond"}, + HostMetricExclude: nil, + VMMetricInclude: []string{ + "cpu.ready.summation.delta.millisecond", + 
"mem.swapinRate.average.rate.kiloBytesPerSecond", + "virtualDisk.numberReadAveraged.average.rate.number", + "virtualDisk.numberWriteAveraged.average.rate.number", + "virtualDisk.totalReadLatency.average.absolute.millisecond", + "virtualDisk.totalWriteLatency.average.absolute.millisecond", + "virtualDisk.readOIO.latest.absolute.number", + "virtualDisk.writeOIO.latest.absolute.number", + "net.bytesRx.average.rate.kiloBytesPerSecond", + "net.bytesTx.average.rate.kiloBytesPerSecond", + "net.droppedRx.summation.delta.number", + "net.droppedTx.summation.delta.number", + "cpu.run.summation.delta.millisecond", + "cpu.used.summation.delta.millisecond", + "mem.swapoutRate.average.rate.kiloBytesPerSecond", + "virtualDisk.read.average.rate.kiloBytesPerSecond", + "virtualDisk.write.average.rate.kiloBytesPerSecond"}, + VMMetricExclude: nil, + DatastoreMetricInclude: []string{ + "disk.used.*", + "disk.provsioned.*"}, + DatastoreMetricExclude: nil, + ClientConfig: itls.ClientConfig{InsecureSkipVerify: true}, + + MaxQueryObjects: 256, + ObjectDiscoveryInterval: internal.Duration{Duration: time.Second * 300}, + Timeout: internal.Duration{Duration: time.Second * 20}, + ForceDiscoverOnInit: true, + } +} + +func createSim() (*simulator.Model, *simulator.Server, error) { + model := simulator.VPX() + + err := model.Create() + if err != nil { + return nil, nil, err + } + + model.Service.TLS = new(tls.Config) + + s := model.Service.NewServer() + //fmt.Printf("Server created at: %s\n", s.URL) + + return model, s, nil +} + +func TestParseConfig(t *testing.T) { + v := VSphere{} + c := v.SampleConfig() + p := regexp.MustCompile("\n#") + fmt.Printf("Source=%s", p.ReplaceAllLiteralString(c, "\n")) + c = configHeader + "\n[[inputs.vsphere]]\n" + p.ReplaceAllLiteralString(c, "\n") + fmt.Printf("Source=%s", c) + tab, err := toml.Parse([]byte(c)) + require.NoError(t, err) + require.NotNil(t, tab) +} + +func TestWorkerPool(t *testing.T) { + wp := NewWorkerPool(100) + ctx := context.Background() + wp.Run(ctx, func(ctx context.Context, p interface{}) interface{} { + return p.(int) * 2 + }, 10) + + n := 100000 + wp.Fill(ctx, func(ctx context.Context, f PushFunc) { + for i := 0; i < n; i++ { + f(ctx, i) + } + }) + results := make([]int, n) + i := 0 + wp.Drain(ctx, func(ctx context.Context, p interface{}) bool { + results[i] = p.(int) + i++ + return true + }) + sort.Ints(results) + for i := 0; i < n; i++ { + require.Equal(t, results[i], i*2) + } +} + +func TestAll(t *testing.T) { + m, s, err := createSim() + if err != nil { + t.Fatal(err) + } + defer m.Remove() + defer s.Close() + + var acc testutil.Accumulator + v := defaultVSphere() + v.Vcenters = []string{s.URL.String()} + v.Start(nil) // We're not using the Accumulator, so it can be nil. + defer v.Stop() + require.NoError(t, v.Gather(&acc)) +} diff --git a/plugins/inputs/vsphere/workerpool.go b/plugins/inputs/vsphere/workerpool.go new file mode 100644 index 000000000..6695735ce --- /dev/null +++ b/plugins/inputs/vsphere/workerpool.go @@ -0,0 +1,119 @@ +package vsphere + +import ( + "context" + "log" + "sync" +) + +// WorkerFunc is a function that is supposed to do the actual work +// of the WorkerPool. It is similar to the "map" portion of the +// map/reduce semantics, in that it takes a single value as an input, +// does some processing and returns a single result. +type WorkerFunc func(context.Context, interface{}) interface{} + +// PushFunc is called from a FillerFunc to push a workitem onto +// the input channel. Wraps some logic for gracefulk shutdowns. 
+type PushFunc func(context.Context, interface{}) bool + +// DrainerFunc represents a function used to "drain" the WorkerPool, +// i.e. pull out all the results generated by the workers and processing +// them. The DrainerFunc is called once per result produced. +// If the function returns false, the draining of the pool is aborted. +type DrainerFunc func(context.Context, interface{}) bool + +// FillerFunc represents a function for filling the WorkerPool with jobs. +// It is called once and is responsible for pushing jobs onto the supplied channel. +type FillerFunc func(context.Context, PushFunc) + +// WorkerPool implements a simple work pooling mechanism. It runs a predefined +// number of goroutines to process jobs. Jobs are inserted using the Fill call +// and results are retrieved through the Drain function. +type WorkerPool struct { + wg sync.WaitGroup + In chan interface{} + Out chan interface{} +} + +// NewWorkerPool creates a worker pool +func NewWorkerPool(bufsize int) *WorkerPool { + return &WorkerPool{ + In: make(chan interface{}, bufsize), + Out: make(chan interface{}, bufsize), + } +} + +func (w *WorkerPool) push(ctx context.Context, job interface{}) bool { + select { + case w.In <- job: + return true + case <-ctx.Done(): + return false + } +} + +func (w *WorkerPool) pushOut(ctx context.Context, result interface{}) bool { + select { + case w.Out <- result: + return true + case <-ctx.Done(): + return false + } +} + +// Run takes a WorkerFunc and runs it in 'n' goroutines. +func (w *WorkerPool) Run(ctx context.Context, f WorkerFunc, n int) bool { + w.wg.Add(1) + go func() { + defer w.wg.Done() + var localWg sync.WaitGroup + localWg.Add(n) + for i := 0; i < n; i++ { + go func() { + defer localWg.Done() + for { + select { + case job, ok := <-w.In: + if !ok { + return + } + w.pushOut(ctx, f(ctx, job)) + case <-ctx.Done(): + log.Printf("D! [input.vsphere]: Stop requested for worker pool. Exiting.") + return + } + } + }() + } + localWg.Wait() + close(w.Out) + }() + return ctx.Err() == nil +} + +// Fill runs a FillerFunc responsible for supplying work to the pool. You may only +// call Fill once. Calling it twice will panic. +func (w *WorkerPool) Fill(ctx context.Context, f FillerFunc) bool { + w.wg.Add(1) + go func() { + defer w.wg.Done() + f(ctx, w.push) + close(w.In) + }() + return true +} + +// Drain runs a DrainerFunc for each result generated by the workers. +func (w *WorkerPool) Drain(ctx context.Context, f DrainerFunc) bool { + w.wg.Add(1) + go func() { + defer w.wg.Done() + for result := range w.Out { + if !f(ctx, result) { + break + } + } + }() + w.wg.Wait() + return ctx.Err() != nil +} From 51bb937fdd909a4a750f97a8c3983983b3543ac7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 11 Sep 2018 14:55:18 -0700 Subject: [PATCH 0160/1815] Update changelog and readme --- CHANGELOG.md | 4 +++- README.md | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cf05e9311..49ff3c700 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ - [pgbouncer](./plugins/inputs/pgbouncer/README.md) - Contributed by @nerzhul - [temp](./plugins/inputs/temp/README.md) - Contributed by @pytimer - [tengine](./plugins/inputs/tengine/README.md) - Contributed by @ertaoxu +- [vsphere](./plugins/inputs/vsphere/README.md) - Contributed by @prydin - [x509_cert](./plugins/inputs/x509_cert/README.md) - Contributed by @jtyr ### New Processors @@ -99,7 +100,8 @@ - [#4411](https://github.com/influxdata/telegraf/pull/4411): Add temp input plugin. 
- [#4272](https://github.com/influxdata/telegraf/pull/4272): Add Beanstalkd input plugin. - [#4669](https://github.com/influxdata/telegraf/pull/4669): Add means to specify server password for redis input. -- [#4339](https://github.com/influxdata/telegraf/pull/4339): Add Splunk Metrics serializer +- [#4339](https://github.com/influxdata/telegraf/pull/4339): Add Splunk Metrics serializer. +- [#4141](https://github.com/influxdata/telegraf/pull/4141): Add input plugin for VMware vSphere. ### Bugfixes diff --git a/README.md b/README.md index 7ac997ff6..c93d9ea77 100644 --- a/README.md +++ b/README.md @@ -252,6 +252,7 @@ configuration options. * [udp_listener](./plugins/inputs/socket_listener) * [unbound](./plugins/inputs/unbound) * [varnish](./plugins/inputs/varnish) +* [vsphere](./plugins/inputs/vsphere) VMware vSphere * [webhooks](./plugins/inputs/webhooks) * [filestack](./plugins/inputs/webhooks/filestack) * [github](./plugins/inputs/webhooks/github) From 03a119e322e260568ec86dcd420f9c285756e4bd Mon Sep 17 00:00:00 2001 From: Jon McKenzie Date: Tue, 11 Sep 2018 17:59:39 -0400 Subject: [PATCH 0161/1815] Align metrics window to interval in cloudwatch input (#4667) --- plugins/inputs/cloudwatch/cloudwatch.go | 36 ++++++++++++++----- plugins/inputs/cloudwatch/cloudwatch_test.go | 37 +++++++++++++++++++- 2 files changed, 64 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index 9ba15b6ac..626511e2f 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -36,6 +36,8 @@ type ( RateLimit int `toml:"ratelimit"` client cloudwatchClient metricCache *MetricCache + windowStart time.Time + windowEnd time.Time } Metric struct { @@ -197,6 +199,11 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { now := time.Now() + err = c.updateWindow(now) + if err != nil { + return err + } + // limit concurrency or we can easily exhaust user connection limit // see cloudwatch API request limits: // http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html @@ -208,7 +215,7 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { <-lmtr.C go func(inm *cloudwatch.Metric) { defer wg.Done() - acc.AddError(c.gatherMetric(acc, inm, now)) + acc.AddError(c.gatherMetric(acc, inm)) }(m) } wg.Wait() @@ -216,6 +223,22 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { return nil } +func (c *CloudWatch) updateWindow(relativeTo time.Time) error { + windowEnd := relativeTo.Add(-c.Delay.Duration) + + if c.windowEnd.IsZero() { + // this is the first run, no window info, so just get a single period + c.windowStart = windowEnd.Add(-c.Period.Duration) + } else { + // subsequent window, start where last window left off + c.windowStart = c.windowEnd + } + + c.windowEnd = windowEnd + + return nil +} + func init() { inputs.Add("cloudwatch", func() telegraf.Input { ttl, _ := time.ParseDuration("1hr") @@ -291,9 +314,8 @@ func (c *CloudWatch) fetchNamespaceMetrics() ([]*cloudwatch.Metric, error) { func (c *CloudWatch) gatherMetric( acc telegraf.Accumulator, metric *cloudwatch.Metric, - now time.Time, ) error { - params := c.getStatisticsInput(metric, now) + params := c.getStatisticsInput(metric) resp, err := c.client.GetMetricStatistics(params) if err != nil { return err @@ -356,12 +378,10 @@ func snakeCase(s string) string { /* * Map Metric to *cloudwatch.GetMetricStatisticsInput for given timeframe */ -func (c *CloudWatch) getStatisticsInput(metric 
*cloudwatch.Metric, now time.Time) *cloudwatch.GetMetricStatisticsInput { - end := now.Add(-c.Delay.Duration) - +func (c *CloudWatch) getStatisticsInput(metric *cloudwatch.Metric) *cloudwatch.GetMetricStatisticsInput { input := &cloudwatch.GetMetricStatisticsInput{ - StartTime: aws.Time(end.Add(-c.Period.Duration)), - EndTime: aws.Time(end), + StartTime: aws.Time(c.windowStart), + EndTime: aws.Time(c.windowEnd), MetricName: metric.MetricName, Namespace: metric.Namespace, Period: aws.Int64(int64(c.Period.Duration.Seconds())), diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index c52b3a353..57c92b3f6 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -197,7 +197,9 @@ func TestGenerateStatisticsInputParams(t *testing.T) { now := time.Now() - params := c.getStatisticsInput(m, now) + c.updateWindow(now) + + params := c.getStatisticsInput(m) assert.EqualValues(t, *params.EndTime, now.Add(-c.Delay.Duration)) assert.EqualValues(t, *params.StartTime, now.Add(-c.Period.Duration).Add(-c.Delay.Duration)) @@ -217,3 +219,36 @@ func TestMetricsCacheTimeout(t *testing.T) { cache.Fetched = time.Now().Add(-time.Minute) assert.False(t, cache.IsValid()) } + +func TestUpdateWindow(t *testing.T) { + duration, _ := time.ParseDuration("1m") + internalDuration := internal.Duration{ + Duration: duration, + } + + c := &CloudWatch{ + Namespace: "AWS/ELB", + Delay: internalDuration, + Period: internalDuration, + } + + now := time.Now() + + assert.True(t, c.windowEnd.IsZero()) + assert.True(t, c.windowStart.IsZero()) + + c.updateWindow(now) + + newStartTime := c.windowEnd + + // initial window just has a single period + assert.EqualValues(t, c.windowEnd, now.Add(-c.Delay.Duration)) + assert.EqualValues(t, c.windowStart, now.Add(-c.Delay.Duration).Add(-c.Period.Duration)) + + now = time.Now() + c.updateWindow(now) + + // subsequent window uses previous end time as start time + assert.EqualValues(t, c.windowEnd, now.Add(-c.Delay.Duration)) + assert.EqualValues(t, c.windowStart, newStartTime) +} From d4a74337faff1c441edbe9c41ac70d9526a2c45b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 11 Sep 2018 15:00:21 -0700 Subject: [PATCH 0162/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 49ff3c700..91374d12d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -102,6 +102,7 @@ - [#4669](https://github.com/influxdata/telegraf/pull/4669): Add means to specify server password for redis input. - [#4339](https://github.com/influxdata/telegraf/pull/4339): Add Splunk Metrics serializer. - [#4141](https://github.com/influxdata/telegraf/pull/4141): Add input plugin for VMware vSphere. +- [#4667](https://github.com/influxdata/telegraf/pull/4667): Align metrics window to interval in cloudwatch input. ### Bugfixes From 9d72d078a37da0234fc6b41a24624c0806e7f0a3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 11 Sep 2018 15:15:45 -0700 Subject: [PATCH 0163/1815] Add new fields to mem input readme --- plugins/inputs/mem/README.md | 49 ++++++++++++++++++++++++++---------- 1 file changed, 36 insertions(+), 13 deletions(-) diff --git a/plugins/inputs/mem/README.md b/plugins/inputs/mem/README.md index 8a9ff823c..842546825 100644 --- a/plugins/inputs/mem/README.md +++ b/plugins/inputs/mem/README.md @@ -14,22 +14,45 @@ For a more complete explanation of the difference between *used* and ### Metrics: +Available fields are dependent on platform. 
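+
+The `used_percent` and `available_percent` fields are derived from the raw
+counters; a minimal sketch of the calculation (variable names are
+illustrative):
+
+```go
+usedPercent := 100 * float64(used) / float64(total)
+availablePercent := 100 * float64(available) / float64(total)
+```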
+ - mem - fields: - - active (int) - - available (int) - - buffered (int) - - cached (int) - - free (int) - - inactive (int) - - slab (int) - - total (int) - - used (int) - - available_percent (float) - - used_percent (float) - - wired (int) + - active (integer) + - available (integer) + - buffered (integer) + - cached (integer) + - free (integer) + - inactive (integer) + - slab (integer) + - total (integer) + - used (integer) + - available_percent (float) + - used_percent (float) + - wired (integer) + - commit_limit (integer) + - committed_as (integer) + - dirty (integer) + - high_free (integer) + - high_total (integer) + - huge_page_size (integer) + - huge_pages_free (integer) + - huge_pages_total (integer) + - low_free (integer) + - low_total (integer) + - mapped (integer) + - page_tables (integer) + - shared (integer) + - swap_cached (integer) + - swap_free (integer) + - swap_total (integer) + - vmalloc_chunk (integer) + - vmalloc_total (integer) + - vmalloc_used (integer) + - write_back (integer) + - write_back_tmp (integer) ### Example Output: ``` -mem cached=7809495040i,inactive=6348988416i,total=20855394304i,available=11378946048i,buffered=927199232i,active=11292905472i,slab=1351340032i,used_percent=45.43883523785713,available_percent=54.56116476214287,used=9476448256i,free=1715331072i 1511894782000000000 +mem active=11347566592i,available=18705133568i,available_percent=89.4288960571006,buffered=1976709120i,cached=13975572480i,commit_limit=14753067008i,committed_as=2872422400i,dirty=87461888i,free=1352400896i,high_free=0i,high_total=0i,huge_page_size=2097152i,huge_pages_free=0i,huge_pages_total=0i,inactive=6201593856i,low_free=0i,low_total=0i,mapped=310427648i,page_tables=14397440i,shared=200781824i,slab=1937526784i,swap_cached=0i,swap_free=4294963200i,swap_total=4294963200i,total=20916207616i,used=3611525120i,used_percent=17.26663449848977,vmalloc_chunk=0i,vmalloc_total=35184372087808i,vmalloc_used=0i,wired=0i,write_back=0i,write_back_tmp=0i 1536704085000000000 ``` From eff7f0f0830f4ac19b891ecaf90463c2ebb211fd Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 11 Sep 2018 16:03:47 -0700 Subject: [PATCH 0164/1815] Use operation subtables in enum and rename processors (#4672) --- plugins/processors/enum/README.md | 20 ++++--- plugins/processors/enum/enum.go | 33 ++++++------ plugins/processors/enum/enum_test.go | 14 ++--- plugins/processors/rename/README.md | 35 ++++++------ plugins/processors/rename/rename.go | 69 ++++++++++-------------- plugins/processors/rename/rename_test.go | 23 ++++---- 6 files changed, 93 insertions(+), 101 deletions(-) diff --git a/plugins/processors/enum/README.md b/plugins/processors/enum/README.md index f0ed58566..291c25c8f 100644 --- a/plugins/processors/enum/README.md +++ b/plugins/processors/enum/README.md @@ -13,13 +13,13 @@ source field is overwritten. ```toml [[processors.enum]] - [[processors.enum.fields]] + [[processors.enum.mapping]] ## Name of the field to map - source = "name" + field = "status" ## Destination field to be used for the mapped value. By default the source ## field is used, overwriting the original value. - # destination = "mapped" + # dest = "status_code" ## Default value to be used for all values not contained in the mapping ## table. When unset, the unmodified value for the field will be used if no @@ -27,7 +27,15 @@ source field is overwritten. 
# default = 0 ## Table of mappings - [processors.enum.fields.value_mappings] - value1 = 1 - value2 = 2 + [processors.enum.mapping.value_mappings] + green = 1 + amber = 2 + red = 3 +``` + +### Example: + +```diff +- xyzzy status="green" 1502489900000000000 ++ xyzzy status="green",status_code=1i 1502489900000000000 ``` diff --git a/plugins/processors/enum/enum.go b/plugins/processors/enum/enum.go index 134a02bb1..b08307f09 100644 --- a/plugins/processors/enum/enum.go +++ b/plugins/processors/enum/enum.go @@ -8,13 +8,13 @@ import ( ) var sampleConfig = ` - [[processors.enum.fields]] + [[processors.enum.mapping]] ## Name of the field to map - source = "name" + field = "status" ## Destination field to be used for the mapped value. By default the source ## field is used, overwriting the original value. - # destination = "mapped" + # dest = "status_code" ## Default value to be used for all values not contained in the mapping ## table. When unset, the unmodified value for the field will be used if no @@ -22,18 +22,19 @@ var sampleConfig = ` # default = 0 ## Table of mappings - [processors.enum.fields.value_mappings] - value1 = 1 - value2 = 2 + [processors.enum.mapping.value_mappings] + green = 1 + yellow = 2 + red = 3 ` type EnumMapper struct { - Fields []Mapping + Mappings []Mapping `toml:"mapping"` } type Mapping struct { - Source string - Destination string + Field string + Dest string Default interface{} ValueMappings map[string]interface{} } @@ -54,8 +55,8 @@ func (mapper *EnumMapper) Apply(in ...telegraf.Metric) []telegraf.Metric { } func (mapper *EnumMapper) applyMappings(metric telegraf.Metric) telegraf.Metric { - for _, mapping := range mapper.Fields { - if originalValue, isPresent := metric.GetField(mapping.Source); isPresent == true { + for _, mapping := range mapper.Mappings { + if originalValue, isPresent := metric.GetField(mapping.Field); isPresent == true { if adjustedValue, isString := adjustBoolValue(originalValue).(string); isString == true { if mappedValue, isMappedValuePresent := mapping.mapValue(adjustedValue); isMappedValuePresent == true { writeField(metric, mapping.getDestination(), mappedValue) @@ -84,16 +85,14 @@ func (mapping *Mapping) mapValue(original string) (interface{}, bool) { } func (mapping *Mapping) getDestination() string { - if mapping.Destination != "" { - return mapping.Destination + if mapping.Dest != "" { + return mapping.Dest } - return mapping.Source + return mapping.Field } func writeField(metric telegraf.Metric, name string, value interface{}) { - if metric.HasField(name) { - metric.RemoveField(name) - } + metric.RemoveField(name) metric.AddField(name, value) } diff --git a/plugins/processors/enum/enum_test.go b/plugins/processors/enum/enum_test.go index 2185b91b6..d8c0e26de 100644 --- a/plugins/processors/enum/enum_test.go +++ b/plugins/processors/enum/enum_test.go @@ -49,7 +49,7 @@ func TestRetainsMetric(t *testing.T) { } func TestMapsSingleStringValue(t *testing.T) { - mapper := EnumMapper{Fields: []Mapping{{Source: "string_value", ValueMappings: map[string]interface{}{"test": int64(1)}}}} + mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", ValueMappings: map[string]interface{}{"test": int64(1)}}}} fields := calculateProcessedValues(mapper, createTestMetric()) @@ -57,7 +57,7 @@ func TestMapsSingleStringValue(t *testing.T) { } func TestNoFailureOnMappingsOnNonStringValuedFields(t *testing.T) { - mapper := EnumMapper{Fields: []Mapping{{Source: "int_value", ValueMappings: map[string]interface{}{"13i": int64(7)}}}} + mapper := 
EnumMapper{Mappings: []Mapping{{Field: "int_value", ValueMappings: map[string]interface{}{"13i": int64(7)}}}} fields := calculateProcessedValues(mapper, createTestMetric()) @@ -65,7 +65,7 @@ func TestNoFailureOnMappingsOnNonStringValuedFields(t *testing.T) { } func TestMapSingleBoolValue(t *testing.T) { - mapper := EnumMapper{Fields: []Mapping{{Source: "true_value", ValueMappings: map[string]interface{}{"true": int64(1)}}}} + mapper := EnumMapper{Mappings: []Mapping{{Field: "true_value", ValueMappings: map[string]interface{}{"true": int64(1)}}}} fields := calculateProcessedValues(mapper, createTestMetric()) @@ -73,7 +73,7 @@ func TestMapSingleBoolValue(t *testing.T) { } func TestMapsToDefaultValueOnUnknownSourceValue(t *testing.T) { - mapper := EnumMapper{Fields: []Mapping{{Source: "string_value", Default: int64(42), ValueMappings: map[string]interface{}{"other": int64(1)}}}} + mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Default: int64(42), ValueMappings: map[string]interface{}{"other": int64(1)}}}} fields := calculateProcessedValues(mapper, createTestMetric()) @@ -81,7 +81,7 @@ func TestMapsToDefaultValueOnUnknownSourceValue(t *testing.T) { } func TestDoNotMapToDefaultValueKnownSourceValue(t *testing.T) { - mapper := EnumMapper{Fields: []Mapping{{Source: "string_value", Default: int64(42), ValueMappings: map[string]interface{}{"test": int64(1)}}}} + mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Default: int64(42), ValueMappings: map[string]interface{}{"test": int64(1)}}}} fields := calculateProcessedValues(mapper, createTestMetric()) @@ -89,7 +89,7 @@ func TestDoNotMapToDefaultValueKnownSourceValue(t *testing.T) { } func TestNoMappingWithoutDefaultOrDefinedMappingValue(t *testing.T) { - mapper := EnumMapper{Fields: []Mapping{{Source: "string_value", ValueMappings: map[string]interface{}{"other": int64(1)}}}} + mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", ValueMappings: map[string]interface{}{"other": int64(1)}}}} fields := calculateProcessedValues(mapper, createTestMetric()) @@ -97,7 +97,7 @@ func TestNoMappingWithoutDefaultOrDefinedMappingValue(t *testing.T) { } func TestWritesToDestination(t *testing.T) { - mapper := EnumMapper{Fields: []Mapping{{Source: "string_value", Destination: "string_code", ValueMappings: map[string]interface{}{"test": int64(1)}}}} + mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Dest: "string_code", ValueMappings: map[string]interface{}{"test": int64(1)}}}} fields := calculateProcessedValues(mapper, createTestMetric()) diff --git a/plugins/processors/rename/README.md b/plugins/processors/rename/README.md index dbd31490e..cc3c61a94 100644 --- a/plugins/processors/rename/README.md +++ b/plugins/processors/rename/README.md @@ -5,28 +5,23 @@ The `rename` processor renames measurements, fields, and tags. ### Configuration: ```toml -## Measurement, tag, and field renamings are stored in separate sub-tables. -## Specify one sub-table per rename operation. [[processors.rename]] -[[processors.rename.measurement]] - ## measurement to change - from = "network_interface_throughput" - to = "throughput" + ## Specify one sub-table per rename operation. 
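+  ## Each replace sub-table should set exactly one of "measurement", "tag",
+  ## or "field", together with "dest"; entries without "dest" are ignored.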
+ [[processors.rename.replace]] + measurement = "network_interface_throughput" + dest = "throughput" -[[processors.rename.tag]] - ## tag to change - from = "hostname" - to = "host" + [[processors.rename.replace]] + tag = "hostname" + dest = "host" -[[processors.rename.field]] - ## field to change - from = "lower" - to = "min" + [[processors.rename.replace]] + field = "lower" + dest = "min" -[[processors.rename.field]] - ## field to change - from = "upper" - to = "max" + [[processors.rename.replace]] + field = "upper" + dest = "max" ``` ### Tags: @@ -36,6 +31,6 @@ No tags are applied by this processor, though it can alter them by renaming. ### Example processing: ```diff -- network_interface_throughput,hostname=backend.example.com,units=kbps lower=10i,upper=1000i,mean=500i 1502489900000000000 -+ throughput,host=backend.example.com,units=kbps min=10i,max=1000i,mean=500i 1502489900000000000 +- network_interface_throughput,hostname=backend.example.com lower=10i,upper=1000i,mean=500i 1502489900000000000 ++ throughput,host=backend.example.com min=10i,max=1000i,mean=500i 1502489900000000000 ``` diff --git a/plugins/processors/rename/rename.go b/plugins/processors/rename/rename.go index 2da787a35..acb6d2ccc 100644 --- a/plugins/processors/rename/rename.go +++ b/plugins/processors/rename/rename.go @@ -6,38 +6,17 @@ import ( ) const sampleConfig = ` - ## Measurement, tag, and field renamings are stored in separate sub-tables. - ## Specify one sub-table per rename operation. - # [[processors.rename.measurement]] - # ## measurement to change - # from = "kilobytes_per_second" - # to = "kbps" - - # [[processors.rename.tag]] - # ## tag to change - # from = "host" - # to = "hostname" - - # [[processors.rename.field]] - # ## field to change - # from = "lower" - # to = "min" - - # [[processors.rename.field]] - # ## field to change - # from = "upper" - # to = "max" ` -type renamer struct { - From string - To string +type Replace struct { + Measurement string `toml:"measurement"` + Tag string `toml:"tag"` + Field string `toml:"field"` + Dest string `toml:"dest"` } type Rename struct { - Measurement []renamer - Tag []renamer - Field []renamer + Replaces []Replace `toml:"replace"` } func (r *Rename) SampleConfig() string { @@ -50,24 +29,32 @@ func (r *Rename) Description() string { func (r *Rename) Apply(in ...telegraf.Metric) []telegraf.Metric { for _, point := range in { - for _, measurementRenamer := range r.Measurement { - if point.Name() == measurementRenamer.From { - point.SetName(measurementRenamer.To) - break + for _, replace := range r.Replaces { + if replace.Dest == "" { + continue } - } - for _, tagRenamer := range r.Tag { - if value, ok := point.GetTag(tagRenamer.From); ok { - point.RemoveTag(tagRenamer.From) - point.AddTag(tagRenamer.To, value) + if replace.Measurement != "" { + if value := point.Name(); value == replace.Measurement { + point.SetName(replace.Dest) + } + continue } - } - for _, fieldRenamer := range r.Field { - if value, ok := point.GetField(fieldRenamer.From); ok { - point.RemoveField(fieldRenamer.From) - point.AddField(fieldRenamer.To, value) + if replace.Tag != "" { + if value, ok := point.GetTag(replace.Tag); ok { + point.RemoveTag(replace.Tag) + point.AddTag(replace.Dest, value) + } + continue + } + + if replace.Field != "" { + if value, ok := point.GetField(replace.Field); ok { + point.RemoveField(replace.Field) + point.AddField(replace.Dest, value) + } + continue } } } diff --git a/plugins/processors/rename/rename_test.go b/plugins/processors/rename/rename_test.go index 
43f7fcc30..1f8e0b7db 100644 --- a/plugins/processors/rename/rename_test.go +++ b/plugins/processors/rename/rename_test.go @@ -21,10 +21,11 @@ func newMetric(name string, tags map[string]string, fields map[string]interface{ } func TestMeasurementRename(t *testing.T) { - r := Rename{} - r.Measurement = []renamer{ - {From: "foo", To: "bar"}, - {From: "baz", To: "quux"}, + r := Rename{ + Replaces: []Replace{ + {Measurement: "foo", Dest: "bar"}, + {Measurement: "baz", Dest: "quux"}, + }, } m1 := newMetric("foo", nil, nil) m2 := newMetric("bar", nil, nil) @@ -36,9 +37,10 @@ func TestMeasurementRename(t *testing.T) { } func TestTagRename(t *testing.T) { - r := Rename{} - r.Tag = []renamer{ - {From: "hostname", To: "host"}, + r := Rename{ + Replaces: []Replace{ + {Tag: "hostname", Dest: "host"}, + }, } m := newMetric("foo", map[string]string{"hostname": "localhost", "region": "east-1"}, nil) results := r.Apply(m) @@ -47,9 +49,10 @@ func TestTagRename(t *testing.T) { } func TestFieldRename(t *testing.T) { - r := Rename{} - r.Field = []renamer{ - {From: "time_msec", To: "time"}, + r := Rename{ + Replaces: []Replace{ + {Field: "time_msec", Dest: "time"}, + }, } m := newMetric("foo", nil, map[string]interface{}{"time_msec": int64(1250), "snakes": true}) results := r.Apply(m) From 4c571d2cfadabba92e0d330338031b35a8962c9b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 11 Sep 2018 16:04:16 -0700 Subject: [PATCH 0165/1815] Log access denied opening a service at debug level (#4674) --- plugins/inputs/win_services/README.md | 19 +- plugins/inputs/win_services/win_services.go | 164 ++++++++++-------- .../win_services_integration_test.go | 98 ++--------- 3 files changed, 125 insertions(+), 156 deletions(-) diff --git a/plugins/inputs/win_services/README.md b/plugins/inputs/win_services/README.md index 4aa9e6b86..eef641718 100644 --- a/plugins/inputs/win_services/README.md +++ b/plugins/inputs/win_services/README.md @@ -1,7 +1,9 @@ -# Telegraf Plugin: win_services -Input plugin to report Windows services info. +# Windows Services Input Plugin + +Reports information about Windows service status. + +Monitoring some services may require running Telegraf with administrator privileges. -It requires that Telegraf must be running under the administrator privileges. ### Configuration: ```toml @@ -25,7 +27,7 @@ The `state` field can have the following values: - 3 - stop pending - 4 - running - 5 - continue pending -- 6 - pause pending +- 6 - pause pending - 7 - paused The `startup_mode` field can have the following values: @@ -33,7 +35,7 @@ The `startup_mode` field can have the following values: - 1 - system start - 2 - auto start - 3 - demand start -- 4 - disabled +- 4 - disabled ### Tags: @@ -43,14 +45,13 @@ The `startup_mode` field can have the following values: ### Example Output: ``` -* Plugin: inputs.win_services, Collection 1 -> win_services,host=WIN2008R2H401,display_name=Server,service_name=LanmanServer state=4i,startup_mode=2i 1500040669000000000 -> win_services,display_name=Remote\ Desktop\ Services,service_name=TermService,host=WIN2008R2H401 state=1i,startup_mode=3i 1500040669000000000 +win_services,host=WIN2008R2H401,display_name=Server,service_name=LanmanServer state=4i,startup_mode=2i 1500040669000000000 +win_services,display_name=Remote\ Desktop\ Services,service_name=TermService,host=WIN2008R2H401 state=1i,startup_mode=3i 1500040669000000000 ``` ### TICK Scripts A sample TICK script for a notification about a not running service. 
-It sends a notification whenever any service changes its state to be not _running_ and when it changes that state back to _running_. +It sends a notification whenever any service changes its state to be not _running_ and when it changes that state back to _running_. The notification is sent via an HTTP POST call. ``` diff --git a/plugins/inputs/win_services/win_services.go b/plugins/inputs/win_services/win_services.go index 8e56a96d0..1befc4a60 100644 --- a/plugins/inputs/win_services/win_services.go +++ b/plugins/inputs/win_services/win_services.go @@ -4,32 +4,52 @@ package win_services import ( "fmt" + "log" + "os" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" "golang.org/x/sys/windows/svc" "golang.org/x/sys/windows/svc/mgr" ) -//WinService provides interface for svc.Service +type ServiceErr struct { + Message string + Service string + Err error +} + +func (e *ServiceErr) Error() string { + return fmt.Sprintf("%s: '%s': %v", e.Message, e.Service, e.Err) +} + +func IsPermission(err error) bool { + if err, ok := err.(*ServiceErr); ok { + return os.IsPermission(err.Err) + } + return false +} + +// WinService provides interface for svc.Service type WinService interface { Close() error Config() (mgr.Config, error) Query() (svc.Status, error) } -//WinServiceManagerProvider sets interface for acquiring manager instance, like mgr.Mgr -type WinServiceManagerProvider interface { +// ManagerProvider sets interface for acquiring manager instance, like mgr.Mgr +type ManagerProvider interface { Connect() (WinServiceManager, error) } -//WinServiceManager provides interface for mgr.Mgr +// WinServiceManager provides interface for mgr.Mgr type WinServiceManager interface { Disconnect() error OpenService(name string) (WinService, error) ListServices() ([]string, error) } -//WinSvcMgr is wrapper for mgr.Mgr implementing WinServiceManager interface +// WinSvcMgr is wrapper for mgr.Mgr implementing WinServiceManager interface type WinSvcMgr struct { realMgr *mgr.Mgr } @@ -45,7 +65,7 @@ func (m *WinSvcMgr) ListServices() ([]string, error) { return m.realMgr.ListServices() } -//MgProvider is an implementation of WinServiceManagerProvider interface returning WinSvcMgr +// MgProvider is an implementation of WinServiceManagerProvider interface returning WinSvcMgr type MgProvider struct { } @@ -71,7 +91,7 @@ var description = "Input plugin to report Windows services info." 
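+
+// The WinService, WinServiceManager, and ManagerProvider interfaces above
+// decouple the plugin from the real Windows service manager so it can be
+// faked in tests. A minimal sketch of such a fake (the type name is
+// illustrative, not part of this patch):
+//
+//    type fakeSvcMgr struct{}
+//
+//    func (m *fakeSvcMgr) Disconnect() error { return nil }
+//    func (m *fakeSvcMgr) ListServices() ([]string, error) {
+//        return []string{"LanmanServer"}, nil
+//    }
+//    func (m *fakeSvcMgr) OpenService(name string) (WinService, error) {
+//        return nil, &ServiceErr{Message: "could not open service", Service: name}
+//    }
+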
//WinServices is an implementation if telegraf.Input interface, providing info about Windows Services type WinServices struct { ServiceNames []string `toml:"service_names"` - mgrProvider WinServiceManagerProvider + mgrProvider ManagerProvider } type ServiceInfo struct { @@ -79,7 +99,6 @@ type ServiceInfo struct { DisplayName string State int StartUpMode int - Error error } func (m *WinServices) Description() string { @@ -91,93 +110,102 @@ func (m *WinServices) SampleConfig() string { } func (m *WinServices) Gather(acc telegraf.Accumulator) error { + scmgr, err := m.mgrProvider.Connect() + if err != nil { + return fmt.Errorf("Could not open service manager: %s", err) + } + defer scmgr.Disconnect() - serviceInfos, err := listServices(m.mgrProvider, m.ServiceNames) - + serviceNames, err := listServices(scmgr, m.ServiceNames) if err != nil { return err } - for _, service := range serviceInfos { - if service.Error == nil { - fields := make(map[string]interface{}) - tags := make(map[string]string) - - //display name could be empty, but still valid service - if len(service.DisplayName) > 0 { - tags["display_name"] = service.DisplayName + for _, srvName := range serviceNames { + service, err := collectServiceInfo(scmgr, srvName) + if err != nil { + if IsPermission(err) { + log.Printf("D! Error in plugin [inputs.win_services]: %v", err) + } else { + acc.AddError(err) } - tags["service_name"] = service.ServiceName - - fields["state"] = service.State - fields["startup_mode"] = service.StartUpMode - - acc.AddFields("win_services", fields, tags) - } else { - acc.AddError(service.Error) + continue } + + tags := map[string]string{ + "service_name": service.ServiceName, + } + //display name could be empty, but still valid service + if len(service.DisplayName) > 0 { + tags["display_name"] = service.DisplayName + } + + fields := map[string]interface{}{ + "state": service.State, + "startup_mode": service.StartUpMode, + } + acc.AddFields("win_services", fields, tags) } return nil } -//listServices gathers info about given services. If userServices is empty, it return info about all services on current Windows host. Any a critical error is returned. -func listServices(mgrProv WinServiceManagerProvider, userServices []string) ([]ServiceInfo, error) { - scmgr, err := mgrProv.Connect() +// listServices returns a list of services to gather. +func listServices(scmgr WinServiceManager, userServices []string) ([]string, error) { + if len(userServices) != 0 { + return userServices, nil + } + + names, err := scmgr.ListServices() if err != nil { - return nil, fmt.Errorf("Could not open service manager: %s", err) + return nil, fmt.Errorf("Could not list services: %s", err) } - defer scmgr.Disconnect() - - var serviceNames []string - if len(userServices) == 0 { - //Listing service names from system - serviceNames, err = scmgr.ListServices() - if err != nil { - return nil, fmt.Errorf("Could not list services: %s", err) - } - } else { - serviceNames = userServices - } - serviceInfos := make([]ServiceInfo, len(serviceNames)) - - for i, srvName := range serviceNames { - serviceInfos[i] = collectServiceInfo(scmgr, srvName) - } - - return serviceInfos, nil + return names, nil } -//collectServiceInfo gathers info about a service from WindowsAPI -func collectServiceInfo(scmgr WinServiceManager, serviceName string) (serviceInfo ServiceInfo) { - - serviceInfo.ServiceName = serviceName +// collectServiceInfo gathers info about a service. 
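+// Failures are wrapped in *ServiceErr so callers can separate permission
+// problems from other errors, e.g. (sketch):
+//
+//    info, err := collectServiceInfo(scmgr, name)
+//    if err != nil && IsPermission(err) {
+//        // service exists but the agent may not open it; skip quietly
+//    }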
+func collectServiceInfo(scmgr WinServiceManager, serviceName string) (*ServiceInfo, error) { srv, err := scmgr.OpenService(serviceName) if err != nil { - serviceInfo.Error = fmt.Errorf("Could not open service '%s': %s", serviceName, err) - return + return nil, &ServiceErr{ + Message: "could not open service", + Service: serviceName, + Err: err, + } } defer srv.Close() srvStatus, err := srv.Query() - if err == nil { - serviceInfo.State = int(srvStatus.State) - } else { - serviceInfo.Error = fmt.Errorf("Could not query service '%s': %s", serviceName, err) - //finish collecting info on first found error - return + if err != nil { + return nil, &ServiceErr{ + Message: "could not query service", + Service: serviceName, + Err: err, + } } srvCfg, err := srv.Config() - if err == nil { - serviceInfo.DisplayName = srvCfg.DisplayName - serviceInfo.StartUpMode = int(srvCfg.StartType) - } else { - serviceInfo.Error = fmt.Errorf("Could not get config of service '%s': %s", serviceName, err) + if err != nil { + return nil, &ServiceErr{ + Message: "could not get config of service", + Service: serviceName, + Err: err, + } } - return + + serviceInfo := &ServiceInfo{ + ServiceName: serviceName, + DisplayName: srvCfg.DisplayName, + StartUpMode: int(srvCfg.StartType), + State: int(srvStatus.State), + } + return serviceInfo, nil } func init() { - inputs.Add("win_services", func() telegraf.Input { return &WinServices{mgrProvider: &MgProvider{}} }) + inputs.Add("win_services", func() telegraf.Input { + return &WinServices{ + mgrProvider: &MgProvider{}, + } + }) } diff --git a/plugins/inputs/win_services/win_services_integration_test.go b/plugins/inputs/win_services/win_services_integration_test.go index 201746514..a39df49c7 100644 --- a/plugins/inputs/win_services/win_services_integration_test.go +++ b/plugins/inputs/win_services/win_services_integration_test.go @@ -4,11 +4,10 @@ package win_services import ( - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/sys/windows/svc/mgr" "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) var InvalidServices = []string{"XYZ1@", "ZYZ@", "SDF_@#"} @@ -18,57 +17,30 @@ func TestList(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } - services, err := listServices(&MgProvider{}, KnownServices) + provider := &MgProvider{} + scmgr, err := provider.Connect() require.NoError(t, err) - assert.Len(t, services, 2, "Different number of services") - assert.Equal(t, services[0].ServiceName, KnownServices[0]) - assert.Nil(t, services[0].Error) - assert.Equal(t, services[1].ServiceName, KnownServices[1]) - assert.Nil(t, services[1].Error) + defer scmgr.Disconnect() + + services, err := listServices(scmgr, KnownServices) + require.NoError(t, err) + require.Len(t, services, 2, "Different number of services") + require.Equal(t, services[0], KnownServices[0]) + require.Equal(t, services[1], KnownServices[1]) } func TestEmptyList(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } - services, err := listServices(&MgProvider{}, []string{}) + provider := &MgProvider{} + scmgr, err := provider.Connect() require.NoError(t, err) - assert.Condition(t, func() bool { return len(services) > 20 }, "Too few service") -} + defer scmgr.Disconnect() -func TestListEr(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - services, err := 
listServices(&MgProvider{}, InvalidServices) + services, err := listServices(scmgr, []string{}) require.NoError(t, err) - assert.Len(t, services, 3, "Different number of services") - for i := 0; i < 3; i++ { - assert.Equal(t, services[i].ServiceName, InvalidServices[i]) - assert.NotNil(t, services[i].Error) - } -} - -func TestGather(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - ws := &WinServices{KnownServices, &MgProvider{}} - assert.Len(t, ws.ServiceNames, 2, "Different number of services") - var acc testutil.Accumulator - require.NoError(t, ws.Gather(&acc)) - assert.Len(t, acc.Errors, 0, "There should be no errors after gather") - - for i := 0; i < 2; i++ { - fields := make(map[string]interface{}) - tags := make(map[string]string) - si := getServiceInfo(KnownServices[i]) - fields["state"] = int(si.State) - fields["startup_mode"] = int(si.StartUpMode) - tags["service_name"] = si.ServiceName - tags["display_name"] = si.DisplayName - acc.AssertContainsTaggedFields(t, "win_services", fields, tags) - } + require.Condition(t, func() bool { return len(services) > 20 }, "Too few service") } func TestGatherErrors(t *testing.T) { @@ -76,40 +48,8 @@ func TestGatherErrors(t *testing.T) { t.Skip("Skipping integration test in short mode") } ws := &WinServices{InvalidServices, &MgProvider{}} - assert.Len(t, ws.ServiceNames, 3, "Different number of services") + require.Len(t, ws.ServiceNames, 3, "Different number of services") var acc testutil.Accumulator require.NoError(t, ws.Gather(&acc)) - assert.Len(t, acc.Errors, 3, "There should be 3 errors after gather") -} - -func getServiceInfo(srvName string) *ServiceInfo { - - scmgr, err := mgr.Connect() - if err != nil { - return nil - } - defer scmgr.Disconnect() - - srv, err := scmgr.OpenService(srvName) - if err != nil { - return nil - } - var si ServiceInfo - si.ServiceName = srvName - srvStatus, err := srv.Query() - if err == nil { - si.State = int(srvStatus.State) - } else { - si.Error = err - } - - srvCfg, err := srv.Config() - if err == nil { - si.DisplayName = srvCfg.DisplayName - si.StartUpMode = int(srvCfg.StartType) - } else { - si.Error = err - } - srv.Close() - return &si + require.Len(t, acc.Errors, 3, "There should be 3 errors after gather") } From 6a60e3f9ffae5295e8e413aab802ebd8fd44957a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 11 Sep 2018 16:08:22 -0700 Subject: [PATCH 0166/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 91374d12d..99ea85e8f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -110,6 +110,7 @@ - [#4499](https://github.com/influxdata/telegraf/issues/4499): Fix instance and object name in performance counters with backslashes. - [#4646](https://github.com/influxdata/telegraf/issues/4646): Reset/flush saved contents from bad metric. - [#4520](https://github.com/influxdata/telegraf/issues/4520): Document all supported cli arguments. +- [#4674](https://github.com/influxdata/telegraf/pull/4674): Log access denied opening a service at debug level in win_services. 
## v1.7.4 [2018-08-29] From b43165f2d859813a077a4460e96e5b94400308b2 Mon Sep 17 00:00:00 2001 From: Mark Wilkinson - m82labs Date: Tue, 11 Sep 2018 21:47:30 -0400 Subject: [PATCH 0167/1815] Improve Azure Managed Instance support + more in sqlserver input (#4642) --- Gopkg.lock | 35 ++-- Gopkg.toml | 2 +- plugins/inputs/sqlserver/README.md | 15 +- plugins/inputs/sqlserver/sqlserver.go | 276 ++++++++++++++++---------- 4 files changed, 211 insertions(+), 117 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index ed161e69b..7a44f79b3 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1,6 +1,14 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. +[[projects]] + digest = "1:972f38a9c879a4920d1e3a3d3438104b6c06163bfa3e6f4064adb00468d40587" + name = "cloud.google.com/go" + packages = ["civil"] + pruneopts = "" + revision = "c728a003b238b26cef9ab6753a5dc424b331c3ad" + version = "v0.27.0" + [[projects]] branch = "master" digest = "1:fc0802104acded1f48e4860a9f2db85b82b4a754fca9eae750ff4e8b8cdf2116" @@ -122,7 +130,7 @@ [[projects]] branch = "master" - digest = "1:0828d8c0f95689f832cf348fe23827feb7640cd698d612ef59e2f9d041f54c68" + digest = "1:5a5f28fcfe3a74247733a31ceaac0e53bfc2723e43c596b2e3f110eda9378575" name = "github.com/apache/thrift" packages = ["lib/go/thrift"] pruneopts = "" @@ -230,6 +238,17 @@ revision = "346938d642f2ec3594ed81d874461961cd0faa76" version = "v1.1.0" +[[projects]] + branch = "master" + digest = "1:7fdc54859cd901c25b9d8db964410a4e0d98fa0dca267fe4cf49c0eede5e06c2" + name = "github.com/denisenkom/go-mssqldb" + packages = [ + ".", + "internal/cp", + ] + pruneopts = "" + revision = "1eb28afdf9b6e56cf673badd47545f844fe81103" + [[projects]] digest = "1:6098222470fe0172157ce9bbef5d2200df4edde17ee649c5d6e48330e4afa4c6" name = "github.com/dgrijalva/jwt-go" @@ -257,7 +276,7 @@ revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" [[projects]] - digest = "1:d149605f1b00713fdc48150122892d77d49d30c825f690dd92f497aeb6cf18f5" + digest = "1:d2ca9295cce7d0e7b26b498c6b59ff903d8315e8ead97f0f6cadf9e7d613e1e8" name = "github.com/docker/docker" packages = [ "api", @@ -498,7 +517,7 @@ [[projects]] branch = "master" - digest = "1:ff65bf6fc4d1116f94ac305342725c21b55c16819c2606adc8f527755716937f" + digest = "1:e1c91a91cc738cebecbf12fc98f554f6f932c8b97e2052ad63ea43948df5bcb0" name = "github.com/hashicorp/go-rootcerts" packages = ["."] pruneopts = "" @@ -1016,14 +1035,6 @@ pruneopts = "" revision = "46796da1b0b4794e1e341883a399f12cc7574b55" -[[projects]] - branch = "master" - digest = "1:2fcfc6c3fb8dfe0d80d7789272230d3ac7db15022b66817113f98d9fff880225" - name = "github.com/zensqlmonitor/go-mssqldb" - packages = ["."] - pruneopts = "" - revision = "e8fbf836e44e86764eba398361d1825651709547" - [[projects]] branch = "master" digest = "1:0773b5c3be42874166670a20aa177872edb450cd9fc70b1df97303d977702a50" @@ -1312,6 +1323,7 @@ "github.com/aws/aws-sdk-go/service/sts", "github.com/bsm/sarama-cluster", "github.com/couchbase/go-couchbase", + "github.com/denisenkom/go-mssqldb", "github.com/dgrijalva/jwt-go", "github.com/docker/docker/api/types", "github.com/docker/docker/api/types/container", @@ -1375,7 +1387,6 @@ "github.com/vmware/govmomi/vim25/soap", "github.com/vmware/govmomi/vim25/types", "github.com/wvanbergen/kafka/consumergroup", - "github.com/zensqlmonitor/go-mssqldb", "golang.org/x/net/context", "golang.org/x/net/html/charset", "golang.org/x/oauth2", diff --git a/Gopkg.toml b/Gopkg.toml index c89578397..110e8480e 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -183,7 +183,7 
@@ branch = "master" [[constraint]] - name = "github.com/zensqlmonitor/go-mssqldb" + name = "github.com/denisenkom/go-mssqldb" branch = "master" [[constraint]] diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index 766bb95e0..33800ca2a 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -90,7 +90,17 @@ The new (version 2) metrics provide: - *TempDB*: Free space, Version store usage, Active temp tables, temp table creation rate, + more - *Resource Governor*: CPU Usage, Requests/sec, Queued Requests, and Blocked tasks per workload group + more - *Server properties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version -- *Wait stats*: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the sasme categories used in Query Store. +- *Wait stats*: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. +- *Azure Managed Instances* + - Stats from `sys.server_resource_stats`: + - cpu_count + - server_memory + - sku + - engine_edition + - hardware_type + - total_storage_mb + - available_storage_mb + - uptime The following metrics can be used directly, with no delta calculations: - SQLServer:Buffer Manager\Buffer cache hit ratio @@ -129,5 +139,4 @@ The following metrics can be used directly, with no delta calculations: - SQLServer:Workload Group Stats\Requests completed/sec Version 2 queries have the following tags: -- `sql_instance`: Physical host and instance name (hostname:instance) - +- `sql_instance`: Physical host and instance name (hostname:instance) \ No newline at end of file diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 1ca29eca0..5f2cbb29c 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -9,7 +9,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" // go-mssqldb initialization - _ "github.com/zensqlmonitor/go-mssqldb" + _ "github.com/denisenkom/go-mssqldb" ) // SQLServer struct @@ -348,7 +348,9 @@ ELSE EXEC(@SQL) ` -const sqlDatabaseIOV2 = `SELECT +const sqlDatabaseIOV2 = `IF SERVERPROPERTY('EngineEdition') = 5 +BEGIN +SELECT 'sqlserver_database_io' As [measurement], REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], DB_NAME([vfs].[database_id]) [database_name], @@ -358,43 +360,123 @@ vfs.num_of_bytes_read AS read_bytes, vfs.io_stall_write_ms AS write_latency_ms, vfs.num_of_writes AS writes, vfs.num_of_bytes_written AS write_bytes, -CASE WHEN vfs.file_id = 2 THEN 'LOG' ELSE 'ROWS' END AS file_type +b.name as logical_filename, +b.physical_name as physical_filename, +CASE WHEN vfs.file_id = 2 THEN 'LOG' ELSE 'DATA' END AS file_type FROM [sys].[dm_io_virtual_file_stats](NULL,NULL) AS vfs -OPTION( RECOMPILE ); +inner join sys.database_files b on b.file_id = vfs.file_id +END +ELSE +BEGIN +SELECT +'sqlserver_database_io' As [measurement], +REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], +DB_NAME([vfs].[database_id]) [database_name], +vfs.io_stall_read_ms AS read_latency_ms, +vfs.num_of_reads AS reads, +vfs.num_of_bytes_read AS read_bytes, +vfs.io_stall_write_ms AS write_latency_ms, +vfs.num_of_writes AS writes, +vfs.num_of_bytes_written AS write_bytes, +b.name 
as logical_filename, +b.physical_name as physical_filename, +CASE WHEN vfs.file_id = 2 THEN 'LOG' ELSE 'DATA' END AS file_type +FROM +[sys].[dm_io_virtual_file_stats](NULL,NULL) AS vfs +inner join sys.master_files b on b.database_id = vfs.database_id and b.file_id = vfs.file_id +END ` const sqlServerPropertiesV2 = `DECLARE @sys_info TABLE ( cpu_count INT, - server_memory INT, + server_memory BIGINT, + sku NVARCHAR(64), + engine_edition SMALLINT, + hardware_type VARCHAR(15), + total_storage_mb BIGINT, + available_storage_mb BIGINT, uptime INT ) IF OBJECT_ID('master.sys.dm_os_sys_info') IS NOT NULL BEGIN - INSERT INTO @sys_info ( cpu_count, server_memory, uptime ) - EXEC('SELECT cpu_count, (select total_physical_memory_kb from sys.dm_os_sys_memory) AS physical_memory_kb, DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) FROM sys.dm_os_sys_info') -END + + IF SERVERPROPERTY('EngineEdition') = 8 -- Managed Instance + INSERT INTO @sys_info ( cpu_count, server_memory, sku, engine_edition, hardware_type, total_storage_mb, available_storage_mb, uptime ) + SELECT TOP(1) + virtual_core_count AS cpu_count, + (SELECT process_memory_limit_mb FROM sys.dm_os_job_object) AS server_memory, + sku, + cast(SERVERPROPERTY('EngineEdition') as smallint) AS engine_edition, + hardware_generation AS hardware_type, + reserved_storage_mb AS total_storage_mb, + (reserved_storage_mb - storage_space_used_mb) AS available_storage_mb, + (select DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) from sys.dm_os_sys_info) as uptime + FROM sys.server_resource_stats + ORDER BY start_time DESC -SELECT -'sqlserver_server_properties' As [measurement], -REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], -SUM( CASE WHEN state = 0 THEN 1 ELSE 0 END ) AS db_online, -SUM( CASE WHEN state = 1 THEN 1 ELSE 0 END ) AS db_restoring, -SUM( CASE WHEN state = 2 THEN 1 ELSE 0 END ) AS db_recovering, -SUM( CASE WHEN state = 3 THEN 1 ELSE 0 END ) AS db_recoveryPending, -SUM( CASE WHEN state = 4 THEN 1 ELSE 0 END ) AS db_suspect, -SUM( CASE WHEN state = 10 THEN 1 ELSE 0 END ) AS db_offline, -MAX( sinfo.cpu_count ) AS cpu_count, -MAX( sinfo.server_memory ) AS server_memory, -MAX( sinfo.uptime ) AS uptime, -SERVERPROPERTY('ProductVersion') AS sql_version -FROM sys.databases -CROSS APPLY ( - SELECT * - FROM @sys_info -) AS sinfo -OPTION( RECOMPILE ); + ELSE + BEGIN + DECLARE @total_disk_size_mb BIGINT, + @available_space_mb BIGINT + + SELECT @total_disk_size_mb = sum(total_disk_size_mb), + @available_space_mb = sum(free_disk_space_mb) + FROM ( + SELECT distinct logical_volume_name AS LogicalName, + total_bytes/(1024*1024)as total_disk_size_mb, + available_bytes /(1024*1024) free_disk_space_mb + FROM sys.master_files AS f + CROSS APPLY sys.dm_os_volume_stats(f.database_id, f.file_id) + ) as osVolumes + + INSERT INTO @sys_info ( cpu_count, server_memory, sku, engine_edition, hardware_type, total_storage_mb, available_storage_mb, uptime ) + SELECT cpu_count, + (SELECT total_physical_memory_kb FROM sys.dm_os_sys_memory) AS server_memory, + CAST(SERVERPROPERTY('Edition') AS NVARCHAR(64)) as sku, + CAST(SERVERPROPERTY('EngineEdition') as smallint) as engine_edition, + CASE virtual_machine_type_desc + WHEN 'NONE' THEN 'PHYSICAL Machine' + ELSE virtual_machine_type_desc + END AS hardware_type, + @total_disk_size_mb, + @available_space_mb, + DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) + FROM sys.dm_os_sys_info + END +END + +SELECT 'sqlserver_server_properties' AS [measurement], + REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], + s.cpu_count, + s.server_memory, 
+ s.sku, + s.engine_edition, + s.hardware_type, + s.total_storage_mb, + s.available_storage_mb, + s.uptime, + db_online, + db_restoring, + db_recovering, + db_recoveryPending, + db_suspect, + db_offline +FROM ( + SELECT SUM( CASE WHEN state = 0 THEN 1 ELSE 0 END ) AS db_online, + SUM( CASE WHEN state = 1 THEN 1 ELSE 0 END ) AS db_restoring, + SUM( CASE WHEN state = 2 THEN 1 ELSE 0 END ) AS db_recovering, + SUM( CASE WHEN state = 3 THEN 1 ELSE 0 END ) AS db_recoveryPending, + SUM( CASE WHEN state = 4 THEN 1 ELSE 0 END ) AS db_suspect, + SUM( CASE WHEN state = 10 THEN 1 ELSE 0 END ) AS db_offline + FROM sys.databases + ) AS dbs + CROSS APPLY ( + SELECT cpu_count, server_memory, sku, engine_edition, hardware_type, total_storage_mb, available_storage_mb, uptime + FROM @sys_info + ) AS s +OPTION( RECOMPILE ) ` const sqlPerformanceCountersV2 string = ` @@ -416,53 +498,45 @@ SELECT DISTINCT spi.cntr_type FROM sys.dm_os_performance_counters AS spi WHERE ( - counter_name IN ( - 'SQL Compilations/sec', - 'SQL Re-Compilations/sec', - 'User Connections', - 'Batch Requests/sec', - 'Logouts/sec', - 'Logins/sec', - 'Processes blocked', - 'Latch Waits/sec', - 'Full Scans/sec', - 'Index Searches/sec', - 'Page Splits/sec', - 'Page Lookups/sec', - 'Page Reads/sec', - 'Page Writes/sec', - 'Readahead Pages/sec', - 'Lazy Writes/sec', - 'Checkpoint Pages/sec', - 'Page life expectancy', - 'Log File(s) Size (KB)', - 'Log File(s) Used Size (KB)', - 'Data File(s) Size (KB)', - 'Transactions/sec', - 'Write Transactions/sec', - 'Active Temp Tables', - 'Temp Tables Creation Rate', - 'Temp Tables For Destruction', - 'Free Space in tempdb (KB)', - 'Version Store Size (KB)', - 'Memory Grants Pending', - 'Free list stalls/sec', - 'Buffer cache hit ratio', - 'Buffer cache hit ratio base', - 'Backup/Restore Throughput/sec', - 'Total Server Memory (KB)', - 'Target Server Memory (KB)', - 'Forwarded Recs/sec' - ) - ) OR ( - instance_name IN ('_Total','Column store object pool') - AND counter_name IN ( + counter_name IN ( + 'SQL Compilations/sec', + 'SQL Re-Compilations/sec', + 'User Connections', + 'Batch Requests/sec', + 'Logouts/sec', + 'Logins/sec', + 'Processes blocked', + 'Latch Waits/sec', + 'Full Scans/sec', + 'Index Searches/sec', + 'Page Splits/sec', + 'Page Lookups/sec', + 'Page Reads/sec', + 'Page Writes/sec', + 'Readahead Pages/sec', + 'Lazy Writes/sec', + 'Checkpoint Pages/sec', + 'Page life expectancy', + 'Log File(s) Size (KB)', + 'Log File(s) Used Size (KB)', + 'Data File(s) Size (KB)', + 'Transactions/sec', + 'Write Transactions/sec', + 'Active Temp Tables', + 'Temp Tables Creation Rate', + 'Temp Tables For Destruction', + 'Free Space in tempdb (KB)', + 'Version Store Size (KB)', + 'Memory Grants Pending', + 'Memory Grants Outstanding', + 'Free list stalls/sec', + 'Buffer cache hit ratio', + 'Buffer cache hit ratio base', + 'Backup/Restore Throughput/sec', + 'Total Server Memory (KB)', + 'Target Server Memory (KB)', 'Log Flushes/sec', 'Log Flush Wait Time', - 'Lock Timeouts/sec', - 'Number of Deadlocks/sec', - 'Lock Waits/sec', - 'Latch Waits/sec', 'Memory broker clerk size', 'Log Bytes Flushed/sec', 'Bytes Sent to Replica/sec', @@ -477,29 +551,18 @@ WHERE ( 'Flow Control/sec', 'Resent Messages/sec', 'Redone Bytes/sec', - 'XTP Memory Used (KB)' - ) OR ( - counter_name IN ( - 'Log Bytes Received/sec', - 'Log Apply Pending Queue', - 'Redone Bytes/sec', - 'Recovery Queue', - 'Log Apply Ready Queue' - ) - AND instance_name = '_Total' - ) - ) OR ( - counter_name IN ('Transaction Delay') - ) OR ( - counter_name IN ( 
+ 'XTP Memory Used (KB)', + 'Transaction Delay', + 'Log Bytes Received/sec', + 'Log Apply Pending Queue', + 'Redone Bytes/sec', + 'Recovery Queue', + 'Log Apply Ready Queue', 'CPU usage %', 'CPU usage % base', 'Queued requests', 'Requests completed/sec', - 'Blocked tasks' - ) - ) OR ( - counter_name IN ( + 'Blocked tasks', 'Active memory grant amount (KB)', 'Disk Read Bytes/sec', 'Disk Read IO Throttled/sec', @@ -507,11 +570,22 @@ WHERE ( 'Disk Write Bytes/sec', 'Disk Write IO Throttled/sec', 'Disk Write IO/sec', - 'Used memory (KB)' + 'Used memory (KB)', + 'Forwarded Recs/sec', + 'Background Writer pages/sec', + 'Percent Log Used' ) - ) OR ( + ) OR ( object_name LIKE '%User Settable%' OR object_name LIKE '%SQL Errors%' + ) OR ( + instance_name IN ('_Total') + AND counter_name IN ( + 'Lock Timeouts/sec', + 'Number of Deadlocks/sec', + 'Lock Waits/sec', + 'Latch Waits/sec' + ) ) DECLARE @SQL NVARCHAR(MAX) @@ -617,6 +691,7 @@ LEFT OUTER JOIN ( VALUES ('CMEMPARTITIONED','Memory'), ('CMEMTHREAD','Memory'), ('CXPACKET','Parallelism'), +('CXCONSUMER','Parallelism'), ('DBMIRROR_DBM_EVENT','Mirroring'), ('DBMIRROR_DBM_MUTEX','Mirroring'), ('DBMIRROR_EVENTS_QUEUE','Mirroring'), @@ -1098,10 +1173,8 @@ ws.wait_type NOT IN ( N'PARALLEL_REDO_WORKER_WAIT_WORK', N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', - N'PREEMPTIVE_OS_PIPEOPS', N'PREEMPTIVE_OS_AUTHENTICATIONOPS', - N'PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', - N'PREEMPTIVE_OS_FILEOPS', N'PREEMPTIVE_OS_DEVICEOPS', N'PREEMPTIVE_OS_QUERYREGISTRY', - N'PREEMPTIVE_OS_WRITEFILE', + N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', + N'PREEMPTIVE_OS_DEVICEOPS', N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', @@ -1113,14 +1186,15 @@ ws.wait_type NOT IN ( N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP', - N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', N'SQLTRACE_WAIT_ENTRIES', + N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + N'SQLTRACE_WAIT_ENTRIES', N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT', N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN', - N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT') + N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', + N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT') AND waiting_tasks_count > 0 -ORDER BY -waiting_tasks_count DESC +AND wait_time_ms > 100 OPTION (RECOMPILE); ` From 94566098cc448bf6e982382f5def1683674b4953 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 11 Sep 2018 18:51:58 -0700 Subject: [PATCH 0168/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 99ea85e8f..fa95dd2b8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -103,6 +103,7 @@ - [#4339](https://github.com/influxdata/telegraf/pull/4339): Add Splunk Metrics serializer. - [#4141](https://github.com/influxdata/telegraf/pull/4141): Add input plugin for VMware vSphere. 
- [#4667](https://github.com/influxdata/telegraf/pull/4667): Align metrics window to interval in cloudwatch input. +- [#4642](https://github.com/influxdata/telegraf/pull/4642): Improve Azure Managed Instance support + more in sqlserver input. ### Bugfixes From 1aa969aabcbd4c446fccc12558446bf7346fad04 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 12 Sep 2018 00:23:50 -0700 Subject: [PATCH 0169/1815] Use separate mutexes for write and aggregation in running_output --- internal/models/running_output.go | 15 +++++++++------ output.go | 18 ++++++++++++++++++ 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 014202454..bad1f7659 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -36,8 +36,10 @@ type RunningOutput struct { metrics *buffer.Buffer failMetrics *buffer.Buffer + // Guards against concurrent calls to Add, Push, Reset + aggMutex sync.Mutex // Guards against concurrent calls to the Output as described in #3009 - sync.Mutex + writeMutex sync.Mutex } func NewRunningOutput( @@ -94,8 +96,6 @@ func NewRunningOutput( // AddMetric adds a metric to the output. This function can also write cached // points if FlushBufferWhenFull is true. func (ro *RunningOutput) AddMetric(m telegraf.Metric) { - ro.Lock() - defer ro.Unlock() if m == nil { return @@ -118,7 +118,9 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) { } if output, ok := ro.Output.(telegraf.AggregatingOutput); ok { + ro.aggMutex.Lock() output.Add(m) + ro.aggMutex.Unlock() return } @@ -135,13 +137,12 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) { // Write writes all cached points to this output. func (ro *RunningOutput) Write() error { - ro.Lock() - defer ro.Unlock() - if output, ok := ro.Output.(telegraf.AggregatingOutput); ok { + ro.aggMutex.Lock() metrics := output.Push() ro.metrics.Add(metrics...) output.Reset() + ro.aggMutex.Unlock() } nFails, nMetrics := ro.failMetrics.Len(), ro.metrics.Len() @@ -192,6 +193,8 @@ func (ro *RunningOutput) write(metrics []telegraf.Metric) error { if nMetrics == 0 { return nil } + ro.writeMutex.Lock() + defer ro.writeMutex.Unlock() start := time.Now() err := ro.Output.Write(metrics) elapsed := time.Since(start) diff --git a/output.go b/output.go index 39b371ac4..2421048f0 100644 --- a/output.go +++ b/output.go @@ -13,9 +13,26 @@ type Output interface { Write(metrics []Metric) error } +// AggregatingOutput adds aggregating functionality to an Output. May be used +// if the Output only accepts a fixed set of aggregations over a time period. +// These functions may be called concurrently with the Write function. type AggregatingOutput interface { + // Connect to the Output + Connect() error + // Close any connections to the Output + Close() error + // Description returns a one-sentence description on the Output + Description() string + // SampleConfig returns the default configuration of the Output + SampleConfig() string + // Write takes in group of points to be written to the Output + Write(metrics []Metric) error + + // Add the metric to the aggregator Add(in Metric) + // Push returns the aggregated metrics and is called every flush interval. Push() []Metric + // Reset signals that the aggregator period is completed.
Reset() } @@ -30,6 +47,7 @@ type ServiceOutput interface { SampleConfig() string // Write takes in group of points to be written to the Output Write(metrics []Metric) error + // Start the "service" that will provide an Output Start() error // Stop the "service" that will provide an Output From 9bd14b283fc013790fad77693da008e9cc1ea773 Mon Sep 17 00:00:00 2001 From: Greg Date: Wed, 12 Sep 2018 02:03:59 -0600 Subject: [PATCH 0170/1815] Add support for couchdb 2.0+ API (#4654) --- plugins/inputs/couchdb/README.md | 196 +-------------- plugins/inputs/couchdb/couchdb.go | 291 +++++++++++++++-------- plugins/inputs/couchdb/couchdb_test.go | 9 +- plugins/inputs/couchdb/dev/telegraf.conf | 9 + 4 files changed, 207 insertions(+), 298 deletions(-) create mode 100644 plugins/inputs/couchdb/dev/telegraf.conf diff --git a/plugins/inputs/couchdb/README.md b/plugins/inputs/couchdb/README.md index 686914583..0af06cc54 100644 --- a/plugins/inputs/couchdb/README.md +++ b/plugins/inputs/couchdb/README.md @@ -62,194 +62,12 @@ httpd statistics: ### Example output: +**Post Couchdb 2.0** ``` -➜ telegraf git:(master) ✗ ./telegraf --config ./config.conf --input-filter couchdb --test -* Plugin: couchdb, - Collection 1 -> couchdb,server=http://localhost:5984/_stats couchdb_auth_cache_hits_current=0, -couchdb_auth_cache_hits_max=0, -couchdb_auth_cache_hits_mean=0, -couchdb_auth_cache_hits_min=0, -couchdb_auth_cache_hits_stddev=0, -couchdb_auth_cache_hits_sum=0, -couchdb_auth_cache_misses_current=0, -couchdb_auth_cache_misses_max=0, -couchdb_auth_cache_misses_mean=0, -couchdb_auth_cache_misses_min=0, -couchdb_auth_cache_misses_stddev=0, -couchdb_auth_cache_misses_sum=0, -couchdb_database_reads_current=0, -couchdb_database_reads_max=0, -couchdb_database_reads_mean=0, -couchdb_database_reads_min=0, -couchdb_database_reads_stddev=0, -couchdb_database_reads_sum=0, -couchdb_database_writes_current=1102, -couchdb_database_writes_max=131, -couchdb_database_writes_mean=0.116, -couchdb_database_writes_min=0, -couchdb_database_writes_stddev=3.536, -couchdb_database_writes_sum=1102, -couchdb_open_databases_current=1, -couchdb_open_databases_max=1, -couchdb_open_databases_mean=0, -couchdb_open_databases_min=0, -couchdb_open_databases_stddev=0.01, -couchdb_open_databases_sum=1, -couchdb_open_os_files_current=2, -couchdb_open_os_files_max=2, -couchdb_open_os_files_mean=0, -couchdb_open_os_files_min=0, -couchdb_open_os_files_stddev=0.02, -couchdb_open_os_files_sum=2, -couchdb_request_time_current=242.21, -couchdb_request_time_max=102, -couchdb_request_time_mean=5.767, -couchdb_request_time_min=1, -couchdb_request_time_stddev=17.369, -couchdb_request_time_sum=242.21, -httpd_bulk_requests_current=0, -httpd_bulk_requests_max=0, -httpd_bulk_requests_mean=0, -httpd_bulk_requests_min=0, -httpd_bulk_requests_stddev=0, -httpd_bulk_requests_sum=0, -httpd_clients_requesting_changes_current=0, -httpd_clients_requesting_changes_max=0, -httpd_clients_requesting_changes_mean=0, -httpd_clients_requesting_changes_min=0, -httpd_clients_requesting_changes_stddev=0, -httpd_clients_requesting_changes_sum=0, -httpd_request_methods_copy_current=0, -httpd_request_methods_copy_max=0, -httpd_request_methods_copy_mean=0, -httpd_request_methods_copy_min=0, -httpd_request_methods_copy_stddev=0, -httpd_request_methods_copy_sum=0, -httpd_request_methods_delete_current=0, -httpd_request_methods_delete_max=0, -httpd_request_methods_delete_mean=0, -httpd_request_methods_delete_min=0, -httpd_request_methods_delete_stddev=0, -httpd_request_methods_delete_sum=0, 
-httpd_request_methods_get_current=31, -httpd_request_methods_get_max=1, -httpd_request_methods_get_mean=0.003, -httpd_request_methods_get_min=0, -httpd_request_methods_get_stddev=0.057, -httpd_request_methods_get_sum=31, -httpd_request_methods_head_current=0, -httpd_request_methods_head_max=0, -httpd_request_methods_head_mean=0, -httpd_request_methods_head_min=0, -httpd_request_methods_head_stddev=0, -httpd_request_methods_head_sum=0, -httpd_request_methods_post_current=1102, -httpd_request_methods_post_max=131, -httpd_request_methods_post_mean=0.116, -httpd_request_methods_post_min=0, -httpd_request_methods_post_stddev=3.536, -httpd_request_methods_post_sum=1102, -httpd_request_methods_put_current=1, -httpd_request_methods_put_max=1, -httpd_request_methods_put_mean=0, -httpd_request_methods_put_min=0, -httpd_request_methods_put_stddev=0.01, -httpd_request_methods_put_sum=1, -httpd_requests_current=1133, -httpd_requests_max=130, -httpd_requests_mean=0.118, -httpd_requests_min=0, -httpd_requests_stddev=3.512, -httpd_requests_sum=1133, -httpd_status_codes_200_current=31, -httpd_status_codes_200_max=1, -httpd_status_codes_200_mean=0.003, -httpd_status_codes_200_min=0, -httpd_status_codes_200_stddev=0.057, -httpd_status_codes_200_sum=31, -httpd_status_codes_201_current=1103, -httpd_status_codes_201_max=130, -httpd_status_codes_201_mean=0.116, -httpd_status_codes_201_min=0, -httpd_status_codes_201_stddev=3.532, -httpd_status_codes_201_sum=1103, -httpd_status_codes_202_current=0, -httpd_status_codes_202_max=0, -httpd_status_codes_202_mean=0, -httpd_status_codes_202_min=0, -httpd_status_codes_202_stddev=0, -httpd_status_codes_202_sum=0, -httpd_status_codes_301_current=0, -httpd_status_codes_301_max=0, -httpd_status_codes_301_mean=0, -httpd_status_codes_301_min=0, -httpd_status_codes_301_stddev=0, -httpd_status_codes_301_sum=0, -httpd_status_codes_304_current=0, -httpd_status_codes_304_max=0, -httpd_status_codes_304_mean=0, -httpd_status_codes_304_min=0, -httpd_status_codes_304_stddev=0, -httpd_status_codes_304_sum=0, -httpd_status_codes_400_current=0, -httpd_status_codes_400_max=0, -httpd_status_codes_400_mean=0, -httpd_status_codes_400_min=0, -httpd_status_codes_400_stddev=0, -httpd_status_codes_400_sum=0, -httpd_status_codes_401_current=0, -httpd_status_codes_401_max=0, -httpd_status_codes_401_mean=0, -httpd_status_codes_401_min=0, -httpd_status_codes_401_stddev=0, -httpd_status_codes_401_sum=0, -httpd_status_codes_403_current=0, -httpd_status_codes_403_max=0, -httpd_status_codes_403_mean=0, -httpd_status_codes_403_min=0, -httpd_status_codes_403_stddev=0, -httpd_status_codes_403_sum=0, -httpd_status_codes_404_current=0, -httpd_status_codes_404_max=0, -httpd_status_codes_404_mean=0, -httpd_status_codes_404_min=0, -httpd_status_codes_404_stddev=0, -httpd_status_codes_404_sum=0, -httpd_status_codes_405_current=0, -httpd_status_codes_405_max=0, -httpd_status_codes_405_mean=0, -httpd_status_codes_405_min=0, -httpd_status_codes_405_stddev=0, -httpd_status_codes_405_sum=0, -httpd_status_codes_409_current=0, -httpd_status_codes_409_max=0, -httpd_status_codes_409_mean=0, -httpd_status_codes_409_min=0, -httpd_status_codes_409_stddev=0, -httpd_status_codes_409_sum=0, -httpd_status_codes_412_current=0, -httpd_status_codes_412_max=0, -httpd_status_codes_412_mean=0, -httpd_status_codes_412_min=0, -httpd_status_codes_412_stddev=0, -httpd_status_codes_412_sum=0, -httpd_status_codes_500_current=0, -httpd_status_codes_500_max=0, -httpd_status_codes_500_mean=0, -httpd_status_codes_500_min=0, 
-httpd_status_codes_500_stddev=0, -httpd_status_codes_500_sum=0, -httpd_temporary_view_reads_current=0, -httpd_temporary_view_reads_max=0, -httpd_temporary_view_reads_mean=0, -httpd_temporary_view_reads_min=0, -httpd_temporary_view_reads_stddev=0, -httpd_temporary_view_reads_sum=0, -httpd_view_reads_current=0, -httpd_view_reads_max=0, -httpd_view_reads_mean=0, -httpd_view_reads_min=0, -httpd_view_reads_stddev=0, -httpd_view_reads_sum=0 1454692257621938169 +couchdb,server=http://couchdb22:5984/_node/_local/_stats couchdb_auth_cache_hits_value=0,httpd_request_methods_delete_value=0,couchdb_auth_cache_misses_value=0,httpd_request_methods_get_value=42,httpd_status_codes_304_value=0,httpd_status_codes_400_value=0,httpd_request_methods_head_value=0,httpd_status_codes_201_value=0,couchdb_database_reads_value=0,httpd_request_methods_copy_value=0,couchdb_request_time_max=0,httpd_status_codes_200_value=42,httpd_status_codes_301_value=0,couchdb_open_os_files_value=2,httpd_request_methods_put_value=0,httpd_request_methods_post_value=0,httpd_status_codes_202_value=0,httpd_status_codes_403_value=0,httpd_status_codes_409_value=0,couchdb_database_writes_value=0,couchdb_request_time_min=0,httpd_status_codes_412_value=0,httpd_status_codes_500_value=0,httpd_status_codes_401_value=0,httpd_status_codes_404_value=0,httpd_status_codes_405_value=0,couchdb_open_databases_value=0 1536707179000000000 +``` + +**Pre Couchdb 2.0** +``` +couchdb,server=http://couchdb16:5984/_stats couchdb_request_time_sum=96,httpd_status_codes_200_sum=37,httpd_status_codes_200_min=0,httpd_requests_mean=0.005,httpd_requests_min=0,couchdb_request_time_stddev=3.833,couchdb_request_time_min=1,httpd_request_methods_get_stddev=0.073,httpd_request_methods_get_min=0,httpd_status_codes_200_mean=0.005,httpd_status_codes_200_max=1,httpd_requests_sum=37,couchdb_request_time_current=96,httpd_request_methods_get_sum=37,httpd_request_methods_get_mean=0.005,httpd_request_methods_get_max=1,httpd_status_codes_200_stddev=0.073,couchdb_request_time_mean=2.595,couchdb_request_time_max=25,httpd_request_methods_get_current=37,httpd_status_codes_200_current=37,httpd_requests_current=37,httpd_requests_stddev=0.073,httpd_requests_max=1 1536707179000000000 ``` diff --git a/plugins/inputs/couchdb/couchdb.go b/plugins/inputs/couchdb/couchdb.go index da6ba67dc..bc9f31688 100644 --- a/plugins/inputs/couchdb/couchdb.go +++ b/plugins/inputs/couchdb/couchdb.go @@ -3,44 +3,52 @@ package couchdb import ( "encoding/json" "fmt" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs" "net/http" - "reflect" "sync" "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" ) -// Schema: -type metaData struct { - Description string `json:"description"` - Current float64 `json:"current"` - Sum float64 `json:"sum"` - Mean float64 `json:"mean"` - Stddev float64 `json:"stddev"` - Min float64 `json:"min"` - Max float64 `json:"max"` -} +type ( + metaData struct { + Current *float64 `json:"current"` + Sum *float64 `json:"sum"` + Mean *float64 `json:"mean"` + Stddev *float64 `json:"stddev"` + Min *float64 `json:"min"` + Max *float64 `json:"max"` + Value *float64 `json:"value"` + } -type Stats struct { - Couchdb struct { - AuthCacheMisses metaData `json:"auth_cache_misses"` - DatabaseWrites metaData `json:"database_writes"` - OpenDatabases metaData `json:"open_databases"` - AuthCacheHits metaData `json:"auth_cache_hits"` - RequestTime metaData `json:"request_time"` - DatabaseReads metaData `json:"database_reads"` - 
OpenOsFiles metaData `json:"open_os_files"` - } `json:"couchdb"` - HttpdRequestMethods struct { + oldValue struct { + Value metaData `json:"value"` + metaData + } + + couchdb struct { + AuthCacheHits metaData `json:"auth_cache_hits"` + AuthCacheMisses metaData `json:"auth_cache_misses"` + DatabaseWrites metaData `json:"database_writes"` + DatabaseReads metaData `json:"database_reads"` + OpenDatabases metaData `json:"open_databases"` + OpenOsFiles metaData `json:"open_os_files"` + RequestTime oldValue `json:"request_time"` + HttpdRequestMethods httpdRequestMethods `json:"httpd_request_methods"` + HttpdStatusCodes httpdStatusCodes `json:"httpd_status_codes"` + } + + httpdRequestMethods struct { Put metaData `json:"PUT"` Get metaData `json:"GET"` Copy metaData `json:"COPY"` Delete metaData `json:"DELETE"` Post metaData `json:"POST"` Head metaData `json:"HEAD"` - } `json:"httpd_request_methods"` - HttpdStatusCodes struct { + } + + httpdStatusCodes struct { Status200 metaData `json:"200"` Status201 metaData `json:"201"` Status202 metaData `json:"202"` @@ -54,19 +62,29 @@ type Stats struct { Status409 metaData `json:"409"` Status412 metaData `json:"412"` Status500 metaData `json:"500"` - } `json:"httpd_status_codes"` - Httpd struct { - ClientsRequestingChanges metaData `json:"clients_requesting_changes"` - TemporaryViewReads metaData `json:"temporary_view_reads"` - Requests metaData `json:"requests"` - BulkRequests metaData `json:"bulk_requests"` - ViewReads metaData `json:"view_reads"` - } `json:"httpd"` -} + } -type CouchDB struct { - HOSTs []string `toml:"hosts"` -} + httpd struct { + BulkRequests metaData `json:"bulk_requests"` + Requests metaData `json:"requests"` + TemporaryViewReads metaData `json:"temporary_view_reads"` + ViewReads metaData `json:"view_reads"` + ClientsRequestingChanges metaData `json:"clients_requesting_changes"` + } + + Stats struct { + Couchdb couchdb `json:"couchdb"` + HttpdRequestMethods httpdRequestMethods `json:"httpd_request_methods"` + HttpdStatusCodes httpdStatusCodes `json:"httpd_status_codes"` + Httpd httpd `json:"httpd"` + } + + CouchDB struct { + Hosts []string `toml:"hosts"` + + client *http.Client + } +) func (*CouchDB) Description() string { return "Read CouchDB Stats from one or more servers" @@ -75,14 +93,14 @@ func (*CouchDB) Description() string { func (*CouchDB) SampleConfig() string { return ` ## Works with CouchDB stats endpoints out of the box - ## Multiple HOSTs from which to read CouchDB stats: + ## Multiple Hosts from which to read CouchDB stats: hosts = ["http://localhost:8086/_stats"] ` } func (c *CouchDB) Gather(accumulator telegraf.Accumulator) error { var wg sync.WaitGroup - for _, u := range c.HOSTs { + for _, u := range c.Hosts { wg.Add(1) go func(host string) { defer wg.Done() @@ -97,67 +115,125 @@ func (c *CouchDB) Gather(accumulator telegraf.Accumulator) error { return nil } -var tr = &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), -} - -var client = &http.Client{ - Transport: tr, - Timeout: time.Duration(4 * time.Second), -} - func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host string) error { - - response, error := client.Get(host) + if c.client == nil { + c.client = &http.Client{ + Transport: &http.Transport{ + ResponseHeaderTimeout: time.Duration(3 * time.Second), + }, + Timeout: time.Duration(4 * time.Second), + } + } + response, error := c.client.Get(host) if error != nil { return error } defer response.Body.Close() - var stats Stats + if response.StatusCode != 200 { + return 
fmt.Errorf("Failed to get stats from couchdb: HTTP responded %d", response.StatusCode) + } + + stats := Stats{} decoder := json.NewDecoder(response.Body) decoder.Decode(&stats) fields := map[string]interface{}{} + // for couchdb 2.0 API changes + requestTime := metaData{ + Current: stats.Couchdb.RequestTime.Current, + Sum: stats.Couchdb.RequestTime.Sum, + Mean: stats.Couchdb.RequestTime.Mean, + Stddev: stats.Couchdb.RequestTime.Stddev, + Min: stats.Couchdb.RequestTime.Min, + Max: stats.Couchdb.RequestTime.Max, + } + + httpdRequestMethodsPut := stats.HttpdRequestMethods.Put + httpdRequestMethodsGet := stats.HttpdRequestMethods.Get + httpdRequestMethodsCopy := stats.HttpdRequestMethods.Copy + httpdRequestMethodsDelete := stats.HttpdRequestMethods.Delete + httpdRequestMethodsPost := stats.HttpdRequestMethods.Post + httpdRequestMethodsHead := stats.HttpdRequestMethods.Head + + httpdStatusCodesStatus200 := stats.HttpdStatusCodes.Status200 + httpdStatusCodesStatus201 := stats.HttpdStatusCodes.Status201 + httpdStatusCodesStatus202 := stats.HttpdStatusCodes.Status202 + httpdStatusCodesStatus301 := stats.HttpdStatusCodes.Status301 + httpdStatusCodesStatus304 := stats.HttpdStatusCodes.Status304 + httpdStatusCodesStatus400 := stats.HttpdStatusCodes.Status400 + httpdStatusCodesStatus401 := stats.HttpdStatusCodes.Status401 + httpdStatusCodesStatus403 := stats.HttpdStatusCodes.Status403 + httpdStatusCodesStatus404 := stats.HttpdStatusCodes.Status404 + httpdStatusCodesStatus405 := stats.HttpdStatusCodes.Status405 + httpdStatusCodesStatus409 := stats.HttpdStatusCodes.Status409 + httpdStatusCodesStatus412 := stats.HttpdStatusCodes.Status412 + httpdStatusCodesStatus500 := stats.HttpdStatusCodes.Status500 + // check if couchdb2.0 is used + if stats.Couchdb.HttpdRequestMethods.Get.Value != nil { + requestTime = stats.Couchdb.RequestTime.Value + + httpdRequestMethodsPut = stats.Couchdb.HttpdRequestMethods.Put + httpdRequestMethodsGet = stats.Couchdb.HttpdRequestMethods.Get + httpdRequestMethodsCopy = stats.Couchdb.HttpdRequestMethods.Copy + httpdRequestMethodsDelete = stats.Couchdb.HttpdRequestMethods.Delete + httpdRequestMethodsPost = stats.Couchdb.HttpdRequestMethods.Post + httpdRequestMethodsHead = stats.Couchdb.HttpdRequestMethods.Head + + httpdStatusCodesStatus200 = stats.Couchdb.HttpdStatusCodes.Status200 + httpdStatusCodesStatus201 = stats.Couchdb.HttpdStatusCodes.Status201 + httpdStatusCodesStatus202 = stats.Couchdb.HttpdStatusCodes.Status202 + httpdStatusCodesStatus301 = stats.Couchdb.HttpdStatusCodes.Status301 + httpdStatusCodesStatus304 = stats.Couchdb.HttpdStatusCodes.Status304 + httpdStatusCodesStatus400 = stats.Couchdb.HttpdStatusCodes.Status400 + httpdStatusCodesStatus401 = stats.Couchdb.HttpdStatusCodes.Status401 + httpdStatusCodesStatus403 = stats.Couchdb.HttpdStatusCodes.Status403 + httpdStatusCodesStatus404 = stats.Couchdb.HttpdStatusCodes.Status404 + httpdStatusCodesStatus405 = stats.Couchdb.HttpdStatusCodes.Status405 + httpdStatusCodesStatus409 = stats.Couchdb.HttpdStatusCodes.Status409 + httpdStatusCodesStatus412 = stats.Couchdb.HttpdStatusCodes.Status412 + httpdStatusCodesStatus500 = stats.Couchdb.HttpdStatusCodes.Status500 + } + // CouchDB meta stats: - c.MapCopy(fields, c.generateFields("couchdb_auth_cache_misses", stats.Couchdb.AuthCacheMisses)) - c.MapCopy(fields, c.generateFields("couchdb_database_writes", stats.Couchdb.DatabaseWrites)) - c.MapCopy(fields, c.generateFields("couchdb_open_databases", stats.Couchdb.OpenDatabases)) - c.MapCopy(fields, 
c.generateFields("couchdb_auth_cache_hits", stats.Couchdb.AuthCacheHits)) - c.MapCopy(fields, c.generateFields("couchdb_request_time", stats.Couchdb.RequestTime)) - c.MapCopy(fields, c.generateFields("couchdb_database_reads", stats.Couchdb.DatabaseReads)) - c.MapCopy(fields, c.generateFields("couchdb_open_os_files", stats.Couchdb.OpenOsFiles)) + c.generateFields(fields, "couchdb_auth_cache_misses", stats.Couchdb.AuthCacheMisses) + c.generateFields(fields, "couchdb_database_writes", stats.Couchdb.DatabaseWrites) + c.generateFields(fields, "couchdb_open_databases", stats.Couchdb.OpenDatabases) + c.generateFields(fields, "couchdb_auth_cache_hits", stats.Couchdb.AuthCacheHits) + c.generateFields(fields, "couchdb_request_time", requestTime) + c.generateFields(fields, "couchdb_database_reads", stats.Couchdb.DatabaseReads) + c.generateFields(fields, "couchdb_open_os_files", stats.Couchdb.OpenOsFiles) // http request methods stats: - c.MapCopy(fields, c.generateFields("httpd_request_methods_put", stats.HttpdRequestMethods.Put)) - c.MapCopy(fields, c.generateFields("httpd_request_methods_get", stats.HttpdRequestMethods.Get)) - c.MapCopy(fields, c.generateFields("httpd_request_methods_copy", stats.HttpdRequestMethods.Copy)) - c.MapCopy(fields, c.generateFields("httpd_request_methods_delete", stats.HttpdRequestMethods.Delete)) - c.MapCopy(fields, c.generateFields("httpd_request_methods_post", stats.HttpdRequestMethods.Post)) - c.MapCopy(fields, c.generateFields("httpd_request_methods_head", stats.HttpdRequestMethods.Head)) + c.generateFields(fields, "httpd_request_methods_put", httpdRequestMethodsPut) + c.generateFields(fields, "httpd_request_methods_get", httpdRequestMethodsGet) + c.generateFields(fields, "httpd_request_methods_copy", httpdRequestMethodsCopy) + c.generateFields(fields, "httpd_request_methods_delete", httpdRequestMethodsDelete) + c.generateFields(fields, "httpd_request_methods_post", httpdRequestMethodsPost) + c.generateFields(fields, "httpd_request_methods_head", httpdRequestMethodsHead) // status code stats: - c.MapCopy(fields, c.generateFields("httpd_status_codes_200", stats.HttpdStatusCodes.Status200)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_201", stats.HttpdStatusCodes.Status201)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_202", stats.HttpdStatusCodes.Status202)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_301", stats.HttpdStatusCodes.Status301)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_304", stats.HttpdStatusCodes.Status304)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_400", stats.HttpdStatusCodes.Status400)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_401", stats.HttpdStatusCodes.Status401)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_403", stats.HttpdStatusCodes.Status403)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_404", stats.HttpdStatusCodes.Status404)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_405", stats.HttpdStatusCodes.Status405)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_409", stats.HttpdStatusCodes.Status409)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_412", stats.HttpdStatusCodes.Status412)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_500", stats.HttpdStatusCodes.Status500)) + c.generateFields(fields, "httpd_status_codes_200", httpdStatusCodesStatus200) + c.generateFields(fields, "httpd_status_codes_201", httpdStatusCodesStatus201) + c.generateFields(fields, "httpd_status_codes_202", 
httpdStatusCodesStatus202) + c.generateFields(fields, "httpd_status_codes_301", httpdStatusCodesStatus301) + c.generateFields(fields, "httpd_status_codes_304", httpdStatusCodesStatus304) + c.generateFields(fields, "httpd_status_codes_400", httpdStatusCodesStatus400) + c.generateFields(fields, "httpd_status_codes_401", httpdStatusCodesStatus401) + c.generateFields(fields, "httpd_status_codes_403", httpdStatusCodesStatus403) + c.generateFields(fields, "httpd_status_codes_404", httpdStatusCodesStatus404) + c.generateFields(fields, "httpd_status_codes_405", httpdStatusCodesStatus405) + c.generateFields(fields, "httpd_status_codes_409", httpdStatusCodesStatus409) + c.generateFields(fields, "httpd_status_codes_412", httpdStatusCodesStatus412) + c.generateFields(fields, "httpd_status_codes_500", httpdStatusCodesStatus500) // httpd stats: - c.MapCopy(fields, c.generateFields("httpd_clients_requesting_changes", stats.Httpd.ClientsRequestingChanges)) - c.MapCopy(fields, c.generateFields("httpd_temporary_view_reads", stats.Httpd.TemporaryViewReads)) - c.MapCopy(fields, c.generateFields("httpd_requests", stats.Httpd.Requests)) - c.MapCopy(fields, c.generateFields("httpd_bulk_requests", stats.Httpd.BulkRequests)) - c.MapCopy(fields, c.generateFields("httpd_view_reads", stats.Httpd.ViewReads)) + c.generateFields(fields, "httpd_clients_requesting_changes", stats.Httpd.ClientsRequestingChanges) + c.generateFields(fields, "httpd_temporary_view_reads", stats.Httpd.TemporaryViewReads) + c.generateFields(fields, "httpd_requests", stats.Httpd.Requests) + c.generateFields(fields, "httpd_bulk_requests", stats.Httpd.BulkRequests) + c.generateFields(fields, "httpd_view_reads", stats.Httpd.ViewReads) tags := map[string]string{ "server": host, @@ -166,34 +242,39 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri return nil } -func (*CouchDB) MapCopy(dst, src interface{}) { - dv, sv := reflect.ValueOf(dst), reflect.ValueOf(src) - for _, k := range sv.MapKeys() { - dv.SetMapIndex(k, sv.MapIndex(k)) +func (c *CouchDB) generateFields(fields map[string]interface{}, prefix string, obj metaData) { + if obj.Value != nil { + fields[prefix+"_value"] = *obj.Value } -} - -func (*CouchDB) safeCheck(value interface{}) interface{} { - if value == nil { - return 0.0 + if obj.Current != nil { + fields[prefix+"_current"] = *obj.Current } - return value -} - -func (c *CouchDB) generateFields(prefix string, obj metaData) map[string]interface{} { - fields := map[string]interface{}{ - prefix + "_current": c.safeCheck(obj.Current), - prefix + "_sum": c.safeCheck(obj.Sum), - prefix + "_mean": c.safeCheck(obj.Mean), - prefix + "_stddev": c.safeCheck(obj.Stddev), - prefix + "_min": c.safeCheck(obj.Min), - prefix + "_max": c.safeCheck(obj.Max), + if obj.Sum != nil { + fields[prefix+"_sum"] = *obj.Sum + } + if obj.Mean != nil { + fields[prefix+"_mean"] = *obj.Mean + } + if obj.Stddev != nil { + fields[prefix+"_stddev"] = *obj.Stddev + } + if obj.Min != nil { + fields[prefix+"_min"] = *obj.Min + } + if obj.Max != nil { + fields[prefix+"_max"] = *obj.Max } - return fields } func init() { inputs.Add("couchdb", func() telegraf.Input { - return &CouchDB{} + return &CouchDB{ + client: &http.Client{ + Transport: &http.Transport{ + ResponseHeaderTimeout: time.Duration(3 * time.Second), + }, + Timeout: time.Duration(4 * time.Second), + }, + } }) } diff --git a/plugins/inputs/couchdb/couchdb_test.go b/plugins/inputs/couchdb/couchdb_test.go index 4c0370852..933d6cc8d 100644 --- a/plugins/inputs/couchdb/couchdb_test.go +++ 
b/plugins/inputs/couchdb/couchdb_test.go @@ -1,12 +1,13 @@ package couchdb_test import ( - "github.com/influxdata/telegraf/plugins/inputs/couchdb" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" "net/http" "net/http/httptest" "testing" + + "github.com/influxdata/telegraf/plugins/inputs/couchdb" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestBasic(t *testing.T) { @@ -312,7 +313,7 @@ func TestBasic(t *testing.T) { defer fakeServer.Close() plugin := &couchdb.CouchDB{ - HOSTs: []string{fakeServer.URL + "/_stats"}, + Hosts: []string{fakeServer.URL + "/_stats"}, } var acc testutil.Accumulator diff --git a/plugins/inputs/couchdb/dev/telegraf.conf b/plugins/inputs/couchdb/dev/telegraf.conf new file mode 100644 index 000000000..30366e922 --- /dev/null +++ b/plugins/inputs/couchdb/dev/telegraf.conf @@ -0,0 +1,9 @@ +[agent] + interval="1s" + flush_interval="1s" + +[[inputs.couchdb]] + hosts = ["http://couchdb16:5984/_stats", "http://couchdb22:5984/_node/_local/_stats"] + +[[outputs.file]] + files = ["stdout"] From a84ca7bcdb128a166f832aeac8a792ef614d2f34 Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Wed, 12 Sep 2018 14:06:38 -0400 Subject: [PATCH 0171/1815] Remove call to View.Destroy() that causes errors to be logged by vCenter (#4684) --- plugins/inputs/vsphere/client.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/vsphere/client.go b/plugins/inputs/vsphere/client.go index b9547b179..2148a72ff 100644 --- a/plugins/inputs/vsphere/client.go +++ b/plugins/inputs/vsphere/client.go @@ -164,12 +164,10 @@ func (c *Client) close() { // to close it multiple times. c.closeGate.Do(func() { ctx := context.Background() - if c.Views != nil { - c.Views.Destroy(ctx) - - } if c.Client != nil { - c.Client.Logout(ctx) + if err := c.Client.Logout(ctx); err != nil { + log.Printf("E! 
[input.vsphere]: Error during logout: %s", err) + } } }) } From 9c9a5f84384dcb7d78b5ac8047bc2f5738d21840 Mon Sep 17 00:00:00 2001 From: Thanabodee Charoenpiriyakij Date: Thu, 13 Sep 2018 01:39:21 +0700 Subject: [PATCH 0172/1815] Use sarama version 1.18.0 to support Kafka 2.0 (#4676) --- Gopkg.lock | 6 +++--- Gopkg.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 7a44f79b3..cb6338110 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -63,12 +63,12 @@ version = "v0.4.9" [[projects]] - digest = "1:9362b2212139b7821f73a86169bf80ce6b0264956f87d82ab3aeedb2b5c08fea" + digest = "1:213b41361ad1cb4768add9d26c2e27794c65264eefdb24ed6ea34cdfeeff3f3c" name = "github.com/Shopify/sarama" packages = ["."] pruneopts = "" - revision = "35324cf48e33d8260e1c7c18854465a904ade249" - version = "v1.17.0" + revision = "a6144ae922fd99dd0ea5046c8137acfb7fab0914" + version = "v1.18.0" [[projects]] digest = "1:f82b8ac36058904227087141017bb82f4b0fc58272990a4cdae3e2d6d222644e" diff --git a/Gopkg.toml b/Gopkg.toml index 110e8480e..2fa3e4c40 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -152,7 +152,7 @@ [[constraint]] name = "github.com/Shopify/sarama" - version = "1.17.0" + version = "1.18.0" [[constraint]] name = "github.com/soniah/gosnmp" From bcb65a5ee82c80d92c85b1d0590203bcbddd2241 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 12 Sep 2018 11:40:43 -0700 Subject: [PATCH 0173/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fa95dd2b8..3a5b37d35 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -112,6 +112,7 @@ - [#4646](https://github.com/influxdata/telegraf/issues/4646): Reset/flush saved contents from bad metric. - [#4520](https://github.com/influxdata/telegraf/issues/4520): Document all supported cli arguments. - [#4674](https://github.com/influxdata/telegraf/pull/4674): Log access denied opening a service at debug level in win_services. +- [#4588](https://github.com/influxdata/telegraf/issues/4588): Add support for Kafka 2.0. ## v1.7.4 [2018-08-29] From 6361fd377416593322a9b5ee8e8c867d70754e03 Mon Sep 17 00:00:00 2001 From: dangeist Date: Wed, 12 Sep 2018 14:47:45 -0400 Subject: [PATCH 0174/1815] Allow alternate binaries for iptables input plugin. (#4682) --- etc/telegraf.conf | 2 ++ plugins/inputs/iptables/README.md | 2 ++ plugins/inputs/iptables/iptables.go | 11 ++++++++++- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 9315aa457..11842e7e1 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -2004,6 +2004,8 @@ # ## Setting 'use_lock' to true runs iptables with the "-w" option. # ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl") # use_lock = false +# ## Define an alternate executable, such as "ip6tables". Default is "iptables". +# # binary = "ip6tables" # ## defines the table to monitor: # table = "filter" # ## defines the chains to monitor. diff --git a/plugins/inputs/iptables/README.md b/plugins/inputs/iptables/README.md index 527723f09..03bf784e6 100644 --- a/plugins/inputs/iptables/README.md +++ b/plugins/inputs/iptables/README.md @@ -45,6 +45,8 @@ Defining multiple instances of this plugin in telegraf.conf can lead to concurre use_sudo = false # run iptables with the lock option use_lock = false + # Define an alternate executable, such as "ip6tables". Default is "iptables". 
+ # binary = "ip6tables" # defines the table to monitor: table = "filter" # defines the chains to monitor: diff --git a/plugins/inputs/iptables/iptables.go b/plugins/inputs/iptables/iptables.go index 01041fcc1..21f6642a9 100644 --- a/plugins/inputs/iptables/iptables.go +++ b/plugins/inputs/iptables/iptables.go @@ -17,6 +17,7 @@ import ( type Iptables struct { UseSudo bool UseLock bool + Binary string Table string Chains []string lister chainLister @@ -38,6 +39,8 @@ func (ipt *Iptables) SampleConfig() string { ## Setting 'use_lock' to true runs iptables with the "-w" option. ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl") use_lock = false + ## Define an alternate executable, such as "ip6tables". Default is "iptables". + # binary = "ip6tables" ## defines the table to monitor: table = "filter" ## defines the chains to monitor. @@ -70,7 +73,13 @@ func (ipt *Iptables) Gather(acc telegraf.Accumulator) error { } func (ipt *Iptables) chainList(table, chain string) (string, error) { - iptablePath, err := exec.LookPath("iptables") + var binary string + if ipt.Binary != "" { + binary = ipt.Binary + } else { + binary = "iptables" + } + iptablePath, err := exec.LookPath(binary) if err != nil { return "", err } From 436dfdb44a20a91de9eb034902f36bd0e895200e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 12 Sep 2018 11:48:54 -0700 Subject: [PATCH 0175/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a5b37d35..6bf96251c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -104,6 +104,7 @@ - [#4141](https://github.com/influxdata/telegraf/pull/4141): Add input plugin for VMware vSphere. - [#4667](https://github.com/influxdata/telegraf/pull/4667): Align metrics window to interval in cloudwatch input. - [#4642](https://github.com/influxdata/telegraf/pull/4642): Improve Azure Managed Instance support + more in sqlserver input. +- [#4682](https://github.com/influxdata/telegraf/pull/4682): Allow alternate binaries for iptables input plugin. ### Bugfixes From 37fefa300b87d72e8d96028af3da0230b1093d36 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 12 Sep 2018 11:53:25 -0700 Subject: [PATCH 0176/1815] Remove non-existant option from sample config --- plugins/inputs/http/http.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index c9c3460be..f5a2544c8 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -52,9 +52,6 @@ var sampleConfig = ` # username = "username" # password = "pa$$word" - ## Tag all metrics with the url - # tag_url = true - ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" From 71aaa844f52455c6543f84b9de3ba2333f7d76c3 Mon Sep 17 00:00:00 2001 From: Gunnar Aasen Date: Wed, 12 Sep 2018 12:01:42 -0700 Subject: [PATCH 0177/1815] Update Azure Monitor README --- plugins/outputs/azure_monitor/README.md | 28 ++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/plugins/outputs/azure_monitor/README.md b/plugins/outputs/azure_monitor/README.md index cd1386136..d1b91a838 100644 --- a/plugins/outputs/azure_monitor/README.md +++ b/plugins/outputs/azure_monitor/README.md @@ -1,5 +1,8 @@ # Azure Monitor +__The Azure Monitor custom metrics service is currently in preview and not +available in a subset of Azure regions.__ + This plugin will send custom metrics to Azure Monitor. Azure Monitor has a metric resolution of one minute. 
To handle this in Telegraf, the Azure Monitor output plugin will
automatically aggregate metrics into one-minute buckets,
@@ -11,12 +14,6 @@ metric is written as the Azure Monitor metric name. All field values are
written as a summarized set that includes: min, max, sum, count. Tags are
written as a dimension on each Azure Monitor metric.

-Since Azure Monitor only accepts numeric values, string-typed fields are
-dropped by default. There is a configuration option (`strings_as_dimensions`)
-to retain fields that contain strings as extra dimensions. Azure Monitor
-allows a maximum of 10 dimensions per metric so any dimensions over that
-amount will be deterministically dropped.
-
 ### Configuration:
 
 ```toml
@@ -88,7 +85,8 @@ authentication is checked. Here are the preferred authentication methods:
    - Primarily useful if Telegraf is writing metrics for other resources.
      [More information][principal].
    - A Service Principal or User Principal needs to be assigned the `Monitoring
-      Contributor` roles.
+      Metrics Publisher` role on the resource(s) metrics will be emitted
+      against.
 3. AAD User Tokens (User Principals)
    - Allows Telegraf to authenticate like a user. It is best to use this method
      for development.
@@ -137,3 +135,19 @@ following configurations:
 
 **Note: As shown above, the last option (#4) is the preferred way to
 authenticate when running Telegraf on Azure VMs.
+
+### Dimensions
+
+Azure Monitor only accepts values with a numeric type. The plugin will drop
+fields with a string type by default. The plugin can send all string-typed
+fields as extra dimensions in the Azure Monitor custom metric by setting the
+configuration option `strings_as_dimensions` to `true`.
+
+Keep in mind that Azure Monitor allows a maximum of 10 dimensions per metric.
+The plugin will deterministically drop any dimensions that exceed the
+10-dimension limit.
+
+To convert only a subset of string-typed fields as dimensions, enable
+`strings_as_dimensions` and use the [`fieldpass` or `fielddrop`
+processors](https://docs.influxdata.com/telegraf/v1.7/administration/configuration/#processor-configuration)
+to limit the string-typed fields that are sent to the plugin.
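+
+As a minimal sketch of that combination (the field names below are
+hypothetical, and this assumes the `fielddrop` filter parameter is applied
+directly to this output):
+
+```toml
+[[outputs.azure_monitor]]
+  strings_as_dimensions = true
+  ## Remove unwanted string fields; any remaining string fields are sent
+  ## as dimensions alongside the aggregated numeric fields.
+  fielddrop = ["status_text", "error_message"]
+```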
From 1fdf032db057ba0b83fd506730c62ed9aa92fda8 Mon Sep 17 00:00:00 2001
From: Greg
Date: Wed, 12 Sep 2018 15:48:59 -0600
Subject: [PATCH 0178/1815] Add influx v2 output plugin (#4645)

---
 plugins/outputs/all/all.go                    |   1 +
 plugins/outputs/influxdb_v2/README.md         |  51 +++
 plugins/outputs/influxdb_v2/http.go           | 292 ++++++++++++++++++
 .../outputs/influxdb_v2/http_internal_test.go |  59 ++++
 plugins/outputs/influxdb_v2/http_test.go      |  49 +++
 plugins/outputs/influxdb_v2/influxdb.go       | 201 ++++++++++++
 plugins/outputs/influxdb_v2/influxdb_test.go  | 103 ++++++
 7 files changed, 756 insertions(+)
 create mode 100644 plugins/outputs/influxdb_v2/README.md
 create mode 100644 plugins/outputs/influxdb_v2/http.go
 create mode 100644 plugins/outputs/influxdb_v2/http_internal_test.go
 create mode 100644 plugins/outputs/influxdb_v2/http_test.go
 create mode 100644 plugins/outputs/influxdb_v2/influxdb.go
 create mode 100644 plugins/outputs/influxdb_v2/influxdb_test.go

diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go
index 4d49c0c6e..24748c53e 100644
--- a/plugins/outputs/all/all.go
+++ b/plugins/outputs/all/all.go
@@ -15,6 +15,7 @@ import (
	_ "github.com/influxdata/telegraf/plugins/outputs/graylog"
	_ "github.com/influxdata/telegraf/plugins/outputs/http"
	_ "github.com/influxdata/telegraf/plugins/outputs/influxdb"
+	_ "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2"
	_ "github.com/influxdata/telegraf/plugins/outputs/instrumental"
	_ "github.com/influxdata/telegraf/plugins/outputs/kafka"
	_ "github.com/influxdata/telegraf/plugins/outputs/kinesis"
diff --git a/plugins/outputs/influxdb_v2/README.md b/plugins/outputs/influxdb_v2/README.md
new file mode 100644
index 000000000..795f4467c
--- /dev/null
+++ b/plugins/outputs/influxdb_v2/README.md
@@ -0,0 +1,51 @@
+# InfluxDB Output Plugin
+
+This InfluxDB output plugin writes metrics to the [InfluxDB 2.0](https://github.com/influxdata/platform) HTTP service.
+
+### Configuration:
+
+```toml
+# Configuration for sending metrics to InfluxDB 2.0
+[[outputs.influxdb_v2]]
+  ## The URLs of the InfluxDB cluster nodes.
+  ##
+  ## Multiple URLs can be specified for a single cluster; only ONE of the
+  ## urls will be written to each interval.
+  urls = ["http://127.0.0.1:9999"]
+
+  ## Token for authentication.
+  token = ""
+
+  ## Organization is the name of the organization you wish to write to.
+  organization = ""
+
+  ## Bucket is the name of the bucket to write into; must exist.
+  bucket = ""
+
+  ## Timeout for HTTP messages.
+  # timeout = "5s"
+
+  ## Additional HTTP headers
+  # http_headers = {"X-Special-Header" = "Special-Value"}
+
+  ## HTTP Proxy override; if unset, the standard proxy environment
+  ## variables are consulted to determine which proxy, if any, should be used.
+  # http_proxy = "http://corporate.proxy:3128"
+
+  ## HTTP User-Agent
+  # user_agent = "telegraf"
+
+  ## Content-Encoding for write request body, can be set to "gzip" to
+  ## compress body or "identity" to apply no encoding.
+  # content_encoding = "gzip"
+
+  ## Enable or disable uint support for writing uints to InfluxDB 2.0.
+  # influx_uint_support = false
+
+  ## Optional TLS Config for use on HTTP connections.
+ # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go new file mode 100644 index 000000000..1e7061a27 --- /dev/null +++ b/plugins/outputs/influxdb_v2/http.go @@ -0,0 +1,292 @@ +package influxdb_v2 + +import ( + "compress/gzip" + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "net" + "net/http" + "net/url" + "path" + "strconv" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/serializers/influx" +) + +type APIErrorType int + +type APIError struct { + StatusCode int + Title string + Description string + Type APIErrorType +} + +func (e APIError) Error() string { + if e.Description != "" { + return fmt.Sprintf("%s: %s", e.Title, e.Description) + } + return e.Title +} + +const ( + defaultRequestTimeout = time.Second * 5 + defaultMaxWait = 10 // seconds + defaultDatabase = "telegraf" + defaultUserAgent = "telegraf" +) + +type HTTPConfig struct { + URL *url.URL + Token string + Organization string + Bucket string + Timeout time.Duration + Headers map[string]string + Proxy *url.URL + UserAgent string + ContentEncoding string + TLSConfig *tls.Config + + Serializer *influx.Serializer +} + +type httpClient struct { + WriteURL string + ContentEncoding string + Timeout time.Duration + Headers map[string]string + + client *http.Client + serializer *influx.Serializer + url *url.URL + retryTime time.Time +} + +func NewHTTPClient(config *HTTPConfig) (*httpClient, error) { + if config.URL == nil { + return nil, ErrMissingURL + } + + timeout := config.Timeout + if timeout == 0 { + timeout = defaultRequestTimeout + } + + userAgent := config.UserAgent + if userAgent == "" { + userAgent = defaultUserAgent + } + + var headers = make(map[string]string, len(config.Headers)+2) + headers["User-Agent"] = userAgent + headers["Authorization"] = "Token " + config.Token + for k, v := range config.Headers { + headers[k] = v + } + + var proxy func(*http.Request) (*url.URL, error) + if config.Proxy != nil { + proxy = http.ProxyURL(config.Proxy) + } else { + proxy = http.ProxyFromEnvironment + } + + serializer := config.Serializer + if serializer == nil { + serializer = influx.NewSerializer() + } + + writeURL, err := makeWriteURL( + *config.URL, + config.Organization, + config.Bucket) + if err != nil { + return nil, err + } + + var transport *http.Transport + switch config.URL.Scheme { + case "http", "https": + transport = &http.Transport{ + Proxy: proxy, + TLSClientConfig: config.TLSConfig, + } + case "unix": + transport = &http.Transport{ + Dial: func(_, _ string) (net.Conn, error) { + return net.DialTimeout( + config.URL.Scheme, + config.URL.Path, + timeout, + ) + }, + } + default: + return nil, fmt.Errorf("unsupported scheme %q", config.URL.Scheme) + } + + client := &httpClient{ + serializer: serializer, + client: &http.Client{ + Timeout: timeout, + Transport: transport, + }, + url: config.URL, + WriteURL: writeURL, + ContentEncoding: config.ContentEncoding, + Timeout: timeout, + Headers: headers, + } + return client, nil +} + +// URL returns the origin URL that this client connects too. 
+func (c *httpClient) URL() string { + return c.url.String() +} + +type genericRespError struct { + Code string + Message string + Line *int32 + MaxLength *int32 +} + +func (g genericRespError) Error() string { + errString := fmt.Sprintf("%s: %s", g.Code, g.Message) + if g.Line != nil { + return fmt.Sprintf("%s - line[%d]", errString, g.Line) + } else if g.MaxLength != nil { + return fmt.Sprintf("%s - maxlen[%d]", errString, g.MaxLength) + } + return errString +} + +func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error { + if c.retryTime.After(time.Now()) { + return errors.New("Retry time has not elapsed") + } + reader := influx.NewReader(metrics, c.serializer) + req, err := c.makeWriteRequest(reader) + if err != nil { + return err + } + + resp, err := c.client.Do(req.WithContext(ctx)) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNoContent { + return nil + } + + writeResp := &genericRespError{} + err = json.NewDecoder(resp.Body).Decode(writeResp) + desc := writeResp.Error() + if err != nil { + desc = err.Error() + } + + switch resp.StatusCode { + case http.StatusBadRequest, http.StatusUnauthorized, + http.StatusForbidden, http.StatusRequestEntityTooLarge: + log.Printf("E! [outputs.influxdb_v2] Failed to write metric: %s\n", desc) + return nil + case http.StatusTooManyRequests, http.StatusServiceUnavailable: + retryAfter := resp.Header.Get("Retry-After") + retry, err := strconv.Atoi(retryAfter) + if err != nil { + retry = 0 + } + if retry > defaultMaxWait { + retry = defaultMaxWait + } + c.retryTime = time.Now().Add(time.Duration(retry) * time.Second) + return fmt.Errorf("Waiting %ds for server before sending metric again", retry) + } + + // This is only until platform spec is fully implemented. As of the + // time of writing, there is no error body returned. 
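+	// Until then, failures surface through the X-Influx-Error header
+	// instead. For illustration, an assumed response reaching this
+	// fallback might look like:
+	//
+	//   HTTP/1.1 500 Internal Server Error
+	//   X-Influx-Error: internal error
+	//
+	// Any header value found is appended to the description and carried
+	// in the APIError returned below.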
+ if xErr := resp.Header.Get("X-Influx-Error"); xErr != "" { + desc = fmt.Sprintf("%s; %s", desc, xErr) + } + + return &APIError{ + StatusCode: resp.StatusCode, + Title: resp.Status, + Description: desc, + } +} + +func (c *httpClient) makeWriteRequest(body io.Reader) (*http.Request, error) { + var err error + if c.ContentEncoding == "gzip" { + body, err = compressWithGzip(body) + if err != nil { + return nil, err + } + } + + req, err := http.NewRequest("POST", c.WriteURL, body) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "text/plain; charset=utf-8") + c.addHeaders(req) + + if c.ContentEncoding == "gzip" { + req.Header.Set("Content-Encoding", "gzip") + } + + return req, nil +} + +func (c *httpClient) addHeaders(req *http.Request) { + for header, value := range c.Headers { + req.Header.Set(header, value) + } +} + +func compressWithGzip(data io.Reader) (io.Reader, error) { + pipeReader, pipeWriter := io.Pipe() + gzipWriter := gzip.NewWriter(pipeWriter) + var err error + + go func() { + _, err = io.Copy(gzipWriter, data) + gzipWriter.Close() + pipeWriter.Close() + }() + + return pipeReader, err +} + +func makeWriteURL(loc url.URL, org, bucket string) (string, error) { + params := url.Values{} + params.Set("bucket", bucket) + params.Set("org", org) + + switch loc.Scheme { + case "unix": + loc.Scheme = "http" + loc.Host = "127.0.0.1" + loc.Path = "v2/write" + case "http", "https": + loc.Path = path.Join(loc.Path, "v2/write") + default: + return "", fmt.Errorf("unsupported scheme: %q", loc.Scheme) + } + loc.RawQuery = params.Encode() + return loc.String(), nil +} diff --git a/plugins/outputs/influxdb_v2/http_internal_test.go b/plugins/outputs/influxdb_v2/http_internal_test.go new file mode 100644 index 000000000..5df51fc85 --- /dev/null +++ b/plugins/outputs/influxdb_v2/http_internal_test.go @@ -0,0 +1,59 @@ +package influxdb_v2 + +import ( + "io" + "net/url" + "testing" + + "github.com/stretchr/testify/require" +) + +func genURL(u string) *url.URL { + URL, _ := url.Parse(u) + return URL +} + +func TestMakeWriteURL(t *testing.T) { + tests := []struct { + err bool + url *url.URL + act string + }{ + { + url: genURL("http://localhost:9999"), + act: "http://localhost:9999/v2/write?bucket=telegraf&org=influx", + }, + { + url: genURL("unix://var/run/influxd.sock"), + act: "http://127.0.0.1/v2/write?bucket=telegraf&org=influx", + }, + { + err: true, + url: genURL("udp://localhost:9999"), + }, + } + + for i := range tests { + rURL, err := makeWriteURL(*tests[i].url, "influx", "telegraf") + if !tests[i].err { + require.NoError(t, err) + } else { + require.Error(t, err) + t.Log(err) + } + if err == nil { + require.Equal(t, tests[i].act, rURL) + } + } +} + +func TestMakeWriteRequest(t *testing.T) { + reader, _ := io.Pipe() + cli := httpClient{ + WriteURL: "http://localhost:9999/v2/write?bucket=telegraf&org=influx", + ContentEncoding: "gzip", + Headers: map[string]string{"x": "y"}, + } + _, err := cli.makeWriteRequest(reader) + require.NoError(t, err) +} diff --git a/plugins/outputs/influxdb_v2/http_test.go b/plugins/outputs/influxdb_v2/http_test.go new file mode 100644 index 000000000..33ff9e24b --- /dev/null +++ b/plugins/outputs/influxdb_v2/http_test.go @@ -0,0 +1,49 @@ +package influxdb_v2_test + +import ( + "net/url" + "testing" + + influxdb "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2" + "github.com/stretchr/testify/require" +) + +func genURL(u string) *url.URL { + URL, _ := url.Parse(u) + return URL +} +func TestNewHTTPClient(t *testing.T) { + tests := 
[]struct {
+		err bool
+		cfg *influxdb.HTTPConfig
+	}{
+		{
+			err: true,
+			cfg: &influxdb.HTTPConfig{},
+		},
+		{
+			err: true,
+			cfg: &influxdb.HTTPConfig{
+				URL: genURL("udp://localhost:9999"),
+			},
+		},
+		{
+			cfg: &influxdb.HTTPConfig{
+				URL: genURL("unix://var/run/influxd.sock"),
+			},
+		},
+	}
+
+	for i := range tests {
+		client, err := influxdb.NewHTTPClient(tests[i].cfg)
+		if !tests[i].err {
+			require.NoError(t, err)
+		} else {
+			require.Error(t, err)
+			t.Log(err)
+		}
+		if err == nil {
+			client.URL()
+		}
+	}
+}
diff --git a/plugins/outputs/influxdb_v2/influxdb.go b/plugins/outputs/influxdb_v2/influxdb.go
new file mode 100644
index 000000000..886907c03
--- /dev/null
+++ b/plugins/outputs/influxdb_v2/influxdb.go
@@ -0,0 +1,201 @@
+package influxdb_v2
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"log"
+	"math/rand"
+	"net/url"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/internal/tls"
+	"github.com/influxdata/telegraf/plugins/outputs"
+	"github.com/influxdata/telegraf/plugins/serializers/influx"
+)
+
+var (
+	defaultURL = "http://localhost:9999"
+
+	ErrMissingURL = errors.New("missing URL")
+)
+
+var sampleConfig = `
+  ## The URLs of the InfluxDB cluster nodes.
+  ##
+  ## Multiple URLs can be specified for a single cluster; only ONE of the
+  ## urls will be written to each interval.
+  urls = ["http://127.0.0.1:9999"]
+
+  ## Token for authentication.
+  token = ""
+
+  ## Organization is the name of the organization you wish to write to; must exist.
+  organization = ""
+
+  ## Bucket is the name of the bucket to write into; must exist.
+  bucket = ""
+
+  ## Timeout for HTTP messages.
+  # timeout = "5s"
+
+  ## Additional HTTP headers
+  # http_headers = {"X-Special-Header" = "Special-Value"}
+
+  ## HTTP Proxy override; if unset, the standard proxy environment
+  ## variables are consulted to determine which proxy, if any, should be used.
+  # http_proxy = "http://corporate.proxy:3128"
+
+  ## HTTP User-Agent
+  # user_agent = "telegraf"
+
+  ## Content-Encoding for write request body, can be set to "gzip" to
+  ## compress body or "identity" to apply no encoding.
+  # content_encoding = "gzip"
+
+  ## Enable or disable uint support for writing uints to InfluxDB 2.0.
+  # influx_uint_support = false
+
+  ## Optional TLS Config for use on HTTP connections.
+ # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +` + +type Client interface { + Write(context.Context, []telegraf.Metric) error + + URL() string // for logging +} + +type InfluxDB struct { + URLs []string `toml:"urls"` + Token string `toml:"token"` + Organization string `toml:"organization"` + Bucket string `toml:"bucket"` + Timeout internal.Duration `toml:"timeout"` + HTTPHeaders map[string]string `toml:"http_headers"` + HTTPProxy string `toml:"http_proxy"` + UserAgent string `toml:"user_agent"` + ContentEncoding string `toml:"content_encoding"` + UintSupport bool `toml:"influx_uint_support"` + tls.ClientConfig + + clients []Client + serializer *influx.Serializer +} + +func (i *InfluxDB) Connect() error { + ctx := context.Background() + + if len(i.URLs) == 0 { + i.URLs = append(i.URLs, defaultURL) + } + + i.serializer = influx.NewSerializer() + if i.UintSupport { + i.serializer.SetFieldTypeSupport(influx.UintSupport) + } + + for _, u := range i.URLs { + parts, err := url.Parse(u) + if err != nil { + return fmt.Errorf("error parsing url [%q]: %v", u, err) + } + + var proxy *url.URL + if len(i.HTTPProxy) > 0 { + proxy, err = url.Parse(i.HTTPProxy) + if err != nil { + return fmt.Errorf("error parsing proxy_url [%s]: %v", i.HTTPProxy, err) + } + } + + switch parts.Scheme { + case "http", "https", "unix": + c, err := i.getHTTPClient(ctx, parts, proxy) + if err != nil { + return err + } + + i.clients = append(i.clients, c) + default: + return fmt.Errorf("unsupported scheme [%q]: %q", u, parts.Scheme) + } + } + + return nil +} + +func (i *InfluxDB) Close() error { + return nil +} + +func (i *InfluxDB) Description() string { + return "Configuration for sending metrics to InfluxDB" +} + +func (i *InfluxDB) SampleConfig() string { + return sampleConfig +} + +// Write sends metrics to one of the configured servers, logging each +// unsuccessful. If all servers fail, return an error. +func (i *InfluxDB) Write(metrics []telegraf.Metric) error { + ctx := context.Background() + + var err error + p := rand.Perm(len(i.clients)) + for _, n := range p { + client := i.clients[n] + err = client.Write(ctx, metrics) + if err == nil { + return nil + } + + log.Printf("E! 
[outputs.influxdb] when writing to [%s]: %v", client.URL(), err) + } + + return errors.New("could not write any address") +} + +func (i *InfluxDB) getHTTPClient(ctx context.Context, url *url.URL, proxy *url.URL) (Client, error) { + tlsConfig, err := i.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + config := &HTTPConfig{ + URL: url, + Token: i.Token, + Organization: i.Organization, + Bucket: i.Bucket, + Timeout: i.Timeout.Duration, + Headers: i.HTTPHeaders, + Proxy: proxy, + UserAgent: i.UserAgent, + ContentEncoding: i.ContentEncoding, + TLSConfig: tlsConfig, + Serializer: i.serializer, + } + + c, err := NewHTTPClient(config) + if err != nil { + return nil, fmt.Errorf("error creating HTTP client [%s]: %v", url, err) + } + + return c, nil +} + +func init() { + outputs.Add("influxdb_v2", func() telegraf.Output { + return &InfluxDB{ + Timeout: internal.Duration{Duration: time.Second * 5}, + ContentEncoding: "gzip", + } + }) +} diff --git a/plugins/outputs/influxdb_v2/influxdb_test.go b/plugins/outputs/influxdb_v2/influxdb_test.go new file mode 100644 index 000000000..3702b4309 --- /dev/null +++ b/plugins/outputs/influxdb_v2/influxdb_test.go @@ -0,0 +1,103 @@ +package influxdb_v2_test + +import ( + "testing" + + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/outputs" + influxdb "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2" + "github.com/stretchr/testify/require" +) + +func TestDefaultURL(t *testing.T) { + output := influxdb.InfluxDB{} + err := output.Connect() + require.NoError(t, err) + if len(output.URLs) < 1 { + t.Fatal("Default URL failed to get set") + } + require.Equal(t, "http://localhost:9999", output.URLs[0]) +} +func TestConnect(t *testing.T) { + tests := []struct { + err bool + out influxdb.InfluxDB + }{ + { + out: influxdb.InfluxDB{ + URLs: []string{"http://localhost:1234"}, + HTTPProxy: "http://localhost:9999", + HTTPHeaders: map[string]string{ + "x": "y", + }, + }, + }, + { + err: true, + out: influxdb.InfluxDB{ + URLs: []string{"!@#$qwert"}, + HTTPProxy: "http://localhost:9999", + HTTPHeaders: map[string]string{ + "x": "y", + }, + }, + }, + { + err: true, + out: influxdb.InfluxDB{ + URLs: []string{"http://localhost:1234"}, + HTTPProxy: "!@#$%^&*()_+", + HTTPHeaders: map[string]string{ + "x": "y", + }, + }, + }, + { + err: true, + out: influxdb.InfluxDB{ + URLs: []string{"!@#$%^&*()_+"}, + HTTPProxy: "http://localhost:9999", + HTTPHeaders: map[string]string{ + "x": "y", + }, + }, + }, + { + err: true, + out: influxdb.InfluxDB{ + URLs: []string{":::@#$qwert"}, + HTTPProxy: "http://localhost:9999", + HTTPHeaders: map[string]string{ + "x": "y", + }, + }, + }, + { + err: true, + out: influxdb.InfluxDB{ + URLs: []string{"https://localhost:8080"}, + ClientConfig: tls.ClientConfig{ + TLSCA: "thing", + }, + }, + }, + } + + for i := range tests { + err := tests[i].out.Connect() + if !tests[i].err { + require.NoError(t, err) + } else { + require.Error(t, err) + t.Log(err) + } + } +} + +func TestUnused(t *testing.T) { + thing := influxdb.InfluxDB{} + thing.Close() + thing.Description() + thing.SampleConfig() + outputs.Outputs["influxdb_v2"]() +} From 634762b739a34fd4456691be257cbe66e8d8d293 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 12 Sep 2018 14:50:53 -0700 Subject: [PATCH 0179/1815] Update changelog --- CHANGELOG.md | 2 ++ README.md | 1 + 2 files changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6bf96251c..8240478c6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,7 @@ 
### New Outputs - [azure_monitor](./plugins/outputs/azure_monitor/README.md) - Contributed by @influxdata +- [influxdb_v2](./plugins/outputs/influxdb_v2/README.md) - Contributed by @influxdata ### New Parsers @@ -105,6 +106,7 @@ - [#4667](https://github.com/influxdata/telegraf/pull/4667): Align metrics window to interval in cloudwatch input. - [#4642](https://github.com/influxdata/telegraf/pull/4642): Improve Azure Managed Instance support + more in sqlserver input. - [#4682](https://github.com/influxdata/telegraf/pull/4682): Allow alternate binaries for iptables input plugin. +- [#4645](https://github.com/influxdata/telegraf/pull/4645): Add influxdb_v2 output plugin. ### Bugfixes diff --git a/README.md b/README.md index c93d9ea77..6ddb793ef 100644 --- a/README.md +++ b/README.md @@ -171,6 +171,7 @@ configuration options. * [http_response](./plugins/inputs/http_response) * [icinga2](./plugins/inputs/icinga2) * [influxdb](./plugins/inputs/influxdb) +* [influxdb_v2](./plugins/inputs/influxdb_v2) * [internal](./plugins/inputs/internal) * [interrupts](./plugins/inputs/interrupts) * [ipmi_sensor](./plugins/inputs/ipmi_sensor) From 96f3d7def4e1a1c0afd9586cd7ddc6c12469fa45 Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Wed, 12 Sep 2018 16:06:31 -0600 Subject: [PATCH 0180/1815] Update telegraf.conf --- etc/telegraf.conf | 465 +++++++++++++++++++++++++++++++++++++++++----- scripts/build.py | 2 +- 2 files changed, 422 insertions(+), 45 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 11842e7e1..8d1371a24 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -183,8 +183,8 @@ # ## If true, exchange will be passively declared. # # exchange_declare_passive = false # -# ## If true, exchange will be created as a durable exchange. -# # exchange_durable = true +# ## Exchange durability can be either "transient" or "durable". +# # exchange_durability = "durable" # # ## Additional exchange arguments. # # exchange_arguments = { } @@ -256,7 +256,7 @@ # # timeout = "5s" # # ## Enable additional diagnostic logging. -# # enable_diagnosic_logging = false +# # enable_diagnostic_logging = false # # ## Context Tag Sources add Application Insights context tags to a tag value. # ## @@ -267,6 +267,32 @@ # # "ai.cloud.roleInstance" = "kubernetes_pod_name" +# # Send aggregate metrics to Azure Monitor +# [[outputs.azure_monitor]] +# ## Timeout for HTTP writes. +# # timeout = "20s" +# +# ## Set the namespace prefix, defaults to "Telegraf/". +# # namespace_prefix = "Telegraf/" +# +# ## Azure Monitor doesn't have a string value type, so convert string +# ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows +# ## a maximum of 10 dimensions so Telegraf will only send the first 10 +# ## alphanumeric dimensions. +# # strings_as_dimensions = false +# +# ## Both region and resource_id must be set or be available via the +# ## Instance Metadata service on Azure Virtual Machines. +# # +# ## Azure Region to publish metrics against. +# ## ex: region = "southcentralus" +# # region = "" +# # +# ## The Azure Resource ID against which metric will be logged, e.g. +# ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" +# # resource_id = "" + + # # Configuration for AWS CloudWatch output. 
# [[outputs.cloudwatch]]
# ## Amazon REGION
@@ -287,8 +313,22 @@
# #profile = ""
# #shared_credential_file = ""
#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
# ## Namespace for the CloudWatch MetricDatums
# namespace = "InfluxData/Telegraf"
+#
+# ## If you have a large number of metrics, you should consider sending statistic
+# ## values instead of raw metrics, which can not only improve performance but
+# ## also save AWS API cost. If you enable this flag, this plugin will parse the
+# ## required CloudWatch statistic fields (count, min, max, and sum) and send them
+# ## to CloudWatch. You can use the basicstats aggregator to calculate those fields.
+# ## If not all statistic fields are available, all fields will still be sent as
+# ## raw metrics.
+# # write_statistics = false


# # Configuration for CrateDB to send metrics to.
@@ -429,6 +469,12 @@
# # username = "username"
# # password = "pa$$word"
#
+# ## OAuth2 Client Credentials Grant
+# # client_id = "clientid"
+# # client_secret = "secret"
+# # token_url = "https://identityprovider/oauth2/v1/token"
+# # scopes = ["urn:opc:idm:__myscopes__"]
+#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
@@ -470,6 +516,15 @@
# ## Kafka topic for producer messages
# topic = "telegraf"
#
+# ## Optional Client id
+# # client_id = "Telegraf"
+#
+# ## Set the minimal supported Kafka version. Setting this enables the use of new
+# ## Kafka features and APIs. Of particular interest, lz4 compression
+# ## requires at least version 0.10.0.0.
+# ## ex: version = "1.1.0"
+# # version = ""
+#
# ## Optional topic suffix configuration.
# ## If the section is omitted, no suffix is used.
# ## Following topic suffix methods are supported:
@@ -501,11 +556,19 @@
# ## ie, if this tag exists, its value will be used as the routing key
# routing_tag = "host"
#
+# ## Static routing key. Used when no routing_tag is set or as a fallback
+# ## when the tag specified in routing_tag is not found. If set to "random",
+# ## a random value will be generated for each message.
+# ## ex: routing_key = "random"
+# ## routing_key = "telegraf"
+# # routing_key = ""
+#
# ## CompressionCodec represents the various compression codecs recognized by
# ## Kafka in messages.
# ##  0 : No compression
# ##  1 : Gzip compression
# ##  2 : Snappy compression
+# ##  3 : LZ4 compression
# # compression_codec = 0
#
# ## RequiredAcks is used in Produce Requests to tell the broker how many
@@ -528,6 +591,10 @@
# ## until the next flush.
# # max_retry = 3
#
+# ## The maximum permitted size of a message. Should be set equal to or
+# ## smaller than the broker's 'message.max.bytes'.
+# # max_message_bytes = 1000000
+#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
@@ -566,6 +633,12 @@
# #profile = ""
# #shared_credential_file = ""
#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
# ## Kinesis StreamName must exist prior to starting telegraf.
# streamname = "StreamName"
# ## DEPRECATED: PartitionKey as used for sharding data.
@@ -721,11 +794,11 @@
#
# ## Number of data points to send to OpenTSDB in Http requests.
# ## Not used with telnet API. -# httpBatchSize = 50 +# http_batch_size = 50 # # ## URI Path for Http requests to OpenTSDB. # ## Used in cases where OpenTSDB is located behind a reverse proxy. -# httpPath = "/api/put" +# http_path = "/api/put" # # ## Debug true - Prints OpenTSDB communication # debug = false @@ -737,29 +810,33 @@ # # Configuration for the Prometheus client to spawn # [[outputs.prometheus_client]] # ## Address to listen on -# # listen = ":9273" +# listen = ":9273" # -# ## Use TLS -# #tls_cert = "/etc/ssl/telegraf.crt" -# #tls_key = "/etc/ssl/telegraf.key" +# ## Use HTTP Basic Authentication. +# # basic_username = "Foo" +# # basic_password = "Bar" # -# ## Use http basic authentication -# #basic_username = "Foo" -# #basic_password = "Bar" +# ## If set, the IP Ranges which are allowed to access metrics. +# ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"] +# # ip_range = [] # -# ## IP Ranges which are allowed to access metrics -# #ip_range = ["192.168.0.0/24", "192.168.1.0/30"] +# ## Path to publish the metrics on. +# # path = "/metrics" # -# ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration +# ## Expiration interval for each metric. 0 == no expiration # # expiration_interval = "60s" # # ## Collectors to enable, valid entries are "gocollector" and "process". # ## If unset, both are enabled. -# collectors_exclude = ["gocollector", "process"] +# # collectors_exclude = ["gocollector", "process"] # -# # Send string metrics as Prometheus labels. -# # Unless set to false all string metrics will be sent as labels. -# string_as_label = true +# ## Send string metrics as Prometheus labels. +# ## Unless set to false all string metrics will be sent as labels. +# # string_as_label = true +# +# ## If set, enable TLS with the given certificate. +# # tls_cert = "/etc/ssl/telegraf.crt" +# # tls_key = "/etc/ssl/telegraf.key" # # Configuration for the Riemann server to send metrics to @@ -913,6 +990,27 @@ # float = [] +# # Map enum values according to given table. +# [[processors.enum]] +# [[processors.enum.fields]] +# ## Name of the field to map +# source = "name" +# +# ## Destination field to be used for the mapped value. By default the source +# ## field is used, overwriting the original value. +# # destination = "mapped" +# +# ## Default value to be used for all values not contained in the mapping +# ## table. When unset, the unmodified value for the field will be used if no +# ## match is found. +# # default = 0 +# +# ## Table of mappings +# [processors.enum.fields.value_mappings] +# value1 = 1 +# value2 = 2 + + # # Apply metric modifications using override semantics. # [[processors.override]] # ## All modifications on inputs and aggregators can be overridden: @@ -925,6 +1023,25 @@ # # additional_tag = "tag_value" +# # Parse a value in a specified field/tag(s) and add the result in a new metric +# [[processors.parser]] +# ## The name of the fields whose value will be parsed. +# parse_fields = [] +# +# ## If true, incoming metrics are not emitted. +# drop_original = false +# +# ## If set to override, emitted metrics will be merged by overriding the +# ## original metric using the newly parsed metrics. +# merge = "override" +# +# ## The dataformat to be read from files +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + # # Print all metrics that pass through this filter. 
# [[processors.printer]] @@ -959,6 +1076,67 @@ # # result_key = "search_category" +# # Rename measurements, tags, and fields that pass through this filter. +# [[processors.rename]] +# ## Measurement, tag, and field renamings are stored in separate sub-tables. +# ## Specify one sub-table per rename operation. +# # [[processors.rename.measurement]] +# # ## measurement to change +# # from = "kilobytes_per_second" +# # to = "kbps" +# +# # [[processors.rename.tag]] +# # ## tag to change +# # from = "host" +# # to = "hostname" +# +# # [[processors.rename.field]] +# # ## field to change +# # from = "lower" +# # to = "min" +# +# # [[processors.rename.field]] +# # ## field to change +# # from = "upper" +# # to = "max" + + +# # Perform string processing on tags, fields, and measurements +# [[processors.strings]] +# ## Convert a tag value to uppercase +# # [[processors.strings.uppercase]] +# # tag = "method" +# +# ## Convert a field value to lowercase and store in a new field +# # [[processors.strings.lowercase]] +# # field = "uri_stem" +# # dest = "uri_stem_normalised" +# +# ## Trim leading and trailing whitespace using the default cutset +# # [[processors.strings.trim]] +# # field = "message" +# +# ## Trim leading characters in cutset +# # [[processors.strings.trim_left]] +# # field = "message" +# # cutset = "\t" +# +# ## Trim trailing characters in cutset +# # [[processors.strings.trim_right]] +# # field = "message" +# # cutset = "\r\n" +# +# ## Trim the given prefix from the field +# # [[processors.strings.trim_prefix]] +# # field = "my_value" +# # prefix = "my_" +# +# ## Trim the given suffix from the field +# # [[processors.strings.trim_suffix]] +# # field = "read_count" +# # suffix = "_count" + + # # Print all metrics that pass through this filter. # [[processors.topk]] # ## How many seconds between aggregations @@ -1060,6 +1238,18 @@ # drop_original = false +# # Count the occurance of values in fields. +# [[aggregators.valuecounter]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# ## The fields for which the values will be counted +# fields = [] + + ############################################################################### # INPUT PLUGINS # @@ -1139,6 +1329,31 @@ # no configuration +# # Gather ActiveMQ metrics +# [[inputs.activemq]] +# ## Required ActiveMQ Endpoint +# # server = "192.168.50.10" +# +# ## Required ActiveMQ port +# # port = 8161 +# +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# +# ## Required ActiveMQ webadmin root path +# # webadmin = "admin" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification + + # # Read stats from aerospike server(s) # [[inputs.aerospike]] # ## Aerospike servers to connect to (with port) @@ -1349,6 +1564,12 @@ # #profile = "" # #shared_credential_file = "" # +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# # # The minimum period for Cloudwatch metrics is 1 minute (60s). 
However not all # # metrics are made available to the 1 minute period. Some are collected at # # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. @@ -1385,7 +1606,9 @@ # #[[inputs.cloudwatch.metrics]] # # names = ["Latency", "RequestCount"] # # -# # ## Dimension filters for Metric (optional) +# # ## Dimension filters for Metric. These are optional however all dimensions +# # ## defined for the metric names must be specified in order to retrieve +# # ## the metric statistics. # # [[inputs.cloudwatch.metrics.dimensions]] # # name = "LoadBalancerName" # # value = "p-example" @@ -1455,7 +1678,7 @@ # # Read CouchDB Stats from one or more servers # [[inputs.couchdb]] # ## Works with CouchDB stats endpoints out of the box -# ## Multiple HOSTs from which to read CouchDB stats: +# ## Multiple Hosts from which to read CouchDB stats: # hosts = ["http://localhost:8086/_stats"] @@ -1685,6 +1908,48 @@ # # timeout = "5s" +# # Reload and gather from file[s] on telegraf's interval. +# [[inputs.file]] +# ## Files to parse each interval. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## /var/log/**.log -> recursively find all .log files in /var/log +# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log +# ## /var/log/apache.log -> only read the apache log file +# files = ["/var/log/apache/access.log"] +# +# ## The dataformat to be read from files +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Count files in a directory +# [[inputs.filecount]] +# ## Directory to gather stats about. +# directory = "/var/cache/apt/archives" +# +# ## Only count files that match the name pattern. Defaults to "*". +# name = "*.deb" +# +# ## Count files in subdirectories. Defaults to true. +# recursive = false +# +# ## Only count regular files. Defaults to true. +# regular_only = true +# +# ## Only count files that are at least this size in bytes. If size is +# ## a negative number, only count files that are smaller than the +# ## absolute value of size. Defaults to 0. +# size = 0 +# +# ## Only count files that have not been touched for at least this +# ## duration. If mtime is negative, only count files that have been +# ## touched in this duration. Defaults to "0s". +# mtime = "0s" + + # # Read stats about given file(s) # [[inputs.filestat]] # ## Files to gather stats about. @@ -1919,6 +2184,29 @@ # # apiVersion = "v1" +# # Gather Icinga2 status +# [[inputs.icinga2]] +# ## Required Icinga2 server address (default: "https://localhost:5665") +# # server = "https://localhost:5665" +# +# ## Required Icinga2 object type ("services" or "hosts, default "services") +# # object_type = "services" +# +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# +# ## Maximum time to receive response. 
+# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + # # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints # [[inputs.influxdb]] # ## Works with InfluxDB debug endpoints out of the box, @@ -1978,7 +2266,7 @@ # # ## Timeout for the ipmitool command to complete # timeout = "20s" - +# # ## Schema Version: (Optional, defaults to version 1) # metric_version = 2 @@ -2004,8 +2292,6 @@ # ## Setting 'use_lock' to true runs iptables with the "-w" option. # ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl") # use_lock = false -# ## Define an alternate executable, such as "ip6tables". Default is "iptables". -# # binary = "ip6tables" # ## defines the table to monitor: # table = "filter" # ## defines the chains to monitor. @@ -2164,6 +2450,26 @@ # # no configuration +# # Read status information from one or more Kibana servers +# [[inputs.kibana]] +# ## specify a list of one or more Kibana servers +# servers = ["http://localhost:5601"] +# +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Read metrics from the kubernetes kubelet api # [[inputs.kubernetes]] # ## URL for the kubelet @@ -2348,7 +2654,7 @@ # ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST # gather_process_list = true # # -# ## gather thread state counts from INFORMATION_SCHEMA.USER_STATISTICS +# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS # gather_user_statistics = true # # # ## gather auto_increment columns and max values from information schema @@ -2499,11 +2805,11 @@ # # Pulls statistics from nvidia GPUs attached to the host # [[inputs.nvidia_smi]] -# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath -# # bin_path = /usr/bin/nvidia-smi +# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/usr/bin/nvidia-smi" # -# ## Optional: timeout for GPU polling -# # timeout = 5s +# ## Optional: timeout for GPU polling +# # timeout = "5s" # # OpenLDAP cn=Monitor plugin @@ -2514,7 +2820,7 @@ # # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption. # # note that port will likely need to be changed to 636 for ldaps # # valid options: "" | "starttls" | "ldaps" -# ssl = "" +# tls = "" # # # skip peer certificate verification. Default is false. # insecure_skip_verify = false @@ -2591,20 +2897,23 @@ # # Ping given url(s) and return statistics # [[inputs.ping]] -# ## NOTE: this plugin forks the ping command. You may need to set capabilities -# ## via setcap cap_net_raw+p /bin/ping -# # # ## List of urls to ping -# urls = ["www.google.com"] # required -# ## number of pings to send per collection (ping -c ) +# urls = ["example.org"] +# +# ## Number of pings to send per collection (ping -c ) # # count = 1 -# ## interval, in s, at which to ping. 0 == default (ping -i ) +# +# ## Interval, in s, at which to ping. 0 == default (ping -i ) +# ## Not available in Windows. # # ping_interval = 1.0 -# ## per-ping timeout, in s. 
0 == no timeout (ping -W ) +# +# ## Per-ping timeout, in s. 0 == no timeout (ping -W ) # # timeout = 1.0 -# ## total-ping deadline, in s. 0 == no deadline (ping -w ) +# +# ## Total-ping deadline, in s. 0 == no deadline (ping -w ) # # deadline = 10 -# ## interface or source address to send ping from (ping -I ) +# +# ## Interface or source address to send ping from (ping -I ) # ## on Darwin and Freebsd only source address possible: (ping -S ) # # interface = "" @@ -2748,6 +3057,13 @@ # ## If no servers are specified, then localhost is used as the host. # ## If no port is specified, 6379 is used # servers = ["tcp://localhost:6379"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true # # Read metrics from one or many RethinkDB servers @@ -3119,6 +3435,22 @@ # # virtual_servers = [1] +# # Read Tengine's basic status information (ngx_http_reqstat_module) +# [[inputs.tengine]] +# # An array of Tengine reqstat module URI to gather stats. +# urls = ["http://127.0.0.1/us"] +# +# # HTTP response timeout (default: 5s) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.cer" +# # tls_key = "/etc/telegraf/key.key" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Gather metrics from the Tomcat server status page. # [[inputs.tomcat]] # ## URL of the Tomcat server status @@ -3195,6 +3527,23 @@ # # instance_name = instanceName +# # Reads metrics from a SSL certificate +# [[inputs.x509_cert]] +# ## List certificate sources +# sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"] +# +# ## Timeout for SSL connection +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools # [[inputs.zfs]] # ## ZFS kstat path. Ignored on FreeBSD @@ -3268,10 +3617,13 @@ # # exchange_arguments = { } # # exchange_arguments = {"hash_propery" = "timestamp"} # -# ## AMQP queue name +# ## AMQP queue name. # queue = "telegraf" # -# ## Binding Key +# ## AMQP queue durability can be "transient" or "durable". +# queue_durability = "durable" +# +# ## Binding Key. # binding_key = "#" # # ## Maximum number of messages server should give to the worker. @@ -3404,6 +3756,15 @@ # ## topic(s) to consume # topics = ["telegraf"] # +# ## Optional Client id +# # client_id = "Telegraf" +# +# ## Set the minimal supported Kafka version. Setting this enables the use of new +# ## Kafka features and APIs. Of particular interest, lz4 compression +# ## requires at least version 0.10.0.0. +# ## ex: version = "1.1.0" +# # version = "" +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -3428,7 +3789,7 @@ # # ## Maximum length of a message to consume, in bytes (default 0/unlimited); # ## larger messages are dropped -# max_message_len = 65536 +# max_message_len = 1000000 # # Read metrics from Kafka topic(s) @@ -3457,6 +3818,9 @@ # # Stream and parse log file(s). # [[inputs.logparser]] +# ## DEPRECATED: The 'logparser' plugin is deprecated in 1.8. 
Please use the +# ## 'tail' plugin with the grok data_format as a replacement. +# # ## Log files to parse. # ## These accept standard unix glob matching rules, but with the addition of # ## ** as a "super asterisk". ie: @@ -3590,6 +3954,19 @@ # data_format = "influx" +# # Read metrics from one or many pgbouncer servers +# [[inputs.pgbouncer]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# address = "host=localhost user=pgbouncer sslmode=disable" + + # # Read metrics from one or many postgresql servers # [[inputs.postgresql]] # ## specify address via a url matching: @@ -3820,9 +4197,9 @@ # ## Only applies to stream sockets (e.g. TCP). # # max_connections = 1024 # -# ## Read timeout (default = 500ms). +# ## Read timeout is the maximum time allowed for reading a single message (default = 5s). # ## 0 means unlimited. -# # read_timeout = 500ms +# # read_timeout = "5s" # # ## Whether to parse in best effort mode or not (default = false). # ## By default best effort parsing is off. diff --git a/scripts/build.py b/scripts/build.py index bfaba5a8a..675a4c3c0 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -95,7 +95,7 @@ supported_packages = { "freebsd": [ "tar" ] } -next_version = '1.8.0' +next_version = '1.9.0' ################ #### Telegraf Functions From 41d528c8ce02fe00ca42c281e506e1e20e60f378 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 17 Sep 2018 11:45:08 -0700 Subject: [PATCH 0181/1815] Split parser/serializer docs (#4690) --- README.md | 39 +- docs/DATA_FORMATS_INPUT.md | 1131 +------------------- docs/DATA_FORMATS_OUTPUT.md | 197 +--- docs/METRICS.md | 22 + docs/README.md | 21 + docs/TEMPLATE_PATTERN.md | 135 +++ plugins/inputs/statsd/README.md | 8 +- plugins/inputs/statsd/statsd.go | 2 +- plugins/parsers/EXAMPLE_README.md | 46 + plugins/parsers/collectd/README.md | 44 + plugins/parsers/csv/README.md | 104 ++ plugins/parsers/dropwizard/README.md | 171 +++ plugins/parsers/graphite/README.md | 48 + plugins/parsers/grok/README.md | 222 ++++ plugins/parsers/influx/README.md | 20 + plugins/parsers/json/README.md | 214 ++++ plugins/parsers/logfmt/README.md | 34 + plugins/parsers/nagios/README.md | 17 + plugins/parsers/value/README.md | 36 + plugins/parsers/wavefront/README.md | 20 + plugins/serializers/EXAMPLE_README.md | 46 + plugins/serializers/graphite/README.md | 51 + plugins/serializers/influx/README.md | 34 + plugins/serializers/json/README.md | 77 ++ plugins/serializers/splunkmetric/README.md | 4 +- 25 files changed, 1412 insertions(+), 1331 deletions(-) create mode 100644 docs/METRICS.md create mode 100644 docs/README.md create mode 100644 docs/TEMPLATE_PATTERN.md create mode 100644 plugins/parsers/EXAMPLE_README.md create mode 100644 plugins/parsers/collectd/README.md create mode 100644 plugins/parsers/csv/README.md create mode 100644 plugins/parsers/dropwizard/README.md create mode 100644 plugins/parsers/graphite/README.md create mode 100644 plugins/parsers/grok/README.md create mode 100644 plugins/parsers/influx/README.md create mode 100644 plugins/parsers/json/README.md create mode 100644 plugins/parsers/logfmt/README.md create mode 100644 plugins/parsers/nagios/README.md create mode 100644 plugins/parsers/value/README.md create mode 100644 plugins/parsers/wavefront/README.md create mode 100644 
plugins/serializers/EXAMPLE_README.md create mode 100644 plugins/serializers/graphite/README.md create mode 100644 plugins/serializers/influx/README.md create mode 100644 plugins/serializers/json/README.md diff --git a/README.md b/README.md index 6ddb793ef..5bc830457 100644 --- a/README.md +++ b/README.md @@ -1,23 +1,19 @@ # Telegraf [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/) -Telegraf is an agent written in Go for collecting, processing, aggregating, -and writing metrics. +Telegraf is an agent for collecting, processing, aggregating, and writing metrics. Design goals are to have a minimal memory footprint with a plugin system so -that developers in the community can easily add support for collecting metrics -. For an example configuration referencet from local or remote services. +that developers in the community can easily add support for collecting +metrics. -Telegraf is plugin-driven and has the concept of 4 distinct plugins: +Telegraf is plugin-driven and has the concept of 4 distinct plugin types: 1. [Input Plugins](#input-plugins) collect metrics from the system, services, or 3rd party APIs 2. [Processor Plugins](#processor-plugins) transform, decorate, and/or filter metrics 3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.) 4. [Output Plugins](#output-plugins) write metrics to various destinations -For more information on Processor and Aggregator plugins please [read this](./docs/AGGREGATORS_AND_PROCESSORS.md). - -New plugins are designed to be easy to contribute, -we'll eagerly accept pull +New plugins are designed to be easy to contribute, we'll eagerly accept pull requests and will manage the set of plugins that Telegraf supports. ## Contributing @@ -26,7 +22,7 @@ There are many ways to contribute: - Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new) - [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation) - [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls) -- Answer questions on github and on the [Community Site](https://community.influxdata.com/) +- Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/) - [Contribute plugins](CONTRIBUTING.md) ## Installation: @@ -42,7 +38,7 @@ Ansible role: https://github.com/rossmcdonald/telegraf Telegraf requires golang version 1.9 or newer, the Makefile requires GNU make. -1. [Install Go](https://golang.org/doc/install) >=1.9 +1. [Install Go](https://golang.org/doc/install) >=1.9 (1.10 recommended) 2. [Install dep](https://golang.github.io/dep/docs/installation.html) ==v0.5.0 3. 
Download Telegraf source: ``` @@ -86,44 +82,47 @@ These builds are generated from the master branch: See usage with: ``` -./telegraf --help +telegraf --help ``` #### Generate a telegraf config file: ``` -./telegraf config > telegraf.conf +telegraf config > telegraf.conf ``` #### Generate config with only cpu input & influxdb output plugins defined: ``` -./telegraf --input-filter cpu --output-filter influxdb config +telegraf --input-filter cpu --output-filter influxdb config ``` #### Run a single telegraf collection, outputing metrics to stdout: ``` -./telegraf --config telegraf.conf --test +telegraf --config telegraf.conf --test ``` #### Run telegraf with all plugins defined in config file: ``` -./telegraf --config telegraf.conf +telegraf --config telegraf.conf ``` #### Run telegraf, enabling the cpu & memory input, and influxdb output plugins: ``` -./telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb +telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb ``` +## Documentation -## Configuration +[Latest Release Documentation][release docs]. -See the [configuration guide](docs/CONFIGURATION.md) for a rundown of the more advanced -configuration options. +For documentation on the latest development code see the [documentation index][devel docs]. + +[release docs]: https://docs.influxdata.com/telegraf +[devel docs]: docs ## Input Plugins diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index ff9160812..b71650168 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -1,42 +1,24 @@ -# Telegraf Input Data Formats +# Input Data Formats -Telegraf is able to parse the following input data formats into metrics: +Telegraf contains many general purpose plugins that support parsing input data +using a configurable parser into [metrics][]. This allows, for example, the +`kafka_consumer` input plugin to process messages in either InfluxDB Line +Protocol or in JSON format. -1. [InfluxDB Line Protocol](#influx) -1. [JSON](#json) -1. [Graphite](#graphite) -1. [Value](#value), ie: 45 or "booyah" -1. [Nagios](#nagios) (exec input only) -1. [Collectd](#collectd) -1. [Dropwizard](#dropwizard) -1. [Grok](#grok) -1. [Logfmt](#logfmt) -1. [Wavefront](#wavefront) -1. [CSV](#csv) +- [InfluxDB Line Protocol](/plugins/parsers/influx) +- [Collectd](/plugins/parsers/collectd) +- [CSV](/plugins/parsers/csv) +- [Dropwizard](/plugins/parsers/dropwizard) +- [Graphite](/plugins/parsers/graphite) +- [Grok](/plugins/parsers/grok) +- [JSON](/plugins/parsers/json) +- [Logfmt](/plugins/parsers/logfmt) +- [Nagios](/plugins/parsers/nagios) +- [Value](/plugins/parsers/value), ie: 45 or "booyah" +- [Wavefront](/plugins/parsers/wavefront) -Telegraf metrics, similar to InfluxDB's [points][influxdb key concepts], are a -combination of four basic parts: - -[influxdb key concepts]: https://docs.influxdata.com/influxdb/v1.6/concepts/key_concepts/ - -1. Measurement Name -1. Tags -1. Fields -1. Timestamp - -These four parts are easily defined when using InfluxDB line-protocol as a -data format. But there are other data formats that users may want to use which -require more advanced configuration to create usable Telegraf metrics. - -Plugins such as `exec` and `kafka_consumer` parse textual data. Up until now, -these plugins were statically configured to parse just a single -data format. `exec` mostly only supported parsing JSON, and `kafka_consumer` only -supported data in InfluxDB line-protocol. 
- -But now we are normalizing the parsing of various data formats across all -plugins that can support it. You will be able to identify a plugin that supports -different data formats by the presence of a `data_format` config option, for -example, in the exec plugin: +Any input plugin containing the `data_format` option can use it to select the +desired parser: ```toml [[inputs.exec]] @@ -51,1081 +33,6 @@ example, in the exec plugin: ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "json" - - ## Additional configuration options go here -``` - -Each data_format has an additional set of configuration options available, which -I'll go over below. - -# Influx: - -There are no additional configuration options for InfluxDB [line protocol][]. The -metrics are parsed directly into Telegraf metrics. - -[line protocol]: https://docs.influxdata.com/influxdb/latest/write_protocols/line/ - -#### Influx Configuration: - -```toml -[[inputs.exec]] - ## Commands array - commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] - - ## measurement name suffix (for separating different commands) - name_suffix = "_mycollector" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" -``` - -# JSON: - -The JSON data format flattens JSON into metric _fields_. -NOTE: Only numerical values are converted to fields, and they are converted -into a float. strings are ignored unless specified as a tag_key (see below). - -So for example, this JSON: - -```json -{ - "a": 5, - "b": { - "c": 6 - }, - "ignored": "I'm a string" -} -``` - -Would get translated into _fields_ of a measurement: - -``` -myjsonmetric a=5,b_c=6 -``` - -The _measurement_ _name_ is usually the name of the plugin, -but can be overridden using the `name_override` config option. - -#### JSON Configuration: - -The JSON data format supports specifying "tag_keys", "json_string_fields", and "json_query". -If specified, keys in "tag_keys" and "json_string_fields" will be searched for in the root-level -and any nested lists of the JSON blob. All int and float values are added to fields by default. -If the key(s) exist, they will be applied as tags or fields to the Telegraf metrics. -If "json_string_fields" is specified, the string will be added as a field. - -The "json_query" configuration is a gjson path to an JSON object or -list of JSON objects. If this path leads to an array of values or -single data point an error will be thrown. If this configuration -is specified, only the result of the query will be parsed and returned as metrics. - -The "json_name_key" configuration specifies the key of the field whos value will be -added as the metric name. - -Object paths are specified using gjson path format, which is denoted by object keys -concatenated with "." to go deeper in nested JSON objects. -Additional information on gjson paths can be found here: https://github.com/tidwall/gjson#path-syntax - -The JSON data format also supports extracting time values through the -config "json_time_key" and "json_time_format". If "json_time_key" is set, -"json_time_format" must be specified. The "json_time_key" describes the -name of the field containing time information. The "json_time_format" -must be a recognized Go time format. -If parsing a Unix epoch timestamp in seconds, e.g. 
1536092344.1, this config must be set to "unix" (case insensitive); -corresponding JSON value can have a decimal part and can be a string or a number JSON representation. -If value is in number representation, it'll be treated as a double precision float, and could have some precision loss. -If value is in string representation, there'll be no precision loss up to nanosecond precision. Decimal positions beyond that will be dropped. -If parsing a Unix epoch timestamp in milliseconds, e.g. 1536092344100, this config must be set to "unix_ms" (case insensitive); -corresponding JSON value must be a (long) integer and be in number JSON representation. -If there is no year provided, the metrics will have the current year. -More info on time formats can be found here: https://golang.org/pkg/time/#Parse - -For example, if you had this configuration: - -```toml -[[inputs.exec]] - ## Commands array - commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] - - ## measurement name suffix (for separating different commands) - name_suffix = "_mycollector" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "json" - - ## List of tag names to extract from JSON server response - tag_keys = [ - "my_tag_1", - "my_tag_2" - ] - - ## The json path specifying where to extract the metric name from - # json_name_key = "" - - ## List of field names to extract from JSON and add as string fields - # json_string_fields = [] - - ## gjson query path to specify a specific chunk of JSON to be parsed with - ## the above configuration. If not specified, the whole file will be parsed. - ## gjson query paths are described here: https://github.com/tidwall/gjson#path-syntax - # json_query = "" - - ## holds the name of the tag of timestamp - # json_time_key = "" - - ## holds the format of timestamp to be parsed - # json_time_format = "" ``` -with this JSON output from a command: - -```json -{ - "a": 5, - "b": { - "c": 6 - }, - "my_tag_1": "foo" -} -``` - -Your Telegraf metrics would get tagged with "my_tag_1" - -``` -exec_mycollector,my_tag_1=foo a=5,b_c=6 -``` - -If the JSON data is an array, then each element of the array is -parsed with the configured settings. Each resulting metric will -be output with the same timestamp. - -For example, if the following configuration: - -```toml -[[inputs.exec]] - ## Commands array - commands = ["/usr/bin/mycollector --foo=bar"] - - ## measurement name suffix (for separating different commands) - name_suffix = "_mycollector" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "json" - - ## List of tag names to extract from top-level of JSON server response - tag_keys = [ - "my_tag_1", - "my_tag_2" - ] - - ## List of field names to extract from JSON and add as string fields - # json_string_fields = [] - - ## gjson query path to specify a specific chunk of JSON to be parsed with - ## the above configuration. 
If not specified, the whole file will be parsed
-  # json_query = ""
-
-  ## holds the name of the tag of timestamp
-  json_time_key = "b_time"
-
-  ## holds the format of timestamp to be parsed
-  json_time_format = "02 Jan 06 15:04 MST"
-```
-
-with this JSON output from a command:
-
-```json
-[
-    {
-        "a": 5,
-        "b": {
-            "c": 6,
-            "time":"04 Jan 06 15:04 MST"
-        },
-        "my_tag_1": "foo",
-        "my_tag_2": "baz"
-    },
-    {
-        "a": 7,
-        "b": {
-            "c": 8,
-            "time":"11 Jan 07 15:04 MST"
-        },
-        "my_tag_1": "bar",
-        "my_tag_2": "baz"
-    }
-]
-```
-
-Your Telegraf metrics would get tagged with "my_tag_1" and "my_tag_2" and fielded with "b_c"
-The metric's time will be a time.Time object, as specified by "b_time"
-
-```
-exec_mycollector,my_tag_1=foo,my_tag_2=baz b_c=6 1136387040000000000
-exec_mycollector,my_tag_1=bar,my_tag_2=baz b_c=8 1168527840000000000
-```
-
-If you want to only use a specific portion of your JSON, use the "json_query"
-configuration to specify a path to a JSON object.
-
-For example, with the following config:
-```toml
-[[inputs.exec]]
-  ## Commands array
-  commands = ["/usr/bin/mycollector --foo=bar"]
-
-  ## measurement name suffix (for separating different commands)
-  name_suffix = "_mycollector"
-
-  ## Data format to consume.
-  ## Each data format has its own unique set of configuration options, read
-  ## more about them here:
-  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-  data_format = "json"
-
-  ## List of tag names to extract from top-level of JSON server response
-  tag_keys = ["first"]
-
-  ## List of field names to extract from JSON and add as string fields
-  json_string_fields = ["last"]
-
-  ## gjson query path to specify a specific chunk of JSON to be parsed with
-  ## the above configuration. If not specified, the whole file will be parsed
-  json_query = "obj.friends"
-
-  ## holds the name of the tag of timestamp
-  # json_time_key = ""
-
-  ## holds the format of timestamp to be parsed
-  # json_time_format = ""
-```
-
-with this JSON as input:
-```json
-{
-    "obj": {
-        "name": {"first": "Tom", "last": "Anderson"},
-        "age":37,
-        "children": ["Sara","Alex","Jack"],
-        "fav.movie": "Deer Hunter",
-        "friends": [
-            {"first": "Dale", "last": "Murphy", "age": 44},
-            {"first": "Roger", "last": "Craig", "age": 68},
-            {"first": "Jane", "last": "Murphy", "age": 47}
-        ]
-    }
-}
-```
-You would receive 3 metrics tagged with "first", and fielded with "last" and "age"
-
-```
-exec_mycollector, "first":"Dale" "last":"Murphy","age":44
-exec_mycollector, "first":"Roger" "last":"Craig","age":68
-exec_mycollector, "first":"Jane" "last":"Murphy","age":47
-```
-
-# Value:
-
-The "value" data format translates single values into Telegraf metrics. This
-is done by assigning a measurement name and setting a single field ("value")
-as the parsed metric.
-
-#### Value Configuration:
-
-You **must** tell Telegraf what type of metric to collect by using the
-`data_type` configuration option. Available options are:
-
-1. integer
-2. float or long
-3. string
-4. boolean
-
-**Note:** It is also recommended that you set `name_override` to a measurement
-name that makes sense for your metric, otherwise it will just be set to the
-name of the plugin.
-
-```toml
-[[inputs.exec]]
-  ## Commands array
-  commands = ["cat /proc/sys/kernel/random/entropy_avail"]
-
-  ## override the default metric name of "exec"
-  name_override = "entropy_available"
-
-  ## Data format to consume.
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "value" - data_type = "integer" # required -``` - -# Graphite: - -The Graphite data format translates graphite _dot_ buckets directly into -telegraf measurement names, with a single value field, and without any tags. -By default, the separator is left as ".", but this can be changed using the -"separator" argument. For more advanced options, -Telegraf supports specifying "templates" to translate -graphite buckets into Telegraf metrics. - -Templates are of the form: - -``` -"host.mytag.mytag.measurement.measurement.field*" -``` - -Where the following keywords exist: - -1. `measurement`: specifies that this section of the graphite bucket corresponds -to the measurement name. This can be specified multiple times. -2. `field`: specifies that this section of the graphite bucket corresponds -to the field name. This can be specified multiple times. -3. `measurement*`: specifies that all remaining elements of the graphite bucket -correspond to the measurement name. -4. `field*`: specifies that all remaining elements of the graphite bucket -correspond to the field name. - -Any part of the template that is not a keyword is treated as a tag key. This -can also be specified multiple times. - -NOTE: `field*` cannot be used in conjunction with `measurement*`! - -#### Measurement & Tag Templates: - -The most basic template is to specify a single transformation to apply to all -incoming metrics. So the following template: - -```toml -templates = [ - "region.region.measurement*" -] -``` - -would result in the following Graphite -> Telegraf transformation. - -``` -us.west.cpu.load 100 -=> cpu.load,region=us.west value=100 -``` - -Multiple templates can also be specified, but these should be differentiated -using _filters_ (see below for more details) - -```toml -templates = [ - "*.*.* region.region.measurement", # <- all 3-part measurements will match this one. - "*.*.*.* region.region.host.measurement", # <- all 4-part measurements will match this one. -] -``` - -#### Field Templates: - -The field keyword tells Telegraf to give the metric that field name. -So the following template: - -```toml -separator = "_" -templates = [ - "measurement.measurement.field.field.region" -] -``` - -would result in the following Graphite -> Telegraf transformation. - -``` -cpu.usage.idle.percent.eu-east 100 -=> cpu_usage,region=eu-east idle_percent=100 -``` - -The field key can also be derived from all remaining elements of the graphite -bucket by specifying `field*`: - -```toml -separator = "_" -templates = [ - "measurement.measurement.region.field*" -] -``` - -which would result in the following Graphite -> Telegraf transformation. - -``` -cpu.usage.eu-east.idle.percentage 100 -=> cpu_usage,region=eu-east idle_percentage=100 -``` - -#### Filter Templates: - -Users can also filter the template(s) to use based on the name of the bucket, -using glob matching, like so: - -```toml -templates = [ - "cpu.* measurement.measurement.region", - "mem.* measurement.measurement.host" -] -``` - -which would result in the following transformation: - -``` -cpu.load.eu-east 100 -=> cpu_load,region=eu-east value=100 - -mem.cached.localhost 256 -=> mem_cached,host=localhost value=256 -``` - -#### Adding Tags: - -Additional tags can be added to a metric that don't exist on the received metric. 
-You can add additional tags by specifying them after the pattern. -Tags have the same format as the line protocol. -Multiple tags are separated by commas. - -```toml -templates = [ - "measurement.measurement.field.region datacenter=1a" -] -``` - -would result in the following Graphite -> Telegraf transformation. - -``` -cpu.usage.idle.eu-east 100 -=> cpu_usage,region=eu-east,datacenter=1a idle=100 -``` - -There are many more options available, -[More details can be found here](https://github.com/influxdata/influxdb/tree/master/services/graphite#templates) - -#### Graphite Configuration: - -```toml -[[inputs.exec]] - ## Commands array - commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] - - ## measurement name suffix (for separating different commands) - name_suffix = "_mycollector" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "graphite" - - ## This string will be used to join the matched values. - separator = "_" - - ## Each template line requires a template pattern. It can have an optional - ## filter before the template and separated by spaces. It can also have optional extra - ## tags following the template. Multiple tags should be separated by commas and no spaces - ## similar to the line protocol format. There can be only one default template. - ## Templates support below format: - ## 1. filter + template - ## 2. filter + template + extra tag(s) - ## 3. filter + template with field key - ## 4. default template - templates = [ - "*.app env.service.resource.measurement", - "stats.* .host.measurement* region=eu-east,agent=sensu", - "stats2.* .host.measurement.field", - "measurement*" - ] -``` - -# Nagios: - -There are no additional configuration options for Nagios line-protocol. The -metrics are parsed directly into Telegraf metrics. - -Note: Nagios Input Data Formats is only supported in `exec` input plugin. - -#### Nagios Configuration: - -```toml -[[inputs.exec]] - ## Commands array - commands = ["/usr/lib/nagios/plugins/check_load -w 5,6,7 -c 7,8,9"] - - ## measurement name suffix (for separating different commands) - name_suffix = "_mycollector" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "nagios" -``` - -# Collectd: - -The collectd format parses the collectd binary network protocol. Tags are -created for host, instance, type, and type instance. All collectd values are -added as float64 fields. - -For more information about the binary network protocol see -[here](https://collectd.org/wiki/index.php/Binary_protocol). - -You can control the cryptographic settings with parser options. Create an -authentication file and set `collectd_auth_file` to the path of the file, then -set the desired security level in `collectd_security_level`. - -Additional information including client setup can be found -[here](https://collectd.org/wiki/index.php/Networking_introduction#Cryptographic_setup). - -You can also change the path to the typesdb or add additional typesdb using -`collectd_typesdb`. - -#### Collectd Configuration: - -```toml -[[inputs.socket_listener]] - service_address = "udp://127.0.0.1:25826" - name_prefix = "collectd_" - - ## Data format to consume. 
- ## Each data format has its own unique set of configuration options, read
-  ## more about them here:
-  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-  data_format = "collectd"
-
-  ## Authentication file for cryptographic security levels
-  collectd_auth_file = "/etc/collectd/auth_file"
-  ## One of none (default), sign, or encrypt
-  collectd_security_level = "encrypt"
-  ## Path to TypesDB specifications
-  collectd_typesdb = ["/usr/share/collectd/types.db"]
-
-  # Multi-value plugins can be handled two ways.
-  # "split" will parse and store the multi-value plugin data into separate measurements
-  # "join" will parse and store the multi-value plugin as a single multi-value measurement.
-  # "split" is the default behavior for backward compatibility with previous versions of influxdb.
-  collectd_parse_multivalue = "split"
-```
-
-# Dropwizard:
-
-The dropwizard format can parse the JSON representation of a single dropwizard metric registry. By default, tags are parsed from metric names as if they were actual influxdb line protocol keys (`measurement<,tag_set>`) which can be overridden by defining custom [measurement & tag templates](./DATA_FORMATS_INPUT.md#measurement--tag-templates). All field value types are supported, `string`, `number` and `boolean`.
-
-A typical JSON of a dropwizard metric registry:
-
-```json
-{
-	"version": "3.0.0",
-	"counters" : {
-		"measurement,tag1=green" : {
-			"count" : 1
-		}
-	},
-	"meters" : {
-		"measurement" : {
-			"count" : 1,
-			"m15_rate" : 1.0,
-			"m1_rate" : 1.0,
-			"m5_rate" : 1.0,
-			"mean_rate" : 1.0,
-			"units" : "events/second"
-		}
-	},
-	"gauges" : {
-		"measurement" : {
-			"value" : 1
-		}
-	},
-	"histograms" : {
-		"measurement" : {
-			"count" : 1,
-			"max" : 1.0,
-			"mean" : 1.0,
-			"min" : 1.0,
-			"p50" : 1.0,
-			"p75" : 1.0,
-			"p95" : 1.0,
-			"p98" : 1.0,
-			"p99" : 1.0,
-			"p999" : 1.0,
-			"stddev" : 1.0
-		}
-	},
-	"timers" : {
-		"measurement" : {
-			"count" : 1,
-			"max" : 1.0,
-			"mean" : 1.0,
-			"min" : 1.0,
-			"p50" : 1.0,
-			"p75" : 1.0,
-			"p95" : 1.0,
-			"p98" : 1.0,
-			"p99" : 1.0,
-			"p999" : 1.0,
-			"stddev" : 1.0,
-			"m15_rate" : 1.0,
-			"m1_rate" : 1.0,
-			"m5_rate" : 1.0,
-			"mean_rate" : 1.0,
-			"duration_units" : "seconds",
-			"rate_units" : "calls/second"
-		}
-	}
-}
-```
-
-Would get translated into 4 different measurements:
-
-```
-measurement,metric_type=counter,tag1=green count=1
-measurement,metric_type=meter count=1,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0
-measurement,metric_type=gauge value=1
-measurement,metric_type=histogram count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0
-measurement,metric_type=timer count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0,stddev=1.0,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0
-```
-
-You may also parse a dropwizard registry from any JSON document which contains a dropwizard registry in some inner field.
-Eg.
to parse the following JSON document:
-
-```json
-{
-	"time" : "2017-02-22T14:33:03.662+02:00",
-	"tags" : {
-		"tag1" : "green",
-		"tag2" : "yellow"
-	},
-	"metrics" : {
-		"counters" : {
-			"measurement" : {
-				"count" : 1
-			}
-		},
-		"meters" : {},
-		"gauges" : {},
-		"histograms" : {},
-		"timers" : {}
-	}
-}
-```
-and translate it into:
-
-```
-measurement,metric_type=counter,tag1=green,tag2=yellow count=1 1487766783662000000
-```
-
-you simply need to use the following additional configuration properties:
-
-```toml
-dropwizard_metric_registry_path = "metrics"
-dropwizard_time_path = "time"
-dropwizard_time_format = "2006-01-02T15:04:05Z07:00"
-dropwizard_tags_path = "tags"
-## tag paths per tag are supported too, eg.
-#[inputs.yourinput.dropwizard_tag_paths]
-#  tag1 = "tags.tag1"
-#  tag2 = "tags.tag2"
-```
-
-
-For more information about the dropwizard json format see
-[here](http://metrics.dropwizard.io/3.1.0/manual/json/).
-
-#### Dropwizard Configuration:
-
-```toml
-[[inputs.exec]]
-  ## Commands array
-  commands = ["curl http://localhost:8080/sys/metrics"]
-  timeout = "5s"
-
-  ## Data format to consume.
-  ## Each data format has its own unique set of configuration options, read
-  ## more about them here:
-  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-  data_format = "dropwizard"
-
-  ## Used by the templating engine to join matched values when cardinality is > 1
-  separator = "_"
-
-  ## Each template line requires a template pattern. It can have an optional
-  ## filter before the template and separated by spaces. It can also have optional extra
-  ## tags following the template. Multiple tags should be separated by commas and no spaces
-  ## similar to the line protocol format. There can be only one default template.
-  ## Templates support below format:
-  ## 1. filter + template
-  ## 2. filter + template + extra tag(s)
-  ## 3. filter + template with field key
-  ## 4. default template
-  ## By providing an empty template array, templating is disabled and measurements are parsed as influxdb line protocol keys (measurement<,tag_set>)
-  templates = []
-
-  ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
-  ## to locate the metric registry within the JSON document
-  # dropwizard_metric_registry_path = "metrics"
-
-  ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
-  ## to locate the default time of the measurements within the JSON document
-  # dropwizard_time_path = "time"
-  # dropwizard_time_format = "2006-01-02T15:04:05Z07:00"
-
-  ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
-  ## to locate the tags map within the JSON document
-  # dropwizard_tags_path = "tags"
-
-  ## You may even use tag paths per tag
-  # [inputs.exec.dropwizard_tag_paths]
-  #  tag1 = "tags.tag1"
-  #  tag2 = "tags.tag2"
-```
-
-# Grok:
-
-The grok data format parses line delimited data using a regular expression like
-language.
-
-The best way to get acquainted with grok patterns is to read the logstash docs,
-which are available here:
-  https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html
-
-The grok parser uses a slightly modified version of logstash "grok"
-patterns, with the format:
-
-```
-%{<capture_syntax>[:<semantic_name>][:<modifier>]}
-```
-
-The `capture_syntax` defines the grok pattern that's used to parse the input
-line and the `semantic_name` is used to name the field or tag.
The extension
-`modifier` controls the data type that the parsed item is converted to or
-other special handling.
-
-By default all named captures are converted into string fields.
-Timestamp modifiers can be used to convert captures to the timestamp of the
-parsed metric. If no timestamp is parsed the metric will be created using the
-current time.
-
-You must capture at least one field per line.
-
-- Available modifiers:
-  - string (default if nothing is specified)
-  - int
-  - float
-  - duration (ie, 5.23ms gets converted to int nanoseconds)
-  - tag (converts the field into a tag)
-  - drop (drops the field completely)
-  - measurement (use the matched text as the measurement name)
-- Timestamp modifiers:
-  - ts (This will auto-learn the timestamp format)
-  - ts-ansic ("Mon Jan _2 15:04:05 2006")
-  - ts-unix ("Mon Jan _2 15:04:05 MST 2006")
-  - ts-ruby ("Mon Jan 02 15:04:05 -0700 2006")
-  - ts-rfc822 ("02 Jan 06 15:04 MST")
-  - ts-rfc822z ("02 Jan 06 15:04 -0700")
-  - ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST")
-  - ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST")
-  - ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700")
-  - ts-rfc3339 ("2006-01-02T15:04:05Z07:00")
-  - ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00")
-  - ts-httpd ("02/Jan/2006:15:04:05 -0700")
-  - ts-epoch (seconds since unix epoch, may contain decimal)
-  - ts-epochnano (nanoseconds since unix epoch)
-  - ts-syslog ("Jan 02 15:04:05", parsed time is set to the current year)
-  - ts-"CUSTOM"
-
-CUSTOM time layouts must be within quotes and be the representation of the
-"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`.
-To match a comma decimal point you can use a period in the pattern string.
-For example `%{TIMESTAMP:timestamp:ts-"2006-01-02 15:04:05.000"}` can be used to match `"2018-01-02 15:04:05,000"`.
-See https://golang.org/pkg/time/#Parse for more details.
-
-Telegraf has many of its own [built-in patterns](./grok/patterns/influx-patterns),
-as well as support for most of
-[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns).
-_Golang regular expressions do not support lookahead or lookbehind.
-logstash patterns that depend on these are not supported._
-
-If you need help building patterns to match your logs,
-you will find the https://grokdebug.herokuapp.com application quite useful!
-
-#### Grok Configuration:
-```toml
-[[inputs.file]]
-  ## Files to parse each interval.
-  ## These accept standard unix glob matching rules, but with the addition of
-  ## ** as a "super asterisk". ie:
-  ##   /var/log/**.log     -> recursively find all .log files in /var/log
-  ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
-  ##   /var/log/apache.log -> only tail the apache log file
-  files = ["/var/log/apache/access.log"]
-
-  ## The dataformat to be read from files
-  ## Each data format has its own unique set of configuration options, read
-  ## more about them here:
-  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-  data_format = "grok"
-
-  ## This is a list of patterns to check the given log file(s) for.
-  ## Note that adding patterns here increases processing time. The most
-  ## efficient configuration is to have one pattern.
- ## Other common built-in patterns are: - ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) - ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) - grok_patterns = ["%{COMBINED_LOG_FORMAT}"] - - ## Full path(s) to custom pattern files. - grok_custom_pattern_files = [] - - ## Custom patterns can also be defined here. Put one pattern per line. - grok_custom_patterns = ''' - ''' - - ## Timezone allows you to provide an override for timestamps that - ## don't already include an offset - ## e.g. 04/06/2016 12:41:45 data one two 5.43µs - ## - ## Default: "" which renders UTC - ## Options are as follows: - ## 1. Local -- interpret based on machine localtime - ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones - ## 3. UTC -- or blank/unspecified, will return timestamp in UTC - grok_timezone = "Canada/Eastern" -``` - -#### Timestamp Examples - -This example input and config parses a file using a custom timestamp conversion: - -``` -2017-02-21 13:10:34 value=42 -``` - -```toml -[[inputs.file]] - grok_patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} value=%{NUMBER:value:int}'] -``` - -This example input and config parses a file using a timestamp in unix time: - -``` -1466004605 value=42 -1466004605.123456789 value=42 -``` - -```toml -[[inputs.file]] - grok_patterns = ['%{NUMBER:timestamp:ts-epoch} value=%{NUMBER:value:int}'] -``` - -This example parses a file using a built-in conversion and a custom pattern: - -``` -Wed Apr 12 13:10:34 PST 2017 value=42 -``` - -```toml -[[inputs.file]] - grok_patterns = ["%{TS_UNIX:timestamp:ts-unix} value=%{NUMBER:value:int}"] - grok_custom_patterns = ''' - TS_UNIX %{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ} %{YEAR} - ''' -``` - -For cases where the timestamp itself is without offset, the `timezone` config var is available -to denote an offset. By default (with `timezone` either omit, blank or set to `"UTC"`), the times -are processed as if in the UTC timezone. If specified as `timezone = "Local"`, the timestamp -will be processed based on the current machine timezone configuration. Lastly, if using a -timezone from the list of Unix [timezones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), -grok will offset the timestamp accordingly. - -#### TOML Escaping - -When saving patterns to the configuration file, keep in mind the different TOML -[string](https://github.com/toml-lang/toml#string) types and the escaping -rules for each. These escaping rules must be applied in addition to the -escaping required by the grok syntax. Using the Multi-line line literal -syntax with `'''` may be useful. - -The following config examples will parse this input file: - -``` -|42|\uD83D\uDC2F|'telegraf'| -``` - -Since `|` is a special character in the grok language, we must escape it to -get a literal `|`. With a basic TOML string, special characters such as -backslash must be escaped, requiring us to escape the backslash a second time. - -```toml -[[inputs.file]] - grok_patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"] - grok_custom_patterns = "UNICODE_ESCAPE (?:\\\\u[0-9A-F]{4})+" -``` - -We cannot use a literal TOML string for the pattern, because we cannot match a -`'` within it. However, it works well for the custom pattern. 
-```toml
-[[inputs.file]]
-  grok_patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"]
-  grok_custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
-```
-
-A multi-line literal string allows us to encode the pattern:
-```toml
-[[inputs.file]]
-  grok_patterns = ['''
-    \|%{NUMBER:value:int}\|%{UNICODE_ESCAPE:escape}\|'%{WORD:name}'\|
-  ''']
-  grok_custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
-```
-
-#### Tips for creating patterns
-
-Writing complex patterns can be difficult, here is some advice for writing a
-new pattern or testing a pattern developed [online](https://grokdebug.herokuapp.com).
-
-Create a file output that writes to stdout, and disable other outputs while
-testing. This will allow you to see the captured metrics. Keep in mind that
-the file output will only print once per `flush_interval`.
-
-```toml
-[[outputs.file]]
-  files = ["stdout"]
-```
-
-- Start with a file containing only a single line of your input.
-- Remove all but the first token or piece of the line.
-- Add the section of your pattern to match this piece to your configuration file.
-- Verify that the metric is parsed successfully by running Telegraf.
-- If successful, add the next token, update the pattern and retest.
-- Continue one token at a time until the entire line is successfully parsed.
-
-# Logfmt
-This parser implements the logfmt format by extracting and converting key-value pairs from log text in the form `<key>=<value>`.
-At the moment, the plugin will produce one metric per line and all keys
-are added as fields.
-A typical log
-```
-method=GET host=influxdata.org ts=2018-07-24T19:43:40.275Z
-connect=4ms service=8ms status=200 bytes=1653
-```
-will be converted into
-```
-logfmt method="GET",host="influxdata.org",ts="2018-07-24T19:43:40.275Z",connect="4ms",service="8ms",status=200i,bytes=1653i
-
-```
-Additional information about the logfmt format can be found [here](https://brandur.org/logfmt).
-
-# Wavefront:
-
-Wavefront Data Format metrics are parsed directly into Telegraf metrics.
-For more information about the Wavefront Data Format see
-[here](https://docs.wavefront.com/wavefront_data_format.html).
-
-There are no additional configuration options for Wavefront Data Format line-protocol.
-
-#### Wavefront Configuration:
-
-```toml
-[[inputs.exec]]
-  ## Commands array
-  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
-
-  ## measurement name suffix (for separating different commands)
-  name_suffix = "_mycollector"
-
-  ## Data format to consume.
-  ## Each data format has its own unique set of configuration options, read
-  ## more about them here:
-  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-  data_format = "wavefront"
-```
-
-# CSV
-Parse out metrics from a CSV formatted table. By default, the parser assumes there is no header and
-will read data from the first line. If `csv_header_row_count` is set to anything besides 0, the parser
-will extract column names from that number of rows. Headers of more than 1 row will have their
-names concatenated together. Any unnamed columns will be ignored by the parser.
-
-The `csv_skip_rows` config indicates the number of rows to skip before looking for header information or data
-to parse. By default, no rows will be skipped.
-
-The `csv_skip_columns` config indicates the number of columns to be skipped before parsing data. These
-columns will not be read out of the header.
Naming with the `csv_column_names` will begin at the first
-parsed column after skipping the indicated columns. By default, no columns are skipped.
-
-To assign custom column names, the `csv_column_names` config is available. If the `csv_column_names`
-config is used, all columns must be named as additional columns will be ignored. If `csv_header_row_count`
-is set to 0, `csv_column_names` must be specified. Names listed in `csv_column_names` will override names extracted
-from the header.
-
-The `csv_tag_columns` and `csv_field_columns` configs are available to add the column data to the metric.
-The name used to specify the column is the name in the header, or if specified, the corresponding
-name assigned in `csv_column_names`. If neither config is specified, no data will be added to the metric.
-
-Additional configs are available to dynamically name metrics and set custom timestamps. If the
-`csv_measurement_column` config is specified, the parser will assign the metric name to the value found
-in that column. If the `csv_timestamp_column` is specified, the parser will extract the timestamp from
-that column. If `csv_timestamp_column` is specified, the `csv_timestamp_format` must also be specified
-or an error will be thrown.
-
-#### CSV Configuration
-```toml
-  data_format = "csv"
-
-  ## Indicates how many rows to treat as a header. By default, the parser assumes
-  ## there is no header and will parse the first row as data. If set to anything more
-  ## than 1, column names will be concatenated with the name listed in the next header row.
-  ## If `csv_column_names` is specified, the column names in header will be overridden.
-  # csv_header_row_count = 0
-
-  ## Indicates the number of rows to skip before looking for header information.
-  # csv_skip_rows = 0
-
-  ## Indicates the number of columns to skip before looking for data to parse.
-  ## These columns will be skipped in the header as well.
-  # csv_skip_columns = 0
-
-  ## The separator between csv fields
-  ## By default, the parser assumes a comma (",")
-  # csv_delimiter = ","
-
-  ## The character reserved for marking a row as a comment row
-  ## Commented rows are skipped and not parsed
-  # csv_comment = ""
-
-  ## If set to true, the parser will remove leading whitespace from fields
-  ## By default, this is false
-  # csv_trim_space = false
-
-  ## For assigning custom names to columns
-  ## If this is specified, all columns should have a name
-  ## Unnamed columns will be ignored by the parser.
-  ## If `csv_header_row_count` is set to 0, this config must be used
-  csv_column_names = []
-
-  ## Columns listed here will be added as tags. Any other columns
-  ## will be added as fields.
- csv_tag_columns = []
-
-  ## The column to extract the name of the metric from
-  ## By default, this is the name of the plugin
-  ## the `name_override` config overrides this
-  # csv_measurement_column = ""
-
-  ## The column to extract time information for the metric
-  ## `csv_timestamp_format` must be specified if this is used
-  # csv_timestamp_column = ""
-
-  ## The format of time data extracted from `csv_timestamp_column`
-  ## this must be specified if `csv_timestamp_column` is specified
-  # csv_timestamp_format = ""
-  ```
+[metrics]: /docs/METRICS.md
diff --git a/docs/DATA_FORMATS_OUTPUT.md b/docs/DATA_FORMATS_OUTPUT.md
index 609021656..c06ab4719 100644
--- a/docs/DATA_FORMATS_OUTPUT.md
+++ b/docs/DATA_FORMATS_OUTPUT.md
@@ -4,13 +4,14 @@ In addition to output specific data formats, Telegraf supports a set of
standard data formats that may be selected from when configuring many output
plugins.

-1. [InfluxDB Line Protocol](#influx)
-1. [JSON](#json)
-1. [Graphite](#graphite)
-1. [SplunkMetric](../plugins/serializers/splunkmetric/README.md)
+1. [InfluxDB Line Protocol](/plugins/serializers/influx)
+1. [JSON](/plugins/serializers/json)
+1. [Graphite](/plugins/serializers/graphite)
+1. [SplunkMetric](/plugins/serializers/splunkmetric)

You will be able to identify the plugins with support by the presence of a
`data_format` config option, for example, in the `file` output plugin:
+
```toml
[[outputs.file]]
  ## Files to write to, "stdout" is a specially handled file.
@@ -22,191 +23,3 @@ You will be able to identify the plugins with support by the presence of a
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"
```
-
-## Influx
-
-The `influx` data format outputs metrics using
-[InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/latest/write_protocols/line_protocol_tutorial/).
-This is the recommended format unless another format is required for
-interoperability.
-
-### Influx Configuration
-```toml
-[[outputs.file]]
-  ## Files to write to, "stdout" is a specially handled file.
-  files = ["stdout", "/tmp/metrics.out"]
-
-  ## Data format to output.
-  ## Each data format has its own unique set of configuration options, read
-  ## more about them here:
-  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
-  data_format = "influx"
-
-  ## Maximum line length in bytes.  Useful only for debugging.
-  # influx_max_line_bytes = 0
-
-  ## When true, fields will be output in ascending lexical order.  Enabling
-  ## this option will result in decreased performance and is only recommended
-  ## when you need predictable ordering while debugging.
-  # influx_sort_fields = false
-
-  ## When true, Telegraf will output unsigned integers as unsigned values,
-  ## i.e.: `42u`.  You will need a version of InfluxDB supporting unsigned
-  ## integer values.  Enabling this option will result in field type errors if
-  ## existing data has been written.
-  # influx_uint_support = false
-```
-
-## Graphite
-
-The Graphite data format is translated from Telegraf Metrics using either the
-template pattern or tag support method.  You can select between the two
-methods using the [`graphite_tag_support`](#graphite-tag-support) option.  When set, the tag support
-method is used, otherwise the [`template` pattern](#template-pattern) is used.
-
-#### Template Pattern
-
-The `template` option describes how Telegraf translates metrics into _dot_
-buckets.
The default template is:
-
-```
-template = "host.tags.measurement.field"
-```
-
-In the above template, we have four parts:
-
-1. _host_ is a tag key. This can be any tag key that is in the Telegraf
-metric(s). If the key doesn't exist, it will be ignored. If it does exist, the
-tag value will be filled in.
-1. _tags_ is a special keyword that outputs all remaining tag values, separated
-by dots and in alphabetical order (by tag key). These will be filled after all
-tag keys are filled.
-1. _measurement_ is a special keyword that outputs the measurement name.
-1. _field_ is a special keyword that outputs the field name.
-
-**Example Conversion**:
-
-```
-cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
-=>
-tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
-tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
-```
-
-Fields with string values will be skipped.  Boolean fields will be converted
-to 1 (true) or 0 (false).
-
-#### Graphite Tag Support
-
-When the `graphite_tag_support` option is enabled, the template pattern is not
-used.  Instead, tags are encoded using
-[Graphite tag support](http://graphite.readthedocs.io/en/latest/tags.html)
-added in Graphite 1.1.  The `metric_path` is a combination of the optional
-`prefix` option, measurement name, and field name.
-
-The tag `name` is reserved by Graphite; any conflicting tags will be encoded as `_name`.
-
-**Example Conversion**:
-```
-cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
-=>
-cpu.usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690
-cpu.usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690
-```
-
-### Graphite Configuration
-
-```toml
-[[outputs.file]]
-  ## Files to write to, "stdout" is a specially handled file.
-  files = ["stdout", "/tmp/metrics.out"]
-
-  ## Data format to output.
-  ## Each data format has its own unique set of configuration options, read
-  ## more about them here:
-  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
-  data_format = "graphite"
-
-  ## Prefix added to each graphite bucket
-  prefix = "telegraf"
-  ## Graphite template pattern
-  template = "host.tags.measurement.field"
-
-  ## Support Graphite tags, recommended to enable when using Graphite 1.1 or later.
-  # graphite_tag_support = false
-```
-
-## JSON
-
-The JSON data format output for a single metric is in the
-form:
-```json
-{
-    "fields": {
-        "field_1": 30,
-        "field_2": 4,
-        "field_N": 59,
-        "n_images": 660
-    },
-    "name": "docker",
-    "tags": {
-        "host": "raynor"
-    },
-    "timestamp": 1458229140
-}
-```
-
-When an output plugin needs to emit multiple metrics at one time, it may use
-the batch format.  The use of batch format is determined by the plugin;
-reference the documentation for the specific plugin.
-```json
-{
-    "metrics": [
-        {
-            "fields": {
-                "field_1": 30,
-                "field_2": 4,
-                "field_N": 59,
-                "n_images": 660
-            },
-            "name": "docker",
-            "tags": {
-                "host": "raynor"
-            },
-            "timestamp": 1458229140
-        },
-        {
-            "fields": {
-                "field_1": 30,
-                "field_2": 4,
-                "field_N": 59,
-                "n_images": 660
-            },
-            "name": "docker",
-            "tags": {
-                "host": "raynor"
-            },
-            "timestamp": 1458229140
-        }
-    ]
-}
-```
-
-### JSON Configuration
-
-```toml
-[[outputs.file]]
-  ## Files to write to, "stdout" is a specially handled file.
-  files = ["stdout", "/tmp/metrics.out"]
-
-  ## Data format to output.
- ## Each data format has its own unique set of configuration options, read
-  ## more about them here:
-  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
-  data_format = "json"
-
-  ## The resolution to use for the metric timestamp.  Must be a duration string
-  ## such as "1ns", "1us", "1ms", "10ms", "1s".  Durations are truncated to
-  ## the power of 10 less than the specified units.
-  json_timestamp_units = "1s"
-```
diff --git a/docs/METRICS.md b/docs/METRICS.md
new file mode 100644
index 000000000..1c238e30a
--- /dev/null
+++ b/docs/METRICS.md
@@ -0,0 +1,22 @@
+# Metrics
+
+Telegraf metrics are the internal representation used to model data during
+processing.  Metrics are closely based on InfluxDB's data model and contain
+four main components:
+
+- **Measurement Name**: Description and namespace for the metric.
+- **Tags**: Key/Value string pairs, usually used to identify the
+  metric.
+- **Fields**: Key/Value pairs that are typed and usually contain the
+  metric data.
+- **Timestamp**: Date and time associated with the fields.
+
+This metric type exists only in memory and must be converted to a concrete
+representation in order to be transmitted or viewed.  To achieve this we
+provide several [output data formats][] sometimes referred to as
+*serializers*.  Our default serializer converts to [InfluxDB Line
+Protocol][line protocol] which provides a high-performance, one-to-one
+direct mapping from Telegraf metrics.
+
+[output data formats]: /docs/DATA_FORMATS_OUTPUT.md
+[line protocol]: /plugins/serializers/influx
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 000000000..b7b55336c
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,21 @@
+# Telegraf
+
+- Concepts
+  - [Metrics][metrics]
+  - [Input Data Formats][parsers]
+  - [Output Data Formats][serializers]
+  - [Aggregators & Processors][aggproc]
+- Administration
+  - [Configuration][conf]
+  - [Profiling][profiling]
+  - [Windows Service][winsvc]
+  - [FAQ][faq]
+
+[conf]: /docs/CONFIGURATION.md
+[metrics]: /docs/METRICS.md
+[parsers]: /docs/DATA_FORMATS_INPUT.md
+[serializers]: /docs/DATA_FORMATS_OUTPUT.md
+[aggproc]: /docs/AGGREGATORS_AND_PROCESSORS.md
+[profiling]: /docs/PROFILING.md
+[winsvc]: /docs/WINDOWS_SERVICE.md
+[faq]: /docs/FAQ.md
diff --git a/docs/TEMPLATE_PATTERN.md b/docs/TEMPLATE_PATTERN.md
new file mode 100644
index 000000000..4244369d7
--- /dev/null
+++ b/docs/TEMPLATE_PATTERN.md
@@ -0,0 +1,135 @@
+# Template Patterns
+
+Template patterns are a mini language that describes how a dot delimited
+string should be mapped to and from [metrics][].
+
+A template has the form:
+```
+"host.mytag.mytag.measurement.measurement.field*"
+```
+
+Where the following keywords can be set:
+
+1. `measurement`: specifies that this section of the graphite bucket corresponds
+to the measurement name. This can be specified multiple times.
+2. `field`: specifies that this section of the graphite bucket corresponds
+to the field name. This can be specified multiple times.
+3. `measurement*`: specifies that all remaining elements of the graphite bucket
+correspond to the measurement name.
+4. `field*`: specifies that all remaining elements of the graphite bucket
+correspond to the field name.
+
+Any part of the template that is not a keyword is treated as a tag key. This
+can also be specified multiple times.
+
+**NOTE:** `field*` cannot be used in conjunction with `measurement*`.
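Before the examples, here is a rough Go sketch of how a template of this form could be applied to a dot-delimited bucket. It is written for this document only — it covers just the keywords listed above, with no filters, default templates, or extra tags — and is not the parser Telegraf actually ships:

```go
package main

import (
	"fmt"
	"strings"
)

// applyTemplate maps a dot-delimited bucket onto a template, returning the
// measurement name, field name, and tags. Illustrative sketch only.
func applyTemplate(template, bucket, separator string) (string, string, map[string]string) {
	keys := strings.Split(template, ".")
	parts := strings.Split(bucket, ".")
	tags := map[string]string{}
	var name, field []string
loop:
	for i, key := range keys {
		if i >= len(parts) {
			break
		}
		switch key {
		case "measurement":
			name = append(name, parts[i])
		case "measurement*": // all remaining elements become the measurement
			name = append(name, parts[i:]...)
			break loop
		case "field":
			field = append(field, parts[i])
		case "field*": // all remaining elements become the field
			field = append(field, parts[i:]...)
			break loop
		default: // any non-keyword is a tag key; repeats are joined
			if v, ok := tags[key]; ok {
				tags[key] = v + separator + parts[i]
			} else {
				tags[key] = parts[i]
			}
		}
	}
	fieldName := "value" // single value field when no field keyword is used
	if len(field) > 0 {
		fieldName = strings.Join(field, separator)
	}
	return strings.Join(name, separator), fieldName, tags
}

func main() {
	m, f, tags := applyTemplate("region.region.measurement*", "us.west.cpu.load", ".")
	fmt.Println(m, f, tags) // cpu.load value map[region:us.west]
}
```

Running this produces `cpu.load,region=us.west value=...`, matching the first transformation shown in the examples that follow.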
+ +### Examples + +#### Measurement & Tag Templates + +The most basic template is to specify a single transformation to apply to all +incoming metrics. So the following template: + +```toml +templates = [ + "region.region.measurement*" +] +``` + +would result in the following Graphite -> Telegraf transformation. + +``` +us.west.cpu.load 100 +=> cpu.load,region=us.west value=100 +``` + +Multiple templates can also be specified, but these should be differentiated +using _filters_ (see below for more details) + +```toml +templates = [ + "*.*.* region.region.measurement", # <- all 3-part measurements will match this one. + "*.*.*.* region.region.host.measurement", # <- all 4-part measurements will match this one. +] +``` + +#### Field Templates + +The field keyword tells Telegraf to give the metric that field name. +So the following template: + +```toml +separator = "_" +templates = [ + "measurement.measurement.field.field.region" +] +``` + +would result in the following Graphite -> Telegraf transformation. + +``` +cpu.usage.idle.percent.eu-east 100 +=> cpu_usage,region=eu-east idle_percent=100 +``` + +The field key can also be derived from all remaining elements of the graphite +bucket by specifying `field*`: + +```toml +separator = "_" +templates = [ + "measurement.measurement.region.field*" +] +``` + +which would result in the following Graphite -> Telegraf transformation. + +``` +cpu.usage.eu-east.idle.percentage 100 +=> cpu_usage,region=eu-east idle_percentage=100 +``` + +#### Filter Templates + +Users can also filter the template(s) to use based on the name of the bucket, +using glob matching, like so: + +```toml +templates = [ + "cpu.* measurement.measurement.region", + "mem.* measurement.measurement.host" +] +``` + +which would result in the following transformation: + +``` +cpu.load.eu-east 100 +=> cpu_load,region=eu-east value=100 + +mem.cached.localhost 256 +=> mem_cached,host=localhost value=256 +``` + +#### Adding Tags + +Additional tags can be added to a metric that don't exist on the received metric. +You can add additional tags by specifying them after the pattern. +Tags have the same format as the line protocol. +Multiple tags are separated by commas. + +```toml +templates = [ + "measurement.measurement.field.region datacenter=1a" +] +``` + +would result in the following Graphite -> Telegraf transformation. + +``` +cpu.usage.idle.eu-east 100 +=> cpu_usage,region=eu-east,datacenter=1a idle=100 +``` + +[metrics]: /docs/METRICS.md diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index 85cb4a46e..c1093bf39 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -10,7 +10,7 @@ ## MaxTCPConnection - applicable when protocol is set to tcp (default=250) max_tcp_connections = 250 - + ## Enable TCP keep alive probes (default=false) tcp_keep_alive = false @@ -45,7 +45,7 @@ parse_data_dog_tags = false ## Statsd data translation templates, more info can be read here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite + ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md # templates = [ # "cpu.* measurement*" # ] @@ -227,5 +227,5 @@ mem.cached.localhost:256|g => mem_cached,host=localhost 256 ``` -There are many more options available, -[More details can be found here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite) +Consult the [Template Patterns](/docs/TEMPLATE_PATTERN.md) documentation for +additional details. 
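For context on what feeds these templates, a statsd datagram has the shape `<bucket>:<value>|<type>` before the bucket name is run through the pattern. The Go sketch below is a simplified illustration written for this document — the real plugin additionally handles sample rates, multiple packed values, and DataDog-style tags:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseStatsd splits a minimal statsd datagram of the form
// <bucket>:<value>|<type>. Illustrative sketch only.
func parseStatsd(line string) (bucket string, value float64, metricType string, err error) {
	head, metricType, ok := strings.Cut(line, "|")
	if !ok {
		return "", 0, "", fmt.Errorf("missing type separator: %q", line)
	}
	bucket, raw, ok := strings.Cut(head, ":")
	if !ok {
		return "", 0, "", fmt.Errorf("missing value separator: %q", line)
	}
	value, err = strconv.ParseFloat(raw, 64)
	return bucket, value, metricType, err
}

func main() {
	bucket, value, typ, err := parseStatsd("mem.cached.localhost:256|g")
	if err != nil {
		panic(err)
	}
	fmt.Println(bucket, value, typ) // mem.cached.localhost 256 g
}
```

The extracted bucket (`mem.cached.localhost`) is then what a template such as `measurement.measurement.host` turns into `mem_cached,host=localhost 256`.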
diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go
index 60b55887e..6b0dd0b78 100644
--- a/plugins/inputs/statsd/statsd.go
+++ b/plugins/inputs/statsd/statsd.go
@@ -216,7 +216,7 @@ const sampleConfig = `
  parse_data_dog_tags = false

  ## Statsd data translation templates, more info can be read here:
-  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
+  ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
  # templates = [
  #     "cpu.* measurement*"
  # ]
diff --git a/plugins/parsers/EXAMPLE_README.md b/plugins/parsers/EXAMPLE_README.md
new file mode 100644
index 000000000..b3c1bc2e2
--- /dev/null
+++ b/plugins/parsers/EXAMPLE_README.md
@@ -0,0 +1,46 @@
+# Example
+
+This description explains at a high level what the parser does and provides
+links to where additional information about the format can be found.
+
+### Configuration
+
+This section contains the sample configuration for the parser.  Since the
+configuration for a parser is not a standalone plugin, use the `file` or
+`exec` input as the base config.
+
+```toml
+[[inputs.file]]
+  files = ["example"]
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "example"
+
+  ## Describe variables using the standard SampleConfig style.
+  ## https://github.com/influxdata/telegraf/wiki/SampleConfig
+  example_option = "example_value"
+```
+
+#### example_option
+
+If an option requires a more expansive explanation than can be included inline
+in the sample configuration, it may be described here.
+
+### Metrics
+
+The optional Metrics section contains details about how the parser converts
+input data into Telegraf metrics.
+
+### Examples
+
+The optional Examples section can show an example conversion from the input
+format using InfluxDB Line Protocol as the reference format.
+
+For line delimited text formats a diff may be appropriate:
+```diff
+- cpu|host=localhost|source=example.org|value=42
++ cpu,host=localhost,source=example.org value=42
+```
diff --git a/plugins/parsers/collectd/README.md b/plugins/parsers/collectd/README.md
new file mode 100644
index 000000000..06f14d6d4
--- /dev/null
+++ b/plugins/parsers/collectd/README.md
@@ -0,0 +1,44 @@
+# Collectd
+
+The collectd format parses the collectd binary network protocol.  Tags are
+created for host, instance, type, and type instance.  All collectd values are
+added as float64 fields.
+
+For more information about the binary network protocol see
+[here](https://collectd.org/wiki/index.php/Binary_protocol).
+
+You can control the cryptographic settings with parser options.  Create an
+authentication file and set `collectd_auth_file` to the path of the file, then
+set the desired security level in `collectd_security_level`.
+
+Additional information including client setup can be found
+[here](https://collectd.org/wiki/index.php/Networking_introduction#Cryptographic_setup).
+
+You can also change the path to the typesdb or add additional typesdb using
+`collectd_typesdb`.
+
+### Configuration
+
+```toml
+[[inputs.file]]
+  files = ["example"]
+
+  ## Data format to consume.
+ ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "collectd"
+
+  ## Authentication file for cryptographic security levels
+  collectd_auth_file = "/etc/collectd/auth_file"
+  ## One of none (default), sign, or encrypt
+  collectd_security_level = "encrypt"
+  ## Path to TypesDB specifications
+  collectd_typesdb = ["/usr/share/collectd/types.db"]
+
+  ## Multi-value plugins can be handled two ways.
+  ## "split" will parse and store the multi-value plugin data into separate measurements
+  ## "join" will parse and store the multi-value plugin as a single multi-value measurement.
+  ## "split" is the default behavior for backward compatibility with previous versions of influxdb.
+  collectd_parse_multivalue = "split"
+```
diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md
new file mode 100644
index 000000000..532980991
--- /dev/null
+++ b/plugins/parsers/csv/README.md
@@ -0,0 +1,104 @@
+# CSV
+
+The `csv` parser creates metrics from a document containing comma separated
+values.
+
+### Configuration
+
+```toml
+[[inputs.file]]
+  files = ["example"]
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "csv"
+
+  ## Indicates how many rows to treat as a header. By default, the parser assumes
+  ## there is no header and will parse the first row as data. If set to anything more
+  ## than 1, column names will be concatenated with the name listed in the next header row.
+  ## If `csv_column_names` is specified, the column names in header will be overridden.
+  csv_header_row_count = 0
+
+  ## For assigning custom names to columns
+  ## If this is specified, all columns should have a name
+  ## Unnamed columns will be ignored by the parser.
+  ## If `csv_header_row_count` is set to 0, this config must be used
+  csv_column_names = []
+
+  ## Indicates the number of rows to skip before looking for header information.
+  csv_skip_rows = 0
+
+  ## Indicates the number of columns to skip before looking for data to parse.
+  ## These columns will be skipped in the header as well.
+  csv_skip_columns = 0
+
+  ## The separator between csv fields
+  ## By default, the parser assumes a comma (",")
+  csv_delimiter = ","
+
+  ## The character reserved for marking a row as a comment row
+  ## Commented rows are skipped and not parsed
+  csv_comment = ""
+
+  ## If set to true, the parser will remove leading whitespace from fields
+  ## By default, this is false
+  csv_trim_space = false
+
+  ## Columns listed here will be added as tags. Any other columns
+  ## will be added as fields.
+ csv_tag_columns = []
+
+  ## The column to extract the name of the metric from
+  csv_measurement_column = ""
+
+  ## The column to extract time information for the metric
+  ## `csv_timestamp_format` must be specified if this is used
+  csv_timestamp_column = ""
+
+  ## The format of time data extracted from `csv_timestamp_column`
+  ## this must be specified if `csv_timestamp_column` is specified
+  csv_timestamp_format = ""
+  ```
+#### csv_timestamp_column, csv_timestamp_format
+
+By default the current time will be used for all created metrics; to set the
+time from the CSV document you can use the `csv_timestamp_column` and
+`csv_timestamp_format` options together to set the time to a value in the parsed
+document.
+
+The `csv_timestamp_column` option specifies the column name containing the
+time value and `csv_timestamp_format` must be set to a Go "reference time"
+which is defined to be the specific time: `Mon Jan 2 15:04:05 MST 2006`.
+
+Consult the Go [time][time parse] package for details and additional examples
+on how to set the time format.
+
+### Metrics
+
+One metric is created for each row with the columns added as fields.  The type
+of the field is automatically determined based on the contents of the value.
+
+### Examples
+
+Config:
+```
+[[inputs.file]]
+  files = ["example"]
+  data_format = "csv"
+  csv_header_row_count = 1
+  csv_timestamp_column = "time"
+  csv_timestamp_format = "2006-01-02T15:04:05Z07:00"
+```
+
+Input:
+```
+measurement,cpu,time_user,time_system,time_idle,time
+cpu,cpu0,42,42,42,2018-09-13T13:03:28Z
+```
+
+Output:
+```
+cpu cpu=cpu0,time_user=42,time_system=42,time_idle=42 1536869008000000000
+```
diff --git a/plugins/parsers/dropwizard/README.md b/plugins/parsers/dropwizard/README.md
new file mode 100644
index 000000000..f0ff6d15c
--- /dev/null
+++ b/plugins/parsers/dropwizard/README.md
@@ -0,0 +1,171 @@
+# Dropwizard
+
+The `dropwizard` data format can parse the [JSON Dropwizard][dropwizard] representation of a single dropwizard metric registry. By default, tags are parsed from metric names as if they were actual influxdb line protocol keys (`measurement<,tag_set>`) which can be overridden by defining a custom [template pattern][templates]. All field value types are supported, `string`, `number` and `boolean`.
+
+[templates]: /docs/TEMPLATE_PATTERN.md
+[dropwizard]: http://metrics.dropwizard.io/3.1.0/manual/json/
+
+### Configuration
+
+```toml
+[[inputs.file]]
+  files = ["example"]
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "dropwizard"
+
+  ## Used by the templating engine to join matched values when cardinality is > 1
+  separator = "_"
+
+  ## Each template line requires a template pattern.  It can have an optional
+  ## filter before the template and separated by spaces.  It can also have optional extra
+  ## tags following the template.  Multiple tags should be separated by commas and no spaces
+  ## similar to the line protocol format.  There can be only one default template.
+  ## Templates support below format:
+  ## 1. filter + template
+  ## 2. filter + template + extra tag(s)
+  ## 3. filter + template with field key
+  ## 4.
default template + ## By providing an empty template array, templating is disabled and measurements are parsed as influxdb line protocol keys (measurement<,tag_set>) + templates = [] + + ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) + ## to locate the metric registry within the JSON document + # dropwizard_metric_registry_path = "metrics" + + ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) + ## to locate the default time of the measurements within the JSON document + # dropwizard_time_path = "time" + # dropwizard_time_format = "2006-01-02T15:04:05Z07:00" + + ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) + ## to locate the tags map within the JSON document + # dropwizard_tags_path = "tags" + + ## You may even use tag paths per tag + # [inputs.exec.dropwizard_tag_paths] + # tag1 = "tags.tag1" + # tag2 = "tags.tag2" +``` + + +### Examples + +A typical JSON of a dropwizard metric registry: + +```json +{ + "version": "3.0.0", + "counters" : { + "measurement,tag1=green" : { + "count" : 1 + } + }, + "meters" : { + "measurement" : { + "count" : 1, + "m15_rate" : 1.0, + "m1_rate" : 1.0, + "m5_rate" : 1.0, + "mean_rate" : 1.0, + "units" : "events/second" + } + }, + "gauges" : { + "measurement" : { + "value" : 1 + } + }, + "histograms" : { + "measurement" : { + "count" : 1, + "max" : 1.0, + "mean" : 1.0, + "min" : 1.0, + "p50" : 1.0, + "p75" : 1.0, + "p95" : 1.0, + "p98" : 1.0, + "p99" : 1.0, + "p999" : 1.0, + "stddev" : 1.0 + } + }, + "timers" : { + "measurement" : { + "count" : 1, + "max" : 1.0, + "mean" : 1.0, + "min" : 1.0, + "p50" : 1.0, + "p75" : 1.0, + "p95" : 1.0, + "p98" : 1.0, + "p99" : 1.0, + "p999" : 1.0, + "stddev" : 1.0, + "m15_rate" : 1.0, + "m1_rate" : 1.0, + "m5_rate" : 1.0, + "mean_rate" : 1.0, + "duration_units" : "seconds", + "rate_units" : "calls/second" + } + } +} +``` + +Would get translated into 4 different measurements: + +``` +measurement,metric_type=counter,tag1=green count=1 +measurement,metric_type=meter count=1,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0 +measurement,metric_type=gauge value=1 +measurement,metric_type=histogram count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0 +measurement,metric_type=timer count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0,stddev=1.0,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0 +``` + +You may also parse a dropwizard registry from any JSON document which contains a dropwizard registry in some inner field. +Eg. to parse the following JSON document: + +```json +{ + "time" : "2017-02-22T14:33:03.662+02:00", + "tags" : { + "tag1" : "green", + "tag2" : "yellow" + }, + "metrics" : { + "counters" : { + "measurement" : { + "count" : 1 + } + }, + "meters" : {}, + "gauges" : {}, + "histograms" : {}, + "timers" : {} + } +} +``` +and translate it into: + +``` +measurement,metric_type=counter,tag1=green,tag2=yellow count=1 1487766783662000000 +``` + +you simply need to use the following additional configuration properties: + +```toml +dropwizard_metric_registry_path = "metrics" +dropwizard_time_path = "time" +dropwizard_time_format = "2006-01-02T15:04:05Z07:00" +dropwizard_tags_path = "tags" +## tag paths per tag are supported too, eg. 
+#[inputs.yourinput.dropwizard_tag_paths]
+#  tag1 = "tags.tag1"
+#  tag2 = "tags.tag2"
+```
diff --git a/plugins/parsers/graphite/README.md b/plugins/parsers/graphite/README.md
new file mode 100644
index 000000000..b0b1127aa
--- /dev/null
+++ b/plugins/parsers/graphite/README.md
@@ -0,0 +1,48 @@
+# Graphite
+
+The Graphite data format translates graphite *dot* buckets directly into
+telegraf measurement names, with a single value field, and without any tags.
+By default, the separator is left as `.`, but this can be changed using the
+`separator` argument. For more advanced options, Telegraf supports specifying
+[templates](#templates) to translate graphite buckets into Telegraf metrics.
+
+### Configuration
+
+```toml
+[[inputs.exec]]
+  ## Commands array
+  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
+
+  ## measurement name suffix (for separating different commands)
+  name_suffix = "_mycollector"
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "graphite"
+
+  ## This string will be used to join the matched values.
+  separator = "_"
+
+  ## Each template line requires a template pattern. It can have an optional
+  ## filter before the template, separated by spaces. It can also have optional extra
+  ## tags following the template. Multiple tags should be separated by commas and no spaces,
+  ## similar to the line protocol format. There can be only one default template.
+  ## Templates support these formats:
+  ## 1. filter + template
+  ## 2. filter + template + extra tag(s)
+  ## 3. filter + template with field key
+  ## 4. default template
+  templates = [
+    "*.app env.service.resource.measurement",
+    "stats.* .host.measurement* region=eu-east,agent=sensu",
+    "stats2.* .host.measurement.field",
+    "measurement*"
+  ]
+```
+
+#### templates
+
+Consult the [Template Patterns](/docs/TEMPLATE_PATTERN.md) documentation for
+details.
diff --git a/plugins/parsers/grok/README.md b/plugins/parsers/grok/README.md
new file mode 100644
index 000000000..7b22d340e
--- /dev/null
+++ b/plugins/parsers/grok/README.md
@@ -0,0 +1,222 @@
+# Grok
+
+The grok data format parses line-delimited data using a regular-expression-like
+language.
+
+The best way to get acquainted with grok patterns is to read the logstash docs,
+which are available here:
+  https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html
+
+The grok parser uses a slightly modified version of logstash "grok"
+patterns, with the format:
+
+```
+%{<capture_syntax>[:<semantic_name>][:<modifier>]}
+```
+
+The `capture_syntax` defines the grok pattern that's used to parse the input
+line and the `semantic_name` is used to name the field or tag. The extension
+`modifier` controls the data type that the parsed item is converted to or
+other special handling.
+
+By default all named captures are converted into string fields.
+Timestamp modifiers can be used to convert captures to the timestamp of the
+parsed metric. If no timestamp is parsed the metric will be created using the
+current time.
+
+You must capture at least one field per line.
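+
+For example, given a hypothetical log line `127.0.0.1 GET /index.html 0.043`,
+a pattern along these lines could parse it (a sketch only; the semantic names
+`client`, `method`, `path`, and `duration` are illustrative, and the modifiers
+are described in the list below):
+
+```
+%{IP:client:tag} %{WORD:method:tag} %{URIPATH:path} %{NUMBER:duration:float}
+```
+
+Here `client` and `method` would become tags, `path` a string field, and
+`duration` a float field.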
+
+- Available modifiers:
+  - string (default if nothing is specified)
+  - int
+  - float
+  - duration (i.e., 5.23ms gets converted to int nanoseconds)
+  - tag (converts the field into a tag)
+  - drop (drops the field completely)
+  - measurement (use the matched text as the measurement name)
+- Timestamp modifiers:
+  - ts (This will auto-learn the timestamp format)
+  - ts-ansic ("Mon Jan _2 15:04:05 2006")
+  - ts-unix ("Mon Jan _2 15:04:05 MST 2006")
+  - ts-ruby ("Mon Jan 02 15:04:05 -0700 2006")
+  - ts-rfc822 ("02 Jan 06 15:04 MST")
+  - ts-rfc822z ("02 Jan 06 15:04 -0700")
+  - ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST")
+  - ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST")
+  - ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700")
+  - ts-rfc3339 ("2006-01-02T15:04:05Z07:00")
+  - ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00")
+  - ts-httpd ("02/Jan/2006:15:04:05 -0700")
+  - ts-epoch (seconds since unix epoch, may contain decimal)
+  - ts-epochnano (nanoseconds since unix epoch)
+  - ts-syslog ("Jan 02 15:04:05", parsed time is set to the current year)
+  - ts-"CUSTOM"
+
+CUSTOM time layouts must be within quotes and be the representation of the
+"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`.
+To match a comma decimal point you can use a period in the pattern string;
+for example, `%{TIMESTAMP:timestamp:ts-"2006-01-02 15:04:05.000"}` can be used
+to match `"2018-01-02 15:04:05,000"`.
+See https://golang.org/pkg/time/#Parse for more details.
+
+Telegraf has many of its own [built-in patterns](./grok/patterns/influx-patterns),
+as well as support for most of
+[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns).
+_Golang regular expressions do not support lookahead or lookbehind;
+logstash patterns that depend on these are not supported._
+
+If you need help building patterns to match your logs,
+you will find the https://grokdebug.herokuapp.com application quite useful!
+
+### Configuration
+```toml
+[[inputs.file]]
+  ## Files to parse each interval.
+  ## These accept standard unix glob matching rules, but with the addition of
+  ## ** as a "super asterisk". ie:
+  ##   /var/log/**.log     -> recursively find all .log files in /var/log
+  ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
+  ##   /var/log/apache.log -> only tail the apache log file
+  files = ["/var/log/apache/access.log"]
+
+  ## The data format to be read from files
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "grok"
+
+  ## This is a list of patterns to check the given log file(s) for.
+  ## Note that adding patterns here increases processing time. The most
+  ## efficient configuration is to have one pattern.
+  ## Other common built-in patterns are:
+  ##   %{COMMON_LOG_FORMAT}   (plain apache & nginx access logs)
+  ##   %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
+  grok_patterns = ["%{COMBINED_LOG_FORMAT}"]
+
+  ## Full path(s) to custom pattern files.
+  grok_custom_pattern_files = []
+
+  ## Custom patterns can also be defined here. Put one pattern per line.
+  grok_custom_patterns = '''
+  '''
+
+  ## Timezone allows you to provide an override for timestamps that
+  ## don't already include an offset
+  ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
+  ##
+  ## Default: "" which renders UTC
+  ## Options are as follows:
+  ##   1. Local             -- interpret based on machine localtime
+  ##   2. "Canada/Eastern"  -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+  ##   3. UTC               -- or blank/unspecified, will return timestamp in UTC
+  grok_timezone = "Canada/Eastern"
+```
+
+#### Timestamp Examples
+
+This example input and config parses a file using a custom timestamp conversion:
+
+```
+2017-02-21 13:10:34 value=42
+```
+
+```toml
+[[inputs.file]]
+  grok_patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} value=%{NUMBER:value:int}']
+```
+
+This example input and config parses a file using a timestamp in unix time:
+
+```
+1466004605 value=42
+1466004605.123456789 value=42
+```
+
+```toml
+[[inputs.file]]
+  grok_patterns = ['%{NUMBER:timestamp:ts-epoch} value=%{NUMBER:value:int}']
+```
+
+This example parses a file using a built-in conversion and a custom pattern:
+
+```
+Wed Apr 12 13:10:34 PST 2017 value=42
+```
+
+```toml
+[[inputs.file]]
+  grok_patterns = ["%{TS_UNIX:timestamp:ts-unix} value=%{NUMBER:value:int}"]
+  grok_custom_patterns = '''
+    TS_UNIX %{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ} %{YEAR}
+  '''
+```
+
+For cases where the timestamp itself is without offset, the `grok_timezone` config option is available
+to denote an offset. By default (with `grok_timezone` either omitted, blank, or set to `"UTC"`), the times
+are processed as if in the UTC timezone. If specified as `grok_timezone = "Local"`, the timestamp
+will be processed based on the current machine timezone configuration. Lastly, if using a
+timezone from the list of Unix [timezones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones),
+grok will offset the timestamp accordingly.
+
+#### TOML Escaping
+
+When saving patterns to the configuration file, keep in mind the different TOML
+[string](https://github.com/toml-lang/toml#string) types and the escaping
+rules for each. These escaping rules must be applied in addition to the
+escaping required by the grok syntax. Using the multi-line literal
+syntax with `'''` may be useful.
+
+The following config examples will parse this input file:
+
+```
+|42|\uD83D\uDC2F|'telegraf'|
+```
+
+Since `|` is a special character in the grok language, we must escape it to
+get a literal `|`. With a basic TOML string, special characters such as
+backslash must be escaped, requiring us to escape the backslash a second time.
+
+```toml
+[[inputs.file]]
+  grok_patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"]
+  grok_custom_patterns = "UNICODE_ESCAPE (?:\\\\u[0-9A-F]{4})+"
+```
+
+We cannot use a literal TOML string for the pattern, because we cannot match a
+`'` within it. However, it works well for the custom pattern.
+```toml
+[[inputs.file]]
+  grok_patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"]
+  grok_custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
+```
+
+A multi-line literal string allows us to encode the pattern:
+```toml
+[[inputs.file]]
+  grok_patterns = ['''
+    \|%{NUMBER:value:int}\|%{UNICODE_ESCAPE:escape}\|'%{WORD:name}'\|
+  ''']
+  grok_custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
+```
+
+#### Tips for creating patterns
+
+Writing complex patterns can be difficult; here is some advice for writing a
+new pattern or testing a pattern developed [online](https://grokdebug.herokuapp.com).
+
+Create a file output that writes to stdout, and disable other outputs while
+testing. This will allow you to see the captured metrics. Keep in mind that
+the file output will only print once per `flush_interval`.
+
+```toml
+[[outputs.file]]
+  files = ["stdout"]
+```
+
+- Start with a file containing only a single line of your input.
+- Remove all but the first token or piece of the line.
+- Add the section of your pattern to match this piece to your configuration file.
+- Verify that the metric is parsed successfully by running Telegraf.
+- If successful, add the next token, update the pattern and retest.
+- Continue one token at a time until the entire line is successfully parsed.
+
diff --git a/plugins/parsers/influx/README.md b/plugins/parsers/influx/README.md
new file mode 100644
index 000000000..51c0106e6
--- /dev/null
+++ b/plugins/parsers/influx/README.md
@@ -0,0 +1,20 @@
+# InfluxDB Line Protocol
+
+There are no additional configuration options for InfluxDB [line protocol][]. The
+metrics are parsed directly into Telegraf metrics.
+
+[line protocol]: https://docs.influxdata.com/influxdb/latest/write_protocols/line/
+
+### Configuration
+
+```toml
+[[inputs.file]]
+  files = ["example"]
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "influx"
+```
+
diff --git a/plugins/parsers/json/README.md b/plugins/parsers/json/README.md
new file mode 100644
index 000000000..fa0d767ff
--- /dev/null
+++ b/plugins/parsers/json/README.md
@@ -0,0 +1,214 @@
+# JSON
+
+The JSON data format parses a [JSON][json] object or an array of objects into
+metric fields.
+
+**NOTE:** All JSON numbers are converted to float fields. JSON strings are
+ignored unless specified in the `tag_keys` or `json_string_fields` options.
+
+### Configuration
+
+```toml
+[[inputs.file]]
+  files = ["example"]
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "json"
+
+  ## Query is a GJSON path that specifies a specific chunk of JSON to be
+  ## parsed, if not specified the whole document will be parsed.
+  ##
+  ## GJSON query paths are described here:
+  ##   https://github.com/tidwall/gjson#path-syntax
+  json_query = ""
+
+  ## Tag keys is an array of keys that should be added as tags.
+  tag_keys = [
+    "my_tag_1",
+    "my_tag_2"
+  ]
+
+  ## String fields is an array of keys that should be added as string fields.
+  json_string_fields = []
+
+  ## Name key is the key to use as the measurement name.
+  json_name_key = ""
+
+  ## Time key is the key containing the time that should be used to create the
+  ## metric.
+  json_time_key = ""
+
+  ## Time format is the time layout that should be used to interpret the
+  ## json_time_key. The time must be `unix`, `unix_ms` or a time in the
+  ## "reference time".
+  ##   ex: json_time_format = "Mon Jan 2 15:04:05 -0700 MST 2006"
+  ##       json_time_format = "2006-01-02T15:04:05Z07:00"
+  ##       json_time_format = "unix"
+  ##       json_time_format = "unix_ms"
+  json_time_format = ""
+```
+
+#### json_query
+
+The `json_query` is a [GJSON][gjson] path that can be used to limit the
+portion of the overall JSON document that should be parsed. The result of the
+query should contain a JSON object or an array of objects.
+
+Consult the GJSON [path syntax][gjson syntax] for details and examples.
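+
+As a quick illustration (a minimal sketch; see the full Query example below
+for a complete configuration), given a document like
+`{"status": "ok", "data": {"a": 5, "b": 6}}`, setting `json_query = "data"`
+restricts parsing to the inner object, so with the `file` input the resulting
+metric would look roughly like:
+
+```
+file a=5,b=6
+```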
+
+#### json_time_key, json_time_format
+
+By default the current time will be used for all created metrics. To set the
+time from the JSON document, you can use the `json_time_key` and
+`json_time_format` options together to set the time to a value in the parsed
+document.
+
+The `json_time_key` option specifies the key containing the time value and
+`json_time_format` must be set to `unix`, `unix_ms`, or the Go "reference
+time" which is defined to be the specific time: `Mon Jan 2 15:04:05 MST 2006`.
+
+Consult the Go [time][time parse] package for details and additional examples
+on how to set the time format.
+
+### Examples
+
+#### Basic Parsing
+
+Config:
+```toml
+[[inputs.file]]
+  files = ["example"]
+  name_override = "myjsonmetric"
+  data_format = "json"
+```
+
+Input:
+```json
+{
+  "a": 5,
+  "b": {
+    "c": 6
+  },
+  "ignored": "I'm a string"
+}
+```
+
+Output:
+```
+myjsonmetric a=5,b_c=6
+```
+
+#### Name, Tags, and String Fields
+
+Config:
+```toml
+[[inputs.file]]
+  files = ["example"]
+  json_name_key = "name"
+  tag_keys = ["my_tag_1"]
+  json_string_fields = ["my_field"]
+  data_format = "json"
+```
+
+Input:
+```json
+{
+  "a": 5,
+  "b": {
+    "c": 6,
+    "my_field": "description"
+  },
+  "my_tag_1": "foo",
+  "name": "my_json"
+}
+```
+
+Output:
+```
+my_json,my_tag_1=foo a=5,b_c=6,my_field="description"
+```
+
+#### Arrays
+
+If the JSON data is an array, then each object within the array is parsed with
+the configured settings.
+
+Config:
+```toml
+[[inputs.file]]
+  files = ["example"]
+  data_format = "json"
+  json_time_key = "b_time"
+  json_time_format = "02 Jan 06 15:04 MST"
+```
+
+Input:
+```json
+[
+  {
+    "a": 5,
+    "b": {
+      "c": 6,
+      "time": "04 Jan 06 15:04 MST"
+    }
+  },
+  {
+    "a": 7,
+    "b": {
+      "c": 8,
+      "time": "11 Jan 07 15:04 MST"
+    }
+  }
+]
+```
+
+Output:
+```
+file a=5,b_c=6 1136387040000000000
+file a=7,b_c=8 1168527840000000000
+```
+
+#### Query
+
+The `json_query` option can be used to parse a subset of the document.
+
+Config:
+```toml
+[[inputs.file]]
+  files = ["example"]
+  data_format = "json"
+  tag_keys = ["first"]
+  json_string_fields = ["last"]
+  json_query = "obj.friends"
+```
+
+Input:
+```json
+{
+  "obj": {
+    "name": {"first": "Tom", "last": "Anderson"},
+    "age": 37,
+    "children": ["Sara", "Alex", "Jack"],
+    "fav.movie": "Deer Hunter",
+    "friends": [
+      {"first": "Dale", "last": "Murphy", "age": 44},
+      {"first": "Roger", "last": "Craig", "age": 68},
+      {"first": "Jane", "last": "Murphy", "age": 47}
+    ]
+  }
+}
+```
+
+Output:
+```
+file,first=Dale last="Murphy",age=44
+file,first=Roger last="Craig",age=68
+file,first=Jane last="Murphy",age=47
+```
+
+[gjson]: https://github.com/tidwall/gjson
+[gjson syntax]: https://github.com/tidwall/gjson#path-syntax
+[json]: https://www.json.org/
+[time parse]: https://golang.org/pkg/time/#Parse
diff --git a/plugins/parsers/logfmt/README.md b/plugins/parsers/logfmt/README.md
new file mode 100644
index 000000000..fb3a565b3
--- /dev/null
+++ b/plugins/parsers/logfmt/README.md
@@ -0,0 +1,34 @@
+# Logfmt
+
+The `logfmt` data format parses data in [logfmt] format.
+
+[logfmt]: https://brandur.org/logfmt
+
+### Configuration
+
+```toml
+[[inputs.file]]
+  files = ["example"]
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "logfmt"
+
+  ## Set the name of the created metric; if unset, the name of the plugin will
+  ## be used.
+  metric_name = "logfmt"
+```
+
+### Metrics
+
+Each key/value pair in the line is added to a new metric as a field. The type
+of the field is automatically determined based on the contents of the value.
+
+### Examples
+
+```
+- method=GET host=example.org ts=2018-07-24T19:43:40.275Z connect=4ms service=8ms status=200 bytes=1653
++ logfmt method="GET",host="example.org",ts="2018-07-24T19:43:40.275Z",connect="4ms",service="8ms",status=200i,bytes=1653i
+```
diff --git a/plugins/parsers/nagios/README.md b/plugins/parsers/nagios/README.md
new file mode 100644
index 000000000..e9be6a0dd
--- /dev/null
+++ b/plugins/parsers/nagios/README.md
@@ -0,0 +1,17 @@
+# Nagios
+
+The `nagios` data format parses the output of Nagios plugins.
+
+### Configuration
+
+```toml
+[[inputs.exec]]
+  ## Commands array
+  commands = ["/usr/lib/nagios/plugins/check_load -w 5,6,7 -c 7,8,9"]
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "nagios"
+```
diff --git a/plugins/parsers/value/README.md b/plugins/parsers/value/README.md
new file mode 100644
index 000000000..db184d4e8
--- /dev/null
+++ b/plugins/parsers/value/README.md
@@ -0,0 +1,36 @@
+# Value
+
+The "value" data format translates single values into Telegraf metrics. This
+is done by assigning a measurement name and setting a single field ("value")
+as the parsed metric.
+
+### Configuration
+
+You **must** tell Telegraf what type of metric to collect by using the
+`data_type` configuration option. Available options are:
+
+1. integer
+2. float or long
+3. string
+4. boolean
+
+**Note:** It is also recommended that you set `name_override` to a measurement
+name that makes sense for your metric; otherwise it will just be set to the
+name of the plugin.
+
+```toml
+[[inputs.exec]]
+  ## Commands array
+  commands = ["cat /proc/sys/kernel/random/entropy_avail"]
+
+  ## override the default metric name of "exec"
+  name_override = "entropy_available"
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "value"
+  data_type = "integer" # required
+```
+
diff --git a/plugins/parsers/wavefront/README.md b/plugins/parsers/wavefront/README.md
new file mode 100644
index 000000000..ab7c56eed
--- /dev/null
+++ b/plugins/parsers/wavefront/README.md
@@ -0,0 +1,20 @@
+# Wavefront
+
+The `wavefront` data format parses metrics in the Wavefront Data Format
+directly into Telegraf metrics. For more information about the Wavefront
+Data Format see [here](https://docs.wavefront.com/wavefront_data_format.html).
+
+### Configuration
+
+There are no additional configuration options for the Wavefront Data Format line protocol.
+
+```toml
+[[inputs.file]]
+  files = ["example"]
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "wavefront"
+```
diff --git a/plugins/serializers/EXAMPLE_README.md b/plugins/serializers/EXAMPLE_README.md
new file mode 100644
index 000000000..11965c07f
--- /dev/null
+++ b/plugins/serializers/EXAMPLE_README.md
@@ -0,0 +1,46 @@
+# Example
+
+This description explains at a high level what the serializer does and
+provides links to where additional information about the format can be found.
+
+### Configuration
+
+This section contains the sample configuration for the serializer. Since a
+serializer is not a standalone plugin, use the `file` or `http` outputs as
+the base config.
+
+```toml
+[[outputs.file]]
+  files = ["stdout"]
+
+  ## Describe variables using the standard SampleConfig style.
+  ## https://github.com/influxdata/telegraf/wiki/SampleConfig
+  example_option = "example_value"
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "example"
+```
+
+#### example_option
+
+If an option requires a more expansive explanation than can be included inline
+in the sample configuration, it may be described here.
+
+### Metrics
+
+The optional Metrics section contains details about how the serializer converts
+Telegraf metrics into output.
+
+### Example
+
+The optional Example section can show an example conversion to the output
+format using InfluxDB Line Protocol as the reference format.
+
+For line delimited text formats a diff may be appropriate:
+```diff
+- cpu,host=localhost,source=example.org value=42
++ cpu|host=localhost|source=example.org|value=42
+```
diff --git a/plugins/serializers/graphite/README.md b/plugins/serializers/graphite/README.md
new file mode 100644
index 000000000..031dee376
--- /dev/null
+++ b/plugins/serializers/graphite/README.md
@@ -0,0 +1,51 @@
+# Graphite
+
+The Graphite data format is translated from Telegraf metrics using either the
+template pattern or the tag support method. You can select between the two
+methods using the [`graphite_tag_support`](#graphite_tag_support) option. When set, the tag support
+method is used; otherwise the [template pattern](#templates) is used.
+
+### Configuration
+
+```toml
+[[outputs.file]]
+  ## Files to write to, "stdout" is a specially handled file.
+  files = ["stdout", "/tmp/metrics.out"]
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "graphite"
+
+  ## Prefix added to each graphite bucket
+  prefix = "telegraf"
+  ## Graphite template pattern
+  template = "host.tags.measurement.field"
+
+  ## Support Graphite tags, recommended to enable when using Graphite 1.1 or later.
+  # graphite_tag_support = false
+```
+
+#### graphite_tag_support
+
+When the `graphite_tag_support` option is enabled, the template pattern is not
+used. Instead, tags are encoded using
+[Graphite tag support](http://graphite.readthedocs.io/en/latest/tags.html)
+added in Graphite 1.1. The `metric_path` is a combination of the optional
+`prefix` option, measurement name, and field name.
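+
+For instance, a minimal sketch assuming `prefix = "telegraf"`: a metric named
+`cpu` with the field `usage_idle` and a `host=tars` tag would yield a metric
+path along the lines of
+
+```
+telegraf.cpu.usage_idle;host=tars
+```
+
+with tags appended in Graphite's `;tag=value` form, as in the full example
+conversion below.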
+
+The tag `name` is reserved by Graphite; any conflicting tags will be encoded as `_name`.
+
+**Example Conversion**:
+```
+cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
+=>
+cpu.usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320660
+cpu.usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320660
+```
+
+#### templates
+
+Consult the [Template Patterns](/docs/TEMPLATE_PATTERN.md) documentation for
+details.
diff --git a/plugins/serializers/influx/README.md b/plugins/serializers/influx/README.md
new file mode 100644
index 000000000..d97fd42c8
--- /dev/null
+++ b/plugins/serializers/influx/README.md
@@ -0,0 +1,34 @@
+# Influx
+
+The `influx` data format outputs metrics into [InfluxDB Line Protocol][line protocol].
+This is the recommended format unless another format is required
+for interoperability.
+
+### Configuration
+```toml
+[[outputs.file]]
+  ## Files to write to, "stdout" is a specially handled file.
+  files = ["stdout", "/tmp/metrics.out"]
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "influx"
+
+  ## Maximum line length in bytes. Useful only for debugging.
+  influx_max_line_bytes = 0
+
+  ## When true, fields will be output in ascending lexical order. Enabling
+  ## this option will result in decreased performance and is only recommended
+  ## when you need predictable ordering while debugging.
+  influx_sort_fields = false
+
+  ## When true, Telegraf will output unsigned integers as unsigned values,
+  ## i.e.: `42u`. You will need a version of InfluxDB supporting unsigned
+  ## integer values. Enabling this option will result in field type errors if
+  ## existing data has been written.
+  influx_uint_support = false
+```
+
+[line protocol]: https://docs.influxdata.com/influxdb/latest/write_protocols/line_protocol_tutorial/
diff --git a/plugins/serializers/json/README.md b/plugins/serializers/json/README.md
new file mode 100644
index 000000000..08bb9d4f7
--- /dev/null
+++ b/plugins/serializers/json/README.md
@@ -0,0 +1,77 @@
+# JSON
+
+The `json` output data format converts metrics into JSON documents.
+
+### Configuration
+
+```toml
+[[outputs.file]]
+  ## Files to write to, "stdout" is a specially handled file.
+  files = ["stdout", "/tmp/metrics.out"]
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "json"
+
+  ## The resolution to use for the metric timestamp. Must be a duration string
+  ## such as "1ns", "1us", "1ms", "10ms", "1s". Durations are truncated to
+  ## the power of 10 less than the specified units.
+  json_timestamp_units = "1s"
+```
+
+### Examples
+
+Standard form:
+```json
+{
+  "fields": {
+    "field_1": 30,
+    "field_2": 4,
+    "field_N": 59,
+    "n_images": 660
+  },
+  "name": "docker",
+  "tags": {
+    "host": "raynor"
+  },
+  "timestamp": 1458229140
+}
+```
+
+When an output plugin needs to emit multiple metrics at one time, it may use
+the batch format. The use of batch format is determined by the plugin;
+reference the documentation for the specific plugin.
+```json +{ + "metrics": [ + { + "fields": { + "field_1": 30, + "field_2": 4, + "field_N": 59, + "n_images": 660 + }, + "name": "docker", + "tags": { + "host": "raynor" + }, + "timestamp": 1458229140 + }, + { + "fields": { + "field_1": 30, + "field_2": 4, + "field_N": 59, + "n_images": 660 + }, + "name": "docker", + "tags": { + "host": "raynor" + }, + "timestamp": 1458229140 + } + ] +} +``` diff --git a/plugins/serializers/splunkmetric/README.md b/plugins/serializers/splunkmetric/README.md index 02d69db66..e00286e57 100644 --- a/plugins/serializers/splunkmetric/README.md +++ b/plugins/serializers/splunkmetric/README.md @@ -79,7 +79,7 @@ The following aspects of the token can be overriden with tags: * source You can either use `[global_tags]` or using a more advanced configuration as documented [here](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md). - + Such as this example which overrides the index just on the cpu metric: ```toml [[inputs.cpu]] @@ -122,7 +122,7 @@ TIMESTAMP_FIELDS = time TIME_FORMAT = %s.%3N ``` -An example configuration of a file based output is: +An example configuration of a file based output is: ```toml # Send telegraf metrics to file(s) From 44c2435f6471859381650700a28eaf0382851ce9 Mon Sep 17 00:00:00 2001 From: Lee Jaeyong Date: Tue, 18 Sep 2018 08:20:50 +0900 Subject: [PATCH 0182/1815] Support tailing files created after startup in tail input (#4704) --- plugins/inputs/tail/tail.go | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index 0de2a344c..ad3d713f3 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -25,7 +25,7 @@ type Tail struct { Pipe bool WatchMethod string - tailers []*tail.Tail + tailers map[string]*tail.Tail parser parsers.Parser wg sync.WaitGroup acc telegraf.Accumulator @@ -74,7 +74,10 @@ func (t *Tail) Description() string { } func (t *Tail) Gather(acc telegraf.Accumulator) error { - return nil + t.Lock() + defer t.Unlock() + + return t.tailNewFiles(true) } func (t *Tail) Start(acc telegraf.Accumulator) error { @@ -82,9 +85,14 @@ func (t *Tail) Start(acc telegraf.Accumulator) error { defer t.Unlock() t.acc = acc + t.tailers = make(map[string]*tail.Tail) + return t.tailNewFiles(t.FromBeginning) +} + +func (t *Tail) tailNewFiles(fromBeginning bool) error { var seek *tail.SeekInfo - if !t.Pipe && !t.FromBeginning { + if !t.Pipe && !fromBeginning { seek = &tail.SeekInfo{ Whence: 2, Offset: 0, @@ -103,6 +111,11 @@ func (t *Tail) Start(acc telegraf.Accumulator) error { t.acc.AddError(fmt.Errorf("E! 
Error Glob %s failed to compile, %s", filepath, err)) } for file, _ := range g.Match() { + if _, ok := t.tailers[file]; ok { + // we're already tailing this file + continue + } + tailer, err := tail.TailFile(file, tail.Config{ ReOpen: true, @@ -114,16 +127,15 @@ func (t *Tail) Start(acc telegraf.Accumulator) error { Logger: tail.DiscardingLogger, }) if err != nil { - acc.AddError(err) + t.acc.AddError(err) continue } // create a goroutine for each "tailer" t.wg.Add(1) go t.receiver(tailer) - t.tailers = append(t.tailers, tailer) + t.tailers[file] = tailer } } - return nil } From 77f669344b0a54496023dae6c08a06beecc8c39a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 17 Sep 2018 18:00:12 -0700 Subject: [PATCH 0183/1815] Exclude Windows WMI tests from -short tests --- plugins/inputs/procstat/native_finder_windows_test.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/plugins/inputs/procstat/native_finder_windows_test.go b/plugins/inputs/procstat/native_finder_windows_test.go index 2f51a3f92..ef9c5ffb1 100644 --- a/plugins/inputs/procstat/native_finder_windows_test.go +++ b/plugins/inputs/procstat/native_finder_windows_test.go @@ -11,6 +11,9 @@ import ( ) func TestGather_RealPattern(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } pg, err := NewNativeFinder() require.NoError(t, err) pids, err := pg.Pattern(`procstat`) @@ -20,6 +23,9 @@ func TestGather_RealPattern(t *testing.T) { } func TestGather_RealFullPattern(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } pg, err := NewNativeFinder() require.NoError(t, err) pids, err := pg.FullPattern(`%procstat%`) @@ -29,6 +35,9 @@ func TestGather_RealFullPattern(t *testing.T) { } func TestGather_RealUser(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } user, err := user.Current() require.NoError(t, err) pg, err := NewNativeFinder() From f05fdde48ba103200a0dbb5cd5e60e865949bdf0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 18 Sep 2018 09:07:28 -0700 Subject: [PATCH 0184/1815] Remove the startup authentication check from the cloudwatch output (#4695) --- plugins/outputs/cloudwatch/cloudwatch.go | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index d3bd66303..b5dca364e 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -9,7 +9,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatch" - "github.com/aws/aws-sdk-go/service/sts" "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/internal/config/aws" @@ -209,20 +208,7 @@ func (c *CloudWatch) Connect() error { EndpointURL: c.EndpointURL, } configProvider := credentialConfig.Credentials() - - stsService := sts.New(configProvider) - - params := &sts.GetCallerIdentityInput{} - - _, err := stsService.GetCallerIdentity(params) - - if err != nil { - log.Printf("E! 
cloudwatch: Cannot use credentials to connect to AWS : %+v \n", err.Error()) - return err - } - c.svc = cloudwatch.New(configProvider) - return nil } From d3ad591481e29787d9bcc9c86661d9d633ad40a5 Mon Sep 17 00:00:00 2001 From: Greg Date: Tue, 18 Sep 2018 10:08:13 -0600 Subject: [PATCH 0185/1815] Fix null value crash in postgresql_extensible input (#4689) --- .../postgresql_extensible.go | 21 +++++----- .../postgresql_extensible_test.go | 39 +++++++++++++++++++ 2 files changed, 48 insertions(+), 12 deletions(-) diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index a04382888..a247b603a 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -19,14 +19,8 @@ type Postgresql struct { postgresql.Service Databases []string AdditionalTags []string - Query []struct { - Sqlquery string - Version int - Withdbname bool - Tagvalue string - Measurement string - } - Debug bool + Query query + Debug bool } type query []struct { @@ -127,7 +121,6 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { ) // Retreiving the database version - query = `select substring(setting from 1 for 3) as version from pg_settings where name='server_version_num'` if err = p.DB.QueryRow(query).Scan(&db_version); err != nil { db_version = 0 @@ -135,7 +128,6 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { // We loop in order to process each query // Query is not run if Database version does not match the query version. - for i := range p.Query { sql_query = p.Query[i].Sqlquery tag_value = p.Query[i].Tagvalue @@ -221,9 +213,14 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula return err } - if columnMap["datname"] != nil { + if c, ok := columnMap["datname"]; ok && *c != nil { // extract the database name from the column map - dbname.WriteString((*columnMap["datname"]).(string)) + switch datname := (*c).(type) { + case string: + dbname.WriteString(datname) + default: + dbname.WriteString("postgres") + } } else { dbname.WriteString("postgres") } diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go index 0f9358da6..1ed62a1cd 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go @@ -1,6 +1,7 @@ package postgresql_extensible import ( + "errors" "fmt" "testing" @@ -223,3 +224,41 @@ func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { assert.False(t, acc.HasMeasurement(col)) } } + +func TestAccRow(t *testing.T) { + p := Postgresql{} + var acc testutil.Accumulator + columns := []string{"datname", "cat"} + + testRows := []fakeRow{ + {fields: []interface{}{1, "gato"}}, + {fields: []interface{}{nil, "gato"}}, + {fields: []interface{}{"name", "gato"}}, + } + for i := range testRows { + err := p.accRow("pgTEST", testRows[i], &acc, columns) + if err != nil { + t.Fatalf("Scan failed: %s", err) + } + } +} + +type fakeRow struct { + fields []interface{} +} + +func (f fakeRow) Scan(dest ...interface{}) error { + if len(f.fields) != len(dest) { + return errors.New("Nada matchy buddy") + } + + for i, d := range dest { + switch d.(type) { + case (*interface{}): + *d.(*interface{}) = f.fields[i] + default: + return fmt.Errorf("Bad type %T", d) + } + } + return nil +} From 1d763434224e149253b7d2a3d256b8de02c99813 Mon 
Sep 17 00:00:00 2001 From: Greg Date: Tue, 18 Sep 2018 10:08:46 -0600 Subject: [PATCH 0186/1815] Enhance performance data for nagios parser (#4691) --- plugins/parsers/nagios/parser.go | 167 +++++++++++++++++--------- plugins/parsers/nagios/parser_test.go | 119 ++++++++++++++---- 2 files changed, 210 insertions(+), 76 deletions(-) diff --git a/plugins/parsers/nagios/parser.go b/plugins/parsers/nagios/parser.go index 4d5f7f008..858f5082c 100644 --- a/plugins/parsers/nagios/parser.go +++ b/plugins/parsers/nagios/parser.go @@ -1,6 +1,8 @@ package nagios import ( + "errors" + "log" "regexp" "strconv" "strings" @@ -17,8 +19,10 @@ type NagiosParser struct { // Got from Alignak // https://github.com/Alignak-monitoring/alignak/blob/develop/alignak/misc/perfdata.py -var perfSplitRegExp, _ = regexp.Compile(`([^=]+=\S+)`) -var nagiosRegExp, _ = regexp.Compile(`^([^=]+)=([\d\.\-\+eE]+)([\w\/%]*);?([\d\.\-\+eE:~@]+)?;?([\d\.\-\+eE:~@]+)?;?([\d\.\-\+eE]+)?;?([\d\.\-\+eE]+)?;?\s*`) +var ( + perfSplitRegExp = regexp.MustCompile(`([^=]+=\S+)`) + nagiosRegExp = regexp.MustCompile(`^([^=]+)=([\d\.\-\+eE]+)([\w\/%]*);?([\d\.\-\+eE:~@]+)?;?([\d\.\-\+eE:~@]+)?;?([\d\.\-\+eE]+)?;?([\d\.\-\+eE]+)?;?\s*`) +) func (p *NagiosParser) ParseLine(line string) (telegraf.Metric, error) { metrics, err := p.Parse([]byte(line)) @@ -29,88 +33,99 @@ func (p *NagiosParser) SetDefaultTags(tags map[string]string) { p.DefaultTags = tags } -//> rta,host=absol,unit=ms critical=6000,min=0,value=0.332,warning=4000 1456374625003628099 -//> pl,host=absol,unit=% critical=90,min=0,value=0,warning=80 1456374625003693967 - func (p *NagiosParser) Parse(buf []byte) ([]telegraf.Metric, error) { metrics := make([]telegraf.Metric, 0) - // Convert to string - out := string(buf) - // Prepare output for splitting - // Delete escaped pipes - out = strings.Replace(out, `\|`, "___PROTECT_PIPE___", -1) - // Split lines and get the first one - lines := strings.Split(out, "\n") - // Split output and perfdatas - data_splitted := strings.Split(lines[0], "|") - if len(data_splitted) <= 1 { - // No pipe == no perf data - return nil, nil + lines := strings.Split(strings.TrimSpace(string(buf)), "\n") + + for _, line := range lines { + data_splitted := strings.Split(line, "|") + + if len(data_splitted) != 2 { + // got human readable output only or bad line + continue + } + m, err := parsePerfData(data_splitted[1]) + if err != nil { + log.Printf("E! [parser.nagios] failed to parse performance data: %s\n", err.Error()) + continue + } + metrics = append(metrics, m...) 
} - // Get perfdatas - perfdatas := data_splitted[1] - // Add escaped pipes - perfdatas = strings.Replace(perfdatas, "___PROTECT_PIPE___", `\|`, -1) - // Split perfs - unParsedPerfs := perfSplitRegExp.FindAllSubmatch([]byte(perfdatas), -1) - // Iterate on all perfs - for _, unParsedPerfs := range unParsedPerfs { - // Get metrics - // Trim perf - trimedPerf := strings.Trim(string(unParsedPerfs[0]), " ") - // Parse perf - perf := nagiosRegExp.FindAllSubmatch([]byte(trimedPerf), -1) - // Bad string - if len(perf) == 0 { + return metrics, nil +} + +func parsePerfData(perfdatas string) ([]telegraf.Metric, error) { + metrics := make([]telegraf.Metric, 0) + + for _, unParsedPerf := range perfSplitRegExp.FindAllString(perfdatas, -1) { + trimedPerf := strings.TrimSpace(unParsedPerf) + perf := nagiosRegExp.FindStringSubmatch(trimedPerf) + + // verify at least `'label'=value[UOM];` existed + if len(perf) < 3 { continue } - if len(perf[0]) <= 2 { + if perf[1] == "" || perf[2] == "" { continue } - if perf[0][1] == nil || perf[0][2] == nil { - continue - } - fieldName := string(perf[0][1]) - tags := make(map[string]string) - if perf[0][3] != nil { - str := string(perf[0][3]) + + fieldName := strings.Trim(perf[1], "'") + tags := map[string]string{"perfdata": fieldName} + if perf[3] != "" { + str := string(perf[3]) if str != "" { tags["unit"] = str } } + fields := make(map[string]interface{}) - f, err := strconv.ParseFloat(string(perf[0][2]), 64) + if perf[2] == "U" { + return nil, errors.New("Value undetermined") + } + + f, err := strconv.ParseFloat(string(perf[2]), 64) if err == nil { fields["value"] = f } - // TODO should we set empty field - // if metric if there is no data ? - if perf[0][4] != nil { - f, err := strconv.ParseFloat(string(perf[0][4]), 64) + if perf[4] != "" { + low, high, err := parseThreshold(perf[4]) if err == nil { - fields["warning"] = f + if strings.Contains(perf[4], "@") { + fields["warning_le"] = low + fields["warning_ge"] = high + } else { + fields["warning_lt"] = low + fields["warning_gt"] = high + } } } - if perf[0][5] != nil { - f, err := strconv.ParseFloat(string(perf[0][5]), 64) + if perf[5] != "" { + low, high, err := parseThreshold(perf[5]) if err == nil { - fields["critical"] = f + if strings.Contains(perf[5], "@") { + fields["critical_le"] = low + fields["critical_ge"] = high + } else { + fields["critical_lt"] = low + fields["critical_gt"] = high + } } } - if perf[0][6] != nil { - f, err := strconv.ParseFloat(string(perf[0][6]), 64) + if perf[6] != "" { + f, err := strconv.ParseFloat(perf[6], 64) if err == nil { fields["min"] = f } } - if perf[0][7] != nil { - f, err := strconv.ParseFloat(string(perf[0][7]), 64) + if perf[7] != "" { + f, err := strconv.ParseFloat(perf[7], 64) if err == nil { fields["max"] = f } } + // Create metric - metric, err := metric.New(fieldName, tags, fields, time.Now().UTC()) + metric, err := metric.New("nagios", tags, fields, time.Now().UTC()) if err != nil { return nil, err } @@ -120,3 +135,47 @@ func (p *NagiosParser) Parse(buf []byte) ([]telegraf.Metric, error) { return metrics, nil } + +// from math +const ( + MaxFloat64 = 1.797693134862315708145274237317043567981e+308 // 2**1023 * (2**53 - 1) / 2**52 + MinFloat64 = 4.940656458412465441765687928682213723651e-324 // 1 / 2**(1023 - 1 + 52) +) + +var ErrBadThresholdFormat = errors.New("Bad threshold format") + +// Handles all cases from https://nagios-plugins.org/doc/guidelines.html#THRESHOLDFORMAT +func parseThreshold(threshold string) (min float64, max float64, err error) { + thresh := 
strings.Split(threshold, ":") + switch len(thresh) { + case 1: + max, err = strconv.ParseFloat(string(thresh[0]), 64) + if err != nil { + return 0, 0, ErrBadThresholdFormat + } + + return 0, max, nil + case 2: + if thresh[0] == "~" { + min = MinFloat64 + } else { + min, err = strconv.ParseFloat(string(thresh[0]), 64) + if err != nil { + min = 0 + } + } + + if thresh[1] == "" { + max = MaxFloat64 + } else { + max, err = strconv.ParseFloat(string(thresh[1]), 64) + if err != nil { + return 0, 0, ErrBadThresholdFormat + } + } + default: + return 0, 0, ErrBadThresholdFormat + } + + return +} diff --git a/plugins/parsers/nagios/parser_test.go b/plugins/parsers/nagios/parser_test.go index b1e3d6fdd..a4da30030 100644 --- a/plugins/parsers/nagios/parser_test.go +++ b/plugins/parsers/nagios/parser_test.go @@ -13,6 +13,7 @@ with three lines ` const validOutput2 = "TCP OK - 0.008 second response time on port 80|time=0.008457s;;;0.000000;10.000000" const validOutput3 = "TCP OK - 0.008 second response time on port 80|time=0.008457" +const validOutput4 = "OK: Load average: 0.00, 0.01, 0.05 | 'load1'=0.00;~:4;@0:6;0; 'load5'=0.01;3;0:5;0; 'load15'=0.05;0:2;0:4;0;" const invalidOutput3 = "PING OK - Packet loss = 0%, RTA = 0.30 ms" const invalidOutput4 = "PING OK - Packet loss = 0%, RTA = 0.30 ms| =3;;;; dgasdg =;;;; sff=;;;;" @@ -24,50 +25,71 @@ func TestParseValidOutput(t *testing.T) { // Output1 metrics, err := parser.Parse([]byte(validOutput1)) require.NoError(t, err) - assert.Len(t, metrics, 2) + require.Len(t, metrics, 2) // rta - assert.Equal(t, "rta", metrics[0].Name()) + assert.Equal(t, "rta", metrics[0].Tags()["perfdata"]) assert.Equal(t, map[string]interface{}{ - "value": float64(0.298), - "warning": float64(4000), - "critical": float64(6000), - "min": float64(0), + "value": float64(0.298), + "warning_lt": float64(0), + "warning_gt": float64(4000), + "critical_lt": float64(0), + "critical_gt": float64(6000), + "min": float64(0), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"unit": "ms"}, metrics[0].Tags()) + assert.Equal(t, map[string]string{"unit": "ms", "perfdata": "rta"}, metrics[0].Tags()) // pl - assert.Equal(t, "pl", metrics[1].Name()) + assert.Equal(t, "pl", metrics[1].Tags()["perfdata"]) assert.Equal(t, map[string]interface{}{ - "value": float64(0), - "warning": float64(80), - "critical": float64(90), - "min": float64(0), - "max": float64(100), + "value": float64(0), + "warning_lt": float64(0), + "warning_gt": float64(80), + "critical_lt": float64(0), + "critical_gt": float64(90), + "min": float64(0), + "max": float64(100), }, metrics[1].Fields()) - assert.Equal(t, map[string]string{"unit": "%"}, metrics[1].Tags()) + assert.Equal(t, map[string]string{"unit": "%", "perfdata": "pl"}, metrics[1].Tags()) // Output2 metrics, err = parser.Parse([]byte(validOutput2)) require.NoError(t, err) - assert.Len(t, metrics, 1) + require.Len(t, metrics, 1) // time - assert.Equal(t, "time", metrics[0].Name()) + assert.Equal(t, "time", metrics[0].Tags()["perfdata"]) assert.Equal(t, map[string]interface{}{ "value": float64(0.008457), "min": float64(0), "max": float64(10), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"unit": "s"}, metrics[0].Tags()) + assert.Equal(t, map[string]string{"unit": "s", "perfdata": "time"}, metrics[0].Tags()) // Output3 metrics, err = parser.Parse([]byte(validOutput3)) require.NoError(t, err) - assert.Len(t, metrics, 1) + require.Len(t, metrics, 1) // time - assert.Equal(t, "time", metrics[0].Name()) + assert.Equal(t, "time", 
metrics[0].Tags()["perfdata"]) assert.Equal(t, map[string]interface{}{ "value": float64(0.008457), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + assert.Equal(t, map[string]string{"perfdata": "time"}, metrics[0].Tags()) + + // Output4 + metrics, err = parser.Parse([]byte(validOutput4)) + require.NoError(t, err) + require.Len(t, metrics, 3) + // load + // const validOutput4 = "OK: Load average: 0.00, 0.01, 0.05 | 'load1'=0.00;0:4;0:6;0; 'load5'=0.01;0:3;0:5;0; 'load15'=0.05;0:2;0:4;0;" + assert.Equal(t, map[string]interface{}{ + "value": float64(0.00), + "warning_lt": MinFloat64, + "warning_gt": float64(4), + "critical_le": float64(0), + "critical_ge": float64(6), + "min": float64(0), + }, metrics[0].Fields()) + + assert.Equal(t, map[string]string{"perfdata": "load1"}, metrics[0].Tags()) } func TestParseInvalidOutput(t *testing.T) { @@ -78,11 +100,64 @@ func TestParseInvalidOutput(t *testing.T) { // invalidOutput3 metrics, err := parser.Parse([]byte(invalidOutput3)) require.NoError(t, err) - assert.Len(t, metrics, 0) + require.Len(t, metrics, 0) // invalidOutput4 metrics, err = parser.Parse([]byte(invalidOutput4)) require.NoError(t, err) - assert.Len(t, metrics, 0) + require.Len(t, metrics, 0) } + +func TestParseThreshold(t *testing.T) { + tests := []struct { + input string + eMin float64 + eMax float64 + eErr error + }{ + { + input: "10", + eMin: 0, + eMax: 10, + eErr: nil, + }, + { + input: "10:", + eMin: 10, + eMax: MaxFloat64, + eErr: nil, + }, + { + input: "~:10", + eMin: MinFloat64, + eMax: 10, + eErr: nil, + }, + { + input: "10:20", + eMin: 10, + eMax: 20, + eErr: nil, + }, + { + input: "10:20", + eMin: 10, + eMax: 20, + eErr: nil, + }, + { + input: "10:20:30", + eMin: 0, + eMax: 0, + eErr: ErrBadThresholdFormat, + }, + } + + for i := range tests { + min, max, err := parseThreshold(tests[i].input) + require.Equal(t, tests[i].eMin, min) + require.Equal(t, tests[i].eMax, max) + require.Equal(t, tests[i].eErr, err) + } +} From b5299f4cc457e4984c54428312a9e93060e6fcaf Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 18 Sep 2018 09:23:45 -0700 Subject: [PATCH 0187/1815] Fix cleanup of csv parser options, use per file parser (#4712) --- internal/config/config.go | 42 ++++++++++++++++------ plugins/inputs/logparser/logparser.go | 6 ++-- plugins/inputs/tail/tail.go | 51 +++++++++++++++++++++------ plugins/inputs/tail/tail_test.go | 12 +++---- plugins/parsers/csv/parser.go | 7 +++- plugins/parsers/csv/parser_test.go | 45 +++++++++++++++++++++++ plugins/parsers/registry.go | 50 +++++++++++++++----------- 7 files changed, 160 insertions(+), 53 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index 2208268d2..c613244fd 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -855,6 +855,17 @@ func (c *Config) addInput(name string, table *ast.Table) error { t.SetParser(parser) } + switch t := input.(type) { + case parsers.ParserFuncInput: + config, err := getParserConfig(name, table) + if err != nil { + return err + } + t.SetParserFunc(func() (parsers.Parser, error) { + return parsers.NewParser(config) + }) + } + pluginConfig, err := buildInput(name, table) if err != nil { return err @@ -1212,6 +1223,14 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) { // a parsers.Parser object, and creates it, which can then be added onto // an Input object. 
func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { + config, err := getParserConfig(name, tbl) + if err != nil { + return nil, err + } + return parsers.NewParser(config) +} + +func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { c := &parsers.Config{} if node, ok := tbl.Fields["data_format"]; ok { @@ -1510,12 +1529,12 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { if node, ok := tbl.Fields["csv_header_row_count"]; ok { if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - iVal, err := strconv.Atoi(str.Value) - c.CSVHeaderRowCount = iVal + if integer, ok := kv.Value.(*ast.Integer); ok { + v, err := integer.Int() if err != nil { - return nil, fmt.Errorf("E! parsing to int: %v", err) + return nil, err } + c.CSVHeaderRowCount = int(v) } } } @@ -1583,16 +1602,19 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { delete(tbl.Fields, "grok_custom_patterns") delete(tbl.Fields, "grok_custom_pattern_files") delete(tbl.Fields, "grok_timezone") - delete(tbl.Fields, "csv_data_columns") - delete(tbl.Fields, "csv_tag_columns") + delete(tbl.Fields, "csv_column_names") + delete(tbl.Fields, "csv_comment") + delete(tbl.Fields, "csv_delimiter") delete(tbl.Fields, "csv_field_columns") - delete(tbl.Fields, "csv_name_column") + delete(tbl.Fields, "csv_header_row_count") + delete(tbl.Fields, "csv_measurement_column") + delete(tbl.Fields, "csv_skip_columns") + delete(tbl.Fields, "csv_skip_rows") + delete(tbl.Fields, "csv_tag_columns") delete(tbl.Fields, "csv_timestamp_column") delete(tbl.Fields, "csv_timestamp_format") - delete(tbl.Fields, "csv_delimiter") - delete(tbl.Fields, "csv_header") - return parsers.NewParser(c) + return c, nil } // buildSerializer grabs the necessary entries from the ast.Table for creating diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index bdfa4bacc..d52df3aa9 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -191,15 +191,13 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error { Poll: poll, Logger: tail.DiscardingLogger, }) - - //add message saying a new tailer was added for the file - log.Printf("D! tail added for file: %v", file) - if err != nil { l.acc.AddError(err) continue } + log.Printf("D! [inputs.logparser] tail added for file: %v", file) + // create a goroutine for each "tailer" l.wg.Add(1) go l.receiver(tailer) diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index ad3d713f3..cdea675e0 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -4,6 +4,7 @@ package tail import ( "fmt" + "log" "strings" "sync" @@ -25,10 +26,10 @@ type Tail struct { Pipe bool WatchMethod string - tailers map[string]*tail.Tail - parser parsers.Parser - wg sync.WaitGroup - acc telegraf.Accumulator + tailers map[string]*tail.Tail + parserFunc parsers.ParserFunc + wg sync.WaitGroup + acc telegraf.Accumulator sync.Mutex } @@ -130,10 +131,18 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error { t.acc.AddError(err) continue } + + log.Printf("D! 
[inputs.tail] tail added for file: %v", file) + + parser, err := t.parserFunc() + if err != nil { + t.acc.AddError(fmt.Errorf("error creating parser: %v", err)) + } + // create a goroutine for each "tailer" t.wg.Add(1) - go t.receiver(tailer) - t.tailers[file] = tailer + go t.receiver(parser, tailer) + t.tailers[tailer.Filename] = tailer } } return nil @@ -141,9 +150,11 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error { // this is launched as a goroutine to continuously watch a tailed logfile // for changes, parse any incoming msgs, and add to the accumulator. -func (t *Tail) receiver(tailer *tail.Tail) { +func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) { defer t.wg.Done() + var firstLine = true + var metrics []telegraf.Metric var m telegraf.Metric var err error var line *tail.Line @@ -156,7 +167,21 @@ func (t *Tail) receiver(tailer *tail.Tail) { // Fix up files with Windows line endings. text := strings.TrimRight(line.Text, "\r") - m, err = t.parser.ParseLine(text) + if firstLine { + metrics, err = parser.Parse([]byte(text)) + if err == nil { + if len(metrics) == 0 { + firstLine = false + continue + } else { + m = metrics[0] + } + } + firstLine = false + } else { + m, err = parser.ParseLine(text) + } + if err == nil { if m != nil { tags := m.Tags() @@ -168,6 +193,9 @@ func (t *Tail) receiver(tailer *tail.Tail) { tailer.Filename, line.Text, err)) } } + + log.Printf("D! [inputs.tail] tail removed for file: %v", tailer.Filename) + if err := tailer.Err(); err != nil { t.acc.AddError(fmt.Errorf("E! Error tailing file %s, Error: %s\n", tailer.Filename, err)) @@ -183,13 +211,16 @@ func (t *Tail) Stop() { if err != nil { t.acc.AddError(fmt.Errorf("E! Error stopping tail on file %s\n", tailer.Filename)) } + } + + for _, tailer := range t.tailers { tailer.Cleanup() } t.wg.Wait() } -func (t *Tail) SetParser(parser parsers.Parser) { - t.parser = parser +func (t *Tail) SetParserFunc(fn parsers.ParserFunc) { + t.parserFunc = fn } func init() { diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index 23df0d0b8..06db2c172 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -27,8 +27,7 @@ func TestTailFromBeginning(t *testing.T) { tt := NewTail() tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} - p, _ := parsers.NewInfluxParser() - tt.SetParser(p) + tt.SetParserFunc(parsers.NewInfluxParser) defer tt.Stop() defer tmpfile.Close() @@ -60,8 +59,7 @@ func TestTailFromEnd(t *testing.T) { tt := NewTail() tt.Files = []string{tmpfile.Name()} - p, _ := parsers.NewInfluxParser() - tt.SetParser(p) + tt.SetParserFunc(parsers.NewInfluxParser) defer tt.Stop() defer tmpfile.Close() @@ -98,8 +96,7 @@ func TestTailBadLine(t *testing.T) { tt := NewTail() tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} - p, _ := parsers.NewInfluxParser() - tt.SetParser(p) + tt.SetParserFunc(parsers.NewInfluxParser) defer tt.Stop() defer tmpfile.Close() @@ -124,8 +121,7 @@ func TestTailDosLineendings(t *testing.T) { tt := NewTail() tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} - p, _ := parsers.NewInfluxParser() - tt.SetParser(p) + tt.SetParserFunc(parsers.NewInfluxParser) defer tt.Stop() defer tmpfile.Close() diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go index 9193fbf5b..8e0b8b47e 100644 --- a/plugins/parsers/csv/parser.go +++ b/plugins/parsers/csv/parser.go @@ -26,6 +26,11 @@ type Parser struct { TimestampColumn string TimestampFormat string DefaultTags map[string]string + TimeFunc 
func() time.Time +} + +func (p *Parser) SetTimeFunc(fn metric.TimeFunc) { + p.TimeFunc = fn } func (p *Parser) compile(r *bytes.Reader) (*csv.Reader, error) { @@ -167,7 +172,7 @@ outer: measurementName = fmt.Sprintf("%v", recordFields[p.MeasurementColumn]) } - metricTime := time.Now() + metricTime := p.TimeFunc() if p.TimestampColumn != "" { if recordFields[p.TimestampColumn] == nil { return nil, fmt.Errorf("timestamp column: %v could not be found", p.TimestampColumn) diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go index b488a1f16..e3668d3ac 100644 --- a/plugins/parsers/csv/parser_test.go +++ b/plugins/parsers/csv/parser_test.go @@ -6,13 +6,19 @@ import ( "time" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) +var DefaultTime = func() time.Time { + return time.Unix(3600, 0) +} + func TestBasicCSV(t *testing.T) { p := Parser{ ColumnNames: []string{"first", "second", "third"}, TagColumns: []string{"third"}, + TimeFunc: DefaultTime, } _, err := p.ParseLine("1.4,true,hi") @@ -23,6 +29,7 @@ func TestHeaderConcatenationCSV(t *testing.T) { p := Parser{ HeaderRowCount: 2, MeasurementColumn: "3", + TimeFunc: DefaultTime, } testCSV := `first,second 1,2,3 @@ -38,6 +45,7 @@ func TestHeaderOverride(t *testing.T) { HeaderRowCount: 1, ColumnNames: []string{"first", "second", "third"}, MeasurementColumn: "third", + TimeFunc: DefaultTime, } testCSV := `line1,line2,line3 3.4,70,test_name` @@ -53,6 +61,7 @@ func TestTimestamp(t *testing.T) { MeasurementColumn: "third", TimestampColumn: "first", TimestampFormat: "02/01/06 03:04:05 PM", + TimeFunc: DefaultTime, } testCSV := `line1,line2,line3 23/05/09 04:05:06 PM,70,test_name @@ -70,6 +79,7 @@ func TestTimestampError(t *testing.T) { ColumnNames: []string{"first", "second", "third"}, MeasurementColumn: "third", TimestampColumn: "first", + TimeFunc: DefaultTime, } testCSV := `line1,line2,line3 23/05/09 04:05:06 PM,70,test_name @@ -83,6 +93,7 @@ func TestQuotedCharacter(t *testing.T) { HeaderRowCount: 1, ColumnNames: []string{"first", "second", "third"}, MeasurementColumn: "third", + TimeFunc: DefaultTime, } testCSV := `line1,line2,line3 @@ -98,6 +109,7 @@ func TestDelimiter(t *testing.T) { Delimiter: "%", ColumnNames: []string{"first", "second", "third"}, MeasurementColumn: "third", + TimeFunc: DefaultTime, } testCSV := `line1%line2%line3 @@ -113,6 +125,7 @@ func TestValueConversion(t *testing.T) { Delimiter: ",", ColumnNames: []string{"first", "second", "third", "fourth"}, MetricName: "test_value", + TimeFunc: DefaultTime, } testCSV := `3.3,4,true,hello` @@ -142,6 +155,7 @@ func TestSkipComment(t *testing.T) { Comment: "#", ColumnNames: []string{"first", "second", "third", "fourth"}, MetricName: "test_value", + TimeFunc: DefaultTime, } testCSV := `#3.3,4,true,hello 4,9.9,true,name_this` @@ -164,6 +178,7 @@ func TestTrimSpace(t *testing.T) { TrimSpace: true, ColumnNames: []string{"first", "second", "third", "fourth"}, MetricName: "test_value", + TimeFunc: DefaultTime, } testCSV := ` 3.3, 4, true,hello` @@ -185,6 +200,7 @@ func TestSkipRows(t *testing.T) { SkipRows: 1, TagColumns: []string{"line1"}, MeasurementColumn: "line3", + TimeFunc: DefaultTime, } testCSV := `garbage nonsense line1,line2,line3 @@ -203,6 +219,7 @@ func TestSkipColumns(t *testing.T) { p := Parser{ SkipColumns: 1, ColumnNames: []string{"line1", "line2"}, + TimeFunc: DefaultTime, } testCSV := `hello,80,test_name` @@ -219,6 +236,7 @@ func TestSkipColumnsWithHeader(t *testing.T) { p := 
Parser{ SkipColumns: 1, HeaderRowCount: 2, + TimeFunc: DefaultTime, } testCSV := `col,col,col 1,2,3 @@ -229,3 +247,30 @@ func TestSkipColumnsWithHeader(t *testing.T) { require.NoError(t, err) require.Equal(t, map[string]interface{}{"col2": int64(80), "col3": "test_name"}, metrics[0].Fields()) } + +func TestParseStream(t *testing.T) { + p := Parser{ + MetricName: "csv", + HeaderRowCount: 1, + TimeFunc: DefaultTime, + } + + csvHeader := "a,b,c" + csvBody := "1,2,3" + + metrics, err := p.Parse([]byte(csvHeader)) + require.NoError(t, err) + require.Len(t, metrics, 0) + metric, err := p.ParseLine(csvBody) + testutil.RequireMetricEqual(t, + testutil.MustMetric( + "csv", + map[string]string{}, + map[string]interface{}{ + "a": int64(1), + "b": int64(2), + "c": int64(3), + }, + DefaultTime(), + ), metric) +} diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 32027e417..28ff30261 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -2,6 +2,7 @@ package parsers import ( "fmt" + "time" "github.com/influxdata/telegraf" @@ -18,6 +19,8 @@ import ( "github.com/influxdata/telegraf/plugins/parsers/wavefront" ) +type ParserFunc func() (Parser, error) + // ParserInput is an interface for input plugins that are able to parse // arbitrary data formats. type ParserInput interface { @@ -25,6 +28,13 @@ type ParserInput interface { SetParser(parser Parser) } +// ParserFuncInput is an interface for input plugins that are able to parse +// arbitrary data formats. +type ParserFuncInput interface { + // SetParserFunc sets a factory function that returns a new parser. + SetParserFunc(fn ParserFunc) +} + // Parser is an interface defining functions that a parser plugin must satisfy. type Parser interface { // Parse takes a byte buffer separated by newlines @@ -116,17 +126,17 @@ type Config struct { GrokTimeZone string //csv configuration - CSVDelimiter string - CSVComment string - CSVTrimSpace bool - CSVColumnNames []string - CSVTagColumns []string - CSVMeasurementColumn string - CSVTimestampColumn string - CSVTimestampFormat string - CSVHeaderRowCount int - CSVSkipRows int - CSVSkipColumns int + CSVColumnNames []string `toml:"csv_column_names"` + CSVComment string `toml:"csv_comment"` + CSVDelimiter string `toml:"csv_delimiter"` + CSVHeaderRowCount int `toml:"csv_header_row_count"` + CSVMeasurementColumn string `toml:"csv_measurement_column"` + CSVSkipColumns int `toml:"csv_skip_columns"` + CSVSkipRows int `toml:"csv_skip_rows"` + CSVTagColumns []string `toml:"csv_tag_columns"` + CSVTimestampColumn string `toml:"csv_timestamp_column"` + CSVTimestampFormat string `toml:"csv_timestamp_format"` + CSVTrimSpace bool `toml:"csv_trim_space"` } // NewParser returns a Parser interface based on the given config.
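For context on how the new `ParserFunc` hook is meant to be consumed: an input that implements `ParserFuncInput` can build a fresh parser per stream, so stateful parsers such as CSV with a header row do not share header state across connections. A minimal sketch (the `streamListener` type here is hypothetical and not part of this patch):

```go
package example

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/parsers"
)

// streamListener is a hypothetical listener-style input, shown only to
// illustrate the ParserFunc pattern introduced above.
type streamListener struct {
	parserFunc parsers.ParserFunc
}

// SetParserFunc satisfies the ParserFuncInput interface.
func (l *streamListener) SetParserFunc(fn parsers.ParserFunc) {
	l.parserFunc = fn
}

// handleStream constructs a fresh parser for each incoming stream, so a
// header row consumed on one connection cannot leak into another.
func (l *streamListener) handleStream(data []byte) ([]telegraf.Metric, error) {
	parser, err := l.parserFunc()
	if err != nil {
		return nil, err
	}
	return parser.Parse(data)
}
```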
@@ -199,28 +209,27 @@ func NewParser(config *Config) (Parser, error) { } func newCSVParser(metricName string, - header int, + headerRowCount int, skipRows int, skipColumns int, delimiter string, comment string, trimSpace bool, - dataColumns []string, + columnNames []string, tagColumns []string, nameColumn string, timestampColumn string, timestampFormat string, defaultTags map[string]string) (Parser, error) { - if header == 0 && len(dataColumns) == 0 { - // if there is no header and no DataColumns, that's an error - return nil, fmt.Errorf("there must be a header if `csv_data_columns` is not specified") + if headerRowCount == 0 && len(columnNames) == 0 { + return nil, fmt.Errorf("there must be a header if `csv_column_names` is not specified") } if delimiter != "" { runeStr := []rune(delimiter) if len(runeStr) > 1 { - return nil, fmt.Errorf("delimiter must be a single character, got: %s", delimiter) + return nil, fmt.Errorf("csv_delimiter must be a single character, got: %s", delimiter) } delimiter = fmt.Sprintf("%v", runeStr[0]) } @@ -228,25 +237,26 @@ func newCSVParser(metricName string, if comment != "" { runeStr := []rune(comment) if len(runeStr) > 1 { - return nil, fmt.Errorf("delimiter must be a single character, got: %s", comment) + return nil, fmt.Errorf("csv_delimiter must be a single character, got: %s", comment) } comment = fmt.Sprintf("%v", runeStr[0]) } parser := &csv.Parser{ MetricName: metricName, - HeaderRowCount: header, + HeaderRowCount: headerRowCount, SkipRows: skipRows, SkipColumns: skipColumns, Delimiter: delimiter, Comment: comment, TrimSpace: trimSpace, - ColumnNames: dataColumns, + ColumnNames: columnNames, TagColumns: tagColumns, MeasurementColumn: nameColumn, TimestampColumn: timestampColumn, TimestampFormat: timestampFormat, DefaultTags: defaultTags, + TimeFunc: time.Now, } return parser, nil From 448f92be1dc22cd84a805d337b8cae057500f2e8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 18 Sep 2018 09:24:25 -0700 Subject: [PATCH 0188/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8240478c6..76bb62455 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -116,6 +116,12 @@ - [#4520](https://github.com/influxdata/telegraf/issues/4520): Document all supported cli arguments. - [#4674](https://github.com/influxdata/telegraf/pull/4674): Log access denied opening a service at debug level in win_services. - [#4588](https://github.com/influxdata/telegraf/issues/4588): Add support for Kafka 2.0. +- [#4087](https://github.com/influxdata/telegraf/issues/4087): Fix nagios parser does not support ranges in performance data. +- [#4088](https://github.com/influxdata/telegraf/issues/4088): Fix nagios parser does not strip quotes from performance data. +- [#4688](https://github.com/influxdata/telegraf/issues/4688): Fix null value crash in postgresql_extensible input. +- [#4681](https://github.com/influxdata/telegraf/pull/4681): Remove the startup authentication check from the cloudwatch output. +- [#4644](https://github.com/influxdata/telegraf/issues/4644): Support tailing files created after startup in tail input. +- [#4706](https://github.com/influxdata/telegraf/issues/4706): Fix csv format configuration loading. 
## v1.7.4 [2018-08-29] From a75c789e3ecdc92fede5635da9465c044001bace Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 18 Sep 2018 18:13:20 -0700 Subject: [PATCH 0189/1815] Remove warning not to set flush_interval below interval --- internal/config/config.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index c613244fd..8010ab6fe 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -246,8 +246,8 @@ var header = `# Telegraf Configuration ## same time, which can have a measurable effect on the system. collection_jitter = "0s" - ## Default flushing interval for all outputs. You shouldn't set this below - ## interval. Maximum flush_interval will be flush_interval + flush_jitter + ## Default flushing interval for all outputs. Maximum flush_interval will be + ## flush_interval + flush_jitter flush_interval = "10s" ## Jitter the flush interval by a random amount. This is primarily to avoid ## large write spikes for users running a large number of telegraf instances. From dab6ed7d8f015b608e234b445c63160c4aaa45c1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 18 Sep 2018 18:15:10 -0700 Subject: [PATCH 0190/1815] Update sample telegraf.conf --- etc/telegraf.conf | 267 +++++++++++++++++++++++++++++++++++++++------- 1 file changed, 231 insertions(+), 36 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 8d1371a24..d81e1b993 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -47,8 +47,8 @@ ## same time, which can have a measurable effect on the system. collection_jitter = "0s" - ## Default flushing interval for all outputs. You shouldn't set this below - ## interval. Maximum flush_interval will be flush_interval + flush_jitter + ## Default flushing interval for all outputs. Maximum flush_interval will be + ## flush_interval + flush_jitter flush_interval = "10s" ## Jitter the flush interval by a random amount. This is primarily to avoid ## large write spikes for users running a large number of telegraf instances. @@ -494,6 +494,51 @@ # # Content-Type = "text/plain; charset=utf-8" +# # Configuration for sending metrics to InfluxDB +# [[outputs.influxdb_v2]] +# ## The URLs of the InfluxDB cluster nodes. +# ## +# ## Multiple URLs can be specified for a single cluster, only ONE of the +# ## urls will be written to each interval. +# urls = ["http://127.0.0.1:9999"] +# +# ## Token for authentication. +# token = "" +# +# ## Organization is the name of the organization you wish to write to; must exist. +# organization = "" +# +# ## Bucket is the name of the bucket to write into; must exist. +# bucket = "" +# +# ## Timeout for HTTP messages. +# # timeout = "5s" +# +# ## Additional HTTP headers +# # http_headers = {"X-Special-Header" = "Special-Value"} +# +# ## HTTP Proxy override; if unset, the standard proxy environment +# ## variables are consulted to determine which proxy, if any, should be used. +# # http_proxy = "http://corporate.proxy:3128" +# +# ## HTTP User-Agent +# # user_agent = "telegraf" +# +# ## Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "gzip" +# +# ## Enable or disable uint support for writing uints to InfluxDB 2.0. +# # influx_uint_support = false +# +# ## Optional TLS Config for use on HTTP connections.
+# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Configuration for sending metrics to an Instrumental project # [[outputs.instrumental]] # ## Project API Token (required) @@ -992,13 +1037,13 @@ # # Map enum values according to given table. # [[processors.enum]] -# [[processors.enum.fields]] +# [[processors.enum.mapping]] # ## Name of the field to map -# source = "name" +# field = "status" # # ## Destination field to be used for the mapped value. By default the source # ## field is used, overwriting the original value. -# # destination = "mapped" +# # dest = "status_code" # # ## Default value to be used for all values not contained in the mapping # ## table. When unset, the unmodified value for the field will be used if no @@ -1006,9 +1051,10 @@ # # default = 0 # # ## Table of mappings -# [processors.enum.fields.value_mappings] -# value1 = 1 -# value2 = 2 +# [processors.enum.mapping.value_mappings] +# green = 1 +# yellow = 2 +# red = 3 # # Apply metric modifications using override semantics. @@ -1078,27 +1124,6 @@ # # Rename measurements, tags, and fields that pass through this filter. # [[processors.rename]] -# ## Measurement, tag, and field renamings are stored in separate sub-tables. -# ## Specify one sub-table per rename operation. -# # [[processors.rename.measurement]] -# # ## measurement to change -# # from = "kilobytes_per_second" -# # to = "kbps" -# -# # [[processors.rename.tag]] -# # ## tag to change -# # from = "host" -# # to = "hostname" -# -# # [[processors.rename.field]] -# # ## field to change -# # from = "lower" -# # to = "min" -# -# # [[processors.rename.field]] -# # ## field to change -# # from = "upper" -# # to = "max" # # Perform string processing on tags, fields, and measurements @@ -1433,6 +1458,16 @@ # bcacheDevs = ["bcache0"] +# # Collects Beanstalkd server and tubes stats +# [[inputs.beanstalkd]] +# ## Server to collect data from +# server = "localhost:11300" +# +# ## List of tubes to gather stats about. +# ## If no tubes specified then data gathered for each tube on server reported by list-tubes command +# tubes = ["notifications"] + + # # Collect bond interface status, slaves statuses and failures count # [[inputs.bond]] # ## Sets 'proc' directory path @@ -2029,6 +2064,10 @@ # ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats # servers = ["http://myhaproxy.com:1936/haproxy?stats"] # +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# # ## You can also use local socket with standard wildcard globbing. # ## Server address not starting with 'http' will be treated as a possible # ## socket, so both examples below are valid. @@ -2077,9 +2116,6 @@ # # username = "username" # # password = "pa$$word" # -# ## Tag all metrics with the url -# # tag_url = true -# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -2292,6 +2328,8 @@ # ## Setting 'use_lock' to true runs iptables with the "-w" option. # ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl") # use_lock = false +# ## Define an alternate executable, such as "ip6tables". Default is "iptables". +# # binary = "ip6tables" # ## defines the table to monitor: # table = "filter" # ## defines the chains to monitor. 
@@ -3058,6 +3096,9 @@ # ## If no port is specified, 6379 is used # servers = ["tcp://localhost:6379"] # +# ## specify server password +# # password = "s#cr@t%" +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -3435,6 +3476,11 @@ # # virtual_servers = [1] +# # Read metrics about temperature +# [[inputs.temp]] +# # no configuration + + # # Read Tengine's basic status information (ngx_http_reqstat_module) # [[inputs.tengine]] # # An array of Tengine reqstat module URI to gather stats. @@ -3818,9 +3864,6 @@ # # Stream and parse log file(s). # [[inputs.logparser]] -# ## DEPRECATED: The 'logparser' plugin is deprecated in 1.8. Please use the -# ## 'tail' plugin with the grok data_format as a replacement. -# # ## Log files to parse. # ## These accept standard unix glob matching rules, but with the addition of # ## ** as a "super asterisk". ie: @@ -4158,7 +4201,7 @@ # parse_data_dog_tags = false # # ## Statsd data translation templates, more info can be read here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite +# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md # # templates = [ # # "cpu.* measurement*" # # ] @@ -4254,6 +4297,158 @@ # # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener + +# # Read metrics from VMware vCenter +# [[inputs.vsphere]] +# ## List of vCenter URLs to be monitored. These three lines must be uncommented +# ## and edited for the plugin to work. +# vcenters = [ "https://vcenter.local/sdk" ] +# username = "user@corp.local" +# password = "secret" +# +# ## VMs +# ## Typical VM metrics (if omitted or empty, all metrics are collected) +# vm_metric_include = [ +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.run.summation", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.wait.summation", +# "mem.active.average", +# "mem.granted.average", +# "mem.latency.average", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.usage.average", +# "power.power.average", +# "virtualDisk.numberReadAveraged.average", +# "virtualDisk.numberWriteAveraged.average", +# "virtualDisk.read.average", +# "virtualDisk.readOIO.latest", +# "virtualDisk.throughput.usage.average", +# "virtualDisk.totalReadLatency.average", +# "virtualDisk.totalWriteLatency.average", +# "virtualDisk.write.average", +# "virtualDisk.writeOIO.latest", +# "sys.uptime.latest", +# ] +# # vm_metric_exclude = [] ## Nothing is excluded by default +# # vm_instances = true ## true by default +# +# ## Hosts +# ## Typical host metrics (if omitted or empty, all metrics are collected) +# host_metric_include = [ +# "cpu.coreUtilization.average", +# "cpu.costop.summation", +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.swapwait.summation", +# "cpu.usage.average", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.utilization.average", +# "cpu.wait.summation", +# "disk.deviceReadLatency.average", +# "disk.deviceWriteLatency.average", +# "disk.kernelReadLatency.average", +# "disk.kernelWriteLatency.average", +# "disk.numberReadAveraged.average", +# "disk.numberWriteAveraged.average", +# "disk.read.average", +# "disk.totalReadLatency.average", +# "disk.totalWriteLatency.average", +# "disk.write.average", +# "mem.active.average", +# "mem.latency.average", +# "mem.state.latest", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.totalCapacity.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.errorsRx.summation", +# "net.errorsTx.summation", +# "net.usage.average", +# "power.power.average", +# "storageAdapter.numberReadAveraged.average", +# "storageAdapter.numberWriteAveraged.average", +# "storageAdapter.read.average", +# "storageAdapter.write.average", +# "sys.uptime.latest", +# ] +# # host_metric_exclude = [] ## Nothing excluded by default +# # host_instances = true ## true by default +# +# ## Clusters +# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected +# # cluster_metric_exclude = [] ## Nothing excluded by default +# # cluster_instances = true ## true by default +# +# ## Datastores +# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected +# # datastore_metric_exclude = [] ## Nothing excluded by default +# # datastore_instances = false ## false by default for Datastores only +# +# ## Datacenters +# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected +# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. +# # datacenter_instances = false ## false by default for Datacenters only +# +# ## Plugin Settings +# ## separator character to use for measurement and field names (default: "_") +# # separator = "_" +# +# ## number of objects to retrieve per query for realtime resources (vms and hosts) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_objects = 256 +# +# ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_metrics = 256 +# +# ## number of goroutines to use for collection and discovery of objects and metrics +# # collect_concurrency = 1 +# # discover_concurrency = 1 +# +# ## whether or not to force discovery of new objects on initial gather call before collecting metrics +# ## when true for large environments this may cause errors for time elapsed while collecting metrics +# ## when false (default) the first collection cycle may result in no or limited metrics while objects are discovered +# # force_discover_on_init = false +# +# ## the interval before (re)discovering objects subject to metrics collection (default: 300s) +# # object_discovery_interval = "300s" +# +# ## timeout applies to any of the api requests made to vcenter +# # timeout = "20s" +# +# ## Optional SSL Config +# # ssl_ca = "/path/to/cafile" +# # ssl_cert = "/path/to/certfile" +# # ssl_key = "/path/to/keyfile" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false + + # # A Webhooks Event collector # [[inputs.webhooks]] # ## Address and port to host Webhook listener on From 820d1afa2eea113c5ad1654e95618873541f3bc6 Mon Sep 17 00:00:00 2001 From: Leandro Piccilli Date: Thu, 20 Sep 2018 03:23:58 +0200 Subject: [PATCH 0191/1815] Fix changelog and add Kibana input plugin to README (#4718) --- CHANGELOG.md | 2 +- README.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md 
b/CHANGELOG.md index 76bb62455..019e802fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ - [filecount](./plugins/inputs/filecount/README.md) - Contributed by @sometimesfood - [file](./plugins/inputs/file/README.md) - Contributed by @maxunt - [icinga2](./plugins/inputs/icinga2/README.md) - Contributed by @mlabouardy -- [kibana](./plugins/inputs/icinga2/README.md) - Contributed by @lpic10 +- [kibana](./plugins/inputs/kibana/README.md) - Contributed by @lpic10 - [pgbouncer](./plugins/inputs/pgbouncer/README.md) - Contributed by @nerzhul - [temp](./plugins/inputs/temp/README.md) - Contributed by @pytimer - [tengine](./plugins/inputs/tengine/README.md) - Contributed by @ertaoxu diff --git a/README.md b/README.md index 5bc830457..4ba1a66c9 100644 --- a/README.md +++ b/README.md @@ -183,6 +183,7 @@ For documentation on the latest development code see the [documentation index][d * [kapacitor](./plugins/inputs/kapacitor) * [kernel](./plugins/inputs/kernel) * [kernel_vmstat](./plugins/inputs/kernel_vmstat) +* [kibana](./plugins/inputs/kibana) * [kubernetes](./plugins/inputs/kubernetes) * [leofs](./plugins/inputs/leofs) * [linux_sysctl_fs](./plugins/inputs/linux_sysctl_fs) From dfe8e3b47381167a73a5122c794aa711188c2bea Mon Sep 17 00:00:00 2001 From: Lee Jaeyong Date: Fri, 21 Sep 2018 04:43:39 +0900 Subject: [PATCH 0192/1815] Fix config file types of csv_skip_rows and csv_skip_columns (#4726) --- internal/config/config.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index 8010ab6fe..cd4d9825c 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1541,24 +1541,24 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { if node, ok := tbl.Fields["csv_skip_rows"]; ok { if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - iVal, err := strconv.Atoi(str.Value) - c.CSVSkipRows = iVal + if integer, ok := kv.Value.(*ast.Integer); ok { + v, err := integer.Int() if err != nil { - return nil, fmt.Errorf("E! parsing to int: %v", err) + return nil, err } + c.CSVSkipRows = int(v) } } } if node, ok := tbl.Fields["csv_skip_columns"]; ok { if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - iVal, err := strconv.Atoi(str.Value) - c.CSVSkipColumns = iVal + if integer, ok := kv.Value.(*ast.Integer); ok { + v, err := integer.Int() if err != nil { - return nil, fmt.Errorf("E! 
parsing to int: %v", err) + return nil, err } + c.CSVSkipColumns = int(v) } } } From 1e3e28428d8c9cf00a6350fd381d938b2f90373c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 20 Sep 2018 12:55:58 -0700 Subject: [PATCH 0193/1815] Clean up csv_trim_space after building parser config --- internal/config/config.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/config/config.go b/internal/config/config.go index cd4d9825c..d62536cf9 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1613,6 +1613,7 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { delete(tbl.Fields, "csv_tag_columns") delete(tbl.Fields, "csv_timestamp_column") delete(tbl.Fields, "csv_timestamp_format") + delete(tbl.Fields, "csv_trim_space") return c, nil } From fe0b964d3e7004f39ab0870dd4d25ca5fb39a1e8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 20 Sep 2018 15:00:05 -0700 Subject: [PATCH 0194/1815] Document that proc/agg/outputs are not run as part of --test --- internal/usage.go | 3 ++- internal/usage_windows.go | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/internal/usage.go b/internal/usage.go index 99db5aebb..a49021b43 100644 --- a/internal/usage.go +++ b/internal/usage.go @@ -26,7 +26,8 @@ The commands & flags are: --processor-filter filter the processors to enable, separator is : --quiet run in quiet mode --sample-config print out full sample configuration - --test gather metrics, print them out, and exit + --test gather metrics, print them out, and exit; + processors, aggregators, and outputs are not run --usage print usage for a plugin, ie, 'telegraf --usage mysql' --version display the version and exit diff --git a/internal/usage_windows.go b/internal/usage_windows.go index 585c2996f..db5e492a9 100644 --- a/internal/usage_windows.go +++ b/internal/usage_windows.go @@ -26,7 +26,8 @@ The commands & flags are: --processor-filter filter the processors to enable, separator is : --quiet run in quiet mode --sample-config print out full sample configuration - --test gather metrics, print them out, and exit + --test gather metrics, print them out, and exit; + processors, aggregators, and outputs are not run --usage print usage for a plugin, ie, 'telegraf --usage mysql' --version display the version and exit From a55e141264e4b7f85efb3505a474ceeca54b75a9 Mon Sep 17 00:00:00 2001 From: Simon Murray Date: Thu, 20 Sep 2018 23:05:47 +0100 Subject: [PATCH 0195/1815] Use typed struct instead of type assertions in Ceph Input Plugin (#4721) --- plugins/inputs/ceph/ceph.go | 251 ++++++++++++++++--------------- plugins/inputs/ceph/ceph_test.go | 244 ++++++++++++++++++++++++++++-- 2 files changed, 356 insertions(+), 139 deletions(-) diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index 0de9cb13b..369795e7f 100644 --- a/plugins/inputs/ceph/ceph.go +++ b/plugins/inputs/ceph/ceph.go @@ -294,6 +294,7 @@ func flatten(data interface{}) []*metric { return metrics } +// exec executes the 'ceph' command with the supplied arguments, returning JSON formatted output func (c *Ceph) exec(command string) (string, error) { cmdArgs := []string{"--conf", c.CephConfig, "--name", c.CephUser, "--format", "json"} cmdArgs = append(cmdArgs, strings.Split(command, " ")...) 
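The hunks below are the heart of this refactor: hand-rolled `map[string]interface{}` type assertions are replaced with typed structs, letting `encoding/json` validate the shape of the `ceph` output. A standalone sketch of the pattern, using a simplified input (illustrative only, not part of the patch):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	input := `{"pgmap": {"num_pgs": 2560, "data_bytes": 2700031960713}}`

	// Declare only the fields of interest; encoding/json ignores unknown
	// keys and reports a clear error when a value has the wrong type.
	var status struct {
		PGMap struct {
			NumPGs    float64 `json:"num_pgs"`
			DataBytes float64 `json:"data_bytes"`
		} `json:"pgmap"`
	}
	if err := json.Unmarshal([]byte(input), &status); err != nil {
		fmt.Printf("failed to parse json: '%s': %v\n", input, err)
		return
	}
	// No type assertions needed: missing fields simply decode to zero values.
	fmt.Printf("num_pgs=%v data_bytes=%v\n", status.PGMap.NumPGs, status.PGMap.DataBytes)
}
```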
@@ -317,145 +318,145 @@ func (c *Ceph) exec(command string) (string, error) { return output, nil } +// CephStatus is used to unmarshal "ceph -s" output +type CephStatus struct { + OSDMap struct { + OSDMap struct { + Epoch float64 `json:"epoch"` + NumOSDs float64 `json:"num_osds"` + NumUpOSDs float64 `json:"num_up_osds"` + NumInOSDs float64 `json:"num_in_osds"` + Full bool `json:"full"` + NearFull bool `json:"nearfull"` + NumRemappedPGs float64 `json:"num_rempapped_pgs"` + } `json:"osdmap"` + } `json:"osdmap"` + PGMap struct { + PGsByState []struct { + StateName string `json:"state_name"` + Count float64 `json:"count"` + } `json:"pgs_by_state"` + Version float64 `json:"version"` + NumPGs float64 `json:"num_pgs"` + DataBytes float64 `json:"data_bytes"` + BytesUsed float64 `json:"bytes_used"` + BytesAvail float64 `json:"bytes_avail"` + BytesTotal float64 `json:"bytes_total"` + ReadBytesSec float64 `json:"read_bytes_sec"` + WriteBytesSec float64 `json:"write_bytes_sec"` + OpPerSec float64 `json:"op_per_sec"` + } `json:"pgmap"` +} + +// decodeStatus decodes the output of 'ceph -s' func decodeStatus(acc telegraf.Accumulator, input string) error { - data := make(map[string]interface{}) - err := json.Unmarshal([]byte(input), &data) - if err != nil { + data := &CephStatus{} + if err := json.Unmarshal([]byte(input), data); err != nil { return fmt.Errorf("failed to parse json: '%s': %v", input, err) } - err = decodeStatusOsdmap(acc, data) - if err != nil { - return err + decoders := []func(telegraf.Accumulator, *CephStatus) error{ + decodeStatusOsdmap, + decodeStatusPgmap, + decodeStatusPgmapState, } - err = decodeStatusPgmap(acc, data) - if err != nil { - return err - } - - err = decodeStatusPgmapState(acc, data) - if err != nil { - return err + for _, decoder := range decoders { + if err := decoder(acc, data); err != nil { + return err + } } return nil } -func decodeStatusOsdmap(acc telegraf.Accumulator, data map[string]interface{}) error { - osdmap, ok := data["osdmap"].(map[string]interface{}) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode osdmap", measurement) - } - fields, ok := osdmap["osdmap"].(map[string]interface{}) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode osdmap", measurement) +// decodeStatusOsdmap decodes the OSD map portion of the output of 'ceph -s' +func decodeStatusOsdmap(acc telegraf.Accumulator, data *CephStatus) error { + fields := map[string]interface{}{ + "epoch": data.OSDMap.OSDMap.Epoch, + "num_osds": data.OSDMap.OSDMap.NumOSDs, + "num_up_osds": data.OSDMap.OSDMap.NumUpOSDs, + "num_in_osds": data.OSDMap.OSDMap.NumInOSDs, + "full": data.OSDMap.OSDMap.Full, + "nearfull": data.OSDMap.OSDMap.NearFull, + "num_rempapped_pgs": data.OSDMap.OSDMap.NumRemappedPGs, } acc.AddFields("ceph_osdmap", fields, map[string]string{}) return nil } -func decodeStatusPgmap(acc telegraf.Accumulator, data map[string]interface{}) error { - pgmap, ok := data["pgmap"].(map[string]interface{}) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement) - } - fields := make(map[string]interface{}) - for key, value := range pgmap { - switch value.(type) { - case float64: - fields[key] = value - } +// decodeStatusPgmap decodes the PG map portion of the output of 'ceph -s' +func decodeStatusPgmap(acc telegraf.Accumulator, data *CephStatus) error { + fields := map[string]interface{}{ + "version": data.PGMap.Version, + "num_pgs": data.PGMap.NumPGs, + "data_bytes": data.PGMap.DataBytes, + "bytes_used": data.PGMap.BytesUsed, + "bytes_avail": 
data.PGMap.BytesAvail, + "bytes_total": data.PGMap.BytesTotal, + "read_bytes_sec": data.PGMap.ReadBytesSec, + "write_bytes_sec": data.PGMap.WriteBytesSec, + "op_per_sec": data.PGMap.OpPerSec, } acc.AddFields("ceph_pgmap", fields, map[string]string{}) return nil } -func extractPgmapStates(data map[string]interface{}) ([]interface{}, error) { - const key = "pgs_by_state" - - pgmap, ok := data["pgmap"].(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("WARNING %s - unable to decode pgmap", measurement) - } - - s, ok := pgmap[key] - if !ok { - return nil, fmt.Errorf("WARNING %s - pgmap is missing the %s field", measurement, key) - } - - states, ok := s.([]interface{}) - if !ok { - return nil, fmt.Errorf("WARNING %s - pgmap[%s] is not a list", measurement, key) - } - return states, nil -} - -func decodeStatusPgmapState(acc telegraf.Accumulator, data map[string]interface{}) error { - states, err := extractPgmapStates(data) - if err != nil { - return err - } - for _, state := range states { - stateMap, ok := state.(map[string]interface{}) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode pg state", measurement) - } - stateName, ok := stateMap["state_name"].(string) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode pg state name", measurement) - } - stateCount, ok := stateMap["count"].(float64) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode pg state count", measurement) - } - +// decodeStatusPgmapState decodes the PG map state portion of the output of 'ceph -s' +func decodeStatusPgmapState(acc telegraf.Accumulator, data *CephStatus) error { + for _, pgState := range data.PGMap.PGsByState { tags := map[string]string{ - "state": stateName, + "state": pgState.StateName, } fields := map[string]interface{}{ - "count": stateCount, + "count": pgState.Count, } acc.AddFields("ceph_pgmap_state", fields, tags) } return nil } +// CephDF is used to unmarshal 'ceph df' output +type CephDf struct { + Stats struct { + TotalSpace float64 `json:"total_space"` + TotalUsed float64 `json:"total_used"` + TotalAvail float64 `json:"total_avail"` + } `json:"stats"` + Pools []struct { + Name string `json:"name"` + Stats struct { + KBUsed float64 `json:"kb_used"` + BytesUsed float64 `json:"bytes_used"` + Objects float64 `json:"objects"` + } `json:"stats"` + } `json:"pools"` +} + +// decodeDf decodes the output of 'ceph df' func decodeDf(acc telegraf.Accumulator, input string) error { - data := make(map[string]interface{}) - err := json.Unmarshal([]byte(input), &data) - if err != nil { + data := &CephDf{} + if err := json.Unmarshal([]byte(input), data); err != nil { return fmt.Errorf("failed to parse json: '%s': %v", input, err) } // ceph.usage: records global utilization and number of objects - stats_fields, ok := data["stats"].(map[string]interface{}) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode df stats", measurement) + fields := map[string]interface{}{ + "total_space": data.Stats.TotalSpace, + "total_used": data.Stats.TotalUsed, + "total_avail": data.Stats.TotalAvail, } - acc.AddFields("ceph_usage", stats_fields, map[string]string{}) + acc.AddFields("ceph_usage", fields, map[string]string{}) // ceph.pool.usage: records per pool utilization and number of objects - pools, ok := data["pools"].([]interface{}) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode df pools", measurement) - } - - for _, pool := range pools { - pool_map, ok := pool.(map[string]interface{}) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode df pool", 
measurement) - } - pool_name, ok := pool_map["name"].(string) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode df pool name", measurement) - } - fields, ok := pool_map["stats"].(map[string]interface{}) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode df pool stats", measurement) - } + for _, pool := range data.Pools { tags := map[string]string{ - "name": pool_name, + "name": pool.Name, + } + fields := map[string]interface{}{ + "kb_used": pool.Stats.KBUsed, + "bytes_used": pool.Stats.BytesUsed, + "objects": pool.Stats.Objects, } acc.AddFields("ceph_pool_usage", fields, tags) } @@ -463,36 +464,40 @@ func decodeDf(acc telegraf.Accumulator, input string) error { return nil } +// CephOSDPoolStats is used to unmarshal 'ceph osd pool stats' output +type CephOSDPoolStats []struct { + PoolName string `json:"pool_name"` + ClientIORate struct { + ReadBytesSec float64 `json:"read_bytes_sec"` + WriteBytesSec float64 `json:"write_bytes_sec"` + OpPerSec float64 `json:"op_per_sec"` + } `json:"client_io_rate"` + RecoveryRate struct { + RecoveringObjectsPerSec float64 `json:"recovering_objects_per_sec"` + RecoveringBytesPerSec float64 `json:"recovering_bytes_per_sec"` + RecoveringKeysPerSec float64 `json:"recovering_keys_per_sec"` + } `json:"recovery_rate"` +} + +// decodeOsdPoolStats decodes the output of 'ceph osd pool stats' func decodeOsdPoolStats(acc telegraf.Accumulator, input string) error { - data := make([]map[string]interface{}, 0) - err := json.Unmarshal([]byte(input), &data) - if err != nil { + data := CephOSDPoolStats{} + if err := json.Unmarshal([]byte(input), &data); err != nil { return fmt.Errorf("failed to parse json: '%s': %v", input, err) } // ceph.pool.stats: records pre pool IO and recovery throughput for _, pool := range data { - pool_name, ok := pool["pool_name"].(string) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode osd pool stats name", measurement) - } - // Note: the 'recovery' object looks broken (in hammer), so it's omitted - objects := []string{ - "client_io_rate", - "recovery_rate", - } - fields := make(map[string]interface{}) - for _, object := range objects { - perfdata, ok := pool[object].(map[string]interface{}) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode osd pool stats", measurement) - } - for key, value := range perfdata { - fields[key] = value - } - } tags := map[string]string{ - "name": pool_name, + "name": pool.PoolName, + } + fields := map[string]interface{}{ + "read_bytes_sec": pool.ClientIORate.ReadBytesSec, + "write_bytes_sec": pool.ClientIORate.WriteBytesSec, + "op_per_sec": pool.ClientIORate.OpPerSec, + "recovering_objects_per_sec": pool.RecoveryRate.RecoveringObjectsPerSec, + "recovering_bytes_per_sec": pool.RecoveryRate.RecoveringBytesPerSec, + "recovering_keys_per_sec": pool.RecoveryRate.RecoveringKeysPerSec, } acc.AddFields("ceph_pool_stats", fields, tags) } diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index f4a3ebb83..9f3ded529 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -1,7 +1,6 @@ package ceph import ( - "encoding/json" "fmt" "io/ioutil" "os" @@ -18,6 +17,12 @@ const ( epsilon = float64(0.00000001) ) +type expectedResult struct { + metric string + fields map[string]interface{} + tags map[string]string +} + func TestParseSockId(t *testing.T) { s := parseSockId(sockFile(osdPrefix, 1), osdPrefix, sockSuffix) assert.Equal(t, s, "1") @@ -37,26 +42,33 @@ func TestParseOsdDump(t *testing.T) { assert.Equal(t, float64(0), 
dump["mutex-FileJournal::finisher_lock"]["wait.avgcount"]) } -func TestDecodeStatusPgmapState(t *testing.T) { - data := make(map[string]interface{}) - err := json.Unmarshal([]byte(clusterStatusDump), &data) - assert.NoError(t, err) - +func TestDecodeStatus(t *testing.T) { acc := &testutil.Accumulator{} - err = decodeStatusPgmapState(acc, data) + err := decodeStatus(acc, clusterStatusDump) assert.NoError(t, err) - var results = []struct { - fields map[string]interface{} - tags map[string]string - }{ - {map[string]interface{}{"count": float64(2560)}, map[string]string{"state": "active+clean"}}, - {map[string]interface{}{"count": float64(10)}, map[string]string{"state": "active+scrubbing"}}, - {map[string]interface{}{"count": float64(5)}, map[string]string{"state": "active+backfilling"}}, + for _, r := range cephStatusResults { + acc.AssertContainsTaggedFields(t, r.metric, r.fields, r.tags) } +} - for _, r := range results { - acc.AssertContainsTaggedFields(t, "ceph_pgmap_state", r.fields, r.tags) +func TestDecodeDf(t *testing.T) { + acc := &testutil.Accumulator{} + err := decodeDf(acc, cephDFDump) + assert.NoError(t, err) + + for _, r := range cephDfResults { + acc.AssertContainsTaggedFields(t, r.metric, r.fields, r.tags) + } +} + +func TestDecodeOSDPoolStats(t *testing.T) { + acc := &testutil.Accumulator{} + err := decodeOsdPoolStats(acc, cephODSPoolStatsDump) + assert.NoError(t, err) + + for _, r := range cephOSDPoolStatsResults { + acc.AssertContainsTaggedFields(t, r.metric, r.fields, r.tags) } } @@ -834,3 +846,203 @@ var clusterStatusDump = ` } } ` + +var cephStatusResults = []expectedResult{ + { + metric: "ceph_osdmap", + fields: map[string]interface{}{ + "epoch": float64(21734), + "num_osds": float64(24), + "num_up_osds": float64(24), + "num_in_osds": float64(24), + "full": false, + "nearfull": false, + "num_rempapped_pgs": float64(0), + }, + tags: map[string]string{}, + }, + { + metric: "ceph_pgmap", + fields: map[string]interface{}{ + "version": float64(52314277), + "num_pgs": float64(2560), + "data_bytes": float64(2700031960713), + "bytes_used": float64(7478347665408), + "bytes_avail": float64(9857462382592), + "bytes_total": float64(17335810048000), + "read_bytes_sec": float64(0), + "write_bytes_sec": float64(367217), + "op_per_sec": float64(98), + }, + tags: map[string]string{}, + }, + { + metric: "ceph_pgmap_state", + fields: map[string]interface{}{ + "count": float64(2560), + }, + tags: map[string]string{ + "state": "active+clean", + }, + }, + { + metric: "ceph_pgmap_state", + fields: map[string]interface{}{ + "count": float64(10), + }, + tags: map[string]string{ + "state": "active+scrubbing", + }, + }, + { + metric: "ceph_pgmap_state", + fields: map[string]interface{}{ + "count": float64(5), + }, + tags: map[string]string{ + "state": "active+backfilling", + }, + }, +} + +var cephDFDump = ` +{ "stats": { "total_space": 472345880, + "total_used": 71058504, + "total_avail": 377286864}, + "pools": [ + { "name": "data", + "id": 0, + "stats": { "kb_used": 0, + "bytes_used": 0, + "objects": 0}}, + { "name": "metadata", + "id": 1, + "stats": { "kb_used": 25, + "bytes_used": 25052, + "objects": 53}}, + { "name": "rbd", + "id": 2, + "stats": { "kb_used": 0, + "bytes_used": 0, + "objects": 0}}, + { "name": "test", + "id": 3, + "stats": { "kb_used": 55476, + "bytes_used": 56806602, + "objects": 1}}]}` + +var cephDfResults = []expectedResult{ + { + metric: "ceph_usage", + fields: map[string]interface{}{ + "total_space": float64(472345880), + "total_used": float64(71058504), + "total_avail": 
float64(377286864), + }, + tags: map[string]string{}, + }, + { + metric: "ceph_pool_usage", + fields: map[string]interface{}{ + "kb_used": float64(0), + "bytes_used": float64(0), + "objects": float64(0), + }, + tags: map[string]string{ + "name": "data", + }, + }, + { + metric: "ceph_pool_usage", + fields: map[string]interface{}{ + "kb_used": float64(25), + "bytes_used": float64(25052), + "objects": float64(53), + }, + tags: map[string]string{ + "name": "metadata", + }, + }, + { + metric: "ceph_pool_usage", + fields: map[string]interface{}{ + "kb_used": float64(0), + "bytes_used": float64(0), + "objects": float64(0), + }, + tags: map[string]string{ + "name": "rbd", + }, + }, + { + metric: "ceph_pool_usage", + fields: map[string]interface{}{ + "kb_used": float64(55476), + "bytes_used": float64(56806602), + "objects": float64(1), + }, + tags: map[string]string{ + "name": "test", + }, + }, +} + +var cephODSPoolStatsDump = ` +[ + { "pool_name": "data", + "pool_id": 0, + "recovery": {}, + "recovery_rate": {}, + "client_io_rate": {}}, + { "pool_name": "metadata", + "pool_id": 1, + "recovery": {}, + "recovery_rate": {}, + "client_io_rate": {}}, + { "pool_name": "rbd", + "pool_id": 2, + "recovery": {}, + "recovery_rate": {}, + "client_io_rate": {}}, + { "pool_name": "pbench", + "pool_id": 3, + "recovery": { "degraded_objects": 18446744073709551562, + "degraded_total": 412, + "degrated_ratio": "-13.107"}, + "recovery_rate": { "recovering_objects_per_sec": 279, + "recovering_bytes_per_sec": 176401059, + "recovering_keys_per_sec": 0}, + "client_io_rate": { "read_bytes_sec": 10566067, + "write_bytes_sec": 15165220376, + "op_per_sec": 9828}}]` + +var cephOSDPoolStatsResults = []expectedResult{ + { + metric: "ceph_pool_stats", + fields: map[string]interface{}{ + "read_bytes_sec": float64(0), + "write_bytes_sec": float64(0), + "op_per_sec": float64(0), + "recovering_objects_per_sec": float64(0), + "recovering_bytes_per_sec": float64(0), + "recovering_keys_per_sec": float64(0), + }, + tags: map[string]string{ + "name": "data", + }, + }, + { + metric: "ceph_pool_stats", + fields: map[string]interface{}{ + "read_bytes_sec": float64(10566067), + "write_bytes_sec": float64(15165220376), + "op_per_sec": float64(9828), + "recovering_objects_per_sec": float64(279), + "recovering_bytes_per_sec": float64(176401059), + "recovering_keys_per_sec": float64(0), + }, + tags: map[string]string{ + "name": "pbench", + }, + }, +} From 3349b53905f9b5efad83db44545439cc9209e071 Mon Sep 17 00:00:00 2001 From: Greg Date: Fri, 21 Sep 2018 11:17:09 -0700 Subject: [PATCH 0196/1815] Rename vsphere markdown files (#4733) --- plugins/inputs/vsphere/{METRICS.MD => METRICS.md} | 0 plugins/inputs/vsphere/{README.MD => README.md} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename plugins/inputs/vsphere/{METRICS.MD => METRICS.md} (100%) rename plugins/inputs/vsphere/{README.MD => README.md} (100%) diff --git a/plugins/inputs/vsphere/METRICS.MD b/plugins/inputs/vsphere/METRICS.md similarity index 100% rename from plugins/inputs/vsphere/METRICS.MD rename to plugins/inputs/vsphere/METRICS.md diff --git a/plugins/inputs/vsphere/README.MD b/plugins/inputs/vsphere/README.md similarity index 100% rename from plugins/inputs/vsphere/README.MD rename to plugins/inputs/vsphere/README.md From adf2668c1d0e432574668594376632abcefc2e45 Mon Sep 17 00:00:00 2001 From: Tracy Boggiano Date: Fri, 21 Sep 2018 15:18:27 -0400 Subject: [PATCH 0197/1815] Fix forwarded records and offline state in sqlserver input (#4730) --- 
plugins/inputs/sqlserver/sqlserver.go | 34 +++++++++++++-------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 5f2cbb29c..a4b5fc701 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -244,7 +244,7 @@ func init() { // Thanks Bob Ward (http://aka.ms/bobwardms) // and the folks at Stack Overflow (https://github.com/opserver/Opserver/blob/9c89c7e9936b58ad237b30e6f4cc6cd59c406889/Opserver.Core/Data/SQL/SQLInstance.Memory.cs) // for putting most of the memory clerk definitions online! -const sqlMemoryClerkV2 = `DECLARE @SQL NVARCHAR(MAX) = 'SELECT +const sqlMemoryClerkV2 = `DECLARE @SQL NVARCHAR(MAX) = 'SELECT "sqlserver_memory_clerks" As [measurement], REPLACE(@@SERVERNAME,"\",":") AS [sql_instance], ISNULL(clerk_names.name,mc.type) AS clerk_type, @@ -401,7 +401,7 @@ const sqlServerPropertiesV2 = `DECLARE @sys_info TABLE ( IF OBJECT_ID('master.sys.dm_os_sys_info') IS NOT NULL BEGIN - + IF SERVERPROPERTY('EngineEdition') = 8 -- Managed Instance INSERT INTO @sys_info ( cpu_count, server_memory, sku, engine_edition, hardware_type, total_storage_mb, available_storage_mb, uptime ) SELECT TOP(1) @@ -409,8 +409,8 @@ BEGIN (SELECT process_memory_limit_mb FROM sys.dm_os_job_object) AS server_memory, sku, cast(SERVERPROPERTY('EngineEdition') as smallint) AS engine_edition, - hardware_generation AS hardware_type, - reserved_storage_mb AS total_storage_mb, + hardware_generation AS hardware_type, + reserved_storage_mb AS total_storage_mb, (reserved_storage_mb - storage_space_used_mb) AS available_storage_mb, (select DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) from sys.dm_os_sys_info) as uptime FROM sys.server_resource_stats @@ -422,12 +422,12 @@ BEGIN @available_space_mb BIGINT SELECT @total_disk_size_mb = sum(total_disk_size_mb), - @available_space_mb = sum(free_disk_space_mb) + @available_space_mb = sum(free_disk_space_mb) FROM ( SELECT distinct logical_volume_name AS LogicalName, - total_bytes/(1024*1024)as total_disk_size_mb, + total_bytes/(1024*1024)as total_disk_size_mb, available_bytes /(1024*1024) free_disk_space_mb - FROM sys.master_files AS f + FROM sys.master_files AS f CROSS APPLY sys.dm_os_volume_stats(f.database_id, f.file_id) ) as osVolumes @@ -446,7 +446,7 @@ BEGIN FROM sys.dm_os_sys_info END END - + SELECT 'sqlserver_server_properties' AS [measurement], REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], s.cpu_count, @@ -469,7 +469,7 @@ FROM ( SUM( CASE WHEN state = 2 THEN 1 ELSE 0 END ) AS db_recovering, SUM( CASE WHEN state = 3 THEN 1 ELSE 0 END ) AS db_recoveryPending, SUM( CASE WHEN state = 4 THEN 1 ELSE 0 END ) AS db_suspect, - SUM( CASE WHEN state = 10 THEN 1 ELSE 0 END ) AS db_offline + SUM( CASE WHEN state = 6 or state = 10 THEN 1 ELSE 0 END ) AS db_offline FROM sys.databases ) AS dbs CROSS APPLY ( @@ -571,7 +571,7 @@ WHERE ( 'Disk Write IO Throttled/sec', 'Disk Write IO/sec', 'Used memory (KB)', - 'Forwarded Recs/sec', + 'Forwarded Records/sec', 'Background Writer pages/sec', 'Percent Log Used' ) @@ -598,7 +598,7 @@ CAST(vs.value AS BIGINT) AS value, 1 FROM ( - SELECT + SELECT rgwg.name AS instance, rgwg.total_request_count AS "Request Count", rgwg.total_queued_request_count AS "Queued Request Count", @@ -1166,15 +1166,15 @@ ws.wait_type NOT IN ( N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_WORKER_QUEUE', N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', N'EXECSYNC', N'FSAGENT', 
N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', - N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', + N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', - N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', + N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE', N'PARALLEL_REDO_WORKER_WAIT_WORK', N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', - N'PREEMPTIVE_OS_DEVICEOPS', + N'PREEMPTIVE_OS_DEVICEOPS', N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', @@ -1186,7 +1186,7 @@ ws.wait_type NOT IN ( N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP', - N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', N'SQLTRACE_WAIT_ENTRIES', N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT', N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', @@ -1212,9 +1212,9 @@ BEGIN max_session_percent, dtu_limit, avg_login_rate_percent, - end_time + end_time FROM - sys.dm_db_resource_stats WITH (NOLOCK) + sys.dm_db_resource_stats WITH (NOLOCK) ORDER BY end_time DESC OPTION (RECOMPILE) From e08c975fbd90a2e477355a971a070ac51ec5e675 Mon Sep 17 00:00:00 2001 From: Greg Date: Fri, 21 Sep 2018 12:39:05 -0700 Subject: [PATCH 0198/1815] Fix rune conversion in csv parser (#4728) --- plugins/parsers/registry.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 28ff30261..c662cf300 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -231,7 +231,6 @@ func newCSVParser(metricName string, if len(runeStr) > 1 { return nil, fmt.Errorf("csv_delimiter must be a single character, got: %s", delimiter) } - delimiter = fmt.Sprintf("%v", runeStr[0]) } if comment != "" { @@ -239,7 +238,6 @@ func newCSVParser(metricName string, if len(runeStr) > 1 { return nil, fmt.Errorf("csv_delimiter must be a single character, got: %s", comment) } - comment = fmt.Sprintf("%v", runeStr[0]) } parser := &csv.Parser{ From 85db54c2f20e115632630fb2106c4ab7d146cc73 Mon Sep 17 00:00:00 2001 From: Gunnar <628831+gunnaraasen@users.noreply.github.com> Date: Fri, 21 Sep 2018 12:39:37 -0700 Subject: [PATCH 0199/1815] Add note about docker socket permissions (#4724) --- plugins/inputs/docker/README.md | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index b7e64af33..39fc7d6a6 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -66,6 +66,31 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/) When using the `"ENV"` endpoint, the connection is configured using the [cli Docker environment variables](https://godoc.org/github.com/moby/moby/client#NewEnvClient). 
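As an aside on the `"ENV"` endpoint mentioned above: it defers all connection settings to the standard Docker environment variables (`DOCKER_HOST`, `DOCKER_TLS_VERIFY`, `DOCKER_CERT_PATH`). A minimal sketch of what that amounts to (illustrative only, not part of the patch):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// NewEnvClient configures the connection from the environment rather
	// than from an explicit endpoint string.
	cli, err := client.NewEnvClient()
	if err != nil {
		fmt.Println("failed to create client:", err)
		return
	}
	info, err := cli.Info(context.Background())
	if err != nil {
		fmt.Println("failed to reach the daemon:", err)
		return
	}
	fmt.Println("containers:", info.Containers)
}
```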
+#### Security + +Giving telegraf access to the Docker daemon expands the [attack surface](https://docs.docker.com/engine/security/security/#docker-daemon-attack-surface), which could result in an attacker gaining root access to the machine. This is especially relevant if the telegraf configuration can be changed by untrusted users. + +#### Docker Daemon Permissions + +Typically, telegraf must be given permission to access the docker daemon unix socket when using the default endpoint. This can be done by adding the `telegraf` unix user (created when installing a Telegraf package) to the `docker` unix group with the following command: + +``` +sudo usermod -aG docker telegraf +``` + +If telegraf is run within a container, the unix socket will need to be exposed within the telegraf container. This can be done in the docker CLI by adding the option `-v /var/run/docker.sock:/var/run/docker.sock` or by adding the following lines to the telegraf container definition in a docker compose file: + +``` +volumes: + - /var/run/docker.sock:/var/run/docker.sock +``` + #### Kubernetes Labels Kubernetes may add many labels to your containers, if they are not needed you @@ -74,7 +99,6 @@ may prefer to exclude them: docker_label_exclude = ["annotation.kubernetes*"] ``` - ### Metrics: - docker From 403ed001bf06a6ad633447fc1c3422a4762de110 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 21 Sep 2018 13:07:12 -0700 Subject: [PATCH 0200/1815] Add version to the influxdb output plugins readme --- plugins/outputs/influxdb/README.md | 6 ++++-- plugins/outputs/influxdb_v2/README.md | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md index aed96e463..e9b3b0346 100644 --- a/plugins/outputs/influxdb/README.md +++ b/plugins/outputs/influxdb/README.md @@ -1,6 +1,6 @@ -# InfluxDB Output Plugin +# InfluxDB v1.x Output Plugin -This InfluxDB output plugin writes metrics to the [InfluxDB](https://github.com/influxdata/influxdb) HTTP or UDP service. +The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP service. ### Configuration: @@ -68,3 +68,5 @@ This InfluxDB output plugin writes metrics to the [InfluxDB](https://github.com/ ## existing data has been written. # influx_uint_support = false ``` + +[InfluxDB v1.x]: https://github.com/influxdata/influxdb diff --git a/plugins/outputs/influxdb_v2/README.md b/plugins/outputs/influxdb_v2/README.md index 795f4467c..5755e6b13 100644 --- a/plugins/outputs/influxdb_v2/README.md +++ b/plugins/outputs/influxdb_v2/README.md @@ -1,6 +1,6 @@ -# InfluxDB Output Plugin +# InfluxDB v2.x Output Plugin -This InfluxDB output plugin writes metrics to the [InfluxDB 2.0](https://github.com/influxdata/platform) HTTP service. +The InfluxDB output plugin writes metrics to the [InfluxDB v2.x] HTTP service. ### Configuration: @@ -49,3 +49,5 @@ This InfluxDB output plugin writes metrics to the [InfluxDB 2.0](https://github.
## Use TLS but skip chain & host verification # insecure_skip_verify = false ``` + +[InfluxDB v2.x]: https://github.com/influxdata/platform From d07bbe24e3791e918c574a8de69710b82b7ff480 Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Fri, 21 Sep 2018 15:05:36 -0700 Subject: [PATCH 0201/1815] Update link in graphite serializer README --- plugins/serializers/graphite/README.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/plugins/serializers/graphite/README.md b/plugins/serializers/graphite/README.md index 031dee376..6cff2cbe5 100644 --- a/plugins/serializers/graphite/README.md +++ b/plugins/serializers/graphite/README.md @@ -3,7 +3,7 @@ The Graphite data format is translated from Telegraf Metrics using either the template pattern or tag support method. You can select between the two methods using the [`graphite_tag_support`](#graphite-tag-support) option. When set, the tag support -method is used, otherwise the [Template Pattern][templates]) is used. +method is used, otherwise the [Template Pattern][templates] is used. ### Configuration @@ -45,7 +45,4 @@ cpu.usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690 cpu.usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690 ``` -#### templates - -Consult the [Template Patterns](/docs/TEMPLATE_PATTERN.md) documentation for -details. +[templates]: /docs/TEMPLATE_PATTERN.md From 146a30e065b03789ded3d72cce52bbb9defd1044 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 21 Sep 2018 13:23:46 -0700 Subject: [PATCH 0202/1815] Clarify output format for splunkmetric --- plugins/serializers/splunkmetric/README.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/plugins/serializers/splunkmetric/README.md b/plugins/serializers/splunkmetric/README.md index e00286e57..552b90ea4 100644 --- a/plugins/serializers/splunkmetric/README.md +++ b/plugins/serializers/splunkmetric/README.md @@ -1,12 +1,11 @@ # Splunk Metrics serializer -This serializer formats and outputs the metric data in a format that can be consumed by a Splunk metrics index. -It can be used to write to a file using the file output, or for sending metrics to a HEC using the standard telegraf HTTP output. +The Splunk Metrics serializer outputs metrics in the [Splunk metric HEC JSON format][splunk-format]. +It can be used to write to a file using the file output, or for sending metrics to a HEC using the standard telegraf HTTP output. If you're using the HTTP output, this serializer knows how to batch the metrics so you don't end up with an HTTP POST per metric. -Th data is output in a format that conforms to the specified Splunk HEC JSON format as found here: -[Send metrics in JSON format](http://dev.splunk.com/view/event-collector/SP-CAAAFDN). 
+[splunk-format]: http://dev.splunk.com/view/event-collector/SP-CAAAFDN#json An example event looks like: ```javascript From 4c9c31c34fb92335bea0dc8fd464769aab1e3906 Mon Sep 17 00:00:00 2001 From: Greg Date: Fri, 21 Sep 2018 15:47:41 -0700 Subject: [PATCH 0203/1815] Fix panic if JSONNameKey is not found (#4735) --- plugins/parsers/json/README.md | 4 ++-- plugins/parsers/json/parser.go | 10 ++++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/plugins/parsers/json/README.md b/plugins/parsers/json/README.md index fa0d767ff..e1fce5550 100644 --- a/plugins/parsers/json/README.md +++ b/plugins/parsers/json/README.md @@ -108,7 +108,7 @@ Config: files = ["example"] name_key = "name" tag_keys = ["my_tag_1"] - string_fields = ["my_field"] + string_fields = ["b_my_field"] data_format = "json" ``` @@ -127,7 +127,7 @@ Input: Output: ``` -my_json,my_tag_1=foo a=5,b_c=6,my_field="description" +my_json,my_tag_1=foo a=5,b_c=6,b_my_field="description" ``` #### Arrays diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index 697296a12..1d8ce2d02 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -5,6 +5,8 @@ import ( "encoding/json" "fmt" "log" + "math" + "regexp" "strconv" "strings" "time" @@ -13,8 +15,6 @@ import ( "github.com/influxdata/telegraf/metric" "github.com/pkg/errors" "github.com/tidwall/gjson" - "math" - "regexp" ) var ( @@ -94,7 +94,6 @@ func parseUnixTimestamp(jsonValue interface{}, format string) (time.Time, error) } func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]interface{}) ([]telegraf.Metric, error) { - tags := make(map[string]string) for k, v := range p.DefaultTags { tags[k] = v @@ -108,7 +107,10 @@ func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]i //checks if json_name_key is set if p.JSONNameKey != "" { - p.MetricName = f.Fields[p.JSONNameKey].(string) + switch field := f.Fields[p.JSONNameKey].(type) { + case string: + p.MetricName = field + } } //if time key is specified, set it to nTime From 54b7262228f53d1b5eace9c8ff907714a94166de Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 21 Sep 2018 16:11:36 -0700 Subject: [PATCH 0204/1815] Update changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 019e802fd..cd02a3d32 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## v1.8 [unreleased] +## v1.8 [2018-09-21] ### New Inputs From 1a437e5690af31646eb42e7dd010645f74f60816 Mon Sep 17 00:00:00 2001 From: JP Mens Date: Mon, 24 Sep 2018 21:13:36 +0200 Subject: [PATCH 0205/1815] Clarify monitoring backend provenance (#4739) --- plugins/inputs/openldap/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/openldap/README.md b/plugins/inputs/openldap/README.md index 619e845c7..48f29cb60 100644 --- a/plugins/inputs/openldap/README.md +++ b/plugins/inputs/openldap/README.md @@ -4,7 +4,7 @@ This plugin gathers metrics from OpenLDAP's cn=Monitor backend. ### Configuration: -To use this plugin you must enable the [monitoring](https://www.openldap.org/devel/admin/monitoringslapd.html) backend. +To use this plugin you must enable the [slapd monitoring](https://www.openldap.org/devel/admin/monitoringslapd.html) backend. 
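One footnote on the `JSONNameKey` fix above: a direct type assertion on a map value panics when the key is missing or holds a non-string, while the comma-ok and type-switch forms fail safely, which is why the parser now switches on the field's type. A standalone illustration (not part of any patch):

```go
package main

import "fmt"

func main() {
	fields := map[string]interface{}{"value": 42}

	// name := fields["name"].(string) // would panic: the value is nil, not a string

	if name, ok := fields["name"].(string); ok { // comma-ok form never panics
		fmt.Println("name:", name)
	}

	switch name := fields["name"].(type) { // type-switch form, as used in the fix
	case string:
		fmt.Println("name:", name)
	default:
		fmt.Println("name key missing or not a string; keeping the default metric name")
	}
}
```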
```toml [[inputs.openldap]] From 05c9197aafc4e71657ff43f906cfd67c5ca936cb Mon Sep 17 00:00:00 2001 From: JP Mens Date: Mon, 24 Sep 2018 22:41:12 +0200 Subject: [PATCH 0206/1815] Fix grammar in exec input readme (#4740) --- plugins/inputs/exec/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md index 788c8eec0..f4e917242 100644 --- a/plugins/inputs/exec/README.md +++ b/plugins/inputs/exec/README.md @@ -40,7 +40,7 @@ This script produces static values, since no timestamp is specified the values a echo 'example,tag1=a,tag2=b i=42i,j=43i,k=44i' ``` -It can be paired with the following configuration and will be ran at the `interval` of the agent. +It can be paired with the following configuration and will be run at the `interval` of the agent. ```toml [[inputs.exec]] commands = ["sh /tmp/test.sh"] From f72e52528d1edf757a068c059fcebf23ad27f29f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 24 Sep 2018 15:35:12 -0700 Subject: [PATCH 0207/1815] Fix dep check errors in Gopkg.lock --- Gopkg.lock | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index cb6338110..80a6277bd 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -130,7 +130,7 @@ [[projects]] branch = "master" - digest = "1:5a5f28fcfe3a74247733a31ceaac0e53bfc2723e43c596b2e3f110eda9378575" + digest = "1:0828d8c0f95689f832cf348fe23827feb7640cd698d612ef59e2f9d041f54c68" name = "github.com/apache/thrift" packages = ["lib/go/thrift"] pruneopts = "" @@ -276,7 +276,7 @@ revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" [[projects]] - digest = "1:d2ca9295cce7d0e7b26b498c6b59ff903d8315e8ead97f0f6cadf9e7d613e1e8" + digest = "1:d149605f1b00713fdc48150122892d77d49d30c825f690dd92f497aeb6cf18f5" name = "github.com/docker/docker" packages = [ "api", @@ -517,7 +517,7 @@ [[projects]] branch = "master" - digest = "1:e1c91a91cc738cebecbf12fc98f554f6f932c8b97e2052ad63ea43948df5bcb0" + digest = "1:ff65bf6fc4d1116f94ac305342725c21b55c16819c2606adc8f527755716937f" name = "github.com/hashicorp/go-rootcerts" packages = ["."] pruneopts = "" @@ -1320,7 +1320,6 @@ "github.com/aws/aws-sdk-go/aws/session", "github.com/aws/aws-sdk-go/service/cloudwatch", "github.com/aws/aws-sdk-go/service/kinesis", - "github.com/aws/aws-sdk-go/service/sts", "github.com/bsm/sarama-cluster", "github.com/couchbase/go-couchbase", "github.com/denisenkom/go-mssqldb", @@ -1359,6 +1358,7 @@ "github.com/nsqio/go-nsq", "github.com/openzipkin/zipkin-go-opentracing", "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore", + "github.com/pkg/errors", "github.com/prometheus/client_golang/prometheus", "github.com/prometheus/client_golang/prometheus/promhttp", "github.com/prometheus/client_model/go", @@ -1380,9 +1380,13 @@ "github.com/tidwall/gjson", "github.com/vjeantet/grok", "github.com/vmware/govmomi", + "github.com/vmware/govmomi/object", "github.com/vmware/govmomi/performance", + "github.com/vmware/govmomi/session", "github.com/vmware/govmomi/simulator", "github.com/vmware/govmomi/view", + "github.com/vmware/govmomi/vim25", + "github.com/vmware/govmomi/vim25/methods", "github.com/vmware/govmomi/vim25/mo", "github.com/vmware/govmomi/vim25/soap", "github.com/vmware/govmomi/vim25/types", From 69f6612c2a427d90ef3799b7dd4edc52366dbec7 Mon Sep 17 00:00:00 2001 From: Tracy Boggiano Date: Wed, 26 Sep 2018 22:02:29 -0400 Subject: [PATCH 0208/1815] Fix hardware_type may be truncated in sqlserver input (#4750) --- plugins/inputs/sqlserver/sqlserver.go 
| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index a4b5fc701..bf1fb9af7 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -393,7 +393,7 @@ const sqlServerPropertiesV2 = `DECLARE @sys_info TABLE ( server_memory BIGINT, sku NVARCHAR(64), engine_edition SMALLINT, - hardware_type VARCHAR(15), + hardware_type VARCHAR(16), total_storage_mb BIGINT, available_storage_mb BIGINT, uptime INT From 74e9d1f078775b20ebbb10f0030b2abecc335e8d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 26 Sep 2018 19:04:21 -0700 Subject: [PATCH 0209/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cd02a3d32..79f90656a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +## v1.8.1 [unreleased] + +### Bugfixes + +- [#4750](https://github.com/influxdata/telegraf/pull/4750): Fix hardware_type may be truncated in sqlserver input. + ## v1.8 [2018-09-21] ### New Inputs From 38e5e103ceff8494e0ab8339ac5bfdb42c15ba3f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 26 Sep 2018 19:05:55 -0700 Subject: [PATCH 0210/1815] Clarify platform support for temp input (#4756) --- plugins/inputs/temp/README.md | 29 +++++++++++++---------------- plugins/inputs/temp/temp.go | 4 ++++ 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/plugins/inputs/temp/README.md b/plugins/inputs/temp/README.md index 87f365ca0..873a73285 100644 --- a/plugins/inputs/temp/README.md +++ b/plugins/inputs/temp/README.md @@ -1,6 +1,9 @@ # Temp Input plugin -This input plugin collect temperature. +The temp input plugin gathers metrics on system temperature. This plugin is +meant to be multi-platform and uses platform-specific collection methods. + +Currently supports Linux and Windows. ### Configuration: @@ -8,25 +11,19 @@ This input plugin collect temperature. [[inputs.temp]] ``` -### Measurements & Fields: +### Metrics:
- -temp ( unit: °Celsius) - -### Tags: - -- All measurements have the following tags: - - host +- temp + - tags: - sensor + - fields: + - temp (float, celsius) ### Example Output: ``` -$ ./telegraf --config telegraf.conf --input-filter temp --test -* Plugin: temp, Collection 1 -> temp,host=localhost,sensor=coretemp_physicalid0_crit temp=100 1531298763000000000 -> temp,host=localhost,sensor=coretemp_physicalid0_critalarm temp=0 1531298763000000000 -> temp,host=localhost,sensor=coretemp_physicalid0_input temp=100 1531298763000000000 -> temp,host=localhost,sensor=coretemp_physicalid0_max temp=100 1531298763000000000 +temp,sensor=coretemp_physicalid0_crit temp=100 1531298763000000000 +temp,sensor=coretemp_physicalid0_critalarm temp=0 1531298763000000000 +temp,sensor=coretemp_physicalid0_input temp=100 1531298763000000000 +temp,sensor=coretemp_physicalid0_max temp=100 1531298763000000000 ``` diff --git a/plugins/inputs/temp/temp.go b/plugins/inputs/temp/temp.go index 10e61673d..baf647b59 100644 --- a/plugins/inputs/temp/temp.go +++ b/plugins/inputs/temp/temp.go @@ -2,6 +2,7 @@ package temp import ( "fmt" + "strings" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" @@ -25,6 +26,9 @@ func (t *Temperature) SampleConfig() string { func (t *Temperature) Gather(acc telegraf.Accumulator) error { temps, err := t.ps.Temperature() if err != nil { + if strings.Contains(err.Error(), "not implemented yet") { + return fmt.Errorf("plugin is not supported on this platform: %v", err) + } return fmt.Errorf("error getting temperatures info: %s", err) } for _, temp := range temps { From 54e61aa78aab4b49a1ba17bbba4a606111cd4274 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 26 Sep 2018 19:08:46 -0700 Subject: [PATCH 0211/1815] Use FieldList in basicstats to improve performance (#4741) --- plugins/aggregators/basicstats/basicstats.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/plugins/aggregators/basicstats/basicstats.go b/plugins/aggregators/basicstats/basicstats.go index 42b795ab6..701cd8a85 100644 --- a/plugins/aggregators/basicstats/basicstats.go +++ b/plugins/aggregators/basicstats/basicstats.go @@ -72,9 +72,9 @@ func (m *BasicStats) Add(in telegraf.Metric) { tags: in.Tags(), fields: make(map[string]basicstats), } - for k, v := range in.Fields() { - if fv, ok := convert(v); ok { - a.fields[k] = basicstats{ + for _, field := range in.FieldList() { + if fv, ok := convert(field.Value); ok { + a.fields[field.Key] = basicstats{ count: 1, min: fv, max: fv, @@ -86,11 +86,11 @@ func (m *BasicStats) Add(in telegraf.Metric) { } m.cache[id] = a } else { - for k, v := range in.Fields() { - if fv, ok := convert(v); ok { - if _, ok := m.cache[id].fields[k]; !ok { + for _, field := range in.FieldList() { + if fv, ok := convert(field.Value); ok { + if _, ok := m.cache[id].fields[field.Key]; !ok { // hit an uncached field of a cached metric - m.cache[id].fields[k] = basicstats{ + m.cache[id].fields[field.Key] = basicstats{ count: 1, min: fv, max: fv, @@ -101,7 +101,7 @@ func (m *BasicStats) Add(in telegraf.Metric) { continue } - tmp := m.cache[id].fields[k] + tmp := m.cache[id].fields[field.Key] //https://en.m.wikipedia.org/wiki/Algorithms_for_calculating_variance //variable initialization x := fv @@ -126,7 +126,7 @@ func (m *BasicStats) Add(in telegraf.Metric) { //sum compute tmp.sum += fv //store final data - m.cache[id].fields[k] = tmp + m.cache[id].fields[field.Key] = tmp } } } From 358920e6bafa13d71727baab57af6d5e8a9fdf53 Mon Sep 17
00:00:00 2001 From: Daniel Nelson Date: Wed, 26 Sep 2018 19:09:53 -0700 Subject: [PATCH 0212/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 79f90656a..4f75756e9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ### Bugfixes - [#4750](https://github.com/influxdata/telegraf/pull/4750): Fix hardware_type may be truncated in sqlserver input. +- [#4723](https://github.com/influxdata/telegraf/issues/4723): Improve performance in basicstats aggregator. ## v1.8 [2018-09-21] From a086ea6989cbbec69591e22560e2384dc181b047 Mon Sep 17 00:00:00 2001 From: Lee Jaeyong Date: Thu, 27 Sep 2018 11:15:38 +0900 Subject: [PATCH 0213/1815] Use time.AfterFunc to avoid need for goroutine in WaitTimeout (#4702) --- internal/internal.go | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/internal/internal.go b/internal/internal.go index adc4df820..f7d75dfb3 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -153,22 +153,24 @@ func RunTimeout(c *exec.Cmd, timeout time.Duration) error { // It assumes the command has already been started. // If the command times out, it attempts to kill the process. func WaitTimeout(c *exec.Cmd, timeout time.Duration) error { - timer := time.NewTimer(timeout) - done := make(chan error) - go func() { done <- c.Wait() }() - select { - case err := <-done: - timer.Stop() - return err - case <-timer.C: - if err := c.Process.Kill(); err != nil { + timer := time.AfterFunc(timeout, func() { + err := c.Process.Kill() + if err != nil { log.Printf("E! FATAL error killing process: %s", err) - return err + return } - // wait for the command to return after killing it - <-done + }) + + err := c.Wait() + isTimeout := timer.Stop() + + if err != nil { + return err + } else if isTimeout == false { return TimeoutErr } + + return err } // RandomSleep will sleep for a random amount of time up to max. From a21524c6b33d6a24d7a31b5c80124239a4c81553 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Onur=20G=C3=BCzel?= Date: Thu, 27 Sep 2018 05:16:39 +0300 Subject: [PATCH 0214/1815] Add hostname to TLS config for SNI support (#4747) --- plugins/inputs/x509_cert/x509_cert.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index 252b60e1f..affd3fa04 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -80,6 +80,7 @@ func (c *X509Cert) getCert(location string, timeout time.Duration) ([]*x509.Cert } defer ipConn.Close() + tlsCfg.ServerName = u.Host conn := tls.Client(ipConn, tlsCfg) defer conn.Close() From d70c80722de83e193aa8b80a3e37a312a0cff428 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 26 Sep 2018 19:17:55 -0700 Subject: [PATCH 0215/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f75756e9..69c817ddd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ - [#4750](https://github.com/influxdata/telegraf/pull/4750): Fix hardware_type may be truncated in sqlserver input. - [#4723](https://github.com/influxdata/telegraf/issues/4723): Improve performance in basicstats aggregator. +- [#4747](https://github.com/influxdata/telegraf/pull/4723): Add hostname to TLS config for SNI support. 
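The one-line x509_cert change above (setting `tlsCfg.ServerName` before `tls.Client`) is what makes SNI work: without it, a server fronting several certificates on one address cannot select the right certificate during the handshake. A self-contained sketch of the same pattern, where the host, port, and timeout are placeholders rather than values from the plugin:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"net"
	"time"
)

func main() {
	// Dial the raw TCP connection first, mirroring the plugin's flow.
	ipConn, err := net.DialTimeout("tcp", "example.com:443", 5*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	defer ipConn.Close()

	// ServerName is sent in the ClientHello (SNI) and is also the name
	// the returned certificate chain is verified against.
	conn := tls.Client(ipConn, &tls.Config{ServerName: "example.com"})
	defer conn.Close()

	if err := conn.Handshake(); err != nil {
		log.Fatal(err)
	}
	for _, cert := range conn.ConnectionState().PeerCertificates {
		fmt.Println(cert.Subject.CommonName, "expires", cert.NotAfter)
	}
}
```

One caveat, noted here as an observation rather than part of the patch: `u.Host` from `url.Parse` can still carry a `host:port` suffix, which is not a valid SNI value; splitting the port off first (for example with `net.SplitHostPort`) is the more defensive pattern.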
## v1.8 [2018-09-21] From 2e2e998ebdcca812c4337042adccf285871ed0f5 Mon Sep 17 00:00:00 2001 From: Greg Date: Wed, 26 Sep 2018 20:26:23 -0600 Subject: [PATCH 0216/1815] Don't add tags with empty values to opentsdb output (#4751) --- plugins/outputs/opentsdb/opentsdb.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/opentsdb/opentsdb.go b/plugins/outputs/opentsdb/opentsdb.go index 964b1768f..1dfd2ce38 100644 --- a/plugins/outputs/opentsdb/opentsdb.go +++ b/plugins/outputs/opentsdb/opentsdb.go @@ -213,7 +213,10 @@ func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error { func cleanTags(tags map[string]string) map[string]string { tagSet := make(map[string]string, len(tags)) for k, v := range tags { - tagSet[sanitize(k)] = sanitize(v) + val := sanitize(v) + if val != "" { + tagSet[sanitize(k)] = val + } } return tagSet } From 0a8301ec3c9a9f04709114fe65d4b8a5e54d1784 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 26 Sep 2018 19:27:31 -0700 Subject: [PATCH 0217/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 69c817ddd..04d40576e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ - [#4750](https://github.com/influxdata/telegraf/pull/4750): Fix hardware_type may be truncated in sqlserver input. - [#4723](https://github.com/influxdata/telegraf/issues/4723): Improve performance in basicstats aggregator. - [#4747](https://github.com/influxdata/telegraf/pull/4723): Add hostname to TLS config for SNI support. +- [#4675](https://github.com/influxdata/telegraf/issues/4675): Don't add tags with empty values to opentsdb output. ## v1.8 [2018-09-21] From 8cbd39501ba01ca30fa56cfefde03aa756b19ec9 Mon Sep 17 00:00:00 2001 From: Bo Zhao Date: Wed, 26 Sep 2018 19:30:02 -0700 Subject: [PATCH 0218/1815] Add replace function to strings processor (#4686) --- plugins/processors/strings/README.md | 18 ++++++ plugins/processors/strings/strings.go | 74 ++++++++++++++------ plugins/processors/strings/strings_test.go | 49 ++++++++++++++ 3 files changed, 122 insertions(+), 19 deletions(-) diff --git a/plugins/processors/strings/README.md b/plugins/processors/strings/README.md index f1e7361fe..06bffaee8 100644 --- a/plugins/processors/strings/README.md +++ b/plugins/processors/strings/README.md @@ -10,11 +10,14 @@ Implemented functions are: - trim_right - trim_prefix - trim_suffix +- replace Please note that in this implementation these are processed in the order that they appear above. Specify the `measurement`, `tag` or `field` that you want processed in each section and optionally a `dest` if you want the result stored in a new tag or field. You can specify lots of transformations on data with a single strings processor. +If you'd like to apply the change to every `tag`, `field`, or `measurement`, use the value "*" for each respective field. Note that the `dest` field will be ignored if "*" is used + ### Configuration: ```toml @@ -45,7 +48,11 @@ Specify the `measurement`, `tag` or `field` that you want processed in each sect # [[processors.strings.trim_suffix]] # field = "read_count" # suffix = "_count" + + # [[processors.strings.replace]] + # measurement = "*" + # old = ":" + # new = "_" ``` #### Trim, TrimLeft, TrimRight The `trim`, `trim_left`, and `trim_right` functions take an optional parameter: `cutset`. This value is a string containing the characters to remove from the string. The `trim_prefix` and `trim_suffix` functions remove the given `prefix` or `suffix` respectively from the string.
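A detail worth keeping in mind when configuring the trim functions: assuming they keep the semantics of Go's `strings.Trim` family (which the `initOnce` wiring in `strings.go` below suggests), `cutset` is a set of characters to strip, not a literal substring, while `prefix` and `suffix` are exact matches. A quick standalone illustration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// cutset: every leading/trailing rune contained in the set is
	// removed, regardless of order or repetition.
	fmt.Println(strings.Trim("\t\tmessage\r\n", "\t\r\n")) // "message"
	fmt.Println(strings.TrimRight("value;;;", ";"))        // "value"

	// prefix/suffix: removed only as exact substrings at the ends.
	fmt.Println(strings.TrimPrefix("my_value", "my_"))      // "value"
	fmt.Println(strings.TrimSuffix("read_count", "_count")) // "read"
}
```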
+#### Replace + +The `replace` function does a substring replacement across the entire +string to allow for different conventions between various input and output +plugins. Some example usages are eliminating disallowed characters in +field names or replacing one separator character with another. +It can also be used to eliminate unneeded characters from metrics. +If the entire name would be deleted, it will refuse to perform +the operation and keep the old name. + ### Example **Config** ```toml diff --git a/plugins/processors/strings/strings.go b/plugins/processors/strings/strings.go index 8e68dbc52..69e89f025 100644 --- a/plugins/processors/strings/strings.go +++ b/plugins/processors/strings/strings.go @@ -16,6 +16,7 @@ type Strings struct { TrimRight []converter `toml:"trim_right"` TrimPrefix []converter `toml:"trim_prefix"` TrimSuffix []converter `toml:"trim_suffix"` + Replace []converter `toml:"replace"` converters []converter init bool @@ -31,6 +32,8 @@ type converter struct { Cutset string Suffix string Prefix string + Old string + New string fn ConvertFunc } @@ -68,6 +71,12 @@ const sampleConfig = ` # [[processors.strings.trim_suffix]] # field = "read_count" # suffix = "_count" + + ## Replace substrings within field names + # [[processors.strings.replace]] + # measurement = "*" + # old = ":" + # new = "_" ` func (s *Strings) SampleConfig() string { @@ -79,37 +88,53 @@ func (s *Strings) Description() string { } func (c *converter) convertTag(metric telegraf.Metric) { - tv, ok := metric.GetTag(c.Tag) - if !ok { - return + var tags map[string]string + if c.Tag == "*" { + tags = metric.Tags() + } else { + tags = make(map[string]string) + tv, ok := metric.GetTag(c.Tag) + if !ok { + return + } + tags[c.Tag] = tv } - dest := c.Tag - if c.Dest != "" { - dest = c.Dest + for tag, value := range tags { + dest := tag + if c.Tag != "*" && c.Dest != "" { + dest = c.Dest + } + metric.AddTag(dest, c.fn(value)) } - - metric.AddTag(dest, c.fn(tv)) } func (c *converter) convertField(metric telegraf.Metric) { - fv, ok := metric.GetField(c.Field) - if !ok { - return + var fields map[string]interface{} + if c.Field == "*" { + fields = metric.Fields() + } else { + fields = make(map[string]interface{}) + fv, ok := metric.GetField(c.Field) + if !ok { + return + } + fields[c.Field] = fv } - dest := c.Field - if c.Dest != "" { - dest = c.Dest - } - - if fv, ok := fv.(string); ok { - metric.AddField(dest, c.fn(fv)) + for tag, value := range fields { + dest := tag + if c.Tag != "*" && c.Dest != "" { + dest = c.Dest + } + if fv, ok := value.(string); ok { + metric.AddField(dest, c.fn(fv)) + } } } func (c *converter) convertMeasurement(metric telegraf.Metric) { - if metric.Name() != c.Measurement { + if metric.Name() != c.Measurement && c.Measurement != "*" { return } @@ -176,6 +201,17 @@ func (s *Strings) initOnce() { c.fn = func(s string) string { return strings.TrimSuffix(s, c.Suffix) } s.converters = append(s.converters, c) } + for _, c := range s.Replace { + c.fn = func(s string) string { + newString := strings.Replace(s, c.Old, c.New, -1) + if newString == "" { + return s + } else { + return newString + } + } + s.converters = append(s.converters, c) + } s.init = true } diff --git a/plugins/processors/strings/strings_test.go b/plugins/processors/strings/strings_test.go index 2097ac5a8..a4a16d7dd 100644 --- a/plugins/processors/strings/strings_test.go +++ b/plugins/processors/strings/strings_test.go @@ -481,3 +481,52 @@ func TestReadmeExample(t *testing.T) { assert.Equal(t, expectedFields,
processed[0].Fields()) assert.Equal(t, expectedTags, processed[0].Tags()) } + +func newMetric(name string) telegraf.Metric { + tags := map[string]string{} + fields := map[string]interface{}{} + m, _ := metric.New(name, tags, fields, time.Now()) + return m +} + +func TestMeasurementReplace(t *testing.T) { + plugin := &Strings{ + Replace: []converter{ + converter{ + Old: "_", + New: "-", + Measurement: "*", + }, + }, + } + metrics := []telegraf.Metric{ + newMetric("foo:some_value:bar"), + newMetric("average:cpu:usage"), + newMetric("average_cpu_usage"), + } + results := plugin.Apply(metrics...) + assert.Equal(t, "foo:some-value:bar", results[0].Name(), "`_` was not changed to `-`") + assert.Equal(t, "average:cpu:usage", results[1].Name(), "Input name should have been unchanged") + assert.Equal(t, "average-cpu-usage", results[2].Name(), "All instances of `_` should have been changed to `-`") +} + +func TestMeasurementCharDeletion(t *testing.T) { + plugin := &Strings{ + Replace: []converter{ + converter{ + Old: "foo", + New: "", + Measurement: "*", + }, + }, + } + metrics := []telegraf.Metric{ + newMetric("foo:bar:baz"), + newMetric("foofoofoo"), + newMetric("barbarbar"), + } + results := plugin.Apply(metrics...) + assert.Equal(t, ":bar:baz", results[0].Name(), "Should have deleted the initial `foo`") + assert.Equal(t, "foofoofoo", results[1].Name(), "Should have refused to delete the whole string") + assert.Equal(t, "barbarbar", results[2].Name(), "Should not have changed the input") +} From c47a75ae53f162b99365d540e7306d9f2e0e839a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 26 Sep 2018 19:31:43 -0700 Subject: [PATCH 0219/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 04d40576e..43a97dfab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +## v1.9 [unreleased] + +### Features + +- [#4686](https://github.com/influxdata/telegraf/pull/4686): Add replace function to strings processor. 
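Distilled from the `initOnce` hunk and the `TestMeasurementCharDeletion` test above: the new replace converter is `strings.Replace` plus one guard that refuses to produce an empty name. A standalone sketch of that behavior:

```go
package main

import (
	"fmt"
	"strings"
)

// replaceAll substitutes every occurrence of old with new, but keeps
// the original string if the result would be empty, mirroring the
// converter's guard against deleting an entire measurement name.
func replaceAll(s, old, new string) string {
	out := strings.Replace(s, old, new, -1)
	if out == "" {
		return s
	}
	return out
}

func main() {
	fmt.Println(replaceAll("average_cpu_usage", "_", "-")) // average-cpu-usage
	fmt.Println(replaceAll("foo:bar:baz", "foo", ""))      // :bar:baz
	fmt.Println(replaceAll("foofoofoo", "foo", ""))        // foofoofoo (kept as-is)
}
```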
+ ## v1.8.1 [unreleased] ### Bugfixes From af0ef55c0281af67fe2db52d8c700fa8f636010e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 27 Sep 2018 10:10:27 -0700 Subject: [PATCH 0220/1815] Fix license type for go-ole --- docs/LICENSE_OF_DEPENDENCIES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index f5496fc2e..2333650fc 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -37,7 +37,7 @@ following works: - github.com/go-logfmt/logfmt [MIT](https://github.com/go-logfmt/logfmt/blob/master/LICENSE) - github.com/gorilla/mux [BSD](https://github.com/gorilla/mux/blob/master/LICENSE) - github.com/go-ini/ini [APACHE](https://github.com/go-ini/ini/blob/master/LICENSE) -- github.com/go-ole/go-ole [MPL](http://mattn.mit-license.org/2013) +- github.com/go-ole/go-ole [MIT](https://github.com/go-ole/go-ole/blob/master/LICENSE) - github.com/go-sql-driver/mysql [MPL](https://github.com/go-sql-driver/mysql/blob/master/LICENSE) - github.com/hailocab/go-hostpool [MIT](https://github.com/hailocab/go-hostpool/blob/master/LICENSE) - github.com/hashicorp/consul [MPL](https://github.com/hashicorp/consul/blob/master/LICENSE) From 7d97ae64218c63d3960121173c57bda3ec929253 Mon Sep 17 00:00:00 2001 From: Lee Jaeyong Date: Fri, 28 Sep 2018 09:26:36 +0900 Subject: [PATCH 0221/1815] Query servers in parallel in dns_query input (#4754) --- plugins/inputs/dns_query/dns_query.go | 40 +++++++++++++++------------ 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/plugins/inputs/dns_query/dns_query.go b/plugins/inputs/dns_query/dns_query.go index 98fcc09c2..ec524d95b 100644 --- a/plugins/inputs/dns_query/dns_query.go +++ b/plugins/inputs/dns_query/dns_query.go @@ -5,6 +5,7 @@ import ( "fmt" "net" "strconv" + "sync" "time" "github.com/miekg/dns" @@ -70,32 +71,37 @@ func (d *DnsQuery) Description() string { return "Query given DNS server and gives statistics" } func (d *DnsQuery) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup d.setDefaultValues() for _, domain := range d.Domains { for _, server := range d.Servers { - fields := make(map[string]interface{}, 2) - tags := map[string]string{ - "server": server, - "domain": domain, - "record_type": d.RecordType, - } + wg.Add(1) + go func(domain, server string) { + fields := make(map[string]interface{}, 2) + tags := map[string]string{ + "server": server, + "domain": domain, + "record_type": d.RecordType, + } - dnsQueryTime, err := d.getDnsQueryTime(domain, server) - if err == nil { - setResult(Success, fields, tags) - fields["query_time_ms"] = dnsQueryTime - } else if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() { - setResult(Timeout, fields, tags) - } else if err != nil { - setResult(Error, fields, tags) - acc.AddError(err) - } + dnsQueryTime, err := d.getDnsQueryTime(domain, server) + if err == nil { + setResult(Success, fields, tags) + fields["query_time_ms"] = dnsQueryTime + } else if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() { + setResult(Timeout, fields, tags) + } else if err != nil { + setResult(Error, fields, tags) + acc.AddError(err) + } - acc.AddFields("dns_query", fields, tags) + acc.AddFields("dns_query", fields, tags) + }(domain, server) } } + wg.Wait() return nil } From c369c1989f541ac0e7c76d5c7610de0cf5f940ee Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Thu, 27 Sep 2018 20:29:17 -0400 Subject: [PATCH 0222/1815] Fix panic during network error in vsphere input (#4765) --- 
plugins/inputs/vsphere/endpoint.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index cad4dec00..22b479a5c 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -661,7 +661,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc err := make(multiError, 0) wp.Drain(ctx, func(ctx context.Context, in interface{}) bool { if in != nil { - mux.Unlock() + mux.Lock() defer mux.Unlock() err = append(err, in.(error)) return false From 008ee617cb6959b61a2dea35d4ab2de35f74541d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 27 Sep 2018 17:55:31 -0700 Subject: [PATCH 0223/1815] Update changelog. --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 43a97dfab..f8d3aa624 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ### Features - [#4686](https://github.com/influxdata/telegraf/pull/4686): Add replace function to strings processor. +- [#4754](https://github.com/influxdata/telegraf/pull/4754): Query servers in parallel in dns_query input. ## v1.8.1 [unreleased] @@ -12,6 +13,7 @@ - [#4723](https://github.com/influxdata/telegraf/issues/4723): Improve performance in basicstats aggregator. - [#4747](https://github.com/influxdata/telegraf/pull/4723): Add hostname to TLS config for SNI support. - [#4675](https://github.com/influxdata/telegraf/issues/4675): Don't add tags with empty values to opentsdb output. +- [#4765](https://github.com/influxdata/telegraf/pull/4765): Fix panic during network error in vsphere input. ## v1.8 [2018-09-21] From 6b7d64f1d6057f71e71a49648cec36049ae0532f Mon Sep 17 00:00:00 2001 From: Lee Jaeyong Date: Fri, 28 Sep 2018 10:41:24 +0900 Subject: [PATCH 0224/1815] Add ability to define a custom service name when installing as a Windows service (#4753) --- cmd/telegraf/telegraf.go | 3 ++- docs/WINDOWS_SERVICE.md | 10 ++++++++++ internal/usage_windows.go | 4 ++++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 5b7295d6d..93336ffb4 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -55,6 +55,7 @@ var fUsage = flag.String("usage", "", "print usage for a plugin, ie, 'telegraf --usage mysql'") var fService = flag.String("service", "", "operate on the service (windows only)") +var fServiceName = flag.String("service-name", "telegraf", "service name (windows only)") var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)") var ( @@ -327,7 +328,7 @@ func main() { if runtime.GOOS == "windows" && !(*fRunAsConsole) { svcConfig := &service.Config{ - Name: "telegraf", + Name: *fServiceName, DisplayName: "Telegraf Data Collector Service", Description: "Collects data using a series of plugins and publishes it to" + "another series of plugins.", diff --git a/docs/WINDOWS_SERVICE.md b/docs/WINDOWS_SERVICE.md index 886887d52..51ce6a7a6 100644 --- a/docs/WINDOWS_SERVICE.md +++ b/docs/WINDOWS_SERVICE.md @@ -46,6 +46,16 @@ Telegraf can manage its own service through the --service flag: | `telegraf.exe --service start` | Start the telegraf service | | `telegraf.exe --service stop` | Stop the telegraf service | +## Install multiple services + +You can install multiple telegraf instances with --service-name flag: + +``` + > C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-1 + > C:\"Program Files"\Telegraf\telegraf.exe 
--service install --service-name telegraf-2 + > C:\"Program Files"\Telegraf\telegraf.exe --service uninstall --service-name telegraf-1 +``` + Troubleshooting common error #1067 When installing as service in Windows, always double check to specify full path of the config file, otherwise windows service will fail to start diff --git a/internal/usage_windows.go b/internal/usage_windows.go index db5e492a9..0bdd73026 100644 --- a/internal/usage_windows.go +++ b/internal/usage_windows.go @@ -33,6 +33,7 @@ The commands & flags are: --console run as console application (windows only) --service operate on the service (windows only) + --service-name service name (windows only) Examples: @@ -59,4 +60,7 @@ Examples: # install telegraf service telegraf --service install --config "C:\Program Files\Telegraf\telegraf.conf" + + # install telegraf service with custom name + telegraf --service install --service-name=my-telegraf ` From cc64b14ab41d33844917c38c6374074f133029d1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 27 Sep 2018 18:42:13 -0700 Subject: [PATCH 0225/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f8d3aa624..5c517ab6a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ - [#4686](https://github.com/influxdata/telegraf/pull/4686): Add replace function to strings processor. - [#4754](https://github.com/influxdata/telegraf/pull/4754): Query servers in parallel in dns_query input. +- [#4753](https://github.com/influxdata/telegraf/pull/4753): Add ability to define a custom service name when installing as a Windows service. ## v1.8.1 [unreleased] From 7553c8fd1362618a11f04c5578e8c9feb4e9c73d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 28 Sep 2018 14:48:20 -0700 Subject: [PATCH 0226/1815] Remove metric recreation when filtering (#4767) --- agent/accumulator.go | 56 ++--- agent/accumulator_test.go | 26 +- docs/CONFIGURATION.md | 127 ++++++---- internal/config/config.go | 15 -- internal/models/filter.go | 129 +++++----- internal/models/filter_test.go | 176 +++++++------ internal/models/makemetric.go | 80 ++---- internal/models/running_aggregator.go | 62 ++--- internal/models/running_aggregator_test.go | 82 +++--- internal/models/running_input.go | 37 ++- internal/models/running_input_test.go | 98 ++++---- internal/models/running_output.go | 42 ++-- internal/models/running_output_test.go | 17 -- internal/models/running_processor.go | 18 +- internal/models/running_processor_test.go | 276 ++++++++++++++------- 15 files changed, 635 insertions(+), 606 deletions(-) diff --git a/agent/accumulator.go b/agent/accumulator.go index 51c213a81..05e99350b 100644 --- a/agent/accumulator.go +++ b/agent/accumulator.go @@ -5,6 +5,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/selfstat" ) @@ -14,13 +15,13 @@ var ( type MetricMaker interface { Name() string - MakeMetric( - measurement string, - fields map[string]interface{}, - tags map[string]string, - mType telegraf.ValueType, - t time.Time, - ) telegraf.Metric + MakeMetric(metric telegraf.Metric) telegraf.Metric +} + +type accumulator struct { + maker MetricMaker + metrics chan telegraf.Metric + precision time.Duration } func NewAccumulator( @@ -35,23 +36,13 @@ func NewAccumulator( return &acc } -type accumulator struct { - metrics chan telegraf.Metric - - maker MetricMaker - - precision time.Duration -} - func (ac *accumulator) AddFields( measurement string, fields 
map[string]interface{}, tags map[string]string, t ...time.Time, ) { - if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Untyped, ac.getTime(t)); m != nil { - ac.metrics <- m - } + ac.addMetric(measurement, tags, fields, telegraf.Untyped, t...) } func (ac *accumulator) AddGauge( @@ -60,9 +51,7 @@ func (ac *accumulator) AddGauge( tags map[string]string, t ...time.Time, ) { - if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Gauge, ac.getTime(t)); m != nil { - ac.metrics <- m - } + ac.addMetric(measurement, tags, fields, telegraf.Gauge, t...) } func (ac *accumulator) AddCounter( @@ -71,9 +60,7 @@ func (ac *accumulator) AddCounter( tags map[string]string, t ...time.Time, ) { - if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Counter, ac.getTime(t)); m != nil { - ac.metrics <- m - } + ac.addMetric(measurement, tags, fields, telegraf.Counter, t...) } func (ac *accumulator) AddSummary( @@ -82,9 +69,7 @@ func (ac *accumulator) AddSummary( tags map[string]string, t ...time.Time, ) { - if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Summary, ac.getTime(t)); m != nil { - ac.metrics <- m - } + ac.addMetric(measurement, tags, fields, telegraf.Summary, t...) } func (ac *accumulator) AddHistogram( @@ -93,7 +78,21 @@ func (ac *accumulator) AddHistogram( tags map[string]string, t ...time.Time, ) { - if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Histogram, ac.getTime(t)); m != nil { + ac.addMetric(measurement, tags, fields, telegraf.Histogram, t...) +} + +func (ac *accumulator) addMetric( + measurement string, + tags map[string]string, + fields map[string]interface{}, + tp telegraf.ValueType, + t ...time.Time, +) { + m, err := metric.New(measurement, tags, fields, ac.getTime(t), tp) + if err != nil { + return + } + if m := ac.maker.MakeMetric(m); m != nil { ac.metrics <- m } } @@ -105,7 +104,6 @@ func (ac *accumulator) AddError(err error) { return } NErrors.Incr(1) - //TODO suppress/throttle consecutive duplicate errors? log.Printf("E! 
Error in plugin [%s]: %s", ac.maker.Name(), err) } diff --git a/agent/accumulator_test.go b/agent/accumulator_test.go index 22fa3e409..2bb08920f 100644 --- a/agent/accumulator_test.go +++ b/agent/accumulator_test.go @@ -9,7 +9,6 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -134,26 +133,7 @@ type TestMetricMaker struct { func (tm *TestMetricMaker) Name() string { return "TestPlugin" } -func (tm *TestMetricMaker) MakeMetric( - measurement string, - fields map[string]interface{}, - tags map[string]string, - mType telegraf.ValueType, - t time.Time, -) telegraf.Metric { - switch mType { - case telegraf.Untyped: - if m, err := metric.New(measurement, tags, fields, t); err == nil { - return m - } - case telegraf.Counter: - if m, err := metric.New(measurement, tags, fields, t, telegraf.Counter); err == nil { - return m - } - case telegraf.Gauge: - if m, err := metric.New(measurement, tags, fields, t, telegraf.Gauge); err == nil { - return m - } - } - return nil + +func (tm *TestMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric { + return metric } diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 39825376d..0698721e4 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -1,11 +1,14 @@ -# Telegraf Configuration +# Configuration -You can see the latest config file with all available plugins here: -[telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf) +Telegraf's configuration file is written using +[TOML](https://github.com/toml-lang/toml#toml). + +[View the telegraf.conf config file with all available +plugins](/etc/telegraf.conf). ## Generating a Configuration File -A default Telegraf config file can be auto-generated by telegraf: +A default config file can be generated by telegraf: ``` telegraf config > telegraf.conf @@ -18,7 +21,7 @@ To generate a file with specific inputs and outputs, you can use the telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config ``` -## Environment Variables +### Environment Variables Environment variables can be used anywhere in the config file, simply prepend them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), @@ -27,7 +30,7 @@ for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) When using the `.deb` or `.rpm` packages, you can define environment variables in the `/etc/default/telegraf` file. -## Configuration file locations +### Configuration file locations The location of the configuration file can be set via the `--config` command line flag. @@ -40,13 +43,13 @@ On most systems, the default locations are `/etc/telegraf/telegraf.conf` for the main configuration file and `/etc/telegraf/telegraf.d` for the directory of configuration files. -# Global Tags +### Global Tags Global tags can be specified in the `[global_tags]` section of the config file in key="value" format. All metrics being gathered on this host will be tagged with the tags specified here. -## Agent Configuration +### Agent Configuration Telegraf has a few options you can configure under the `[agent]` section of the config. @@ -85,7 +88,7 @@ ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s * **hostname**: Override default hostname, if empty use os.Hostname(). * **omit_hostname**: If true, do no set the "host" tag in the telegraf agent. 
-## Input Configuration +### Input Configuration The following config parameters are available for all inputs: @@ -98,15 +101,15 @@ you can configure that here. * **name_suffix**: Specifies a suffix to attach to the measurement name. * **tags**: A map of tags to apply to a specific input's measurements. -The [measurement filtering](#measurement-filtering) parameters can be used to -limit what metrics are emitted from the input plugin. +The [metric filtering](#metric-filtering) parameters can be used to limit what metrics are +emitted from the input plugin. -## Output Configuration +### Output Configuration -The [measurement filtering](#measurement-filtering) parameters can be used to -limit what metrics are emitted from the output plugin. +The [metric filtering](#metric-filtering) parameters can be used to limit what metrics are +emitted from the output plugin. -## Aggregator Configuration +### Aggregator Configuration The following config parameters are available for all aggregators: @@ -125,63 +128,77 @@ aggregator and will not get sent to the output plugins. * **name_suffix**: Specifies a suffix to attach to the measurement name. * **tags**: A map of tags to apply to a specific input's measurements. -The [measurement filtering](#measurement-filtering) parameters can be used to -limit what metrics are handled by the aggregator. Excluded metrics are passed -downstream to the next aggregator. +The [metric filtering](#metric-filtering) parameters can be used to limit what metrics are +handled by the aggregator. Excluded metrics are passed downstream to the next +aggregator. -## Processor Configuration +### Processor Configuration The following config parameters are available for all processors: * **order**: This is the order in which the processor(s) get executed. If this is not specified then processor execution order will be random. -The [measurement filtering](#measurement-filtering) parameters can be used -to limit what metrics are handled by the processor. Excluded metrics are -passed downstream to the next processor. +The [metric filtering](#metric-filtering) parameters can be used to limit what metrics are +handled by the processor. Excluded metrics are passed downstream to the next +processor. + +### Metric Filtering -#### Measurement Filtering +Metric filtering can be configured per plugin on any input, output, processor, +and aggregator plugin. Filters fall under two categories: Selectors and +Modifiers. -Filters can be configured per input, output, processor, or aggregator, -see below for examples. +#### Selectors -* **namepass**: -An array of glob pattern strings. Only points whose measurement name matches +Selector filters include or exclude entire metrics. When a metric is excluded +from an Input or an Output plugin, the metric is dropped. If a metric is +excluded from a Processor or Aggregator plugin, it skips the plugin and is +sent onwards to the next stage of processing. + +- **namepass**: +An array of glob pattern strings. Only metrics whose measurement name matches a pattern in this list are emitted. -* **namedrop**: -The inverse of `namepass`. If a match is found the point is discarded. This -is tested on points after they have passed the `namepass` test. -* **fieldpass**: -An array of glob pattern strings. Only fields whose field key matches a -pattern in this list are emitted. -* **fielddrop**: -The inverse of `fieldpass`. Fields with a field key matching one of the -patterns will be discarded from the point.
This is tested on points after -they have passed the `fieldpass` test. -* **tagpass**: -A table mapping tag keys to arrays of glob pattern strings. Only points + +- **namedrop**: +The inverse of `namepass`. If a match is found the metric is discarded. This +is tested on metrics after they have passed the `namepass` test. + +- **tagpass**: +A table mapping tag keys to arrays of glob pattern strings. Only metrics that contain a tag key in the table and a tag value matching one of its patterns is emitted. -* **tagdrop**: -The inverse of `tagpass`. If a match is found the point is discarded. This -is tested on points after they have passed the `tagpass` test. -* **taginclude**: + +- **tagdrop**: +The inverse of `tagpass`. If a match is found the metric is discarded. This +is tested on metrics after they have passed the `tagpass` test. + +#### Modifiers + +Modifier filters remove tags and fields from a metric. If all fields are +removed the metric is removed. + +- **fieldpass**: +An array of glob pattern strings. Only fields whose field key matches a +pattern in this list are emitted. + +- **fielddrop**: +The inverse of `fieldpass`. Fields with a field key matching one of the +patterns will be discarded from the metric. This is tested on metrics after +they have passed the `fieldpass` test. + +- **taginclude**: An array of glob pattern strings. Only tags with a tag key matching one of the patterns are emitted. In contrast to `tagpass`, which will pass an entire -point based on its tag, `taginclude` removes all non matching tags from the -point. This filter can be used on both inputs & outputs, but it is -_recommended_ to be used on inputs, as it is more efficient to filter out tags -at the ingestion point. -* **tagexclude**: +metric based on its tag, `taginclude` removes all non matching tags from the +metric. + +- **tagexclude**: The inverse of `taginclude`. Tags with a tag key matching one of the patterns -will be discarded from the point. +will be discarded from the metric. -**NOTE** Due to the way TOML is parsed, `tagpass` and `tagdrop` parameters -must be defined at the _end_ of the plugin definition, otherwise subsequent -plugin config options will be interpreted as part of the tagpass/tagdrop -tables. - -#### Input Configuration Examples +### Input Configuration Examples This is a full working config that will output CPU data to an InfluxDB instance at 192.168.59.103:8086, tagging measurements with dc="denver-1". 
It will output diff --git a/internal/config/config.go b/internal/config/config.go index d62536cf9..3d0510978 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -884,14 +884,6 @@ func (c *Config) addInput(name string, table *ast.Table) error { // builds the filter and returns a // models.AggregatorConfig to be inserted into models.RunningAggregator func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) { - unsupportedFields := []string{"tagexclude", "taginclude"} - for _, field := range unsupportedFields { - if _, ok := tbl.Fields[field]; ok { - return nil, fmt.Errorf("%s is not supported for aggregator plugins (%s).", - field, name) - } - } - conf := &models.AggregatorConfig{ Name: name, Delay: time.Millisecond * 100, @@ -989,13 +981,6 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err // models.ProcessorConfig to be inserted into models.RunningProcessor func buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) { conf := &models.ProcessorConfig{Name: name} - unsupportedFields := []string{"tagexclude", "taginclude", "fielddrop", "fieldpass"} - for _, field := range unsupportedFields { - if _, ok := tbl.Fields[field]; ok { - return nil, fmt.Errorf("%s is not supported for processor plugins (%s).", - field, name) - } - } if node, ok := tbl.Fields["order"]; ok { if kv, ok := node.(*ast.KeyValue); ok { diff --git a/internal/models/filter.go b/internal/models/filter.go index 2848ccf09..664a6ff06 100644 --- a/internal/models/filter.go +++ b/internal/models/filter.go @@ -3,6 +3,7 @@ package models import ( "fmt" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" ) @@ -93,45 +94,35 @@ func (f *Filter) Compile() error { return nil } -// Apply applies the filter to the given measurement name, fields map, and -// tags map. It will return false if the metric should be "filtered out", and -// true if the metric should "pass". -// It will modify tags & fields in-place if they need to be deleted. -func (f *Filter) Apply( - measurement string, - fields map[string]interface{}, - tags map[string]string, -) bool { +// Select returns true if the metric matches according to the +// namepass/namedrop and tagpass/tagdrop filters. The metric is not modified. +func (f *Filter) Select(metric telegraf.Metric) bool { if !f.isActive { return true } - // check if the measurement name should pass - if !f.shouldNamePass(measurement) { + if !f.shouldNamePass(metric.Name()) { return false } - // check if the tags should pass - if !f.shouldTagsPass(tags) { + if !f.shouldTagsPass(metric.TagList()) { return false } - // filter fields - for fieldkey, _ := range fields { - if !f.shouldFieldPass(fieldkey) { - delete(fields, fieldkey) - } - } - if len(fields) == 0 { - return false - } - - // filter tags - f.filterTags(tags) - return true } +// Modify removes any tags and fields from the metric according to the +// fieldpass/fielddrop and taginclude/tagexclude filters. 
+func (f *Filter) Modify(metric telegraf.Metric) { + if !f.isActive { + return + } + + f.filterFields(metric) + f.filterTags(metric) +} + // IsActive checking if filter is active func (f *Filter) IsActive() bool { return f.isActive @@ -140,7 +131,6 @@ func (f *Filter) IsActive() bool { // shouldNamePass returns true if the metric should pass, false if should drop // based on the drop/pass filter parameters func (f *Filter) shouldNamePass(key string) bool { - pass := func(f *Filter) bool { if f.namePass.Match(key) { return true @@ -169,44 +159,29 @@ func (f *Filter) shouldNamePass(key string) bool { // shouldFieldPass returns true if the metric should pass, false if should drop // based on the drop/pass filter parameters func (f *Filter) shouldFieldPass(key string) bool { - - pass := func(f *Filter) bool { - if f.fieldPass.Match(key) { - return true - } - return false - } - - drop := func(f *Filter) bool { - if f.fieldDrop.Match(key) { - return false - } - return true - } - if f.fieldPass != nil && f.fieldDrop != nil { - return pass(f) && drop(f) + return f.fieldPass.Match(key) && !f.fieldDrop.Match(key) } else if f.fieldPass != nil { - return pass(f) + return f.fieldPass.Match(key) } else if f.fieldDrop != nil { - return drop(f) + return !f.fieldDrop.Match(key) } - return true } // shouldTagsPass returns true if the metric should pass, false if should drop // based on the tagdrop/tagpass filter parameters -func (f *Filter) shouldTagsPass(tags map[string]string) bool { - +func (f *Filter) shouldTagsPass(tags []*telegraf.Tag) bool { pass := func(f *Filter) bool { for _, pat := range f.TagPass { if pat.filter == nil { continue } - if tagval, ok := tags[pat.Name]; ok { - if pat.filter.Match(tagval) { - return true + for _, tag := range tags { + if tag.Key == pat.Name { + if pat.filter.Match(tag.Value) { + return true + } } } } @@ -218,9 +193,11 @@ func (f *Filter) shouldTagsPass(tags map[string]string) bool { if pat.filter == nil { continue } - if tagval, ok := tags[pat.Name]; ok { - if pat.filter.Match(tagval) { - return false + for _, tag := range tags { + if tag.Key == pat.Name { + if pat.filter.Match(tag.Value) { + return false + } } } } @@ -242,22 +219,42 @@ func (f *Filter) shouldTagsPass(tags map[string]string) bool { return true } -// Apply TagInclude and TagExclude filters. -// modifies the tags map in-place. -func (f *Filter) filterTags(tags map[string]string) { - if f.tagInclude != nil { - for k, _ := range tags { - if !f.tagInclude.Match(k) { - delete(tags, k) - } +// filterFields removes fields according to fieldpass/fielddrop. +func (f *Filter) filterFields(metric telegraf.Metric) { + filterKeys := []string{} + for _, field := range metric.FieldList() { + if !f.shouldFieldPass(field.Key) { + filterKeys = append(filterKeys, field.Key) } } - if f.tagExclude != nil { - for k, _ := range tags { - if f.tagExclude.Match(k) { - delete(tags, k) + for _, key := range filterKeys { + metric.RemoveField(key) + } +} + +// filterTags removes tags according to taginclude/tagexclude. 
+func (f *Filter) filterTags(metric telegraf.Metric) { + filterKeys := []string{} + if f.tagInclude != nil { + for _, tag := range metric.TagList() { + if !f.tagInclude.Match(tag.Key) { + filterKeys = append(filterKeys, tag.Key) } } } + for _, key := range filterKeys { + metric.RemoveTag(key) + } + + if f.tagExclude != nil { + for _, tag := range metric.TagList() { + if f.tagExclude.Match(tag.Key) { + filterKeys = append(filterKeys, tag.Key) + } + } + } + for _, key := range filterKeys { + metric.RemoveTag(key) + } } diff --git a/internal/models/filter_test.go b/internal/models/filter_test.go index 46f16e835..16a147cad 100644 --- a/internal/models/filter_test.go +++ b/internal/models/filter_test.go @@ -2,17 +2,24 @@ package models import ( "testing" + "time" - "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/stretchr/testify/require" ) func TestFilter_ApplyEmpty(t *testing.T) { f := Filter{} require.NoError(t, f.Compile()) - assert.False(t, f.IsActive()) + require.False(t, f.IsActive()) - assert.True(t, f.Apply("m", map[string]interface{}{"value": int64(1)}, map[string]string{})) + m, err := metric.New("m", + map[string]string{}, + map[string]interface{}{"value": int64(1)}, + time.Now()) + require.NoError(t, err) + require.True(t, f.Select(m)) } func TestFilter_ApplyTagsDontPass(t *testing.T) { @@ -27,11 +34,14 @@ func TestFilter_ApplyTagsDontPass(t *testing.T) { } require.NoError(t, f.Compile()) require.NoError(t, f.Compile()) - assert.True(t, f.IsActive()) + require.True(t, f.IsActive()) - assert.False(t, f.Apply("m", + m, err := metric.New("m", + map[string]string{"cpu": "cpu-total"}, map[string]interface{}{"value": int64(1)}, - map[string]string{"cpu": "cpu-total"})) + time.Now()) + require.NoError(t, err) + require.False(t, f.Select(m)) } func TestFilter_ApplyDeleteFields(t *testing.T) { @@ -40,11 +50,19 @@ func TestFilter_ApplyDeleteFields(t *testing.T) { } require.NoError(t, f.Compile()) require.NoError(t, f.Compile()) - assert.True(t, f.IsActive()) + require.True(t, f.IsActive()) - fields := map[string]interface{}{"value": int64(1), "value2": int64(2)} - assert.True(t, f.Apply("m", fields, nil)) - assert.Equal(t, map[string]interface{}{"value2": int64(2)}, fields) + m, err := metric.New("m", + map[string]string{}, + map[string]interface{}{ + "value": int64(1), + "value2": int64(2), + }, + time.Now()) + require.NoError(t, err) + require.True(t, f.Select(m)) + f.Modify(m) + require.Equal(t, map[string]interface{}{"value2": int64(2)}, m.Fields()) } func TestFilter_ApplyDeleteAllFields(t *testing.T) { @@ -53,10 +71,19 @@ func TestFilter_ApplyDeleteAllFields(t *testing.T) { } require.NoError(t, f.Compile()) require.NoError(t, f.Compile()) - assert.True(t, f.IsActive()) + require.True(t, f.IsActive()) - fields := map[string]interface{}{"value": int64(1), "value2": int64(2)} - assert.False(t, f.Apply("m", fields, nil)) + m, err := metric.New("m", + map[string]string{}, + map[string]interface{}{ + "value": int64(1), + "value2": int64(2), + }, + time.Now()) + require.NoError(t, err) + require.True(t, f.Select(m)) + f.Modify(m) + require.Len(t, m.FieldList(), 0) } func TestFilter_Empty(t *testing.T) { @@ -230,20 +257,20 @@ func TestFilter_TagPass(t *testing.T) { } require.NoError(t, f.Compile()) - passes := []map[string]string{ - {"cpu": "cpu-total"}, - {"cpu": "cpu-0"}, - {"cpu": "cpu-1"}, - {"cpu": "cpu-2"}, - {"mem": "mem_free"}, + passes := [][]*telegraf.Tag{ + []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: 
"cpu-total"}}, + []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-0"}}, + []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-1"}}, + []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-2"}}, + []*telegraf.Tag{&telegraf.Tag{Key: "mem", Value: "mem_free"}}, } - drops := []map[string]string{ - {"cpu": "cputotal"}, - {"cpu": "cpu0"}, - {"cpu": "cpu1"}, - {"cpu": "cpu2"}, - {"mem": "mem_used"}, + drops := [][]*telegraf.Tag{ + []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cputotal"}}, + []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu0"}}, + []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu1"}}, + []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu2"}}, + []*telegraf.Tag{&telegraf.Tag{Key: "mem", Value: "mem_used"}}, } for _, tags := range passes { @@ -274,20 +301,20 @@ func TestFilter_TagDrop(t *testing.T) { } require.NoError(t, f.Compile()) - drops := []map[string]string{ - {"cpu": "cpu-total"}, - {"cpu": "cpu-0"}, - {"cpu": "cpu-1"}, - {"cpu": "cpu-2"}, - {"mem": "mem_free"}, + drops := [][]*telegraf.Tag{ + []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-total"}}, + []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-0"}}, + []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-1"}}, + []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-2"}}, + []*telegraf.Tag{&telegraf.Tag{Key: "mem", Value: "mem_free"}}, } - passes := []map[string]string{ - {"cpu": "cputotal"}, - {"cpu": "cpu0"}, - {"cpu": "cpu1"}, - {"cpu": "cpu2"}, - {"mem": "mem_used"}, + passes := [][]*telegraf.Tag{ + []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cputotal"}}, + []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu0"}}, + []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu1"}}, + []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu2"}}, + []*telegraf.Tag{&telegraf.Tag{Key: "mem", Value: "mem_used"}}, } for _, tags := range passes { @@ -304,58 +331,70 @@ func TestFilter_TagDrop(t *testing.T) { } func TestFilter_FilterTagsNoMatches(t *testing.T) { - pretags := map[string]string{ - "host": "localhost", - "mytag": "foobar", - } + m, err := metric.New("m", + map[string]string{ + "host": "localhost", + "mytag": "foobar", + }, + map[string]interface{}{"value": int64(1)}, + time.Now()) + require.NoError(t, err) f := Filter{ TagExclude: []string{"nomatch"}, } require.NoError(t, f.Compile()) - f.filterTags(pretags) - assert.Equal(t, map[string]string{ + f.filterTags(m) + require.Equal(t, map[string]string{ "host": "localhost", "mytag": "foobar", - }, pretags) + }, m.Tags()) f = Filter{ TagInclude: []string{"nomatch"}, } require.NoError(t, f.Compile()) - f.filterTags(pretags) - assert.Equal(t, map[string]string{}, pretags) + f.filterTags(m) + require.Equal(t, map[string]string{}, m.Tags()) } func TestFilter_FilterTagsMatches(t *testing.T) { - pretags := map[string]string{ - "host": "localhost", - "mytag": "foobar", - } + m, err := metric.New("m", + map[string]string{ + "host": "localhost", + "mytag": "foobar", + }, + map[string]interface{}{"value": int64(1)}, + time.Now()) + require.NoError(t, err) f := Filter{ TagExclude: []string{"ho*"}, } require.NoError(t, f.Compile()) - f.filterTags(pretags) - assert.Equal(t, map[string]string{ + f.filterTags(m) + require.Equal(t, map[string]string{ "mytag": "foobar", - }, pretags) + }, m.Tags()) - pretags = map[string]string{ - "host": "localhost", - "mytag": "foobar", - } + m, err = metric.New("m", + map[string]string{ + "host": "localhost", + "mytag": "foobar", + }, + map[string]interface{}{"value": int64(1)}, + time.Now()) + require.NoError(t, err) 
f = Filter{ TagInclude: []string{"my*"}, } require.NoError(t, f.Compile()) - f.filterTags(pretags) - assert.Equal(t, map[string]string{ + f.filterTags(m) + require.Equal(t, map[string]string{ "mytag": "foobar", - }, pretags) + }, m.Tags()) } // TestFilter_FilterNamePassAndDrop used for check case when @@ -374,7 +413,7 @@ func TestFilter_FilterNamePassAndDrop(t *testing.T) { require.NoError(t, f.Compile()) for i, name := range inputData { - assert.Equal(t, f.shouldNamePass(name), expectedResult[i]) + require.Equal(t, f.shouldNamePass(name), expectedResult[i]) } } @@ -394,7 +433,7 @@ func TestFilter_FilterFieldPassAndDrop(t *testing.T) { require.NoError(t, f.Compile()) for i, field := range inputData { - assert.Equal(t, f.shouldFieldPass(field), expectedResult[i]) + require.Equal(t, f.shouldFieldPass(field), expectedResult[i]) } } @@ -402,12 +441,11 @@ func TestFilter_FilterFieldPassAndDrop(t *testing.T) { // both parameters were defined // see: https://github.com/influxdata/telegraf/issues/2860 func TestFilter_FilterTagsPassAndDrop(t *testing.T) { - - inputData := []map[string]string{ - {"tag1": "1", "tag2": "3"}, - {"tag1": "1", "tag2": "2"}, - {"tag1": "2", "tag2": "1"}, - {"tag1": "4", "tag2": "1"}, + inputData := [][]*telegraf.Tag{ + []*telegraf.Tag{&telegraf.Tag{Key: "tag1", Value: "1"}, &telegraf.Tag{Key: "tag2", Value: "3"}}, + []*telegraf.Tag{&telegraf.Tag{Key: "tag1", Value: "1"}, &telegraf.Tag{Key: "tag2", Value: "2"}}, + []*telegraf.Tag{&telegraf.Tag{Key: "tag1", Value: "2"}, &telegraf.Tag{Key: "tag2", Value: "1"}}, + []*telegraf.Tag{&telegraf.Tag{Key: "tag1", Value: "4"}, &telegraf.Tag{Key: "tag2", Value: "1"}}, } expectedResult := []bool{false, true, false, false} @@ -438,7 +476,7 @@ func TestFilter_FilterTagsPassAndDrop(t *testing.T) { require.NoError(t, f.Compile()) for i, tag := range inputData { - assert.Equal(t, f.shouldTagsPass(tag), expectedResult[i]) + require.Equal(t, f.shouldTagsPass(tag), expectedResult[i]) } } diff --git a/internal/models/makemetric.go b/internal/models/makemetric.go index b74e236cd..29ef5f452 100644 --- a/internal/models/makemetric.go +++ b/internal/models/makemetric.go @@ -1,86 +1,42 @@ package models import ( - "log" - "time" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" ) -// makemetric is used by both RunningAggregator & RunningInput -// to make metrics. -// nameOverride: override the name of the measurement being made. -// namePrefix: add this prefix to each measurement name. -// nameSuffix: add this suffix to each measurement name. -// pluginTags: these are tags that are specific to this plugin. -// daemonTags: these are daemon-wide global tags, and get applied after pluginTags. -// filter: this is a filter to apply to each metric being made. -// applyFilter: if false, the above filter is not applied to each metric. -// This is used by Aggregators, because aggregators use filters -// on incoming metrics instead of on created metrics. -// TODO refactor this to not have such a huge func signature. +// Makemetric applies new metric plugin and agent measurement and tag +// settings. 
func makemetric( - measurement string, - fields map[string]interface{}, - tags map[string]string, + metric telegraf.Metric, nameOverride string, namePrefix string, nameSuffix string, - pluginTags map[string]string, - daemonTags map[string]string, - filter Filter, - applyFilter bool, - mType telegraf.ValueType, - t time.Time, + tags map[string]string, + globalTags map[string]string, ) telegraf.Metric { - if len(fields) == 0 || len(measurement) == 0 { - return nil - } - if tags == nil { - tags = make(map[string]string) + if len(nameOverride) != 0 { + metric.SetName(nameOverride) } - // Override measurement name if set - if len(nameOverride) != 0 { - measurement = nameOverride - } - // Apply measurement prefix and suffix if set if len(namePrefix) != 0 { - measurement = namePrefix + measurement + metric.AddPrefix(namePrefix) } if len(nameSuffix) != 0 { - measurement = measurement + nameSuffix + metric.AddSuffix(nameSuffix) } - // Apply plugin-wide tags if set - for k, v := range pluginTags { - if _, ok := tags[k]; !ok { - tags[k] = v + // Apply plugin-wide tags + for k, v := range tags { + if _, ok := metric.GetTag(k); !ok { + metric.AddTag(k, v) } } - // Apply daemon-wide tags if set - for k, v := range daemonTags { - if _, ok := tags[k]; !ok { - tags[k] = v + // Apply global tags + for k, v := range globalTags { + if _, ok := metric.GetTag(k); !ok { + metric.AddTag(k, v) } } - // Apply the metric filter(s) - // for aggregators, the filter does not get applied when the metric is made. - // instead, the filter is applied to metric incoming into the plugin. - // ie, it gets applied in the RunningAggregator.Apply function. - if applyFilter { - if ok := filter.Apply(measurement, fields, tags); !ok { - return nil - } - } - - m, err := metric.New(measurement, tags, fields, t, mType) - if err != nil { - log.Printf("Error adding point [%s]: %s\n", measurement, err.Error()) - return nil - } - - return m + return metric } diff --git a/internal/models/running_aggregator.go b/internal/models/running_aggregator.go index 8cb04e4f6..960fd3131 100644 --- a/internal/models/running_aggregator.go +++ b/internal/models/running_aggregator.go @@ -5,7 +5,6 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" ) type RunningAggregator struct { @@ -29,47 +28,32 @@ func NewRunningAggregator( } } -// AggregatorConfig containing configuration parameters for the running -// aggregator plugin. +// AggregatorConfig is the common config for all aggregators. type AggregatorConfig struct { - Name string + Name string + DropOriginal bool + Period time.Duration + Delay time.Duration - DropOriginal bool NameOverride string MeasurementPrefix string MeasurementSuffix string Tags map[string]string Filter Filter - - Period time.Duration - Delay time.Duration } func (r *RunningAggregator) Name() string { return "aggregators." + r.Config.Name } -func (r *RunningAggregator) MakeMetric( - measurement string, - fields map[string]interface{}, - tags map[string]string, - mType telegraf.ValueType, - t time.Time, -) telegraf.Metric { +func (r *RunningAggregator) MakeMetric(metric telegraf.Metric) telegraf.Metric { m := makemetric( - measurement, - fields, - tags, + metric, r.Config.NameOverride, r.Config.MeasurementPrefix, r.Config.MeasurementSuffix, r.Config.Tags, - nil, - r.Config.Filter, - false, - mType, - t, - ) + nil) if m != nil { m.SetAggregate(true) @@ -78,27 +62,23 @@ func (r *RunningAggregator) MakeMetric( return m } -// Add applies the given metric to the aggregator. 
-// Before applying to the plugin, it will run any defined filters on the metric. -// Apply returns true if the original metric should be dropped. -func (r *RunningAggregator) Add(in telegraf.Metric) bool { - if r.Config.Filter.IsActive() { - // check if the aggregator should apply this metric - name := in.Name() - fields := in.Fields() - tags := in.Tags() - t := in.Time() - if ok := r.Config.Filter.Apply(name, fields, tags); !ok { - // aggregator should not apply this metric - return false - } - - in, _ = metric.New(name, tags, fields, t) +// Add a metric to the aggregator and return true if the original metric +// should be dropped. +func (r *RunningAggregator) Add(metric telegraf.Metric) bool { + if ok := r.Config.Filter.Select(metric); !ok { + return false } - r.metrics <- in + r.Config.Filter.Modify(metric) + if len(metric.FieldList()) == 0 { + return r.Config.DropOriginal + } + + r.metrics <- metric + return r.Config.DropOriginal } + func (r *RunningAggregator) add(in telegraf.Metric) { r.a.Add(in) } diff --git a/internal/models/running_aggregator_test.go b/internal/models/running_aggregator_test.go index cf92fe675..34d513646 100644 --- a/internal/models/running_aggregator_test.go +++ b/internal/models/running_aggregator_test.go @@ -7,9 +7,11 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestAdd(t *testing.T) { @@ -25,13 +27,15 @@ func TestAdd(t *testing.T) { acc := testutil.Accumulator{} go ra.Run(&acc, make(chan struct{})) - m := ra.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, + m, err := metric.New("RITest", map[string]string{}, - telegraf.Untyped, + map[string]interface{}{ + "value": int64(101), + }, time.Now().Add(time.Millisecond*150), - ) + telegraf.Untyped) + require.NoError(t, err) + assert.False(t, ra.Add(m)) for { @@ -56,34 +60,37 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) { acc := testutil.Accumulator{} go ra.Run(&acc, make(chan struct{})) - // metric before current period - m := ra.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, + m, err := metric.New("RITest", map[string]string{}, - telegraf.Untyped, + map[string]interface{}{ + "value": int64(101), + }, time.Now().Add(-time.Hour), - ) + telegraf.Untyped) + require.NoError(t, err) + assert.False(t, ra.Add(m)) // metric after current period - m = ra.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, + m, err = metric.New("RITest", map[string]string{}, - telegraf.Untyped, + map[string]interface{}{ + "value": int64(101), + }, time.Now().Add(time.Hour), - ) + telegraf.Untyped) + require.NoError(t, err) assert.False(t, ra.Add(m)) // "now" metric - m = ra.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, + m, err = metric.New("RITest", map[string]string{}, - telegraf.Untyped, + map[string]interface{}{ + "value": int64(101), + }, time.Now().Add(time.Millisecond*50), - ) + telegraf.Untyped) + require.NoError(t, err) assert.False(t, ra.Add(m)) for { @@ -115,13 +122,14 @@ func TestAddAndPushOnePeriod(t *testing.T) { ra.Run(&acc, shutdown) }() - m := ra.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, + m, err := metric.New("RITest", map[string]string{}, - telegraf.Untyped, + map[string]interface{}{ + "value": int64(101), + }, time.Now().Add(time.Millisecond*100), - ) + telegraf.Untyped) + require.NoError(t, err) assert.False(t, ra.Add(m)) for { 
@@ -146,23 +154,25 @@ func TestAddDropOriginal(t *testing.T) { }) assert.NoError(t, ra.Config.Filter.Compile()) - m := ra.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, + m, err := metric.New("RITest", map[string]string{}, - telegraf.Untyped, + map[string]interface{}{ + "value": int64(101), + }, time.Now(), - ) + telegraf.Untyped) + require.NoError(t, err) assert.True(t, ra.Add(m)) // this metric name doesn't match the filter, so Add will return false - m2 := ra.MakeMetric( - "foobar", - map[string]interface{}{"value": int(101)}, + m2, err := metric.New("foobar", map[string]string{}, - telegraf.Untyped, + map[string]interface{}{ + "value": int64(101), + }, time.Now(), - ) + telegraf.Untyped) + require.NoError(t, err) assert.False(t, ra.Add(m2)) } diff --git a/internal/models/running_input.go b/internal/models/running_input.go index ffe0b5f59..fce2437ca 100644 --- a/internal/models/running_input.go +++ b/internal/models/running_input.go @@ -36,44 +36,39 @@ func NewRunningInput( } } -// InputConfig containing a name, interval, and filter +// InputConfig is the common config for all inputs. type InputConfig struct { - Name string + Name string + Interval time.Duration + NameOverride string MeasurementPrefix string MeasurementSuffix string Tags map[string]string Filter Filter - Interval time.Duration } func (r *RunningInput) Name() string { return "inputs." + r.Config.Name } -// MakeMetric either returns a metric, or returns nil if the metric doesn't -// need to be created (because of filtering, an error, etc.) -func (r *RunningInput) MakeMetric( - measurement string, - fields map[string]interface{}, - tags map[string]string, - mType telegraf.ValueType, - t time.Time, -) telegraf.Metric { +func (r *RunningInput) MakeMetric(metric telegraf.Metric) telegraf.Metric { + if ok := r.Config.Filter.Select(metric); !ok { + return nil + } + + r.Config.Filter.Modify(metric) + if len(metric.FieldList()) == 0 { + return nil + } + m := makemetric( - measurement, - fields, - tags, + metric, r.Config.NameOverride, r.Config.MeasurementPrefix, r.Config.MeasurementSuffix, r.Config.Tags, - r.defaultTags, - r.Config.Filter, - true, - mType, - t, - ) + r.defaultTags) if r.trace && m != nil { s := influx.NewSerializer() diff --git a/internal/models/running_input_test.go b/internal/models/running_input_test.go index 4d016851a..b83f75ea9 100644 --- a/internal/models/running_input_test.go +++ b/internal/models/running_input_test.go @@ -17,13 +17,13 @@ func TestMakeMetricNoFields(t *testing.T) { Name: "TestRunningInput", }) - m := ri.MakeMetric( - "RITest", - map[string]interface{}{}, + m, err := metric.New("RITest", map[string]string{}, - telegraf.Untyped, + map[string]interface{}{}, now, - ) + telegraf.Untyped) + m = ri.MakeMetric(m) + require.NoError(t, err) assert.Nil(t, m) } @@ -34,16 +34,16 @@ func TestMakeMetricNilFields(t *testing.T) { Name: "TestRunningInput", }) - m := ri.MakeMetric( - "RITest", + m, err := metric.New("RITest", + map[string]string{}, map[string]interface{}{ - "value": int(101), + "value": int64(101), "nil": nil, }, - map[string]string{}, - telegraf.Untyped, now, - ) + telegraf.Untyped) + require.NoError(t, err) + m = ri.MakeMetric(m) expected, err := metric.New("RITest", map[string]string{}, @@ -69,13 +69,15 @@ func TestMakeMetricWithPluginTags(t *testing.T) { ri.SetTrace(true) assert.Equal(t, true, ri.Trace()) - m := ri.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, - nil, - telegraf.Untyped, + m, err := metric.New("RITest", + map[string]string{}, + 
map[string]interface{}{ + "value": int64(101), + }, now, - ) + telegraf.Untyped) + require.NoError(t, err) + m = ri.MakeMetric(m) expected, err := metric.New("RITest", map[string]string{ @@ -104,13 +106,15 @@ func TestMakeMetricFilteredOut(t *testing.T) { assert.Equal(t, true, ri.Trace()) assert.NoError(t, ri.Config.Filter.Compile()) - m := ri.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, - nil, - telegraf.Untyped, + m, err := metric.New("RITest", + map[string]string{}, + map[string]interface{}{ + "value": int64(101), + }, now, - ) + telegraf.Untyped) + m = ri.MakeMetric(m) + require.NoError(t, err) assert.Nil(t, m) } @@ -126,13 +130,15 @@ func TestMakeMetricWithDaemonTags(t *testing.T) { ri.SetTrace(true) assert.Equal(t, true, ri.Trace()) - m := ri.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, + m, err := metric.New("RITest", map[string]string{}, - telegraf.Untyped, + map[string]interface{}{ + "value": int64(101), + }, now, - ) + telegraf.Untyped) + require.NoError(t, err) + m = ri.MakeMetric(m) expected, err := metric.New("RITest", map[string]string{ "foo": "bar", @@ -153,13 +159,15 @@ func TestMakeMetricNameOverride(t *testing.T) { NameOverride: "foobar", }) - m := ri.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, + m, err := metric.New("RITest", map[string]string{}, - telegraf.Untyped, + map[string]interface{}{ + "value": int64(101), + }, now, - ) + telegraf.Untyped) + require.NoError(t, err) + m = ri.MakeMetric(m) expected, err := metric.New("foobar", nil, map[string]interface{}{ @@ -178,13 +186,15 @@ func TestMakeMetricNamePrefix(t *testing.T) { MeasurementPrefix: "foobar_", }) - m := ri.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, + m, err := metric.New("RITest", map[string]string{}, - telegraf.Untyped, + map[string]interface{}{ + "value": int64(101), + }, now, - ) + telegraf.Untyped) + require.NoError(t, err) + m = ri.MakeMetric(m) expected, err := metric.New("foobar_RITest", nil, map[string]interface{}{ @@ -203,13 +213,15 @@ func TestMakeMetricNameSuffix(t *testing.T) { MeasurementSuffix: "_foobar", }) - m := ri.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, + m, err := metric.New("RITest", map[string]string{}, - telegraf.Untyped, + map[string]interface{}{ + "value": int64(101), + }, now, - ) + telegraf.Untyped) + require.NoError(t, err) + m = ri.MakeMetric(m) expected, err := metric.New("RITest_foobar", nil, map[string]interface{}{ diff --git a/internal/models/running_output.go b/internal/models/running_output.go index bad1f7659..0f2c138a6 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -7,7 +7,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/buffer" - "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/selfstat" ) @@ -42,6 +41,12 @@ type RunningOutput struct { writeMutex sync.Mutex } +// OutputConfig containing name and filter +type OutputConfig struct { + Name string + Filter Filter +} + func NewRunningOutput( name string, output telegraf.Output, @@ -95,36 +100,25 @@ func NewRunningOutput( // AddMetric adds a metric to the output. This function can also write cached // points if FlushBufferWhenFull is true. 
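+// Metrics are dropped when the output's filter does not select them, or
+// when applying the filter's modifications leaves them with no fields.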
-func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
-
-	if m == nil {
+func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
+	if ok := ro.Config.Filter.Select(metric); !ok {
+		ro.MetricsFiltered.Incr(1)
 		return
 	}
-	// Filter any tagexclude/taginclude parameters before adding metric
-	if ro.Config.Filter.IsActive() {
-		// In order to filter out tags, we need to create a new metric, since
-		// metrics are immutable once created.
-		name := m.Name()
-		tags := m.Tags()
-		fields := m.Fields()
-		t := m.Time()
-		tp := m.Type()
-		if ok := ro.Config.Filter.Apply(name, fields, tags); !ok {
-			ro.MetricsFiltered.Incr(1)
-			return
-		}
-		// error is not possible if creating from another metric, so ignore.
-		m, _ = metric.New(name, tags, fields, t, tp)
+
+	ro.Config.Filter.Modify(metric)
+	if len(metric.FieldList()) == 0 {
+		return
 	}
 
 	if output, ok := ro.Output.(telegraf.AggregatingOutput); ok {
 		ro.aggMutex.Lock()
-		output.Add(m)
+		output.Add(metric)
 		ro.aggMutex.Unlock()
 		return
 	}
 
-	ro.metrics.Add(m)
+	ro.metrics.Add(metric)
 	if ro.metrics.Len() == ro.MetricBatchSize {
 		batch := ro.metrics.Batch(ro.MetricBatchSize)
 		err := ro.write(batch)
@@ -206,9 +200,3 @@ func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
 	}
 	return err
 }
-
-// OutputConfig containing name and filter
-type OutputConfig struct {
-	Name   string
-	Filter Filter
-}
diff --git a/internal/models/running_output_test.go b/internal/models/running_output_test.go
index bd39f2f9b..c55334218 100644
--- a/internal/models/running_output_test.go
+++ b/internal/models/running_output_test.go
@@ -75,23 +75,6 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
 	}
 }
 
-func TestAddingNilMetric(t *testing.T) {
-	conf := &OutputConfig{
-		Filter: Filter{},
-	}
-
-	m := &mockOutput{}
-	ro := NewRunningOutput("test", m, conf, 1000, 10000)
-
-	ro.AddMetric(nil)
-	ro.AddMetric(nil)
-	ro.AddMetric(nil)
-
-	err := ro.Write()
-	assert.NoError(t, err)
-	assert.Len(t, m.Metrics(), 0)
-}
-
 // Test that NameDrop filters get properly applied.
 func TestRunningOutput_DropFilter(t *testing.T) {
 	conf := &OutputConfig{
diff --git a/internal/models/running_processor.go b/internal/models/running_processor.go
index 92d3d44d0..a210d9799 100644
--- a/internal/models/running_processor.go
+++ b/internal/models/running_processor.go
@@ -34,14 +34,18 @@ func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
 	ret := []telegraf.Metric{}
 
 	for _, metric := range in {
-		if rp.Config.Filter.IsActive() {
-			// check if the filter should be applied to this metric
-			if ok := rp.Config.Filter.Apply(metric.Name(), metric.Fields(), metric.Tags()); !ok {
-				// this means filter should not be applied
-				ret = append(ret, metric)
-				continue
-			}
+		// In processors, when a filter selects a metric it is sent through the
+		// processor. Otherwise the metric continues downstream unmodified.
+		if ok := rp.Config.Filter.Select(metric); !ok {
+			ret = append(ret, metric)
+			continue
 		}
+
+		rp.Config.Filter.Modify(metric)
+		if len(metric.FieldList()) == 0 {
+			continue
+		}
+
 		// This metric should pass through the filter, so call the filter Apply
 		// function and append results to the output slice.
 		ret = append(ret, rp.Processor.Apply(metric)...)
diff --git a/internal/models/running_processor_test.go b/internal/models/running_processor_test.go index 8a691a9b8..02db40460 100644 --- a/internal/models/running_processor_test.go +++ b/internal/models/running_processor_test.go @@ -1,117 +1,203 @@ package models import ( + "sort" "testing" + "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/testutil" + "github.com/influxdata/telegraf/metric" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -type TestProcessor struct { +// MockProcessor is a Processor with an overrideable Apply implementation. +type MockProcessor struct { + ApplyF func(in ...telegraf.Metric) []telegraf.Metric } -func (f *TestProcessor) SampleConfig() string { return "" } -func (f *TestProcessor) Description() string { return "" } - -// Apply renames: -// "foo" to "fuz" -// "bar" to "baz" -// And it also drops measurements named "dropme" -func (f *TestProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric { - out := make([]telegraf.Metric, 0) - for _, m := range in { - switch m.Name() { - case "foo": - out = append(out, testutil.TestMetric(1, "fuz")) - case "bar": - out = append(out, testutil.TestMetric(1, "baz")) - case "dropme": - // drop the metric! - default: - out = append(out, m) - } - } - return out +func (p *MockProcessor) SampleConfig() string { + return "" } -func NewTestRunningProcessor() *RunningProcessor { - out := &RunningProcessor{ - Name: "test", - Processor: &TestProcessor{}, - Config: &ProcessorConfig{Filter: Filter{}}, - } - return out +func (p *MockProcessor) Description() string { + return "" } -func TestRunningProcessor(t *testing.T) { - inmetrics := []telegraf.Metric{ - testutil.TestMetric(1, "foo"), - testutil.TestMetric(1, "bar"), - testutil.TestMetric(1, "baz"), - } - - expectedNames := []string{ - "fuz", - "baz", - "baz", - } - rfp := NewTestRunningProcessor() - filteredMetrics := rfp.Apply(inmetrics...) - - actualNames := []string{ - filteredMetrics[0].Name(), - filteredMetrics[1].Name(), - filteredMetrics[2].Name(), - } - assert.Equal(t, expectedNames, actualNames) +func (p *MockProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric { + return p.ApplyF(in...) } -func TestRunningProcessor_WithNameDrop(t *testing.T) { - inmetrics := []telegraf.Metric{ - testutil.TestMetric(1, "foo"), - testutil.TestMetric(1, "bar"), - testutil.TestMetric(1, "baz"), +// TagProcessor returns a Processor whose Apply function adds the tag and +// value. +func TagProcessor(key, value string) *MockProcessor { + return &MockProcessor{ + ApplyF: func(in ...telegraf.Metric) []telegraf.Metric { + for _, m := range in { + m.AddTag(key, value) + } + return in + }, } - - expectedNames := []string{ - "foo", - "baz", - "baz", - } - rfp := NewTestRunningProcessor() - - rfp.Config.Filter.NameDrop = []string{"foo"} - assert.NoError(t, rfp.Config.Filter.Compile()) - - filteredMetrics := rfp.Apply(inmetrics...) - - actualNames := []string{ - filteredMetrics[0].Name(), - filteredMetrics[1].Name(), - filteredMetrics[2].Name(), - } - assert.Equal(t, expectedNames, actualNames) } -func TestRunningProcessor_DroppedMetric(t *testing.T) { - inmetrics := []telegraf.Metric{ - testutil.TestMetric(1, "dropme"), - testutil.TestMetric(1, "foo"), - testutil.TestMetric(1, "bar"), +func Metric( + name string, + tags map[string]string, + fields map[string]interface{}, + tm time.Time, + tp ...telegraf.ValueType, +) telegraf.Metric { + m, err := metric.New(name, tags, fields, tm, tp...) 
+ if err != nil { + panic(err) } - - expectedNames := []string{ - "fuz", - "baz", - } - rfp := NewTestRunningProcessor() - filteredMetrics := rfp.Apply(inmetrics...) - - actualNames := []string{ - filteredMetrics[0].Name(), - filteredMetrics[1].Name(), - } - assert.Equal(t, expectedNames, actualNames) + return m +} + +func TestRunningProcessor_Apply(t *testing.T) { + type args struct { + Processor telegraf.Processor + Config *ProcessorConfig + } + + tests := []struct { + name string + args args + input []telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "inactive filter applies metrics", + args: args{ + Processor: TagProcessor("apply", "true"), + Config: &ProcessorConfig{ + Filter: Filter{}, + }, + }, + input: []telegraf.Metric{ + Metric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + Metric( + "cpu", + map[string]string{ + "apply": "true", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "filter applies", + args: args{ + Processor: TagProcessor("apply", "true"), + Config: &ProcessorConfig{ + Filter: Filter{ + NamePass: []string{"cpu"}, + }, + }, + }, + input: []telegraf.Metric{ + Metric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + Metric( + "cpu", + map[string]string{ + "apply": "true", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "filter doesn't apply", + args: args{ + Processor: TagProcessor("apply", "true"), + Config: &ProcessorConfig{ + Filter: Filter{ + NameDrop: []string{"cpu"}, + }, + }, + }, + input: []telegraf.Metric{ + Metric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + Metric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rp := &RunningProcessor{ + Processor: tt.args.Processor, + Config: tt.args.Config, + } + rp.Config.Filter.Compile() + + actual := rp.Apply(tt.input...) 
+			require.Equal(t, tt.expected, actual)
+		})
+	}
+}
+
+func TestRunningProcessor_Order(t *testing.T) {
+	rp1 := &RunningProcessor{
+		Config: &ProcessorConfig{
+			Order: 1,
+		},
+	}
+	rp2 := &RunningProcessor{
+		Config: &ProcessorConfig{
+			Order: 2,
+		},
+	}
+	rp3 := &RunningProcessor{
+		Config: &ProcessorConfig{
+			Order: 3,
+		},
+	}
+
+	procs := RunningProcessors{rp2, rp3, rp1}
+	sort.Sort(procs)
+	require.Equal(t,
+		RunningProcessors{rp1, rp2, rp3},
+		procs)
+}

From 07c5f39c5bf8e9a1253c5bd28f8c572376829a2f Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 1 Oct 2018 10:55:50 -0700
Subject: [PATCH 0227/1815] Add wavefront parser to 1.8 changelog

---
 CHANGELOG.md | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5c517ab6a..a0383050a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -50,9 +50,10 @@
 
 ### New Parsers
 
-- [csv](/docs/DATA_FORMATS_INPUT.md#csv) - Contributed by @maxunt
-- [grok](/docs/DATA_FORMATS_INPUT.md#grok) - Contributed by @maxunt
-- [logfmt](/docs/DATA_FORMATS_INPUT.md#logfmt) - Contributed by @Ayrdrie & @maxunt
+- [csv](/plugins/parsers/csv/README.md) - Contributed by @maxunt
+- [grok](/plugins/parsers/grok/README.md) - Contributed by @maxunt
+- [logfmt](/plugins/parsers/logfmt/README.md) - Contributed by @Ayrdrie & @maxunt
+- [wavefront](/plugins/parsers/wavefront/README.md) - Contributed by @puckpuck
 
 ### New Serializers

From f712e63a890857da1aa053b170057f7b1962a797 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 1 Oct 2018 11:00:57 -0700
Subject: [PATCH 0228/1815] Add full list of parsers and serializers to README

---
 README.md | 28 +++++++++++++++++++---------
 1 file changed, 19 insertions(+), 9 deletions(-)

diff --git a/README.md b/README.md
index 4ba1a66c9..51deeec5a 100644
--- a/README.md
+++ b/README.md
@@ -267,16 +267,26 @@ For documentation on the latest development code see the [documentation index][d
 * [zipkin](./plugins/inputs/zipkin)
 * [zookeeper](./plugins/inputs/zookeeper)
 
-Telegraf is able to parse the following input data formats into metrics, these
-formats may be used with input plugins supporting the `data_format` option:
+## Parsers
 
-* [InfluxDB Line Protocol](./docs/DATA_FORMATS_INPUT.md#influx)
-* [JSON](./docs/DATA_FORMATS_INPUT.md#json)
-* [Graphite](./docs/DATA_FORMATS_INPUT.md#graphite)
-* [Value](./docs/DATA_FORMATS_INPUT.md#value)
-* [Nagios](./docs/DATA_FORMATS_INPUT.md#nagios)
-* [Collectd](./docs/DATA_FORMATS_INPUT.md#collectd)
-* [Dropwizard](./docs/DATA_FORMATS_INPUT.md#dropwizard)
+- [InfluxDB Line Protocol](/plugins/parsers/influx)
+- [Collectd](/plugins/parsers/collectd)
+- [CSV](/plugins/parsers/csv)
+- [Dropwizard](/plugins/parsers/dropwizard)
+- [Graphite](/plugins/parsers/graphite)
+- [Grok](/plugins/parsers/grok)
+- [JSON](/plugins/parsers/json)
+- [Logfmt](/plugins/parsers/logfmt)
+- [Nagios](/plugins/parsers/nagios)
+- [Value](/plugins/parsers/value), e.g. 45 or "booyah"
+- [Wavefront](/plugins/parsers/wavefront)
+
+## Serializers
+
+- [InfluxDB Line Protocol](/plugins/serializers/influx)
+- [JSON](/plugins/serializers/json)
+- [Graphite](/plugins/serializers/graphite)
+- [SplunkMetric](/plugins/serializers/splunkmetric)
 
 ## Processor Plugins

From a63c0dda94a729f8118f3b62af903bcc8fa78807 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 1 Oct 2018 12:02:28 -0700
Subject: [PATCH 0229/1815] Add note about RFC3164 to syslog input readme

---
 plugins/inputs/syslog/README.md | 48 ++++++++++++++++++++-------------
 1 file changed, 29 insertions(+), 19 deletions(-)

diff
--git a/plugins/inputs/syslog/README.md b/plugins/inputs/syslog/README.md
index e57d28dd2..04ea409f2 100644
--- a/plugins/inputs/syslog/README.md
+++ b/plugins/inputs/syslog/README.md
@@ -55,25 +55,7 @@ The [`best_effort`](https://github.com/influxdata/go-syslog#best-effort-mode)
 option instructs the parser to extract partial but valid info from syslog
 messages. If unset only full messages will be collected.
 
-### Metrics
-
-- syslog
-  - tags
-    - severity (string)
-    - facility (string)
-    - hostname (string)
-    - appname (string)
-  - fields
-    - version (integer)
-    - severity_code (integer)
-    - facility_code (integer)
-    - timestamp (integer)
-    - procid (string)
-    - msgid (string)
-    - sdid (bool)
-    - *Structured Data* (string)
-
-### Rsyslog Integration
+#### Rsyslog Integration
 
 Rsyslog can be configured to forward logging messages to Telegraf by configuring
 [remote logging](https://www.rsyslog.com/doc/v8-stable/configuration/actions.html#remote-machine).
@@ -96,3 +78,31 @@ $ActionQueueSaveOnShutdown on # save in-memory data if rsyslog shuts down
 ```
 
 To complete TLS setup please refer to [rsyslog docs](https://www.rsyslog.com/doc/v8-stable/tutorials/tls.html).
+
+### Metrics
+
+- syslog
+  - tags
+    - severity (string)
+    - facility (string)
+    - hostname (string)
+    - appname (string)
+  - fields
+    - version (integer)
+    - severity_code (integer)
+    - facility_code (integer)
+    - timestamp (integer)
+    - procid (string)
+    - msgid (string)
+    - sdid (bool)
+    - *Structured Data* (string)
+
+
+### Troubleshooting
+
+The syslog plugin does not yet support RFC3164 format data. You may see the following error when this syslog format is received:
+```
+E! Error in plugin [inputs.syslog]: expecting a version value in the range 1-999 [col 5]
+```
+
+You can use rsyslog to translate RFC3164 syslog messages to RFC5424 or RFC5425 format.

From 5ec50b28ed0e4914ba4075ba44f886140f86dbae Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 1 Oct 2018 16:01:18 -0700
Subject: [PATCH 0230/1815] Update syslog documentation

---
 plugins/inputs/syslog/README.md | 42 +++++++++++++++++++++++++++++----
 1 file changed, 38 insertions(+), 4 deletions(-)

diff --git a/plugins/inputs/syslog/README.md b/plugins/inputs/syslog/README.md
index 04ea409f2..ad9b9b572 100644
--- a/plugins/inputs/syslog/README.md
+++ b/plugins/inputs/syslog/README.md
@@ -75,6 +75,18 @@ $ActionQueueSaveOnShutdown on # save in-memory data if rsyslog shuts down
 
 # forward over tcp with octet framing according to RFC 5425
 *.* @@(o)127.0.0.1:6514;RSYSLOG_SyslogProtocol23Format
+
+# uncomment to use udp according to RFC 5424
+#*.* @127.0.0.1:6514;RSYSLOG_SyslogProtocol23Format
+```
+
+You can alternatively use the `advanced` format (aka RainerScript):
+```
+# forward over tcp with octet framing according to RFC 5425
+action(type="omfwd" Protocol="tcp" TCP_Framing="octet-counted" Target="127.0.0.1" Port="6514" Template="RSYSLOG_SyslogProtocol23Format")
+
+# uncomment to use udp according to RFC 5424
+#action(type="omfwd" Protocol="udp" Target="127.0.0.1" Port="6514" Template="RSYSLOG_SyslogProtocol23Format")
 ```
 
 To complete TLS setup please refer to [rsyslog docs](https://www.rsyslog.com/doc/v8-stable/tutorials/tls.html).
@@ -91,18 +103,40 @@ To complete TLS setup please refer to [rsyslog docs](https://www.rsyslog.com/doc
     - version (integer)
     - severity_code (integer)
     - facility_code (integer)
-    - timestamp (integer)
+    - timestamp (integer): the time recorded in the syslog message
     - procid (string)
     - msgid (string)
     - sdid (bool)
     - *Structured Data* (string)
+  - timestamp: the time the message was received
+
+#### Structured Data
+
+Structured data produces field keys by combining the `SD_ID` and the `PARAM_NAME` using the `sdparam_separator`, as in the following example:
+```
+170 <165>1 2018-10-01:14:15.000Z mymachine.example.com evntslog - ID47 [exampleSDID@32473 iut="3" eventSource="Application" eventID="1011"] An application event log entry...
+```
+```
+syslog,appname=evntslog,facility=local4,hostname=mymachine.example.com,severity=notice exampleSDID@32473_eventID="1011",exampleSDID@32473_eventSource="Application",exampleSDID@32473_iut="3",facility_code=20i,message="An application event log entry...",msgid="ID47",severity_code=5i,timestamp=1065910455003000000i,version=1i 1538421339749472344
+```
 
 ### Troubleshooting
 
-The syslog plugin does not yet support RFC3164 format data. You may see the following error when this syslog format is received:
-```
+You can send debugging messages directly to the input plugin using netcat:
+
+```sh
+# TCP with octet framing
+echo "57 <13>1 2018-10-01T12:00:00.0Z example.org root - - - test" | nc 127.0.0.1 6514
+
+# UDP
+echo "<13>1 2018-10-01T12:00:00.0Z example.org root - - - test" | nc -u 127.0.0.1 6514
+```
+
+#### RFC3164
+
+RFC3164 encoded messages are not currently supported. You may see the following error if a message encoded in this format is received:
+```
 E! Error in plugin [inputs.syslog]: expecting a version value in the range 1-999 [col 5]
 ```
 
-You can use rsyslog to translate RFC3164 syslog messages to RFC5424 or RFC5425 format.
+You can use rsyslog to translate RFC3164 syslog messages into RFC5424 format.
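+
+As a minimal sketch (the `imudp` listener on UDP port 514 is an assumption; adjust it to wherever your RFC3164 traffic arrives), rsyslog can receive the legacy messages and re-emit them using the RFC5424 forwarding action shown above:
+```
+module(load="imudp")
+input(type="imudp" port="514")
+
+action(type="omfwd" Protocol="tcp" TCP_Framing="octet-counted" Target="127.0.0.1" Port="6514" Template="RSYSLOG_SyslogProtocol23Format")
+```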
From 797fbf7215210cc7fc6a2126d7c3fd0d3a5f354e Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Mon, 1 Oct 2018 19:13:32 -0400 Subject: [PATCH 0231/1815] Add UUID to VMs in vSphere input (#4769) --- plugins/inputs/vsphere/endpoint.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index 22b479a5c..9f4c55250 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -60,6 +60,7 @@ type objectMap map[string]objectRef type objectRef struct { name string + altID string ref types.ManagedObjectReference parentRef *types.ManagedObjectReference //Pointer because it must be nillable guest string @@ -470,22 +471,22 @@ func getHosts(ctx context.Context, root *view.ContainerView) (objectMap, error) func getVMs(ctx context.Context, root *view.ContainerView) (objectMap, error) { var resources []mo.VirtualMachine - err := root.Retrieve(ctx, []string{"VirtualMachine"}, []string{"name", "runtime.host", "config.guestId"}, &resources) + err := root.Retrieve(ctx, []string{"VirtualMachine"}, []string{"name", "runtime.host", "config.guestId", "config.uuid"}, &resources) if err != nil { return nil, err } m := make(objectMap) for _, r := range resources { - var guest string + guest := "unknown" + uuid := "" // Sometimes Config is unknown and returns a nil pointer // if r.Config != nil { guest = cleanGuestID(r.Config.GuestId) - } else { - guest = "unknown" + uuid = r.Config.Uuid } m[r.ExtensibleManagedObject.Reference().Value] = objectRef{ - name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Runtime.Host, guest: guest} + name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Runtime.Host, guest: guest, altID: uuid} } return m, nil } @@ -785,6 +786,10 @@ func (e *Endpoint) populateTags(objectRef *objectRef, resourceType string, resou t[resource.pKey] = objectRef.name } + if resourceType == "vm" && objectRef.altID != "" { + t["uuid"] = objectRef.altID + } + // Map parent reference parent, found := e.instanceInfo[objectRef.parentRef.Value] if found { From 11baebd6c989b2dbba5d6e0ff684e71b8d1e7a58 Mon Sep 17 00:00:00 2001 From: Greg Date: Mon, 1 Oct 2018 17:14:54 -0600 Subject: [PATCH 0232/1815] Unify http_listener error response with influxdb (#4766) --- plugins/inputs/http_listener/http_listener.go | 39 ++++++++++++------- 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/plugins/inputs/http_listener/http_listener.go b/plugins/inputs/http_listener/http_listener.go index 6415ebc9f..cd82e40c0 100644 --- a/plugins/inputs/http_listener/http_listener.go +++ b/plugins/inputs/http_listener/http_listener.go @@ -5,6 +5,7 @@ import ( "compress/gzip" "crypto/subtle" "crypto/tls" + "fmt" "io" "log" "net" @@ -255,8 +256,8 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { body, err = gzip.NewReader(req.Body) defer body.Close() if err != nil { - log.Println("E! " + err.Error()) - badRequest(res) + log.Println("D! " + err.Error()) + badRequest(res, err.Error()) return } } @@ -270,16 +271,16 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { for { n, err := io.ReadFull(body, buf[bufStart:]) if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF { - log.Println("E! " + err.Error()) + log.Println("D! 
" + err.Error()) // problem reading the request body - badRequest(res) + badRequest(res, err.Error()) return } h.BytesRecv.Incr(int64(n)) if err == io.EOF { if return400 { - badRequest(res) + badRequest(res, "") } else { res.WriteHeader(http.StatusNoContent) } @@ -304,12 +305,17 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { if err == io.ErrUnexpectedEOF { // finished reading the request body - if err := h.parse(buf[:n+bufStart], now, precision); err != nil { - log.Println("E! " + err.Error()) + err = h.parse(buf[:n+bufStart], now, precision) + if err != nil { + log.Println("D! "+err.Error(), bufStart+n) return400 = true } if return400 { - badRequest(res) + if err != nil { + badRequest(res, err.Error()) + } else { + badRequest(res, "") + } } else { res.WriteHeader(http.StatusNoContent) } @@ -322,7 +328,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { i := bytes.LastIndexByte(buf, '\n') if i == -1 { // drop any line longer than the max buffer size - log.Printf("E! http_listener received a single line longer than the maximum of %d bytes", + log.Printf("D! http_listener received a single line longer than the maximum of %d bytes", len(buf)) hangingBytes = true return400 = true @@ -330,7 +336,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { continue } if err := h.parse(buf[:i+1], now, precision); err != nil { - log.Println("E! " + err.Error()) + log.Println("D! " + err.Error()) return400 = true } // rotate the bit remaining after the last newline to the front of the buffer @@ -350,28 +356,33 @@ func (h *HTTPListener) parse(b []byte, t time.Time, precision string) error { h.handler.SetTimeFunc(func() time.Time { return t }) metrics, err := h.parser.Parse(b) if err != nil { - return err + return fmt.Errorf("unable to parse: %s", err.Error()) } for _, m := range metrics { h.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) } - return err + return nil } func tooLarge(res http.ResponseWriter) { res.Header().Set("Content-Type", "application/json") res.Header().Set("X-Influxdb-Version", "1.0") + res.Header().Set("X-Influxdb-Error", "http: request body too large") res.WriteHeader(http.StatusRequestEntityTooLarge) res.Write([]byte(`{"error":"http: request body too large"}`)) } -func badRequest(res http.ResponseWriter) { +func badRequest(res http.ResponseWriter, errString string) { res.Header().Set("Content-Type", "application/json") res.Header().Set("X-Influxdb-Version", "1.0") + if errString == "" { + errString = "http: bad request" + } + res.Header().Set("X-Influxdb-Error", errString) res.WriteHeader(http.StatusBadRequest) - res.Write([]byte(`{"error":"http: bad request"}`)) + res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString))) } func (h *HTTPListener) AuthenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) { From 2bb7ddd0b6436108914e3308726118fda13d78d7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 1 Oct 2018 16:16:19 -0700 Subject: [PATCH 0233/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a0383050a..fe0d69a6d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,8 @@ - [#4747](https://github.com/influxdata/telegraf/pull/4723): Add hostname to TLS config for SNI support. - [#4675](https://github.com/influxdata/telegraf/issues/4675): Don't add tags with empty values to opentsdb output. 
- [#4765](https://github.com/influxdata/telegraf/pull/4765): Fix panic during network error in vsphere input.
+- [#4766](https://github.com/influxdata/telegraf/pull/4766): Unify http_listener error response with InfluxDB.
+- [#4769](https://github.com/influxdata/telegraf/pull/4769): Add UUID to VMs in vSphere input.
 
 ## v1.8 [2018-09-21]

From 86b21452729d9b4f6a85ce4601b3738b7a583e46 Mon Sep 17 00:00:00 2001
From: Lee Jaeyong
Date: Tue, 2 Oct 2018 09:38:13 +0900
Subject: [PATCH 0234/1815] Add support for IPv6 in the ping plugin (#4703)

---
 plugins/inputs/ping/ping.go              | 198 +++++++++++---------
 plugins/inputs/ping/ping_test.go         | 44 ++++-
 plugins/inputs/ping/ping_windows.go      | 220 ++++++++++++-----------
 plugins/inputs/ping/ping_windows_test.go | 39 +++-
 4 files changed, 303 insertions(+), 198 deletions(-)

diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go
index 430cbe6d4..a95f27ebf 100644
--- a/plugins/inputs/ping/ping.go
+++ b/plugins/inputs/ping/ping.go
@@ -22,9 +22,11 @@ import (
 // HostPinger is a function that runs the "ping" function using a list of
 // passed arguments. This can be easily switched with a mocked ping function
 // for unit test purposes (see ping_test.go)
-type HostPinger func(timeout float64, args ...string) (string, error)
+type HostPinger func(binary string, timeout float64, args ...string) (string, error)
 
 type Ping struct {
+	wg sync.WaitGroup
+
 	// Interval at which to ping (ping -i <INTERVAL>)
 	PingInterval float64 `toml:"ping_interval"`
 
@@ -43,6 +45,13 @@ type Ping struct {
 	// URLs to ping
 	Urls []string
 
+	// Ping executable binary
+	Binary string
+
+	// Arguments for ping command.
+	// when `Arguments` is not empty, other options (ping_interval, timeout, etc) will be ignored
+	Arguments []string
+
 	// host ping function
 	pingHost HostPinger
 }
@@ -71,6 +80,13 @@ const sampleConfig = `
   ## Interface or source address to send ping from (ping -I <INTERFACE>)
   ## on Darwin and Freebsd only source address possible: (ping -S <SRC_ADDR>)
   # interface = ""
+
+  ## Specify the ping executable binary, default is "ping"
+  # binary = "ping"
+
+  ## Arguments for ping command
+  ## when arguments is not empty, other options (ping_interval, timeout, etc) will be ignored
+  # arguments = ["-c", "3"]
 `
 
 func (_ *Ping) SampleConfig() string {
 	return sampleConfig
 }
 
 func (p *Ping) Gather(acc telegraf.Accumulator) error {
-
-	var wg sync.WaitGroup
-
 	// Spin off a go routine for each url to ping
 	for _, url := range p.Urls {
-		wg.Add(1)
-		go func(u string) {
-			defer wg.Done()
-			tags := map[string]string{"url": u}
-			fields := map[string]interface{}{"result_code": 0}
-
-			_, err := net.LookupHost(u)
-			if err != nil {
-				acc.AddError(err)
-				fields["result_code"] = 1
-				acc.AddFields("ping", fields, tags)
-				return
-			}
-
-			args := p.args(u, runtime.GOOS)
-			totalTimeout := float64(p.Count)*p.Timeout + float64(p.Count-1)*p.PingInterval
-
-			out, err := p.pingHost(totalTimeout, args...)
-			if err != nil {
-				// Some implementations of ping return a 1 exit code on
-				// timeout, if this occurs we will not exit and try to parse
-				// the output.
- status := -1 - if exitError, ok := err.(*exec.ExitError); ok { - if ws, ok := exitError.Sys().(syscall.WaitStatus); ok { - status = ws.ExitStatus() - } - } - - if status != 1 { - // Combine go err + stderr output - out = strings.TrimSpace(out) - if len(out) > 0 { - acc.AddError(fmt.Errorf("host %s: %s, %s", u, out, err)) - } else { - acc.AddError(fmt.Errorf("host %s: %s", u, err)) - } - fields["result_code"] = 2 - acc.AddFields("ping", fields, tags) - return - } - } - - trans, rec, min, avg, max, stddev, err := processPingOutput(out) - if err != nil { - // fatal error - acc.AddError(fmt.Errorf("%s: %s", err, u)) - fields["result_code"] = 2 - acc.AddFields("ping", fields, tags) - return - } - // Calculate packet loss percentage - loss := float64(trans-rec) / float64(trans) * 100.0 - fields["packets_transmitted"] = trans - fields["packets_received"] = rec - fields["percent_packet_loss"] = loss - if min >= 0 { - fields["minimum_response_ms"] = min - } - if avg >= 0 { - fields["average_response_ms"] = avg - } - if max >= 0 { - fields["maximum_response_ms"] = max - } - if stddev >= 0 { - fields["standard_deviation_ms"] = stddev - } - acc.AddFields("ping", fields, tags) - }(url) + p.wg.Add(1) + go p.pingToURL(url, acc) } - wg.Wait() + p.wg.Wait() return nil } -func hostPinger(timeout float64, args ...string) (string, error) { - bin, err := exec.LookPath("ping") +func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) { + defer p.wg.Done() + tags := map[string]string{"url": u} + fields := map[string]interface{}{"result_code": 0} + + _, err := net.LookupHost(u) + if err != nil { + acc.AddError(err) + fields["result_code"] = 1 + acc.AddFields("ping", fields, tags) + return + } + + args := p.args(u, runtime.GOOS) + totalTimeout := 60.0 + if len(p.Arguments) == 0 { + totalTimeout = float64(p.Count)*p.Timeout + float64(p.Count-1)*p.PingInterval + } + + out, err := p.pingHost(p.Binary, totalTimeout, args...) + if err != nil { + // Some implementations of ping return a 1 exit code on + // timeout, if this occurs we will not exit and try to parse + // the output. 
+ status := -1 + if exitError, ok := err.(*exec.ExitError); ok { + if ws, ok := exitError.Sys().(syscall.WaitStatus); ok { + status = ws.ExitStatus() + } + } + + if status != 1 { + // Combine go err + stderr output + out = strings.TrimSpace(out) + if len(out) > 0 { + acc.AddError(fmt.Errorf("host %s: %s, %s", u, out, err)) + } else { + acc.AddError(fmt.Errorf("host %s: %s", u, err)) + } + fields["result_code"] = 2 + acc.AddFields("ping", fields, tags) + return + } + } + + trans, rec, min, avg, max, stddev, err := processPingOutput(out) + if err != nil { + // fatal error + acc.AddError(fmt.Errorf("%s: %s", err, u)) + fields["result_code"] = 2 + acc.AddFields("ping", fields, tags) + return + } + // Calculate packet loss percentage + loss := float64(trans-rec) / float64(trans) * 100.0 + fields["packets_transmitted"] = trans + fields["packets_received"] = rec + fields["percent_packet_loss"] = loss + if min >= 0 { + fields["minimum_response_ms"] = min + } + if avg >= 0 { + fields["average_response_ms"] = avg + } + if max >= 0 { + fields["maximum_response_ms"] = max + } + if stddev >= 0 { + fields["standard_deviation_ms"] = stddev + } + acc.AddFields("ping", fields, tags) +} + +func hostPinger(binary string, timeout float64, args ...string) (string, error) { + bin, err := exec.LookPath(binary) if err != nil { return "", err } @@ -173,15 +191,21 @@ func hostPinger(timeout float64, args ...string) (string, error) { // args returns the arguments for the 'ping' executable func (p *Ping) args(url string, system string) []string { - // Build the ping command args based on toml config + if len(p.Arguments) > 0 { + return p.Arguments + } + + // build the ping command args based on toml config args := []string{"-c", strconv.Itoa(p.Count), "-n", "-s", "16"} if p.PingInterval > 0 { args = append(args, "-i", strconv.FormatFloat(p.PingInterval, 'f', -1, 64)) } if p.Timeout > 0 { switch system { - case "darwin", "freebsd", "netbsd", "openbsd": + case "darwin": args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64)) + case "freebsd", "netbsd", "openbsd": + args = append(args, "-w", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64)) case "linux": args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', -1, 64)) default: @@ -196,19 +220,21 @@ func (p *Ping) args(url string, system string) []string { case "linux": args = append(args, "-w", strconv.Itoa(p.Deadline)) default: - // Not sure the best option here, just assume GNU ping? + // not sure the best option here, just assume gnu ping? args = append(args, "-w", strconv.Itoa(p.Deadline)) } } if p.Interface != "" { switch system { - case "darwin", "freebsd", "netbsd", "openbsd": - args = append(args, "-S", p.Interface) + case "darwin": + args = append(args, "-I", p.Interface) + case "freebsd", "netbsd", "openbsd": + args = append(args, "-s", p.Interface) case "linux": args = append(args, "-I", p.Interface) default: - // Not sure the best option here, just assume GNU ping? - args = append(args, "-I", p.Interface) + // not sure the best option here, just assume gnu ping? 
+ args = append(args, "-i", p.Interface) } } args = append(args, url) @@ -217,7 +243,7 @@ func (p *Ping) args(url string, system string) []string { // processPingOutput takes in a string output from the ping command, like: // -// PING www.google.com (173.194.115.84): 56 data bytes +// ping www.google.com (173.194.115.84): 56 data bytes // 64 bytes from 173.194.115.84: icmp_seq=0 ttl=54 time=52.172 ms // 64 bytes from 173.194.115.84: icmp_seq=1 ttl=54 time=34.843 ms // @@ -280,6 +306,8 @@ func init() { Count: 1, Timeout: 1.0, Deadline: 10, + Binary: "ping", + Arguments: []string{}, } }) } diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index d5b82608a..867220b20 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -110,9 +110,9 @@ func TestArgs(t *testing.T) { system string output []string }{ - {"darwin", []string{"-c", "2", "-n", "-s", "16", "-i", "1.2", "-W", "12000", "-t", "24", "-S", "eth0", "www.google.com"}}, + {"darwin", []string{"-c", "2", "-n", "-s", "16", "-i", "1.2", "-W", "12000", "-t", "24", "-I", "eth0", "www.google.com"}}, {"linux", []string{"-c", "2", "-n", "-s", "16", "-i", "1.2", "-W", "12", "-w", "24", "-I", "eth0", "www.google.com"}}, - {"anything else", []string{"-c", "2", "-n", "-s", "16", "-i", "1.2", "-W", "12", "-w", "24", "-I", "eth0", "www.google.com"}}, + {"anything else", []string{"-c", "2", "-n", "-s", "16", "-i", "1.2", "-W", "12", "-w", "24", "-i", "eth0", "www.google.com"}}, } for i := range systemCases { actual := p.args("www.google.com", systemCases[i].system) @@ -124,7 +124,24 @@ func TestArgs(t *testing.T) { } } -func mockHostPinger(timeout float64, args ...string) (string, error) { +func TestArguments(t *testing.T) { + arguments := []string{"-c", "3"} + p := Ping{ + Count: 2, + Interface: "eth0", + Timeout: 12.0, + Deadline: 24, + PingInterval: 1.2, + Arguments: arguments, + } + + for _, system := range []string{"darwin", "linux", "anything else"} { + actual := p.args("www.google.com", system) + require.True(t, reflect.DeepEqual(actual, arguments), "Expected: %s Actual: %s", arguments, actual) + } +} + +func mockHostPinger(binary string, timeout float64, args ...string) (string, error) { return linuxPingOutput, nil } @@ -165,7 +182,7 @@ PING www.google.com (216.58.218.164) 56(84) bytes of data. 
rtt min/avg/max/mdev = 35.225/44.033/51.806/5.325 ms ` -func mockLossyHostPinger(timeout float64, args ...string) (string, error) { +func mockLossyHostPinger(binary string, timeout float64, args ...string) (string, error) { return lossyPingOutput, nil } @@ -200,7 +217,7 @@ Request timeout for icmp_seq 0 2 packets transmitted, 0 packets received, 100.0% packet loss ` -func mockErrorHostPinger(timeout float64, args ...string) (string, error) { +func mockErrorHostPinger(binary string, timeout float64, args ...string) (string, error) { // This error will not trigger correct error paths return errorPingOutput, nil } @@ -225,7 +242,7 @@ func TestBadPingGather(t *testing.T) { acc.AssertContainsTaggedFields(t, "ping", fields, tags) } -func mockFatalHostPinger(timeout float64, args ...string) (string, error) { +func mockFatalHostPinger(binary string, timeout float64, args ...string) (string, error) { return fatalPingOutput, errors.New("So very bad") } @@ -265,7 +282,7 @@ func TestErrorWithHostNamePingGather(t *testing.T) { var acc testutil.Accumulator p := Ping{ Urls: []string{"www.amazon.com"}, - pingHost: func(timeout float64, args ...string) (string, error) { + pingHost: func(binary string, timeout float64, args ...string) (string, error) { return param.out, errors.New("So very bad") }, } @@ -274,3 +291,16 @@ func TestErrorWithHostNamePingGather(t *testing.T) { assert.Contains(t, acc.Errors, param.error) } } + +func TestPingBinary(t *testing.T) { + var acc testutil.Accumulator + p := Ping{ + Urls: []string{"www.google.com"}, + Binary: "ping6", + pingHost: func(binary string, timeout float64, args ...string) (string, error) { + assert.True(t, binary == "ping6") + return "", nil + }, + } + acc.GatherError(p.Gather) +} diff --git a/plugins/inputs/ping/ping_windows.go b/plugins/inputs/ping/ping_windows.go index 06a7f590e..6064fabe4 100644 --- a/plugins/inputs/ping/ping_windows.go +++ b/plugins/inputs/ping/ping_windows.go @@ -4,6 +4,7 @@ package ping import ( "errors" + "fmt" "net" "os/exec" "regexp" @@ -20,9 +21,11 @@ import ( // HostPinger is a function that runs the "ping" function using a list of // passed arguments. This can be easily switched with a mocked ping function // for unit test purposes (see ping_test.go) -type HostPinger func(timeout float64, args ...string) (string, error) +type HostPinger func(binary string, timeout float64, args ...string) (string, error) type Ping struct { + wg sync.WaitGroup + // Number of pings to send (ping -c ) Count int @@ -32,6 +35,13 @@ type Ping struct { // URLs to ping Urls []string + // Ping executable binary + Binary string + + // Arguments for ping command. + // when `Arguments` is not empty, other options (ping_interval, timeout, etc) will be ignored + Arguments []string + // host ping function pingHost HostPinger } @@ -49,14 +59,100 @@ const sampleConfig = ` ## Ping timeout, in seconds. 
0.0 means default timeout (ping -w <TIMEOUT>)
   # timeout = 0.0
+
+  ## Specify the ping executable binary, default is "ping"
+  # binary = "ping"
+
+  ## Arguments for ping command
+  ## when arguments is not empty, other options (ping_interval, timeout, etc) will be ignored
+  # arguments = ["-c", "3"]
 `
 
 func (s *Ping) SampleConfig() string {
 	return sampleConfig
 }
 
-func hostPinger(timeout float64, args ...string) (string, error) {
-	bin, err := exec.LookPath("ping")
+func (p *Ping) Gather(acc telegraf.Accumulator) error {
+	if p.Count < 1 {
+		p.Count = 1
+	}
+
+	// Spin off a go routine for each url to ping
+	for _, url := range p.Urls {
+		p.wg.Add(1)
+		go p.pingToURL(url, acc)
+	}
+
+	p.wg.Wait()
+
+	return nil
+}
+
+func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) {
+	defer p.wg.Done()
+
+	tags := map[string]string{"url": u}
+	fields := map[string]interface{}{"result_code": 0}
+
+	_, err := net.LookupHost(u)
+	if err != nil {
+		acc.AddError(err)
+		fields["result_code"] = 1
+		acc.AddFields("ping", fields, tags)
+		return
+	}
+
+	args := p.args(u)
+	totalTimeout := 60.0
+	if len(p.Arguments) == 0 {
+		totalTimeout = p.timeout() * float64(p.Count)
+	}
+
+	out, err := p.pingHost(p.Binary, totalTimeout, args...)
+	// the ping command also returns exitcode != 0 when there was no response
+	// from the host, even though the command was executed successfully
+	var pendingError error
+	if err != nil {
+		// Combine go err + stderr output
+		pendingError = errors.New(strings.TrimSpace(out) + ", " + err.Error())
+	}
+	trans, recReply, receivePacket, avg, min, max, err := processPingOutput(out)
+	if err != nil {
+		// fatal error
+		if pendingError != nil {
+			acc.AddError(fmt.Errorf("%s: %s", pendingError, u))
+		} else {
+			acc.AddError(fmt.Errorf("%s: %s", err, u))
+		}
+
+		fields["result_code"] = 2
+		fields["errors"] = 100.0
+		acc.AddFields("ping", fields, tags)
+		return
+	}
+	// Calculate packet loss percentage
+	lossReply := float64(trans-recReply) / float64(trans) * 100.0
+	lossPackets := float64(trans-receivePacket) / float64(trans) * 100.0
+
+	fields["packets_transmitted"] = trans
+	fields["reply_received"] = recReply
+	fields["packets_received"] = receivePacket
+	fields["percent_packet_loss"] = lossPackets
+	fields["percent_reply_loss"] = lossReply
+	if avg >= 0 {
+		fields["average_response_ms"] = float64(avg)
+	}
+	if min >= 0 {
+		fields["minimum_response_ms"] = float64(min)
+	}
+	if max >= 0 {
+		fields["maximum_response_ms"] = float64(max)
+	}
+	acc.AddFields("ping", fields, tags)
+}
+
+func hostPinger(binary string, timeout float64, args ...string) (string, error) {
+	bin, err := exec.LookPath(binary)
 	if err != nil {
 		return "", err
 	}
@@ -66,6 +162,23 @@ func hostPinger(timeout float64, args ...string) (string, error) {
 	return string(out), err
 }
 
+// args returns the arguments for the 'ping' executable
+func (p *Ping) args(url string) []string {
+	if len(p.Arguments) > 0 {
+		return p.Arguments
+	}
+
+	args := []string{"-n", strconv.Itoa(p.Count)}
+
+	if p.Timeout > 0 {
+		args = append(args, "-w", strconv.FormatFloat(p.Timeout*1000, 'f', 0, 64))
+	}
+
+	args = append(args, url)
+
+	return args
+}
+
 // processPingOutput takes in a string output from the ping command
 // based on linux implementation but using regex ( multilanguage support )
 // It returns (<transmitted packets>, <received reply>, <received packet>, <average response>, <min response>, <max response>)
@@ -134,106 +247,13 @@ func (p *Ping) timeout() float64 {
 	return 4 + 1
 }
 
-// args returns the arguments for the 'ping' executable
-func (p *Ping) args(url string) []string {
-	args := []string{"-n", strconv.Itoa(p.Count)}
-
-	if p.Timeout > 0 {
-		args = append(args,
"-w", strconv.FormatFloat(p.Timeout*1000, 'f', 0, 64)) - } - - args = append(args, url) - - return args -} - -func (p *Ping) Gather(acc telegraf.Accumulator) error { - if p.Count < 1 { - p.Count = 1 - } - var wg sync.WaitGroup - errorChannel := make(chan error, len(p.Urls)*2) - var pendingError error = nil - // Spin off a go routine for each url to ping - for _, url := range p.Urls { - wg.Add(1) - go func(u string) { - defer wg.Done() - - tags := map[string]string{"url": u} - fields := map[string]interface{}{"result_code": 0} - - _, err := net.LookupHost(u) - if err != nil { - errorChannel <- err - fields["result_code"] = 1 - acc.AddFields("ping", fields, tags) - return - } - - args := p.args(u) - totalTimeout := p.timeout() * float64(p.Count) - out, err := p.pingHost(totalTimeout, args...) - // ping host return exitcode != 0 also when there was no response from host - // but command was execute successfully - if err != nil { - // Combine go err + stderr output - pendingError = errors.New(strings.TrimSpace(out) + ", " + err.Error()) - } - trans, recReply, receivePacket, avg, min, max, err := processPingOutput(out) - if err != nil { - // fatal error - if pendingError != nil { - errorChannel <- pendingError - } - errorChannel <- err - - fields["errors"] = 100.0 - acc.AddFields("ping", fields, tags) - return - } - // Calculate packet loss percentage - lossReply := float64(trans-recReply) / float64(trans) * 100.0 - lossPackets := float64(trans-receivePacket) / float64(trans) * 100.0 - - fields["packets_transmitted"] = trans - fields["reply_received"] = recReply - fields["packets_received"] = receivePacket - fields["percent_packet_loss"] = lossPackets - fields["percent_reply_loss"] = lossReply - if avg >= 0 { - fields["average_response_ms"] = float64(avg) - } - if min >= 0 { - fields["minimum_response_ms"] = float64(min) - } - if max >= 0 { - fields["maximum_response_ms"] = float64(max) - } - acc.AddFields("ping", fields, tags) - }(url) - } - - wg.Wait() - close(errorChannel) - - // Get all errors and return them as one giant error - errorStrings := []string{} - for err := range errorChannel { - errorStrings = append(errorStrings, err.Error()) - } - - if len(errorStrings) == 0 { - return nil - } - return errors.New(strings.Join(errorStrings, "\n")) -} - func init() { inputs.Add("ping", func() telegraf.Input { return &Ping{ - pingHost: hostPinger, - Count: 1, + pingHost: hostPinger, + Count: 1, + Binary: "ping", + Arguments: []string{}, } }) } diff --git a/plugins/inputs/ping/ping_windows_test.go b/plugins/inputs/ping/ping_windows_test.go index 178e42fcb..4618ec4db 100644 --- a/plugins/inputs/ping/ping_windows_test.go +++ b/plugins/inputs/ping/ping_windows_test.go @@ -4,10 +4,12 @@ package ping import ( "errors" + "reflect" "testing" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // Windows ping format ( should support multilanguage ?) 
@@ -59,7 +61,7 @@ func TestHost(t *testing.T) { assert.Equal(t, 52, max, "Max 52") } -func mockHostPinger(timeout float64, args ...string) (string, error) { +func mockHostPinger(binary string, timeout float64, args ...string) (string, error) { return winENPingOutput, nil } @@ -102,7 +104,7 @@ Statystyka badania ping dla 195.187.242.157: (100% straty), ` -func mockErrorHostPinger(timeout float64, args ...string) (string, error) { +func mockErrorHostPinger(binary string, timeout float64, args ...string) (string, error) { return errorPingOutput, errors.New("No packets received") } @@ -128,6 +130,18 @@ func TestBadPingGather(t *testing.T) { acc.AssertContainsTaggedFields(t, "ping", fields, tags) } +func TestArguments(t *testing.T) { + arguments := []string{"-c", "3"} + p := Ping{ + Count: 2, + Timeout: 12.0, + Arguments: arguments, + } + + actual := p.args("www.google.com") + require.True(t, reflect.DeepEqual(actual, arguments), "Expected : %s Actual: %s", arguments, actual) +} + var lossyPingOutput = ` Badanie thecodinglove.com [66.6.44.4] z 9800 bajtami danych: Upłynął limit czasu żądania. @@ -147,7 +161,7 @@ Szacunkowy czas błądzenia pakietów w millisekundach: Minimum = 114 ms, Maksimum = 119 ms, Czas średni = 115 ms ` -func mockLossyHostPinger(timeout float64, args ...string) (string, error) { +func mockLossyHostPinger(binary string, timeout float64, args ...string) (string, error) { return lossyPingOutput, nil } @@ -207,7 +221,7 @@ Options: ` -func mockFatalHostPinger(timeout float64, args ...string) (string, error) { +func mockFatalHostPinger(binary string, timeout float64, args ...string) (string, error) { return fatalPingOutput, errors.New("So very bad") } @@ -249,7 +263,7 @@ Ping statistics for 8.8.8.8: Packets: Sent = 4, Received = 1, Lost = 3 (75% loss), ` -func mockUnreachableHostPinger(timeout float64, args ...string) (string, error) { +func mockUnreachableHostPinger(binary string, timeout float64, args ...string) (string, error) { return UnreachablePingOutput, errors.New("So very bad") } @@ -298,7 +312,7 @@ Ping statistics for 8.8.8.8: Packets: Sent = 4, Received = 1, Lost = 3 (75% loss), ` -func mockTTLExpiredPinger(timeout float64, args ...string) (string, error) { +func mockTTLExpiredPinger(binary string, timeout float64, args ...string) (string, error) { return TTLExpiredPingOutput, errors.New("So very bad") } @@ -333,3 +347,16 @@ func TestTTLExpiredPingGather(t *testing.T) { assert.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), "Fatal ping should not have packet measurements") } + +func TestPingBinary(t *testing.T) { + var acc testutil.Accumulator + p := Ping{ + Urls: []string{"www.google.com"}, + Binary: "ping6", + pingHost: func(binary string, timeout float64, args ...string) (string, error) { + assert.True(t, binary == "ping6") + return "", nil + }, + } + acc.GatherError(p.Gather) +} From 798ce7e88fcc9ceafca469d98d211bd9d74bfce7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 1 Oct 2018 17:40:28 -0700 Subject: [PATCH 0235/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fe0d69a6d..b9a9edca1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ - [#4686](https://github.com/influxdata/telegraf/pull/4686): Add replace function to strings processor. - [#4754](https://github.com/influxdata/telegraf/pull/4754): Query servers in parallel in dns_query input. 
- [#4753](https://github.com/influxdata/telegraf/pull/4753): Add ability to define a custom service name when installing as a Windows service. +- [#4703](https://github.com/influxdata/telegraf/pull/4703): Add support for IPv6 in the ping plugin. ## v1.8.1 [unreleased] From 5101f075e19cb5d82a9bc5deb318a527b114634e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 1 Oct 2018 18:27:44 -0700 Subject: [PATCH 0236/1815] Skip tags with empty values in cloudwatch output (#4785) --- plugins/outputs/cloudwatch/cloudwatch.go | 28 +++++++++---------- plugins/outputs/cloudwatch/cloudwatch_test.go | 18 ++++++++++++ 2 files changed, 32 insertions(+), 14 deletions(-) diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index b5dca364e..aaefa89ec 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -281,6 +281,7 @@ func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch func BuildMetricDatum(buildStatistic bool, point telegraf.Metric) []*cloudwatch.MetricDatum { fields := make(map[string]cloudwatchField) + tags := point.Tags() for k, v := range point.Fields() { @@ -298,7 +299,7 @@ func BuildMetricDatum(buildStatistic bool, point telegraf.Metric) []*cloudwatch. fields[k] = &valueField{ metricName: point.Name(), fieldName: k, - tags: point.Tags(), + tags: tags, timestamp: point.Time(), value: val, } @@ -311,7 +312,7 @@ func BuildMetricDatum(buildStatistic bool, point telegraf.Metric) []*cloudwatch. fields[fieldName] = &statisticField{ metricName: point.Name(), fieldName: fieldName, - tags: point.Tags(), + tags: tags, timestamp: point.Time(), values: map[statisticType]float64{ sType: val, @@ -336,19 +337,15 @@ func BuildMetricDatum(buildStatistic bool, point telegraf.Metric) []*cloudwatch. // 10 dimensions per metric so we only keep up to the first 10 alphabetically. // This always includes the "host" tag if it exists. func BuildDimensions(mTags map[string]string) []*cloudwatch.Dimension { - const MaxDimensions = 10 - dimensions := make([]*cloudwatch.Dimension, int(math.Min(float64(len(mTags)), MaxDimensions))) - - i := 0 + dimensions := make([]*cloudwatch.Dimension, 0, MaxDimensions) // This is pretty ugly but we always want to include the "host" tag if it exists. 
if host, ok := mTags["host"]; ok { - dimensions[i] = &cloudwatch.Dimension{ + dimensions = append(dimensions, &cloudwatch.Dimension{ Name: aws.String("host"), Value: aws.String(host), - } - i += 1 + }) } var keys []string @@ -360,16 +357,19 @@ func BuildDimensions(mTags map[string]string) []*cloudwatch.Dimension { sort.Strings(keys) for _, k := range keys { - if i >= MaxDimensions { + if len(dimensions) >= MaxDimensions { break } - dimensions[i] = &cloudwatch.Dimension{ - Name: aws.String(k), - Value: aws.String(mTags[k]), + value := mTags[k] + if value == "" { + continue } - i += 1 + dimensions = append(dimensions, &cloudwatch.Dimension{ + Name: aws.String(k), + Value: aws.String(mTags[k]), + }) } return dimensions diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go index c91c30e0c..cdb55ec19 100644 --- a/plugins/outputs/cloudwatch/cloudwatch_test.go +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -15,6 +15,7 @@ import ( "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // Test that each tag becomes one dimension @@ -115,6 +116,23 @@ func TestBuildMetricDatums(t *testing.T) { assert.Equal(7, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", multiStatisticMetric)) } +func TestBuildMetricDatums_SkipEmptyTags(t *testing.T) { + input := testutil.MustMetric( + "cpu", + map[string]string{ + "host": "example.org", + "foo": "", + }, + map[string]interface{}{ + "value": int64(42), + }, + time.Unix(0, 0), + ) + + datums := BuildMetricDatum(true, input) + require.Len(t, datums[0].Dimensions, 1) +} + func TestPartitionDatums(t *testing.T) { assert := assert.New(t) From b9c64df5fcb1998e2f0d6060b37721e08ab0d65d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 1 Oct 2018 18:29:00 -0700 Subject: [PATCH 0237/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b9a9edca1..dd3f3de63 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ - [#4765](https://github.com/influxdata/telegraf/pull/4765): Fix panic during network error in vsphere input. - [#4766](https://github.com/influxdata/telegraf/pull/4766): Unify http_listener error response with InfluxDB. - [#4769](https://github.com/influxdata/telegraf/pull/4769): Add UUID to VMs in vSphere input. +- [#4758](https://github.com/influxdata/telegraf/issues/4758): Skip tags with empty values in cloudwatch output. 
 ## v1.8 [2018-09-21]

From 21b488a3d3900313198e857fe5f4ea669e7825df Mon Sep 17 00:00:00 2001
From: Pontus Rydin
Date: Wed, 3 Oct 2018 15:02:06 -0400
Subject: [PATCH 0238/1815] Use server time to fix missing non-realtime samples
 in vsphere (#4791)

---
 plugins/inputs/vsphere/client.go   | 10 ++++++++++
 plugins/inputs/vsphere/endpoint.go | 19 +++++++++++++------
 2 files changed, 23 insertions(+), 6 deletions(-)

diff --git a/plugins/inputs/vsphere/client.go b/plugins/inputs/vsphere/client.go
index 2148a72ff..9b77b750a 100644
--- a/plugins/inputs/vsphere/client.go
+++ b/plugins/inputs/vsphere/client.go
@@ -6,6 +6,7 @@ import (
 	"log"
 	"net/url"
 	"sync"
+	"time"

 	"github.com/vmware/govmomi"
 	"github.com/vmware/govmomi/performance"
@@ -171,3 +172,12 @@ func (c *Client) close() {
 		}
 	})
 }
+
+// GetServerTime returns the time at the vCenter server
+func (c *Client) GetServerTime(ctx context.Context) (time.Time, error) {
+	t, err := methods.GetCurrentTime(ctx, c.Client)
+	if err != nil {
+		return time.Time{}, err
+	}
+	return *t, nil
+}
diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go
index 9f4c55250..8c3795869 100644
--- a/plugins/inputs/vsphere/endpoint.go
+++ b/plugins/inputs/vsphere/endpoint.go
@@ -613,10 +613,17 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc

 	// Do we have new data yet?
 	res := e.resourceKinds[resourceType]
-	now := time.Now()
+	client, err := e.clientFactory.GetClient(ctx)
+	if err != nil {
+		return err
+	}
+	now, err := client.GetServerTime(ctx)
+	if err != nil {
+		return err
+	}
 	latest, hasLatest := e.lastColls[resourceType]
 	if hasLatest {
-		elapsed := time.Now().Sub(latest).Seconds() + 5.0 // Allow 5 second jitter.
+		elapsed := now.Sub(latest).Seconds() + 5.0 // Allow 5 second jitter.
 		log.Printf("D! [input.vsphere]: Latest: %s, elapsed: %f, resource: %s", latest, elapsed, resourceType)
 		if !res.realTime && elapsed < float64(res.sampling) {
 			// No new data would be available. We're outta here!
@@ -625,7 +632,7 @@
 			return nil
 		}
 	} else {
-		latest = time.Now().Add(time.Duration(-res.sampling) * time.Second)
+		latest = now.Add(time.Duration(-res.sampling) * time.Second)
 	}

 	internalTags := map[string]string{"resourcetype": resourceType}
@@ -659,12 +666,12 @@

 	// Drain the pool. We're getting errors back. They should all be nil
 	var mux sync.Mutex
-	err := make(multiError, 0)
+	merr := make(multiError, 0)
 	wp.Drain(ctx, func(ctx context.Context, in interface{}) bool {
 		if in != nil {
 			mux.Lock()
 			defer mux.Unlock()
-			err = append(err, in.(error))
+			merr = append(merr, in.(error))
 			return false
 		}
 		return true
@@ -673,7 +680,7 @@
 	sw.Stop()
 	SendInternalCounterWithTags("gather_count", e.URL.Host, internalTags, count)
-	if len(err) > 0 {
-		return err
+	if len(merr) > 0 {
+		return merr
 	}
 	return nil

From 8500d9345f02dc3c22cde726f24176ba9b75ff0e Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 3 Oct 2018 12:04:06 -0700
Subject: [PATCH 0239/1815] Update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index dd3f3de63..5d4a3ed43 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,6 +19,7 @@
 - [#4766](https://github.com/influxdata/telegraf/pull/4766): Unify http_listener error response with InfluxDB.
- [#4769](https://github.com/influxdata/telegraf/pull/4769): Add UUID to VMs in vSphere input. - [#4758](https://github.com/influxdata/telegraf/issues/4758): Skip tags with empty values in cloudwatch output. +- [#4783](https://github.com/influxdata/telegraf/issues/4783): Fix missing non-realtime samples in vSphere input. ## v1.8 [2018-09-21] From c3bab78ea8e04fdb11810a23509153d2309c935c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 3 Oct 2018 12:58:21 -0700 Subject: [PATCH 0240/1815] Fix case of timezone/grok_timezone options. (#4799) --- internal/config/config.go | 2 +- plugins/inputs/logparser/logparser.go | 4 ++-- plugins/parsers/registry.go | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index 3d0510978..4382e0cbe 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1442,7 +1442,7 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { if node, ok := tbl.Fields["grok_timezone"]; ok { if kv, ok := node.(*ast.KeyValue); ok { if str, ok := kv.Value.(*ast.String); ok { - c.GrokTimeZone = str.Value + c.GrokTimezone = str.Value } } } diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index d52df3aa9..089c22d94 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -27,7 +27,7 @@ type GrokConfig struct { NamedPatterns []string CustomPatterns string CustomPatternFiles []string - TimeZone string + Timezone string } type logEntry struct { @@ -137,7 +137,7 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error { GrokNamedPatterns: l.GrokConfig.NamedPatterns, GrokCustomPatterns: l.GrokConfig.CustomPatterns, GrokCustomPatternFiles: l.GrokConfig.CustomPatternFiles, - GrokTimeZone: l.GrokConfig.TimeZone, + GrokTimezone: l.GrokConfig.Timezone, DataFormat: "grok", } diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index c662cf300..8f972fb1b 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -123,7 +123,7 @@ type Config struct { GrokNamedPatterns []string GrokCustomPatterns string GrokCustomPatternFiles []string - GrokTimeZone string + GrokTimezone string //csv configuration CSVColumnNames []string `toml:"csv_column_names"` @@ -185,7 +185,7 @@ func NewParser(config *Config) (Parser, error) { config.GrokNamedPatterns, config.GrokCustomPatterns, config.GrokCustomPatternFiles, - config.GrokTimeZone) + config.GrokTimezone) case "csv": parser, err = newCSVParser(config.MetricName, config.CSVHeaderRowCount, From e6c98e5ce047671c41775fb5710bb6ebc54640ad Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Wed, 3 Oct 2018 14:06:04 -0600 Subject: [PATCH 0241/1815] Set 1.8.1 release date --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5d4a3ed43..b5b8ac195 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ - [#4753](https://github.com/influxdata/telegraf/pull/4753): Add ability to define a custom service name when installing as a Windows service. - [#4703](https://github.com/influxdata/telegraf/pull/4703): Add support for IPv6 in the ping plugin. -## v1.8.1 [unreleased] +## v1.8.1 [2018-10-03] ### Bugfixes @@ -20,6 +20,7 @@ - [#4769](https://github.com/influxdata/telegraf/pull/4769): Add UUID to VMs in vSphere input. - [#4758](https://github.com/influxdata/telegraf/issues/4758): Skip tags with empty values in cloudwatch output. 
- [#4783](https://github.com/influxdata/telegraf/issues/4783): Fix missing non-realtime samples in vSphere input. +- [#4799](https://github.com/influxdata/telegraf/pull/4799): Fix case of timezone/grok_timezone options. ## v1.8 [2018-09-21] From 4898edbb2d687b48ed87cae9233542854484b370 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 3 Oct 2018 15:09:35 -0700 Subject: [PATCH 0242/1815] Increment timestamp by one second in multi metric tests This avoids accidentally triggering tsMod time adjustments in certain timezones. --- plugins/parsers/grok/parser_test.go | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go index 60348fc63..cda88ed3d 100644 --- a/plugins/parsers/grok/parser_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -487,7 +487,7 @@ func TestCompileFileAndParse(t *testing.T) { time.Date(2016, time.June, 4, 12, 41, 45, 0, time.FixedZone("foo", 60*60)).Nanosecond(), metricA.Time().Nanosecond()) - metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`) + metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`) require.NotNil(t, metricB) assert.NoError(t, err) assert.Equal(t, @@ -499,7 +499,7 @@ func TestCompileFileAndParse(t *testing.T) { metricB.Fields()) assert.Equal(t, map[string]string{}, metricB.Tags()) assert.Equal(t, - time.Date(2016, time.June, 4, 12, 41, 45, 0, time.FixedZone("foo", 60*60)).Nanosecond(), + time.Date(2016, time.June, 4, 12, 41, 46, 0, time.FixedZone("foo", 60*60)).Nanosecond(), metricB.Time().Nanosecond()) } @@ -736,7 +736,7 @@ func TestTimezoneEmptyCompileFileAndParse(t *testing.T) { assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano()) - metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`) + metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`) require.NotNil(t, metricB) assert.NoError(t, err) assert.Equal(t, @@ -747,7 +747,7 @@ func TestTimezoneEmptyCompileFileAndParse(t *testing.T) { }, metricB.Fields()) assert.Equal(t, map[string]string{}, metricB.Tags()) - assert.Equal(t, int64(1465044105000000000), metricB.Time().UnixNano()) + assert.Equal(t, int64(1465044106000000000), metricB.Time().UnixNano()) } func TestTimezoneMalformedCompileFileAndParse(t *testing.T) { @@ -772,7 +772,7 @@ func TestTimezoneMalformedCompileFileAndParse(t *testing.T) { assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano()) - metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`) + metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`) require.NotNil(t, metricB) assert.NoError(t, err) assert.Equal(t, @@ -783,7 +783,7 @@ func TestTimezoneMalformedCompileFileAndParse(t *testing.T) { }, metricB.Fields()) assert.Equal(t, map[string]string{}, metricB.Tags()) - assert.Equal(t, int64(1465044105000000000), metricB.Time().UnixNano()) + assert.Equal(t, int64(1465044106000000000), metricB.Time().UnixNano()) } func TestTimezoneEuropeCompileFileAndParse(t *testing.T) { @@ -808,7 +808,7 @@ func TestTimezoneEuropeCompileFileAndParse(t *testing.T) { assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano()) - metricB, err := 
p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`) + metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`) require.NotNil(t, metricB) assert.NoError(t, err) assert.Equal(t, @@ -819,7 +819,7 @@ func TestTimezoneEuropeCompileFileAndParse(t *testing.T) { }, metricB.Fields()) assert.Equal(t, map[string]string{}, metricB.Tags()) - assert.Equal(t, int64(1465036905000000000), metricB.Time().UnixNano()) + assert.Equal(t, int64(1465036906000000000), metricB.Time().UnixNano()) } func TestTimezoneAmericasCompileFileAndParse(t *testing.T) { @@ -844,7 +844,7 @@ func TestTimezoneAmericasCompileFileAndParse(t *testing.T) { assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano()) - metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`) + metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`) require.NotNil(t, metricB) assert.NoError(t, err) assert.Equal(t, @@ -855,7 +855,7 @@ func TestTimezoneAmericasCompileFileAndParse(t *testing.T) { }, metricB.Fields()) assert.Equal(t, map[string]string{}, metricB.Tags()) - assert.Equal(t, int64(1465058505000000000), metricB.Time().UnixNano()) + assert.Equal(t, int64(1465058506000000000), metricB.Time().UnixNano()) } func TestTimezoneLocalCompileFileAndParse(t *testing.T) { @@ -880,7 +880,7 @@ func TestTimezoneLocalCompileFileAndParse(t *testing.T) { assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano()) - metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`) + metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`) require.NotNil(t, metricB) assert.NoError(t, err) assert.Equal(t, @@ -891,7 +891,7 @@ func TestTimezoneLocalCompileFileAndParse(t *testing.T) { }, metricB.Fields()) assert.Equal(t, map[string]string{}, metricB.Tags()) - assert.Equal(t, time.Date(2016, time.June, 4, 12, 41, 45, 0, time.Local).UnixNano(), metricB.Time().UnixNano()) + assert.Equal(t, time.Date(2016, time.June, 4, 12, 41, 46, 0, time.Local).UnixNano(), metricB.Time().UnixNano()) } func TestNewlineInPatterns(t *testing.T) { From 927cac00748a8dd61eaf34eecb6cc40ecb129d6c Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Wed, 3 Oct 2018 17:35:48 -0600 Subject: [PATCH 0243/1815] Update changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b5b8ac195..231dacc7b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,7 @@ - [#4750](https://github.com/influxdata/telegraf/pull/4750): Fix hardware_type may be truncated in sqlserver input. - [#4723](https://github.com/influxdata/telegraf/issues/4723): Improve performance in basicstats aggregator. -- [#4747](https://github.com/influxdata/telegraf/pull/4723): Add hostname to TLS config for SNI support. +- [#4747](https://github.com/influxdata/telegraf/pull/4747): Add hostname to TLS config for SNI support. - [#4675](https://github.com/influxdata/telegraf/issues/4675): Don't add tags with empty values to opentsdb output. - [#4765](https://github.com/influxdata/telegraf/pull/4765): Fix panic during network error in vsphere input. - [#4766](https://github.com/influxdata/telegraf/pull/4766): Unify http_listener error response with InfluxDB. 
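
With PATCH 0240 applied, the option is spelled `timezone` inside the logparser grok table and `grok_timezone` when the grok data format is configured through getParserConfig. A minimal sketch of a logparser configuration exercising the corrected option — the log path here is illustrative only, not taken from the patches:

```toml
[[inputs.logparser]]
  files = ["/var/log/apache/access.log"]
  [inputs.logparser.grok]
    patterns = ["%{COMBINED_LOG_FORMAT}"]
    ## Time zone to apply to parsed timestamps: an IANA zone name, "UTC", or "Local"
    timezone = "Europe/Berlin"
```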
From 9efe7c12f0a30be2e1475dd71bfe8e4dddb389df Mon Sep 17 00:00:00 2001 From: Rodney Gitzel Date: Wed, 3 Oct 2018 17:21:30 -0700 Subject: [PATCH 0244/1815] Expand documentation of 'qos' configuration in mqtt_consumer input (#4784) --- etc/telegraf.conf | 9 ++++++++- plugins/inputs/mqtt_consumer/README.md | 10 +++++++++- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 9 ++++++++- 3 files changed, 25 insertions(+), 3 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index d81e1b993..0a1607281 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -3918,8 +3918,15 @@ # ## schema can be tcp, ssl, or ws. # servers = ["tcp://localhost:1883"] # -# ## MQTT QoS, must be 0, 1, or 2 +# ## QoS policy for messages +# ## 0 = at most once +# ## 1 = at least once +# ## 2 = exactly once +# ## +# ## When using a QoS of 1 or 2, you should enable persistent_session to allow +# ## resuming unacknowledged messages. # qos = 0 +# # ## Connection timeout for initial connection in seconds # connection_timeout = "30s" # diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md index df7869a86..0ec668c40 100644 --- a/plugins/inputs/mqtt_consumer/README.md +++ b/plugins/inputs/mqtt_consumer/README.md @@ -13,8 +13,16 @@ The plugin expects messages in the ## MQTT broker URLs to be used. The format should be scheme://host:port, ## schema can be tcp, ssl, or ws. servers = ["tcp://localhost:1883"] - ## MQTT QoS, must be 0, 1, or 2 + + ## QoS policy for messages + ## 0 = at most once + ## 1 = at least once + ## 2 = exactly once + ## + ## When using a QoS of 1 or 2, you should enable persistent_session to allow + ## resuming unacknowledged messages. qos = 0 + ## Connection timeout for initial connection in seconds connection_timeout = "30s" diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 58074af79..5853ad939 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -53,8 +53,15 @@ var sampleConfig = ` ## schema can be tcp, ssl, or ws. servers = ["tcp://localhost:1883"] - ## MQTT QoS, must be 0, 1, or 2 + ## QoS policy for messages + ## 0 = at most once + ## 1 = at least once + ## 2 = exactly once + ## + ## When using a QoS of 1 or 2, you should enable persistent_session to allow + ## resuming unacknowledged messages. 
 	qos = 0
+
 	## Connection timeout for initial connection in seconds
 	connection_timeout = "30s"

From a1f9f6346340d18ab679b0ebdd6293bb65fadc8b Mon Sep 17 00:00:00 2001
From: Rudy
Date: Thu, 4 Oct 2018 08:19:44 +0700
Subject: [PATCH 0245/1815] Add new config for csv column explicit type
 conversion (#4781)

---
 internal/config/config.go          | 13 +++++++++++
 plugins/parsers/csv/README.md      |  5 +++++
 plugins/parsers/csv/parser.go      | 35 ++++++++++++++++++++++++++++++
 plugins/parsers/csv/parser_test.go | 12 ++++++++++
 plugins/parsers/registry.go        |  8 +++++++
 5 files changed, 73 insertions(+)

diff --git a/internal/config/config.go b/internal/config/config.go
index 4382e0cbe..36027834b 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -1460,6 +1460,18 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) {
 		}
 	}

+	if node, ok := tbl.Fields["csv_column_types"]; ok {
+		if kv, ok := node.(*ast.KeyValue); ok {
+			if ary, ok := kv.Value.(*ast.Array); ok {
+				for _, elem := range ary.Value {
+					if str, ok := elem.(*ast.String); ok {
+						c.CSVColumnTypes = append(c.CSVColumnTypes, str.Value)
+					}
+				}
+			}
+		}
+	}
+
 	if node, ok := tbl.Fields["csv_tag_columns"]; ok {
 		if kv, ok := node.(*ast.KeyValue); ok {
 			if ary, ok := kv.Value.(*ast.Array); ok {
@@ -1588,6 +1600,7 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) {
 	delete(tbl.Fields, "grok_custom_pattern_files")
 	delete(tbl.Fields, "grok_timezone")
 	delete(tbl.Fields, "csv_column_names")
+	delete(tbl.Fields, "csv_column_types")
 	delete(tbl.Fields, "csv_comment")
 	delete(tbl.Fields, "csv_delimiter")
 	delete(tbl.Fields, "csv_field_columns")
diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md
index 532980991..e4cfbfc37 100644
--- a/plugins/parsers/csv/README.md
+++ b/plugins/parsers/csv/README.md
@@ -27,6 +27,11 @@ values.
   ## If `csv_header_row_count` is set to 0, this config must be used
   csv_column_names = []

+  ## For assigning explicit data types to columns.
+  ## Supported types: "int", "float", "bool", "string".
+  ## If this is not specified, type conversion is done automatically using the types above.
+  csv_column_types = []
+
   ## Indicates the number of rows to skip before looking for header information.
   csv_skip_rows = 0

diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go
index 8e0b8b47e..f18068eb7 100644
--- a/plugins/parsers/csv/parser.go
+++ b/plugins/parsers/csv/parser.go
@@ -21,6 +21,7 @@ type Parser struct {
 	Comment           string
 	TrimSpace         bool
 	ColumnNames       []string
+	ColumnTypes       []string
 	TagColumns        []string
 	MeasurementColumn string
 	TimestampColumn   string
@@ -148,6 +149,40 @@ outer:
 			}
 		}

+		// Try explicit conversion only when column types are defined.
+		if len(p.ColumnTypes) > 0 {
+			// Return an error if the current column count exceeds the defined types.
+ if i >= len(p.ColumnTypes) { + return nil, fmt.Errorf("column type: column count exceeded") + } + + var val interface{} + var err error + + switch p.ColumnTypes[i] { + case "int": + val, err = strconv.ParseInt(value, 10, 64) + if err != nil { + return nil, fmt.Errorf("column type: parse int error %s", err) + } + case "float": + val, err = strconv.ParseFloat(value, 64) + if err != nil { + return nil, fmt.Errorf("column type: parse float error %s", err) + } + case "bool": + val, err = strconv.ParseBool(value) + if err != nil { + return nil, fmt.Errorf("column type: parse bool error %s", err) + } + default: + val = value + } + + recordFields[fieldName] = val + continue + } + // attempt type conversions if iValue, err := strconv.ParseInt(value, 10, 64); err == nil { recordFields[fieldName] = iValue diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go index e3668d3ac..eff6f953f 100644 --- a/plugins/parsers/csv/parser_test.go +++ b/plugins/parsers/csv/parser_test.go @@ -147,6 +147,18 @@ func TestValueConversion(t *testing.T) { //deep equal fields require.Equal(t, expectedMetric.Fields(), returnedMetric.Fields()) + + // Test explicit type conversion. + p.ColumnTypes = []string{"float", "int", "bool", "string"} + + metrics, err = p.Parse([]byte(testCSV)) + require.NoError(t, err) + + returnedMetric, err2 = metric.New(metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields(), time.Unix(0, 0)) + require.NoError(t, err2) + + //deep equal fields + require.Equal(t, expectedMetric.Fields(), returnedMetric.Fields()) } func TestSkipComment(t *testing.T) { diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 8f972fb1b..c3e4b1cbf 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -127,6 +127,7 @@ type Config struct { //csv configuration CSVColumnNames []string `toml:"csv_column_names"` + CSVColumnTypes []string `toml:"csv_column_types"` CSVComment string `toml:"csv_comment"` CSVDelimiter string `toml:"csv_delimiter"` CSVHeaderRowCount int `toml:"csv_header_row_count"` @@ -195,6 +196,7 @@ func NewParser(config *Config) (Parser, error) { config.CSVComment, config.CSVTrimSpace, config.CSVColumnNames, + config.CSVColumnTypes, config.CSVTagColumns, config.CSVMeasurementColumn, config.CSVTimestampColumn, @@ -216,6 +218,7 @@ func newCSVParser(metricName string, comment string, trimSpace bool, columnNames []string, + columnTypes []string, tagColumns []string, nameColumn string, timestampColumn string, @@ -240,6 +243,10 @@ func newCSVParser(metricName string, } } + if len(columnNames) > 0 && len(columnTypes) > 0 && len(columnNames) != len(columnTypes) { + return nil, fmt.Errorf("csv_column_names field count doesn't match with csv_column_types") + } + parser := &csv.Parser{ MetricName: metricName, HeaderRowCount: headerRowCount, @@ -249,6 +256,7 @@ func newCSVParser(metricName string, Comment: comment, TrimSpace: trimSpace, ColumnNames: columnNames, + ColumnTypes: columnTypes, TagColumns: tagColumns, MeasurementColumn: nameColumn, TimestampColumn: timestampColumn, From 6bf5643351c86aa09ae4819816ec783b8a4e1c2f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 3 Oct 2018 18:20:37 -0700 Subject: [PATCH 0246/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 231dacc7b..4edaeccba 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ - [#4754](https://github.com/influxdata/telegraf/pull/4754): Query servers in parallel in dns_query input. 
- [#4753](https://github.com/influxdata/telegraf/pull/4753): Add ability to define a custom service name when installing as a Windows service. - [#4703](https://github.com/influxdata/telegraf/pull/4703): Add support for IPv6 in the ping plugin. +- [#4781](https://github.com/influxdata/telegraf/pull/4781): Add new config for csv column explicit type conversion. ## v1.8.1 [2018-10-03] From 5d31c04e947fa28559ff931f0e8f45e7be3189b3 Mon Sep 17 00:00:00 2001 From: Mihai Todor Date: Thu, 4 Oct 2018 19:47:49 +0100 Subject: [PATCH 0247/1815] Fix linter contributing guidelines (#4806) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 55cc7f118..4bc7daf71 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -477,7 +477,7 @@ the short tests. ### Execute linter -execute `make lint` +execute `make check` ### Execute short tests From db3967eb978fe82cb61a3552a99e73b7482f820b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 4 Oct 2018 13:24:25 -0700 Subject: [PATCH 0248/1815] Add missing processors to readme --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 51deeec5a..647256f3e 100644 --- a/README.md +++ b/README.md @@ -291,10 +291,13 @@ For documentation on the latest development code see the [documentation index][d ## Processor Plugins * [converter](./plugins/processors/converter) +* [enum](./plugins/processors/enum) * [override](./plugins/processors/override) +* [parser](./plugins/processors/parser) * [printer](./plugins/processors/printer) * [regex](./plugins/processors/regex) * [rename](./plugins/processors/rename) +* [strings](./plugins/processors/strings) * [topk](./plugins/processors/topk) ## Aggregator Plugins From f81696b6b5853fab8fe9d9cbb59b99dffbae8ac9 Mon Sep 17 00:00:00 2001 From: pytimer Date: Sat, 6 Oct 2018 02:14:44 +0800 Subject: [PATCH 0249/1815] Add windows service name lookup to procstat input (#4811) --- plugins/inputs/procstat/README.md | 6 +++ plugins/inputs/procstat/procstat.go | 22 ++++++++- .../inputs/procstat/win_service_notwindows.go | 11 +++++ .../inputs/procstat/win_service_windows.go | 49 +++++++++++++++++++ 4 files changed, 87 insertions(+), 1 deletion(-) create mode 100644 plugins/inputs/procstat/win_service_notwindows.go create mode 100644 plugins/inputs/procstat/win_service_windows.go diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index efa71489b..852c109f0 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -11,6 +11,7 @@ Processes can be selected for monitoring using one of several methods: - user - systemd_unit - cgroup +- win_service ### Configuration: @@ -30,6 +31,9 @@ Processes can be selected for monitoring using one of several methods: ## CGroup name or path # cgroup = "systemd/system.slice/nginx.service" + ## Windows service name + # win_service = "" + ## override for process_name ## This is optional; default is sourced from /proc//status # process_name = "bar" @@ -75,6 +79,7 @@ implemented as a WMI query. The pattern allows fuzzy matching using only - user (when selected) - systemd_unit (when defined) - cgroup (when defined) + - win_service (when defined) - fields: - cpu_time (int) - cpu_time_guest (float) @@ -139,6 +144,7 @@ implemented as a WMI query. 
The pattern allows fuzzy matching using only - user (string) - systemd_unit (string) - cgroup (string) + - win_service (string) - fields: - pid_count (int) *NOTE: Resource limit > 2147483647 will be reported as 2147483647.* diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 4b253fd1c..d9c1ee7b6 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -32,6 +32,7 @@ type Procstat struct { SystemdUnit string CGroup string `toml:"cgroup"` PidTag bool + WinService string `tom:"win_service"` finder PIDFinder @@ -54,6 +55,9 @@ var sampleConfig = ` ## CGroup name or path # cgroup = "systemd/system.slice/nginx.service" + ## Windows service name + # win_service = "" + ## override for process_name ## This is optional; default is sourced from /proc//status # process_name = "bar" @@ -317,8 +321,11 @@ func (p *Procstat) findPids(acc telegraf.Accumulator) ([]PID, map[string]string, } else if p.CGroup != "" { pids, err = p.cgroupPIDs() tags = map[string]string{"cgroup": p.CGroup} + } else if p.WinService != "" { + pids, err = p.winServicePIDs() + tags = map[string]string{"win_service": p.WinService} } else { - err = fmt.Errorf("Either exe, pid_file, user, pattern, systemd_unit, or cgroup must be specified") + err = fmt.Errorf("Either exe, pid_file, user, pattern, systemd_unit, cgroup, or win_service must be specified") } rTags := make(map[string]string) @@ -391,6 +398,19 @@ func (p *Procstat) cgroupPIDs() ([]PID, error) { return pids, nil } +func (p *Procstat) winServicePIDs() ([]PID, error) { + var pids []PID + + pid, err := queryPidWithWinServiceName(p.WinService) + if err != nil { + return pids, err + } + + pids = append(pids, PID(pid)) + + return pids, nil +} + func init() { inputs.Add("procstat", func() telegraf.Input { return &Procstat{} diff --git a/plugins/inputs/procstat/win_service_notwindows.go b/plugins/inputs/procstat/win_service_notwindows.go new file mode 100644 index 000000000..3d539d9f9 --- /dev/null +++ b/plugins/inputs/procstat/win_service_notwindows.go @@ -0,0 +1,11 @@ +// +build !windows + +package procstat + +import ( + "fmt" +) + +func queryPidWithWinServiceName(winServiceName string) (uint32, error) { + return 0, fmt.Errorf("os not support win_service option") +} diff --git a/plugins/inputs/procstat/win_service_windows.go b/plugins/inputs/procstat/win_service_windows.go new file mode 100644 index 000000000..70a542263 --- /dev/null +++ b/plugins/inputs/procstat/win_service_windows.go @@ -0,0 +1,49 @@ +// +build windows + +package procstat + +import ( + "unsafe" + + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/svc/mgr" +) + +func getService(name string) (*mgr.Service, error) { + m, err := mgr.Connect() + if err != nil { + return nil, err + } + defer m.Disconnect() + + srv, err := m.OpenService(name) + if err != nil { + return nil, err + } + + return srv, nil +} + +func queryPidWithWinServiceName(winServiceName string) (uint32, error) { + + srv, err := getService(winServiceName) + if err != nil { + return 0, err + } + + var p *windows.SERVICE_STATUS_PROCESS + var bytesNeeded uint32 + var buf []byte + + if err := windows.QueryServiceStatusEx(srv.Handle, windows.SC_STATUS_PROCESS_INFO, nil, 0, &bytesNeeded); err != windows.ERROR_INSUFFICIENT_BUFFER { + return 0, err + } + + buf = make([]byte, bytesNeeded) + p = (*windows.SERVICE_STATUS_PROCESS)(unsafe.Pointer(&buf[0])) + if err := windows.QueryServiceStatusEx(srv.Handle, windows.SC_STATUS_PROCESS_INFO, &buf[0], uint32(len(buf)), &bytesNeeded); err 
!= nil { + return 0, err + } + + return p.ProcessId, nil +} From 030f94450562ab2d3823d298df5c6eeecbe3a649 Mon Sep 17 00:00:00 2001 From: Lee Jaeyong Date: Sat, 6 Oct 2018 04:55:23 +0900 Subject: [PATCH 0250/1815] Add per-directory file counts in the filecount input (#4752) --- plugins/inputs/filecount/filecount.go | 101 +++++++++++++++------ plugins/inputs/filecount/filecount_test.go | 95 +++++++++++++------ 2 files changed, 141 insertions(+), 55 deletions(-) diff --git a/plugins/inputs/filecount/filecount.go b/plugins/inputs/filecount/filecount.go index 6041ec7b5..a0dcd2cb4 100644 --- a/plugins/inputs/filecount/filecount.go +++ b/plugins/inputs/filecount/filecount.go @@ -1,19 +1,30 @@ package filecount import ( + "fmt" "os" "path/filepath" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/inputs" ) const sampleConfig = ` ## Directory to gather stats about. + ## deprecated in 1.9; use the directories option directory = "/var/cache/apt/archives" + ## Directories to gather stats about. + ## This accept standard unit glob matching rules, but with the addition of + ## ** as a "super asterisk". ie: + ## /var/log/** -> recursively find all directories in /var/log and count files in each directories + ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directories + ## /var/log -> count all files in /var/log and all of its subdirectories + directories = ["/var/cache/apt/archives"] + ## Only count files that match the name pattern. Defaults to "*". name = "*.deb" @@ -35,7 +46,8 @@ const sampleConfig = ` ` type FileCount struct { - Directory string + Directory string // deprecated in 1.9 + Directories []string Name string Recursive bool RegularOnly bool @@ -44,7 +56,6 @@ type FileCount struct { fileFilters []fileFilterFunc } -type countFunc func(os.FileInfo) type fileFilterFunc func(os.FileInfo) (bool, error) func (_ *FileCount) Description() string { @@ -125,18 +136,40 @@ func absDuration(x time.Duration) time.Duration { return x } -func count(basedir string, recursive bool, countFn countFunc) error { +func (fc *FileCount) count(acc telegraf.Accumulator, basedir string, recursive bool) { + numFiles := int64(0) walkFn := func(path string, file os.FileInfo, err error) error { if path == basedir { return nil } - countFn(file) + match, err := fc.filter(file) + if err != nil { + acc.AddError(err) + return nil + } + if match { + numFiles++ + } if !recursive && file.IsDir() { return filepath.SkipDir } return nil } - return filepath.Walk(basedir, walkFn) + + err := filepath.Walk(basedir, walkFn) + if err != nil { + acc.AddError(err) + return + } + + acc.AddFields("filecount", + map[string]interface{}{ + "count": numFiles, + }, + map[string]string{ + "directory": basedir, + }, + ) } func (fc *FileCount) initFileFilters() { @@ -168,37 +201,53 @@ func (fc *FileCount) filter(file os.FileInfo) (bool, error) { } func (fc *FileCount) Gather(acc telegraf.Accumulator) error { - numFiles := int64(0) - countFn := func(f os.FileInfo) { - match, err := fc.filter(f) - if err != nil { - acc.AddError(err) - return - } - if !match { - return - } - numFiles++ - } - err := count(fc.Directory, fc.Recursive, countFn) + globDirs := fc.getDirs() + dirs, err := getCompiledDirs(globDirs) if err != nil { - acc.AddError(err) + return err } - acc.AddFields("filecount", - map[string]interface{}{ - "count": numFiles, - }, - map[string]string{ - "directory": fc.Directory, 
- }) + for _, dir := range dirs { + fc.count(acc, dir, fc.Recursive) + } return nil } +func (fc *FileCount) getDirs() []string { + dirs := make([]string, len(fc.Directories)) + for i, dir := range fc.Directories { + dirs[i] = dir + } + + if fc.Directory != "" { + dirs = append(dirs, fc.Directory) + } + + return dirs +} + +func getCompiledDirs(dirs []string) ([]string, error) { + compiledDirs := []string{} + for _, dir := range dirs { + g, err := globpath.Compile(dir) + if err != nil { + return nil, fmt.Errorf("could not compile glob %v: %v", dir, err) + } + + for path, file := range g.Match() { + if file.IsDir() { + compiledDirs = append(compiledDirs, path) + } + } + } + return compiledDirs, nil +} + func NewFileCount() *FileCount { return &FileCount{ Directory: "", + Directories: []string{}, Name: "*", Recursive: true, RegularOnly: true, diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go index 294a8b965..16bb83de5 100644 --- a/plugins/inputs/filecount/filecount_test.go +++ b/plugins/inputs/filecount/filecount_test.go @@ -14,69 +14,108 @@ import ( ) func TestNoFilters(t *testing.T) { - fc := getNoFilterFileCount() - matches := []string{"foo", "bar", "baz", "qux", - "subdir/", "subdir/quux", "subdir/quuz"} - require.True(t, fileCountEquals(fc, len(matches))) + fc := getNoFilterFileCount("*") + matches := []string{"foo", "bar", "baz", "qux", "subdir/", "subdir/quux", "subdir/quuz"} + + acc := testutil.Accumulator{} + acc.GatherError(fc.Gather) + + require.True(t, assertFileCount(&acc, "testdata", len(matches))) +} + +func TestNoFiltersOnChildDir(t *testing.T) { + fc := getNoFilterFileCount("testdata/*") + matches := []string{"subdir/quux", "subdir/quuz"} + + acc := testutil.Accumulator{} + acc.GatherError(fc.Gather) + + require.True(t, assertFileCount(&acc, "testdata/subdir", len(matches))) } func TestNameFilter(t *testing.T) { - fc := getNoFilterFileCount() + fc := getNoFilterFileCount("testdata") fc.Name = "ba*" matches := []string{"bar", "baz"} - require.True(t, fileCountEquals(fc, len(matches))) + + acc := testutil.Accumulator{} + acc.GatherError(fc.Gather) + + require.True(t, assertFileCount(&acc, "testdata", len(matches))) } func TestNonRecursive(t *testing.T) { - fc := getNoFilterFileCount() + fc := getNoFilterFileCount("testdata") fc.Recursive = false matches := []string{"foo", "bar", "baz", "qux", "subdir"} - require.True(t, fileCountEquals(fc, len(matches))) + + acc := testutil.Accumulator{} + acc.GatherError(fc.Gather) + + require.True(t, assertFileCount(&acc, "testdata", len(matches))) } func TestRegularOnlyFilter(t *testing.T) { - fc := getNoFilterFileCount() + fc := getNoFilterFileCount("testdata") fc.RegularOnly = true matches := []string{ "foo", "bar", "baz", "qux", "subdir/quux", "subdir/quuz", } - require.True(t, fileCountEquals(fc, len(matches))) + + acc := testutil.Accumulator{} + acc.GatherError(fc.Gather) + + require.True(t, assertFileCount(&acc, "testdata", len(matches))) } func TestSizeFilter(t *testing.T) { - fc := getNoFilterFileCount() + fc := getNoFilterFileCount("testdata") fc.Size = -100 - matches := []string{"foo", "bar", "baz", - "subdir/quux", "subdir/quuz"} - require.True(t, fileCountEquals(fc, len(matches))) + matches := []string{"foo", "bar", "baz", "subdir/quux", "subdir/quuz"} + + acc := testutil.Accumulator{} + acc.GatherError(fc.Gather) + + require.True(t, assertFileCount(&acc, "testdata", len(matches))) fc.Size = 100 matches = []string{"qux"} - require.True(t, fileCountEquals(fc, len(matches))) + + acc 
= testutil.Accumulator{} + acc.GatherError(fc.Gather) + + require.True(t, assertFileCount(&acc, "testdata", len(matches))) } func TestMTimeFilter(t *testing.T) { - oldFile := filepath.Join(getTestdataDir(), "baz") + oldFile := filepath.Join(getTestdataDir("testdata"), "baz") mtime := time.Date(1979, time.December, 14, 18, 25, 5, 0, time.UTC) if err := os.Chtimes(oldFile, mtime, mtime); err != nil { t.Skip("skipping mtime filter test.") } fileAge := time.Since(mtime) - (60 * time.Second) - fc := getNoFilterFileCount() + fc := getNoFilterFileCount("testdata") fc.MTime = internal.Duration{Duration: -fileAge} - matches := []string{"foo", "bar", "qux", - "subdir/", "subdir/quux", "subdir/quuz"} - require.True(t, fileCountEquals(fc, len(matches))) + matches := []string{"foo", "bar", "qux", "subdir/", "subdir/quux", "subdir/quuz"} + + acc := testutil.Accumulator{} + acc.GatherError(fc.Gather) + + require.True(t, assertFileCount(&acc, "testdata", len(matches))) fc.MTime = internal.Duration{Duration: fileAge} matches = []string{"baz"} - require.True(t, fileCountEquals(fc, len(matches))) + + acc = testutil.Accumulator{} + acc.GatherError(fc.Gather) + + require.True(t, assertFileCount(&acc, "testdata", len(matches))) } -func getNoFilterFileCount() FileCount { +func getNoFilterFileCount(dir string) FileCount { return FileCount{ - Directory: getTestdataDir(), + Directories: []string{getTestdataDir(dir)}, Name: "*", Recursive: true, RegularOnly: false, @@ -86,14 +125,12 @@ func getNoFilterFileCount() FileCount { } } -func getTestdataDir() string { +func getTestdataDir(dir string) string { _, filename, _, _ := runtime.Caller(1) - return strings.Replace(filename, "filecount_test.go", "testdata/", 1) + return strings.Replace(filename, "filecount_test.go", dir, 1) } -func fileCountEquals(fc FileCount, expectedCount int) bool { - tags := map[string]string{"directory": getTestdataDir()} - acc := testutil.Accumulator{} - acc.GatherError(fc.Gather) +func assertFileCount(acc *testutil.Accumulator, expectedDir string, expectedCount int) bool { + tags := map[string]string{"directory": getTestdataDir(expectedDir)} return acc.HasPoint("filecount", tags, "count", int64(expectedCount)) } From 422c142463d16912ab033e135ca05a77c8a2be1a Mon Sep 17 00:00:00 2001 From: Kevin Conaway Date: Fri, 5 Oct 2018 16:48:18 -0400 Subject: [PATCH 0251/1815] Use non-allocating field and tag accessors in datadog output (#4803) --- plugins/outputs/datadog/datadog.go | 27 ++++++++++++------------- plugins/outputs/datadog/datadog_test.go | 22 ++++++++++++++++---- 2 files changed, 31 insertions(+), 18 deletions(-) diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go index 2ab3dcd58..2ef03a7b2 100644 --- a/plugins/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -7,7 +7,6 @@ import ( "log" "net/http" "net/url" - "sort" "strings" "github.com/influxdata/telegraf" @@ -76,6 +75,9 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error { for _, m := range metrics { if dogMs, err := buildMetrics(m); err == nil { + metricTags := buildTags(m.TagList()) + host, _ := m.GetTag("host") + for fieldName, dogM := range dogMs { // name of the datadog measurement var dname string @@ -85,11 +87,9 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error { } else { dname = m.Name() + "." 
+ fieldName } - var host string - host, _ = m.Tags()["host"] metric := &Metric{ Metric: dname, - Tags: buildTags(m.Tags()), + Tags: metricTags, Host: host, } metric.Points[0] = dogM @@ -144,28 +144,27 @@ func (d *Datadog) authenticatedUrl() string { func buildMetrics(m telegraf.Metric) (map[string]Point, error) { ms := make(map[string]Point) - for k, v := range m.Fields() { - if !verifyValue(v) { + for _, field := range m.FieldList() { + if !verifyValue(field.Value) { continue } var p Point - if err := p.setValue(v); err != nil { - return ms, fmt.Errorf("unable to extract value from Fields %v error %v", k, err.Error()) + if err := p.setValue(field.Value); err != nil { + return ms, fmt.Errorf("unable to extract value from Fields %v error %v", field.Key, err.Error()) } p[0] = float64(m.Time().Unix()) - ms[k] = p + ms[field.Key] = p } return ms, nil } -func buildTags(mTags map[string]string) []string { - tags := make([]string, len(mTags)) +func buildTags(tagList []*telegraf.Tag) []string { + tags := make([]string, len(tagList)) index := 0 - for k, v := range mTags { - tags[index] = fmt.Sprintf("%s:%s", k, v) + for _, tag := range tagList { + tags[index] = fmt.Sprintf("%s:%s", tag.Key, tag.Value) index += 1 } - sort.Strings(tags) return tags } diff --git a/plugins/outputs/datadog/datadog_test.go b/plugins/outputs/datadog/datadog_test.go index 045bf4b43..f21ecc588 100644 --- a/plugins/outputs/datadog/datadog_test.go +++ b/plugins/outputs/datadog/datadog_test.go @@ -74,19 +74,33 @@ func TestAuthenticatedUrl(t *testing.T) { func TestBuildTags(t *testing.T) { var tagtests = []struct { - ptIn map[string]string + ptIn []*telegraf.Tag outTags []string }{ { - map[string]string{"one": "two", "three": "four"}, + []*telegraf.Tag{ + &telegraf.Tag{ + Key: "one", + Value: "two", + }, + &telegraf.Tag{ + Key: "three", + Value: "four", + }, + }, []string{"one:two", "three:four"}, }, { - map[string]string{"aaa": "bbb"}, + []*telegraf.Tag{ + &telegraf.Tag{ + Key: "aaa", + Value: "bbb", + }, + }, []string{"aaa:bbb"}, }, { - map[string]string{}, + []*telegraf.Tag{}, []string{}, }, } From 34caf12db578b56454735c420ef522eb5c164ab0 Mon Sep 17 00:00:00 2001 From: Kevin Conaway Date: Fri, 5 Oct 2018 16:51:16 -0400 Subject: [PATCH 0252/1815] Add an option to specify a custom datadog URL (#4800) --- plugins/outputs/datadog/datadog.go | 17 ++++++++--------- plugins/outputs/datadog/datadog_test.go | 6 ++++++ 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go index 2ef03a7b2..62e73f115 100644 --- a/plugins/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -18,7 +18,7 @@ type Datadog struct { Apikey string Timeout internal.Duration - apiUrl string + URL string `toml:"url"` client *http.Client } @@ -26,6 +26,9 @@ var sampleConfig = ` ## Datadog API key apikey = "my-secret-key" # required. + # The base endpoint URL can optionally be specified but it defaults to: + #url = "https://app.datadoghq.com/api/v1/series" + ## Connection timeout. 
# timeout = "5s" ` @@ -45,12 +48,6 @@ type Point [2]float64 const datadog_api = "https://app.datadoghq.com/api/v1/series" -func NewDatadog(apiUrl string) *Datadog { - return &Datadog{ - apiUrl: apiUrl, - } -} - func (d *Datadog) Connect() error { if d.Apikey == "" { return fmt.Errorf("apikey is a required field for datadog output") @@ -139,7 +136,7 @@ func (d *Datadog) authenticatedUrl() string { q := url.Values{ "api_key": []string{d.Apikey}, } - return fmt.Sprintf("%s?%s", d.apiUrl, q.Encode()) + return fmt.Sprintf("%s?%s", d.URL, q.Encode()) } func buildMetrics(m telegraf.Metric) (map[string]Point, error) { @@ -201,6 +198,8 @@ func (d *Datadog) Close() error { func init() { outputs.Add("datadog", func() telegraf.Output { - return NewDatadog(datadog_api) + return &Datadog{ + URL: datadog_api, + } }) } diff --git a/plugins/outputs/datadog/datadog_test.go b/plugins/outputs/datadog/datadog_test.go index f21ecc588..7c5e3cc1f 100644 --- a/plugins/outputs/datadog/datadog_test.go +++ b/plugins/outputs/datadog/datadog_test.go @@ -21,6 +21,12 @@ var ( fakeApiKey = "123456" ) +func NewDatadog(url string) *Datadog { + return &Datadog{ + URL: url, + } +} + func fakeDatadog() *Datadog { d := NewDatadog(fakeUrl) d.Apikey = fakeApiKey From fafe9d30bfa062084276ac8255d4fe7320bab5d0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 5 Oct 2018 14:44:20 -0700 Subject: [PATCH 0253/1815] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4edaeccba..35974219c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ - [#4753](https://github.com/influxdata/telegraf/pull/4753): Add ability to define a custom service name when installing as a Windows service. - [#4703](https://github.com/influxdata/telegraf/pull/4703): Add support for IPv6 in the ping plugin. - [#4781](https://github.com/influxdata/telegraf/pull/4781): Add new config for csv column explicit type conversion. +- [#4800](https://github.com/influxdata/telegraf/pull/4800): Add an option to specify a custom datadog URL. +- [#4803](https://github.com/influxdata/telegraf/pull/4803): Use non-allocating field and tag accessors in datadog output. +- [#4752](https://github.com/influxdata/telegraf/pull/4752): Add per-directory file counts in the filecount input. +- [#4811](https://github.com/influxdata/telegraf/pull/4811): Add windows service name lookup to procstat input. ## v1.8.1 [2018-10-03] From f3da717a8805a1da03435caea4511442464db7bf Mon Sep 17 00:00:00 2001 From: Mihai Todor Date: Fri, 5 Oct 2018 23:06:41 +0100 Subject: [PATCH 0254/1815] Add entity-body compression to http output (#4807) --- internal/internal.go | 22 ++++++++++ internal/internal_test.go | 20 ++++++++++ plugins/outputs/http/README.md | 4 ++ plugins/outputs/http/http.go | 41 ++++++++++++++----- plugins/outputs/http/http_test.go | 62 +++++++++++++++++++++++++++++ plugins/outputs/influxdb/http.go | 18 +-------- plugins/outputs/influxdb_v2/http.go | 18 +-------- 7 files changed, 142 insertions(+), 43 deletions(-) diff --git a/internal/internal.go b/internal/internal.go index f7d75dfb3..6d087cceb 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -3,8 +3,10 @@ package internal import ( "bufio" "bytes" + "compress/gzip" "crypto/rand" "errors" + "io" "log" "math/big" "os" @@ -208,3 +210,23 @@ func ExitStatus(err error) (int, bool) { } return 0, false } + +// CompressWithGzip takes an io.Reader as input and pipes +// it through a gzip.Writer returning an io.Reader containing +// the gzipped data. 
+// An error is returned if passing data to the gzip.Writer fails +func CompressWithGzip(data io.Reader) (io.Reader, error) { + pipeReader, pipeWriter := io.Pipe() + gzipWriter := gzip.NewWriter(pipeWriter) + + var err error + go func() { + _, err = io.Copy(gzipWriter, data) + gzipWriter.Close() + // subsequent reads from the read half of the pipe will + // return no bytes and the error err, or EOF if err is nil. + pipeWriter.CloseWithError(err) + }() + + return pipeReader, err +} diff --git a/internal/internal_test.go b/internal/internal_test.go index ee1d24418..3b4ec5dda 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -1,6 +1,9 @@ package internal import ( + "bytes" + "compress/gzip" + "io/ioutil" "os/exec" "testing" "time" @@ -162,3 +165,20 @@ func TestDuration(t *testing.T) { d.UnmarshalTOML([]byte(`1.5`)) assert.Equal(t, time.Second, d.Duration) } + +func TestCompressWithGzip(t *testing.T) { + testData := "the quick brown fox jumps over the lazy dog" + inputBuffer := bytes.NewBuffer([]byte(testData)) + + outputBuffer, err := CompressWithGzip(inputBuffer) + assert.NoError(t, err) + + gzipReader, err := gzip.NewReader(outputBuffer) + assert.NoError(t, err) + defer gzipReader.Close() + + output, err := ioutil.ReadAll(gzipReader) + assert.NoError(t, err) + + assert.Equal(t, testData, string(output)) +} diff --git a/plugins/outputs/http/README.md b/plugins/outputs/http/README.md index 0c11896f9..5697b6030 100644 --- a/plugins/outputs/http/README.md +++ b/plugins/outputs/http/README.md @@ -44,4 +44,8 @@ data formats. For data_formats that support batching, metrics are sent in batch # [outputs.http.headers] # # Should be set manually to "application/json" for json data_format # Content-Type = "text/plain; charset=utf-8" + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "identity" ``` diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index ccb8f8949..8393d0499 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "io" "io/ioutil" "net/http" "strings" @@ -55,6 +56,10 @@ var sampleConfig = ` # [outputs.http.headers] # # Should be set manually to "application/json" for json data_format # Content-Type = "text/plain; charset=utf-8" + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. 
+ # content_encoding = "identity" ` const ( @@ -64,16 +69,17 @@ const ( ) type HTTP struct { - URL string `toml:"url"` - Timeout internal.Duration `toml:"timeout"` - Method string `toml:"method"` - Username string `toml:"username"` - Password string `toml:"password"` - Headers map[string]string `toml:"headers"` - ClientID string `toml:"client_id"` - ClientSecret string `toml:"client_secret"` - TokenURL string `toml:"token_url"` - Scopes []string `toml:"scopes"` + URL string `toml:"url"` + Timeout internal.Duration `toml:"timeout"` + Method string `toml:"method"` + Username string `toml:"username"` + Password string `toml:"password"` + Headers map[string]string `toml:"headers"` + ClientID string `toml:"client_id"` + ClientSecret string `toml:"client_secret"` + TokenURL string `toml:"token_url"` + Scopes []string `toml:"scopes"` + ContentEncoding string `toml:"content_encoding"` tls.ClientConfig client *http.Client @@ -162,7 +168,17 @@ func (h *HTTP) Write(metrics []telegraf.Metric) error { } func (h *HTTP) write(reqBody []byte) error { - req, err := http.NewRequest(h.Method, h.URL, bytes.NewBuffer(reqBody)) + var reqBodyBuffer io.Reader = bytes.NewBuffer(reqBody) + + var err error + if h.ContentEncoding == "gzip" { + reqBodyBuffer, err = internal.CompressWithGzip(reqBodyBuffer) + if err != nil { + return err + } + } + + req, err := http.NewRequest(h.Method, h.URL, reqBodyBuffer) if err != nil { return err } @@ -172,6 +188,9 @@ func (h *HTTP) write(reqBody []byte) error { } req.Header.Set("Content-Type", defaultContentType) + if h.ContentEncoding == "gzip" { + req.Header.Set("Content-Encoding", "gzip") + } for k, v := range h.Headers { req.Header.Set(k, v) } diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index 0b6c78455..5b314cceb 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -1,7 +1,9 @@ package http import ( + "compress/gzip" "fmt" + "io/ioutil" "net/http" "net/http/httptest" "net/url" @@ -227,6 +229,66 @@ func TestContentType(t *testing.T) { } } +func TestContentEncodingGzip(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + tests := []struct { + name string + plugin *HTTP + payload string + expected string + }{ + { + name: "default is no content encoding", + plugin: &HTTP{ + URL: u.String(), + }, + expected: "", + }, + { + name: "overwrite content_encoding", + plugin: &HTTP{ + URL: u.String(), + ContentEncoding: "gzip", + }, + expected: "gzip", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, tt.expected, r.Header.Get("Content-Encoding")) + + body := r.Body + var err error + if r.Header.Get("Content-Encoding") == "gzip" { + body, err = gzip.NewReader(r.Body) + require.NoError(t, err) + } + + payload, err := ioutil.ReadAll(body) + require.NoError(t, err) + require.Contains(t, string(payload), "cpu value=42") + + w.WriteHeader(http.StatusNoContent) + }) + + serializer := influx.NewSerializer() + tt.plugin.SetSerializer(serializer) + err = tt.plugin.Connect() + require.NoError(t, err) + + err = tt.plugin.Write([]telegraf.Metric{getMetric()}) + require.NoError(t, err) + }) + } +} + func TestBasicAuth(t *testing.T) { ts := httptest.NewServer(http.NotFoundHandler()) defer ts.Close() diff --git a/plugins/outputs/influxdb/http.go 
b/plugins/outputs/influxdb/http.go index 164261feb..f32ad79a4 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -1,7 +1,6 @@ package influxdb import ( - "compress/gzip" "context" "crypto/tls" "encoding/json" @@ -16,6 +15,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/serializers/influx" ) @@ -360,7 +360,7 @@ func (c *httpClient) makeQueryRequest(query string) (*http.Request, error) { func (c *httpClient) makeWriteRequest(body io.Reader) (*http.Request, error) { var err error if c.ContentEncoding == "gzip" { - body, err = compressWithGzip(body) + body, err = internal.CompressWithGzip(body) if err != nil { return nil, err } @@ -381,20 +381,6 @@ func (c *httpClient) makeWriteRequest(body io.Reader) (*http.Request, error) { return req, nil } -func compressWithGzip(data io.Reader) (io.Reader, error) { - pr, pw := io.Pipe() - gw := gzip.NewWriter(pw) - var err error - - go func() { - _, err = io.Copy(gw, data) - gw.Close() - pw.Close() - }() - - return pr, err -} - func (c *httpClient) addHeaders(req *http.Request) { if c.Username != "" || c.Password != "" { req.SetBasicAuth(c.Username, c.Password) diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index 1e7061a27..12826ff92 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -1,7 +1,6 @@ package influxdb_v2 import ( - "compress/gzip" "context" "crypto/tls" "encoding/json" @@ -17,6 +16,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/serializers/influx" ) @@ -231,7 +231,7 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error func (c *httpClient) makeWriteRequest(body io.Reader) (*http.Request, error) { var err error if c.ContentEncoding == "gzip" { - body, err = compressWithGzip(body) + body, err = internal.CompressWithGzip(body) if err != nil { return nil, err } @@ -258,20 +258,6 @@ func (c *httpClient) addHeaders(req *http.Request) { } } -func compressWithGzip(data io.Reader) (io.Reader, error) { - pipeReader, pipeWriter := io.Pipe() - gzipWriter := gzip.NewWriter(pipeWriter) - var err error - - go func() { - _, err = io.Copy(gzipWriter, data) - gzipWriter.Close() - pipeWriter.Close() - }() - - return pipeReader, err -} - func makeWriteURL(loc url.URL, org, bucket string) (string, error) { params := url.Values{} params.Set("bucket", bucket) From 25d40c2849d81687e4b7ae5a8405bbcf667e4f58 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 5 Oct 2018 15:08:01 -0700 Subject: [PATCH 0255/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 35974219c..f147e2578 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ - [#4803](https://github.com/influxdata/telegraf/pull/4803): Use non-allocating field and tag accessors in datadog output. - [#4752](https://github.com/influxdata/telegraf/pull/4752): Add per-directory file counts in the filecount input. - [#4811](https://github.com/influxdata/telegraf/pull/4811): Add windows service name lookup to procstat input. +- [#4807](https://github.com/influxdata/telegraf/pull/4807): Add entity-body compression to http output. 
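For readers skimming the series: the two patches above funnel the same streaming-gzip trick into one shared helper. A minimal standalone sketch of that pattern follows; it is illustrative only, and the `gzipReader`/`postGzipped` names and the example URL are assumptions of this sketch, not part of any diff here.

```go
package main

import (
	"compress/gzip"
	"io"
	"log"
	"net/http"
	"strings"
)

// gzipReader streams data through a gzip.Writer via an io.Pipe, so the
// payload is compressed on the fly rather than buffered whole in memory.
func gzipReader(data io.Reader) io.Reader {
	pr, pw := io.Pipe()
	gw := gzip.NewWriter(pw)
	go func() {
		_, err := io.Copy(gw, data)
		gw.Close()
		pw.CloseWithError(err) // readers see err, or EOF if err is nil
	}()
	return pr
}

// postGzipped sends a gzip-encoded body and labels it with
// Content-Encoding so the server knows to decode it.
func postGzipped(url, body string) error {
	req, err := http.NewRequest("POST", url, gzipReader(strings.NewReader(body)))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Encoding", "gzip")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

func main() {
	if err := postGzipped("http://localhost:8080/write", "cpu value=42\n"); err != nil {
		log.Fatal(err)
	}
}
```

Because the copy runs in a goroutine, a compression error in this sketch is only observable from the pipe's read side, not from the function's immediate return value.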
## v1.8.1 [2018-10-03] From 3579d1d1d4efb39929dcaac71638911d21bf8d94 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicol=C3=A1s=20Alvarez?= Date: Fri, 5 Oct 2018 23:03:15 -0300 Subject: [PATCH 0256/1815] Fix formatting in net plugin docs (#4818) --- plugins/inputs/net/NET_README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/net/NET_README.md b/plugins/inputs/net/NET_README.md index f265e2448..d9e747119 100644 --- a/plugins/inputs/net/NET_README.md +++ b/plugins/inputs/net/NET_README.md @@ -51,7 +51,7 @@ Under Linux the system wide protocol metrics have the interface=all tag. ### Sample Queries: -You can use the following query to get the upload/download traffic rate per second for all interfaces in the last hour. The query uses the (derivative function)[https://docs.influxdata.com/influxdb/v1.2/query_language/functions#derivative] which calculates the rate of change between subsequent field values. +You can use the following query to get the upload/download traffic rate per second for all interfaces in the last hour. The query uses the [derivative function](https://docs.influxdata.com/influxdb/v1.2/query_language/functions#derivative) which calculates the rate of change between subsequent field values. ``` SELECT derivative(first(bytes_recv), 1s) as "download bytes/sec", derivative(first(bytes_sent), 1s) as "upload bytes/sec" FROM net WHERE time > now() - 1h AND interface != 'all' GROUP BY time(10s), interface fill(0); From 0d2dcc2dc3e74174be6b8e5e06e7488159b7db70 Mon Sep 17 00:00:00 2001 From: Alex Date: Mon, 8 Oct 2018 20:55:33 +0100 Subject: [PATCH 0257/1815] Add Windows nvidia-smi bin_path to readme (#4819) --- plugins/inputs/nvidia_smi/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/inputs/nvidia_smi/README.md b/plugins/inputs/nvidia_smi/README.md index 97fd9ff04..2a5c8a626 100644 --- a/plugins/inputs/nvidia_smi/README.md +++ b/plugins/inputs/nvidia_smi/README.md @@ -1,6 +1,7 @@ # `nvidia-smi` Input Plugin This plugin uses a query on the [`nvidia-smi`](https://developer.nvidia.com/nvidia-system-management-interface) binary to pull GPU stats including memory and GPU usage, temp and other. +On windows, `nvidia-smi` is generally located at "C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe" ### Configuration From 709eadffc4040d51d5d2ba5d1e7bb13185294b17 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 8 Oct 2018 12:57:30 -0700 Subject: [PATCH 0258/1815] Move nvidia-smi Windows docs into config section --- plugins/inputs/nvidia_smi/README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/nvidia_smi/README.md b/plugins/inputs/nvidia_smi/README.md index 2a5c8a626..b59f2ee6a 100644 --- a/plugins/inputs/nvidia_smi/README.md +++ b/plugins/inputs/nvidia_smi/README.md @@ -1,7 +1,6 @@ # `nvidia-smi` Input Plugin This plugin uses a query on the [`nvidia-smi`](https://developer.nvidia.com/nvidia-system-management-interface) binary to pull GPU stats including memory and GPU usage, temp and other. 
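As an aside to the doc move below: callers outside Telegraf sometimes want the same Windows fallback programmatically. A hedged sketch follows; this is not the plugin's logic (the plugin simply reads `bin_path` from its configuration), and `resolveNvidiaSMI` is a name invented for illustration.

```go
package main

import (
	"fmt"
	"os/exec"
	"runtime"
)

// resolveNvidiaSMI prefers a PATH lookup and falls back to the
// well-known Windows install location mentioned in the README.
func resolveNvidiaSMI() (string, error) {
	if p, err := exec.LookPath("nvidia-smi"); err == nil {
		return p, nil
	}
	if runtime.GOOS == "windows" {
		return `C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe`, nil
	}
	return "", fmt.Errorf("nvidia-smi not found on PATH")
}

func main() {
	path, err := resolveNvidiaSMI()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("using", path)
}
```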
-On windows, `nvidia-smi` is generally located at "C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe" ### Configuration @@ -15,6 +14,10 @@ On windows, `nvidia-smi` is generally located at "C:\Program Files\NVIDIA Corpor # timeout = "5s" ``` +#### Windows + +On Windows, `nvidia-smi` is generally located at `C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe` + ### Metrics - measurement: `nvidia_smi` - tags From 7bb219222a6fdc26954f5a1f05991730f377a718 Mon Sep 17 00:00:00 2001 From: Kevin Conaway Date: Tue, 9 Oct 2018 16:45:07 -0400 Subject: [PATCH 0259/1815] Add internal function for telegraf version (#4828) --- cmd/telegraf/telegraf.go | 10 ++++++++++ internal/internal.go | 19 +++++++++++++++++++ internal/internal_test.go | 12 ++++++++++++ 3 files changed, 41 insertions(+) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 93336ffb4..02f22353b 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -326,6 +326,16 @@ func main() { return } + shortVersion := version + if shortVersion == "" { + shortVersion = "unknown" + } + + // Configure version + if err := internal.SetVersion(shortVersion); err != nil { + log.Println("Telegraf version already configured to: " + internal.Version()) + } + if runtime.GOOS == "windows" && !(*fRunAsConsole) { svcConfig := &service.Config{ Name: *fServiceName, diff --git a/internal/internal.go b/internal/internal.go index 6d087cceb..f6b85de84 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -24,13 +24,32 @@ var ( TimeoutErr = errors.New("Command timed out.") NotImplementedError = errors.New("not implemented yet") + + VersionAlreadySetError = errors.New("version has already been set") ) +// Set via the main module +var version string + // Duration just wraps time.Duration type Duration struct { Duration time.Duration } +// SetVersion sets the telegraf agent version +func SetVersion(v string) error { + if version != "" { + return VersionAlreadySetError + } + version = v + return nil +} + +// Version returns the telegraf agent version +func Version() string { + return version +} + // UnmarshalTOML parses the duration from the TOML config file func (d *Duration) UnmarshalTOML(b []byte) error { var err error diff --git a/internal/internal_test.go b/internal/internal_test.go index 3b4ec5dda..486c3d744 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -182,3 +182,15 @@ func TestCompressWithGzip(t *testing.T) { assert.Equal(t, testData, string(output)) } + +func TestVersionAlreadySet(t *testing.T) { + err := SetVersion("foo") + assert.Nil(t, err) + + err = SetVersion("bar") + + assert.NotNil(t, err) + assert.IsType(t, VersionAlreadySetError, err) + + assert.Equal(t, "foo", Version()) +} From b07568640046493443c32f7411217cb100082734 Mon Sep 17 00:00:00 2001 From: Greg Date: Wed, 10 Oct 2018 16:53:01 -0600 Subject: [PATCH 0260/1815] Update write path to match updated InfluxDB v2 API (#4844) --- plugins/outputs/influxdb_v2/http.go | 4 ++-- plugins/outputs/influxdb_v2/http_internal_test.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index 12826ff92..4f1cd32dd 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -267,9 +267,9 @@ func makeWriteURL(loc url.URL, org, bucket string) (string, error) { case "unix": loc.Scheme = "http" loc.Host = "127.0.0.1" - loc.Path = "v2/write" + loc.Path = "/api/v2/write" case "http", "https": - loc.Path = 
path.Join(loc.Path, "v2/write") + loc.Path = path.Join(loc.Path, "/api/v2/write") default: return "", fmt.Errorf("unsupported scheme: %q", loc.Scheme) } diff --git a/plugins/outputs/influxdb_v2/http_internal_test.go b/plugins/outputs/influxdb_v2/http_internal_test.go index 5df51fc85..748519a7b 100644 --- a/plugins/outputs/influxdb_v2/http_internal_test.go +++ b/plugins/outputs/influxdb_v2/http_internal_test.go @@ -21,11 +21,11 @@ func TestMakeWriteURL(t *testing.T) { }{ { url: genURL("http://localhost:9999"), - act: "http://localhost:9999/v2/write?bucket=telegraf&org=influx", + act: "http://localhost:9999/api/v2/write?bucket=telegraf&org=influx", }, { url: genURL("unix://var/run/influxd.sock"), - act: "http://127.0.0.1/v2/write?bucket=telegraf&org=influx", + act: "http://127.0.0.1/api/v2/write?bucket=telegraf&org=influx", }, { err: true, From a777fdda1eb4221cb813c51546f1d23314358b85 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 10 Oct 2018 15:54:37 -0700 Subject: [PATCH 0261/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f147e2578..f7949d5c9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,12 @@ - [#4811](https://github.com/influxdata/telegraf/pull/4811): Add windows service name lookup to procstat input. - [#4807](https://github.com/influxdata/telegraf/pull/4807): Add entity-body compression to http output. +## v1.8.2 [unreleased] + +### Bugfixes + +- [#4844](https://github.com/influxdata/telegraf/pull/4844): Update write path to match updated InfluxDB v2 API. + ## v1.8.1 [2018-10-03] ### Bugfixes From a6b5f2c0931c5abd6c0a69f7702c1aaa0821a9ce Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 10 Oct 2018 18:06:24 -0700 Subject: [PATCH 0262/1815] Document that taginclude/tagexclude can remove ANY tag (#4847) --- docs/CONFIGURATION.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 0698721e4..27002be0d 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -192,11 +192,13 @@ they have passed the `fieldpass` test. An array of glob pattern strings. Only tags with a tag key matching one of the patterns are emitted. In contrast to `tagpass`, which will pass an entire metric based on its tag, `taginclude` removes all non matching tags from the -metric. +metric. Any tag can be filtered including global tags and the agent `host` +tag. - **tagexclude**: The inverse of `taginclude`. Tags with a tag key matching one of the patterns -will be discarded from the metric. +will be discarded from the metric. Any tag can be filtered including global +tags and the agent `host` tag. 
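+
+For example, this hypothetical snippet strips the agent `host` tag on the
+way out of one output, while an input keeps only its own `cpu` tag:
+
+```toml
+[[outputs.influxdb]]
+  urls = ["http://localhost:8086"]
+  tagexclude = ["host"]
+
+[[inputs.cpu]]
+  taginclude = ["cpu"]
+```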
### Input Configuration Examples From 7344693ca81add5098973753f701585c00a447f9 Mon Sep 17 00:00:00 2001 From: kostya-sh Date: Thu, 11 Oct 2018 02:29:33 +0100 Subject: [PATCH 0263/1815] Fix hang in dns_query plugin (#4841) --- plugins/inputs/dns_query/dns_query.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/inputs/dns_query/dns_query.go b/plugins/inputs/dns_query/dns_query.go index ec524d95b..3f0b1ab2f 100644 --- a/plugins/inputs/dns_query/dns_query.go +++ b/plugins/inputs/dns_query/dns_query.go @@ -97,6 +97,8 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error { } acc.AddFields("dns_query", fields, tags) + + wg.Done() }(domain, server) } } From a0eee37ed28a8c289ea80c895adccc17073d03ce Mon Sep 17 00:00:00 2001 From: kelwang <8237958+kelwang@users.noreply.github.com> Date: Wed, 10 Oct 2018 21:34:32 -0400 Subject: [PATCH 0264/1815] Fix grammar in influxdb_v2 sample config (#4815) --- plugins/outputs/influxdb_v2/influxdb.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/influxdb_v2/influxdb.go b/plugins/outputs/influxdb_v2/influxdb.go index 886907c03..a3722a046 100644 --- a/plugins/outputs/influxdb_v2/influxdb.go +++ b/plugins/outputs/influxdb_v2/influxdb.go @@ -35,7 +35,7 @@ var sampleConfig = ` ## Organization is the name of the organization you wish to write to; must exist. organization = "" - ## Bucket to the name fo the bucketwrite into; must exist. + ## Destination bucket to write into. bucket = "" ## Timeout for HTTP messages. From eece559fe7eec05a942faa8177ec263e95aef5f5 Mon Sep 17 00:00:00 2001 From: "Jonathan A. Sternberg" Date: Wed, 10 Oct 2018 20:38:43 -0500 Subject: [PATCH 0265/1815] Add a Dockerfile that matches influxdata-docker to build images from source (#4793) The images in influxdata-docker are meant to be built by downloading official releases. Sometimes, it is useful to build directly from source when you need an unofficial release. These images are meant to be used then using multi-stage builds so that it can build from source and then copy the results to images that match the official counterpart. --- Dockerfile | 18 ++++++++++++++++++ Dockerfile.alpine | 21 +++++++++++++++++++++ docker/entrypoint.sh | 8 ++++++++ 3 files changed, 47 insertions(+) create mode 100644 Dockerfile create mode 100644 Dockerfile.alpine create mode 100755 docker/entrypoint.sh diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..7c97201fc --- /dev/null +++ b/Dockerfile @@ -0,0 +1,18 @@ +FROM golang:1.11.0 as builder +ENV DEP_VERSION 0.5.0 +RUN curl -fsSL -o /usr/local/bin/dep https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 && chmod +x /usr/local/bin/dep +WORKDIR /go/src/github.com/influxdata/telegraf +COPY Gopkg.toml Gopkg.lock ./ +RUN dep ensure -vendor-only +COPY . /go/src/github.com/influxdata/telegraf +RUN go install ./cmd/... 
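+
+# Second stage below: only the compiled binaries are copied out of the
+# builder, so the Go toolchain and sources stay out of the final image.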
+ +FROM buildpack-deps:stretch-curl +COPY --from=builder /go/bin/* /usr/bin/ +COPY etc/telegraf.conf /etc/telegraf/telegraf.conf + +EXPOSE 8125/udp 8092/udp 8094 + +COPY docker/entrypoint.sh /entrypoint.sh +ENTRYPOINT ["/entrypoint.sh"] +CMD ["telegraf"] diff --git a/Dockerfile.alpine b/Dockerfile.alpine new file mode 100644 index 000000000..d5ad52523 --- /dev/null +++ b/Dockerfile.alpine @@ -0,0 +1,21 @@ +FROM golang:1.11.0 as builder +ENV DEP_VERSION 0.5.0 +RUN curl -fsSL -o /usr/local/bin/dep https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 && chmod +x /usr/local/bin/dep +WORKDIR /go/src/github.com/influxdata/telegraf +COPY Gopkg.toml Gopkg.lock ./ +RUN dep ensure -vendor-only +COPY . /go/src/github.com/influxdata/telegraf +RUN CGO_ENABLED=0 go install ./cmd/... + +FROM alpine:3.6 +RUN echo 'hosts: files dns' >> /etc/nsswitch.conf +RUN apk add --no-cache iputils ca-certificates net-snmp-tools procps lm_sensors && \ + update-ca-certificates +COPY --from=builder /go/bin/* /usr/bin/ +COPY etc/telegraf.conf /etc/telegraf/telegraf.conf + +EXPOSE 8125/udp 8092/udp 8094 + +COPY docker/entrypoint.sh /entrypoint.sh +ENTRYPOINT ["/entrypoint.sh"] +CMD ["telegraf"] diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh new file mode 100755 index 000000000..6e7580b21 --- /dev/null +++ b/docker/entrypoint.sh @@ -0,0 +1,8 @@ +#!/bin/bash +set -e + +if [ "${1:0:1}" = '-' ]; then + set -- telegraf "$@" +fi + +exec "$@" From 8a03a21de21a4ac2a6215f2e8451dbe139f4e1c6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 11 Oct 2018 00:05:51 -0700 Subject: [PATCH 0266/1815] Place docker files in scripts and update makefile --- Makefile | 4 +--- Dockerfile.alpine => scripts/alpine.docker | 4 ++-- scripts/dev.docker | 8 -------- docker/entrypoint.sh => scripts/docker-entrypoint.sh | 0 Dockerfile => scripts/stretch.docker | 4 ++-- 5 files changed, 5 insertions(+), 15 deletions(-) rename Dockerfile.alpine => scripts/alpine.docker (89%) delete mode 100644 scripts/dev.docker rename docker/entrypoint.sh => scripts/docker-entrypoint.sh (100%) rename Dockerfile => scripts/stretch.docker (88%) diff --git a/Makefile b/Makefile index 4521c5d7f..e5b0ce8de 100644 --- a/Makefile +++ b/Makefile @@ -113,9 +113,7 @@ clean: .PHONY: docker-image docker-image: - ./scripts/build.py --package --platform=linux --arch=amd64 - cp build/telegraf*$(COMMIT)*.deb . - docker build -f scripts/dev.docker --build-arg "package=telegraf*$(COMMIT)*.deb" -t "telegraf-dev:$(COMMIT)" . + docker build -f scripts/stretch.docker -t "telegraf:$(COMMIT)" . plugins/parsers/influx/machine.go: plugins/parsers/influx/machine.go.rl ragel -Z -G2 $^ -o $@ diff --git a/Dockerfile.alpine b/scripts/alpine.docker similarity index 89% rename from Dockerfile.alpine rename to scripts/alpine.docker index d5ad52523..0103a16d4 100644 --- a/Dockerfile.alpine +++ b/scripts/alpine.docker @@ -5,7 +5,7 @@ WORKDIR /go/src/github.com/influxdata/telegraf COPY Gopkg.toml Gopkg.lock ./ RUN dep ensure -vendor-only COPY . /go/src/github.com/influxdata/telegraf -RUN CGO_ENABLED=0 go install ./cmd/... 
+RUN CGO_ENABLED=0 make go-install FROM alpine:3.6 RUN echo 'hosts: files dns' >> /etc/nsswitch.conf @@ -16,6 +16,6 @@ COPY etc/telegraf.conf /etc/telegraf/telegraf.conf EXPOSE 8125/udp 8092/udp 8094 -COPY docker/entrypoint.sh /entrypoint.sh +COPY scripts/docker-entrypoint.sh /entrypoint.sh ENTRYPOINT ["/entrypoint.sh"] CMD ["telegraf"] diff --git a/scripts/dev.docker b/scripts/dev.docker deleted file mode 100644 index 902c7f9aa..000000000 --- a/scripts/dev.docker +++ /dev/null @@ -1,8 +0,0 @@ -FROM debian:stretch -ARG package -ADD ${package} ${package} -RUN dpkg -i ${package} - -EXPOSE 8125/udp 8092/udp 8094 - -CMD ["telegraf"] diff --git a/docker/entrypoint.sh b/scripts/docker-entrypoint.sh similarity index 100% rename from docker/entrypoint.sh rename to scripts/docker-entrypoint.sh diff --git a/Dockerfile b/scripts/stretch.docker similarity index 88% rename from Dockerfile rename to scripts/stretch.docker index 7c97201fc..906e0c504 100644 --- a/Dockerfile +++ b/scripts/stretch.docker @@ -5,7 +5,7 @@ WORKDIR /go/src/github.com/influxdata/telegraf COPY Gopkg.toml Gopkg.lock ./ RUN dep ensure -vendor-only COPY . /go/src/github.com/influxdata/telegraf -RUN go install ./cmd/... +RUN make go-install FROM buildpack-deps:stretch-curl COPY --from=builder /go/bin/* /usr/bin/ @@ -13,6 +13,6 @@ COPY etc/telegraf.conf /etc/telegraf/telegraf.conf EXPOSE 8125/udp 8092/udp 8094 -COPY docker/entrypoint.sh /entrypoint.sh +COPY scripts/docker-entrypoint.sh /entrypoint.sh ENTRYPOINT ["/entrypoint.sh"] CMD ["telegraf"] From 502d9ab4997190ce770107c9b945776cf75f345c Mon Sep 17 00:00:00 2001 From: kostya-sh Date: Thu, 11 Oct 2018 20:15:17 +0100 Subject: [PATCH 0267/1815] Fix TestGatheringTimeout test for dns_query plugin (#4842) --- plugins/inputs/dns_query/dns_query_test.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/dns_query/dns_query_test.go b/plugins/inputs/dns_query/dns_query_test.go index 3f70153e4..5a1379764 100644 --- a/plugins/inputs/dns_query/dns_query_test.go +++ b/plugins/inputs/dns_query/dns_query_test.go @@ -117,21 +117,17 @@ func TestGatheringTimeout(t *testing.T) { var acc testutil.Accumulator dnsConfig.Port = 60054 dnsConfig.Timeout = 1 - var err error channel := make(chan error, 1) go func() { channel <- acc.GatherError(dnsConfig.Gather) }() select { - case res := <-channel: - err = res + case err := <-channel: + assert.NoError(t, err) case <-time.After(time.Second * 2): - err = nil + assert.Fail(t, "DNS query did not timeout") } - - assert.Error(t, err) - assert.Contains(t, err.Error(), "i/o timeout") } func TestSettingDefaultValues(t *testing.T) { From 44fd74d688fb64f01a198b8dd9e659415a75b008 Mon Sep 17 00:00:00 2001 From: Kevin Conaway Date: Thu, 11 Oct 2018 15:25:21 -0400 Subject: [PATCH 0268/1815] Add telegraf version to User-Agent header (#4838) Header is added in influxdb, influxdb_v2, and http outputs. 
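The shape of the change, sketched standalone below; the names are simplified stand-ins for the internal package, not the exact diff that follows:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// version is set once at startup; a second call is rejected so that
// nothing can silently change the advertised agent version later.
var version string

var errVersionSet = errors.New("version has already been set")

func setVersion(v string) error {
	if version != "" {
		return errVersionSet
	}
	version = v
	return nil
}

// newWriteRequest stamps every outgoing request with the agent version.
func newWriteRequest(url string) (*http.Request, error) {
	req, err := http.NewRequest("POST", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", "Telegraf/"+version)
	return req, nil
}

func main() {
	setVersion("1.2.3")
	req, err := newWriteRequest("http://localhost:8086/write")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(req.Header.Get("User-Agent")) // prints Telegraf/1.2.3
}
```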
--- plugins/outputs/http/http.go | 1 + plugins/outputs/http/http_test.go | 31 +++++++++++++++++++++++++++ plugins/outputs/influxdb/http.go | 8 +++---- plugins/outputs/influxdb/http_test.go | 14 ++++++++++++ plugins/outputs/influxdb_v2/http.go | 3 +-- 5 files changed, 50 insertions(+), 7 deletions(-) diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 8393d0499..abcea74b5 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -187,6 +187,7 @@ func (h *HTTP) write(reqBody []byte) error { req.SetBasicAuth(h.Username, h.Password) } + req.Header.Set("User-Agent", "Telegraf/"+internal.Version()) req.Header.Set("Content-Type", defaultContentType) if h.ContentEncoding == "gzip" { req.Header.Set("Content-Encoding", "gzip") diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index 5b314cceb..0decdf024 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -11,6 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/stretchr/testify/require" @@ -422,3 +423,33 @@ func TestOAuthClientCredentialsGrant(t *testing.T) { }) } } + +func TestDefaultUserAgent(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + internal.SetVersion("1.2.3") + + t.Run("default-user-agent", func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, "Telegraf/1.2.3", r.Header.Get("User-Agent")) + w.WriteHeader(http.StatusOK) + }) + + client := &HTTP{ + URL: u.String(), + Method: defaultMethod, + } + + serializer := influx.NewSerializer() + client.SetSerializer(serializer) + err = client.Connect() + require.NoError(t, err) + + err = client.Write([]telegraf.Metric{getMetric()}) + require.NoError(t, err) + }) +} diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index f32ad79a4..236d04321 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -27,10 +27,8 @@ const ( ) const ( - defaultRequestTimeout = time.Second * 5 - defaultDatabase = "telegraf" - defaultUserAgent = "telegraf" - + defaultRequestTimeout = time.Second * 5 + defaultDatabase = "telegraf" errStringDatabaseNotFound = "database not found" errStringHintedHandoffNotEmpty = "hinted handoff queue not empty" errStringPartialWrite = "partial write" @@ -138,7 +136,7 @@ func NewHTTPClient(config *HTTPConfig) (*httpClient, error) { userAgent := config.UserAgent if userAgent == "" { - userAgent = defaultUserAgent + userAgent = "Telegraf/" + internal.Version() } var headers = make(map[string]string, len(config.Headers)+1) diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index 30cc1f8b6..fa648f0f8 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -18,6 +18,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs/influxdb" "github.com/stretchr/testify/require" @@ -246,6 +247,8 @@ func TestHTTP_Write(t *testing.T) { u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) require.NoError(t, err) + 
internal.SetVersion("1.2.3") + tests := []struct { name string config *influxdb.HTTPConfig @@ -295,6 +298,17 @@ func TestHTTP_Write(t *testing.T) { w.WriteHeader(http.StatusNoContent) }, }, + { + name: "default user agent", + config: &influxdb.HTTPConfig{ + URL: u, + Database: "telegraf", + }, + queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.Header.Get("User-Agent"), "Telegraf/1.2.3") + w.WriteHeader(http.StatusNoContent) + }, + }, { name: "default database", config: &influxdb.HTTPConfig{ diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index 4f1cd32dd..d6d9d9076 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -40,7 +40,6 @@ const ( defaultRequestTimeout = time.Second * 5 defaultMaxWait = 10 // seconds defaultDatabase = "telegraf" - defaultUserAgent = "telegraf" ) type HTTPConfig struct { @@ -82,7 +81,7 @@ func NewHTTPClient(config *HTTPConfig) (*httpClient, error) { userAgent := config.UserAgent if userAgent == "" { - userAgent = defaultUserAgent + userAgent = "Telegraf/" + internal.Version() } var headers = make(map[string]string, len(config.Headers)+2) From bde73d83285b0cddc6703bb43a6d4d2d6afa07f4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 11 Oct 2018 12:26:13 -0700 Subject: [PATCH 0269/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f7949d5c9..3f08fef88 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ - [#4752](https://github.com/influxdata/telegraf/pull/4752): Add per-directory file counts in the filecount input. - [#4811](https://github.com/influxdata/telegraf/pull/4811): Add windows service name lookup to procstat input. - [#4807](https://github.com/influxdata/telegraf/pull/4807): Add entity-body compression to http output. +- [#4838](https://github.com/influxdata/telegraf/pull/4838): Add telegraf version to User-Agent header. ## v1.8.2 [unreleased] From c117ed624d8b033e0f53fb7e9543d9b459df6ad9 Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Thu, 11 Oct 2018 16:08:09 -0400 Subject: [PATCH 0270/1815] Fix missing timeouts in vsphere input (#4840) --- plugins/inputs/vsphere/client.go | 36 ++++++++++------ plugins/inputs/vsphere/endpoint.go | 58 ++++++++++++++++++-------- plugins/inputs/vsphere/vsphere_test.go | 22 ++++++++++ 3 files changed, 85 insertions(+), 31 deletions(-) diff --git a/plugins/inputs/vsphere/client.go b/plugins/inputs/vsphere/client.go index 9b77b750a..5278cd349 100644 --- a/plugins/inputs/vsphere/client.go +++ b/plugins/inputs/vsphere/client.go @@ -34,6 +34,7 @@ type Client struct { Root *view.ContainerView Perf *performance.Manager Valid bool + Timeout time.Duration closeGate sync.Once } @@ -53,7 +54,7 @@ func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) { defer cf.mux.Unlock() if cf.client == nil { var err error - if cf.client, err = NewClient(cf.url, cf.parent); err != nil { + if cf.client, err = NewClient(ctx, cf.url, cf.parent); err != nil { return nil, err } } @@ -61,9 +62,13 @@ func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) { // Execute a dummy call against the server to make sure the client is // still functional. If not, try to log back in. If that doesn't work, // we give up. 
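+	// Every call below is bounded by the configured plugin timeout via
+	// context.WithTimeout, so a stalled vCenter cannot hang this liveness check.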
- if _, err := methods.GetCurrentTime(ctx, cf.client.Client); err != nil { + ctx1, cancel1 := context.WithTimeout(ctx, cf.parent.Timeout.Duration) + defer cancel1() + if _, err := methods.GetCurrentTime(ctx1, cf.client.Client); err != nil { log.Printf("I! [input.vsphere]: Client session seems to have time out. Reauthenticating!") - if cf.client.Client.SessionManager.Login(ctx, url.UserPassword(cf.parent.Username, cf.parent.Password)) != nil { + ctx2, cancel2 := context.WithTimeout(ctx, cf.parent.Timeout.Duration) + defer cancel2() + if cf.client.Client.SessionManager.Login(ctx2, url.UserPassword(cf.parent.Username, cf.parent.Password)) != nil { return nil, err } } @@ -72,7 +77,7 @@ func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) { } // NewClient creates a new vSphere client based on the url and setting passed as parameters. -func NewClient(u *url.URL, vs *VSphere) (*Client, error) { +func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) { sw := NewStopwatch("connect", u.Host) tlsCfg, err := vs.ClientConfig.TLSConfig() if err != nil { @@ -85,7 +90,6 @@ func NewClient(u *url.URL, vs *VSphere) (*Client, error) { if vs.Username != "" { u.User = url.UserPassword(vs.Username, vs.Password) } - ctx := context.Background() log.Printf("D! [input.vsphere]: Creating client: %s", u.Host) soapClient := soap.NewClient(u, tlsCfg.InsecureSkipVerify) @@ -103,7 +107,9 @@ func NewClient(u *url.URL, vs *VSphere) (*Client, error) { } } - vimClient, err := vim25.NewClient(ctx, soapClient) + ctx1, cancel1 := context.WithTimeout(ctx, vs.Timeout.Duration) + defer cancel1() + vimClient, err := vim25.NewClient(ctx1, soapClient) if err != nil { return nil, err } @@ -111,7 +117,9 @@ func NewClient(u *url.URL, vs *VSphere) (*Client, error) { // If TSLKey is specified, try to log in as an extension using a cert. if vs.TLSKey != "" { - if err := sm.LoginExtensionByCertificate(ctx, vs.TLSKey); err != nil { + ctx2, cancel2 := context.WithTimeout(ctx, vs.Timeout.Duration) + defer cancel2() + if err := sm.LoginExtensionByCertificate(ctx2, vs.TLSKey); err != nil { return nil, err } } @@ -142,11 +150,12 @@ func NewClient(u *url.URL, vs *VSphere) (*Client, error) { sw.Stop() return &Client{ - Client: c, - Views: m, - Root: v, - Perf: p, - Valid: true, + Client: c, + Views: m, + Root: v, + Perf: p, + Valid: true, + Timeout: vs.Timeout.Duration, }, nil } @@ -164,7 +173,8 @@ func (c *Client) close() { // Use a Once to prevent us from panics stemming from trying // to close it multiple times. c.closeGate.Do(func() { - ctx := context.Background() + ctx, cancel := context.WithTimeout(context.Background(), c.Timeout) + defer cancel() if c.Client != nil { if err := c.Client.Logout(ctx); err != nil { log.Printf("E! 
[input.vsphere]: Error during logout: %s", err) diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index 8c3795869..f052fefde 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -46,7 +46,7 @@ type resourceKind struct { objects objectMap filters filter.Filter collectInstances bool - getObjects func(context.Context, *view.ContainerView) (objectMap, error) + getObjects func(context.Context, *Endpoint, *view.ContainerView) (objectMap, error) } type metricEntry struct { @@ -253,7 +253,9 @@ func (e *Endpoint) getMetricNameMap(ctx context.Context) (map[int32]string, erro return nil, err } - mn, err := client.Perf.CounterInfoByName(ctx) + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + mn, err := client.Perf.CounterInfoByName(ctx1) if err != nil { return nil, err @@ -272,7 +274,9 @@ func (e *Endpoint) getMetadata(ctx context.Context, in interface{}) interface{} } rq := in.(*metricQRequest) - metrics, err := client.Perf.AvailableMetric(ctx, rq.obj.ref.Reference(), rq.res.sampling) + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + metrics, err := client.Perf.AvailableMetric(ctx1, rq.obj.ref.Reference(), rq.res.sampling) if err != nil && err != context.Canceled { log.Printf("E! [input.vsphere]: Error while getting metric metadata. Discovery will be incomplete. Error: %s", err) } @@ -292,7 +296,9 @@ func (e *Endpoint) getDatacenterName(ctx context.Context, client *Client, cache path = append(path, here.Reference().String()) o := object.NewCommon(client.Client.Client, r) var result mo.ManagedEntity - err := o.Properties(ctx, here, []string{"parent", "name"}, &result) + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + err := o.Properties(ctx1, here, []string{"parent", "name"}, &result) if err != nil { log.Printf("W! [input.vsphere]: Error while resolving parent. Assuming no parent exists. Error: %s", err) break @@ -344,7 +350,7 @@ func (e *Endpoint) discover(ctx context.Context) error { log.Printf("D! 
[input.vsphere] Discovering resources for %s", res.name) // Need to do this for all resource types even if they are not enabled (but datastore) if res.enabled || (k != "datastore" && k != "vm") { - objects, err := res.getObjects(ctx, client.Root) + objects, err := res.getObjects(ctx, e, client.Root) if err != nil { return err } @@ -411,9 +417,11 @@ func (e *Endpoint) discover(ctx context.Context) error { return nil } -func getDatacenters(ctx context.Context, root *view.ContainerView) (objectMap, error) { +func getDatacenters(ctx context.Context, e *Endpoint, root *view.ContainerView) (objectMap, error) { var resources []mo.Datacenter - err := root.Retrieve(ctx, []string{"Datacenter"}, []string{"name", "parent"}, &resources) + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + err := root.Retrieve(ctx1, []string{"Datacenter"}, []string{"name", "parent"}, &resources) if err != nil { return nil, err } @@ -425,9 +433,11 @@ func getDatacenters(ctx context.Context, root *view.ContainerView) (objectMap, e return m, nil } -func getClusters(ctx context.Context, root *view.ContainerView) (objectMap, error) { +func getClusters(ctx context.Context, e *Endpoint, root *view.ContainerView) (objectMap, error) { var resources []mo.ClusterComputeResource - err := root.Retrieve(ctx, []string{"ClusterComputeResource"}, []string{"name", "parent"}, &resources) + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + err := root.Retrieve(ctx1, []string{"ClusterComputeResource"}, []string{"name", "parent"}, &resources) if err != nil { return nil, err } @@ -439,7 +449,9 @@ func getClusters(ctx context.Context, root *view.ContainerView) (objectMap, erro if !ok { o := object.NewFolder(root.Client(), *r.Parent) var folder mo.Folder - err := o.Properties(ctx, *r.Parent, []string{"parent"}, &folder) + ctx2, cancel2 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel2() + err := o.Properties(ctx2, *r.Parent, []string{"parent"}, &folder) if err != nil { log.Printf("W! 
[input.vsphere] Error while getting folder parent: %e", err) p = nil @@ -455,7 +467,7 @@ func getClusters(ctx context.Context, root *view.ContainerView) (objectMap, erro return m, nil } -func getHosts(ctx context.Context, root *view.ContainerView) (objectMap, error) { +func getHosts(ctx context.Context, e *Endpoint, root *view.ContainerView) (objectMap, error) { var resources []mo.HostSystem err := root.Retrieve(ctx, []string{"HostSystem"}, []string{"name", "parent"}, &resources) if err != nil { @@ -469,9 +481,11 @@ func getHosts(ctx context.Context, root *view.ContainerView) (objectMap, error) return m, nil } -func getVMs(ctx context.Context, root *view.ContainerView) (objectMap, error) { +func getVMs(ctx context.Context, e *Endpoint, root *view.ContainerView) (objectMap, error) { var resources []mo.VirtualMachine - err := root.Retrieve(ctx, []string{"VirtualMachine"}, []string{"name", "runtime.host", "config.guestId", "config.uuid"}, &resources) + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + err := root.Retrieve(ctx1, []string{"VirtualMachine"}, []string{"name", "runtime.host", "config.guestId", "config.uuid"}, &resources) if err != nil { return nil, err } @@ -491,9 +505,11 @@ func getVMs(ctx context.Context, root *view.ContainerView) (objectMap, error) { return m, nil } -func getDatastores(ctx context.Context, root *view.ContainerView) (objectMap, error) { +func getDatastores(ctx context.Context, e *Endpoint, root *view.ContainerView) (objectMap, error) { var resources []mo.Datastore - err := root.Retrieve(ctx, []string{"Datastore"}, []string{"name", "parent"}, &resources) + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + err := root.Retrieve(ctx1, []string{"Datastore"}, []string{"name", "parent"}, &resources) if err != nil { return nil, err } @@ -696,17 +712,23 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, return 0, err } - metricInfo, err := client.Perf.CounterInfoByName(ctx) + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + metricInfo, err := client.Perf.CounterInfoByName(ctx1) if err != nil { return count, err } - metrics, err := client.Perf.Query(ctx, pqs) + ctx2, cancel2 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel2() + metrics, err := client.Perf.Query(ctx2, pqs) if err != nil { return count, err } - ems, err := client.Perf.ToMetricSeries(ctx, metrics) + ctx3, cancel3 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel3() + ems, err := client.Perf.ToMetricSeries(ctx3, metrics) if err != nil { return count, err } diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go index 20c61d92b..3290da2e9 100644 --- a/plugins/inputs/vsphere/vsphere_test.go +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -6,6 +6,7 @@ import ( "fmt" "regexp" "sort" + "strings" "testing" "time" @@ -229,6 +230,27 @@ func TestWorkerPool(t *testing.T) { } } +func TestTimeout(t *testing.T) { + m, s, err := createSim() + if err != nil { + t.Fatal(err) + } + defer m.Remove() + defer s.Close() + + var acc testutil.Accumulator + v := defaultVSphere() + v.Vcenters = []string{s.URL.String()} + v.Timeout = internal.Duration{Duration: 1 * time.Nanosecond} + require.NoError(t, v.Start(nil)) // We're not using the Accumulator, so it can be nil. + defer v.Stop() + require.NoError(t, v.Gather(&acc)) + + // The accumulator must contain exactly one error and it must be a deadline exceeded. 
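+	// A substring match is used because the deadline error is typically
+	// wrapped by the transport before it reaches the accumulator.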
+ require.Equal(t, 1, len(acc.Errors)) + require.True(t, strings.Contains(acc.Errors[0].Error(), "context deadline exceeded")) +} + func TestAll(t *testing.T) { m, s, err := createSim() if err != nil { From 0049e01d69c6d6287756127b412723eeaef3c380 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 11 Oct 2018 13:08:57 -0700 Subject: [PATCH 0271/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f08fef88..b331369c8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ ### Bugfixes - [#4844](https://github.com/influxdata/telegraf/pull/4844): Update write path to match updated InfluxDB v2 API. +- [#4840](https://github.com/influxdata/telegraf/pull/4840): Fix missing timeouts in vsphere input. ## v1.8.1 [2018-10-03] From 65f7e988bb2391a03dbe4a0b7250333289050332 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erwan=20Qu=C3=A9lin?= Date: Fri, 12 Oct 2018 00:42:43 +0200 Subject: [PATCH 0272/1815] Enhancement of README.md for the vSphere input plugin. (#4788) --- plugins/inputs/vsphere/README.md | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md index 12332ea66..16274a65a 100644 --- a/plugins/inputs/vsphere/README.md +++ b/plugins/inputs/vsphere/README.md @@ -5,13 +5,16 @@ The VMware vSphere plugin uses the vSphere API to gather metrics from multiple v * Clusters * Hosts * VMs -* Data stores +* Datastores ## Configuration NOTE: To disable collection of a specific resource type, simply exclude all metrics using the XX_metric_exclude. For example, to disable collection of VMs, add this: -```vm_metric_exclude = [ "*" ]``` + +``` +vm_metric_exclude = [ "*" ] +``` ``` # Read metrics from one or many vCenters @@ -168,15 +171,30 @@ For example, to disable collection of VMs, add this: ### Objects and Metrics Per Query -Default settings for vCenter 6.5 and above is 256. Prior versions of vCenter have this set to 64. A vCenter administrator -can change this setting, which should be reflected in this plugin. See this [VMware KB article](https://kb.vmware.com/s/article/2107096) -for more information. +By default, in vCenter's configuration a limit is set to the number of entities that are included in a performance chart query. Default settings for vCenter 6.5 and above is 256. Prior versions of vCenter have this set to 64. +A vCenter administrator can change this setting, see this [VMware KB article](https://kb.vmware.com/s/article/2107096) for more information. + +Any modification should be reflected in this plugin by modifying the parameter `max_query_objects` + +``` + ## number of objects to retreive per query for realtime resources (vms and hosts) + ## set to 64 for vCenter 5.5 and 6.0 (default: 256) + # max_query_objects = 256 +``` ### Collection and Discovery concurrency On large vCenter setups it may be prudent to have multiple concurrent go routines collect performance metrics in order to avoid potential errors for time elapsed during a collection cycle. This should never be greater than 8, -though the default of 1 (no concurrency) should be sufficient for most configurations. +though the default of 1 (no concurrency) should be sufficient for most configurations. + +For setting up concurrency, modify `collect_concurrency` and `discover_concurrency` parameters. 
+ +``` + ## number of go routines to use for collection and discovery of objects and metrics + # collect_concurrency = 1 + # discover_concurrency = 1 +``` ## Measurements & Fields From 0bb264536f1abf63033155408cab45cb85823aa8 Mon Sep 17 00:00:00 2001 From: Julius Marozas Date: Fri, 12 Oct 2018 23:40:17 +0300 Subject: [PATCH 0273/1815] Add http_listener_v2 input input plugin (#4755) --- plugins/inputs/all/all.go | 1 + plugins/inputs/http_listener_v2/README.md | 62 +++ .../http_listener_v2/http_listener_v2.go | 274 +++++++++++++ .../http_listener_v2/http_listener_v2_test.go | 380 ++++++++++++++++++ .../http_listener_v2/testdata/testmsgs.gz | Bin 0 -> 97 bytes 5 files changed, 717 insertions(+) create mode 100644 plugins/inputs/http_listener_v2/README.md create mode 100644 plugins/inputs/http_listener_v2/http_listener_v2.go create mode 100644 plugins/inputs/http_listener_v2/http_listener_v2_test.go create mode 100644 plugins/inputs/http_listener_v2/testdata/testmsgs.gz diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 02008ffd5..909236c21 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -41,6 +41,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/hddtemp" _ "github.com/influxdata/telegraf/plugins/inputs/http" _ "github.com/influxdata/telegraf/plugins/inputs/http_listener" + _ "github.com/influxdata/telegraf/plugins/inputs/http_listener_v2" _ "github.com/influxdata/telegraf/plugins/inputs/http_response" _ "github.com/influxdata/telegraf/plugins/inputs/httpjson" _ "github.com/influxdata/telegraf/plugins/inputs/icinga2" diff --git a/plugins/inputs/http_listener_v2/README.md b/plugins/inputs/http_listener_v2/README.md new file mode 100644 index 000000000..74532c90f --- /dev/null +++ b/plugins/inputs/http_listener_v2/README.md @@ -0,0 +1,62 @@ +# Generic HTTP listener service input plugin + +> NOTE: This is a new version of HTTP listener plugin. +> This plugin supports all [data formats](/docs/DATA_FORMATS_INPUT.md) while the old [http_listener](/plugins/inputs/http_listener) +> only accepts data in InfluxDB line-protocol only + +The HTTP listener is a service input plugin that listens for messages sent via HTTP POST. + +Enable TLS by specifying the file names of a service TLS certificate and key. + +Enable mutually authenticated TLS and authorize client connections by signing certificate authority by including a list of allowed CA certificate file names in ````tls_allowed_cacerts````. + +Enable basic HTTP authentication of clients by specifying a username and password to check for. These credentials will be received from the client _as plain text_ if TLS is not configured. + +**Example:** +``` +curl -i -XPOST 'http://localhost:8080/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' +``` + +### Configuration: + +This is a sample configuration for the plugin. + +```toml +[[inputs.http_listener_v2]] + ## Address and port to host HTTP listener on + service_address = ":8080" + + ## Path to listen to. + path = "/telegraf" + + ## HTTP methods to accept. + methods = ["POST", "PUT"] + + ## maximum duration before timing out read of the request + read_timeout = "10s" + ## maximum duration before timing out write of the response + write_timeout = "10s" + + ## Maximum allowed http request body size in bytes. 
+ ## 0 means to use the default of 536,870,912 bytes (500 mebibytes) + max_body_size = 0 + + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Add service certificate and key + tls_cert = "/etc/telegraf/cert.pem" + tls_key = "/etc/telegraf/key.pem" + + ## Optional username and password to accept for HTTP basic authentication. + ## You probably want to make sure you have TLS configured above for this. + basic_username = "foobar" + basic_password = "barfoo" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +``` diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go new file mode 100644 index 000000000..96997785e --- /dev/null +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -0,0 +1,274 @@ +package http_listener_v2 + +import ( + "compress/gzip" + "crypto/subtle" + "crypto/tls" + "io/ioutil" + "log" + "net" + "net/http" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + tlsint "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" +) + +// defaultMaxBodySize is the default maximum request body size, in bytes. +// if the request body is over this size, we will return an HTTP 413 error. +// 500 MB +const defaultMaxBodySize = 500 * 1024 * 1024 + +type TimeFunc func() time.Time + +type HTTPListenerV2 struct { + ServiceAddress string + Path string + Methods []string + ReadTimeout internal.Duration + WriteTimeout internal.Duration + MaxBodySize int64 + Port int + + tlsint.ServerConfig + + BasicUsername string + BasicPassword string + + TimeFunc + + wg sync.WaitGroup + + listener net.Listener + + parsers.Parser + acc telegraf.Accumulator +} + +const sampleConfig = ` + ## Address and port to host HTTP listener on + service_address = ":8080" + + ## Path to listen to. + path = "/telegraf" + + ## HTTP methods to accept. + methods = ["POST", "PUT"] + + ## maximum duration before timing out read of the request + read_timeout = "10s" + ## maximum duration before timing out write of the response + write_timeout = "10s" + + ## Maximum allowed http request body size in bytes. + ## 0 means to use the default of 536,870,912 bytes (500 mebibytes) + max_body_size = 0 + + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Add service certificate and key + tls_cert = "/etc/telegraf/cert.pem" + tls_key = "/etc/telegraf/key.pem" + + ## Optional username and password to accept for HTTP basic authentication. + ## You probably want to make sure you have TLS configured above for this. + basic_username = "foobar" + basic_password = "barfoo" + + ## Data format to consume. 
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +` + +func (h *HTTPListenerV2) SampleConfig() string { + return sampleConfig +} + +func (h *HTTPListenerV2) Description() string { + return "Generic HTTP write listener" +} + +func (h *HTTPListenerV2) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (h *HTTPListenerV2) SetParser(parser parsers.Parser) { + h.Parser = parser +} + +// Start starts the http listener service. +func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { + if h.MaxBodySize == 0 { + h.MaxBodySize = defaultMaxBodySize + } + + if h.ReadTimeout.Duration < time.Second { + h.ReadTimeout.Duration = time.Second * 10 + } + if h.WriteTimeout.Duration < time.Second { + h.WriteTimeout.Duration = time.Second * 10 + } + + h.acc = acc + + tlsConf, err := h.ServerConfig.TLSConfig() + if err != nil { + return err + } + + server := &http.Server{ + Addr: h.ServiceAddress, + Handler: h, + ReadTimeout: h.ReadTimeout.Duration, + WriteTimeout: h.WriteTimeout.Duration, + TLSConfig: tlsConf, + } + + var listener net.Listener + if tlsConf != nil { + listener, err = tls.Listen("tcp", h.ServiceAddress, tlsConf) + } else { + listener, err = net.Listen("tcp", h.ServiceAddress) + } + if err != nil { + return err + } + h.listener = listener + h.Port = listener.Addr().(*net.TCPAddr).Port + + h.wg.Add(1) + go func() { + defer h.wg.Done() + server.Serve(h.listener) + }() + + log.Printf("I! Started HTTP listener V2 service on %s\n", h.ServiceAddress) + + return nil +} + +// Stop cleans up all resources +func (h *HTTPListenerV2) Stop() { + h.listener.Close() + h.wg.Wait() + + log.Println("I! Stopped HTTP listener V2 service on ", h.ServiceAddress) +} + +func (h *HTTPListenerV2) ServeHTTP(res http.ResponseWriter, req *http.Request) { + if req.URL.Path == h.Path { + h.AuthenticateIfSet(h.serveWrite, res, req) + } else { + h.AuthenticateIfSet(http.NotFound, res, req) + } +} + +func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) { + // Check that the content length is not too large for us to handle. + if req.ContentLength > h.MaxBodySize { + tooLarge(res) + return + } + + // Check if the requested HTTP method was specified in config. + isAcceptedMethod := false + for _, method := range h.Methods { + if req.Method == method { + isAcceptedMethod = true + break + } + } + if !isAcceptedMethod { + methodNotAllowed(res) + return + } + + // Handle gzip request bodies + body := req.Body + if req.Header.Get("Content-Encoding") == "gzip" { + var err error + body, err = gzip.NewReader(req.Body) + if err != nil { + log.Println("D! " + err.Error()) + badRequest(res) + return + } + defer body.Close() + } + + body = http.MaxBytesReader(res, body, h.MaxBodySize) + bytes, err := ioutil.ReadAll(body) + if err != nil { + tooLarge(res) + return + } + + metrics, err := h.Parse(bytes) + if err != nil { + log.Println("D! 
" + err.Error()) + badRequest(res) + return + } + for _, m := range metrics { + h.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) + } + res.WriteHeader(http.StatusNoContent) +} + +func tooLarge(res http.ResponseWriter) { + res.Header().Set("Content-Type", "application/json") + res.WriteHeader(http.StatusRequestEntityTooLarge) + res.Write([]byte(`{"error":"http: request body too large"}`)) +} + +func methodNotAllowed(res http.ResponseWriter) { + res.Header().Set("Content-Type", "application/json") + res.WriteHeader(http.StatusMethodNotAllowed) + res.Write([]byte(`{"error":"http: method not allowed"}`)) +} + +func internalServerError(res http.ResponseWriter) { + res.Header().Set("Content-Type", "application/json") + res.WriteHeader(http.StatusInternalServerError) +} + +func badRequest(res http.ResponseWriter) { + res.Header().Set("Content-Type", "application/json") + res.WriteHeader(http.StatusBadRequest) + res.Write([]byte(`{"error":"http: bad request"}`)) +} + +func (h *HTTPListenerV2) AuthenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) { + if h.BasicUsername != "" && h.BasicPassword != "" { + reqUsername, reqPassword, ok := req.BasicAuth() + if !ok || + subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.BasicUsername)) != 1 || + subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.BasicPassword)) != 1 { + + http.Error(res, "Unauthorized.", http.StatusUnauthorized) + return + } + handler(res, req) + } else { + handler(res, req) + } +} + +func init() { + inputs.Add("http_listener_v2", func() telegraf.Input { + return &HTTPListenerV2{ + ServiceAddress: ":8080", + TimeFunc: time.Now, + Path: "/telegraf", + Methods: []string{"POST", "PUT"}, + } + }) +} diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go new file mode 100644 index 000000000..3287ea59e --- /dev/null +++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go @@ -0,0 +1,380 @@ +package http_listener_v2 + +import ( + "bytes" + "crypto/tls" + "crypto/x509" + "io/ioutil" + "net/http" + "net/url" + "runtime" + "strconv" + "sync" + "testing" + "time" + + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/require" +) + +const ( + testMsg = "cpu_load_short,host=server01 value=12.0 1422568543702900257\n" + + testMsgNoNewline = "cpu_load_short,host=server01 value=12.0 1422568543702900257" + + testMsgs = `cpu_load_short,host=server02 value=12.0 1422568543702900257 +cpu_load_short,host=server03 value=12.0 1422568543702900257 +cpu_load_short,host=server04 value=12.0 1422568543702900257 +cpu_load_short,host=server05 value=12.0 1422568543702900257 +cpu_load_short,host=server06 value=12.0 1422568543702900257 +` + badMsg = "blahblahblah: 42\n" + + emptyMsg = "" + + basicUsername = "test-username-please-ignore" + basicPassword = "super-secure-password!" 
+) + +var ( + pki = testutil.NewPKI("../../../testutil/pki") +) + +func newTestHTTPListenerV2() *HTTPListenerV2 { + parser, _ := parsers.NewInfluxParser() + + listener := &HTTPListenerV2{ + ServiceAddress: "localhost:0", + Path: "/write", + Methods: []string{"POST"}, + Parser: parser, + TimeFunc: time.Now, + MaxBodySize: 70000, + } + return listener +} + +func newTestHTTPAuthListener() *HTTPListenerV2 { + listener := newTestHTTPListenerV2() + listener.BasicUsername = basicUsername + listener.BasicPassword = basicPassword + return listener +} + +func newTestHTTPSListenerV2() *HTTPListenerV2 { + parser, _ := parsers.NewInfluxParser() + + listener := &HTTPListenerV2{ + ServiceAddress: "localhost:0", + Path: "/write", + Methods: []string{"POST"}, + Parser: parser, + ServerConfig: *pki.TLSServerConfig(), + TimeFunc: time.Now, + } + + return listener +} + +func getHTTPSClient() *http.Client { + tlsConfig, err := pki.TLSClientConfig().TLSConfig() + if err != nil { + panic(err) + } + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + } +} + +func createURL(listener *HTTPListenerV2, scheme string, path string, rawquery string) string { + u := url.URL{ + Scheme: scheme, + Host: "localhost:" + strconv.Itoa(listener.Port), + Path: path, + RawQuery: rawquery, + } + return u.String() +} + +func TestWriteHTTPSNoClientAuth(t *testing.T) { + listener := newTestHTTPSListenerV2() + listener.TLSAllowedCACerts = nil + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + cas := x509.NewCertPool() + cas.AppendCertsFromPEM([]byte(pki.ReadServerCert())) + noClientAuthClient := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: cas, + }, + }, + } + + // post single message to listener + resp, err := noClientAuthClient.Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) +} + +func TestWriteHTTPSWithClientAuth(t *testing.T) { + listener := newTestHTTPSListenerV2() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := getHTTPSClient().Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) +} + +func TestWriteHTTPBasicAuth(t *testing.T) { + listener := newTestHTTPAuthListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + client := &http.Client{} + + req, err := http.NewRequest("POST", createURL(listener, "http", "/write", "db=mydb"), bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + req.SetBasicAuth(basicUsername, basicPassword) + resp, err := client.Do(req) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, http.StatusNoContent, resp.StatusCode) +} + +func TestWriteHTTP(t *testing.T) { + listener := newTestHTTPListenerV2() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + 
map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01"}, + ) + + // post multiple message to listener + resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(2) + hostTags := []string{"server02", "server03", + "server04", "server05", "server06"} + for _, hostTag := range hostTags { + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": hostTag}, + ) + } + + // Post a gigantic metric to the listener and verify that an error is returned: + resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 413, resp.StatusCode) + + acc.Wait(3) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01"}, + ) +} + +// http listener should add a newline at the end of the buffer if it's not there +func TestWriteHTTPNoNewline(t *testing.T) { + listener := newTestHTTPListenerV2() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01"}, + ) +} + +func TestWriteHTTPExactMaxBodySize(t *testing.T) { + parser, _ := parsers.NewInfluxParser() + + listener := &HTTPListenerV2{ + ServiceAddress: "localhost:0", + Path: "/write", + Methods: []string{"POST"}, + Parser: parser, + MaxBodySize: int64(len(hugeMetric)), + TimeFunc: time.Now, + } + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) +} + +func TestWriteHTTPVerySmallMaxBody(t *testing.T) { + parser, _ := parsers.NewInfluxParser() + + listener := &HTTPListenerV2{ + ServiceAddress: "localhost:0", + Path: "/write", + Methods: []string{"POST"}, + Parser: parser, + MaxBodySize: 4096, + TimeFunc: time.Now, + } + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 413, resp.StatusCode) +} + +// test that writing gzipped data works +func TestWriteHTTPGzippedData(t *testing.T) { + listener := newTestHTTPListenerV2() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + data, err := ioutil.ReadFile("./testdata/testmsgs.gz") + require.NoError(t, err) + + req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data)) + require.NoError(t, err) + req.Header.Set("Content-Encoding", "gzip") + + client := &http.Client{} + resp, err := client.Do(req) + require.NoError(t, err) + require.EqualValues(t, 
204, resp.StatusCode) + + hostTags := []string{"server02", "server03", + "server04", "server05", "server06"} + acc.Wait(len(hostTags)) + for _, hostTag := range hostTags { + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": hostTag}, + ) + } +} + +// writes 25,000 metrics to the listener with 10 different writers +func TestWriteHTTPHighTraffic(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("Skipping due to hang on darwin") + } + listener := newTestHTTPListenerV2() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post many messages to listener + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func(innerwg *sync.WaitGroup) { + defer innerwg.Done() + for i := 0; i < 500; i++ { + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + } + }(&wg) + } + + wg.Wait() + listener.Gather(acc) + + acc.Wait(25000) + require.Equal(t, int64(25000), int64(acc.NMetrics())) +} + +func TestReceive404ForInvalidEndpoint(t *testing.T) { + listener := newTestHTTPListenerV2() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 404, resp.StatusCode) +} + +func TestWriteHTTPInvalid(t *testing.T) { + listener := newTestHTTPListenerV2() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 400, resp.StatusCode) +} + +func TestWriteHTTPEmpty(t *testing.T) { + listener := newTestHTTPListenerV2() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) +} + +const hugeMetric = `super_long_metric,foo=bar 
clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patter
ns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07
,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=
0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_
backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,u
sed_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl
_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_
processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connec
ted_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes
_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_chi
ldren=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitra
te=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048
576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33
792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fr
agmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,tota
l_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evi
cted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=
0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_
user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hit
s=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i +` diff --git a/plugins/inputs/http_listener_v2/testdata/testmsgs.gz b/plugins/inputs/http_listener_v2/testdata/testmsgs.gz new file mode 100644 index 0000000000000000000000000000000000000000..f524dc07128b95fa256b4e0df66bc2b6f04d7058 GIT binary patch literal 97 zcmV-n0G|IJiwFSz6b@Jb14}L_jnBzXOo=bf$S*3<$;dA*u`Nz5DoZUgFj6Q>%qdN^ 
zH8j#QP%tzxGBP!@Ff}nYH!!j^FfcMT=Ss${*O&smCKTv3r9iJ4A-!Axe?y61Edc-k Db0r~l literal 0 HcmV?d00001 From 2d8cda02df64a1d313088efbc9cfe19f61f7c510 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 12 Oct 2018 14:20:12 -0700 Subject: [PATCH 0274/1815] Rename http_listener to influxdb_listener --- CHANGELOG.md | 12 ++++ README.md | 4 +- plugins/inputs/all/all.go | 2 +- plugins/inputs/http_listener/README.md | 48 ------------- plugins/inputs/http_listener_v2/README.md | 43 ++++++----- plugins/inputs/influxdb_listener/README.md | 67 ++++++++++++++++++ .../bufferpool.go | 0 .../http_listener.go | 9 ++- .../http_listener_test.go | 0 .../testdata/testmsgs.gz | Bin 10 files changed, 117 insertions(+), 68 deletions(-) delete mode 100644 plugins/inputs/http_listener/README.md create mode 100644 plugins/inputs/influxdb_listener/README.md rename plugins/inputs/{http_listener => influxdb_listener}/bufferpool.go (100%) rename plugins/inputs/{http_listener => influxdb_listener}/http_listener.go (98%) rename plugins/inputs/{http_listener => influxdb_listener}/http_listener_test.go (100%) rename plugins/inputs/{http_listener => influxdb_listener}/testdata/testmsgs.gz (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md index b331369c8..880506fa4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,17 @@ ## v1.9 [unreleased] +### Release Notes + +- The `http_listener` input plugin has been renamed to `influxdb_listener` and + use of the original name is deprecated. The new name better describes the + intended use of the plugin as an InfluxDB relay. For general-purpose + transfer of metrics in any format via HTTP, it is recommended to use + `http_listener_v2` instead. + +### New Inputs + +- [http_listener_v2](/plugins/inputs/http_listener_v2/README.md) - Contributed by @jul1u5 + ### Features - [#4686](https://github.com/influxdata/telegraf/pull/4686): Add replace function to strings processor.
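The release notes above describe the rename only in prose; for clarity, here is a minimal sketch of the dual-registration pattern that keeps the deprecated name working, assuming the `HTTPListener` type and the plugin registry API used in this patch (the real change is in the `http_listener.go` hunk further down):

```go
package influxdb_listener

import (
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

func init() {
	// Keep the deprecated name registered so existing
	// [[inputs.http_listener]] configurations continue to load.
	inputs.Add("http_listener", func() telegraf.Input {
		return &HTTPListener{ServiceAddress: ":8186", TimeFunc: time.Now}
	})
	// Register the new, preferred name with the same constructor.
	inputs.Add("influxdb_listener", func() telegraf.Input {
		return &HTTPListener{ServiceAddress: ":8186", TimeFunc: time.Now}
	})
}
```

Because both names map to the same constructor, migrating a configuration is just a section rename; behavior is unchanged.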
diff --git a/README.md b/README.md index 647256f3e..c7f4be899 100644 --- a/README.md +++ b/README.md @@ -165,12 +165,14 @@ For documentation on the latest development code see the [documentation index][d * [haproxy](./plugins/inputs/haproxy) * [hddtemp](./plugins/inputs/hddtemp) * [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin) -* [http_listener](./plugins/inputs/http_listener) +* [http_listener](./plugins/inputs/influxdb_listener) (deprecated, renamed to [influxdb_listener](/plugins/inputs/influxdb_listener)) +* [http_listener_v2](./plugins/inputs/http_listener_v2) * [http](./plugins/inputs/http) (generic HTTP plugin, supports using input data formats) * [http_response](./plugins/inputs/http_response) * [icinga2](./plugins/inputs/icinga2) * [influxdb](./plugins/inputs/influxdb) * [influxdb_v2](./plugins/inputs/influxdb_v2) +* [influxdb_listener](./plugins/inputs/influxdb_listener) * [internal](./plugins/inputs/internal) * [interrupts](./plugins/inputs/interrupts) * [ipmi_sensor](./plugins/inputs/ipmi_sensor) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 909236c21..5840893e9 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -40,12 +40,12 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/haproxy" _ "github.com/influxdata/telegraf/plugins/inputs/hddtemp" _ "github.com/influxdata/telegraf/plugins/inputs/http" - _ "github.com/influxdata/telegraf/plugins/inputs/http_listener" _ "github.com/influxdata/telegraf/plugins/inputs/http_listener_v2" _ "github.com/influxdata/telegraf/plugins/inputs/http_response" _ "github.com/influxdata/telegraf/plugins/inputs/httpjson" _ "github.com/influxdata/telegraf/plugins/inputs/icinga2" _ "github.com/influxdata/telegraf/plugins/inputs/influxdb" + _ "github.com/influxdata/telegraf/plugins/inputs/influxdb_listener" _ "github.com/influxdata/telegraf/plugins/inputs/internal" _ "github.com/influxdata/telegraf/plugins/inputs/interrupts" _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor" diff --git a/plugins/inputs/http_listener/README.md b/plugins/inputs/http_listener/README.md deleted file mode 100644 index f1ff71f0a..000000000 --- a/plugins/inputs/http_listener/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# HTTP listener service input plugin - -The HTTP listener is a service input plugin that listens for messages sent via HTTP POST. -The plugin expects messages in the InfluxDB line-protocol ONLY, other Telegraf input data formats are not supported. -The intent of the plugin is to allow Telegraf to serve as a proxy/router for the `/write` endpoint of the InfluxDB HTTP API. - -The `/write` endpoint supports the `precision` query parameter and can be set to one of `ns`, `u`, `ms`, `s`, `m`, `h`. All other parameters are ignored and defer to the output plugins configuration. - -When chaining Telegraf instances using this plugin, CREATE DATABASE requests receive a 200 OK response with message body `{"results":[]}` but they are not relayed. The output configuration of the Telegraf instance which ultimately submits data to InfluxDB determines the destination database. - -Enable TLS by specifying the file names of a service TLS certificate and key. - -Enable mutually authenticated TLS and authorize client connections by signing certificate authority by including a list of allowed CA certificate file names in ````tls_allowed_cacerts````. - -Enable basic HTTP authentication of clients by specifying a username and password to check for. 
These credentials will be received from the client _as plain text_ if TLS is not configured. - -See: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx). - -**Example:** -``` -curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' -``` - -### Configuration: - -This is a sample configuration for the plugin. - -```toml -# # Influx HTTP write listener -[[inputs.http_listener]] - ## Address and port to host HTTP listener on - service_address = ":8186" - - ## timeouts - read_timeout = "10s" - write_timeout = "10s" - - ## HTTPS - tls_cert= "/etc/telegraf/cert.pem" - tls_key = "/etc/telegraf/key.pem" - - ## MTLS - tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - - ## Basic authentication - basic_username = "foobar" - basic_password = "barfoo" -``` diff --git a/plugins/inputs/http_listener_v2/README.md b/plugins/inputs/http_listener_v2/README.md index 74532c90f..a2d5798dd 100644 --- a/plugins/inputs/http_listener_v2/README.md +++ b/plugins/inputs/http_listener_v2/README.md @@ -1,21 +1,11 @@ -# Generic HTTP listener service input plugin +# HTTP Listener v2 Input Plugin -> NOTE: This is a new version of HTTP listener plugin. -> This plugin supports all [data formats](/docs/DATA_FORMATS_INPUT.md) while the old [http_listener](/plugins/inputs/http_listener) -> only accepts data in InfluxDB line-protocol only +HTTP Listener v2 is a service input plugin that listens for metrics sent via +HTTP. Metrics may be sent in any supported [data format][data_format]. -The HTTP listener is a service input plugin that listens for messages sent via HTTP POST. - -Enable TLS by specifying the file names of a service TLS certificate and key. - -Enable mutually authenticated TLS and authorize client connections by signing certificate authority by including a list of allowed CA certificate file names in ````tls_allowed_cacerts````. - -Enable basic HTTP authentication of clients by specifying a username and password to check for. These credentials will be received from the client _as plain text_ if TLS is not configured. - -**Example:** -``` -curl -i -XPOST 'http://localhost:8080/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' -``` +**Note:** The plugin previously known as `http_listener` has been renamed to +`influxdb_listener`. If you would like Telegraf to act as a proxy/relay for +InfluxDB, it is recommended to use [`influxdb_listener`][influxdb_listener]. ### Configuration: @@ -41,7 +31,7 @@ This is a sample configuration for the plugin. ## 0 means to use the default of 536,870,912 bytes (500 mebibytes) max_body_size = 0 - ## Set one or more allowed client CA certificate file names to + ## Set one or more allowed client CA certificate file names to ## enable mutually authenticated TLS connections tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] @@ -60,3 +50,22 @@ This is a sample configuration for the plugin. ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` + +### Metrics: + +Metrics are created from the request body and are dependent on the value of `data_format`. 
+ +### Troubleshooting: + +**Send Line Protocol** +``` +curl -i -XPOST 'http://localhost:8080/telegraf' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' +``` + +**Send JSON** +``` +curl -i -XPOST 'http://localhost:8080/telegraf' --data-binary '{"value1": 42, "value2": 42}' +``` + +[data_format]: /docs/DATA_FORMATS_INPUT.md +[influxdb_listener]: /plugins/inputs/influxdb_listener/README.md diff --git a/plugins/inputs/influxdb_listener/README.md b/plugins/inputs/influxdb_listener/README.md new file mode 100644 index 000000000..38039c606 --- /dev/null +++ b/plugins/inputs/influxdb_listener/README.md @@ -0,0 +1,67 @@ +# InfluxDB Listener Input Plugin + +InfluxDB Listener is a service input plugin that listens for requests sent +according to the [InfluxDB HTTP API][influxdb_http_api]. The intent of the +plugin is to allow Telegraf to serve as a proxy/router for the `/write` +endpoint of the InfluxDB HTTP API. + +**Note:** This plugin was previously known as `http_listener`. If you wish to +send general metrics via HTTP, it is recommended to use +[`http_listener_v2`][http_listener_v2] instead. + +The `/write` endpoint supports the `precision` query parameter and can be set +to one of `ns`, `u`, `ms`, `s`, `m`, `h`. All other parameters are ignored and +defer to the output plugins' configuration. + +When chaining Telegraf instances using this plugin, CREATE DATABASE requests +receive a 200 OK response with message body `{"results":[]}` but they are not +relayed. The output configuration of the Telegraf instance which ultimately +submits data to InfluxDB determines the destination database. + +### Configuration: + +```toml +[[inputs.influxdb_listener]] + ## Address and port to host HTTP listener on + service_address = ":8186" + + ## maximum duration before timing out read of the request + read_timeout = "10s" + ## maximum duration before timing out write of the response + write_timeout = "10s" + + ## Maximum allowed http request body size in bytes. + ## 0 means to use the default of 536,870,912 bytes (500 mebibytes) + max_body_size = 0 + + ## Maximum line size allowed to be sent in bytes. + ## 0 means to use the default of 65536 bytes (64 kibibytes) + max_line_size = 0 + + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Add service certificate and key + tls_cert = "/etc/telegraf/cert.pem" + tls_key = "/etc/telegraf/key.pem" + + ## Optional username and password to accept for HTTP basic authentication. + ## You probably want to make sure you have TLS configured above for this. + # basic_username = "foobar" + # basic_password = "barfoo" +``` + +### Metrics: + +Metrics are created from InfluxDB Line Protocol in the request body. 
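The `precision` parameter handling described above amounts to scaling each incoming timestamp by a per-unit multiplier before it is stored. A minimal sketch of that mapping, assuming the unit names from the README (the plugin's own `getPrecisionMultiplier`, visible in the `http_listener.go` hunk below, fills this role):

```go
package main

import (
	"fmt"
	"time"
)

// precisionMultiplier maps a `precision` query parameter value to the
// duration each timestamp unit represents; unrecognized values fall back
// to nanoseconds. A sketch, not the plugin's exact implementation.
func precisionMultiplier(precision string) time.Duration {
	switch precision {
	case "u":
		return time.Microsecond
	case "ms":
		return time.Millisecond
	case "s":
		return time.Second
	case "m":
		return time.Minute
	case "h":
		return time.Hour
	default:
		return time.Nanosecond
	}
}

func main() {
	// A timestamp sent as seconds is scaled to nanoseconds internally,
	// matching the full-precision timestamp in the curl example above.
	ts := int64(1434055562)
	ns := time.Duration(ts) * precisionMultiplier("s")
	fmt.Println(ns.Nanoseconds()) // 1434055562000000000
}
```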
+ +### Troubleshooting: + +**Example Query:** +``` +curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' +``` + +[influxdb_http_api]: https://docs.influxdata.com/influxdb/latest/guides/writing_data/ +[http_listener_v2]: /plugins/inputs/http_listener_v2/README.md diff --git a/plugins/inputs/http_listener/bufferpool.go b/plugins/inputs/influxdb_listener/bufferpool.go similarity index 100% rename from plugins/inputs/http_listener/bufferpool.go rename to plugins/inputs/influxdb_listener/bufferpool.go diff --git a/plugins/inputs/http_listener/http_listener.go b/plugins/inputs/influxdb_listener/http_listener.go similarity index 98% rename from plugins/inputs/http_listener/http_listener.go rename to plugins/inputs/influxdb_listener/http_listener.go index cd82e40c0..29beff9a8 100644 --- a/plugins/inputs/http_listener/http_listener.go +++ b/plugins/inputs/influxdb_listener/http_listener.go @@ -91,7 +91,7 @@ const sampleConfig = ` ## 0 means to use the default of 65536 bytes (64 kibibytes) max_line_size = 0 - ## Set one or more allowed client CA certificate file names to + ## Set one or more allowed client CA certificate file names to ## enable mutually authenticated TLS connections tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] @@ -420,10 +420,17 @@ func getPrecisionMultiplier(precision string) time.Duration { } func init() { + // http_listener deprecated in 1.9 inputs.Add("http_listener", func() telegraf.Input { return &HTTPListener{ ServiceAddress: ":8186", TimeFunc: time.Now, } }) + inputs.Add("influxdb_listener", func() telegraf.Input { + return &HTTPListener{ + ServiceAddress: ":8186", + TimeFunc: time.Now, + } + }) } diff --git a/plugins/inputs/http_listener/http_listener_test.go b/plugins/inputs/influxdb_listener/http_listener_test.go similarity index 100% rename from plugins/inputs/http_listener/http_listener_test.go rename to plugins/inputs/influxdb_listener/http_listener_test.go diff --git a/plugins/inputs/http_listener/testdata/testmsgs.gz b/plugins/inputs/influxdb_listener/testdata/testmsgs.gz similarity index 100% rename from plugins/inputs/http_listener/testdata/testmsgs.gz rename to plugins/inputs/influxdb_listener/testdata/testmsgs.gz From 37fd99abb9ff6e47a03a4b0ca030df4361560f13 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 12 Oct 2018 14:23:29 -0700 Subject: [PATCH 0275/1815] Comment optional fields in http_listener sample config --- plugins/inputs/http_listener_v2/README.md | 20 +++++++++---------- .../http_listener_v2/http_listener_v2.go | 20 +++++++++---------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/plugins/inputs/http_listener_v2/README.md b/plugins/inputs/http_listener_v2/README.md index a2d5798dd..6d5d25aa4 100644 --- a/plugins/inputs/http_listener_v2/README.md +++ b/plugins/inputs/http_listener_v2/README.md @@ -17,32 +17,32 @@ This is a sample configuration for the plugin. service_address = ":8080" ## Path to listen to. - path = "/telegraf" + # path = "/telegraf" ## HTTP methods to accept. - methods = ["POST", "PUT"] + # methods = ["POST", "PUT"] ## maximum duration before timing out read of the request - read_timeout = "10s" + # read_timeout = "10s" ## maximum duration before timing out write of the response - write_timeout = "10s" + # write_timeout = "10s" ## Maximum allowed http request body size in bytes. 
## 0 means to use the default of 536,870,912 bytes (500 mebibytes) - max_body_size = 0 + # max_body_size = 0 ## Set one or more allowed client CA certificate file names to ## enable mutually authenticated TLS connections - tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] ## Add service certificate and key - tls_cert = "/etc/telegraf/cert.pem" - tls_key = "/etc/telegraf/key.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" ## Optional username and password to accept for HTTP basic authentication. ## You probably want to make sure you have TLS configured above for this. - basic_username = "foobar" - basic_password = "barfoo" + # basic_username = "foobar" + # basic_password = "barfoo" ## Data format to consume. ## Each data format has its own unique set of configuration options, read diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index 96997785e..871d1080b 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -54,32 +54,32 @@ const sampleConfig = ` service_address = ":8080" ## Path to listen to. - path = "/telegraf" + # path = "/telegraf" ## HTTP methods to accept. - methods = ["POST", "PUT"] + # methods = ["POST", "PUT"] ## maximum duration before timing out read of the request - read_timeout = "10s" + # read_timeout = "10s" ## maximum duration before timing out write of the response - write_timeout = "10s" + # write_timeout = "10s" ## Maximum allowed http request body size in bytes. ## 0 means to use the default of 536,870,912 bytes (500 mebibytes) - max_body_size = 0 + # max_body_size = 0 ## Set one or more allowed client CA certificate file names to ## enable mutually authenticated TLS connections - tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] ## Add service certificate and key - tls_cert = "/etc/telegraf/cert.pem" - tls_key = "/etc/telegraf/key.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" ## Optional username and password to accept for HTTP basic authentication. ## You probably want to make sure you have TLS configured above for this. - basic_username = "foobar" - basic_password = "barfoo" + # basic_username = "foobar" + # basic_password = "barfoo" ## Data format to consume. 
## Each data format has its own unique set of configuration options, read From 27bd51b9ac9aeaf37a5e86159f082dd3d4adfbc0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 12 Oct 2018 14:36:55 -0700 Subject: [PATCH 0276/1815] Use container name from list if no name in container stats (#4854) --- plugins/inputs/docker/docker.go | 17 ++++--- plugins/inputs/docker/docker_test.go | 68 ++++++++++++++++++++++++++++ 2 files changed, 78 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index c98f1f845..f5633f099 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -416,7 +416,9 @@ func (d *Docker) gatherContainer( daemonOSType := r.OSType // use common (printed at `docker ps`) name for container - tags["container_name"] = strings.TrimPrefix(v.Name, "/") + if v.Name != "" { + tags["container_name"] = strings.TrimPrefix(v.Name, "/") + } // Add labels to tags for k, label := range container.Labels { @@ -442,6 +444,7 @@ func (d *Docker) gatherContainer( } } } + if info.State != nil { tags["container_status"] = info.State.Status statefields := map[string]interface{}{ @@ -458,14 +461,14 @@ func (d *Docker) gatherContainer( statefields["finished_at"] = container_time.UnixNano() } acc.AddFields("docker_container_status", statefields, tags, time.Now()) - } - if info.State.Health != nil { - healthfields := map[string]interface{}{ - "health_status": info.State.Health.Status, - "failing_streak": info.ContainerJSONBase.State.Health.FailingStreak, + if info.State.Health != nil { + healthfields := map[string]interface{}{ + "health_status": info.State.Health.Status, + "failing_streak": info.ContainerJSONBase.State.Health.FailingStreak, + } + acc.AddFields("docker_container_health", healthfields, tags, time.Now()) } - acc.AddFields("docker_container_health", healthfields, tags, time.Now()) } parseContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total, daemonOSType) diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index b97c34b6b..968dbf725 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -3,7 +3,9 @@ package docker import ( "context" "crypto/tls" + "io/ioutil" "sort" + "strings" "testing" "github.com/influxdata/telegraf/testutil" @@ -747,3 +749,69 @@ func TestContainerStateFilter(t *testing.T) { }) } } + +func TestContainerName(t *testing.T) { + tests := []struct { + name string + clientFunc func(host string, tlsConfig *tls.Config) (Client, error) + expected string + }{ + { + name: "container stats name is preferred", + clientFunc: func(host string, tlsConfig *tls.Config) (Client, error) { + client := baseClient + client.ContainerListF = func(context.Context, types.ContainerListOptions) ([]types.Container, error) { + var containers []types.Container + containers = append(containers, types.Container{ + Names: []string{"/logspout/foo"}, + }) + return containers, nil + } + client.ContainerStatsF = func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { + return types.ContainerStats{ + Body: ioutil.NopCloser(strings.NewReader(`{"name": "logspout"}`)), + }, nil + } + return &client, nil + }, + expected: "logspout", + }, + { + name: "container stats without name uses container list name", + clientFunc: func(host string, tlsConfig *tls.Config) (Client, error) { + client := baseClient + client.ContainerListF = func(context.Context, types.ContainerListOptions) ([]types.Container, error) { + var 
containers []types.Container + containers = append(containers, types.Container{ + Names: []string{"/logspout"}, + }) + return containers, nil + } + client.ContainerStatsF = func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { + return types.ContainerStats{ + Body: ioutil.NopCloser(strings.NewReader(`{}`)), + }, nil + } + return &client, nil + }, + expected: "logspout", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := Docker{ + newClient: tt.clientFunc, + } + var acc testutil.Accumulator + err := d.Gather(&acc) + require.NoError(t, err) + + for _, metric := range acc.Metrics { + // This tag is set on all container measurements + if metric.Measurement == "docker_container_mem" { + require.Equal(t, tt.expected, metric.Tags["container_name"]) + } + } + }) + } +} From 38e644ff12bb5880d28e6ee0f6d62c560e092fc5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 12 Oct 2018 14:37:30 -0700 Subject: [PATCH 0277/1815] Support uint fields in aerospike input (#4851) --- plugins/inputs/aerospike/aerospike.go | 32 ++++++++-------------- plugins/inputs/aerospike/aerospike_test.go | 15 ++++------ 2 files changed, 17 insertions(+), 30 deletions(-) diff --git a/plugins/inputs/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go index 3caee7e7d..d4c4fce85 100644 --- a/plugins/inputs/aerospike/aerospike.go +++ b/plugins/inputs/aerospike/aerospike.go @@ -2,8 +2,6 @@ package aerospike import ( "crypto/tls" - "errors" - "log" "net" "strconv" "strings" @@ -120,12 +118,8 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro return err } for k, v := range stats { - val, err := parseValue(v) - if err == nil { - fields[strings.Replace(k, "-", "_", -1)] = val - } else { - log.Printf("I! skipping aerospike field %v with int64 overflow: %q", k, v) - } + val := parseValue(v) + fields[strings.Replace(k, "-", "_", -1)] = val } acc.AddFields("aerospike_node", fields, tags, time.Now()) @@ -152,12 +146,8 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro if len(parts) < 2 { continue } - val, err := parseValue(parts[1]) - if err == nil { - nFields[strings.Replace(parts[0], "-", "_", -1)] = val - } else { - log.Printf("I! 
skipping aerospike field %v with int64 overflow: %q", parts[0], parts[1]) - } + val := parseValue(parts[1]) + nFields[strings.Replace(parts[0], "-", "_", -1)] = val } acc.AddFields("aerospike_namespace", nFields, nTags, time.Now()) } @@ -165,16 +155,16 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro return nil } -func parseValue(v string) (interface{}, error) { +func parseValue(v string) interface{} { if parsed, err := strconv.ParseInt(v, 10, 64); err == nil { - return parsed, nil - } else if _, err := strconv.ParseUint(v, 10, 64); err == nil { - // int64 overflow, yet valid uint64 - return nil, errors.New("Number is too large") + return parsed + } else if parsed, err := strconv.ParseUint(v, 10, 64); err == nil { + return parsed } else if parsed, err := strconv.ParseBool(v); err == nil { - return parsed, nil + return parsed } else { - return v, nil + // leave as string + return v } } diff --git a/plugins/inputs/aerospike/aerospike_test.go b/plugins/inputs/aerospike/aerospike_test.go index 078e148f5..724102195 100644 --- a/plugins/inputs/aerospike/aerospike_test.go +++ b/plugins/inputs/aerospike/aerospike_test.go @@ -52,17 +52,14 @@ func TestAerospikeStatisticsPartialErr(t *testing.T) { func TestAerospikeParseValue(t *testing.T) { // uint64 with value bigger than int64 max - val, err := parseValue("18446744041841121751") - assert.Nil(t, val) - assert.Error(t, err) + val := parseValue("18446744041841121751") + require.Equal(t, uint64(18446744041841121751), val) // int values - val, err = parseValue("42") - assert.NoError(t, err) - assert.Equal(t, val, int64(42), "must be parsed as int") + val = parseValue("42") + require.Equal(t, val, int64(42), "must be parsed as int") // string values - val, err = parseValue("BB977942A2CA502") - assert.NoError(t, err) - assert.Equal(t, val, `BB977942A2CA502`, "must be left as string") + val = parseValue("BB977942A2CA502") + require.Equal(t, val, `BB977942A2CA502`, "must be left as string") } From d3078ec9d86b33ec91a2dffe6dc2cd26a2dc473f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 12 Oct 2018 14:40:51 -0700 Subject: [PATCH 0278/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 880506fa4..454f5f665 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,8 @@ - [#4844](https://github.com/influxdata/telegraf/pull/4844): Update write path to match updated InfluxDB v2 API. - [#4840](https://github.com/influxdata/telegraf/pull/4840): Fix missing timeouts in vsphere input. +- [#4851](https://github.com/influxdata/telegraf/pull/4851): Support uint fields in aerospike input. +- [#4854](https://github.com/influxdata/telegraf/pull/4854): Use container name from list if no name in container stats. 
## v1.8.1 [2018-10-03] From 9cc534c62458b41d5a0446e7d33f3732a73caaa2 Mon Sep 17 00:00:00 2001 From: Greg Date: Fri, 12 Oct 2018 15:43:06 -0600 Subject: [PATCH 0279/1815] Prevent panic if fileinfo is nil (#4850) --- plugins/inputs/filecount/filecount.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/plugins/inputs/filecount/filecount.go b/plugins/inputs/filecount/filecount.go index a0dcd2cb4..66d5a33fe 100644 --- a/plugins/inputs/filecount/filecount.go +++ b/plugins/inputs/filecount/filecount.go @@ -139,6 +139,12 @@ func absDuration(x time.Duration) time.Duration { func (fc *FileCount) count(acc telegraf.Accumulator, basedir string, recursive bool) { numFiles := int64(0) walkFn := func(path string, file os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } if path == basedir { return nil } From a2ac9115b3339cfd8af0b0bfd61b789e2639f012 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 12 Oct 2018 14:45:04 -0700 Subject: [PATCH 0280/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 454f5f665..c6b1a7e63 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,7 @@ - [#4840](https://github.com/influxdata/telegraf/pull/4840): Fix missing timeouts in vsphere input. - [#4851](https://github.com/influxdata/telegraf/pull/4851): Support uint fields in aerospike input. - [#4854](https://github.com/influxdata/telegraf/pull/4854): Use container name from list if no name in container stats. +- [#4850](https://github.com/influxdata/telegraf/pull/4850): Prevent panic in filecount input on error in file stat. ## v1.8.1 [2018-10-03] From f259229a3574ad01f1c5a19c947a3c0f41135227 Mon Sep 17 00:00:00 2001 From: Samuel-BF <36996277+Samuel-BF@users.noreply.github.com> Date: Fri, 12 Oct 2018 23:48:11 +0200 Subject: [PATCH 0281/1815] Improve performance of globpath with some patterns (#4836) --- internal/globpath/globpath.go | 56 ++++++++---------------------- internal/globpath/globpath_test.go | 28 +++++++-------- 2 files changed, 29 insertions(+), 55 deletions(-) diff --git a/internal/globpath/globpath.go b/internal/globpath/globpath.go index 6067f65b2..a08731ad9 100644 --- a/internal/globpath/globpath.go +++ b/internal/globpath/globpath.go @@ -15,8 +15,8 @@ type GlobPath struct { path string hasMeta bool hasSuperMeta bool + rootGlob string g glob.Glob - root string } func Compile(path string) (*GlobPath, error) { @@ -27,23 +27,25 @@ func Compile(path string) (*GlobPath, error) { } // if there are no glob meta characters in the path, don't bother compiling - // a glob object or finding the root directory. 
(see short-circuit in Match) + // a glob object if !out.hasMeta || !out.hasSuperMeta { return &out, nil } + // find the root elements of the object path, the entry points for recursion + // when the path contains a super-meta; they are the matches of + // glob(/your/expression/until/first/star/of/super-meta) out.rootGlob = path[:strings.Index(path, "**")+1] var err error if out.g, err = glob.Compile(path, os.PathSeparator); err != nil { return nil, err } - // Get the root directory for this filepath - out.root = findRootDir(path) return &out, nil } func (g *GlobPath) Match() map[string]os.FileInfo { + out := make(map[string]os.FileInfo) if !g.hasMeta { - out := make(map[string]os.FileInfo) info, err := os.Stat(g.path) if err == nil { out[g.path] = info @@ -51,7 +53,6 @@ func Compile(path string) (*GlobPath, error) { return out } if !g.hasSuperMeta { - out := make(map[string]os.FileInfo) files, _ := filepath.Glob(g.path) for _, file := range files { info, err := os.Stat(file) @@ -61,46 +62,19 @@ func (g *GlobPath) Match() map[string]os.FileInfo { } return out } - return walkFilePath(g.root, g.g) -} - -// walk the filepath from the given root and return a list of files that match -// the given glob. -func walkFilePath(root string, g glob.Glob) map[string]os.FileInfo { - matchedFiles := make(map[string]os.FileInfo) + roots, err := filepath.Glob(g.rootGlob) + if err != nil { return out } walkfn := func(path string, info os.FileInfo, _ error) error { - if g.Match(path) { - matchedFiles[path] = info + if g.g.Match(path) { + out[path] = info } return nil - } - filepath.Walk(root, walkfn) - return matchedFiles -} -// find the root dir of the given path (could include globs). -// ie: -// /var/log/telegraf.conf -> /var/log -// /home/** -> /home -// /home/*/** -> /home -// /lib/share/*/*/**.txt -> /lib/share -func findRootDir(path string) string { - pathItems := strings.Split(path, sepStr) - out := sepStr - for i, item := range pathItems { - if i == len(pathItems)-1 { - break - } - if item == "" { - continue - } - if hasMeta(item) { - break - } - out += item + sepStr 
test.output, actual) + actual, _ := Compile(test.input) + require.Equal(t, actual.rootGlob, test.output) } } @@ -64,7 +64,7 @@ func TestFindNestedTextFile(t *testing.T) { require.NoError(t, err) matches := g1.Match() - assert.Len(t, matches, 1) + require.Len(t, matches, 1) } func getTestdataDir() string { From 152365ae06785d464d2695ec199b1bfafc11d2cd Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 15 Oct 2018 13:03:52 -0700 Subject: [PATCH 0282/1815] Rework mqtt_consumer connect/reconnect (#4846) --- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 123 ++++++++---------- .../mqtt_consumer/mqtt_consumer_test.go | 106 ++------------- 2 files changed, 68 insertions(+), 161 deletions(-) diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 5853ad939..0a253b8d8 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -1,10 +1,10 @@ package mqtt_consumer import ( + "errors" "fmt" "log" "strings" - "sync" "time" "github.com/influxdata/telegraf" @@ -19,6 +19,14 @@ import ( // 30 Seconds is the default used by paho.mqtt.golang var defaultConnectionTimeout = internal.Duration{Duration: 30 * time.Second} +type ConnectionState int + +const ( + Disconnected ConnectionState = iota + Connecting + Connected +) + type MQTTConsumer struct { Servers []string Topics []string @@ -36,16 +44,10 @@ type MQTTConsumer struct { ClientID string `toml:"client_id"` tls.ClientConfig - sync.Mutex - client mqtt.Client - // channel of all incoming raw mqtt messages - in chan mqtt.Message - done chan struct{} - - // keep the accumulator internally: - acc telegraf.Accumulator - - connected bool + client mqtt.Client + acc telegraf.Accumulator + state ConnectionState + subscribed bool } var sampleConfig = ` @@ -110,22 +112,19 @@ func (m *MQTTConsumer) SetParser(parser parsers.Parser) { } func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { - m.Lock() - defer m.Unlock() - m.connected = false + m.state = Disconnected if m.PersistentSession && m.ClientID == "" { - return fmt.Errorf("ERROR MQTT Consumer: When using persistent_session" + - " = true, you MUST also set client_id") + return errors.New("persistent_session requires client_id") } m.acc = acc if m.QoS > 2 || m.QoS < 0 { - return fmt.Errorf("MQTT Consumer, invalid QoS value: %d", m.QoS) + return fmt.Errorf("qos value must be 0, 1, or 2: %d", m.QoS) } if m.ConnectionTimeout.Duration < 1*time.Second { - return fmt.Errorf("MQTT Consumer, invalid connection_timeout value: %s", m.ConnectionTimeout.Duration) + return fmt.Errorf("connection_timeout must be greater than 1s: %s", m.ConnectionTimeout.Duration) } opts, err := m.createOpts() @@ -134,9 +133,7 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { } m.client = mqtt.NewClient(opts) - m.in = make(chan mqtt.Message, 1000) - m.done = make(chan struct{}) - + m.state = Connecting m.connect() return nil @@ -145,80 +142,68 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { func (m *MQTTConsumer) connect() error { if token := m.client.Connect(); token.Wait() && token.Error() != nil { err := token.Error() - log.Printf("D! MQTT Consumer, connection error - %v", err) - + m.state = Disconnected return err } - go m.receiver() + log.Printf("I! [inputs.mqtt_consumer]: connected %v", m.Servers) + m.state = Connected - return nil -} - -func (m *MQTTConsumer) onConnect(c mqtt.Client) { - log.Printf("I! 
MQTT Client Connected") - if !m.PersistentSession || !m.connected { + // Only subscribe on first connection when using persistent sessions. On + // subsequent connections the subscriptions should be stored in the + // session, but the proper way to do this is to check the connection + // response to ensure a session was found. + if !m.PersistentSession || !m.subscribed { topics := make(map[string]byte) for _, topic := range m.Topics { topics[topic] = byte(m.QoS) } - subscribeToken := c.SubscribeMultiple(topics, m.recvMessage) + subscribeToken := m.client.SubscribeMultiple(topics, m.recvMessage) subscribeToken.Wait() if subscribeToken.Error() != nil { - m.acc.AddError(fmt.Errorf("E! MQTT Subscribe Error\ntopics: %s\nerror: %s", + m.acc.AddError(fmt.Errorf("subscription error: topics: %s: %v", strings.Join(m.Topics[:], ","), subscribeToken.Error())) } - m.connected = true + m.subscribed = true } - return + + return nil } func (m *MQTTConsumer) onConnectionLost(c mqtt.Client, err error) { - m.acc.AddError(fmt.Errorf("E! MQTT Connection lost\nerror: %s\nMQTT Client will try to reconnect", err.Error())) + m.acc.AddError(fmt.Errorf("connection lost: %v", err)) + log.Printf("D! [inputs.mqtt_consumer]: disconnected %v", m.Servers) + m.state = Disconnected return } -// receiver() reads all incoming messages from the consumer, and parses them into -// influxdb metric points. -func (m *MQTTConsumer) receiver() { - for { - select { - case <-m.done: - return - case msg := <-m.in: - topic := msg.Topic() - metrics, err := m.parser.Parse(msg.Payload()) - if err != nil { - m.acc.AddError(fmt.Errorf("E! MQTT Parse Error\nmessage: %s\nerror: %s", - string(msg.Payload()), err.Error())) - } +func (m *MQTTConsumer) recvMessage(c mqtt.Client, msg mqtt.Message) { + topic := msg.Topic() + metrics, err := m.parser.Parse(msg.Payload()) + if err != nil { + m.acc.AddError(err) + } - for _, metric := range metrics { - tags := metric.Tags() - tags["topic"] = topic - m.acc.AddFields(metric.Name(), metric.Fields(), tags, metric.Time()) - } - } + for _, metric := range metrics { + tags := metric.Tags() + tags["topic"] = topic + m.acc.AddFields(metric.Name(), metric.Fields(), tags, metric.Time()) } } -func (m *MQTTConsumer) recvMessage(_ mqtt.Client, msg mqtt.Message) { - m.in <- msg -} - func (m *MQTTConsumer) Stop() { - m.Lock() - defer m.Unlock() - - if m.connected { - close(m.done) + if m.state == Connected { + log.Printf("D! [inputs.mqtt_consumer]: disconnecting %v", m.Servers) m.client.Disconnect(200) - m.connected = false + log.Printf("D! [inputs.mqtt_consumer]: disconnected %v", m.Servers) + m.state = Disconnected } } func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error { - if !m.connected { + if m.state == Disconnected { + m.state = Connecting + log.Printf("D! [inputs.mqtt_consumer]: connecting %v", m.Servers) m.connect() } @@ -261,7 +246,7 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { for _, server := range m.Servers { // Preserve support for host:port style servers; deprecated in Telegraf 1.4.4 if !strings.Contains(server, "://") { - log.Printf("W! mqtt_consumer server %q should be updated to use `scheme://host:port` format", server) + log.Printf("W! 
[inputs.mqtt_consumer] server %q should be updated to use `scheme://host:port` format", server) if tlsCfg == nil { server = "tcp://" + server } else { @@ -271,10 +256,9 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { opts.AddBroker(server) } - opts.SetAutoReconnect(true) + opts.SetAutoReconnect(false) opts.SetKeepAlive(time.Second * 60) opts.SetCleanSession(!m.PersistentSession) - opts.SetOnConnectHandler(m.onConnect) opts.SetConnectionLostHandler(m.onConnectionLost) return opts, nil @@ -284,6 +268,7 @@ func init() { inputs.Add("mqtt_consumer", func() telegraf.Input { return &MQTTConsumer{ ConnectionTimeout: defaultConnectionTimeout, + state: Disconnected, } }) } diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index a2e5deaa8..c04bd18a7 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -12,24 +12,17 @@ import ( ) const ( - testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257\n" - testMsgNeg = "cpu_load_short,host=server01 value=-23422.0 1422568543702900257\n" - testMsgGraphite = "cpu.load.short.graphite 23422 1454780029" - testMsgJSON = "{\"a\": 5, \"b\": {\"c\": 6}}\n" - invalidMsg = "cpu_load_short,host=server01 1422568543702900257\n" + testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257\n" + invalidMsg = "cpu_load_short,host=server01 1422568543702900257\n" ) -func newTestMQTTConsumer() (*MQTTConsumer, chan mqtt.Message) { - in := make(chan mqtt.Message, 100) +func newTestMQTTConsumer() *MQTTConsumer { n := &MQTTConsumer{ - Topics: []string{"telegraf"}, - Servers: []string{"localhost:1883"}, - in: in, - done: make(chan struct{}), - connected: true, + Topics: []string{"telegraf"}, + Servers: []string{"localhost:1883"}, } - return n, in + return n } // Test that default client has random ID @@ -79,31 +72,12 @@ func TestPersistentClientIDFail(t *testing.T) { } func TestRunParser(t *testing.T) { - n, in := newTestMQTTConsumer() + n := newTestMQTTConsumer() acc := testutil.Accumulator{} n.acc = &acc - defer close(n.done) - n.parser, _ = parsers.NewInfluxParser() - go n.receiver() - in <- mqttMsg(testMsgNeg) - acc.Wait(1) - if a := acc.NFields(); a != 1 { - t.Errorf("got %v, expected %v", a, 1) - } -} - -func TestRunParserNegativeNumber(t *testing.T) { - n, in := newTestMQTTConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - defer close(n.done) - - n.parser, _ = parsers.NewInfluxParser() - go n.receiver() - in <- mqttMsg(testMsg) - acc.Wait(1) + n.recvMessage(nil, mqttMsg(testMsg)) if a := acc.NFields(); a != 1 { t.Errorf("got %v, expected %v", a, 1) @@ -112,84 +86,32 @@ func TestRunParserNegativeNumber(t *testing.T) { // Test that the parser ignores invalid messages func TestRunParserInvalidMsg(t *testing.T) { - n, in := newTestMQTTConsumer() + n := newTestMQTTConsumer() acc := testutil.Accumulator{} n.acc = &acc - defer close(n.done) - n.parser, _ = parsers.NewInfluxParser() - go n.receiver() - in <- mqttMsg(invalidMsg) - acc.WaitError(1) + + n.recvMessage(nil, mqttMsg(invalidMsg)) if a := acc.NFields(); a != 0 { t.Errorf("got %v, expected %v", a, 0) } - assert.Contains(t, acc.Errors[0].Error(), "MQTT Parse Error") + assert.Len(t, acc.Errors, 1) } // Test that the parser parses line format messages into metrics func TestRunParserAndGather(t *testing.T) { - n, in := newTestMQTTConsumer() + n := newTestMQTTConsumer() acc := testutil.Accumulator{} n.acc = &acc - - defer 
close(n.done) - n.parser, _ = parsers.NewInfluxParser() - go n.receiver() - in <- mqttMsg(testMsg) - acc.Wait(1) - n.Gather(&acc) + n.recvMessage(nil, mqttMsg(testMsg)) acc.AssertContainsFields(t, "cpu_load_short", map[string]interface{}{"value": float64(23422)}) } -// Test that the parser parses graphite format messages into metrics -func TestRunParserAndGatherGraphite(t *testing.T) { - n, in := newTestMQTTConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - defer close(n.done) - - n.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) - go n.receiver() - in <- mqttMsg(testMsgGraphite) - - n.Gather(&acc) - acc.Wait(1) - - acc.AssertContainsFields(t, "cpu_load_short_graphite", - map[string]interface{}{"value": float64(23422)}) -} - -// Test that the parser parses json format messages into metrics -func TestRunParserAndGatherJSON(t *testing.T) { - n, in := newTestMQTTConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - defer close(n.done) - - n.parser, _ = parsers.NewParser(&parsers.Config{ - DataFormat: "json", - MetricName: "nats_json_test", - }) - go n.receiver() - in <- mqttMsg(testMsgJSON) - - n.Gather(&acc) - - acc.Wait(1) - - acc.AssertContainsFields(t, "nats_json_test", - map[string]interface{}{ - "a": float64(5), - "b_c": float64(6), - }) -} - func mqttMsg(val string) mqtt.Message { return &message{ topic: "telegraf/unit_test", From 36193aea1bf42cb1434d4173d3c5c964566a2345 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 15 Oct 2018 13:05:46 -0700 Subject: [PATCH 0283/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c6b1a7e63..47ef1a4cd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ - [#4851](https://github.com/influxdata/telegraf/pull/4851): Support uint fields in aerospike input. - [#4854](https://github.com/influxdata/telegraf/pull/4854): Use container name from list if no name in container stats. - [#4850](https://github.com/influxdata/telegraf/pull/4850): Prevent panic in filecount input on error in file stat. +- [#4846](https://github.com/influxdata/telegraf/pull/4846): Fix mqtt_consumer connect and reconnect. ## v1.8.1 [2018-10-03] From ef848b49248f6ada0357d9dd70ab12b1889f9dc8 Mon Sep 17 00:00:00 2001 From: timhallinflux Date: Tue, 16 Oct 2018 11:05:58 -0700 Subject: [PATCH 0284/1815] Update License (#4865) --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 1393544bb..057cf997d 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2015 InfluxDB +Copyright (c) 2015-2018 InfluxData Inc. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal From 0b601513a28627e8614cf3340186103a8a94f3ee Mon Sep 17 00:00:00 2001 From: James Maidment Date: Tue, 16 Oct 2018 14:47:10 -0400 Subject: [PATCH 0285/1815] Add stackdriver output plugin (#3876) --- Gopkg.lock | 101 +- Gopkg.lock.old | 1537 +++++++++++++++++ Gopkg.toml | 4 + plugins/outputs/all/all.go | 1 + plugins/outputs/stackdriver/README.md | 18 + plugins/outputs/stackdriver/stackdriver.go | 303 ++++ .../outputs/stackdriver/stackdriver_test.go | 119 ++ 7 files changed, 2082 insertions(+), 1 deletion(-) create mode 100644 Gopkg.lock.old create mode 100644 plugins/outputs/stackdriver/README.md create mode 100644 plugins/outputs/stackdriver/stackdriver.go create mode 100644 plugins/outputs/stackdriver/stackdriver_test.go diff --git a/Gopkg.lock b/Gopkg.lock index 80a6277bd..54b40a863 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -29,6 +29,14 @@ revision = "2ce144541b8903101fb8f1483cc0497a68798122" version = "v0.3.0" +[[projects]] + digest = "1:5f61d4466cef935862c262f6bc00e24beb5b39b551e906f3cfb180dfac096d57" + name = "contrib.go.opencensus.io/exporter/stackdriver" + packages = ["propagation"] + pruneopts = "" + revision = "2b93072101d466aa4120b3c23c2e1b08af01541c" + version = "v0.6.0" + [[projects]] digest = "1:5923e22a060ab818a015593422f9e8a35b9d881d4cfcfed0669a82959b11c7ee" name = "github.com/Azure/go-autorest" @@ -437,10 +445,14 @@ name = "github.com/golang/protobuf" packages = [ "proto", + "protoc-gen-go/descriptor", "ptypes", "ptypes/any", "ptypes/duration", + "ptypes/empty", + "ptypes/struct", "ptypes/timestamp", + "ptypes/wrappers", ] pruneopts = "" revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" @@ -467,6 +479,14 @@ revision = "3af367b6b30c263d47e8895973edcca9a49cf029" version = "v0.2.0" +[[projects]] + digest = "1:e097a364f4e8d8d91b9b9eeafb992d3796a41fde3eb548c1a87eb9d9f60725cf" + name = "github.com/googleapis/gax-go" + packages = ["."] + pruneopts = "" + revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f" + version = "v2.0.0" + [[projects]] digest = "1:c1d7e883c50a26ea34019320d8ae40fad86c9e5d56e63a1ba2cb618cef43e986" name = "github.com/google/uuid" @@ -1035,6 +1055,37 @@ pruneopts = "" revision = "46796da1b0b4794e1e341883a399f12cc7574b55" +[[projects]] + branch = "master" + digest = "1:2fcfc6c3fb8dfe0d80d7789272230d3ac7db15022b66817113f98d9fff880225" + name = "github.com/zensqlmonitor/go-mssqldb" + packages = ["."] + pruneopts = "" + revision = "e8fbf836e44e86764eba398361d1825651709547" + +[[projects]] + digest = "1:8c8ec859c77fccd10a347b7219b597c4c21c448949e8bdf3fc3e6f4c78f952b4" + name = "go.opencensus.io" + packages = [ + ".", + "internal", + "internal/tagencoding", + "plugin/ocgrpc", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate", + ] + pruneopts = "" + revision = "79993219becaa7e29e3b60cb67f5b8e82dee11d6" + version = "v0.17.0" + [[projects]] branch = "master" digest = "1:0773b5c3be42874166670a20aa177872edb450cd9fc70b1df97303d977702a50" @@ -1087,6 +1138,10 @@ name = "golang.org/x/oauth2" packages = [ ".", + "google", + "internal", + "jws", + "jwt", "clientcredentials", "internal", ] @@ -1144,10 +1199,38 @@ revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" +[[projects]] + branch = "master" + digest = 
"1:2d878ecef4b17dbdd067b8fb98eb64f768f0802b1176b91b9e3c01b457efd01f" + name = "google.golang.org/api" + packages = [ + "googleapi/transport", + "internal", + "iterator", + "option", + "transport", + "transport/grpc", + "transport/http", + ] + pruneopts = "" + revision = "19ff8768a5c0b8e46ea281065664787eefc24121" + [[projects]] digest = "1:c1771ca6060335f9768dff6558108bc5ef6c58506821ad43377ee23ff059e472" name = "google.golang.org/appengine" packages = [ + ".", + "cloudsql", + "internal", + "internal/app_identity", + "internal/base", + "internal/datastore", + "internal/log", + "internal/modules", + "internal/remote_api", + "internal/socket", + "internal/urlfetch", + "socket", "cloudsql", "internal", "internal/base", @@ -1165,7 +1248,16 @@ branch = "master" digest = "1:b1443b4e3cc990c84d27fcdece9d3302158c67dba870e33a6937a2c0076388c2" name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] + packages = [ + "googleapis/api/annotations", + "googleapis/api/distribution", + "googleapis/api/label", + "googleapis/api/metric", + "googleapis/api/monitoredres", + "googleapis/monitoring/v3", + "googleapis/rpc/status", + "protobuf/field_mask", + ] pruneopts = "" revision = "fedd2861243fd1a8152376292b921b394c7bef7e" @@ -1180,6 +1272,7 @@ "codes", "connectivity", "credentials", + "credentials/oauth", "encoding", "encoding/proto", "grpclog", @@ -1303,6 +1396,7 @@ analyzer-name = "dep" analyzer-version = 1 input-imports = [ + "cloud.google.com/go/monitoring/apiv3", "collectd.org/api", "collectd.org/network", "github.com/Azure/go-autorest/autorest", @@ -1336,6 +1430,7 @@ "github.com/go-sql-driver/mysql", "github.com/gobwas/glob", "github.com/golang/protobuf/proto", + "github.com/golang/protobuf/ptypes/timestamp", "github.com/google/go-cmp/cmp", "github.com/gorilla/mux", "github.com/hashicorp/consul/api", @@ -1399,6 +1494,10 @@ "golang.org/x/sys/windows", "golang.org/x/sys/windows/svc", "golang.org/x/sys/windows/svc/mgr", + "google.golang.org/api/option", + "google.golang.org/genproto/googleapis/api/metric", + "google.golang.org/genproto/googleapis/api/monitoredres", + "google.golang.org/genproto/googleapis/monitoring/v3", "google.golang.org/grpc", "google.golang.org/grpc/codes", "google.golang.org/grpc/credentials", diff --git a/Gopkg.lock.old b/Gopkg.lock.old new file mode 100644 index 000000000..f4993ed95 --- /dev/null +++ b/Gopkg.lock.old @@ -0,0 +1,1537 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] +<<<<<<< HEAD + digest = "1:1acadbbc24182315b628f727b2e9ac653266d1644ca4007e0766c28110afc072" + name = "cloud.google.com/go" + packages = [ + "compute/metadata", + "internal/version", + "monitoring/apiv3", + ] + pruneopts = "" + revision = "97efc2c9ffd9fe8ef47f7f3203dc60bbca547374" + version = "v0.28.0" +======= + digest = "1:972f38a9c879a4920d1e3a3d3438104b6c06163bfa3e6f4064adb00468d40587" + name = "cloud.google.com/go" + packages = ["civil"] + pruneopts = "" + revision = "c728a003b238b26cef9ab6753a5dc424b331c3ad" + version = "v0.27.0" +>>>>>>> master + +[[projects]] + branch = "master" + digest = "1:fc0802104acded1f48e4860a9f2db85b82b4a754fca9eae750ff4e8b8cdf2116" + name = "code.cloudfoundry.org/clock" + packages = ["."] + pruneopts = "" + revision = "02e53af36e6c978af692887ed449b74026d76fec" + +[[projects]] + digest = "1:ca3acef20fd660d4df327accbf3ca2df9a12213d914f3113305dcd56579324b9" + name = "collectd.org" + packages = [ + "api", + "cdtime", + "network", + ] + pruneopts = "" + revision = "2ce144541b8903101fb8f1483cc0497a68798122" + version = "v0.3.0" + +[[projects]] +<<<<<<< HEAD + digest = "1:5f61d4466cef935862c262f6bc00e24beb5b39b551e906f3cfb180dfac096d57" + name = "contrib.go.opencensus.io/exporter/stackdriver" + packages = ["propagation"] + pruneopts = "" + revision = "2b93072101d466aa4120b3c23c2e1b08af01541c" + version = "v0.6.0" +======= + digest = "1:5923e22a060ab818a015593422f9e8a35b9d881d4cfcfed0669a82959b11c7ee" + name = "github.com/Azure/go-autorest" + packages = [ + "autorest", + "autorest/adal", + "autorest/azure", + "autorest/azure/auth", + "autorest/date", + ] + pruneopts = "" + revision = "1f7cd6cfe0adea687ad44a512dfe76140f804318" + version = "v10.12.0" +>>>>>>> master + +[[projects]] + branch = "master" + digest = "1:298712a3ee36b59c3ca91f4183bd75d174d5eaa8b4aed5072831f126e2e752f6" + name = "github.com/Microsoft/ApplicationInsights-Go" + packages = [ + "appinsights", + "appinsights/contracts", + ] + pruneopts = "" + revision = "d2df5d440eda5372f24fcac03839a64d6cb5f7e5" + +[[projects]] + digest = "1:45ec6eb579713a01991ad07f538fed3b576ee55f5ce9f248320152a9270d9258" + name = "github.com/Microsoft/go-winio" + packages = ["."] + pruneopts = "" + revision = "a6d595ae73cf27a1b8fc32930668708f45ce1c85" + version = "v0.4.9" + +[[projects]] + digest = "1:213b41361ad1cb4768add9d26c2e27794c65264eefdb24ed6ea34cdfeeff3f3c" + name = "github.com/Shopify/sarama" + packages = ["."] + pruneopts = "" + revision = "a6144ae922fd99dd0ea5046c8137acfb7fab0914" + version = "v1.18.0" + +[[projects]] + digest = "1:f82b8ac36058904227087141017bb82f4b0fc58272990a4cdae3e2d6d222644e" + name = "github.com/StackExchange/wmi" + packages = ["."] + pruneopts = "" + revision = "5d049714c4a64225c3c79a7cf7d02f7fb5b96338" + version = "1.0.0" + +[[projects]] + digest = "1:f296e8b29c60c94efed3b8cfae08d793cb95149cdd7343e6a9834b4ac7136475" + name = "github.com/aerospike/aerospike-client-go" + packages = [ + ".", + "internal/lua", + "internal/lua/resources", + "logger", + "pkg/bcrypt", + "pkg/ripemd160", + "types", + "types/atomic", + "types/particle_type", + "types/rand", + "utils/buffer", + ] + pruneopts = "" + revision = "1dc8cf203d24cd454e71ce40ab4cd0bf3112df90" + version = "v1.27.0" + +[[projects]] + branch = "master" + digest = "1:a74730e052a45a3fab1d310fdef2ec17ae3d6af16228421e238320846f2aaec8" + name = "github.com/alecthomas/template" + packages = [ + ".", + "parse", + ] + pruneopts = "" + revision = "a0175ee3bccc567396460bf5acd36800cb10c49c" + +[[projects]] + branch = "master" + digest = 
"1:8483994d21404c8a1d489f6be756e25bfccd3b45d65821f25695577791a08e68" + name = "github.com/alecthomas/units" + packages = ["."] + pruneopts = "" + revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a" + +[[projects]] + branch = "master" + digest = "1:7f21a8f175ee7f91c659f919c61032e11889fba5dc25c0cec555087cbb87435a" + name = "github.com/amir/raidman" + packages = [ + ".", + "proto", + ] + pruneopts = "" + revision = "1ccc43bfb9c93cb401a4025e49c64ba71e5e668b" + +[[projects]] + branch = "master" + digest = "1:0828d8c0f95689f832cf348fe23827feb7640cd698d612ef59e2f9d041f54c68" + name = "github.com/apache/thrift" + packages = ["lib/go/thrift"] + pruneopts = "" + revision = "f2867c24984aa53edec54a138c03db934221bdea" + +[[projects]] + digest = "1:65a05bde9b02f645c73afa61c9f6af92d94d726c81a268f45cc70218bd58de65" + name = "github.com/aws/aws-sdk-go" + packages = [ + "aws", + "aws/awserr", + "aws/awsutil", + "aws/client", + "aws/client/metadata", + "aws/corehandlers", + "aws/credentials", + "aws/credentials/ec2rolecreds", + "aws/credentials/endpointcreds", + "aws/credentials/stscreds", + "aws/csm", + "aws/defaults", + "aws/ec2metadata", + "aws/endpoints", + "aws/request", + "aws/session", + "aws/signer/v4", + "internal/sdkio", + "internal/sdkrand", + "internal/sdkuri", + "internal/shareddefaults", + "private/protocol", + "private/protocol/json/jsonutil", + "private/protocol/jsonrpc", + "private/protocol/query", + "private/protocol/query/queryutil", + "private/protocol/rest", + "private/protocol/xml/xmlutil", + "service/cloudwatch", + "service/kinesis", + "service/sts", + ] + pruneopts = "" + revision = "8cf662a972fa7fba8f2c1ec57648cf840e2bb401" + version = "v1.14.30" + +[[projects]] + branch = "master" + digest = "1:c0bec5f9b98d0bc872ff5e834fac186b807b656683bd29cb82fb207a1513fabb" + name = "github.com/beorn7/perks" + packages = ["quantile"] + pruneopts = "" + revision = "3a771d992973f24aa725d07868b467d1ddfceafb" + +[[projects]] + digest = "1:c5978131c797af795972c27c25396c81d1bf53b7b6e8e3e0259e58375765c071" + name = "github.com/bsm/sarama-cluster" + packages = ["."] + pruneopts = "" + revision = "cf455bc755fe41ac9bb2861e7a961833d9c2ecc3" + version = "v2.1.13" + +[[projects]] + digest = "1:f619cb9b07aebe5416262cdd8b86082e8d5bdc5264cb3b615ff858df0b645f97" + name = "github.com/cenkalti/backoff" + packages = ["."] + pruneopts = "" + revision = "2ea60e5f094469f9e65adb9cd103795b73ae743e" + version = "v2.0.0" + +[[projects]] + branch = "master" + digest = "1:298e42868718da06fc0899ae8fdb99c48a14477045234c9274d81caa79af6a8f" + name = "github.com/couchbase/go-couchbase" + packages = ["."] + pruneopts = "" + revision = "16db1f1fe037412f12738fa4d8448c549c4edd77" + +[[projects]] + branch = "master" + digest = "1:c734658274a6be88870a36742fdea96a3fce4fc99a7b90946c9e84335ceae71a" + name = "github.com/couchbase/gomemcached" + packages = [ + ".", + "client", + ] + pruneopts = "" + revision = "0da75df145308b9a4e6704d762ca9d9b77752efc" + +[[projects]] + branch = "master" + digest = "1:c1195c02bc8fbf5307cfb95bc79eddaa1351ee3587cc4a7bbe6932e2fb966ff2" + name = "github.com/couchbase/goutils" + packages = [ + "logging", + "scramsha", + ] + pruneopts = "" + revision = "e865a1461c8ac0032bd37e2d4dab3289faea3873" + +[[projects]] + digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "" + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = 
"1:7fdc54859cd901c25b9d8db964410a4e0d98fa0dca267fe4cf49c0eede5e06c2" + name = "github.com/denisenkom/go-mssqldb" + packages = [ + ".", + "internal/cp", + ] + pruneopts = "" + revision = "1eb28afdf9b6e56cf673badd47545f844fe81103" + +[[projects]] + digest = "1:6098222470fe0172157ce9bbef5d2200df4edde17ee649c5d6e48330e4afa4c6" + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + pruneopts = "" + revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" + version = "v3.2.0" + +[[projects]] + branch = "master" + digest = "1:654ac9799e7a8a586d8690bb2229a4f3408bbfe2c5494bf4dfe043053eeb5496" + name = "github.com/dimchansky/utfbom" + packages = ["."] + pruneopts = "" + revision = "6c6132ff69f0f6c088739067407b5d32c52e1d0f" + +[[projects]] + digest = "1:522eff2a1f014a64fb403db60fc0110653e4dc5b59779894d208e697b0708ddc" + name = "github.com/docker/distribution" + packages = [ + "digestset", + "reference", + ] + pruneopts = "" + revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" + +[[projects]] + digest = "1:d149605f1b00713fdc48150122892d77d49d30c825f690dd92f497aeb6cf18f5" + name = "github.com/docker/docker" + packages = [ + "api", + "api/types", + "api/types/blkiodev", + "api/types/container", + "api/types/events", + "api/types/filters", + "api/types/image", + "api/types/mount", + "api/types/network", + "api/types/registry", + "api/types/strslice", + "api/types/swarm", + "api/types/swarm/runtime", + "api/types/time", + "api/types/versions", + "api/types/volume", + "client", + ] + pruneopts = "" + revision = "ed7b6428c133e7c59404251a09b7d6b02fa83cc2" + +[[projects]] + digest = "1:a5ecc2e70260a87aa263811281465a5effcfae8a54bac319cee87c4625f04d63" + name = "github.com/docker/go-connections" + packages = [ + "nat", + "sockets", + "tlsconfig", + ] + pruneopts = "" + revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d" + version = "v0.3.0" + +[[projects]] + digest = "1:582d54fcb7233da8dde1dfd2210a5b9675d0685f84246a8d317b07d680c18b1b" + name = "github.com/docker/go-units" + packages = ["."] + pruneopts = "" + revision = "47565b4f722fb6ceae66b95f853feed578a4a51c" + version = "v0.3.3" + +[[projects]] + digest = "1:6d6672f85a84411509885eaa32f597577873de00e30729b9bb0eb1e1faa49c12" + name = "github.com/eapache/go-resiliency" + packages = ["breaker"] + pruneopts = "" + revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:7b12ea8b50040c6c2378ec5b5a1ab722730b2bfb46e8724ded57f2c3905431fa" + name = "github.com/eapache/go-xerial-snappy" + packages = ["."] + pruneopts = "" + revision = "040cc1a32f578808623071247fdbd5cc43f37f5f" + +[[projects]] + digest = "1:d8d46d21073d0f65daf1740ebf4629c65e04bf92e14ce93c2201e8624843c3d3" + name = "github.com/eapache/queue" + packages = ["."] + pruneopts = "" + revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98" + version = "v1.1.0" + +[[projects]] + digest = "1:3fa846cb3feb4e65371fe3c347c299de9b5bc3e71e256c0d940cd19b767a6ba0" + name = "github.com/eclipse/paho.mqtt.golang" + packages = [ + ".", + "packets", + ] + pruneopts = "" + revision = "36d01c2b4cbeb3d2a12063e4880ce30800af9560" + version = "v1.1.1" + +[[projects]] + digest = "1:858b7fe7b0f4bc7ef9953926828f2816ea52d01a88d72d1c45bc8c108f23c356" + name = "github.com/go-ini/ini" + packages = ["."] + pruneopts = "" + revision = "358ee7663966325963d4e8b2e1fbd570c5195153" + version = "v1.38.1" + +[[projects]] + digest = "1:6a4a01d58b227c4b6b11111b9f172ec5c17682b82724e58e6daf3f19f4faccd8" + name = "github.com/go-logfmt/logfmt" + packages = ["."] + pruneopts = "" 
+ revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5" + version = "v0.3.0" + +[[projects]] + digest = "1:96c4a6ff4206086347bfe28e96e092642882128f45ecb8dc8f15f3e6f6703af0" + name = "github.com/go-ole/go-ole" + packages = [ + ".", + "oleutil", + ] + pruneopts = "" + revision = "a41e3c4b706f6ae8dfbff342b06e40fa4d2d0506" + version = "v1.2.1" + +[[projects]] + digest = "1:3dfd659219b6f63dc0677a62b8d4e8f10b5cf53900aef40858db10a19407e41d" + name = "github.com/go-redis/redis" + packages = [ + ".", + "internal", + "internal/consistenthash", + "internal/hashtag", + "internal/pool", + "internal/proto", + "internal/singleflight", + "internal/util", + ] + pruneopts = "" + revision = "83fb42932f6145ce52df09860384a4653d2d332a" + version = "v6.12.0" + +[[projects]] + digest = "1:c07de423ca37dc2765396d6971599ab652a339538084b9b58c9f7fc533b28525" + name = "github.com/go-sql-driver/mysql" + packages = ["."] + pruneopts = "" + revision = "d523deb1b23d913de5bdada721a6071e71283618" + version = "v1.4.0" + +[[projects]] + digest = "1:9ab1b1c637d7c8f49e39d8538a650d7eb2137b076790cff69d160823b505964c" + name = "github.com/gobwas/glob" + packages = [ + ".", + "compiler", + "match", + "syntax", + "syntax/ast", + "syntax/lexer", + "util/runes", + "util/strings", + ] + pruneopts = "" + revision = "5ccd90ef52e1e632236f7326478d4faa74f99438" + version = "v0.2.3" + +[[projects]] + digest = "1:6e73003ecd35f4487a5e88270d3ca0a81bc80dc88053ac7e4dcfec5fba30d918" + name = "github.com/gogo/protobuf" + packages = ["proto"] + pruneopts = "" + revision = "636bf0302bc95575d69441b25a2603156ffdddf1" + version = "v1.1.1" + +[[projects]] + digest = "1:f958a1c137db276e52f0b50efee41a1a389dcdded59a69711f3e872757dab34b" + name = "github.com/golang/protobuf" + packages = [ + "proto", + "protoc-gen-go/descriptor", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/empty", + "ptypes/struct", + "ptypes/timestamp", + "ptypes/wrappers", + ] + pruneopts = "" + revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:2a5888946cdbc8aa360fd43301f9fc7869d663f60d5eedae7d4e6e5e4f06f2bf" + name = "github.com/golang/snappy" + packages = ["."] + pruneopts = "" + revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" + +[[projects]] + digest = "1:f9f45f75f332e03fc7e9fe9188ea4e1ce4d14779ef34fa1b023da67518e36327" + name = "github.com/google/go-cmp" + packages = [ + "cmp", + "cmp/internal/diff", + "cmp/internal/function", + "cmp/internal/value", + ] + pruneopts = "" + revision = "3af367b6b30c263d47e8895973edcca9a49cf029" + version = "v0.2.0" + +[[projects]] +<<<<<<< HEAD + digest = "1:e097a364f4e8d8d91b9b9eeafb992d3796a41fde3eb548c1a87eb9d9f60725cf" + name = "github.com/googleapis/gax-go" + packages = ["."] + pruneopts = "" + revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f" + version = "v2.0.0" +======= + digest = "1:c1d7e883c50a26ea34019320d8ae40fad86c9e5d56e63a1ba2cb618cef43e986" + name = "github.com/google/uuid" + packages = ["."] + pruneopts = "" + revision = "064e2069ce9c359c118179501254f67d7d37ba24" + version = "0.2" +>>>>>>> master + +[[projects]] + digest = "1:dbbeb8ddb0be949954c8157ee8439c2adfd8dc1c9510eb44a6e58cb68c3dce28" + name = "github.com/gorilla/context" + packages = ["."] + pruneopts = "" + revision = "08b5f424b9271eedf6f9f0ce86cb9396ed337a42" + version = "v1.1.1" + +[[projects]] + digest = "1:c2c8666b4836c81a1d247bdf21c6a6fc1ab586538ab56f74437c2e0df5c375e1" + name = "github.com/gorilla/mux" + packages = ["."] + pruneopts = "" + revision = 
"e3702bed27f0d39777b0b37b664b6280e8ef8fbf" + version = "v1.6.2" + +[[projects]] + branch = "master" + digest = "1:60b7bc5e043a11213472ae05252527287d20e0a6ccc18f6ae67fad88e41004de" + name = "github.com/hailocab/go-hostpool" + packages = ["."] + pruneopts = "" + revision = "e80d13ce29ede4452c43dea11e79b9bc8a15b478" + +[[projects]] + digest = "1:e7224669901bab4094e6d6697c136557b7177db6ceb01b7fc8b20d08f4b5aacd" + name = "github.com/hashicorp/consul" + packages = ["api"] + pruneopts = "" + revision = "39f93f011e591c842acc8053a7f5972aa6e592fd" + version = "v1.2.1" + +[[projects]] + branch = "master" + digest = "1:f5d25fd7bdda08e39e01193ef94a1ebf7547b1b931bcdec785d08050598f306c" + name = "github.com/hashicorp/go-cleanhttp" + packages = ["."] + pruneopts = "" + revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d" + +[[projects]] + branch = "master" + digest = "1:ff65bf6fc4d1116f94ac305342725c21b55c16819c2606adc8f527755716937f" + name = "github.com/hashicorp/go-rootcerts" + packages = ["."] + pruneopts = "" + revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00" + +[[projects]] + digest = "1:f72168ea995f398bab88e84bd1ff58a983466ba162fb8d50d47420666cd57fad" + name = "github.com/hashicorp/serf" + packages = ["coordinate"] + pruneopts = "" + revision = "d6574a5bb1226678d7010325fb6c985db20ee458" + version = "v0.8.1" + +[[projects]] + digest = "1:a39ef049cdeee03a57b132e7d60e32711b9d949c78458da78e702d9864c54369" + name = "github.com/influxdata/go-syslog" + packages = [ + "rfc5424", + "rfc5425", + ] + pruneopts = "" + revision = "eecd51df3ad85464a2bab9b7d3a45bc1e299059e" + version = "v1.0.1" + +[[projects]] + branch = "master" + digest = "1:bc3eb5ddfd59781ea1183f2b3d1eb105a1495d421f09b2ccd360c7fced0b612d" + name = "github.com/influxdata/tail" + packages = [ + ".", + "ratelimiter", + "util", + "watch", + "winfile", + ] + pruneopts = "" + revision = "c43482518d410361b6c383d7aebce33d0471d7bc" + +[[projects]] + branch = "master" + digest = "1:7fb6cc9607eaa6ef309edebc42b57f704244bd4b9ab23bff128829c4ad09b95d" + name = "github.com/influxdata/toml" + packages = [ + ".", + "ast", + ] + pruneopts = "" + revision = "2a2e3012f7cfbef64091cc79776311e65dfa211b" + +[[projects]] + branch = "master" + digest = "1:a0c157916be0b4de1d4565b1f094b8d746109f94968140dff40a42780fa6ccef" + name = "github.com/influxdata/wlog" + packages = ["."] + pruneopts = "" + revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec" + +[[projects]] + digest = "1:2de1791b9e43f26c696e36950e42676565e7da7499a870bc02213da4b59b1d14" + name = "github.com/jackc/pgx" + packages = [ + ".", + "chunkreader", + "internal/sanitize", + "pgio", + "pgproto3", + "pgtype", + "stdlib", + ] + pruneopts = "" + revision = "da3231b0b66e2e74cdb779f1d46c5e958ba8be27" + version = "v3.1.0" + +[[projects]] + digest = "1:6f49eae0c1e5dab1dafafee34b207aeb7a42303105960944828c2079b92fc88e" + name = "github.com/jmespath/go-jmespath" + packages = ["."] + pruneopts = "" + revision = "0b12d6b5" + +[[projects]] + branch = "master" + digest = "1:2c5ad58492804c40bdaf5d92039b0cde8b5becd2b7feeb37d7d1cc36a8aa8dbe" + name = "github.com/kardianos/osext" + packages = ["."] + pruneopts = "" + revision = "ae77be60afb1dcacde03767a8c37337fad28ac14" + +[[projects]] + branch = "master" + digest = "1:fed90fa725d3b1bac0a760de64426834dfef4546474cf182f2ec94285afa74a8" + name = "github.com/kardianos/service" + packages = ["."] + pruneopts = "" + revision = "615a14ed75099c9eaac6949e22ac2341bf9d3197" + +[[projects]] + branch = "master" + digest = 
"1:63e7368fcf6b54804076eaec26fd9cf0c4466166b272393db4b93102e1e962df" + name = "github.com/kballard/go-shellquote" + packages = ["."] + pruneopts = "" + revision = "95032a82bc518f77982ea72343cc1ade730072f0" + +[[projects]] + branch = "master" + digest = "1:1ed9eeebdf24aadfbca57eb50e6455bd1d2474525e0f0d4454de8c8e9bc7ee9a" + name = "github.com/kr/logfmt" + packages = ["."] + pruneopts = "" + revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" + +[[projects]] + branch = "master" + digest = "1:7e9956922e349af0190afa0b6621befcd201072679d8e51a9047ff149f2afe93" + name = "github.com/mailru/easyjson" + packages = [ + ".", + "buffer", + "jlexer", + "jwriter", + ] + pruneopts = "" + revision = "efc7eb8984d6655c26b5c9d2e65c024e5767c37c" + +[[projects]] + digest = "1:63722a4b1e1717be7b98fc686e0b30d5e7f734b9e93d7dee86293b6deab7ea28" + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + pruneopts = "" + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" + +[[projects]] + digest = "1:4c8d8358c45ba11ab7bb15df749d4df8664ff1582daead28bae58cf8cbe49890" + name = "github.com/miekg/dns" + packages = ["."] + pruneopts = "" + revision = "5a2b9fab83ff0f8bfc99684bd5f43a37abe560f1" + version = "v1.0.8" + +[[projects]] + branch = "master" + digest = "1:99651e95333755cbe5c9768c1b80031300acca64a80870b40309202b32585a5a" + name = "github.com/mitchellh/go-homedir" + packages = ["."] + pruneopts = "" + revision = "3864e76763d94a6df2f9960b16a20a33da9f9a66" + +[[projects]] + branch = "master" + digest = "1:f43ed2c836208c14f45158fd01577c985688a4d11cf9fd475a939819fef3b321" + name = "github.com/mitchellh/mapstructure" + packages = ["."] + pruneopts = "" + revision = "f15292f7a699fcc1a38a80977f80a046874ba8ac" + +[[projects]] + digest = "1:ee2e62b00a9ccc2dba1525f93396e35c847f90f87939df6f361b86315ea5f69a" + name = "github.com/multiplay/go-ts3" + packages = ["."] + pruneopts = "" + revision = "d0d44555495c8776880a17e439399e715a4ef319" + version = "v1.0.0" + +[[projects]] + digest = "1:ccd0def9f0b82b61c5e54fcbfccf528eabb13b489d008e46dc16b808c2e1f765" + name = "github.com/naoina/go-stringutil" + packages = ["."] + pruneopts = "" + revision = "6b638e95a32d0c1131db0e7fe83775cbea4a0d0b" + version = "v0.1.0" + +[[projects]] + digest = "1:e5ec850ce66beb0014fc40d8e64b7482172eee71d86d734d66def5e9eac16797" + name = "github.com/nats-io/gnatsd" + packages = [ + "conf", + "logger", + "server", + "server/pse", + "util", + ] + pruneopts = "" + revision = "6608e9ac3be979dcb0614b772cc86a87b71acaa3" + version = "v1.2.0" + +[[projects]] + digest = "1:665af347df4c5d1ae4c3eacd0754f5337a301f6a3f2444c9993b996605c8c02b" + name = "github.com/nats-io/go-nats" + packages = [ + ".", + "encoders/builtin", + "util", + ] + pruneopts = "" + revision = "062418ea1c2181f52dc0f954f6204370519a868b" + version = "v1.5.0" + +[[projects]] + digest = "1:be61e8224b84064109eaba8157cbb4bbe6ca12443e182b6624fdfa1c0dcf53d9" + name = "github.com/nats-io/nuid" + packages = ["."] + pruneopts = "" + revision = "289cccf02c178dc782430d534e3c1f5b72af807f" + version = "v1.0.0" + +[[projects]] + digest = "1:7a69f6a3a33929f8b66aa39c93868ad1698f06417fe627ae067559beb94504bd" + name = "github.com/nsqio/go-nsq" + packages = ["."] + pruneopts = "" + revision = "eee57a3ac4174c55924125bb15eeeda8cffb6e6f" + version = "v1.0.7" + +[[projects]] + digest = "1:5d9b668b0b4581a978f07e7d2e3314af18eb27b3fb5d19b70185b7c575723d11" + name = "github.com/opencontainers/go-digest" + packages = ["."] + pruneopts = "" + revision = 
"279bed98673dd5bef374d3b6e4b09e2af76183bf" + version = "v1.0.0-rc1" + +[[projects]] + digest = "1:f26c8670b11e29a49c8e45f7ec7f2d5bac62e8fd4e3c0ae1662baa4a697f984a" + name = "github.com/opencontainers/image-spec" + packages = [ + "specs-go", + "specs-go/v1", + ] + pruneopts = "" + revision = "d60099175f88c47cd379c4738d158884749ed235" + version = "v1.0.1" + +[[projects]] + branch = "master" + digest = "1:2da0e5077ed40453dc281b9a2428d84cf6ad14063aed189f6296ca5dd25cf13d" + name = "github.com/opentracing-contrib/go-observer" + packages = ["."] + pruneopts = "" + revision = "a52f2342449246d5bcc273e65cbdcfa5f7d6c63c" + +[[projects]] + digest = "1:78fb99d6011c2ae6c72f3293a83951311147b12b06a5ffa43abf750c4fab6ac5" + name = "github.com/opentracing/opentracing-go" + packages = [ + ".", + "ext", + "log", + ] + pruneopts = "" + revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38" + version = "v1.0.2" + +[[projects]] + digest = "1:fea0e67285d900e5a0a7ec19ff4b4c82865a28dddbee8454c5360ad908f7069c" + name = "github.com/openzipkin/zipkin-go-opentracing" + packages = [ + ".", + "flag", + "thrift/gen-go/scribe", + "thrift/gen-go/zipkincore", + "types", + "wire", + ] + pruneopts = "" + revision = "26cf9707480e6b90e5eff22cf0bbf05319154232" + version = "v0.3.4" + +[[projects]] + digest = "1:29e34e58f26655c4d73135cdfc0517ea2ff1483eff34e5d5ef4b6fddbb81e31b" + name = "github.com/pierrec/lz4" + packages = [ + ".", + "internal/xxh32", + ] + pruneopts = "" + revision = "1958fd8fff7f115e79725b1288e0b878b3e06b00" + version = "v2.0.3" + +[[projects]] + digest = "1:7365acd48986e205ccb8652cc746f09c8b7876030d53710ea6ef7d0bd0dcd7ca" + name = "github.com/pkg/errors" + packages = ["."] + pruneopts = "" + revision = "645ef00459ed84a119197bfb8d8205042c6df63d" + version = "v0.8.0" + +[[projects]] + digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:4142d94383572e74b42352273652c62afec5b23f325222ed09198f46009022d1" + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/promhttp", + ] + pruneopts = "" + revision = "c5b7fccd204277076155f10851dad72b76a49317" + version = "v0.8.0" + +[[projects]] + branch = "master" + digest = "1:185cf55b1f44a1bf243558901c3f06efa5c64ba62cfdcbb1bf7bbe8c3fb68561" + name = "github.com/prometheus/client_model" + packages = ["go"] + pruneopts = "" + revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" + +[[projects]] + branch = "master" + digest = "1:bfbc121ef802d245ef67421cff206615357d9202337a3d492b8f668906b485a8" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "log", + "model", + ] + pruneopts = "" + revision = "7600349dcfe1abd18d72d3a1770870d9800a7801" + +[[projects]] + branch = "master" + digest = "1:b694a6bdecdace488f507cff872b30f6f490fdaf988abd74d87ea56406b23b6e" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/util", + "nfs", + "xfs", + ] + pruneopts = "" + revision = "ae68e2d4c00fed4943b5f6698d504a5fe083da8a" + +[[projects]] + branch = "master" + digest = "1:15bcdc717654ef21128e8af3a63eec39a6d08a830e297f93d65163f87c8eb523" + name = "github.com/rcrowley/go-metrics" + packages = ["."] + pruneopts = "" + revision = "e2704e165165ec55d062f5919b4b29494e9fa790" + +[[projects]] + branch = "master" + digest = 
"1:7fc2f428767a2521abc63f1a663d981f61610524275d6c0ea645defadd4e916f" + name = "github.com/samuel/go-zookeeper" + packages = ["zk"] + pruneopts = "" + revision = "c4fab1ac1bec58281ad0667dc3f0907a9476ac47" + +[[projects]] + digest = "1:7f569d906bdd20d906b606415b7d794f798f91a62fcfb6a4daa6d50690fb7a3f" + name = "github.com/satori/go.uuid" + packages = ["."] + pruneopts = "" + revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3" + version = "v1.2.0" + +[[projects]] + digest = "1:02715a2fb4b9279af36651a59a51dd4164eb689bd6785874811899f43eeb2a54" + name = "github.com/shirou/gopsutil" + packages = [ + "cpu", + "disk", + "host", + "internal/common", + "load", + "mem", + "net", + "process", + ] + pruneopts = "" + revision = "8048a2e9c5773235122027dd585cf821b2af1249" + version = "v2.18.07" + +[[projects]] + branch = "master" + digest = "1:99c6a6dab47067c9b898e8c8b13d130c6ab4ffbcc4b7cc6236c2cd0b1e344f5b" + name = "github.com/shirou/w32" + packages = ["."] + pruneopts = "" + revision = "bb4de0191aa41b5507caa14b0650cdbddcd9280b" + +[[projects]] + digest = "1:8cf46b6c18a91068d446e26b67512cf16f1540b45d90b28b9533706a127f0ca6" + name = "github.com/sirupsen/logrus" + packages = ["."] + pruneopts = "" + revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" + version = "v1.0.5" + +[[projects]] + branch = "master" + digest = "1:4b0cabe65ca903a7b2a3e6272c5304eb788ce196d35ecb901c6563e5e7582443" + name = "github.com/soniah/gosnmp" + packages = ["."] + pruneopts = "" + revision = "96b86229e9b3ffb4b954144cdc7f98fe3ee1003f" + +[[projects]] + branch = "master" + digest = "1:4e8f1cae8e6d83af9000d82566efb8823907dae77ba4f1d76ff28fdd197c3c90" + name = "github.com/streadway/amqp" + packages = ["."] + pruneopts = "" + revision = "e5adc2ada8b8efff032bf61173a233d143e9318e" + +[[projects]] + digest = "1:711eebe744c0151a9d09af2315f0bb729b2ec7637ef4c410fa90a18ef74b65b6" + name = "github.com/stretchr/objx" + packages = ["."] + pruneopts = "" + revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c" + version = "v0.1.1" + +[[projects]] + digest = "1:c587772fb8ad29ad4db67575dad25ba17a51f072ff18a22b4f0257a4d9c24f75" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "mock", + "require", + ] + pruneopts = "" + revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" + version = "v1.2.2" + +[[projects]] + digest = "1:e139a0dfe24e723193005b291ed82a975041718cfcab9136aa6c9540df70a4ff" + name = "github.com/tidwall/gjson" + packages = ["."] + pruneopts = "" + revision = "f123b340873a0084cb27267eddd8ff615115fbff" + version = "v1.1.2" + +[[projects]] + branch = "master" + digest = "1:4db4f92bb9cb04cfc4fccb36aba2598b02a988008c4cc0692b241214ad8ac96e" + name = "github.com/tidwall/match" + packages = ["."] + pruneopts = "" + revision = "1731857f09b1f38450e2c12409748407822dc6be" + +[[projects]] + digest = "1:343f20460c11a0d0529fe532553bfef9446918d1a1fda6d8661eb27d5b1a68b8" + name = "github.com/vjeantet/grok" + packages = ["."] + pruneopts = "" + revision = "ce01e59abcf6fbc9833b7deb5e4b8ee1769bcc53" + version = "v1.0.0" + +[[projects]] + digest = "1:f9fe29bf856d49f9a51d6001588cb5ee5d65c8a7ff5e8b0dd5423c3a510f0833" + name = "github.com/vmware/govmomi" + packages = [ + ".", + "find", + "list", + "nfc", + "object", + "performance", + "property", + "session", + "simulator", + "simulator/esx", + "simulator/vpx", + "task", + "view", + "vim25", + "vim25/debug", + "vim25/methods", + "vim25/mo", + "vim25/progress", + "vim25/soap", + "vim25/types", + "vim25/xml", + ] + pruneopts = "" + revision = "e3a01f9611c32b2362366434bcd671516e78955d" + 
version = "v0.18.0" + +[[projects]] + branch = "master" + digest = "1:98ed05e9796df287b90c1d96854e3913c8e349dbc546412d3cabb472ecf4b417" + name = "github.com/wvanbergen/kafka" + packages = ["consumergroup"] + pruneopts = "" + revision = "e2edea948ddfee841ea9a263b32ccca15f7d6c2f" + +[[projects]] + branch = "master" + digest = "1:12aff3cc417907bf9f683a6bf1dc78ffb08e41bc69f829491e593ea9b951a3cf" + name = "github.com/wvanbergen/kazoo-go" + packages = ["."] + pruneopts = "" + revision = "f72d8611297a7cf105da904c04198ad701a60101" + +[[projects]] + branch = "master" + digest = "1:c5918689b7e187382cc1066bf0260de54ba9d1b323105f46ed2551d2fb4a17c7" + name = "github.com/yuin/gopher-lua" + packages = [ + ".", + "ast", + "parse", + "pm", + ] + pruneopts = "" + revision = "46796da1b0b4794e1e341883a399f12cc7574b55" + +[[projects]] + branch = "master" +<<<<<<< HEAD + digest = "1:2fcfc6c3fb8dfe0d80d7789272230d3ac7db15022b66817113f98d9fff880225" + name = "github.com/zensqlmonitor/go-mssqldb" + packages = ["."] + pruneopts = "" + revision = "e8fbf836e44e86764eba398361d1825651709547" + +[[projects]] + digest = "1:8c8ec859c77fccd10a347b7219b597c4c21c448949e8bdf3fc3e6f4c78f952b4" + name = "go.opencensus.io" + packages = [ + ".", + "internal", + "internal/tagencoding", + "plugin/ocgrpc", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate", + ] + pruneopts = "" + revision = "79993219becaa7e29e3b60cb67f5b8e82dee11d6" + version = "v0.17.0" + +[[projects]] + branch = "master" +======= +>>>>>>> master + digest = "1:0773b5c3be42874166670a20aa177872edb450cd9fc70b1df97303d977702a50" + name = "golang.org/x/crypto" + packages = [ + "bcrypt", + "blowfish", + "ed25519", + "ed25519/internal/edwards25519", + "md4", + "pbkdf2", + "pkcs12", + "pkcs12/internal/rc2", + "ssh/terminal", + ] + pruneopts = "" + revision = "a2144134853fc9a27a7b1e3eb4f19f1a76df13c9" + +[[projects]] + branch = "master" + digest = "1:00ff990baae4665bb0a8174af5ff78228574227ed96c89671247a56852a50e21" + name = "golang.org/x/net" + packages = [ + "bpf", + "context", + "context/ctxhttp", + "html", + "html/atom", + "html/charset", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/iana", + "internal/socket", + "internal/socks", + "internal/timeseries", + "ipv4", + "ipv6", + "proxy", + "trace", + "websocket", + ] + pruneopts = "" + revision = "a680a1efc54dd51c040b3b5ce4939ea3cf2ea0d1" + +[[projects]] + branch = "master" + digest = "1:b697592485cb412be4188c08ca0beed9aab87f36b86418e21acc4a3998f63734" + name = "golang.org/x/oauth2" + packages = [ + ".", +<<<<<<< HEAD + "google", + "internal", + "jws", + "jwt", +======= + "clientcredentials", + "internal", +>>>>>>> master + ] + pruneopts = "" + revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9" + +[[projects]] + branch = "master" + digest = "1:677e38cad6833ad266ec843739d167755eda1e6f2d8af1c63102b0426ad820db" + name = "golang.org/x/sys" + packages = [ + "unix", + "windows", + "windows/registry", + "windows/svc", + "windows/svc/debug", + "windows/svc/eventlog", + "windows/svc/mgr", + ] + pruneopts = "" + revision = "ac767d655b305d4e9612f5f6e33120b9176c4ad4" + +[[projects]] + digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "encoding", + "encoding/charmap", + "encoding/htmlindex", + "encoding/internal", + "encoding/internal/identifier", + "encoding/japanese", + 
"encoding/korean", + "encoding/simplifiedchinese", + "encoding/traditionalchinese", + "encoding/unicode", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "internal/utf8internal", + "language", + "runes", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "" + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + branch = "master" + digest = "1:2d878ecef4b17dbdd067b8fb98eb64f768f0802b1176b91b9e3c01b457efd01f" + name = "google.golang.org/api" + packages = [ + "googleapi/transport", + "internal", + "iterator", + "option", + "transport", + "transport/grpc", + "transport/http", + ] + pruneopts = "" + revision = "19ff8768a5c0b8e46ea281065664787eefc24121" + +[[projects]] + digest = "1:c1771ca6060335f9768dff6558108bc5ef6c58506821ad43377ee23ff059e472" + name = "google.golang.org/appengine" + packages = [ +<<<<<<< HEAD + ".", + "cloudsql", + "internal", + "internal/app_identity", + "internal/base", + "internal/datastore", + "internal/log", + "internal/modules", + "internal/remote_api", + "internal/socket", + "internal/urlfetch", + "socket", +======= + "cloudsql", + "internal", + "internal/base", + "internal/datastore", + "internal/log", + "internal/remote_api", + "internal/urlfetch", +>>>>>>> master + "urlfetch", + ] + pruneopts = "" + revision = "b1f26356af11148e710935ed1ac8a7f5702c7612" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:b1443b4e3cc990c84d27fcdece9d3302158c67dba870e33a6937a2c0076388c2" + name = "google.golang.org/genproto" + packages = [ + "googleapis/api/annotations", + "googleapis/api/distribution", + "googleapis/api/label", + "googleapis/api/metric", + "googleapis/api/monitoredres", + "googleapis/monitoring/v3", + "googleapis/rpc/status", + "protobuf/field_mask", + ] + pruneopts = "" + revision = "fedd2861243fd1a8152376292b921b394c7bef7e" + +[[projects]] + digest = "1:5f31b45ee9da7a87f140bef3ed0a7ca34ea2a6d38eb888123b8e28170e8aa4f2" + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "codes", + "connectivity", + "credentials", + "credentials/oauth", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/channelz", + "internal/grpcrand", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap", + "transport", + ] + pruneopts = "" + revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" + version = "v1.13.0" + +[[projects]] + digest = "1:15d017551627c8bb091bde628215b2861bed128855343fdd570c62d08871f6e1" + name = "gopkg.in/alecthomas/kingpin.v2" + packages = ["."] + pruneopts = "" + revision = "947dcec5ba9c011838740e680966fd7087a71d0d" + version = "v2.2.6" + +[[projects]] + digest = "1:3cad99e0d1f94b8c162787c12e59d0a0b9df1ef75590eb145cdd625479091efe" + name = "gopkg.in/asn1-ber.v1" + packages = ["."] + pruneopts = "" + revision = "379148ca0225df7a432012b8df0355c2a2063ac0" + version = "v1.2" + +[[projects]] + digest = "1:581450ae66d7970d91ef9132459fa583e937c6e502f1b96e4ee7783a56fa0b44" + name = "gopkg.in/fatih/pool.v2" + packages = ["."] + pruneopts = "" + revision = "010e0b745d12eaf8426c95f9c3924d81dd0b668f" + version = "v2.0.0" + +[[projects]] + digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd" + name = "gopkg.in/fsnotify.v1" + packages = ["."] + pruneopts = "" + revision = 
"c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" + source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz" + version = "v1.4.7" + +[[projects]] + digest = "1:960720207d3d0992995f4576e1366fd9e9b1483473b07fb7243144f75f5b1546" + name = "gopkg.in/gorethink/gorethink.v3" + packages = [ + ".", + "encoding", + "ql2", + "types", + ] + pruneopts = "" + revision = "7f5bdfd858bb064d80559b2a32b86669c5de5d3b" + version = "v3.0.5" + +[[projects]] + digest = "1:367baf06b7dbd0ef0bbdd785f6a79f929c96b0c18e9d3b29c0eed1ac3f5db133" + name = "gopkg.in/ldap.v2" + packages = ["."] + pruneopts = "" + revision = "bb7a9ca6e4fbc2129e3db588a34bc970ffe811a9" + version = "v2.5.1" + +[[projects]] + branch = "v2" + digest = "1:f54ba71a035aac92ced3e902d2bff3734a15d1891daff73ec0f90ef236750139" + name = "gopkg.in/mgo.v2" + packages = [ + ".", + "bson", + "internal/json", + "internal/sasl", + "internal/scram", + ] + pruneopts = "" + revision = "9856a29383ce1c59f308dd1cf0363a79b5bef6b5" + +[[projects]] + digest = "1:b49c4d3115800eace659c9a6a5c384a922f5b210178b24a01abb10731f404ea2" + name = "gopkg.in/olivere/elastic.v5" + packages = [ + ".", + "config", + "uritemplates", + ] + pruneopts = "" + revision = "52741dc2ce53629cbe1e673869040d886cba2cd5" + version = "v5.0.70" + +[[projects]] + branch = "v1" + digest = "1:a96d16bd088460f2e0685d46c39bcf1208ba46e0a977be2df49864ec7da447dd" + name = "gopkg.in/tomb.v1" + packages = ["."] + pruneopts = "" + revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8" + +[[projects]] + digest = "1:f0620375dd1f6251d9973b5f2596228cc8042e887cd7f827e4220bc1ce8c30e2" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "" + revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" + version = "v2.2.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "cloud.google.com/go/monitoring/apiv3", + "collectd.org/api", + "collectd.org/network", + "github.com/Azure/go-autorest/autorest", + "github.com/Azure/go-autorest/autorest/azure/auth", + "github.com/Microsoft/ApplicationInsights-Go/appinsights", + "github.com/Shopify/sarama", + "github.com/StackExchange/wmi", + "github.com/aerospike/aerospike-client-go", + "github.com/amir/raidman", + "github.com/apache/thrift/lib/go/thrift", + "github.com/aws/aws-sdk-go/aws", + "github.com/aws/aws-sdk-go/aws/client", + "github.com/aws/aws-sdk-go/aws/credentials", + "github.com/aws/aws-sdk-go/aws/credentials/stscreds", + "github.com/aws/aws-sdk-go/aws/session", + "github.com/aws/aws-sdk-go/service/cloudwatch", + "github.com/aws/aws-sdk-go/service/kinesis", + "github.com/bsm/sarama-cluster", + "github.com/couchbase/go-couchbase", + "github.com/denisenkom/go-mssqldb", + "github.com/dgrijalva/jwt-go", + "github.com/docker/docker/api/types", + "github.com/docker/docker/api/types/container", + "github.com/docker/docker/api/types/filters", + "github.com/docker/docker/api/types/registry", + "github.com/docker/docker/api/types/swarm", + "github.com/docker/docker/client", + "github.com/eclipse/paho.mqtt.golang", + "github.com/go-logfmt/logfmt", + "github.com/go-redis/redis", + "github.com/go-sql-driver/mysql", + "github.com/gobwas/glob", + "github.com/golang/protobuf/proto", + "github.com/golang/protobuf/ptypes/timestamp", + "github.com/google/go-cmp/cmp", + "github.com/gorilla/mux", + "github.com/hashicorp/consul/api", + "github.com/influxdata/go-syslog/rfc5424", + "github.com/influxdata/go-syslog/rfc5425", + "github.com/influxdata/tail", + "github.com/influxdata/toml", + "github.com/influxdata/toml/ast", + "github.com/influxdata/wlog", 
+ "github.com/jackc/pgx", + "github.com/jackc/pgx/pgtype", + "github.com/jackc/pgx/stdlib", + "github.com/kardianos/service", + "github.com/kballard/go-shellquote", + "github.com/matttproud/golang_protobuf_extensions/pbutil", + "github.com/miekg/dns", + "github.com/multiplay/go-ts3", + "github.com/nats-io/gnatsd/server", + "github.com/nats-io/go-nats", + "github.com/nsqio/go-nsq", + "github.com/openzipkin/zipkin-go-opentracing", + "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore", + "github.com/pkg/errors", + "github.com/prometheus/client_golang/prometheus", + "github.com/prometheus/client_golang/prometheus/promhttp", + "github.com/prometheus/client_model/go", + "github.com/prometheus/common/expfmt", + "github.com/prometheus/common/log", + "github.com/satori/go.uuid", + "github.com/shirou/gopsutil/cpu", + "github.com/shirou/gopsutil/disk", + "github.com/shirou/gopsutil/host", + "github.com/shirou/gopsutil/load", + "github.com/shirou/gopsutil/mem", + "github.com/shirou/gopsutil/net", + "github.com/shirou/gopsutil/process", + "github.com/soniah/gosnmp", + "github.com/streadway/amqp", + "github.com/stretchr/testify/assert", + "github.com/stretchr/testify/mock", + "github.com/stretchr/testify/require", + "github.com/tidwall/gjson", + "github.com/vjeantet/grok", + "github.com/vmware/govmomi", + "github.com/vmware/govmomi/object", + "github.com/vmware/govmomi/performance", + "github.com/vmware/govmomi/session", + "github.com/vmware/govmomi/simulator", + "github.com/vmware/govmomi/view", + "github.com/vmware/govmomi/vim25", + "github.com/vmware/govmomi/vim25/methods", + "github.com/vmware/govmomi/vim25/mo", + "github.com/vmware/govmomi/vim25/soap", + "github.com/vmware/govmomi/vim25/types", + "github.com/wvanbergen/kafka/consumergroup", + "golang.org/x/net/context", + "golang.org/x/net/html/charset", + "golang.org/x/oauth2", + "golang.org/x/oauth2/clientcredentials", + "golang.org/x/sys/unix", + "golang.org/x/sys/windows", + "golang.org/x/sys/windows/svc", + "golang.org/x/sys/windows/svc/mgr", + "google.golang.org/api/option", + "google.golang.org/genproto/googleapis/api/metric", + "google.golang.org/genproto/googleapis/api/monitoredres", + "google.golang.org/genproto/googleapis/monitoring/v3", + "google.golang.org/grpc", + "google.golang.org/grpc/codes", + "google.golang.org/grpc/credentials", + "google.golang.org/grpc/status", + "gopkg.in/gorethink/gorethink.v3", + "gopkg.in/ldap.v2", + "gopkg.in/mgo.v2", + "gopkg.in/mgo.v2/bson", + "gopkg.in/olivere/elastic.v5", + "gopkg.in/yaml.v2", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 2fa3e4c40..7566b68b1 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -222,6 +222,10 @@ source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz" name = "gopkg.in/fsnotify.v1" +[[constraint]] + branch = "master" + name = "google.golang.org/genproto" + [[constraint]] name = "github.com/vmware/govmomi" version = "0.18.0" diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index 24748c53e..94c1421b5 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -28,5 +28,6 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/riemann" _ "github.com/influxdata/telegraf/plugins/outputs/riemann_legacy" _ "github.com/influxdata/telegraf/plugins/outputs/socket_writer" + _ "github.com/influxdata/telegraf/plugins/outputs/stackdriver" _ "github.com/influxdata/telegraf/plugins/outputs/wavefront" ) diff --git a/plugins/outputs/stackdriver/README.md 
b/plugins/outputs/stackdriver/README.md new file mode 100644 index 000000000..c3ecea790 --- /dev/null +++ b/plugins/outputs/stackdriver/README.md @@ -0,0 +1,18 @@ +# Stackdriver Output Plugin + +This plugin writes to the [Google Cloud Stackdriver API](https://cloud.google.com/monitoring/api/v3/) +and requires [authentication](https://cloud.google.com/docs/authentication/getting-started) with Google Cloud using either a service account or user credentials. See the [Stackdriver documentation](https://cloud.google.com/stackdriver/pricing#stackdriver_monitoring_services) for details on pricing. + +Requires `project` to specify where Stackdriver metrics will be delivered to. + +Metrics are grouped by the `namespace` variable and metric key - eg: `custom.googleapis.com/telegraf/system/load5` + +### Configuration + +``` + # GCP Project + project = "erudite-bloom-151019" + + # The namespace for the metric descriptor + namespace = "telegraf" +``` \ No newline at end of file diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go new file mode 100644 index 000000000..0b2403358 --- /dev/null +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -0,0 +1,303 @@ +package stackdriver + +import ( + "context" + "fmt" + "log" + "path" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs" + + // Imports the Stackdriver Monitoring client package. + monitoring "cloud.google.com/go/monitoring/apiv3" + googlepb "github.com/golang/protobuf/ptypes/timestamp" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +// Stackdriver is the Google Stackdriver config info. +type Stackdriver struct { + Project string + Namespace string + + client *monitoring.MetricClient +} + +const ( + // QuotaLabelsPerMetricDescriptor is the limit + // to labels (tags) per metric descriptor. + QuotaLabelsPerMetricDescriptor = 10 + // QuotaStringLengthForLabelKey is the limit + // to string length for label key. + QuotaStringLengthForLabelKey = 100 + // QuotaStringLengthForLabelValue is the limit + // to string length for label value. + QuotaStringLengthForLabelValue = 1024 + + // StartTime for cumulative metrics. + StartTime = int64(1) + // MaxInt is the max int64 value. + MaxInt = int(^uint(0) >> 1) +) + +var sampleConfig = ` + # GCP Project + project = "erudite-bloom-151019" + + # The namespace for the metric descriptor + namespace = "telegraf" +` + +// Connect initiates the primary connection to the GCP project. +func (s *Stackdriver) Connect() error { + if s.Project == "" { + return fmt.Errorf("Project is a required field for stackdriver output") + } + + if s.Namespace == "" { + return fmt.Errorf("Namespace is a required field for stackdriver output") + } + + if s.client == nil { + ctx := context.Background() + client, err := monitoring.NewMetricClient(ctx) + if err != nil { + return err + } + s.client = client + } + + return nil +} + +// Write the metrics to Google Cloud Stackdriver. +func (s *Stackdriver) Write(metrics []telegraf.Metric) error { + ctx := context.Background() + + for _, m := range metrics { + timeSeries := []*monitoringpb.TimeSeries{} + + for _, f := range m.FieldList() { + value, err := getStackdriverTypedValue(f.Value) + if err != nil { + log.Printf("E! 
[output.stackdriver] get type failed: %s", err) + continue + } + + metricKind, err := getStackdriverMetricKind(m.Type()) + if err != nil { + log.Printf("E! [output.stackdriver] get metric failed: %s", err) + continue + } + + timeInterval, err := getStackdriverTimeInterval(metricKind, StartTime, m.Time().Unix()) + if err != nil { + log.Printf("E! [output.stackdriver] get time interval failed: %s", err) + continue + } + + // Prepare an individual data point. + dataPoint := &monitoringpb.Point{ + Interval: timeInterval, + Value: value, + } + + // Prepare time series. + timeSeries = append(timeSeries, + &monitoringpb.TimeSeries{ + Metric: &metricpb.Metric{ + Type: path.Join("custom.googleapis.com", s.Namespace, m.Name(), f.Key), + Labels: getStackdriverLabels(m.TagList()), + }, + MetricKind: metricKind, + Resource: &monitoredrespb.MonitoredResource{ + Type: "global", + Labels: map[string]string{ + "project_id": s.Project, + }, + }, + Points: []*monitoringpb.Point{ + dataPoint, + }, + }) + } + + if len(timeSeries) < 1 { + continue + } + + // Prepare time series request. + timeSeriesRequest := &monitoringpb.CreateTimeSeriesRequest{ + Name: monitoring.MetricProjectPath(s.Project), + TimeSeries: timeSeries, + } + + // Create the time series in Stackdriver. + err := s.client.CreateTimeSeries(ctx, timeSeriesRequest) + if err != nil { + log.Printf("E! [output.stackdriver] unable to write to Stackdriver: %s", err) + return err + } + } + + return nil +} + +func getStackdriverTimeInterval( + m metricpb.MetricDescriptor_MetricKind, + start int64, + end int64, +) (*monitoringpb.TimeInterval, error) { + switch m { + case metricpb.MetricDescriptor_GAUGE: + return &monitoringpb.TimeInterval{ + EndTime: &googlepb.Timestamp{ + Seconds: end, + }, + }, nil + case metricpb.MetricDescriptor_CUMULATIVE: + return &monitoringpb.TimeInterval{ + StartTime: &googlepb.Timestamp{ + Seconds: start, + }, + EndTime: &googlepb.Timestamp{ + Seconds: end, + }, + }, nil + case metricpb.MetricDescriptor_DELTA, metricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED: + fallthrough + default: + return nil, fmt.Errorf("unsupported metric kind %T", m) + } +} + +func getStackdriverMetricKind(vt telegraf.ValueType) (metricpb.MetricDescriptor_MetricKind, error) { + switch vt { + case telegraf.Untyped: + return metricpb.MetricDescriptor_GAUGE, nil + case telegraf.Gauge: + return metricpb.MetricDescriptor_GAUGE, nil + case telegraf.Counter: + return metricpb.MetricDescriptor_CUMULATIVE, nil + case telegraf.Histogram, telegraf.Summary: + fallthrough + default: + return metricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, fmt.Errorf("unsupported telegraf value type") + } +} + +func getStackdriverTypedValue(value interface{}) (*monitoringpb.TypedValue, error) { + switch v := value.(type) { + case uint64: + if v <= uint64(MaxInt) { + return &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(v), + }, + }, nil + } + return &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(MaxInt), + }, + }, nil + case int64: + return &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(v), + }, + }, nil + case float64: + return &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: float64(v), + }, + }, nil + case bool: + return &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_BoolValue{ + BoolValue: bool(v), + }, + }, nil + case string: + return &monitoringpb.TypedValue{ + Value: 
&monitoringpb.TypedValue_StringValue{ + StringValue: string(v), + }, + }, nil + default: + return nil, fmt.Errorf("value type \"%T\" not supported for stackdriver custom metrics", v) + } +} + +func getStackdriverLabels(tags []*telegraf.Tag) map[string]string { + labels := make(map[string]string) + for _, t := range tags { + labels[t.Key] = t.Value + } + for k, v := range labels { + if len(k) > QuotaStringLengthForLabelKey { + log.Printf( + "W! [output.stackdriver] removing tag [%s] key exceeds string length for label key [%d]", + k, + QuotaStringLengthForLabelKey, + ) + delete(labels, k) + continue + } + if len(v) > QuotaStringLengthForLabelValue { + log.Printf( + "W! [output.stackdriver] removing tag [%s] value exceeds string length for label value [%d]", + k, + QuotaStringLengthForLabelValue, + ) + delete(labels, k) + continue + } + } + if len(labels) > QuotaLabelsPerMetricDescriptor { + excess := len(labels) - QuotaLabelsPerMetricDescriptor + log.Printf( + "W! [output.stackdriver] tag count [%d] exceeds quota for stackdriver labels [%d] removing [%d] random tags", + len(labels), + QuotaLabelsPerMetricDescriptor, + excess, + ) + for k := range labels { + if excess == 0 { + break + } + excess-- + delete(labels, k) + } + } + + return labels +} + +// Close will terminate the session to the backend, returning error if an issue arises. +func (s *Stackdriver) Close() error { + return s.client.Close() +} + +// SampleConfig returns the formatted sample configuration for the plugin. +func (s *Stackdriver) SampleConfig() string { + return sampleConfig +} + +// Description returns the human-readable function definition of the plugin. +func (s *Stackdriver) Description() string { + return "Configuration for Google Cloud Stackdriver to send metrics to" +} + +func newStackdriver() *Stackdriver { + return &Stackdriver{} +} + +func init() { + outputs.Add("stackdriver", func() telegraf.Output { + return newStackdriver() + }) +} diff --git a/plugins/outputs/stackdriver/stackdriver_test.go b/plugins/outputs/stackdriver/stackdriver_test.go new file mode 100644 index 000000000..94a3e6ce4 --- /dev/null +++ b/plugins/outputs/stackdriver/stackdriver_test.go @@ -0,0 +1,119 @@ +package stackdriver + +import ( + "context" + "fmt" + "log" + "net" + "os" + "strings" + "testing" + + monitoring "cloud.google.com/go/monitoring/apiv3" + "github.com/golang/protobuf/proto" + emptypb "github.com/golang/protobuf/ptypes/empty" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" + "google.golang.org/api/option" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var mockMetric mockMetricServer + +type mockMetricServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + monitoringpb.MetricServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
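+	// Tests point err at a sentinel error to drive CreateTimeSeries down its
+	// failure path; the request is still recorded in reqs first, so assertions
+	// about what was sent remain possible even for failed writes.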
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockMetricServer) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func TestMain(m *testing.M) { + serv := grpc.NewServer() + monitoringpb.RegisterMetricServiceServer(serv, &mockMetric) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestWrite(t *testing.T) { + expectedResponse := &emptypb.Empty{} + mockMetric.err = nil + mockMetric.reqs = nil + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + c, err := monitoring.NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + s := &Stackdriver{ + Project: fmt.Sprintf("projects/%s", "[PROJECT]"), + Namespace: "test", + client: c, + } + + err = s.Connect() + require.NoError(t, err) + err = s.Write(testutil.MockMetrics()) + require.NoError(t, err) +} + +func TestGetStackdriverLabels(t *testing.T) { + tags := []*telegraf.Tag{ + {Key: "project", Value: "bar"}, + {Key: "discuss", Value: "revolutionary"}, + {Key: "marble", Value: "discount"}, + {Key: "applied", Value: "falsify"}, + {Key: "test", Value: "foo"}, + {Key: "porter", Value: "discount"}, + {Key: "play", Value: "tiger"}, + {Key: "fireplace", Value: "display"}, + {Key: "host", Value: "this"}, + {Key: "name", Value: "bat"}, + {Key: "device", Value: "local"}, + {Key: "reserve", Value: "publication"}, + {Key: "xpfqacltlmpguimhtjlou2qlmf9uqqwk3teajwlwqkoxtsppbnjksaxvzc1aa973pho9m96gfnl5op8ku7sv93rexyx42qe3zty12ityv", Value: "keyquota"}, + {Key: "valuequota", Value: "icym5wcpejnhljcvy2vwk15svmhrtueoppwlvix61vlbaeedufn1g6u4jgwjoekwew9s2dboxtgrkiyuircnl8h1lbzntt9gzcf60qunhxurhiz0g2bynzy1v6eyn4ravndeiiugobsrsj2bfaguahg4gxn7nx4irwfknunhkk6jdlldevawj8levebjajcrcbeugewd14fa8o34ycfwx2ymalyeqxhfqrsksxnii2deqq6cghrzi6qzwmittkzdtye3imoygqmjjshiskvnzz1e4ipd9c6wfor5jsygn1kvcg6jm4clnsl1fnxotbei9xp4swrkjpgursmfmkyvxcgq9hoy435nwnolo3ipnvdlhk6pmlzpdjn6gqi3v9gv7jn5ro2p1t5ufxzfsvqq1fyrgoi7gvmttil1banh3cftkph1dcoaqfhl7y0wkvhwwvrmslmmxp1wedyn8bacd7akmjgfwdvcmrymbzvmrzfvq1gs1xnmmg8rsfxci2h6r1ralo3splf4f3bdg4c7cy0yy9qbxzxhcmdpwekwc7tdjs8uj6wmofm2aor4hum8nwyfwwlxy3yvsnbjy32oucsrmhcnu6l2i8laujkrhvsr9fcix5jflygznlydbqw5uhw1rg1g5wiihqumwmqgggemzoaivm3ut41vjaff4uqtqyuhuwblmuiphfkd7si49vgeeswzg7tpuw0oxmkesgibkcjtev2h9ouxzjs3eb71jffhdacyiuyhuxwvm5bnrjewbm4x2kmhgbirz3eoj7ijgplggdkx5vixufg65ont8zi1jabsuxx0vsqgprunwkugqkxg2r7iy6fmgs4lob4dlseinowkst6gp6x1ejreauyzjz7atzm3hbmr5rbynuqp4lxrnhhcbuoun69mavvaaki0bdz5ybmbbbz5qdv0odtpjo2aezat5uosjuhzbvic05jlyclikynjgfhencdkz3qcqzbzhnsynj1zdke0sk4zfpvfyryzsxv9pu0qm"}, + } + + labels := getStackdriverLabels(tags) + require.Equal(t, QuotaLabelsPerMetricDescriptor, len(labels)) +} From 106f5b5ca8dde9cd6637f17d658f3329c4994608 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 16 Oct 2018 12:23:05 -0700 Subject: [PATCH 0286/1815] Update changelog --- CHANGELOG.md | 10 +++++++--- README.md | 1 + 
plugins/outputs/stackdriver/README.md | 5 +++-- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 47ef1a4cd..393c27707 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ ## v1.9 [unreleased] -### Release Notes +#### Release Notes - The `http_listener` input plugin has been renamed to `influxdb_listener` and use of the original name is deprecated. The new name better describes the @@ -8,11 +8,15 @@ transfer of metrics in any format via HTTP, it is recommended to use `http_listener_v2` instead. -### New Inputs +#### New Inputs - [http_listener_v2](/plugins/inputs/http_listener_v2/README.md) - Contributed by @jul1u5 -### Features +#### New Outputs + +- [stackdriver](/plugins/outputs/stackdriver/README.md) - Contributed by @jamesmaidment + +#### Features - [#4686](https://github.com/influxdata/telegraf/pull/4686): Add replace function to strings processor. - [#4754](https://github.com/influxdata/telegraf/pull/4754): Query servers in parallel in dns_query input. diff --git a/README.md b/README.md index c7f4be899..17b248247 100644 --- a/README.md +++ b/README.md @@ -337,6 +337,7 @@ For documentation on the latest development code see the [documentation index][d * [riemann](./plugins/outputs/riemann) * [riemann_legacy](./plugins/outputs/riemann_legacy) * [socket_writer](./plugins/outputs/socket_writer) +* [stackdriver](./plugins/outputs/stackdriver) * [tcp](./plugins/outputs/socket_writer) * [udp](./plugins/outputs/socket_writer) * [wavefront](./plugins/outputs/wavefront) diff --git a/plugins/outputs/stackdriver/README.md b/plugins/outputs/stackdriver/README.md index c3ecea790..ead8a0a6e 100644 --- a/plugins/outputs/stackdriver/README.md +++ b/plugins/outputs/stackdriver/README.md @@ -9,10 +9,11 @@ Metrics are grouped by the `namespace` variable and metric key - eg: `custom.goo ### Configuration -``` +```toml +[[outputs.stackdriver]] # GCP Project project = "erudite-bloom-151019" # The namespace for the metric descriptor namespace = "telegraf" -``` \ No newline at end of file +``` From cd865cfd2274eea32bf37ff944b8a693bc47490a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 17 Oct 2018 11:44:48 -0700 Subject: [PATCH 0287/1815] Lower authorization errors to debug level in mongodb input (#4869) --- plugins/inputs/mongodb/mongodb_server.go | 25 ++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index 1d9db9181..6ab236b58 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -3,6 +3,7 @@ package mongodb import ( "log" "net/url" + "strings" "time" "github.com/influxdata/telegraf" @@ -26,6 +27,10 @@ type oplogEntry struct { Timestamp bson.MongoTimestamp `bson:"ts"` } +func IsAuthorization(err error) bool { + return strings.Contains(err.Error(), "not authorized") +} + func (s *Server) gatherOplogStats() *OplogStats { stats := &OplogStats{} localdb := s.Session.DB("local") @@ -39,14 +44,22 @@ func (s *Server) gatherOplogStats() *OplogStats { if err == mgo.ErrNotFound { continue } - log.Println("E! Error getting first oplog entry (" + err.Error() + ")") + if IsAuthorization(err) { + log.Println("D! Error getting first oplog entry (" + err.Error() + ")") + } else { + log.Println("E! 
Error getting first oplog entry (" + err.Error() + ")") + } return stats } if err := localdb.C(collection_name).Find(query).Sort("-$natural").Limit(1).One(&op_last); err != nil { if err == mgo.ErrNotFound { continue } - log.Println("E! Error getting last oplog entry (" + err.Error() + ")") + if IsAuthorization(err) { + log.Println("D! Error getting last oplog entry (" + err.Error() + ")") + } else { + log.Println("E! Error getting last oplog entry (" + err.Error() + ")") + } return stats } } @@ -98,7 +111,11 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error }, }, &resultShards) if err != nil { - log.Println("E! Error getting database shard stats (" + err.Error() + ")") + if IsAuthorization(err) { + log.Println("D! Error getting database shard stats (" + err.Error() + ")") + } else { + log.Println("E! Error getting database shard stats (" + err.Error() + ")") + } } oplogStats := s.gatherOplogStats() From f10de93da487cef149b58d0d807ca2faae974e84 Mon Sep 17 00:00:00 2001 From: Greg Date: Wed, 17 Oct 2018 12:46:00 -0600 Subject: [PATCH 0288/1815] Return correct response code on ping input (#4875) --- plugins/inputs/ping/README.md | 7 +++++++ plugins/inputs/ping/ping.go | 1 + 2 files changed, 8 insertions(+) diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 4996fdc37..4f953c8e1 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -31,6 +31,13 @@ apt-get install iputils-ping ## Interface or source address to send ping from (ping -I ) ## on Darwin and Freebsd only source address possible: (ping -S ) # interface = "" + + ## Specify the ping executable binary, default is "ping" + # binary = "ping" + + ## Arguments for ping command + ## when arguments is not empty, other options (ping_interval, timeout, etc) will be ignored + # arguments = ["-c", "3"] ``` ### Metrics: diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index a95f27ebf..53c109d75 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -133,6 +133,7 @@ func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) { if exitError, ok := err.(*exec.ExitError); ok { if ws, ok := exitError.Sys().(syscall.WaitStatus); ok { status = ws.ExitStatus() + fields["result_code"] = status } } From 48745c31711d089cbfbe73c7f8c319e80d3cee9d Mon Sep 17 00:00:00 2001 From: Fred Cox Date: Wed, 17 Oct 2018 21:46:44 +0300 Subject: [PATCH 0289/1815] Fix segfault in x509_cert (#4874) --- plugins/inputs/x509_cert/README.md | 2 +- plugins/inputs/x509_cert/dev/telegraf.conf | 5 +++++ plugins/inputs/x509_cert/x509_cert.go | 5 ++++- plugins/inputs/x509_cert/x509_cert_test.go | 18 ++++++++++++++++++ 4 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 plugins/inputs/x509_cert/dev/telegraf.conf diff --git a/plugins/inputs/x509_cert/README.md b/plugins/inputs/x509_cert/README.md index 781b9332a..82fe520f9 100644 --- a/plugins/inputs/x509_cert/README.md +++ b/plugins/inputs/x509_cert/README.md @@ -10,7 +10,7 @@ file or network connection.
# Reads metrics from a SSL certificate [[inputs.x509_cert]] ## List certificate sources - sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "https://example.org"] + sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "https://example.org:443"] ## Timeout for SSL connection # timeout = "5s" diff --git a/plugins/inputs/x509_cert/dev/telegraf.conf b/plugins/inputs/x509_cert/dev/telegraf.conf new file mode 100644 index 000000000..1eda94f02 --- /dev/null +++ b/plugins/inputs/x509_cert/dev/telegraf.conf @@ -0,0 +1,5 @@ +[[inputs.x509_cert]] + sources = ["https://www.influxdata.com:443"] + +[[outputs.file]] + files = ["stdout"] diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index affd3fa04..45eddf3aa 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -80,7 +80,10 @@ func (c *X509Cert) getCert(location string, timeout time.Duration) ([]*x509.Cert } defer ipConn.Close() - tlsCfg.ServerName = u.Host + if tlsCfg == nil { + tlsCfg = &tls.Config{} + } + tlsCfg.ServerName = u.Hostname() conn := tls.Client(ipConn, tlsCfg) defer conn.Close() diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index f4c6c8738..fc75bc825 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -4,6 +4,8 @@ import ( "crypto/tls" "encoding/base64" "fmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "io/ioutil" "os" "testing" @@ -203,3 +205,19 @@ func TestStrings(t *testing.T) { }) } } + +func TestGatherCert(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + m := &X509Cert{ + Sources: []string{"https://www.influxdata.com:443"}, + } + + var acc testutil.Accumulator + err := m.Gather(&acc) + require.NoError(t, err) + + assert.True(t, acc.HasMeasurement("x509_cert")) +} From 6e8b7e3cc677ae12beb0e7b8d103d37cd07c8edd Mon Sep 17 00:00:00 2001 From: Greg Date: Wed, 17 Oct 2018 12:43:58 -0600 Subject: [PATCH 0290/1815] Fix panic in logparser input (#4849) --- plugins/inputs/logparser/logparser.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index 089c22d94..20ebeaa9e 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -131,8 +131,14 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error { l.done = make(chan struct{}) l.tailers = make(map[string]*tail.Tail) + mName := "logparser" + if l.GrokConfig.MeasurementName != "" { + mName = l.GrokConfig.MeasurementName + } + // Looks for fields which implement LogParser interface config := &parsers.Config{ + MetricName: mName, GrokPatterns: l.GrokConfig.Patterns, GrokNamedPatterns: l.GrokConfig.NamedPatterns, GrokCustomPatterns: l.GrokConfig.CustomPatterns, @@ -260,7 +266,7 @@ func (l *LogParserPlugin) parser() { if m != nil { tags := m.Tags() tags["path"] = entry.path - l.acc.AddFields(l.GrokConfig.MeasurementName, m.Fields(), tags, m.Time()) + l.acc.AddFields(m.Name(), m.Fields(), tags, m.Time()) } } else { log.Println("E! 
Error parsing log line: " + err.Error()) From 42fa8f437b891c09fcc8933afd471bbf31938072 Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Wed, 17 Oct 2018 13:02:46 -0600 Subject: [PATCH 0291/1815] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 393c27707..5668b0da7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,10 @@ - [#4854](https://github.com/influxdata/telegraf/pull/4854): Use container name from list if no name in container stats. - [#4850](https://github.com/influxdata/telegraf/pull/4850): Prevent panic in filecount input on error in file stat. - [#4846](https://github.com/influxdata/telegraf/pull/4846): Fix mqtt_consumer connect and reconnect. +- [#4849](https://github.com/influxdata/telegraf/pull/4849): Fix panic in logparser input. +- [#4869](https://github.com/influxdata/telegraf/pull/4869): Lower authorization errors to debug level in mongodb input. +- [#4875](https://github.com/influxdata/telegraf/pull/4875): Return correct response code on ping input. +- [#4874](https://github.com/influxdata/telegraf/pull/4874): Fix segfault in x509_cert input. ## v1.8.1 [2018-10-03] From 3202bcdf191c12ced43f70b8246bde01e91cdced Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Wed, 17 Oct 2018 13:30:11 -0600 Subject: [PATCH 0292/1815] Set 1.8.2 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5668b0da7..9e047478f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,7 +30,7 @@ - [#4807](https://github.com/influxdata/telegraf/pull/4807): Add entity-body compression to http output. - [#4838](https://github.com/influxdata/telegraf/pull/4838): Add telegraf version to User-Agent header. -## v1.8.2 [unreleased] +## v1.8.2 [2018-10-17] ### Bugfixes From 7cb75ca9791fef9316bd775f924b2e1a3c045476 Mon Sep 17 00:00:00 2001 From: Dirk Pahl Date: Thu, 18 Oct 2018 21:59:03 +0200 Subject: [PATCH 0293/1815] Add more detailed descriptions for fields in swap input (#4763) --- plugins/inputs/swap/README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/swap/README.md b/plugins/inputs/swap/README.md index a5444ff2a..983892871 100644 --- a/plugins/inputs/swap/README.md +++ b/plugins/inputs/swap/README.md @@ -16,12 +16,12 @@ For more information on what swap memory is, read [All about Linux swap space](h - swap - fields: - - free (int) - - total (int) - - used (int) - - used_percent (float) - - in (int) - - out (int) + - free (int, bytes): free swap memory + - total (int, bytes): total swap memory + - used (int, bytes): used swap memory + - used_percent (float, percent): percentage of swap memory used + - in (int, bytes): data swapped in since last boot calculated from page number + - out (int, bytes): data swapped out since last boot calculated from page number ### Example Output: From 136a5724bdaefdd14a98aad717b0a862928d7846 Mon Sep 17 00:00:00 2001 From: Trevor Pounds Date: Thu, 18 Oct 2018 16:05:43 -0400 Subject: [PATCH 0294/1815] Use DescribeStreamSummary in place of ListStreams in kinesis output (#4864) --- Gopkg.lock | 6 ++--- Gopkg.toml | 2 +- plugins/outputs/kinesis/kinesis.go | 42 ++++++------------------------ 3 files changed, 12 insertions(+), 38 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 54b40a863..f5f119a7f 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -145,7 +145,7 @@ revision = "f2867c24984aa53edec54a138c03db934221bdea" [[projects]] - digest = 
"1:65a05bde9b02f645c73afa61c9f6af92d94d726c81a268f45cc70218bd58de65" + digest = "1:996727880e06dcf037f712c4d046e241d1b1b01844636fefb0fbaa480cfd230e" name = "github.com/aws/aws-sdk-go" packages = [ "aws", @@ -181,8 +181,8 @@ "service/sts", ] pruneopts = "" - revision = "8cf662a972fa7fba8f2c1ec57648cf840e2bb401" - version = "v1.14.30" + revision = "bf8067ceb6e7f51e150c218972dccfeeed892b85" + version = "v1.15.54" [[projects]] branch = "master" diff --git a/Gopkg.toml b/Gopkg.toml index 7566b68b1..dba4ec4b3 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -16,7 +16,7 @@ [[constraint]] name = "github.com/aws/aws-sdk-go" - version = "1.14.8" + version = "1.15.54" [[constraint]] name = "github.com/bsm/sarama-cluster" diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index 014379146..402f95156 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -2,7 +2,6 @@ package kinesis import ( "log" - "os" "time" "github.com/aws/aws-sdk-go/aws" @@ -115,17 +114,11 @@ func (k *KinesisOutput) Description() string { return "Configuration for the AWS Kinesis output." } -func checkstream(l []*string, s string) bool { - // Check if the StreamName exists in the slice returned from the ListStreams API request. - for _, stream := range l { - if *stream == s { - return true - } - } - return false -} - func (k *KinesisOutput) Connect() error { + if k.Partition == nil { + log.Print("E! kinesis : Deprecated paritionkey configuration in use, please consider using outputs.kinesis.partition") + } + // We attempt first to create a session to Kinesis using an IAMS role, if that fails it will fall through to using // environment variables, and then Shared Credentials. if k.Debug { @@ -145,29 +138,10 @@ func (k *KinesisOutput) Connect() error { configProvider := credentialConfig.Credentials() svc := kinesis.New(configProvider) - KinesisParams := &kinesis.ListStreamsInput{ - Limit: aws.Int64(100), - } - - resp, err := svc.ListStreams(KinesisParams) - - if err != nil { - log.Printf("E! kinesis: Error in ListSteams API call : %+v \n", err) - } - - if checkstream(resp.StreamNames, k.StreamName) { - if k.Debug { - log.Printf("E! kinesis: Stream Exists") - } - k.svc = svc - return nil - } else { - log.Printf("E! kinesis : You have configured a StreamName %+v which does not exist. exiting.", k.StreamName) - os.Exit(1) - } - if k.Partition == nil { - log.Print("E! kinesis : Deprecated paritionkey configuration in use, please consider using outputs.kinesis.partition") - } + _, err := svc.DescribeStreamSummary(&kinesis.DescribeStreamSummaryInput{ + StreamName: aws.String(k.StreamName), + }) + k.svc = svc return err } From fb435b2fa573a359f041347b5582ccdf6a56fbfc Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 18 Oct 2018 13:07:33 -0700 Subject: [PATCH 0295/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9e047478f..b0b4a07fe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,7 @@ - [#4811](https://github.com/influxdata/telegraf/pull/4811): Add windows service name lookup to procstat input. - [#4807](https://github.com/influxdata/telegraf/pull/4807): Add entity-body compression to http output. - [#4838](https://github.com/influxdata/telegraf/pull/4838): Add telegraf version to User-Agent header. +- [#4864](https://github.com/influxdata/telegraf/pull/4864): Use DescribeStreamSummary in place of ListStreams in kinesis output. 
## v1.8.2 [2018-10-17] From ecaaa20ed059c07cd80b216229adea70e16daddf Mon Sep 17 00:00:00 2001 From: Trevor Pounds Date: Thu, 18 Oct 2018 18:45:18 -0400 Subject: [PATCH 0296/1815] Update CI to Go 1.10.4 (#4860) --- .circleci/config.yml | 2 +- Makefile | 4 ++-- appveyor.yml | 4 ++-- scripts/ci-1.10.docker | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 2ddcce1e6..d9c085dff 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -7,7 +7,7 @@ defaults: - image: 'quay.io/influxdb/telegraf-ci:1.9.7' go-1_10: &go-1_10 docker: - - image: 'quay.io/influxdb/telegraf-ci:1.10.3' + - image: 'quay.io/influxdb/telegraf-ci:1.10.4' version: 2 jobs: diff --git a/Makefile b/Makefile index e5b0ce8de..d19d4d4cd 100644 --- a/Makefile +++ b/Makefile @@ -133,8 +133,8 @@ plugin-%: .PHONY: ci-1.10 ci-1.10: - docker build -t quay.io/influxdb/telegraf-ci:1.10.3 - < scripts/ci-1.10.docker - docker push quay.io/influxdb/telegraf-ci:1.10.3 + docker build -t quay.io/influxdb/telegraf-ci:1.10.4 - < scripts/ci-1.10.docker + docker push quay.io/influxdb/telegraf-ci:1.10.4 .PHONY: ci-1.9 ci-1.9: diff --git a/appveyor.yml b/appveyor.yml index a1af84d6c..16d53388e 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -12,11 +12,11 @@ platform: x64 install: - IF NOT EXIST "C:\Cache" mkdir C:\Cache - - IF NOT EXIST "C:\Cache\go1.10.3.msi" curl -o "C:\Cache\go1.10.3.msi" https://storage.googleapis.com/golang/go1.10.3.windows-amd64.msi + - IF NOT EXIST "C:\Cache\go1.10.4.msi" curl -o "C:\Cache\go1.10.4.msi" https://storage.googleapis.com/golang/go1.10.4.windows-amd64.msi - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip - IF EXIST "C:\Go" rmdir /S /Q C:\Go - - msiexec.exe /i "C:\Cache\go1.10.3.msi" /quiet + - msiexec.exe /i "C:\Cache\go1.10.4.msi" /quiet - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y - go get -d github.com/golang/dep diff --git a/scripts/ci-1.10.docker b/scripts/ci-1.10.docker index 33075adfc..b37e908ce 100644 --- a/scripts/ci-1.10.docker +++ b/scripts/ci-1.10.docker @@ -1,4 +1,4 @@ -FROM golang:1.10.3 +FROM golang:1.10.4 RUN chmod -R 755 "$GOPATH" From f5af2ab7993a8ac3331019038cd06b2374ff0c3e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 18 Oct 2018 23:26:42 -0700 Subject: [PATCH 0297/1815] Remove dead link from logparser sampleconfig and fix syntax (#4883) --- plugins/inputs/logparser/README.md | 4 ++-- plugins/inputs/logparser/logparser.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index 94e37f4c2..2bbd5253d 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -7,6 +7,7 @@ regex patterns. ### Configuration: ```toml +# Stream and parse log file(s). [[inputs.logparser]] ## Log files to parse. ## These accept standard unix glob matching rules, but with the addition of @@ -25,7 +26,6 @@ regex patterns. # watch_method = "inotify" ## Parse logstash-style "grok" patterns: - ## Telegraf built-in parsing patterns: https://goo.gl/dkay10 [inputs.logparser.grok] ## This is a list of patterns to check the given log file(s) for. ## Note that adding patterns here increases processing time. The most @@ -54,7 +54,7 @@ regex patterns. 
## 1. Local -- interpret based on machine localtime ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones ## 3. UTC -- or blank/unspecified, will return timestamp in UTC - timezone = "Canada/Eastern" - ''' + # timezone = "Canada/Eastern" ` // SampleConfig returns the sample configuration for the plugin From b9107641ecfa652610bc3447c02f2cf8b72a3a97 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 18 Oct 2018 23:27:27 -0700 Subject: [PATCH 0298/1815] Regenerate telegraf.conf --- etc/telegraf.conf | 119 ++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 116 insertions(+), 3 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 0a1607281..a8a4f6679 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -349,6 +349,9 @@ # ## Datadog API key # apikey = "my-secret-key" # required. # +# # The base endpoint URL can optionally be specified but it defaults to: +# #url = "https://app.datadoghq.com/api/v1/series" +# # ## Connection timeout. # # timeout = "5s" @@ -492,6 +495,10 @@ # # [outputs.http.headers] # # # Should be set manually to "application/json" for json data_format # # Content-Type = "text/plain; charset=utf-8" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" # # Configuration for sending metrics to InfluxDB @@ -508,7 +515,7 @@ # ## Organization is the name of the organization you wish to write to; must exist. # organization = "" # -# ## Bucket to the name fo the bucketwrite into; must exist. +# ## Destination bucket to write into. # bucket = "" # # ## Timeout for HTTP messages. @@ -962,6 +969,15 @@ # # data_format = "influx" + +# # Configuration for Google Cloud Stackdriver to send metrics to +# [[outputs.stackdriver]] +# # GCP Project +# project = "erudite-bloom-151019" +# +# # The namespace for the metric descriptor +# namespace = "telegraf" + + # # Configuration for Wavefront server to send metrics to # [[outputs.wavefront]] # ## DNS name of the wavefront proxy server @@ -1160,6 +1176,12 @@ # # [[processors.strings.trim_suffix]] # # field = "read_count" # # suffix = "_count" +# +# ## Replace substrings within field names +# # [[processors.strings.replace]] +# # measurement = "*" +# # old = ":" +# # new = "_" # # Print all metrics that pass through this filter. @@ -1963,8 +1985,17 @@ # # Count files in a directory # [[inputs.filecount]] # ## Directory to gather stats about. +# ## deprecated in 1.9; use the directories option # directory = "/var/cache/apt/archives" # +# ## Directories to gather stats about.
+# ## This accepts standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## /var/log/** -> recursively find all directories in /var/log and count files in each directory +# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directory +# ## /var/log -> count all files in /var/log and all of its subdirectories +# directories = ["/var/cache/apt/archives"] + # ## Only count files that match the name pattern. Defaults to "*". # name = "*.deb" # @@ -2954,6 +2985,13 @@ # ## Interface or source address to send ping from (ping -I ) # ## on Darwin and Freebsd only source address possible: (ping -S ) # # interface = "" +# +# ## Specify the ping executable binary, default is "ping" +# # binary = "ping" +# +# ## Arguments for ping command +# ## when arguments is not empty, other options (ping_interval, timeout, etc) will be ignored +# # arguments = ["-c", "3"] # # Measure postfix queue statistics @@ -2985,6 +3023,9 @@ # ## CGroup name or path # # cgroup = "systemd/system.slice/nginx.service" # +# ## Windows service name +# # win_service = "" +# # ## override for process_name # ## This is optional; default is sourced from /proc//status # # process_name = "bar" @@ -3748,6 +3789,78 @@ # # basic_password = "barfoo" + +# # Generic HTTP write listener +# [[inputs.http_listener_v2]] +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Path to listen to. +# # path = "/telegraf" +# +# ## HTTP methods to accept. +# # methods = ["POST", "PUT"] +# +# ## maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 536,870,912 bytes (500 mebibytes) +# # max_body_size = 0 +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Influx HTTP write listener +# [[inputs.influxdb_listener]] +# ## Address and port to host HTTP listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 536,870,912 bytes (500 mebibytes) +# max_body_size = 0 +# +# ## Maximum line size allowed to be sent in bytes.
+# ## 0 means to use the default of 65536 bytes (64 kibibytes) +# max_line_size = 0 +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" + + # # Read JTI OpenConfig Telemetry from listed sensors # [[inputs.jti_openconfig_telemetry]] # ## List of device addresses to collect telemetry from @@ -3898,6 +4011,7 @@ # # ## Custom patterns can also be defined here. Put one pattern per line. # custom_patterns = ''' +# ''' # # ## Timezone allows you to provide an override for timestamps that # ## don't already include an offset @@ -3908,8 +4022,7 @@ # ## 1. Local -- interpret based on machine localtime # ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones # ## 3. UTC -- or blank/unspecified, will return timestamp in UTC -# timezone = "Canada/Eastern" -# ''' +# # timezone = "Canada/Eastern" # # Read metrics from MQTT topic(s) From d33116381bd204ee456088a97c87537389ba70e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Onur=20G=C3=BCzel?= Date: Fri, 19 Oct 2018 09:32:43 +0300 Subject: [PATCH 0299/1815] Add certificate distinguished name as a tags in x509_cert input (#4873) --- plugins/inputs/x509_cert/x509_cert.go | 31 ++++++++++++-- plugins/inputs/x509_cert/x509_cert_test.go | 50 ++++++++++++++++++++++ 2 files changed, 77 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index 45eddf3aa..ba4708ea2 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -4,6 +4,7 @@ package x509_cert import ( "crypto/tls" "crypto/x509" + "crypto/x509/pkix" "encoding/pem" "fmt" "io/ioutil" @@ -133,6 +134,31 @@ func getFields(cert *x509.Certificate, now time.Time) map[string]interface{} { return fields } +func getTags(subject pkix.Name, location string) map[string]string { + tags := map[string]string{ + "source": location, + "common_name": subject.CommonName, + } + + if len(subject.Organization) > 0 { + tags["organization"] = subject.Organization[0] + } + if len(subject.OrganizationalUnit) > 0 { + tags["organizational_unit"] = subject.OrganizationalUnit[0] + } + if len(subject.Country) > 0 { + tags["country"] = subject.Country[0] + } + if len(subject.Province) > 0 { + tags["province"] = subject.Province[0] + } + if len(subject.Locality) > 0 { + tags["locality"] = subject.Locality[0] + } + + return tags +} + // Gather adds metrics into the accumulator. 
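// Each certificate found at a source yields one point: getFields supplies
// the age and expiry timings, while getTags (above) contributes the subject
// DN tags, so certificates in a chain no longer collapse into one series.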
func (c *X509Cert) Gather(acc telegraf.Accumulator) error { now := time.Now() @@ -143,12 +169,9 @@ func (c *X509Cert) Gather(acc telegraf.Accumulator) error { return fmt.Errorf("cannot get SSL cert '%s': %s", location, err.Error()) } - tags := map[string]string{ - "source": location, - } - for _, cert := range certs { fields := getFields(cert, now) + tags := getTags(cert.Subject, location) acc.AddFields("x509_cert", fields, tags) } diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index fc75bc825..a9bd80568 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -184,6 +184,56 @@ func TestGatherLocal(t *testing.T) { } } +func TestGatherChain(t *testing.T) { + cert := fmt.Sprintf("%s\n%s", pki.ReadServerCert(), pki.ReadCACert()) + + tests := []struct { + name string + content string + error bool + }{ + {name: "chain certificate", content: cert}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + f, err := ioutil.TempFile("", "x509_cert") + if err != nil { + t.Fatal(err) + } + + _, err = f.Write([]byte(test.content)) + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } + + defer os.Remove(f.Name()) + + sc := X509Cert{ + Sources: []string{f.Name()}, + } + + error := false + + acc := testutil.Accumulator{} + err = sc.Gather(&acc) + if err != nil { + error = true + } + + if error != test.error { + t.Errorf("%s", err) + } + }) + } + +} + func TestStrings(t *testing.T) { sc := X509Cert{} From 1af2cf902cb9c43f894a8ca3a7f2da3f41b3d33c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 18 Oct 2018 23:34:59 -0700 Subject: [PATCH 0300/1815] Add new DN tags to x509_cert readme --- plugins/inputs/x509_cert/README.md | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/x509_cert/README.md b/plugins/inputs/x509_cert/README.md index 82fe520f9..a85d05463 100644 --- a/plugins/inputs/x509_cert/README.md +++ b/plugins/inputs/x509_cert/README.md @@ -27,14 +27,19 @@ file or network connection. ### Metrics -- `x509_cert` +- x509_cert - tags: - - `source` - source of the certificate + - source - source of the certificate + - organization + - organizational_unit + - country + - province + - locality - fields: - - `expiry` (int, seconds) - - `age` (int, seconds) - - `startdate` (int, seconds) - - `enddate` (int, seconds) + - expiry (int, seconds) + - age (int, seconds) + - startdate (int, seconds) + - enddate (int, seconds) ### Example output From 42483b39ecc3f9602e799a0105a79b8dbb484049 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 18 Oct 2018 23:39:18 -0700 Subject: [PATCH 0301/1815] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b0b4a07fe..95b36d35d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,10 @@ - [#4838](https://github.com/influxdata/telegraf/pull/4838): Add telegraf version to User-Agent header. - [#4864](https://github.com/influxdata/telegraf/pull/4864): Use DescribeStreamSummary in place of ListStreams in kinesis output. +## v1.8.3 [unreleased] + +- [#4873](https://github.com/influxdata/telegraf/pull/4873): Add DN attributes as tags in x509_cert input to avoid series overwrite. 
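To illustrate the overwrite being fixed (hypothetical line protocol, not captured output): a chained source previously emitted every certificate with the identical tag set, only `source=...`, so all points landed in one series and the last write won. With the subject DN tags, leaf and CA certificates become distinct series:

```
x509_cert,source=https://example.org:443,common_name=example.org expiry=7776000i
x509_cert,source=https://example.org:443,common_name=Example\ Root\ CA,organization=Example\ Org expiry=31536000i
```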
+ ## v1.8.2 [2018-10-17] ### Bugfixes From 1ec12ba6ad972e60d0201920798921894c9784f7 Mon Sep 17 00:00:00 2001 From: Trevor Pounds Date: Fri, 19 Oct 2018 14:01:31 -0400 Subject: [PATCH 0302/1815] Add Go 1.11 CI support (#4859) --- .circleci/config.yml | 48 +++++++++++++++++++++++++++++++----------- Makefile | 5 +++++ scripts/ci-1.11.docker | 28 ++++++++++++++++++++++++ 3 files changed, 69 insertions(+), 12 deletions(-) create mode 100644 scripts/ci-1.11.docker diff --git a/.circleci/config.yml b/.circleci/config.yml index d9c085dff..ae8771583 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -8,11 +8,14 @@ defaults: go-1_10: &go-1_10 docker: - image: 'quay.io/influxdb/telegraf-ci:1.10.4' + go-1_11: &go-1_11 + docker: + - image: 'quay.io/influxdb/telegraf-ci:1.11.1' version: 2 jobs: deps: - <<: [ *defaults, *go-1_10 ] + <<: [ *defaults, *go-1_11 ] steps: - checkout - restore_cache: @@ -42,16 +45,25 @@ jobs: at: '/go/src' - run: 'make check' - run: 'make test' - test-go-1.10-386: - <<: [ *defaults, *go-1_10 ] + test-go-1.11: + <<: [ *defaults, *go-1_11 ] steps: - attach_workspace: at: '/go/src' - - run: 'GOARCH=386 make check' + # disabled due to gofmt differences (1.10 vs 1.11). + # - run: 'make check' + - run: 'make test' + test-go-1.11-386: + <<: [ *defaults, *go-1_11 ] + steps: + - attach_workspace: + at: '/go/src' + # disabled due to gofmt differences (1.10 vs 1.11). + # - run: 'GOARCH=386 make check' - run: 'GOARCH=386 make test' package: - <<: [ *defaults, *go-1_10 ] + <<: [ *defaults, *go-1_11 ] steps: - attach_workspace: at: '/go/src' @@ -60,7 +72,7 @@ jobs: path: './build' destination: 'build' release: - <<: [ *defaults, *go-1_10 ] + <<: [ *defaults, *go-1_11 ] steps: - attach_workspace: at: '/go/src' @@ -69,7 +81,7 @@ jobs: path: './build' destination: 'build' nightly: - <<: [ *defaults, *go-1_10 ] + <<: [ *defaults, *go-1_11 ] steps: - attach_workspace: at: '/go/src' @@ -98,7 +110,13 @@ workflows: filters: tags: only: /.*/ - - 'test-go-1.10-386': + - 'test-go-1.11': + requires: + - 'deps' + filters: + tags: + only: /.*/ + - 'test-go-1.11-386': requires: - 'deps' filters: @@ -108,12 +126,14 @@ workflows: requires: - 'test-go-1.9' - 'test-go-1.10' - - 'test-go-1.10-386' + - 'test-go-1.11' + - 'test-go-1.11-386' - 'release': requires: - 'test-go-1.9' - 'test-go-1.10' - - 'test-go-1.10-386' + - 'test-go-1.11' + - 'test-go-1.11-386' filters: tags: only: /.*/ @@ -128,14 +148,18 @@ workflows: - 'test-go-1.10': requires: - 'deps' - - 'test-go-1.10-386': + - 'test-go-1.11': + requires: + - 'deps' + - 'test-go-1.11-386': requires: - 'deps' - 'nightly': requires: - 'test-go-1.9' - 'test-go-1.10' - - 'test-go-1.10-386' + - 'test-go-1.11' + - 'test-go-1.11-386' triggers: - schedule: cron: "0 7 * * *" diff --git a/Makefile b/Makefile index d19d4d4cd..b55924e3f 100644 --- a/Makefile +++ b/Makefile @@ -131,6 +131,11 @@ plugin-%: @echo "Starting dev environment for $${$(@)} input plugin..." 
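+# The ci-* targets below build and push the pinned CI images referenced by
+# .circleci/config.yml; a Go version bump means updating the tag in both places.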
@docker-compose -f plugins/inputs/$${$(@)}/dev/docker-compose.yml up +.PHONY: ci-1.11 +ci-1.11: + docker build -t quay.io/influxdb/telegraf-ci:1.11.1 - < scripts/ci-1.11.docker + docker push quay.io/influxdb/telegraf-ci:1.11.1 + .PHONY: ci-1.10 ci-1.10: docker build -t quay.io/influxdb/telegraf-ci:1.10.4 - < scripts/ci-1.10.docker diff --git a/scripts/ci-1.11.docker b/scripts/ci-1.11.docker new file mode 100644 index 000000000..31ff34842 --- /dev/null +++ b/scripts/ci-1.11.docker @@ -0,0 +1,28 @@ +FROM golang:1.11.1 + +RUN chmod -R 755 "$GOPATH" + +RUN DEBIAN_FRONTEND=noninteractive \ + apt update && apt install -y --no-install-recommends \ + autoconf \ + git \ + libtool \ + locales \ + make \ + python-boto \ + rpm \ + ruby \ + ruby-dev \ + zip && \ + rm -rf /var/lib/apt/lists/* + +RUN ln -sf /usr/share/zoneinfo/Etc/UTC /etc/localtime +RUN locale-gen C.UTF-8 || true +ENV LANG=C.UTF-8 + +RUN gem install fpm + +RUN go get -d github.com/golang/dep && \ + cd src/github.com/golang/dep && \ + git checkout -q v0.5.0 && \ + go install -ldflags="-X main.version=v0.5.0" ./cmd/dep From 17360f079ce89b896f06a8c12222c441f0e8f131 Mon Sep 17 00:00:00 2001 From: Trevor Pounds Date: Fri, 19 Oct 2018 14:12:01 -0400 Subject: [PATCH 0303/1815] Fix spelling mistakes (#4888) --- plugins/aggregators/basicstats/basicstats_test.go | 4 ++-- plugins/aggregators/valuecounter/valuecounter.go | 4 ++-- plugins/inputs/procstat/native_finder_notwindows.go | 2 +- plugins/outputs/cratedb/cratedb.go | 4 ++-- plugins/outputs/graphite/graphite.go | 2 +- plugins/outputs/influxdb/http.go | 2 +- plugins/parsers/grok/parser_test.go | 2 +- plugins/processors/topk/topk.go | 4 ++-- plugins/serializers/splunkmetric/splunkmetric.go | 2 +- 9 files changed, 13 insertions(+), 13 deletions(-) diff --git a/plugins/aggregators/basicstats/basicstats_test.go b/plugins/aggregators/basicstats/basicstats_test.go index 5c55284de..040cb0b82 100644 --- a/plugins/aggregators/basicstats/basicstats_test.go +++ b/plugins/aggregators/basicstats/basicstats_test.go @@ -291,7 +291,7 @@ func TestBasicStatsWithOnlySum(t *testing.T) { } // Verify that sum doesn't suffer from floating point errors. Early -// implementations of sum were calulated from mean and count, which +// implementations of sum were calculated from mean and count, which // e.g. summed "1, 1, 5, 1" as "7.999999..." instead of 8. func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) { @@ -509,7 +509,7 @@ func TestBasicStatsWithUnknownStat(t *testing.T) { } // Test that if Stats isn't supplied, then we only do count, min, max, mean, -// stdev, and s2. We purposely exclude sum for backwards compatability, +// stdev, and s2. We purposely exclude sum for backwards compatibility, // otherwise user's working systems will suddenly (and surprisingly) start // capturing sum without their input. func TestBasicStatsWithDefaultStats(t *testing.T) { diff --git a/plugins/aggregators/valuecounter/valuecounter.go b/plugins/aggregators/valuecounter/valuecounter.go index c43b7723b..05f4945d2 100644 --- a/plugins/aggregators/valuecounter/valuecounter.go +++ b/plugins/aggregators/valuecounter/valuecounter.go @@ -20,7 +20,7 @@ type ValueCounter struct { Fields []string } -// NewValueCounter create a new aggregation plugin which counts the occurances +// NewValueCounter create a new aggregation plugin which counts the occurrences // of fields and emits the count. 
func NewValueCounter() telegraf.Aggregator { vc := &ValueCounter{} @@ -46,7 +46,7 @@ func (vc *ValueCounter) SampleConfig() string { // Description returns the description of the ValueCounter plugin func (vc *ValueCounter) Description() string { - return "Count the occurance of values in fields." + return "Count the occurrence of values in fields." } // Add is run on every metric which passes the plugin diff --git a/plugins/inputs/procstat/native_finder_notwindows.go b/plugins/inputs/procstat/native_finder_notwindows.go index 533b7333a..a1683aad3 100644 --- a/plugins/inputs/procstat/native_finder_notwindows.go +++ b/plugins/inputs/procstat/native_finder_notwindows.go @@ -33,7 +33,7 @@ func (pg *NativeFinder) Pattern(pattern string) ([]PID, error) { return pids, err } -//FullPattern matches on the command line when the proccess was executed +//FullPattern matches on the command line when the process was executed func (pg *NativeFinder) FullPattern(pattern string) ([]PID, error) { var pids []PID regxPattern, err := regexp.Compile(pattern) diff --git a/plugins/outputs/cratedb/cratedb.go b/plugins/outputs/cratedb/cratedb.go index 01213011f..bac1c730a 100644 --- a/plugins/outputs/cratedb/cratedb.go +++ b/plugins/outputs/cratedb/cratedb.go @@ -183,7 +183,7 @@ func escapeObject(m map[string]interface{}) (string, error) { return `{` + strings.Join(pairs, ", ") + `}`, nil } -// escapeString wraps s in the given quote string and replaces all occurences +// escapeString wraps s in the given quote string and replaces all occurrences // of it inside of s with a double quote. func escapeString(s string, quote string) string { return quote + strings.Replace(s, quote, quote+quote, -1) + quote @@ -191,7 +191,7 @@ func escapeString(s string, quote string) string { // hashID returns a cryptographic hash int64 hash that includes the metric name // and tags. It's used instead of m.HashID() because it's not considered stable -// and because a cryptogtaphic hash makes more sense for the use case of +// and because a cryptographic hash makes more sense for the use case of // deduplication. // [1] https://github.com/influxdata/telegraf/pull/3210#discussion_r148411201 func hashID(m telegraf.Metric) int64 { diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go index c26c1587f..09cdbe080 100644 --- a/plugins/outputs/graphite/graphite.go +++ b/plugins/outputs/graphite/graphite.go @@ -173,7 +173,7 @@ func (g *Graphite) send(batch []byte) error { if _, e := g.conns[n].Write(batch); e != nil { // Error log.Println("E! Graphite Error: " + e.Error()) - // Close explicitely + // Close explicitly g.conns[n].Close() // Let's try the next one } else { diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 236d04321..5a589dc0e 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -220,7 +220,7 @@ func (c *httpClient) Database() string { return c.database } -// CreateDatabase attemps to create a new database in the InfluxDB server. +// CreateDatabase attempts to create a new database in the InfluxDB server. // Note that some names are not allowed by the server, notably those with // non-printable characters or slashes. 
func (c *httpClient) CreateDatabase(ctx context.Context) error { diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go index cda88ed3d..e3426b0fc 100644 --- a/plugins/parsers/grok/parser_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -957,7 +957,7 @@ func TestReplaceTimestampComma(t *testing.T) { require.Equal(t, 2018, m.Time().Year()) require.Equal(t, 13, m.Time().Hour()) require.Equal(t, 34, m.Time().Second()) - //Convert Nanosecond to milisecond for compare + // Convert nanosecond to millisecond for compare require.Equal(t, 555, m.Time().Nanosecond()/1000000) } diff --git a/plugins/processors/topk/topk.go b/plugins/processors/topk/topk.go index 8a52fa8d4..ba5a0c783 100644 --- a/plugins/processors/topk/topk.go +++ b/plugins/processors/topk/topk.go @@ -209,7 +209,7 @@ func (t *TopK) Apply(in ...telegraf.Metric) []telegraf.Metric { // Add the metrics received to our internal cache for _, m := range in { - // Check if the metric has any of the fields over wich we are aggregating + // Check if the metric has any of the fields over which we are aggregating hasField := false for _, f := range t.Fields { if m.HasField(f) { @@ -279,7 +279,7 @@ func (t *TopK) push() []telegraf.Metric { // Sort the aggregations sortMetrics(aggregations, field, t.Bottomk) - // Create a one dimentional list with the top K metrics of each key + // Create a one dimensional list with the top K metrics of each key for i, ag := range aggregations[0:min(t.K, len(aggregations))] { // Check whether of not we need to add fields of tags to the selected metrics diff --git a/plugins/serializers/splunkmetric/splunkmetric.go b/plugins/serializers/splunkmetric/splunkmetric.go index 77de49ee0..01643e334 100644 --- a/plugins/serializers/splunkmetric/splunkmetric.go +++ b/plugins/serializers/splunkmetric/splunkmetric.go @@ -52,7 +52,7 @@ func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte, e ** metric_name: The name of the metric ** _value: The value for the metric ** time: The timestamp for the metric - ** All other index fields become deminsions. + ** All other index fields become dimensions. */ type HECTimeSeries struct { Time float64 `json:"time"` From 589d0587f6bcb2ad749443a43157de10831af78d Mon Sep 17 00:00:00 2001 From: Samuel-BF <36996277+Samuel-BF@users.noreply.github.com> Date: Fri, 19 Oct 2018 20:17:18 +0200 Subject: [PATCH 0304/1815] Add ability to specify bytes options as strings with units (KB, MiB, ...) 
(#4852) --- internal/internal.go | 28 +++++++++++++++++++ internal/internal_test.go | 23 +++++++++++++++ plugins/inputs/filecount/README.md | 7 +++-- plugins/inputs/filecount/filecount.go | 19 +++++++------ plugins/inputs/filecount/filecount_test.go | 6 ++-- plugins/inputs/http_listener_v2/README.md | 4 +-- .../http_listener_v2/http_listener_v2.go | 14 +++++----- .../http_listener_v2/http_listener_v2_test.go | 7 +++-- .../inputs/influxdb_listener/http_listener.go | 24 ++++++++-------- .../influxdb_listener/http_listener_test.go | 9 +++--- plugins/inputs/socket_listener/README.md | 4 +-- .../inputs/socket_listener/socket_listener.go | 14 +++++----- .../socket_listener/socket_listener_test.go | 9 +++--- plugins/outputs/influxdb/README.md | 2 +- plugins/outputs/influxdb/influxdb.go | 6 ++-- plugins/outputs/influxdb/influxdb_test.go | 2 +- 16 files changed, 117 insertions(+), 61 deletions(-) diff --git a/internal/internal.go b/internal/internal.go index f6b85de84..567b0f773 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -16,6 +16,8 @@ import ( "syscall" "time" "unicode" + + "github.com/alecthomas/units" ) const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" @@ -36,6 +38,11 @@ type Duration struct { Duration time.Duration } +// Size just wraps an int64 +type Size struct { + Size int64 +} + // SetVersion sets the telegraf agent version func SetVersion(v string) error { if version != "" { @@ -85,6 +92,27 @@ func (d *Duration) UnmarshalTOML(b []byte) error { return nil } +func (s *Size) UnmarshalTOML(b []byte) error { + var err error + b = bytes.Trim(b, `'`) + + val, err := strconv.ParseInt(string(b), 10, 64) + if err == nil { + s.Size = val + return nil + } + uq, err := strconv.Unquote(string(b)) + if err != nil { + return err + } + val, err = units.ParseStrictBytes(uq) + if err != nil { + return err + } + s.Size = val + return nil +} + // ReadLines reads contents from a file and splits them by new lines. // A convenience wrapper to ReadLinesOffsetN(filename, 0, -1). func ReadLines(filename string) ([]string, error) { diff --git a/internal/internal_test.go b/internal/internal_test.go index 486c3d744..89ee06903 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -166,6 +166,29 @@ func TestDuration(t *testing.T) { assert.Equal(t, time.Second, d.Duration) } +func TestSize(t *testing.T) { + var s Size + + s.UnmarshalTOML([]byte(`"1B"`)) + assert.Equal(t, int64(1), s.Size) + + s = Size{} + s.UnmarshalTOML([]byte(`1`)) + assert.Equal(t, int64(1), s.Size) + + s = Size{} + s.UnmarshalTOML([]byte(`'1'`)) + assert.Equal(t, int64(1), s.Size) + + s = Size{} + s.UnmarshalTOML([]byte(`"1GB"`)) + assert.Equal(t, int64(1000*1000*1000), s.Size) + + s = Size{} + s.UnmarshalTOML([]byte(`"12GiB"`)) + assert.Equal(t, int64(12*1024*1024*1024), s.Size) +} + func TestCompressWithGzip(t *testing.T) { testData := "the quick brown fox jumps over the lazy dog" inputBuffer := bytes.NewBuffer([]byte(testData)) diff --git a/plugins/inputs/filecount/README.md b/plugins/inputs/filecount/README.md index ccec532aa..cf11b7d90 100644 --- a/plugins/inputs/filecount/README.md +++ b/plugins/inputs/filecount/README.md @@ -19,10 +19,11 @@ Counts files in directories that match certain criteria. ## Only count regular files. Defaults to true. regular_only = true - ## Only count files that are at least this size in bytes. If size is + ## Only count files that are at least this size. 
If size is ## a negative number, only count files that are smaller than the - ## absolute value of size. Defaults to 0. - size = 0 + ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... + ## Without quotes and units, interpreted as size in bytes. + size = "0B" ## Only count files that have not been touched for at least this ## duration. If mtime is negative, only count files that have been diff --git a/plugins/inputs/filecount/filecount.go b/plugins/inputs/filecount/filecount.go index 66d5a33fe..d613f3b77 100644 --- a/plugins/inputs/filecount/filecount.go +++ b/plugins/inputs/filecount/filecount.go @@ -34,10 +34,11 @@ const sampleConfig = ` ## Only count regular files. Defaults to true. regular_only = true - ## Only count files that are at least this size in bytes. If size is + ## Only count files that are at least this size. If size is ## a negative number, only count files that are smaller than the - ## absolute value of size. Defaults to 0. - size = 0 + ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... + ## Without quotes and units, interpreted as size in bytes. + size = "0B" ## Only count files that have not been touched for at least this ## duration. If mtime is negative, only count files that have been @@ -51,7 +52,7 @@ type FileCount struct { Name string Recursive bool RegularOnly bool - Size int64 + Size internal.Size MTime internal.Duration `toml:"mtime"` fileFilters []fileFilterFunc } @@ -99,7 +100,7 @@ func (fc *FileCount) regularOnlyFilter() fileFilterFunc { } func (fc *FileCount) sizeFilter() fileFilterFunc { - if fc.Size == 0 { + if fc.Size.Size == 0 { return nil } @@ -107,10 +108,10 @@ func (fc *FileCount) sizeFilter() fileFilterFunc { if !f.Mode().IsRegular() { return false, nil } - if fc.Size < 0 { - return f.Size() < -fc.Size, nil + if fc.Size.Size < 0 { + return f.Size() < -fc.Size.Size, nil } - return f.Size() >= fc.Size, nil + return f.Size() >= fc.Size.Size, nil } } @@ -257,7 +258,7 @@ func NewFileCount() *FileCount { Name: "*", Recursive: true, RegularOnly: true, - Size: 0, + Size: internal.Size{Size: 0}, MTime: internal.Duration{Duration: 0}, fileFilters: nil, } diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go index 16bb83de5..7a48c2166 100644 --- a/plugins/inputs/filecount/filecount_test.go +++ b/plugins/inputs/filecount/filecount_test.go @@ -70,7 +70,7 @@ func TestRegularOnlyFilter(t *testing.T) { func TestSizeFilter(t *testing.T) { fc := getNoFilterFileCount("testdata") - fc.Size = -100 + fc.Size = internal.Size{Size: -100} matches := []string{"foo", "bar", "baz", "subdir/quux", "subdir/quuz"} acc := testutil.Accumulator{} @@ -78,7 +78,7 @@ func TestSizeFilter(t *testing.T) { require.True(t, assertFileCount(&acc, "testdata", len(matches))) - fc.Size = 100 + fc.Size = internal.Size{Size: 100} matches = []string{"qux"} acc = testutil.Accumulator{} @@ -119,7 +119,7 @@ func getNoFilterFileCount(dir string) FileCount { Name: "*", Recursive: true, RegularOnly: false, - Size: 0, + Size: internal.Size{Size: 0}, MTime: internal.Duration{Duration: 0}, fileFilters: nil, } diff --git a/plugins/inputs/http_listener_v2/README.md b/plugins/inputs/http_listener_v2/README.md index 6d5d25aa4..f5a853189 100644 --- a/plugins/inputs/http_listener_v2/README.md +++ b/plugins/inputs/http_listener_v2/README.md @@ -28,8 +28,8 @@ This is a sample configuration for the plugin. # write_timeout = "10s" ## Maximum allowed http request body size in bytes. 
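To make the new Size semantics concrete: a bare TOML integer stays a raw byte count, while a quoted string is routed through units.ParseStrictBytes, which keeps decimal (KB, MB) and binary (KiB, MiB) suffixes distinct. A small sketch of the parser the wrapper delegates to, assuming github.com/alecthomas/units behaves as the TestSize cases above expect:

package main

import (
	"fmt"

	"github.com/alecthomas/units"
)

func main() {
	// These are the same calls Size.UnmarshalTOML makes for quoted values.
	for _, s := range []string{"1B", "1KB", "1KiB", "500MB", "12GiB"} {
		n, err := units.ParseStrictBytes(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-6s = %d bytes\n", s, n)
	}
	// Output: 1B = 1, 1KB = 1000, 1KiB = 1024, 500MB = 500000000, 12GiB = 12884901888
}
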
-  ## 0 means to use the default of 536,870,912 bytes (500 mebibytes)
-  # max_body_size = 0
+  ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
+  # max_body_size = "500MB"
 
   ## Set one or more allowed client CA certificate file names to
   ## enable mutually authenticated TLS connections
diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go
index 871d1080b..3fd8989f9 100644
--- a/plugins/inputs/http_listener_v2/http_listener_v2.go
+++ b/plugins/inputs/http_listener_v2/http_listener_v2.go
@@ -31,7 +31,7 @@ type HTTPListenerV2 struct {
 	Methods      []string
 	ReadTimeout  internal.Duration
 	WriteTimeout internal.Duration
-	MaxBodySize  int64
+	MaxBodySize  internal.Size
 	Port         int
 
 	tlsint.ServerConfig
@@ -65,8 +65,8 @@ const sampleConfig = `
   # write_timeout = "10s"
 
   ## Maximum allowed http request body size in bytes.
-  ## 0 means to use the default of 536,870,912 bytes (500 mebibytes)
-  # max_body_size = 0
+  ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
+  # max_body_size = "500MB"
 
   ## Set one or more allowed client CA certificate file names to
   ## enable mutually authenticated TLS connections
@@ -106,8 +106,8 @@ func (h *HTTPListenerV2) SetParser(parser parsers.Parser) {
 
 // Start starts the http listener service.
 func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error {
-	if h.MaxBodySize == 0 {
-		h.MaxBodySize = defaultMaxBodySize
+	if h.MaxBodySize.Size == 0 {
+		h.MaxBodySize.Size = defaultMaxBodySize
 	}
 
 	if h.ReadTimeout.Duration < time.Second {
@@ -173,7 +173,7 @@ func (h *HTTPListenerV2) ServeHTTP(res http.ResponseWriter, req *http.Request) {
 
 func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) {
 	// Check that the content length is not too large for us to handle.
- if req.ContentLength > h.MaxBodySize { + if req.ContentLength > h.MaxBodySize.Size { tooLarge(res) return } @@ -204,7 +204,7 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) defer body.Close() } - body = http.MaxBytesReader(res, body, h.MaxBodySize) + body = http.MaxBytesReader(res, body, h.MaxBodySize.Size) bytes, err := ioutil.ReadAll(body) if err != nil { tooLarge(res) diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go index 3287ea59e..ab0c89f81 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2_test.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go @@ -13,6 +13,7 @@ import ( "testing" "time" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" @@ -51,7 +52,7 @@ func newTestHTTPListenerV2() *HTTPListenerV2 { Methods: []string{"POST"}, Parser: parser, TimeFunc: time.Now, - MaxBodySize: 70000, + MaxBodySize: internal.Size{Size: 70000}, } return listener } @@ -234,7 +235,7 @@ func TestWriteHTTPExactMaxBodySize(t *testing.T) { Path: "/write", Methods: []string{"POST"}, Parser: parser, - MaxBodySize: int64(len(hugeMetric)), + MaxBodySize: internal.Size{Size: int64(len(hugeMetric))}, TimeFunc: time.Now, } @@ -256,7 +257,7 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) { Path: "/write", Methods: []string{"POST"}, Parser: parser, - MaxBodySize: 4096, + MaxBodySize: internal.Size{Size: 4096}, TimeFunc: time.Now, } diff --git a/plugins/inputs/influxdb_listener/http_listener.go b/plugins/inputs/influxdb_listener/http_listener.go index 29beff9a8..b8abeecd7 100644 --- a/plugins/inputs/influxdb_listener/http_listener.go +++ b/plugins/inputs/influxdb_listener/http_listener.go @@ -39,8 +39,8 @@ type HTTPListener struct { ServiceAddress string ReadTimeout internal.Duration WriteTimeout internal.Duration - MaxBodySize int64 - MaxLineSize int + MaxBodySize internal.Size + MaxLineSize internal.Size Port int tlsint.ServerConfig @@ -84,12 +84,12 @@ const sampleConfig = ` write_timeout = "10s" ## Maximum allowed http request body size in bytes. - ## 0 means to use the default of 536,870,912 bytes (500 mebibytes) - max_body_size = 0 + ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) + max_body_size = "500MiB" ## Maximum line size allowed to be sent in bytes. 
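Both listeners keep the same two-layer guard, now driven by MaxBodySize.Size: reject early when the declared Content-Length exceeds the limit, then cap the actual read with http.MaxBytesReader in case the header understates the body. A minimal sketch of that pattern in a hypothetical standalone handler (not the plugin's own code):

package main

import (
	"io/ioutil"
	"log"
	"net/http"
)

const maxBodySize int64 = 4096 // e.g. the small limit exercised by the tests above

func write(res http.ResponseWriter, req *http.Request) {
	// First line of defense: check the declared length.
	if req.ContentLength > maxBodySize {
		http.Error(res, "request body too large", http.StatusRequestEntityTooLarge)
		return
	}
	// Second line of defense: hard-cap the bytes actually read.
	body := http.MaxBytesReader(res, req.Body, maxBodySize)
	defer body.Close()
	if _, err := ioutil.ReadAll(body); err != nil {
		// MaxBytesReader returns an error once the limit is crossed.
		http.Error(res, "request body too large", http.StatusRequestEntityTooLarge)
		return
	}
	res.WriteHeader(http.StatusNoContent)
}

func main() {
	http.HandleFunc("/write", write)
	log.Fatal(http.ListenAndServe("localhost:8186", nil))
}
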
## 0 means to use the default of 65536 bytes (64 kibibytes) - max_line_size = 0 + max_line_size = "64KiB" ## Set one or more allowed client CA certificate file names to ## enable mutually authenticated TLS connections @@ -139,11 +139,11 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error { h.BuffersCreated = selfstat.Register("http_listener", "buffers_created", tags) h.AuthFailures = selfstat.Register("http_listener", "auth_failures", tags) - if h.MaxBodySize == 0 { - h.MaxBodySize = DEFAULT_MAX_BODY_SIZE + if h.MaxBodySize.Size == 0 { + h.MaxBodySize.Size = DEFAULT_MAX_BODY_SIZE } - if h.MaxLineSize == 0 { - h.MaxLineSize = DEFAULT_MAX_LINE_SIZE + if h.MaxLineSize.Size == 0 { + h.MaxLineSize.Size = DEFAULT_MAX_LINE_SIZE } if h.ReadTimeout.Duration < time.Second { @@ -154,7 +154,7 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error { } h.acc = acc - h.pool = NewPool(200, h.MaxLineSize) + h.pool = NewPool(200, int(h.MaxLineSize.Size)) tlsConf, err := h.ServerConfig.TLSConfig() if err != nil { @@ -241,7 +241,7 @@ func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) { func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { // Check that the content length is not too large for us to handle. - if req.ContentLength > h.MaxBodySize { + if req.ContentLength > h.MaxBodySize.Size { tooLarge(res) return } @@ -261,7 +261,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { return } } - body = http.MaxBytesReader(res, body, h.MaxBodySize) + body = http.MaxBytesReader(res, body, h.MaxBodySize.Size) var return400 bool var hangingBytes bool diff --git a/plugins/inputs/influxdb_listener/http_listener_test.go b/plugins/inputs/influxdb_listener/http_listener_test.go index 3277e5344..964295061 100644 --- a/plugins/inputs/influxdb_listener/http_listener_test.go +++ b/plugins/inputs/influxdb_listener/http_listener_test.go @@ -13,6 +13,7 @@ import ( "testing" "time" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -217,7 +218,7 @@ func TestWriteHTTPNoNewline(t *testing.T) { func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) { listener := &HTTPListener{ ServiceAddress: "localhost:0", - MaxLineSize: 128 * 1000, + MaxLineSize: internal.Size{Size: 128 * 1000}, TimeFunc: time.Now, } @@ -235,7 +236,7 @@ func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) { func TestWriteHTTPVerySmallMaxBody(t *testing.T) { listener := &HTTPListener{ ServiceAddress: "localhost:0", - MaxBodySize: 4096, + MaxBodySize: internal.Size{Size: 4096}, TimeFunc: time.Now, } @@ -252,7 +253,7 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) { func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) { listener := &HTTPListener{ ServiceAddress: "localhost:0", - MaxLineSize: 70, + MaxLineSize: internal.Size{Size: 70}, TimeFunc: time.Now, } @@ -279,7 +280,7 @@ func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) { func TestWriteHTTPLargeLinesSkipped(t *testing.T) { listener := &HTTPListener{ ServiceAddress: "localhost:0", - MaxLineSize: 100, + MaxLineSize: internal.Size{Size: 100}, TimeFunc: time.Now, } diff --git a/plugins/inputs/socket_listener/README.md b/plugins/inputs/socket_listener/README.md index ff73b1fbb..2f1a0572e 100644 --- a/plugins/inputs/socket_listener/README.md +++ b/plugins/inputs/socket_listener/README.md @@ -42,11 +42,11 @@ This is a sample configuration for the plugin. ## Enables client authentication if set. 
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - ## Maximum socket buffer size in bytes. + ## Maximum socket buffer size (in bytes when no unit specified). ## For stream sockets, once the buffer fills up, the sender will start backing up. ## For datagram sockets, once the buffer fills up, metrics will start dropping. ## Defaults to the OS default. - # read_buffer_size = 65535 + # read_buffer_size = "64KiB" ## Period between keep alive probes. ## Only applies to TCP sockets. diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go index daab84952..73c321f81 100644 --- a/plugins/inputs/socket_listener/socket_listener.go +++ b/plugins/inputs/socket_listener/socket_listener.go @@ -47,9 +47,9 @@ func (ssl *streamSocketListener) listen() { break } - if ssl.ReadBufferSize > 0 { + if ssl.ReadBufferSize.Size > 0 { if srb, ok := c.(setReadBufferer); ok { - srb.SetReadBuffer(ssl.ReadBufferSize) + srb.SetReadBuffer(int(ssl.ReadBufferSize.Size)) } else { log.Printf("W! Unable to set read buffer on a %s socket", ssl.sockType) } @@ -164,7 +164,7 @@ func (psl *packetSocketListener) listen() { type SocketListener struct { ServiceAddress string `toml:"service_address"` MaxConnections int `toml:"max_connections"` - ReadBufferSize int `toml:"read_buffer_size"` + ReadBufferSize internal.Size `toml:"read_buffer_size"` ReadTimeout *internal.Duration `toml:"read_timeout"` KeepAlivePeriod *internal.Duration `toml:"keep_alive_period"` tlsint.ServerConfig @@ -209,11 +209,11 @@ func (sl *SocketListener) SampleConfig() string { ## Enables client authentication if set. # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - ## Maximum socket buffer size in bytes. + ## Maximum socket buffer size (in bytes when no unit specified). ## For stream sockets, once the buffer fills up, the sender will start backing up. ## For datagram sockets, once the buffer fills up, metrics will start dropping. ## Defaults to the OS default. - # read_buffer_size = 65535 + # read_buffer_size = "64KiB" ## Period between keep alive probes. ## Only applies to TCP sockets. @@ -286,9 +286,9 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { return err } - if sl.ReadBufferSize > 0 { + if sl.ReadBufferSize.Size > 0 { if srb, ok := pc.(setReadBufferer); ok { - srb.SetReadBuffer(sl.ReadBufferSize) + srb.SetReadBuffer(int(sl.ReadBufferSize.Size)) } else { log.Printf("W! 
Unable to set read buffer on a %s socket", spl[0]) } diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index 26691ef54..ae7fef8b9 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -81,7 +82,7 @@ func TestSocketListener_tcp(t *testing.T) { sl := newSocketListener() sl.ServiceAddress = "tcp://127.0.0.1:0" - sl.ReadBufferSize = 1024 + sl.ReadBufferSize = internal.Size{Size: 1024} acc := &testutil.Accumulator{} err := sl.Start(acc) @@ -99,7 +100,7 @@ func TestSocketListener_udp(t *testing.T) { sl := newSocketListener() sl.ServiceAddress = "udp://127.0.0.1:0" - sl.ReadBufferSize = 1024 + sl.ReadBufferSize = internal.Size{Size: 1024} acc := &testutil.Accumulator{} err := sl.Start(acc) @@ -123,7 +124,7 @@ func TestSocketListener_unix(t *testing.T) { os.Create(sock) sl := newSocketListener() sl.ServiceAddress = "unix://" + sock - sl.ReadBufferSize = 1024 + sl.ReadBufferSize = internal.Size{Size: 1024} acc := &testutil.Accumulator{} err = sl.Start(acc) @@ -147,7 +148,7 @@ func TestSocketListener_unixgram(t *testing.T) { os.Create(sock) sl := newSocketListener() sl.ServiceAddress = "unixgram://" + sock - sl.ReadBufferSize = 1024 + sl.ReadBufferSize = internal.Size{Size: 1024} acc := &testutil.Accumulator{} err = sl.Start(acc) diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md index e9b3b0346..5d223ca3d 100644 --- a/plugins/outputs/influxdb/README.md +++ b/plugins/outputs/influxdb/README.md @@ -42,7 +42,7 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP ser # user_agent = "telegraf" ## UDP payload size is the maximum packet size to send. - # udp_payload = 512 + # udp_payload = "512B" ## Optional TLS Config for use on HTTP connections. # tls_ca = "/etc/telegraf/ca.pem" diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 06079dfc5..1f61b801f 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -41,7 +41,7 @@ type InfluxDB struct { RetentionPolicy string WriteConsistency string Timeout internal.Duration - UDPPayload int `toml:"udp_payload"` + UDPPayload internal.Size `toml:"udp_payload"` HTTPProxy string `toml:"http_proxy"` HTTPHeaders map[string]string `toml:"http_headers"` ContentEncoding string `toml:"content_encoding"` @@ -95,7 +95,7 @@ var sampleConfig = ` # user_agent = "telegraf" ## UDP payload size is the maximum packet size to send. - # udp_payload = 512 + # udp_payload = "512B" ## Optional TLS Config for use on HTTP connections. 
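The socket_listener hunks above hinge on an optional-interface check: not every net.Conn or net.PacketConn can resize its kernel receive buffer, so the plugin asserts against a small local interface and only calls SetReadBuffer where the concrete type (e.g. *net.TCPConn, *net.UDPConn) provides it. A self-contained sketch of the same pattern, with names assumed to mirror the plugin's:

package main

import (
	"log"
	"net"
)

// setReadBufferer matches connection types that expose SetReadBuffer,
// such as *net.TCPConn and *net.UDPConn.
type setReadBufferer interface {
	SetReadBuffer(bytes int) error
}

func applyReadBuffer(c net.Conn, size int) {
	if srb, ok := c.(setReadBufferer); ok {
		if err := srb.SetReadBuffer(size); err != nil {
			log.Printf("W! setting read buffer: %v", err)
		}
	} else {
		log.Printf("W! Unable to set read buffer on a %T connection", c)
	}
}

func main() {
	// Dialing UDP sends no packets, so this runs without a remote listener.
	c, err := net.Dial("udp", "127.0.0.1:9")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	applyReadBuffer(c, 64*1024) // 64 KiB, matching the sample configs above
}
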
# tls_ca = "/etc/telegraf/ca.pem" @@ -225,7 +225,7 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { func (i *InfluxDB) udpClient(url *url.URL) (Client, error) { config := &UDPConfig{ URL: url, - MaxPayloadSize: i.UDPPayload, + MaxPayloadSize: int(i.UDPPayload.Size), Serializer: i.serializer, } diff --git a/plugins/outputs/influxdb/influxdb_test.go b/plugins/outputs/influxdb/influxdb_test.go index 3ec10989e..63ecc47be 100644 --- a/plugins/outputs/influxdb/influxdb_test.go +++ b/plugins/outputs/influxdb/influxdb_test.go @@ -74,7 +74,7 @@ func TestConnectUDPConfig(t *testing.T) { output := influxdb.InfluxDB{ URLs: []string{"udp://localhost:8089"}, - UDPPayload: 42, + UDPPayload: internal.Size{Size: 42}, CreateUDPClientF: func(config *influxdb.UDPConfig) (influxdb.Client, error) { actual = config From 4a311830c6a450b52944c2073c50e95a5f53b609 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 19 Oct 2018 11:18:30 -0700 Subject: [PATCH 0305/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 95b36d35d..3cf3cac6a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ - [#4807](https://github.com/influxdata/telegraf/pull/4807): Add entity-body compression to http output. - [#4838](https://github.com/influxdata/telegraf/pull/4838): Add telegraf version to User-Agent header. - [#4864](https://github.com/influxdata/telegraf/pull/4864): Use DescribeStreamSummary in place of ListStreams in kinesis output. +- [#4852](https://github.com/influxdata/telegraf/pull/4852): Add ability to specify bytes options as strings with units. ## v1.8.3 [unreleased] From ee056278f5e5860c252be4731280f75c91a40bc0 Mon Sep 17 00:00:00 2001 From: Trevor Pounds Date: Fri, 19 Oct 2018 16:32:54 -0400 Subject: [PATCH 0306/1815] Enable gofmt code simplification (#4887) --- Makefile | 4 +- cmd/telegraf/telegraf.go | 4 +- internal/config/config_test.go | 12 +- internal/models/filter.go | 4 +- internal/models/filter_test.go | 64 +-- plugins/inputs/bcache/bcache.go | 2 +- plugins/inputs/ceph/ceph.go | 2 +- plugins/inputs/ceph/ceph_test.go | 10 +- plugins/inputs/cgroup/cgroup_linux.go | 8 +- plugins/inputs/cloudwatch/cloudwatch_test.go | 14 +- plugins/inputs/consul/consul_test.go | 2 +- plugins/inputs/cpu/cpu_test.go | 6 +- plugins/inputs/dcos/client_test.go | 4 +- plugins/inputs/dcos/dcos_test.go | 4 +- plugins/inputs/disk/disk_test.go | 6 +- plugins/inputs/diskio/diskio_test.go | 10 +- plugins/inputs/docker/docker_test.go | 12 +- plugins/inputs/docker/docker_testdata.go | 40 +- plugins/inputs/graylog/graylog_test.go | 2 +- plugins/inputs/hddtemp/hddtemp_test.go | 4 +- plugins/inputs/httpjson/httpjson_test.go | 4 +- plugins/inputs/interrupts/interrupts.go | 2 +- plugins/inputs/interrupts/interrupts_test.go | 58 +-- plugins/inputs/ipmi_sensor/connection.go | 2 +- plugins/inputs/ipset/ipset_test.go | 8 +- plugins/inputs/iptables/iptables_test.go | 16 +- plugins/inputs/jolokia/jolokia_test.go | 2 +- plugins/inputs/jolokia2/gatherer_test.go | 14 +- plugins/inputs/jolokia2/jolokia_test.go | 2 +- plugins/inputs/mesos/mesos.go | 4 +- plugins/inputs/mongodb/mongodb_data_test.go | 12 +- plugins/inputs/mongodb/mongodb_server_test.go | 2 +- plugins/inputs/net/net_test.go | 10 +- .../inputs/nsq_consumer/nsq_consumer_test.go | 8 +- plugins/inputs/nvidia_smi/nvidia_smi.go | 26 +- plugins/inputs/pf/pf.go | 42 +- plugins/inputs/pf/pf_test.go | 18 +- plugins/inputs/snmp/snmp_mocks_test.go | 44 +- plugins/inputs/snmp/snmp_test.go | 4 +- 
plugins/inputs/statsd/statsd_test.go | 8 +- plugins/inputs/syslog/rfc5425_test.go | 32 +- plugins/inputs/tail/tail.go | 2 +- plugins/inputs/varnish/varnish_test.go | 6 +- plugins/inputs/zipkin/codec/codec_test.go | 4 +- .../inputs/zipkin/codec/thrift/thrift_test.go | 12 +- plugins/inputs/zipkin/convert_test.go | 22 +- plugins/inputs/zipkin/zipkin_test.go | 32 +- .../outputs/azure_monitor/azure_monitor.go | 2 +- plugins/outputs/cloudwatch/cloudwatch_test.go | 2 +- plugins/outputs/cratedb/cratedb.go | 2 +- plugins/outputs/datadog/datadog_test.go | 6 +- plugins/outputs/influxdb/udp_test.go | 2 +- .../prometheus_client/prometheus_client.go | 6 +- plugins/parsers/collectd/parser_test.go | 10 +- plugins/parsers/influx/machine_test.go | 394 +++++++++--------- plugins/processors/strings/strings_test.go | 54 +-- plugins/processors/topk/topk.go | 2 +- plugins/processors/topk/topk_test.go | 106 ++--- testutil/accumulator.go | 2 +- 59 files changed, 599 insertions(+), 599 deletions(-) diff --git a/Makefile b/Makefile index b55924e3f..9c7fe1cae 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,7 @@ PREFIX := /usr/local BRANCH := $(shell git rev-parse --abbrev-ref HEAD) COMMIT := $(shell git rev-parse --short HEAD) GOFILES ?= $(shell git ls-files '*.go') -GOFMT ?= $(shell gofmt -l $(filter-out plugins/parsers/influx/machine.go, $(GOFILES))) +GOFMT ?= $(shell gofmt -l -s $(filter-out plugins/parsers/influx/machine.go, $(GOFILES))) BUILDFLAGS ?= ifdef GOBIN @@ -55,7 +55,7 @@ test: .PHONY: fmt fmt: - @gofmt -w $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)) + @gofmt -s -w $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)) .PHONY: fmtcheck fmtcheck: diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 02f22353b..7c451c2db 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -296,13 +296,13 @@ func main() { switch { case *fOutputList: fmt.Println("Available Output Plugins:") - for k, _ := range outputs.Outputs { + for k := range outputs.Outputs { fmt.Printf(" %s\n", k) } return case *fInputList: fmt.Println("Available Input Plugins:") - for k, _ := range inputs.Inputs { + for k := range inputs.Inputs { fmt.Printf(" %s\n", k) } return diff --git a/internal/config/config_test.go b/internal/config/config_test.go index b136fec8c..cd7d2301c 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -32,13 +32,13 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { FieldDrop: []string{"other", "stuff"}, FieldPass: []string{"some", "strings"}, TagDrop: []models.TagFilter{ - models.TagFilter{ + { Name: "badtag", Filter: []string{"othertag"}, }, }, TagPass: []models.TagFilter{ - models.TagFilter{ + { Name: "goodtag", Filter: []string{"mytag"}, }, @@ -71,13 +71,13 @@ func TestConfig_LoadSingleInput(t *testing.T) { FieldDrop: []string{"other", "stuff"}, FieldPass: []string{"some", "strings"}, TagDrop: []models.TagFilter{ - models.TagFilter{ + { Name: "badtag", Filter: []string{"othertag"}, }, }, TagPass: []models.TagFilter{ - models.TagFilter{ + { Name: "goodtag", Filter: []string{"mytag"}, }, @@ -117,13 +117,13 @@ func TestConfig_LoadDirectory(t *testing.T) { FieldDrop: []string{"other", "stuff"}, FieldPass: []string{"some", "strings"}, TagDrop: []models.TagFilter{ - models.TagFilter{ + { Name: "badtag", Filter: []string{"othertag"}, }, }, TagPass: []models.TagFilter{ - models.TagFilter{ + { Name: "goodtag", Filter: []string{"mytag"}, }, diff --git a/internal/models/filter.go b/internal/models/filter.go index 
664a6ff06..13627daad 100644 --- a/internal/models/filter.go +++ b/internal/models/filter.go @@ -79,13 +79,13 @@ func (f *Filter) Compile() error { return fmt.Errorf("Error compiling 'taginclude', %s", err) } - for i, _ := range f.TagDrop { + for i := range f.TagDrop { f.TagDrop[i].filter, err = filter.Compile(f.TagDrop[i].Filter) if err != nil { return fmt.Errorf("Error compiling 'tagdrop', %s", err) } } - for i, _ := range f.TagPass { + for i := range f.TagPass { f.TagPass[i].filter, err = filter.Compile(f.TagPass[i].Filter) if err != nil { return fmt.Errorf("Error compiling 'tagpass', %s", err) diff --git a/internal/models/filter_test.go b/internal/models/filter_test.go index 16a147cad..eb208f7c3 100644 --- a/internal/models/filter_test.go +++ b/internal/models/filter_test.go @@ -24,7 +24,7 @@ func TestFilter_ApplyEmpty(t *testing.T) { func TestFilter_ApplyTagsDontPass(t *testing.T) { filters := []TagFilter{ - TagFilter{ + { Name: "cpu", Filter: []string{"cpu-*"}, }, @@ -244,11 +244,11 @@ func TestFilter_FieldDrop(t *testing.T) { func TestFilter_TagPass(t *testing.T) { filters := []TagFilter{ - TagFilter{ + { Name: "cpu", Filter: []string{"cpu-*"}, }, - TagFilter{ + { Name: "mem", Filter: []string{"mem_free"}, }} @@ -258,19 +258,19 @@ func TestFilter_TagPass(t *testing.T) { require.NoError(t, f.Compile()) passes := [][]*telegraf.Tag{ - []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-total"}}, - []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-0"}}, - []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-1"}}, - []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-2"}}, - []*telegraf.Tag{&telegraf.Tag{Key: "mem", Value: "mem_free"}}, + {{Key: "cpu", Value: "cpu-total"}}, + {{Key: "cpu", Value: "cpu-0"}}, + {{Key: "cpu", Value: "cpu-1"}}, + {{Key: "cpu", Value: "cpu-2"}}, + {{Key: "mem", Value: "mem_free"}}, } drops := [][]*telegraf.Tag{ - []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cputotal"}}, - []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu0"}}, - []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu1"}}, - []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu2"}}, - []*telegraf.Tag{&telegraf.Tag{Key: "mem", Value: "mem_used"}}, + {{Key: "cpu", Value: "cputotal"}}, + {{Key: "cpu", Value: "cpu0"}}, + {{Key: "cpu", Value: "cpu1"}}, + {{Key: "cpu", Value: "cpu2"}}, + {{Key: "mem", Value: "mem_used"}}, } for _, tags := range passes { @@ -288,11 +288,11 @@ func TestFilter_TagPass(t *testing.T) { func TestFilter_TagDrop(t *testing.T) { filters := []TagFilter{ - TagFilter{ + { Name: "cpu", Filter: []string{"cpu-*"}, }, - TagFilter{ + { Name: "mem", Filter: []string{"mem_free"}, }} @@ -302,19 +302,19 @@ func TestFilter_TagDrop(t *testing.T) { require.NoError(t, f.Compile()) drops := [][]*telegraf.Tag{ - []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-total"}}, - []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-0"}}, - []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-1"}}, - []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu-2"}}, - []*telegraf.Tag{&telegraf.Tag{Key: "mem", Value: "mem_free"}}, + {{Key: "cpu", Value: "cpu-total"}}, + {{Key: "cpu", Value: "cpu-0"}}, + {{Key: "cpu", Value: "cpu-1"}}, + {{Key: "cpu", Value: "cpu-2"}}, + {{Key: "mem", Value: "mem_free"}}, } passes := [][]*telegraf.Tag{ - []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cputotal"}}, - []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu0"}}, - []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu1"}}, - []*telegraf.Tag{&telegraf.Tag{Key: "cpu", Value: "cpu2"}}, - 
[]*telegraf.Tag{&telegraf.Tag{Key: "mem", Value: "mem_used"}}, + {{Key: "cpu", Value: "cputotal"}}, + {{Key: "cpu", Value: "cpu0"}}, + {{Key: "cpu", Value: "cpu1"}}, + {{Key: "cpu", Value: "cpu2"}}, + {{Key: "mem", Value: "mem_used"}}, } for _, tags := range passes { @@ -442,27 +442,27 @@ func TestFilter_FilterFieldPassAndDrop(t *testing.T) { // see: https://github.com/influxdata/telegraf/issues/2860 func TestFilter_FilterTagsPassAndDrop(t *testing.T) { inputData := [][]*telegraf.Tag{ - []*telegraf.Tag{&telegraf.Tag{Key: "tag1", Value: "1"}, &telegraf.Tag{Key: "tag2", Value: "3"}}, - []*telegraf.Tag{&telegraf.Tag{Key: "tag1", Value: "1"}, &telegraf.Tag{Key: "tag2", Value: "2"}}, - []*telegraf.Tag{&telegraf.Tag{Key: "tag1", Value: "2"}, &telegraf.Tag{Key: "tag2", Value: "1"}}, - []*telegraf.Tag{&telegraf.Tag{Key: "tag1", Value: "4"}, &telegraf.Tag{Key: "tag2", Value: "1"}}, + {{Key: "tag1", Value: "1"}, {Key: "tag2", Value: "3"}}, + {{Key: "tag1", Value: "1"}, {Key: "tag2", Value: "2"}}, + {{Key: "tag1", Value: "2"}, {Key: "tag2", Value: "1"}}, + {{Key: "tag1", Value: "4"}, {Key: "tag2", Value: "1"}}, } expectedResult := []bool{false, true, false, false} filterPass := []TagFilter{ - TagFilter{ + { Name: "tag1", Filter: []string{"1", "4"}, }, } filterDrop := []TagFilter{ - TagFilter{ + { Name: "tag1", Filter: []string{"4"}, }, - TagFilter{ + { Name: "tag2", Filter: []string{"3"}, }, diff --git a/plugins/inputs/bcache/bcache.go b/plugins/inputs/bcache/bcache.go index 1171dbd92..8d20e3623 100644 --- a/plugins/inputs/bcache/bcache.go +++ b/plugins/inputs/bcache/bcache.go @@ -59,7 +59,7 @@ func prettyToBytes(v string) uint64 { } var factor uint64 factor = 1 - prefix := v[len(v)-1 : len(v)] + prefix := v[len(v)-1:] if factors[prefix] != 0 { v = v[:len(v)-1] factor = factors[prefix] diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index 369795e7f..e4d6ff249 100644 --- a/plugins/inputs/ceph/ceph.go +++ b/plugins/inputs/ceph/ceph.go @@ -278,7 +278,7 @@ func flatten(data interface{}) []*metric { switch val := data.(type) { case float64: - metrics = []*metric{&metric{make([]string, 0, 1), val}} + metrics = []*metric{{make([]string, 0, 1), val}} case map[string]interface{}: metrics = make([]*metric, 0, len(val)) for k, v := range val { diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index 9f3ded529..8197d0575 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -81,7 +81,7 @@ func TestGather(t *testing.T) { }() findSockets = func(c *Ceph) ([]*socket, error) { - return []*socket{&socket{"osd.1", typeOsd, ""}}, nil + return []*socket{{"osd.1", typeOsd, ""}}, nil } perfDump = func(binary string, s *socket) (string, error) { @@ -190,17 +190,17 @@ type SockTest struct { } var sockTestParams = []*SockTest{ - &SockTest{ + { osds: 2, mons: 2, }, - &SockTest{ + { mons: 1, }, - &SockTest{ + { osds: 1, }, - &SockTest{}, + {}, } var monPerfDump = ` diff --git a/plugins/inputs/cgroup/cgroup_linux.go b/plugins/inputs/cgroup/cgroup_linux.go index 80c15c963..bb38525b7 100644 --- a/plugins/inputs/cgroup/cgroup_linux.go +++ b/plugins/inputs/cgroup/cgroup_linux.go @@ -173,7 +173,7 @@ const valuePattern = "[\\d-]+" var fileFormats = [...]fileFormat{ // VAL\n - fileFormat{ + { name: "Single value", pattern: "^" + valuePattern + "\n$", parser: func(measurement string, fields map[string]interface{}, b []byte) { @@ -185,7 +185,7 @@ var fileFormats = [...]fileFormat{ // VAL0\n // VAL1\n // ... 
- fileFormat{ + { name: "New line separated values", pattern: "^(" + valuePattern + "\n){2,}$", parser: func(measurement string, fields map[string]interface{}, b []byte) { @@ -197,7 +197,7 @@ var fileFormats = [...]fileFormat{ }, }, // VAL0 VAL1 ...\n - fileFormat{ + { name: "Space separated values", pattern: "^(" + valuePattern + " )+\n$", parser: func(measurement string, fields map[string]interface{}, b []byte) { @@ -211,7 +211,7 @@ var fileFormats = [...]fileFormat{ // KEY0 VAL0\n // KEY1 VAL1\n // ... - fileFormat{ + { name: "New line separated key-space-value's", pattern: "^(" + keyPattern + " " + valuePattern + "\n)+$", parser: func(measurement string, fields map[string]interface{}, b []byte) { diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index 57c92b3f6..9449cbead 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -18,7 +18,7 @@ func (m *mockGatherCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsI Namespace: params.Namespace, MetricName: aws.String("Latency"), Dimensions: []*cloudwatch.Dimension{ - &cloudwatch.Dimension{ + { Name: aws.String("LoadBalancerName"), Value: aws.String("p-example"), }, @@ -100,7 +100,7 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListM Namespace: aws.String("AWS/ELB"), MetricName: aws.String(m), Dimensions: []*cloudwatch.Dimension{ - &cloudwatch.Dimension{ + { Name: aws.String("LoadBalancerName"), Value: aws.String(lb), }, @@ -112,11 +112,11 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListM Namespace: aws.String("AWS/ELB"), MetricName: aws.String(m), Dimensions: []*cloudwatch.Dimension{ - &cloudwatch.Dimension{ + { Name: aws.String("LoadBalancerName"), Value: aws.String(lb), }, - &cloudwatch.Dimension{ + { Name: aws.String("AvailabilityZone"), Value: aws.String(az), }, @@ -148,14 +148,14 @@ func TestSelectMetrics(t *testing.T) { Period: internalDuration, RateLimit: 200, Metrics: []*Metric{ - &Metric{ + { MetricNames: []string{"Latency", "RequestCount"}, Dimensions: []*Dimension{ - &Dimension{ + { Name: "LoadBalancerName", Value: "*", }, - &Dimension{ + { Name: "AvailabilityZone", Value: "*", }, diff --git a/plugins/inputs/consul/consul_test.go b/plugins/inputs/consul/consul_test.go index e3a7f2fdc..da345ce89 100644 --- a/plugins/inputs/consul/consul_test.go +++ b/plugins/inputs/consul/consul_test.go @@ -8,7 +8,7 @@ import ( ) var sampleChecks = []*api.HealthCheck{ - &api.HealthCheck{ + { Node: "localhost", CheckID: "foo.health123", Name: "foo.health", diff --git a/plugins/inputs/cpu/cpu_test.go b/plugins/inputs/cpu/cpu_test.go index b4a6f87ff..34d785350 100644 --- a/plugins/inputs/cpu/cpu_test.go +++ b/plugins/inputs/cpu/cpu_test.go @@ -163,7 +163,7 @@ func TestCPUCountIncrease(t *testing.T) { mps.On("CPUTimes").Return( []cpu.TimesStat{ - cpu.TimesStat{ + { CPU: "cpu0", }, }, nil) @@ -173,10 +173,10 @@ func TestCPUCountIncrease(t *testing.T) { mps2.On("CPUTimes").Return( []cpu.TimesStat{ - cpu.TimesStat{ + { CPU: "cpu0", }, - cpu.TimesStat{ + { CPU: "cpu1", }, }, nil) diff --git a/plugins/inputs/dcos/client_test.go b/plugins/inputs/dcos/client_test.go index 1b563c63f..7d154a43e 100644 --- a/plugins/inputs/dcos/client_test.go +++ b/plugins/inputs/dcos/client_test.go @@ -115,8 +115,8 @@ func TestGetSummary(t *testing.T) { expectedValue: &Summary{ Cluster: "a", Slaves: []Slave{ - Slave{ID: "a"}, - Slave{ID: "b"}, + {ID: "a"}, + {ID: "b"}, }, }, expectedError: 
nil, diff --git a/plugins/inputs/dcos/dcos_test.go b/plugins/inputs/dcos/dcos_test.go index 6a76f7b64..3914fa577 100644 --- a/plugins/inputs/dcos/dcos_test.go +++ b/plugins/inputs/dcos/dcos_test.go @@ -385,8 +385,8 @@ func TestGatherFilterNode(t *testing.T) { return &Summary{ Cluster: "a", Slaves: []Slave{ - Slave{ID: "x"}, - Slave{ID: "y"}, + {ID: "x"}, + {ID: "y"}, }, }, nil }, diff --git a/plugins/inputs/disk/disk_test.go b/plugins/inputs/disk/disk_test.go index c20df41db..aeb2ae92b 100644 --- a/plugins/inputs/disk/disk_test.go +++ b/plugins/inputs/disk/disk_test.go @@ -138,7 +138,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, }, usageStats: []*disk.UsageStat{ - &disk.UsageStat{ + { Path: "/", Total: 42, }, @@ -170,7 +170,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, }, usageStats: []*disk.UsageStat{ - &disk.UsageStat{ + { Path: "/hostfs/var", Total: 42, }, @@ -203,7 +203,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, }, usageStats: []*disk.UsageStat{ - &disk.UsageStat{ + { Path: "/hostfs", Total: 42, }, diff --git a/plugins/inputs/diskio/diskio_test.go b/plugins/inputs/diskio/diskio_test.go index ac5833165..41c4b53e2 100644 --- a/plugins/inputs/diskio/diskio_test.go +++ b/plugins/inputs/diskio/diskio_test.go @@ -30,7 +30,7 @@ func TestDiskIO(t *testing.T) { name: "minimal", result: Result{ stats: map[string]disk.IOCountersStat{ - "sda": disk.IOCountersStat{ + "sda": { ReadCount: 888, WriteCount: 5341, ReadBytes: 100000, @@ -46,7 +46,7 @@ func TestDiskIO(t *testing.T) { }, err: nil, metrics: []Metric{ - Metric{ + { tags: map[string]string{ "name": "sda", "serial": "ab-123-ad", @@ -70,11 +70,11 @@ func TestDiskIO(t *testing.T) { devices: []string{"sd*"}, result: Result{ stats: map[string]disk.IOCountersStat{ - "sda": disk.IOCountersStat{ + "sda": { Name: "sda", ReadCount: 42, }, - "vda": disk.IOCountersStat{ + "vda": { Name: "vda", ReadCount: 42, }, @@ -83,7 +83,7 @@ func TestDiskIO(t *testing.T) { }, err: nil, metrics: []Metric{ - Metric{ + { tags: map[string]string{ "name": "sda", "serial": "unknown", diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index 968dbf725..ac95b5ccd 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -678,35 +678,35 @@ func TestContainerStateFilter(t *testing.T) { { name: "default", expected: map[string][]string{ - "status": []string{"running"}, + "status": {"running"}, }, }, { name: "include running", include: []string{"running"}, expected: map[string][]string{ - "status": []string{"running"}, + "status": {"running"}, }, }, { name: "include glob", include: []string{"r*"}, expected: map[string][]string{ - "status": []string{"restarting", "running", "removing"}, + "status": {"restarting", "running", "removing"}, }, }, { name: "include all", include: []string{"*"}, expected: map[string][]string{ - "status": []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"}, + "status": {"created", "restarting", "running", "removing", "paused", "exited", "dead"}, }, }, { name: "exclude all", exclude: []string{"*"}, expected: map[string][]string{ - "status": []string{}, + "status": {}, }, }, { @@ -714,7 +714,7 @@ func TestContainerStateFilter(t *testing.T) { include: []string{"*"}, exclude: []string{"exited"}, expected: map[string][]string{ - "status": []string{"created", "restarting", "running", "removing", "paused", "dead"}, + "status": {"created", "restarting", "running", "removing", "paused", "dead"}, }, }, } diff --git 
a/plugins/inputs/docker/docker_testdata.go b/plugins/inputs/docker/docker_testdata.go index bb275a1cc..7302e219d 100644 --- a/plugins/inputs/docker/docker_testdata.go +++ b/plugins/inputs/docker/docker_testdata.go @@ -60,7 +60,7 @@ var info = types.Info{ } var containerList = []types.Container{ - types.Container{ + { ID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb", Names: []string{"/etcd"}, Image: "quay.io/coreos/etcd:v2.2.2", @@ -68,22 +68,22 @@ var containerList = []types.Container{ Created: 1455941930, Status: "Up 4 hours", Ports: []types.Port{ - types.Port{ + { PrivatePort: 7001, PublicPort: 0, Type: "tcp", }, - types.Port{ + { PrivatePort: 4001, PublicPort: 0, Type: "tcp", }, - types.Port{ + { PrivatePort: 2380, PublicPort: 0, Type: "tcp", }, - types.Port{ + { PrivatePort: 2379, PublicPort: 2379, Type: "tcp", @@ -97,7 +97,7 @@ var containerList = []types.Container{ SizeRw: 0, SizeRootFs: 0, }, - types.Container{ + { ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173", Names: []string{"/etcd2"}, Image: "quay.io:4443/coreos/etcd:v2.2.2", @@ -105,22 +105,22 @@ var containerList = []types.Container{ Created: 1455941933, Status: "Up 4 hours", Ports: []types.Port{ - types.Port{ + { PrivatePort: 7002, PublicPort: 0, Type: "tcp", }, - types.Port{ + { PrivatePort: 4002, PublicPort: 0, Type: "tcp", }, - types.Port{ + { PrivatePort: 2381, PublicPort: 0, Type: "tcp", }, - types.Port{ + { PrivatePort: 2382, PublicPort: 2382, Type: "tcp", @@ -134,15 +134,15 @@ var containerList = []types.Container{ SizeRw: 0, SizeRootFs: 0, }, - types.Container{ + { ID: "e8a713dd90604f5a257b97c15945e047ab60ed5b2c4397c5a6b5bf40e1bd2791", Names: []string{"/acme"}, }, - types.Container{ + { ID: "9bc6faf9ba8106fae32e8faafd38a1dd6f6d262bec172398cc10bc03c0d6841a", Names: []string{"/acme-test"}, }, - types.Container{ + { ID: "d4ccced494a1d5fe8ebdb0a86335a0dab069319912221e5838a132ab18a8bc84", Names: []string{"/foo"}, }, @@ -150,7 +150,7 @@ var containerList = []types.Container{ var two = uint64(2) var ServiceList = []swarm.Service{ - swarm.Service{ + { ID: "qolkls9g5iasdiuihcyz9rnx2", Spec: swarm.ServiceSpec{ Annotations: swarm.Annotations{ @@ -163,7 +163,7 @@ var ServiceList = []swarm.Service{ }, }, }, - swarm.Service{ + { ID: "qolkls9g5iasdiuihcyz9rn3", Spec: swarm.ServiceSpec{ Annotations: swarm.Annotations{ @@ -177,7 +177,7 @@ var ServiceList = []swarm.Service{ } var TaskList = []swarm.Task{ - swarm.Task{ + { ID: "kwh0lv7hwwbh", ServiceID: "qolkls9g5iasdiuihcyz9rnx2", NodeID: "0cl4jturcyd1ks3fwpd010kor", @@ -186,7 +186,7 @@ var TaskList = []swarm.Task{ }, DesiredState: "running", }, - swarm.Task{ + { ID: "u78m5ojbivc3", ServiceID: "qolkls9g5iasdiuihcyz9rnx2", NodeID: "0cl4jturcyd1ks3fwpd010kor", @@ -195,7 +195,7 @@ var TaskList = []swarm.Task{ }, DesiredState: "running", }, - swarm.Task{ + { ID: "1n1uilkhr98l", ServiceID: "qolkls9g5iasdiuihcyz9rn3", NodeID: "0cl4jturcyd1ks3fwpd010kor", @@ -207,13 +207,13 @@ var TaskList = []swarm.Task{ } var NodeList = []swarm.Node{ - swarm.Node{ + { ID: "0cl4jturcyd1ks3fwpd010kor", Status: swarm.NodeStatus{ State: "ready", }, }, - swarm.Node{ + { ID: "0cl4jturcyd1ks3fwpd010kor", Status: swarm.NodeStatus{ State: "ready", diff --git a/plugins/inputs/graylog/graylog_test.go b/plugins/inputs/graylog/graylog_test.go index a5088cf7d..f8008f1d9 100644 --- a/plugins/inputs/graylog/graylog_test.go +++ b/plugins/inputs/graylog/graylog_test.go @@ -135,7 +135,7 @@ func (c *mockHTTPClient) HTTPClient() *http.Client { // *HttpJson: Pointer to an HttpJson 
object that uses the generated mock HTTP client func genMockGrayLog(response string, statusCode int) []*GrayLog { return []*GrayLog{ - &GrayLog{ + { client: &mockHTTPClient{responseBody: response, statusCode: statusCode}, Servers: []string{ "http://localhost:12900/system/metrics/multiple", diff --git a/plugins/inputs/hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/hddtemp_test.go index 37dfef7d6..e09e833e7 100644 --- a/plugins/inputs/hddtemp/hddtemp_test.go +++ b/plugins/inputs/hddtemp/hddtemp_test.go @@ -14,13 +14,13 @@ type mockFetcher struct { func (h *mockFetcher) Fetch(address string) ([]hddtemp.Disk, error) { return []hddtemp.Disk{ - hddtemp.Disk{ + { DeviceName: "Disk1", Model: "Model1", Temperature: 13, Unit: "C", }, - hddtemp.Disk{ + { DeviceName: "Disk2", Model: "Model2", Temperature: 14, diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go index 7134ffb46..909759199 100644 --- a/plugins/inputs/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -163,7 +163,7 @@ func (c *mockHTTPClient) HTTPClient() *http.Client { // *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client func genMockHttpJson(response string, statusCode int) []*HttpJson { return []*HttpJson{ - &HttpJson{ + { client: &mockHTTPClient{responseBody: response, statusCode: statusCode}, Servers: []string{ "http://server1.example.com/metrics/", @@ -180,7 +180,7 @@ func genMockHttpJson(response string, statusCode int) []*HttpJson { "apiVersion": "v1", }, }, - &HttpJson{ + { client: &mockHTTPClient{responseBody: response, statusCode: statusCode}, Servers: []string{ "http://server3.example.com/metrics/", diff --git a/plugins/inputs/interrupts/interrupts.go b/plugins/inputs/interrupts/interrupts.go index 30b7ee182..9e8c8ea24 100644 --- a/plugins/inputs/interrupts/interrupts.go +++ b/plugins/inputs/interrupts/interrupts.go @@ -60,7 +60,7 @@ scan: } irqid := strings.TrimRight(fields[0], ":") irq := NewIRQ(irqid) - irqvals := fields[1:len(fields)] + irqvals := fields[1:] for i := 0; i < cpucount; i++ { if i < len(irqvals) { irqval, err := strconv.ParseInt(irqvals[i], 10, 64) diff --git a/plugins/inputs/interrupts/interrupts_test.go b/plugins/inputs/interrupts/interrupts_test.go index cf1dc949e..3990461b1 100644 --- a/plugins/inputs/interrupts/interrupts_test.go +++ b/plugins/inputs/interrupts/interrupts_test.go @@ -19,31 +19,31 @@ NET_RX: 867028 225 TASKLET: 205 0` f := bytes.NewBufferString(interruptStr) parsed := []IRQ{ - IRQ{ + { ID: "0", Type: "IO-APIC-edge", Device: "timer", Cpus: []int64{int64(134), int64(0)}, Total: int64(134), }, - IRQ{ + { ID: "1", Type: "IO-APIC-edge", Device: "i8042", Cpus: []int64{int64(7), int64(3)}, Total: int64(10), }, - IRQ{ + { ID: "NMI", Type: "Non-maskable interrupts", Cpus: []int64{int64(0), int64(0)}, Total: int64(0), }, - IRQ{ + { ID: "LOC", Type: "Local timer interrupts", Cpus: []int64{int64(2338608687), int64(2334309625)}, Total: int64(4672918312), }, - IRQ{ + { ID: "MIS", Cpus: []int64{int64(0)}, Total: int64(0), }, - IRQ{ + { ID: "NET_RX", Cpus: []int64{int64(867028), int64(225)}, Total: int64(867253), }, - IRQ{ + { ID: "TASKLET", Cpus: []int64{int64(205), int64(0)}, Total: int64(205), }, @@ -88,91 +88,91 @@ func TestParseInterruptsBad(t *testing.T) { IPI6: 0 0 0 0 completion interrupts` f := bytes.NewBufferString(interruptStr) parsed := []IRQ{ - IRQ{ + { ID: "16", Type: "bcm2836-timer", Device: "0 Edge arch_timer", Cpus: []int64{0, 0, 0, 0}, }, - IRQ{ + { ID: "17", Type: "bcm2836-timer", 
Device: "1 Edge arch_timer", Cpus: []int64{127224250, 118424219, 127224437, 117885416}, Total: 490758322, }, - IRQ{ + { ID: "21", Type: "bcm2836-pmu", Device: "9 Edge arm-pmu", Cpus: []int64{0, 0, 0, 0}, }, - IRQ{ + { ID: "23", Type: "ARMCTRL-level", Device: "1 Edge 3f00b880.mailbox", Cpus: []int64{1549514, 0, 0, 0}, Total: 1549514, }, - IRQ{ + { ID: "24", Type: "ARMCTRL-level", Device: "2 Edge VCHIQ doorbell", Cpus: []int64{2, 0, 0, 0}, Total: 2, }, - IRQ{ + { ID: "46", Type: "ARMCTRL-level", Device: "48 Edge bcm2708_fb dma", Cpus: []int64{0, 0, 0, 0}, }, - IRQ{ + { ID: "48", Type: "ARMCTRL-level", Device: "50 Edge DMA IRQ", Cpus: []int64{0, 0, 0, 0}, }, - IRQ{ + { ID: "50", Type: "ARMCTRL-level", Device: "52 Edge DMA IRQ", Cpus: []int64{0, 0, 0, 0}, }, - IRQ{ + { ID: "51", Type: "ARMCTRL-level", Device: "53 Edge DMA IRQ", Cpus: []int64{208, 0, 0, 0}, Total: 208, }, - IRQ{ + { ID: "54", Type: "ARMCTRL-level", Device: "56 Edge DMA IRQ", Cpus: []int64{883002, 0, 0, 0}, Total: 883002, }, - IRQ{ + { ID: "59", Type: "ARMCTRL-level", Device: "61 Edge bcm2835-auxirq", Cpus: []int64{0, 0, 0, 0}, }, - IRQ{ + { ID: "62", Type: "ARMCTRL-level", Device: "64 Edge dwc_otg, dwc_otg_pcd, dwc_otg_hcd:usb1", Cpus: []int64{521451447, 0, 0, 0}, Total: 521451447, }, - IRQ{ + { ID: "86", Type: "ARMCTRL-level", Device: "88 Edge mmc0", Cpus: []int64{857597, 0, 0, 0}, Total: 857597, }, - IRQ{ + { ID: "87", Type: "ARMCTRL-level", Device: "89 Edge uart-pl011", Cpus: []int64{4938, 0, 0, 0}, Total: 4938, }, - IRQ{ + { ID: "92", Type: "ARMCTRL-level", Device: "94 Edge mmc1", Cpus: []int64{5669, 0, 0, 0}, Total: 5669, }, - IRQ{ + { ID: "IPI0", Type: "CPU wakeup interrupts", Cpus: []int64{0, 0, 0, 0}, }, - IRQ{ + { ID: "IPI1", Type: "Timer broadcast interrupts", Cpus: []int64{0, 0, 0, 0}, }, - IRQ{ + { ID: "IPI2", Type: "Rescheduling interrupts", Cpus: []int64{23564958, 23464876, 23531165, 23040826}, Total: 93601825, }, - IRQ{ + { ID: "IPI3", Type: "Function call interrupts", Cpus: []int64{148438, 639704, 644266, 588150}, Total: 2020558, }, - IRQ{ + { ID: "IPI4", Type: "CPU stop interrupts", Cpus: []int64{0, 0, 0, 0}, }, - IRQ{ + { ID: "IPI5", Type: "IRQ work interrupts", Cpus: []int64{4348149, 1843985, 3819457, 1822877}, Total: 11834468, }, - IRQ{ + { ID: "IPI6", Type: "completion interrupts", Cpus: []int64{0, 0, 0, 0}, }, diff --git a/plugins/inputs/ipmi_sensor/connection.go b/plugins/inputs/ipmi_sensor/connection.go index 87922b984..8ce5e3448 100644 --- a/plugins/inputs/ipmi_sensor/connection.go +++ b/plugins/inputs/ipmi_sensor/connection.go @@ -28,7 +28,7 @@ func NewConnection(server string, privilege string) *Connection { if inx1 > 0 { security := server[0:inx1] - connstr = server[inx1+1 : len(server)] + connstr = server[inx1+1:] up := strings.SplitN(security, ":", 2) conn.Username = up[0] conn.Password = up[1] diff --git a/plugins/inputs/ipset/ipset_test.go b/plugins/inputs/ipset/ipset_test.go index 9438c806d..31a9f3cfc 100644 --- a/plugins/inputs/ipset/ipset_test.go +++ b/plugins/inputs/ipset/ipset_test.go @@ -50,8 +50,8 @@ func TestIpset(t *testing.T) { add myset 3.4.5.6 packets 3 bytes 222 `, tags: []map[string]string{ - map[string]string{"set": "myset", "rule": "1.2.3.4"}, - map[string]string{"set": "myset", "rule": "3.4.5.6"}, + {"set": "myset", "rule": "1.2.3.4"}, + {"set": "myset", "rule": "3.4.5.6"}, }, fields: [][]map[string]interface{}{ {map[string]interface{}{"packets_total": uint64(1328), "bytes_total": uint64(79680)}}, @@ -66,8 +66,8 @@ func TestIpset(t *testing.T) { add myset 3.4.5.6 packets 3 bytes 
222 "3rd IP" `, tags: []map[string]string{ - map[string]string{"set": "myset", "rule": "1.2.3.4"}, - map[string]string{"set": "myset", "rule": "3.4.5.6"}, + {"set": "myset", "rule": "1.2.3.4"}, + {"set": "myset", "rule": "3.4.5.6"}, }, fields: [][]map[string]interface{}{ {map[string]interface{}{"packets_total": uint64(1328), "bytes_total": uint64(79680)}}, diff --git a/plugins/inputs/iptables/iptables_test.go b/plugins/inputs/iptables/iptables_test.go index a98c24190..cca41e1f4 100644 --- a/plugins/inputs/iptables/iptables_test.go +++ b/plugins/inputs/iptables/iptables_test.go @@ -42,7 +42,7 @@ func TestIptables_Gather(t *testing.T) { pkts bytes target prot opt in out source destination 57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foobar */ `}, - tags: []map[string]string{map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foobar"}}, + tags: []map[string]string{{"table": "filter", "chain": "INPUT", "ruleid": "foobar"}}, fields: [][]map[string]interface{}{ {map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}}, }, @@ -98,9 +98,9 @@ func TestIptables_Gather(t *testing.T) { `, }, tags: []map[string]string{ - map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foo"}, - map[string]string{"table": "filter", "chain": "FORWARD", "ruleid": "bar"}, - map[string]string{"table": "filter", "chain": "FORWARD", "ruleid": "foobar"}, + {"table": "filter", "chain": "INPUT", "ruleid": "foo"}, + {"table": "filter", "chain": "FORWARD", "ruleid": "bar"}, + {"table": "filter", "chain": "FORWARD", "ruleid": "foobar"}, }, fields: [][]map[string]interface{}{ {map[string]interface{}{"pkts": uint64(200), "bytes": uint64(4520)}}, @@ -118,7 +118,7 @@ func TestIptables_Gather(t *testing.T) { 100 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:80 `}, tags: []map[string]string{ - map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foobar"}, + {"table": "filter", "chain": "INPUT", "ruleid": "foobar"}, }, fields: [][]map[string]interface{}{ {map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}}, @@ -134,8 +134,8 @@ func TestIptables_Gather(t *testing.T) { 0 0 CLASSIFY all -- * * 1.3.5.7 0.0.0.0/0 /* test2 */ CLASSIFY set 1:4 `}, tags: []map[string]string{ - map[string]string{"table": "mangle", "chain": "SHAPER", "ruleid": "test"}, - map[string]string{"table": "mangle", "chain": "SHAPER", "ruleid": "test2"}, + {"table": "mangle", "chain": "SHAPER", "ruleid": "test"}, + {"table": "mangle", "chain": "SHAPER", "ruleid": "test2"}, }, fields: [][]map[string]interface{}{ {map[string]interface{}{"pkts": uint64(0), "bytes": uint64(0)}}, @@ -163,7 +163,7 @@ func TestIptables_Gather(t *testing.T) { 123 456 all -- eth0 * 0.0.0.0/0 0.0.0.0/0 /* all_recv */ `}, tags: []map[string]string{ - map[string]string{"table": "all_recv", "chain": "accountfwd", "ruleid": "all_recv"}, + {"table": "all_recv", "chain": "accountfwd", "ruleid": "all_recv"}, }, fields: [][]map[string]interface{}{ {map[string]interface{}{"pkts": uint64(123), "bytes": uint64(456)}}, diff --git a/plugins/inputs/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go index b47ffbc26..a6acd2953 100644 --- a/plugins/inputs/jolokia/jolokia_test.go +++ b/plugins/inputs/jolokia/jolokia_test.go @@ -117,7 +117,7 @@ const invalidJSON = "I don't think this is JSON" const empty = "" -var Servers = []Server{Server{Name: "as1", Host: "127.0.0.1", Port: "8080"}} +var Servers = []Server{{Name: "as1", Host: "127.0.0.1", Port: "8080"}} var HeapMetric = Metric{Name: "heap_memory_usage", Mbean: 
"java.lang:type=Memory", Attribute: "HeapMemoryUsage"} var UsedHeapMetric = Metric{Name: "heap_memory_usage", diff --git a/plugins/inputs/jolokia2/gatherer_test.go b/plugins/inputs/jolokia2/gatherer_test.go index ca83cf0ac..4ba4b586a 100644 --- a/plugins/inputs/jolokia2/gatherer_test.go +++ b/plugins/inputs/jolokia2/gatherer_test.go @@ -17,7 +17,7 @@ func TestJolokia2_makeReadRequests(t *testing.T) { Mbean: "test:foo=bar", }, expected: []ReadRequest{ - ReadRequest{ + { Mbean: "test:foo=bar", Attributes: []string{}, }, @@ -29,7 +29,7 @@ func TestJolokia2_makeReadRequests(t *testing.T) { Paths: []string{"biz"}, }, expected: []ReadRequest{ - ReadRequest{ + { Mbean: "test:foo=bar", Attributes: []string{"biz"}, }, @@ -41,7 +41,7 @@ func TestJolokia2_makeReadRequests(t *testing.T) { Paths: []string{"baz", "biz"}, }, expected: []ReadRequest{ - ReadRequest{ + { Mbean: "test:foo=bar", Attributes: []string{"baz", "biz"}, }, @@ -53,7 +53,7 @@ func TestJolokia2_makeReadRequests(t *testing.T) { Paths: []string{"biz/baz"}, }, expected: []ReadRequest{ - ReadRequest{ + { Mbean: "test:foo=bar", Attributes: []string{"biz"}, Path: "baz", @@ -66,7 +66,7 @@ func TestJolokia2_makeReadRequests(t *testing.T) { Paths: []string{"biz/baz/fiz/faz"}, }, expected: []ReadRequest{ - ReadRequest{ + { Mbean: "test:foo=bar", Attributes: []string{"biz"}, Path: "baz/fiz/faz", @@ -79,12 +79,12 @@ func TestJolokia2_makeReadRequests(t *testing.T) { Paths: []string{"baz/biz", "faz/fiz"}, }, expected: []ReadRequest{ - ReadRequest{ + { Mbean: "test:foo=bar", Attributes: []string{"baz"}, Path: "biz", }, - ReadRequest{ + { Mbean: "test:foo=bar", Attributes: []string{"faz"}, Path: "fiz", diff --git a/plugins/inputs/jolokia2/jolokia_test.go b/plugins/inputs/jolokia2/jolokia_test.go index f94606ae6..62dd1d3a0 100644 --- a/plugins/inputs/jolokia2/jolokia_test.go +++ b/plugins/inputs/jolokia2/jolokia_test.go @@ -748,7 +748,7 @@ func setupPlugin(t *testing.T, conf string) telegraf.Input { t.Fatalf("Unable to parse config! 
%v", err) } - for name, _ := range table.Fields { + for name := range table.Fields { object := table.Fields[name] switch name { case "jolokia2_agent": diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 15e2bfccb..9190ceae8 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -42,8 +42,8 @@ type Mesos struct { } var allMetrics = map[Role][]string{ - MASTER: []string{"resources", "master", "system", "agents", "frameworks", "tasks", "messages", "evqueue", "registrar"}, - SLAVE: []string{"resources", "agent", "system", "executors", "tasks", "messages"}, + MASTER: {"resources", "master", "system", "agents", "frameworks", "tasks", "messages", "evqueue", "registrar"}, + SLAVE: {"resources", "agent", "system", "executors", "tasks", "messages"}, } var sampleConfig = ` diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index 5f4dd4c2c..de75ed7e1 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -53,7 +53,7 @@ func TestAddNonReplStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key, _ := range DefaultStats { + for key := range DefaultStats { assert.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -74,7 +74,7 @@ func TestAddReplStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key, _ := range MmapStats { + for key := range MmapStats { assert.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -106,7 +106,7 @@ func TestAddWiredTigerStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key, _ := range WiredTigerStats { + for key := range WiredTigerStats { assert.True(t, acc.HasFloatField("mongodb", key)) } } @@ -127,7 +127,7 @@ func TestAddShardStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key, _ := range DefaultShardStats { + for key := range DefaultShardStats { assert.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -156,8 +156,8 @@ func TestAddShardHostStats(t *testing.T) { d.flush(&acc) var hostsFound []string - for host, _ := range hostStatLines { - for key, _ := range ShardHostStats { + for host := range hostStatLines { + for key := range ShardHostStats { assert.True(t, acc.HasInt64Field("mongodb_shard_stats", key)) } diff --git a/plugins/inputs/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go index c8ef5f240..91a3c0709 100644 --- a/plugins/inputs/mongodb/mongodb_server_test.go +++ b/plugins/inputs/mongodb/mongodb_server_test.go @@ -35,7 +35,7 @@ func TestAddDefaultStats(t *testing.T) { err = server.gatherData(&acc, false) require.NoError(t, err) - for key, _ := range DefaultStats { + for key := range DefaultStats { assert.True(t, acc.HasInt64Field("mongodb", key)) } } diff --git a/plugins/inputs/net/net_test.go b/plugins/inputs/net/net_test.go index 035dbaecd..3c4c3c7ef 100644 --- a/plugins/inputs/net/net_test.go +++ b/plugins/inputs/net/net_test.go @@ -31,7 +31,7 @@ func TestNetStats(t *testing.T) { mps.On("NetIO").Return([]net.IOCountersStat{netio}, nil) netprotos := []net.ProtoCountersStat{ - net.ProtoCountersStat{ + { Protocol: "Udp", Stats: map[string]int64{ "InDatagrams": 4655, @@ -42,16 +42,16 @@ func TestNetStats(t *testing.T) { mps.On("NetProto").Return(netprotos, nil) netstats := []net.ConnectionStat{ - net.ConnectionStat{ + { Type: syscall.SOCK_DGRAM, }, - net.ConnectionStat{ + { Status: "ESTABLISHED", }, - net.ConnectionStat{ + { Status: "ESTABLISHED", }, - net.ConnectionStat{ + { Status: "CLOSE", }, } diff --git 
diff --git a/plugins/inputs/nsq_consumer/nsq_consumer_test.go b/plugins/inputs/nsq_consumer/nsq_consumer_test.go
index a6d8c27e5..a8e743c12 100644
--- a/plugins/inputs/nsq_consumer/nsq_consumer_test.go
+++ b/plugins/inputs/nsq_consumer/nsq_consumer_test.go
@@ -24,12 +24,12 @@ func TestReadsMetricsFromNSQ(t *testing.T) {

 script := []instruction{
 // SUB
- instruction{0, nsq.FrameTypeResponse, []byte("OK")},
+ {0, nsq.FrameTypeResponse, []byte("OK")},
 // IDENTIFY
- instruction{0, nsq.FrameTypeResponse, []byte("OK")},
- instruction{20 * time.Millisecond, nsq.FrameTypeMessage, frameMessage(msg)},
+ {0, nsq.FrameTypeResponse, []byte("OK")},
+ {20 * time.Millisecond, nsq.FrameTypeMessage, frameMessage(msg)},
 // needed to exit test
- instruction{100 * time.Millisecond, -1, []byte("exit")},
+ {100 * time.Millisecond, -1, []byte("exit")},
 }

 addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:4155")
diff --git a/plugins/inputs/nvidia_smi/nvidia_smi.go b/plugins/inputs/nvidia_smi/nvidia_smi.go
index abce80dcb..ea708f24f 100644
--- a/plugins/inputs/nvidia_smi/nvidia_smi.go
+++ b/plugins/inputs/nvidia_smi/nvidia_smi.go
@@ -18,19 +18,19 @@ var (
 measurement = "nvidia_smi"
 metrics = "fan.speed,memory.total,memory.used,memory.free,pstate,temperature.gpu,name,uuid,compute_mode,utilization.gpu,utilization.memory,index,power.draw"
 metricNames = [][]string{
- []string{"fan_speed", "integer"},
- []string{"memory_total", "integer"},
- []string{"memory_used", "integer"},
- []string{"memory_free", "integer"},
- []string{"pstate", "tag"},
- []string{"temperature_gpu", "integer"},
- []string{"name", "tag"},
- []string{"uuid", "tag"},
- []string{"compute_mode", "tag"},
- []string{"utilization_gpu", "integer"},
- []string{"utilization_memory", "integer"},
- []string{"index", "tag"},
- []string{"power_draw", "float"},
+ {"fan_speed", "integer"},
+ {"memory_total", "integer"},
+ {"memory_used", "integer"},
+ {"memory_free", "integer"},
+ {"pstate", "tag"},
+ {"temperature_gpu", "integer"},
+ {"name", "tag"},
+ {"uuid", "tag"},
+ {"compute_mode", "tag"},
+ {"utilization_gpu", "integer"},
+ {"utilization_memory", "integer"},
+ {"index", "tag"},
+ {"power_draw", "float"},
 }
 )

diff --git a/plugins/inputs/pf/pf.go b/plugins/inputs/pf/pf.go
index 04b5f9d21..035c44fbe 100644
--- a/plugins/inputs/pf/pf.go
+++ b/plugins/inputs/pf/pf.go
@@ -72,11 +72,11 @@ type pfctlOutputStanza struct {
 }

 var pfctlOutputStanzas = []*pfctlOutputStanza{
- &pfctlOutputStanza{
+ {
 HeaderRE: regexp.MustCompile("^State Table"),
 ParseFunc: parseStateTable,
 },
- &pfctlOutputStanza{
+ {
 HeaderRE: regexp.MustCompile("^Counters"),
 ParseFunc: parseCounterTable,
 },
@@ -127,10 +127,10 @@ type Entry struct {
 }

 var StateTable = []*Entry{
- &Entry{"entries", "current entries", -1},
- &Entry{"searches", "searches", -1},
- &Entry{"inserts", "inserts", -1},
- &Entry{"removals", "removals", -1},
+ {"entries", "current entries", -1},
+ {"searches", "searches", -1},
+ {"inserts", "inserts", -1},
+ {"removals", "removals", -1},
 }

 var stateTableRE = regexp.MustCompile(`^ (.*?)\s+(\d+)`)
@@ -140,21 +140,21 @@ func parseStateTable(lines []string, fields map[string]interface{}) error {
 }

 var CounterTable = []*Entry{
- &Entry{"match", "match", -1},
- &Entry{"bad-offset", "bad-offset", -1},
- &Entry{"fragment", "fragment", -1},
- &Entry{"short", "short", -1},
- &Entry{"normalize", "normalize", -1},
- &Entry{"memory", "memory", -1},
- &Entry{"bad-timestamp", "bad-timestamp", -1},
- &Entry{"congestion", "congestion", -1},
- &Entry{"ip-option", "ip-option", -1},
- &Entry{"proto-cksum",
"proto-cksum", -1}, - &Entry{"state-mismatch", "state-mismatch", -1}, - &Entry{"state-insert", "state-insert", -1}, - &Entry{"state-limit", "state-limit", -1}, - &Entry{"src-limit", "src-limit", -1}, - &Entry{"synproxy", "synproxy", -1}, + {"match", "match", -1}, + {"bad-offset", "bad-offset", -1}, + {"fragment", "fragment", -1}, + {"short", "short", -1}, + {"normalize", "normalize", -1}, + {"memory", "memory", -1}, + {"bad-timestamp", "bad-timestamp", -1}, + {"congestion", "congestion", -1}, + {"ip-option", "ip-option", -1}, + {"proto-cksum", "proto-cksum", -1}, + {"state-mismatch", "state-mismatch", -1}, + {"state-insert", "state-insert", -1}, + {"state-limit", "state-limit", -1}, + {"src-limit", "src-limit", -1}, + {"synproxy", "synproxy", -1}, } var counterTableRE = regexp.MustCompile(`^ (.*?)\s+(\d+)`) diff --git a/plugins/inputs/pf/pf_test.go b/plugins/inputs/pf/pf_test.go index 0b90d949a..af73d66ad 100644 --- a/plugins/inputs/pf/pf_test.go +++ b/plugins/inputs/pf/pf_test.go @@ -23,13 +23,13 @@ func TestPfctlInvocation(t *testing.T) { var testCases = []pfctlInvocationTestCase{ // 0: no sudo - pfctlInvocationTestCase{ + { config: PF{UseSudo: false}, cmd: "fakepfctl", args: []string{"-s", "info"}, }, // 1: with sudo - pfctlInvocationTestCase{ + { config: PF{UseSudo: true}, cmd: "fakesudo", args: []string{"fakepfctl", "-s", "info"}, @@ -60,9 +60,9 @@ func TestPfMeasurements(t *testing.T) { testCases := []pfTestCase{ // 0: nil input should raise an error - pfTestCase{TestInput: "", err: errParseHeader}, + {TestInput: "", err: errParseHeader}, // 1: changes to pfctl output should raise an error - pfTestCase{TestInput: `Status: Enabled for 161 days 21:24:45 Debug: Urgent + {TestInput: `Status: Enabled for 161 days 21:24:45 Debug: Urgent Interface Stats for re1 IPv4 IPv6 Bytes In 2585823744614 1059233657221 @@ -99,7 +99,7 @@ Counters err: errMissingData("current entries"), }, // 2: bad numbers should raise an error - pfTestCase{TestInput: `Status: Enabled for 0 days 00:26:05 Debug: Urgent + {TestInput: `Status: Enabled for 0 days 00:26:05 Debug: Urgent State Table Total Rate current entries -23 @@ -125,7 +125,7 @@ Counters `, err: errMissingData("current entries"), }, - pfTestCase{TestInput: `Status: Enabled for 0 days 00:26:05 Debug: Urgent + {TestInput: `Status: Enabled for 0 days 00:26:05 Debug: Urgent State Table Total Rate current entries 2 @@ -150,7 +150,7 @@ Counters synproxy 0 0.0/s `, measurements: []measurementResult{ - measurementResult{ + { fields: map[string]interface{}{ "entries": int64(2), "searches": int64(11325), @@ -175,7 +175,7 @@ Counters }, }, }, - pfTestCase{TestInput: `Status: Enabled for 161 days 21:24:45 Debug: Urgent + {TestInput: `Status: Enabled for 161 days 21:24:45 Debug: Urgent Interface Stats for re1 IPv4 IPv6 Bytes In 2585823744614 1059233657221 @@ -210,7 +210,7 @@ Counters synproxy 0 0.0/s `, measurements: []measurementResult{ - measurementResult{ + { fields: map[string]interface{}{ "entries": int64(649), "searches": int64(18421725761), diff --git a/plugins/inputs/snmp/snmp_mocks_test.go b/plugins/inputs/snmp/snmp_mocks_test.go index 63a8a80ec..56d9326f1 100644 --- a/plugins/inputs/snmp/snmp_mocks_test.go +++ b/plugins/inputs/snmp/snmp_mocks_test.go @@ -61,26 +61,26 @@ func init() { // BEGIN GO GENERATE CONTENT var mockedCommandResults = map[string]mockedCommandResult{ - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0": mockedCommandResult{stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n 
STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.1": mockedCommandResult{stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.2": mockedCommandResult{stdout: "TEST::1.2\nanonymous#1 OBJECT-TYPE\n -- FROM\tTEST\n::= { iso(1) 0 testOID(0) 1 2 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x001.0.0.1.1": mockedCommandResult{stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1": mockedCommandResult{stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1.0": mockedCommandResult{stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.5": mockedCommandResult{stdout: "TEST::testTableEntry.5\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 5 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.2.3": mockedCommandResult{stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00.iso.2.3": mockedCommandResult{stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.999": mockedCommandResult{stdout: ".999\n [TRUNCATED]\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::server": mockedCommandResult{stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::server.0": mockedCommandResult{stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::testTable": mockedCommandResult{stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::connections": mockedCommandResult{stdout: "TEST::connections\nconnections OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tINTEGER\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 2 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::latency": mockedCommandResult{stdout: "TEST::latency\nlatency OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET 
STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 3 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::description": mockedCommandResult{stdout: "TEST::description\ndescription OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 4 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::hostname": mockedCommandResult{stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00IF-MIB::ifPhysAddress.1": mockedCommandResult{stdout: "IF-MIB::ifPhysAddress.1\nifPhysAddress OBJECT-TYPE\n -- FROM\tIF-MIB\n -- TEXTUAL CONVENTION PhysAddress\n SYNTAX\tOCTET STRING\n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"The interface's address at its protocol sub-layer. For\n example, for an 802.x interface, this object normally\n contains a MAC address. The interface's media-specific MIB\n must define the bit and byte ordering and the format of the\n value of this object. For interfaces which do not have such\n an address (e.g., a serial line), this object should contain\n an octet string of zero length.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) interfaces(2) ifTable(2) ifEntry(1) ifPhysAddress(6) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00BRIDGE-MIB::dot1dTpFdbAddress.1": mockedCommandResult{stdout: "BRIDGE-MIB::dot1dTpFdbAddress.1\ndot1dTpFdbAddress OBJECT-TYPE\n -- FROM\tBRIDGE-MIB\n -- TEXTUAL CONVENTION MacAddress\n SYNTAX\tOCTET STRING (6) \n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"A unicast MAC address for which the bridge has\n forwarding and/or filtering information.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) dot1dBridge(17) dot1dTp(4) dot1dTpFdbTable(3) dot1dTpFdbEntry(1) dot1dTpFdbAddress(1) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TCP-MIB::tcpConnectionLocalAddress.1": mockedCommandResult{stdout: "TCP-MIB::tcpConnectionLocalAddress.1\ntcpConnectionLocalAddress OBJECT-TYPE\n -- FROM\tTCP-MIB\n -- TEXTUAL CONVENTION InetAddress\n SYNTAX\tOCTET STRING (0..255) \n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n DESCRIPTION\t\"The local IP address for this TCP connection. 
The type\n of this address is determined by the value of\n tcpConnectionLocalAddressType.\n\n As this object is used in the index for the\n tcpConnectionTable, implementors should be\n careful not to create entries that would result in OIDs\n with more than 128 subidentifiers; otherwise the information\n cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) tcp(6) tcpConnectionTable(19) tcpConnectionEntry(1) tcpConnectionLocalAddress(2) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00TEST::testTable.1": mockedCommandResult{stdout: "TEST::testTableEntry\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) 1 }\n", stderr: "", exitError: false}, - "snmptable\x00-Ch\x00-Cl\x00-c\x00public\x00127.0.0.1\x00TEST::testTable": mockedCommandResult{stdout: "server connections latency description \nTEST::testTable: No entries\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0": {stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.1": {stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.2": {stdout: "TEST::1.2\nanonymous#1 OBJECT-TYPE\n -- FROM\tTEST\n::= { iso(1) 0 testOID(0) 1 2 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x001.0.0.1.1": {stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1": {stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1.0": {stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.5": {stdout: "TEST::testTableEntry.5\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 5 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.2.3": {stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00.iso.2.3": {stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.999": {stdout: ".999\n [TRUNCATED]\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::server": {stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: 
false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::server.0": {stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::testTable": {stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::connections": {stdout: "TEST::connections\nconnections OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tINTEGER\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 2 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::latency": {stdout: "TEST::latency\nlatency OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 3 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::description": {stdout: "TEST::description\ndescription OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 4 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::hostname": {stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00IF-MIB::ifPhysAddress.1": {stdout: "IF-MIB::ifPhysAddress.1\nifPhysAddress OBJECT-TYPE\n -- FROM\tIF-MIB\n -- TEXTUAL CONVENTION PhysAddress\n SYNTAX\tOCTET STRING\n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"The interface's address at its protocol sub-layer. For\n example, for an 802.x interface, this object normally\n contains a MAC address. The interface's media-specific MIB\n must define the bit and byte ordering and the format of the\n value of this object. For interfaces which do not have such\n an address (e.g., a serial line), this object should contain\n an octet string of zero length.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) interfaces(2) ifTable(2) ifEntry(1) ifPhysAddress(6) 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00BRIDGE-MIB::dot1dTpFdbAddress.1": {stdout: "BRIDGE-MIB::dot1dTpFdbAddress.1\ndot1dTpFdbAddress OBJECT-TYPE\n -- FROM\tBRIDGE-MIB\n -- TEXTUAL CONVENTION MacAddress\n SYNTAX\tOCTET STRING (6) \n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"A unicast MAC address for which the bridge has\n forwarding and/or filtering information.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) dot1dBridge(17) dot1dTp(4) dot1dTpFdbTable(3) dot1dTpFdbEntry(1) dot1dTpFdbAddress(1) 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TCP-MIB::tcpConnectionLocalAddress.1": {stdout: "TCP-MIB::tcpConnectionLocalAddress.1\ntcpConnectionLocalAddress OBJECT-TYPE\n -- FROM\tTCP-MIB\n -- TEXTUAL CONVENTION InetAddress\n SYNTAX\tOCTET STRING (0..255) \n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n DESCRIPTION\t\"The local IP address for this TCP connection. 
The type\n of this address is determined by the value of\n tcpConnectionLocalAddressType.\n\n As this object is used in the index for the\n tcpConnectionTable, implementors should be\n careful not to create entries that would result in OIDs\n with more than 128 subidentifiers; otherwise the information\n cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) tcp(6) tcpConnectionTable(19) tcpConnectionEntry(1) tcpConnectionLocalAddress(2) 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00TEST::testTable.1": {stdout: "TEST::testTableEntry\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) 1 }\n", stderr: "", exitError: false}, + "snmptable\x00-Ch\x00-Cl\x00-c\x00public\x00127.0.0.1\x00TEST::testTable": {stdout: "server connections latency description \nTEST::testTable: No entries\n", stderr: "", exitError: false}, } diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index c2e842a00..db1a49605 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -721,7 +721,7 @@ func TestSnmpTranslateCache_miss(t *testing.T) { func TestSnmpTranslateCache_hit(t *testing.T) { snmpTranslateCaches = map[string]snmpTranslateCache{ - "foo": snmpTranslateCache{ + "foo": { mibName: "a", oidNum: "b", oidText: "c", @@ -754,7 +754,7 @@ func TestSnmpTableCache_miss(t *testing.T) { func TestSnmpTableCache_hit(t *testing.T) { snmpTableCaches = map[string]snmpTableCache{ - "foo": snmpTableCache{ + "foo": { mibName: "a", oidNum: "b", oidText: "c", diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index 3fbc45640..1e50c8341 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -874,21 +874,21 @@ func TestParse_DataDogTags(t *testing.T) { } testTags := map[string]map[string]string{ - "my_counter": map[string]string{ + "my_counter": { "host": "localhost", "environment": "prod", "endpoint": "/:tenant?/oauth/ro", }, - "my_gauge": map[string]string{ + "my_gauge": { "live": "", }, - "my_set": map[string]string{ + "my_set": { "host": "localhost", }, - "my_timer": map[string]string{ + "my_timer": { "live": "", "host": "localhost", }, diff --git a/plugins/inputs/syslog/rfc5425_test.go b/plugins/inputs/syslog/rfc5425_test.go index de5835e6f..d629024b7 100644 --- a/plugins/inputs/syslog/rfc5425_test.go +++ b/plugins/inputs/syslog/rfc5425_test.go @@ -34,7 +34,7 @@ func getTestCasesForRFC5425() []testCase5425 { name: "1st/avg/ok", data: []byte(`188 <29>1 2016-02-21T04:32:57+00:00 web1 someservice 2341 2 [origin][meta sequence="14125553" service="someservice"] "GET /v1/ok HTTP/1.1" 200 145 "-" "hacheck 0.9.0" 24306 127.0.0.1:40124 575`), wantStrict: []testutil.Metric{ - testutil.Metric{ + { Measurement: "syslog", Fields: map[string]interface{}{ "version": uint16(1), @@ -58,7 +58,7 @@ func getTestCasesForRFC5425() []testCase5425 { }, }, wantBestEffort: []testutil.Metric{ - testutil.Metric{ + { Measurement: "syslog", Fields: map[string]interface{}{ "version": uint16(1), @@ -86,7 +86,7 @@ func getTestCasesForRFC5425() []testCase5425 { name: "1st/min/ok//2nd/min/ok", data: []byte("16 <1>2 - - - - - -17 <4>11 - - - - - -"), wantStrict: []testutil.Metric{ - testutil.Metric{ + { Measurement: "syslog", Fields: map[string]interface{}{ "version": uint16(2), @@ -99,7 +99,7 @@ func getTestCasesForRFC5425() []testCase5425 { }, Time: 
defaultTime, }, - testutil.Metric{ + { Measurement: "syslog", Fields: map[string]interface{}{ "version": uint16(11), @@ -114,7 +114,7 @@ func getTestCasesForRFC5425() []testCase5425 { }, }, wantBestEffort: []testutil.Metric{ - testutil.Metric{ + { Measurement: "syslog", Fields: map[string]interface{}{ "version": uint16(2), @@ -127,7 +127,7 @@ func getTestCasesForRFC5425() []testCase5425 { }, Time: defaultTime, }, - testutil.Metric{ + { Measurement: "syslog", Fields: map[string]interface{}{ "version": uint16(11), @@ -146,7 +146,7 @@ func getTestCasesForRFC5425() []testCase5425 { name: "1st/utf8/ok", data: []byte("23 <1>1 - - - - - - hellø"), wantStrict: []testutil.Metric{ - testutil.Metric{ + { Measurement: "syslog", Fields: map[string]interface{}{ "version": uint16(1), @@ -162,7 +162,7 @@ func getTestCasesForRFC5425() []testCase5425 { }, }, wantBestEffort: []testutil.Metric{ - testutil.Metric{ + { Measurement: "syslog", Fields: map[string]interface{}{ "version": uint16(1), @@ -182,7 +182,7 @@ func getTestCasesForRFC5425() []testCase5425 { name: "1st/nl/ok", // newline data: []byte("28 <1>3 - - - - - - hello\nworld"), wantStrict: []testutil.Metric{ - testutil.Metric{ + { Measurement: "syslog", Fields: map[string]interface{}{ "version": uint16(3), @@ -198,7 +198,7 @@ func getTestCasesForRFC5425() []testCase5425 { }, }, wantBestEffort: []testutil.Metric{ - testutil.Metric{ + { Measurement: "syslog", Fields: map[string]interface{}{ "version": uint16(3), @@ -219,7 +219,7 @@ func getTestCasesForRFC5425() []testCase5425 { data: []byte("16 <1>2"), wantStrict: nil, wantBestEffort: []testutil.Metric{ - testutil.Metric{ + { Measurement: "syslog", Fields: map[string]interface{}{ "version": uint16(2), @@ -239,7 +239,7 @@ func getTestCasesForRFC5425() []testCase5425 { name: "1st/min/ok", data: []byte("16 <1>1 - - - - - -"), wantStrict: []testutil.Metric{ - testutil.Metric{ + { Measurement: "syslog", Fields: map[string]interface{}{ "version": uint16(1), @@ -254,7 +254,7 @@ func getTestCasesForRFC5425() []testCase5425 { }, }, wantBestEffort: []testutil.Metric{ - testutil.Metric{ + { Measurement: "syslog", Fields: map[string]interface{}{ "version": uint16(1), @@ -274,7 +274,7 @@ func getTestCasesForRFC5425() []testCase5425 { data: []byte("16 <1>217 <11>1 - - - - - -"), wantStrict: nil, wantBestEffort: []testutil.Metric{ - testutil.Metric{ + { Measurement: "syslog", Fields: map[string]interface{}{ "version": uint16(217), @@ -299,7 +299,7 @@ func getTestCasesForRFC5425() []testCase5425 { name: "1st/max/ok", data: []byte(fmt.Sprintf("8192 <%d>%d %s %s %s %s %s - %s", maxP, maxV, maxTS, maxH, maxA, maxPID, maxMID, message7681)), wantStrict: []testutil.Metric{ - testutil.Metric{ + { Measurement: "syslog", Fields: map[string]interface{}{ "version": maxV, @@ -320,7 +320,7 @@ func getTestCasesForRFC5425() []testCase5425 { }, }, wantBestEffort: []testutil.Metric{ - testutil.Metric{ + { Measurement: "syslog", Fields: map[string]interface{}{ "version": maxV, diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index cdea675e0..598287963 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -111,7 +111,7 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error { if err != nil { t.acc.AddError(fmt.Errorf("E! 
Error Glob %s failed to compile, %s", filepath, err)) } - for file, _ := range g.Match() { + for file := range g.Match() { if _, ok := t.tailers[file]; ok { // we're already tailing this file continue diff --git a/plugins/inputs/varnish/varnish_test.go b/plugins/inputs/varnish/varnish_test.go index 30f91e237..465e9e8dd 100644 --- a/plugins/inputs/varnish/varnish_test.go +++ b/plugins/inputs/varnish/varnish_test.go @@ -113,16 +113,16 @@ MEMPOOL.vbc.sz_wanted 88 . Size requested ` var parsedSmOutput = map[string]map[string]interface{}{ - "MAIN": map[string]interface{}{ + "MAIN": { "uptime": uint64(895), "cache_hit": uint64(95), "cache_miss": uint64(5), }, - "MGT": map[string]interface{}{ + "MGT": { "uptime": uint64(896), "child_start": uint64(1), }, - "MEMPOOL": map[string]interface{}{ + "MEMPOOL": { "vbc.live": uint64(0), "vbc.pool": uint64(10), "vbc.sz_wanted": uint64(88), diff --git a/plugins/inputs/zipkin/codec/codec_test.go b/plugins/inputs/zipkin/codec/codec_test.go index c3a9fbd73..3525f30c2 100644 --- a/plugins/inputs/zipkin/codec/codec_test.go +++ b/plugins/inputs/zipkin/codec/codec_test.go @@ -382,7 +382,7 @@ func TestNewBinaryAnnotations(t *testing.T) { name: "myservice", }, want: []trace.BinaryAnnotation{ - trace.BinaryAnnotation{ + { Host: "myhost", ServiceName: "myservice", Key: "mykey", @@ -424,7 +424,7 @@ func TestNewAnnotations(t *testing.T) { name: "myservice", }, want: []trace.Annotation{ - trace.Annotation{ + { Host: "myhost", ServiceName: "myservice", Timestamp: time.Unix(0, 0).UTC(), diff --git a/plugins/inputs/zipkin/codec/thrift/thrift_test.go b/plugins/inputs/zipkin/codec/thrift/thrift_test.go index 000ac628c..798fc269e 100644 --- a/plugins/inputs/zipkin/codec/thrift/thrift_test.go +++ b/plugins/inputs/zipkin/codec/thrift/thrift_test.go @@ -113,7 +113,7 @@ func TestUnmarshalThrift(t *testing.T) { Duration: addr(53106), Annotations: []*zipkincore.Annotation{}, BinaryAnnotations: []*zipkincore.BinaryAnnotation{ - &zipkincore.BinaryAnnotation{ + { Key: "lc", AnnotationType: zipkincore.AnnotationType_STRING, Value: []byte("trivial"), @@ -133,7 +133,7 @@ func TestUnmarshalThrift(t *testing.T) { Duration: addr(50410), Annotations: []*zipkincore.Annotation{}, BinaryAnnotations: []*zipkincore.BinaryAnnotation{ - &zipkincore.BinaryAnnotation{ + { Key: "lc", AnnotationType: zipkincore.AnnotationType_STRING, Value: []byte("trivial"), @@ -151,7 +151,7 @@ func TestUnmarshalThrift(t *testing.T) { Timestamp: addr(1498688360851318), Duration: addr(103680), Annotations: []*zipkincore.Annotation{ - &zipkincore.Annotation{ + { Timestamp: 1498688360851325, Value: "Starting child #0", Host: &zipkincore.Endpoint{ @@ -159,7 +159,7 @@ func TestUnmarshalThrift(t *testing.T) { ServiceName: "trivial", }, }, - &zipkincore.Annotation{ + { Timestamp: 1498688360904545, Value: "Starting child #1", Host: &zipkincore.Endpoint{ @@ -167,7 +167,7 @@ func TestUnmarshalThrift(t *testing.T) { ServiceName: "trivial", }, }, - &zipkincore.Annotation{ + { Timestamp: 1498688360954992, Value: "A Log", Host: &zipkincore.Endpoint{ @@ -177,7 +177,7 @@ func TestUnmarshalThrift(t *testing.T) { }, }, BinaryAnnotations: []*zipkincore.BinaryAnnotation{ - &zipkincore.BinaryAnnotation{ + { Key: "lc", AnnotationType: zipkincore.AnnotationType_STRING, Value: []byte("trivial"), diff --git a/plugins/inputs/zipkin/convert_test.go b/plugins/inputs/zipkin/convert_test.go index 5085deecb..92c1ba3ff 100644 --- a/plugins/inputs/zipkin/convert_test.go +++ b/plugins/inputs/zipkin/convert_test.go @@ -108,7 +108,7 @@ func 
TestLineProtocolConverter_Record(t *testing.T) { }, }, want: []testutil.Metric{ - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "8090652509916334619", @@ -122,7 +122,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { }, Time: time.Unix(0, 1498688360851331000).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "8090652509916334619", @@ -139,7 +139,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { }, Time: time.Unix(0, 1498688360851331000).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "103618986556047333", @@ -153,7 +153,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { }, Time: time.Unix(0, 1498688360904552000).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "103618986556047333", @@ -170,7 +170,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { }, Time: time.Unix(0, 1498688360904552000).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "22964302721410078", @@ -184,7 +184,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { }, Time: time.Unix(0, 1498688360851318000).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "service_name": "trivial", @@ -200,7 +200,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { }, Time: time.Unix(0, 1498688360851318000).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "service_name": "trivial", @@ -216,7 +216,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { }, Time: time.Unix(0, 1498688360851318000).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "parent_id": "22964302721410078", @@ -232,7 +232,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { }, Time: time.Unix(0, 1498688360851318000).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "trace_id": "2505404965370368069", @@ -283,7 +283,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { }, }, want: []testutil.Metric{ - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "6802735349851856000", @@ -297,7 +297,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { }, Time: time.Unix(1, 0).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "annotation": "cs", diff --git a/plugins/inputs/zipkin/zipkin_test.go b/plugins/inputs/zipkin/zipkin_test.go index b71e5bf4e..2ac269db1 100644 --- a/plugins/inputs/zipkin/zipkin_test.go +++ b/plugins/inputs/zipkin/zipkin_test.go @@ -27,7 +27,7 @@ func TestZipkinPlugin(t *testing.T) { datafile: "testdata/threespans.dat", contentType: "application/x-thrift", want: []testutil.Metric{ - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "7047c59776af8a1b", @@ -41,7 +41,7 @@ func TestZipkinPlugin(t *testing.T) { }, Time: time.Unix(0, 1498688360851331000).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "7047c59776af8a1b", @@ -58,7 +58,7 @@ func TestZipkinPlugin(t *testing.T) { }, Time: time.Unix(0, 1498688360851331000).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "17020eb55a8bfe5", @@ -72,7 +72,7 @@ func TestZipkinPlugin(t *testing.T) { }, Time: time.Unix(0, 1498688360904552000).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "17020eb55a8bfe5", @@ -89,7 +89,7 @@ func TestZipkinPlugin(t *testing.T) { }, Time: 
time.Unix(0, 1498688360904552000).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "5195e96239641e", @@ -103,7 +103,7 @@ func TestZipkinPlugin(t *testing.T) { }, Time: time.Unix(0, 1498688360851318000).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "service_name": "trivial", @@ -119,7 +119,7 @@ func TestZipkinPlugin(t *testing.T) { }, Time: time.Unix(0, 1498688360851318000).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "service_name": "trivial", @@ -135,7 +135,7 @@ func TestZipkinPlugin(t *testing.T) { }, Time: time.Unix(0, 1498688360851318000).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "parent_id": "5195e96239641e", @@ -151,7 +151,7 @@ func TestZipkinPlugin(t *testing.T) { }, Time: time.Unix(0, 1498688360851318000).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "trace_id": "22c4fc8ab3669045", @@ -176,7 +176,7 @@ func TestZipkinPlugin(t *testing.T) { datafile: "testdata/distributed_trace_sample.dat", contentType: "application/x-thrift", want: []testutil.Metric{ - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "5e682bc21ce99c80", @@ -190,7 +190,7 @@ func TestZipkinPlugin(t *testing.T) { }, Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "annotation": "cs", @@ -206,7 +206,7 @@ func TestZipkinPlugin(t *testing.T) { }, Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "annotation": "cr", @@ -486,7 +486,7 @@ func TestZipkinPlugin(t *testing.T) { }, Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "annotation": "ss", @@ -502,7 +502,7 @@ func TestZipkinPlugin(t *testing.T) { }, Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "annotation": "Demo2Application", @@ -519,7 +519,7 @@ func TestZipkinPlugin(t *testing.T) { }, Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "annotation": "hi", @@ -536,7 +536,7 @@ func TestZipkinPlugin(t *testing.T) { }, Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(), }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "annotation": "192.168.0.8:test:8010", diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go index afc3a20ed..5c435ac0d 100644 --- a/plugins/outputs/azure_monitor/azure_monitor.go +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -402,7 +402,7 @@ func translate(m telegraf.Metric, prefix string) (*azureMonitorMetric, error) { Namespace: ns, DimensionNames: dimensionNames, Series: []*azureMonitorSeries{ - &azureMonitorSeries{ + { DimensionValues: dimensionValues, Min: min, Max: max, diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go index cdb55ec19..acadca842 100644 --- a/plugins/outputs/cloudwatch/cloudwatch_test.go +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -29,7 +29,7 @@ func TestBuildDimensions(t *testing.T) { tagKeys := make([]string, len(testPoint.Tags())) i := 0 - for k, _ := range testPoint.Tags() { + for k := range 
testPoint.Tags() { tagKeys[i] = k i += 1 } diff --git a/plugins/outputs/cratedb/cratedb.go b/plugins/outputs/cratedb/cratedb.go index bac1c730a..f6840cc38 100644 --- a/plugins/outputs/cratedb/cratedb.go +++ b/plugins/outputs/cratedb/cratedb.go @@ -165,7 +165,7 @@ func escapeObject(m map[string]interface{}) (string, error) { // We find all keys and sort them first because iterating a map in go is // randomized and we need consistent output for our unit tests. keys := make([]string, 0, len(m)) - for k, _ := range m { + for k := range m { keys = append(keys, k) } sort.Strings(keys) diff --git a/plugins/outputs/datadog/datadog_test.go b/plugins/outputs/datadog/datadog_test.go index 7c5e3cc1f..7bbc91254 100644 --- a/plugins/outputs/datadog/datadog_test.go +++ b/plugins/outputs/datadog/datadog_test.go @@ -85,11 +85,11 @@ func TestBuildTags(t *testing.T) { }{ { []*telegraf.Tag{ - &telegraf.Tag{ + { Key: "one", Value: "two", }, - &telegraf.Tag{ + { Key: "three", Value: "four", }, @@ -98,7 +98,7 @@ func TestBuildTags(t *testing.T) { }, { []*telegraf.Tag{ - &telegraf.Tag{ + { Key: "aaa", Value: "bbb", }, diff --git a/plugins/outputs/influxdb/udp_test.go b/plugins/outputs/influxdb/udp_test.go index 61b3f1ded..2d21fd7bf 100644 --- a/plugins/outputs/influxdb/udp_test.go +++ b/plugins/outputs/influxdb/udp_test.go @@ -244,7 +244,7 @@ func TestUDP_WriteWithRealConn(t *testing.T) { go func() { defer wg.Done() var total int - for _, _ = range metrics { + for range metrics { n, _, err := conn.ReadFrom(buf[total:]) if err != nil { break diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index c038eba66..1b8e06a49 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -154,7 +154,7 @@ func (p *PrometheusClient) Start() error { } registry := prometheus.NewRegistry() - for collector, _ := range defaultCollectors { + for collector := range defaultCollectors { switch collector { case "gocollector": registry.Register(prometheus.NewGoCollector()) @@ -236,7 +236,7 @@ func (p *PrometheusClient) Expire() { for name, family := range p.fam { for key, sample := range family.Samples { if p.ExpirationInterval.Duration != 0 && now.After(sample.Expiration) { - for k, _ := range sample.Labels { + for k := range sample.Labels { family.LabelSet[k]-- } delete(family.Samples, key) @@ -323,7 +323,7 @@ func CreateSampleID(tags map[string]string) SampleID { func addSample(fam *MetricFamily, sample *Sample, sampleID SampleID) { - for k, _ := range sample.Labels { + for k := range sample.Labels { fam.LabelSet[k]++ } diff --git a/plugins/parsers/collectd/parser_test.go b/plugins/parsers/collectd/parser_test.go index afd58ec72..42a4d4c7a 100644 --- a/plugins/parsers/collectd/parser_test.go +++ b/plugins/parsers/collectd/parser_test.go @@ -33,7 +33,7 @@ type testCase struct { var singleMetric = testCase{ []api.ValueList{ - api.ValueList{ + { Identifier: api.Identifier{ Host: "xyzzy", Plugin: "cpu", @@ -48,7 +48,7 @@ var singleMetric = testCase{ }, }, []metricData{ - metricData{ + { "cpu_value", map[string]string{ "type_instance": "user", @@ -65,7 +65,7 @@ var singleMetric = testCase{ var multiMetric = testCase{ []api.ValueList{ - api.ValueList{ + { Identifier: api.Identifier{ Host: "xyzzy", Plugin: "cpu", @@ -81,7 +81,7 @@ var multiMetric = testCase{ }, }, []metricData{ - metricData{ + { "cpu_0", map[string]string{ "type_instance": "user", @@ -93,7 +93,7 @@ var multiMetric = testCase{ 
"value": float64(42), }, }, - metricData{ + { "cpu_1", map[string]string{ "type_instance": "user", diff --git a/plugins/parsers/influx/machine_test.go b/plugins/parsers/influx/machine_test.go index 1c617919b..1a9cb196e 100644 --- a/plugins/parsers/influx/machine_test.go +++ b/plugins/parsers/influx/machine_test.go @@ -234,15 +234,15 @@ var tests = []struct { name: "minimal", input: []byte("cpu value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ -252,15 +252,15 @@ var tests = []struct { name: "newline", input: []byte("cpu value=42\n"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ -270,19 +270,19 @@ var tests = []struct { name: "minimal with timestamp", input: []byte("cpu value=42 1516241192000000000"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, - Result{ + { Name: Timestamp, Value: []byte("1516241192000000000"), }, @@ -292,15 +292,15 @@ var tests = []struct { name: "measurement escape non-special", input: []byte(`c\pu value=42`), results: []Result{ - Result{ + { Name: Measurement, Value: []byte(`c\pu`), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ -310,15 +310,15 @@ var tests = []struct { name: "measurement escaped trailing backslash", input: []byte(`cpu\\ value=42`), results: []Result{ - Result{ + { Name: Measurement, Value: []byte(`cpu\\`), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ -328,15 +328,15 @@ var tests = []struct { name: "single char measurement", input: []byte("c value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("c"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ -346,15 +346,15 @@ var tests = []struct { name: "escape backslash in measurement", input: []byte(`cp\\u value=42`), results: []Result{ - Result{ + { Name: Measurement, Value: []byte(`cp\\u`), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ -364,15 +364,15 @@ var tests = []struct { name: "measurement escape space", input: []byte(`cpu\ abc value=42`), results: []Result{ - Result{ + { Name: Measurement, Value: []byte(`cpu\ abc`), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ -382,15 +382,15 @@ var tests = []struct { name: "scientific float", input: []byte("cpu value=42e0"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42e0"), }, @@ -400,15 +400,15 @@ var tests = []struct { name: "scientific float negative mantissa", input: []byte("cpu value=-42e0"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("-42e0"), }, @@ -418,15 +418,15 @@ var tests = []struct { name: "scientific float negative exponent", input: []byte("cpu value=42e-1"), 
results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42e-1"), }, @@ -436,15 +436,15 @@ var tests = []struct { name: "scientific float big e", input: []byte("cpu value=42E0"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42E0"), }, @@ -454,11 +454,11 @@ var tests = []struct { name: "scientific float missing exponent", input: []byte("cpu value=42E"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { err: ErrFieldParse, }, }, @@ -467,15 +467,15 @@ var tests = []struct { name: "float with decimal", input: []byte("cpu value=42.2"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42.2"), }, @@ -485,15 +485,15 @@ var tests = []struct { name: "negative float", input: []byte("cpu value=-42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("-42"), }, @@ -503,15 +503,15 @@ var tests = []struct { name: "float without integer digits", input: []byte("cpu value=.42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte(".42"), }, @@ -521,15 +521,15 @@ var tests = []struct { name: "float without integer digits negative", input: []byte("cpu value=-.42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("-.42"), }, @@ -539,15 +539,15 @@ var tests = []struct { name: "float with multiple leading 0", input: []byte("cpu value=00.42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("00.42"), }, @@ -557,11 +557,11 @@ var tests = []struct { name: "invalid float with only dot", input: []byte("cpu value=."), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { err: ErrFieldParse, }, }, @@ -570,23 +570,23 @@ var tests = []struct { name: "multiple fields", input: []byte("cpu x=42,y=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("x"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, - Result{ + { Name: FieldKey, Value: []byte("y"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ -596,15 +596,15 @@ var tests = []struct { name: "integer field", input: []byte("cpu value=42i"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldInt, Value: []byte("42i"), }, @@ -614,15 +614,15 @@ var tests = []struct { name: "negative integer field", input: []byte("cpu value=-42i"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldInt, Value: []byte("-42i"), }, @@ -632,15 +632,15 @@ var tests = []struct { name: "zero integer field", input: 
[]byte("cpu value=0i"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldInt, Value: []byte("0i"), }, @@ -650,15 +650,15 @@ var tests = []struct { name: "negative zero integer field", input: []byte("cpu value=-0i"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldInt, Value: []byte("-0i"), }, @@ -668,11 +668,11 @@ var tests = []struct { name: "invalid field", input: []byte("cpu value=howdy"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { err: ErrFieldParse, }, }, @@ -681,15 +681,15 @@ var tests = []struct { name: "string field", input: []byte(`cpu value="42"`), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldString, Value: []byte("42"), }, @@ -699,15 +699,15 @@ var tests = []struct { name: "bool field", input: []byte(`cpu value=true`), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldBool, Value: []byte("true"), }, @@ -717,23 +717,23 @@ var tests = []struct { name: "tag", input: []byte(`cpu,host=localhost value=42`), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: TagKey, Value: []byte("host"), }, - Result{ + { Name: TagValue, Value: []byte("localhost"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ -743,23 +743,23 @@ var tests = []struct { name: "tag key escape space", input: []byte(`cpu,h\ ost=localhost value=42`), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: TagKey, Value: []byte(`h\ ost`), }, - Result{ + { Name: TagValue, Value: []byte("localhost"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ -769,23 +769,23 @@ var tests = []struct { name: "tag key escape comma", input: []byte(`cpu,h\,ost=localhost value=42`), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: TagKey, Value: []byte(`h\,ost`), }, - Result{ + { Name: TagValue, Value: []byte("localhost"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ -795,23 +795,23 @@ var tests = []struct { name: "tag key escape equal", input: []byte(`cpu,h\=ost=localhost value=42`), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: TagKey, Value: []byte(`h\=ost`), }, - Result{ + { Name: TagValue, Value: []byte("localhost"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ -821,31 +821,31 @@ var tests = []struct { name: "multiple tags", input: []byte(`cpu,host=localhost,cpu=cpu0 value=42`), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: TagKey, Value: []byte("host"), }, - Result{ + { Name: TagValue, Value: []byte("localhost"), }, - Result{ + { Name: TagKey, Value: []byte("cpu"), }, - Result{ + { Name: TagValue, Value: []byte("cpu0"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ 
-855,11 +855,11 @@ var tests = []struct { name: "tag invalid missing separator", input: []byte("cpu,xyzzy value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { err: ErrTagParse, }, }, @@ -868,11 +868,11 @@ var tests = []struct { name: "tag invalid missing value", input: []byte("cpu,xyzzy= value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { err: ErrTagParse, }, }, @@ -881,11 +881,11 @@ var tests = []struct { name: "tag invalid unescaped space", input: []byte("cpu,h ost=localhost value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { err: ErrTagParse, }, }, @@ -894,11 +894,11 @@ var tests = []struct { name: "tag invalid unescaped comma", input: []byte("cpu,h,ost=localhost value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { err: ErrTagParse, }, }, @@ -907,11 +907,11 @@ var tests = []struct { name: "tag invalid unescaped equals", input: []byte("cpu,h=ost=localhost value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { err: ErrTagParse, }, }, @@ -920,19 +920,19 @@ var tests = []struct { name: "timestamp negative", input: []byte("cpu value=42 -1"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, - Result{ + { Name: Timestamp, Value: []byte("-1"), }, @@ -942,19 +942,19 @@ var tests = []struct { name: "timestamp zero", input: []byte("cpu value=42 0"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, - Result{ + { Name: Timestamp, Value: []byte("0"), }, @@ -964,27 +964,27 @@ var tests = []struct { name: "multiline", input: []byte("cpu value=42\n\n\ncpu value=43\n"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("43"), }, @@ -994,25 +994,25 @@ var tests = []struct { name: "error recovery", input: []byte("cpu value=howdy\ncpu\ncpu value=42\n"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { err: ErrFieldParse, }, - Result{ + { err: ErrFieldParse, }, - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ -1022,31 +1022,31 @@ var tests = []struct { name: "line whitespace", input: []byte(" cpu value=42 1516241192000000000 \n\n cpu value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, - Result{ + { Name: Timestamp, Value: []byte("1516241192000000000"), }, - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ -1056,15 +1056,15 @@ var tests = []struct { name: "leading newline", input: []byte("\ncpu value=42"), results: []Result{ - Result{ + { Name: Measurement, 
Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ -1074,11 +1074,11 @@ var tests = []struct { name: "invalid missing field value", input: []byte("cpu value="), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { err: ErrFieldParse, }, }, @@ -1087,11 +1087,11 @@ var tests = []struct { name: "invalid eof field key", input: []byte("cpu value"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { err: ErrFieldParse, }, }, @@ -1100,7 +1100,7 @@ var tests = []struct { name: "invalid measurement only", input: []byte("cpu"), results: []Result{ - Result{ + { err: ErrFieldParse, }, }, @@ -1109,7 +1109,7 @@ var tests = []struct { name: "invalid measurement only eol", input: []byte("cpu\n"), results: []Result{ - Result{ + { err: ErrFieldParse, }, }, @@ -1118,11 +1118,11 @@ var tests = []struct { name: "invalid missing tag", input: []byte("cpu, value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { err: ErrTagParse, }, }, @@ -1131,19 +1131,19 @@ var tests = []struct { name: "invalid missing field", input: []byte("cpu,x=y "), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: TagKey, Value: []byte("x"), }, - Result{ + { Name: TagValue, Value: []byte("y"), }, - Result{ + { err: ErrFieldParse, }, }, @@ -1152,19 +1152,19 @@ var tests = []struct { name: "invalid too many fields", input: []byte("cpu value=42 value=43"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, - Result{ + { err: ErrTimestampParse, }, }, @@ -1173,19 +1173,19 @@ var tests = []struct { name: "invalid timestamp too long", input: []byte("cpu value=42 12345678901234567890"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, - Result{ + { err: ErrTimestampParse, }, }, @@ -1194,11 +1194,11 @@ var tests = []struct { name: "invalid open string field", input: []byte(`cpu value="42 12345678901234567890`), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { err: ErrFieldParse, }, }, @@ -1207,14 +1207,14 @@ var tests = []struct { name: "invalid newline in string field", input: []byte("cpu value=\"4\n2\""), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { err: ErrFieldParse, }, - Result{ + { err: ErrFieldParse, }, }, @@ -1223,11 +1223,11 @@ var tests = []struct { name: "invalid field value", input: []byte(`cpu value=howdy`), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { err: ErrFieldParse, }, }, @@ -1236,19 +1236,19 @@ var tests = []struct { name: "invalid quoted timestamp", input: []byte(`cpu value=42 "12345678901234567890"`), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, - Result{ + { err: ErrTimestampParse, }, }, @@ -1257,15 +1257,15 @@ var tests = []struct { name: "commented line", input: []byte("# blah blah\ncpu value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: 
[]byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ -1275,15 +1275,15 @@ var tests = []struct { name: "end with comment", input: []byte("cpu value=42\n# blah blah"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ -1293,15 +1293,15 @@ var tests = []struct { name: "end with comment and whitespace", input: []byte("cpu value=42\n# blah blah\n\n "), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ -1311,15 +1311,15 @@ var tests = []struct { name: "unicode", input: []byte("cpu ☺=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("☺"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, @@ -1407,7 +1407,7 @@ func TestSeriesMachine(t *testing.T) { name: "no tags", input: []byte("cpu"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, @@ -1417,23 +1417,23 @@ func TestSeriesMachine(t *testing.T) { name: "tags", input: []byte("cpu,a=x,b=y"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: TagKey, Value: []byte("a"), }, - Result{ + { Name: TagValue, Value: []byte("x"), }, - Result{ + { Name: TagKey, Value: []byte("b"), }, - Result{ + { Name: TagValue, Value: []byte("y"), }, diff --git a/plugins/processors/strings/strings_test.go b/plugins/processors/strings/strings_test.go index a4a16d7dd..f1bf93419 100644 --- a/plugins/processors/strings/strings_test.go +++ b/plugins/processors/strings/strings_test.go @@ -53,7 +53,7 @@ func TestFieldConversions(t *testing.T) { name: "Should change existing field to lowercase", plugin: &Strings{ Lowercase: []converter{ - converter{ + { Field: "request", }, }, @@ -68,7 +68,7 @@ func TestFieldConversions(t *testing.T) { name: "Should change existing field to uppercase", plugin: &Strings{ Uppercase: []converter{ - converter{ + { Field: "request", }, }, @@ -83,7 +83,7 @@ func TestFieldConversions(t *testing.T) { name: "Should add new lowercase field", plugin: &Strings{ Lowercase: []converter{ - converter{ + { Field: "request", Dest: "lowercase_request", }, @@ -103,7 +103,7 @@ func TestFieldConversions(t *testing.T) { name: "Should trim from both sides", plugin: &Strings{ Trim: []converter{ - converter{ + { Field: "request", Cutset: "/w", }, @@ -119,13 +119,13 @@ func TestFieldConversions(t *testing.T) { name: "Should trim from both sides and make lowercase", plugin: &Strings{ Trim: []converter{ - converter{ + { Field: "request", Cutset: "/w", }, }, Lowercase: []converter{ - converter{ + { Field: "request", }, }, @@ -140,7 +140,7 @@ func TestFieldConversions(t *testing.T) { name: "Should trim from left side", plugin: &Strings{ TrimLeft: []converter{ - converter{ + { Field: "request", Cutset: "/w", }, @@ -156,7 +156,7 @@ func TestFieldConversions(t *testing.T) { name: "Should trim from right side", plugin: &Strings{ TrimRight: []converter{ - converter{ + { Field: "request", Cutset: "/w", }, @@ -172,7 +172,7 @@ func TestFieldConversions(t *testing.T) { name: "Should trim prefix '/mixed'", plugin: &Strings{ TrimPrefix: []converter{ - converter{ + { Field: "request", Prefix: "/mixed", }, @@ -188,7 +188,7 @@ func TestFieldConversions(t *testing.T) { name: "Should trim suffix '-1D&to=now'", plugin: &Strings{ 
TrimSuffix: []converter{ - converter{ + { Field: "request", Suffix: "-1D&to=now", }, @@ -204,7 +204,7 @@ func TestFieldConversions(t *testing.T) { name: "Trim without cutset removes whitespace", plugin: &Strings{ Trim: []converter{ - converter{ + { Field: "whitespace", }, }, @@ -219,7 +219,7 @@ func TestFieldConversions(t *testing.T) { name: "Trim left without cutset removes whitespace", plugin: &Strings{ TrimLeft: []converter{ - converter{ + { Field: "whitespace", }, }, @@ -234,7 +234,7 @@ func TestFieldConversions(t *testing.T) { name: "Trim right without cutset removes whitespace", plugin: &Strings{ TrimRight: []converter{ - converter{ + { Field: "whitespace", }, }, @@ -249,7 +249,7 @@ func TestFieldConversions(t *testing.T) { name: "No change if field missing", plugin: &Strings{ Lowercase: []converter{ - converter{ + { Field: "xyzzy", Suffix: "-1D&to=now", }, @@ -281,7 +281,7 @@ func TestTagConversions(t *testing.T) { name: "Should change existing tag to lowercase", plugin: &Strings{ Lowercase: []converter{ - converter{ + { Tag: "s-computername", }, }, @@ -300,7 +300,7 @@ func TestTagConversions(t *testing.T) { name: "Should add new lowercase tag", plugin: &Strings{ Lowercase: []converter{ - converter{ + { Tag: "s-computername", Dest: "s-computername_lowercase", }, @@ -324,7 +324,7 @@ func TestTagConversions(t *testing.T) { name: "Should add new uppercase tag", plugin: &Strings{ Uppercase: []converter{ - converter{ + { Tag: "s-computername", Dest: "s-computername_uppercase", }, @@ -365,7 +365,7 @@ func TestMeasurementConversions(t *testing.T) { name: "lowercase measurement", plugin: &Strings{ Lowercase: []converter{ - converter{ + { Measurement: "IIS_log", }, }, @@ -388,19 +388,19 @@ func TestMeasurementConversions(t *testing.T) { func TestMultipleConversions(t *testing.T) { plugin := &Strings{ Lowercase: []converter{ - converter{ + { Tag: "s-computername", }, - converter{ + { Field: "request", }, - converter{ + { Field: "cs-host", Dest: "cs-host_lowercase", }, }, Uppercase: []converter{ - converter{ + { Tag: "verb", }, }, @@ -428,18 +428,18 @@ func TestMultipleConversions(t *testing.T) { func TestReadmeExample(t *testing.T) { plugin := &Strings{ Lowercase: []converter{ - converter{ + { Tag: "uri_stem", }, }, TrimPrefix: []converter{ - converter{ + { Tag: "uri_stem", Prefix: "/api/", }, }, Uppercase: []converter{ - converter{ + { Field: "cs-host", Dest: "cs-host_normalised", }, @@ -492,7 +492,7 @@ func newMetric(name string) telegraf.Metric { func TestMeasurementReplace(t *testing.T) { plugin := &Strings{ Replace: []converter{ - converter{ + { Old: "_", New: "-", Measurement: "*", @@ -513,7 +513,7 @@ func TestMeasurementReplace(t *testing.T) { func TestMeasurementCharDeletion(t *testing.T) { plugin := &Strings{ Replace: []converter{ - converter{ + { Old: "foo", New: "", Measurement: "*", diff --git a/plugins/processors/topk/topk.go b/plugins/processors/topk/topk.go index ba5a0c783..36283482b 100644 --- a/plugins/processors/topk/topk.go +++ b/plugins/processors/topk/topk.go @@ -405,7 +405,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr } // Divide by the number of recorded measurements collected for every field noMeasurementsFound := true // Canary to check if no field with values was found, so we can return nil - for k, _ := range mean { + for k := range mean { if meanCounters[k] == 0 { mean[k] = 0 continue diff --git a/plugins/processors/topk/topk_test.go b/plugins/processors/topk/topk_test.go index 2f5844448..67d80cbf9 100644 --- 
a/plugins/processors/topk/topk_test.go +++ b/plugins/processors/topk/topk_test.go @@ -178,11 +178,11 @@ func TestTopkMeanAddAggregateFields(t *testing.T) { // Generate the answer chng := fieldList(field{"a_topk_aggregate", float64(28.044)}) changeSet := map[int]metricChange{ - 0: metricChange{newFields: chng}, - 1: metricChange{newFields: chng}, - 2: metricChange{newFields: chng}, - 3: metricChange{newFields: chng}, - 4: metricChange{newFields: chng}, + 0: {newFields: chng}, + 1: {newFields: chng}, + 2: {newFields: chng}, + 3: {newFields: chng}, + 4: {newFields: chng}, } answer := generateAns(input, changeSet) @@ -208,11 +208,11 @@ func TestTopkSumAddAggregateFields(t *testing.T) { // Generate the answer chng := fieldList(field{"a_topk_aggregate", float64(140.22)}) changeSet := map[int]metricChange{ - 0: metricChange{newFields: chng}, - 1: metricChange{newFields: chng}, - 2: metricChange{newFields: chng}, - 3: metricChange{newFields: chng}, - 4: metricChange{newFields: chng}, + 0: {newFields: chng}, + 1: {newFields: chng}, + 2: {newFields: chng}, + 3: {newFields: chng}, + 4: {newFields: chng}, } answer := generateAns(input, changeSet) @@ -238,11 +238,11 @@ func TestTopkMaxAddAggregateFields(t *testing.T) { // Generate the answer chng := fieldList(field{"a_topk_aggregate", float64(50.5)}) changeSet := map[int]metricChange{ - 0: metricChange{newFields: chng}, - 1: metricChange{newFields: chng}, - 2: metricChange{newFields: chng}, - 3: metricChange{newFields: chng}, - 4: metricChange{newFields: chng}, + 0: {newFields: chng}, + 1: {newFields: chng}, + 2: {newFields: chng}, + 3: {newFields: chng}, + 4: {newFields: chng}, } answer := generateAns(input, changeSet) @@ -268,11 +268,11 @@ func TestTopkMinAddAggregateFields(t *testing.T) { // Generate the answer chng := fieldList(field{"a_topk_aggregate", float64(0.3)}) changeSet := map[int]metricChange{ - 0: metricChange{newFields: chng}, - 1: metricChange{newFields: chng}, - 2: metricChange{newFields: chng}, - 3: metricChange{newFields: chng}, - 4: metricChange{newFields: chng}, + 0: {newFields: chng}, + 1: {newFields: chng}, + 2: {newFields: chng}, + 3: {newFields: chng}, + 4: {newFields: chng}, } answer := generateAns(input, changeSet) @@ -297,10 +297,10 @@ func TestTopkGroupby1(t *testing.T) { // Generate the answer changeSet := map[int]metricChange{ - 2: metricChange{newFields: fieldList(field{"value_topk_aggregate", float64(74.18)})}, - 3: metricChange{newFields: fieldList(field{"value_topk_aggregate", float64(72)})}, - 4: metricChange{newFields: fieldList(field{"value_topk_aggregate", float64(163.22)})}, - 5: metricChange{newFields: fieldList(field{"value_topk_aggregate", float64(163.22)})}, + 2: {newFields: fieldList(field{"value_topk_aggregate", float64(74.18)})}, + 3: {newFields: fieldList(field{"value_topk_aggregate", float64(72)})}, + 4: {newFields: fieldList(field{"value_topk_aggregate", float64(163.22)})}, + 5: {newFields: fieldList(field{"value_topk_aggregate", float64(163.22)})}, } answer := generateAns(input, changeSet) @@ -326,11 +326,11 @@ func TestTopkGroupby2(t *testing.T) { chng2 := fieldList(field{"value_topk_aggregate", float64(72)}) chng3 := fieldList(field{"value_topk_aggregate", float64(81.61)}) changeSet := map[int]metricChange{ - 1: metricChange{newFields: chng1}, - 2: metricChange{newFields: chng1}, - 3: metricChange{newFields: chng2}, - 4: metricChange{newFields: chng3}, - 5: metricChange{newFields: chng3}, + 1: {newFields: chng1}, + 2: {newFields: chng1}, + 3: {newFields: chng2}, + 4: {newFields: chng3}, + 5: 
{newFields: chng3}, } answer := generateAns(input, changeSet) @@ -354,8 +354,8 @@ func TestTopkGroupby3(t *testing.T) { // Generate the answer chng := fieldList(field{"value_topk_aggregate", float64(75.3)}) changeSet := map[int]metricChange{ - 4: metricChange{newFields: chng}, - 5: metricChange{newFields: chng}, + 4: {newFields: chng}, + 5: {newFields: chng}, } answer := generateAns(input, changeSet) @@ -381,10 +381,10 @@ func TestTopkGroupbyFields1(t *testing.T) { // Generate the answer changeSet := map[int]metricChange{ - 0: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(95.36)})}, - 1: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(39.01)})}, - 2: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(39.01)})}, - 5: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(29.45)})}, + 0: {newFields: fieldList(field{"A_topk_aggregate", float64(95.36)})}, + 1: {newFields: fieldList(field{"A_topk_aggregate", float64(39.01)})}, + 2: {newFields: fieldList(field{"A_topk_aggregate", float64(39.01)})}, + 5: {newFields: fieldList(field{"A_topk_aggregate", float64(29.45)})}, } answer := generateAns(input, changeSet) @@ -409,10 +409,10 @@ func TestTopkGroupbyFields2(t *testing.T) { // Generate the answer changeSet := map[int]metricChange{ - 0: metricChange{newFields: fieldList(field{"C_topk_aggregate", float64(72.41)})}, - 2: metricChange{newFields: fieldList(field{"B_topk_aggregate", float64(60.96)})}, - 4: metricChange{newFields: fieldList(field{"B_topk_aggregate", float64(81.55)}, field{"C_topk_aggregate", float64(49.96)})}, - 5: metricChange{newFields: fieldList(field{"C_topk_aggregate", float64(49.96)})}, + 0: {newFields: fieldList(field{"C_topk_aggregate", float64(72.41)})}, + 2: {newFields: fieldList(field{"B_topk_aggregate", float64(60.96)})}, + 4: {newFields: fieldList(field{"B_topk_aggregate", float64(81.55)}, field{"C_topk_aggregate", float64(49.96)})}, + 5: {newFields: fieldList(field{"C_topk_aggregate", float64(49.96)})}, } answer := generateAns(input, changeSet) @@ -438,9 +438,9 @@ func TestTopkGroupbyMetricName1(t *testing.T) { // Generate the answer chng := fieldList(field{"value_topk_aggregate", float64(235.22000000000003)}) changeSet := map[int]metricChange{ - 3: metricChange{newFields: chng}, - 4: metricChange{newFields: chng}, - 5: metricChange{newFields: chng}, + 3: {newFields: chng}, + 4: {newFields: chng}, + 5: {newFields: chng}, } answer := generateAns(input, changeSet) @@ -465,10 +465,10 @@ func TestTopkGroupbyMetricName2(t *testing.T) { // Generate the answer changeSet := map[int]metricChange{ - 0: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(95.36)})}, - 1: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(78.02)}, field{"value_topk_aggregate", float64(133.61)})}, - 2: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(78.02)}, field{"value_topk_aggregate", float64(133.61)})}, - 4: metricChange{newFields: fieldList(field{"value_topk_aggregate", float64(87.92)})}, + 0: {newFields: fieldList(field{"A_topk_aggregate", float64(95.36)})}, + 1: {newFields: fieldList(field{"A_topk_aggregate", float64(78.02)}, field{"value_topk_aggregate", float64(133.61)})}, + 2: {newFields: fieldList(field{"A_topk_aggregate", float64(78.02)}, field{"value_topk_aggregate", float64(133.61)})}, + 4: {newFields: fieldList(field{"value_topk_aggregate", float64(87.92)})}, } answer := generateAns(input, changeSet) @@ -493,9 +493,9 @@ func TestTopkBottomk(t *testing.T) { // 
Generate the answer changeSet := map[int]metricChange{ - 0: metricChange{}, - 1: metricChange{}, - 3: metricChange{}, + 0: {}, + 1: {}, + 3: {}, } answer := generateAns(input, changeSet) @@ -520,10 +520,10 @@ func TestTopkGroupByKeyTag(t *testing.T) { // Generate the answer changeSet := map[int]metricChange{ - 2: metricChange{newTags: tagList(tag{"gbt", "metric1&tag1=TWO&tag3=SIX&"})}, - 3: metricChange{newTags: tagList(tag{"gbt", "metric2&tag1=ONE&tag3=THREE&"})}, - 4: metricChange{newTags: tagList(tag{"gbt", "metric2&tag1=TWO&tag3=SEVEN&"})}, - 5: metricChange{newTags: tagList(tag{"gbt", "metric2&tag1=TWO&tag3=SEVEN&"})}, + 2: {newTags: tagList(tag{"gbt", "metric1&tag1=TWO&tag3=SIX&"})}, + 3: {newTags: tagList(tag{"gbt", "metric2&tag1=ONE&tag3=THREE&"})}, + 4: {newTags: tagList(tag{"gbt", "metric2&tag1=TWO&tag3=SEVEN&"})}, + 5: {newTags: tagList(tag{"gbt", "metric2&tag1=TWO&tag3=SEVEN&"})}, } answer := generateAns(input, changeSet) diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 8784cc1db..d4a4bebd8 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -237,7 +237,7 @@ func (a *Accumulator) NFields() int { defer a.Unlock() counter := 0 for _, pt := range a.Metrics { - for _, _ = range pt.Fields { + for range pt.Fields { counter++ } } From 2f7450ec042f2c3379afefb7344f3e5b68c37575 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 22 Oct 2018 12:41:37 -0700 Subject: [PATCH 0307/1815] Document units of filestat modification time --- plugins/inputs/filestat/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/filestat/README.md b/plugins/inputs/filestat/README.md index 79eec6c71..3102c13b0 100644 --- a/plugins/inputs/filestat/README.md +++ b/plugins/inputs/filestat/README.md @@ -20,7 +20,7 @@ The filestat plugin gathers metrics about file existence, size, and other stats. 
- filestat - exists (int, 0 | 1) - size_bytes (int, bytes) - - modification_time (int, unixtime) + - modification_time (int, unix time nanoseconds) - md5 (optional, string) ### Tags: From ff98ad710bb718b26a55e7862f2d7fb70b14a1f7 Mon Sep 17 00:00:00 2001 From: Bugagazavr Date: Mon, 22 Oct 2018 22:54:50 +0300 Subject: [PATCH 0308/1815] Add Nginx Plus API input (#4837) --- plugins/inputs/all/all.go | 1 + plugins/inputs/nginx_plus_api/README.md | 216 ++++ .../inputs/nginx_plus_api/nginx_plus_api.go | 115 ++ .../nginx_plus_api/nginx_plus_api_metrics.go | 454 +++++++ .../nginx_plus_api_metrics_test.go | 1075 +++++++++++++++++ .../nginx_plus_api/nginx_plus_api_types.go | 133 ++ 6 files changed, 1994 insertions(+) create mode 100644 plugins/inputs/nginx_plus_api/README.md create mode 100644 plugins/inputs/nginx_plus_api/nginx_plus_api.go create mode 100644 plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go create mode 100644 plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go create mode 100644 plugins/inputs/nginx_plus_api/nginx_plus_api_types.go diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 5840893e9..1d666947a 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -80,6 +80,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/net_response" _ "github.com/influxdata/telegraf/plugins/inputs/nginx" _ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus" + _ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus_api" _ "github.com/influxdata/telegraf/plugins/inputs/nsq" _ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/nstat" diff --git a/plugins/inputs/nginx_plus_api/README.md b/plugins/inputs/nginx_plus_api/README.md new file mode 100644 index 000000000..e90645e43 --- /dev/null +++ b/plugins/inputs/nginx_plus_api/README.md @@ -0,0 +1,216 @@ +# Telegraf Plugin: nginx_plus_api + +Nginx Plus is a commercial version of the open source web server Nginx. To use this plugin you will need a license. For more information about the differences between Nginx (F/OSS) and Nginx Plus, [click here](https://www.nginx.com/blog/whats-difference-nginx-foss-nginx-plus/). + +### Configuration: + +``` +# Read Nginx Plus API advanced status information +[[inputs.nginx_plus_api]] + ## An array of Nginx API URIs to gather stats.
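+ ## Several instances may be listed; each URL is polled concurrently on every + ## collection interval (the second hostname below is purely illustrative): + # urls = ["http://localhost/api", "http://nginx2.example.com/api"]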
+ urls = ["http://localhost/api"] + # Nginx API version, default: 3 + # api_version = 3 +``` + +### Migration from Nginx Plus (Status) input plugin + +| Nginx Plus | Nginx Plus API | +|---------------------------------|--------------------------------------| +| nginx_plus_processes | nginx_plus_api_processes | +| nginx_plus_connections | nginx_plus_api_connections | +| nginx_plus_ssl | nginx_plus_api_ssl | +| nginx_plus_requests | nginx_plus_api_http_requests | +| nginx_plus_zone | nginx_plus_api_http_server_zones | +| nginx_plus_upstream | nginx_plus_api_http_upstreams | +| nginx_plus_upstream_peer | nginx_plus_api_http_upstream_peers | +| nginx_plus_cache | nginx_plus_api_http_caches | +| nginx_plus_stream_upstream | nginx_plus_api_stream_upstreams | +| nginx_plus_stream_upstream_peer | nginx_plus_api_stream_upstream_peers | +| nginx.stream.zone | nginx_plus_api_stream_server_zones | + +### Measurements & Fields: + +- nginx_plus_api_processes + - respawned +- nginx_plus_api_connections + - accepted + - dropped + - active + - idle +- nginx_plus_api_ssl + - handshakes + - handshakes_failed + - session_reuses +- nginx_plus_api_http_requests + - total + - current +- nginx_plus_api_http_server_zones + - processing + - requests + - responses_1xx + - responses_2xx + - responses_3xx + - responses_4xx + - responses_5xx + - responses_total + - received + - sent + - discarded +- nginx_plus_api_http_upstreams + - keepalive + - zombies +- nginx_plus_api_http_upstream_peers + - requests + - unavail + - healthchecks_checks + - header_time + - state + - response_time + - active + - healthchecks_last_passed + - weight + - responses_1xx + - responses_2xx + - responses_3xx + - responses_4xx + - responses_5xx + - received + - healthchecks_fails + - healthchecks_unhealthy + - backup + - responses_total + - sent + - fails + - downtime +- nginx_plus_api_http_caches + - size + - max_size + - cold + - hit_responses + - hit_bytes + - stale_responses + - stale_bytes + - updating_responses + - updating_bytes + - revalidated_responses + - revalidated_bytes + - miss_responses + - miss_bytes + - miss_responses_written + - miss_bytes_written + - expired_responses + - expired_bytes + - expired_responses_written + - expired_bytes_written + - bypass_responses + - bypass_bytes + - bypass_responses_written + - bypass_bytes_written +- nginx_plus_api_stream_upstreams + - zombies +- nginx_plus_api_stream_upstream_peers + - unavail + - healthchecks_checks + - healthchecks_fails + - healthchecks_unhealthy + - healthchecks_last_passed + - response_time + - state + - active + - weight + - received + - backup + - sent + - fails + - downtime +- nginx_plus_api_stream_server_zones + - processing + - connections + - received + - sent + + +### Tags: + +- nginx_plus_api_processes, nginx_plus_api_connections, nginx_plus_api_ssl, nginx_plus_api_http_requests + - source + - port + +- nginx_plus_api_http_upstreams, nginx_plus_api_stream_upstreams + - upstream + - source + - port + +- nginx_plus_api_http_server_zones, nginx_plus_api_upstream_server_zones + - source + - port + - zone + +- nginx_plus_api_upstream_peers, nginx_plus_api_stream_upstream_peers + - id + - upstream + - source + - port + - upstream_address + +- nginx_plus_api_http_caches + - source + - port + +### Example Output: + +Using this configuration: +``` +[[inputs.nginx_plus_api]] + ## An array of Nginx Plus API URIs to gather stats. 
+ urls = ["http://localhost/api"] +``` + +When run with: +``` +./telegraf -config telegraf.conf -input-filter nginx_plus_api -test +``` + +It produces: +``` +> nginx_plus_api_processes,host=localhost,port=80,source=localhost respawned=0i 1539163505000000000 +> nginx_plus_api_connections,host=localhost,port=80,source=localhost accepted=120890747i,active=6i,dropped=0i,idle=67i 1539163505000000000 +> nginx_plus_api_ssl,host=localhost,port=80,source=localhost handshakes=2983938i,handshakes_failed=54350i,session_reuses=2485267i 1539163506000000000 +> nginx_plus_api_http_requests,host=localhost,port=80,source=localhost current=12i,total=175270198i 1539163506000000000 +> nginx_plus_api_http_server_zones,host=localhost,port=80,source=localhost,zone=hg.nginx.org discarded=45i,processing=0i,received=35723884i,requests=134102i,responses_1xx=0i,responses_2xx=96890i,responses_3xx=6892i,responses_4xx=30270i,responses_5xx=5i,responses_total=134057i,sent=3681826618i 1539163506000000000 +> nginx_plus_api_http_server_zones,host=localhost,port=80,source=localhost,zone=trac.nginx.org discarded=4034i,processing=9i,received=282399663i,requests=336129i,responses_1xx=0i,responses_2xx=101264i,responses_3xx=25454i,responses_4xx=68961i,responses_5xx=136407i,responses_total=332086i,sent=2346677493i 1539163506000000000 +> nginx_plus_api_http_server_zones,host=localhost,port=80,source=localhost,zone=lxr.nginx.org discarded=4i,processing=1i,received=7223569i,requests=29661i,responses_1xx=0i,responses_2xx=28584i,responses_3xx=73i,responses_4xx=390i,responses_5xx=609i,responses_total=29656i,sent=5811238975i 1539163506000000000 +> nginx_plus_api_http_upstreams,host=localhost,port=80,source=localhost,upstream=trac-backend keepalive=0i,zombies=0i 1539163506000000000 +> nginx_plus_api_http_upstream_peers,host=localhost,id=0,port=80,source=localhost,upstream=trac-backend,upstream_address=10.0.0.1:8080 active=0i,backup=false,downtime=53870i,fails=5i,header_time=421i,healthchecks_checks=17275i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=1885213684i,requests=88476i,response_time=423i,responses_1xx=0i,responses_2xx=50997i,responses_3xx=205i,responses_4xx=34344i,responses_5xx=2076i,responses_total=87622i,sent=189938404i,state="up",unavail=5i,weight=1i 1539163506000000000 +> nginx_plus_api_http_upstream_peers,host=localhost,id=1,port=80,source=localhost,upstream=trac-backend,upstream_address=10.0.0.1:8081 active=0i,backup=true,downtime=173957231i,fails=0i,healthchecks_checks=17394i,healthchecks_fails=17394i,healthchecks_last_passed=false,healthchecks_unhealthy=1i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="unhealthy",unavail=0i,weight=1i 1539163506000000000 +> nginx_plus_api_http_upstreams,host=localhost,port=80,source=localhost,upstream=hg-backend keepalive=0i,zombies=0i 1539163506000000000 +> nginx_plus_api_http_upstream_peers,host=localhost,id=0,port=80,source=localhost,upstream=hg-backend,upstream_address=10.0.0.1:8088 active=0i,backup=false,downtime=0i,fails=0i,header_time=22i,healthchecks_checks=17319i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=3724240605i,requests=89563i,response_time=44i,responses_1xx=0i,responses_2xx=81996i,responses_3xx=6886i,responses_4xx=639i,responses_5xx=5i,responses_total=89526i,sent=31597952i,state="up",unavail=0i,weight=5i 1539163506000000000 +> 
nginx_plus_api_http_upstream_peers,host=localhost,id=1,port=80,source=localhost,upstream=hg-backend,upstream_address=10.0.0.1:8089 active=0i,backup=true,downtime=173957231i,fails=0i,healthchecks_checks=17394i,healthchecks_fails=17394i,healthchecks_last_passed=false,healthchecks_unhealthy=1i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="unhealthy",unavail=0i,weight=1i 1539163506000000000 +> nginx_plus_api_http_upstreams,host=localhost,port=80,source=localhost,upstream=lxr-backend keepalive=0i,zombies=0i 1539163506000000000 +> nginx_plus_api_http_upstream_peers,host=localhost,id=0,port=80,source=localhost,upstream=lxr-backend,upstream_address=unix:/tmp/cgi.sock active=0i,backup=false,downtime=0i,fails=609i,header_time=111i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=6220215064i,requests=28278i,response_time=172i,responses_1xx=0i,responses_2xx=27665i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=27665i,sent=21337016i,state="up",unavail=0i,weight=1i 1539163506000000000 +> nginx_plus_api_http_upstream_peers,host=localhost,id=1,port=80,source=localhost,upstream=lxr-backend,upstream_address=unix:/tmp/cgib.sock active=0i,backup=true,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,max_conns=42i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="up",unavail=0i,weight=1i 1539163506000000000 +> nginx_plus_api_http_upstreams,host=localhost,port=80,source=localhost,upstream=demo-backend keepalive=0i,zombies=0i 1539163506000000000 +> nginx_plus_api_http_upstream_peers,host=localhost,id=0,port=80,source=localhost,upstream=demo-backend,upstream_address=10.0.0.2:15431 active=0i,backup=false,downtime=0i,fails=0i,healthchecks_checks=173640i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="up",unavail=0i,weight=1i 1539163506000000000 +> nginx_plus_api_http_caches,cache=http_cache,host=localhost,port=80,source=localhost bypass_bytes=0i,bypass_bytes_written=0i,bypass_responses=0i,bypass_responses_written=0i,cold=false,expired_bytes=133671410i,expired_bytes_written=129210272i,expired_responses=15721i,expired_responses_written=15213i,hit_bytes=2459840828i,hit_responses=231195i,max_size=536870912i,miss_bytes=18742246i,miss_bytes_written=85199i,miss_responses=2816i,miss_responses_written=69i,revalidated_bytes=0i,revalidated_responses=0i,size=774144i,stale_bytes=0i,stale_responses=0i,updating_bytes=0i,updating_responses=0i 1539163506000000000 +> nginx_plus_api_stream_server_zones,host=localhost,port=80,source=localhost,zone=postgresql_loadbalancer connections=173639i,processing=0i,received=17884817i,sent=33685966i 1539163506000000000 +> nginx_plus_api_stream_server_zones,host=localhost,port=80,source=localhost,zone=dns_loadbalancer connections=97255i,processing=0i,received=2699082i,sent=16566552i 1539163506000000000 +> nginx_plus_api_stream_upstreams,host=localhost,port=80,source=localhost,upstream=postgresql_backends zombies=0i 1539163507000000000 +> nginx_plus_api_stream_upstream_peers,host=localhost,id=0,port=80,source=localhost,upstream=postgresql_backends,upstream_address=10.0.0.2:15432 
active=0i,backup=false,connect_time=4i,connections=57880i,downtime=0i,fails=0i,first_byte_time=10i,healthchecks_checks=34781i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=11228720i,response_time=10i,sent=5961640i,state="up",unavail=0i,weight=1i 1539163507000000000 +> nginx_plus_api_stream_upstream_peers,host=localhost,id=1,port=80,source=localhost,upstream=postgresql_backends,upstream_address=10.0.0.2:15433 active=0i,backup=false,connect_time=3i,connections=57880i,downtime=0i,fails=0i,first_byte_time=9i,healthchecks_checks=34781i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=11228720i,response_time=10i,sent=5961640i,state="up",unavail=0i,weight=1i 1539163507000000000 +> nginx_plus_api_stream_upstream_peers,host=localhost,id=2,port=80,source=localhost,upstream=postgresql_backends,upstream_address=10.0.0.2:15434 active=0i,backup=false,connect_time=2i,connections=57879i,downtime=0i,fails=0i,first_byte_time=9i,healthchecks_checks=34781i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=11228526i,response_time=9i,sent=5961537i,state="up",unavail=0i,weight=1i 1539163507000000000 +> nginx_plus_api_stream_upstream_peers,host=localhost,id=3,port=80,source=localhost,upstream=postgresql_backends,upstream_address=10.0.0.2:15435 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1539163507000000000 +> nginx_plus_api_stream_upstreams,host=localhost,port=80,source=localhost,upstream=dns_udp_backends zombies=0i 1539163507000000000 +> nginx_plus_api_stream_upstream_peers,host=localhost,id=0,port=80,source=localhost,upstream=dns_udp_backends,upstream_address=10.0.0.5:53 active=0i,backup=false,connect_time=0i,connections=64837i,downtime=0i,fails=0i,first_byte_time=17i,healthchecks_checks=34761i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=10996616i,response_time=17i,sent=1791693i,state="up",unavail=0i,weight=2i 1539163507000000000 +> nginx_plus_api_stream_upstream_peers,host=localhost,id=1,port=80,source=localhost,upstream=dns_udp_backends,upstream_address=10.0.0.2:53 active=0i,backup=false,connect_time=0i,connections=32418i,downtime=0i,fails=0i,first_byte_time=17i,healthchecks_checks=34761i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=5569936i,response_time=17i,sent=907389i,state="up",unavail=0i,weight=1i 1539163507000000000 +> nginx_plus_api_stream_upstream_peers,host=localhost,id=2,port=80,source=localhost,upstream=dns_udp_backends,upstream_address=10.0.0.7:53 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1539163507000000000 +> nginx_plus_api_stream_upstreams,host=localhost,port=80,source=localhost,upstream=unused_tcp_backends zombies=0i 1539163507000000000 +> nginx_plus_api_stream_upstream_peers,host=localhost,id=1,port=80,source=localhost,upstream=unused_tcp_backends,upstream_address=95.211.80.227:80 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1539163507000000000 +> 
nginx_plus_api_stream_upstream_peers,host=localhost,id=2,port=80,source=localhost,upstream=unused_tcp_backends,upstream_address=206.251.255.63:80 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1539163507000000000 +> nginx_plus_api_stream_upstream_peers,host=localhost,id=3,port=80,source=localhost,upstream=unused_tcp_backends,upstream_address=[2001:1af8:4060:a004:21::e3]:80 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1539163507000000000 +> nginx_plus_api_stream_upstream_peers,host=localhost,id=4,port=80,source=localhost,upstream=unused_tcp_backends,upstream_address=[2606:7100:1:69::3f]:80 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1539163507000000000 +``` + +### Reference material + +[api documentation](http://demo.nginx.com/swagger-ui/#/) diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api.go b/plugins/inputs/nginx_plus_api/nginx_plus_api.go new file mode 100644 index 000000000..d44f793f1 --- /dev/null +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api.go @@ -0,0 +1,115 @@ +package nginx_plus_api + +import ( + "fmt" + "net/http" + "net/url" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type NginxPlusApi struct { + Urls []string + + ApiVersion int64 + + client *http.Client + + ResponseTimeout internal.Duration +} + +const ( + // Default settings + defaultApiVersion = 3 + + // Paths + processesPath = "processes" + connectionsPath = "connections" + sslPath = "ssl" + + httpRequestsPath = "http/requests" + httpServerZonesPath = "http/server_zones" + httpUpstreamsPath = "http/upstreams" + httpCachesPath = "http/caches" + + streamServerZonesPath = "stream/server_zones" + streamUpstreamsPath = "stream/upstreams" +) + +var sampleConfig = ` + ## An array of API URI to gather stats. 
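+ ## The API version and endpoint path are appended to each URL automatically, + ## e.g. http://localhost/api/3/processes for the processes endpoint.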
+ urls = ["http://localhost/api"] + + # Nginx API version, default: 3 + # api_version = 3 + + # HTTP response timeout (default: 5s) + response_timeout = "5s" +` + +func (n *NginxPlusApi) SampleConfig() string { + return sampleConfig +} + +func (n *NginxPlusApi) Description() string { + return "Read Nginx Plus Api documentation" +} + +func (n *NginxPlusApi) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + + // Create an HTTP client that is re-used for each + // collection interval + + if n.ApiVersion == 0 { + n.ApiVersion = defaultApiVersion + } + + if n.client == nil { + client, err := n.createHttpClient() + if err != nil { + return err + } + n.client = client + } + + for _, u := range n.Urls { + addr, err := url.Parse(u) + if err != nil { + acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + continue + } + + wg.Add(1) + go func(addr *url.URL) { + defer wg.Done() + n.gatherMetrics(addr, acc) + }(addr) + } + + wg.Wait() + return nil +} + +func (n *NginxPlusApi) createHttpClient() (*http.Client, error) { + if n.ResponseTimeout.Duration < time.Second { + n.ResponseTimeout.Duration = time.Second * 5 + } + + client := &http.Client{ + Transport: &http.Transport{}, + Timeout: n.ResponseTimeout.Duration, + } + + return client, nil +} + +func init() { + inputs.Add("nginx_plus_api", func() telegraf.Input { + return &NginxPlusApi{} + }) +} diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go new file mode 100644 index 000000000..5583670e4 --- /dev/null +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go @@ -0,0 +1,454 @@ +package nginx_plus_api + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/influxdata/telegraf" +) + +func (n *NginxPlusApi) gatherMetrics(addr *url.URL, acc telegraf.Accumulator) { + acc.AddError(n.gatherProcessesMetrics(addr, acc)) + acc.AddError(n.gatherConnectionsMetrics(addr, acc)) + acc.AddError(n.gatherSslMetrics(addr, acc)) + acc.AddError(n.gatherHttpRequestsMetrics(addr, acc)) + acc.AddError(n.gatherHttpServerZonesMetrics(addr, acc)) + acc.AddError(n.gatherHttpUpstreamsMetrics(addr, acc)) + acc.AddError(n.gatherHttpCachesMetrics(addr, acc)) + acc.AddError(n.gatherStreamServerZonesMetrics(addr, acc)) + acc.AddError(n.gatherStreamUpstreamsMetrics(addr, acc)) +} + +func (n *NginxPlusApi) gatherUrl(addr *url.URL, path string) ([]byte, error) { + url := fmt.Sprintf("%s/%d/%s", addr.String(), n.ApiVersion, path) + resp, err := n.client.Get(url) + + if err != nil { + return nil, fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status) + } + contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] + switch contentType { + case "application/json": + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return body, nil + default: + return nil, fmt.Errorf("%s returned unexpected content type %s", url, contentType) + } +} + +func (n *NginxPlusApi) gatherProcessesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, processesPath) + if err != nil { + return err + } + + var processes = &Processes{} + + if err := json.Unmarshal(body, processes); err != nil { + return err + } + + acc.AddFields( + "nginx_plus_api_processes", + map[string]interface{}{ + 
"respawned": processes.Respawned, + }, + getTags(addr), + ) + + return nil +} + +func (n *NginxPlusApi) gatherConnectionsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, connectionsPath) + if err != nil { + return err + } + + var connections = &Connections{} + + if err := json.Unmarshal(body, connections); err != nil { + return err + } + + acc.AddFields( + "nginx_plus_api_connections", + map[string]interface{}{ + "accepted": connections.Accepted, + "dropped": connections.Dropped, + "active": connections.Active, + "idle": connections.Idle, + }, + getTags(addr), + ) + + return nil +} + +func (n *NginxPlusApi) gatherSslMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, sslPath) + if err != nil { + return err + } + + var ssl = &Ssl{} + + if err := json.Unmarshal(body, ssl); err != nil { + return err + } + + acc.AddFields( + "nginx_plus_api_ssl", + map[string]interface{}{ + "handshakes": ssl.Handshakes, + "handshakes_failed": ssl.HandshakesFailed, + "session_reuses": ssl.SessionReuses, + }, + getTags(addr), + ) + + return nil +} + +func (n *NginxPlusApi) gatherHttpRequestsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, httpRequestsPath) + if err != nil { + return err + } + + var httpRequests = &HttpRequests{} + + if err := json.Unmarshal(body, httpRequests); err != nil { + return err + } + + acc.AddFields( + "nginx_plus_api_http_requests", + map[string]interface{}{ + "total": httpRequests.Total, + "current": httpRequests.Current, + }, + getTags(addr), + ) + + return nil +} + +func (n *NginxPlusApi) gatherHttpServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, httpServerZonesPath) + if err != nil { + return err + } + + var httpServerZones HttpServerZones + + if err := json.Unmarshal(body, &httpServerZones); err != nil { + return err + } + + tags := getTags(addr) + + for zoneName, zone := range httpServerZones { + zoneTags := map[string]string{} + for k, v := range tags { + zoneTags[k] = v + } + zoneTags["zone"] = zoneName + acc.AddFields( + "nginx_plus_api_http_server_zones", + func() map[string]interface{} { + result := map[string]interface{}{ + "processing": zone.Processing, + "requests": zone.Requests, + "responses_1xx": zone.Responses.Responses1xx, + "responses_2xx": zone.Responses.Responses2xx, + "responses_3xx": zone.Responses.Responses3xx, + "responses_4xx": zone.Responses.Responses4xx, + "responses_5xx": zone.Responses.Responses5xx, + "responses_total": zone.Responses.Total, + "received": zone.Received, + "sent": zone.Sent, + } + if zone.Discarded != nil { + result["discarded"] = *zone.Discarded + } + return result + }(), + zoneTags, + ) + } + + return nil +} + +func (n *NginxPlusApi) gatherHttpUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, httpUpstreamsPath) + if err != nil { + return err + } + + var httpUpstreams HttpUpstreams + + if err := json.Unmarshal(body, &httpUpstreams); err != nil { + return err + } + + tags := getTags(addr) + + for upstreamName, upstream := range httpUpstreams { + upstreamTags := map[string]string{} + for k, v := range tags { + upstreamTags[k] = v + } + upstreamTags["upstream"] = upstreamName + upstreamFields := map[string]interface{}{ + "keepalive": upstream.Keepalive, + "zombies": upstream.Zombies, + } + if upstream.Queue != nil { + upstreamFields["queue_size"] = upstream.Queue.Size + upstreamFields["queue_max_size"] = 
upstream.Queue.MaxSize + upstreamFields["queue_overflows"] = upstream.Queue.Overflows + } + acc.AddFields( + "nginx_plus_api_http_upstreams", + upstreamFields, + upstreamTags, + ) + for _, peer := range upstream.Peers { + peerFields := map[string]interface{}{ + "backup": peer.Backup, + "weight": peer.Weight, + "state": peer.State, + "active": peer.Active, + "requests": peer.Requests, + "responses_1xx": peer.Responses.Responses1xx, + "responses_2xx": peer.Responses.Responses2xx, + "responses_3xx": peer.Responses.Responses3xx, + "responses_4xx": peer.Responses.Responses4xx, + "responses_5xx": peer.Responses.Responses5xx, + "responses_total": peer.Responses.Total, + "sent": peer.Sent, + "received": peer.Received, + "fails": peer.Fails, + "unavail": peer.Unavail, + "healthchecks_checks": peer.HealthChecks.Checks, + "healthchecks_fails": peer.HealthChecks.Fails, + "healthchecks_unhealthy": peer.HealthChecks.Unhealthy, + "downtime": peer.Downtime, + //"selected": peer.Selected.toInt64, + //"downstart": peer.Downstart.toInt64, + } + if peer.HealthChecks.LastPassed != nil { + peerFields["healthchecks_last_passed"] = *peer.HealthChecks.LastPassed + } + if peer.HeaderTime != nil { + peerFields["header_time"] = *peer.HeaderTime + } + if peer.ResponseTime != nil { + peerFields["response_time"] = *peer.ResponseTime + } + if peer.MaxConns != nil { + peerFields["max_conns"] = *peer.MaxConns + } + peerTags := map[string]string{} + for k, v := range upstreamTags { + peerTags[k] = v + } + peerTags["upstream_address"] = peer.Server + if peer.ID != nil { + peerTags["id"] = strconv.Itoa(*peer.ID) + } + acc.AddFields("nginx_plus_api_http_upstream_peers", peerFields, peerTags) + } + } + return nil +} + +func (n *NginxPlusApi) gatherHttpCachesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, httpCachesPath) + if err != nil { + return err + } + + var httpCaches HttpCaches + + if err := json.Unmarshal(body, &httpCaches); err != nil { + return err + } + + tags := getTags(addr) + + for cacheName, cache := range httpCaches { + cacheTags := map[string]string{} + for k, v := range tags { + cacheTags[k] = v + } + cacheTags["cache"] = cacheName + acc.AddFields( + "nginx_plus_api_http_caches", + map[string]interface{}{ + "size": cache.Size, + "max_size": cache.MaxSize, + "cold": cache.Cold, + "hit_responses": cache.Hit.Responses, + "hit_bytes": cache.Hit.Bytes, + "stale_responses": cache.Stale.Responses, + "stale_bytes": cache.Stale.Bytes, + "updating_responses": cache.Updating.Responses, + "updating_bytes": cache.Updating.Bytes, + "revalidated_responses": cache.Revalidated.Responses, + "revalidated_bytes": cache.Revalidated.Bytes, + "miss_responses": cache.Miss.Responses, + "miss_bytes": cache.Miss.Bytes, + "miss_responses_written": cache.Miss.ResponsesWritten, + "miss_bytes_written": cache.Miss.BytesWritten, + "expired_responses": cache.Expired.Responses, + "expired_bytes": cache.Expired.Bytes, + "expired_responses_written": cache.Expired.ResponsesWritten, + "expired_bytes_written": cache.Expired.BytesWritten, + "bypass_responses": cache.Bypass.Responses, + "bypass_bytes": cache.Bypass.Bytes, + "bypass_responses_written": cache.Bypass.ResponsesWritten, + "bypass_bytes_written": cache.Bypass.BytesWritten, + }, + cacheTags, + ) + } + + return nil +} + +func (n *NginxPlusApi) gatherStreamServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, streamServerZonesPath) + if err != nil { + return err + } + + var streamServerZones 
StreamServerZones + + if err := json.Unmarshal(body, &streamServerZones); err != nil { + return err + } + + tags := getTags(addr) + + for zoneName, zone := range streamServerZones { + zoneTags := map[string]string{} + for k, v := range tags { + zoneTags[k] = v + } + zoneTags["zone"] = zoneName + acc.AddFields( + "nginx_plus_api_stream_server_zones", + map[string]interface{}{ + "processing": zone.Processing, + "connections": zone.Connections, + "received": zone.Received, + "sent": zone.Sent, + }, + zoneTags, + ) + } + + return nil +} + +func (n *NginxPlusApi) gatherStreamUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, streamUpstreamsPath) + if err != nil { + return err + } + + var streamUpstreams StreamUpstreams + + if err := json.Unmarshal(body, &streamUpstreams); err != nil { + return err + } + + tags := getTags(addr) + + for upstreamName, upstream := range streamUpstreams { + upstreamTags := map[string]string{} + for k, v := range tags { + upstreamTags[k] = v + } + upstreamTags["upstream"] = upstreamName + acc.AddFields( + "nginx_plus_api_stream_upstreams", + map[string]interface{}{ + "zombies": upstream.Zombies, + }, + upstreamTags, + ) + for _, peer := range upstream.Peers { + peerFields := map[string]interface{}{ + "backup": peer.Backup, + "weight": peer.Weight, + "state": peer.State, + "active": peer.Active, + "connections": peer.Connections, + "sent": peer.Sent, + "received": peer.Received, + "fails": peer.Fails, + "unavail": peer.Unavail, + "healthchecks_checks": peer.HealthChecks.Checks, + "healthchecks_fails": peer.HealthChecks.Fails, + "healthchecks_unhealthy": peer.HealthChecks.Unhealthy, + "downtime": peer.Downtime, + } + if peer.HealthChecks.LastPassed != nil { + peerFields["healthchecks_last_passed"] = *peer.HealthChecks.LastPassed + } + if peer.ConnectTime != nil { + peerFields["connect_time"] = *peer.ConnectTime + } + if peer.FirstByteTime != nil { + peerFields["first_byte_time"] = *peer.FirstByteTime + } + if peer.ResponseTime != nil { + peerFields["response_time"] = *peer.ResponseTime + } + peerTags := map[string]string{} + for k, v := range upstreamTags { + peerTags[k] = v + } + peerTags["upstream_address"] = peer.Server + peerTags["id"] = strconv.Itoa(peer.ID) + + acc.AddFields("nginx_plus_api_stream_upstream_peers", peerFields, peerTags) + } + } + + return nil +} + +func getTags(addr *url.URL) map[string]string { + h := addr.Host + host, port, err := net.SplitHostPort(h) + if err != nil { + host = addr.Host + if addr.Scheme == "http" { + port = "80" + } else if addr.Scheme == "https" { + port = "443" + } else { + port = "" + } + } + return map[string]string{"source": host, "port": port} +} diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go new file mode 100644 index 000000000..a7516dee5 --- /dev/null +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go @@ -0,0 +1,1075 @@ +package nginx_plus_api + +import ( + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +const processesPayload = ` +{ + "respawned": 0 +} +` + +const connectionsPayload = ` +{ + "accepted": 1234567890000, + "dropped": 2345678900000, + "active": 345, + "idle": 567 +} +` + +const sslPayload = ` +{ + "handshakes": 79572, + "handshakes_failed": 21025, + "session_reuses": 15762 +} +` + +const httpRequestsPayload = ` +{ + "total": 10624511, + 
"current": 4 +} +` + +const httpServerZonesPayload = ` +{ + "site1": { + "processing": 2, + "requests": 736395, + "responses": { + "1xx": 0, + "2xx": 727290, + "3xx": 4614, + "4xx": 934, + "5xx": 1535, + "total": 734373 + }, + "discarded": 2020, + "received": 180157219, + "sent": 20183175459 + }, + "site2": { + "processing": 1, + "requests": 185307, + "responses": { + "1xx": 0, + "2xx": 112674, + "3xx": 45383, + "4xx": 2504, + "5xx": 4419, + "total": 164980 + }, + "discarded": 20326, + "received": 51575327, + "sent": 2983241510 + } +} +` + +const httpUpstreamsPayload = ` +{ + "trac-backend": { + "peers": [ + { + "id": 0, + "server": "10.0.0.1:8088", + "name": "10.0.0.1:8088", + "backup": false, + "weight": 5, + "state": "up", + "active": 0, + "requests": 667231, + "header_time": 20, + "response_time": 36, + "responses": { + "1xx": 0, + "2xx": 666310, + "3xx": 0, + "4xx": 915, + "5xx": 6, + "total": 667231 + }, + "sent": 251946292, + "received": 19222475454, + "fails": 0, + "unavail": 0, + "health_checks": { + "checks": 26214, + "fails": 0, + "unhealthy": 0, + "last_passed": true + }, + "downtime": 0, + "downstart": {}, + "selected": {} + }, + { + "id": 1, + "server": "10.0.0.1:8089", + "name": "10.0.0.1:8089", + "backup": true, + "weight": 1, + "state": "unhealthy", + "active": 0, + "requests": 0, + "responses": { + "1xx": 0, + "2xx": 0, + "3xx": 0, + "4xx": 0, + "5xx": 0, + "total": 0 + }, + "sent": 0, + "received": 0, + "fails": 0, + "unavail": 0, + "health_checks": { + "checks": 26284, + "fails": 26284, + "unhealthy": 1, + "last_passed": false + }, + "downtime": 262925617, + "downstart": {}, + "selected": {} + } + ], + "keepalive": 0, + "zombies": 0, + "zone": "trac-backend" + }, + "hg-backend": { + "peers": [ + { + "id": 0, + "server": "10.0.0.1:8088", + "name": "10.0.0.1:8088", + "backup": false, + "weight": 5, + "state": "up", + "active": 0, + "requests": 667231, + "header_time": 20, + "response_time": 36, + "responses": { + "1xx": 0, + "2xx": 666310, + "3xx": 0, + "4xx": 915, + "5xx": 6, + "total": 667231 + }, + "sent": 251946292, + "received": 19222475454, + "fails": 0, + "unavail": 0, + "health_checks": { + "checks": 26214, + "fails": 0, + "unhealthy": 0, + "last_passed": true + }, + "downtime": 0, + "downstart": {}, + "selected": {} + }, + { + "id": 1, + "server": "10.0.0.1:8089", + "name": "10.0.0.1:8089", + "backup": true, + "weight": 1, + "state": "unhealthy", + "active": 0, + "requests": 0, + "responses": { + "1xx": 0, + "2xx": 0, + "3xx": 0, + "4xx": 0, + "5xx": 0, + "total": 0 + }, + "sent": 0, + "received": 0, + "fails": 0, + "unavail": 0, + "health_checks": { + "checks": 26284, + "fails": 26284, + "unhealthy": 1, + "last_passed": false + }, + "downtime": 262925617, + "downstart": {}, + "selected": {} + } + ], + "keepalive": 0, + "zombies": 0, + "zone": "hg-backend" + } +} +` + +const httpCachesPayload = ` +{ + "http-cache": { + "size": 530915328, + "max_size": 536870912, + "cold": false, + "hit": { + "responses": 254032, + "bytes": 6685627875 + }, + "stale": { + "responses": 0, + "bytes": 0 + }, + "updating": { + "responses": 0, + "bytes": 0 + }, + "revalidated": { + "responses": 0, + "bytes": 0 + }, + "miss": { + "responses": 1619201, + "bytes": 53841943822 + }, + "expired": { + "responses": 45859, + "bytes": 1656847080, + "responses_written": 44992, + "bytes_written": 1641825173 + }, + "bypass": { + "responses": 200187, + "bytes": 5510647548, + "responses_written": 200173, + "bytes_written": 44992 + } + }, + "frontend-cache": { + "size": 530915328, + "max_size": 
536870912, + "cold": false, + "hit": { + "responses": 254032, + "bytes": 6685627875 + }, + "stale": { + "responses": 0, + "bytes": 0 + }, + "updating": { + "responses": 0, + "bytes": 0 + }, + "revalidated": { + "responses": 0, + "bytes": 0 + }, + "miss": { + "responses": 1619201, + "bytes": 53841943822 + }, + "expired": { + "responses": 45859, + "bytes": 1656847080, + "responses_written": 44992, + "bytes_written": 1641825173 + }, + "bypass": { + "responses": 200187, + "bytes": 5510647548, + "responses_written": 200173, + "bytes_written": 44992 + } + } +} +` + +const streamUpstreamsPayload = ` +{ + "mysql_backends": { + "peers": [ + { + "id": 0, + "server": "10.0.0.1:12345", + "name": "10.0.0.1:12345", + "backup": false, + "weight": 5, + "state": "up", + "active": 0, + "max_conns": 30, + "connecions": 1231, + "sent": 251946292, + "received": 19222475454, + "fails": 0, + "unavail": 0, + "health_checks": { + "checks": 26214, + "fails": 0, + "unhealthy": 0, + "last_passed": true + }, + "downtime": 0, + "downstart": {}, + "selected": {} + }, + { + "id": 1, + "server": "10.0.0.1:12346", + "name": "10.0.0.1:12346", + "backup": true, + "weight": 1, + "state": "unhealthy", + "active": 0, + "max_conns": 30, + "connections": 0, + "sent": 0, + "received": 0, + "fails": 0, + "unavail": 0, + "health_checks": { + "checks": 26284, + "fails": 26284, + "unhealthy": 1, + "last_passed": false + }, + "downtime": 262925617, + "downstart": {}, + "selected": {} + } + ], + "zombies": 0, + "zone": "mysql_backends" + }, + "dns": { + "peers": [ + { + "id": 0, + "server": "10.0.0.1:12347", + "name": "10.0.0.1:12347", + "backup": false, + "weight": 5, + "state": "up", + "active": 0, + "max_conns": 30, + "connections": 667231, + "sent": 251946292, + "received": 19222475454, + "fails": 0, + "unavail": 0, + "health_checks": { + "checks": 26214, + "fails": 0, + "unhealthy": 0, + "last_passed": true + }, + "downtime": 0, + "downstart": {}, + "selected": {} + }, + { + "id": 1, + "server": "10.0.0.1:12348", + "name": "10.0.0.1:12348", + "backup": true, + "weight": 1, + "state": "unhealthy", + "active": 0, + "connections": 0, + "max_conns": 30, + "sent": 0, + "received": 0, + "fails": 0, + "unavail": 0, + "health_checks": { + "checks": 26284, + "fails": 26284, + "unhealthy": 1, + "last_passed": false + }, + "downtime": 262925617, + "downstart": {}, + "selected": {} + } + ], + "zombies": 0, + "zone": "dns" + } +} +` + +const streamServerZonesPayload = ` +{ + "mysql-frontend": { + "processing": 2, + "connections": 270925, + "sessions": { + "2xx": 155564, + "4xx": 0, + "5xx": 0, + "total": 270925 + }, + "discarded": 0, + "received": 28988975, + "sent": 3879346317 + }, + "dns": { + "processing": 1, + "connections": 155569, + "sessions": { + "2xx": 155564, + "4xx": 0, + "5xx": 0, + "total": 155569 + }, + "discarded": 0, + "received": 4200363, + "sent": 20489184 + } +} +` + +func TestGatherProcessesMetrics(t *testing.T) { + ts, n := prepareEndpoint(processesPath, defaultApiVersion, processesPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(ts) + + require.NoError(t, n.gatherProcessesMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_processes", + map[string]interface{}{ + "respawned": int(0), + }, + map[string]string{ + "source": host, + "port": port, + }) +} + +func TestGatherConnectionsMetrics(t *testing.T) { + ts, n := prepareEndpoint(connectionsPath, defaultApiVersion, connectionsPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, 
port := prepareAddr(ts) + + require.NoError(t, n.gatherConnectionsMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_connections", + map[string]interface{}{ + "accepted": int64(1234567890000), + "dropped": int64(2345678900000), + "active": int64(345), + "idle": int64(567), + }, + map[string]string{ + "source": host, + "port": port, + }) +} + +func TestGatherSslMetrics(t *testing.T) { + ts, n := prepareEndpoint(sslPath, defaultApiVersion, sslPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(ts) + + require.NoError(t, n.gatherSslMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_ssl", + map[string]interface{}{ + "handshakes": int64(79572), + "handshakes_failed": int64(21025), + "session_reuses": int64(15762), + }, + map[string]string{ + "source": host, + "port": port, + }) +} + +func TestGatherHttpRequestsMetrics(t *testing.T) { + ts, n := prepareEndpoint(httpRequestsPath, defaultApiVersion, httpRequestsPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(ts) + + require.NoError(t, n.gatherHttpRequestsMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_requests", + map[string]interface{}{ + "total": int64(10624511), + "current": int64(4), + }, + map[string]string{ + "source": host, + "port": port, + }) +} + +func TestGatherHttpServerZonesMetrics(t *testing.T) { + ts, n := prepareEndpoint(httpServerZonesPath, defaultApiVersion, httpServerZonesPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(ts) + + require.NoError(t, n.gatherHttpServerZonesMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_server_zones", + map[string]interface{}{ + "discarded": int64(2020), + "processing": int(2), + "received": int64(180157219), + "requests": int64(736395), + "responses_1xx": int64(0), + "responses_2xx": int64(727290), + "responses_3xx": int64(4614), + "responses_4xx": int64(934), + "responses_5xx": int64(1535), + "responses_total": int64(734373), + "sent": int64(20183175459), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "site1", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_server_zones", + map[string]interface{}{ + "discarded": int64(20326), + "processing": int(1), + "received": int64(51575327), + "requests": int64(185307), + "responses_1xx": int64(0), + "responses_2xx": int64(112674), + "responses_3xx": int64(45383), + "responses_4xx": int64(2504), + "responses_5xx": int64(4419), + "responses_total": int64(164980), + "sent": int64(2983241510), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "site2", + }) +} + +func TestGatherHttpUpstreamsMetrics(t *testing.T) { + ts, n := prepareEndpoint(httpUpstreamsPath, defaultApiVersion, httpUpstreamsPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(ts) + + require.NoError(t, n.gatherHttpUpstreamsMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_upstreams", + map[string]interface{}{ + "keepalive": int(0), + "zombies": int(0), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "trac-backend", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_upstreams", + map[string]interface{}{ + "keepalive": int(0), + "zombies": int(0), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "hg-backend", + }) + +
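+ // Each peer from the payload is also asserted below as its own nginx_plus_api_http_upstream_peers point, tagged with upstream, upstream_address and id.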
acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_upstream_peers", + map[string]interface{}{ + "active": int(0), + "backup": false, + "downtime": int64(0), + "fails": int64(0), + "header_time": int64(20), + "healthchecks_checks": int64(26214), + "healthchecks_fails": int64(0), + "healthchecks_last_passed": true, + "healthchecks_unhealthy": int64(0), + "received": int64(19222475454), + "requests": int64(667231), + "response_time": int64(36), + "responses_1xx": int64(0), + "responses_2xx": int64(666310), + "responses_3xx": int64(0), + "responses_4xx": int64(915), + "responses_5xx": int64(6), + "responses_total": int64(667231), + "sent": int64(251946292), + "state": "up", + "unavail": int64(0), + "weight": int(5), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "trac-backend", + "upstream_address": "10.0.0.1:8088", + "id": "0", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_upstream_peers", + map[string]interface{}{ + "active": int(0), + "backup": true, + "downtime": int64(262925617), + "fails": int64(0), + "healthchecks_checks": int64(26284), + "healthchecks_fails": int64(26284), + "healthchecks_last_passed": false, + "healthchecks_unhealthy": int64(1), + "received": int64(0), + "requests": int64(0), + "responses_1xx": int64(0), + "responses_2xx": int64(0), + "responses_3xx": int64(0), + "responses_4xx": int64(0), + "responses_5xx": int64(0), + "responses_total": int64(0), + "sent": int64(0), + "state": "unhealthy", + "unavail": int64(0), + "weight": int(1), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "trac-backend", + "upstream_address": "10.0.0.1:8089", + "id": "1", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_upstream_peers", + map[string]interface{}{ + "active": int(0), + "backup": false, + "downtime": int64(0), + "fails": int64(0), + "header_time": int64(20), + "healthchecks_checks": int64(26214), + "healthchecks_fails": int64(0), + "healthchecks_last_passed": true, + "healthchecks_unhealthy": int64(0), + "received": int64(19222475454), + "requests": int64(667231), + "response_time": int64(36), + "responses_1xx": int64(0), + "responses_2xx": int64(666310), + "responses_3xx": int64(0), + "responses_4xx": int64(915), + "responses_5xx": int64(6), + "responses_total": int64(667231), + "sent": int64(251946292), + "state": "up", + "unavail": int64(0), + "weight": int(5), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "hg-backend", + "upstream_address": "10.0.0.1:8088", + "id": "0", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_upstream_peers", + map[string]interface{}{ + "active": int(0), + "backup": true, + "downtime": int64(262925617), + "fails": int64(0), + "healthchecks_checks": int64(26284), + "healthchecks_fails": int64(26284), + "healthchecks_last_passed": false, + "healthchecks_unhealthy": int64(1), + "received": int64(0), + "requests": int64(0), + "responses_1xx": int64(0), + "responses_2xx": int64(0), + "responses_3xx": int64(0), + "responses_4xx": int64(0), + "responses_5xx": int64(0), + "responses_total": int64(0), + "sent": int64(0), + "state": "unhealthy", + "unavail": int64(0), + "weight": int(1), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "hg-backend", + "upstream_address": "10.0.0.1:8089", + "id": "1", + }) +} + +func TestGatherHttpCachesMetrics(t *testing.T) { + ts, n := prepareEndpoint(httpCachesPath, defaultApiVersion, httpCachesPayload) + defer ts.Close() + + var acc 
testutil.Accumulator + addr, host, port := prepareAddr(ts) + + require.NoError(t, n.gatherHttpCachesMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_caches", + map[string]interface{}{ + "bypass_bytes": int64(5510647548), + "bypass_bytes_written": int64(44992), + "bypass_responses": int64(200187), + "bypass_responses_written": int64(200173), + "cold": false, + "expired_bytes": int64(1656847080), + "expired_bytes_written": int64(1641825173), + "expired_responses": int64(45859), + "expired_responses_written": int64(44992), + "hit_bytes": int64(6685627875), + "hit_responses": int64(254032), + "max_size": int64(536870912), + "miss_bytes": int64(53841943822), + "miss_bytes_written": int64(0), + "miss_responses": int64(1619201), + "miss_responses_written": int64(0), + "revalidated_bytes": int64(0), + "revalidated_responses": int64(0), + "size": int64(530915328), + "stale_bytes": int64(0), + "stale_responses": int64(0), + "updating_bytes": int64(0), + "updating_responses": int64(0), + }, + map[string]string{ + "source": host, + "port": port, + "cache": "http-cache", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_caches", + map[string]interface{}{ + "bypass_bytes": int64(5510647548), + "bypass_bytes_written": int64(44992), + "bypass_responses": int64(200187), + "bypass_responses_written": int64(200173), + "cold": false, + "expired_bytes": int64(1656847080), + "expired_bytes_written": int64(1641825173), + "expired_responses": int64(45859), + "expired_responses_written": int64(44992), + "hit_bytes": int64(6685627875), + "hit_responses": int64(254032), + "max_size": int64(536870912), + "miss_bytes": int64(53841943822), + "miss_bytes_written": int64(0), + "miss_responses": int64(1619201), + "miss_responses_written": int64(0), + "revalidated_bytes": int64(0), + "revalidated_responses": int64(0), + "size": int64(530915328), + "stale_bytes": int64(0), + "stale_responses": int64(0), + "updating_bytes": int64(0), + "updating_responses": int64(0), + }, + map[string]string{ + "source": host, + "port": port, + "cache": "frontend-cache", + }) +} + +func TestGatherStreamUpstreams(t *testing.T) { + ts, n := prepareEndpoint(streamUpstreamsPath, defaultApiVersion, streamUpstreamsPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(ts) + + require.NoError(t, n.gatherStreamUpstreamsMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_stream_upstreams", + map[string]interface{}{ + "zombies": int(0), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "mysql_backends", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_stream_upstreams", + map[string]interface{}{ + "zombies": int(0), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "dns", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_stream_upstream_peers", + map[string]interface{}{ + "active": int(0), + "backup": false, + "connections": int64(1231), + "downtime": int64(0), + "fails": int64(0), + "healthchecks_checks": int64(26214), + "healthchecks_fails": int64(0), + "healthchecks_last_passed": true, + "healthchecks_unhealthy": int64(0), + "received": int64(19222475454), + "sent": int64(251946292), + "state": "up", + "unavail": int64(0), + "weight": int(5), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "mysql_backends", + "upstream_address": "10.0.0.1:12345", + "id": "0", + }) + + acc.AssertContainsTaggedFields( + t, +
"nginx_plus_api_stream_upstream_peers", + map[string]interface{}{ + "active": int(0), + "backup": true, + "connections": int64(0), + "downtime": int64(262925617), + "fails": int64(0), + "healthchecks_checks": int64(26284), + "healthchecks_fails": int64(26284), + "healthchecks_last_passed": false, + "healthchecks_unhealthy": int64(1), + "received": int64(0), + "sent": int64(0), + "state": "unhealthy", + "unavail": int64(0), + "weight": int(1), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "mysql_backends", + "upstream_address": "10.0.0.1:12346", + "id": "1", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_stream_upstream_peers", + map[string]interface{}{ + "active": int(0), + "backup": false, + "connections": int64(667231), + "downtime": int64(0), + "fails": int64(0), + "healthchecks_checks": int64(26214), + "healthchecks_fails": int64(0), + "healthchecks_last_passed": true, + "healthchecks_unhealthy": int64(0), + "received": int64(19222475454), + "sent": int64(251946292), + "state": "up", + "unavail": int64(0), + "weight": int(5), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "dns", + "upstream_address": "10.0.0.1:12347", + "id": "0", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_stream_upstream_peers", + map[string]interface{}{ + "active": int(0), + "backup": true, + "connections": int64(0), + "downtime": int64(262925617), + "fails": int64(0), + "healthchecks_checks": int64(26284), + "healthchecks_fails": int64(26284), + "healthchecks_last_passed": false, + "healthchecks_unhealthy": int64(1), + "received": int64(0), + "sent": int64(0), + "state": "unhealthy", + "unavail": int64(0), + "weight": int(1), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "dns", + "upstream_address": "10.0.0.1:12348", + "id": "1", + }) + +} + +func TestGatherStreamServerZonesMatrics(t *testing.T) { + ts, n := prepareEndpoint(streamServerZonesPath, defaultApiVersion, streamServerZonesPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(ts) + + require.NoError(t, n.gatherStreamServerZonesMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_stream_server_zones", + map[string]interface{}{ + "connections": int(270925), + "processing": int(2), + "received": int64(28988975), + "sent": int64(3879346317), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "mysql-frontend", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_stream_server_zones", + map[string]interface{}{ + "connections": int(155569), + "processing": int(1), + "received": int64(4200363), + "sent": int64(20489184), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "dns", + }) +} + +func prepareAddr(ts *httptest.Server) (*url.URL, string, string) { + addr, err := url.Parse(fmt.Sprintf("%s/api", ts.URL)) + if err != nil { + panic(err) + } + + host, port, err := net.SplitHostPort(addr.Host) + + if err != nil { + host = addr.Host + if addr.Scheme == "http" { + port = "80" + } else if addr.Scheme == "https" { + port = "443" + } else { + port = "" + } + } + + return addr, host, port +} + +func prepareEndpoint(path string, apiVersion int64, payload string) (*httptest.Server, *NginxPlusApi) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var rsp string + + if r.URL.Path == fmt.Sprintf("/api/%d/%s", apiVersion, path) { + rsp = payload + w.Header()["Content-Type"] = 
[]string{"application/json"} + } else { + panic("Cannot handle request") + } + + fmt.Fprintln(w, rsp) + })) + + n := &NginxPlusApi{ + Urls: []string{fmt.Sprintf("%s/api", ts.URL)}, + ApiVersion: apiVersion, + } + + client, err := n.createHttpClient() + if err != nil { + panic(err) + } + n.client = client + + return ts, n +} diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go new file mode 100644 index 000000000..b8240f844 --- /dev/null +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go @@ -0,0 +1,133 @@ +package nginx_plus_api + +type Processes struct { + Respawned int `json:"respawned"` +} + +type Connections struct { + Accepted int64 `json:"accepted"` + Dropped int64 `json:"dropped"` + Active int64 `json:"active"` + Idle int64 `json:"idle"` +} + +type Ssl struct { // added in version 6 + Handshakes int64 `json:"handshakes"` + HandshakesFailed int64 `json:"handshakes_failed"` + SessionReuses int64 `json:"session_reuses"` +} + +type HttpRequests struct { + Total int64 `json:"total"` + Current int64 `json:"current"` +} + +type ResponseStats struct { + Responses1xx int64 `json:"1xx"` + Responses2xx int64 `json:"2xx"` + Responses3xx int64 `json:"3xx"` + Responses4xx int64 `json:"4xx"` + Responses5xx int64 `json:"5xx"` + Total int64 `json:"total"` +} + +type HttpServerZones map[string]struct { + Processing int `json:"processing"` + Requests int64 `json:"requests"` + Responses ResponseStats `json:"responses"` + Discarded *int64 `json:"discarded"` // added in version 6 + Received int64 `json:"received"` + Sent int64 `json:"sent"` +} + +type HealthCheckStats struct { + Checks int64 `json:"checks"` + Fails int64 `json:"fails"` + Unhealthy int64 `json:"unhealthy"` + LastPassed *bool `json:"last_passed"` +} + +type HttpUpstreams map[string]struct { + Peers []struct { + ID *int `json:"id"` // added in version 3 + Server string `json:"server"` + Backup bool `json:"backup"` + Weight int `json:"weight"` + State string `json:"state"` + Active int `json:"active"` + Keepalive *int `json:"keepalive"` // removed in version 5 + MaxConns *int `json:"max_conns"` // added in version 3 + Requests int64 `json:"requests"` + Responses ResponseStats `json:"responses"` + Sent int64 `json:"sent"` + Received int64 `json:"received"` + Fails int64 `json:"fails"` + Unavail int64 `json:"unavail"` + HealthChecks HealthCheckStats `json:"health_checks"` + Downtime int64 `json:"downtime"` + HeaderTime *int64 `json:"header_time"` // added in version 5 + ResponseTime *int64 `json:"response_time"` // added in version 5 + } `json:"peers"` + Keepalive int `json:"keepalive"` + Zombies int `json:"zombies"` // added in version 6 + Queue *struct { // added in version 6 + Size int `json:"size"` + MaxSize int `json:"max_size"` + Overflows int64 `json:"overflows"` + } `json:"queue"` +} + +type StreamServerZones map[string]struct { + Processing int `json:"processing"` + Connections int `json:"connections"` + Sessions *ResponseStats `json:"sessions"` + Discarded *int64 `json:"discarded"` // added in version 7 + Received int64 `json:"received"` + Sent int64 `json:"sent"` +} + +type StreamUpstreams map[string]struct { + Peers []struct { + ID int `json:"id"` + Server string `json:"server"` + Backup bool `json:"backup"` + Weight int `json:"weight"` + State string `json:"state"` + Active int `json:"active"` + Connections int64 `json:"connections"` + ConnectTime *int `json:"connect_time"` + FirstByteTime *int `json:"first_byte_time"` + ResponseTime *int 
`json:"response_time"` + Sent int64 `json:"sent"` + Received int64 `json:"received"` + Fails int64 `json:"fails"` + Unavail int64 `json:"unavail"` + HealthChecks HealthCheckStats `json:"health_checks"` + Downtime int64 `json:"downtime"` + } `json:"peers"` + Zombies int `json:"zombies"` +} + +type BasicHitStats struct { + Responses int64 `json:"responses"` + Bytes int64 `json:"bytes"` +} + +type ExtendedHitStats struct { + BasicHitStats + ResponsesWritten int64 `json:"responses_written"` + BytesWritten int64 `json:"bytes_written"` +} + +type HttpCaches map[string]struct { // added in version 2 + Size int64 `json:"size"` + MaxSize int64 `json:"max_size"` + Cold bool `json:"cold"` + Hit BasicHitStats `json:"hit"` + Stale BasicHitStats `json:"stale"` + Updating BasicHitStats `json:"updating"` + Revalidated *BasicHitStats `json:"revalidated"` // added in version 3 + Miss ExtendedHitStats `json:"miss"` + Expired ExtendedHitStats `json:"expired"` + Bypass ExtendedHitStats `json:"bypass"` +} From 55f3645e8cbb353afacfae00dec7e74663387f4d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 22 Oct 2018 12:57:24 -0700 Subject: [PATCH 0309/1815] Update changelog --- CHANGELOG.md | 1 + README.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3cf3cac6a..f6b599029 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ #### New Inputs - [http_listener_v2](/plugins/inputs/http_listener_v2/README.md) - Contributed by @jul1u5 +- [nginx_plus_api](/plugins/inputs/nginx_plus_api/README.md) - Contributed by Bugagazavr #### New Outputs diff --git a/README.md b/README.md index 17b248247..8e2ea94de 100644 --- a/README.md +++ b/README.md @@ -207,6 +207,7 @@ For documentation on the latest development code see the [documentation index][d * [netstat](./plugins/inputs/net) * [nginx](./plugins/inputs/nginx) * [nginx_plus](./plugins/inputs/nginx_plus) +* [nginx_plus_api](./plugins/inputs/nginx_plus_api) * [nsq_consumer](./plugins/inputs/nsq_consumer) * [nsq](./plugins/inputs/nsq) * [nstat](./plugins/inputs/nstat) From 100d11f24ee8cbb1e6ff77c44d659edb1244ca0d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 19 Oct 2018 15:43:45 -0700 Subject: [PATCH 0310/1815] Remove unused cruft --- metric/uint_support.go | 7 ------- 1 file changed, 7 deletions(-) delete mode 100644 metric/uint_support.go diff --git a/metric/uint_support.go b/metric/uint_support.go deleted file mode 100644 index 98383fb23..000000000 --- a/metric/uint_support.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build uint64 - -package metric - -func init() { - EnableUintSupport() -} From 2e59e4dd6c418997ce43ab31ce8405950a73ac66 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 19 Oct 2018 15:46:20 -0700 Subject: [PATCH 0311/1815] Add deprecation version for MetricBuffer --- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 2 +- plugins/inputs/nats_consumer/nats_consumer.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 0a253b8d8..6d1e2cf58 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -37,7 +37,7 @@ type MQTTConsumer struct { parser parsers.Parser - // Legacy metric buffer support + // Legacy metric buffer support; deprecated in v0.10.3 MetricBuffer int PersistentSession bool diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index cb3eb3017..dac80476d 100644 --- 
a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -32,7 +32,7 @@ type natsConsumer struct { PendingMessageLimit int PendingBytesLimit int - // Legacy metric buffer support + // Legacy metric buffer support; deprecated in v0.10.3 MetricBuffer int parser parsers.Parser From 12279042d3a7347a6f90a2c8e0f83be2592d17ef Mon Sep 17 00:00:00 2001 From: Soulou Date: Tue, 23 Oct 2018 02:50:32 +0200 Subject: [PATCH 0312/1815] Add support for TLS configuration in NSQ input (#3903) --- plugins/inputs/nsq/nsq.go | 49 +++++++++++++++++++++++++++------- plugins/inputs/nsq/nsq_test.go | 10 +++---- 2 files changed, 43 insertions(+), 16 deletions(-) diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go index 1ef47ef05..b5aa43d1f 100644 --- a/plugins/inputs/nsq/nsq.go +++ b/plugins/inputs/nsq/nsq.go @@ -33,17 +33,27 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" ) // Might add Lookupd endpoints for cluster discovery type NSQ struct { Endpoints []string + tls.ClientConfig + httpClient *http.Client } var sampleConfig = ` ## An array of NSQD HTTP API endpoints - endpoints = ["http://localhost:4151"] + endpoints = ["http://localhost:4151"] + + ## Or using HTTPS endpoint + endpoints = ["https://localhost:4152"] + tls_cert = "/path/to/client-cert.pem" + tls_key = "/path/to/client-key.pem" + tls_ca = "/path/to/ca.pem" + insecure_skip_verify = false ` const ( @@ -52,10 +62,14 @@ const ( func init() { inputs.Add("nsq", func() telegraf.Input { - return &NSQ{} + return New() }) } +func New() *NSQ { + return &NSQ{} +} + func (n *NSQ) SampleConfig() string { return sampleConfig } @@ -65,6 +79,15 @@ func (n *NSQ) Description() string { } func (n *NSQ) Gather(acc telegraf.Accumulator) error { + var err error + + if n.httpClient == nil { + n.httpClient, err = n.getHttpClient() + if err != nil { + return err + } + } + var wg sync.WaitGroup for _, e := range n.Endpoints { wg.Add(1) @@ -78,13 +101,19 @@ func (n *NSQ) Gather(acc telegraf.Accumulator) error { return nil } -var tr = &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), -} - -var client = &http.Client{ - Transport: tr, - Timeout: time.Duration(4 * time.Second), +func (n *NSQ) getHttpClient() (*http.Client, error) { + tlsConfig, err := n.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + tr := &http.Transport{ + TLSClientConfig: tlsConfig, + } + httpClient := &http.Client{ + Transport: tr, + Timeout: time.Duration(4 * time.Second), + } + return httpClient, nil } func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error { @@ -92,7 +121,7 @@ func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error { if err != nil { return err } - r, err := client.Get(u.String()) + r, err := n.httpClient.Get(u.String()) if err != nil { return fmt.Errorf("Error while polling %s: %s", u.String(), err) } diff --git a/plugins/inputs/nsq/nsq_test.go b/plugins/inputs/nsq/nsq_test.go index f3e9ce868..1d3b541e5 100644 --- a/plugins/inputs/nsq/nsq_test.go +++ b/plugins/inputs/nsq/nsq_test.go @@ -19,9 +19,8 @@ func TestNSQStatsV1(t *testing.T) { })) defer ts.Close() - n := &NSQ{ - Endpoints: []string{ts.URL}, - } + n := New() + n.Endpoints = []string{ts.URL} var acc testutil.Accumulator err := acc.GatherError(n.Gather) @@ -276,9 +275,8 @@ func TestNSQStatsPreV1(t *testing.T) { })) defer ts.Close() - n := &NSQ{ - Endpoints: []string{ts.URL}, - } + n := New() + n.Endpoints 
= []string{ts.URL} var acc testutil.Accumulator err := acc.GatherError(n.Gather) From 8d0ec993c7bb0e9a4d3a7a83839b905e128494e7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 22 Oct 2018 17:54:57 -0700 Subject: [PATCH 0313/1815] Update changelog and add basic nsq input readme --- CHANGELOG.md | 1 + plugins/inputs/nsq/README.md | 17 +++++++++++++++++ plugins/inputs/nsq/nsq.go | 12 ++++++------ 3 files changed, 24 insertions(+), 6 deletions(-) create mode 100644 plugins/inputs/nsq/README.md diff --git a/CHANGELOG.md b/CHANGELOG.md index f6b599029..25a2b584c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ - [#4838](https://github.com/influxdata/telegraf/pull/4838): Add telegraf version to User-Agent header. - [#4864](https://github.com/influxdata/telegraf/pull/4864): Use DescribeStreamSummary in place of ListStreams in kinesis output. - [#4852](https://github.com/influxdata/telegraf/pull/4852): Add ability to specify bytes options as strings with units. +- [#3903](https://github.com/influxdata/telegraf/pull/3903): Add support for TLS configuration in NSQ input. ## v1.8.3 [unreleased] diff --git a/plugins/inputs/nsq/README.md b/plugins/inputs/nsq/README.md new file mode 100644 index 000000000..00c1089af --- /dev/null +++ b/plugins/inputs/nsq/README.md @@ -0,0 +1,17 @@ +# NSQ Input Plugin + +### Configuration: + +```toml +# Description +[[inputs.nsq]] + ## An array of NSQD HTTP API endpoints + endpoints = ["http://localhost:4151"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go index b5aa43d1f..5eab48ea5 100644 --- a/plugins/inputs/nsq/nsq.go +++ b/plugins/inputs/nsq/nsq.go @@ -48,12 +48,12 @@ var sampleConfig = ` ## An array of NSQD HTTP API endpoints endpoints = ["http://localhost:4151"] - ## Or using HTTPS endpoint - endpoints = ["https://localhost:4152"] - tls_cert = "/path/to/client-cert.pem" - tls_key = "/path/to/client-key.pem" - tls_ca = "/path/to/ca.pem" - insecure_skip_verify = false + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ` const ( From 67cd2888dbaae2501e23e9063e4e707e19545a5e Mon Sep 17 00:00:00 2001 From: Mikhail Leonov Date: Thu, 25 Oct 2018 01:02:44 +0300 Subject: [PATCH 0314/1815] Collect additional stats in memcached input (#4914) --- plugins/inputs/memcached/README.md | 57 +++++++++++++++++---------- plugins/inputs/memcached/memcached.go | 57 +++++++++++++++++---------- 2 files changed, 72 insertions(+), 42 deletions(-) diff --git a/plugins/inputs/memcached/README.md b/plugins/inputs/memcached/README.md index 16a19973b..721be9130 100644 --- a/plugins/inputs/memcached/README.md +++ b/plugins/inputs/memcached/README.md @@ -20,31 +20,46 @@ The fields from this plugin are gathered in the *memcached* measurement. 
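Each field below maps onto a counter of the same name returned by memcached's `stats` protocol command, so the plugin's output can be cross-checked by querying the server directly. A minimal sketch (illustrative only; assumes a local server on the default port 11211 and that `nc` is available; the values shown are made up):

```
$ printf "stats\r\nquit\r\n" | nc localhost 11211
STAT uptime 194
STAT curr_connections 10
STAT get_hits 12307
...
END
```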
Fields: -* get_hits - Number of keys that have been requested and found present -* get_misses - Number of items that have been requested and not found -* evictions - Number of valid items removed from cache to free memory for new items -* limit_maxbytes - Number of bytes this server is allowed to use for storage +* accepting_conns - Whether or not server is accepting conns +* auth_cmds - Number of authentication commands handled, success or failure +* auth_errors - Number of failed authentications * bytes - Current number of bytes used to store items -* uptime - Number of secs since the server started -* curr_items - Current number of items stored -* total_items - Total number of items stored since the server started -* curr_connections - Number of open connections -* total_connections - Total number of connections opened since the server started running -* connection_structures - Number of connection structures allocated by the server -* cmd_get - Cumulative number of retrieval reqs -* cmd_set - Cumulative number of storage reqs -* delete_hits - Number of deletion reqs resulting in an item being removed -* delete_misses - umber of deletions reqs for missing keys -* incr_hits - Number of successful incr reqs -* incr_misses - Number of incr reqs against missing keys -* decr_hits - Number of successful decr reqs -* decr_misses - Number of decr reqs against missing keys -* cas_hits - Number of successful CAS reqs -* cas_misses - Number of CAS reqs against missing keys * bytes_read - Total number of bytes read by this server from network * bytes_written - Total number of bytes sent by this server to network -* threads - Number of worker threads requested +* cas_badval - Number of CAS reqs for which a key was found, but the CAS value did not match +* cas_hits - Number of successful CAS reqs +* cas_misses - Number of CAS reqs against missing keys +* cmd_flush - Cumulative number of flush reqs +* cmd_get - Cumulative number of retrieval reqs +* cmd_set - Cumulative number of storage reqs +* cmd_touch - Cumulative number of touch reqs * conn_yields - Number of times any connection yielded to another due to hitting the -R limit +* connection_structures - Number of connection structures allocated by the server +* curr_connections - Number of open connections +* curr_items - Current number of items stored +* decr_hits - Number of successful decr reqs +* decr_misses - Number of decr reqs against missing keys +* delete_hits - Number of deletion reqs resulting in an item being removed +* delete_misses - Number of deletion reqs for missing keys +* evicted_unfetched - Items evicted from LRU that were never touched by get/incr/append/etc +* evictions - Number of valid items removed from cache to free memory for new items +* expired_unfetched - Items pulled from LRU that were never touched by get/incr/append/etc before expiring +* get_hits - Number of keys that have been requested and found present +* get_misses - Number of items that have been requested and not found +* hash_bytes - Bytes currently used by hash tables +* hash_is_expanding - Indicates if the hash table is being grown to a new size +* hash_power_level - Current size multiplier for hash table +* incr_hits - Number of successful incr reqs +* incr_misses - Number of incr reqs against missing keys +* limit_maxbytes - Number of bytes this server is allowed to use for storage +* listen_disabled_num - Number of times server has stopped accepting new connections (maxconns) +* reclaimed - Number of times an entry was stored using memory from an
expired entry +* threads - Number of worker threads requested +* total_connections - Total number of connections opened since the server started running +* total_items - Total number of items stored since the server started +* touch_hits - Number of keys that have been touched with a new expiration time +* touch_misses - Number of items that have been touched and not found +* uptime - Number of secs since the server started Description of gathered fields taken from [here](https://github.com/memcached/memcached/blob/master/doc/protocol.txt). diff --git a/plugins/inputs/memcached/memcached.go b/plugins/inputs/memcached/memcached.go index 2b6b120c8..99128263a 100644 --- a/plugins/inputs/memcached/memcached.go +++ b/plugins/inputs/memcached/memcached.go @@ -29,31 +29,46 @@ var defaultTimeout = 5 * time.Second // The list of metrics that should be sent var sendMetrics = []string{ - "get_hits", - "get_misses", - "evictions", - "limit_maxbytes", + "accepting_conns", + "auth_cmds", + "auth_errors", "bytes", - "uptime", - "curr_items", - "total_items", - "curr_connections", - "total_connections", - "connection_structures", - "cmd_get", - "cmd_set", - "delete_hits", - "delete_misses", - "incr_hits", - "incr_misses", - "decr_hits", - "decr_misses", - "cas_hits", - "cas_misses", "bytes_read", "bytes_written", - "threads", + "cas_badval", + "cas_hits", + "cas_misses", + "cmd_flush", + "cmd_get", + "cmd_set", + "cmd_touch", "conn_yields", + "connection_structures", + "curr_connections", + "curr_items", + "decr_hits", + "decr_misses", + "delete_hits", + "delete_misses", + "evicted_unfetched", + "evictions", + "expired_unfetched", + "get_hits", + "get_misses", + "hash_bytes", + "hash_is_expanding", + "hash_power_level", + "incr_hits", + "incr_misses", + "limit_maxbytes", + "listen_disabled_num", + "reclaimed", + "threads", + "total_connections", + "total_items", + "touch_hits", + "touch_misses", + "uptime", } // SampleConfig returns sample configuration message From 600b468db230d575393f1ddd0be404559bf83e4c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 24 Oct 2018 15:04:18 -0700 Subject: [PATCH 0315/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 25a2b584c..6af10f7ea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,7 @@ - [#4864](https://github.com/influxdata/telegraf/pull/4864): Use DescribeStreamSummary in place of ListStreams in kinesis output. - [#4852](https://github.com/influxdata/telegraf/pull/4852): Add ability to specify bytes options as strings with units. - [#3903](https://github.com/influxdata/telegraf/pull/3903): Add support for TLS configuration in NSQ input. +- [#4914](https://github.com/influxdata/telegraf/pull/4914): Collect additional stats in memcached input. 
## v1.8.3 [unreleased] From b88436c9d79f009014fb8f912c6e343f54f06ddc Mon Sep 17 00:00:00 2001 From: Akshay Moghe Date: Thu, 25 Oct 2018 12:14:19 -0700 Subject: [PATCH 0316/1815] Add IPVS input plugin (#4890) --- Gopkg.lock | 24 ++++++ Gopkg.toml | 8 ++ plugins/inputs/all/all.go | 1 + plugins/inputs/ipvs/README.md | 26 +++++++ plugins/inputs/ipvs/ipvs.go | 112 +++++++++++++++++++++++++++ plugins/inputs/ipvs/ipvs_notlinux.go | 3 + 6 files changed, 174 insertions(+) create mode 100644 plugins/inputs/ipvs/README.md create mode 100644 plugins/inputs/ipvs/ipvs.go create mode 100644 plugins/inputs/ipvs/ipvs_notlinux.go diff --git a/Gopkg.lock b/Gopkg.lock index f5f119a7f..8c500356c 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -328,6 +328,14 @@ revision = "47565b4f722fb6ceae66b95f853feed578a4a51c" version = "v0.3.3" +[[projects]] + branch = "master" + digest = "1:809792497a26f3936462cc5787a0d644b4d3cbfd59587e4f8845a9396ca2eb8a" + name = "github.com/docker/libnetwork" + packages = ["ipvs"] + pruneopts = "" + revision = "d7b61745d16675c9f548b19f06fda80d422a74f0" + [[projects]] digest = "1:6d6672f85a84411509885eaa32f597577873de00e30729b9bb0eb1e1faa49c12" name = "github.com/eapache/go-resiliency" @@ -988,6 +996,21 @@ pruneopts = "" revision = "1731857f09b1f38450e2c12409748407822dc6be" +[[projects]] + digest = "1:026b6ceaabbacaa147e94a63579efc3d3c73e00c73b67fa5c43ab46191ed04eb" + name = "github.com/vishvananda/netlink" + packages = ["nl"] + pruneopts = "" + revision = "b2de5d10e38ecce8607e6b438b6d174f389a004e" + +[[projects]] + branch = "master" + digest = "1:c09fddfdd491edaa4383396503e57023a26e5a824283a78c2310613a1252c649" + name = "github.com/vishvananda/netns" + packages = ["."] + pruneopts = "" + revision = "13995c7128ccc8e51e9a6bd2b551020a27180abd" + [[projects]] digest = "1:343f20460c11a0d0529fe532553bfef9446918d1a1fda6d8661eb27d5b1a68b8" name = "github.com/vjeantet/grok" @@ -1424,6 +1447,7 @@ "github.com/docker/docker/api/types/registry", "github.com/docker/docker/api/types/swarm", "github.com/docker/docker/client", + "github.com/docker/libnetwork/ipvs", "github.com/eclipse/paho.mqtt.golang", "github.com/go-logfmt/logfmt", "github.com/go-redis/redis", diff --git a/Gopkg.toml b/Gopkg.toml index dba4ec4b3..23b8444fe 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -237,3 +237,11 @@ [[constraint]] branch = "master" name = "golang.org/x/oauth2" + +[[constraint]] + branch = "master" + name = "github.com/docker/libnetwork" + +[[override]] + name = "github.com/vishvananda/netlink" + revision = "b2de5d10e38ecce8607e6b438b6d174f389a004e" diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 1d666947a..ad193224a 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -51,6 +51,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor" _ "github.com/influxdata/telegraf/plugins/inputs/ipset" _ "github.com/influxdata/telegraf/plugins/inputs/iptables" + _ "github.com/influxdata/telegraf/plugins/inputs/ipvs" _ "github.com/influxdata/telegraf/plugins/inputs/jolokia" _ "github.com/influxdata/telegraf/plugins/inputs/jolokia2" _ "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry" diff --git a/plugins/inputs/ipvs/README.md b/plugins/inputs/ipvs/README.md new file mode 100644 index 000000000..78bb94574 --- /dev/null +++ b/plugins/inputs/ipvs/README.md @@ -0,0 +1,26 @@ +# IPVS Input Plugin (Linux) + +The IPVS input plugin uses the linux kernel netlink socket interface to gather +metrics about ipvs virtual and real servers. 
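If the host has no IPVS services configured yet, a throwaway virtual server can be created with `ipvsadm` to give the plugin something to report (a sketch only; requires root and the `ip_vs` kernel module, and both addresses are placeholders):

```
# add a TCP virtual server with round-robin scheduling and one masqueraded real server
ipvsadm -A -t 172.18.64.234:9000 -s rr
ipvsadm -a -t 172.18.64.234:9000 -r 10.0.0.10:9000 -m
```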
+ +## Configuration + +[[inputs.ipvs]] + # no configuration + +## Permissions + +Assuming you installed the telegraf package via one of the published packages, +the process will be running as the `telegraf` user. However, in order for this +plugin to communicate over netlink sockets it needs the telegraf process to be +running as `root` (or some user with `CAP_NET_ADMIN` and `CAP_NET_RAW`). Be sure +to ensure these permissions before running telegraf with this plugin included. + +## Sample Output + +This is what you can expect the emitted metrics to look like + +``` +ipvs_virtual_server,address=172.18.64.234,address_family=inet,netmask=32,port=9000,protocol=tcp,sched=mh_418 bytes_out=0i,pps_in=0i,pps_out=0i,cps=0i,pkts_in=0i,pkts_out=0i,connections=0i,bytes_in=0i 1540407540000000000 +ipvs_virtual_server,address_family=inet,fwmark=47,netmask=32,sched=mh_418 connections=0i,pkts_in=0i,bytes_out=0i,pps_in=0i,pps_out=0i,pkts_out=0i,bytes_in=0i,cps=0i 1540407540000000000 +``` diff --git a/plugins/inputs/ipvs/ipvs.go b/plugins/inputs/ipvs/ipvs.go new file mode 100644 index 000000000..5a4e0dc66 --- /dev/null +++ b/plugins/inputs/ipvs/ipvs.go @@ -0,0 +1,112 @@ +// +build linux + +package ipvs + +import ( + "errors" + "fmt" + "math/bits" + "strconv" + "syscall" + + "github.com/docker/libnetwork/ipvs" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// IPVS holds the state for this input plugin +type IPVS struct { + handle *ipvs.Handle +} + +// Description returns a description string +func (i *IPVS) Description() string { + return "Collect virtual and real server stats from Linux IPVS" +} + +// SampleConfig returns a sample configuration for this input plugin +func (i *IPVS) SampleConfig() string { + return `` +} + +// Gather gathers the stats +func (i *IPVS) Gather(acc telegraf.Accumulator) error { + if i.handle == nil { + h, err := ipvs.New("") // TODO: make the namespace configurable + if err != nil { + return errors.New("Unable to open IPVS handle") + } + i.handle = h + } + + services, err := i.handle.GetServices() + if err != nil { + i.handle.Close() + i.handle = nil // trigger a reopen on next call to gather + return errors.New("Failed to list IPVS services") + } + for _, s := range services { + fields := map[string]interface{}{ + "connections": s.Stats.Connections, + "pkts_in": s.Stats.PacketsIn, + "pkts_out": s.Stats.PacketsOut, + "bytes_in": s.Stats.BytesIn, + "bytes_out": s.Stats.BytesOut, + "pps_in": s.Stats.PPSIn, + "pps_out": s.Stats.PPSOut, + "cps": s.Stats.CPS, + } + acc.AddGauge("ipvs_virtual_server", fields, serviceTags(s)) + } + + return nil +} + +// helper: given a Service, return tags that identify it +func serviceTags(s *ipvs.Service) map[string]string { + ret := map[string]string{ + "sched": s.SchedName, + "netmask": fmt.Sprintf("%d", bits.OnesCount32(s.Netmask)), + "address_family": addressFamilyToString(s.AddressFamily), + } + // Per the ipvsadm man page, a virtual service is defined "based on + // protocol/addr/port or firewall mark" + if s.FWMark > 0 { + ret["fwmark"] = strconv.Itoa(int(s.FWMark)) + } else { + ret["protocol"] = protocolToString(s.Protocol) + ret["address"] = s.Address.String() + ret["port"] = strconv.Itoa(int(s.Port)) + } + return ret +} + +// helper: convert protocol uint16 to human readable string (if possible) +func protocolToString(p uint16) string { + switch p { + case syscall.IPPROTO_TCP: + return "tcp" + case syscall.IPPROTO_UDP: + return "udp" + case syscall.IPPROTO_SCTP: + return "sctp" + default: + return 
fmt.Sprintf("%d", p) + } +} + +// helper: convert addressFamily to a human readable string +func addressFamilyToString(af uint16) string { + switch af { + case syscall.AF_INET: + return "inet" + case syscall.AF_INET6: + return "inet6" + default: + return fmt.Sprintf("%d", af) + } +} + +func init() { + inputs.Add("ipvs", func() telegraf.Input { return &IPVS{} }) +} diff --git a/plugins/inputs/ipvs/ipvs_notlinux.go b/plugins/inputs/ipvs/ipvs_notlinux.go new file mode 100644 index 000000000..bbbb1240b --- /dev/null +++ b/plugins/inputs/ipvs/ipvs_notlinux.go @@ -0,0 +1,3 @@ +// +build !linux + +package ipvs From ed417882f3ed9408dec72247307601a03407496b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 25 Oct 2018 12:26:35 -0700 Subject: [PATCH 0317/1815] Update Gopkg.lock by running `dep ensure` --- Gopkg.lock | 44 ++++++++++++++++++-------------------------- 1 file changed, 18 insertions(+), 26 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 8c500356c..a93816c1e 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -4,7 +4,12 @@ [[projects]] digest = "1:972f38a9c879a4920d1e3a3d3438104b6c06163bfa3e6f4064adb00468d40587" name = "cloud.google.com/go" - packages = ["civil"] + packages = [ + "civil", + "compute/metadata", + "internal/version", + "monitoring/apiv3", + ] pruneopts = "" revision = "c728a003b238b26cef9ab6753a5dc424b331c3ad" version = "v0.27.0" @@ -487,14 +492,6 @@ revision = "3af367b6b30c263d47e8895973edcca9a49cf029" version = "v0.2.0" -[[projects]] - digest = "1:e097a364f4e8d8d91b9b9eeafb992d3796a41fde3eb548c1a87eb9d9f60725cf" - name = "github.com/googleapis/gax-go" - packages = ["."] - pruneopts = "" - revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f" - version = "v2.0.0" - [[projects]] digest = "1:c1d7e883c50a26ea34019320d8ae40fad86c9e5d56e63a1ba2cb618cef43e986" name = "github.com/google/uuid" @@ -503,6 +500,14 @@ revision = "064e2069ce9c359c118179501254f67d7d37ba24" version = "0.2" +[[projects]] + digest = "1:e097a364f4e8d8d91b9b9eeafb992d3796a41fde3eb548c1a87eb9d9f60725cf" + name = "github.com/googleapis/gax-go" + packages = ["."] + pruneopts = "" + revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f" + version = "v2.0.0" + [[projects]] digest = "1:dbbeb8ddb0be949954c8157ee8439c2adfd8dc1c9510eb44a6e58cb68c3dce28" name = "github.com/gorilla/context" @@ -1078,14 +1083,6 @@ pruneopts = "" revision = "46796da1b0b4794e1e341883a399f12cc7574b55" -[[projects]] - branch = "master" - digest = "1:2fcfc6c3fb8dfe0d80d7789272230d3ac7db15022b66817113f98d9fff880225" - name = "github.com/zensqlmonitor/go-mssqldb" - packages = ["."] - pruneopts = "" - revision = "e8fbf836e44e86764eba398361d1825651709547" - [[projects]] digest = "1:8c8ec859c77fccd10a347b7219b597c4c21c448949e8bdf3fc3e6f4c78f952b4" name = "go.opencensus.io" @@ -1161,12 +1158,11 @@ name = "golang.org/x/oauth2" packages = [ ".", + "clientcredentials", "google", "internal", "jws", "jwt", - "clientcredentials", - "internal", ] pruneopts = "" revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9" @@ -1254,13 +1250,6 @@ "internal/socket", "internal/urlfetch", "socket", - "cloudsql", - "internal", - "internal/base", - "internal/datastore", - "internal/log", - "internal/remote_api", - "internal/urlfetch", "urlfetch", ] pruneopts = "" @@ -1428,6 +1417,7 @@ "github.com/Shopify/sarama", "github.com/StackExchange/wmi", "github.com/aerospike/aerospike-client-go", + "github.com/alecthomas/units", "github.com/amir/raidman", "github.com/apache/thrift/lib/go/thrift", "github.com/aws/aws-sdk-go/aws", @@ -1454,6 +1444,7 @@ 
"github.com/go-sql-driver/mysql", "github.com/gobwas/glob", "github.com/golang/protobuf/proto", + "github.com/golang/protobuf/ptypes/empty", "github.com/golang/protobuf/ptypes/timestamp", "github.com/google/go-cmp/cmp", "github.com/gorilla/mux", @@ -1525,6 +1516,7 @@ "google.golang.org/grpc", "google.golang.org/grpc/codes", "google.golang.org/grpc/credentials", + "google.golang.org/grpc/metadata", "google.golang.org/grpc/status", "gopkg.in/gorethink/gorethink.v3", "gopkg.in/ldap.v2", From 3374c7583d56c882d39c834f336ca233f3d66f0d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 25 Oct 2018 12:38:27 -0700 Subject: [PATCH 0318/1815] Update license of dependencies list --- docs/LICENSE_OF_DEPENDENCIES.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 2333650fc..dace554a2 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -21,8 +21,9 @@ following works: - github.com/dancannon/gorethink [APACHE](https://github.com/dancannon/gorethink/blob/master/LICENSE) - github.com/davecgh/go-spew [ISC](https://github.com/davecgh/go-spew/blob/master/LICENSE) - github.com/dimchansky/utfbom [APACHE](https://github.com/dimchansky/utfbom/blob/master/LICENSE) -- github.com/docker/docker [APACHE](https://github.com/docker/docker/blob/master/LICENSE) - github.com/docker/cli [APACHE](https://github.com/docker/cli/blob/master/LICENSE) +- github.com/docker/docker [APACHE](https://github.com/docker/docker/blob/master/LICENSE) +- github.com/docker/libnetwork [APACHE](https://github.com/docker/libnetwork/blob/master/LICENSE) - github.com/eapache/go-resiliency [MIT](https://github.com/eapache/go-resiliency/blob/master/LICENSE) - github.com/eapache/go-xerial-snappy [MIT](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE) - github.com/eapache/queue [MIT](https://github.com/eapache/queue/blob/master/LICENSE) @@ -93,6 +94,8 @@ following works: - github.com/tidwall/match [MIT](https://github.com/tidwall/match/blob/master/LICENSE) - github.com/mitchellh/mapstructure [MIT](https://github.com/mitchellh/mapstructure/blob/master/LICENSE) - github.com/multiplay/go-ts3 [BSD](https://github.com/multiplay/go-ts3/blob/master/LICENSE) +- github.com/vishvananda/netlink [APACHE](https://github.com/vishvananda/netlink/blob/master/LICENSE) +- github.com/vishvananda/netns [APACHE](https://github.com/vishvananda/netns/blob/master/LICENSE) - github.com/vjeantet/grok [APACHE](https://github.com/vjeantet/grok/blob/master/LICENSE) - github.com/wvanbergen/kafka [MIT](https://github.com/wvanbergen/kafka/blob/master/LICENSE) - github.com/wvanbergen/kazoo-go [MIT](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE) From 3821d670832958243af3f60e21864e42c154427c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 25 Oct 2018 12:42:19 -0700 Subject: [PATCH 0319/1815] Update changelog and readme --- CHANGELOG.md | 1 + README.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6af10f7ea..8d122a39e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ #### New Inputs - [http_listener_v2](/plugins/inputs/http_listener_v2/README.md) - Contributed by @jul1u5 +- [ipvs](/plugins/inputs/ipvs/README.md) - Contributed by @amoghe - [nginx_plus_api](/plugins/inputs/nginx_plus_api/README.md) - Contributed by Bugagazavr #### New Outputs diff --git a/README.md b/README.md index 8e2ea94de..5d013863a 100644 --- a/README.md +++ b/README.md @@ -178,6 +178,7 @@ For 
documentation on the latest development code see the [documentation index][d * [ipmi_sensor](./plugins/inputs/ipmi_sensor) * [ipset](./plugins/inputs/ipset) * [iptables](./plugins/inputs/iptables) +* [ipvs](./plugins/inputs/ipvs) * [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka) * [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) * [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry) From 133fabc672d5592523cb9b9fd5f2e965098aac10 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 25 Oct 2018 12:44:43 -0700 Subject: [PATCH 0320/1815] Update ipvs readme for style --- plugins/inputs/ipvs/README.md | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/ipvs/README.md b/plugins/inputs/ipvs/README.md index 78bb94574..6a4eaf6fe 100644 --- a/plugins/inputs/ipvs/README.md +++ b/plugins/inputs/ipvs/README.md @@ -1,14 +1,18 @@ -# IPVS Input Plugin (Linux) +# IPVS Input Plugin The IPVS input plugin uses the linux kernel netlink socket interface to gather metrics about ipvs virtual and real servers. -## Configuration +**Supported Platforms:** Linux +### Configuration: + +```toml [[inputs.ipvs]] # no configuration +``` -## Permissions +### Permissions: Assuming you installed the telegraf package via one of the published packages, the process will be running as the `telegraf` user. However, in order for this @@ -16,9 +20,7 @@ plugin to communicate over netlink sockets it needs the telegraf process to be running as `root` (or some user with `CAP_NET_ADMIN` and `CAP_NET_RAW`). Be sure to ensure these permissions before running telegraf with this plugin included. -## Sample Output - -This is what you can expect the emitted metrics to look like +### Example Output: ``` ipvs_virtual_server,address=172.18.64.234,address_family=inet,netmask=32,port=9000,protocol=tcp,sched=mh_418 bytes_out=0i,pps_in=0i,pps_out=0i,cps=0i,pkts_in=0i,pkts_out=0i,connections=0i,bytes_in=0i 1540407540000000000 From 21208d2686d132a7f2188e2c7416d9709deaba33 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Fri, 26 Oct 2018 00:37:18 -0600 Subject: [PATCH 0321/1815] Prevent connection leak by closing unused connections in amqp output (#4924) --- plugins/outputs/amqp/amqp.go | 1 + plugins/outputs/amqp/client.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index a41f0a1fe..dd45f72dc 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -249,6 +249,7 @@ func (q *AMQP) Write(metrics []telegraf.Metric) error { if q.sentMessages >= q.MaxMessages && q.MaxMessages > 0 { log.Printf("D! Output [amqp] sent MaxMessages; closing connection") + q.client.Close() q.client = nil } diff --git a/plugins/outputs/amqp/client.go b/plugins/outputs/amqp/client.go index ba4e45162..0ee45d950 100644 --- a/plugins/outputs/amqp/client.go +++ b/plugins/outputs/amqp/client.go @@ -55,7 +55,7 @@ func Connect(config *ClientConfig) (*client, error) { log.Printf("D! Output [amqp] connected to %q", broker) break } - log.Printf("D! Output [amqp] error connecting to %q", broker) + log.Printf("D! 
Output [amqp] error connecting to %q - %s", broker, err.Error()) } if client.conn == nil { From 170cddc9560cf068295ff7631dc416d42fda46c6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 25 Oct 2018 23:39:13 -0700 Subject: [PATCH 0322/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d122a39e..4d723832d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ - [#4873](https://github.com/influxdata/telegraf/pull/4873): Add DN attributes as tags in x509_cert input to avoid series overwrite. +- [#4921](https://github.com/influxdata/telegraf/issues/4921): Prevent connection leak by closing unused connections in amqp output. ## v1.8.2 [2018-10-17] From 742a74dcf0d9cdc8f8d589df7d34c6c6ef5353ae Mon Sep 17 00:00:00 2001 From: Marcos Nils Date: Fri, 26 Oct 2018 03:51:14 -0300 Subject: [PATCH 0323/1815] Use default partition key when tag does not exist in kinesis output (#4904) --- plugins/outputs/kinesis/README.md | 2 +- plugins/outputs/kinesis/kinesis.go | 17 +++++++++++------ plugins/outputs/kinesis/kinesis_test.go | 11 ++++++++++- 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/plugins/outputs/kinesis/README.md b/plugins/outputs/kinesis/README.md index 809bb7790..12b6178fd 100644 --- a/plugins/outputs/kinesis/README.md +++ b/plugins/outputs/kinesis/README.md @@ -71,7 +71,7 @@ All metrics will be mapped to the same shard which may limit throughput. #### tag This will take the value of the specified tag from each metric as the partitionKey. -If the tag is not found an empty string will be used. +If the tag is not found, the `default` value will be used, or `telegraf` if no default is specified. #### measurement diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index 402f95156..93fc87a66 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -36,8 +36,9 @@ type ( } Partition struct { - Method string `toml:"method"` - Key string `toml:"key"` + Method string `toml:"method"` + Key string `toml:"key"` + Default string `toml:"default"` } ) @@ -90,10 +91,11 @@ var sampleConfig = ` # method = "measurement" # ## Use the value of a tag for all writes, if the tag is not set the empty - ## string will be used: + ## default option will be used. If no default is set, "telegraf" will be used. # [outputs.kinesis.partition] # method = "tag" # key = "host" + # default = "mykey" ## Data format to output. @@ -187,10 +189,13 @@ func (k *KinesisOutput) getPartitionKey(metric telegraf.Metric) string { case "measurement": return metric.Name() case "tag": - if metric.HasTag(k.Partition.Key) { - return metric.Tags()[k.Partition.Key] + if t, ok := metric.GetTag(k.Partition.Key); ok { + return t + } else if len(k.Partition.Default) > 0 { + return k.Partition.Default } - log.Printf("E! kinesis : You have configured a Partition using tag %+v which does not exist.", k.Partition.Key) + // Default partition name if default is not set + return "telegraf" default: + log.Printf("E!
kinesis : You have configured a Partition method of %+v which is not supported", k.Partition.Method) } diff --git a/plugins/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go index 3c6321abd..627a459db 100644 --- a/plugins/outputs/kinesis/kinesis_test.go +++ b/plugins/outputs/kinesis/kinesis_test.go @@ -29,13 +29,22 @@ func TestPartitionKey(t *testing.T) { } assert.Equal(testPoint.Tags()["tag1"], k.getPartitionKey(testPoint), "PartitionKey should be value of 'tag1'") + k = KinesisOutput{ + Partition: &Partition{ + Method: "tag", + Key: "doesnotexist", + Default: "somedefault", + }, + } + assert.Equal("somedefault", k.getPartitionKey(testPoint), "PartitionKey should use default") + k = KinesisOutput{ Partition: &Partition{ Method: "tag", Key: "doesnotexist", }, } - assert.Equal("", k.getPartitionKey(testPoint), "PartitionKey should be value of ''") + assert.Equal("telegraf", k.getPartitionKey(testPoint), "PartitionKey should be telegraf") k = KinesisOutput{ Partition: &Partition{ From c304dd98bf478714d5e5f1c3ded7e5141659597a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 25 Oct 2018 23:52:15 -0700 Subject: [PATCH 0324/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4d723832d..268f7d8b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ - [#4873](https://github.com/influxdata/telegraf/pull/4873): Add DN attributes as tags in x509_cert input to avoid series overwrite. - [#4921](https://github.com/influxdata/telegraf/issues/4921): Prevent connection leak by closing unused connections in amqp output. +- [#4904](https://github.com/influxdata/telegraf/issues/4904): Use default partition key when tag does not exist in kinesis output. ## v1.8.2 [2018-10-17] From b24e03b5974cb6741ca90df9629e837f751f8413 Mon Sep 17 00:00:00 2001 From: James Maidment Date: Mon, 29 Oct 2018 18:57:39 -0400 Subject: [PATCH 0325/1815] Add wireless input plugin (#3847) --- plugins/inputs/all/all.go | 1 + plugins/inputs/wireless/README.md | 38 +++++ plugins/inputs/wireless/wireless.go | 3 + plugins/inputs/wireless/wireless_linux.go | 165 ++++++++++++++++++++++ plugins/inputs/wireless/wireless_test.go | 52 +++++++ 5 files changed, 259 insertions(+) create mode 100644 plugins/inputs/wireless/README.md create mode 100644 plugins/inputs/wireless/wireless.go create mode 100644 plugins/inputs/wireless/wireless_linux.go create mode 100644 plugins/inputs/wireless/wireless_test.go diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index ad193224a..d45013bf3 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -135,6 +135,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/webhooks" _ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters" _ "github.com/influxdata/telegraf/plugins/inputs/win_services" + _ "github.com/influxdata/telegraf/plugins/inputs/wireless" _ "github.com/influxdata/telegraf/plugins/inputs/x509_cert" _ "github.com/influxdata/telegraf/plugins/inputs/zfs" _ "github.com/influxdata/telegraf/plugins/inputs/zipkin" diff --git a/plugins/inputs/wireless/README.md b/plugins/inputs/wireless/README.md new file mode 100644 index 000000000..6be7bd383 --- /dev/null +++ b/plugins/inputs/wireless/README.md @@ -0,0 +1,38 @@ +# Wireless Input Plugin + +The wireless plugin gathers metrics about wireless link quality by reading the `/proc/net/wireless` file. This plugin currently supports linux only. 
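For reference, `/proc/net/wireless` is a fixed-width table with a two-line header followed by one row per interface; the parser added in this patch consumes input of the following shape (taken from the test fixture later in this commit):

```
Inter-| sta-|   Quality        |   Discarded packets               | Missed | WE
 face | tus | link level noise |  nwid  crypt   frag  retry   misc | beacon | 22
 wlan0: 0000   60.  -50.  -256        0      0      0   1525      0        0
```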
+
+### Configuration:
+
+```toml
+# Monitor wifi signal strength and quality
+[[inputs.wireless]]
+  ## Sets 'proc' directory path
+  ## If not specified, then default is /proc
+  # host_proc = "/proc"
+```
+
+### Metrics:
+
+- wireless
+  - tags:
+    - interface (wireless interface)
+  - fields:
+    - status (int64, gauge) - the interface's current state; device-dependent information
+    - link (int64, percentage, gauge) - general quality of the reception
+    - level (int64, dBm, gauge) - signal strength at the receiver
+    - noise (int64, dBm, gauge) - silence level (no packet) at the receiver
+    - nwid (int64, packets, counter) - number of discarded packets due to invalid network id
+    - crypt (int64, packets, counter) - number of packets that could not be decrypted
+    - frag (int64, packets, counter) - fragmented packets
+    - retry (int64, packets, counter) - cumulative retry counts
+    - misc (int64, packets, counter) - packets dropped for unspecified reasons
+    - missed_beacon (int64, packets, counter) - missed beacon packets
+
+### Example Output:
+
+This section shows example output in Line Protocol format.
+
+```
+wireless,host=example.localdomain,interface=wlan0 misc=0i,frag=0i,link=60i,level=-50i,noise=-256i,nwid=0i,crypt=0i,retry=1525i,missed_beacon=0i,status=0i 1519843022000000000
+```
diff --git a/plugins/inputs/wireless/wireless.go b/plugins/inputs/wireless/wireless.go
new file mode 100644
index 000000000..a992e2efe
--- /dev/null
+++ b/plugins/inputs/wireless/wireless.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package wireless
diff --git a/plugins/inputs/wireless/wireless_linux.go b/plugins/inputs/wireless/wireless_linux.go
new file mode 100644
index 000000000..ed5dff27f
--- /dev/null
+++ b/plugins/inputs/wireless/wireless_linux.go
@@ -0,0 +1,165 @@
+// +build linux
+
+package wireless
+
+import (
+	"bytes"
+	"io/ioutil"
+	"log"
+	"os"
+	"path"
+	"strconv"
+	"strings"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// default host proc path
+const defaultHostProc = "/proc"
+
+// env host proc variable name
+const envProc = "HOST_PROC"
+
+// length of wireless interface fields
+const interfaceFieldLength = 10
+
+var newLineByte = []byte("\n")
+
+type wirelessInterface struct {
+	Interface string
+	Status    int64
+	Link      int64
+	Level     int64
+	Noise     int64
+	Nwid      int64
+	Crypt     int64
+	Frag      int64
+	Retry     int64
+	Misc      int64
+	Beacon    int64
+}
+
+// Wireless is used to store configuration values.
+type Wireless struct {
+	HostProc string `toml:"host_proc"`
+}
+
+var sampleConfig = `
+  ## Sets 'proc' directory path
+  ## If not specified, then default is /proc
+  # host_proc = "/proc"
+`
+
+// Description returns information about the plugin.
+func (w *Wireless) Description() string {
+	return "Monitor wifi signal strength and quality"
+}
+
+// SampleConfig displays configuration instructions.
+func (w *Wireless) SampleConfig() string {
+	return sampleConfig
+}
+
+// Gather collects the wireless information.
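+// The quality values (status, link, level, noise) are emitted as gauge fields,
+// and the discarded-packet and missed-beacon counts as counter fields.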
+func (w *Wireless) Gather(acc telegraf.Accumulator) error {
+	// load proc path, get default value if config value and env variable are empty
+	w.loadPath()
+
+	wirelessPath := path.Join(w.HostProc, "net", "wireless")
+	table, err := ioutil.ReadFile(wirelessPath)
+	if err != nil {
+		return err
+	}
+
+	interfaces, err := loadWirelessTable(table)
+	if err != nil {
+		return err
+	}
+	for _, w := range interfaces {
+		tags := map[string]string{
+			"interface": w.Interface,
+		}
+		fieldsG := map[string]interface{}{
+			"status": w.Status,
+			"link":   w.Link,
+			"level":  w.Level,
+			"noise":  w.Noise,
+		}
+		fieldsC := map[string]interface{}{
+			"nwid":   w.Nwid,
+			"crypt":  w.Crypt,
+			"frag":   w.Frag,
+			"retry":  w.Retry,
+			"misc":   w.Misc,
+			"beacon": w.Beacon,
+		}
+		acc.AddGauge("wireless", fieldsG, tags)
+		acc.AddCounter("wireless", fieldsC, tags)
+	}
+
+	return nil
+}
+
+func loadWirelessTable(table []byte) ([]*wirelessInterface, error) {
+	var w []*wirelessInterface
+	lines := bytes.Split(table, newLineByte)
+
+	// iterate over interfaces
+	for i := 2; i < len(lines); i = i + 1 {
+		if len(lines[i]) == 0 {
+			continue
+		}
+		values := make([]int64, 0, interfaceFieldLength)
+		fields := strings.Fields(string(lines[i]))
+		for j := 1; j < len(fields); j = j + 1 {
+			v, err := strconv.ParseInt(strings.Trim(fields[j], "."), 10, 64)
+			if err != nil {
+				return nil, err
+			}
+			values = append(values, v)
+		}
+		if len(values) != interfaceFieldLength {
+			log.Printf("E! [input.wireless] invalid length of interface values")
+			continue
+		}
+		w = append(w, &wirelessInterface{
+			Interface: strings.Trim(fields[0], ":"),
+			Status:    values[0],
+			Link:      values[1],
+			Level:     values[2],
+			Noise:     values[3],
+			Nwid:      values[4],
+			Crypt:     values[5],
+			Frag:      values[6],
+			Retry:     values[7],
+			Misc:      values[8],
+			Beacon:    values[9],
+		})
+	}
+	return w, nil
+}
+
+// loadPath reads the proc path from the config first;
+// if that is empty, it falls back to the env variable.
+func (w *Wireless) loadPath() {
+	if w.HostProc == "" {
+		w.HostProc = proc(envProc, defaultHostProc)
+	}
+}
+
+// proc reads a file path from the given env variable,
+// returning the supplied default path when it is unset.
+func proc(env, path string) string {
+	// try to read full file path
+	if p := os.Getenv(env); p != "" {
+		return p
+	}
+	// return default path
+	return path
+}
+
+func init() {
+	inputs.Add("wireless", func() telegraf.Input {
+		return &Wireless{}
+	})
+}
diff --git a/plugins/inputs/wireless/wireless_test.go b/plugins/inputs/wireless/wireless_test.go
new file mode 100644
index 000000000..f2ca1fc21
--- /dev/null
+++ b/plugins/inputs/wireless/wireless_test.go
@@ -0,0 +1,52 @@
+// +build linux
+
+package wireless
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+var testInput = []byte(`Inter-| sta-|   Quality        |   Discarded packets               | Missed | WE
+ face | tus | link level noise |  nwid  crypt   frag  retry   misc | beacon | 22
+ wlan0: 0000   60.  -50.  -256        0      0      0   1525      0        0
+ wlan1: 0000   70.  -39.
-256 0 0 0 12096 191188 0`) + +func TestLoadWirelessTable(t *testing.T) { + expectedMetrics := []*wirelessInterface{ + &wirelessInterface{ + Interface: "wlan0", + Status: int64(0000), + Link: int64(60), + Level: int64(-50), + Noise: int64(-256), + Nwid: int64(0), + Crypt: int64(0), + Frag: int64(0), + Retry: int64(1525), + Misc: int64(0), + Beacon: int64(0), + }, + &wirelessInterface{ + Interface: "wlan1", + Status: int64(0000), + Link: int64(70), + Level: int64(-39), + Noise: int64(-256), + Nwid: int64(0), + Crypt: int64(0), + Frag: int64(0), + Retry: int64(12096), + Misc: int64(191188), + Beacon: int64(0), + }, + } + metrics, err := loadWirelessTable(testInput) + if err != nil { + t.Fatal(err) + } + + as := assert.New(t) + as.Equal(metrics, expectedMetrics) +} From 454c3be2d298214a115f2d6b90b856fd5dfa83da Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 29 Oct 2018 15:59:37 -0700 Subject: [PATCH 0326/1815] Update changelog and readme --- CHANGELOG.md | 4 +++- README.md | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 268f7d8b2..2627d72ea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,7 +12,8 @@ - [http_listener_v2](/plugins/inputs/http_listener_v2/README.md) - Contributed by @jul1u5 - [ipvs](/plugins/inputs/ipvs/README.md) - Contributed by @amoghe -- [nginx_plus_api](/plugins/inputs/nginx_plus_api/README.md) - Contributed by Bugagazavr +- [nginx_plus_api](/plugins/inputs/nginx_plus_api/README.md) - Contributed by @Bugagazavr +- [wireless](/plugins/inputs/wireless/README.md) - Contributed by @jamesmaidment #### New Outputs @@ -35,6 +36,7 @@ - [#4852](https://github.com/influxdata/telegraf/pull/4852): Add ability to specify bytes options as strings with units. - [#3903](https://github.com/influxdata/telegraf/pull/3903): Add support for TLS configuration in NSQ input. - [#4914](https://github.com/influxdata/telegraf/pull/4914): Collect additional stats in memcached input. +- [#3847](https://github.com/influxdata/telegraf/pull/3847): Add wireless input plugin. 
## v1.8.3 [unreleased] diff --git a/README.md b/README.md index 5d013863a..6381efb92 100644 --- a/README.md +++ b/README.md @@ -267,6 +267,7 @@ For documentation on the latest development code see the [documentation index][d * [rollbar](./plugins/inputs/webhooks/rollbar) * [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters) * [win_services](./plugins/inputs/win_services) +* [wireless](./plugins/inputs/wireless) * [zfs](./plugins/inputs/zfs) * [zipkin](./plugins/inputs/zipkin) * [zookeeper](./plugins/inputs/zookeeper) From f38da903298a7b9ab8073915465315c7d6c2e49b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 29 Oct 2018 16:00:54 -0700 Subject: [PATCH 0327/1815] Run `make fmt` with Go 1.11 --- plugins/inputs/haproxy/haproxy_test.go | 48 +++++++++---------- plugins/inputs/mysql/mysql.go | 12 ++--- .../nginx_plus_api_metrics_test.go | 16 +++---- .../inputs/udp_listener/udp_listener_test.go | 4 +- plugins/inputs/wireless/wireless_test.go | 4 +- plugins/parsers/dropwizard/parser_test.go | 6 +-- 6 files changed, 45 insertions(+), 45 deletions(-) diff --git a/plugins/inputs/haproxy/haproxy_test.go b/plugins/inputs/haproxy/haproxy_test.go index 27a197304..e05031f19 100644 --- a/plugins/inputs/haproxy/haproxy_test.go +++ b/plugins/inputs/haproxy/haproxy_test.go @@ -248,30 +248,30 @@ func HaproxyGetFieldValues() map[string]interface{} { "http_response.4xx": uint64(140), "http_response.5xx": uint64(0), "http_response.other": uint64(0), - "iid": uint64(4), - "last_chk": "OK", - "lastchg": uint64(1036557), - "lastsess": int64(1342), - "lbtot": uint64(9481), - "mode": "http", - "pid": uint64(1), - "qcur": uint64(0), - "qmax": uint64(0), - "qtime": uint64(1268), - "rate": uint64(0), - "rate_max": uint64(2), - "rtime": uint64(2908), - "sid": uint64(1), - "scur": uint64(0), - "slim": uint64(2), - "smax": uint64(2), - "srv_abort": uint64(0), - "status": "UP", - "stot": uint64(14539), - "ttime": uint64(4500), - "weight": uint64(1), - "wredis": uint64(0), - "wretr": uint64(0), + "iid": uint64(4), + "last_chk": "OK", + "lastchg": uint64(1036557), + "lastsess": int64(1342), + "lbtot": uint64(9481), + "mode": "http", + "pid": uint64(1), + "qcur": uint64(0), + "qmax": uint64(0), + "qtime": uint64(1268), + "rate": uint64(0), + "rate_max": uint64(2), + "rtime": uint64(2908), + "sid": uint64(1), + "scur": uint64(0), + "slim": uint64(2), + "smax": uint64(2), + "srv_abort": uint64(0), + "status": "UP", + "stot": uint64(14539), + "ttime": uint64(4500), + "weight": uint64(1), + "wredis": uint64(0), + "wretr": uint64(0), } return fields } diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index c17de3dcd..87848cf13 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -202,10 +202,10 @@ var ( "deleting": uint32(0), "executing": uint32(0), "execution of init_command": uint32(0), - "end": uint32(0), - "freeing items": uint32(0), - "flushing tables": uint32(0), - "fulltext initialization": uint32(0), + "end": uint32(0), + "freeing items": uint32(0), + "flushing tables": uint32(0), + "fulltext initialization": uint32(0), "idle": uint32(0), "init": uint32(0), "killed": uint32(0), @@ -241,8 +241,8 @@ var ( } // plaintext statuses stateStatusMappings = map[string]string{ - "user sleep": "idle", - "creating index": "altering table", + "user sleep": "idle", + "creating index": "altering table", "committing alter table to storage engine": "altering table", "discard or import tablespace": "altering table", "rename": "altering table", 
diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go index a7516dee5..8105f35fb 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go @@ -776,10 +776,10 @@ func TestGatherHttpCachesMetrics(t *testing.T) { t, "nginx_plus_api_http_caches", map[string]interface{}{ - "bypass_bytes": int64(5510647548), - "bypass_bytes_written": int64(44992), - "bypass_responses": int64(200187), - "bypass_responses_written": int64(200173), + "bypass_bytes": int64(5510647548), + "bypass_bytes_written": int64(44992), + "bypass_responses": int64(200187), + "bypass_responses_written": int64(200173), "cold": false, "expired_bytes": int64(1656847080), "expired_bytes_written": int64(1641825173), @@ -810,10 +810,10 @@ func TestGatherHttpCachesMetrics(t *testing.T) { t, "nginx_plus_api_http_caches", map[string]interface{}{ - "bypass_bytes": int64(5510647548), - "bypass_bytes_written": int64(44992), - "bypass_responses": int64(200187), - "bypass_responses_written": int64(200173), + "bypass_bytes": int64(5510647548), + "bypass_bytes_written": int64(44992), + "bypass_responses": int64(200187), + "bypass_responses_written": int64(200173), "cold": false, "expired_bytes": int64(1656847080), "expired_bytes_written": int64(1641825173), diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go index 49115434a..ed206f173 100644 --- a/plugins/inputs/udp_listener/udp_listener_test.go +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -36,8 +36,8 @@ func newTestUdpListener() (*UdpListener, chan []byte) { listener := &UdpListener{ ServiceAddress: ":8125", AllowedPendingMessages: 10000, - in: in, - done: make(chan struct{}), + in: in, + done: make(chan struct{}), } return listener, in } diff --git a/plugins/inputs/wireless/wireless_test.go b/plugins/inputs/wireless/wireless_test.go index f2ca1fc21..6c562887e 100644 --- a/plugins/inputs/wireless/wireless_test.go +++ b/plugins/inputs/wireless/wireless_test.go @@ -15,7 +15,7 @@ var testInput = []byte(`Inter-| sta-| Quality | Discarded packets func TestLoadWirelessTable(t *testing.T) { expectedMetrics := []*wirelessInterface{ - &wirelessInterface{ + { Interface: "wlan0", Status: int64(0000), Link: int64(60), @@ -28,7 +28,7 @@ func TestLoadWirelessTable(t *testing.T) { Misc: int64(0), Beacon: int64(0), }, - &wirelessInterface{ + { Interface: "wlan1", Status: int64(0000), Link: int64(70), diff --git a/plugins/parsers/dropwizard/parser_test.go b/plugins/parsers/dropwizard/parser_test.go index 8ddcf7714..df33562db 100644 --- a/plugins/parsers/dropwizard/parser_test.go +++ b/plugins/parsers/dropwizard/parser_test.go @@ -106,9 +106,9 @@ func TestParseValidEmbeddedCounterJSON(t *testing.T) { "count": float64(1), }, metrics[0].Fields()) assert.Equal(t, map[string]string{ - "metric_type": "counter", - "tag1": "green", - "tag2": "yellow", + "metric_type": "counter", + "tag1": "green", + "tag2": "yellow", "tag3 space,comma=equals": "red ,=", }, metrics[0].Tags()) assert.True(t, metricTime.Equal(metrics[0].Time()), fmt.Sprintf("%s should be equal to %s", metrics[0].Time(), metricTime)) From 9fcd279b7edc68c8c623e269b11c6a17c4e653f9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 29 Oct 2018 16:12:37 -0700 Subject: [PATCH 0328/1815] Run gofmt with Go 1.10 --- plugins/inputs/haproxy/haproxy_test.go | 48 +++++++++---------- plugins/inputs/mysql/mysql.go | 12 ++--- 
.../nginx_plus_api_metrics_test.go | 16 +++---- .../inputs/udp_listener/udp_listener_test.go | 4 +- plugins/parsers/dropwizard/parser_test.go | 6 +-- 5 files changed, 43 insertions(+), 43 deletions(-) diff --git a/plugins/inputs/haproxy/haproxy_test.go b/plugins/inputs/haproxy/haproxy_test.go index e05031f19..27a197304 100644 --- a/plugins/inputs/haproxy/haproxy_test.go +++ b/plugins/inputs/haproxy/haproxy_test.go @@ -248,30 +248,30 @@ func HaproxyGetFieldValues() map[string]interface{} { "http_response.4xx": uint64(140), "http_response.5xx": uint64(0), "http_response.other": uint64(0), - "iid": uint64(4), - "last_chk": "OK", - "lastchg": uint64(1036557), - "lastsess": int64(1342), - "lbtot": uint64(9481), - "mode": "http", - "pid": uint64(1), - "qcur": uint64(0), - "qmax": uint64(0), - "qtime": uint64(1268), - "rate": uint64(0), - "rate_max": uint64(2), - "rtime": uint64(2908), - "sid": uint64(1), - "scur": uint64(0), - "slim": uint64(2), - "smax": uint64(2), - "srv_abort": uint64(0), - "status": "UP", - "stot": uint64(14539), - "ttime": uint64(4500), - "weight": uint64(1), - "wredis": uint64(0), - "wretr": uint64(0), + "iid": uint64(4), + "last_chk": "OK", + "lastchg": uint64(1036557), + "lastsess": int64(1342), + "lbtot": uint64(9481), + "mode": "http", + "pid": uint64(1), + "qcur": uint64(0), + "qmax": uint64(0), + "qtime": uint64(1268), + "rate": uint64(0), + "rate_max": uint64(2), + "rtime": uint64(2908), + "sid": uint64(1), + "scur": uint64(0), + "slim": uint64(2), + "smax": uint64(2), + "srv_abort": uint64(0), + "status": "UP", + "stot": uint64(14539), + "ttime": uint64(4500), + "weight": uint64(1), + "wredis": uint64(0), + "wretr": uint64(0), } return fields } diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 87848cf13..c17de3dcd 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -202,10 +202,10 @@ var ( "deleting": uint32(0), "executing": uint32(0), "execution of init_command": uint32(0), - "end": uint32(0), - "freeing items": uint32(0), - "flushing tables": uint32(0), - "fulltext initialization": uint32(0), + "end": uint32(0), + "freeing items": uint32(0), + "flushing tables": uint32(0), + "fulltext initialization": uint32(0), "idle": uint32(0), "init": uint32(0), "killed": uint32(0), @@ -241,8 +241,8 @@ var ( } // plaintext statuses stateStatusMappings = map[string]string{ - "user sleep": "idle", - "creating index": "altering table", + "user sleep": "idle", + "creating index": "altering table", "committing alter table to storage engine": "altering table", "discard or import tablespace": "altering table", "rename": "altering table", diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go index 8105f35fb..a7516dee5 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go @@ -776,10 +776,10 @@ func TestGatherHttpCachesMetrics(t *testing.T) { t, "nginx_plus_api_http_caches", map[string]interface{}{ - "bypass_bytes": int64(5510647548), - "bypass_bytes_written": int64(44992), - "bypass_responses": int64(200187), - "bypass_responses_written": int64(200173), + "bypass_bytes": int64(5510647548), + "bypass_bytes_written": int64(44992), + "bypass_responses": int64(200187), + "bypass_responses_written": int64(200173), "cold": false, "expired_bytes": int64(1656847080), "expired_bytes_written": int64(1641825173), @@ -810,10 +810,10 @@ func TestGatherHttpCachesMetrics(t 
*testing.T) { t, "nginx_plus_api_http_caches", map[string]interface{}{ - "bypass_bytes": int64(5510647548), - "bypass_bytes_written": int64(44992), - "bypass_responses": int64(200187), - "bypass_responses_written": int64(200173), + "bypass_bytes": int64(5510647548), + "bypass_bytes_written": int64(44992), + "bypass_responses": int64(200187), + "bypass_responses_written": int64(200173), "cold": false, "expired_bytes": int64(1656847080), "expired_bytes_written": int64(1641825173), diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go index ed206f173..49115434a 100644 --- a/plugins/inputs/udp_listener/udp_listener_test.go +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -36,8 +36,8 @@ func newTestUdpListener() (*UdpListener, chan []byte) { listener := &UdpListener{ ServiceAddress: ":8125", AllowedPendingMessages: 10000, - in: in, - done: make(chan struct{}), + in: in, + done: make(chan struct{}), } return listener, in } diff --git a/plugins/parsers/dropwizard/parser_test.go b/plugins/parsers/dropwizard/parser_test.go index df33562db..8ddcf7714 100644 --- a/plugins/parsers/dropwizard/parser_test.go +++ b/plugins/parsers/dropwizard/parser_test.go @@ -106,9 +106,9 @@ func TestParseValidEmbeddedCounterJSON(t *testing.T) { "count": float64(1), }, metrics[0].Fields()) assert.Equal(t, map[string]string{ - "metric_type": "counter", - "tag1": "green", - "tag2": "yellow", + "metric_type": "counter", + "tag1": "green", + "tag2": "yellow", "tag3 space,comma=equals": "red ,=", }, metrics[0].Tags()) assert.True(t, metricTime.Equal(metrics[0].Time()), fmt.Sprintf("%s should be equal to %s", metrics[0].Time(), metricTime)) From 563b6766ce7558834edf443ae9f55b9a611afb46 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 30 Oct 2018 15:05:18 -0600 Subject: [PATCH 0329/1815] Log the correct error in jti_openconfig (#4901) --- plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go index 49a593a08..b721c4943 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go @@ -394,7 +394,7 @@ func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error { &authentication.LoginRequest{UserName: m.Username, Password: m.Password, ClientId: m.ClientID}) if loginErr != nil { - log.Printf("E! Could not initiate login check for %s: %v", server, err) + log.Printf("E! 
Could not initiate login check for %s: %v", server, loginErr) continue } From d0e6da5eba5a66e014349fd5707a985f1134b29a Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 30 Oct 2018 15:05:41 -0600 Subject: [PATCH 0330/1815] Handle panic when ipmi_sensor input gets bad input (#4937) --- plugins/inputs/ipmi_sensor/ipmi.go | 8 +++++- plugins/inputs/ipmi_sensor/ipmi_test.go | 38 +++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go index 65506e118..e4832cc65 100644 --- a/plugins/inputs/ipmi_sensor/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi.go @@ -4,6 +4,7 @@ import ( "bufio" "bytes" "fmt" + "log" "os/exec" "regexp" "strconv" @@ -228,7 +229,12 @@ func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_ func extractFieldsFromRegex(re *regexp.Regexp, input string) map[string]string { submatches := re.FindStringSubmatch(input) results := make(map[string]string) - for i, name := range re.SubexpNames() { + subexpNames := re.SubexpNames() + if len(subexpNames) > len(submatches) { + log.Printf("D! No matches found in '%s'", input) + return results + } + for i, name := range subexpNames { if name != input && name != "" && input != "" { results[name] = trim(submatches[i]) } diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go index d781ce7b5..a66cabfeb 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -572,3 +572,41 @@ Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected } os.Exit(0) } + +func TestExtractFields(t *testing.T) { + v1Data := `Ambient Temp | 20 degrees C | ok +Altitude | 80 feet | ok +Avg Power | 210 Watts | ok +Planar 3.3V | 3.29 Volts | ok +Planar 5V | 4.90 Volts | ok +Planar 12V | 12.04 Volts | ok +B | 0x00 | ok +Unable to send command: Invalid argument +ECC Corr Err | Not Readable | ns +Unable to send command: Invalid argument +ECC Uncorr Err | Not Readable | ns +Unable to send command: Invalid argument +` + + v2Data := `SEL | 72h | ns | 7.1 | No Reading +Intrusion | 73h | ok | 7.1 | +Fan1 | 30h | ok | 7.1 | 5040 RPM +Inlet Temp | 04h | ok | 7.1 | 25 degrees C +USB Cable Pres | 50h | ok | 7.1 | Connected +Unable to send command: Invalid argument +Current 1 | 6Ah | ok | 10.1 | 7.20 Amps +Unable to send command: Invalid argument +Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected +` + + tests := []string{ + v1Data, + v2Data, + } + + for i := range tests { + t.Logf("Checking v%d data...", i+1) + extractFieldsFromRegex(re_v1_parse_line, tests[i]) + extractFieldsFromRegex(re_v2_parse_line, tests[i]) + } +} From 0003c8fba7d480cc48390f4c1f2b5e993c8aeede Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 30 Oct 2018 15:06:05 -0600 Subject: [PATCH 0331/1815] Don't add unserializable fields to jolokia2 input (#4930) --- .../inputs/jolokia2/examples/zookeeper.conf | 6 ++-- plugins/inputs/jolokia2/jolokia_test.go | 36 +++++++++++++++++-- plugins/inputs/jolokia2/point_builder.go | 9 ++++- 3 files changed, 45 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/jolokia2/examples/zookeeper.conf b/plugins/inputs/jolokia2/examples/zookeeper.conf index eac29c284..514e43ea8 100644 --- a/plugins/inputs/jolokia2/examples/zookeeper.conf +++ b/plugins/inputs/jolokia2/examples/zookeeper.conf @@ -2,17 +2,17 @@ urls = ["http://localhost:8080/jolokia"] name_prefix = "zk_" - 
[[inputs.jolokia2_agent.metrics]] + [[inputs.jolokia2_agent.metric]] name = "quorum" mbean = "org.apache.ZooKeeperService:name0=*" tag_keys = ["name0"] - [[inputs.jolokia2_agent.metrics]] + [[inputs.jolokia2_agent.metric]] name = "leader" mbean = "org.apache.ZooKeeperService:name0=*,name1=*,name2=Leader" tag_keys = ["name1"] - [[inputs.jolokia2_agent.metrics]] + [[inputs.jolokia2_agent.metric]] name = "follower" mbean = "org.apache.ZooKeeperService:name0=*,name1=*,name2=Follower" tag_keys = ["name1"] diff --git a/plugins/inputs/jolokia2/jolokia_test.go b/plugins/inputs/jolokia2/jolokia_test.go index 62dd1d3a0..61c410c0b 100644 --- a/plugins/inputs/jolokia2/jolokia_test.go +++ b/plugins/inputs/jolokia2/jolokia_test.go @@ -143,7 +143,12 @@ func TestJolokia2_ObjectValues(t *testing.T) { [[jolokia2_agent.metric]] name = "object_with_key_pattern" mbean = "object_with_key_pattern:test=*" - tag_keys = ["test"]` + tag_keys = ["test"] + + [[jolokia2_agent.metric]] + name = "ColumnFamily" + mbean = "org.apache.cassandra.metrics:keyspace=*,name=EstimatedRowSizeHistogram,scope=schema_columns,type=ColumnFamily" + tag_keys = ["keyspace", "name", "scope"]` response := `[{ "request": { @@ -214,7 +219,20 @@ func TestJolokia2_ObjectValues(t *testing.T) { } }, "status": 200 - }]` + }, { + "request": { + "mbean": "org.apache.cassandra.metrics:keyspace=*,name=EstimatedRowSizeHistogram,scope=schema_columns,type=ColumnFamily", + "type": "read" + }, + "value": { + "org.apache.cassandra.metrics:keyspace=system,name=EstimatedRowSizeHistogram,scope=schema_columns,type=ColumnFamily": { + "Value": [ + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 + ] + } + }, + "status": 200 + }]` server := setupServer(http.StatusOK, response) defer server.Close() @@ -730,6 +748,20 @@ func TestJolokia2_ProxyTargets(t *testing.T) { }) } +func TestFillFields(t *testing.T) { + complex := map[string]interface{}{"Value": []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + var scalar interface{} + scalar = []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + + results := map[string]interface{}{} + newPointBuilder(Metric{Name: "test", Mbean: "complex"}, []string{"this", "that"}, "/").fillFields("", complex, results) + assert.Equal(t, map[string]interface{}{}, results) + + results = map[string]interface{}{} + newPointBuilder(Metric{Name: "test", Mbean: "scalar"}, []string{"this", "that"}, "/").fillFields("", scalar, results) + assert.Equal(t, map[string]interface{}{}, 
results)
+}
+
 func setupServer(status int, resp string) *httptest.Server {
 	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		w.WriteHeader(http.StatusOK)
diff --git a/plugins/inputs/jolokia2/point_builder.go b/plugins/inputs/jolokia2/point_builder.go
index 02877ea70..f5ae1d314 100644
--- a/plugins/inputs/jolokia2/point_builder.go
+++ b/plugins/inputs/jolokia2/point_builder.go
@@ -158,8 +158,11 @@ func (pb *pointBuilder) fillFields(name string, value interface{}, fieldMap map[
 	if valueMap, ok := value.(map[string]interface{}); ok {
 		// keep going until we get to something that is not a map
 		for key, innerValue := range valueMap {
-			var innerName string
+			if _, ok := innerValue.([]interface{}); ok {
+				continue
+			}

+			var innerName string
 			if name == "" {
 				innerName = pb.metric.FieldPrefix + key
 			} else {
@@ -172,6 +175,10 @@
 		return
 	}

+	if _, ok := value.([]interface{}); ok {
+		return
+	}
+
 	if pb.metric.FieldName != "" {
 		name = pb.metric.FieldName
 		if prefix := pb.metric.FieldPrefix; prefix != "" {

From 69d21a5876003a7686edc994b2d938297ebd3308 Mon Sep 17 00:00:00 2001
From: Beaujolais !
Date: Tue, 30 Oct 2018 22:06:47 +0100
Subject: [PATCH 0332/1815] Fix version check in postgresql_extensible (#4866)

---
 plugins/inputs/postgresql_extensible/postgresql_extensible.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go
index a247b603a..c2bcb7b60 100644
--- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go
+++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go
@@ -121,7 +121,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
 	)

 	// Retrieving the database version
-	query = `select substring(setting from 1 for 3) as version from pg_settings where name='server_version_num'`
+	query = `SELECT setting::integer / 100 AS version FROM pg_settings WHERE name = 'server_version_num'`
 	if err = p.DB.QueryRow(query).Scan(&db_version); err != nil {
 		db_version = 0
 	}

From d2717f78f200a73021449d6b786ca37ef11ff240 Mon Sep 17 00:00:00 2001
From: Greg Linton
Date: Tue, 30 Oct 2018 15:10:48 -0600
Subject: [PATCH 0333/1815] Set 1.8.3 release date

---
 CHANGELOG.md | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2627d72ea..ae7b38f86 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,11 +38,15 @@
 - [#4914](https://github.com/influxdata/telegraf/pull/4914): Collect additional stats in memcached input.
 - [#3847](https://github.com/influxdata/telegraf/pull/3847): Add wireless input plugin.

-## v1.8.3 [unreleased]
+## v1.8.3 [2018-10-30]

 - [#4873](https://github.com/influxdata/telegraf/pull/4873): Add DN attributes as tags in x509_cert input to avoid series overwrite.
 - [#4921](https://github.com/influxdata/telegraf/issues/4921): Prevent connection leak by closing unused connections in amqp output.
 - [#4904](https://github.com/influxdata/telegraf/issues/4904): Use default partition key when tag does not exist in kinesis output.
+- [#4901](https://github.com/influxdata/telegraf/pull/4901): Log the correct error in jti_openconfig.
+- [#4937](https://github.com/influxdata/telegraf/pull/4937): Handle panic when ipmi_sensor input gets bad input.
+- [#4930](https://github.com/influxdata/telegraf/pull/4930): Don't add unserializable fields to jolokia2 input.
+- [#4866](https://github.com/influxdata/telegraf/pull/4866): Fix version check in postgresql_extensible.

 ## v1.8.2 [2018-10-17]

From 7fa4db07955d71f595844804dff00eb46cb87fba Mon Sep 17 00:00:00 2001
From: Dylan Khor
Date: Thu, 1 Nov 2018 15:25:27 -0400
Subject: [PATCH 0334/1815] Fix broken link to vSphere METRICS.md (#4945)

---
 plugins/inputs/vsphere/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md
index 16274a65a..b56393345 100644
--- a/plugins/inputs/vsphere/README.md
+++ b/plugins/inputs/vsphere/README.md
@@ -229,7 +229,7 @@ For setting up concurrency, modify `collect_concurrency` and `discover_concurrency`.
 - Datastore stats:
   - Disk: Capacity, provisioned, used

-For a detailed list of commonly available metrics, please refer to [METRICS.MD](METRICS.MD)
+For a detailed list of commonly available metrics, please refer to [METRICS.md](METRICS.md)

 ## Tags

From 1ec6c8e3332ecc52a45a7d77124ffc1ba9dc7bc6 Mon Sep 17 00:00:00 2001
From: Akshay Moghe
Date: Fri, 2 Nov 2018 10:48:43 -0700
Subject: [PATCH 0335/1815] Add metrics for real servers to ipvs (#4929)

---
 plugins/inputs/ipvs/README.md | 79 ++++++++++++++++++++++++++++---
 plugins/inputs/ipvs/ipvs.go   | 42 ++++++++++++++++++-
 2 files changed, 115 insertions(+), 6 deletions(-)

diff --git a/plugins/inputs/ipvs/README.md b/plugins/inputs/ipvs/README.md
index 6a4eaf6fe..ab55d45ae 100644
--- a/plugins/inputs/ipvs/README.md
+++ b/plugins/inputs/ipvs/README.md
@@ -5,14 +5,14 @@ metrics about ipvs virtual and real servers.

 **Supported Platforms:** Linux

-### Configuration:
+## Configuration

 ```toml
 [[inputs.ipvs]]
   # no configuration
 ```

-### Permissions:
+## Permissions

 Assuming you installed the telegraf package via one of the published packages,
 the process will be running as the `telegraf` user. However, in order for this
 plugin to communicate over netlink sockets it needs the telegraf process to be
 running as `root` (or some user with `CAP_NET_ADMIN` and `CAP_NET_RAW`). Be sure
 to grant these permissions before running telegraf with this plugin included.

-### Example Output:
+## Metrics
+
+### Virtual Servers
+
+Metrics reported for each `ipvs_virtual_server`:
+
+- `ipvs_virtual_server`
+  - tags:
+    - `sched` - the scheduler in use
+    - `netmask` - the mask used for determining affinity
+    - `address_family` - inet/inet6
+    - ONE of `address` + `port` + `protocol` *OR* `fwmark`
+  - fields:
+    - Connections
+    - PacketsIn
+    - PacketsOut
+    - BytesIn
+    - BytesOut
+    - CPS
+    - PPSIn
+    - PPSOut
+    - BPSIn
+    - BPSOut
+
+Each virtual server will contain tags identifying how it was configured, using
+one of `address` + `port` + `protocol` *OR* `fwmark`. This is how one would
+normally configure a virtual server using `ipvsadm`.
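+
+For illustration, the two flavors correspond to how the virtual server was
+created with `ipvsadm` (a sketch only; addresses and the firewall mark are the
+ones used in the example output below):
+
+```
+# virtual server identified by address + port + protocol
+ipvsadm -A -t 172.18.64.234:9000 -s rr
+
+# virtual server identified by a firewall mark
+ipvsadm -A -f 47 -s rr
+```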
+
+### Real Servers
+
+Metrics reported for each `ipvs_real_server`:
+
+- `ipvs_real_server`
+  - tags:
+    - `address`
+    - `port`
+    - `address_family`
+    - ONE of `virtual_address` + `virtual_port` + `virtual_protocol` OR `virtual_fwmark`
+  - fields:
+    - ActiveConnections
+    - InactiveConnections
+    - Connections
+    - PacketsIn
+    - PacketsOut
+    - BytesIn
+    - BytesOut
+    - CPS
+    - PPSIn
+    - PPSOut
+    - BPSIn
+    - BPSOut
+
+Each real server can be identified as belonging to a virtual server using
+either `virtual_address + virtual_port + virtual_protocol` OR `virtual_fwmark`.
+
+## Example Output
+
+### Virtual servers
+
+Example (when the virtual server is configured using `proto+addr+port` and backed by 2 real servers):
+```
+ipvs_virtual_server,address=172.18.64.234,address_family=inet,netmask=32,port=9000,protocol=tcp,sched=rr bytes_in=0i,bytes_out=0i,pps_in=0i,pps_out=0i,cps=0i,connections=0i,pkts_in=0i,pkts_out=0i 1541019340000000000
+ipvs_real_server,address=172.18.64.220,address_family=inet,port=9000,virtual_address=172.18.64.234,virtual_port=9000,virtual_protocol=tcp active_connections=0i,inactive_connections=0i,pkts_in=0i,bytes_out=0i,pps_out=0i,connections=0i,pkts_out=0i,bytes_in=0i,pps_in=0i,cps=0i 1541019340000000000
+ipvs_real_server,address=172.18.64.219,address_family=inet,port=9000,virtual_address=172.18.64.234,virtual_port=9000,virtual_protocol=tcp active_connections=0i,inactive_connections=0i,pps_in=0i,pps_out=0i,connections=0i,pkts_in=0i,pkts_out=0i,bytes_in=0i,bytes_out=0i,cps=0i 1541019340000000000
+```
-ipvs_virtual_server,address=172.18.64.234,address_family=inet,netmask=32,port=9000,protocol=tcp,sched=mh_418 bytes_out=0i,pps_in=0i,pps_out=0i,cps=0i,pkts_in=0i,pkts_out=0i,connections=0i,bytes_in=0i 1540407540000000000
-ipvs_virtual_server,address_family=inet,fwmark=47,netmask=32,sched=mh_418 connections=0i,pkts_in=0i,bytes_out=0i,pps_in=0i,pps_out=0i,pkts_out=0i,bytes_in=0i,cps=0i 1540407540000000000
+
+### Real servers
+
+Example (when the virtual server is configured using `fwmark` and backed by 2 real servers):
+```
+ipvs_virtual_server,address_family=inet,fwmark=47,netmask=32,sched=rr cps=0i,connections=0i,pkts_in=0i,pkts_out=0i,bytes_in=0i,bytes_out=0i,pps_in=0i,pps_out=0i 1541019340000000000
+ipvs_real_server,address=172.18.64.220,address_family=inet,port=9000,virtual_fwmark=47 inactive_connections=0i,pkts_out=0i,bytes_out=0i,pps_in=0i,cps=0i,active_connections=0i,pkts_in=0i,bytes_in=0i,pps_out=0i,connections=0i 1541019340000000000
+ipvs_real_server,address=172.18.64.219,address_family=inet,port=9000,virtual_fwmark=47 cps=0i,active_connections=0i,inactive_connections=0i,connections=0i,pkts_in=0i,bytes_out=0i,pkts_out=0i,bytes_in=0i,pps_in=0i,pps_out=0i 1541019340000000000
+```
diff --git a/plugins/inputs/ipvs/ipvs.go b/plugins/inputs/ipvs/ipvs.go
index 5a4e0dc66..2d3ad0278 100644
--- a/plugins/inputs/ipvs/ipvs.go
+++ b/plugins/inputs/ipvs/ipvs.go
@@ -5,6 +5,7 @@ package ipvs
 import (
 	"errors"
 	"fmt"
+	"log"
 	"math/bits"
 	"strconv"
 	"syscall"
@@ -57,6 +58,36 @@ func (i *IPVS) Gather(acc telegraf.Accumulator) error {
 			"cps":         s.Stats.CPS,
 		}
 		acc.AddGauge("ipvs_virtual_server", fields, serviceTags(s))
+
+		destinations, err := i.handle.GetDestinations(s)
+		if err != nil {
+			log.Println("E!
Failed to list destinations for a virtual server")
+			continue // move on to the next virtual server
+		}
+
+		for _, d := range destinations {
+			fields := map[string]interface{}{
+				"active_connections":   d.ActiveConnections,
+				"inactive_connections": d.InactiveConnections,
+				"connections":          d.Stats.Connections,
+				"pkts_in":              d.Stats.PacketsIn,
+				"pkts_out":             d.Stats.PacketsOut,
+				"bytes_in":             d.Stats.BytesIn,
+				"bytes_out":            d.Stats.BytesOut,
+				"pps_in":               d.Stats.PPSIn,
+				"pps_out":              d.Stats.PPSOut,
+				"cps":                  d.Stats.CPS,
+			}
+			destTags := destinationTags(d)
+			if s.FWMark > 0 {
+				destTags["virtual_fwmark"] = strconv.Itoa(int(s.FWMark))
+			} else {
+				destTags["virtual_protocol"] = protocolToString(s.Protocol)
+				destTags["virtual_address"] = s.Address.String()
+				destTags["virtual_port"] = strconv.Itoa(int(s.Port))
+			}
+			acc.AddGauge("ipvs_real_server", fields, destTags)
+		}
 	}

 	return nil
@@ -66,7 +97,7 @@ func (i *IPVS) Gather(acc telegraf.Accumulator) error {
 func serviceTags(s *ipvs.Service) map[string]string {
 	ret := map[string]string{
 		"sched":          s.SchedName,
-		"netmask":        fmt.Sprintf("%d", bits.OnesCount32(s.Netmask)),
+		"netmask":        strconv.Itoa(bits.OnesCount32(s.Netmask)),
 		"address_family": addressFamilyToString(s.AddressFamily),
 	}
 	// Per the ipvsadm man page, a virtual service is defined "based on
@@ -81,6 +112,15 @@ func serviceTags(s *ipvs.Service) map[string]string {
 	return ret
 }

+// helper: given a Destination, return tags that identify it
+func destinationTags(d *ipvs.Destination) map[string]string {
+	return map[string]string{
+		"address":        d.Address.String(),
+		"port":           strconv.Itoa(int(d.Port)),
+		"address_family": addressFamilyToString(d.AddressFamily),
+	}
+}
+
 // helper: convert protocol uint16 to human readable string (if possible)
 func protocolToString(p uint16) string {
 	switch p {

From f0f99d18e04d23ba582a32d88bf7d9052331dce9 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Fri, 2 Nov 2018 10:59:14 -0700
Subject: [PATCH 0336/1815] Update ipvs readme

---
 plugins/inputs/ipvs/README.md | 100 +++++++++++++++-------------------
 1 file changed, 43 insertions(+), 57 deletions(-)

diff --git a/plugins/inputs/ipvs/README.md b/plugins/inputs/ipvs/README.md
index ab55d45ae..75e5b5103 100644
--- a/plugins/inputs/ipvs/README.md
+++ b/plugins/inputs/ipvs/README.md
@@ -5,14 +5,14 @@ metrics about ipvs virtual and real servers.

 **Supported Platforms:** Linux

-## Configuration
+### Configuration

 ```toml
 [[inputs.ipvs]]
   # no configuration
 ```

-## Permissions
+#### Permissions

 Assuming you installed the telegraf package via one of the published packages,
 the process will be running as the `telegraf` user. However, in order for this
 plugin to communicate over netlink sockets it needs the telegraf process to be
 running as `root` (or some user with `CAP_NET_ADMIN` and `CAP_NET_RAW`). Be sure
 to grant these permissions before running telegraf with this plugin included.

-## Metrics
+### Metrics

-### Virtual Servers
+Each virtual server will contain tags identifying how it was configured, using
+one of `address` + `port` + `protocol` *OR* `fwmark`. This is how one would
+normally configure a virtual server using `ipvsadm`.
-
-Metrics reported for each `ipvs_virtual_server`:
-
-- `ipvs_virtual_server`
+- ipvs_virtual_server
  - tags:
    - sched (the scheduler in use)
    - netmask (the mask used for determining affinity)
    - address_family (inet/inet6)
    - address
    - port
    - protocol
    - fwmark
  - fields:
    - connections
    - pkts_in
    - pkts_out
    - bytes_in
    - bytes_out
    - pps_in
    - pps_out
    - cps

- ipvs_real_server
  - tags:
    - address
    - port
    - address_family (inet/inet6)
    - virtual_address
    - virtual_port
    - virtual_protocol
    - virtual_fwmark
  - fields:
    - active_connections
    - inactive_connections
    - connections
    - pkts_in
    - pkts_out
    - bytes_in
    - bytes_out
    - pps_in
    - pps_out
    - cps

### Example Output

Virtual server is configured using `proto+addr+port` and backed by 2 real servers:
```
ipvs_virtual_server,address=172.18.64.234,address_family=inet,netmask=32,port=9000,protocol=tcp,sched=rr bytes_in=0i,bytes_out=0i,pps_in=0i,pps_out=0i,cps=0i,connections=0i,pkts_in=0i,pkts_out=0i 1541019340000000000
ipvs_real_server,address=172.18.64.220,address_family=inet,port=9000,virtual_address=172.18.64.234,virtual_port=9000,virtual_protocol=tcp active_connections=0i,inactive_connections=0i,pkts_in=0i,bytes_out=0i,pps_out=0i,connections=0i,pkts_out=0i,bytes_in=0i,pps_in=0i,cps=0i 1541019340000000000
ipvs_real_server,address=172.18.64.219,address_family=inet,port=9000,virtual_address=172.18.64.234,virtual_port=9000,virtual_protocol=tcp active_connections=0i,inactive_connections=0i,pps_in=0i,pps_out=0i,connections=0i,pkts_in=0i,pkts_out=0i,bytes_in=0i,bytes_out=0i,cps=0i 1541019340000000000
```

Virtual server is configured using `fwmark` and backed by 2 real servers:
```
ipvs_virtual_server,address_family=inet,fwmark=47,netmask=32,sched=rr cps=0i,connections=0i,pkts_in=0i,pkts_out=0i,bytes_in=0i,bytes_out=0i,pps_in=0i,pps_out=0i 1541019340000000000
ipvs_real_server,address=172.18.64.220,address_family=inet,port=9000,virtual_fwmark=47 inactive_connections=0i,pkts_out=0i,bytes_out=0i,pps_in=0i,cps=0i,active_connections=0i,pkts_in=0i,bytes_in=0i,pps_out=0i,connections=0i 1541019340000000000
ipvs_real_server,address=172.18.64.219,address_family=inet,port=9000,virtual_fwmark=47 cps=0i,active_connections=0i,inactive_connections=0i,connections=0i,pkts_in=0i,bytes_out=0i,pkts_out=0i,bytes_in=0i,pps_in=0i,pps_out=0i 1541019340000000000
```

From 2a9bef64ae369441fab8db8b122c3d3161a24da4 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Fri, 2 Nov 2018
11:00:47 -0700 Subject: [PATCH 0337/1815] Update opensmtpd readme --- plugins/inputs/opensmtpd/README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/inputs/opensmtpd/README.md b/plugins/inputs/opensmtpd/README.md index c1166d9e5..ba73ed024 100644 --- a/plugins/inputs/opensmtpd/README.md +++ b/plugins/inputs/opensmtpd/README.md @@ -5,8 +5,7 @@ This plugin gathers stats from [OpenSMTPD - a FREE implementation of the server- ### Configuration: ```toml - # A plugin to collect stats from OpenSMTPD - a FREE implementation of the server-side SMTP protocol - [[inputs.smtpctl]] + [[inputs.opensmtpd]] ## If running as a restricted user you can prepend sudo for additional access: #use_sudo = false From 69170d24bcab95ff83ea114d3e660f052a11de5f Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Fri, 2 Nov 2018 15:05:28 -0400 Subject: [PATCH 0338/1815] Add LUN to datasource translation in vsphere input (#4934) --- plugins/inputs/vsphere/endpoint.go | 36 +++++++++++++++++++++++++++--- 1 file changed, 33 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index f052fefde..55444ebf3 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -5,6 +5,7 @@ import ( "fmt" "log" "net/url" + "regexp" "strconv" "strings" "sync" @@ -21,6 +22,8 @@ import ( "github.com/vmware/govmomi/vim25/types" ) +var isolateLUN = regexp.MustCompile(".*/([^/]+)/?$") + // Endpoint is a high-level representation of a connected vCenter endpoint. It is backed by the lower // level Client type. type Endpoint struct { @@ -29,6 +32,7 @@ type Endpoint struct { lastColls map[string]time.Time instanceInfo map[string]resourceInfo resourceKinds map[string]resourceKind + lun2ds map[string]string discoveryTicker *time.Ticker collectMux sync.RWMutex initialized bool @@ -93,6 +97,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, Parent: parent, lastColls: make(map[string]time.Time), instanceInfo: make(map[string]resourceInfo), + lun2ds: make(map[string]string), initialized: false, clientFactory: NewClientFactory(ctx, url, parent), } @@ -404,13 +409,25 @@ func (e *Endpoint) discover(ctx context.Context) error { } } + // Build lun2ds map + dss := resourceKinds["datastore"] + l2d := make(map[string]string) + for _, ds := range dss.objects { + url := ds.altID + m := isolateLUN.FindStringSubmatch(url) + if m != nil { + log.Printf("D! [input.vsphere]: LUN: %s", m[1]) + l2d[m[1]] = ds.name + } + } + // Atomically swap maps - // e.collectMux.Lock() defer e.collectMux.Unlock() e.instanceInfo = instInfo e.resourceKinds = resourceKinds + e.lun2ds = l2d sw.Stop() SendInternalCounter("discovered_objects", e.URL.Host, int64(len(instInfo))) @@ -509,14 +526,22 @@ func getDatastores(ctx context.Context, e *Endpoint, root *view.ContainerView) ( var resources []mo.Datastore ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) defer cancel1() - err := root.Retrieve(ctx1, []string{"Datastore"}, []string{"name", "parent"}, &resources) + err := root.Retrieve(ctx1, []string{"Datastore"}, []string{"name", "parent", "info"}, &resources) if err != nil { return nil, err } m := make(objectMap) for _, r := range resources { + url := "" + if r.Info != nil { + info := r.Info.GetDatastoreInfo() + if info != nil { + url = info.Url + } + } + log.Printf("D! 
[input.vsphere]: DS URL: %s %s", url, r.Name) m[r.ExtensibleManagedObject.Reference().Value] = objectRef{ - name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Parent} + name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Parent, altID: url} } return m, nil } @@ -848,6 +873,11 @@ func (e *Endpoint) populateTags(objectRef *objectRef, resourceType string, resou t["cpu"] = instance } else if strings.HasPrefix(name, "datastore.") { t["lun"] = instance + if ds, ok := e.lun2ds[instance]; ok { + t["dsname"] = ds + } else { + t["dsname"] = instance + } } else if strings.HasPrefix(name, "disk.") { t["disk"] = cleanDiskTag(instance) } else if strings.HasPrefix(name, "net.") { From 9035505e08af92894fd2795598edd9b134956af1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 2 Nov 2018 12:08:05 -0700 Subject: [PATCH 0339/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ae7b38f86..a8af67e8e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,7 @@ - [#3903](https://github.com/influxdata/telegraf/pull/3903): Add support for TLS configuration in NSQ input. - [#4914](https://github.com/influxdata/telegraf/pull/4914): Collect additional stats in memcached input. - [#3847](https://github.com/influxdata/telegraf/pull/3847): Add wireless input plugin. +- [#4934](https://github.com/influxdata/telegraf/pull/4934): Add LUN to datasource translation in vsphere input. ## v1.8.3 [2018-10-30] From ad5fcf8efb56104fed8755e3be10d0450a321be1 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Fri, 2 Nov 2018 18:50:55 -0600 Subject: [PATCH 0340/1815] Improve error description in influxdb_v2 output (#4952) --- plugins/outputs/influxdb_v2/http.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index d6d9d9076..8709a9b84 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -193,7 +193,7 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error err = json.NewDecoder(resp.Body).Decode(writeResp) desc := writeResp.Error() if err != nil { - desc = err.Error() + desc = resp.Status } switch resp.StatusCode { From ddcbfe79bb69b0e36993bf9a68a0afcb6b53c31a Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Fri, 2 Nov 2018 18:51:40 -0600 Subject: [PATCH 0341/1815] Allow connecting to prometheus via unix socket (#4798) --- plugins/inputs/prometheus/README.md | 2 + plugins/inputs/prometheus/prometheus.go | 59 ++++++++++++++------ plugins/inputs/prometheus/prometheus_test.go | 2 +- 3 files changed, 45 insertions(+), 18 deletions(-) diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index 227f3f737..294d84150 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -28,6 +28,8 @@ in Prometheus format. # insecure_skip_verify = false ``` +`urls` can contain a unix socket as well. 
If a unix socket requires a path other than the default (`/metrics` is the default for both http[s] and unix), add it as a `path` query parameter: `unix:///var/run/prometheus.sock?path=/custom/metrics`
+
 #### Kubernetes Service Discovery

 URLs listed in the `kubernetes_services` parameter will be expanded
diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go
index 23709790f..b8e346032 100644
--- a/plugins/inputs/prometheus/prometheus.go
+++ b/plugins/inputs/prometheus/prometheus.go
@@ -125,7 +125,7 @@ func (p *Prometheus) GetAllURLs() ([]URLAndAddress, error) {
 // Returns one of the errors encountered while gathering stats (if any).
 func (p *Prometheus) Gather(acc telegraf.Accumulator) error {
 	if p.client == nil {
-		client, err := p.createHttpClient()
+		client, err := p.createHTTPClient()
 		if err != nil {
 			return err
 		}
@@ -151,16 +151,7 @@ func (p *Prometheus) Gather(acc telegraf.Accumulator) error {
 	return nil
 }

-var tr = &http.Transport{
-	ResponseHeaderTimeout: time.Duration(3 * time.Second),
-}
-
-var client = &http.Client{
-	Transport: tr,
-	Timeout:   time.Duration(4 * time.Second),
-}
-
-func (p *Prometheus) createHttpClient() (*http.Client, error) {
+func (p *Prometheus) createHTTPClient() (*http.Client, error) {
 	tlsCfg, err := p.ClientConfig.TLSConfig()
 	if err != nil {
 		return nil, err
@@ -178,11 +169,39 @@ func (p *Prometheus) createHTTPClient() (*http.Client, error) {
 }

 func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error {
-	var req, err = http.NewRequest("GET", u.URL.String(), nil)
-	req.Header.Add("Accept", acceptHeader)
-	var token []byte
-	var resp *http.Response
+	var req *http.Request
+	var err error
+	var uClient *http.Client
+	if u.URL.Scheme == "unix" {
+		path := u.URL.Query().Get("path")
+		if path == "" {
+			path = "/metrics"
+		}
+		req, err = http.NewRequest("GET", "http://localhost"+path, nil)
+
+		// ignore error because it's been handled before getting here
+		tlsCfg, _ := p.ClientConfig.TLSConfig()
+		uClient = &http.Client{
+			Transport: &http.Transport{
+				TLSClientConfig:   tlsCfg,
+				DisableKeepAlives: true,
+				Dial: func(network, addr string) (net.Conn, error) {
+					c, err := net.Dial("unix", u.URL.Path)
+					return c, err
+				},
+			},
+			Timeout: p.ResponseTimeout.Duration,
+		}
+	} else {
+		if u.URL.Path == "" {
+			u.URL.Path = "/metrics"
+		}
+		req, err = http.NewRequest("GET", u.URL.String(), nil)
+	}
+
+	req.Header.Add("Accept", acceptHeader)
+
+	var token []byte
 	if p.BearerToken != "" {
 		token, err = ioutil.ReadFile(p.BearerToken)
 		if err != nil {
@@ -191,11 +210,17 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error
 		req.Header.Set("Authorization", "Bearer "+string(token))
 	}

-	resp, err = p.client.Do(req)
+	var resp *http.Response
+	if u.URL.Scheme != "unix" {
+		resp, err = p.client.Do(req)
+	} else {
+		resp, err = uClient.Do(req)
+	}
 	if err != nil {
 		return fmt.Errorf("error making HTTP request to %s: %s", u.URL, err)
 	}
 	defer resp.Body.Close()
+
 	if resp.StatusCode != http.StatusOK {
 		return fmt.Errorf("%s returned HTTP status %s", u.URL, resp.Status)
 	}
@@ -210,7 +235,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error
 		return fmt.Errorf("error reading metrics for %s: %s", u.URL, err)
 	}

-	// Add (or not) collected metrics
+
 	for _, metric := range metrics {
 		tags := metric.Tags()
 		// strip user and password from URL
diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go
index 9a2982ff9..ef3902fc9 100644
---
a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -50,7 +50,7 @@ func TestPrometheusGeneratesMetrics(t *testing.T) { assert.True(t, acc.HasFloatField("test_metric", "value")) assert.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) assert.False(t, acc.HasTag("test_metric", "address")) - assert.True(t, acc.TagValue("test_metric", "url") == ts.URL) + assert.True(t, acc.TagValue("test_metric", "url") == ts.URL+"/metrics") } func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) { From 02ad1f46be3590446a5be2ca17c3c50a44033f1b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 2 Nov 2018 17:53:10 -0700 Subject: [PATCH 0342/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a8af67e8e..c5663d2e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ - [#4914](https://github.com/influxdata/telegraf/pull/4914): Collect additional stats in memcached input. - [#3847](https://github.com/influxdata/telegraf/pull/3847): Add wireless input plugin. - [#4934](https://github.com/influxdata/telegraf/pull/4934): Add LUN to datasource translation in vsphere input. +- [#4798](https://github.com/influxdata/telegraf/pull/4798): Allow connecting to prometheus via unix socket. ## v1.8.3 [2018-10-30] From ad320ac1e060e01994553d0fe128e2812aafdb06 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 2 Nov 2018 17:53:45 -0700 Subject: [PATCH 0343/1815] Remove the time_key from the field values in JSON parser (#4951) --- plugins/parsers/json/parser.go | 2 ++ plugins/parsers/json/parser_test.go | 27 +++++++++++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index 1d8ce2d02..19a92275b 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -143,6 +143,8 @@ func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]i } } + delete(f.Fields, p.JSONTimeKey) + //if the year is 0, set to current year if nTime.Year() == 0 { nTime = nTime.AddDate(time.Now().Year(), 0, 0) diff --git a/plugins/parsers/json/parser_test.go b/plugins/parsers/json/parser_test.go index ec9ade251..382afcd35 100644 --- a/plugins/parsers/json/parser_test.go +++ b/plugins/parsers/json/parser_test.go @@ -4,7 +4,10 @@ import ( "fmt" "log" "testing" + "time" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -724,3 +727,27 @@ func TestNameKey(t *testing.T) { require.NoError(t, err) require.Equal(t, "this is my name", metrics[0].Name()) } + +func TestTimeKeyDelete(t *testing.T) { + data := `{ + "timestamp": 1541183052, + "value": 42 + }` + + parser := JSONParser{ + MetricName: "json", + JSONTimeKey: "timestamp", + JSONTimeFormat: "unix", + } + + metrics, err := parser.Parse([]byte(data)) + require.NoError(t, err) + expected := []telegraf.Metric{ + testutil.MustMetric("json", + map[string]string{}, + map[string]interface{}{"value": 42.0}, + time.Unix(1541183052, 0)), + } + + testutil.RequireMetricsEqual(t, expected, metrics) +} From 2ff2d033894f26f668625f8ebef4b37472d51709 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 2 Nov 2018 17:58:33 -0700 Subject: [PATCH 0344/1815] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c5663d2e2..9d0e06bf8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,10 @@ - 
[#4934](https://github.com/influxdata/telegraf/pull/4934): Add LUN to datasource translation in vsphere input.
 - [#4798](https://github.com/influxdata/telegraf/pull/4798): Allow connecting to prometheus via unix socket.
 
+#### Bugfixes
+
+- [#4950](https://github.com/influxdata/telegraf/pull/4950): Remove the time_key from the field values in JSON parser.
+
 ## v1.8.3 [2018-10-30]
 
 - [#4873](https://github.com/influxdata/telegraf/pull/4873): Add DN attributes as tags in x509_cert input to avoid series overwrite.
 
From 9a864d11d2bfb3639a9ea5592ec7b66387ff9a7a Mon Sep 17 00:00:00 2001
From: Aleksejs Sinicins
Date: Sat, 3 Nov 2018 03:18:40 +0200
Subject: [PATCH 0345/1815] Add nginx-module-vts input plugin. (#3782)

---
 README.md                                  |   1 +
 plugins/inputs/all/all.go                  |   1 +
 plugins/inputs/nginx_vts/README.md         | 121 +++
 plugins/inputs/nginx_vts/nginx_vts.go      | 341 +++++++++++++
 plugins/inputs/nginx_vts/nginx_vts_test.go | 486 +++++++++++++++++++
 5 files changed, 950 insertions(+)
 create mode 100644 plugins/inputs/nginx_vts/README.md
 create mode 100644 plugins/inputs/nginx_vts/nginx_vts.go
 create mode 100644 plugins/inputs/nginx_vts/nginx_vts_test.go

diff --git a/README.md b/README.md
index 6381efb92..193bfa8d8 100644
--- a/README.md
+++ b/README.md
@@ -210,6 +210,7 @@ For documentation on the latest development code see the [documentation index][d
 * [nginx_plus](./plugins/inputs/nginx_plus)
 * [nginx_plus_api](./plugins/inputs/nginx_plus_api)
 * [nsq_consumer](./plugins/inputs/nsq_consumer)
+* [nginx_vts](./plugins/inputs/nginx_vts)
 * [nsq](./plugins/inputs/nsq)
 * [nstat](./plugins/inputs/nstat)
 * [ntpq](./plugins/inputs/ntpq)
diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index d45013bf3..c64fec0a7 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -82,6 +82,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus_api"
+	_ "github.com/influxdata/telegraf/plugins/inputs/nginx_vts"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
diff --git a/plugins/inputs/nginx_vts/README.md b/plugins/inputs/nginx_vts/README.md
new file mode 100644
index 000000000..d4be55139
--- /dev/null
+++ b/plugins/inputs/nginx_vts/README.md
@@ -0,0 +1,121 @@
+# Telegraf Plugin: nginx_vts
+
+This plugin gathers Nginx status using the external virtual host traffic status module - https://github.com/vozlt/nginx-module-vts. This Nginx module provides access to virtual host status information, including the current status of servers, upstreams, and caches, similar to the live activity monitoring of NGINX Plus.
+For module configuration details, please see its [documentation](https://github.com/vozlt/nginx-module-vts#synopsis).
+
+### Configuration:
+
+```
+# Read nginx status information using nginx-module-vts module
+[[inputs.nginx_vts]]
+  ## An array of Nginx status URIs to gather stats.
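+  ## Each URI should point at a location where the module's
+  ## vhost_traffic_status_display handler serves JSON, since the
+  ## plugin requires an application/json response.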
+ urls = ["http://localhost/status"] +``` + +### Measurements & Fields: + +- nginx_vts_connections + - active + - reading + - writing + - waiting + - accepted + - handled + - requests +- nginx_vts_server, nginx_vts_filter + - requests + - request_time + - in_bytes + - out_bytes + - response_1xx_count + - response_2xx_count + - response_3xx_count + - response_4xx_count + - response_5xx_count + - cache_miss + - cache_bypass + - cache_expired + - cache_stale + - cache_updating + - cache_revalidated + - cache_hit + - cache_scarce +- nginx_vts_upstream + - requests + - request_time + - response_time + - in_bytes + - out_bytes + - response_1xx_count + - response_2xx_count + - response_3xx_count + - response_4xx_count + - response_5xx_count + - weight + - max_fails + - fail_timeout + - backup + - down +- nginx_vts_cache + - max_bytes + - used_bytes + - in_bytes + - out_bytes + - miss + - bypass + - expired + - stale + - updating + - revalidated + - hit + - scarce + + +### Tags: + +- nginx_vts_connections + - server + - port +- nginx_vts_server + - server + - port + - zone +- nginx_vts_filter + - server + - port + - filter_name + - filter_key +- nginx_vts_upstream + - server + - port + - upstream + - upstream_address +- nginx_vts_cache + - server + - port + - zone + + +### Example Output: + +Using this configuration: +``` +[[inputs.nginx_vts]] + ## An array of Nginx status URIs to gather stats. + urls = ["http://localhost/status"] +``` + +When run with: +``` +./telegraf -config telegraf.conf -input-filter nginx_vts -test +``` + +It produces: +``` +nginx_vts_connections,server=localhost,port=80,host=localhost waiting=30i,accepted=295333i,handled=295333i,requests=6833487i,active=33i,reading=0i,writing=3i 1518341521000000000 +nginx_vts_server,zone=example.com,port=80,host=localhost,server=localhost cache_hit=158915i,in_bytes=1935528964i,out_bytes=6531366419i,response_2xx_count=809994i,response_4xx_count=16664i,cache_bypass=0i,cache_stale=0i,cache_revalidated=0i,requests=2187977i,response_1xx_count=0i,response_3xx_count=1360390i,cache_miss=2249i,cache_updating=0i,cache_scarce=0i,request_time=13i,response_5xx_count=929i,cache_expired=0i 1518341521000000000 +nginx_vts_server,host=localhost,server=localhost,port=80,zone=* requests=6775284i,in_bytes=5003242389i,out_bytes=36858233827i,cache_expired=318881i,cache_updating=0i,request_time=51i,response_1xx_count=0i,response_2xx_count=4385916i,response_4xx_count=83680i,response_5xx_count=1186i,cache_bypass=0i,cache_revalidated=0i,cache_hit=1972222i,cache_scarce=0i,response_3xx_count=2304502i,cache_miss=408251i,cache_stale=0i 1518341521000000000 +nginx_vts_filter,filter_key=FI,filter_name=country,port=80,host=localhost,server=localhost request_time=0i,in_bytes=139701i,response_3xx_count=0i,out_bytes=2644495i,response_1xx_count=0i,cache_expired=0i,cache_scarce=0i,requests=179i,cache_miss=0i,cache_bypass=0i,cache_stale=0i,cache_updating=0i,cache_revalidated=0i,cache_hit=0i,response_2xx_count=177i,response_4xx_count=2i,response_5xx_count=0i 1518341521000000000 +nginx_vts_upstream,port=80,host=localhost,upstream=backend_cluster,upstream_address=127.0.0.1:6000,server=localhost fail_timeout=10i,backup=false,request_time=31i,response_5xx_count=1081i,response_2xx_count=1877498i,max_fails=1i,in_bytes=2763336289i,out_bytes=19470265071i,weight=1i,down=false,response_time=31i,response_1xx_count=0i,response_4xx_count=76125i,requests=3379232i,response_3xx_count=1424528i 1518341521000000000 +nginx_vts_cache,server=localhost,port=80,host=localhost,zone=example 
stale=0i,used_bytes=64334336i,miss=394573i,bypass=0i,expired=318788i,updating=0i,revalidated=0i,hit=689883i,scarce=0i,max_bytes=9223372036854775296i,in_bytes=1111161581i,out_bytes=19175548290i 1518341521000000000 +``` diff --git a/plugins/inputs/nginx_vts/nginx_vts.go b/plugins/inputs/nginx_vts/nginx_vts.go new file mode 100644 index 000000000..66a16e6c1 --- /dev/null +++ b/plugins/inputs/nginx_vts/nginx_vts.go @@ -0,0 +1,341 @@ +package nginx_vts + +import ( + "bufio" + "encoding/json" + "fmt" + "net" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type NginxVTS struct { + Urls []string + + client *http.Client + + ResponseTimeout internal.Duration +} + +var sampleConfig = ` + ## An array of ngx_http_status_module or status URI to gather stats. + urls = ["http://localhost/status"] + + ## HTTP response timeout (default: 5s) + response_timeout = "5s" +` + +func (n *NginxVTS) SampleConfig() string { + return sampleConfig +} + +func (n *NginxVTS) Description() string { + return "Read Nginx virtual host traffic status module information (nginx-module-vts)" +} + +func (n *NginxVTS) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + + // Create an HTTP client that is re-used for each + // collection interval + + if n.client == nil { + client, err := n.createHTTPClient() + if err != nil { + return err + } + n.client = client + } + + for _, u := range n.Urls { + addr, err := url.Parse(u) + if err != nil { + acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + continue + } + + wg.Add(1) + go func(addr *url.URL) { + defer wg.Done() + acc.AddError(n.gatherURL(addr, acc)) + }(addr) + } + + wg.Wait() + return nil +} + +func (n *NginxVTS) createHTTPClient() (*http.Client, error) { + if n.ResponseTimeout.Duration < time.Second { + n.ResponseTimeout.Duration = time.Second * 5 + } + + client := &http.Client{ + Transport: &http.Transport{}, + Timeout: n.ResponseTimeout.Duration, + } + + return client, nil +} + +func (n *NginxVTS) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { + resp, err := n.client.Get(addr.String()) + if err != nil { + return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status) + } + contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] + switch contentType { + case "application/json": + return gatherStatusURL(bufio.NewReader(resp.Body), getTags(addr), acc) + default: + return fmt.Errorf("%s returned unexpected content type %s", addr.String(), contentType) + } +} + +type NginxVTSResponse struct { + Connections struct { + Active uint64 `json:"active"` + Reading uint64 `json:"reading"` + Writing uint64 `json:"writing"` + Waiting uint64 `json:"waiting"` + Accepted uint64 `json:"accepted"` + Handled uint64 `json:"handled"` + Requests uint64 `json:"requests"` + } `json:"connections"` + ServerZones map[string]Server `json:"serverZones"` + FilterZones map[string]map[string]Server `json:"filterZones"` + UpstreamZones map[string][]Upstream `json:"upstreamZones"` + CacheZones map[string]Cache `json:"cacheZones"` +} + +type Server struct { + RequestCounter uint64 `json:"requestCounter"` + InBytes uint64 `json:"inBytes"` + OutBytes uint64 `json:"outBytes"` + RequestMsec uint64 `json:"requestMsec"` + Responses struct { + OneXx uint64 
`json:"1xx"` + TwoXx uint64 `json:"2xx"` + ThreeXx uint64 `json:"3xx"` + FourXx uint64 `json:"4xx"` + FiveXx uint64 `json:"5xx"` + Miss uint64 `json:"miss"` + Bypass uint64 `json:"bypass"` + Expired uint64 `json:"expired"` + Stale uint64 `json:"stale"` + Updating uint64 `json:"updating"` + Revalidated uint64 `json:"revalidated"` + Hit uint64 `json:"hit"` + Scarce uint64 `json:"scarce"` + } `json:"responses"` +} + +type Upstream struct { + Server string `json:"server"` + RequestCounter uint64 `json:"requestCounter"` + InBytes uint64 `json:"inBytes"` + OutBytes uint64 `json:"outBytes"` + Responses struct { + OneXx uint64 `json:"1xx"` + TwoXx uint64 `json:"2xx"` + ThreeXx uint64 `json:"3xx"` + FourXx uint64 `json:"4xx"` + FiveXx uint64 `json:"5xx"` + } `json:"responses"` + ResponseMsec uint64 `json:"responseMsec"` + RequestMsec uint64 `json:"requestMsec"` + Weight uint64 `json:"weight"` + MaxFails uint64 `json:"maxFails"` + FailTimeout uint64 `json:"failTimeout"` + Backup bool `json:"backup"` + Down bool `json:"down"` +} + +type Cache struct { + MaxSize uint64 `json:"maxSize"` + UsedSize uint64 `json:"usedSize"` + InBytes uint64 `json:"inBytes"` + OutBytes uint64 `json:"outBytes"` + Responses struct { + Miss uint64 `json:"miss"` + Bypass uint64 `json:"bypass"` + Expired uint64 `json:"expired"` + Stale uint64 `json:"stale"` + Updating uint64 `json:"updating"` + Revalidated uint64 `json:"revalidated"` + Hit uint64 `json:"hit"` + Scarce uint64 `json:"scarce"` + } `json:"responses"` +} + +func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accumulator) error { + dec := json.NewDecoder(r) + status := &NginxVTSResponse{} + if err := dec.Decode(status); err != nil { + return fmt.Errorf("Error while decoding JSON response") + } + + acc.AddFields("nginx_vts_connections", map[string]interface{}{ + "active": status.Connections.Active, + "reading": status.Connections.Reading, + "writing": status.Connections.Writing, + "waiting": status.Connections.Waiting, + "accepted": status.Connections.Accepted, + "handled": status.Connections.Handled, + "requests": status.Connections.Requests, + }, tags) + + for zoneName, zone := range status.ServerZones { + zoneTags := map[string]string{} + for k, v := range tags { + zoneTags[k] = v + } + zoneTags["zone"] = zoneName + + acc.AddFields("nginx_vts_server", map[string]interface{}{ + "requests": zone.RequestCounter, + "request_time": zone.RequestMsec, + "in_bytes": zone.InBytes, + "out_bytes": zone.OutBytes, + + "response_1xx_count": zone.Responses.OneXx, + "response_2xx_count": zone.Responses.TwoXx, + "response_3xx_count": zone.Responses.ThreeXx, + "response_4xx_count": zone.Responses.FourXx, + "response_5xx_count": zone.Responses.FiveXx, + + "cache_miss": zone.Responses.Miss, + "cache_bypass": zone.Responses.Bypass, + "cache_expired": zone.Responses.Expired, + "cache_stale": zone.Responses.Stale, + "cache_updating": zone.Responses.Updating, + "cache_revalidated": zone.Responses.Revalidated, + "cache_hit": zone.Responses.Hit, + "cache_scarce": zone.Responses.Scarce, + }, zoneTags) + } + + for filterName, filters := range status.FilterZones { + for filterKey, upstream := range filters { + filterTags := map[string]string{} + for k, v := range tags { + filterTags[k] = v + } + filterTags["filter_key"] = filterKey + filterTags["filter_name"] = filterName + + acc.AddFields("nginx_vts_filter", map[string]interface{}{ + "requests": upstream.RequestCounter, + "request_time": upstream.RequestMsec, + "in_bytes": upstream.InBytes, + "out_bytes": 
upstream.OutBytes, + + "response_1xx_count": upstream.Responses.OneXx, + "response_2xx_count": upstream.Responses.TwoXx, + "response_3xx_count": upstream.Responses.ThreeXx, + "response_4xx_count": upstream.Responses.FourXx, + "response_5xx_count": upstream.Responses.FiveXx, + + "cache_miss": upstream.Responses.Miss, + "cache_bypass": upstream.Responses.Bypass, + "cache_expired": upstream.Responses.Expired, + "cache_stale": upstream.Responses.Stale, + "cache_updating": upstream.Responses.Updating, + "cache_revalidated": upstream.Responses.Revalidated, + "cache_hit": upstream.Responses.Hit, + "cache_scarce": upstream.Responses.Scarce, + }, filterTags) + } + } + + for upstreamName, upstreams := range status.UpstreamZones { + for _, upstream := range upstreams { + upstreamServerTags := map[string]string{} + for k, v := range tags { + upstreamServerTags[k] = v + } + upstreamServerTags["upstream"] = upstreamName + upstreamServerTags["upstream_address"] = upstream.Server + acc.AddFields("nginx_vts_upstream", map[string]interface{}{ + "requests": upstream.RequestCounter, + "request_time": upstream.RequestMsec, + "response_time": upstream.ResponseMsec, + "in_bytes": upstream.InBytes, + "out_bytes": upstream.OutBytes, + + "response_1xx_count": upstream.Responses.OneXx, + "response_2xx_count": upstream.Responses.TwoXx, + "response_3xx_count": upstream.Responses.ThreeXx, + "response_4xx_count": upstream.Responses.FourXx, + "response_5xx_count": upstream.Responses.FiveXx, + + "weight": upstream.Weight, + "max_fails": upstream.MaxFails, + "fail_timeout": upstream.FailTimeout, + "backup": upstream.Backup, + "down": upstream.Down, + }, upstreamServerTags) + } + } + + for zoneName, zone := range status.CacheZones { + zoneTags := map[string]string{} + for k, v := range tags { + zoneTags[k] = v + } + zoneTags["zone"] = zoneName + + acc.AddFields("nginx_vts_cache", map[string]interface{}{ + "max_bytes": zone.MaxSize, + "used_bytes": zone.UsedSize, + "in_bytes": zone.InBytes, + "out_bytes": zone.OutBytes, + + "miss": zone.Responses.Miss, + "bypass": zone.Responses.Bypass, + "expired": zone.Responses.Expired, + "stale": zone.Responses.Stale, + "updating": zone.Responses.Updating, + "revalidated": zone.Responses.Revalidated, + "hit": zone.Responses.Hit, + "scarce": zone.Responses.Scarce, + }, zoneTags) + } + + return nil +} + +// Get tag(s) for the nginx plugin +func getTags(addr *url.URL) map[string]string { + h := addr.Host + host, port, err := net.SplitHostPort(h) + if err != nil { + host = addr.Host + if addr.Scheme == "http" { + port = "80" + } else if addr.Scheme == "https" { + port = "443" + } else { + port = "" + } + } + return map[string]string{"source": host, "port": port} +} + +func init() { + inputs.Add("nginx_vts", func() telegraf.Input { + return &NginxVTS{} + }) +} diff --git a/plugins/inputs/nginx_vts/nginx_vts_test.go b/plugins/inputs/nginx_vts/nginx_vts_test.go new file mode 100644 index 000000000..085fc3843 --- /dev/null +++ b/plugins/inputs/nginx_vts/nginx_vts_test.go @@ -0,0 +1,486 @@ +package nginx_vts + +import ( + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +const sampleStatusResponse = ` +{ + "hostName": "test.example.com", + "nginxVersion": "1.12.2", + "loadMsec": 1518180328331, + "nowMsec": 1518256058416, + "connections": { + "active": 111, + "reading": 222, + "writing": 333, + "waiting": 444, + "accepted": 555, + "handled": 666, + "requests": 777 + }, + 
"serverZones": { + "example.com": { + "requestCounter": 1415887, + "inBytes": 1296356607, + "outBytes": 4404939605, + "responses": { + "1xx": 100, + "2xx": 200, + "3xx": 300, + "4xx": 400, + "5xx": 500, + "miss": 14, + "bypass": 15, + "expired": 16, + "stale": 17, + "updating": 18, + "revalidated": 19, + "hit": 20, + "scarce": 21 + }, + "requestMsec": 13 + }, + "other.example.com": { + "requestCounter": 505, + "inBytes": 171388, + "outBytes": 1273382, + "responses": { + "1xx": 101, + "2xx": 201, + "3xx": 301, + "4xx": 401, + "5xx": 501, + "miss": 22, + "bypass": 23, + "expired": 24, + "stale": 25, + "updating": 26, + "revalidated": 27, + "hit": 28, + "scarce": 29 + }, + "requestMsec": 12 + } + }, + "filterZones": { + "country": { + "FI": { + "requestCounter": 60, + "inBytes": 2570, + "outBytes": 53597, + "responses": { + "1xx": 106, + "2xx": 206, + "3xx": 306, + "4xx": 406, + "5xx": 506, + "miss": 61, + "bypass": 62, + "expired": 63, + "stale": 64, + "updating": 65, + "revalidated": 66, + "hit": 67, + "scarce": 68 + }, + "requestMsec": 69 + } + } + }, + "upstreamZones": { + "backend_cluster": [ + { + "server": "127.0.0.1:6000", + "requestCounter": 2103849, + "inBytes": 1774680141, + "outBytes": 11727669190, + "responses": { + "1xx": 103, + "2xx": 203, + "3xx": 303, + "4xx": 403, + "5xx": 503 + }, + "requestMsec": 30, + "responseMsec": 31, + "weight": 32, + "maxFails": 33, + "failTimeout": 34, + "backup": false, + "down": false + } + ], + "::nogroups": [ + { + "server": "127.0.0.1:4433", + "requestCounter": 8, + "inBytes": 5013, + "outBytes": 487585, + "responses": { + "1xx": 104, + "2xx": 204, + "3xx": 304, + "4xx": 404, + "5xx": 504 + }, + "requestMsec": 34, + "responseMsec": 35, + "weight": 36, + "maxFails": 37, + "failTimeout": 38, + "backup": true, + "down": false + }, + { + "server": "127.0.0.1:8080", + "requestCounter": 7, + "inBytes": 2926, + "outBytes": 3846638, + "responses": { + "1xx": 105, + "2xx": 205, + "3xx": 305, + "4xx": 405, + "5xx": 505 + }, + "requestMsec": 39, + "responseMsec": 40, + "weight": 41, + "maxFails": 42, + "failTimeout": 43, + "backup": true, + "down": true + } + ] + }, + "cacheZones": { + "example": { + "maxSize": 9223372036854776000, + "usedSize": 68639232, + "inBytes": 697138673, + "outBytes": 11305044106, + "responses": { + "miss": 44, + "bypass": 45, + "expired": 46, + "stale": 47, + "updating": 48, + "revalidated": 49, + "hit": 50, + "scarce": 51 + } + }, + "static": { + "maxSize": 9223372036854776000, + "usedSize": 569856, + "inBytes": 551652333, + "outBytes": 1114889271, + "responses": { + "miss": 52, + "bypass": 53, + "expired": 54, + "stale": 55, + "updating": 56, + "revalidated": 57, + "hit": 58, + "scarce": 59 + } + } + } +} +` + +func TestNginxPlusGeneratesMetrics(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var rsp string + + if r.URL.Path == "/status" { + rsp = sampleStatusResponse + w.Header()["Content-Type"] = []string{"application/json"} + } else { + panic("Cannot handle request") + } + + fmt.Fprintln(w, rsp) + })) + defer ts.Close() + + n := &NginxVTS{ + Urls: []string{fmt.Sprintf("%s/status", ts.URL)}, + } + + var acc testutil.Accumulator + + err := n.Gather(&acc) + + require.NoError(t, err) + + addr, err := url.Parse(ts.URL) + if err != nil { + panic(err) + } + + host, port, err := net.SplitHostPort(addr.Host) + if err != nil { + host = addr.Host + if addr.Scheme == "http" { + port = "80" + } else if addr.Scheme == "https" { + port = "443" + } else { + port = "" + } + } + + 
acc.AssertContainsTaggedFields( + t, + "nginx_vts_connections", + map[string]interface{}{ + "accepted": uint64(555), + "active": uint64(111), + "handled": uint64(666), + "reading": uint64(222), + "requests": uint64(777), + "waiting": uint64(444), + "writing": uint64(333), + }, + map[string]string{ + "source": host, + "port": port, + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_vts_server", + map[string]interface{}{ + "requests": uint64(1415887), + "request_time": uint64(13), + "in_bytes": uint64(1296356607), + "out_bytes": uint64(4404939605), + + "response_1xx_count": uint64(100), + "response_2xx_count": uint64(200), + "response_3xx_count": uint64(300), + "response_4xx_count": uint64(400), + "response_5xx_count": uint64(500), + + "cache_miss": uint64(14), + "cache_bypass": uint64(15), + "cache_expired": uint64(16), + "cache_stale": uint64(17), + "cache_updating": uint64(18), + "cache_revalidated": uint64(19), + "cache_hit": uint64(20), + "cache_scarce": uint64(21), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "example.com", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_vts_filter", + map[string]interface{}{ + "requests": uint64(60), + "request_time": uint64(69), + "in_bytes": uint64(2570), + "out_bytes": uint64(53597), + + "response_1xx_count": uint64(106), + "response_2xx_count": uint64(206), + "response_3xx_count": uint64(306), + "response_4xx_count": uint64(406), + "response_5xx_count": uint64(506), + + "cache_miss": uint64(61), + "cache_bypass": uint64(62), + "cache_expired": uint64(63), + "cache_stale": uint64(64), + "cache_updating": uint64(65), + "cache_revalidated": uint64(66), + "cache_hit": uint64(67), + "cache_scarce": uint64(68), + }, + map[string]string{ + "source": host, + "port": port, + "filter_key": "FI", + "filter_name": "country", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_vts_server", + map[string]interface{}{ + "requests": uint64(505), + "request_time": uint64(12), + "in_bytes": uint64(171388), + "out_bytes": uint64(1273382), + + "response_1xx_count": uint64(101), + "response_2xx_count": uint64(201), + "response_3xx_count": uint64(301), + "response_4xx_count": uint64(401), + "response_5xx_count": uint64(501), + + "cache_miss": uint64(22), + "cache_bypass": uint64(23), + "cache_expired": uint64(24), + "cache_stale": uint64(25), + "cache_updating": uint64(26), + "cache_revalidated": uint64(27), + "cache_hit": uint64(28), + "cache_scarce": uint64(29), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "other.example.com", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_vts_upstream", + map[string]interface{}{ + "requests": uint64(2103849), + "request_time": uint64(30), + "response_time": uint64(31), + "in_bytes": uint64(1774680141), + "out_bytes": uint64(11727669190), + + "response_1xx_count": uint64(103), + "response_2xx_count": uint64(203), + "response_3xx_count": uint64(303), + "response_4xx_count": uint64(403), + "response_5xx_count": uint64(503), + + "weight": uint64(32), + "max_fails": uint64(33), + "fail_timeout": uint64(34), + "backup": bool(false), + "down": bool(false), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "backend_cluster", + "upstream_address": "127.0.0.1:6000", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_vts_upstream", + map[string]interface{}{ + "requests": uint64(8), + "request_time": uint64(34), + "response_time": uint64(35), + "in_bytes": uint64(5013), + "out_bytes": uint64(487585), + + "response_1xx_count": uint64(104), + 
"response_2xx_count": uint64(204), + "response_3xx_count": uint64(304), + "response_4xx_count": uint64(404), + "response_5xx_count": uint64(504), + + "weight": uint64(36), + "max_fails": uint64(37), + "fail_timeout": uint64(38), + "backup": bool(true), + "down": bool(false), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "::nogroups", + "upstream_address": "127.0.0.1:4433", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_vts_upstream", + map[string]interface{}{ + "requests": uint64(7), + "request_time": uint64(39), + "response_time": uint64(40), + "in_bytes": uint64(2926), + "out_bytes": uint64(3846638), + + "response_1xx_count": uint64(105), + "response_2xx_count": uint64(205), + "response_3xx_count": uint64(305), + "response_4xx_count": uint64(405), + "response_5xx_count": uint64(505), + + "weight": uint64(41), + "max_fails": uint64(42), + "fail_timeout": uint64(43), + "backup": bool(true), + "down": bool(true), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "::nogroups", + "upstream_address": "127.0.0.1:8080", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_vts_cache", + map[string]interface{}{ + "max_bytes": uint64(9223372036854776000), + "used_bytes": uint64(68639232), + "in_bytes": uint64(697138673), + "out_bytes": uint64(11305044106), + + "miss": uint64(44), + "bypass": uint64(45), + "expired": uint64(46), + "stale": uint64(47), + "updating": uint64(48), + "revalidated": uint64(49), + "hit": uint64(50), + "scarce": uint64(51), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "example", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_vts_cache", + map[string]interface{}{ + "max_bytes": uint64(9223372036854776000), + "used_bytes": uint64(569856), + "in_bytes": uint64(551652333), + "out_bytes": uint64(1114889271), + + "miss": uint64(52), + "bypass": uint64(53), + "expired": uint64(54), + "stale": uint64(55), + "updating": uint64(56), + "revalidated": uint64(57), + "hit": uint64(58), + "scarce": uint64(59), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "static", + }) +} From 715a7cc6708a9af3f6284363a9b0a50ad9272011 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 2 Nov 2018 18:20:37 -0700 Subject: [PATCH 0346/1815] Add nginx_vts plugin to changelog and readme --- CHANGELOG.md | 1 + README.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9d0e06bf8..021f0d563 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ - [http_listener_v2](/plugins/inputs/http_listener_v2/README.md) - Contributed by @jul1u5 - [ipvs](/plugins/inputs/ipvs/README.md) - Contributed by @amoghe - [nginx_plus_api](/plugins/inputs/nginx_plus_api/README.md) - Contributed by @Bugagazavr +- [nginx_vts](/plugins/inputs/nginx_vts/README.md) - Contributed by @monder - [wireless](/plugins/inputs/wireless/README.md) - Contributed by @jamesmaidment #### New Outputs diff --git a/README.md b/README.md index 193bfa8d8..6016925b1 100644 --- a/README.md +++ b/README.md @@ -209,6 +209,7 @@ For documentation on the latest development code see the [documentation index][d * [nginx](./plugins/inputs/nginx) * [nginx_plus](./plugins/inputs/nginx_plus) * [nginx_plus_api](./plugins/inputs/nginx_plus_api) +* [nginx_vts](./plugins/inputs/nginx_vts) * [nsq_consumer](./plugins/inputs/nsq_consumer) * [nginx_vts](./plugins/inputs/nginx_vts) * [nsq](./plugins/inputs/nsq) From 19a338b92221fbaa18131ebd81303779b426545e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 2 Nov 
2018 18:23:41 -0700 Subject: [PATCH 0347/1815] Update to source tag in nginx_vts readme --- plugins/inputs/nginx_vts/README.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/plugins/inputs/nginx_vts/README.md b/plugins/inputs/nginx_vts/README.md index d4be55139..ac22b7c2d 100644 --- a/plugins/inputs/nginx_vts/README.md +++ b/plugins/inputs/nginx_vts/README.md @@ -74,24 +74,24 @@ For module configuration details please see its [documentation](https://github.c ### Tags: - nginx_vts_connections - - server + - source - port - nginx_vts_server - - server + - source - port - zone - nginx_vts_filter - - server + - source - port - filter_name - filter_key - nginx_vts_upstream - - server + - source - port - upstream - upstream_address - nginx_vts_cache - - server + - source - port - zone @@ -112,10 +112,10 @@ When run with: It produces: ``` -nginx_vts_connections,server=localhost,port=80,host=localhost waiting=30i,accepted=295333i,handled=295333i,requests=6833487i,active=33i,reading=0i,writing=3i 1518341521000000000 -nginx_vts_server,zone=example.com,port=80,host=localhost,server=localhost cache_hit=158915i,in_bytes=1935528964i,out_bytes=6531366419i,response_2xx_count=809994i,response_4xx_count=16664i,cache_bypass=0i,cache_stale=0i,cache_revalidated=0i,requests=2187977i,response_1xx_count=0i,response_3xx_count=1360390i,cache_miss=2249i,cache_updating=0i,cache_scarce=0i,request_time=13i,response_5xx_count=929i,cache_expired=0i 1518341521000000000 -nginx_vts_server,host=localhost,server=localhost,port=80,zone=* requests=6775284i,in_bytes=5003242389i,out_bytes=36858233827i,cache_expired=318881i,cache_updating=0i,request_time=51i,response_1xx_count=0i,response_2xx_count=4385916i,response_4xx_count=83680i,response_5xx_count=1186i,cache_bypass=0i,cache_revalidated=0i,cache_hit=1972222i,cache_scarce=0i,response_3xx_count=2304502i,cache_miss=408251i,cache_stale=0i 1518341521000000000 -nginx_vts_filter,filter_key=FI,filter_name=country,port=80,host=localhost,server=localhost request_time=0i,in_bytes=139701i,response_3xx_count=0i,out_bytes=2644495i,response_1xx_count=0i,cache_expired=0i,cache_scarce=0i,requests=179i,cache_miss=0i,cache_bypass=0i,cache_stale=0i,cache_updating=0i,cache_revalidated=0i,cache_hit=0i,response_2xx_count=177i,response_4xx_count=2i,response_5xx_count=0i 1518341521000000000 -nginx_vts_upstream,port=80,host=localhost,upstream=backend_cluster,upstream_address=127.0.0.1:6000,server=localhost fail_timeout=10i,backup=false,request_time=31i,response_5xx_count=1081i,response_2xx_count=1877498i,max_fails=1i,in_bytes=2763336289i,out_bytes=19470265071i,weight=1i,down=false,response_time=31i,response_1xx_count=0i,response_4xx_count=76125i,requests=3379232i,response_3xx_count=1424528i 1518341521000000000 -nginx_vts_cache,server=localhost,port=80,host=localhost,zone=example stale=0i,used_bytes=64334336i,miss=394573i,bypass=0i,expired=318788i,updating=0i,revalidated=0i,hit=689883i,scarce=0i,max_bytes=9223372036854775296i,in_bytes=1111161581i,out_bytes=19175548290i 1518341521000000000 +nginx_vts_connections,source=localhost,port=80,host=localhost waiting=30i,accepted=295333i,handled=295333i,requests=6833487i,active=33i,reading=0i,writing=3i 1518341521000000000 +nginx_vts_server,zone=example.com,port=80,host=localhost,source=localhost 
cache_hit=158915i,in_bytes=1935528964i,out_bytes=6531366419i,response_2xx_count=809994i,response_4xx_count=16664i,cache_bypass=0i,cache_stale=0i,cache_revalidated=0i,requests=2187977i,response_1xx_count=0i,response_3xx_count=1360390i,cache_miss=2249i,cache_updating=0i,cache_scarce=0i,request_time=13i,response_5xx_count=929i,cache_expired=0i 1518341521000000000 +nginx_vts_server,host=localhost,source=localhost,port=80,zone=* requests=6775284i,in_bytes=5003242389i,out_bytes=36858233827i,cache_expired=318881i,cache_updating=0i,request_time=51i,response_1xx_count=0i,response_2xx_count=4385916i,response_4xx_count=83680i,response_5xx_count=1186i,cache_bypass=0i,cache_revalidated=0i,cache_hit=1972222i,cache_scarce=0i,response_3xx_count=2304502i,cache_miss=408251i,cache_stale=0i 1518341521000000000 +nginx_vts_filter,filter_key=FI,filter_name=country,port=80,host=localhost,source=localhost request_time=0i,in_bytes=139701i,response_3xx_count=0i,out_bytes=2644495i,response_1xx_count=0i,cache_expired=0i,cache_scarce=0i,requests=179i,cache_miss=0i,cache_bypass=0i,cache_stale=0i,cache_updating=0i,cache_revalidated=0i,cache_hit=0i,response_2xx_count=177i,response_4xx_count=2i,response_5xx_count=0i 1518341521000000000 +nginx_vts_upstream,port=80,host=localhost,upstream=backend_cluster,upstream_address=127.0.0.1:6000,source=localhost fail_timeout=10i,backup=false,request_time=31i,response_5xx_count=1081i,response_2xx_count=1877498i,max_fails=1i,in_bytes=2763336289i,out_bytes=19470265071i,weight=1i,down=false,response_time=31i,response_1xx_count=0i,response_4xx_count=76125i,requests=3379232i,response_3xx_count=1424528i 1518341521000000000 +nginx_vts_cache,source=localhost,port=80,host=localhost,zone=example stale=0i,used_bytes=64334336i,miss=394573i,bypass=0i,expired=318788i,updating=0i,revalidated=0i,hit=689883i,scarce=0i,max_bytes=9223372036854775296i,in_bytes=1111161581i,out_bytes=19175548290i 1518341521000000000 ``` From 9c866553e86a3ddf4d52c4865298715324f5b863 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 5 Nov 2018 14:30:16 -0700 Subject: [PATCH 0348/1815] Add scraping for Prometheus endpoint in Kubernetes (#4920) --- Gopkg.lock | 25 ++- plugins/inputs/prometheus/README.md | 23 +++ plugins/inputs/prometheus/kubernetes.go | 198 +++++++++++++++++++ plugins/inputs/prometheus/kubernetes_test.go | 88 +++++++++ plugins/inputs/prometheus/prometheus.go | 51 ++++- 5 files changed, 381 insertions(+), 4 deletions(-) create mode 100644 plugins/inputs/prometheus/kubernetes.go create mode 100644 plugins/inputs/prometheus/kubernetes_test.go diff --git a/Gopkg.lock b/Gopkg.lock index a93816c1e..7dd2261c0 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -376,6 +376,24 @@ revision = "36d01c2b4cbeb3d2a12063e4880ce30800af9560" version = "v1.1.1" +[[projects]] + digest = "1:99a0607f79d36202b64b674c0464781549917cfc4bfb88037aaa98b31e124a18" + name = "github.com/ericchiang/k8s" + packages = [ + ".", + "apis/apiextensions/v1beta1", + "apis/core/v1", + "apis/meta/v1", + "apis/resource", + "runtime", + "runtime/schema", + "util/intstr", + "watch/versioned", + ] + pruneopts = "" + revision = "d1bbc0cffaf9849ddcae7b9efffae33e2dd52e9a" + version = "v1.2.0" + [[projects]] digest = "1:858b7fe7b0f4bc7ef9953926828f2816ea52d01a88d72d1c45bc8c108f23c356" name = "github.com/go-ini/ini" @@ -1154,7 +1172,7 @@ [[projects]] branch = "master" - digest = "1:b697592485cb412be4188c08ca0beed9aab87f36b86418e21acc4a3998f63734" + digest = "0:" name = "golang.org/x/oauth2" packages = [ ".", @@ -1235,7 +1253,7 @@ 
revision = "19ff8768a5c0b8e46ea281065664787eefc24121" [[projects]] - digest = "1:c1771ca6060335f9768dff6558108bc5ef6c58506821ad43377ee23ff059e472" + digest = "0:" name = "google.golang.org/appengine" packages = [ ".", @@ -1439,6 +1457,9 @@ "github.com/docker/docker/client", "github.com/docker/libnetwork/ipvs", "github.com/eclipse/paho.mqtt.golang", + "github.com/ericchiang/k8s", + "github.com/ericchiang/k8s/apis/core/v1", + "github.com/ericchiang/k8s/apis/meta/v1", "github.com/go-logfmt/logfmt", "github.com/go-redis/redis", "github.com/go-sql-driver/mysql", diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index 294d84150..37265d332 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -14,6 +14,17 @@ in Prometheus format. ## An array of Kubernetes services to scrape metrics from. # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] + ## Kubernetes config file to create client from. + # kube_config = "/path/to/kubernetes.config" + + ## Scrape Kubernetes pods for the following prometheus annotations: + ## - prometheus.io/scrape: Enable scraping for this pod + ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to + ## set this to `https` & most likely set the tls config. + ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. + ## - prometheus.io/port: If port is not 9102 use this annotation + # monitor_kubernetes_pods = true + ## Use bearer token for authorization # bearer_token = /path/to/bearer/token @@ -39,6 +50,18 @@ by looking up all A records assigned to the hostname as described in This method can be used to locate all [Kubernetes headless services](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services). +#### Kubernetes scraping + +Enabling this option will allow the plugin to scrape for prometheus annotation on Kubernetes +pods. Currently, you can run this plugin in your kubernetes cluster, or we use the kubeconfig +file to determine where to monitor. +Currently the following annotation are supported: + +* `prometheus.io/scrape` Enable scraping for this pod. +* `prometheus.io/scheme` If the metrics endpoint is secured then you will need to set this to `https` & most likely set the tls config. (default 'http') +* `prometheus.io/path` Override the path for the metrics endpoint on the service. (default '/metrics') +* `prometheus.io/port` Used to override the port. (default 9102) + #### Bearer Token If set, the file specified by the `bearer_token` parameter will be read on diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go new file mode 100644 index 000000000..4faf2d55e --- /dev/null +++ b/plugins/inputs/prometheus/kubernetes.go @@ -0,0 +1,198 @@ +package prometheus + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "net" + "net/url" + "os/user" + "path/filepath" + "sync" + "time" + + "github.com/ericchiang/k8s" + corev1 "github.com/ericchiang/k8s/apis/core/v1" + "gopkg.in/yaml.v2" +) + +type payload struct { + eventype string + pod *corev1.Pod +} + +// loadClient parses a kubeconfig from a file and returns a Kubernetes +// client. It does not support extensions or client auth providers. +func loadClient(kubeconfigPath string) (*k8s.Client, error) { + data, err := ioutil.ReadFile(kubeconfigPath) + if err != nil { + return nil, fmt.Errorf("failed reading '%s': %v", kubeconfigPath, err) + } + + // Unmarshal YAML into a Kubernetes config object. 
+ var config k8s.Config + if err := yaml.Unmarshal(data, &config); err != nil { + return nil, err + } + return k8s.NewClient(&config) +} + +func (p *Prometheus) start(ctx context.Context) error { + client, err := k8s.NewInClusterClient() + if err != nil { + u, err := user.Current() + if err != nil { + return fmt.Errorf("Failed to get current user - %v", err) + } + configLocation := filepath.Join(u.HomeDir, ".kube/config") + if p.KubeConfig != "" { + configLocation = p.KubeConfig + } + client, err = loadClient(configLocation) + if err != nil { + return err + } + } + + p.wg = sync.WaitGroup{} + + p.wg.Add(1) + go func() { + defer p.wg.Done() + for { + select { + case <-ctx.Done(): + return + case <-time.After(time.Second): + err := p.watch(ctx, client) + if err != nil { + log.Printf("E! [inputs.prometheus] unable to watch resources: %v", err) + } + } + } + }() + + return nil +} + +func (p *Prometheus) watch(ctx context.Context, client *k8s.Client) error { + pod := &corev1.Pod{} + watcher, err := client.Watch(ctx, "", &corev1.Pod{}) + if err != nil { + return err + } + defer watcher.Close() + + for { + select { + case <-ctx.Done(): + return nil + default: + pod = &corev1.Pod{} + // An error here means we need to reconnect the watcher. + eventType, err := watcher.Next(pod) + if err != nil { + return err + } + + switch eventType { + case k8s.EventAdded: + registerPod(pod, p) + case k8s.EventDeleted: + unregisterPod(pod, p) + case k8s.EventModified: + } + } + } +} + +func registerPod(pod *corev1.Pod, p *Prometheus) { + targetURL := getScrapeURL(pod) + if targetURL == nil { + return + } + + log.Printf("D! [inputs.prometheus] will scrape metrics from %s", *targetURL) + // add annotation as metrics tags + tags := pod.GetMetadata().GetAnnotations() + tags["pod_name"] = pod.GetMetadata().GetName() + tags["namespace"] = pod.GetMetadata().GetNamespace() + // add labels as metrics tags + for k, v := range pod.GetMetadata().GetLabels() { + tags[k] = v + } + URL, err := url.Parse(*targetURL) + if err != nil { + log.Printf("E! [inputs.prometheus] could not parse URL %s: %v", *targetURL, err) + return + } + podURL := p.AddressToURL(URL, URL.Hostname()) + p.lock.Lock() + p.kubernetesPods = append(p.kubernetesPods, + URLAndAddress{ + URL: podURL, + Address: URL.Hostname(), + OriginalURL: URL, + Tags: tags}) + p.lock.Unlock() +} + +func getScrapeURL(pod *corev1.Pod) *string { + scrape := pod.GetMetadata().GetAnnotations()["prometheus.io/scrape"] + if scrape != "true" { + return nil + } + ip := pod.Status.GetPodIP() + if ip == "" { + // return as if scrape was disabled, we will be notified again once the pod + // has an IP + return nil + } + + scheme := pod.GetMetadata().GetAnnotations()["prometheus.io/scheme"] + path := pod.GetMetadata().GetAnnotations()["prometheus.io/path"] + port := pod.GetMetadata().GetAnnotations()["prometheus.io/port"] + + if scheme == "" { + scheme = "http" + } + if port == "" { + port = "9102" + } + if path == "" { + path = "/metrics" + } + + u := &url.URL{ + Scheme: scheme, + Host: net.JoinHostPort(ip, port), + Path: path, + } + + x := u.String() + + return &x +} + +func unregisterPod(pod *corev1.Pod, p *Prometheus) { + url := getScrapeURL(pod) + if url == nil { + return + } + + p.lock.Lock() + defer p.lock.Unlock() + log.Printf("D! 
[inputs.prometheus] registered a delete request for %s in namespace %s", + pod.GetMetadata().GetName(), pod.GetMetadata().GetNamespace()) + var result []URLAndAddress + for _, v := range p.kubernetesPods { + if v.URL.String() != *url { + result = append(result, v) + } else { + log.Printf("D! [inputs.prometheus] will stop scraping for %s", *url) + } + + } + p.kubernetesPods = result +} diff --git a/plugins/inputs/prometheus/kubernetes_test.go b/plugins/inputs/prometheus/kubernetes_test.go new file mode 100644 index 000000000..2afdbc5ec --- /dev/null +++ b/plugins/inputs/prometheus/kubernetes_test.go @@ -0,0 +1,88 @@ +package prometheus + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + v1 "github.com/ericchiang/k8s/apis/core/v1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" +) + +func TestScrapeURLNoAnnotations(t *testing.T) { + p := &v1.Pod{Metadata: &metav1.ObjectMeta{}} + p.GetMetadata().Annotations = map[string]string{} + url := getScrapeURL(p) + assert.Nil(t, url) +} +func TestScrapeURLAnnotationsNoScrape(t *testing.T) { + p := &v1.Pod{Metadata: &metav1.ObjectMeta{}} + p.Metadata.Name = str("myPod") + p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "false"} + url := getScrapeURL(p) + assert.Nil(t, url) +} +func TestScrapeURLAnnotations(t *testing.T) { + p := pod() + p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + url := getScrapeURL(p) + assert.Equal(t, "http://127.0.0.1:9102/metrics", *url) +} +func TestScrapeURLAnnotationsCustomPort(t *testing.T) { + p := pod() + p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/port": "9000"} + url := getScrapeURL(p) + assert.Equal(t, "http://127.0.0.1:9000/metrics", *url) +} +func TestScrapeURLAnnotationsCustomPath(t *testing.T) { + p := pod() + p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "mymetrics"} + url := getScrapeURL(p) + assert.Equal(t, "http://127.0.0.1:9102/mymetrics", *url) +} + +func TestScrapeURLAnnotationsCustomPathWithSep(t *testing.T) { + p := pod() + p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/mymetrics"} + url := getScrapeURL(p) + assert.Equal(t, "http://127.0.0.1:9102/mymetrics", *url) +} + +func TestAddPod(t *testing.T) { + prom := &Prometheus{} + p := pod() + p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + registerPod(p, prom) + assert.Equal(t, 1, len(prom.kubernetesPods)) +} +func TestAddMultiplePods(t *testing.T) { + prom := &Prometheus{} + + p := pod() + p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + registerPod(p, prom) + p.Metadata.Name = str("Pod2") + registerPod(p, prom) + assert.Equal(t, 2, len(prom.kubernetesPods)) +} +func TestDeletePods(t *testing.T) { + prom := &Prometheus{} + + p := pod() + p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + registerPod(p, prom) + unregisterPod(p, prom) + assert.Equal(t, 0, len(prom.kubernetesPods)) +} + +func pod() *v1.Pod { + p := &v1.Pod{Metadata: &metav1.ObjectMeta{}, Status: &v1.PodStatus{}} + p.Status.PodIP = str("127.0.0.1") + p.Metadata.Name = str("myPod") + p.Metadata.Namespace = str("default") + return p +} + +func str(x string) *string { + return &x +} diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index b8e346032..84fc31800 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ 
-1,6 +1,7 @@ package prometheus import ( + "context" "errors" "fmt" "io/ioutil" @@ -26,6 +27,9 @@ type Prometheus struct { // An array of Kubernetes services to scrape metrics from. KubernetesServices []string + // Location of kubernetes config file + KubeConfig string + // Bearer Token authorization file path BearerToken string `toml:"bearer_token"` @@ -34,6 +38,13 @@ type Prometheus struct { tls.ClientConfig client *http.Client + + // Should we scrape Kubernetes services for prometheus annotations + MonitorPods bool `toml:"monitor_kubernetes_pods"` + lock sync.Mutex + kubernetesPods []URLAndAddress + cancel context.CancelFunc + wg sync.WaitGroup } var sampleConfig = ` @@ -43,6 +54,17 @@ var sampleConfig = ` ## An array of Kubernetes services to scrape metrics from. # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] + ## Kubernetes config file to create client from. + # kube_config = "/path/to/kubernetes.config" + + ## Scrape Kubernetes pods for the following prometheus annotations: + ## - prometheus.io/scrape: Enable scraping for this pod + ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to + ## set this to 'https' & most likely set the tls config. + ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. + ## - prometheus.io/port: If port is not 9102 use this annotation + # monitor_kubernetes_pods = true + ## Use bearer token for authorization # bearer_token = /path/to/bearer/token @@ -90,6 +112,7 @@ type URLAndAddress struct { OriginalURL *url.URL URL *url.URL Address string + Tags map[string]string } func (p *Prometheus) GetAllURLs() ([]URLAndAddress, error) { @@ -97,20 +120,26 @@ func (p *Prometheus) GetAllURLs() ([]URLAndAddress, error) { for _, u := range p.URLs { URL, err := url.Parse(u) if err != nil { - log.Printf("prometheus: Could not parse %s, skipping it. Error: %s", u, err) + log.Printf("prometheus: Could not parse %s, skipping it. Error: %s", u, err.Error()) continue } allURLs = append(allURLs, URLAndAddress{URL: URL, OriginalURL: URL}) } + p.lock.Lock() + defer p.lock.Unlock() + // loop through all pods scraped via the prometheus annotation on the pods + allURLs = append(allURLs, p.kubernetesPods...) + for _, service := range p.KubernetesServices { URL, err := url.Parse(service) if err != nil { return nil, err } + resolvedAddresses, err := net.LookupHost(URL.Hostname()) if err != nil { - log.Printf("prometheus: Could not resolve %s, skipping it. Error: %s", URL.Host, err) + log.Printf("prometheus: Could not resolve %s, skipping it. 
Error: %s", URL.Host, err.Error()) continue } for _, resolved := range resolvedAddresses { @@ -244,6 +273,9 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error if u.Address != "" { tags["address"] = u.Address } + for k, v := range u.Tags { + tags[k] = v + } switch metric.Type() { case telegraf.Counter: @@ -262,6 +294,21 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error return nil } +// Start will start the Kubernetes scraping if enabled in the configuration +func (p *Prometheus) Start(a telegraf.Accumulator) error { + if p.MonitorPods { + var ctx context.Context + ctx, p.cancel = context.WithCancel(context.Background()) + return p.start(ctx) + } + return nil +} + +func (p *Prometheus) Stop() { + p.cancel() + p.wg.Wait() +} + func init() { inputs.Add("prometheus", func() telegraf.Input { return &Prometheus{ResponseTimeout: internal.Duration{Duration: time.Second * 3}} From 6120c65a5a1171a2ec011ce7d770efe76bae0f97 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 5 Nov 2018 13:31:39 -0800 Subject: [PATCH 0349/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 021f0d563..3bf8e0631 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ - [#3847](https://github.com/influxdata/telegraf/pull/3847): Add wireless input plugin. - [#4934](https://github.com/influxdata/telegraf/pull/4934): Add LUN to datasource translation in vsphere input. - [#4798](https://github.com/influxdata/telegraf/pull/4798): Allow connecting to prometheus via unix socket. +- [#4920](https://github.com/influxdata/telegraf/pull/4920): Add scraping for Prometheus endpoint in Kubernetes. #### Bugfixes From 74667cd68157aa8fe20c98e292526ae12e3a398a Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 5 Nov 2018 14:33:02 -0700 Subject: [PATCH 0350/1815] Fix toml struct tag in win_service #4811 (#4936) --- plugins/inputs/procstat/procstat.go | 3 +-- plugins/inputs/procstat/win_service_windows.go | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index d9c1ee7b6..0a877b162 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -32,7 +32,7 @@ type Procstat struct { SystemdUnit string CGroup string `toml:"cgroup"` PidTag bool - WinService string `tom:"win_service"` + WinService string `toml:"win_service"` finder PIDFinder @@ -281,7 +281,6 @@ func (p *Procstat) updateProcesses(acc telegraf.Accumulator, prevInfo map[PID]Pr // Create and return PIDGatherer lazily func (p *Procstat) getPIDFinder() (PIDFinder, error) { - if p.finder == nil { f, err := p.createPIDFinder() if err != nil { diff --git a/plugins/inputs/procstat/win_service_windows.go b/plugins/inputs/procstat/win_service_windows.go index 70a542263..06dffc847 100644 --- a/plugins/inputs/procstat/win_service_windows.go +++ b/plugins/inputs/procstat/win_service_windows.go @@ -25,7 +25,6 @@ func getService(name string) (*mgr.Service, error) { } func queryPidWithWinServiceName(winServiceName string) (uint32, error) { - srv, err := getService(winServiceName) if err != nil { return 0, err From 6e5c2f8bb6cc0f6888aa8d0707e1a32274e50c7c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 5 Nov 2018 13:34:28 -0800 Subject: [PATCH 0351/1815] Remove outputs blocking inputs when output is slow (#4938) --- CONTRIBUTING.md | 500 +--------- accumulator.go | 52 +- 
agent/accumulator.go | 70 +- agent/agent.go | 898 +++++++++++------- agent/tick.go | 57 ++ cmd/telegraf/telegraf.go | 177 ++-- docs/AGGREGATORS.md | 126 +++ docs/CONFIGURATION.md | 8 + docs/INPUTS.md | 143 +++ docs/OUTPUTS.md | 95 ++ docs/PROCESSORS.md | 63 ++ input.go | 13 +- internal/buffer/buffer.go | 130 --- internal/buffer/buffer_test.go | 203 ---- internal/config/config.go | 61 +- internal/internal.go | 46 + internal/internal_test.go | 53 ++ internal/models/buffer.go | 214 +++++ internal/models/buffer_test.go | 385 ++++++++ internal/models/filter_test.go | 43 + internal/models/running_aggregator.go | 150 +-- internal/models/running_aggregator_test.go | 112 +-- internal/models/running_input.go | 41 +- internal/models/running_input_test.go | 15 +- internal/models/running_output.go | 180 ++-- internal/models/running_output_test.go | 50 - internal/models/running_processor.go | 14 + internal/models/running_processor_test.go | 28 +- metric.go | 11 + metric/metric.go | 9 + metric/tracking.go | 171 ++++ metric/tracking_test.go | 260 +++++ output.go | 29 +- plugins/aggregators/basicstats/basicstats.go | 1 - plugins/inputs/amqp_consumer/README.md | 15 +- plugins/inputs/amqp_consumer/amqp_consumer.go | 166 +++- plugins/inputs/internal/README.md | 64 +- plugins/inputs/kafka_consumer/README.md | 37 +- .../inputs/kafka_consumer/kafka_consumer.go | 219 +++-- .../kafka_consumer_integration_test.go | 1 - .../kafka_consumer/kafka_consumer_test.go | 110 ++- plugins/inputs/mqtt_consumer/README.md | 20 +- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 103 +- .../mqtt_consumer/mqtt_consumer_test.go | 48 +- plugins/inputs/nats_consumer/README.md | 36 +- plugins/inputs/nats_consumer/nats_consumer.go | 145 +-- .../nats_consumer/nats_consumer_test.go | 134 --- plugins/inputs/nsq_consumer/README.md | 20 +- plugins/inputs/nsq_consumer/nsq_consumer.go | 139 ++- .../inputs/nsq_consumer/nsq_consumer_test.go | 11 +- .../inputs/socket_listener/socket_listener.go | 8 +- plugins/outputs/discard/discard.go | 12 +- .../prometheus_client/prometheus_client.go | 11 +- .../prometheus_client_test.go | 4 +- plugins/processors/topk/topk.go | 22 +- plugins/processors/topk/topk_test.go | 4 +- processor.go | 2 +- testutil/accumulator.go | 49 +- testutil/metric.go | 16 +- 59 files changed, 3615 insertions(+), 2189 deletions(-) create mode 100644 agent/tick.go create mode 100644 docs/AGGREGATORS.md create mode 100644 docs/INPUTS.md create mode 100644 docs/OUTPUTS.md create mode 100644 docs/PROCESSORS.md delete mode 100644 internal/buffer/buffer.go delete mode 100644 internal/buffer/buffer_test.go create mode 100644 internal/models/buffer.go create mode 100644 internal/models/buffer_test.go create mode 100644 metric/tracking.go create mode 100644 metric/tracking_test.go delete mode 100644 plugins/inputs/nats_consumer/nats_consumer_test.go diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4bc7daf71..0015cd5eb 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,489 +1,52 @@ -## Steps for Contributing: +### Contributing -1. [Sign the CLA](http://influxdb.com/community/cla.html) -1. Make changes or write plugin (see below for details) -1. Add your plugin to one of: `plugins/{inputs,outputs,aggregators,processors}/all/all.go` -1. If your plugin requires a new Go package, -[add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency) -1. 
Write a README for your plugin, if it's an input plugin, it should be structured -like the [input example here](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/EXAMPLE_README.md). -Output plugins READMEs are less structured, -but any information you can provide on how the data will look is appreciated. -See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb) -for a good example. -1. **Optional:** Help users of your plugin by including example queries for populating dashboards. Include these sample queries in the `README.md` for the plugin. -1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf). +1. [Sign the CLA][cla]. +1. Open a [new issue][] to discuss the changes you would like to make. This is + not strictly required but it may help reduce the amount of rework you need + to do later. +1. Make changes or write plugin using the guidelines in the following + documents: + - [Input Plugins][inputs] + - [Processor Plugins][processors] + - [Aggregator Plugins][aggregators] + - [Output Plugins][outputs] +1. Ensure you have added proper unit tests and documentation. +1. Open a new [pull request][]. -## GoDoc +### GoDoc Public interfaces for inputs, outputs, processors, aggregators, metrics, -and the accumulator can be found on the GoDoc +and the accumulator can be found in the GoDoc: [![GoDoc](https://godoc.org/github.com/influxdata/telegraf?status.svg)](https://godoc.org/github.com/influxdata/telegraf) -## Sign the CLA +### Common development tasks -Before we can merge a pull request, you will need to sign the CLA, -which can be found [on our website](http://influxdb.com/community/cla.html) - -## Adding a dependency +**Adding a dependency:** Assuming you can already build the project, run these in the telegraf directory: 1. `dep ensure -vendor-only` 2. `dep ensure -add github.com/[dependency]/[new-package]` -## Input Plugins - -This section is for developers who want to create new collection inputs. -Telegraf is entirely plugin driven. This interface allows for operators to -pick and chose what is gathered and makes it easy for developers -to create new ways of generating metrics. - -Plugin authorship is kept as simple as possible to promote people to develop -and submit new inputs. - -### Input Plugin Guidelines - -* A plugin must conform to the [`telegraf.Input`](https://godoc.org/github.com/influxdata/telegraf#Input) interface. -* Input Plugins should call `inputs.Add` in their `init` function to register themselves. -See below for a quick example. -* Input Plugins must be added to the -`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file. -* The `SampleConfig` function should return valid toml that describes how the -plugin can be configured. This is included in `telegraf config`. Please -consult the [SampleConfig](https://github.com/influxdata/telegraf/wiki/SampleConfig) -page for the latest style guidelines. -* The `Description` function should say in one line what this plugin does. - -Let's say you've written a plugin that emits metrics about processes on the -current host. 
- -### Input Plugin Example - -```go -package simple - -// simple.go - -import ( - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs" -) - -type Simple struct { - Ok bool -} - -func (s *Simple) Description() string { - return "a demo plugin" -} - -func (s *Simple) SampleConfig() string { - return ` - ## Indicate if everything is fine - ok = true -` -} - -func (s *Simple) Gather(acc telegraf.Accumulator) error { - if s.Ok { - acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, nil) - } else { - acc.AddFields("state", map[string]interface{}{"value": "not great"}, nil) - } - - return nil -} - -func init() { - inputs.Add("simple", func() telegraf.Input { return &Simple{} }) -} -``` - -### Input Plugin Development - -* Run `make static` followed by `make plugin-[pluginName]` to spin up a docker dev environment -using docker-compose. -* ***[Optional]*** When developing a plugin, add a `dev` directory with a `docker-compose.yml` and `telegraf.conf` -as well as any other supporting files, where sensible. - -## Adding Typed Metrics - -In addition the the `AddFields` function, the accumulator also supports an -`AddGauge` and `AddCounter` function. These functions are for adding _typed_ -metrics. Metric types are ignored for the InfluxDB output, but can be used -for other outputs, such as [prometheus](https://prometheus.io/docs/concepts/metric_types/). - -## Input Plugins Accepting Arbitrary Data Formats - -Some input plugins (such as -[exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec)) -accept arbitrary input data formats. An overview of these data formats can -be found -[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). - -In order to enable this, you must specify a `SetParser(parser parsers.Parser)` -function on the plugin object (see the exec plugin for an example), as well as -defining `parser` as a field of the object. - -You can then utilize the parser internally in your plugin, parsing data as you -see fit. Telegraf's configuration layer will take care of instantiating and -creating the `Parser` object. - -You should also add the following to your SampleConfig() return: - -```toml - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" -``` - -Below is the `Parser` interface. - -```go -// Parser is an interface defining functions that a parser plugin must satisfy. -type Parser interface { - // Parse takes a byte buffer separated by newlines - // ie, `cpu.usage.idle 90\ncpu.usage.busy 10` - // and parses it into telegraf metrics - Parse(buf []byte) ([]telegraf.Metric, error) - - // ParseLine takes a single string metric - // ie, "cpu.usage.idle 90" - // and parses it into a telegraf metric. - ParseLine(line string) (telegraf.Metric, error) -} -``` - -And you can view the code -[here.](https://github.com/influxdata/telegraf/blob/henrypfhu-master/plugins/parsers/registry.go) - -## Service Input Plugins - -This section is for developers who want to create new "service" collection -inputs. A service plugin differs from a regular plugin in that it operates -a background service while Telegraf is running. One example would be the `statsd` -plugin, which operates a statsd server. 
- -Service Input Plugins are substantially more complicated than a regular plugin, as they -will require threads and locks to verify data integrity. Service Input Plugins should -be avoided unless there is no way to create their behavior with a regular plugin. - -Their interface is quite similar to a regular plugin, with the addition of `Start()` -and `Stop()` methods. - -### Service Plugin Guidelines - -* Same as the `Plugin` guidelines, except that they must conform to the -[`telegraf.ServiceInput`](https://godoc.org/github.com/influxdata/telegraf#ServiceInput) interface. - -## Output Plugins - -This section is for developers who want to create a new output sink. Outputs -are created in a similar manner as collection plugins, and their interface has -similar constructs. - -### Output Plugin Guidelines - -* An output must conform to the [`telegraf.Output`](https://godoc.org/github.com/influxdata/telegraf#Output) interface. -* Outputs should call `outputs.Add` in their `init` function to register themselves. -See below for a quick example. -* To be available within Telegraf itself, plugins must add themselves to the -`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file. -* The `SampleConfig` function should return valid toml that describes how the -plugin can be configured. This is included in `telegraf config`. Please -consult the [SampleConfig](https://github.com/influxdata/telegraf/wiki/SampleConfig) -page for the latest style guidelines. -* The `Description` function should say in one line what this output does. - -### Output Example - -```go -package simpleoutput - -// simpleoutput.go - -import ( - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/outputs" -) - -type Simple struct { - Ok bool -} - -func (s *Simple) Description() string { - return "a demo output" -} - -func (s *Simple) SampleConfig() string { - return ` - ok = true -` -} - -func (s *Simple) Connect() error { - // Make a connection to the URL here - return nil -} - -func (s *Simple) Close() error { - // Close connection to the URL here - return nil -} - -func (s *Simple) Write(metrics []telegraf.Metric) error { - for _, metric := range metrics { - // write `metric` to the output sink here - } - return nil -} - -func init() { - outputs.Add("simpleoutput", func() telegraf.Output { return &Simple{} }) -} - -``` - -## Output Plugins Writing Arbitrary Data Formats - -Some output plugins (such as -[file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file)) -can write arbitrary output data formats. An overview of these data formats can -be found -[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md). - -In order to enable this, you must specify a -`SetSerializer(serializer serializers.Serializer)` -function on the plugin object (see the file plugin for an example), as well as -defining `serializer` as a field of the object. - -You can then utilize the serializer internally in your plugin, serializing data -before it's written. Telegraf's configuration layer will take care of -instantiating and creating the `Serializer` object. - -You should also add the following to your SampleConfig() return: - -```toml - ## Data format to output. 
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "influx" -``` - -## Service Output Plugins - -This section is for developers who want to create new "service" output. A -service output differs from a regular output in that it operates a background service -while Telegraf is running. One example would be the `prometheus_client` output, -which operates an HTTP server. - -Their interface is quite similar to a regular output, with the addition of `Start()` -and `Stop()` methods. - -### Service Output Guidelines - -* Same as the `Output` guidelines, except that they must conform to the -`output.ServiceOutput` interface. - -## Processor Plugins - -This section is for developers who want to create a new processor plugin. - -### Processor Plugin Guidelines - -* A processor must conform to the [`telegraf.Processor`](https://godoc.org/github.com/influxdata/telegraf#Processor) interface. -* Processors should call `processors.Add` in their `init` function to register themselves. -See below for a quick example. -* To be available within Telegraf itself, plugins must add themselves to the -`github.com/influxdata/telegraf/plugins/processors/all/all.go` file. -* The `SampleConfig` function should return valid toml that describes how the -processor can be configured. This is include in the output of `telegraf config`. -* The `Description` function should say in one line what this processor does. - -### Processor Example - -```go -package printer - -// printer.go - -import ( - "fmt" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/processors" -) - -type Printer struct { -} - -var sampleConfig = ` -` - -func (p *Printer) SampleConfig() string { - return sampleConfig -} - -func (p *Printer) Description() string { - return "Print all metrics that pass through this filter." -} - -func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric { - for _, metric := range in { - fmt.Println(metric.String()) - } - return in -} - -func init() { - processors.Add("printer", func() telegraf.Processor { - return &Printer{} - }) -} -``` - -## Aggregator Plugins - -This section is for developers who want to create a new aggregator plugin. - -### Aggregator Plugin Guidelines - -* A aggregator must conform to the [`telegraf.Aggregator`](https://godoc.org/github.com/influxdata/telegraf#Aggregator) interface. -* Aggregators should call `aggregators.Add` in their `init` function to register themselves. -See below for a quick example. -* To be available within Telegraf itself, plugins must add themselves to the -`github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file. -* The `SampleConfig` function should return valid toml that describes how the -aggregator can be configured. This is include in `telegraf config`. -* The `Description` function should say in one line what this aggregator does. -* The Aggregator plugin will need to keep caches of metrics that have passed -through it. This should be done using the builtin `HashID()` function of each -metric. -* When the `Reset()` function is called, all caches should be cleared. 
- -### Aggregator Example - -```go -package min - -// min.go - -import ( - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/aggregators" -) - -type Min struct { - // caches for metric fields, names, and tags - fieldCache map[uint64]map[string]float64 - nameCache map[uint64]string - tagCache map[uint64]map[string]string -} - -func NewMin() telegraf.Aggregator { - m := &Min{} - m.Reset() - return m -} - -var sampleConfig = ` - ## period is the flush & clear interval of the aggregator. - period = "30s" - ## If true drop_original will drop the original metrics and - ## only send aggregates. - drop_original = false -` - -func (m *Min) SampleConfig() string { - return sampleConfig -} - -func (m *Min) Description() string { - return "Keep the aggregate min of each metric passing through." -} - -func (m *Min) Add(in telegraf.Metric) { - id := in.HashID() - if _, ok := m.nameCache[id]; !ok { - // hit an uncached metric, create caches for first time: - m.nameCache[id] = in.Name() - m.tagCache[id] = in.Tags() - m.fieldCache[id] = make(map[string]float64) - for k, v := range in.Fields() { - if fv, ok := convert(v); ok { - m.fieldCache[id][k] = fv - } - } - } else { - for k, v := range in.Fields() { - if fv, ok := convert(v); ok { - if _, ok := m.fieldCache[id][k]; !ok { - // hit an uncached field of a cached metric - m.fieldCache[id][k] = fv - continue - } - if fv < m.fieldCache[id][k] { - // set new minimum - m.fieldCache[id][k] = fv - } - } - } - } -} - -func (m *Min) Push(acc telegraf.Accumulator) { - for id, _ := range m.nameCache { - fields := map[string]interface{}{} - for k, v := range m.fieldCache[id] { - fields[k+"_min"] = v - } - acc.AddFields(m.nameCache[id], fields, m.tagCache[id]) - } -} - -func (m *Min) Reset() { - m.fieldCache = make(map[uint64]map[string]float64) - m.nameCache = make(map[uint64]string) - m.tagCache = make(map[uint64]map[string]string) -} - -func convert(in interface{}) (float64, bool) { - switch v := in.(type) { - case float64: - return v, true - case int64: - return float64(v), true - default: - return 0, false - } -} - -func init() { - aggregators.Add("min", func() telegraf.Aggregator { - return NewMin() - }) -} -``` - -## Unit Tests +**Unit Tests:** Before opening a pull request you should run the linter checks and the short tests. -### Execute linter +**Run static analysis:** -execute `make check` +``` +make check +``` -### Execute short tests +**Run short tests:** -execute `make test` +``` +make test +``` -### Execute integration tests +**Execute integration tests:** Running the integration tests requires several docker containers to be running. You can start the containers with: @@ -497,3 +60,12 @@ make test-all ``` Use `make docker-kill` to stop the containers. + + +[cla]: https://www.influxdata.com/legal/cla/ +[new issue]: https://github.com/influxdata/telegraf/issues/new/choose +[pull request]: https://github.com/influxdata/telegraf/compare +[inputs]: /docs/INPUTS.md +[processors]: /docs/PROCESSORS.md +[aggregators]: /docs/AGGREGATORS.md +[outputs]: /docs/OUTPUTS.md diff --git a/accumulator.go b/accumulator.go index 370f0c70c..825455c4c 100644 --- a/accumulator.go +++ b/accumulator.go @@ -1,16 +1,14 @@ package telegraf -import "time" +import ( + "time" +) -// Accumulator is an interface for "accumulating" metrics from plugin(s). -// The metrics are sent down a channel shared between all plugins. +// Accumulator allows adding metrics to the processing flow. 
 type Accumulator interface {
 	// AddFields adds a metric to the accumulator with the given measurement
 	// name, fields, and tags (and timestamp). If a timestamp is not provided,
 	// then the accumulator sets it to "now".
-	// Create a point with a value, decorating it with tags
-	// NOTE: tags is expected to be owned by the caller, don't mutate
-	// it after passing to Add.
 	AddFields(measurement string,
 		fields map[string]interface{},
 		tags map[string]string,
@@ -40,7 +38,49 @@ type Accumulator interface {
 		tags map[string]string,
 		t ...time.Time)
 
+	// AddMetric adds a metric to the accumulator.
+	AddMetric(Metric)
+
+	// SetPrecision takes two time.Duration objects. If the first is non-zero,
+	// it sets that as the precision. Otherwise, it takes the second argument
+	// as the order of time that the metrics should be rounded to, with the
+	// maximum being 1s.
 	SetPrecision(precision, interval time.Duration)
 
+	// Report an error.
 	AddError(err error)
+
+	// Upgrade to a TrackingAccumulator with space for maxTracked
+	// metrics/batches.
+	WithTracking(maxTracked int) TrackingAccumulator
+}
+
+// TrackingID uniquely identifies a tracked metric group.
+type TrackingID uint64
+
+// DeliveryInfo provides the results of a delivered metric group.
+type DeliveryInfo interface {
+	// ID is the TrackingID
+	ID() TrackingID
+
+	// Delivered returns true if the metric was processed successfully.
+	Delivered() bool
+}
+
+// TrackingAccumulator is an Accumulator that provides a signal when the
+// metric has been fully processed. Sending more metrics than the accumulator
+// has been allocated for without reading status from the Delivered channel is
+// an error.
+type TrackingAccumulator interface {
+	Accumulator
+
+	// Add the Metric and arrange for tracking feedback after processing.
+	AddTrackingMetric(m Metric) TrackingID
+
+	// Add a group of Metrics and arrange for a signal when the group has been
+	// processed.
+	AddTrackingMetricGroup(group []Metric) TrackingID
+
+	// Delivered returns a channel that will contain the tracking results.
+	Delivered() <-chan DeliveryInfo
 }
diff --git a/agent/accumulator.go b/agent/accumulator.go
index 05e99350b..c29b521e9 100644
--- a/agent/accumulator.go
+++ b/agent/accumulator.go
@@ -20,13 +20,13 @@ type MetricMaker interface {
 
 type accumulator struct {
 	maker   MetricMaker
-	metrics chan telegraf.Metric
+	metrics chan<- telegraf.Metric
 
 	precision time.Duration
 }
 
 func NewAccumulator(
 	maker MetricMaker,
-	metrics chan telegraf.Metric,
+	metrics chan<- telegraf.Metric,
 ) telegraf.Accumulator {
 	acc := accumulator{
 		maker:   maker,
@@ -42,7 +42,7 @@ func (ac *accumulator) AddFields(
 	tags map[string]string,
 	t ...time.Time,
 ) {
-	ac.addMetric(measurement, tags, fields, telegraf.Untyped, t...)
+	ac.addFields(measurement, tags, fields, telegraf.Untyped, t...)
 }
 
 func (ac *accumulator) AddGauge(
@@ -51,7 +51,7 @@
 	fields map[string]interface{},
 	tags map[string]string,
 	t ...time.Time,
 ) {
-	ac.addMetric(measurement, tags, fields, telegraf.Gauge, t...)
+	ac.addFields(measurement, tags, fields, telegraf.Gauge, t...)
 }
 
 func (ac *accumulator) AddCounter(
@@ -60,7 +60,7 @@
 	fields map[string]interface{},
 	tags map[string]string,
 	t ...time.Time,
 ) {
-	ac.addMetric(measurement, tags, fields, telegraf.Counter, t...)
+	ac.addFields(measurement, tags, fields, telegraf.Counter, t...)
} func (ac *accumulator) AddSummary( @@ -69,7 +69,7 @@ func (ac *accumulator) AddSummary( tags map[string]string, t ...time.Time, ) { - ac.addMetric(measurement, tags, fields, telegraf.Summary, t...) + ac.addFields(measurement, tags, fields, telegraf.Summary, t...) } func (ac *accumulator) AddHistogram( @@ -78,10 +78,16 @@ func (ac *accumulator) AddHistogram( tags map[string]string, t ...time.Time, ) { - ac.addMetric(measurement, tags, fields, telegraf.Histogram, t...) + ac.addFields(measurement, tags, fields, telegraf.Histogram, t...) } -func (ac *accumulator) addMetric( +func (ac *accumulator) AddMetric(m telegraf.Metric) { + if m := ac.maker.MakeMetric(m); m != nil { + ac.metrics <- m + } +} + +func (ac *accumulator) addFields( measurement string, tags map[string]string, fields map[string]interface{}, @@ -104,13 +110,9 @@ func (ac *accumulator) AddError(err error) { return } NErrors.Incr(1) - log.Printf("E! Error in plugin [%s]: %s", ac.maker.Name(), err) + log.Printf("E! [%s]: Error in plugin: %v", ac.maker.Name(), err) } -// SetPrecision takes two time.Duration objects. If the first is non-zero, -// it sets that as the precision. Otherwise, it takes the second argument -// as the order of time that the metrics should be rounded to, with the -// maximum being 1s. func (ac *accumulator) SetPrecision(precision, interval time.Duration) { if precision > 0 { ac.precision = precision @@ -128,7 +130,7 @@ func (ac *accumulator) SetPrecision(precision, interval time.Duration) { } } -func (ac accumulator) getTime(t []time.Time) time.Time { +func (ac *accumulator) getTime(t []time.Time) time.Time { var timestamp time.Time if len(t) > 0 { timestamp = t[0] @@ -137,3 +139,43 @@ func (ac accumulator) getTime(t []time.Time) time.Time { } return timestamp.Round(ac.precision) } + +func (ac *accumulator) WithTracking(maxTracked int) telegraf.TrackingAccumulator { + return &trackingAccumulator{ + Accumulator: ac, + delivered: make(chan telegraf.DeliveryInfo, maxTracked), + } +} + +type trackingAccumulator struct { + telegraf.Accumulator + delivered chan telegraf.DeliveryInfo +} + +func (a *trackingAccumulator) AddTrackingMetric(m telegraf.Metric) telegraf.TrackingID { + dm, id := metric.WithTracking(m, a.onDelivery) + a.AddMetric(dm) + return id +} + +func (a *trackingAccumulator) AddTrackingMetricGroup(group []telegraf.Metric) telegraf.TrackingID { + db, id := metric.WithGroupTracking(group, a.onDelivery) + for _, m := range db { + a.AddMetric(m) + } + return id +} + +func (a *trackingAccumulator) Delivered() <-chan telegraf.DeliveryInfo { + return a.delivered +} + +func (a *trackingAccumulator) onDelivery(info telegraf.DeliveryInfo) { + select { + case a.delivered <- info: + default: + // This is a programming error in the input. More items were sent for + // tracking than space requested. + panic("channel is full") + } +} diff --git a/agent/agent.go b/agent/agent.go index 6f7b540f2..d8875e447 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -1,9 +1,9 @@ package agent import ( + "context" "fmt" "log" - "os" "runtime" "sync" "time" @@ -12,187 +12,157 @@ import ( "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/config" "github.com/influxdata/telegraf/internal/models" - "github.com/influxdata/telegraf/selfstat" + "github.com/influxdata/telegraf/plugins/serializers/influx" ) -// Agent runs telegraf and collects data based on the given config +// Agent runs a set of plugins. 
type Agent struct { Config *config.Config } -// NewAgent returns an Agent struct based off the given Config +// NewAgent returns an Agent for the given Config. func NewAgent(config *config.Config) (*Agent, error) { a := &Agent{ Config: config, } - - if !a.Config.Agent.OmitHostname { - if a.Config.Agent.Hostname == "" { - hostname, err := os.Hostname() - if err != nil { - return nil, err - } - - a.Config.Agent.Hostname = hostname - } - - config.Tags["host"] = a.Config.Agent.Hostname - } - return a, nil } -// Connect connects to all configured outputs -func (a *Agent) Connect() error { - for _, o := range a.Config.Outputs { - switch ot := o.Output.(type) { - case telegraf.ServiceOutput: - if err := ot.Start(); err != nil { - log.Printf("E! Service for output %s failed to start, exiting\n%s\n", - o.Name, err.Error()) - return err - } +// Run starts and runs the Agent until the context is done. +func (a *Agent) Run(ctx context.Context) error { + log.Printf("I! [agent] Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+ + "Flush Interval:%s", + a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet, + a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration) + + if ctx.Err() != nil { + return ctx.Err() + } + + log.Printf("D! [agent] Connecting outputs") + err := a.connectOutputs(ctx) + if err != nil { + return err + } + + inputC := make(chan telegraf.Metric, 100) + procC := make(chan telegraf.Metric, 100) + outputC := make(chan telegraf.Metric, 100) + + startTime := time.Now() + + log.Printf("D! [agent] Starting service inputs") + err = a.startServiceInputs(ctx, inputC) + if err != nil { + return err + } + + var wg sync.WaitGroup + + src := inputC + dst := inputC + + wg.Add(1) + go func(dst chan telegraf.Metric) { + defer wg.Done() + + err := a.runInputs(ctx, startTime, dst) + if err != nil { + log.Printf("E! [agent] Error running inputs: %v", err) } - log.Printf("D! Attempting connection to output: %s\n", o.Name) - err := o.Output.Connect() - if err != nil { - log.Printf("E! Failed to connect to output %s, retrying in 15s, "+ - "error was '%s' \n", o.Name, err) - time.Sleep(15 * time.Second) - err = o.Output.Connect() + log.Printf("D! [agent] Stopping service inputs") + a.stopServiceInputs() + + close(dst) + log.Printf("D! [agent] Input channel closed") + }(dst) + + src = dst + + if len(a.Config.Processors) > 0 { + dst = procC + + wg.Add(1) + go func(src, dst chan telegraf.Metric) { + defer wg.Done() + + err := a.runProcessors(src, dst) if err != nil { - return err + log.Printf("E! [agent] Error running processors: %v", err) } - } - log.Printf("D! Successfully connected to output: %s\n", o.Name) + close(dst) + log.Printf("D! [agent] Processor channel closed") + }(src, dst) + + src = dst } + + if len(a.Config.Aggregators) > 0 { + dst = outputC + + wg.Add(1) + go func(src, dst chan telegraf.Metric) { + defer wg.Done() + + err := a.runAggregators(startTime, src, dst) + if err != nil { + log.Printf("E! [agent] Error running aggregators: %v", err) + } + close(dst) + log.Printf("D! [agent] Output channel closed") + }(src, dst) + + src = dst + } + + wg.Add(1) + go func(src chan telegraf.Metric) { + defer wg.Done() + + err := a.runOutputs(startTime, src) + if err != nil { + log.Printf("E! [agent] Error running outputs: %v", err) + } + }(src) + + wg.Wait() + + log.Printf("D! 
[agent] Closing outputs") + err = a.closeOutputs() + if err != nil { + return err + } + return nil } -// Close closes the connection to all configured outputs -func (a *Agent) Close() error { - var err error - for _, o := range a.Config.Outputs { - err = o.Output.Close() - switch ot := o.Output.(type) { - case telegraf.ServiceOutput: - ot.Stop() - } - } - return err -} - -func panicRecover(input *models.RunningInput) { - if err := recover(); err != nil { - trace := make([]byte, 2048) - runtime.Stack(trace, true) - log.Printf("E! FATAL: Input [%s] panicked: %s, Stack:\n%s\n", - input.Name(), err, trace) - log.Println("E! PLEASE REPORT THIS PANIC ON GITHUB with " + - "stack trace, configuration, and OS information: " + - "https://github.com/influxdata/telegraf/issues/new") - } -} - -// gatherer runs the inputs that have been configured with their own -// reporting interval. -func (a *Agent) gatherer( - shutdown chan struct{}, - input *models.RunningInput, - interval time.Duration, - metricC chan telegraf.Metric, -) { - defer panicRecover(input) - - GatherTime := selfstat.RegisterTiming("gather", - "gather_time_ns", - map[string]string{"input": input.Config.Name}, - ) - - acc := NewAccumulator(input, metricC) - acc.SetPrecision(a.Config.Agent.Precision.Duration, - a.Config.Agent.Interval.Duration) - - ticker := time.NewTicker(interval) - defer ticker.Stop() - - for { - internal.RandomSleep(a.Config.Agent.CollectionJitter.Duration, shutdown) - - start := time.Now() - gatherWithTimeout(shutdown, input, acc, interval) - elapsed := time.Since(start) - - GatherTime.Incr(elapsed.Nanoseconds()) - - select { - case <-shutdown: - return - case <-ticker.C: - continue - } - } -} - -// gatherWithTimeout gathers from the given input, with the given timeout. -// when the given timeout is reached, gatherWithTimeout logs an error message -// but continues waiting for it to return. This is to avoid leaving behind -// hung processes, and to prevent re-calling the same hung process over and -// over. -func gatherWithTimeout( - shutdown chan struct{}, - input *models.RunningInput, - acc telegraf.Accumulator, - timeout time.Duration, -) { - ticker := time.NewTicker(timeout) - defer ticker.Stop() - done := make(chan error) - go func() { - done <- input.Input.Gather(acc) +// Test runs the inputs once and prints the output to stdout in line protocol. 
+func (a *Agent) Test() error {
+	var wg sync.WaitGroup
+	metricC := make(chan telegraf.Metric)
+	defer func() {
+		close(metricC)
+		wg.Wait()
 	}()
 
-	for {
-		select {
-		case err := <-done:
-			if err != nil {
-				acc.AddError(err)
-			}
-			return
-		case <-ticker.C:
-			err := fmt.Errorf("took longer to collect than collection interval (%s)",
-				timeout)
-			acc.AddError(err)
-			continue
-		case <-shutdown:
-			return
-		}
-	}
-}
-
-// Test verifies that we can 'Gather' from all inputs with their configured
-// Config struct
-func (a *Agent) Test() error {
-	shutdown := make(chan struct{})
-	defer close(shutdown)
-	metricC := make(chan telegraf.Metric)
-
-	// dummy receiver for the point channel
+	wg.Add(1)
 	go func() {
-		for {
-			select {
-			case <-metricC:
-				// do nothing
-			case <-shutdown:
-				return
+		defer wg.Done()
+
+		s := influx.NewSerializer()
+		s.SetFieldSortOrder(influx.SortFields)
+		for metric := range metricC {
+			octets, err := s.Serialize(metric)
+			if err == nil {
+				fmt.Print("> ", string(octets))
 			}
 		}
 	}()
 
 	for _, input := range a.Config.Inputs {
 		if _, ok := input.Input.(telegraf.ServiceInput); ok {
-			fmt.Printf("\nWARNING: skipping plugin [[%s]]: service inputs not supported in --test mode\n",
+			log.Printf("W! [agent] skipping plugin [[%s]]: service inputs not supported in --test mode",
 				input.Name())
 			continue
 		}
@@ -200,7 +170,6 @@ func (a *Agent) Test() error {
 		acc := NewAccumulator(input, metricC)
 		acc.SetPrecision(a.Config.Agent.Precision.Duration,
 			a.Config.Agent.Interval.Duration)
-		input.SetTrace(true)
 		input.SetDefaultTags(a.Config.Tags)
 
 		if err := input.Input.Gather(acc); err != nil {
@@ -218,216 +187,445 @@ func (a *Agent) Test() error {
 		}
 	}
 
+	return nil
 }
 
-// flush writes a list of metrics to all configured outputs
-func (a *Agent) flush() {
-	var wg sync.WaitGroup
-
-	wg.Add(len(a.Config.Outputs))
-	for _, o := range a.Config.Outputs {
-		go func(output *models.RunningOutput) {
-			defer wg.Done()
-			err := output.Write()
-			if err != nil {
-				log.Printf("E! Error writing to output [%s]: %s\n",
-					output.Name, err.Error())
-			}
-		}(o)
-	}
-
-	wg.Wait()
-}
-
-// flusher monitors the metrics input channel and flushes on the minimum interval
-func (a *Agent) flusher(
-	shutdown chan struct{},
-	metricC chan telegraf.Metric,
-	aggMetricC chan telegraf.Metric,
-	outMetricC chan telegraf.Metric,
+// runInputs starts and triggers the periodic gather for Inputs.
+//
+// When the context is done, the timers are stopped and this function returns
+// after all ongoing Gather calls complete.
+func (a *Agent) runInputs(
+	ctx context.Context,
+	startTime time.Time,
+	dst chan<- telegraf.Metric,
 ) error {
 	var wg sync.WaitGroup
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		for {
-			select {
-			case <-shutdown:
-				if len(outMetricC) > 0 {
-					// keep going until channel is empty
-					continue
-				}
-				return
-			case metric := <-outMetricC:
-				for i, o := range a.Config.Outputs {
-					if i == len(a.Config.Outputs)-1 {
-						o.AddMetric(metric)
-					} else {
-						o.AddMetric(metric.Copy())
-					}
-				}
-			}
-		}
-	}()
-
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		for metric := range aggMetricC {
-			// Apply Processors
-			metrics := []telegraf.Metric{metric}
-			for _, processor := range a.Config.Processors {
-				metrics = processor.Apply(metrics...)
- } - outMetricC <- metric - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case <-shutdown: - if len(metricC) > 0 { - // keep going until channel is empty - continue - } - close(aggMetricC) - return - case metric := <-metricC: - // Apply Processors - metrics := []telegraf.Metric{metric} - for _, processor := range a.Config.Processors { - metrics = processor.Apply(metrics...) - } - - for _, metric := range metrics { - // Apply Aggregators - var dropOriginal bool - for _, agg := range a.Config.Aggregators { - if ok := agg.Add(metric.Copy()); ok { - dropOriginal = true - } - } - - // Forward metric to Outputs - if !dropOriginal { - outMetricC <- metric - } - } - } - } - }() - - ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration) - semaphore := make(chan struct{}, 1) - for { - select { - case <-shutdown: - log.Println("I! Hang on, flushing any cached metrics before shutdown") - // wait for outMetricC to get flushed before flushing outputs - wg.Wait() - a.flush() - return nil - case <-ticker.C: - go func() { - select { - case semaphore <- struct{}{}: - internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown) - a.flush() - <-semaphore - default: - // skipping this flush because one is already happening - log.Println("W! Skipping a scheduled flush because there is" + - " already a flush ongoing.") - } - }() - } - } -} - -// Run runs the agent daemon, gathering every Interval -func (a *Agent) Run(shutdown chan struct{}) error { - var wg sync.WaitGroup - - log.Printf("I! Agent Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+ - "Flush Interval:%s \n", - a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet, - a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration) - - // Channel shared between all input threads for accumulating metrics - metricC := make(chan telegraf.Metric, 100) - - // Channel for metrics ready to be output - outMetricC := make(chan telegraf.Metric, 100) - - // Channel for aggregated metrics - aggMetricC := make(chan telegraf.Metric, 100) - - // Round collection to nearest interval by sleeping - if a.Config.Agent.RoundInterval { - i := int64(a.Config.Agent.Interval.Duration) - time.Sleep(time.Duration(i - (time.Now().UnixNano() % i))) - } - - wg.Add(1) - go func() { - defer wg.Done() - if err := a.flusher(shutdown, metricC, aggMetricC, outMetricC); err != nil { - log.Printf("E! Flusher routine failed, exiting: %s\n", err.Error()) - close(shutdown) - } - }() - - wg.Add(len(a.Config.Aggregators)) - for _, aggregator := range a.Config.Aggregators { - go func(agg *models.RunningAggregator) { - defer wg.Done() - acc := NewAccumulator(agg, aggMetricC) - acc.SetPrecision(a.Config.Agent.Precision.Duration, - a.Config.Agent.Interval.Duration) - agg.Run(acc, shutdown) - }(aggregator) - } - - // Service inputs may immediately add metrics, if metrics are added before - // the aggregator starts they will be dropped. Generally this occurs - // only during testing but it is an outstanding issue. - // - // https://github.com/influxdata/telegraf/issues/4394 - for _, input := range a.Config.Inputs { - input.SetDefaultTags(a.Config.Tags) - switch p := input.Input.(type) { - case telegraf.ServiceInput: - acc := NewAccumulator(input, metricC) - // Service input plugins should set their own precision of their - // metrics. - acc.SetPrecision(time.Nanosecond, 0) - if err := p.Start(acc); err != nil { - log.Printf("E! 
Service for input %s failed to start, exiting\n%s\n",
-					input.Name(), err.Error())
-				return err
-			}
-			defer p.Stop()
-		}
-	}
-
-	wg.Add(len(a.Config.Inputs))
 	for _, input := range a.Config.Inputs {
 		interval := a.Config.Agent.Interval.Duration
-		// overwrite global interval if this plugin has it's own.
+		precision := a.Config.Agent.Precision.Duration
+		jitter := a.Config.Agent.CollectionJitter.Duration
+
+		// Overwrite agent interval if this plugin has its own.
 		if input.Config.Interval != 0 {
 			interval = input.Config.Interval
 		}
-		go func(in *models.RunningInput, interv time.Duration) {
+
+		acc := NewAccumulator(input, dst)
+		acc.SetPrecision(precision, interval)
+
+		wg.Add(1)
+		go func(input *models.RunningInput) {
 			defer wg.Done()
-			a.gatherer(shutdown, in, interv, metricC)
-		}(input, interval)
+
+			if a.Config.Agent.RoundInterval {
+				err := internal.SleepContext(
+					ctx, internal.AlignDuration(startTime, interval))
+				if err != nil {
+					return
+				}
+			}
+
+			a.gatherOnInterval(ctx, acc, input, interval, jitter)
+		}(input)
 	}
 	wg.Wait()
+
+	return nil
+}
+
+// gatherOnInterval runs an input's Gather function periodically until the
+// context is done.
+func (a *Agent) gatherOnInterval(
+	ctx context.Context,
+	acc telegraf.Accumulator,
+	input *models.RunningInput,
+	interval time.Duration,
+	jitter time.Duration,
+) {
+	defer panicRecover(input)
+
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+
+	for {
+		err := internal.SleepContext(ctx, internal.RandomDuration(jitter))
+		if err != nil {
+			return
+		}
+
+		err = a.gatherOnce(acc, input, interval)
+		if err != nil {
+			acc.AddError(err)
+		}
+
+		select {
+		case <-ticker.C:
+			continue
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+// gatherOnce runs the input's Gather function once, logging a warning each
+// interval it fails to complete before.
+func (a *Agent) gatherOnce(
+	acc telegraf.Accumulator,
+	input *models.RunningInput,
+	timeout time.Duration,
+) error {
+	ticker := time.NewTicker(timeout)
+	defer ticker.Stop()
+
+	done := make(chan error)
+	go func() {
+		done <- input.Gather(acc)
+	}()
+
+	for {
+		select {
+		case err := <-done:
+			return err
+		case <-ticker.C:
+			log.Printf("W! [agent] input %q did not complete within its interval",
+				input.Name())
+		}
+	}
+}
+
+// runProcessors applies processors to metrics.
+func (a *Agent) runProcessors(
+	src <-chan telegraf.Metric,
+	agg chan<- telegraf.Metric,
+) error {
+	for metric := range src {
+		metrics := a.applyProcessors(metric)
+
+		for _, metric := range metrics {
+			agg <- metric
+		}
+	}
+
+	return nil
+}
+
+// applyProcessors applies all processors to a metric.
+func (a *Agent) applyProcessors(m telegraf.Metric) []telegraf.Metric {
+	metrics := []telegraf.Metric{m}
+	for _, processor := range a.Config.Processors {
+		metrics = processor.Apply(metrics...)
+	}
+
+	return metrics
+}
+
+// runAggregators triggers the periodic push for Aggregators.
+//
+// When the context is done, a final push will occur and then this function
+// will return.
+func (a *Agent) runAggregators(
+	startTime time.Time,
+	src <-chan telegraf.Metric,
+	dst chan<- telegraf.Metric,
+) error {
+	ctx, cancel := context.WithCancel(context.Background())
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for metric := range src {
+			var dropOriginal bool
+			for _, agg := range a.Config.Aggregators {
+				if ok := agg.Add(metric); ok {
+					dropOriginal = true
+				}
+			}
+
+			if !dropOriginal {
+				dst <- metric
+			}
+		}
+		cancel()
+	}()
+
+	precision := a.Config.Agent.Precision.Duration
+	interval := a.Config.Agent.Interval.Duration
+	aggregations := make(chan telegraf.Metric, 100)
+	for _, agg := range a.Config.Aggregators {
+		wg.Add(1)
+		go func(agg *models.RunningAggregator) {
+			defer wg.Done()
+
+			if a.Config.Agent.RoundInterval {
+				// Aggregators are aligned to the agent interval regardless of
+				// their period.
+				err := internal.SleepContext(ctx, internal.AlignDuration(startTime, interval))
+				if err != nil {
+					return
+				}
+			}
+
+			agg.SetPeriodStart(startTime)
+
+			acc := NewAccumulator(agg, aggregations)
+			acc.SetPrecision(precision, interval)
+			a.push(ctx, agg, acc)
+			close(aggregations)
+		}(agg)
+	}
+
+	for metric := range aggregations {
+		metrics := a.applyProcessors(metric)
+		for _, metric := range metrics {
+			dst <- metric
+		}
 	}
 
 	wg.Wait()
-	a.Close()
 
 	return nil
 }
+
+// push runs the push for a single aggregator every period. It is simpler than
+// the input gather and output flush loops since a timeout is less of a
+// concern, although the send on the output channel can still block.
+func (a *Agent) push(
+	ctx context.Context,
+	aggregator *models.RunningAggregator,
+	acc telegraf.Accumulator,
+) {
+	ticker := time.NewTicker(aggregator.Period())
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			break
+		case <-ctx.Done():
+			aggregator.Push(acc)
+			return
+		}
+
+		aggregator.Push(acc)
+	}
+}
+
+// runOutputs triggers the periodic write for Outputs.
+//
+// When the context is done, outputs continue to run until their buffer is
+// closed, after which they run flush once more.
+func (a *Agent) runOutputs(
+	startTime time.Time,
+	src <-chan telegraf.Metric,
+) error {
+	interval := a.Config.Agent.FlushInterval.Duration
+	jitter := a.Config.Agent.FlushJitter.Duration
+
+	ctx, cancel := context.WithCancel(context.Background())
+
+	var wg sync.WaitGroup
+	for _, output := range a.Config.Outputs {
+		interval := interval
+		// Overwrite agent flush_interval if this plugin has its own.
+		if output.Config.FlushInterval != 0 {
+			interval = output.Config.FlushInterval
+		}
+
+		wg.Add(1)
+		go func(output *models.RunningOutput) {
+			defer wg.Done()
+
+			if a.Config.Agent.RoundInterval {
+				err := internal.SleepContext(
+					ctx, internal.AlignDuration(startTime, interval))
+				if err != nil {
+					return
+				}
+			}
+
+			a.flush(ctx, output, interval, jitter)
+		}(output)
+	}
+
+	for metric := range src {
+		for i, output := range a.Config.Outputs {
+			if i == len(a.Config.Outputs)-1 {
+				output.AddMetric(metric)
+			} else {
+				output.AddMetric(metric.Copy())
+			}
+		}
+	}
+
+	log.Println("I! [agent] Hang on, flushing any cached metrics before shutdown")
+	cancel()
+	wg.Wait()
+
+	return nil
+}
+
+// flush runs an output's flush function periodically until the context is
+// done.
+func (a *Agent) flush(
+	ctx context.Context,
+	output *models.RunningOutput,
+	interval time.Duration,
+	jitter time.Duration,
+) {
+	// Since we are watching two channels, we need a ticker with the jitter
+	// integrated.
+ ticker := NewTicker(interval, jitter) + defer ticker.Stop() + + logError := func(err error) { + if err != nil { + log.Printf("E! [agent] Error writing to output [%s]: %v", output.Name, err) + } + } + + for { + // Favor shutdown over other methods. + select { + case <-ctx.Done(): + logError(a.flushOnce(output, interval, output.Write)) + return + default: + } + + select { + case <-ticker.C: + logError(a.flushOnce(output, interval, output.Write)) + case <-output.BatchReady: + // Favor the ticker over batch ready + select { + case <-ticker.C: + logError(a.flushOnce(output, interval, output.Write)) + default: + logError(a.flushOnce(output, interval, output.WriteBatch)) + } + case <-ctx.Done(): + logError(a.flushOnce(output, interval, output.Write)) + return + } + } +} + +// flushOnce runs the output's Write function once, logging a warning each +// interval it fails to complete before. +func (a *Agent) flushOnce( + output *models.RunningOutput, + timeout time.Duration, + writeFunc func() error, +) error { + ticker := time.NewTicker(timeout) + defer ticker.Stop() + + done := make(chan error) + go func() { + done <- writeFunc() + }() + + for { + select { + case err := <-done: + output.LogBufferStatus() + return err + case <-ticker.C: + log.Printf("W! [agent] output %q did not complete within its flush interval", + output.Name) + output.LogBufferStatus() + } + } + +} + +// connectOutputs connects to all outputs. +func (a *Agent) connectOutputs(ctx context.Context) error { + for _, output := range a.Config.Outputs { + log.Printf("D! [agent] Attempting connection to output: %s\n", output.Name) + err := output.Output.Connect() + if err != nil { + log.Printf("E! [agent] Failed to connect to output %s, retrying in 15s, "+ + "error was '%s' \n", output.Name, err) + + err := internal.SleepContext(ctx, 15*time.Second) + if err != nil { + return err + } + + err = output.Output.Connect() + if err != nil { + return err + } + } + log.Printf("D! [agent] Successfully connected to output: %s\n", output.Name) + } + return nil +} + +// closeOutputs closes all outputs. +func (a *Agent) closeOutputs() error { + var err error + for _, output := range a.Config.Outputs { + err = output.Output.Close() + } + return err +} + +// startServiceInputs starts all service inputs. +func (a *Agent) startServiceInputs( + ctx context.Context, + dst chan<- telegraf.Metric, +) error { + started := []telegraf.ServiceInput{} + + for _, input := range a.Config.Inputs { + if si, ok := input.Input.(telegraf.ServiceInput); ok { + // Service input plugins are not subject to timestamp rounding. + // This only applies to the accumulator passed to Start(), the + // Gather() accumulator does apply rounding according to the + // precision agent setting. + acc := NewAccumulator(input, dst) + acc.SetPrecision(time.Nanosecond, 0) + + err := si.Start(acc) + if err != nil { + log.Printf("E! [agent] Service for input %s failed to start: %v", + input.Name(), err) + + for _, si := range started { + si.Stop() + } + + return err + } + + started = append(started, si) + } + } + + return nil +} + +// stopServiceInputs stops all service inputs. +func (a *Agent) stopServiceInputs() { + for _, input := range a.Config.Inputs { + if si, ok := input.Input.(telegraf.ServiceInput); ok { + si.Stop() + } + } +} + +// panicRecover displays an error if an input panics. +func panicRecover(input *models.RunningInput) { + if err := recover(); err != nil { + trace := make([]byte, 2048) + runtime.Stack(trace, true) + log.Printf("E! 
FATAL: Input [%s] panicked: %s, Stack:\n%s\n", + input.Name(), err, trace) + log.Println("E! PLEASE REPORT THIS PANIC ON GITHUB with " + + "stack trace, configuration, and OS information: " + + "https://github.com/influxdata/telegraf/issues/new/choose") + } +} diff --git a/agent/tick.go b/agent/tick.go new file mode 100644 index 000000000..64dbff50b --- /dev/null +++ b/agent/tick.go @@ -0,0 +1,57 @@ +package agent + +import ( + "context" + "sync" + "time" + + "github.com/influxdata/telegraf/internal" +) + +type Ticker struct { + C chan time.Time + ticker *time.Ticker + jitter time.Duration + wg sync.WaitGroup + cancelFunc context.CancelFunc +} + +func NewTicker( + interval time.Duration, + jitter time.Duration, +) *Ticker { + ctx, cancel := context.WithCancel(context.Background()) + + t := &Ticker{ + C: make(chan time.Time, 1), + ticker: time.NewTicker(interval), + jitter: jitter, + cancelFunc: cancel, + } + + t.wg.Add(1) + go t.relayTime(ctx) + + return t +} + +func (t *Ticker) Stop() { + t.cancelFunc() + t.wg.Wait() +} + +func (t *Ticker) relayTime(ctx context.Context) { + defer t.wg.Done() + for { + select { + case tm := <-t.ticker.C: + internal.SleepContext(ctx, internal.RandomDuration(t.jitter)) + select { + case t.C <- tm: + default: + } + case <-ctx.Done(): + return + } + } +} diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 7c451c2db..0ad6fe717 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -1,6 +1,8 @@ package main import ( + "context" + "errors" "flag" "fmt" "log" @@ -78,112 +80,111 @@ func reloadLoop( for <-reload { reload <- false - // If no other options are specified, load the config file and run. - c := config.NewConfig() - c.OutputFilters = outputFilters - c.InputFilters = inputFilters - err := c.LoadConfig(*fConfig) - if err != nil { - log.Fatal("E! " + err.Error()) - } + ctx, cancel := context.WithCancel(context.Background()) - if *fConfigDirectory != "" { - err = c.LoadDirectory(*fConfigDirectory) - if err != nil { - log.Fatal("E! " + err.Error()) - } - } - if !*fTest && len(c.Outputs) == 0 { - log.Fatalf("E! Error: no outputs found, did you provide a valid config file?") - } - if len(c.Inputs) == 0 { - log.Fatalf("E! Error: no inputs found, did you provide a valid config file?") - } - - if int64(c.Agent.Interval.Duration) <= 0 { - log.Fatalf("E! Agent interval must be positive, found %s", - c.Agent.Interval.Duration) - } - - if int64(c.Agent.FlushInterval.Duration) <= 0 { - log.Fatalf("E! Agent flush_interval must be positive; found %s", - c.Agent.Interval.Duration) - } - - ag, err := agent.NewAgent(c) - if err != nil { - log.Fatal("E! " + err.Error()) - } - - // Setup logging - logger.SetupLogging( - ag.Config.Agent.Debug || *fDebug, - ag.Config.Agent.Quiet || *fQuiet, - ag.Config.Agent.Logfile, - ) - - if *fTest { - err = ag.Test() - if err != nil { - log.Fatal("E! " + err.Error()) - } - os.Exit(0) - } - - err = ag.Connect() - if err != nil { - log.Fatal("E! " + err.Error()) - } - - shutdown := make(chan struct{}) signals := make(chan os.Signal) signal.Notify(signals, os.Interrupt, syscall.SIGHUP, syscall.SIGTERM) go func() { select { case sig := <-signals: - if sig == os.Interrupt || sig == syscall.SIGTERM { - close(shutdown) - } if sig == syscall.SIGHUP { - log.Printf("I! Reloading Telegraf config\n") + log.Printf("I! Reloading Telegraf config") <-reload reload <- true - close(shutdown) } + cancel() case <-stop: - close(shutdown) + cancel() } }() - log.Printf("I! Starting Telegraf %s\n", version) - log.Printf("I! 
Loaded inputs: %s", strings.Join(c.InputNames(), " ")) - log.Printf("I! Loaded aggregators: %s", strings.Join(c.AggregatorNames(), " ")) - log.Printf("I! Loaded processors: %s", strings.Join(c.ProcessorNames(), " ")) - log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " ")) - log.Printf("I! Tags enabled: %s", c.ListTags()) - - if *fPidfile != "" { - f, err := os.OpenFile(*fPidfile, os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - log.Printf("E! Unable to create pidfile: %s", err) - } else { - fmt.Fprintf(f, "%d\n", os.Getpid()) - - f.Close() - - defer func() { - err := os.Remove(*fPidfile) - if err != nil { - log.Printf("E! Unable to remove pidfile: %s", err) - } - }() - } + err := runAgent(ctx, inputFilters, outputFilters) + if err != nil { + log.Fatalf("E! [telegraf] Error running agent: %v", err) } - - ag.Run(shutdown) } } +func runAgent(ctx context.Context, + inputFilters []string, + outputFilters []string, +) error { + // If no other options are specified, load the config file and run. + c := config.NewConfig() + c.OutputFilters = outputFilters + c.InputFilters = inputFilters + err := c.LoadConfig(*fConfig) + if err != nil { + return err + } + + if *fConfigDirectory != "" { + err = c.LoadDirectory(*fConfigDirectory) + if err != nil { + return err + } + } + if !*fTest && len(c.Outputs) == 0 { + return errors.New("Error: no outputs found, did you provide a valid config file?") + } + if len(c.Inputs) == 0 { + return errors.New("Error: no inputs found, did you provide a valid config file?") + } + + if int64(c.Agent.Interval.Duration) <= 0 { + return fmt.Errorf("Agent interval must be positive, found %s", + c.Agent.Interval.Duration) + } + + if int64(c.Agent.FlushInterval.Duration) <= 0 { + return fmt.Errorf("Agent flush_interval must be positive; found %s", + c.Agent.Interval.Duration) + } + + ag, err := agent.NewAgent(c) + if err != nil { + return err + } + + // Setup logging + logger.SetupLogging( + ag.Config.Agent.Debug || *fDebug, + ag.Config.Agent.Quiet || *fQuiet, + ag.Config.Agent.Logfile, + ) + + if *fTest { + return ag.Test() + } + + log.Printf("I! Starting Telegraf %s\n", version) + log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " ")) + log.Printf("I! Loaded aggregators: %s", strings.Join(c.AggregatorNames(), " ")) + log.Printf("I! Loaded processors: %s", strings.Join(c.ProcessorNames(), " ")) + log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " ")) + log.Printf("I! Tags enabled: %s", c.ListTags()) + + if *fPidfile != "" { + f, err := os.OpenFile(*fPidfile, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Printf("E! Unable to create pidfile: %s", err) + } else { + fmt.Fprintf(f, "%d\n", os.Getpid()) + + f.Close() + + defer func() { + err := os.Remove(*fPidfile) + if err != nil { + log.Printf("E! Unable to remove pidfile: %s", err) + } + }() + } + } + + return ag.Run(ctx) +} + func usageExit(rc int) { fmt.Println(internal.Usage) os.Exit(rc) diff --git a/docs/AGGREGATORS.md b/docs/AGGREGATORS.md new file mode 100644 index 000000000..d0e926718 --- /dev/null +++ b/docs/AGGREGATORS.md @@ -0,0 +1,126 @@ +### Aggregator Plugins + +This section is for developers who want to create a new aggregator plugin. + +### Aggregator Plugin Guidelines + +* A aggregator must conform to the [telegraf.Aggregator][] interface. +* Aggregators should call `aggregators.Add` in their `init` function to + register themselves. See below for a quick example. 
+* To be available within Telegraf itself, plugins must add themselves to the
+  `github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file.
+* The `SampleConfig` function should return valid toml that describes how the
+  plugin can be configured. This is included in `telegraf config`. Please
+  consult the [SampleConfig][] page for the latest style guidelines.
+* The `Description` function should say in one line what this aggregator does.
+* The Aggregator plugin will need to keep caches of metrics that have passed
+  through it. This should be done using the builtin `HashID()` function of
+  each metric.
+* When the `Reset()` function is called, all caches should be cleared.
+
+### Aggregator Plugin Example
+
+```go
+package min
+
+// min.go
+
+import (
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/aggregators"
+)
+
+type Min struct {
+	// caches for metric fields, names, and tags
+	fieldCache map[uint64]map[string]float64
+	nameCache  map[uint64]string
+	tagCache   map[uint64]map[string]string
+}
+
+func NewMin() telegraf.Aggregator {
+	m := &Min{}
+	m.Reset()
+	return m
+}
+
+var sampleConfig = `
+  ## period is the flush & clear interval of the aggregator.
+  period = "30s"
+  ## If true drop_original will drop the original metrics and
+  ## only send aggregates.
+  drop_original = false
+`
+
+func (m *Min) SampleConfig() string {
+	return sampleConfig
+}
+
+func (m *Min) Description() string {
+	return "Keep the aggregate min of each metric passing through."
+}
+
+func (m *Min) Add(in telegraf.Metric) {
+	id := in.HashID()
+	if _, ok := m.nameCache[id]; !ok {
+		// hit an uncached metric, create caches for first time:
+		m.nameCache[id] = in.Name()
+		m.tagCache[id] = in.Tags()
+		m.fieldCache[id] = make(map[string]float64)
+		for k, v := range in.Fields() {
+			if fv, ok := convert(v); ok {
+				m.fieldCache[id][k] = fv
+			}
+		}
+	} else {
+		for k, v := range in.Fields() {
+			if fv, ok := convert(v); ok {
+				if _, ok := m.fieldCache[id][k]; !ok {
+					// hit an uncached field of a cached metric
+					m.fieldCache[id][k] = fv
+					continue
+				}
+				if fv < m.fieldCache[id][k] {
+					// set new minimum
+					m.fieldCache[id][k] = fv
+				}
+			}
+		}
+	}
+}
+
+func (m *Min) Push(acc telegraf.Accumulator) {
+	for id := range m.nameCache {
+		fields := map[string]interface{}{}
+		for k, v := range m.fieldCache[id] {
+			fields[k+"_min"] = v
+		}
+		acc.AddFields(m.nameCache[id], fields, m.tagCache[id])
+	}
+}
+
+func (m *Min) Reset() {
+	m.fieldCache = make(map[uint64]map[string]float64)
+	m.nameCache = make(map[uint64]string)
+	m.tagCache = make(map[uint64]map[string]string)
+}
+
+func convert(in interface{}) (float64, bool) {
+	switch v := in.(type) {
+	case float64:
+		return v, true
+	case int64:
+		return float64(v), true
+	default:
+		return 0, false
+	}
+}
+
+func init() {
+	aggregators.Add("min", func() telegraf.Aggregator {
+		return NewMin()
+	})
+}
+```
+
+[telegraf.Aggregator]: https://godoc.org/github.com/influxdata/telegraf#Aggregator
+[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig
diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md
index 27002be0d..4677e54f2 100644
--- a/docs/CONFIGURATION.md
+++ b/docs/CONFIGURATION.md
@@ -106,6 +106,14 @@ emitted from the input plugin.
 
 ### Output Configuration
 
+- **flush_interval**: The maximum time between flushes. Use this setting to
+  override the agent `flush_interval` on a per plugin basis.
+- **metric_batch_size**: The maximum number of metrics to send at once. Use
+  this setting to override the agent `metric_batch_size` on a per plugin basis.
+- **metric_buffer_limit**: The maximum number of unsent metrics to buffer.
+  Use this setting to override the agent `metric_buffer_limit` on a per plugin
+  basis.
+
 The [metric filtering](#metric-filtering) parameters can be used to limit
 what metrics are emitted from the output plugin.
 
diff --git a/docs/INPUTS.md b/docs/INPUTS.md
new file mode 100644
index 000000000..b1b196398
--- /dev/null
+++ b/docs/INPUTS.md
@@ -0,0 +1,143 @@
+### Input Plugins
+
+This section is for developers who want to create new collection inputs.
+Telegraf is entirely plugin driven. This interface allows for operators to
+pick and choose what is gathered and makes it easy for developers
+to create new ways of generating metrics.
+
+Plugin authorship is kept as simple as possible to encourage people to develop
+and submit new inputs.
+
+### Input Plugin Guidelines
+
+- A plugin must conform to the [telegraf.Input][] interface.
+- Input Plugins should call `inputs.Add` in their `init` function to register
+  themselves. See below for a quick example.
+- Input Plugins must be added to the
+  `github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
+- The `SampleConfig` function should return valid toml that describes how the
+  plugin can be configured. This is included in `telegraf config`. Please
+  consult the [SampleConfig][] page for the latest style
+  guidelines.
+- The `Description` function should say in one line what this plugin does.
+
+Let's say you've written a plugin that emits metrics about processes on the
+current host.
+
+### Input Plugin Example
+
+```go
+package simple
+
+// simple.go
+
+import (
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+type Simple struct {
+	Ok bool
+}
+
+func (s *Simple) Description() string {
+	return "a demo plugin"
+}
+
+func (s *Simple) SampleConfig() string {
+	return `
+  ## Indicate if everything is fine
+  ok = true
+`
+}
+
+func (s *Simple) Gather(acc telegraf.Accumulator) error {
+	if s.Ok {
+		acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, nil)
+	} else {
+		acc.AddFields("state", map[string]interface{}{"value": "not great"}, nil)
+	}
+
+	return nil
+}
+
+func init() {
+	inputs.Add("simple", func() telegraf.Input { return &Simple{} })
+}
+```
+
+### Development
+
+* Run `make static` followed by `make plugin-[pluginName]` to spin up a docker
+  dev environment using docker-compose.
+* ***[Optional]*** When developing a plugin, add a `dev` directory with a
+  `docker-compose.yml` and `telegraf.conf` as well as any other supporting
+  files, where sensible.
+
+### Typed Metrics
+
+In addition to the `AddFields` function, the accumulator also supports
+functions to add typed metrics: `AddGauge`, `AddCounter`, etc. Metric types
+are ignored by the InfluxDB output, but can be used for other outputs, such as
+[prometheus][prom metric types].
+
+### Data Formats
+
+Some input plugins, such as the [exec][] plugin, can accept any supported
+[input data formats][].
+
+In order to enable this, you must specify a `SetParser(parser parsers.Parser)`
+function on the plugin object (see the exec plugin for an example), as well as
+defining `parser` as a field of the object.
+
+You can then utilize the parser internally in your plugin, parsing data as you
+see fit. Telegraf's configuration layer will take care of instantiating and
+creating the `Parser` object.
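+
+As a rough sketch of how these pieces fit together, a parser-based input
+might look like the following. The `logfile` plugin name, its `File` option,
+and the file-reading logic are hypothetical and shown only to illustrate the
+`parser` field and `SetParser` wiring:
+
+```go
+package logfile
+
+// logfile.go (hypothetical example)
+
+import (
+	"io/ioutil"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"github.com/influxdata/telegraf/plugins/parsers"
+)
+
+type Logfile struct {
+	// File is a hypothetical option naming the file to read each interval.
+	File string
+
+	parser parsers.Parser
+}
+
+func (l *Logfile) Description() string {
+	return "a demo plugin that parses the contents of a file"
+}
+
+func (l *Logfile) SampleConfig() string {
+	return `
+  ## File to read on each interval
+  file = "/tmp/metrics"
+`
+}
+
+// SetParser is called by Telegraf's configuration layer with the parser
+// built from the plugin's data_format option.
+func (l *Logfile) SetParser(parser parsers.Parser) {
+	l.parser = parser
+}
+
+func (l *Logfile) Gather(acc telegraf.Accumulator) error {
+	buf, err := ioutil.ReadFile(l.File)
+	if err != nil {
+		return err
+	}
+
+	// Parse the raw bytes into metrics and forward them to the accumulator.
+	metrics, err := l.parser.Parse(buf)
+	if err != nil {
+		return err
+	}
+	for _, m := range metrics {
+		acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
+	}
+	return nil
+}
+
+func init() {
+	inputs.Add("logfile", func() telegraf.Input { return &Logfile{} })
+}
+```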
+
+Add the following to the `SampleConfig()`:
+
+```toml
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "influx"
+```
+
+### Service Input Plugins
+
+This section is for developers who want to create new "service" collection
+inputs. A service plugin differs from a regular plugin in that it operates a
+background service while Telegraf is running. One example would be the
+`statsd` plugin, which operates a statsd server.
+
+Service Input Plugins are substantially more complicated than a regular
+plugin, as they will require threads and locks to verify data integrity.
+Service Input Plugins should be avoided unless there is no way to create their
+behavior with a regular plugin.
+
+To create a Service Input, implement the [telegraf.ServiceInput][] interface.
+
+### Metric Tracking
+
+Metric Tracking provides a system to be notified when metrics have been
+successfully written to their outputs or otherwise discarded. This allows
+inputs to be created that function as reliable queue consumers.
+
+To get started with metric tracking, begin by calling `WithTracking` on the
+[telegraf.Accumulator][]. Add metrics using the `AddTrackingMetricGroup`
+function on the returned [telegraf.TrackingAccumulator][] and store the
+`TrackingID`. The `Delivered()` channel will return a type with information
+about the final delivery status of the metric group.
+
+Check the [amqp_consumer][] for an example implementation.
+
+[exec]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec
+[amqp_consumer]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/amqp_consumer
+[prom metric types]: https://prometheus.io/docs/concepts/metric_types/
+[input data formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig
+[telegraf.Input]: https://godoc.org/github.com/influxdata/telegraf#Input
+[telegraf.ServiceInput]: https://godoc.org/github.com/influxdata/telegraf#ServiceInput
+[telegraf.Accumulator]: https://godoc.org/github.com/influxdata/telegraf#Accumulator
+[telegraf.TrackingAccumulator]: https://godoc.org/github.com/influxdata/telegraf#TrackingAccumulator
diff --git a/docs/OUTPUTS.md b/docs/OUTPUTS.md
new file mode 100644
index 000000000..cfa8083b4
--- /dev/null
+++ b/docs/OUTPUTS.md
@@ -0,0 +1,95 @@
+### Output Plugins
+
+This section is for developers who want to create a new output sink. Outputs
+are created in a similar manner as collection plugins, and their interface has
+similar constructs.
+
+### Output Plugin Guidelines
+
+- An output must conform to the [telegraf.Output][] interface.
+- Outputs should call `outputs.Add` in their `init` function to register
+  themselves. See below for a quick example.
+- To be available within Telegraf itself, plugins must add themselves to the
+  `github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
+- The `SampleConfig` function should return valid toml that describes how the
+  plugin can be configured. This is included in `telegraf config`. Please
+  consult the [SampleConfig][] page for the latest style guidelines.
+- The `Description` function should say in one line what this output does.
+
+### Output Plugin Example
+
+```go
+package simpleoutput
+
+// simpleoutput.go
+
+import (
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/outputs"
+)
+
+type Simple struct {
+	Ok bool
+}
+
+func (s *Simple) Description() string {
+	return "a demo output"
+}
+
+func (s *Simple) SampleConfig() string {
+	return `
+  ok = true
+`
+}
+
+func (s *Simple) Connect() error {
+	// Make a connection to the URL here
+	return nil
+}
+
+func (s *Simple) Close() error {
+	// Close connection to the URL here
+	return nil
+}
+
+func (s *Simple) Write(metrics []telegraf.Metric) error {
+	for _, metric := range metrics {
+		// write `metric` to the output sink here
+		_ = metric
+	}
+	return nil
+}
+
+func init() {
+	outputs.Add("simpleoutput", func() telegraf.Output { return &Simple{} })
+}
+
+```
+
+### Data Formats
+
+Some output plugins, such as the [file][] plugin, can write in any supported
+[output data formats][].
+
+In order to enable this, you must specify a
+`SetSerializer(serializer serializers.Serializer)`
+function on the plugin object (see the file plugin for an example), as well as
+defining `serializer` as a field of the object.
+
+You can then utilize the serializer internally in your plugin, serializing data
+before it's written. Telegraf's configuration layer will take care of
+instantiating and creating the `Serializer` object.
+
+You should also add the following to your `SampleConfig()`:
+
+```toml
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "influx"
+```
+
+[file]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/file
+[output data formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig
+[telegraf.Output]: https://godoc.org/github.com/influxdata/telegraf#Output
diff --git a/docs/PROCESSORS.md b/docs/PROCESSORS.md
new file mode 100644
index 000000000..e1fa182ca
--- /dev/null
+++ b/docs/PROCESSORS.md
@@ -0,0 +1,61 @@
+### Processor Plugins
+
+This section is for developers who want to create a new processor plugin.
+
+### Processor Plugin Guidelines
+
+* A processor must conform to the [telegraf.Processor][] interface.
+* Processors should call `processors.Add` in their `init` function to register
+  themselves. See below for a quick example.
+* To be available within Telegraf itself, plugins must add themselves to the
+  `github.com/influxdata/telegraf/plugins/processors/all/all.go` file.
+* The `SampleConfig` function should return valid toml that describes how the
+  processor can be configured. This is included in the output of `telegraf
+  config`. Please consult the [SampleConfig][] page for the latest style
+  guidelines.
+* The `Description` function should say in one line what this processor does.
+
+### Processor Plugin Example
+
+```go
+package printer
+
+// printer.go
+
+import (
+	"fmt"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/processors"
+)
+
+type Printer struct {
+}
+
+var sampleConfig = `
+`
+
+func (p *Printer) SampleConfig() string {
+	return sampleConfig
+}
+
+func (p *Printer) Description() string {
+	return "Print all metrics that pass through this filter."
+} + +func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric { + for _, metric := range in { + fmt.Println(metric.String()) + } + return in +} + +func init() { + processors.Add("printer", func() telegraf.Processor { + return &Printer{} + }) +} +``` + +[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig +[telegraf.Processor]: https://godoc.org/github.com/influxdata/telegraf#Processor diff --git a/input.go b/input.go index f7e1493e2..071ab7d9d 100644 --- a/input.go +++ b/input.go @@ -13,17 +13,10 @@ type Input interface { } type ServiceInput interface { - // SampleConfig returns the default configuration of the Input - SampleConfig() string + Input - // Description returns a one-sentence description on the Input - Description() string - - // Gather takes in an accumulator and adds the metrics that the Input - // gathers. This is called every "interval" - Gather(Accumulator) error - - // Start starts the ServiceInput's service, whatever that may be + // Start the ServiceInput. The Accumulator may be retained and used until + // Stop returns. Start(Accumulator) error // Stop stops the services and closes any necessary channels and connections diff --git a/internal/buffer/buffer.go b/internal/buffer/buffer.go deleted file mode 100644 index 6a460eccb..000000000 --- a/internal/buffer/buffer.go +++ /dev/null @@ -1,130 +0,0 @@ -package buffer - -import ( - "sync" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/selfstat" -) - -var ( - MetricsWritten = selfstat.Register("agent", "metrics_written", map[string]string{}) - MetricsDropped = selfstat.Register("agent", "metrics_dropped", map[string]string{}) -) - -// Buffer is an object for storing metrics in a circular buffer. -type Buffer struct { - sync.Mutex - buf []telegraf.Metric - first int - last int - size int - empty bool -} - -// NewBuffer returns a Buffer -// size is the maximum number of metrics that Buffer will cache. If Add is -// called when the buffer is full, then the oldest metric(s) will be dropped. -func NewBuffer(size int) *Buffer { - return &Buffer{ - buf: make([]telegraf.Metric, size), - first: 0, - last: 0, - size: size, - empty: true, - } -} - -// IsEmpty returns true if Buffer is empty. -func (b *Buffer) IsEmpty() bool { - return b.empty -} - -// Len returns the current length of the buffer. -func (b *Buffer) Len() int { - if b.empty { - return 0 - } else if b.first <= b.last { - return b.last - b.first + 1 - } - // Spans the end of array. - // size - gap in the middle - return b.size - (b.first - b.last - 1) // size - gap -} - -func (b *Buffer) push(m telegraf.Metric) { - // Empty - if b.empty { - b.last = b.first // Reset - b.buf[b.last] = m - b.empty = false - return - } - - b.last++ - b.last %= b.size - - // Full - if b.first == b.last { - MetricsDropped.Incr(1) - b.first = (b.first + 1) % b.size - } - b.buf[b.last] = m -} - -// Add adds metrics to the buffer. -func (b *Buffer) Add(metrics ...telegraf.Metric) { - b.Lock() - defer b.Unlock() - for i := range metrics { - MetricsWritten.Incr(1) - b.push(metrics[i]) - } -} - -// Batch returns a batch of metrics of size batchSize. -// the batch will be of maximum length batchSize. It can be less than batchSize, -// if the length of Buffer is less than batchSize. 
-func (b *Buffer) Batch(batchSize int) []telegraf.Metric { - b.Lock() - defer b.Unlock() - outLen := min(b.Len(), batchSize) - out := make([]telegraf.Metric, outLen) - if outLen == 0 { - return out - } - - // We copy everything right of first up to last, count or end - // b.last >= rightInd || b.last < b.first - // therefore wont copy past b.last - rightInd := min(b.size, b.first+outLen) - 1 - - copyCount := copy(out, b.buf[b.first:rightInd+1]) - - // We've emptied the ring - if rightInd == b.last { - b.empty = true - } - b.first = rightInd + 1 - b.first %= b.size - - // We circle back for the rest - if copyCount < outLen { - right := min(b.last, outLen-copyCount) - copy(out[copyCount:], b.buf[b.first:right+1]) - // We've emptied the ring - if right == b.last { - b.empty = true - } - b.first = right + 1 - b.first %= b.size - } - return out -} - -func min(a, b int) int { - if b < a { - return b - } - return a -} diff --git a/internal/buffer/buffer_test.go b/internal/buffer/buffer_test.go deleted file mode 100644 index b3f666fd0..000000000 --- a/internal/buffer/buffer_test.go +++ /dev/null @@ -1,203 +0,0 @@ -package buffer - -import ( - "sync" - "sync/atomic" - "testing" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" -) - -var metricList = []telegraf.Metric{ - testutil.TestMetric(2, "mymetric1"), - testutil.TestMetric(1, "mymetric2"), - testutil.TestMetric(11, "mymetric3"), - testutil.TestMetric(15, "mymetric4"), - testutil.TestMetric(8, "mymetric5"), -} - -func makeBench5(b *testing.B, freq, batchSize int) { - const k = 1000 - var wg sync.WaitGroup - buf := NewBuffer(10000) - m := testutil.TestMetric(1, "mymetric") - - for i := 0; i < b.N; i++ { - buf.Add(m, m, m, m, m) - if i%(freq*k) == 0 { - wg.Add(1) - go func() { - buf.Batch(batchSize * k) - wg.Done() - }() - } - } - // Flush - buf.Batch(b.N) - wg.Wait() - -} -func makeBenchStrict(b *testing.B, freq, batchSize int) { - const k = 1000 - var count uint64 - var wg sync.WaitGroup - buf := NewBuffer(10000) - m := testutil.TestMetric(1, "mymetric") - - for i := 0; i < b.N; i++ { - buf.Add(m) - if i%(freq*k) == 0 { - wg.Add(1) - go func() { - defer wg.Done() - l := len(buf.Batch(batchSize * k)) - atomic.AddUint64(&count, uint64(l)) - }() - } - } - // Flush - wg.Add(1) - go func() { - l := len(buf.Batch(b.N)) - atomic.AddUint64(&count, uint64(l)) - wg.Done() - }() - - wg.Wait() - if count != uint64(b.N) { - b.Errorf("not all metrics came out. 
%d of %d", count, b.N) - } -} -func makeBench(b *testing.B, freq, batchSize int) { - const k = 1000 - var wg sync.WaitGroup - buf := NewBuffer(10000) - m := testutil.TestMetric(1, "mymetric") - - for i := 0; i < b.N; i++ { - buf.Add(m) - if i%(freq*k) == 0 { - wg.Add(1) - go func() { - buf.Batch(batchSize * k) - wg.Done() - }() - } - } - wg.Wait() - // Flush - buf.Batch(b.N) -} - -func BenchmarkBufferBatch5Add(b *testing.B) { - makeBench5(b, 100, 101) -} -func BenchmarkBufferBigInfrequentBatchCatchup(b *testing.B) { - makeBench(b, 100, 101) -} -func BenchmarkBufferOftenBatch(b *testing.B) { - makeBench(b, 1, 1) -} -func BenchmarkBufferAlmostBatch(b *testing.B) { - makeBench(b, 10, 9) -} -func BenchmarkBufferSlowBatch(b *testing.B) { - makeBench(b, 10, 1) -} -func BenchmarkBufferBatchNoDrop(b *testing.B) { - makeBenchStrict(b, 1, 4) -} -func BenchmarkBufferCatchup(b *testing.B) { - buf := NewBuffer(10000) - m := testutil.TestMetric(1, "mymetric") - - for i := 0; i < b.N; i++ { - buf.Add(m) - } - buf.Batch(b.N) -} - -func BenchmarkAddMetrics(b *testing.B) { - buf := NewBuffer(10000) - m := testutil.TestMetric(1, "mymetric") - for n := 0; n < b.N; n++ { - buf.Add(m) - } -} - -func TestNewBufferBasicFuncs(t *testing.T) { - b := NewBuffer(10) - MetricsDropped.Set(0) - MetricsWritten.Set(0) - - assert.True(t, b.IsEmpty()) - assert.Zero(t, b.Len()) - assert.Zero(t, MetricsDropped.Get()) - assert.Zero(t, MetricsWritten.Get()) - - m := testutil.TestMetric(1, "mymetric") - b.Add(m) - assert.False(t, b.IsEmpty()) - assert.Equal(t, b.Len(), 1) - assert.Equal(t, int64(0), MetricsDropped.Get()) - assert.Equal(t, int64(1), MetricsWritten.Get()) - - b.Add(metricList...) - assert.False(t, b.IsEmpty()) - assert.Equal(t, b.Len(), 6) - assert.Equal(t, int64(0), MetricsDropped.Get()) - assert.Equal(t, int64(6), MetricsWritten.Get()) -} - -func TestDroppingMetrics(t *testing.T) { - b := NewBuffer(10) - MetricsDropped.Set(0) - MetricsWritten.Set(0) - - // Add up to the size of the buffer - b.Add(metricList...) - b.Add(metricList...) - assert.False(t, b.IsEmpty()) - assert.Equal(t, b.Len(), 10) - assert.Equal(t, int64(0), MetricsDropped.Get()) - assert.Equal(t, int64(10), MetricsWritten.Get()) - - // Add 5 more and verify they were dropped - b.Add(metricList...) - assert.False(t, b.IsEmpty()) - assert.Equal(t, b.Len(), 10) - assert.Equal(t, int64(5), MetricsDropped.Get()) - assert.Equal(t, int64(15), MetricsWritten.Get()) -} - -func TestGettingBatches(t *testing.T) { - b := NewBuffer(20) - MetricsDropped.Set(0) - MetricsWritten.Set(0) - - // Verify that the buffer returned is smaller than requested when there are - // not as many items as requested. - b.Add(metricList...) - batch := b.Batch(10) - assert.Len(t, batch, 5) - - // Verify that the buffer is now empty - assert.True(t, b.IsEmpty()) - assert.Zero(t, b.Len()) - assert.Zero(t, MetricsDropped.Get()) - assert.Equal(t, int64(5), MetricsWritten.Get()) - - // Verify that the buffer returned is not more than the size requested - b.Add(metricList...) 
- batch = b.Batch(3) - assert.Len(t, batch, 3) - - // Verify that buffer is not empty - assert.False(t, b.IsEmpty()) - assert.Equal(t, b.Len(), 2) - assert.Equal(t, int64(0), MetricsDropped.Get()) - assert.Equal(t, int64(10), MetricsWritten.Get()) -} diff --git a/internal/config/config.go b/internal/config/config.go index 36027834b..7d266852a 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -9,7 +9,6 @@ import ( "math" "os" "path/filepath" - "regexp" "runtime" "sort" @@ -26,7 +25,6 @@ import ( "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/processors" "github.com/influxdata/telegraf/plugins/serializers" - "github.com/influxdata/toml" "github.com/influxdata/toml/ast" ) @@ -622,6 +620,19 @@ func (c *Config) LoadConfig(path string) error { } } + if !c.Agent.OmitHostname { + if c.Agent.Hostname == "" { + hostname, err := os.Hostname() + if err != nil { + return err + } + + c.Agent.Hostname = hostname + } + + c.Tags["host"] = c.Agent.Hostname + } + // Parse all the rest of the plugins: for name, val := range tbl.Fields { subTable, ok := val.(*ast.Table) @@ -709,6 +720,7 @@ func (c *Config) LoadConfig(path string) error { if len(c.Processors) > 1 { sort.Sort(c.Processors) } + return nil } @@ -876,6 +888,7 @@ func (c *Config) addInput(name string, table *ast.Table) error { } rp := models.NewRunningInput(input, pluginConfig) + rp.SetDefaultTags(c.Tags) c.Inputs = append(c.Inputs, rp) return nil } @@ -1751,6 +1764,8 @@ func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { Name: name, Filter: filter, } + + // TODO // Outputs don't support FieldDrop/FieldPass, so set to NameDrop/NamePass if len(oc.Filter.FieldDrop) > 0 { oc.Filter.NameDrop = oc.Filter.FieldDrop @@ -1758,5 +1773,47 @@ func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { if len(oc.Filter.FieldPass) > 0 { oc.Filter.NamePass = oc.Filter.FieldPass } + + if node, ok := tbl.Fields["flush_interval"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + dur, err := time.ParseDuration(str.Value) + if err != nil { + return nil, err + } + + oc.FlushInterval = dur + } + } + } + + if node, ok := tbl.Fields["metric_buffer_limit"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if integer, ok := kv.Value.(*ast.Integer); ok { + v, err := integer.Int() + if err != nil { + return nil, err + } + oc.MetricBufferLimit = int(v) + } + } + } + + if node, ok := tbl.Fields["metric_batch_size"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if integer, ok := kv.Value.(*ast.Integer); ok { + v, err := integer.Int() + if err != nil { + return nil, err + } + oc.MetricBatchSize = int(v) + } + } + } + + delete(tbl.Fields, "flush_interval") + delete(tbl.Fields, "metric_buffer_limit") + delete(tbl.Fields, "metric_batch_size") + return oc, nil } diff --git a/internal/internal.go b/internal/internal.go index 567b0f773..8acf63e96 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -4,6 +4,7 @@ import ( "bufio" "bytes" "compress/gzip" + "context" "crypto/rand" "errors" "io" @@ -246,6 +247,51 @@ func RandomSleep(max time.Duration, shutdown chan struct{}) { } } +// RandomDuration returns a random duration between 0 and max. 
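+// A max of zero returns zero immediately without consulting the random
+// source.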
+func RandomDuration(max time.Duration) time.Duration { + if max == 0 { + return 0 + } + + var sleepns int64 + maxSleep := big.NewInt(max.Nanoseconds()) + if j, err := rand.Int(rand.Reader, maxSleep); err == nil { + sleepns = j.Int64() + } + + return time.Duration(sleepns) +} + +// SleepContext sleeps until the context is closed or the duration is reached. +func SleepContext(ctx context.Context, duration time.Duration) error { + if duration == 0 { + return nil + } + + t := time.NewTimer(duration) + select { + case <-t.C: + return nil + case <-ctx.Done(): + t.Stop() + return ctx.Err() + } +} + +// AlignDuration returns the duration until next aligned interval. +func AlignDuration(tm time.Time, interval time.Duration) time.Duration { + return AlignTime(tm, interval).Sub(tm) +} + +// AlignTime returns the time of the next aligned interval. +func AlignTime(tm time.Time, interval time.Duration) time.Time { + truncated := tm.Truncate(interval) + if truncated == tm { + return tm + } + return truncated.Add(interval) +} + // Exit status takes the error from exec.Command // and returns the exit status and true // if error is not exit status, will return 0 and false diff --git a/internal/internal_test.go b/internal/internal_test.go index 89ee06903..46b1b5962 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type SnakeTest struct { @@ -217,3 +218,55 @@ func TestVersionAlreadySet(t *testing.T) { assert.Equal(t, "foo", Version()) } + +func TestAlignDuration(t *testing.T) { + tests := []struct { + name string + now time.Time + interval time.Duration + expected time.Duration + }{ + { + name: "aligned", + now: time.Date(2018, 1, 1, 1, 1, 0, 0, time.UTC), + interval: 10 * time.Second, + expected: 0 * time.Second, + }, + { + name: "standard interval", + now: time.Date(2018, 1, 1, 1, 1, 1, 0, time.UTC), + interval: 10 * time.Second, + expected: 9 * time.Second, + }, + { + name: "odd interval", + now: time.Date(2018, 1, 1, 1, 1, 1, 0, time.UTC), + interval: 3 * time.Second, + expected: 2 * time.Second, + }, + { + name: "sub second interval", + now: time.Date(2018, 1, 1, 1, 1, 0, 5e8, time.UTC), + interval: 1 * time.Second, + expected: 500 * time.Millisecond, + }, + { + name: "non divisible not aligned on minutes", + now: time.Date(2018, 1, 1, 1, 0, 0, 0, time.UTC), + interval: 1*time.Second + 100*time.Millisecond, + expected: 400 * time.Millisecond, + }, + { + name: "long interval", + now: time.Date(2018, 1, 1, 1, 1, 0, 0, time.UTC), + interval: 1 * time.Hour, + expected: 59 * time.Minute, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := AlignDuration(tt.now, tt.interval) + require.Equal(t, tt.expected, actual) + }) + } +} diff --git a/internal/models/buffer.go b/internal/models/buffer.go new file mode 100644 index 000000000..6848c26fa --- /dev/null +++ b/internal/models/buffer.go @@ -0,0 +1,214 @@ +package models + +import ( + "sync" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/selfstat" +) + +var ( + AgentMetricsWritten = selfstat.Register("agent", "metrics_written", map[string]string{}) + AgentMetricsDropped = selfstat.Register("agent", "metrics_dropped", map[string]string{}) +) + +// Buffer stores metrics in a circular buffer. 
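+//
+// When the buffer is full, adding a new metric overwrites the oldest one. An
+// overwritten metric is reported as dropped immediately, unless it is part of
+// an outstanding batch, in which case the drop is deferred until the batch is
+// rejected.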
+type Buffer struct {
+	sync.Mutex
+	buf   []telegraf.Metric
+	first int // index of the first/oldest metric
+	last  int // one after the index of the last/newest metric
+	size  int // number of metrics currently in the buffer
+	cap   int // the capacity of the buffer
+
+	batchFirst int // index of the first metric in the batch
+	batchLast  int // one after the index of the last metric in the batch
+	batchSize  int // number of metrics currently in the batch
+
+	MetricsAdded   selfstat.Stat
+	MetricsWritten selfstat.Stat
+	MetricsDropped selfstat.Stat
+}
+
+// NewBuffer returns a new empty Buffer with the given capacity.
+func NewBuffer(name string, capacity int) *Buffer {
+	b := &Buffer{
+		buf:   make([]telegraf.Metric, capacity),
+		first: 0,
+		last:  0,
+		size:  0,
+		cap:   capacity,
+
+		MetricsAdded: selfstat.Register(
+			"write",
+			"metrics_added",
+			map[string]string{"output": name},
+		),
+		MetricsWritten: selfstat.Register(
+			"write",
+			"metrics_written",
+			map[string]string{"output": name},
+		),
+		MetricsDropped: selfstat.Register(
+			"write",
+			"metrics_dropped",
+			map[string]string{"output": name},
+		),
+	}
+	return b
+}
+
+// Len returns the number of metrics currently in the buffer.
+func (b *Buffer) Len() int {
+	b.Lock()
+	defer b.Unlock()
+
+	return b.size
+}
+
+func (b *Buffer) metricAdded() {
+	b.MetricsAdded.Incr(1)
+}
+
+func (b *Buffer) metricWritten(metric telegraf.Metric) {
+	AgentMetricsWritten.Incr(1)
+	b.MetricsWritten.Incr(1)
+	metric.Accept()
+}
+
+func (b *Buffer) metricDropped(metric telegraf.Metric) {
+	AgentMetricsDropped.Incr(1)
+	b.MetricsDropped.Incr(1)
+	metric.Reject()
+}
+
+func (b *Buffer) inBatch() bool {
+	if b.batchSize == 0 {
+		return false
+	}
+
+	if b.batchFirst < b.batchLast {
+		return b.last >= b.batchFirst && b.last < b.batchLast
+	} else {
+		return b.last >= b.batchFirst || b.last < b.batchLast
+	}
+}
+
+func (b *Buffer) add(m telegraf.Metric) {
+	// Check if Buffer is full
+	if b.size == b.cap {
+		if b.batchSize == 0 {
+			// No batch taken by the output, we can drop the metric now.
+			b.metricDropped(b.buf[b.last])
+		} else if b.inBatch() {
+			// There is an outstanding batch and this will overwrite a metric
+			// in it, delay the dropping only in case the batch gets rejected.
+			b.batchSize--
+			b.batchFirst++
+			b.batchFirst %= b.cap
+		} else {
+			// There is an outstanding batch, but this overwrites a metric
+			// outside of it.
+			b.metricDropped(b.buf[b.last])
+		}
+	}
+
+	b.metricAdded()
+
+	b.buf[b.last] = m
+	b.last++
+	b.last %= b.cap
+
+	if b.size == b.cap {
+		b.first++
+		b.first %= b.cap
+	}
+
+	b.size = min(b.size+1, b.cap)
+}
+
+// Add adds metrics to the buffer.
+func (b *Buffer) Add(metrics ...telegraf.Metric) {
+	b.Lock()
+	defer b.Unlock()
+
+	for i := range metrics {
+		b.add(metrics[i])
+	}
+}
+
+// Batch returns a slice containing up to batchSize of the oldest metrics
+// still in the buffer.
+//
+// The metrics contained in the batch are not removed from the buffer; instead,
+// the last batch is recorded and removed only if Accept is called.
+func (b *Buffer) Batch(batchSize int) []telegraf.Metric {
+	b.Lock()
+	defer b.Unlock()
+
+	outLen := min(b.size, batchSize)
+	out := make([]telegraf.Metric, outLen)
+	if outLen == 0 {
+		return out
+	}
+
+	b.batchFirst = b.first
+	b.batchLast = b.first + outLen
+	b.batchLast %= b.cap
+	b.batchSize = outLen
+
+	until := min(b.cap, b.first+outLen)
+
+	n := copy(out, b.buf[b.first:until])
+	if n < outLen {
+		copy(out[n:], b.buf[:outLen-n])
+	}
+	return out
+}
+
+// Accept removes the metrics contained in the last batch.
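+// The accepted metrics are marked as successfully written, including any that
+// were overwritten while the batch was outstanding.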
+func (b *Buffer) Accept(batch []telegraf.Metric) { + b.Lock() + defer b.Unlock() + + for _, m := range batch { + b.metricWritten(m) + } + + if b.batchSize > 0 { + b.size -= b.batchSize + b.first += b.batchSize + b.first %= b.cap + } + + b.resetBatch() +} + +// Reject clears the current batch record so that calls to Accept will have no +// effect. +func (b *Buffer) Reject(batch []telegraf.Metric) { + b.Lock() + defer b.Unlock() + + if len(batch) > b.batchSize { + // Part or all of the batch was dropped before reject was called. + for _, m := range batch[b.batchSize:] { + b.metricDropped(m) + } + } + + b.resetBatch() +} + +func (b *Buffer) resetBatch() { + b.batchFirst = 0 + b.batchLast = 0 + b.batchSize = 0 +} + +func min(a, b int) int { + if b < a { + return b + } + return a +} diff --git a/internal/models/buffer_test.go b/internal/models/buffer_test.go new file mode 100644 index 000000000..246aaf6ea --- /dev/null +++ b/internal/models/buffer_test.go @@ -0,0 +1,385 @@ +package models + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/require" +) + +type MockMetric struct { + telegraf.Metric + AcceptF func() + RejectF func() + DropF func() +} + +func (m *MockMetric) Accept() { + m.AcceptF() +} + +func (m *MockMetric) Reject() { + m.RejectF() +} + +func (m *MockMetric) Drop() { + m.DropF() +} + +func Metric() telegraf.Metric { + m, err := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ) + if err != nil { + panic(err) + } + return m +} + +func BenchmarkAddMetrics(b *testing.B) { + buf := NewBuffer("test", 10000) + m := Metric() + for n := 0; n < b.N; n++ { + buf.Add(m) + } +} + +func setup(b *Buffer) *Buffer { + b.MetricsAdded.Set(0) + b.MetricsWritten.Set(0) + b.MetricsDropped.Set(0) + return b +} + +func TestBuffer_LenEmpty(t *testing.T) { + b := setup(NewBuffer("test", 5)) + + require.Equal(t, 0, b.Len()) +} + +func TestBuffer_LenOne(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", 5)) + b.Add(m) + + require.Equal(t, 1, b.Len()) +} + +func TestBuffer_LenFull(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", 5)) + b.Add(m, m, m, m, m) + + require.Equal(t, 5, b.Len()) +} + +func TestBuffer_LenOverfill(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", 5)) + setup(b) + b.Add(m, m, m, m, m, m) + + require.Equal(t, 5, b.Len()) +} + +func TestBuffer_BatchLenZero(t *testing.T) { + b := setup(NewBuffer("test", 5)) + batch := b.Batch(0) + + require.Len(t, batch, 0) +} + +func TestBuffer_BatchLenBufferEmpty(t *testing.T) { + b := setup(NewBuffer("test", 5)) + batch := b.Batch(2) + + require.Len(t, batch, 0) +} + +func TestBuffer_BatchLenUnderfill(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", 5)) + b.Add(m) + batch := b.Batch(2) + + require.Len(t, batch, 1) +} + +func TestBuffer_BatchLenFill(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", 5)) + b.Add(m, m, m) + batch := b.Batch(2) + require.Len(t, batch, 2) +} + +func TestBuffer_BatchLenExact(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", 5)) + b.Add(m, m) + batch := b.Batch(2) + require.Len(t, batch, 2) +} + +func TestBuffer_BatchLenLargerThanBuffer(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", 5)) + b.Add(m, m, m, m, m) + batch := b.Batch(6) + require.Len(t, batch, 5) +} + +func TestBuffer_BatchWrap(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", 5)) + b.Add(m, m, m, m, m) + 
batch := b.Batch(2) + b.Accept(batch) + b.Add(m, m) + batch = b.Batch(5) + require.Len(t, batch, 5) +} + +func TestBuffer_AddDropsOverwrittenMetrics(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", 5)) + + b.Add(m, m, m, m, m) + b.Add(m, m, m, m, m) + + require.Equal(t, int64(5), b.MetricsDropped.Get()) + require.Equal(t, int64(0), b.MetricsWritten.Get()) +} + +func TestBuffer_AcceptRemovesBatch(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", 5)) + b.Add(m, m, m) + batch := b.Batch(2) + b.Accept(batch) + require.Equal(t, 1, b.Len()) +} + +func TestBuffer_RejectLeavesBatch(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", 5)) + b.Add(m, m, m) + batch := b.Batch(2) + b.Reject(batch) + require.Equal(t, 3, b.Len()) +} + +func TestBuffer_AcceptWritesOverwrittenBatch(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", 5)) + + b.Add(m, m, m, m, m) + batch := b.Batch(5) + b.Add(m, m, m, m, m) + b.Accept(batch) + + require.Equal(t, int64(0), b.MetricsDropped.Get()) + require.Equal(t, int64(5), b.MetricsWritten.Get()) +} + +func TestBuffer_BatchRejectDropsOverwrittenBatch(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", 5)) + + b.Add(m, m, m, m, m) + batch := b.Batch(5) + b.Add(m, m, m, m, m) + b.Reject(batch) + + require.Equal(t, int64(5), b.MetricsDropped.Get()) + require.Equal(t, int64(0), b.MetricsWritten.Get()) +} + +func TestBuffer_MetricsOverwriteBatchAccept(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", 5)) + + b.Add(m, m, m, m, m) + batch := b.Batch(3) + b.Add(m, m, m) + b.Accept(batch) + require.Equal(t, int64(0), b.MetricsDropped.Get()) + require.Equal(t, int64(3), b.MetricsWritten.Get()) +} + +func TestBuffer_MetricsOverwriteBatchReject(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", 5)) + + b.Add(m, m, m, m, m) + batch := b.Batch(3) + b.Add(m, m, m) + b.Reject(batch) + require.Equal(t, int64(3), b.MetricsDropped.Get()) + require.Equal(t, int64(0), b.MetricsWritten.Get()) +} + +func TestBuffer_MetricsBatchAcceptRemoved(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", 5)) + + b.Add(m, m, m, m, m) + batch := b.Batch(3) + b.Add(m, m, m, m, m) + b.Accept(batch) + require.Equal(t, int64(2), b.MetricsDropped.Get()) + require.Equal(t, int64(3), b.MetricsWritten.Get()) +} + +func TestBuffer_WrapWithBatch(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", 5)) + + b.Add(m, m, m) + b.Batch(3) + b.Add(m, m, m, m, m, m) + + require.Equal(t, int64(1), b.MetricsDropped.Get()) +} + +func TestBuffer_BatchNotRemoved(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", 5)) + b.Add(m, m, m, m, m) + b.Batch(2) + require.Equal(t, 5, b.Len()) +} + +func TestBuffer_BatchRejectAcceptNoop(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", 5)) + b.Add(m, m, m, m, m) + batch := b.Batch(2) + b.Reject(batch) + b.Accept(batch) + require.Equal(t, 5, b.Len()) +} + +func TestBuffer_AcceptCallsMetricAccept(t *testing.T) { + var accept int + mm := &MockMetric{ + Metric: Metric(), + AcceptF: func() { + accept++ + }, + } + b := setup(NewBuffer("test", 5)) + b.Add(mm, mm, mm) + batch := b.Batch(2) + b.Accept(batch) + require.Equal(t, 2, accept) +} + +func TestBuffer_AddCallsMetricRejectWhenNoBatch(t *testing.T) { + var reject int + mm := &MockMetric{ + Metric: Metric(), + RejectF: func() { + reject++ + }, + } + b := setup(NewBuffer("test", 5)) + setup(b) + b.Add(mm, mm, mm, mm, mm) + b.Add(mm, mm) + require.Equal(t, 2, reject) +} + +func 
TestBuffer_AddCallsMetricRejectWhenNotInBatch(t *testing.T) { + var reject int + mm := &MockMetric{ + Metric: Metric(), + RejectF: func() { + reject++ + }, + } + b := setup(NewBuffer("test", 5)) + setup(b) + b.Add(mm, mm, mm, mm, mm) + batch := b.Batch(2) + b.Add(mm, mm, mm, mm) + // metric[2] and metric[3] rejected + require.Equal(t, 2, reject) + b.Reject(batch) + // metric[1] and metric[2] now rejected + require.Equal(t, 4, reject) +} + +func TestBuffer_RejectCallsMetricRejectWithOverwritten(t *testing.T) { + var reject int + mm := &MockMetric{ + Metric: Metric(), + RejectF: func() { + reject++ + }, + } + b := setup(NewBuffer("test", 5)) + b.Add(mm, mm, mm, mm, mm) + batch := b.Batch(5) + b.Add(mm, mm) + require.Equal(t, 0, reject) + b.Reject(batch) + require.Equal(t, 2, reject) +} + +func TestBuffer_AddOverwriteAndReject(t *testing.T) { + var reject int + mm := &MockMetric{ + Metric: Metric(), + RejectF: func() { + reject++ + }, + } + b := setup(NewBuffer("test", 5)) + b.Add(mm, mm, mm, mm, mm) + batch := b.Batch(5) + b.Add(mm, mm, mm, mm, mm) + b.Add(mm, mm, mm, mm, mm) + b.Add(mm, mm, mm, mm, mm) + b.Add(mm, mm, mm, mm, mm) + require.Equal(t, 15, reject) + b.Reject(batch) + require.Equal(t, 20, reject) +} + +func TestBuffer_AddOverwriteAndRejectOffset(t *testing.T) { + var reject int + var accept int + mm := &MockMetric{ + Metric: Metric(), + RejectF: func() { + reject++ + }, + AcceptF: func() { + accept++ + }, + } + b := setup(NewBuffer("test", 5)) + b.Add(mm, mm, mm) + b.Add(mm, mm, mm, mm) + require.Equal(t, 2, reject) + batch := b.Batch(5) + b.Add(mm, mm, mm, mm) + require.Equal(t, 2, reject) + b.Add(mm, mm, mm, mm) + require.Equal(t, 5, reject) + b.Add(mm, mm, mm, mm) + require.Equal(t, 9, reject) + b.Add(mm, mm, mm, mm) + require.Equal(t, 13, reject) + b.Accept(batch) + require.Equal(t, 13, reject) + require.Equal(t, 5, accept) +} diff --git a/internal/models/filter_test.go b/internal/models/filter_test.go index eb208f7c3..84cd1d397 100644 --- a/internal/models/filter_test.go +++ b/internal/models/filter_test.go @@ -6,6 +6,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -480,3 +481,45 @@ func TestFilter_FilterTagsPassAndDrop(t *testing.T) { } } + +func BenchmarkFilter(b *testing.B) { + tests := []struct { + name string + filter Filter + metric telegraf.Metric + }{ + { + name: "empty filter", + filter: Filter{}, + metric: testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + { + name: "namepass", + filter: Filter{ + NamePass: []string{"cpu"}, + }, + metric: testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + } + + for _, tt := range tests { + b.Run(tt.name, func(b *testing.B) { + require.NoError(b, tt.filter.Compile()) + for n := 0; n < b.N; n++ { + tt.filter.Select(tt.metric) + } + }) + } +} diff --git a/internal/models/running_aggregator.go b/internal/models/running_aggregator.go index 960fd3131..0315aa671 100644 --- a/internal/models/running_aggregator.go +++ b/internal/models/running_aggregator.go @@ -1,30 +1,53 @@ package models import ( - "log" + "sync" "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/selfstat" ) type RunningAggregator struct { - a telegraf.Aggregator - Config *AggregatorConfig - - metrics chan telegraf.Metric - + sync.Mutex + Aggregator telegraf.Aggregator + 
Config *AggregatorConfig periodStart time.Time periodEnd time.Time + + MetricsPushed selfstat.Stat + MetricsFiltered selfstat.Stat + MetricsDropped selfstat.Stat + PushTime selfstat.Stat } func NewRunningAggregator( - a telegraf.Aggregator, - conf *AggregatorConfig, + aggregator telegraf.Aggregator, + config *AggregatorConfig, ) *RunningAggregator { return &RunningAggregator{ - a: a, - Config: conf, - metrics: make(chan telegraf.Metric, 100), + Aggregator: aggregator, + Config: config, + MetricsPushed: selfstat.Register( + "aggregate", + "metrics_pushed", + map[string]string{"aggregator": config.Name}, + ), + MetricsFiltered: selfstat.Register( + "aggregate", + "metrics_filtered", + map[string]string{"aggregator": config.Name}, + ), + MetricsDropped: selfstat.Register( + "aggregate", + "metrics_dropped", + map[string]string{"aggregator": config.Name}, + ), + PushTime: selfstat.Register( + "aggregate", + "push_time_ns", + map[string]string{"aggregator": config.Name}, + ), } } @@ -46,6 +69,15 @@ func (r *RunningAggregator) Name() string { return "aggregators." + r.Config.Name } +func (r *RunningAggregator) Period() time.Duration { + return r.Config.Period +} + +func (r *RunningAggregator) SetPeriodStart(start time.Time) { + r.periodStart = start + r.periodEnd = r.periodStart.Add(r.Config.Period).Add(r.Config.Delay) +} + func (r *RunningAggregator) MakeMetric(metric telegraf.Metric) telegraf.Metric { m := makemetric( metric, @@ -59,9 +91,21 @@ func (r *RunningAggregator) MakeMetric(metric telegraf.Metric) telegraf.Metric { m.SetAggregate(true) } + r.MetricsPushed.Incr(1) + return m } +func (r *RunningAggregator) metricFiltered(metric telegraf.Metric) { + r.MetricsFiltered.Incr(1) + metric.Accept() +} + +func (r *RunningAggregator) metricDropped(metric telegraf.Metric) { + r.MetricsDropped.Incr(1) + metric.Accept() +} + // Add a metric to the aggregator and return true if the original metric // should be dropped. func (r *RunningAggregator) Add(metric telegraf.Metric) bool { @@ -74,75 +118,31 @@ func (r *RunningAggregator) Add(metric telegraf.Metric) bool { return r.Config.DropOriginal } - r.metrics <- metric + r.Lock() + defer r.Unlock() + if r.periodStart.IsZero() || metric.Time().Before(r.periodStart) || metric.Time().After(r.periodEnd) { + r.metricDropped(metric) + return false + } + + r.Aggregator.Add(metric) return r.Config.DropOriginal } -func (r *RunningAggregator) add(in telegraf.Metric) { - r.a.Add(in) +func (r *RunningAggregator) Push(acc telegraf.Accumulator) { + r.Lock() + defer r.Unlock() + + r.periodStart = r.periodEnd + r.periodEnd = r.periodStart.Add(r.Config.Period).Add(r.Config.Delay) + r.push(acc) + r.Aggregator.Reset() } func (r *RunningAggregator) push(acc telegraf.Accumulator) { - r.a.Push(acc) -} - -func (r *RunningAggregator) reset() { - r.a.Reset() -} - -// Run runs the running aggregator, listens for incoming metrics, and waits -// for period ticks to tell it when to push and reset the aggregator. -func (r *RunningAggregator) Run( - acc telegraf.Accumulator, - shutdown chan struct{}, -) { - // The start of the period is truncated to the nearest second. - // - // Every metric then gets it's timestamp checked and is dropped if it - // is not within: - // - // start < t < end + truncation + delay - // - // So if we start at now = 00:00.2 with a 10s period and 0.3s delay: - // now = 00:00.2 - // start = 00:00 - // truncation = 00:00.2 - // end = 00:10 - // 1st interval: 00:00 - 00:10.5 - // 2nd interval: 00:10 - 00:20.5 - // etc. 
- // - now := time.Now() - r.periodStart = now.Truncate(time.Second) - truncation := now.Sub(r.periodStart) - r.periodEnd = r.periodStart.Add(r.Config.Period) - time.Sleep(r.Config.Delay) - periodT := time.NewTicker(r.Config.Period) - defer periodT.Stop() - - for { - select { - case <-shutdown: - if len(r.metrics) > 0 { - // wait until metrics are flushed before exiting - continue - } - return - case m := <-r.metrics: - if m.Time().Before(r.periodStart) || - m.Time().After(r.periodEnd.Add(truncation).Add(r.Config.Delay)) { - // the metric is outside the current aggregation period, so - // skip it. - log.Printf("D! aggregator: metric \"%s\" is not in the current timewindow, skipping", m.Name()) - continue - } - r.add(m) - case <-periodT.C: - r.periodStart = r.periodEnd - r.periodEnd = r.periodStart.Add(r.Config.Period) - r.push(acc) - r.reset() - } - } + start := time.Now() + r.Aggregator.Push(acc) + elapsed := time.Since(start) + r.PushTime.Incr(elapsed.Nanoseconds()) } diff --git a/internal/models/running_aggregator_test.go b/internal/models/running_aggregator_test.go index 34d513646..2212829f9 100644 --- a/internal/models/running_aggregator_test.go +++ b/internal/models/running_aggregator_test.go @@ -1,16 +1,13 @@ package models import ( - "sync" "sync/atomic" "testing" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -23,28 +20,24 @@ func TestAdd(t *testing.T) { }, Period: time.Millisecond * 500, }) - assert.NoError(t, ra.Config.Filter.Compile()) + require.NoError(t, ra.Config.Filter.Compile()) acc := testutil.Accumulator{} - go ra.Run(&acc, make(chan struct{})) - m, err := metric.New("RITest", + now := time.Now() + ra.SetPeriodStart(now) + + m := testutil.MustMetric("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), }, time.Now().Add(time.Millisecond*150), telegraf.Untyped) - require.NoError(t, err) + require.False(t, ra.Add(m)) + ra.Push(&acc) - assert.False(t, ra.Add(m)) - - for { - time.Sleep(time.Millisecond) - if atomic.LoadInt64(&a.sum) > 0 { - break - } - } - assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum)) + require.Equal(t, 1, len(acc.Metrics)) + require.Equal(t, int64(101), acc.Metrics[0].Fields["sum"]) } func TestAddMetricsOutsideCurrentPeriod(t *testing.T) { @@ -56,50 +49,45 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) { }, Period: time.Millisecond * 500, }) - assert.NoError(t, ra.Config.Filter.Compile()) + require.NoError(t, ra.Config.Filter.Compile()) acc := testutil.Accumulator{} - go ra.Run(&acc, make(chan struct{})) + now := time.Now() + ra.SetPeriodStart(now) - m, err := metric.New("RITest", + m := testutil.MustMetric("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), }, - time.Now().Add(-time.Hour), - telegraf.Untyped) - require.NoError(t, err) - - assert.False(t, ra.Add(m)) + now.Add(-time.Hour), + telegraf.Untyped, + ) + require.False(t, ra.Add(m)) // metric after current period - m, err = metric.New("RITest", + m = testutil.MustMetric("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), }, - time.Now().Add(time.Hour), - telegraf.Untyped) - require.NoError(t, err) - assert.False(t, ra.Add(m)) + now.Add(time.Hour), + telegraf.Untyped, + ) + require.False(t, ra.Add(m)) // "now" metric - m, err = metric.New("RITest", + m = testutil.MustMetric("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), }, 
time.Now().Add(time.Millisecond*50), telegraf.Untyped) - require.NoError(t, err) - assert.False(t, ra.Add(m)) + require.False(t, ra.Add(m)) - for { - time.Sleep(time.Millisecond) - if atomic.LoadInt64(&a.sum) > 0 { - break - } - } - assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum)) + ra.Push(&acc) + require.Equal(t, 1, len(acc.Metrics)) + require.Equal(t, int64(101), acc.Metrics[0].Fields["sum"]) } func TestAddAndPushOnePeriod(t *testing.T) { @@ -111,37 +99,24 @@ func TestAddAndPushOnePeriod(t *testing.T) { }, Period: time.Millisecond * 500, }) - assert.NoError(t, ra.Config.Filter.Compile()) + require.NoError(t, ra.Config.Filter.Compile()) acc := testutil.Accumulator{} - shutdown := make(chan struct{}) - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - ra.Run(&acc, shutdown) - }() + now := time.Now() + ra.SetPeriodStart(now) - m, err := metric.New("RITest", + m := testutil.MustMetric("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), }, time.Now().Add(time.Millisecond*100), telegraf.Untyped) - require.NoError(t, err) - assert.False(t, ra.Add(m)) + require.False(t, ra.Add(m)) + + ra.Push(&acc) - for { - time.Sleep(time.Millisecond) - if acc.NMetrics() > 0 { - break - } - } acc.AssertContainsFields(t, "TestMetric", map[string]interface{}{"sum": int64(101)}) - - close(shutdown) - wg.Wait() } func TestAddDropOriginal(t *testing.T) { @@ -152,28 +127,29 @@ func TestAddDropOriginal(t *testing.T) { }, DropOriginal: true, }) - assert.NoError(t, ra.Config.Filter.Compile()) + require.NoError(t, ra.Config.Filter.Compile()) - m, err := metric.New("RITest", + now := time.Now() + ra.SetPeriodStart(now) + + m := testutil.MustMetric("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), }, - time.Now(), + now, telegraf.Untyped) - require.NoError(t, err) - assert.True(t, ra.Add(m)) + require.True(t, ra.Add(m)) // this metric name doesn't match the filter, so Add will return false - m2, err := metric.New("foobar", + m2 := testutil.MustMetric("foobar", map[string]string{}, map[string]interface{}{ "value": int64(101), }, - time.Now(), + now, telegraf.Untyped) - require.NoError(t, err) - assert.False(t, ra.Add(m2)) + require.False(t, ra.Add(m2)) } type TestAggregator struct { diff --git a/internal/models/running_input.go b/internal/models/running_input.go index fce2437ca..0775d5c5d 100644 --- a/internal/models/running_input.go +++ b/internal/models/running_input.go @@ -1,11 +1,9 @@ package models import ( - "fmt" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/selfstat" ) @@ -15,16 +13,13 @@ type RunningInput struct { Input telegraf.Input Config *InputConfig - trace bool defaultTags map[string]string MetricsGathered selfstat.Stat + GatherTime selfstat.Stat } -func NewRunningInput( - input telegraf.Input, - config *InputConfig, -) *RunningInput { +func NewRunningInput(input telegraf.Input, config *InputConfig) *RunningInput { return &RunningInput{ Input: input, Config: config, @@ -33,6 +28,11 @@ func NewRunningInput( "metrics_gathered", map[string]string{"input": config.Name}, ), + GatherTime: selfstat.RegisterTiming( + "gather", + "gather_time_ns", + map[string]string{"input": config.Name}, + ), } } @@ -52,13 +52,19 @@ func (r *RunningInput) Name() string { return "inputs." 
+ r.Config.Name } +func (r *RunningInput) metricFiltered(metric telegraf.Metric) { + metric.Drop() +} + func (r *RunningInput) MakeMetric(metric telegraf.Metric) telegraf.Metric { if ok := r.Config.Filter.Select(metric); !ok { + r.metricFiltered(metric) return nil } r.Config.Filter.Modify(metric) if len(metric.FieldList()) == 0 { + r.metricFiltered(metric) return nil } @@ -70,26 +76,17 @@ func (r *RunningInput) MakeMetric(metric telegraf.Metric) telegraf.Metric { r.Config.Tags, r.defaultTags) - if r.trace && m != nil { - s := influx.NewSerializer() - s.SetFieldSortOrder(influx.SortFields) - octets, err := s.Serialize(m) - if err == nil { - fmt.Print("> " + string(octets)) - } - } - r.MetricsGathered.Incr(1) GlobalMetricsGathered.Incr(1) return m } -func (r *RunningInput) Trace() bool { - return r.trace -} - -func (r *RunningInput) SetTrace(trace bool) { - r.trace = trace +func (r *RunningInput) Gather(acc telegraf.Accumulator) error { + start := time.Now() + err := r.Input.Gather(acc) + elapsed := time.Since(start) + r.GatherTime.Incr(elapsed.Nanoseconds()) + return err } func (r *RunningInput) SetDefaultTags(tags map[string]string) { diff --git a/internal/models/running_input_test.go b/internal/models/running_input_test.go index b83f75ea9..898007e61 100644 --- a/internal/models/running_input_test.go +++ b/internal/models/running_input_test.go @@ -6,6 +6,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -66,17 +67,13 @@ func TestMakeMetricWithPluginTags(t *testing.T) { }, }) - ri.SetTrace(true) - assert.Equal(t, true, ri.Trace()) - - m, err := metric.New("RITest", + m := testutil.MustMetric("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), }, now, telegraf.Untyped) - require.NoError(t, err) m = ri.MakeMetric(m) expected, err := metric.New("RITest", @@ -102,8 +99,6 @@ func TestMakeMetricFilteredOut(t *testing.T) { Filter: Filter{NamePass: []string{"foobar"}}, }) - ri.SetTrace(true) - assert.Equal(t, true, ri.Trace()) assert.NoError(t, ri.Config.Filter.Compile()) m, err := metric.New("RITest", @@ -127,17 +122,13 @@ func TestMakeMetricWithDaemonTags(t *testing.T) { "foo": "bar", }) - ri.SetTrace(true) - assert.Equal(t, true, ri.Trace()) - - m, err := metric.New("RITest", + m := testutil.MustMetric("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), }, now, telegraf.Untyped) - require.NoError(t, err) m = ri.MakeMetric(m) expected, err := metric.New("RITest", map[string]string{ diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 0f2c138a6..8d7d9854b 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -6,7 +6,6 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal/buffer" "github.com/influxdata/telegraf/selfstat" ) @@ -18,6 +17,16 @@ const ( DEFAULT_METRIC_BUFFER_LIMIT = 10000 ) +// OutputConfig containing name and filter +type OutputConfig struct { + Name string + Filter Filter + + FlushInterval time.Duration + MetricBufferLimit int + MetricBatchSize int +} + // RunningOutput contains the output configuration type RunningOutput struct { Name string @@ -27,24 +36,16 @@ type RunningOutput struct { MetricBatchSize int MetricsFiltered selfstat.Stat - MetricsWritten selfstat.Stat BufferSize selfstat.Stat BufferLimit selfstat.Stat WriteTime selfstat.Stat - metrics *buffer.Buffer - 
failMetrics *buffer.Buffer + batch []telegraf.Metric + buffer *Buffer + BatchReady chan time.Time - // Guards against concurrent calls to Add, Push, Reset - aggMutex sync.Mutex - // Guards against concurrent calls to the Output as described in #3009 - writeMutex sync.Mutex -} - -// OutputConfig containing name and filter -type OutputConfig struct { - Name string - Filter Filter + aggMutex sync.Mutex + batchMutex sync.Mutex } func NewRunningOutput( @@ -54,25 +55,27 @@ func NewRunningOutput( batchSize int, bufferLimit int, ) *RunningOutput { + if conf.MetricBufferLimit > 0 { + bufferLimit = conf.MetricBufferLimit + } if bufferLimit == 0 { bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT } + if conf.MetricBatchSize > 0 { + batchSize = conf.MetricBatchSize + } if batchSize == 0 { batchSize = DEFAULT_METRIC_BATCH_SIZE } ro := &RunningOutput{ Name: name, - metrics: buffer.NewBuffer(batchSize), - failMetrics: buffer.NewBuffer(bufferLimit), + batch: make([]telegraf.Metric, 0, batchSize), + buffer: NewBuffer(name, bufferLimit), + BatchReady: make(chan time.Time, 1), Output: output, Config: conf, MetricBufferLimit: bufferLimit, MetricBatchSize: batchSize, - MetricsWritten: selfstat.Register( - "write", - "metrics_written", - map[string]string{"output": name}, - ), MetricsFiltered: selfstat.Register( "write", "metrics_filtered", @@ -94,20 +97,28 @@ func NewRunningOutput( map[string]string{"output": name}, ), } + ro.BufferLimit.Set(int64(ro.MetricBufferLimit)) return ro } -// AddMetric adds a metric to the output. This function can also write cached -// points if FlushBufferWhenFull is true. +func (ro *RunningOutput) metricFiltered(metric telegraf.Metric) { + ro.MetricsFiltered.Incr(1) + metric.Drop() +} + +// AddMetric adds a metric to the output. +// +// Takes ownership of metric func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { if ok := ro.Config.Filter.Select(metric); !ok { - ro.MetricsFiltered.Incr(1) + ro.metricFiltered(metric) return } ro.Config.Filter.Modify(metric) if len(metric.FieldList()) == 0 { + ro.metricFiltered(metric) return } @@ -118,85 +129,98 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { return } - ro.metrics.Add(metric) - if ro.metrics.Len() == ro.MetricBatchSize { - batch := ro.metrics.Batch(ro.MetricBatchSize) - err := ro.write(batch) - if err != nil { - ro.failMetrics.Add(batch...) - log.Printf("E! Error writing to output [%s]: %v", ro.Name, err) + ro.batchMutex.Lock() + + ro.batch = append(ro.batch, metric) + if len(ro.batch) == ro.MetricBatchSize { + ro.addBatchToBuffer() + + nBuffer := ro.buffer.Len() + ro.BufferSize.Set(int64(nBuffer)) + + select { + case ro.BatchReady <- time.Now(): + default: } } + + ro.batchMutex.Unlock() } -// Write writes all cached points to this output. +// AddBatchToBuffer moves the metrics from the batch into the metric buffer. +func (ro *RunningOutput) addBatchToBuffer() { + ro.buffer.Add(ro.batch...) + ro.batch = ro.batch[:0] +} + +// Write writes all metrics to the output, stopping when all have been sent on +// or error. func (ro *RunningOutput) Write() error { if output, ok := ro.Output.(telegraf.AggregatingOutput); ok { ro.aggMutex.Lock() metrics := output.Push() - ro.metrics.Add(metrics...) + ro.buffer.Add(metrics...) output.Reset() ro.aggMutex.Unlock() } + // add and write can be called concurrently + ro.batchMutex.Lock() + ro.addBatchToBuffer() + ro.batchMutex.Unlock() - nFails, nMetrics := ro.failMetrics.Len(), ro.metrics.Len() - ro.BufferSize.Set(int64(nFails + nMetrics)) - log.Printf("D! 
Output [%s] buffer fullness: %d / %d metrics. ", - ro.Name, nFails+nMetrics, ro.MetricBufferLimit) - var err error - if !ro.failMetrics.IsEmpty() { - // how many batches of failed writes we need to write. - nBatches := nFails/ro.MetricBatchSize + 1 - batchSize := ro.MetricBatchSize + nBuffer := ro.buffer.Len() - for i := 0; i < nBatches; i++ { - // If it's the last batch, only grab the metrics that have not had - // a write attempt already (this is primarily to preserve order). - if i == nBatches-1 { - batchSize = nFails % ro.MetricBatchSize - } - batch := ro.failMetrics.Batch(batchSize) - // If we've already failed previous writes, don't bother trying to - // write to this output again. We are not exiting the loop just so - // that we can rotate the metrics to preserve order. - if err == nil { - err = ro.write(batch) - } - if err != nil { - ro.failMetrics.Add(batch...) - } + // Only process the metrics in the buffer now. Metrics added while we are + // writing will be sent on the next call. + nBatches := nBuffer/ro.MetricBatchSize + 1 + for i := 0; i < nBatches; i++ { + batch := ro.buffer.Batch(ro.MetricBatchSize) + if len(batch) == 0 { + break } - } - batch := ro.metrics.Batch(ro.MetricBatchSize) - // see comment above about not trying to write to an already failed output. - // if ro.failMetrics is empty then err will always be nil at this point. - if err == nil { - err = ro.write(batch) - } - - if err != nil { - ro.failMetrics.Add(batch...) - return err + err := ro.write(batch) + if err != nil { + ro.buffer.Reject(batch) + return err + } + ro.buffer.Accept(batch) } return nil } -func (ro *RunningOutput) write(metrics []telegraf.Metric) error { - nMetrics := len(metrics) - if nMetrics == 0 { +// WriteBatch writes only the batch metrics to the output. +func (ro *RunningOutput) WriteBatch() error { + batch := ro.buffer.Batch(ro.MetricBatchSize) + if len(batch) == 0 { return nil } - ro.writeMutex.Lock() - defer ro.writeMutex.Unlock() + + err := ro.write(batch) + if err != nil { + ro.buffer.Reject(batch) + return err + } + ro.buffer.Accept(batch) + + return nil +} + +func (ro *RunningOutput) write(metrics []telegraf.Metric) error { start := time.Now() err := ro.Output.Write(metrics) elapsed := time.Since(start) + ro.WriteTime.Incr(elapsed.Nanoseconds()) + if err == nil { - log.Printf("D! Output [%s] wrote batch of %d metrics in %s\n", - ro.Name, nMetrics, elapsed) - ro.MetricsWritten.Incr(int64(nMetrics)) - ro.WriteTime.Incr(elapsed.Nanoseconds()) + log.Printf("D! [outputs.%s] wrote batch of %d metrics in %s\n", + ro.Name, len(metrics), elapsed) } return err } + +func (ro *RunningOutput) LogBufferStatus() { + nBuffer := ro.buffer.Len() + log.Printf("D! [outputs.%s] buffer fullness: %d / %d metrics. ", + ro.Name, nBuffer, ro.MetricBufferLimit) +} diff --git a/internal/models/running_output_test.go b/internal/models/running_output_test.go index c55334218..fe8755395 100644 --- a/internal/models/running_output_test.go +++ b/internal/models/running_output_test.go @@ -231,56 +231,6 @@ func TestRunningOutputDefault(t *testing.T) { assert.Len(t, m.Metrics(), 10) } -// Test that running output doesn't flush until it's full when -// FlushBufferWhenFull is set. 
-func TestRunningOutputFlushWhenFull(t *testing.T) { - conf := &OutputConfig{ - Filter: Filter{}, - } - - m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 6, 10) - - // Fill buffer to 1 under limit - for _, metric := range first5 { - ro.AddMetric(metric) - } - // no flush yet - assert.Len(t, m.Metrics(), 0) - - // add one more metric - ro.AddMetric(next5[0]) - // now it flushed - assert.Len(t, m.Metrics(), 6) - - // add one more metric and write it manually - ro.AddMetric(next5[1]) - err := ro.Write() - assert.NoError(t, err) - assert.Len(t, m.Metrics(), 7) -} - -// Test that running output doesn't flush until it's full when -// FlushBufferWhenFull is set, twice. -func TestRunningOutputMultiFlushWhenFull(t *testing.T) { - conf := &OutputConfig{ - Filter: Filter{}, - } - - m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 4, 12) - - // Fill buffer past limit twive - for _, metric := range first5 { - ro.AddMetric(metric) - } - for _, metric := range next5 { - ro.AddMetric(metric) - } - // flushed twice - assert.Len(t, m.Metrics(), 8) -} - func TestRunningOutputWriteFail(t *testing.T) { conf := &OutputConfig{ Filter: Filter{}, diff --git a/internal/models/running_processor.go b/internal/models/running_processor.go index a210d9799..38369d03b 100644 --- a/internal/models/running_processor.go +++ b/internal/models/running_processor.go @@ -27,6 +27,19 @@ type ProcessorConfig struct { Filter Filter } +func (rp *RunningProcessor) metricFiltered(metric telegraf.Metric) { + metric.Drop() +} + +func containsMetric(item telegraf.Metric, metrics []telegraf.Metric) bool { + for _, m := range metrics { + if item == m { + return true + } + } + return false +} + func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric { rp.Lock() defer rp.Unlock() @@ -43,6 +56,7 @@ func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric { rp.Config.Filter.Modify(metric) if len(metric.FieldList()) == 0 { + rp.metricFiltered(metric) continue } diff --git a/internal/models/running_processor_test.go b/internal/models/running_processor_test.go index 02db40460..c24347b8e 100644 --- a/internal/models/running_processor_test.go +++ b/internal/models/running_processor_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -41,20 +41,6 @@ func TagProcessor(key, value string) *MockProcessor { } } -func Metric( - name string, - tags map[string]string, - fields map[string]interface{}, - tm time.Time, - tp ...telegraf.ValueType, -) telegraf.Metric { - m, err := metric.New(name, tags, fields, tm, tp...) 
-	if err != nil {
-		panic(err)
-	}
-	return m
-}
-
 func TestRunningProcessor_Apply(t *testing.T) {
 	type args struct {
 		Processor telegraf.Processor
@@ -76,7 +62,7 @@
 			},
 		},
 		input: []telegraf.Metric{
-			Metric(
+			testutil.MustMetric(
 				"cpu",
 				map[string]string{},
 				map[string]interface{}{
@@ -86,7 +72,7 @@
 				),
 			},
 			expected: []telegraf.Metric{
-				Metric(
+				testutil.MustMetric(
 					"cpu",
 					map[string]string{
 						"apply": "true",
@@ -109,7 +95,7 @@
 			},
 		},
 		input: []telegraf.Metric{
-			Metric(
+			testutil.MustMetric(
 				"cpu",
 				map[string]string{},
 				map[string]interface{}{
@@ -119,7 +105,7 @@
 				),
 			},
 			expected: []telegraf.Metric{
-				Metric(
+				testutil.MustMetric(
 					"cpu",
 					map[string]string{
 						"apply": "true",
@@ -142,7 +128,7 @@
 			},
 		},
 		input: []telegraf.Metric{
-			Metric(
+			testutil.MustMetric(
 				"cpu",
 				map[string]string{},
 				map[string]interface{}{
@@ -152,7 +138,7 @@
 				),
 			},
 			expected: []telegraf.Metric{
-				Metric(
+				testutil.MustMetric(
 					"cpu",
 					map[string]string{},
 					map[string]interface{}{
diff --git a/metric.go b/metric.go
index b8da02931..396321e6e 100644
--- a/metric.go
+++ b/metric.go
@@ -62,6 +62,17 @@ type Metric interface {
 	// Copy returns a deep copy of the Metric.
 	Copy() Metric
 
+	// Accept marks the metric as processed successfully and written to an
+	// output.
+	Accept()
+
+	// Reject marks the metric as processed unsuccessfully.
+	Reject()
+
+	// Drop marks the metric as processed successfully without being written
+	// to any output.
+	Drop()
+
 	// Mark Metric as an aggregate
 	SetAggregate(bool)
 	IsAggregate() bool
diff --git a/metric/metric.go b/metric/metric.go
index 9f1a42ccb..f2a49957e 100644
--- a/metric/metric.go
+++ b/metric/metric.go
@@ -248,6 +248,15 @@ func (m *metric) HashID() uint64 {
 	return h.Sum64()
 }
 
+func (m *metric) Accept() {
+}
+
+func (m *metric) Reject() {
+}
+
+func (m *metric) Drop() {
+}
+
 // Convert field to a supported type or nil if unconvertible
 func convertField(v interface{}) interface{} {
 	switch v := v.(type) {
diff --git a/metric/tracking.go b/metric/tracking.go
new file mode 100644
index 000000000..83c3c7aec
--- /dev/null
+++ b/metric/tracking.go
@@ -0,0 +1,171 @@
+package metric
+
+import (
+	"log"
+	"runtime"
+	"sync/atomic"
+
+	"github.com/influxdata/telegraf"
+)
+
+// NotifyFunc is called with the tracking information once a tracking metric
+// has been fully processed.
+type NotifyFunc = func(track telegraf.DeliveryInfo)
+
+// WithTracking adds tracking to the metric and registers the notify function
+// to be called when processing is complete.
+func WithTracking(metric telegraf.Metric, fn NotifyFunc) (telegraf.Metric, telegraf.TrackingID) {
+	return newTrackingMetric(metric, fn)
+}
+
+// WithGroupTracking adds tracking to the metrics and registers the notify
+// function to be called when processing is complete.
+func WithGroupTracking(metric []telegraf.Metric, fn NotifyFunc) ([]telegraf.Metric, telegraf.TrackingID) {
+	return newTrackingMetricGroup(metric, fn)
+}
+
+func EnableDebugFinalizer() {
+	finalizer = debugFinalizer
+}
+
+var (
+	lastID    uint64
+	finalizer func(*trackingData)
+)
+
+func newTrackingID() telegraf.TrackingID {
+	id := atomic.AddUint64(&lastID, 1)
+	return telegraf.TrackingID(id)
+}
+
+func debugFinalizer(d *trackingData) {
+	rc := atomic.LoadInt32(&d.rc)
+	if rc != 0 {
+		log.Fatalf("E! 
[agent] metric collected with non-zero reference count rc: %d", rc) + } +} + +type trackingData struct { + id telegraf.TrackingID + rc int32 + acceptCount int32 + rejectCount int32 + notify NotifyFunc +} + +func (d *trackingData) incr() { + atomic.AddInt32(&d.rc, 1) +} + +func (d *trackingData) decr() int32 { + return atomic.AddInt32(&d.rc, -1) +} + +func (d *trackingData) accept() { + atomic.AddInt32(&d.acceptCount, 1) +} + +func (d *trackingData) reject() { + atomic.AddInt32(&d.rejectCount, 1) +} + +type trackingMetric struct { + telegraf.Metric + d *trackingData +} + +func newTrackingMetric(metric telegraf.Metric, fn NotifyFunc) (telegraf.Metric, telegraf.TrackingID) { + m := &trackingMetric{ + Metric: metric, + d: &trackingData{ + id: newTrackingID(), + rc: 1, + acceptCount: 0, + rejectCount: 0, + notify: fn, + }, + } + + if finalizer != nil { + runtime.SetFinalizer(m.d, finalizer) + } + return m, m.d.id +} + +func newTrackingMetricGroup(group []telegraf.Metric, fn NotifyFunc) ([]telegraf.Metric, telegraf.TrackingID) { + d := &trackingData{ + id: newTrackingID(), + rc: 0, + acceptCount: 0, + rejectCount: 0, + notify: fn, + } + + for i, m := range group { + d.incr() + dm := &trackingMetric{ + Metric: m, + d: d, + } + group[i] = dm + + } + if finalizer != nil { + runtime.SetFinalizer(d, finalizer) + } + + return group, d.id +} + +func (m *trackingMetric) Copy() telegraf.Metric { + m.d.incr() + return &trackingMetric{ + Metric: m.Metric.Copy(), + d: m.d, + } +} + +func (m *trackingMetric) Accept() { + m.d.accept() + m.decr() +} + +func (m *trackingMetric) Reject() { + m.d.reject() + m.decr() +} + +func (m *trackingMetric) Drop() { + m.decr() +} + +func (m *trackingMetric) decr() { + v := m.d.decr() + if v < 0 { + panic("negative refcount") + } + + if v == 0 { + m.d.notify( + &deliveryInfo{ + id: m.d.id, + accepted: int(m.d.acceptCount), + rejected: int(m.d.rejectCount), + }, + ) + } +} + +type deliveryInfo struct { + id telegraf.TrackingID + accepted int + rejected int +} + +func (r *deliveryInfo) ID() telegraf.TrackingID { + return r.id +} + +func (r *deliveryInfo) Delivered() bool { + return r.rejected == 0 +} diff --git a/metric/tracking_test.go b/metric/tracking_test.go new file mode 100644 index 000000000..f950cfcd1 --- /dev/null +++ b/metric/tracking_test.go @@ -0,0 +1,260 @@ +package metric + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/stretchr/testify/require" +) + +func mustMetric( + name string, + tags map[string]string, + fields map[string]interface{}, + tm time.Time, + tp ...telegraf.ValueType, +) telegraf.Metric { + m, err := New(name, tags, fields, tm, tp...) 
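The refcounting in `tracking.go` above is the heart of delivery tracking: every `Copy` bumps the shared count, every `Accept`/`Reject`/`Drop` releases one reference, and the notify callback fires exactly once, when the count reaches zero. Here is a self-contained sketch of the same pattern, independent of the telegraf types (all names invented for illustration).

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// tracker mirrors trackingData: a refcount plus accept/reject tallies.
type tracker struct {
	rc, accepted, rejected int32
	notify                 func(accepted, rejected int32)
}

func (t *tracker) copyRef() { atomic.AddInt32(&t.rc, 1) }
func (t *tracker) accept()  { atomic.AddInt32(&t.accepted, 1); t.release() }
func (t *tracker) reject()  { atomic.AddInt32(&t.rejected, 1); t.release() }
func (t *tracker) drop()    { t.release() }

// release decrements the refcount; the last holder triggers the
// notification, so the callback runs exactly once per tracked group.
func (t *tracker) release() {
	if v := atomic.AddInt32(&t.rc, -1); v == 0 {
		t.notify(atomic.LoadInt32(&t.accepted), atomic.LoadInt32(&t.rejected))
	} else if v < 0 {
		panic("negative refcount")
	}
}

func main() {
	t := &tracker{rc: 1, notify: func(acc, rej int32) {
		// Mirrors deliveryInfo.Delivered(): delivered iff nothing was rejected.
		fmt.Printf("accepted=%d rejected=%d delivered=%v\n", acc, rej, rej == 0)
	}}
	t.copyRef() // a Copy() shares the same tracking data
	t.accept()  // original accepted
	t.drop()    // copy dropped; count reaches zero, notify fires
}
```

This also explains the test table that follows: a `Reject` anywhere in the group makes `Delivered()` false, while any mix of `Accept` and `Drop` counts as delivered.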
+ if err != nil { + panic("mustMetric") + } + return m +} + +type deliveries struct { + Info map[telegraf.TrackingID]telegraf.DeliveryInfo +} + +func (d *deliveries) onDelivery(info telegraf.DeliveryInfo) { + d.Info[info.ID()] = info +} + +func TestTracking(t *testing.T) { + tests := []struct { + name string + metric telegraf.Metric + actions func(metric telegraf.Metric) + delivered bool + }{ + { + name: "accept", + metric: mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + actions: func(m telegraf.Metric) { + m.Accept() + }, + delivered: true, + }, + { + name: "reject", + metric: mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + actions: func(m telegraf.Metric) { + m.Reject() + }, + delivered: false, + }, + { + name: "accept copy", + metric: mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + actions: func(m telegraf.Metric) { + m2 := m.Copy() + m.Accept() + m2.Accept() + }, + delivered: true, + }, + { + name: "copy with accept and done", + metric: mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + actions: func(m telegraf.Metric) { + m2 := m.Copy() + m.Accept() + m2.Drop() + }, + delivered: true, + }, + { + name: "copy with mixed delivery", + metric: mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + actions: func(m telegraf.Metric) { + m2 := m.Copy() + m.Accept() + m2.Reject() + }, + delivered: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &deliveries{ + Info: make(map[telegraf.TrackingID]telegraf.DeliveryInfo), + } + metric, id := WithTracking(tt.metric, d.onDelivery) + tt.actions(metric) + + info := d.Info[id] + require.Equal(t, tt.delivered, info.Delivered()) + }) + } +} + +func TestGroupTracking(t *testing.T) { + tests := []struct { + name string + metrics []telegraf.Metric + actions func(metrics []telegraf.Metric) + delivered bool + }{ + { + name: "accept", + metrics: []telegraf.Metric{ + mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + actions: func(metrics []telegraf.Metric) { + metrics[0].Accept() + metrics[1].Accept() + }, + delivered: true, + }, + { + name: "reject", + metrics: []telegraf.Metric{ + mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + actions: func(metrics []telegraf.Metric) { + metrics[0].Reject() + metrics[1].Reject() + }, + delivered: false, + }, + { + name: "remove", + metrics: []telegraf.Metric{ + mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + actions: func(metrics []telegraf.Metric) { + metrics[0].Drop() + metrics[1].Drop() + }, + delivered: true, + }, + { + name: "mixed", + metrics: []telegraf.Metric{ + mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + mustMetric( + "cpu", + 
map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + actions: func(metrics []telegraf.Metric) { + metrics[0].Accept() + metrics[1].Reject() + }, + delivered: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &deliveries{ + Info: make(map[telegraf.TrackingID]telegraf.DeliveryInfo), + } + metrics, id := WithGroupTracking(tt.metrics, d.onDelivery) + tt.actions(metrics) + + info := d.Info[id] + require.Equal(t, tt.delivered, info.Delivered()) + }) + } +} diff --git a/output.go b/output.go index 2421048f0..3c4a85ddb 100644 --- a/output.go +++ b/output.go @@ -17,16 +17,7 @@ type Output interface { // if the Output only accepts a fixed set of aggregations over a time period. // These functions may be called concurrently to the Write function. type AggregatingOutput interface { - // Connect to the Output - Connect() error - // Close any connections to the Output - Close() error - // Description returns a one-sentence description on the Output - Description() string - // SampleConfig returns the default configuration of the Output - SampleConfig() string - // Write takes in group of points to be written to the Output - Write(metrics []Metric) error + Output // Add the metric to the aggregator Add(in Metric) @@ -35,21 +26,3 @@ type AggregatingOutput interface { // Reset signals the the aggregator period is completed. Reset() } - -type ServiceOutput interface { - // Connect to the Output - Connect() error - // Close any connections to the Output - Close() error - // Description returns a one-sentence description on the Output - Description() string - // SampleConfig returns the default configuration of the Output - SampleConfig() string - // Write takes in group of points to be written to the Output - Write(metrics []Metric) error - - // Start the "service" that will provide an Output - Start() error - // Stop the "service" that will provide an Output - Stop() -} diff --git a/plugins/aggregators/basicstats/basicstats.go b/plugins/aggregators/basicstats/basicstats.go index 701cd8a85..c5c7e5d3f 100644 --- a/plugins/aggregators/basicstats/basicstats.go +++ b/plugins/aggregators/basicstats/basicstats.go @@ -133,7 +133,6 @@ func (m *BasicStats) Add(in telegraf.Metric) { } func (m *BasicStats) Push(acc telegraf.Accumulator) { - config := getConfiguredStats(m) for _, aggregate := range m.cache { diff --git a/plugins/inputs/amqp_consumer/README.md b/plugins/inputs/amqp_consumer/README.md index 133531421..ca1af800c 100644 --- a/plugins/inputs/amqp_consumer/README.md +++ b/plugins/inputs/amqp_consumer/README.md @@ -13,7 +13,6 @@ For an introduction to AMQP see: The following defaults are known to work with RabbitMQ: ```toml -# AMQP consumer plugin [[inputs.amqp_consumer]] ## Broker to consume from. ## deprecated in 1.7; use the brokers option @@ -46,16 +45,26 @@ The following defaults are known to work with RabbitMQ: ## AMQP queue name queue = "telegraf" - + ## AMQP queue durability can be "transient" or "durable". queue_durability = "durable" - + ## Binding Key binding_key = "#" ## Maximum number of messages server should give to the worker. # prefetch_count = 50 + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. 
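The `AggregatingOutput` cleanup in `output.go` above replaces a duplicated copy of the `Output` method set with Go interface embedding, so the base contract is declared in exactly one place. For readers less familiar with the idiom, a minimal sketch (all types here are invented for illustration, not the real telegraf interfaces):

```go
package main

import "fmt"

// Output is the base plugin contract.
type Output interface {
	Connect() error
	Close() error
	Write(metrics []string) error
}

// AggregatingOutput embeds Output instead of restating its methods;
// any change to the base interface is picked up automatically.
type AggregatingOutput interface {
	Output
	Add(in string)
	Push()
	Reset()
}

type sum struct{ n int }

func (s *sum) Connect() error               { return nil }
func (s *sum) Close() error                 { return nil }
func (s *sum) Write(metrics []string) error { fmt.Println("write", metrics); return nil }
func (s *sum) Add(in string)                { s.n++ }
func (s *sum) Push()                        { fmt.Println("count:", s.n) }
func (s *sum) Reset()                       { s.n = 0 }

func main() {
	var o AggregatingOutput = &sum{}
	o.Add("cpu value=1")
	o.Push() // count: 1
}
```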
+ ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + ## Auth method. PLAIN and EXTERNAL are supported ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as ## described here: https://www.rabbitmq.com/plugins.html diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go index 33cd9971b..568ee6f38 100644 --- a/plugins/inputs/amqp_consumer/amqp_consumer.go +++ b/plugins/inputs/amqp_consumer/amqp_consumer.go @@ -1,6 +1,7 @@ package amqp_consumer import ( + "context" "errors" "fmt" "log" @@ -9,25 +10,32 @@ import ( "sync" "time" - "github.com/streadway/amqp" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/streadway/amqp" ) +const ( + defaultMaxUndeliveredMessages = 1000 +) + +type empty struct{} +type semaphore chan empty + // AMQPConsumer is the top level struct for this plugin type AMQPConsumer struct { - URL string `toml:"url"` // deprecated in 1.7; use brokers - Brokers []string `toml:"brokers"` - Username string `toml:"username"` - Password string `toml:"password"` - Exchange string `toml:"exchange"` - ExchangeType string `toml:"exchange_type"` - ExchangeDurability string `toml:"exchange_durability"` - ExchangePassive bool `toml:"exchange_passive"` - ExchangeArguments map[string]string `toml:"exchange_arguments"` + URL string `toml:"url"` // deprecated in 1.7; use brokers + Brokers []string `toml:"brokers"` + Username string `toml:"username"` + Password string `toml:"password"` + Exchange string `toml:"exchange"` + ExchangeType string `toml:"exchange_type"` + ExchangeDurability string `toml:"exchange_durability"` + ExchangePassive bool `toml:"exchange_passive"` + ExchangeArguments map[string]string `toml:"exchange_arguments"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` // Queue Name Queue string `toml:"queue"` @@ -44,9 +52,12 @@ type AMQPConsumer struct { AuthMethod string tls.ClientConfig + deliveries map[telegraf.TrackingID]amqp.Delivery + parser parsers.Parser conn *amqp.Connection wg *sync.WaitGroup + cancel context.CancelFunc } type externalAuth struct{} @@ -114,6 +125,16 @@ func (a *AMQPConsumer) SampleConfig() string { ## Maximum number of messages server should give to the worker. # prefetch_count = 50 + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + ## Auth method. 
PLAIN and EXTERNAL are supported ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as ## described here: https://www.rabbitmq.com/plugins.html @@ -185,9 +206,15 @@ func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error { return err } + ctx, cancel := context.WithCancel(context.Background()) + a.cancel = cancel + a.wg = &sync.WaitGroup{} a.wg.Add(1) - go a.process(msgs, acc) + go func() { + defer a.wg.Done() + a.process(ctx, msgs, acc) + }() go func() { for { @@ -196,7 +223,7 @@ func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error { break } - log.Printf("I! AMQP consumer connection closed: %s; trying to reconnect", err) + log.Printf("I! [inputs.amqp_consumer] connection closed: %s; trying to reconnect", err) for { msgs, err := a.connect(amqpConf) if err != nil { @@ -206,7 +233,10 @@ func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error { } a.wg.Add(1) - go a.process(msgs, acc) + go func() { + defer a.wg.Done() + a.process(ctx, msgs, acc) + }() break } } @@ -224,14 +254,14 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err p := rand.Perm(len(brokers)) for _, n := range p { broker := brokers[n] - log.Printf("D! [amqp_consumer] connecting to %q", broker) + log.Printf("D! [inputs.amqp_consumer] connecting to %q", broker) conn, err := amqp.DialConfig(broker, *amqpConf) if err == nil { a.conn = conn - log.Printf("D! [amqp_consumer] connected to %q", broker) + log.Printf("D! [inputs.amqp_consumer] connected to %q", broker) break } - log.Printf("D! [amqp_consumer] error connecting to %q", broker) + log.Printf("D! [inputs.amqp_consumer] error connecting to %q", broker) } if a.conn == nil { @@ -320,7 +350,6 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err return nil, fmt.Errorf("Failed establishing connection to queue: %s", err) } - log.Println("I! Started AMQP consumer") return msgs, err } @@ -361,42 +390,101 @@ func declareExchange( } // Read messages from queue and add them to the Accumulator -func (a *AMQPConsumer) process(msgs <-chan amqp.Delivery, acc telegraf.Accumulator) { - defer a.wg.Done() - for d := range msgs { - metrics, err := a.parser.Parse(d.Body) - if err != nil { - log.Printf("E! %v: error parsing metric - %v", err, string(d.Body)) - } else { - for _, m := range metrics { - acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) +func (a *AMQPConsumer) process(ctx context.Context, msgs <-chan amqp.Delivery, ac telegraf.Accumulator) { + a.deliveries = make(map[telegraf.TrackingID]amqp.Delivery) + + acc := ac.WithTracking(a.MaxUndeliveredMessages) + sem := make(semaphore, a.MaxUndeliveredMessages) + + for { + select { + case <-ctx.Done(): + return + case track := <-acc.Delivered(): + if a.onDelivery(track) { + <-sem + } + case sem <- empty{}: + select { + case <-ctx.Done(): + return + case track := <-acc.Delivered(): + if a.onDelivery(track) { + <-sem + <-sem + } + case d, ok := <-msgs: + if !ok { + return + } + err := a.onMessage(acc, d) + if err != nil { + acc.AddError(err) + <-sem + } } } - - d.Ack(false) } - log.Printf("I! 
AMQP consumer queue closed") +} + +func (a *AMQPConsumer) onMessage(acc telegraf.TrackingAccumulator, d amqp.Delivery) error { + metrics, err := a.parser.Parse(d.Body) + if err != nil { + return err + } + + id := acc.AddTrackingMetricGroup(metrics) + a.deliveries[id] = d + return nil +} + +func (a *AMQPConsumer) onDelivery(track telegraf.DeliveryInfo) bool { + delivery, ok := a.deliveries[track.ID()] + if !ok { + // Added by a previous connection + return false + } + + if track.Delivered() { + err := delivery.Ack(false) + if err != nil { + log.Printf("E! [inputs.amqp_consumer] Unable to ack written delivery: %d: %v", + delivery.DeliveryTag, err) + a.conn.Close() + } + } else { + err := delivery.Reject(false) + if err != nil { + log.Printf("E! [inputs.amqp_consumer] Unable to reject failed delivery: %d: %v", + delivery.DeliveryTag, err) + a.conn.Close() + } + } + + delete(a.deliveries, track.ID()) + return true } func (a *AMQPConsumer) Stop() { + a.cancel() + a.wg.Wait() err := a.conn.Close() if err != nil && err != amqp.ErrClosed { - log.Printf("E! Error closing AMQP connection: %s", err) + log.Printf("E! [inputs.amqp_consumer] Error closing AMQP connection: %s", err) return } - a.wg.Wait() - log.Println("I! Stopped AMQP service") } func init() { inputs.Add("amqp_consumer", func() telegraf.Input { return &AMQPConsumer{ - URL: DefaultBroker, - AuthMethod: DefaultAuthMethod, - ExchangeType: DefaultExchangeType, - ExchangeDurability: DefaultExchangeDurability, - QueueDurability: DefaultQueueDurability, - PrefetchCount: DefaultPrefetchCount, + URL: DefaultBroker, + AuthMethod: DefaultAuthMethod, + ExchangeType: DefaultExchangeType, + ExchangeDurability: DefaultExchangeDurability, + QueueDurability: DefaultQueueDurability, + PrefetchCount: DefaultPrefetchCount, + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, } }) } diff --git a/plugins/inputs/internal/README.md b/plugins/inputs/internal/README.md index fbec4d86f..73f0b018e 100644 --- a/plugins/inputs/internal/README.md +++ b/plugins/inputs/internal/README.md @@ -18,52 +18,54 @@ plugin. memstats are taken from the Go runtime: https://golang.org/pkg/runtime/#MemStats -- internal\_memstats - - alloc\_bytes +- internal_memstats + - alloc_bytes - frees - - heap\_alloc\_bytes - - heap\_idle\_bytes - - heap\_in\_use\_bytes - - heap\_objects\_bytes - - heap\_released\_bytes - - heap\_sys\_bytes + - heap_alloc_bytes + - heap_idle_bytes + - heap_in_use_bytes + - heap_objects_bytes + - heap_released_bytes + - heap_sys_bytes - mallocs - - num\_gc - - pointer\_lookups - - sys\_bytes - - total\_alloc\_bytes + - num_gc + - pointer_lookups + - sys_bytes + - total_alloc_bytes agent stats collect aggregate stats on all telegraf plugins. -- internal\_agent - - gather\_errors - - metrics\_dropped - - metrics\_gathered - - metrics\_written +- internal_agent + - gather_errors + - metrics_dropped + - metrics_gathered + - metrics_written -internal\_gather stats collect aggregate stats on all input plugins +internal_gather stats collect aggregate stats on all input plugins that are of the same input type. They are tagged with `input=`. -- internal\_gather - - gather\_time\_ns - - metrics\_gathered +- internal_gather + - gather_time_ns + - metrics_gathered -internal\_write stats collect aggregate stats on all output plugins +internal_write stats collect aggregate stats on all output plugins that are of the same input type. They are tagged with `output=`. 
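The `onDelivery` flow in the amqp_consumer changes above is where delivery tracking meets broker semantics: a successfully written group is acked, a failed one is rejected, and either way the pending-delivery entry is removed. A compressed sketch of that decision follows; `Delivery` is a hand-rolled stand-in for `amqp.Delivery`, not the real type.

```go
package main

import "fmt"

// Delivery is an illustrative stand-in for amqp.Delivery.
type Delivery struct{ Tag uint64 }

func (d Delivery) Ack() error    { fmt.Println("ack", d.Tag); return nil }
func (d Delivery) Reject() error { fmt.Println("reject", d.Tag); return nil }

type trackInfo struct {
	id        uint64
	delivered bool
}

// onDelivery mirrors the plugin's logic: look up the pending delivery by
// tracking ID, ack or reject it, then forget it. Returning false means the
// ID belonged to a previous connection and no semaphore slot is released.
func onDelivery(pending map[uint64]Delivery, track trackInfo) bool {
	d, ok := pending[track.id]
	if !ok {
		return false
	}
	if track.delivered {
		d.Ack()
	} else {
		d.Reject()
	}
	delete(pending, track.id)
	return true
}

func main() {
	pending := map[uint64]Delivery{1: {Tag: 42}}
	onDelivery(pending, trackInfo{id: 1, delivered: true}) // ack 42
}
```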
-- internal\_write - - buffer\_limit - - buffer\_size - - metrics\_written - - metrics\_filtered - - write\_time\_ns +- internal_write + - buffer_limit + - buffer_size + - metrics_added + - metrics_written + - metrics_dropped + - metrics_filtered + - write_time_ns -internal\_\ are metrics which are defined on a per-plugin basis, and +internal_ are metrics which are defined on a per-plugin basis, and usually contain tags which differentiate each instance of a particular type of plugin. -- internal\_\ +- internal_ - individual plugin-specific fields, such as requests counts. ### Tags: @@ -76,7 +78,7 @@ to each particular plugin. ``` internal_memstats,host=tyrion alloc_bytes=4457408i,sys_bytes=10590456i,pointer_lookups=7i,mallocs=17642i,frees=7473i,heap_sys_bytes=6848512i,heap_idle_bytes=1368064i,heap_in_use_bytes=5480448i,heap_released_bytes=0i,total_alloc_bytes=6875560i,heap_alloc_bytes=4457408i,heap_objects_bytes=10169i,num_gc=2i 1480682800000000000 internal_agent,host=tyrion metrics_written=18i,metrics_dropped=0i,metrics_gathered=19i,gather_errors=0i 1480682800000000000 -internal_write,output=file,host=tyrion buffer_limit=10000i,write_time_ns=636609i,metrics_written=18i,buffer_size=0i 1480682800000000000 +internal_write,output=file,host=tyrion buffer_limit=10000i,write_time_ns=636609i,metrics_added=18i,metrics_written=18i,buffer_size=0i 1480682800000000000 internal_gather,input=internal,host=tyrion metrics_gathered=19i,gather_time_ns=442114i 1480682800000000000 internal_gather,input=http_listener,host=tyrion metrics_gathered=0i,gather_time_ns=167285i 1480682800000000000 internal_http_listener,address=:8186,host=tyrion queries_received=0i,writes_received=0i,requests_received=0i,buffers_created=0i,requests_served=0i,pings_received=0i,bytes_received=0i,not_founds_served=0i,pings_served=0i,queries_served=0i,writes_served=0i 1480682800000000000 diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index 2bc290c6b..8922f5071 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -1,18 +1,14 @@ # Kafka Consumer Input Plugin -The [Kafka](http://kafka.apache.org/) consumer plugin polls a specified Kafka -topic and adds messages to InfluxDB. The plugin assumes messages follow the -line protocol. [Consumer Group](http://godoc.org/github.com/wvanbergen/kafka/consumergroup) -is used to talk to the Kafka cluster so multiple instances of telegraf can read -from the same topic in parallel. +The [Kafka][kafka] consumer plugin reads from Kafka +and creates metrics using one of the supported [input data formats][]. -For old kafka version (< 0.8), please use the kafka_consumer_legacy input plugin +For old kafka version (< 0.8), please use the [kafka_consumer_legacy][] input plugin and use the old zookeeper connection method. -## Configuration +### Configuration ```toml -# Read metrics from Kafka topic(s) [[inputs.kafka_consumer]] ## kafka servers brokers = ["localhost:9092"] @@ -44,18 +40,27 @@ and use the old zookeeper connection method. ## Offset (must be either "oldest" or "newest") offset = "oldest" + ## Maximum length of a message to consume, in bytes (default 0/unlimited); + ## larger messages are dropped + max_message_len = 1000000 + + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. 
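The new `internal_write` fields documented above make the output buffer a closed system: everything `metrics_added` counts is eventually accounted for as written, dropped, or still buffered. A toy model of that invariant, assuming drop-oldest behavior on overflow; the real buffer implementation is more involved than this sketch:

```go
package main

import "fmt"

// counters models the internal_write fields added in this patch.
type counters struct {
	added, written, dropped, size, limit int
}

func (c *counters) add(n int) {
	c.added += n
	c.size += n
	// A full buffer sheds the overflow, which is what
	// metrics_dropped reports.
	if over := c.size - c.limit; over > 0 {
		c.dropped += over
		c.size = c.limit
	}
}

func (c *counters) write(n int) {
	if n > c.size {
		n = c.size
	}
	c.written += n
	c.size -= n
}

func main() {
	c := &counters{limit: 10}
	c.add(12) // 2 dropped
	c.write(8)
	// Invariant: added == written + dropped + size
	fmt.Println(c.added == c.written+c.dropped+c.size) // true
}
```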
+ ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" - - ## Maximum length of a message to consume, in bytes (default 0/unlimited); - ## larger messages are dropped - max_message_len = 1000000 ``` -## Testing - -Running integration tests requires running Zookeeper & Kafka. See Makefile -for kafka container command. +[kafka]: https://kafka.apache.org +[kafka_consumer_legacy]: /plugins/inputs/kafka_consumer_legacy/README.md +[input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index eba9b68ac..31159def3 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -1,55 +1,54 @@ package kafka_consumer import ( + "context" "fmt" "log" "strings" "sync" + "github.com/Shopify/sarama" + cluster "github.com/bsm/sarama-cluster" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" - - "github.com/Shopify/sarama" - cluster "github.com/bsm/sarama-cluster" ) +const ( + defaultMaxUndeliveredMessages = 1000 +) + +type empty struct{} +type semaphore chan empty + +type Consumer interface { + Errors() <-chan error + Messages() <-chan *sarama.ConsumerMessage + MarkOffset(msg *sarama.ConsumerMessage, metadata string) + Close() error +} + type Kafka struct { - ConsumerGroup string - ClientID string `toml:"client_id"` - Topics []string - Brokers []string - MaxMessageLen int - Version string `toml:"version"` - - Cluster *cluster.Consumer - + ConsumerGroup string `toml:"consumer_group"` + ClientID string `toml:"client_id"` + Topics []string `toml:"topics"` + Brokers []string `toml:"brokers"` + MaxMessageLen int `toml:"max_message_len"` + Version string `toml:"version"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + Offset string `toml:"offset"` + SASLUsername string `toml:"sasl_username"` + SASLPassword string `toml:"sasl_password"` tls.ClientConfig - // SASL Username - SASLUsername string `toml:"sasl_username"` - // SASL Password - SASLPassword string `toml:"sasl_password"` + cluster Consumer + parser parsers.Parser + wg *sync.WaitGroup + cancel context.CancelFunc - // Legacy metric buffer support - MetricBuffer int - // TODO remove PointBuffer, legacy support - PointBuffer int - - Offset string - parser parsers.Parser - - sync.Mutex - - // channel for all incoming kafka messages - in <-chan *sarama.ConsumerMessage - // channel for all kafka consumer errors - errs <-chan error - done chan struct{} - - // keep the accumulator internally: - acc telegraf.Accumulator + // Unconfirmed messages + messages map[telegraf.TrackingID]*sarama.ConsumerMessage // doNotCommitMsgs tells the parser not to call CommitUpTo on the consumer // this is mostly for test purposes, but there may be a use-case for it later. 
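Extracting the small `Consumer` interface in `kafka_consumer.go` above is what lets the tests later in this patch swap in a mock instead of a live `sarama-cluster` consumer. The pattern in isolation, with invented stand-in types:

```go
package main

import "fmt"

// Message stands in for *sarama.ConsumerMessage.
type Message struct{ Value []byte }

// Consumer is the seam: the plugin depends on this narrow interface,
// so production code can pass a cluster consumer and tests a fake.
type Consumer interface {
	Messages() <-chan *Message
	Errors() <-chan error
	MarkOffset(msg *Message)
	Close() error
}

type fakeConsumer struct {
	msgs chan *Message
	errs chan error
}

func (f *fakeConsumer) Messages() <-chan *Message { return f.msgs }
func (f *fakeConsumer) Errors() <-chan error      { return f.errs }
func (f *fakeConsumer) MarkOffset(msg *Message)   { fmt.Printf("marked %q\n", msg.Value) }
func (f *fakeConsumer) Close() error              { return nil }

func consumeOne(c Consumer) {
	msg := <-c.Messages()
	// ... parse and hand to the accumulator ...
	c.MarkOffset(msg)
}

func main() {
	f := &fakeConsumer{msgs: make(chan *Message, 1), errs: make(chan error)}
	f.msgs <- &Message{Value: []byte("cpu value=1")}
	consumeOne(f) // marked "cpu value=1"
}
```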
@@ -86,16 +85,25 @@ var sampleConfig = ` consumer_group = "telegraf_metrics_consumers" ## Offset (must be either "oldest" or "newest") offset = "oldest" + ## Maximum length of a message to consume, in bytes (default 0/unlimited); + ## larger messages are dropped + max_message_len = 1000000 + + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" - - ## Maximum length of a message to consume, in bytes (default 0/unlimited); - ## larger messages are dropped - max_message_len = 1000000 ` func (k *Kafka) SampleConfig() string { @@ -111,12 +119,8 @@ func (k *Kafka) SetParser(parser parsers.Parser) { } func (k *Kafka) Start(acc telegraf.Accumulator) error { - k.Lock() - defer k.Unlock() var clusterErr error - k.acc = acc - config := cluster.NewConfig() if k.Version != "" { @@ -159,13 +163,13 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { case "newest": config.Consumer.Offsets.Initial = sarama.OffsetNewest default: - log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n", + log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'", k.Offset) config.Consumer.Offsets.Initial = sarama.OffsetOldest } - if k.Cluster == nil { - k.Cluster, clusterErr = cluster.NewConsumer( + if k.cluster == nil { + k.cluster, clusterErr = cluster.NewConsumer( k.Brokers, k.ConsumerGroup, k.Topics, @@ -173,67 +177,110 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { ) if clusterErr != nil { - log.Printf("E! Error when creating Kafka Consumer, brokers: %v, topics: %v\n", + log.Printf("E! Error when creating Kafka Consumer, brokers: %v, topics: %v", k.Brokers, k.Topics) return clusterErr } - - // Setup message and error channels - k.in = k.Cluster.Messages() - k.errs = k.Cluster.Errors() } - k.done = make(chan struct{}) - // Start the kafka message reader - go k.receiver() - log.Printf("I! Started the kafka consumer service, brokers: %v, topics: %v\n", + ctx, cancel := context.WithCancel(context.Background()) + k.cancel = cancel + + // Start consumer goroutine + k.wg = &sync.WaitGroup{} + k.wg.Add(1) + go func() { + defer k.wg.Done() + k.receiver(ctx, acc) + }() + + log.Printf("I! Started the kafka consumer service, brokers: %v, topics: %v", k.Brokers, k.Topics) return nil } // receiver() reads all incoming messages from the consumer, and parses them into // influxdb metric points. 
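The `Start` rework above replaces the old `done` channel with the now-standard context-plus-WaitGroup shutdown pattern: `Start` derives a cancellable context and launches the receiver under a `WaitGroup`, and `Stop` cancels then waits. Distilled into a runnable sketch:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type service struct {
	wg     sync.WaitGroup
	cancel context.CancelFunc
}

func (s *service) Start() {
	ctx, cancel := context.WithCancel(context.Background())
	s.cancel = cancel
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		s.receive(ctx)
	}()
}

func (s *service) receive(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			fmt.Println("receiver exiting")
			return
		case <-time.After(10 * time.Millisecond):
			// ... read and process one message ...
		}
	}
}

// Stop cancels the context first, then blocks until the receiver has
// actually returned, so no goroutine outlives the plugin.
func (s *service) Stop() {
	s.cancel()
	s.wg.Wait()
}

func main() {
	s := &service{}
	s.Start()
	time.Sleep(25 * time.Millisecond)
	s.Stop()
}
```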
-func (k *Kafka) receiver() { +func (k *Kafka) receiver(ctx context.Context, ac telegraf.Accumulator) { + k.messages = make(map[telegraf.TrackingID]*sarama.ConsumerMessage) + + acc := ac.WithTracking(k.MaxUndeliveredMessages) + sem := make(semaphore, k.MaxUndeliveredMessages) + for { select { - case <-k.done: + case <-ctx.Done(): return - case err := <-k.errs: - if err != nil { - k.acc.AddError(fmt.Errorf("Consumer Error: %s\n", err)) - } - case msg := <-k.in: - if k.MaxMessageLen != 0 && len(msg.Value) > k.MaxMessageLen { - k.acc.AddError(fmt.Errorf("Message longer than max_message_len (%d > %d)", - len(msg.Value), k.MaxMessageLen)) - } else { - metrics, err := k.parser.Parse(msg.Value) + case track := <-acc.Delivered(): + <-sem + k.onDelivery(track) + case err := <-k.cluster.Errors(): + acc.AddError(err) + case sem <- empty{}: + select { + case <-ctx.Done(): + return + case track := <-acc.Delivered(): + // Once for the delivered message, once to leave the case + <-sem + <-sem + k.onDelivery(track) + case err := <-k.cluster.Errors(): + <-sem + acc.AddError(err) + case msg := <-k.cluster.Messages(): + err := k.onMessage(acc, msg) if err != nil { - k.acc.AddError(fmt.Errorf("Message Parse Error\nmessage: %s\nerror: %s", - string(msg.Value), err.Error())) + acc.AddError(err) + <-sem } - for _, metric := range metrics { - k.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time()) - } - } - - if !k.doNotCommitMsgs { - // TODO(cam) this locking can be removed if this PR gets merged: - // https://github.com/wvanbergen/kafka/pull/84 - k.Lock() - k.Cluster.MarkOffset(msg, "") - k.Unlock() } } } } +func (k *Kafka) markOffset(msg *sarama.ConsumerMessage) { + if !k.doNotCommitMsgs { + k.cluster.MarkOffset(msg, "") + } +} + +func (k *Kafka) onMessage(acc telegraf.TrackingAccumulator, msg *sarama.ConsumerMessage) error { + if k.MaxMessageLen != 0 && len(msg.Value) > k.MaxMessageLen { + k.markOffset(msg) + return fmt.Errorf("Message longer than max_message_len (%d > %d)", + len(msg.Value), k.MaxMessageLen) + } + + metrics, err := k.parser.Parse(msg.Value) + if err != nil { + return err + } + + id := acc.AddTrackingMetricGroup(metrics) + k.messages[id] = msg + + return nil +} + +func (k *Kafka) onDelivery(track telegraf.DeliveryInfo) { + msg, ok := k.messages[track.ID()] + if !ok { + log.Printf("E! [inputs.kafka_consumer] Could not mark message delivered: %d", track.ID()) + } + + if track.Delivered() { + k.markOffset(msg) + } + delete(k.messages, track.ID()) +} + func (k *Kafka) Stop() { - k.Lock() - defer k.Unlock() - close(k.done) - if err := k.Cluster.Close(); err != nil { - k.acc.AddError(fmt.Errorf("Error closing consumer: %s\n", err.Error())) + k.cancel() + k.wg.Wait() + + if err := k.cluster.Close(); err != nil { + log.Printf("E! 
[inputs.kafka_consumer] Error closing consumer: %v", err) } } @@ -243,6 +290,8 @@ func (k *Kafka) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("kafka_consumer", func() telegraf.Input { - return &Kafka{} + return &Kafka{ + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + } }) } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go index a145a938a..23f9e0f92 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go @@ -38,7 +38,6 @@ func TestReadsMetricsFromKafka(t *testing.T) { ConsumerGroup: "telegraf_test_consumers", Topics: []string{testTopic}, Brokers: brokerPeers, - PointBuffer: 100000, Offset: "oldest", } p, _ := parsers.NewInfluxParser() diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index 18f7f80be..5bb7740a5 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -1,13 +1,14 @@ package kafka_consumer import ( + "context" "strings" "testing" + "github.com/Shopify/sarama" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - - "github.com/Shopify/sarama" "github.com/stretchr/testify/assert" ) @@ -18,31 +19,57 @@ const ( invalidMsg = "cpu_load_short,host=server01 1422568543702900257\n" ) -func newTestKafka() (*Kafka, chan *sarama.ConsumerMessage) { - in := make(chan *sarama.ConsumerMessage, 1000) - k := Kafka{ - ConsumerGroup: "test", - Topics: []string{"telegraf"}, - Brokers: []string{"localhost:9092"}, - Offset: "oldest", - in: in, - doNotCommitMsgs: true, - errs: make(chan error, 1000), - done: make(chan struct{}), +type TestConsumer struct { + errors chan error + messages chan *sarama.ConsumerMessage +} + +func (c *TestConsumer) Errors() <-chan error { + return c.errors +} + +func (c *TestConsumer) Messages() <-chan *sarama.ConsumerMessage { + return c.messages +} + +func (c *TestConsumer) MarkOffset(msg *sarama.ConsumerMessage, metadata string) { +} + +func (c *TestConsumer) Close() error { + return nil +} + +func (c *TestConsumer) Inject(msg *sarama.ConsumerMessage) { + c.messages <- msg +} + +func newTestKafka() (*Kafka, *TestConsumer) { + consumer := &TestConsumer{ + errors: make(chan error), + messages: make(chan *sarama.ConsumerMessage, 1000), } - return &k, in + k := Kafka{ + cluster: consumer, + ConsumerGroup: "test", + Topics: []string{"telegraf"}, + Brokers: []string{"localhost:9092"}, + Offset: "oldest", + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + doNotCommitMsgs: true, + messages: make(map[telegraf.TrackingID]*sarama.ConsumerMessage), + } + return &k, consumer } // Test that the parser parses kafka messages into points func TestRunParser(t *testing.T) { - k, in := newTestKafka() + k, consumer := newTestKafka() acc := testutil.Accumulator{} - k.acc = &acc - defer close(k.done) + ctx := context.Background() k.parser, _ = parsers.NewInfluxParser() - go k.receiver() - in <- saramaMsg(testMsg) + go k.receiver(ctx, &acc) + consumer.Inject(saramaMsg(testMsg)) acc.Wait(1) assert.Equal(t, acc.NFields(), 1) @@ -50,14 +77,13 @@ func TestRunParser(t *testing.T) { // Test that the parser ignores invalid messages func TestRunParserInvalidMsg(t *testing.T) { - k, in := newTestKafka() + k, consumer := newTestKafka() acc := testutil.Accumulator{} - 
k.acc = &acc - defer close(k.done) + ctx := context.Background() k.parser, _ = parsers.NewInfluxParser() - go k.receiver() - in <- saramaMsg(invalidMsg) + go k.receiver(ctx, &acc) + consumer.Inject(saramaMsg(invalidMsg)) acc.WaitError(1) assert.Equal(t, acc.NFields(), 0) @@ -66,15 +92,14 @@ func TestRunParserInvalidMsg(t *testing.T) { // Test that overlong messages are dropped func TestDropOverlongMsg(t *testing.T) { const maxMessageLen = 64 * 1024 - k, in := newTestKafka() + k, consumer := newTestKafka() k.MaxMessageLen = maxMessageLen acc := testutil.Accumulator{} - k.acc = &acc - defer close(k.done) + ctx := context.Background() overlongMsg := strings.Repeat("v", maxMessageLen+1) - go k.receiver() - in <- saramaMsg(overlongMsg) + go k.receiver(ctx, &acc) + consumer.Inject(saramaMsg(overlongMsg)) acc.WaitError(1) assert.Equal(t, acc.NFields(), 0) @@ -82,14 +107,13 @@ func TestDropOverlongMsg(t *testing.T) { // Test that the parser parses kafka messages into points func TestRunParserAndGather(t *testing.T) { - k, in := newTestKafka() + k, consumer := newTestKafka() acc := testutil.Accumulator{} - k.acc = &acc - defer close(k.done) + ctx := context.Background() k.parser, _ = parsers.NewInfluxParser() - go k.receiver() - in <- saramaMsg(testMsg) + go k.receiver(ctx, &acc) + consumer.Inject(saramaMsg(testMsg)) acc.Wait(1) acc.GatherError(k.Gather) @@ -101,14 +125,13 @@ func TestRunParserAndGather(t *testing.T) { // Test that the parser parses kafka messages into points func TestRunParserAndGatherGraphite(t *testing.T) { - k, in := newTestKafka() + k, consumer := newTestKafka() acc := testutil.Accumulator{} - k.acc = &acc - defer close(k.done) + ctx := context.Background() k.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) - go k.receiver() - in <- saramaMsg(testMsgGraphite) + go k.receiver(ctx, &acc) + consumer.Inject(saramaMsg(testMsgGraphite)) acc.Wait(1) acc.GatherError(k.Gather) @@ -120,17 +143,16 @@ func TestRunParserAndGatherGraphite(t *testing.T) { // Test that the parser parses kafka messages into points func TestRunParserAndGatherJSON(t *testing.T) { - k, in := newTestKafka() + k, consumer := newTestKafka() acc := testutil.Accumulator{} - k.acc = &acc - defer close(k.done) + ctx := context.Background() k.parser, _ = parsers.NewParser(&parsers.Config{ DataFormat: "json", MetricName: "kafka_json_test", }) - go k.receiver() - in <- saramaMsg(testMsgJSON) + go k.receiver(ctx, &acc) + consumer.Inject(saramaMsg(testMsgJSON)) acc.Wait(1) acc.GatherError(k.Gather) diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md index 0ec668c40..da3ce43f5 100644 --- a/plugins/inputs/mqtt_consumer/README.md +++ b/plugins/inputs/mqtt_consumer/README.md @@ -1,14 +1,11 @@ # MQTT Consumer Input Plugin -The [MQTT](http://mqtt.org/) consumer plugin reads from -specified MQTT topics and adds messages to InfluxDB. -The plugin expects messages in the -[Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). +The [MQTT][mqtt] consumer plugin reads from the specified MQTT topics +and creates metrics using one of the supported [input data formats][]. ### Configuration: ```toml -# Read metrics from MQTT topic(s) [[inputs.mqtt_consumer]] ## MQTT broker URLs to be used. The format should be scheme://host:port, ## schema can be tcp, ssl, or ws. 
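The reworked kafka_consumer tests above all follow one shape: build the plugin around the fake consumer, run `receiver` in a goroutine, inject a message, and block on the accumulator until the expected number of metrics or errors arrives. A condensed sketch of that harness, using the same channel-injection idea with invented types in place of the plugin and `testutil.Accumulator`:

```go
package main

import (
	"context"
	"fmt"
	"strings"
)

type consumer struct{ msgs chan string }

func (c *consumer) Inject(msg string) { c.msgs <- msg }

// receive parses injected messages until the context is cancelled,
// reporting results on out so a test can wait deterministically.
func receive(ctx context.Context, c *consumer, out chan<- int) {
	for {
		select {
		case <-ctx.Done():
			return
		case msg := <-c.msgs:
			// Stand-in for parser.Parse: count whitespace-separated fields.
			out <- len(strings.Fields(msg))
		}
	}
}

func main() {
	c := &consumer{msgs: make(chan string, 1)}
	out := make(chan int, 1)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	go receive(ctx, c, out)
	c.Inject("cpu value=1 1422568543702900257")
	fmt.Println("parsed fields:", <-out) // parsed fields: 3
}
```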
@@ -26,6 +23,16 @@ The plugin expects messages in the ## Connection timeout for initial connection in seconds connection_timeout = "30s" + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + ## Topics to subscribe to topics = [ "telegraf/host01/cpu", @@ -62,3 +69,6 @@ The plugin expects messages in the - All measurements are tagged with the incoming topic, ie `topic=telegraf/host01/cpu` + +[mqtt]: https://mqtt.org +[input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 6d1e2cf58..03c3696f0 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -1,25 +1,31 @@ package mqtt_consumer import ( + "context" "errors" "fmt" "log" "strings" "time" + "github.com/eclipse/paho.mqtt.golang" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" - - "github.com/eclipse/paho.mqtt.golang" ) -// 30 Seconds is the default used by paho.mqtt.golang -var defaultConnectionTimeout = internal.Duration{Duration: 30 * time.Second} +var ( + // 30 Seconds is the default used by paho.mqtt.golang + defaultConnectionTimeout = internal.Duration{Duration: 30 * time.Second} + + defaultMaxUndeliveredMessages = 1000 +) type ConnectionState int +type empty struct{} +type semaphore chan empty const ( Disconnected ConnectionState = iota @@ -28,12 +34,13 @@ const ( ) type MQTTConsumer struct { - Servers []string - Topics []string - Username string - Password string - QoS int `toml:"qos"` - ConnectionTimeout internal.Duration `toml:"connection_timeout"` + Servers []string + Topics []string + Username string + Password string + QoS int `toml:"qos"` + ConnectionTimeout internal.Duration `toml:"connection_timeout"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` parser parsers.Parser @@ -45,9 +52,14 @@ type MQTTConsumer struct { tls.ClientConfig client mqtt.Client - acc telegraf.Accumulator + acc telegraf.TrackingAccumulator state ConnectionState subscribed bool + sem semaphore + messages map[telegraf.TrackingID]bool + + ctx context.Context + cancel context.CancelFunc } var sampleConfig = ` @@ -67,6 +79,16 @@ var sampleConfig = ` ## Connection timeout for initial connection in seconds connection_timeout = "30s" + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. 
+ # max_undelivered_messages = 1000 + ## Topics to subscribe to topics = [ "telegraf/host01/cpu", @@ -118,7 +140,6 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { return errors.New("persistent_session requires client_id") } - m.acc = acc if m.QoS > 2 || m.QoS < 0 { return fmt.Errorf("qos value must be 0, 1, or 2: %d", m.QoS) } @@ -127,6 +148,9 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { return fmt.Errorf("connection_timeout must be greater than 1s: %s", m.ConnectionTimeout.Duration) } + m.acc = acc.WithTracking(m.MaxUndeliveredMessages) + m.ctx, m.cancel = context.WithCancel(context.Background()) + opts, err := m.createOpts() if err != nil { return err @@ -146,8 +170,10 @@ func (m *MQTTConsumer) connect() error { return err } - log.Printf("I! [inputs.mqtt_consumer]: connected %v", m.Servers) + log.Printf("I! [inputs.mqtt_consumer] Connected %v", m.Servers) m.state = Connected + m.sem = make(semaphore, m.MaxUndeliveredMessages) + m.messages = make(map[telegraf.TrackingID]bool) // Only subscribe on first connection when using persistent sessions. On // subsequent connections the subscriptions should be stored in the @@ -172,38 +198,64 @@ func (m *MQTTConsumer) connect() error { func (m *MQTTConsumer) onConnectionLost(c mqtt.Client, err error) { m.acc.AddError(fmt.Errorf("connection lost: %v", err)) - log.Printf("D! [inputs.mqtt_consumer]: disconnected %v", m.Servers) + log.Printf("D! [inputs.mqtt_consumer] Disconnected %v", m.Servers) m.state = Disconnected return } func (m *MQTTConsumer) recvMessage(c mqtt.Client, msg mqtt.Message) { - topic := msg.Topic() + for { + select { + case track := <-m.acc.Delivered(): + _, ok := m.messages[track.ID()] + if !ok { + // Added by a previous connection + continue + } + <-m.sem + // No ack, MQTT does not support durable handling + delete(m.messages, track.ID()) + case m.sem <- empty{}: + err := m.onMessage(m.acc, msg) + if err != nil { + m.acc.AddError(err) + <-m.sem + } + return + } + } +} + +func (m *MQTTConsumer) onMessage(acc telegraf.TrackingAccumulator, msg mqtt.Message) error { metrics, err := m.parser.Parse(msg.Payload()) if err != nil { - m.acc.AddError(err) + return err } + topic := msg.Topic() for _, metric := range metrics { - tags := metric.Tags() - tags["topic"] = topic - m.acc.AddFields(metric.Name(), metric.Fields(), tags, metric.Time()) + metric.AddTag("topic", topic) } + + id := acc.AddTrackingMetricGroup(metrics) + m.messages[id] = true + return nil } func (m *MQTTConsumer) Stop() { if m.state == Connected { - log.Printf("D! [inputs.mqtt_consumer]: disconnecting %v", m.Servers) + log.Printf("D! [inputs.mqtt_consumer] Disconnecting %v", m.Servers) m.client.Disconnect(200) - log.Printf("D! [inputs.mqtt_consumer]: disconnected %v", m.Servers) + log.Printf("D! [inputs.mqtt_consumer] Disconnected %v", m.Servers) m.state = Disconnected } + m.cancel() } func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error { if m.state == Disconnected { m.state = Connecting - log.Printf("D! [inputs.mqtt_consumer]: connecting %v", m.Servers) + log.Printf("D! [inputs.mqtt_consumer] Connecting %v", m.Servers) m.connect() } @@ -246,7 +298,7 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { for _, server := range m.Servers { // Preserve support for host:port style servers; deprecated in Telegraf 1.4.4 if !strings.Contains(server, "://") { - log.Printf("W! [inputs.mqtt_consumer] server %q should be updated to use `scheme://host:port` format", server) + log.Printf("W! 
[inputs.mqtt_consumer] Server %q should be updated to use `scheme://host:port` format", server) if tlsCfg == nil { server = "tcp://" + server } else { @@ -267,8 +319,9 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { func init() { inputs.Add("mqtt_consumer", func() telegraf.Input { return &MQTTConsumer{ - ConnectionTimeout: defaultConnectionTimeout, - state: Disconnected, + ConnectionTimeout: defaultConnectionTimeout, + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + state: Disconnected, } }) } diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index c04bd18a7..4209963bb 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -3,12 +3,9 @@ package mqtt_consumer import ( "testing" - "github.com/influxdata/telegraf/plugins/parsers" - "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" - "github.com/eclipse/paho.mqtt.golang" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" ) const ( @@ -71,47 +68,6 @@ func TestPersistentClientIDFail(t *testing.T) { assert.Error(t, err) } -func TestRunParser(t *testing.T) { - n := newTestMQTTConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - n.parser, _ = parsers.NewInfluxParser() - - n.recvMessage(nil, mqttMsg(testMsg)) - - if a := acc.NFields(); a != 1 { - t.Errorf("got %v, expected %v", a, 1) - } -} - -// Test that the parser ignores invalid messages -func TestRunParserInvalidMsg(t *testing.T) { - n := newTestMQTTConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - n.parser, _ = parsers.NewInfluxParser() - - n.recvMessage(nil, mqttMsg(invalidMsg)) - - if a := acc.NFields(); a != 0 { - t.Errorf("got %v, expected %v", a, 0) - } - assert.Len(t, acc.Errors, 1) -} - -// Test that the parser parses line format messages into metrics -func TestRunParserAndGather(t *testing.T) { - n := newTestMQTTConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - n.parser, _ = parsers.NewInfluxParser() - - n.recvMessage(nil, mqttMsg(testMsg)) - - acc.AssertContainsFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(23422)}) -} - func mqttMsg(val string) mqtt.Message { return &message{ topic: "telegraf/unit_test", diff --git a/plugins/inputs/nats_consumer/README.md b/plugins/inputs/nats_consumer/README.md index 18dd57f07..8a89d90c5 100644 --- a/plugins/inputs/nats_consumer/README.md +++ b/plugins/inputs/nats_consumer/README.md @@ -1,16 +1,14 @@ # NATS Consumer Input Plugin -The [NATS](http://www.nats.io/about/) consumer plugin reads from -specified NATS subjects and adds messages to InfluxDB. The plugin expects messages -in the [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). -A [Queue Group](http://www.nats.io/documentation/concepts/nats-queueing/) -is used when subscribing to subjects so multiple instances of telegraf can read -from a NATS cluster in parallel. +The [NATS][nats] consumer plugin reads from the specified NATS subjects and +creates metrics using one of the supported [input data formats][]. -## Configuration +A [Queue Group][queue group] is used when subscribing to subjects so multiple +instances of telegraf can read from a NATS cluster in parallel. 
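Every consumer touched by this patch bounds its in-flight work the same way: a `chan empty` semaphore sized to `max_undelivered_messages`, acquired before reading a message and released when the tracking accumulator reports delivery. A minimal end-to-end sketch of that backpressure loop, with delivery simulated on a plain channel rather than coming from a real output:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type empty struct{}
type semaphore chan empty

func consume(ctx context.Context, msgs <-chan string, delivered <-chan int, max int) {
	sem := make(semaphore, max)
	for {
		select {
		case <-ctx.Done():
			return
		case <-delivered:
			// An earlier message finished its trip through the outputs;
			// free a slot so another message may be read.
			<-sem
		case sem <- empty{}:
			// A slot is free: we may take on one more undelivered message.
			select {
			case <-ctx.Done():
				return
			case <-delivered:
				// Drained a delivery while holding a fresh slot: release
				// both the fresh slot and the finished message's slot.
				<-sem
				<-sem
			case msg := <-msgs:
				fmt.Println("reading:", msg)
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	msgs := make(chan string)
	delivered := make(chan int)

	go consume(ctx, msgs, delivered, 1)

	msgs <- "m1"   // takes the only slot
	delivered <- 1 // m1 delivered; slot freed
	msgs <- "m2"   // now m2 can be read
	delivered <- 2
	cancel()
	time.Sleep(10 * time.Millisecond) // let the goroutine observe ctx.Done()
}
```

The nested select is the subtle part: a fresh slot is acquired optimistically, and if a delivery arrives instead of a message, two tokens must be released, which is why the `<-sem; <-sem` pair appears in each plugin's receiver.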
+ +### Configuration: ```toml -# Read metrics from NATS subject(s) [[inputs.nats_consumer]] ## urls of NATS servers servers = ["nats://localhost:4222"] @@ -20,13 +18,29 @@ from a NATS cluster in parallel. subjects = ["telegraf"] ## name a queue group queue_group = "telegraf_consumers" - ## Maximum number of metrics to buffer between collection intervals - metric_buffer = 100000 - ## Data format to consume. + ## Sets the limits for pending msgs and bytes for each subscription + ## These shouldn't need to be adjusted except in very high throughput scenarios + # pending_message_limit = 65536 + # pending_bytes_limit = 67108864 + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` + +[nats]: https://www.nats.io/about/ +[input data formats]: /docs/DATA_FORMATS_INPUT.md +[queue group]: https://www.nats.io/documentation/concepts/nats-queueing/ diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index dac80476d..4411d8c3e 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -1,6 +1,7 @@ package natsconsumer import ( + "context" "fmt" "log" "sync" @@ -11,6 +12,13 @@ import ( nats "github.com/nats-io/go-nats" ) +var ( + defaultMaxUndeliveredMessages = 1000 +) + +type empty struct{} +type semaphore chan empty + type natsError struct { conn *nats.Conn sub *nats.Subscription @@ -23,48 +31,58 @@ func (e natsError) Error() string { } type natsConsumer struct { - QueueGroup string - Subjects []string - Servers []string - Secure bool + QueueGroup string `toml:"queue_group"` + Subjects []string `toml:"subjects"` + Servers []string `toml:"servers"` + Secure bool `toml:"secure"` // Client pending limits: - PendingMessageLimit int - PendingBytesLimit int + PendingMessageLimit int `toml:"pending_message_limit"` + PendingBytesLimit int `toml:"pending_bytes_limit"` + + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` // Legacy metric buffer support; deprecated in v0.10.3 MetricBuffer int + conn *nats.Conn + subs []*nats.Subscription + parser parsers.Parser - - sync.Mutex - wg sync.WaitGroup - Conn *nats.Conn - Subs []*nats.Subscription - // channel for all incoming NATS messages in chan *nats.Msg // channel for all NATS read errors - errs chan error - done chan struct{} - acc telegraf.Accumulator + errs chan error + acc telegraf.TrackingAccumulator + wg sync.WaitGroup + cancel context.CancelFunc } var sampleConfig = ` ## urls of NATS servers - # servers = ["nats://localhost:4222"] + servers = ["nats://localhost:4222"] ## Use Transport Layer Security - # secure = false + secure = false ## subject(s) to consume - # subjects = ["telegraf"] + subjects = ["telegraf"] ## name a queue group - # queue_group = "telegraf_consumers" + queue_group = "telegraf_consumers" ## Sets the limits for 
pending msgs and bytes for each subscription ## These shouldn't need to be adjusted except in very high throughput scenarios # pending_message_limit = 65536 # pending_bytes_limit = 67108864 + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: @@ -94,10 +112,7 @@ func (n *natsConsumer) natsErrHandler(c *nats.Conn, s *nats.Subscription, e erro // Start the nats consumer. Caller must call *natsConsumer.Stop() to clean up. func (n *natsConsumer) Start(acc telegraf.Accumulator) error { - n.Lock() - defer n.Unlock() - - n.acc = acc + n.acc = acc.WithTracking(n.MaxUndeliveredMessages) var connectErr error @@ -112,89 +127,106 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error { opts.Secure = n.Secure - if n.Conn == nil || n.Conn.IsClosed() { - n.Conn, connectErr = opts.Connect() + if n.conn == nil || n.conn.IsClosed() { + n.conn, connectErr = opts.Connect() if connectErr != nil { return connectErr } // Setup message and error channels n.errs = make(chan error) - n.Conn.SetErrorHandler(n.natsErrHandler) + n.conn.SetErrorHandler(n.natsErrHandler) n.in = make(chan *nats.Msg, 1000) for _, subj := range n.Subjects { - sub, err := n.Conn.QueueSubscribe(subj, n.QueueGroup, func(m *nats.Msg) { + sub, err := n.conn.QueueSubscribe(subj, n.QueueGroup, func(m *nats.Msg) { n.in <- m }) if err != nil { return err } // ensure that the subscription has been processed by the server - if err = n.Conn.Flush(); err != nil { + if err = n.conn.Flush(); err != nil { return err } // set the subscription pending limits if err = sub.SetPendingLimits(n.PendingMessageLimit, n.PendingBytesLimit); err != nil { return err } - n.Subs = append(n.Subs, sub) + n.subs = append(n.subs, sub) } } - n.done = make(chan struct{}) + ctx, cancel := context.WithCancel(context.Background()) + n.cancel = cancel // Start the message reader n.wg.Add(1) - go n.receiver() + go func() { + defer n.wg.Done() + go n.receiver(ctx) + }() + log.Printf("I! Started the NATS consumer service, nats: %v, subjects: %v, queue: %v\n", - n.Conn.ConnectedUrl(), n.Subjects, n.QueueGroup) + n.conn.ConnectedUrl(), n.Subjects, n.QueueGroup) return nil } // receiver() reads all incoming messages from NATS, and parses them into // telegraf metrics. -func (n *natsConsumer) receiver() { - defer n.wg.Done() +func (n *natsConsumer) receiver(ctx context.Context) { + sem := make(semaphore, n.MaxUndeliveredMessages) + for { select { - case <-n.done: + case <-ctx.Done(): return + case <-n.acc.Delivered(): + <-sem case err := <-n.errs: - n.acc.AddError(fmt.Errorf("E! error reading from %s\n", err.Error())) - case msg := <-n.in: - metrics, err := n.parser.Parse(msg.Data) - if err != nil { - n.acc.AddError(fmt.Errorf("E! 
subject: %s, error: %s", msg.Subject, err.Error())) - } + n.acc.AddError(err) + case sem <- empty{}: + select { + case <-ctx.Done(): + return + case err := <-n.errs: + <-sem + n.acc.AddError(err) + case <-n.acc.Delivered(): + <-sem + <-sem + case msg := <-n.in: + metrics, err := n.parser.Parse(msg.Data) + if err != nil { + n.acc.AddError(fmt.Errorf("subject: %s, error: %s", msg.Subject, err.Error())) + <-sem + continue + } - for _, metric := range metrics { - n.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time()) + n.acc.AddTrackingMetricGroup(metrics) } } } } func (n *natsConsumer) clean() { - for _, sub := range n.Subs { + for _, sub := range n.subs { if err := sub.Unsubscribe(); err != nil { - n.acc.AddError(fmt.Errorf("E! Error unsubscribing from subject %s in queue %s: %s\n", + n.acc.AddError(fmt.Errorf("Error unsubscribing from subject %s in queue %s: %s\n", sub.Subject, sub.Queue, err.Error())) } } - if n.Conn != nil && !n.Conn.IsClosed() { - n.Conn.Close() + if n.conn != nil && !n.conn.IsClosed() { + n.conn.Close() } } func (n *natsConsumer) Stop() { - n.Lock() - close(n.done) + n.cancel() n.wg.Wait() n.clean() - n.Unlock() } func (n *natsConsumer) Gather(acc telegraf.Accumulator) error { @@ -204,12 +236,13 @@ func (n *natsConsumer) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("nats_consumer", func() telegraf.Input { return &natsConsumer{ - Servers: []string{"nats://localhost:4222"}, - Secure: false, - Subjects: []string{"telegraf"}, - QueueGroup: "telegraf_consumers", - PendingBytesLimit: nats.DefaultSubPendingBytesLimit, - PendingMessageLimit: nats.DefaultSubPendingMsgsLimit, + Servers: []string{"nats://localhost:4222"}, + Secure: false, + Subjects: []string{"telegraf"}, + QueueGroup: "telegraf_consumers", + PendingBytesLimit: nats.DefaultSubPendingBytesLimit, + PendingMessageLimit: nats.DefaultSubPendingMsgsLimit, + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, } }) } diff --git a/plugins/inputs/nats_consumer/nats_consumer_test.go b/plugins/inputs/nats_consumer/nats_consumer_test.go deleted file mode 100644 index a1f499554..000000000 --- a/plugins/inputs/nats_consumer/nats_consumer_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package natsconsumer - -import ( - "testing" - - "github.com/influxdata/telegraf/plugins/parsers" - "github.com/influxdata/telegraf/testutil" - nats "github.com/nats-io/go-nats" - "github.com/stretchr/testify/assert" -) - -const ( - testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257\n" - testMsgGraphite = "cpu.load.short.graphite 23422 1454780029" - testMsgJSON = "{\"a\": 5, \"b\": {\"c\": 6}}\n" - invalidMsg = "cpu_load_short,host=server01 1422568543702900257\n" - metricBuffer = 5 -) - -func newTestNatsConsumer() (*natsConsumer, chan *nats.Msg) { - in := make(chan *nats.Msg, metricBuffer) - n := &natsConsumer{ - QueueGroup: "test", - Subjects: []string{"telegraf"}, - Servers: []string{"nats://localhost:4222"}, - Secure: false, - in: in, - errs: make(chan error, metricBuffer), - done: make(chan struct{}), - } - return n, in -} - -// Test that the parser parses NATS messages into metrics -func TestRunParser(t *testing.T) { - n, in := newTestNatsConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - defer close(n.done) - - n.parser, _ = parsers.NewInfluxParser() - n.wg.Add(1) - go n.receiver() - in <- natsMsg(testMsg) - - acc.Wait(1) -} - -// Test that the parser ignores invalid messages -func TestRunParserInvalidMsg(t *testing.T) { - n, in := newTestNatsConsumer() - acc := 
testutil.Accumulator{} - n.acc = &acc - defer close(n.done) - - n.parser, _ = parsers.NewInfluxParser() - n.wg.Add(1) - go n.receiver() - in <- natsMsg(invalidMsg) - - acc.WaitError(1) - assert.Contains(t, acc.Errors[0].Error(), "E! subject: telegraf, error: metric parse error") - assert.EqualValues(t, 0, acc.NMetrics()) -} - -// Test that the parser parses line format messages into metrics -func TestRunParserAndGather(t *testing.T) { - n, in := newTestNatsConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - defer close(n.done) - - n.parser, _ = parsers.NewInfluxParser() - n.wg.Add(1) - go n.receiver() - in <- natsMsg(testMsg) - - n.Gather(&acc) - - acc.Wait(1) - acc.AssertContainsFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(23422)}) -} - -// Test that the parser parses graphite format messages into metrics -func TestRunParserAndGatherGraphite(t *testing.T) { - n, in := newTestNatsConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - defer close(n.done) - - n.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) - n.wg.Add(1) - go n.receiver() - in <- natsMsg(testMsgGraphite) - - n.Gather(&acc) - - acc.Wait(1) - acc.AssertContainsFields(t, "cpu_load_short_graphite", - map[string]interface{}{"value": float64(23422)}) -} - -// Test that the parser parses json format messages into metrics -func TestRunParserAndGatherJSON(t *testing.T) { - n, in := newTestNatsConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - defer close(n.done) - - n.parser, _ = parsers.NewParser(&parsers.Config{ - DataFormat: "json", - MetricName: "nats_json_test", - }) - n.wg.Add(1) - go n.receiver() - in <- natsMsg(testMsgJSON) - - n.Gather(&acc) - - acc.Wait(1) - acc.AssertContainsFields(t, "nats_json_test", - map[string]interface{}{ - "a": float64(5), - "b_c": float64(6), - }) -} - -func natsMsg(val string) *nats.Msg { - return &nats.Msg{ - Subject: "telegraf", - Data: []byte(val), - } -} diff --git a/plugins/inputs/nsq_consumer/README.md b/plugins/inputs/nsq_consumer/README.md index 5ac156eec..0dae26e8c 100644 --- a/plugins/inputs/nsq_consumer/README.md +++ b/plugins/inputs/nsq_consumer/README.md @@ -1,9 +1,9 @@ # NSQ Consumer Input Plugin -The [NSQ](http://nsq.io/) consumer plugin polls a specified NSQD -topic and adds messages to InfluxDB. This plugin allows a message to be in any of the supported `data_format` types. +The [NSQ][nsq] consumer plugin reads from NSQD and creates metrics using one +of the supported [input data formats][]. -## Configuration +### Configuration: ```toml # Read metrics from NSQD topic(s) @@ -18,6 +18,16 @@ topic and adds messages to InfluxDB. This plugin allows a message to be in any o channel = "consumer" max_in_flight = 100 + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: @@ -25,5 +35,5 @@ topic and adds messages to InfluxDB. 
This plugin allows a message to be in any o data_format = "influx" ``` -## Testing -The `nsq_consumer_test` mocks out the interaction with `NSQD`. It requires no outside dependencies. +[nsq]: https://nsq.io +[input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/nsq_consumer/nsq_consumer.go b/plugins/inputs/nsq_consumer/nsq_consumer.go index 0823b3ac9..de7572316 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer.go @@ -1,7 +1,9 @@ package nsq_consumer import ( - "fmt" + "context" + "log" + "sync" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" @@ -9,17 +11,38 @@ import ( nsq "github.com/nsqio/go-nsq" ) +const ( + defaultMaxUndeliveredMessages = 1000 +) + +type empty struct{} +type semaphore chan empty + +type logger struct{} + +func (l *logger) Output(calldepth int, s string) error { + log.Println("D! [inputs.nsq_consumer] " + s) + return nil +} + //NSQConsumer represents the configuration of the plugin type NSQConsumer struct { - Server string - Nsqd []string - Nsqlookupd []string - Topic string - Channel string - MaxInFlight int - parser parsers.Parser - consumer *nsq.Consumer - acc telegraf.Accumulator + Server string `toml:"server"` + Nsqd []string `toml:"nsqd"` + Nsqlookupd []string `toml:"nsqlookupd"` + Topic string `toml:"topic"` + Channel string `toml:"channel"` + MaxInFlight int `toml:"max_in_flight"` + + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + + parser parsers.Parser + consumer *nsq.Consumer + + mu sync.Mutex + messages map[telegraf.TrackingID]*nsq.Message + wg sync.WaitGroup + cancel context.CancelFunc } var sampleConfig = ` @@ -33,6 +56,16 @@ var sampleConfig = ` channel = "consumer" max_in_flight = 100 + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + ## Data format to consume. 
## Each data format has its own unique set of configuration options, read ## more about them here: @@ -40,12 +73,6 @@ var sampleConfig = ` data_format = "influx" ` -func init() { - inputs.Add("nsq_consumer", func() telegraf.Input { - return &NSQConsumer{} - }) -} - // SetParser takes the data_format from the config and finds the right parser for that format func (n *NSQConsumer) SetParser(parser parsers.Parser) { n.parser = parser @@ -62,32 +89,88 @@ func (n *NSQConsumer) Description() string { } // Start pulls data from nsq -func (n *NSQConsumer) Start(acc telegraf.Accumulator) error { - n.acc = acc +func (n *NSQConsumer) Start(ac telegraf.Accumulator) error { + acc := ac.WithTracking(n.MaxUndeliveredMessages) + sem := make(semaphore, n.MaxUndeliveredMessages) + n.messages = make(map[telegraf.TrackingID]*nsq.Message, n.MaxUndeliveredMessages) + + ctx, cancel := context.WithCancel(context.Background()) + n.cancel = cancel + n.connect() - n.consumer.AddConcurrentHandlers(nsq.HandlerFunc(func(message *nsq.Message) error { + n.consumer.SetLogger(&logger{}, nsq.LogLevelInfo) + n.consumer.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error { metrics, err := n.parser.Parse(message.Body) if err != nil { - acc.AddError(fmt.Errorf("E! NSQConsumer Parse Error\nmessage:%s\nerror:%s", string(message.Body), err.Error())) + acc.AddError(err) + // Remove the message from the queue + message.Finish() return nil } - for _, metric := range metrics { - n.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time()) + if len(metrics) == 0 { + message.Finish() + return nil } - message.Finish() + + select { + case <-ctx.Done(): + return ctx.Err() + case sem <- empty{}: + break + } + + n.mu.Lock() + id := acc.AddTrackingMetricGroup(metrics) + n.messages[id] = message + n.mu.Unlock() + message.DisableAutoResponse() return nil - }), n.MaxInFlight) + })) if len(n.Nsqlookupd) > 0 { n.consumer.ConnectToNSQLookupds(n.Nsqlookupd) } n.consumer.ConnectToNSQDs(append(n.Nsqd, n.Server)) + + n.wg.Add(1) + go func() { + defer n.wg.Done() + n.onDelivery(ctx, acc, sem) + }() return nil } +func (n *NSQConsumer) onDelivery(ctx context.Context, acc telegraf.TrackingAccumulator, sem semaphore) { + for { + select { + case <-ctx.Done(): + return + case info := <-acc.Delivered(): + n.mu.Lock() + msg, ok := n.messages[info.ID()] + if !ok { + n.mu.Unlock() + continue + } + <-sem + delete(n.messages, info.ID()) + n.mu.Unlock() + + if info.Delivered() { + msg.Finish() + } else { + msg.Requeue(-1) + } + } + } +} + // Stop processing messages func (n *NSQConsumer) Stop() { + n.cancel() + n.wg.Wait() n.consumer.Stop() + <-n.consumer.StopChan } // Gather is a noop @@ -107,3 +190,11 @@ func (n *NSQConsumer) connect() error { } return nil } + +func init() { + inputs.Add("nsq_consumer", func() telegraf.Input { + return &NSQConsumer{ + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + } + }) +} diff --git a/plugins/inputs/nsq_consumer/nsq_consumer_test.go b/plugins/inputs/nsq_consumer/nsq_consumer_test.go index a8e743c12..8376f7bb1 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer_test.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer_test.go @@ -36,11 +36,12 @@ func TestReadsMetricsFromNSQ(t *testing.T) { newMockNSQD(script, addr.String()) consumer := &NSQConsumer{ - Server: "127.0.0.1:4155", - Topic: "telegraf", - Channel: "consume", - MaxInFlight: 1, - Nsqd: []string{"127.0.0.1:4155"}, + Server: "127.0.0.1:4155", + Topic: "telegraf", + Channel: "consume", + MaxInFlight: 1, + MaxUndeliveredMessages: 
defaultMaxUndeliveredMessages, + Nsqd: []string{"127.0.0.1:4155"}, } p, _ := parsers.NewInfluxParser() diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go index 73c321f81..c83f3eb68 100644 --- a/plugins/inputs/socket_listener/socket_listener.go +++ b/plugins/inputs/socket_listener/socket_listener.go @@ -2,6 +2,7 @@ package socket_listener import ( "bufio" + "crypto/tls" "fmt" "io" "log" @@ -9,11 +10,8 @@ import ( "os" "strings" "sync" - "time" - "crypto/tls" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/internal/tls" @@ -120,7 +118,7 @@ func (ssl *streamSocketListener) read(c net.Conn) { continue } for _, m := range metrics { - ssl.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) + ssl.AddMetric(m) } } @@ -156,7 +154,7 @@ func (psl *packetSocketListener) listen() { continue } for _, m := range metrics { - psl.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) + psl.AddMetric(m) } } } diff --git a/plugins/outputs/discard/discard.go b/plugins/outputs/discard/discard.go index 4a6d634b7..919f74b47 100644 --- a/plugins/outputs/discard/discard.go +++ b/plugins/outputs/discard/discard.go @@ -7,11 +7,13 @@ import ( type Discard struct{} -func (d *Discard) Connect() error { return nil } -func (d *Discard) Close() error { return nil } -func (d *Discard) SampleConfig() string { return "" } -func (d *Discard) Description() string { return "Send metrics to nowhere at all" } -func (d *Discard) Write(metrics []telegraf.Metric) error { return nil } +func (d *Discard) Connect() error { return nil } +func (d *Discard) Close() error { return nil } +func (d *Discard) SampleConfig() string { return "" } +func (d *Discard) Description() string { return "Send metrics to nowhere at all" } +func (d *Discard) Write(metrics []telegraf.Metric) error { + return nil +} func init() { outputs.Add("discard", func() telegraf.Output { return &Discard{} }) diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 1b8e06a49..0192d935f 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -144,7 +144,7 @@ func (p *PrometheusClient) auth(h http.Handler) http.Handler { }) } -func (p *PrometheusClient) Start() error { +func (p *PrometheusClient) Connect() error { defaultCollectors := map[string]bool{ "gocollector": true, "process": true, @@ -200,15 +200,6 @@ func (p *PrometheusClient) Start() error { return nil } -func (p *PrometheusClient) Stop() { - // plugin gets cleaned up in Close() already. 
-} - -func (p *PrometheusClient) Connect() error { - // This service output does not need to make any further connections - return nil -} - func (p *PrometheusClient) Close() error { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() diff --git a/plugins/outputs/prometheus_client/prometheus_client_test.go b/plugins/outputs/prometheus_client/prometheus_client_test.go index bd2398a23..b6bbe35fd 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_test.go @@ -600,7 +600,7 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) { pClient, p, err := setupPrometheus() require.NoError(t, err) - defer pClient.Stop() + defer pClient.Close() now := time.Now() tags := make(map[string]string) @@ -675,7 +675,7 @@ func setupPrometheus() (*PrometheusClient, *prometheus_input.Prometheus, error) pTesting = NewClient() pTesting.Listen = "localhost:9127" pTesting.Path = "/metrics" - err := pTesting.Start() + err := pTesting.Connect() if err != nil { return nil, nil, err } diff --git a/plugins/processors/topk/topk.go b/plugins/processors/topk/topk.go index 36283482b..df5d542e3 100644 --- a/plugins/processors/topk/topk.go +++ b/plugins/processors/topk/topk.go @@ -10,6 +10,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/processors" ) @@ -76,12 +77,12 @@ var sampleConfig = ` ## tags. If this setting is different than "" the plugin will add a ## tag (which name will be the value of this setting) to each metric with ## the value of the calculated GroupBy tag. Useful for debugging - # add_groupby_tag = "" + # add_groupby_tag = "" ## These settings provide a way to know the position of each metric in ## the top k. The 'add_rank_field' setting allows to specify for which ## fields the position is required. If the list is non empty, then a field - ## will be added to each and every metric for each string present in this + ## will be added to each and every metric for each string present in this ## setting. This field will contain the ranking of the group that ## the metric belonged to when aggregated over that field. ## The name of the field will be set to the name of the aggregation field, @@ -208,6 +209,11 @@ func (t *TopK) Apply(in ...telegraf.Metric) []telegraf.Metric { // Add the metrics received to our internal cache for _, m := range in { + // When tracking metrics this plugin could deadlock the input by + // holding undelivered metrics while the input waits for metrics to be + // delivered. Instead, treat all handled metrics as delivered and + // produced metrics as untracked in a similar way to aggregators. 
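+ // Drop marks the metric as delivered for any tracking accumulator; the + // metrics emitted from push() are fresh copies built with metric.New, so + // the tracked originals never leave this processor.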
+ m.Drop() // Check if the metric has any of the fields over which we are aggregating hasField := false @@ -281,7 +287,6 @@ func (t *TopK) push() []telegraf.Metric { // Create a one dimensional list with the top K metrics of each key for i, ag := range aggregations[0:min(t.K, len(aggregations))] { - // Check whether of not we need to add fields of tags to the selected metrics if len(t.aggFieldSet) != 0 || len(t.rankFieldSet) != 0 || groupTag != "" { for _, m := range t.cache[ag.groupbykey] { @@ -311,7 +316,16 @@ func (t *TopK) push() []telegraf.Metric { t.Reset() - return ret + result := make([]telegraf.Metric, 0, len(ret)) + for _, m := range ret { + copy, err := metric.New(m.Name(), m.Tags(), m.Fields(), m.Time(), m.Type()) + if err != nil { + continue + } + result = append(result, copy) + } + + return result } // Function that generates the aggregation functions diff --git a/plugins/processors/topk/topk_test.go b/plugins/processors/topk/topk_test.go index 67d80cbf9..ff0eb4d8b 100644 --- a/plugins/processors/topk/topk_test.go +++ b/plugins/processors/topk/topk_test.go @@ -1,12 +1,12 @@ package topk import ( - "reflect" "testing" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" ) // Key, value pair that represents a telegraf.Metric Field @@ -95,7 +95,7 @@ func deepCopy(a []telegraf.Metric) []telegraf.Metric { func belongs(m telegraf.Metric, ms []telegraf.Metric) bool { for _, i := range ms { - if reflect.DeepEqual(i, m) { + if testutil.MetricEqual(i, m) { return true } } diff --git a/processor.go b/processor.go index f2b5133a5..e084adab7 100644 --- a/processor.go +++ b/processor.go @@ -7,6 +7,6 @@ type Processor interface { // Description returns a one-sentence description on the Input Description() string - // Apply the filter to the given metric + // Apply the filter to the given metric. Apply(in ...Metric) []Metric } diff --git a/testutil/accumulator.go b/testutil/accumulator.go index d4a4bebd8..c13f02ab3 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -14,6 +14,15 @@ import ( "github.com/stretchr/testify/assert" ) +var ( + lastID uint64 +) + +func newTrackingID() telegraf.TrackingID { + atomic.AddUint64(&lastID, 1) + return telegraf.TrackingID(lastID) +} + // Metric defines a single point measurement type Metric struct { Measurement string @@ -23,7 +32,7 @@ type Metric struct { } func (p *Metric) String() string { - return fmt.Sprintf("%s %v", p.Measurement, p.Fields) + return fmt.Sprintf("%s %v %v", p.Measurement, p.Tags, p.Fields) } // Accumulator defines a mocked out accumulator @@ -31,11 +40,12 @@ type Accumulator struct { sync.Mutex *sync.Cond - Metrics []*Metric - nMetrics uint64 - Discard bool - Errors []error - debug bool + Metrics []*Metric + nMetrics uint64 + Discard bool + Errors []error + debug bool + delivered chan telegraf.DeliveryInfo } func (a *Accumulator) NMetrics() uint64 { @@ -154,6 +164,33 @@ func (a *Accumulator) AddHistogram( a.AddFields(measurement, fields, tags, timestamp...) 
} +func (a *Accumulator) AddMetric(m telegraf.Metric) { + a.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) +} + +func (a *Accumulator) WithTracking(maxTracked int) telegraf.TrackingAccumulator { + return a +} + +func (a *Accumulator) AddTrackingMetric(m telegraf.Metric) telegraf.TrackingID { + a.AddMetric(m) + return newTrackingID() +} + +func (a *Accumulator) AddTrackingMetricGroup(group []telegraf.Metric) telegraf.TrackingID { + for _, m := range group { + a.AddMetric(m) + } + return newTrackingID() +} + +func (a *Accumulator) Delivered() <-chan telegraf.DeliveryInfo { + if a.delivered == nil { + a.delivered = make(chan telegraf.DeliveryInfo) + } + return a.delivered +} + // AddError appends the given error to Accumulator.Errors. func (a *Accumulator) AddError(err error) { if err == nil { diff --git a/testutil/metric.go b/testutil/metric.go index 56debd093..6d0db4e17 100644 --- a/testutil/metric.go +++ b/testutil/metric.go @@ -41,6 +41,18 @@ func newMetricDiff(metric telegraf.Metric) *metricDiff { return m } +func MetricEqual(expected, actual telegraf.Metric) bool { + var lhs, rhs *metricDiff + if expected != nil { + lhs = newMetricDiff(expected) + } + if actual != nil { + rhs = newMetricDiff(actual) + } + + return cmp.Equal(lhs, rhs) +} + func RequireMetricEqual(t *testing.T, expected, actual telegraf.Metric) { t.Helper() @@ -60,11 +72,11 @@ func RequireMetricEqual(t *testing.T, expected, actual telegraf.Metric) { func RequireMetricsEqual(t *testing.T, expected, actual []telegraf.Metric) { t.Helper() - lhs := make([]*metricDiff, len(expected)) + lhs := make([]*metricDiff, 0, len(expected)) for _, m := range expected { lhs = append(lhs, newMetricDiff(m)) } - rhs := make([]*metricDiff, len(actual)) + rhs := make([]*metricDiff, 0, len(actual)) for _, m := range actual { rhs = append(rhs, newMetricDiff(m)) } From 7166833364e4b28a41b287cac7bf55ea1608d027 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 5 Nov 2018 13:52:14 -0800 Subject: [PATCH 0352/1815] Update changelog --- CHANGELOG.md | 14 ++++++++++++++ README.md | 5 +++++ 2 files changed, 19 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3bf8e0631..c70c1c348 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,16 @@ transfer of metrics in any format via HTTP, it is recommended to use `http_listener_v2` instead. +- Input plugins are no longer limited from adding metrics when the output is + writing, and new metrics will move into the metric buffer as needed. This + will provide more robust degradation and recovery when writing to a slow + output at high throughput. + + To avoid over consumption when reading from queue consumers: `kafka_consumer`, + `amqp_consumer`, `mqtt_consumer`, `nats_consumer`, and `nsq_consumer` use + the new option `max_undelivered_messages` to limit the number of outstanding + unwritten metrics. + #### New Inputs - [http_listener_v2](/plugins/inputs/http_listener_v2/README.md) - Contributed by @jul1u5 @@ -41,10 +51,14 @@ - [#4934](https://github.com/influxdata/telegraf/pull/4934): Add LUN to datasource translation in vsphere input. - [#4798](https://github.com/influxdata/telegraf/pull/4798): Allow connecting to prometheus via unix socket. - [#4920](https://github.com/influxdata/telegraf/pull/4920): Add scraping for Prometheus endpoint in Kubernetes. +- [#4938](https://github.com/influxdata/telegraf/pull/4938): Add per output flush_interval, metric_buffer_limit and metric_batch_size. 
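+
+  As a rough sizing sketch for the `max_undelivered_messages` option described
+  above (illustrative values only, not part of this patch): with roughly 10
+  metrics per message and an output batching 1000 metrics, 100 outstanding
+  messages fill exactly one batch.
+
+  ```toml
+  [[inputs.nats_consumer]]
+    subjects = ["telegraf"]
+    ## 100 in-flight messages x ~10 metrics each = one full output batch
+    max_undelivered_messages = 100
+
+  [[outputs.influxdb]]
+    ## per-output batch size (see #4938 in this release)
+    metric_batch_size = 1000
+  ```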
#### Bugfixes - [#4950](https://github.com/influxdata/telegraf/pull/4950): Remove the time_key from the field values in JSON parser. +- [#3968](https://github.com/influxdata/telegraf/issues/3968): Fix input time rounding when using a custom interval. +- [#4938](https://github.com/influxdata/telegraf/pull/4938): Fix potential deadlock or leaked resources on restart/reload. +- [#2919](https://github.com/influxdata/telegraf/pull/2919): Fix outputs blocking inputs when batch size is reached. ## v1.8.3 [2018-10-30] diff --git a/README.md b/README.md index 6016925b1..b8477e952 100644 --- a/README.md +++ b/README.md @@ -50,6 +50,11 @@ Telegraf requires golang version 1.9 or newer, the Makefile requires GNU make. make ``` +### Changelog + +View the [changelog](/CHANGELOG.md) for the latest updates and changes by +version. + ### Nightly Builds These builds are generated from the master branch: From 3b0cee346c319e98b3fcd24c7f965b562c8ee58e Mon Sep 17 00:00:00 2001 From: kelwang <8237958+kelwang@users.noreply.github.com> Date: Mon, 5 Nov 2018 17:19:08 -0500 Subject: [PATCH 0353/1815] Add jenkins input plugin (#4289) --- Godeps | 0 plugins/inputs/all/all.go | 1 + plugins/inputs/jenkins/README.md | 96 ++++ plugins/inputs/jenkins/client.go | 156 +++++++ plugins/inputs/jenkins/jenkins.go | 443 ++++++++++++++++++ plugins/inputs/jenkins/jenkins_test.go | 615 +++++++++++++++++++++++++ 6 files changed, 1311 insertions(+) create mode 100644 Godeps create mode 100644 plugins/inputs/jenkins/README.md create mode 100644 plugins/inputs/jenkins/client.go create mode 100644 plugins/inputs/jenkins/jenkins.go create mode 100644 plugins/inputs/jenkins/jenkins_test.go diff --git a/Godeps b/Godeps new file mode 100644 index 000000000..e69de29bb diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index c64fec0a7..cfdc12ad2 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -52,6 +52,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/ipset" _ "github.com/influxdata/telegraf/plugins/inputs/iptables" _ "github.com/influxdata/telegraf/plugins/inputs/ipvs" + _ "github.com/influxdata/telegraf/plugins/inputs/jenkins" _ "github.com/influxdata/telegraf/plugins/inputs/jolokia" _ "github.com/influxdata/telegraf/plugins/inputs/jolokia2" _ "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry" diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md new file mode 100644 index 000000000..16afcaa7c --- /dev/null +++ b/plugins/inputs/jenkins/README.md @@ -0,0 +1,96 @@ +# Jenkins Plugin + +The jenkins plugin gathers information about the nodes and jobs running in a Jenkins instance. + +This plugin does not require any additional plugin to be installed on Jenkins; it uses the Jenkins API to retrieve all the information needed.
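+
+Under the hood the plugin polls the Jenkins JSON API (defined in client.go
+below); assuming a stock Jenkins install, the endpoints queried look like:
+
+```
+/api/json                      # top level jobs
+/computer/api/json             # nodes
+/job/<name>/api/json           # (sub) jobs and their last build number
+/job/<name>/<number>/api/json  # a single build
+```
+
+(Nested jobs repeat the /job/<name> segment.)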
+ + ### Configuration: + +```toml + ## The Jenkins URL + url = "http://my-jenkins-instance:8080" + # username = "admin" + # password = "admin" + + ## Set response_timeout + response_timeout = "5s" + + ## Optional SSL Config + # ssl_ca = /path/to/cafile + # ssl_cert = /path/to/certfile + # ssl_key = /path/to/keyfile + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false + + ## Optional Max Job Build Age filter + ## Default 1 hour, ignore builds older than max_build_age + # max_build_age = "1h" + + ## Optional Sub Job Depth filter + ## Jenkins can have unlimited layers of sub jobs + ## This config limits the layers of pulling; the default value 0 means + ## unlimited pulling until there are no more sub jobs + # max_subjob_depth = 0 + + ## Optional Sub Job Per Layer + ## In workflow-multibranch-plugin, each branch will be created as a sub job. + ## This config limits the calls to only the latest branches in each layer; + ## empty will use the default value 10 + # max_subjob_per_layer = 10 + + ## Jobs to exclude from gathering + # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"] + + ## Nodes to exclude from gathering + # node_exclude = [ "node1", "node2" ] + + ## Worker pool for jenkins plugin only + ## Leaving this field empty will use the default value 5 + # max_connections = 5 +``` + +### Metrics: + +- jenkins_node + - tags: + - arch + - disk_path + - temp_path + - node_name + - status ("online", "offline") + - fields: + - disk_available + - temp_available + - memory_available + - memory_total + - swap_available + - swap_total + - response_time + +- jenkins_job + - tags: + - name + - parents + - result + - fields: + - duration + - result_code (0 = SUCCESS, 1 = FAILURE, 2 = NOT_BUILT, 3 = UNSTABLE, 4 = ABORTED) + +### Sample Queries: + +``` +SELECT mean("memory_available") AS "mean_memory_available", mean("memory_total") AS "mean_memory_total", mean("temp_available") AS "mean_temp_available" FROM "jenkins_node" WHERE time > now() - 15m GROUP BY time(:interval:) FILL(null) +``` + +``` +SELECT mean("duration") AS "mean_duration" FROM "jenkins_job" WHERE time > now() - 24h GROUP BY time(:interval:) FILL(null) +``` + +### Example Output: + +``` +$ ./telegraf --config telegraf.conf --input-filter jenkins --test +jenkins_node,arch=Linux\ (amd64),disk_path=/var/jenkins_home,temp_path=/tmp,host=myhost,node_name=master swap_total=4294963200,memory_available=586711040,memory_total=6089498624,status=online,response_time=1000i,disk_available=152392036352,temp_available=152392036352,swap_available=3503263744 1516031535000000000 +jenkins_job,host=myhost,name=JOB1,parents=apps/br1,result=SUCCESS duration=2831i,result_code=0i 1516026630000000000 +jenkins_job,host=myhost,name=JOB2,parents=apps/br2,result=SUCCESS duration=2285i,result_code=0i 1516027230000000000 +``` \ No newline at end of file diff --git a/plugins/inputs/jenkins/client.go b/plugins/inputs/jenkins/client.go new file mode 100644 index 000000000..284b5eccf --- /dev/null +++ b/plugins/inputs/jenkins/client.go @@ -0,0 +1,156 @@ +package jenkins + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strings" +) + +type client struct { + baseURL string + httpClient *http.Client + username string + password string + sessionCookie *http.Cookie + semaphore chan struct{} +} + +func newClient(httpClient *http.Client, url, username, password string, maxConnections int) *client { + return &client{ + baseURL: url, + httpClient: httpClient, + username: username, + password: password, + semaphore: make(chan struct{}, maxConnections), + } +}
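+
+// The buffered semaphore channel acts as a counting semaphore: doGet sends
+// into it before issuing a request and receives from it when the request
+// completes, so at most maxConnections API calls are in flight at once.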
+ +func (c *client) init() error { + // get session cookie + req, err := http.NewRequest("GET", c.baseURL, nil) + if err != nil { + return err + } + if c.username != "" && c.password != "" { + // set auth + req.SetBasicAuth(c.username, c.password) + } + resp, err := c.httpClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + for _, cc := range resp.Cookies() { + if strings.Contains(cc.Name, "JSESSIONID") { + c.sessionCookie = cc + break + } + } + // first api fetch + if err := c.doGet(context.Background(), jobPath, new(jobResponse)); err != nil { + return err + } + return nil +} + +func (c *client) doGet(ctx context.Context, url string, v interface{}) error { + req, err := createGetRequest(c.baseURL+url, c.username, c.password, c.sessionCookie) + if err != nil { + return err + } + select { + case c.semaphore <- struct{}{}: + break + case <-ctx.Done(): + return ctx.Err() + } + resp, err := c.httpClient.Do(req.WithContext(ctx)) + if err != nil { + <-c.semaphore + return err + } + defer func() { + resp.Body.Close() + <-c.semaphore + }() + // Clear invalid token if unauthorized + if resp.StatusCode == http.StatusUnauthorized { + c.sessionCookie = nil + return APIError{ + URL: url, + StatusCode: resp.StatusCode, + Title: resp.Status, + } + } + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return APIError{ + URL: url, + StatusCode: resp.StatusCode, + Title: resp.Status, + } + } + if resp.StatusCode == http.StatusNoContent { + return APIError{ + URL: url, + StatusCode: resp.StatusCode, + Title: resp.Status, + } + } + if err = json.NewDecoder(resp.Body).Decode(v); err != nil { + return err + } + return nil +} + +type APIError struct { + URL string + StatusCode int + Title string + Description string +} + +func (e APIError) Error() string { + if e.Description != "" { + return fmt.Sprintf("[%s] %s: %s", e.URL, e.Title, e.Description) + } + return fmt.Sprintf("[%s] %s", e.URL, e.Title) +} + +func createGetRequest(url string, username, password string, sessionCookie *http.Cookie) (*http.Request, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + if sessionCookie != nil { + req.AddCookie(sessionCookie) + } else if username != "" && password != "" { + req.SetBasicAuth(username, password) + } + req.Header.Add("Accept", "application/json") + return req, nil +} + +func (c *client) getJobs(ctx context.Context, jr *jobRequest) (js *jobResponse, err error) { + js = new(jobResponse) + url := jobPath + if jr != nil { + url = jr.URL() + } + err = c.doGet(ctx, url, js) + return js, err +} + +func (c *client) getBuild(ctx context.Context, jr jobRequest, number int64) (b *buildResponse, err error) { + b = new(buildResponse) + url := jr.buildURL(number) + err = c.doGet(ctx, url, b) + return b, err +} + +func (c *client) getAllNodes(ctx context.Context) (nodeResp *nodeResponse, err error) { + nodeResp = new(nodeResponse) + err = c.doGet(ctx, nodePath, nodeResp) + return nodeResp, err +} diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go new file mode 100644 index 000000000..b052b22a9 --- /dev/null +++ b/plugins/inputs/jenkins/jenkins.go @@ -0,0 +1,443 @@ +package jenkins + +import ( + "context" + "errors" + "fmt" + "log" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// 
Jenkins plugin gathers information about the nodes and jobs running in a Jenkins instance. +type Jenkins struct { + URL string + Username string + Password string + // HTTP Timeout specified as a string - 3s, 1m, 1h + ResponseTimeout internal.Duration + + tls.ClientConfig + client *client + + MaxConnections int `toml:"max_connections"` + MaxBuildAge internal.Duration `toml:"max_build_age"` + MaxSubJobDepth int `toml:"max_subjob_depth"` + MaxSubJobPerLayer int `toml:"max_subjob_per_layer"` + JobExclude []string `toml:"job_exclude"` + jobFilter filter.Filter + + NodeExclude []string `toml:"node_exclude"` + nodeFilter filter.Filter + + semaphore chan struct{} +} + +const sampleConfig = ` + ## The Jenkins URL + url = "http://my-jenkins-instance:8080" + # username = "admin" + # password = "admin" + + ## Set response_timeout + response_timeout = "5s" + + ## Optional SSL Config + # ssl_ca = /path/to/cafile + # ssl_cert = /path/to/certfile + # ssl_key = /path/to/keyfile + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false + + ## Optional Max Job Build Age filter + ## Default 1 hour, ignore builds older than max_build_age + # max_build_age = "1h" + + ## Optional Sub Job Depth filter + ## Jenkins can have unlimited layers of sub jobs + ## This config limits the layers of pulling; the default value 0 means + ## unlimited pulling until there are no more sub jobs + # max_subjob_depth = 0 + + ## Optional Sub Job Per Layer + ## In workflow-multibranch-plugin, each branch will be created as a sub job. + ## This config limits the calls to only the latest branches in each layer; + ## empty will use the default value 10 + # max_subjob_per_layer = 10 + + ## Jobs to exclude from gathering + # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"] + + ## Nodes to exclude from gathering + # node_exclude = [ "node1", "node2" ] + + ## Worker pool for jenkins plugin only + ## Leaving this field empty will use the default value 5 + # max_connections = 5 +` + +// measurement +const ( + measurementNode = "jenkins_node" + measurementJob = "jenkins_job" +) + +// SampleConfig implements telegraf.Input interface +func (j *Jenkins) SampleConfig() string { + return sampleConfig +} + +// Description implements telegraf.Input interface +func (j *Jenkins) Description() string { + return "Read jobs and cluster metrics from Jenkins instances" +} + +// Gather implements telegraf.Input interface +func (j *Jenkins) Gather(acc telegraf.Accumulator) error { + if j.client == nil { + client, err := j.newHTTPClient() + if err != nil { + return err + } + if err = j.initialize(client); err != nil { + return err + } + } + + j.gatherNodesData(acc) + j.gatherJobs(acc) + + return nil +} + +func (j *Jenkins) newHTTPClient() (*http.Client, error) { + tlsCfg, err := j.ClientConfig.TLSConfig() + if err != nil { + return nil, fmt.Errorf("error parse jenkins config[%s]: %v", j.URL, err) + } + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + MaxIdleConns: j.MaxConnections, + }, + Timeout: j.ResponseTimeout.Duration, + }, nil +} + +// separate the client as a dependency to use the httptest Client for mocking +func (j *Jenkins) initialize(client *http.Client) error { + var err error + + // init job filter + j.jobFilter, err = filter.Compile(j.JobExclude) + if err != nil { + return fmt.Errorf("error compile job filters[%s]: %v", j.URL, err) + } + + // init node filter + j.nodeFilter, err = filter.Compile(j.NodeExclude) + if err != nil { + return fmt.Errorf("error compile node filters[%s]: %v", j.URL, err) + } + + // init tcp 
pool with default value + if j.MaxConnections <= 0 { + j.MaxConnections = 5 + } + + // default sub jobs can be acquired + if j.MaxSubJobPerLayer <= 0 { + j.MaxSubJobPerLayer = 10 + } + + j.semaphore = make(chan struct{}, j.MaxConnections) + + j.client = newClient(client, j.URL, j.Username, j.Password, j.MaxConnections) + + return j.client.init() +} + +func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error { + + tags := map[string]string{} + if n.DisplayName == "" { + return fmt.Errorf("error empty node name") + } + + tags["node_name"] = n.DisplayName + // filter out excluded node_name + if j.nodeFilter != nil && j.nodeFilter.Match(tags["node_name"]) { + return nil + } + + tags["arch"] = n.MonitorData.HudsonNodeMonitorsArchitectureMonitor + + tags["status"] = "online" + if n.Offline { + tags["status"] = "offline" + } + monitorData := n.MonitorData + if monitorData.HudsonNodeMonitorsArchitectureMonitor == "" { + return errors.New("empty monitor data, please check your permission") + } + tags["disk_path"] = monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Path + tags["temp_path"] = monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Path + + fields := map[string]interface{}{ + "response_time": monitorData.HudsonNodeMonitorsResponseTimeMonitor.Average, + "disk_available": monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Size, + "temp_available": monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Size, + "swap_available": monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapAvailable, + "memory_available": monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryAvailable, + "swap_total": monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapTotal, + "memory_total": monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryTotal, + } + acc.AddFields(measurementNode, fields, tags) + + return nil +} + +func (j *Jenkins) gatherNodesData(acc telegraf.Accumulator) { + + nodeResp, err := j.client.getAllNodes(context.Background()) + if err != nil { + acc.AddError(err) + return + } + // get node data + for _, node := range nodeResp.Computers { + err = j.gatherNodeData(node, acc) + if err == nil { + continue + } + acc.AddError(err) + } +} + +func (j *Jenkins) gatherJobs(acc telegraf.Accumulator) { + js, err := j.client.getJobs(context.Background(), nil) + if err != nil { + acc.AddError(err) + return + } + var wg sync.WaitGroup + for _, job := range js.Jobs { + wg.Add(1) + go func(name string, wg *sync.WaitGroup, acc telegraf.Accumulator) { + defer wg.Done() + if err := j.getJobDetail(jobRequest{ + name: name, + parents: []string{}, + layer: 0, + }, acc); err != nil { + acc.AddError(err) + } + }(job.Name, &wg, acc) + } + wg.Wait() +} + +// wrap the tcp request with doGet +// block tcp request if buffered channel is full +func (j *Jenkins) doGet(tcp func() error) error { + j.semaphore <- struct{}{} + if err := tcp(); err != nil { + <-j.semaphore + return err + } + <-j.semaphore + return nil +} + +func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error { + if j.MaxSubJobDepth > 0 && jr.layer == j.MaxSubJobDepth { + return nil + } + // filter out excluded job. 
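+ // hierarchyName() joins the parent names and the job name with "/", so + // exclude patterns such as "job3/*" from the sample config also match + // sub jobs.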
+ if j.jobFilter != nil && j.jobFilter.Match(jr.hierarchyName()) { + return nil + } + + js, err := j.client.getJobs(context.Background(), &jr) + if err != nil { + return err + } + + var wg sync.WaitGroup + for k, ij := range js.Jobs { + if k < len(js.Jobs)-j.MaxSubJobPerLayer-1 { + continue + } + wg.Add(1) + // schedule tcp fetch for inner jobs + go func(ij innerJob, jr jobRequest, acc telegraf.Accumulator) { + defer wg.Done() + if err := j.getJobDetail(jobRequest{ + name: ij.Name, + parents: jr.combined(), + layer: jr.layer + 1, + }, acc); err != nil { + acc.AddError(err) + } + }(ij, jr, acc) + } + wg.Wait() + + // collect build info + number := js.LastBuild.Number + if number < 1 { + // no build info + return nil + } + build, err := j.client.getBuild(context.Background(), jr, number) + if err != nil { + return err + } + + if build.Building { + log.Printf("D! Ignore running build on %s, build %v", jr.name, number) + return nil + } + + // stop if build is too old + // Higher up in gatherJobs + cutoff := time.Now().Add(-1 * j.MaxBuildAge.Duration) + + // Here we just test + if build.GetTimestamp().Before(cutoff) { + return nil + } + + gatherJobBuild(jr, build, acc) + return nil +} + +type nodeResponse struct { + Computers []node `json:"computer"` +} + +type node struct { + DisplayName string `json:"displayName"` + Offline bool `json:"offline"` + MonitorData monitorData `json:"monitorData"` +} + +type monitorData struct { + HudsonNodeMonitorsArchitectureMonitor string `json:"hudson.node_monitors.ArchitectureMonitor"` + HudsonNodeMonitorsDiskSpaceMonitor nodeSpaceMonitor `json:"hudson.node_monitors.DiskSpaceMonitor"` + HudsonNodeMonitorsResponseTimeMonitor struct { + Average int64 `json:"average"` + } `json:"hudson.node_monitors.ResponseTimeMonitor"` + HudsonNodeMonitorsSwapSpaceMonitor struct { + SwapAvailable float64 `json:"availableSwapSpace"` + SwapTotal float64 `json:"totalSwapSpace"` + MemoryAvailable float64 `json:"availablePhysicalMemory"` + MemoryTotal float64 `json:"totalPhysicalMemory"` + } `json:"hudson.node_monitors.SwapSpaceMonitor"` + HudsonNodeMonitorsTemporarySpaceMonitor nodeSpaceMonitor `json:"hudson.node_monitors.TemporarySpaceMonitor"` +} + +type nodeSpaceMonitor struct { + Path string `json:"path"` + Size float64 `json:"size"` +} + +type jobResponse struct { + LastBuild jobBuild `json:"lastBuild"` + Jobs []innerJob `json:"jobs"` + Name string `json:"name"` +} + +type innerJob struct { + Name string `json:"name"` + URL string `json:"url"` + Color string `json:"color"` +} + +type jobBuild struct { + Number int64 + URL string +} + +type buildResponse struct { + Building bool `json:"building"` + Duration int64 `json:"duration"` + Result string `json:"result"` + Timestamp int64 `json:"timestamp"` +} + +func (b *buildResponse) GetTimestamp() time.Time { + return time.Unix(0, int64(b.Timestamp)*int64(time.Millisecond)) +} + +const ( + nodePath = "/computer/api/json" + jobPath = "/api/json" +) + +type jobRequest struct { + name string + parents []string + layer int +} + +func (jr jobRequest) combined() []string { + return append(jr.parents, jr.name) +} + +func (jr jobRequest) URL() string { + return "/job/" + strings.Join(jr.combined(), "/job/") + jobPath +} + +func (jr jobRequest) buildURL(number int64) string { + return "/job/" + strings.Join(jr.combined(), "/job/") + "/" + strconv.Itoa(int(number)) + jobPath +} + +func (jr jobRequest) hierarchyName() string { + return strings.Join(jr.combined(), "/") +} + +func (jr jobRequest) parentsString() string { + return 
strings.Join(jr.parents, "/") } + +func gatherJobBuild(jr jobRequest, b *buildResponse, acc telegraf.Accumulator) { + tags := map[string]string{"name": jr.name, "parents": jr.parentsString(), "result": b.Result} + fields := make(map[string]interface{}) + fields["duration"] = b.Duration + fields["result_code"] = mapResultCode(b.Result) + + acc.AddFields(measurementJob, fields, tags, b.GetTimestamp()) +} + +// perform status mapping +func mapResultCode(s string) int { + switch strings.ToLower(s) { + case "success": + return 0 + case "failure": + return 1 + case "not_built": + return 2 + case "unstable": + return 3 + case "aborted": + return 4 + } + return -1 +} + +func init() { + inputs.Add("jenkins", func() telegraf.Input { + return &Jenkins{} + }) +} diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go new file mode 100644 index 000000000..7724fc0e3 --- /dev/null +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -0,0 +1,615 @@ +package jenkins + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "sort" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" +) + +func TestJobRequest(t *testing.T) { + tests := []struct { + input jobRequest + output string + }{ + { + jobRequest{}, + "", + }, + { + jobRequest{ + name: "1", + parents: []string{"3", "2"}, + }, + "3/2/1", + }, + } + for _, test := range tests { + output := test.input.hierarchyName() + if output != test.output { + t.Errorf("Expected %s, got %s\n", test.output, output) + } + } +} + +func TestResultCode(t *testing.T) { + tests := []struct { + input string + output int + }{ + {"SUCCESS", 0}, + {"Failure", 1}, + {"NOT_BUILT", 2}, + {"UNSTABLE", 3}, + {"ABORTED", 4}, + } + for _, test := range tests { + output := mapResultCode(test.input) + if output != test.output { + t.Errorf("Expected %d, got %d\n", test.output, output) + } + } +} + +type mockHandler struct { + // responseMap maps a request path to the response interface + // we will output the serialized response in JSON when serving HTTP + // example '/computer/api/json': *gojenkins. 
+ responseMap map[string]interface{} +} + +func (h mockHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + o, ok := h.responseMap[r.URL.Path] + if !ok { + w.WriteHeader(http.StatusNotFound) + return + } + + b, err := json.Marshal(o) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + if len(b) == 0 { + w.WriteHeader(http.StatusNoContent) + return + } + w.Write(b) +} + +func TestGatherNodeData(t *testing.T) { + tests := []struct { + name string + input mockHandler + output *testutil.Accumulator + wantErr bool + }{ + { + name: "bad node data", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": struct{}{}, + "/computer/api/json": nodeResponse{ + Computers: []node{ + {}, + {}, + {}, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "bad empty monitor data", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": struct{}{}, + "/computer/api/json": nodeResponse{ + Computers: []node{ + {DisplayName: "master"}, + {DisplayName: "node1"}, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "filtered nodes", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": struct{}{}, + "/computer/api/json": nodeResponse{ + Computers: []node{ + {DisplayName: "ignore-1"}, + {DisplayName: "ignore-2"}, + }, + }, + }, + }, + }, + + { + name: "normal data collection", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": struct{}{}, + "/computer/api/json": nodeResponse{ + Computers: []node{ + { + DisplayName: "master", + MonitorData: monitorData{ + HudsonNodeMonitorsArchitectureMonitor: "linux", + HudsonNodeMonitorsResponseTimeMonitor: struct { + Average int64 `json:"average"` + }{ + Average: 10032, + }, + HudsonNodeMonitorsDiskSpaceMonitor: nodeSpaceMonitor{ + Path: "/path/1", + Size: 123, + }, + HudsonNodeMonitorsTemporarySpaceMonitor: nodeSpaceMonitor{ + Path: "/path/2", + Size: 245, + }, + HudsonNodeMonitorsSwapSpaceMonitor: struct { + SwapAvailable float64 `json:"availableSwapSpace"` + SwapTotal float64 `json:"totalSwapSpace"` + MemoryAvailable float64 `json:"availablePhysicalMemory"` + MemoryTotal float64 `json:"totalPhysicalMemory"` + }{ + SwapAvailable: 212, + SwapTotal: 500, + MemoryAvailable: 101, + MemoryTotal: 500, + }, + }, + Offline: false, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "node_name": "master", + "arch": "linux", + "status": "online", + "disk_path": "/path/1", + "temp_path": "/path/2", + }, + Fields: map[string]interface{}{ + "response_time": int64(10032), + "disk_available": float64(123), + "temp_available": float64(245), + "swap_available": float64(212), + "swap_total": float64(500), + "memory_available": float64(101), + "memory_total": float64(500), + }, + }, + }, + }, + }, + } + for _, test := range tests { + ts := httptest.NewServer(test.input) + defer ts.Close() + j := &Jenkins{ + URL: ts.URL, + ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + NodeExclude: []string{"ignore-1", "ignore-2"}, + } + te := j.initialize(&http.Client{Transport: &http.Transport{}}) + acc := new(testutil.Accumulator) + j.gatherNodesData(acc) + if err := acc.FirstError(); err != nil { + te = err + } + + if !test.wantErr && te != nil { + t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error()) + } else if test.wantErr && te == nil { + t.Fatalf("%s: expected err, got nil", test.name) + } + if test.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", 
test.name) + } else if test.output != nil && len(test.output.Metrics) > 0 { + for k, m := range test.output.Metrics[0].Tags { + if acc.Metrics[0].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[0].Tags[k]) + } + } + for k, m := range test.output.Metrics[0].Fields { + if acc.Metrics[0].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[0].Fields[k], acc.Metrics[0].Fields[k]) + } + } + } + } +} + +func TestInitialize(t *testing.T) { + mh := mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": struct{}{}, + }, + } + ts := httptest.NewServer(mh) + defer ts.Close() + mockClient := &http.Client{Transport: &http.Transport{}} + tests := []struct { + // name of the test + name string + input *Jenkins + output *Jenkins + wantErr bool + }{ + { + name: "bad jenkins config", + input: &Jenkins{ + URL: "http://a bad url", + ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + }, + wantErr: true, + }, + { + name: "has filter", + input: &Jenkins{ + URL: ts.URL, + ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + JobExclude: []string{"job1", "job2"}, + NodeExclude: []string{"node1", "node2"}, + }, + }, + { + name: "default config", + input: &Jenkins{ + URL: ts.URL, + ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + }, + output: &Jenkins{ + MaxConnections: 5, + MaxSubJobPerLayer: 10, + }, + }, + } + for _, test := range tests { + te := test.input.initialize(mockClient) + if !test.wantErr && te != nil { + t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error()) + } else if test.wantErr && te == nil { + t.Fatalf("%s: expected err, got nil", test.name) + } + if test.output != nil { + if test.input.client == nil { + t.Fatalf("%s: failed %s, jenkins instance shouldn't be nil", test.name, te.Error()) + } + if test.input.MaxConnections != test.output.MaxConnections { + t.Fatalf("%s: different MaxConnections Expected %d, got %d\n", test.name, test.output.MaxConnections, test.input.MaxConnections) + } + } + + } +} + +func TestGatherJobs(t *testing.T) { + tests := []struct { + name string + input mockHandler + output *testutil.Accumulator + wantErr bool + }{ + { + name: "empty job", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{}, + }, + }, + }, + { + name: "bad inner jobs", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "job1"}, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "jobs has no build", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "job1"}, + }, + }, + "/job/job1/api/json": &jobResponse{}, + }, + }, + }, + { + name: "bad build info", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "job1"}, + }, + }, + "/job/job1/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 1, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "ignore building job", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "job1"}, + }, + }, + "/job/job1/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 1, + }, + }, + "/job/job1/1/api/json": &buildResponse{ + Building: true, + }, + }, + }, + }, + { + name: "ignore old build", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": 
&jobResponse{ + Jobs: []innerJob{ + {Name: "job1"}, + }, + }, + "/job/job1/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 2, + }, + }, + "/job/job1/2/api/json": &buildResponse{ + Building: false, + Timestamp: 100, + }, + }, + }, + }, + { + name: "gather metrics", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "job1"}, + {Name: "job2"}, + }, + }, + "/job/job1/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 3, + }, + }, + "/job/job2/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 1, + }, + }, + "/job/job1/3/api/json": &buildResponse{ + Building: false, + Result: "SUCCESS", + Duration: 25558, + Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, + }, + "/job/job2/1/api/json": &buildResponse{ + Building: false, + Result: "FAILURE", + Duration: 1558, + Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "name": "job1", + "result": "SUCCESS", + }, + Fields: map[string]interface{}{ + "duration": int64(25558), + "result_code": 0, + }, + }, + { + Tags: map[string]string{ + "name": "job2", + "result": "FAILURE", + }, + Fields: map[string]interface{}{ + "duration": int64(1558), + "result_code": 1, + }, + }, + }, + }, + }, + { + name: "gather sub jobs, jobs filter", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "apps"}, + {Name: "ignore-1"}, + }, + }, + "/job/apps/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "k8s-cloud"}, + {Name: "chronograf"}, + {Name: "ignore-all"}, + }, + }, + "/job/apps/job/ignore-all/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "1"}, + {Name: "2"}, + }, + }, + "/job/apps/job/chronograf/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 1, + }, + }, + "/job/apps/job/k8s-cloud/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "PR-100"}, + {Name: "PR-101"}, + {Name: "PR-ignore2"}, + }, + }, + "/job/apps/job/k8s-cloud/job/PR-100/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 1, + }, + }, + "/job/apps/job/k8s-cloud/job/PR-101/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 4, + }, + }, + "/job/apps/job/chronograf/1/api/json": &buildResponse{ + Building: false, + Result: "FAILURE", + Duration: 1558, + Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, + }, + "/job/apps/job/k8s-cloud/job/PR-101/4/api/json": &buildResponse{ + Building: false, + Result: "SUCCESS", + Duration: 76558, + Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, + }, + "/job/apps/job/k8s-cloud/job/PR-100/1/api/json": &buildResponse{ + Building: false, + Result: "SUCCESS", + Duration: 91558, + Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "name": "PR-100", + "parents": "apps/k8s-cloud", + "result": "SUCCESS", + }, + Fields: map[string]interface{}{ + "duration": int64(91558), + "result_code": 0, + }, + }, + { + Tags: map[string]string{ + "name": "PR-101", + "parents": "apps/k8s-cloud", + "result": "SUCCESS", + }, + Fields: map[string]interface{}{ + "duration": int64(76558), + "result_code": 0, + }, + }, + { + Tags: map[string]string{ + "name": "chronograf", + "parents": "apps", + "result": "FAILURE", + }, + Fields: map[string]interface{}{ + "duration": int64(1558), + 
"result_code": 1, + }, + }, + }, + }, + }, + } + for _, test := range tests { + ts := httptest.NewServer(test.input) + defer ts.Close() + j := &Jenkins{ + URL: ts.URL, + MaxBuildAge: internal.Duration{Duration: time.Hour}, + ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + JobExclude: []string{ + "ignore-1", + "apps/ignore-all/*", + "apps/k8s-cloud/PR-ignore2", + }, + } + te := j.initialize(&http.Client{Transport: &http.Transport{}}) + acc := new(testutil.Accumulator) + acc.SetDebug(true) + j.gatherJobs(acc) + if err := acc.FirstError(); err != nil { + te = err + } + if !test.wantErr && te != nil { + t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error()) + } else if test.wantErr && te == nil { + t.Fatalf("%s: expected err, got nil", test.name) + } + + if test.output != nil && len(test.output.Metrics) > 0 { + // sort metrics + sort.Slice(acc.Metrics, func(i, j int) bool { + return strings.Compare(acc.Metrics[i].Tags["name"], acc.Metrics[j].Tags["name"]) < 0 + }) + for i := range test.output.Metrics { + for k, m := range test.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range test.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[0].Fields[k]) + } + } + } + + } + } +} From 574fa5a6be9f2aeec16bfb03cd73129b8708740d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 5 Nov 2018 14:19:46 -0800 Subject: [PATCH 0354/1815] Add support for fetching config over https (#4637) --- internal/config/config.go | 49 +++++++++++++++++++++++++++++++++------ 1 file changed, 42 insertions(+), 7 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index 7d266852a..469b80ade 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -7,6 +7,8 @@ import ( "io/ioutil" "log" "math" + "net/http" + "net/url" "os" "path/filepath" "regexp" @@ -589,7 +591,12 @@ func (c *Config) LoadConfig(path string) error { return err } } - tbl, err := parseFile(path) + data, err := loadConfig(path) + if err != nil { + return fmt.Errorf("Error loading %s, %s", path, err) + } + + tbl, err := parseConfig(data) if err != nil { return fmt.Errorf("Error parsing %s, %s", path, err) } @@ -736,15 +743,43 @@ func escapeEnv(value string) string { return envVarEscaper.Replace(value) } -// parseFile loads a TOML configuration from a provided path and -// returns the AST produced from the TOML parser. When loading the file, it -// will find environment variables and replace them. -func parseFile(fpath string) (*ast.Table, error) { - contents, err := ioutil.ReadFile(fpath) +func loadConfig(config string) ([]byte, error) { + u, err := url.Parse(config) if err != nil { return nil, err } - // ugh windows why + + switch u.Scheme { + case "https": // http not permitted + return fetchConfig(u) + default: + // If it isn't a https scheme, try it as a file. 
From 574fa5a6be9f2aeec16bfb03cd73129b8708740d Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 5 Nov 2018 14:19:46 -0800
Subject: [PATCH 0354/1815] Add support for fetching config over https (#4637)

---
 internal/config/config.go | 49 +++++++++++++++++++++++++++++++++------
 1 file changed, 42 insertions(+), 7 deletions(-)

diff --git a/internal/config/config.go b/internal/config/config.go
index 7d266852a..469b80ade 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -7,6 +7,8 @@ import (
 	"io/ioutil"
 	"log"
 	"math"
+	"net/http"
+	"net/url"
 	"os"
 	"path/filepath"
 	"regexp"
@@ -589,7 +591,12 @@ func (c *Config) LoadConfig(path string) error {
 			return err
 		}
 	}
-	tbl, err := parseFile(path)
+	data, err := loadConfig(path)
+	if err != nil {
+		return fmt.Errorf("Error loading %s, %s", path, err)
+	}
+
+	tbl, err := parseConfig(data)
 	if err != nil {
 		return fmt.Errorf("Error parsing %s, %s", path, err)
 	}
@@ -736,15 +743,43 @@ func escapeEnv(value string) string {
 	return envVarEscaper.Replace(value)
 }
 
-// parseFile loads a TOML configuration from a provided path and
-// returns the AST produced from the TOML parser. When loading the file, it
-// will find environment variables and replace them.
-func parseFile(fpath string) (*ast.Table, error) {
-	contents, err := ioutil.ReadFile(fpath)
+func loadConfig(config string) ([]byte, error) {
+	u, err := url.Parse(config)
 	if err != nil {
 		return nil, err
 	}
-	// ugh windows why
+
+	switch u.Scheme {
+	case "https": // http not permitted
+		return fetchConfig(u)
+	default:
+		// If it isn't an https scheme, try it as a file.
+	}
+	return ioutil.ReadFile(config)
+}
+
+func fetchConfig(u *url.URL) ([]byte, error) {
+	v := os.Getenv("INFLUX_TOKEN")
+
+	req, err := http.NewRequest("GET", u.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Add("Authorization", "Token "+v)
+	req.Header.Add("Accept", "application/toml")
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	return ioutil.ReadAll(resp.Body)
+}
+
+// parseConfig parses the given TOML configuration contents and
+// returns the AST produced from the TOML parser. While parsing, it
+// will find environment variables and replace them.
+func parseConfig(contents []byte) (*ast.Table, error) {
 	contents = trimBOM(contents)
 
 	env_vars := envVarRe.FindAll(contents, -1)

From cb84993b7e3ae526afc09de309f2bf7408772f92 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 5 Nov 2018 14:51:44 -0800
Subject: [PATCH 0355/1815] Update telegraf.conf

---
 etc/telegraf.conf | 196 +++++++++++++++++++++++++++++++++-----------
 1 file changed, 149 insertions(+), 47 deletions(-)

diff --git a/etc/telegraf.conf b/etc/telegraf.conf
index a8a4f6679..82df5cdb1 100644
--- a/etc/telegraf.conf
+++ b/etc/telegraf.conf
@@ -119,7 +119,7 @@
   # user_agent = "telegraf"
 
   ## UDP payload size is the maximum packet size to send.
-  # udp_payload = 512
+  # udp_payload = "512B"
 
   ## Optional TLS Config for use on HTTP connections.
   # tls_ca = "/etc/telegraf/ca.pem"
@@ -715,10 +715,11 @@
 # #    method = "measurement"
 #
 #   ## Use the value of a tag for all writes, if the tag is not set the empty
-#   ## string will be used:
+#   ## default option will be used. When no default, defaults to "telegraf"
 # #  [outputs.kinesis.partition]
 # #    method = "tag"
 # #    key = "host"
+# #    default = "mykey"
 #
 #
 #   ## Data format to output.
@@ -1285,7 +1286,7 @@
 #   drop_original = false
 
 
-# # Count the occurance of values in fields.
+# # Count the occurrence of values in fields.
 # [[aggregators.valuecounter]]
 #   ## General Aggregator Arguments:
 #   ## The period on which to flush & clear the aggregator.
@@ -2005,10 +2006,11 @@
 #   ## Only count regular files. Defaults to true.
 #   regular_only = true
 #
-#   ## Only count files that are at least this size in bytes. If size is
+#   ## Only count files that are at least this size. If size is
 #   ## a negative number, only count files that are smaller than the
-#   ## absolute value of size. Defaults to 0.
-#   size = 0
+#   ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ...
+#   ## Without quotes and units, interpreted as size in bytes.
+#   size = "0B"
 #
 #   ## Only count files that have not been touched for at least this
 #   ## duration. If mtime is negative, only count files that have been
@@ -2369,6 +2371,11 @@
 #   chains = [ "INPUT" ]
 
 
+# # Collect virtual and real server stats from Linux IPVS
+# [[inputs.ipvs]]
+#   # no configuration
+
+
 # # Read JMX metrics through Jolokia
 # [[inputs.jolokia]]
 #   # DEPRECATED: the jolokia plugin has been deprecated in favor of the
@@ -2848,10 +2855,38 @@
 #   response_timeout = "5s"
 
 
+# # Read Nginx Plus Api documentation
+# [[inputs.nginx_plus_api]]
+#   ## An array of API URI to gather stats.
+#   urls = ["http://localhost/api"]
+#
+#   # Nginx API version, default: 3
+#   # api_version = 3
+#
+#   # HTTP response timeout (default: 5s)
+#   response_timeout = "5s"
+
+
+# # Read Nginx virtual host traffic status module information (nginx-module-vts)
+# [[inputs.nginx_vts]]
+#   ## An array of ngx_http_status_module or status URI to gather stats.
+# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" + + # # Read NSQ topic and channel statistics. # [[inputs.nsq]] # ## An array of NSQD HTTP API endpoints -# endpoints = ["http://localhost:4151"] +# endpoints = ["http://localhost:4151"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false # # Collect kernel snmp counters and network interface statistics @@ -3045,28 +3080,6 @@ # # pid_finder = "pgrep" -# # Read metrics from one or many prometheus clients -# [[inputs.prometheus]] -# ## An array of urls to scrape metrics from. -# urls = ["http://localhost:9100/metrics"] -# -# ## An array of Kubernetes services to scrape metrics from. -# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] -# -# ## Use bearer token for authorization -# # bearer_token = /path/to/bearer/token -# -# ## Specify timeout duration for slower prometheus clients (default is 3s) -# # response_timeout = "3s" -# -# ## Optional TLS Config -# # tls_ca = /path/to/cafile -# # tls_cert = /path/to/certfile -# # tls_key = /path/to/keyfile -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - # # Reads last_run_summary.yaml file and converts to measurments # [[inputs.puppetagent]] # ## Location of puppet last run summary file @@ -3614,6 +3627,13 @@ # # instance_name = instanceName +# # Monitor wifi signal strength and quality +# [[inputs.wireless]] +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" + + # # Reads metrics from a SSL certificate # [[inputs.x509_cert]] # ## List certificate sources @@ -3716,6 +3736,16 @@ # ## Maximum number of messages server should give to the worker. # # prefetch_count = 50 # +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# # ## Auth method. PLAIN and EXTERNAL are supported # ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as # ## described here: https://www.rabbitmq.com/plugins.html @@ -3768,12 +3798,12 @@ # write_timeout = "10s" # # ## Maximum allowed http request body size in bytes. -# ## 0 means to use the default of 536,870,912 bytes (500 mebibytes) -# max_body_size = 0 +# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) +# max_body_size = "500MiB" # # ## Maximum line size allowed to be sent in bytes. # ## 0 means to use the default of 65536 bytes (64 kibibytes) -# max_line_size = 0 +# max_line_size = "64KiB" # # ## Set one or more allowed client CA certificate file names to # ## enable mutually authenticated TLS connections @@ -3806,8 +3836,8 @@ # # write_timeout = "10s" # # ## Maximum allowed http request body size in bytes. 
-# #   ## 0 means to use the default of 536,870,912 bytes (500 mebibytes)
-# #   max_body_size = 0
+# #   ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
+# #   max_body_size = "500MiB"
 #
 # #   ## Set one or more allowed client CA certificate file names to
 # #   ## enable mutually authenticated TLS connections
@@ -3840,12 +3870,12 @@
 #   write_timeout = "10s"
 #
 #   ## Maximum allowed http request body size in bytes.
-#   ## 0 means to use the default of 536,870,912 bytes (500 mebibytes)
-#   max_body_size = 0
+#   ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
+#   max_body_size = "500MiB"
 #
 #   ## Maximum line size allowed to be sent in bytes.
 #   ## 0 means to use the default of 65536 bytes (64 kibibytes)
-#   max_line_size = 0
+#   max_line_size = "64KiB"
 #
 #   ## Set one or more allowed client CA certificate file names to
 #   ## enable mutually authenticated TLS connections
@@ -3939,16 +3969,25 @@
 #   consumer_group = "telegraf_metrics_consumers"
 #   ## Offset (must be either "oldest" or "newest")
 #   offset = "oldest"
+#   ## Maximum length of a message to consume, in bytes (default 0/unlimited);
+#   ## larger messages are dropped
+#   max_message_len = 1000000
+#
+#   ## Maximum messages to read from the broker that have not been written by an
+#   ## output. For best throughput set based on the number of metrics within
+#   ## each message and the size of the output's metric_batch_size.
+#   ##
+#   ## For example, if each message from the queue contains 10 metrics and the
+#   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+#   ## full batch is collected and the write is triggered immediately without
+#   ## waiting until the next flush_interval.
+#   # max_undelivered_messages = 1000
 #
 #   ## Data format to consume.
 #   ## Each data format has its own unique set of configuration options, read
 #   ## more about them here:
 #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
 #   data_format = "influx"
-#
-#   ## Maximum length of a message to consume, in bytes (default 0/unlimited);
-#   ## larger messages are dropped
-#   max_message_len = 1000000
 
 
 # # Read metrics from Kafka topic(s)
@@ -4043,6 +4082,16 @@
 #   ## Connection timeout for initial connection in seconds
 #   connection_timeout = "30s"
 #
+#   ## Maximum messages to read from the broker that have not been written by an
+#   ## output. For best throughput set based on the number of metrics within
+#   ## each message and the size of the output's metric_batch_size.
+#   ##
+#   ## For example, if each message from the queue contains 10 metrics and the
+#   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+#   ## full batch is collected and the write is triggered immediately without
+#   ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000 +# # ## Topics to subscribe to # topics = [ # "telegraf/host01/cpu", @@ -4078,19 +4127,29 @@ # # Read metrics from NATS subject(s) # [[inputs.nats_consumer]] # ## urls of NATS servers -# # servers = ["nats://localhost:4222"] +# servers = ["nats://localhost:4222"] # ## Use Transport Layer Security -# # secure = false +# secure = false # ## subject(s) to consume -# # subjects = ["telegraf"] +# subjects = ["telegraf"] # ## name a queue group -# # queue_group = "telegraf_consumers" +# queue_group = "telegraf_consumers" # # ## Sets the limits for pending msgs and bytes for each subscription # ## These shouldn't need to be adjusted except in very high throughput scenarios # # pending_message_limit = 65536 # # pending_bytes_limit = 67108864 # +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -4110,6 +4169,16 @@ # channel = "consumer" # max_in_flight = 100 # +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -4229,6 +4298,39 @@ # tagvalue="postgresql.stats" +# # Read metrics from one or many prometheus clients +# [[inputs.prometheus]] +# ## An array of urls to scrape metrics from. +# urls = ["http://localhost:9100/metrics"] +# +# ## An array of Kubernetes services to scrape metrics from. +# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] +# +# ## Kubernetes config file to create client from. +# # kube_config = "/path/to/kubernetes.config" +# +# ## Scrape Kubernetes pods for the following prometheus annotations: +# ## - prometheus.io/scrape: Enable scraping for this pod +# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to +# ## set this to 'https' & most likely set the tls config. +# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. 
+# ## - prometheus.io/port: If port is not 9102 use this annotation +# # monitor_kubernetes_pods = true +# +# ## Use bearer token for authorization +# # bearer_token = /path/to/bearer/token +# +# ## Specify timeout duration for slower prometheus clients (default is 3s) +# # response_timeout = "3s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Generic socket listener capable of handling multiple socket types. # [[inputs.socket_listener]] # ## URL to listen on @@ -4260,11 +4362,11 @@ # ## Enables client authentication if set. # # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] # -# ## Maximum socket buffer size in bytes. +# ## Maximum socket buffer size (in bytes when no unit specified). # ## For stream sockets, once the buffer fills up, the sender will start backing up. # ## For datagram sockets, once the buffer fills up, metrics will start dropping. # ## Defaults to the OS default. -# # read_buffer_size = 65535 +# # read_buffer_size = "64KiB" # # ## Period between keep alive probes. # ## Only applies to TCP sockets. From f533d3b370ea604153f4571f6ec21098f9582e75 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 5 Nov 2018 14:54:13 -0800 Subject: [PATCH 0356/1815] Bump version in build.py --- scripts/build.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/build.py b/scripts/build.py index 675a4c3c0..efe9910ec 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -95,7 +95,7 @@ supported_packages = { "freebsd": [ "tar" ] } -next_version = '1.9.0' +next_version = '1.10.0' ################ #### Telegraf Functions From 7693c173fc28d982473dcd6fb191b604659f5d48 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 5 Nov 2018 14:58:23 -0800 Subject: [PATCH 0357/1815] Update changelog and readme --- CHANGELOG.md | 1 + README.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c70c1c348..4a6c0d055 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ - [http_listener_v2](/plugins/inputs/http_listener_v2/README.md) - Contributed by @jul1u5 - [ipvs](/plugins/inputs/ipvs/README.md) - Contributed by @amoghe +- [jenkins](/plugins/inputs/jenkins/README.md) - Contributed by @influxdata & @lpic10 - [nginx_plus_api](/plugins/inputs/nginx_plus_api/README.md) - Contributed by @Bugagazavr - [nginx_vts](/plugins/inputs/nginx_vts/README.md) - Contributed by @monder - [wireless](/plugins/inputs/wireless/README.md) - Contributed by @jamesmaidment diff --git a/README.md b/README.md index b8477e952..8db0be58f 100644 --- a/README.md +++ b/README.md @@ -184,6 +184,7 @@ For documentation on the latest development code see the [documentation index][d * [ipset](./plugins/inputs/ipset) * [iptables](./plugins/inputs/iptables) * [ipvs](./plugins/inputs/ipvs) +* [jenkins](./plugins/inputs/jenkins) * [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka) * [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) * [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry) From 0e07bbb877aecc11f2f212ab86f5468fe6630d14 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 5 Nov 2018 17:15:07 -0800 Subject: [PATCH 0358/1815] Fix option names in json parser docs --- plugins/parsers/json/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/parsers/json/README.md 
b/plugins/parsers/json/README.md index e1fce5550..13efa6568 100644 --- a/plugins/parsers/json/README.md +++ b/plugins/parsers/json/README.md @@ -106,9 +106,9 @@ Config: ```toml [[inputs.file]] files = ["example"] - name_key = "name" + json_name_key = "name" tag_keys = ["my_tag_1"] - string_fields = ["b_my_field"] + json_string_fields = ["b_my_field"] data_format = "json" ``` From 2d782fbaacf195b0c1d97fe8a173a234e8ee6506 Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Tue, 6 Nov 2018 15:22:43 -0700 Subject: [PATCH 0359/1815] Fix potential missing datastore metrics in vSphere plugin (#4968) --- plugins/inputs/vsphere/README.md | 2 +- plugins/inputs/vsphere/client.go | 72 ++++++++++++++++++++++++-- plugins/inputs/vsphere/endpoint.go | 68 +++++++++++++++++++----- plugins/inputs/vsphere/tscache.go | 57 ++++++++++++++++++++ plugins/inputs/vsphere/vsphere.go | 9 +++- plugins/inputs/vsphere/vsphere_test.go | 44 ++++++++++++++-- 6 files changed, 228 insertions(+), 24 deletions(-) create mode 100644 plugins/inputs/vsphere/tscache.go diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md index b56393345..7ba323bc7 100644 --- a/plugins/inputs/vsphere/README.md +++ b/plugins/inputs/vsphere/README.md @@ -159,7 +159,7 @@ vm_metric_exclude = [ "*" ] # object_discovery_interval = "300s" ## timeout applies to any of the api request made to vcenter - # timeout = "20s" + # timeout = "60s" ## Optional SSL Config # ssl_ca = "/path/to/cafile" diff --git a/plugins/inputs/vsphere/client.go b/plugins/inputs/vsphere/client.go index 5278cd349..ebad2bea7 100644 --- a/plugins/inputs/vsphere/client.go +++ b/plugins/inputs/vsphere/client.go @@ -5,10 +5,13 @@ import ( "crypto/tls" "log" "net/url" + "strconv" + "strings" "sync" "time" "github.com/vmware/govmomi" + "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/performance" "github.com/vmware/govmomi/session" "github.com/vmware/govmomi/view" @@ -17,6 +20,10 @@ import ( "github.com/vmware/govmomi/vim25/soap" ) +// The highest number of metrics we can query for, no matter what settings +// and server say. +const absoluteMaxMetrics = 10000 + // ClientFactory is used to obtain Clients to be used throughout the plugin. Typically, // a single Client is reused across all functions and goroutines, but the client // is periodically recycled to avoid authentication expiration issues. @@ -79,6 +86,8 @@ func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) { // NewClient creates a new vSphere client based on the url and setting passed as parameters. func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) { sw := NewStopwatch("connect", u.Host) + defer sw.Stop() + tlsCfg, err := vs.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -147,16 +156,27 @@ func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) { p := performance.NewManager(c.Client) - sw.Stop() - - return &Client{ + client := &Client{ Client: c, Views: m, Root: v, Perf: p, Valid: true, Timeout: vs.Timeout.Duration, - }, nil + } + // Adjust max query size if needed + ctx3, cancel3 := context.WithTimeout(ctx, vs.Timeout.Duration) + defer cancel3() + n, err := client.GetMaxQueryMetrics(ctx3) + if err != nil { + return nil, err + } + log.Printf("D! [input.vsphere] vCenter says max_query_metrics should be %d", n) + if n < vs.MaxQueryMetrics { + log.Printf("W! [input.vsphere] Configured max_query_metrics is %d, but server limits it to %d. 
Reducing.", vs.MaxQueryMetrics, n) + vs.MaxQueryMetrics = n + } + return client, nil } // Close shuts down a ClientFactory and releases any resources associated with it. @@ -191,3 +211,47 @@ func (c *Client) GetServerTime(ctx context.Context) (time.Time, error) { } return *t, nil } + +// GetMaxQueryMetrics returns the max_query_metrics setting as configured in vCenter +func (c *Client) GetMaxQueryMetrics(ctx context.Context) (int, error) { + ctx, cancel := context.WithTimeout(ctx, c.Timeout) + defer cancel() + + om := object.NewOptionManager(c.Client.Client, *c.Client.Client.ServiceContent.Setting) + res, err := om.Query(ctx, "config.vpxd.stats.maxQueryMetrics") + if err == nil { + if len(res) > 0 { + if s, ok := res[0].GetOptionValue().Value.(string); ok { + v, err := strconv.Atoi(s) + if err == nil { + log.Printf("D! [input.vsphere] vCenter maxQueryMetrics is defined: %d", v) + if v == -1 { + // Whatever the server says, we never ask for more metrics than this. + return absoluteMaxMetrics, nil + } + return v, nil + } + } + // Fall through version-based inference if value isn't usable + } + } else { + log.Println("I! [input.vsphere] Option query for maxQueryMetrics failed. Using default") + } + + // No usable maxQueryMetrics setting. Infer based on version + ver := c.Client.Client.ServiceContent.About.Version + parts := strings.Split(ver, ".") + if len(parts) < 2 { + log.Printf("W! [input.vsphere] vCenter returned an invalid version string: %s. Using default query size=64", ver) + return 64, nil + } + log.Printf("D! [input.vsphere] vCenter version is: %s", ver) + major, err := strconv.Atoi(parts[0]) + if err != nil { + return 0, err + } + if major < 6 || major == 6 && parts[1] == "0" { + return 64, nil + } + return 256, nil +} diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index 55444ebf3..dbc67dd95 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -24,6 +24,8 @@ import ( var isolateLUN = regexp.MustCompile(".*/([^/]+)/?$") +const metricLookback = 3 + // Endpoint is a high-level representation of a connected vCenter endpoint. It is backed by the lower // level Client type. type Endpoint struct { @@ -32,6 +34,7 @@ type Endpoint struct { lastColls map[string]time.Time instanceInfo map[string]resourceInfo resourceKinds map[string]resourceKind + hwMarks *TSCache lun2ds map[string]string discoveryTicker *time.Ticker collectMux sync.RWMutex @@ -96,6 +99,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, URL: url, Parent: parent, lastColls: make(map[string]time.Time), + hwMarks: NewTSCache(1 * time.Hour), instanceInfo: make(map[string]resourceInfo), lun2ds: make(map[string]string), initialized: false, @@ -353,8 +357,8 @@ func (e *Endpoint) discover(ctx context.Context) error { // Populate resource objects, and endpoint instance info. for k, res := range e.resourceKinds { log.Printf("D! [input.vsphere] Discovering resources for %s", res.name) - // Need to do this for all resource types even if they are not enabled (but datastore) - if res.enabled || (k != "datastore" && k != "vm") { + // Need to do this for all resource types even if they are not enabled + if res.enabled || k != "vm" { objects, err := res.getObjects(ctx, e, client.Root) if err != nil { return err @@ -416,7 +420,6 @@ func (e *Endpoint) discover(ctx context.Context) error { url := ds.altID m := isolateLUN.FindStringSubmatch(url) if m != nil { - log.Printf("D! 
[input.vsphere]: LUN: %s", m[1]) l2d[m[1]] = ds.name } } @@ -539,7 +542,6 @@ func getDatastores(ctx context.Context, e *Endpoint, root *view.ContainerView) ( url = info.Url } } - log.Printf("D! [input.vsphere]: DS URL: %s %s", url, r.Name) m[r.ExtensibleManagedObject.Reference().Value] = objectRef{ name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Parent, altID: url} } @@ -584,10 +586,24 @@ func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error } } } + + // Purge old timestamps from the cache + e.hwMarks.Purge() return nil } func (e *Endpoint) chunker(ctx context.Context, f PushFunc, res *resourceKind, now time.Time, latest time.Time) { + maxMetrics := e.Parent.MaxQueryMetrics + if maxMetrics < 1 { + maxMetrics = 1 + } + + // Workaround for vCenter weirdness. Cluster metrics seem to count multiple times + // when checking query size, so keep it at a low value. + // Revisit this when we better understand the reason why vCenter counts it this way! + if res.name == "cluster" && maxMetrics > 10 { + maxMetrics = 10 + } pqs := make([]types.PerfQuerySpec, 0, e.Parent.MaxQueryObjects) metrics := 0 total := 0 @@ -600,7 +616,7 @@ func (e *Endpoint) chunker(ctx context.Context, f PushFunc, res *resourceKind, n mr := len(info.metrics) for mr > 0 { mc := mr - headroom := e.Parent.MaxQueryMetrics - metrics + headroom := maxMetrics - metrics if !res.realTime && mc > headroom { // Metric query limit only applies to non-realtime metrics mc = headroom } @@ -610,10 +626,19 @@ func (e *Endpoint) chunker(ctx context.Context, f PushFunc, res *resourceKind, n MaxSample: 1, MetricId: info.metrics[fm : fm+mc], IntervalId: res.sampling, + Format: "normal", } + // For non-realtime metrics, we need to look back a few samples in case + // the vCenter is late reporting metrics. if !res.realTime { - pq.StartTime = &latest + pq.MaxSample = metricLookback + } + + // Look back 3 sampling periods + start := latest.Add(time.Duration(-res.sampling) * time.Second * (metricLookback - 1)) + if !res.realTime { + pq.StartTime = &start pq.EndTime = &now } pqs = append(pqs, pq) @@ -623,8 +648,8 @@ func (e *Endpoint) chunker(ctx context.Context, f PushFunc, res *resourceKind, n // We need to dump the current chunk of metrics for one of two reasons: // 1) We filled up the metric quota while processing the current resource // 2) We are at the last resource and have no more data to process. - if mr > 0 || (!res.realTime && metrics >= e.Parent.MaxQueryMetrics) || nRes >= e.Parent.MaxQueryObjects { - log.Printf("D! [input.vsphere]: Querying %d objects, %d metrics (%d remaining) of type %s for %s. Processed objects: %d. Total objects %d", + if mr > 0 || (!res.realTime && metrics >= maxMetrics) || nRes >= e.Parent.MaxQueryObjects { + log.Printf("D! [input.vsphere]: Queueing query: %d objects, %d metrics (%d remaining) of type %s for %s. Processed objects: %d. Total objects %d", len(pqs), metrics, mr, res.name, e.URL.Host, total+1, len(res.objects)) // To prevent deadlocks, don't send work items if the context has been cancelled. @@ -646,6 +671,8 @@ func (e *Endpoint) chunker(ctx context.Context, f PushFunc, res *resourceKind, n // if len(pqs) > 0 { // Call push function + log.Printf("D! [input.vsphere]: Queuing query: %d objects, %d metrics (0 remaining) of type %s for %s. Total objects %d (final chunk)", + len(pqs), metrics, res.name, e.URL.Host, len(res.objects)) f(ctx, pqs) } } @@ -668,7 +695,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc log.Printf("D! 
[input.vsphere]: Latest: %s, elapsed: %f, resource: %s", latest, elapsed, resourceType) if !res.realTime && elapsed < float64(res.sampling) { // No new data would be available. We're outta herE! [input.vsphere]: - log.Printf("D! [input.vsphere]: Sampling period for %s of %d has not elapsed for %s", + log.Printf("D! [input.vsphere]: Sampling period for %s of %d has not elapsed on %s", resourceType, res.sampling, e.URL.Host) return nil } @@ -679,7 +706,6 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc internalTags := map[string]string{"resourcetype": resourceType} sw := NewStopwatchWithTags("gather_duration", e.URL.Host, internalTags) - log.Printf("D! [input.vsphere]: Start of sample period deemed to be %s", latest) log.Printf("D! [input.vsphere]: Collecting metrics for %d objects of type %s for %s", len(res.objects), resourceType, e.URL.Host) @@ -690,7 +716,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc wp.Run(ctx, func(ctx context.Context, in interface{}) interface{} { chunk := in.([]types.PerfQuerySpec) n, err := e.collectChunk(ctx, chunk, resourceType, res, acc) - log.Printf("D! [input.vsphere]: Query returned %d metrics", n) + log.Printf("D! [input.vsphere] CollectChunk for %s returned %d metrics", resourceType, n) if err != nil { return err } @@ -722,7 +748,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc sw.Stop() SendInternalCounterWithTags("gather_count", e.URL.Host, internalTags, count) if len(merr) > 0 { - return err + return merr } return nil } @@ -757,6 +783,7 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, if err != nil { return count, err } + log.Printf("D! [input.vsphere] Query for %s returned metrics for %d objects", resourceType, len(ems)) // Iterate through results for _, em := range ems { @@ -783,10 +810,18 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, } e.populateTags(&objectRef, resourceType, &res, t, &v) - // Now deal with the values - for idx, value := range v.Value { + // Now deal with the values. Iterate backwards so we start with the latest value + tsKey := moid + "|" + name + "|" + v.Instance + for idx := len(v.Value) - 1; idx >= 0; idx-- { ts := em.SampleInfo[idx].Timestamp + // Since non-realtime metrics are queries with a lookback, we need to check the high-water mark + // to determine if this should be included. Only samples not seen before should be included. + if !(res.realTime || e.hwMarks.IsNew(tsKey, ts)) { + continue + } + value := v.Value[idx] + // Organize the metrics into a bucket per measurement. // Data SHOULD be presented to us with the same timestamp for all samples, but in case // they don't we use the measurement name + timestamp as the key for the bucket. @@ -813,6 +848,11 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, bucket.fields[fn] = value } count++ + + // Update highwater marks for non-realtime metrics. 
+			if !res.realTime {
+				e.hwMarks.Put(tsKey, ts)
+			}
 		}
 	}
 	// We've iterated through all the metrics and collected buckets for each
diff --git a/plugins/inputs/vsphere/tscache.go b/plugins/inputs/vsphere/tscache.go
new file mode 100644
index 000000000..9abe24ea7
--- /dev/null
+++ b/plugins/inputs/vsphere/tscache.go
@@ -0,0 +1,57 @@
+package vsphere
+
+import (
+	"log"
+	"sync"
+	"time"
+)
+
+// TSCache is a cache of timestamps used to determine the validity of datapoints
+type TSCache struct {
+	ttl   time.Duration
+	table map[string]time.Time
+	done  chan struct{}
+	mux   sync.RWMutex
+}
+
+// NewTSCache creates a new TSCache with a specified time-to-live after which timestamps are discarded.
+func NewTSCache(ttl time.Duration) *TSCache {
+	return &TSCache{
+		ttl:   ttl,
+		table: make(map[string]time.Time),
+		done:  make(chan struct{}),
+	}
+}
+
+// Purge removes timestamps that are older than the time-to-live
+func (t *TSCache) Purge() {
+	t.mux.Lock()
+	defer t.mux.Unlock()
+	n := 0
+	for k, v := range t.table {
+		if time.Now().Sub(v) > t.ttl {
+			delete(t.table, k)
+			n++
+		}
+	}
+	log.Printf("D! [input.vsphere] Purged timestamp cache. %d deleted with %d remaining", n, len(t.table))
+}
+
+// IsNew returns true if the supplied timestamp for the supplied key is more recent than the
+// timestamp we have on record.
+func (t *TSCache) IsNew(key string, tm time.Time) bool {
+	t.mux.RLock()
+	defer t.mux.RUnlock()
+	v, ok := t.table[key]
+	if !ok {
+		return true // We've never seen this before, so consider everything a new sample
+	}
+	return !tm.Before(v)
+}
+
+// Put updates the latest timestamp for the supplied key.
+func (t *TSCache) Put(key string, time time.Time) {
+	t.mux.Lock()
+	defer t.mux.Unlock()
+	t.table[key] = time
+}
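TSCache above is the high-water-mark table used by collectChunk: keys are built as moid|metric name|instance (see tsKey), and only samples at or after the recorded mark are emitted, so the multi-sample lookback never double-counts older data. A rough usage sketch follows (sampleTime stands in for em.SampleInfo[idx].Timestamp; this is an illustration, not part of the patch):

```go
// One cache per endpoint; entries expire an hour after their timestamp.
hwMarks := NewTSCache(1 * time.Hour)

key := "vm-1234|cpu.usage.average|0" // moid|metric|instance, cf. tsKey
if hwMarks.IsNew(key, sampleTime) {
	// true when sampleTime is at or after the mark (or the key is unseen):
	// emit the sample, then advance the high-water mark for this series.
	hwMarks.Put(key, sampleTime)
}

hwMarks.Purge() // called once per Collect to drop entries older than the TTL
```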
diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go
index 26af1e8cc..f0bb5dca9 100644
--- a/plugins/inputs/vsphere/vsphere.go
+++ b/plugins/inputs/vsphere/vsphere.go
@@ -192,7 +192,7 @@ var sampleConfig = `
   # object_discovery_interval = "300s"
 
   ## timeout applies to any of the api request made to vcenter
-  # timeout = "20s"
+  # timeout = "60s"
 
   ## Optional SSL Config
   # ssl_ca = "/path/to/cafile"
@@ -260,6 +260,7 @@ func (v *VSphere) Stop() {
 
 // Gather is the main data collection function called by the Telegraf core. It performs all
 // the data collection and writes all metrics into the Accumulator passed as an argument.
 func (v *VSphere) Gather(acc telegraf.Accumulator) error {
+	merr := make(multiError, 0)
 	var wg sync.WaitGroup
 	for _, ep := range v.endpoints {
 		wg.Add(1)
@@ -273,11 +274,15 @@ func (v *VSphere) Gather(acc telegraf.Accumulator) error {
 			}
 			if err != nil {
 				acc.AddError(err)
+				merr = append(merr, err)
 			}
 		}(ep)
 	}
 	wg.Wait()
 
+	if len(merr) > 0 {
+		return merr
+	}
 	return nil
 }
 
@@ -306,7 +311,7 @@ func init() {
 			DiscoverConcurrency:     1,
 			ForceDiscoverOnInit:     false,
 			ObjectDiscoveryInterval: internal.Duration{Duration: time.Second * 300},
-			Timeout:                 internal.Duration{Duration: time.Second * 20},
+			Timeout:                 internal.Duration{Duration: time.Second * 60},
 		}
 	})
 }
diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go
index 3290da2e9..4eb3d28f8 100644
--- a/plugins/inputs/vsphere/vsphere_test.go
+++ b/plugins/inputs/vsphere/vsphere_test.go
@@ -15,7 +15,9 @@ import (
 	"github.com/influxdata/telegraf/testutil"
 	"github.com/influxdata/toml"
 	"github.com/stretchr/testify/require"
+	"github.com/vmware/govmomi/object"
 	"github.com/vmware/govmomi/simulator"
+	"github.com/vmware/govmomi/vim25/types"
 )
 
 var configHeader = `
@@ -187,8 +189,6 @@ func createSim() (*simulator.Model, *simulator.Server, error) {
 	model.Service.TLS = new(tls.Config)
 	s := model.Service.NewServer()
-	//fmt.Printf("Server created at: %s\n", s.URL)
-
 	return model, s, nil
 }
 
@@ -244,13 +244,51 @@ func TestTimeout(t *testing.T) {
 	v.Timeout = internal.Duration{Duration: 1 * time.Nanosecond}
 	require.NoError(t, v.Start(nil)) // We're not using the Accumulator, so it can be nil.
 	defer v.Stop()
-	require.NoError(t, v.Gather(&acc))
+	err = v.Gather(&acc)
+	require.NotNil(t, err, "Error should not be nil here")
 
 	// The accumulator must contain exactly one error and it must be a deadline exceeded.
 	require.Equal(t, 1, len(acc.Errors))
 	require.True(t, strings.Contains(acc.Errors[0].Error(), "context deadline exceeded"))
 }
 
+func TestMaxQuery(t *testing.T) {
+	m, s, err := createSim()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer m.Remove()
+	defer s.Close()
+
+	v := defaultVSphere()
+	v.MaxQueryMetrics = 256
+	ctx := context.Background()
+	c, err := NewClient(ctx, s.URL, v)
+	if err != nil {
+		t.Fatal(err)
+	}
+	require.Equal(t, 256, v.MaxQueryMetrics)
+
+	om := object.NewOptionManager(c.Client.Client, *c.Client.Client.ServiceContent.Setting)
+	err = om.Update(ctx, []types.BaseOptionValue{&types.OptionValue{
+		Key:   "config.vpxd.stats.maxQueryMetrics",
+		Value: "42",
+	}})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	v.MaxQueryMetrics = 256
+	ctx = context.Background()
+	c2, err := NewClient(ctx, s.URL, v)
+	if err != nil {
+		t.Fatal(err)
+	}
+	require.Equal(t, 42, v.MaxQueryMetrics)
+	c.close()
+	c2.close()
+}
+
 func TestAll(t *testing.T) {
 	m, s, err := createSim()
 	if err != nil {

From f1758489bbd990936d4c1dfb685b8d4231fe14d4 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 6 Nov 2018 14:27:41 -0800
Subject: [PATCH 0360/1815] Update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4a6c0d055..a554203b1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -60,6 +60,7 @@
 - [#3968](https://github.com/influxdata/telegraf/issues/3968): Fix input time rounding when using a custom interval.
 - [#4938](https://github.com/influxdata/telegraf/pull/4938): Fix potential deadlock or leaked resources on restart/reload.
 - [#2919](https://github.com/influxdata/telegraf/pull/2919): Fix outputs block inputs when batch size is reached.
+- [#4789](https://github.com/influxdata/telegraf/issues/4789): Fix potential missing datastore metrics in vSphere plugin. ## v1.8.3 [2018-10-30] From d67eb46c3dbb80b0d3abd28e04876af5ca0fa632 Mon Sep 17 00:00:00 2001 From: Mark Rushakoff Date: Fri, 9 Nov 2018 10:23:53 -0800 Subject: [PATCH 0361/1815] Remove mistakenly committed file (#4969) --- Gopkg.lock.old | 1537 ------------------------------------------------ 1 file changed, 1537 deletions(-) delete mode 100644 Gopkg.lock.old diff --git a/Gopkg.lock.old b/Gopkg.lock.old deleted file mode 100644 index f4993ed95..000000000 --- a/Gopkg.lock.old +++ /dev/null @@ -1,1537 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] -<<<<<<< HEAD - digest = "1:1acadbbc24182315b628f727b2e9ac653266d1644ca4007e0766c28110afc072" - name = "cloud.google.com/go" - packages = [ - "compute/metadata", - "internal/version", - "monitoring/apiv3", - ] - pruneopts = "" - revision = "97efc2c9ffd9fe8ef47f7f3203dc60bbca547374" - version = "v0.28.0" -======= - digest = "1:972f38a9c879a4920d1e3a3d3438104b6c06163bfa3e6f4064adb00468d40587" - name = "cloud.google.com/go" - packages = ["civil"] - pruneopts = "" - revision = "c728a003b238b26cef9ab6753a5dc424b331c3ad" - version = "v0.27.0" ->>>>>>> master - -[[projects]] - branch = "master" - digest = "1:fc0802104acded1f48e4860a9f2db85b82b4a754fca9eae750ff4e8b8cdf2116" - name = "code.cloudfoundry.org/clock" - packages = ["."] - pruneopts = "" - revision = "02e53af36e6c978af692887ed449b74026d76fec" - -[[projects]] - digest = "1:ca3acef20fd660d4df327accbf3ca2df9a12213d914f3113305dcd56579324b9" - name = "collectd.org" - packages = [ - "api", - "cdtime", - "network", - ] - pruneopts = "" - revision = "2ce144541b8903101fb8f1483cc0497a68798122" - version = "v0.3.0" - -[[projects]] -<<<<<<< HEAD - digest = "1:5f61d4466cef935862c262f6bc00e24beb5b39b551e906f3cfb180dfac096d57" - name = "contrib.go.opencensus.io/exporter/stackdriver" - packages = ["propagation"] - pruneopts = "" - revision = "2b93072101d466aa4120b3c23c2e1b08af01541c" - version = "v0.6.0" -======= - digest = "1:5923e22a060ab818a015593422f9e8a35b9d881d4cfcfed0669a82959b11c7ee" - name = "github.com/Azure/go-autorest" - packages = [ - "autorest", - "autorest/adal", - "autorest/azure", - "autorest/azure/auth", - "autorest/date", - ] - pruneopts = "" - revision = "1f7cd6cfe0adea687ad44a512dfe76140f804318" - version = "v10.12.0" ->>>>>>> master - -[[projects]] - branch = "master" - digest = "1:298712a3ee36b59c3ca91f4183bd75d174d5eaa8b4aed5072831f126e2e752f6" - name = "github.com/Microsoft/ApplicationInsights-Go" - packages = [ - "appinsights", - "appinsights/contracts", - ] - pruneopts = "" - revision = "d2df5d440eda5372f24fcac03839a64d6cb5f7e5" - -[[projects]] - digest = "1:45ec6eb579713a01991ad07f538fed3b576ee55f5ce9f248320152a9270d9258" - name = "github.com/Microsoft/go-winio" - packages = ["."] - pruneopts = "" - revision = "a6d595ae73cf27a1b8fc32930668708f45ce1c85" - version = "v0.4.9" - -[[projects]] - digest = "1:213b41361ad1cb4768add9d26c2e27794c65264eefdb24ed6ea34cdfeeff3f3c" - name = "github.com/Shopify/sarama" - packages = ["."] - pruneopts = "" - revision = "a6144ae922fd99dd0ea5046c8137acfb7fab0914" - version = "v1.18.0" - -[[projects]] - digest = "1:f82b8ac36058904227087141017bb82f4b0fc58272990a4cdae3e2d6d222644e" - name = "github.com/StackExchange/wmi" - packages = ["."] - pruneopts = "" - revision = "5d049714c4a64225c3c79a7cf7d02f7fb5b96338" - version = "1.0.0" - -[[projects]] - digest = 
"1:f296e8b29c60c94efed3b8cfae08d793cb95149cdd7343e6a9834b4ac7136475" - name = "github.com/aerospike/aerospike-client-go" - packages = [ - ".", - "internal/lua", - "internal/lua/resources", - "logger", - "pkg/bcrypt", - "pkg/ripemd160", - "types", - "types/atomic", - "types/particle_type", - "types/rand", - "utils/buffer", - ] - pruneopts = "" - revision = "1dc8cf203d24cd454e71ce40ab4cd0bf3112df90" - version = "v1.27.0" - -[[projects]] - branch = "master" - digest = "1:a74730e052a45a3fab1d310fdef2ec17ae3d6af16228421e238320846f2aaec8" - name = "github.com/alecthomas/template" - packages = [ - ".", - "parse", - ] - pruneopts = "" - revision = "a0175ee3bccc567396460bf5acd36800cb10c49c" - -[[projects]] - branch = "master" - digest = "1:8483994d21404c8a1d489f6be756e25bfccd3b45d65821f25695577791a08e68" - name = "github.com/alecthomas/units" - packages = ["."] - pruneopts = "" - revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a" - -[[projects]] - branch = "master" - digest = "1:7f21a8f175ee7f91c659f919c61032e11889fba5dc25c0cec555087cbb87435a" - name = "github.com/amir/raidman" - packages = [ - ".", - "proto", - ] - pruneopts = "" - revision = "1ccc43bfb9c93cb401a4025e49c64ba71e5e668b" - -[[projects]] - branch = "master" - digest = "1:0828d8c0f95689f832cf348fe23827feb7640cd698d612ef59e2f9d041f54c68" - name = "github.com/apache/thrift" - packages = ["lib/go/thrift"] - pruneopts = "" - revision = "f2867c24984aa53edec54a138c03db934221bdea" - -[[projects]] - digest = "1:65a05bde9b02f645c73afa61c9f6af92d94d726c81a268f45cc70218bd58de65" - name = "github.com/aws/aws-sdk-go" - packages = [ - "aws", - "aws/awserr", - "aws/awsutil", - "aws/client", - "aws/client/metadata", - "aws/corehandlers", - "aws/credentials", - "aws/credentials/ec2rolecreds", - "aws/credentials/endpointcreds", - "aws/credentials/stscreds", - "aws/csm", - "aws/defaults", - "aws/ec2metadata", - "aws/endpoints", - "aws/request", - "aws/session", - "aws/signer/v4", - "internal/sdkio", - "internal/sdkrand", - "internal/sdkuri", - "internal/shareddefaults", - "private/protocol", - "private/protocol/json/jsonutil", - "private/protocol/jsonrpc", - "private/protocol/query", - "private/protocol/query/queryutil", - "private/protocol/rest", - "private/protocol/xml/xmlutil", - "service/cloudwatch", - "service/kinesis", - "service/sts", - ] - pruneopts = "" - revision = "8cf662a972fa7fba8f2c1ec57648cf840e2bb401" - version = "v1.14.30" - -[[projects]] - branch = "master" - digest = "1:c0bec5f9b98d0bc872ff5e834fac186b807b656683bd29cb82fb207a1513fabb" - name = "github.com/beorn7/perks" - packages = ["quantile"] - pruneopts = "" - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - digest = "1:c5978131c797af795972c27c25396c81d1bf53b7b6e8e3e0259e58375765c071" - name = "github.com/bsm/sarama-cluster" - packages = ["."] - pruneopts = "" - revision = "cf455bc755fe41ac9bb2861e7a961833d9c2ecc3" - version = "v2.1.13" - -[[projects]] - digest = "1:f619cb9b07aebe5416262cdd8b86082e8d5bdc5264cb3b615ff858df0b645f97" - name = "github.com/cenkalti/backoff" - packages = ["."] - pruneopts = "" - revision = "2ea60e5f094469f9e65adb9cd103795b73ae743e" - version = "v2.0.0" - -[[projects]] - branch = "master" - digest = "1:298e42868718da06fc0899ae8fdb99c48a14477045234c9274d81caa79af6a8f" - name = "github.com/couchbase/go-couchbase" - packages = ["."] - pruneopts = "" - revision = "16db1f1fe037412f12738fa4d8448c549c4edd77" - -[[projects]] - branch = "master" - digest = "1:c734658274a6be88870a36742fdea96a3fce4fc99a7b90946c9e84335ceae71a" - name = 
"github.com/couchbase/gomemcached" - packages = [ - ".", - "client", - ] - pruneopts = "" - revision = "0da75df145308b9a4e6704d762ca9d9b77752efc" - -[[projects]] - branch = "master" - digest = "1:c1195c02bc8fbf5307cfb95bc79eddaa1351ee3587cc4a7bbe6932e2fb966ff2" - name = "github.com/couchbase/goutils" - packages = [ - "logging", - "scramsha", - ] - pruneopts = "" - revision = "e865a1461c8ac0032bd37e2d4dab3289faea3873" - -[[projects]] - digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "" - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" - -[[projects]] - branch = "master" - digest = "1:7fdc54859cd901c25b9d8db964410a4e0d98fa0dca267fe4cf49c0eede5e06c2" - name = "github.com/denisenkom/go-mssqldb" - packages = [ - ".", - "internal/cp", - ] - pruneopts = "" - revision = "1eb28afdf9b6e56cf673badd47545f844fe81103" - -[[projects]] - digest = "1:6098222470fe0172157ce9bbef5d2200df4edde17ee649c5d6e48330e4afa4c6" - name = "github.com/dgrijalva/jwt-go" - packages = ["."] - pruneopts = "" - revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" - version = "v3.2.0" - -[[projects]] - branch = "master" - digest = "1:654ac9799e7a8a586d8690bb2229a4f3408bbfe2c5494bf4dfe043053eeb5496" - name = "github.com/dimchansky/utfbom" - packages = ["."] - pruneopts = "" - revision = "6c6132ff69f0f6c088739067407b5d32c52e1d0f" - -[[projects]] - digest = "1:522eff2a1f014a64fb403db60fc0110653e4dc5b59779894d208e697b0708ddc" - name = "github.com/docker/distribution" - packages = [ - "digestset", - "reference", - ] - pruneopts = "" - revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" - -[[projects]] - digest = "1:d149605f1b00713fdc48150122892d77d49d30c825f690dd92f497aeb6cf18f5" - name = "github.com/docker/docker" - packages = [ - "api", - "api/types", - "api/types/blkiodev", - "api/types/container", - "api/types/events", - "api/types/filters", - "api/types/image", - "api/types/mount", - "api/types/network", - "api/types/registry", - "api/types/strslice", - "api/types/swarm", - "api/types/swarm/runtime", - "api/types/time", - "api/types/versions", - "api/types/volume", - "client", - ] - pruneopts = "" - revision = "ed7b6428c133e7c59404251a09b7d6b02fa83cc2" - -[[projects]] - digest = "1:a5ecc2e70260a87aa263811281465a5effcfae8a54bac319cee87c4625f04d63" - name = "github.com/docker/go-connections" - packages = [ - "nat", - "sockets", - "tlsconfig", - ] - pruneopts = "" - revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d" - version = "v0.3.0" - -[[projects]] - digest = "1:582d54fcb7233da8dde1dfd2210a5b9675d0685f84246a8d317b07d680c18b1b" - name = "github.com/docker/go-units" - packages = ["."] - pruneopts = "" - revision = "47565b4f722fb6ceae66b95f853feed578a4a51c" - version = "v0.3.3" - -[[projects]] - digest = "1:6d6672f85a84411509885eaa32f597577873de00e30729b9bb0eb1e1faa49c12" - name = "github.com/eapache/go-resiliency" - packages = ["breaker"] - pruneopts = "" - revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce" - version = "v1.1.0" - -[[projects]] - branch = "master" - digest = "1:7b12ea8b50040c6c2378ec5b5a1ab722730b2bfb46e8724ded57f2c3905431fa" - name = "github.com/eapache/go-xerial-snappy" - packages = ["."] - pruneopts = "" - revision = "040cc1a32f578808623071247fdbd5cc43f37f5f" - -[[projects]] - digest = "1:d8d46d21073d0f65daf1740ebf4629c65e04bf92e14ce93c2201e8624843c3d3" - name = "github.com/eapache/queue" - packages = ["."] - pruneopts = "" - revision = 
"44cc805cf13205b55f69e14bcb69867d1ae92f98" - version = "v1.1.0" - -[[projects]] - digest = "1:3fa846cb3feb4e65371fe3c347c299de9b5bc3e71e256c0d940cd19b767a6ba0" - name = "github.com/eclipse/paho.mqtt.golang" - packages = [ - ".", - "packets", - ] - pruneopts = "" - revision = "36d01c2b4cbeb3d2a12063e4880ce30800af9560" - version = "v1.1.1" - -[[projects]] - digest = "1:858b7fe7b0f4bc7ef9953926828f2816ea52d01a88d72d1c45bc8c108f23c356" - name = "github.com/go-ini/ini" - packages = ["."] - pruneopts = "" - revision = "358ee7663966325963d4e8b2e1fbd570c5195153" - version = "v1.38.1" - -[[projects]] - digest = "1:6a4a01d58b227c4b6b11111b9f172ec5c17682b82724e58e6daf3f19f4faccd8" - name = "github.com/go-logfmt/logfmt" - packages = ["."] - pruneopts = "" - revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5" - version = "v0.3.0" - -[[projects]] - digest = "1:96c4a6ff4206086347bfe28e96e092642882128f45ecb8dc8f15f3e6f6703af0" - name = "github.com/go-ole/go-ole" - packages = [ - ".", - "oleutil", - ] - pruneopts = "" - revision = "a41e3c4b706f6ae8dfbff342b06e40fa4d2d0506" - version = "v1.2.1" - -[[projects]] - digest = "1:3dfd659219b6f63dc0677a62b8d4e8f10b5cf53900aef40858db10a19407e41d" - name = "github.com/go-redis/redis" - packages = [ - ".", - "internal", - "internal/consistenthash", - "internal/hashtag", - "internal/pool", - "internal/proto", - "internal/singleflight", - "internal/util", - ] - pruneopts = "" - revision = "83fb42932f6145ce52df09860384a4653d2d332a" - version = "v6.12.0" - -[[projects]] - digest = "1:c07de423ca37dc2765396d6971599ab652a339538084b9b58c9f7fc533b28525" - name = "github.com/go-sql-driver/mysql" - packages = ["."] - pruneopts = "" - revision = "d523deb1b23d913de5bdada721a6071e71283618" - version = "v1.4.0" - -[[projects]] - digest = "1:9ab1b1c637d7c8f49e39d8538a650d7eb2137b076790cff69d160823b505964c" - name = "github.com/gobwas/glob" - packages = [ - ".", - "compiler", - "match", - "syntax", - "syntax/ast", - "syntax/lexer", - "util/runes", - "util/strings", - ] - pruneopts = "" - revision = "5ccd90ef52e1e632236f7326478d4faa74f99438" - version = "v0.2.3" - -[[projects]] - digest = "1:6e73003ecd35f4487a5e88270d3ca0a81bc80dc88053ac7e4dcfec5fba30d918" - name = "github.com/gogo/protobuf" - packages = ["proto"] - pruneopts = "" - revision = "636bf0302bc95575d69441b25a2603156ffdddf1" - version = "v1.1.1" - -[[projects]] - digest = "1:f958a1c137db276e52f0b50efee41a1a389dcdded59a69711f3e872757dab34b" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "protoc-gen-go/descriptor", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/empty", - "ptypes/struct", - "ptypes/timestamp", - "ptypes/wrappers", - ] - pruneopts = "" - revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" - version = "v1.1.0" - -[[projects]] - branch = "master" - digest = "1:2a5888946cdbc8aa360fd43301f9fc7869d663f60d5eedae7d4e6e5e4f06f2bf" - name = "github.com/golang/snappy" - packages = ["."] - pruneopts = "" - revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" - -[[projects]] - digest = "1:f9f45f75f332e03fc7e9fe9188ea4e1ce4d14779ef34fa1b023da67518e36327" - name = "github.com/google/go-cmp" - packages = [ - "cmp", - "cmp/internal/diff", - "cmp/internal/function", - "cmp/internal/value", - ] - pruneopts = "" - revision = "3af367b6b30c263d47e8895973edcca9a49cf029" - version = "v0.2.0" - -[[projects]] -<<<<<<< HEAD - digest = "1:e097a364f4e8d8d91b9b9eeafb992d3796a41fde3eb548c1a87eb9d9f60725cf" - name = "github.com/googleapis/gax-go" - packages = ["."] - pruneopts = "" - revision = 
"317e0006254c44a0ac427cc52a0e083ff0b9622f" - version = "v2.0.0" -======= - digest = "1:c1d7e883c50a26ea34019320d8ae40fad86c9e5d56e63a1ba2cb618cef43e986" - name = "github.com/google/uuid" - packages = ["."] - pruneopts = "" - revision = "064e2069ce9c359c118179501254f67d7d37ba24" - version = "0.2" ->>>>>>> master - -[[projects]] - digest = "1:dbbeb8ddb0be949954c8157ee8439c2adfd8dc1c9510eb44a6e58cb68c3dce28" - name = "github.com/gorilla/context" - packages = ["."] - pruneopts = "" - revision = "08b5f424b9271eedf6f9f0ce86cb9396ed337a42" - version = "v1.1.1" - -[[projects]] - digest = "1:c2c8666b4836c81a1d247bdf21c6a6fc1ab586538ab56f74437c2e0df5c375e1" - name = "github.com/gorilla/mux" - packages = ["."] - pruneopts = "" - revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf" - version = "v1.6.2" - -[[projects]] - branch = "master" - digest = "1:60b7bc5e043a11213472ae05252527287d20e0a6ccc18f6ae67fad88e41004de" - name = "github.com/hailocab/go-hostpool" - packages = ["."] - pruneopts = "" - revision = "e80d13ce29ede4452c43dea11e79b9bc8a15b478" - -[[projects]] - digest = "1:e7224669901bab4094e6d6697c136557b7177db6ceb01b7fc8b20d08f4b5aacd" - name = "github.com/hashicorp/consul" - packages = ["api"] - pruneopts = "" - revision = "39f93f011e591c842acc8053a7f5972aa6e592fd" - version = "v1.2.1" - -[[projects]] - branch = "master" - digest = "1:f5d25fd7bdda08e39e01193ef94a1ebf7547b1b931bcdec785d08050598f306c" - name = "github.com/hashicorp/go-cleanhttp" - packages = ["."] - pruneopts = "" - revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d" - -[[projects]] - branch = "master" - digest = "1:ff65bf6fc4d1116f94ac305342725c21b55c16819c2606adc8f527755716937f" - name = "github.com/hashicorp/go-rootcerts" - packages = ["."] - pruneopts = "" - revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00" - -[[projects]] - digest = "1:f72168ea995f398bab88e84bd1ff58a983466ba162fb8d50d47420666cd57fad" - name = "github.com/hashicorp/serf" - packages = ["coordinate"] - pruneopts = "" - revision = "d6574a5bb1226678d7010325fb6c985db20ee458" - version = "v0.8.1" - -[[projects]] - digest = "1:a39ef049cdeee03a57b132e7d60e32711b9d949c78458da78e702d9864c54369" - name = "github.com/influxdata/go-syslog" - packages = [ - "rfc5424", - "rfc5425", - ] - pruneopts = "" - revision = "eecd51df3ad85464a2bab9b7d3a45bc1e299059e" - version = "v1.0.1" - -[[projects]] - branch = "master" - digest = "1:bc3eb5ddfd59781ea1183f2b3d1eb105a1495d421f09b2ccd360c7fced0b612d" - name = "github.com/influxdata/tail" - packages = [ - ".", - "ratelimiter", - "util", - "watch", - "winfile", - ] - pruneopts = "" - revision = "c43482518d410361b6c383d7aebce33d0471d7bc" - -[[projects]] - branch = "master" - digest = "1:7fb6cc9607eaa6ef309edebc42b57f704244bd4b9ab23bff128829c4ad09b95d" - name = "github.com/influxdata/toml" - packages = [ - ".", - "ast", - ] - pruneopts = "" - revision = "2a2e3012f7cfbef64091cc79776311e65dfa211b" - -[[projects]] - branch = "master" - digest = "1:a0c157916be0b4de1d4565b1f094b8d746109f94968140dff40a42780fa6ccef" - name = "github.com/influxdata/wlog" - packages = ["."] - pruneopts = "" - revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec" - -[[projects]] - digest = "1:2de1791b9e43f26c696e36950e42676565e7da7499a870bc02213da4b59b1d14" - name = "github.com/jackc/pgx" - packages = [ - ".", - "chunkreader", - "internal/sanitize", - "pgio", - "pgproto3", - "pgtype", - "stdlib", - ] - pruneopts = "" - revision = "da3231b0b66e2e74cdb779f1d46c5e958ba8be27" - version = "v3.1.0" - -[[projects]] - digest = 
"1:6f49eae0c1e5dab1dafafee34b207aeb7a42303105960944828c2079b92fc88e" - name = "github.com/jmespath/go-jmespath" - packages = ["."] - pruneopts = "" - revision = "0b12d6b5" - -[[projects]] - branch = "master" - digest = "1:2c5ad58492804c40bdaf5d92039b0cde8b5becd2b7feeb37d7d1cc36a8aa8dbe" - name = "github.com/kardianos/osext" - packages = ["."] - pruneopts = "" - revision = "ae77be60afb1dcacde03767a8c37337fad28ac14" - -[[projects]] - branch = "master" - digest = "1:fed90fa725d3b1bac0a760de64426834dfef4546474cf182f2ec94285afa74a8" - name = "github.com/kardianos/service" - packages = ["."] - pruneopts = "" - revision = "615a14ed75099c9eaac6949e22ac2341bf9d3197" - -[[projects]] - branch = "master" - digest = "1:63e7368fcf6b54804076eaec26fd9cf0c4466166b272393db4b93102e1e962df" - name = "github.com/kballard/go-shellquote" - packages = ["."] - pruneopts = "" - revision = "95032a82bc518f77982ea72343cc1ade730072f0" - -[[projects]] - branch = "master" - digest = "1:1ed9eeebdf24aadfbca57eb50e6455bd1d2474525e0f0d4454de8c8e9bc7ee9a" - name = "github.com/kr/logfmt" - packages = ["."] - pruneopts = "" - revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" - -[[projects]] - branch = "master" - digest = "1:7e9956922e349af0190afa0b6621befcd201072679d8e51a9047ff149f2afe93" - name = "github.com/mailru/easyjson" - packages = [ - ".", - "buffer", - "jlexer", - "jwriter", - ] - pruneopts = "" - revision = "efc7eb8984d6655c26b5c9d2e65c024e5767c37c" - -[[projects]] - digest = "1:63722a4b1e1717be7b98fc686e0b30d5e7f734b9e93d7dee86293b6deab7ea28" - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - pruneopts = "" - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - digest = "1:4c8d8358c45ba11ab7bb15df749d4df8664ff1582daead28bae58cf8cbe49890" - name = "github.com/miekg/dns" - packages = ["."] - pruneopts = "" - revision = "5a2b9fab83ff0f8bfc99684bd5f43a37abe560f1" - version = "v1.0.8" - -[[projects]] - branch = "master" - digest = "1:99651e95333755cbe5c9768c1b80031300acca64a80870b40309202b32585a5a" - name = "github.com/mitchellh/go-homedir" - packages = ["."] - pruneopts = "" - revision = "3864e76763d94a6df2f9960b16a20a33da9f9a66" - -[[projects]] - branch = "master" - digest = "1:f43ed2c836208c14f45158fd01577c985688a4d11cf9fd475a939819fef3b321" - name = "github.com/mitchellh/mapstructure" - packages = ["."] - pruneopts = "" - revision = "f15292f7a699fcc1a38a80977f80a046874ba8ac" - -[[projects]] - digest = "1:ee2e62b00a9ccc2dba1525f93396e35c847f90f87939df6f361b86315ea5f69a" - name = "github.com/multiplay/go-ts3" - packages = ["."] - pruneopts = "" - revision = "d0d44555495c8776880a17e439399e715a4ef319" - version = "v1.0.0" - -[[projects]] - digest = "1:ccd0def9f0b82b61c5e54fcbfccf528eabb13b489d008e46dc16b808c2e1f765" - name = "github.com/naoina/go-stringutil" - packages = ["."] - pruneopts = "" - revision = "6b638e95a32d0c1131db0e7fe83775cbea4a0d0b" - version = "v0.1.0" - -[[projects]] - digest = "1:e5ec850ce66beb0014fc40d8e64b7482172eee71d86d734d66def5e9eac16797" - name = "github.com/nats-io/gnatsd" - packages = [ - "conf", - "logger", - "server", - "server/pse", - "util", - ] - pruneopts = "" - revision = "6608e9ac3be979dcb0614b772cc86a87b71acaa3" - version = "v1.2.0" - -[[projects]] - digest = "1:665af347df4c5d1ae4c3eacd0754f5337a301f6a3f2444c9993b996605c8c02b" - name = "github.com/nats-io/go-nats" - packages = [ - ".", - "encoders/builtin", - "util", - ] - pruneopts = "" - revision = "062418ea1c2181f52dc0f954f6204370519a868b" - version = 
"v1.5.0" - -[[projects]] - digest = "1:be61e8224b84064109eaba8157cbb4bbe6ca12443e182b6624fdfa1c0dcf53d9" - name = "github.com/nats-io/nuid" - packages = ["."] - pruneopts = "" - revision = "289cccf02c178dc782430d534e3c1f5b72af807f" - version = "v1.0.0" - -[[projects]] - digest = "1:7a69f6a3a33929f8b66aa39c93868ad1698f06417fe627ae067559beb94504bd" - name = "github.com/nsqio/go-nsq" - packages = ["."] - pruneopts = "" - revision = "eee57a3ac4174c55924125bb15eeeda8cffb6e6f" - version = "v1.0.7" - -[[projects]] - digest = "1:5d9b668b0b4581a978f07e7d2e3314af18eb27b3fb5d19b70185b7c575723d11" - name = "github.com/opencontainers/go-digest" - packages = ["."] - pruneopts = "" - revision = "279bed98673dd5bef374d3b6e4b09e2af76183bf" - version = "v1.0.0-rc1" - -[[projects]] - digest = "1:f26c8670b11e29a49c8e45f7ec7f2d5bac62e8fd4e3c0ae1662baa4a697f984a" - name = "github.com/opencontainers/image-spec" - packages = [ - "specs-go", - "specs-go/v1", - ] - pruneopts = "" - revision = "d60099175f88c47cd379c4738d158884749ed235" - version = "v1.0.1" - -[[projects]] - branch = "master" - digest = "1:2da0e5077ed40453dc281b9a2428d84cf6ad14063aed189f6296ca5dd25cf13d" - name = "github.com/opentracing-contrib/go-observer" - packages = ["."] - pruneopts = "" - revision = "a52f2342449246d5bcc273e65cbdcfa5f7d6c63c" - -[[projects]] - digest = "1:78fb99d6011c2ae6c72f3293a83951311147b12b06a5ffa43abf750c4fab6ac5" - name = "github.com/opentracing/opentracing-go" - packages = [ - ".", - "ext", - "log", - ] - pruneopts = "" - revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38" - version = "v1.0.2" - -[[projects]] - digest = "1:fea0e67285d900e5a0a7ec19ff4b4c82865a28dddbee8454c5360ad908f7069c" - name = "github.com/openzipkin/zipkin-go-opentracing" - packages = [ - ".", - "flag", - "thrift/gen-go/scribe", - "thrift/gen-go/zipkincore", - "types", - "wire", - ] - pruneopts = "" - revision = "26cf9707480e6b90e5eff22cf0bbf05319154232" - version = "v0.3.4" - -[[projects]] - digest = "1:29e34e58f26655c4d73135cdfc0517ea2ff1483eff34e5d5ef4b6fddbb81e31b" - name = "github.com/pierrec/lz4" - packages = [ - ".", - "internal/xxh32", - ] - pruneopts = "" - revision = "1958fd8fff7f115e79725b1288e0b878b3e06b00" - version = "v2.0.3" - -[[projects]] - digest = "1:7365acd48986e205ccb8652cc746f09c8b7876030d53710ea6ef7d0bd0dcd7ca" - name = "github.com/pkg/errors" - packages = ["."] - pruneopts = "" - revision = "645ef00459ed84a119197bfb8d8205042c6df63d" - version = "v0.8.0" - -[[projects]] - digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411" - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - pruneopts = "" - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - digest = "1:4142d94383572e74b42352273652c62afec5b23f325222ed09198f46009022d1" - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/promhttp", - ] - pruneopts = "" - revision = "c5b7fccd204277076155f10851dad72b76a49317" - version = "v0.8.0" - -[[projects]] - branch = "master" - digest = "1:185cf55b1f44a1bf243558901c3f06efa5c64ba62cfdcbb1bf7bbe8c3fb68561" - name = "github.com/prometheus/client_model" - packages = ["go"] - pruneopts = "" - revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" - -[[projects]] - branch = "master" - digest = "1:bfbc121ef802d245ef67421cff206615357d9202337a3d492b8f668906b485a8" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "log", - "model", - ] - pruneopts = "" - revision = 
"7600349dcfe1abd18d72d3a1770870d9800a7801" - -[[projects]] - branch = "master" - digest = "1:b694a6bdecdace488f507cff872b30f6f490fdaf988abd74d87ea56406b23b6e" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/util", - "nfs", - "xfs", - ] - pruneopts = "" - revision = "ae68e2d4c00fed4943b5f6698d504a5fe083da8a" - -[[projects]] - branch = "master" - digest = "1:15bcdc717654ef21128e8af3a63eec39a6d08a830e297f93d65163f87c8eb523" - name = "github.com/rcrowley/go-metrics" - packages = ["."] - pruneopts = "" - revision = "e2704e165165ec55d062f5919b4b29494e9fa790" - -[[projects]] - branch = "master" - digest = "1:7fc2f428767a2521abc63f1a663d981f61610524275d6c0ea645defadd4e916f" - name = "github.com/samuel/go-zookeeper" - packages = ["zk"] - pruneopts = "" - revision = "c4fab1ac1bec58281ad0667dc3f0907a9476ac47" - -[[projects]] - digest = "1:7f569d906bdd20d906b606415b7d794f798f91a62fcfb6a4daa6d50690fb7a3f" - name = "github.com/satori/go.uuid" - packages = ["."] - pruneopts = "" - revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3" - version = "v1.2.0" - -[[projects]] - digest = "1:02715a2fb4b9279af36651a59a51dd4164eb689bd6785874811899f43eeb2a54" - name = "github.com/shirou/gopsutil" - packages = [ - "cpu", - "disk", - "host", - "internal/common", - "load", - "mem", - "net", - "process", - ] - pruneopts = "" - revision = "8048a2e9c5773235122027dd585cf821b2af1249" - version = "v2.18.07" - -[[projects]] - branch = "master" - digest = "1:99c6a6dab47067c9b898e8c8b13d130c6ab4ffbcc4b7cc6236c2cd0b1e344f5b" - name = "github.com/shirou/w32" - packages = ["."] - pruneopts = "" - revision = "bb4de0191aa41b5507caa14b0650cdbddcd9280b" - -[[projects]] - digest = "1:8cf46b6c18a91068d446e26b67512cf16f1540b45d90b28b9533706a127f0ca6" - name = "github.com/sirupsen/logrus" - packages = ["."] - pruneopts = "" - revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" - version = "v1.0.5" - -[[projects]] - branch = "master" - digest = "1:4b0cabe65ca903a7b2a3e6272c5304eb788ce196d35ecb901c6563e5e7582443" - name = "github.com/soniah/gosnmp" - packages = ["."] - pruneopts = "" - revision = "96b86229e9b3ffb4b954144cdc7f98fe3ee1003f" - -[[projects]] - branch = "master" - digest = "1:4e8f1cae8e6d83af9000d82566efb8823907dae77ba4f1d76ff28fdd197c3c90" - name = "github.com/streadway/amqp" - packages = ["."] - pruneopts = "" - revision = "e5adc2ada8b8efff032bf61173a233d143e9318e" - -[[projects]] - digest = "1:711eebe744c0151a9d09af2315f0bb729b2ec7637ef4c410fa90a18ef74b65b6" - name = "github.com/stretchr/objx" - packages = ["."] - pruneopts = "" - revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c" - version = "v0.1.1" - -[[projects]] - digest = "1:c587772fb8ad29ad4db67575dad25ba17a51f072ff18a22b4f0257a4d9c24f75" - name = "github.com/stretchr/testify" - packages = [ - "assert", - "mock", - "require", - ] - pruneopts = "" - revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" - version = "v1.2.2" - -[[projects]] - digest = "1:e139a0dfe24e723193005b291ed82a975041718cfcab9136aa6c9540df70a4ff" - name = "github.com/tidwall/gjson" - packages = ["."] - pruneopts = "" - revision = "f123b340873a0084cb27267eddd8ff615115fbff" - version = "v1.1.2" - -[[projects]] - branch = "master" - digest = "1:4db4f92bb9cb04cfc4fccb36aba2598b02a988008c4cc0692b241214ad8ac96e" - name = "github.com/tidwall/match" - packages = ["."] - pruneopts = "" - revision = "1731857f09b1f38450e2c12409748407822dc6be" - -[[projects]] - digest = "1:343f20460c11a0d0529fe532553bfef9446918d1a1fda6d8661eb27d5b1a68b8" - name = "github.com/vjeantet/grok" - 
packages = ["."] - pruneopts = "" - revision = "ce01e59abcf6fbc9833b7deb5e4b8ee1769bcc53" - version = "v1.0.0" - -[[projects]] - digest = "1:f9fe29bf856d49f9a51d6001588cb5ee5d65c8a7ff5e8b0dd5423c3a510f0833" - name = "github.com/vmware/govmomi" - packages = [ - ".", - "find", - "list", - "nfc", - "object", - "performance", - "property", - "session", - "simulator", - "simulator/esx", - "simulator/vpx", - "task", - "view", - "vim25", - "vim25/debug", - "vim25/methods", - "vim25/mo", - "vim25/progress", - "vim25/soap", - "vim25/types", - "vim25/xml", - ] - pruneopts = "" - revision = "e3a01f9611c32b2362366434bcd671516e78955d" - version = "v0.18.0" - -[[projects]] - branch = "master" - digest = "1:98ed05e9796df287b90c1d96854e3913c8e349dbc546412d3cabb472ecf4b417" - name = "github.com/wvanbergen/kafka" - packages = ["consumergroup"] - pruneopts = "" - revision = "e2edea948ddfee841ea9a263b32ccca15f7d6c2f" - -[[projects]] - branch = "master" - digest = "1:12aff3cc417907bf9f683a6bf1dc78ffb08e41bc69f829491e593ea9b951a3cf" - name = "github.com/wvanbergen/kazoo-go" - packages = ["."] - pruneopts = "" - revision = "f72d8611297a7cf105da904c04198ad701a60101" - -[[projects]] - branch = "master" - digest = "1:c5918689b7e187382cc1066bf0260de54ba9d1b323105f46ed2551d2fb4a17c7" - name = "github.com/yuin/gopher-lua" - packages = [ - ".", - "ast", - "parse", - "pm", - ] - pruneopts = "" - revision = "46796da1b0b4794e1e341883a399f12cc7574b55" - -[[projects]] - branch = "master" -<<<<<<< HEAD - digest = "1:2fcfc6c3fb8dfe0d80d7789272230d3ac7db15022b66817113f98d9fff880225" - name = "github.com/zensqlmonitor/go-mssqldb" - packages = ["."] - pruneopts = "" - revision = "e8fbf836e44e86764eba398361d1825651709547" - -[[projects]] - digest = "1:8c8ec859c77fccd10a347b7219b597c4c21c448949e8bdf3fc3e6f4c78f952b4" - name = "go.opencensus.io" - packages = [ - ".", - "internal", - "internal/tagencoding", - "plugin/ocgrpc", - "plugin/ochttp", - "plugin/ochttp/propagation/b3", - "stats", - "stats/internal", - "stats/view", - "tag", - "trace", - "trace/internal", - "trace/propagation", - "trace/tracestate", - ] - pruneopts = "" - revision = "79993219becaa7e29e3b60cb67f5b8e82dee11d6" - version = "v0.17.0" - -[[projects]] - branch = "master" -======= ->>>>>>> master - digest = "1:0773b5c3be42874166670a20aa177872edb450cd9fc70b1df97303d977702a50" - name = "golang.org/x/crypto" - packages = [ - "bcrypt", - "blowfish", - "ed25519", - "ed25519/internal/edwards25519", - "md4", - "pbkdf2", - "pkcs12", - "pkcs12/internal/rc2", - "ssh/terminal", - ] - pruneopts = "" - revision = "a2144134853fc9a27a7b1e3eb4f19f1a76df13c9" - -[[projects]] - branch = "master" - digest = "1:00ff990baae4665bb0a8174af5ff78228574227ed96c89671247a56852a50e21" - name = "golang.org/x/net" - packages = [ - "bpf", - "context", - "context/ctxhttp", - "html", - "html/atom", - "html/charset", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - "internal/iana", - "internal/socket", - "internal/socks", - "internal/timeseries", - "ipv4", - "ipv6", - "proxy", - "trace", - "websocket", - ] - pruneopts = "" - revision = "a680a1efc54dd51c040b3b5ce4939ea3cf2ea0d1" - -[[projects]] - branch = "master" - digest = "1:b697592485cb412be4188c08ca0beed9aab87f36b86418e21acc4a3998f63734" - name = "golang.org/x/oauth2" - packages = [ - ".", -<<<<<<< HEAD - "google", - "internal", - "jws", - "jwt", -======= - "clientcredentials", - "internal", ->>>>>>> master - ] - pruneopts = "" - revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9" - -[[projects]] - branch = "master" - digest = 
"1:677e38cad6833ad266ec843739d167755eda1e6f2d8af1c63102b0426ad820db" - name = "golang.org/x/sys" - packages = [ - "unix", - "windows", - "windows/registry", - "windows/svc", - "windows/svc/debug", - "windows/svc/eventlog", - "windows/svc/mgr", - ] - pruneopts = "" - revision = "ac767d655b305d4e9612f5f6e33120b9176c4ad4" - -[[projects]] - digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "encoding", - "encoding/charmap", - "encoding/htmlindex", - "encoding/internal", - "encoding/internal/identifier", - "encoding/japanese", - "encoding/korean", - "encoding/simplifiedchinese", - "encoding/traditionalchinese", - "encoding/unicode", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "internal/utf8internal", - "language", - "runes", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - ] - pruneopts = "" - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - branch = "master" - digest = "1:2d878ecef4b17dbdd067b8fb98eb64f768f0802b1176b91b9e3c01b457efd01f" - name = "google.golang.org/api" - packages = [ - "googleapi/transport", - "internal", - "iterator", - "option", - "transport", - "transport/grpc", - "transport/http", - ] - pruneopts = "" - revision = "19ff8768a5c0b8e46ea281065664787eefc24121" - -[[projects]] - digest = "1:c1771ca6060335f9768dff6558108bc5ef6c58506821ad43377ee23ff059e472" - name = "google.golang.org/appengine" - packages = [ -<<<<<<< HEAD - ".", - "cloudsql", - "internal", - "internal/app_identity", - "internal/base", - "internal/datastore", - "internal/log", - "internal/modules", - "internal/remote_api", - "internal/socket", - "internal/urlfetch", - "socket", -======= - "cloudsql", - "internal", - "internal/base", - "internal/datastore", - "internal/log", - "internal/remote_api", - "internal/urlfetch", ->>>>>>> master - "urlfetch", - ] - pruneopts = "" - revision = "b1f26356af11148e710935ed1ac8a7f5702c7612" - version = "v1.1.0" - -[[projects]] - branch = "master" - digest = "1:b1443b4e3cc990c84d27fcdece9d3302158c67dba870e33a6937a2c0076388c2" - name = "google.golang.org/genproto" - packages = [ - "googleapis/api/annotations", - "googleapis/api/distribution", - "googleapis/api/label", - "googleapis/api/metric", - "googleapis/api/monitoredres", - "googleapis/monitoring/v3", - "googleapis/rpc/status", - "protobuf/field_mask", - ] - pruneopts = "" - revision = "fedd2861243fd1a8152376292b921b394c7bef7e" - -[[projects]] - digest = "1:5f31b45ee9da7a87f140bef3ed0a7ca34ea2a6d38eb888123b8e28170e8aa4f2" - name = "google.golang.org/grpc" - packages = [ - ".", - "balancer", - "balancer/base", - "balancer/roundrobin", - "codes", - "connectivity", - "credentials", - "credentials/oauth", - "encoding", - "encoding/proto", - "grpclog", - "internal", - "internal/backoff", - "internal/channelz", - "internal/grpcrand", - "keepalive", - "metadata", - "naming", - "peer", - "resolver", - "resolver/dns", - "resolver/passthrough", - "stats", - "status", - "tap", - "transport", - ] - pruneopts = "" - revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" - version = "v1.13.0" - -[[projects]] - digest = "1:15d017551627c8bb091bde628215b2861bed128855343fdd570c62d08871f6e1" - name = "gopkg.in/alecthomas/kingpin.v2" - packages = ["."] - pruneopts = "" - revision = "947dcec5ba9c011838740e680966fd7087a71d0d" - version = "v2.2.6" - -[[projects]] - digest = 
"1:3cad99e0d1f94b8c162787c12e59d0a0b9df1ef75590eb145cdd625479091efe" - name = "gopkg.in/asn1-ber.v1" - packages = ["."] - pruneopts = "" - revision = "379148ca0225df7a432012b8df0355c2a2063ac0" - version = "v1.2" - -[[projects]] - digest = "1:581450ae66d7970d91ef9132459fa583e937c6e502f1b96e4ee7783a56fa0b44" - name = "gopkg.in/fatih/pool.v2" - packages = ["."] - pruneopts = "" - revision = "010e0b745d12eaf8426c95f9c3924d81dd0b668f" - version = "v2.0.0" - -[[projects]] - digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd" - name = "gopkg.in/fsnotify.v1" - packages = ["."] - pruneopts = "" - revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" - source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz" - version = "v1.4.7" - -[[projects]] - digest = "1:960720207d3d0992995f4576e1366fd9e9b1483473b07fb7243144f75f5b1546" - name = "gopkg.in/gorethink/gorethink.v3" - packages = [ - ".", - "encoding", - "ql2", - "types", - ] - pruneopts = "" - revision = "7f5bdfd858bb064d80559b2a32b86669c5de5d3b" - version = "v3.0.5" - -[[projects]] - digest = "1:367baf06b7dbd0ef0bbdd785f6a79f929c96b0c18e9d3b29c0eed1ac3f5db133" - name = "gopkg.in/ldap.v2" - packages = ["."] - pruneopts = "" - revision = "bb7a9ca6e4fbc2129e3db588a34bc970ffe811a9" - version = "v2.5.1" - -[[projects]] - branch = "v2" - digest = "1:f54ba71a035aac92ced3e902d2bff3734a15d1891daff73ec0f90ef236750139" - name = "gopkg.in/mgo.v2" - packages = [ - ".", - "bson", - "internal/json", - "internal/sasl", - "internal/scram", - ] - pruneopts = "" - revision = "9856a29383ce1c59f308dd1cf0363a79b5bef6b5" - -[[projects]] - digest = "1:b49c4d3115800eace659c9a6a5c384a922f5b210178b24a01abb10731f404ea2" - name = "gopkg.in/olivere/elastic.v5" - packages = [ - ".", - "config", - "uritemplates", - ] - pruneopts = "" - revision = "52741dc2ce53629cbe1e673869040d886cba2cd5" - version = "v5.0.70" - -[[projects]] - branch = "v1" - digest = "1:a96d16bd088460f2e0685d46c39bcf1208ba46e0a977be2df49864ec7da447dd" - name = "gopkg.in/tomb.v1" - packages = ["."] - pruneopts = "" - revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8" - -[[projects]] - digest = "1:f0620375dd1f6251d9973b5f2596228cc8042e887cd7f827e4220bc1ce8c30e2" - name = "gopkg.in/yaml.v2" - packages = ["."] - pruneopts = "" - revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" - version = "v2.2.1" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "cloud.google.com/go/monitoring/apiv3", - "collectd.org/api", - "collectd.org/network", - "github.com/Azure/go-autorest/autorest", - "github.com/Azure/go-autorest/autorest/azure/auth", - "github.com/Microsoft/ApplicationInsights-Go/appinsights", - "github.com/Shopify/sarama", - "github.com/StackExchange/wmi", - "github.com/aerospike/aerospike-client-go", - "github.com/amir/raidman", - "github.com/apache/thrift/lib/go/thrift", - "github.com/aws/aws-sdk-go/aws", - "github.com/aws/aws-sdk-go/aws/client", - "github.com/aws/aws-sdk-go/aws/credentials", - "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "github.com/aws/aws-sdk-go/aws/session", - "github.com/aws/aws-sdk-go/service/cloudwatch", - "github.com/aws/aws-sdk-go/service/kinesis", - "github.com/bsm/sarama-cluster", - "github.com/couchbase/go-couchbase", - "github.com/denisenkom/go-mssqldb", - "github.com/dgrijalva/jwt-go", - "github.com/docker/docker/api/types", - "github.com/docker/docker/api/types/container", - "github.com/docker/docker/api/types/filters", - "github.com/docker/docker/api/types/registry", - 
"github.com/docker/docker/api/types/swarm", - "github.com/docker/docker/client", - "github.com/eclipse/paho.mqtt.golang", - "github.com/go-logfmt/logfmt", - "github.com/go-redis/redis", - "github.com/go-sql-driver/mysql", - "github.com/gobwas/glob", - "github.com/golang/protobuf/proto", - "github.com/golang/protobuf/ptypes/timestamp", - "github.com/google/go-cmp/cmp", - "github.com/gorilla/mux", - "github.com/hashicorp/consul/api", - "github.com/influxdata/go-syslog/rfc5424", - "github.com/influxdata/go-syslog/rfc5425", - "github.com/influxdata/tail", - "github.com/influxdata/toml", - "github.com/influxdata/toml/ast", - "github.com/influxdata/wlog", - "github.com/jackc/pgx", - "github.com/jackc/pgx/pgtype", - "github.com/jackc/pgx/stdlib", - "github.com/kardianos/service", - "github.com/kballard/go-shellquote", - "github.com/matttproud/golang_protobuf_extensions/pbutil", - "github.com/miekg/dns", - "github.com/multiplay/go-ts3", - "github.com/nats-io/gnatsd/server", - "github.com/nats-io/go-nats", - "github.com/nsqio/go-nsq", - "github.com/openzipkin/zipkin-go-opentracing", - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore", - "github.com/pkg/errors", - "github.com/prometheus/client_golang/prometheus", - "github.com/prometheus/client_golang/prometheus/promhttp", - "github.com/prometheus/client_model/go", - "github.com/prometheus/common/expfmt", - "github.com/prometheus/common/log", - "github.com/satori/go.uuid", - "github.com/shirou/gopsutil/cpu", - "github.com/shirou/gopsutil/disk", - "github.com/shirou/gopsutil/host", - "github.com/shirou/gopsutil/load", - "github.com/shirou/gopsutil/mem", - "github.com/shirou/gopsutil/net", - "github.com/shirou/gopsutil/process", - "github.com/soniah/gosnmp", - "github.com/streadway/amqp", - "github.com/stretchr/testify/assert", - "github.com/stretchr/testify/mock", - "github.com/stretchr/testify/require", - "github.com/tidwall/gjson", - "github.com/vjeantet/grok", - "github.com/vmware/govmomi", - "github.com/vmware/govmomi/object", - "github.com/vmware/govmomi/performance", - "github.com/vmware/govmomi/session", - "github.com/vmware/govmomi/simulator", - "github.com/vmware/govmomi/view", - "github.com/vmware/govmomi/vim25", - "github.com/vmware/govmomi/vim25/methods", - "github.com/vmware/govmomi/vim25/mo", - "github.com/vmware/govmomi/vim25/soap", - "github.com/vmware/govmomi/vim25/types", - "github.com/wvanbergen/kafka/consumergroup", - "golang.org/x/net/context", - "golang.org/x/net/html/charset", - "golang.org/x/oauth2", - "golang.org/x/oauth2/clientcredentials", - "golang.org/x/sys/unix", - "golang.org/x/sys/windows", - "golang.org/x/sys/windows/svc", - "golang.org/x/sys/windows/svc/mgr", - "google.golang.org/api/option", - "google.golang.org/genproto/googleapis/api/metric", - "google.golang.org/genproto/googleapis/api/monitoredres", - "google.golang.org/genproto/googleapis/monitoring/v3", - "google.golang.org/grpc", - "google.golang.org/grpc/codes", - "google.golang.org/grpc/credentials", - "google.golang.org/grpc/status", - "gopkg.in/gorethink/gorethink.v3", - "gopkg.in/ldap.v2", - "gopkg.in/mgo.v2", - "gopkg.in/mgo.v2/bson", - "gopkg.in/olivere/elastic.v5", - "gopkg.in/yaml.v2", - ] - solver-name = "gps-cdcl" - solver-version = 1 From 625a1ca8fa44b385a903d485293803612c73e4fc Mon Sep 17 00:00:00 2001 From: Grace Do Date: Fri, 9 Nov 2018 10:59:33 -0800 Subject: [PATCH 0362/1815] Use fieldsCopy in testutil.Accumulator AddFields (#4970) --- testutil/accumulator.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) 
diff --git a/testutil/accumulator.go b/testutil/accumulator.go
index c13f02ab3..65c8883e0 100644
--- a/testutil/accumulator.go
+++ b/testutil/accumulator.go
@@ -83,6 +83,10 @@ func (a *Accumulator) AddFields(
 		return
 	}
 
+	if len(fields) == 0 {
+		return
+	}
+
 	tagsCopy := map[string]string{}
 	for k, v := range tags {
 		tagsCopy[k] = v
@@ -93,10 +97,6 @@ func (a *Accumulator) AddFields(
 		fieldsCopy[k] = v
 	}
 
-	if len(fields) == 0 {
-		return
-	}
-
 	var t time.Time
 	if len(timestamp) > 0 {
 		t = timestamp[0]
@@ -114,7 +114,7 @@ func (a *Accumulator) AddFields(
 
 	p := &Metric{
 		Measurement: measurement,
-		Fields:      fields,
+		Fields:      fieldsCopy,
 		Tags:        tagsCopy,
 		Time:        t,
 	}
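Editor's note: the `fieldsCopy` change above closes an aliasing bug: the stored test metric previously kept a reference to the caller's `fields` map, so a caller that reused or mutated its map after `AddFields` returned would silently rewrite already-accumulated metrics. The following is a minimal, self-contained Go sketch of why the copy matters; it is an illustration with invented names (`Metric`, `addFields`), not the actual testutil code.

```go
package main

import "fmt"

// Metric is a pared-down stand-in for testutil.Metric; the real type also
// carries tags and a timestamp.
type Metric struct {
	Measurement string
	Fields      map[string]interface{}
}

// addFields stores a defensive copy of the caller's map, mirroring the
// fieldsCopy change in the patch. Storing `fields` directly would alias the
// caller's map and let later mutations corrupt the stored metric.
func addFields(store *[]*Metric, measurement string, fields map[string]interface{}) {
	if len(fields) == 0 {
		return // nothing to record; checked before any copying is done
	}
	fieldsCopy := map[string]interface{}{}
	for k, v := range fields {
		fieldsCopy[k] = v
	}
	*store = append(*store, &Metric{Measurement: measurement, Fields: fieldsCopy})
}

func main() {
	var store []*Metric
	f := map[string]interface{}{"value": 1}
	addFields(&store, "cpu", f)
	f["value"] = 99                       // caller reuses its map for the next point
	fmt.Println(store[0].Fields["value"]) // prints 1 thanks to the copy
}
```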
CEST",mail_cache_hits=0i,mail_lookup_attr=0i,mail_lookup_path=0i,mail_read_bytes=0i,mail_read_count=0i,maj_faults=0i,min_faults=0i,num_cmds=12i,num_connected_sessions=0i,num_logins=6i,read_bytes=0i,read_count=0i,reset_timestamp="2016-04-08 10:33:34 +0200 CEST",sys_cpu=0,user_cpu=0,vol_cs=0i,write_bytes=0i,write_count=0i 1460106251633824223 -* Plugin: dovecot, Collection 1 -> dovecot,server=dovecot-1.domain.test,type=user,user=user-1@domain.test clock_time=0.00006,disk_input=405504i,disk_output=77824i,invol_cs=67i,last_update="2016-04-08 11:02:55.000111634 +0200 CEST",mail_cache_hits=26i,mail_lookup_attr=0i,mail_lookup_path=6i,mail_read_bytes=86233i,mail_read_count=5i,maj_faults=0i,min_faults=975i,num_cmds=41i,num_logins=3i,read_bytes=368833i,read_count=394i,reset_timestamp="2016-04-08 11:01:32 +0200 CEST",sys_cpu=0.008,user_cpu=0.004,vol_cs=323i,write_bytes=105086i,write_count=176i 1460106256637049167 -* Plugin: dovecot, Collection 1 -> dovecot,domain=domain.test,server=dovecot-1.domain.test,type=domain clock_time=100896189179847.7,disk_input=6467588263936i,disk_output=17933680439296i,invol_cs=1194808498i,last_update="2016-04-08 11:04:08.000377367 +0200 CEST",mail_cache_hits=46455781i,mail_lookup_attr=0i,mail_lookup_path=571490i,mail_read_bytes=79287033067i,mail_read_count=491243i,maj_faults=16992i,min_faults=1278442541i,num_cmds=606005i,num_connected_sessions=6597i,num_logins=166381i,read_bytes=30231409780721i,read_count=1624912080i,reset_timestamp="2016-04-08 10:28:45 +0200 CEST",sys_cpu=156440.372,user_cpu=216676.476,vol_cs=2749291157i,write_bytes=17097106707594i,write_count=944448998i 1460106261639672622 -* Plugin: dovecot, Collection 1 -> dovecot,server=dovecot-1.domain.test,type=global clock_time=101196971074203.94,disk_input=6493168218112i,disk_output=17978638815232i,invol_cs=1198855447i,last_update="2016-04-08 11:04:13.000379245 +0200 CEST",mail_cache_hits=68192209i,mail_lookup_attr=0i,mail_lookup_path=653861i,mail_read_bytes=86705151847i,mail_read_count=566125i,maj_faults=17208i,min_faults=1286179702i,num_cmds=917469i,num_connected_sessions=8896i,num_logins=174827i,read_bytes=30327690466186i,read_count=1772396430i,reset_timestamp="2016-04-08 10:28:45 +0200 CEST",sys_cpu=157965.692,user_cpu=219337.48,vol_cs=2827615787i,write_bytes=17150837661940i,write_count=992653220i 1460106266642153907 +dovecot,server=dovecot-1.domain.test,type=global clock_time=101196971074203.94,disk_input=6493168218112i,disk_output=17978638815232i,invol_cs=1198855447i,last_update="2016-04-08 11:04:13.000379245 +0200 CEST",mail_cache_hits=68192209i,mail_lookup_attr=0i,mail_lookup_path=653861i,mail_read_bytes=86705151847i,mail_read_count=566125i,maj_faults=17208i,min_faults=1286179702i,num_cmds=917469i,num_connected_sessions=8896i,num_logins=174827i,read_bytes=30327690466186i,read_count=1772396430i,reset_timestamp="2016-04-08 10:28:45 +0200 CEST",sys_cpu=157965.692,user_cpu=219337.48,vol_cs=2827615787i,write_bytes=17150837661940i,write_count=992653220i 1460106266642153907 ``` + +[stats old]: http://wiki2.dovecot.org/Statistics/Old +[upgrading]: https://wiki2.dovecot.org/Upgrading/2.3#Statistics_Redesign From 41c8fd7e937890c1c3cc9251a4d93bf4be18d537 Mon Sep 17 00:00:00 2001 From: Ivan Vandot Date: Tue, 13 Nov 2018 01:05:28 +0100 Subject: [PATCH 0364/1815] Clarify UDP influxdb endpoint (#4974) --- plugins/outputs/influxdb/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md index 5d223ca3d..8a9f1a5b8 100644 --- 
From 41c8fd7e937890c1c3cc9251a4d93bf4be18d537 Mon Sep 17 00:00:00 2001
From: Ivan Vandot
Date: Tue, 13 Nov 2018 01:05:28 +0100
Subject: [PATCH 0364/1815] Clarify UDP influxdb endpoint (#4974)

---
 plugins/outputs/influxdb/README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md
index 5d223ca3d..8a9f1a5b8 100644
--- a/plugins/outputs/influxdb/README.md
+++ b/plugins/outputs/influxdb/README.md
@@ -16,6 +16,7 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP ser
   # urls = ["http://127.0.0.1:8086"]
 
   ## The target database for metrics; will be created as needed.
+  ## For the UDP endpoint, the database must be configured on the server side.
   # database = "telegraf"
 
   ## If true, no CREATE DATABASE queries will be sent. Set to true when using

From 56f2c435e701c63172f118c10555bdc98bfe39b2 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 12 Nov 2018 16:06:23 -0800
Subject: [PATCH 0365/1815] Add udp url note to influxdb output sample config

---
 plugins/outputs/influxdb/influxdb.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go
index 1f61b801f..a3f2fd003 100644
--- a/plugins/outputs/influxdb/influxdb.go
+++ b/plugins/outputs/influxdb/influxdb.go
@@ -69,6 +69,7 @@ var sampleConfig = `
   # urls = ["http://127.0.0.1:8086"]
 
   ## The target database for metrics; will be created as needed.
+  ## For the UDP endpoint, the database must be configured on the server side.
   # database = "telegraf"
 
   ## If true, no CREATE DATABASE queries will be sent. Set to true when using
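Editor's note: both patches above add the same note for a reason. UDP writes to InfluxDB v1.x are fire-and-forget, and the listener maps a port to a target database in the server's own configuration (typically a `[[udp]]` section in influxdb.conf), so the client can neither choose nor create one. A minimal Go sketch of what a raw UDP line-protocol write looks like; the measurement, host, port, and timestamp are made up for illustration:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// 8089 is the conventional InfluxDB UDP port; the database it feeds is
	// set server-side, which is why the plugin cannot create databases here.
	conn, err := net.Dial("udp", "127.0.0.1:8089")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Fire-and-forget: no response, no acknowledgement, no CREATE DATABASE.
	_, err = fmt.Fprintf(conn, "cpu,host=example usage_idle=99.5 %d\n",
		int64(1542326400000000000))
	if err != nil {
		panic(err)
	}
}
```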
From edeb21c3ccdb19eb7a97f724ea309b4916cc8171 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 13 Nov 2018 13:53:57 -0800
Subject: [PATCH 0366/1815] Update Gopkg.lock with current revisions

---
 Gopkg.lock | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Gopkg.lock b/Gopkg.lock
index 7dd2261c0..2dd0dd26c 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -1172,7 +1172,7 @@
 
 [[projects]]
   branch = "master"
-  digest = "0:"
+  digest = "1:b697592485cb412be4188c08ca0beed9aab87f36b86418e21acc4a3998f63734"
   name = "golang.org/x/oauth2"
   packages = [
     ".",
@@ -1253,7 +1253,7 @@
   revision = "19ff8768a5c0b8e46ea281065664787eefc24121"
 
 [[projects]]
-  digest = "0:"
+  digest = "1:c1771ca6060335f9768dff6558108bc5ef6c58506821ad43377ee23ff059e472"
   name = "google.golang.org/appengine"
   packages = [
     ".",

From b6fd7c5aabbbf1de41fefbbbf551195b8bd44757 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 14 Nov 2018 11:39:48 -0800
Subject: [PATCH 0367/1815] Add troubleshooting section to disk input README

---
 plugins/inputs/EXAMPLE_README.md | 23 ++++++++++++++++-------
 plugins/inputs/disk/README.md    | 14 ++++++++++++--
 2 files changed, 28 insertions(+), 9 deletions(-)

diff --git a/plugins/inputs/EXAMPLE_README.md b/plugins/inputs/EXAMPLE_README.md
index 4ebedda87..b60d48c91 100644
--- a/plugins/inputs/EXAMPLE_README.md
+++ b/plugins/inputs/EXAMPLE_README.md
@@ -4,18 +4,22 @@ The example plugin gathers metrics about example things.  This description
 explains at a high level what the plugin does and provides links to where
 additional information can be found.
 
-### Configuration:
+### Configuration
 
 This section contains the default TOML to configure the plugin.  You can
 generate it using `telegraf --usage <plugin-name>`.
 
 ```toml
-# Description
 [[inputs.example]]
   example_option = "example_value"
 ```
 
+#### example_option
+
+A more in depth description of an option can be provided here, but only do so
+if the option cannot be fully described in the sample config.
+
-### Metrics:
+### Metrics
 
 Here you should add an optional description and links to where the user can
 get more information about the measurements.
@@ -38,10 +42,10 @@ mapped to the output.
   - fields:
     - field3 (integer, bytes)
 
-### Sample Queries:
+### Sample Queries
 
-This section should contain some useful InfluxDB queries that can be used to
-get started with the plugin or to generate dashboards.  For each query listed,
+This section can contain some useful InfluxDB queries that can be used to get
+started with the plugin or to generate dashboards.  For each query listed,
 describe at a high level what data is returned.
 
 Get the max, mean, and min for the measurement in the last hour:
@@ -49,7 +53,12 @@ Get the max, mean, and min for the measurement in the last hour:
 SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar AND time > now() - 1h GROUP BY tag
 ```
 
-### Example Output:
+### Troubleshooting
+
+This optional section can provide basic troubleshooting steps that a user can
+perform.
+
+### Example Output
 
 This section shows example output in Line Protocol format.  You can often use
 `telegraf --input-filter <plugin-name> --test` or use the `file` output to get

diff --git a/plugins/inputs/disk/README.md b/plugins/inputs/disk/README.md
index fa84264be..5359cca62 100644
--- a/plugins/inputs/disk/README.md
+++ b/plugins/inputs/disk/README.md
@@ -9,7 +9,6 @@ https://en.wikipedia.org/wiki/Df_(Unix) for more details.
 ### Configuration:
 
 ```toml
-# Read metrics about disk usage by mount point
 [[inputs.disk]]
   ## By default stats will be gathered for all mount points.
   ## Set mount_points will restrict the stats to only the specified mount points.
@@ -49,6 +48,17 @@ docker run -v /:/hostfs:ro -e HOST_MOUNT_PREFIX=/hostfs -e HOST_PROC=/hostfs/pro
   - inodes_total (integer, files)
   - inodes_used (integer, files)
 
+### Troubleshooting
+
+On Linux, the list of disks is taken from the `/proc/self/mounts` file and a
+[statfs] call is made on the second column.  If any expected filesystems are
+missing, ensure that the `telegraf` user can read these files:
+
+```
+$ sudo -u telegraf cat /proc/self/mounts | grep sda2
+/dev/sda2 /home ext4 rw,relatime,data=ordered 0 0
+$ sudo -u telegraf stat /home
+```
+
 ### Example Output:
 
 ```
@@ -58,4 +68,4 @@ disk,fstype=autofs,mode=rw,path=/net free=0i,inodes_free=0i,inodes_total=0i,inod
 disk,fstype=autofs,mode=rw,path=/home free=0i,inodes_free=0i,inodes_total=0i,inodes_used=0i,total=0i,used=0i,used_percent=0 1453832006274169688
 ```
 
-
+[statfs]: http://man7.org/linux/man-pages/man2/statfs.2.html

From d886055f67e04b3a7acf664f381272807d9b8e0f Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 14 Nov 2018 12:06:23 -0800
Subject: [PATCH 0368/1815] Update telegraf_windows.conf

---
 etc/telegraf_windows.conf | 169 ++++++++++++++++++++++++++++----------
 1 file changed, 125 insertions(+), 44 deletions(-)

diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf
index 220944d42..f0bfbdba0 100644
--- a/etc/telegraf_windows.conf
+++ b/etc/telegraf_windows.conf
@@ -1,18 +1,26 @@
-# Telegraf configuration
-
+# Telegraf Configuration
+#
 # Telegraf is entirely plugin driven. All metrics are gathered from the
 # declared inputs, and sent to the declared outputs.
-
+#
 # Plugins must be declared in here to be active.
 # To deactivate a plugin, comment out the name and any variables.
-
+#
 # Use 'telegraf -config telegraf.conf -test' to see what metrics a config
 # file would generate.
+#
+# Environment variables can be used anywhere in this config file, simply prepend
+# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
+# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
+
 
 # Global tags can be specified here in key="value" format.
 [global_tags]
   # dc = "us-east-1" # will tag all metrics with dc=us-east-1
   # rack = "1a"
+  ## Environment variables can be used as tags, and throughout the config file
+  # user = "$USER"
+
 
 # Configuration for telegraf agent
 [agent]
   ## Default data collection interval for all inputs
   interval = "10s"
   ## Rounds collection interval to 'interval'
   ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
   round_interval = true
For strings the variable must be within quotes (ie, "$STR_VAR"), +# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) + # Global tags can be specified here in key="value" format. [global_tags] # dc = "us-east-1" # will tag all metrics with dc=us-east-1 # rack = "1a" + ## Environment variables can be used as tags, and throughout the config file + # user = "$USER" + # Configuration for telegraf agent [agent] @@ -22,11 +30,16 @@ ## ie, if interval="10s" then always collect on :00, :10, :20, etc. round_interval = true - ## Telegraf will cache metric_buffer_limit metrics for each output, and will - ## flush this buffer on a successful write. - metric_buffer_limit = 1000 - ## Flush the buffer whenever full, regardless of flush_interval. - flush_buffer_when_full = true + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 1000 + + ## For failed writes, telegraf will cache metric_buffer_limit metrics for each + ## output, and will flush this buffer on a successful write. Oldest metrics + ## are dropped first when this buffer fills. + ## This buffer only fills when writes fail to output plugin(s). + metric_buffer_limit = 10000 ## Collection jitter is used to jitter the collection by a random amount. ## Each plugin will sleep for a random time within jitter before collecting. @@ -34,52 +47,103 @@ ## same time, which can have a measurable effect on the system. collection_jitter = "0s" - ## Default flushing interval for all outputs. You shouldn't set this below - ## interval. Maximum flush_interval will be flush_interval + flush_jitter + ## Default flushing interval for all outputs. Maximum flush_interval will be + ## flush_interval + flush_jitter flush_interval = "10s" ## Jitter the flush interval by a random amount. This is primarily to avoid ## large write spikes for users running a large number of telegraf instances. ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s flush_jitter = "0s" + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s. + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + precision = "" + ## Logging configuration: - ## Run telegraf in debug mode + ## Run telegraf with debug log messages. debug = false - ## Run telegraf in quiet mode + ## Run telegraf in quiet mode (error log messages only). quiet = false - ## Specify the log file name. The empty string means to log to stdout. + ## Specify the log file name. The empty string means to log to stderr. logfile = "/Program Files/Telegraf/telegraf.log" ## Override default hostname, if empty use os.Hostname() hostname = "" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = false ############################################################################### # OUTPUTS # ############################################################################### -# Configuration for influxdb server to send metrics to +# Configuration for sending metrics to InfluxDB [[outputs.influxdb]] - # The full HTTP or UDP endpoint URL for your InfluxDB instance. 
- # Multiple urls can be specified but it is assumed that they are part of the same - # cluster, this means that only ONE of the urls will be written to each interval. - # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example - urls = ["http://127.0.0.1:8086"] # required - # The target database for metrics (telegraf will create it if not exists) - database = "telegraf" # required - # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". - # note: using second precision greatly helps InfluxDB compression - precision = "s" + ## The full HTTP or UDP URL for your InfluxDB instance. + ## + ## Multiple URLs can be specified for a single cluster, only ONE of the + ## urls will be written to each interval. + # urls = ["unix:///var/run/influxdb.sock"] + # urls = ["udp://127.0.0.1:8089"] + # urls = ["http://127.0.0.1:8086"] - ## Write timeout (for the InfluxDB client), formatted as a string. - ## If not provided, will default to 5s. 0s means no timeout (not recommended). - timeout = "5s" + ## The target database for metrics; will be created as needed. + # database = "telegraf" + + ## If true, no CREATE DATABASE queries will be sent. Set to true when using + ## Telegraf with a user without permissions to create databases or when the + ## database already exists. + # skip_database_creation = false + + ## Name of existing retention policy to write to. Empty string writes to + ## the default retention policy. Only takes effect when using HTTP. + # retention_policy = "" + + ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". + ## Only takes effect when using HTTP. + # write_consistency = "any" + + ## Timeout for HTTP messages. + # timeout = "5s" + + ## HTTP Basic Auth # username = "telegraf" # password = "metricsmetricsmetricsmetrics" - # Set the user agent for HTTP POSTs (can be useful for log differentiation) + + ## HTTP User-Agent # user_agent = "telegraf" - # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) - # udp_payload = 512 + + ## UDP payload size is the maximum packet size to send. + # udp_payload = "512B" + + ## Optional TLS Config for use on HTTP connections. + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## HTTP Proxy override, if unset values the standard proxy environment + ## variables are consulted to determine which proxy, if any, should be used. + # http_proxy = "http://corporate.proxy:3128" + + ## Additional HTTP headers + # http_headers = {"X-Special-Header" = "Special-Value"} + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "identity" + + ## When true, Telegraf will output unsigned integers as unsigned values, + ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned + ## integer values. Enabling this option will result in field type errors if + ## existing data has been written. 
+ # influx_uint_support = false ############################################################################### @@ -206,29 +270,30 @@ Measurement = "win_swap" - # Windows system plugins using WMI (disabled by default, using # win_perf_counters over WMI is recommended) + # # Read metrics about cpu usage # [[inputs.cpu]] # ## Whether to report per-cpu stats or not # percpu = true # ## Whether to report total system cpu stats or not # totalcpu = true -# ## Comment this line if you want the raw CPU time metrics -# fielddrop = ["time_*"] +# ## If true, collect raw CPU time metrics. +# collect_cpu_time = false +# ## If true, compute and report the sum of all non-idle CPU states. +# report_active = false # # Read metrics about disk usage by mount point # [[inputs.disk]] -# ## By default, telegraf gather stats for all mountpoints. -# ## Setting mountpoints will restrict the stats to the specified mountpoints. -# ## mount_points=["/"] +# ## By default stats will be gathered for all mount points. +# ## Set mount_points will restrict the stats to only the specified mount points. +# # mount_points = ["/"] # -# ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually -# ## present on /run, /var/run, /dev/shm or /dev). -# # ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] +# ## Ignore mount points by filesystem type. +# ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] # # Read metrics about disk IO by device @@ -236,9 +301,26 @@ # ## By default, telegraf will gather stats for all devices including # ## disk partitions. # ## Setting devices will restrict the stats to the specified devices. -# ## devices = ["sda", "sdb"] -# ## Uncomment the following line if you do not need disk serial numbers. -# ## skip_serial_number = true +# # devices = ["sda", "sdb", "vd*"] +# ## Uncomment the following line if you need disk serial numbers. +# # skip_serial_number = false +# # +# ## On systems which support it, device metadata can be added in the form of +# ## tags. +# ## Currently only Linux is supported via udev properties. You can view +# ## available properties for a device by running: +# ## 'udevadm info -q property -n /dev/sda' +# # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] +# # +# ## Using the same metadata source as device_tags, you can also customize the +# ## name of the device via templates. +# ## The 'name_templates' parameter is a list of templates to try and apply to +# ## the device. The template may contain variables in the form of '$PROPERTY' or +# ## '${PROPERTY}'. The first template which does not contain any variables not +# ## present for the device is used as the device name tag. +# ## The typical use case is for LVM volumes, to get the VG/LV name instead of +# ## the near-meaningless DM-0 name. 
+# # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] # # Read metrics about memory usage @@ -249,4 +331,3 @@ # # Read metrics about swap memory usage # [[inputs.swap]] # # no configuration - From 274af39a5e22781e1e5fb1c73814985d0b7e7c6b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 15 Nov 2018 15:43:47 -0800 Subject: [PATCH 0369/1815] Update kubernetes input docs (#4990) --- plugins/inputs/kubernetes/README.md | 360 +++++++++------------------- 1 file changed, 115 insertions(+), 245 deletions(-) diff --git a/plugins/inputs/kubernetes/README.md b/plugins/inputs/kubernetes/README.md index 099cf1526..37d713d18 100644 --- a/plugins/inputs/kubernetes/README.md +++ b/plugins/inputs/kubernetes/README.md @@ -1,7 +1,5 @@ # Kubernetes Input Plugin -**This plugin is experimental and may cause high cardinality issues with moderate to large Kubernetes deployments** - This input plugin talks to the kubelet api using the `/stats/summary` endpoint to gather metrics about the running pods and containers for a single host. It is assumed that this plugin is running as part of a `daemonset` within a kubernetes installation. This means that telegraf is running on every node within the cluster. Therefore, you should configure this plugin to talk to its locally running kubelet. To find the ip address of the host you are running on you can issue a command like the following: @@ -10,256 +8,128 @@ $ curl -s $API_URL/api/v1/namespaces/$POD_NAMESPACE/pods/$HOSTNAME --header "Aut ``` In this case we used the downward API to pass in the `$POD_NAMESPACE` and `$HOSTNAME` is the hostname of the pod which is set by the kubernetes API. -## Summary Data +#### Series Cardinality Warning -```json -{ - "node": { - "nodeName": "node1", - "systemContainers": [ - { - "name": "kubelet", - "startTime": "2016-08-25T18:46:52Z", - "cpu": { - "time": "2016-09-27T16:57:31Z", - "usageNanoCores": 56652446, - "usageCoreNanoSeconds": 101437561712262 - }, - "memory": { - "time": "2016-09-27T16:57:31Z", - "usageBytes": 62529536, - "workingSetBytes": 62349312, - "rssBytes": 47509504, - "pageFaults": 4769397409, - "majorPageFaults": 13 - }, - "rootfs": { - "availableBytes": 84379979776, - "capacityBytes": 105553100800 - }, - "logs": { - "availableBytes": 84379979776, - "capacityBytes": 105553100800 - }, - "userDefinedMetrics": null - }, - { - "name": "bar", - "startTime": "2016-08-25T18:46:52Z", - "cpu": { - "time": "2016-09-27T16:57:31Z", - "usageNanoCores": 56652446, - "usageCoreNanoSeconds": 101437561712262 - }, - "memory": { - "time": "2016-09-27T16:57:31Z", - "usageBytes": 62529536, - "workingSetBytes": 62349312, - "rssBytes": 47509504, - "pageFaults": 4769397409, - "majorPageFaults": 13 - }, - "rootfs": { - "availableBytes": 84379979776, - "capacityBytes": 105553100800 - }, - "logs": { - "availableBytes": 84379979776, - "capacityBytes": 105553100800 - }, - "userDefinedMetrics": null - } - ], - "startTime": "2016-08-25T18:46:52Z", - "cpu": { - "time": "2016-09-27T16:57:41Z", - "usageNanoCores": 576996212, - "usageCoreNanoSeconds": 774129887054161 - }, - "memory": { - "time": "2016-09-27T16:57:41Z", - "availableBytes": 10726387712, - "usageBytes": 12313182208, - "workingSetBytes": 5081538560, - "rssBytes": 35586048, - "pageFaults": 351742, - "majorPageFaults": 1236 - }, - "network": { - "time": "2016-09-27T16:57:41Z", - "rxBytes": 213281337459, - "rxErrors": 0, - "txBytes": 292869995684, - "txErrors": 0 - }, - "fs": { - "availableBytes": 84379979776, - "capacityBytes": 105553100800, - "usedBytes": 16754286592 - }, - 
"runtime": { - "imageFs": { - "availableBytes": 84379979776, - "capacityBytes": 105553100800, - "usedBytes": 5809371475 - } - } - }, - "pods": [ - { - "podRef": { - "name": "foopod", - "namespace": "foons", - "uid": "6d305b06-8419-11e6-825c-42010af000ae" - }, - "startTime": "2016-09-26T18:45:42Z", - "containers": [ - { - "name": "foocontainer", - "startTime": "2016-09-26T18:46:43Z", - "cpu": { - "time": "2016-09-27T16:57:32Z", - "usageNanoCores": 846503, - "usageCoreNanoSeconds": 56507553554 - }, - "memory": { - "time": "2016-09-27T16:57:32Z", - "usageBytes": 30789632, - "workingSetBytes": 30789632, - "rssBytes": 30695424, - "pageFaults": 10761, - "majorPageFaults": 0 - }, - "rootfs": { - "availableBytes": 84379979776, - "capacityBytes": 105553100800, - "usedBytes": 57344 - }, - "logs": { - "availableBytes": 84379979776, - "capacityBytes": 105553100800, - "usedBytes": 24576 - }, - "userDefinedMetrics": null - } - ], - "network": { - "time": "2016-09-27T16:57:34Z", - "rxBytes": 70749124, - "rxErrors": 0, - "txBytes": 47813506, - "txErrors": 0 - }, - "volume": [ - { - "availableBytes": 7903948800, - "capacityBytes": 7903961088, - "usedBytes": 12288, - "name": "volume1" - }, - { - "availableBytes": 7903956992, - "capacityBytes": 7903961088, - "usedBytes": 4096, - "name": "volume2" - }, - { - "availableBytes": 7903948800, - "capacityBytes": 7903961088, - "usedBytes": 12288, - "name": "volume3" - }, - { - "availableBytes": 7903952896, - "capacityBytes": 7903961088, - "usedBytes": 8192, - "name": "volume4" - } - ] - } - ] - } - ``` +This plugin may produce a high number of series which, when not controlled +for, will cause high load on your database. Use the following techniques to +avoid cardinality issues: - ### Daemonset YAML +- Use [metric filtering][] options to exclude unneeded measurements and tags. +- Write to a database with an appropriate [retention policy][]. +- Limit series cardinality in your database using the + [max-series-per-database][] and [max-values-per-tag][] settings. +- Consider using the [Time Series Index][tsi]. +- Monitor your databases [series cardinality][]. +- Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. 
-```yaml -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: telegraf - namespace: telegraf -spec: - template: - metadata: - labels: - app: telegraf - spec: - serviceAccount: telegraf - containers: - - name: telegraf - image: quay.io/org/image:latest - imagePullPolicy: IfNotPresent - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: "HOST_PROC" - value: "/rootfs/proc" - - name: "HOST_SYS" - value: "/rootfs/sys" - volumeMounts: - - name: sysro - mountPath: /rootfs/sys - readOnly: true - - name: procro - mountPath: /rootfs/proc - readOnly: true - - name: varrunutmpro - mountPath: /var/run/utmp - readOnly: true - - name: logger-redis-creds - mountPath: /var/run/secrets/deis/redis/creds - volumes: - - name: sysro - hostPath: - path: /sys - - name: procro - hostPath: - path: /proc - - name: varrunutmpro - hostPath: - path: /var/run/utmp +### Configuration + +```toml +[[inputs.kubernetes]] + ## URL for the kubelet + url = "http://127.0.0.1:10255" + + ## Use bearer token for authorization + # bearer_token = /path/to/bearer/token + + ## Set response_timeout (default 5 seconds) + # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ``` -### Line Protocol +### DaemonSet -#### kubernetes_pod_container -``` -kubernetes_pod_container,host=ip-10-0-0-0.ec2.internal, -container_name=deis-controller,namespace=deis, -node_name=ip-10-0-0-0.ec2.internal, pod_name=deis-controller-3058870187-xazsr, cpu_usage_core_nanoseconds=2432835i,cpu_usage_nanocores=0i, -logsfs_avaialble_bytes=121128271872i,logsfs_capacity_bytes=153567944704i, -logsfs_used_bytes=20787200i,memory_major_page_faults=0i, -memory_page_faults=175i,memory_rss_bytes=0i, -memory_usage_bytes=0i,memory_working_set_bytes=0i, -rootfs_available_bytes=121128271872i,rootfs_capacity_bytes=153567944704i, -rootfs_used_bytes=1110016i 1476477530000000000 - ``` +For recommendations on running Telegraf as a DaemonSet see [Monitoring Kubernetes +Architecture][k8s-telegraf] or view the [Helm charts][tick-charts]. 
+ +### Metrics + +- kubernetes_node + - tags: + - node_name + - fields: + - cpu_usage_nanocores + - cpu_usage_core_nanoseconds + - memory_available_bytes + - memory_usage_bytes + - memory_working_set_bytes + - memory_rss_bytes + - memory_page_faults + - memory_major_page_faults + - network_rx_bytes + - network_rx_errors + - network_tx_bytes + - network_tx_errors + - fs_available_bytes + - fs_capacity_bytes + - fs_used_bytes + - runtime_image_fs_available_bytes + - runtime_image_fs_capacity_bytes + - runtime_image_fs_used_bytes + +- kubernetes_pod_container + - tags: + - container_name + - namespace + - node_name + - pod_name + - fields: + - cpu_usage_nanocores + - cpu_usage_core_nanoseconds + - memory_usage_bytes + - memory_working_set_bytes + - memory_rss_bytes + - memory_page_faults + - memory_major_page_faults + - rootfs_available_bytes + - rootfs_capacity_bytes + - rootfs_used_bytes + - logsfs_avaialble_bytes + - logsfs_capacity_bytes + - logsfs_used_bytes + +- kubernetes_pod_volume + - tags: + - volume_name + - namespace + - node_name + - pod_name + - fields: + - available_bytes + - capacity_bytes + - used_bytes + +- kubernetes_pod_network + - tags: + - namespace + - node_name + - pod_name + - fields: + - rx_bytes + - rx_errors + - tx_bytes + - tx_errors + +### Example Output -#### kubernetes_pod_volume ``` -kubernetes_pod_volume,host=ip-10-0-0-0.ec2.internal,name=default-token-f7wts, -namespace=kube-system,node_name=ip-10-0-0-0.ec2.internal, -pod_name=kubernetes-dashboard-v1.1.1-t4x4t, available_bytes=8415240192i, -capacity_bytes=8415252480i,used_bytes=12288i 1476477530000000000 +kubernetes_pod_container,host=ip-10-0-0-0.ec2.internal,container_name=deis-controller,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr cpu_usage_core_nanoseconds=2432835i,cpu_usage_nanocores=0i,logsfs_avaialble_bytes=121128271872i,logsfs_capacity_bytes=153567944704i,logsfs_used_bytes=20787200i,memory_major_page_faults=0i,memory_page_faults=175i,memory_rss_bytes=0i,memory_usage_bytes=0i,memory_working_set_bytes=0i,rootfs_available_bytes=121128271872i,rootfs_capacity_bytes=153567944704i,rootfs_used_bytes=1110016i 1476477530000000000 +kubernetes_pod_volume,host=ip-10-0-0-0.ec2.internal,name=default-token-f7wts,namespace=kube-system,node_name=ip-10-0-0-0.ec2.internal,pod_name=kubernetes-dashboard-v1.1.1-t4x4t available_bytes=8415240192i,capacity_bytes=8415252480i,used_bytes=12288i 1476477530000000000 +kubernetes_pod_network,host=ip-10-0-0-0.ec2.internal,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr rx_bytes=120671099i,rx_errors=0i,tx_bytes=102451983i,tx_errors=0i 1476477530000000000 ``` -#### kubernetes_pod_network -``` -kubernetes_pod_network,host=ip-10-0-0-0.ec2.internal,namespace=deis, -node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr, -rx_bytes=120671099i,rx_errors=0i, -tx_bytes=102451983i,tx_errors=0i 1476477530000000000 -``` +[metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering +[retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ +[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000 +[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000 +[tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ +[series cardinality]: 
https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality +[influx-docs]: https://docs.influxdata.com/influxdb/latest/ +[k8s-telegraf]: https://www.influxdata.com/blog/monitoring-kubernetes-architecture/ +[tick-charts]: https://github.com/influxdata/tick-charts From 91ecec71eab17af82d92c7266c9866279aabb9d3 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Thu, 15 Nov 2018 16:44:36 -0700 Subject: [PATCH 0370/1815] Only print final collection when runing --test (#4991) --- agent/agent.go | 63 ++++++++++++++++++++++++++-------------- cmd/telegraf/telegraf.go | 5 ++-- 2 files changed, 45 insertions(+), 23 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index d8875e447..40ec24456 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -138,11 +138,13 @@ func (a *Agent) Run(ctx context.Context) error { } // Test runs the inputs once and prints the output to stdout in line protocol. -func (a *Agent) Test() error { +func (a *Agent) Test(ctx context.Context) error { var wg sync.WaitGroup metricC := make(chan telegraf.Metric) + nulC := make(chan telegraf.Metric) defer func() { close(metricC) + close(nulC) wg.Wait() }() @@ -156,36 +158,55 @@ func (a *Agent) Test() error { octets, err := s.Serialize(metric) if err == nil { fmt.Print("> ", string(octets)) + } } }() + wg.Add(1) + go func() { + defer wg.Done() + for range nulC { + } + }() + for _, input := range a.Config.Inputs { - if _, ok := input.Input.(telegraf.ServiceInput); ok { - log.Printf("W!: [agent] skipping plugin [[%s]]: service inputs not supported in --test mode", - input.Name()) - continue - } + select { + case <-ctx.Done(): + return nil + default: + if _, ok := input.Input.(telegraf.ServiceInput); ok { + log.Printf("W!: [agent] skipping plugin [[%s]]: service inputs not supported in --test mode", + input.Name()) + continue + } - acc := NewAccumulator(input, metricC) - acc.SetPrecision(a.Config.Agent.Precision.Duration, - a.Config.Agent.Interval.Duration) - input.SetDefaultTags(a.Config.Tags) + acc := NewAccumulator(input, metricC) + acc.SetPrecision(a.Config.Agent.Precision.Duration, + a.Config.Agent.Interval.Duration) + input.SetDefaultTags(a.Config.Tags) - if err := input.Input.Gather(acc); err != nil { - return err - } + // Special instructions for some inputs. cpu, for example, needs to be + // run twice in order to return cpu usage percentages. + switch input.Name() { + case "inputs.cpu", "inputs.mongodb", "inputs.procstat": + nulAcc := NewAccumulator(input, nulC) + nulAcc.SetPrecision(a.Config.Agent.Precision.Duration, + a.Config.Agent.Interval.Duration) + if err := input.Input.Gather(nulAcc); err != nil { + return err + } - // Special instructions for some inputs. cpu, for example, needs to be - // run twice in order to return cpu usage percentages. 
- switch input.Name() { - case "inputs.cpu", "inputs.mongodb", "inputs.procstat": - time.Sleep(500 * time.Millisecond) - if err := input.Input.Gather(acc); err != nil { - return err + time.Sleep(500 * time.Millisecond) + if err := input.Input.Gather(acc); err != nil { + return err + } + default: + if err := input.Input.Gather(acc); err != nil { + return err + } } } - } return nil diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 0ad6fe717..8260c6504 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -83,7 +83,8 @@ func reloadLoop( ctx, cancel := context.WithCancel(context.Background()) signals := make(chan os.Signal) - signal.Notify(signals, os.Interrupt, syscall.SIGHUP, syscall.SIGTERM) + signal.Notify(signals, os.Interrupt, syscall.SIGHUP, + syscall.SIGTERM, syscall.SIGINT) go func() { select { case sig := <-signals: @@ -154,7 +155,7 @@ func runAgent(ctx context.Context, ) if *fTest { - return ag.Test() + return ag.Test(ctx) } log.Printf("I! Starting Telegraf %s\n", version) From 46b340c899c03768cc0dcb12653103fd26d6b766 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Thu, 15 Nov 2018 16:45:18 -0700 Subject: [PATCH 0371/1815] Handle non-tls columns for mysql input (#4973) --- plugins/inputs/mysql/mysql.go | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index c17de3dcd..3e9417cb0 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -995,6 +995,30 @@ func getColSlice(l int) ([]interface{}, error) { &total_ssl_connections, &max_statement_time_exceeded, }, nil + case 21: // mysql 5.5 + return []interface{}{ + &user, + &total_connections, + &concurrent_connections, + &connected_time, + &busy_time, + &cpu_time, + &bytes_received, + &bytes_sent, + &binlog_bytes_written, + &rows_fetched, + &rows_updated, + &table_rows_read, + &select_commands, + &update_commands, + &other_commands, + &commit_transactions, + &rollback_transactions, + &denied_connections, + &lost_connections, + &access_denied, + &empty_queries, + }, nil case 22: // percona return []interface{}{ &user, From 140387d2c3cf9b6de48ac5638f5571518d9c63a1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 15 Nov 2018 15:45:56 -0800 Subject: [PATCH 0372/1815] Log warning when wireless plugin is used on unsupported platform (#4986) --- cmd/telegraf/telegraf.go | 8 +++-- plugins/inputs/wireless/wireless.go | 34 ++++++++++++++++++-- plugins/inputs/wireless/wireless_linux.go | 21 ------------ plugins/inputs/wireless/wireless_nonlinux.go | 21 ++++++++++++ 4 files changed, 59 insertions(+), 25 deletions(-) create mode 100644 plugins/inputs/wireless/wireless_nonlinux.go diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 8260c6504..a3fae740c 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -110,6 +110,11 @@ func runAgent(ctx context.Context, inputFilters []string, outputFilters []string, ) error { + // Setup default logging. This may need to change after reading the config + // file, but we can configure it to use our logger implementation now. + logger.SetupLogging(false, false, "") + log.Printf("I! Starting Telegraf %s", version) + // If no other options are specified, load the config file and run. c := config.NewConfig() c.OutputFilters = outputFilters @@ -147,7 +152,7 @@ func runAgent(ctx context.Context, return err } - // Setup logging + // Setup logging as configured. 
logger.SetupLogging( ag.Config.Agent.Debug || *fDebug, ag.Config.Agent.Quiet || *fQuiet, @@ -158,7 +163,6 @@ func runAgent(ctx context.Context, return ag.Test(ctx) } - log.Printf("I! Starting Telegraf %s\n", version) log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " ")) log.Printf("I! Loaded aggregators: %s", strings.Join(c.AggregatorNames(), " ")) log.Printf("I! Loaded processors: %s", strings.Join(c.ProcessorNames(), " ")) diff --git a/plugins/inputs/wireless/wireless.go b/plugins/inputs/wireless/wireless.go index a992e2efe..eb488ef59 100644 --- a/plugins/inputs/wireless/wireless.go +++ b/plugins/inputs/wireless/wireless.go @@ -1,3 +1,33 @@ -// +build !linux - package wireless + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Wireless is used to store configuration values. +type Wireless struct { + HostProc string `toml:"host_proc"` +} + +var sampleConfig = ` + ## Sets 'proc' directory path + ## If not specified, then default is /proc + # host_proc = "/proc" +` + +// Description returns information about the plugin. +func (w *Wireless) Description() string { + return "Monitor wifi signal strength and quality" +} + +// SampleConfig displays configuration instructions. +func (w *Wireless) SampleConfig() string { + return sampleConfig +} + +func init() { + inputs.Add("wireless", func() telegraf.Input { + return &Wireless{} + }) +} diff --git a/plugins/inputs/wireless/wireless_linux.go b/plugins/inputs/wireless/wireless_linux.go index ed5dff27f..75890a790 100644 --- a/plugins/inputs/wireless/wireless_linux.go +++ b/plugins/inputs/wireless/wireless_linux.go @@ -40,27 +40,6 @@ type wirelessInterface struct { Beacon int64 } -// Wireless is used to store configuration values. -type Wireless struct { - HostProc string `toml:"host_proc"` -} - -var sampleConfig = ` - ## Sets 'proc' directory path - ## If not specified, then default is /proc - # host_proc = "/proc" -` - -// Description returns information about the plugin. -func (w *Wireless) Description() string { - return "Monitor wifi signal strength and quality" -} - -// SampleConfig displays configuration instructions. -func (w *Wireless) SampleConfig() string { - return sampleConfig -} - // Gather collects the wireless information. func (w *Wireless) Gather(acc telegraf.Accumulator) error { // load proc path, get default value if config value and env variable are empty diff --git a/plugins/inputs/wireless/wireless_nonlinux.go b/plugins/inputs/wireless/wireless_nonlinux.go new file mode 100644 index 000000000..0fbe5eb06 --- /dev/null +++ b/plugins/inputs/wireless/wireless_nonlinux.go @@ -0,0 +1,21 @@ +// +build !linux + +package wireless + +import ( + "log" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +func (w *Wireless) Gather(acc telegraf.Accumulator) error { + return nil +} + +func init() { + inputs.Add("wireless", func() telegraf.Input { + log.Print("W! [inputs.wireless] Current platform is not supported") + return &Wireless{} + }) +} From 17079288af8888e63a0bdfa0424b6f0a6b21faaf Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 15 Nov 2018 15:50:00 -0800 Subject: [PATCH 0373/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a554203b1..b7331428d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -61,6 +61,8 @@ - [#4938](https://github.com/influxdata/telegraf/pull/4938): Fix potential deadlock or leaked resources on restart/reload. 
- [#2919](https://github.com/influxdata/telegraf/pull/2919): Fix outputs block inputs when batch size is reached. - [#4789](https://github.com/influxdata/telegraf/issues/4789): Fix potential missing datastore metrics in vSphere plugin. +- [#4982](https://github.com/influxdata/telegraf/issues/4982): Log warning when wireless plugin is used on unsupported platform. +- [#4965](https://github.com/influxdata/telegraf/issues/4965): Handle non-tls columns for mysql input. ## v1.8.3 [2018-10-30] From d8e3c1d434c301adc8eecff6bf92f3768b57d72d Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Fri, 16 Nov 2018 11:51:06 -0700 Subject: [PATCH 0374/1815] Prevent panic in influxdb_listener (#4997) --- plugins/inputs/influxdb_listener/http_listener.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/influxdb_listener/http_listener.go b/plugins/inputs/influxdb_listener/http_listener.go index b8abeecd7..29f055b53 100644 --- a/plugins/inputs/influxdb_listener/http_listener.go +++ b/plugins/inputs/influxdb_listener/http_listener.go @@ -254,12 +254,12 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { if req.Header.Get("Content-Encoding") == "gzip" { var err error body, err = gzip.NewReader(req.Body) - defer body.Close() if err != nil { log.Println("D! " + err.Error()) badRequest(res, err.Error()) return } + defer body.Close() } body = http.MaxBytesReader(res, body, h.MaxBodySize.Size) From 7a779a7550011052dae5d8d11cfcf10d51045460 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 16 Nov 2018 10:53:09 -0800 Subject: [PATCH 0375/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b7331428d..a54e3ecfe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -63,6 +63,7 @@ - [#4789](https://github.com/influxdata/telegraf/issues/4789): Fix potential missing datastore metrics in vSphere plugin. - [#4982](https://github.com/influxdata/telegraf/issues/4982): Log warning when wireless plugin is used on unsupported platform. - [#4965](https://github.com/influxdata/telegraf/issues/4965): Handle non-tls columns for mysql input. +- [#4983](https://github.com/influxdata/telegraf/issues/4983): Fix panic in influxdb_listener when using gzip encoding. 
## v1.8.3 [2018-10-30]

From 9c9511bde9cc517b403d7619bd869b4bd1e51e49 Mon Sep 17 00:00:00 2001
From: Greg <2653109+glinton@users.noreply.github.com>
Date: Mon, 19 Nov 2018 12:27:21 -0700
Subject: [PATCH 0376/1815] Be specific about required csv header definition
 (#5007)

---
 plugins/parsers/registry.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go
index c3e4b1cbf..8ebb4a713 100644
--- a/plugins/parsers/registry.go
+++ b/plugins/parsers/registry.go
@@ -226,7 +226,7 @@ func newCSVParser(metricName string,
 	defaultTags map[string]string) (Parser, error) {
 
 	if headerRowCount == 0 && len(columnNames) == 0 {
-		return nil, fmt.Errorf("there must be a header if `csv_column_names` is not specified")
+		return nil, fmt.Errorf("`csv_header_row_count` must be defined if `csv_column_names` is not specified")
 	}
 
 	if delimiter != "" {

From 0772076378e352a938b40b5508e1bef7008ae4c7 Mon Sep 17 00:00:00 2001
From: Pierre Fersing
Date: Mon, 19 Nov 2018 20:53:09 +0100
Subject: [PATCH 0377/1815] Allow for force gathering ES cluster stats (#4345)

---
 etc/telegraf.conf                             |  6 ++--
 plugins/inputs/elasticsearch/README.md        |  6 ++--
 plugins/inputs/elasticsearch/elasticsearch.go | 28 +++++++++++--------
 3 files changed, 24 insertions(+), 16 deletions(-)

diff --git a/etc/telegraf.conf b/etc/telegraf.conf
index 82df5cdb1..161754f6f 100644
--- a/etc/telegraf.conf
+++ b/etc/telegraf.conf
@@ -1907,10 +1907,12 @@
 # ## - cluster
 #   # cluster_health_level = "indices"
 #
-# ## Set cluster_stats to true when you want to also obtain cluster stats from the
-# ## Master node.
+# ## Set cluster_stats to true when you want to also obtain cluster stats.
 #   cluster_stats = false
 #
+# ## Only gather cluster_stats from the master node. This requires local = true
+#   cluster_stats_only_from_master = true
+#
 # ## node_stats is a list of sub-stats that you want to have gathered. Valid options
 # ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
 # ## "breaker". Per default, all stats are gathered.
diff --git a/plugins/inputs/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md
index e88c3f4d6..d8e43da38 100644
--- a/plugins/inputs/elasticsearch/README.md
+++ b/plugins/inputs/elasticsearch/README.md
@@ -29,10 +29,12 @@ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/curre
 ## - cluster
   # cluster_health_level = "indices"
 
-  ## Set cluster_stats to true when you want to also obtain cluster stats from the
-  ## Master node.
+  ## Set cluster_stats to true when you want to also obtain cluster stats.
   cluster_stats = false
 
+  ## Only gather cluster_stats from the master node. This requires local = true
+  cluster_stats_only_from_master = true
+
   ## node_stats is a list of sub-stats that you want to have gathered. Valid options
   ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
   ## "breaker". Per default, all stats are gathered.
diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go
index 9875b68aa..479bfcfda 100644
--- a/plugins/inputs/elasticsearch/elasticsearch.go
+++ b/plugins/inputs/elasticsearch/elasticsearch.go
@@ -104,10 +104,12 @@ const sampleConfig = `
 ## - cluster
   # cluster_health_level = "indices"
 
-  ## Set cluster_stats to true when you want to also obtain cluster stats from the
-  ## Master node.
+  ## Set cluster_stats to true when you want to also obtain cluster stats.
   cluster_stats = false
 
+  ## Only gather cluster_stats from the master node. This requires local = true
+  cluster_stats_only_from_master = true
+
   ## node_stats is a list of sub-stats that you want to have gathered. Valid options
   ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
   ## "breaker". Per default, all stats are gathered.
@@ -124,13 +126,14 @@ const sampleConfig = `
 // Elasticsearch is a plugin to read stats from one or many Elasticsearch
 // servers.
 type Elasticsearch struct {
-	Local              bool
-	Servers            []string
-	HttpTimeout        internal.Duration
-	ClusterHealth      bool
-	ClusterHealthLevel string
-	ClusterStats       bool
-	NodeStats          []string
+	Local                      bool
+	Servers                    []string
+	HttpTimeout                internal.Duration
+	ClusterHealth              bool
+	ClusterHealthLevel         string
+	ClusterStats               bool
+	ClusterStatsOnlyFromMaster bool
+	NodeStats                  []string
 	tls.ClientConfig
 
 	client *http.Client
@@ -141,8 +144,9 @@ type Elasticsearch struct {
 // NewElasticsearch return a new instance of Elasticsearch
 func NewElasticsearch() *Elasticsearch {
 	return &Elasticsearch{
-		HttpTimeout:        internal.Duration{Duration: time.Second * 5},
-		ClusterHealthLevel: "indices",
+		HttpTimeout:                internal.Duration{Duration: time.Second * 5},
+		ClusterStatsOnlyFromMaster: true,
+		ClusterHealthLevel:         "indices",
 	}
 }
 
@@ -216,7 +220,7 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
 		}
 	}
 
-	if e.ClusterStats && e.isMaster {
+	if e.ClusterStats && (e.isMaster || !e.ClusterStatsOnlyFromMaster || !e.Local) {
 		if err := e.gatherClusterStats(s+"/_cluster/stats", acc); err != nil {
 			acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
 			return

From 65ce9d6d86215805d4063ca52c8e140b92895a38 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 19 Nov 2018 13:23:22 -0800
Subject: [PATCH 0378/1815] Update changelog

---
 CHANGELOG.md | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a54e3ecfe..e5e146250 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,9 @@
+## v1.10 [unreleased]
+
+#### Features
+
+- [#4345](https://github.com/influxdata/telegraf/pull/4345): Allow for force gathering ES cluster stats.
+
 ## v1.9 [unreleased]
 
 #### Release Notes

From c35d124f780b69c510d0a6d10889b9caebe8c81f Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 20 Nov 2018 11:14:09 -0800
Subject: [PATCH 0379/1815] Set 1.9 release date

---
 CHANGELOG.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e5e146250..6c4954623 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,7 +4,7 @@
 - [#4345](https://github.com/influxdata/telegraf/pull/4345): Allow for force gathering ES cluster stats.
-## v1.9 [unreleased] +## v1.9 [2018-11-20] #### Release Notes From f57b019e22cc17f43ed06407d4ee7f3120fe0cd7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 20 Nov 2018 15:23:14 -0800 Subject: [PATCH 0380/1815] Fix link to http_listener_v2 --- plugins/inputs/influxdb_listener/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/influxdb_listener/README.md b/plugins/inputs/influxdb_listener/README.md index 38039c606..8b6d2ad51 100644 --- a/plugins/inputs/influxdb_listener/README.md +++ b/plugins/inputs/influxdb_listener/README.md @@ -64,4 +64,4 @@ curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host= ``` [influxdb_http_api]: https://docs.influxdata.com/influxdb/latest/guides/writing_data/ -[http_listener_v2]: /plugins/inputs/influxdb_listener_v2/README.md +[http_listener_v2]: /plugins/inputs/http_listener_v2/README.md From 85ee3542556fce8129969d333a3f8de4c54a6fe8 Mon Sep 17 00:00:00 2001 From: Lance O'Connor Date: Wed, 21 Nov 2018 17:43:42 -0800 Subject: [PATCH 0381/1815] Fix boolean handling in splunkmetric serializer (#5008) --- .../serializers/splunkmetric/splunkmetric.go | 28 +++++++++---- .../splunkmetric/splunkmetric_test.go | 40 +++++++++++++++++++ 2 files changed, 61 insertions(+), 7 deletions(-) diff --git a/plugins/serializers/splunkmetric/splunkmetric.go b/plugins/serializers/splunkmetric/splunkmetric.go index 01643e334..cdcf6cc59 100644 --- a/plugins/serializers/splunkmetric/splunkmetric.go +++ b/plugins/serializers/splunkmetric/splunkmetric.go @@ -68,14 +68,16 @@ func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte, e for _, field := range metric.FieldList() { - if !verifyValue(field.Value) { + value, valid := verifyValue(field.Value) + + if !valid { log.Printf("D! Can not parse value: %v for key: %v", field.Value, field.Key) continue } obj := map[string]interface{}{} obj["metric_name"] = metric.Name() + "." + field.Key - obj["_value"] = field.Value + obj["_value"] = value dataGroup.Event = "metric" // Convert ns to float seconds since epoch. @@ -94,8 +96,6 @@ func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte, e dataGroup.Fields[n] = t } } - dataGroup.Fields["metric_name"] = metric.Name() + "." 
+ field.Key - dataGroup.Fields["_value"] = field.Value switch s.HecRouting { case true: @@ -117,10 +117,24 @@ func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte, e return metricGroup, nil } -func verifyValue(v interface{}) bool { +func verifyValue(v interface{}) (value interface{}, valid bool) { switch v.(type) { case string: - return false + valid = false + value = v + case bool: + if v == bool(true) { + // Store 1 for a "true" value + valid = true + value = 1 + } else { + // Otherwise store 0 + valid = true + value = 0 + } + default: + valid = true + value = v } - return true + return value, valid } diff --git a/plugins/serializers/splunkmetric/splunkmetric_test.go b/plugins/serializers/splunkmetric/splunkmetric_test.go index f3825d803..04f6e6538 100644 --- a/plugins/serializers/splunkmetric/splunkmetric_test.go +++ b/plugins/serializers/splunkmetric/splunkmetric_test.go @@ -97,6 +97,46 @@ func TestSerializeMetricIntHec(t *testing.T) { assert.Equal(t, string(expS), string(buf)) } +func TestSerializeMetricBool(t *testing.T) { + now := time.Unix(0, 0) + tags := map[string]string{ + "container-name": "telegraf-test", + } + fields := map[string]interface{}{ + "oomkiller": bool(true), + } + m, err := metric.New("docker", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer(false) + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + expS := `{"_value":1,"container-name":"telegraf-test","metric_name":"docker.oomkiller","time":0}` + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricBoolHec(t *testing.T) { + now := time.Unix(0, 0) + tags := map[string]string{ + "container-name": "telegraf-test", + } + fields := map[string]interface{}{ + "oomkiller": bool(false), + } + m, err := metric.New("docker", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer(true) + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + expS := `{"time":0,"event":"metric","fields":{"_value":0,"container-name":"telegraf-test","metric_name":"docker.oomkiller"}}` + assert.Equal(t, string(expS), string(buf)) +} + func TestSerializeMetricString(t *testing.T) { now := time.Unix(0, 0) tags := map[string]string{ From 581772a6a53c2972a7efef04bac297af5052a9c5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 21 Nov 2018 17:45:14 -0800 Subject: [PATCH 0382/1815] Update changelog --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6c4954623..f96d4f279 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,12 @@ - [#4345](https://github.com/influxdata/telegraf/pull/4345): Allow for force gathering ES cluster stats. +## v1.9.1 [unreleased] + +### Bugfixes + +- [#5006](https://github.com/influxdata/telegraf/issues/5006): Fix boolean handling in splunkmetric serializer. + ## v1.9 [2018-11-20] #### Release Notes @@ -73,6 +79,8 @@ ## v1.8.3 [2018-10-30] +### Bugfixes + - [#4873](https://github.com/influxdata/telegraf/pull/4873): Add DN attributes as tags in x509_cert input to avoid series overwrite. - [#4921](https://github.com/influxdata/telegraf/issues/4921): Prevent connection leak by closing unused connections in amqp output. - [#4904](https://github.com/influxdata/telegraf/issues/4904): Use default partition key when tag does not exist in kinesis output. 
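The splunkmetric tests above pin the boolean behavior down: a `true` field serializes with `_value=1`, `false` with `_value=0`, and string fields are dropped. A minimal, self-contained sketch of that mapping (the `toSplunkValue` name is hypothetical; it only mirrors the patched `verifyValue`):

```go
package main

import "fmt"

// toSplunkValue mirrors the patched verifyValue: strings are rejected
// (Splunk metric values must be numeric), booleans are coerced to 1 or 0,
// and every other field value passes through unchanged.
func toSplunkValue(v interface{}) (interface{}, bool) {
	switch v := v.(type) {
	case string:
		return v, false
	case bool:
		if v {
			return 1, true
		}
		return 0, true
	default:
		return v, true
	}
}

func main() {
	for _, v := range []interface{}{true, false, int64(42), "text"} {
		value, valid := toSplunkValue(v)
		fmt.Printf("%v -> %v (valid=%v)\n", v, value, valid)
	}
}
```

Running this prints each input value and the numeric `_value` that would be stored, matching the expectations in the tests above.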
From 6d2fb0027cf6042ad332ec96f48f63e758b17423 Mon Sep 17 00:00:00 2001 From: Jeppe Fihl-Pearson Date: Wed, 28 Nov 2018 01:26:22 +0000 Subject: [PATCH 0383/1815] Set default config values in jenkins input (#5046) --- plugins/inputs/jenkins/jenkins.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go index b052b22a9..8bb06052a 100644 --- a/plugins/inputs/jenkins/jenkins.go +++ b/plugins/inputs/jenkins/jenkins.go @@ -438,6 +438,10 @@ func mapResultCode(s string) int { func init() { inputs.Add("jenkins", func() telegraf.Input { - return &Jenkins{} + return &Jenkins{ + MaxBuildAge: internal.Duration{Duration: time.Duration(time.Hour)}, + MaxConnections: 5, + MaxSubJobPerLayer: 10, + } }) } From 1279db1a8f94770b046aa79780403f8f723b3148 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 27 Nov 2018 17:27:28 -0800 Subject: [PATCH 0384/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f96d4f279..966a3f49f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ ### Bugfixes - [#5006](https://github.com/influxdata/telegraf/issues/5006): Fix boolean handling in splunkmetric serializer. +- [#5046](https://github.com/influxdata/telegraf/issues/5046): Set default config values in jenkins input. ## v1.9 [2018-11-20] From 9bc92c5c7552a140ff91b4adbe497ed9cf8c16a9 Mon Sep 17 00:00:00 2001 From: Mauro Murari Date: Tue, 27 Nov 2018 23:45:23 -0200 Subject: [PATCH 0385/1815] Fix server connection info in mongodb input (#5048) --- plugins/inputs/mongodb/mongostat.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index dcfd7f89d..1acda5826 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -599,6 +599,11 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec Faults: -1, } + // set connection info + returnVal.CurrentC = newStat.Connections.Current + returnVal.AvailableC = newStat.Connections.Available + returnVal.TotalCreatedC = newStat.Connections.TotalCreated + // set the storage engine appropriately if newStat.StorageEngine != nil && newStat.StorageEngine["name"] != "" { returnVal.StorageEngine = newStat.StorageEngine["name"] From 168c2b0ed15e358d0db510dd35e685dd2f6ea06b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 28 Nov 2018 12:10:51 -0800 Subject: [PATCH 0386/1815] Fix influxdb_v2 link in readme --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 8db0be58f..fff17138a 100644 --- a/README.md +++ b/README.md @@ -176,7 +176,6 @@ For documentation on the latest development code see the [documentation index][d * [http_response](./plugins/inputs/http_response) * [icinga2](./plugins/inputs/icinga2) * [influxdb](./plugins/inputs/influxdb) -* [influxdb_v2](./plugins/inputs/influxdb_v2) * [influxdb_listener](./plugins/inputs/influxdb_listener) * [internal](./plugins/inputs/internal) * [interrupts](./plugins/inputs/interrupts) @@ -322,7 +321,8 @@ For documentation on the latest development code see the [documentation index][d ## Output Plugins -* [influxdb](./plugins/outputs/influxdb) +* [influxdb](./plugins/outputs/influxdb) (InfluxDB 1.x) +* [influxdb_v2](./plugins/outputs/influxdb_v2) ([InfluxDB 2.x](https://github.com/influxdata/platform)) * [amon](./plugins/outputs/amon) * [amqp](./plugins/outputs/amqp) (rabbitmq) * 
[application_insights](./plugins/outputs/application_insights) From f9113b63b775d3e939e9cdf0447704dec0322e0c Mon Sep 17 00:00:00 2001 From: Felipe Dutra Tine e Silva <5888407+tkanos@users.noreply.github.com> Date: Wed, 28 Nov 2018 19:07:25 -0500 Subject: [PATCH 0387/1815] Add csv parser unix timestamp support (#5047) --- plugins/parsers/csv/README.md | 3 +- plugins/parsers/csv/parser.go | 53 +++++++++++++++++++++--------- plugins/parsers/csv/parser_test.go | 18 ++++++++++ 3 files changed, 58 insertions(+), 16 deletions(-) diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md index e4cfbfc37..f2cf34c69 100644 --- a/plugins/parsers/csv/README.md +++ b/plugins/parsers/csv/README.md @@ -75,7 +75,8 @@ document. The `csv_timestamp_column` option specifies the column name containing the time value and `csv_timestamp_format` must be set to a Go "reference time" -which is defined to be the specific time: `Mon Jan 2 15:04:05 MST 2006`. +which is defined to be the specific time: `Mon Jan 2 15:04:05 MST 2006`, +it can also be `unix` (for epoch in ms format like 1257894000 ) Consult the Go [time][time parse] package for details and additional examples on how to set the time format. diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go index f18068eb7..e1bbdbbbb 100644 --- a/plugins/parsers/csv/parser.go +++ b/plugins/parsers/csv/parser.go @@ -207,21 +207,9 @@ outer: measurementName = fmt.Sprintf("%v", recordFields[p.MeasurementColumn]) } - metricTime := p.TimeFunc() - if p.TimestampColumn != "" { - if recordFields[p.TimestampColumn] == nil { - return nil, fmt.Errorf("timestamp column: %v could not be found", p.TimestampColumn) - } - tStr := fmt.Sprintf("%v", recordFields[p.TimestampColumn]) - if p.TimestampFormat == "" { - return nil, fmt.Errorf("timestamp format must be specified") - } - - var err error - metricTime, err = time.Parse(p.TimestampFormat, tStr) - if err != nil { - return nil, err - } + metricTime, err := parseTimestamp(p.TimeFunc, recordFields, p.TimestampColumn, p.TimestampFormat) + if err != nil { + return nil, err } m, err := metric.New(measurementName, tags, recordFields, metricTime) @@ -231,6 +219,41 @@ outer: return m, nil } +// ParseTimestamp return a timestamp, if there is no timestamp on the csv it will be the current timestamp, else it will try to parse the time according to the format +// if the format is "unix" it tries to parse assuming that on the csv it will find an epoch in ms. 
+func parseTimestamp(timeFunc func() time.Time, recordFields map[string]interface{}, timestampColumn, timestampFormat string) (metricTime time.Time, err error) { + metricTime = timeFunc() + + if timestampColumn != "" { + if recordFields[timestampColumn] == nil { + err = fmt.Errorf("timestamp column: %v could not be found", timestampColumn) + return + } + + tStr := fmt.Sprintf("%v", recordFields[timestampColumn]) + + switch timestampFormat { + case "": + err = fmt.Errorf("timestamp format must be specified") + return + case "unix": + var unixTime int64 + unixTime, err = strconv.ParseInt(tStr, 10, 64) + if err != nil { + return + } + metricTime = time.Unix(unixTime, 0) + default: + metricTime, err = time.Parse(timestampFormat, tStr) + if err != nil { + return + } + } + } + return +} + +// SetDefaultTags set the DefaultTags func (p *Parser) SetDefaultTags(tags map[string]string) { p.DefaultTags = tags } diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go index eff6f953f..97da69cd2 100644 --- a/plugins/parsers/csv/parser_test.go +++ b/plugins/parsers/csv/parser_test.go @@ -88,6 +88,24 @@ func TestTimestampError(t *testing.T) { require.Equal(t, fmt.Errorf("timestamp format must be specified"), err) } +func TestTimestampUnixFormat(t *testing.T) { + p := Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimestampColumn: "first", + TimestampFormat: "unix", + TimeFunc: DefaultTime, + } + testCSV := `line1,line2,line3 +1243094706,70,test_name +1257609906,80,test_name2` + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, metrics[0].Time().UnixNano(), int64(1243094706000000000)) + require.Equal(t, metrics[1].Time().UnixNano(), int64(1257609906000000000)) +} + func TestQuotedCharacter(t *testing.T) { p := Parser{ HeaderRowCount: 1, From a26aaa5e0360505a5b77607fc0419b00e90c5a7f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 28 Nov 2018 16:19:53 -0800 Subject: [PATCH 0388/1815] Add support for unix_ms timestamps in csv parser. --- plugins/parsers/csv/README.md | 8 ++++---- plugins/parsers/csv/parser.go | 16 +++++++++++++--- plugins/parsers/csv/parser_test.go | 18 ++++++++++++++++++ 3 files changed, 35 insertions(+), 7 deletions(-) diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md index f2cf34c69..488846b5e 100644 --- a/plugins/parsers/csv/README.md +++ b/plugins/parsers/csv/README.md @@ -73,10 +73,10 @@ time using the JSON document you can use the `csv_timestamp_column` and `csv_timestamp_format` options together to set the time to a value in the parsed document. -The `csv_timestamp_column` option specifies the column name containing the -time value and `csv_timestamp_format` must be set to a Go "reference time" -which is defined to be the specific time: `Mon Jan 2 15:04:05 MST 2006`, -it can also be `unix` (for epoch in ms format like 1257894000 ) +The `csv_timestamp_column` option specifies the key containing the time value and +`csv_timestamp_format` must be set to `unix`, `unix_ms`, or a format string in +using the Go "reference time" which is defined to be the **specific time**: +`Mon Jan 2 15:04:05 MST 2006`. Consult the Go [time][time parse] package for details and additional examples on how to set the time format. 
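To see the documented formats in use, here is a minimal sketch that drives the patched parser directly, mirroring the `unix_ms` test below (the column names and sample row are invented for illustration):

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/plugins/parsers/csv"
)

func main() {
	// Configure the parser the same way TestTimestampUnixMSFormat does:
	// the "time" column holds a millisecond epoch.
	p := csv.Parser{
		HeaderRowCount:    1,
		MeasurementColumn: "name",
		TimestampColumn:   "time",
		TimestampFormat:   "unix_ms",
		TimeFunc:          time.Now,
	}
	metrics, err := p.Parse([]byte("time,name,value\n1543539773000,demo,42\n"))
	if err != nil {
		panic(err)
	}
	// Prints the measurement name and the parsed timestamp in UTC.
	fmt.Println(metrics[0].Name(), metrics[0].Time().UTC())
}
```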
diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go index e1bbdbbbb..9401c1dd1 100644 --- a/plugins/parsers/csv/parser.go +++ b/plugins/parsers/csv/parser.go @@ -219,9 +219,12 @@ outer: return m, nil } -// ParseTimestamp return a timestamp, if there is no timestamp on the csv it will be the current timestamp, else it will try to parse the time according to the format -// if the format is "unix" it tries to parse assuming that on the csv it will find an epoch in ms. -func parseTimestamp(timeFunc func() time.Time, recordFields map[string]interface{}, timestampColumn, timestampFormat string) (metricTime time.Time, err error) { +// ParseTimestamp return a timestamp, if there is no timestamp on the csv it +// will be the current timestamp, else it will try to parse the time according +// to the format. +func parseTimestamp(timeFunc func() time.Time, recordFields map[string]interface{}, + timestampColumn, timestampFormat string, +) (metricTime time.Time, err error) { metricTime = timeFunc() if timestampColumn != "" { @@ -243,6 +246,13 @@ func parseTimestamp(timeFunc func() time.Time, recordFields map[string]interface return } metricTime = time.Unix(unixTime, 0) + case "unix_ms": + var unixTime int64 + unixTime, err = strconv.ParseInt(tStr, 10, 64) + if err != nil { + return + } + metricTime = time.Unix(unixTime/1000, (unixTime%1000)*1e6) default: metricTime, err = time.Parse(timestampFormat, tStr) if err != nil { diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go index 97da69cd2..93ae6bcdd 100644 --- a/plugins/parsers/csv/parser_test.go +++ b/plugins/parsers/csv/parser_test.go @@ -106,6 +106,24 @@ func TestTimestampUnixFormat(t *testing.T) { require.Equal(t, metrics[1].Time().UnixNano(), int64(1257609906000000000)) } +func TestTimestampUnixMSFormat(t *testing.T) { + p := Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimestampColumn: "first", + TimestampFormat: "unix_ms", + TimeFunc: DefaultTime, + } + testCSV := `line1,line2,line3 +1243094706123,70,test_name +1257609906123,80,test_name2` + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, metrics[0].Time().UnixNano(), int64(1243094706123000000)) + require.Equal(t, metrics[1].Time().UnixNano(), int64(1257609906123000000)) +} + func TestQuotedCharacter(t *testing.T) { p := Parser{ HeaderRowCount: 1, From 35d08b2df75079264b50eeab115f0543edbd87aa Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 28 Nov 2018 16:21:49 -0800 Subject: [PATCH 0389/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 966a3f49f..13f6eb980 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ #### Features - [#4345](https://github.com/influxdata/telegraf/pull/4345): Allow for force gathering ES cluster stats. +- [#5047](https://github.com/influxdata/telegraf/pull/5047): Add support for unix and unix_ms timestamps to csv parser. 
## v1.9.1 [unreleased] From 448c98d82eabe20e703c320ef0428badcd1da3ef Mon Sep 17 00:00:00 2001 From: Sebastien Le Digabel Date: Thu, 29 Nov 2018 00:29:26 +0000 Subject: [PATCH 0390/1815] Add ability to tag metrics with topic in kafka_consumer (#5038) --- plugins/inputs/kafka_consumer/README.md | 2 + .../inputs/kafka_consumer/kafka_consumer.go | 10 ++++- .../kafka_consumer/kafka_consumer_test.go | 45 +++++++++++++++++++ 3 files changed, 56 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index 8922f5071..56fc59245 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -14,6 +14,8 @@ and use the old zookeeper connection method. brokers = ["localhost:9092"] ## topic(s) to consume topics = ["telegraf"] + ## Add topic as tag if topic_tag is not empty + # topic_tag = "" ## Optional Client id # client_id = "Telegraf" diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 31159def3..0814d8e14 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -40,6 +40,8 @@ type Kafka struct { Offset string `toml:"offset"` SASLUsername string `toml:"sasl_username"` SASLPassword string `toml:"sasl_password"` + TopicTag string `toml:"topic_tag"` + tls.ClientConfig cluster Consumer @@ -60,6 +62,8 @@ var sampleConfig = ` brokers = ["localhost:9092"] ## topic(s) to consume topics = ["telegraf"] + ## Add topic as tag if topic_tag is not empty + # topic_tag = "" ## Optional Client id # client_id = "Telegraf" @@ -256,7 +260,11 @@ func (k *Kafka) onMessage(acc telegraf.TrackingAccumulator, msg *sarama.Consumer if err != nil { return err } - + if len(k.TopicTag) > 0 { + for _, metric := range metrics { + metric.AddTag(k.TopicTag, msg.Topic) + } + } id := acc.AddTrackingMetricGroup(metrics) k.messages[id] = msg diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index 5bb7740a5..a4d06efe6 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -61,6 +61,25 @@ func newTestKafka() (*Kafka, *TestConsumer) { return &k, consumer } +func newTestKafkaWithTopicTag() (*Kafka, *TestConsumer) { + consumer := &TestConsumer{ + errors: make(chan error), + messages: make(chan *sarama.ConsumerMessage, 1000), + } + k := Kafka{ + cluster: consumer, + ConsumerGroup: "test", + Topics: []string{"telegraf"}, + Brokers: []string{"localhost:9092"}, + Offset: "oldest", + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + doNotCommitMsgs: true, + messages: make(map[telegraf.TrackingID]*sarama.ConsumerMessage), + TopicTag: "topic", + } + return &k, consumer +} + // Test that the parser parses kafka messages into points func TestRunParser(t *testing.T) { k, consumer := newTestKafka() @@ -75,6 +94,22 @@ func TestRunParser(t *testing.T) { assert.Equal(t, acc.NFields(), 1) } +// Test that the parser parses kafka messages into points +// and adds the topic tag +func TestRunParserWithTopic(t *testing.T) { + k, consumer := newTestKafkaWithTopicTag() + acc := testutil.Accumulator{} + ctx := context.Background() + + k.parser, _ = parsers.NewInfluxParser() + go k.receiver(ctx, &acc) + consumer.Inject(saramaMsgWithTopic(testMsg, "test_topic")) + acc.Wait(1) + + assert.Equal(t, acc.NFields(), 1) + assert.True(t, acc.HasTag("cpu_load_short", "topic")) +} + // Test that the 
parser ignores invalid messages func TestRunParserInvalidMsg(t *testing.T) { k, consumer := newTestKafka() @@ -173,3 +208,13 @@ func saramaMsg(val string) *sarama.ConsumerMessage { Partition: 0, } } + +func saramaMsgWithTopic(val string, topic string) *sarama.ConsumerMessage { + return &sarama.ConsumerMessage{ + Key: nil, + Value: []byte(val), + Offset: 0, + Partition: 0, + Topic: topic, + } +} From db497a04ea70ac415d1a6322f5dfde5be7a7a5ee Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 28 Nov 2018 16:30:08 -0800 Subject: [PATCH 0391/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 13f6eb980..caaac655b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ - [#4345](https://github.com/influxdata/telegraf/pull/4345): Allow for force gathering ES cluster stats. - [#5047](https://github.com/influxdata/telegraf/pull/5047): Add support for unix and unix_ms timestamps to csv parser. +- [#5038](https://github.com/influxdata/telegraf/pull/5038): Add ability to tag metrics with topic in kafka_consumer. ## v1.9.1 [unreleased] From 55b798bd20c28ea1af8d0f3ef2ee980048e6cbc2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 28 Nov 2018 16:43:13 -0800 Subject: [PATCH 0392/1815] Fix mongodb document stats (#5049) --- plugins/inputs/mongodb/README.md | 7 +++++++ plugins/inputs/mongodb/mongostat.go | 6 ++++++ 2 files changed, 13 insertions(+) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index 96852d724..105584462 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -48,11 +48,18 @@ Error in input [mongodb]: not authorized on admin to execute command { serverSta - active_reads (integer) - active_writes (integer) - commands_per_sec (integer) + - connections_current (integer) + - connections_available (integer) + - connections_total_created (integer) - cursor_timed_out (integer) - cursor_no_timeout (integer) - cursor_pinned (integer) - cursor_total (integer) - deletes_per_sec (integer) + - document_deleted (integer) + - document_inserted (integer) + - document_returned (integer) + - document_updated (integer) - flushes_per_sec (integer) - getmores_per_sec (integer) - inserts_per_sec (integer) diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 1acda5826..e32596deb 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -633,6 +633,12 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.TotalC = diff(newStat.Metrics.Cursor.Open.Total, oldStat.Metrics.Cursor.Open.Total, sampleSecs) } } + if newStat.Metrics.Document != nil { + returnVal.DeletedD = newStat.Metrics.Document.Deleted + returnVal.InsertedD = newStat.Metrics.Document.Inserted + returnVal.ReturnedD = newStat.Metrics.Document.Returned + returnVal.UpdatedD = newStat.Metrics.Document.Updated + } } if newStat.OpcountersRepl != nil && oldStat.OpcountersRepl != nil { From 2d1f97c2173b14b3d51313c6444d0b9402f9664b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 28 Nov 2018 16:46:02 -0800 Subject: [PATCH 0393/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index caaac655b..9b106fe81 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ - [#5006](https://github.com/influxdata/telegraf/issues/5006): Fix boolean handling in splunkmetric serializer. 
- [#5046](https://github.com/influxdata/telegraf/issues/5046): Set default config values in jenkins input. +- [#4664](https://github.com/influxdata/telegraf/issues/4664): Fix server connection and document stats in mongodb input. ## v1.9 [2018-11-20] From ba612b670b1521663fe6c44688f3a3a89a4c5773 Mon Sep 17 00:00:00 2001 From: gurayyildirim Date: Thu, 29 Nov 2018 03:52:03 +0300 Subject: [PATCH 0394/1815] Add X-Requested-By header to graylog input (#5011) --- plugins/inputs/graylog/graylog.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/plugins/inputs/graylog/graylog.go b/plugins/inputs/graylog/graylog.go index 8e580480d..6e75f676a 100644 --- a/plugins/inputs/graylog/graylog.go +++ b/plugins/inputs/graylog/graylog.go @@ -235,6 +235,9 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) { if err != nil { return "", -1, fmt.Errorf("Invalid server URL \"%s\"", serverURL) } + // Add X-Requested-By header + headers["X-Requested-By"] = requestURL.Hostname() + if strings.Contains(requestURL.String(), "multiple") { m := &Messagebody{Metrics: h.Metrics} http_body, err := json.Marshal(m) From fc7fba9020685c674013c61393011f5cccd59ed9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 28 Nov 2018 16:52:40 -0800 Subject: [PATCH 0395/1815] Use Telegraf as value for graylog x-requested-by header --- plugins/inputs/graylog/graylog.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/graylog/graylog.go b/plugins/inputs/graylog/graylog.go index 6e75f676a..1e0439a42 100644 --- a/plugins/inputs/graylog/graylog.go +++ b/plugins/inputs/graylog/graylog.go @@ -236,7 +236,7 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) { return "", -1, fmt.Errorf("Invalid server URL \"%s\"", serverURL) } // Add X-Requested-By header - headers["X-Requested-By"] = requestURL.Hostname() + headers["X-Requested-By"] = "Telegraf" if strings.Contains(requestURL.String(), "multiple") { m := &Messagebody{Metrics: h.Metrics} From 9b3452a65de06c9cf1a53ea41102cbff39ab297e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 28 Nov 2018 16:54:07 -0800 Subject: [PATCH 0396/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9b106fe81..6b0a065e0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ - [#5006](https://github.com/influxdata/telegraf/issues/5006): Fix boolean handling in splunkmetric serializer. - [#5046](https://github.com/influxdata/telegraf/issues/5046): Set default config values in jenkins input. - [#4664](https://github.com/influxdata/telegraf/issues/4664): Fix server connection and document stats in mongodb input. +- [#5010](https://github.com/influxdata/telegraf/issues/5010): Add X-Requested-By header to graylog input. 
## v1.9 [2018-11-20] From 0a506a93fe1b55944d01900deaca1a21ad812376 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 29 Nov 2018 12:58:48 -0800 Subject: [PATCH 0397/1815] Remove metrics from the buffer on write (#5052) --- internal/models/buffer.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/internal/models/buffer.go b/internal/models/buffer.go index 6848c26fa..8c03db3d4 100644 --- a/internal/models/buffer.go +++ b/internal/models/buffer.go @@ -175,9 +175,10 @@ func (b *Buffer) Accept(batch []telegraf.Metric) { b.metricWritten(m) } - if b.batchSize > 0 { - b.size -= b.batchSize - b.first += b.batchSize + b.size -= b.batchSize + for i := 0; i < b.batchSize; i++ { + b.buf[b.first] = nil + b.first++ b.first %= b.cap } From d69f81ae5e62872f21c01916168d86e7cc975fd9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 29 Nov 2018 13:00:58 -0800 Subject: [PATCH 0398/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b0a065e0..2a0a82756 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ - [#5046](https://github.com/influxdata/telegraf/issues/5046): Set default config values in jenkins input. - [#4664](https://github.com/influxdata/telegraf/issues/4664): Fix server connection and document stats in mongodb input. - [#5010](https://github.com/influxdata/telegraf/issues/5010): Add X-Requested-By header to graylog input. +- [#5052](https://github.com/influxdata/telegraf/issues/5052): Fix metric memory not freed from the metric buffer on write. ## v1.9 [2018-11-20] From cd9a79f5e672ff9c3b9fa391ee26064a2b122a15 Mon Sep 17 00:00:00 2001 From: Greg Volk Date: Thu, 29 Nov 2018 15:04:31 -0600 Subject: [PATCH 0399/1815] Add tagdrop example for win_perf_counters (#5061) --- docs/CONFIGURATION.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 4677e54f2..6bb6c2e09 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -258,6 +258,21 @@ interpreted as part of the tagpass/tagdrop map. 
fstype = [ "ext4", "xfs" ] # Globs can also be used on the tag values path = [ "/opt", "/home*" ] + + +[[inputs.win_perf_counters]] + [[inputs.win_perf_counters.object]] + ObjectName = "Network Interface" + Instances = ["*"] + Counters = [ + "Bytes Received/sec", + "Bytes Sent/sec" + ] + Measurement = "win_net" + # Don't send metrics where the Windows interface name (instance) begins with isatap or Local + [inputs.win_perf_counters.tagdrop] + instance = ["isatap*", "Local*"] + ``` #### Input Config: fieldpass and fielddrop From 34231f61416ad29f83598ccfefc0160a3dceaf97 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 29 Nov 2018 15:56:11 -0800 Subject: [PATCH 0400/1815] Update to pgx 3.2.0 (#5068) --- Gopkg.lock | 6 +++--- Gopkg.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 2dd0dd26c..a2df3c81d 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -627,7 +627,7 @@ revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec" [[projects]] - digest = "1:2de1791b9e43f26c696e36950e42676565e7da7499a870bc02213da4b59b1d14" + digest = "1:5544f7badae00bc5b9ec6829857bc08f88fce4d3ef73fb616ee57d49abbf7f48" name = "github.com/jackc/pgx" packages = [ ".", @@ -639,8 +639,8 @@ "stdlib", ] pruneopts = "" - revision = "da3231b0b66e2e74cdb779f1d46c5e958ba8be27" - version = "v3.1.0" + revision = "89f1e6ac7276b61d885db5e5aed6fcbedd1c7e31" + version = "v3.2.0" [[projects]] digest = "1:6f49eae0c1e5dab1dafafee34b207aeb7a42303105960944828c2079b92fc88e" diff --git a/Gopkg.toml b/Gopkg.toml index 23b8444fe..791e265e8 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -88,7 +88,7 @@ [[constraint]] name = "github.com/jackc/pgx" - version = "3.1.0" + version = "3.2.0" [[constraint]] name = "github.com/kardianos/service" From 1d6db08dc859708c59315cbbfc8e9ed15d78b02b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 29 Nov 2018 16:06:18 -0800 Subject: [PATCH 0401/1815] Update changelog --- CHANGELOG.md | 1 + plugins/inputs/postgresql/README.md | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2a0a82756..7094211f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ - [#4664](https://github.com/influxdata/telegraf/issues/4664): Fix server connection and document stats in mongodb input. - [#5010](https://github.com/influxdata/telegraf/issues/5010): Add X-Requested-By header to graylog input. - [#5052](https://github.com/influxdata/telegraf/issues/5052): Fix metric memory not freed from the metric buffer on write. +- [#3817](https://github.com/influxdata/telegraf/issues/3817): Add support for client tls certificates in postgresql inputs. ## v1.9 [2018-11-20] diff --git a/plugins/inputs/postgresql/README.md b/plugins/inputs/postgresql/README.md index e31fcff69..a873ddac0 100644 --- a/plugins/inputs/postgresql/README.md +++ b/plugins/inputs/postgresql/README.md @@ -45,6 +45,13 @@ A list of databases to pull metrics about. 
If not specified, metrics for all dat
 
 `databases = ["app_production", "testing"]`
 
+### TLS Configuration
+
+Add the `sslkey`, `sslcert` and `sslrootcert` options to your DSN:
+```
+host=localhost user=pgotest dbname=app_production sslmode=require sslkey=/etc/telegraf/key.pem sslcert=/etc/telegraf/cert.pem sslrootcert=/etc/telegraf/ca.pem
+```
+
 ### Configuration example
 ```
 [[inputs.postgresql]]

From 9a637eda0549fa2de9efe571ee27f4c4394d92c8 Mon Sep 17 00:00:00 2001
From: Wojciech Kudla
Date: Fri, 30 Nov 2018 22:42:55 +0000
Subject: [PATCH 0402/1815] Switch CPU from field to tag in interrupts input
 plugin (#4999) (#5024)

---
 plugins/inputs/interrupts/README.md          |  27 +-
 plugins/inputs/interrupts/interrupts.go      |  29 +-
 plugins/inputs/interrupts/interrupts_test.go | 314 +++++++++----------
 3 files changed, 185 insertions(+), 185 deletions(-)

diff --git a/plugins/inputs/interrupts/README.md b/plugins/inputs/interrupts/README.md
index eb1e3979d..188070f74 100644
--- a/plugins/inputs/interrupts/README.md
+++ b/plugins/inputs/interrupts/README.md
@@ -5,6 +5,9 @@ The interrupts plugin gathers metrics about IRQs from `/proc/interrupts` and `/p
 ### Configuration
 ```
 [[inputs.interrupts]]
+  # To report cpus as tags instead of fields use cpu_as_tags
+  # cpu_as_tags = false
+  #
   ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
   # [inputs.interrupts.tagdrop]
   #   irq = [ "NET_RX", "TASKLET" ]
@@ -16,20 +19,32 @@ There are two measurements reported by this plugin.
 - `soft_interrupts` gathers metrics from the `/proc/softirqs` file
 
 ### Fields
-- CPUx: the amount of interrupts for the IRQ handled by that CPU
-- total: total amount of interrupts for all CPUs
+For cpu_as_tags=false (default):
+- CPUx: the number of interrupts for the IRQ handled by that CPU
+- total: the sum of interrupts for the IRQ across all CPUs
+For cpu_as_tags=true:
+- count: the number of interrupts for the IRQ handled by the CPU named in the cpu tag
 
 ### Tags
 - irq: the IRQ
 - type: the type of interrupt
 - device: the name of the device that is located at that IRQ
+- cpu: the CPU (when cpu_as_tags=true)
 
 ### Example Output
 ```
 ./telegraf --config ~/interrupts_config.conf --test
+For cpu_as_tags=false (default):
 * Plugin: inputs.interrupts, Collection 1
-> interrupts,irq=0,type=IO-APIC,device=2-edge\ timer,host=hostname CPU0=23i,total=23i 1489346531000000000
-> interrupts,irq=1,host=hostname,type=IO-APIC,device=1-edge\ i8042 CPU0=9i,total=9i 1489346531000000000
-> interrupts,irq=30,type=PCI-MSI,device=65537-edge\ virtio1-input.0,host=hostname CPU0=1i,total=1i 1489346531000000000
-> soft_interrupts,irq=NET_RX,host=hostname CPU0=280879i,total=280879i 1489346531000000000
+> interrupts,irq=0,type=IO-APIC,device=2-edge\ timer,host=hostname,cpu=cpu0 count=23i 1489346531000000000
+> interrupts,irq=1,host=hostname,type=IO-APIC,device=1-edge\ i8042,cpu=cpu0 count=9i 1489346531000000000
+> interrupts,irq=30,type=PCI-MSI,device=65537-edge\ virtio1-input.0,host=hostname,cpu=cpu1 count=1i 1489346531000000000
+> soft_interrupts,irq=NET_RX,host=hostname,cpu=cpu0 count=280879i 1489346531000000000
+
+For cpu_as_tags=true:
+> interrupts,cpu=cpu6,host=hostname,irq=PIW,type=Posted-interrupt\ wakeup\ event count=0i 1543539773000000000
+> interrupts,cpu=cpu7,host=hostname,irq=PIW,type=Posted-interrupt\ wakeup\ event count=0i 1543539773000000000
+> soft_interrupts,cpu=cpu0,host=hostname,irq=HI count=246441i 1543539773000000000
+> soft_interrupts,cpu=cpu1,host=hostname,irq=HI count=159154i 1543539773000000000
+
 ```
 
diff --git 
a/plugins/inputs/interrupts/interrupts.go b/plugins/inputs/interrupts/interrupts.go index 9e8c8ea24..142dc34ec 100644 --- a/plugins/inputs/interrupts/interrupts.go +++ b/plugins/inputs/interrupts/interrupts.go @@ -12,7 +12,9 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -type Interrupts struct{} +type Interrupts struct { + CpuAsTags bool +} type IRQ struct { ID string @@ -27,6 +29,9 @@ func NewIRQ(id string) *IRQ { } const sampleConfig = ` + ## To report cpus as tags instead of fields use cpu_as_tags + # cpu_as_tags = false + # ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. # [inputs.interrupts.tagdrop] # irq = [ "NET_RX", "TASKLET" ] @@ -92,7 +97,7 @@ func gatherTagsFields(irq IRQ) (map[string]string, map[string]interface{}) { tags := map[string]string{"irq": irq.ID, "type": irq.Type, "device": irq.Device} fields := map[string]interface{}{"total": irq.Total} for i := 0; i < len(irq.Cpus); i++ { - cpu := fmt.Sprintf("CPU%d", i) + cpu := fmt.Sprintf("cpu%d", i) fields[cpu] = irq.Cpus[i] } return tags, fields @@ -111,12 +116,26 @@ func (s *Interrupts) Gather(acc telegraf.Accumulator) error { acc.AddError(fmt.Errorf("Parsing %s: %s", file, err)) continue } - for _, irq := range irqs { - tags, fields := gatherTagsFields(irq) + reportMetrics(measurement, irqs, acc, s.CpuAsTags) + } + return nil +} + +func reportMetrics(measurement string, irqs []IRQ, acc telegraf.Accumulator, cpusAsTags bool) { + for _, irq := range irqs { + tags, fields := gatherTagsFields(irq) + if cpusAsTags { + for cpu, count := range irq.Cpus { + cpuTags := map[string]string{"cpu": fmt.Sprintf("cpu%d", cpu)} + for k, v := range tags { + cpuTags[k] = v + } + acc.AddFields(measurement, map[string]interface{}{"count": count}, cpuTags) + } + } else { acc.AddFields(measurement, fields, tags) } } - return nil } func init() { diff --git a/plugins/inputs/interrupts/interrupts_test.go b/plugins/inputs/interrupts/interrupts_test.go index 3990461b1..2579d926d 100644 --- a/plugins/inputs/interrupts/interrupts_test.go +++ b/plugins/inputs/interrupts/interrupts_test.go @@ -2,189 +2,155 @@ package interrupts import ( "bytes" + "fmt" "testing" - "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) -func TestParseInterrupts(t *testing.T) { - interruptStr := ` CPU0 CPU1 - 0: 134 0 IO-APIC-edge timer - 1: 7 3 IO-APIC-edge i8042 -NMI: 0 0 Non-maskable interrupts -LOC: 2338608687 2334309625 Local timer interrupts -MIS: 0 -NET_RX: 867028 225 -TASKLET: 205 0` - f := bytes.NewBufferString(interruptStr) - parsed := []IRQ{ - { - ID: "0", Type: "IO-APIC-edge", Device: "timer", - Cpus: []int64{int64(134), int64(0)}, Total: int64(134), - }, - { - ID: "1", Type: "IO-APIC-edge", Device: "i8042", - Cpus: []int64{int64(7), int64(3)}, Total: int64(10), - }, - { - ID: "NMI", Type: "Non-maskable interrupts", - Cpus: []int64{int64(0), int64(0)}, Total: int64(0), - }, - { - ID: "LOC", Type: "Local timer interrupts", - Cpus: []int64{int64(2338608687), int64(2334309625)}, - Total: int64(4672918312), - }, - { - ID: "MIS", Cpus: []int64{int64(0)}, Total: int64(0), - }, - { - ID: "NET_RX", Cpus: []int64{int64(867028), int64(225)}, - Total: int64(867253), - }, - { - ID: "TASKLET", Cpus: []int64{int64(205), int64(0)}, - Total: int64(205), - }, - } - got, err := parseInterrupts(f) - require.Equal(t, nil, err) - require.NotEqual(t, 0, len(got)) - require.Equal(t, len(got), len(parsed)) - for i := 0; i < len(parsed); i++ { - assert.Equal(t, parsed[i], got[i]) - 
for k := 0; k < len(parsed[i].Cpus); k++ { - assert.Equal(t, parsed[i].Cpus[k], got[i].Cpus[k]) - } +// ===================================================================================== +// Setup and helper functions +// ===================================================================================== + +func expectCpuAsTags(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) { + for idx, value := range irq.Cpus { + m.AssertContainsTaggedFields(t, measurement, map[string]interface{}{"count": value}, map[string]string{"irq": irq.ID, "type": irq.Type, "device": irq.Device, "cpu": fmt.Sprintf("cpu%d", idx)}) } } -// Tests #4470 -func TestParseInterruptsBad(t *testing.T) { - interruptStr := ` CPU0 CPU1 CPU2 CPU3 - 16: 0 0 0 0 bcm2836-timer 0 Edge arch_timer - 17: 127224250 118424219 127224437 117885416 bcm2836-timer 1 Edge arch_timer - 21: 0 0 0 0 bcm2836-pmu 9 Edge arm-pmu - 23: 1549514 0 0 0 ARMCTRL-level 1 Edge 3f00b880.mailbox - 24: 2 0 0 0 ARMCTRL-level 2 Edge VCHIQ doorbell - 46: 0 0 0 0 ARMCTRL-level 48 Edge bcm2708_fb dma - 48: 0 0 0 0 ARMCTRL-level 50 Edge DMA IRQ - 50: 0 0 0 0 ARMCTRL-level 52 Edge DMA IRQ - 51: 208 0 0 0 ARMCTRL-level 53 Edge DMA IRQ - 54: 883002 0 0 0 ARMCTRL-level 56 Edge DMA IRQ - 59: 0 0 0 0 ARMCTRL-level 61 Edge bcm2835-auxirq - 62: 521451447 0 0 0 ARMCTRL-level 64 Edge dwc_otg, dwc_otg_pcd, dwc_otg_hcd:usb1 - 86: 857597 0 0 0 ARMCTRL-level 88 Edge mmc0 - 87: 4938 0 0 0 ARMCTRL-level 89 Edge uart-pl011 - 92: 5669 0 0 0 ARMCTRL-level 94 Edge mmc1 - FIQ: usb_fiq - IPI0: 0 0 0 0 CPU wakeup interrupts - IPI1: 0 0 0 0 Timer broadcast interrupts - IPI2: 23564958 23464876 23531165 23040826 Rescheduling interrupts - IPI3: 148438 639704 644266 588150 Function call interrupts - IPI4: 0 0 0 0 CPU stop interrupts - IPI5: 4348149 1843985 3819457 1822877 IRQ work interrupts - IPI6: 0 0 0 0 completion interrupts` - f := bytes.NewBufferString(interruptStr) - parsed := []IRQ{ - { - ID: "16", Type: "bcm2836-timer", Device: "0 Edge arch_timer", - Cpus: []int64{0, 0, 0, 0}, - }, - { - ID: "17", Type: "bcm2836-timer", Device: "1 Edge arch_timer", - Cpus: []int64{127224250, 118424219, 127224437, 117885416}, Total: 490758322, - }, - { - ID: "21", Type: "bcm2836-pmu", Device: "9 Edge arm-pmu", - Cpus: []int64{0, 0, 0, 0}, - }, - { - ID: "23", Type: "ARMCTRL-level", Device: "1 Edge 3f00b880.mailbox", - Cpus: []int64{1549514, 0, 0, 0}, Total: 1549514, - }, - { - ID: "24", Type: "ARMCTRL-level", Device: "2 Edge VCHIQ doorbell", - Cpus: []int64{2, 0, 0, 0}, Total: 2, - }, - { - ID: "46", Type: "ARMCTRL-level", Device: "48 Edge bcm2708_fb dma", - Cpus: []int64{0, 0, 0, 0}, - }, - { - ID: "48", Type: "ARMCTRL-level", Device: "50 Edge DMA IRQ", - Cpus: []int64{0, 0, 0, 0}, - }, - { - ID: "50", Type: "ARMCTRL-level", Device: "52 Edge DMA IRQ", - Cpus: []int64{0, 0, 0, 0}, - }, - { - ID: "51", Type: "ARMCTRL-level", Device: "53 Edge DMA IRQ", - Cpus: []int64{208, 0, 0, 0}, Total: 208, - }, - { - ID: "54", Type: "ARMCTRL-level", Device: "56 Edge DMA IRQ", - Cpus: []int64{883002, 0, 0, 0}, Total: 883002, - }, - { - ID: "59", Type: "ARMCTRL-level", Device: "61 Edge bcm2835-auxirq", - Cpus: []int64{0, 0, 0, 0}, - }, - { - ID: "62", Type: "ARMCTRL-level", Device: "64 Edge dwc_otg, dwc_otg_pcd, dwc_otg_hcd:usb1", - Cpus: []int64{521451447, 0, 0, 0}, Total: 521451447, - }, - { - ID: "86", Type: "ARMCTRL-level", Device: "88 Edge mmc0", - Cpus: []int64{857597, 0, 0, 0}, Total: 857597, - }, - { - ID: "87", Type: "ARMCTRL-level", Device: "89 Edge uart-pl011", - Cpus: []int64{4938, 
0, 0, 0}, Total: 4938, - }, - { - ID: "92", Type: "ARMCTRL-level", Device: "94 Edge mmc1", - Cpus: []int64{5669, 0, 0, 0}, Total: 5669, - }, - { - ID: "IPI0", Type: "CPU wakeup interrupts", - Cpus: []int64{0, 0, 0, 0}, - }, - { - ID: "IPI1", Type: "Timer broadcast interrupts", - Cpus: []int64{0, 0, 0, 0}, - }, - { - ID: "IPI2", Type: "Rescheduling interrupts", - Cpus: []int64{23564958, 23464876, 23531165, 23040826}, Total: 93601825, - }, - { - ID: "IPI3", Type: "Function call interrupts", - Cpus: []int64{148438, 639704, 644266, 588150}, Total: 2020558, - }, - { - ID: "IPI4", Type: "CPU stop interrupts", - Cpus: []int64{0, 0, 0, 0}, - }, - { - ID: "IPI5", Type: "IRQ work interrupts", - Cpus: []int64{4348149, 1843985, 3819457, 1822877}, Total: 11834468, - }, - { - ID: "IPI6", Type: "completion interrupts", - Cpus: []int64{0, 0, 0, 0}, - }, +func expectCpuAsFields(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) { + fields := map[string]interface{}{} + total := int64(0) + for idx, count := range irq.Cpus { + fields[fmt.Sprintf("cpu%d", idx)] = count + total += count } - got, err := parseInterrupts(f) + fields["total"] = total + + m.AssertContainsTaggedFields(t, measurement, fields, map[string]string{"irq": irq.ID, "type": irq.Type, "device": irq.Device}) +} + +func setup(t *testing.T, irqString string, cpuAsTags bool) (*testutil.Accumulator, []IRQ) { + f := bytes.NewBufferString(irqString) + irqs, err := parseInterrupts(f) require.Equal(t, nil, err) - require.NotEqual(t, 0, len(got)) - require.Equal(t, len(got), len(parsed)) - for i := 0; i < len(parsed); i++ { - assert.Equal(t, parsed[i], got[i]) - for k := 0; k < len(parsed[i].Cpus); k++ { - assert.Equal(t, parsed[i].Cpus[k], got[i].Cpus[k]) - } + require.NotEqual(t, 0, len(irqs)) + + acc := new(testutil.Accumulator) + reportMetrics("soft_interrupts", irqs, acc, cpuAsTags) + + return acc, irqs +} + +// ===================================================================================== +// Soft interrupts +// ===================================================================================== + +const softIrqsString = ` CPU0 CPU1 + 0: 134 0 IO-APIC-edge timer + 1: 7 3 IO-APIC-edge i8042 + NMI: 0 0 Non-maskable interrupts + LOC: 2338608687 2334309625 Local timer interrupts + MIS: 0 + NET_RX: 867028 225 + TASKLET: 205 0` + +var softIrqsExpectedArgs = []IRQ{ + {ID: "0", Type: "IO-APIC-edge", Device: "timer", Cpus: []int64{134, 0}}, + {ID: "1", Type: "IO-APIC-edge", Device: "i8042", Cpus: []int64{7, 3}}, + {ID: "NMI", Type: "Non-maskable interrupts", Cpus: []int64{0, 0}}, + {ID: "MIS", Cpus: []int64{0}}, + {ID: "NET_RX", Cpus: []int64{867028, 225}}, + {ID: "TASKLET", Cpus: []int64{205, 0}}, +} + +func TestCpuAsTagsSoftIrqs(t *testing.T) { + acc, irqs := setup(t, softIrqsString, true) + reportMetrics("soft_interrupts", irqs, acc, true) + + for _, irq := range softIrqsExpectedArgs { + expectCpuAsTags(acc, t, "soft_interrupts", irq) + } +} + +func TestCpuAsFieldsSoftIrqs(t *testing.T) { + acc, irqs := setup(t, softIrqsString, false) + reportMetrics("soft_interrupts", irqs, acc, false) + + for _, irq := range softIrqsExpectedArgs { + expectCpuAsFields(acc, t, "soft_interrupts", irq) + } +} + +// ===================================================================================== +// HW interrupts, tests #4470 +// ===================================================================================== + +const hwIrqsString = ` CPU0 CPU1 CPU2 CPU3 + 16: 0 0 0 0 bcm2836-timer 0 Edge arch_timer + 17: 127224250 118424219 127224437 
117885416 bcm2836-timer 1 Edge arch_timer + 21: 0 0 0 0 bcm2836-pmu 9 Edge arm-pmu + 23: 1549514 0 0 0 ARMCTRL-level 1 Edge 3f00b880.mailbox + 24: 2 0 0 0 ARMCTRL-level 2 Edge VCHIQ doorbell + 46: 0 0 0 0 ARMCTRL-level 48 Edge bcm2708_fb dma + 48: 0 0 0 0 ARMCTRL-level 50 Edge DMA IRQ + 50: 0 0 0 0 ARMCTRL-level 52 Edge DMA IRQ + 51: 208 0 0 0 ARMCTRL-level 53 Edge DMA IRQ + 54: 883002 0 0 0 ARMCTRL-level 56 Edge DMA IRQ + 59: 0 0 0 0 ARMCTRL-level 61 Edge bcm2835-auxirq + 62: 521451447 0 0 0 ARMCTRL-level 64 Edge dwc_otg, dwc_otg_pcd, dwc_otg_hcd:usb1 + 86: 857597 0 0 0 ARMCTRL-level 88 Edge mmc0 + 87: 4938 0 0 0 ARMCTRL-level 89 Edge uart-pl011 + 92: 5669 0 0 0 ARMCTRL-level 94 Edge mmc1 + FIQ: usb_fiq + IPI0: 0 0 0 0 CPU wakeup interrupts + IPI1: 0 0 0 0 Timer broadcast interrupts + IPI2: 23564958 23464876 23531165 23040826 Rescheduling interrupts + IPI3: 148438 639704 644266 588150 Function call interrupts + IPI4: 0 0 0 0 CPU stop interrupts + IPI5: 4348149 1843985 3819457 1822877 IRQ work interrupts + IPI6: 0 0 0 0 completion interrupts` + +var hwIrqsExpectedArgs = []IRQ{ + {ID: "16", Type: "bcm2836-timer", Device: "0 Edge arch_timer", Cpus: []int64{0, 0, 0, 0}}, + {ID: "17", Type: "bcm2836-timer", Device: "1 Edge arch_timer", Cpus: []int64{127224250, 118424219, 127224437, 117885416}}, + {ID: "21", Type: "bcm2836-pmu", Device: "9 Edge arm-pmu", Cpus: []int64{0, 0, 0, 0}}, + {ID: "23", Type: "ARMCTRL-level", Device: "1 Edge 3f00b880.mailbox", Cpus: []int64{1549514, 0, 0, 0}}, + {ID: "24", Type: "ARMCTRL-level", Device: "2 Edge VCHIQ doorbell", Cpus: []int64{2, 0, 0, 0}}, + {ID: "46", Type: "ARMCTRL-level", Device: "48 Edge bcm2708_fb dma", Cpus: []int64{0, 0, 0, 0}}, + {ID: "48", Type: "ARMCTRL-level", Device: "50 Edge DMA IRQ", Cpus: []int64{0, 0, 0, 0}}, + {ID: "50", Type: "ARMCTRL-level", Device: "52 Edge DMA IRQ", Cpus: []int64{0, 0, 0, 0}}, + {ID: "51", Type: "ARMCTRL-level", Device: "53 Edge DMA IRQ", Cpus: []int64{208, 0, 0, 0}}, + {ID: "54", Type: "ARMCTRL-level", Device: "56 Edge DMA IRQ", Cpus: []int64{883002, 0, 0, 0}}, + {ID: "59", Type: "ARMCTRL-level", Device: "61 Edge bcm2835-auxirq", Cpus: []int64{0, 0, 0, 0}}, + {ID: "62", Type: "ARMCTRL-level", Device: "64 Edge dwc_otg, dwc_otg_pcd, dwc_otg_hcd:usb1", Cpus: []int64{521451447, 0, 0, 0}}, + {ID: "86", Type: "ARMCTRL-level", Device: "88 Edge mmc0", Cpus: []int64{857597, 0, 0, 0}}, + {ID: "87", Type: "ARMCTRL-level", Device: "89 Edge uart-pl011", Cpus: []int64{4938, 0, 0, 0}}, + {ID: "92", Type: "ARMCTRL-level", Device: "94 Edge mmc1", Cpus: []int64{5669, 0, 0, 0}}, + {ID: "IPI0", Type: "CPU wakeup interrupts", Cpus: []int64{0, 0, 0, 0}}, + {ID: "IPI1", Type: "Timer broadcast interrupts", Cpus: []int64{0, 0, 0, 0}}, + {ID: "IPI2", Type: "Rescheduling interrupts", Cpus: []int64{23564958, 23464876, 23531165, 23040826}}, + {ID: "IPI3", Type: "Function call interrupts", Cpus: []int64{148438, 639704, 644266, 588150}}, + {ID: "IPI4", Type: "CPU stop interrupts", Cpus: []int64{0, 0, 0, 0}}, + {ID: "IPI5", Type: "IRQ work interrupts", Cpus: []int64{4348149, 1843985, 3819457, 1822877}}, + {ID: "IPI6", Type: "completion interrupts", Cpus: []int64{0, 0, 0, 0}}, +} + +func TestCpuAsTagsHwIrqs(t *testing.T) { + acc, irqs := setup(t, hwIrqsString, true) + reportMetrics("interrupts", irqs, acc, true) + + for _, irq := range hwIrqsExpectedArgs { + expectCpuAsTags(acc, t, "interrupts", irq) + } +} + +func TestCpuAsFieldsHwIrqs(t *testing.T) { + acc, irqs := setup(t, hwIrqsString, false) + reportMetrics("interrupts", irqs, acc, false) + + 
for _, irq := range hwIrqsExpectedArgs { + expectCpuAsFields(acc, t, "interrupts", irq) } } From 7479352e4ab2d21ad97aa50c4c43f4da183defd0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 30 Nov 2018 15:00:38 -0800 Subject: [PATCH 0403/1815] Rename interrupts cpu_as_tags to cpu_as_tag; update readme --- plugins/inputs/interrupts/README.md | 101 ++++++++++++++++-------- plugins/inputs/interrupts/interrupts.go | 17 ++-- plugins/inputs/unbound/README.md | 2 +- plugins/inputs/unbound/unbound.go | 2 +- 4 files changed, 80 insertions(+), 42 deletions(-) diff --git a/plugins/inputs/interrupts/README.md b/plugins/inputs/interrupts/README.md index 188070f74..5da647f47 100644 --- a/plugins/inputs/interrupts/README.md +++ b/plugins/inputs/interrupts/README.md @@ -3,48 +3,81 @@ The interrupts plugin gathers metrics about IRQs from `/proc/interrupts` and `/proc/softirqs`. ### Configuration -``` +```toml [[inputs.interrupts]] - # To report cpus as tags instead of fields use cpu_as_tags - # cpu_as_tags = false - # + ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is + ## stored as a field. + ## + ## The default is false for backwards compatibility, and will be changed to + ## true in a future version. It is recommended to set to true on new + ## deployments. + # cpu_as_tag = false + ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. # [inputs.interrupts.tagdrop] - # irq = [ "NET_RX", "TASKLET" ] + # irq = [ "NET_RX", "TASKLET" ] ``` -### Measurements -There are two measurements reported by this plugin. -- `interrupts` gathers metrics from the `/proc/interrupts` file -- `soft_interrupts` gathers metrics from the `/proc/softirqs` file +### Metrics -### Fields -For cpu_as_tags=false (default): -- CPUx: the amount of interrupts for the IRQ handled by the CPU -- Total: sum of interrupts for the IRS for all CPUs -For cpu_as_tags=true (): -- Count: the amount of interrupts for the IRQ handled by CPU described in CPU tag +There are two styles depending on the value of `cpu_as_tag`. 
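The split corresponds to reporting logic along the lines of the following sketch (illustrative only, reconstructed from the plugin tests above; `reportMetricsSketch` is a hypothetical name, the actual implementation is the `reportMetrics` function in `interrupts.go`):

```go
package interrupts

import (
	"fmt"

	"github.com/influxdata/telegraf"
)

// reportMetricsSketch is a minimal sketch of the two reporting styles:
// with cpuAsTag=true every per-cpu count becomes its own point carrying a
// "cpu" tag and a single "count" field; with cpuAsTag=false the counts are
// flattened into cpu0..cpuN fields plus a "total" field on one point.
func reportMetricsSketch(measurement string, irqs []IRQ, acc telegraf.Accumulator, cpuAsTag bool) {
	for _, irq := range irqs {
		tags := map[string]string{"irq": irq.ID, "type": irq.Type, "device": irq.Device}
		if cpuAsTag {
			for i, count := range irq.Cpus {
				cpuTags := map[string]string{"cpu": fmt.Sprintf("cpu%d", i)}
				for k, v := range tags {
					cpuTags[k] = v
				}
				acc.AddFields(measurement, map[string]interface{}{"count": count}, cpuTags)
			}
			continue
		}
		fields := map[string]interface{}{}
		total := int64(0)
		for i, count := range irq.Cpus {
			fields[fmt.Sprintf("cpu%d", i)] = count
			total += count
		}
		fields["total"] = total
		acc.AddFields(measurement, fields, tags)
	}
}
```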
-### Tags
-- irq: the IRQ
-- type: the type of interrupt
-- device: the name of the device that is located at that IRQ
-- cpu: the CPU (when cpus_as_tags=true)
+With `cpu_as_tag = false`:
+
+- interrupts
+  - tags:
+    - irq (IRQ name)
+    - type
+    - device (name of the device located at the IRQ)
+  - fields:
+    - cpuN (int, number of interrupts on CPU N; one field per cpu)
+    - total (int, total number of interrupts)
+
+- soft_interrupts
+  - tags:
+    - irq (IRQ name)
+    - type
+    - device (name of the device located at the IRQ)
+  - fields:
+    - cpuN (int, number of interrupts on CPU N; one field per cpu)
+    - total (int, total number of interrupts)
+
+With `cpu_as_tag = true`:
+
+- interrupts
+  - tags:
+    - irq (IRQ name)
+    - type
+    - device (name of the device located at the IRQ)
+    - cpu
+  - fields:
+    - count (int, number of interrupts)
+
+- soft_interrupts
+  - tags:
+    - irq (IRQ name)
+    - type
+    - device (name of the device located at the IRQ)
+    - cpu
+  - fields:
+    - count (int, number of interrupts)

### Example Output

-```
-./telegraf --config ~/interrupts_config.conf --test
-For cpus_as_tags=false (default):
-* Plugin: inputs.interrupts, Collection 1
-> interrupts,irq=0,type=IO-APIC,device=2-edge\ timer,host=hostname,cpu=cpu0 count=23i 1489346531000000000
-> interrupts,irq=1,host=hostname,type=IO-APIC,device=1-edge\ i8042,cpu=cpu0 count=9i 1489346531000000000
-> interrupts,irq=30,type=PCI-MSI,device=65537-edge\ virtio1-input.0,host=hostname,cpu=cpu1 count=1i 1489346531000000000
-> soft_interrupts,irq=NET_RX,host=hostname,cpu=cpu0 count=280879i 1489346531000000000
-
-For cpus_as_tags=true:
-> interrupts,cpu=cpu6,host=hostname,irq=PIW,type=Posted-interrupt\ wakeup\ event count=0i 1543539773000000000
-> interrupts,cpu=cpu7,host=hostname,irq=PIW,type=Posted-interrupt\ wakeup\ event count=0i 1543539773000000000
-> soft_interrupts,cpu=cpu0,host=hostname,irq=HI count=246441i 1543539773000000000
-> soft_interrupts,cpu=cpu1,host=hostname,irq=HI count=159154i 1543539773000000000
+With `cpu_as_tag = false`:
+```
+interrupts,irq=0,type=IO-APIC,device=2-edge\ timer cpu0=23i,total=23i 1489346531000000000
+interrupts,irq=1,type=IO-APIC,device=1-edge\ i8042 cpu0=9i,total=9i 1489346531000000000
+interrupts,irq=30,type=PCI-MSI,device=65537-edge\ virtio1-input.0 cpu1=1i,total=1i 1489346531000000000
+soft_interrupts,irq=NET_RX cpu0=280879i,total=280879i 1489346531000000000
+```
+
+With `cpu_as_tag = true`:
+```
+interrupts,cpu=cpu6,irq=PIW,type=Posted-interrupt\ wakeup\ event count=0i 1543539773000000000
+interrupts,cpu=cpu7,irq=PIW,type=Posted-interrupt\ wakeup\ event count=0i 1543539773000000000
+soft_interrupts,cpu=cpu0,irq=HI count=246441i 1543539773000000000
+soft_interrupts,cpu=cpu1,irq=HI count=159154i 1543539773000000000
```
diff --git a/plugins/inputs/interrupts/interrupts.go b/plugins/inputs/interrupts/interrupts.go
index 142dc34ec..5b0ca374c 100644
--- a/plugins/inputs/interrupts/interrupts.go
+++ b/plugins/inputs/interrupts/interrupts.go
@@ -13,7 +13,7 @@ import (
 )

 type Interrupts struct {
-	CpuAsTags bool
+	CpuAsTag bool `toml:"cpu_as_tag"`
 }

 type IRQ struct {
@@ -29,12 +29,17 @@ func NewIRQ(id string) *IRQ {
 }

 const sampleConfig = `
-  ## To report cpus as tags instead of fields use cpu_as_tags
-  # cpu_as_tags = false
-  #
+  ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is
+  ## stored as a field.
+  ##
+  ## The default is false for backwards compatibility, and will be changed to
+  ## true in a future version. It is recommended to set to true on new
+  ## deployments.
+ # cpu_as_tag = false + ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. # [inputs.interrupts.tagdrop] - # irq = [ "NET_RX", "TASKLET" ] + # irq = [ "NET_RX", "TASKLET" ] ` func (s *Interrupts) Description() string { @@ -116,7 +121,7 @@ func (s *Interrupts) Gather(acc telegraf.Accumulator) error { acc.AddError(fmt.Errorf("Parsing %s: %s", file, err)) continue } - reportMetrics(measurement, irqs, acc, s.CpuAsTags) + reportMetrics(measurement, irqs, acc, s.CpuAsTag) } return nil } diff --git a/plugins/inputs/unbound/README.md b/plugins/inputs/unbound/README.md index 4f1f862bb..2163bd375 100644 --- a/plugins/inputs/unbound/README.md +++ b/plugins/inputs/unbound/README.md @@ -23,7 +23,7 @@ a validating, recursive, and caching DNS resolver. ## When set to true, thread metrics are tagged with the thread id. ## - ## The default is false for backwards compatibility, and will be change to + ## The default is false for backwards compatibility, and will be changed to ## true in a future version. It is recommended to set to true on new ## deployments. thread_as_tag = false diff --git a/plugins/inputs/unbound/unbound.go b/plugins/inputs/unbound/unbound.go index 31a6d5005..02067c739 100644 --- a/plugins/inputs/unbound/unbound.go +++ b/plugins/inputs/unbound/unbound.go @@ -50,7 +50,7 @@ var sampleConfig = ` ## When set to true, thread metrics are tagged with the thread id. ## - ## The default is false for backwards compatibility, and will be change to + ## The default is false for backwards compatibility, and will be changed to ## true in a future version. It is recommended to set to true on new ## deployments. thread_as_tag = false From 97d833d440ac85a95db023f688be37b2955d835f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 30 Nov 2018 15:01:57 -0800 Subject: [PATCH 0404/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7094211f6..ce691b44c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ - [#4345](https://github.com/influxdata/telegraf/pull/4345): Allow for force gathering ES cluster stats. - [#5047](https://github.com/influxdata/telegraf/pull/5047): Add support for unix and unix_ms timestamps to csv parser. - [#5038](https://github.com/influxdata/telegraf/pull/5038): Add ability to tag metrics with topic in kafka_consumer. +- [#5024](https://github.com/influxdata/telegraf/pull/5024): Add option to store cpu as a tag in interrupts input. 
## v1.9.1 [unreleased] From d108925d8138f2113e93200650b5bcbc467b17b2 Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Mon, 10 Dec 2018 15:21:13 -0800 Subject: [PATCH 0405/1815] Update license information (#5075) --- docs/LICENSE_OF_DEPENDENCIES.md | 239 +++++++++++++++++--------------- 1 file changed, 126 insertions(+), 113 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index dace554a2..fec320ced 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -3,116 +3,129 @@ When distributed in a binary form, Telegraf may contain portions of the following works: -- code.cloudfoundry.org/clock [APACHE](https://github.com/cloudfoundry/clock/blob/master/LICENSE) -- collectd.org [MIT](https://github.com/collectd/go-collectd/blob/master/LICENSE) -- github.com/aerospike/aerospike-client-go [APACHE](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE) -- github.com/amir/raidman [PUBLIC DOMAIN](https://github.com/amir/raidman/blob/master/UNLICENSE) -- github.com/armon/go-metrics [MIT](https://github.com/armon/go-metrics/blob/master/LICENSE) -- github.com/Azure/go-autorest [APACHE](https://github.com/Azure/go-autorest/blob/master/LICENSE) -- github.com/aws/aws-sdk-go [APACHE](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) -- github.com/beorn7/perks [MIT](https://github.com/beorn7/perks/blob/master/LICENSE) -- github.com/boltdb/bolt [MIT](https://github.com/boltdb/bolt/blob/master/LICENSE) -- github.com/bsm/sarama-cluster [MIT](https://github.com/bsm/sarama-cluster/blob/master/LICENSE) -- github.com/cenkalti/backoff [MIT](https://github.com/cenkalti/backoff/blob/master/LICENSE) -- github.com/chuckpreslar/rcon [MIT](https://github.com/chuckpreslar/rcon#license) -- github.com/couchbase/go-couchbase [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE) -- github.com/couchbase/gomemcached [MIT](https://github.com/couchbase/gomemcached/blob/master/LICENSE) -- github.com/couchbase/goutils [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE) -- github.com/dancannon/gorethink [APACHE](https://github.com/dancannon/gorethink/blob/master/LICENSE) -- github.com/davecgh/go-spew [ISC](https://github.com/davecgh/go-spew/blob/master/LICENSE) -- github.com/dimchansky/utfbom [APACHE](https://github.com/dimchansky/utfbom/blob/master/LICENSE) -- github.com/docker/cli [APACHE](https://github.com/docker/cli/blob/master/LICENSE) -- github.com/docker/docker [APACHE](https://github.com/docker/docker/blob/master/LICENSE) -- github.com/docker/libnetwork [APACHE](https://github.com/docker/libnetwork/blob/master/LICENSE) -- github.com/eapache/go-resiliency [MIT](https://github.com/eapache/go-resiliency/blob/master/LICENSE) -- github.com/eapache/go-xerial-snappy [MIT](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE) -- github.com/eapache/queue [MIT](https://github.com/eapache/queue/blob/master/LICENSE) -- github.com/eclipse/paho.mqtt.golang [ECLIPSE](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE) -- github.com/fsnotify/fsnotify [BSD](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) -- github.com/fsouza/go-dockerclient [BSD](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE) -- github.com/gobwas/glob [MIT](https://github.com/gobwas/glob/blob/master/LICENSE) -- github.com/google/go-cmp [BSD](https://github.com/google/go-cmp/blob/master/LICENSE) -- github.com/gogo/protobuf [BSD](https://github.com/gogo/protobuf/blob/master/LICENSE) -- 
github.com/golang/protobuf [BSD](https://github.com/golang/protobuf/blob/master/LICENSE) -- github.com/golang/snappy [BSD](https://github.com/golang/snappy/blob/master/LICENSE) -- github.com/go-logfmt/logfmt [MIT](https://github.com/go-logfmt/logfmt/blob/master/LICENSE) -- github.com/gorilla/mux [BSD](https://github.com/gorilla/mux/blob/master/LICENSE) -- github.com/go-ini/ini [APACHE](https://github.com/go-ini/ini/blob/master/LICENSE) -- github.com/go-ole/go-ole [MIT](https://github.com/go-ole/go-ole/blob/master/LICENSE) -- github.com/go-sql-driver/mysql [MPL](https://github.com/go-sql-driver/mysql/blob/master/LICENSE) -- github.com/hailocab/go-hostpool [MIT](https://github.com/hailocab/go-hostpool/blob/master/LICENSE) -- github.com/hashicorp/consul [MPL](https://github.com/hashicorp/consul/blob/master/LICENSE) -- github.com/hashicorp/go-msgpack [BSD](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE) -- github.com/hashicorp/raft-boltdb [MPL](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE) -- github.com/hashicorp/raft [MPL](https://github.com/hashicorp/raft/blob/master/LICENSE) -- github.com/influxdata/tail [MIT](https://github.com/influxdata/tail/blob/master/LICENSE.txt) -- github.com/influxdata/toml [MIT](https://github.com/influxdata/toml/blob/master/LICENSE) -- github.com/influxdata/go-syslog [MIT](https://github.com/influxdata/go-syslog/blob/develop/LICENSE) -- github.com/influxdata/wlog [MIT](https://github.com/influxdata/wlog/blob/master/LICENSE) -- github.com/jackc/pgx [MIT](https://github.com/jackc/pgx/blob/master/LICENSE) -- github.com/jmespath/go-jmespath [APACHE](https://github.com/jmespath/go-jmespath/blob/master/LICENSE) -- github.com/kardianos/osext [BSD](https://github.com/kardianos/osext/blob/master/LICENSE) -- github.com/kardianos/service [ZLIB](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib) -- github.com/kballard/go-shellquote [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE) -- github.com/lib/pq [MIT](https://github.com/lib/pq/blob/master/LICENSE.md) -- github.com/matttproud/golang_protobuf_extensions [APACHE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE) -- github.com/Microsoft/ApplicationInsights-Go [APACHE](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE) -- github.com/Microsoft/go-winio [MIT](https://github.com/Microsoft/go-winio/blob/master/LICENSE) -- github.com/miekg/dns [BSD](https://github.com/miekg/dns/blob/master/LICENSE) -- github.com/naoina/go-stringutil [MIT](https://github.com/naoina/go-stringutil/blob/master/LICENSE) -- github.com/naoina/toml [MIT](https://github.com/naoina/toml/blob/master/LICENSE) -- github.com/nats-io/gnatsd [MIT](https://github.com/nats-io/gnatsd/blob/master/LICENSE) -- github.com/nats-io/go-nats [MIT](https://github.com/nats-io/go-nats/blob/master/LICENSE) -- github.com/nats-io/nats [MIT](https://github.com/nats-io/nats/blob/master/LICENSE) -- github.com/nats-io/nuid [MIT](https://github.com/nats-io/nuid/blob/master/LICENSE) -- github.com/nsqio/go-nsq [MIT](https://github.com/nsqio/go-nsq/blob/master/LICENSE) -- github.com/opentracing-contrib/go-observer [APACHE](https://github.com/opentracing-contrib/go-observer/blob/master/LICENSE) -- github.com/opentracing/opentracing-go [MIT](https://github.com/opentracing/opentracing-go/blob/master/LICENSE) -- github.com/openzipkin/zipkin-go-opentracing [MIT](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE) 
-- github.com/pierrec/lz4 [BSD](https://github.com/pierrec/lz4/blob/master/LICENSE) -- github.com/pierrec/xxHash [BSD](https://github.com/pierrec/xxHash/blob/master/LICENSE) -- github.com/pkg/errors [BSD](https://github.com/pkg/errors/blob/master/LICENSE) -- github.com/pmezard/go-difflib [BSD](https://github.com/pmezard/go-difflib/blob/master/LICENSE) -- github.com/prometheus/client_golang [APACHE](https://github.com/prometheus/client_golang/blob/master/LICENSE) -- github.com/prometheus/client_model [APACHE](https://github.com/prometheus/client_model/blob/master/LICENSE) -- github.com/prometheus/common [APACHE](https://github.com/prometheus/common/blob/master/LICENSE) -- github.com/prometheus/procfs [APACHE](https://github.com/prometheus/procfs/blob/master/LICENSE) -- github.com/rcrowley/go-metrics [BSD](https://github.com/rcrowley/go-metrics/blob/master/LICENSE) -- github.com/samuel/go-zookeeper [BSD](https://github.com/samuel/go-zookeeper/blob/master/LICENSE) -- github.com/satori/go.uuid [MIT](https://github.com/satori/go.uuid/blob/master/LICENSE) -- github.com/shirou/gopsutil [BSD](https://github.com/shirou/gopsutil/blob/master/LICENSE) -- github.com/shirou/w32 [BSD](https://github.com/shirou/w32/blob/master/LICENSE) -- github.com/Shopify/sarama [MIT](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE) -- github.com/Sirupsen/logrus [MIT](https://github.com/Sirupsen/logrus/blob/master/LICENSE) -- github.com/StackExchange/wmi [MIT](https://github.com/StackExchange/wmi/blob/master/LICENSE) -- github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md) -- github.com/soniah/gosnmp [BSD](https://github.com/soniah/gosnmp/blob/master/LICENSE) -- github.com/streadway/amqp [BSD](https://github.com/streadway/amqp/blob/master/LICENSE) -- github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md) -- github.com/stretchr/testify [MIT](https://github.com/stretchr/testify/blob/master/LICENCE.txt) -- github.com/tidwall/gjson [MIT](https://github.com/tidwall/gjson/blob/master/LICENSE) -- github.com/tidwall/match [MIT](https://github.com/tidwall/match/blob/master/LICENSE) -- github.com/mitchellh/mapstructure [MIT](https://github.com/mitchellh/mapstructure/blob/master/LICENSE) -- github.com/multiplay/go-ts3 [BSD](https://github.com/multiplay/go-ts3/blob/master/LICENSE) -- github.com/vishvananda/netlink [APACHE](https://github.com/vishvananda/netlink/blob/master/LICENSE) -- github.com/vishvananda/netns [APACHE](https://github.com/vishvananda/netns/blob/master/LICENSE) -- github.com/vjeantet/grok [APACHE](https://github.com/vjeantet/grok/blob/master/LICENSE) -- github.com/wvanbergen/kafka [MIT](https://github.com/wvanbergen/kafka/blob/master/LICENSE) -- github.com/wvanbergen/kazoo-go [MIT](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE) -- github.com/yuin/gopher-lua [MIT](https://github.com/yuin/gopher-lua/blob/master/LICENSE) -- github.com/zensqlmonitor/go-mssqldb [BSD](https://github.com/zensqlmonitor/go-mssqldb/blob/master/LICENSE.txt) -- golang.org/x/crypto [BSD](https://github.com/golang/crypto/blob/master/LICENSE) -- golang.org/x/net [BSD](https://go.googlesource.com/net/+/master/LICENSE) -- golang.org/x/oauth2 [BSD](https://go.googlesource.com/oauth2/+/master/LICENSE) -- golang.org/x/text [BSD](https://go.googlesource.com/text/+/master/LICENSE) -- golang.org/x/sys [BSD](https://go.googlesource.com/sys/+/master/LICENSE) -- google.golang.org/grpc [APACHE](https://github.com/google/grpc-go/blob/master/LICENSE) -- 
google.golang.org/genproto [APACHE](https://github.com/google/go-genproto/blob/master/LICENSE) -- gopkg.in/asn1-ber.v1 [MIT](https://github.com/go-asn1-ber/asn1-ber/blob/v1.2/LICENSE) -- gopkg.in/dancannon/gorethink.v1 [APACHE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE) -- gopkg.in/fatih/pool.v2 [MIT](https://github.com/fatih/pool/blob/v2.0.0/LICENSE) -- gopkg.in/ldap.v2 [MIT](https://github.com/go-ldap/ldap/blob/v2.5.0/LICENSE) -- gopkg.in/mgo.v2 [BSD](https://github.com/go-mgo/mgo/blob/v2/LICENSE) -- gopkg.in/olivere/elastic.v5 [MIT](https://github.com/olivere/elastic/blob/v5.0.38/LICENSE) -- gopkg.in/tomb.v1 [BSD](https://github.com/go-tomb/tomb/blob/v1/LICENSE) -- gopkg.in/yaml.v2 [APACHE](https://github.com/go-yaml/yaml/blob/v2/LICENSE) +- cloud.google.com/go [Apache License 2.0](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/LICENSE) +- code.cloudfoundry.org/clock [Apache License 2.0](https://github.com/cloudfoundry/clock/blob/master/LICENSE) +- collectd.org [MIT License](https://git.octo.it/?p=collectd.git;a=blob;f=COPYING;hb=HEAD) +- contrib.go.opencensus.io/exporter/stackdriver [Apache License 2.0](https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/LICENSE) +- github.com/aerospike/aerospike-client-go [Apache License 2.0](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE) +- github.com/alecthomas/template [BSD 3-Clause "New" or "Revised" License](https://github.com/alecthomas/template/blob/master/LICENSE) +- github.com/alecthomas/units [MIT License](https://github.com/alecthomas/units/blob/master/COPYING) +- github.com/amir/raidman [The Unlicense](https://github.com/amir/raidman/blob/master/UNLICENSE) +- github.com/apache/thrift [Apache License 2.0](https://github.com/apache/thrift/blob/master/LICENSE) +- github.com/aws/aws-sdk-go [Apache License 2.0](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) +- github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE) +- github.com/beorn7/perks [MIT License](https://github.com/beorn7/perks/blob/master/LICENSE) +- github.com/bsm/sarama-cluster [MIT License](https://github.com/bsm/sarama-cluster/blob/master/LICENSE) +- github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE) +- github.com/couchbase/go-couchbase [MIT License](https://github.com/couchbase/go-couchbase/blob/master/LICENSE) +- github.com/couchbase/gomemcached [MIT License](https://github.com/couchbase/gomemcached/blob/master/LICENSE) +- github.com/couchbase/goutils [COUCHBASE INC. 
COMMUNITY EDITION LICENSE](https://github.com/couchbase/goutils/blob/master/LICENSE.md) +- github.com/davecgh/go-spew [ISC License](https://github.com/davecgh/go-spew/blob/master/LICENSE) +- github.com/denisenkom/go-mssqldb [BSD 3-Clause "New" or "Revised" License](https://github.com/denisenkom/go-mssqldb/blob/master/LICENSE.txt) +- github.com/dgrijalva/jwt-go [MIT License](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE) +- github.com/dimchansky/utfbom [Apache License 2.0](https://github.com/dimchansky/utfbom/blob/master/LICENSE) +- github.com/docker/distribution [Apache License 2.0](https://github.com/docker/distribution/blob/master/LICENSE) +- github.com/docker/docker [Apache License 2.0](https://github.com/docker/docker/blob/master/LICENSE) +- github.com/docker/go-connections [Apache License 2.0](https://github.com/docker/go-connections/blob/master/LICENSE) +- github.com/docker/go-units [Apache License 2.0](https://github.com/docker/go-units/blob/master/LICENSE) +- github.com/docker/libnetwork [Apache License 2.0](https://github.com/docker/libnetwork/blob/master/LICENSE) +- github.com/eapache/go-resiliency [MIT License](https://github.com/eapache/go-resiliency/blob/master/LICENSE) +- github.com/eapache/go-xerial-snappy [MIT License](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE) +- github.com/eapache/queue [MIT License](https://github.com/eapache/queue/blob/master/LICENSE) +- github.com/eclipse/paho.mqtt.golang [Eclipse Public License - v 1.0](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE) +- github.com/ericchiang/k8s [Apache License 2.0](https://github.com/ericchiang/k8s/blob/master/LICENSE) +- github.com/go-ini/ini [Apache License 2.0](https://github.com/go-ini/ini/blob/master/LICENSE) +- github.com/go-logfmt/logfmt [MIT License](https://github.com/go-logfmt/logfmt/blob/master/LICENSE) +- github.com/go-ole/go-ole [MIT License](https://github.com/go-ole/go-ole/blob/master/LICENSE) +- github.com/go-redis/redis [BSD 2-Clause "Simplified" License](https://github.com/go-redis/redis/blob/master/LICENSE) +- github.com/go-sql-driver/mysql [Mozilla Public License 2.0](https://github.com/go-sql-driver/mysql/blob/master/LICENSE) +- github.com/gobwas/glob [MIT License](https://github.com/gobwas/glob/blob/master/LICENSE) +- github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE) +- github.com/golang/protobuf [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/protobuf/blob/master/LICENSE) +- github.com/golang/snappy [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/snappy/blob/master/LICENSE) +- github.com/google/go-cmp [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-cmp/blob/master/LICENSE) +- github.com/google/uuid [BSD 3-Clause "New" or "Revised" License](https://github.com/google/uuid/blob/master/LICENSE) +- github.com/googleapis/gax-go [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/gax-go/blob/master/LICENSE) +- github.com/gorilla/context [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/context/blob/master/LICENSE) +- github.com/gorilla/mux [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/mux/blob/master/LICENSE) +- github.com/hailocab/go-hostpool [MIT License](https://github.com/hailocab/go-hostpool/blob/master/LICENSE) +- github.com/hashicorp/consul [Mozilla Public License 2.0](https://github.com/hashicorp/consul/blob/master/LICENSE) +- github.com/hashicorp/go-cleanhttp 
[Mozilla Public License 2.0](https://github.com/hashicorp/go-cleanhttp/blob/master/LICENSE) +- github.com/hashicorp/go-rootcerts [Mozilla Public License 2.0](https://github.com/hashicorp/go-rootcerts/blob/master/LICENSE) +- github.com/hashicorp/serf [Mozilla Public License 2.0](https://github.com/hashicorp/serf/blob/master/LICENSE) +- github.com/influxdata/go-syslog [MIT License](https://github.com/influxdata/go-syslog/blob/develop/LICENSE) +- github.com/influxdata/tail [MIT License](https://github.com/influxdata/tail/blob/master/LICENSE.txt) +- github.com/influxdata/toml [MIT License](https://github.com/influxdata/toml/blob/master/LICENSE) +- github.com/influxdata/wlog [MIT License](https://github.com/influxdata/wlog/blob/master/LICENSE) +- github.com/jackc/pgx [MIT License](https://github.com/jackc/pgx/blob/master/LICENSE) +- github.com/jmespath/go-jmespath [Apache License 2.0](https://github.com/jmespath/go-jmespath/blob/master/LICENSE) +- github.com/kardianos/osext [BSD 3-Clause "New" or "Revised" License](https://github.com/kardianos/osext/blob/master/LICENSE) +- github.com/kardianos/service [zlib License](https://github.com/kardianos/service/blob/master/LICENSE) +- github.com/kballard/go-shellquote [MIT License](https://github.com/kballard/go-shellquote/blob/master/LICENSE) +- github.com/kr/logfmt [MIT License](https://github.com/kr/logfmt/blob/master/Readme) +- github.com/mailru/easyjson [MIT License](https://github.com/mailru/easyjson/blob/master/LICENSE) +- github.com/matttproud/golang_protobuf_extensions [Apache License 2.0](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE) +- github.com/Microsoft/ApplicationInsights-Go [MIT License](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE) +- github.com/Microsoft/go-winio [MIT License](https://github.com/Microsoft/go-winio/blob/master/LICENSE) +- github.com/miekg/dns [BSD 3-Clause Clear License](https://github.com/miekg/dns/blob/master/LICENSE) +- github.com/mitchellh/go-homedir [MIT License](https://github.com/mitchellh/go-homedir/blob/master/LICENSE) +- github.com/mitchellh/mapstructure [MIT License](https://github.com/mitchellh/mapstructure/blob/master/LICENSE) +- github.com/multiplay/go-ts3 [BSD 2-Clause "Simplified" License](https://github.com/multiplay/go-ts3/blob/master/LICENSE) +- github.com/naoina/go-stringutil [MIT License](https://github.com/naoina/go-stringutil/blob/master/LICENSE) +- github.com/nats-io/gnatsd [Apache License 2.0](https://github.com/nats-io/gnatsd/blob/master/LICENSE) +- github.com/nats-io/go-nats [Apache License 2.0](https://github.com/nats-io/go-nats/blob/master/LICENSE) +- github.com/nats-io/nuid [Apache License 2.0](https://github.com/nats-io/nuid/blob/master/LICENSE) +- github.com/nsqio/go-nsq [MIT License](https://github.com/nsqio/go-nsq/blob/master/LICENSE) +- github.com/opencontainers/go-digest [Apache License 2.0](https://github.com/opencontainers/go-digest/blob/master/LICENSE) +- github.com/opencontainers/image-spec [Apache License 2.0](https://github.com/opencontainers/image-spec/blob/master/LICENSE) +- github.com/opentracing-contrib/go-observer [Apache License 2.0](https://github.com/opentracing-contrib/go-observer/blob/master/LICENSE) +- github.com/opentracing/opentracing-go [MIT License](https://github.com/opentracing/opentracing-go/blob/master/LICENSE) +- github.com/openzipkin/zipkin-go-opentracing [MIT License](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE) +- github.com/pierrec/lz4 [BSD 3-Clause "New" or "Revised" 
License](https://github.com/pierrec/lz4/blob/master/LICENSE) +- github.com/pkg/errors [BSD 2-Clause "Simplified" License](https://github.com/pkg/errors/blob/master/LICENSE) +- github.com/pmezard/go-difflib [BSD 3-Clause Clear License](https://github.com/pmezard/go-difflib/blob/master/LICENSE) +- github.com/prometheus/client_golang [Apache License 2.0](https://github.com/prometheus/client_golang/blob/master/LICENSE) +- github.com/prometheus/client_model [Apache License 2.0](https://github.com/prometheus/client_model/blob/master/LICENSE) +- github.com/prometheus/common [Apache License 2.0](https://github.com/prometheus/common/blob/master/LICENSE) +- github.com/prometheus/procfs [Apache License 2.0](https://github.com/prometheus/procfs/blob/master/LICENSE) +- github.com/rcrowley/go-metrics [MIT License](https://github.com/rcrowley/go-metrics/blob/master/LICENSE) +- github.com/samuel/go-zookeeper [BSD 3-Clause Clear License](https://github.com/samuel/go-zookeeper/blob/master/LICENSE) +- github.com/satori/go.uuid [MIT License](https://github.com/satori/go.uuid/blob/master/LICENSE) +- github.com/shirou/gopsutil [BSD 3-Clause Clear License](https://github.com/shirou/gopsutil/blob/master/LICENSE) +- github.com/shirou/w32 [BSD 3-Clause Clear License](https://github.com/shirou/w32/blob/master/LICENSE) +- github.com/Shopify/sarama [MIT License](https://github.com/Shopify/sarama/blob/master/LICENSE) +- github.com/sirupsen/logrus [MIT License](https://github.com/sirupsen/logrus/blob/master/LICENSE) +- github.com/soniah/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/soniah/gosnmp/blob/master/LICENSE) +- github.com/StackExchange/wmi [MIT License](https://github.com/StackExchange/wmi/blob/master/LICENSE) +- github.com/streadway/amqp [BSD 2-Clause "Simplified" License](https://github.com/streadway/amqp/blob/master/LICENSE) +- github.com/stretchr/objx [MIT License](https://github.com/stretchr/objx/blob/master/LICENSE) +- github.com/stretchr/testify [custom -- permissive](https://github.com/stretchr/testify/blob/master/LICENSE) +- github.com/tidwall/gjson [MIT License](https://github.com/tidwall/gjson/blob/master/LICENSE) +- github.com/tidwall/match [MIT License](https://github.com/tidwall/match/blob/master/LICENSE) +- github.com/vishvananda/netlink [Apache License 2.0](https://github.com/vishvananda/netlink/blob/master/LICENSE) +- github.com/vishvananda/netns [Apache License 2.0](https://github.com/vishvananda/netns/blob/master/LICENSE) +- github.com/vjeantet/grok [Apache License 2.0](https://github.com/vjeantet/grok/blob/master/LICENSE) +- github.com/vmware/govmomi [Apache License 2.0](https://github.com/vmware/govmomi/blob/master/LICENSE.txt) +- github.com/wvanbergen/kafka [MIT License](https://github.com/wvanbergen/kafka/blob/master/LICENSE) +- github.com/wvanbergen/kazoo-go [MIT License](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE) +- github.com/yuin/gopher-lua [MIT License](https://github.com/yuin/gopher-lua/blob/master/LICENSE) +- go.opencensus.io [Apache License 2.0](https://github.com/census-instrumentation/opencensus-go/blob/master/LICENSE) +- golang.org/x/crypto [BSD 3-Clause Clear License](https://github.com/golang/crypto/blob/master/LICENSE) +- golang.org/x/net [BSD 3-Clause Clear License](https://github.com/golang/net/blob/master/LICENSE) +- golang.org/x/oauth2 [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/oauth2/blob/master/LICENSE) +- golang.org/x/sys [BSD 3-Clause Clear License](https://github.com/golang/sys/blob/master/LICENSE) +- 
golang.org/x/text [BSD 3-Clause Clear License](https://github.com/golang/text/blob/master/LICENSE)
+- google.golang.org/api [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/google-api-go-client/blob/master/LICENSE)
+- google.golang.org/appengine [Apache License 2.0](https://github.com/golang/appengine/blob/master/LICENSE)
+- google.golang.org/genproto [Apache License 2.0](https://github.com/google/go-genproto/blob/master/LICENSE)
+- google.golang.org/grpc [Apache License 2.0](https://github.com/grpc/grpc-go/blob/master/LICENSE)
+- gopkg.in/alecthomas/kingpin.v2 [MIT License](https://github.com/alecthomas/kingpin/blob/v2.2.6/COPYING)
+- gopkg.in/asn1-ber.v1 [MIT License](https://github.com/go-asn1-ber/asn1-ber/blob/v1.3/LICENSE)
+- gopkg.in/fatih/pool.v2 [MIT License](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
+- gopkg.in/fsnotify.v1 [BSD 3-Clause "New" or "Revised" License](https://github.com/fsnotify/fsnotify/blob/v1.4.7/LICENSE)
+- gopkg.in/gorethink/gorethink.v3 [Apache License 2.0](https://github.com/rethinkdb/rethinkdb-go/blob/v3.0.5/LICENSE)
+- gopkg.in/ldap.v2 [MIT License](https://github.com/go-ldap/ldap/blob/v2.5.1/LICENSE)
+- gopkg.in/mgo.v2 [BSD 2-Clause "Simplified" License](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
+- gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE)
+- gopkg.in/tomb.v1 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v1/LICENSE)
+- gopkg.in/yaml.v2 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v2.2.2/LICENSE)

From adce43f54618b5418635d0bcf1c478ccb2f7aa4f Mon Sep 17 00:00:00 2001
From: Emmanuel Nosa Evbuomwan
Date: Tue, 11 Dec 2018 00:24:11 +0100
Subject: [PATCH 0406/1815] Fix grammar and typos in fluentd README.md (#5127)

---
 plugins/inputs/fluentd/README.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/plugins/inputs/fluentd/README.md b/plugins/inputs/fluentd/README.md
index e46428417..6c5bada3c 100644
--- a/plugins/inputs/fluentd/README.md
+++ b/plugins/inputs/fluentd/README.md
@@ -3,10 +3,10 @@

 The fluentd plugin gathers metrics from plugin endpoint provided by [in_monitor plugin](http://docs.fluentd.org/v0.12/articles/monitoring).
 This plugin understands data provided by /api/plugin.json resource (/api/config.json is not covered).

-You might need to adjust your fluentd configuration, in order to reduce series cardinality in case whene your fluentd restarts frequently. Every time when fluentd starts, `plugin_id` value is given a new random value.
+You might need to adjust your fluentd configuration to reduce series cardinality if your fluentd restarts frequently. Every time fluentd starts, the `plugin_id` value is given a new random value.
 According to [fluentd documentation](http://docs.fluentd.org/v0.12/articles/config-file), you are able to add `@id` parameter for each plugin to avoid this behaviour and define custom `plugin_id`.
-example configuratio with `@id` parameter for http plugin:
+Example configuration with the `@id` parameter for the http plugin:
 ```
 <source>
   @type http
   @id http
   @port 8888
 </source>
 ```
@@ -36,7 +36,7 @@ example configuratio with `@id` parameter for http plugin:

 ### Measurements & Fields:

-Fields may vary depends on type of the plugin
+Fields may vary depending on the plugin type

 - fluentd
   - retry_count (float, unit)

From d0a6051fd73dbd43470b4313a63c9dc201aebbcc Mon Sep 17 00:00:00 2001
From: Greg <2653109+glinton@users.noreply.github.com>
Date: Mon, 10 Dec 2018 16:55:58 -0700
Subject: [PATCH 0407/1815] Prevent panic when marking the offset in kafka_consumer (#5118)

---
 plugins/inputs/kafka_consumer/kafka_consumer.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go
index 0814d8e14..545e37f5a 100644
--- a/plugins/inputs/kafka_consumer/kafka_consumer.go
+++ b/plugins/inputs/kafka_consumer/kafka_consumer.go
@@ -275,6 +275,7 @@ func (k *Kafka) onDelivery(track telegraf.DeliveryInfo) {
 	msg, ok := k.messages[track.ID()]
 	if !ok {
 		log.Printf("E! [inputs.kafka_consumer] Could not mark message delivered: %d", track.ID())
+		return
 	}

 	if track.Delivered() {

From 74d8523db62de1f8530ddd3699c9ac70f44a5d02 Mon Sep 17 00:00:00 2001
From: Greg <2653109+glinton@users.noreply.github.com>
Date: Mon, 10 Dec 2018 17:07:08 -0700
Subject: [PATCH 0408/1815] Aggregate early metrics, rather than ignore (#5085)

---
 internal/models/running_aggregator.go      | 5 +++--
 internal/models/running_aggregator_test.go | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/internal/models/running_aggregator.go b/internal/models/running_aggregator.go
index 0315aa671..4fb7bcbe1 100644
--- a/internal/models/running_aggregator.go
+++ b/internal/models/running_aggregator.go
@@ -109,6 +109,7 @@ func (r *RunningAggregator) metricDropped(metric telegraf.Metric) {

 // Add a metric to the aggregator and return true if the original metric
 // should be dropped.
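// Metrics with timestamps earlier than the current period are aggregated
// rather than ignored; a metric is dropped only when no period has started
// yet or its timestamp falls after periodEnd, in which case the
// drop_original setting is honored.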
func (r *RunningAggregator) Add(metric telegraf.Metric) bool { + if ok := r.Config.Filter.Select(metric); !ok { return false } @@ -121,9 +122,9 @@ func (r *RunningAggregator) Add(metric telegraf.Metric) bool { r.Lock() defer r.Unlock() - if r.periodStart.IsZero() || metric.Time().Before(r.periodStart) || metric.Time().After(r.periodEnd) { + if r.periodStart.IsZero() || metric.Time().After(r.periodEnd) { r.metricDropped(metric) - return false + return r.Config.DropOriginal } r.Aggregator.Add(metric) diff --git a/internal/models/running_aggregator_test.go b/internal/models/running_aggregator_test.go index 2212829f9..6bacbf8ed 100644 --- a/internal/models/running_aggregator_test.go +++ b/internal/models/running_aggregator_test.go @@ -87,7 +87,7 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) { ra.Push(&acc) require.Equal(t, 1, len(acc.Metrics)) - require.Equal(t, int64(101), acc.Metrics[0].Fields["sum"]) + require.Equal(t, int64(202), acc.Metrics[0].Fields["sum"]) } func TestAddAndPushOnePeriod(t *testing.T) { From 83bc3d12772b56310c01751dcc9a1733605b881b Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 10 Dec 2018 17:14:20 -0700 Subject: [PATCH 0409/1815] Improve docs to clarify common issues (#5054) --- plugins/inputs/jolokia2/examples/java.conf | 2 +- plugins/inputs/logparser/README.md | 1 + .../logparser/grok/patterns/influx-patterns | 73 ------------------- plugins/inputs/logparser/logparser_test.go | 24 +++--- .../{grok => }/testdata/test-patterns | 0 .../logparser/{grok => }/testdata/test_a.log | 0 .../logparser/{grok => }/testdata/test_b.log | 0 plugins/inputs/redis/README.md | 3 + plugins/inputs/win_perf_counters/README.md | 15 ++-- plugins/parsers/grok/README.md | 1 + plugins/parsers/grok/influx_patterns.go | 2 +- plugins/parsers/json/README.md | 5 +- plugins/processors/enum/README.md | 2 +- 13 files changed, 34 insertions(+), 94 deletions(-) delete mode 100644 plugins/inputs/logparser/grok/patterns/influx-patterns rename plugins/inputs/logparser/{grok => }/testdata/test-patterns (100%) rename plugins/inputs/logparser/{grok => }/testdata/test_a.log (100%) rename plugins/inputs/logparser/{grok => }/testdata/test_b.log (100%) diff --git a/plugins/inputs/jolokia2/examples/java.conf b/plugins/inputs/jolokia2/examples/java.conf index 361bce1d2..32a68195c 100644 --- a/plugins/inputs/jolokia2/examples/java.conf +++ b/plugins/inputs/jolokia2/examples/java.conf @@ -14,7 +14,7 @@ [[inputs.jolokia2_agent.metric]] name = "java_garbage_collector" - mbean = "java.lang:name=G1*,type=GarbageCollector" + mbean = "java.lang:name=*,type=GarbageCollector" paths = ["CollectionTime", "CollectionCount"] tag_keys = ["name"] diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index 2bbd5253d..00b37e504 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -76,6 +76,7 @@ line and the `semantic_name` is used to name the field or tag. The extension other special handling. By default all named captures are converted into string fields. +If a pattern does not have a semantic name it will not be captured. Timestamp modifiers can be used to convert captures to the timestamp of the parsed metric. If no timestamp is parsed the metric will be created using the current time. 
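For example, a custom pattern assembled from the builtin logstash patterns (the `EXAMPLE` pattern name is arbitrary; `ts-httpd` is one of the timestamp modifiers described above):

```
# EXAMPLE is an arbitrary pattern name. Named captures become a field
# (clientip), a tag (response_code) and the metric timestamp (ts); the bare
# %{NUMBER} has no semantic name, so it is matched but not captured.
EXAMPLE \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER} %{NUMBER:response_code:tag} %{IPORHOST:clientip}

# Matches a line such as:
#   [04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1
```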
diff --git a/plugins/inputs/logparser/grok/patterns/influx-patterns b/plugins/inputs/logparser/grok/patterns/influx-patterns deleted file mode 100644 index 931b61bc8..000000000 --- a/plugins/inputs/logparser/grok/patterns/influx-patterns +++ /dev/null @@ -1,73 +0,0 @@ -# Captures are a slightly modified version of logstash "grok" patterns, with -# the format %{[:][:]} -# By default all named captures are converted into string fields. -# Modifiers can be used to convert captures to other types or tags. -# Timestamp modifiers can be used to convert captures to the timestamp of the -# parsed metric. - -# View logstash grok pattern docs here: -# https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html -# All default logstash patterns are supported, these can be viewed here: -# https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns - -# Available modifiers: -# string (default if nothing is specified) -# int -# float -# duration (ie, 5.23ms gets converted to int nanoseconds) -# tag (converts the field into a tag) -# drop (drops the field completely) -# Timestamp modifiers: -# ts-ansic ("Mon Jan _2 15:04:05 2006") -# ts-unix ("Mon Jan _2 15:04:05 MST 2006") -# ts-ruby ("Mon Jan 02 15:04:05 -0700 2006") -# ts-rfc822 ("02 Jan 06 15:04 MST") -# ts-rfc822z ("02 Jan 06 15:04 -0700") -# ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST") -# ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST") -# ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700") -# ts-rfc3339 ("2006-01-02T15:04:05Z07:00") -# ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00") -# ts-httpd ("02/Jan/2006:15:04:05 -0700") -# ts-epoch (seconds since unix epoch) -# ts-epochnano (nanoseconds since unix epoch) -# ts-"CUSTOM" -# CUSTOM time layouts must be within quotes and be the representation of the -# "reference time", which is Mon Jan 2 15:04:05 -0700 MST 2006 -# See https://golang.org/pkg/time/#Parse for more details. - -# Example log file pattern, example log looks like this: -# [04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs -# Breakdown of the DURATION pattern below: -# NUMBER is a builtin logstash grok pattern matching float & int numbers. -# [nuµm]? is a regex specifying 0 or 1 of the characters within brackets. -# s is also regex, this pattern must end in "s". -# so DURATION will match something like '5.324ms' or '6.1µs' or '10s' -DURATION %{NUMBER}[nuµm]?s -RESPONSE_CODE %{NUMBER:response_code:tag} -RESPONSE_TIME %{DURATION:response_time_ns:duration} -EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME} - -# Wider-ranging username matching vs. 
logstash built-in %{USER} -NGUSERNAME [a-zA-Z0-9\.\@\-\+_%]+ -NGUSER %{NGUSERNAME} -# Wider-ranging client IP matching -CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1) - -## -## COMMON LOG PATTERNS -## - -# apache & nginx logs, this is also known as the "common log format" -# see https://en.wikipedia.org/wiki/Common_Log_Format -COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NOTSPACE:ident} %{NOTSPACE:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-) - -# Combined log format is the same as the common log format but with the addition -# of two quoted strings at the end for "referrer" and "agent" -# See Examples at http://httpd.apache.org/docs/current/mod/mod_log_config.html -COMBINED_LOG_FORMAT %{COMMON_LOG_FORMAT} %{QS:referrer} %{QS:agent} - -# HTTPD log formats -HTTPD20_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{LOGLEVEL:loglevel:tag}\] (?:\[client %{IPORHOST:clientip}\] ){0,1}%{GREEDYDATA:errormsg} -HTTPD24_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{WORD:module}:%{LOGLEVEL:loglevel:tag}\] \[pid %{POSINT:pid:int}:tid %{NUMBER:tid:int}\]( \(%{POSINT:proxy_errorcode:int}\)%{DATA:proxy_errormessage}:)?( \[client %{IPORHOST:client}:%{POSINT:clientport}\])? %{DATA:errorcode}: %{GREEDYDATA:message} -HTTPD_ERRORLOG %{HTTPD20_ERRORLOG}|%{HTTPD24_ERRORLOG} diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go index 3f0ab4daa..90ae39161 100644 --- a/plugins/inputs/logparser/logparser_test.go +++ b/plugins/inputs/logparser/logparser_test.go @@ -15,7 +15,7 @@ import ( func TestStartNoParsers(t *testing.T) { logparser := &LogParserPlugin{ FromBeginning: true, - Files: []string{"grok/testdata/*.log"}, + Files: []string{"testdata/*.log"}, } acc := testutil.Accumulator{} @@ -27,10 +27,10 @@ func TestGrokParseLogFilesNonExistPattern(t *testing.T) { logparser := &LogParserPlugin{ FromBeginning: true, - Files: []string{thisdir + "grok/testdata/*.log"}, + Files: []string{thisdir + "testdata/*.log"}, GrokConfig: GrokConfig{ Patterns: []string{"%{FOOBAR}"}, - CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"}, + CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, }, } @@ -46,10 +46,10 @@ func TestGrokParseLogFiles(t *testing.T) { GrokConfig: GrokConfig{ MeasurementName: "logparser_grok", Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, - CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"}, + CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, }, FromBeginning: true, - Files: []string{thisdir + "grok/testdata/*.log"}, + Files: []string{thisdir + "testdata/*.log"}, } acc := testutil.Accumulator{} @@ -67,7 +67,7 @@ func TestGrokParseLogFiles(t *testing.T) { }, map[string]string{ "response_code": "200", - "path": thisdir + "grok/testdata/test_a.log", + "path": thisdir + "testdata/test_a.log", }) acc.AssertContainsTaggedFields(t, "logparser_grok", @@ -77,7 +77,7 @@ func TestGrokParseLogFiles(t *testing.T) { "nomodifier": "nomodifier", }, map[string]string{ - "path": thisdir + "grok/testdata/test_b.log", + "path": thisdir + "testdata/test_b.log", }) } @@ -94,7 +94,7 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { GrokConfig: GrokConfig{ MeasurementName: "logparser_grok", Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, - CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"}, + CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, }, } @@ 
-103,7 +103,7 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { assert.Equal(t, acc.NFields(), 0) - _ = os.Symlink(thisdir+"grok/testdata/test_a.log", emptydir+"/test_a.log") + _ = os.Symlink(thisdir+"testdata/test_a.log", emptydir+"/test_a.log") assert.NoError(t, acc.GatherError(logparser.Gather)) acc.Wait(1) @@ -129,11 +129,11 @@ func TestGrokParseLogFilesOneBad(t *testing.T) { logparser := &LogParserPlugin{ FromBeginning: true, - Files: []string{thisdir + "grok/testdata/test_a.log"}, + Files: []string{thisdir + "testdata/test_a.log"}, GrokConfig: GrokConfig{ MeasurementName: "logparser_grok", Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_BAD}"}, - CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"}, + CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, }, } @@ -153,7 +153,7 @@ func TestGrokParseLogFilesOneBad(t *testing.T) { }, map[string]string{ "response_code": "200", - "path": thisdir + "grok/testdata/test_a.log", + "path": thisdir + "testdata/test_a.log", }) } diff --git a/plugins/inputs/logparser/grok/testdata/test-patterns b/plugins/inputs/logparser/testdata/test-patterns similarity index 100% rename from plugins/inputs/logparser/grok/testdata/test-patterns rename to plugins/inputs/logparser/testdata/test-patterns diff --git a/plugins/inputs/logparser/grok/testdata/test_a.log b/plugins/inputs/logparser/testdata/test_a.log similarity index 100% rename from plugins/inputs/logparser/grok/testdata/test_a.log rename to plugins/inputs/logparser/testdata/test_a.log diff --git a/plugins/inputs/logparser/grok/testdata/test_b.log b/plugins/inputs/logparser/testdata/test_b.log similarity index 100% rename from plugins/inputs/logparser/grok/testdata/test_b.log rename to plugins/inputs/logparser/testdata/test_b.log diff --git a/plugins/inputs/redis/README.md b/plugins/inputs/redis/README.md index da4e8b71a..79122f228 100644 --- a/plugins/inputs/redis/README.md +++ b/plugins/inputs/redis/README.md @@ -15,6 +15,9 @@ ## If no port is specified, 6379 is used servers = ["tcp://localhost:6379"] + ## specify server password + # password = "s#cr@t%" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" diff --git a/plugins/inputs/win_perf_counters/README.md b/plugins/inputs/win_perf_counters/README.md index cf2ba4d64..11496baff 100644 --- a/plugins/inputs/win_perf_counters/README.md +++ b/plugins/inputs/win_perf_counters/README.md @@ -174,7 +174,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. ### Generic Queries ``` - +[[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] # Processor usage, alternative to native, reports on a per core. ObjectName = "Processor" @@ -218,6 +218,9 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. ### Active Directory Domain Controller ``` +[[inputs.win_perf_counters]] + [inputs.win_perf_counters.tags] + monitorgroup = "ActiveDirectory" [[inputs.win_perf_counters.object]] ObjectName = "DirectoryServices" Instances = ["*"] @@ -243,6 +246,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. ### DFS Namespace + Domain Controllers ``` +[[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] # AD, DFS N, Useful if the server hosts a DFS Namespace or is a Domain Controller ObjectName = "DFS Namespace Service Referrals" @@ -253,9 +257,9 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. 
#WarnOnMissing = false # Print out when the performance counter is missing, either of object, counter or instance. ``` - ### DFS Replication + Domain Controllers ``` +[[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] # AD, DFS R, Useful if the server hosts a DFS Replication folder or is a Domain Controller ObjectName = "DFS Replication Service Volumes" @@ -266,9 +270,9 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. #WarnOnMissing = false # Print out when the performance counter is missing, either of object, counter or instance. ``` - ### DNS Server + Domain Controllers ``` +[[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] ObjectName = "DNS" Counters = ["Dynamic Update Received","Dynamic Update Rejected","Recursive Queries","Recursive Queries Failure","Secure Update Failure","Secure Update Received","TCP Query Received","TCP Response Sent","UDP Query Received","UDP Response Sent","Total Query Received","Total Response Sent"] @@ -279,6 +283,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. ### IIS / ASP.NET ``` +[[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] # HTTP Service request queues in the Kernel before being handed over to User Mode. ObjectName = "HTTP Service Request Queues" @@ -320,9 +325,9 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. #IncludeTotal=false #Set to true to include _Total instance when querying for all (*). ``` - ### Process ``` +[[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] # Process metrics, in this case for IIS only ObjectName = "Process" @@ -332,9 +337,9 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. #IncludeTotal=false #Set to true to include _Total instance when querying for all (*). ``` - ### .NET Monitoring ``` +[[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] # .NET CLR Exceptions, in this case for IIS only ObjectName = ".NET CLR Exceptions" diff --git a/plugins/parsers/grok/README.md b/plugins/parsers/grok/README.md index 7b22d340e..03473fa95 100644 --- a/plugins/parsers/grok/README.md +++ b/plugins/parsers/grok/README.md @@ -20,6 +20,7 @@ line and the `semantic_name` is used to name the field or tag. The extension other special handling. By default all named captures are converted into string fields. +If a pattern does not have a semantic name it will not be captured. Timestamp modifiers can be used to convert captures to the timestamp of the parsed metric. If no timestamp is parsed the metric will be created using the current time. diff --git a/plugins/parsers/grok/influx_patterns.go b/plugins/parsers/grok/influx_patterns.go index 6dc990622..b7853c742 100644 --- a/plugins/parsers/grok/influx_patterns.go +++ b/plugins/parsers/grok/influx_patterns.go @@ -1,10 +1,10 @@ package grok -// DEFAULT_PATTERNS SHOULD BE KEPT IN-SYNC WITH patterns/influx-patterns const DEFAULT_PATTERNS = ` # Captures are a slightly modified version of logstash "grok" patterns, with # the format %{[:][:]} # By default all named captures are converted into string fields. +# If a pattern does not have a semantic name it will not be captured. # Modifiers can be used to convert captures to other types or tags. # Timestamp modifiers can be used to convert captures to the timestamp of the # parsed metric. 
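For readers skimming the grok change above, here is a brief, hypothetical illustration of the semantic-name rule; the pattern and field names below are invented for this note and are not part of the patch:

```
# Both captures match the same text, but only the one carrying a
# semantic name produces data on the emitted metric:
%{NUMBER:response_time:float}   # emitted as the float field "response_time"
%{NUMBER}                       # matched and consumed, but not captured
```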
diff --git a/plugins/parsers/json/README.md b/plugins/parsers/json/README.md index 13efa6568..05ed6242f 100644 --- a/plugins/parsers/json/README.md +++ b/plugins/parsers/json/README.md @@ -43,7 +43,10 @@ ignored unless specified in the `tag_key` or `json_string_fields` options. ## Time format is the time layout that should be used to interprete the ## json_time_key. The time must be `unix`, `unix_ms` or a time in the - ## "reference time". + ## "reference time". To define a different format, arrange the values from + ## the "reference time" in the example to match the format you will be + ## using. For more information on the "reference time", visit + ## https://golang.org/pkg/time/#Time.Format ## ex: json_time_format = "Mon Jan 2 15:04:05 -0700 MST 2006" ## json_time_format = "2006-01-02T15:04:05Z07:00" ## json_time_format = "unix" diff --git a/plugins/processors/enum/README.md b/plugins/processors/enum/README.md index 291c25c8f..20c6110a1 100644 --- a/plugins/processors/enum/README.md +++ b/plugins/processors/enum/README.md @@ -19,7 +19,7 @@ source field is overwritten. ## Destination field to be used for the mapped value. By default the source ## field is used, overwriting the original value. - # dest = "status_code" + dest = "status_code" ## Default value to be used for all values not contained in the mapping ## table. When unset, the unmodified value for the field will be used if no From 403a28d0780b20c32347236f400495e352cd65ee Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 10 Dec 2018 15:59:28 -0800 Subject: [PATCH 0410/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ce691b44c..4624a7654 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,8 @@ - [#5010](https://github.com/influxdata/telegraf/issues/5010): Add X-Requested-By header to graylog input. - [#5052](https://github.com/influxdata/telegraf/issues/5052): Fix metric memory not freed from the metric buffer on write. - [#3817](https://github.com/influxdata/telegraf/issues/3817): Add support for client tls certificates in postgresql inputs. +- [#5118](https://github.com/influxdata/telegraf/issues/5118): Prevent panic when marking the offset in kafka_consumer. +- [#5085](https://github.com/influxdata/telegraf/issues/5085): Add early metrics to aggregator and honor drop_original setting. 
## v1.9 [2018-11-20] From 1170367dd7c780d1df98ee1fcd74b8691769aa80 Mon Sep 17 00:00:00 2001 From: Alexander Thaller Date: Tue, 11 Dec 2018 01:20:04 +0100 Subject: [PATCH 0411/1815] Use -W flag on bsd variants in ping input (#5112) --- plugins/inputs/ping/ping.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 53c109d75..1d49cccc4 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -206,7 +206,7 @@ func (p *Ping) args(url string, system string) []string { case "darwin": args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64)) case "freebsd", "netbsd", "openbsd": - args = append(args, "-w", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64)) + args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64)) case "linux": args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', -1, 64)) default: From 03a5fc9b88bbdd5e0ab6dc8de8ca9edf26725a20 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 10 Dec 2018 16:21:21 -0800 Subject: [PATCH 0412/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4624a7654..9467af2ee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ - [#3817](https://github.com/influxdata/telegraf/issues/3817): Add support for client tls certificates in postgresql inputs. - [#5118](https://github.com/influxdata/telegraf/issues/5118): Prevent panic when marking the offset in kafka_consumer. - [#5085](https://github.com/influxdata/telegraf/issues/5085): Add early metrics to aggregator and honor drop_original setting. +- [#5112](https://github.com/influxdata/telegraf/issues/5112): Use -W flag on bsd variants in ping input. ## v1.9 [2018-11-20] From 04dfa430ef62ad5982a9a90089ec44f2e9febc8c Mon Sep 17 00:00:00 2001 From: Robert Fisher Date: Tue, 11 Dec 2018 00:42:25 +0000 Subject: [PATCH 0413/1815] Allow delta metrics in wavefront parser (#5115) --- plugins/parsers/wavefront/element.go | 6 ++++++ plugins/parsers/wavefront/parser_test.go | 23 +++++++++++++++++++++++ plugins/parsers/wavefront/scanner.go | 2 ++ plugins/parsers/wavefront/token.go | 5 +++++ 4 files changed, 36 insertions(+) diff --git a/plugins/parsers/wavefront/element.go b/plugins/parsers/wavefront/element.go index 4e40238e7..859eab1f2 100644 --- a/plugins/parsers/wavefront/element.go +++ b/plugins/parsers/wavefront/element.go @@ -37,7 +37,10 @@ type LiteralParser struct { func (ep *NameParser) parse(p *PointParser, pt *Point) error { //Valid characters are: a-z, A-Z, 0-9, hyphen ("-"), underscore ("_"), dot ("."). // Forward slash ("/") and comma (",") are allowed if metricName is enclosed in double quotes. 
+ // Delta (U+2206) is allowed as the first character of the + // metricName name, err := parseLiteral(p) + if err != nil { return err } @@ -225,6 +228,9 @@ func parseLiteral(p *PointParser) (string, error) { for tok != EOF && tok > literal_beg && tok < literal_end { p.writeBuf.WriteString(lit) tok, lit = p.scan() + if tok == DELTA { + return "", errors.New("found delta inside metric name") + } } if tok == QUOTES { return "", errors.New("found quote inside unquoted literal") diff --git a/plugins/parsers/wavefront/parser_test.go b/plugins/parsers/wavefront/parser_test.go index 85367fa1a..e7d427dd8 100644 --- a/plugins/parsers/wavefront/parser_test.go +++ b/plugins/parsers/wavefront/parser_test.go @@ -19,6 +19,26 @@ func TestParse(t *testing.T) { assert.Equal(t, parsedMetrics[0].Name(), testMetric.Name()) assert.Equal(t, parsedMetrics[0].Fields(), testMetric.Fields()) + parsedMetrics, err = parser.Parse([]byte("\u2206test.delta 1 1530939936")) + assert.NoError(t, err) + testMetric, err = metric.New("\u2206test.delta", map[string]string{}, + map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1 1530939936")) + assert.NoError(t, err) + testMetric, err = metric.New("\u0394test.delta", map[string]string{}, + map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1.234 1530939936 source=\"mysource\" tag2=value2")) + assert.NoError(t, err) + testMetric, err = metric.New("\u0394test.delta", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936")) assert.NoError(t, err) testMetric, err = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) @@ -166,6 +186,9 @@ func TestParseInvalid(t *testing.T) { _, err = parser.Parse([]byte("test.metric 1 string")) assert.Error(t, err) + _, err = parser.Parse([]byte("test.\u2206delta 1")) + assert.Error(t, err) + _, err = parser.Parse([]byte("test.metric 1 1530939936 tag_no_pair")) assert.Error(t, err) diff --git a/plugins/parsers/wavefront/scanner.go b/plugins/parsers/wavefront/scanner.go index e64516f54..a528f72ee 100644 --- a/plugins/parsers/wavefront/scanner.go +++ b/plugins/parsers/wavefront/scanner.go @@ -40,6 +40,8 @@ func (s *PointScanner) Scan() (Token, string) { return LETTER, string(ch) } else if isNumber(ch) { return NUMBER, string(ch) + } else if isDelta(ch) { + return DELTA, string(ch) } // Otherwise read the individual character.
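To make the new delta handling concrete, here is a minimal, self-contained Go sketch; it is not part of the patch, and `isDelta` is reproduced from the scanner change above purely for illustration:

```go
package main

import "fmt"

// isDelta mirrors the check added to the wavefront scanner: both the
// increment sign (U+2206) and the Greek capital delta (U+0394) are accepted.
func isDelta(ch rune) bool {
	return ch == '\u2206' || ch == '\u0394'
}

func main() {
	lines := []string{
		"\u2206test.delta 1 1530939936", // delta as the first character: valid
		"\u0394test.delta 1 1530939936", // alternate delta code point: also valid
		"test.metric 1 1530939936",      // no delta prefix: a plain metric
	}
	for _, line := range lines {
		first := []rune(line)[0]
		fmt.Printf("%q starts with delta: %v\n", line, isDelta(first))
	}
}
```

Per the parseLiteral change above, a delta anywhere other than the first character is rejected with "found delta inside metric name".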
diff --git a/plugins/parsers/wavefront/token.go b/plugins/parsers/wavefront/token.go index bbcbf4e76..5b77d0cdb 100644 --- a/plugins/parsers/wavefront/token.go +++ b/plugins/parsers/wavefront/token.go @@ -18,6 +18,7 @@ const ( SLASH BACKSLASH COMMA + DELTA literal_end // Misc characters @@ -38,4 +39,8 @@ func isNumber(ch rune) bool { return ch >= '0' && ch <= '9' } +func isDelta(ch rune) bool { + return ch == '\u2206' || ch == '\u0394' +} + var eof = rune(0) From 385be709a11c3325b6e0fb945e1c7829abcb1a4e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 10 Dec 2018 16:46:20 -0800 Subject: [PATCH 0414/1815] Update changelog --- CHANGELOG.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9467af2ee..278015cd0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,9 +17,10 @@ - [#5010](https://github.com/influxdata/telegraf/issues/5010): Add X-Requested-By header to graylog input. - [#5052](https://github.com/influxdata/telegraf/issues/5052): Fix metric memory not freed from the metric buffer on write. - [#3817](https://github.com/influxdata/telegraf/issues/3817): Add support for client tls certificates in postgresql inputs. -- [#5118](https://github.com/influxdata/telegraf/issues/5118): Prevent panic when marking the offset in kafka_consumer. -- [#5085](https://github.com/influxdata/telegraf/issues/5085): Add early metrics to aggregator and honor drop_original setting. -- [#5112](https://github.com/influxdata/telegraf/issues/5112): Use -W flag on bsd variants in ping input. +- [#5082](https://github.com/influxdata/telegraf/issues/5082): Prevent panic when marking the offset in kafka_consumer. +- [#5084](https://github.com/influxdata/telegraf/issues/5084): Add early metrics to aggregator and honor drop_original setting. +- [#5112](https://github.com/influxdata/telegraf/pull/5112): Use -W flag on bsd variants in ping input. +- [#5114](https://github.com/influxdata/telegraf/issues/5114): Allow delta metrics in wavefront parser. ## v1.9 [2018-11-20] From 4d026fce9721aa9ef223b32d749731c1dfad1df5 Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Tue, 11 Dec 2018 15:59:32 -0700 Subject: [PATCH 0415/1815] Set 1.9.1 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 278015cd0..e5bd80984 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ - [#5038](https://github.com/influxdata/telegraf/pull/5038): Add ability to tag metrics with topic in kafka_consumer. - [#5024](https://github.com/influxdata/telegraf/pull/5024): Add option to store cpu as a tag in interrupts input. 
-## v1.9.1 [unreleased] +## v1.9.1 [2018-12-11] ### Bugfixes From d954218f75ce49b536ed639bebc310691378ba8b Mon Sep 17 00:00:00 2001 From: Lasse Karstensen Date: Wed, 12 Dec 2018 01:57:08 +0100 Subject: [PATCH 0416/1815] Increase varnishstat timeout (#5130) --- plugins/inputs/varnish/varnish.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/varnish/varnish.go b/plugins/inputs/varnish/varnish.go index f1c703971..f30bead3b 100644 --- a/plugins/inputs/varnish/varnish.go +++ b/plugins/inputs/varnish/varnish.go @@ -78,7 +78,7 @@ func varnishRunner(cmdName string, UseSudo bool, InstanceName string) (*bytes.Bu var out bytes.Buffer cmd.Stdout = &out - err := internal.RunTimeout(cmd, time.Millisecond*200) + err := internal.RunTimeout(cmd, time.Millisecond*500) if err != nil { return &out, fmt.Errorf("error running varnishstat: %s", err) } From cf2b85f3838f222de9576cf633102b2cb141c48a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 11 Dec 2018 19:11:33 -0800 Subject: [PATCH 0417/1815] Add running field to procstat_lookup (#5069) --- agent/accumulator.go | 1 + plugins/inputs/procstat/README.md | 22 ++++++++----- plugins/inputs/procstat/procstat.go | 51 +++++++++++++++++------------ 3 files changed, 44 insertions(+), 30 deletions(-) diff --git a/agent/accumulator.go b/agent/accumulator.go index c29b521e9..0533a06e2 100644 --- a/agent/accumulator.go +++ b/agent/accumulator.go @@ -82,6 +82,7 @@ func (ac *accumulator) AddHistogram( } func (ac *accumulator) AddMetric(m telegraf.Metric) { + m.SetTime(m.Time().Round(ac.precision)) if m := ac.maker.MakeMetric(m); m != nil { ac.metrics <- m } diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index 852c109f0..0dd631b05 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -136,17 +136,21 @@ implemented as a WMI query. The pattern allows fuzzy matching using only - write_count (int, *telegraf* may need to be ran as **root**) - procstat_lookup - tags: - - exe (string) - - pid_finder (string) - - pid_file (string) - - pattern (string) - - prefix (string) - - user (string) - - systemd_unit (string) - - cgroup (string) - - win_service (string) + - exe + - pid_finder + - pid_file + - pattern + - prefix + - user + - systemd_unit + - cgroup + - win_service + - result - fields: - pid_count (int) + - running (int) + - result_code (int, success = 0, lookup_error = 1) + *NOTE: Resource limit > 2147483647 will be reported as 2147483647.* ### Example Output: diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 0a877b162..8424cd674 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -93,6 +93,7 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { case "pgrep": p.createPIDFinder = NewPgrep default: + p.PidFinder = "pgrep" p.createPIDFinder = defaultPIDFinder } @@ -101,7 +102,22 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { p.createProcess = defaultProcess } - procs, err := p.updateProcesses(acc, p.procs) + pids, tags, err := p.findPids(acc) + if err != nil { + fields := map[string]interface{}{ + "pid_count": 0, + "running": 0, + "result_code": 1, + } + tags := map[string]string{ + "pid_finder": p.PidFinder, + "result": "lookup_error", + } + acc.AddFields("procstat_lookup", fields, tags) + return err + } + + procs, err := p.updateProcesses(pids, tags, p.procs) if err != nil { acc.AddError(fmt.Errorf("E! 
Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", p.Exe, p.PidFile, p.Pattern, p.User, err.Error())) @@ -109,14 +125,23 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { p.procs = procs for _, proc := range p.procs { - p.addMetrics(proc, acc) + p.addMetric(proc, acc) } + fields := map[string]interface{}{ + "pid_count": len(pids), + "running": len(procs), + "result_code": 0, + } + tags["pid_finder"] = p.PidFinder + tags["result"] = "success" + acc.AddFields("procstat_lookup", fields, tags) + return nil } // Add metrics a single Process -func (p *Procstat) addMetrics(proc Process, acc telegraf.Accumulator) { +func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) { var prefix string if p.Prefix != "" { prefix = p.Prefix + "_" @@ -242,12 +267,7 @@ func (p *Procstat) addMetrics(proc Process, acc telegraf.Accumulator) { } // Update monitored Processes -func (p *Procstat) updateProcesses(acc telegraf.Accumulator, prevInfo map[PID]Process) (map[PID]Process, error) { - pids, tags, err := p.findPids(acc) - if err != nil { - return nil, err - } - +func (p *Procstat) updateProcesses(pids []PID, tags map[string]string, prevInfo map[PID]Process) (map[PID]Process, error) { procs := make(map[PID]Process, len(prevInfo)) for _, pid := range pids { @@ -327,18 +347,7 @@ func (p *Procstat) findPids(acc telegraf.Accumulator) ([]PID, map[string]string, err = fmt.Errorf("Either exe, pid_file, user, pattern, systemd_unit, cgroup, or win_service must be specified") } - rTags := make(map[string]string) - for k, v := range tags { - rTags[k] = v - } - - //adds a metric with info on the pgrep query - fields := make(map[string]interface{}) - tags["pid_finder"] = p.PidFinder - fields["pid_count"] = len(pids) - acc.AddFields("procstat_lookup", fields, tags) - - return pids, rTags, err + return pids, tags, err } // execCommand is so tests can mock out exec.Command usage. From 4d3519756c278c90d1207518bcc97a10c28555f3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 11 Dec 2018 19:12:00 -0800 Subject: [PATCH 0418/1815] Add support for sending a request body to http input (#5074) --- plugins/inputs/http/README.md | 7 +++ plugins/inputs/http/http.go | 45 +++++++++++++--- plugins/inputs/http/http_test.go | 93 ++++++++++++++++++++++++++++++++ 3 files changed, 138 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/http/README.md b/plugins/inputs/http/README.md index 25d3d2b2d..240fd90c9 100644 --- a/plugins/inputs/http/README.md +++ b/plugins/inputs/http/README.md @@ -19,6 +19,13 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The ## Optional HTTP headers # headers = {"X-Special-Header" = "Special-Value"} + ## HTTP entity-body to send with POST/PUT requests. + # body = "" + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. 
+ # content_encoding = "identity" + ## Optional HTTP Basic Auth Credentials # username = "username" # password = "pa$$word" diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index f5a2544c8..6d2d528ba 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -3,6 +3,7 @@ package http import ( "errors" "fmt" + "io" "io/ioutil" "net/http" "strings" @@ -17,17 +18,19 @@ import ( ) type HTTP struct { - URLs []string `toml:"urls"` - Method string + URLs []string `toml:"urls"` + Method string `toml:"method"` + Body string `toml:"body"` + ContentEncoding string `toml:"content_encoding"` - Headers map[string]string + Headers map[string]string `toml:"headers"` // HTTP Basic Auth Credentials - Username string - Password string + Username string `toml:"username"` + Password string `toml:"password"` tls.ClientConfig - Timeout internal.Duration + Timeout internal.Duration `toml:"timeout"` client *http.Client @@ -52,6 +55,13 @@ var sampleConfig = ` # username = "username" # password = "pa$$word" + ## HTTP entity-body to send with POST/PUT requests. + # body = "" + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "identity" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -132,11 +142,20 @@ func (h *HTTP) gatherURL( acc telegraf.Accumulator, url string, ) error { - request, err := http.NewRequest(h.Method, url, nil) + body, err := makeRequestBodyReader(h.ContentEncoding, h.Body) if err != nil { return err } + request, err := http.NewRequest(h.Method, url, body) + if err != nil { + return err + } + + if h.ContentEncoding == "gzip" { + request.Header.Set("Content-Encoding", "gzip") + } + for k, v := range h.Headers { if strings.ToLower(k) == "host" { request.Host = v @@ -183,6 +202,18 @@ func (h *HTTP) gatherURL( return nil } +func makeRequestBodyReader(contentEncoding, body string) (io.Reader, error) { + var err error + var reader io.Reader = strings.NewReader(body) + if contentEncoding == "gzip" { + reader, err = internal.CompressWithGzip(reader) + if err != nil { + return nil, err + } + } + return reader, nil +} + func init() { inputs.Add("http", func() telegraf.Input { return &HTTP{ diff --git a/plugins/inputs/http/http_test.go b/plugins/inputs/http/http_test.go index 4cd465bce..7ac05e135 100644 --- a/plugins/inputs/http/http_test.go +++ b/plugins/inputs/http/http_test.go @@ -1,6 +1,9 @@ package http_test import ( + "compress/gzip" + "fmt" + "io/ioutil" "net/http" "net/http/httptest" "testing" @@ -149,3 +152,93 @@ const simpleJSON = ` "a": 1.2 } ` + +func TestBodyAndContentEncoding(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + url := fmt.Sprintf("http://%s", ts.Listener.Addr().String()) + + tests := []struct { + name string + plugin *plugin.HTTP + queryHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) + }{ + { + name: "no body", + plugin: &plugin.HTTP{ + Method: "POST", + URLs: []string{url}, + }, + queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + require.Equal(t, []byte(""), body) + w.WriteHeader(http.StatusOK) + }, + }, + { + name: "post body", + plugin: &plugin.HTTP{ + URLs: []string{url}, + Method: "POST", + Body: "test", + }, + queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + body, err := ioutil.ReadAll(r.Body) + 
require.NoError(t, err) + require.Equal(t, []byte("test"), body) + w.WriteHeader(http.StatusOK) + }, + }, + { + name: "get method body is sent", + plugin: &plugin.HTTP{ + URLs: []string{url}, + Method: "GET", + Body: "test", + }, + queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + require.Equal(t, []byte("test"), body) + w.WriteHeader(http.StatusOK) + }, + }, + { + name: "gzip encoding", + plugin: &plugin.HTTP{ + URLs: []string{url}, + Method: "GET", + Body: "test", + ContentEncoding: "gzip", + }, + queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.Header.Get("Content-Encoding"), "gzip") + + gr, err := gzip.NewReader(r.Body) + require.NoError(t, err) + body, err := ioutil.ReadAll(gr) + require.NoError(t, err) + require.Equal(t, []byte("test"), body) + w.WriteHeader(http.StatusOK) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + tt.queryHandlerFunc(t, w, r) + }) + + parser, err := parsers.NewParser(&parsers.Config{DataFormat: "influx"}) + require.NoError(t, err) + + tt.plugin.SetParser(parser) + + var acc testutil.Accumulator + err = tt.plugin.Gather(&acc) + require.NoError(t, err) + }) + } +} From e2ccce9e447dc32215c6e8841b8089b0f2a73fbc Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 11 Dec 2018 16:58:44 -0800 Subject: [PATCH 0419/1815] Update changelog --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e5bd80984..6fdd21e55 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,14 @@ - [#5047](https://github.com/influxdata/telegraf/pull/5047): Add support for unix and unix_ms timestamps to csv parser. - [#5038](https://github.com/influxdata/telegraf/pull/5038): Add ability to tag metrics with topic in kafka_consumer. - [#5024](https://github.com/influxdata/telegraf/pull/5024): Add option to store cpu as a tag in interrupts input. +- [#5074](https://github.com/influxdata/telegraf/pull/5074): Add support for sending a request body to http input. +- [#5069](https://github.com/influxdata/telegraf/pull/5069): Add running field to procstat_lookup. + +## v1.9.2 [unreleased] + +### Bugfixes + +- [#5130](https://github.com/influxdata/telegraf/pull/5130): Increase varnishstat timeout. ## v1.9.1 [2018-12-11] From 503f3ce0a6eb02fa72b14af8fa6b2558f8641b81 Mon Sep 17 00:00:00 2001 From: Mark Wilkinson - m82labs Date: Thu, 13 Dec 2018 14:42:35 -0500 Subject: [PATCH 0420/1815] Remove storage calculation for non Azure managed instances and add server version (#5135) - Removed the storage calculation for SQL Server instances that are NOT Azure Managed Instances, this reduces the time it takes to get this data on an instance with a lot of databases and/or database files. - Added the SQL Server version back to the server properties query. 
--- plugins/inputs/sqlserver/sqlserver.go | 20 +++----------------- 1 file changed, 3 insertions(+), 17 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index bf1fb9af7..d6aa231f1 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -401,7 +401,6 @@ const sqlServerPropertiesV2 = `DECLARE @sys_info TABLE ( IF OBJECT_ID('master.sys.dm_os_sys_info') IS NOT NULL BEGIN - IF SERVERPROPERTY('EngineEdition') = 8 -- Managed Instance INSERT INTO @sys_info ( cpu_count, server_memory, sku, engine_edition, hardware_type, total_storage_mb, available_storage_mb, uptime ) SELECT TOP(1) @@ -418,19 +417,6 @@ BEGIN ELSE BEGIN - DECLARE @total_disk_size_mb BIGINT, - @available_space_mb BIGINT - - SELECT @total_disk_size_mb = sum(total_disk_size_mb), - @available_space_mb = sum(free_disk_space_mb) - FROM ( - SELECT distinct logical_volume_name AS LogicalName, - total_bytes/(1024*1024)as total_disk_size_mb, - available_bytes /(1024*1024) free_disk_space_mb - FROM sys.master_files AS f - CROSS APPLY sys.dm_os_volume_stats(f.database_id, f.file_id) - ) as osVolumes - INSERT INTO @sys_info ( cpu_count, server_memory, sku, engine_edition, hardware_type, total_storage_mb, available_storage_mb, uptime ) SELECT cpu_count, (SELECT total_physical_memory_kb FROM sys.dm_os_sys_memory) AS server_memory, @@ -440,13 +426,12 @@ BEGIN WHEN 'NONE' THEN 'PHYSICAL Machine' ELSE virtual_machine_type_desc END AS hardware_type, - @total_disk_size_mb, - @available_space_mb, + NULL, + NULL, DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) FROM sys.dm_os_sys_info END END - SELECT 'sqlserver_server_properties' AS [measurement], REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], s.cpu_count, @@ -457,6 +442,7 @@ SELECT 'sqlserver_server_properties' AS [measurement], s.total_storage_mb, s.available_storage_mb, s.uptime, + SERVERPROPERTY('ProductVersion') AS sql_version, db_online, db_restoring, db_recovering, From dbea6dca307730089a9de79aafb64d5e57d58af9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 13 Dec 2018 11:43:51 -0800 Subject: [PATCH 0421/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6fdd21e55..dcd0b27e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ ### Bugfixes - [#5130](https://github.com/influxdata/telegraf/pull/5130): Increase varnishstat timeout. +- [#5135](https://github.com/influxdata/telegraf/pull/5135): Remove storage calculation for non Azure managed instances and add server version. ## v1.9.1 [2018-12-11] From 6b5ddbbf669ff17f803a3f2eb352900279747b41 Mon Sep 17 00:00:00 2001 From: dbergmanskytap <39205372+dbergmanskytap@users.noreply.github.com> Date: Thu, 13 Dec 2018 11:49:19 -0800 Subject: [PATCH 0422/1815] Include DEVLINKS in available diskio udev properties (#5116) --- plugins/inputs/diskio/README.md | 4 ++++ plugins/inputs/diskio/diskio_linux.go | 19 ++++++++++++++++++- plugins/inputs/diskio/diskio_linux_test.go | 4 ++++ 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/diskio/README.md b/plugins/inputs/diskio/README.md index 3cec5cf55..95ed16ec0 100644 --- a/plugins/inputs/diskio/README.md +++ b/plugins/inputs/diskio/README.md @@ -19,6 +19,10 @@ The diskio input plugin gathers metrics about disk traffic and timing. ## Currently only Linux is supported via udev properties. 
You can view ## available properties for a device by running: ## 'udevadm info -q property -n /dev/sda' + ## Note: Most, but not all, udev properties can be accessed this way. Properties + ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. + ## DEVLINKS, however, can be used as a tag as of Telegraf 1.10 + ## For more info see https://github.com/influxdata/telegraf/issues/3663 # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] # ## Using the same metadata source as device_tags, you can also customize the diff --git a/plugins/inputs/diskio/diskio_linux.go b/plugins/inputs/diskio/diskio_linux.go index 38240a0a1..d27fd3b46 100644 --- a/plugins/inputs/diskio/diskio_linux.go +++ b/plugins/inputs/diskio/diskio_linux.go @@ -2,6 +2,7 @@ package diskio import ( "bufio" + "bytes" "fmt" "os" "strings" @@ -52,9 +53,21 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { defer f.Close() scnr := bufio.NewScanner(f) + var devlinks bytes.Buffer for scnr.Scan() { l := scnr.Text() - if len(l) < 4 || l[:2] != "E:" { + if len(l) < 4 { + continue + } + if l[:2] == "S:" { + if devlinks.Len() > 0 { + devlinks.WriteString(" ") + } + devlinks.WriteString("/dev/") + devlinks.WriteString(l[2:]) + continue + } + if l[:2] != "E:" { continue } kv := strings.SplitN(l[2:], "=", 2) @@ -64,5 +77,9 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { di[kv[0]] = kv[1] } + if devlinks.Len() > 0 { + di["DEVLINKS"] = devlinks.String() + } + return di, nil } diff --git a/plugins/inputs/diskio/diskio_linux_test.go b/plugins/inputs/diskio/diskio_linux_test.go index b18bb67a8..9e79be165 100644 --- a/plugins/inputs/diskio/diskio_linux_test.go +++ b/plugins/inputs/diskio/diskio_linux_test.go @@ -14,6 +14,8 @@ import ( var nullDiskInfo = []byte(` E:MY_PARAM_1=myval1 E:MY_PARAM_2=myval2 +S:foo/bar/devlink +S:foo/bar/devlink1 `) // setupNullDisk sets up fake udev info as if /dev/null were a disk. @@ -47,6 +49,7 @@ func TestDiskInfo(t *testing.T) { require.NoError(t, err) assert.Equal(t, "myval1", di["MY_PARAM_1"]) assert.Equal(t, "myval2", di["MY_PARAM_2"]) + assert.Equal(t, "/dev/foo/bar/devlink /dev/foo/bar/devlink1", di["DEVLINKS"]) // test that data is cached err = clean() @@ -56,6 +59,7 @@ func TestDiskInfo(t *testing.T) { require.NoError(t, err) assert.Equal(t, "myval1", di["MY_PARAM_1"]) assert.Equal(t, "myval2", di["MY_PARAM_2"]) + assert.Equal(t, "/dev/foo/bar/devlink /dev/foo/bar/devlink1", di["DEVLINKS"]) // unfortunately we can't adjust mtime on /dev/null to test cache invalidation } From 8526aa5c874edcc9adca5e0f305a2cfcb2b39a6c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 13 Dec 2018 11:51:53 -0800 Subject: [PATCH 0423/1815] Update changelog --- CHANGELOG.md | 1 + plugins/inputs/diskio/README.md | 4 +--- plugins/inputs/diskio/diskio.go | 2 ++ 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dcd0b27e2..8a3d3328d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ - [#5024](https://github.com/influxdata/telegraf/pull/5024): Add option to store cpu as a tag in interrupts input. - [#5074](https://github.com/influxdata/telegraf/pull/5074): Add support for sending a request body to http input. - [#5069](https://github.com/influxdata/telegraf/pull/5069): Add running field to procstat_lookup. +- [#5116](https://github.com/influxdata/telegraf/pull/5116): Include DEVLINKS in available diskio udev properties. 
## v1.9.2 [unreleased] diff --git a/plugins/inputs/diskio/README.md b/plugins/inputs/diskio/README.md index 95ed16ec0..07bc71456 100644 --- a/plugins/inputs/diskio/README.md +++ b/plugins/inputs/diskio/README.md @@ -20,9 +20,7 @@ The diskio input plugin gathers metrics about disk traffic and timing. ## available properties for a device by running: ## 'udevadm info -q property -n /dev/sda' ## Note: Most, but not all, udev properties can be accessed this way. Properties - ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. - ## DEVLINKS, however, can be used as a tag as of Telegraf 1.10 - ## For more info see https://github.com/influxdata/telegraf/issues/3663 + ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] # ## Using the same metadata source as device_tags, you can also customize the diff --git a/plugins/inputs/diskio/diskio.go b/plugins/inputs/diskio/diskio.go index 54e74d518..e0c6243bb 100644 --- a/plugins/inputs/diskio/diskio.go +++ b/plugins/inputs/diskio/diskio.go @@ -46,6 +46,8 @@ var diskIOsampleConfig = ` ## Currently only Linux is supported via udev properties. You can view ## available properties for a device by running: ## 'udevadm info -q property -n /dev/sda' + ## Note: Most, but not all, udev properties can be accessed this way. Properties + ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] # ## Using the same metadata source as device_tags, you can also customize the From f794d5b08adc74564153baeed7e47113ff6f4235 Mon Sep 17 00:00:00 2001 From: Raphael Couto Date: Thu, 13 Dec 2018 17:57:03 -0200 Subject: [PATCH 0424/1815] Fix error sending empty tag value in azure monitor output (#5083) --- plugins/outputs/azure_monitor/azure_monitor.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go index 5c435ac0d..e52d66b99 100644 --- a/plugins/outputs/azure_monitor/azure_monitor.go +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -340,6 +340,10 @@ func hashIDWithTagKeysOnly(m telegraf.Metric) uint64 { h.Write([]byte(m.Name())) h.Write([]byte("\n")) for _, tag := range m.TagList() { + if tag.Key == "" || tag.Value == "" { + continue + } + h.Write([]byte(tag.Key)) h.Write([]byte("\n")) } From 0485119716a69828aacefa00b6adab0210ae168d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 13 Dec 2018 11:58:44 -0800 Subject: [PATCH 0425/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a3d3328d..f9d62fc50 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ - [#5130](https://github.com/influxdata/telegraf/pull/5130): Increase varnishstat timeout. - [#5135](https://github.com/influxdata/telegraf/pull/5135): Remove storage calculation for non Azure managed instances and add server version. +- [#5083](https://github.com/influxdata/telegraf/pull/5083): Fix error sending empty tag value in azure_monitor output. 
## v1.9.1 [2018-12-11] From 8906e2796bd47a222f42ea1aa28e2c9ad530e719 Mon Sep 17 00:00:00 2001 From: Samuel-BF <36996277+Samuel-BF@users.noreply.github.com> Date: Thu, 13 Dec 2018 21:25:49 +0100 Subject: [PATCH 0426/1815] Add size to filecount input (#4778) --- Gopkg.lock | 9 ++ Gopkg.toml | 5 + internal/globpath/globpath.go | 38 ++++- plugins/inputs/filecount/README.md | 16 +- plugins/inputs/filecount/filecount.go | 152 ++++++++++-------- plugins/inputs/filecount/filecount_test.go | 123 +++++++------- .../filecount/testdata/subdir/nested2/qux | 7 + 7 files changed, 219 insertions(+), 131 deletions(-) create mode 100644 plugins/inputs/filecount/testdata/subdir/nested2/qux diff --git a/Gopkg.lock b/Gopkg.lock index a2df3c81d..d043bccd0 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -665,6 +665,14 @@ pruneopts = "" revision = "615a14ed75099c9eaac6949e22ac2341bf9d3197" +[[projects]] + digest = "1:a12b6f20a7e5eb7412d2e5cd15e1262a021f735fa958d664d9e7ba2160eefd0a" + name = "github.com/karrick/godirwalk" + packages = ["."] + pruneopts = "" + revision = "2de2192f9e35ce981c152a873ed943b93b79ced4" + version = "v1.7.5" + [[projects]] branch = "master" digest = "1:63e7368fcf6b54804076eaec26fd9cf0c4466166b272393db4b93102e1e962df" @@ -1480,6 +1488,7 @@ "github.com/jackc/pgx/pgtype", "github.com/jackc/pgx/stdlib", "github.com/kardianos/service", + "github.com/karrick/godirwalk", "github.com/kballard/go-shellquote", "github.com/matttproud/golang_protobuf_extensions/pbutil", "github.com/miekg/dns", diff --git a/Gopkg.toml b/Gopkg.toml index 791e265e8..80df324dc 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -245,3 +245,8 @@ [[override]] name = "github.com/vishvananda/netlink" revision = "b2de5d10e38ecce8607e6b438b6d174f389a004e" + +[[constraint]] + name = "github.com/karrick/godirwalk" + version = "1.7.5" + diff --git a/internal/globpath/globpath.go b/internal/globpath/globpath.go index a08731ad9..fc6a43618 100644 --- a/internal/globpath/globpath.go +++ b/internal/globpath/globpath.go @@ -1,7 +1,6 @@ package globpath import ( - "fmt" "os" "path/filepath" "strings" @@ -9,12 +8,10 @@ import ( "github.com/gobwas/glob" ) -var sepStr = fmt.Sprintf("%v", string(os.PathSeparator)) - type GlobPath struct { path string hasMeta bool - hasSuperMeta bool + HasSuperMeta bool rootGlob string g glob.Glob } @@ -22,13 +19,13 @@ type GlobPath struct { func Compile(path string) (*GlobPath, error) { out := GlobPath{ hasMeta: hasMeta(path), - hasSuperMeta: hasSuperMeta(path), + HasSuperMeta: hasSuperMeta(path), path: path, } // if there are no glob meta characters in the path, don't bother compiling // a glob object - if !out.hasMeta || !out.hasSuperMeta { + if !out.hasMeta || !out.HasSuperMeta { return &out, nil } @@ -43,6 +40,7 @@ func Compile(path string) (*GlobPath, error) { return &out, nil } +// Match returns all files matching the expression func (g *GlobPath) Match() map[string]os.FileInfo { out := make(map[string]os.FileInfo) if !g.hasMeta { @@ -52,7 +50,7 @@ func (g *GlobPath) Match() map[string]os.FileInfo { } return out } - if !g.hasSuperMeta { + if !g.HasSuperMeta { files, _ := filepath.Glob(g.path) for _, file := range files { info, err := os.Stat(file) @@ -79,6 +77,32 @@ func (g *GlobPath) Match() map[string]os.FileInfo { return out } +// MatchString tests a string against the glob +func (g *GlobPath) MatchString(path string) bool { + if !g.HasSuperMeta { + res, _ := filepath.Match(g.path, path) + return res + } + return g.g.Match(path) +} + +// GetRoots returns a list of files and directories which should be optimal
+// prefixes of matching files when you have a super-meta in your expression: +// - any directory under these roots may contain a matching file +// - no file outside of these roots can match the pattern +// Note that it returns both files and directories. +func (g *GlobPath) GetRoots() []string { + if !g.hasMeta { + return []string{g.path} + } + if !g.HasSuperMeta { + matches, _ := filepath.Glob(g.path) + return matches + } + roots, _ := filepath.Glob(g.rootGlob) + return roots +} + // hasMeta reports whether path contains any magic glob characters. func hasMeta(path string) bool { return strings.IndexAny(path, "*?[") >= 0 diff --git a/plugins/inputs/filecount/README.md b/plugins/inputs/filecount/README.md index cf11b7d90..260d18413 100644 --- a/plugins/inputs/filecount/README.md +++ b/plugins/inputs/filecount/README.md @@ -8,8 +8,17 @@ Counts files in directories that match certain criteria. # Count files in a directory [[inputs.filecount]] ## Directory to gather stats about. + ## deprecated in 1.9; use the directories option directory = "/var/cache/apt/archives" + ## Directories to gather stats about. + ## This accepts standard unix glob matching rules, but with the addition of + ## ** as a "super asterisk", i.e.: + ## /var/log/** -> recursively find all directories in /var/log and count files in each directory + ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directory + ## /var/log -> count all files in /var/log and all of its subdirectories + directories = ["/var/cache/apt/archives"] + ## Only count files that match the name pattern. Defaults to "*". name = "*.deb" @@ -35,16 +44,17 @@ Counts files in directories that match certain criteria. - filecount - count (int) + - size_bytes (int) ### Tags: - All measurements have the following tags: - - directory (the directory path, as specified in the config) + - directory (the directory path) ### Example Output: ``` $ telegraf --config /etc/telegraf/telegraf.conf --input-filter filecount --test -> filecount,directory=/var/cache/apt,host=czernobog count=7i 1530034445000000000 -> filecount,directory=/tmp,host=czernobog count=17i 1530034445000000000 +> filecount,directory=/var/cache/apt,host=czernobog count=7i,size_bytes=7438336i 1530034445000000000 +> filecount,directory=/tmp,host=czernobog count=17i,size_bytes=28934786i 1530034445000000000 ``` diff --git a/plugins/inputs/filecount/filecount.go b/plugins/inputs/filecount/filecount.go index d613f3b77..f8840721b 100644 --- a/plugins/inputs/filecount/filecount.go +++ b/plugins/inputs/filecount/filecount.go @@ -1,15 +1,16 @@ package filecount import ( - "fmt" "os" "path/filepath" + "strings" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/karrick/godirwalk" ) const sampleConfig = ` @@ -55,16 +56,17 @@ type FileCount struct { Size internal.Size MTime internal.Duration `toml:"mtime"` fileFilters []fileFilterFunc + globPaths []globpath.GlobPath } -type fileFilterFunc func(os.FileInfo) (bool, error) - func (_ *FileCount) Description() string { return "Count files in a directory" } func (_ *FileCount) SampleConfig() string { return sampleConfig } +type fileFilterFunc func(os.FileInfo) (bool, error) + func rejectNilFilters(filters []fileFilterFunc) []fileFilterFunc { filtered := make([]fileFilterFunc, 0, len(filters)) for _, f := range filters { @@ -137,48 +139,6 @@ func absDuration(x time.Duration) time.Duration { return x
} -func (fc *FileCount) count(acc telegraf.Accumulator, basedir string, recursive bool) { - numFiles := int64(0) - walkFn := func(path string, file os.FileInfo, err error) error { - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - if path == basedir { - return nil - } - match, err := fc.filter(file) - if err != nil { - acc.AddError(err) - return nil - } - if match { - numFiles++ - } - if !recursive && file.IsDir() { - return filepath.SkipDir - } - return nil - } - - err := filepath.Walk(basedir, walkFn) - if err != nil { - acc.AddError(err) - return - } - - acc.AddFields("filecount", - map[string]interface{}{ - "count": numFiles, - }, - map[string]string{ - "directory": basedir, - }, - ) -} - func (fc *FileCount) initFileFilters() { filters := []fileFilterFunc{ fc.nameFilter(), @@ -189,6 +149,66 @@ func (fc *FileCount) initFileFilters() { fc.fileFilters = rejectNilFilters(filters) } +func (fc *FileCount) count(acc telegraf.Accumulator, basedir string, glob globpath.GlobPath) { + childCount := make(map[string]int64) + childSize := make(map[string]int64) + walkFn := func(path string, de *godirwalk.Dirent) error { + if path == basedir { + return nil + } + file, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + match, err := fc.filter(file) + if err != nil { + acc.AddError(err) + return nil + } + if match { + parent := path[:strings.LastIndex(path, "/")] + childCount[parent]++ + childSize[parent] += file.Size() + } + if file.IsDir() && !fc.Recursive && !glob.HasSuperMeta { + return filepath.SkipDir + } + return nil + } + postChildrenFn := func(path string, de *godirwalk.Dirent) error { + if glob.MatchString(path) { + gauge := map[string]interface{}{ + "count": childCount[path], + "size_bytes": childSize[path], + } + acc.AddGauge("filecount", gauge, + map[string]string{ + "directory": path, + }) + } + parent := path[:strings.LastIndex(path, "/")] + if fc.Recursive { + childCount[parent] += childCount[path] + childSize[parent] += childSize[path] + } + delete(childCount, path) + delete(childSize, path) + return nil + } + + err := godirwalk.Walk(basedir, &godirwalk.Options{ + Callback: walkFn, + PostChildrenCallback: postChildrenFn, + Unsorted: true, + }) + if err != nil { + acc.AddError(err) + } +} + func (fc *FileCount) filter(file os.FileInfo) (bool, error) { if fc.fileFilters == nil { fc.initFileFilters() @@ -208,19 +228,30 @@ func (fc *FileCount) filter(file os.FileInfo) (bool, error) { } func (fc *FileCount) Gather(acc telegraf.Accumulator) error { - globDirs := fc.getDirs() - dirs, err := getCompiledDirs(globDirs) - if err != nil { - return err + if fc.globPaths == nil { + fc.initGlobPaths(acc) } - for _, dir := range dirs { - fc.count(acc, dir, fc.Recursive) + for _, glob := range fc.globPaths { + for _, dir := range onlyDirectories(glob.GetRoots()) { + fc.count(acc, dir, glob) + } } return nil } +func onlyDirectories(directories []string) []string { + out := make([]string, 0) + for _, path := range directories { + info, err := os.Stat(path) + if err == nil && info.IsDir() { + out = append(out, path) + } + } + return out +} + func (fc *FileCount) getDirs() []string { dirs := make([]string, len(fc.Directories)) for i, dir := range fc.Directories { @@ -234,21 +265,16 @@ func (fc *FileCount) getDirs() []string { return dirs } -func getCompiledDirs(dirs []string) ([]string, error) { - compiledDirs := []string{} - for _, dir := range dirs { - g, err := globpath.Compile(dir) +func (fc *FileCount) initGlobPaths(acc 
telegraf.Accumulator) { + fc.globPaths = []globpath.GlobPath{} + for _, directory := range fc.getDirs() { + glob, err := globpath.Compile(directory) if err != nil { - return nil, fmt.Errorf("could not compile glob %v: %v", dir, err) - } - - for path, file := range g.Match() { - if file.IsDir() { - compiledDirs = append(compiledDirs, path) - } + acc.AddError(err) + } else { + fc.globPaths = append(fc.globPaths, *glob) } } - return compiledDirs, nil } func NewFileCount() *FileCount { diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go index 7a48c2166..2294e8ce6 100644 --- a/plugins/inputs/filecount/filecount_test.go +++ b/plugins/inputs/filecount/filecount_test.go @@ -14,108 +14,112 @@ import ( ) func TestNoFilters(t *testing.T) { - fc := getNoFilterFileCount("*") - matches := []string{"foo", "bar", "baz", "qux", "subdir/", "subdir/quux", "subdir/quuz"} - - acc := testutil.Accumulator{} - acc.GatherError(fc.Gather) - - require.True(t, assertFileCount(&acc, "testdata", len(matches))) + fc := getNoFilterFileCount() + matches := []string{"foo", "bar", "baz", "qux", + "subdir/", "subdir/quux", "subdir/quuz", + "subdir/nested2", "subdir/nested2/qux"} + fileCountEquals(t, fc, len(matches), 9084) } func TestNoFiltersOnChildDir(t *testing.T) { - fc := getNoFilterFileCount("testdata/*") - matches := []string{"subdir/quux", "subdir/quuz"} + fc := getNoFilterFileCount() + fc.Directories = []string{getTestdataDir() + "/*"} + matches := []string{"subdir/quux", "subdir/quuz", + "subdir/nested2/qux", "subdir/nested2"} + tags := map[string]string{"directory": getTestdataDir() + "/subdir"} acc := testutil.Accumulator{} acc.GatherError(fc.Gather) - require.True(t, assertFileCount(&acc, "testdata/subdir", len(matches))) + require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches)))) + require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(4542))) +} + +func TestNoRecursiveButSuperMeta(t *testing.T) { + fc := getNoFilterFileCount() + fc.Recursive = false + fc.Directories = []string{getTestdataDir() + "/**"} + matches := []string{"subdir/quux", "subdir/quuz", "subdir/nested2"} + + tags := map[string]string{"directory": getTestdataDir() + "/subdir"} + acc := testutil.Accumulator{} + acc.GatherError(fc.Gather) + + require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches)))) + require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(4096))) } func TestNameFilter(t *testing.T) { - fc := getNoFilterFileCount("testdata") + fc := getNoFilterFileCount() fc.Name = "ba*" matches := []string{"bar", "baz"} - - acc := testutil.Accumulator{} - acc.GatherError(fc.Gather) - - require.True(t, assertFileCount(&acc, "testdata", len(matches))) + fileCountEquals(t, fc, len(matches), 0) } func TestNonRecursive(t *testing.T) { - fc := getNoFilterFileCount("testdata") + fc := getNoFilterFileCount() fc.Recursive = false matches := []string{"foo", "bar", "baz", "qux", "subdir"} + fileCountEquals(t, fc, len(matches), 4542) +} + +func TestDoubleAndSimpleStar(t *testing.T) { + fc := getNoFilterFileCount() + fc.Directories = []string{getTestdataDir() + "/**/*"} + matches := []string{"qux"} + tags := map[string]string{"directory": getTestdataDir() + "/subdir/nested2"} acc := testutil.Accumulator{} acc.GatherError(fc.Gather) - require.True(t, assertFileCount(&acc, "testdata", len(matches))) + require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches)))) + require.True(t, acc.HasPoint("filecount", tags, "size_bytes", 
int64(446))) } func TestRegularOnlyFilter(t *testing.T) { - fc := getNoFilterFileCount("testdata") + fc := getNoFilterFileCount() fc.RegularOnly = true matches := []string{ "foo", "bar", "baz", "qux", "subdir/quux", "subdir/quuz", - } - - acc := testutil.Accumulator{} - acc.GatherError(fc.Gather) - - require.True(t, assertFileCount(&acc, "testdata", len(matches))) + "subdir/nested2/qux"} + fileCountEquals(t, fc, len(matches), 892) } func TestSizeFilter(t *testing.T) { - fc := getNoFilterFileCount("testdata") + fc := getNoFilterFileCount() fc.Size = internal.Size{Size: -100} - matches := []string{"foo", "bar", "baz", "subdir/quux", "subdir/quuz"} - - acc := testutil.Accumulator{} - acc.GatherError(fc.Gather) - - require.True(t, assertFileCount(&acc, "testdata", len(matches))) + matches := []string{"foo", "bar", "baz", + "subdir/quux", "subdir/quuz"} + fileCountEquals(t, fc, len(matches), 0) fc.Size = internal.Size{Size: 100} - matches = []string{"qux"} - - acc = testutil.Accumulator{} - acc.GatherError(fc.Gather) - - require.True(t, assertFileCount(&acc, "testdata", len(matches))) + matches = []string{"qux", "subdir/nested2/qux"} + fileCountEquals(t, fc, len(matches), 892) } func TestMTimeFilter(t *testing.T) { - oldFile := filepath.Join(getTestdataDir("testdata"), "baz") + oldFile := filepath.Join(getTestdataDir(), "baz") mtime := time.Date(1979, time.December, 14, 18, 25, 5, 0, time.UTC) if err := os.Chtimes(oldFile, mtime, mtime); err != nil { t.Skip("skipping mtime filter test.") } fileAge := time.Since(mtime) - (60 * time.Second) - fc := getNoFilterFileCount("testdata") + fc := getNoFilterFileCount() fc.MTime = internal.Duration{Duration: -fileAge} - matches := []string{"foo", "bar", "qux", "subdir/", "subdir/quux", "subdir/quuz"} - - acc := testutil.Accumulator{} - acc.GatherError(fc.Gather) - - require.True(t, assertFileCount(&acc, "testdata", len(matches))) + matches := []string{"foo", "bar", "qux", + "subdir/", "subdir/quux", "subdir/quuz", + "subdir/nested2", "subdir/nested2/qux"} + fileCountEquals(t, fc, len(matches), 9084) fc.MTime = internal.Duration{Duration: fileAge} matches = []string{"baz"} - - acc = testutil.Accumulator{} - acc.GatherError(fc.Gather) - - require.True(t, assertFileCount(&acc, "testdata", len(matches))) + fileCountEquals(t, fc, len(matches), 0) } func getNoFilterFileCount() FileCount { return FileCount{ - Directories: []string{getTestdataDir(dir)}, + Directories: []string{getTestdataDir()}, Name: "*", Recursive: true, RegularOnly: false, @@ -125,12 +129,15 @@ } } -func getTestdataDir(dir string) string { +func getTestdataDir() string { _, filename, _, _ := runtime.Caller(1) - return strings.Replace(filename, "filecount_test.go", dir, 1) + return strings.Replace(filename, "filecount_test.go", "testdata", 1) } -func assertFileCount(acc *testutil.Accumulator, expectedDir string, expectedCount int) bool { - tags := map[string]string{"directory": getTestdataDir(expectedDir)} - return acc.HasPoint("filecount", tags, "count", int64(expectedCount)) +func fileCountEquals(t *testing.T, fc FileCount, expectedCount int, expectedSize int) { + tags := map[string]string{"directory": getTestdataDir()} + acc := testutil.Accumulator{} + acc.GatherError(fc.Gather) + require.True(t, acc.HasPoint("filecount", tags, "count", int64(expectedCount))) + require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(expectedSize))) } diff --git
a/plugins/inputs/filecount/testdata/subdir/nested2/qux b/plugins/inputs/filecount/testdata/subdir/nested2/qux new file mode 100644 index 000000000..c7288f23d --- /dev/null +++ b/plugins/inputs/filecount/testdata/subdir/nested2/qux @@ -0,0 +1,7 @@ +Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do +eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad +minim veniam, quis nostrud exercitation ullamco laboris nisi ut +aliquip ex ea commodo consequat. Duis aute irure dolor in +reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla +pariatur. Excepteur sint occaecat cupidatat non proident, sunt in +culpa qui officia deserunt mollit anim id est laborum. From 98231f8b6bdc8c741ee1155d4fc56b1b8f444086 Mon Sep 17 00:00:00 2001 From: Ruud Bijnen Date: Fri, 14 Dec 2018 19:56:21 +0100 Subject: [PATCH 0427/1815] Fix converter processor example (#5146) --- plugins/processors/converter/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/processors/converter/README.md b/plugins/processors/converter/README.md index 939b44ce8..d56985d84 100644 --- a/plugins/processors/converter/README.md +++ b/plugins/processors/converter/README.md @@ -48,7 +48,7 @@ will overwrite one another. [processors.converter.fields] integer = ["scboard_*"] - tag = ["ParseServerConfigGeneration"] + tag = ["ParentServerConfigGeneration"] ``` ```diff From 891eff09303ac9ed6e9ddda86105fab2f9ce892e Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Fri, 14 Dec 2018 15:34:05 -0700 Subject: [PATCH 0428/1815] Run stop logic only if required in prometheus input (#5144) --- plugins/inputs/prometheus/prometheus.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 84fc31800..eaadf1452 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -305,7 +305,9 @@ func (p *Prometheus) Start(a telegraf.Accumulator) error { } func (p *Prometheus) Stop() { - p.cancel() + if p.MonitorPods { + p.cancel() + } p.wg.Wait() } From 99b6982cde5da1d0190db1b9e35074237d674e79 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 14 Dec 2018 14:36:19 -0800 Subject: [PATCH 0429/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f9d62fc50..19c822ff0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ - [#5130](https://github.com/influxdata/telegraf/pull/5130): Increase varnishstat timeout. - [#5135](https://github.com/influxdata/telegraf/pull/5135): Remove storage calculation for non Azure managed instances and add server version. - [#5083](https://github.com/influxdata/telegraf/pull/5083): Fix error sending empty tag value in azure_monitor output. +- [#5143](https://github.com/influxdata/telegraf/issues/5143): Fix panic with prometheus input plugin on shutdown. 
## v1.9.1 [2018-12-11] From 6ef331efeb760163bb21f9edbf4bcab8e3aa4185 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Fri, 14 Dec 2018 15:38:01 -0700 Subject: [PATCH 0430/1815] Add micro and nanosecond unix timestamp support to JSON parser (#5149) --- plugins/parsers/json/README.md | 9 +++++---- plugins/parsers/json/parser.go | 11 ++++++++++- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/plugins/parsers/json/README.md b/plugins/parsers/json/README.md index 05ed6242f..a48575cba 100644 --- a/plugins/parsers/json/README.md +++ b/plugins/parsers/json/README.md @@ -41,8 +41,8 @@ ignored unless specified in the `tag_key` or `json_string_fields` options. ## metric. json_time_key = "" - ## Time format is the time layout that should be used to interprete the - ## json_time_key. The time must be `unix`, `unix_ms` or a time in the + ## Time format is the time layout that should be used to interpret the json_time_key. + ## The time must be `unix`, `unix_ms`, `unix_us`, `unix_ns`, or a time in the ## "reference time". To define a different format, arrange the values from ## the "reference time" in the example to match the format you will be ## using. For more information on the "reference time", visit @@ -70,8 +70,9 @@ time using the JSON document you can use the `json_time_key` and document. The `json_time_key` option specifies the key containing the time value and -`json_time_format` must be set to `unix`, `unix_ms`, or the Go "reference -time" which is defined to be the specific time: `Mon Jan 2 15:04:05 MST 2006`. +`json_time_format` must be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, or +the Go "reference time" which is defined to be the specific time: +`Mon Jan 2 15:04:05 MST 2006`. Consult the Go [time][time parse] package for details and additional examples on how to set the time format. diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index 19a92275b..b3bb9a488 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -52,6 +52,8 @@ func (p *JSONParser) parseArray(buf []byte) ([]telegraf.Metric, error) { // format = "unix": epoch is assumed to be in seconds and can come as number or string. Can have a decimal part. // format = "unix_ms": epoch is assumed to be in milliseconds and can come as number or string. Cannot have a decimal part. +// format = "unix_us": epoch is assumed to be in microseconds and can come as number or string. Cannot have a decimal part. +// format = "unix_ns": epoch is assumed to be in nanoseconds and can come as number or string. Cannot have a decimal part.
func parseUnixTimestamp(jsonValue interface{}, format string) (time.Time, error) { timeInt, timeFractional := int64(0), int64(0) timeEpochStr, ok := jsonValue.(string) @@ -88,6 +90,10 @@ func parseUnixTimestamp(jsonValue interface{}, format string) (time.Time, error) return time.Unix(timeInt, timeFractional).UTC(), nil } else if strings.EqualFold(format, "unix_ms") { return time.Unix(timeInt/1000, (timeInt%1000)*1e6).UTC(), nil + } else if strings.EqualFold(format, "unix_us") { + return time.Unix(0, timeInt*1e3).UTC(), nil + } else if strings.EqualFold(format, "unix_ns") { + return time.Unix(0, timeInt).UTC(), nil } else { return time.Time{}, errors.New("Invalid unix format") } @@ -126,7 +132,10 @@ func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]i return nil, err } - if strings.EqualFold(p.JSONTimeFormat, "unix") || strings.EqualFold(p.JSONTimeFormat, "unix_ms") { + if strings.EqualFold(p.JSONTimeFormat, "unix") || + strings.EqualFold(p.JSONTimeFormat, "unix_ms") || + strings.EqualFold(p.JSONTimeFormat, "unix_us") || + strings.EqualFold(p.JSONTimeFormat, "unix_ns") { nTime, err = parseUnixTimestamp(f.Fields[p.JSONTimeKey], p.JSONTimeFormat) if err != nil { return nil, err From 1334919224dbc9bb08322331600496f62ea51277 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 14 Dec 2018 14:38:41 -0800 Subject: [PATCH 0431/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 19c822ff0..7272c25c3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ - [#5074](https://github.com/influxdata/telegraf/pull/5074): Add support for sending a request body to http input. - [#5069](https://github.com/influxdata/telegraf/pull/5069): Add running field to procstat_lookup. - [#5116](https://github.com/influxdata/telegraf/pull/5116): Include DEVLINKS in available diskio udev properties. +- [#5149](https://github.com/influxdata/telegraf/pull/5149): Add micro and nanosecond unix timestamp support to JSON parser. 
## v1.9.2 [unreleased] From 697381d4b55b3eefa2aed697d38a412c1e48b438 Mon Sep 17 00:00:00 2001 From: Leonardo Di Donato Date: Tue, 18 Dec 2018 19:54:38 +0100 Subject: [PATCH 0432/1815] Add support for non-transparent framing of syslog messages (#5148) --- Gopkg.lock | 25 +- Gopkg.toml | 2 +- plugins/inputs/syslog/README.md | 24 +- plugins/inputs/syslog/commons_test.go | 62 ++++ plugins/inputs/syslog/framing.go | 64 ++++ plugins/inputs/syslog/framing_test.go | 37 +++ plugins/inputs/syslog/nontransparent_test.go | 308 ++++++++++++++++++ ...{rfc5425_test.go => octetcounting_test.go} | 91 ++---- plugins/inputs/syslog/rfc5426_test.go | 23 +- plugins/inputs/syslog/syslog.go | 85 +++-- 10 files changed, 601 insertions(+), 120 deletions(-) create mode 100644 plugins/inputs/syslog/commons_test.go create mode 100644 plugins/inputs/syslog/framing.go create mode 100644 plugins/inputs/syslog/framing_test.go create mode 100644 plugins/inputs/syslog/nontransparent_test.go rename plugins/inputs/syslog/{rfc5425_test.go => octetcounting_test.go} (84%) diff --git a/Gopkg.lock b/Gopkg.lock index d043bccd0..8fd3e81c4 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -583,15 +583,17 @@ version = "v0.8.1" [[projects]] - digest = "1:a39ef049cdeee03a57b132e7d60e32711b9d949c78458da78e702d9864c54369" + digest = "1:824c4cd143ee15735f1c75d9072aad46e51dd27a4ef8bf6ce723a138265b7fb3" name = "github.com/influxdata/go-syslog" packages = [ + ".", + "nontransparent", + "octetcounting", "rfc5424", - "rfc5425", ] pruneopts = "" - revision = "eecd51df3ad85464a2bab9b7d3a45bc1e299059e" - version = "v1.0.1" + revision = "0cd00a9f0a5e5607d5ef9a294c260f77a74e3b5a" + version = "v2.0.0" [[projects]] branch = "master" @@ -689,6 +691,17 @@ pruneopts = "" revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" +[[projects]] + branch = "develop" + digest = "1:3e66a61a57bbbe832c338edb3a623be0deb3dec650c2f3515149658898287e37" + name = "github.com/leodido/ragel-machinery" + packages = [ + ".", + "parser", + ] + pruneopts = "" + revision = "299bdde78165d4ca4bc7d064d8d6a4f39ac6de8c" + [[projects]] branch = "master" digest = "1:7e9956922e349af0190afa0b6621befcd201072679d8e51a9047ff149f2afe93" @@ -1478,8 +1491,10 @@ "github.com/google/go-cmp/cmp", "github.com/gorilla/mux", "github.com/hashicorp/consul/api", + "github.com/influxdata/go-syslog", + "github.com/influxdata/go-syslog/nontransparent", + "github.com/influxdata/go-syslog/octetcounting", "github.com/influxdata/go-syslog/rfc5424", - "github.com/influxdata/go-syslog/rfc5425", "github.com/influxdata/tail", "github.com/influxdata/toml", "github.com/influxdata/toml/ast", diff --git a/Gopkg.toml b/Gopkg.toml index 80df324dc..3b5c1b917 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -72,7 +72,7 @@ [[constraint]] name = "github.com/influxdata/go-syslog" - version = "1.0.1" + version = "2.0.0" [[constraint]] name = "github.com/influxdata/tail" diff --git a/plugins/inputs/syslog/README.md b/plugins/inputs/syslog/README.md index ad9b9b572..8183d2c90 100644 --- a/plugins/inputs/syslog/README.md +++ b/plugins/inputs/syslog/README.md @@ -2,7 +2,8 @@ The syslog plugin listens for syslog messages transmitted over [UDP](https://tools.ietf.org/html/rfc5426) or -[TCP](https://tools.ietf.org/html/rfc5425). +[TCP](https://tools.ietf.org/html/rfc6587) or +[TLS](https://tools.ietf.org/html/rfc5425), with or without the octet counting framing. Syslog messages should be formatted according to [RFC 5424](https://tools.ietf.org/html/rfc5424). 
@@ -37,6 +38,16 @@ Syslog messages should be formatted according to ## 0 means unlimited. # read_timeout = "5s" + ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). + ## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), + ## or the non-transparent framing technique (RFC6587#section-3.4.2). + ## Must be one of "octet-counting", "non-transparent". + # framing = "octet-counting" + + ## The trailer to be expected in case of non-transparent framing (default = "LF"). + ## Must be one of "LF", or "NUL". + # trailer = "LF" + ## Whether to parse in best effort mode or not (default = false). ## By default best effort parsing is off. # best_effort = false @@ -49,11 +60,18 @@ Syslog messages should be formatted according to # sdparam_separator = "_" ``` -#### Best Effort +#### Message transport + +The `framing` option only applies to streams. It governs the way we expect to receive messages within the stream. +Namely, with the [`"octet counting"`](https://tools.ietf.org/html/rfc5425#section-4.3) technique (default) or with the [`"non-transparent"`](https://tools.ietf.org/html/rfc6587#section-3.4.2) framing. + +The `trailer` option only applies when the `framing` option is `"non-transparent"`. It must have one of the following values: `"LF"` (default), or `"NUL"`. + +#### Best effort The [`best_effort`](https://github.com/influxdata/go-syslog#best-effort-mode) option instructs the parser to extract partial but valid info from syslog -messages. If unset only full messages will be collected. +messages. If unset only full messages will be collected. #### Rsyslog Integration diff --git a/plugins/inputs/syslog/commons_test.go b/plugins/inputs/syslog/commons_test.go new file mode 100644 index 000000000..f55d080a1 --- /dev/null +++ b/plugins/inputs/syslog/commons_test.go @@ -0,0 +1,62 @@ +package syslog + +import ( + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" + "time" +) + +var ( + pki = testutil.NewPKI("../../../testutil/pki") +) + +type testCasePacket struct { + name string + data []byte + wantBestEffort *testutil.Metric + wantStrict *testutil.Metric + werr bool +} + +type testCaseStream struct { + name string + data []byte + wantBestEffort []testutil.Metric + wantStrict []testutil.Metric + werr int // how many errors we expect in the strict mode? +} + +func newUDPSyslogReceiver(address string, bestEffort bool) *Syslog { + return &Syslog{ + Address: address, + now: func() time.Time { + return defaultTime + }, + BestEffort: bestEffort, + Separator: "_", + } +} + +func newTCPSyslogReceiver(address string, keepAlive *internal.Duration, maxConn int, bestEffort bool, f Framing) *Syslog { + d := &internal.Duration{ + Duration: defaultReadTimeout, + } + s := &Syslog{ + Address: address, + now: func() time.Time { + return defaultTime + }, + Framing: f, + ReadTimeout: d, + BestEffort: bestEffort, + Separator: "_", + } + if keepAlive != nil { + s.KeepAlivePeriod = keepAlive + } + if maxConn > 0 { + s.MaxConnections = maxConn + } + + return s +} diff --git a/plugins/inputs/syslog/framing.go b/plugins/inputs/syslog/framing.go new file mode 100644 index 000000000..6edfc7058 --- /dev/null +++ b/plugins/inputs/syslog/framing.go @@ -0,0 +1,64 @@ +package syslog + +import ( + "fmt" + "strings" +) + +// Framing represents the framing technique with which we expect the messages to arrive.
+type Framing int + +const ( + // OctetCounting indicates the transparent framing technique for syslog transport. + OctetCounting Framing = iota + // NonTransparent indicates the non-transparent framing technique for syslog transport. + NonTransparent +) + +func (f Framing) String() string { + switch f { + case OctetCounting: + return "OCTET-COUNTING" + case NonTransparent: + return "NON-TRANSPARENT" + } + return "" +} + +// UnmarshalTOML implements ability to unmarshal framing from TOML files. +func (f *Framing) UnmarshalTOML(data []byte) (err error) { + return f.UnmarshalText(data) +} + +// UnmarshalText implements encoding.TextUnmarshaler +func (f *Framing) UnmarshalText(data []byte) (err error) { + s := string(data) + switch strings.ToUpper(s) { + case `OCTET-COUNTING`: + fallthrough + case `"OCTET-COUNTING"`: + fallthrough + case `'OCTET-COUNTING'`: + *f = OctetCounting + return + + case `NON-TRANSPARENT`: + fallthrough + case `"NON-TRANSPARENT"`: + fallthrough + case `'NON-TRANSPARENT'`: + *f = NonTransparent + return + } + *f = -1 + return fmt.Errorf("unknown framing") +} + +// MarshalText implements encoding.TextMarshaler +func (f Framing) MarshalText() ([]byte, error) { + s := f.String() + if s != "" { + return []byte(s), nil + } + return nil, fmt.Errorf("unknown framing") +} diff --git a/plugins/inputs/syslog/framing_test.go b/plugins/inputs/syslog/framing_test.go new file mode 100644 index 000000000..1442eba7f --- /dev/null +++ b/plugins/inputs/syslog/framing_test.go @@ -0,0 +1,37 @@ +package syslog + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestFraming(t *testing.T) { + var f1 Framing + f1.UnmarshalTOML([]byte(`"non-transparent"`)) + assert.Equal(t, NonTransparent, f1) + + var f2 Framing + f2.UnmarshalTOML([]byte(`non-transparent`)) + assert.Equal(t, NonTransparent, f2) + + var f3 Framing + f3.UnmarshalTOML([]byte(`'non-transparent'`)) + assert.Equal(t, NonTransparent, f3) + + var f4 Framing + f4.UnmarshalTOML([]byte(`"octet-counting"`)) + assert.Equal(t, OctetCounting, f4) + + var f5 Framing + f5.UnmarshalTOML([]byte(`octet-counting`)) + assert.Equal(t, OctetCounting, f5) + + var f6 Framing + f6.UnmarshalTOML([]byte(`'octet-counting'`)) + assert.Equal(t, OctetCounting, f6) + + var f7 Framing + err := f7.UnmarshalTOML([]byte(`nope`)) + assert.Equal(t, Framing(-1), f7) + assert.Error(t, err) +} diff --git a/plugins/inputs/syslog/nontransparent_test.go b/plugins/inputs/syslog/nontransparent_test.go new file mode 100644 index 000000000..1dea84144 --- /dev/null +++ b/plugins/inputs/syslog/nontransparent_test.go @@ -0,0 +1,308 @@ +package syslog + +import ( + "crypto/tls" + "io/ioutil" + "net" + "os" + "path/filepath" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func getTestCasesForNonTransparent() []testCaseStream { + testCases := []testCaseStream{ + { + name: "1st/avg/ok", + data: []byte(`<29>1 2016-02-21T04:32:57+00:00 web1 someservice 2341 2 [origin][meta sequence="14125553" service="someservice"] "GET /v1/ok HTTP/1.1" 200 145 "-" "hacheck 0.9.0" 24306 127.0.0.1:40124 575`), + wantStrict: []testutil.Metric{ + { + Measurement: "syslog", + Fields: map[string]interface{}{ + "version": uint16(1), + "timestamp": time.Unix(1456029177, 0).UnixNano(), + "procid": "2341", + "msgid": "2", + "message": `"GET /v1/ok HTTP/1.1" 200 145 "-" "hacheck 0.9.0" 24306 127.0.0.1:40124 575`, + "origin": true, + "meta_sequence": 
"14125553", + "meta_service": "someservice", + "severity_code": 5, + "facility_code": 3, + }, + Tags: map[string]string{ + "severity": "notice", + "facility": "daemon", + "hostname": "web1", + "appname": "someservice", + }, + Time: defaultTime, + }, + }, + wantBestEffort: []testutil.Metric{ + { + Measurement: "syslog", + Fields: map[string]interface{}{ + "version": uint16(1), + "timestamp": time.Unix(1456029177, 0).UnixNano(), + "procid": "2341", + "msgid": "2", + "message": `"GET /v1/ok HTTP/1.1" 200 145 "-" "hacheck 0.9.0" 24306 127.0.0.1:40124 575`, + "origin": true, + "meta_sequence": "14125553", + "meta_service": "someservice", + "severity_code": 5, + "facility_code": 3, + }, + Tags: map[string]string{ + "severity": "notice", + "facility": "daemon", + "hostname": "web1", + "appname": "someservice", + }, + Time: defaultTime, + }, + }, + werr: 1, + }, + { + name: "1st/min/ok//2nd/min/ok", + data: []byte("<1>2 - - - - - -\n<4>11 - - - - - -\n"), + wantStrict: []testutil.Metric{ + { + Measurement: "syslog", + Fields: map[string]interface{}{ + "version": uint16(2), + "severity_code": 1, + "facility_code": 0, + }, + Tags: map[string]string{ + "severity": "alert", + "facility": "kern", + }, + Time: defaultTime, + }, + { + Measurement: "syslog", + Fields: map[string]interface{}{ + "version": uint16(11), + "severity_code": 4, + "facility_code": 0, + }, + Tags: map[string]string{ + "severity": "warning", + "facility": "kern", + }, + Time: defaultTime.Add(time.Nanosecond), + }, + }, + wantBestEffort: []testutil.Metric{ + { + Measurement: "syslog", + Fields: map[string]interface{}{ + "version": uint16(2), + "severity_code": 1, + "facility_code": 0, + }, + Tags: map[string]string{ + "severity": "alert", + "facility": "kern", + }, + Time: defaultTime, + }, + { + Measurement: "syslog", + Fields: map[string]interface{}{ + "version": uint16(11), + "severity_code": 4, + "facility_code": 0, + }, + Tags: map[string]string{ + "severity": "warning", + "facility": "kern", + }, + Time: defaultTime.Add(time.Nanosecond), + }, + }, + }, + } + return testCases +} + +func testStrictNonTransparent(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { + for _, tc := range getTestCasesForNonTransparent() { + t.Run(tc.name, func(t *testing.T) { + // Creation of a strict mode receiver + receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, false, NonTransparent) + require.NotNil(t, receiver) + if wantTLS { + receiver.ServerConfig = *pki.TLSServerConfig() + } + require.Equal(t, receiver.KeepAlivePeriod, keepAlive) + acc := &testutil.Accumulator{} + require.NoError(t, receiver.Start(acc)) + defer receiver.Stop() + + // Connect + var conn net.Conn + var err error + if wantTLS { + config, e := pki.TLSClientConfig().TLSConfig() + require.NoError(t, e) + config.ServerName = "localhost" + conn, err = tls.Dial(protocol, address, config) + } else { + conn, err = net.Dial(protocol, address) + defer conn.Close() + } + require.NotNil(t, conn) + require.NoError(t, err) + + // Clear + acc.ClearMetrics() + acc.Errors = make([]error, 0) + + // Write + _, err = conn.Write(tc.data) + conn.Close() + require.NoError(t, err) + + // Wait that the the number of data points is accumulated + // Since the receiver is running concurrently + if tc.wantStrict != nil { + acc.Wait(len(tc.wantStrict)) + } + + // Wait the parsing error + acc.WaitError(tc.werr) + + // Verify + if len(acc.Errors) != tc.werr { + t.Fatalf("Got unexpected errors. 
want error = %v, errors = %v\n", tc.werr, acc.Errors) + } + var got []testutil.Metric + for _, metric := range acc.Metrics { + got = append(got, *metric) + } + if !cmp.Equal(tc.wantStrict, got) { + t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(tc.wantStrict, got)) + } + }) + } +} + +func testBestEffortNonTransparent(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { + for _, tc := range getTestCasesForNonTransparent() { + t.Run(tc.name, func(t *testing.T) { + // Creation of a best effort mode receiver + receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, true, NonTransparent) + require.NotNil(t, receiver) + if wantTLS { + receiver.ServerConfig = *pki.TLSServerConfig() + } + require.Equal(t, receiver.KeepAlivePeriod, keepAlive) + acc := &testutil.Accumulator{} + require.NoError(t, receiver.Start(acc)) + defer receiver.Stop() + + // Connect + var conn net.Conn + var err error + if wantTLS { + config, e := pki.TLSClientConfig().TLSConfig() + require.NoError(t, e) + config.ServerName = "localhost" + conn, err = tls.Dial(protocol, address, config) + } else { + conn, err = net.Dial(protocol, address) + } + require.NotNil(t, conn) + require.NoError(t, err) + + // Clear + acc.ClearMetrics() + acc.Errors = make([]error, 0) + + // Write + _, err = conn.Write(tc.data) + require.NoError(t, err) + conn.Close() + + // Wait until the number of data points is accumulated + // Since the receiver is running concurrently + if tc.wantBestEffort != nil { + acc.Wait(len(tc.wantBestEffort)) + } + + // Verify + var got []testutil.Metric + for _, metric := range acc.Metrics { + got = append(got, *metric) + } + if !cmp.Equal(tc.wantBestEffort, got) { + t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(tc.wantBestEffort, got)) + } + }) + } +} + +func TestNonTransparentStrict_tcp(t *testing.T) { + testStrictNonTransparent(t, "tcp", address, false, nil) +} + +func TestNonTransparentBestEffort_tcp(t *testing.T) { + testBestEffortNonTransparent(t, "tcp", address, false, nil) +} + +func TestNonTransparentStrict_tcp_tls(t *testing.T) { + testStrictNonTransparent(t, "tcp", address, true, nil) +} + +func TestNonTransparentBestEffort_tcp_tls(t *testing.T) { + testBestEffortNonTransparent(t, "tcp", address, true, nil) +} + +func TestNonTransparentStrictWithKeepAlive_tcp_tls(t *testing.T) { + testStrictNonTransparent(t, "tcp", address, true, &internal.Duration{Duration: time.Minute}) +} + +func TestNonTransparentStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { + testStrictNonTransparent(t, "tcp", address, true, &internal.Duration{Duration: 0}) +} + +func TestNonTransparentStrict_unix(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "telegraf") + require.NoError(t, err) + defer os.RemoveAll(tmpdir) + sock := filepath.Join(tmpdir, "syslog.TestStrict_unix.sock") + testStrictNonTransparent(t, "unix", sock, false, nil) +} + +func TestNonTransparentBestEffort_unix(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "telegraf") + require.NoError(t, err) + defer os.RemoveAll(tmpdir) + sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix.sock") + testBestEffortNonTransparent(t, "unix", sock, false, nil) +} + +func TestNonTransparentStrict_unix_tls(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "telegraf") + require.NoError(t, err) + defer os.RemoveAll(tmpdir) + sock := filepath.Join(tmpdir, "syslog.TestStrict_unix_tls.sock") + testStrictNonTransparent(t, "unix", sock, true, nil) +} + +func TestNonTransparentBestEffort_unix_tls(t *testing.T) { + tmpdir, err :=
ioutil.TempDir("", "telegraf") + require.NoError(t, err) + defer os.RemoveAll(tmpdir) + sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix_tls.sock") + testBestEffortNonTransparent(t, "unix", sock, true, nil) +} diff --git a/plugins/inputs/syslog/rfc5425_test.go b/plugins/inputs/syslog/octetcounting_test.go similarity index 84% rename from plugins/inputs/syslog/rfc5425_test.go rename to plugins/inputs/syslog/octetcounting_test.go index d629024b7..c61805131 100644 --- a/plugins/inputs/syslog/rfc5425_test.go +++ b/plugins/inputs/syslog/octetcounting_test.go @@ -16,20 +16,8 @@ import ( "github.com/stretchr/testify/require" ) -var ( - pki = testutil.NewPKI("../../../testutil/pki") -) - -type testCase5425 struct { - name string - data []byte - wantBestEffort []testutil.Metric - wantStrict []testutil.Metric - werr int // how many errors we expect in the strict mode? -} - -func getTestCasesForRFC5425() []testCase5425 { - testCases := []testCase5425{ +func getTestCasesForOctetCounting() []testCaseStream { + testCases := []testCaseStream{ { name: "1st/avg/ok", data: []byte(`188 <29>1 2016-02-21T04:32:57+00:00 web1 someservice 2341 2 [origin][meta sequence="14125553" service="someservice"] "GET /v1/ok HTTP/1.1" 200 145 "-" "hacheck 0.9.0" 24306 127.0.0.1:40124 575`), @@ -346,34 +334,11 @@ func getTestCasesForRFC5425() []testCase5425 { return testCases } -func newTCPSyslogReceiver(address string, keepAlive *internal.Duration, maxConn int, bestEffort bool) *Syslog { - d := &internal.Duration{ - Duration: defaultReadTimeout, - } - s := &Syslog{ - Address: address, - now: func() time.Time { - return defaultTime - }, - ReadTimeout: d, - BestEffort: bestEffort, - Separator: "_", - } - if keepAlive != nil { - s.KeepAlivePeriod = keepAlive - } - if maxConn > 0 { - s.MaxConnections = maxConn - } - - return s -} - -func testStrictRFC5425(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { - for _, tc := range getTestCasesForRFC5425() { +func testStrictOctetCounting(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { + for _, tc := range getTestCasesForOctetCounting() { t.Run(tc.name, func(t *testing.T) { // Creation of a strict mode receiver - receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, false) + receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, false, OctetCounting) require.NotNil(t, receiver) if wantTLS { receiver.ServerConfig = *pki.TLSServerConfig() @@ -431,11 +396,11 @@ func testStrictRFC5425(t *testing.T, protocol string, address string, wantTLS bo } } -func testBestEffortRFC5425(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { - for _, tc := range getTestCasesForRFC5425() { +func testBestEffortOctetCounting(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { + for _, tc := range getTestCasesForOctetCounting() { t.Run(tc.name, func(t *testing.T) { // Creation of a best effort mode receiver - receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, true) + receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, true, OctetCounting) require.NotNil(t, receiver) if wantTLS { receiver.ServerConfig = *pki.TLSServerConfig() @@ -486,58 +451,58 @@ func testBestEffortRFC5425(t *testing.T, protocol string, address string, wantTL } } -func TestStrict_tcp(t *testing.T) { - testStrictRFC5425(t, "tcp", address, false, nil) +func TestOctetCountingStrict_tcp(t *testing.T) { 
+ testStrictOctetCounting(t, "tcp", address, false, nil) } -func TestBestEffort_tcp(t *testing.T) { - testBestEffortRFC5425(t, "tcp", address, false, nil) +func TestOctetCountingBestEffort_tcp(t *testing.T) { + testBestEffortOctetCounting(t, "tcp", address, false, nil) } -func TestStrict_tcp_tls(t *testing.T) { - testStrictRFC5425(t, "tcp", address, true, nil) +func TestOctetCountingStrict_tcp_tls(t *testing.T) { + testStrictOctetCounting(t, "tcp", address, true, nil) } -func TestBestEffort_tcp_tls(t *testing.T) { - testBestEffortRFC5425(t, "tcp", address, true, nil) +func TestOctetCountingBestEffort_tcp_tls(t *testing.T) { + testBestEffortOctetCounting(t, "tcp", address, true, nil) } -func TestStrictWithKeepAlive_tcp_tls(t *testing.T) { - testStrictRFC5425(t, "tcp", address, true, &internal.Duration{Duration: time.Minute}) +func TestOctetCountingStrictWithKeepAlive_tcp_tls(t *testing.T) { + testStrictOctetCounting(t, "tcp", address, true, &internal.Duration{Duration: time.Minute}) } -func TestStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { - testStrictRFC5425(t, "tcp", address, true, &internal.Duration{Duration: 0}) +func TestOctetCountingStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { + testStrictOctetCounting(t, "tcp", address, true, &internal.Duration{Duration: 0}) } -func TestStrict_unix(t *testing.T) { +func TestOctetCountingStrict_unix(t *testing.T) { tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix.sock") - testStrictRFC5425(t, "unix", sock, false, nil) + testStrictOctetCounting(t, "unix", sock, false, nil) } -func TestBestEffort_unix(t *testing.T) { +func TestOctetCountingBestEffort_unix(t *testing.T) { tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix.sock") - testBestEffortRFC5425(t, "unix", sock, false, nil) + testBestEffortOctetCounting(t, "unix", sock, false, nil) } -func TestStrict_unix_tls(t *testing.T) { +func TestOctetCountingStrict_unix_tls(t *testing.T) { tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix_tls.sock") - testStrictRFC5425(t, "unix", sock, true, nil) + testStrictOctetCounting(t, "unix", sock, true, nil) } -func TestBestEffort_unix_tls(t *testing.T) { +func TestOctetCountingBestEffort_unix_tls(t *testing.T) { tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix_tls.sock") - testBestEffortRFC5425(t, "unix", sock, true, nil) + testBestEffortOctetCounting(t, "unix", sock, true, nil) } diff --git a/plugins/inputs/syslog/rfc5426_test.go b/plugins/inputs/syslog/rfc5426_test.go index 67966ed1d..ba856b0ac 100644 --- a/plugins/inputs/syslog/rfc5426_test.go +++ b/plugins/inputs/syslog/rfc5426_test.go @@ -15,16 +15,8 @@ import ( "github.com/stretchr/testify/require" ) -type testCase5426 struct { - name string - data []byte - wantBestEffort *testutil.Metric - wantStrict *testutil.Metric - werr bool -} - -func getTestCasesForRFC5426() []testCase5426 { - testCases := []testCase5426{ +func getTestCasesForRFC5426() []testCasePacket { + testCases := []testCasePacket{ { name: "empty", data: []byte(""), @@ -239,17 +231,6 @@ func getTestCasesForRFC5426() []testCase5426 { return testCases } -func newUDPSyslogReceiver(address string, bestEffort bool) *Syslog { - return &Syslog{ 
- Address: address, - now: func() time.Time { - return defaultTime - }, - BestEffort: bestEffort, - Separator: "_", - } -} - func testRFC5426(t *testing.T, protocol string, address string, bestEffort bool) { for _, tc := range getTestCasesForRFC5426() { t.Run(tc.name, func(t *testing.T) { diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go index 034e03df2..ab2277caa 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -12,8 +12,10 @@ import ( "time" "unicode" + "github.com/influxdata/go-syslog" + "github.com/influxdata/go-syslog/nontransparent" + "github.com/influxdata/go-syslog/octetcounting" "github.com/influxdata/go-syslog/rfc5424" - "github.com/influxdata/go-syslog/rfc5425" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" tlsConfig "github.com/influxdata/telegraf/internal/tls" @@ -28,8 +30,10 @@ type Syslog struct { tlsConfig.ServerConfig Address string `toml:"server"` KeepAlivePeriod *internal.Duration - ReadTimeout *internal.Duration MaxConnections int + ReadTimeout *internal.Duration + Framing Framing + Trailer nontransparent.TrailerType BestEffort bool Separator string `toml:"sdparam_separator"` @@ -76,6 +80,16 @@ var sampleConfig = ` ## 0 means unlimited. # read_timeout = "5s" + ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). + ## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), + ## or the non-transparent framing technique (RFC6587#section-3.4.2). + ## Must be one of "octet-counting", "non-transparent". + # framing = "octet-counting" + + ## The trailer to be expected in case of non-transparent framing (default = "LF"). + ## Must be one of "LF", or "NUL". + # trailer = "LF" + ## Whether to parse in best effort mode or not (default = false). ## By default best effort parsing is off. # best_effort = false @@ -95,7 +109,7 @@ func (s *Syslog) SampleConfig() string { // Description returns the plugin description func (s *Syslog) Description() string { - return "Accepts syslog messages per RFC5425" + return "Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587" } // Gather ...
@@ -203,7 +217,12 @@ func getAddressParts(a string) (string, string, error) { func (s *Syslog) listenPacket(acc telegraf.Accumulator) { defer s.wg.Done() b := make([]byte, ipMaxPacketSize) - p := rfc5424.NewParser() + var p syslog.Machine + if s.BestEffort { + p = rfc5424.NewParser(rfc5424.WithBestEffort()) + } else { + p = rfc5424.NewParser() + } for { n, _, err := s.udpListener.ReadFrom(b) if err != nil { @@ -213,9 +232,9 @@ func (s *Syslog) listenPacket(acc telegraf.Accumulator) { break } - message, err := p.Parse(b[:n], &s.BestEffort) + message, err := p.Parse(b[:n]) if message != nil { - acc.AddFields("syslog", fields(*message, s), tags(*message), s.time()) + acc.AddFields("syslog", fields(message, s), tags(message), s.time()) } if err != nil { acc.AddError(err) @@ -276,24 +295,38 @@ func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) { conn.Close() }() - var p *rfc5425.Parser + var p syslog.Parser - if s.BestEffort { - p = rfc5425.NewParser(conn, rfc5425.WithBestEffort()) - } else { - p = rfc5425.NewParser(conn) - } - - if s.ReadTimeout != nil && s.ReadTimeout.Duration > 0 { - conn.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration)) - } - - p.ParseExecuting(func(r *rfc5425.Result) { + emit := func(r *syslog.Result) { s.store(*r, acc) if s.ReadTimeout != nil && s.ReadTimeout.Duration > 0 { conn.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration)) } - }) + } + + // Create parser options + opts := []syslog.ParserOption{ + syslog.WithListener(emit), + } + if s.BestEffort { + opts = append(opts, syslog.WithBestEffort()) + } + + // Select the parser to use depending on transport framing + if s.Framing == OctetCounting { + // Octet counting transparent framing + p = octetcounting.NewParser(opts...) + } else { + // Non-transparent framing + opts = append(opts, nontransparent.WithTrailer(s.Trailer)) + p = nontransparent.NewParser(opts...)
+ } + + p.Parse(conn) + + if s.ReadTimeout != nil && s.ReadTimeout.Duration > 0 { + conn.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration)) + } } func (s *Syslog) setKeepAlive(c *net.TCPConn) error { @@ -310,20 +343,16 @@ func (s *Syslog) setKeepAlive(c *net.TCPConn) error { return c.SetKeepAlivePeriod(s.KeepAlivePeriod.Duration) } -func (s *Syslog) store(res rfc5425.Result, acc telegraf.Accumulator) { +func (s *Syslog) store(res syslog.Result, acc telegraf.Accumulator) { if res.Error != nil { acc.AddError(res.Error) } - if res.MessageError != nil { - acc.AddError(res.MessageError) - } if res.Message != nil { - msg := *res.Message - acc.AddFields("syslog", fields(msg, s), tags(msg), s.time()) + acc.AddFields("syslog", fields(res.Message, s), tags(res.Message), s.time()) } } -func tags(msg rfc5424.SyslogMessage) map[string]string { +func tags(msg syslog.Message) map[string]string { ts := map[string]string{} // Not checking assuming a minimally valid message @@ -341,7 +370,7 @@ func tags(msg rfc5424.SyslogMessage) map[string]string { return ts } -func fields(msg rfc5424.SyslogMessage, s *Syslog) map[string]interface{} { +func fields(msg syslog.Message, s *Syslog) map[string]interface{} { // Not checking assuming a minimally valid message flds := map[string]interface{}{ "version": msg.Version(), @@ -415,6 +444,8 @@ func init() { ReadTimeout: &internal.Duration{ Duration: defaultReadTimeout, }, + Framing: OctetCounting, + Trailer: nontransparent.LF, Separator: "_", } From 700e6b5ed301174eff106d74063dda7fc90d6fa5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 18 Dec 2018 11:04:02 -0800 Subject: [PATCH 0433/1815] Update changelog --- CHANGELOG.md | 1 + docs/LICENSE_OF_DEPENDENCIES.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7272c25c3..4f711d195 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ - [#5135](https://github.com/influxdata/telegraf/pull/5135): Remove storage calculation for non Azure managed instances and add server version. - [#5083](https://github.com/influxdata/telegraf/pull/5083): Fix error sending empty tag value in azure_monitor output. - [#5143](https://github.com/influxdata/telegraf/issues/5143): Fix panic with prometheus input plugin on shutdown. +- [#4482](https://github.com/influxdata/telegraf/issues/4482): Support non-transparent framing of syslog messages. 
## v1.9.1 [2018-12-11] diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index fec320ced..43d096992 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -63,6 +63,7 @@ following works: - github.com/kardianos/service [zlib License](https://github.com/kardianos/service/blob/master/LICENSE) - github.com/kballard/go-shellquote [MIT License](https://github.com/kballard/go-shellquote/blob/master/LICENSE) - github.com/kr/logfmt [MIT License](https://github.com/kr/logfmt/blob/master/Readme) +- github.com/leodido/ragel-machinery [MIT License](https://github.com/leodido/ragel-machinery/blob/develop/LICENSE) - github.com/mailru/easyjson [MIT License](https://github.com/mailru/easyjson/blob/master/LICENSE) - github.com/matttproud/golang_protobuf_extensions [Apache License 2.0](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE) - github.com/Microsoft/ApplicationInsights-Go [MIT License](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE) From 7caa5d20af4c38b4b77c527b191fc8a213713e0e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n=20L=C3=B3pez?= Date: Tue, 18 Dec 2018 21:21:36 +0100 Subject: [PATCH 0434/1815] Fix invalid JSON in readme (#5158) --- plugins/parsers/json/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/parsers/json/README.md b/plugins/parsers/json/README.md index a48575cba..8b73b7214 100644 --- a/plugins/parsers/json/README.md +++ b/plugins/parsers/json/README.md @@ -156,14 +156,14 @@ Input: "b": { "c": 6, "time":"04 Jan 06 15:04 MST" - }, + } }, { "a": 7, "b": { "c": 8, "time":"11 Jan 07 15:04 MST" - }, + } } ] ``` From 544262a23aeec96d9e2b8c93572b90a4f4b10325 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 18 Dec 2018 14:20:43 -0800 Subject: [PATCH 0435/1815] Apply global and plugin level metric modifications before filtering (#5152) --- internal/models/running_input.go | 12 +++++----- internal/models/running_input_test.go | 34 ++++++++++++++++++++++++++- 2 files changed, 39 insertions(+), 7 deletions(-) diff --git a/internal/models/running_input.go b/internal/models/running_input.go index 0775d5c5d..08a804c40 100644 --- a/internal/models/running_input.go +++ b/internal/models/running_input.go @@ -62,12 +62,6 @@ func (r *RunningInput) MakeMetric(metric telegraf.Metric) telegraf.Metric { return nil } - r.Config.Filter.Modify(metric) - if len(metric.FieldList()) == 0 { - r.metricFiltered(metric) - return nil - } - m := makemetric( metric, r.Config.NameOverride, @@ -76,6 +70,12 @@ func (r *RunningInput) MakeMetric(metric telegraf.Metric) telegraf.Metric { r.Config.Tags, r.defaultTags) + r.Config.Filter.Modify(metric) + if len(metric.FieldList()) == 0 { + r.metricFiltered(metric) + return nil + } + r.MetricsGathered.Incr(1) GlobalMetricsGathered.Incr(1) return m diff --git a/internal/models/running_input_test.go b/internal/models/running_input_test.go index 898007e61..5978a0061 100644 --- a/internal/models/running_input_test.go +++ b/internal/models/running_input_test.go @@ -7,11 +7,43 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func TestMakeMetricFilterAfterApplyingGlobalTags(t *testing.T) { + now := time.Now() + ri := NewRunningInput(&testInput{}, &InputConfig{ + Filter: Filter{ + TagInclude: []string{"b"}, + }, + }) + require.NoError(t, ri.Config.Filter.Compile()) + 
ri.SetDefaultTags(map[string]string{"a": "x", "b": "y"}) + + m, err := metric.New("cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + now) + require.NoError(t, err) + + actual := ri.MakeMetric(m) + + expected, err := metric.New("cpu", + map[string]string{ + "b": "y", + }, + map[string]interface{}{ + "value": 42, + }, + now) + require.NoError(t, err) + + testutil.RequireMetricEqual(t, expected, actual) +} + func TestMakeMetricNoFields(t *testing.T) { now := time.Now() ri := NewRunningInput(&testInput{}, &InputConfig{ From 234975bcac9b8d430a300a91635372dae6de472b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 18 Dec 2018 14:21:50 -0800 Subject: [PATCH 0436/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f711d195..caee74f6d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ - [#5083](https://github.com/influxdata/telegraf/pull/5083): Fix error sending empty tag value in azure_monitor output. - [#5143](https://github.com/influxdata/telegraf/issues/5143): Fix panic with prometheus input plugin on shutdown. - [#4482](https://github.com/influxdata/telegraf/issues/4482): Support non-transparent framing of syslog messages. +- [#5151](https://github.com/influxdata/telegraf/issues/5151): Apply global and plugin level metric modifications before filtering. ## v1.9.1 [2018-12-11] From f530ca6e7ccc96091025c0156009d2365231744e Mon Sep 17 00:00:00 2001 From: Samuel-BF <36996277+Samuel-BF@users.noreply.github.com> Date: Tue, 18 Dec 2018 23:23:25 +0100 Subject: [PATCH 0437/1815] Use godirwalk in globpath (#5145) --- internal/globpath/globpath.go | 41 +++++++++++++++------------ internal/globpath/globpath_test.go | 9 +++--- plugins/inputs/file/file.go | 5 +--- plugins/inputs/filestat/filestat.go | 6 +++- plugins/inputs/logparser/logparser.go | 2 +- plugins/inputs/tail/tail.go | 2 +- 6 files changed, 35 insertions(+), 30 deletions(-) diff --git a/internal/globpath/globpath.go b/internal/globpath/globpath.go index fc6a43618..b21d93520 100644 --- a/internal/globpath/globpath.go +++ b/internal/globpath/globpath.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/gobwas/glob" + "github.com/karrick/godirwalk" ) type GlobPath struct { @@ -41,38 +42,42 @@ func Compile(path string) (*GlobPath, error) { } // Match returns all files matching the expression -func (g *GlobPath) Match() map[string]os.FileInfo { - out := make(map[string]os.FileInfo) +// If it's a static path, returns path +func (g *GlobPath) Match() []string { if !g.hasMeta { - info, err := os.Stat(g.path) - if err == nil { - out[g.path] = info - } - return out + return []string{g.path} } if !g.HasSuperMeta { files, _ := filepath.Glob(g.path) - for _, file := range files { - info, err := os.Stat(file) - if err == nil { - out[file] = info - } - } - return out + return files } roots, err := filepath.Glob(g.rootGlob) if err != nil { - return out + return []string{} } - walkfn := func(path string, info os.FileInfo, _ error) error { + out := []string{} + walkfn := func(path string, _ *godirwalk.Dirent) error { if g.g.Match(path) { - out[path] = info + out = append(out, path) } return nil } for _, root := range roots { - filepath.Walk(root, walkfn) + fileinfo, err := os.Stat(root) + if err != nil { + continue + } + if !fileinfo.IsDir() { + if g.MatchString(root) { + out = append(out, root) + } + continue + } + godirwalk.Walk(root, &godirwalk.Options{ + Callback: walkfn, + Unsorted: true, + }) } return out } diff --git 
a/internal/globpath/globpath_test.go b/internal/globpath/globpath_test.go index e67474fa9..476ba9243 100644 --- a/internal/globpath/globpath_test.go +++ b/internal/globpath/globpath_test.go @@ -1,7 +1,6 @@ package globpath import ( - "os" "runtime" "strings" "testing" @@ -34,7 +33,7 @@ func TestCompileAndMatch(t *testing.T) { matches = g3.Match() require.Len(t, matches, 1) matches = g4.Match() - require.Len(t, matches, 0) + require.Len(t, matches, 1) matches = g5.Match() require.Len(t, matches, 0) } @@ -75,10 +74,10 @@ func getTestdataDir() string { func TestMatch_ErrPermission(t *testing.T) { tests := []struct { input string - expected map[string]os.FileInfo + expected []string }{ - {"/root/foo", map[string]os.FileInfo{}}, - {"/root/f*", map[string]os.FileInfo{}}, + {"/root/foo", []string{"/root/foo"}}, + {"/root/f*", []string(nil)}, } for _, test := range tests { diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go index d6714301e..b93a7ba99 100644 --- a/plugins/inputs/file/file.go +++ b/plugins/inputs/file/file.go @@ -75,10 +75,7 @@ func (f *File) refreshFilePaths() error { if len(files) <= 0 { return fmt.Errorf("could not find file: %v", file) } - - for k := range files { - allFiles = append(allFiles, k) - } + allFiles = append(allFiles, files...) } f.filenames = allFiles diff --git a/plugins/inputs/filestat/filestat.go b/plugins/inputs/filestat/filestat.go index 762eaa420..692e58c53 100644 --- a/plugins/inputs/filestat/filestat.go +++ b/plugins/inputs/filestat/filestat.go @@ -73,13 +73,17 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error { continue } - for fileName, fileInfo := range files { + for _, fileName := range files { tags := map[string]string{ "file": fileName, } fields := map[string]interface{}{ "exists": int64(1), } + fileInfo, err := os.Stat(fileName) + if os.IsNotExist(err) { + fields["exists"] = int64(0) + } if fileInfo == nil { log.Printf("E! Unable to get info for file [%s], possible permissions issue", diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index 163436a3c..eb23e2b74 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -182,7 +182,7 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error { } files := g.Match() - for file := range files { + for _, file := range files { if _, ok := l.tailers[file]; ok { // we're already tailing this file continue diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index 598287963..bdfa2de44 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -111,7 +111,7 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error { if err != nil { t.acc.AddError(fmt.Errorf("E! 
Error Glob %s failed to compile, %s", filepath, err)) } - for file := range g.Match() { + for _, file := range g.Match() { if _, ok := t.tailers[file]; ok { // we're already tailing this file continue From 841860890f1696984c5e2c6de5af0021c554fc2c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 19 Dec 2018 12:59:27 -0800 Subject: [PATCH 0438/1815] Add support for basic auth to couchdb input (#5160) --- plugins/inputs/couchdb/README.md | 18 ++++++++++++------ plugins/inputs/couchdb/couchdb.go | 20 ++++++++++++++++++-- 2 files changed, 30 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/couchdb/README.md b/plugins/inputs/couchdb/README.md index 0af06cc54..3a7f127db 100644 --- a/plugins/inputs/couchdb/README.md +++ b/plugins/inputs/couchdb/README.md @@ -1,14 +1,18 @@ # CouchDB Input Plugin ---- -The CouchDB plugin gathers metrics of CouchDB using [_stats](http://docs.couchdb.org/en/1.6.1/api/server/common.html?highlight=stats#get--_stats) endpoint. +The CouchDB plugin gathers metrics of CouchDB using [_stats] endpoint. -### Configuration: +### Configuration -``` -# Sample Config: +```toml [[inputs.couchdb]] - hosts = ["http://localhost:5984/_stats"] + ## Works with CouchDB stats endpoints out of the box + ## Multiple Hosts from which to read CouchDB stats: + hosts = ["http://localhost:8086/_stats"] + + ## Use HTTP Basic Authentication. + # basic_username = "telegraf" + # basic_password = "p@ssw0rd" ``` ### Measurements & Fields: @@ -71,3 +75,5 @@ couchdb,server=http://couchdb22:5984/_node/_local/_stats couchdb_auth_cache_hits ``` couchdb,server=http://couchdb16:5984/_stats couchdb_request_time_sum=96,httpd_status_codes_200_sum=37,httpd_status_codes_200_min=0,httpd_requests_mean=0.005,httpd_requests_min=0,couchdb_request_time_stddev=3.833,couchdb_request_time_min=1,httpd_request_methods_get_stddev=0.073,httpd_request_methods_get_min=0,httpd_status_codes_200_mean=0.005,httpd_status_codes_200_max=1,httpd_requests_sum=37,couchdb_request_time_current=96,httpd_request_methods_get_sum=37,httpd_request_methods_get_mean=0.005,httpd_request_methods_get_max=1,httpd_status_codes_200_stddev=0.073,couchdb_request_time_mean=2.595,couchdb_request_time_max=25,httpd_request_methods_get_current=37,httpd_status_codes_200_current=37,httpd_requests_current=37,httpd_requests_stddev=0.073,httpd_requests_max=1 1536707179000000000 ``` + +[_stats]: http://docs.couchdb.org/en/1.6.1/api/server/common.html?highlight=stats#get--_stats diff --git a/plugins/inputs/couchdb/couchdb.go b/plugins/inputs/couchdb/couchdb.go index bc9f31688..1b542d042 100644 --- a/plugins/inputs/couchdb/couchdb.go +++ b/plugins/inputs/couchdb/couchdb.go @@ -80,7 +80,9 @@ type ( } CouchDB struct { - Hosts []string `toml:"hosts"` + Hosts []string `toml:"hosts"` + BasicUsername string `toml:"basic_username"` + BasicPassword string `toml:"basic_password"` client *http.Client } @@ -95,6 +97,10 @@ func (*CouchDB) SampleConfig() string { ## Works with CouchDB stats endpoints out of the box ## Multiple Hosts from which to read CouchDB stats: hosts = ["http://localhost:8086/_stats"] + + ## Use HTTP Basic Authentication. 
+ # basic_username = "telegraf" + # basic_password = "p@ssw0rd" ` } @@ -124,7 +130,17 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri Timeout: time.Duration(4 * time.Second), } } - response, error := c.client.Get(host) + + req, err := http.NewRequest("GET", host, nil) + if err != nil { + return err + } + + if c.BasicUsername != "" || c.BasicPassword != "" { + req.SetBasicAuth(c.BasicUsername, c.BasicPassword) + } + + response, error := c.client.Do(req) if error != nil { return error } From 9bf2ef28f57f5c744732306f7b807ad4924de262 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 19 Dec 2018 13:00:19 -0800 Subject: [PATCH 0439/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index caee74f6d..288e5277b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ - [#5069](https://github.com/influxdata/telegraf/pull/5069): Add running field to procstat_lookup. - [#5116](https://github.com/influxdata/telegraf/pull/5116): Include DEVLINKS in available diskio udev properties. - [#5149](https://github.com/influxdata/telegraf/pull/5149): Add micro and nanosecond unix timestamp support to JSON parser. +- [#5160](https://github.com/influxdata/telegraf/pull/5160): Add support for basic auth to couchdb input. ## v1.9.2 [unreleased] From e85e6bd3e7575ae166775cd683ab8a36a7439fdf Mon Sep 17 00:00:00 2001 From: Maciej Mencner <12004055+mmencner@users.noreply.github.com> Date: Wed, 19 Dec 2018 15:51:24 -0800 Subject: [PATCH 0440/1815] Fix num_remapped_pgs field in ceph plugin (#5167) --- plugins/inputs/ceph/ceph.go | 16 ++++++++-------- plugins/inputs/ceph/ceph_test.go | 14 +++++++------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index e4d6ff249..95f50958c 100644 --- a/plugins/inputs/ceph/ceph.go +++ b/plugins/inputs/ceph/ceph.go @@ -328,7 +328,7 @@ type CephStatus struct { NumInOSDs float64 `json:"num_in_osds"` Full bool `json:"full"` NearFull bool `json:"nearfull"` - NumRemappedPGs float64 `json:"num_rempapped_pgs"` + NumRemappedPGs float64 `json:"num_remapped_pgs"` } `json:"osdmap"` } `json:"osdmap"` PGMap struct { @@ -373,13 +373,13 @@ func decodeStatus(acc telegraf.Accumulator, input string) error { // decodeStatusOsdmap decodes the OSD map portion of the output of 'ceph -s' func decodeStatusOsdmap(acc telegraf.Accumulator, data *CephStatus) error { fields := map[string]interface{}{ - "epoch": data.OSDMap.OSDMap.Epoch, - "num_osds": data.OSDMap.OSDMap.NumOSDs, - "num_up_osds": data.OSDMap.OSDMap.NumUpOSDs, - "num_in_osds": data.OSDMap.OSDMap.NumInOSDs, - "full": data.OSDMap.OSDMap.Full, - "nearfull": data.OSDMap.OSDMap.NearFull, - "num_rempapped_pgs": data.OSDMap.OSDMap.NumRemappedPGs, + "epoch": data.OSDMap.OSDMap.Epoch, + "num_osds": data.OSDMap.OSDMap.NumOSDs, + "num_up_osds": data.OSDMap.OSDMap.NumUpOSDs, + "num_in_osds": data.OSDMap.OSDMap.NumInOSDs, + "full": data.OSDMap.OSDMap.Full, + "nearfull": data.OSDMap.OSDMap.NearFull, + "num_remapped_pgs": data.OSDMap.OSDMap.NumRemappedPGs, } acc.AddFields("ceph_osdmap", fields, map[string]string{}) return nil diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index 8197d0575..a0365c8fb 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -851,13 +851,13 @@ var cephStatusResults = []expectedResult{ { metric: "ceph_osdmap", fields: map[string]interface{}{ - "epoch": float64(21734), - "num_osds": 
float64(24), - "num_up_osds": float64(24), - "num_in_osds": float64(24), - "full": false, - "nearfull": false, - "num_rempapped_pgs": float64(0), + "epoch": float64(21734), + "num_osds": float64(24), + "num_up_osds": float64(24), + "num_in_osds": float64(24), + "full": false, + "nearfull": false, + "num_remapped_pgs": float64(0), }, tags: map[string]string{}, }, From 30d4088b4e699440b847dfa0d607b2d39f5eef45 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 19 Dec 2018 15:53:33 -0800 Subject: [PATCH 0441/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 288e5277b..906990ff8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ - [#5143](https://github.com/influxdata/telegraf/issues/5143): Fix panic with prometheus input plugin on shutdown. - [#4482](https://github.com/influxdata/telegraf/issues/4482): Support non-transparent framing of syslog messages. - [#5151](https://github.com/influxdata/telegraf/issues/5151): Apply global and plugin level metric modifications before filtering. +- [#5167](https://github.com/influxdata/telegraf/pull/5167): Fix num_remapped_pgs field in ceph plugin. ## v1.9.1 [2018-12-11] From d043da1976abf485f685b4631219b90395bdbf1b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 19 Dec 2018 16:16:39 -0800 Subject: [PATCH 0442/1815] Fix link in grok documentation --- plugins/parsers/grok/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/parsers/grok/README.md b/plugins/parsers/grok/README.md index 03473fa95..e30831ae9 100644 --- a/plugins/parsers/grok/README.md +++ b/plugins/parsers/grok/README.md @@ -59,7 +59,7 @@ To match a comma decimal point you can use a period. For example `%{TIMESTAMP:t To match a comma decimal point you can use a period in the pattern string. See https://golang.org/pkg/time/#Parse for more details. -Telegraf has many of its own [built-in patterns](./grok/patterns/influx-patterns), +Telegraf has many of its own [built-in patterns](./patterns/influx-patterns), as well as support for most of [logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns). _Golang regular expressions do not support lookahead or lookbehind. From 2c3fa0907e83cd5830e95774b3013cbd48a24721 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 19 Dec 2018 16:22:43 -0800 Subject: [PATCH 0443/1815] Even more fix link in grok documentation --- plugins/parsers/grok/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/parsers/grok/README.md b/plugins/parsers/grok/README.md index e30831ae9..9241af59f 100644 --- a/plugins/parsers/grok/README.md +++ b/plugins/parsers/grok/README.md @@ -59,7 +59,7 @@ To match a comma decimal point you can use a period. For example `%{TIMESTAMP:t To match a comma decimal point you can use a period in the pattern string. See https://golang.org/pkg/time/#Parse for more details. -Telegraf has many of its own [built-in patterns](./patterns/influx-patterns), +Telegraf has many of its own [built-in patterns](/blob/master/plugins/parsers/influx_patterns.go), as well as support for most of [logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns). _Golang regular expressions do not support lookahead or lookbehind. 
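The grok documentation patches above and below only repair links, but the README sections they touch revolve around the built-in pattern list. As a rough, hedged illustration of what those patterns are for, here is a minimal sketch of a grok-based input; the log file path is an assumption made for the example, while `data_format = "grok"`, `grok_patterns`, and the built-in `COMBINED_LOG_FORMAT` pattern come from the grok parser README being patched.

```toml
# Minimal sketch (not part of any patch in this series): parse an
# Apache-style access log with the built-in COMBINED_LOG_FORMAT pattern.
[[inputs.file]]
  ## The file path is an illustrative assumption.
  files = ["/var/log/apache/access.log"]
  data_format = "grok"
  grok_patterns = ["%{COMBINED_LOG_FORMAT}"]
```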
From 76f8d294e4d9ce9022e5e1e4a202f47e1940bf9f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 19 Dec 2018 16:24:00 -0800 Subject: [PATCH 0444/1815] Even more fix link in grok documentation --- plugins/parsers/grok/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/parsers/grok/README.md b/plugins/parsers/grok/README.md index 9241af59f..3b6c349e1 100644 --- a/plugins/parsers/grok/README.md +++ b/plugins/parsers/grok/README.md @@ -59,7 +59,7 @@ To match a comma decimal point you can use a period. For example `%{TIMESTAMP:t To match a comma decimal point you can use a period in the pattern string. See https://golang.org/pkg/time/#Parse for more details. -Telegraf has many of its own [built-in patterns](/blob/master/plugins/parsers/influx_patterns.go), +Telegraf has many of its own [built-in patterns](/plugins/parsers/influx_patterns.go), as well as support for most of [logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns). _Golang regular expressions do not support lookahead or lookbehind. From 5027a516c7b6e13ce636e6d6027e8dfaae8dd344 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 19 Dec 2018 16:25:06 -0800 Subject: [PATCH 0445/1815] Even more fix link in grok documentation --- plugins/parsers/grok/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/parsers/grok/README.md b/plugins/parsers/grok/README.md index 3b6c349e1..32072b631 100644 --- a/plugins/parsers/grok/README.md +++ b/plugins/parsers/grok/README.md @@ -59,7 +59,7 @@ To match a comma decimal point you can use a period. For example `%{TIMESTAMP:t To match a comma decimal point you can use a period in the pattern string. See https://golang.org/pkg/time/#Parse for more details. -Telegraf has many of its own [built-in patterns](/plugins/parsers/influx_patterns.go), +Telegraf has many of its own [built-in patterns](/plugins/parsers/grok/influx_patterns.go), as well as support for most of [logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns). _Golang regular expressions do not support lookahead or lookbehind. 
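The hunk context above also quotes the README's guidance on time layouts via the Go "reference time" (https://golang.org/pkg/time/#Parse). A hedged sketch of a custom pattern using such a layout follows; the pattern name, file path, and log shape are assumptions, while the `grok_custom_patterns` option and the `ts-"..."` and `tag` capture modifiers are taken from the grok parser documentation.

```toml
# Hedged sketch: a custom grok pattern with a Go reference-time layout.
[[inputs.file]]
  ## Path, pattern name, and log shape are illustrative assumptions.
  files = ["/var/log/myapp.log"]
  data_format = "grok"
  grok_patterns = ["%{MY_APP_LOG}"]
  grok_custom_patterns = '''
MY_APP_LOG %{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} %{WORD:level:tag} %{GREEDYDATA:message}
'''
```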
From ce8ec241001b20bdcc18a574a21fb16c97367907 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 20 Dec 2018 12:12:25 -0800 Subject: [PATCH 0446/1815] Document using posix acl in disk input --- plugins/inputs/disk/README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugins/inputs/disk/README.md b/plugins/inputs/disk/README.md index 5359cca62..2979a5f2e 100644 --- a/plugins/inputs/disk/README.md +++ b/plugins/inputs/disk/README.md @@ -59,6 +59,11 @@ $ sudo -u telegraf cat /proc/self/mounts | grep sda2 $ sudo -u telegraf stat /home ``` +It may be desired to use POSIX ACLs to provide additional access: +``` +sudo setfacl -R -m u:telegraf:X /var/lib/docker/volumes/ +``` + ### Example Output: ``` From 675178f91569c19e78648593c33470817bcdb68f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 20 Dec 2018 12:15:40 -0800 Subject: [PATCH 0447/1815] Update golang.org/x/sys to latest revision (#5174) --- Gopkg.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 8fd3e81c4..27d81c89f 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1208,7 +1208,7 @@ [[projects]] branch = "master" - digest = "1:677e38cad6833ad266ec843739d167755eda1e6f2d8af1c63102b0426ad820db" + digest = "1:6a6eed3727d0e15703d9e930d8dbe333bea09eda309d75a015d3c6dc4e5c92a6" name = "golang.org/x/sys" packages = [ "unix", @@ -1220,7 +1220,7 @@ "windows/svc/mgr", ] pruneopts = "" - revision = "ac767d655b305d4e9612f5f6e33120b9176c4ad4" + revision = "7c4c994c65f702f41ed7d6620a2cb34107576a77" [[projects]] digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4" From 9cc06702da192675e7905c83ff5d3f6a19dcc147 Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Fri, 21 Dec 2018 14:26:07 -0500 Subject: [PATCH 0448/1815] Use wavefront sdk in wavefront output (#5161) --- Gopkg.lock | 12 ++ Gopkg.toml | 4 + plugins/outputs/wavefront/README.md | 40 +++-- plugins/outputs/wavefront/wavefront.go | 154 ++++++++++---------- plugins/outputs/wavefront/wavefront_test.go | 28 +--- 5 files changed, 118 insertions(+), 120 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 27d81c89f..2d9883f04 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1093,6 +1093,17 @@ revision = "e3a01f9611c32b2362366434bcd671516e78955d" version = "v0.18.0" +[[projects]] + digest = "1:c1855527c165f0224708fbc7d76843b4b20bcb74b328f212f8d0e9c855d4c49d" + name = "github.com/wavefronthq/wavefront-sdk-go" + packages = [ + "internal", + "senders", + ] + pruneopts = "" + revision = "12511c8b82654d412b0334768d94dc080b617fd1" + version = "v0.9.0" + [[projects]] branch = "master" digest = "1:98ed05e9796df287b90c1d96854e3913c8e349dbc546412d3cabb472ecf4b417" @@ -1545,6 +1556,7 @@ "github.com/vmware/govmomi/vim25/mo", "github.com/vmware/govmomi/vim25/soap", "github.com/vmware/govmomi/vim25/types", + "github.com/wavefronthq/wavefront-sdk-go/senders", "github.com/wvanbergen/kafka/consumergroup", "golang.org/x/net/context", "golang.org/x/net/html/charset", diff --git a/Gopkg.toml b/Gopkg.toml index 3b5c1b917..3e430b4c3 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -246,6 +246,10 @@ name = "github.com/vishvananda/netlink" revision = "b2de5d10e38ecce8607e6b438b6d174f389a004e" +[[constraint]] + name = "github.com/wavefronthq/wavefront-sdk-go" + version = "v0.9.0" + [[constraint]] name = "github.com/karrick/godirwalk" version = "1.7.5" diff --git a/plugins/outputs/wavefront/README.md b/plugins/outputs/wavefront/README.md index be8fcd7dc..bc2156b13 100644 --- a/plugins/outputs/wavefront/README.md +++ 
b/plugins/outputs/wavefront/README.md @@ -6,25 +6,30 @@ This plugin writes to a [Wavefront](https://www.wavefront.com) proxy, in Wavefro ### Configuration: ```toml -# Configuration for Wavefront output -[[outputs.wavefront]] - ## DNS name of the wavefront proxy server - host = "wavefront.example.com" + ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy + ## If using Wavefront Proxy, also specify port. example: http://proxyserver:2878 + url = "https://metrics.wavefront.com" - ## Port that the Wavefront proxy server listens on - port = 2878 + ## Authentication Token for Wavefront. Only required if using Direct Ingestion + #token = "DUMMY_TOKEN" + + ## DNS name of the wavefront proxy server. Do not use if url is specified + #host = "wavefront.example.com" + + ## Port that the Wavefront proxy server listens on. Do not use if url is specified + #port = 2878 ## prefix for metrics keys #prefix = "my.specific.prefix." - ## wether to use "value" for name of simple fields. default is false + ## whether to use "value" for name of simple fields. default is false #simple_fields = false - ## character to use between metric and field name. default is . (dot) + ## character to use between metric and field name. default is . (dot) #metric_separator = "." - ## Convert metric name paths to use metricSeperator character - ## When true will convert all _ (underscore) chartacters in final metric name. default is true + ## Convert metric name paths to use metricSeparator character + ## When true will convert all _ (underscore) characters in final metric name. default is true #convert_paths = true ## Use Regex to sanitize metric and tag names from invalid characters @@ -32,18 +37,10 @@ This plugin writes to a [Wavefront](https://www.wavefront.com) proxy, in Wavefro #use_regex = false ## point tags to use as the source name for Wavefront (if none found, host will be used) - #source_override = ["hostname", "agent_host", "node_host"] + #source_override = ["hostname", "address", "agent_host", "node_host"] ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true #convert_bool = true - - ## Define a mapping, namespaced by metric prefix, from string values to numeric values - ## The example below maps "green" -> 1.0, "yellow" -> 0.5, "red" -> 0.0 for - ## any metrics beginning with "elasticsearch" - #[[outputs.wavefront.string_to_number.elasticsearch]] - # green = 1.0 - # yellow = 0.5 - # red = 0.0 ``` @@ -76,6 +73,5 @@ More information about the Wavefront data format is available [here](https://com ### Allowed values for metrics -Wavefront allows `integers` and `floats` as input values. It will ignore most `strings`, but when configured -will map certain `strings` to numeric values. By default it also maps `bool` values to numeric, false -> 0.0, -true -> 1.0 \ No newline at end of file +Wavefront allows `integers` and `floats` as input values. By default it also maps `bool` values to numeric, false -> 0.0, +true -> 1.0. To map `strings` use the [enum](../../processors/enum) processor plugin. 
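[Note: for readers migrating off the deprecated `string_to_number` option, an enum mapping equivalent to the old green/yellow/red example might look roughly like the following — verify the exact schema against the enum processor README for your Telegraf version:]

```toml
[[processors.enum]]
  [[processors.enum.mapping]]
    ## field whose string values are converted before reaching the output
    field = "status"
    [processors.enum.mapping.value_mappings]
      green = 1.0
      yellow = 0.5
      red = 0.0
```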
diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index ef36d1804..257c5512e 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -1,24 +1,22 @@ package wavefront import ( - "bytes" "fmt" "log" - "net" "regexp" - "strconv" "strings" - "time" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" + wavefront "github.com/wavefronthq/wavefront-sdk-go/senders" ) type Wavefront struct { - Prefix string + Url string + Token string Host string Port int + Prefix string SimpleFields bool MetricSeparator string ConvertPaths bool @@ -26,6 +24,8 @@ type Wavefront struct { UseRegex bool SourceOverride []string StringToNumber map[string][]map[string]float64 + + sender wavefront.Sender } // catch many of the invalid chars that could appear in a metric or tag name @@ -40,43 +40,49 @@ var sanitizedChars = strings.NewReplacer( // instead of Replacer which may miss some special characters we can use a regex pattern, but this is significantly slower than Replacer var sanitizedRegex = regexp.MustCompile("[^a-zA-Z\\d_.-]") -var tagValueReplacer = strings.NewReplacer("\"", "\\\"", "*", "-") +var tagValueReplacer = strings.NewReplacer("*", "-") var pathReplacer = strings.NewReplacer("_", "_") var sampleConfig = ` - ## DNS name of the wavefront proxy server - host = "wavefront.example.com" + ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy + ## If using Wavefront Proxy, also specify port. example: http://proxyserver:2878 + url = "https://metrics.wavefront.com" - ## Port that the Wavefront proxy server listens on - port = 2878 + ## Authentication Token for Wavefront. Only required if using Direct Ingestion + #token = "DUMMY_TOKEN" + + ## DNS name of the wavefront proxy server. Do not use if url is specified + #host = "wavefront.example.com" + + ## Port that the Wavefront proxy server listens on. Do not use if url is specified + #port = 2878 ## prefix for metrics keys #prefix = "my.specific.prefix." - ## whether to use "value" for name of simple fields + ## whether to use "value" for name of simple fields. default is false #simple_fields = false - ## character to use between metric and field name. defaults to . (dot) + ## character to use between metric and field name. default is . (dot) #metric_separator = "." - ## Convert metric name paths to use metricSeperator character - ## When true (default) will convert all _ (underscore) chartacters in final metric name + ## Convert metric name paths to use metricSeparator character + ## When true will convert all _ (underscore) characters in final metric name. default is true #convert_paths = true ## Use Regex to sanitize metric and tag names from invalid characters - ## Regex is more thorough, but significantly slower + ## Regex is more thorough, but significantly slower. default is false #use_regex = false ## point tags to use as the source name for Wavefront (if none found, host will be used) - #source_override = ["hostname", "agent_host", "node_host"] + #source_override = ["hostname", "address", "agent_host", "node_host"] - ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default true + ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. 
default is true #convert_bool = true ## Define a mapping, namespaced by metric prefix, from string values to numeric values - ## The example below maps "green" -> 1.0, "yellow" -> 0.5, "red" -> 0.0 for - ## any metrics beginning with "elasticsearch" + ## deprecated in 1.9; use the enum processor plugin #[[outputs.wavefront.string_to_number.elasticsearch]] # green = 1.0 # yellow = 0.5 @@ -92,44 +98,51 @@ type MetricPoint struct { } func (w *Wavefront) Connect() error { + + if len(w.StringToNumber) > 0 { + log.Print("W! [outputs.wavefront] The string_to_number option is deprecated; please use the enum processor instead") + } + + if w.Url != "" { + log.Printf("D! [outputs.wavefront] connecting over http/https using Url: %s", w.Url) + sender, err := wavefront.NewDirectSender(&wavefront.DirectConfiguration{ + Server: w.Url, + Token: w.Token, + FlushIntervalSeconds: 5, + }) + if err != nil { + return fmt.Errorf("Wavefront: Could not create Wavefront Sender for Url: %s", w.Url) + } + w.sender = sender + } else { + log.Printf("D! Output [wavefront] connecting over tcp using Host: %s and Port: %d", w.Host, w.Port) + sender, err := wavefront.NewProxySender(&wavefront.ProxyConfiguration{ + Host: w.Host, + MetricsPort: w.Port, + FlushIntervalSeconds: 5, + }) + if err != nil { + return fmt.Errorf("Wavefront: Could not create Wavefront Sender for Host: %s and Port: %d", w.Host, w.Port) + } + w.sender = sender + } + if w.ConvertPaths && w.MetricSeparator == "_" { w.ConvertPaths = false } if w.ConvertPaths { pathReplacer = strings.NewReplacer("_", w.MetricSeparator) } - - // Test Connection to Wavefront proxy Server - uri := fmt.Sprintf("%s:%d", w.Host, w.Port) - _, err := net.ResolveTCPAddr("tcp", uri) - if err != nil { - return fmt.Errorf("Wavefront: TCP address cannot be resolved %s", err.Error()) - } - connection, err := net.Dial("tcp", uri) - if err != nil { - return fmt.Errorf("Wavefront: TCP connect fail %s", err.Error()) - } - defer connection.Close() return nil } func (w *Wavefront) Write(metrics []telegraf.Metric) error { - // Send Data to Wavefront proxy Server - uri := fmt.Sprintf("%s:%d", w.Host, w.Port) - connection, err := net.Dial("tcp", uri) - if err != nil { - return fmt.Errorf("Wavefront: TCP connect fail %s", err.Error()) - } - defer connection.Close() - connection.SetWriteDeadline(time.Now().Add(5 * time.Second)) - for _, m := range metrics { - for _, metricPoint := range buildMetrics(m, w) { - metricLine := formatMetricPoint(metricPoint, w) - _, err := connection.Write([]byte(metricLine)) + for _, point := range buildMetrics(m, w) { + err := w.sender.SendMetric(point.Metric, point.Value, point.Timestamp, point.Source, point.Tags) if err != nil { - return fmt.Errorf("Wavefront: TCP writing error %s", err.Error()) + return fmt.Errorf("Wavefront sending error: %s", err.Error()) } } } @@ -165,7 +178,7 @@ func buildMetrics(m telegraf.Metric, w *Wavefront) []*MetricPoint { metricValue, buildError := buildValue(value, metric.Metric, w) if buildError != nil { - log.Printf("D! Output [wavefront] %s\n", buildError.Error()) + log.Printf("D! 
[outputs.wavefront] %s\n", buildError.Error()) continue } metric.Value = metricValue @@ -188,8 +201,8 @@ func buildTags(mTags map[string]string, w *Wavefront) (string, map[string]string } } + // find source, use source_override property if needed var source string - if s, ok := mTags["source"]; ok { source = s delete(mTags, "source") @@ -214,10 +227,25 @@ func buildTags(mTags map[string]string, w *Wavefront) (string, map[string]string source = mTags["host"] } } + source = tagValueReplacer.Replace(source) + // remove default host tag delete(mTags, "host") - return tagValueReplacer.Replace(source), mTags + // sanitize tag keys and values + tags := make(map[string]string) + for k, v := range mTags { + var key string + if w.UseRegex { + key = sanitizedRegex.ReplaceAllLiteralString(k, "-") + } else { + key = sanitizedChars.Replace(k) + } + val := tagValueReplacer.Replace(v) + tags[key] = val + } + + return source, tags } func buildValue(v interface{}, name string, w *Wavefront) (float64, error) { @@ -255,34 +283,6 @@ func buildValue(v interface{}, name string, w *Wavefront) (float64, error) { return 0, fmt.Errorf("unexpected type: %T, with value: %v, for: %s", v, v, name) } -func formatMetricPoint(metricPoint *MetricPoint, w *Wavefront) string { - buffer := bytes.NewBufferString("") - buffer.WriteString(metricPoint.Metric) - buffer.WriteString(" ") - buffer.WriteString(strconv.FormatFloat(metricPoint.Value, 'f', 6, 64)) - buffer.WriteString(" ") - buffer.WriteString(strconv.FormatInt(metricPoint.Timestamp, 10)) - buffer.WriteString(" source=\"") - buffer.WriteString(metricPoint.Source) - buffer.WriteString("\"") - - for k, v := range metricPoint.Tags { - buffer.WriteString(" ") - if w.UseRegex { - buffer.WriteString(sanitizedRegex.ReplaceAllLiteralString(k, "-")) - } else { - buffer.WriteString(sanitizedChars.Replace(k)) - } - buffer.WriteString("=\"") - buffer.WriteString(tagValueReplacer.Replace(v)) - buffer.WriteString("\"") - } - - buffer.WriteString("\n") - - return buffer.String() -} - func (w *Wavefront) SampleConfig() string { return sampleConfig } @@ -292,12 +292,14 @@ func (w *Wavefront) Description() string { } func (w *Wavefront) Close() error { + w.sender.Close() return nil } func init() { outputs.Add("wavefront", func() telegraf.Output { return &Wavefront{ + Token: "DUMMY_TOKEN", MetricSeparator: ".", ConvertPaths: true, ConvertBool: true, diff --git a/plugins/outputs/wavefront/wavefront_test.go b/plugins/outputs/wavefront/wavefront_test.go index f1722e668..1fda6c7ae 100644 --- a/plugins/outputs/wavefront/wavefront_test.go +++ b/plugins/outputs/wavefront/wavefront_test.go @@ -140,6 +140,11 @@ func TestBuildTags(t *testing.T) { "aaa", map[string]string{"dc": "bbb"}, }, + { + map[string]string{"host": "aaa", "dc": "a*$a\\abbb\"som/et|hing else", "bad#k%e/y that*sho\\uld work": "value1"}, + "aaa", + map[string]string{"dc": "a-$a\\abbb\"som/et|hing else", "bad-k-e-y-that-sho-uld-work": "value1"}, + }, } for _, tt := range tagtests { @@ -189,7 +194,7 @@ func TestBuildTagsWithSource(t *testing.T) { }, { map[string]string{"something": "abc", "host": "r*@l\"Ho/st"}, - "r-@l\\\"Ho/st", + "r-@l\"Ho/st", map[string]string{"something": "abc"}, }, } @@ -264,27 +269,6 @@ func TestBuildValueString(t *testing.T) { } -func TestFormatMetricPoint(t *testing.T) { - w := defaultWavefront() - - testpoint := &MetricPoint{ - Metric: "test.metric.something", - Value: 123.456, - Timestamp: 1257894000, - Source: "testSource", - Tags: map[string]string{"sp*c!@l\"-ch/rs": "sp*c!@l/ val\"ue"}, - } - - expected 
:= "test.metric.something 123.456000 1257894000 source=\"testSource\" sp-c--l--ch-rs=\"sp-c!@l/ val\\\"ue\"\n" - - received := formatMetricPoint(testpoint, w) - - if expected != received { - t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", expected, received) - - } -} - // Benchmarks to test performance of string replacement via Regex and Replacer var testString = "this_is*my!test/string\\for=replacement" From acd176cf4255a096dbf39af209877e887bde1c35 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 21 Dec 2018 11:30:31 -0800 Subject: [PATCH 0449/1815] Update changelog --- CHANGELOG.md | 1 + docs/LICENSE_OF_DEPENDENCIES.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 906990ff8..39444fcae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ - [#5116](https://github.com/influxdata/telegraf/pull/5116): Include DEVLINKS in available diskio udev properties. - [#5149](https://github.com/influxdata/telegraf/pull/5149): Add micro and nanosecond unix timestamp support to JSON parser. - [#5160](https://github.com/influxdata/telegraf/pull/5160): Add support for basic auth to couchdb input. +- [#5161](https://github.com/influxdata/telegraf/pull/5161): Add support in wavefront output for the Wavefront Direction Ingestion API. ## v1.9.2 [unreleased] diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 43d096992..df178cae6 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -107,6 +107,7 @@ following works: - github.com/vishvananda/netns [Apache License 2.0](https://github.com/vishvananda/netns/blob/master/LICENSE) - github.com/vjeantet/grok [Apache License 2.0](https://github.com/vjeantet/grok/blob/master/LICENSE) - github.com/vmware/govmomi [Apache License 2.0](https://github.com/vmware/govmomi/blob/master/LICENSE.txt) +- github.com/wavefrontHQ/wavefront-sdk-go [Apache License 2.0](https://github.com/wavefrontHQ/wavefront-sdk-go/blob/master/LICENSE) - github.com/wvanbergen/kafka [MIT License](https://github.com/wvanbergen/kafka/blob/master/LICENSE) - github.com/wvanbergen/kazoo-go [MIT License](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE) - github.com/yuin/gopher-lua [MIT License](https://github.com/yuin/gopher-lua/blob/master/LICENSE) From 757132baf41fe3257097645eceb36d6d5456dc63 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Wed, 26 Dec 2018 14:15:13 -0700 Subject: [PATCH 0450/1815] Add test for include/exclude filter (#5193) --- filter/filter_test.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/filter/filter_test.go b/filter/filter_test.go index 2f52e036a..18ebcd795 100644 --- a/filter/filter_test.go +++ b/filter/filter_test.go @@ -37,6 +37,24 @@ func TestCompile(t *testing.T) { assert.True(t, f.Match("network")) } +func TestIncludeExclude(t *testing.T) { + tags := []string{} + labels := []string{"best", "com_influxdata", "timeseries", "com_influxdata_telegraf", "ever"} + + filter, err := NewIncludeExcludeFilter([]string{}, []string{"com_influx*"}) + if err != nil { + t.Fatalf("Failed to create include/exclude filter - %v", err) + } + + for i := range labels { + if filter.Match(labels[i]) { + tags = append(tags, labels[i]) + } + } + + assert.Equal(t, []string{"best", "timeseries", "ever"}, tags) +} + var benchbool bool func BenchmarkFilterSingleNoGlobFalse(b *testing.B) { From 10a067a6999ced7c2bcbb3272f4d635819ea68b0 Mon Sep 17 00:00:00 2001 From: Nic Grobler Date: Wed, 26 Dec 2018 23:51:31 +0100 
Subject: [PATCH 0451/1815] Add PDH_NO_DATA to known counter error codes in win_perf_counters (#5182) --- .../win_perf_counters/win_perf_counters.go | 3 +- .../win_perf_counters_test.go | 119 +++++++++++------- 2 files changed, 79 insertions(+), 43 deletions(-) diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index 06a1a333c..2bf50e5cc 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -394,7 +394,8 @@ func addCounterMeasurement(metric *counter, instanceName string, value float64, func isKnownCounterDataError(err error) bool { if pdhErr, ok := err.(*PdhError); ok && (pdhErr.ErrorCode == PDH_INVALID_DATA || pdhErr.ErrorCode == PDH_CALC_NEGATIVE_VALUE || - pdhErr.ErrorCode == PDH_CSTATUS_INVALID_DATA) { + pdhErr.ErrorCode == PDH_CSTATUS_INVALID_DATA || + pdhErr.ErrorCode == PDH_NO_DATA) { return true } return false diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_test.go index 81959ef8c..5052fb7a2 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_test.go @@ -5,18 +5,20 @@ package win_perf_counters import ( "errors" "fmt" + "testing" + "time" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "testing" - "time" ) type testCounter struct { handle PDH_HCOUNTER path string value float64 + status uint32 // allows for tests against specific pdh_error codes, rather than assuming all cases of "value == 0" to indicate error conditions } type FakePerformanceQuery struct { counters map[string]testCounter @@ -99,17 +101,10 @@ func (m *FakePerformanceQuery) GetFormattedCounterValueDouble(counterHandle PDH_ } for _, counter := range m.counters { if counter.handle == counterHandle { - if counter.value > 0 { - return counter.value, nil - } else { - if counter.value == 0 { - return 0, NewPdhError(PDH_INVALID_DATA) - } else if counter.value == -1 { - return 0, NewPdhError(PDH_CALC_NEGATIVE_VALUE) - } else { - return 0, NewPdhError(PDH_ACCESS_DENIED) - } + if counter.status > 0 { + return 0, NewPdhError(counter.status) } + return counter.value, nil } } return 0, fmt.Errorf("GetFormattedCounterValueDouble: invalid handle: %d", counterHandle) @@ -143,17 +138,10 @@ func (m *FakePerformanceQuery) GetFormattedCounterArrayDouble(hCounter PDH_HCOUN for _, p := range e { counter := m.findCounterByPath(p) if counter != nil { - if counter.value > 0 { - counters = append(counters, *counter.ToCounterValue()) - } else { - if counter.value == 0 { - return nil, NewPdhError(PDH_INVALID_DATA) - } else if counter.value == -1 { - return nil, NewPdhError(PDH_CALC_NEGATIVE_VALUE) - } else { - return nil, NewPdhError(PDH_ACCESS_DENIED) - } + if counter.status > 0 { + return nil, NewPdhError(counter.status) } + counters = append(counters, *counter.ToCounterValue()) } else { return nil, fmt.Errorf("GetFormattedCounterArrayDouble: invalid counter : %s", p) } @@ -199,13 +187,14 @@ func createPerfObject(measurement string, object string, instances []string, cou return perfobjects } -func createCounterMap(counterPaths []string, values []float64) map[string]testCounter { +func createCounterMap(counterPaths []string, values []float64, status []uint32) map[string]testCounter { counters := make(map[string]testCounter) for i, cp := 
range counterPaths { counters[cp] = testCounter{ PDH_HCOUNTER(i), cp, values[i], + status[i], } } return counters @@ -259,7 +248,7 @@ func TestAddItemSimple(t *testing.T) { var err error cps1 := []string{"\\O(I)\\C"} m := Win_PerfCounters{PrintValid: false, Object: nil, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.1}), + counters: createCounterMap(cps1, []float64{1.1}, []uint32{0}), expandPaths: map[string][]string{ cps1[0]: cps1, }, @@ -277,7 +266,7 @@ func TestAddItemInvalidCountPath(t *testing.T) { var err error cps1 := []string{"\\O\\C"} m := Win_PerfCounters{PrintValid: false, Object: nil, UseWildcardsExpansion: true, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.1}), + counters: createCounterMap(cps1, []float64{1.1}, []uint32{0}), expandPaths: map[string][]string{ cps1[0]: {"\\O/C"}, }, @@ -296,7 +285,7 @@ func TestParseConfigBasic(t *testing.T) { perfObjects := createPerfObject("m", "O", []string{"I1", "I2"}, []string{"C1", "C2"}, false, false) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3, 1.4}), + counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3, 1.4}, []uint32{0, 0, 0, 0}), expandPaths: map[string][]string{ cps1[0]: {cps1[0]}, cps1[1]: {cps1[1]}, @@ -330,7 +319,7 @@ func TestParseConfigNoInstance(t *testing.T) { perfObjects := createPerfObject("m", "O", []string{"------"}, []string{"C1", "C2"}, false, false) cps1 := []string{"\\O\\C1", "\\O\\C2"} m := Win_PerfCounters{PrintValid: false, Object: perfObjects, UseWildcardsExpansion: false, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.1, 1.2}), + counters: createCounterMap(cps1, []float64{1.1, 1.2}, []uint32{0, 0}), expandPaths: map[string][]string{ cps1[0]: {cps1[0]}, cps1[1]: {cps1[1]}, @@ -362,7 +351,7 @@ func TestParseConfigInvalidCounterError(t *testing.T) { perfObjects := createPerfObject("m", "O", []string{"I1", "I2"}, []string{"C1", "C2"}, true, false) cps1 := []string{"\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3}), + counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3}, []uint32{0, 0, 0}), expandPaths: map[string][]string{ cps1[0]: {cps1[0]}, cps1[1]: {cps1[1]}, @@ -393,7 +382,7 @@ func TestParseConfigInvalidCounterNoError(t *testing.T) { perfObjects := createPerfObject("m", "O", []string{"I1", "I2"}, []string{"C1", "C2"}, false, false) cps1 := []string{"\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3}), + counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3}, []uint32{0, 0, 0}), expandPaths: map[string][]string{ cps1[0]: {cps1[0]}, cps1[1]: {cps1[1]}, @@ -425,7 +414,7 @@ func TestParseConfigTotalExpansion(t *testing.T) { perfObjects := createPerfObject("m", "O", []string{"*"}, []string{"*"}, true, true) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(_Total)\\C1", "\\O(_Total)\\C2"} m := Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: true, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}), + counters: createCounterMap(append(cps1, 
"\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}), expandPaths: map[string][]string{ "\\O(*)\\*": cps1, }, @@ -442,7 +431,7 @@ func TestParseConfigTotalExpansion(t *testing.T) { perfObjects[0].IncludeTotal = false m = Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: true, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}), + counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}), expandPaths: map[string][]string{ "\\O(*)\\*": cps1, }, @@ -462,7 +451,7 @@ func TestParseConfigExpand(t *testing.T) { perfObjects := createPerfObject("m", "O", []string{"*"}, []string{"*"}, false, false) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} m := Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: true, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}), + counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}), expandPaths: map[string][]string{ "\\O(*)\\*": cps1, }, @@ -486,7 +475,7 @@ func TestSimpleGather(t *testing.T) { perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false) cp1 := "\\O(I)\\C" m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap([]string{cp1}, []float64{1.2}), + counters: createCounterMap([]string{cp1}, []float64{1.2}, []uint32{0}), expandPaths: map[string][]string{ cp1: {cp1}, }, @@ -516,6 +505,48 @@ func TestSimpleGather(t *testing.T) { acc1.AssertContainsTaggedFields(t, measurement, fields1, tags1) } +func TestSimpleGatherNoData(t *testing.T) { + var err error + if testing.Short() { + t.Skip("Skipping long taking test in short mode") + } + measurement := "test" + perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false) + cp1 := "\\O(I)\\C" + m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ + counters: createCounterMap([]string{cp1}, []float64{1.2}, []uint32{PDH_NO_DATA}), + expandPaths: map[string][]string{ + cp1: {cp1}, + }, + vistaAndNewer: false, + }} + var acc1 testutil.Accumulator + err = m.Gather(&acc1) + // this "PDH_NO_DATA" error should not be returned to caller, but checked, and handled + require.NoError(t, err) + + // fields would contain if the error was ignored, and we simply added garbage + fields1 := map[string]interface{}{ + "C": float32(1.2), + } + // tags would contain if the error was ignored, and we simply added garbage + tags1 := map[string]string{ + "instance": "I", + "objectname": "O", + } + acc1.AssertDoesNotContainsTaggedFields(t, measurement, fields1, tags1) + + m.UseWildcardsExpansion = true + m.counters = nil + m.lastRefreshed = time.Time{} + + var acc2 testutil.Accumulator + + err = m.Gather(&acc2) + require.NoError(t, err) + acc1.AssertDoesNotContainsTaggedFields(t, measurement, fields1, tags1) +} + func TestSimpleGatherWithTimestamp(t *testing.T) { var err error if testing.Short() { @@ -525,7 +556,7 @@ func TestSimpleGatherWithTimestamp(t *testing.T) { perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false) cp1 := "\\O(I)\\C" m := Win_PerfCounters{PrintValid: false, UsePerfCounterTime: true, Object: perfObjects, query: &FakePerformanceQuery{ - counters: 
createCounterMap([]string{cp1}, []float64{1.2}), + counters: createCounterMap([]string{cp1}, []float64{1.2}, []uint32{0}), expandPaths: map[string][]string{ cp1: {cp1}, }, @@ -548,6 +579,7 @@ func TestSimpleGatherWithTimestamp(t *testing.T) { func TestGatherError(t *testing.T) { var err error + expected_error := "error while getting value for counter \\O(I)\\C: The information passed is not valid.\r\n" if testing.Short() { t.Skip("Skipping long taking test in short mode") } @@ -555,7 +587,7 @@ func TestGatherError(t *testing.T) { perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false) cp1 := "\\O(I)\\C" m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap([]string{cp1}, []float64{-2}), + counters: createCounterMap([]string{cp1}, []float64{-2}, []uint32{PDH_PLA_VALIDATION_WARNING}), expandPaths: map[string][]string{ cp1: {cp1}, }, @@ -564,6 +596,7 @@ func TestGatherError(t *testing.T) { var acc1 testutil.Accumulator err = m.Gather(&acc1) require.Error(t, err) + require.Equal(t, expected_error, err.Error()) m.UseWildcardsExpansion = true m.counters = nil @@ -573,6 +606,7 @@ func TestGatherError(t *testing.T) { err = m.Gather(&acc2) require.Error(t, err) + require.Equal(t, expected_error, err.Error()) } func TestGatherInvalidDataIgnore(t *testing.T) { @@ -584,7 +618,7 @@ func TestGatherInvalidDataIgnore(t *testing.T) { perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C1", "C2", "C3"}, false, false) cps1 := []string{"\\O(I)\\C1", "\\O(I)\\C2", "\\O(I)\\C3"} m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.2, -1, 0}), + counters: createCounterMap(cps1, []float64{1.2, 1, 0}, []uint32{0, PDH_INVALID_DATA, 0}), expandPaths: map[string][]string{ cps1[0]: {cps1[0]}, cps1[1]: {cps1[1]}, @@ -598,6 +632,7 @@ func TestGatherInvalidDataIgnore(t *testing.T) { fields1 := map[string]interface{}{ "C1": float32(1.2), + "C3": float32(0), } tags1 := map[string]string{ "instance": "I", @@ -625,7 +660,7 @@ func TestGatherRefreshingWithExpansion(t *testing.T) { perfObjects := createPerfObject(measurement, "O", []string{"*"}, []string{"*"}, true, false) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} fpm := &FakePerformanceQuery{ - counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}), + counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}), expandPaths: map[string][]string{ "\\O(*)\\*": cps1, }, @@ -659,7 +694,7 @@ func TestGatherRefreshingWithExpansion(t *testing.T) { acc1.AssertContainsTaggedFields(t, measurement, fields2, tags2) cps2 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2", "\\O(I3)\\C1", "\\O(I3)\\C2"} fpm = &FakePerformanceQuery{ - counters: createCounterMap(append(cps2, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 0}), + counters: createCounterMap(append(cps2, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 0}, []uint32{0, 0, 0, 0, 0, 0, 0}), expandPaths: map[string][]string{ "\\O(*)\\*": cps2, }, @@ -710,7 +745,7 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { perfObjects := createPerfObject(measurement, "O", []string{"*"}, []string{"C1", "C2"}, true, false) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} fpm := &FakePerformanceQuery{ - counters: 
createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps1...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4}), + counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps1...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4}, []uint32{0, 0, 0, 0, 0, 0}), expandPaths: map[string][]string{ "\\O(*)\\C1": {cps1[0], cps1[2]}, "\\O(*)\\C2": {cps1[1], cps1[3]}, @@ -746,7 +781,7 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { //test finding new instance cps2 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2", "\\O(I3)\\C1", "\\O(I3)\\C2"} fpm = &FakePerformanceQuery{ - counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps2...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6}), + counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps2...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6}, []uint32{0, 0, 0, 0, 0, 0, 0, 0}), expandPaths: map[string][]string{ "\\O(*)\\C1": {cps2[0], cps2[2], cps2[4]}, "\\O(*)\\C2": {cps2[1], cps2[3], cps2[5]}, @@ -779,7 +814,7 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { perfObjects = createPerfObject(measurement, "O", []string{"*"}, []string{"C1", "C2", "C3"}, true, false) cps3 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I1)\\C3", "\\O(I2)\\C1", "\\O(I2)\\C2", "\\O(I2)\\C3"} fpm = &FakePerformanceQuery{ - counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2", "\\O(*)\\C3"}, cps3...), []float64{0, 0, 0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6}), + counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2", "\\O(*)\\C3"}, cps3...), []float64{0, 0, 0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6}, []uint32{0, 0, 0, 0, 0, 0, 0, 0, 0}), expandPaths: map[string][]string{ "\\O(*)\\C1": {cps3[0], cps3[3]}, "\\O(*)\\C2": {cps3[1], cps3[4]}, @@ -828,7 +863,7 @@ func TestGatherTotalNoExpansion(t *testing.T) { perfObjects := createPerfObject(measurement, "O", []string{"*"}, []string{"C1", "C2"}, true, true) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(_Total)\\C1", "\\O(_Total)\\C2"} m := Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps1...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4}), + counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps1...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4}, []uint32{0, 0, 0, 0, 0, 0}), expandPaths: map[string][]string{ "\\O(*)\\C1": {cps1[0], cps1[2]}, "\\O(*)\\C2": {cps1[1], cps1[3]}, From b1baa54cc482f89fe7f14775553dbd20d6039e0e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 26 Dec 2018 14:58:19 -0800 Subject: [PATCH 0452/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 39444fcae..26ea19356 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ - [#4482](https://github.com/influxdata/telegraf/issues/4482): Support non-transparent framing of syslog messages. - [#5151](https://github.com/influxdata/telegraf/issues/5151): Apply global and plugin level metric modifications before filtering. - [#5167](https://github.com/influxdata/telegraf/pull/5167): Fix num_remapped_pgs field in ceph plugin. +- [#5179](https://github.com/influxdata/telegraf/issues/5179): Add PDH_NO_DATA to known counter error codes in win_perf_counters. 
## v1.9.1 [2018-12-11] From c12eecc90e70001a7cdc6324d7018754937c1563 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 26 Dec 2018 18:54:50 -0800 Subject: [PATCH 0453/1815] Signal telegraf process until it exits (#5169) --- agent/agent.go | 1 + scripts/init.sh | 16 +++++++++++----- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 40ec24456..ec9aa7f32 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -134,6 +134,7 @@ func (a *Agent) Run(ctx context.Context) error { return err } + log.Printf("D! [agent] Stopped Successfully") return nil } diff --git a/scripts/init.sh b/scripts/init.sh index 668dc549e..67236d8c7 100755 --- a/scripts/init.sh +++ b/scripts/init.sh @@ -152,11 +152,17 @@ case $1 in if [ -e $pidfile ]; then pidofproc -p $pidfile $daemon > /dev/null 2>&1 && status="0" || status="$?" if [ "$status" = 0 ]; then - if killproc -p $pidfile SIGTERM && /bin/rm -rf $pidfile; then - log_success_msg "$name process was stopped" - else - log_failure_msg "$name failed to stop service" - fi + # periodically signal until process exists + while true; do + if ! pidofproc -p $pidfile $daemon > /dev/null; then + break + fi + killproc -p $pidfile SIGTERM 2>&1 >/dev/null + sleep 2 + done + + log_success_msg "$name process was stopped" + rm -f $pidfile fi else log_failure_msg "$name process is not running" From 72089042bedc8d21241694b38a4838d7ccaf7e99 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 26 Dec 2018 19:10:18 -0800 Subject: [PATCH 0454/1815] Update changelog --- CHANGELOG.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 26ea19356..62fbd1066 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,9 +13,13 @@ - [#5160](https://github.com/influxdata/telegraf/pull/5160): Add support for basic auth to couchdb input. - [#5161](https://github.com/influxdata/telegraf/pull/5161): Add support in wavefront output for the Wavefront Direction Ingestion API. +#### Bugfixes + +- [#4610](https://github.com/influxdata/telegraf/pull/4610): Fix initscript removes pidfile of restarted Telegraf process. + ## v1.9.2 [unreleased] -### Bugfixes +#### Bugfixes - [#5130](https://github.com/influxdata/telegraf/pull/5130): Increase varnishstat timeout. - [#5135](https://github.com/influxdata/telegraf/pull/5135): Remove storage calculation for non Azure managed instances and add server version. @@ -28,7 +32,7 @@ ## v1.9.1 [2018-12-11] -### Bugfixes +#### Bugfixes - [#5006](https://github.com/influxdata/telegraf/issues/5006): Fix boolean handling in splunkmetric serializer. - [#5046](https://github.com/influxdata/telegraf/issues/5046): Set default config values in jenkins input. 
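[Note: the init-script change in the patch above replaces the one-shot `killproc` with a signal-and-poll loop, so the pidfile is removed only once the process has actually exited rather than as soon as the signal is sent. The same pattern in isolation, with a hypothetical pidfile path:]

```sh
#!/bin/sh
# Signal-and-wait: keep sending SIGTERM until the process has exited,
# and only then remove the pidfile so a restarted process isn't orphaned.
pidfile=/var/run/telegraf/telegraf.pid
pid=$(cat "$pidfile")
while kill -0 "$pid" 2>/dev/null; do
    kill -TERM "$pid" 2>/dev/null
    sleep 2
done
rm -f "$pidfile"
```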
From 7497a2027baf59ff1ee3655745f141fbc4f559ba Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 26 Dec 2018 19:36:10 -0800 Subject: [PATCH 0455/1815] Deliver empty metric tracking group immediately (#5176) --- agent/accumulator_test.go | 16 +++++++++++++++- metric/tracking.go | 28 ++++++++++++++++++---------- 2 files changed, 33 insertions(+), 11 deletions(-) diff --git a/agent/accumulator_test.go b/agent/accumulator_test.go index 2bb08920f..316ad124b 100644 --- a/agent/accumulator_test.go +++ b/agent/accumulator_test.go @@ -9,7 +9,6 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -127,6 +126,21 @@ func TestSetPrecision(t *testing.T) { } } +func TestAddTrackingMetricGroupEmpty(t *testing.T) { + ch := make(chan telegraf.Metric, 10) + metrics := []telegraf.Metric{} + acc := NewAccumulator(&TestMetricMaker{}, ch).WithTracking(1) + + id := acc.AddTrackingMetricGroup(metrics) + + select { + case tracking := <-acc.Delivered(): + require.Equal(t, tracking.ID(), id) + default: + t.Fatal("empty group should be delivered immediately") + } +} + type TestMetricMaker struct { } diff --git a/metric/tracking.go b/metric/tracking.go index 83c3c7aec..3d8843240 100644 --- a/metric/tracking.go +++ b/metric/tracking.go @@ -50,7 +50,7 @@ type trackingData struct { rc int32 acceptCount int32 rejectCount int32 - notify NotifyFunc + notifyFunc NotifyFunc } func (d *trackingData) incr() { @@ -69,6 +69,16 @@ func (d *trackingData) reject() { atomic.AddInt32(&d.rejectCount, 1) } +func (d *trackingData) notify() { + d.notifyFunc( + &deliveryInfo{ + id: d.id, + accepted: int(d.acceptCount), + rejected: int(d.rejectCount), + }, + ) +} + type trackingMetric struct { telegraf.Metric d *trackingData @@ -82,7 +92,7 @@ func newTrackingMetric(metric telegraf.Metric, fn NotifyFunc) (telegraf.Metric, rc: 1, acceptCount: 0, rejectCount: 0, - notify: fn, + notifyFunc: fn, }, } @@ -98,7 +108,7 @@ func newTrackingMetricGroup(group []telegraf.Metric, fn NotifyFunc) ([]telegraf. rc: 0, acceptCount: 0, rejectCount: 0, - notify: fn, + notifyFunc: fn, } for i, m := range group { @@ -114,6 +124,10 @@ func newTrackingMetricGroup(group []telegraf.Metric, fn NotifyFunc) ([]telegraf. runtime.SetFinalizer(d, finalizer) } + if len(group) == 0 { + d.notify() + } + return group, d.id } @@ -146,13 +160,7 @@ func (m *trackingMetric) decr() { } if v == 0 { - m.d.notify( - &deliveryInfo{ - id: m.d.id, - accepted: int(m.d.acceptCount), - rejected: int(m.d.rejectCount), - }, - ) + m.d.notify() } } From c72d8a16632d1d70e2606ac1f16c675669a18681 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 26 Dec 2018 19:38:04 -0800 Subject: [PATCH 0456/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 62fbd1066..12fb970e5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,7 @@ - [#5151](https://github.com/influxdata/telegraf/issues/5151): Apply global and plugin level metric modifications before filtering. - [#5167](https://github.com/influxdata/telegraf/pull/5167): Fix num_remapped_pgs field in ceph plugin. - [#5179](https://github.com/influxdata/telegraf/issues/5179): Add PDH_NO_DATA to known counter error codes in win_perf_counters. +- [#5170](https://github.com/influxdata/telegraf/issues/5170): Fix amqp_consumer stops consuming on empty message. 
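[Note: the tracking fix above addresses the classic reference-counting edge case — an empty group never receives a decrement, so delivery must be reported at creation time. A distilled, self-contained sketch of the pattern; all names are hypothetical and not Telegraf's actual API:]

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// group tracks delivery of n items and fires done() once all are handled.
type group struct {
	remaining int32
	done      func()
}

func newGroup(n int, done func()) *group {
	g := &group{remaining: int32(n), done: done}
	if n == 0 {
		// With no members there will never be a decrement,
		// so report delivery immediately.
		g.done()
	}
	return g
}

// ack marks one item handled; the last ack triggers the notification.
func (g *group) ack() {
	if atomic.AddInt32(&g.remaining, -1) == 0 {
		g.done()
	}
}

func main() {
	newGroup(0, func() { fmt.Println("delivered") }) // prints immediately
}
```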
## v1.9.1 [2018-12-11] From dbe6f594a94126f70e3c3326f3cddb2468b2aeb8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 26 Dec 2018 19:39:34 -0800 Subject: [PATCH 0457/1815] Allow floats in valuecounter aggregator (#5168) --- plugins/aggregators/valuecounter/README.md | 5 +++-- plugins/aggregators/valuecounter/valuecounter.go | 9 --------- plugins/aggregators/valuecounter/valuecounter_test.go | 5 ++--- 3 files changed, 5 insertions(+), 14 deletions(-) diff --git a/plugins/aggregators/valuecounter/README.md b/plugins/aggregators/valuecounter/README.md index 3d132c3bb..ef68e0f4e 100644 --- a/plugins/aggregators/valuecounter/README.md +++ b/plugins/aggregators/valuecounter/README.md @@ -11,8 +11,9 @@ configuration directive. When no `fields` is provided the plugin will not count any fields. The results are emitted in fields in the format: `originalfieldname_fieldvalue = count`. -Valuecounter only works on fields of the type int, bool or string. Float fields -are being dropped to prevent the creating of too many fields. +Counting fields with a high number of potential values may produce significant +amounts of new fields and memory usage, take care to only count fields with a +limited set of values. ### Configuration: diff --git a/plugins/aggregators/valuecounter/valuecounter.go b/plugins/aggregators/valuecounter/valuecounter.go index 05f4945d2..a25c9dcaf 100644 --- a/plugins/aggregators/valuecounter/valuecounter.go +++ b/plugins/aggregators/valuecounter/valuecounter.go @@ -2,7 +2,6 @@ package valuecounter import ( "fmt" - "log" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/aggregators" @@ -68,14 +67,6 @@ func (vc *ValueCounter) Add(in telegraf.Metric) { for fk, fv := range in.Fields() { for _, cf := range vc.Fields { if fk == cf { - // Do not process float types to prevent memory from blowing up - switch fv.(type) { - default: - log.Printf("I! Valuecounter: Unsupported field type. " + - "Must be an int, string or bool. Ignoring.") - continue - case uint64, int64, string, bool: - } fn := fmt.Sprintf("%v_%v", fk, fv) vc.cache[id].fieldCount[fn]++ } diff --git a/plugins/aggregators/valuecounter/valuecounter_test.go b/plugins/aggregators/valuecounter/valuecounter_test.go index 01c68c496..8cec5f366 100644 --- a/plugins/aggregators/valuecounter/valuecounter_test.go +++ b/plugins/aggregators/valuecounter/valuecounter_test.go @@ -22,9 +22,8 @@ func NewTestValueCounter(fields []string) telegraf.Aggregator { var m1, _ = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ - "status": 200, - "somefield": 20.1, - "foobar": "bar", + "status": 200, + "foobar": "bar", }, time.Now(), ) From 9a0861f7e2ca6d2774d283b276ebc33ee929214b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 26 Dec 2018 19:40:18 -0800 Subject: [PATCH 0458/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 12fb970e5..da01dd054 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ - [#5149](https://github.com/influxdata/telegraf/pull/5149): Add micro and nanosecond unix timestamp support to JSON parser. - [#5160](https://github.com/influxdata/telegraf/pull/5160): Add support for basic auth to couchdb input. - [#5161](https://github.com/influxdata/telegraf/pull/5161): Add support in wavefront output for the Wavefront Direction Ingestion API. +- [#5168](https://github.com/influxdata/telegraf/pull/5168): Allow counting float values in valuecounter aggregator. 
#### Bugfixes From 3fbfe3acd2f4b55567c601c27640ae73e57b280a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 27 Dec 2018 13:08:19 -0800 Subject: [PATCH 0459/1815] Fix usage of loop variable in function closure (#5195) --- plugins/processors/strings/strings.go | 20 ++++++--- plugins/processors/strings/strings_test.go | 51 ++++++++++++++-------- 2 files changed, 45 insertions(+), 26 deletions(-) diff --git a/plugins/processors/strings/strings.go b/plugins/processors/strings/strings.go index 69e89f025..00c7d99b1 100644 --- a/plugins/processors/strings/strings.go +++ b/plugins/processors/strings/strings.go @@ -72,8 +72,8 @@ const sampleConfig = ` # field = "read_count" # suffix = "_count" - ## Replace substrings within field names - # [[processors.strings.trim_suffix]] + ## Replace all non-overlapping instances of old with new + # [[processors.strings.replace]] # measurement = "*" # old = ":" # new = "_" @@ -100,8 +100,8 @@ func (c *converter) convertTag(metric telegraf.Metric) { tags[c.Tag] = tv } - for tag, value := range tags { - dest := tag + for key, value := range tags { + dest := key if c.Tag != "*" && c.Dest != "" { dest = c.Dest } @@ -122,9 +122,9 @@ func (c *converter) convertField(metric telegraf.Metric) { fields[c.Field] = fv } - for tag, value := range fields { - dest := tag - if c.Tag != "*" && c.Dest != "" { + for key, value := range fields { + dest := key + if c.Field != "*" && c.Dest != "" { dest = c.Dest } if fv, ok := value.(string); ok { @@ -170,6 +170,7 @@ func (s *Strings) initOnce() { s.converters = append(s.converters, c) } for _, c := range s.Trim { + c := c if c.Cutset != "" { c.fn = func(s string) string { return strings.Trim(s, c.Cutset) } } else { @@ -178,6 +179,7 @@ func (s *Strings) initOnce() { s.converters = append(s.converters, c) } for _, c := range s.TrimLeft { + c := c if c.Cutset != "" { c.fn = func(s string) string { return strings.TrimLeft(s, c.Cutset) } } else { @@ -186,6 +188,7 @@ func (s *Strings) initOnce() { s.converters = append(s.converters, c) } for _, c := range s.TrimRight { + c := c if c.Cutset != "" { c.fn = func(s string) string { return strings.TrimRight(s, c.Cutset) } } else { @@ -194,14 +197,17 @@ func (s *Strings) initOnce() { s.converters = append(s.converters, c) } for _, c := range s.TrimPrefix { + c := c c.fn = func(s string) string { return strings.TrimPrefix(s, c.Prefix) } s.converters = append(s.converters, c) } for _, c := range s.TrimSuffix { + c := c c.fn = func(s string) string { return strings.TrimSuffix(s, c.Suffix) } s.converters = append(s.converters, c) } for _, c := range s.Replace { + c := c c.fn = func(s string) string { newString := strings.Replace(s, c.Old, c.New, -1) if newString == "" { diff --git a/plugins/processors/strings/strings_test.go b/plugins/processors/strings/strings_test.go index f1bf93419..e108c04f7 100644 --- a/plugins/processors/strings/strings_test.go +++ b/plugins/processors/strings/strings_test.go @@ -25,24 +25,6 @@ func newM1() telegraf.Metric { return m1 } -func newM2() telegraf.Metric { - m2, _ := metric.New("IIS_log", - map[string]string{ - "verb": "GET", - "resp_code": "200", - "s-computername": "MIXEDCASE_hostname", - }, - map[string]interface{}{ - "request": "/mixed/CASE/paTH/?from=-1D&to=now", - "cs-host": "AAAbbb", - "ignore_number": int64(200), - "ignore_bool": true, - }, - time.Now(), - ) - return m2 -} - func TestFieldConversions(t *testing.T) { tests := []struct { name string @@ -404,9 +386,38 @@ func TestMultipleConversions(t *testing.T) { Tag: "verb", }, }, + Replace: 
[]converter{ + { + Tag: "foo", + Old: "a", + New: "x", + }, + { + Tag: "bar", + Old: "b", + New: "y", + }, + }, } - processed := plugin.Apply(newM2()) + m, _ := metric.New("IIS_log", + map[string]string{ + "verb": "GET", + "resp_code": "200", + "s-computername": "MIXEDCASE_hostname", + "foo": "a", + "bar": "b", + }, + map[string]interface{}{ + "request": "/mixed/CASE/paTH/?from=-1D&to=now", + "cs-host": "AAAbbb", + "ignore_number": int64(200), + "ignore_bool": true, + }, + time.Now(), + ) + + processed := plugin.Apply(m) expectedFields := map[string]interface{}{ "request": "/mixed/case/path/?from=-1d&to=now", @@ -419,6 +430,8 @@ func TestMultipleConversions(t *testing.T) { "verb": "GET", "resp_code": "200", "s-computername": "mixedcase_hostname", + "foo": "x", + "bar": "y", } assert.Equal(t, expectedFields, processed[0].Fields()) From 219fad60a6086d9c889e916922f2090488bbabdf Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 27 Dec 2018 13:10:17 -0800 Subject: [PATCH 0460/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index da01dd054..5bd6dff63 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ - [#5167](https://github.com/influxdata/telegraf/pull/5167): Fix num_remapped_pgs field in ceph plugin. - [#5179](https://github.com/influxdata/telegraf/issues/5179): Add PDH_NO_DATA to known counter error codes in win_perf_counters. - [#5170](https://github.com/influxdata/telegraf/issues/5170): Fix amqp_consumer stops consuming on empty message. +- [#4906](https://github.com/influxdata/telegraf/issues/4906): Fix multiple replace tables not working in strings processor. ## v1.9.1 [2018-12-11] From 2ff3683b50515ef18162d5856db5afad49b83aae Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 27 Dec 2018 13:12:00 -0800 Subject: [PATCH 0461/1815] Improve config file environment variable documentation (#5200) --- docs/CONFIGURATION.md | 40 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 6bb6c2e09..0b2c27156 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -24,12 +24,48 @@ telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config ### Environment Variables Environment variables can be used anywhere in the config file, simply prepend -them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), -for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) +them with `$`. Replacement occurs before file parsing. For strings +the variable must be within quotes, e.g., `"$STR_VAR"`, for numbers and booleans +they should be unquoted, e.g., `$INT_VAR`, `$BOOL_VAR`. When using the `.deb` or `.rpm` packages, you can define environment variables in the `/etc/default/telegraf` file. 
+**Example**: + +`/etc/default/telegraf`: +``` +USER="alice" +INFLUX_URL="http://localhost:8086" +INFLUX_SKIP_DATABASE_CREATION="true" +INFLUX_PASSWORD="monkey123" +``` + +`/etc/telegraf.conf`: +```toml +[global_tags] + user = "$USER" + +[[inputs.mem]] + +[[outputs.influxdb]] + urls = ["$INFLUX_URL"] + skip_database_creation = $INFLUX_SKIP_DATABASE_CREATION + password = "$INFLUX_PASSWORD" +``` + +The above files will produce the following effective configuration file to be +parsed: +```toml +[global_tags] + user = "alice" + +[[outputs.influxdb]] + urls = "http://localhost:8086" + skip_database_creation = true + password = "monkey123" +``` + ### Configuration file locations The location of the configuration file can be set via the `--config` command From cb9bacfedea67be9f0e53adf6871e976e2379b4b Mon Sep 17 00:00:00 2001 From: BoheeChoi1 <46016119+BoheeChoi1@users.noreply.github.com> Date: Fri, 28 Dec 2018 06:18:29 +0900 Subject: [PATCH 0462/1815] Add forwarded records to sqlserver input (#5177) --- plugins/inputs/sqlserver/sqlserver.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index d6aa231f1..8f36255b4 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -559,7 +559,9 @@ WHERE ( 'Used memory (KB)', 'Forwarded Records/sec', 'Background Writer pages/sec', - 'Percent Log Used' + 'Percent Log Used', + 'Log Send Queue KB', + 'Redo Queue KB' ) ) OR ( object_name LIKE '%User Settable%' From 60cbdcb416cba88e3ab8fd07da9969ff5dab44d0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 27 Dec 2018 13:20:10 -0800 Subject: [PATCH 0463/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5bd6dff63..2f5d3b9fc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ - [#5160](https://github.com/influxdata/telegraf/pull/5160): Add support for basic auth to couchdb input. - [#5161](https://github.com/influxdata/telegraf/pull/5161): Add support in wavefront output for the Wavefront Direction Ingestion API. - [#5168](https://github.com/influxdata/telegraf/pull/5168): Allow counting float values in valuecounter aggregator. +- [#5177](https://github.com/influxdata/telegraf/pull/5177): Add log send and redo queue fields to sqlserver input. #### Bugfixes From 1d6ff4fe4c3b535da56565a3271aece463123794 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 28 Dec 2018 13:02:16 -0800 Subject: [PATCH 0464/1815] Add link to CodeStyle wiki page in plugin guides --- docs/AGGREGATORS.md | 2 ++ docs/INPUTS.md | 2 ++ docs/OUTPUTS.md | 2 ++ docs/PROCESSORS.md | 2 ++ 4 files changed, 8 insertions(+) diff --git a/docs/AGGREGATORS.md b/docs/AGGREGATORS.md index d0e926718..eee5b1de5 100644 --- a/docs/AGGREGATORS.md +++ b/docs/AGGREGATORS.md @@ -17,6 +17,7 @@ This section is for developers who want to create a new aggregator plugin. through it. This should be done using the builtin `HashID()` function of each metric. * When the `Reset()` function is called, all caches should be cleared. +- Follow the recommended [CodeStyle][]. 
### Aggregator Plugin Example @@ -124,3 +125,4 @@ func init() { [telegraf.Aggregator]: https://godoc.org/github.com/influxdata/telegraf#Aggregator [SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig +[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle diff --git a/docs/INPUTS.md b/docs/INPUTS.md index b1b196398..32eb9b9f5 100644 --- a/docs/INPUTS.md +++ b/docs/INPUTS.md @@ -20,6 +20,7 @@ and submit new inputs. consult the [SampleConfig][] page for the latest style guidelines. - The `Description` function should say in one line what this plugin does. +- Follow the recommended [CodeStyle][]. Let's say you've written a plugin that emits metrics about processes on the current host. @@ -137,6 +138,7 @@ Check the [amqp_consumer][] for an example implementation. [prom metric types]: https://prometheus.io/docs/concepts/metric_types/ [input data formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md [SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig +[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle [telegraf.Input]: https://godoc.org/github.com/influxdata/telegraf#Input [telegraf.ServiceInput]: https://godoc.org/github.com/influxdata/telegraf#ServiceInput [telegraf.Accumulator]: https://godoc.org/github.com/influxdata/telegraf#Accumulator diff --git a/docs/OUTPUTS.md b/docs/OUTPUTS.md index cfa8083b4..306b9ea6f 100644 --- a/docs/OUTPUTS.md +++ b/docs/OUTPUTS.md @@ -15,6 +15,7 @@ similar constructs. plugin can be configured. This is included in `telegraf config`. Please consult the [SampleConfig][] page for the latest style guidelines. - The `Description` function should say in one line what this output does. +- Follow the recommended [CodeStyle][]. ### Output Plugin Example @@ -92,4 +93,5 @@ You should also add the following to your `SampleConfig()`: [file]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/file [output data formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md [SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig +[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle [telegraf.Output]: https://godoc.org/github.com/influxdata/telegraf#Output diff --git a/docs/PROCESSORS.md b/docs/PROCESSORS.md index e1fa182ca..4f18b2d55 100644 --- a/docs/PROCESSORS.md +++ b/docs/PROCESSORS.md @@ -16,6 +16,7 @@ This section is for developers who want to create a new processor plugin. plugin can be configured. This is included in `telegraf config`. Please consult the [SampleConfig][] page for the latest style guidelines. * The `Description` function should say in one line what this processor does. +- Follow the recommended [CodeStyle][]. 
### Processor Plugin Example @@ -60,4 +61,5 @@ func init() { ``` [SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig +[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle [telegraf.Processor]: https://godoc.org/github.com/influxdata/telegraf#Processor From 78c1ffbf27a3c8c3b769bce9abf53ba97290a5fa Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Fri, 28 Dec 2018 16:24:43 -0500 Subject: [PATCH 0465/1815] Improve scalability of vsphere input (#5113) --- Gopkg.lock | 6 +- Gopkg.toml | 2 +- plugins/inputs/vsphere/README.md | 6 +- plugins/inputs/vsphere/client.go | 43 +- plugins/inputs/vsphere/endpoint.go | 568 +++++++++++++---------- plugins/inputs/vsphere/throttled_exec.go | 45 ++ plugins/inputs/vsphere/tscache.go | 8 + plugins/inputs/vsphere/vsphere.go | 9 +- plugins/inputs/vsphere/vsphere_test.go | 74 ++- plugins/inputs/vsphere/workerpool.go | 119 ----- 10 files changed, 482 insertions(+), 398 deletions(-) create mode 100644 plugins/inputs/vsphere/throttled_exec.go delete mode 100644 plugins/inputs/vsphere/workerpool.go diff --git a/Gopkg.lock b/Gopkg.lock index 2d9883f04..76a36e7cb 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1064,7 +1064,7 @@ version = "v1.0.0" [[projects]] - digest = "1:f9fe29bf856d49f9a51d6001588cb5ee5d65c8a7ff5e8b0dd5423c3a510f0833" + digest = "1:6af52ce6dae9a912aa3113f247a63cd82599760ddc328a6721c3ef0426d31ca2" name = "github.com/vmware/govmomi" packages = [ ".", @@ -1090,8 +1090,8 @@ "vim25/xml", ] pruneopts = "" - revision = "e3a01f9611c32b2362366434bcd671516e78955d" - version = "v0.18.0" + revision = "3617f28d167d448f93f282a867870f109516d2a5" + version = "v0.19.0" [[projects]] digest = "1:c1855527c165f0224708fbc7d76843b4b20bcb74b328f212f8d0e9c855d4c49d" diff --git a/Gopkg.toml b/Gopkg.toml index 3e430b4c3..b875ec208 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -228,7 +228,7 @@ [[constraint]] name = "github.com/vmware/govmomi" - version = "0.18.0" + version = "0.19.0" [[constraint]] name = "github.com/Azure/go-autorest" diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md index 7ba323bc7..4bccbb2c8 100644 --- a/plugins/inputs/vsphere/README.md +++ b/plugins/inputs/vsphere/README.md @@ -122,17 +122,17 @@ vm_metric_exclude = [ "*" ] ## Clusters # cluster_metric_include = [] ## if omitted or empty, all metrics are collected # cluster_metric_exclude = [] ## Nothing excluded by default - # cluster_instances = true ## true by default + # cluster_instances = false ## false by default ## Datastores # datastore_metric_include = [] ## if omitted or empty, all metrics are collected # datastore_metric_exclude = [] ## Nothing excluded by default - # datastore_instances = false ## false by default for Datastores only + # datastore_instances = false ## false by default ## Datacenters datacenter_metric_include = [] ## if omitted or empty, all metrics are collected datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. 
- # datacenter_instances = false ## false by default for Datastores only + # datacenter_instances = false ## false by default ## Plugin Settings ## separator character to use for measurement and field names (default: "_") diff --git a/plugins/inputs/vsphere/client.go b/plugins/inputs/vsphere/client.go index ebad2bea7..8b1c4866a 100644 --- a/plugins/inputs/vsphere/client.go +++ b/plugins/inputs/vsphere/client.go @@ -3,6 +3,7 @@ package vsphere import ( "context" "crypto/tls" + "fmt" "log" "net/url" "strconv" @@ -18,6 +19,7 @@ import ( "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" ) // The highest number of metrics we can query for, no matter what settings @@ -76,7 +78,7 @@ func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) { ctx2, cancel2 := context.WithTimeout(ctx, cf.parent.Timeout.Duration) defer cancel2() if cf.client.Client.SessionManager.Login(ctx2, url.UserPassword(cf.parent.Username, cf.parent.Password)) != nil { - return nil, err + return nil, fmt.Errorf("Renewing authentication failed: %v", err) } } @@ -205,6 +207,8 @@ func (c *Client) close() { // GetServerTime returns the time at the vCenter server func (c *Client) GetServerTime(ctx context.Context) (time.Time, error) { + ctx, cancel := context.WithTimeout(ctx, c.Timeout) + defer cancel() t, err := methods.GetCurrentTime(ctx, c.Client) if err != nil { return time.Time{}, err @@ -235,7 +239,7 @@ func (c *Client) GetMaxQueryMetrics(ctx context.Context) (int, error) { // Fall through version-based inference if value isn't usable } } else { - log.Println("I! [input.vsphere] Option query for maxQueryMetrics failed. Using default") + log.Println("D! [input.vsphere] Option query for maxQueryMetrics failed. Using default") } // No usable maxQueryMetrics setting. 
Infer based on version
@@ -255,3 +259,38 @@ func (c *Client) GetMaxQueryMetrics(ctx context.Context) (int, error) {
 	}
 	return 256, nil
 }
+
+// QueryMetrics wraps performance.Query to give it proper timeouts
+func (c *Client) QueryMetrics(ctx context.Context, pqs []types.PerfQuerySpec) ([]performance.EntityMetric, error) {
+	ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout)
+	defer cancel1()
+	metrics, err := c.Perf.Query(ctx1, pqs)
+	if err != nil {
+		return nil, err
+	}
+
+	ctx2, cancel2 := context.WithTimeout(ctx, c.Timeout)
+	defer cancel2()
+	return c.Perf.ToMetricSeries(ctx2, metrics)
+}
+
+// CounterInfoByName wraps performance.CounterInfoByName to give it proper timeouts
+func (c *Client) CounterInfoByName(ctx context.Context) (map[string]*types.PerfCounterInfo, error) {
+	ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout)
+	defer cancel1()
+	return c.Perf.CounterInfoByName(ctx1)
+}
+
+// CounterInfoByKey wraps performance.CounterInfoByKey to give it proper timeouts
+func (c *Client) CounterInfoByKey(ctx context.Context) (map[int32]*types.PerfCounterInfo, error) {
+	ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout)
+	defer cancel1()
+	return c.Perf.CounterInfoByKey(ctx1)
+}
+
+// ListResources wraps property.Collector.Retrieve to give it proper timeouts
+func (c *Client) ListResources(ctx context.Context, root *view.ContainerView, kind []string, ps []string, dst interface{}) error {
+	ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout)
+	defer cancel1()
+	return root.Retrieve(ctx1, kind, ps, dst)
+}
diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go
index dbc67dd95..27aca331c 100644
--- a/plugins/inputs/vsphere/endpoint.go
+++ b/plugins/inputs/vsphere/endpoint.go
@@ -2,8 +2,10 @@ package vsphere
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"log"
+	"math/rand"
 	"net/url"
 	"regexp"
 	"strconv"
@@ -24,15 +26,19 @@ import (
 
 var isolateLUN = regexp.MustCompile(".*/([^/]+)/?$")
 
-const metricLookback = 3
+const metricLookback = 3 // Number of time periods to look back at for non-realtime metrics
+
+const rtMetricLookback = 3 // Number of time periods to look back at for realtime metrics
+
+const maxSampleConst = 10 // Absolute maximum number of samples regardless of period
+
+const maxMetadataSamples = 100 // Number of resources to sample for metric metadata

 // Endpoint is a high-level representation of a connected vCenter endpoint. It is backed by the lower
 // level Client type.
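+// The hwMarks cache keeps a per-object high-water mark of the latest sample
+// timestamp collected, so each collection round can resume where the previous
+// one left off instead of re-emitting samples that were already reported.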
type Endpoint struct { Parent *VSphere URL *url.URL - lastColls map[string]time.Time - instanceInfo map[string]resourceInfo resourceKinds map[string]resourceKind hwMarks *TSCache lun2ds map[string]string @@ -52,8 +58,14 @@ type resourceKind struct { sampling int32 objects objectMap filters filter.Filter + include []string + simple bool + metrics performance.MetricList collectInstances bool - getObjects func(context.Context, *Endpoint, *view.ContainerView) (objectMap, error) + parent string + getObjects func(context.Context, *Client, *Endpoint, *view.ContainerView) (objectMap, error) + latestSample time.Time + lastColl time.Time } type metricEntry struct { @@ -74,33 +86,22 @@ type objectRef struct { dcname string } -type resourceInfo struct { - name string - metrics performance.MetricList - parentRef *types.ManagedObjectReference +func (e *Endpoint) getParent(obj *objectRef, res *resourceKind) (*objectRef, bool) { + if pKind, ok := e.resourceKinds[res.parent]; ok { + if p, ok := pKind.objects[obj.parentRef.Value]; ok { + return &p, true + } + } + return nil, false } -type metricQRequest struct { - res *resourceKind - obj objectRef -} - -type metricQResponse struct { - obj objectRef - metrics *performance.MetricList -} - -type multiError []error - // NewEndpoint returns a new connection to a vCenter based on the URL and configuration passed // as parameters. func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, error) { e := Endpoint{ URL: url, Parent: parent, - lastColls: make(map[string]time.Time), hwMarks: NewTSCache(1 * time.Hour), - instanceInfo: make(map[string]resourceInfo), lun2ds: make(map[string]string), initialized: false, clientFactory: NewClientFactory(ctx, url, parent), @@ -116,8 +117,11 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, sampling: 300, objects: make(objectMap), filters: newFilterOrPanic(parent.DatacenterMetricInclude, parent.DatacenterMetricExclude), + simple: isSimple(parent.DatacenterMetricInclude, parent.DatacenterMetricExclude), + include: parent.DatacenterMetricInclude, collectInstances: parent.DatacenterInstances, getObjects: getDatacenters, + parent: "", }, "cluster": { name: "cluster", @@ -128,8 +132,11 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, sampling: 300, objects: make(objectMap), filters: newFilterOrPanic(parent.ClusterMetricInclude, parent.ClusterMetricExclude), + simple: isSimple(parent.ClusterMetricInclude, parent.ClusterMetricExclude), + include: parent.ClusterMetricInclude, collectInstances: parent.ClusterInstances, getObjects: getClusters, + parent: "datacenter", }, "host": { name: "host", @@ -140,8 +147,11 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, sampling: 20, objects: make(objectMap), filters: newFilterOrPanic(parent.HostMetricInclude, parent.HostMetricExclude), + simple: isSimple(parent.HostMetricInclude, parent.HostMetricExclude), + include: parent.HostMetricInclude, collectInstances: parent.HostInstances, getObjects: getHosts, + parent: "cluster", }, "vm": { name: "vm", @@ -152,8 +162,11 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, sampling: 20, objects: make(objectMap), filters: newFilterOrPanic(parent.VMMetricInclude, parent.VMMetricExclude), + simple: isSimple(parent.VMMetricInclude, parent.VMMetricExclude), + include: parent.VMMetricInclude, collectInstances: parent.VMInstances, getObjects: getVMs, + parent: "host", }, "datastore": { name: "datastore", 
@@ -163,8 +176,11 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, sampling: 300, objects: make(objectMap), filters: newFilterOrPanic(parent.DatastoreMetricInclude, parent.DatastoreMetricExclude), + simple: isSimple(parent.DatastoreMetricInclude, parent.DatastoreMetricExclude), + include: parent.DatastoreMetricInclude, collectInstances: parent.DatastoreInstances, getObjects: getDatastores, + parent: "", }, } @@ -174,24 +190,6 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, return &e, err } -func (m multiError) Error() string { - switch len(m) { - case 0: - return "No error recorded. Something is wrong!" - case 1: - return m[0].Error() - default: - s := "Multiple errors detected concurrently: " - for i, e := range m { - if i != 0 { - s += ", " - } - s += e.Error() - } - return s - } -} - func anythingEnabled(ex []string) bool { for _, s := range ex { if s == "*" { @@ -209,6 +207,18 @@ func newFilterOrPanic(include []string, exclude []string) filter.Filter { return f } +func isSimple(include []string, exclude []string) bool { + if len(exclude) > 0 || len(include) == 0 { + return false + } + for _, s := range include { + if strings.Contains(s, "*") { + return false + } + } + return true +} + func (e *Endpoint) startDiscovery(ctx context.Context) { e.discoveryTicker = time.NewTicker(e.Parent.ObjectDiscoveryInterval.Duration) go func() { @@ -249,7 +259,9 @@ func (e *Endpoint) init(ctx context.Context) error { } else { // Otherwise, just run it in the background. We'll probably have an incomplete first metric // collection this way. - go e.initalDiscovery(ctx) + go func() { + e.initalDiscovery(ctx) + }() } } e.initialized = true @@ -262,10 +274,7 @@ func (e *Endpoint) getMetricNameMap(ctx context.Context) (map[int32]string, erro return nil, err } - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) - defer cancel1() - mn, err := client.Perf.CounterInfoByName(ctx1) - + mn, err := client.CounterInfoByName(ctx) if err != nil { return nil, err } @@ -276,20 +285,19 @@ func (e *Endpoint) getMetricNameMap(ctx context.Context) (map[int32]string, erro return names, nil } -func (e *Endpoint) getMetadata(ctx context.Context, in interface{}) interface{} { +func (e *Endpoint) getMetadata(ctx context.Context, obj objectRef, sampling int32) (performance.MetricList, error) { client, err := e.clientFactory.GetClient(ctx) if err != nil { - return err + return nil, err } - rq := in.(*metricQRequest) ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) defer cancel1() - metrics, err := client.Perf.AvailableMetric(ctx1, rq.obj.ref.Reference(), rq.res.sampling) - if err != nil && err != context.Canceled { - log.Printf("E! [input.vsphere]: Error while getting metric metadata. Discovery will be incomplete. Error: %s", err) + metrics, err := client.Perf.AvailableMetric(ctx1, obj.ref.Reference(), sampling) + if err != nil { + return nil, err } - return &metricQResponse{metrics: &metrics, obj: rq.obj} + return metrics, nil } func (e *Endpoint) getDatacenterName(ctx context.Context, client *Client, cache map[string]string, r types.ManagedObjectReference) string { @@ -349,17 +357,17 @@ func (e *Endpoint) discover(ctx context.Context) error { } log.Printf("D! 
[input.vsphere]: Discover new objects for %s", e.URL.Host) - - instInfo := make(map[string]resourceInfo) resourceKinds := make(map[string]resourceKind) dcNameCache := make(map[string]string) + numRes := int64(0) + // Populate resource objects, and endpoint instance info. for k, res := range e.resourceKinds { log.Printf("D! [input.vsphere] Discovering resources for %s", res.name) // Need to do this for all resource types even if they are not enabled if res.enabled || k != "vm" { - objects, err := res.getObjects(ctx, e, client.Root) + objects, err := res.getObjects(ctx, client, e, client.Root) if err != nil { return err } @@ -374,42 +382,19 @@ func (e *Endpoint) discover(ctx context.Context) error { } } - // Set up a worker pool for processing metadata queries concurrently - wp := NewWorkerPool(10) - wp.Run(ctx, e.getMetadata, e.Parent.DiscoverConcurrency) - - // Fill the input channels with resources that need to be queried - // for metadata. - wp.Fill(ctx, func(ctx context.Context, f PushFunc) { - for _, obj := range objects { - f(ctx, &metricQRequest{obj: obj, res: &res}) + // No need to collect metric metadata if resource type is not enabled + if res.enabled { + if res.simple { + e.simpleMetadataSelect(ctx, client, &res) + } else { + e.complexMetadataSelect(ctx, &res, objects, metricNames) } - }) - - // Drain the resulting metadata and build instance infos. - wp.Drain(ctx, func(ctx context.Context, in interface{}) bool { - switch resp := in.(type) { - case *metricQResponse: - mList := make(performance.MetricList, 0) - if res.enabled { - for _, m := range *resp.metrics { - if m.Instance != "" && !res.collectInstances { - continue - } - if res.filters.Match(metricNames[m.CounterId]) { - mList = append(mList, m) - } - } - } - instInfo[resp.obj.ref.Value] = resourceInfo{name: resp.obj.name, metrics: mList, parentRef: resp.obj.parentRef} - case error: - log.Printf("W! [input.vsphere]: Error while discovering resources: %s", resp) - return false - } - return true - }) + } res.objects = objects resourceKinds[k] = res + + SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": res.name}, int64(len(objects))) + numRes += int64(len(objects)) } } @@ -428,20 +413,100 @@ func (e *Endpoint) discover(ctx context.Context) error { e.collectMux.Lock() defer e.collectMux.Unlock() - e.instanceInfo = instInfo e.resourceKinds = resourceKinds e.lun2ds = l2d sw.Stop() - SendInternalCounter("discovered_objects", e.URL.Host, int64(len(instInfo))) + SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": "instance-total"}, numRes) return nil } -func getDatacenters(ctx context.Context, e *Endpoint, root *view.ContainerView) (objectMap, error) { +func (e *Endpoint) simpleMetadataSelect(ctx context.Context, client *Client, res *resourceKind) { + log.Printf("D! [input.vsphere] Using fast metric metadata selection for %s", res.name) + m, err := client.CounterInfoByName(ctx) + if err != nil { + log.Printf("E! [input.vsphere]: Error while getting metric metadata. Discovery will be incomplete. Error: %s", err) + return + } + res.metrics = make(performance.MetricList, 0, len(res.include)) + for _, s := range res.include { + if pci, ok := m[s]; ok { + cnt := types.PerfMetricId{ + CounterId: pci.Key, + } + if res.collectInstances { + cnt.Instance = "*" + } else { + cnt.Instance = "" + } + res.metrics = append(res.metrics, cnt) + } else { + log.Printf("W! [input.vsphere] Metric name %s is unknown. 
Will not be collected", s)
+		}
+	}
+}
+
+func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, objects objectMap, metricNames map[int32]string) {
+	// We're only going to get metadata from maxMetadataSamples resources. If we have
+	// more resources than that, we pick maxMetadataSamples samples at random.
+	sampledObjects := make([]objectRef, len(objects))
+	i := 0
+	for _, obj := range objects {
+		sampledObjects[i] = obj
+		i++
+	}
+	n := len(sampledObjects)
+	if n > maxMetadataSamples {
+		// Shuffle samples into the maxMetadataSamples positions
+		for i := 0; i < maxMetadataSamples; i++ {
+			j := int(rand.Int31n(int32(i + 1)))
+			t := sampledObjects[i]
+			sampledObjects[i] = sampledObjects[j]
+			sampledObjects[j] = t
+		}
+		sampledObjects = sampledObjects[0:maxMetadataSamples]
+	}
+
+	instInfoMux := sync.Mutex{}
+	te := NewThrottledExecutor(e.Parent.DiscoverConcurrency)
+	for _, obj := range sampledObjects {
+		func(obj objectRef) {
+			te.Run(ctx, func() {
+				metrics, err := e.getMetadata(ctx, obj, res.sampling)
+				if err != nil {
+					log.Printf("E! [input.vsphere]: Error while getting metric metadata. Discovery will be incomplete. Error: %s", err)
+				}
+				mMap := make(map[string]types.PerfMetricId)
+				for _, m := range metrics {
+					if m.Instance != "" && res.collectInstances {
+						m.Instance = "*"
+					} else {
+						m.Instance = ""
+					}
+					if res.filters.Match(metricNames[m.CounterId]) {
+						mMap[strconv.Itoa(int(m.CounterId))+"|"+m.Instance] = m
+					}
+				}
+				log.Printf("D! [input.vsphere] Found %d metrics for %s", len(mMap), obj.name)
+				instInfoMux.Lock()
+				defer instInfoMux.Unlock()
+				if len(mMap) > len(res.metrics) {
+					res.metrics = make(performance.MetricList, len(mMap))
+					i := 0
+					for _, m := range mMap {
+						res.metrics[i] = m
+						i++
+					}
+				}
+			})
+		}(obj)
+	}
+	te.Wait()
+}
+
-func getDatacenters(ctx context.Context, e *Endpoint, root *view.ContainerView) (objectMap, error) {
+func getDatacenters(ctx context.Context, client *Client, e *Endpoint, root *view.ContainerView) (objectMap, error) {
 	var resources []mo.Datacenter
-	ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
-	defer cancel1()
-	err := root.Retrieve(ctx1, []string{"Datacenter"}, []string{"name", "parent"}, &resources)
+	err := client.ListResources(ctx, root, []string{"Datacenter"}, []string{"name", "parent"}, &resources)
 	if err != nil {
 		return nil, err
 	}
@@ -453,11 +518,9 @@ func getDatacenters(ctx context.Context, e *Endpoint, root *view.ContainerView)
 	return m, nil
 }
 
-func getClusters(ctx context.Context, e *Endpoint, root *view.ContainerView) (objectMap, error) {
+func getClusters(ctx context.Context, client *Client, e *Endpoint, root *view.ContainerView) (objectMap, error) {
 	var resources []mo.ClusterComputeResource
-	ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
-	defer cancel1()
-	err := root.Retrieve(ctx1, []string{"ClusterComputeResource"}, []string{"name", "parent"}, &resources)
+	err := client.ListResources(ctx, root, []string{"ClusterComputeResource"}, []string{"name", "parent"}, &resources)
 	if err != nil {
 		return nil, err
 	}
@@ -487,9 +550,9 @@ func getClusters(ctx context.Context, e *Endpoint, root *view.ContainerView) (ob
 	return m, nil
 }
 
-func getHosts(ctx context.Context, e *Endpoint, root *view.ContainerView) (objectMap, error) {
+func getHosts(ctx context.Context, client *Client, e *Endpoint, root *view.ContainerView) (objectMap, error) {
 	var resources []mo.HostSystem
-	err := root.Retrieve(ctx, []string{"HostSystem"}, []string{"name", "parent"}, &resources)
+	err := client.ListResources(ctx, root, []string{"HostSystem"}, []string{"name", "parent"}, &resources)
 	if err
!= nil { return nil, err } @@ -501,16 +564,17 @@ func getHosts(ctx context.Context, e *Endpoint, root *view.ContainerView) (objec return m, nil } -func getVMs(ctx context.Context, e *Endpoint, root *view.ContainerView) (objectMap, error) { +func getVMs(ctx context.Context, client *Client, e *Endpoint, root *view.ContainerView) (objectMap, error) { var resources []mo.VirtualMachine - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) - defer cancel1() - err := root.Retrieve(ctx1, []string{"VirtualMachine"}, []string{"name", "runtime.host", "config.guestId", "config.uuid"}, &resources) + err := client.ListResources(ctx, root, []string{"VirtualMachine"}, []string{"name", "runtime.host", "runtime.powerState", "config.guestId", "config.uuid"}, &resources) if err != nil { return nil, err } m := make(objectMap) for _, r := range resources { + if r.Runtime.PowerState != "poweredOn" { + continue + } guest := "unknown" uuid := "" // Sometimes Config is unknown and returns a nil pointer @@ -525,11 +589,9 @@ func getVMs(ctx context.Context, e *Endpoint, root *view.ContainerView) (objectM return m, nil } -func getDatastores(ctx context.Context, e *Endpoint, root *view.ContainerView) (objectMap, error) { +func getDatastores(ctx context.Context, client *Client, e *Endpoint, root *view.ContainerView) (objectMap, error) { var resources []mo.Datastore - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) - defer cancel1() - err := root.Retrieve(ctx1, []string{"Datastore"}, []string{"name", "parent", "info"}, &resources) + err := client.ListResources(ctx, root, []string{"Datastore"}, []string{"name", "parent", "info"}, &resources) if err != nil { return nil, err } @@ -555,10 +617,10 @@ func (e *Endpoint) Close() { // Collect runs a round of data collections as specified in the configuration. func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error { + // If we never managed to do a discovery, collection will be a no-op. Therefore, // we need to check that a connection is available, or the collection will // silently fail. - // if _, err := e.clientFactory.GetClient(ctx); err != nil { return err } @@ -571,28 +633,41 @@ func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error } // If discovery interval is disabled (0), discover on each collection cycle - // if e.Parent.ObjectDiscoveryInterval.Duration == 0 { err := e.discover(ctx) if err != nil { return err } } + var wg sync.WaitGroup for k, res := range e.resourceKinds { if res.enabled { - err := e.collectResource(ctx, k, acc) - if err != nil { - return err - } + wg.Add(1) + go func(k string) { + defer wg.Done() + err := e.collectResource(ctx, k, acc) + if err != nil { + acc.AddError(err) + } + }(k) } } + wg.Wait() // Purge old timestamps from the cache e.hwMarks.Purge() return nil } -func (e *Endpoint) chunker(ctx context.Context, f PushFunc, res *resourceKind, now time.Time, latest time.Time) { +// Workaround to make sure pqs is a copy of the loop variable and won't change. 
+func submitChunkJob(ctx context.Context, te *ThrottledExecutor, job func([]types.PerfQuerySpec), pqs []types.PerfQuerySpec) {
+	te.Run(ctx, func() {
+		job(pqs)
+	})
+}
+
+func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Time, latest time.Time, acc telegraf.Accumulator, job func([]types.PerfQuerySpec)) {
+	te := NewThrottledExecutor(e.Parent.CollectConcurrency)
 	maxMetrics := e.Parent.MaxQueryMetrics
 	if maxMetrics < 1 {
 		maxMetrics = 1
@@ -609,38 +684,30 @@ func (e *Endpoint) chunker(ctx context.Context, f PushFunc, res *resourceKind, n
 	total := 0
 	nRes := 0
 	for _, object := range res.objects {
-		info, found := e.instanceInfo[object.ref.Value]
-		if !found {
-			log.Printf("E! [input.vsphere]: Internal error: Instance info not found for MOID %s", object.ref)
-		}
-		mr := len(info.metrics)
+		mr := len(res.metrics)
 		for mr > 0 {
 			mc := mr
 			headroom := maxMetrics - metrics
 			if !res.realTime && mc > headroom { // Metric query limit only applies to non-realtime metrics
 				mc = headroom
 			}
-			fm := len(info.metrics) - mr
+			fm := len(res.metrics) - mr
 			pq := types.PerfQuerySpec{
 				Entity:     object.ref,
-				MaxSample:  1,
-				MetricId:   info.metrics[fm : fm+mc],
+				MaxSample:  maxSampleConst,
+				MetricId:   res.metrics[fm : fm+mc],
 				IntervalId: res.sampling,
 				Format:     "normal",
 			}
 
-			// For non-realtime metrics, we need to look back a few samples in case
-			// the vCenter is late reporting metrics.
-			if !res.realTime {
-				pq.MaxSample = metricLookback
+			start, ok := e.hwMarks.Get(object.ref.Value)
+			if !ok {
+				// Look back 3 sampling periods by default
+				start = latest.Add(time.Duration(-res.sampling) * time.Second * (metricLookback - 1))
 			}
+			pq.StartTime = &start
+			pq.EndTime = &now
 
-			// Look back 3 sampling periods
-			start := latest.Add(time.Duration(-res.sampling) * time.Second * (metricLookback - 1))
-			if !res.realTime {
-				pq.StartTime = &start
-				pq.EndTime = &now
-			}
 			pqs = append(pqs, pq)
 			mr -= mc
 			metrics += mc
@@ -648,17 +715,18 @@ func (e *Endpoint) chunker(ctx context.Context, f PushFunc, res *resourceKind, n
 			// We need to dump the current chunk of metrics for one of three reasons:
 			// 1) We filled up the metric quota while processing the current resource
 			// 2) We are at the last resource and have no more data to process.
-			if mr > 0 || (!res.realTime && metrics >= maxMetrics) || nRes >= e.Parent.MaxQueryObjects {
+			// 3) The query contains more than 100,000 individual metrics
+			if mr > 0 || nRes >= e.Parent.MaxQueryObjects || len(pqs) > 100000 {
 				log.Printf("D! [input.vsphere]: Queueing query: %d objects, %d metrics (%d remaining) of type %s for %s. Processed objects: %d. Total objects %d",
 					len(pqs), metrics, mr, res.name, e.URL.Host, total+1, len(res.objects))
 
-				// To prevent deadlocks, don't send work items if the context has been cancelled.
+				// Don't send work items if the context has been cancelled.
 				if ctx.Err() == context.Canceled {
 					return
 				}
 
-				// Call push function
-				f(ctx, pqs)
+				// Run collection job
+				submitChunkJob(ctx, te, job, pqs)
 				pqs = make([]types.PerfQuerySpec, 0, e.Parent.MaxQueryObjects)
 				metrics = 0
 				nRes = 0
@@ -667,19 +735,19 @@ func (e *Endpoint) chunker(ctx context.Context, f PushFunc, res *resourceKind, n
 		total++
 		nRes++
 	}
-	// There may be dangling stuff in the queue. Handle them
-	//
+	// Handle final partially filled chunk
 	if len(pqs) > 0 {
-		// Call push function
+		// Run collection job
 		log.Printf("D! [input.vsphere]: Queueing query: %d objects, %d metrics (0 remaining) of type %s for %s.
Total objects %d (final chunk)", len(pqs), metrics, res.name, e.URL.Host, len(res.objects)) - f(ctx, pqs) + submitChunkJob(ctx, te, job, pqs) } + + // Wait for background collection to finish + te.Wait() } func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc telegraf.Accumulator) error { - - // Do we have new data yet? res := e.resourceKinds[resourceType] client, err := e.clientFactory.GetClient(ctx) if err != nil { @@ -689,13 +757,23 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc if err != nil { return err } - latest, hasLatest := e.lastColls[resourceType] - if hasLatest { + + // Estimate the interval at which we're invoked. Use local time (not server time) + // since this is about how we got invoked locally. + localNow := time.Now() + estInterval := time.Duration(time.Minute) + if !res.lastColl.IsZero() { + estInterval = localNow.Sub(res.lastColl).Truncate(time.Duration(res.sampling) * time.Second) + } + log.Printf("D! [inputs.vsphere] Interval estimated to %s", estInterval) + + latest := res.latestSample + if !latest.IsZero() { elapsed := now.Sub(latest).Seconds() + 5.0 // Allow 5 second jitter. - log.Printf("D! [input.vsphere]: Latest: %s, elapsed: %f, resource: %s", latest, elapsed, resourceType) + log.Printf("D! [inputs.vsphere]: Latest: %s, elapsed: %f, resource: %s", latest, elapsed, resourceType) if !res.realTime && elapsed < float64(res.sampling) { - // No new data would be available. We're outta herE! [input.vsphere]: - log.Printf("D! [input.vsphere]: Sampling period for %s of %d has not elapsed on %s", + // No new data would be available. We're outta here! + log.Printf("D! [inputs.vsphere]: Sampling period for %s of %d has not elapsed on %s", resourceType, res.sampling, e.URL.Host) return nil } @@ -706,91 +784,108 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc internalTags := map[string]string{"resourcetype": resourceType} sw := NewStopwatchWithTags("gather_duration", e.URL.Host, internalTags) - log.Printf("D! [input.vsphere]: Collecting metrics for %d objects of type %s for %s", + log.Printf("D! [inputs.vsphere]: Collecting metrics for %d objects of type %s for %s", len(res.objects), resourceType, e.URL.Host) count := int64(0) - // Set up a worker pool for collecting chunk metrics - wp := NewWorkerPool(10) - wp.Run(ctx, func(ctx context.Context, in interface{}) interface{} { - chunk := in.([]types.PerfQuerySpec) - n, err := e.collectChunk(ctx, chunk, resourceType, res, acc) - log.Printf("D! [input.vsphere] CollectChunk for %s returned %d metrics", resourceType, n) - if err != nil { - return err - } - atomic.AddInt64(&count, int64(n)) - return nil + var tsMux sync.Mutex + latestSample := time.Time{} - }, e.Parent.CollectConcurrency) - - // Fill the input channel of the worker queue by running the chunking - // logic implemented in chunker() - wp.Fill(ctx, func(ctx context.Context, f PushFunc) { - e.chunker(ctx, f, &res, now, latest) - }) - - // Drain the pool. We're getting errors back. They should all be nil - var mux sync.Mutex - merr := make(multiError, 0) - wp.Drain(ctx, func(ctx context.Context, in interface{}) bool { - if in != nil { - mux.Lock() - defer mux.Unlock() - merr = append(merr, in.(error)) - return false - } - return true - }) - e.lastColls[resourceType] = now // Use value captured at the beginning to avoid blind spots. 
+ // Divide workload into chunks and process them concurrently + e.chunkify(ctx, &res, now, latest, acc, + func(chunk []types.PerfQuerySpec) { + n, localLatest, err := e.collectChunk(ctx, chunk, &res, acc, now, estInterval) + log.Printf("D! [inputs.vsphere] CollectChunk for %s returned %d metrics", resourceType, n) + if err != nil { + acc.AddError(errors.New("While collecting " + res.name + ": " + err.Error())) + } + atomic.AddInt64(&count, int64(n)) + tsMux.Lock() + defer tsMux.Unlock() + if localLatest.After(latestSample) && !localLatest.IsZero() { + latestSample = localLatest + } + }) + log.Printf("D! [inputs.vsphere] Latest sample for %s set to %s", resourceType, latestSample) + if !latestSample.IsZero() { + res.latestSample = latestSample + } sw.Stop() SendInternalCounterWithTags("gather_count", e.URL.Host, internalTags, count) - if len(merr) > 0 { - return merr - } return nil } -func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, resourceType string, - res resourceKind, acc telegraf.Accumulator) (int, error) { +func alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Duration) ([]types.PerfSampleInfo, []float64) { + rInfo := make([]types.PerfSampleInfo, 0, len(info)) + rValues := make([]float64, 0, len(values)) + bi := 1.0 + var lastBucket time.Time + for idx := range info { + // According to the docs, SampleInfo and Value should have the same length, but we've seen corrupted + // data coming back with missing values. Take care of that gracefully! + if idx >= len(values) { + log.Printf("D! [inputs.vsphere] len(SampleInfo)>len(Value) %d > %d", len(info), len(values)) + break + } + v := float64(values[idx]) + if v < 0 { + continue + } + ts := info[idx].Timestamp + roundedTs := ts.Truncate(interval) + + // Are we still working on the same bucket? + if roundedTs == lastBucket { + bi++ + p := len(rValues) - 1 + rValues[p] = ((bi-1)/bi)*float64(rValues[p]) + v/bi + } else { + rValues = append(rValues, v) + roundedInfo := types.PerfSampleInfo{ + Timestamp: roundedTs, + Interval: info[idx].Interval, + } + rInfo = append(rInfo, roundedInfo) + bi = 1.0 + lastBucket = roundedTs + } + } + //log.Printf("D! [inputs.vsphere] Aligned samples: %d collapsed into %d", len(info), len(rInfo)) + return rInfo, rValues +} + +func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, res *resourceKind, acc telegraf.Accumulator, now time.Time, interval time.Duration) (int, time.Time, error) { + log.Printf("D! [inputs.vsphere] Query for %s has %d QuerySpecs", res.name, len(pqs)) + latestSample := time.Time{} count := 0 + resourceType := res.name prefix := "vsphere" + e.Parent.Separator + resourceType client, err := e.clientFactory.GetClient(ctx) if err != nil { - return 0, err + return count, latestSample, err } - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) - defer cancel1() - metricInfo, err := client.Perf.CounterInfoByName(ctx1) + metricInfo, err := client.CounterInfoByName(ctx) if err != nil { - return count, err + return count, latestSample, err } - ctx2, cancel2 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) - defer cancel2() - metrics, err := client.Perf.Query(ctx2, pqs) + ems, err := client.QueryMetrics(ctx, pqs) if err != nil { - return count, err + return count, latestSample, err } - ctx3, cancel3 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) - defer cancel3() - ems, err := client.Perf.ToMetricSeries(ctx3, metrics) - if err != nil { - return count, err - } - log.Printf("D! 
[inputs.vsphere] Query for %s returned metrics for %d objects", resourceType, len(ems))
 
 	// Iterate through results
 	for _, em := range ems {
 		moid := em.Entity.Reference().Value
-		instInfo, found := e.instanceInfo[moid]
+		instInfo, found := res.objects[moid]
 		if !found {
-			log.Printf("E! [input.vsphere]: MOID %s not found in cache. Skipping! (This should not happen!)", moid)
+			log.Printf("E! [inputs.vsphere]: MOID %s not found in cache. Skipping! (This should not happen!)", moid)
 			continue
 		}
 		buckets := make(map[string]metricEntry)
@@ -805,26 +900,28 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec,
 			// Populate tags
 			objectRef, ok := res.objects[moid]
 			if !ok {
-				log.Printf("E! [input.vsphere]: MOID %s not found in cache. Skipping", moid)
+				log.Printf("E! [inputs.vsphere]: MOID %s not found in cache. Skipping", moid)
 				continue
 			}
-			e.populateTags(&objectRef, resourceType, &res, t, &v)
+			e.populateTags(&objectRef, resourceType, res, t, &v)
 
-			// Now deal with the values. Iterate backwards so we start with the latest value
-			tsKey := moid + "|" + name + "|" + v.Instance
-			for idx := len(v.Value) - 1; idx >= 0; idx-- {
-				ts := em.SampleInfo[idx].Timestamp
+			nValues := 0
+			alignedInfo, alignedValues := alignSamples(em.SampleInfo, v.Value, interval) // TODO: Estimate interval
 
-				// Since non-realtime metrics are queries with a lookback, we need to check the high-water mark
-				// to determine if this should be included. Only samples not seen before should be included.
-				if !(res.realTime || e.hwMarks.IsNew(tsKey, ts)) {
-					continue
+			for idx, sample := range alignedInfo {
+				// According to the docs, SampleInfo and Value should have the same length, but we've seen corrupted
+				// data coming back with missing values. Take care of that gracefully!
+				if idx >= len(alignedValues) {
+					log.Printf("D! [inputs.vsphere] len(SampleInfo)>len(Value) %d > %d", len(alignedInfo), len(alignedValues))
+					break
 				}
-				value := v.Value[idx]
+				ts := sample.Timestamp
+				if ts.After(latestSample) {
+					latestSample = ts
+				}
+				nValues++
 
 				// Organize the metrics into a bucket per measurement.
-				// Data SHOULD be presented to us with the same timestamp for all samples, but in case
-				// they don't we use the measurement name + timestamp as the key for the bucket.
 				mn, fn := e.makeMetricIdentifier(prefix, name)
 				bKey := mn + " " + v.Instance + " " + strconv.FormatInt(ts.UnixNano(), 10)
 				bucket, found := buckets[bKey]
@@ -832,27 +929,26 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec,
 					bucket = metricEntry{name: mn, ts: ts, fields: make(map[string]interface{}), tags: t}
 					buckets[bKey] = bucket
 				}
-				if value < 0 {
-					log.Printf("D! [input.vsphere]: Negative value for %s on %s. Indicates missing samples", name, objectRef.name)
-					continue
-				}
 
 				// Percentage values must be scaled down by 100.
 				info, ok := metricInfo[name]
 				if !ok {
-					log.Printf("E! [input.vsphere]: Could not determine unit for %s. Skipping", name)
+					log.Printf("E! [inputs.vsphere]: Could not determine unit for %s. Skipping", name)
+					continue
 				}
+				v := alignedValues[idx]
 				if info.UnitInfo.GetElementDescription().Key == "percent" {
-					bucket.fields[fn] = float64(value) / 100.0
+					bucket.fields[fn] = float64(v) / 100.0
 				} else {
-					bucket.fields[fn] = value
+					bucket.fields[fn] = v
 				}
 				count++
 
-				// Update highwater marks for non-realtime metrics.
-				if !res.realTime {
-					e.hwMarks.Put(tsKey, ts)
-				}
+				// Update highwater marks
+				e.hwMarks.Put(moid, ts)
+			}
+			if nValues == 0 {
+				log.Printf("D! [inputs.vsphere]: Missing value for: %s, %s", name, objectRef.name)
+				continue
+			}
 		}
 		// We've iterated through all the metrics and collected buckets for each
@@ -861,17 +957,7 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec,
 			acc.AddFields(bucket.name, bucket.fields, bucket.tags, bucket.ts)
 		}
 	}
-	return count, nil
-}
-
-func (e *Endpoint) getParent(obj resourceInfo) (resourceInfo, bool) {
-	p := obj.parentRef
-	if p == nil {
-		log.Printf("D! [input.vsphere] No parent found for %s", obj.name)
-		return resourceInfo{}, false
-	}
-	r, ok := e.instanceInfo[p.Value]
-	return r, ok
+	return count, latestSample, nil
 }
 
 func (e *Endpoint) populateTags(objectRef *objectRef, resourceType string, resource *resourceKind, t map[string]string, v *performance.MetricSeries) {
@@ -885,14 +971,14 @@ func (e *Endpoint) populateTags(objectRef *objectRef, resourceType string, resou
 	}
 
 	// Map parent reference
-	parent, found := e.instanceInfo[objectRef.parentRef.Value]
+	parent, found := e.getParent(objectRef, resource)
 	if found {
 		t[resource.parentTag] = parent.name
 		if resourceType == "vm" {
 			if objectRef.guest != "" {
 				t["guest"] = objectRef.guest
 			}
-			if c, ok := e.getParent(parent); ok {
+			if c, ok := e.resourceKinds["cluster"].objects[parent.parentRef.Value]; ok {
 				t["clustername"] = c.name
 			}
 		}
diff --git a/plugins/inputs/vsphere/throttled_exec.go b/plugins/inputs/vsphere/throttled_exec.go
new file mode 100644
index 000000000..ac95b496c
--- /dev/null
+++ b/plugins/inputs/vsphere/throttled_exec.go
@@ -0,0 +1,45 @@
+package vsphere
+
+import (
+	"context"
+	"sync"
+)
+
+// ThrottledExecutor provides a simple mechanism for running jobs in separate
+// goroutines while limiting the number of concurrent jobs running at any given time.
+type ThrottledExecutor struct {
+	limiter chan struct{}
+	wg      sync.WaitGroup
+}
+
+// NewThrottledExecutor creates a new ThrottledExecutor with a specified maximum
+// number of concurrent jobs
+func NewThrottledExecutor(limit int) *ThrottledExecutor {
+	if limit == 0 {
+		panic("Limit must be > 0")
+	}
+	return &ThrottledExecutor{limiter: make(chan struct{}, limit)}
+}
+
+// Run schedules a job for execution as soon as possible while respecting the
+// maximum concurrency limit.
+func (t *ThrottledExecutor) Run(ctx context.Context, job func()) {
+	t.wg.Add(1)
+	go func() {
+		defer t.wg.Done()
+		select {
+		case t.limiter <- struct{}{}:
+			defer func() {
+				<-t.limiter
+			}()
+			job()
+		case <-ctx.Done():
+			return
+		}
+	}()
+}
+
+// Wait blocks until all scheduled jobs have finished
+func (t *ThrottledExecutor) Wait() {
+	t.wg.Wait()
+}
diff --git a/plugins/inputs/vsphere/tscache.go b/plugins/inputs/vsphere/tscache.go
index 9abe24ea7..1d1f00ebe 100644
--- a/plugins/inputs/vsphere/tscache.go
+++ b/plugins/inputs/vsphere/tscache.go
@@ -49,6 +49,14 @@ func (t *TSCache) IsNew(key string, tm time.Time) bool {
 	return !tm.Before(v)
 }
 
+// Get returns a timestamp (if present)
+func (t *TSCache) Get(key string) (time.Time, bool) {
+	t.mux.RLock()
+	defer t.mux.RUnlock()
+	ts, ok := t.table[key]
+	return ts, ok
+}
+
 // Put updates the latest timestamp for the supplied key.
func (t *TSCache) Put(key string, time time.Time) { t.mux.Lock() diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go index f0bb5dca9..13186634f 100644 --- a/plugins/inputs/vsphere/vsphere.go +++ b/plugins/inputs/vsphere/vsphere.go @@ -155,7 +155,7 @@ var sampleConfig = ` ## Clusters # cluster_metric_include = [] ## if omitted or empty, all metrics are collected # cluster_metric_exclude = [] ## Nothing excluded by default - # cluster_instances = true ## true by default + # cluster_instances = false ## false by default ## Datastores # datastore_metric_include = [] ## if omitted or empty, all metrics are collected @@ -260,7 +260,6 @@ func (v *VSphere) Stop() { // Gather is the main data collection function called by the Telegraf core. It performs all // the data collection and writes all metrics into the Accumulator passed as an argument. func (v *VSphere) Gather(acc telegraf.Accumulator) error { - merr := make(multiError, 0) var wg sync.WaitGroup for _, ep := range v.endpoints { wg.Add(1) @@ -274,15 +273,11 @@ func (v *VSphere) Gather(acc telegraf.Accumulator) error { } if err != nil { acc.AddError(err) - merr = append(merr, err) } }(ep) } wg.Wait() - if len(merr) > 0 { - return merr - } return nil } @@ -291,7 +286,7 @@ func init() { return &VSphere{ Vcenters: []string{}, - ClusterInstances: true, + ClusterInstances: false, ClusterMetricInclude: nil, ClusterMetricExclude: nil, HostInstances: true, diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go index 4eb3d28f8..a4b931bd9 100644 --- a/plugins/inputs/vsphere/vsphere_test.go +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -7,8 +7,11 @@ import ( "regexp" "sort" "strings" + "sync" + "sync/atomic" "testing" "time" + "unsafe" "github.com/influxdata/telegraf/internal" itls "github.com/influxdata/telegraf/internal/tls" @@ -175,6 +178,8 @@ func defaultVSphere() *VSphere { ObjectDiscoveryInterval: internal.Duration{Duration: time.Second * 300}, Timeout: internal.Duration{Duration: time.Second * 20}, ForceDiscoverOnInit: true, + DiscoverConcurrency: 1, + CollectConcurrency: 1, } } @@ -205,32 +210,43 @@ func TestParseConfig(t *testing.T) { } func TestWorkerPool(t *testing.T) { - wp := NewWorkerPool(100) - ctx := context.Background() - wp.Run(ctx, func(ctx context.Context, p interface{}) interface{} { - return p.(int) * 2 - }, 10) - - n := 100000 - wp.Fill(ctx, func(ctx context.Context, f PushFunc) { - for i := 0; i < n; i++ { - f(ctx, i) - } - }) - results := make([]int, n) - i := 0 - wp.Drain(ctx, func(ctx context.Context, p interface{}) bool { - results[i] = p.(int) - i++ - return true - }) + max := int64(0) + ngr := int64(0) + n := 10000 + var mux sync.Mutex + results := make([]int, 0, n) + te := NewThrottledExecutor(5) + for i := 0; i < n; i++ { + func(i int) { + te.Run(context.Background(), func() { + atomic.AddInt64(&ngr, 1) + mux.Lock() + defer mux.Unlock() + results = append(results, i*2) + if ngr > max { + max = ngr + } + time.Sleep(100 * time.Microsecond) + atomic.AddInt64(&ngr, -1) + }) + }(i) + } + te.Wait() sort.Ints(results) for i := 0; i < n; i++ { - require.Equal(t, results[i], i*2) + require.Equal(t, results[i], i*2, "Some jobs didn't run") } + require.Equal(t, int64(5), max, "Wrong number of goroutines spawned") } func TestTimeout(t *testing.T) { + // Don't run test on 32-bit machines due to bug in simulator. 
+ // https://github.com/vmware/govmomi/issues/1330 + var i int + if unsafe.Sizeof(i) < 8 { + return + } + m, s, err := createSim() if err != nil { t.Fatal(err) @@ -245,7 +261,7 @@ func TestTimeout(t *testing.T) { require.NoError(t, v.Start(nil)) // We're not using the Accumulator, so it can be nil. defer v.Stop() err = v.Gather(&acc) - require.NotNil(t, err, "Error should not be nil here") + require.True(t, len(acc.Errors) > 0, "Errors should not be empty here") // The accumulator must contain exactly one error and it must be a deadline exceeded. require.Equal(t, 1, len(acc.Errors)) @@ -253,6 +269,12 @@ func TestTimeout(t *testing.T) { } func TestMaxQuery(t *testing.T) { + // Don't run test on 32-bit machines due to bug in simulator. + // https://github.com/vmware/govmomi/issues/1330 + var i int + if unsafe.Sizeof(i) < 8 { + return + } m, s, err := createSim() if err != nil { t.Fatal(err) @@ -290,6 +312,13 @@ func TestMaxQuery(t *testing.T) { } func TestAll(t *testing.T) { + // Don't run test on 32-bit machines due to bug in simulator. + // https://github.com/vmware/govmomi/issues/1330 + var i int + if unsafe.Sizeof(i) < 8 { + return + } + m, s, err := createSim() if err != nil { t.Fatal(err) @@ -300,7 +329,8 @@ func TestAll(t *testing.T) { var acc testutil.Accumulator v := defaultVSphere() v.Vcenters = []string{s.URL.String()} - v.Start(nil) // We're not using the Accumulator, so it can be nil. + v.Start(&acc) defer v.Stop() require.NoError(t, v.Gather(&acc)) + require.Equal(t, 0, len(acc.Errors), fmt.Sprintf("Errors found: %s", acc.Errors)) } diff --git a/plugins/inputs/vsphere/workerpool.go b/plugins/inputs/vsphere/workerpool.go deleted file mode 100644 index 6695735ce..000000000 --- a/plugins/inputs/vsphere/workerpool.go +++ /dev/null @@ -1,119 +0,0 @@ -package vsphere - -import ( - "context" - "log" - "sync" -) - -// WorkerFunc is a function that is supposed to do the actual work -// of the WorkerPool. It is similar to the "map" portion of the -// map/reduce semantics, in that it takes a single value as an input, -// does some processing and returns a single result. -type WorkerFunc func(context.Context, interface{}) interface{} - -// PushFunc is called from a FillerFunc to push a workitem onto -// the input channel. Wraps some logic for gracefulk shutdowns. -type PushFunc func(context.Context, interface{}) bool - -// DrainerFunc represents a function used to "drain" the WorkerPool, -// i.e. pull out all the results generated by the workers and processing -// them. The DrainerFunc is called once per result produced. -// If the function returns false, the draining of the pool is aborted. -type DrainerFunc func(context.Context, interface{}) bool - -// FillerFunc represents a function for filling the WorkerPool with jobs. -// It is called once and is responsible for pushing jobs onto the supplied channel. -type FillerFunc func(context.Context, PushFunc) - -// WorkerPool implements a simple work pooling mechanism. It runs a predefined -// number of goroutines to process jobs. Jobs are inserted using the Fill call -// and results are retrieved through the Drain function. 
-type WorkerPool struct { - wg sync.WaitGroup - In chan interface{} - Out chan interface{} -} - -// NewWorkerPool creates a worker pool -func NewWorkerPool(bufsize int) *WorkerPool { - return &WorkerPool{ - In: make(chan interface{}, bufsize), - Out: make(chan interface{}, bufsize), - } -} - -func (w *WorkerPool) push(ctx context.Context, job interface{}) bool { - select { - case w.In <- job: - return true - case <-ctx.Done(): - return false - } -} - -func (w *WorkerPool) pushOut(ctx context.Context, result interface{}) bool { - select { - case w.Out <- result: - return true - case <-ctx.Done(): - return false - } -} - -// Run takes a WorkerFunc and runs it in 'n' goroutines. -func (w *WorkerPool) Run(ctx context.Context, f WorkerFunc, n int) bool { - w.wg.Add(1) - go func() { - defer w.wg.Done() - var localWg sync.WaitGroup - localWg.Add(n) - for i := 0; i < n; i++ { - go func() { - defer localWg.Done() - for { - select { - case job, ok := <-w.In: - if !ok { - return - } - w.pushOut(ctx, f(ctx, job)) - case <-ctx.Done(): - log.Printf("D! [input.vsphere]: Stop requested for worker pool. Exiting.") - return - } - } - }() - } - localWg.Wait() - close(w.Out) - }() - return ctx.Err() == nil -} - -// Fill runs a FillerFunc responsible for supplying work to the pool. You may only -// call Fill once. Calling it twice will panic. -func (w *WorkerPool) Fill(ctx context.Context, f FillerFunc) bool { - w.wg.Add(1) - go func() { - defer w.wg.Done() - f(ctx, w.push) - close(w.In) - }() - return true -} - -// Drain runs a DrainerFunc for each result generated by the workers. -func (w *WorkerPool) Drain(ctx context.Context, f DrainerFunc) bool { - w.wg.Add(1) - go func() { - defer w.wg.Done() - for result := range w.Out { - if !f(ctx, result) { - break - } - } - }() - w.wg.Wait() - return ctx.Err() != nil -} From bf4175b9cd7343f30a3e8abe998f1f2b9a70e2dc Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 28 Dec 2018 13:25:35 -0800 Subject: [PATCH 0466/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2f5d3b9fc..762aea447 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ - [#5161](https://github.com/influxdata/telegraf/pull/5161): Add support in wavefront output for the Wavefront Direction Ingestion API. - [#5168](https://github.com/influxdata/telegraf/pull/5168): Allow counting float values in valuecounter aggregator. - [#5177](https://github.com/influxdata/telegraf/pull/5177): Add log send and redo queue fields to sqlserver input. +- [#5113](https://github.com/influxdata/telegraf/pull/5113): Improve scalability of vsphere input. 
#### Bugfixes From df6fbdb1e8dfd5528215b817a79224a052fa84a0 Mon Sep 17 00:00:00 2001 From: svenwiltink Date: Wed, 2 Jan 2019 20:12:04 +0100 Subject: [PATCH 0467/1815] Fix unittests for new year (#5213) --- plugins/parsers/grok/parser_test.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go index e3426b0fc..22007971b 100644 --- a/plugins/parsers/grok/parser_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -907,6 +907,7 @@ func TestNewlineInPatterns(t *testing.T) { } func TestSyslogTimestamp(t *testing.T) { + currentYear := time.Now().Year() tests := []struct { name string line string @@ -915,17 +916,17 @@ func TestSyslogTimestamp(t *testing.T) { { name: "two digit day of month", line: "Sep 25 09:01:55 value=42", - expected: time.Date(2018, time.September, 25, 9, 1, 55, 0, time.UTC), + expected: time.Date(currentYear, time.September, 25, 9, 1, 55, 0, time.UTC), }, { name: "one digit day of month single space", line: "Sep 2 09:01:55 value=42", - expected: time.Date(2018, time.September, 2, 9, 1, 55, 0, time.UTC), + expected: time.Date(currentYear, time.September, 2, 9, 1, 55, 0, time.UTC), }, { name: "one digit day of month double space", line: "Sep 2 09:01:55 value=42", - expected: time.Date(2018, time.September, 2, 9, 1, 55, 0, time.UTC), + expected: time.Date(currentYear, time.September, 2, 9, 1, 55, 0, time.UTC), }, } for _, tt := range tests { @@ -1023,5 +1024,5 @@ func TestEmptyYearInTimestamp(t *testing.T) { m, err := p.ParseLine("Nov 6 13:57:03 generic iTunes[6504]: objc[6504]: Object descriptor was null.") require.NoError(t, err) require.NotNil(t, m) - require.Equal(t, 2018, m.Time().Year()) + require.Equal(t, time.Now().Year(), m.Time().Year()) } From bf7a42643e7d33bc24f68810cb12acee685cec44 Mon Sep 17 00:00:00 2001 From: svenwiltink Date: Wed, 2 Jan 2019 22:06:54 +0100 Subject: [PATCH 0468/1815] Add read and write op per second fields (#5210) --- plugins/inputs/ceph/README.md | 8 ++++++-- plugins/inputs/ceph/ceph.go | 32 ++++++++++++++++++++------------ plugins/inputs/ceph/ceph_test.go | 32 +++++++++++++++++++++----------- 3 files changed, 47 insertions(+), 25 deletions(-) diff --git a/plugins/inputs/ceph/README.md b/plugins/inputs/ceph/README.md index 8d04c54b2..c53f908ab 100644 --- a/plugins/inputs/ceph/README.md +++ b/plugins/inputs/ceph/README.md @@ -108,7 +108,9 @@ All fields are collected under the **ceph** measurement and stored as float64s. * bytes\_used (float) * data\_bytes (float) * num\_pgs (float) - * op\_per\_sec (float) + * op\_per\_sec (float, ceph < 10) + * read_op\_per\_sec (float) + * write_op\_per\_sec (float) * read\_bytes\_sec (float) * version (float) * write\_bytes\_sec (float) @@ -132,7 +134,9 @@ All fields are collected under the **ceph** measurement and stored as float64s. 
* objects (float) * ceph\_pool\_stats - * op\_per\_sec (float) + * op\_per\_sec (float, ceph < 10) + * read_op\_per\_sec (float) + * write_op\_per\_sec (float) * read\_bytes\_sec (float) * write\_bytes\_sec (float) * recovering\_object\_per\_sec (float) diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index 95f50958c..b6a6c5c08 100644 --- a/plugins/inputs/ceph/ceph.go +++ b/plugins/inputs/ceph/ceph.go @@ -344,7 +344,9 @@ type CephStatus struct { BytesTotal float64 `json:"bytes_total"` ReadBytesSec float64 `json:"read_bytes_sec"` WriteBytesSec float64 `json:"write_bytes_sec"` - OpPerSec float64 `json:"op_per_sec"` + OpPerSec float64 `json:"op_per_sec"` // This field is no longer reported in ceph 10 and later + ReadOpPerSec float64 `json:"read_op_per_sec"` + WriteOpPerSec float64 `json:"write_op_per_sec"` } `json:"pgmap"` } @@ -388,15 +390,17 @@ func decodeStatusOsdmap(acc telegraf.Accumulator, data *CephStatus) error { // decodeStatusPgmap decodes the PG map portion of the output of 'ceph -s' func decodeStatusPgmap(acc telegraf.Accumulator, data *CephStatus) error { fields := map[string]interface{}{ - "version": data.PGMap.Version, - "num_pgs": data.PGMap.NumPGs, - "data_bytes": data.PGMap.DataBytes, - "bytes_used": data.PGMap.BytesUsed, - "bytes_avail": data.PGMap.BytesAvail, - "bytes_total": data.PGMap.BytesTotal, - "read_bytes_sec": data.PGMap.ReadBytesSec, - "write_bytes_sec": data.PGMap.WriteBytesSec, - "op_per_sec": data.PGMap.OpPerSec, + "version": data.PGMap.Version, + "num_pgs": data.PGMap.NumPGs, + "data_bytes": data.PGMap.DataBytes, + "bytes_used": data.PGMap.BytesUsed, + "bytes_avail": data.PGMap.BytesAvail, + "bytes_total": data.PGMap.BytesTotal, + "read_bytes_sec": data.PGMap.ReadBytesSec, + "write_bytes_sec": data.PGMap.WriteBytesSec, + "op_per_sec": data.PGMap.OpPerSec, // This field is no longer reported in ceph 10 and later + "read_op_per_sec": data.PGMap.ReadOpPerSec, + "write_op_per_sec": data.PGMap.WriteOpPerSec, } acc.AddFields("ceph_pgmap", fields, map[string]string{}) return nil @@ -470,7 +474,9 @@ type CephOSDPoolStats []struct { ClientIORate struct { ReadBytesSec float64 `json:"read_bytes_sec"` WriteBytesSec float64 `json:"write_bytes_sec"` - OpPerSec float64 `json:"op_per_sec"` + OpPerSec float64 `json:"op_per_sec"` // This field is no longer reported in ceph 10 and later + ReadOpPerSec float64 `json:"read_op_per_sec"` + WriteOpPerSec float64 `json:"write_op_per_sec"` } `json:"client_io_rate"` RecoveryRate struct { RecoveringObjectsPerSec float64 `json:"recovering_objects_per_sec"` @@ -494,7 +500,9 @@ func decodeOsdPoolStats(acc telegraf.Accumulator, input string) error { fields := map[string]interface{}{ "read_bytes_sec": pool.ClientIORate.ReadBytesSec, "write_bytes_sec": pool.ClientIORate.WriteBytesSec, - "op_per_sec": pool.ClientIORate.OpPerSec, + "op_per_sec": pool.ClientIORate.OpPerSec, // This field is no longer reported in ceph 10 and later + "read_op_per_sec": pool.ClientIORate.ReadOpPerSec, + "write_op_per_sec": pool.ClientIORate.WriteOpPerSec, "recovering_objects_per_sec": pool.RecoveryRate.RecoveringObjectsPerSec, "recovering_bytes_per_sec": pool.RecoveryRate.RecoveringBytesPerSec, "recovering_keys_per_sec": pool.RecoveryRate.RecoveringKeysPerSec, diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index a0365c8fb..ee2f96491 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -835,7 +835,9 @@ var clusterStatusDump = ` "bytes_total": 17335810048000, "read_bytes_sec": 
0, "write_bytes_sec": 367217, - "op_per_sec": 98 + "op_per_sec": 98, + "read_op_per_sec": 322, + "write_op_per_sec": 1022 }, "mdsmap": { "epoch": 1, @@ -864,15 +866,17 @@ var cephStatusResults = []expectedResult{ { metric: "ceph_pgmap", fields: map[string]interface{}{ - "version": float64(52314277), - "num_pgs": float64(2560), - "data_bytes": float64(2700031960713), - "bytes_used": float64(7478347665408), - "bytes_avail": float64(9857462382592), - "bytes_total": float64(17335810048000), - "read_bytes_sec": float64(0), - "write_bytes_sec": float64(367217), - "op_per_sec": float64(98), + "version": float64(52314277), + "num_pgs": float64(2560), + "data_bytes": float64(2700031960713), + "bytes_used": float64(7478347665408), + "bytes_avail": float64(9857462382592), + "bytes_total": float64(17335810048000), + "read_bytes_sec": float64(0), + "write_bytes_sec": float64(367217), + "op_per_sec": float64(98), + "read_op_per_sec": float64(322), + "write_op_per_sec": float64(1022), }, tags: map[string]string{}, }, @@ -1014,7 +1018,9 @@ var cephODSPoolStatsDump = ` "recovering_keys_per_sec": 0}, "client_io_rate": { "read_bytes_sec": 10566067, "write_bytes_sec": 15165220376, - "op_per_sec": 9828}}]` + "op_per_sec": 9828, + "read_op_per_sec": 182, + "write_op_per_sec": 473}}]` var cephOSDPoolStatsResults = []expectedResult{ { @@ -1023,6 +1029,8 @@ var cephOSDPoolStatsResults = []expectedResult{ "read_bytes_sec": float64(0), "write_bytes_sec": float64(0), "op_per_sec": float64(0), + "read_op_per_sec": float64(0), + "write_op_per_sec": float64(0), "recovering_objects_per_sec": float64(0), "recovering_bytes_per_sec": float64(0), "recovering_keys_per_sec": float64(0), @@ -1037,6 +1045,8 @@ var cephOSDPoolStatsResults = []expectedResult{ "read_bytes_sec": float64(10566067), "write_bytes_sec": float64(15165220376), "op_per_sec": float64(9828), + "read_op_per_sec": float64(182), + "write_op_per_sec": float64(473), "recovering_objects_per_sec": float64(279), "recovering_bytes_per_sec": float64(176401059), "recovering_keys_per_sec": float64(0), From 79860be795ae6dfab6f2d6af31555c75f6bbfb82 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 2 Jan 2019 13:12:29 -0800 Subject: [PATCH 0469/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 762aea447..789b9383d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ - [#5168](https://github.com/influxdata/telegraf/pull/5168): Allow counting float values in valuecounter aggregator. - [#5177](https://github.com/influxdata/telegraf/pull/5177): Add log send and redo queue fields to sqlserver input. - [#5113](https://github.com/influxdata/telegraf/pull/5113): Improve scalability of vsphere input. +- [#5210](https://github.com/influxdata/telegraf/pull/5210): Add read and write op per second fields. #### Bugfixes From d28e6aebfa27fe34752bec4d70503a017e63e426 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 2 Jan 2019 13:13:34 -0800 Subject: [PATCH 0470/1815] Update changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 789b9383d..d2e23de56 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,7 +15,7 @@ - [#5168](https://github.com/influxdata/telegraf/pull/5168): Allow counting float values in valuecounter aggregator. - [#5177](https://github.com/influxdata/telegraf/pull/5177): Add log send and redo queue fields to sqlserver input. 
- [#5113](https://github.com/influxdata/telegraf/pull/5113): Improve scalability of vsphere input. -- [#5210](https://github.com/influxdata/telegraf/pull/5210): Add read and write op per second fields. +- [#5210](https://github.com/influxdata/telegraf/pull/5210): Add read and write op per second fields to ceph input. #### Bugfixes From bc45629b709947b5ec0c56b19c717295b52aef39 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Wed, 2 Jan 2019 14:53:58 -0700 Subject: [PATCH 0471/1815] Allow non local udp connections in net_response (#5219) --- plugins/inputs/net_response/net_response.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go index 66511a319..55ee0e00d 100644 --- a/plugins/inputs/net_response/net_response.go +++ b/plugins/inputs/net_response/net_response.go @@ -141,9 +141,8 @@ func (n *NetResponse) UDPGather() (tags map[string]string, fields map[string]int start := time.Now() // Resolving udpAddr, err := net.ResolveUDPAddr("udp", n.Address) - LocalAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0") // Connecting - conn, err := net.DialUDP("udp", LocalAddr, udpAddr) + conn, err := net.DialUDP("udp", nil, udpAddr) // Handle error if err != nil { setResult(ConnectionFailed, fields, tags, n.Expect) From a932cc24194febb2f2e7f24959a5e7a7f763c51e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 2 Jan 2019 13:55:00 -0800 Subject: [PATCH 0472/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d2e23de56..eb96f2c33 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ - [#5179](https://github.com/influxdata/telegraf/issues/5179): Add PDH_NO_DATA to known counter error codes in win_perf_counters. - [#5170](https://github.com/influxdata/telegraf/issues/5170): Fix amqp_consumer stops consuming on empty message. - [#4906](https://github.com/influxdata/telegraf/issues/4906): Fix multiple replace tables not working in strings processor. +- [#5219](https://github.com/influxdata/telegraf/issues/5219): Allow non local udp connections in net_response. ## v1.9.1 [2018-12-11] From 4e357322794bb837789ca970b28941d147fefcf9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 2 Jan 2019 13:55:59 -0800 Subject: [PATCH 0473/1815] Fix toml option names in parser processor (#5218) --- plugins/parsers/registry.go | 53 ++++++++++++++++++------------------- 1 file changed, 26 insertions(+), 27 deletions(-) diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 8ebb4a713..c6ef8ae1e 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -5,7 +5,6 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/parsers/collectd" "github.com/influxdata/telegraf/plugins/parsers/csv" "github.com/influxdata/telegraf/plugins/parsers/dropwizard" @@ -61,69 +60,69 @@ type Parser interface { // and can be used to instantiate _any_ of the parsers. type Config struct { // Dataformat can be one of: json, influx, graphite, value, nagios - DataFormat string + DataFormat string `toml:"data_format"` // Separator only applied to Graphite data. - Separator string + Separator string `toml:"separator"` // Templates only apply to Graphite data. 
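	// The explicit toml tags bind each option to its documented snake_case
	// name (e.g. "json_string_fields"), so this struct decodes correctly when
	// configured through the parser processor.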
- Templates []string + Templates []string `toml:"templates"` // TagKeys only apply to JSON data - TagKeys []string + TagKeys []string `toml:"tag_keys"` // FieldKeys only apply to JSON - JSONStringFields []string + JSONStringFields []string `toml:"json_string_fields"` - JSONNameKey string + JSONNameKey string `toml:"json_name_key"` // MetricName applies to JSON & value. This will be the name of the measurement. - MetricName string + MetricName string `toml:"metric_name"` // holds a gjson path for json parser - JSONQuery string + JSONQuery string `toml:"json_query"` // key of time - JSONTimeKey string + JSONTimeKey string `toml:"json_time_key"` // time format - JSONTimeFormat string + JSONTimeFormat string `toml:"json_time_format"` // Authentication file for collectd - CollectdAuthFile string + CollectdAuthFile string `toml:"collectd_auth_file"` // One of none (default), sign, or encrypt - CollectdSecurityLevel string + CollectdSecurityLevel string `toml:"collectd_security_level"` // Dataset specification for collectd - CollectdTypesDB []string + CollectdTypesDB []string `toml:"collectd_types_db"` // whether to split or join multivalue metrics - CollectdSplit string + CollectdSplit string `toml:"collectd_split"` // DataType only applies to value, this will be the type to parse value to - DataType string + DataType string `toml:"data_type"` // DefaultTags are the default tags that will be added to all parsed metrics. - DefaultTags map[string]string + DefaultTags map[string]string `toml:"default_tags"` // an optional json path containing the metric registry object // if left empty, the whole json object is parsed as a metric registry - DropwizardMetricRegistryPath string + DropwizardMetricRegistryPath string `toml:"dropwizard_metric_registry_path"` // an optional json path containing the default time of the metrics // if left empty, the processing time is used - DropwizardTimePath string + DropwizardTimePath string `toml:"dropwizard_time_path"` // time format to use for parsing the time field // defaults to time.RFC3339 - DropwizardTimeFormat string + DropwizardTimeFormat string `toml:"dropwizard_time_format"` // an optional json path pointing to a json object with tag key/value pairs // takes precedence over DropwizardTagPathsMap - DropwizardTagsPath string + DropwizardTagsPath string `toml:"dropwizard_tags_path"` // an optional map containing tag names as keys and json paths to retrieve the tag values from as values // used if TagsPath is empty or doesn't return any tags - DropwizardTagPathsMap map[string]string + DropwizardTagPathsMap map[string]string `toml:"dropwizard_tag_paths_map"` //grok patterns - GrokPatterns []string - GrokNamedPatterns []string - GrokCustomPatterns string - GrokCustomPatternFiles []string - GrokTimezone string + GrokPatterns []string `toml:"grok_patterns"` + GrokNamedPatterns []string `toml:"grok_named_patterns"` + GrokCustomPatterns string `toml:"grok_custom_patterns"` + GrokCustomPatternFiles []string `toml:"grok_custom_pattern_files"` + GrokTimezone string `toml:"grok_timezone"` //csv configuration CSVColumnNames []string `toml:"csv_column_names"` From 184f7b6a8badfe3f8905e12e2ad4fdc4c089f0b4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 2 Jan 2019 13:56:56 -0800 Subject: [PATCH 0474/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index eb96f2c33..7d5fc36d3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ - [#5170](https://github.com/influxdata/telegraf/issues/5170): 
Fix amqp_consumer stops consuming on empty message. - [#4906](https://github.com/influxdata/telegraf/issues/4906): Fix multiple replace tables not working in strings processor. - [#5219](https://github.com/influxdata/telegraf/issues/5219): Allow non local udp connections in net_response. +- [#5218](https://github.com/influxdata/telegraf/issues/5218): Fix toml option names in parser processor. ## v1.9.1 [2018-12-11] From 3356f1dc82245ee8c2f90b41fc53fec16c9453ef Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Thu, 3 Jan 2019 14:30:05 -0500 Subject: [PATCH 0475/1815] Fix discovery race condition in vsphere input (#5217) --- plugins/inputs/vsphere/endpoint.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index 27aca331c..95040dd4f 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -39,7 +39,7 @@ const maxMetadataSamples = 100 // Number of resources to sample for metric metad type Endpoint struct { Parent *VSphere URL *url.URL - resourceKinds map[string]resourceKind + resourceKinds map[string]*resourceKind hwMarks *TSCache lun2ds map[string]string discoveryTicker *time.Ticker @@ -107,7 +107,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, clientFactory: NewClientFactory(ctx, url, parent), } - e.resourceKinds = map[string]resourceKind{ + e.resourceKinds = map[string]*resourceKind{ "datacenter": { name: "datacenter", pKey: "dcname", @@ -363,6 +363,7 @@ func (e *Endpoint) discover(ctx context.Context) error { numRes := int64(0) // Populate resource objects, and endpoint instance info. + newObjects := make(map[string]objectMap) for k, res := range e.resourceKinds { log.Printf("D! [input.vsphere] Discovering resources for %s", res.name) // Need to do this for all resource types even if they are not enabled @@ -385,13 +386,12 @@ func (e *Endpoint) discover(ctx context.Context) error { // No need to collect metric metadata if resource type is not enabled if res.enabled { if res.simple { - e.simpleMetadataSelect(ctx, client, &res) + e.simpleMetadataSelect(ctx, client, res) } else { - e.complexMetadataSelect(ctx, &res, objects, metricNames) + e.complexMetadataSelect(ctx, res, objects, metricNames) } } - res.objects = objects - resourceKinds[k] = res + newObjects[k] = objects SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": res.name}, int64(len(objects))) numRes += int64(len(objects)) @@ -413,7 +413,9 @@ func (e *Endpoint) discover(ctx context.Context) error { e.collectMux.Lock() defer e.collectMux.Unlock() - e.resourceKinds = resourceKinds + for k, v := range newObjects { + e.resourceKinds[k].objects = v + } e.lun2ds = l2d sw.Stop() @@ -793,9 +795,9 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc latestSample := time.Time{} // Divide workload into chunks and process them concurrently - e.chunkify(ctx, &res, now, latest, acc, + e.chunkify(ctx, res, now, latest, acc, func(chunk []types.PerfQuerySpec) { - n, localLatest, err := e.collectChunk(ctx, chunk, &res, acc, now, estInterval) + n, localLatest, err := e.collectChunk(ctx, chunk, res, acc, now, estInterval) log.Printf("D! 
[inputs.vsphere] CollectChunk for %s returned %d metrics", resourceType, n) if err != nil { acc.AddError(errors.New("While collecting " + res.name + ": " + err.Error())) From bae742ecb008e3928c4e79f647536803e026c139 Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Thu, 3 Jan 2019 14:30:55 -0500 Subject: [PATCH 0476/1815] Update wavefront-sdk-go to 0.9.1 (#5223) --- Gopkg.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Gopkg.toml b/Gopkg.toml index b875ec208..4f0deb08d 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -248,7 +248,7 @@ [[constraint]] name = "github.com/wavefronthq/wavefront-sdk-go" - version = "v0.9.0" + version = "v0.9.1" [[constraint]] name = "github.com/karrick/godirwalk" From 3f158429bccc1948bab746a19692358e9323dd42 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kamil=20Szczygie=C5=82?= Date: Thu, 3 Jan 2019 20:33:04 +0100 Subject: [PATCH 0477/1815] Add configurable timeout to varnish input (#5214) --- plugins/inputs/activemq/activemq.go | 4 ++-- plugins/inputs/varnish/varnish.go | 15 +++++++++++---- plugins/inputs/varnish/varnish_test.go | 14 ++++++++------ 3 files changed, 21 insertions(+), 12 deletions(-) diff --git a/plugins/inputs/activemq/activemq.go b/plugins/inputs/activemq/activemq.go index 5b59730d2..9cc9037ed 100644 --- a/plugins/inputs/activemq/activemq.go +++ b/plugins/inputs/activemq/activemq.go @@ -91,7 +91,7 @@ var sampleConfig = ` ## Required ActiveMQ port # port = 8161 - + ## Credentials for basic HTTP authentication # username = "admin" # password = "admin" @@ -101,7 +101,7 @@ var sampleConfig = ` ## Maximum time to receive response. # response_timeout = "5s" - + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" diff --git a/plugins/inputs/varnish/varnish.go b/plugins/inputs/varnish/varnish.go index f30bead3b..3a18deb6c 100644 --- a/plugins/inputs/varnish/varnish.go +++ b/plugins/inputs/varnish/varnish.go @@ -17,7 +17,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -type runner func(cmdName string, UseSudo bool, InstanceName string) (*bytes.Buffer, error) +type runner func(cmdName string, UseSudo bool, InstanceName string, Timeout internal.Duration) (*bytes.Buffer, error) // Varnish is used to store configuration values type Varnish struct { @@ -25,6 +25,7 @@ type Varnish struct { Binary string UseSudo bool InstanceName string + Timeout internal.Duration filter filter.Filter run runner @@ -32,6 +33,7 @@ type Varnish struct { var defaultStats = []string{"MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"} var defaultBinary = "/usr/bin/varnishstat" +var defaultTimeout = internal.Duration{Duration: time.Second} var sampleConfig = ` ## If running as a restricted user you can prepend sudo for additional access: @@ -49,6 +51,9 @@ var sampleConfig = ` ## Optional name for the varnish instance (or working directory) to query ## Usually appened after -n in varnish cli # instance_name = instanceName + + ## Timeout for varnishstat command + # timeout = "1s" ` func (s *Varnish) Description() string { @@ -61,7 +66,7 @@ func (s *Varnish) SampleConfig() string { } // Shell out to varnish_stat and return the output -func varnishRunner(cmdName string, UseSudo bool, InstanceName string) (*bytes.Buffer, error) { +func varnishRunner(cmdName string, UseSudo bool, InstanceName string, Timeout internal.Duration) (*bytes.Buffer, error) { cmdArgs := []string{"-1"} if InstanceName != "" { @@ -78,7 +83,8 @@ func varnishRunner(cmdName string, UseSudo bool, InstanceName string) (*bytes.Bu var out bytes.Buffer 
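	// Capture stdout and run varnishstat under the configurable timeout;
	// internal.RunTimeout returns an error if the command does not finish
	// within Timeout.Duration (previously a hard-coded 500ms).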
cmd.Stdout = &out - err := internal.RunTimeout(cmd, time.Millisecond*500) + + err := internal.RunTimeout(cmd, Timeout.Duration) if err != nil { return &out, fmt.Errorf("error running varnishstat: %s", err) } @@ -109,7 +115,7 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error { } } - out, err := s.run(s.Binary, s.UseSudo, s.InstanceName) + out, err := s.run(s.Binary, s.UseSudo, s.InstanceName, s.Timeout) if err != nil { return fmt.Errorf("error gathering metrics: %s", err) } @@ -170,6 +176,7 @@ func init() { Binary: defaultBinary, UseSudo: false, InstanceName: "", + Timeout: defaultTimeout, } }) } diff --git a/plugins/inputs/varnish/varnish_test.go b/plugins/inputs/varnish/varnish_test.go index 465e9e8dd..e8ca94e3c 100644 --- a/plugins/inputs/varnish/varnish_test.go +++ b/plugins/inputs/varnish/varnish_test.go @@ -7,13 +7,15 @@ import ( "fmt" "strings" "testing" + "time" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" ) -func fakeVarnishStat(output string, useSudo bool, InstanceName string) func(string, bool, string) (*bytes.Buffer, error) { - return func(string, bool, string) (*bytes.Buffer, error) { +func fakeVarnishStat(output string, useSudo bool, InstanceName string, Timeout internal.Duration) func(string, bool, string, internal.Duration) (*bytes.Buffer, error) { + return func(string, bool, string, internal.Duration) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } } @@ -21,7 +23,7 @@ func fakeVarnishStat(output string, useSudo bool, InstanceName string) func(stri func TestGather(t *testing.T) { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(smOutput, false, ""), + run: fakeVarnishStat(smOutput, false, "", internal.Duration{Duration: time.Second}), Stats: []string{"*"}, } v.Gather(acc) @@ -37,7 +39,7 @@ func TestGather(t *testing.T) { func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(fullOutput, true, ""), + run: fakeVarnishStat(fullOutput, true, "", internal.Duration{Duration: time.Second}), Stats: []string{"*"}, } err := v.Gather(acc) @@ -52,7 +54,7 @@ func TestParseFullOutput(t *testing.T) { func TestFilterSomeStats(t *testing.T) { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(fullOutput, false, ""), + run: fakeVarnishStat(fullOutput, false, "", internal.Duration{Duration: time.Second}), Stats: []string{"MGT.*", "VBE.*"}, } err := v.Gather(acc) @@ -75,7 +77,7 @@ func TestFieldConfig(t *testing.T) { for fieldCfg, expected := range expect { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(fullOutput, true, ""), + run: fakeVarnishStat(fullOutput, true, "", internal.Duration{Duration: time.Second}), Stats: strings.Split(fieldCfg, ","), } err := v.Gather(acc) From bd54e4a002ba2423e7dca1ffa1411bc3e80f0a74 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 3 Jan 2019 11:35:13 -0800 Subject: [PATCH 0478/1815] Update readme for varnish input --- plugins/inputs/varnish/README.md | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/plugins/inputs/varnish/README.md b/plugins/inputs/varnish/README.md index 8949fe6db..380d1c75b 100644 --- a/plugins/inputs/varnish/README.md +++ b/plugins/inputs/varnish/README.md @@ -5,22 +5,25 @@ This plugin gathers stats from [Varnish HTTP Cache](https://varnish-cache.org/) ### Configuration: ```toml - # A plugin to collect stats from Varnish HTTP Cache - [[inputs.varnish]] - ## If 
running as a restricted user you can prepend sudo for additional access: - #use_sudo = false +[[inputs.varnish]] + ## If running as a restricted user you can prepend sudo for additional access: + #use_sudo = false - ## The default location of the varnishstat binary can be overridden with: - binary = "/usr/bin/varnishstat" + ## The default location of the varnishstat binary can be overridden with: + binary = "/usr/bin/varnishstat" - ## By default, telegraf gathers stats for 3 metric points. - ## Setting stats will override the defaults shown below. - ## stats may also be set to ["all"], which will collect all stats - stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] + ## By default, telegraf gather stats for 3 metric points. + ## Setting stats will override the defaults shown below. + ## Glob matching can be used, ie, stats = ["MAIN.*"] + ## stats may also be set to ["*"], which will collect all stats + stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] - ## Optional name for the varnish instance (or working directory) to query - ## Usually appened after -n in varnish cli - # instance_name = instanceName + ## Optional name for the varnish instance (or working directory) to query + ## Usually appened after -n in varnish cli + # instance_name = instanceName + + ## Timeout for varnishstat command + # timeout = "1s" ``` ### Measurements & Fields: From 10b2260daf0b6cac2ca2d3444ac45c50cb5f579d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 3 Jan 2019 11:36:45 -0800 Subject: [PATCH 0479/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7d5fc36d3..be4a7e233 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ - [#5177](https://github.com/influxdata/telegraf/pull/5177): Add log send and redo queue fields to sqlserver input. - [#5113](https://github.com/influxdata/telegraf/pull/5113): Improve scalability of vsphere input. - [#5210](https://github.com/influxdata/telegraf/pull/5210): Add read and write op per second fields to ceph input. +- [#5214](https://github.com/influxdata/telegraf/pull/5214): Add configurable timeout to varnish input. #### Bugfixes From 3c4e737f6bc4d5febd53a5be15da1695e1d15b47 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Thu, 3 Jan 2019 13:06:56 -0700 Subject: [PATCH 0480/1815] Add example to topk readme. Fix defaults to skip loop (#5220) --- plugins/processors/topk/README.md | 33 +++++++++++++++++++++++++++++++ plugins/processors/topk/topk.go | 13 ++++++------ 2 files changed, 39 insertions(+), 7 deletions(-) diff --git a/plugins/processors/topk/README.md b/plugins/processors/topk/README.md index 9c9e48af9..15046991d 100644 --- a/plugins/processors/topk/README.md +++ b/plugins/processors/topk/README.md @@ -72,3 +72,36 @@ This processor does not add tags by default. But the setting `add_groupby_tag` w ### Fields: This processor does not add fields by default. 
But the settings `add_rank_fields` and `add_aggregation_fields` will add one or several fields if set to anything other than "" + + +### Example +**Config** +```toml +[[processors.topk]] + period = 20 + k = 3 + group_by = ["pid"] + fields = ["cpu_usage"] +``` + +**Output difference with topk** +```diff +< procstat,pid=2088,process_name=Xorg cpu_usage=7.296576662282613 1546473820000000000 +< procstat,pid=2780,process_name=ibus-engine-simple cpu_usage=0 1546473820000000000 +< procstat,pid=2554,process_name=gsd-sound cpu_usage=0 1546473820000000000 +< procstat,pid=3484,process_name=chrome cpu_usage=4.274300361942799 1546473820000000000 +< procstat,pid=2467,process_name=gnome-shell-calendar-server cpu_usage=0 1546473820000000000 +< procstat,pid=2525,process_name=gvfs-goa-volume-monitor cpu_usage=0 1546473820000000000 +< procstat,pid=2888,process_name=gnome-terminal-server cpu_usage=1.0224991500287577 1546473820000000000 +< procstat,pid=2454,process_name=ibus-x11 cpu_usage=0 1546473820000000000 +< procstat,pid=2564,process_name=gsd-xsettings cpu_usage=0 1546473820000000000 +< procstat,pid=12184,process_name=docker cpu_usage=0 1546473820000000000 +< procstat,pid=2432,process_name=pulseaudio cpu_usage=9.892858669796528 1546473820000000000 +--- +> procstat,pid=2432,process_name=pulseaudio cpu_usage=11.486933087507786 1546474120000000000 +> procstat,pid=2432,process_name=pulseaudio cpu_usage=10.056503212060552 1546474130000000000 +> procstat,pid=23620,process_name=chrome cpu_usage=2.098690278123081 1546474120000000000 +> procstat,pid=23620,process_name=chrome cpu_usage=17.52514619948493 1546474130000000000 +> procstat,pid=2088,process_name=Xorg cpu_usage=1.6016732172309973 1546474120000000000 +> procstat,pid=2088,process_name=Xorg cpu_usage=8.481040931533833 1546474130000000000 +``` diff --git a/plugins/processors/topk/topk.go b/plugins/processors/topk/topk.go index df5d542e3..c2244c6e3 100644 --- a/plugins/processors/topk/topk.go +++ b/plugins/processors/topk/topk.go @@ -43,8 +43,8 @@ func New() *TopK { topk.Aggregation = "mean" topk.GroupBy = []string{"*"} topk.AddGroupByTag = "" - topk.AddRankFields = []string{""} - topk.AddAggregateFields = []string{""} + topk.AddRankFields = []string{} + topk.AddAggregateFields = []string{} // Initialize cache topk.Reset() @@ -203,7 +203,9 @@ func (t *TopK) Apply(in ...telegraf.Metric) []telegraf.Metric { if t.aggFieldSet == nil { t.aggFieldSet = make(map[string]bool) for _, f := range t.AddAggregateFields { - t.aggFieldSet[f] = true + if f != "" { + t.aggFieldSet[f] = true + } } } @@ -279,7 +281,6 @@ func (t *TopK) push() []telegraf.Metric { // Get the top K metrics for each field and add them to the return value addedKeys := make(map[string]bool) - groupTag := t.AddGroupByTag for _, field := range t.Fields { // Sort the aggregations @@ -288,9 +289,8 @@ func (t *TopK) push() []telegraf.Metric { // Create a one dimensional list with the top K metrics of each key for i, ag := range aggregations[0:min(t.K, len(aggregations))] { // Check whether of not we need to add fields of tags to the selected metrics - if len(t.aggFieldSet) != 0 || len(t.rankFieldSet) != 0 || groupTag != "" { + if len(t.aggFieldSet) != 0 || len(t.rankFieldSet) != 0 || t.AddGroupByTag != "" { for _, m := range t.cache[ag.groupbykey] { - // Add the aggregation final value if requested _, addAggField := t.aggFieldSet[field] if addAggField && m.HasField(field) { @@ -330,7 +330,6 @@ func (t *TopK) push() []telegraf.Metric { // Function that generates the aggregation functions func (t *TopK) 
getAggregationFunction(aggOperation string) (func([]telegraf.Metric, []string) map[string]float64, error) { - // This is a function aggregates a set of metrics using a given aggregation function var aggregator = func(ms []telegraf.Metric, fields []string, f func(map[string]float64, float64, string)) map[string]float64 { agg := make(map[string]float64) From 334f9267b6ea2ccf824ed286a8f3ea891163879f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 3 Jan 2019 12:41:16 -0800 Subject: [PATCH 0481/1815] Use Go 1.11.4 and 1.10.7 (#5221) --- .circleci/config.yml | 4 ++-- Makefile | 8 ++++---- scripts/ci-1.10.docker | 2 +- scripts/ci-1.11.docker | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index ae8771583..af248dfa1 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -7,10 +7,10 @@ defaults: - image: 'quay.io/influxdb/telegraf-ci:1.9.7' go-1_10: &go-1_10 docker: - - image: 'quay.io/influxdb/telegraf-ci:1.10.4' + - image: 'quay.io/influxdb/telegraf-ci:1.10.7' go-1_11: &go-1_11 docker: - - image: 'quay.io/influxdb/telegraf-ci:1.11.1' + - image: 'quay.io/influxdb/telegraf-ci:1.11.4' version: 2 jobs: diff --git a/Makefile b/Makefile index 9c7fe1cae..8c256b713 100644 --- a/Makefile +++ b/Makefile @@ -133,13 +133,13 @@ plugin-%: .PHONY: ci-1.11 ci-1.11: - docker build -t quay.io/influxdb/telegraf-ci:1.11.1 - < scripts/ci-1.11.docker - docker push quay.io/influxdb/telegraf-ci:1.11.1 + docker build -t quay.io/influxdb/telegraf-ci:1.11.4 - < scripts/ci-1.11.docker + docker push quay.io/influxdb/telegraf-ci:1.11.4 .PHONY: ci-1.10 ci-1.10: - docker build -t quay.io/influxdb/telegraf-ci:1.10.4 - < scripts/ci-1.10.docker - docker push quay.io/influxdb/telegraf-ci:1.10.4 + docker build -t quay.io/influxdb/telegraf-ci:1.10.7 - < scripts/ci-1.10.docker + docker push quay.io/influxdb/telegraf-ci:1.10.7 .PHONY: ci-1.9 ci-1.9: diff --git a/scripts/ci-1.10.docker b/scripts/ci-1.10.docker index b37e908ce..c70b33038 100644 --- a/scripts/ci-1.10.docker +++ b/scripts/ci-1.10.docker @@ -1,4 +1,4 @@ -FROM golang:1.10.4 +FROM golang:1.10.7 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/ci-1.11.docker b/scripts/ci-1.11.docker index 31ff34842..278d5d857 100644 --- a/scripts/ci-1.11.docker +++ b/scripts/ci-1.11.docker @@ -1,4 +1,4 @@ -FROM golang:1.11.1 +FROM golang:1.11.4 RUN chmod -R 755 "$GOPATH" From 723d8f0104ce565108f72debe88a51f9237b5bf0 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Thu, 3 Jan 2019 16:57:39 -0700 Subject: [PATCH 0482/1815] Fix panic in docker input with bad endpoint (#5226) --- plugins/inputs/docker/docker.go | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index f5633f099..6d9d56372 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -129,18 +129,7 @@ func (d *Docker) SampleConfig() string { return sampleConfig } func (d *Docker) Gather(acc telegraf.Accumulator) error { if d.client == nil { - var c Client - var err error - if d.Endpoint == "ENV" { - c, err = d.newEnvClient() - } else { - tlsConfig, err := d.ClientConfig.TLSConfig() - if err != nil { - return err - } - - c, err = d.newClient(d.Endpoint, tlsConfig) - } + c, err := d.getNewClient() if err != nil { return err } @@ -219,7 +208,6 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { } func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error { - ctx, cancel := 
context.WithTimeout(context.Background(), d.Timeout.Duration) defer cancel() services, err := d.client.ServiceList(ctx, types.ServiceListOptions{}) @@ -228,7 +216,6 @@ func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error { } if len(services) > 0 { - tasks, err := d.client.TaskList(ctx, types.TaskListOptions{}) if err != nil { return err @@ -834,6 +821,19 @@ func (d *Docker) createContainerStateFilters() error { return nil } +func (d *Docker) getNewClient() (Client, error) { + if d.Endpoint == "ENV" { + return d.newEnvClient() + } + + tlsConfig, err := d.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + return d.newClient(d.Endpoint, tlsConfig) +} + func init() { inputs.Add("docker", func() telegraf.Input { return &Docker{ From 206b8c9bc52570d95a47592228de874467acae2a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 3 Jan 2019 15:58:35 -0800 Subject: [PATCH 0483/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index be4a7e233..15e1836bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ - [#4906](https://github.com/influxdata/telegraf/issues/4906): Fix multiple replace tables not working in strings processor. - [#5219](https://github.com/influxdata/telegraf/issues/5219): Allow non local udp connections in net_response. - [#5218](https://github.com/influxdata/telegraf/issues/5218): Fix toml option names in parser processor. +- [#5225](https://github.com/influxdata/telegraf/issues/5225): Fix panic in docker input with bad endpoint. ## v1.9.1 [2018-12-11] From 3a7a40a0a4593cdd7cc082dc345be4b6679313b7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 3 Jan 2019 16:09:07 -0800 Subject: [PATCH 0484/1815] Update Gopkg.lock --- Gopkg.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 76a36e7cb..521740e05 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1094,15 +1094,15 @@ version = "v0.19.0" [[projects]] - digest = "1:c1855527c165f0224708fbc7d76843b4b20bcb74b328f212f8d0e9c855d4c49d" + digest = "1:c3bdfb7e9b2a66bafbd47517a1a4e489706f75af37ad5bfb57621bf41c16b556" name = "github.com/wavefronthq/wavefront-sdk-go" packages = [ "internal", "senders", ] pruneopts = "" - revision = "12511c8b82654d412b0334768d94dc080b617fd1" - version = "v0.9.0" + revision = "7821ac6d8ae05fe70c6d090ebda380c64f1416e4" + version = "v0.9.1" [[projects]] branch = "master" From f42d9378ba70e04579f30d297c31850a5e8e5821 Mon Sep 17 00:00:00 2001 From: emily Date: Thu, 3 Jan 2019 16:20:07 -0800 Subject: [PATCH 0485/1815] Add cloud_pubsub input plugin (#5136) --- Gopkg.lock | 21 ++ internal/internal.go | 7 + plugins/inputs/all/all.go | 1 + plugins/inputs/cloud_pubsub/README.md | 90 +++++ plugins/inputs/cloud_pubsub/pubsub.go | 307 ++++++++++++++++++ plugins/inputs/cloud_pubsub/pubsub_test.go | 149 +++++++++ .../inputs/cloud_pubsub/subscription_gcp.go | 68 ++++ .../inputs/cloud_pubsub/subscription_stub.go | 104 ++++++ 8 files changed, 747 insertions(+) create mode 100644 plugins/inputs/cloud_pubsub/README.md create mode 100644 plugins/inputs/cloud_pubsub/pubsub.go create mode 100644 plugins/inputs/cloud_pubsub/pubsub_test.go create mode 100644 plugins/inputs/cloud_pubsub/subscription_gcp.go create mode 100644 plugins/inputs/cloud_pubsub/subscription_stub.go diff --git a/Gopkg.lock b/Gopkg.lock index 521740e05..ba4564a3d 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -7,8 +7,13 @@ packages = [ "civil", "compute/metadata", + "iam", + "internal/optional", 
"internal/version", "monitoring/apiv3", + "pubsub", + "pubsub/apiv1", + "pubsub/internal/distribution", ] pruneopts = "" revision = "c728a003b238b26cef9ab6753a5dc424b331c3ad" @@ -1217,6 +1222,17 @@ pruneopts = "" revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9" +[[projects]] + branch = "master" + digest = "1:88ecca26e54f601a8733c9a31d9f0883b915216a177673f0467f6b864fd0d90f" + name = "golang.org/x/sync" + packages = [ + "errgroup", + "semaphore", + ] + pruneopts = "" + revision = "42b317875d0fa942474b76e1b46a6060d720ae6e" + [[projects]] branch = "master" digest = "1:6a6eed3727d0e15703d9e930d8dbe333bea09eda309d75a015d3c6dc4e5c92a6" @@ -1277,6 +1293,7 @@ "internal", "iterator", "option", + "support/bundler", "transport", "transport/grpc", "transport/http", @@ -1316,7 +1333,9 @@ "googleapis/api/label", "googleapis/api/metric", "googleapis/api/monitoredres", + "googleapis/iam/v1", "googleapis/monitoring/v3", + "googleapis/pubsub/v1", "googleapis/rpc/status", "protobuf/field_mask", ] @@ -1459,6 +1478,7 @@ analyzer-version = 1 input-imports = [ "cloud.google.com/go/monitoring/apiv3", + "cloud.google.com/go/pubsub", "collectd.org/api", "collectd.org/network", "github.com/Azure/go-autorest/autorest", @@ -1562,6 +1582,7 @@ "golang.org/x/net/html/charset", "golang.org/x/oauth2", "golang.org/x/oauth2/clientcredentials", + "golang.org/x/oauth2/google", "golang.org/x/sys/unix", "golang.org/x/sys/windows", "golang.org/x/sys/windows/svc", diff --git a/internal/internal.go b/internal/internal.go index 8acf63e96..a0a3ec0ec 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -18,7 +18,9 @@ import ( "time" "unicode" + "fmt" "github.com/alecthomas/units" + "runtime" ) const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" @@ -58,6 +60,11 @@ func Version() string { return version } +// ProductToken returns a tag for Telegraf that can be used in user agents. +func ProductToken() string { + return fmt.Sprintf("Telegraf/%s Go/%s", Version(), runtime.Version()) +} + // UnmarshalTOML parses the duration from the TOML config file func (d *Duration) UnmarshalTOML(b []byte) error { var err error diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index cfdc12ad2..9c183fcbb 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -14,6 +14,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/ceph" _ "github.com/influxdata/telegraf/plugins/inputs/cgroup" _ "github.com/influxdata/telegraf/plugins/inputs/chrony" + _ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub" _ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch" _ "github.com/influxdata/telegraf/plugins/inputs/conntrack" _ "github.com/influxdata/telegraf/plugins/inputs/consul" diff --git a/plugins/inputs/cloud_pubsub/README.md b/plugins/inputs/cloud_pubsub/README.md new file mode 100644 index 000000000..159c793f2 --- /dev/null +++ b/plugins/inputs/cloud_pubsub/README.md @@ -0,0 +1,90 @@ +# Google Cloud PubSub Input Plugin + +The GCP PubSub plugin ingests metrics from [Google Cloud PubSub][pubsub] +and creates metrics using one of the supported [input data formats][]. + + +### Configuration + +This section contains the default TOML to configure the plugin. You can +generate it using `telegraf --usage pubsub`. + +```toml +[[inputs.pubsub]] +## Required. Name of Google Cloud Platform (GCP) Project that owns + ## the given PubSub subscription. + project = "my-project" + + ## Required. Name of PubSub subscription to ingest metrics from. 
+ subscription = "my-subscription" + + ## Required. Data format to consume. + ## Each data format has its own unique set of configuration options. + ## Read more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + + ## Optional. Filepath for GCP credentials JSON file to authorize calls to + ## PubSub APIs. If not set explicitly, Telegraf will attempt to use + ## Application Default Credentials, which is preferred. + # credentials_file = "path/to/my/creds.json" + + ## Optional. Maximum byte length of a message to consume. + ## Larger messages are dropped with an error. If less than 0 or unspecified, + ## treated as no limit. + # max_message_len = 1000000 + + ## Optional. Maximum messages to read from PubSub that have not been written + ## to an output. Defaults to %d. + ## For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message contains 10 metrics and the output + ## metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + + ## The following are optional Subscription ReceiveSettings in PubSub. + ## Read more about these values: + ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings + + ## Optional. Maximum number of seconds for which a PubSub subscription + ## should auto-extend the PubSub ACK deadline for each message. If less than + ## 0, auto-extension is disabled. + # max_extension = 0 + + ## Optional. Maximum number of unprocessed messages in PubSub + ## (unacknowledged but not yet expired in PubSub). + ## A value of 0 is treated as the default PubSub value. + ## Negative values will be treated as unlimited. + # max_outstanding_messages = 0 + + ## Optional. Maximum size in bytes of unprocessed messages in PubSub + ## (unacknowledged but not yet expired in PubSub). + ## A value of 0 is treated as the default PubSub value. + ## Negative values will be treated as unlimited. + # max_outstanding_bytes = 0 + + ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn + ## to pull messages from PubSub concurrently. This limit applies to each + ## subscription separately and is treated as the PubSub default if less than + ## 1. Note this setting does not limit the number of messages that can be + ## processed concurrently (use "max_outstanding_messages" instead). + # max_receiver_go_routines = 0 +``` + +### Multiple Subscriptions and Topics + +This plugin assumes you have already created a PULL subscription for a given +PubSub topic. To learn how to do so, see [how to create a subscription][pubsub create sub]. + +Each plugin agent can listen to one subscription at a time, so you will +need to run multiple instances of the plugin to pull messages from multiple +subscriptions/topics. 
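
For instance, a sketch of a config ingesting from two subscriptions in the same
project would simply repeat the plugin block (the subscription names below are
placeholders; note the plugin registers as `cloud_pubsub`, per `init()` in
`pubsub.go`):

```toml
[[inputs.cloud_pubsub]]
  project = "my-project"
  subscription = "telegraf-subscription-a"
  data_format = "influx"

[[inputs.cloud_pubsub]]
  project = "my-project"
  subscription = "telegraf-subscription-b"
  data_format = "influx"
```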
+ + + +[pubsub]: https://cloud.google.com/pubsub +[pubsub create sub]: https://cloud.google.com/pubsub/docs/admin#create_a_pull_subscription +[input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/cloud_pubsub/pubsub.go b/plugins/inputs/cloud_pubsub/pubsub.go new file mode 100644 index 000000000..bb22a8dcb --- /dev/null +++ b/plugins/inputs/cloud_pubsub/pubsub.go @@ -0,0 +1,307 @@ +package cloud_pubsub + +import ( + "cloud.google.com/go/pubsub" + "context" + "fmt" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" + "golang.org/x/oauth2/google" + "google.golang.org/api/option" + "sync" +) + +type empty struct{} +type semaphore chan empty + +const defaultMaxUndeliveredMessages = 1000 + +type PubSub struct { + CredentialsFile string `toml:"credentials_file"` + Project string `toml:"project"` + Subscription string `toml:"subscription"` + + // Subscription ReceiveSettings + MaxExtension internal.Duration `toml:"max_extension"` + MaxOutstandingMessages int `toml:"max_outstanding_messages"` + MaxOutstandingBytes int `toml:"max_outstanding_bytes"` + MaxReceiverGoRoutines int `toml:"max_receiver_go_routines"` + + // Agent settings + MaxMessageLen int `toml:"max_message_len"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + + sub subscription + stubSub func() subscription + + cancel context.CancelFunc + + parser parsers.Parser + wg *sync.WaitGroup + acc telegraf.TrackingAccumulator + mu sync.Mutex + + undelivered map[telegraf.TrackingID]message + sem semaphore +} + +func (ps *PubSub) Description() string { + return "Read metrics from Google PubSub" +} + +func (ps *PubSub) SampleConfig() string { + return fmt.Sprintf(sampleConfig, defaultMaxUndeliveredMessages) +} + +// Gather does nothing for this service input. +func (ps *PubSub) Gather(acc telegraf.Accumulator) error { + return nil +} + +// SetParser implements ParserInput interface. +func (ps *PubSub) SetParser(parser parsers.Parser) { + ps.parser = parser +} + +// Start initializes the plugin and processing messages from Google PubSub. +// Two goroutines are started - one pulling for the subscription, one +// receiving delivery notifications from the accumulator. +func (ps *PubSub) Start(ac telegraf.Accumulator) error { + if ps.Subscription == "" { + return fmt.Errorf(`"subscription" is required`) + } + + if ps.Project == "" { + return fmt.Errorf(`"project" is required`) + } + + cctx, cancel := context.WithCancel(context.Background()) + ps.cancel = cancel + + if ps.stubSub != nil { + ps.sub = ps.stubSub() + } else { + subRef, err := ps.getGCPSubscription(cctx, ps.Subscription) + if err != nil { + return err + } + ps.sub = subRef + } + + ps.wg = &sync.WaitGroup{} + ps.acc = ac.WithTracking(ps.MaxUndeliveredMessages) + ps.sem = make(semaphore, ps.MaxUndeliveredMessages) + + // Start receiver in new goroutine for each subscription. + ps.wg.Add(1) + go func() { + defer ps.wg.Done() + ps.subReceive(cctx) + }() + + // Start goroutine to handle delivery notifications from accumulator. + ps.wg.Add(1) + go func() { + defer ps.wg.Done() + ps.receiveDelivered(cctx) + }() + + return nil +} + +// Stop ensures the PubSub subscriptions receivers are stopped by +// canceling the context and waits for goroutines to finish. 
+func (ps *PubSub) Stop() { + ps.cancel() + ps.wg.Wait() +} + +func (ps *PubSub) subReceive(cctx context.Context) { + err := ps.sub.Receive(cctx, func(ctx context.Context, msg message) { + if err := ps.onMessage(ctx, msg); err != nil { + ps.acc.AddError(fmt.Errorf("unable to add message from subscription %s: %v", ps.sub.ID(), err)) + } + }) + ps.acc.AddError(fmt.Errorf("receiver for subscription %s exited: %v", ps.sub.ID(), err)) +} + +// onMessage handles parsing and adding a received message to the accumulator. +func (ps *PubSub) onMessage(ctx context.Context, msg message) error { + if ps.MaxMessageLen > 0 && len(msg.Data()) > ps.MaxMessageLen { + msg.Ack() + return fmt.Errorf("message longer than max_message_len (%d > %d)", len(msg.Data()), ps.MaxMessageLen) + } + + metrics, err := ps.parser.Parse(msg.Data()) + if err != nil { + msg.Ack() + return err + } + + if len(metrics) == 0 { + msg.Ack() + return nil + } + + select { + case <-ctx.Done(): + return ctx.Err() + case ps.sem <- empty{}: + break + } + + ps.mu.Lock() + defer ps.mu.Unlock() + + id := ps.acc.AddTrackingMetricGroup(metrics) + if ps.undelivered == nil { + ps.undelivered = make(map[telegraf.TrackingID]message) + } + ps.undelivered[id] = msg + + return nil +} + +func (ps *PubSub) receiveDelivered(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case info := <-ps.acc.Delivered(): + <-ps.sem + msg := ps.removeDelivered(info.ID()) + + if msg != nil { + msg.Ack() + } + } + } +} + +func (ps *PubSub) removeDelivered(id telegraf.TrackingID) message { + ps.mu.Lock() + defer ps.mu.Unlock() + + msg, ok := ps.undelivered[id] + if !ok { + return nil + } + delete(ps.undelivered, id) + return msg +} + +func (ps *PubSub) getPubSubClient() (*pubsub.Client, error) { + var credsOpt option.ClientOption + if ps.CredentialsFile != "" { + credsOpt = option.WithCredentialsFile(ps.CredentialsFile) + } else { + creds, err := google.FindDefaultCredentials(context.Background(), pubsub.ScopeCloudPlatform) + if err != nil { + return nil, fmt.Errorf( + "unable to find GCP Application Default Credentials: %v."+ + "Either set ADC or provide CredentialsFile config", err) + } + credsOpt = option.WithCredentials(creds) + } + client, err := pubsub.NewClient( + context.Background(), + ps.Project, + credsOpt, + option.WithScopes(pubsub.ScopeCloudPlatform), + option.WithUserAgent(internal.ProductToken()), + ) + if err != nil { + return nil, fmt.Errorf("unable to generate PubSub client: %v", err) + } + return client, nil +} + +func (ps *PubSub) getGCPSubscription(ctx context.Context, subId string) (subscription, error) { + client, err := ps.getPubSubClient() + if err != nil { + return nil, err + } + s := client.Subscription(subId) + s.ReceiveSettings = pubsub.ReceiveSettings{ + NumGoroutines: ps.MaxReceiverGoRoutines, + MaxExtension: ps.MaxExtension.Duration, + MaxOutstandingMessages: ps.MaxOutstandingMessages, + MaxOutstandingBytes: ps.MaxOutstandingBytes, + } + return &gcpSubscription{s}, nil +} + +func init() { + inputs.Add("cloud_pubsub", func() telegraf.Input { + ps := &PubSub{ + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + } + return ps + }) +} + +const sampleConfig = ` + ## Required. Name of Google Cloud Platform (GCP) Project that owns + ## the given PubSub subscription. + project = "my-project" + + ## Required. Name of PubSub subscription to ingest metrics from. + subscription = "my-subscription" + + ## Required. Data format to consume. + ## Each data format has its own unique set of configuration options. 
+ ## Read more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + + ## Optional. Filepath for GCP credentials JSON file to authorize calls to + ## PubSub APIs. If not set explicitly, Telegraf will attempt to use + ## Application Default Credentials, which is preferred. + # credentials_file = "path/to/my/creds.json" + + ## Optional. Maximum byte length of a message to consume. + ## Larger messages are dropped with an error. If less than 0 or unspecified, + ## treated as no limit. + # max_message_len = 1000000 + + ## Optional. Maximum messages to read from PubSub that have not been written + ## to an output. Defaults to %d. + ## For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message contains 10 metrics and the output + ## metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + + ## The following are optional Subscription ReceiveSettings in PubSub. + ## Read more about these values: + ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings + + ## Optional. Maximum number of seconds for which a PubSub subscription + ## should auto-extend the PubSub ACK deadline for each message. If less than + ## 0, auto-extension is disabled. + # max_extension = 0 + + ## Optional. Maximum number of unprocessed messages in PubSub + ## (unacknowledged but not yet expired in PubSub). + ## A value of 0 is treated as the default PubSub value. + ## Negative values will be treated as unlimited. + # max_outstanding_messages = 0 + + ## Optional. Maximum size in bytes of unprocessed messages in PubSub + ## (unacknowledged but not yet expired in PubSub). + ## A value of 0 is treated as the default PubSub value. + ## Negative values will be treated as unlimited. + # max_outstanding_bytes = 0 + + ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn + ## to pull messages from PubSub concurrently. This limit applies to each + ## subscription separately and is treated as the PubSub default if less than + ## 1. Note this setting does not limit the number of messages that can be + ## processed concurrently (use "max_outstanding_messages" instead). 
+ # max_receiver_go_routines = 0 +` diff --git a/plugins/inputs/cloud_pubsub/pubsub_test.go b/plugins/inputs/cloud_pubsub/pubsub_test.go new file mode 100644 index 000000000..fd3ffb63e --- /dev/null +++ b/plugins/inputs/cloud_pubsub/pubsub_test.go @@ -0,0 +1,149 @@ +package cloud_pubsub + +import ( + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "testing" +) + +const ( + msgInflux = "cpu_load_short,host=server01 value=23422.0 1422568543702900257\n" +) + +// Test ingesting InfluxDB-format PubSub message +func TestRunParse(t *testing.T) { + subId := "sub-run-parse" + + testParser, _ := parsers.NewInfluxParser() + + sub := &stubSub{ + id: subId, + messages: make(chan *testMsg, 100), + } + + ps := &PubSub{ + parser: testParser, + stubSub: func() subscription { return sub }, + Project: "projectIDontMatterForTests", + Subscription: subId, + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + } + + acc := &testutil.Accumulator{} + if err := ps.Start(acc); err != nil { + t.Fatalf("test PubSub failed to start: %s", err) + } + defer ps.Stop() + + if ps.sub == nil { + t.Fatal("expected plugin subscription to be non-nil") + } + + testTracker := &testTracker{} + msg := &testMsg{ + value: msgInflux, + tracker: testTracker, + } + sub.messages <- msg + + acc.Wait(1) + assert.Equal(t, acc.NFields(), 1) + metric := acc.Metrics[0] + validateTestInfluxMetric(t, metric) +} + +func TestRunInvalidMessages(t *testing.T) { + subId := "sub-invalid-messages" + + testParser, _ := parsers.NewInfluxParser() + + sub := &stubSub{ + id: subId, + messages: make(chan *testMsg, 100), + } + + ps := &PubSub{ + parser: testParser, + stubSub: func() subscription { return sub }, + Project: "projectIDontMatterForTests", + Subscription: subId, + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + } + + acc := &testutil.Accumulator{} + + if err := ps.Start(acc); err != nil { + t.Fatalf("test PubSub failed to start: %s", err) + } + defer ps.Stop() + if ps.sub == nil { + t.Fatal("expected plugin subscription to be non-nil") + } + + testTracker := &testTracker{} + msg := &testMsg{ + value: "~invalidInfluxMsg~", + tracker: testTracker, + } + sub.messages <- msg + + acc.WaitError(1) + + // Make sure we acknowledged message so we don't receive it again. + testTracker.WaitForAck(1) + + assert.Equal(t, acc.NFields(), 0) +} + +func TestRunOverlongMessages(t *testing.T) { + subId := "sub-message-too-long" + + acc := &testutil.Accumulator{} + + testParser, _ := parsers.NewInfluxParser() + + sub := &stubSub{ + id: subId, + messages: make(chan *testMsg, 100), + } + + ps := &PubSub{ + parser: testParser, + stubSub: func() subscription { return sub }, + Project: "projectIDontMatterForTests", + Subscription: subId, + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + // Add MaxMessageLen Param + MaxMessageLen: 1, + } + + if err := ps.Start(acc); err != nil { + t.Fatalf("test PubSub failed to start: %s", err) + } + defer ps.Stop() + if ps.sub == nil { + t.Fatal("expected plugin subscription to be non-nil") + } + + testTracker := &testTracker{} + msg := &testMsg{ + value: msgInflux, + tracker: testTracker, + } + sub.messages <- msg + + acc.WaitError(1) + + // Make sure we acknowledged message so we don't receive it again. 
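+	// WaitForAck blocks until the stub tracker has recorded one Ack call,
+	// proving the oversized message was acknowledged rather than redelivered.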
+ testTracker.WaitForAck(1) + + assert.Equal(t, acc.NFields(), 0) +} + +func validateTestInfluxMetric(t *testing.T, m *testutil.Metric) { + assert.Equal(t, "cpu_load_short", m.Measurement) + assert.Equal(t, "server01", m.Tags["host"]) + assert.Equal(t, 23422.0, m.Fields["value"]) + assert.Equal(t, int64(1422568543702900257), m.Time.UnixNano()) +} diff --git a/plugins/inputs/cloud_pubsub/subscription_gcp.go b/plugins/inputs/cloud_pubsub/subscription_gcp.go new file mode 100644 index 000000000..f436d5219 --- /dev/null +++ b/plugins/inputs/cloud_pubsub/subscription_gcp.go @@ -0,0 +1,68 @@ +package cloud_pubsub + +import ( + "cloud.google.com/go/pubsub" + "context" + "time" +) + +type ( + subscription interface { + ID() string + Receive(ctx context.Context, f func(context.Context, message)) error + } + + message interface { + Ack() + Nack() + ID() string + Data() []byte + Attributes() map[string]string + PublishTime() time.Time + } + + gcpSubscription struct { + sub *pubsub.Subscription + } + + gcpMessage struct { + msg *pubsub.Message + } +) + +func (s *gcpSubscription) ID() string { + if s.sub == nil { + return "" + } + return s.sub.ID() +} + +func (s *gcpSubscription) Receive(ctx context.Context, f func(context.Context, message)) error { + return s.sub.Receive(ctx, func(cctx context.Context, m *pubsub.Message) { + f(cctx, &gcpMessage{m}) + }) +} + +func (env *gcpMessage) Ack() { + env.msg.Ack() +} + +func (env *gcpMessage) Nack() { + env.msg.Nack() +} + +func (env *gcpMessage) ID() string { + return env.msg.ID +} + +func (env *gcpMessage) Data() []byte { + return env.msg.Data +} + +func (env *gcpMessage) Attributes() map[string]string { + return env.msg.Attributes +} + +func (env *gcpMessage) PublishTime() time.Time { + return env.msg.PublishTime +} diff --git a/plugins/inputs/cloud_pubsub/subscription_stub.go b/plugins/inputs/cloud_pubsub/subscription_stub.go new file mode 100644 index 000000000..018c5472c --- /dev/null +++ b/plugins/inputs/cloud_pubsub/subscription_stub.go @@ -0,0 +1,104 @@ +package cloud_pubsub + +import ( + "context" + "sync" + "time" +) + +type stubSub struct { + id string + messages chan *testMsg +} + +func (s *stubSub) ID() string { + return s.id +} + +func (s *stubSub) Receive(ctx context.Context, f func(context.Context, message)) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + case m := <-s.messages: + f(ctx, m) + } + } +} + +type testMsg struct { + id string + value string + attributes map[string]string + publishTime time.Time + + tracker *testTracker +} + +func (tm *testMsg) Ack() { + tm.tracker.Ack() +} + +func (tm *testMsg) Nack() { + tm.tracker.Nack() +} + +func (tm *testMsg) ID() string { + return tm.id +} + +func (tm *testMsg) Data() []byte { + return []byte(tm.value) +} + +func (tm *testMsg) Attributes() map[string]string { + return tm.attributes +} + +func (tm *testMsg) PublishTime() time.Time { + return tm.publishTime +} + +type testTracker struct { + sync.Mutex + *sync.Cond + + numAcks int + numNacks int +} + +func (t *testTracker) WaitForAck(num int) { + t.Lock() + if t.Cond == nil { + t.Cond = sync.NewCond(&t.Mutex) + } + for t.numAcks < num { + t.Wait() + } + t.Unlock() +} + +func (t *testTracker) WaitForNack(num int) { + t.Lock() + if t.Cond == nil { + t.Cond = sync.NewCond(&t.Mutex) + } + for t.numNacks < num { + t.Wait() + } + t.Unlock() +} + +func (t *testTracker) Ack() { + t.Lock() + defer t.Unlock() + + t.numAcks++ +} + +func (t *testTracker) Nack() { + t.Lock() + defer t.Unlock() + + t.numNacks++ +} From 
0afa99c17bfff42bfe822192996fdf224fb09fa2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 3 Jan 2019 16:28:15 -0800 Subject: [PATCH 0486/1815] Update changelog and supporting files for cloud_pubsub --- CHANGELOG.md | 4 +++ README.md | 1 + docs/LICENSE_OF_DEPENDENCIES.md | 1 + plugins/inputs/cloud_pubsub/README.md | 41 +++++++++++++-------------- 4 files changed, 25 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 15e1836bb..42cc313b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## v1.10 [unreleased] +#### New Inputs + +- [cloud_pubsub](/plugins/inputs/cloud_pubsub/README.md) - Contributed by @emilymye + #### Features - [#4345](https://github.com/influxdata/telegraf/pull/4345): Allow for force gathering ES cluster stats. diff --git a/README.md b/README.md index fff17138a..78d8a9758 100644 --- a/README.md +++ b/README.md @@ -145,6 +145,7 @@ For documentation on the latest development code see the [documentation index][d * [ceph](./plugins/inputs/ceph) * [cgroup](./plugins/inputs/cgroup) * [chrony](./plugins/inputs/chrony) +* [cloud_pubsub](./plugins/inputs/cloud_pubsub) Google Cloud Pub/Sub * [conntrack](./plugins/inputs/conntrack) * [consul](./plugins/inputs/consul) * [couchbase](./plugins/inputs/couchbase) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index df178cae6..427f54474 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -115,6 +115,7 @@ following works: - golang.org/x/crypto [BSD 3-Clause Clear License](https://github.com/golang/crypto/blob/master/LICENSE) - golang.org/x/net [BSD 3-Clause Clear License](https://github.com/golang/net/blob/master/LICENSE) - golang.org/x/oauth2 [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/oauth2/blob/master/LICENSE) +- golang.org/x/sync [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/sync/blob/master/LICENSE) - golang.org/x/sys [BSD 3-Clause Clear License](https://github.com/golang/sys/blob/master/LICENSE) - golang.org/x/text [BSD 3-Clause Clear License](https://github.com/golang/text/blob/master/LICENSE) - google.golang.org/api [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/google-api-go-client/blob/master/LICENSE) diff --git a/plugins/inputs/cloud_pubsub/README.md b/plugins/inputs/cloud_pubsub/README.md index 159c793f2..eb08af105 100644 --- a/plugins/inputs/cloud_pubsub/README.md +++ b/plugins/inputs/cloud_pubsub/README.md @@ -6,9 +6,6 @@ and creates metrics using one of the supported [input data formats][]. ### Configuration -This section contains the default TOML to configure the plugin. You can -generate it using `telegraf --usage pubsub`. - ```toml [[inputs.pubsub]] ## Required. Name of Google Cloud Platform (GCP) Project that owns @@ -24,22 +21,22 @@ generate it using `telegraf --usage pubsub`. ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" - ## Optional. Filepath for GCP credentials JSON file to authorize calls to - ## PubSub APIs. If not set explicitly, Telegraf will attempt to use - ## Application Default Credentials, which is preferred. + ## Optional. Filepath for GCP credentials JSON file to authorize calls to + ## PubSub APIs. If not set explicitly, Telegraf will attempt to use + ## Application Default Credentials, which is preferred. # credentials_file = "path/to/my/creds.json" - ## Optional. Maximum byte length of a message to consume. - ## Larger messages are dropped with an error. 
If less than 0 or unspecified, + ## Optional. Maximum byte length of a message to consume. + ## Larger messages are dropped with an error. If less than 0 or unspecified, ## treated as no limit. # max_message_len = 1000000 - ## Optional. Maximum messages to read from PubSub that have not been written + ## Optional. Maximum messages to read from PubSub that have not been written ## to an output. Defaults to %d. ## For best throughput set based on the number of metrics within ## each message and the size of the output's metric_batch_size. ## - ## For example, if each message contains 10 metrics and the output + ## For example, if each message contains 10 metrics and the output ## metric_batch_size is 1000, setting this to 100 will ensure that a ## full batch is collected and the write is triggered immediately without ## waiting until the next flush_interval. @@ -48,28 +45,28 @@ generate it using `telegraf --usage pubsub`. ## The following are optional Subscription ReceiveSettings in PubSub. ## Read more about these values: ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings - + ## Optional. Maximum number of seconds for which a PubSub subscription ## should auto-extend the PubSub ACK deadline for each message. If less than ## 0, auto-extension is disabled. # max_extension = 0 - ## Optional. Maximum number of unprocessed messages in PubSub - ## (unacknowledged but not yet expired in PubSub). - ## A value of 0 is treated as the default PubSub value. + ## Optional. Maximum number of unprocessed messages in PubSub + ## (unacknowledged but not yet expired in PubSub). + ## A value of 0 is treated as the default PubSub value. ## Negative values will be treated as unlimited. # max_outstanding_messages = 0 - ## Optional. Maximum size in bytes of unprocessed messages in PubSub - ## (unacknowledged but not yet expired in PubSub). - ## A value of 0 is treated as the default PubSub value. + ## Optional. Maximum size in bytes of unprocessed messages in PubSub + ## (unacknowledged but not yet expired in PubSub). + ## A value of 0 is treated as the default PubSub value. ## Negative values will be treated as unlimited. # max_outstanding_bytes = 0 - ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn - ## to pull messages from PubSub concurrently. This limit applies to each - ## subscription separately and is treated as the PubSub default if less than - ## 1. Note this setting does not limit the number of messages that can be + ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn + ## to pull messages from PubSub concurrently. This limit applies to each + ## subscription separately and is treated as the PubSub default if less than + ## 1. Note this setting does not limit the number of messages that can be ## processed concurrently (use "max_outstanding_messages" instead). # max_receiver_go_routines = 0 ``` @@ -79,7 +76,7 @@ generate it using `telegraf --usage pubsub`. This plugin assumes you have already created a PULL subscription for a given PubSub topic. To learn how to do so, see [how to create a subscription][pubsub create sub]. -Each plugin agent can listen to one subscription at a time, so you will +Each plugin agent can listen to one subscription at a time, so you will need to run multiple instances of the plugin to pull messages from multiple subscriptions/topics. 
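+
+For example, a minimal sketch of two plugin instances pulling from two
+subscriptions in the same project (the project and subscription names here
+are placeholders, not defaults):
+
+```toml
+[[inputs.pubsub]]
+  project = "my-gcp-project"
+  subscription = "telegraf-subscription-a"
+  data_format = "influx"
+
+[[inputs.pubsub]]
+  project = "my-gcp-project"
+  subscription = "telegraf-subscription-b"
+  data_format = "influx"
+```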
From 4fd5fa006b4e9431b271ff350399bc1ebcb70930 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 4 Jan 2019 10:40:44 -0800 Subject: [PATCH 0487/1815] Allow non-tls config downloading --- internal/config/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/config/config.go b/internal/config/config.go index 469b80ade..2a5f62708 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -750,7 +750,7 @@ func loadConfig(config string) ([]byte, error) { } switch u.Scheme { - case "https": // http not permitted + case "https", "http": return fetchConfig(u) default: // If it isn't a https scheme, try it as a file. From 39022cd2f47f2a4e5282abc8f4291f21a31e2727 Mon Sep 17 00:00:00 2001 From: Mark Amery Date: Mon, 7 Jan 2019 19:05:22 +0000 Subject: [PATCH 0488/1815] Fix typo in graylog documentation (#5253) --- plugins/inputs/graylog/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/graylog/README.md b/plugins/inputs/graylog/README.md index 6ab4a70c4..46712ea1e 100644 --- a/plugins/inputs/graylog/README.md +++ b/plugins/inputs/graylog/README.md @@ -33,7 +33,7 @@ Note: if namespace end point specified metrics array will be ignored for that ca ## Metrics list ## List of metrics can be found on Graylog webservice documentation. - ## Or by hitting the the web service api at: + ## Or by hitting the web service api at: ## http://[graylog-host]:12900/system/metrics metrics = [ "jvm.cl.loaded", From 9dc9bd653aa23e01561f44372e828bcb0ab0b35c Mon Sep 17 00:00:00 2001 From: hydrandt Date: Tue, 8 Jan 2019 03:25:08 +0800 Subject: [PATCH 0489/1815] Document response_string_match field in http_response (#5251) --- plugins/inputs/http_response/README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md index 4ccd236a5..7c66928e2 100644 --- a/plugins/inputs/http_response/README.md +++ b/plugins/inputs/http_response/README.md @@ -27,7 +27,7 @@ This input plugin checks HTTP/HTTPS connections. # {'fake':'data'} # ''' - ## Optional substring or regex match in body of the response + ## Optional substring or regex match in body of the response (case sensitive) # response_string_match = "\"service_status\": \"up\"" # response_string_match = "ok" # response_string_match = "\".*_status\".?:.?\"up\"" @@ -54,6 +54,7 @@ This input plugin checks HTTP/HTTPS connections. - result ([see below](#result--result_code)) - fields: - response_time (float, seconds) + - response_string_match (int, 0 = mismatch / body read error, 1 = match) - http_response_code (int, response status code) - result_type (string, deprecated in 1.6: use `result` tag and `result_code` field) - result_code (int, [see below](#result--result_code)) @@ -67,7 +68,7 @@ This tag is used to expose network and plugin errors. HTTP errors are considered |Tag value |Corresponding field value|Description| --------------------------|-------------------------|-----------| |success | 0 |The HTTP request completed, even if the HTTP code represents an error| -|response_string_mismatch | 1 |The option `response_string_match` was used, and the body of the response didn't match the regex| +|response_string_mismatch | 1 |The option `response_string_match` was used, and the body of the response didn't match the regex. 
HTTP errors with content in their body (like 4xx, 5xx) will trigger this error|
|body_read_error | 2 |The option `response_string_match` was used, but the plugin wasn't able to read the body of the response. Responses with empty bodies (like 3xx, HEAD, etc) will trigger this error|
|connection_failed | 3 |Catch all for any network error not specifically handled by the plugin|
|timeout | 4 |The plugin timed out while awaiting the HTTP connection to complete|

From 9800779e64e38f6ab8b6f0f48b99038f0064cc21 Mon Sep 17 00:00:00 2001
From: j2gg0s <457862502@qq.com>
Date: Tue, 8 Jan 2019 03:30:48 +0800
Subject: [PATCH 0490/1815] Fix err in graphite parser_test (#5257)

---
 plugins/parsers/graphite/parser_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/parsers/graphite/parser_test.go b/plugins/parsers/graphite/parser_test.go
index 9a6b462f7..d84551add 100644
--- a/plugins/parsers/graphite/parser_test.go
+++ b/plugins/parsers/graphite/parser_test.go
@@ -241,7 +241,7 @@ func TestParseLine(t *testing.T) {
 				len(test.tags), len(metric.Tags()))
 		}
 		f := metric.Fields()["value"].(float64)
-		if metric.Fields()["value"] != f {
+		if f != test.value {
 			t.Fatalf("floatValue value mismatch. expected %v, got %v",
 				test.value, f)
 		}

From 3621bcf5a65e526d9473f73622144066a0e09082 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 7 Jan 2019 14:13:37 -0800
Subject: [PATCH 0491/1815] Add basic unittest for templating engine

---
 internal/templating/engine_test.go | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)
 create mode 100644 internal/templating/engine_test.go

diff --git a/internal/templating/engine_test.go b/internal/templating/engine_test.go
new file mode 100644
index 000000000..b7dd23f38
--- /dev/null
+++ b/internal/templating/engine_test.go
@@ -0,0 +1,22 @@
+package templating
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestEngineAlternateSeparator(t *testing.T) {
+	defaultTemplate, _ := NewDefaultTemplateWithPattern("topic*")
+	engine, err := NewEngine("_", defaultTemplate, []string{
+		"/ /*/*/* /measurement/origin/measurement*",
+	})
+	require.NoError(t, err)
+	name, tags, field, err := engine.Apply("/telegraf/host01/cpu")
+	require.NoError(t, err)
+	require.Equal(t, "telegraf_cpu", name)
+	require.Equal(t, map[string]string{
+		"origin": "host01",
+	}, tags)
+	require.Equal(t, "", field)
+}

From 0ceb10e017761844381ed2f894d8e10a3ffd00d2 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 7 Jan 2019 14:31:10 -0800
Subject: [PATCH 0492/1815] Rewrite configuration documentation (#5227)

---
 docs/CONFIGURATION.md | 536 ++++++++++++++++++++++++------------------
 1 file changed, 301 insertions(+), 235 deletions(-)

diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md
index 0b2c27156..61dd09e10 100644
--- a/docs/CONFIGURATION.md
+++ b/docs/CONFIGURATION.md
@@ -1,26 +1,37 @@
 # Configuration

-Telegraf's configuration file is written using
-[TOML](https://github.com/toml-lang/toml#toml).
+Telegraf's configuration file is written using [TOML][] and is composed of
+three sections: [global tags][], [agent][] settings, and [plugins][].

-[View the telegraf.conf config file with all available
-plugins](/etc/telegraf.conf).
+View the default [telegraf.conf][] config file with all available plugins.
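+
+As a quick orientation, here is a minimal sketch of that three-part layout
+(the plugin choices are illustrative examples only):
+
+```toml
+[global_tags]
+  dc = "us-east-1"
+
+[agent]
+  interval = "10s"
+
+[[inputs.cpu]]
+
+[[outputs.file]]
+  files = ["stdout"]
+```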
-## Generating a Configuration File +### Generating a Configuration File A default config file can be generated by telegraf: - -``` +```sh telegraf config > telegraf.conf ``` To generate a file with specific inputs and outputs, you can use the --input-filter and --output-filter flags: -``` +```sh telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config ``` +### Configuration Loading + +The location of the configuration file can be set via the `--config` command +line flag. + +When the `--config-directory` command line flag is used files ending with +`.conf` in the specified directory will also be included in the Telegraf +configuration. + +On most systems, the default locations are `/etc/telegraf/telegraf.conf` for +the main configuration file and `/etc/telegraf/telegraf.d` for the directory of +configuration files. + ### Environment Variables Environment variables can be used anywhere in the config file, simply prepend @@ -66,81 +77,164 @@ parsed: password = "monkey123" ``` -### Configuration file locations +### Intervals -The location of the configuration file can be set via the `--config` command -line flag. - -When the `--config-directory` command line flag is used files ending with -`.conf` in the specified directory will also be included in the Telegraf -configuration. - -On most systems, the default locations are `/etc/telegraf/telegraf.conf` for -the main configuration file and `/etc/telegraf/telegraf.d` for the directory of -configuration files. +Intervals are durations of time and can be specified for supporting settings by +combining an integer value and time unit as a string value. Valid time units are +`ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. +```toml +[agent] + interval = "10s" +``` ### Global Tags -Global tags can be specified in the `[global_tags]` section of the config file -in key="value" format. All metrics being gathered on this host will be tagged -with the tags specified here. +Global tags can be specified in the `[global_tags]` table in key="value" +format. All metrics that are gathered will be tagged with the tags specified. -### Agent Configuration +```toml +[global_tags] + dc = "us-east-1" +``` -Telegraf has a few options you can configure under the `[agent]` section of the -config. +### Agent -* **interval**: Default data collection interval for all inputs -* **round_interval**: Rounds collection interval to 'interval' -ie, if interval="10s" then always collect on :00, :10, :20, etc. -* **metric_batch_size**: Telegraf will send metrics to output in batch of at -most metric_batch_size metrics. -* **metric_buffer_limit**: Telegraf will cache metric_buffer_limit metrics -for each output, and will flush this buffer on a successful write. -This should be a multiple of metric_batch_size and could not be less -than 2 times metric_batch_size. -* **collection_jitter**: Collection jitter is used to jitter -the collection by a random amount. -Each plugin will sleep for a random time within jitter before collecting. -This can be used to avoid many plugins querying things like sysfs at the -same time, which can have a measurable effect on the system. -* **flush_interval**: Default data flushing interval for all outputs. -You should not set this below -interval. Maximum flush_interval will be flush_interval + flush_jitter -* **flush_jitter**: Jitter the flush interval by a random amount. -This is primarily to avoid -large write spikes for users running a large number of telegraf instances. 
-ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s. -* **precision**: - By default or when set to "0s", precision will be set to the same - timestamp order as the collection interval, with the maximum being 1s. - Precision will NOT be used for service inputs. It is up to each individual - service input to set the timestamp at the appropriate precision. - Valid time units are "ns", "us" (or "µs"), "ms", "s". +The agent table configures Telegraf and the defaults used across all plugins. -* **logfile**: Specify the log file name. The empty string means to log to stderr. -* **debug**: Run telegraf in debug mode. -* **quiet**: Run telegraf in quiet mode (error messages only). -* **hostname**: Override default hostname, if empty use os.Hostname(). -* **omit_hostname**: If true, do no set the "host" tag in the telegraf agent. +- **interval**: Default data collection interval for all inputs. -### Input Configuration +- **round_interval**: Rounds collection interval to 'interval' + ie, if interval="10s" then always collect on :00, :10, :20, etc. -The following config parameters are available for all inputs: +- **metric_batch_size**: + Telegraf will send metrics to outputs in batches of at most + metric_batch_size metrics. + This controls the size of writes that Telegraf sends to output plugins. -* **interval**: How often to gather this metric. Normal plugins use a single -global interval, but if one particular input should be run less or more often, -you can configure that here. -* **name_override**: Override the base name of the measurement. -(Default is the name of the input). -* **name_prefix**: Specifies a prefix to attach to the measurement name. -* **name_suffix**: Specifies a suffix to attach to the measurement name. -* **tags**: A map of tags to apply to a specific input's measurements. +- **metric_buffer_limit**: + For failed writes, telegraf will cache metric_buffer_limit metrics for each + output, and will flush this buffer on a successful write. Oldest metrics + are dropped first when this buffer fills. + This buffer only fills when writes fail to output plugin(s). -The [metric filtering](#metric-filtering) parameters can be used to limit what metrics are +- **collection_jitter**: + Collection jitter is used to jitter the collection by a random amount. + Each plugin will sleep for a random time within jitter before collecting. + This can be used to avoid many plugins querying things like sysfs at the + same time, which can have a measurable effect on the system. + +- **flush_interval**: + Default flushing interval for all outputs. Maximum flush_interval will be + flush_interval + flush_jitter + +- **flush_jitter**: + Jitter the flush interval by a random amount. This is primarily to avoid + large write spikes for users running a large number of telegraf instances. + ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + +- **precision**: + Collected metrics are rounded to the precision specified as a duration. + + Precision will NOT be used for service inputs. It is up to each individual + service input to set the timestamp at the appropriate precision. + +- **debug**: + Run telegraf with debug log messages. +- **quiet**: + Run telegraf in quiet mode (error log messages only). +- **logfile**: + Specify the log file name. The empty string means to log to stderr. + +- **hostname**: + Override default hostname, if empty use os.Hostname() +- **omit_hostname**: + If set to true, do no set the "host" tag in the telegraf agent. 
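+
+Putting several of these settings together, a sketch of an agent table (the
+values are arbitrary examples, not recommended defaults):
+
+```toml
+[agent]
+  interval = "10s"
+  round_interval = true
+  metric_batch_size = 1000
+  metric_buffer_limit = 10000
+  collection_jitter = "0s"
+  flush_interval = "10s"
+  flush_jitter = "5s"
+  precision = ""
+  debug = false
+  quiet = false
+  logfile = ""
+  hostname = ""
+  omit_hostname = false
+```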
+ +### Plugins + +Telegraf plugins are divided into 4 types: [inputs][], [outputs][], +[processors][], and [aggregators][]. + +Unlike the `global_tags` and `agent` tables, any plugin can be defined +multiple times and each instance will run independantly. This allows you to +have plugins defined with differing configurations as needed within a single +Telegraf process. + +Each plugin has a unique set of configuration options, reference the +sample configuration for details. Additionally, several options are available +on any plugin depending on its type. + +### Input Plugins + +Input plugins gather and create metrics. They support both polling and event +driven operation. + +Parameters that can be used with any input plugin: + +- **interval**: How often to gather this metric. Normal plugins use a single + global interval, but if one particular input should be run less or more + often, you can configure that here. +- **name_override**: Override the base name of the measurement. (Default is + the name of the input). +- **name_prefix**: Specifies a prefix to attach to the measurement name. +- **name_suffix**: Specifies a suffix to attach to the measurement name. +- **tags**: A map of tags to apply to a specific input's measurements. + +The [metric filtering][] parameters can be used to limit what metrics are emitted from the input plugin. -### Output Configuration +#### Examples + +Use the name_suffix parameter to emit measurements with the name `cpu_total`: +```toml +[[inputs.cpu]] + name_suffix = "_total" + percpu = false + totalcpu = true +``` + +Use the name_override parameter to emit measurements with the name `foobar`: +```toml +[[inputs.cpu]] + name_override = "foobar" + percpu = false + totalcpu = true +``` + +Emit measurements with two additional tags: `tag1=foo` and `tag2=bar` + +> **NOTE**: With TOML, order matters. Parameters belong to the last defined +> table header, place `[inputs.cpu.tags]` table at the _end_ of the plugin +> definition. +```toml +[[inputs.cpu]] + percpu = false + totalcpu = true + [inputs.cpu.tags] + tag1 = "foo" + tag2 = "bar" +``` + +Utilize `name_override`, `name_prefix`, or `name_suffix` config options to +avoid measurement collisions when defining multiple plugins: +```toml +[[inputs.cpu]] + percpu = false + totalcpu = true + +[[inputs.cpu]] + percpu = true + totalcpu = false + name_override = "percpu_usage" + fielddrop = ["cpu_time*"] +``` + +### Output Plugins + +Output plugins write metrics to a location. Outputs commonly write to +databases, network services, and messaging systems. + +Parameters that can be used with any output plugin: - **flush_interval**: The maximum time between flushes. Use this setting to override the agent `flush_interval` on a per plugin basis. @@ -150,42 +244,121 @@ emitted from the input plugin. Use this setting to override the agent `metric_buffer_limit` on a per plugin basis. -The [metric filtering](#metric-filtering) parameters can be used to limit what metrics are +The [metric filtering][] parameters can be used to limit what metrics are emitted from the output plugin. -### Aggregator Configuration +#### Examples -The following config parameters are available for all aggregators: +Override flush parameters for a single output: +```toml +[agent] + flush_interval = "10s" + metric_batch_size = 1000 -* **period**: The period on which to flush & clear each aggregator. All metrics -that are sent with timestamps outside of this period will be ignored by the -aggregator. -* **delay**: The delay before each aggregator is flushed. 
This is to control -how long for aggregators to wait before receiving metrics from input plugins, -in the case that aggregators are flushing and inputs are gathering on the -same interval. -* **drop_original**: If true, the original metric will be dropped by the -aggregator and will not get sent to the output plugins. -* **name_override**: Override the base name of the measurement. -(Default is the name of the input). -* **name_prefix**: Specifies a prefix to attach to the measurement name. -* **name_suffix**: Specifies a suffix to attach to the measurement name. -* **tags**: A map of tags to apply to a specific input's measurements. +[[outputs.influxdb]] + urls = [ "http://example.org:8086" ] + database = "telegraf" -The [metric filtering](#metric-filtering) parameters can be used to limit what metrics are +[[outputs.file]] + files = [ "stdout" ] + flush_interval = "1s" + metric_batch_size = 10 +``` + +### Processor Plugins + +Processor plugins perform processing tasks on metrics and are commonly used to +rename or apply transformations to metrics. Processors are applied after the +input plugins and before any aggregator plugins. + +Parameters that can be used with any processor plugin: + +- **order**: The order in which the processor(s) are executed. If this is not + specified then processor execution order will be random. + +The [metric filtering][] parameters can be used to limit what metrics are +handled by the processor. Excluded metrics are passed downstream to the next +processor. + +#### Examples + +If the order processors are applied matters you must set order on all involved +processors: +```toml +[[processors.rename]] + order = 1 + [[processors.rename.replace]] + tag = "path" + dest = "resource" + +[[processors.strings]] + order = 2 + [[processors.strings.trim_prefix]] + tag = "resource" + prefix = "/api/" +``` + +### Aggregator Plugins + +Aggregator plugins produce new metrics after examining metrics over a time +period, as the name suggests they are commonly used to produce new aggregates +such as mean/max/min metrics. Aggregators operate on metrics after any +processors have been applied. + +Parameters that can be used with any aggregator plugin: + +- **period**: The period on which to flush & clear each aggregator. All + metrics that are sent with timestamps outside of this period will be ignored + by the aggregator. +- **delay**: The delay before each aggregator is flushed. This is to control + how long for aggregators to wait before receiving metrics from input + plugins, in the case that aggregators are flushing and inputs are gathering + on the same interval. +- **drop_original**: If true, the original metric will be dropped by the + aggregator and will not get sent to the output plugins. +- **name_override**: Override the base name of the measurement. (Default is + the name of the input). +- **name_prefix**: Specifies a prefix to attach to the measurement name. +- **name_suffix**: Specifies a suffix to attach to the measurement name. +- **tags**: A map of tags to apply to a specific input's measurements. + +The [metric filtering][] parameters can be used to limit what metrics are handled by the aggregator. Excluded metrics are passed downstream to the next aggregator. -### Processor Configuration +#### Examples -The following config parameters are available for all processors: +Collect and emit the min/max of the system load1 metric every 30s, dropping +the originals. +```toml +[[inputs.system]] + fieldpass = ["load1"] # collects system load1 metric. 
-* **order**: This is the order in which the processor(s) get executed. If this -is not specified then processor execution order will be random. +[[aggregators.minmax]] + period = "30s" # send & clear the aggregate every 30s. + drop_original = true # drop the original metrics. -The [metric filtering](#metric-filtering) parameters can be used to limit what metrics are -handled by the processor. Excluded metrics are passed downstream to the next -processor. +[[outputs.file]] + files = ["stdout"] +``` + +Collect and emit the min/max of the swap metrics every 30s, dropping the +originals. The aggregator will not be applied to the system load metrics due +to the `namepass` parameter. +```toml +[[inputs.swap]] + +[[inputs.system]] + fieldpass = ["load1"] # collects system load1 metric. + +[[aggregators.minmax]] + period = "30s" # send & clear the aggregate every 30s. + drop_original = true # drop the original metrics. + namepass = ["swap"] # only "pass" swap metrics through the aggregator. + +[[outputs.file]] + files = ["stdout"] +``` ### Metric Filtering @@ -244,39 +417,9 @@ The inverse of `taginclude`. Tags with a tag key matching one of the patterns will be discarded from the metric. Any tag can be filtered including global tags and the agent `host` tag. -### Input Configuration Examples - -This is a full working config that will output CPU data to an InfluxDB instance -at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output -measurements at a 10s interval and will collect per-cpu data, dropping any -fields which begin with `time_`. - -```toml -[global_tags] - dc = "denver-1" - -[agent] - interval = "10s" - -# OUTPUTS -[[outputs.influxdb]] - url = "http://192.168.59.103:8086" # required. - database = "telegraf" # required. - -# INPUTS -[[inputs.cpu]] - percpu = true - totalcpu = false - # filter all fields beginning with 'time_' - fielddrop = ["time_*"] -``` - -#### Input Config: tagpass and tagdrop - -**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of -the plugin definition, otherwise subsequent plugin config options will be -interpreted as part of the tagpass/tagdrop map. +##### Filtering Examples +Using tagpass and tagdrop: ```toml [[inputs.cpu]] percpu = true @@ -294,8 +437,7 @@ interpreted as part of the tagpass/tagdrop map. fstype = [ "ext4", "xfs" ] # Globs can also be used on the tag values path = [ "/opt", "/home*" ] - - + [[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] ObjectName = "Network Interface" @@ -308,11 +450,9 @@ interpreted as part of the tagpass/tagdrop map. # Don't send metrics where the Windows interface name (instance) begins with isatap or Local [inputs.win_perf_counters.tagdrop] instance = ["isatap*", "Local*"] - ``` -#### Input Config: fieldpass and fielddrop - +Using fieldpass and fielddrop: ```toml # Drop all metrics for guest & steal CPU usage [[inputs.cpu]] @@ -325,8 +465,7 @@ interpreted as part of the tagpass/tagdrop map. fieldpass = ["inodes*"] ``` -#### Input Config: namepass and namedrop - +Using namepass and namedrop: ```toml # Drop all metrics about containers for kubelet [[inputs.prometheus]] @@ -339,8 +478,7 @@ interpreted as part of the tagpass/tagdrop map. namepass = ["rest_client_*"] ``` -#### Input Config: taginclude and tagexclude - +Using taginclude and tagexclude: ```toml # Only include the "cpu" tag in the measurements for the cpu plugin. [[inputs.cpu]] @@ -353,64 +491,7 @@ interpreted as part of the tagpass/tagdrop map. 
tagexclude = ["fstype"] ``` -#### Input config: prefix, suffix, and override - -This plugin will emit measurements with the name `cpu_total` - -```toml -[[inputs.cpu]] - name_suffix = "_total" - percpu = false - totalcpu = true -``` - -This will emit measurements with the name `foobar` - -```toml -[[inputs.cpu]] - name_override = "foobar" - percpu = false - totalcpu = true -``` - -#### Input config: tags - -This plugin will emit measurements with two additional tags: `tag1=foo` and -`tag2=bar` - -NOTE: Order matters, the `[inputs.cpu.tags]` table must be at the _end_ of the -plugin definition. - -```toml -[[inputs.cpu]] - percpu = false - totalcpu = true - [inputs.cpu.tags] - tag1 = "foo" - tag2 = "bar" -``` - -#### Multiple inputs of the same type - -Additional inputs (or outputs) of the same type can be specified, -just define more instances in the config file. It is highly recommended that -you utilize `name_override`, `name_prefix`, or `name_suffix` config options -to avoid measurement collisions: - -```toml -[[inputs.cpu]] - percpu = false - totalcpu = true - -[[inputs.cpu]] - percpu = true - totalcpu = false - name_override = "percpu_usage" - fielddrop = ["cpu_time*"] -``` - -#### Output Configuration Examples: - +Metrics can be routed to different outputs using the metric name and tags: ```toml [[outputs.influxdb]] urls = [ "http://localhost:8086" ] @@ -432,50 +513,35 @@ to avoid measurement collisions: cpu = ["cpu0"] ``` -#### Aggregator Configuration Examples: - -This will collect and emit the min/max of the system load1 metric every -30s, dropping the originals. - +Routing metrics to different outputs based on the input. Metrics are tagged +with `influxdb_database` in the input, which is then used to select the +output. The tag is removed in the outputs before writing. ```toml -[[inputs.system]] - fieldpass = ["load1"] # collects system load1 metric. +[[outputs.influxdb]] + urls = ["http://influxdb.example.com"] + database = "db_default" + [outputs.influxdb.tagdrop] + influxdb_database = ["*"] -[[aggregators.minmax]] - period = "30s" # send & clear the aggregate every 30s. - drop_original = true # drop the original metrics. +[[outputs.influxdb]] + urls = ["http://influxdb.example.com"] + database = "db_other" + tagexclude = ["influxdb_database"] + [ouputs.influxdb.tagpass] + influxdb_database = ["other"] -[[outputs.file]] - files = ["stdout"] +[[inputs.disk]] + [inputs.disk.tags] + influxdb_database = "other" ``` -This will collect and emit the min/max of the swap metrics every -30s, dropping the originals. The aggregator will not be applied -to the system load metrics due to the `namepass` parameter. - -```toml -[[inputs.swap]] - -[[inputs.system]] - fieldpass = ["load1"] # collects system load1 metric. - -[[aggregators.minmax]] - period = "30s" # send & clear the aggregate every 30s. - drop_original = true # drop the original metrics. - namepass = ["swap"] # only "pass" swap metrics through the aggregator. 
- -[[outputs.file]] - files = ["stdout"] -``` - -#### Processor Configuration Examples: - -Print only the metrics with `cpu` as the measurement name, all metrics are -passed to the output: -```toml -[[processors.printer]] - namepass = "cpu" - -[[outputs.file]] - files = ["/tmp/metrics.out"] -``` +[TOML]: https://github.com/toml-lang/toml#toml +[global tags]: #global-tags +[agent]: #agent +[plugins]: #plugins +[inputs]: #input-plugins +[outputs]: #output-plugins +[processors]: #processor-plugins +[aggregators]: #aggregator-plugins +[metric filtering]: #metric-filtering +[telegraf.conf]: /etc/telegraf.conf From 0f75f3b304bfead5be0aa61f264927f9f06f9d83 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 7 Jan 2019 14:36:41 -0800 Subject: [PATCH 0493/1815] Link intervals to interval section in configuation docs --- docs/CONFIGURATION.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 61dd09e10..2b1993a45 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -101,9 +101,9 @@ format. All metrics that are gathered will be tagged with the tags specified. The agent table configures Telegraf and the defaults used across all plugins. -- **interval**: Default data collection interval for all inputs. +- **interval**: Default data collection [interval][] for all inputs. -- **round_interval**: Rounds collection interval to 'interval' +- **round_interval**: Rounds collection interval to [interval][] ie, if interval="10s" then always collect on :00, :10, :20, etc. - **metric_batch_size**: @@ -118,22 +118,22 @@ The agent table configures Telegraf and the defaults used across all plugins. This buffer only fills when writes fail to output plugin(s). - **collection_jitter**: - Collection jitter is used to jitter the collection by a random amount. + Collection jitter is used to jitter the collection by a random [interval][]. Each plugin will sleep for a random time within jitter before collecting. This can be used to avoid many plugins querying things like sysfs at the same time, which can have a measurable effect on the system. - **flush_interval**: - Default flushing interval for all outputs. Maximum flush_interval will be + Default flushing [interval][] for all outputs. Maximum flush_interval will be flush_interval + flush_jitter - **flush_jitter**: - Jitter the flush interval by a random amount. This is primarily to avoid + Jitter the flush [interval][] by a random amount. This is primarily to avoid large write spikes for users running a large number of telegraf instances. ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - **precision**: - Collected metrics are rounded to the precision specified as a duration. + Collected metrics are rounded to the precision specified as an [interval][]. Precision will NOT be used for service inputs. It is up to each individual service input to set the timestamp at the appropriate precision. @@ -537,6 +537,7 @@ output. The tag is removed in the outputs before writing. 
[TOML]: https://github.com/toml-lang/toml#toml [global tags]: #global-tags +[interval]: #intervals [agent]: #agent [plugins]: #plugins [inputs]: #input-plugins From 84139cf890338141d328e909a391e5af4f417fc7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 7 Jan 2019 17:14:07 -0800 Subject: [PATCH 0494/1815] Use gofmt from Go 1.11 (#5259) --- .circleci/config.yml | 12 ++--- README.md | 2 +- appveyor.yml | 4 +- plugins/inputs/haproxy/haproxy_test.go | 48 +++++++++---------- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 2 +- plugins/inputs/mysql/mysql.go | 12 ++--- .../nginx_plus_api_metrics_test.go | 16 +++---- .../inputs/nsq_consumer/nsq_consumer_test.go | 2 +- .../inputs/udp_listener/udp_listener_test.go | 4 +- plugins/parsers/dropwizard/parser_test.go | 6 +-- 10 files changed, 54 insertions(+), 54 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index af248dfa1..16003bacd 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -36,30 +36,30 @@ jobs: steps: - attach_workspace: at: '/go/src' - - run: 'make check' + # disabled due to gofmt differences (1.10 vs 1.11). + #- run: 'make check' - run: 'make test' test-go-1.10: <<: [ *defaults, *go-1_10 ] steps: - attach_workspace: at: '/go/src' - - run: 'make check' + # disabled due to gofmt differences (1.10 vs 1.11). + #- run: 'make check' - run: 'make test' test-go-1.11: <<: [ *defaults, *go-1_11 ] steps: - attach_workspace: at: '/go/src' - # disabled due to gofmt differences (1.10 vs 1.11). - # - run: 'make check' + - run: 'make check' - run: 'make test' test-go-1.11-386: <<: [ *defaults, *go-1_11 ] steps: - attach_workspace: at: '/go/src' - # disabled due to gofmt differences (1.10 vs 1.11). - # - run: 'GOARCH=386 make check' + - run: 'GOARCH=386 make check' - run: 'GOARCH=386 make test' package: diff --git a/README.md b/README.md index 78d8a9758..d2d74a1f7 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,7 @@ Ansible role: https://github.com/rossmcdonald/telegraf Telegraf requires golang version 1.9 or newer, the Makefile requires GNU make. -1. [Install Go](https://golang.org/doc/install) >=1.9 (1.10 recommended) +1. [Install Go](https://golang.org/doc/install) >=1.9 (1.11 recommended) 2. [Install dep](https://golang.github.io/dep/docs/installation.html) ==v0.5.0 3. 
Download Telegraf source: ``` diff --git a/appveyor.yml b/appveyor.yml index 16d53388e..dfdf31d50 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -12,11 +12,11 @@ platform: x64 install: - IF NOT EXIST "C:\Cache" mkdir C:\Cache - - IF NOT EXIST "C:\Cache\go1.10.4.msi" curl -o "C:\Cache\go1.10.4.msi" https://storage.googleapis.com/golang/go1.10.4.windows-amd64.msi + - IF NOT EXIST "C:\Cache\go1.11.4.msi" curl -o "C:\Cache\go1.11.4.msi" https://storage.googleapis.com/golang/go1.11.4.windows-amd64.msi - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip - IF EXIST "C:\Go" rmdir /S /Q C:\Go - - msiexec.exe /i "C:\Cache\go1.10.4.msi" /quiet + - msiexec.exe /i "C:\Cache\go1.11.4.msi" /quiet - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y - go get -d github.com/golang/dep diff --git a/plugins/inputs/haproxy/haproxy_test.go b/plugins/inputs/haproxy/haproxy_test.go index 27a197304..e05031f19 100644 --- a/plugins/inputs/haproxy/haproxy_test.go +++ b/plugins/inputs/haproxy/haproxy_test.go @@ -248,30 +248,30 @@ func HaproxyGetFieldValues() map[string]interface{} { "http_response.4xx": uint64(140), "http_response.5xx": uint64(0), "http_response.other": uint64(0), - "iid": uint64(4), - "last_chk": "OK", - "lastchg": uint64(1036557), - "lastsess": int64(1342), - "lbtot": uint64(9481), - "mode": "http", - "pid": uint64(1), - "qcur": uint64(0), - "qmax": uint64(0), - "qtime": uint64(1268), - "rate": uint64(0), - "rate_max": uint64(2), - "rtime": uint64(2908), - "sid": uint64(1), - "scur": uint64(0), - "slim": uint64(2), - "smax": uint64(2), - "srv_abort": uint64(0), - "status": "UP", - "stot": uint64(14539), - "ttime": uint64(4500), - "weight": uint64(1), - "wredis": uint64(0), - "wretr": uint64(0), + "iid": uint64(4), + "last_chk": "OK", + "lastchg": uint64(1036557), + "lastsess": int64(1342), + "lbtot": uint64(9481), + "mode": "http", + "pid": uint64(1), + "qcur": uint64(0), + "qmax": uint64(0), + "qtime": uint64(1268), + "rate": uint64(0), + "rate_max": uint64(2), + "rtime": uint64(2908), + "sid": uint64(1), + "scur": uint64(0), + "slim": uint64(2), + "smax": uint64(2), + "srv_abort": uint64(0), + "status": "UP", + "stot": uint64(14539), + "ttime": uint64(4500), + "weight": uint64(1), + "wredis": uint64(0), + "wretr": uint64(0), } return fields } diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 03c3696f0..da556159e 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -321,7 +321,7 @@ func init() { return &MQTTConsumer{ ConnectionTimeout: defaultConnectionTimeout, MaxUndeliveredMessages: defaultMaxUndeliveredMessages, - state: Disconnected, + state: Disconnected, } }) } diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 3e9417cb0..0516e22b7 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -202,10 +202,10 @@ var ( "deleting": uint32(0), "executing": uint32(0), "execution of init_command": uint32(0), - "end": uint32(0), - "freeing items": uint32(0), - "flushing tables": uint32(0), - "fulltext initialization": uint32(0), + "end": uint32(0), + "freeing items": uint32(0), + "flushing tables": uint32(0), + "fulltext initialization": uint32(0), "idle": 
uint32(0), "init": uint32(0), "killed": uint32(0), @@ -241,8 +241,8 @@ var ( } // plaintext statuses stateStatusMappings = map[string]string{ - "user sleep": "idle", - "creating index": "altering table", + "user sleep": "idle", + "creating index": "altering table", "committing alter table to storage engine": "altering table", "discard or import tablespace": "altering table", "rename": "altering table", diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go index a7516dee5..8105f35fb 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go @@ -776,10 +776,10 @@ func TestGatherHttpCachesMetrics(t *testing.T) { t, "nginx_plus_api_http_caches", map[string]interface{}{ - "bypass_bytes": int64(5510647548), - "bypass_bytes_written": int64(44992), - "bypass_responses": int64(200187), - "bypass_responses_written": int64(200173), + "bypass_bytes": int64(5510647548), + "bypass_bytes_written": int64(44992), + "bypass_responses": int64(200187), + "bypass_responses_written": int64(200173), "cold": false, "expired_bytes": int64(1656847080), "expired_bytes_written": int64(1641825173), @@ -810,10 +810,10 @@ func TestGatherHttpCachesMetrics(t *testing.T) { t, "nginx_plus_api_http_caches", map[string]interface{}{ - "bypass_bytes": int64(5510647548), - "bypass_bytes_written": int64(44992), - "bypass_responses": int64(200187), - "bypass_responses_written": int64(200173), + "bypass_bytes": int64(5510647548), + "bypass_bytes_written": int64(44992), + "bypass_responses": int64(200187), + "bypass_responses_written": int64(200173), "cold": false, "expired_bytes": int64(1656847080), "expired_bytes_written": int64(1641825173), diff --git a/plugins/inputs/nsq_consumer/nsq_consumer_test.go b/plugins/inputs/nsq_consumer/nsq_consumer_test.go index 8376f7bb1..6558dfba2 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer_test.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer_test.go @@ -41,7 +41,7 @@ func TestReadsMetricsFromNSQ(t *testing.T) { Channel: "consume", MaxInFlight: 1, MaxUndeliveredMessages: defaultMaxUndeliveredMessages, - Nsqd: []string{"127.0.0.1:4155"}, + Nsqd: []string{"127.0.0.1:4155"}, } p, _ := parsers.NewInfluxParser() diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go index 49115434a..ed206f173 100644 --- a/plugins/inputs/udp_listener/udp_listener_test.go +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -36,8 +36,8 @@ func newTestUdpListener() (*UdpListener, chan []byte) { listener := &UdpListener{ ServiceAddress: ":8125", AllowedPendingMessages: 10000, - in: in, - done: make(chan struct{}), + in: in, + done: make(chan struct{}), } return listener, in } diff --git a/plugins/parsers/dropwizard/parser_test.go b/plugins/parsers/dropwizard/parser_test.go index 8ddcf7714..df33562db 100644 --- a/plugins/parsers/dropwizard/parser_test.go +++ b/plugins/parsers/dropwizard/parser_test.go @@ -106,9 +106,9 @@ func TestParseValidEmbeddedCounterJSON(t *testing.T) { "count": float64(1), }, metrics[0].Fields()) assert.Equal(t, map[string]string{ - "metric_type": "counter", - "tag1": "green", - "tag2": "yellow", + "metric_type": "counter", + "tag1": "green", + "tag2": "yellow", "tag3 space,comma=equals": "red ,=", }, metrics[0].Tags()) assert.True(t, metricTime.Equal(metrics[0].Time()), fmt.Sprintf("%s should be equal to %s", metrics[0].Time(), metricTime)) From 
361baaa4bbc7e854561815a89c8db5c451839e33 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 8 Jan 2019 11:11:02 -0800 Subject: [PATCH 0495/1815] Remove empty file --- Godeps | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 Godeps diff --git a/Godeps b/Godeps deleted file mode 100644 index e69de29bb..000000000 From 0fd08dd65aa629f9c391344816c8c3175a9083c4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 8 Jan 2019 11:56:44 -0800 Subject: [PATCH 0496/1815] Add a copy of the input metric when adding to aggregator (#5266) --- internal/models/running_aggregator.go | 3 ++- internal/models/running_aggregator_test.go | 27 +++++++++++++++++++++- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/internal/models/running_aggregator.go b/internal/models/running_aggregator.go index 4fb7bcbe1..b1fa3637b 100644 --- a/internal/models/running_aggregator.go +++ b/internal/models/running_aggregator.go @@ -109,11 +109,12 @@ func (r *RunningAggregator) metricDropped(metric telegraf.Metric) { // Add a metric to the aggregator and return true if the original metric // should be dropped. func (r *RunningAggregator) Add(metric telegraf.Metric) bool { - if ok := r.Config.Filter.Select(metric); !ok { return false } + metric = metric.Copy() + r.Config.Filter.Modify(metric) if len(metric.FieldList()) == 0 { return r.Config.DropOriginal diff --git a/internal/models/running_aggregator_test.go b/internal/models/running_aggregator_test.go index 6bacbf8ed..76c7e4e5d 100644 --- a/internal/models/running_aggregator_test.go +++ b/internal/models/running_aggregator_test.go @@ -7,7 +7,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) @@ -152,6 +151,32 @@ func TestAddDropOriginal(t *testing.T) { require.False(t, ra.Add(m2)) } +func TestAddDoesNotModifyMetric(t *testing.T) { + ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{ + Name: "TestRunningAggregator", + Filter: Filter{ + FieldPass: []string{"a"}, + }, + DropOriginal: true, + }) + require.NoError(t, ra.Config.Filter.Compile()) + + now := time.Now() + + m := testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "a": int64(42), + "b": int64(42), + }, + now) + expected := m.Copy() + ra.Add(m) + + testutil.RequireMetricEqual(t, expected, m) +} + type TestAggregator struct { sum int64 } From 5d0b7011d2cf51f1f4fc3feb7570ccf12e5dfef2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 8 Jan 2019 11:58:00 -0800 Subject: [PATCH 0497/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 42cc313b7..89ddba762 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,6 +43,7 @@ - [#5219](https://github.com/influxdata/telegraf/issues/5219): Allow non local udp connections in net_response. - [#5218](https://github.com/influxdata/telegraf/issues/5218): Fix toml option names in parser processor. - [#5225](https://github.com/influxdata/telegraf/issues/5225): Fix panic in docker input with bad endpoint. +- [#5209](https://github.com/influxdata/telegraf/issues/5209): Fix original metric modified by aggregator filters. 
## v1.9.1 [2018-12-11] From 741a4d9c97582397a5bd6fc06b795d3f928dcfd1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 8 Jan 2019 11:59:30 -0800 Subject: [PATCH 0498/1815] Set release date for 1.9.2 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 89ddba762..7f7c911f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,7 +26,7 @@ - [#4610](https://github.com/influxdata/telegraf/pull/4610): Fix initscript removes pidfile of restarted Telegraf process. -## v1.9.2 [unreleased] +## v1.9.2 [2019-01-08] #### Bugfixes From 1c3acafc8fbfc8c1541fdd1cfa2782b80254ff82 Mon Sep 17 00:00:00 2001 From: Phil Schwartz Date: Tue, 8 Jan 2019 16:13:14 -0600 Subject: [PATCH 0499/1815] Fix arithmetic overflow in sqlserver input (#5261) --- plugins/inputs/sqlserver/sqlserver.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 8f36255b4..5f55a27a6 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -2097,10 +2097,10 @@ SELECT -- value , value = CAST(CASE cc.cntr_type When 65792 Then cc.cntr_value -- Count - When 537003264 Then IsNull(Cast(cc.cntr_value as Money) / NullIf(cbc.cntr_value, 0), 0) -- Ratio + When 537003264 Then IsNull(Cast(cc.cntr_value as decimal(19,4)) / NullIf(cbc.cntr_value, 0), 0) -- Ratio When 272696576 Then cc.cntr_value - pc.cntr_value -- Per Second - When 1073874176 Then IsNull(Cast(cc.cntr_value - pc.cntr_value as Money) / NullIf(cbc.cntr_value - pbc.cntr_value, 0), 0) -- Avg - When 272696320 Then IsNull(Cast(cc.cntr_value - pc.cntr_value as Money) / NullIf(cbc.cntr_value - pbc.cntr_value, 0), 0) -- Avg/sec + When 1073874176 Then IsNull(Cast(cc.cntr_value - pc.cntr_value as decimal(19,4)) / NullIf(cbc.cntr_value - pbc.cntr_value, 0), 0) -- Avg + When 272696320 Then IsNull(Cast(cc.cntr_value - pc.cntr_value as decimal(19,4)) / NullIf(cbc.cntr_value - pbc.cntr_value, 0), 0) -- Avg/sec When 1073939712 Then cc.cntr_value - pc.cntr_value -- Base Else cc.cntr_value End as bigint) --, currentvalue= CAST(cc.cntr_value as bigint) From 8538894690c460cd75941faf06346f34b11263bd Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 8 Jan 2019 14:16:14 -0800 Subject: [PATCH 0500/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7f7c911f4..788fef6da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,12 @@ - [#4610](https://github.com/influxdata/telegraf/pull/4610): Fix initscript removes pidfile of restarted Telegraf process. +## v1.9.3 [unreleased] + +#### Bugfixes + +- [#5261](https://github.com/influxdata/telegraf/pull/5261): Fix arithmetic overflow in sqlserver input. 
+
 ## v1.9.2 [2019-01-08]

 #### Bugfixes

From 4125e4161c18e7a613cff3b0675d97b32da5dff3 Mon Sep 17 00:00:00 2001
From: Max Renaud
Date: Tue, 8 Jan 2019 15:02:32 -0800
Subject: [PATCH 0501/1815] Add input plugin for Neptune Apex aquarium
 controller (#5191)

---
 plugins/inputs/all/all.go                    |   1 +
 plugins/inputs/neptune_apex/README.md        | 144 +++++
 plugins/inputs/neptune_apex/neptune_apex.go  | 300 +++++++++
 .../inputs/neptune_apex/neptune_apex_test.go | 593 ++++++++++++++++++
 4 files changed, 1038 insertions(+)
 create mode 100644 plugins/inputs/neptune_apex/README.md
 create mode 100644 plugins/inputs/neptune_apex/neptune_apex.go
 create mode 100644 plugins/inputs/neptune_apex/neptune_apex_test.go

diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 9c183fcbb..2b189e5ff 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -79,6 +79,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/mysql"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nats"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer"
+	_ "github.com/influxdata/telegraf/plugins/inputs/neptune_apex"
 	_ "github.com/influxdata/telegraf/plugins/inputs/net"
 	_ "github.com/influxdata/telegraf/plugins/inputs/net_response"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
diff --git a/plugins/inputs/neptune_apex/README.md b/plugins/inputs/neptune_apex/README.md
new file mode 100644
index 000000000..fae0c58b7
--- /dev/null
+++ b/plugins/inputs/neptune_apex/README.md
@@ -0,0 +1,144 @@
+# neptune_apex Input Plugin
+
+The neptune_apex Input Plugin collects real-time data from the Apex's status.xml page.
+
+### Configuration
+
+```toml
+[[inputs.neptune_apex]]
+  ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex.
+  ## Measurements will be logged under "apex".
+
+  ## The base URL of the local Apex(es). If you specify more than one server, they will
+  ## be differentiated by the "source" tag.
+  servers = [
+    "http://apex.local",
+  ]
+
+  ## The response_timeout specifies how long to wait for a reply from the Apex.
+  #response_timeout = "5s"
+
+```
+
+### Metrics
+
+The [Neptune Apex](https://www.neptunesystems.com/) controller family allows an aquarium hobbyist to monitor and control
+their tanks based on various probes. The data is taken directly from /cgi-bin/status.xml at the interval specified
+in the telegraf.conf configuration file.
+
+No manipulation is done on any of the fields; this ensures that future changes to the status.xml do not introduce
+conversion bugs to this plugin. When reasonable and predictable, some tags are derived to make graphing easier
+without front-end programming. These tags are clearly marked in the list below and should be considered a convenience
+rather than authoritative.
+
+- neptune_apex (All metrics have this measurement name)
+  - tags:
+    - host (mandatory, string) is the host on which telegraf runs.
+    - source (mandatory, string) contains the hostname of the apex device. This can be used to differentiate between
+      different units. By using the source instead of the serial number, replacement units won't disturb graphs.
+    - type (mandatory, string) maps to the different types of data. Values can be "controller" (the Apex controller
+      itself), "probe" for the different input probes, or "output" for any physical or virtual outputs. The Watt and Amp
+      probes attached to the physical 120V outlets are aggregated under the output type.
+    - hardware (mandatory, string) controller hardware version
+    - software (mandatory, string) software version
+    - probe_type (optional, string) contains the probe type as reported by the Apex.
+    - name (optional, string) contains the name of the probe or output.
+    - output_id (optional, string) represents the internal unique output ID. This is different from the device_id.
+    - device_id (optional, string) maps to either the aquabus address or the internal reference.
+    - output_type (optional, string) categorizes the output. This tag is DERIVED from the
+      device_id. Possible values are: "variable" for the 0-10V signal ports, "outlet" for physical 120V sockets, "alert"
+      for alarms (email, sound), "virtual" for user-defined outputs, and "unknown" for everything else.
+  - fields:
+    - value (float, various units) represents the probe reading.
+    - state (string) represents the output state as defined by the Apex. Examples include "AOF" for Auto (OFF), "TBL"
+      for operating according to a table, and "PF*" for different programs.
+    - amp (float, Ampere) is the amount of current flowing through the 120V outlet.
+    - watt (float, Watt) represents the amount of energy flowing through the 120V outlet.
+    - xstatus (string) indicates the xstatus of an outlet. Found on wireless Vortech devices.
+    - power_failed (int64, Unix epoch in ns) when the controller last lost power.
+    - power_restored (int64, Unix epoch in ns) when the controller last powered on.
+    - serial (string, serial number)
+  - time:
+    - The time used for the metric is parsed from the status.xml page. This helps when cross-referencing events with
+      the local system of Apex Fusion. Since the Apex uses NTP, this should not matter in most scenarios.
+
+### Sample Queries
+
+Get the mean temperature, averaged in 20-second windows, over the last 6 hours:
+```
+SELECT mean("value") FROM "neptune_apex" WHERE ("probe_type" = 'Temp') AND time >= now() - 6h GROUP BY time(20s)
+```
+
+### Troubleshooting
+
+#### sendRequest failure
+This indicates a problem communicating with the local Apex controller. If on Mac/Linux, try curl:
+```
+$ curl apex.local/cgi-bin/status.xml
+```
+to isolate the problem.
+
+#### parseXML errors
+Ensure the XML being returned is valid. If you get valid XML back, open a bug request.
+
+#### Missing fields/data
+The neptune_apex plugin is strict on its input to prevent any conversion errors.
If you have fields in the status.xml +output that are not converted to a metric, open a feature request and paste your whole status.xml + +### Example Output + +``` +> neptune_apex,hardware=1.0,host=ubuntu,software=5.04_7A18,source=apex,type=controller power_failed=1544814000000000000i,power_restored=1544833875000000000i,serial="AC5:12345" 1545978278000000000 +> neptune_apex,device_id=base_Var1,hardware=1.0,host=ubuntu,name=VarSpd1_I1,output_id=0,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF1" 1545978278000000000 +> neptune_apex,device_id=base_Var2,hardware=1.0,host=ubuntu,name=VarSpd2_I2,output_id=1,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF2" 1545978278000000000 +> neptune_apex,device_id=base_Var3,hardware=1.0,host=ubuntu,name=VarSpd3_I3,output_id=2,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF3" 1545978278000000000 +> neptune_apex,device_id=base_Var4,hardware=1.0,host=ubuntu,name=VarSpd4_I4,output_id=3,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF4" 1545978278000000000 +> neptune_apex,device_id=base_Alarm,hardware=1.0,host=ubuntu,name=SndAlm_I6,output_id=4,output_type=alert,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +> neptune_apex,device_id=base_Warn,hardware=1.0,host=ubuntu,name=SndWrn_I7,output_id=5,output_type=alert,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +> neptune_apex,device_id=base_email,hardware=1.0,host=ubuntu,name=EmailAlm_I5,output_id=6,output_type=alert,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +> neptune_apex,device_id=base_email2,hardware=1.0,host=ubuntu,name=Email2Alm_I9,output_id=7,output_type=alert,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +> neptune_apex,device_id=2_1,hardware=1.0,host=ubuntu,name=RETURN_2_1,output_id=8,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0.3,state="AON",watt=34 1545978278000000000 +> neptune_apex,device_id=2_2,hardware=1.0,host=ubuntu,name=Heater1_2_2,output_id=9,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AOF",watt=0 1545978278000000000 +> neptune_apex,device_id=2_3,hardware=1.0,host=ubuntu,name=FREE_2_3,output_id=10,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="OFF",watt=1 1545978278000000000 +> neptune_apex,device_id=2_4,hardware=1.0,host=ubuntu,name=LIGHT_2_4,output_id=11,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="OFF",watt=1 1545978278000000000 +> neptune_apex,device_id=2_5,hardware=1.0,host=ubuntu,name=LHead_2_5,output_id=12,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=4 1545978278000000000 +> neptune_apex,device_id=2_6,hardware=1.0,host=ubuntu,name=SKIMMER_2_6,output_id=13,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0.1,state="AON",watt=12 1545978278000000000 +> neptune_apex,device_id=2_7,hardware=1.0,host=ubuntu,name=FREE_2_7,output_id=14,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="OFF",watt=1 1545978278000000000 +> neptune_apex,device_id=2_8,hardware=1.0,host=ubuntu,name=CABLIGHT_2_8,output_id=15,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=1 1545978278000000000 +> neptune_apex,device_id=2_9,hardware=1.0,host=ubuntu,name=LinkA_2_9,output_id=16,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +> 
neptune_apex,device_id=2_10,hardware=1.0,host=ubuntu,name=LinkB_2_10,output_id=17,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +> neptune_apex,device_id=3_1,hardware=1.0,host=ubuntu,name=RVortech_3_1,output_id=18,output_type=unknown,software=5.04_7A18,source=apex,type=output state="TBL",xstatus="OK" 1545978278000000000 +> neptune_apex,device_id=3_2,hardware=1.0,host=ubuntu,name=LVortech_3_2,output_id=19,output_type=unknown,software=5.04_7A18,source=apex,type=output state="TBL",xstatus="OK" 1545978278000000000 +> neptune_apex,device_id=4_1,hardware=1.0,host=ubuntu,name=OSMOLATO_4_1,output_id=20,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AOF",watt=0 1545978278000000000 +> neptune_apex,device_id=4_2,hardware=1.0,host=ubuntu,name=HEATER2_4_2,output_id=21,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AOF",watt=0 1545978278000000000 +> neptune_apex,device_id=4_3,hardware=1.0,host=ubuntu,name=NUC_4_3,output_id=22,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0.1,state="AON",watt=8 1545978278000000000 +> neptune_apex,device_id=4_4,hardware=1.0,host=ubuntu,name=CABFAN_4_4,output_id=23,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=1 1545978278000000000 +> neptune_apex,device_id=4_5,hardware=1.0,host=ubuntu,name=RHEAD_4_5,output_id=24,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=3 1545978278000000000 +> neptune_apex,device_id=4_6,hardware=1.0,host=ubuntu,name=FIRE_4_6,output_id=25,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=3 1545978278000000000 +> neptune_apex,device_id=4_7,hardware=1.0,host=ubuntu,name=LightGW_4_7,output_id=26,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=1 1545978278000000000 +> neptune_apex,device_id=4_8,hardware=1.0,host=ubuntu,name=GBSWITCH_4_8,output_id=27,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=0 1545978278000000000 +> neptune_apex,device_id=4_9,hardware=1.0,host=ubuntu,name=LinkA_4_9,output_id=28,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +> neptune_apex,device_id=4_10,hardware=1.0,host=ubuntu,name=LinkB_4_10,output_id=29,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +> neptune_apex,device_id=5_1,hardware=1.0,host=ubuntu,name=LinkA_5_1,output_id=30,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +> neptune_apex,device_id=Cntl_A1,hardware=1.0,host=ubuntu,name=ATO_EMPTY,output_id=31,output_type=virtual,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +> neptune_apex,device_id=Cntl_A2,hardware=1.0,host=ubuntu,name=LEAK,output_id=32,output_type=virtual,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +> neptune_apex,device_id=Cntl_A3,hardware=1.0,host=ubuntu,name=SKMR_NOPWR,output_id=33,output_type=virtual,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +> neptune_apex,hardware=1.0,host=ubuntu,name=Tmp,probe_type=Temp,software=5.04_7A18,source=apex,type=probe value=78.1 1545978278000000000 +> neptune_apex,hardware=1.0,host=ubuntu,name=pH,probe_type=pH,software=5.04_7A18,source=apex,type=probe value=7.93 1545978278000000000 +> neptune_apex,hardware=1.0,host=ubuntu,name=ORP,probe_type=ORP,software=5.04_7A18,source=apex,type=probe 
value=191 1545978278000000000 +> neptune_apex,hardware=1.0,host=ubuntu,name=Salt,probe_type=Cond,software=5.04_7A18,source=apex,type=probe value=29.4 1545978278000000000 +> neptune_apex,hardware=1.0,host=ubuntu,name=Volt_2,software=5.04_7A18,source=apex,type=probe value=117 1545978278000000000 +> neptune_apex,hardware=1.0,host=ubuntu,name=Volt_4,software=5.04_7A18,source=apex,type=probe value=118 1545978278000000000 + +``` + +### Contributing + +This plugin is used for mission-critical aquatic life support. A bug could very well result in the death of animals. +Neptune does not publish a schema file and as such, we have made this plugin very strict on input with no provisions for +automatically adding fields. We are also careful to not add default values when none are presented to prevent automation +errors. + +When writing unit tests, use actual Apex output to run tests. It's acceptable to abridge the number of repeated fields +but never inner fields or parameters. \ No newline at end of file diff --git a/plugins/inputs/neptune_apex/neptune_apex.go b/plugins/inputs/neptune_apex/neptune_apex.go new file mode 100644 index 000000000..370407a41 --- /dev/null +++ b/plugins/inputs/neptune_apex/neptune_apex.go @@ -0,0 +1,300 @@ +// Package neptuneapex implements an input plugin for the Neptune Apex +// aquarium controller. +package neptuneapex + +import ( + "encoding/xml" + "fmt" + "io/ioutil" + "math" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Measurement is constant across all metrics. +const Measurement = "neptune_apex" + +type xmlReply struct { + SoftwareVersion string `xml:"software,attr"` + HardwareVersion string `xml:"hardware,attr"` + Hostname string `xml:"hostname"` + Serial string `xml:"serial"` + Timezone float64 `xml:"timezone"` + Date string `xml:"date"` + PowerFailed string `xml:"power>failed"` + PowerRestored string `xml:"power>restored"` + Probe []probe `xml:"probes>probe"` + Outlet []outlet `xml:"outlets>outlet"` +} + +type probe struct { + Name string `xml:"name"` + Value string `xml:"value"` + Type *string `xml:"type"` +} + +type outlet struct { + Name string `xml:"name"` + OutputID string `xml:"outputID"` + State string `xml:"state"` + DeviceID string `xml:"deviceID"` + Xstatus *string `xml:"xstatus"` +} + +// NeptuneApex implements telegraf.Input. +type NeptuneApex struct { + Servers []string + ResponseTimeout internal.Duration + httpClient *http.Client +} + +// Description implements telegraf.Input.Description +func (*NeptuneApex) Description() string { + return "Neptune Apex data collector" +} + +// SampleConfig implements telegraf.Input.SampleConfig +func (*NeptuneApex) SampleConfig() string { + return ` + ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex. + ## Measurements will be logged under "apex". + + ## The base URL of the local Apex(es). If you specify more than one server, they will + ## be differentiated by the "source" tag. + servers = [ + "http://apex.local", + ] + + ## The response_timeout specifies how long to wait for a reply from the Apex. 
+ #response_timeout = "5s" +` +} + +// Gather implements telegraf.Input.Gather +func (n *NeptuneApex) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + for _, server := range n.Servers { + wg.Add(1) + go func(server string) { + defer wg.Done() + acc.AddError(n.gatherServer(acc, server)) + }(server) + } + wg.Wait() + return nil +} + +func (n *NeptuneApex) gatherServer( + acc telegraf.Accumulator, server string) error { + resp, err := n.sendRequest(server) + if err != nil { + return err + } + return n.parseXML(acc, resp) +} + +// parseXML is strict on the input and does not do best-effort parsing. +//This is because of the life-support nature of the Neptune Apex. +func (n *NeptuneApex) parseXML(acc telegraf.Accumulator, data []byte) error { + r := xmlReply{} + err := xml.Unmarshal(data, &r) + if err != nil { + return fmt.Errorf("unable to unmarshal XML: %v\nXML DATA: %q", + err, data) + } + + var reportTime time.Time + var powerFailed, powerRestored int64 + if reportTime, err = parseTime(r.Date, r.Timezone); err != nil { + return err + } + if val, err := parseTime(r.PowerFailed, r.Timezone); err != nil { + return err + } else { + powerFailed = val.UnixNano() + } + if val, err := parseTime(r.PowerRestored, r.Timezone); err != nil { + return err + } else { + powerRestored = val.UnixNano() + } + + mainFields := map[string]interface{}{ + "serial": r.Serial, + "power_failed": powerFailed, + "power_restored": powerRestored, + } + acc.AddFields(Measurement, mainFields, + map[string]string{ + "source": r.Hostname, + "type": "controller", + "software": r.SoftwareVersion, + "hardware": r.HardwareVersion, + }, + reportTime) + + // Outlets. + for _, o := range r.Outlet { + tags := map[string]string{ + "source": r.Hostname, + "output_id": o.OutputID, + "device_id": o.DeviceID, + "name": o.Name, + "type": "output", + "software": r.SoftwareVersion, + "hardware": r.HardwareVersion, + } + fields := map[string]interface{}{ + "state": o.State, + } + // Find Amp and Watt probes and add them as fields. + // Remove the redundant probe. + if pos := findProbe(fmt.Sprintf("%sW", o.Name), r.Probe); pos > -1 { + value, err := strconv.ParseFloat( + strings.TrimSpace(r.Probe[pos].Value), 64) + if err != nil { + acc.AddError( + fmt.Errorf( + "cannot convert string value %q to float64: %v", + r.Probe[pos].Value, err)) + continue // Skip the whole outlet. + } + fields["watt"] = value + r.Probe[pos] = r.Probe[len(r.Probe)-1] + r.Probe = r.Probe[:len(r.Probe)-1] + } + if pos := findProbe(fmt.Sprintf("%sA", o.Name), r.Probe); pos > -1 { + value, err := strconv.ParseFloat( + strings.TrimSpace(r.Probe[pos].Value), 64) + if err != nil { + acc.AddError( + fmt.Errorf( + "cannot convert string value %q to float64: %v", + r.Probe[pos].Value, err)) + break // // Skip the whole outlet. + } + fields["amp"] = value + r.Probe[pos] = r.Probe[len(r.Probe)-1] + r.Probe = r.Probe[:len(r.Probe)-1] + } + if o.Xstatus != nil { + fields["xstatus"] = *o.Xstatus + } + // Try to determine outlet type. Focus on accuracy, leaving the + //outlet_type "unknown" when ambiguous. 24v and vortech cannot be + // determined. 
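+		// Note: the switch below is order-sensitive. Named deviceID prefixes
+		// for variable and alert outputs are matched first, the presence of a
+		// watt or amp reading then identifies physical outlets, and "Cntl_"
+		// virtual outputs are only checked afterwards, before defaulting to
+		// "unknown".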
+ switch { + case strings.HasPrefix(o.DeviceID, "base_Var"): + tags["output_type"] = "variable" + case o.DeviceID == "base_Alarm": + fallthrough + case o.DeviceID == "base_Warn": + fallthrough + case strings.HasPrefix(o.DeviceID, "base_email"): + tags["output_type"] = "alert" + case fields["watt"] != nil || fields["amp"] != nil: + tags["output_type"] = "outlet" + case strings.HasPrefix(o.DeviceID, "Cntl_"): + tags["output_type"] = "virtual" + default: + tags["output_type"] = "unknown" + } + + acc.AddFields(Measurement, fields, tags, reportTime) + } + + // Probes. + for _, p := range r.Probe { + value, err := strconv.ParseFloat(strings.TrimSpace(p.Value), 64) + if err != nil { + acc.AddError(fmt.Errorf( + "cannot convert string value %q to float64: %v", + p.Value, err)) + continue + } + fields := map[string]interface{}{ + "value": value, + } + tags := map[string]string{ + "source": r.Hostname, + "type": "probe", + "name": p.Name, + "software": r.SoftwareVersion, + "hardware": r.HardwareVersion, + } + if p.Type != nil { + tags["probe_type"] = *p.Type + } + acc.AddFields(Measurement, fields, tags, reportTime) + } + + return nil +} + +func findProbe(probe string, probes []probe) int { + for i, p := range probes { + if p.Name == probe { + return i + } + } + return -1 +} + +// parseTime takes a Neptune Apex date/time string with a timezone and +// returns a time.Time struct. +func parseTime(val string, tz float64) (time.Time, error) { + // Magic time constant from https://golang.org/pkg/time/#Parse + const TimeLayout = "01/02/2006 15:04:05 -0700" + + // Timezone offset needs to be explicit + sign := '+' + if tz < 0 { + sign = '-' + } + + // Build a time string with the timezone in a format Go can parse. + tzs := fmt.Sprintf("%c%04d", sign, int(math.Abs(tz))*100) + ts := fmt.Sprintf("%s %s", val, tzs) + t, err := time.Parse(TimeLayout, ts) + if err != nil { + return time.Now(), fmt.Errorf("unable to parse %q (%v)", ts, err) + } + return t, nil +} + +func (n *NeptuneApex) sendRequest(server string) ([]byte, error) { + url := fmt.Sprintf("%s/cgi-bin/status.xml", server) + resp, err := n.httpClient.Get(url) + if err != nil { + return nil, fmt.Errorf("http GET failed: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf( + "response from server URL %q returned %d (%s), expected %d (%s)", + url, resp.StatusCode, http.StatusText(resp.StatusCode), + http.StatusOK, http.StatusText(http.StatusOK)) + } + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to read output from %q: %v", url, err) + } + return body, nil +} + +func init() { + inputs.Add("neptune_apex", func() telegraf.Input { + return &NeptuneApex{ + httpClient: &http.Client{ + Timeout: 5 * time.Second, + }, + } + }) +} diff --git a/plugins/inputs/neptune_apex/neptune_apex_test.go b/plugins/inputs/neptune_apex/neptune_apex_test.go new file mode 100644 index 000000000..1d554149e --- /dev/null +++ b/plugins/inputs/neptune_apex/neptune_apex_test.go @@ -0,0 +1,593 @@ +package neptuneapex + +import ( + "bytes" + "context" + "net" + "net/http" + "net/http/httptest" + "reflect" + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" +) + +func TestGather(t *testing.T) { + h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + w.Write([]byte("data")) + }) + c, destroy := fakeHTTPClient(h) + defer destroy() + n := &NeptuneApex{ + httpClient: c, + } + tests := []struct { + name string + servers []string 
+ }{ + { + name: "Good case, 2 servers", + servers: []string{"http://abc", "https://def"}, + }, + { + name: "Good case, 0 servers", + servers: []string{}, + }, + { + name: "Good case nil", + servers: nil, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + var acc testutil.Accumulator + n.Servers = test.servers + n.Gather(&acc) + if len(acc.Errors) != len(test.servers) { + t.Errorf("Number of servers mismatch. got=%d, want=%d", + len(acc.Errors), len(test.servers)) + } + + }) + } +} + +func TestParseXML(t *testing.T) { + n := &NeptuneApex{} + goodTime := time.Date(2018, 12, 22, 21, 55, 37, 0, + time.FixedZone("PST", 3600*-8)) + tests := []struct { + name string + xmlResponse []byte + wantMetrics []*testutil.Metric + wantAccErr bool + wantErr bool + }{ + { + name: "Good test", + xmlResponse: []byte(APEX2016), + wantMetrics: []*testutil.Metric{ + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "apex", + "type": "controller", + "software": "5.04_7A18", + "hardware": "1.0", + }, + Fields: map[string]interface{}{ + "serial": "AC5:12345", + "power_failed": int64(1544814000000000000), + "power_restored": int64(1544833875000000000), + }, + }, + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "apex", + "output_id": "0", + "device_id": "base_Var1", + "name": "VarSpd1_I1", + "output_type": "variable", + "type": "output", + "software": "5.04_7A18", + "hardware": "1.0", + }, + Fields: map[string]interface{}{"state": "PF1"}, + }, + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "apex", + "output_id": "6", + "device_id": "base_email", + "name": "EmailAlm_I5", + "output_type": "alert", + "type": "output", + "software": "5.04_7A18", + "hardware": "1.0", + }, + Fields: map[string]interface{}{"state": "AOF"}, + }, + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "apex", + "output_id": "8", + "device_id": "2_1", + "name": "RETURN_2_1", + "output_type": "outlet", + "type": "output", + "software": "5.04_7A18", + "hardware": "1.0", + }, + Fields: map[string]interface{}{ + "state": "AON", + "watt": 35.0, + "amp": 0.3, + }, + }, + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "apex", + "output_id": "18", + "device_id": "3_1", + "name": "RVortech_3_1", + "output_type": "unknown", + "type": "output", + "software": "5.04_7A18", + "hardware": "1.0", + }, + Fields: map[string]interface{}{ + "state": "TBL", + "xstatus": "OK", + }, + }, + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "apex", + "output_id": "28", + "device_id": "4_9", + "name": "LinkA_4_9", + "output_type": "unknown", + "type": "output", + "software": "5.04_7A18", + "hardware": "1.0", + }, + Fields: map[string]interface{}{"state": "AOF"}, + }, + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "apex", + "output_id": "32", + "device_id": "Cntl_A2", + "name": "LEAK", + "output_type": "virtual", + "type": "output", + "software": "5.04_7A18", + "hardware": "1.0", + }, + Fields: map[string]interface{}{"state": "AOF"}, + }, + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "apex", + "name": "Salt", + "type": "probe", + "probe_type": "Cond", + "software": "5.04_7A18", + "hardware": "1.0", + }, + Fields: map[string]interface{}{"value": 30.1}, + }, + { + Measurement: Measurement, + Time: 
goodTime, + Tags: map[string]string{ + "source": "apex", + "name": "Volt_2", + "type": "probe", + "software": "5.04_7A18", + "hardware": "1.0", + }, + Fields: map[string]interface{}{"value": 115.0}, + }, + }, + }, + { + name: "Unmarshal error", + xmlResponse: []byte("Invalid"), + wantErr: true, + }, + { + name: "Report time failure", + xmlResponse: []byte(`abc`), + wantErr: true, + }, + { + name: "Power Failed time failure", + xmlResponse: []byte( + `12/22/2018 21:55:37 + -8.0a + 12/22/2018 22:55:37`), + wantErr: true, + }, + { + name: "Power restored time failure", + xmlResponse: []byte( + `12/22/2018 21:55:37 + -8.0a + 12/22/2018 22:55:37`), + wantErr: true, + }, + { + name: "Power failed failure", + xmlResponse: []byte( + `abc`), + wantErr: true, + }, + { + name: "Failed to parse watt to float", + xmlResponse: []byte( + ` + 12/22/2018 21:55:37-8.0 + 12/22/2018 21:55:37 + 12/22/2018 21:55:37 + o1 + o1Wabc + `), + wantAccErr: true, + wantMetrics: []*testutil.Metric{ + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "", + "type": "controller", + "hardware": "", + "software": "", + }, + Fields: map[string]interface{}{ + "serial": "", + "power_failed": int64(1545544537000000000), + "power_restored": int64(1545544537000000000), + }, + }, + }, + }, + { + name: "Failed to parse amp to float", + xmlResponse: []byte( + ` + 12/22/2018 21:55:37-8.0 + 12/22/2018 21:55:37 + 12/22/2018 21:55:37 + o1 + o1Aabc + `), + wantAccErr: true, + wantMetrics: []*testutil.Metric{ + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "", + "type": "controller", + "hardware": "", + "software": "", + }, + Fields: map[string]interface{}{ + "serial": "", + "power_failed": int64(1545544537000000000), + "power_restored": int64(1545544537000000000), + }, + }, + }, + }, + { + name: "Failed to parse probe value to float", + xmlResponse: []byte( + ` + 12/22/2018 21:55:37-8.0 + 12/22/2018 21:55:37 + 12/22/2018 21:55:37 + p1abc + `), + wantAccErr: true, + wantMetrics: []*testutil.Metric{ + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "", + "type": "controller", + "hardware": "", + "software": "", + }, + Fields: map[string]interface{}{ + "serial": "", + "power_failed": int64(1545544537000000000), + "power_restored": int64(1545544537000000000), + }, + }, + }, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + var acc testutil.Accumulator + err := n.parseXML(&acc, []byte(test.xmlResponse)) + if (err != nil) != test.wantErr { + t.Errorf("err mismatch. got=%v, want=%t", err, test.wantErr) + } + if test.wantErr { + return + } + if len(acc.Errors) > 0 != test.wantAccErr { + t.Errorf("Accumulator errors. got=%v, want=none", acc.Errors) + } + if len(acc.Metrics) != len(test.wantMetrics) { + t.Fatalf("Invalid number of metrics received. 
got=%d, want=%d", len(acc.Metrics), len(test.wantMetrics)) + } + for i, m := range acc.Metrics { + if m.Measurement != test.wantMetrics[i].Measurement { + t.Errorf("Metric measurement mismatch at position %d:\ngot=\n%s\nWant=\n%s", i, m.Measurement, test.wantMetrics[i].Measurement) + } + if !reflect.DeepEqual(m.Tags, test.wantMetrics[i].Tags) { + t.Errorf("Metric tags mismatch at position %d:\ngot=\n%v\nwant=\n%v", i, m.Tags, test.wantMetrics[i].Tags) + } + if !reflect.DeepEqual(m.Fields, test.wantMetrics[i].Fields) { + t.Errorf("Metric fields mismatch at position %d:\ngot=\n%#v\nwant=:\n%#v", i, m.Fields, test.wantMetrics[i].Fields) + } + if !m.Time.Equal(test.wantMetrics[i].Time) { + t.Errorf("Metric time mismatch at position %d:\ngot=\n%s\nwant=\n%s", i, m.Time, test.wantMetrics[i].Time) + } + } + }) + } +} + +func TestSendRequest(t *testing.T) { + tests := []struct { + name string + statusCode int + wantErr bool + }{ + { + name: "Good case", + statusCode: http.StatusOK, + }, + { + name: "Get error", + statusCode: http.StatusNotFound, + wantErr: true, + }, + { + name: "Status 301", + statusCode: http.StatusMovedPermanently, + wantErr: true, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + h := http.HandlerFunc(func( + w http.ResponseWriter, r *http.Request) { + w.WriteHeader(test.statusCode) + w.Write([]byte("data")) + }) + c, destroy := fakeHTTPClient(h) + defer destroy() + n := &NeptuneApex{ + httpClient: c, + } + resp, err := n.sendRequest("http://abc") + if (err != nil) != test.wantErr { + t.Errorf("err mismatch. got=%v, want=%t", err, test.wantErr) + } + if test.wantErr { + return + } + if bytes.Compare(resp, []byte("data")) != 0 { + t.Errorf( + "Response data mismatch. got=%q, want=%q", resp, "data") + } + }) + } +} + +func TestParseTime(t *testing.T) { + tests := []struct { + name string + input string + timeZone float64 + wantTime time.Time + wantErr bool + }{ + { + name: "Good case - Timezone positive", + input: "01/01/2023 12:34:56", + timeZone: 5, + wantTime: time.Date(2023, 1, 1, 12, 34, 56, 0, + time.FixedZone("a", 3600*5)), + }, + { + name: "Good case - Timezone negative", + input: "01/01/2023 12:34:56", + timeZone: -8, + wantTime: time.Date(2023, 1, 1, 12, 34, 56, 0, + time.FixedZone("a", 3600*-8)), + }, + { + name: "Cannot parse", + input: "Not a date", + wantErr: true, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + res, err := parseTime(test.input, test.timeZone) + if (err != nil) != test.wantErr { + t.Errorf("err mismatch. got=%v, want=%t", err, test.wantErr) + } + if test.wantErr { + return + } + if !test.wantTime.Equal(res) { + t.Errorf("err mismatch. 
got=%s, want=%s", res, test.wantTime) + } + }) + } +} + +func TestFindProbe(t *testing.T) { + fakeProbes := []probe{ + { + Name: "test1", + }, + { + Name: "good", + }, + } + tests := []struct { + name string + probeName string + wantIndex int + }{ + { + name: "Good case - Found", + probeName: "good", + wantIndex: 1, + }, + { + name: "Not found", + probeName: "bad", + wantIndex: -1, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + index := findProbe(test.probeName, fakeProbes) + if index != test.wantIndex { + t.Errorf("probe index mismatch; got=%d, want %d", index, test.wantIndex) + } + }) + } +} + +func TestDescription(t *testing.T) { + n := &NeptuneApex{} + if n.Description() == "" { + t.Errorf("Empty description") + } +} + +func TestSampleConfig(t *testing.T) { + n := &NeptuneApex{} + if n.SampleConfig() == "" { + t.Errorf("Empty sample config") + } +} + +// This fakeHttpClient creates a server and binds a client to it. +// That way, it is possible to control the http +// output from within the test without changes to the main code. +func fakeHTTPClient(h http.Handler) (*http.Client, func()) { + s := httptest.NewServer(h) + c := &http.Client{ + Transport: &http.Transport{ + DialContext: func( + _ context.Context, network, _ string) (net.Conn, error) { + return net.Dial(network, s.Listener.Addr().String()) + }, + }, + } + return c, s.Close +} + +// Sample configuration from a 2016 version Neptune Apex. +const APEX2016 = ` + +apex +AC5:12345 +-8.00 +12/22/2018 21:55:37 +12/14/2018 11:00:00 +12/14/2018 16:31:15 + + + Salt 30.1 + Cond + RETURN_2_1A 0.3 + + RETURN_2_1W 35 + + Volt_2 115 + + + + VarSpd1_I1 + 0 + PF1 + base_Var1 + + + EmailAlm_I5 + 6 + AOF + base_email + + + RETURN_2_1 + 8 + AON + 2_1 + + + RVortech_3_1 + 18 + TBL + 3_1 +OK + + LinkA_4_9 + 28 + AOF + 4_9 + + + LEAK + 32 + AOF + Cntl_A2 + + +` From dd20b1cd10a9a17b0deb17258361ce78ab079ee9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 8 Jan 2019 15:09:36 -0800 Subject: [PATCH 0502/1815] Update changelog and docs for neptune_apex plugin --- CHANGELOG.md | 1 + README.md | 1 + plugins/inputs/neptune_apex/README.md | 123 ++++++++++++++------------ 3 files changed, 66 insertions(+), 59 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 788fef6da..0987f90e7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ #### New Inputs - [cloud_pubsub](/plugins/inputs/cloud_pubsub/README.md) - Contributed by @emilymye +- [neptune_apex](/plugins/inputs/neptune_apex/README.md) - Contributed by @MaxRenaud #### Features diff --git a/README.md b/README.md index d2d74a1f7..8240135e6 100644 --- a/README.md +++ b/README.md @@ -209,6 +209,7 @@ For documentation on the latest development code see the [documentation index][d * [mysql](./plugins/inputs/mysql) * [nats_consumer](./plugins/inputs/nats_consumer) * [nats](./plugins/inputs/nats) +* [neptune_apex](./plugins/inputs/neptune_apex) * [net](./plugins/inputs/net) * [net_response](./plugins/inputs/net_response) * [netstat](./plugins/inputs/net) diff --git a/plugins/inputs/neptune_apex/README.md b/plugins/inputs/neptune_apex/README.md index fae0c58b7..5531d3fa9 100644 --- a/plugins/inputs/neptune_apex/README.md +++ b/plugins/inputs/neptune_apex/README.md @@ -1,6 +1,11 @@ -# neptune_apex Input Plugin +# Neptune Apex Input Plugin + +The Neptune Apex controller family allows an aquarium hobbyist to monitor and control +their tanks based on various probes. 
The data is taken directly from the `/cgi-bin/status.xml` at the interval specified +in the telegraf.conf configuration file. + +The [Neptune Apex](https://www.neptunesystems.com/) input plugin collects real-time data from the Apex's status.xml page. -The neptune_apex Input Plugin collects real-time data from the Apex's status.xml page. ### Configuration @@ -22,21 +27,21 @@ The neptune_apex Input Plugin collects real-time data from the Apex's status.xml ### Metrics -The [Neptune Apex](https://www.neptunesystems.com/) controller family allows an aquarium hobbyist to monitor and control -their tanks based on various probes. The data is taken directly from the /cgi-bin/status.xml at the interval specified -in the telegraf.conf configuration file. +The Neptune Apex controller family allows an aquarium hobbyist to monitor and control +their tanks based on various probes. The data is taken directly from the /cgi-bin/status.xml at the interval specified +in the telegraf.conf configuration file. No manipulation is done on any of the fields to ensure future changes to the status.xml do not introduce conversion bugs -to this plugin. When reasonable and predictable, some tags are derived to make graphing easier and without front-end +to this plugin. When reasonable and predictable, some tags are derived to make graphing easier and without front-end programming. These tags are clearly marked in the list below and should be considered a convenience rather than authoritative. - neptune_apex (All metrics have this measurement name) - tags: - host (mandatory, string) is the host on which telegraf runs. - - source (mandatory, string) contains the hostname of the apex device. This can be used to differentiate between + - source (mandatory, string) contains the hostname of the apex device. This can be used to differentiate between different units. By using the source instead of the serial number, replacements units won't disturb graphs. - - type (mandatory, string) maps to the different types of data. Values can be "controller" (The Apex controller - itself), "probe" for the different input probes, or "output" for any physical or virtual outputs. The Watt and Amp + - type (mandatory, string) maps to the different types of data. Values can be "controller" (The Apex controller + itself), "probe" for the different input probes, or "output" for any physical or virtual outputs. The Watt and Amp probes attached to the physical 120V outlets are aggregated under the output type. - hardware (mandatory, string) controller hardware version - software (mandatory, string) software version @@ -44,12 +49,12 @@ programming. These tags are clearly marked in the list below and should be consi - name (optional, string) contains the name of the probe or output. - output_id (optional, string) represents the internal unique output ID. This is different from the device_id. - device_id (optional, string) maps to either the aquabus address or the internal reference. - - output_type (optional, string) categorizes the output into different categories. This tag is DERIVED from the - device_id. Possible values are: "variable" for the 0-10V signal ports, "outlet" for physical 120V sockets, "alert" + - output_type (optional, string) categorizes the output into different categories. This tag is DERIVED from the + device_id. Possible values are: "variable" for the 0-10V signal ports, "outlet" for physical 120V sockets, "alert" for alarms (email, sound), "virtual" for user-defined outputs, and "unknown" for everything else. 
- fields: - value (float, various unit) represents the probe reading. - - state (string) represents the output state as defined by the Apex. Examples include "AOF" for Auto (OFF), "TBL" + - state (string) represents the output state as defined by the Apex. Examples include "AOF" for Auto (OFF), "TBL" for operating according to a table, and "PF*" for different programs. - amp (float, Ampere) is the amount of current flowing through the 120V outlet. - watt (float, Watt) represents the amount of energy flowing through the 120V outlet. @@ -58,9 +63,9 @@ programming. These tags are clearly marked in the list below and should be consi - power_restored (int64, Unix epoch in ns) when the controller last powered on. - serial (string, serial number) - time: - - The time used for the metric is parsed from the status.xml page. This helps when cross-referencing events with - the local system of Apex Fusion. Since the Apex uses NTP, this should not matter in most scenarios. - + - The time used for the metric is parsed from the status.xml page. This helps when cross-referencing events with + the local system of Apex Fusion. Since the Apex uses NTP, this should not matter in most scenarios. + ### Sample Queries @@ -89,56 +94,56 @@ output that are not converted to a metric, open a feature request and paste your ### Example Output ``` -> neptune_apex,hardware=1.0,host=ubuntu,software=5.04_7A18,source=apex,type=controller power_failed=1544814000000000000i,power_restored=1544833875000000000i,serial="AC5:12345" 1545978278000000000 -> neptune_apex,device_id=base_Var1,hardware=1.0,host=ubuntu,name=VarSpd1_I1,output_id=0,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF1" 1545978278000000000 -> neptune_apex,device_id=base_Var2,hardware=1.0,host=ubuntu,name=VarSpd2_I2,output_id=1,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF2" 1545978278000000000 -> neptune_apex,device_id=base_Var3,hardware=1.0,host=ubuntu,name=VarSpd3_I3,output_id=2,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF3" 1545978278000000000 -> neptune_apex,device_id=base_Var4,hardware=1.0,host=ubuntu,name=VarSpd4_I4,output_id=3,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF4" 1545978278000000000 -> neptune_apex,device_id=base_Alarm,hardware=1.0,host=ubuntu,name=SndAlm_I6,output_id=4,output_type=alert,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 -> neptune_apex,device_id=base_Warn,hardware=1.0,host=ubuntu,name=SndWrn_I7,output_id=5,output_type=alert,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 -> neptune_apex,device_id=base_email,hardware=1.0,host=ubuntu,name=EmailAlm_I5,output_id=6,output_type=alert,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 -> neptune_apex,device_id=base_email2,hardware=1.0,host=ubuntu,name=Email2Alm_I9,output_id=7,output_type=alert,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 -> neptune_apex,device_id=2_1,hardware=1.0,host=ubuntu,name=RETURN_2_1,output_id=8,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0.3,state="AON",watt=34 1545978278000000000 -> neptune_apex,device_id=2_2,hardware=1.0,host=ubuntu,name=Heater1_2_2,output_id=9,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AOF",watt=0 1545978278000000000 -> neptune_apex,device_id=2_3,hardware=1.0,host=ubuntu,name=FREE_2_3,output_id=10,output_type=outlet,software=5.04_7A18,source=apex,type=output 
amp=0,state="OFF",watt=1 1545978278000000000 -> neptune_apex,device_id=2_4,hardware=1.0,host=ubuntu,name=LIGHT_2_4,output_id=11,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="OFF",watt=1 1545978278000000000 -> neptune_apex,device_id=2_5,hardware=1.0,host=ubuntu,name=LHead_2_5,output_id=12,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=4 1545978278000000000 -> neptune_apex,device_id=2_6,hardware=1.0,host=ubuntu,name=SKIMMER_2_6,output_id=13,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0.1,state="AON",watt=12 1545978278000000000 -> neptune_apex,device_id=2_7,hardware=1.0,host=ubuntu,name=FREE_2_7,output_id=14,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="OFF",watt=1 1545978278000000000 -> neptune_apex,device_id=2_8,hardware=1.0,host=ubuntu,name=CABLIGHT_2_8,output_id=15,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=1 1545978278000000000 -> neptune_apex,device_id=2_9,hardware=1.0,host=ubuntu,name=LinkA_2_9,output_id=16,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 -> neptune_apex,device_id=2_10,hardware=1.0,host=ubuntu,name=LinkB_2_10,output_id=17,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 -> neptune_apex,device_id=3_1,hardware=1.0,host=ubuntu,name=RVortech_3_1,output_id=18,output_type=unknown,software=5.04_7A18,source=apex,type=output state="TBL",xstatus="OK" 1545978278000000000 -> neptune_apex,device_id=3_2,hardware=1.0,host=ubuntu,name=LVortech_3_2,output_id=19,output_type=unknown,software=5.04_7A18,source=apex,type=output state="TBL",xstatus="OK" 1545978278000000000 -> neptune_apex,device_id=4_1,hardware=1.0,host=ubuntu,name=OSMOLATO_4_1,output_id=20,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AOF",watt=0 1545978278000000000 -> neptune_apex,device_id=4_2,hardware=1.0,host=ubuntu,name=HEATER2_4_2,output_id=21,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AOF",watt=0 1545978278000000000 -> neptune_apex,device_id=4_3,hardware=1.0,host=ubuntu,name=NUC_4_3,output_id=22,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0.1,state="AON",watt=8 1545978278000000000 -> neptune_apex,device_id=4_4,hardware=1.0,host=ubuntu,name=CABFAN_4_4,output_id=23,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=1 1545978278000000000 -> neptune_apex,device_id=4_5,hardware=1.0,host=ubuntu,name=RHEAD_4_5,output_id=24,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=3 1545978278000000000 -> neptune_apex,device_id=4_6,hardware=1.0,host=ubuntu,name=FIRE_4_6,output_id=25,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=3 1545978278000000000 -> neptune_apex,device_id=4_7,hardware=1.0,host=ubuntu,name=LightGW_4_7,output_id=26,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=1 1545978278000000000 -> neptune_apex,device_id=4_8,hardware=1.0,host=ubuntu,name=GBSWITCH_4_8,output_id=27,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=0 1545978278000000000 -> neptune_apex,device_id=4_9,hardware=1.0,host=ubuntu,name=LinkA_4_9,output_id=28,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 -> 
neptune_apex,device_id=4_10,hardware=1.0,host=ubuntu,name=LinkB_4_10,output_id=29,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 -> neptune_apex,device_id=5_1,hardware=1.0,host=ubuntu,name=LinkA_5_1,output_id=30,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 -> neptune_apex,device_id=Cntl_A1,hardware=1.0,host=ubuntu,name=ATO_EMPTY,output_id=31,output_type=virtual,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 -> neptune_apex,device_id=Cntl_A2,hardware=1.0,host=ubuntu,name=LEAK,output_id=32,output_type=virtual,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 -> neptune_apex,device_id=Cntl_A3,hardware=1.0,host=ubuntu,name=SKMR_NOPWR,output_id=33,output_type=virtual,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 -> neptune_apex,hardware=1.0,host=ubuntu,name=Tmp,probe_type=Temp,software=5.04_7A18,source=apex,type=probe value=78.1 1545978278000000000 -> neptune_apex,hardware=1.0,host=ubuntu,name=pH,probe_type=pH,software=5.04_7A18,source=apex,type=probe value=7.93 1545978278000000000 -> neptune_apex,hardware=1.0,host=ubuntu,name=ORP,probe_type=ORP,software=5.04_7A18,source=apex,type=probe value=191 1545978278000000000 -> neptune_apex,hardware=1.0,host=ubuntu,name=Salt,probe_type=Cond,software=5.04_7A18,source=apex,type=probe value=29.4 1545978278000000000 -> neptune_apex,hardware=1.0,host=ubuntu,name=Volt_2,software=5.04_7A18,source=apex,type=probe value=117 1545978278000000000 -> neptune_apex,hardware=1.0,host=ubuntu,name=Volt_4,software=5.04_7A18,source=apex,type=probe value=118 1545978278000000000 +neptune_apex,hardware=1.0,host=ubuntu,software=5.04_7A18,source=apex,type=controller power_failed=1544814000000000000i,power_restored=1544833875000000000i,serial="AC5:12345" 1545978278000000000 +neptune_apex,device_id=base_Var1,hardware=1.0,host=ubuntu,name=VarSpd1_I1,output_id=0,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF1" 1545978278000000000 +neptune_apex,device_id=base_Var2,hardware=1.0,host=ubuntu,name=VarSpd2_I2,output_id=1,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF2" 1545978278000000000 +neptune_apex,device_id=base_Var3,hardware=1.0,host=ubuntu,name=VarSpd3_I3,output_id=2,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF3" 1545978278000000000 +neptune_apex,device_id=base_Var4,hardware=1.0,host=ubuntu,name=VarSpd4_I4,output_id=3,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF4" 1545978278000000000 +neptune_apex,device_id=base_Alarm,hardware=1.0,host=ubuntu,name=SndAlm_I6,output_id=4,output_type=alert,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=base_Warn,hardware=1.0,host=ubuntu,name=SndWrn_I7,output_id=5,output_type=alert,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=base_email,hardware=1.0,host=ubuntu,name=EmailAlm_I5,output_id=6,output_type=alert,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=base_email2,hardware=1.0,host=ubuntu,name=Email2Alm_I9,output_id=7,output_type=alert,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=2_1,hardware=1.0,host=ubuntu,name=RETURN_2_1,output_id=8,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0.3,state="AON",watt=34 1545978278000000000 
+neptune_apex,device_id=2_2,hardware=1.0,host=ubuntu,name=Heater1_2_2,output_id=9,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AOF",watt=0 1545978278000000000 +neptune_apex,device_id=2_3,hardware=1.0,host=ubuntu,name=FREE_2_3,output_id=10,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="OFF",watt=1 1545978278000000000 +neptune_apex,device_id=2_4,hardware=1.0,host=ubuntu,name=LIGHT_2_4,output_id=11,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="OFF",watt=1 1545978278000000000 +neptune_apex,device_id=2_5,hardware=1.0,host=ubuntu,name=LHead_2_5,output_id=12,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=4 1545978278000000000 +neptune_apex,device_id=2_6,hardware=1.0,host=ubuntu,name=SKIMMER_2_6,output_id=13,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0.1,state="AON",watt=12 1545978278000000000 +neptune_apex,device_id=2_7,hardware=1.0,host=ubuntu,name=FREE_2_7,output_id=14,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="OFF",watt=1 1545978278000000000 +neptune_apex,device_id=2_8,hardware=1.0,host=ubuntu,name=CABLIGHT_2_8,output_id=15,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=1 1545978278000000000 +neptune_apex,device_id=2_9,hardware=1.0,host=ubuntu,name=LinkA_2_9,output_id=16,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=2_10,hardware=1.0,host=ubuntu,name=LinkB_2_10,output_id=17,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=3_1,hardware=1.0,host=ubuntu,name=RVortech_3_1,output_id=18,output_type=unknown,software=5.04_7A18,source=apex,type=output state="TBL",xstatus="OK" 1545978278000000000 +neptune_apex,device_id=3_2,hardware=1.0,host=ubuntu,name=LVortech_3_2,output_id=19,output_type=unknown,software=5.04_7A18,source=apex,type=output state="TBL",xstatus="OK" 1545978278000000000 +neptune_apex,device_id=4_1,hardware=1.0,host=ubuntu,name=OSMOLATO_4_1,output_id=20,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AOF",watt=0 1545978278000000000 +neptune_apex,device_id=4_2,hardware=1.0,host=ubuntu,name=HEATER2_4_2,output_id=21,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AOF",watt=0 1545978278000000000 +neptune_apex,device_id=4_3,hardware=1.0,host=ubuntu,name=NUC_4_3,output_id=22,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0.1,state="AON",watt=8 1545978278000000000 +neptune_apex,device_id=4_4,hardware=1.0,host=ubuntu,name=CABFAN_4_4,output_id=23,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=1 1545978278000000000 +neptune_apex,device_id=4_5,hardware=1.0,host=ubuntu,name=RHEAD_4_5,output_id=24,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=3 1545978278000000000 +neptune_apex,device_id=4_6,hardware=1.0,host=ubuntu,name=FIRE_4_6,output_id=25,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=3 1545978278000000000 +neptune_apex,device_id=4_7,hardware=1.0,host=ubuntu,name=LightGW_4_7,output_id=26,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=1 1545978278000000000 +neptune_apex,device_id=4_8,hardware=1.0,host=ubuntu,name=GBSWITCH_4_8,output_id=27,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=0 
1545978278000000000 +neptune_apex,device_id=4_9,hardware=1.0,host=ubuntu,name=LinkA_4_9,output_id=28,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=4_10,hardware=1.0,host=ubuntu,name=LinkB_4_10,output_id=29,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=5_1,hardware=1.0,host=ubuntu,name=LinkA_5_1,output_id=30,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=Cntl_A1,hardware=1.0,host=ubuntu,name=ATO_EMPTY,output_id=31,output_type=virtual,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=Cntl_A2,hardware=1.0,host=ubuntu,name=LEAK,output_id=32,output_type=virtual,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=Cntl_A3,hardware=1.0,host=ubuntu,name=SKMR_NOPWR,output_id=33,output_type=virtual,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,hardware=1.0,host=ubuntu,name=Tmp,probe_type=Temp,software=5.04_7A18,source=apex,type=probe value=78.1 1545978278000000000 +neptune_apex,hardware=1.0,host=ubuntu,name=pH,probe_type=pH,software=5.04_7A18,source=apex,type=probe value=7.93 1545978278000000000 +neptune_apex,hardware=1.0,host=ubuntu,name=ORP,probe_type=ORP,software=5.04_7A18,source=apex,type=probe value=191 1545978278000000000 +neptune_apex,hardware=1.0,host=ubuntu,name=Salt,probe_type=Cond,software=5.04_7A18,source=apex,type=probe value=29.4 1545978278000000000 +neptune_apex,hardware=1.0,host=ubuntu,name=Volt_2,software=5.04_7A18,source=apex,type=probe value=117 1545978278000000000 +neptune_apex,hardware=1.0,host=ubuntu,name=Volt_4,software=5.04_7A18,source=apex,type=probe value=118 1545978278000000000 ``` ### Contributing This plugin is used for mission-critical aquatic life support. A bug could very well result in the death of animals. -Neptune does not publish a schema file and as such, we have made this plugin very strict on input with no provisions for +Neptune does not publish a schema file and as such, we have made this plugin very strict on input with no provisions for automatically adding fields. We are also careful to not add default values when none are presented to prevent automation errors. -When writing unit tests, use actual Apex output to run tests. It's acceptable to abridge the number of repeated fields -but never inner fields or parameters. \ No newline at end of file +When writing unit tests, use actual Apex output to run tests. It's acceptable to abridge the number of repeated fields +but never inner fields or parameters. 
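One subtlety in the plugin worth spelling out is the timestamp handling: status.xml reports a bare local time plus a fractional hour offset, and `parseTime` folds that offset into an explicit zone suffix before calling `time.Parse`. Here is a minimal, self-contained sketch of that conversion; the helper name `parseApexTime` and the sample values are illustrative rather than taken from the plugin:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// parseApexTime mirrors the plugin's parseTime: build an explicit
// "-0800"-style zone suffix from the fractional hour offset, then parse
// against a reference layout. Note that int(math.Abs(tz)) truncates
// fractional offsets such as 5.5, so half-hour zones lose their minutes.
func parseApexTime(val string, tz float64) (time.Time, error) {
	const layout = "01/02/2006 15:04:05 -0700"
	sign := '+'
	if tz < 0 {
		sign = '-'
	}
	tzs := fmt.Sprintf("%c%04d", sign, int(math.Abs(tz))*100)
	return time.Parse(layout, fmt.Sprintf("%s %s", val, tzs))
}

func main() {
	t, err := parseApexTime("12/22/2018 21:55:37", -8.0)
	fmt.Println(t, err) // 2018-12-22 21:55:37 -0800 -0800 <nil>
}
```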
From e6724bfb7c619619a14c14871b59955b294114d4 Mon Sep 17 00:00:00 2001 From: JefMuller <37331926+JefMuller@users.noreply.github.com> Date: Wed, 9 Jan 2019 00:28:00 +0100 Subject: [PATCH 0503/1815] Add ServiceNow serializer (#4809) --- plugins/serializers/nowmetric/README.md | 83 ++++++++ plugins/serializers/nowmetric/nowmetric.go | 137 +++++++++++++ .../serializers/nowmetric/nowmetric_test.go | 184 ++++++++++++++++++ plugins/serializers/registry.go | 7 + 4 files changed, 411 insertions(+) create mode 100644 plugins/serializers/nowmetric/README.md create mode 100644 plugins/serializers/nowmetric/nowmetric.go create mode 100644 plugins/serializers/nowmetric/nowmetric_test.go diff --git a/plugins/serializers/nowmetric/README.md b/plugins/serializers/nowmetric/README.md new file mode 100644 index 000000000..9bfbc3346 --- /dev/null +++ b/plugins/serializers/nowmetric/README.md @@ -0,0 +1,83 @@ +# ServiceNow Metrics serializer + +The ServiceNow Metrics serializer outputs metrics in the [ServiceNow Operational Intelligence format][ServiceNow-format]. + +It can be used to write to a file using the file output, or for sending metrics to a MID Server with Enable REST endpoint activated using the standard telegraf HTTP output. +If you're using the HTTP output, this serializer knows how to batch the metrics so you don't end up with an HTTP POST per metric. + +[ServiceNow-format]: https://docs.servicenow.com/bundle/london-it-operations-management/page/product/event-management/reference/mid-POST-metrics.html + + +An example event looks like: +```javascript +[{ + "metric_type": "Disk C: % Free Space", + "resource": "C:\\", + "node": "lnux100", + "value": 50, + "timestamp": 1473183012000, + "ci2metric_id": { + "node": "lnux100" + }, + "source": “Telegraf” +}] +``` +## Using with the HTTP output + +To send this data to a ServiceNow MID Server with Web Server extension activated, you can use the HTTP output, there are some custom headers that you need to add to manage the MID Web Server authorization, here's a sample config for an HTTP output: + +```toml +[[outputs.http]] + ## URL is the address to send metrics to + url = "http://:9082/api/mid/sa/metrics" + + ## Timeout for HTTP message + # timeout = "5s" + + ## HTTP method, one of: "POST" or "PUT" + method = "POST" + + ## HTTP Basic Auth credentials + username = 'evt.integration' + password = 'P@$$w0rd!' + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Data format to output. + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "nowmetric" + + ## Additional HTTP headers + [outputs.http.headers] + # # Should be set manually to "application/json" for json data_format + Content-Type = "application/json" + Accept = "application/json" +``` + +Starting with the London release, you also need to explicitly create event rule to allow binding of metric events to host CIs. + +https://docs.servicenow.com/bundle/london-it-operations-management/page/product/event-management/task/event-rule-bind-metrics-to-host.html + +## Using with the File output + +You can use the file output to output the payload in a file. 
+In this case, just add the following section to your telegraf config file + +```toml +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["C:/Telegraf/metrics.out"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "nowmetric" +``` diff --git a/plugins/serializers/nowmetric/nowmetric.go b/plugins/serializers/nowmetric/nowmetric.go new file mode 100644 index 000000000..c9d0b9463 --- /dev/null +++ b/plugins/serializers/nowmetric/nowmetric.go @@ -0,0 +1,137 @@ +package nowmetric + +import ( + "bytes" + "encoding/json" + "fmt" + "time" + + "github.com/influxdata/telegraf" +) + +type serializer struct { + TimestampUnits time.Duration +} + +/* +Example for the JSON generated and pushed to the MID +{ + "metric_type":"cpu_usage_system", + "resource":"", + "node":"ASGARD", + "value": 0.89, + "timestamp":1487365430, + "ci2metric_id":{"node":"ASGARD"}, + "source":"Telegraf" +} +*/ + +type OIMetric struct { + Metric string `json:"metric_type"` + Resource string `json:"resource"` + Node string `json:"node"` + Value interface{} `json:"value"` + Timestamp int64 `json:"timestamp"` + CiMapping map[string]string `json:"ci2metric_id"` + Source string `json:"source"` +} + +type OIMetrics []OIMetric + +func NewSerializer() (*serializer, error) { + s := &serializer{} + return s, nil +} + +func (s *serializer) Serialize(metric telegraf.Metric) (out []byte, err error) { + serialized, err := s.createObject(metric) + if err != nil { + return []byte{}, nil + } + return serialized, err +} + +func (s *serializer) SerializeBatch(metrics []telegraf.Metric) (out []byte, err error) { + objects := make([]byte, 0) + for _, metric := range metrics { + m, err := s.createObject(metric) + if err != nil { + return nil, fmt.Errorf("D! [serializer.nowmetric] Dropping invalid metric: %s", metric.Name()) + } else if m != nil { + objects = append(objects, m...) + } + } + replaced := bytes.Replace(objects, []byte("]["), []byte(","), -1) + return replaced, nil +} + +func (s *serializer) createObject(metric telegraf.Metric) ([]byte, error) { + /* ServiceNow Operational Intelligence supports an array of JSON objects. + ** Following elements accepted in the request body: + ** metric_type: The name of the metric + ** resource: Information about the resource for which metric data is being collected. In the example below, C:\ is the resource for which metric data is collected + ** node: IP, FQDN, name of the CI, or host + ** value: Value of the metric + ** timestamp: Epoch timestamp of the metric in milliseconds + ** ci2metric_id: List of key-value pairs to identify the CI. 
+ ** source: Data source monitoring the metric type + */ + var allmetrics OIMetrics + var oimetric OIMetric + + oimetric.Source = "Telegraf" + + // Process Tags to extract node & resource name info + for _, tag := range metric.TagList() { + if tag.Key == "" || tag.Value == "" { + continue + } + + if tag.Key == "objectname" { + oimetric.Resource = tag.Value + } + + if tag.Key == "host" { + oimetric.Node = tag.Value + } + } + + // Format timestamp to UNIX epoch + oimetric.Timestamp = (metric.Time().UnixNano() / int64(time.Millisecond)) + + // Loop of fields value pair and build datapoint for each of them + for _, field := range metric.FieldList() { + if !verifyValue(field.Value) { + // Ignore String + continue + } + + if field.Key == "" { + // Ignore Empty Key + continue + } + + oimetric.Metric = field.Key + oimetric.Value = field.Value + + if oimetric.Node != "" { + cimapping := map[string]string{} + cimapping["node"] = oimetric.Node + oimetric.CiMapping = cimapping + } + + allmetrics = append(allmetrics, oimetric) + } + + metricsJson, err := json.Marshal(allmetrics) + + return metricsJson, err +} + +func verifyValue(v interface{}) bool { + switch v.(type) { + case string: + return false + } + return true +} diff --git a/plugins/serializers/nowmetric/nowmetric_test.go b/plugins/serializers/nowmetric/nowmetric_test.go new file mode 100644 index 000000000..d326cef8c --- /dev/null +++ b/plugins/serializers/nowmetric/nowmetric_test.go @@ -0,0 +1,184 @@ +package nowmetric + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +func MustMetric(v telegraf.Metric, err error) telegraf.Metric { + if err != nil { + panic(err) + } + return v +} + +func TestSerializeMetricFloat(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer() + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + expS := []byte(fmt.Sprintf(`[{"metric_type":"usage_idle","resource":"","node":"","value":91.5,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, (now.UnixNano() / int64(time.Millisecond)))) + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerialize_TimestampUnits(t *testing.T) { + tests := []struct { + name string + timestampUnits time.Duration + expected string + }{ + { + name: "1ms", + timestampUnits: 1 * time.Millisecond, + expected: `[{"metric_type":"value","resource":"","node":"","value":42,"timestamp":1525478795123,"ci2metric_id":null,"source":"Telegraf"}]`, + }, + { + name: "10ms", + timestampUnits: 10 * time.Millisecond, + expected: `[{"metric_type":"value","resource":"","node":"","value":42,"timestamp":1525478795123,"ci2metric_id":null,"source":"Telegraf"}]`, + }, + { + name: "15ms is reduced to 10ms", + timestampUnits: 15 * time.Millisecond, + expected: `[{"metric_type":"value","resource":"","node":"","value":42,"timestamp":1525478795123,"ci2metric_id":null,"source":"Telegraf"}]`, + }, + { + name: "65ms is reduced to 10ms", + timestampUnits: 65 * time.Millisecond, + expected: `[{"metric_type":"value","resource":"","node":"","value":42,"timestamp":1525478795123,"ci2metric_id":null,"source":"Telegraf"}]`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := MustMetric( + metric.New( + "cpu", + 
map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(1525478795, 123456789), + ), + ) + s, _ := NewSerializer() + actual, err := s.Serialize(m) + require.NoError(t, err) + require.Equal(t, tt.expected, string(actual)) + }) + } +} + +func TestSerializeMetricInt(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": int64(90), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer() + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + expS := []byte(fmt.Sprintf(`[{"metric_type":"usage_idle","resource":"","node":"","value":90,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, (now.UnixNano() / int64(time.Millisecond)))) + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricString(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": "foobar", + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer() + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + assert.Equal(t, "null", string(buf)) +} + +func TestSerializeMultiFields(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": int64(90), + "usage_total": 8559615, + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer() + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + expS := []byte(fmt.Sprintf(`[{"metric_type":"usage_idle","resource":"","node":"","value":90,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"},{"metric_type":"usage_total","resource":"","node":"","value":8559615,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, (now.UnixNano() / int64(time.Millisecond)), (now.UnixNano() / int64(time.Millisecond)))) + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricWithEscapes(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu tag": "cpu0", + } + fields := map[string]interface{}{ + "U,age=Idle": int64(90), + } + m, err := metric.New("My CPU", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer() + buf, err := s.Serialize(m) + assert.NoError(t, err) + + expS := []byte(fmt.Sprintf(`[{"metric_type":"U,age=Idle","resource":"","node":"","value":90,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, (now.UnixNano() / int64(time.Millisecond)))) + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeBatch(t *testing.T) { + m := MustMetric( + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + ) + + metrics := []telegraf.Metric{m, m} + s, _ := NewSerializer() + buf, err := s.SerializeBatch(metrics) + require.NoError(t, err) + require.Equal(t, []byte(`[{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"},{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"}]`), buf) +} diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index b8a0aef07..9ca2f42e7 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -9,6 +9,7 @@ import ( "github.com/influxdata/telegraf/plugins/serializers/graphite" 
"github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/plugins/serializers/json" + "github.com/influxdata/telegraf/plugins/serializers/nowmetric" "github.com/influxdata/telegraf/plugins/serializers/splunkmetric" ) @@ -79,6 +80,8 @@ func NewSerializer(config *Config) (Serializer, error) { serializer, err = NewJsonSerializer(config.TimestampUnits) case "splunkmetric": serializer, err = NewSplunkmetricSerializer(config.HecRouting) + case "nowmetric": + serializer, err = NewNowSerializer() default: err = fmt.Errorf("Invalid data format: %s", config.DataFormat) } @@ -93,6 +96,10 @@ func NewSplunkmetricSerializer(splunkmetric_hec_routing bool) (Serializer, error return splunkmetric.NewSerializer(splunkmetric_hec_routing) } +func NewNowSerializer() (Serializer, error) { + return nowmetric.NewSerializer() +} + func NewInfluxSerializerConfig(config *Config) (Serializer, error) { var sort influx.FieldSortOrder if config.InfluxSortFields { From c9d8be9ab58689c1fb276383f80cebc08b1fc6d8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 8 Jan 2019 15:32:49 -0800 Subject: [PATCH 0504/1815] Update changelog and docs for nowmetric serializer --- CHANGELOG.md | 4 + README.md | 1 + plugins/serializers/nowmetric/README.md | 166 ++++++++++++------------ 3 files changed, 88 insertions(+), 83 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0987f90e7..11f70b0a5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,10 @@ - [cloud_pubsub](/plugins/inputs/cloud_pubsub/README.md) - Contributed by @emilymye - [neptune_apex](/plugins/inputs/neptune_apex/README.md) - Contributed by @MaxRenaud +### New Serializers + +- [nowmetric](/plugins/serializers/nowmetric/README.md) - Contributed by @JefMuller + #### Features - [#4345](https://github.com/influxdata/telegraf/pull/4345): Allow for force gathering ES cluster stats. diff --git a/README.md b/README.md index 8240135e6..50fe6d48f 100644 --- a/README.md +++ b/README.md @@ -300,6 +300,7 @@ For documentation on the latest development code see the [documentation index][d - [InfluxDB Line Protocol](/plugins/serializers/influx) - [JSON](/plugins/serializers/json) - [Graphite](/plugins/serializers/graphite) +- [ServiceNow](/plugins/serializers/nowmetric) - [SplunkMetric](/plugins/serializers/splunkmetric) ## Processor Plugins diff --git a/plugins/serializers/nowmetric/README.md b/plugins/serializers/nowmetric/README.md index 9bfbc3346..c1bc22cbe 100644 --- a/plugins/serializers/nowmetric/README.md +++ b/plugins/serializers/nowmetric/README.md @@ -1,83 +1,83 @@ -# ServiceNow Metrics serializer - -The ServiceNow Metrics serializer outputs metrics in the [ServiceNow Operational Intelligence format][ServiceNow-format]. - -It can be used to write to a file using the file output, or for sending metrics to a MID Server with Enable REST endpoint activated using the standard telegraf HTTP output. -If you're using the HTTP output, this serializer knows how to batch the metrics so you don't end up with an HTTP POST per metric. 
- -[ServiceNow-format]: https://docs.servicenow.com/bundle/london-it-operations-management/page/product/event-management/reference/mid-POST-metrics.html - - -An example event looks like: -```javascript -[{ - "metric_type": "Disk C: % Free Space", - "resource": "C:\\", - "node": "lnux100", - "value": 50, - "timestamp": 1473183012000, - "ci2metric_id": { - "node": "lnux100" - }, - "source": “Telegraf” -}] -``` -## Using with the HTTP output - -To send this data to a ServiceNow MID Server with Web Server extension activated, you can use the HTTP output, there are some custom headers that you need to add to manage the MID Web Server authorization, here's a sample config for an HTTP output: - -```toml -[[outputs.http]] - ## URL is the address to send metrics to - url = "http://:9082/api/mid/sa/metrics" - - ## Timeout for HTTP message - # timeout = "5s" - - ## HTTP method, one of: "POST" or "PUT" - method = "POST" - - ## HTTP Basic Auth credentials - username = 'evt.integration' - password = 'P@$$w0rd!' - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## Data format to output. - ## Each data format has it's own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "nowmetric" - - ## Additional HTTP headers - [outputs.http.headers] - # # Should be set manually to "application/json" for json data_format - Content-Type = "application/json" - Accept = "application/json" -``` - -Starting with the London release, you also need to explicitly create event rule to allow binding of metric events to host CIs. - -https://docs.servicenow.com/bundle/london-it-operations-management/page/product/event-management/task/event-rule-bind-metrics-to-host.html - -## Using with the File output - -You can use the file output to output the payload in a file. -In this case, just add the following section to your telegraf config file - -```toml -[[outputs.file]] - ## Files to write to, "stdout" is a specially handled file. - files = ["C:/Telegraf/metrics.out"] - - ## Data format to output. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "nowmetric" -``` +# ServiceNow Metrics serializer + +The ServiceNow Metrics serializer outputs metrics in the [ServiceNow Operational Intelligence format][ServiceNow-format]. + +It can be used to write to a file using the file output, or for sending metrics to a MID Server with Enable REST endpoint activated using the standard telegraf HTTP output. +If you're using the HTTP output, this serializer knows how to batch the metrics so you don't end up with an HTTP POST per metric. 
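+
+As a quick way to see what the serializer emits, here is a minimal sketch of
+driving it straight from Go code (this program is illustrative and not part of
+the plugin; `host` and `objectname` are the two tags the serializer maps to
+`node` and `resource`):
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/influxdata/telegraf/metric"
+	"github.com/influxdata/telegraf/plugins/serializers/nowmetric"
+)
+
+func main() {
+	// Build a metric carrying the two tags the serializer understands.
+	m, err := metric.New(
+		"cpu",
+		map[string]string{"host": "lnux100", "objectname": "C:\\"},
+		map[string]interface{}{"usage_idle": 91.5},
+		time.Now(),
+	)
+	if err != nil {
+		panic(err)
+	}
+
+	s, _ := nowmetric.NewSerializer()
+	out, err := s.Serialize(m)
+	if err != nil {
+		panic(err)
+	}
+	// Prints a one-element JSON array in the Operational Intelligence format.
+	fmt.Println(string(out))
+}
+```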
+
+[ServiceNow-format]: https://docs.servicenow.com/bundle/london-it-operations-management/page/product/event-management/reference/mid-POST-metrics.html
+
+An example event looks like:
+```javascript
+[{
+  "metric_type": "Disk C: % Free Space",
+  "resource": "C:\\",
+  "node": "lnux100",
+  "value": 50,
+  "timestamp": 1473183012000,
+  "ci2metric_id": {
+    "node": "lnux100"
+  },
+  "source": "Telegraf"
+}]
+```
+
+## Using with the HTTP output
+
+To send this data to a ServiceNow MID Server with the Web Server extension activated, you can use the HTTP output. A few custom headers must be added to manage the MID Web Server authorization; here's a sample config for an HTTP output:
+
+```toml
+[[outputs.http]]
+  ## URL is the address to send metrics to
+  url = "http://<mid_server>:9082/api/mid/sa/metrics"
+
+  ## Timeout for HTTP message
+  # timeout = "5s"
+
+  ## HTTP method, one of: "POST" or "PUT"
+  method = "POST"
+
+  ## HTTP Basic Auth credentials
+  username = 'evt.integration'
+  password = 'P@$$w0rd!'
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "nowmetric"
+
+  ## Additional HTTP headers
+  [outputs.http.headers]
+    ## Should be set manually to "application/json" for json data_format
+    Content-Type = "application/json"
+    Accept = "application/json"
+```
+
+Starting with the London release, you also need to explicitly create an event rule to allow binding of metric events to host CIs:
+
+https://docs.servicenow.com/bundle/london-it-operations-management/page/product/event-management/task/event-rule-bind-metrics-to-host.html
+
+## Using with the File output
+
+You can use the file output to write the payload to a file.
+In this case, just add the following section to your telegraf config file:
+
+```toml
+[[outputs.file]]
+  ## Files to write to, "stdout" is a specially handled file.
+  files = ["C:/Telegraf/metrics.out"]
+
+  ## Data format to output.
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "nowmetric" +``` From 2474a3a54b9c3b2a240100dd1f45ab0ed3136c2f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 8 Jan 2019 15:42:26 -0800 Subject: [PATCH 0505/1815] Remove unsupported parallel testing --- plugins/inputs/neptune_apex/neptune_apex_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/plugins/inputs/neptune_apex/neptune_apex_test.go b/plugins/inputs/neptune_apex/neptune_apex_test.go index 1d554149e..4a3cc6458 100644 --- a/plugins/inputs/neptune_apex/neptune_apex_test.go +++ b/plugins/inputs/neptune_apex/neptune_apex_test.go @@ -44,7 +44,6 @@ func TestGather(t *testing.T) { for _, test := range tests { test := test t.Run(test.name, func(t *testing.T) { - t.Parallel() var acc testutil.Accumulator n.Servers = test.servers n.Gather(&acc) @@ -334,7 +333,6 @@ func TestParseXML(t *testing.T) { for _, test := range tests { test := test t.Run(test.name, func(t *testing.T) { - t.Parallel() var acc testutil.Accumulator err := n.parseXML(&acc, []byte(test.xmlResponse)) if (err != nil) != test.wantErr { From f5f85aa74fc32d045bb77f8f8d53c49f1568d36c Mon Sep 17 00:00:00 2001 From: emily Date: Tue, 8 Jan 2019 15:53:02 -0800 Subject: [PATCH 0506/1815] Add GCP Cloud Pubsub output plugin (#5202) --- Gopkg.lock | 1 + plugins/outputs/all/all.go | 1 + plugins/outputs/cloud_pubsub/README.md | 61 ++++ plugins/outputs/cloud_pubsub/pubsub.go | 263 ++++++++++++++++++ plugins/outputs/cloud_pubsub/pubsub_test.go | 167 +++++++++++ plugins/outputs/cloud_pubsub/topic_gcp.go | 46 +++ plugins/outputs/cloud_pubsub/topic_stubbed.go | 199 +++++++++++++ 7 files changed, 738 insertions(+) create mode 100644 plugins/outputs/cloud_pubsub/README.md create mode 100644 plugins/outputs/cloud_pubsub/pubsub.go create mode 100644 plugins/outputs/cloud_pubsub/pubsub_test.go create mode 100644 plugins/outputs/cloud_pubsub/topic_gcp.go create mode 100644 plugins/outputs/cloud_pubsub/topic_stubbed.go diff --git a/Gopkg.lock b/Gopkg.lock index ba4564a3d..c110818ec 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1588,6 +1588,7 @@ "golang.org/x/sys/windows/svc", "golang.org/x/sys/windows/svc/mgr", "google.golang.org/api/option", + "google.golang.org/api/support/bundler", "google.golang.org/genproto/googleapis/api/metric", "google.golang.org/genproto/googleapis/api/monitoredres", "google.golang.org/genproto/googleapis/monitoring/v3", diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index 94c1421b5..a5d2a44da 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -5,6 +5,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/amqp" _ "github.com/influxdata/telegraf/plugins/outputs/application_insights" _ "github.com/influxdata/telegraf/plugins/outputs/azure_monitor" + _ "github.com/influxdata/telegraf/plugins/outputs/cloud_pubsub" _ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch" _ "github.com/influxdata/telegraf/plugins/outputs/cratedb" _ "github.com/influxdata/telegraf/plugins/outputs/datadog" diff --git a/plugins/outputs/cloud_pubsub/README.md b/plugins/outputs/cloud_pubsub/README.md new file mode 100644 index 000000000..5c345de4b --- /dev/null +++ b/plugins/outputs/cloud_pubsub/README.md @@ -0,0 +1,61 @@ +# Google Cloud PubSub Output Plugin + +The GCP PubSub plugin publishes metrics to a [Google Cloud PubSub][pubsub] topic +as one of the supported [output data 
formats][].
+
+### Configuration
+
+This section contains the default TOML to configure the plugin. You can
+generate it using `telegraf --usage cloud_pubsub`.
+
+```toml
+[[outputs.cloud_pubsub]]
+  ## Required. Name of Google Cloud Platform (GCP) Project that owns
+  ## the given PubSub topic.
+  project = "my-project"
+
+  ## Required. Name of PubSub topic to publish metrics to.
+  topic = "my-topic"
+
+  ## Required. Data format to output.
+  ## Each data format has its own unique set of configuration options.
+  ## Read more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "influx"
+
+  ## Optional. Filepath for GCP credentials JSON file to authorize calls to
+  ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
+  ## Application Default Credentials, which is preferred.
+  # credentials_file = "path/to/my/creds.json"
+
+  ## Optional. If true, will send all metrics per write in one PubSub message.
+  # send_batched = true
+
+  ## The following publish_* parameters specifically configure batching
+  ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read
+  ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings
+
+  ## Optional. Send a request to PubSub (i.e. actually publish a batch)
+  ## when it has this many PubSub messages. If send_batched is true,
+  ## this is ignored and treated as if it were 1.
+  # publish_count_threshold = 1000
+
+  ## Optional. Send a request to PubSub (i.e. actually publish a batch)
+  ## when its buffered messages reach this many bytes. If send_batched is true,
+  ## this is ignored and treated as if it were 1.
+  # publish_byte_threshold = 1000000
+
+  ## Optional. Specifically configures requests made to the PubSub API.
+  # publish_num_go_routines = 2
+
+  ## Optional. Specifies a timeout for requests to the PubSub API.
+  # publish_timeout = "30s"
+
+  ## Optional. PubSub attributes to add to metrics.
+  # [outputs.cloud_pubsub.attributes]
+  # my_attr = "tag_value"
+```
+
+[pubsub]: https://cloud.google.com/pubsub
+[output data formats]: /docs/DATA_FORMATS_OUTPUT.md
diff --git a/plugins/outputs/cloud_pubsub/pubsub.go b/plugins/outputs/cloud_pubsub/pubsub.go
new file mode 100644
index 000000000..9811af1d7
--- /dev/null
+++ b/plugins/outputs/cloud_pubsub/pubsub.go
@@ -0,0 +1,263 @@
+package cloud_pubsub
+
+import (
+	"cloud.google.com/go/pubsub"
+	"context"
+	"fmt"
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/plugins/outputs"
+	"github.com/influxdata/telegraf/plugins/serializers"
+	"golang.org/x/oauth2/google"
+	"google.golang.org/api/option"
+	"sync"
+)
+
+const sampleConfig = `
+[[inputs.pubsub]]
+  ## Required. Name of Google Cloud Platform (GCP) Project that owns
+  ## the given PubSub subscription.
+  project = "my-project"
+
+  ## Required. Name of PubSub subscription to ingest metrics from.
+  subscription = "my-subscription"
+
+  ## Required. Data format to consume.
+  ## Each data format has its own unique set of configuration options.
+  ## Read more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "influx"
+
+  ## Optional. Filepath for GCP credentials JSON file to authorize calls to
+  ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
+  ## Application Default Credentials, which is preferred.
+  # credentials_file = "path/to/my/creds.json"
+
+  ## Optional. 
If true, will send all metrics per write in one PubSub message. + # send_batched = true + + ## The following publish_* parameters specifically configures batching + ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read + ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings + + ## Optional. Send a request to PubSub (i.e. actually publish a batch) + ## when it has this many PubSub messages. If send_batched is true, + ## this is ignored and treated as if it were 1. + # publish_count_threshold = 1000 + + ## Optional. Send a request to PubSub (i.e. actually publish a batch) + ## when it has this many PubSub messages. If send_batched is true, + ## this is ignored and treated as if it were 1 + # publish_byte_threshold = 1000000 + + ## Optional. Specifically configures requests made to the PubSub API. + # publish_num_go_routines = 2 + + ## Optional. Specifies a timeout for requests to the PubSub API. + # publish_timeout = "30s" + + ## Optional. PubSub attributes to add to metrics. + # [[inputs.pubsub.attributes]] + # my_attr = "tag_value" +` + +type PubSub struct { + CredentialsFile string `toml:"credentials_file"` + Project string `toml:"project"` + Topic string `toml:"topic"` + Attributes map[string]string `toml:"attributes"` + + SendBatched bool `toml:"send_batched"` + PublishCountThreshold int `toml:"publish_count_threshold"` + PublishByteThreshold int `toml:"publish_byte_threshold"` + PublishNumGoroutines int `toml:"publish_num_go_routines"` + PublishTimeout internal.Duration `toml:"publish_timeout"` + + t topic + c *pubsub.Client + + stubTopic func(id string) topic + + serializer serializers.Serializer + publishResults []publishResult +} + +func (ps *PubSub) Description() string { + return "Publish Telegraf metrics to a Google Cloud PubSub topic" +} + +func (ps *PubSub) SampleConfig() string { + return sampleConfig +} + +func (ps *PubSub) SetSerializer(serializer serializers.Serializer) { + ps.serializer = serializer +} + +func (ps *PubSub) Connect() error { + if ps.Topic == "" { + return fmt.Errorf(`"topic" is required`) + } + + if ps.Project == "" { + return fmt.Errorf(`"project" is required`) + } + + if ps.stubTopic == nil { + return ps.initPubSubClient() + } else { + return nil + } +} + +func (ps *PubSub) Close() error { + if ps.t != nil { + ps.t.Stop() + } + return nil +} + +func (ps *PubSub) Write(metrics []telegraf.Metric) error { + ps.refreshTopic() + + // Serialize metrics and package into appropriate PubSub messages + msgs, err := ps.toMessages(metrics) + if err != nil { + return err + } + + cctx, cancel := context.WithCancel(context.Background()) + + // Publish all messages - each call to Publish returns a future. + ps.publishResults = make([]publishResult, len(msgs)) + for i, m := range msgs { + ps.publishResults[i] = ps.t.Publish(cctx, m) + } + + // topic.Stop() forces all published messages to be sent, even + // if PubSub batch limits have not been reached. 
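+	// Stopping here is safe because refreshTopic() installs a fresh topic
+	// handle at the start of every Write call.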
+	go ps.t.Stop()
+
+	return ps.waitForResults(cctx, cancel)
+}
+
+func (ps *PubSub) initPubSubClient() error {
+	var credsOpt option.ClientOption
+	if ps.CredentialsFile != "" {
+		credsOpt = option.WithCredentialsFile(ps.CredentialsFile)
+	} else {
+		creds, err := google.FindDefaultCredentials(context.Background(), pubsub.ScopeCloudPlatform)
+		if err != nil {
+			return fmt.Errorf(
+				"unable to find GCP Application Default Credentials: %v. "+
+					"Either set ADC or provide CredentialsFile config", err)
+		}
+		credsOpt = option.WithCredentials(creds)
+	}
+	client, err := pubsub.NewClient(
+		context.Background(),
+		ps.Project,
+		credsOpt,
+		option.WithScopes(pubsub.ScopeCloudPlatform),
+		option.WithUserAgent(internal.ProductToken()),
+	)
+	if err != nil {
+		return fmt.Errorf("unable to generate PubSub client: %v", err)
+	}
+	ps.c = client
+	return nil
+}
+
+func (ps *PubSub) refreshTopic() {
+	if ps.stubTopic != nil {
+		ps.t = ps.stubTopic(ps.Topic)
+	} else {
+		t := ps.c.Topic(ps.Topic)
+		ps.t = &topicWrapper{t}
+	}
+	ps.t.SetPublishSettings(ps.publishSettings())
+}
+
+func (ps *PubSub) publishSettings() pubsub.PublishSettings {
+	settings := pubsub.PublishSettings{}
+	if ps.PublishNumGoroutines > 0 {
+		settings.NumGoroutines = ps.PublishNumGoroutines
+	}
+
+	if ps.PublishTimeout.Duration > 0 {
+		settings.Timeout = ps.PublishTimeout.Duration
+	}
+
+	if ps.SendBatched {
+		settings.CountThreshold = 1
+	} else if ps.PublishCountThreshold > 0 {
+		settings.CountThreshold = ps.PublishCountThreshold
+	}
+
+	if ps.PublishByteThreshold > 0 {
+		settings.ByteThreshold = ps.PublishByteThreshold
+	}
+
+	return settings
+}
+
+func (ps *PubSub) toMessages(metrics []telegraf.Metric) ([]*pubsub.Message, error) {
+	if ps.SendBatched {
+		b, err := ps.serializer.SerializeBatch(metrics)
+		if err != nil {
+			return nil, err
+		}
+		msg := &pubsub.Message{Data: b}
+		if ps.Attributes != nil {
+			msg.Attributes = ps.Attributes
+		}
+		return []*pubsub.Message{msg}, nil
+	}
+
+	msgs := make([]*pubsub.Message, len(metrics))
+	for i, m := range metrics {
+		b, err := ps.serializer.Serialize(m)
+		if err != nil {
+			return nil, err
+		}
+		msgs[i] = &pubsub.Message{
+			Data: b,
+		}
+		if ps.Attributes != nil {
+			msgs[i].Attributes = ps.Attributes
+		}
+	}
+
+	return msgs, nil
+}
+
+func (ps *PubSub) waitForResults(ctx context.Context, cancel context.CancelFunc) error {
+	var pErr error
+	var setErr sync.Once
+	var wg sync.WaitGroup
+
+	for _, pr := range ps.publishResults {
+		wg.Add(1)
+
+		go func(r publishResult) {
+			defer wg.Done()
+			// Wait on each future
+			_, err := r.Get(ctx)
+			if err != nil {
+				setErr.Do(func() {
+					pErr = err
+					cancel()
+				})
+			}
+		}(pr)
+	}
+
+	wg.Wait()
+	return pErr
+}
+
+func init() {
+	outputs.Add("cloud_pubsub", func() telegraf.Output {
+		return &PubSub{}
+	})
+}
diff --git a/plugins/outputs/cloud_pubsub/pubsub_test.go b/plugins/outputs/cloud_pubsub/pubsub_test.go
new file mode 100644
index 000000000..a60f05eb0
--- /dev/null
+++ b/plugins/outputs/cloud_pubsub/pubsub_test.go
@@ -0,0 +1,167 @@
+package cloud_pubsub
+
+import (
+	"cloud.google.com/go/pubsub"
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/parsers"
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/assert"
+	"testing"
+)
+
+func TestPubSub_WriteSingle(t *testing.T) {
+
+	testMetrics := []testMetric{
+		{testutil.TestMetric("value_1", "test"), false /*return error */},
+	}
+
+	settings := pubsub.DefaultPublishSettings
+	settings.CountThreshold = 1
+	ps, topic, metrics := getTestResources(t, settings, testMetrics)
+
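+	// CountThreshold = 1 makes the stubbed bundler flush after every
+	// message, so the single metric should be published immediately.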
+ err := ps.Write(metrics) + if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + + for _, testM := range testMetrics { + verifyMetricPublished(t, testM.m, topic.published) + } +} + +func TestPubSub_WriteWithAttribute(t *testing.T) { + testMetrics := []testMetric{ + {testutil.TestMetric("value_1", "test"), false /*return error*/}, + } + + settings := pubsub.DefaultPublishSettings + ps, topic, metrics := getTestResources(t, settings, testMetrics) + ps.Attributes = map[string]string{ + "foo1": "bar1", + "foo2": "bar2", + } + + err := ps.Write(metrics) + if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + + for _, testM := range testMetrics { + msg := verifyMetricPublished(t, testM.m, topic.published) + assert.Equalf(t, "bar1", msg.Attributes["foo1"], "expected attribute foo1=bar1") + assert.Equalf(t, "bar2", msg.Attributes["foo2"], "expected attribute foo2=bar2") + } +} + +func TestPubSub_WriteMultiple(t *testing.T) { + testMetrics := []testMetric{ + {testutil.TestMetric("value_1", "test"), false /*return error*/}, + {testutil.TestMetric("value_2", "test"), false}, + } + + settings := pubsub.DefaultPublishSettings + + ps, topic, metrics := getTestResources(t, settings, testMetrics) + + err := ps.Write(metrics) + if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + + for _, testM := range testMetrics { + verifyMetricPublished(t, testM.m, topic.published) + } + assert.Equalf(t, 1, topic.bundleCount, "unexpected bundle count") +} + +func TestPubSub_WriteOverCountThreshold(t *testing.T) { + testMetrics := []testMetric{ + {testutil.TestMetric("value_1", "test"), false /*return error*/}, + {testutil.TestMetric("value_2", "test"), false}, + {testutil.TestMetric("value_3", "test"), false}, + {testutil.TestMetric("value_4", "test"), false}, + } + + settings := pubsub.DefaultPublishSettings + settings.CountThreshold = 2 + + ps, topic, metrics := getTestResources(t, settings, testMetrics) + + err := ps.Write(metrics) + if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + + for _, testM := range testMetrics { + verifyMetricPublished(t, testM.m, topic.published) + } + assert.Equalf(t, 2, topic.bundleCount, "unexpected bundle count") +} + +func TestPubSub_WriteOverByteThreshold(t *testing.T) { + testMetrics := []testMetric{ + {testutil.TestMetric("value_1", "test"), false /*return error*/}, + {testutil.TestMetric("value_2", "test"), false}, + } + + settings := pubsub.DefaultPublishSettings + settings.CountThreshold = 10 + settings.ByteThreshold = 1 + + ps, topic, metrics := getTestResources(t, settings, testMetrics) + + err := ps.Write(metrics) + if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + + for _, testM := range testMetrics { + verifyMetricPublished(t, testM.m, topic.published) + } + assert.Equalf(t, 2, topic.bundleCount, "unexpected bundle count") +} + +func TestPubSub_Error(t *testing.T) { + testMetrics := []testMetric{ + // Force this batch to return error + {testutil.TestMetric("value_1", "test"), true}, + {testutil.TestMetric("value_2", "test"), false}, + } + + settings := pubsub.DefaultPublishSettings + ps, _, metrics := getTestResources(t, settings, testMetrics) + + err := ps.Write(metrics) + if err == nil { + t.Fatalf("expected error") + } + if err.Error() != errMockFail { + t.Fatalf("expected fake error, got %v", err) + } +} + +func verifyMetricPublished(t *testing.T, m telegraf.Metric, published map[string]*pubsub.Message) *pubsub.Message { + p, _ := parsers.NewInfluxParser() + + v, _ := m.GetField("value") + psMsg, ok 
:= published[v.(string)] + if !ok { + t.Fatalf("expected metric to get published (value: %s)", v.(string)) + } + + parsed, err := p.Parse(psMsg.Data) + if err != nil { + t.Fatalf("could not parse influxdb metric from published message: %s", string(psMsg.Data)) + } + if len(parsed) > 1 { + t.Fatalf("expected only one influxdb metric per published message, got %d", len(published)) + } + + publishedV, ok := parsed[0].GetField("value") + if !ok { + t.Fatalf("expected published metric to have a value") + } + assert.Equal(t, v, publishedV, "incorrect published value") + + return psMsg +} diff --git a/plugins/outputs/cloud_pubsub/topic_gcp.go b/plugins/outputs/cloud_pubsub/topic_gcp.go new file mode 100644 index 000000000..a85c6f39e --- /dev/null +++ b/plugins/outputs/cloud_pubsub/topic_gcp.go @@ -0,0 +1,46 @@ +package cloud_pubsub + +import ( + "cloud.google.com/go/pubsub" + "context" +) + +type ( + topicFactory func(string) (topic, error) + + topic interface { + ID() string + Stop() + Publish(ctx context.Context, msg *pubsub.Message) publishResult + PublishSettings() pubsub.PublishSettings + SetPublishSettings(settings pubsub.PublishSettings) + } + + publishResult interface { + Get(ctx context.Context) (string, error) + } + + topicWrapper struct { + topic *pubsub.Topic + } +) + +func (tw *topicWrapper) ID() string { + return tw.topic.ID() +} + +func (tw *topicWrapper) Stop() { + tw.topic.Stop() +} + +func (tw *topicWrapper) Publish(ctx context.Context, msg *pubsub.Message) publishResult { + return tw.topic.Publish(ctx, msg) +} + +func (tw *topicWrapper) PublishSettings() pubsub.PublishSettings { + return tw.topic.PublishSettings +} + +func (tw *topicWrapper) SetPublishSettings(settings pubsub.PublishSettings) { + tw.topic.PublishSettings = settings +} diff --git a/plugins/outputs/cloud_pubsub/topic_stubbed.go b/plugins/outputs/cloud_pubsub/topic_stubbed.go new file mode 100644 index 000000000..fdae70bc3 --- /dev/null +++ b/plugins/outputs/cloud_pubsub/topic_stubbed.go @@ -0,0 +1,199 @@ +package cloud_pubsub + +import ( + "cloud.google.com/go/pubsub" + "context" + "errors" + "fmt" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/serializers" + "google.golang.org/api/support/bundler" + "runtime" + "sync" + "testing" +) + +const ( + errMockFail = "this is an error" +) + +type ( + testMetric struct { + m telegraf.Metric + returnErr bool + } + + bundledMsg struct { + *pubsub.Message + *stubResult + } + + stubResult struct { + metricIds []string + + sendError bool + err chan error + done chan struct{} + } + + stubTopic struct { + Settings pubsub.PublishSettings + ReturnErr map[string]bool + parsers.Parser + *testing.T + + stopped bool + pLock sync.Mutex + + published map[string]*pubsub.Message + + bundler *bundler.Bundler + bLock sync.Mutex + bundleCount int + } +) + +func getTestResources(tT *testing.T, settings pubsub.PublishSettings, testM []testMetric) (*PubSub, *stubTopic, []telegraf.Metric) { + s, _ := serializers.NewInfluxSerializer() + + metrics := make([]telegraf.Metric, len(testM)) + t := &stubTopic{ + T: tT, + ReturnErr: make(map[string]bool), + published: make(map[string]*pubsub.Message), + } + + for i, tm := range testM { + metrics[i] = tm.m + if tm.returnErr { + v, _ := tm.m.GetField("value") + t.ReturnErr[v.(string)] = true + } + } + + ps := &PubSub{ + Project: "test-project", + Topic: "test-topic", + stubTopic: func(string) topic { return t }, + 
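+		// mirror the pubsub.PublishSettings under test onto the plugin's
+		// own config fields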
PublishCountThreshold: settings.CountThreshold, + PublishByteThreshold: settings.ByteThreshold, + PublishNumGoroutines: settings.NumGoroutines, + PublishTimeout: internal.Duration{Duration: settings.Timeout}, + } + ps.SetSerializer(s) + + return ps, t, metrics +} + +func (t *stubTopic) ID() string { + return "test-topic" +} + +func (t *stubTopic) Stop() { + t.pLock.Lock() + defer t.pLock.Unlock() + + t.stopped = true + t.bundler.Flush() +} + +func (t *stubTopic) Publish(ctx context.Context, msg *pubsub.Message) publishResult { + t.pLock.Lock() + defer t.pLock.Unlock() + + if t.stopped || ctx.Err() != nil { + t.Fatalf("publish called after stop") + } + + ids := t.parseIDs(msg) + r := &stubResult{ + metricIds: ids, + err: make(chan error, 1), + done: make(chan struct{}, 1), + } + + for _, id := range ids { + _, ok := t.ReturnErr[id] + r.sendError = r.sendError || ok + } + + bundled := &bundledMsg{msg, r} + err := t.bundler.Add(bundled, len(msg.Data)) + if err != nil { + t.Fatalf("unexpected error while adding to bundle: %v", err) + } + return r +} + +func (t *stubTopic) PublishSettings() pubsub.PublishSettings { + return t.Settings +} + +func (t *stubTopic) SetPublishSettings(settings pubsub.PublishSettings) { + t.Settings = settings + t.initBundler() +} + +func (t *stubTopic) initBundler() *stubTopic { + t.bundler = bundler.NewBundler(&bundledMsg{}, t.sendBundle()) + t.bundler.DelayThreshold = t.Settings.DelayThreshold + t.bundler.BundleCountThreshold = t.Settings.CountThreshold + if t.bundler.BundleCountThreshold > pubsub.MaxPublishRequestCount { + t.bundler.BundleCountThreshold = pubsub.MaxPublishRequestCount + } + t.bundler.BundleByteThreshold = t.Settings.ByteThreshold + t.bundler.BundleByteLimit = pubsub.MaxPublishRequestBytes + t.bundler.HandlerLimit = 25 * runtime.GOMAXPROCS(0) + + return t +} + +func (t *stubTopic) sendBundle() func(items interface{}) { + return func(items interface{}) { + t.bLock.Lock() + defer t.bLock.Unlock() + + bundled := items.([]*bundledMsg) + + for _, msg := range bundled { + r := msg.stubResult + if r.sendError { + r.err <- errors.New(errMockFail) + } else { + r.done <- struct{}{} + } + for _, id := range r.metricIds { + t.published[id] = msg.Message + } + } + + t.bundleCount++ + } +} + +func (t *stubTopic) parseIDs(msg *pubsub.Message) []string { + p, _ := parsers.NewInfluxParser() + metrics, err := p.Parse(msg.Data) + if err != nil { + t.Fatalf("unexpected parsing error: %v", err) + } + ids := make([]string, len(metrics)) + for i, met := range metrics { + id, _ := met.GetField("value") + ids[i] = id.(string) + } + return ids +} + +func (r *stubResult) Get(ctx context.Context) (string, error) { + select { + case <-ctx.Done(): + return "", ctx.Err() + case err := <-r.err: + return "", err + case <-r.done: + return fmt.Sprintf("id-%s", r.metricIds[0]), nil + } +} From bed90f1942784c4d97e1ff6549d282ac8a661188 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 8 Jan 2019 15:56:10 -0800 Subject: [PATCH 0507/1815] Update changelog --- CHANGELOG.md | 6 +++++- README.md | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 11f70b0a5..a42467cc6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,11 @@ - [cloud_pubsub](/plugins/inputs/cloud_pubsub/README.md) - Contributed by @emilymye - [neptune_apex](/plugins/inputs/neptune_apex/README.md) - Contributed by @MaxRenaud -### New Serializers +#### New Outputs + +- [cloud_pubsub](/plugins/outputs/cloud_pubsub/README.md) - Contributed by @emilymye + +#### New 
Serializers - [nowmetric](/plugins/serializers/nowmetric/README.md) - Contributed by @JefMuller diff --git a/README.md b/README.md index 50fe6d48f..4da35f744 100644 --- a/README.md +++ b/README.md @@ -332,6 +332,7 @@ For documentation on the latest development code see the [documentation index][d * [aws kinesis](./plugins/outputs/kinesis) * [aws cloudwatch](./plugins/outputs/cloudwatch) * [azure_monitor](./plugins/outputs/azure_monitor) +* [cloud_pubsub](./plugins/outputs/cloud_pubsub) Google Cloud Pub/Sub * [cratedb](./plugins/outputs/cratedb) * [datadog](./plugins/outputs/datadog) * [discard](./plugins/outputs/discard) From 10b3e4577501ddba228721754339145f7b9b15ec Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 8 Jan 2019 17:57:38 -0800 Subject: [PATCH 0508/1815] Sort fields in nowmetric test --- plugins/serializers/nowmetric/nowmetric_test.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/plugins/serializers/nowmetric/nowmetric_test.go b/plugins/serializers/nowmetric/nowmetric_test.go index d326cef8c..e49b81c2d 100644 --- a/plugins/serializers/nowmetric/nowmetric_test.go +++ b/plugins/serializers/nowmetric/nowmetric_test.go @@ -2,14 +2,14 @@ package nowmetric import ( "fmt" + "sort" "testing" "time" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func MustMetric(v telegraf.Metric, err error) telegraf.Metric { @@ -136,6 +136,11 @@ func TestSerializeMultiFields(t *testing.T) { m, err := metric.New("cpu", tags, fields, now) assert.NoError(t, err) + // Sort for predictable field order + sort.Slice(m.FieldList(), func(i, j int) bool { + return m.FieldList()[i].Key < m.FieldList()[j].Key + }) + s, _ := NewSerializer() var buf []byte buf, err = s.Serialize(m) From 4b3580cceb89d7ddb9eeb9c608067fa58f0ea948 Mon Sep 17 00:00:00 2001 From: "Artem V. 
Navrotskiy" Date: Thu, 10 Jan 2019 02:48:45 +0300 Subject: [PATCH 0509/1815] Add raw counters to mongodb input (#5264) --- plugins/inputs/mongodb/mongodb_data.go | 22 +++++ plugins/inputs/mongodb/mongodb_data_test.go | 23 +++++ plugins/inputs/mongodb/mongostat.go | 100 +++++++++++--------- 3 files changed, 102 insertions(+), 43 deletions(-) diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index 0c69670d5..733018374 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -31,12 +31,19 @@ func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData { } var DefaultStats = map[string]string{ + "inserts": "InsertCnt", "inserts_per_sec": "Insert", + "queries": "QueryCnt", "queries_per_sec": "Query", + "updates": "UpdateCnt", "updates_per_sec": "Update", + "deletes": "DeleteCnt", "deletes_per_sec": "Delete", + "getmores": "GetMoreCnt", "getmores_per_sec": "GetMore", + "commands": "CommandCnt", "commands_per_sec": "Command", + "flushes": "FlushesCnt", "flushes_per_sec": "Flushes", "vsize_megabytes": "Virtual", "resident_megabytes": "Resident", @@ -44,15 +51,23 @@ var DefaultStats = map[string]string{ "queued_writes": "QueuedWriters", "active_reads": "ActiveReaders", "active_writes": "ActiveWriters", + "net_in_bytes_count": "NetInCnt", "net_in_bytes": "NetIn", + "net_out_bytes_count": "NetOutCnt", "net_out_bytes": "NetOut", "open_connections": "NumConnections", + "ttl_deletes": "DeletedDocumentsCnt", "ttl_deletes_per_sec": "DeletedDocuments", + "ttl_passes": "PassesCnt", "ttl_passes_per_sec": "Passes", "cursor_timed_out": "TimedOutC", + "cursor_timed_out_count": "TimedOutCCnt", "cursor_no_timeout": "NoTimeoutC", + "cursor_no_timeout_count": "NoTimeoutCCnt", "cursor_pinned": "PinnedC", + "cursor_pinned_count": "PinnedCCnt", "cursor_total": "TotalC", + "cursor_total_count": "TotalCCnt", "document_deleted": "DeletedD", "document_inserted": "InsertedD", "document_returned": "ReturnedD", @@ -63,11 +78,17 @@ var DefaultStats = map[string]string{ } var DefaultReplStats = map[string]string{ + "repl_inserts": "InsertRCnt", "repl_inserts_per_sec": "InsertR", + "repl_queries": "QueryRCnt", "repl_queries_per_sec": "QueryR", + "repl_updates": "UpdateRCnt", "repl_updates_per_sec": "UpdateR", + "repl_deletes": "DeleteRCnt", "repl_deletes_per_sec": "DeleteR", + "repl_getmores": "GetMoreRCnt", "repl_getmores_per_sec": "GetMoreR", + "repl_commands": "CommandRCnt", "repl_commands_per_sec": "CommandR", "member_status": "NodeType", "state": "NodeState", @@ -96,6 +117,7 @@ var ShardHostStats = map[string]string{ var MmapStats = map[string]string{ "mapped_megabytes": "Mapped", "non-mapped_megabytes": "NonMapped", + "page_faults": "FaultsCnt", "page_faults_per_sec": "Faults", } diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index de75ed7e1..ca15ff977 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -19,10 +19,12 @@ func TestAddNonReplStats(t *testing.T) { Insert: 0, Query: 0, Update: 0, + UpdateCnt: 0, Delete: 0, GetMore: 0, Command: 0, Flushes: 0, + FlushesCnt: 0, Virtual: 0, Resident: 0, QueuedReaders: 0, @@ -191,31 +193,48 @@ func TestStateTag(t *testing.T) { fields := map[string]interface{}{ "active_reads": int64(0), "active_writes": int64(0), + "commands": int64(0), "commands_per_sec": int64(0), + "deletes": int64(0), "deletes_per_sec": int64(0), + "flushes": int64(0), "flushes_per_sec": int64(0), + 
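+		// raw counters added by this change sit alongside the
+		// pre-existing *_per_sec rate fields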
"getmores": int64(0), "getmores_per_sec": int64(0), + "inserts": int64(0), "inserts_per_sec": int64(0), "member_status": "PRI", "state": "PRIMARY", + "net_in_bytes_count": int64(0), "net_in_bytes": int64(0), + "net_out_bytes_count": int64(0), "net_out_bytes": int64(0), "open_connections": int64(0), + "queries": int64(0), "queries_per_sec": int64(0), "queued_reads": int64(0), "queued_writes": int64(0), + "repl_commands": int64(0), "repl_commands_per_sec": int64(0), + "repl_deletes": int64(0), "repl_deletes_per_sec": int64(0), + "repl_getmores": int64(0), "repl_getmores_per_sec": int64(0), + "repl_inserts": int64(0), "repl_inserts_per_sec": int64(0), + "repl_queries": int64(0), "repl_queries_per_sec": int64(0), + "repl_updates": int64(0), "repl_updates_per_sec": int64(0), "repl_lag": int64(0), "repl_oplog_window_sec": int64(0), "resident_megabytes": int64(0), + "updates": int64(0), "updates_per_sec": int64(0), "vsize_megabytes": int64(0), + "ttl_deletes": int64(0), "ttl_deletes_per_sec": int64(0), + "ttl_passes": int64(0), "ttl_passes_per_sec": int64(0), "jumbo_chunks": int64(0), "total_in_use": int64(0), @@ -223,9 +242,13 @@ func TestStateTag(t *testing.T) { "total_created": int64(0), "total_refreshing": int64(0), "cursor_timed_out": int64(0), + "cursor_timed_out_count": int64(0), "cursor_no_timeout": int64(0), + "cursor_no_timeout_count": int64(0), "cursor_pinned": int64(0), + "cursor_pinned_count": int64(0), "cursor_total": int64(0), + "cursor_total_count": int64(0), "document_deleted": int64(0), "document_inserted": int64(0), "document_returned": int64(0), diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index e32596deb..1320c32e9 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -457,14 +457,22 @@ type StatLine struct { LastPrinted time.Time // Opcounter fields - Insert, Query, Update, Delete, GetMore, Command int64 + Insert, InsertCnt int64 + Query, QueryCnt int64 + Update, UpdateCnt int64 + Delete, DeleteCnt int64 + GetMore, GetMoreCnt int64 + Command, CommandCnt int64 // TTL fields - Passes, DeletedDocuments int64 + Passes, PassesCnt int64 + DeletedDocuments, DeletedDocumentsCnt int64 // Cursor fields - TimedOutC int64 - NoTimeoutC, PinnedC, TotalC int64 + TimedOutC, TimedOutCCnt int64 + NoTimeoutC, NoTimeoutCCnt int64 + PinnedC, PinnedCCnt int64 + TotalC, TotalCCnt int64 // Document fields DeletedD, InsertedD, ReturnedD, UpdatedD int64 @@ -494,20 +502,26 @@ type StatLine struct { WorkerThreadEvictingPages int64 // Replicated Opcounter fields - InsertR, QueryR, UpdateR, DeleteR, GetMoreR, CommandR int64 - ReplLag int64 - OplogTimeDiff int64 - Flushes int64 - Mapped, Virtual, Resident, NonMapped int64 - Faults int64 - HighestLocked *LockStatus - QueuedReaders, QueuedWriters int64 - ActiveReaders, ActiveWriters int64 - NetIn, NetOut int64 - NumConnections int64 - ReplSetName string - NodeType string - NodeState string + InsertR, InsertRCnt int64 + QueryR, QueryRCnt int64 + UpdateR, UpdateRCnt int64 + DeleteR, DeleteRCnt int64 + GetMoreR, GetMoreRCnt int64 + CommandR, CommandRCnt int64 + ReplLag int64 + OplogTimeDiff int64 + Flushes, FlushesCnt int64 + Mapped, Virtual, Resident, NonMapped int64 + Faults, FaultsCnt int64 + HighestLocked *LockStatus + QueuedReaders, QueuedWriters int64 + ActiveReaders, ActiveWriters int64 + NetIn, NetInCnt int64 + NetOut, NetOutCnt int64 + NumConnections int64 + ReplSetName string + NodeType string + NodeState string // Cluster fields JumboChunksCount int64 @@ -576,12 
+590,12 @@ func computeLockDiffs(prevLocks, curLocks map[string]LockUsage) []LockUsage { return lockUsages } -func diff(newVal, oldVal, sampleTime int64) int64 { +func diff(newVal, oldVal, sampleTime int64) (int64, int64) { d := newVal - oldVal if d < 0 { d = newVal } - return d / sampleTime + return d / sampleTime, newVal } // NewStatLine constructs a StatLine object from two MongoStatus objects. @@ -612,25 +626,25 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } if newStat.Opcounters != nil && oldStat.Opcounters != nil { - returnVal.Insert = diff(newStat.Opcounters.Insert, oldStat.Opcounters.Insert, sampleSecs) - returnVal.Query = diff(newStat.Opcounters.Query, oldStat.Opcounters.Query, sampleSecs) - returnVal.Update = diff(newStat.Opcounters.Update, oldStat.Opcounters.Update, sampleSecs) - returnVal.Delete = diff(newStat.Opcounters.Delete, oldStat.Opcounters.Delete, sampleSecs) - returnVal.GetMore = diff(newStat.Opcounters.GetMore, oldStat.Opcounters.GetMore, sampleSecs) - returnVal.Command = diff(newStat.Opcounters.Command, oldStat.Opcounters.Command, sampleSecs) + returnVal.Insert, returnVal.InsertCnt = diff(newStat.Opcounters.Insert, oldStat.Opcounters.Insert, sampleSecs) + returnVal.Query, returnVal.QueryCnt = diff(newStat.Opcounters.Query, oldStat.Opcounters.Query, sampleSecs) + returnVal.Update, returnVal.UpdateCnt = diff(newStat.Opcounters.Update, oldStat.Opcounters.Update, sampleSecs) + returnVal.Delete, returnVal.DeleteCnt = diff(newStat.Opcounters.Delete, oldStat.Opcounters.Delete, sampleSecs) + returnVal.GetMore, returnVal.GetMoreCnt = diff(newStat.Opcounters.GetMore, oldStat.Opcounters.GetMore, sampleSecs) + returnVal.Command, returnVal.CommandCnt = diff(newStat.Opcounters.Command, oldStat.Opcounters.Command, sampleSecs) } if newStat.Metrics != nil && oldStat.Metrics != nil { if newStat.Metrics.TTL != nil && oldStat.Metrics.TTL != nil { - returnVal.Passes = diff(newStat.Metrics.TTL.Passes, oldStat.Metrics.TTL.Passes, sampleSecs) - returnVal.DeletedDocuments = diff(newStat.Metrics.TTL.DeletedDocuments, oldStat.Metrics.TTL.DeletedDocuments, sampleSecs) + returnVal.Passes, returnVal.PassesCnt = diff(newStat.Metrics.TTL.Passes, oldStat.Metrics.TTL.Passes, sampleSecs) + returnVal.DeletedDocuments, returnVal.DeletedDocumentsCnt = diff(newStat.Metrics.TTL.DeletedDocuments, oldStat.Metrics.TTL.DeletedDocuments, sampleSecs) } if newStat.Metrics.Cursor != nil && oldStat.Metrics.Cursor != nil { - returnVal.TimedOutC = diff(newStat.Metrics.Cursor.TimedOut, oldStat.Metrics.Cursor.TimedOut, sampleSecs) + returnVal.TimedOutC, returnVal.TimedOutCCnt = diff(newStat.Metrics.Cursor.TimedOut, oldStat.Metrics.Cursor.TimedOut, sampleSecs) if newStat.Metrics.Cursor.Open != nil && oldStat.Metrics.Cursor.Open != nil { - returnVal.NoTimeoutC = diff(newStat.Metrics.Cursor.Open.NoTimeout, oldStat.Metrics.Cursor.Open.NoTimeout, sampleSecs) - returnVal.PinnedC = diff(newStat.Metrics.Cursor.Open.Pinned, oldStat.Metrics.Cursor.Open.Pinned, sampleSecs) - returnVal.TotalC = diff(newStat.Metrics.Cursor.Open.Total, oldStat.Metrics.Cursor.Open.Total, sampleSecs) + returnVal.NoTimeoutC, returnVal.NoTimeoutCCnt = diff(newStat.Metrics.Cursor.Open.NoTimeout, oldStat.Metrics.Cursor.Open.NoTimeout, sampleSecs) + returnVal.PinnedC, returnVal.PinnedCCnt = diff(newStat.Metrics.Cursor.Open.Pinned, oldStat.Metrics.Cursor.Open.Pinned, sampleSecs) + returnVal.TotalC, returnVal.TotalCCnt = diff(newStat.Metrics.Cursor.Open.Total, oldStat.Metrics.Cursor.Open.Total, sampleSecs) } } if 
newStat.Metrics.Document != nil { @@ -642,18 +656,18 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } if newStat.OpcountersRepl != nil && oldStat.OpcountersRepl != nil { - returnVal.InsertR = diff(newStat.OpcountersRepl.Insert, oldStat.OpcountersRepl.Insert, sampleSecs) - returnVal.QueryR = diff(newStat.OpcountersRepl.Query, oldStat.OpcountersRepl.Query, sampleSecs) - returnVal.UpdateR = diff(newStat.OpcountersRepl.Update, oldStat.OpcountersRepl.Update, sampleSecs) - returnVal.DeleteR = diff(newStat.OpcountersRepl.Delete, oldStat.OpcountersRepl.Delete, sampleSecs) - returnVal.GetMoreR = diff(newStat.OpcountersRepl.GetMore, oldStat.OpcountersRepl.GetMore, sampleSecs) - returnVal.CommandR = diff(newStat.OpcountersRepl.Command, oldStat.OpcountersRepl.Command, sampleSecs) + returnVal.InsertR, returnVal.InsertRCnt = diff(newStat.OpcountersRepl.Insert, oldStat.OpcountersRepl.Insert, sampleSecs) + returnVal.QueryR, returnVal.QueryRCnt = diff(newStat.OpcountersRepl.Query, oldStat.OpcountersRepl.Query, sampleSecs) + returnVal.UpdateR, returnVal.UpdateRCnt = diff(newStat.OpcountersRepl.Update, oldStat.OpcountersRepl.Update, sampleSecs) + returnVal.DeleteR, returnVal.DeleteRCnt = diff(newStat.OpcountersRepl.Delete, oldStat.OpcountersRepl.Delete, sampleSecs) + returnVal.GetMoreR, returnVal.GetMoreRCnt = diff(newStat.OpcountersRepl.GetMore, oldStat.OpcountersRepl.GetMore, sampleSecs) + returnVal.CommandR, returnVal.CommandRCnt = diff(newStat.OpcountersRepl.Command, oldStat.OpcountersRepl.Command, sampleSecs) } returnVal.CacheDirtyPercent = -1 returnVal.CacheUsedPercent = -1 if newStat.WiredTiger != nil && oldStat.WiredTiger != nil { - returnVal.Flushes = newStat.WiredTiger.Transaction.TransCheckpoints - oldStat.WiredTiger.Transaction.TransCheckpoints + returnVal.Flushes, returnVal.FlushesCnt = diff(newStat.WiredTiger.Transaction.TransCheckpoints, oldStat.WiredTiger.Transaction.TransCheckpoints, sampleSecs) returnVal.CacheDirtyPercent = float64(newStat.WiredTiger.Cache.TrackedDirtyBytes) / float64(newStat.WiredTiger.Cache.MaxBytesConfigured) returnVal.CacheUsedPercent = float64(newStat.WiredTiger.Cache.CurrentCachedBytes) / float64(newStat.WiredTiger.Cache.MaxBytesConfigured) @@ -670,7 +684,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.ServerEvictingPages = newStat.WiredTiger.Cache.ServerEvictingPages returnVal.WorkerThreadEvictingPages = newStat.WiredTiger.Cache.WorkerThreadEvictingPages } else if newStat.BackgroundFlushing != nil && oldStat.BackgroundFlushing != nil { - returnVal.Flushes = newStat.BackgroundFlushing.Flushes - oldStat.BackgroundFlushing.Flushes + returnVal.Flushes, returnVal.FlushesCnt = diff(newStat.BackgroundFlushing.Flushes, oldStat.BackgroundFlushing.Flushes, sampleSecs) } returnVal.Time = newMongo.SampleTime @@ -713,7 +727,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec if oldStat.ExtraInfo != nil && newStat.ExtraInfo != nil && oldStat.ExtraInfo.PageFaults != nil && newStat.ExtraInfo.PageFaults != nil { - returnVal.Faults = diff(*(newStat.ExtraInfo.PageFaults), *(oldStat.ExtraInfo.PageFaults), sampleSecs) + returnVal.Faults, returnVal.FaultsCnt = diff(*(newStat.ExtraInfo.PageFaults), *(oldStat.ExtraInfo.PageFaults), sampleSecs) } if !returnVal.IsMongos && oldStat.Locks != nil { globalCheck, hasGlobal := oldStat.Locks["Global"] @@ -812,8 +826,8 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } if oldStat.Network != nil && 
newStat.Network != nil { - returnVal.NetIn = diff(newStat.Network.BytesIn, oldStat.Network.BytesIn, sampleSecs) - returnVal.NetOut = diff(newStat.Network.BytesOut, oldStat.Network.BytesOut, sampleSecs) + returnVal.NetIn, returnVal.NetInCnt = diff(newStat.Network.BytesIn, oldStat.Network.BytesIn, sampleSecs) + returnVal.NetOut, returnVal.NetOutCnt = diff(newStat.Network.BytesOut, oldStat.Network.BytesOut, sampleSecs) } if newStat.Connections != nil { From e20ba1e2b6d11eb21845e3a65701f3f6dc64c8e5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 9 Jan 2019 15:55:57 -0800 Subject: [PATCH 0510/1815] Fix intermittent test cases in cloud_pubsub (#5271) --- plugins/outputs/cloud_pubsub/pubsub.go | 13 +++++++------ plugins/outputs/cloud_pubsub/pubsub_test.go | 3 ++- plugins/outputs/cloud_pubsub/topic_stubbed.go | 19 +++++++++++-------- 3 files changed, 20 insertions(+), 15 deletions(-) diff --git a/plugins/outputs/cloud_pubsub/pubsub.go b/plugins/outputs/cloud_pubsub/pubsub.go index 9811af1d7..bc81bf580 100644 --- a/plugins/outputs/cloud_pubsub/pubsub.go +++ b/plugins/outputs/cloud_pubsub/pubsub.go @@ -1,16 +1,17 @@ package cloud_pubsub import ( - "cloud.google.com/go/pubsub" "context" "fmt" + "sync" + + "cloud.google.com/go/pubsub" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" "golang.org/x/oauth2/google" "google.golang.org/api/option" - "sync" ) const sampleConfig = ` @@ -28,9 +29,9 @@ const sampleConfig = ` ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" - ## Optional. Filepath for GCP credentials JSON file to authorize calls to - ## PubSub APIs. If not set explicitly, Telegraf will attempt to use - ## Application Default Credentials, which is preferred. + ## Optional. Filepath for GCP credentials JSON file to authorize calls to + ## PubSub APIs. If not set explicitly, Telegraf will attempt to use + ## Application Default Credentials, which is preferred. # credentials_file = "path/to/my/creds.json" ## Optional. If true, will send all metrics per write in one PubSub message. @@ -55,7 +56,7 @@ const sampleConfig = ` ## Optional. Specifies a timeout for requests to the PubSub API. # publish_timeout = "30s" - + ## Optional. PubSub attributes to add to metrics. 
# [[inputs.pubsub.attributes]] # my_attr = "tag_value" diff --git a/plugins/outputs/cloud_pubsub/pubsub_test.go b/plugins/outputs/cloud_pubsub/pubsub_test.go index a60f05eb0..eb993b37c 100644 --- a/plugins/outputs/cloud_pubsub/pubsub_test.go +++ b/plugins/outputs/cloud_pubsub/pubsub_test.go @@ -1,12 +1,13 @@ package cloud_pubsub import ( + "testing" + "cloud.google.com/go/pubsub" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" - "testing" ) func TestPubSub_WriteSingle(t *testing.T) { diff --git a/plugins/outputs/cloud_pubsub/topic_stubbed.go b/plugins/outputs/cloud_pubsub/topic_stubbed.go index fdae70bc3..55f2e5a0a 100644 --- a/plugins/outputs/cloud_pubsub/topic_stubbed.go +++ b/plugins/outputs/cloud_pubsub/topic_stubbed.go @@ -1,18 +1,20 @@ package cloud_pubsub import ( - "cloud.google.com/go/pubsub" "context" "errors" "fmt" + "runtime" + "sync" + "testing" + "time" + + "cloud.google.com/go/pubsub" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/serializers" "google.golang.org/api/support/bundler" - "runtime" - "sync" - "testing" ) const ( @@ -138,7 +140,7 @@ func (t *stubTopic) SetPublishSettings(settings pubsub.PublishSettings) { func (t *stubTopic) initBundler() *stubTopic { t.bundler = bundler.NewBundler(&bundledMsg{}, t.sendBundle()) - t.bundler.DelayThreshold = t.Settings.DelayThreshold + t.bundler.DelayThreshold = 10 * time.Second t.bundler.BundleCountThreshold = t.Settings.CountThreshold if t.bundler.BundleCountThreshold > pubsub.MaxPublishRequestCount { t.bundler.BundleCountThreshold = pubsub.MaxPublishRequestCount @@ -159,14 +161,15 @@ func (t *stubTopic) sendBundle() func(items interface{}) { for _, msg := range bundled { r := msg.stubResult + for _, id := range r.metricIds { + t.published[id] = msg.Message + } + if r.sendError { r.err <- errors.New(errMockFail) } else { r.done <- struct{}{} } - for _, id := range r.metricIds { - t.published[id] = msg.Message - } } t.bundleCount++ From ccfd9ca52281a12b6d4d4fb23f92c828482e57d5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 10 Jan 2019 14:34:19 -0800 Subject: [PATCH 0511/1815] Update mongodb readme --- plugins/inputs/mongodb/README.md | 70 +++++++++++++++++++++----------- 1 file changed, 46 insertions(+), 24 deletions(-) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index 105584462..3751d51a4 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -47,39 +47,39 @@ Error in input [mongodb]: not authorized on admin to execute command { serverSta - fields: - active_reads (integer) - active_writes (integer) - - commands_per_sec (integer) + - commands (integer) - connections_current (integer) - connections_available (integer) - connections_total_created (integer) - - cursor_timed_out (integer) - - cursor_no_timeout (integer) - - cursor_pinned (integer) - - cursor_total (integer) - - deletes_per_sec (integer) + - cursor_timed_out_count (integer) + - cursor_no_timeout_count (integer) + - cursor_pinned_count (integer) + - cursor_total_count (integer) + - deletes (integer) - document_deleted (integer) - document_inserted (integer) - document_returned (integer) - document_updated (integer) - - flushes_per_sec (integer) - - getmores_per_sec (integer) - - inserts_per_sec (integer) + - flushes (integer) + - getmores (integer) + - inserts 
(integer)
 - jumbo_chunks (integer)
 - member_status (string)
- - net_in_bytes (integer)
- - net_out_bytes (integer)
+ - net_in_bytes_count (integer)
+ - net_out_bytes_count (integer)
 - open_connections (integer)
 - percent_cache_dirty (float)
 - percent_cache_used (float)
- - queries_per_sec (integer)
+ - queries (integer)
 - queued_reads (integer)
 - queued_writes (integer)
- - repl_commands_per_sec (integer)
- - repl_deletes_per_sec (integer)
- - repl_getmores_per_sec (integer)
- - repl_inserts_per_sec (integer)
+ - repl_commands (integer)
+ - repl_deletes (integer)
+ - repl_getmores (integer)
+ - repl_inserts (integer)
 - repl_lag (integer)
- - repl_queries_per_sec (integer)
- - repl_updates_per_sec (integer)
+ - repl_queries (integer)
+ - repl_updates (integer)
 - repl_oplog_window_sec (integer)
 - resident_megabytes (integer)
 - state (string)
@@ -87,9 +87,9 @@ Error in input [mongodb]: not authorized on admin to execute command { serverSta
 - total_created (integer)
 - total_in_use (integer)
 - total_refreshing (integer)
- - ttl_deletes_per_sec (integer)
- - ttl_passes_per_sec (integer)
- - updates_per_sec (integer)
+ - ttl_deletes (integer)
+ - ttl_passes (integer)
+ - updates (integer)
 - vsize_megabytes (integer)
 - wtcache_app_threads_page_read_count (integer)
 - wtcache_app_threads_page_read_time (integer)
@@ -103,8 +103,29 @@ Error in input [mongodb]: not authorized on admin to execute command { serverSta
 - wtcache_server_evicting_pages (integer)
 - wtcache_tracked_dirty_bytes (integer)
 - wtcache_worker_thread_evictingpages (integer)
+ - commands_per_sec (integer, deprecated in 1.10; use `commands`)
+ - cursor_no_timeout (integer, opened/sec, deprecated in 1.10; use `cursor_no_timeout_count`)
+ - cursor_pinned (integer, opened/sec, deprecated in 1.10; use `cursor_pinned_count`)
+ - cursor_timed_out (integer, opened/sec, deprecated in 1.10; use `cursor_timed_out_count`)
+ - cursor_total (integer, opened/sec, deprecated in 1.10; use `cursor_total_count`)
+ - deletes_per_sec (integer, deprecated in 1.10; use `deletes`)
+ - flushes_per_sec (integer, deprecated in 1.10; use `flushes`)
+ - getmores_per_sec (integer, deprecated in 1.10; use `getmores`)
+ - inserts_per_sec (integer, deprecated in 1.10; use `inserts`)
+ - net_in_bytes (integer, bytes/sec, deprecated in 1.10; use `net_in_bytes_count`)
+ - net_out_bytes (integer, bytes/sec, deprecated in 1.10; use `net_out_bytes_count`)
+ - queries_per_sec (integer, deprecated in 1.10; use `queries`)
+ - repl_commands_per_sec (integer, deprecated in 1.10; use `repl_commands`)
+ - repl_deletes_per_sec (integer, deprecated in 1.10; use `repl_deletes`)
+ - repl_getmores_per_sec (integer, deprecated in 1.10; use `repl_getmores`)
+ - repl_inserts_per_sec (integer, deprecated in 1.10; use `repl_inserts`)
+ - repl_queries_per_sec (integer, deprecated in 1.10; use `repl_queries`)
+ - repl_updates_per_sec (integer, deprecated in 1.10; use `repl_updates`)
+ - ttl_deletes_per_sec (integer, deprecated in 1.10; use `ttl_deletes`)
+ - ttl_passes_per_sec (integer, deprecated in 1.10; use `ttl_passes`)
+ - updates_per_sec (integer, deprecated in 1.10; use `updates`)

-- mongodb_db_stats
+- mongodb_db_stats
 - tags:
 - db_name
 - hostname
@@ -131,7 +152,8 @@ Error in input [mongodb]: not authorized on admin to execute command { serverSta
 ### Example Output:

 ```
-mongodb,hostname=127.0.0.1:27017 
active_reads=0i,active_writes=0i,commands_per_sec=6i,cursor_no_timeout=0i,cursor_pinned=0i,cursor_timed_out=0i,cursor_total=0i,deletes_per_sec=0i,flushes_per_sec=0i,getmores_per_sec=1i,inserts_per_sec=0i,jumbo_chunks=0i,member_status="PRI",net_in_bytes=851i,net_out_bytes=23904i,open_connections=6i,percent_cache_dirty=0,percent_cache_used=0,queries_per_sec=2i,queued_reads=0i,queued_writes=0i,repl_commands_per_sec=0i,repl_deletes_per_sec=0i,repl_getmores_per_sec=0i,repl_inserts_per_sec=0i,repl_lag=0i,repl_queries_per_sec=0i,repl_updates_per_sec=0i,resident_megabytes=67i,state="PRIMARY",total_available=0i,total_created=0i,total_in_use=0i,total_refreshing=0i,ttl_deletes_per_sec=0i,ttl_passes_per_sec=0i,updates_per_sec=0i,vsize_megabytes=729i,wtcache_app_threads_page_read_count=4i,wtcache_app_threads_page_read_time=18i,wtcache_app_threads_page_write_count=6i,wtcache_bytes_read_into=10075i,wtcache_bytes_written_from=115711i,wtcache_current_bytes=86038i,wtcache_max_bytes_configured=1073741824i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=0i,wtcache_worker_thread_evictingpages=0i 1522798796000000000 -mongodb_db_stats,db_name=local,hostname=127.0.0.1:27017 avg_obj_size=818.625,collections=5i,data_size=6549i,index_size=86016i,indexes=4i,num_extents=0i,objects=8i,ok=1i,storage_size=118784i,type="db_stat" 1522799074000000000 +mongodb,hostname=127.0.0.1:27017 active_reads=0i,active_writes=0i,commands=1335i,commands_per_sec=7i,connections_available=814i,connections_current=5i,connections_total_created=0i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=1i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=1i,deletes=0i,deletes_per_sec=0i,document_deleted=0i,document_inserted=0i,document_returned=13i,document_updated=0i,flushes=5i,flushes_per_sec=0i,getmores=269i,getmores_per_sec=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,member_status="PRI",net_in_bytes=986i,net_in_bytes_count=358006i,net_out_bytes=23906i,net_out_bytes_count=661507i,open_connections=5i,percent_cache_dirty=0,percent_cache_used=0,queries=18i,queries_per_sec=3i,queued_reads=0i,queued_writes=0i,repl_commands=0i,repl_commands_per_sec=0i,repl_deletes=0i,repl_deletes_per_sec=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=0i,repl_inserts_per_sec=0i,repl_lag=0i,repl_oplog_window_sec=24355215i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=0i,repl_updates_per_sec=0i,resident_megabytes=62i,state="PRIMARY",total_available=0i,total_created=0i,total_in_use=0i,total_refreshing=0i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=23i,ttl_passes_per_sec=0i,updates=0i,updates_per_sec=0i,vsize_megabytes=713i,wtcache_app_threads_page_read_count=13i,wtcache_app_threads_page_read_time=74i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=55271i,wtcache_bytes_written_from=125402i,wtcache_current_bytes=117050i,wtcache_max_bytes_configured=1073741824i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=0i,wtcache_worker_thread_evictingpages=0i 1547159491000000000 +mongodb_db_stats,db_name=admin,hostname=127.0.0.1:27017 avg_obj_size=241,collections=2i,data_size=723i,index_size=49152i,indexes=3i,num_extents=0i,objects=3i,ok=1i,storage_size=53248i,type="db_stat" 1547159491000000000 +mongodb_db_stats,db_name=local,hostname=127.0.0.1:27017 
avg_obj_size=813.9705882352941,collections=6i,data_size=55350i,index_size=102400i,indexes=5i,num_extents=0i,objects=68i,ok=1i,storage_size=204800i,type="db_stat" 1547159491000000000
 mongodb_shard_stats,hostname=127.0.0.1:27017 in_use=3i,available=3i,created=4i,refreshing=0i 1522799074000000000
 ```

From df337597113016e9c9ffc4bed9854ac1b7eb98d1 Mon Sep 17 00:00:00 2001
From: "Artem V. Navrotskiy"
Date: Tue, 15 Jan 2019 22:31:52 +0300
Subject: [PATCH 0512/1815] Add flush_total_time_ns and additional wired tiger
 fields to mongodb input (#5273)

---
 plugins/inputs/mongodb/README.md            |  6 +++++
 plugins/inputs/mongodb/mongodb_data.go      |  6 +++++
 plugins/inputs/mongodb/mongodb_data_test.go | 11 +++++---
 plugins/inputs/mongodb/mongostat.go         | 28 ++++++++++++++++++---
 4 files changed, 45 insertions(+), 6 deletions(-)

diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md
index 3751d51a4..982936811 100644
--- a/plugins/inputs/mongodb/README.md
+++ b/plugins/inputs/mongodb/README.md
@@ -61,6 +61,7 @@ Error in input [mongodb]: not authorized on admin to execute command { serverSta
 - document_returned (integer)
 - document_updated (integer)
 - flushes (integer)
+ - flushes_total_time_ns (integer)
 - getmores (integer)
 - inserts (integer)
 - jumbo_chunks (integer)
@@ -96,8 +97,13 @@ Error in input [mongodb]: not authorized on admin to execute command { serverSta
 - wtcache_app_threads_page_write_count (integer)
 - wtcache_bytes_read_into (integer)
 - wtcache_bytes_written_from (integer)
+ - wtcache_pages_read_info (integer)
+ - wtcache_pages_requested_from (integer)
 - wtcache_current_bytes (integer)
 - wtcache_max_bytes_configured (integer)
+ - wtcache_internal_pages_evicted (integer)
+ - wtcache_modified_pages_evicted (integer)
+ - wtcache_unmodified_pages_evicted (integer)
 - wtcache_pages_evicted_by_app_thread (integer)
 - wtcache_pages_queued_for_eviction (integer)
 - wtcache_server_evicting_pages (integer)
diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go
index 733018374..c0e7baf65 100644
--- a/plugins/inputs/mongodb/mongodb_data.go
+++ b/plugins/inputs/mongodb/mongodb_data.go
@@ -45,6 +45,7 @@ var DefaultStats = map[string]string{
 	"commands_per_sec":      "Command",
 	"flushes":               "FlushesCnt",
 	"flushes_per_sec":       "Flushes",
+	"flushes_total_time_ns": "FlushesTotalTime",
 	"vsize_megabytes":       "Virtual",
 	"resident_megabytes":    "Resident",
 	"queued_reads":          "QueuedReaders",
@@ -137,8 +138,13 @@ var WiredTigerExtStats = map[string]string{
 	"wtcache_bytes_read_into":             "BytesReadInto",
 	"wtcache_pages_evicted_by_app_thread": "PagesEvictedByAppThread",
 	"wtcache_pages_queued_for_eviction":   "PagesQueuedForEviction",
+	"wtcache_pages_read_info":             "PagesReadIntoCache",
+	"wtcache_pages_requested_from":        "PagesRequestedFromCache",
 	"wtcache_server_evicting_pages":       "ServerEvictingPages",
 	"wtcache_worker_thread_evictingpages": "WorkerThreadEvictingPages",
+	"wtcache_internal_pages_evicted":      "InternalPagesEvicted",
+	"wtcache_modified_pages_evicted":      "ModifiedPagesEvicted",
+	"wtcache_unmodified_pages_evicted":    "UnmodifiedPagesEvicted",
 }

 var DbDataStats = map[string]string{
diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go
index ca15ff977..da50bdc9e 100644
--- a/plugins/inputs/mongodb/mongodb_data_test.go
+++ b/plugins/inputs/mongodb/mongodb_data_test.go
@@ -56,7 +56,7 @@ func TestAddNonReplStats(t *testing.T) {
 	d.flush(&acc)

 	for key := range DefaultStats {
-		assert.True(t, acc.HasInt64Field("mongodb", key))
+		assert.True(t, 
acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) } } @@ -77,7 +77,7 @@ func TestAddReplStats(t *testing.T) { d.flush(&acc) for key := range MmapStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + assert.True(t, acc.HasInt64Field("mongodb", key), key) } } @@ -109,7 +109,11 @@ func TestAddWiredTigerStats(t *testing.T) { d.flush(&acc) for key := range WiredTigerStats { - assert.True(t, acc.HasFloatField("mongodb", key)) + assert.True(t, acc.HasFloatField("mongodb", key), key) + } + + for key := range WiredTigerExtStats { + assert.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) } } @@ -199,6 +203,7 @@ func TestStateTag(t *testing.T) { "deletes_per_sec": int64(0), "flushes": int64(0), "flushes_per_sec": int64(0), + "flushes_total_time_ns": int64(0), "getmores": int64(0), "getmores_per_sec": int64(0), "inserts": int64(0), diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 1320c32e9..b763631ca 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -168,13 +168,19 @@ type CacheStats struct { BytesReadInto int64 `bson:"bytes read into cache"` PagesEvictedByAppThread int64 `bson:"pages evicted by application threads"` PagesQueuedForEviction int64 `bson:"pages queued for eviction"` + PagesReadIntoCache int64 `bson:"pages read into cache"` + PagesRequestedFromCache int64 `bson:"pages requested from the cache"` ServerEvictingPages int64 `bson:"eviction server evicting pages"` WorkerThreadEvictingPages int64 `bson:"eviction worker thread evicting pages"` + InternalPagesEvicted int64 `bson:"internal pages evicted"` + ModifiedPagesEvicted int64 `bson:"modified pages evicted"` + UnmodifiedPagesEvicted int64 `bson:"unmodified pages evicted"` } // TransactionStats stores transaction checkpoints in WiredTiger. type TransactionStats struct { - TransCheckpoints int64 `bson:"transaction checkpoints"` + TransCheckpointsTotalTimeMsecs int64 `bson:"transaction checkpoint total time (msecs)"` + TransCheckpoints int64 `bson:"transaction checkpoints"` } // ReplStatus stores data related to replica sets. 
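The (value, count) pairs assigned throughout `NewStatLine` come from mongostat's small `diff` helper, which turns two samples of a monotonic counter into a per-second rate plus the latest raw value. A minimal sketch of that conversion — the body here is an assumption inferred from the call sites in this patch, not a verified copy of the helper:

```go
package mongodb

// diff sketches the counter-to-rate conversion behind pairs such as
// Flushes/FlushesCnt: it returns the per-second delta between two samples
// and the newest raw value.
func diff(newVal, oldVal, sampleTime int64) (int64, int64) {
	if sampleTime <= 0 {
		sampleTime = 1 // assume at least a one-second sampling window
	}
	d := newVal - oldVal
	if d < 0 {
		// counter reset (e.g. mongod restarted): treat the new sample as the delta
		d = newVal
	}
	return d / sampleTime, newVal
}
```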
@@ -498,8 +504,13 @@ type StatLine struct { BytesReadInto int64 PagesEvictedByAppThread int64 PagesQueuedForEviction int64 + PagesReadIntoCache int64 + PagesRequestedFromCache int64 ServerEvictingPages int64 WorkerThreadEvictingPages int64 + InternalPagesEvicted int64 + ModifiedPagesEvicted int64 + UnmodifiedPagesEvicted int64 // Replicated Opcounter fields InsertR, InsertRCnt int64 @@ -511,6 +522,7 @@ type StatLine struct { ReplLag int64 OplogTimeDiff int64 Flushes, FlushesCnt int64 + FlushesTotalTime int64 Mapped, Virtual, Resident, NonMapped int64 Faults, FaultsCnt int64 HighestLocked *LockStatus @@ -666,8 +678,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.CacheDirtyPercent = -1 returnVal.CacheUsedPercent = -1 - if newStat.WiredTiger != nil && oldStat.WiredTiger != nil { - returnVal.Flushes, returnVal.FlushesCnt = diff(newStat.WiredTiger.Transaction.TransCheckpoints, oldStat.WiredTiger.Transaction.TransCheckpoints, sampleSecs) + if newStat.WiredTiger != nil { returnVal.CacheDirtyPercent = float64(newStat.WiredTiger.Cache.TrackedDirtyBytes) / float64(newStat.WiredTiger.Cache.MaxBytesConfigured) returnVal.CacheUsedPercent = float64(newStat.WiredTiger.Cache.CurrentCachedBytes) / float64(newStat.WiredTiger.Cache.MaxBytesConfigured) @@ -681,8 +692,19 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.BytesReadInto = newStat.WiredTiger.Cache.BytesReadInto returnVal.PagesEvictedByAppThread = newStat.WiredTiger.Cache.PagesEvictedByAppThread returnVal.PagesQueuedForEviction = newStat.WiredTiger.Cache.PagesQueuedForEviction + returnVal.PagesReadIntoCache = newStat.WiredTiger.Cache.PagesReadIntoCache + returnVal.PagesRequestedFromCache = newStat.WiredTiger.Cache.PagesRequestedFromCache returnVal.ServerEvictingPages = newStat.WiredTiger.Cache.ServerEvictingPages returnVal.WorkerThreadEvictingPages = newStat.WiredTiger.Cache.WorkerThreadEvictingPages + + returnVal.InternalPagesEvicted = newStat.WiredTiger.Cache.InternalPagesEvicted + returnVal.ModifiedPagesEvicted = newStat.WiredTiger.Cache.ModifiedPagesEvicted + returnVal.UnmodifiedPagesEvicted = newStat.WiredTiger.Cache.UnmodifiedPagesEvicted + + returnVal.FlushesTotalTime = newStat.WiredTiger.Transaction.TransCheckpointsTotalTimeMsecs * int64(time.Millisecond) + } + if newStat.WiredTiger != nil && oldStat.WiredTiger != nil { + returnVal.Flushes, returnVal.FlushesCnt = diff(newStat.WiredTiger.Transaction.TransCheckpoints, oldStat.WiredTiger.Transaction.TransCheckpoints, sampleSecs) } else if newStat.BackgroundFlushing != nil && oldStat.BackgroundFlushing != nil { returnVal.Flushes, returnVal.FlushesCnt = diff(newStat.BackgroundFlushing.Flushes, oldStat.BackgroundFlushing.Flushes, sampleSecs) } From dcc4389a2ab9995b29500e744609d67582d4eb59 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 15 Jan 2019 11:36:27 -0800 Subject: [PATCH 0513/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a42467cc6..37ea2b554 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ - [#5113](https://github.com/influxdata/telegraf/pull/5113): Improve scalability of vsphere input. - [#5210](https://github.com/influxdata/telegraf/pull/5210): Add read and write op per second fields to ceph input. - [#5214](https://github.com/influxdata/telegraf/pull/5214): Add configurable timeout to varnish input. 
+- [#5273](https://github.com/influxdata/telegraf/pull/5273): Add flush_total_time_ns and additional wired tiger fields to mongodb input. #### Bugfixes From 42184fd1c86e6e03c3c4387e7b11808cd59bb736 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 15 Jan 2019 11:47:06 -0800 Subject: [PATCH 0514/1815] Use gopsutil 2.18.12 (#5288) --- Gopkg.lock | 6 +++--- Gopkg.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index c110818ec..e36b76d9b 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -961,7 +961,7 @@ version = "v1.2.0" [[projects]] - digest = "1:02715a2fb4b9279af36651a59a51dd4164eb689bd6785874811899f43eeb2a54" + digest = "1:02135a4151567d48ebff6cf36f73b5d8dee566855df84ffd96111d5225848bb7" name = "github.com/shirou/gopsutil" packages = [ "cpu", @@ -974,8 +974,8 @@ "process", ] pruneopts = "" - revision = "8048a2e9c5773235122027dd585cf821b2af1249" - version = "v2.18.07" + revision = "ccc1c1016bc5d10e803189ee43417c50cdde7f1b" + version = "v2.18.12" [[projects]] branch = "master" diff --git a/Gopkg.toml b/Gopkg.toml index 4f0deb08d..02b499007 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -148,7 +148,7 @@ [[constraint]] name = "github.com/shirou/gopsutil" - version = "2.18.07" + version = "2.18.12" [[constraint]] name = "github.com/Shopify/sarama" From 193aba86735dfa8859be8f9db321b5b0f0fbe165 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 15 Jan 2019 11:48:36 -0800 Subject: [PATCH 0515/1815] Ack delivery if it is unparseable in amqp_consumer input (#5286) --- plugins/inputs/amqp_consumer/amqp_consumer.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go index 568ee6f38..d80a3683b 100644 --- a/plugins/inputs/amqp_consumer/amqp_consumer.go +++ b/plugins/inputs/amqp_consumer/amqp_consumer.go @@ -430,6 +430,14 @@ func (a *AMQPConsumer) process(ctx context.Context, msgs <-chan amqp.Delivery, a func (a *AMQPConsumer) onMessage(acc telegraf.TrackingAccumulator, d amqp.Delivery) error { metrics, err := a.parser.Parse(d.Body) if err != nil { + // Discard the message from the queue; will never be able to process + // this message. + rejErr := d.Ack(false) + if rejErr != nil { + log.Printf("E! 
[inputs.amqp_consumer] Unable to reject message: %d: %v", + d.DeliveryTag, rejErr) + a.conn.Close() + } return err } From da802768023bbaf40327358c3ab24a56b09372e1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 15 Jan 2019 11:48:52 -0800 Subject: [PATCH 0516/1815] Use lifo order in metric buffer (#5287) --- internal/models/buffer.go | 145 ++++++++------ internal/models/buffer_test.go | 265 ++++++++++++++++++++++++- internal/models/running_output_test.go | 28 +-- testutil/metric.go | 4 + 4 files changed, 365 insertions(+), 77 deletions(-) diff --git a/internal/models/buffer.go b/internal/models/buffer.go index 8c03db3d4..3d82e573a 100644 --- a/internal/models/buffer.go +++ b/internal/models/buffer.go @@ -22,8 +22,7 @@ type Buffer struct { cap int // the capacity of the buffer batchFirst int // index of the first metric in the batch - batchLast int // one after the index of the last metric in the batch - batchSize int // number of metrics current in the batch + batchSize int // number of metrics currently in the batch MetricsAdded selfstat.Stat MetricsWritten selfstat.Stat @@ -82,46 +81,24 @@ func (b *Buffer) metricDropped(metric telegraf.Metric) { metric.Reject() } -func (b *Buffer) inBatch() bool { - if b.batchSize == 0 { - return false - } - - if b.batchFirst < b.batchLast { - return b.last >= b.batchFirst && b.last < b.batchLast - } else { - return b.last >= b.batchFirst || b.last < b.batchLast - } -} - func (b *Buffer) add(m telegraf.Metric) { // Check if Buffer is full if b.size == b.cap { - if b.batchSize == 0 { - // No batch taken by the output, we can drop the metric now. - b.metricDropped(b.buf[b.last]) - } else if b.inBatch() { - // There is an outstanding batch and this will overwrite a metric - // in it, delay the dropping only in case the batch gets rejected. + b.metricDropped(b.buf[b.last]) + + if b.last == b.batchFirst && b.batchSize > 0 { b.batchSize-- - b.batchFirst++ - b.batchFirst %= b.cap - } else { - // There is an outstanding batch, but this overwrites a metric - // outside of it. - b.metricDropped(b.buf[b.last]) + b.batchFirst = b.next(b.batchFirst) } } b.metricAdded() b.buf[b.last] = m - b.last++ - b.last %= b.cap + b.last = b.next(b.last) if b.size == b.cap { - b.first++ - b.first %= b.cap + b.first = b.next(b.first) } b.size = min(b.size+1, b.cap) @@ -138,10 +115,8 @@ func (b *Buffer) Add(metrics ...telegraf.Metric) { } // Batch returns a slice containing up to batchSize of the most recently added -// metrics. -// -// The metrics contained in the batch are not removed from the buffer, instead -// the last batch is recorded and removed only if Accept is called. +// metrics. Metrics are ordered from newest to oldest in the batch. The +// batch must not be modified by the client. func (b *Buffer) Batch(batchSize int) []telegraf.Metric { b.Lock() defer b.Unlock() @@ -152,21 +127,23 @@ func (b *Buffer) Batch(batchSize int) []telegraf.Metric { return out } - b.batchFirst = b.first - b.batchLast = b.first + outLen - b.batchLast %= b.cap + b.batchFirst = b.cap + b.last - outLen + b.batchFirst %= b.cap b.batchSize = outLen - until := min(b.cap, b.first+outLen) - - n := copy(out, b.buf[b.first:until]) - if n < outLen { - copy(out[n:], b.buf[:outLen-n]) + batchIndex := b.batchFirst + for i := range out { + out[len(out)-1-i] = b.buf[batchIndex] + b.buf[batchIndex] = nil + batchIndex = b.next(batchIndex) } + + b.last = b.batchFirst + b.size -= outLen return out } -// Accept removes the metrics contained in the last batch. 
+// Accept marks the batch, acquired from Batch(), as successfully written.
 func (b *Buffer) Accept(batch []telegraf.Metric) {
 	b.Lock()
 	defer b.Unlock()
@@ -175,35 +152,89 @@ func (b *Buffer) Accept(batch []telegraf.Metric) {
 		b.metricWritten(m)
 	}

-	b.size -= b.batchSize
-	for i := 0; i < b.batchSize; i++ {
-		b.buf[b.first] = nil
-		b.first++
-		b.first %= b.cap
-	}
-
 	b.resetBatch()
 }

-// Reject clears the current batch record so that calls to Accept will have no
-// effect.
+// Reject returns the batch, acquired from Batch(), to the buffer and marks it
+// as unsent.
 func (b *Buffer) Reject(batch []telegraf.Metric) {
 	b.Lock()
 	defer b.Unlock()

-	if len(batch) > b.batchSize {
-		// Part or all of the batch was dropped before reject was called.
-		for _, m := range batch[b.batchSize:] {
-			b.metricDropped(m)
+	older := b.dist(b.first, b.batchFirst)
+	free := b.cap - b.size
+	restore := min(len(batch), free+older)
+
+	// Rotate newer metrics forward by the number of metrics that we can restore.
+	rb := b.batchFirst
+	rp := b.last
+	re := b.nextby(rp, restore)
+	b.last = re
+	for rb != rp {
+		rp = b.prev(rp)
+		re = b.prev(re)
+
+		if b.buf[re] != nil {
+			b.metricDropped(b.buf[re])
+		}
+
+		b.buf[re] = b.buf[rp]
+		b.buf[rp] = nil
+	}
+
+	// Copy metrics from the batch back into the buffer; recall that the
+	// batch is in reverse order compared to b.buf
+	for i := range batch {
+		if i < restore {
+			re = b.prev(re)
+			b.buf[re] = batch[i]
+			b.size++
+		} else {
+			b.metricDropped(batch[i])
 		}
 	}

 	b.resetBatch()
 }

+// dist returns the distance between two indexes. Because this data structure
+// uses a half open range the arguments must both be either left-side or
+// right-side pairs.
+func (b *Buffer) dist(begin, end int) int {
+	if begin <= end {
+		return end - begin
+	} else {
+		return b.cap - begin + end
+	}
+}
+
+// next returns the next index with wrapping.
+func (b *Buffer) next(index int) int {
+	index++
+	if index == b.cap {
+		return 0
+	}
+	return index
+}
+
+// nextby returns the index that is count newer with wrapping.
+func (b *Buffer) nextby(index, count int) int {
+	index += count
+	index %= b.cap
+	return index
+}
+
+// prev returns the previous index with wrapping. 
+func (b *Buffer) prev(index int) int { + index-- + if index < 0 { + return b.cap - 1 + } + return index +} + func (b *Buffer) resetBatch() { b.batchFirst = 0 - b.batchLast = 0 b.batchSize = 0 } diff --git a/internal/models/buffer_test.go b/internal/models/buffer_test.go index 246aaf6ea..7aa55a2c2 100644 --- a/internal/models/buffer_test.go +++ b/internal/models/buffer_test.go @@ -6,6 +6,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -29,13 +30,17 @@ func (m *MockMetric) Drop() { } func Metric() telegraf.Metric { + return MetricTime(0) +} + +func MetricTime(sec int64) telegraf.Metric { m, err := metric.New( "cpu", map[string]string{}, map[string]interface{}{ "value": 42.0, }, - time.Unix(0, 0), + time.Unix(sec, 0), ) if err != nil { panic(err) @@ -147,6 +152,256 @@ func TestBuffer_BatchWrap(t *testing.T) { require.Len(t, batch, 5) } +func TestBuffer_BatchLatest(t *testing.T) { + b := setup(NewBuffer("test", 4)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + batch := b.Batch(2) + + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(3), + MetricTime(2), + }, batch) +} + +func TestBuffer_BatchLatestWrap(t *testing.T) { + b := setup(NewBuffer("test", 4)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + batch := b.Batch(2) + + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(5), + MetricTime(4), + }, batch) +} + +func TestBuffer_MultipleBatch(t *testing.T) { + b := setup(NewBuffer("test", 10)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + b.Add(MetricTime(6)) + batch := b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(6), + MetricTime(5), + MetricTime(4), + MetricTime(3), + MetricTime(2), + }, batch) + b.Accept(batch) + batch = b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(1), + }, batch) + b.Accept(batch) +} + +func TestBuffer_RejectWithRoom(t *testing.T) { + b := setup(NewBuffer("test", 5)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + batch := b.Batch(2) + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + b.Reject(batch) + + require.Equal(t, int64(0), b.MetricsDropped.Get()) + + batch = b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(5), + MetricTime(4), + MetricTime(3), + MetricTime(2), + MetricTime(1), + }, batch) +} + +func TestBuffer_RejectNothingNewFull(t *testing.T) { + b := setup(NewBuffer("test", 5)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + batch := b.Batch(2) + b.Reject(batch) + + require.Equal(t, int64(0), b.MetricsDropped.Get()) + + batch = b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(5), + MetricTime(4), + MetricTime(3), + MetricTime(2), + MetricTime(1), + }, batch) +} + +func TestBuffer_RejectNoRoom(t *testing.T) { + b := setup(NewBuffer("test", 5)) + b.Add(MetricTime(1)) + + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + batch := b.Batch(2) + + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + b.Add(MetricTime(6)) + b.Add(MetricTime(7)) + b.Add(MetricTime(8)) + + b.Reject(batch) + + require.Equal(t, int64(3), b.MetricsDropped.Get()) + + batch = b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(8), + MetricTime(7), + 
MetricTime(6), + MetricTime(5), + MetricTime(4), + }, batch) +} + +func TestBuffer_RejectRoomExact(t *testing.T) { + b := setup(NewBuffer("test", 5)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + batch := b.Batch(2) + b.Add(MetricTime(3)) + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + + b.Reject(batch) + + require.Equal(t, int64(0), b.MetricsDropped.Get()) + + batch = b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(5), + MetricTime(4), + MetricTime(3), + MetricTime(2), + MetricTime(1), + }, batch) +} + +func TestBuffer_RejectRoomOverwriteOld(t *testing.T) { + b := setup(NewBuffer("test", 5)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + batch := b.Batch(1) + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + b.Add(MetricTime(6)) + + b.Reject(batch) + + require.Equal(t, int64(1), b.MetricsDropped.Get()) + + batch = b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(6), + MetricTime(5), + MetricTime(4), + MetricTime(3), + MetricTime(2), + }, batch) +} + +func TestBuffer_RejectPartialRoom(t *testing.T) { + b := setup(NewBuffer("test", 5)) + b.Add(MetricTime(1)) + + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + batch := b.Batch(2) + + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + b.Add(MetricTime(6)) + b.Add(MetricTime(7)) + b.Reject(batch) + + require.Equal(t, int64(2), b.MetricsDropped.Get()) + + batch = b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(7), + MetricTime(6), + MetricTime(5), + MetricTime(4), + MetricTime(3), + }, batch) +} + +func TestBuffer_RejectWrapped(t *testing.T) { + b := setup(NewBuffer("test", 5)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + batch := b.Batch(2) + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + + // buffer: 1, 4, 5; batch: 2, 3 + require.Equal(t, int64(0), b.MetricsDropped.Get()) + + b.Add(MetricTime(6)) + b.Add(MetricTime(7)) + b.Add(MetricTime(8)) + b.Add(MetricTime(9)) + b.Add(MetricTime(10)) + + // buffer: 8, 9, 10, 6, 7; batch: 2, 3 + require.Equal(t, int64(3), b.MetricsDropped.Get()) + + b.Add(MetricTime(11)) + b.Add(MetricTime(12)) + b.Add(MetricTime(13)) + b.Add(MetricTime(14)) + b.Add(MetricTime(15)) + // buffer: 13, 14, 15, 11, 12; batch: 2, 3 + require.Equal(t, int64(8), b.MetricsDropped.Get()) + b.Reject(batch) + + require.Equal(t, int64(10), b.MetricsDropped.Get()) + + batch = b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(15), + MetricTime(14), + MetricTime(13), + MetricTime(12), + MetricTime(11), + }, batch) +} + func TestBuffer_AddDropsOverwrittenMetrics(t *testing.T) { m := Metric() b := setup(NewBuffer("test", 5)) @@ -210,8 +465,8 @@ func TestBuffer_MetricsOverwriteBatchAccept(t *testing.T) { batch := b.Batch(3) b.Add(m, m, m) b.Accept(batch) - require.Equal(t, int64(0), b.MetricsDropped.Get()) - require.Equal(t, int64(3), b.MetricsWritten.Get()) + require.Equal(t, int64(0), b.MetricsDropped.Get(), "dropped") + require.Equal(t, int64(3), b.MetricsWritten.Get(), "written") } func TestBuffer_MetricsOverwriteBatchReject(t *testing.T) { @@ -254,7 +509,7 @@ func TestBuffer_BatchNotRemoved(t *testing.T) { b := setup(NewBuffer("test", 5)) b.Add(m, m, m, m, m) b.Batch(2) - require.Equal(t, 5, b.Len()) + require.Equal(t, 3, b.Len()) } func TestBuffer_BatchRejectAcceptNoop(t *testing.T) { @@ -310,10 +565,8 @@ func TestBuffer_AddCallsMetricRejectWhenNotInBatch(t *testing.T) { b.Add(mm, mm, mm, mm, mm) batch := b.Batch(2) b.Add(mm, mm, mm, mm) - // metric[2] and metric[3] rejected 
require.Equal(t, 2, reject) b.Reject(batch) - // metric[1] and metric[2] now rejected require.Equal(t, 4, reject) } diff --git a/internal/models/running_output_test.go b/internal/models/running_output_test.go index fe8755395..fd38b0faa 100644 --- a/internal/models/running_output_test.go +++ b/internal/models/running_output_test.go @@ -7,7 +7,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -28,6 +27,14 @@ var next5 = []telegraf.Metric{ testutil.TestMetric(101, "metric10"), } +func reverse(metrics []telegraf.Metric) []telegraf.Metric { + result := make([]telegraf.Metric, 0, len(metrics)) + for i := len(metrics) - 1; i >= 0; i-- { + result = append(result, metrics[i]) + } + return result +} + // Benchmark adding metrics. func BenchmarkRunningOutputAddWrite(b *testing.B) { conf := &OutputConfig{ @@ -297,7 +304,7 @@ func TestRunningOutputWriteFailOrder(t *testing.T) { // Verify that 10 metrics were written assert.Len(t, m.Metrics(), 10) // Verify that they are in order - expected := append(first5, next5...) + expected := append(reverse(next5), reverse(first5)...) assert.Equal(t, expected, m.Metrics()) } @@ -355,24 +362,17 @@ func TestRunningOutputWriteFailOrder2(t *testing.T) { err = ro.Write() require.NoError(t, err) - // Verify that 10 metrics were written + // Verify that 20 metrics were written assert.Len(t, m.Metrics(), 20) // Verify that they are in order - expected := append(first5, next5...) - expected = append(expected, first5...) - expected = append(expected, next5...) + expected := append(reverse(next5), reverse(first5)...) + expected = append(expected, reverse(next5)...) + expected = append(expected, reverse(first5)...) assert.Equal(t, expected, m.Metrics()) } // Verify that the order of points is preserved when there is a remainder // of points for the batch. -// -// ie, with a batch size of 5: -// -// 1 2 3 4 5 6 <-- order, failed points -// 6 1 2 3 4 5 <-- order, after 1st write failure (1 2 3 4 5 was batch) -// 1 2 3 4 5 6 <-- order, after 2nd write failure, (6 was batch) -// func TestRunningOutputWriteFailOrder3(t *testing.T) { conf := &OutputConfig{ Filter: Filter{}, @@ -408,7 +408,7 @@ func TestRunningOutputWriteFailOrder3(t *testing.T) { // Verify that 6 metrics were written assert.Len(t, m.Metrics(), 6) // Verify that they are in order - expected := append(first5, next5[0]) + expected := []telegraf.Metric{next5[0], first5[4], first5[3], first5[2], first5[1], first5[0]} assert.Equal(t, expected, m.Metrics()) } diff --git a/testutil/metric.go b/testutil/metric.go index 6d0db4e17..5ce0a99a6 100644 --- a/testutil/metric.go +++ b/testutil/metric.go @@ -19,6 +19,10 @@ type metricDiff struct { } func newMetricDiff(metric telegraf.Metric) *metricDiff { + if metric == nil { + return nil + } + m := &metricDiff{} m.Measurement = metric.Name() From 059ab5d16b6b15bfc452802f7330cade4c588956 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 15 Jan 2019 11:54:04 -0800 Subject: [PATCH 0517/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 37ea2b554..88e2103e4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,8 @@ #### Bugfixes - [#5261](https://github.com/influxdata/telegraf/pull/5261): Fix arithmetic overflow in sqlserver input. +- [#5194](https://github.com/influxdata/telegraf/issues/5194): Fix latest metrics not sent first when output fails. 
+- [#5285](https://github.com/influxdata/telegraf/issues/5285): Fix amqp_consumer stops consuming when it receives unparsable messages.

 ## v1.9.2 [2019-01-08]

From e404e5145b9f0c1473ecf3b0097cef69c8fda118 Mon Sep 17 00:00:00 2001
From: Dmitry Ilyin
Date: Tue, 15 Jan 2019 23:56:40 +0300
Subject: [PATCH 0518/1815] Add nginx_upstream_check input plugin (#4303)

---
 README.md                                     |   1 +
 plugins/inputs/all/all.go                     |   1 +
 plugins/inputs/nginx_upstream_check/README.md |  75 ++++++
 .../nginx_upstream_check.go                   | 224 ++++++++++++++++++
 .../nginx_upstream_check_test.go              | 135 +++++++++++
 5 files changed, 436 insertions(+)
 create mode 100644 plugins/inputs/nginx_upstream_check/README.md
 create mode 100644 plugins/inputs/nginx_upstream_check/nginx_upstream_check.go
 create mode 100644 plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go

diff --git a/README.md b/README.md
index 4da35f744..aff4d2073 100644
--- a/README.md
+++ b/README.md
@@ -219,6 +219,7 @@ For documentation on the latest development code see the [documentation index][d
 * [nginx_vts](./plugins/inputs/nginx_vts)
 * [nsq_consumer](./plugins/inputs/nsq_consumer)
 * [nginx_vts](./plugins/inputs/nginx_vts)
+* [nginx_upstream_check](./plugins/inputs/nginx_upstream_check)
 * [nsq](./plugins/inputs/nsq)
 * [nstat](./plugins/inputs/nstat)
 * [ntpq](./plugins/inputs/ntpq)
diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 2b189e5ff..106c0118c 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -85,6 +85,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus_api"
+	_ "github.com/influxdata/telegraf/plugins/inputs/nginx_upstream_check"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nginx_vts"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
diff --git a/plugins/inputs/nginx_upstream_check/README.md b/plugins/inputs/nginx_upstream_check/README.md
new file mode 100644
index 000000000..4ff76889d
--- /dev/null
+++ b/plugins/inputs/nginx_upstream_check/README.md
@@ -0,0 +1,75 @@
+# Telegraf Plugin: Nginx_upstream_check
+
+Read the status output of the nginx_upstream_check module (https://github.com/yaoweibin/nginx_upstream_check_module).
+This module can periodically check the servers in an Nginx upstream, using a configured request and interval, to determine
+whether each server is still available. If the checks fail, the server is marked as "down" and will not receive any requests
+until a check passes and the server is marked as "up" again.
+
+The status page displays the current status of all upstreams and servers, as well as the number of failed and successful
+checks. This information can be exported in JSON format and parsed by this input; a minimal sketch of consuming that JSON follows.
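For orientation, a minimal standalone consumer of that JSON might look like the sketch below. The struct mirrors the fixture used in this patch's tests and the URL is the plugin's default; both are assumptions for illustration rather than part of the plugin itself.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// checkStatus models the subset of the check_status JSON used here.
type checkStatus struct {
	Servers struct {
		Server []struct {
			Upstream string `json:"upstream"`
			Name     string `json:"name"`
			Status   string `json:"status"`
			Rise     uint64 `json:"rise"`
			Fall     uint64 `json:"fall"`
		} `json:"server"`
	} `json:"servers"`
}

func main() {
	resp, err := http.Get("http://127.0.0.1/status?format=json")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var status checkStatus
	if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
		panic(err)
	}
	// print one line per upstream server, mirroring the fields the plugin reports
	for _, s := range status.Servers.Server {
		fmt.Printf("%s/%s: %s (rise=%d fall=%d)\n",
			s.Upstream, s.Name, s.Status, s.Rise, s.Fall)
	}
}
```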
+
+### Configuration:
+
+```
+  ## A URL where the Nginx upstream check module is enabled
+  ## It should be set to return a JSON formatted response
+  url = "http://127.0.0.1/status?format=json"
+
+  ## HTTP method
+  # method = "GET"
+
+  ## Optional HTTP headers
+  # headers = {"X-Special-Header" = "Special-Value"}
+
+  ## Override HTTP "Host" header
+  # host_header = "check.example.com"
+
+  ## Timeout for HTTP requests
+  timeout = "5s"
+
+  ## Optional HTTP Basic Auth credentials
+  # username = "username"
+  # password = "pa$$word"
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+```
+
+### Measurements & Fields:
+
+- Measurement
+  - fall (The number of failed server check attempts, counter)
+  - rise (The number of successful server check attempts, counter)
+  - status (The reported server status as a string)
+  - status_code (The server status code. 1 - up, 2 - down, 0 - other)
+
+The "status_code" field will most likely be the most useful one, because it allows you to determine the current
+state of every server and, possibly, add some monitoring to watch over it. InfluxDB can use string values and the
+"status" field can be used instead, but for most other monitoring solutions the integer code will be appropriate.
+
+### Tags:
+
+- All measurements have the following tags:
+  - name (The hostname or IP of the upstream server)
+  - port (The alternative check port, 0 if the default one is used)
+  - type (The check type, http/tcp)
+  - upstream (The name of the upstream block in the Nginx configuration)
+  - url (The status url used by telegraf)
+
+### Example Output:
+
+When run with:
+```
+./telegraf --config telegraf.conf --input-filter nginx_upstream_check --test
+```
+
+It produces:
+```
+* Plugin: nginx_upstream_check, Collection 1
+> nginx_upstream_check,host=node1,name=192.168.0.1:8080,port=0,type=http,upstream=my_backends,url=http://127.0.0.1:80/status?format\=json fall=0i,rise=100i,status="up",status_code=1i 1529088524000000000
+> nginx_upstream_check,host=node2,name=192.168.0.2:8080,port=0,type=http,upstream=my_backends,url=http://127.0.0.1:80/status?format\=json fall=100i,rise=0i,status="down",status_code=2i 1529088524000000000
+```
diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go
new file mode 100644
index 000000000..e5a2e096d
--- /dev/null
+++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go
@@ -0,0 +1,224 @@
+package nginx_upstream_check
+
+import (
+	"encoding/json"
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/internal/tls"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"net/http"
+	"net/url"
+	"strconv"
+	"time"
+)
+
+const sampleConfig = `
+  ## A URL where the Nginx upstream check module is enabled
+  ## It should be set to return a JSON formatted response
+  url = "http://127.0.0.1/status?format=json"
+
+  ## HTTP method
+  # method = "GET"
+
+  ## Optional HTTP headers
+  # headers = {"X-Special-Header" = "Special-Value"}
+
+  ## Override HTTP "Host" header
+  # host_header = "check.example.com"
+
+  ## Timeout for HTTP requests
+  timeout = "5s"
+
+  ## Optional HTTP Basic Auth credentials
+  # username = "username"
+  # password = "pa$$word"
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+`
+
+const description = "Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module)"
+
+type NginxUpstreamCheck struct {
+	URL string `toml:"url"`
+
+	Username   string            `toml:"username"`
+	Password   string            `toml:"password"`
+	Method     string            `toml:"method"`
+	Headers    map[string]string `toml:"headers"`
+	HostHeader string            `toml:"host_header"`
+	Timeout    internal.Duration `toml:"timeout"`
+
+	tls.ClientConfig
+	client *http.Client
+}
+
+func NewNginxUpstreamCheck() *NginxUpstreamCheck {
+	return &NginxUpstreamCheck{
+		URL:        "http://127.0.0.1/status?format=json",
+		Method:     "GET",
+		Headers:    make(map[string]string),
+		HostHeader: "",
+		Timeout:    internal.Duration{Duration: time.Second * 5},
+	}
+}
+
+func init() {
+	inputs.Add("nginx_upstream_check", func() telegraf.Input {
+		return NewNginxUpstreamCheck()
+	})
+}
+
+func (check *NginxUpstreamCheck) SampleConfig() string {
+	return sampleConfig
+}
+
+func (check *NginxUpstreamCheck) Description() string {
+	return description
+}
+
+type NginxUpstreamCheckData struct {
+	Servers struct {
+		Total      uint64                     `json:"total"`
+		Generation uint64                     `json:"generation"`
+		Server     []NginxUpstreamCheckServer `json:"server"`
+	} `json:"servers"`
+}
+
+type NginxUpstreamCheckServer struct {
+	Index    uint64 `json:"index"`
+	Upstream string `json:"upstream"`
+	Name     string `json:"name"`
+	Status   string `json:"status"`
+	Rise     uint64 `json:"rise"`
+	Fall     uint64 `json:"fall"`
+	Type     string `json:"type"`
+	Port     uint16 `json:"port"`
+}
+
+// createHttpClient creates an HTTP client used to access the status API
+func (check *NginxUpstreamCheck) createHttpClient() (*http.Client, error) {
+	tlsConfig, err := check.ClientConfig.TLSConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	client := &http.Client{
+		Transport: &http.Transport{
+			TLSClientConfig: tlsConfig,
+		},
+		Timeout: check.Timeout.Duration,
+	}
+
+	return client, nil
+}
+
+// gatherJsonData queries the data source and parses the response JSON
+func (check *NginxUpstreamCheck) gatherJsonData(url string, value interface{}) error {
+
+	var method string
+	if check.Method != "" {
+		method = check.Method
+	} else {
+		method = "GET"
+	}
+
+	request, err := http.NewRequest(method, url, nil)
+	if err != nil {
+		return err
+	}
+
+	if (check.Username != "") || (check.Password != "") {
+		request.SetBasicAuth(check.Username, check.Password)
+	}
+	for header, value := range check.Headers {
+		request.Header.Add(header, value)
+	}
+	if check.HostHeader != "" {
+		request.Host = check.HostHeader
+	}
+
+	response, err := check.client.Do(request)
+	if err != nil {
+		return err
+	}
+
+	defer response.Body.Close()
+
+	err = json.NewDecoder(response.Body).Decode(value)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (check *NginxUpstreamCheck) Gather(accumulator telegraf.Accumulator) error {
+	if check.client == nil {
+		client, err := check.createHttpClient()
+
+		if err != nil {
+			return err
+		}
+		check.client = client
+	}
+
+	statusURL, err := url.Parse(check.URL)
+	if err != nil {
+		return err
+	}
+
+	err = check.gatherStatusData(statusURL.String(), accumulator)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (check *NginxUpstreamCheck) gatherStatusData(url string, accumulator telegraf.Accumulator) error {
+	checkData := &NginxUpstreamCheckData{}
+
+	err := check.gatherJsonData(url, checkData)
+	if err != nil {
+		return err
+	}
+
+	for _, server := range checkData.Servers.Server { 
+ + tags := map[string]string{ + "upstream": server.Upstream, + "type": server.Type, + "name": server.Name, + "port": strconv.Itoa(int(server.Port)), + "url": url, + } + + fields := map[string]interface{}{ + "status": server.Status, + "status_code": check.getStatusCode(server.Status), + "rise": server.Rise, + "fall": server.Fall, + } + + accumulator.AddFields("nginx_upstream_check", fields, tags) + } + + return nil +} + +func (check *NginxUpstreamCheck) getStatusCode(status string) uint8 { + switch status { + case "up": + return 1 + case "down": + return 2 + default: + return 0 + } +} diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go new file mode 100644 index 000000000..1b70770d0 --- /dev/null +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go @@ -0,0 +1,135 @@ +package nginx_upstream_check + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +const sampleStatusResponse = ` +{ + "servers": { + "total": 2, + "generation": 1, + "server": [ + { + "index": 0, + "upstream": "upstream-1", + "name": "127.0.0.1:8081", + "status": "up", + "rise": 1000, + "fall": 0, + "type": "http", + "port": 0 + }, + { + "index": 1, + "upstream": "upstream-2", + "name": "127.0.0.1:8082", + "status": "down", + "rise": 0, + "fall": 2000, + "type": "tcp", + "port": 8080 + } + ] + } +} +` + +func TestNginxUpstreamCheckData(test *testing.T) { + testServer := httptest.NewServer(http.HandlerFunc(func(responseWriter http.ResponseWriter, request *http.Request) { + var response string + + if request.URL.Path == "/status" { + response = sampleStatusResponse + responseWriter.Header()["Content-Type"] = []string{"application/json"} + } else { + panic("Cannot handle request") + } + + fmt.Fprintln(responseWriter, response) + })) + defer testServer.Close() + + check := NewNginxUpstreamCheck() + check.URL = fmt.Sprintf("%s/status", testServer.URL) + + var accumulator testutil.Accumulator + + checkError := check.Gather(&accumulator) + require.NoError(test, checkError) + + accumulator.AssertContainsTaggedFields( + test, + "nginx_upstream_check", + map[string]interface{}{ + "status": string("up"), + "status_code": uint8(1), + "rise": uint64(1000), + "fall": uint64(0), + }, + map[string]string{ + "upstream": string("upstream-1"), + "type": string("http"), + "name": string("127.0.0.1:8081"), + "port": string("0"), + "url": fmt.Sprintf("%s/status", testServer.URL), + }) + + accumulator.AssertContainsTaggedFields( + test, + "nginx_upstream_check", + map[string]interface{}{ + "status": string("down"), + "status_code": uint8(2), + "rise": uint64(0), + "fall": uint64(2000), + }, + map[string]string{ + "upstream": string("upstream-2"), + "type": string("tcp"), + "name": string("127.0.0.1:8082"), + "port": string("8080"), + "url": fmt.Sprintf("%s/status", testServer.URL), + }) +} + +func TestNginxUpstreamCheckRequest(test *testing.T) { + testServer := httptest.NewServer(http.HandlerFunc(func(responseWriter http.ResponseWriter, request *http.Request) { + var response string + + if request.URL.Path == "/status" { + response = sampleStatusResponse + responseWriter.Header()["Content-Type"] = []string{"application/json"} + } else { + panic("Cannot handle request") + } + + fmt.Fprintln(responseWriter, response) + + require.Equal(test, request.Method, "POST") + require.Equal(test, request.Header.Get("X-Test"), "test-value") + 
require.Equal(test, request.Header.Get("Authorization"), "Basic dXNlcjpwYXNzd29yZA==") + require.Equal(test, request.Host, "status.local") + + })) + defer testServer.Close() + + check := NewNginxUpstreamCheck() + check.URL = fmt.Sprintf("%s/status", testServer.URL) + check.Headers["X-test"] = "test-value" + check.HostHeader = "status.local" + check.Username = "user" + check.Password = "password" + check.Method = "POST" + + var accumulator testutil.Accumulator + + checkError := check.Gather(&accumulator) + require.NoError(test, checkError) +} From a7b443c55b8603318e7f164c3d6ac02a3993f98a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 15 Jan 2019 12:59:14 -0800 Subject: [PATCH 0519/1815] Update changelog --- CHANGELOG.md | 1 + README.md | 5 ++--- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 88e2103e4..2e8f34956 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ - [cloud_pubsub](/plugins/inputs/cloud_pubsub/README.md) - Contributed by @emilymye - [neptune_apex](/plugins/inputs/neptune_apex/README.md) - Contributed by @MaxRenaud +- [nginx_upstream_check](/plugins/inputs/nginx_upstream_check/README.md) - Contributed by @dmitryilyin #### New Outputs diff --git a/README.md b/README.md index aff4d2073..2177be4cb 100644 --- a/README.md +++ b/README.md @@ -214,12 +214,11 @@ For documentation on the latest development code see the [documentation index][d * [net_response](./plugins/inputs/net_response) * [netstat](./plugins/inputs/net) * [nginx](./plugins/inputs/nginx) -* [nginx_plus](./plugins/inputs/nginx_plus) * [nginx_plus_api](./plugins/inputs/nginx_plus_api) +* [nginx_plus](./plugins/inputs/nginx_plus) +* [nginx_upstream_check](./plugins/inputs/nginx_upstream_check) * [nginx_vts](./plugins/inputs/nginx_vts) * [nsq_consumer](./plugins/inputs/nsq_consumer) -* [nginx_vts](./plugins/inputs/nginx_vts) -* [nginx_upstream_check](./plugins/inputs/nginx_upstream_check) * [nsq](./plugins/inputs/nsq) * [nstat](./plugins/inputs/nstat) * [ntpq](./plugins/inputs/ntpq) From d759b463453bedd900a621b6d8eb597d0bfa16f4 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 15 Jan 2019 16:25:26 -0700 Subject: [PATCH 0520/1815] Support passing bearer token directly in prometheus input (#5294) --- plugins/inputs/prometheus/README.md | 6 ++++-- plugins/inputs/prometheus/prometheus.go | 14 +++++++++----- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index 37265d332..9208f54be 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -25,8 +25,10 @@ in Prometheus format. ## - prometheus.io/port: If port is not 9102 use this annotation # monitor_kubernetes_pods = true - ## Use bearer token for authorization - # bearer_token = /path/to/bearer/token + ## Use bearer token for authorization. 
('bearer_token' takes priority) + # bearer_token = "/path/to/bearer/token" + ## OR + # bearer_token_string = "abc_123" ## Specify timeout duration for slower prometheus clients (default is 3s) # response_timeout = "3s" diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index eaadf1452..94c8ae857 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -31,7 +31,8 @@ type Prometheus struct { KubeConfig string // Bearer Token authorization file path - BearerToken string `toml:"bearer_token"` + BearerToken string `toml:"bearer_token"` + BearerTokenString string `toml:"bearer_token_string"` ResponseTimeout internal.Duration `toml:"response_timeout"` @@ -65,8 +66,10 @@ var sampleConfig = ` ## - prometheus.io/port: If port is not 9102 use this annotation # monitor_kubernetes_pods = true - ## Use bearer token for authorization - # bearer_token = /path/to/bearer/token + ## Use bearer token for authorization. ('bearer_token' takes priority) + # bearer_token = "/path/to/bearer/token" + ## OR + # bearer_token_string = "abc_123" ## Specify timeout duration for slower prometheus clients (default is 3s) # response_timeout = "3s" @@ -230,13 +233,14 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error req.Header.Add("Accept", acceptHeader) - var token []byte if p.BearerToken != "" { - token, err = ioutil.ReadFile(p.BearerToken) + token, err := ioutil.ReadFile(p.BearerToken) if err != nil { return err } req.Header.Set("Authorization", "Bearer "+string(token)) + } else if p.BearerTokenString != "" { + req.Header.Set("Authorization", "Bearer "+p.BearerTokenString) } var resp *http.Response From 50ba5c15a4af97be2f18444154e9ed2dbca54be2 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 15 Jan 2019 16:26:18 -0700 Subject: [PATCH 0521/1815] Support passing bearer token directly in k8s input (#5295) --- plugins/inputs/kubernetes/README.md | 80 +++++++++++++------------ plugins/inputs/kubernetes/kubernetes.go | 29 +++++---- 2 files changed, 56 insertions(+), 53 deletions(-) diff --git a/plugins/inputs/kubernetes/README.md b/plugins/inputs/kubernetes/README.md index 37d713d18..33cca8590 100644 --- a/plugins/inputs/kubernetes/README.md +++ b/plugins/inputs/kubernetes/README.md @@ -29,8 +29,10 @@ avoid cardinality issues: ## URL for the kubelet url = "http://127.0.0.1:10255" - ## Use bearer token for authorization - # bearer_token = /path/to/bearer/token + ## Use bearer token for authorization. ('bearer_token' takes priority) + # bearer_token = "/path/to/bearer/token" + ## OR + # bearer_token_string = "abc_123" ## Set response_timeout (default 5 seconds) # response_timeout = "5s" @@ -54,45 +56,45 @@ Architecture][k8s-telegraf] or view the [Helm charts][tick-charts]. 
- tags: - node_name - fields: - - cpu_usage_nanocores - - cpu_usage_core_nanoseconds - - memory_available_bytes - - memory_usage_bytes - - memory_working_set_bytes - - memory_rss_bytes - - memory_page_faults - - memory_major_page_faults - - network_rx_bytes - - network_rx_errors - - network_tx_bytes - - network_tx_errors - - fs_available_bytes - - fs_capacity_bytes - - fs_used_bytes - - runtime_image_fs_available_bytes - - runtime_image_fs_capacity_bytes - - runtime_image_fs_used_bytes + - cpu_usage_nanocores + - cpu_usage_core_nanoseconds + - memory_available_bytes + - memory_usage_bytes + - memory_working_set_bytes + - memory_rss_bytes + - memory_page_faults + - memory_major_page_faults + - network_rx_bytes + - network_rx_errors + - network_tx_bytes + - network_tx_errors + - fs_available_bytes + - fs_capacity_bytes + - fs_used_bytes + - runtime_image_fs_available_bytes + - runtime_image_fs_capacity_bytes + - runtime_image_fs_used_bytes -- kubernetes_pod_container ++ kubernetes_pod_container - tags: - container_name - namespace - node_name - pod_name - fields: - - cpu_usage_nanocores - - cpu_usage_core_nanoseconds - - memory_usage_bytes - - memory_working_set_bytes - - memory_rss_bytes - - memory_page_faults - - memory_major_page_faults - - rootfs_available_bytes - - rootfs_capacity_bytes - - rootfs_used_bytes - - logsfs_avaialble_bytes - - logsfs_capacity_bytes - - logsfs_used_bytes + - cpu_usage_nanocores + - cpu_usage_core_nanoseconds + - memory_usage_bytes + - memory_working_set_bytes + - memory_rss_bytes + - memory_page_faults + - memory_major_page_faults + - rootfs_available_bytes + - rootfs_capacity_bytes + - rootfs_used_bytes + - logsfs_avaialble_bytes + - logsfs_capacity_bytes + - logsfs_used_bytes - kubernetes_pod_volume - tags: @@ -105,7 +107,7 @@ Architecture][k8s-telegraf] or view the [Helm charts][tick-charts]. - capacity_bytes - used_bytes -- kubernetes_pod_network ++ kubernetes_pod_network - tags: - namespace - node_name @@ -119,9 +121,11 @@ Architecture][k8s-telegraf] or view the [Helm charts][tick-charts]. 
### Example Output ``` -kubernetes_pod_container,host=ip-10-0-0-0.ec2.internal,container_name=deis-controller,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr cpu_usage_core_nanoseconds=2432835i,cpu_usage_nanocores=0i,logsfs_avaialble_bytes=121128271872i,logsfs_capacity_bytes=153567944704i,logsfs_used_bytes=20787200i,memory_major_page_faults=0i,memory_page_faults=175i,memory_rss_bytes=0i,memory_usage_bytes=0i,memory_working_set_bytes=0i,rootfs_available_bytes=121128271872i,rootfs_capacity_bytes=153567944704i,rootfs_used_bytes=1110016i 1476477530000000000 -kubernetes_pod_volume,host=ip-10-0-0-0.ec2.internal,name=default-token-f7wts,namespace=kube-system,node_name=ip-10-0-0-0.ec2.internal,pod_name=kubernetes-dashboard-v1.1.1-t4x4t available_bytes=8415240192i,capacity_bytes=8415252480i,used_bytes=12288i 1476477530000000000 -kubernetes_pod_network,host=ip-10-0-0-0.ec2.internal,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr rx_bytes=120671099i,rx_errors=0i,tx_bytes=102451983i,tx_errors=0i 1476477530000000000 +kubernetes_node +kubernetes_pod_container,container_name=deis-controller,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr cpu_usage_core_nanoseconds=2432835i,cpu_usage_nanocores=0i,logsfs_avaialble_bytes=121128271872i,logsfs_capacity_bytes=153567944704i,logsfs_used_bytes=20787200i,memory_major_page_faults=0i,memory_page_faults=175i,memory_rss_bytes=0i,memory_usage_bytes=0i,memory_working_set_bytes=0i,rootfs_available_bytes=121128271872i,rootfs_capacity_bytes=153567944704i,rootfs_used_bytes=1110016i 1476477530000000000 +kubernetes_pod_network,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr rx_bytes=120671099i,rx_errors=0i,tx_bytes=102451983i,tx_errors=0i 1476477530000000000 +kubernetes_pod_volume,volume_name=default-token-f7wts,namespace=default,node_name=ip-172-17-0-1.internal,pod_name=storage-7 available_bytes=8415240192i,capacity_bytes=8415252480i,used_bytes=12288i 1546910783000000000 +kubernetes_system_container ``` [metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go index 870524a80..fdeb78ec4 100644 --- a/plugins/inputs/kubernetes/kubernetes.go +++ b/plugins/inputs/kubernetes/kubernetes.go @@ -6,7 +6,7 @@ import ( "io/ioutil" "net/http" "net/url" - "sync" + "strings" "time" "github.com/influxdata/telegraf" @@ -20,7 +20,8 @@ type Kubernetes struct { URL string // Bearer Token authorization file path - BearerToken string `toml:"bearer_token"` + BearerToken string `toml:"bearer_token"` + BearerTokenString string `toml:"bearer_token_string"` // HTTP Timeout specified as a string - 3s, 1m, 1h ResponseTimeout internal.Duration @@ -32,10 +33,12 @@ type Kubernetes struct { var sampleConfig = ` ## URL for the kubelet - url = "http://1.1.1.1:10255" + url = "http://127.0.0.1:10255" - ## Use bearer token for authorization - # bearer_token = /path/to/bearer/token + ## Use bearer token for authorization. 
('bearer_token' takes priority) + # bearer_token = "/path/to/bearer/token" + ## OR + # bearer_token_string = "abc_123" ## Set response_timeout (default 5 seconds) # response_timeout = "5s" @@ -70,13 +73,7 @@ func (k *Kubernetes) Description() string { //Gather collects kubernetes metrics from a given URL func (k *Kubernetes) Gather(acc telegraf.Accumulator) error { - var wg sync.WaitGroup - wg.Add(1) - go func(k *Kubernetes) { - defer wg.Done() - acc.AddError(k.gatherSummary(k.URL, acc)) - }(k) - wg.Wait() + acc.AddError(k.gatherSummary(k.URL, acc)) return nil } @@ -92,7 +89,6 @@ func buildURL(endpoint string, base string) (*url.URL, error) { func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) error { url := fmt.Sprintf("%s/stats/summary", baseURL) var req, err = http.NewRequest("GET", url, nil) - var token []byte var resp *http.Response tlsCfg, err := k.ClientConfig.TLSConfig() @@ -113,12 +109,15 @@ func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) err } if k.BearerToken != "" { - token, err = ioutil.ReadFile(k.BearerToken) + token, err := ioutil.ReadFile(k.BearerToken) if err != nil { return err } - req.Header.Set("Authorization", "Bearer "+string(token)) + req.Header.Set("Authorization", "Bearer "+strings.TrimSpace(string(token))) + } else if k.BearerTokenString != "" { + req.Header.Set("Authorization", "Bearer "+k.BearerTokenString) } + req.Header.Add("Accept", "application/json") resp, err = k.RoundTripper.RoundTrip(req) if err != nil { From e95b88e01b2c664e43ea45faf77569d8db44a656 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 15 Jan 2019 15:27:56 -0800 Subject: [PATCH 0522/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2e8f34956..ecd307ca3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,8 @@ - [#5210](https://github.com/influxdata/telegraf/pull/5210): Add read and write op per second fields to ceph input. - [#5214](https://github.com/influxdata/telegraf/pull/5214): Add configurable timeout to varnish input. - [#5273](https://github.com/influxdata/telegraf/pull/5273): Add flush_total_time_ns and additional wired tiger fields to mongodb input. +- [#5295](https://github.com/influxdata/telegraf/pull/5295): Support passing bearer token directly in k8s input. +- [#5294](https://github.com/influxdata/telegraf/pull/5294): Support passing bearer token directly in prometheus input. #### Bugfixes From 2b8729e0489e2d0dfb64e4e1f1aa28558eee18c2 Mon Sep 17 00:00:00 2001 From: "Artem V. 
Navrotskiy" Date: Thu, 17 Jan 2019 02:39:55 +0300 Subject: [PATCH 0523/1815] Fix typo in mongodb field name (#5299) --- plugins/inputs/mongodb/README.md | 2 +- plugins/inputs/mongodb/mongodb_data.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index 982936811..07ab133d4 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -97,7 +97,7 @@ Error in input [mongodb]: not authorized on admin to execute command { serverSta - wtcache_app_threads_page_write_count (integer) - wtcache_bytes_read_into (integer) - wtcache_bytes_written_from (integer) - - wtcache_pages_read_info (integer) + - wtcache_pages_read_into (integer) - wtcache_pages_requested_from (integer) - wtcache_current_bytes (integer) - wtcache_max_bytes_configured (integer) diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index c0e7baf65..5fa0c237d 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -138,7 +138,7 @@ var WiredTigerExtStats = map[string]string{ "wtcache_bytes_read_into": "BytesReadInto", "wtcache_pages_evicted_by_app_thread": "PagesEvictedByAppThread", "wtcache_pages_queued_for_eviction": "PagesQueuedForEviction", - "wtcache_pages_read_info": "PagesReadIntoCache", + "wtcache_pages_read_into": "PagesReadIntoCache", "wtcache_pages_requested_from": "PagesRequestedFromCache", "wtcache_server_evicting_pages": "ServerEvictingPages", "wtcache_worker_thread_evictingpages": "WorkerThreadEvictingPages", From b620a56d21272ec3eb3ad6b73714322df21c16f4 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Wed, 16 Jan 2019 16:49:24 -0700 Subject: [PATCH 0524/1815] Collect from newly discovered/launched pods (#5293) --- plugins/inputs/prometheus/kubernetes.go | 69 +++++++++++++------- plugins/inputs/prometheus/kubernetes_test.go | 21 +++++- plugins/inputs/prometheus/prometheus.go | 25 ++++--- 3 files changed, 84 insertions(+), 31 deletions(-) diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index 4faf2d55e..87db15ffe 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -45,6 +45,7 @@ func (p *Prometheus) start(ctx context.Context) error { if err != nil { return fmt.Errorf("Failed to get current user - %v", err) } + configLocation := filepath.Join(u.HomeDir, ".kube/config") if p.KubeConfig != "" { configLocation = p.KubeConfig @@ -76,6 +77,10 @@ func (p *Prometheus) start(ctx context.Context) error { return nil } +// An edge case exists if a pod goes offline at the same time a new pod is created +// (without the scrape annotations). K8s may re-assign the old pod ip to the non-scrape +// pod, causing errors in the logs. This is only true if the pod going offline is not +// directed to do so by K8s. func (p *Prometheus) watch(ctx context.Context, client *k8s.Client) error { pod := &corev1.Pod{} watcher, err := client.Watch(ctx, "", &corev1.Pod{}) @@ -96,18 +101,44 @@ func (p *Prometheus) watch(ctx context.Context, client *k8s.Client) error { return err } + // If the pod is not "ready", there will be no ip associated with it. 
+ if pod.GetMetadata().GetAnnotations()["prometheus.io/scrape"] != "true" || + !podReady(pod.Status.GetContainerStatuses()) { + continue + } + switch eventType { case k8s.EventAdded: registerPod(pod, p) - case k8s.EventDeleted: - unregisterPod(pod, p) case k8s.EventModified: + // To avoid multiple actions for each event, unregister on the first event + // in the delete sequence, when the containers are still "ready". + if pod.Metadata.GetDeletionTimestamp() != nil { + unregisterPod(pod, p) + } else { + registerPod(pod, p) + } } } } } +func podReady(statuss []*corev1.ContainerStatus) bool { + if len(statuss) == 0 { + return false + } + for _, cs := range statuss { + if !cs.GetReady() { + return false + } + } + return true +} + func registerPod(pod *corev1.Pod, p *Prometheus) { + if p.kubernetesPods == nil { + p.kubernetesPods = map[string]URLAndAddress{} + } targetURL := getScrapeURL(pod) if targetURL == nil { return @@ -116,6 +147,9 @@ func registerPod(pod *corev1.Pod, p *Prometheus) { log.Printf("D! [inputs.prometheus] will scrape metrics from %s", *targetURL) // add annotation as metrics tags tags := pod.GetMetadata().GetAnnotations() + if tags == nil { + tags = map[string]string{} + } tags["pod_name"] = pod.GetMetadata().GetName() tags["namespace"] = pod.GetMetadata().GetNamespace() // add labels as metrics tags @@ -129,20 +163,16 @@ func registerPod(pod *corev1.Pod, p *Prometheus) { } podURL := p.AddressToURL(URL, URL.Hostname()) p.lock.Lock() - p.kubernetesPods = append(p.kubernetesPods, - URLAndAddress{ - URL: podURL, - Address: URL.Hostname(), - OriginalURL: URL, - Tags: tags}) + p.kubernetesPods[podURL.String()] = URLAndAddress{ + URL: podURL, + Address: URL.Hostname(), + OriginalURL: URL, + Tags: tags, + } p.lock.Unlock() } func getScrapeURL(pod *corev1.Pod) *string { - scrape := pod.GetMetadata().GetAnnotations()["prometheus.io/scrape"] - if scrape != "true" { - return nil - } ip := pod.Status.GetPodIP() if ip == "" { // return as if scrape was disabled, we will be notified again once the pod @@ -181,18 +211,13 @@ func unregisterPod(pod *corev1.Pod, p *Prometheus) { return } - p.lock.Lock() - defer p.lock.Unlock() log.Printf("D! [inputs.prometheus] registered a delete request for %s in namespace %s", pod.GetMetadata().GetName(), pod.GetMetadata().GetNamespace()) - var result []URLAndAddress - for _, v := range p.kubernetesPods { - if v.URL.String() != *url { - result = append(result, v) - } else { - log.Printf("D! [inputs.prometheus] will stop scraping for %s", *url) - } + p.lock.Lock() + defer p.lock.Unlock() + if _, ok := p.kubernetesPods[*url]; ok { + delete(p.kubernetesPods, *url) + log.Printf("D! 
[inputs.prometheus] will stop scraping for %s", *url) } - p.kubernetesPods = result } diff --git a/plugins/inputs/prometheus/kubernetes_test.go b/plugins/inputs/prometheus/kubernetes_test.go index 2afdbc5ec..c1bbe0a1f 100644 --- a/plugins/inputs/prometheus/kubernetes_test.go +++ b/plugins/inputs/prometheus/kubernetes_test.go @@ -15,6 +15,7 @@ func TestScrapeURLNoAnnotations(t *testing.T) { url := getScrapeURL(p) assert.Nil(t, url) } + func TestScrapeURLAnnotationsNoScrape(t *testing.T) { p := &v1.Pod{Metadata: &metav1.ObjectMeta{}} p.Metadata.Name = str("myPod") @@ -22,18 +23,21 @@ func TestScrapeURLAnnotationsNoScrape(t *testing.T) { url := getScrapeURL(p) assert.Nil(t, url) } + func TestScrapeURLAnnotations(t *testing.T) { p := pod() p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} url := getScrapeURL(p) assert.Equal(t, "http://127.0.0.1:9102/metrics", *url) } + func TestScrapeURLAnnotationsCustomPort(t *testing.T) { p := pod() p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/port": "9000"} url := getScrapeURL(p) assert.Equal(t, "http://127.0.0.1:9000/metrics", *url) } + func TestScrapeURLAnnotationsCustomPath(t *testing.T) { p := pod() p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "mymetrics"} @@ -50,12 +54,14 @@ func TestScrapeURLAnnotationsCustomPathWithSep(t *testing.T) { func TestAddPod(t *testing.T) { prom := &Prometheus{} + p := pod() p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) assert.Equal(t, 1, len(prom.kubernetesPods)) } -func TestAddMultiplePods(t *testing.T) { + +func TestAddMultipleDuplicatePods(t *testing.T) { prom := &Prometheus{} p := pod() @@ -63,8 +69,21 @@ func TestAddMultiplePods(t *testing.T) { registerPod(p, prom) p.Metadata.Name = str("Pod2") registerPod(p, prom) + assert.Equal(t, 1, len(prom.kubernetesPods)) +} + +func TestAddMultiplePods(t *testing.T) { + prom := &Prometheus{} + + p := pod() + p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + registerPod(p, prom) + p.Metadata.Name = str("Pod2") + p.Status.PodIP = str("127.0.0.2") + registerPod(p, prom) assert.Equal(t, 2, len(prom.kubernetesPods)) } + func TestDeletePods(t *testing.T) { prom := &Prometheus{} diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 94c8ae857..879af4567 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -43,7 +43,7 @@ type Prometheus struct { // Should we scrape Kubernetes services for prometheus annotations MonitorPods bool `toml:"monitor_kubernetes_pods"` lock sync.Mutex - kubernetesPods []URLAndAddress + kubernetesPods map[string]URLAndAddress cancel context.CancelFunc wg sync.WaitGroup } @@ -118,21 +118,23 @@ type URLAndAddress struct { Tags map[string]string } -func (p *Prometheus) GetAllURLs() ([]URLAndAddress, error) { - allURLs := make([]URLAndAddress, 0) +func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) { + allURLs := make(map[string]URLAndAddress, 0) for _, u := range p.URLs { URL, err := url.Parse(u) if err != nil { log.Printf("prometheus: Could not parse %s, skipping it. 
Error: %s", u, err.Error()) continue } - - allURLs = append(allURLs, URLAndAddress{URL: URL, OriginalURL: URL}) + allURLs[URL.String()] = URLAndAddress{URL: URL, OriginalURL: URL} } + p.lock.Lock() defer p.lock.Unlock() // loop through all pods scraped via the prometheus annotation on the pods - allURLs = append(allURLs, p.kubernetesPods...) + for k, v := range p.kubernetesPods { + allURLs[k] = v + } for _, service := range p.KubernetesServices { URL, err := url.Parse(service) @@ -147,7 +149,11 @@ func (p *Prometheus) GetAllURLs() ([]URLAndAddress, error) { } for _, resolved := range resolvedAddresses { serviceURL := p.AddressToURL(URL, resolved) - allURLs = append(allURLs, URLAndAddress{URL: serviceURL, Address: resolved, OriginalURL: URL}) + allURLs[serviceURL.String()] = URLAndAddress{ + URL: serviceURL, + Address: resolved, + OriginalURL: URL, + } } } return allURLs, nil @@ -317,6 +323,9 @@ func (p *Prometheus) Stop() { func init() { inputs.Add("prometheus", func() telegraf.Input { - return &Prometheus{ResponseTimeout: internal.Duration{Duration: time.Second * 3}} + return &Prometheus{ + ResponseTimeout: internal.Duration{Duration: time.Second * 3}, + kubernetesPods: map[string]URLAndAddress{}, + } }) } From 452b13a4e30535905c6f7c3f410563bd56be9f84 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 16 Jan 2019 15:51:22 -0800 Subject: [PATCH 0525/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ecd307ca3..e15dad90b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,7 @@ - [#5261](https://github.com/influxdata/telegraf/pull/5261): Fix arithmetic overflow in sqlserver input. - [#5194](https://github.com/influxdata/telegraf/issues/5194): Fix latest metrics not sent first when output fails. - [#5285](https://github.com/influxdata/telegraf/issues/5285): Fix amqp_consumer stops consuming when it receives unparsable messages. +- [#5281](https://github.com/influxdata/telegraf/issues/5281): Fix prometheus input not detecting added and removed pods. ## v1.9.2 [2019-01-08] From 3380fdf69d893ad35a56044a1058511577bc6d3b Mon Sep 17 00:00:00 2001 From: "Artem V. 
Navrotskiy" Date: Thu, 17 Jan 2019 21:51:18 +0300 Subject: [PATCH 0526/1815] Add option to report input timestamp in prometheus output (#5292) --- Gopkg.lock | 7 ++++--- Gopkg.toml | 2 +- plugins/outputs/prometheus_client/README.md | 3 +++ .../prometheus_client/prometheus_client.go | 16 ++++++++++++++-- 4 files changed, 22 insertions(+), 6 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index e36b76d9b..98eafd39d 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -892,15 +892,16 @@ version = "v1.0.0" [[projects]] - digest = "1:4142d94383572e74b42352273652c62afec5b23f325222ed09198f46009022d1" + digest = "1:6f218995d6a74636cfcab45ce03005371e682b4b9bee0e5eb0ccfd83ef85364f" name = "github.com/prometheus/client_golang" packages = [ "prometheus", + "prometheus/internal", "prometheus/promhttp", ] pruneopts = "" - revision = "c5b7fccd204277076155f10851dad72b76a49317" - version = "v0.8.0" + revision = "505eaef017263e299324067d40ca2c48f6a2cf50" + version = "v0.9.2" [[projects]] branch = "master" diff --git a/Gopkg.toml b/Gopkg.toml index 02b499007..d1cbd081d 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -132,7 +132,7 @@ [[constraint]] name = "github.com/prometheus/client_golang" - version = "0.8.0" + version = "0.9.2" [[constraint]] name = "github.com/prometheus/client_model" diff --git a/plugins/outputs/prometheus_client/README.md b/plugins/outputs/prometheus_client/README.md index d4de4894a..c06fdbaf1 100644 --- a/plugins/outputs/prometheus_client/README.md +++ b/plugins/outputs/prometheus_client/README.md @@ -35,4 +35,7 @@ This plugin starts a [Prometheus](https://prometheus.io/) Client, it exposes all ## If set, enable TLS with the given certificate. # tls_cert = "/etc/ssl/telegraf.crt" # tls_key = "/etc/ssl/telegraf.key" + + ## Export metric collection time. + # export_timestamp = false ``` diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 0192d935f..ef81034cd 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -7,7 +7,6 @@ import ( "log" "net" "net/http" - "os" "regexp" "sort" "strconv" @@ -38,6 +37,8 @@ type Sample struct { // Histograms and Summaries need a count and a sum Count uint64 Sum float64 + // Metric timestamp + Timestamp time.Time // Expiration is the deadline that this Sample is valid until. Expiration time.Time } @@ -64,6 +65,7 @@ type PrometheusClient struct { Path string `toml:"path"` CollectorsExclude []string `toml:"collectors_exclude"` StringAsLabel bool `toml:"string_as_label"` + ExportTimestamp bool `toml:"export_timestamp"` server *http.Server @@ -103,6 +105,9 @@ var sampleConfig = ` ## If set, enable TLS with the given certificate. # tls_cert = "/etc/ssl/telegraf.crt" # tls_key = "/etc/ssl/telegraf.key" + + ## Export metric collection time. 
+ # export_timestamp = false ` func (p *PrometheusClient) auth(h http.Handler) http.Handler { @@ -159,7 +164,7 @@ func (p *PrometheusClient) Connect() error { case "gocollector": registry.Register(prometheus.NewGoCollector()) case "process": - registry.Register(prometheus.NewProcessCollector(os.Getpid(), "")) + registry.Register(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{})) default: return fmt.Errorf("unrecognized collector %s", collector) } @@ -282,6 +287,9 @@ func (p *PrometheusClient) Collect(ch chan<- prometheus.Metric) { name, labels, err.Error()) } + if p.ExportTimestamp { + metric = prometheus.NewMetricWithTimestamp(sample.Timestamp, metric) + } ch <- metric } } @@ -398,6 +406,7 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { SummaryValue: summaryvalue, Count: count, Sum: sum, + Timestamp: point.Time(), Expiration: now.Add(p.ExpirationInterval.Duration), } mname = sanitize(point.Name()) @@ -439,6 +448,7 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { HistogramValue: histogramvalue, Count: count, Sum: sum, + Timestamp: point.Time(), Expiration: now.Add(p.ExpirationInterval.Duration), } mname = sanitize(point.Name()) @@ -463,6 +473,7 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { sample := &Sample{ Labels: labels, Value: value, + Timestamp: point.Time(), Expiration: now.Add(p.ExpirationInterval.Duration), } @@ -500,6 +511,7 @@ func init() { return &PrometheusClient{ ExpirationInterval: internal.Duration{Duration: time.Second * 60}, StringAsLabel: true, + ExportTimestamp: true, fam: make(map[string]*MetricFamily), now: time.Now, } From 8ea181d67c8fc60ab3983ff32662b67af23c9051 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 17 Jan 2019 10:52:26 -0800 Subject: [PATCH 0527/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e15dad90b..d73e1b3d0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,7 @@ - [#5273](https://github.com/influxdata/telegraf/pull/5273): Add flush_total_time_ns and additional wired tiger fields to mongodb input. - [#5295](https://github.com/influxdata/telegraf/pull/5295): Support passing bearer token directly in k8s input. - [#5294](https://github.com/influxdata/telegraf/pull/5294): Support passing bearer token directly in prometheus input. +- [#5292](https://github.com/influxdata/telegraf/pull/5292): Add option to report input timestamp in prometheus output. #### Bugfixes From b6cc324d10cc4d8a64396ee968d31acf3ecf0398 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 17 Jan 2019 12:00:07 -0800 Subject: [PATCH 0528/1815] Add note on performance to grok parser documentation (#5291) --- plugins/parsers/grok/README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/plugins/parsers/grok/README.md b/plugins/parsers/grok/README.md index 32072b631..4ebbbd3f2 100644 --- a/plugins/parsers/grok/README.md +++ b/plugins/parsers/grok/README.md @@ -220,4 +220,14 @@ the file output will only print once per `flush_interval`. - If successful, add the next token, update the pattern and retest. - Continue one token at a time until the entire line is successfully parsed. +#### Performance +Performance depends heavily on the regular expressions that you use, but there +are a few techniques that can help: + +- Avoid using patterns such as `%{DATA}` that will always match. 
+- If possible, add `^` and `$` anchors to your pattern: + ``` + [[inputs.file]] + grok_patterns = ["^%{COMBINED_LOG_FORMAT}$"] + ``` From cf10d03bb97a10e3219dbab22358223a01136543 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Fri, 18 Jan 2019 12:43:24 -0700 Subject: [PATCH 0529/1815] Remove credentials from cluster tag in couchbase (#5313) --- plugins/inputs/couchbase/README.md | 14 ++------------ plugins/inputs/couchbase/couchbase.go | 2 +- 2 files changed, 3 insertions(+), 13 deletions(-) diff --git a/plugins/inputs/couchbase/README.md b/plugins/inputs/couchbase/README.md index 91e197b43..13eaa02c8 100644 --- a/plugins/inputs/couchbase/README.md +++ b/plugins/inputs/couchbase/README.md @@ -48,16 +48,6 @@ Fields: ## Example output ``` -$ telegraf --config telegraf.conf --input-filter couchbase --test -* Plugin: couchbase, Collection 1 -> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.10.187:8091 memory_free=22927384576,memory_total=64424656896 1458381183695864929 -> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.10.65:8091 memory_free=23520161792,memory_total=64424656896 1458381183695972112 -> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.13.105:8091 memory_free=23531704320,memory_total=64424656896 1458381183695995259 -> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.13.173:8091 memory_free=23628767232,memory_total=64424656896 1458381183696010870 -> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.15.120:8091 memory_free=23616692224,memory_total=64424656896 1458381183696027406 -> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.8.127:8091 memory_free=23431770112,memory_total=64424656896 1458381183696041040 -> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.8.148:8091 memory_free=23811371008,memory_total=64424656896 1458381183696059060 -> couchbase_bucket,bucket=default,cluster=https://couchbase-0.example.com/ data_used=25743360,disk_fetches=0,disk_used=31744886,item_count=0,mem_used=77729224,ops_per_sec=0,quota_percent_used=10.58976636614118 1458381183696210074 -> couchbase_bucket,bucket=demoncat,cluster=https://couchbase-0.example.com/ data_used=38157584951,disk_fetches=0,disk_used=62730302441,item_count=14662532,mem_used=24015304256,ops_per_sec=1207.753207753208,quota_percent_used=79.87855353525707 1458381183696242695 -> couchbase_bucket,bucket=blastro-df,cluster=https://couchbase-0.example.com/ data_used=212552491622,disk_fetches=0,disk_used=413323157621,item_count=944655680,mem_used=202421103760,ops_per_sec=1692.176692176692,quota_percent_used=68.9442170551845 1458381183696272206 +couchbase_node,cluster=http://localhost:8091/,hostname=172.17.0.2:8091 memory_free=7705575424,memory_total=16558182400 1547829754000000000 +couchbase_bucket,bucket=beer-sample,cluster=http://localhost:8091/ quota_percent_used=27.09285736083984,ops_per_sec=0,disk_fetches=0,item_count=7303,disk_used=21662946,data_used=9325087,mem_used=28408920 1547829754000000000 ``` diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go index f773f5d5b..de7f0bec0 100644 --- a/plugins/inputs/couchbase/couchbase.go +++ b/plugins/inputs/couchbase/couchbase.go @@ -86,7 +86,7 @@ func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *co } for bucketName := range pool.BucketMap { - tags := map[string]string{"cluster": addr, "bucket": bucketName} + tags := 
map[string]string{"cluster": regexpURI.ReplaceAllString(addr, "${1}"), "bucket": bucketName} bs := pool.BucketMap[bucketName].BasicStats fields := make(map[string]interface{}) fields["quota_percent_used"] = bs["quotaPercentUsed"] From fa9a654f2d7dbe92ef9f981ab3e4d1a6e9e9227e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 18 Jan 2019 11:45:10 -0800 Subject: [PATCH 0530/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d73e1b3d0..e0ee632b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ - [#5194](https://github.com/influxdata/telegraf/issues/5194): Fix latest metrics not sent first when output fails. - [#5285](https://github.com/influxdata/telegraf/issues/5285): Fix amqp_consumer stops consuming when it receives unparsable messages. - [#5281](https://github.com/influxdata/telegraf/issues/5281): Fix prometheus input not detecting added and removed pods. +- [#5215](https://github.com/influxdata/telegraf/issues/5215): Remove userinfo from cluster tag in couchbase. ## v1.9.2 [2019-01-08] From b34c5e0d04cad9ca9bce41369aea20d95c7e7a11 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 22 Jan 2019 13:43:51 -0800 Subject: [PATCH 0531/1815] Update the buffer_size internal metric after writes (#5314) --- internal/models/buffer.go | 29 +++++++++-- internal/models/buffer_test.go | 82 ++++++++++++++++++++++++++++++- internal/models/running_output.go | 54 ++++++-------------- 3 files changed, 120 insertions(+), 45 deletions(-) diff --git a/internal/models/buffer.go b/internal/models/buffer.go index 3d82e573a..7b27e6686 100644 --- a/internal/models/buffer.go +++ b/internal/models/buffer.go @@ -27,6 +27,8 @@ type Buffer struct { MetricsAdded selfstat.Stat MetricsWritten selfstat.Stat MetricsDropped selfstat.Stat + BufferSize selfstat.Stat + BufferLimit selfstat.Stat } // NewBuffer returns a new empty Buffer with the given capacity. @@ -53,7 +55,19 @@ func NewBuffer(name string, capacity int) *Buffer { "metrics_dropped", map[string]string{"output": name}, ), + BufferSize: selfstat.Register( + "write", + "buffer_size", + map[string]string{"output": name}, + ), + BufferLimit: selfstat.Register( + "write", + "buffer_limit", + map[string]string{"output": name}, + ), } + b.BufferSize.Set(int64(0)) + b.BufferLimit.Set(int64(capacity)) return b } @@ -62,7 +76,11 @@ func (b *Buffer) Len() int { b.Lock() defer b.Unlock() - return b.size + return b.length() +} + +func (b *Buffer) length() int { + return min(b.size+b.batchSize, b.cap) } func (b *Buffer) metricAdded() { @@ -112,6 +130,8 @@ func (b *Buffer) Add(metrics ...telegraf.Metric) { for i := range metrics { b.add(metrics[i]) } + + b.BufferSize.Set(int64(b.length())) } // Batch returns a slice containing up to batchSize of the most recently added @@ -153,6 +173,7 @@ func (b *Buffer) Accept(batch []telegraf.Metric) { } b.resetBatch() + b.BufferSize.Set(int64(b.length())) } // Reject returns the batch, acquired from Batch(), to the buffer and marks it @@ -176,6 +197,7 @@ func (b *Buffer) Reject(batch []telegraf.Metric) { if b.buf[re] != nil { b.metricDropped(b.buf[re]) + b.first = b.next(b.first) } b.buf[re] = b.buf[rp] @@ -188,13 +210,14 @@ func (b *Buffer) Reject(batch []telegraf.Metric) { if i < restore { re = b.prev(re) b.buf[re] = batch[i] - b.size++ + b.size = min(b.size+1, b.cap) } else { b.metricDropped(batch[i]) } } b.resetBatch() + b.BufferSize.Set(int64(b.length())) } // dist returns the distance between two indexes. 
Because this data structure @@ -204,7 +227,7 @@ func (b *Buffer) dist(begin, end int) int { if begin <= end { return end - begin } else { - return b.cap - begin - 1 + end + return b.cap - begin + end } } diff --git a/internal/models/buffer_test.go b/internal/models/buffer_test.go index 7aa55a2c2..892af8bd4 100644 --- a/internal/models/buffer_test.go +++ b/internal/models/buffer_test.go @@ -359,7 +359,7 @@ func TestBuffer_RejectPartialRoom(t *testing.T) { }, batch) } -func TestBuffer_RejectWrapped(t *testing.T) { +func TestBuffer_RejectNewMetricsWrapped(t *testing.T) { b := setup(NewBuffer("test", 5)) b.Add(MetricTime(1)) b.Add(MetricTime(2)) @@ -402,6 +402,84 @@ func TestBuffer_RejectWrapped(t *testing.T) { }, batch) } +func TestBuffer_RejectWrapped(t *testing.T) { + b := setup(NewBuffer("test", 5)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + + b.Add(MetricTime(6)) + b.Add(MetricTime(7)) + b.Add(MetricTime(8)) + batch := b.Batch(3) + + b.Add(MetricTime(9)) + b.Add(MetricTime(10)) + b.Add(MetricTime(11)) + b.Add(MetricTime(12)) + + b.Reject(batch) + + batch = b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(12), + MetricTime(11), + MetricTime(10), + MetricTime(9), + MetricTime(8), + }, batch) +} + +func TestBuffer_RejectAdjustFirst(t *testing.T) { + b := setup(NewBuffer("test", 10)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + batch := b.Batch(3) + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + b.Add(MetricTime(6)) + b.Reject(batch) + + b.Add(MetricTime(7)) + b.Add(MetricTime(8)) + b.Add(MetricTime(9)) + batch = b.Batch(3) + b.Add(MetricTime(10)) + b.Add(MetricTime(11)) + b.Add(MetricTime(12)) + b.Reject(batch) + + b.Add(MetricTime(13)) + b.Add(MetricTime(14)) + b.Add(MetricTime(15)) + batch = b.Batch(3) + b.Add(MetricTime(16)) + b.Add(MetricTime(17)) + b.Add(MetricTime(18)) + b.Reject(batch) + + b.Add(MetricTime(19)) + + batch = b.Batch(10) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(19), + MetricTime(18), + MetricTime(17), + MetricTime(16), + MetricTime(15), + MetricTime(14), + MetricTime(13), + MetricTime(12), + MetricTime(11), + MetricTime(10), + }, batch) +} + func TestBuffer_AddDropsOverwrittenMetrics(t *testing.T) { m := Metric() b := setup(NewBuffer("test", 5)) @@ -509,7 +587,7 @@ func TestBuffer_BatchNotRemoved(t *testing.T) { b := setup(NewBuffer("test", 5)) b.Add(m, m, m, m, m) b.Batch(2) - require.Equal(t, 3, b.Len()) + require.Equal(t, 5, b.Len()) } func TestBuffer_BatchRejectAcceptNoop(t *testing.T) { diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 8d7d9854b..531a3065b 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -3,6 +3,7 @@ package models import ( "log" "sync" + "sync/atomic" "time" "github.com/influxdata/telegraf" @@ -29,6 +30,9 @@ type OutputConfig struct { // RunningOutput contains the output configuration type RunningOutput struct { + // Must be 64-bit aligned + newMetricsCount int64 + Name string Output telegraf.Output Config *OutputConfig @@ -36,16 +40,13 @@ type RunningOutput struct { MetricBatchSize int MetricsFiltered selfstat.Stat - BufferSize selfstat.Stat - BufferLimit selfstat.Stat WriteTime selfstat.Stat - batch []telegraf.Metric - buffer *Buffer BatchReady chan time.Time - aggMutex sync.Mutex - batchMutex sync.Mutex + buffer *Buffer + + aggMutex sync.Mutex } func NewRunningOutput( @@ -69,7 +70,6 @@ func NewRunningOutput( } ro 
:= &RunningOutput{ Name: name, - batch: make([]telegraf.Metric, 0, batchSize), buffer: NewBuffer(name, bufferLimit), BatchReady: make(chan time.Time, 1), Output: output, @@ -81,16 +81,6 @@ func NewRunningOutput( "metrics_filtered", map[string]string{"output": name}, ), - BufferSize: selfstat.Register( - "write", - "buffer_size", - map[string]string{"output": name}, - ), - BufferLimit: selfstat.Register( - "write", - "buffer_limit", - map[string]string{"output": name}, - ), WriteTime: selfstat.RegisterTiming( "write", "write_time_ns", @@ -98,7 +88,6 @@ func NewRunningOutput( ), } - ro.BufferLimit.Set(int64(ro.MetricBufferLimit)) return ro } @@ -129,28 +118,16 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { return } - ro.batchMutex.Lock() - - ro.batch = append(ro.batch, metric) - if len(ro.batch) == ro.MetricBatchSize { - ro.addBatchToBuffer() - - nBuffer := ro.buffer.Len() - ro.BufferSize.Set(int64(nBuffer)) + ro.buffer.Add(metric) + count := atomic.AddInt64(&ro.newMetricsCount, 1) + if count == int64(ro.MetricBatchSize) { + atomic.StoreInt64(&ro.newMetricsCount, 0) select { case ro.BatchReady <- time.Now(): default: } } - - ro.batchMutex.Unlock() -} - -// AddBatchToBuffer moves the metrics from the batch into the metric buffer. -func (ro *RunningOutput) addBatchToBuffer() { - ro.buffer.Add(ro.batch...) - ro.batch = ro.batch[:0] } // Write writes all metrics to the output, stopping when all have been sent on @@ -163,15 +140,12 @@ func (ro *RunningOutput) Write() error { output.Reset() ro.aggMutex.Unlock() } - // add and write can be called concurrently - ro.batchMutex.Lock() - ro.addBatchToBuffer() - ro.batchMutex.Unlock() - nBuffer := ro.buffer.Len() + atomic.StoreInt64(&ro.newMetricsCount, 0) // Only process the metrics in the buffer now. Metrics added while we are // writing will be sent on the next call. + nBuffer := ro.buffer.Len() nBatches := nBuffer/ro.MetricBatchSize + 1 for i := 0; i < nBatches; i++ { batch := ro.buffer.Batch(ro.MetricBatchSize) @@ -189,7 +163,7 @@ func (ro *RunningOutput) Write() error { return nil } -// WriteBatch writes only the batch metrics to the output. +// WriteBatch writes a single batch of metrics to the output. func (ro *RunningOutput) WriteBatch() error { batch := ro.buffer.Batch(ro.MetricBatchSize) if len(batch) == 0 { From 7284dfc5022d59dfb1ac59b77fc898f807b43b90 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 22 Jan 2019 13:45:24 -0800 Subject: [PATCH 0532/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e0ee632b4..4a9efab31 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,7 @@ - [#5285](https://github.com/influxdata/telegraf/issues/5285): Fix amqp_consumer stops consuming when it receives unparsable messages. - [#5281](https://github.com/influxdata/telegraf/issues/5281): Fix prometheus input not detecting added and removed pods. - [#5215](https://github.com/influxdata/telegraf/issues/5215): Remove userinfo from cluster tag in couchbase. +- [#5298](https://github.com/influxdata/telegraf/issues/5298): Fix internal_write buffer_size not reset on timed writes. 
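A short aside on the buffer change behind the #5298 entry above: the wrapped-case arithmetic in `Buffer.dist` lost its stray `- 1`, and the correction is easy to check by hand. The helper below mirrors the patched function; the capacity and index values are chosen purely for illustration.

```go
package main

import "fmt"

// dist mirrors the corrected Buffer.dist: how many slots lie between
// begin (inclusive) and end (exclusive) in a ring of the given capacity.
func dist(capacity, begin, end int) int {
	if begin <= end {
		return end - begin
	}
	return capacity - begin + end
}

func main() {
	// capacity 5, begin 3, end 1 wraps over indexes 3, 4 and 0: three
	// slots. The old expression (capacity - begin - 1 + end) returned 2.
	fmt.Println(dist(5, 3, 1)) // 3
}
```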
## v1.9.2 [2019-01-08] From 739aeeb2e0ae9bae0f46d9952c13e01c49f6a89c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 22 Jan 2019 13:48:20 -0800 Subject: [PATCH 0533/1815] Note 1.9.3 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a9efab31..8f1f88183 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,7 +40,7 @@ - [#4610](https://github.com/influxdata/telegraf/pull/4610): Fix initscript removes pidfile of restarted Telegraf process. -## v1.9.3 [unreleased] +## v1.9.3 [2019-01-22] #### Bugfixes From 5b85569316f941d8102d2696defc9a9d5a303468 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 22 Jan 2019 15:05:20 -0700 Subject: [PATCH 0534/1815] Cleanup logs in kinesis output (#5328) --- plugins/outputs/kinesis/kinesis.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index 93fc87a66..d2f52abcd 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -124,7 +124,7 @@ func (k *KinesisOutput) Connect() error { // We attempt first to create a session to Kinesis using an IAMS role, if that fails it will fall through to using // environment variables, and then Shared Credentials. if k.Debug { - log.Printf("E! kinesis: Establishing a connection to Kinesis in %+v", k.Region) + log.Printf("I! kinesis: Establishing a connection to Kinesis in %s", k.Region) } credentialConfig := &internalaws.CredentialConfig{ @@ -165,14 +165,14 @@ func writekinesis(k *KinesisOutput, r []*kinesis.PutRecordsRequestEntry) time.Du if k.Debug { resp, err := k.svc.PutRecords(payload) if err != nil { - log.Printf("E! kinesis: Unable to write to Kinesis : %+v \n", err.Error()) + log.Printf("E! kinesis: Unable to write to Kinesis : %s", err.Error()) } - log.Printf("E! %+v \n", resp) + log.Printf("I! Wrote: '%+v'", resp) } else { _, err := k.svc.PutRecords(payload) if err != nil { - log.Printf("E! kinesis: Unable to write to Kinesis : %+v \n", err.Error()) + log.Printf("E! kinesis: Unable to write to Kinesis : %s", err.Error()) } } return time.Since(start) @@ -197,7 +197,7 @@ func (k *KinesisOutput) getPartitionKey(metric telegraf.Metric) string { // Default partition name if default is not set return "telegraf" default: - log.Printf("E! kinesis : You have configured a Partition method of %+v which is not supported", k.Partition.Method) + log.Printf("E! kinesis : You have configured a Partition method of '%s' which is not supported", k.Partition.Method) } } if k.RandomPartitionKey { @@ -236,7 +236,7 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { if sz == 500 { // Max Messages Per PutRecordRequest is 500 elapsed := writekinesis(k, r) - log.Printf("E! Wrote a %+v point batch to Kinesis in %+v.\n", sz, elapsed) + log.Printf("I! Wrote a %d point batch to Kinesis in %+v.", sz, elapsed) sz = 0 r = nil } @@ -244,7 +244,7 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { } if sz > 0 { elapsed := writekinesis(k, r) - log.Printf("E! Wrote a %+v point batch to Kinesis in %+v.\n", sz, elapsed) + log.Printf("I! 
Wrote a %d point batch to Kinesis in %+v.", sz, elapsed) } return nil From 5acf2e6ed70b27710a956e7516eb6de689412850 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 22 Jan 2019 14:12:41 -0800 Subject: [PATCH 0535/1815] Update filecount docs --- plugins/inputs/filecount/README.md | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/plugins/inputs/filecount/README.md b/plugins/inputs/filecount/README.md index 260d18413..a6836ffc3 100644 --- a/plugins/inputs/filecount/README.md +++ b/plugins/inputs/filecount/README.md @@ -1,11 +1,10 @@ -# filecount Input Plugin +# Filecount Input Plugin Counts files in directories that match certain criteria. ### Configuration: ```toml -# Count files in a directory [[inputs.filecount]] ## Directory to gather stats about. ## deprecated in 1.9; use the directories option @@ -40,21 +39,18 @@ Counts files in directories that match certain criteria. mtime = "0s" ``` -### Measurements & Fields: +### Metrics - filecount - - count (int) - - size_bytes (int) - -### Tags: - -- All measurements have the following tags: + - tags: - directory (the directory path) + - fields: + - count (integer) + - size_bytes (integer) ### Example Output: ``` -$ telegraf --config /etc/telegraf/telegraf.conf --input-filter filecount --test -> filecount,directory=/var/cache/apt,host=czernobog count=7i,size=7438336i 1530034445000000000 -> filecount,directory=/tmp,host=czernobog count=17i,size=28934786i 1530034445000000000 +filecount,directory=/var/cache/apt count=7i,size_bytes=7438336i 1530034445000000000 +filecount,directory=/tmp count=17i,size_bytes=28934786i 1530034445000000000 ``` From b2f6fd685dc0109dc8c0efd7c34bd39c12aa2821 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 22 Jan 2019 15:29:46 -0800 Subject: [PATCH 0536/1815] Update link to InfluxDB v2.x --- plugins/outputs/influxdb_v2/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/influxdb_v2/README.md b/plugins/outputs/influxdb_v2/README.md index 5755e6b13..0c99691bd 100644 --- a/plugins/outputs/influxdb_v2/README.md +++ b/plugins/outputs/influxdb_v2/README.md @@ -50,4 +50,4 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v2.x] HTTP service. # insecure_skip_verify = false ``` -[InfluxDB v2.x]: https://github.com/influxdata/platform +[InfluxDB v2.x]: https://github.com/influxdata/influxdb From efbc83c8b60241a2d99211549ba86233256e6fcc Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 22 Jan 2019 15:47:57 -0800 Subject: [PATCH 0537/1815] Document that sqlserver input requires SP3 --- plugins/inputs/sqlserver/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index 33800ca2a..e83aca304 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -1,7 +1,7 @@ # SQL Server Input Plugin The `sqlserver` plugin provides metrics for your SQL Server instance. It -currently works with SQL Server versions 2008+. Recorded metrics are +currently works with SQL Server 2008 SP3 and newer. Recorded metrics are lightweight and use Dynamic Management Views supplied by SQL Server. 
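As background for readers who have not met DMVs: the sketch below shows roughly what reading one looks like from Go, using the same driver the plugin imports. The connection string and the specific counter are illustrative assumptions, not taken from the plugin's actual queries.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	// Driver the sqlserver plugin itself imports for its connections.
	_ "github.com/denisenkom/go-mssqldb"
)

func main() {
	// Illustrative connection string, in the same form as the plugin's
	// `servers` option.
	db, err := sql.Open("mssql", "Server=127.0.0.1;Port=1433;User Id=telegraf;Password=secret;")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// sys.dm_os_performance_counters is one of the lightweight views this
	// class of metrics comes from; 'Batch Requests/sec' is just an
	// example counter.
	rows, err := db.Query(`SELECT RTRIM(counter_name), cntr_value
FROM sys.dm_os_performance_counters
WHERE counter_name = 'Batch Requests/sec'`)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var name string
		var value int64
		if err := rows.Scan(&name, &value); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s = %d\n", name, value)
	}
}
```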
### Additional Setup: @@ -139,4 +139,4 @@ The following metrics can be used directly, with no delta calculations: - SQLServer:Workload Group Stats\Requests completed/sec Version 2 queries have the following tags: -- `sql_instance`: Physical host and instance name (hostname:instance) \ No newline at end of file +- `sql_instance`: Physical host and instance name (hostname:instance) From 99d36eb45333fe6967c632967d5b637f44ec9c34 Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Wed, 23 Jan 2019 12:59:03 -0500 Subject: [PATCH 0538/1815] Clarify change in changelog (#5333) --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8f1f88183..4d0d38aa6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,7 +25,7 @@ - [#5116](https://github.com/influxdata/telegraf/pull/5116): Include DEVLINKS in available diskio udev properties. - [#5149](https://github.com/influxdata/telegraf/pull/5149): Add micro and nanosecond unix timestamp support to JSON parser. - [#5160](https://github.com/influxdata/telegraf/pull/5160): Add support for basic auth to couchdb input. -- [#5161](https://github.com/influxdata/telegraf/pull/5161): Add support in wavefront output for the Wavefront Direction Ingestion API. +- [#5161](https://github.com/influxdata/telegraf/pull/5161): Add support in wavefront output for the Wavefront Direct Ingestion API. - [#5168](https://github.com/influxdata/telegraf/pull/5168): Allow counting float values in valuecounter aggregator. - [#5177](https://github.com/influxdata/telegraf/pull/5177): Add log send and redo queue fields to sqlserver input. - [#5113](https://github.com/influxdata/telegraf/pull/5113): Improve scalability of vsphere input. From 458d3109c09f6b4f270d9a24440f413e002608b6 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Wed, 23 Jan 2019 12:37:24 -0700 Subject: [PATCH 0539/1815] Set skip rows and columns in csv parser (#5336) --- internal/config/config.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index 2a5f62708..a24781949 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1591,7 +1591,7 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { if err != nil { return nil, err } - c.CSVHeaderRowCount = int(v) + c.CSVSkipRows = int(v) } } } @@ -1603,7 +1603,7 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { if err != nil { return nil, err } - c.CSVHeaderRowCount = int(v) + c.CSVSkipColumns = int(v) } } } From 39eff3d62ba0f7f621ea26ef79dbb3c37a6d82d6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 23 Jan 2019 11:40:22 -0800 Subject: [PATCH 0540/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4d0d38aa6..3e0ff5f64 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,12 @@ - [#4610](https://github.com/influxdata/telegraf/pull/4610): Fix initscript removes pidfile of restarted Telegraf process. +## v1.9.4 [unreleased] + +#### Bugfixes + +- [#5334](https://github.com/influxdata/telegraf/issues/5334): Fix skip_rows and skip_columns options in csv parser. 
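To make the #5334 entry concrete, here is a minimal sketch of the three parser options the config loader now maps to their own fields; the input file path and the numbers are illustrative, not taken from the patch.

```toml
[[inputs.file]]
  files = ["/tmp/input.csv"]
  data_format = "csv"

  ## Rows combined into the column names.
  csv_header_row_count = 1
  ## Rows discarded before the header is read.
  csv_skip_rows = 2
  ## Leading columns discarded from every row.
  csv_skip_columns = 1
```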
+ ## v1.9.3 [2019-01-22] #### Bugfixes From f739ce2a35d7e4db14a3684a1ce46f5b8907c647 Mon Sep 17 00:00:00 2001 From: Marcelo Almeida <11047924+marceloalmeida@users.noreply.github.com> Date: Wed, 23 Jan 2019 22:10:38 +0000 Subject: [PATCH 0541/1815] Use datacenter option spelling in consul input (#5320) --- etc/telegraf.conf | 4 ++-- plugins/inputs/consul/README.md | 4 ++-- plugins/inputs/consul/consul.go | 9 +++++++-- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 161754f6f..c6f679952 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -1703,8 +1703,8 @@ # # username = "" # # password = "" # -# ## Data centre to query the health checks from -# # datacentre = "" +# ## Data center to query the health checks from +# # datacenter = "" # # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" diff --git a/plugins/inputs/consul/README.md b/plugins/inputs/consul/README.md index f7dcdf362..2b2368388 100644 --- a/plugins/inputs/consul/README.md +++ b/plugins/inputs/consul/README.md @@ -24,8 +24,8 @@ report those stats already using StatsD protocol if needed. # username = "" # password = "" - ## Data centre to query the health checks from - # datacentre = "" + ## Data center to query the health checks from + # datacenter = "" ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" diff --git a/plugins/inputs/consul/consul.go b/plugins/inputs/consul/consul.go index 8649184dd..a3543f406 100644 --- a/plugins/inputs/consul/consul.go +++ b/plugins/inputs/consul/consul.go @@ -17,6 +17,7 @@ type Consul struct { Username string Password string Datacentre string + Datacenter string tls.ClientConfig TagDelimiter string @@ -38,8 +39,8 @@ var sampleConfig = ` # username = "" # password = "" - ## Data centre to query the health checks from - # datacentre = "" + ## Data center to query the health checks from + # datacenter = "" ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" @@ -77,6 +78,10 @@ func (c *Consul) createAPIClient() (*api.Client, error) { config.Datacenter = c.Datacentre } + if c.Datacenter != "" { + config.Datacenter = c.Datacenter + } + if c.Token != "" { config.Token = c.Token } From 17460be771c59efd0543b67c989c3733083d07af Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 23 Jan 2019 14:12:19 -0800 Subject: [PATCH 0542/1815] List deprecation version for consul datacentre --- plugins/inputs/consul/consul.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/consul/consul.go b/plugins/inputs/consul/consul.go index a3543f406..4662b54b0 100644 --- a/plugins/inputs/consul/consul.go +++ b/plugins/inputs/consul/consul.go @@ -16,7 +16,7 @@ type Consul struct { Token string Username string Password string - Datacentre string + Datacentre string // deprecated in 1.10; use Datacenter Datacenter string tls.ClientConfig TagDelimiter string From aabb60dfec5352662329f5b2626152322653214a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 23 Jan 2019 14:13:37 -0800 Subject: [PATCH 0543/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3e0ff5f64..c2ff3dbac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ #### Bugfixes - [#4610](https://github.com/influxdata/telegraf/pull/4610): Fix initscript removes pidfile of restarted Telegraf process. +- [#5320](https://github.com/influxdata/telegraf/pull/5320): Use datacenter option spelling in consul input. 
## v1.9.4 [unreleased] From d1610d50e623b5e134b817ffa9ddfdc8a07bcc51 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 23 Jan 2019 14:17:57 -0800 Subject: [PATCH 0544/1815] Always send basic auth in jenkins input (#5329) --- plugins/inputs/jenkins/README.md | 28 ++++++++++++++-------------- plugins/inputs/jenkins/client.go | 8 ++++---- plugins/inputs/jenkins/jenkins.go | 26 +++++++++++++------------- 3 files changed, 31 insertions(+), 31 deletions(-) diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md index 16afcaa7c..8d375f087 100644 --- a/plugins/inputs/jenkins/README.md +++ b/plugins/inputs/jenkins/README.md @@ -9,44 +9,44 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API ```toml ## The Jenkins URL url = "http://my-jenkins-instance:8080" - # username = "admin" - # password = "admin" + # username = "admin" + # password = "admin" ## Set response_timeout response_timeout = "5s" - ## Optional SSL Config - # ssl_ca = /path/to/cafile - # ssl_cert = /path/to/certfile - # ssl_key = /path/to/keyfile + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" ## Use SSL but skip chain & host verification - # insecure_skip_verify = false + # insecure_skip_verify = false ## Optional Max Job Build Age filter ## Default 1 hour, ignore builds older than max_build_age - # max_build_age = "1h" + # max_build_age = "1h" ## Optional Sub Job Depth filter ## Jenkins can have unlimited layer of sub jobs ## This config will limit the layers of pulling, default value 0 means ## unlimited pulling until no more sub jobs - # max_subjob_depth = 0 + # max_subjob_depth = 0 ## Optional Sub Job Per Layer ## In workflow-multibranch-plugin, each branch will be created as a sub job. 
  ## This config will limit pulling to only the latest branches in each layer,
  ## leaving this empty will use the default value 10
-  # max_subjob_per_layer = 10
+  # max_subjob_per_layer = 10

  ## Jobs to exclude from gathering
-  # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"]
+  # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"]

  ## Nodes to exclude from gathering
-  # node_exclude = [ "node1", "node2" ]
+  # node_exclude = [ "node1", "node2" ]

  ## Worker pool for jenkins plugin only
  ## Leaving this field empty will use the default value 5
-  # max_connections = 5
+  # max_connections = 5
```

### Metrics:
@@ -93,4 +93,4 @@ $ ./telegraf --config telegraf.conf --input-filter jenkins --test
 jenkins_node,arch=Linux\ (amd64),disk_path=/var/jenkins_home,temp_path=/tmp,host=myhost,node_name=master swap_total=4294963200,memory_available=586711040,memory_total=6089498624,status=online,response_time=1000i,disk_available=152392036352,temp_available=152392036352,swap_available=3503263744 1516031535000000000
 jenkins_job,host=myhost,name=JOB1,parents=apps/br1,result=SUCCESS duration=2831i,result_code=0i 1516026630000000000
 jenkins_job,host=myhost,name=JOB2,parents=apps/br2,result=SUCCESS duration=2285i,result_code=0i 1516027230000000000
-```
\ No newline at end of file
+```
diff --git a/plugins/inputs/jenkins/client.go b/plugins/inputs/jenkins/client.go
index 284b5eccf..6c0a125aa 100644
--- a/plugins/inputs/jenkins/client.go
+++ b/plugins/inputs/jenkins/client.go
@@ -33,8 +33,7 @@ func (c *client) init() error {
 	if err != nil {
 		return err
 	}
-	if c.username != "" && c.password != "" {
-		// set auth
+	if c.username != "" || c.password != "" {
 		req.SetBasicAuth(c.username, c.password)
 	}
 	resp, err := c.httpClient.Do(req)
@@ -123,10 +122,11 @@ func createGetRequest(url string, username, password string, sessionCookie *http
 	if err != nil {
 		return nil, err
 	}
+	if username != "" || password != "" {
+		req.SetBasicAuth(username, password)
+	}
 	if sessionCookie != nil {
 		req.AddCookie(sessionCookie)
-	} else if username != "" && password != "" {
-		req.SetBasicAuth(username, password)
 	}
 	req.Header.Add("Accept", "application/json")
 	return req, nil
diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go
index 8bb06052a..cfa0a38e4 100644
--- a/plugins/inputs/jenkins/jenkins.go
+++ b/plugins/inputs/jenkins/jenkins.go
@@ -45,44 +45,44 @@ type Jenkins struct {
 const sampleConfig = `
  ## The Jenkins URL
  url = "http://my-jenkins-instance:8080"
-  # username = "admin"
-  # password = "admin"
+  # username = "admin"
+  # password = "admin"

  ## Set response_timeout
  response_timeout = "5s"

-  ## Optional SSL Config
-  # ssl_ca = /path/to/cafile
-  # ssl_cert = /path/to/certfile
-  # ssl_key = /path/to/keyfile
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
-  # insecure_skip_verify = false
+  # insecure_skip_verify = false

  ## Optional Max Job Build Age filter
  ## Default 1 hour, ignore builds older than max_build_age
-  # max_build_age = "1h"
+  # max_build_age = "1h"

  ## Optional Sub Job Depth filter
  ## Jenkins can have unlimited layer of sub jobs
  ## This config will limit the layers of pulling, default value 0 means
  ## unlimited pulling until no more sub jobs
-  # max_subjob_depth = 0
+  # max_subjob_depth = 0

  ## Optional Sub Job Per Layer
  ## In workflow-multibranch-plugin, each branch will be created as a sub job.
  ## This config will limit pulling to only the latest branches in each layer,
  ## leaving this empty will use the default value 10
-  # max_subjob_per_layer = 10
+  # max_subjob_per_layer = 10

  ## Jobs to exclude from gathering
-  # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"]
+  # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"]

  ## Nodes to exclude from gathering
-  # node_exclude = [ "node1", "node2" ]
+  # node_exclude = [ "node1", "node2" ]

  ## Worker pool for jenkins plugin only
  ## Leaving this field empty will use the default value 5
-  # max_connections = 5
+  # max_connections = 5
`

// measurement
From 5acf2e6ed70b27710a956e7516eb6de689412850 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 23 Jan 2019 14:19:08 -0800
Subject: [PATCH 0545/1815] Update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c2ff3dbac..125326adf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -46,6 +46,7 @@
 #### Bugfixes

 - [#5334](https://github.com/influxdata/telegraf/issues/5334): Fix skip_rows and skip_columns options in csv parser.
+- [#5181](https://github.com/influxdata/telegraf/issues/5181): Always send basic auth in jenkins input.

 ## v1.9.3 [2019-01-22]

From 7a031c48cd1b30cf5f65a30e7e16fdf817217954 Mon Sep 17 00:00:00 2001
From: Martin Pittermann
Date: Thu, 24 Jan 2019 01:23:58 +0100
Subject: [PATCH 0546/1815] Add multifile input plugin (#5256)

---
 plugins/inputs/all/all.go                    |   1 +
 plugins/inputs/multifile/README.md           |  59 ++++++++
 plugins/inputs/multifile/multifile.go        | 149 +++++++++++++++++++
 plugins/inputs/multifile/multifile_test.go   |  76 ++++++++++
 plugins/inputs/multifile/testdata/bool.txt   |   1 +
 plugins/inputs/multifile/testdata/float.txt  |   1 +
 plugins/inputs/multifile/testdata/int.txt    |   1 +
 plugins/inputs/multifile/testdata/string.txt |   1 +
 plugins/inputs/multifile/testdata/tag.txt    |   1 +
 9 files changed, 290 insertions(+)
 create mode 100644 plugins/inputs/multifile/README.md
 create mode 100644 plugins/inputs/multifile/multifile.go
 create mode 100644 plugins/inputs/multifile/multifile_test.go
 create mode 100644 plugins/inputs/multifile/testdata/bool.txt
 create mode 100644 plugins/inputs/multifile/testdata/float.txt
 create mode 100644 plugins/inputs/multifile/testdata/int.txt
 create mode 100644 plugins/inputs/multifile/testdata/string.txt
 create mode 100644 plugins/inputs/multifile/testdata/tag.txt

diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 106c0118c..0a69ac21d 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -76,6 +76,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/minecraft"
 	_ "github.com/influxdata/telegraf/plugins/inputs/mongodb"
 	_ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer"
+	_ "github.com/influxdata/telegraf/plugins/inputs/multifile"
 	_ "github.com/influxdata/telegraf/plugins/inputs/mysql"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nats"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer"
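An aside on the one-line `all.go` hunk above, before the new plugin's own files: registration happens entirely as an import side effect — each plugin calls `inputs.Add` from its `init()`, and the blank import is what compiles that `init()` into the binary. The sketch below shows the pattern with a made-up `example` input; only `inputs.Add` and the three-method input interface are taken from the surrounding source.

```go
package example

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

// Example is a hypothetical input used only to illustrate registration.
type Example struct{}

func (e *Example) SampleConfig() string { return "" }

func (e *Example) Description() string { return "a hypothetical input" }

// Gather is where a real plugin would add metrics to the accumulator.
func (e *Example) Gather(acc telegraf.Accumulator) error { return nil }

func init() {
	// This is the side effect a blank import in plugins/inputs/all/all.go
	// triggers; without it the plugin never appears in the registry.
	inputs.Add("example", func() telegraf.Input { return &Example{} })
}
```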
diff --git a/plugins/inputs/multifile/README.md b/plugins/inputs/multifile/README.md
new file mode 100644
index 000000000..2178f0dbd
--- /dev/null
+++ b/plugins/inputs/multifile/README.md
@@ -0,0 +1,59 @@
+# Multifile Input Plugin
+
+### Description
+The multifile input plugin allows telegraf to gather data from multiple files into a single point, creating one field or tag per file.
+
+### Configuration
+```
+[[inputs.multifile]]
+  ## Base directory where telegraf will look for files.
+  ## Omit this option to use absolute paths.
+  base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0"
+
+  ## If true, Telegraf discards all data when a single file can't be read.
+  ## Otherwise, Telegraf omits the field generated from this file.
+  # fail_early = true
+
+  ## Files to parse each interval.
+  [[inputs.multifile.file]]
+    file = "in_pressure_input"
+    dest = "pressure"
+    conversion = "float"
+  [[inputs.multifile.file]]
+    file = "in_temp_input"
+    dest = "temperature"
+    conversion = "float(3)"
+  [[inputs.multifile.file]]
+    file = "in_humidityrelative_input"
+    dest = "humidityrelative"
+    conversion = "float(3)"
+```
+* `file.file`:
+Path of the file to be parsed
+* `file.dest`:
+Name of the field/tag created, defaults to `$(basename file)`
+* `file.conversion`:
+Data format used to parse the file contents
+  * `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Effectively just moves the decimal left X places. For example a value of `123` with `float(2)` will result in `1.23`.
+  * `float`: Converts the value into a float with no adjustment. Same as `float(0)`.
+  * `int`: Converts the value into an integer.
+  * `string`, `""`: No conversion.
+  * `bool`: Converts the value into a boolean.
+  * `tag`: File content is used as a tag.
+
+### Example Output
+This example shows a BME280 connected to a Raspberry Pi, using the sample config.
+```
+multifile pressure=101.343285156,temperature=20.4,humidityrelative=48.9 1547202076000000000
+```
+
+To reproduce this, connect a BME280 to the board's GPIO pins and register the BME280 device driver
+```
+cd /sys/bus/i2c/devices/i2c-1
+echo bme280 0x76 > new_device
+```
+
+The kernel driver provides the following files in `/sys/bus/i2c/devices/1-0076/iio:device0`:
+* `in_humidityrelative_input`: `48900`
+* `in_pressure_input`: `101.343285156`
+* `in_temp_input`: `20400`
diff --git a/plugins/inputs/multifile/multifile.go b/plugins/inputs/multifile/multifile.go
new file mode 100644
index 000000000..9c9813d9a
--- /dev/null
+++ b/plugins/inputs/multifile/multifile.go
@@ -0,0 +1,149 @@
+package multifile
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"math"
+	"path"
+	"strconv"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+type MultiFile struct {
+	BaseDir   string
+	FailEarly bool
+	Files     []File `toml:"file"`
+
+	initialized bool
+}
+
+type File struct {
+	Name       string `toml:"file"`
+	Dest       string
+	Conversion string
+}
+
+const sampleConfig = `
+  ## Base directory where telegraf will look for files.
+  ## Omit this option to use absolute paths.
+  base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0"
+
+  ## If true, Telegraf discards all data when a single file can't be read.
+  ## Otherwise, Telegraf omits the field generated from this file.
+  # fail_early = true
+
+  ## Files to parse each interval.
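+  ## Each [[inputs.multifile.file]] table below maps one source file to
+  ## one field (or, with conversion = "tag", one tag) of the emitted point.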
+ [[inputs.multifile.file]] + file = "in_pressure_input" + dest = "pressure" + conversion = "float" + [[inputs.multifile.file]] + file = "in_temp_input" + dest = "temperature" + conversion = "float(3)" + [[inputs.multifile.file]] + file = "in_humidityrelative_input" + dest = "humidityrelative" + conversion = "float(3)" +` + +// SampleConfig returns the default configuration of the Input +func (m *MultiFile) SampleConfig() string { + return sampleConfig +} + +func (m *MultiFile) Description() string { + return "Aggregates the contents of multiple files into a single point" +} + +func (m *MultiFile) init() { + if m.initialized { + return + } + + for i, file := range m.Files { + if m.BaseDir != "" { + m.Files[i].Name = path.Join(m.BaseDir, file.Name) + } + if file.Dest == "" { + m.Files[i].Dest = path.Base(file.Name) + } + } + + m.initialized = true +} + +func (m *MultiFile) Gather(acc telegraf.Accumulator) error { + m.init() + now := time.Now() + fields := make(map[string]interface{}) + tags := make(map[string]string) + + for _, file := range m.Files { + fileContents, err := ioutil.ReadFile(file.Name) + + if err != nil { + if m.FailEarly { + return err + } + continue + } + + vStr := string(bytes.TrimSpace(bytes.Trim(fileContents, "\x00"))) + + if file.Conversion == "tag" { + tags[file.Dest] = vStr + continue + } + + var value interface{} + + var d int = 0 + if _, errfmt := fmt.Sscanf(file.Conversion, "float(%d)", &d); errfmt == nil || file.Conversion == "float" { + var v float64 + v, err = strconv.ParseFloat(vStr, 64) + value = v / math.Pow10(d) + } + + if file.Conversion == "int" { + value, err = strconv.ParseInt(vStr, 10, 64) + } + + if file.Conversion == "string" || file.Conversion == "" { + value = vStr + } + + if file.Conversion == "bool" { + value, err = strconv.ParseBool(vStr) + } + + if err != nil { + if m.FailEarly { + return err + } + continue + } + + if value == nil { + return errors.New(fmt.Sprintf("invalid conversion %v", file.Conversion)) + } + + fields[file.Dest] = value + } + + acc.AddGauge("multifile", fields, tags, now) + return nil +} + +func init() { + inputs.Add("multifile", func() telegraf.Input { + return &MultiFile{ + FailEarly: true, + } + }) +} diff --git a/plugins/inputs/multifile/multifile_test.go b/plugins/inputs/multifile/multifile_test.go new file mode 100644 index 000000000..b12f29f35 --- /dev/null +++ b/plugins/inputs/multifile/multifile_test.go @@ -0,0 +1,76 @@ +package multifile + +import ( + "os" + "path" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFileTypes(t *testing.T) { + wd, _ := os.Getwd() + + m := MultiFile{ + BaseDir: path.Join(wd, `testdata`), + FailEarly: true, + Files: []File{ + {Name: `bool.txt`, Dest: `examplebool`, Conversion: `bool`}, + {Name: `float.txt`, Dest: `examplefloat`, Conversion: `float`}, + {Name: `int.txt`, Dest: `examplefloatX`, Conversion: `float(3)`}, + {Name: `int.txt`, Dest: `exampleint`, Conversion: `int`}, + {Name: `string.txt`, Dest: `examplestring`}, + {Name: `tag.txt`, Dest: `exampletag`, Conversion: `tag`}, + {Name: `int.txt`, Conversion: `int`}, + }, + } + + var acc testutil.Accumulator + + err := m.Gather(&acc) + + require.NoError(t, err) + assert.Equal(t, map[string]string{"exampletag": "test"}, acc.Metrics[0].Tags) + assert.Equal(t, map[string]interface{}{ + "examplebool": true, + "examplestring": "hello world", + "exampleint": int64(123456), + "int.txt": int64(123456), + "examplefloat": 123.456, + 
"examplefloatX": 123.456, + }, acc.Metrics[0].Fields) +} + +func FailEarly(failEarly bool, t *testing.T) error { + wd, _ := os.Getwd() + + m := MultiFile{ + BaseDir: path.Join(wd, `testdata`), + FailEarly: failEarly, + Files: []File{ + {Name: `int.txt`, Dest: `exampleint`, Conversion: `int`}, + {Name: `int.txt`, Dest: `exampleerror`, Conversion: `bool`}, + }, + } + + var acc testutil.Accumulator + + err := m.Gather(&acc) + + if err == nil { + assert.Equal(t, map[string]interface{}{ + "exampleint": int64(123456), + }, acc.Metrics[0].Fields) + } + + return err +} + +func TestFailEarly(t *testing.T) { + err := FailEarly(false, t) + require.NoError(t, err) + err = FailEarly(true, t) + require.Error(t, err) +} diff --git a/plugins/inputs/multifile/testdata/bool.txt b/plugins/inputs/multifile/testdata/bool.txt new file mode 100644 index 000000000..27ba77dda --- /dev/null +++ b/plugins/inputs/multifile/testdata/bool.txt @@ -0,0 +1 @@ +true diff --git a/plugins/inputs/multifile/testdata/float.txt b/plugins/inputs/multifile/testdata/float.txt new file mode 100644 index 000000000..d5910a0a6 --- /dev/null +++ b/plugins/inputs/multifile/testdata/float.txt @@ -0,0 +1 @@ +123.456 diff --git a/plugins/inputs/multifile/testdata/int.txt b/plugins/inputs/multifile/testdata/int.txt new file mode 100644 index 000000000..9f358a4ad --- /dev/null +++ b/plugins/inputs/multifile/testdata/int.txt @@ -0,0 +1 @@ +123456 diff --git a/plugins/inputs/multifile/testdata/string.txt b/plugins/inputs/multifile/testdata/string.txt new file mode 100644 index 000000000..9409bd50f --- /dev/null +++ b/plugins/inputs/multifile/testdata/string.txt @@ -0,0 +1 @@ + hello world diff --git a/plugins/inputs/multifile/testdata/tag.txt b/plugins/inputs/multifile/testdata/tag.txt new file mode 100644 index 000000000..9daeafb98 --- /dev/null +++ b/plugins/inputs/multifile/testdata/tag.txt @@ -0,0 +1 @@ +test From 35d18d3fd725a9fbe2b51b1f15dcd847d3d5c2b4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 23 Jan 2019 17:26:50 -0800 Subject: [PATCH 0547/1815] Update multifile plugin docs --- CHANGELOG.md | 1 + README.md | 1 + plugins/inputs/multifile/README.md | 35 +++++++++++++++++++----------- 3 files changed, 24 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 125326adf..fa85eda7a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ - [cloud_pubsub](/plugins/inputs/cloud_pubsub/README.md) - Contributed by @emilymye - [neptune_apex](/plugins/inputs/neptune_apex/README.md) - Contributed by @MaxRenaud - [nginx_upstream_check](/plugins/inputs/nginx_upstream_check/README.md) - Contributed by @dmitryilyin +- [multifile](/plugins/inputs/multifile/README.md) - Contributed by @martin2250 #### New Outputs diff --git a/README.md b/README.md index 2177be4cb..74dbc8ae5 100644 --- a/README.md +++ b/README.md @@ -206,6 +206,7 @@ For documentation on the latest development code see the [documentation index][d * [minecraft](./plugins/inputs/minecraft) * [mongodb](./plugins/inputs/mongodb) * [mqtt_consumer](./plugins/inputs/mqtt_consumer) +* [multifile](./plugins/inputs/multifile) * [mysql](./plugins/inputs/mysql) * [nats_consumer](./plugins/inputs/nats_consumer) * [nats](./plugins/inputs/nats) diff --git a/plugins/inputs/multifile/README.md b/plugins/inputs/multifile/README.md index 2178f0dbd..558d4e442 100644 --- a/plugins/inputs/multifile/README.md +++ b/plugins/inputs/multifile/README.md @@ -1,16 +1,20 @@ # Multifile Input Plugin -### Description -The multifile input plugin allows telegraf to gather data from 
multiple files into a single point, creating one field or tag per file.
+The multifile input plugin allows Telegraf to combine data from multiple files
+into a single metric, creating one field or tag per file. This is often
+useful for creating custom metrics from the `/sys` or `/proc` filesystems.
+
+> Note: If you wish to parse metrics from a single file formatted in one of the supported
+> [input data formats][], you should use the [file][] input plugin instead.
 
 ### Configuration
-```
+```toml
 [[inputs.multifile]]
   ## Base directory where telegraf will look for files.
   ## Omit this option to use absolute paths.
   base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0"
 
-  ## If true, Telegraf discards all data when a single file can't be read.
+  ## If true, discard all data when a single file can't be read.
   ## Else, Telegraf omits the field generated from this file.
   # fail_early = true
 
@@ -28,18 +32,20 @@ The multifile input plugin allows telegraf to gather data from multiple files in
   dest = "humidityrelative"
   conversion = "float(3)"
 ```
+
+Each file table can contain the following options:
+* `file`:
+Path of the file to be parsed, relative to the `base_dir`.
+* `dest`:
+Name of the field/tag key, defaults to `$(basename file)`.
+* `conversion`:
+Data format used to parse the file contents:
   * `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Effectively just moves the decimal left X places. For example, a value of `123` with `float(2)` will result in `1.23`.
   * `float`: Converts the value into a float with no adjustment. Same as `float(0)`.
   * `int`: Converts the value into an integer.
-  * `string`, `""`: No conversion
-  * `bool`: Converts the value into a boolean
-  * `tag`: File content is used as a tag
+  * `string`, `""`: No conversion.
+  * `bool`: Converts the value into a boolean.
+  * `tag`: File content is used as a tag.
 
 ### Example Output
 This example shows a BME280 connected to a Raspberry Pi, using the sample config.
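Since the conversion rules above are the heart of the plugin, a short standalone sketch can help when testing values outside of Telegraf. It mirrors the parsing logic in multifile.go earlier in this series; the `convert` helper and the `main` example are illustrative only, not part of the plugin:

```go
package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

// convert applies a multifile-style conversion to a raw file value.
// The "tag" case is handled separately by the plugin; here it simply
// returns the trimmed string.
func convert(raw, conversion string) (interface{}, error) {
	vStr := strings.TrimSpace(strings.Trim(raw, "\x00"))

	var d int
	if _, err := fmt.Sscanf(conversion, "float(%d)", &d); err == nil || conversion == "float" {
		v, err := strconv.ParseFloat(vStr, 64)
		if err != nil {
			return nil, err
		}
		// float(X) divides by 10^X, moving the decimal point left X places;
		// plain "float" is the same with X = 0.
		return v / math.Pow10(d), nil
	}

	switch conversion {
	case "int":
		n, err := strconv.ParseInt(vStr, 10, 64)
		return n, err
	case "bool":
		b, err := strconv.ParseBool(vStr)
		return b, err
	case "string", "", "tag": // tag values are plain strings too
		return vStr, nil
	}
	return nil, fmt.Errorf("invalid conversion %q", conversion)
}

func main() {
	v, _ := convert("20400", "float(3)") // as in the BME280 temperature example
	fmt.Println(v)                       // prints 20.4
}
```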
@@ -57,3 +63,6 @@ The kernel driver provides the following files in `/sys/bus/i2c/devices/1-0076/i * `in_humidityrelative_input`: `48900` * `in_pressure_input`: `101.343285156` * `in_temp_input`: `20400` + +[input data formats]: /docs/DATA_FORMATS_INPUT.md +[file]: /plugins/inputs/file/README.md From 7a229e25a6f43a1d6fe75b0a2fda351446aeb34b Mon Sep 17 00:00:00 2001 From: vignemail1 Date: Thu, 24 Jan 2019 19:54:25 +0100 Subject: [PATCH 0548/1815] Update sudo config recommendation (#5337) --- plugins/inputs/fail2ban/README.md | 17 +++++++++++++---- plugins/inputs/ipset/README.md | 15 ++++++++++++--- plugins/inputs/iptables/README.md | 16 +++++++++++++--- plugins/inputs/opensmtpd/README.md | 4 +++- plugins/inputs/smart/README.md | 21 +++++++++++++++++++++ plugins/inputs/unbound/README.md | 4 +++- plugins/inputs/varnish/README.md | 4 +++- 7 files changed, 68 insertions(+), 13 deletions(-) diff --git a/plugins/inputs/fail2ban/README.md b/plugins/inputs/fail2ban/README.md index b0f6666bb..0b0e65414 100644 --- a/plugins/inputs/fail2ban/README.md +++ b/plugins/inputs/fail2ban/README.md @@ -10,15 +10,24 @@ Acquiring the required permissions can be done using several methods: ### Using sudo -You may edit your sudo configuration with the following: +You will need the following in your telegraf config: +```toml +[[inputs.fail2ban]] + use_sudo = true +``` -``` sudo -telegraf ALL=(root) NOEXEC: NOPASSWD: /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status * +You will also need to update your sudoers file: +```bash +$ visudo +# Add the following line: +Cmnd_Alias FAIL2BAN = /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status * +telegraf ALL=(root) NOEXEC: NOPASSWD: FAIL2BAN +Defaults!FAIL2BAN !logfile, !syslog, !pam_session ``` ### Configuration: -``` toml +```toml # Read metrics from fail2ban. 
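# (Requires fail2ban-client on the telegraf user's PATH; when use_sudo = true,
# the sudoers rule above must also be in place.)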
[[inputs.fail2ban]] ## Use sudo to run fail2ban-client diff --git a/plugins/inputs/ipset/README.md b/plugins/inputs/ipset/README.md index 2209de911..ae66ccfc0 100644 --- a/plugins/inputs/ipset/README.md +++ b/plugins/inputs/ipset/README.md @@ -25,10 +25,19 @@ AmbientCapabilities=CAP_NET_RAW CAP_NET_ADMIN ### Using sudo -You may edit your sudo configuration with the following: +You will need the following in your telegraf config: +```toml +[[inputs.ipset]] + use_sudo = true +``` -```sudo -telegraf ALL=(root) NOPASSWD: /sbin/ipset save +You will also need to update your sudoers file: +```bash +$ visudo +# Add the following line: +Cmnd_Alias IPSETSAVE = /sbin/ipset save +telegraf ALL=(root) NOPASSWD: IPSETSAVE +Defaults!IPSETSAVE !logfile, !syslog, !pam_session ``` ### Configuration diff --git a/plugins/inputs/iptables/README.md b/plugins/inputs/iptables/README.md index 03bf784e6..6b56febba 100644 --- a/plugins/inputs/iptables/README.md +++ b/plugins/inputs/iptables/README.md @@ -28,10 +28,20 @@ Since telegraf will fork a process to run iptables, `AmbientCapabilities` is req ### Using sudo -You may edit your sudo configuration with the following: +You will need the following in your telegraf config: +```toml +[[inputs.iptables]] + use_sudo = true +``` -```sudo -telegraf ALL=(root) NOPASSWD: /usr/bin/iptables -nvL * +You will also need to update your sudoers file: + +```bash +$ visudo +# Add the following line: +Cmnd_Alias IPTABLESSHOW = /usr/bin/iptables -nvL * +telegraf ALL=(root) NOPASSWD: IPTABLESSHOW +Defaults!IPTABLESSHOW !logfile, !syslog, !pam_session ``` ### Using IPtables lock feature diff --git a/plugins/inputs/opensmtpd/README.md b/plugins/inputs/opensmtpd/README.md index ba73ed024..4c1949869 100644 --- a/plugins/inputs/opensmtpd/README.md +++ b/plugins/inputs/opensmtpd/README.md @@ -86,7 +86,9 @@ You will also need to update your sudoers file: ```bash $ visudo # Add the following line: -telegraf ALL=(ALL) NOPASSWD: /usr/sbin/smtpctl +Cmnd_Alias SMTPCTL = /usr/sbin/smtpctl +telegraf ALL=(ALL) NOPASSWD: SMTPCTL +Defaults!SMTPCTL !logfile, !syslog, !pam_session ``` Please use the solution you see as most appropriate. diff --git a/plugins/inputs/smart/README.md b/plugins/inputs/smart/README.md index 4826edbc6..c60e11e35 100644 --- a/plugins/inputs/smart/README.md +++ b/plugins/inputs/smart/README.md @@ -61,6 +61,27 @@ smartctl -s on # devices = [ "/dev/ada0 -d atacam" ] ``` +### Permissions: + +It's important to note that this plugin references smartctl, which may require additional permissions to execute successfully. +Depending on the user/group permissions of the telegraf user executing this plugin, you may need to use sudo. 
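The same sudoers pattern recurs for every plugin touched by this patch. Once a rule is in place, it can be verified before restarting Telegraf; a quick check, assuming the agent runs as the `telegraf` user (both commands are standard sudo tooling, independent of Telegraf):

```bash
# Check the sudoers files for syntax errors (also covers /etc/sudoers.d/).
sudo visudo -c

# List what the telegraf user is allowed to run; the Cmnd_Alias entries
# added in this patch should show up with NOPASSWD.
sudo -l -U telegraf
```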
+ + +You will need the following in your telegraf config: +```toml +[[inputs.smart]] + use_sudo = true +``` + +You will also need to update your sudoers file: +```bash +$ visudo +# Add the following line: +Cmnd_Alias SMARTCTL = /usr/bin/smartctl +telegraf ALL=(ALL) NOPASSWD: SMARTCTL +Defaults!SMARTCTL !logfile, !syslog, !pam_session +``` + ### Metrics: - smart_device: diff --git a/plugins/inputs/unbound/README.md b/plugins/inputs/unbound/README.md index 2163bd375..36c9aa47d 100644 --- a/plugins/inputs/unbound/README.md +++ b/plugins/inputs/unbound/README.md @@ -56,7 +56,9 @@ You will also need to update your sudoers file: ```bash $ visudo # Add the following line: -telegraf ALL=(ALL) NOPASSWD: /usr/sbin/unbound-control +Cmnd_Alias UNBOUNDCTL = /usr/sbin/unbound-control +telegraf ALL=(ALL) NOPASSWD: UNBOUNDCTL +Defaults!UNBOUNDCTL !logfile, !syslog, !pam_session ``` Please use the solution you see as most appropriate. diff --git a/plugins/inputs/varnish/README.md b/plugins/inputs/varnish/README.md index 380d1c75b..3609b12e7 100644 --- a/plugins/inputs/varnish/README.md +++ b/plugins/inputs/varnish/README.md @@ -391,7 +391,9 @@ You will also need to update your sudoers file: ```bash $ visudo # Add the following line: -telegraf ALL=(ALL) NOPASSWD: /usr/bin/varnishstat +Cmnd_Alias VARNISHSTAT = /usr/bin/varnishstat +telegraf ALL=(ALL) NOPASSWD: VARNISHSTAT +Defaults!VARNISHSTAT !logfile, !syslog, !pam_session ``` Please use the solution you see as most appropriate. From 4ca0a04df183781260d936b95ed8ced3d626c463 Mon Sep 17 00:00:00 2001 From: Jeff Ashton Date: Fri, 25 Jan 2019 15:57:35 -0500 Subject: [PATCH 0549/1815] Removing authentication from the /ping route to match influxdb (#5316) --- plugins/inputs/influxdb_listener/http_listener.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/plugins/inputs/influxdb_listener/http_listener.go b/plugins/inputs/influxdb_listener/http_listener.go index 29f055b53..1de764bfd 100644 --- a/plugins/inputs/influxdb_listener/http_listener.go +++ b/plugins/inputs/influxdb_listener/http_listener.go @@ -229,9 +229,7 @@ func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) { h.PingsRecv.Incr(1) defer h.PingsServed.Incr(1) // respond to ping requests - h.AuthenticateIfSet(func(res http.ResponseWriter, req *http.Request) { - res.WriteHeader(http.StatusNoContent) - }, res, req) + res.WriteHeader(http.StatusNoContent) default: defer h.NotFoundsServed.Incr(1) // Don't know how to respond to calls to other endpoints From 7d64620440dac21192c0b5dea2f590b57b7da103 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 25 Jan 2019 12:59:48 -0800 Subject: [PATCH 0550/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fa85eda7a..1566c60d7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,7 @@ - [#4610](https://github.com/influxdata/telegraf/pull/4610): Fix initscript removes pidfile of restarted Telegraf process. - [#5320](https://github.com/influxdata/telegraf/pull/5320): Use datacenter option spelling in consul input. +- [#5316](https://github.com/influxdata/telegraf/pull/5316): Remove auth from /ping route in influxdb_listener. 
## v1.9.4 [unreleased] From a15305385f0af89319262e1ee10039639ebe6390 Mon Sep 17 00:00:00 2001 From: Frank Reno Date: Fri, 25 Jan 2019 19:06:08 -0700 Subject: [PATCH 0551/1815] Add carbon2 serializer (#5345) --- README.md | 1 + docs/DATA_FORMATS_OUTPUT.md | 1 + plugins/serializers/carbon2/README.md | 49 +++++++ plugins/serializers/carbon2/carbon2.go | 67 ++++++++++ plugins/serializers/carbon2/carbon2_test.go | 138 ++++++++++++++++++++ plugins/serializers/registry.go | 7 + 6 files changed, 263 insertions(+) create mode 100644 plugins/serializers/carbon2/README.md create mode 100644 plugins/serializers/carbon2/carbon2.go create mode 100644 plugins/serializers/carbon2/carbon2_test.go diff --git a/README.md b/README.md index 74dbc8ae5..cf994dc20 100644 --- a/README.md +++ b/README.md @@ -303,6 +303,7 @@ For documentation on the latest development code see the [documentation index][d - [Graphite](/plugins/serializers/graphite) - [ServiceNow](/plugins/serializers/nowmetric) - [SplunkMetric](/plugins/serializers/splunkmetric) +- [Carbon2](/plugins/serializers/carbon2) ## Processor Plugins diff --git a/docs/DATA_FORMATS_OUTPUT.md b/docs/DATA_FORMATS_OUTPUT.md index c06ab4719..3ee16524d 100644 --- a/docs/DATA_FORMATS_OUTPUT.md +++ b/docs/DATA_FORMATS_OUTPUT.md @@ -8,6 +8,7 @@ plugins. 1. [JSON](/plugins/serializers/json) 1. [Graphite](/plugins/serializers/graphite) 1. [SplunkMetric](/plugins/serializers/splunkmetric) +1. [Carbon2](/plugins/serializers/carbon2) You will be able to identify the plugins with support by the presence of a `data_format` config option, for example, in the `file` output plugin: diff --git a/plugins/serializers/carbon2/README.md b/plugins/serializers/carbon2/README.md new file mode 100644 index 000000000..e88b18cf0 --- /dev/null +++ b/plugins/serializers/carbon2/README.md @@ -0,0 +1,49 @@ +# Carbon2 + +The `carbon2` serializer translates the Telegraf metric format to the [Carbon2 format](http://metrics20.org/implementations/). + +### Configuration + +```toml +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "carbon2" +``` + +Standard form: +``` +metric=name field=field_1 host=foo 30 1234567890 +metric=name field=field_2 host=foo 4 1234567890 +metric=name field=field_N host=foo 59 1234567890 +``` + +### Metrics + +The serializer converts the metrics by creating `intrinsic_tags` using the combination of metric name and fields. So, if one Telegraf metric has 4 fields, the `carbon2` output will be 4 separate metrics. There will be a `metric` tag that represents the name of the metric and a `field` tag to represent the field. + +### Example + +If we take the following InfluxDB Line Protocol: + +``` +weather,location=us-midwest,season=summer temperature=82,wind=100 1234567890 +``` + +after serializing in Carbon2, the result would be: + +``` +metric=weather field=temperature location=us-midwest season=summer 82 1234567890 +metric=weather field=wind location=us-midwest season=summer 100 1234567890 +``` + +### Fields and Tags with spaces +When a field key or tag key/value have spaces, spaces will be replaced with `_`. 
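To see the serializer behavior end to end, here is a small driver program in the spirit of carbon2_test.go below; it feeds the serializer the `weather` metric from the line-protocol example above (trimmed to one field), and is illustrative rather than part of the plugin:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/plugins/serializers/carbon2"
)

func main() {
	// weather,location=us-midwest temperature=82 1234567890
	m, err := metric.New(
		"weather",
		map[string]string{"location": "us-midwest"},
		map[string]interface{}{"temperature": 82.0},
		time.Unix(1234567890, 0),
	)
	if err != nil {
		panic(err)
	}

	s, _ := carbon2.NewSerializer()
	out, _ := s.Serialize(m)

	// Note the two spaces before the value: the serializer writes a
	// trailing space after the tag list plus one separator space.
	// Output: metric=weather field=temperature location=us-midwest  82 1234567890
	fmt.Print(string(out))
}
```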
+ +### Tags with empty values +When a tag's value is empty, it will be replaced with `null` diff --git a/plugins/serializers/carbon2/carbon2.go b/plugins/serializers/carbon2/carbon2.go new file mode 100644 index 000000000..fc11de062 --- /dev/null +++ b/plugins/serializers/carbon2/carbon2.go @@ -0,0 +1,67 @@ +package carbon2 + +import ( + "bytes" + "fmt" + "github.com/influxdata/telegraf" + "strconv" + "strings" +) + +type serializer struct { +} + +func NewSerializer() (*serializer, error) { + s := &serializer{} + return s, nil +} + +func (s *serializer) Serialize(metric telegraf.Metric) ([]byte, error) { + return s.createObject(metric), nil +} + +func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { + var batch bytes.Buffer + for _, metric := range metrics { + batch.Write(s.createObject(metric)) + } + return batch.Bytes(), nil +} + +func (s *serializer) createObject(metric telegraf.Metric) []byte { + var m bytes.Buffer + for fieldName, fieldValue := range metric.Fields() { + if isNumeric(fieldValue) { + m.WriteString("metric=") + m.WriteString(strings.Replace(metric.Name(), " ", "_", -1)) + m.WriteString(" field=") + m.WriteString(strings.Replace(fieldName, " ", "_", -1)) + m.WriteString(" ") + for _, tag := range metric.TagList() { + m.WriteString(strings.Replace(tag.Key, " ", "_", -1)) + m.WriteString("=") + value := tag.Value + if len(value) == 0 { + value = "null" + } + m.WriteString(strings.Replace(value, " ", "_", -1)) + m.WriteString(" ") + } + m.WriteString(" ") + m.WriteString(fmt.Sprintf("%v", fieldValue)) + m.WriteString(" ") + m.WriteString(strconv.FormatInt(metric.Time().Unix(), 10)) + m.WriteString("\n") + } + } + return m.Bytes() +} + +func isNumeric(v interface{}) bool { + switch v.(type) { + case string: + return false + default: + return true + } +} diff --git a/plugins/serializers/carbon2/carbon2_test.go b/plugins/serializers/carbon2/carbon2_test.go new file mode 100644 index 000000000..f335342d5 --- /dev/null +++ b/plugins/serializers/carbon2/carbon2_test.go @@ -0,0 +1,138 @@ +package carbon2 + +import ( + "fmt" + "github.com/stretchr/testify/require" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/assert" +) + +func MustMetric(v telegraf.Metric, err error) telegraf.Metric { + if err != nil { + panic(err) + } + return v +} + +func TestSerializeMetricFloat(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer() + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + expS := []byte(fmt.Sprintf(`metric=cpu field=usage_idle cpu=cpu0 91.5 %d`, now.Unix()) + "\n") + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricWithEmptyStringTag(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer() + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + expS := []byte(fmt.Sprintf(`metric=cpu field=usage_idle cpu=null 91.5 %d`, now.Unix()) + "\n") + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeWithSpaces(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu 0": "cpu 0", + } + fields := 
map[string]interface{}{ + "usage_idle 1": float64(91.5), + } + m, err := metric.New("cpu metric", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer() + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + expS := []byte(fmt.Sprintf(`metric=cpu_metric field=usage_idle_1 cpu_0=cpu_0 91.5 %d`, now.Unix()) + "\n") + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricInt(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": int64(90), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer() + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + expS := []byte(fmt.Sprintf(`metric=cpu field=usage_idle cpu=cpu0 90 %d`, now.Unix()) + "\n") + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricString(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": "foobar", + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer() + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + expS := []byte("") + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeBatch(t *testing.T) { + m := MustMetric( + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + ) + + metrics := []telegraf.Metric{m, m} + s, _ := NewSerializer() + buf, err := s.SerializeBatch(metrics) + require.NoError(t, err) + expS := []byte(`metric=cpu field=value 42 0 +metric=cpu field=value 42 0 +`) + assert.Equal(t, string(expS), string(buf)) +} diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index 9ca2f42e7..cbc5981a6 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -6,6 +6,7 @@ import ( "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/serializers/carbon2" "github.com/influxdata/telegraf/plugins/serializers/graphite" "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/plugins/serializers/json" @@ -82,6 +83,8 @@ func NewSerializer(config *Config) (Serializer, error) { serializer, err = NewSplunkmetricSerializer(config.HecRouting) case "nowmetric": serializer, err = NewNowSerializer() + case "carbon2": + serializer, err = NewCarbon2Serializer() default: err = fmt.Errorf("Invalid data format: %s", config.DataFormat) } @@ -92,6 +95,10 @@ func NewJsonSerializer(timestampUnits time.Duration) (Serializer, error) { return json.NewSerializer(timestampUnits) } +func NewCarbon2Serializer() (Serializer, error) { + return carbon2.NewSerializer() +} + func NewSplunkmetricSerializer(splunkmetric_hec_routing bool) (Serializer, error) { return splunkmetric.NewSerializer(splunkmetric_hec_routing) } From 36b55c3d76b91561b1f35725c02c81a2a1c6afce Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 25 Jan 2019 18:07:50 -0800 Subject: [PATCH 0552/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1566c60d7..669cb098a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ #### New Serializers - [nowmetric](/plugins/serializers/nowmetric/README.md) - Contributed by @JefMuller +- [carbon2](/plugins/serializers/carbon2/README.md) - Contributed by @frankreno #### Features From 
d5a03eb6900e40cbdb911ee8a5568d437dc1b3b7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 25 Jan 2019 18:08:48 -0800 Subject: [PATCH 0553/1815] Use Go 1.10.8 and 1.11.5 (#5346) --- .circleci/config.yml | 4 ++-- Makefile | 8 ++++---- scripts/ci-1.10.docker | 2 +- scripts/ci-1.11.docker | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 16003bacd..63cbf2549 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -7,10 +7,10 @@ defaults: - image: 'quay.io/influxdb/telegraf-ci:1.9.7' go-1_10: &go-1_10 docker: - - image: 'quay.io/influxdb/telegraf-ci:1.10.7' + - image: 'quay.io/influxdb/telegraf-ci:1.10.8' go-1_11: &go-1_11 docker: - - image: 'quay.io/influxdb/telegraf-ci:1.11.4' + - image: 'quay.io/influxdb/telegraf-ci:1.11.5' version: 2 jobs: diff --git a/Makefile b/Makefile index 8c256b713..100883198 100644 --- a/Makefile +++ b/Makefile @@ -133,13 +133,13 @@ plugin-%: .PHONY: ci-1.11 ci-1.11: - docker build -t quay.io/influxdb/telegraf-ci:1.11.4 - < scripts/ci-1.11.docker - docker push quay.io/influxdb/telegraf-ci:1.11.4 + docker build -t quay.io/influxdb/telegraf-ci:1.11.5 - < scripts/ci-1.11.docker + docker push quay.io/influxdb/telegraf-ci:1.11.5 .PHONY: ci-1.10 ci-1.10: - docker build -t quay.io/influxdb/telegraf-ci:1.10.7 - < scripts/ci-1.10.docker - docker push quay.io/influxdb/telegraf-ci:1.10.7 + docker build -t quay.io/influxdb/telegraf-ci:1.10.8 - < scripts/ci-1.10.docker + docker push quay.io/influxdb/telegraf-ci:1.10.8 .PHONY: ci-1.9 ci-1.9: diff --git a/scripts/ci-1.10.docker b/scripts/ci-1.10.docker index c70b33038..54c30f382 100644 --- a/scripts/ci-1.10.docker +++ b/scripts/ci-1.10.docker @@ -1,4 +1,4 @@ -FROM golang:1.10.7 +FROM golang:1.10.8 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/ci-1.11.docker b/scripts/ci-1.11.docker index 278d5d857..823b3dadf 100644 --- a/scripts/ci-1.11.docker +++ b/scripts/ci-1.11.docker @@ -1,4 +1,4 @@ -FROM golang:1.11.4 +FROM golang:1.11.5 RUN chmod -R 755 "$GOPATH" From 6c6ff372ff3f93a03ed01b24af06a1595302f984 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 25 Jan 2019 18:10:25 -0800 Subject: [PATCH 0554/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 669cb098a..56576aa2c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -50,6 +50,7 @@ - [#5334](https://github.com/influxdata/telegraf/issues/5334): Fix skip_rows and skip_columns options in csv parser. - [#5181](https://github.com/influxdata/telegraf/issues/5181): Always send basic auth in jenkins input. +- [#5346](https://github.com/influxdata/telegraf/pull/5346): Build official packages with Go 1.11.5. ## v1.9.3 [2019-01-22] From d207269a30fcf0273e30ad165ab1b5285d1581db Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Tue, 29 Jan 2019 20:32:48 -0500 Subject: [PATCH 0555/1815] Added performance section to vsphere README (#5353) --- plugins/inputs/vsphere/README.md | 80 ++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md index 4bccbb2c8..533c35257 100644 --- a/plugins/inputs/vsphere/README.md +++ b/plugins/inputs/vsphere/README.md @@ -196,6 +196,86 @@ For setting up concurrency, modify `collect_concurrency` and `discover_concurren # discover_concurrency = 1 ``` +## Performance Considerations + +### Realtime vs. historical metrics + +vCenter keeps two different kinds of metrics, known as realtime and historical metrics. 
+
+* Realtime metrics: Available at a 20 second granularity. These metrics are stored in memory and are very fast and cheap to query. Our tests have shown that a complete set of realtime metrics for 7000 virtual machines can be obtained in less than 20 seconds. Realtime metrics are only available on **ESXi hosts** and **virtual machine** resources. Realtime metrics are only stored for 1 hour in vCenter.
+* Historical metrics: Available at 5 minute, 30 minute, 2 hour and 24 hour rollup levels. The vSphere Telegraf plugin only uses the 5 minute rollup. These metrics are stored in the vCenter database and can be expensive and slow to query. Historical metrics are the only type of metrics available for **clusters**, **datastores** and **datacenters**.
+
+For more information, refer to the vSphere documentation here: https://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch16_Performance.18.2.html
+
+This distinction has an impact on how Telegraf collects metrics. A single instance of an input plugin can have one and only one collection interval, which means that you typically set the collection interval based on the most frequently collected metric. Let's assume you set the collection interval to 1 minute. All realtime metrics will be collected every minute. Since the historical metrics are only available on a 5 minute interval, the vSphere Telegraf plugin automatically skips four out of five collection cycles for these metrics. This works fine in many cases. Problems arise when the collection of historical metrics takes longer than the collection interval. This will cause error messages similar to this to appear in the Telegraf logs:
+
+```2019-01-16T13:41:10Z W! [agent] input "inputs.vsphere" did not complete within its interval```
+
+This will disrupt the metric collection and can result in missed samples. The best practice workaround is to specify two instances of the vSphere plugin, one for the realtime metrics with a short collection interval and one for the historical metrics with a longer interval. You can use the ```*_metric_exclude``` options to turn off the resources you don't want to collect metrics for in each instance. For example:

+```
+## Realtime instance
+[[inputs.vsphere]]
+  interval = "60s"
+  vcenters = [ "https://someaddress/sdk" ]
+  username = "someuser@vsphere.local"
+  password = "secret"
+
+  insecure_skip_verify = true
+  force_discover_on_init = true
+
+  # Exclude all historical metrics
+  datastore_metric_exclude = ["*"]
+  cluster_metric_exclude = ["*"]
+  datacenter_metric_exclude = ["*"]
+
+  collect_concurrency = 5
+  discover_concurrency = 5
+
+# Historical instance
+[[inputs.vsphere]]
+
+  interval = "300s"
+
+  vcenters = [ "https://someaddress/sdk" ]
+  username = "someuser@vsphere.local"
+  password = "secret"
+
+  insecure_skip_verify = true
+  force_discover_on_init = true
+  host_metric_exclude = ["*"] # Exclude realtime metrics
+  vm_metric_exclude = ["*"] # Exclude realtime metrics
+
+  max_query_metrics = 256
+  collect_concurrency = 3
+```
+
+### Configuring the max_query_metrics setting
+
+The ```max_query_metrics``` setting determines the maximum number of metrics to attempt to retrieve in one call to vCenter. Generally speaking, a higher number means faster and more efficient queries. However, the number of allowed metrics in a query is typically limited by the ```config.vpxd.stats.maxQueryMetrics``` setting in vCenter. The value defaults to 64 on vSphere 5.5 and older and 256 on newer versions of vCenter. One quick way to check the value currently in effect is shown below.
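For example, with the `govc` CLI from the govmomi project (a separate tool, not part of Telegraf, shown here only as one convenient way to read vCenter advanced settings):

```bash
# Assumes GOVC_URL, GOVC_USERNAME and GOVC_PASSWORD point at your vCenter.
# A value of -1 disables the limit entirely; an empty result usually means
# the option has never been changed from its built-in default.
govc option.ls config.vpxd.stats.maxQueryMetrics
```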
The vSphere plugin always checks this setting and will automatically reduce the number if the limit configured in vCenter is lower than max_query_metrics in the plugin. This will result in a log message similar to this: + +```2019-01-21T03:24:18Z W! [input.vsphere] Configured max_query_metrics is 256, but server limits it to 64. Reducing.``` + +You may ask a vCenter administrator to increase this limit to help boost performance. + +### Cluster metrics and the max_query_metrics setting + +Cluster metrics are handled a bit differently by vCenter. They are aggregated from ESXi and virtual machine metrics and may not be available when you query their most recent values. When this happens, vCenter will attempt to perform that aggregation on the fly. Unfortunately, all the subqueries needed internally in vCenter to perform this aggregation will count towards ```config.vpxd.stats.maxQueryMetrics```. This means that even a very small query may result in an error message similar to this: + +```2018-11-02T13:37:11Z E! Error in plugin [inputs.vsphere]: ServerFaultCode: This operation is restricted by the administrator - 'vpxd.stats.maxQueryMetrics'. Contact your system administrator``` + +There are two ways of addressing this: +* Ask your vCenter administrator to set ```config.vpxd.stats.maxQueryMetrics``` to a number that's higher than the total number of virtual machines managed by a vCenter instance. +* Exclude the cluster metrics and use either the basicstats aggregator to calculate sums and averages per cluster or use queries in the visualization tool to obtain the same result. + +### Concurrency settings + +The vSphere plugin allows you to specify two concurrency settings: +* ```collect_concurrency```: The maximum number of simultaneous queries for performance metrics allowed per resource type. +* ```discover_concurrency```: The maximum number of simultaneous queries for resource discovery allowed. + +While a higher level of concurrency typically has a positive impact on performance, increasing these numbers too much can cause performance issues at the vCenter server. A rule of thumb is to set these parameters to the number of virtual machines divided by 1500 and rounded up to the nearest integer. + ## Measurements & Fields - Cluster Stats From c37811ccd8b6c9dee832498ff6d62fe2a5371306 Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Wed, 30 Jan 2019 01:01:17 -0500 Subject: [PATCH 0556/1815] Update docs on using filtering to group aggregates (#5349) --- docs/AGGREGATORS_AND_PROCESSORS.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/AGGREGATORS_AND_PROCESSORS.md b/docs/AGGREGATORS_AND_PROCESSORS.md index ffa9c8f7e..9cbc39381 100644 --- a/docs/AGGREGATORS_AND_PROCESSORS.md +++ b/docs/AGGREGATORS_AND_PROCESSORS.md @@ -44,11 +44,13 @@ to control which metrics are passed through a processor or aggregator. If a metric is filtered out the metric bypasses the plugin and is passed downstream to the next plugin. -**Processor** plugins process metrics as they pass through and immediately emit +### Processor +Processor plugins process metrics as they pass through and immediately emit results based on the values they process. For example, this could be printing all metrics or adding a tag to all metrics that pass through. -**Aggregator** plugins, on the other hand, are a bit more complicated. Aggregators +### Aggregator +Aggregator plugins, on the other hand, are a bit more complicated. 
Aggregators are typically for emitting new _aggregate_ metrics, such as a running mean, minimum, maximum, quantiles, or standard deviation. For this reason, all _aggregator_ plugins are configured with a `period`. The `period` is the size of the window @@ -58,6 +60,10 @@ Since many users will only care about their aggregates and not every single metr gathered, there is also a `drop_original` argument, which tells Telegraf to only emit the aggregates and not the original metrics. +Since aggregates are created for each measurement, field, and unique tag combination +the plugin receives, you can make use of `taginclude` to group +aggregates by specific tags only. + **NOTE** That since aggregators only aggregate metrics within their period, that historical data is not supported. In other words, if your metric timestamp is more than `now() - period` in the past, it will not be aggregated. If this is a feature From 2857f07af9c1e94c28466838f8f323f436d5663f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 1 Feb 2019 12:12:15 -0800 Subject: [PATCH 0557/1815] Add stats option to basicstats sample config --- plugins/aggregators/basicstats/README.md | 8 +------- plugins/aggregators/basicstats/basicstats.go | 4 +++- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/plugins/aggregators/basicstats/README.md b/plugins/aggregators/basicstats/README.md index f5023dfc7..e9318036b 100644 --- a/plugins/aggregators/basicstats/README.md +++ b/plugins/aggregators/basicstats/README.md @@ -8,20 +8,14 @@ emitting the aggregate every `period` seconds. ```toml # Keep the aggregate basicstats of each metric passing through. [[aggregators.basicstats]] - - ## General Aggregator Arguments: - ## The period on which to flush & clear the aggregator. period = "30s" - ## If true, the original metric will be dropped by the ## aggregator and will not get sent to the output plugins. drop_original = false - ## BasicStats Arguments: - ## Configures which basic stats to push as fields - stats = ["count","min","max","mean","stdev","s2","sum"] + # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"] ``` - stats diff --git a/plugins/aggregators/basicstats/basicstats.go b/plugins/aggregators/basicstats/basicstats.go index c5c7e5d3f..d054f39f0 100644 --- a/plugins/aggregators/basicstats/basicstats.go +++ b/plugins/aggregators/basicstats/basicstats.go @@ -47,12 +47,14 @@ type basicstats struct { } var sampleConfig = ` - ## General Aggregator Arguments: ## The period on which to flush & clear the aggregator. period = "30s" ## If true, the original metric will be dropped by the ## aggregator and will not get sent to the output plugins. 
drop_original = false + + ## Configures which basic stats to push as fields + # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"] ` func (m *BasicStats) SampleConfig() string { From 8e46414da2901a4d25e00de14ebd2ac0bc91d6fb Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 1 Feb 2019 12:26:00 -0800 Subject: [PATCH 0558/1815] Clean pubsub sample config --- plugins/inputs/cloud_pubsub/pubsub.go | 41 +++++++++++++------------- plugins/outputs/cloud_pubsub/pubsub.go | 1 - 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/plugins/inputs/cloud_pubsub/pubsub.go b/plugins/inputs/cloud_pubsub/pubsub.go index bb22a8dcb..8c2b600b0 100644 --- a/plugins/inputs/cloud_pubsub/pubsub.go +++ b/plugins/inputs/cloud_pubsub/pubsub.go @@ -1,16 +1,17 @@ package cloud_pubsub import ( - "cloud.google.com/go/pubsub" "context" "fmt" + "sync" + + "cloud.google.com/go/pubsub" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" "golang.org/x/oauth2/google" "google.golang.org/api/option" - "sync" ) type empty struct{} @@ -256,22 +257,22 @@ const sampleConfig = ` ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" - ## Optional. Filepath for GCP credentials JSON file to authorize calls to - ## PubSub APIs. If not set explicitly, Telegraf will attempt to use - ## Application Default Credentials, which is preferred. + ## Optional. Filepath for GCP credentials JSON file to authorize calls to + ## PubSub APIs. If not set explicitly, Telegraf will attempt to use + ## Application Default Credentials, which is preferred. # credentials_file = "path/to/my/creds.json" - ## Optional. Maximum byte length of a message to consume. - ## Larger messages are dropped with an error. If less than 0 or unspecified, + ## Optional. Maximum byte length of a message to consume. + ## Larger messages are dropped with an error. If less than 0 or unspecified, ## treated as no limit. # max_message_len = 1000000 - ## Optional. Maximum messages to read from PubSub that have not been written + ## Optional. Maximum messages to read from PubSub that have not been written ## to an output. Defaults to %d. ## For best throughput set based on the number of metrics within ## each message and the size of the output's metric_batch_size. ## - ## For example, if each message contains 10 metrics and the output + ## For example, if each message contains 10 metrics and the output ## metric_batch_size is 1000, setting this to 100 will ensure that a ## full batch is collected and the write is triggered immediately without ## waiting until the next flush_interval. @@ -280,28 +281,28 @@ const sampleConfig = ` ## The following are optional Subscription ReceiveSettings in PubSub. ## Read more about these values: ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings - + ## Optional. Maximum number of seconds for which a PubSub subscription ## should auto-extend the PubSub ACK deadline for each message. If less than ## 0, auto-extension is disabled. # max_extension = 0 - ## Optional. Maximum number of unprocessed messages in PubSub - ## (unacknowledged but not yet expired in PubSub). - ## A value of 0 is treated as the default PubSub value. + ## Optional. Maximum number of unprocessed messages in PubSub + ## (unacknowledged but not yet expired in PubSub). + ## A value of 0 is treated as the default PubSub value. ## Negative values will be treated as unlimited. 
# max_outstanding_messages = 0 - ## Optional. Maximum size in bytes of unprocessed messages in PubSub - ## (unacknowledged but not yet expired in PubSub). - ## A value of 0 is treated as the default PubSub value. + ## Optional. Maximum size in bytes of unprocessed messages in PubSub + ## (unacknowledged but not yet expired in PubSub). + ## A value of 0 is treated as the default PubSub value. ## Negative values will be treated as unlimited. # max_outstanding_bytes = 0 - ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn - ## to pull messages from PubSub concurrently. This limit applies to each - ## subscription separately and is treated as the PubSub default if less than - ## 1. Note this setting does not limit the number of messages that can be + ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn + ## to pull messages from PubSub concurrently. This limit applies to each + ## subscription separately and is treated as the PubSub default if less than + ## 1. Note this setting does not limit the number of messages that can be ## processed concurrently (use "max_outstanding_messages" instead). # max_receiver_go_routines = 0 ` diff --git a/plugins/outputs/cloud_pubsub/pubsub.go b/plugins/outputs/cloud_pubsub/pubsub.go index bc81bf580..0edaec617 100644 --- a/plugins/outputs/cloud_pubsub/pubsub.go +++ b/plugins/outputs/cloud_pubsub/pubsub.go @@ -15,7 +15,6 @@ import ( ) const sampleConfig = ` -[[inputs.pubsub]] ## Required. Name of Google Cloud Platform (GCP) Project that owns ## the given PubSub subscription. project = "my-project" From 35381707dbf3cf6e6679289ab75db0d36ecc5b4e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 1 Feb 2019 12:26:22 -0800 Subject: [PATCH 0559/1815] Fix grammar in influxdb_v2 README --- plugins/outputs/influxdb_v2/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/influxdb_v2/README.md b/plugins/outputs/influxdb_v2/README.md index 0c99691bd..245391d48 100644 --- a/plugins/outputs/influxdb_v2/README.md +++ b/plugins/outputs/influxdb_v2/README.md @@ -19,7 +19,7 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v2.x] HTTP service. ## Organization is the name of the organization you wish to write to. organization = "" - ## Bucket to the name fo the bucketwrite into; must exist. + ## Destination bucket to write into. bucket = "" ## Timeout for HTTP messages. From a98483cc1118a25da41a43b6b9f5ebdc3c320e7e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 1 Feb 2019 12:27:02 -0800 Subject: [PATCH 0560/1815] Update telegraf.conf --- etc/telegraf.conf | 331 ++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 305 insertions(+), 26 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index c6f679952..7cf955c4e 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -93,6 +93,7 @@ # urls = ["http://127.0.0.1:8086"] ## The target database for metrics; will be created as needed. + ## For UDP url endpoint database needs to be configured on server side. # database = "telegraf" ## If true, no CREATE DATABASE queries will be sent. Set to true when using @@ -293,6 +294,54 @@ # # resource_id = "" +# # Publish Telegraf metrics to a Google Cloud PubSub topic +# [[outputs.cloud_pubsub]] +# ## Required. Name of Google Cloud Platform (GCP) Project that owns +# ## the given PubSub subscription. +# project = "my-project" +# +# ## Required. Name of PubSub subscription to ingest metrics from. +# subscription = "my-subscription" +# +# ## Required. 
Data format to consume. +# ## Each data format has its own unique set of configuration options. +# ## Read more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" +# +# ## Optional. If true, will send all metrics per write in one PubSub message. +# # send_batched = true +# +# ## The following publish_* parameters specifically configures batching +# ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read +# ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings +# +# ## Optional. Send a request to PubSub (i.e. actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1. +# # publish_count_threshold = 1000 +# +# ## Optional. Send a request to PubSub (i.e. actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1 +# # publish_byte_threshold = 1000000 +# +# ## Optional. Specifically configures requests made to the PubSub API. +# # publish_num_go_routines = 2 +# +# ## Optional. Specifies a timeout for requests to the PubSub API. +# # publish_timeout = "30s" +# +# ## Optional. PubSub attributes to add to metrics. +# # [[inputs.pubsub.attributes]] +# # my_attr = "tag_value" + + # # Configuration for AWS CloudWatch output. # [[outputs.cloudwatch]] # ## Amazon REGION @@ -890,6 +939,9 @@ # ## If set, enable TLS with the given certificate. # # tls_cert = "/etc/ssl/telegraf.crt" # # tls_key = "/etc/ssl/telegraf.key" +# +# ## Export metric collection time. +# # export_timestamp = false # # Configuration for the Riemann server to send metrics to @@ -981,38 +1033,44 @@ # # Configuration for Wavefront server to send metrics to # [[outputs.wavefront]] -# ## DNS name of the wavefront proxy server -# host = "wavefront.example.com" +# ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy +# ## If using Wavefront Proxy, also specify port. example: http://proxyserver:2878 +# url = "https://metrics.wavefront.com" # -# ## Port that the Wavefront proxy server listens on -# port = 2878 +# ## Authentication Token for Wavefront. Only required if using Direct Ingestion +# #token = "DUMMY_TOKEN" +# +# ## DNS name of the wavefront proxy server. Do not use if url is specified +# #host = "wavefront.example.com" +# +# ## Port that the Wavefront proxy server listens on. Do not use if url is specified +# #port = 2878 # # ## prefix for metrics keys # #prefix = "my.specific.prefix." # -# ## whether to use "value" for name of simple fields +# ## whether to use "value" for name of simple fields. default is false # #simple_fields = false # -# ## character to use between metric and field name. defaults to . (dot) +# ## character to use between metric and field name. default is . (dot) # #metric_separator = "." # -# ## Convert metric name paths to use metricSeperator character -# ## When true (default) will convert all _ (underscore) chartacters in final metric name +# ## Convert metric name paths to use metricSeparator character +# ## When true will convert all _ (underscore) characters in final metric name. 
default is true # #convert_paths = true # # ## Use Regex to sanitize metric and tag names from invalid characters -# ## Regex is more thorough, but significantly slower +# ## Regex is more thorough, but significantly slower. default is false # #use_regex = false # # ## point tags to use as the source name for Wavefront (if none found, host will be used) -# #source_override = ["hostname", "agent_host", "node_host"] +# #source_override = ["hostname", "address", "agent_host", "node_host"] # -# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default true +# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true # #convert_bool = true # # ## Define a mapping, namespaced by metric prefix, from string values to numeric values -# ## The example below maps "green" -> 1.0, "yellow" -> 0.5, "red" -> 0.0 for -# ## any metrics beginning with "elasticsearch" +# ## deprecated in 1.9; use the enum processor plugin # #[[outputs.wavefront.string_to_number.elasticsearch]] # # green = 1.0 # # yellow = 0.5 @@ -1178,8 +1236,8 @@ # # field = "read_count" # # suffix = "_count" # -# ## Replace substrings within field names -# # [[processors.strings.trim_suffix]] +# ## Replace all non-overlapping instances of old with new +# # [[processors.strings.replace]] # # measurement = "*" # # old = ":" # # new = "_" @@ -1242,12 +1300,14 @@ # # Keep the aggregate basicstats of each metric passing through. # [[aggregators.basicstats]] -# ## General Aggregator Arguments: # ## The period on which to flush & clear the aggregator. # period = "30s" # ## If true, the original metric will be dropped by the # ## aggregator and will not get sent to the output plugins. # drop_original = false +# +# ## Configures which basic stats to push as fields +# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"] # # Create aggregate histograms. @@ -1339,6 +1399,8 @@ ## Currently only Linux is supported via udev properties. You can view ## available properties for a device by running: ## 'udevadm info -q property -n /dev/sda' + ## Note: Most, but not all, udev properties can be accessed this way. Properties + ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] # ## Using the same metadata source as device_tags, you can also customize the @@ -1738,6 +1800,10 @@ # ## Works with CouchDB stats endpoints out of the box # ## Multiple Hosts from which to read CouchDB stats: # hosts = ["http://localhost:8086/_stats"] +# +# ## Use HTTP Basic Authentication. +# # basic_username = "telegraf" +# # basic_password = "p@ssw0rd" # # Input plugin for DC/OS metrics @@ -2151,6 +2217,13 @@ # # username = "username" # # password = "pa$$word" # +# ## HTTP entity-body to send with POST/PUT requests. +# # body = "" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -2309,9 +2382,17 @@ # # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. # [[inputs.interrupts]] +# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is +# ## stored as a field. +# ## +# ## The default is false for backwards compatibility, and will be changed to +# ## true in a future version. It is recommended to set to true on new +# ## deployments. 
+# # cpu_as_tag = false +# # ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. # # [inputs.interrupts.tagdrop] -# # irq = [ "NET_RX", "TASKLET" ] +# # irq = [ "NET_RX", "TASKLET" ] # # Read metrics from the bare metal servers via IPMI @@ -2378,6 +2459,50 @@ # # no configuration +# # Read jobs and cluster metrics from Jenkins instances +# [[inputs.jenkins]] +# ## The Jenkins URL +# url = "http://my-jenkins-instance:8080" +# # username = "admin" +# # password = "admin" +# +# ## Set response_timeout +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional Max Job Build Age filter +# ## Default 1 hour, ignore builds older than max_build_age +# # max_build_age = "1h" +# +# ## Optional Sub Job Depth filter +# ## Jenkins can have unlimited layer of sub jobs +# ## This config will limit the layers of pulling, default value 0 means +# ## unlimited pulling until no more sub jobs +# # max_subjob_depth = 0 +# +# ## Optional Sub Job Per Layer +# ## In workflow-multibranch-plugin, each branch will be created as a sub job. +# ## This config will limit to call only the lasted branches in each layer, +# ## empty will use default value 10 +# # max_subjob_per_layer = 10 +# +# ## Jobs to exclude from gathering +# # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"] +# +# ## Nodes to exclude from gathering +# # node_exclude = [ "node1", "node2" ] +# +# ## Worker pool for jenkins plugin only +# ## Empty this field will use default value 5 +# # max_connections = 5 + + # # Read JMX metrics through Jolokia # [[inputs.jolokia]] # # DEPRECATED: the jolokia plugin has been deprecated in favor of the @@ -2551,10 +2676,12 @@ # # Read metrics from the kubernetes kubelet api # [[inputs.kubernetes]] # ## URL for the kubelet -# url = "http://1.1.1.1:10255" +# url = "http://127.0.0.1:10255" # -# ## Use bearer token for authorization -# # bearer_token = /path/to/bearer/token +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# # bearer_token = "/path/to/bearer/token" +# ## OR +# # bearer_token_string = "abc_123" # # ## Set response_timeout (default 5 seconds) # # response_timeout = "5s" @@ -2693,6 +2820,31 @@ # # insecure_skip_verify = false +# # Aggregates the contents of multiple files into a single point +# [[inputs.multifile]] +# ## Base directory where telegraf will look for files. +# ## Omit this option to use absolute paths. +# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0" +# +# ## If true, Telegraf discard all data when a single file can't be read. +# ## Else, Telegraf omits the field generated from this file. +# # fail_early = true +# +# ## Files to parse each interval. +# [[inputs.multifile.file]] +# file = "in_pressure_input" +# dest = "pressure" +# conversion = "float" +# [[inputs.multifile.file]] +# file = "in_temp_input" +# dest = "temperature" +# conversion = "float(3)" +# [[inputs.multifile.file]] +# file = "in_humidityrelative_input" +# dest = "humidityrelative" +# conversion = "float(3)" + + # # Read metrics from one or many mysql servers # [[inputs.mysql]] # ## specify servers via a url matching: @@ -2785,6 +2937,21 @@ # # response_timeout = "5s" +# # Neptune Apex data collector +# [[inputs.neptune_apex]] +# ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex. 
+# ## Measurements will be logged under "apex". +# +# ## The base URL of the local Apex(es). If you specify more than one server, they will +# ## be differentiated by the "source" tag. +# servers = [ +# "http://apex.local", +# ] +# +# ## The response_timeout specifies how long to wait for a reply from the Apex. +# #response_timeout = "5s" + + # # Read metrics about network interface usage # [[inputs.net]] # ## By default, telegraf gathers stats from any up interface (excluding loopback) @@ -2869,6 +3036,36 @@ # response_timeout = "5s" +# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module) +# [[inputs.nginx_upstream_check]] +# ## An URL where Nginx Upstream check module is enabled +# ## It should be set to return a JSON formatted response +# url = "http://127.0.0.1/status?format=json" +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "check.example.com" +# +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Read Nginx virtual host traffic status module information (nginx-module-vts) # [[inputs.nginx_vts]] # ## An array of ngx_http_status_module or status URI to gather stats. @@ -3604,7 +3801,7 @@ # # ## When set to true, thread metrics are tagged with the thread id. # ## -# ## The default is false for backwards compatibility, and will be change to +# ## The default is false for backwards compatibility, and will be changed to # ## true in a future version. It is recommended to set to true on new # ## deployments. # thread_as_tag = false @@ -3627,6 +3824,9 @@ # ## Optional name for the varnish instance (or working directory) to query # ## Usually appened after -n in varnish cli # # instance_name = instanceName +# +# ## Timeout for varnishstat command +# # timeout = "1s" # # Monitor wifi signal strength and quality @@ -3789,6 +3989,71 @@ # ] +# # Read metrics from Google PubSub +# [[inputs.cloud_pubsub]] +# ## Required. Name of Google Cloud Platform (GCP) Project that owns +# ## the given PubSub subscription. +# project = "my-project" +# +# ## Required. Name of PubSub subscription to ingest metrics from. +# subscription = "my-subscription" +# +# ## Required. Data format to consume. +# ## Each data format has its own unique set of configuration options. +# ## Read more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" +# +# ## Optional. Maximum byte length of a message to consume. +# ## Larger messages are dropped with an error. If less than 0 or unspecified, +# ## treated as no limit. +# # max_message_len = 1000000 +# +# ## Optional. Maximum messages to read from PubSub that have not been written +# ## to an output. Defaults to 1000. 
+# ## For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message contains 10 metrics and the output +# ## metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## The following are optional Subscription ReceiveSettings in PubSub. +# ## Read more about these values: +# ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings +# +# ## Optional. Maximum number of seconds for which a PubSub subscription +# ## should auto-extend the PubSub ACK deadline for each message. If less than +# ## 0, auto-extension is disabled. +# # max_extension = 0 +# +# ## Optional. Maximum number of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_messages = 0 +# +# ## Optional. Maximum size in bytes of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_bytes = 0 +# +# ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn +# ## to pull messages from PubSub concurrently. This limit applies to each +# ## subscription separately and is treated as the PubSub default if less than +# ## 1. Note this setting does not limit the number of messages that can be +# ## processed concurrently (use "max_outstanding_messages" instead). +# # max_receiver_go_routines = 0 + + # # Influx HTTP write listener # [[inputs.http_listener]] # ## Address and port to host HTTP listener on @@ -3946,6 +4211,8 @@ # brokers = ["localhost:9092"] # ## topic(s) to consume # topics = ["telegraf"] +# ## Add topic as tag if topic_tag is not empty +# # topic_tag = "" # # ## Optional Client id # # client_id = "Telegraf" @@ -4319,8 +4586,10 @@ # ## - prometheus.io/port: If port is not 9102 use this annotation # # monitor_kubernetes_pods = true # -# ## Use bearer token for authorization -# # bearer_token = /path/to/bearer/token +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# # bearer_token = "/path/to/bearer/token" +# ## OR +# # bearer_token_string = "abc_123" # # ## Specify timeout duration for slower prometheus clients (default is 3s) # # response_timeout = "3s" @@ -4440,7 +4709,7 @@ # percentile_limit = 1000 -# # Accepts syslog messages per RFC5425 +# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587 # [[inputs.syslog]] # ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514 # ## Protocol, address and port to host the syslog receiver. @@ -4468,6 +4737,16 @@ # ## 0 means unlimited. # # read_timeout = "5s" # +# ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). +# ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), +# ## or the non-transparent framing technique (RFC6587#section-3.4.2). +# ## Must be one of "octect-counting", "non-transparent". +# # framing = "octet-counting" +# +# ## The trailer to be expected in case of non-trasparent framing (default = "LF"). 
+# ## Must be one of "LF", or "NUL". +# # trailer = "LF" +# # ## Whether to parse in best effort mode or not (default = false). # ## By default best effort parsing is off. # # best_effort = false @@ -4626,7 +4905,7 @@ # ## Clusters # # cluster_metric_include = [] ## if omitted or empty, all metrics are collected # # cluster_metric_exclude = [] ## Nothing excluded by default -# # cluster_instances = true ## true by default +# # cluster_instances = false ## false by default # # ## Datastores # # datastore_metric_include = [] ## if omitted or empty, all metrics are collected @@ -4663,7 +4942,7 @@ # # object_discovery_interval = "300s" # # ## timeout applies to any of the api request made to vcenter -# # timeout = "20s" +# # timeout = "60s" # # ## Optional SSL Config # # ssl_ca = "/path/to/cafile" From 15c65b08e59ba4b190529f81cb06e730c6d95f32 Mon Sep 17 00:00:00 2001 From: Grace Do Date: Fri, 1 Feb 2019 16:20:24 -0800 Subject: [PATCH 0561/1815] Check fields in testutil AssertDoesNotContainsTaggedFields (#5365) --- testutil/accumulator.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 65c8883e0..3fe291699 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -340,9 +340,10 @@ func (a *Accumulator) AssertDoesNotContainsTaggedFields( continue } - if p.Measurement == measurement { - assert.Equal(t, fields, p.Fields) - msg := fmt.Sprintf("found measurement %s with tags %v which should not be there", measurement, tags) + if p.Measurement == measurement && reflect.DeepEqual(fields, p.Fields) { + msg := fmt.Sprintf( + "found measurement %s with tagged fields (tags %v) which should not be there", + measurement, tags) assert.Fail(t, msg) } } From 4e8aa401e010dc6b73bddaaa2d42df55a9053d86 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Sat, 2 Feb 2019 18:05:16 -0800 Subject: [PATCH 0562/1815] Use github.com/go-logfmt/logfmt 0.4.0 --- Gopkg.lock | 6 +++--- Gopkg.toml | 5 ++++- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 98eafd39d..d07773be9 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -408,12 +408,12 @@ version = "v1.38.1" [[projects]] - digest = "1:6a4a01d58b227c4b6b11111b9f172ec5c17682b82724e58e6daf3f19f4faccd8" + digest = "1:df89444601379b2e1ee82bf8e6b72af9901cbeed4b469fa380a519c89c339310" name = "github.com/go-logfmt/logfmt" packages = ["."] pruneopts = "" - revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5" - version = "v0.3.0" + revision = "07c9b44f60d7ffdfb7d8efe1ad539965737836dc" + version = "v0.4.0" [[projects]] digest = "1:96c4a6ff4206086347bfe28e96e092642882128f45ecb8dc8f15f3e6f6703af0" diff --git a/Gopkg.toml b/Gopkg.toml index d1cbd081d..62fe864ac 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -230,7 +230,7 @@ name = "github.com/vmware/govmomi" version = "0.19.0" - [[constraint]] +[[constraint]] name = "github.com/Azure/go-autorest" version = "10.12.0" @@ -254,3 +254,6 @@ name = "github.com/karrick/godirwalk" version = "1.7.5" +[[constraint]] + name = "github.com/go-logfmt/logfmt" + version = "0.4.0" From 6b144db504ed27607bb26a2ecb5cf6b3c4d74312 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 4 Feb 2019 11:19:04 -0800 Subject: [PATCH 0563/1815] Remove unimplemented auth options from azure_monitor documentation (#5348) --- plugins/outputs/azure_monitor/README.md | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/plugins/outputs/azure_monitor/README.md b/plugins/outputs/azure_monitor/README.md index d1b91a838..28bb66af6 
100644 --- a/plugins/outputs/azure_monitor/README.md +++ b/plugins/outputs/azure_monitor/README.md @@ -98,13 +98,7 @@ following configurations: 1. **Client Credentials**: Azure AD Application ID and Secret. - Set the following Telegraf configuration variables: - - - `azure_tenant_id`: Specifies the Tenant to which to authenticate. - - `azure_client_id`: Specifies the app client ID to use. - - `azure_client_secret`: Specifies the app secret to use. - - Or set the following environment variables: + Set the following environment variables: - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. - `AZURE_CLIENT_ID`: Specifies the app client ID to use. From 00734c56c4183be3556442457e3ad7135985cb45 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 4 Feb 2019 11:21:16 -0800 Subject: [PATCH 0564/1815] Return new Syslog instance for each plugin (#5372) --- plugins/inputs/syslog/syslog.go | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go index ab2277caa..51d2ee455 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -438,16 +438,16 @@ func getNanoNow() time.Time { } func init() { - receiver := &Syslog{ - Address: ":6514", - now: getNanoNow, - ReadTimeout: &internal.Duration{ - Duration: defaultReadTimeout, - }, - Framing: OctetCounting, - Trailer: nontransparent.LF, - Separator: "_", - } - - inputs.Add("syslog", func() telegraf.Input { return receiver }) + inputs.Add("syslog", func() telegraf.Input { + return &Syslog{ + Address: ":6514", + now: getNanoNow, + ReadTimeout: &internal.Duration{ + Duration: defaultReadTimeout, + }, + Framing: OctetCounting, + Trailer: nontransparent.LF, + Separator: "_", + } + }) } From 6be5b55094f5b574715c68a780702d6ef23982dc Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 4 Feb 2019 11:24:49 -0800 Subject: [PATCH 0565/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 56576aa2c..b26d54936 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,6 +51,7 @@ - [#5334](https://github.com/influxdata/telegraf/issues/5334): Fix skip_rows and skip_columns options in csv parser. - [#5181](https://github.com/influxdata/telegraf/issues/5181): Always send basic auth in jenkins input. - [#5346](https://github.com/influxdata/telegraf/pull/5346): Build official packages with Go 1.11.5. +- [#5368](https://github.com/influxdata/telegraf/issues/5368): Cannot define multiple syslog plugins. 
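The changelog entry above corresponds to patch 0564 earlier in this series: registering a single shared `Syslog` value meant every `[[inputs.syslog]]` section in a config mutated the same instance. The sketch below illustrates the factory pattern the patch adopts; `Input`, `Add`, and `registry` here are simplified stand-ins for Telegraf's plugin registry, not its actual API.

```go
package main

import "fmt"

// Input is a simplified stand-in for telegraf.Input.
type Input interface{ Description() string }

// Syslog is a simplified stand-in for the syslog plugin struct.
type Syslog struct{ Address string }

func (s *Syslog) Description() string { return "syslog on " + s.Address }

// registry and Add mimic the inputs.Add registration seen in the patch.
var registry = map[string]func() Input{}

func Add(name string, creator func() Input) { registry[name] = creator }

func main() {
	// Pre-patch shape: one value captured by the closure, shared by all
	// configured plugin sections.
	shared := &Syslog{Address: ":6514"}
	Add("shared", func() Input { return shared })

	// Post-patch shape: a fresh value on every call.
	Add("fresh", func() Input { return &Syslog{Address: ":6514"} })

	a, b := registry["shared"](), registry["shared"]()
	fmt.Println(a == b) // true: both sections would mutate the same state

	c, d := registry["fresh"](), registry["fresh"]()
	fmt.Println(c == d) // false: each section gets its own instance
}
```

Running the sketch prints `true` then `false`: the closure over a shared pointer hands every config section the same state, while the per-call constructor returns an independent instance.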
## v1.9.3 [2019-01-22] From 94de223916ffb732a46a277db29a87acd0f3d727 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 4 Feb 2019 13:28:43 -0700 Subject: [PATCH 0566/1815] Add kube_inventory input plugin (#5110) --- Gopkg.lock | 24 ++ Gopkg.toml | 4 + plugins/inputs/all/all.go | 1 + plugins/inputs/kube_inventory/README.md | 238 ++++++++++++++++++ plugins/inputs/kube_inventory/client.go | 97 +++++++ plugins/inputs/kube_inventory/client_test.go | 35 +++ plugins/inputs/kube_inventory/daemonset.go | 49 ++++ .../inputs/kube_inventory/daemonset_test.go | 123 +++++++++ plugins/inputs/kube_inventory/deployment.go | 40 +++ .../inputs/kube_inventory/deployment_test.go | 131 ++++++++++ plugins/inputs/kube_inventory/kube_state.go | 175 +++++++++++++ plugins/inputs/kube_inventory/node.go | 56 +++++ plugins/inputs/kube_inventory/node_test.go | 172 +++++++++++++ .../inputs/kube_inventory/persistentvolume.go | 52 ++++ .../kube_inventory/persistentvolume_test.go | 112 +++++++++ .../kube_inventory/persistentvolumeclaim.go | 49 ++++ .../persistentvolumeclaim_test.go | 115 +++++++++ plugins/inputs/kube_inventory/pod.go | 87 +++++++ plugins/inputs/kube_inventory/pod_test.go | 199 +++++++++++++++ plugins/inputs/kube_inventory/statefulset.go | 46 ++++ .../inputs/kube_inventory/statefulset_test.go | 123 +++++++++ 21 files changed, 1928 insertions(+) create mode 100644 plugins/inputs/kube_inventory/README.md create mode 100644 plugins/inputs/kube_inventory/client.go create mode 100644 plugins/inputs/kube_inventory/client_test.go create mode 100644 plugins/inputs/kube_inventory/daemonset.go create mode 100644 plugins/inputs/kube_inventory/daemonset_test.go create mode 100644 plugins/inputs/kube_inventory/deployment.go create mode 100644 plugins/inputs/kube_inventory/deployment_test.go create mode 100644 plugins/inputs/kube_inventory/kube_state.go create mode 100644 plugins/inputs/kube_inventory/node.go create mode 100644 plugins/inputs/kube_inventory/node_test.go create mode 100644 plugins/inputs/kube_inventory/persistentvolume.go create mode 100644 plugins/inputs/kube_inventory/persistentvolume_test.go create mode 100644 plugins/inputs/kube_inventory/persistentvolumeclaim.go create mode 100644 plugins/inputs/kube_inventory/persistentvolumeclaim_test.go create mode 100644 plugins/inputs/kube_inventory/pod.go create mode 100644 plugins/inputs/kube_inventory/pod_test.go create mode 100644 plugins/inputs/kube_inventory/statefulset.go create mode 100644 plugins/inputs/kube_inventory/statefulset_test.go diff --git a/Gopkg.lock b/Gopkg.lock index d07773be9..93834b78a 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -387,8 +387,11 @@ packages = [ ".", "apis/apiextensions/v1beta1", + "apis/apps/v1beta1", + "apis/apps/v1beta2", "apis/core/v1", "apis/meta/v1", + "apis/policy/v1beta1", "apis/resource", "runtime", "runtime/schema", @@ -696,6 +699,14 @@ pruneopts = "" revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" +[[projects]] + branch = "master" + digest = "1:e7737c09200582508f4f67227c39e7c4667cc6067a6d2b2e679654e43e8a8cb4" + name = "github.com/kubernetes/apimachinery" + packages = ["pkg/api/resource"] + pruneopts = "" + revision = "d41becfba9ee9bf8e55cec1dd3934cd7cfc04b99" + [[projects]] branch = "develop" digest = "1:3e66a61a57bbbe832c338edb3a623be0deb3dec650c2f3515149658898287e37" @@ -1424,6 +1435,14 @@ revision = "7f5bdfd858bb064d80559b2a32b86669c5de5d3b" version = "v3.0.5" +[[projects]] + digest = "1:75fb3fcfc73a8c723efde7777b40e8e8ff9babf30d8c56160d01beffea8a95a6" + name = 
"gopkg.in/inf.v0" + packages = ["."] + pruneopts = "" + revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" + version = "v0.9.1" + [[projects]] digest = "1:367baf06b7dbd0ef0bbdd785f6a79f929c96b0c18e9d3b29c0eed1ac3f5db133" name = "gopkg.in/ldap.v2" @@ -1511,8 +1530,12 @@ "github.com/docker/libnetwork/ipvs", "github.com/eclipse/paho.mqtt.golang", "github.com/ericchiang/k8s", + "github.com/ericchiang/k8s/apis/apps/v1beta1", + "github.com/ericchiang/k8s/apis/apps/v1beta2", "github.com/ericchiang/k8s/apis/core/v1", "github.com/ericchiang/k8s/apis/meta/v1", + "github.com/ericchiang/k8s/apis/resource", + "github.com/ericchiang/k8s/util/intstr", "github.com/go-logfmt/logfmt", "github.com/go-redis/redis", "github.com/go-sql-driver/mysql", @@ -1537,6 +1560,7 @@ "github.com/kardianos/service", "github.com/karrick/godirwalk", "github.com/kballard/go-shellquote", + "github.com/kubernetes/apimachinery/pkg/api/resource", "github.com/matttproud/golang_protobuf_extensions/pbutil", "github.com/miekg/dns", "github.com/multiplay/go-ts3", diff --git a/Gopkg.toml b/Gopkg.toml index 62fe864ac..51fc1fbb6 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -254,6 +254,10 @@ name = "github.com/karrick/godirwalk" version = "1.7.5" +[[constraint]] + branch = "master" + name = "github.com/kubernetes/apimachinery" + [[constraint]] name = "github.com/go-logfmt/logfmt" version = "0.4.0" diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 0a69ac21d..2435e1519 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -63,6 +63,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/kernel" _ "github.com/influxdata/telegraf/plugins/inputs/kernel_vmstat" _ "github.com/influxdata/telegraf/plugins/inputs/kibana" + _ "github.com/influxdata/telegraf/plugins/inputs/kube_inventory" _ "github.com/influxdata/telegraf/plugins/inputs/kubernetes" _ "github.com/influxdata/telegraf/plugins/inputs/leofs" _ "github.com/influxdata/telegraf/plugins/inputs/linux_sysctl_fs" diff --git a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md new file mode 100644 index 000000000..9a71ec4a6 --- /dev/null +++ b/plugins/inputs/kube_inventory/README.md @@ -0,0 +1,238 @@ +# Kube_Inventory Plugin +This plugin generates metrics derived from the state of the following Kubernetes resources: + - daemonsets + - deployments + - nodes + - persistentvolumes + - persistentvolumeclaims + - pods (containers) + - statefulsets + +#### Series Cardinality Warning + +This plugin may produce a high number of series which, when not controlled +for, will cause high load on your database. Use the following techniques to +avoid cardinality issues: + +- Use [metric filtering][] options to exclude unneeded measurements and tags. +- Write to a database with an appropriate [retention policy][]. +- Limit series cardinality in your database using the + [max-series-per-database][] and [max-values-per-tag][] settings. +- Consider using the [Time Series Index][tsi]. +- Monitor your databases [series cardinality][]. +- Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. + +### Configuration: + +```toml +[[inputs.kube_inventory]] + ## URL for the Kubernetes API + url = "https://127.0.0.1" + + ## Namespace to use + # namespace = "default" + + ## Use bearer token for authorization. 
('bearer_token' takes priority)
+  # bearer_token = "/path/to/bearer/token"
+  ## OR
+  # bearer_token_string = "abc_123"
+
+  ## Set response_timeout (default 5 seconds)
+  # response_timeout = "5s"
+
+  ## Optional Resources to exclude from gathering
+  ## Leave this blank to try to gather everything available.
+  ## Values can be - "daemonsets", "deployments", "nodes", "persistentvolumes",
+  ## "persistentvolumeclaims", "pods", "statefulsets"
+  # resource_exclude = [ "deployments", "nodes", "statefulsets" ]
+
+  ## Optional Resources to include when gathering
+  ## Overrides resource_exclude if both are set.
+  # resource_include = [ "deployments", "nodes", "statefulsets" ]
+
+  ## Optional TLS Config
+  # tls_ca = "/path/to/cafile"
+  # tls_cert = "/path/to/certfile"
+  # tls_key = "/path/to/keyfile"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+```
+
+#### Kubernetes Permissions
+
+If using [RBAC authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/), you will need to create a cluster role to list "persistentvolumes" and "nodes". You will then need to make an [aggregated ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles) that will eventually be bound to a user or group.
+```yaml
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: influx:cluster:viewer
+  labels:
+    rbac.authorization.k8s.io/aggregate-view-telegraf: "true"
+rules:
+- apiGroups: [""]
+  resources: ["persistentvolumes","nodes"]
+  verbs: ["get","list"]
+
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: influx:telegraf
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.authorization.k8s.io/aggregate-view-telegraf: "true"
+      rbac.authorization.k8s.io/aggregate-to-view: "true"
+rules: [] # Rules are automatically filled in by the controller manager.
+```
+
+Bind the newly created aggregated ClusterRole with the following config file, updating the subjects as needed.
+```yaml +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: influx:telegraf:viewer +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: influx:telegraf +subjects: +- kind: ServiceAccount + name: telegraf + namespace: default +``` + + +### Metrics: + ++ kubernetes_daemonset + - tags: + - daemonset_name + - namespace + - fields: + - generation + - current_number_scheduled + - desired_number_scheduled + - number_available + - number_misscheduled + - number_ready + - number_unavailable + - updated_number_scheduled + +- kubernetes_deployment + - tags: + - deployment_name + - namespace + - fields: + - replicas_available + - replicas_unavailable + - created + ++ kubernetes_node + - tags: + - node_name + - fields: + - capacity_cpu_cores + - capacity_memory_bytes + - capacity_pods + - allocatable_cpu_cores + - allocatable_memory_bytes + - allocatable_pods + +- kubernetes_persistentvolume + - tags: + - pv_name + - phase + - storageclass + - fields: + - phase_type (int, [see below](#pv-phase_type)) + ++ kubernetes_persistentvolumeclaim + - tags: + - pvc_name + - namespace + - phase + - storageclass + - fields: + - phase_type (int, [see below](#pvc-phase_type)) + +- kubernetes_pod_container + - tags: + - container_name + - namespace + - node_name + - pod_name + - fields: + - restarts_total + - state + - terminated_reason + - resource_requests_cpu_units + - resource_requests_memory_bytes + - resource_limits_cpu_units + - resource_limits_memory_bytes + ++ kubernetes_statefulset + - tags: + - statefulset_name + - namespace + - fields: + - created + - generation + - replicas + - replicas_current + - replicas_ready + - replicas_updated + - spec_replicas + - observed_generation + +#### pv `phase_type` + +The persistentvolume "phase" is saved in the `phase` tag with a correlated numeric field called `phase_type` corresponding with that tag value. + +|Tag value |Corresponding field value| +-----------|-------------------------| +|bound | 0 | +|failed | 1 | +|pending | 2 | +|released | 3 | +|available | 4 | +|unknown | 5 | + +#### pvc `phase_type` + +The persistentvolumeclaim "phase" is saved in the `phase` tag with a correlated numeric field called `phase_type` corresponding with that tag value. 
+ +|Tag value |Corresponding field value| +-----------|-------------------------| +|bound | 0 | +|lost | 1 | +|pending | 2 | +|unknown | 3 | + + +### Example Output: + +``` +kubernetes_configmap,configmap_name=envoy-config,namespace=default,resource_version=56593031 created=1544103867000000000i 1547597616000000000 +kubernetes_daemonset +kubernetes_deployment,deployment_name=deployd,namespace=default replicas_unavailable=0i,created=1544103082000000000i,replicas_available=1i 1547597616000000000 +kubernetes_node,node_name=ip-172-17-0-2.internal allocatable_pods=110i,capacity_memory_bytes=128837533696,capacity_pods=110i,capacity_cpu_cores=16i,allocatable_cpu_cores=16i,allocatable_memory_bytes=128732676096 1547597616000000000 +kubernetes_persistentvolume,phase=Released,pv_name=pvc-aaaaaaaa-bbbb-cccc-1111-222222222222,storageclass=ebs-1-retain phase_type=3i 1547597616000000000 +kubernetes_persistentvolumeclaim,namespace=default,phase=Bound,pvc_name=data-etcd-0,storageclass=ebs-1-retain phase_type=0i 1547597615000000000 +kubernetes_pod,namespace=default,node_name=ip-172-17-0-2.internal,pod_name=tick1 last_transition_time=1547578322000000000i,ready="false" 1547597616000000000 +kubernetes_pod_container,container_name=telegraf,namespace=default,node_name=ip-172-17-0-2.internal,pod_name=tick1,state=running resource_requests_cpu_units=0.1,resource_limits_memory_bytes=524288000,resource_limits_cpu_units=0.5,restarts_total=0i,state_code=0i,terminated_reason="",resource_requests_memory_bytes=524288000 1547597616000000000 +kubernetes_statefulset,namespace=default,statefulset_name=etcd replicas_updated=3i,spec_replicas=3i,observed_generation=1i,created=1544101669000000000i,generation=1i,replicas=3i,replicas_current=3i,replicas_ready=3i 1547597616000000000 +``` + + +[metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering +[retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ +[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000 +[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000 +[tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ +[series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality +[influx-docs]: https://docs.influxdata.com/influxdb/latest/ +[k8s-telegraf]: https://www.influxdata.com/blog/monitoring-kubernetes-architecture/ +[tick-charts]: https://github.com/influxdata/tick-charts diff --git a/plugins/inputs/kube_inventory/client.go b/plugins/inputs/kube_inventory/client.go new file mode 100644 index 000000000..bf207b0ad --- /dev/null +++ b/plugins/inputs/kube_inventory/client.go @@ -0,0 +1,97 @@ +package kube_inventory + +import ( + "context" + "time" + + "github.com/ericchiang/k8s" + "github.com/ericchiang/k8s/apis/apps/v1beta1" + "github.com/ericchiang/k8s/apis/apps/v1beta2" + "github.com/ericchiang/k8s/apis/core/v1" + + "github.com/influxdata/telegraf/internal/tls" +) + +type client struct { + namespace string + timeout time.Duration + *k8s.Client +} + +func newClient(baseURL, namespace, bearerToken string, timeout time.Duration, tlsConfig tls.ClientConfig) (*client, error) { + c, err := k8s.NewClient(&k8s.Config{ + Clusters: []k8s.NamedCluster{{Name: "cluster", Cluster: k8s.Cluster{ + Server: baseURL, + InsecureSkipTLSVerify: tlsConfig.InsecureSkipVerify, + CertificateAuthority: 
tlsConfig.TLSCA, + }}}, + Contexts: []k8s.NamedContext{{Name: "context", Context: k8s.Context{ + Cluster: "cluster", + AuthInfo: "auth", + Namespace: namespace, + }}}, + AuthInfos: []k8s.NamedAuthInfo{{Name: "auth", AuthInfo: k8s.AuthInfo{ + Token: bearerToken, + ClientCertificate: tlsConfig.TLSCert, + ClientKey: tlsConfig.TLSKey, + }}}, + }) + if err != nil { + return nil, err + } + + return &client{ + Client: c, + timeout: timeout, + namespace: namespace, + }, nil +} + +func (c *client) getDaemonSets(ctx context.Context) (*v1beta2.DaemonSetList, error) { + list := new(v1beta2.DaemonSetList) + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, c.namespace, list) +} + +func (c *client) getDeployments(ctx context.Context) (*v1beta1.DeploymentList, error) { + list := &v1beta1.DeploymentList{} + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, c.namespace, list) +} + +func (c *client) getNodes(ctx context.Context) (*v1.NodeList, error) { + list := new(v1.NodeList) + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, "", list) +} + +func (c *client) getPersistentVolumes(ctx context.Context) (*v1.PersistentVolumeList, error) { + list := new(v1.PersistentVolumeList) + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, "", list) +} + +func (c *client) getPersistentVolumeClaims(ctx context.Context) (*v1.PersistentVolumeClaimList, error) { + list := new(v1.PersistentVolumeClaimList) + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, c.namespace, list) +} + +func (c *client) getPods(ctx context.Context) (*v1.PodList, error) { + list := new(v1.PodList) + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, c.namespace, list) +} + +func (c *client) getStatefulSets(ctx context.Context) (*v1beta1.StatefulSetList, error) { + list := new(v1beta1.StatefulSetList) + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, c.namespace, list) +} diff --git a/plugins/inputs/kube_inventory/client_test.go b/plugins/inputs/kube_inventory/client_test.go new file mode 100644 index 000000000..4f54755b0 --- /dev/null +++ b/plugins/inputs/kube_inventory/client_test.go @@ -0,0 +1,35 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf/internal/tls" +) + +type mockHandler struct { + responseMap map[string]interface{} +} + +func toStrPtr(s string) *string { + return &s +} + +func toInt32Ptr(i int32) *int32 { + return &i +} + +func toInt64Ptr(i int64) *int64 { + return &i +} + +func toBoolPtr(b bool) *bool { + return &b +} + +func TestNewClient(t *testing.T) { + _, err := newClient("https://127.0.0.1:443/", "default", "abc123", time.Second, tls.ClientConfig{}) + if err != nil { + t.Errorf("Failed to create new client - %s", err.Error()) + } +} diff --git a/plugins/inputs/kube_inventory/daemonset.go b/plugins/inputs/kube_inventory/daemonset.go new file mode 100644 index 000000000..92c7bc195 --- /dev/null +++ b/plugins/inputs/kube_inventory/daemonset.go @@ -0,0 +1,49 @@ +package kube_inventory + +import ( + "context" + "time" + + "github.com/ericchiang/k8s/apis/apps/v1beta2" + + "github.com/influxdata/telegraf" +) + +func collectDaemonSets(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getDaemonSets(ctx) + if err != nil { + 
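+		// A failed list call is surfaced through the accumulator and aborts
+		// only this collector; the other resource collectors launched by
+		// Gather keep running.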
acc.AddError(err) + return + } + for _, d := range list.Items { + if err = ki.gatherDaemonSet(*d, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherDaemonSet(d v1beta2.DaemonSet, acc telegraf.Accumulator) error { + fields := map[string]interface{}{ + "generation": d.Metadata.GetGeneration(), + "current_number_scheduled": d.Status.GetCurrentNumberScheduled(), + "desired_number_scheduled": d.Status.GetDesiredNumberScheduled(), + "number_available": d.Status.GetNumberAvailable(), + "number_misscheduled": d.Status.GetNumberMisscheduled(), + "number_ready": d.Status.GetNumberReady(), + "number_unavailable": d.Status.GetNumberUnavailable(), + "updated_number_scheduled": d.Status.GetUpdatedNumberScheduled(), + } + tags := map[string]string{ + "daemonset_name": d.Metadata.GetName(), + "namespace": d.Metadata.GetNamespace(), + } + + if d.Metadata.CreationTimestamp.GetSeconds() != 0 { + fields["created"] = time.Unix(d.Metadata.CreationTimestamp.GetSeconds(), int64(d.Metadata.CreationTimestamp.GetNanos())).UnixNano() + } + + acc.AddFields(daemonSetMeasurement, fields, tags) + + return nil +} diff --git a/plugins/inputs/kube_inventory/daemonset_test.go b/plugins/inputs/kube_inventory/daemonset_test.go new file mode 100644 index 000000000..3f11df1ca --- /dev/null +++ b/plugins/inputs/kube_inventory/daemonset_test.go @@ -0,0 +1,123 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/apis/apps/v1beta2" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + + "github.com/influxdata/telegraf/testutil" +) + +func TestDaemonSet(t *testing.T) { + cli := &client{} + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no daemon set", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/daemonsets/": &v1beta2.DaemonSetList{}, + }, + }, + hasError: false, + }, + { + name: "collect daemonsets", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/daemonsets/": &v1beta2.DaemonSetList{ + Items: []*v1beta2.DaemonSet{ + { + Status: &v1beta2.DaemonSetStatus{ + CurrentNumberScheduled: toInt32Ptr(3), + DesiredNumberScheduled: toInt32Ptr(5), + NumberAvailable: toInt32Ptr(2), + NumberMisscheduled: toInt32Ptr(2), + NumberReady: toInt32Ptr(1), + NumberUnavailable: toInt32Ptr(1), + UpdatedNumberScheduled: toInt32Ptr(2), + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(11221), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("daemon1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Fields: map[string]interface{}{ + "generation": int64(11221), + "current_number_scheduled": int32(3), + "desired_number_scheduled": int32(5), + "number_available": int32(2), + "number_misscheduled": int32(2), + "number_ready": int32(1), + "number_unavailable": int32(1), + "updated_number_scheduled": int32(2), + "created": now.UnixNano(), + }, + Tags: map[string]string{ + "daemonset_name": "daemon1", + "namespace": "ns1", + }, + }, + }, + }, + hasError: false, + }, + } + + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) + for _, dset := range 
((v.handler.responseMap["/daemonsets/"]).(*v1beta2.DaemonSetList)).Items { + err := ks.gatherDaemonSet(*dset, acc) + if err != nil { + t.Errorf("Failed to gather daemonset - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) + } + } + } + } + } +} diff --git a/plugins/inputs/kube_inventory/deployment.go b/plugins/inputs/kube_inventory/deployment.go new file mode 100644 index 000000000..2d72e8d03 --- /dev/null +++ b/plugins/inputs/kube_inventory/deployment.go @@ -0,0 +1,40 @@ +package kube_inventory + +import ( + "context" + "time" + + "github.com/ericchiang/k8s/apis/apps/v1beta1" + + "github.com/influxdata/telegraf" +) + +func collectDeployments(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getDeployments(ctx) + if err != nil { + acc.AddError(err) + return + } + for _, d := range list.Items { + if err = ki.gatherDeployment(*d, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherDeployment(d v1beta1.Deployment, acc telegraf.Accumulator) error { + fields := map[string]interface{}{ + "replicas_available": d.Status.GetAvailableReplicas(), + "replicas_unavailable": d.Status.GetUnavailableReplicas(), + "created": time.Unix(d.Metadata.CreationTimestamp.GetSeconds(), int64(d.Metadata.CreationTimestamp.GetNanos())).UnixNano(), + } + tags := map[string]string{ + "deployment_name": d.Metadata.GetName(), + "namespace": d.Metadata.GetNamespace(), + } + + acc.AddFields(deploymentMeasurement, fields, tags) + + return nil +} diff --git a/plugins/inputs/kube_inventory/deployment_test.go b/plugins/inputs/kube_inventory/deployment_test.go new file mode 100644 index 000000000..0429b84fa --- /dev/null +++ b/plugins/inputs/kube_inventory/deployment_test.go @@ -0,0 +1,131 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/apis/apps/v1beta1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + "github.com/ericchiang/k8s/util/intstr" + "github.com/influxdata/telegraf/testutil" +) + +func TestDeployment(t *testing.T) { + cli := &client{} + + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + outputMetric := &testutil.Metric{ + Fields: map[string]interface{}{ + "replicas_available": int32(1), + "replicas_unavailable": int32(4), + "created": now.UnixNano(), + }, + Tags: map[string]string{ + "namespace": "ns1", + "deployment_name": "deploy1", + }, + } + + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no deployments", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/deployments/": &v1beta1.DeploymentList{}, + }, + }, + hasError: false, + }, + { + name: "collect 
deployments", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/deployments/": &v1beta1.DeploymentList{ + Items: []*v1beta1.Deployment{ + { + Status: &v1beta1.DeploymentStatus{ + Replicas: toInt32Ptr(3), + AvailableReplicas: toInt32Ptr(1), + UnavailableReplicas: toInt32Ptr(4), + UpdatedReplicas: toInt32Ptr(2), + ObservedGeneration: toInt64Ptr(9121), + }, + Spec: &v1beta1.DeploymentSpec{ + Strategy: &v1beta1.DeploymentStrategy{ + RollingUpdate: &v1beta1.RollingUpdateDeployment{ + MaxUnavailable: &intstr.IntOrString{ + IntVal: toInt32Ptr(30), + }, + MaxSurge: &intstr.IntOrString{ + IntVal: toInt32Ptr(20), + }, + }, + }, + Replicas: toInt32Ptr(4), + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(11221), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("deploy1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + outputMetric, + }, + }, + hasError: false, + }, + } + + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) + for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1beta1.DeploymentList)).Items { + err := ks.gatherDeployment(*deployment, acc) + if err != nil { + t.Errorf("Failed to gather deployment - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) + } + } + } + } + } +} diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_state.go new file mode 100644 index 000000000..705d0f65e --- /dev/null +++ b/plugins/inputs/kube_inventory/kube_state.go @@ -0,0 +1,175 @@ +package kube_inventory + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "strconv" + "strings" + "sync" + "time" + + "github.com/kubernetes/apimachinery/pkg/api/resource" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// KubernetesInventory represents the config object for the plugin. 
+type KubernetesInventory struct {
+	URL               string            `toml:"url"`
+	BearerToken       string            `toml:"bearer_token"`
+	BearerTokenString string            `toml:"bearer_token_string"`
+	Namespace         string            `toml:"namespace"`
+	ResponseTimeout   internal.Duration `toml:"response_timeout"` // Timeout specified as a string - 3s, 1m, 1h
+	ResourceExclude   []string          `toml:"resource_exclude"`
+	ResourceInclude   []string          `toml:"resource_include"`
+	MaxConfigMapAge   internal.Duration `toml:"max_config_map_age"`
+
+	tls.ClientConfig
+	client *client
+}
+
+var sampleConfig = `
+  ## URL for the Kubernetes API
+  url = "https://127.0.0.1"
+
+  ## Namespace to use
+  # namespace = "default"
+
+  ## Use bearer token for authorization. ('bearer_token' takes priority)
+  # bearer_token = "/path/to/bearer/token"
+  ## OR
+  # bearer_token_string = "abc_123"
+
+  ## Set response_timeout (default 5 seconds)
+  # response_timeout = "5s"
+
+  ## Optional Resources to exclude from gathering
+  ## Leave this blank to try to gather everything available.
+  ## Values can be - "daemonsets", "deployments", "nodes", "persistentvolumes",
+  ## "persistentvolumeclaims", "pods", "statefulsets"
+  # resource_exclude = [ "deployments", "nodes", "statefulsets" ]
+
+  ## Optional Resources to include when gathering
+  ## Overrides resource_exclude if both are set.
+  # resource_include = [ "deployments", "nodes", "statefulsets" ]
+
+  ## Optional TLS Config
+  # tls_ca = "/path/to/cafile"
+  # tls_cert = "/path/to/certfile"
+  # tls_key = "/path/to/keyfile"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+`
+
+// SampleConfig returns a sample config
+func (ki *KubernetesInventory) SampleConfig() string {
+	return sampleConfig
+}
+
+// Description returns the description of this plugin
+func (ki *KubernetesInventory) Description() string {
+	return "Read metrics from the Kubernetes API"
+}
+
+// Gather collects kubernetes metrics from a given URL.
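+// Each resource collector selected by the include/exclude filter runs in its
+// own goroutine against a shared context; failures are reported through the
+// accumulator so one failing resource does not abort the others.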
+func (ki *KubernetesInventory) Gather(acc telegraf.Accumulator) (err error) { + if ki.client == nil { + if ki.client, err = ki.initClient(); err != nil { + return err + } + } + + resourceFilter, err := filter.NewIncludeExcludeFilter(ki.ResourceInclude, ki.ResourceExclude) + if err != nil { + return err + } + + wg := sync.WaitGroup{} + ctx := context.Background() + + for collector, f := range availableCollectors { + if resourceFilter.Match(collector) { + wg.Add(1) + go func(f func(ctx context.Context, acc telegraf.Accumulator, k *KubernetesInventory)) { + defer wg.Done() + f(ctx, acc, ki) + }(f) + } + } + + wg.Wait() + + return nil +} + +var availableCollectors = map[string]func(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory){ + "daemonsets": collectDaemonSets, + "deployments": collectDeployments, + "nodes": collectNodes, + "persistentvolumes": collectPersistentVolumes, + "persistentvolumeclaims": collectPersistentVolumeClaims, + "pods": collectPods, + "statefulsets": collectStatefulSets, +} + +func (ki *KubernetesInventory) initClient() (*client, error) { + if ki.BearerToken != "" { + token, err := ioutil.ReadFile(ki.BearerToken) + if err != nil { + return nil, err + } + ki.BearerTokenString = strings.TrimSpace(string(token)) + } + + return newClient(ki.URL, ki.Namespace, ki.BearerTokenString, ki.ResponseTimeout.Duration, ki.ClientConfig) +} + +func atoi(s string) int64 { + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return 0 + } + return int64(i) +} + +func convertQuantity(s string, m float64) int64 { + q, err := resource.ParseQuantity(s) + if err != nil { + log.Printf("E! Failed to parse quantity - %v", err) + return 0 + } + f, err := strconv.ParseFloat(fmt.Sprint(q.AsDec()), 64) + if err != nil { + log.Printf("E! 
Failed to parse float - %v", err) + return 0 + } + if m < 1 { + m = 1 + } + return int64(f * m) +} + +var ( + daemonSetMeasurement = "kubernetes_daemonset" + deploymentMeasurement = "kubernetes_deployment" + nodeMeasurement = "kubernetes_node" + persistentVolumeMeasurement = "kubernetes_persistentvolume" + persistentVolumeClaimMeasurement = "kubernetes_persistentvolumeclaim" + podContainerMeasurement = "kubernetes_pod_container" + statefulSetMeasurement = "kubernetes_statefulset" +) + +func init() { + inputs.Add("kube_inventory", func() telegraf.Input { + return &KubernetesInventory{ + ResponseTimeout: internal.Duration{Duration: time.Second * 5}, + Namespace: "default", + } + }) +} diff --git a/plugins/inputs/kube_inventory/node.go b/plugins/inputs/kube_inventory/node.go new file mode 100644 index 000000000..cccf6897f --- /dev/null +++ b/plugins/inputs/kube_inventory/node.go @@ -0,0 +1,56 @@ +package kube_inventory + +import ( + "context" + + "github.com/ericchiang/k8s/apis/core/v1" + + "github.com/influxdata/telegraf" +) + +func collectNodes(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getNodes(ctx) + if err != nil { + acc.AddError(err) + return + } + for _, n := range list.Items { + if err = ki.gatherNode(*n, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherNode(n v1.Node, acc telegraf.Accumulator) error { + fields := map[string]interface{}{} + tags := map[string]string{ + "node_name": *n.Metadata.Name, + } + + for resourceName, val := range n.Status.Capacity { + switch resourceName { + case "cpu": + fields["capacity_cpu_cores"] = atoi(val.GetString_()) + case "memory": + fields["capacity_memory_bytes"] = convertQuantity(val.GetString_(), 1) + case "pods": + fields["capacity_pods"] = atoi(val.GetString_()) + } + } + + for resourceName, val := range n.Status.Allocatable { + switch resourceName { + case "cpu": + fields["allocatable_cpu_cores"] = atoi(val.GetString_()) + case "memory": + fields["allocatable_memory_bytes"] = convertQuantity(val.GetString_(), 1) + case "pods": + fields["allocatable_pods"] = atoi(val.GetString_()) + } + } + + acc.AddFields(nodeMeasurement, fields, tags) + + return nil +} diff --git a/plugins/inputs/kube_inventory/node_test.go b/plugins/inputs/kube_inventory/node_test.go new file mode 100644 index 000000000..7573dd2c0 --- /dev/null +++ b/plugins/inputs/kube_inventory/node_test.go @@ -0,0 +1,172 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/apis/core/v1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + "github.com/ericchiang/k8s/apis/resource" + + "github.com/influxdata/telegraf/testutil" +) + +func TestNode(t *testing.T) { + cli := &client{} + now := time.Now() + created := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 36, 0, now.Location()) + + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no nodes", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/nodes/": &v1.NodeList{}, + }, + }, + hasError: false, + }, + { + name: "collect nodes", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/nodes/": &v1.NodeList{ + Items: []*v1.Node{ + { + Status: &v1.NodeStatus{ + NodeInfo: &v1.NodeSystemInfo{ + KernelVersion: toStrPtr("4.14.48-coreos-r2"), + OsImage: toStrPtr("Container Linux by CoreOS 1745.7.0 (Rhyolite)"), + ContainerRuntimeVersion: toStrPtr("docker://18.3.1"), + KubeletVersion: 
toStrPtr("v1.10.3"), + KubeProxyVersion: toStrPtr("v1.10.3"), + }, + Phase: toStrPtr("Running"), + Capacity: map[string]*resource.Quantity{ + "cpu": {String_: toStrPtr("16")}, + "ephemeral_storage_bytes": {String_: toStrPtr("49536401408")}, + "hugepages_1Gi_bytes": {String_: toStrPtr("0")}, + "hugepages_2Mi_bytes": {String_: toStrPtr("0")}, + "memory": {String_: toStrPtr("125817904Ki")}, + "pods": {String_: toStrPtr("110")}, + }, + Allocatable: map[string]*resource.Quantity{ + "cpu": {String_: toStrPtr("16")}, + "ephemeral_storage_bytes": {String_: toStrPtr("44582761194")}, + "hugepages_1Gi_bytes": {String_: toStrPtr("0")}, + "hugepages_2Mi_bytes": {String_: toStrPtr("0")}, + "memory": {String_: toStrPtr("125715504Ki")}, + "pods": {String_: toStrPtr("110")}, + }, + Conditions: []*v1.NodeCondition{ + {Type: toStrPtr("Ready"), Status: toStrPtr("true"), LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}}, + {Type: toStrPtr("OutOfDisk"), Status: toStrPtr("false"), LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(created.Unix())}}, + }, + }, + Spec: &v1.NodeSpec{ + ProviderID: toStrPtr("aws:///us-east-1c/i-0c00"), + Taints: []*v1.Taint{ + { + Key: toStrPtr("k1"), + Value: toStrPtr("v1"), + Effect: toStrPtr("NoExecute"), + }, + { + Key: toStrPtr("k2"), + Value: toStrPtr("v2"), + Effect: toStrPtr("NoSchedule"), + }, + }, + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(int64(11232)), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("node1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(created.Unix())}, + }, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Measurement: nodeMeasurement, + Fields: map[string]interface{}{ + "capacity_cpu_cores": int64(16), + "capacity_memory_bytes": int64(1.28837533696e+11), + "capacity_pods": int64(110), + "allocatable_cpu_cores": int64(16), + "allocatable_memory_bytes": int64(1.28732676096e+11), + "allocatable_pods": int64(110), + }, + Tags: map[string]string{ + "node_name": "node1", + }, + }, + }, + }, + hasError: false, + }, + } + + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) + for _, node := range ((v.handler.responseMap["/nodes/"]).(*v1.NodeList)).Items { + err := ks.gatherNode(*node, acc) + if err != nil { + t.Errorf("Failed to gather node - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + measurement := v.output.Metrics[i].Measurement + var keyTag string + switch measurement { + case nodeMeasurement: + keyTag = "node" + } + var j int + for j = range acc.Metrics { + if acc.Metrics[j].Measurement == measurement && + acc.Metrics[j].Tags[keyTag] == v.output.Metrics[i].Tags[keyTag] { + break + } + } + + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[j].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s, measurement %s, j %d\n", v.name, k, m, acc.Metrics[j].Tags[k], measurement, j) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[j].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T), 
measurement %s, j %d\n", v.name, k, m, m, acc.Metrics[j].Fields[k], acc.Metrics[i].Fields[k], measurement, j) + } + } + } + } + } +} diff --git a/plugins/inputs/kube_inventory/persistentvolume.go b/plugins/inputs/kube_inventory/persistentvolume.go new file mode 100644 index 000000000..05600522b --- /dev/null +++ b/plugins/inputs/kube_inventory/persistentvolume.go @@ -0,0 +1,52 @@ +package kube_inventory + +import ( + "context" + "strings" + + "github.com/ericchiang/k8s/apis/core/v1" + + "github.com/influxdata/telegraf" +) + +func collectPersistentVolumes(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getPersistentVolumes(ctx) + if err != nil { + acc.AddError(err) + return + } + for _, pv := range list.Items { + if err = ki.gatherPersistentVolume(*pv, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherPersistentVolume(pv v1.PersistentVolume, acc telegraf.Accumulator) error { + phaseType := 5 + switch strings.ToLower(pv.Status.GetPhase()) { + case "bound": + phaseType = 0 + case "failed": + phaseType = 1 + case "pending": + phaseType = 2 + case "released": + phaseType = 3 + case "available": + phaseType = 4 + } + fields := map[string]interface{}{ + "phase_type": phaseType, + } + tags := map[string]string{ + "pv_name": pv.Metadata.GetName(), + "phase": pv.Status.GetPhase(), + "storageclass": pv.Spec.GetStorageClassName(), + } + + acc.AddFields(persistentVolumeMeasurement, fields, tags) + + return nil +} diff --git a/plugins/inputs/kube_inventory/persistentvolume_test.go b/plugins/inputs/kube_inventory/persistentvolume_test.go new file mode 100644 index 000000000..a5d20d047 --- /dev/null +++ b/plugins/inputs/kube_inventory/persistentvolume_test.go @@ -0,0 +1,112 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/apis/core/v1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + + "github.com/influxdata/telegraf/testutil" +) + +func TestPersistentVolume(t *testing.T) { + cli := &client{} + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no pv", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/persistentvolumes/": &v1.PersistentVolumeList{}, + }, + }, + hasError: false, + }, + { + name: "collect pvs", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/persistentvolumes/": &v1.PersistentVolumeList{ + Items: []*v1.PersistentVolume{ + { + Status: &v1.PersistentVolumeStatus{ + Phase: toStrPtr("pending"), + }, + Spec: &v1.PersistentVolumeSpec{ + StorageClassName: toStrPtr("ebs-1"), + }, + Metadata: &metav1.ObjectMeta{ + Name: toStrPtr("pv1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Fields: map[string]interface{}{ + "phase_type": 2, + }, + Tags: map[string]string{ + "pv_name": "pv1", + "storageclass": "ebs-1", + "phase": "pending", + }, + }, + }, + }, + hasError: false, + }, + } + + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) + for _, pv := range ((v.handler.responseMap["/persistentvolumes/"]).(*v1.PersistentVolumeList)).Items { + err := ks.gatherPersistentVolume(*pv, acc) + if err != nil { 
+ t.Errorf("Failed to gather pv - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) + } + } + } + } + } +} diff --git a/plugins/inputs/kube_inventory/persistentvolumeclaim.go b/plugins/inputs/kube_inventory/persistentvolumeclaim.go new file mode 100644 index 000000000..0663462ae --- /dev/null +++ b/plugins/inputs/kube_inventory/persistentvolumeclaim.go @@ -0,0 +1,49 @@ +package kube_inventory + +import ( + "context" + "strings" + + "github.com/ericchiang/k8s/apis/core/v1" + + "github.com/influxdata/telegraf" +) + +func collectPersistentVolumeClaims(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getPersistentVolumeClaims(ctx) + if err != nil { + acc.AddError(err) + return + } + for _, pvc := range list.Items { + if err = ki.gatherPersistentVolumeClaim(*pvc, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherPersistentVolumeClaim(pvc v1.PersistentVolumeClaim, acc telegraf.Accumulator) error { + phaseType := 3 + switch strings.ToLower(pvc.Status.GetPhase()) { + case "bound": + phaseType = 0 + case "lost": + phaseType = 1 + case "pending": + phaseType = 2 + } + fields := map[string]interface{}{ + "phase_type": phaseType, + } + tags := map[string]string{ + "pvc_name": pvc.Metadata.GetName(), + "namespace": pvc.Metadata.GetNamespace(), + "phase": pvc.Status.GetPhase(), + "storageclass": pvc.Spec.GetStorageClassName(), + } + + acc.AddFields(persistentVolumeClaimMeasurement, fields, tags) + + return nil +} diff --git a/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go new file mode 100644 index 000000000..8a50c0f2e --- /dev/null +++ b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go @@ -0,0 +1,115 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/apis/core/v1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + + "github.com/influxdata/telegraf/testutil" +) + +func TestPersistentVolumeClaim(t *testing.T) { + cli := &client{} + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no pv claims", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/persistentvolumeclaims/": &v1.PersistentVolumeClaimList{}, + }, + }, + hasError: false, + }, + { + name: "collect pv claims", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/persistentvolumeclaims/": &v1.PersistentVolumeClaimList{ + Items: []*v1.PersistentVolumeClaim{ + { + Status: &v1.PersistentVolumeClaimStatus{ + Phase: toStrPtr("bound"), + }, + 
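+						// "bound" is expected to map to phase_type 0 in the
+						// output checked below.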
Spec: &v1.PersistentVolumeClaimSpec{ + VolumeName: toStrPtr("pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8"), + StorageClassName: toStrPtr("ebs-1"), + }, + Metadata: &metav1.ObjectMeta{ + Namespace: toStrPtr("ns1"), + Name: toStrPtr("pc1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Fields: map[string]interface{}{ + "phase_type": 0, + }, + Tags: map[string]string{ + "pvc_name": "pc1", + "namespace": "ns1", + "storageclass": "ebs-1", + "phase": "bound", + }, + }, + }, + }, + hasError: false, + }, + } + + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) + for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*v1.PersistentVolumeClaimList)).Items { + err := ks.gatherPersistentVolumeClaim(*pvc, acc) + if err != nil { + t.Errorf("Failed to gather pvc - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s mismatch, expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s mismatch, expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) + } + } + } + } + } +} diff --git a/plugins/inputs/kube_inventory/pod.go b/plugins/inputs/kube_inventory/pod.go new file mode 100644 index 000000000..7b5207616 --- /dev/null +++ b/plugins/inputs/kube_inventory/pod.go @@ -0,0 +1,87 @@ +package kube_inventory + +import ( + "context" + + "github.com/ericchiang/k8s/apis/core/v1" + + "github.com/influxdata/telegraf" +) + +func collectPods(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getPods(ctx) + if err != nil { + acc.AddError(err) + return + } + for _, p := range list.Items { + if err = ki.gatherPod(*p, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherPod(p v1.Pod, acc telegraf.Accumulator) error { + if p.Metadata.CreationTimestamp.GetSeconds() == 0 && p.Metadata.CreationTimestamp.GetNanos() == 0 { + return nil + } + + for i, cs := range p.Status.ContainerStatuses { + c := p.Spec.Containers[i] + gatherPodContainer(*p.Spec.NodeName, p, *cs, *c, acc) + } + + return nil +} + +func gatherPodContainer(nodeName string, p v1.Pod, cs v1.ContainerStatus, c v1.Container, acc telegraf.Accumulator) { + stateCode := 3 + state := "unknown" + switch { + case cs.State.Running != nil: + stateCode = 0 + state = "running" + case cs.State.Terminated != nil: + stateCode = 1 + state = "terminated" + case cs.State.Waiting != nil: + stateCode = 2 + state = "waiting" + } + + fields := map[string]interface{}{ + "restarts_total": cs.GetRestartCount(), + "state_code": stateCode, + "terminated_reason": cs.State.Terminated.GetReason(), + } + tags := map[string]string{ + "container_name": *c.Name, + "namespace": *p.Metadata.Namespace, +
"node_name": *p.Spec.NodeName, + "pod_name": *p.Metadata.Name, + "state": state, + } + + req := c.Resources.Requests + lim := c.Resources.Limits + + for resourceName, val := range req { + switch resourceName { + case "cpu": + fields["resource_requests_millicpu_units"] = convertQuantity(val.GetString_(), 1000) + case "memory": + fields["resource_requests_memory_bytes"] = convertQuantity(val.GetString_(), 1) + } + } + for resourceName, val := range lim { + switch resourceName { + case "cpu": + fields["resource_limits_millicpu_units"] = convertQuantity(val.GetString_(), 1000) + case "memory": + fields["resource_limits_memory_bytes"] = convertQuantity(val.GetString_(), 1) + } + } + + acc.AddFields(podContainerMeasurement, fields, tags) +} diff --git a/plugins/inputs/kube_inventory/pod_test.go b/plugins/inputs/kube_inventory/pod_test.go new file mode 100644 index 000000000..50b093880 --- /dev/null +++ b/plugins/inputs/kube_inventory/pod_test.go @@ -0,0 +1,199 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/apis/core/v1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + "github.com/ericchiang/k8s/apis/resource" + "github.com/influxdata/telegraf/testutil" +) + +func TestPod(t *testing.T) { + cli := &client{} + now := time.Now() + started := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-1, 1, 36, 0, now.Location()) + created := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 36, 0, now.Location()) + cond1 := time.Date(now.Year(), 7, 5, 7, 53, 29, 0, now.Location()) + cond2 := time.Date(now.Year(), 7, 5, 7, 53, 31, 0, now.Location()) + + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no pods", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/pods/": &v1.PodList{}, + }, + }, + hasError: false, + }, + { + name: "collect pods", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/pods/": &v1.PodList{ + Items: []*v1.Pod{ + { + Spec: &v1.PodSpec{ + NodeName: toStrPtr("node1"), + Containers: []*v1.Container{ + { + Name: toStrPtr("forwarder"), + Image: toStrPtr("image1"), + Ports: []*v1.ContainerPort{ + { + ContainerPort: toInt32Ptr(8080), + Protocol: toStrPtr("TCP"), + }, + }, + Resources: &v1.ResourceRequirements{ + Limits: map[string]*resource.Quantity{ + "cpu": {String_: toStrPtr("100m")}, + }, + Requests: map[string]*resource.Quantity{ + "cpu": {String_: toStrPtr("100m")}, + }, + }, + }, + }, + Volumes: []*v1.Volume{ + { + Name: toStrPtr("vol1"), + VolumeSource: &v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: toStrPtr("pc1"), + ReadOnly: toBoolPtr(true), + }, + }, + }, + { + Name: toStrPtr("vol2"), + }, + }, + }, + Status: &v1.PodStatus{ + Phase: toStrPtr("Running"), + HostIP: toStrPtr("180.12.10.18"), + PodIP: toStrPtr("10.244.2.15"), + StartTime: &metav1.Time{Seconds: toInt64Ptr(started.Unix())}, + Conditions: []*v1.PodCondition{ + { + Type: toStrPtr("Initialized"), + Status: toStrPtr("True"), + LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())}, + }, + { + Type: toStrPtr("Ready"), + Status: toStrPtr("True"), + LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())}, + }, + { + Type: toStrPtr("Scheduled"), + Status: toStrPtr("True"), + LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())}, + }, + }, + ContainerStatuses: []*v1.ContainerStatus{ + { + Name: toStrPtr("forwarder"), + State: &v1.ContainerState{ + Running: 
&v1.ContainerStateRunning{ + StartedAt: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())}, + }, + }, + Ready: toBoolPtr(true), + RestartCount: toInt32Ptr(3), + Image: toStrPtr("image1"), + ImageID: toStrPtr("image_id1"), + ContainerID: toStrPtr("docker://54abe32d0094479d3d"), + }, + }, + }, + Metadata: &metav1.ObjectMeta{ + OwnerReferences: []*metav1.OwnerReference{ + { + ApiVersion: toStrPtr("apps/v1"), + Kind: toStrPtr("DaemonSet"), + Name: toStrPtr("forwarder"), + Controller: toBoolPtr(true), + }, + }, + Generation: toInt64Ptr(11232), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("pod1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(created.Unix())}, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Measurement: podContainerMeasurement, + Fields: map[string]interface{}{ + "restarts_total": int32(3), + "state_code": 0, + "resource_requests_millicpu_units": int64(100), + "resource_limits_millicpu_units": int64(100), + }, + Tags: map[string]string{ + "namespace": "ns1", + "container_name": "forwarder", + "node_name": "node1", + "pod_name": "pod1", + "state": "running", + }, + }, + }, + }, + hasError: false, + }, + } + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) + for _, pod := range ((v.handler.responseMap["/pods/"]).(*v1.PodList)).Items { + err := ks.gatherPod(*pod, acc) + if err != nil { + t.Errorf("Failed to gather pod - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s mismatch, expected %s, got %s, i %d\n", v.name, k, m, acc.Metrics[i].Tags[k], i) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s mismatch, expected %v(%T), got %v(%T), i %d\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k], i) + } + } + } + } + } +} diff --git a/plugins/inputs/kube_inventory/statefulset.go b/plugins/inputs/kube_inventory/statefulset.go new file mode 100644 index 000000000..407aaac2f --- /dev/null +++ b/plugins/inputs/kube_inventory/statefulset.go @@ -0,0 +1,46 @@ +package kube_inventory + +import ( + "context" + "time" + + "github.com/ericchiang/k8s/apis/apps/v1beta1" + + "github.com/influxdata/telegraf" +) + +func collectStatefulSets(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getStatefulSets(ctx) + if err != nil { + acc.AddError(err) + return + } + for _, s := range list.Items { + if err = ki.gatherStatefulSet(*s, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherStatefulSet(s v1beta1.StatefulSet, acc telegraf.Accumulator) error { + status := s.Status + fields := map[string]interface{}{ + "created": time.Unix(s.Metadata.CreationTimestamp.GetSeconds(), int64(s.Metadata.CreationTimestamp.GetNanos())).UnixNano(), + "generation": *s.Metadata.Generation, + "replicas": *status.Replicas, + "replicas_current": *status.CurrentReplicas, + "replicas_ready":
*status.ReadyReplicas, + "replicas_updated": *status.UpdatedReplicas, + "spec_replicas": *s.Spec.Replicas, + "observed_generation": *s.Status.ObservedGeneration, + } + tags := map[string]string{ + "statefulset_name": *s.Metadata.Name, + "namespace": *s.Metadata.Namespace, + } + + acc.AddFields(statefulSetMeasurement, fields, tags) + + return nil +} diff --git a/plugins/inputs/kube_inventory/statefulset_test.go b/plugins/inputs/kube_inventory/statefulset_test.go new file mode 100644 index 000000000..6e94ad150 --- /dev/null +++ b/plugins/inputs/kube_inventory/statefulset_test.go @@ -0,0 +1,123 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/apis/apps/v1beta1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + + "github.com/influxdata/telegraf/testutil" +) + +func TestStatefulSet(t *testing.T) { + cli := &client{} + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no statefulsets", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/statefulsets/": &v1beta1.StatefulSetList{}, + }, + }, + hasError: false, + }, + { + name: "collect statefulsets", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/statefulsets/": &v1beta1.StatefulSetList{ + Items: []*v1beta1.StatefulSet{ + { + Status: &v1beta1.StatefulSetStatus{ + Replicas: toInt32Ptr(2), + CurrentReplicas: toInt32Ptr(4), + ReadyReplicas: toInt32Ptr(1), + UpdatedReplicas: toInt32Ptr(3), + ObservedGeneration: toInt64Ptr(119), + }, + Spec: &v1beta1.StatefulSetSpec{ + Replicas: toInt32Ptr(3), + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(332), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("sts1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Fields: map[string]interface{}{ + "generation": int64(332), + "observed_generation": int64(119), + "created": now.UnixNano(), + "spec_replicas": int32(3), + "replicas": int32(2), + "replicas_current": int32(4), + "replicas_ready": int32(1), + "replicas_updated": int32(3), + }, + Tags: map[string]string{ + "namespace": "ns1", + "statefulset_name": "sts1", + }, + }, + }, + }, + hasError: false, + }, + } + + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) + for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1beta1.StatefulSetList)).Items { + err := ks.gatherStatefulSet(*ss, acc) + if err != nil { + t.Errorf("Failed to gather ss - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s mismatch, expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s mismatch, expected %v(%T), got %v(%T)\n",
v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) + } + } + } + } + } +} From 96f99a7a790099760c02bd028a03cf5a07b991b9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 4 Feb 2019 12:38:45 -0800 Subject: [PATCH 0567/1815] Update changelog --- CHANGELOG.md | 1 + README.md | 1 + docs/LICENSE_OF_DEPENDENCIES.md | 2 ++ 3 files changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b26d54936..752898499 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ #### New Inputs - [cloud_pubsub](/plugins/inputs/cloud_pubsub/README.md) - Contributed by @emilymye +- [kube_inventory](/plugins/inputs/kube_inventory/README.md) - Contributed by @influxdata - [neptune_apex](/plugins/inputs/neptune_apex/README.md) - Contributed by @MaxRenaud - [nginx_upstream_check](/plugins/inputs/nginx_upstream_check/README.md) - Contributed by @dmitryilyin - [multifile](/plugins/inputs/multifile/README.md) - Contributed by @martin2250 diff --git a/README.md b/README.md index cf994dc20..9ffc7d66b 100644 --- a/README.md +++ b/README.md @@ -194,6 +194,7 @@ For documentation on the latest development code see the [documentation index][d * [kernel_vmstat](./plugins/inputs/kernel_vmstat) * [kibana](./plugins/inputs/kibana) * [kubernetes](./plugins/inputs/kubernetes) +* [kube_inventory](./plugins/inputs/kube_inventory) * [leofs](./plugins/inputs/leofs) * [linux_sysctl_fs](./plugins/inputs/linux_sysctl_fs) * [logparser](./plugins/inputs/logparser) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 427f54474..3f7fab663 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -63,6 +63,7 @@ following works: - github.com/kardianos/service [zlib License](https://github.com/kardianos/service/blob/master/LICENSE) - github.com/kballard/go-shellquote [MIT License](https://github.com/kballard/go-shellquote/blob/master/LICENSE) - github.com/kr/logfmt [MIT License](https://github.com/kr/logfmt/blob/master/Readme) +- github.com/kubernetes/apimachinery [Apache License 2.0](https://github.com/kubernetes/apimachinery/blob/master/LICENSE) - github.com/leodido/ragel-machinery [MIT License](https://github.com/leodido/ragel-machinery/blob/develop/LICENSE) - github.com/mailru/easyjson [MIT License](https://github.com/mailru/easyjson/blob/master/LICENSE) - github.com/matttproud/golang_protobuf_extensions [Apache License 2.0](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE) @@ -127,6 +128,7 @@ following works: - gopkg.in/fatih/pool.v2 [MIT License](https://github.com/fatih/pool/blob/v2.0.0/LICENSE) - gopkg.in/fsnotify.v1 [BSD 3-Clause "New" or "Revised" License](https://github.com/fsnotify/fsnotify/blob/v1.4.7/LICENSE) - gopkg.in/gorethink/gorethink.v3 [Apache License 2.0](https://github.com/rethinkdb/rethinkdb-go/blob/v3.0.5/LICENSE) +- gopkg.in/inf.v0 [BSD 3-Clause "New" or "Revised" License](https://github.com/go-inf/inf/blob/v0.9.1/LICENSE) - gopkg.in/ldap.v2 [MIT License](https://github.com/go-ldap/ldap/blob/v2.5.1/LICENSE) - gopkg.in/mgo.v2 [BSD 2-Clause "Simplified" License](https://github.com/go-mgo/mgo/blob/v2/LICENSE) - gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE) From c579a6bdb9e6f02fb0e387472a7377f228cef342 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 4 Feb 2019 13:50:13 -0800 Subject: [PATCH 0568/1815] Add linux mipsle packages (#5234) --- scripts/build.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git 
a/scripts/build.py b/scripts/build.py index efe9910ec..58b684f82 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -85,7 +85,7 @@ targets = { supported_builds = { "windows": [ "amd64", "i386" ], - "linux": [ "amd64", "i386", "armhf", "armel", "arm64", "static_amd64", "s390x"], + "linux": [ "amd64", "i386", "armhf", "armel", "arm64", "static_amd64", "s390x", "mipsel"], "freebsd": [ "amd64", "i386" ] } @@ -455,6 +455,8 @@ def build(version=None, goarch = "arm64" elif "arm" in arch: goarch = "arm" + elif arch == "mipsel": + goarch = "mipsle" build_command += "GOOS={} GOARCH={} ".format(platform, goarch) if "arm" in arch: @@ -569,6 +571,8 @@ def package(build_output, pkg_name, version, nightly=False, iteration=1, static= shutil.copy(fr, to) for package_type in supported_packages[platform]: + if package_type == "rpm" and arch == "mipsel": + continue # Package the directory structure for each package type for the platform logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type)) name = pkg_name From 2f2ababbdb2432e221cc0897baed27ec0e883215 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 4 Feb 2019 13:52:00 -0800 Subject: [PATCH 0569/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 752898499..0ef8264f2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ - [#5295](https://github.com/influxdata/telegraf/pull/5295): Support passing bearer token directly in k8s input. - [#5294](https://github.com/influxdata/telegraf/pull/5294): Support passing bearer token directly in prometheus input. - [#5292](https://github.com/influxdata/telegraf/pull/5292): Add option to report input timestamp in prometheus output. +- [#5234](https://github.com/influxdata/telegraf/pull/5234): Add Linux mipsle packages. #### Bugfixes From cce160322e4221a9f8ba43d3aef62254bd77d349 Mon Sep 17 00:00:00 2001 From: Ami Blonder Date: Tue, 5 Feb 2019 21:04:51 +0200 Subject: [PATCH 0570/1815] Replace subscription with topic in the pubsub output plugin (#5378) --- plugins/outputs/cloud_pubsub/README.md | 10 +++++----- plugins/outputs/cloud_pubsub/pubsub.go | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/plugins/outputs/cloud_pubsub/README.md b/plugins/outputs/cloud_pubsub/README.md index 5c345de4b..873f3c9b3 100644 --- a/plugins/outputs/cloud_pubsub/README.md +++ b/plugins/outputs/cloud_pubsub/README.md @@ -7,16 +7,16 @@ as one of the supported [output data formats][]. ### Configuration This section contains the default TOML to configure the plugin. You can -generate it using `telegraf --usage pubsub`. +generate it using `telegraf --usage cloud_pubsub`. ```toml -[[inputs.pubsub]] +[[outputs.cloud_pubsub]] ## Required. Name of Google Cloud Platform (GCP) Project that owns - ## the given PubSub subscription. + ## the given PubSub topic. project = "my-project" - ## Required. Name of PubSub subscription to ingest metrics from. - subscription = "my-subscription" + ## Required. Name of PubSub topic to publish metrics to. + topic = "my-topic" ## Required. Data format to consume. ## Each data format has its own unique set of configuration options. diff --git a/plugins/outputs/cloud_pubsub/pubsub.go b/plugins/outputs/cloud_pubsub/pubsub.go index 0edaec617..ee1611d3f 100644 --- a/plugins/outputs/cloud_pubsub/pubsub.go +++ b/plugins/outputs/cloud_pubsub/pubsub.go @@ -16,11 +16,11 @@ import ( const sampleConfig = ` ## Required. 
Name of Google Cloud Platform (GCP) Project that owns - ## the given PubSub subscription. + ## the given PubSub topic. project = "my-project" - ## Required. Name of PubSub subscription to ingest metrics from. - subscription = "my-subscription" + ## Required. Name of PubSub topic to publish metrics to. + topic = "my-topic" ## Required. Data format to consume. ## Each data format has its own unique set of configuration options. From 2c9fde451bc80437579df0de3fdb20518b739402 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 5 Feb 2019 11:32:20 -0800 Subject: [PATCH 0571/1815] Set 1.9.4 release date --- CHANGELOG.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0ef8264f2..31092b004 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,14 +46,14 @@ - [#5320](https://github.com/influxdata/telegraf/pull/5320): Use datacenter option spelling in consul input. - [#5316](https://github.com/influxdata/telegraf/pull/5316): Remove auth from /ping route in influxdb_listener. -## v1.9.4 [unreleased] +## v1.9.4 [2019-02-05] #### Bugfixes - [#5334](https://github.com/influxdata/telegraf/issues/5334): Fix skip_rows and skip_columns options in csv parser. - [#5181](https://github.com/influxdata/telegraf/issues/5181): Always send basic auth in jenkins input. - [#5346](https://github.com/influxdata/telegraf/pull/5346): Build official packages with Go 1.11.5. -- [#5368](https://github.com/influxdata/telegraf/issues/5368): Cannot define multiple syslog plugins. +- [#5368](https://github.com/influxdata/telegraf/issues/5368): Fix definition of multiple syslog plugins. ## v1.9.3 [2019-01-22] From 7dffb1b5f91f0aceb5a102ccf9c648a11afc3085 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 5 Feb 2019 13:09:56 -0800 Subject: [PATCH 0572/1815] Update to gopsutil v2.19.01 (#5380) --- Gopkg.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 93834b78a..cb8b48cc8 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -973,7 +973,7 @@ version = "v1.2.0" [[projects]] - digest = "1:02135a4151567d48ebff6cf36f73b5d8dee566855df84ffd96111d5225848bb7" + digest = "1:066c1020d667e25a0614b56aee1f9ac47e75c77de98eddfb51e9be02c68c1577" name = "github.com/shirou/gopsutil" packages = [ "cpu", @@ -986,8 +986,8 @@ "process", ] pruneopts = "" - revision = "ccc1c1016bc5d10e803189ee43417c50cdde7f1b" - version = "v2.18.12" + revision = "071446942108a03a13cf0717275ad3bdbcb691b4" + version = "v2.19.01" [[projects]] branch = "master" From 1a81e49d05abc6ff5b3d45c5335f512e6156ebb3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 5 Feb 2019 15:15:58 -0800 Subject: [PATCH 0573/1815] Return error loading config on non-200 response --- internal/config/config.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/config/config.go b/internal/config/config.go index a24781949..504d8501c 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -772,6 +772,11 @@ func fetchConfig(u *url.URL) ([]byte, error) { if err != nil { return nil, err } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("failed to retrieve remote config: %s", resp.Status) + } + defer resp.Body.Close() return ioutil.ReadAll(resp.Body) } From 1137fa50ad1f038a87b137e02ff878db62d15b3a Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 5 Feb 2019 18:38:04 -0700 Subject: [PATCH 0574/1815] Continue checking cert from other sources if error occurs (#5381) --- plugins/inputs/x509_cert/x509_cert.go | 
2 +- plugins/inputs/x509_cert/x509_cert_test.go | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index ba4708ea2..81bcb0d2c 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -166,7 +166,7 @@ func (c *X509Cert) Gather(acc telegraf.Accumulator) error { for _, location := range c.Sources { certs, err := c.getCert(location, c.Timeout.Duration*time.Second) if err != nil { - return fmt.Errorf("cannot get SSL cert '%s': %s", location, err.Error()) + acc.AddError(fmt.Errorf("cannot get SSL cert '%s': %s", location, err.Error())) } for _, cert := range certs { diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index a9bd80568..933676417 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -4,13 +4,14 @@ import ( "crypto/tls" "encoding/base64" "fmt" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "io/ioutil" "os" "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" @@ -115,7 +116,7 @@ func TestGatherRemote(t *testing.T) { acc := testutil.Accumulator{} err = sc.Gather(&acc) - if err != nil { + if len(acc.Errors) > 0 { testErr = true } @@ -173,7 +174,7 @@ func TestGatherLocal(t *testing.T) { acc := testutil.Accumulator{} err = sc.Gather(&acc) - if err != nil { + if len(acc.Errors) > 0 { error = true } From 7887e154465b1adfee9b4ab121eb4ef7c74755ae Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 5 Feb 2019 17:39:50 -0800 Subject: [PATCH 0575/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 31092b004..483071510 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,7 @@ - [#4610](https://github.com/influxdata/telegraf/pull/4610): Fix initscript removes pidfile of restarted Telegraf process. - [#5320](https://github.com/influxdata/telegraf/pull/5320): Use datacenter option spelling in consul input. - [#5316](https://github.com/influxdata/telegraf/pull/5316): Remove auth from /ping route in influxdb_listener. +- [#5304](https://github.com/influxdata/telegraf/issues/5304): Fix x509_cert input stops checking certs after first error. 
## v1.9.4 [2019-02-05] From 10ac030502563d078458722cc605d0c9c5fba49a Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 5 Feb 2019 18:43:35 -0700 Subject: [PATCH 0576/1815] Unify time parsing in json/csv parsers (#5382) --- internal/internal.go | 56 +++++++++++++++++++++++- plugins/parsers/csv/parser.go | 17 +------- plugins/parsers/json/parser.go | 79 +++------------------------------- 3 files changed, 63 insertions(+), 89 deletions(-) diff --git a/internal/internal.go b/internal/internal.go index a0a3ec0ec..368bc8bcf 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -7,20 +7,22 @@ import ( "context" "crypto/rand" "errors" + "fmt" "io" "log" + "math" "math/big" "os" "os/exec" + "regexp" + "runtime" "strconv" "strings" "syscall" "time" "unicode" - "fmt" "github.com/alecthomas/units" - "runtime" ) const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" @@ -330,3 +332,53 @@ func CompressWithGzip(data io.Reader) (io.Reader, error) { return pipeReader, err } + +// ParseTimestamp parses a timestamp value as a unix epoch of various precision. +// +// format = "unix": epoch is assumed to be in seconds and can come as number or string. Can have a decimal part. +// format = "unix_ms": epoch is assumed to be in milliseconds and can come as number or string. Cannot have a decimal part. +// format = "unix_us": epoch is assumed to be in microseconds and can come as number or string. Cannot have a decimal part. +// format = "unix_ns": epoch is assumed to be in nanoseconds and can come as number or string. Cannot have a decimal part. +func ParseTimestamp(timestamp interface{}, format string) (time.Time, error) { + timeInt, timeFractional := int64(0), int64(0) + timeEpochStr, ok := timestamp.(string) + var err error + + if !ok { + timeEpochFloat, ok := timestamp.(float64) + if !ok { + return time.Time{}, fmt.Errorf("time: %v could not be converted to string nor float64", timestamp) + } + intPart, frac := math.Modf(timeEpochFloat) + timeInt, timeFractional = int64(intPart), int64(frac*1e9) + } else { + splitted := regexp.MustCompile("[.,]").Split(timeEpochStr, 2) + timeInt, err = strconv.ParseInt(splitted[0], 10, 64) + if err != nil { + return time.Parse(format, timeEpochStr) + } + + if len(splitted) == 2 { + if len(splitted[1]) > 9 { + splitted[1] = splitted[1][:9] //truncates decimal part to nanoseconds precision + } + nanosecStr := splitted[1] + strings.Repeat("0", 9-len(splitted[1])) //adds 0's to the right to obtain a valid number of nanoseconds + + timeFractional, err = strconv.ParseInt(nanosecStr, 10, 64) + if err != nil { + return time.Time{}, err + } + } + } + if strings.EqualFold(format, "unix") { + return time.Unix(timeInt, timeFractional).UTC(), nil + } else if strings.EqualFold(format, "unix_ms") { + return time.Unix(timeInt/1000, (timeInt%1000)*1e6).UTC(), nil + } else if strings.EqualFold(format, "unix_us") { + return time.Unix(0, timeInt*1e3).UTC(), nil + } else if strings.EqualFold(format, "unix_ns") { + return time.Unix(0, timeInt).UTC(), nil + } else { + return time.Time{}, errors.New("Invalid unix format") + } +} diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go index 9401c1dd1..5f4fcc640 100644 --- a/plugins/parsers/csv/parser.go +++ b/plugins/parsers/csv/parser.go @@ -9,6 +9,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" ) @@ -239,22 +240,8 @@ func parseTimestamp(timeFunc func() 
time.Time, recordFields map[string]interface case "": err = fmt.Errorf("timestamp format must be specified") return - case "unix": - var unixTime int64 - unixTime, err = strconv.ParseInt(tStr, 10, 64) - if err != nil { - return - } - metricTime = time.Unix(unixTime, 0) - case "unix_ms": - var unixTime int64 - unixTime, err = strconv.ParseInt(tStr, 10, 64) - if err != nil { - return - } - metricTime = time.Unix(unixTime/1000, (unixTime%1000)*1e6) default: - metricTime, err = time.Parse(timestampFormat, tStr) + metricTime, err = internal.ParseTimestamp(tStr, timestampFormat) if err != nil { return } diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index b3bb9a488..2f939a84f 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -5,16 +5,15 @@ import ( "encoding/json" "fmt" "log" - "math" - "regexp" "strconv" "strings" "time" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" - "github.com/pkg/errors" "github.com/tidwall/gjson" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/metric" ) var ( @@ -50,55 +49,6 @@ func (p *JSONParser) parseArray(buf []byte) ([]telegraf.Metric, error) { return metrics, nil } -// format = "unix": epoch is assumed to be in seconds and can come as number or string. Can have a decimal part. -// format = "unix_ms": epoch is assumed to be in milliseconds and can come as number or string. Cannot have a decimal part. -// format = "unix_us": epoch is assumed to be in microseconds and can come as number or string. Cannot have a decimal part. -// format = "unix_ns": epoch is assumed to be in nanoseconds and can come as number or string. Cannot have a decimal part. -func parseUnixTimestamp(jsonValue interface{}, format string) (time.Time, error) { - timeInt, timeFractional := int64(0), int64(0) - timeEpochStr, ok := jsonValue.(string) - var err error - - if !ok { - timeEpochFloat, ok := jsonValue.(float64) - if !ok { - err := fmt.Errorf("time: %v could not be converted to string nor float64", jsonValue) - return time.Time{}, err - } - intPart, frac := math.Modf(timeEpochFloat) - timeInt, timeFractional = int64(intPart), int64(frac*1e9) - } else { - splitted := regexp.MustCompile("[.,]").Split(timeEpochStr, 2) - timeInt, err = strconv.ParseInt(splitted[0], 10, 64) - if err != nil { - return time.Time{}, err - } - - if len(splitted) == 2 { - if len(splitted[1]) > 9 { - splitted[1] = splitted[1][:9] //truncates decimal part to nanoseconds precision - } - nanosecStr := splitted[1] + strings.Repeat("0", 9-len(splitted[1])) //adds 0's to the right to obtain a valid number of nanoseconds - - timeFractional, err = strconv.ParseInt(nanosecStr, 10, 64) - if err != nil { - return time.Time{}, err - } - } - } - if strings.EqualFold(format, "unix") { - return time.Unix(timeInt, timeFractional).UTC(), nil - } else if strings.EqualFold(format, "unix_ms") { - return time.Unix(timeInt/1000, (timeInt%1000)*1e6).UTC(), nil - } else if strings.EqualFold(format, "unix_us") { - return time.Unix(0, timeInt*1e3).UTC(), nil - } else if strings.EqualFold(format, "unix_ns") { - return time.Unix(0, timeInt).UTC(), nil - } else { - return time.Time{}, errors.New("Invalid unix format") - } -} - func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]interface{}) ([]telegraf.Metric, error) { tags := make(map[string]string) for k, v := range p.DefaultTags { @@ -132,24 +82,9 @@ func (p *JSONParser) parseObject(metrics []telegraf.Metric, 
jsonOut map[string]i return nil, err } - if strings.EqualFold(p.JSONTimeFormat, "unix") || - strings.EqualFold(p.JSONTimeFormat, "unix_ms") || - strings.EqualFold(p.JSONTimeFormat, "unix_us") || - strings.EqualFold(p.JSONTimeFormat, "unix_ns") { - nTime, err = parseUnixTimestamp(f.Fields[p.JSONTimeKey], p.JSONTimeFormat) - if err != nil { - return nil, err - } - } else { - timeStr, ok := f.Fields[p.JSONTimeKey].(string) - if !ok { - err := fmt.Errorf("time: %v could not be converted to string", f.Fields[p.JSONTimeKey]) - return nil, err - } - nTime, err = time.Parse(p.JSONTimeFormat, timeStr) - if err != nil { - return nil, err - } + nTime, err = internal.ParseTimestamp(f.Fields[p.JSONTimeKey], p.JSONTimeFormat) + if err != nil { + return nil, err } delete(f.Fields, p.JSONTimeKey) From f54da4d748b8ca1aa1467ee063a33d42a492e346 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 5 Feb 2019 17:45:44 -0800 Subject: [PATCH 0577/1815] Update changelog/csv parser docs --- CHANGELOG.md | 1 + plugins/parsers/csv/README.md | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 483071510..c7ecfdaec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ - [#5294](https://github.com/influxdata/telegraf/pull/5294): Support passing bearer token directly in prometheus input. - [#5292](https://github.com/influxdata/telegraf/pull/5292): Add option to report input timestamp in prometheus output. - [#5234](https://github.com/influxdata/telegraf/pull/5234): Add Linux mipsle packages. +- [#5382](https://github.com/influxdata/telegraf/pull/5382): Support unix_us and unix_ns timestamp format in csv parser. #### Bugfixes diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md index 488846b5e..ec1ffa1ca 100644 --- a/plugins/parsers/csv/README.md +++ b/plugins/parsers/csv/README.md @@ -74,9 +74,9 @@ time using the JSON document you can use the `csv_timestamp_column` and document. The `csv_timestamp_column` option specifies the key containing the time value and -`csv_timestamp_format` must be set to `unix`, `unix_ms`, or a format string in -using the Go "reference time" which is defined to be the **specific time**: -`Mon Jan 2 15:04:05 MST 2006`. +`csv_timestamp_format` must be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, +or a format string using the Go "reference time" which is defined to be the +**specific time**: `Mon Jan 2 15:04:05 MST 2006`. Consult the Go [time][time parse] package for details and additional examples on how to set the time format. From d4ab5da34f6094de2cec1946af8f4a4fc22ec4ba Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 5 Feb 2019 18:22:42 -0800 Subject: [PATCH 0578/1815] Skip string fields in stackdriver output (#5384) --- plugins/outputs/stackdriver/README.md | 5 +++++ plugins/outputs/stackdriver/stackdriver.go | 17 ++++++++--------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/plugins/outputs/stackdriver/README.md b/plugins/outputs/stackdriver/README.md index ead8a0a6e..ce3eb626e 100644 --- a/plugins/outputs/stackdriver/README.md +++ b/plugins/outputs/stackdriver/README.md @@ -17,3 +17,8 @@ Metrics are grouped by the `namespace` variable and metric key - eg: `custom.goo # The namespace for the metric descriptor namespace = "telegraf" ``` + +### Restrictions + +Stackdriver does not support string values in custom metrics; any string +fields will not be written.
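Since string fields are skipped rather than written, they can be re-mapped to tags before they reach this output. A minimal sketch, not part of this patch, assuming the converter processor is available and a hypothetical string field named `status`:

```toml
# Convert the hypothetical string field "status" into a tag so it is not
# dropped by the stackdriver output, which skips string fields.
[[processors.converter]]
  [processors.converter.fields]
    tag = ["status"]
```

Tags are written as Stackdriver metric labels, so fields converted this way count against the label quotas enforced by this output.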
diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index 0b2403358..a1cafdd98 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -6,12 +6,10 @@ import ( "log" "path" + monitoring "cloud.google.com/go/monitoring/apiv3" // Imports the Stackdriver Monitoring client package. + googlepb "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" - - // Imports the Stackdriver Monitoring client package. - monitoring "cloud.google.com/go/monitoring/apiv3" - googlepb "github.com/golang/protobuf/ptypes/timestamp" metricpb "google.golang.org/genproto/googleapis/api/metric" monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" @@ -86,6 +84,10 @@ func (s *Stackdriver) Write(metrics []telegraf.Metric) error { continue } + if value == nil { + continue + } + metricKind, err := getStackdriverMetricKind(m.Type()) if err != nil { log.Printf("E! [output.stackdriver] get metric failed: %s", err) continue } @@ -222,11 +224,8 @@ func getStackdriverTypedValue(value interface{}) (*monitoringpb.TypedValue, erro }, }, nil case string: - return &monitoringpb.TypedValue{ - Value: &monitoringpb.TypedValue_StringValue{ - StringValue: string(v), - }, - }, nil + // String value types are not available for custom metrics + return nil, nil default: return nil, fmt.Errorf("value type \"%T\" not supported for stackdriver custom metrics", v) } From e65ab593b50c15eb0496b698e2b257ef766ddd8f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 5 Feb 2019 18:24:02 -0800 Subject: [PATCH 0579/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c7ecfdaec..2794ab22b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,7 @@ - [#5320](https://github.com/influxdata/telegraf/pull/5320): Use datacenter option spelling in consul input. - [#5316](https://github.com/influxdata/telegraf/pull/5316): Remove auth from /ping route in influxdb_listener. - [#5304](https://github.com/influxdata/telegraf/issues/5304): Fix x509_cert input stops checking certs after first error. +- [#5315](https://github.com/influxdata/telegraf/issues/5315): Skip string fields when writing to stackdriver output. ## v1.9.4 [2019-02-05] From 7f54ae18b59241c246deeaf93a91ea5f20ae1fd8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 6 Feb 2019 14:17:51 -0800 Subject: [PATCH 0580/1815] Send metrics in ascending time order in stackdriver output (#5385) --- plugins/outputs/stackdriver/README.md | 12 ++ plugins/outputs/stackdriver/stackdriver.go | 44 ++++-- .../outputs/stackdriver/stackdriver_test.go | 127 ++++++++++++++++++ 3 files changed, 175 insertions(+), 8 deletions(-) diff --git a/plugins/outputs/stackdriver/README.md b/plugins/outputs/stackdriver/README.md index ce3eb626e..a2d13e6e1 100644 --- a/plugins/outputs/stackdriver/README.md +++ b/plugins/outputs/stackdriver/README.md @@ -22,3 +22,15 @@ Metrics are grouped by the `namespace` variable and metric key - eg: `custom.goo Stackdriver does not support string values in custom metrics; any string fields will not be written. + +The Stackdriver API does not allow writing points which are out of order, +older than 24 hours, or at a rate faster than one point per +minute.
Since Telegraf writes the newest points first and moves backwards +through the metric buffer, it may not be possible to write historical data +after an interruption. + +Points collected more often than once per minute may need to be +aggregated before they can be written. Consider using the [basicstats][] +aggregator to do this. + +[basicstats]: /plugins/aggregators/basicstats/README.md diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index a1cafdd98..a1c9e20da 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -5,6 +5,8 @@ import ( "fmt" "log" "path" + "sort" + "strings" monitoring "cloud.google.com/go/monitoring/apiv3" // Imports the Stackdriver Monitoring client package. googlepb "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" @@ -38,6 +40,10 @@ const ( StartTime = int64(1) // MaxInt is the max int64 value. MaxInt = int(^uint(0) >> 1) + + errStringPointsOutOfOrder = "One or more of the points specified had an older end time than the most recent point" + errStringPointsTooOld = "Data points cannot be written more than 24h in the past" + errStringPointsTooFrequent = "One or more points were written more frequently than the maximum sampling period configured for the metric" ) var sampleConfig = ` @@ -70,17 +76,33 @@ func (s *Stackdriver) Connect() error { return nil } +// Sorted returns a copy of the metrics in time ascending order. A copy is +// made to avoid modifying the input metric slice since doing so is not +// allowed. +func sorted(metrics []telegraf.Metric) []telegraf.Metric { + batch := make([]telegraf.Metric, 0, len(metrics)) + for i := len(metrics) - 1; i >= 0; i-- { + batch = append(batch, metrics[i]) + } + sort.Slice(batch, func(i, j int) bool { + return batch[i].Time().Before(batch[j].Time()) + }) + return batch +} + // Write the metrics to Google Cloud Stackdriver. func (s *Stackdriver) Write(metrics []telegraf.Metric) error { ctx := context.Background() - for _, m := range metrics { + batch := sorted(metrics) + + for _, m := range batch { timeSeries := []*monitoringpb.TimeSeries{} for _, f := range m.FieldList() { value, err := getStackdriverTypedValue(f.Value) if err != nil { - log.Printf("E! [output.stackdriver] get type failed: %s", err) + log.Printf("E! [outputs.stackdriver] get type failed: %s", err) continue } @@ -90,13 +112,13 @@ func (s *Stackdriver) Write(metrics []telegraf.Metric) error { metricKind, err := getStackdriverMetricKind(m.Type()) if err != nil { - log.Printf("E! [output.stackdriver] get metric failed: %s", err) + log.Printf("E! [outputs.stackdriver] get metric failed: %s", err) continue } timeInterval, err := getStackdriverTimeInterval(metricKind, StartTime, m.Time().Unix()) if err != nil { - log.Printf("E! [output.stackdriver] get time interval failed: %s", err) + log.Printf("E! [outputs.stackdriver] get time interval failed: %s", err) continue } @@ -139,7 +161,13 @@ func (s *Stackdriver) Write(metrics []telegraf.Metric) error { // Create the time series in Stackdriver. err := s.client.CreateTimeSeries(ctx, timeSeriesRequest) if err != nil { - log.Printf("E! [output.stackdriver] unable to write to Stackdriver: %s", err) + if strings.Contains(err.Error(), errStringPointsOutOfOrder) || + strings.Contains(err.Error(), errStringPointsTooOld) || + strings.Contains(err.Error(), errStringPointsTooFrequent) { + log.Printf("D! [outputs.stackdriver] unable to write to Stackdriver: %s", err) + return nil + } + log.Printf("E!
[outputs.stackdriver] unable to write to Stackdriver: %s", err) return err } } @@ -239,7 +267,7 @@ func getStackdriverLabels(tags []*telegraf.Tag) map[string]string { for k, v := range labels { if len(k) > QuotaStringLengthForLabelKey { log.Printf( - "W! [output.stackdriver] removing tag [%s] key exceeds string length for label key [%d]", + "W! [outputs.stackdriver] removing tag [%s] key exceeds string length for label key [%d]", k, QuotaStringLengthForLabelKey, ) @@ -248,7 +276,7 @@ func getStackdriverLabels(tags []*telegraf.Tag) map[string]string { } if len(v) > QuotaStringLengthForLabelValue { log.Printf( - "W! [output.stackdriver] removing tag [%s] value exceeds string length for label value [%d]", + "W! [outputs.stackdriver] removing tag [%s] value exceeds string length for label value [%d]", k, QuotaStringLengthForLabelValue, ) @@ -259,7 +287,7 @@ func getStackdriverLabels(tags []*telegraf.Tag) map[string]string { if len(labels) > QuotaLabelsPerMetricDescriptor { excess := len(labels) - QuotaLabelsPerMetricDescriptor log.Printf( - "W! [output.stackdriver] tag count [%d] exceeds quota for stackdriver labels [%d] removing [%d] random tags", + "W! [outputs.stackdriver] tag count [%d] exceeds quota for stackdriver labels [%d] removing [%d] random tags", len(labels), QuotaLabelsPerMetricDescriptor, excess, diff --git a/plugins/outputs/stackdriver/stackdriver_test.go b/plugins/outputs/stackdriver/stackdriver_test.go index 94a3e6ce4..d9aab38fd 100644 --- a/plugins/outputs/stackdriver/stackdriver_test.go +++ b/plugins/outputs/stackdriver/stackdriver_test.go @@ -2,16 +2,19 @@ package stackdriver import ( "context" + "errors" "fmt" "log" "net" "os" "strings" "testing" + "time" monitoring "cloud.google.com/go/monitoring/apiv3" "github.com/golang/protobuf/proto" emptypb "github.com/golang/protobuf/ptypes/empty" + googlepb "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -96,6 +99,130 @@ func TestWrite(t *testing.T) { require.NoError(t, err) } +func TestWriteAscendingTime(t *testing.T) { + expectedResponse := &emptypb.Empty{} + mockMetric.err = nil + mockMetric.reqs = nil + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + c, err := monitoring.NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + s := &Stackdriver{ + Project: fmt.Sprintf("projects/%s", "[PROJECT]"), + Namespace: "test", + client: c, + } + + // Metrics in descending order of timestamp + metrics := []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(2, 0), + ), + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "value": 43, + }, + time.Unix(1, 0), + ), + } + + err = s.Connect() + require.NoError(t, err) + err = s.Write(metrics) + require.NoError(t, err) + + require.Len(t, mockMetric.reqs, 2) + request := mockMetric.reqs[0].(*monitoringpb.CreateTimeSeriesRequest) + require.Len(t, request.TimeSeries, 1) + ts := request.TimeSeries[0] + require.Len(t, ts.Points, 1) + require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ + EndTime: &googlepb.Timestamp{ + Seconds: 1, + }, + }) + require.Equal(t, ts.Points[0].Value, &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(43), + }, + }) + + request = mockMetric.reqs[1].(*monitoringpb.CreateTimeSeriesRequest) + require.Len(t, request.TimeSeries, 1) + ts = 
request.TimeSeries[0] + require.Len(t, ts.Points, 1) + require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ + EndTime: &googlepb.Timestamp{ + Seconds: 2, + }, + }) + require.Equal(t, ts.Points[0].Value, &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(42), + }, + }) +} + +func TestWriteIgnoredErrors(t *testing.T) { + tests := []struct { + name string + err error + expectedErr bool + }{ + { + name: "points too old", + err: errors.New(errStringPointsTooOld), + }, + { + name: "points out of order", + err: errors.New(errStringPointsOutOfOrder), + }, + { + name: "points too frequent", + err: errors.New(errStringPointsTooFrequent), + }, + { + name: "other errors reported", + err: errors.New("test"), + expectedErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockMetric.err = tt.err + mockMetric.reqs = nil + + c, err := monitoring.NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + s := &Stackdriver{ + Project: fmt.Sprintf("projects/%s", "[PROJECT]"), + Namespace: "test", + client: c, + } + + err = s.Connect() + require.NoError(t, err) + err = s.Write(testutil.MockMetrics()) + if tt.expectedErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + func TestGetStackdriverLabels(t *testing.T) { tags := []*telegraf.Tag{ {Key: "project", Value: "bar"}, From 2acfe16dd38f23ca2e49c1f1503db2cf0e2f6b9b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 6 Feb 2019 14:22:07 -0800 Subject: [PATCH 0581/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2794ab22b..596eadeca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ - [#5316](https://github.com/influxdata/telegraf/pull/5316): Remove auth from /ping route in influxdb_listener. - [#5304](https://github.com/influxdata/telegraf/issues/5304): Fix x509_cert input stops checking certs after first error. - [#5315](https://github.com/influxdata/telegraf/issues/5315): Skip string fields when writing to stackdriver output. +- [#5364](https://github.com/influxdata/telegraf/issues/5364): Send metrics in ascending time order in stackdriver output. 
## v1.9.4 [2019-02-05] From 52bd698046d60c99ca781b5ead6d5b95fd19a7f1 Mon Sep 17 00:00:00 2001 From: Gunnar <628831+gunnaraasen@users.noreply.github.com> Date: Wed, 6 Feb 2019 16:17:11 -0800 Subject: [PATCH 0582/1815] Use Systemd for Amazon Linux 2 packages (#5387) --- scripts/post-install.sh | 5 ++++- scripts/post-remove.sh | 14 +++++++++----- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/scripts/post-install.sh b/scripts/post-install.sh index 6c73fef8e..822a4e4de 100644 --- a/scripts/post-install.sh +++ b/scripts/post-install.sh @@ -88,7 +88,10 @@ elif [[ -f /etc/debian_version ]]; then fi elif [[ -f /etc/os-release ]]; then source /etc/os-release - if [[ $ID = "amzn" ]]; then + if [[ "$NAME" = "Amazon Linux" ]]; then + # Amazon Linux 2+ logic + install_systemd /usr/lib/systemd/system/telegraf.service + elif [[ "$NAME" = "Amazon Linux AMI" ]]; then # Amazon Linux logic install_init # Run update-rc.d or fallback to chkconfig if not available diff --git a/scripts/post-remove.sh b/scripts/post-remove.sh index b4b6f18fb..533a4fec1 100644 --- a/scripts/post-remove.sh +++ b/scripts/post-remove.sh @@ -48,11 +48,15 @@ elif [[ -f /etc/debian_version ]]; then fi elif [[ -f /etc/os-release ]]; then source /etc/os-release - if [[ $ID = "amzn" ]]; then - # Amazon Linux logic - if [[ "$1" = "0" ]]; then - # InfluxDB is no longer installed, remove from init system - rm -f /etc/default/telegraf + if [[ "$ID" = "amzn" ]] && [[ "$1" = "0" ]]; then + # InfluxDB is no longer installed, remove from init system + rm -f /etc/default/telegraf + + if [[ "$NAME" = "Amazon Linux" ]]; then + # Amazon Linux 2+ logic + disable_systemd /usr/lib/systemd/system/telegraf.service + elif [[ "$NAME" = "Amazon Linux AMI" ]]; then + # Amazon Linux logic disable_chkconfig fi fi From 3f9860a685d19648b55461a5b2b9f95a2c3086c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Edstr=C3=B6m?= <108799+Legogris@users.noreply.github.com> Date: Fri, 8 Feb 2019 21:13:33 +0100 Subject: [PATCH 0583/1815] Add resource type and resource label support to stackdriver output (#5391) --- plugins/outputs/stackdriver/README.md | 13 ++++++ plugins/outputs/stackdriver/stackdriver.go | 40 ++++++++++++++----- .../outputs/stackdriver/stackdriver_test.go | 36 +++++++++++++++++ 3 files changed, 79 insertions(+), 10 deletions(-) diff --git a/plugins/outputs/stackdriver/README.md b/plugins/outputs/stackdriver/README.md index a2d13e6e1..7c6e806bd 100644 --- a/plugins/outputs/stackdriver/README.md +++ b/plugins/outputs/stackdriver/README.md @@ -7,6 +7,10 @@ Requires `project` to specify where Stackdriver metrics will be delivered to. Metrics are grouped by the `namespace` variable and metric key - eg: `custom.googleapis.com/telegraf/system/load5` +[Resource type](https://cloud.google.com/monitoring/api/resources) is configured by the `resource_type` variable (default `global`). + +Additional resource labels can be configured by `resource_labels`. By default the required `project_id` label is always set to the `project` variable. 
+ ### Configuration ```toml [[outputs.stackdriver]] # GCP Project project = "erudite-bloom-151019" # The namespace for the metric descriptor namespace = "telegraf" + + # Custom resource type + resource_type = "generic_node" + +# Additional resource labels +[outputs.stackdriver.resource_labels] + node_id = "$HOSTNAME" + namespace = "myapp" + location = "eu-north0" ``` ### Restrictions diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index a1c9e20da..096f77ff3 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -19,8 +19,10 @@ import ( // Stackdriver is the Google Stackdriver config info. type Stackdriver struct { - Project string - Namespace string + Project string + Namespace string + ResourceType string `toml:"resource_type"` + ResourceLabels map[string]string `toml:"resource_labels"` client *monitoring.MetricClient } @@ -47,11 +49,21 @@ const ( ) var sampleConfig = ` - # GCP Project - project = "erudite-bloom-151019" + [[outputs.stackdriver]] + # GCP Project + project = "erudite-bloom-151019" - # The namespace for the metric descriptor - namespace = "telegraf" + # The namespace for the metric descriptor + namespace = "telegraf" + + # Custom resource type + resource_type = "generic_node" + + # Additional resource labels + [outputs.stackdriver.resource_labels] + node_id = "$HOSTNAME" + namespace = "myapp" + location = "eu-north0" ` // Connect initiates the primary connection to the GCP project. @@ -64,6 +76,16 @@ func (s *Stackdriver) Connect() error { return fmt.Errorf("Namespace is a required field for stackdriver output") } + if s.ResourceType == "" { + s.ResourceType = "global" + } + + if s.ResourceLabels == nil { + s.ResourceLabels = make(map[string]string, 1) + } + + s.ResourceLabels["project_id"] = s.Project + if s.client == nil { ctx := context.Background() client, err := monitoring.NewMetricClient(ctx) @@ -137,10 +159,8 @@ func (s *Stackdriver) Write(metrics []telegraf.Metric) error { }, MetricKind: metricKind, Resource: &monitoredrespb.MonitoredResource{ - Type: "global", - Labels: map[string]string{ - "project_id": s.Project, - }, + Type: s.ResourceType, + Labels: s.ResourceLabels, }, Points: []*monitoringpb.Point{ dataPoint, diff --git a/plugins/outputs/stackdriver/stackdriver_test.go b/plugins/outputs/stackdriver/stackdriver_test.go index d9aab38fd..c60d72d36 100644 --- a/plugins/outputs/stackdriver/stackdriver_test.go +++ b/plugins/outputs/stackdriver/stackdriver_test.go @@ -97,6 +97,42 @@ func TestWrite(t *testing.T) { require.NoError(t, err) err = s.Write(testutil.MockMetrics()) require.NoError(t, err) + + request := mockMetric.reqs[0].(*monitoringpb.CreateTimeSeriesRequest) + require.Equal(t, request.TimeSeries[0].Resource.Type, "global") + require.Equal(t, request.TimeSeries[0].Resource.Labels["project_id"], "projects/[PROJECT]") +} + +func TestWriteResourceTypeAndLabels(t *testing.T) { + expectedResponse := &emptypb.Empty{} + mockMetric.err = nil + mockMetric.reqs = nil + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + c, err := monitoring.NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + s := &Stackdriver{ + Project: fmt.Sprintf("projects/%s", "[PROJECT]"), + Namespace: "test", + ResourceType: "foo", + ResourceLabels: map[string]string{ + "mylabel": "myvalue", + }, + client: c, + } + + err = s.Connect() + require.NoError(t, err) + err = s.Write(testutil.MockMetrics()) +
require.NoError(t, err) + + request := mockMetric.reqs[0].(*monitoringpb.CreateTimeSeriesRequest) + require.Equal(t, request.TimeSeries[0].Resource.Type, "foo") + require.Equal(t, request.TimeSeries[0].Resource.Labels["project_id"], "projects/[PROJECT]") + require.Equal(t, request.TimeSeries[0].Resource.Labels["mylabel"], "myvalue") } func TestWriteAscendingTime(t *testing.T) { From 516a5898fd22f3480e506714f1244c043d29bfd3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 8 Feb 2019 12:16:48 -0800 Subject: [PATCH 0584/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 596eadeca..2aab037ab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ - [#5292](https://github.com/influxdata/telegraf/pull/5292): Add option to report input timestamp in prometheus output. - [#5234](https://github.com/influxdata/telegraf/pull/5234): Add Linux mipsle packages. - [#5382](https://github.com/influxdata/telegraf/pull/5382): Support unix_us and unix_ns timestamp format in csv parser. +- [#5391](https://github.com/influxdata/telegraf/pull/5391): Add resource type and resource label support to stackdriver output. #### Bugfixes @@ -49,6 +50,7 @@ - [#5304](https://github.com/influxdata/telegraf/issues/5304): Fix x509_cert input stops checking certs after first error. - [#5315](https://github.com/influxdata/telegraf/issues/5315): Skip string fields when writing to stackdriver output. - [#5364](https://github.com/influxdata/telegraf/issues/5364): Send metrics in ascending time order in stackdriver output. +- [#5117](https://github.com/influxdata/telegraf/issues/5117): Use systemd in Amazon Linux 2 rpm. ## v1.9.4 [2019-02-05] From 16a7ce39da4a5688cf9e5facdad297fa358cc2c3 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 11 Feb 2019 13:49:06 -0700 Subject: [PATCH 0585/1815] Add internal metric for line too long in influxdb_listener (#5396) --- plugins/inputs/influxdb_listener/http_listener.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/inputs/influxdb_listener/http_listener.go b/plugins/inputs/influxdb_listener/http_listener.go index 1de764bfd..2857ae9c9 100644 --- a/plugins/inputs/influxdb_listener/http_listener.go +++ b/plugins/inputs/influxdb_listener/http_listener.go @@ -72,6 +72,8 @@ type HTTPListener struct { NotFoundsServed selfstat.Stat BuffersCreated selfstat.Stat AuthFailures selfstat.Stat + + longLines selfstat.Stat } const sampleConfig = ` @@ -138,6 +140,7 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error { h.NotFoundsServed = selfstat.Register("http_listener", "not_founds_served", tags) h.BuffersCreated = selfstat.Register("http_listener", "buffers_created", tags) h.AuthFailures = selfstat.Register("http_listener", "auth_failures", tags) + h.longLines = selfstat.Register("http_listener", "long_lines", tags) if h.MaxBodySize.Size == 0 { h.MaxBodySize.Size = DEFAULT_MAX_BODY_SIZE @@ -325,6 +328,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { // final newline, then push the rest of the bytes into the next buffer. i := bytes.LastIndexByte(buf, '\n') if i == -1 { + h.longLines.Incr(1) // drop any line longer than the max buffer size log.Printf("D! 
http_listener received a single line longer than the maximum of %d bytes", len(buf)) From 412a7996e183c2668b8982306a084ea2bcacbaa8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 11 Feb 2019 12:50:23 -0800 Subject: [PATCH 0586/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2aab037ab..a099422a2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,7 @@ - [#5234](https://github.com/influxdata/telegraf/pull/5234): Add Linux mipsle packages. - [#5382](https://github.com/influxdata/telegraf/pull/5382): Support unix_us and unix_ns timestamp format in csv parser. - [#5391](https://github.com/influxdata/telegraf/pull/5391): Add resource type and resource label support to stackdriver output. +- [#5396](https://github.com/influxdata/telegraf/pull/5396): Add internal metric for line too long in influxdb_listener. #### Bugfixes From c612f707f4825a93ad8342e7e5c2311ab73e312e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 11 Feb 2019 14:58:30 -0800 Subject: [PATCH 0587/1815] Comment out optional parameters to stackdriver output --- plugins/outputs/stackdriver/README.md | 18 ++++++++--------- plugins/outputs/stackdriver/stackdriver.go | 23 +++++++++++----------- 2 files changed, 20 insertions(+), 21 deletions(-) diff --git a/plugins/outputs/stackdriver/README.md b/plugins/outputs/stackdriver/README.md index 7c6e806bd..cdf0a1591 100644 --- a/plugins/outputs/stackdriver/README.md +++ b/plugins/outputs/stackdriver/README.md @@ -15,20 +15,20 @@ Additional resource labels can be configured by `resource_labels`. By default th ```toml [[outputs.stackdriver]] - # GCP Project + ## GCP Project project = "erudite-bloom-151019" - # The namespace for the metric descriptor + ## The namespace for the metric descriptor namespace = "telegraf" - # Custom resource type - resource_type = "generic_node" + ## Custom resource type + # resource_type = "generic_node" -# Additonal resource labels -[outputs.stackdriver.resource_labels] - node_id = "$HOSTNAME" - namespace = "myapp" - location = "eu-north0" + ## Additonal resource labels + # [outputs.stackdriver.resource_labels] + # node_id = "$HOSTNAME" + # namespace = "myapp" + # location = "eu-north0" ``` ### Restrictions diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index 096f77ff3..10823c8ed 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -49,21 +49,20 @@ const ( ) var sampleConfig = ` - [[outputs.stackdriver]] - # GCP Project - project = "erudite-bloom-151019" + ## GCP Project + project = "erudite-bloom-151019" - # The namespace for the metric descriptor - namespace = "telegraf" + ## The namespace for the metric descriptor + namespace = "telegraf" - # Custom resource type - resource_type = "generic_node" + ## Custom resource type + # resource_type = "generic_node" - # Additonal resource labels - [outputs.stackdriver.resource_labels] - node_id = "$HOSTNAME" - namespace = "myapp" - location = "eu-north0" + ## Additonal resource labels + # [outputs.stackdriver.resource_labels] + # node_id = "$HOSTNAME" + # namespace = "myapp" + # location = "eu-north0" ` // Connect initiates the primary connection to the GCP project. 
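With the optional settings above uncommented, the output attaches a monitored resource along these lines to every time series. A self-contained sketch; the values are the sample-config placeholders, and `os.Getenv` stands in for Telegraf's `$HOSTNAME` environment-variable substitution:

```go
package main

import (
	"fmt"
	"os"

	monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
)

func main() {
	// Sketch of the resource built from resource_type = "generic_node"
	// plus the resource_labels in the example config above.
	resource := &monitoredrespb.MonitoredResource{
		Type: "generic_node",
		Labels: map[string]string{
			"project_id": "erudite-bloom-151019", // always injected from `project`
			"node_id":    os.Getenv("HOSTNAME"),  // "$HOSTNAME" after substitution
			"namespace":  "myapp",
			"location":   "eu-north0",
		},
	}
	fmt.Println(resource.Type, resource.Labels)
}
```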
From c8832a28c4c4c9ef3411d62228012f2755645aed Mon Sep 17 00:00:00 2001 From: Phil Schwartz Date: Mon, 11 Feb 2019 19:10:42 -0600 Subject: [PATCH 0588/1815] Set deadlock priority in sqlserver input (#5301) --- plugins/inputs/sqlserver/sqlserver.go | 47 ++++++++++++++++++--------- 1 file changed, 31 insertions(+), 16 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 5f55a27a6..cb667e43f 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -244,7 +244,8 @@ func init() { // Thanks Bob Ward (http://aka.ms/bobwardms) // and the folks at Stack Overflow (https://github.com/opserver/Opserver/blob/9c89c7e9936b58ad237b30e6f4cc6cd59c406889/Opserver.Core/Data/SQL/SQLInstance.Memory.cs) // for putting most of the memory clerk definitions online! -const sqlMemoryClerkV2 = `DECLARE @SQL NVARCHAR(MAX) = 'SELECT +const sqlMemoryClerkV2 = `SET DEADLOCK_PRIORITY -10; +DECLARE @SQL NVARCHAR(MAX) = 'SELECT "sqlserver_memory_clerks" As [measurement], REPLACE(@@SERVERNAME,"\",":") AS [sql_instance], ISNULL(clerk_names.name,mc.type) AS clerk_type, @@ -348,7 +349,8 @@ ELSE EXEC(@SQL) ` -const sqlDatabaseIOV2 = `IF SERVERPROPERTY('EngineEdition') = 5 +const sqlDatabaseIOV2 = `SET DEADLOCK_PRIORITY -10; +IF SERVERPROPERTY('EngineEdition') = 5 BEGIN SELECT 'sqlserver_database_io' As [measurement], @@ -388,7 +390,8 @@ inner join sys.master_files b on b.database_id = vfs.database_id and b.file_id = END ` -const sqlServerPropertiesV2 = `DECLARE @sys_info TABLE ( +const sqlServerPropertiesV2 = `SET DEADLOCK_PRIORITY -10; +DECLARE @sys_info TABLE ( cpu_count INT, server_memory BIGINT, sku NVARCHAR(64), @@ -465,7 +468,7 @@ FROM ( OPTION( RECOMPILE ) ` -const sqlPerformanceCountersV2 string = ` +const sqlPerformanceCountersV2 string = `SET DEADLOCK_PRIORITY -10; DECLARE @PCounters TABLE ( object_name nvarchar(128), @@ -627,7 +630,8 @@ WHERE pc.counter_name NOT LIKE '% base' OPTION(RECOMPILE); ` -const sqlWaitStatsCategorizedV2 string = `SELECT +const sqlWaitStatsCategorizedV2 string = `SET DEADLOCK_PRIORITY -10; +SELECT 'sqlserver_waitstats' AS [measurement], REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], ws.wait_type, @@ -1186,7 +1190,8 @@ AND wait_time_ms > 100 OPTION (RECOMPILE); ` -const sqlAzureDB string = `IF OBJECT_ID('sys.dm_db_resource_stats') IS NOT NULL +const sqlAzureDB string = `SET DEADLOCK_PRIORITY -10; +IF OBJECT_ID('sys.dm_db_resource_stats') IS NOT NULL BEGIN SELECT TOP(1) 'sqlserver_azurestats' AS [measurement], @@ -1213,7 +1218,8 @@ BEGIN END` // Queries V1 -const sqlPerformanceMetrics string = `SET NOCOUNT ON; +const sqlPerformanceMetrics string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET ARITHABORT ON; SET QUOTED_IDENTIFIER ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED @@ -1306,7 +1312,8 @@ PIVOT(SUM(cntr_value) FOR counter_name IN (' + @ColumnName + ')) AS PVTTable EXEC sp_executesql @DynamicPivotQuery; ` -const sqlMemoryClerk string = `SET NOCOUNT ON; +const sqlMemoryClerk string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; DECLARE @sqlVers numeric(4,2) @@ -1419,7 +1426,8 @@ PIVOT ) as T; ` -const sqlDatabaseSize string = `SET NOCOUNT ON; +const sqlDatabaseSize string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED IF OBJECT_ID('tempdb..#baseline') IS NOT NULL @@ -1512,7 +1520,8 @@ PIVOT(SUM(database_max_size_8k_pages) FOR database_name IN (' + @ColumnName + ') EXEC sp_executesql 
@DynamicPivotQuery; ` -const sqlDatabaseStats string = `SET NOCOUNT ON; +const sqlDatabaseStats string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; IF OBJECT_ID('tempdb..#baseline') IS NOT NULL @@ -1646,7 +1655,8 @@ PIVOT(SUM(AvgBytesPerWrite) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTab EXEC sp_executesql @DynamicPivotQuery; ` -const sqlDatabaseIO string = `SET NOCOUNT ON; +const sqlDatabaseIO string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; DECLARE @secondsBetween tinyint = 5; DECLARE @delayInterval char(8) = CONVERT(Char(8), DATEADD(SECOND, @secondsBetween, '00:00:00'), 108); @@ -1783,7 +1793,8 @@ PIVOT(SUM(num_of_reads_persec) FOR database_name IN (' + @ColumnName + ')) AS PV EXEC sp_executesql @DynamicPivotQuery; ` -const sqlDatabaseProperties string = `SET NOCOUNT ON; +const sqlDatabaseProperties string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET ARITHABORT ON; SET QUOTED_IDENTIFIER ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED @@ -1998,7 +2009,8 @@ PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable EXEC sp_executesql @DynamicPivotQuery; ` -const sqlCPUHistory string = `SET NOCOUNT ON; +const sqlCPUHistory string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET ARITHABORT ON; SET QUOTED_IDENTIFIER ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; @@ -2034,7 +2046,8 @@ ORDER BY timestamp_ms Desc ) as T; ` -const sqlPerformanceCounters string = `SET NOCOUNT ON; +const sqlPerformanceCounters string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; IF OBJECT_ID('tempdb..#PCounters') IS NOT NULL DROP TABLE #PCounters CREATE TABLE #PCounters @@ -2133,7 +2146,8 @@ IF OBJECT_ID('tempdb..#CCounters') IS NOT NULL DROP TABLE #CCounters; IF OBJECT_ID('tempdb..#PCounters') IS NOT NULL DROP TABLE #PCounters; ` -const sqlWaitStatsCategorized string = `SET NOCOUNT ON; +const sqlWaitStatsCategorized string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED DECLARE @secondsBetween tinyint = 5 DECLARE @delayInterval char(8) = CONVERT(Char(8), DATEADD(SECOND, @secondsBetween, '00:00:00'), 108); @@ -2538,7 +2552,8 @@ PIVOT ) as T; ` -const sqlVolumeSpace string = `SET NOCOUNT ON; +const sqlVolumeSpace string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; IF OBJECT_ID('tempdb..#volumestats') IS NOT NULL From 0084138bc6b1789b63c731abefdbfb3507620868 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Kraszewski?= Date: Tue, 12 Feb 2019 02:22:31 +0100 Subject: [PATCH 0589/1815] Add option to set retain flag on messages in mqtt output (#4892) --- plugins/outputs/mqtt/README.md | 7 ++++++- plugins/outputs/mqtt/mqtt.go | 7 ++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/mqtt/README.md b/plugins/outputs/mqtt/README.md index 14c166f9e..697418218 100644 --- a/plugins/outputs/mqtt/README.md +++ b/plugins/outputs/mqtt/README.md @@ -32,10 +32,14 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false - + ## When true, metrics will be sent in one MQTT message per flush. Otherwise, ## metrics are written one metric per MQTT message. 
# batch = false + + ## When true, metric will have RETAIN flag set, making broker cache entries until someone + ## actually reads it + # retain = flase ## Data format to output. # data_format = "influx" @@ -56,4 +60,5 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt * `tls_cert`: TLS CERT * `tls_key`: TLS key * `insecure_skip_verify`: Use TLS but skip chain & host verification (default: false) +* `retain`: Set `retain` flag when publishing, instructing server to cache metric until someone reads it (default: false) * `data_format`: [About Telegraf data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md) diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index 18e22daa6..55d9532db 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -49,6 +49,10 @@ var sampleConfig = ` ## When true, metrics will be sent in one MQTT message per flush. Otherwise, ## metrics are written one metric per MQTT message. # batch = false + + ## When true, metric will have RETAIN flag set, making broker cache entries until someone + ## actually reads it + # retain = flase ## Data format to output. ## Each data format has its own unique set of configuration options, read @@ -68,6 +72,7 @@ type MQTT struct { ClientID string `toml:"client_id"` tls.ClientConfig BatchMessage bool `toml:"batch"` + Retain bool client paho.Client opts *paho.ClientOptions @@ -174,7 +179,7 @@ func (m *MQTT) Write(metrics []telegraf.Metric) error { } func (m *MQTT) publish(topic string, body []byte) error { - token := m.client.Publish(topic, byte(m.QoS), false, body) + token := m.client.Publish(topic, byte(m.QoS), m.Retain, body) token.WaitTimeout(m.Timeout.Duration) if token.Error() != nil { return token.Error() From 6ac4fc8f06458fb35d387bafb7e2c62ac8de009c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 11 Feb 2019 17:11:41 -0800 Subject: [PATCH 0590/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a099422a2..bed8efbbd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,6 +52,7 @@ - [#5315](https://github.com/influxdata/telegraf/issues/5315): Skip string fields when writing to stackdriver output. - [#5364](https://github.com/influxdata/telegraf/issues/5364): Send metrics in ascending time order in stackdriver output. - [#5117](https://github.com/influxdata/telegraf/issues/5117): Use systemd in Amazon Linux 2 rpm. +- [#4988](https://github.com/influxdata/telegraf/issues/4988): Set deadlock priority in sqlserver input. ## v1.9.4 [2019-02-05] From c08b454af4a2b3816e73a09905cef771be517ae0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 11 Feb 2019 17:25:25 -0800 Subject: [PATCH 0591/1815] Fix typos in mqtt output documentation --- plugins/outputs/mqtt/README.md | 11 +++++------ plugins/outputs/mqtt/mqtt.go | 9 ++++----- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/plugins/outputs/mqtt/README.md b/plugins/outputs/mqtt/README.md index 697418218..38eec7c3b 100644 --- a/plugins/outputs/mqtt/README.md +++ b/plugins/outputs/mqtt/README.md @@ -32,14 +32,13 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false - + ## When true, metrics will be sent in one MQTT message per flush. Otherwise, ## metrics are written one metric per MQTT message. 
# batch = false - - ## When true, metric will have RETAIN flag set, making broker cache entries until someone - ## actually reads it - # retain = flase + + ## When true, messages will have RETAIN flag set. + # retain = false ## Data format to output. # data_format = "influx" @@ -60,5 +59,5 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt * `tls_cert`: TLS CERT * `tls_key`: TLS key * `insecure_skip_verify`: Use TLS but skip chain & host verification (default: false) -* `retain`: Set `retain` flag when publishing, instructing server to cache metric until someone reads it (default: false) +* `retain`: Set `retain` flag when publishing * `data_format`: [About Telegraf data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md) diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index 55d9532db..bacdd3b0e 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -6,13 +6,12 @@ import ( "sync" "time" + paho "github.com/eclipse/paho.mqtt.golang" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" - - paho "github.com/eclipse/paho.mqtt.golang" ) var sampleConfig = ` @@ -49,10 +48,10 @@ var sampleConfig = ` ## When true, metrics will be sent in one MQTT message per flush. Otherwise, ## metrics are written one metric per MQTT message. # batch = false - + ## When true, metric will have RETAIN flag set, making broker cache entries until someone ## actually reads it - # retain = flase + # retain = false ## Data format to output. ## Each data format has its own unique set of configuration options, read @@ -72,7 +71,7 @@ type MQTT struct { ClientID string `toml:"client_id"` tls.ClientConfig BatchMessage bool `toml:"batch"` - Retain bool + Retain bool `toml:"retain"` client paho.Client opts *paho.ClientOptions From 0df92dff0d01d182f3cda37a7f5ec220000d4635 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 11 Feb 2019 17:27:05 -0800 Subject: [PATCH 0592/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bed8efbbd..5a6aa5dc9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,6 +42,7 @@ - [#5382](https://github.com/influxdata/telegraf/pull/5382): Support unix_us and unix_ns timestamp format in csv parser. - [#5391](https://github.com/influxdata/telegraf/pull/5391): Add resource type and resource label support to stackdriver output. - [#5396](https://github.com/influxdata/telegraf/pull/5396): Add internal metric for line too long in influxdb_listener. +- [#4892](https://github.com/influxdata/telegraf/pull/4892): Add option to set retain flag on messages to mqtt output. 
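To illustrate the retain entry above: with `retain = true` the output simply passes the flag through to the paho client's `Publish` call. A minimal standalone sketch, with broker address and topic as placeholders:

```go
package main

import (
	"time"

	paho "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	opts := paho.NewClientOptions().AddBroker("tcp://localhost:1883")
	client := paho.NewClient(opts)
	if token := client.Connect(); token.Wait() && token.Error() != nil {
		panic(token.Error())
	}
	// retained=true asks the broker to cache the message and deliver it to
	// future subscribers, which is exactly what the new `retain` option toggles.
	token := client.Publish("telegraf/host01/cpu", 0, true, []byte("cpu value=42"))
	token.WaitTimeout(5 * time.Second)
	client.Disconnect(250)
}
```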
#### Bugfixes From ddf35ddaf32fb323fabb66a628579f00ce09b9e7 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 12 Feb 2019 12:36:22 -0700 Subject: [PATCH 0593/1815] Note how to use all namespaces in kube_inventory (#5416) --- plugins/inputs/kube_inventory/README.md | 4 ++-- plugins/inputs/kube_inventory/kube_state.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md index 9a71ec4a6..db13c86b9 100644 --- a/plugins/inputs/kube_inventory/README.md +++ b/plugins/inputs/kube_inventory/README.md @@ -29,7 +29,7 @@ avoid cardinality issues: ## URL for the Kubernetes API url = "https://127.0.0.1" - ## Namespace to use + ## Namespace to use. Set to "" to use all namespaces. # namespace = "default" ## Use bearer token for authorization. ('bearer_token' takes priority) @@ -216,7 +216,7 @@ The persistentvolumeclaim "phase" is saved in the `phase` tag with a correlated ``` kubernetes_configmap,configmap_name=envoy-config,namespace=default,resource_version=56593031 created=1544103867000000000i 1547597616000000000 -kubernetes_daemonset +kubernetes_daemonset,daemonset_name=telegraf,namespace=logging number_unavailable=0i,desired_number_scheduled=11i,number_available=11i,number_misscheduled=8i,number_ready=11i,updated_number_scheduled=11i,created=1527758699000000000i,generation=16i,current_number_scheduled=11i 1547597616000000000 kubernetes_deployment,deployment_name=deployd,namespace=default replicas_unavailable=0i,created=1544103082000000000i,replicas_available=1i 1547597616000000000 kubernetes_node,node_name=ip-172-17-0-2.internal allocatable_pods=110i,capacity_memory_bytes=128837533696,capacity_pods=110i,capacity_cpu_cores=16i,allocatable_cpu_cores=16i,allocatable_memory_bytes=128732676096 1547597616000000000 kubernetes_persistentvolume,phase=Released,pv_name=pvc-aaaaaaaa-bbbb-cccc-1111-222222222222,storageclass=ebs-1-retain phase_type=3i 1547597616000000000 diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_state.go index 705d0f65e..57d31908d 100644 --- a/plugins/inputs/kube_inventory/kube_state.go +++ b/plugins/inputs/kube_inventory/kube_state.go @@ -38,7 +38,7 @@ var sampleConfig = ` ## URL for the Kubernetes API url = "https://127.0.0.1" - ## Namespace to use + ## Namespace to use. Set to "" to use all namespaces. # namespace = "default" ## Use bearer token for authorization. 
('bearer_token' takes priority)

From c0bb8625dc64347fbd34a4d63c78ad0652236dbc Mon Sep 17 00:00:00 2001
From: Pontus Rydin
Date: Tue, 12 Feb 2019 17:05:14 -0500
Subject: [PATCH 0594/1815] Add resource path based filtering to vsphere input (#5165)

---
 plugins/inputs/vsphere/README.md       |  47 ++++
 plugins/inputs/vsphere/client.go       |  18 +-
 plugins/inputs/vsphere/endpoint.go     | 106 ++++++---
 plugins/inputs/vsphere/finder.go       | 241 ++++++++++++++++++++
 plugins/inputs/vsphere/tscache.go      |   2 +-
 plugins/inputs/vsphere/vsphere.go      |  45 ++--
 plugins/inputs/vsphere/vsphere_test.go | 298 ++++++++++++++++++++-----
 7 files changed, 641 insertions(+), 116 deletions(-)
 create mode 100644 plugins/inputs/vsphere/finder.go

diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md
index 533c35257..e14de2cee 100644
--- a/plugins/inputs/vsphere/README.md
+++ b/plugins/inputs/vsphere/README.md
@@ -27,6 +27,7 @@ vm_metric_exclude = [ "*" ]
 
   ## VMs
   ## Typical VM metrics (if omitted or empty, all metrics are collected)
+  # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected)
   vm_metric_include = [
     "cpu.demand.average",
     "cpu.idle.summation",
@@ -68,6 +69,7 @@ vm_metric_exclude = [ "*" ]
 
   ## Hosts
   ## Typical host metrics (if omitted or empty, all metrics are collected)
+  # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected)
   host_metric_include = [
     "cpu.coreUtilization.average",
     "cpu.costop.summation",
@@ -120,16 +122,19 @@ vm_metric_exclude = [ "*" ]
   # host_instances = true ## true by default
 
   ## Clusters
+  # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
   # cluster_metric_include = [] ## if omitted or empty, all metrics are collected
   # cluster_metric_exclude = [] ## Nothing excluded by default
   # cluster_instances = false ## false by default
 
   ## Datastores
+  # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected)
   # datastore_metric_include = [] ## if omitted or empty, all metrics are collected
   # datastore_metric_exclude = [] ## Nothing excluded by default
   # datastore_instances = false ## false by default
 
   ## Datacenters
+  # datacenter_include = [ "/*/host/**"] # Inventory path to datacenters to collect (by default all are collected)
   datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
   datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
   # datacenter_instances = false ## false by default
@@ -196,6 +201,48 @@ For setting up concurrency, modify `collect_concurrency` and `discover_concurren
 # discover_concurrency = 1
 ```
 
+### Inventory Paths
+Resources to be monitored can be selected using Inventory Paths. This treats the vSphere inventory as a tree structure similar
+to a file system. A vSphere inventory has a structure similar to this:
+
+```
+
++-DC0                             # Virtual datacenter
+  +-datastore                     # Datastore folder (created by system)
+  | +-Datastore1
+  +-host                          # Host folder (created by system)
+  | +-Cluster1
+  | | +-Host1
+  | | | +-VM1
+  | | | +-VM2
+  | | | +-hadoop1
+  | +-Host2                       # Dummy cluster created for non-clustered host
+  | | +-Host2
+  | | | +-VM3
+  | | | +-VM4
+  +-vm                            # VM folder (created by system)
+  | +-VM1
+  | +-VM2
+  | +-Folder1
+  | | +-hadoop1
+  | | +-NestedFolder1
+  | | | +-VM3
+  | | | +-VM4
+```
+
+#### Using Inventory Paths
+Using familiar UNIX-style paths, one could select e.g. VM2 with the path ```/DC0/vm/VM2```.
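Programmatically, such a path is resolved by the `Finder` type this patch introduces (see the finder.go diff below). A hedged, plugin-internal sketch of looking up VM2, assuming an already-connected `*Client` and `mo` imported from `github.com/vmware/govmomi/vim25/mo`:

```go
// Sketch only: findVM2 resolves the inventory path /DC0/vm/VM2 with the
// Finder added in this patch; the helper name is hypothetical.
func findVM2(ctx context.Context, client *Client) ([]mo.VirtualMachine, error) {
	f := &Finder{client}
	var vms []mo.VirtualMachine
	// "/DC0/vm/VM2" walks datacenter DC0, its system "vm" folder, then VM2.
	err := f.FindAll(ctx, "VirtualMachine", []string{"/DC0/vm/VM2"}, &vms)
	return vms, err
}
```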
+
+Often, we want to select a group of resources, such as all the VMs in a folder. We could use the path ```/DC0/vm/Folder1/*``` for that.
+
+Another possibility is to select objects using a partial name, such as ```/DC0/vm/Folder1/hadoop*``` yielding all VMs in Folder1 with a name starting with "hadoop".
+
+Finally, due to the arbitrary nesting of the folder structure, we need a "recursive wildcard" for traversing multiple folders. We use the "**" symbol for that. If we want to look for a VM with a name starting with "hadoop" in any folder, we could use the following path: ```/DC0/vm/**/hadoop*```
+
+#### Multiple paths to VMs
+As we can see from the example tree above, VMs appear both in their own folder under the datacenter and under the hosts. This is useful when you'd like to select VMs on a specific host. For example, ```/DC0/host/Cluster1/Host1/hadoop*``` selects all VMs with a name starting with "hadoop" that are running on Host1.
+
+We can extend this to look at the cluster level: ```/DC0/host/Cluster1/*/hadoop*```. This selects any VM matching "hadoop*" on any host in Cluster1.
 
 ## Performance Considerations
 
 ### Realtime vs. historical metrics
diff --git a/plugins/inputs/vsphere/client.go b/plugins/inputs/vsphere/client.go
index 8b1c4866a..ca7af5843 100644
--- a/plugins/inputs/vsphere/client.go
+++ b/plugins/inputs/vsphere/client.go
@@ -74,7 +74,7 @@ func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) {
 		ctx1, cancel1 := context.WithTimeout(ctx, cf.parent.Timeout.Duration)
 		defer cancel1()
 		if _, err := methods.GetCurrentTime(ctx1, cf.client.Client); err != nil {
-			log.Printf("I! [input.vsphere]: Client session seems to have time out. Reauthenticating!")
+			log.Printf("I! [inputs.vsphere]: Client session seems to have timed out. Reauthenticating!")
 			ctx2, cancel2 := context.WithTimeout(ctx, cf.parent.Timeout.Duration)
 			defer cancel2()
 			if cf.client.Client.SessionManager.Login(ctx2, url.UserPassword(cf.parent.Username, cf.parent.Password)) != nil {
@@ -102,7 +102,7 @@ func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) {
 		u.User = url.UserPassword(vs.Username, vs.Password)
 	}
 
-	log.Printf("D! [input.vsphere]: Creating client: %s", u.Host)
+	log.Printf("D! [inputs.vsphere]: Creating client: %s", u.Host)
 	soapClient := soap.NewClient(u, tlsCfg.InsecureSkipVerify)
 
 	// Add certificate if we have it. Use it to log us in.
@@ -173,9 +173,9 @@ func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) {
 	if err != nil {
 		return nil, err
 	}
-	log.Printf("D! [input.vsphere] vCenter says max_query_metrics should be %d", n)
+	log.Printf("D! [inputs.vsphere] vCenter says max_query_metrics should be %d", n)
 	if n < vs.MaxQueryMetrics {
-		log.Printf("W! [input.vsphere] Configured max_query_metrics is %d, but server limits it to %d. Reducing.", vs.MaxQueryMetrics, n)
+		log.Printf("W! [inputs.vsphere] Configured max_query_metrics is %d, but server limits it to %d. Reducing.", vs.MaxQueryMetrics, n)
 		vs.MaxQueryMetrics = n
 	}
 	return client, nil
@@ -199,7 +199,7 @@ func (c *Client) close() {
 	defer cancel()
 	if c.Client != nil {
 		if err := c.Client.Logout(ctx); err != nil {
-			log.Printf("E! [input.vsphere]: Error during logout: %s", err)
+			log.Printf("E! [inputs.vsphere]: Error during logout: %s", err)
 		}
 	}
 }
@@ -228,7 +228,7 @@ func (c *Client) GetMaxQueryMetrics(ctx context.Context) (int, error) {
 		if s, ok := res[0].GetOptionValue().Value.(string); ok {
 			v, err := strconv.Atoi(s)
 			if err == nil {
-				log.Printf("D! 
[input.vsphere] vCenter maxQueryMetrics is defined: %d", v) + log.Printf("D! [inputs.vsphere] vCenter maxQueryMetrics is defined: %d", v) if v == -1 { // Whatever the server says, we never ask for more metrics than this. return absoluteMaxMetrics, nil @@ -239,17 +239,17 @@ func (c *Client) GetMaxQueryMetrics(ctx context.Context) (int, error) { // Fall through version-based inference if value isn't usable } } else { - log.Println("D! [input.vsphere] Option query for maxQueryMetrics failed. Using default") + log.Println("D! [inputs.vsphere] Option query for maxQueryMetrics failed. Using default") } // No usable maxQueryMetrics setting. Infer based on version ver := c.Client.Client.ServiceContent.About.Version parts := strings.Split(ver, ".") if len(parts) < 2 { - log.Printf("W! [input.vsphere] vCenter returned an invalid version string: %s. Using default query size=64", ver) + log.Printf("W! [inputs.vsphere] vCenter returned an invalid version string: %s. Using default query size=64", ver) return 64, nil } - log.Printf("D! [input.vsphere] vCenter version is: %s", ver) + log.Printf("D! [inputs.vsphere] vCenter version is: %s", ver) major, err := strconv.Atoi(parts[0]) if err != nil { return 0, err diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index 95040dd4f..192a4a487 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -19,7 +19,6 @@ import ( "github.com/influxdata/telegraf" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/performance" - "github.com/vmware/govmomi/view" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" ) @@ -51,6 +50,7 @@ type Endpoint struct { type resourceKind struct { name string + vcName string pKey string parentTag string enabled bool @@ -58,12 +58,13 @@ type resourceKind struct { sampling int32 objects objectMap filters filter.Filter + paths []string + collectInstances bool + getObjects func(context.Context, *Endpoint, *ResourceFilter) (objectMap, error) include []string simple bool metrics performance.MetricList - collectInstances bool parent string - getObjects func(context.Context, *Client, *Endpoint, *view.ContainerView) (objectMap, error) latestSample time.Time lastColl time.Time } @@ -110,6 +111,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, e.resourceKinds = map[string]*resourceKind{ "datacenter": { name: "datacenter", + vcName: "Datacenter", pKey: "dcname", parentTag: "", enabled: anythingEnabled(parent.DatacenterMetricExclude), @@ -117,6 +119,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, sampling: 300, objects: make(objectMap), filters: newFilterOrPanic(parent.DatacenterMetricInclude, parent.DatacenterMetricExclude), + paths: parent.DatacenterInclude, simple: isSimple(parent.DatacenterMetricInclude, parent.DatacenterMetricExclude), include: parent.DatacenterMetricInclude, collectInstances: parent.DatacenterInstances, @@ -125,6 +128,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, }, "cluster": { name: "cluster", + vcName: "ClusterComputeResource", pKey: "clustername", parentTag: "dcname", enabled: anythingEnabled(parent.ClusterMetricExclude), @@ -132,6 +136,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, sampling: 300, objects: make(objectMap), filters: newFilterOrPanic(parent.ClusterMetricInclude, parent.ClusterMetricExclude), + paths: parent.ClusterInclude, simple: 
isSimple(parent.ClusterMetricInclude, parent.ClusterMetricExclude), include: parent.ClusterMetricInclude, collectInstances: parent.ClusterInstances, @@ -140,6 +145,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, }, "host": { name: "host", + vcName: "HostSystem", pKey: "esxhostname", parentTag: "clustername", enabled: anythingEnabled(parent.HostMetricExclude), @@ -147,6 +153,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, sampling: 20, objects: make(objectMap), filters: newFilterOrPanic(parent.HostMetricInclude, parent.HostMetricExclude), + paths: parent.HostInclude, simple: isSimple(parent.HostMetricInclude, parent.HostMetricExclude), include: parent.HostMetricInclude, collectInstances: parent.HostInstances, @@ -155,6 +162,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, }, "vm": { name: "vm", + vcName: "VirtualMachine", pKey: "vmname", parentTag: "esxhostname", enabled: anythingEnabled(parent.VMMetricExclude), @@ -162,6 +170,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, sampling: 20, objects: make(objectMap), filters: newFilterOrPanic(parent.VMMetricInclude, parent.VMMetricExclude), + paths: parent.VMInclude, simple: isSimple(parent.VMMetricInclude, parent.VMMetricExclude), include: parent.VMMetricInclude, collectInstances: parent.VMInstances, @@ -170,12 +179,14 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, }, "datastore": { name: "datastore", + vcName: "Datastore", pKey: "dsname", enabled: anythingEnabled(parent.DatastoreMetricExclude), realTime: false, sampling: 300, objects: make(objectMap), filters: newFilterOrPanic(parent.DatastoreMetricInclude, parent.DatastoreMetricExclude), + paths: parent.DatastoreInclude, simple: isSimple(parent.DatastoreMetricInclude, parent.DatastoreMetricExclude), include: parent.DatastoreMetricInclude, collectInstances: parent.DatastoreInstances, @@ -227,10 +238,10 @@ func (e *Endpoint) startDiscovery(ctx context.Context) { case <-e.discoveryTicker.C: err := e.discover(ctx) if err != nil && err != context.Canceled { - log.Printf("E! [input.vsphere]: Error in discovery for %s: %v", e.URL.Host, err) + log.Printf("E! [inputs.vsphere]: Error in discovery for %s: %v", e.URL.Host, err) } case <-ctx.Done(): - log.Printf("D! [input.vsphere]: Exiting discovery goroutine for %s", e.URL.Host) + log.Printf("D! [inputs.vsphere]: Exiting discovery goroutine for %s", e.URL.Host) e.discoveryTicker.Stop() return } @@ -241,7 +252,7 @@ func (e *Endpoint) startDiscovery(ctx context.Context) { func (e *Endpoint) initalDiscovery(ctx context.Context) { err := e.discover(ctx) if err != nil && err != context.Canceled { - log.Printf("E! [input.vsphere]: Error in discovery for %s: %v", e.URL.Host, err) + log.Printf("E! [inputs.vsphere]: Error in discovery for %s: %v", e.URL.Host, err) } e.startDiscovery(ctx) } @@ -254,7 +265,7 @@ func (e *Endpoint) init(ctx context.Context) error { // goroutine without waiting for it. This will probably cause us to report an empty // dataset on the first collection, but it solves the issue of the first collection timing out. if e.Parent.ForceDiscoverOnInit { - log.Printf("D! [input.vsphere]: Running initial discovery and waiting for it to finish") + log.Printf("D! [inputs.vsphere]: Running initial discovery and waiting for it to finish") e.initalDiscovery(ctx) } else { // Otherwise, just run it in the background. 
We'll probably have an incomplete first metric @@ -317,7 +328,7 @@ func (e *Endpoint) getDatacenterName(ctx context.Context, client *Client, cache defer cancel1() err := o.Properties(ctx1, here, []string{"parent", "name"}, &result) if err != nil { - log.Printf("W! [input.vsphere]: Error while resolving parent. Assuming no parent exists. Error: %s", err) + log.Printf("W! [inputs.vsphere]: Error while resolving parent. Assuming no parent exists. Error: %s", err) break } if result.Reference().Type == "Datacenter" { @@ -326,7 +337,7 @@ func (e *Endpoint) getDatacenterName(ctx context.Context, client *Client, cache break } if result.Parent == nil { - log.Printf("D! [input.vsphere]: No parent found for %s (ascending from %s)", here.Reference(), r.Reference()) + log.Printf("D! [inputs.vsphere]: No parent found for %s (ascending from %s)", here.Reference(), r.Reference()) break } here = result.Parent.Reference() @@ -356,7 +367,7 @@ func (e *Endpoint) discover(ctx context.Context) error { return err } - log.Printf("D! [input.vsphere]: Discover new objects for %s", e.URL.Host) + log.Printf("D! [inputs.vsphere]: Discover new objects for %s", e.URL.Host) resourceKinds := make(map[string]resourceKind) dcNameCache := make(map[string]string) @@ -365,10 +376,17 @@ func (e *Endpoint) discover(ctx context.Context) error { // Populate resource objects, and endpoint instance info. newObjects := make(map[string]objectMap) for k, res := range e.resourceKinds { - log.Printf("D! [input.vsphere] Discovering resources for %s", res.name) + log.Printf("D! [inputs.vsphere] Discovering resources for %s", res.name) // Need to do this for all resource types even if they are not enabled if res.enabled || k != "vm" { - objects, err := res.getObjects(ctx, client, e, client.Root) + rf := ResourceFilter{ + finder: &Finder{client}, + resType: res.vcName, + paths: res.paths} + + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + objects, err := res.getObjects(ctx1, e, &rf) if err != nil { return err } @@ -424,10 +442,10 @@ func (e *Endpoint) discover(ctx context.Context) error { } func (e *Endpoint) simpleMetadataSelect(ctx context.Context, client *Client, res *resourceKind) { - log.Printf("D! [input.vsphere] Using fast metric metadata selection for %s", res.name) + log.Printf("D! [inputs.vsphere] Using fast metric metadata selection for %s", res.name) m, err := client.CounterInfoByName(ctx) if err != nil { - log.Printf("E! [input.vsphere]: Error while getting metric metadata. Discovery will be incomplete. Error: %s", err) + log.Printf("E! [inputs.vsphere]: Error while getting metric metadata. Discovery will be incomplete. Error: %s", err) return } res.metrics = make(performance.MetricList, 0, len(res.include)) @@ -443,7 +461,7 @@ func (e *Endpoint) simpleMetadataSelect(ctx context.Context, client *Client, res } res.metrics = append(res.metrics, cnt) } else { - log.Printf("W! [input.vsphere] Metric name %s is unknown. Will not be collected", s) + log.Printf("W! [inputs.vsphere] Metric name %s is unknown. Will not be collected", s) } } } @@ -476,7 +494,7 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, te.Run(ctx, func() { metrics, err := e.getMetadata(ctx, obj, res.sampling) if err != nil { - log.Printf("E! [input.vsphere]: Error while getting metric metadata. Discovery will be incomplete. Error: %s", err) + log.Printf("E! [inputs.vsphere]: Error while getting metric metadata. Discovery will be incomplete. 
Error: %s", err) } mMap := make(map[string]types.PerfMetricId) for _, m := range metrics { @@ -489,7 +507,7 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, mMap[strconv.Itoa(int(m.CounterId))+"|"+m.Instance] = m } } - log.Printf("D! [input.vsphere] Found %d metrics for %s", len(mMap), obj.name) + log.Printf("D! [inputs.vsphere] Found %d metrics for %s", len(mMap), obj.name) instInfoMux.Lock() defer instInfoMux.Unlock() if len(mMap) > len(res.metrics) { @@ -506,9 +524,11 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, te.Wait() } -func getDatacenters(ctx context.Context, client *Client, e *Endpoint, root *view.ContainerView) (objectMap, error) { +func getDatacenters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { var resources []mo.Datacenter - err := client.ListResources(ctx, root, []string{"Datacenter"}, []string{"name", "parent"}, &resources) + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + err := filter.FindAll(ctx1, &resources) if err != nil { return nil, err } @@ -520,9 +540,11 @@ func getDatacenters(ctx context.Context, client *Client, e *Endpoint, root *view return m, nil } -func getClusters(ctx context.Context, client *Client, e *Endpoint, root *view.ContainerView) (objectMap, error) { +func getClusters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { var resources []mo.ClusterComputeResource - err := client.ListResources(ctx, root, []string{"ClusterComputeResource"}, []string{"name", "parent"}, &resources) + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + err := filter.FindAll(ctx1, &resources) if err != nil { return nil, err } @@ -532,13 +554,19 @@ func getClusters(ctx context.Context, client *Client, e *Endpoint, root *view.Co // We're not interested in the immediate parent (a folder), but the data center. p, ok := cache[r.Parent.Value] if !ok { - o := object.NewFolder(root.Client(), *r.Parent) - var folder mo.Folder ctx2, cancel2 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) defer cancel2() - err := o.Properties(ctx2, *r.Parent, []string{"parent"}, &folder) + client, err := e.clientFactory.GetClient(ctx2) if err != nil { - log.Printf("W! [input.vsphere] Error while getting folder parent: %e", err) + return nil, err + } + o := object.NewFolder(client.Client.Client, *r.Parent) + var folder mo.Folder + ctx3, cancel3 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel3() + err = o.Properties(ctx3, *r.Parent, []string{"parent"}, &folder) + if err != nil { + log.Printf("W! 
[inputs.vsphere] Error while getting folder parent: %e", err) p = nil } else { pp := folder.Parent.Reference() @@ -552,9 +580,9 @@ func getClusters(ctx context.Context, client *Client, e *Endpoint, root *view.Co return m, nil } -func getHosts(ctx context.Context, client *Client, e *Endpoint, root *view.ContainerView) (objectMap, error) { +func getHosts(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { var resources []mo.HostSystem - err := client.ListResources(ctx, root, []string{"HostSystem"}, []string{"name", "parent"}, &resources) + err := filter.FindAll(ctx, &resources) if err != nil { return nil, err } @@ -566,9 +594,11 @@ func getHosts(ctx context.Context, client *Client, e *Endpoint, root *view.Conta return m, nil } -func getVMs(ctx context.Context, client *Client, e *Endpoint, root *view.ContainerView) (objectMap, error) { +func getVMs(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { var resources []mo.VirtualMachine - err := client.ListResources(ctx, root, []string{"VirtualMachine"}, []string{"name", "runtime.host", "runtime.powerState", "config.guestId", "config.uuid"}, &resources) + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + err := filter.FindAll(ctx1, &resources) if err != nil { return nil, err } @@ -591,9 +621,11 @@ func getVMs(ctx context.Context, client *Client, e *Endpoint, root *view.Contain return m, nil } -func getDatastores(ctx context.Context, client *Client, e *Endpoint, root *view.ContainerView) (objectMap, error) { +func getDatastores(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { var resources []mo.Datastore - err := client.ListResources(ctx, root, []string{"Datastore"}, []string{"name", "parent", "info"}, &resources) + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + err := filter.FindAll(ctx1, &resources) if err != nil { return nil, err } @@ -710,6 +742,14 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim pq.StartTime = &start pq.EndTime = &now + // Make sure endtime is always after start time. We may occasionally see samples from the future + // returned from vCenter. This is presumably due to time drift between vCenter and EXSi nodes. + if pq.StartTime.After(*pq.EndTime) { + log.Printf("D! [inputs.vsphere] Future sample. Res: %s, StartTime: %s, EndTime: %s, Now: %s", pq.Entity, *pq.StartTime, *pq.EndTime, now) + end := start.Add(time.Second) + pq.EndTime = &end + } + pqs = append(pqs, pq) mr -= mc metrics += mc @@ -719,7 +759,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim // 2) We are at the last resource and have no more data to process. // 3) The query contains more than 100,000 individual metrics if mr > 0 || nRes >= e.Parent.MaxQueryObjects || len(pqs) > 100000 { - log.Printf("D! [input.vsphere]: Queueing query: %d objects, %d metrics (%d remaining) of type %s for %s. Processed objects: %d. Total objects %d", + log.Printf("D! [inputs.vsphere]: Queueing query: %d objects, %d metrics (%d remaining) of type %s for %s. Processed objects: %d. Total objects %d", len(pqs), metrics, mr, res.name, e.URL.Host, total+1, len(res.objects)) // Don't send work items if the context has been cancelled. @@ -740,7 +780,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim // Handle final partially filled chunk if len(pqs) > 0 { // Run collection job - log.Printf("D! 
[input.vsphere]: Queuing query: %d objects, %d metrics (0 remaining) of type %s for %s. Total objects %d (final chunk)", + log.Printf("D! [inputs.vsphere]: Queuing query: %d objects, %d metrics (0 remaining) of type %s for %s. Total objects %d (final chunk)", len(pqs), metrics, res.name, e.URL.Host, len(res.objects)) submitChunkJob(ctx, te, job, pqs) } diff --git a/plugins/inputs/vsphere/finder.go b/plugins/inputs/vsphere/finder.go new file mode 100644 index 000000000..372aa5e3b --- /dev/null +++ b/plugins/inputs/vsphere/finder.go @@ -0,0 +1,241 @@ +package vsphere + +import ( + "context" + "log" + "reflect" + "strings" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/view" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +var childTypes map[string][]string + +var addFields map[string][]string + +// Finder allows callers to find resources in vCenter given a query string. +type Finder struct { + client *Client +} + +// ResourceFilter is a convenience class holding a finder and a set of paths. It is useful when you need a +// self contained object capable of returning a certain set of resources. +type ResourceFilter struct { + finder *Finder + resType string + paths []string +} + +type nameAndRef struct { + name string + ref types.ManagedObjectReference +} + +// FindAll returns the union of resources found given the supplied resource type and paths. +func (f *Finder) FindAll(ctx context.Context, resType string, paths []string, dst interface{}) error { + for _, p := range paths { + if err := f.Find(ctx, resType, p, dst); err != nil { + return err + } + } + return nil +} + +// Find returns the resources matching the specified path. +func (f *Finder) Find(ctx context.Context, resType, path string, dst interface{}) error { + p := strings.Split(path, "/") + flt := make([]property.Filter, len(p)-1) + for i := 1; i < len(p); i++ { + flt[i-1] = property.Filter{"name": p[i]} + } + objs := make(map[string]types.ObjectContent) + err := f.descend(ctx, f.client.Client.ServiceContent.RootFolder, resType, flt, 0, objs) + if err != nil { + return err + } + objectContentToTypedArray(objs, dst) + log.Printf("D! [inputs.vsphere] Find(%s, %s) returned %d objects", resType, path, len(objs)) + return nil +} + +func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference, resType string, + tokens []property.Filter, pos int, objs map[string]types.ObjectContent) error { + isLeaf := pos == len(tokens)-1 + + // No more tokens to match? + if pos >= len(tokens) { + return nil + } + + // Determine child types + + ct, ok := childTypes[root.Reference().Type] + if !ok { + // We don't know how to handle children of this type. Stop descending. + return nil + } + + m := view.NewManager(f.client.Client.Client) + defer m.Destroy(ctx) + v, err := m.CreateContainerView(ctx, root, ct, false) + if err != nil { + return err + } + defer v.Destroy(ctx) + var content []types.ObjectContent + + fields := []string{"name"} + if isLeaf { + // Special case: The last token is a recursive wildcard, so we can grab everything + // recursively in a single call. + if tokens[pos]["name"] == "**" { + v2, err := m.CreateContainerView(ctx, root, []string{resType}, true) + defer v2.Destroy(ctx) + if af, ok := addFields[resType]; ok { + fields = append(fields, af...) 
+			}
+			err = v2.Retrieve(ctx, []string{resType}, fields, &content)
+			if err != nil {
+				return err
+			}
+			for _, c := range content {
+				objs[c.Obj.String()] = c
+			}
+			return nil
+		}
+
+		if af, ok := addFields[resType]; ok {
+			fields = append(fields, af...)
+		}
+		err = v.Retrieve(ctx, []string{resType}, fields, &content)
+		if err != nil {
+			return err
+		}
+	} else {
+		err = v.Retrieve(ctx, ct, fields, &content)
+		if err != nil {
+			return err
+		}
+	}
+
+	for _, c := range content {
+		if !tokens[pos].MatchPropertyList(c.PropSet[:1]) {
+			continue
+		}
+
+		// Already been here through another path? Skip!
+		if _, ok := objs[root.Reference().String()]; ok {
+			continue
+		}
+
+		if c.Obj.Type == resType && isLeaf {
+			// We found what we're looking for. Consider it a leaf and stop descending
+			objs[c.Obj.String()] = c
+			continue
+		}
+
+		// Deal with recursive wildcards (**)
+		inc := 1 // Normally we advance one token.
+		if tokens[pos]["name"] == "**" {
+			if isLeaf {
+				inc = 0 // Can't advance past last token, so keep descending the tree
+			} else {
+				// Lookahead to next token. If it matches this child, we are out of
+				// the recursive wildcard handling and we can advance TWO tokens ahead, since
+				// the token that ended the recursive wildcard mode is now consumed.
+				if tokens[pos+1].MatchPropertyList(c.PropSet) {
+					if pos < len(tokens)-2 {
+						inc = 2
+					} else {
+						// We found a match and it's at a leaf! Grab it!
+						objs[c.Obj.String()] = c
+						continue
+					}
+				} else {
+					// We didn't break out of recursive wildcard mode yet, so stay on this token.
+					inc = 0
+
+				}
+			}
+		}
+		err := f.descend(ctx, c.Obj, resType, tokens, pos+inc, objs)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func nameFromObjectContent(o types.ObjectContent) string {
+	for _, p := range o.PropSet {
+		if p.Name == "name" {
+			return p.Val.(string)
+		}
+	}
+	return ""
+}
+
+func objectContentToTypedArray(objs map[string]types.ObjectContent, dst interface{}) error {
+	rt := reflect.TypeOf(dst)
+	if rt == nil || rt.Kind() != reflect.Ptr {
+		panic("need pointer")
+	}
+
+	rv := reflect.ValueOf(dst).Elem()
+	if !rv.CanSet() {
+		panic("cannot set dst")
+	}
+	for _, p := range objs {
+		v, err := mo.ObjectContentToType(p)
+		if err != nil {
+			return err
+		}
+
+		vt := reflect.TypeOf(v)
+
+		if !rv.Type().AssignableTo(vt) {
+			// For example: dst is []ManagedEntity, res is []HostSystem
+			if field, ok := vt.FieldByName(rt.Elem().Elem().Name()); ok && field.Anonymous {
+				rv.Set(reflect.Append(rv, reflect.ValueOf(v).FieldByIndex(field.Index)))
+				continue
+			}
+		}
+
+		rv.Set(reflect.Append(rv, reflect.ValueOf(v)))
+	}
+	return nil
+}
+
+// FindAll finds all resources matching the paths that were specified upon creation of
+// the ResourceFilter. 
+func (r *ResourceFilter) FindAll(ctx context.Context, dst interface{}) error { + return r.finder.FindAll(ctx, r.resType, r.paths, dst) +} + +func init() { + childTypes = map[string][]string{ + "HostSystem": {"VirtualMachine"}, + "ComputeResource": {"HostSystem", "ResourcePool"}, + "ClusterComputeResource": {"HostSystem", "ResourcePool"}, + "Datacenter": {"Folder"}, + "Folder": { + "Folder", + "Datacenter", + "VirtualMachine", + "ComputeResource", + "ClusterComputeResource", + "Datastore", + }, + } + + addFields = map[string][]string{ + "HostSystem": {"parent"}, + "VirtualMachine": {"runtime.host", "config.guestId", "config.uuid", "runtime.powerState"}, + "Datastore": {"parent", "info"}, + "ClusterComputeResource": {"parent"}, + "Datacenter": {"parent"}, + } +} diff --git a/plugins/inputs/vsphere/tscache.go b/plugins/inputs/vsphere/tscache.go index 1d1f00ebe..4f73c4fe8 100644 --- a/plugins/inputs/vsphere/tscache.go +++ b/plugins/inputs/vsphere/tscache.go @@ -34,7 +34,7 @@ func (t *TSCache) Purge() { n++ } } - log.Printf("D! [input.vsphere] Purged timestamp cache. %d deleted with %d remaining", n, len(t.table)) + log.Printf("D! [inputs.vsphere] Purged timestamp cache. %d deleted with %d remaining", n, len(t.table)) } // IsNew returns true if the supplied timestamp for the supplied key is more recent than the diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go index 13186634f..809026e3e 100644 --- a/plugins/inputs/vsphere/vsphere.go +++ b/plugins/inputs/vsphere/vsphere.go @@ -22,18 +22,23 @@ type VSphere struct { DatacenterInstances bool DatacenterMetricInclude []string DatacenterMetricExclude []string + DatacenterInclude []string ClusterInstances bool ClusterMetricInclude []string ClusterMetricExclude []string + ClusterInclude []string HostInstances bool HostMetricInclude []string HostMetricExclude []string + HostInclude []string VMInstances bool `toml:"vm_instances"` VMMetricInclude []string `toml:"vm_metric_include"` VMMetricExclude []string `toml:"vm_metric_exclude"` + VMInclude []string `toml:"vm_include"` DatastoreInstances bool DatastoreMetricInclude []string DatastoreMetricExclude []string + DatastoreInclude []string Separator string MaxQueryObjects int @@ -216,7 +221,7 @@ func (v *VSphere) Description() string { // Start is called from telegraf core when a plugin is started and allows it to // perform initialization tasks. func (v *VSphere) Start(acc telegraf.Accumulator) error { - log.Println("D! [input.vsphere]: Starting plugin") + log.Println("D! [inputs.vsphere]: Starting plugin") ctx, cancel := context.WithCancel(context.Background()) v.cancel = cancel @@ -239,7 +244,7 @@ func (v *VSphere) Start(acc telegraf.Accumulator) error { // Stop is called from telegraf core when a plugin is stopped and allows it to // perform shutdown tasks. func (v *VSphere) Stop() { - log.Println("D! [input.vsphere]: Stopping plugin") + log.Println("D! [inputs.vsphere]: Stopping plugin") v.cancel() // Wait for all endpoints to finish. No need to wait for @@ -248,7 +253,7 @@ func (v *VSphere) Stop() { // wait for any discovery to complete by trying to grab the // "busy" mutex. for _, ep := range v.endpoints { - log.Printf("D! [input.vsphere]: Waiting for endpoint %s to finish", ep.URL.Host) + log.Printf("D! 
[inputs.vsphere]: Waiting for endpoint %s to finish", ep.URL.Host) func() { ep.busy.Lock() // Wait until discovery is finished defer ep.busy.Unlock() @@ -286,19 +291,27 @@ func init() { return &VSphere{ Vcenters: []string{}, - ClusterInstances: false, - ClusterMetricInclude: nil, - ClusterMetricExclude: nil, - HostInstances: true, - HostMetricInclude: nil, - HostMetricExclude: nil, - VMInstances: true, - VMMetricInclude: nil, - VMMetricExclude: nil, - DatastoreInstances: false, - DatastoreMetricInclude: nil, - DatastoreMetricExclude: nil, - Separator: "_", + DatacenterInstances: false, + DatacenterMetricInclude: nil, + DatacenterMetricExclude: nil, + DatacenterInclude: []string{"/*"}, + ClusterInstances: false, + ClusterMetricInclude: nil, + ClusterMetricExclude: nil, + ClusterInclude: []string{"/*/host/**"}, + HostInstances: true, + HostMetricInclude: nil, + HostMetricExclude: nil, + HostInclude: []string{"/*/host/**"}, + VMInstances: true, + VMMetricInclude: nil, + VMMetricExclude: nil, + VMInclude: []string{"/*/vm/**"}, + DatastoreInstances: false, + DatastoreMetricInclude: nil, + DatastoreMetricExclude: nil, + DatastoreInclude: []string{"/*/datastore/**"}, + Separator: "_", MaxQueryObjects: 256, MaxQueryMetrics: 256, diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go index a4b931bd9..eff56a89d 100644 --- a/plugins/inputs/vsphere/vsphere_test.go +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -20,6 +20,7 @@ import ( "github.com/stretchr/testify/require" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/simulator" + "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" ) @@ -112,69 +113,105 @@ func defaultVSphere() *VSphere { "mem.usage.*", "mem.active.*"}, ClusterMetricExclude: nil, + ClusterInclude: []string{"/**"}, HostMetricInclude: []string{ - "cpu.ready.summation.delta.millisecond", - "cpu.latency.average.rate.percent", - "cpu.coreUtilization.average.rate.percent", - "mem.usage.average.absolute.percent", - "mem.swapinRate.average.rate.kiloBytesPerSecond", - "mem.state.latest.absolute.number", - "mem.latency.average.absolute.percent", - "mem.vmmemctl.average.absolute.kiloBytes", - "disk.read.average.rate.kiloBytesPerSecond", - "disk.write.average.rate.kiloBytesPerSecond", - "disk.numberReadAveraged.average.rate.number", - "disk.numberWriteAveraged.average.rate.number", - "disk.deviceReadLatency.average.absolute.millisecond", - "disk.deviceWriteLatency.average.absolute.millisecond", - "disk.totalReadLatency.average.absolute.millisecond", - "disk.totalWriteLatency.average.absolute.millisecond", - "storageAdapter.read.average.rate.kiloBytesPerSecond", - "storageAdapter.write.average.rate.kiloBytesPerSecond", - "storageAdapter.numberReadAveraged.average.rate.number", - "storageAdapter.numberWriteAveraged.average.rate.number", - "net.errorsRx.summation.delta.number", - "net.errorsTx.summation.delta.number", - "net.bytesRx.average.rate.kiloBytesPerSecond", - "net.bytesTx.average.rate.kiloBytesPerSecond", - "cpu.used.summation.delta.millisecond", - "cpu.usage.average.rate.percent", - "cpu.utilization.average.rate.percent", - "cpu.wait.summation.delta.millisecond", - "cpu.idle.summation.delta.millisecond", - "cpu.readiness.average.rate.percent", - "cpu.costop.summation.delta.millisecond", - "cpu.swapwait.summation.delta.millisecond", - "mem.swapoutRate.average.rate.kiloBytesPerSecond", - "disk.kernelReadLatency.average.absolute.millisecond", - "disk.kernelWriteLatency.average.absolute.millisecond"}, + 
"cpu.coreUtilization.average", + "cpu.costop.summation", + "cpu.demand.average", + "cpu.idle.summation", + "cpu.latency.average", + "cpu.readiness.average", + "cpu.ready.summation", + "cpu.swapwait.summation", + "cpu.usage.average", + "cpu.usagemhz.average", + "cpu.used.summation", + "cpu.utilization.average", + "cpu.wait.summation", + "disk.deviceReadLatency.average", + "disk.deviceWriteLatency.average", + "disk.kernelReadLatency.average", + "disk.kernelWriteLatency.average", + "disk.numberReadAveraged.average", + "disk.numberWriteAveraged.average", + "disk.read.average", + "disk.totalReadLatency.average", + "disk.totalWriteLatency.average", + "disk.write.average", + "mem.active.average", + "mem.latency.average", + "mem.state.latest", + "mem.swapin.average", + "mem.swapinRate.average", + "mem.swapout.average", + "mem.swapoutRate.average", + "mem.totalCapacity.average", + "mem.usage.average", + "mem.vmmemctl.average", + "net.bytesRx.average", + "net.bytesTx.average", + "net.droppedRx.summation", + "net.droppedTx.summation", + "net.errorsRx.summation", + "net.errorsTx.summation", + "net.usage.average", + "power.power.average", + "storageAdapter.numberReadAveraged.average", + "storageAdapter.numberWriteAveraged.average", + "storageAdapter.read.average", + "storageAdapter.write.average", + "sys.uptime.latest"}, HostMetricExclude: nil, + HostInclude: []string{"/**"}, VMMetricInclude: []string{ - "cpu.ready.summation.delta.millisecond", - "mem.swapinRate.average.rate.kiloBytesPerSecond", - "virtualDisk.numberReadAveraged.average.rate.number", - "virtualDisk.numberWriteAveraged.average.rate.number", - "virtualDisk.totalReadLatency.average.absolute.millisecond", - "virtualDisk.totalWriteLatency.average.absolute.millisecond", - "virtualDisk.readOIO.latest.absolute.number", - "virtualDisk.writeOIO.latest.absolute.number", - "net.bytesRx.average.rate.kiloBytesPerSecond", - "net.bytesTx.average.rate.kiloBytesPerSecond", - "net.droppedRx.summation.delta.number", - "net.droppedTx.summation.delta.number", - "cpu.run.summation.delta.millisecond", - "cpu.used.summation.delta.millisecond", - "mem.swapoutRate.average.rate.kiloBytesPerSecond", - "virtualDisk.read.average.rate.kiloBytesPerSecond", - "virtualDisk.write.average.rate.kiloBytesPerSecond"}, + "cpu.demand.average", + "cpu.idle.summation", + "cpu.latency.average", + "cpu.readiness.average", + "cpu.ready.summation", + "cpu.run.summation", + "cpu.usagemhz.average", + "cpu.used.summation", + "cpu.wait.summation", + "mem.active.average", + "mem.granted.average", + "mem.latency.average", + "mem.swapin.average", + "mem.swapinRate.average", + "mem.swapout.average", + "mem.swapoutRate.average", + "mem.usage.average", + "mem.vmmemctl.average", + "net.bytesRx.average", + "net.bytesTx.average", + "net.droppedRx.summation", + "net.droppedTx.summation", + "net.usage.average", + "power.power.average", + "virtualDisk.numberReadAveraged.average", + "virtualDisk.numberWriteAveraged.average", + "virtualDisk.read.average", + "virtualDisk.readOIO.latest", + "virtualDisk.throughput.usage.average", + "virtualDisk.totalReadLatency.average", + "virtualDisk.totalWriteLatency.average", + "virtualDisk.write.average", + "virtualDisk.writeOIO.latest", + "sys.uptime.latest"}, VMMetricExclude: nil, + VMInclude: []string{"/**"}, DatastoreMetricInclude: []string{ "disk.used.*", "disk.provsioned.*"}, - DatastoreMetricExclude: nil, - ClientConfig: itls.ClientConfig{InsecureSkipVerify: true}, + DatastoreMetricExclude: nil, + DatastoreInclude: []string{"/**"}, + 
DatacenterMetricInclude: nil, + DatacenterMetricExclude: nil, + DatacenterInclude: []string{"/**"}, + ClientConfig: itls.ClientConfig{InsecureSkipVerify: true}, MaxQueryObjects: 256, + MaxQueryMetrics: 256, ObjectDiscoveryInterval: internal.Duration{Duration: time.Second * 300}, Timeout: internal.Duration{Duration: time.Second * 20}, ForceDiscoverOnInit: true, @@ -197,6 +234,50 @@ func createSim() (*simulator.Model, *simulator.Server, error) { return model, s, nil } +func testAlignUniform(t *testing.T, n int) { + now := time.Now().Truncate(60 * time.Second) + info := make([]types.PerfSampleInfo, n) + values := make([]int64, n) + for i := 0; i < n; i++ { + info[i] = types.PerfSampleInfo{ + Timestamp: now.Add(time.Duration(20*i) * time.Second), + Interval: 20, + } + values[i] = 1 + } + newInfo, newValues := alignSamples(info, values, 60*time.Second) + require.Equal(t, n/3, len(newInfo), "Aligned infos have wrong size") + require.Equal(t, n/3, len(newValues), "Aligned values have wrong size") + for _, v := range newValues { + require.Equal(t, 1.0, v, "Aligned value should be 1") + } +} + +func TestAlignMetrics(t *testing.T) { + testAlignUniform(t, 3) + testAlignUniform(t, 30) + testAlignUniform(t, 333) + + // 20s to 60s of 1,2,3,1,2,3... (should average to 2) + n := 30 + now := time.Now().Truncate(60 * time.Second) + info := make([]types.PerfSampleInfo, n) + values := make([]int64, n) + for i := 0; i < n; i++ { + info[i] = types.PerfSampleInfo{ + Timestamp: now.Add(time.Duration(20*i) * time.Second), + Interval: 20, + } + values[i] = int64(i%3 + 1) + } + newInfo, newValues := alignSamples(info, values, 60*time.Second) + require.Equal(t, n/3, len(newInfo), "Aligned infos have wrong size") + require.Equal(t, n/3, len(newValues), "Aligned values have wrong size") + for _, v := range newValues { + require.Equal(t, 2.0, v, "Aligned value should be 2") + } +} + func TestParseConfig(t *testing.T) { v := VSphere{} c := v.SampleConfig() @@ -209,7 +290,7 @@ func TestParseConfig(t *testing.T) { require.NotNil(t, tab) } -func TestWorkerPool(t *testing.T) { +func TestThrottledExecutor(t *testing.T) { max := int64(0) ngr := int64(0) n := 10000 @@ -254,14 +335,13 @@ func TestTimeout(t *testing.T) { defer m.Remove() defer s.Close() - var acc testutil.Accumulator v := defaultVSphere() + var acc testutil.Accumulator v.Vcenters = []string{s.URL.String()} v.Timeout = internal.Duration{Duration: 1 * time.Nanosecond} require.NoError(t, v.Start(nil)) // We're not using the Accumulator, so it can be nil. defer v.Stop() err = v.Gather(&acc) - require.True(t, len(acc.Errors) > 0, "Errors should not be empty here") // The accumulator must contain exactly one error and it must be a deadline exceeded. require.Equal(t, 1, len(acc.Errors)) @@ -311,6 +391,109 @@ func TestMaxQuery(t *testing.T) { c2.close() } +func TestFinder(t *testing.T) { + // Don't run test on 32-bit machines due to bug in simulator. 
+	// https://github.com/vmware/govmomi/issues/1330
+	var i int
+	if unsafe.Sizeof(i) < 8 {
+		return
+	}
+
+	m, s, err := createSim()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer m.Remove()
+	defer s.Close()
+
+	v := defaultVSphere()
+	ctx := context.Background()
+
+	c, err := NewClient(ctx, s.URL, v)
+	require.NoError(t, err)
+
+	f := Finder{c}
+
+	dc := []mo.Datacenter{}
+	err = f.Find(ctx, "Datacenter", "/DC0", &dc)
+	require.NoError(t, err)
+	require.Equal(t, 1, len(dc))
+	require.Equal(t, "DC0", dc[0].Name)
+
+	host := []mo.HostSystem{}
+	err = f.Find(ctx, "HostSystem", "/DC0/host/DC0_H0/DC0_H0", &host)
+	require.NoError(t, err)
+	require.Equal(t, 1, len(host))
+	require.Equal(t, "DC0_H0", host[0].Name)
+
+	host = []mo.HostSystem{}
+	err = f.Find(ctx, "HostSystem", "/DC0/host/DC0_C0/DC0_C0_H0", &host)
+	require.NoError(t, err)
+	require.Equal(t, 1, len(host))
+	require.Equal(t, "DC0_C0_H0", host[0].Name)
+
+	host = []mo.HostSystem{}
+	err = f.Find(ctx, "HostSystem", "/DC0/host/DC0_C0/*", &host)
+	require.NoError(t, err)
+	require.Equal(t, 3, len(host))
+
+	vm := []mo.VirtualMachine{}
+	err = f.Find(ctx, "VirtualMachine", "/DC0/vm/DC0_H0_VM0", &vm)
+	require.NoError(t, err)
+	require.Equal(t, 1, len(vm))
+	require.Equal(t, "DC0_H0_VM0", vm[0].Name)
+
+	vm = []mo.VirtualMachine{}
+	err = f.Find(ctx, "VirtualMachine", "/DC0/vm/DC0_C0*", &vm)
+	require.NoError(t, err)
+	require.Equal(t, 2, len(vm))
+
+	vm = []mo.VirtualMachine{}
+	err = f.Find(ctx, "VirtualMachine", "/DC0/*/DC0_H0_VM0", &vm)
+	require.NoError(t, err)
+	require.Equal(t, 1, len(vm))
+	require.Equal(t, "DC0_H0_VM0", vm[0].Name)
+
+	vm = []mo.VirtualMachine{}
+	err = f.Find(ctx, "VirtualMachine", "/DC0/*/DC0_H0_*", &vm)
+	require.NoError(t, err)
+	require.Equal(t, 2, len(vm))
+
+	vm = []mo.VirtualMachine{}
+	err = f.Find(ctx, "VirtualMachine", "/DC0/**/DC0_H0_VM*", &vm)
+	require.NoError(t, err)
+	require.Equal(t, 2, len(vm))
+
+	vm = []mo.VirtualMachine{}
+	err = f.Find(ctx, "VirtualMachine", "/DC0/**", &vm)
+	require.NoError(t, err)
+	require.Equal(t, 4, len(vm))
+
+	vm = []mo.VirtualMachine{}
+	err = f.Find(ctx, "VirtualMachine", "/**", &vm)
+	require.NoError(t, err)
+	require.Equal(t, 4, len(vm))
+
+	vm = []mo.VirtualMachine{}
+	err = f.Find(ctx, "VirtualMachine", "/**/DC0_H0_VM*", &vm)
+	require.NoError(t, err)
+	require.Equal(t, 2, len(vm))
+
+	vm = []mo.VirtualMachine{}
+	err = f.Find(ctx, "VirtualMachine", "/**/vm/**", &vm)
+	require.NoError(t, err)
+	require.Equal(t, 4, len(vm))
+
+	vm = []mo.VirtualMachine{}
+	err = f.FindAll(ctx, "VirtualMachine", []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, &vm)
+	require.NoError(t, err)
+	require.Equal(t, 4, len(vm))
+
+	vm = []mo.VirtualMachine{}
+	err = f.FindAll(ctx, "VirtualMachine", []string{"/**"}, &vm)
+	require.NoError(t, err)
+	require.Equal(t, 4, len(vm))
+}
+
 func TestAll(t *testing.T) {
 	// Don't run test on 32-bit machines due to bug in simulator.
 	// https://github.com/vmware/govmomi/issues/1330
@@ -333,4 +516,5 @@ func TestAll(t *testing.T) {
 	defer v.Stop()
 	require.NoError(t, v.Gather(&acc))
 	require.Equal(t, 0, len(acc.Errors), fmt.Sprintf("Errors found: %s", acc.Errors))
+	require.True(t, len(acc.Metrics) > 0, "No metrics were collected")
 }

From b490e7d273bbf6ec224d51eb02a593e9347ea4d5 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 12 Feb 2019 14:06:01 -0800
Subject: [PATCH 0595/1815] Update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5a6aa5dc9..e093c6598 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -43,6 +43,7 @@
 - [#5391](https://github.com/influxdata/telegraf/pull/5391): Add resource type and resource label support to stackdriver output.
 - [#5396](https://github.com/influxdata/telegraf/pull/5396): Add internal metric for line too long in influxdb_listener.
 - [#4892](https://github.com/influxdata/telegraf/pull/4892): Add option to set retain flag on messages to mqtt output.
+- [#5165](https://github.com/influxdata/telegraf/pull/5165): Add resource path based filtering to vsphere input.

 #### Bugfixes

From ce507e522f6241f16357382f2ef4c515d59e8c55 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 12 Feb 2019 15:34:57 -0800
Subject: [PATCH 0596/1815] Fix dep check errors

---
 Gopkg.lock | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Gopkg.lock b/Gopkg.lock
index cb8b48cc8..79ad0477b 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -1569,7 +1569,6 @@
     "github.com/nsqio/go-nsq",
     "github.com/openzipkin/zipkin-go-opentracing",
     "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore",
-    "github.com/pkg/errors",
     "github.com/prometheus/client_golang/prometheus",
     "github.com/prometheus/client_golang/prometheus/promhttp",
     "github.com/prometheus/client_model/go",
@@ -1593,6 +1592,7 @@
     "github.com/vmware/govmomi",
     "github.com/vmware/govmomi/object",
     "github.com/vmware/govmomi/performance",
+    "github.com/vmware/govmomi/property",
     "github.com/vmware/govmomi/session",
     "github.com/vmware/govmomi/simulator",
     "github.com/vmware/govmomi/view",

From ee5827ccbdfcf28e4e388de562124303d7bf8cd5 Mon Sep 17 00:00:00 2001
From: Tomas Barton
Date: Wed, 13 Feb 2019 01:17:09 +0100
Subject: [PATCH 0597/1815] Remove error log when snmp6 directory does not exists with nstat input (#5403) (#5413)

---
 plugins/inputs/nstat/README.md |  2 ++
 plugins/inputs/nstat/nstat.go  | 13 +++++++------
 2 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/plugins/inputs/nstat/README.md b/plugins/inputs/nstat/README.md
index c80f893b9..5d2ca6c0a 100644
--- a/plugins/inputs/nstat/README.md
+++ b/plugins/inputs/nstat/README.md
@@ -36,6 +36,8 @@ The sample config file
   # dump_zeros       = true
 ```
+If the `proc_net_snmp6` path does not exist (e.g. when IPv6 is not enabled), no error is raised.
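For reference, the path options involved can be overridden in the plugin configuration; a minimal sketch (the values shown here are simply the standard `/proc` defaults, not settings this patch changes):

```toml
[[inputs.nstat]]
  ## Optional path overrides; the values below are the usual kernel defaults.
  # proc_net_netstat = "/proc/net/netstat"
  # proc_net_snmp    = "/proc/net/snmp"
  # proc_net_snmp6   = "/proc/net/snmp6"
```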
+ ### Measurements & Fields - nstat diff --git a/plugins/inputs/nstat/nstat.go b/plugins/inputs/nstat/nstat.go index 5096d7b03..e6dcb420f 100644 --- a/plugins/inputs/nstat/nstat.go +++ b/plugins/inputs/nstat/nstat.go @@ -83,13 +83,14 @@ func (ns *Nstat) Gather(acc telegraf.Accumulator) error { return err } - // collect SNMP6 data + // collect SNMP6 data, if SNMP6 directory exists (IPv6 enabled) snmp6, err := ioutil.ReadFile(ns.ProcNetSNMP6) - if err != nil { - return err - } - err = ns.gatherSNMP6(snmp6, acc) - if err != nil { + if err == nil { + err = ns.gatherSNMP6(snmp6, acc) + if err != nil { + return err + } + } else if !os.IsNotExist(err) { return err } return nil From f001303189050982fd3ef439057bad489ad3a5b0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 12 Feb 2019 16:49:40 -0800 Subject: [PATCH 0598/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e093c6598..058bfdd18 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -55,6 +55,7 @@ - [#5364](https://github.com/influxdata/telegraf/issues/5364): Send metrics in ascending time order in stackdriver output. - [#5117](https://github.com/influxdata/telegraf/issues/5117): Use systemd in Amazon Linux 2 rpm. - [#4988](https://github.com/influxdata/telegraf/issues/4988): Set deadlock priority in sqlserver input. +- [#5403](https://github.com/influxdata/telegraf/issues/5403): Remove error log when snmp6 directory does not exists with nstat input. ## v1.9.4 [2019-02-05] From 3e9703a573c31a9fb0b14b0e9d42dd3215e21238 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 12 Feb 2019 18:57:20 -0700 Subject: [PATCH 0599/1815] Add rcode tag and field to dns_query input (#5417) --- plugins/inputs/dns_query/README.md | 30 ++++++++++++++++++++++++++- plugins/inputs/dns_query/dns_query.go | 16 ++++++++------ 2 files changed, 39 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/dns_query/README.md b/plugins/inputs/dns_query/README.md index 766d9811f..51152a367 100644 --- a/plugins/inputs/dns_query/README.md +++ b/plugins/inputs/dns_query/README.md @@ -34,12 +34,40 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi - domain - record_type - result + - rcode - fields: - query_time_ms (float) - result_code (int, success = 0, timeout = 1, error = 2) + - rcode_value (int) + + +### Rcode Descriptions +|rcode_value|rcode|Description| +|---|-----------|-----------------------------------| +|0 | NoError | No Error | +|1 | FormErr | Format Error | +|2 | ServFail | Server Failure | +|3 | NXDomain | Non-Existent Domain | +|4 | NotImp | Not Implemented | +|5 | Refused | Query Refused | +|6 | YXDomain | Name Exists when it should not | +|7 | YXRRSet | RR Set Exists when it should not | +|8 | NXRRSet | RR Set that should exist does not | +|9 | NotAuth | Server Not Authoritative for zone | +|10 | NotZone | Name not contained in zone | +|16 | BADSIG | TSIG Signature Failure | +|16 | BADVERS | Bad OPT Version | +|17 | BADKEY | Key not recognized | +|18 | BADTIME | Signature out of time window | +|19 | BADMODE | Bad TKEY Mode | +|20 | BADNAME | Duplicate key name | +|21 | BADALG | Algorithm not supported | +|22 | BADTRUNC | Bad Truncation | +|23 | BADCOOKIE | Bad/missing Server Cookie | + ### Example Output: ``` -dns_query,domain=mjasion.pl,record_type=A,server=8.8.8.8 query_time_ms=67.189842 1456082743585760680 +dns_query,domain=google.com,rcode=NOERROR,record_type=A,result=success,server=127.0.0.1 
rcode_value=0i,result_code=0i,query_time_ms=0.13746 1550020750001000000 ``` diff --git a/plugins/inputs/dns_query/dns_query.go b/plugins/inputs/dns_query/dns_query.go index 3f0b1ab2f..3fcf4a0b8 100644 --- a/plugins/inputs/dns_query/dns_query.go +++ b/plugins/inputs/dns_query/dns_query.go @@ -85,7 +85,11 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error { "record_type": d.RecordType, } - dnsQueryTime, err := d.getDnsQueryTime(domain, server) + dnsQueryTime, rcode, err := d.getDnsQueryTime(domain, server) + if rcode >= 0 { + tags["rcode"] = dns.RcodeToString[rcode] + fields["rcode_value"] = rcode + } if err == nil { setResult(Success, fields, tags) fields["query_time_ms"] = dnsQueryTime @@ -130,7 +134,7 @@ func (d *DnsQuery) setDefaultValues() { } } -func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, error) { +func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, int, error) { dnsQueryTime := float64(0) c := new(dns.Client) @@ -140,20 +144,20 @@ func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, error m := new(dns.Msg) recordType, err := d.parseRecordType() if err != nil { - return dnsQueryTime, err + return dnsQueryTime, -1, err } m.SetQuestion(dns.Fqdn(domain), recordType) m.RecursionDesired = true r, rtt, err := c.Exchange(m, net.JoinHostPort(server, strconv.Itoa(d.Port))) if err != nil { - return dnsQueryTime, err + return dnsQueryTime, -1, err } if r.Rcode != dns.RcodeSuccess { - return dnsQueryTime, errors.New(fmt.Sprintf("Invalid answer name %s after %s query for %s\n", domain, d.RecordType, domain)) + return dnsQueryTime, r.Rcode, fmt.Errorf("Invalid answer (%s) from %s after %s query for %s", dns.RcodeToString[r.Rcode], server, d.RecordType, domain) } dnsQueryTime = float64(rtt.Nanoseconds()) / 1e6 - return dnsQueryTime, nil + return dnsQueryTime, r.Rcode, nil } func (d *DnsQuery) parseRecordType() (uint16, error) { From 7e91336c54438a313d8a478cc7dbb8ba4e04c107 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 12 Feb 2019 17:59:03 -0800 Subject: [PATCH 0600/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 058bfdd18..7779e34b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ - [#5396](https://github.com/influxdata/telegraf/pull/5396): Add internal metric for line too long in influxdb_listener. - [#4892](https://github.com/influxdata/telegraf/pull/4892): Add option to set retain flag on messages to mqtt output. - [#5165](https://github.com/influxdata/telegraf/pull/5165): Add resource path based filtering to vsphere input. +- [#5417](https://github.com/influxdata/telegraf/pull/5417): Add rcode tag and field to dns_query input. 
#### Bugfixes From ab1a1b075d135cba28f60769825417677418dfda Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 14 Feb 2019 15:09:12 -0800 Subject: [PATCH 0601/1815] Disable export_timestamp by default in prometheus output --- plugins/outputs/prometheus_client/prometheus_client.go | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index ef81034cd..d774b4088 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -511,7 +511,6 @@ func init() { return &PrometheusClient{ ExpirationInterval: internal.Duration{Duration: time.Second * 60}, StringAsLabel: true, - ExportTimestamp: true, fam: make(map[string]*MetricFamily), now: time.Now, } From c18934f06545ca7fe4e82dfe5aa0eb4ad65382d6 Mon Sep 17 00:00:00 2001 From: tgregory86 <45361664+tgregory86@users.noreply.github.com> Date: Fri, 15 Feb 2019 14:13:43 -0500 Subject: [PATCH 0602/1815] Fix typo in CONFIGURATION.md example (#5441) --- docs/CONFIGURATION.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 2b1993a45..157ad023c 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -527,7 +527,7 @@ output. The tag is removed in the outputs before writing. urls = ["http://influxdb.example.com"] database = "db_other" tagexclude = ["influxdb_database"] - [ouputs.influxdb.tagpass] + [outputs.influxdb.tagpass] influxdb_database = ["other"] [[inputs.disk]] From e586fdb27f253ff7d6934169438bc2330db5c810 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 19 Feb 2019 12:18:15 -0700 Subject: [PATCH 0603/1815] Append host if arguments are specified for ping input (#5450) --- plugins/inputs/ping/ping.go | 2 +- plugins/inputs/ping/ping_test.go | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 1d49cccc4..69db140ae 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -193,7 +193,7 @@ func hostPinger(binary string, timeout float64, args ...string) (string, error) // args returns the arguments for the 'ping' executable func (p *Ping) args(url string, system string) []string { if len(p.Arguments) > 0 { - return p.Arguments + return append(p.Arguments, url) } // build the ping command args based on toml config diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 867220b20..ad6fa306a 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -126,6 +126,7 @@ func TestArgs(t *testing.T) { func TestArguments(t *testing.T) { arguments := []string{"-c", "3"} + expected := append(arguments, "www.google.com") p := Ping{ Count: 2, Interface: "eth0", @@ -137,7 +138,7 @@ func TestArguments(t *testing.T) { for _, system := range []string{"darwin", "linux", "anything else"} { actual := p.args("www.google.com", system) - require.True(t, reflect.DeepEqual(actual, arguments), "Expected: %s Actual: %s", arguments, actual) + require.True(t, reflect.DeepEqual(actual, expected), "Expected: %s Actual: %s", expected, actual) } } From 091dd615480c1394574b3e72aaf0dfc94335e046 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 19 Feb 2019 11:20:46 -0800 Subject: [PATCH 0604/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7779e34b5..9f0d33087 
100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,6 +57,7 @@ - [#5117](https://github.com/influxdata/telegraf/issues/5117): Use systemd in Amazon Linux 2 rpm. - [#4988](https://github.com/influxdata/telegraf/issues/4988): Set deadlock priority in sqlserver input. - [#5403](https://github.com/influxdata/telegraf/issues/5403): Remove error log when snmp6 directory does not exists with nstat input. +- [#5437](https://github.com/influxdata/telegraf/issues/5437): Host not added when using custom arguments in ping plugin. ## v1.9.4 [2019-02-05] From e4d084fbc39f22b32fc6300736ab3314f2a5a4b1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 19 Feb 2019 15:02:57 -0800 Subject: [PATCH 0605/1815] Update changelog --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f0d33087..9b91d8bdb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,6 +52,11 @@ - [#5320](https://github.com/influxdata/telegraf/pull/5320): Use datacenter option spelling in consul input. - [#5316](https://github.com/influxdata/telegraf/pull/5316): Remove auth from /ping route in influxdb_listener. - [#5304](https://github.com/influxdata/telegraf/issues/5304): Fix x509_cert input stops checking certs after first error. + +## v1.9.5 [unreleased] + +#### Bugfixes + - [#5315](https://github.com/influxdata/telegraf/issues/5315): Skip string fields when writing to stackdriver output. - [#5364](https://github.com/influxdata/telegraf/issues/5364): Send metrics in ascending time order in stackdriver output. - [#5117](https://github.com/influxdata/telegraf/issues/5117): Use systemd in Amazon Linux 2 rpm. From 5dfa3fa76920ef1893ea30da38c86f5d9e893c8f Mon Sep 17 00:00:00 2001 From: Olli-Pekka Lehto Date: Tue, 19 Feb 2019 17:08:54 -0600 Subject: [PATCH 0606/1815] Fix InfluxDB output UDP line splitting (#5439) --- plugins/outputs/influxdb/udp.go | 22 ++++++++++++++++++++-- plugins/serializers/influx/influx.go | 1 + plugins/serializers/influx/influx_test.go | 18 ++++++++++++++++++ 3 files changed, 39 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/influxdb/udp.go b/plugins/outputs/influxdb/udp.go index 62f2a6ab7..8e636d340 100644 --- a/plugins/outputs/influxdb/udp.go +++ b/plugins/outputs/influxdb/udp.go @@ -1,6 +1,8 @@ package influxdb import ( + "bufio" + "bytes" "context" "fmt" "log" @@ -45,9 +47,9 @@ func NewUDPClient(config *UDPConfig) (*udpClient, error) { serializer := config.Serializer if serializer == nil { s := influx.NewSerializer() - s.SetMaxLineBytes(config.MaxPayloadSize) serializer = s } + serializer.SetMaxLineBytes(size) dialer := config.Dialer if dialer == nil { @@ -96,7 +98,11 @@ func (c *udpClient) Write(ctx context.Context, metrics []telegraf.Metric) error continue } - _, err = c.conn.Write(octets) + scanner := bufio.NewScanner(bytes.NewReader(octets)) + scanner.Split(scanLines) + for scanner.Scan() { + _, err = c.conn.Write(scanner.Bytes()) + } if err != nil { c.conn.Close() c.conn = nil @@ -118,3 +124,15 @@ type netDialer struct { func (d *netDialer) DialContext(ctx context.Context, network, address string) (Conn, error) { return d.Dialer.DialContext(ctx, network, address) } + +func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + if i := bytes.IndexByte(data, '\n'); i >= 0 { + // We have a full newline-terminated line. 
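+		// The newline is kept in the returned token, so each datagram written
+		// in Write above carries one complete, newline-terminated line.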
+ return i + 1, data[0 : i+1], nil + + } + return 0, nil, nil +} diff --git a/plugins/serializers/influx/influx.go b/plugins/serializers/influx/influx.go index 2989e44e9..e7063cbd2 100644 --- a/plugins/serializers/influx/influx.go +++ b/plugins/serializers/influx/influx.go @@ -240,6 +240,7 @@ func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error { return err } + pairsLen = 0 firstField = true bytesNeeded = len(s.header) + len(s.pair) + len(s.footer) diff --git a/plugins/serializers/influx/influx_test.go b/plugins/serializers/influx/influx_test.go index 2c1cbd587..e3526428e 100644 --- a/plugins/serializers/influx/influx_test.go +++ b/plugins/serializers/influx/influx_test.go @@ -275,6 +275,24 @@ var tests = []struct { ), output: []byte("cpu abc=123i 1519194109000000042\ncpu def=456i 1519194109000000042\n"), }, + { + name: "split_fields_overflow", + maxBytes: 43, + input: MustMetric( + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "abc": 123, + "def": 456, + "ghi": 789, + "jkl": 123, + }, + time.Unix(1519194109, 42), + ), + ), + output: []byte("cpu abc=123i,def=456i 1519194109000000042\ncpu ghi=789i,jkl=123i 1519194109000000042\n"), + }, { name: "name newline", input: MustMetric( From 431c58d84ff4dc10aee4e34488feabae0fc3d255 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 19 Feb 2019 15:10:53 -0800 Subject: [PATCH 0607/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9b91d8bdb..a03f0a2e3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -63,6 +63,7 @@ - [#4988](https://github.com/influxdata/telegraf/issues/4988): Set deadlock priority in sqlserver input. - [#5403](https://github.com/influxdata/telegraf/issues/5403): Remove error log when snmp6 directory does not exists with nstat input. - [#5437](https://github.com/influxdata/telegraf/issues/5437): Host not added when using custom arguments in ping plugin. +- [#5438](https://github.com/influxdata/telegraf/issues/5438): Fix InfluxDB output UDP line splitting. ## v1.9.4 [2019-02-05] From 5823fefb7ab4754577df13da7ba62cf01677c78d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Edstr=C3=B6m?= <108799+Legogris@users.noreply.github.com> Date: Wed, 20 Feb 2019 22:16:02 +0100 Subject: [PATCH 0608/1815] Group stackdriver requests to send one point per timeseries (#5407) --- plugins/outputs/stackdriver/stackdriver.go | 87 ++++++++++++++----- .../outputs/stackdriver/stackdriver_test.go | 83 ++++++++++++++++++ 2 files changed, 149 insertions(+), 21 deletions(-) diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index 10823c8ed..c7d9e45bc 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -3,6 +3,7 @@ package stackdriver import ( "context" "fmt" + "hash/fnv" "log" "path" "sort" @@ -111,15 +112,34 @@ func sorted(metrics []telegraf.Metric) []telegraf.Metric { return batch } +type timeSeriesBuckets map[uint64][]*monitoringpb.TimeSeries + +func (tsb timeSeriesBuckets) Add(m telegraf.Metric, f *telegraf.Field, ts *monitoringpb.TimeSeries) { + h := fnv.New64a() + h.Write([]byte(m.Name())) + h.Write([]byte{'\n'}) + h.Write([]byte(f.Key)) + h.Write([]byte{'\n'}) + for key, value := range m.Tags() { + h.Write([]byte(key)) + h.Write([]byte{'\n'}) + h.Write([]byte(value)) + h.Write([]byte{'\n'}) + } + k := h.Sum64() + + s := tsb[k] + s = append(s, ts) + tsb[k] = s +} + // Write the metrics to Google Cloud Stackdriver. 
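+// Fields are first grouped into buckets keyed by series (metric name, field
+// key and tag set); each request built below then takes at most one point
+// per bucket, because Stackdriver rejects a request that contains more than
+// one point for the same time series.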
func (s *Stackdriver) Write(metrics []telegraf.Metric) error { ctx := context.Background() batch := sorted(metrics) - + buckets := make(timeSeriesBuckets) for _, m := range batch { - timeSeries := []*monitoringpb.TimeSeries{} - for _, f := range m.FieldList() { value, err := getStackdriverTypedValue(f.Value) if err != nil { @@ -150,25 +170,50 @@ func (s *Stackdriver) Write(metrics []telegraf.Metric) error { } // Prepare time series. - timeSeries = append(timeSeries, - &monitoringpb.TimeSeries{ - Metric: &metricpb.Metric{ - Type: path.Join("custom.googleapis.com", s.Namespace, m.Name(), f.Key), - Labels: getStackdriverLabels(m.TagList()), - }, - MetricKind: metricKind, - Resource: &monitoredrespb.MonitoredResource{ - Type: s.ResourceType, - Labels: s.ResourceLabels, - }, - Points: []*monitoringpb.Point{ - dataPoint, - }, - }) - } + timeSeries := &monitoringpb.TimeSeries{ + Metric: &metricpb.Metric{ + Type: path.Join("custom.googleapis.com", s.Namespace, m.Name(), f.Key), + Labels: getStackdriverLabels(m.TagList()), + }, + MetricKind: metricKind, + Resource: &monitoredrespb.MonitoredResource{ + Type: s.ResourceType, + Labels: s.ResourceLabels, + }, + Points: []*monitoringpb.Point{ + dataPoint, + }, + } - if len(timeSeries) < 1 { - continue + buckets.Add(m, f, timeSeries) + } + } + + // process the buckets in order + keys := make([]uint64, 0, len(buckets)) + for k := range buckets { + keys = append(keys, k) + } + sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) + + for len(buckets) != 0 { + // can send up to 200 time series to stackdriver + timeSeries := make([]*monitoringpb.TimeSeries, 0, 200) + for i, k := range keys { + s := buckets[k] + timeSeries = append(timeSeries, s[0]) + if len(s) == 1 { + delete(buckets, k) + keys = append(keys[:i], keys[i+1:]...) + continue + } + + s = s[1:] + buckets[k] = s + + if len(timeSeries) == cap(timeSeries) { + break + } } // Prepare time series request. 
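The draining pass above deserves a note: Stackdriver rejects a `CreateTimeSeriesRequest` containing two points for the same time series, and a request may carry at most 200 series, so each pass pulls at most one point per bucket. Below is a simplified, self-contained sketch of the same strategy; ints stand in for `*monitoringpb.TimeSeries`, the `drainOnePerSeries` helper is hypothetical, the index bookkeeping is tidied relative to the original loop, and the real method additionally wraps each batch in a request.

```go
package main

import (
	"fmt"
	"sort"
)

// drainOnePerSeries builds batches that contain at most one element per
// bucket key, mirroring one CreateTimeSeriesRequest per pass.
func drainOnePerSeries(buckets map[uint64][]int, batchSize int) [][]int {
	keys := make([]uint64, 0, len(buckets))
	for k := range buckets {
		keys = append(keys, k)
	}
	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })

	var batches [][]int
	for len(buckets) > 0 {
		batch := make([]int, 0, batchSize)
		live := keys[:0] // keys that still hold points after this pass
		for _, k := range keys {
			if len(batch) < batchSize {
				s := buckets[k]
				batch = append(batch, s[0])
				if len(s) == 1 {
					delete(buckets, k)
					continue // key is exhausted; drop it
				}
				buckets[k] = s[1:]
			}
			live = append(live, k)
		}
		keys = live
		batches = append(batches, batch)
	}
	return batches
}

func main() {
	buckets := map[uint64][]int{1: {10, 11}, 2: {20}, 3: {30, 31, 32}}
	fmt.Println(drainOnePerSeries(buckets, 200))
	// Output: [[10 20 30] [11 31] [32]]
}
```

Every emitted batch holds at most one value per key, which is exactly the property the output relies on.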
diff --git a/plugins/outputs/stackdriver/stackdriver_test.go b/plugins/outputs/stackdriver/stackdriver_test.go index c60d72d36..151c84020 100644 --- a/plugins/outputs/stackdriver/stackdriver_test.go +++ b/plugins/outputs/stackdriver/stackdriver_test.go @@ -207,6 +207,89 @@ func TestWriteAscendingTime(t *testing.T) { }) } +func TestWriteBatchable(t *testing.T) { + expectedResponse := &emptypb.Empty{} + mockMetric.err = nil + mockMetric.reqs = nil + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + c, err := monitoring.NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + s := &Stackdriver{ + Project: fmt.Sprintf("projects/%s", "[PROJECT]"), + Namespace: "test", + client: c, + } + + // Metrics in descending order of timestamp + metrics := []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "foo": "bar", + }, + map[string]interface{}{ + "value": 42, + }, + time.Unix(2, 0), + ), + testutil.MustMetric("cpu", + map[string]string{ + "foo": "foo", + }, + map[string]interface{}{ + "value": 43, + }, + time.Unix(3, 0), + ), + testutil.MustMetric("cpu", + map[string]string{ + "foo": "bar", + }, + map[string]interface{}{ + "value": 43, + }, + time.Unix(1, 0), + ), + } + + err = s.Connect() + require.NoError(t, err) + err = s.Write(metrics) + require.NoError(t, err) + + require.Len(t, mockMetric.reqs, 2) + request := mockMetric.reqs[0].(*monitoringpb.CreateTimeSeriesRequest) + require.Len(t, request.TimeSeries, 2) + ts := request.TimeSeries[0] + require.Len(t, ts.Points, 1) + require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ + EndTime: &googlepb.Timestamp{ + Seconds: 3, + }, + }) + require.Equal(t, ts.Points[0].Value, &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(43), + }, + }) + + ts = request.TimeSeries[1] + require.Len(t, ts.Points, 1) + require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ + EndTime: &googlepb.Timestamp{ + Seconds: 1, + }, + }) + require.Equal(t, ts.Points[0].Value, &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(43), + }, + }) +} + func TestWriteIgnoredErrors(t *testing.T) { tests := []struct { name string From 463df273ee82c36f2a13d3d9b953f8458fa17d61 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 20 Feb 2019 13:18:08 -0800 Subject: [PATCH 0609/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a03f0a2e3..25b94570a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,6 +52,7 @@ - [#5320](https://github.com/influxdata/telegraf/pull/5320): Use datacenter option spelling in consul input. - [#5316](https://github.com/influxdata/telegraf/pull/5316): Remove auth from /ping route in influxdb_listener. - [#5304](https://github.com/influxdata/telegraf/issues/5304): Fix x509_cert input stops checking certs after first error. +- [#5404](https://github.com/influxdata/telegraf/issues/5404): Group stackdriver requests to send one point per timeseries. 
## v1.9.5 [unreleased] From 843d842d02db291ceafa209704a7cf2c674ef921 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 20 Feb 2019 13:23:59 -0800 Subject: [PATCH 0610/1815] Add stackdriver input plugin (#5406) --- README.md | 1 + metric/series_grouper.go | 86 ++ plugins/inputs/all/all.go | 1 + plugins/inputs/stackdriver/README.md | 161 +++ plugins/inputs/stackdriver/stackdriver.go | 709 +++++++++++ .../inputs/stackdriver/stackdriver_test.go | 1125 +++++++++++++++++ testutil/metric.go | 8 + 7 files changed, 2091 insertions(+) create mode 100644 metric/series_grouper.go create mode 100644 plugins/inputs/stackdriver/README.md create mode 100644 plugins/inputs/stackdriver/stackdriver.go create mode 100644 plugins/inputs/stackdriver/stackdriver_test.go diff --git a/README.md b/README.md index 9ffc7d66b..96f797c73 100644 --- a/README.md +++ b/README.md @@ -253,6 +253,7 @@ For documentation on the latest development code see the [documentation index][d * [socket_listener](./plugins/inputs/socket_listener) * [solr](./plugins/inputs/solr) * [sql server](./plugins/inputs/sqlserver) (microsoft) +* [stackdriver](./plugins/inputs/stackdriver) * [statsd](./plugins/inputs/statsd) * [swap](./plugins/inputs/swap) * [syslog](./plugins/inputs/syslog) diff --git a/metric/series_grouper.go b/metric/series_grouper.go new file mode 100644 index 000000000..5dc66e11b --- /dev/null +++ b/metric/series_grouper.go @@ -0,0 +1,86 @@ +package metric + +import ( + "hash/fnv" + "io" + "sort" + "strconv" + "time" + + "github.com/influxdata/telegraf" +) + +// NewSeriesGrouper returns a type that can be used to group fields by series +// and time, so that fields which share these values will be combined into a +// single telegraf.Metric. +// +// This is useful to build telegraf.Metric's when all fields for a series are +// not available at once. +// +// ex: +// - cpu,host=localhost usage_time=42 +// - cpu,host=localhost idle_time=42 +// + cpu,host=localhost idle_time=42,usage_time=42 +func NewSeriesGrouper() *SeriesGrouper { + return &SeriesGrouper{ + metrics: make(map[uint64]telegraf.Metric), + ordered: []telegraf.Metric{}, + } +} + +type SeriesGrouper struct { + metrics map[uint64]telegraf.Metric + ordered []telegraf.Metric +} + +// Add adds a field key and value to the series. +func (g *SeriesGrouper) Add( + measurement string, + tags map[string]string, + tm time.Time, + field string, + fieldValue interface{}, +) error { + var err error + id := groupID(measurement, tags, tm) + metric := g.metrics[id] + if metric == nil { + metric, err = New(measurement, tags, map[string]interface{}{field: fieldValue}, tm) + if err != nil { + return err + } + g.metrics[id] = metric + g.ordered = append(g.ordered, metric) + } else { + metric.AddField(field, fieldValue) + } + return nil +} + +// Metrics returns the metrics grouped by series and time. 
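+// The slice preserves the order in which each series was first added.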
+func (g *SeriesGrouper) Metrics() []telegraf.Metric {
+	return g.ordered
+}
+
+func groupID(measurement string, tags map[string]string, tm time.Time) uint64 {
+	h := fnv.New64a()
+	h.Write([]byte(measurement))
+	h.Write([]byte("\n"))
+
+	taglist := make([]*telegraf.Tag, 0, len(tags))
+	for k, v := range tags {
+		taglist = append(taglist,
+			&telegraf.Tag{Key: k, Value: v})
+	}
+	sort.Slice(taglist, func(i, j int) bool { return taglist[i].Key < taglist[j].Key })
+	for _, tag := range taglist {
+		h.Write([]byte(tag.Key))
+		h.Write([]byte("\n"))
+		h.Write([]byte(tag.Value))
+		h.Write([]byte("\n"))
+	}
+	h.Write([]byte("\n"))
+
+	io.WriteString(h, strconv.FormatInt(tm.UnixNano(), 10))
+	return h.Sum64()
+}
diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 2435e1519..fe440bbba 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -122,6 +122,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/socket_listener"
 	_ "github.com/influxdata/telegraf/plugins/inputs/solr"
 	_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
+	_ "github.com/influxdata/telegraf/plugins/inputs/stackdriver"
 	_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
 	_ "github.com/influxdata/telegraf/plugins/inputs/swap"
 	_ "github.com/influxdata/telegraf/plugins/inputs/syslog"
diff --git a/plugins/inputs/stackdriver/README.md b/plugins/inputs/stackdriver/README.md
new file mode 100644
index 000000000..f2ec1471b
--- /dev/null
+++ b/plugins/inputs/stackdriver/README.md
@@ -0,0 +1,161 @@
+# Stackdriver Input Plugin
+
+Stackdriver gathers metrics from the [Stackdriver Monitoring API][stackdriver].
+
+This plugin accesses APIs which are [chargeable][pricing]; you might incur
+costs.
+
+### Configuration
+
+```toml
+[[inputs.stackdriver]]
+  ## GCP Project
+  project = "erudite-bloom-151019"
+
+  ## Include timeseries that start with the given metric type.
+  metric_type_prefix_include = [
+    "compute.googleapis.com/",
+  ]
+
+  ## Exclude timeseries that start with the given metric type.
+  # metric_type_prefix_exclude = []
+
+  ## Most metrics are updated no more than once per minute; it is recommended
+  ## to override the agent level interval with a value of 1m or greater.
+  interval = "1m"
+
+  ## Maximum number of API calls to make per second. The quota for accounts
+  ## varies, it can be viewed on the API dashboard:
+  ## https://cloud.google.com/monitoring/quotas#quotas_and_limits
+  # rate_limit = 14
+
+  ## The delay and window options control the number of points selected on
+  ## each gather. When set, metrics are gathered between:
+  ##   start: now() - delay - window
+  ##   end:   now() - delay
+  #
+  ## Collection delay; if set too low metrics may not yet be available.
+  # delay = "5m"
+  #
+  ## If unset, the window will start at 1m and be updated dynamically to span
+  ## the time between calls (approximately the length of the plugin interval).
+  # window = "1m"
+
+  ## TTL for cached list of metric types. This is the maximum amount of time
+  ## it may take to discover new metrics.
+  # cache_ttl = "1h"
+
+  ## If true, raw bucket counts are collected for distribution value types.
+  ## For a more lightweight collection, you may wish to disable and use
+  ## distribution_aggregation_aligners instead.
+  # gather_raw_distribution_buckets = true
+
+  ## Aggregate functions to be used for metrics whose value type is
+  ## distribution. These aggregate values are recorded in addition to raw
+  ## bucket counts, if they are enabled.
+  ##
+  ## For a list of aligner strings see:
+  ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner
+  # distribution_aggregation_aligners = [
+  #   "ALIGN_PERCENTILE_99",
+  #   "ALIGN_PERCENTILE_95",
+  #   "ALIGN_PERCENTILE_50",
+  # ]
+
+  ## Filters can be added to reduce the number of time series matched. All
+  ## functions are supported: starts_with, ends_with, has_substring, and
+  ## one_of. Only the '=' operator is supported.
+  ##
+  ## The logical operators when combining filters are defined statically using
+  ## the following values:
+  ##   filter ::= {AND <resource_labels> <metric_labels>}
+  ##   resource_labels ::= {OR <resource_label>}
+  ##   metric_labels ::= {OR <metric_label>}
+  ##
+  ## For more details, see https://cloud.google.com/monitoring/api/v3/filters
+  #
+  ## Resource labels refine the time series selection with the following expression:
+  ##   resource.labels.<resource_label_key> = <resource_label_value>
+  # [[inputs.stackdriver.filter.resource_labels]]
+  #   key = "instance_name"
+  #   value = 'starts_with("localhost")'
+  #
+  ## Metric labels refine the time series selection with the following expression:
+  ##   metric.labels.<metric_label_key> = <metric_label_value>
+  # [[inputs.stackdriver.filter.metric_labels]]
+  #   key = "device_name"
+  #   value = 'one_of("sda", "sdb")'
+```
+
+#### Authentication
+
+It is recommended to use a service account to authenticate with the
+Stackdriver Monitoring API; see [Getting Started with Authentication][auth].
+
+### Metrics
+
+Metrics are created using one of three patterns, depending on whether the value
+type is a scalar value, raw distribution buckets, or aligned bucket values.
+
+In all cases, the Stackdriver metric type is split on the last component into
+the measurement and field:
+```
+compute.googleapis.com/instance/disk/read_bytes_count
+└────────── measurement ─────────┘ └── field ───┘
+```
+
+**Scalar Values:**
+
+- measurement
+  - tags:
+    - resource_labels
+    - metric_labels
+  - fields:
+    - field
+
+
+**Distributions:**
+
+Distributions are represented by a set of fields along with the bucket values
+tagged with the bucket boundary. Buckets are cumulative: each bucket
+represents the total number of items less than the `lt` tag.
+
+- measurement
+  - tags:
+    - resource_labels
+    - metric_labels
+  - fields:
+    - field_count
+    - field_mean
+    - field_sum_of_squared_deviation
+    - field_range_min
+    - field_range_max
+
++ measurement
+  - tags:
+    - resource_labels
+    - metric_labels
+    - lt (less than)
+  - fields:
+    - field_bucket
+
+**Aligned Aggregations:**
+
+- measurement
+  - tags:
+    - resource_labels
+    - metric_labels
+  - fields:
+    - field_alignment_function
+
+### Troubleshooting
+
+When Telegraf is run with `--debug`, detailed information about the performed
+queries will be logged.
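To make the measurement/field split described above concrete, here is a minimal standalone sketch of the naming rule (an illustration, not the plugin's exact code):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// A Stackdriver metric type is split on its last "/" into the
	// measurement name and the field key.
	metricType := "compute.googleapis.com/instance/disk/read_bytes_count"
	i := strings.LastIndex(metricType, "/")
	measurement, field := metricType[:i], metricType[i+1:]
	fmt.Println(measurement) // compute.googleapis.com/instance/disk
	fmt.Println(field)       // read_bytes_count
}
```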
+
+### Example Output
+```
+```
+[stackdriver]: https://cloud.google.com/monitoring/api/v3/
+[auth]: https://cloud.google.com/docs/authentication/getting-started
+[pricing]: https://cloud.google.com/stackdriver/pricing#stackdriver_monitoring_services
diff --git a/plugins/inputs/stackdriver/stackdriver.go b/plugins/inputs/stackdriver/stackdriver.go
new file mode 100644
index 000000000..4f4e35695
--- /dev/null
+++ b/plugins/inputs/stackdriver/stackdriver.go
@@ -0,0 +1,709 @@
+package stackdriver
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"math"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	monitoring "cloud.google.com/go/monitoring/apiv3" // the Stackdriver Monitoring client package
+	googlepbduration "github.com/golang/protobuf/ptypes/duration"
+	googlepbts "github.com/golang/protobuf/ptypes/timestamp"
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/internal/limiter"
+	"github.com/influxdata/telegraf/metric"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"github.com/influxdata/telegraf/selfstat"
+	"google.golang.org/api/iterator"
+	distributionpb "google.golang.org/genproto/googleapis/api/distribution"
+	metricpb "google.golang.org/genproto/googleapis/api/metric"
+	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
+)
+
+const (
+	defaultRateLimit = 14
+	description      = "Gather timeseries from Google Cloud Platform v3 monitoring API"
+	sampleConfig     = `
+  ## GCP Project
+  project = "erudite-bloom-151019"
+
+  ## Include timeseries that start with the given metric type.
+  metric_type_prefix_include = [
+    "compute.googleapis.com/",
+  ]
+
+  ## Exclude timeseries that start with the given metric type.
+  # metric_type_prefix_exclude = []
+
+  ## Many metrics are updated once per minute; it is recommended to override
+  ## the agent level interval with a value of 1m or greater.
+  interval = "1m"
+
+  ## Maximum number of API calls to make per second. The quota for accounts
+  ## varies, it can be viewed on the API dashboard:
+  ## https://cloud.google.com/monitoring/quotas#quotas_and_limits
+  # rate_limit = 14
+
+  ## The delay and window options control the number of points selected on
+  ## each gather. When set, metrics are gathered between:
+  ##   start: now() - delay - window
+  ##   end:   now() - delay
+  #
+  ## Collection delay; if set too low metrics may not yet be available.
+  # delay = "5m"
+  #
+  ## If unset, the window will start at 1m and be updated dynamically to span
+  ## the time between calls (approximately the length of the plugin interval).
+  # window = "1m"
+
+  ## TTL for cached list of metric types. This is the maximum amount of time
+  ## it may take to discover new metrics.
+  # cache_ttl = "1h"
+
+  ## If true, raw bucket counts are collected for distribution value types.
+  ## For a more lightweight collection, you may wish to disable and use
+  ## distribution_aggregation_aligners instead.
+  # gather_raw_distribution_buckets = true
+
+  ## Aggregate functions to be used for metrics whose value type is
+  ## distribution. These aggregate values are recorded in addition to raw
+  ## bucket counts, if they are enabled.
+  ##
+  ## For a list of aligner strings see:
+  ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner
+  # distribution_aggregation_aligners = [
+  #   "ALIGN_PERCENTILE_99",
+  #   "ALIGN_PERCENTILE_95",
+  #   "ALIGN_PERCENTILE_50",
+  # ]
+
+  ## Filters can be added to reduce the number of time series matched. All
+  ## functions are supported: starts_with, ends_with, has_substring, and
+  ## one_of. Only the '=' operator is supported.
+  ##
+  ## The logical operators when combining filters are defined statically using
+  ## the following values:
+  ##   filter ::= {AND <resource_labels> <metric_labels>}
+  ##   resource_labels ::= {OR <resource_label>}
+  ##   metric_labels ::= {OR <metric_label>}
+  ##
+  ## For more details, see https://cloud.google.com/monitoring/api/v3/filters
+  #
+  ## Resource labels refine the time series selection with the following expression:
+  ##   resource.labels.<resource_label_key> = <resource_label_value>
+  # [[inputs.stackdriver.filter.resource_labels]]
+  #   key = "instance_name"
+  #   value = 'starts_with("localhost")'
+  #
+  ## Metric labels refine the time series selection with the following expression:
+  ##   metric.labels.<metric_label_key> = <metric_label_value>
+  # [[inputs.stackdriver.filter.metric_labels]]
+  #   key = "device_name"
+  #   value = 'one_of("sda", "sdb")'
+`
+)
+
+var (
+	defaultCacheTTL = internal.Duration{Duration: 1 * time.Hour}
+	defaultWindow   = internal.Duration{Duration: 1 * time.Minute}
+	defaultDelay    = internal.Duration{Duration: 5 * time.Minute}
+)
+
+type (
+	// Stackdriver is the Google Stackdriver config info.
+	Stackdriver struct {
+		Project                         string                `toml:"project"`
+		RateLimit                       int                   `toml:"rate_limit"`
+		Window                          internal.Duration     `toml:"window"`
+		Delay                           internal.Duration     `toml:"delay"`
+		CacheTTL                        internal.Duration     `toml:"cache_ttl"`
+		MetricTypePrefixInclude         []string              `toml:"metric_type_prefix_include"`
+		MetricTypePrefixExclude        []string              `toml:"metric_type_prefix_exclude"`
+		GatherRawDistributionBuckets    bool                  `toml:"gather_raw_distribution_buckets"`
+		DistributionAggregationAligners []string              `toml:"distribution_aggregation_aligners"`
+		Filter                          *ListTimeSeriesFilter `toml:"filter"`
+
+		client              metricClient
+		timeSeriesConfCache *timeSeriesConfCache
+		prevEnd             time.Time
+	}
+
+	// ListTimeSeriesFilter contains resource labels and metric labels
+	ListTimeSeriesFilter struct {
+		ResourceLabels []*Label `json:"resource_labels"`
+		MetricLabels   []*Label `json:"metric_labels"`
+	}
+
+	// Label contains key and value
+	Label struct {
+		Key   string `toml:"key"`
+		Value string `toml:"value"`
+	}
+
+	// TimeSeriesConfCache caches generated timeseries configurations
+	timeSeriesConfCache struct {
+		TTL             time.Duration
+		Generated       time.Time
+		TimeSeriesConfs []*timeSeriesConf
+	}
+
+	// Internal structure which holds our configuration for a particular GCP time
+	// series.
+	timeSeriesConf struct {
+		// The influx measurement name this time series maps to
+		measurement string
+		// The prefix to use before any influx field names that we'll write for
+		// this time series. (Or, if we only decide to write one field name, this
+		// field just holds the value of the field name.)
+		fieldKey string
+		// The GCP API request that we'll use to fetch data for this time series.
+ listTimeSeriesRequest *monitoringpb.ListTimeSeriesRequest + } + + // stackdriverMetricClient is a metric client for stackdriver + stackdriverMetricClient struct { + conn *monitoring.MetricClient + + listMetricDescriptorsCalls selfstat.Stat + listTimeSeriesCalls selfstat.Stat + } + + // metricClient is convenient for testing + metricClient interface { + ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest) (<-chan *metricpb.MetricDescriptor, error) + ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest) (<-chan *monitoringpb.TimeSeries, error) + Close() error + } + + lockedSeriesGrouper struct { + sync.Mutex + *metric.SeriesGrouper + } +) + +func (g *lockedSeriesGrouper) Add( + measurement string, + tags map[string]string, + tm time.Time, + field string, + fieldValue interface{}, +) error { + g.Lock() + defer g.Unlock() + return g.SeriesGrouper.Add(measurement, tags, tm, field, fieldValue) +} + +// ListMetricDescriptors implements metricClient interface +func (c *stackdriverMetricClient) ListMetricDescriptors( + ctx context.Context, + req *monitoringpb.ListMetricDescriptorsRequest, +) (<-chan *metricpb.MetricDescriptor, error) { + mdChan := make(chan *metricpb.MetricDescriptor, 1000) + + go func() { + log.Printf("D! [inputs.stackdriver] ListMetricDescriptors: %s", req.Filter) + defer close(mdChan) + + // Iterate over metric descriptors and send them to buffered channel + mdResp := c.conn.ListMetricDescriptors(ctx, req) + c.listMetricDescriptorsCalls.Incr(1) + for { + mdDesc, mdErr := mdResp.Next() + if mdErr != nil { + if mdErr != iterator.Done { + log.Printf("E! [inputs.stackdriver] Received error response: %s: %v", req, mdErr) + } + break + } + mdChan <- mdDesc + } + }() + + return mdChan, nil +} + +// ListTimeSeries implements metricClient interface +func (c *stackdriverMetricClient) ListTimeSeries( + ctx context.Context, + req *monitoringpb.ListTimeSeriesRequest, +) (<-chan *monitoringpb.TimeSeries, error) { + tsChan := make(chan *monitoringpb.TimeSeries, 1000) + + go func() { + log.Printf("D! [inputs.stackdriver] ListTimeSeries: %s", req.Filter) + defer close(tsChan) + + // Iterate over timeseries and send them to buffered channel + tsResp := c.conn.ListTimeSeries(ctx, req) + c.listTimeSeriesCalls.Incr(1) + for { + tsDesc, tsErr := tsResp.Next() + if tsErr != nil { + if tsErr != iterator.Done { + log.Printf("E! 
[inputs.stackdriver] Received error response: %s: %v", req, tsErr) + } + break + } + tsChan <- tsDesc + } + }() + + return tsChan, nil +} + +// Close implements metricClient interface +func (s *stackdriverMetricClient) Close() error { + return s.conn.Close() +} + +// Description implements telegraf.Input interface +func (s *Stackdriver) Description() string { + return description +} + +// SampleConfig implements telegraf.Input interface +func (s *Stackdriver) SampleConfig() string { + return sampleConfig +} + +// Gather implements telegraf.Input interface +func (s *Stackdriver) Gather(acc telegraf.Accumulator) error { + ctx := context.Background() + + if s.RateLimit == 0 { + s.RateLimit = defaultRateLimit + } + + err := s.initializeStackdriverClient(ctx) + if err != nil { + return err + } + + start, end := s.updateWindow(s.prevEnd) + s.prevEnd = end + + tsConfs, err := s.generatetimeSeriesConfs(ctx, start, end) + if err != nil { + return err + } + + lmtr := limiter.NewRateLimiter(s.RateLimit, time.Second) + defer lmtr.Stop() + + grouper := &lockedSeriesGrouper{ + SeriesGrouper: metric.NewSeriesGrouper(), + } + + var wg sync.WaitGroup + wg.Add(len(tsConfs)) + for _, tsConf := range tsConfs { + <-lmtr.C + go func(tsConf *timeSeriesConf) { + defer wg.Done() + acc.AddError(s.gatherTimeSeries(ctx, grouper, tsConf)) + }(tsConf) + } + wg.Wait() + + for _, metric := range grouper.Metrics() { + acc.AddMetric(metric) + } + + return nil +} + +// Returns the start and end time for the next collection. +func (s *Stackdriver) updateWindow(prevEnd time.Time) (time.Time, time.Time) { + var start time.Time + if s.Window.Duration != 0 { + start = time.Now().Add(-s.Delay.Duration).Add(-s.Window.Duration) + } else if prevEnd.IsZero() { + start = time.Now().Add(-s.Delay.Duration).Add(-defaultWindow.Duration) + } else { + start = prevEnd + } + end := time.Now().Add(-s.Delay.Duration) + return start, end +} + +// Generate filter string for ListTimeSeriesRequest +func (s *Stackdriver) newListTimeSeriesFilter(metricType string) string { + functions := []string{ + "starts_with", + "ends_with", + "has_substring", + "one_of", + } + filterString := fmt.Sprintf(`metric.type = "%s"`, metricType) + if s.Filter == nil { + return filterString + } + + var valueFmt string + if len(s.Filter.ResourceLabels) > 0 { + resourceLabelsFilter := make([]string, len(s.Filter.ResourceLabels)) + for i, resourceLabel := range s.Filter.ResourceLabels { + // check if resource label value contains function + if includeExcludeHelper(resourceLabel.Value, functions, nil) { + valueFmt = `resource.labels.%s = %s` + } else { + valueFmt = `resource.labels.%s = "%s"` + } + resourceLabelsFilter[i] = fmt.Sprintf(valueFmt, resourceLabel.Key, resourceLabel.Value) + } + if len(resourceLabelsFilter) == 1 { + filterString += fmt.Sprintf(" AND %s", resourceLabelsFilter[0]) + } else { + filterString += fmt.Sprintf(" AND (%s)", strings.Join(resourceLabelsFilter, " OR ")) + } + } + + if len(s.Filter.MetricLabels) > 0 { + metricLabelsFilter := make([]string, len(s.Filter.MetricLabels)) + for i, metricLabel := range s.Filter.MetricLabels { + // check if metric label value contains function + if includeExcludeHelper(metricLabel.Value, functions, nil) { + valueFmt = `metric.labels.%s = %s` + } else { + valueFmt = `metric.labels.%s = "%s"` + } + metricLabelsFilter[i] = fmt.Sprintf(valueFmt, metricLabel.Key, metricLabel.Value) + } + if len(metricLabelsFilter) == 1 { + filterString += fmt.Sprintf(" AND %s", metricLabelsFilter[0]) + } else { + filterString += 
fmt.Sprintf(" AND (%s)", strings.Join(metricLabelsFilter, " OR ")) + } + } + + return filterString +} + +// Create and initialize a timeSeriesConf for a given GCP metric type with +// defaults taken from the gcp_stackdriver plugin configuration. +func (s *Stackdriver) newTimeSeriesConf( + metricType string, startTime, endTime time.Time, +) *timeSeriesConf { + filter := s.newListTimeSeriesFilter(metricType) + interval := &monitoringpb.TimeInterval{ + EndTime: &googlepbts.Timestamp{Seconds: endTime.Unix()}, + StartTime: &googlepbts.Timestamp{Seconds: startTime.Unix()}, + } + tsReq := &monitoringpb.ListTimeSeriesRequest{ + Name: monitoring.MetricProjectPath(s.Project), + Filter: filter, + Interval: interval, + } + cfg := &timeSeriesConf{ + measurement: metricType, + fieldKey: "value", + listTimeSeriesRequest: tsReq, + } + + // GCP metric types have at least one slash, but we'll be defensive anyway. + slashIdx := strings.LastIndex(metricType, "/") + if slashIdx > 0 { + cfg.measurement = metricType[:slashIdx] + cfg.fieldKey = metricType[slashIdx+1:] + } + + return cfg +} + +// Change this configuration to query an aggregate by specifying an "aligner". +// In GCP monitoring, "aligning" is aggregation performed *within* a time +// series, to distill a pile of data points down to a single data point for +// some given time period (here, we specify 60s as our time period). This is +// especially useful for scraping GCP "distribution" metric types, whose raw +// data amounts to a ~60 bucket histogram, which is fairly hard to query and +// visualize in the TICK stack. +func (t *timeSeriesConf) initForAggregate(alignerStr string) { + // Check if alignerStr is valid + alignerInt, isValid := monitoringpb.Aggregation_Aligner_value[alignerStr] + if !isValid { + alignerStr = monitoringpb.Aggregation_Aligner_name[alignerInt] + } + aligner := monitoringpb.Aggregation_Aligner(alignerInt) + agg := &monitoringpb.Aggregation{ + AlignmentPeriod: &googlepbduration.Duration{Seconds: 60}, + PerSeriesAligner: aligner, + } + t.fieldKey = t.fieldKey + "_" + strings.ToLower(alignerStr) + t.listTimeSeriesRequest.Aggregation = agg +} + +// IsValid checks timeseriesconf cache validity +func (c *timeSeriesConfCache) IsValid() bool { + return c.TimeSeriesConfs != nil && time.Since(c.Generated) < c.TTL +} + +func (s *Stackdriver) initializeStackdriverClient(ctx context.Context) error { + if s.client == nil { + client, err := monitoring.NewMetricClient(ctx) + if err != nil { + return fmt.Errorf("failed to create stackdriver monitoring client: %v", err) + } + + tags := map[string]string{ + "project_id": s.Project, + } + listMetricDescriptorsCalls := selfstat.Register( + "stackdriver", "list_metric_descriptors_calls", tags) + listTimeSeriesCalls := selfstat.Register( + "stackdriver", "list_timeseries_calls", tags) + + s.client = &stackdriverMetricClient{ + conn: client, + listMetricDescriptorsCalls: listMetricDescriptorsCalls, + listTimeSeriesCalls: listTimeSeriesCalls, + } + } + + return nil +} + +func includeExcludeHelper(key string, includes []string, excludes []string) bool { + if len(includes) > 0 { + for _, includeStr := range includes { + if strings.HasPrefix(key, includeStr) { + return true + } + } + return false + } + if len(excludes) > 0 { + for _, excludeStr := range excludes { + if strings.HasPrefix(key, excludeStr) { + return false + } + } + return true + } + return true +} + +// Test whether a particular GCP metric type should be scraped by this plugin +// by checking the plugin name against the configuration's +// 
"includeMetricTypePrefixes" and "excludeMetricTypePrefixes" +func (s *Stackdriver) includeMetricType(metricType string) bool { + k := metricType + inc := s.MetricTypePrefixInclude + exc := s.MetricTypePrefixExclude + + return includeExcludeHelper(k, inc, exc) +} + +// Generates filter for list metric descriptors request +func (s *Stackdriver) newListMetricDescriptorsFilters() []string { + if len(s.MetricTypePrefixInclude) == 0 { + return nil + } + + metricTypeFilters := make([]string, len(s.MetricTypePrefixInclude)) + for i, metricTypePrefix := range s.MetricTypePrefixInclude { + metricTypeFilters[i] = fmt.Sprintf(`metric.type = starts_with(%q)`, metricTypePrefix) + } + return metricTypeFilters +} + +// Generate a list of timeSeriesConfig structs by making a ListMetricDescriptors +// API request and filtering the result against our configuration. +func (s *Stackdriver) generatetimeSeriesConfs( + ctx context.Context, startTime, endTime time.Time, +) ([]*timeSeriesConf, error) { + if s.timeSeriesConfCache != nil && s.timeSeriesConfCache.IsValid() { + // Update interval for timeseries requests in timeseries cache + interval := &monitoringpb.TimeInterval{ + EndTime: &googlepbts.Timestamp{Seconds: endTime.Unix()}, + StartTime: &googlepbts.Timestamp{Seconds: startTime.Unix()}, + } + for _, timeSeriesConf := range s.timeSeriesConfCache.TimeSeriesConfs { + timeSeriesConf.listTimeSeriesRequest.Interval = interval + } + return s.timeSeriesConfCache.TimeSeriesConfs, nil + } + + ret := []*timeSeriesConf{} + req := &monitoringpb.ListMetricDescriptorsRequest{ + Name: monitoring.MetricProjectPath(s.Project), + } + + filters := s.newListMetricDescriptorsFilters() + if len(filters) == 0 { + filters = []string{""} + } + + for _, filter := range filters { + // Add filter for list metric descriptors if + // includeMetricTypePrefixes is specified, + // this is more effecient than iterating over + // all metric descriptors + req.Filter = filter + mdRespChan, err := s.client.ListMetricDescriptors(ctx, req) + if err != nil { + return nil, err + } + + for metricDescriptor := range mdRespChan { + metricType := metricDescriptor.Type + valueType := metricDescriptor.ValueType + + if filter == "" && !s.includeMetricType(metricType) { + continue + } + + if valueType == metricpb.MetricDescriptor_DISTRIBUTION { + if s.GatherRawDistributionBuckets { + tsConf := s.newTimeSeriesConf(metricType, startTime, endTime) + ret = append(ret, tsConf) + } + for _, alignerStr := range s.DistributionAggregationAligners { + tsConf := s.newTimeSeriesConf(metricType, startTime, endTime) + tsConf.initForAggregate(alignerStr) + ret = append(ret, tsConf) + } + } else { + ret = append(ret, s.newTimeSeriesConf(metricType, startTime, endTime)) + } + } + } + + s.timeSeriesConfCache = &timeSeriesConfCache{ + TimeSeriesConfs: ret, + Generated: time.Now(), + TTL: s.CacheTTL.Duration, + } + + return ret, nil +} + +// Do the work to gather an individual time series. Runs inside a +// timeseries-specific goroutine. 
+func (s *Stackdriver) gatherTimeSeries( + ctx context.Context, grouper *lockedSeriesGrouper, tsConf *timeSeriesConf, +) error { + tsReq := tsConf.listTimeSeriesRequest + + tsRespChan, err := s.client.ListTimeSeries(ctx, tsReq) + if err != nil { + return err + } + + for tsDesc := range tsRespChan { + tags := map[string]string{ + "resource_type": tsDesc.Resource.Type, + } + for k, v := range tsDesc.Resource.Labels { + tags[k] = v + } + for k, v := range tsDesc.Metric.Labels { + tags[k] = v + } + + for _, p := range tsDesc.Points { + ts := time.Unix(p.Interval.EndTime.Seconds, 0) + + if tsDesc.ValueType == metricpb.MetricDescriptor_DISTRIBUTION { + dist := p.Value.GetDistributionValue() + s.addDistribution(dist, tags, ts, grouper, tsConf) + } else { + var value interface{} + + // Types that are valid to be assigned to Value + // See: https://godoc.org/google.golang.org/genproto/googleapis/monitoring/v3#TypedValue + switch tsDesc.ValueType { + case metricpb.MetricDescriptor_BOOL: + value = p.Value.GetBoolValue() + case metricpb.MetricDescriptor_INT64: + value = p.Value.GetInt64Value() + case metricpb.MetricDescriptor_DOUBLE: + value = p.Value.GetDoubleValue() + case metricpb.MetricDescriptor_STRING: + value = p.Value.GetStringValue() + } + + grouper.Add(tsConf.measurement, tags, ts, tsConf.fieldKey, value) + } + } + } + + return nil +} + +// AddDistribution adds metrics from a distribution value type. +func (s *Stackdriver) addDistribution( + metric *distributionpb.Distribution, + tags map[string]string, ts time.Time, grouper *lockedSeriesGrouper, tsConf *timeSeriesConf, +) { + field := tsConf.fieldKey + name := tsConf.measurement + + grouper.Add(name, tags, ts, field+"_count", metric.Count) + grouper.Add(name, tags, ts, field+"_mean", metric.Mean) + grouper.Add(name, tags, ts, field+"_sum_of_squared_deviation", metric.SumOfSquaredDeviation) + + if metric.Range != nil { + grouper.Add(name, tags, ts, field+"_range_min", metric.Range.Min) + grouper.Add(name, tags, ts, field+"_range_max", metric.Range.Max) + } + + linearBuckets := metric.BucketOptions.GetLinearBuckets() + exponentialBuckets := metric.BucketOptions.GetExponentialBuckets() + explicitBuckets := metric.BucketOptions.GetExplicitBuckets() + + var numBuckets int32 + if linearBuckets != nil { + numBuckets = linearBuckets.NumFiniteBuckets + 2 + } else if exponentialBuckets != nil { + numBuckets = exponentialBuckets.NumFiniteBuckets + 2 + } else { + numBuckets = int32(len(explicitBuckets.Bounds)) + 1 + } + + var i int32 + var count int64 + for i = 0; i < numBuckets; i++ { + // The last bucket is the overflow bucket, and includes all values + // greater than the previous bound. + if i == numBuckets-1 { + tags["lt"] = "+Inf" + } else { + var upperBound float64 + if linearBuckets != nil { + upperBound = linearBuckets.Offset + (linearBuckets.Width * float64(i)) + } else if exponentialBuckets != nil { + width := math.Pow(exponentialBuckets.GrowthFactor, float64(i)) + upperBound = exponentialBuckets.Scale * width + } else if explicitBuckets != nil { + upperBound = explicitBuckets.Bounds[i] + } + tags["lt"] = strconv.FormatFloat(upperBound, 'f', -1, 64) + } + + // Add to the cumulative count; trailing buckets with value 0 are + // omitted from the response. 
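+		// For example (data from the "implicit buckets are zero" test):
+		// a truncated BucketCounts of [0, 1] across four logical buckets
+		// yields the cumulative values 0, 1, 1, 1.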
+ if i < int32(len(metric.BucketCounts)) { + count += metric.BucketCounts[i] + } + grouper.Add(name, tags, ts, field+"_bucket", count) + } +} + +func init() { + f := func() telegraf.Input { + return &Stackdriver{ + CacheTTL: defaultCacheTTL, + RateLimit: defaultRateLimit, + Delay: defaultDelay, + GatherRawDistributionBuckets: true, + DistributionAggregationAligners: []string{}, + } + } + + inputs.Add("stackdriver", f) +} diff --git a/plugins/inputs/stackdriver/stackdriver_test.go b/plugins/inputs/stackdriver/stackdriver_test.go new file mode 100644 index 000000000..99e5deabd --- /dev/null +++ b/plugins/inputs/stackdriver/stackdriver_test.go @@ -0,0 +1,1125 @@ +package stackdriver + +import ( + "context" + "testing" + "time" + + "github.com/golang/protobuf/ptypes/timestamp" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" + "google.golang.org/genproto/googleapis/api/distribution" + metricpb "google.golang.org/genproto/googleapis/api/metric" + "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +type Call struct { + name string + args []interface{} +} + +type MockStackdriverClient struct { + ListMetricDescriptorsF func(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest) (<-chan *metricpb.MetricDescriptor, error) + ListTimeSeriesF func(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest) (<-chan *monitoringpb.TimeSeries, error) + CloseF func() error + + calls []*Call +} + +func (m *MockStackdriverClient) ListMetricDescriptors( + ctx context.Context, + req *monitoringpb.ListMetricDescriptorsRequest, +) (<-chan *metricpb.MetricDescriptor, error) { + call := &Call{name: "ListMetricDescriptors", args: []interface{}{ctx, req}} + m.calls = append(m.calls, call) + return m.ListMetricDescriptorsF(ctx, req) +} + +func (m *MockStackdriverClient) ListTimeSeries( + ctx context.Context, + req *monitoringpb.ListTimeSeriesRequest, +) (<-chan *monitoringpb.TimeSeries, error) { + call := &Call{name: "ListTimeSeries", args: []interface{}{ctx, req}} + m.calls = append(m.calls, call) + return m.ListTimeSeriesF(ctx, req) +} + +func (m *MockStackdriverClient) Close() error { + call := &Call{name: "Close", args: []interface{}{}} + m.calls = append(m.calls, call) + return m.CloseF() +} + +func TestInitAndRegister(t *testing.T) { + expected := &Stackdriver{ + CacheTTL: defaultCacheTTL, + RateLimit: defaultRateLimit, + Delay: defaultDelay, + GatherRawDistributionBuckets: true, + DistributionAggregationAligners: []string{}, + } + require.Equal(t, expected, inputs.Inputs["stackdriver"]()) +} + +func createTimeSeries( + point *monitoringpb.Point, valueType metricpb.MetricDescriptor_ValueType, +) *monitoringpb.TimeSeries { + return &monitoringpb.TimeSeries{ + Metric: &metricpb.Metric{Labels: make(map[string]string)}, + Resource: &monitoredres.MonitoredResource{ + Type: "global", + Labels: map[string]string{ + "project_id": "test", + }, + }, + Points: []*monitoringpb.Point{point}, + ValueType: valueType, + } +} + +func TestGather(t *testing.T) { + now := time.Now().Round(time.Second) + tests := []struct { + name string + descriptor *metricpb.MetricDescriptor + timeseries *monitoringpb.TimeSeries + expected []telegraf.Metric + }{ + { + name: "double", + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + timeseries: 
createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: 42.0, + }, + }, + }, + metricpb.MetricDescriptor_DOUBLE, + ), + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + }, + map[string]interface{}{ + "usage": 42.0, + }, + now), + }, + }, + { + name: "int64", + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_INT64, + }, + timeseries: createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: 42, + }, + }, + }, + metricpb.MetricDescriptor_INT64, + ), + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + }, + map[string]interface{}{ + "usage": 42, + }, + now), + }, + }, + { + name: "bool", + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_BOOL, + }, + timeseries: createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_BoolValue{ + BoolValue: true, + }, + }, + }, + metricpb.MetricDescriptor_BOOL, + ), + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + }, + map[string]interface{}{ + "usage": true, + }, + now), + }, + }, + { + name: "string", + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_STRING, + }, + timeseries: createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_StringValue{ + StringValue: "foo", + }, + }, + }, + metricpb.MetricDescriptor_STRING, + ), + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + }, + map[string]interface{}{ + "usage": "foo", + }, + now), + }, + }, + { + name: "metric labels", + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + timeseries: &monitoringpb.TimeSeries{ + Metric: &metricpb.Metric{ + Labels: map[string]string{ + "resource_type": "instance", + }, + }, + Resource: &monitoredres.MonitoredResource{ + Type: "global", + Labels: map[string]string{ + "project_id": "test", + }, + }, + Points: []*monitoringpb.Point{ + { + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: 42.0, + }, + }, + }, + }, + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "instance", + "project_id": "test", + }, + map[string]interface{}{ + "usage": 42.0, + }, + now), + }, + }, + { + name: "linear buckets", + descriptor: &metricpb.MetricDescriptor{ + Type: 
"telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DISTRIBUTION, + }, + timeseries: createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DistributionValue{ + DistributionValue: &distribution.Distribution{ + Count: 2, + Mean: 2.0, + SumOfSquaredDeviation: 1.0, + Range: &distribution.Distribution_Range{ + Min: 0.0, + Max: 3.0, + }, + BucketCounts: []int64{0, 1, 3, 0}, + BucketOptions: &distribution.Distribution_BucketOptions{ + Options: &distribution.Distribution_BucketOptions_LinearBuckets{ + LinearBuckets: &distribution.Distribution_BucketOptions_Linear{ + NumFiniteBuckets: 2, + Width: 1, + Offset: 1, + }, + }, + }, + }, + }, + }, + }, + metricpb.MetricDescriptor_DISTRIBUTION, + ), + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + }, + map[string]interface{}{ + "usage_count": int64(2), + "usage_range_min": 0.0, + "usage_range_max": 3.0, + "usage_mean": 2.0, + "usage_sum_of_squared_deviation": 1.0, + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "1", + }, + map[string]interface{}{ + "usage_bucket": int64(0), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "2", + }, + map[string]interface{}{ + "usage_bucket": int64(1), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "3", + }, + map[string]interface{}{ + "usage_bucket": int64(4), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "+Inf", + }, + map[string]interface{}{ + "usage_bucket": int64(4), + }, + now), + }, + }, + { + name: "exponential buckets", + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DISTRIBUTION, + }, + timeseries: createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DistributionValue{ + DistributionValue: &distribution.Distribution{ + Count: 2, + Mean: 2.0, + SumOfSquaredDeviation: 1.0, + Range: &distribution.Distribution_Range{ + Min: 0.0, + Max: 3.0, + }, + BucketCounts: []int64{0, 1, 3, 0}, + BucketOptions: &distribution.Distribution_BucketOptions{ + Options: &distribution.Distribution_BucketOptions_ExponentialBuckets{ + ExponentialBuckets: &distribution.Distribution_BucketOptions_Exponential{ + NumFiniteBuckets: 2, + GrowthFactor: 2, + Scale: 1, + }, + }, + }, + }, + }, + }, + }, + metricpb.MetricDescriptor_DISTRIBUTION, + ), + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + }, + map[string]interface{}{ + "usage_count": int64(2), + "usage_range_min": 0.0, + "usage_range_max": 3.0, + "usage_mean": 2.0, + "usage_sum_of_squared_deviation": 1.0, + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "1", + }, + map[string]interface{}{ + "usage_bucket": int64(0), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + 
"resource_type": "global", + "project_id": "test", + "lt": "2", + }, + map[string]interface{}{ + "usage_bucket": int64(1), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "4", + }, + map[string]interface{}{ + "usage_bucket": int64(4), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "+Inf", + }, + map[string]interface{}{ + "usage_bucket": int64(4), + }, + now), + }, + }, + { + name: "explicit buckets", + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DISTRIBUTION, + }, + timeseries: createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DistributionValue{ + DistributionValue: &distribution.Distribution{ + Count: 4, + Mean: 2.0, + SumOfSquaredDeviation: 1.0, + Range: &distribution.Distribution_Range{ + Min: 0.0, + Max: 3.0, + }, + BucketCounts: []int64{0, 1, 3}, + BucketOptions: &distribution.Distribution_BucketOptions{ + Options: &distribution.Distribution_BucketOptions_ExplicitBuckets{ + ExplicitBuckets: &distribution.Distribution_BucketOptions_Explicit{ + Bounds: []float64{1.0, 2.0}, + }, + }, + }, + }, + }, + }, + }, + metricpb.MetricDescriptor_DISTRIBUTION, + ), + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + }, + map[string]interface{}{ + "usage_count": int64(4), + "usage_range_min": 0.0, + "usage_range_max": 3.0, + "usage_mean": 2.0, + "usage_sum_of_squared_deviation": 1.0, + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "1", + }, + map[string]interface{}{ + "usage_bucket": int64(0), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "2", + }, + map[string]interface{}{ + "usage_bucket": int64(1), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "+Inf", + }, + map[string]interface{}{ + "usage_bucket": int64(4), + }, + now), + }, + }, + { + name: "implicit buckets are zero", + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DISTRIBUTION, + }, + timeseries: createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DistributionValue{ + DistributionValue: &distribution.Distribution{ + Count: 2, + Mean: 2.0, + SumOfSquaredDeviation: 1.0, + Range: &distribution.Distribution_Range{ + Min: 0.0, + Max: 3.0, + }, + BucketCounts: []int64{0, 1}, + BucketOptions: &distribution.Distribution_BucketOptions{ + Options: &distribution.Distribution_BucketOptions_LinearBuckets{ + LinearBuckets: &distribution.Distribution_BucketOptions_Linear{ + NumFiniteBuckets: 2, + Width: 1, + Offset: 1, + }, + }, + }, + }, + }, + }, + }, + metricpb.MetricDescriptor_DISTRIBUTION, + ), + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + }, + map[string]interface{}{ + "usage_count": int64(2), + 
"usage_range_min": 0.0, + "usage_range_max": 3.0, + "usage_mean": 2.0, + "usage_sum_of_squared_deviation": 1.0, + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "1", + }, + map[string]interface{}{ + "usage_bucket": int64(0), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "2", + }, + map[string]interface{}{ + "usage_bucket": int64(1), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "3", + }, + map[string]interface{}{ + "usage_bucket": int64(1), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "+Inf", + }, + map[string]interface{}{ + "usage_bucket": int64(1), + }, + now), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + s := &Stackdriver{ + Project: "test", + RateLimit: 10, + GatherRawDistributionBuckets: true, + client: &MockStackdriverClient{ + ListMetricDescriptorsF: func(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest) (<-chan *metricpb.MetricDescriptor, error) { + ch := make(chan *metricpb.MetricDescriptor, 1) + ch <- tt.descriptor + close(ch) + return ch, nil + }, + ListTimeSeriesF: func(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest) (<-chan *monitoringpb.TimeSeries, error) { + ch := make(chan *monitoringpb.TimeSeries, 1) + ch <- tt.timeseries + close(ch) + return ch, nil + }, + CloseF: func() error { + return nil + }, + }, + } + + err := s.Gather(&acc) + require.NoError(t, err) + + actual := []telegraf.Metric{} + for _, m := range acc.Metrics { + actual = append(actual, testutil.FromTestMetric(m)) + } + + testutil.RequireMetricsEqual(t, tt.expected, actual) + }) + } +} + +func TestGatherAlign(t *testing.T) { + now := time.Now().Round(time.Second) + tests := []struct { + name string + descriptor *metricpb.MetricDescriptor + timeseries []*monitoringpb.TimeSeries + expected []telegraf.Metric + }{ + { + name: "align", + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DISTRIBUTION, + }, + timeseries: []*monitoringpb.TimeSeries{ + createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: 42.0, + }, + }, + }, + metricpb.MetricDescriptor_DOUBLE, + ), + createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: 42.0, + }, + }, + }, + metricpb.MetricDescriptor_DOUBLE, + ), + createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: 42.0, + }, + }, + }, + metricpb.MetricDescriptor_DOUBLE, + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + }, + map[string]interface{}{ + "usage_align_percentile_99": 42.0, + "usage_align_percentile_95": 42.0, + "usage_align_percentile_50": 
42.0, + }, + now), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + listCall := 0 + var acc testutil.Accumulator + client := &MockStackdriverClient{ + ListMetricDescriptorsF: func(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest) (<-chan *metricpb.MetricDescriptor, error) { + ch := make(chan *metricpb.MetricDescriptor, 1) + ch <- tt.descriptor + close(ch) + return ch, nil + }, + ListTimeSeriesF: func(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest) (<-chan *monitoringpb.TimeSeries, error) { + ch := make(chan *monitoringpb.TimeSeries, 1) + ch <- tt.timeseries[listCall] + listCall++ + close(ch) + return ch, nil + }, + CloseF: func() error { + return nil + }, + } + + s := &Stackdriver{ + Project: "test", + RateLimit: 10, + GatherRawDistributionBuckets: false, + DistributionAggregationAligners: []string{ + "ALIGN_PERCENTILE_99", + "ALIGN_PERCENTILE_95", + "ALIGN_PERCENTILE_50", + }, + client: client, + } + + err := s.Gather(&acc) + require.NoError(t, err) + + actual := []telegraf.Metric{} + for _, m := range acc.Metrics { + actual = append(actual, testutil.FromTestMetric(m)) + } + + testutil.RequireMetricsEqual(t, tt.expected, actual) + + }) + } +} + +func TestListMetricDescriptorFilter(t *testing.T) { + type call struct { + name string + filter string + } + now := time.Now().Round(time.Second) + tests := []struct { + name string + stackdriver *Stackdriver + descriptor *metricpb.MetricDescriptor + calls []call + }{ + { + name: "simple", + stackdriver: &Stackdriver{ + Project: "test", + MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, + RateLimit: 1, + }, + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + calls: []call{ + { + name: "ListMetricDescriptors", + filter: `metric.type = starts_with("telegraf/cpu/usage")`, + }, { + name: "ListTimeSeries", + filter: `metric.type = "telegraf/cpu/usage"`, + }, + }, + }, + { + name: "single resource labels string", + stackdriver: &Stackdriver{ + Project: "test", + MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, + Filter: &ListTimeSeriesFilter{ + ResourceLabels: []*Label{ + { + Key: "instance_name", + Value: `localhost`, + }, + }, + }, + RateLimit: 1, + }, + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + calls: []call{ + { + name: "ListMetricDescriptors", + filter: `metric.type = starts_with("telegraf/cpu/usage")`, + }, { + name: "ListTimeSeries", + filter: `metric.type = "telegraf/cpu/usage" AND resource.labels.instance_name = "localhost"`, + }, + }, + }, + { + name: "single resource labels function", + stackdriver: &Stackdriver{ + Project: "test", + MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, + Filter: &ListTimeSeriesFilter{ + ResourceLabels: []*Label{ + { + Key: "instance_name", + Value: `starts_with("localhost")`, + }, + }, + }, + RateLimit: 1, + }, + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + calls: []call{ + { + name: "ListMetricDescriptors", + filter: `metric.type = starts_with("telegraf/cpu/usage")`, + }, { + name: "ListTimeSeries", + filter: `metric.type = "telegraf/cpu/usage" AND resource.labels.instance_name = starts_with("localhost")`, + }, + }, + }, + { + name: "multiple resource labels", + stackdriver: &Stackdriver{ + Project: "test", + MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, + Filter: 
&ListTimeSeriesFilter{ + ResourceLabels: []*Label{ + { + Key: "instance_name", + Value: `localhost`, + }, + { + Key: "zone", + Value: `starts_with("us-")`, + }, + }, + }, + RateLimit: 1, + }, + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + calls: []call{ + { + name: "ListMetricDescriptors", + filter: `metric.type = starts_with("telegraf/cpu/usage")`, + }, { + name: "ListTimeSeries", + filter: `metric.type = "telegraf/cpu/usage" AND (resource.labels.instance_name = "localhost" OR resource.labels.zone = starts_with("us-"))`, + }, + }, + }, + { + name: "single metric label string", + stackdriver: &Stackdriver{ + Project: "test", + MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, + Filter: &ListTimeSeriesFilter{ + MetricLabels: []*Label{ + { + Key: "resource_type", + Value: `instance`, + }, + }, + }, + RateLimit: 1, + }, + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + calls: []call{ + { + name: "ListMetricDescriptors", + filter: `metric.type = starts_with("telegraf/cpu/usage")`, + }, { + name: "ListTimeSeries", + filter: `metric.type = "telegraf/cpu/usage" AND metric.labels.resource_type = "instance"`, + }, + }, + }, + { + name: "single metric label function", + stackdriver: &Stackdriver{ + Project: "test", + MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, + Filter: &ListTimeSeriesFilter{ + MetricLabels: []*Label{ + { + Key: "resource_id", + Value: `starts_with("abc-")`, + }, + }, + }, + RateLimit: 1, + }, + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + calls: []call{ + { + name: "ListMetricDescriptors", + filter: `metric.type = starts_with("telegraf/cpu/usage")`, + }, { + name: "ListTimeSeries", + filter: `metric.type = "telegraf/cpu/usage" AND metric.labels.resource_id = starts_with("abc-")`, + }, + }, + }, + { + name: "multiple metric labels", + stackdriver: &Stackdriver{ + Project: "test", + MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, + Filter: &ListTimeSeriesFilter{ + MetricLabels: []*Label{ + { + Key: "resource_type", + Value: "instance", + }, + { + Key: "resource_id", + Value: `starts_with("abc-")`, + }, + }, + }, + RateLimit: 1, + }, + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + calls: []call{ + { + name: "ListMetricDescriptors", + filter: `metric.type = starts_with("telegraf/cpu/usage")`, + }, { + name: "ListTimeSeries", + filter: `metric.type = "telegraf/cpu/usage" AND (metric.labels.resource_type = "instance" OR metric.labels.resource_id = starts_with("abc-"))`, + }, + }, + }, + { + name: "all labels filters", + stackdriver: &Stackdriver{ + Project: "test", + MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, + Filter: &ListTimeSeriesFilter{ + ResourceLabels: []*Label{ + { + Key: "instance_name", + Value: `localhost`, + }, + { + Key: "zone", + Value: `starts_with("us-")`, + }, + }, + MetricLabels: []*Label{ + { + Key: "resource_type", + Value: "instance", + }, + { + Key: "resource_id", + Value: `starts_with("abc-")`, + }, + }, + }, + RateLimit: 1, + }, + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + calls: []call{ + { + name: "ListMetricDescriptors", + filter: `metric.type = starts_with("telegraf/cpu/usage")`, + }, { + name: "ListTimeSeries", + filter: 
`metric.type = "telegraf/cpu/usage" AND (resource.labels.instance_name = "localhost" OR resource.labels.zone = starts_with("us-")) AND (metric.labels.resource_type = "instance" OR metric.labels.resource_id = starts_with("abc-"))`, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + client := &MockStackdriverClient{ + ListMetricDescriptorsF: func(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest) (<-chan *metricpb.MetricDescriptor, error) { + ch := make(chan *metricpb.MetricDescriptor, 1) + ch <- tt.descriptor + close(ch) + return ch, nil + }, + ListTimeSeriesF: func(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest) (<-chan *monitoringpb.TimeSeries, error) { + ch := make(chan *monitoringpb.TimeSeries, 1) + ch <- createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: 42.0, + }, + }, + }, + metricpb.MetricDescriptor_DOUBLE, + ) + close(ch) + return ch, nil + }, + CloseF: func() error { + return nil + }, + } + + s := tt.stackdriver + s.client = client + + err := s.Gather(&acc) + require.NoError(t, err) + + require.Equal(t, len(client.calls), len(tt.calls)) + for i, expected := range tt.calls { + actual := client.calls[i] + require.Equal(t, expected.name, actual.name) + + switch req := actual.args[1].(type) { + case *monitoringpb.ListMetricDescriptorsRequest: + require.Equal(t, expected.filter, req.Filter) + case *monitoringpb.ListTimeSeriesRequest: + require.Equal(t, expected.filter, req.Filter) + default: + panic("unknown request type") + } + } + }) + } +} + +func TestNewListTimeSeriesFilter(t *testing.T) { +} + +func TestTimeSeriesConfCacheIsValid(t *testing.T) { +} diff --git a/testutil/metric.go b/testutil/metric.go index 5ce0a99a6..afb3de7fe 100644 --- a/testutil/metric.go +++ b/testutil/metric.go @@ -103,3 +103,11 @@ func MustMetric( } return m } + +func FromTestMetric(met *Metric) telegraf.Metric { + m, err := metric.New(met.Measurement, met.Tags, met.Fields, met.Time) + if err != nil { + panic("MustMetric") + } + return m +} From 03776088f135b040d8af93db4ee09f8bba1a5de7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 20 Feb 2019 13:26:21 -0800 Subject: [PATCH 0611/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 25b94570a..402c906fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ - [neptune_apex](/plugins/inputs/neptune_apex/README.md) - Contributed by @MaxRenaud - [nginx_upstream_check](/plugins/inputs/nginx_upstream_check/README.md) - Contributed by @dmitryilyin - [multifile](/plugins/inputs/multifile/README.md) - Contributed by @martin2250 +- [stackdriver](/plugins/inputs/stackdriver/README.md) - Contributed by @WuHan0608 #### New Outputs From 6add84eb2597b04ffc109d7ac1edb053a93eeebc Mon Sep 17 00:00:00 2001 From: Andrew Ernst Date: Wed, 20 Feb 2019 15:16:23 -0800 Subject: [PATCH 0612/1815] Support Azure Sovereign Environments with endpoint_url option (#5453) --- plugins/outputs/azure_monitor/README.md | 5 +++++ .../outputs/azure_monitor/azure_monitor.go | 19 ++++++++++++++++++- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/azure_monitor/README.md b/plugins/outputs/azure_monitor/README.md index 28bb66af6..fbb493586 100644 --- a/plugins/outputs/azure_monitor/README.md +++ 
b/plugins/outputs/azure_monitor/README.md @@ -40,6 +40,11 @@ written as a dimension on each Azure Monitor metric. ## The Azure Resource ID against which metric will be logged, e.g. ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" # resource_id = "" + + ## Optionally, if in Azure US Government, China, or other sovereign + ## cloud environment, set the appropriate REST endpoint for receiving + ## metrics. (Note: region may be unused in this context) + # endpoint_url = "https://monitoring.core.usgovcloudapi.net" ``` ### Setup diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go index e52d66b99..408976c53 100644 --- a/plugins/outputs/azure_monitor/azure_monitor.go +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -31,6 +31,7 @@ type AzureMonitor struct { StringsAsDimensions bool `toml:"strings_as_dimensions"` Region string ResourceID string `toml:"resource_id"` + EndpointUrl string `toml:"endpoint_url"` url string auth autorest.Authorizer @@ -65,6 +66,7 @@ const ( vmInstanceMetadataURL = "http://169.254.169.254/metadata/instance?api-version=2017-12-01" resourceIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s" urlTemplate = "https://%s.monitoring.azure.com%s/metrics" + urlOverrideTemplate = "%s%s/metrics" maxRequestBodySize = 4000000 ) @@ -91,6 +93,11 @@ var sampleConfig = ` ## The Azure Resource ID against which metric will be logged, e.g. ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" # resource_id = "" + + ## Optionally, if in Azure US Government, China or other sovereign + ## cloud environment, set appropriate REST endpoint for receiving + ## metrics. (Note: region may be unused in this context) + # endpoint_url = "https://monitoring.core.usgovcloudapi.net" ` // Description provides a description of the plugin @@ -125,6 +132,8 @@ func (a *AzureMonitor) Connect() error { var err error var region string var resourceID string + var endpointUrl string + if a.Region == "" || a.ResourceID == "" { // Pull region and resource identifier region, resourceID, err = vmInstanceMetadata(a.client) @@ -138,13 +147,21 @@ func (a *AzureMonitor) Connect() error { if a.ResourceID != "" { resourceID = a.ResourceID } + if a.EndpointUrl != "" { + endpointUrl = a.EndpointUrl + } if resourceID == "" { return fmt.Errorf("no resource ID configured or available via VM instance metadata") } else if region == "" { return fmt.Errorf("no region configured or available via VM instance metadata") } - a.url = fmt.Sprintf(urlTemplate, region, resourceID) + + if endpointUrl == "" { + a.url = fmt.Sprintf(urlTemplate, region, resourceID) + } else { + a.url = fmt.Sprintf(urlOverrideTemplate, endpointUrl, resourceID) + } log.Printf("D! Writing to Azure Monitor URL: %s", a.url) From c234ba291ec72e4b87f68326a2ec2fe3b8558158 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 20 Feb 2019 15:17:17 -0800 Subject: [PATCH 0613/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 402c906fd..f3a61f0eb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,7 @@ - [#4892](https://github.com/influxdata/telegraf/pull/4892): Add option to set retain flag on messages to mqtt output. - [#5165](https://github.com/influxdata/telegraf/pull/5165): Add resource path based filtering to vsphere input. 
- [#5417](https://github.com/influxdata/telegraf/pull/5417): Add rcode tag and field to dns_query input. +- [#5453](https://github.com/influxdata/telegraf/pull/5453): Support Azure Sovereign Environments with endpoint_url option. #### Bugfixes From 0a01713bccef1acf77e9d67d47dd2d8c876a9312 Mon Sep 17 00:00:00 2001 From: emily Date: Wed, 20 Feb 2019 17:33:56 -0800 Subject: [PATCH 0614/1815] Retry restarting receiver on PubSub service error (#5458) --- plugins/inputs/cloud_pubsub/README.md | 6 ++ plugins/inputs/cloud_pubsub/pubsub.go | 92 +++++++++++++------ plugins/inputs/cloud_pubsub/pubsub_test.go | 39 ++++++++ .../inputs/cloud_pubsub/subscription_stub.go | 27 ++++-- 4 files changed, 132 insertions(+), 32 deletions(-) diff --git a/plugins/inputs/cloud_pubsub/README.md b/plugins/inputs/cloud_pubsub/README.md index eb08af105..6bf3fa29e 100644 --- a/plugins/inputs/cloud_pubsub/README.md +++ b/plugins/inputs/cloud_pubsub/README.md @@ -26,6 +26,12 @@ and creates metrics using one of the supported [input data formats][]. ## Application Default Credentials, which is preferred. # credentials_file = "path/to/my/creds.json" + ## Optional. Number of seconds to wait before attempting to restart the + ## PubSub subscription receiver after an unexpected error. + ## If the streaming pull for a PubSub Subscription fails (receiver), + ## the agent attempts to restart receiving messages after this many seconds. + # retry_delay_seconds = 5 + ## Optional. Maximum byte length of a message to consume. ## Larger messages are dropped with an error. If less than 0 or unspecified, ## treated as no limit. diff --git a/plugins/inputs/cloud_pubsub/pubsub.go b/plugins/inputs/cloud_pubsub/pubsub.go index 8c2b600b0..9f7125126 100644 --- a/plugins/inputs/cloud_pubsub/pubsub.go +++ b/plugins/inputs/cloud_pubsub/pubsub.go @@ -12,14 +12,19 @@ import ( "github.com/influxdata/telegraf/plugins/parsers" "golang.org/x/oauth2/google" "google.golang.org/api/option" + "log" + "time" ) type empty struct{} type semaphore chan empty const defaultMaxUndeliveredMessages = 1000 +const defaultRetryDelaySeconds = 5 type PubSub struct { + sync.Mutex + CredentialsFile string `toml:"credentials_file"` Project string `toml:"project"` Subscription string `toml:"subscription"` @@ -31,8 +36,9 @@ type PubSub struct { MaxReceiverGoRoutines int `toml:"max_receiver_go_routines"` // Agent settings - MaxMessageLen int `toml:"max_message_len"` - MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + MaxMessageLen int `toml:"max_message_len"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + RetryReceiveDelaySeconds int `toml:"retry_delay_seconds"` sub subscription stubSub func() subscription @@ -42,7 +48,6 @@ type PubSub struct { parser parsers.Parser wg *sync.WaitGroup acc telegraf.TrackingAccumulator - mu sync.Mutex undelivered map[telegraf.TrackingID]message sem semaphore @@ -78,35 +83,36 @@ func (ps *PubSub) Start(ac telegraf.Accumulator) error { return fmt.Errorf(`"project" is required`) } - cctx, cancel := context.WithCancel(context.Background()) + ps.sem = make(semaphore, ps.MaxUndeliveredMessages) + ps.acc = ac.WithTracking(ps.MaxUndeliveredMessages) + + // Create top-level context with cancel that will be called on Stop(). 
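+	// Cancelling it (via Stop) unwinds both the delivery-notification
+	// goroutine and the receiver goroutine started below.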
+	ctx, cancel := context.WithCancel(context.Background())
 	ps.cancel = cancel
 
 	if ps.stubSub != nil {
 		ps.sub = ps.stubSub()
 	} else {
-		subRef, err := ps.getGCPSubscription(cctx, ps.Subscription)
+		subRef, err := ps.getGCPSubscription(ps.Subscription)
 		if err != nil {
-			return err
+			return fmt.Errorf("unable to create subscription handle: %v", err)
 		}
 		ps.sub = subRef
 	}
 
 	ps.wg = &sync.WaitGroup{}
 
-	ps.acc = ac.WithTracking(ps.MaxUndeliveredMessages)
-	ps.sem = make(semaphore, ps.MaxUndeliveredMessages)
-
-	// Start receiver in new goroutine for each subscription.
-	ps.wg.Add(1)
-	go func() {
-		defer ps.wg.Done()
-		ps.subReceive(cctx)
-	}()
-
 	// Start goroutine to handle delivery notifications from accumulator.
 	ps.wg.Add(1)
 	go func() {
 		defer ps.wg.Done()
-		ps.receiveDelivered(cctx)
+		ps.waitForDelivery(ctx)
+	}()
+
+	// Start goroutine for subscription receiver.
+	ps.wg.Add(1)
+	go func() {
+		defer ps.wg.Done()
+		ps.receiveWithRetry(ctx)
 	}()
 
 	return nil
@@ -119,13 +125,41 @@ func (ps *PubSub) Stop() {
 	ps.wg.Wait()
 }
 
-func (ps *PubSub) subReceive(cctx context.Context) {
+// receiveWithRetry is called within a goroutine and manages keeping a
+// subscription.Receive() up and running while the plugin has not been stopped.
+func (ps *PubSub) receiveWithRetry(parentCtx context.Context) {
+	err := ps.startReceiver(parentCtx)
+
+	for err != nil && parentCtx.Err() == nil {
+		log.Printf("E! [inputs.cloud_pubsub] Receiver for subscription %s exited with error: %v", ps.sub.ID(), err)
+
+		delay := defaultRetryDelaySeconds
+		if ps.RetryReceiveDelaySeconds > 0 {
+			delay = ps.RetryReceiveDelaySeconds
+		}
+
+		log.Printf("I! [inputs.cloud_pubsub] Waiting %d seconds before attempting to restart receiver...", delay)
+		time.Sleep(time.Duration(delay) * time.Second)
+
+		err = ps.startReceiver(parentCtx)
+	}
+}
+
+func (ps *PubSub) startReceiver(parentCtx context.Context) error {
+	log.Printf("I! [inputs.cloud_pubsub] Starting receiver for subscription %s...", ps.sub.ID())
+	cctx, ccancel := context.WithCancel(parentCtx)
 	err := ps.sub.Receive(cctx, func(ctx context.Context, msg message) {
 		if err := ps.onMessage(ctx, msg); err != nil {
 			ps.acc.AddError(fmt.Errorf("unable to add message from subscription %s: %v", ps.sub.ID(), err))
 		}
 	})
-	ps.acc.AddError(fmt.Errorf("receiver for subscription %s exited: %v", ps.sub.ID(), err))
+	if err != nil {
+		ps.acc.AddError(fmt.Errorf("receiver for subscription %s exited: %v", ps.sub.ID(), err))
+	} else {
+		log.Printf("I! [inputs.cloud_pubsub] subscription pull ended (no error, most likely stopped)")
+	}
+	ccancel()
+	return err
 }
 
 // onMessage handles parsing and adding a received message to the accumulator.
@@ -153,8 +187,8 @@ func (ps *PubSub) onMessage(ctx context.Context, msg message) error { break } - ps.mu.Lock() - defer ps.mu.Unlock() + ps.Lock() + defer ps.Unlock() id := ps.acc.AddTrackingMetricGroup(metrics) if ps.undelivered == nil { @@ -165,10 +199,10 @@ func (ps *PubSub) onMessage(ctx context.Context, msg message) error { return nil } -func (ps *PubSub) receiveDelivered(ctx context.Context) { +func (ps *PubSub) waitForDelivery(parentCtx context.Context) { for { select { - case <-ctx.Done(): + case <-parentCtx.Done(): return case info := <-ps.acc.Delivered(): <-ps.sem @@ -182,8 +216,8 @@ func (ps *PubSub) receiveDelivered(ctx context.Context) { } func (ps *PubSub) removeDelivered(id telegraf.TrackingID) message { - ps.mu.Lock() - defer ps.mu.Unlock() + ps.Lock() + defer ps.Unlock() msg, ok := ps.undelivered[id] if !ok { @@ -219,7 +253,7 @@ func (ps *PubSub) getPubSubClient() (*pubsub.Client, error) { return client, nil } -func (ps *PubSub) getGCPSubscription(ctx context.Context, subId string) (subscription, error) { +func (ps *PubSub) getGCPSubscription(subId string) (subscription, error) { client, err := ps.getPubSubClient() if err != nil { return nil, err @@ -262,6 +296,12 @@ const sampleConfig = ` ## Application Default Credentials, which is preferred. # credentials_file = "path/to/my/creds.json" + ## Optional. Number of seconds to wait before attempting to restart the + ## PubSub subscription receiver after an unexpected error. + ## If the streaming pull for a PubSub Subscription fails (receiver), + ## the agent attempts to restart receiving messages after this many seconds. + # retry_delay_seconds = 5 + ## Optional. Maximum byte length of a message to consume. ## Larger messages are dropped with an error. If less than 0 or unspecified, ## treated as no limit. 
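The restart behavior above reduces to a small, reusable pattern: retry a
blocking receive with a fixed delay until the parent context ends. A minimal,
self-contained sketch follows; `receiveOnce` is a hypothetical stand-in for
`subscription.Receive` and the delay values are illustrative, so only the
control flow mirrors the commit:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// receiveOnce stands in for subscription.Receive: it blocks until the
// context ends (clean shutdown) or the streaming pull fails (error).
func receiveOnce(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return nil
	case <-time.After(150 * time.Millisecond):
		return errors.New("simulated PubSub service error")
	}
}

// receiveWithRetry restarts the receiver after a fixed delay until the
// parent context is cancelled, mirroring the loop added in this commit.
func receiveWithRetry(ctx context.Context, delay time.Duration) {
	err := receiveOnce(ctx)
	for err != nil && ctx.Err() == nil {
		fmt.Printf("receiver exited: %v; restarting in %s\n", err, delay)
		time.Sleep(delay)
		err = receiveOnce(ctx)
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	receiveWithRetry(ctx, 200*time.Millisecond)
}
```

Checking `ctx.Err()` in the loop condition is what distinguishes a service
error (retry) from a deliberate Stop() (exit quietly).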
diff --git a/plugins/inputs/cloud_pubsub/pubsub_test.go b/plugins/inputs/cloud_pubsub/pubsub_test.go index fd3ffb63e..be6070d15 100644 --- a/plugins/inputs/cloud_pubsub/pubsub_test.go +++ b/plugins/inputs/cloud_pubsub/pubsub_test.go @@ -1,6 +1,7 @@ package cloud_pubsub import ( + "errors" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" @@ -21,6 +22,7 @@ func TestRunParse(t *testing.T) { id: subId, messages: make(chan *testMsg, 100), } + sub.receiver = testMessagesReceive(sub) ps := &PubSub{ parser: testParser, @@ -62,6 +64,7 @@ func TestRunInvalidMessages(t *testing.T) { id: subId, messages: make(chan *testMsg, 100), } + sub.receiver = testMessagesReceive(sub) ps := &PubSub{ parser: testParser, @@ -107,6 +110,7 @@ func TestRunOverlongMessages(t *testing.T) { id: subId, messages: make(chan *testMsg, 100), } + sub.receiver = testMessagesReceive(sub) ps := &PubSub{ parser: testParser, @@ -141,6 +145,41 @@ func TestRunOverlongMessages(t *testing.T) { assert.Equal(t, acc.NFields(), 0) } +func TestRunErrorInSubscriber(t *testing.T) { + subId := "sub-unexpected-error" + + acc := &testutil.Accumulator{} + + testParser, _ := parsers.NewInfluxParser() + + sub := &stubSub{ + id: subId, + messages: make(chan *testMsg, 100), + } + fakeErrStr := "a fake error" + sub.receiver = testMessagesError(sub, errors.New("a fake error")) + + ps := &PubSub{ + parser: testParser, + stubSub: func() subscription { return sub }, + Project: "projectIDontMatterForTests", + Subscription: subId, + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + RetryReceiveDelaySeconds: 1, + } + + if err := ps.Start(acc); err != nil { + t.Fatalf("test PubSub failed to start: %s", err) + } + defer ps.Stop() + + if ps.sub == nil { + t.Fatal("expected plugin subscription to be non-nil") + } + acc.WaitError(1) + assert.Regexp(t, fakeErrStr, acc.Errors[0]) +} + func validateTestInfluxMetric(t *testing.T, m *testutil.Metric) { assert.Equal(t, "cpu_load_short", m.Measurement) assert.Equal(t, "server01", m.Tags["host"]) diff --git a/plugins/inputs/cloud_pubsub/subscription_stub.go b/plugins/inputs/cloud_pubsub/subscription_stub.go index 018c5472c..e061728ca 100644 --- a/plugins/inputs/cloud_pubsub/subscription_stub.go +++ b/plugins/inputs/cloud_pubsub/subscription_stub.go @@ -9,6 +9,7 @@ import ( type stubSub struct { id string messages chan *testMsg + receiver receiveFunc } func (s *stubSub) ID() string { @@ -16,12 +17,26 @@ func (s *stubSub) ID() string { } func (s *stubSub) Receive(ctx context.Context, f func(context.Context, message)) error { - for { - select { - case <-ctx.Done(): - return ctx.Err() - case m := <-s.messages: - f(ctx, m) + return s.receiver(ctx, f) +} + +type receiveFunc func(ctx context.Context, f func(context.Context, message)) error + +func testMessagesError(s *stubSub, expectedErr error) receiveFunc { + return func(ctx context.Context, f func(context.Context, message)) error { + return expectedErr + } +} + +func testMessagesReceive(s *stubSub) receiveFunc { + return func(ctx context.Context, f func(context.Context, message)) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + case m := <-s.messages: + f(ctx, m) + } } } } From f8cc9719a237ab221594a7ac2c42c4cc18d1859a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 20 Feb 2019 17:57:08 -0800 Subject: [PATCH 0615/1815] Document how to increase the file limit in the ping input --- plugins/inputs/ping/README.md | 21 +++++++++++++++++++++ 1 file changed, 21 
insertions(+) diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 4f953c8e1..1083c0074 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -40,6 +40,27 @@ apt-get install iputils-ping # arguments = ["-c", "3"] ``` +#### File Limit + +Since this plugin runs the ping command, it may need to open several files per +host. With a large host list you may receive a `too many open files` error. + +To increase this limit on platforms using systemd it must be done in the +service file. + + +Find the service unit file: +``` +$ systemctl show telegraf.service -p FragmentPath +FragmentPath=/lib/systemd/system/telegraf.service +``` + +Set the file number limit: +``` +[Service] +LimitNOFILE=4096 +``` + ### Metrics: - ping From 3c95b255884a9faf5cd60c22d84ebfaad7cd956d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 21 Feb 2019 12:08:59 -0800 Subject: [PATCH 0616/1815] Update link to grok built in patterns --- plugins/inputs/logparser/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index 00b37e504..cf4c00d16 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -114,7 +114,7 @@ To match a comma decimal point you can use a period. For example `%{TIMESTAMP:t To match a comma decimal point you can use a period in the pattern string. See https://golang.org/pkg/time/#Parse for more details. -Telegraf has many of its own [built-in patterns](./grok/patterns/influx-patterns), +Telegraf has many of its own [built-in patterns](/plugins/parsers/grok/influx-patterns.go), as well as support for most of [logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns). _Golang regular expressions do not support lookahead or lookbehind. From 0a2cc3ac3f12b2487a179ebe1dbcaec483157245 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 21 Feb 2019 12:11:00 -0800 Subject: [PATCH 0617/1815] Update link to grok built in patterns --- plugins/inputs/logparser/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index cf4c00d16..47edbd296 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -114,7 +114,7 @@ To match a comma decimal point you can use a period. For example `%{TIMESTAMP:t To match a comma decimal point you can use a period in the pattern string. See https://golang.org/pkg/time/#Parse for more details. -Telegraf has many of its own [built-in patterns](/plugins/parsers/grok/influx-patterns.go), +Telegraf has many of its own [built-in patterns](/plugins/parsers/grok/influx_patterns.go), as well as support for most of [logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns). _Golang regular expressions do not support lookahead or lookbehind. 
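The lookahead caveat noted above is easy to verify: Go's `regexp` package
implements RE2, which rejects Perl-style lookahead at compile time. A short
sketch (pattern strings are illustrative only):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Perl-style lookahead is a compile-time error in Go's RE2 engine.
	_, err := regexp.Compile(`foo(?=bar)`)
	fmt.Println(err)
	// error parsing regexp: invalid or unsupported Perl syntax: `(?=`

	// Ordinary alternation and named capture groups, as used by grok
	// patterns, compile and match as expected.
	re := regexp.MustCompile(`(?P<word>foo|bar)`)
	fmt.Println(re.FindStringSubmatch("foobar")) // [foo foo]
}
```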
From 33dfbfdf5e7652ac7703e4cec48c7b65e1704e78 Mon Sep 17 00:00:00 2001 From: Nicolas Bazire Date: Thu, 21 Feb 2019 21:19:50 +0100 Subject: [PATCH 0618/1815] Fix delete in place of keys in stackdriver output (#5465) --- plugins/outputs/stackdriver/stackdriver.go | 4 +- .../outputs/stackdriver/stackdriver_test.go | 73 ++++++++++++++++++- 2 files changed, 75 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index c7d9e45bc..d57675bc3 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -199,12 +199,14 @@ func (s *Stackdriver) Write(metrics []telegraf.Metric) error { for len(buckets) != 0 { // can send up to 200 time series to stackdriver timeSeries := make([]*monitoringpb.TimeSeries, 0, 200) - for i, k := range keys { + for i := 0; i < len(keys); i++ { + k := keys[i] s := buckets[k] timeSeries = append(timeSeries, s[0]) if len(s) == 1 { delete(buckets, k) keys = append(keys[:i], keys[i+1:]...) + i-- continue } diff --git a/plugins/outputs/stackdriver/stackdriver_test.go b/plugins/outputs/stackdriver/stackdriver_test.go index 151c84020..7ddaa4485 100644 --- a/plugins/outputs/stackdriver/stackdriver_test.go +++ b/plugins/outputs/stackdriver/stackdriver_test.go @@ -253,6 +253,51 @@ func TestWriteBatchable(t *testing.T) { }, time.Unix(1, 0), ), + testutil.MustMetric("ram", + map[string]string{ + "foo": "bar", + }, + map[string]interface{}{ + "value": 42, + }, + time.Unix(4, 0), + ), + testutil.MustMetric("ram", + map[string]string{ + "foo": "foo", + }, + map[string]interface{}{ + "value": 43, + }, + time.Unix(5, 0), + ), + testutil.MustMetric("ram", + map[string]string{ + "foo": "bar", + }, + map[string]interface{}{ + "value": 43, + }, + time.Unix(3, 0), + ), + testutil.MustMetric("disk", + map[string]string{ + "foo": "foo", + }, + map[string]interface{}{ + "value": 43, + }, + time.Unix(3, 0), + ), + testutil.MustMetric("disk", + map[string]string{ + "foo": "bar", + }, + map[string]interface{}{ + "value": 43, + }, + time.Unix(1, 0), + ), } err = s.Connect() @@ -262,7 +307,7 @@ func TestWriteBatchable(t *testing.T) { require.Len(t, mockMetric.reqs, 2) request := mockMetric.reqs[0].(*monitoringpb.CreateTimeSeriesRequest) - require.Len(t, request.TimeSeries, 2) + require.Len(t, request.TimeSeries, 6) ts := request.TimeSeries[0] require.Len(t, ts.Points, 1) require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ @@ -288,6 +333,32 @@ func TestWriteBatchable(t *testing.T) { Int64Value: int64(43), }, }) + + ts = request.TimeSeries[2] + require.Len(t, ts.Points, 1) + require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ + EndTime: &googlepb.Timestamp{ + Seconds: 3, + }, + }) + require.Equal(t, ts.Points[0].Value, &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(43), + }, + }) + + ts = request.TimeSeries[4] + require.Len(t, ts.Points, 1) + require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ + EndTime: &googlepb.Timestamp{ + Seconds: 5, + }, + }) + require.Equal(t, ts.Points[0].Value, &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(43), + }, + }) } func TestWriteIgnoredErrors(t *testing.T) { From 5f1bc9e49f333527d827bf5563620dc099aba4d8 Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Thu, 21 Feb 2019 17:49:52 -0500 Subject: [PATCH 0619/1815] Accept values with a negative exponent in wavefront parser (#5462) --- 
plugins/parsers/wavefront/element.go | 2 +- plugins/parsers/wavefront/parser_test.go | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/plugins/parsers/wavefront/element.go b/plugins/parsers/wavefront/element.go index 859eab1f2..3b7c875a2 100644 --- a/plugins/parsers/wavefront/element.go +++ b/plugins/parsers/wavefront/element.go @@ -60,7 +60,7 @@ func (ep *ValueParser) parse(p *PointParser, pt *Point) error { tok, lit = p.scan() } - for tok != EOF && (tok == LETTER || tok == NUMBER || tok == DOT) { + for tok != EOF && (tok == LETTER || tok == NUMBER || tok == DOT || tok == MINUS_SIGN) { p.writeBuf.WriteString(lit) tok, lit = p.scan() } diff --git a/plugins/parsers/wavefront/parser_test.go b/plugins/parsers/wavefront/parser_test.go index e7d427dd8..fed31b5f2 100644 --- a/plugins/parsers/wavefront/parser_test.go +++ b/plugins/parsers/wavefront/parser_test.go @@ -63,6 +63,24 @@ func TestParse(t *testing.T) { assert.NoError(t, err) assert.EqualValues(t, parsedMetrics[0], testMetric) + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" -1.1234 1530939936 \"source\"=\"mysource\" tag2=value2")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": -1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e04 1530939936 \"source\"=\"mysource\" tag2=value2")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234e04}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e-04 1530939936 \"source\"=\"mysource\" tag2=value2")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234e-04}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + parsedMetrics, err = parser.Parse([]byte("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ")) assert.NoError(t, err) testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) @@ -201,6 +219,9 @@ func TestParseInvalid(t *testing.T) { _, err = parser.Parse([]byte("test.metric 1 1530939936 tag1=val\\\"ue1")) assert.Error(t, err) + _, err = parser.Parse([]byte("\"test.metric\" -1.12-34 1530939936 \"source\"=\"mysource\" tag2=value2")) + assert.Error(t, err) + } func TestParseDefaultTags(t *testing.T) { From cf18c4a2bffab89ca62c8333dca867397e39359d Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Fri, 22 Feb 2019 14:10:59 -0500 Subject: [PATCH 0620/1815] Update wavefront-sdk-go version (#5461) --- Gopkg.lock | 15 ++++++++++++--- Gopkg.toml | 2 +- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 79ad0477b..233fd9f3f 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -210,6 +210,14 @@ revision = "cf455bc755fe41ac9bb2861e7a961833d9c2ecc3" version = "v2.1.13" +[[projects]] + digest = "1:e5691038f8e87e7da05280095d968e50c17d624e25cca095d4e4cd947a805563" + name = "github.com/caio/go-tdigest" + packages = ["."] + pruneopts = "" + revision = 
"f3c8d94f65d3096ac96eda54ffcd10c0fe1477f1" + version = "v2.3.0" + [[projects]] digest = "1:f619cb9b07aebe5416262cdd8b86082e8d5bdc5264cb3b615ff858df0b645f97" name = "github.com/cenkalti/backoff" @@ -1111,15 +1119,16 @@ version = "v0.19.0" [[projects]] - digest = "1:c3bdfb7e9b2a66bafbd47517a1a4e489706f75af37ad5bfb57621bf41c16b556" + digest = "1:4cb7eb45ed9a5129bc77c726328c130abcbaae566c1fe4d82693fae86c8c621d" name = "github.com/wavefronthq/wavefront-sdk-go" packages = [ + "histogram", "internal", "senders", ] pruneopts = "" - revision = "7821ac6d8ae05fe70c6d090ebda380c64f1416e4" - version = "v0.9.1" + revision = "fa87530cd02a8ad08bd179e1c39fb319a0cc0dae" + version = "v0.9.2" [[projects]] branch = "master" diff --git a/Gopkg.toml b/Gopkg.toml index 51fc1fbb6..e14f5e763 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -248,7 +248,7 @@ [[constraint]] name = "github.com/wavefronthq/wavefront-sdk-go" - version = "v0.9.1" + version = "^0.9.1" [[constraint]] name = "github.com/karrick/godirwalk" From c9fb1fcdca43e04a6eeb8b9d5b1532cf5cadcd97 Mon Sep 17 00:00:00 2001 From: Jesse Weaver Date: Fri, 22 Feb 2019 12:02:03 -0700 Subject: [PATCH 0621/1815] Add mutual TLS support to prometheus_client output plugin Signed-off-by: Robert Sullivan --- Gopkg.lock | 25 +++ plugins/outputs/prometheus_client/.gitignore | 2 + plugins/outputs/prometheus_client/README.md | 6 + .../prometheus_client/prometheus_client.go | 56 ++++++- .../prometheus_client_tls_test.go | 158 ++++++++++++++++++ .../scripts/generate_certs.sh | 17 ++ 6 files changed, 259 insertions(+), 5 deletions(-) create mode 100644 plugins/outputs/prometheus_client/.gitignore create mode 100644 plugins/outputs/prometheus_client/prometheus_client_tls_test.go create mode 100755 plugins/outputs/prometheus_client/scripts/generate_certs.sh diff --git a/Gopkg.lock b/Gopkg.lock index 79ad0477b..c3c980f65 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -821,6 +821,27 @@ revision = "eee57a3ac4174c55924125bb15eeeda8cffb6e6f" version = "v1.0.7" +[[projects]] + digest = "1:c8f0c8c28c9c1c51db72d0e7f04797cfe5d0d50528274099b6b2d6c314db7f97" + name = "github.com/onsi/gomega" + packages = [ + ".", + "format", + "internal/assertion", + "internal/asyncassertion", + "internal/oraclematcher", + "internal/testingtsupport", + "matchers", + "matchers/support/goraph/bipartitegraph", + "matchers/support/goraph/edge", + "matchers/support/goraph/node", + "matchers/support/goraph/util", + "types", + ] + pruneopts = "" + revision = "65fb64232476ad9046e57c26cd0bff3d3a8dc6cd" + version = "v1.4.3" + [[projects]] digest = "1:5d9b668b0b4581a978f07e7d2e3314af18eb27b3fb5d19b70185b7c575723d11" name = "github.com/opencontainers/go-digest" @@ -1541,6 +1562,7 @@ "github.com/go-sql-driver/mysql", "github.com/gobwas/glob", "github.com/golang/protobuf/proto", + "github.com/golang/protobuf/ptypes/duration", "github.com/golang/protobuf/ptypes/empty", "github.com/golang/protobuf/ptypes/timestamp", "github.com/google/go-cmp/cmp", @@ -1567,6 +1589,7 @@ "github.com/nats-io/gnatsd/server", "github.com/nats-io/go-nats", "github.com/nsqio/go-nsq", + "github.com/onsi/gomega", "github.com/openzipkin/zipkin-go-opentracing", "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore", "github.com/prometheus/client_golang/prometheus", @@ -1612,8 +1635,10 @@ "golang.org/x/sys/windows", "golang.org/x/sys/windows/svc", "golang.org/x/sys/windows/svc/mgr", + "google.golang.org/api/iterator", "google.golang.org/api/option", "google.golang.org/api/support/bundler", + 
"google.golang.org/genproto/googleapis/api/distribution", "google.golang.org/genproto/googleapis/api/metric", "google.golang.org/genproto/googleapis/api/monitoredres", "google.golang.org/genproto/googleapis/monitoring/v3", diff --git a/plugins/outputs/prometheus_client/.gitignore b/plugins/outputs/prometheus_client/.gitignore new file mode 100644 index 000000000..418f8fafd --- /dev/null +++ b/plugins/outputs/prometheus_client/.gitignore @@ -0,0 +1,2 @@ +vendor +assets diff --git a/plugins/outputs/prometheus_client/README.md b/plugins/outputs/prometheus_client/README.md index c06fdbaf1..c2f097fbd 100644 --- a/plugins/outputs/prometheus_client/README.md +++ b/plugins/outputs/prometheus_client/README.md @@ -35,6 +35,12 @@ This plugin starts a [Prometheus](https://prometheus.io/) Client, it exposes all ## If set, enable TLS with the given certificate. # tls_cert = "/etc/ssl/telegraf.crt" # tls_key = "/etc/ssl/telegraf.key" + + ## If set, enable TLS client authentication with the given CA. + # tls_ca = "/etc/ssl/telegraf_ca.crt" + + ## Boolean value indicating whether or not to skip SSL verification + # insecure_skip_verify = false ## Export metric collection time. # export_timestamp = false diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index d774b4088..c1365e44c 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -3,7 +3,10 @@ package prometheus_client import ( "context" "crypto/subtle" + cryptotls "crypto/tls" + "crypto/x509" "fmt" + "io/ioutil" "log" "net" "net/http" @@ -16,6 +19,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -56,8 +60,6 @@ type MetricFamily struct { type PrometheusClient struct { Listen string - TLSCert string `toml:"tls_cert"` - TLSKey string `toml:"tls_key"` BasicUsername string `toml:"basic_username"` BasicPassword string `toml:"basic_password"` IPRange []string `toml:"ip_range"` @@ -67,6 +69,7 @@ type PrometheusClient struct { StringAsLabel bool `toml:"string_as_label"` ExportTimestamp bool `toml:"export_timestamp"` + tls.ClientConfig server *http.Server sync.Mutex @@ -105,6 +108,12 @@ var sampleConfig = ` ## If set, enable TLS with the given certificate. # tls_cert = "/etc/ssl/telegraf.crt" # tls_key = "/etc/ssl/telegraf.key" + + ## If set, enable TLS client authentication with the given CA. + # tls_ca = "/etc/ssl/telegraf_ca.crt" + + ## Boolean value indicating whether or not to skip SSL verification + # insecure_skip_verify = false ## Export metric collection time. 
# export_timestamp = false @@ -184,9 +193,18 @@ func (p *PrometheusClient) Connect() error { mux.Handle(p.Path, p.auth(promhttp.HandlerFor( registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}))) - p.server = &http.Server{ - Addr: p.Listen, - Handler: mux, + if p.TLSCA != "" { + log.Printf("Starting Prometheus Output Plugin Server with Mutual TLS enabled.\n") + p.server = &http.Server{ + Addr: p.Listen, + Handler: mux, + TLSConfig: p.buildMutualTLSConfig(), + } + } else { + p.server = &http.Server{ + Addr: p.Listen, + Handler: mux, + } } go func() { @@ -205,6 +223,34 @@ func (p *PrometheusClient) Connect() error { return nil } +func (p *PrometheusClient) buildMutualTLSConfig() *cryptotls.Config { + certPool := x509.NewCertPool() + caCert, err := ioutil.ReadFile(p.TLSCA) + if err != nil { + log.Printf("failed to read client ca cert: %s", err.Error()) + panic(err) + } + ok := certPool.AppendCertsFromPEM(caCert) + if !ok { + log.Printf("failed to append client certs: %s", err.Error()) + panic(err) + } + + clientAuth := cryptotls.RequireAndVerifyClientCert + if p.InsecureSkipVerify { + clientAuth = cryptotls.RequestClientCert + } + + return &cryptotls.Config{ + ClientAuth: clientAuth, + ClientCAs: certPool, + MinVersion: cryptotls.VersionTLS12, + CipherSuites: []uint16{cryptotls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, cryptotls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, + PreferServerCipherSuites: true, + InsecureSkipVerify: p.InsecureSkipVerify, + } +} + func (p *PrometheusClient) Close() error { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() diff --git a/plugins/outputs/prometheus_client/prometheus_client_tls_test.go b/plugins/outputs/prometheus_client/prometheus_client_tls_test.go new file mode 100644 index 000000000..485f9143b --- /dev/null +++ b/plugins/outputs/prometheus_client/prometheus_client_tls_test.go @@ -0,0 +1,158 @@ +package prometheus_client_test + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "github.com/influxdata/telegraf/plugins/outputs/prometheus_client" + "github.com/influxdata/telegraf/testutil" + "github.com/influxdata/toml" + . 
"github.com/onsi/gomega" + "io/ioutil" + "net/http" + "os/exec" + "path/filepath" + "testing" +) + +var ca, _ = filepath.Abs("assets/telegrafCA.crt") +var cert, _ = filepath.Abs("assets/telegraf.crt") +var key, _ = filepath.Abs("assets/telegraf.key") +var configWithTLS = fmt.Sprintf(` + listen = "127.0.0.1:9090" + tls_ca = "%s" + tls_cert = "%s" + tls_key = "%s" +`, ca, cert, key) + +var configWithoutTLS = ` + listen = "127.0.0.1:9090" +` + +type PrometheusClientTestContext struct { + Output *prometheus_client.PrometheusClient + Accumulator *testutil.Accumulator + Client *http.Client + + *GomegaWithT +} + +func init() { + path, _ := filepath.Abs("./scripts/generate_certs.sh") + _, err := exec.Command(path).CombinedOutput() + if err != nil { + panic(err) + } +} + +func TestWorksWithoutTLS(t *testing.T) { + tc := buildTestContext(t, []byte(configWithoutTLS)) + err := tc.Output.Connect() + defer tc.Output.Close() + + if err != nil { + panic(err) + } + + var response *http.Response + tc.Eventually(func() bool { + response, err = tc.Client.Get("http://localhost:9090/metrics") + return err == nil + }, "5s").Should(BeTrue()) + + if err != nil { + panic(err) + } + + tc.Expect(response.StatusCode).To(Equal(http.StatusOK)) +} + +func TestWorksWithTLS(t *testing.T) { + tc := buildTestContext(t, []byte(configWithTLS)) + err := tc.Output.Connect() + defer tc.Output.Close() + + if err != nil { + panic(err) + } + + var response *http.Response + tc.Eventually(func() bool { + response, err = tc.Client.Get("https://localhost:9090/metrics") + return err == nil + }, "5s").Should(BeTrue()) + + if err != nil { + panic(err) + } + + tc.Expect(response.StatusCode).To(Equal(http.StatusOK)) + + response, err = tc.Client.Get("http://localhost:9090/metrics") + + tc.Expect(err).To(HaveOccurred()) + + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + + client := &http.Client{Transport: tr} + response, err = client.Get("https://localhost:9090/metrics") + + tc.Expect(err).To(HaveOccurred()) +} + +func buildTestContext(t *testing.T, config []byte) *PrometheusClientTestContext { + output := prometheus_client.NewClient() + err := toml.Unmarshal(config, output) + + if err != nil { + panic(err) + } + + var ( + httpClient *http.Client + ) + + if output.TLSCA != "" { + httpClient = buildClientWithTLS(output) + } else { + httpClient = buildClientWithoutTLS() + } + + return &PrometheusClientTestContext{ + Output: output, + Accumulator: &testutil.Accumulator{}, + Client: httpClient, + GomegaWithT: NewGomegaWithT(t), + } +} + +func buildClientWithoutTLS() *http.Client { + return &http.Client{} +} + +func buildClientWithTLS(output *prometheus_client.PrometheusClient) *http.Client { + cert, err := tls.LoadX509KeyPair(output.TLSCert, output.TLSKey) + if err != nil { + panic(err) + } + + caCert, err := ioutil.ReadFile(output.TLSCA) + if err != nil { + panic(err) + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: caCertPool, + MinVersion: tls.VersionTLS12, + CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, + ServerName: "telegraf", + } + tlsConfig.BuildNameToCertificate() + transport := &http.Transport{TLSClientConfig: tlsConfig} + return &http.Client{Transport: transport} +} diff --git a/plugins/outputs/prometheus_client/scripts/generate_certs.sh b/plugins/outputs/prometheus_client/scripts/generate_certs.sh new file mode 100755 
index 000000000..1f7c3418f --- /dev/null +++ b/plugins/outputs/prometheus_client/scripts/generate_certs.sh @@ -0,0 +1,17 @@ +#!/bin/bash -e + +scripts_dir=$(cd $(dirname $0) && pwd) + +mkdir -p ${scripts_dir}/../assets +assets_dir=$(cd ${scripts_dir}/../assets && pwd) + +echo "Generating certs into ${assets_dir}" + +test ! `which certstrap` && go get -u -v github.com/square/certstrap + +rm -f ${assets_dir}/* + +# CA to distribute to loggregator certs +certstrap --depot-path ${assets_dir} init --passphrase '' --common-name telegrafCA --expires "25 years" +certstrap --depot-path ${assets_dir} request-cert --passphrase '' --common-name telegraf +certstrap --depot-path ${assets_dir} sign telegraf --CA telegrafCA --expires "25 years" \ No newline at end of file From 05af32b1915aabe8dde076c38320ff61e9ed4691 Mon Sep 17 00:00:00 2001 From: Robert Sullivan Date: Fri, 22 Feb 2019 15:18:36 -0700 Subject: [PATCH 0622/1815] Clean up TLS configuration in prometheus_client output plugin Signed-off-by: Jesse Weaver --- plugins/outputs/prometheus_client/.gitignore | 2 - .../prometheus_client/prometheus_client.go | 56 ++++--------------- .../prometheus_client_tls_test.go | 41 ++------------ .../scripts/generate_certs.sh | 17 ------ 4 files changed, 17 insertions(+), 99 deletions(-) delete mode 100644 plugins/outputs/prometheus_client/.gitignore delete mode 100755 plugins/outputs/prometheus_client/scripts/generate_certs.sh diff --git a/plugins/outputs/prometheus_client/.gitignore b/plugins/outputs/prometheus_client/.gitignore deleted file mode 100644 index 418f8fafd..000000000 --- a/plugins/outputs/prometheus_client/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -vendor -assets diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index c1365e44c..c2af6f655 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -3,10 +3,7 @@ package prometheus_client import ( "context" "crypto/subtle" - cryptotls "crypto/tls" - "crypto/x509" "fmt" - "io/ioutil" "log" "net" "net/http" @@ -69,7 +66,8 @@ type PrometheusClient struct { StringAsLabel bool `toml:"string_as_label"` ExportTimestamp bool `toml:"export_timestamp"` - tls.ClientConfig + tls.ServerConfig + server *http.Server sync.Mutex @@ -193,24 +191,20 @@ func (p *PrometheusClient) Connect() error { mux.Handle(p.Path, p.auth(promhttp.HandlerFor( registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}))) - if p.TLSCA != "" { - log.Printf("Starting Prometheus Output Plugin Server with Mutual TLS enabled.\n") - p.server = &http.Server{ - Addr: p.Listen, - Handler: mux, - TLSConfig: p.buildMutualTLSConfig(), - } - } else { - p.server = &http.Server{ - Addr: p.Listen, - Handler: mux, - } + tlsConfig, err := p.TLSConfig() + if err != nil { + return err + } + p.server = &http.Server{ + Addr: p.Listen, + Handler: mux, + TLSConfig: tlsConfig, } go func() { var err error if p.TLSCert != "" && p.TLSKey != "" { - err = p.server.ListenAndServeTLS(p.TLSCert, p.TLSKey) + err = p.server.ListenAndServeTLS("", "") } else { err = p.server.ListenAndServe() } @@ -223,34 +217,6 @@ func (p *PrometheusClient) Connect() error { return nil } -func (p *PrometheusClient) buildMutualTLSConfig() *cryptotls.Config { - certPool := x509.NewCertPool() - caCert, err := ioutil.ReadFile(p.TLSCA) - if err != nil { - log.Printf("failed to read client ca cert: %s", err.Error()) - panic(err) - } - ok := certPool.AppendCertsFromPEM(caCert) - if !ok { - 
log.Printf("failed to append client certs: %s", err.Error()) - panic(err) - } - - clientAuth := cryptotls.RequireAndVerifyClientCert - if p.InsecureSkipVerify { - clientAuth = cryptotls.RequestClientCert - } - - return &cryptotls.Config{ - ClientAuth: clientAuth, - ClientCAs: certPool, - MinVersion: cryptotls.VersionTLS12, - CipherSuites: []uint16{cryptotls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, cryptotls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, - PreferServerCipherSuites: true, - InsecureSkipVerify: p.InsecureSkipVerify, - } -} - func (p *PrometheusClient) Close() error { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() diff --git a/plugins/outputs/prometheus_client/prometheus_client_tls_test.go b/plugins/outputs/prometheus_client/prometheus_client_tls_test.go index 485f9143b..4658fcdb4 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_tls_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_tls_test.go @@ -2,28 +2,23 @@ package prometheus_client_test import ( "crypto/tls" - "crypto/x509" "fmt" "github.com/influxdata/telegraf/plugins/outputs/prometheus_client" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" . "github.com/onsi/gomega" - "io/ioutil" "net/http" - "os/exec" - "path/filepath" "testing" ) -var ca, _ = filepath.Abs("assets/telegrafCA.crt") -var cert, _ = filepath.Abs("assets/telegraf.crt") -var key, _ = filepath.Abs("assets/telegraf.key") +var pki = testutil.NewPKI("../../../testutil/pki") + var configWithTLS = fmt.Sprintf(` listen = "127.0.0.1:9090" - tls_ca = "%s" + tls_allowed_cacerts = ["%s"] tls_cert = "%s" tls_key = "%s" -`, ca, cert, key) +`, pki.TLSServerConfig().TLSAllowedCACerts[0], pki.TLSServerConfig().TLSCert, pki.TLSServerConfig().TLSKey) var configWithoutTLS = ` listen = "127.0.0.1:9090" @@ -37,14 +32,6 @@ type PrometheusClientTestContext struct { *GomegaWithT } -func init() { - path, _ := filepath.Abs("./scripts/generate_certs.sh") - _, err := exec.Command(path).CombinedOutput() - if err != nil { - panic(err) - } -} - func TestWorksWithoutTLS(t *testing.T) { tc := buildTestContext(t, []byte(configWithoutTLS)) err := tc.Output.Connect() @@ -114,7 +101,7 @@ func buildTestContext(t *testing.T, config []byte) *PrometheusClientTestContext httpClient *http.Client ) - if output.TLSCA != "" { + if len(output.TLSAllowedCACerts) != 0 { httpClient = buildClientWithTLS(output) } else { httpClient = buildClientWithoutTLS() @@ -133,26 +120,10 @@ func buildClientWithoutTLS() *http.Client { } func buildClientWithTLS(output *prometheus_client.PrometheusClient) *http.Client { - cert, err := tls.LoadX509KeyPair(output.TLSCert, output.TLSKey) + tlsConfig, err := pki.TLSClientConfig().TLSConfig() if err != nil { panic(err) } - - caCert, err := ioutil.ReadFile(output.TLSCA) - if err != nil { - panic(err) - } - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(caCert) - - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{cert}, - RootCAs: caCertPool, - MinVersion: tls.VersionTLS12, - CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, - ServerName: "telegraf", - } - tlsConfig.BuildNameToCertificate() transport := &http.Transport{TLSClientConfig: tlsConfig} return &http.Client{Transport: transport} } diff --git a/plugins/outputs/prometheus_client/scripts/generate_certs.sh b/plugins/outputs/prometheus_client/scripts/generate_certs.sh deleted file mode 100755 index 1f7c3418f..000000000 --- 
a/plugins/outputs/prometheus_client/scripts/generate_certs.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -e - -scripts_dir=$(cd $(dirname $0) && pwd) - -mkdir -p ${scripts_dir}/../assets -assets_dir=$(cd ${scripts_dir}/../assets && pwd) - -echo "Generating certs into ${assets_dir}" - -test ! `which certstrap` && go get -u -v github.com/square/certstrap - -rm -f ${assets_dir}/* - -# CA to distribute to loggregator certs -certstrap --depot-path ${assets_dir} init --passphrase '' --common-name telegrafCA --expires "25 years" -certstrap --depot-path ${assets_dir} request-cert --passphrase '' --common-name telegraf -certstrap --depot-path ${assets_dir} sign telegraf --CA telegrafCA --expires "25 years" \ No newline at end of file From 9e0248898fc018fda6ea8cd8ed7707c89ad8a810 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 25 Feb 2019 10:54:19 -0800 Subject: [PATCH 0623/1815] Disable results by row in azuredb query (#5467) --- plugins/inputs/sqlserver/sqlserver.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index cb667e43f..dc57c87a4 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -5,11 +5,9 @@ import ( "sync" "time" + _ "github.com/denisenkom/go-mssqldb" // go-mssqldb initialization "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - - // go-mssqldb initialization - _ "github.com/denisenkom/go-mssqldb" ) // SQLServer struct @@ -90,7 +88,7 @@ func initQueries(s *SQLServer) { // If this is an AzureDB instance, grab some extra metrics if s.AzureDB { - queries["AzureDB"] = Query{Script: sqlAzureDB, ResultByRow: true} + queries["AzureDB"] = Query{Script: sqlAzureDB, ResultByRow: false} } // Decide if we want to run version 1 or version 2 queries From 2abed0e04fffe5ca70ea78320cab977cc2824b31 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 25 Feb 2019 10:55:14 -0800 Subject: [PATCH 0624/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f3a61f0eb..bd62eaf78 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,6 +67,7 @@ - [#5403](https://github.com/influxdata/telegraf/issues/5403): Remove error log when snmp6 directory does not exist with nstat input. - [#5437](https://github.com/influxdata/telegraf/issues/5437): Host not added when using custom arguments in ping plugin. - [#5438](https://github.com/influxdata/telegraf/issues/5438): Fix InfluxDB output UDP line splitting. +- [#5456](https://github.com/influxdata/telegraf/issues/5456): Disable results by row in azuredb query. 
## v1.9.4 [2019-02-05] From 0882479cbfb8fa7823f5f1b7b7733d65b190c83a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 25 Feb 2019 11:04:34 -0800 Subject: [PATCH 0625/1815] Add command logging to snmp input at debug level (#5474) --- plugins/inputs/snmp/snmp.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 112e85c7c..24250c22a 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -4,6 +4,7 @@ import ( "bufio" "bytes" "fmt" + "log" "math" "net" "os/exec" @@ -15,7 +16,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" - + "github.com/influxdata/wlog" "github.com/soniah/gosnmp" ) @@ -84,6 +85,14 @@ var execCommand = exec.Command // execCmd executes the specified command, returning the STDOUT content. // If command exits with error status, the output is captured into the returned error. func execCmd(arg0 string, args ...string) ([]byte, error) { + if wlog.LogLevel() == wlog.DEBUG { + quoted := make([]string, 0, len(args)) + for _, arg := range args { + quoted = append(quoted, fmt.Sprintf("%q", arg)) + } + log.Printf("D! [inputs.snmp] Executing %q %s", arg0, strings.Join(quoted, " ")) + } + out, err := execCommand(arg0, args...).Output() if err != nil { if err, ok := err.(*exec.ExitError); ok { From eb794ec30fa9f422ba839f7435f5c64d8bca771f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 25 Feb 2019 11:11:25 -0800 Subject: [PATCH 0626/1815] Log the protocol and address that socket_listener is listening on (#5454) --- .../inputs/socket_listener/socket_listener.go | 25 ++++++++++++------- .../socket_listener/socket_listener_test.go | 17 +++++++++++-- 2 files changed, 31 insertions(+), 11 deletions(-) diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go index c83f3eb68..d81c45994 100644 --- a/plugins/inputs/socket_listener/socket_listener.go +++ b/plugins/inputs/socket_listener/socket_listener.go @@ -242,14 +242,17 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { return fmt.Errorf("invalid service address: %s", sl.ServiceAddress) } - if spl[0] == "unix" || spl[0] == "unixpacket" || spl[0] == "unixgram" { + protocol := spl[0] + addr := spl[1] + + if protocol == "unix" || protocol == "unixpacket" || protocol == "unixgram" { // no good way of testing for "file does not exist". // Instead just ignore error and blow up when we try to listen, which will // indicate "address already in use" if file existed and we couldn't remove. - os.Remove(spl[1]) + os.Remove(addr) } - switch spl[0] { + switch protocol { case "tcp", "tcp4", "tcp6", "unix", "unixpacket": var ( err error @@ -262,14 +265,16 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { } if tlsCfg == nil { - l, err = net.Listen(spl[0], spl[1]) + l, err = net.Listen(protocol, addr) } else { - l, err = tls.Listen(spl[0], spl[1], tlsCfg) + l, err = tls.Listen(protocol, addr, tlsCfg) } if err != nil { return err } + log.Printf("I! 
[inputs.socket_listener] Listening on %s://%s", protocol, l.Addr()) + ssl := &streamSocketListener{ Listener: l, SocketListener: sl, @@ -279,7 +284,7 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { sl.Closer = ssl go ssl.listen() case "udp", "udp4", "udp6", "ip", "ip4", "ip6", "unixgram": - pc, err := net.ListenPacket(spl[0], spl[1]) + pc, err := net.ListenPacket(protocol, addr) if err != nil { return err } @@ -288,10 +293,12 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { if srb, ok := pc.(setReadBufferer); ok { srb.SetReadBuffer(int(sl.ReadBufferSize.Size)) } else { - log.Printf("W! Unable to set read buffer on a %s socket", spl[0]) + log.Printf("W! Unable to set read buffer on a %s socket", protocol) } } + log.Printf("I! [inputs.socket_listener] Listening on %s://%s", protocol, pc.LocalAddr()) + psl := &packetSocketListener{ PacketConn: pc, SocketListener: sl, @@ -300,10 +307,10 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { sl.Closer = psl go psl.listen() default: - return fmt.Errorf("unknown protocol '%s' in '%s'", spl[0], sl.ServiceAddress) + return fmt.Errorf("unknown protocol '%s' in '%s'", protocol, sl.ServiceAddress) } - if spl[0] == "unix" || spl[0] == "unixpacket" || spl[0] == "unixgram" { + if protocol == "unix" || protocol == "unixpacket" || protocol == "unixgram" { sl.Closer = unixCloser{path: spl[1], closer: sl.Closer} } diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index ae7fef8b9..b4415e092 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -3,6 +3,7 @@ package socket_listener import ( "bytes" "crypto/tls" + "io" "io/ioutil" "log" "net" @@ -13,6 +14,7 @@ import ( "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" + "github.com/influxdata/wlog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -23,11 +25,22 @@ var pki = testutil.NewPKI("../../../testutil/pki") // Should be called at the start of the test, and returns a function which should run at the end. 
func testEmptyLog(t *testing.T) func() { buf := bytes.NewBuffer(nil) - log.SetOutput(buf) + log.SetOutput(wlog.NewWriter(buf)) + + level := wlog.WARN + wlog.SetLevel(level) return func() { log.SetOutput(os.Stderr) - assert.Empty(t, string(buf.Bytes()), "log not empty") + + for { + line, err := buf.ReadBytes('\n') + if err != nil { + assert.Equal(t, io.EOF, err) + break + } + assert.Empty(t, string(line), "log not empty") + } } } From 1886676e14a1a26a4a6aa0de4b4968e256d6f023 Mon Sep 17 00:00:00 2001 From: Douglas Drinka Date: Mon, 25 Feb 2019 12:30:33 -0700 Subject: [PATCH 0627/1815] Support configuring a default timezone in JSON parser (#5472) --- internal/config/config.go | 9 +++++++++ internal/internal.go | 13 ++++++++++-- internal/internal_test.go | 31 +++++++++++++++++++++++++++++ plugins/parsers/json/README.md | 20 ++++++++++++++++++- plugins/parsers/json/parser.go | 3 ++- plugins/parsers/json/parser_test.go | 17 ++++++++++++++++ plugins/parsers/registry.go | 6 ++++++ 7 files changed, 95 insertions(+), 4 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index 504d8501c..4388d658d 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1362,6 +1362,14 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { } } + if node, ok := tbl.Fields["json_timezone"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.JSONTimezone = str.Value + } + } + } + if node, ok := tbl.Fields["data_type"]; ok { if kv, ok := node.(*ast.KeyValue); ok { if str, ok := kv.Value.(*ast.String); ok { @@ -1637,6 +1645,7 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { delete(tbl.Fields, "json_string_fields") delete(tbl.Fields, "json_time_format") delete(tbl.Fields, "json_time_key") + delete(tbl.Fields, "json_timezone") delete(tbl.Fields, "data_type") delete(tbl.Fields, "collectd_auth_file") delete(tbl.Fields, "collectd_security_level") diff --git a/internal/internal.go b/internal/internal.go index 368bc8bcf..b373c9c35 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -333,13 +333,18 @@ func CompressWithGzip(data io.Reader) (io.Reader, error) { return pipeReader, err } +// ParseTimestamp with no location provided parses a timestamp value as UTC +func ParseTimestamp(timestamp interface{}, format string) (time.Time, error) { + return ParseTimestampWithLocation(timestamp, format, "UTC") +} + // ParseTimestamp parses a timestamp value as a unix epoch of various precision. // // format = "unix": epoch is assumed to be in seconds and can come as number or string. Can have a decimal part. // format = "unix_ms": epoch is assumed to be in milliseconds and can come as number or string. Cannot have a decimal part. // format = "unix_us": epoch is assumed to be in microseconds and can come as number or string. Cannot have a decimal part. // format = "unix_ns": epoch is assumed to be in nanoseconds and can come as number or string. Cannot have a decimal part. 
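// As a quick, hedged illustration of the two entry points above (values
// adapted from the tests added in this patch; a non-"unix*" format falls
// through to a Go reference-time layout parse):
//
//	t, _ := ParseTimestamp("1550699434", "unix") // epoch seconds, assumed UTC
//	t, _ = ParseTimestampWithLocation(
//		"2019-02-20 21:50:34.029665",
//		"2006-01-02 15:04:05.000000",
//		"America/New_York")
//	_ = t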
-func ParseTimestamp(timestamp interface{}, format string) (time.Time, error) { +func ParseTimestampWithLocation(timestamp interface{}, format string, location string) (time.Time, error) { timeInt, timeFractional := int64(0), int64(0) timeEpochStr, ok := timestamp.(string) var err error @@ -355,7 +360,11 @@ func ParseTimestamp(timestamp interface{}, format string) (time.Time, error) { splitted := regexp.MustCompile("[.,]").Split(timeEpochStr, 2) timeInt, err = strconv.ParseInt(splitted[0], 10, 64) if err != nil { - return time.Parse(format, timeEpochStr) + loc, err := time.LoadLocation(location) + if err != nil { + return time.Time{}, fmt.Errorf("location: %s could not be loaded as a location", location) + } + return time.ParseInLocation(format, timeEpochStr, loc) } if len(splitted) == 2 { diff --git a/internal/internal_test.go b/internal/internal_test.go index 46b1b5962..681e1f808 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -270,3 +270,34 @@ func TestAlignDuration(t *testing.T) { }) } } + +func TestParseTimestamp(t *testing.T) { + time, err := ParseTimestamp("2019-02-20 21:50:34.029665", "2006-01-02 15:04:05.000000") + assert.Nil(t, err) + assert.EqualValues(t, int64(1550699434029665000), time.UnixNano()) + + time, err = ParseTimestamp("2019-02-20 21:50:34.029665-04:00", "2006-01-02 15:04:05.000000-07:00") + assert.Nil(t, err) + assert.EqualValues(t, int64(1550713834029665000), time.UnixNano()) + + time, err = ParseTimestamp("2019-02-20 21:50:34.029665", "2006-01-02 15:04:05.000000-06:00") + assert.NotNil(t, err) +} + +func TestParseTimestampWithLocation(t *testing.T) { + time, err := ParseTimestampWithLocation("2019-02-20 21:50:34.029665", "2006-01-02 15:04:05.000000", "UTC") + assert.Nil(t, err) + assert.EqualValues(t, int64(1550699434029665000), time.UnixNano()) + + time, err = ParseTimestampWithLocation("2019-02-20 21:50:34.029665", "2006-01-02 15:04:05.000000", "America/New_York") + assert.Nil(t, err) + assert.EqualValues(t, int64(1550717434029665000), time.UnixNano()) + + //Provided location is ignored if an offset is successfully parsed + time, err = ParseTimestampWithLocation("2019-02-20 21:50:34.029665-07:00", "2006-01-02 15:04:05.000000-07:00", "America/New_York") + assert.Nil(t, err) + assert.EqualValues(t, int64(1550724634029665000), time.UnixNano()) + + time, err = ParseTimestampWithLocation("2019-02-20 21:50:34.029665", "2006-01-02 15:04:05.000000", "InvalidTimeZone") + assert.NotNil(t, err) +} diff --git a/plugins/parsers/json/README.md b/plugins/parsers/json/README.md index 8b73b7214..60e1f3f9e 100644 --- a/plugins/parsers/json/README.md +++ b/plugins/parsers/json/README.md @@ -49,9 +49,21 @@ ignored unless specified in the `tag_key` or `json_string_fields` options. ## https://golang.org/pkg/time/#Time.Format ## ex: json_time_format = "Mon Jan 2 15:04:05 -0700 MST 2006" ## json_time_format = "2006-01-02T15:04:05Z07:00" + ## json_time_format = "01/02/2006 15:04:05" ## json_time_format = "unix" ## json_time_format = "unix_ms" json_time_format = "" + + ## Timezone allows you to provide an override for timestamps that + ## don't already include an offset + ## e.g. 04/06/2016 12:41:45 + ## + ## Default: "" which renders UTC + ## Options are as follows: + ## 1. Local -- interpret based on machine localtime + ## 2. "America/New_York" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones + ## 3. 
UTC -- or blank/unspecified, will return timestamp in UTC + json_timezone = "" ``` #### json_query @@ -62,7 +74,7 @@ query should contain a JSON object or an array of objects. Consult the GJSON [path syntax][gjson syntax] for details and examples. -#### json_time_key, json_time_format +#### json_time_key, json_time_format, json_timezone By default the current time will be used for all created metrics, to set the time using the JSON document you can use the `json_time_key` and @@ -77,6 +89,12 @@ the Go "reference time" which is defined to be the specific time: Consult the Go [time][time parse] package for details and additional examples on how to set the time format. +When parsing times that don't include a timezone specifier, times are assumed +to be UTC. To default to another timezone, or to local time, specify the +`json_timezone` option. This option should be set to a +[Unix TZ value](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), +such as `America/New_York`, to `Local` to utilize the system timezone, or to `UTC`. + ### Examples #### Basic Parsing diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index 2f939a84f..ebe31fd23 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -28,6 +28,7 @@ type JSONParser struct { JSONQuery string JSONTimeKey string JSONTimeFormat string + JSONTimezone string DefaultTags map[string]string } @@ -82,7 +83,7 @@ func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]i return nil, err } - nTime, err = internal.ParseTimestamp(f.Fields[p.JSONTimeKey], p.JSONTimeFormat) + nTime, err = internal.ParseTimestampWithLocation(f.Fields[p.JSONTimeKey], p.JSONTimeFormat, p.JSONTimezone) if err != nil { return nil, err } diff --git a/plugins/parsers/json/parser_test.go b/plugins/parsers/json/parser_test.go index 382afcd35..2db9ad78f 100644 --- a/plugins/parsers/json/parser_test.go +++ b/plugins/parsers/json/parser_test.go @@ -599,6 +599,23 @@ func TestTimeParser(t *testing.T) { require.Equal(t, false, metrics[0].Time() == metrics[1].Time()) } +func TestTimeParserWithTimezone(t *testing.T) { + testString := `{ + "time": "04 Jan 06 15:04" + }` + + parser := JSONParser{ + MetricName: "json_test", + JSONTimeKey: "time", + JSONTimeFormat: "02 Jan 06 15:04", + JSONTimezone: "America/New_York", + } + metrics, err := parser.Parse([]byte(testString)) + require.NoError(t, err) + require.Equal(t, 1, len(metrics)) + require.EqualValues(t, int64(1136405040000000000), metrics[0].Time().UnixNano()) +} + func TestUnixTimeParser(t *testing.T) { testString := `[ { diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index c6ef8ae1e..ffa7d142f 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -85,6 +85,9 @@ type Config struct { // time format JSONTimeFormat string `toml:"json_time_format"` + // default timezone + JSONTimezone string `toml:"json_timezone"` + // Authentication file for collectd CollectdAuthFile string `toml:"collectd_auth_file"` // One of none (default), sign, or encrypt @@ -152,6 +155,7 @@ func NewParser(config *Config) (Parser, error) { config.JSONQuery, config.JSONTimeKey, config.JSONTimeFormat, + config.JSONTimezone, config.DefaultTags) case "value": parser, err = NewValueParser(config.MetricName, @@ -275,6 +279,7 @@ func newJSONParser( jsonQuery string, timeKey string, timeFormat string, + timezone string, defaultTags map[string]string, ) Parser { parser := &json.JSONParser{ @@ -285,6 +290,7 @@ func newJSONParser( JSONQuery: 
jsonQuery, JSONTimeKey: timeKey, JSONTimeFormat: timeFormat, + JSONTimezone: timezone, DefaultTags: defaultTags, } return parser From 2506da80c2ead0535259aae71e3b5497a1c85af2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 25 Feb 2019 11:31:28 -0800 Subject: [PATCH 0628/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bd62eaf78..991283efc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,7 @@ - [#5165](https://github.com/influxdata/telegraf/pull/5165): Add resource path based filtering to vsphere input. - [#5417](https://github.com/influxdata/telegraf/pull/5417): Add rcode tag and field to dns_query input. - [#5453](https://github.com/influxdata/telegraf/pull/5453): Support Azure Sovereign Environments with endpoint_url option. +- [#5472](https://github.com/influxdata/telegraf/pull/5472): Support configuring a default timezone in JSON parser. #### Bugfixes From 9d8a574ac77e42fbfafb3375d0c15c7a8eba44e4 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 25 Feb 2019 13:02:57 -0700 Subject: [PATCH 0629/1815] Add kinesis input plugin (#5341) --- Gopkg.lock | 18 + Gopkg.toml | 4 + plugins/inputs/all/all.go | 1 + plugins/inputs/kinesis_consumer/README.md | 90 +++++ .../kinesis_consumer/kinesis_consumer.go | 351 ++++++++++++++++++ plugins/outputs/kinesis/kinesis.go | 4 +- 6 files changed, 466 insertions(+), 2 deletions(-) create mode 100644 plugins/inputs/kinesis_consumer/README.md create mode 100644 plugins/inputs/kinesis_consumer/kinesis_consumer.go diff --git a/Gopkg.lock b/Gopkg.lock index 233fd9f3f..97c69b1b7 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -187,7 +187,11 @@ "private/protocol/rest", "private/protocol/xml/xmlutil", "service/cloudwatch", + "service/dynamodb", + "service/dynamodb/dynamodbattribute", + "service/dynamodb/dynamodbiface", "service/kinesis", + "service/kinesis/kinesisiface", "service/sts", ] pruneopts = "" @@ -566,6 +570,17 @@ pruneopts = "" revision = "e80d13ce29ede4452c43dea11e79b9bc8a15b478" +[[projects]] + branch = "master" + digest = "1:c191ec4c50122cdfeedba867d25bbe2ed63ed6dd2130729220c6c0d654361ea4" + name = "github.com/harlow/kinesis-consumer" + packages = [ + ".", + "checkpoint/ddb", + ] + pruneopts = "" + revision = "2f58b136fee036f5de256b81a8461cc724fdf9df" + [[projects]] digest = "1:e7224669901bab4094e6d6697c136557b7177db6ceb01b7fc8b20d08f4b5aacd" name = "github.com/hashicorp/consul" @@ -1525,6 +1540,7 @@ "github.com/aws/aws-sdk-go/aws/credentials/stscreds", "github.com/aws/aws-sdk-go/aws/session", "github.com/aws/aws-sdk-go/service/cloudwatch", + "github.com/aws/aws-sdk-go/service/dynamodb", "github.com/aws/aws-sdk-go/service/kinesis", "github.com/bsm/sarama-cluster", "github.com/couchbase/go-couchbase", @@ -1554,6 +1570,8 @@ "github.com/golang/protobuf/ptypes/timestamp", "github.com/google/go-cmp/cmp", "github.com/gorilla/mux", + "github.com/harlow/kinesis-consumer", + "github.com/harlow/kinesis-consumer/checkpoint/ddb", "github.com/hashicorp/consul/api", "github.com/influxdata/go-syslog", "github.com/influxdata/go-syslog/nontransparent", diff --git a/Gopkg.toml b/Gopkg.toml index e14f5e763..cd7825ccb 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -254,6 +254,10 @@ name = "github.com/karrick/godirwalk" version = "1.7.5" +[[override]] + name = "github.com/harlow/kinesis-consumer" + branch = "master" + [[constraint]] branch = "master" name = "github.com/kubernetes/apimachinery" diff --git a/plugins/inputs/all/all.go 
b/plugins/inputs/all/all.go index fe440bbba..e03648036 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -63,6 +63,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/kernel" _ "github.com/influxdata/telegraf/plugins/inputs/kernel_vmstat" _ "github.com/influxdata/telegraf/plugins/inputs/kibana" + _ "github.com/influxdata/telegraf/plugins/inputs/kinesis_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/kube_inventory" _ "github.com/influxdata/telegraf/plugins/inputs/kubernetes" _ "github.com/influxdata/telegraf/plugins/inputs/leofs" diff --git a/plugins/inputs/kinesis_consumer/README.md b/plugins/inputs/kinesis_consumer/README.md new file mode 100644 index 000000000..d6f3a707b --- /dev/null +++ b/plugins/inputs/kinesis_consumer/README.md @@ -0,0 +1,90 @@ +# Kinesis Consumer Input Plugin + +The [Kinesis][kinesis] consumer plugin reads from a Kinesis data stream +and creates metrics using one of the supported [input data formats][]. + + +### Configuration + +```toml +[[inputs.kinesis_consumer]] + ## Amazon REGION of kinesis endpoint. + region = "ap-southeast-2" + + ## Amazon Credentials + ## Credentials are loaded in the following order + ## 1) Assumed credentials via STS if role_arn is specified + ## 2) explicit credentials from 'access_key' and 'secret_key' + ## 3) shared profile from 'profile' + ## 4) environment variables + ## 5) shared credentials file + ## 6) EC2 Instance Profile + # access_key = "" + # secret_key = "" + # token = "" + # role_arn = "" + # profile = "" + # shared_credential_file = "" + + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + + ## Kinesis StreamName must exist prior to starting telegraf. + streamname = "StreamName" + + ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported) + # shard_iterator_type = "TRIM_HORIZON" + + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + + ## Optional + ## Configuration for a dynamodb checkpoint + [inputs.kinesis_consumer.checkpoint_dynamodb] + ## unique name for this consumer + app_name = "default" + table_name = "default" +``` + + +#### Required AWS IAM permissions + +Kinesis: + - DescribeStream + - GetRecords + - GetShardIterator + +DynamoDB: + - GetItem + - PutItem + + +#### DynamoDB Checkpoint + +The DynamoDB checkpoint stores the last processed record in a DynamoDB. 
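As a rough sketch of creating such a table with aws-sdk-go (a sketch only — the table name, region, and capacities here are placeholders; only the two string keys restated below are prescribed by the checkpoint implementation):

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession(
		aws.NewConfig().WithRegion("ap-southeast-2")))) // placeholder region

	// "namespace" is the partition (HASH) key and "shard_id" the sort (RANGE)
	// key, matching the schema documented just below.
	_, err := svc.CreateTable(&dynamodb.CreateTableInput{
		TableName: aws.String("default"), // must match table_name in the config
		AttributeDefinitions: []*dynamodb.AttributeDefinition{
			{AttributeName: aws.String("namespace"), AttributeType: aws.String("S")},
			{AttributeName: aws.String("shard_id"), AttributeType: aws.String("S")},
		},
		KeySchema: []*dynamodb.KeySchemaElement{
			{AttributeName: aws.String("namespace"), KeyType: aws.String("HASH")},
			{AttributeName: aws.String("shard_id"), KeyType: aws.String("RANGE")},
		},
		ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
			ReadCapacityUnits:  aws.Int64(1), // placeholder capacities
			WriteCapacityUnits: aws.Int64(1),
		},
	})
	if err != nil {
		panic(err)
	}
}
```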
To leverage +this functionality, create a table with the following string type keys: + +``` +Partition key: namespace +Sort key: shard_id +``` + + +[kinesis]: https://aws.amazon.com/kinesis/ +[input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go new file mode 100644 index 000000000..b9b98243b --- /dev/null +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -0,0 +1,351 @@ +package kinesis_consumer + +import ( + "context" + "fmt" + "log" + "math/big" + "strings" + "sync" + "time" + + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/kinesis" + consumer "github.com/harlow/kinesis-consumer" + "github.com/harlow/kinesis-consumer/checkpoint/ddb" + + "github.com/influxdata/telegraf" + internalaws "github.com/influxdata/telegraf/internal/config/aws" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" +) + +type ( + DynamoDB struct { + AppName string `toml:"app_name"` + TableName string `toml:"table_name"` + } + + KinesisConsumer struct { + Region string `toml:"region"` + AccessKey string `toml:"access_key"` + SecretKey string `toml:"secret_key"` + RoleARN string `toml:"role_arn"` + Profile string `toml:"profile"` + Filename string `toml:"shared_credential_file"` + Token string `toml:"token"` + EndpointURL string `toml:"endpoint_url"` + StreamName string `toml:"streamname"` + ShardIteratorType string `toml:"shard_iterator_type"` + DynamoDB *DynamoDB `toml:"checkpoint_dynamodb"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + + cons *consumer.Consumer + parser parsers.Parser + cancel context.CancelFunc + ctx context.Context + acc telegraf.TrackingAccumulator + sem chan struct{} + + checkpoint consumer.Checkpoint + checkpoints map[string]checkpoint + records map[telegraf.TrackingID]string + checkpointTex sync.Mutex + recordsTex sync.Mutex + wg sync.WaitGroup + + lastSeqNum *big.Int + } + + checkpoint struct { + streamName string + shardID string + } +) + +const ( + defaultMaxUndeliveredMessages = 1000 +) + +// this is the largest sequence number allowed - https://docs.aws.amazon.com/kinesis/latest/APIReference/API_SequenceNumberRange.html +var maxSeq = strToBint(strings.Repeat("9", 129)) + +var sampleConfig = ` + ## Amazon REGION of kinesis endpoint. + region = "ap-southeast-2" + + ## Amazon Credentials + ## Credentials are loaded in the following order + ## 1) Assumed credentials via STS if role_arn is specified + ## 2) explicit credentials from 'access_key' and 'secret_key' + ## 3) shared profile from 'profile' + ## 4) environment variables + ## 5) shared credentials file + ## 6) EC2 Instance Profile + # access_key = "" + # secret_key = "" + # token = "" + # role_arn = "" + # profile = "" + # shared_credential_file = "" + + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + + ## Kinesis StreamName must exist prior to starting telegraf. + streamname = "StreamName" + + ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported) + # shard_iterator_type = "TRIM_HORIZON" + + ## Maximum messages to read from the broker that have not been written by an + ## output. 
For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + + ## Optional + ## Configuration for a dynamodb checkpoint + [inputs.kinesis_consumer.checkpoint_dynamodb] + ## unique name for this consumer + app_name = "default" + table_name = "default" +` + +func (k *KinesisConsumer) SampleConfig() string { + return sampleConfig +} + +func (k *KinesisConsumer) Description() string { + return "Configuration for the AWS Kinesis input." +} + +func (k *KinesisConsumer) SetParser(parser parsers.Parser) { + k.parser = parser +} + +func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { + credentialConfig := &internalaws.CredentialConfig{ + Region: k.Region, + AccessKey: k.AccessKey, + SecretKey: k.SecretKey, + RoleARN: k.RoleARN, + Profile: k.Profile, + Filename: k.Filename, + Token: k.Token, + EndpointURL: k.EndpointURL, + } + configProvider := credentialConfig.Credentials() + client := kinesis.New(configProvider) + + k.checkpoint = &noopCheckpoint{} + if k.DynamoDB != nil { + var err error + k.checkpoint, err = ddb.New( + k.DynamoDB.AppName, + k.DynamoDB.TableName, + ddb.WithDynamoClient(dynamodb.New((&internalaws.CredentialConfig{ + Region: k.Region, + AccessKey: k.AccessKey, + SecretKey: k.SecretKey, + RoleARN: k.RoleARN, + Profile: k.Profile, + Filename: k.Filename, + Token: k.Token, + EndpointURL: k.EndpointURL, + }).Credentials())), + ddb.WithMaxInterval(time.Second*10), + ) + if err != nil { + return err + } + } + + cons, err := consumer.New( + k.StreamName, + consumer.WithClient(client), + consumer.WithShardIteratorType(k.ShardIteratorType), + consumer.WithCheckpoint(k), + ) + if err != nil { + return err + } + + k.cons = cons + + k.acc = ac.WithTracking(k.MaxUndeliveredMessages) + k.records = make(map[telegraf.TrackingID]string, k.MaxUndeliveredMessages) + k.checkpoints = make(map[string]checkpoint, k.MaxUndeliveredMessages) + k.sem = make(chan struct{}, k.MaxUndeliveredMessages) + + ctx := context.Background() + ctx, k.cancel = context.WithCancel(ctx) + + k.wg.Add(1) + go func() { + defer k.wg.Done() + k.onDelivery(ctx) + }() + + k.wg.Add(1) + go func() { + defer k.wg.Done() + err := k.cons.Scan(ctx, func(r *consumer.Record) consumer.ScanStatus { + select { + case <-ctx.Done(): + return consumer.ScanStatus{Error: ctx.Err()} + case k.sem <- struct{}{}: + break + } + err := k.onMessage(k.acc, r) + if err != nil { + k.sem <- struct{}{} + return consumer.ScanStatus{Error: err} + } + + return consumer.ScanStatus{} + }) + if err != nil { + k.cancel() + log.Printf("E! 
[inputs.kinesis_consumer] Scan encountered an error - %s", err.Error()) + k.cons = nil + } + }() + + return nil +} + +func (k *KinesisConsumer) Start(ac telegraf.Accumulator) error { + err := k.connect(ac) + if err != nil { + return err + } + + return nil +} + +func (k *KinesisConsumer) onMessage(acc telegraf.TrackingAccumulator, r *consumer.Record) error { + metrics, err := k.parser.Parse(r.Data) + if err != nil { + return err + } + + k.recordsTex.Lock() + id := acc.AddTrackingMetricGroup(metrics) + k.records[id] = *r.SequenceNumber + k.recordsTex.Unlock() + + return nil +} + +func (k *KinesisConsumer) onDelivery(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case info := <-k.acc.Delivered(): + k.recordsTex.Lock() + sequenceNum, ok := k.records[info.ID()] + if !ok { + k.recordsTex.Unlock() + continue + } + <-k.sem + delete(k.records, info.ID()) + k.recordsTex.Unlock() + + if info.Delivered() { + k.checkpointTex.Lock() + chk, ok := k.checkpoints[sequenceNum] + if !ok { + k.checkpointTex.Unlock() + continue + } + delete(k.checkpoints, sequenceNum) + k.checkpointTex.Unlock() + + // at least once + if strToBint(sequenceNum).Cmp(k.lastSeqNum) > 0 { + continue + } + + k.lastSeqNum = strToBint(sequenceNum) + k.checkpoint.Set(chk.streamName, chk.shardID, sequenceNum) + } else { + log.Println("D! [inputs.kinesis_consumer] Metric group failed to process") + } + } + } +} + +var negOne *big.Int + +func strToBint(s string) *big.Int { + n, ok := new(big.Int).SetString(s, 10) + if !ok { + return negOne + } + return n +} + +func (k *KinesisConsumer) Stop() { + k.cancel() + k.wg.Wait() +} + +func (k *KinesisConsumer) Gather(acc telegraf.Accumulator) error { + if k.cons == nil { + return k.connect(acc) + } + k.lastSeqNum = maxSeq + + return nil +} + +// Get wraps the checkpoint's Get function (called by consumer library) +func (k *KinesisConsumer) Get(streamName, shardID string) (string, error) { + return k.checkpoint.Get(streamName, shardID) +} + +// Set wraps the checkpoint's Set function (called by consumer library) +func (k *KinesisConsumer) Set(streamName, shardID, sequenceNumber string) error { + if sequenceNumber == "" { + return fmt.Errorf("sequence number should not be empty") + } + + k.checkpointTex.Lock() + k.checkpoints[sequenceNumber] = checkpoint{streamName: streamName, shardID: shardID} + k.checkpointTex.Unlock() + + return nil +} + +type noopCheckpoint struct{} + +func (n noopCheckpoint) Set(string, string, string) error { return nil } +func (n noopCheckpoint) Get(string, string) (string, error) { return "", nil } + +func init() { + negOne, _ = new(big.Int).SetString("-1", 10) + + inputs.Add("kinesis_consumer", func() telegraf.Input { + return &KinesisConsumer{ + ShardIteratorType: "TRIM_HORIZON", + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + lastSeqNum: maxSeq, + } + }) +} diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index d2f52abcd..497676486 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -236,7 +236,7 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { if sz == 500 { // Max Messages Per PutRecordRequest is 500 elapsed := writekinesis(k, r) - log.Printf("I! Wrote a %d point batch to Kinesis in %+v.", sz, elapsed) + log.Printf("D! Wrote a %d point batch to Kinesis in %+v.", sz, elapsed) sz = 0 r = nil } @@ -244,7 +244,7 @@ } if sz > 0 { elapsed := writekinesis(k, r) - log.Printf("I! 
Wrote a %d point batch to Kinesis in %+v.", sz, elapsed) + log.Printf("D! Wrote a %d point batch to Kinesis in %+v.", sz, elapsed) } return nil From 62678fae0622f64767a1e3707a35d8237d0ee980 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 25 Feb 2019 12:06:44 -0800 Subject: [PATCH 0630/1815] Add kinesis_consumer documentation links --- CHANGELOG.md | 1 + README.md | 1 + docs/LICENSE_OF_DEPENDENCIES.md | 1 + 3 files changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 991283efc..16e5cfca1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ #### New Inputs - [cloud_pubsub](/plugins/inputs/cloud_pubsub/README.md) - Contributed by @emilymye +- [kinesis_consumer](/plugins/inputs/kinesis_consumer/README.md) - Contributed by @influxdata - [kube_inventory](/plugins/inputs/kube_inventory/README.md) - Contributed by @influxdata - [neptune_apex](/plugins/inputs/neptune_apex/README.md) - Contributed by @MaxRenaud - [nginx_upstream_check](/plugins/inputs/nginx_upstream_check/README.md) - Contributed by @dmitryilyin diff --git a/README.md b/README.md index 96f797c73..62fe04afd 100644 --- a/README.md +++ b/README.md @@ -190,6 +190,7 @@ For documentation on the latest development code see the [documentation index][d * [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry) * [kafka_consumer](./plugins/inputs/kafka_consumer) * [kapacitor](./plugins/inputs/kapacitor) +* [kinesis](./plugins/inputs/kinesis_consumer) * [kernel](./plugins/inputs/kernel) * [kernel_vmstat](./plugins/inputs/kernel_vmstat) * [kibana](./plugins/inputs/kibana) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 3f7fab663..2bd6ea01d 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -49,6 +49,7 @@ following works: - github.com/gorilla/context [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/context/blob/master/LICENSE) - github.com/gorilla/mux [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/mux/blob/master/LICENSE) - github.com/hailocab/go-hostpool [MIT License](https://github.com/hailocab/go-hostpool/blob/master/LICENSE) +- github.com/harlow/kinesis-consumer [MIT License](https://github.com/harlow/kinesis-consumer/blob/master/MIT-LICENSE) - github.com/hashicorp/consul [Mozilla Public License 2.0](https://github.com/hashicorp/consul/blob/master/LICENSE) - github.com/hashicorp/go-cleanhttp [Mozilla Public License 2.0](https://github.com/hashicorp/go-cleanhttp/blob/master/LICENSE) - github.com/hashicorp/go-rootcerts [Mozilla Public License 2.0](https://github.com/hashicorp/go-rootcerts/blob/master/LICENSE) From 7fa8b3306691dfb4e671b272e5086cd9ddca23aa Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 25 Feb 2019 16:32:05 -0700 Subject: [PATCH 0631/1815] Add backwards compatibility fields in ceph usage and pool stats (#5466) --- plugins/inputs/ceph/README.md | 213 ++++++++++++++++++------------- plugins/inputs/ceph/ceph.go | 67 +++++----- plugins/inputs/ceph/ceph_test.go | 56 +++++--- 3 files changed, 200 insertions(+), 136 deletions(-) diff --git a/plugins/inputs/ceph/README.md b/plugins/inputs/ceph/README.md index c53f908ab..88ca4aff7 100644 --- a/plugins/inputs/ceph/README.md +++ b/plugins/inputs/ceph/README.md @@ -2,6 +2,8 @@ Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. +Ceph has introduced a Telegraf and Influx plugin in the 13.x Mimic release. 
+
 
 *Admin Socket Stats*
 
 This gatherer works by scanning the configured SocketDir for OSD and MON socket files.  When it finds
@@ -85,69 +87,13 @@ the cluster. The currently supported commands are:
   gather_cluster_stats = false
 ```
 
-### Measurements & Fields:
+### Metrics:
 
 *Admin Socket Stats*
 
 All fields are collected under the **ceph** measurement and stored as float64s. For a full list of fields, see the sample perf dumps in ceph_test.go.
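
Note: to inspect the raw perf dump a live node would produce for this gatherer, you can query an admin socket directly; a typical invocation looks like the following (the socket path is illustrative and varies per deployment):

```
ceph --admin-daemon /var/run/ceph/ceph-osd.0.asok perf dump
```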
 
-*Cluster Stats*
-
-* ceph\_osdmap
-  * epoch (float)
-  * full (boolean)
-  * nearfull (boolean)
-  * num\_in\_osds (float)
-  * num\_osds (float)
-  * num\_remremapped\_pgs (float)
-  * num\_up\_osds (float)
-
-* ceph\_pgmap
-  * bytes\_avail (float)
-  * bytes\_total (float)
-  * bytes\_used (float)
-  * data\_bytes (float)
-  * num\_pgs (float)
-  * op\_per\_sec (float, ceph < 10)
-  * read_op\_per\_sec (float)
-  * write_op\_per\_sec (float)
-  * read\_bytes\_sec (float)
-  * version (float)
-  * write\_bytes\_sec (float)
-  * recovering\_bytes\_per\_sec (float)
-  * recovering\_keys\_per\_sec (float)
-  * recovering\_objects\_per\_sec (float)
-
-* ceph\_pgmap\_state
-  * count (float)
-
-* ceph\_usage
-  * bytes\_used (float)
-  * kb\_used (float)
-  * max\_avail (float)
-  * objects (float)
-
-* ceph\_pool\_usage
-  * bytes\_used (float)
-  * kb\_used (float)
-  * max\_avail (float)
-  * objects (float)
-
-* ceph\_pool\_stats
-  * op\_per\_sec (float, ceph < 10)
-  * read_op\_per\_sec (float)
-  * write_op\_per\_sec (float)
-  * read\_bytes\_sec (float)
-  * write\_bytes\_sec (float)
-  * recovering\_object\_per\_sec (float)
-  * recovering\_bytes\_per\_sec (float)
-  * recovering\_keys\_per\_sec (float)
-
-### Tags:
-
-*Admin Socket Stats*
-
-All measurements will have the following tags:
+All admin measurements will have the following tags:
 
 - type: either 'osd' or 'mon' to indicate which type of node was queried
 - id: a unique string identifier, parsed from the socket file name for the node
@@ -190,38 +136,129 @@ All measurements will have the following tags:
 
 *Cluster Stats*
 
-* ceph\_pgmap\_state has the following tags:
-  * state (state for which the value applies e.g. active+clean, active+remapped+backfill)
-* ceph\_pool\_usage has the following tags:
-  * id
-  * name
-* ceph\_pool\_stats has the following tags:
-  * id
-  * name
+- ceph_osdmap
+  - fields:
+    - epoch (float)
+    - num_osds (float)
+    - num_up_osds (float)
+    - num_in_osds (float)
+    - full (bool)
+    - nearfull (bool)
+    - num_remapped_pgs (float)
+
++ ceph_pgmap
+  - fields:
+    - version (float)
+    - num_pgs (float)
+    - data_bytes (float)
+    - bytes_used (float)
+    - bytes_avail (float)
+    - bytes_total (float)
+    - read_bytes_sec (float)
+    - write_bytes_sec (float)
+    - op_per_sec (float, exists only in ceph <10)
+    - read_op_per_sec (float)
+    - write_op_per_sec (float)
+
+- ceph_pgmap_state
+  - tags:
+    - state
+  - fields:
+    - count (float)
+
++ ceph_usage
+  - fields:
+    - total_bytes (float)
+    - total_used_bytes (float)
+    - total_avail_bytes (float)
+    - total_space (float, exists only in ceph <0.84)
+    - total_used (float, exists only in ceph <0.84)
+    - total_avail (float, exists only in ceph <0.84)
+
+- ceph_pool_usage
+  - tags:
+    - name
+  - fields:
+    - kb_used (float)
+    - bytes_used (float)
+    - objects (float)
+    - percent_used (float)
+    - max_avail (float)
+
++ ceph_pool_stats
+  - tags:
+    - name
+  - fields:
+    - read_bytes_sec (float)
+    - write_bytes_sec (float)
+    - op_per_sec (float, exists only in ceph <10)
+    - read_op_per_sec (float)
+    - write_op_per_sec (float)
+    - recovering_objects_per_sec (float)
+    - recovering_bytes_per_sec (float)
+    - recovering_keys_per_sec (float)
+
 ### Example Output:
 
-*Admin Socket Stats*
-
-telegraf --config /etc/telegraf/telegraf.conf --config-directory /etc/telegraf/telegraf.d --input-filter ceph --test
-* Plugin: ceph, Collection 1
-> ceph,collection=paxos, id=node-2,role=openstack,type=mon accept_timeout=0,begin=14931264,begin_bytes.avgcount=14931264,begin_bytes.sum=180309683362,begin_keys.avgcount=0,begin_keys.sum=0,begin_latency.avgcount=14931264,begin_latency.sum=9293.29589,collect=1,collect_bytes.avgcount=1,collect_bytes.sum=24,collect_keys.avgcount=1,collect_keys.sum=1,collect_latency.avgcount=1,collect_latency.sum=0.00028,collect_timeout=0,collect_uncommitted=0,commit=14931264,commit_bytes.avgcount=0,commit_bytes.sum=0,commit_keys.avgcount=0,commit_keys.sum=0,commit_latency.avgcount=0,commit_latency.sum=0,lease_ack_timeout=0,lease_timeout=0,new_pn=0,new_pn_latency.avgcount=0,new_pn_latency.sum=0,refresh=14931264,refresh_latency.avgcount=14931264,refresh_latency.sum=8706.98498,restart=4,share_state=0,share_state_bytes.avgcount=0,share_state_bytes.sum=0,share_state_keys.avgcount=0,share_state_keys.sum=0,start_leader=0,start_peon=1,store_state=14931264,store_state_bytes.avgcount=14931264,store_state_bytes.sum=353119959211,store_state_keys.avgcount=14931264,store_state_keys.sum=289807523,store_state_latency.avgcount=14931264,store_state_latency.sum=10952.835724 1462821234814535148
-> ceph,collection=throttle-mon_client_bytes,id=node-2,type=mon get=1413017,get_or_fail_fail=0,get_or_fail_success=0,get_sum=71211705,max=104857600,put=1413013,put_sum=71211459,take=0,take_sum=0,val=246,wait.avgcount=0,wait.sum=0 1462821234814737219
-> ceph,collection=throttle-mon_daemon_bytes,id=node-2,type=mon get=4058121,get_or_fail_fail=0,get_or_fail_success=0,get_sum=6027348117,max=419430400,put=4058121,put_sum=6027348117,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814815661
-> ceph,collection=throttle-msgr_dispatch_throttler-mon,id=node-2,type=mon get=54276277,get_or_fail_fail=0,get_or_fail_success=0,get_sum=370232877040,max=104857600,put=54276277,put_sum=370232877040,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814872064
-
- *Cluster Stats* -
-> ceph_osdmap,host=ceph-mon-0 epoch=170772,full=false,nearfull=false,num_in_osds=340,num_osds=340,num_remapped_pgs=0,num_up_osds=340 1468841037000000000
-> ceph_pgmap,host=ceph-mon-0 bytes_avail=634895531270144,bytes_total=812117151809536,bytes_used=177221620539392,data_bytes=56979991615058,num_pgs=22952,op_per_sec=15869,read_bytes_sec=43956026,version=39387592,write_bytes_sec=165344818 1468841037000000000
-> ceph_pgmap_state,host=ceph-mon-0,state=active+clean count=22952 1468928660000000000
-> ceph_pgmap_state,host=ceph-mon-0,state=active+degraded count=16 1468928660000000000
-> ceph_usage,host=ceph-mon-0 total_avail_bytes=634895514791936,total_bytes=812117151809536,total_used_bytes=177221637017600 1468841037000000000
-> ceph_pool_usage,host=ceph-mon-0,id=150,name=cinder.volumes bytes_used=12648553794802,kb_used=12352103316,max_avail=154342562489244,objects=3026295 1468841037000000000
-> ceph_pool_usage,host=ceph-mon-0,id=182,name=cinder.volumes.flash bytes_used=8541308223964,kb_used=8341121313,max_avail=39388593563936,objects=2075066 1468841037000000000
-> ceph_pool_stats,host=ceph-mon-0,id=150,name=cinder.volumes op_per_sec=1706,read_bytes_sec=28671674,write_bytes_sec=29994541 1468841037000000000
-> ceph_pool_stats,host=ceph-mon-0,id=182,name=cinder.volumes.flash op_per_sec=9748,read_bytes_sec=9605524,write_bytes_sec=45593310 1468841037000000000
-
+``` +ceph_pool_stats,name=telegraf recovering_keys_per_sec=0,read_bytes_sec=0,write_bytes_sec=0,read_op_per_sec=0,write_op_per_sec=0,recovering_objects_per_sec=0,recovering_bytes_per_sec=0 1550658911000000000 +ceph_pool_usage,name=telegraf kb_used=0,bytes_used=0,objects=0 1550658911000000000 +ceph_pgmap_state,state=undersized+peered count=30 1550658910000000000 +ceph_pgmap bytes_total=10733223936,read_op_per_sec=0,write_op_per_sec=0,num_pgs=30,data_bytes=0,bytes_avail=9654697984,read_bytes_sec=0,write_bytes_sec=0,version=0,bytes_used=1078525952 1550658910000000000 +ceph_osdmap num_up_osds=1,num_in_osds=1,full=false,nearfull=false,num_remapped_pgs=0,epoch=34,num_osds=1 1550658910000000000 +``` + +*Admin Socket Stats* + +``` +ceph,collection=recoverystate_perf,id=0,type=osd reprecovering_latency.avgtime=0,repwaitrecoveryreserved_latency.avgcount=0,waitlocalbackfillreserved_latency.sum=0,reset_latency.avgtime=0.000090333,peering_latency.avgtime=0.824434333,stray_latency.avgtime=0.000030502,waitlocalrecoveryreserved_latency.sum=0,backfilling_latency.avgtime=0,reprecovering_latency.avgcount=0,incomplete_latency.avgtime=0,down_latency.avgtime=0,recovered_latency.sum=0.009692406,peering_latency.avgcount=40,notrecovering_latency.sum=0,waitremoterecoveryreserved_latency.sum=0,reprecovering_latency.sum=0,waitlocalbackfillreserved_latency.avgtime=0,started_latency.sum=9066.701648888,backfilling_latency.sum=0,waitactingchange_latency.avgcount=0,start_latency.avgtime=0.000030178,recovering_latency.avgtime=0,notbackfilling_latency.avgcount=0,waitremotebackfillreserved_latency.avgtime=0,incomplete_latency.avgcount=0,replicaactive_latency.sum=0,getinfo_latency.avgtime=0.000025945,down_latency.sum=0,recovered_latency.avgcount=40,waitactingchange_latency.avgtime=0,notrecovering_latency.avgcount=0,waitupthru_latency.sum=32.970965509,waitupthru_latency.avgtime=0.824274137,waitlocalrecoveryreserved_latency.avgcount=0,waitremoterecoveryreserved_latency.avgcount=0,activating_latency.avgcount=40,activating_latency.sum=0.83428466,activating_latency.avgtime=0.020857116,start_latency.avgcount=50,waitremotebackfillreserved_latency.avgcount=0,down_latency.avgcount=0,started_latency.avgcount=10,getlog_latency.avgcount=40,stray_latency.avgcount=10,notbackfilling_latency.sum=0,reset_latency.sum=0.00451665,active_latency.avgtime=906.505839265,repwaitbackfillreserved_latency.sum=0,waitactingchange_latency.sum=0,stray_latency.sum=0.000305022,waitremotebackfillreserved_latency.sum=0,repwaitrecoveryreserved_latency.avgtime=0,replicaactive_latency.avgtime=0,clean_latency.avgcount=10,waitremoterecoveryreserved_latency.avgtime=0,active_latency.avgcount=10,primary_latency.sum=9066.700828729,initial_latency.avgtime=0.000379351,waitlocalbackfillreserved_latency.avgcount=0,getinfo_latency.sum=0.001037815,reset_latency.avgcount=50,getlog_latency.sum=0.003079344,getlog_latency.avgtime=0.000076983,primary_latency.avgcount=10,repnotrecovering_latency.avgcount=0,initial_latency.sum=0.015174072,repwaitrecoveryreserved_latency.sum=0,replicaactive_latency.avgcount=0,clean_latency.avgtime=906.495755946,waitupthru_latency.avgcount=40,repnotrecovering_latency.sum=0,incomplete_latency.sum=0,active_latency.sum=9065.058392651,peering_latency.sum=32.977373355,repnotrecovering_latency.avgtime=0,notrecovering_latency.avgtime=0,waitlocalrecoveryreserved_latency.avgtime=0,repwaitbackfillreserved_latency.avgtime=0,recovering_latency.sum=0,getmissing_latency.sum=0.000902014,getmissing_latency.avgtime=0.00002255,clean_latency.sum=9064.957559467,getinfo_l
atency.avgcount=40,started_latency.avgtime=906.670164888,getmissing_latency.avgcount=40,notbackfilling_latency.avgtime=0,initial_latency.avgcount=40,recovered_latency.avgtime=0.00024231,repwaitbackfillreserved_latency.avgcount=0,backfilling_latency.avgcount=0,start_latency.sum=0.001508937,primary_latency.avgtime=906.670082872,recovering_latency.avgcount=0 1550658950000000000 +ceph,collection=throttle-msgr_dispatch_throttler-hb_back_server,id=0,type=osd put_sum=0,wait.avgtime=0,put=0,get_or_fail_success=0,wait.avgcount=0,val=0,get_sum=0,take=0,take_sum=0,max=104857600,get=0,get_or_fail_fail=0,wait.sum=0,get_started=0 1550658950000000000 +ceph,collection=throttle-msgr_dispatch_throttler-hb_front_client,id=0,type=osd wait.sum=0,val=0,take_sum=0,put=0,get_or_fail_success=0,put_sum=0,get=0,get_or_fail_fail=0,get_started=0,get_sum=0,wait.avgcount=0,wait.avgtime=0,max=104857600,take=0 1550658950000000000 +ceph,collection=bluefs,id=0,type=osd slow_used_bytes=0,wal_total_bytes=0,gift_bytes=1048576,log_compactions=0,logged_bytes=221184,files_written_sst=1,slow_total_bytes=0,bytes_written_wal=619403,bytes_written_sst=1517,reclaim_bytes=0,db_total_bytes=1086324736,wal_used_bytes=0,log_bytes=319488,num_files=10,files_written_wal=1,db_used_bytes=12582912 1550658950000000000 +ceph,collection=throttle-msgr_dispatch_throttler-ms_objecter,id=0,type=osd val=0,put=0,get=0,take=0,put_sum=0,get_started=0,take_sum=0,get_sum=0,wait.sum=0,wait.avgtime=0,get_or_fail_fail=0,get_or_fail_success=0,wait.avgcount=0,max=104857600 1550658950000000000 +ceph,collection=throttle-msgr_dispatch_throttler-client,id=0,type=osd put=100,max=104857600,wait.sum=0,wait.avgtime=0,get_or_fail_fail=0,take_sum=0,val=0,wait.avgcount=0,get_sum=48561,get_or_fail_success=100,take=0,put_sum=48561,get_started=0,get=100 1550658950000000000 +ceph,collection=mutex-OSDShard.2::sdata_wait_lock,id=0,type=osd wait.sum=0,wait.avgtime=0,wait.avgcount=0 1550658950000000000 +ceph,collection=throttle-objecter_ops,id=0,type=osd get_or_fail_fail=0,max=1024,get_sum=0,take=0,val=0,wait.avgtime=0,get_or_fail_success=0,wait.sum=0,put_sum=0,get=0,take_sum=0,put=0,wait.avgcount=0,get_started=0 1550658950000000000 +ceph,collection=AsyncMessenger::Worker-1,id=0,type=osd msgr_send_messages=266,msgr_recv_bytes=49074,msgr_active_connections=1,msgr_running_recv_time=0.136317251,msgr_running_fast_dispatch_time=0,msgr_created_connections=5,msgr_send_bytes=41569,msgr_running_send_time=0.514432253,msgr_recv_messages=81,msgr_running_total_time=0.766790051 1550658950000000000 +ceph,collection=throttle-bluestore_throttle_deferred_bytes,id=0,type=osd get_started=0,wait.sum=0,wait.avgcount=0,take_sum=0,val=12134038,max=201326592,take=0,get_or_fail_fail=0,put_sum=0,wait.avgtime=0,get_or_fail_success=18,get=18,get_sum=12134038,put=0 1550658950000000000 +ceph,collection=throttle-msgr_dispatch_throttler-hb_front_server,id=0,type=osd get=0,put_sum=0,val=0,get_or_fail_fail=0,get_or_fail_success=0,take=0,max=104857600,get_started=0,wait.sum=0,wait.avgtime=0,get_sum=0,take_sum=0,put=0,wait.avgcount=0 1550658950000000000 +ceph,collection=mutex-OSDShard.1::sdata_wait_lock,id=0,type=osd wait.avgcount=0,wait.sum=0,wait.avgtime=0 1550658950000000000 +ceph,collection=finisher-defered_finisher,id=0,type=osd queue_len=0,complete_latency.avgcount=0,complete_latency.sum=0,complete_latency.avgtime=0 1550658950000000000 +ceph,collection=mutex-OSDShard.3::shard_lock,id=0,type=osd wait.avgtime=0,wait.avgcount=0,wait.sum=0 1550658950000000000 
+ceph,collection=mutex-OSDShard.0::shard_lock,id=0,type=osd wait.avgcount=0,wait.sum=0,wait.avgtime=0 1550658950000000000 +ceph,collection=throttle-osd_client_bytes,id=0,type=osd get_or_fail_fail=0,get=22,get_sum=6262,take=0,max=524288000,put=31,wait.sum=0,val=0,get_started=0,put_sum=6262,get_or_fail_success=22,take_sum=0,wait.avgtime=0,wait.avgcount=0 1550658950000000000 +ceph,collection=rocksdb,id=0,type=osd submit_latency.sum=0.019985172,rocksdb_write_pre_and_post_time.sum=0,rocksdb_write_wal_time.avgtime=0,rocksdb_write_delay_time.avgtime=0,rocksdb_write_pre_and_post_time.avgtime=0,rocksdb_write_pre_and_post_time.avgcount=0,submit_sync_latency.sum=0.559604552,compact=0,compact_queue_len=0,get_latency.avgcount=140,submit_latency.avgtime=0.000095622,submit_transaction=209,compact_range=0,rocksdb_write_wal_time.avgcount=0,submit_sync_latency.avgtime=0.011906479,compact_queue_merge=0,rocksdb_write_memtable_time.avgtime=0,get_latency.sum=0.013135139,submit_latency.avgcount=209,submit_sync_latency.avgcount=47,submit_transaction_sync=47,rocksdb_write_wal_time.sum=0,rocksdb_write_delay_time.avgcount=0,rocksdb_write_memtable_time.avgcount=0,rocksdb_write_memtable_time.sum=0,get=140,get_latency.avgtime=0.000093822,rocksdb_write_delay_time.sum=0 1550658950000000000 +ceph,collection=mutex-OSDShard.1::shard_lock,id=0,type=osd wait.avgcount=0,wait.sum=0,wait.avgtime=0 1550658950000000000 +ceph,collection=osd,id=0,type=osd subop_latency.avgtime=0,copyfrom=0,osd_pg_info=140,subop_push_latency.avgtime=0,subop_pull=0,op_rw_process_latency.sum=0,stat_bytes=10733223936,numpg_removing=0,op_latency.avgtime=0,op_w_process_latency.avgtime=0,op_rw_in_bytes=0,osd_map_cache_miss=0,loadavg=144,map_messages=31,op_w_latency.avgtime=0,op_prepare_latency.avgcount=0,op_r=0,op_latency.avgcount=0,osd_map_cache_hit=225,op_w_prepare_latency.sum=0,numpg_primary=30,op_rw_out_bytes=0,subop_w_latency.avgcount=0,subop_push_latency.avgcount=0,op_r_process_latency.avgcount=0,op_w_in_bytes=0,op_rw_latency.avgtime=0,subop_w_latency.avgtime=0,osd_map_cache_miss_low_avg.sum=0,agent_wake=0,op_before_queue_op_lat.avgtime=0.000065043,op_w_prepare_latency.avgcount=0,tier_proxy_write=0,op_rw_prepare_latency.avgtime=0,op_rw_process_latency.avgtime=0,op_in_bytes=0,op_cache_hit=0,tier_whiteout=0,op_w_prepare_latency.avgtime=0,heartbeat_to_peers=0,object_ctx_cache_hit=0,buffer_bytes=0,stat_bytes_avail=9654697984,op_w_latency.avgcount=0,tier_dirty=0,tier_flush_fail=0,op_rw_prepare_latency.avgcount=0,agent_flush=0,osd_tier_promote_lat.sum=0,subop_w_latency.sum=0,tier_promote=0,op_before_dequeue_op_lat.avgcount=22,push=0,tier_flush=0,osd_pg_biginfo=90,tier_try_flush_fail=0,subop_push_in_bytes=0,op_before_dequeue_op_lat.sum=0.00266744,osd_map_cache_miss_low=0,numpg=30,op_prepare_latency.avgtime=0,subop_pull_latency.avgtime=0,op_rw_latency.avgcount=0,subop_latency.avgcount=0,op=0,osd_tier_promote_lat.avgcount=0,cached_crc=0,op_r_prepare_latency.sum=0,subop_pull_latency.sum=0,op_before_dequeue_op_lat.avgtime=0.000121247,history_alloc_Mbytes=0,subop_push_latency.sum=0,subop_in_bytes=0,op_w_process_latency.sum=0,osd_map_cache_miss_low_avg.avgcount=0,subop=0,tier_clean=0,osd_tier_r_lat.avgtime=0,op_r_process_latency.avgtime=0,op_r_prepare_latency.avgcount=0,op_w_process_latency.avgcount=0,numpg_stray=0,op_r_prepare_latency.avgtime=0,object_ctx_cache_total=0,op_process_latency.avgtime=0,op_r_process_latency.sum=0,op_r_latency.sum=0,subop_w_in_bytes=0,op_rw=0,messages_delayed_for_map=4,map_message_epoch_dups=30,osd_map_bl_cache_miss=33,op_r_latency.avgt
ime=0,op_before_queue_op_lat.sum=0.001430955,map_message_epochs=64,agent_evict=0,op_out_bytes=0,op_process_latency.sum=0,osd_tier_flush_lat.sum=0,stat_bytes_used=1078525952,op_prepare_latency.sum=0,op_wip=0,osd_tier_flush_lat.avgtime=0,missed_crc=0,op_rw_latency.sum=0,op_r_latency.avgcount=0,pull=0,op_w_latency.sum=0,op_before_queue_op_lat.avgcount=22,tier_try_flush=0,numpg_replica=0,subop_push=0,osd_tier_r_lat.sum=0,op_latency.sum=0,push_out_bytes=0,op_w=0,osd_tier_promote_lat.avgtime=0,subop_latency.sum=0,osd_pg_fastinfo=0,tier_delay=0,op_rw_prepare_latency.sum=0,osd_tier_flush_lat.avgcount=0,osd_map_bl_cache_hit=0,op_r_out_bytes=0,subop_pull_latency.avgcount=0,op_process_latency.avgcount=0,tier_evict=0,tier_proxy_read=0,agent_skip=0,subop_w=0,history_alloc_num=0,osd_tier_r_lat.avgcount=0,recovery_ops=0,cached_crc_adjusted=0,op_rw_process_latency.avgcount=0 1550658950000000000 +ceph,collection=finisher-finisher-0,id=0,type=osd complete_latency.sum=0.015491438,complete_latency.avgtime=0.000174061,complete_latency.avgcount=89,queue_len=0 1550658950000000000 +ceph,collection=throttle-msgr_dispatch_throttler-hb_back_client,id=0,type=osd wait.avgtime=0,wait.avgcount=0,max=104857600,get_sum=0,take=0,get_or_fail_fail=0,val=0,get=0,get_or_fail_success=0,wait.sum=0,put=0,take_sum=0,get_started=0,put_sum=0 1550658950000000000 +ceph,collection=throttle-msgr_dispatch_throttler-cluster,id=0,type=osd get_sum=0,take=0,val=0,max=104857600,get_or_fail_success=0,put=0,put_sum=0,wait.sum=0,wait.avgtime=0,get_started=0,get_or_fail_fail=0,take_sum=0,wait.avgcount=0,get=0 1550658950000000000 +ceph,collection=mutex-OSDShard.0::sdata_wait_lock,id=0,type=osd wait.avgcount=0,wait.sum=0,wait.avgtime=0 1550658950000000000 +ceph,collection=throttle-bluestore_throttle_bytes,id=0,type=osd get_sum=140287253,put_sum=140287253,get=209,put=47,val=0,get_started=209,wait.sum=0,wait.avgcount=0,wait.avgtime=0,max=67108864,get_or_fail_fail=0,take=0,take_sum=0,get_or_fail_success=0 1550658950000000000 +ceph,collection=objecter,id=0,type=osd map_inc=15,op_w=0,osd_session_close=0,op=0,osdop_writefull=0,osdop_tmap_up=0,command_resend=0,poolstat_resend=0,osdop_setxattr=0,osdop_append=0,osdop_delete=0,op_rmw=0,poolstat_send=0,op_active=0,osdop_tmap_put=0,osdop_clonerange=0,osdop_rmxattr=0,op_send=0,op_resend=0,osdop_resetxattrs=0,osdop_call=0,osdop_pgls=0,poolstat_active=0,linger_resend=0,osdop_stat=0,op_reply=0,op_laggy=0,statfs_send=0,osdop_getxattr=0,osdop_pgls_filter=0,osdop_notify=0,linger_active=0,osdop_other=0,poolop_resend=0,statfs_active=0,command_active=0,map_epoch=34,osdop_create=0,osdop_watch=0,op_r=0,map_full=0,osdop_src_cmpxattr=0,omap_rd=0,osd_session_open=0,osdop_sparse_read=0,osdop_truncate=0,linger_ping=0,osdop_mapext=0,poolop_send=0,osdop_cmpxattr=0,osd_laggy=0,osdop_writesame=0,osd_sessions=0,osdop_tmap_get=0,op_pg=0,command_send=0,osdop_read=0,op_send_bytes=0,statfs_resend=0,omap_del=0,poolop_active=0,osdop_write=0,osdop_zero=0,omap_wr=0,linger_send=0 1550658950000000000 +ceph,collection=mutex-OSDShard.4::shard_lock,id=0,type=osd wait.avgtime=0,wait.avgcount=0,wait.sum=0 1550658950000000000 +ceph,collection=AsyncMessenger::Worker-0,id=0,type=osd msgr_recv_messages=112,msgr_recv_bytes=14550,msgr_created_connections=15,msgr_running_recv_time=0.026754699,msgr_active_connections=11,msgr_send_messages=11,msgr_running_fast_dispatch_time=0.003373472,msgr_send_bytes=2090,msgr_running_total_time=0.041323592,msgr_running_send_time=0.000441856 1550658950000000000 +ceph,collection=mutex-OSDShard.2::shard_lock,id=0,type=osd 
wait.sum=0,wait.avgtime=0,wait.avgcount=0 1550658950000000000 +ceph,collection=bluestore,id=0,type=osd submit_lat.avgcount=209,kv_flush_lat.avgtime=0.000002175,bluestore_write_big_bytes=0,bluestore_txc=209,kv_commit_lat.avgcount=47,kv_commit_lat.sum=0.585164754,bluestore_buffer_miss_bytes=511,commit_lat.avgcount=209,bluestore_buffer_bytes=0,bluestore_onodes=102,state_kv_queued_lat.sum=1.439223859,deferred_write_bytes=0,bluestore_write_small_bytes=60279,decompress_lat.sum=0,state_kv_done_lat.avgcount=209,submit_lat.sum=0.055637603,state_prepare_lat.avgcount=209,bluestore_write_big=0,read_wait_aio_lat.avgcount=17,bluestore_write_small_deferred=18,kv_lat.sum=0.585267001,kv_flush_lat.sum=0.000102247,bluestore_buffers=0,state_prepare_lat.sum=0.051411998,bluestore_write_small_pre_read=18,state_deferred_queued_lat.sum=0,decompress_lat.avgtime=0,state_kv_done_lat.avgtime=0.000000629,bluestore_write_small_unused=0,read_lat.avgcount=34,bluestore_onode_shard_misses=0,bluestore_blobs=72,bluestore_read_eio=0,bluestore_blob_split=0,bluestore_onode_shard_hits=0,state_kv_commiting_lat.avgcount=209,bluestore_onode_hits=153,state_kv_commiting_lat.sum=2.477385041,read_onode_meta_lat.avgcount=51,state_finishing_lat.avgtime=0.000000489,bluestore_compressed_original=0,state_kv_queued_lat.avgtime=0.006886238,bluestore_gc_merged=0,throttle_lat.avgtime=0.000001247,state_aio_wait_lat.avgtime=0.000001326,bluestore_onode_reshard=0,state_done_lat.avgcount=191,bluestore_compressed_allocated=0,write_penalty_read_ops=0,bluestore_extents=72,compress_lat.avgtime=0,state_aio_wait_lat.avgcount=209,state_io_done_lat.avgtime=0.000000519,bluestore_write_big_blobs=0,state_kv_queued_lat.avgcount=209,kv_flush_lat.avgcount=47,state_finishing_lat.sum=0.000093565,state_io_done_lat.avgcount=209,kv_lat.avgtime=0.012452489,bluestore_buffer_hit_bytes=20750,read_wait_aio_lat.avgtime=0.000038077,bluestore_allocated=4718592,state_deferred_cleanup_lat.avgtime=0,compress_lat.avgcount=0,write_pad_bytes=304265,throttle_lat.sum=0.000260785,read_onode_meta_lat.avgtime=0.000038702,compress_success_count=0,state_deferred_aio_wait_lat.sum=0,decompress_lat.avgcount=0,state_deferred_aio_wait_lat.avgtime=0,bluestore_stored=51133,state_finishing_lat.avgcount=191,bluestore_onode_misses=132,deferred_write_ops=0,read_wait_aio_lat.sum=0.000647315,csum_lat.avgcount=1,state_kv_done_lat.sum=0.000131531,state_prepare_lat.avgtime=0.00024599,state_deferred_cleanup_lat.avgcount=0,state_deferred_queued_lat.avgcount=0,bluestore_reads_with_retries=0,state_kv_commiting_lat.avgtime=0.011853516,kv_commit_lat.avgtime=0.012450313,read_lat.sum=0.003031418,throttle_lat.avgcount=209,bluestore_write_small_new=71,state_deferred_queued_lat.avgtime=0,bluestore_extent_compress=0,bluestore_write_small=89,state_deferred_cleanup_lat.sum=0,submit_lat.avgtime=0.000266208,bluestore_fragmentation_micros=0,state_aio_wait_lat.sum=0.000277323,commit_lat.avgtime=0.018987901,compress_lat.sum=0,bluestore_compressed=0,state_done_lat.sum=0.000206953,csum_lat.avgtime=0.000023281,state_deferred_aio_wait_lat.avgcount=0,compress_rejected_count=0,kv_lat.avgcount=47,read_onode_meta_lat.sum=0.001973812,read_lat.avgtime=0.000089159,csum_lat.sum=0.000023281,state_io_done_lat.sum=0.00010855,state_done_lat.avgtime=0.000001083,commit_lat.sum=3.96847136 1550658950000000000 +ceph,collection=mutex-OSDShard.3::sdata_wait_lock,id=0,type=osd wait.avgcount=0,wait.sum=0,wait.avgtime=0 1550658950000000000 +ceph,collection=AsyncMessenger::Worker-2,id=0,type=osd 
msgr_running_fast_dispatch_time=0,msgr_recv_bytes=246,msgr_created_connections=5,msgr_active_connections=1,msgr_running_recv_time=0.001392218,msgr_running_total_time=1.934101301,msgr_running_send_time=1.781171967,msgr_recv_messages=3,msgr_send_bytes=26504031,msgr_send_messages=15409 1550658950000000000 +ceph,collection=finisher-objecter-finisher-0,id=0,type=osd complete_latency.avgcount=0,complete_latency.sum=0,complete_latency.avgtime=0,queue_len=0 1550658950000000000 +ceph,collection=mutex-OSDShard.4::sdata_wait_lock,id=0,type=osd wait.avgcount=0,wait.sum=0,wait.avgtime=0 1550658950000000000 +ceph,collection=throttle-objecter_bytes,id=0,type=osd take=0,get_sum=0,put_sum=0,put=0,val=0,get=0,get_or_fail_fail=0,wait.avgcount=0,get_or_fail_success=0,wait.sum=0,wait.avgtime=0,get_started=0,max=104857600,take_sum=0 1550658950000000000 +ceph,collection=throttle-mon_client_bytes,id=test,type=monitor get_or_fail_fail=0,take_sum=0,wait.avgtime=0,wait.avgcount=0,get_sum=64607,take=0,get_started=0,put=950,val=240,wait.sum=0,max=104857600,get_or_fail_success=953,put_sum=64367,get=953 1550658950000000000 +ceph,collection=mon,id=test,type=monitor election_win=1,election_lose=0,num_sessions=3,session_add=199,session_rm=196,session_trim=0,num_elections=1,election_call=0 1550658950000000000 +ceph,collection=cluster,id=test,type=monitor num_pg_active=0,num_mon=1,osd_bytes_avail=9654697984,num_object=0,num_osd_in=1,osd_bytes_used=1078525952,num_bytes=0,num_osd=1,num_pg_peering=0,num_pg_active_clean=0,num_pg=30,num_mon_quorum=1,num_object_degraded=0,osd_bytes=10733223936,num_object_unfound=0,num_osd_up=1,num_pool=1,num_object_misplaced=0,osd_epoch=34 1550658950000000000 +ceph,collection=throttle-msgr_dispatch_throttler-mon-mgrc,id=test,type=monitor get=2,put=2,get_sum=16,take_sum=0,wait.avgtime=0,val=0,wait.avgcount=0,get_or_fail_success=2,put_sum=16,max=104857600,get_started=0,take=0,get_or_fail_fail=0,wait.sum=0 1550658950000000000 +ceph,collection=rocksdb,id=test,type=monitor rocksdb_write_memtable_time.avgtime=0,submit_sync_latency.avgtime=0.013689071,submit_transaction_sync=39173,rocksdb_write_pre_and_post_time.avgtime=0,get_latency.avgcount=724581,submit_latency.avgtime=0,submit_sync_latency.avgcount=39173,rocksdb_write_wal_time.avgtime=0,rocksdb_write_pre_and_post_time.sum=0,compact_range=231,compact_queue_merge=0,rocksdb_write_memtable_time.avgcount=0,submit_sync_latency.sum=536.242007888,compact=0,rocksdb_write_delay_time.sum=0,get_latency.sum=9.578173532,rocksdb_write_delay_time.avgcount=0,rocksdb_write_delay_time.avgtime=0,compact_queue_len=0,get_latency.avgtime=0.000013218,submit_latency.sum=0,get=724581,rocksdb_write_wal_time.avgcount=0,submit_transaction=0,rocksdb_write_wal_time.sum=0,submit_latency.avgcount=0,rocksdb_write_pre_and_post_time.avgcount=0,rocksdb_write_memtable_time.sum=0 1550658950000000000 +ceph,collection=finisher-mon_finisher,id=test,type=monitor complete_latency.avgtime=0,complete_latency.avgcount=0,complete_latency.sum=0,queue_len=0 1550658950000000000 +ceph,collection=paxos,id=test,type=monitor 
share_state_keys.sum=0,collect_keys.avgcount=0,collect=0,store_state_latency.avgtime=0,begin_latency.sum=338.90900364,collect_keys.sum=0,collect_bytes.avgcount=0,accept_timeout=0,new_pn_latency.avgcount=0,new_pn_latency.sum=0,commit_keys.sum=116820,share_state_bytes.sum=0,refresh_latency.avgcount=19576,store_state=0,collect_timeout=0,lease_ack_timeout=0,collect_latency.avgcount=0,store_state_keys.avgcount=0,commit_bytes.sum=38478195,refresh_latency.sum=8.341938952,collect_uncommitted=0,commit_latency.avgcount=19576,share_state=0,begin_latency.avgtime=0.017312474,commit_latency.avgtime=0.009926797,begin_keys.sum=58728,start_peon=0,commit_keys.avgcount=19576,begin_latency.avgcount=19576,store_state_latency.avgcount=0,start_leader=1,begin_keys.avgcount=19576,collect_bytes.sum=0,begin_bytes.avgcount=19576,store_state_bytes.sum=0,commit=19576,begin_bytes.sum=41771257,new_pn_latency.avgtime=0,refresh_latency.avgtime=0.00042613,commit_latency.sum=194.326980684,new_pn=0,refresh=19576,collect_latency.sum=0,collect_latency.avgtime=0,lease_timeout=0,begin=19576,share_state_bytes.avgcount=0,share_state_keys.avgcount=0,store_state_keys.sum=0,store_state_bytes.avgcount=0,store_state_latency.sum=0,commit_bytes.avgcount=19576,restart=2 1550658950000000000 +ceph,collection=finisher-monstore,id=test,type=monitor complete_latency.avgcount=19576,complete_latency.sum=208.300976568,complete_latency.avgtime=0.01064063,queue_len=0 1550658950000000000 +ceph,collection=AsyncMessenger::Worker-2,id=test,type=monitor msgr_created_connections=1,msgr_send_bytes=0,msgr_running_send_time=0,msgr_recv_bytes=0,msgr_send_messages=1,msgr_recv_messages=0,msgr_running_total_time=0.003026541,msgr_running_recv_time=0,msgr_running_fast_dispatch_time=0,msgr_active_connections=1 1550658950000000000 +ceph,collection=throttle-msgr_dispatch_throttler-mon,id=test,type=monitor take=0,take_sum=0,put=39933,get=39933,put_sum=56745184,wait.avgtime=0,get_or_fail_success=39933,wait.sum=0,get_sum=56745184,get_or_fail_fail=0,wait.avgcount=0,val=0,max=104857600,get_started=0 1550658950000000000 +ceph,collection=throttle-mon_daemon_bytes,id=test,type=monitor max=419430400,get_started=0,wait.avgtime=0,take_sum=0,get=262,take=0,put_sum=21212,wait.avgcount=0,get_or_fail_success=262,get_or_fail_fail=0,put=262,wait.sum=0,val=0,get_sum=21212 1550658950000000000 +ceph,collection=AsyncMessenger::Worker-1,id=test,type=monitor msgr_send_messages=1071,msgr_running_total_time=0.703589077,msgr_active_connections=146,msgr_send_bytes=3887863,msgr_running_send_time=0.361602994,msgr_running_recv_time=0.328218119,msgr_running_fast_dispatch_time=0,msgr_recv_messages=978,msgr_recv_bytes=142209,msgr_created_connections=197 1550658950000000000 +ceph,collection=AsyncMessenger::Worker-0,id=test,type=monitor msgr_created_connections=54,msgr_recv_messages=38957,msgr_active_connections=47,msgr_running_fast_dispatch_time=0,msgr_send_bytes=25338946,msgr_running_total_time=9.190267622,msgr_running_send_time=3.124663809,msgr_running_recv_time=13.03937269,msgr_send_messages=15973,msgr_recv_bytes=59558181 1550658950000000000 +``` diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index b6a6c5c08..d3911102d 100644 --- a/plugins/inputs/ceph/ceph.go +++ b/plugins/inputs/ceph/ceph.go @@ -157,7 +157,6 @@ func init() { } inputs.Add(measurement, func() telegraf.Input { return &c }) - } var perfDump = func(binary string, socket *socket) (string, error) { @@ -336,17 +335,17 @@ type CephStatus struct { StateName string `json:"state_name"` Count float64 `json:"count"` } 
`json:"pgs_by_state"` - Version float64 `json:"version"` - NumPGs float64 `json:"num_pgs"` - DataBytes float64 `json:"data_bytes"` - BytesUsed float64 `json:"bytes_used"` - BytesAvail float64 `json:"bytes_avail"` - BytesTotal float64 `json:"bytes_total"` - ReadBytesSec float64 `json:"read_bytes_sec"` - WriteBytesSec float64 `json:"write_bytes_sec"` - OpPerSec float64 `json:"op_per_sec"` // This field is no longer reported in ceph 10 and later - ReadOpPerSec float64 `json:"read_op_per_sec"` - WriteOpPerSec float64 `json:"write_op_per_sec"` + Version float64 `json:"version"` + NumPGs float64 `json:"num_pgs"` + DataBytes float64 `json:"data_bytes"` + BytesUsed float64 `json:"bytes_used"` + BytesAvail float64 `json:"bytes_avail"` + BytesTotal float64 `json:"bytes_total"` + ReadBytesSec float64 `json:"read_bytes_sec"` + WriteBytesSec float64 `json:"write_bytes_sec"` + OpPerSec *float64 `json:"op_per_sec"` // This field is no longer reported in ceph 10 and later + ReadOpPerSec float64 `json:"read_op_per_sec"` + WriteOpPerSec float64 `json:"write_op_per_sec"` } `json:"pgmap"` } @@ -423,16 +422,21 @@ func decodeStatusPgmapState(acc telegraf.Accumulator, data *CephStatus) error { // CephDF is used to unmarshal 'ceph df' output type CephDf struct { Stats struct { - TotalSpace float64 `json:"total_space"` - TotalUsed float64 `json:"total_used"` - TotalAvail float64 `json:"total_avail"` + TotalSpace *float64 `json:"total_space"` // pre ceph 0.84 + TotalUsed *float64 `json:"total_used"` // pre ceph 0.84 + TotalAvail *float64 `json:"total_avail"` // pre ceph 0.84 + TotalBytes *float64 `json:"total_bytes"` + TotalUsedBytes *float64 `json:"total_used_bytes"` + TotalAvailBytes *float64 `json:"total_avail_bytes"` } `json:"stats"` Pools []struct { Name string `json:"name"` Stats struct { - KBUsed float64 `json:"kb_used"` - BytesUsed float64 `json:"bytes_used"` - Objects float64 `json:"objects"` + KBUsed float64 `json:"kb_used"` + BytesUsed float64 `json:"bytes_used"` + Objects float64 `json:"objects"` + PercentUsed *float64 `json:"percent_used"` + MaxAvail *float64 `json:"max_avail"` } `json:"stats"` } `json:"pools"` } @@ -446,9 +450,12 @@ func decodeDf(acc telegraf.Accumulator, input string) error { // ceph.usage: records global utilization and number of objects fields := map[string]interface{}{ - "total_space": data.Stats.TotalSpace, - "total_used": data.Stats.TotalUsed, - "total_avail": data.Stats.TotalAvail, + "total_space": data.Stats.TotalSpace, + "total_used": data.Stats.TotalUsed, + "total_avail": data.Stats.TotalAvail, + "total_bytes": data.Stats.TotalBytes, + "total_used_bytes": data.Stats.TotalUsedBytes, + "total_avail_bytes": data.Stats.TotalAvailBytes, } acc.AddFields("ceph_usage", fields, map[string]string{}) @@ -458,9 +465,11 @@ func decodeDf(acc telegraf.Accumulator, input string) error { "name": pool.Name, } fields := map[string]interface{}{ - "kb_used": pool.Stats.KBUsed, - "bytes_used": pool.Stats.BytesUsed, - "objects": pool.Stats.Objects, + "kb_used": pool.Stats.KBUsed, + "bytes_used": pool.Stats.BytesUsed, + "objects": pool.Stats.Objects, + "percent_used": pool.Stats.PercentUsed, + "max_avail": pool.Stats.MaxAvail, } acc.AddFields("ceph_pool_usage", fields, tags) } @@ -472,11 +481,11 @@ func decodeDf(acc telegraf.Accumulator, input string) error { type CephOSDPoolStats []struct { PoolName string `json:"pool_name"` ClientIORate struct { - ReadBytesSec float64 `json:"read_bytes_sec"` - WriteBytesSec float64 `json:"write_bytes_sec"` - OpPerSec float64 `json:"op_per_sec"` // This field is 
no longer reported in ceph 10 and later - ReadOpPerSec float64 `json:"read_op_per_sec"` - WriteOpPerSec float64 `json:"write_op_per_sec"` + ReadBytesSec float64 `json:"read_bytes_sec"` + WriteBytesSec float64 `json:"write_bytes_sec"` + OpPerSec *float64 `json:"op_per_sec"` // This field is no longer reported in ceph 10 and later + ReadOpPerSec float64 `json:"read_op_per_sec"` + WriteOpPerSec float64 `json:"write_op_per_sec"` } `json:"client_io_rate"` RecoveryRate struct { RecoveringObjectsPerSec float64 `json:"recovering_objects_per_sec"` diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index ee2f96491..6403d6994 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -874,7 +874,7 @@ var cephStatusResults = []expectedResult{ "bytes_total": float64(17335810048000), "read_bytes_sec": float64(0), "write_bytes_sec": float64(367217), - "op_per_sec": float64(98), + "op_per_sec": pf(98), "read_op_per_sec": float64(322), "write_op_per_sec": float64(1022), }, @@ -912,7 +912,10 @@ var cephStatusResults = []expectedResult{ var cephDFDump = ` { "stats": { "total_space": 472345880, "total_used": 71058504, - "total_avail": 377286864}, + "total_avail": 377286864, + "total_bytes": 472345880, + "total_used_bytes": 71058504, + "total_avail_bytes": 377286864}, "pools": [ { "name": "data", "id": 0, @@ -939,18 +942,23 @@ var cephDfResults = []expectedResult{ { metric: "ceph_usage", fields: map[string]interface{}{ - "total_space": float64(472345880), - "total_used": float64(71058504), - "total_avail": float64(377286864), + "total_space": pf(472345880), + "total_used": pf(71058504), + "total_avail": pf(377286864), + "total_bytes": pf(472345880), + "total_used_bytes": pf(71058504), + "total_avail_bytes": pf(377286864), }, tags: map[string]string{}, }, { metric: "ceph_pool_usage", fields: map[string]interface{}{ - "kb_used": float64(0), - "bytes_used": float64(0), - "objects": float64(0), + "kb_used": float64(0), + "bytes_used": float64(0), + "objects": float64(0), + "percent_used": (*float64)(nil), + "max_avail": (*float64)(nil), }, tags: map[string]string{ "name": "data", @@ -959,9 +967,11 @@ var cephDfResults = []expectedResult{ { metric: "ceph_pool_usage", fields: map[string]interface{}{ - "kb_used": float64(25), - "bytes_used": float64(25052), - "objects": float64(53), + "kb_used": float64(25), + "bytes_used": float64(25052), + "objects": float64(53), + "percent_used": (*float64)(nil), + "max_avail": (*float64)(nil), }, tags: map[string]string{ "name": "metadata", @@ -970,9 +980,11 @@ var cephDfResults = []expectedResult{ { metric: "ceph_pool_usage", fields: map[string]interface{}{ - "kb_used": float64(0), - "bytes_used": float64(0), - "objects": float64(0), + "kb_used": float64(0), + "bytes_used": float64(0), + "objects": float64(0), + "percent_used": (*float64)(nil), + "max_avail": (*float64)(nil), }, tags: map[string]string{ "name": "rbd", @@ -981,9 +993,11 @@ var cephDfResults = []expectedResult{ { metric: "ceph_pool_usage", fields: map[string]interface{}{ - "kb_used": float64(55476), - "bytes_used": float64(56806602), - "objects": float64(1), + "kb_used": float64(55476), + "bytes_used": float64(56806602), + "objects": float64(1), + "percent_used": (*float64)(nil), + "max_avail": (*float64)(nil), }, tags: map[string]string{ "name": "test", @@ -1028,7 +1042,7 @@ var cephOSDPoolStatsResults = []expectedResult{ fields: map[string]interface{}{ "read_bytes_sec": float64(0), "write_bytes_sec": float64(0), - "op_per_sec": float64(0), + 
"op_per_sec": (*float64)(nil), "read_op_per_sec": float64(0), "write_op_per_sec": float64(0), "recovering_objects_per_sec": float64(0), @@ -1044,7 +1058,7 @@ var cephOSDPoolStatsResults = []expectedResult{ fields: map[string]interface{}{ "read_bytes_sec": float64(10566067), "write_bytes_sec": float64(15165220376), - "op_per_sec": float64(9828), + "op_per_sec": pf(9828), "read_op_per_sec": float64(182), "write_op_per_sec": float64(473), "recovering_objects_per_sec": float64(279), @@ -1056,3 +1070,7 @@ var cephOSDPoolStatsResults = []expectedResult{ }, }, } + +func pf(i float64) *float64 { + return &i +} From 135b1d1a8eb3d8fffab3c7d3e12a7e6bbba2cc48 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 25 Feb 2019 15:34:11 -0800 Subject: [PATCH 0632/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 16e5cfca1..72c059e59 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -70,6 +70,7 @@ - [#5437](https://github.com/influxdata/telegraf/issues/5437): Host not added when using custom arguments in ping plugin. - [#5438](https://github.com/influxdata/telegraf/issues/5438): Fix InfluxDB output UDP line splitting. - [#5456](https://github.com/influxdata/telegraf/issues/5456): Disable results by row in azuredb query. +- [#5277](https://github.com/influxdata/telegraf/issues/5456): Add backwards compatibility fields in ceph usage and pool stats. ## v1.9.4 [2019-02-05] From 5253cbfa010361c26a7073abb3872246615bd9aa Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 25 Feb 2019 17:02:30 -0700 Subject: [PATCH 0633/1815] Add ceph_health metrics to ceph input (#5482) --- plugins/inputs/ceph/README.md | 6 ++++++ plugins/inputs/ceph/ceph.go | 15 +++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/plugins/inputs/ceph/README.md b/plugins/inputs/ceph/README.md index 88ca4aff7..33585b079 100644 --- a/plugins/inputs/ceph/README.md +++ b/plugins/inputs/ceph/README.md @@ -136,6 +136,11 @@ All admin measurements will have the following tags: *Cluster Stats* ++ ceph_health + - fields: + - status + - overall_status + - ceph_osdmap - fields: - epoch (float) @@ -209,6 +214,7 @@ ceph_pool_usage,name=telegraf kb_used=0,bytes_used=0,objects=0 15506589110000000 ceph_pgmap_state,state=undersized+peered count=30 1550658910000000000 ceph_pgmap bytes_total=10733223936,read_op_per_sec=0,write_op_per_sec=0,num_pgs=30,data_bytes=0,bytes_avail=9654697984,read_bytes_sec=0,write_bytes_sec=0,version=0,bytes_used=1078525952 1550658910000000000 ceph_osdmap num_up_osds=1,num_in_osds=1,full=false,nearfull=false,num_remapped_pgs=0,epoch=34,num_osds=1 1550658910000000000 +ceph_health status="HEALTH_WARN",overall_status="HEALTH_WARN" 1550658910000000000 ``` *Admin Socket Stats* diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index d3911102d..e28f977d2 100644 --- a/plugins/inputs/ceph/ceph.go +++ b/plugins/inputs/ceph/ceph.go @@ -319,6 +319,10 @@ func (c *Ceph) exec(command string) (string, error) { // CephStatus is used to unmarshal "ceph -s" output type CephStatus struct { + Health struct { + Status string `json:"status"` + OverallStatus string `json:"overall_status"` + } `json:"health"` OSDMap struct { OSDMap struct { Epoch float64 `json:"epoch"` @@ -357,6 +361,7 @@ func decodeStatus(acc telegraf.Accumulator, input string) error { } decoders := []func(telegraf.Accumulator, *CephStatus) error{ + decodeStatusHealth, decodeStatusOsdmap, decodeStatusPgmap, decodeStatusPgmapState, @@ -371,6 +376,16 @@ 
func decodeStatus(acc telegraf.Accumulator, input string) error { return nil } +// decodeStatusHealth decodes the health portion of the output of 'ceph status' +func decodeStatusHealth(acc telegraf.Accumulator, data *CephStatus) error { + fields := map[string]interface{}{ + "status": data.Health.Status, + "overall_status": data.Health.OverallStatus, + } + acc.AddFields("ceph_health", fields, map[string]string{}) + return nil +} + // decodeStatusOsdmap decodes the OSD map portion of the output of 'ceph -s' func decodeStatusOsdmap(acc telegraf.Accumulator, data *CephStatus) error { fields := map[string]interface{}{ From 8da6846e5373b1c7fff21dc6434cd1f27b13d59b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 25 Feb 2019 16:04:10 -0800 Subject: [PATCH 0634/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 72c059e59..6fa79c6a0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,7 @@ - [#5417](https://github.com/influxdata/telegraf/pull/5417): Add rcode tag and field to dns_query input. - [#5453](https://github.com/influxdata/telegraf/pull/5453): Support Azure Sovereign Environments with endpoint_url option. - [#5472](https://github.com/influxdata/telegraf/pull/5472): Support configuring a default timezone in JSON parser. +- [#5482](https://github.com/influxdata/telegraf/pull/5482): Add ceph_health metrics to ceph input. #### Bugfixes From a85833ae53ea08e3eb72989209c1012a07cf878f Mon Sep 17 00:00:00 2001 From: Max Eshleman Date: Tue, 26 Feb 2019 11:34:50 -0700 Subject: [PATCH 0635/1815] replace gomega with require in prometheus output client tests Signed-off-by: Robert Sullivan --- Gopkg.lock | 22 ------- .../prometheus_client_tls_test.go | 61 ++++++------------- 2 files changed, 18 insertions(+), 65 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index c3c980f65..c07c288c0 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -821,27 +821,6 @@ revision = "eee57a3ac4174c55924125bb15eeeda8cffb6e6f" version = "v1.0.7" -[[projects]] - digest = "1:c8f0c8c28c9c1c51db72d0e7f04797cfe5d0d50528274099b6b2d6c314db7f97" - name = "github.com/onsi/gomega" - packages = [ - ".", - "format", - "internal/assertion", - "internal/asyncassertion", - "internal/oraclematcher", - "internal/testingtsupport", - "matchers", - "matchers/support/goraph/bipartitegraph", - "matchers/support/goraph/edge", - "matchers/support/goraph/node", - "matchers/support/goraph/util", - "types", - ] - pruneopts = "" - revision = "65fb64232476ad9046e57c26cd0bff3d3a8dc6cd" - version = "v1.4.3" - [[projects]] digest = "1:5d9b668b0b4581a978f07e7d2e3314af18eb27b3fb5d19b70185b7c575723d11" name = "github.com/opencontainers/go-digest" @@ -1589,7 +1568,6 @@ "github.com/nats-io/gnatsd/server", "github.com/nats-io/go-nats", "github.com/nsqio/go-nsq", - "github.com/onsi/gomega", "github.com/openzipkin/zipkin-go-opentracing", "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore", "github.com/prometheus/client_golang/prometheus", diff --git a/plugins/outputs/prometheus_client/prometheus_client_tls_test.go b/plugins/outputs/prometheus_client/prometheus_client_tls_test.go index 4658fcdb4..d7484d61f 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_tls_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_tls_test.go @@ -6,7 +6,7 @@ import ( "github.com/influxdata/telegraf/plugins/outputs/prometheus_client" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" - . 
"github.com/onsi/gomega" + "github.com/stretchr/testify/require" "net/http" "testing" ) @@ -28,8 +28,6 @@ type PrometheusClientTestContext struct { Output *prometheus_client.PrometheusClient Accumulator *testutil.Accumulator Client *http.Client - - *GomegaWithT } func TestWorksWithoutTLS(t *testing.T) { @@ -37,47 +35,29 @@ func TestWorksWithoutTLS(t *testing.T) { err := tc.Output.Connect() defer tc.Output.Close() - if err != nil { - panic(err) - } + require.NoError(t, err) - var response *http.Response - tc.Eventually(func() bool { - response, err = tc.Client.Get("http://localhost:9090/metrics") - return err == nil - }, "5s").Should(BeTrue()) + response, err := tc.Client.Get("http://localhost:9090/metrics") + require.NoError(t, err) - if err != nil { - panic(err) - } - - tc.Expect(response.StatusCode).To(Equal(http.StatusOK)) + require.NoError(t, err) + require.Equal(t, response.StatusCode, http.StatusOK) } func TestWorksWithTLS(t *testing.T) { tc := buildTestContext(t, []byte(configWithTLS)) err := tc.Output.Connect() defer tc.Output.Close() + require.NoError(t, err) - if err != nil { - panic(err) - } + response, err := tc.Client.Get("https://localhost:9090/metrics") + require.NoError(t, err) - var response *http.Response - tc.Eventually(func() bool { - response, err = tc.Client.Get("https://localhost:9090/metrics") - return err == nil - }, "5s").Should(BeTrue()) - - if err != nil { - panic(err) - } - - tc.Expect(response.StatusCode).To(Equal(http.StatusOK)) + require.NoError(t, err) + require.Equal(t, response.StatusCode, http.StatusOK) response, err = tc.Client.Get("http://localhost:9090/metrics") - - tc.Expect(err).To(HaveOccurred()) + require.Error(t, err) tr := &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, @@ -86,23 +66,20 @@ func TestWorksWithTLS(t *testing.T) { client := &http.Client{Transport: tr} response, err = client.Get("https://localhost:9090/metrics") - tc.Expect(err).To(HaveOccurred()) + require.Error(t, err) } func buildTestContext(t *testing.T, config []byte) *PrometheusClientTestContext { output := prometheus_client.NewClient() err := toml.Unmarshal(config, output) - - if err != nil { - panic(err) - } + require.NoError(t, err) var ( httpClient *http.Client ) if len(output.TLSAllowedCACerts) != 0 { - httpClient = buildClientWithTLS(output) + httpClient = buildClientWithTLS(t, output) } else { httpClient = buildClientWithoutTLS() } @@ -111,7 +88,6 @@ func buildTestContext(t *testing.T, config []byte) *PrometheusClientTestContext Output: output, Accumulator: &testutil.Accumulator{}, Client: httpClient, - GomegaWithT: NewGomegaWithT(t), } } @@ -119,11 +95,10 @@ func buildClientWithoutTLS() *http.Client { return &http.Client{} } -func buildClientWithTLS(output *prometheus_client.PrometheusClient) *http.Client { +func buildClientWithTLS(t *testing.T, output *prometheus_client.PrometheusClient) *http.Client { tlsConfig, err := pki.TLSClientConfig().TLSConfig() - if err != nil { - panic(err) - } + require.NoError(t, err) + transport := &http.Transport{TLSClientConfig: tlsConfig} return &http.Client{Transport: transport} } From b34ad9efc4caae6d902112b91ccebfc5c312e083 Mon Sep 17 00:00:00 2001 From: Max Eshleman Date: Tue, 26 Feb 2019 11:46:49 -0700 Subject: [PATCH 0636/1815] update sample config for prometheus output client Signed-off-by: Robert Sullivan --- plugins/outputs/prometheus_client/README.md | 8 +++----- plugins/outputs/prometheus_client/prometheus_client.go | 8 +++----- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git 
a/plugins/outputs/prometheus_client/README.md b/plugins/outputs/prometheus_client/README.md index c2f097fbd..d1b4a1b0e 100644 --- a/plugins/outputs/prometheus_client/README.md +++ b/plugins/outputs/prometheus_client/README.md @@ -36,11 +36,9 @@ This plugin starts a [Prometheus](https://prometheus.io/) Client, it exposes all # tls_cert = "/etc/ssl/telegraf.crt" # tls_key = "/etc/ssl/telegraf.key" - ## If set, enable TLS client authentication with the given CA. - # tls_ca = "/etc/ssl/telegraf_ca.crt" - - ## Boolean value indicating whether or not to skip SSL verification - # insecure_skip_verify = false + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] ## Export metric collection time. # export_timestamp = false diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index c2af6f655..b37718ab7 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -107,11 +107,9 @@ var sampleConfig = ` # tls_cert = "/etc/ssl/telegraf.crt" # tls_key = "/etc/ssl/telegraf.key" - ## If set, enable TLS client authentication with the given CA. - # tls_ca = "/etc/ssl/telegraf_ca.crt" - - ## Boolean value indicating whether or not to skip SSL verification - # insecure_skip_verify = false + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] ## Export metric collection time. # export_timestamp = false From 04f3c4321c843f9e9a63b229b0c0f413b2a1db3b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 Feb 2019 10:48:41 -0800 Subject: [PATCH 0637/1815] Fix several influx parser issues (#5484) - Add line/column position - Allow handlers to return errors - Fix tag value escaping - Allow newline in string fields --- plugins/parsers/influx/handler.go | 64 +- plugins/parsers/influx/machine.go | 41369 ++++++++++++-------- plugins/parsers/influx/machine.go.rl | 255 +- plugins/parsers/influx/machine_test.go | 827 +- plugins/parsers/influx/parser.go | 44 +- plugins/parsers/influx/parser_test.go | 168 +- plugins/serializers/influx/escape.go | 4 - plugins/serializers/influx/influx_test.go | 2 +- 8 files changed, 25417 insertions(+), 17316 deletions(-) diff --git a/plugins/parsers/influx/handler.go b/plugins/parsers/influx/handler.go index af7445a53..c488a9c98 100644 --- a/plugins/parsers/influx/handler.go +++ b/plugins/parsers/influx/handler.go @@ -2,16 +2,17 @@ package influx import ( "bytes" + "errors" + "strconv" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/prometheus/common/log" ) type MetricHandler struct { builder *metric.Builder - metrics []telegraf.Metric + err error precision time.Duration } @@ -32,75 +33,88 @@ func (h *MetricHandler) SetTimePrecision(precision time.Duration) { } func (h *MetricHandler) Metric() (telegraf.Metric, error) { - return h.builder.Metric() + m, err := h.builder.Metric() + h.builder.Reset() + return m, err } -func (h *MetricHandler) SetMeasurement(name []byte) { +func (h *MetricHandler) SetMeasurement(name []byte) error { h.builder.SetName(nameUnescape(name)) + return nil } -func (h *MetricHandler) AddTag(key []byte, value []byte) { +func (h *MetricHandler) AddTag(key []byte, value []byte) error { tk := unescape(key) tv := unescape(value) h.builder.AddTag(tk, tv) 
+ return nil } -func (h *MetricHandler) AddInt(key []byte, value []byte) { +func (h *MetricHandler) AddInt(key []byte, value []byte) error { fk := unescape(key) fv, err := parseIntBytes(bytes.TrimSuffix(value, []byte("i")), 10, 64) if err != nil { - log.Errorf("E! Received unparseable int value: %q: %v", value, err) - return + if numerr, ok := err.(*strconv.NumError); ok { + return numerr.Err + } + return err } h.builder.AddField(fk, fv) + return nil } -func (h *MetricHandler) AddUint(key []byte, value []byte) { +func (h *MetricHandler) AddUint(key []byte, value []byte) error { fk := unescape(key) fv, err := parseUintBytes(bytes.TrimSuffix(value, []byte("u")), 10, 64) if err != nil { - log.Errorf("E! Received unparseable uint value: %q: %v", value, err) - return + if numerr, ok := err.(*strconv.NumError); ok { + return numerr.Err + } + return err } h.builder.AddField(fk, fv) + return nil } -func (h *MetricHandler) AddFloat(key []byte, value []byte) { +func (h *MetricHandler) AddFloat(key []byte, value []byte) error { fk := unescape(key) fv, err := parseFloatBytes(value, 64) if err != nil { - log.Errorf("E! Received unparseable float value: %q: %v", value, err) - return + if numerr, ok := err.(*strconv.NumError); ok { + return numerr.Err + } + return err } h.builder.AddField(fk, fv) + return nil } -func (h *MetricHandler) AddString(key []byte, value []byte) { +func (h *MetricHandler) AddString(key []byte, value []byte) error { fk := unescape(key) fv := stringFieldUnescape(value) h.builder.AddField(fk, fv) + return nil } -func (h *MetricHandler) AddBool(key []byte, value []byte) { +func (h *MetricHandler) AddBool(key []byte, value []byte) error { fk := unescape(key) fv, err := parseBoolBytes(value) if err != nil { - log.Errorf("E! Received unparseable boolean value: %q: %v", value, err) - return + return errors.New("unparseable bool") } h.builder.AddField(fk, fv) + return nil } -func (h *MetricHandler) SetTimestamp(tm []byte) { +func (h *MetricHandler) SetTimestamp(tm []byte) error { v, err := parseIntBytes(tm, 10, 64) if err != nil { - log.Errorf("E! 
Received unparseable timestamp: %q: %v", tm, err) - return + if numerr, ok := err.(*strconv.NumError); ok { + return numerr.Err + } + return err } ns := v * int64(h.precision) h.builder.SetTime(time.Unix(0, ns)) -} - -func (h *MetricHandler) Reset() { - h.builder.Reset() + return nil } diff --git a/plugins/parsers/influx/machine.go b/plugins/parsers/influx/machine.go index fef7e2d38..b185eeabe 100644 --- a/plugins/parsers/influx/machine.go +++ b/plugins/parsers/influx/machine.go @@ -12,35 +12,36 @@ var ( ErrTagParse = errors.New("expected tag") ErrTimestampParse = errors.New("expected timestamp") ErrParse = errors.New("parse error") + EOF = errors.New("EOF") ) -//line plugins/parsers/influx/machine.go.rl:226 +//line plugins/parsers/influx/machine.go.rl:304 -//line plugins/parsers/influx/machine.go:23 -const LineProtocol_start int = 1 -const LineProtocol_first_final int = 206 +//line plugins/parsers/influx/machine.go:24 +const LineProtocol_start int = 259 +const LineProtocol_first_final int = 259 const LineProtocol_error int = 0 -const LineProtocol_en_main int = 1 -const LineProtocol_en_discard_line int = 195 -const LineProtocol_en_align int = 196 -const LineProtocol_en_series int = 199 +const LineProtocol_en_main int = 259 +const LineProtocol_en_discard_line int = 247 +const LineProtocol_en_align int = 715 +const LineProtocol_en_series int = 250 -//line plugins/parsers/influx/machine.go.rl:229 +//line plugins/parsers/influx/machine.go.rl:307 type Handler interface { - SetMeasurement(name []byte) - AddTag(key []byte, value []byte) - AddInt(key []byte, value []byte) - AddUint(key []byte, value []byte) - AddFloat(key []byte, value []byte) - AddString(key []byte, value []byte) - AddBool(key []byte, value []byte) - SetTimestamp(tm []byte) + SetMeasurement(name []byte) error + AddTag(key []byte, value []byte) error + AddInt(key []byte, value []byte) error + AddUint(key []byte, value []byte) error + AddFloat(key []byte, value []byte) error + AddString(key []byte, value []byte) error + AddBool(key []byte, value []byte) error + SetTimestamp(tm []byte) error } type machine struct { @@ -48,9 +49,10 @@ type machine struct { cs int p, pe, eof int pb int + lineno int + sol int handler Handler initState int - err error } func NewMachine(handler Handler) *machine { @@ -60,22 +62,24 @@ func NewMachine(handler Handler) *machine { } -//line plugins/parsers/influx/machine.go.rl:258 +//line plugins/parsers/influx/machine.go.rl:337 -//line plugins/parsers/influx/machine.go.rl:259 +//line plugins/parsers/influx/machine.go.rl:338 -//line plugins/parsers/influx/machine.go.rl:260 +//line plugins/parsers/influx/machine.go.rl:339 -//line plugins/parsers/influx/machine.go.rl:261 +//line plugins/parsers/influx/machine.go.rl:340 -//line plugins/parsers/influx/machine.go.rl:262 +//line plugins/parsers/influx/machine.go.rl:341 -//line plugins/parsers/influx/machine.go:74 +//line plugins/parsers/influx/machine.go.rl:342 + +//line plugins/parsers/influx/machine.go:78 { - m.cs = LineProtocol_start + ( m.cs) = LineProtocol_start } -//line plugins/parsers/influx/machine.go.rl:263 +//line plugins/parsers/influx/machine.go.rl:343 return m } @@ -87,22 +91,22 @@ func NewSeriesMachine(handler Handler) *machine { } -//line plugins/parsers/influx/machine.go.rl:274 +//line plugins/parsers/influx/machine.go.rl:354 -//line plugins/parsers/influx/machine.go.rl:275 +//line plugins/parsers/influx/machine.go.rl:355 -//line plugins/parsers/influx/machine.go.rl:276 +//line plugins/parsers/influx/machine.go.rl:356 -//line 
plugins/parsers/influx/machine.go.rl:277 +//line plugins/parsers/influx/machine.go.rl:357 -//line plugins/parsers/influx/machine.go.rl:278 +//line plugins/parsers/influx/machine.go.rl:358 -//line plugins/parsers/influx/machine.go:101 +//line plugins/parsers/influx/machine.go:105 { - m.cs = LineProtocol_start + ( m.cs) = LineProtocol_start } -//line plugins/parsers/influx/machine.go.rl:279 +//line plugins/parsers/influx/machine.go.rl:359 return m } @@ -111,34 +115,36 @@ func (m *machine) SetData(data []byte) { m.data = data m.p = 0 m.pb = 0 + m.lineno = 1 + m.sol = 0 m.pe = len(data) m.eof = len(data) - m.err = nil -//line plugins/parsers/influx/machine.go:120 +//line plugins/parsers/influx/machine.go:125 { - m.cs = LineProtocol_start + ( m.cs) = LineProtocol_start } -//line plugins/parsers/influx/machine.go.rl:292 +//line plugins/parsers/influx/machine.go.rl:373 m.cs = m.initState } -// ParseLine parses a line of input and returns true if more data can be -// parsed. -func (m *machine) ParseLine() bool { - if m.data == nil || m.p >= m.pe { - m.err = nil - return false +// Next parses the next metric line and returns nil if it was successfully +// processed. If the line contains a syntax error an error is returned, +// otherwise if the end of file is reached before finding a metric line then +// EOF is returned. +func (m *machine) Next() error { + if m.p == m.pe && m.pe == m.eof { + return EOF } - m.err = nil + var err error var key []byte - var yield bool + foundMetric := false -//line plugins/parsers/influx/machine.go:142 +//line plugins/parsers/influx/machine.go:148 { if ( m.p) == ( m.pe) { goto _test_eof @@ -146,71 +152,33 @@ func (m *machine) ParseLine() bool { goto _resume _again: - switch m.cs { + switch ( m.cs) { + case 259: + goto st259 case 1: goto st1 case 2: goto st2 case 3: goto st3 - case 4: - goto st4 case 0: goto st0 + case 4: + goto st4 case 5: goto st5 case 6: goto st6 case 7: goto st7 - case 206: - goto st206 - case 207: - goto st207 - case 208: - goto st208 case 8: goto st8 - case 209: - goto st209 - case 210: - goto st210 - case 211: - goto st211 - case 212: - goto st212 - case 213: - goto st213 - case 214: - goto st214 - case 215: - goto st215 - case 216: - goto st216 - case 217: - goto st217 - case 218: - goto st218 - case 219: - goto st219 - case 220: - goto st220 - case 221: - goto st221 - case 222: - goto st222 - case 223: - goto st223 - case 224: - goto st224 - case 225: - goto st225 - case 226: - goto st226 - case 227: - goto st227 - case 228: - goto st228 + case 260: + goto st260 + case 261: + goto st261 + case 262: + goto st262 case 9: goto st9 case 10: @@ -221,54 +189,26 @@ _again: goto st12 case 13: goto st13 - case 229: - goto st229 case 14: goto st14 case 15: goto st15 - case 230: - goto st230 - case 231: - goto st231 - case 232: - goto st232 - case 233: - goto st233 - case 234: - goto st234 - case 235: - goto st235 - case 236: - goto st236 - case 237: - goto st237 - case 238: - goto st238 case 16: goto st16 case 17: goto st17 case 18: goto st18 - case 239: - goto st239 case 19: goto st19 case 20: goto st20 case 21: goto st21 - case 240: - goto st240 case 22: goto st22 case 23: goto st23 - case 241: - goto st241 - case 242: - goto st242 case 24: goto st24 case 25: @@ -289,80 +229,22 @@ _again: goto st32 case 33: goto st33 - case 34: - goto st34 - case 35: - goto st35 - case 36: - goto st36 - case 37: - goto st37 - case 38: - goto st38 - case 39: - goto st39 - case 40: - goto st40 - case 41: - goto st41 - case 42: - goto st42 - case 243: - goto st243 - case 
244: - goto st244 - case 43: - goto st43 - case 245: - goto st245 - case 246: - goto st246 - case 247: - goto st247 - case 248: - goto st248 - case 249: - goto st249 - case 250: - goto st250 - case 251: - goto st251 - case 252: - goto st252 - case 253: - goto st253 - case 254: - goto st254 - case 255: - goto st255 - case 256: - goto st256 - case 257: - goto st257 - case 258: - goto st258 - case 259: - goto st259 - case 260: - goto st260 - case 261: - goto st261 - case 262: - goto st262 case 263: goto st263 case 264: goto st264 - case 44: - goto st44 + case 34: + goto st34 + case 35: + goto st35 case 265: goto st265 case 266: goto st266 - case 45: - goto st45 case 267: goto st267 + case 36: + goto st36 case 268: goto st268 case 269: @@ -399,40 +281,36 @@ _again: goto st284 case 285: goto st285 + case 37: + goto st37 + case 38: + goto st38 case 286: goto st286 - case 46: - goto st46 - case 47: - goto st47 - case 48: - goto st48 case 287: goto st287 - case 49: - goto st49 - case 50: - goto st50 - case 51: - goto st51 - case 52: - goto st52 - case 53: - goto st53 case 288: goto st288 - case 54: - goto st54 + case 39: + goto st39 + case 40: + goto st40 + case 41: + goto st41 + case 42: + goto st42 + case 43: + goto st43 case 289: goto st289 - case 55: - goto st55 case 290: goto st290 case 291: goto st291 case 292: goto st292 + case 44: + goto st44 case 293: goto st293 case 294: @@ -445,42 +323,16 @@ _again: goto st297 case 298: goto st298 - case 56: - goto st56 - case 57: - goto st57 - case 58: - goto st58 case 299: goto st299 - case 59: - goto st59 - case 60: - goto st60 - case 61: - goto st61 case 300: goto st300 - case 62: - goto st62 - case 63: - goto st63 case 301: goto st301 case 302: goto st302 - case 64: - goto st64 - case 65: - goto st65 - case 66: - goto st66 case 303: goto st303 - case 67: - goto st67 - case 68: - goto st68 case 304: goto st304 case 305: @@ -499,52 +351,56 @@ _again: goto st311 case 312: goto st312 - case 69: - goto st69 - case 70: - goto st70 - case 71: - goto st71 case 313: goto st313 - case 72: - goto st72 - case 73: - goto st73 - case 74: - goto st74 case 314: goto st314 - case 75: - goto st75 - case 76: - goto st76 + case 45: + goto st45 + case 46: + goto st46 + case 47: + goto st47 + case 48: + goto st48 + case 49: + goto st49 + case 50: + goto st50 + case 51: + goto st51 + case 52: + goto st52 + case 53: + goto st53 + case 54: + goto st54 case 315: goto st315 case 316: goto st316 - case 77: - goto st77 - case 78: - goto st78 - case 79: - goto st79 - case 80: - goto st80 - case 81: - goto st81 - case 82: - goto st82 case 317: goto st317 + case 55: + goto st55 + case 56: + goto st56 + case 57: + goto st57 + case 58: + goto st58 + case 59: + goto st59 + case 60: + goto st60 case 318: goto st318 case 319: goto st319 + case 61: + goto st61 case 320: goto st320 - case 83: - goto st83 case 321: goto st321 case 322: @@ -553,8 +409,6 @@ _again: goto st323 case 324: goto st324 - case 84: - goto st84 case 325: goto st325 case 326: @@ -585,44 +439,20 @@ _again: goto st338 case 339: goto st339 + case 62: + goto st62 case 340: goto st340 case 341: goto st341 case 342: goto st342 - case 85: - goto st85 - case 86: - goto st86 - case 87: - goto st87 - case 88: - goto st88 - case 89: - goto st89 - case 90: - goto st90 - case 91: - goto st91 - case 92: - goto st92 - case 93: - goto st93 - case 94: - goto st94 - case 95: - goto st95 - case 96: - goto st96 - case 97: - goto st97 + case 63: + goto st63 case 343: goto st343 case 344: goto st344 - case 98: - goto st98 case 345: goto 
st345 case 346: @@ -659,26 +489,48 @@ _again: goto st361 case 362: goto st362 + case 64: + goto st64 + case 65: + goto st65 + case 66: + goto st66 + case 67: + goto st67 + case 68: + goto st68 case 363: goto st363 + case 69: + goto st69 + case 70: + goto st70 + case 71: + goto st71 + case 72: + goto st72 + case 73: + goto st73 case 364: goto st364 - case 99: - goto st99 - case 100: - goto st100 case 365: goto st365 case 366: goto st366 - case 101: - goto st101 + case 74: + goto st74 + case 75: + goto st75 case 367: goto st367 case 368: goto st368 + case 76: + goto st76 case 369: goto st369 + case 77: + goto st77 case 370: goto st370 case 371: @@ -713,28 +565,40 @@ _again: goto st385 case 386: goto st386 - case 102: - goto st102 case 387: goto st387 case 388: goto st388 - case 103: - goto st103 - case 104: - goto st104 - case 105: - goto st105 - case 106: - goto st106 - case 107: - goto st107 case 389: goto st389 - case 108: - goto st108 - case 109: - goto st109 + case 78: + goto st78 + case 79: + goto st79 + case 80: + goto st80 + case 81: + goto st81 + case 82: + goto st82 + case 83: + goto st83 + case 84: + goto st84 + case 85: + goto st85 + case 86: + goto st86 + case 87: + goto st87 + case 88: + goto st88 + case 89: + goto st89 + case 90: + goto st90 + case 91: + goto st91 case 390: goto st390 case 391: @@ -743,72 +607,52 @@ _again: goto st392 case 393: goto st393 + case 92: + goto st92 + case 93: + goto st93 + case 94: + goto st94 + case 95: + goto st95 case 394: goto st394 case 395: goto st395 + case 96: + goto st96 + case 97: + goto st97 case 396: goto st396 + case 98: + goto st98 + case 99: + goto st99 case 397: goto st397 case 398: goto st398 - case 110: - goto st110 - case 111: - goto st111 - case 112: - goto st112 + case 100: + goto st100 case 399: goto st399 - case 113: - goto st113 - case 114: - goto st114 - case 115: - goto st115 case 400: goto st400 - case 116: - goto st116 - case 117: - goto st117 + case 101: + goto st101 + case 102: + goto st102 case 401: goto st401 case 402: goto st402 - case 118: - goto st118 - case 119: - goto st119 - case 120: - goto st120 - case 121: - goto st121 - case 122: - goto st122 - case 123: - goto st123 - case 124: - goto st124 - case 125: - goto st125 - case 126: - goto st126 - case 127: - goto st127 - case 128: - goto st128 - case 129: - goto st129 case 403: goto st403 case 404: goto st404 case 405: goto st405 - case 130: - goto st130 case 406: goto st406 case 407: @@ -835,26 +679,32 @@ _again: goto st417 case 418: goto st418 + case 103: + goto st103 case 419: goto st419 case 420: goto st420 case 421: goto st421 + case 104: + goto st104 + case 105: + goto st105 case 422: goto st422 case 423: goto st423 case 424: goto st424 + case 106: + goto st106 case 425: goto st425 case 426: goto st426 case 427: goto st427 - case 131: - goto st131 case 428: goto st428 case 429: @@ -863,8 +713,6 @@ _again: goto st430 case 431: goto st431 - case 132: - goto st132 case 432: goto st432 case 433: @@ -891,6 +739,8 @@ _again: goto st443 case 444: goto st444 + case 107: + goto st107 case 445: goto st445 case 446: @@ -905,18 +755,10 @@ _again: goto st450 case 451: goto st451 - case 133: - goto st133 - case 134: - goto st134 - case 135: - goto st135 case 452: goto st452 case 453: goto st453 - case 136: - goto st136 case 454: goto st454 case 455: @@ -943,12 +785,26 @@ _again: goto st465 case 466: goto st466 + case 108: + goto st108 + case 109: + goto st109 + case 110: + goto st110 + case 111: + goto st111 + case 112: + goto st112 case 467: goto st467 + case 113: + 
goto st113 case 468: goto st468 case 469: goto st469 + case 114: + goto st114 case 470: goto st470 case 471: @@ -957,34 +813,56 @@ _again: goto st472 case 473: goto st473 - case 137: - goto st137 case 474: goto st474 case 475: goto st475 case 476: goto st476 - case 138: - goto st138 case 477: goto st477 case 478: goto st478 + case 115: + goto st115 + case 116: + goto st116 + case 117: + goto st117 case 479: goto st479 + case 118: + goto st118 + case 119: + goto st119 + case 120: + goto st120 case 480: goto st480 + case 121: + goto st121 + case 122: + goto st122 case 481: goto st481 case 482: goto st482 + case 123: + goto st123 + case 124: + goto st124 + case 125: + goto st125 + case 126: + goto st126 case 483: goto st483 case 484: goto st484 case 485: goto st485 + case 127: + goto st127 case 486: goto st486 case 487: @@ -1011,8 +889,6 @@ _again: goto st497 case 498: goto st498 - case 139: - goto st139 case 499: goto st499 case 500: @@ -1027,6 +903,10 @@ _again: goto st504 case 505: goto st505 + case 128: + goto st128 + case 129: + goto st129 case 506: goto st506 case 507: @@ -1045,36 +925,48 @@ _again: goto st513 case 514: goto st514 + case 130: + goto st130 + case 131: + goto st131 + case 132: + goto st132 case 515: goto st515 + case 133: + goto st133 + case 134: + goto st134 + case 135: + goto st135 case 516: goto st516 + case 136: + goto st136 + case 137: + goto st137 case 517: goto st517 case 518: goto st518 + case 138: + goto st138 + case 139: + goto st139 + case 140: + goto st140 case 519: goto st519 case 520: goto st520 - case 140: - goto st140 case 141: goto st141 - case 142: - goto st142 - case 143: - goto st143 - case 144: - goto st144 case 521: goto st521 - case 145: - goto st145 + case 142: + goto st142 case 522: goto st522 - case 146: - goto st146 case 523: goto st523 case 524: @@ -1089,48 +981,40 @@ _again: goto st528 case 529: goto st529 + case 143: + goto st143 + case 144: + goto st144 + case 145: + goto st145 case 530: goto st530 - case 531: - goto st531 + case 146: + goto st146 case 147: goto st147 case 148: goto st148 + case 531: + goto st531 case 149: goto st149 - case 532: - goto st532 case 150: goto st150 - case 151: - goto st151 - case 152: - goto st152 + case 532: + goto st532 case 533: goto st533 - case 153: - goto st153 - case 154: - goto st154 case 534: goto st534 case 535: goto st535 - case 155: - goto st155 - case 156: - goto st156 - case 157: - goto st157 case 536: goto st536 case 537: goto st537 case 538: goto st538 - case 158: - goto st158 case 539: goto st539 case 540: @@ -1157,24 +1041,28 @@ _again: goto st550 case 551: goto st551 + case 151: + goto st151 + case 152: + goto st152 case 552: goto st552 case 553: goto st553 case 554: goto st554 + case 153: + goto st153 case 555: goto st555 case 556: goto st556 + case 154: + goto st154 case 557: goto st557 case 558: goto st558 - case 159: - goto st159 - case 160: - goto st160 case 559: goto st559 case 560: @@ -1193,50 +1081,28 @@ _again: goto st566 case 567: goto st567 - case 161: - goto st161 - case 162: - goto st162 - case 163: - goto st163 case 568: goto st568 - case 164: - goto st164 - case 165: - goto st165 - case 166: - goto st166 case 569: goto st569 - case 167: - goto st167 - case 168: - goto st168 case 570: goto st570 case 571: goto st571 - case 169: - goto st169 - case 170: - goto st170 - case 171: - goto st171 - case 172: - goto st172 case 572: goto st572 - case 173: - goto st173 case 573: goto st573 case 574: goto st574 - case 174: - goto st174 + case 155: + goto st155 + case 156: + goto st156 case 
575: goto st575 + case 157: + goto st157 case 576: goto st576 case 577: @@ -1253,40 +1119,44 @@ _again: goto st582 case 583: goto st583 - case 175: - goto st175 - case 176: - goto st176 - case 177: - goto st177 + case 158: + goto st158 + case 159: + goto st159 + case 160: + goto st160 case 584: goto st584 - case 178: - goto st178 - case 179: - goto st179 - case 180: - goto st180 + case 161: + goto st161 + case 162: + goto st162 + case 163: + goto st163 case 585: goto st585 - case 181: - goto st181 - case 182: - goto st182 + case 164: + goto st164 + case 165: + goto st165 case 586: goto st586 case 587: goto st587 - case 183: - goto st183 - case 184: - goto st184 + case 166: + goto st166 + case 167: + goto st167 + case 168: + goto st168 + case 169: + goto st169 + case 170: + goto st170 + case 171: + goto st171 case 588: goto st588 - case 185: - goto st185 - case 186: - goto st186 case 589: goto st589 case 590: @@ -1303,137 +1173,461 @@ _again: goto st595 case 596: goto st596 - case 187: - goto st187 - case 188: - goto st188 - case 189: - goto st189 case 597: goto st597 - case 190: - goto st190 - case 191: - goto st191 - case 192: - goto st192 case 598: goto st598 - case 193: - goto st193 - case 194: - goto st194 case 599: goto st599 case 600: goto st600 - case 195: - goto st195 case 601: goto st601 - case 196: - goto st196 case 602: goto st602 case 603: goto st603 - case 197: - goto st197 - case 198: - goto st198 - case 199: - goto st199 case 604: goto st604 case 605: goto st605 case 606: goto st606 + case 172: + goto st172 + case 173: + goto st173 + case 174: + goto st174 + case 607: + goto st607 + case 608: + goto st608 + case 609: + goto st609 + case 175: + goto st175 + case 610: + goto st610 + case 611: + goto st611 + case 176: + goto st176 + case 612: + goto st612 + case 613: + goto st613 + case 614: + goto st614 + case 615: + goto st615 + case 616: + goto st616 + case 177: + goto st177 + case 178: + goto st178 + case 179: + goto st179 + case 617: + goto st617 + case 180: + goto st180 + case 181: + goto st181 + case 182: + goto st182 + case 618: + goto st618 + case 183: + goto st183 + case 184: + goto st184 + case 619: + goto st619 + case 620: + goto st620 + case 185: + goto st185 + case 621: + goto st621 + case 622: + goto st622 + case 186: + goto st186 + case 187: + goto st187 + case 188: + goto st188 + case 623: + goto st623 + case 189: + goto st189 + case 190: + goto st190 + case 624: + goto st624 + case 625: + goto st625 + case 626: + goto st626 + case 627: + goto st627 + case 628: + goto st628 + case 629: + goto st629 + case 630: + goto st630 + case 631: + goto st631 + case 191: + goto st191 + case 192: + goto st192 + case 193: + goto st193 + case 632: + goto st632 + case 194: + goto st194 + case 195: + goto st195 + case 196: + goto st196 + case 633: + goto st633 + case 197: + goto st197 + case 198: + goto st198 + case 634: + goto st634 + case 635: + goto st635 + case 199: + goto st199 case 200: goto st200 case 201: goto st201 + case 636: + goto st636 + case 637: + goto st637 + case 638: + goto st638 + case 639: + goto st639 + case 640: + goto st640 + case 641: + goto st641 + case 642: + goto st642 + case 643: + goto st643 + case 644: + goto st644 + case 645: + goto st645 + case 646: + goto st646 + case 647: + goto st647 + case 648: + goto st648 + case 649: + goto st649 + case 650: + goto st650 + case 651: + goto st651 + case 652: + goto st652 + case 653: + goto st653 + case 654: + goto st654 case 202: goto st202 - case 607: - goto st607 case 203: goto st203 case 204: goto st204 
case 205: goto st205 + case 206: + goto st206 + case 655: + goto st655 + case 207: + goto st207 + case 208: + goto st208 + case 656: + goto st656 + case 657: + goto st657 + case 658: + goto st658 + case 659: + goto st659 + case 660: + goto st660 + case 661: + goto st661 + case 662: + goto st662 + case 663: + goto st663 + case 664: + goto st664 + case 209: + goto st209 + case 210: + goto st210 + case 211: + goto st211 + case 665: + goto st665 + case 212: + goto st212 + case 213: + goto st213 + case 214: + goto st214 + case 666: + goto st666 + case 215: + goto st215 + case 216: + goto st216 + case 667: + goto st667 + case 668: + goto st668 + case 217: + goto st217 + case 218: + goto st218 + case 219: + goto st219 + case 220: + goto st220 + case 669: + goto st669 + case 221: + goto st221 + case 222: + goto st222 + case 670: + goto st670 + case 671: + goto st671 + case 672: + goto st672 + case 673: + goto st673 + case 674: + goto st674 + case 675: + goto st675 + case 676: + goto st676 + case 677: + goto st677 + case 223: + goto st223 + case 224: + goto st224 + case 225: + goto st225 + case 678: + goto st678 + case 226: + goto st226 + case 227: + goto st227 + case 228: + goto st228 + case 679: + goto st679 + case 229: + goto st229 + case 230: + goto st230 + case 680: + goto st680 + case 681: + goto st681 + case 231: + goto st231 + case 232: + goto st232 + case 233: + goto st233 + case 682: + goto st682 + case 683: + goto st683 + case 684: + goto st684 + case 685: + goto st685 + case 686: + goto st686 + case 687: + goto st687 + case 688: + goto st688 + case 689: + goto st689 + case 690: + goto st690 + case 691: + goto st691 + case 692: + goto st692 + case 693: + goto st693 + case 694: + goto st694 + case 695: + goto st695 + case 696: + goto st696 + case 697: + goto st697 + case 698: + goto st698 + case 699: + goto st699 + case 700: + goto st700 + case 234: + goto st234 + case 235: + goto st235 + case 701: + goto st701 + case 236: + goto st236 + case 237: + goto st237 + case 702: + goto st702 + case 703: + goto st703 + case 704: + goto st704 + case 705: + goto st705 + case 706: + goto st706 + case 707: + goto st707 + case 708: + goto st708 + case 709: + goto st709 + case 238: + goto st238 + case 239: + goto st239 + case 240: + goto st240 + case 710: + goto st710 + case 241: + goto st241 + case 242: + goto st242 + case 243: + goto st243 + case 711: + goto st711 + case 244: + goto st244 + case 245: + goto st245 + case 712: + goto st712 + case 713: + goto st713 + case 246: + goto st246 + case 247: + goto st247 + case 714: + goto st714 + case 250: + goto st250 + case 717: + goto st717 + case 718: + goto st718 + case 251: + goto st251 + case 252: + goto st252 + case 253: + goto st253 + case 254: + goto st254 + case 719: + goto st719 + case 255: + goto st255 + case 720: + goto st720 + case 256: + goto st256 + case 257: + goto st257 + case 258: + goto st258 + case 715: + goto st715 + case 716: + goto st716 + case 248: + goto st248 + case 249: + goto st249 } if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof } _resume: - switch m.cs { + switch ( m.cs) { + case 259: + goto st_case_259 case 1: goto st_case_1 case 2: goto st_case_2 case 3: goto st_case_3 - case 4: - goto st_case_4 case 0: goto st_case_0 + case 4: + goto st_case_4 case 5: goto st_case_5 case 6: goto st_case_6 case 7: goto st_case_7 - case 206: - goto st_case_206 - case 207: - goto st_case_207 - case 208: - goto st_case_208 case 8: goto st_case_8 - case 209: - goto st_case_209 - case 210: - goto st_case_210 - case 211: - goto st_case_211 - case 
212: - goto st_case_212 - case 213: - goto st_case_213 - case 214: - goto st_case_214 - case 215: - goto st_case_215 - case 216: - goto st_case_216 - case 217: - goto st_case_217 - case 218: - goto st_case_218 - case 219: - goto st_case_219 - case 220: - goto st_case_220 - case 221: - goto st_case_221 - case 222: - goto st_case_222 - case 223: - goto st_case_223 - case 224: - goto st_case_224 - case 225: - goto st_case_225 - case 226: - goto st_case_226 - case 227: - goto st_case_227 - case 228: - goto st_case_228 + case 260: + goto st_case_260 + case 261: + goto st_case_261 + case 262: + goto st_case_262 case 9: goto st_case_9 case 10: @@ -1444,54 +1638,26 @@ _resume: goto st_case_12 case 13: goto st_case_13 - case 229: - goto st_case_229 case 14: goto st_case_14 case 15: goto st_case_15 - case 230: - goto st_case_230 - case 231: - goto st_case_231 - case 232: - goto st_case_232 - case 233: - goto st_case_233 - case 234: - goto st_case_234 - case 235: - goto st_case_235 - case 236: - goto st_case_236 - case 237: - goto st_case_237 - case 238: - goto st_case_238 case 16: goto st_case_16 case 17: goto st_case_17 case 18: goto st_case_18 - case 239: - goto st_case_239 case 19: goto st_case_19 case 20: goto st_case_20 case 21: goto st_case_21 - case 240: - goto st_case_240 case 22: goto st_case_22 case 23: goto st_case_23 - case 241: - goto st_case_241 - case 242: - goto st_case_242 case 24: goto st_case_24 case 25: @@ -1512,80 +1678,22 @@ _resume: goto st_case_32 case 33: goto st_case_33 - case 34: - goto st_case_34 - case 35: - goto st_case_35 - case 36: - goto st_case_36 - case 37: - goto st_case_37 - case 38: - goto st_case_38 - case 39: - goto st_case_39 - case 40: - goto st_case_40 - case 41: - goto st_case_41 - case 42: - goto st_case_42 - case 243: - goto st_case_243 - case 244: - goto st_case_244 - case 43: - goto st_case_43 - case 245: - goto st_case_245 - case 246: - goto st_case_246 - case 247: - goto st_case_247 - case 248: - goto st_case_248 - case 249: - goto st_case_249 - case 250: - goto st_case_250 - case 251: - goto st_case_251 - case 252: - goto st_case_252 - case 253: - goto st_case_253 - case 254: - goto st_case_254 - case 255: - goto st_case_255 - case 256: - goto st_case_256 - case 257: - goto st_case_257 - case 258: - goto st_case_258 - case 259: - goto st_case_259 - case 260: - goto st_case_260 - case 261: - goto st_case_261 - case 262: - goto st_case_262 case 263: goto st_case_263 case 264: goto st_case_264 - case 44: - goto st_case_44 + case 34: + goto st_case_34 + case 35: + goto st_case_35 case 265: goto st_case_265 case 266: goto st_case_266 - case 45: - goto st_case_45 case 267: goto st_case_267 + case 36: + goto st_case_36 case 268: goto st_case_268 case 269: @@ -1622,40 +1730,36 @@ _resume: goto st_case_284 case 285: goto st_case_285 + case 37: + goto st_case_37 + case 38: + goto st_case_38 case 286: goto st_case_286 - case 46: - goto st_case_46 - case 47: - goto st_case_47 - case 48: - goto st_case_48 case 287: goto st_case_287 - case 49: - goto st_case_49 - case 50: - goto st_case_50 - case 51: - goto st_case_51 - case 52: - goto st_case_52 - case 53: - goto st_case_53 case 288: goto st_case_288 - case 54: - goto st_case_54 + case 39: + goto st_case_39 + case 40: + goto st_case_40 + case 41: + goto st_case_41 + case 42: + goto st_case_42 + case 43: + goto st_case_43 case 289: goto st_case_289 - case 55: - goto st_case_55 case 290: goto st_case_290 case 291: goto st_case_291 case 292: goto st_case_292 + case 44: + goto st_case_44 case 293: goto st_case_293 
case 294: @@ -1668,42 +1772,16 @@ _resume: goto st_case_297 case 298: goto st_case_298 - case 56: - goto st_case_56 - case 57: - goto st_case_57 - case 58: - goto st_case_58 case 299: goto st_case_299 - case 59: - goto st_case_59 - case 60: - goto st_case_60 - case 61: - goto st_case_61 case 300: goto st_case_300 - case 62: - goto st_case_62 - case 63: - goto st_case_63 case 301: goto st_case_301 case 302: goto st_case_302 - case 64: - goto st_case_64 - case 65: - goto st_case_65 - case 66: - goto st_case_66 case 303: goto st_case_303 - case 67: - goto st_case_67 - case 68: - goto st_case_68 case 304: goto st_case_304 case 305: @@ -1722,52 +1800,56 @@ _resume: goto st_case_311 case 312: goto st_case_312 - case 69: - goto st_case_69 - case 70: - goto st_case_70 - case 71: - goto st_case_71 case 313: goto st_case_313 - case 72: - goto st_case_72 - case 73: - goto st_case_73 - case 74: - goto st_case_74 case 314: goto st_case_314 - case 75: - goto st_case_75 - case 76: - goto st_case_76 + case 45: + goto st_case_45 + case 46: + goto st_case_46 + case 47: + goto st_case_47 + case 48: + goto st_case_48 + case 49: + goto st_case_49 + case 50: + goto st_case_50 + case 51: + goto st_case_51 + case 52: + goto st_case_52 + case 53: + goto st_case_53 + case 54: + goto st_case_54 case 315: goto st_case_315 case 316: goto st_case_316 - case 77: - goto st_case_77 - case 78: - goto st_case_78 - case 79: - goto st_case_79 - case 80: - goto st_case_80 - case 81: - goto st_case_81 - case 82: - goto st_case_82 case 317: goto st_case_317 + case 55: + goto st_case_55 + case 56: + goto st_case_56 + case 57: + goto st_case_57 + case 58: + goto st_case_58 + case 59: + goto st_case_59 + case 60: + goto st_case_60 case 318: goto st_case_318 case 319: goto st_case_319 + case 61: + goto st_case_61 case 320: goto st_case_320 - case 83: - goto st_case_83 case 321: goto st_case_321 case 322: @@ -1776,8 +1858,6 @@ _resume: goto st_case_323 case 324: goto st_case_324 - case 84: - goto st_case_84 case 325: goto st_case_325 case 326: @@ -1808,44 +1888,20 @@ _resume: goto st_case_338 case 339: goto st_case_339 + case 62: + goto st_case_62 case 340: goto st_case_340 case 341: goto st_case_341 case 342: goto st_case_342 - case 85: - goto st_case_85 - case 86: - goto st_case_86 - case 87: - goto st_case_87 - case 88: - goto st_case_88 - case 89: - goto st_case_89 - case 90: - goto st_case_90 - case 91: - goto st_case_91 - case 92: - goto st_case_92 - case 93: - goto st_case_93 - case 94: - goto st_case_94 - case 95: - goto st_case_95 - case 96: - goto st_case_96 - case 97: - goto st_case_97 + case 63: + goto st_case_63 case 343: goto st_case_343 case 344: goto st_case_344 - case 98: - goto st_case_98 case 345: goto st_case_345 case 346: @@ -1882,26 +1938,48 @@ _resume: goto st_case_361 case 362: goto st_case_362 + case 64: + goto st_case_64 + case 65: + goto st_case_65 + case 66: + goto st_case_66 + case 67: + goto st_case_67 + case 68: + goto st_case_68 case 363: goto st_case_363 + case 69: + goto st_case_69 + case 70: + goto st_case_70 + case 71: + goto st_case_71 + case 72: + goto st_case_72 + case 73: + goto st_case_73 case 364: goto st_case_364 - case 99: - goto st_case_99 - case 100: - goto st_case_100 case 365: goto st_case_365 case 366: goto st_case_366 - case 101: - goto st_case_101 + case 74: + goto st_case_74 + case 75: + goto st_case_75 case 367: goto st_case_367 case 368: goto st_case_368 + case 76: + goto st_case_76 case 369: goto st_case_369 + case 77: + goto st_case_77 case 370: goto st_case_370 case 371: @@ 
-1936,28 +2014,40 @@ _resume: goto st_case_385 case 386: goto st_case_386 - case 102: - goto st_case_102 case 387: goto st_case_387 case 388: goto st_case_388 - case 103: - goto st_case_103 - case 104: - goto st_case_104 - case 105: - goto st_case_105 - case 106: - goto st_case_106 - case 107: - goto st_case_107 case 389: goto st_case_389 - case 108: - goto st_case_108 - case 109: - goto st_case_109 + case 78: + goto st_case_78 + case 79: + goto st_case_79 + case 80: + goto st_case_80 + case 81: + goto st_case_81 + case 82: + goto st_case_82 + case 83: + goto st_case_83 + case 84: + goto st_case_84 + case 85: + goto st_case_85 + case 86: + goto st_case_86 + case 87: + goto st_case_87 + case 88: + goto st_case_88 + case 89: + goto st_case_89 + case 90: + goto st_case_90 + case 91: + goto st_case_91 case 390: goto st_case_390 case 391: @@ -1966,72 +2056,52 @@ _resume: goto st_case_392 case 393: goto st_case_393 + case 92: + goto st_case_92 + case 93: + goto st_case_93 + case 94: + goto st_case_94 + case 95: + goto st_case_95 case 394: goto st_case_394 case 395: goto st_case_395 + case 96: + goto st_case_96 + case 97: + goto st_case_97 case 396: goto st_case_396 + case 98: + goto st_case_98 + case 99: + goto st_case_99 case 397: goto st_case_397 case 398: goto st_case_398 - case 110: - goto st_case_110 - case 111: - goto st_case_111 - case 112: - goto st_case_112 + case 100: + goto st_case_100 case 399: goto st_case_399 - case 113: - goto st_case_113 - case 114: - goto st_case_114 - case 115: - goto st_case_115 case 400: goto st_case_400 - case 116: - goto st_case_116 - case 117: - goto st_case_117 + case 101: + goto st_case_101 + case 102: + goto st_case_102 case 401: goto st_case_401 case 402: goto st_case_402 - case 118: - goto st_case_118 - case 119: - goto st_case_119 - case 120: - goto st_case_120 - case 121: - goto st_case_121 - case 122: - goto st_case_122 - case 123: - goto st_case_123 - case 124: - goto st_case_124 - case 125: - goto st_case_125 - case 126: - goto st_case_126 - case 127: - goto st_case_127 - case 128: - goto st_case_128 - case 129: - goto st_case_129 case 403: goto st_case_403 case 404: goto st_case_404 case 405: goto st_case_405 - case 130: - goto st_case_130 case 406: goto st_case_406 case 407: @@ -2058,26 +2128,32 @@ _resume: goto st_case_417 case 418: goto st_case_418 + case 103: + goto st_case_103 case 419: goto st_case_419 case 420: goto st_case_420 case 421: goto st_case_421 + case 104: + goto st_case_104 + case 105: + goto st_case_105 case 422: goto st_case_422 case 423: goto st_case_423 case 424: goto st_case_424 + case 106: + goto st_case_106 case 425: goto st_case_425 case 426: goto st_case_426 case 427: goto st_case_427 - case 131: - goto st_case_131 case 428: goto st_case_428 case 429: @@ -2086,8 +2162,6 @@ _resume: goto st_case_430 case 431: goto st_case_431 - case 132: - goto st_case_132 case 432: goto st_case_432 case 433: @@ -2114,6 +2188,8 @@ _resume: goto st_case_443 case 444: goto st_case_444 + case 107: + goto st_case_107 case 445: goto st_case_445 case 446: @@ -2128,18 +2204,10 @@ _resume: goto st_case_450 case 451: goto st_case_451 - case 133: - goto st_case_133 - case 134: - goto st_case_134 - case 135: - goto st_case_135 case 452: goto st_case_452 case 453: goto st_case_453 - case 136: - goto st_case_136 case 454: goto st_case_454 case 455: @@ -2166,12 +2234,26 @@ _resume: goto st_case_465 case 466: goto st_case_466 + case 108: + goto st_case_108 + case 109: + goto st_case_109 + case 110: + goto st_case_110 + case 111: + goto st_case_111 + 
case 112: + goto st_case_112 case 467: goto st_case_467 + case 113: + goto st_case_113 case 468: goto st_case_468 case 469: goto st_case_469 + case 114: + goto st_case_114 case 470: goto st_case_470 case 471: @@ -2180,34 +2262,56 @@ _resume: goto st_case_472 case 473: goto st_case_473 - case 137: - goto st_case_137 case 474: goto st_case_474 case 475: goto st_case_475 case 476: goto st_case_476 - case 138: - goto st_case_138 case 477: goto st_case_477 case 478: goto st_case_478 + case 115: + goto st_case_115 + case 116: + goto st_case_116 + case 117: + goto st_case_117 case 479: goto st_case_479 + case 118: + goto st_case_118 + case 119: + goto st_case_119 + case 120: + goto st_case_120 case 480: goto st_case_480 + case 121: + goto st_case_121 + case 122: + goto st_case_122 case 481: goto st_case_481 case 482: goto st_case_482 + case 123: + goto st_case_123 + case 124: + goto st_case_124 + case 125: + goto st_case_125 + case 126: + goto st_case_126 case 483: goto st_case_483 case 484: goto st_case_484 case 485: goto st_case_485 + case 127: + goto st_case_127 case 486: goto st_case_486 case 487: @@ -2234,8 +2338,6 @@ _resume: goto st_case_497 case 498: goto st_case_498 - case 139: - goto st_case_139 case 499: goto st_case_499 case 500: @@ -2250,6 +2352,10 @@ _resume: goto st_case_504 case 505: goto st_case_505 + case 128: + goto st_case_128 + case 129: + goto st_case_129 case 506: goto st_case_506 case 507: @@ -2268,36 +2374,48 @@ _resume: goto st_case_513 case 514: goto st_case_514 + case 130: + goto st_case_130 + case 131: + goto st_case_131 + case 132: + goto st_case_132 case 515: goto st_case_515 + case 133: + goto st_case_133 + case 134: + goto st_case_134 + case 135: + goto st_case_135 case 516: goto st_case_516 + case 136: + goto st_case_136 + case 137: + goto st_case_137 case 517: goto st_case_517 case 518: goto st_case_518 + case 138: + goto st_case_138 + case 139: + goto st_case_139 + case 140: + goto st_case_140 case 519: goto st_case_519 case 520: goto st_case_520 - case 140: - goto st_case_140 case 141: goto st_case_141 - case 142: - goto st_case_142 - case 143: - goto st_case_143 - case 144: - goto st_case_144 case 521: goto st_case_521 - case 145: - goto st_case_145 + case 142: + goto st_case_142 case 522: goto st_case_522 - case 146: - goto st_case_146 case 523: goto st_case_523 case 524: @@ -2312,48 +2430,40 @@ _resume: goto st_case_528 case 529: goto st_case_529 + case 143: + goto st_case_143 + case 144: + goto st_case_144 + case 145: + goto st_case_145 case 530: goto st_case_530 - case 531: - goto st_case_531 + case 146: + goto st_case_146 case 147: goto st_case_147 case 148: goto st_case_148 + case 531: + goto st_case_531 case 149: goto st_case_149 - case 532: - goto st_case_532 case 150: goto st_case_150 - case 151: - goto st_case_151 - case 152: - goto st_case_152 + case 532: + goto st_case_532 case 533: goto st_case_533 - case 153: - goto st_case_153 - case 154: - goto st_case_154 case 534: goto st_case_534 case 535: goto st_case_535 - case 155: - goto st_case_155 - case 156: - goto st_case_156 - case 157: - goto st_case_157 case 536: goto st_case_536 case 537: goto st_case_537 case 538: goto st_case_538 - case 158: - goto st_case_158 case 539: goto st_case_539 case 540: @@ -2380,24 +2490,28 @@ _resume: goto st_case_550 case 551: goto st_case_551 + case 151: + goto st_case_151 + case 152: + goto st_case_152 case 552: goto st_case_552 case 553: goto st_case_553 case 554: goto st_case_554 + case 153: + goto st_case_153 case 555: goto st_case_555 case 556: goto 
st_case_556 + case 154: + goto st_case_154 case 557: goto st_case_557 case 558: goto st_case_558 - case 159: - goto st_case_159 - case 160: - goto st_case_160 case 559: goto st_case_559 case 560: @@ -2416,50 +2530,28 @@ _resume: goto st_case_566 case 567: goto st_case_567 - case 161: - goto st_case_161 - case 162: - goto st_case_162 - case 163: - goto st_case_163 case 568: goto st_case_568 - case 164: - goto st_case_164 - case 165: - goto st_case_165 - case 166: - goto st_case_166 case 569: goto st_case_569 - case 167: - goto st_case_167 - case 168: - goto st_case_168 case 570: goto st_case_570 case 571: goto st_case_571 - case 169: - goto st_case_169 - case 170: - goto st_case_170 - case 171: - goto st_case_171 - case 172: - goto st_case_172 case 572: goto st_case_572 - case 173: - goto st_case_173 case 573: goto st_case_573 case 574: goto st_case_574 - case 174: - goto st_case_174 + case 155: + goto st_case_155 + case 156: + goto st_case_156 case 575: goto st_case_575 + case 157: + goto st_case_157 case 576: goto st_case_576 case 577: @@ -2476,40 +2568,44 @@ _resume: goto st_case_582 case 583: goto st_case_583 - case 175: - goto st_case_175 - case 176: - goto st_case_176 - case 177: - goto st_case_177 + case 158: + goto st_case_158 + case 159: + goto st_case_159 + case 160: + goto st_case_160 case 584: goto st_case_584 - case 178: - goto st_case_178 - case 179: - goto st_case_179 - case 180: - goto st_case_180 + case 161: + goto st_case_161 + case 162: + goto st_case_162 + case 163: + goto st_case_163 case 585: goto st_case_585 - case 181: - goto st_case_181 - case 182: - goto st_case_182 + case 164: + goto st_case_164 + case 165: + goto st_case_165 case 586: goto st_case_586 case 587: goto st_case_587 - case 183: - goto st_case_183 - case 184: - goto st_case_184 + case 166: + goto st_case_166 + case 167: + goto st_case_167 + case 168: + goto st_case_168 + case 169: + goto st_case_169 + case 170: + goto st_case_170 + case 171: + goto st_case_171 case 588: goto st_case_588 - case 185: - goto st_case_185 - case 186: - goto st_case_186 case 589: goto st_case_589 case 590: @@ -2526,132 +2622,551 @@ _resume: goto st_case_595 case 596: goto st_case_596 - case 187: - goto st_case_187 - case 188: - goto st_case_188 - case 189: - goto st_case_189 case 597: goto st_case_597 - case 190: - goto st_case_190 - case 191: - goto st_case_191 - case 192: - goto st_case_192 case 598: goto st_case_598 - case 193: - goto st_case_193 - case 194: - goto st_case_194 case 599: goto st_case_599 case 600: goto st_case_600 - case 195: - goto st_case_195 case 601: goto st_case_601 - case 196: - goto st_case_196 case 602: goto st_case_602 case 603: goto st_case_603 - case 197: - goto st_case_197 - case 198: - goto st_case_198 - case 199: - goto st_case_199 case 604: goto st_case_604 case 605: goto st_case_605 case 606: goto st_case_606 + case 172: + goto st_case_172 + case 173: + goto st_case_173 + case 174: + goto st_case_174 + case 607: + goto st_case_607 + case 608: + goto st_case_608 + case 609: + goto st_case_609 + case 175: + goto st_case_175 + case 610: + goto st_case_610 + case 611: + goto st_case_611 + case 176: + goto st_case_176 + case 612: + goto st_case_612 + case 613: + goto st_case_613 + case 614: + goto st_case_614 + case 615: + goto st_case_615 + case 616: + goto st_case_616 + case 177: + goto st_case_177 + case 178: + goto st_case_178 + case 179: + goto st_case_179 + case 617: + goto st_case_617 + case 180: + goto st_case_180 + case 181: + goto st_case_181 + case 182: + goto st_case_182 + case 618: 
+ goto st_case_618 + case 183: + goto st_case_183 + case 184: + goto st_case_184 + case 619: + goto st_case_619 + case 620: + goto st_case_620 + case 185: + goto st_case_185 + case 621: + goto st_case_621 + case 622: + goto st_case_622 + case 186: + goto st_case_186 + case 187: + goto st_case_187 + case 188: + goto st_case_188 + case 623: + goto st_case_623 + case 189: + goto st_case_189 + case 190: + goto st_case_190 + case 624: + goto st_case_624 + case 625: + goto st_case_625 + case 626: + goto st_case_626 + case 627: + goto st_case_627 + case 628: + goto st_case_628 + case 629: + goto st_case_629 + case 630: + goto st_case_630 + case 631: + goto st_case_631 + case 191: + goto st_case_191 + case 192: + goto st_case_192 + case 193: + goto st_case_193 + case 632: + goto st_case_632 + case 194: + goto st_case_194 + case 195: + goto st_case_195 + case 196: + goto st_case_196 + case 633: + goto st_case_633 + case 197: + goto st_case_197 + case 198: + goto st_case_198 + case 634: + goto st_case_634 + case 635: + goto st_case_635 + case 199: + goto st_case_199 case 200: goto st_case_200 case 201: goto st_case_201 + case 636: + goto st_case_636 + case 637: + goto st_case_637 + case 638: + goto st_case_638 + case 639: + goto st_case_639 + case 640: + goto st_case_640 + case 641: + goto st_case_641 + case 642: + goto st_case_642 + case 643: + goto st_case_643 + case 644: + goto st_case_644 + case 645: + goto st_case_645 + case 646: + goto st_case_646 + case 647: + goto st_case_647 + case 648: + goto st_case_648 + case 649: + goto st_case_649 + case 650: + goto st_case_650 + case 651: + goto st_case_651 + case 652: + goto st_case_652 + case 653: + goto st_case_653 + case 654: + goto st_case_654 case 202: goto st_case_202 - case 607: - goto st_case_607 case 203: goto st_case_203 case 204: goto st_case_204 case 205: goto st_case_205 + case 206: + goto st_case_206 + case 655: + goto st_case_655 + case 207: + goto st_case_207 + case 208: + goto st_case_208 + case 656: + goto st_case_656 + case 657: + goto st_case_657 + case 658: + goto st_case_658 + case 659: + goto st_case_659 + case 660: + goto st_case_660 + case 661: + goto st_case_661 + case 662: + goto st_case_662 + case 663: + goto st_case_663 + case 664: + goto st_case_664 + case 209: + goto st_case_209 + case 210: + goto st_case_210 + case 211: + goto st_case_211 + case 665: + goto st_case_665 + case 212: + goto st_case_212 + case 213: + goto st_case_213 + case 214: + goto st_case_214 + case 666: + goto st_case_666 + case 215: + goto st_case_215 + case 216: + goto st_case_216 + case 667: + goto st_case_667 + case 668: + goto st_case_668 + case 217: + goto st_case_217 + case 218: + goto st_case_218 + case 219: + goto st_case_219 + case 220: + goto st_case_220 + case 669: + goto st_case_669 + case 221: + goto st_case_221 + case 222: + goto st_case_222 + case 670: + goto st_case_670 + case 671: + goto st_case_671 + case 672: + goto st_case_672 + case 673: + goto st_case_673 + case 674: + goto st_case_674 + case 675: + goto st_case_675 + case 676: + goto st_case_676 + case 677: + goto st_case_677 + case 223: + goto st_case_223 + case 224: + goto st_case_224 + case 225: + goto st_case_225 + case 678: + goto st_case_678 + case 226: + goto st_case_226 + case 227: + goto st_case_227 + case 228: + goto st_case_228 + case 679: + goto st_case_679 + case 229: + goto st_case_229 + case 230: + goto st_case_230 + case 680: + goto st_case_680 + case 681: + goto st_case_681 + case 231: + goto st_case_231 + case 232: + goto st_case_232 + case 233: + goto 
st_case_233 + case 682: + goto st_case_682 + case 683: + goto st_case_683 + case 684: + goto st_case_684 + case 685: + goto st_case_685 + case 686: + goto st_case_686 + case 687: + goto st_case_687 + case 688: + goto st_case_688 + case 689: + goto st_case_689 + case 690: + goto st_case_690 + case 691: + goto st_case_691 + case 692: + goto st_case_692 + case 693: + goto st_case_693 + case 694: + goto st_case_694 + case 695: + goto st_case_695 + case 696: + goto st_case_696 + case 697: + goto st_case_697 + case 698: + goto st_case_698 + case 699: + goto st_case_699 + case 700: + goto st_case_700 + case 234: + goto st_case_234 + case 235: + goto st_case_235 + case 701: + goto st_case_701 + case 236: + goto st_case_236 + case 237: + goto st_case_237 + case 702: + goto st_case_702 + case 703: + goto st_case_703 + case 704: + goto st_case_704 + case 705: + goto st_case_705 + case 706: + goto st_case_706 + case 707: + goto st_case_707 + case 708: + goto st_case_708 + case 709: + goto st_case_709 + case 238: + goto st_case_238 + case 239: + goto st_case_239 + case 240: + goto st_case_240 + case 710: + goto st_case_710 + case 241: + goto st_case_241 + case 242: + goto st_case_242 + case 243: + goto st_case_243 + case 711: + goto st_case_711 + case 244: + goto st_case_244 + case 245: + goto st_case_245 + case 712: + goto st_case_712 + case 713: + goto st_case_713 + case 246: + goto st_case_246 + case 247: + goto st_case_247 + case 714: + goto st_case_714 + case 250: + goto st_case_250 + case 717: + goto st_case_717 + case 718: + goto st_case_718 + case 251: + goto st_case_251 + case 252: + goto st_case_252 + case 253: + goto st_case_253 + case 254: + goto st_case_254 + case 719: + goto st_case_719 + case 255: + goto st_case_255 + case 720: + goto st_case_720 + case 256: + goto st_case_256 + case 257: + goto st_case_257 + case 258: + goto st_case_258 + case 715: + goto st_case_715 + case 716: + goto st_case_716 + case 248: + goto st_case_248 + case 249: + goto st_case_249 } goto st_out + st259: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof259 + } + st_case_259: + switch ( m.data)[( m.p)] { + case 10: + goto tr35 + case 11: + goto tr440 + case 13: + goto tr35 + case 32: + goto tr439 + case 35: + goto tr35 + case 44: + goto tr35 + case 92: + goto tr441 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr439 + } + goto tr438 +tr33: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st1 +tr438: +//line plugins/parsers/influx/machine.go.rl:73 + + foundMetric = true + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st1 st1: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof1 } st_case_1: +//line plugins/parsers/influx/machine.go:3096 switch ( m.data)[( m.p)] { + case 10: + goto tr2 + case 11: + goto tr3 + case 13: + goto tr2 case 32: goto tr1 - case 35: - goto tr1 case 44: - goto tr1 + goto tr4 case 92: - goto tr2 + goto st96 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr1 - } - case ( m.data)[( m.p)] >= 9: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 } - goto tr0 -tr0: -//line plugins/parsers/influx/machine.go.rl:18 + goto st1 +tr1: + ( m.cs) = 2 +//line plugins/parsers/influx/machine.go.rl:77 - m.pb = m.p + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - goto st2 + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr60: + ( m.cs) = 2 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, 
m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st2: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof2 } st_case_2: -//line plugins/parsers/influx/machine.go:2627 +//line plugins/parsers/influx/machine.go:3146 switch ( m.data)[( m.p)] { case 10: - goto tr5 + goto tr8 case 11: - goto tr6 + goto tr9 case 13: - goto tr5 + goto tr8 case 32: - goto tr4 + goto st2 case 44: - goto tr7 + goto tr8 + case 61: + goto tr8 case 92: - goto st133 + goto tr10 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr4 + goto st2 } - goto st2 -tr4: -//line plugins/parsers/influx/machine.go.rl:72 + goto tr6 +tr6: +//line plugins/parsers/influx/machine.go.rl:19 - m.handler.SetMeasurement(m.text()) - - goto st3 -tr60: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) + m.pb = m.p goto st3 st3: @@ -2659,31 +3174,236 @@ tr60: goto _test_eof3 } st_case_3: -//line plugins/parsers/influx/machine.go:2663 +//line plugins/parsers/influx/machine.go:3178 switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 11: - goto tr11 - case 13: - goto tr5 case 32: - goto st3 + goto tr8 case 44: - goto tr5 + goto tr8 case 61: - goto tr5 - case 92: goto tr12 + case 92: + goto st36 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st3 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 + } + case ( m.data)[( m.p)] >= 9: + goto tr8 } - goto tr9 -tr9: -//line plugins/parsers/influx/machine.go.rl:18 + goto st3 +tr2: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:37 - m.pb = m.p + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + + goto _again +tr8: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:30 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + + goto _again +tr35: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:23 + + err = ErrNameParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + + goto _again +tr39: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:23 + + err = ErrNameParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + +//line plugins/parsers/influx/machine.go.rl:37 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + + goto _again +tr43: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:23 + + err = ErrNameParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + +//line plugins/parsers/influx/machine.go.rl:30 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + + goto _again +tr47: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:37 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + +//line plugins/parsers/influx/machine.go.rl:30 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + + goto _again +tr105: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:30 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + +//line plugins/parsers/influx/machine.go.rl:44 + + err = ErrTimestampParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + + goto _again +tr132: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:37 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + +//line plugins/parsers/influx/machine.go.rl:30 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + +//line plugins/parsers/influx/machine.go.rl:44 
+ + err = ErrTimestampParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + + goto _again +tr198: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:37 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + +//line plugins/parsers/influx/machine.go.rl:44 + + err = ErrTimestampParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + + goto _again +tr404: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:23 + + err = ErrNameParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + +//line plugins/parsers/influx/machine.go.rl:37 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + +//line plugins/parsers/influx/machine.go.rl:30 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + + goto _again +tr407: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:44 + + err = ErrTimestampParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + + goto _again +tr1023: +//line plugins/parsers/influx/machine.go.rl:64 + + ( m.p)-- + + {goto st259 } + + goto st0 +//line plugins/parsers/influx/machine.go:3399 +st_case_0: + st0: + ( m.cs) = 0 + goto _out +tr12: +//line plugins/parsers/influx/machine.go.rl:99 + + key = m.text() goto st4 st4: @@ -2691,458 +3411,105 @@ tr9: goto _test_eof4 } st_case_4: -//line plugins/parsers/influx/machine.go:2695 +//line plugins/parsers/influx/machine.go:3415 switch ( m.data)[( m.p)] { - case 32: - goto tr5 - case 44: - goto tr5 - case 61: - goto tr14 - case 92: - goto st10 + case 34: + goto st5 + case 45: + goto tr15 + case 46: + goto tr16 + case 48: + goto tr17 + case 70: + goto tr19 + case 84: + goto tr20 + case 102: + goto tr21 + case 116: + goto tr22 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - case ( m.data)[( m.p)] >= 9: - goto tr5 + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr18 } - goto st4 -tr1: - m.cs = 0 -//line plugins/parsers/influx/machine.go.rl:56 - - m.err = ErrParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - - goto _again -tr5: - m.cs = 0 -//line plugins/parsers/influx/machine.go.rl:35 - - m.err = ErrFieldParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:56 - - m.err = ErrParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - - goto _again -tr31: - m.cs = 0 -//line plugins/parsers/influx/machine.go.rl:49 - - m.err = ErrTimestampParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:56 - - m.err = ErrParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - - goto _again -tr52: - m.cs = 0 -//line plugins/parsers/influx/machine.go.rl:42 - - m.err = ErrTagParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:56 - - m.err = ErrParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - - goto _again -tr61: - m.cs = 0 -//line plugins/parsers/influx/machine.go.rl:42 - - m.err = ErrTagParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:35 - - m.err = ErrFieldParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:56 - - m.err = ErrParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - - goto _again -tr101: - m.cs = 0 -//line plugins/parsers/influx/machine.go.rl:35 - - m.err = ErrFieldParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line 
plugins/parsers/influx/machine.go.rl:49 - - m.err = ErrTimestampParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:56 - - m.err = ErrParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - - goto _again -tr207: - m.cs = 0 -//line plugins/parsers/influx/machine.go.rl:42 - - m.err = ErrTagParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:35 - - m.err = ErrFieldParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:49 - - m.err = ErrTimestampParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:56 - - m.err = ErrParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - - goto _again -tr216: - m.cs = 0 -//line plugins/parsers/influx/machine.go.rl:42 - - m.err = ErrTagParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:49 - - m.err = ErrTimestampParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:56 - - m.err = ErrParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - - goto _again -//line plugins/parsers/influx/machine.go:2899 -st_case_0: - st0: - m.cs = 0 - goto _out -tr14: -//line plugins/parsers/influx/machine.go.rl:84 - - key = m.text() - - goto st5 + goto tr8 st5: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof5 } st_case_5: -//line plugins/parsers/influx/machine.go:2915 - switch ( m.data)[( m.p)] { - case 34: - goto st6 - case 45: - goto tr17 - case 46: - goto tr18 - case 48: - goto tr19 - case 70: - goto tr21 - case 84: - goto tr22 - case 102: - goto tr23 - case 116: - goto tr24 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr20 - } - goto tr5 - st6: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof6 - } - st_case_6: switch ( m.data)[( m.p)] { case 10: - goto tr5 + goto tr24 + case 12: + goto tr8 + case 13: + goto tr25 case 34: goto tr26 case 92: goto tr27 } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 + goto tr23 +tr23: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st6 + st6: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof6 } - goto tr25 -tr25: -//line plugins/parsers/influx/machine.go.rl:18 + st_case_6: +//line plugins/parsers/influx/machine.go:3467 + switch ( m.data)[( m.p)] { + case 10: + goto st7 + case 12: + goto tr8 + case 13: + goto st8 + case 34: + goto tr31 + case 92: + goto st76 + } + goto st6 +tr24: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p goto st7 st7: +//line plugins/parsers/influx/machine.go.rl:157 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof7 } st_case_7: -//line plugins/parsers/influx/machine.go:2966 +//line plugins/parsers/influx/machine.go:3498 switch ( m.data)[( m.p)] { case 10: - goto tr5 + goto st7 + case 12: + goto tr8 + case 13: + goto st8 case 34: - goto tr29 + goto tr31 case 92: - goto st11 + goto st76 } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 -tr26: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st206 -tr29: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st206 - st206: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof206 - } - st_case_206: 
-//line plugins/parsers/influx/machine.go:3000 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 13: - goto tr357 - case 32: - goto st207 - case 44: - goto st9 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st207 - } - goto tr101 -tr382: -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st207 -tr388: -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st207 -tr392: -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st207 -tr396: -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st207 - st207: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof207 - } - st_case_207: -//line plugins/parsers/influx/machine.go:3044 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 13: - goto tr357 - case 32: - goto st207 - case 45: - goto tr359 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr360 - } - case ( m.data)[( m.p)] >= 9: - goto st207 - } - goto tr31 -tr357: - m.cs = 208 -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; goto _out } - - goto _again -tr362: - m.cs = 208 -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; goto _out } - - goto _again -tr383: - m.cs = 208 -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; goto _out } - - goto _again -tr389: - m.cs = 208 -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; goto _out } - - goto _again -tr393: - m.cs = 208 -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; goto _out } - - goto _again -tr397: - m.cs = 208 -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; goto _out } - - goto _again - st208: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof208 - } - st_case_208: -//line plugins/parsers/influx/machine.go:3143 - goto tr1 -tr359: -//line plugins/parsers/influx/machine.go.rl:18 + goto st6 +tr25: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -3152,477 +3519,244 @@ tr359: goto _test_eof8 } st_case_8: -//line plugins/parsers/influx/machine.go:3156 - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st209 +//line plugins/parsers/influx/machine.go:3523 + if ( m.data)[( m.p)] == 10 { + goto st7 } - goto tr31 -tr360: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr8 +tr26: + ( m.cs) = 260 +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p - goto st209 - st209: +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr31: + ( m.cs) = 260 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st260: if ( 
m.p)++; ( m.p) == ( m.pe) { - goto _test_eof209 + goto _test_eof260 } - st_case_209: -//line plugins/parsers/influx/machine.go:3172 + st_case_260: +//line plugins/parsers/influx/machine.go:3563 switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto st262 case 13: - goto tr362 + goto st34 case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st211 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 -tr361: -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st210 - st210: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof210 - } - st_case_210: -//line plugins/parsers/influx/machine.go:3201 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 13: - goto tr357 - case 32: - goto st210 + goto st261 + case 44: + goto st37 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st210 + goto st261 } - goto tr1 - st211: + goto tr105 +tr516: + ( m.cs) = 261 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr909: + ( m.cs) = 261 +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr912: + ( m.cs) = 261 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr916: + ( m.cs) = 261 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st261: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof211 + goto _test_eof261 } - st_case_211: + st_case_261: +//line plugins/parsers/influx/machine.go:3635 switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto st262 case 13: - goto tr362 + goto st34 case 32: - goto tr361 + goto st261 + case 45: + goto tr445 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st212 + goto tr446 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto st261 } - goto tr31 - st212: + goto tr407 +tr451: + ( m.cs) = 262 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr715: + ( m.cs) = 262 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr925: + ( m.cs) = 262 +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr930: + ( m.cs) = 262 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr935: + ( m.cs) = 262 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st262: +//line plugins/parsers/influx/machine.go.rl:157 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + 
+//line plugins/parsers/influx/machine.go.rl:163 + + ( m.cs) = 715; + {( m.p)++; goto _out } + if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof212 + goto _test_eof262 } - st_case_212: + st_case_262: +//line plugins/parsers/influx/machine.go:3736 switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr35 + case 11: + goto tr36 case 13: - goto tr362 + goto tr35 case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st213 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st213: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof213 - } - st_case_213: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st214 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st214: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof214 - } - st_case_214: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st215 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st215: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof215 - } - st_case_215: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st216 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st216: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof216 - } - st_case_216: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st217 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st217: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof217 - } - st_case_217: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st218 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st218: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof218 - } - st_case_218: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st219 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st219: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof219 - } - st_case_219: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st220 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st220: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof220 - } - st_case_220: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st221 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st221: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof221 - 
} - st_case_221: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st222 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st222: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof222 - } - st_case_222: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st223 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st223: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof223 - } - st_case_223: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st224 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st224: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof224 - } - st_case_224: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st225 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st225: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof225 - } - st_case_225: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st226 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st226: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof226 - } - st_case_226: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st227 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st227: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof227 - } - st_case_227: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st228 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st228: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof228 - } - st_case_228: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 + goto st9 + case 35: + goto tr35 + case 44: + goto tr35 + case 92: + goto tr37 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr361 + goto st9 } - goto tr31 -tr384: -//line plugins/parsers/influx/machine.go.rl:96 + goto tr33 +tr439: +//line plugins/parsers/influx/machine.go.rl:73 - m.handler.AddFloat(key, m.text()) - - goto st9 -tr390: -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st9 -tr394: -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st9 -tr398: -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) + foundMetric = true goto st9 st9: @@ -3630,28 +3764,39 @@ tr398: goto _test_eof9 } st_case_9: -//line plugins/parsers/influx/machine.go:3634 +//line plugins/parsers/influx/machine.go:3768 switch ( m.data)[( m.p)] { + case 10: + 
goto tr35 + case 11: + goto tr36 + case 13: + goto tr35 case 32: - goto tr5 + goto st9 + case 35: + goto tr35 case 44: - goto tr5 - case 61: - goto tr5 + goto tr35 case 92: - goto tr12 + goto tr37 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - case ( m.data)[( m.p)] >= 9: - goto tr5 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st9 } - goto tr9 -tr12: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr33 +tr36: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st10 +tr440: +//line plugins/parsers/influx/machine.go.rl:73 + + foundMetric = true + +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -3661,37 +3806,70 @@ tr12: goto _test_eof10 } st_case_10: -//line plugins/parsers/influx/machine.go:3665 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - case ( m.data)[( m.p)] >= 9: - goto tr5 +//line plugins/parsers/influx/machine.go:3810 + switch ( m.data)[( m.p)] { + case 10: + goto tr39 + case 11: + goto tr40 + case 13: + goto tr39 + case 32: + goto tr38 + case 35: + goto st1 + case 44: + goto tr4 + case 92: + goto tr37 } - goto st4 -tr27: -//line plugins/parsers/influx/machine.go.rl:18 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr38 + } + goto tr33 +tr38: + ( m.cs) = 11 +//line plugins/parsers/influx/machine.go.rl:77 - m.pb = m.p + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - goto st11 + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st11: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof11 } st_case_11: -//line plugins/parsers/influx/machine.go:3686 +//line plugins/parsers/influx/machine.go:3849 switch ( m.data)[( m.p)] { - case 34: - goto st7 + case 10: + goto tr43 + case 11: + goto tr44 + case 13: + goto tr43 + case 32: + goto st11 + case 35: + goto tr6 + case 44: + goto tr43 + case 61: + goto tr33 case 92: - goto st7 + goto tr45 } - goto tr5 -tr17: -//line plugins/parsers/influx/machine.go.rl:18 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st11 + } + goto tr41 +tr41: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -3701,568 +3879,465 @@ tr17: goto _test_eof12 } st_case_12: -//line plugins/parsers/influx/machine.go:3705 +//line plugins/parsers/influx/machine.go:3883 switch ( m.data)[( m.p)] { - case 46: - goto st13 - case 48: - goto st231 + case 10: + goto tr47 + case 11: + goto tr48 + case 13: + goto tr47 + case 32: + goto tr1 + case 44: + goto tr4 + case 61: + goto tr49 + case 92: + goto st29 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st234 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 } - goto tr5 -tr18: -//line plugins/parsers/influx/machine.go.rl:18 + goto st12 +tr48: + ( m.cs) = 13 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr51: + ( m.cs) = 13 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p - goto st13 + goto _again st13: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof13 } st_case_13: -//line plugins/parsers/influx/machine.go:3727 - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st229 - } - goto tr5 - st229: - if ( 
m.p)++; ( m.p) == ( m.pe) { - goto _test_eof229 - } - st_case_229: +//line plugins/parsers/influx/machine.go:3939 switch ( m.data)[( m.p)] { case 10: - goto tr383 + goto tr47 + case 11: + goto tr51 case 13: - goto tr383 + goto tr47 case 32: - goto tr382 + goto tr1 case 44: - goto tr384 - case 69: - goto st14 - case 101: - goto st14 + goto tr4 + case 61: + goto tr49 + case 92: + goto tr45 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st229 - } - case ( m.data)[( m.p)] >= 9: - goto tr382 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 } - goto tr101 + goto tr41 +tr4: + ( m.cs) = 14 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr62: + ( m.cs) = 14 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st14: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof14 } st_case_14: +//line plugins/parsers/influx/machine.go:3991 switch ( m.data)[( m.p)] { - case 34: - goto st15 - case 43: - goto st15 - case 45: - goto st15 + case 32: + goto tr2 + case 44: + goto tr2 + case 61: + goto tr2 + case 92: + goto tr53 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st230 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 } - goto tr5 + goto tr52 +tr52: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st15 st15: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof15 } st_case_15: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st230 - } - goto tr5 - st230: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof230 - } - st_case_230: +//line plugins/parsers/influx/machine.go:4022 switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 13: - goto tr383 case 32: - goto tr382 + goto tr2 case 44: - goto tr384 + goto tr2 + case 61: + goto tr55 + case 92: + goto st25 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st230 + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 } case ( m.data)[( m.p)] >= 9: - goto tr382 + goto tr2 } - goto tr101 - st231: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof231 - } - st_case_231: - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 13: - goto tr383 - case 32: - goto tr382 - case 44: - goto tr384 - case 46: - goto st229 - case 69: - goto st14 - case 101: - goto st14 - case 105: - goto st233 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st232 - } - case ( m.data)[( m.p)] >= 9: - goto tr382 - } - goto tr101 - st232: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof232 - } - st_case_232: - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 13: - goto tr383 - case 32: - goto tr382 - case 44: - goto tr384 - case 46: - goto st229 - case 69: - goto st14 - case 101: - goto st14 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st232 - } - case ( m.data)[( m.p)] >= 9: - goto tr382 - } - goto tr101 - st233: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof233 - } - st_case_233: - switch ( m.data)[( m.p)] { - case 10: - goto tr389 - case 13: - 
goto tr389 - case 32: - goto tr388 - case 44: - goto tr390 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr388 - } - goto tr101 - st234: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof234 - } - st_case_234: - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 13: - goto tr383 - case 32: - goto tr382 - case 44: - goto tr384 - case 46: - goto st229 - case 69: - goto st14 - case 101: - goto st14 - case 105: - goto st233 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st234 - } - case ( m.data)[( m.p)] >= 9: - goto tr382 - } - goto tr101 -tr19: -//line plugins/parsers/influx/machine.go.rl:18 + goto st15 +tr55: +//line plugins/parsers/influx/machine.go.rl:86 - m.pb = m.p + key = m.text() - goto st235 - st235: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof235 - } - st_case_235: -//line plugins/parsers/influx/machine.go:3934 - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 13: - goto tr383 - case 32: - goto tr382 - case 44: - goto tr384 - case 46: - goto st229 - case 69: - goto st14 - case 101: - goto st14 - case 105: - goto st233 - case 117: - goto st236 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st232 - } - case ( m.data)[( m.p)] >= 9: - goto tr382 - } - goto tr101 - st236: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof236 - } - st_case_236: - switch ( m.data)[( m.p)] { - case 10: - goto tr393 - case 13: - goto tr393 - case 32: - goto tr392 - case 44: - goto tr394 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr392 - } - goto tr101 -tr20: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st237 - st237: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof237 - } - st_case_237: -//line plugins/parsers/influx/machine.go:3994 - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 13: - goto tr383 - case 32: - goto tr382 - case 44: - goto tr384 - case 46: - goto st229 - case 69: - goto st14 - case 101: - goto st14 - case 105: - goto st233 - case 117: - goto st236 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st237 - } - case ( m.data)[( m.p)] >= 9: - goto tr382 - } - goto tr101 -tr21: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st238 - st238: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof238 - } - st_case_238: -//line plugins/parsers/influx/machine.go:4035 - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 13: - goto tr397 - case 32: - goto tr396 - case 44: - goto tr398 - case 65: - goto st16 - case 97: - goto st19 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr396 - } - goto tr101 + goto st16 st16: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof16 } st_case_16: - if ( m.data)[( m.p)] == 76 { - goto st17 +//line plugins/parsers/influx/machine.go:4053 + switch ( m.data)[( m.p)] { + case 32: + goto tr2 + case 44: + goto tr2 + case 61: + goto tr2 + case 92: + goto tr58 } - goto tr5 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 + } + goto tr57 +tr57: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st17 st17: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof17 } st_case_17: - if ( m.data)[( m.p)] == 83 { - goto st18 +//line plugins/parsers/influx/machine.go:4084 + switch ( m.data)[( m.p)] { + case 10: + goto tr2 + 
case 11: + goto tr61 + case 13: + goto tr2 + case 32: + goto tr60 + case 44: + goto tr62 + case 61: + goto tr2 + case 92: + goto st23 } - goto tr5 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr60 + } + goto st17 +tr61: + ( m.cs) = 18 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st18: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof18 } st_case_18: - if ( m.data)[( m.p)] == 69 { - goto st239 - } - goto tr5 - st239: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof239 - } - st_case_239: +//line plugins/parsers/influx/machine.go:4123 switch ( m.data)[( m.p)] { case 10: - goto tr397 + goto tr47 + case 11: + goto tr65 case 13: - goto tr397 + goto tr47 case 32: - goto tr396 + goto tr60 case 44: - goto tr398 + goto tr62 + case 61: + goto tr47 + case 92: + goto tr66 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr396 + goto tr60 } - goto tr101 + goto tr64 +tr64: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st19 st19: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof19 } st_case_19: - if ( m.data)[( m.p)] == 108 { - goto st20 +//line plugins/parsers/influx/machine.go:4155 + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr68 + case 13: + goto tr47 + case 32: + goto tr60 + case 44: + goto tr62 + case 61: + goto tr12 + case 92: + goto st21 } - goto tr5 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr60 + } + goto st19 +tr68: + ( m.cs) = 20 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr65: + ( m.cs) = 20 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again st20: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof20 } st_case_20: - if ( m.data)[( m.p)] == 115 { - goto st21 +//line plugins/parsers/influx/machine.go:4211 + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr65 + case 13: + goto tr47 + case 32: + goto tr60 + case 44: + goto tr62 + case 61: + goto tr12 + case 92: + goto tr66 } - goto tr5 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr60 + } + goto tr64 +tr66: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st21 st21: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof21 } st_case_21: - if ( m.data)[( m.p)] == 101 { - goto st239 - } - goto tr5 -tr22: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st240 - st240: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof240 - } - st_case_240: -//line plugins/parsers/influx/machine.go:4138 - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 13: - goto tr397 - case 32: - goto tr396 - case 44: - goto tr398 - case 82: +//line plugins/parsers/influx/machine.go:4243 + if ( m.data)[( m.p)] == 92 { goto st22 - case 114: - goto st23 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr396 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr47 + } + case ( m.data)[( m.p)] >= 9: + goto tr47 } - goto tr101 + goto st19 st22: +//line plugins/parsers/influx/machine.go.rl:234 + ( m.p)-- + if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof22 } 
st_case_22: - if ( m.data)[( m.p)] == 85 { - goto st18 +//line plugins/parsers/influx/machine.go:4264 + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr68 + case 13: + goto tr47 + case 32: + goto tr60 + case 44: + goto tr62 + case 61: + goto tr12 + case 92: + goto st21 } - goto tr5 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr60 + } + goto st19 +tr58: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st23 st23: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof23 } st_case_23: - if ( m.data)[( m.p)] == 117 { - goto st21 +//line plugins/parsers/influx/machine.go:4296 + if ( m.data)[( m.p)] == 92 { + goto st24 } - goto tr5 -tr23: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st241 - st241: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof241 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 } - st_case_241: -//line plugins/parsers/influx/machine.go:4186 - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 13: - goto tr397 - case 32: - goto tr396 - case 44: - goto tr398 - case 97: - goto st19 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr396 - } - goto tr101 -tr24: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st242 - st242: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof242 - } - st_case_242: -//line plugins/parsers/influx/machine.go:4214 - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 13: - goto tr397 - case 32: - goto tr396 - case 44: - goto tr398 - case 114: - goto st23 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr396 - } - goto tr101 -tr11: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st24 + goto st17 st24: +//line plugins/parsers/influx/machine.go.rl:234 + ( m.p)-- + if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof24 } st_case_24: -//line plugins/parsers/influx/machine.go:4242 +//line plugins/parsers/influx/machine.go:4317 switch ( m.data)[( m.p)] { case 10: - goto tr5 + goto tr2 case 11: - goto tr11 + goto tr61 case 13: - goto tr5 + goto tr2 case 32: - goto st3 + goto tr60 case 44: - goto tr5 + goto tr62 case 61: - goto tr14 + goto tr2 case 92: - goto tr12 + goto st23 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st3 + goto tr60 } - goto tr9 -tr6: -//line plugins/parsers/influx/machine.go.rl:72 + goto st17 +tr53: +//line plugins/parsers/influx/machine.go.rl:19 - m.handler.SetMeasurement(m.text()) + m.pb = m.p goto st25 st25: @@ -4270,140 +4345,149 @@ tr6: goto _test_eof25 } st_case_25: -//line plugins/parsers/influx/machine.go:4274 - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 11: - goto tr45 - case 13: - goto tr5 - case 32: - goto tr4 - case 44: - goto tr7 - case 61: - goto st2 - case 92: - goto tr46 +//line plugins/parsers/influx/machine.go:4349 + if ( m.data)[( m.p)] == 92 { + goto st26 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr4 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 } - goto tr44 -tr44: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st26 + goto st15 st26: +//line plugins/parsers/influx/machine.go.rl:234 + ( m.p)-- + if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof26 } st_case_26: -//line plugins/parsers/influx/machine.go:4306 +//line plugins/parsers/influx/machine.go:4370 
switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 11: - goto tr48 - case 13: - goto tr5 case 32: - goto tr4 + goto tr2 case 44: - goto tr7 + goto tr2 case 61: - goto tr49 + goto tr55 case 92: - goto st84 + goto st25 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr4 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 } - goto st26 -tr48: -//line plugins/parsers/influx/machine.go.rl:72 + goto st15 +tr49: +//line plugins/parsers/influx/machine.go.rl:99 - m.handler.SetMeasurement(m.text()) + key = m.text() goto st27 -tr45: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 +tr406: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p +//line plugins/parsers/influx/machine.go.rl:99 + + key = m.text() + goto st27 st27: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof27 } st_case_27: -//line plugins/parsers/influx/machine.go:4348 +//line plugins/parsers/influx/machine.go:4411 switch ( m.data)[( m.p)] { case 10: - goto tr5 + goto tr47 case 11: - goto tr45 + goto tr3 case 13: - goto tr5 + goto tr47 case 32: - goto tr4 + goto tr1 + case 34: + goto st30 case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto tr46 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr4 + case 45: + goto tr74 + case 46: + goto tr75 + case 48: + goto tr76 + case 70: + goto tr78 + case 84: + goto tr79 + case 92: + goto st96 + case 102: + goto tr80 + case 116: + goto tr81 } - goto tr44 -tr7: -//line plugins/parsers/influx/machine.go.rl:72 + switch { + case ( m.data)[( m.p)] > 12: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr77 + } + case ( m.data)[( m.p)] >= 9: + goto tr1 + } + goto st1 +tr3: + ( m.cs) = 28 +//line plugins/parsers/influx/machine.go.rl:77 - m.handler.SetMeasurement(m.text()) + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - goto st28 -tr63: -//line plugins/parsers/influx/machine.go.rl:80 + ( m.cs) = 247; + {( m.p)++; goto _out } + } - m.handler.AddTag(key, m.text()) - - goto st28 + goto _again st28: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof28 } st_case_28: -//line plugins/parsers/influx/machine.go:4386 +//line plugins/parsers/influx/machine.go:4469 switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr51 + case 13: + goto tr47 case 32: - goto tr52 + goto tr1 case 44: - goto tr52 + goto tr4 case 61: - goto tr52 + goto st1 case 92: - goto tr53 + goto tr45 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 - } - case ( m.data)[( m.p)] >= 9: - goto tr52 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 } - goto tr51 -tr51: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr41 +tr45: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -4413,59 +4497,44 @@ tr51: goto _test_eof29 } st_case_29: -//line plugins/parsers/influx/machine.go:4417 - switch ( m.data)[( m.p)] { - case 32: - goto tr52 - case 44: - goto tr52 - case 61: - goto tr55 - case 92: - goto st37 - } +//line plugins/parsers/influx/machine.go:4501 switch { case ( m.data)[( m.p)] > 10: if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 + goto tr8 } case ( m.data)[( m.p)] >= 9: - goto tr52 + goto tr8 } - goto st29 -tr55: -//line plugins/parsers/influx/machine.go.rl:76 - - key = m.text() - - goto st30 + goto st12 st30: if ( m.p)++; ( m.p) == 
( m.pe) { goto _test_eof30 } st_case_30: -//line plugins/parsers/influx/machine.go:4448 switch ( m.data)[( m.p)] { + case 9: + goto tr83 + case 10: + goto tr24 + case 11: + goto tr84 + case 12: + goto tr1 + case 13: + goto tr25 case 32: - goto tr52 + goto tr83 + case 34: + goto tr85 case 44: - goto tr52 - case 61: - goto tr52 + goto tr86 case 92: - goto tr58 + goto tr87 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 - } - case ( m.data)[( m.p)] >= 9: - goto tr52 - } - goto tr57 -tr57: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr82 +tr82: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -4475,61 +4544,102 @@ tr57: goto _test_eof31 } st_case_31: -//line plugins/parsers/influx/machine.go:4479 +//line plugins/parsers/influx/machine.go:4548 switch ( m.data)[( m.p)] { + case 9: + goto tr89 case 10: - goto tr61 + goto st7 case 11: - goto tr62 + goto tr90 + case 12: + goto tr1 case 13: - goto tr61 + goto st8 case 32: - goto tr60 + goto tr89 + case 34: + goto tr91 case 44: - goto tr63 - case 61: - goto tr61 + goto tr92 case 92: - goto st36 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 + goto st142 } goto st31 -tr62: -//line plugins/parsers/influx/machine.go.rl:80 +tr89: + ( m.cs) = 32 +//line plugins/parsers/influx/machine.go.rl:77 - m.handler.AddTag(key, m.text()) + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - goto st32 + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr83: + ( m.cs) = 32 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again +tr231: + ( m.cs) = 32 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st32: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof32 } st_case_32: -//line plugins/parsers/influx/machine.go:4511 +//line plugins/parsers/influx/machine.go:4618 switch ( m.data)[( m.p)] { + case 9: + goto st32 case 10: - goto tr61 + goto st7 case 11: - goto tr66 + goto tr96 + case 12: + goto st2 case 13: - goto tr61 + goto st8 case 32: - goto tr60 + goto st32 + case 34: + goto tr97 case 44: - goto tr63 + goto st6 case 61: - goto tr61 + goto st6 case 92: - goto tr67 + goto tr98 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto tr65 -tr65: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr94 +tr94: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -4539,71 +4649,210 @@ tr65: goto _test_eof33 } st_case_33: -//line plugins/parsers/influx/machine.go:4543 +//line plugins/parsers/influx/machine.go:4653 switch ( m.data)[( m.p)] { + case 9: + goto st6 case 10: - goto tr61 - case 11: - goto tr69 + goto st7 + case 12: + goto tr8 case 13: - goto tr61 + goto st8 case 32: - goto tr60 + goto st6 + case 34: + goto tr100 case 44: - goto tr63 + goto st6 case 61: - goto tr14 + goto tr101 case 92: - goto st35 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 + goto st77 } goto st33 -tr69: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st34 -tr66: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 +tr97: + ( m.cs) = 263 
+//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p - goto st34 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr100: + ( m.cs) = 263 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr377: + ( m.cs) = 263 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again + st263: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof263 + } + st_case_263: +//line plugins/parsers/influx/machine.go:4727 + switch ( m.data)[( m.p)] { + case 10: + goto st262 + case 11: + goto st264 + case 13: + goto st34 + case 32: + goto st261 + case 44: + goto st37 + case 61: + goto tr12 + case 92: + goto st36 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st261 + } + goto st3 + st264: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof264 + } + st_case_264: + switch ( m.data)[( m.p)] { + case 10: + goto st262 + case 11: + goto st264 + case 13: + goto st34 + case 32: + goto st261 + case 44: + goto tr105 + case 45: + goto tr448 + case 61: + goto tr12 + case 92: + goto st36 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr449 + } + case ( m.data)[( m.p)] >= 9: + goto st261 + } + goto st3 +tr453: + ( m.cs) = 34 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr717: + ( m.cs) = 34 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr927: + ( m.cs) = 34 +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr932: + ( m.cs) = 34 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr937: + ( m.cs) = 34 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st34: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof34 } st_case_34: -//line plugins/parsers/influx/machine.go:4585 - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr66 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto tr67 +//line plugins/parsers/influx/machine.go:4850 + if ( m.data)[( m.p)] == 10 { + goto st262 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto tr65 -tr67: -//line plugins/parsers/influx/machine.go.rl:18 + goto st0 +tr448: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -4613,18 +4862,139 @@ tr67: goto _test_eof35 } st_case_35: -//line plugins/parsers/influx/machine.go:4617 +//line plugins/parsers/influx/machine.go:4866 + switch ( m.data)[( m.p)] { + case 32: + goto tr105 + case 44: + goto tr105 + case 61: + goto tr12 + 
case 92: + goto st36 + } switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 + case ( m.data)[( m.p)] < 12: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { + goto tr105 + } + case ( m.data)[( m.p)] > 13: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st265 + } + default: + goto tr105 + } + goto st3 +tr449: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st265 + st265: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof265 + } + st_case_265: +//line plugins/parsers/influx/machine.go:4901 + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr452 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st36 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st268 } case ( m.data)[( m.p)] >= 9: - goto tr61 + goto tr450 } - goto st33 -tr58: -//line plugins/parsers/influx/machine.go.rl:18 + goto st3 +tr450: + ( m.cs) = 266 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st266: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof266 + } + st_case_266: +//line plugins/parsers/influx/machine.go:4945 + switch ( m.data)[( m.p)] { + case 10: + goto st262 + case 13: + goto st34 + case 32: + goto st266 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st266 + } + goto st0 +tr452: + ( m.cs) = 267 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st267: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof267 + } + st_case_267: +//line plugins/parsers/influx/machine.go:4976 + switch ( m.data)[( m.p)] { + case 10: + goto st262 + case 11: + goto st267 + case 13: + goto st34 + case 32: + goto st266 + case 44: + goto tr8 + case 61: + goto tr12 + case 92: + goto st36 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st266 + } + goto st3 +tr10: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -4634,39 +5004,630 @@ tr58: goto _test_eof36 } st_case_36: -//line plugins/parsers/influx/machine.go:4638 +//line plugins/parsers/influx/machine.go:5008 switch { case ( m.data)[( m.p)] > 10: if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 + goto tr8 } case ( m.data)[( m.p)] >= 9: - goto tr52 + goto tr8 } - goto st31 -tr53: -//line plugins/parsers/influx/machine.go.rl:18 + goto st3 + st268: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof268 + } + st_case_268: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr452 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st36 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st269 + } + case ( m.data)[( m.p)] >= 9: + goto tr450 + } + goto st3 + st269: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof269 + } + st_case_269: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr452 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st36 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st270 + } + case ( m.data)[( m.p)] >= 
9: + goto tr450 + } + goto st3 + st270: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof270 + } + st_case_270: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr452 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st36 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st271 + } + case ( m.data)[( m.p)] >= 9: + goto tr450 + } + goto st3 + st271: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof271 + } + st_case_271: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr452 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st36 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st272 + } + case ( m.data)[( m.p)] >= 9: + goto tr450 + } + goto st3 + st272: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof272 + } + st_case_272: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr452 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st36 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st273 + } + case ( m.data)[( m.p)] >= 9: + goto tr450 + } + goto st3 + st273: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof273 + } + st_case_273: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr452 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st36 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st274 + } + case ( m.data)[( m.p)] >= 9: + goto tr450 + } + goto st3 + st274: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof274 + } + st_case_274: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr452 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st36 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st275 + } + case ( m.data)[( m.p)] >= 9: + goto tr450 + } + goto st3 + st275: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof275 + } + st_case_275: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr452 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st36 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st276 + } + case ( m.data)[( m.p)] >= 9: + goto tr450 + } + goto st3 + st276: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof276 + } + st_case_276: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr452 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st36 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st277 + } + case ( m.data)[( m.p)] >= 9: + goto tr450 + } + goto st3 + st277: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof277 + } + st_case_277: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr452 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st36 + } + switch { + case ( m.data)[( m.p)] 
> 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st278 + } + case ( m.data)[( m.p)] >= 9: + goto tr450 + } + goto st3 + st278: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof278 + } + st_case_278: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr452 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st36 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st279 + } + case ( m.data)[( m.p)] >= 9: + goto tr450 + } + goto st3 + st279: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof279 + } + st_case_279: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr452 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st36 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st280 + } + case ( m.data)[( m.p)] >= 9: + goto tr450 + } + goto st3 + st280: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof280 + } + st_case_280: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr452 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st36 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st281 + } + case ( m.data)[( m.p)] >= 9: + goto tr450 + } + goto st3 + st281: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof281 + } + st_case_281: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr452 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st36 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st282 + } + case ( m.data)[( m.p)] >= 9: + goto tr450 + } + goto st3 + st282: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof282 + } + st_case_282: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr452 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st36 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st283 + } + case ( m.data)[( m.p)] >= 9: + goto tr450 + } + goto st3 + st283: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof283 + } + st_case_283: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr452 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st36 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st284 + } + case ( m.data)[( m.p)] >= 9: + goto tr450 + } + goto st3 + st284: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof284 + } + st_case_284: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr452 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st36 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st285 + } + case ( m.data)[( m.p)] >= 9: + goto tr450 + } + goto st3 + st285: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof285 + } + st_case_285: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr452 + case 13: + goto tr453 + case 32: + goto tr450 
+ case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st36 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr450 + } + goto st3 +tr907: + ( m.cs) = 37 +//line plugins/parsers/influx/machine.go.rl:121 - m.pb = m.p + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- - goto st37 + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr1014: + ( m.cs) = 37 +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr1016: + ( m.cs) = 37 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr1018: + ( m.cs) = 37 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st37: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof37 } st_case_37: -//line plugins/parsers/influx/machine.go:4659 +//line plugins/parsers/influx/machine.go:5610 + switch ( m.data)[( m.p)] { + case 32: + goto tr8 + case 44: + goto tr8 + case 61: + goto tr8 + case 92: + goto tr10 + } switch { case ( m.data)[( m.p)] > 10: if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 + goto tr8 } case ( m.data)[( m.p)] >= 9: - goto tr52 + goto tr8 } - goto st29 -tr49: -//line plugins/parsers/influx/machine.go.rl:84 + goto tr6 +tr101: +//line plugins/parsers/influx/machine.go.rl:99 key = m.text() @@ -4676,46 +5637,276 @@ tr49: goto _test_eof38 } st_case_38: -//line plugins/parsers/influx/machine.go:4680 +//line plugins/parsers/influx/machine.go:5641 switch ( m.data)[( m.p)] { case 10: - goto tr5 - case 11: - goto tr6 + goto st7 + case 12: + goto tr8 case 13: - goto tr5 - case 32: - goto tr4 + goto st8 case 34: - goto st39 - case 44: - goto tr7 + goto tr107 case 45: - goto tr72 + goto tr108 case 46: - goto tr73 + goto tr109 case 48: - goto tr74 + goto tr110 case 70: - goto tr76 + goto tr112 case 84: - goto tr77 + goto tr113 case 92: - goto st133 + goto st76 case 102: - goto tr78 + goto tr114 case 116: - goto tr79 + goto tr115 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr111 + } + goto st6 +tr107: + ( m.cs) = 286 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st286: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof286 + } + st_case_286: +//line plugins/parsers/influx/machine.go:5690 + switch ( m.data)[( m.p)] { + case 10: + goto tr475 + case 12: + goto st261 + case 13: + goto tr476 + case 32: + goto tr474 + case 34: + goto tr26 + case 44: + goto tr477 + case 92: + goto tr27 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr474 + } + goto tr23 +tr474: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st287 +tr961: + ( m.cs) = 287 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr966: + ( m.cs) = 287 +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr969: + ( m.cs) = 287 +//line 
plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr972: + ( m.cs) = 287 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st287: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof287 + } + st_case_287: +//line plugins/parsers/influx/machine.go:5774 + switch ( m.data)[( m.p)] { + case 10: + goto st288 + case 12: + goto st261 + case 13: + goto st74 + case 32: + goto st287 + case 34: + goto tr31 + case 45: + goto tr480 + case 92: + goto st76 } switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr75 + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr481 } case ( m.data)[( m.p)] >= 9: - goto tr4 + goto st287 } - goto st2 + goto st6 +tr475: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st288 +tr584: + ( m.cs) = 288 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr620: + ( m.cs) = 288 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr778: + ( m.cs) = 288 +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr784: + ( m.cs) = 288 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr790: + ( m.cs) = 288 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st288: +//line plugins/parsers/influx/machine.go.rl:157 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + +//line plugins/parsers/influx/machine.go.rl:163 + + ( m.cs) = 715; + {( m.p)++; goto _out } + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof288 + } + st_case_288: +//line plugins/parsers/influx/machine.go:5887 + switch ( m.data)[( m.p)] { + case 9: + goto st39 + case 10: + goto st7 + case 11: + goto tr117 + case 12: + goto st9 + case 13: + goto st8 + case 32: + goto st39 + case 34: + goto tr118 + case 35: + goto st6 + case 44: + goto st6 + case 92: + goto tr87 + } + goto tr82 st39: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof39 @@ -4723,26 +5914,29 @@ tr49: st_case_39: switch ( m.data)[( m.p)] { case 9: - goto tr81 + goto st39 + case 10: + goto st7 case 11: - goto tr82 + goto tr117 case 12: - goto tr4 + goto st9 + case 13: + goto st8 case 32: - goto tr81 + goto st39 case 34: - goto tr83 + goto tr118 + case 35: + goto st6 case 44: - goto tr84 + goto st6 case 92: - goto tr85 + goto tr87 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto tr80 -tr80: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr82 +tr117: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -4752,79 +5946,76 @@ tr80: goto _test_eof40 } st_case_40: -//line plugins/parsers/influx/machine.go:4756 +//line 
plugins/parsers/influx/machine.go:5950 switch ( m.data)[( m.p)] { case 9: - goto tr87 + goto tr119 + case 10: + goto st7 case 11: - goto tr88 + goto tr120 case 12: - goto tr4 + goto tr38 + case 13: + goto st8 case 32: - goto tr87 + goto tr119 case 34: - goto tr89 + goto tr85 + case 35: + goto st31 case 44: - goto tr90 + goto tr92 case 92: - goto st170 + goto tr87 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st40 -tr87: -//line plugins/parsers/influx/machine.go.rl:72 + goto tr82 +tr119: + ( m.cs) = 41 +//line plugins/parsers/influx/machine.go.rl:77 - m.handler.SetMeasurement(m.text()) + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - goto st41 -tr81: -//line plugins/parsers/influx/machine.go.rl:72 + ( m.cs) = 247; + {( m.p)++; goto _out } + } - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st41 -tr237: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st41 + goto _again st41: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof41 } st_case_41: -//line plugins/parsers/influx/machine.go:4804 +//line plugins/parsers/influx/machine.go:5992 switch ( m.data)[( m.p)] { case 9: goto st41 + case 10: + goto st7 case 11: - goto tr94 + goto tr123 case 12: - goto st3 + goto st11 + case 13: + goto st8 case 32: goto st41 case 34: - goto tr95 + goto tr124 + case 35: + goto tr94 case 44: - goto st7 + goto st6 case 61: - goto st7 + goto tr82 case 92: - goto tr96 + goto tr125 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto tr92 -tr92: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr121 +tr121: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -4834,754 +6025,494 @@ tr92: goto _test_eof42 } st_case_42: -//line plugins/parsers/influx/machine.go:4838 +//line plugins/parsers/influx/machine.go:6029 switch ( m.data)[( m.p)] { case 9: - goto st7 + goto tr89 case 10: - goto tr5 + goto st7 + case 11: + goto tr127 + case 12: + goto tr1 + case 13: + goto st8 case 32: - goto st7 + goto tr89 case 34: - goto tr98 + goto tr128 case 44: - goto st7 + goto tr92 case 61: - goto tr99 + goto tr129 case 92: - goto st78 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 + goto st94 } goto st42 -tr95: -//line plugins/parsers/influx/machine.go.rl:18 +tr127: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr131: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st243 -tr98: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st243 -tr114: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st243 - st243: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof243 - } - st_case_243: -//line plugins/parsers/influx/machine.go:4890 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto st244 - case 13: - goto tr357 - case 32: - goto st207 - case 44: - goto st9 - case 61: - goto tr14 - case 92: - goto 
st10 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st207 - } - goto st4 - st244: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof244 - } - st_case_244: - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto st244 - case 13: - goto tr357 - case 32: - goto st207 - case 44: - goto tr101 - case 45: - goto tr404 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr405 - } - case ( m.data)[( m.p)] >= 9: - goto st207 - } - goto st4 -tr404: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st43 + goto _again st43: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof43 } st_case_43: -//line plugins/parsers/influx/machine.go:4954 +//line plugins/parsers/influx/machine.go:6088 switch ( m.data)[( m.p)] { + case 9: + goto tr89 + case 10: + goto st7 + case 11: + goto tr131 + case 12: + goto tr1 + case 13: + goto st8 case 32: - goto tr101 + goto tr89 + case 34: + goto tr124 case 44: - goto tr101 + goto tr92 case 61: - goto tr14 + goto tr129 case 92: - goto st10 + goto tr125 } - switch { - case ( m.data)[( m.p)] < 12: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr101 - } - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st245 - } - default: - goto tr101 - } - goto st4 -tr405: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr121 +tr124: + ( m.cs) = 289 +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p - goto st245 - st245: +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr128: + ( m.cs) = 289 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st289: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof245 + goto _test_eof289 } - st_case_245: -//line plugins/parsers/influx/machine.go:4989 + st_case_289: +//line plugins/parsers/influx/machine.go:6147 switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto st262 case 11: - goto tr406 + goto tr483 case 13: - goto tr362 + goto st34 case 32: - goto tr361 + goto tr482 case 44: - goto tr101 + goto tr484 case 61: - goto tr14 + goto tr49 case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st247 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 -tr406: -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st246 - st246: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof246 - } - st_case_246: -//line plugins/parsers/influx/machine.go:5026 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto st246 - case 13: - goto tr357 - case 32: - goto st210 - case 44: - goto tr5 - case 61: - goto tr14 - case 92: - goto st10 + goto st29 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st210 + goto tr482 } - goto st4 - st247: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof247 - } - st_case_247: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st248 - } 
- case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st248: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof248 - } - st_case_248: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st249 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st249: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof249 - } - st_case_249: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st250 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st250: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof250 - } - st_case_250: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st251 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st251: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof251 - } - st_case_251: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st252 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st252: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof252 - } - st_case_252: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st253 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st253: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof253 - } - st_case_253: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st254 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st254: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof254 - } - st_case_254: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st255 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st255: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof255 - } - st_case_255: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - 
switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st256 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st256: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof256 - } - st_case_256: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st257 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st257: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof257 - } - st_case_257: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st258 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st258: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof258 - } - st_case_258: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st259 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st259: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof259 - } - st_case_259: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st260 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st260: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof260 - } - st_case_260: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st261 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st261: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof261 - } - st_case_261: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st262 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st262: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof262 - } - st_case_262: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st263 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st263: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof263 - } - st_case_263: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - 
goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st264 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st264: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof264 - } - st_case_264: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr361 - } - goto st4 -tr99: -//line plugins/parsers/influx/machine.go.rl:84 + goto st12 +tr482: + ( m.cs) = 290 +//line plugins/parsers/influx/machine.go.rl:77 - key = m.text() + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr547: + ( m.cs) = 290 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr622: + ( m.cs) = 290 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr712: + ( m.cs) = 290 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr724: + ( m.cs) = 290 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr731: + ( m.cs) = 290 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr738: + ( m.cs) = 290 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr804: + ( m.cs) = 290 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr809: + ( m.cs) = 290 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != 
nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr814: + ( m.cs) = 290 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st290: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof290 + } + st_case_290: +//line plugins/parsers/influx/machine.go:6383 + switch ( m.data)[( m.p)] { + case 10: + goto st262 + case 11: + goto tr486 + case 13: + goto st34 + case 32: + goto st290 + case 44: + goto tr105 + case 45: + goto tr448 + case 61: + goto tr105 + case 92: + goto tr10 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr449 + } + case ( m.data)[( m.p)] >= 9: + goto st290 + } + goto tr6 +tr486: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st291 + st291: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof291 + } + st_case_291: +//line plugins/parsers/influx/machine.go:6422 + switch ( m.data)[( m.p)] { + case 10: + goto st262 + case 11: + goto tr486 + case 13: + goto st34 + case 32: + goto st290 + case 44: + goto tr105 + case 45: + goto tr448 + case 61: + goto tr12 + case 92: + goto tr10 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr449 + } + case ( m.data)[( m.p)] >= 9: + goto st290 + } + goto tr6 +tr483: + ( m.cs) = 292 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr487: + ( m.cs) = 292 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again + st292: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof292 + } + st_case_292: +//line plugins/parsers/influx/machine.go:6485 + switch ( m.data)[( m.p)] { + case 10: + goto st262 + case 11: + goto tr487 + case 13: + goto st34 + case 32: + goto tr482 + case 44: + goto tr4 + case 45: + goto tr488 + case 61: + goto tr49 + case 92: + goto tr45 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr489 + } + case ( m.data)[( m.p)] >= 9: + goto tr482 + } + goto tr41 +tr488: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p goto st44 st44: @@ -5589,1362 +6520,169 @@ tr99: goto _test_eof44 } st_case_44: -//line plugins/parsers/influx/machine.go:5593 +//line plugins/parsers/influx/machine.go:6524 switch ( m.data)[( m.p)] { case 10: - goto tr5 - case 34: - goto tr103 - case 45: - goto tr104 - case 46: - goto tr105 - case 48: - goto tr106 - case 70: - goto tr108 - case 84: - goto tr109 - case 92: - goto st11 - case 102: - goto tr110 - case 116: - goto tr111 - } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr107 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 -tr103: -//line plugins/parsers/influx/machine.go.rl:104 - - 
m.handler.AddString(key, m.text()) - - goto st265 - st265: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof265 - } - st_case_265: -//line plugins/parsers/influx/machine.go:5636 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 12: - goto st207 - case 13: - goto tr357 - case 32: - goto tr426 - case 34: - goto tr26 - case 44: - goto tr427 - case 92: - goto tr27 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr426 - } - goto tr25 -tr426: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st266 -tr452: -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st266 -tr457: -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st266 -tr460: -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st266 -tr463: -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st266 - st266: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof266 - } - st_case_266: -//line plugins/parsers/influx/machine.go:5692 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 12: - goto st207 - case 13: - goto tr357 - case 32: - goto st266 - case 34: - goto tr29 - case 45: - goto tr429 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr430 - } - case ( m.data)[( m.p)] >= 9: - goto st266 - } - goto st7 -tr429: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st45 - st45: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof45 - } - st_case_45: -//line plugins/parsers/influx/machine.go:5729 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st267 - } - case ( m.data)[( m.p)] >= 12: - goto tr101 - } - goto st7 -tr430: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st267 - st267: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof267 - } - st_case_267: -//line plugins/parsers/influx/machine.go:5758 - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st269 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 -tr431: -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st268 - st268: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof268 - } - st_case_268: -//line plugins/parsers/influx/machine.go:5793 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 12: - goto st210 - case 13: - goto tr357 - case 32: - goto st268 - case 34: - goto tr29 - case 92: - goto st11 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto st268 - } - goto st7 - st269: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof269 - } - st_case_269: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st270 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st270: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof270 - } - 
st_case_270: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st271 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st271: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof271 - } - st_case_271: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st272 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st272: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof272 - } - st_case_272: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st273 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st273: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof273 - } - st_case_273: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st274 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st274: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof274 - } - st_case_274: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st275 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st275: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof275 - } - st_case_275: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st276 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st276: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof276 - } - st_case_276: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st277 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st277: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof277 - } - st_case_277: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st278 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st278: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof278 - } - st_case_278: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - 
goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st279 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st279: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof279 - } - st_case_279: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st280 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st280: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof280 - } - st_case_280: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st281 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st281: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof281 - } - st_case_281: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st282 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st282: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof282 - } - st_case_282: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st283 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st283: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof283 - } - st_case_283: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st284 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st284: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof284 - } - st_case_284: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st285 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st285: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof285 - } - st_case_285: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st286 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st286: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof286 - } - st_case_286: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 
- case 92: - goto st11 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr431 - } - goto st7 -tr427: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st46 -tr469: -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st46 -tr473: -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st46 -tr475: -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st46 -tr477: -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st46 - st46: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof46 - } - st_case_46: -//line plugins/parsers/influx/machine.go:6346 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr5 - case 32: - goto st7 - case 34: - goto tr114 - case 44: - goto st7 - case 61: - goto st7 - case 92: - goto tr115 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto tr113 -tr113: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st47 - st47: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof47 - } - st_case_47: -//line plugins/parsers/influx/machine.go:6378 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr5 - case 32: - goto st7 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr117 - case 92: - goto st77 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st47 -tr117: -//line plugins/parsers/influx/machine.go.rl:84 - - key = m.text() - - goto st48 - st48: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof48 - } - st_case_48: -//line plugins/parsers/influx/machine.go:6410 - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr119 - case 45: - goto tr104 - case 46: - goto tr105 - case 48: - goto tr106 - case 70: - goto tr108 - case 84: - goto tr109 - case 92: - goto st11 - case 102: - goto tr110 - case 116: - goto tr111 - } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr107 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 -tr119: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st287 - st287: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof287 - } - st_case_287: -//line plugins/parsers/influx/machine.go:6453 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 12: - goto st207 - case 13: - goto tr357 - case 32: - goto tr426 - case 34: - goto tr26 - case 44: - goto tr451 - case 92: - goto tr27 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr426 - } - goto tr25 -tr451: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st49 -tr453: -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st49 -tr458: -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st49 -tr461: -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st49 -tr464: -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st49 - st49: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof49 - } - st_case_49: -//line plugins/parsers/influx/machine.go:6509 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr5 - case 32: - goto st7 - case 34: - goto tr95 - case 44: - goto st7 - case 61: - goto st7 - case 92: - goto tr121 - } - if 12 <= ( 
m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto tr120 -tr120: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st50 - st50: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof50 - } - st_case_50: -//line plugins/parsers/influx/machine.go:6541 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr5 - case 32: - goto st7 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr123 - case 92: - goto st64 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st50 -tr123: -//line plugins/parsers/influx/machine.go.rl:84 - - key = m.text() - - goto st51 - st51: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof51 - } - st_case_51: -//line plugins/parsers/influx/machine.go:6573 - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr119 - case 45: - goto tr125 - case 46: - goto tr126 - case 48: - goto tr127 - case 70: - goto tr129 - case 84: - goto tr130 - case 92: - goto st11 - case 102: - goto tr131 - case 116: goto tr132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr128 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 -tr125: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st52 - st52: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof52 - } - st_case_52: -//line plugins/parsers/influx/machine.go:6616 - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 46: - goto st53 - case 48: - goto st291 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st294 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 -tr126: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st53 - st53: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof53 - } - st_case_53: -//line plugins/parsers/influx/machine.go:6649 - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st288 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 - st288: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof288 - } - st_case_288: - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 12: - goto tr382 + case 11: + goto tr48 case 13: - goto tr383 + goto tr132 case 32: - goto tr452 - case 34: - goto tr29 + goto tr1 case 44: - goto tr453 - case 69: - goto st54 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 - case 101: - goto st54 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st288 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 - } - goto st7 - st54: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof54 - } - st_case_54: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr137 - case 43: - goto st55 - case 45: - goto st55 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st290 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 -tr137: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st289 - st289: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof289 - } - st_case_289: -//line plugins/parsers/influx/machine.go:6738 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 13: - goto tr357 - case 32: 
- goto st207 - case 44: - goto st9 + goto st29 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st230 + goto st293 } case ( m.data)[( m.p)] >= 9: - goto st207 + goto tr1 } - goto tr101 - st55: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof55 - } - st_case_55: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st290 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 - st290: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof290 - } - st_case_290: - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 12: - goto tr382 - case 13: - goto tr383 - case 32: - goto tr452 - case 34: - goto tr29 - case 44: - goto tr453 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st290 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 - } - goto st7 - st291: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof291 - } - st_case_291: - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 12: - goto tr382 - case 13: - goto tr383 - case 32: - goto tr452 - case 34: - goto tr29 - case 44: - goto tr453 - case 46: - goto st288 - case 69: - goto st54 - case 92: - goto st11 - case 101: - goto st54 - case 105: - goto st293 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st292 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 - } - goto st7 - st292: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof292 - } - st_case_292: - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 12: - goto tr382 - case 13: - goto tr383 - case 32: - goto tr452 - case 34: - goto tr29 - case 44: - goto tr453 - case 46: - goto st288 - case 69: - goto st54 - case 92: - goto st11 - case 101: - goto st54 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st292 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 - } - goto st7 + goto st12 +tr489: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st293 st293: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof293 } st_case_293: +//line plugins/parsers/influx/machine.go:6561 switch ( m.data)[( m.p)] { case 10: - goto tr389 - case 12: - goto tr388 + goto tr451 + case 11: + goto tr491 case 13: - goto tr389 + goto tr453 case 32: - goto tr457 - case 34: - goto tr29 + goto tr490 case 44: - goto tr458 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 + goto st29 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr457 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st297 + } + case ( m.data)[( m.p)] >= 9: + goto tr490 } - goto st7 + goto st12 +tr495: + ( m.cs) = 294 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr556: + ( m.cs) = 294 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr490: + ( m.cs) = 294 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line 
plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr553: + ( m.cs) = 294 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st294: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof294 } st_case_294: +//line plugins/parsers/influx/machine.go:6664 switch ( m.data)[( m.p)] { case 10: - goto tr383 - case 12: - goto tr382 + goto st262 + case 11: + goto tr494 case 13: - goto tr383 + goto st34 case 32: - goto tr452 - case 34: - goto tr29 + goto st294 case 44: - goto tr453 - case 46: - goto st288 - case 69: - goto st54 + goto tr8 + case 61: + goto tr8 case 92: - goto st11 - case 101: - goto st54 - case 105: - goto st293 + goto tr10 } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st294 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st294 } - goto st7 -tr127: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr6 +tr494: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -6954,207 +6692,153 @@ tr127: goto _test_eof295 } st_case_295: -//line plugins/parsers/influx/machine.go:6958 +//line plugins/parsers/influx/machine.go:6696 switch ( m.data)[( m.p)] { case 10: - goto tr383 - case 12: - goto tr382 + goto st262 + case 11: + goto tr494 case 13: - goto tr383 + goto st34 case 32: - goto tr452 - case 34: - goto tr29 + goto st294 case 44: - goto tr453 - case 46: - goto st288 - case 69: - goto st54 + goto tr8 + case 61: + goto tr12 case 92: - goto st11 - case 101: - goto st54 - case 105: - goto st293 - case 117: - goto st296 + goto tr10 } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st292 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st294 } - goto st7 + goto tr6 +tr496: + ( m.cs) = 296 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again +tr491: + ( m.cs) = 296 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st296: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof296 } st_case_296: +//line plugins/parsers/influx/machine.go:6762 switch ( m.data)[( m.p)] { case 10: - goto tr393 - case 12: - goto tr392 + goto st262 + case 11: + goto tr496 case 13: - goto tr393 + goto st34 case 32: - goto tr460 - case 34: - goto tr29 + goto tr495 case 44: - goto tr461 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 + goto tr45 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr460 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr495 } - goto st7 -tr128: -//line plugins/parsers/influx/machine.go.rl:18 - - 
m.pb = m.p - - goto st297 + goto tr41 st297: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof297 } st_case_297: -//line plugins/parsers/influx/machine.go:7030 switch ( m.data)[( m.p)] { case 10: - goto tr383 - case 12: - goto tr382 + goto tr451 + case 11: + goto tr491 case 13: - goto tr383 - case 32: - goto tr452 - case 34: - goto tr29 - case 44: goto tr453 - case 46: - goto st288 - case 69: - goto st54 + case 32: + goto tr490 + case 44: + goto tr4 + case 61: + goto tr49 case 92: - goto st11 - case 101: - goto st54 - case 105: - goto st293 - case 117: - goto st296 + goto st29 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st297 + goto st298 } case ( m.data)[( m.p)] >= 9: - goto tr452 + goto tr490 } - goto st7 -tr129: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st298 + goto st12 st298: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof298 } st_case_298: -//line plugins/parsers/influx/machine.go:7077 switch ( m.data)[( m.p)] { case 10: - goto tr397 - case 12: - goto tr396 + goto tr451 + case 11: + goto tr491 case 13: - goto tr397 + goto tr453 case 32: - goto tr463 - case 34: - goto tr29 + goto tr490 case 44: - goto tr464 - case 65: - goto st56 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 - case 97: - goto st59 + goto st29 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st299 + } + case ( m.data)[( m.p)] >= 9: + goto tr490 } - goto st7 - st56: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof56 - } - st_case_56: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 76: - goto st57 - case 92: - goto st11 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st57: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof57 - } - st_case_57: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 83: - goto st58 - case 92: - goto st11 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st58: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof58 - } - st_case_58: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 69: - goto st299 - case 92: - goto st11 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 + goto st12 st299: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof299 @@ -7162,312 +6846,119 @@ tr129: st_case_299: switch ( m.data)[( m.p)] { case 10: - goto tr397 - case 12: - goto tr396 + goto tr451 + case 11: + goto tr491 case 13: - goto tr397 + goto tr453 case 32: - goto tr463 - case 34: - goto tr29 + goto tr490 case 44: - goto tr464 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 + goto st29 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st300 + } + case ( m.data)[( m.p)] >= 9: + goto tr490 } - goto st7 - st59: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof59 - } - st_case_59: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - case 108: - goto st60 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st60: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof60 - } - st_case_60: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 
34: - goto tr29 - case 92: - goto st11 - case 115: - goto st61 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st61: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof61 - } - st_case_61: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - case 101: - goto st299 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 -tr130: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st300 + goto st12 st300: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof300 } st_case_300: -//line plugins/parsers/influx/machine.go:7252 switch ( m.data)[( m.p)] { case 10: - goto tr397 - case 12: - goto tr396 + goto tr451 + case 11: + goto tr491 case 13: - goto tr397 + goto tr453 case 32: - goto tr463 - case 34: - goto tr29 + goto tr490 case 44: - goto tr464 - case 82: - goto st62 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 - case 114: - goto st63 + goto st29 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st301 + } + case ( m.data)[( m.p)] >= 9: + goto tr490 } - goto st7 - st62: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof62 - } - st_case_62: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 85: - goto st58 - case 92: - goto st11 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st63: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof63 - } - st_case_63: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - case 117: - goto st61 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 -tr131: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st301 + goto st12 st301: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof301 } st_case_301: -//line plugins/parsers/influx/machine.go:7326 switch ( m.data)[( m.p)] { case 10: - goto tr397 - case 12: - goto tr396 + goto tr451 + case 11: + goto tr491 case 13: - goto tr397 + goto tr453 case 32: - goto tr463 - case 34: - goto tr29 + goto tr490 case 44: - goto tr464 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 - case 97: - goto st59 + goto st29 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st302 + } + case ( m.data)[( m.p)] >= 9: + goto tr490 } - goto st7 -tr132: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st302 + goto st12 st302: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof302 } st_case_302: -//line plugins/parsers/influx/machine.go:7360 switch ( m.data)[( m.p)] { case 10: - goto tr397 - case 12: - goto tr396 + goto tr451 + case 11: + goto tr491 case 13: - goto tr397 + goto tr453 case 32: - goto tr463 - case 34: - goto tr29 + goto tr490 case 44: - goto tr464 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 - case 114: - goto st63 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 - } - goto st7 -tr121: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st64 - st64: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof64 - } - st_case_64: -//line plugins/parsers/influx/machine.go:7394 - switch ( m.data)[( m.p)] { - case 34: - goto st50 - case 92: - goto st50 + goto st29 } switch { - case ( m.data)[( m.p)] > 10: - 
if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - case ( m.data)[( m.p)] >= 9: - goto tr5 - } - goto st4 -tr104: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st65 - st65: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof65 - } - st_case_65: -//line plugins/parsers/influx/machine.go:7421 - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 46: - goto st66 - case 48: - goto st305 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st308 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 -tr105: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st66 - st66: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof66 - } - st_case_66: -//line plugins/parsers/influx/machine.go:7454 - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st303 } - case ( m.data)[( m.p)] >= 12: - goto tr5 + case ( m.data)[( m.p)] >= 9: + goto tr490 } - goto st7 + goto st12 st303: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof303 @@ -7475,81 +6966,29 @@ tr105: st_case_303: switch ( m.data)[( m.p)] { case 10: - goto tr383 - case 12: - goto tr382 + goto tr451 + case 11: + goto tr491 case 13: - goto tr383 + goto tr453 case 32: - goto tr452 - case 34: - goto tr29 + goto tr490 case 44: - goto tr469 - case 69: - goto st67 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 - case 101: - goto st67 + goto st29 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st303 + goto st304 } case ( m.data)[( m.p)] >= 9: - goto tr452 + goto tr490 } - goto st7 - st67: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof67 - } - st_case_67: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr137 - case 43: - goto st68 - case 45: - goto st68 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st304 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 - st68: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof68 - } - st_case_68: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st304 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 + goto st12 st304: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof304 @@ -7557,29 +6996,29 @@ tr105: st_case_304: switch ( m.data)[( m.p)] { case 10: - goto tr383 - case 12: - goto tr382 + goto tr451 + case 11: + goto tr491 case 13: - goto tr383 + goto tr453 case 32: - goto tr452 - case 34: - goto tr29 + goto tr490 case 44: - goto tr469 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 + goto st29 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st304 + goto st305 } case ( m.data)[( m.p)] >= 9: - goto tr452 + goto tr490 } - goto st7 + goto st12 st305: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof305 @@ -7587,37 +7026,29 @@ tr105: st_case_305: switch ( m.data)[( m.p)] { case 10: - goto tr383 - case 12: - goto tr382 + goto tr451 + case 11: + goto tr491 case 13: - goto tr383 + goto tr453 case 32: 
- goto tr452 - case 34: - goto tr29 + goto tr490 case 44: - goto tr469 - case 46: - goto st303 - case 69: - goto st67 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 - case 101: - goto st67 - case 105: - goto st307 + goto st29 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st306 } case ( m.data)[( m.p)] >= 9: - goto tr452 + goto tr490 } - goto st7 + goto st12 st306: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof306 @@ -7625,35 +7056,29 @@ tr105: st_case_306: switch ( m.data)[( m.p)] { case 10: - goto tr383 - case 12: - goto tr382 + goto tr451 + case 11: + goto tr491 case 13: - goto tr383 + goto tr453 case 32: - goto tr452 - case 34: - goto tr29 + goto tr490 case 44: - goto tr469 - case 46: - goto st303 - case 69: - goto st67 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 - case 101: - goto st67 + goto st29 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st306 + goto st307 } case ( m.data)[( m.p)] >= 9: - goto tr452 + goto tr490 } - goto st7 + goto st12 st307: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof307 @@ -7661,24 +7086,29 @@ tr105: st_case_307: switch ( m.data)[( m.p)] { case 10: - goto tr389 - case 12: - goto tr388 + goto tr451 + case 11: + goto tr491 case 13: - goto tr389 + goto tr453 case 32: - goto tr457 - case 34: - goto tr29 + goto tr490 case 44: - goto tr473 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 + goto st29 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr457 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st308 + } + case ( m.data)[( m.p)] >= 9: + goto tr490 } - goto st7 + goto st12 st308: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof308 @@ -7686,84 +7116,59 @@ tr105: st_case_308: switch ( m.data)[( m.p)] { case 10: - goto tr383 - case 12: - goto tr382 + goto tr451 + case 11: + goto tr491 case 13: - goto tr383 + goto tr453 case 32: - goto tr452 - case 34: - goto tr29 + goto tr490 case 44: - goto tr469 - case 46: - goto st303 - case 69: - goto st67 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 - case 101: - goto st67 - case 105: - goto st307 + goto st29 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st308 + goto st309 } case ( m.data)[( m.p)] >= 9: - goto tr452 + goto tr490 } - goto st7 -tr106: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st309 + goto st12 st309: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof309 } st_case_309: -//line plugins/parsers/influx/machine.go:7732 switch ( m.data)[( m.p)] { case 10: - goto tr383 - case 12: - goto tr382 + goto tr451 + case 11: + goto tr491 case 13: - goto tr383 + goto tr453 case 32: - goto tr452 - case 34: - goto tr29 + goto tr490 case 44: - goto tr469 - case 46: - goto st303 - case 69: - goto st67 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 - case 101: - goto st67 - case 105: - goto st307 - case 117: - goto st310 + goto st29 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st306 + goto st310 } case ( m.data)[( m.p)] >= 9: - goto tr452 + goto tr490 } - goto st7 + goto st12 st310: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof310 @@ -7771,164 +7176,89 @@ tr106: st_case_310: switch ( m.data)[( m.p)] { case 10: - goto tr393 - case 
12: - goto tr392 + goto tr451 + case 11: + goto tr491 case 13: - goto tr393 + goto tr453 case 32: - goto tr460 - case 34: - goto tr29 + goto tr490 case 44: - goto tr475 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 + goto st29 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr460 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st311 + } + case ( m.data)[( m.p)] >= 9: + goto tr490 } - goto st7 -tr107: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st311 + goto st12 st311: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof311 } st_case_311: -//line plugins/parsers/influx/machine.go:7804 switch ( m.data)[( m.p)] { case 10: - goto tr383 - case 12: - goto tr382 + goto tr451 + case 11: + goto tr491 case 13: - goto tr383 + goto tr453 case 32: - goto tr452 - case 34: - goto tr29 + goto tr490 case 44: - goto tr469 - case 46: - goto st303 - case 69: - goto st67 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 - case 101: - goto st67 - case 105: - goto st307 - case 117: - goto st310 + goto st29 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st311 + goto st312 } case ( m.data)[( m.p)] >= 9: - goto tr452 + goto tr490 } - goto st7 -tr108: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st312 + goto st12 st312: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof312 } st_case_312: -//line plugins/parsers/influx/machine.go:7851 switch ( m.data)[( m.p)] { case 10: - goto tr397 - case 12: - goto tr396 + goto tr451 + case 11: + goto tr491 case 13: - goto tr397 + goto tr453 case 32: - goto tr463 - case 34: - goto tr29 + goto tr490 case 44: - goto tr477 - case 65: - goto st69 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 - case 97: - goto st72 + goto st29 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st313 + } + case ( m.data)[( m.p)] >= 9: + goto tr490 } - goto st7 - st69: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof69 - } - st_case_69: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 76: - goto st70 - case 92: - goto st11 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st70: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof70 - } - st_case_70: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 83: - goto st71 - case 92: - goto st11 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st71: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof71 - } - st_case_71: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 69: - goto st313 - case 92: - goto st11 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 + goto st12 st313: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof313 @@ -7936,651 +7266,1283 @@ tr108: st_case_313: switch ( m.data)[( m.p)] { case 10: - goto tr397 - case 12: - goto tr396 + goto tr451 + case 11: + goto tr491 case 13: - goto tr397 + goto tr453 case 32: - goto tr463 - case 34: - goto tr29 + goto tr490 case 44: - goto tr477 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 + goto st29 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( 
m.p)] && ( m.data)[( m.p)] <= 57 { + goto st314 + } + case ( m.data)[( m.p)] >= 9: + goto tr490 } - goto st7 - st72: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof72 - } - st_case_72: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - case 108: - goto st73 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st73: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof73 - } - st_case_73: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - case 115: - goto st74 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st74: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof74 - } - st_case_74: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - case 101: - goto st313 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 -tr109: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st314 + goto st12 st314: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof314 } st_case_314: -//line plugins/parsers/influx/machine.go:8026 switch ( m.data)[( m.p)] { case 10: - goto tr397 - case 12: - goto tr396 + goto tr451 + case 11: + goto tr491 case 13: - goto tr397 + goto tr453 case 32: - goto tr463 - case 34: - goto tr29 + goto tr490 case 44: - goto tr477 - case 82: - goto st75 + goto tr4 + case 61: + goto tr49 case 92: - goto st11 - case 114: - goto st76 + goto st29 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr490 } - goto st7 - st75: + goto st12 +tr484: + ( m.cs) = 45 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr549: + ( m.cs) = 45 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr799: + ( m.cs) = 45 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr718: + ( m.cs) = 45 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr928: + ( m.cs) = 45 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr933: + ( m.cs) = 45 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != 
nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr938: + ( m.cs) = 45 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr982: + ( m.cs) = 45 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr985: + ( m.cs) = 45 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr988: + ( m.cs) = 45 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st45: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof75 + goto _test_eof45 } - st_case_75: + st_case_45: +//line plugins/parsers/influx/machine.go:7533 switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 85: - goto st71 + case 32: + goto tr47 + case 44: + goto tr47 + case 61: + goto tr47 case 92: - goto st11 + goto tr135 } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr47 + } + case ( m.data)[( m.p)] >= 9: + goto tr47 } - goto st7 - st76: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof76 - } - st_case_76: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - case 117: - goto st74 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 -tr110: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr134 +tr134: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p - goto st315 + goto st46 + st46: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof46 + } + st_case_46: +//line plugins/parsers/influx/machine.go:7564 + switch ( m.data)[( m.p)] { + case 32: + goto tr47 + case 44: + goto tr47 + case 61: + goto tr137 + case 92: + goto st101 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr47 + } + case ( m.data)[( m.p)] >= 9: + goto tr47 + } + goto st46 +tr137: +//line plugins/parsers/influx/machine.go.rl:86 + + key = m.text() + +//line plugins/parsers/influx/machine.go.rl:99 + + key = m.text() + + goto st47 + st47: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof47 + } + st_case_47: +//line plugins/parsers/influx/machine.go:7599 + switch ( m.data)[( m.p)] { + case 32: + goto tr47 + case 34: + goto tr139 + case 44: + goto tr47 + case 45: + goto tr140 + case 46: + goto tr141 + case 48: + goto tr142 + case 61: + goto tr47 + 
case 70: + goto tr144 + case 84: + goto tr145 + case 92: + goto tr58 + case 102: + goto tr146 + case 116: + goto tr147 + } + switch { + case ( m.data)[( m.p)] < 12: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { + goto tr47 + } + case ( m.data)[( m.p)] > 13: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr143 + } + default: + goto tr47 + } + goto tr57 +tr139: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st48 + st48: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof48 + } + st_case_48: +//line plugins/parsers/influx/machine.go:7650 + switch ( m.data)[( m.p)] { + case 9: + goto tr149 + case 10: + goto tr24 + case 11: + goto tr150 + case 12: + goto tr60 + case 13: + goto tr25 + case 32: + goto tr149 + case 34: + goto tr151 + case 44: + goto tr152 + case 61: + goto tr23 + case 92: + goto tr153 + } + goto tr148 +tr148: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st49 + st49: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof49 + } + st_case_49: +//line plugins/parsers/influx/machine.go:7685 + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto st7 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 92: + goto st64 + } + goto st49 +tr180: + ( m.cs) = 50 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr155: + ( m.cs) = 50 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr149: + ( m.cs) = 50 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again + st50: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof50 + } + st_case_50: +//line plugins/parsers/influx/machine.go:7757 + switch ( m.data)[( m.p)] { + case 9: + goto st50 + case 10: + goto st7 + case 11: + goto tr162 + case 12: + goto st2 + case 13: + goto st8 + case 32: + goto st50 + case 34: + goto tr97 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr163 + } + goto tr160 +tr160: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st51 + st51: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof51 + } + st_case_51: +//line plugins/parsers/influx/machine.go:7792 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto st7 + case 12: + goto tr8 + case 13: + goto st8 + case 32: + goto st6 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + goto st51 +tr165: +//line plugins/parsers/influx/machine.go.rl:99 + + key = m.text() + + goto st52 + st52: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof52 + } + st_case_52: +//line plugins/parsers/influx/machine.go:7825 + switch ( m.data)[( m.p)] { + case 10: + goto st7 + case 12: + goto tr8 + case 13: + goto st8 + case 34: + goto tr107 + case 45: + goto tr167 + case 46: + goto tr168 + case 48: + goto tr169 + case 70: + goto tr171 + case 84: + goto tr172 + case 92: + goto st76 + case 102: + goto tr173 + case 116: + goto tr174 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr170 + } + goto st6 +tr167: +//line 
plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st53 + st53: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof53 + } + st_case_53: +//line plugins/parsers/influx/machine.go:7867 + switch ( m.data)[( m.p)] { + case 10: + goto st7 + case 12: + goto tr8 + case 13: + goto st8 + case 34: + goto tr31 + case 46: + goto st54 + case 48: + goto st621 + case 92: + goto st76 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st622 + } + goto st6 +tr168: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st54 + st54: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof54 + } + st_case_54: +//line plugins/parsers/influx/machine.go:7899 + switch ( m.data)[( m.p)] { + case 10: + goto st7 + case 12: + goto tr8 + case 13: + goto st8 + case 34: + goto tr31 + case 92: + goto st76 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st315 + } + goto st6 st315: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof315 } st_case_315: -//line plugins/parsers/influx/machine.go:8100 switch ( m.data)[( m.p)] { case 10: - goto tr397 + goto tr515 case 12: - goto tr396 + goto tr516 case 13: - goto tr397 + goto tr517 case 32: - goto tr463 + goto tr514 case 34: - goto tr29 + goto tr31 case 44: - goto tr477 + goto tr518 + case 69: + goto st175 case 92: - goto st11 - case 97: - goto st72 + goto st76 + case 101: + goto st175 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st315 + } + case ( m.data)[( m.p)] >= 9: + goto tr514 } - goto st7 -tr111: -//line plugins/parsers/influx/machine.go.rl:18 + goto st6 +tr902: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p goto st316 +tr514: + ( m.cs) = 316 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr908: + ( m.cs) = 316 +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr911: + ( m.cs) = 316 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr915: + ( m.cs) = 316 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st316: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof316 } st_case_316: -//line plugins/parsers/influx/machine.go:8134 +//line plugins/parsers/influx/machine.go:8013 switch ( m.data)[( m.p)] { case 10: - goto tr397 + goto st317 case 12: - goto tr396 + goto st261 case 13: - goto tr397 + goto st104 case 32: - goto tr463 + goto st316 case 34: - goto tr29 - case 44: - goto tr477 + goto tr31 + case 45: + goto tr522 case 92: - goto st11 - case 114: goto st76 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 - } - goto st7 -tr115: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st77 - st77: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof77 - } - st_case_77: -//line plugins/parsers/influx/machine.go:8168 - switch ( m.data)[( m.p)] { - case 34: - goto st47 - case 92: - goto st47 - } switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 + 
case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr523 } case ( m.data)[( m.p)] >= 9: - goto tr5 + goto st316 } - goto st4 -tr96: -//line plugins/parsers/influx/machine.go.rl:18 + goto st6 +tr650: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p - goto st78 - st78: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof78 - } - st_case_78: -//line plugins/parsers/influx/machine.go:8195 - switch ( m.data)[( m.p)] { - case 34: - goto st42 - case 92: - goto st42 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - case ( m.data)[( m.p)] >= 9: - goto tr5 - } - goto st4 -tr94: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st79 - st79: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof79 - } - st_case_79: -//line plugins/parsers/influx/machine.go:8222 - switch ( m.data)[( m.p)] { - case 9: - goto st41 - case 11: - goto tr94 - case 12: - goto st3 - case 32: - goto st41 - case 34: - goto tr95 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto tr96 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto tr92 -tr88: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st80 -tr82: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st80 - st80: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof80 - } - st_case_80: -//line plugins/parsers/influx/machine.go:8266 - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr157 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr158 - case 44: - goto tr90 - case 61: - goto st40 - case 92: - goto tr159 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto tr156 -tr156: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st81 - st81: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof81 - } - st_case_81: -//line plugins/parsers/influx/machine.go:8300 - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr161 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st81 -tr161: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st82 -tr157: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st82 - st82: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof82 - } - st_case_82: -//line plugins/parsers/influx/machine.go:8344 - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr157 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr158 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto tr159 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto tr156 -tr158: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - goto st317 -tr162: -//line plugins/parsers/influx/machine.go.rl:104 +tr659: + ( m.cs) = 317 +//line plugins/parsers/influx/machine.go.rl:148 - m.handler.AddString(key, m.text()) + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- 
- goto st317 + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr515: + ( m.cs) = 317 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr722: + ( m.cs) = 317 +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr729: + ( m.cs) = 317 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr736: + ( m.cs) = 317 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st317: +//line plugins/parsers/influx/machine.go.rl:157 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + +//line plugins/parsers/influx/machine.go.rl:163 + + ( m.cs) = 715; + {( m.p)++; goto _out } + if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof317 } st_case_317: -//line plugins/parsers/influx/machine.go:8388 +//line plugins/parsers/influx/machine.go:8126 switch ( m.data)[( m.p)] { + case 9: + goto st166 case 10: - goto tr357 + goto st7 case 11: - goto tr483 + goto tr339 + case 12: + goto st9 case 13: - goto tr357 + goto st8 case 32: - goto tr482 + goto st166 + case 34: + goto tr118 + case 35: + goto st6 case 44: - goto tr484 - case 61: - goto tr49 + goto st6 case 92: - goto st84 + goto tr340 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr482 + goto tr337 +tr337: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st55 + st55: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof55 } - goto st26 -tr482: -//line plugins/parsers/influx/machine.go.rl:72 + st_case_55: +//line plugins/parsers/influx/machine.go:8161 + switch ( m.data)[( m.p)] { + case 9: + goto tr180 + case 10: + goto st7 + case 11: + goto tr181 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr180 + case 34: + goto tr91 + case 44: + goto tr182 + case 92: + goto st157 + } + goto st55 +tr181: + ( m.cs) = 56 +//line plugins/parsers/influx/machine.go.rl:77 - m.handler.SetMeasurement(m.text()) + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - goto st318 -tr514: -//line plugins/parsers/influx/machine.go.rl:80 + ( m.cs) = 247; + {( m.p)++; goto _out } + } - m.handler.AddTag(key, m.text()) + goto _again + st56: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof56 + } + st_case_56: +//line plugins/parsers/influx/machine.go:8201 + switch ( m.data)[( m.p)] { + case 9: + goto tr180 + case 10: + goto st7 + case 11: + goto tr185 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr180 + case 34: + goto tr124 + case 44: + goto tr182 + case 61: + goto st55 + case 92: + goto tr186 + } + goto tr184 +tr184: +//line plugins/parsers/influx/machine.go.rl:19 - goto st318 -tr566: -//line plugins/parsers/influx/machine.go.rl:80 + m.pb = m.p - m.handler.AddTag(key, m.text()) + goto st57 + st57: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof57 + } + st_case_57: +//line plugins/parsers/influx/machine.go:8236 + switch ( m.data)[( m.p)] { + case 9: + goto tr180 + case 10: + goto st7 + case 11: + goto tr188 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr180 + case 34: + goto tr128 + case 44: + goto tr182 + case 
61: + goto tr189 + case 92: + goto st154 + } + goto st57 +tr188: + ( m.cs) = 58 +//line plugins/parsers/influx/machine.go.rl:77 -//line plugins/parsers/influx/machine.go.rl:96 + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - m.handler.AddFloat(key, m.text()) + ( m.cs) = 247; + {( m.p)++; goto _out } + } - goto st318 -tr572: -//line plugins/parsers/influx/machine.go.rl:80 + goto _again +tr185: + ( m.cs) = 58 +//line plugins/parsers/influx/machine.go.rl:77 - m.handler.AddTag(key, m.text()) + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- -//line plugins/parsers/influx/machine.go.rl:88 + ( m.cs) = 247; + {( m.p)++; goto _out } + } - m.handler.AddInt(key, m.text()) +//line plugins/parsers/influx/machine.go.rl:19 - goto st318 -tr576: -//line plugins/parsers/influx/machine.go.rl:80 + m.pb = m.p - m.handler.AddTag(key, m.text()) + goto _again + st58: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof58 + } + st_case_58: +//line plugins/parsers/influx/machine.go:8295 + switch ( m.data)[( m.p)] { + case 9: + goto tr180 + case 10: + goto st7 + case 11: + goto tr185 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr180 + case 34: + goto tr124 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto tr186 + } + goto tr184 +tr182: + ( m.cs) = 59 +//line plugins/parsers/influx/machine.go.rl:77 -//line plugins/parsers/influx/machine.go.rl:92 + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - m.handler.AddUint(key, m.text()) + ( m.cs) = 247; + {( m.p)++; goto _out } + } - goto st318 -tr580: -//line plugins/parsers/influx/machine.go.rl:80 + goto _again +tr158: + ( m.cs) = 59 +//line plugins/parsers/influx/machine.go.rl:90 - m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- -//line plugins/parsers/influx/machine.go.rl:100 + ( m.cs) = 247; + {( m.p)++; goto _out } + } - m.handler.AddBool(key, m.text()) + goto _again +tr152: + ( m.cs) = 59 +//line plugins/parsers/influx/machine.go.rl:90 - goto st318 -tr791: -//line plugins/parsers/influx/machine.go.rl:72 + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- - m.handler.SetMeasurement(m.text()) + ( m.cs) = 247; + {( m.p)++; goto _out } + } -//line plugins/parsers/influx/machine.go.rl:96 +//line plugins/parsers/influx/machine.go.rl:19 - m.handler.AddFloat(key, m.text()) + m.pb = m.p - goto st318 -tr800: -//line plugins/parsers/influx/machine.go.rl:72 + goto _again + st59: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof59 + } + st_case_59: +//line plugins/parsers/influx/machine.go:8367 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto st7 + case 12: + goto tr47 + case 13: + goto st8 + case 32: + goto st6 + case 34: + goto tr192 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr193 + } + goto tr191 +tr191: +//line plugins/parsers/influx/machine.go.rl:19 - m.handler.SetMeasurement(m.text()) + m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:88 + goto st60 + st60: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof60 + } + st_case_60: +//line plugins/parsers/influx/machine.go:8400 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto st7 + case 12: + goto tr47 + case 13: + goto st8 + case 32: + goto st6 + case 34: + goto tr195 + case 44: + goto st6 + case 61: + goto tr196 + case 92: + goto st71 + } + goto st60 +tr192: + ( m.cs) = 318 +//line plugins/parsers/influx/machine.go.rl:19 - m.handler.AddInt(key, m.text()) + m.pb = m.p - goto st318 -tr805: 
-//line plugins/parsers/influx/machine.go.rl:72 +//line plugins/parsers/influx/machine.go.rl:139 - m.handler.SetMeasurement(m.text()) + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- -//line plugins/parsers/influx/machine.go.rl:92 + ( m.cs) = 247; + {( m.p)++; goto _out } + } - m.handler.AddUint(key, m.text()) + goto _again +tr195: + ( m.cs) = 318 +//line plugins/parsers/influx/machine.go.rl:139 - goto st318 -tr810: -//line plugins/parsers/influx/machine.go.rl:72 + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- - m.handler.SetMeasurement(m.text()) + ( m.cs) = 247; + {( m.p)++; goto _out } + } -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st318 + goto _again st318: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof318 } st_case_318: -//line plugins/parsers/influx/machine.go:8506 +//line plugins/parsers/influx/machine.go:8457 switch ( m.data)[( m.p)] { case 10: - goto tr357 + goto st262 case 11: - goto tr486 + goto st319 case 13: - goto tr357 + goto st34 case 32: - goto st318 + goto st261 case 44: - goto tr101 - case 45: - goto tr404 + goto st37 case 61: - goto tr101 + goto tr55 case 92: - goto tr12 + goto st25 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr405 - } - case ( m.data)[( m.p)] >= 9: - goto st318 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st261 } - goto tr9 -tr486: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st319 + goto st15 st319: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof319 } st_case_319: -//line plugins/parsers/influx/machine.go:8545 switch ( m.data)[( m.p)] { case 10: - goto tr357 + goto st262 case 11: - goto tr486 + goto st319 case 13: - goto tr357 + goto st34 case 32: - goto st318 + goto st261 case 44: - goto tr101 + goto tr198 case 45: - goto tr404 + goto tr525 case 61: - goto tr14 + goto tr55 case 92: - goto tr12 + goto st25 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr405 + goto tr526 } case ( m.data)[( m.p)] >= 9: - goto st318 + goto st261 } - goto tr9 -tr483: -//line plugins/parsers/influx/machine.go.rl:72 + goto st15 +tr525: +//line plugins/parsers/influx/machine.go.rl:19 - m.handler.SetMeasurement(m.text()) + m.pb = m.p - goto st320 -tr487: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 + goto st61 + st61: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof61 + } + st_case_61: +//line plugins/parsers/influx/machine.go:8521 + switch ( m.data)[( m.p)] { + case 32: + goto tr198 + case 44: + goto tr198 + case 61: + goto tr55 + case 92: + goto st25 + } + switch { + case ( m.data)[( m.p)] < 12: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { + goto tr198 + } + case ( m.data)[( m.p)] > 13: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st320 + } + default: + goto tr198 + } + goto st15 +tr526: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -8590,98 +8552,151 @@ tr487: goto _test_eof320 } st_case_320: -//line plugins/parsers/influx/machine.go:8594 +//line plugins/parsers/influx/machine.go:8556 switch ( m.data)[( m.p)] { case 10: - goto tr357 + goto tr451 case 11: - goto tr487 + goto tr527 case 13: - goto tr357 + goto tr453 case 32: - goto tr482 + goto tr450 case 44: - goto tr7 - case 45: - goto tr488 + goto tr198 case 61: - goto tr49 + goto tr55 case 92: - goto tr46 + goto st25 } 
switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr489 + goto st322 } case ( m.data)[( m.p)] >= 9: - goto tr482 + goto tr450 } - goto tr44 -tr488: -//line plugins/parsers/influx/machine.go.rl:18 + goto st15 +tr527: + ( m.cs) = 321 +//line plugins/parsers/influx/machine.go.rl:148 - m.pb = m.p + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- - goto st83 - st83: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof83 - } - st_case_83: -//line plugins/parsers/influx/machine.go:8633 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr48 - case 13: - goto tr101 - case 32: - goto tr4 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st321 - } - case ( m.data)[( m.p)] >= 9: - goto tr4 - } - goto st26 -tr489: -//line plugins/parsers/influx/machine.go.rl:18 + ( m.cs) = 247; + {( m.p)++; goto _out } + } - m.pb = m.p - - goto st321 + goto _again st321: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof321 } st_case_321: -//line plugins/parsers/influx/machine.go:8670 +//line plugins/parsers/influx/machine.go:8600 switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto st262 case 11: - goto tr491 + goto st321 case 13: - goto tr362 + goto st34 case 32: - goto tr490 + goto st266 case 44: - goto tr7 + goto tr2 case 61: - goto tr49 + goto tr55 case 92: - goto st84 + goto st25 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st266 + } + goto st15 + st322: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof322 + } + st_case_322: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr527 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr198 + case 61: + goto tr55 + case 92: + goto st25 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st323 + } + case ( m.data)[( m.p)] >= 9: + goto tr450 + } + goto st15 + st323: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof323 + } + st_case_323: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr527 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr198 + case 61: + goto tr55 + case 92: + goto st25 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st324 + } + case ( m.data)[( m.p)] >= 9: + goto tr450 + } + goto st15 + st324: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof324 + } + st_case_324: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr527 + case 13: + goto tr453 + case 32: + goto tr450 + case 44: + goto tr198 + case 61: + goto tr55 + case 92: + goto st25 } switch { case ( m.data)[( m.p)] > 12: @@ -8689,166 +8704,9 @@ tr489: goto st325 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr450 } - goto st26 -tr495: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st322 -tr523: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st322 -tr490: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st322 -tr520: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st322 - st322: 
- if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof322 - } - st_case_322: -//line plugins/parsers/influx/machine.go:8733 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto tr494 - case 13: - goto tr357 - case 32: - goto st322 - case 44: - goto tr5 - case 61: - goto tr5 - case 92: - goto tr12 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st322 - } - goto tr9 -tr494: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st323 - st323: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof323 - } - st_case_323: -//line plugins/parsers/influx/machine.go:8765 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto tr494 - case 13: - goto tr357 - case 32: - goto st322 - case 44: - goto tr5 - case 61: - goto tr14 - case 92: - goto tr12 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st322 - } - goto tr9 -tr496: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st324 -tr491: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st324 - st324: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof324 - } - st_case_324: -//line plugins/parsers/influx/machine.go:8811 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto tr496 - case 13: - goto tr357 - case 32: - goto tr495 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto tr46 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr495 - } - goto tr44 -tr46: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st84 - st84: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof84 - } - st_case_84: -//line plugins/parsers/influx/machine.go:8843 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - case ( m.data)[( m.p)] >= 9: - goto tr5 - } - goto st26 + goto st15 st325: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof325 @@ -8856,19 +8714,19 @@ tr46: st_case_325: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr491 + goto tr527 case 13: - goto tr362 + goto tr453 case 32: - goto tr490 + goto tr450 case 44: - goto tr7 + goto tr198 case 61: - goto tr49 + goto tr55 case 92: - goto st84 + goto st25 } switch { case ( m.data)[( m.p)] > 12: @@ -8876,9 +8734,9 @@ tr46: goto st326 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr450 } - goto st26 + goto st15 st326: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof326 @@ -8886,19 +8744,19 @@ tr46: st_case_326: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr491 + goto tr527 case 13: - goto tr362 + goto tr453 case 32: - goto tr490 + goto tr450 case 44: - goto tr7 + goto tr198 case 61: - goto tr49 + goto tr55 case 92: - goto st84 + goto st25 } switch { case ( m.data)[( m.p)] > 12: @@ -8906,9 +8764,9 @@ tr46: goto st327 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr450 } - goto st26 + goto st15 st327: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof327 @@ -8916,19 +8774,19 @@ tr46: st_case_327: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr491 + goto tr527 case 13: - goto tr362 + goto tr453 case 32: - goto tr490 + goto tr450 case 44: - goto tr7 + goto tr198 case 61: - goto tr49 + goto tr55 case 92: - goto st84 + goto st25 } switch { case ( m.data)[( m.p)] > 12: @@ 
-8936,9 +8794,9 @@ tr46: goto st328 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr450 } - goto st26 + goto st15 st328: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof328 @@ -8946,19 +8804,19 @@ tr46: st_case_328: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr491 + goto tr527 case 13: - goto tr362 + goto tr453 case 32: - goto tr490 + goto tr450 case 44: - goto tr7 + goto tr198 case 61: - goto tr49 + goto tr55 case 92: - goto st84 + goto st25 } switch { case ( m.data)[( m.p)] > 12: @@ -8966,9 +8824,9 @@ tr46: goto st329 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr450 } - goto st26 + goto st15 st329: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof329 @@ -8976,19 +8834,19 @@ tr46: st_case_329: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr491 + goto tr527 case 13: - goto tr362 + goto tr453 case 32: - goto tr490 + goto tr450 case 44: - goto tr7 + goto tr198 case 61: - goto tr49 + goto tr55 case 92: - goto st84 + goto st25 } switch { case ( m.data)[( m.p)] > 12: @@ -8996,9 +8854,9 @@ tr46: goto st330 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr450 } - goto st26 + goto st15 st330: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof330 @@ -9006,19 +8864,19 @@ tr46: st_case_330: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr491 + goto tr527 case 13: - goto tr362 + goto tr453 case 32: - goto tr490 + goto tr450 case 44: - goto tr7 + goto tr198 case 61: - goto tr49 + goto tr55 case 92: - goto st84 + goto st25 } switch { case ( m.data)[( m.p)] > 12: @@ -9026,9 +8884,9 @@ tr46: goto st331 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr450 } - goto st26 + goto st15 st331: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof331 @@ -9036,19 +8894,19 @@ tr46: st_case_331: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr491 + goto tr527 case 13: - goto tr362 + goto tr453 case 32: - goto tr490 + goto tr450 case 44: - goto tr7 + goto tr198 case 61: - goto tr49 + goto tr55 case 92: - goto st84 + goto st25 } switch { case ( m.data)[( m.p)] > 12: @@ -9056,9 +8914,9 @@ tr46: goto st332 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr450 } - goto st26 + goto st15 st332: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof332 @@ -9066,19 +8924,19 @@ tr46: st_case_332: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr491 + goto tr527 case 13: - goto tr362 + goto tr453 case 32: - goto tr490 + goto tr450 case 44: - goto tr7 + goto tr198 case 61: - goto tr49 + goto tr55 case 92: - goto st84 + goto st25 } switch { case ( m.data)[( m.p)] > 12: @@ -9086,9 +8944,9 @@ tr46: goto st333 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr450 } - goto st26 + goto st15 st333: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof333 @@ -9096,19 +8954,19 @@ tr46: st_case_333: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr491 + goto tr527 case 13: - goto tr362 + goto tr453 case 32: - goto tr490 + goto tr450 case 44: - goto tr7 + goto tr198 case 61: - goto tr49 + goto tr55 case 92: - goto st84 + goto st25 } switch { case ( m.data)[( m.p)] > 12: @@ -9116,9 +8974,9 @@ tr46: goto st334 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr450 } - goto st26 + goto st15 st334: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof334 @@ -9126,19 +8984,19 @@ tr46: st_case_334: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr491 + goto tr527 case 13: - goto tr362 + goto tr453 case 32: - goto tr490 + goto 
tr450 case 44: - goto tr7 + goto tr198 case 61: - goto tr49 + goto tr55 case 92: - goto st84 + goto st25 } switch { case ( m.data)[( m.p)] > 12: @@ -9146,9 +9004,9 @@ tr46: goto st335 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr450 } - goto st26 + goto st15 st335: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof335 @@ -9156,19 +9014,19 @@ tr46: st_case_335: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr491 + goto tr527 case 13: - goto tr362 + goto tr453 case 32: - goto tr490 + goto tr450 case 44: - goto tr7 + goto tr198 case 61: - goto tr49 + goto tr55 case 92: - goto st84 + goto st25 } switch { case ( m.data)[( m.p)] > 12: @@ -9176,9 +9034,9 @@ tr46: goto st336 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr450 } - goto st26 + goto st15 st336: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof336 @@ -9186,19 +9044,19 @@ tr46: st_case_336: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr491 + goto tr527 case 13: - goto tr362 + goto tr453 case 32: - goto tr490 + goto tr450 case 44: - goto tr7 + goto tr198 case 61: - goto tr49 + goto tr55 case 92: - goto st84 + goto st25 } switch { case ( m.data)[( m.p)] > 12: @@ -9206,9 +9064,9 @@ tr46: goto st337 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr450 } - goto st26 + goto st15 st337: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof337 @@ -9216,19 +9074,19 @@ tr46: st_case_337: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr491 + goto tr527 case 13: - goto tr362 + goto tr453 case 32: - goto tr490 + goto tr450 case 44: - goto tr7 + goto tr198 case 61: - goto tr49 + goto tr55 case 92: - goto st84 + goto st25 } switch { case ( m.data)[( m.p)] > 12: @@ -9236,9 +9094,9 @@ tr46: goto st338 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr450 } - goto st26 + goto st15 st338: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof338 @@ -9246,19 +9104,19 @@ tr46: st_case_338: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr491 + goto tr527 case 13: - goto tr362 + goto tr453 case 32: - goto tr490 + goto tr450 case 44: - goto tr7 + goto tr198 case 61: - goto tr49 + goto tr55 case 92: - goto st84 + goto st25 } switch { case ( m.data)[( m.p)] > 12: @@ -9266,9 +9124,9 @@ tr46: goto st339 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr450 } - goto st26 + goto st15 st339: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof339 @@ -9276,805 +9134,378 @@ tr46: st_case_339: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr491 + goto tr527 case 13: - goto tr362 + goto tr453 case 32: - goto tr490 + goto tr450 case 44: - goto tr7 + goto tr198 case 61: - goto tr49 + goto tr55 case 92: - goto st84 + goto st25 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st340 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr450 } - goto st26 + goto st15 +tr196: +//line plugins/parsers/influx/machine.go.rl:86 + + key = m.text() + + goto st62 + st62: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof62 + } + st_case_62: +//line plugins/parsers/influx/machine.go:9167 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto st7 + case 12: + goto tr47 + case 13: + goto st8 + case 32: + goto st6 + case 34: + goto tr151 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr153 + } + goto tr148 +tr151: + ( m.cs) = 340 +//line plugins/parsers/influx/machine.go.rl:19 + + 
m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr157: + ( m.cs) = 340 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st340: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof340 } st_case_340: +//line plugins/parsers/influx/machine.go:9224 switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto st262 case 11: - goto tr491 + goto tr548 case 13: - goto tr362 + goto st34 case 32: - goto tr490 + goto tr547 case 44: - goto tr7 + goto tr549 case 61: - goto tr49 + goto tr132 case 92: - goto st84 + goto st23 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st341 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr547 } - goto st26 + goto st17 +tr548: + ( m.cs) = 341 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr716: + ( m.cs) = 341 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr926: + ( m.cs) = 341 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr931: + ( m.cs) = 341 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr936: + ( m.cs) = 341 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st341: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof341 } st_case_341: +//line plugins/parsers/influx/machine.go:9355 switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto st262 case 11: - goto tr491 + goto tr550 case 13: - goto tr362 + goto st34 case 32: - goto tr490 + goto tr547 case 44: - goto tr7 + goto tr62 + case 45: + goto tr551 case 61: - goto tr49 + goto tr132 case 92: - goto st84 + goto tr66 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st342 + goto tr552 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr547 } - goto st26 + goto tr64 +tr575: + ( m.cs) = 342 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + 
{( m.p)++; goto _out } + } + + goto _again +tr550: + ( m.cs) = 342 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again st342: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof342 } st_case_342: +//line plugins/parsers/influx/machine.go:9418 switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto st262 case 11: - goto tr491 + goto tr550 case 13: - goto tr362 + goto st34 case 32: - goto tr490 + goto tr547 case 44: - goto tr7 + goto tr62 + case 45: + goto tr551 case 61: - goto tr49 + goto tr12 case 92: - goto st84 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr490 - } - goto st26 -tr484: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st85 -tr516: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st85 -tr568: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st85 -tr574: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st85 -tr578: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st85 -tr582: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st85 -tr795: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st85 -tr820: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st85 -tr823: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st85 -tr826: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st85 - st85: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof85 - } - st_case_85: -//line plugins/parsers/influx/machine.go:9485 - switch ( m.data)[( m.p)] { - case 32: - goto tr61 - case 44: - goto tr61 - case 61: - goto tr61 - case 92: - goto tr167 + goto tr66 } switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr552 } case ( m.data)[( m.p)] >= 9: - goto tr61 + goto tr547 } - goto tr166 -tr166: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr64 +tr551: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p - goto st86 - st86: + goto st63 + st63: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof86 + goto _test_eof63 } - st_case_86: -//line plugins/parsers/influx/machine.go:9516 - switch ( m.data)[( m.p)] { - case 32: - goto tr61 - case 44: - goto tr61 - case 61: - goto tr169 - case 92: - goto st118 - 
} - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] >= 9: - goto tr61 - } - goto st86 -tr169: -//line plugins/parsers/influx/machine.go.rl:76 - - key = m.text() - -//line plugins/parsers/influx/machine.go.rl:84 - - key = m.text() - - goto st87 - st87: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof87 - } - st_case_87: -//line plugins/parsers/influx/machine.go:9551 - switch ( m.data)[( m.p)] { - case 32: - goto tr61 - case 34: - goto tr171 - case 44: - goto tr61 - case 45: - goto tr172 - case 46: - goto tr173 - case 48: - goto tr174 - case 61: - goto tr61 - case 70: - goto tr176 - case 84: - goto tr177 - case 92: - goto tr58 - case 102: - goto tr178 - case 116: - goto tr179 - } - switch { - case ( m.data)[( m.p)] < 12: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr61 - } - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr175 - } - default: - goto tr61 - } - goto tr57 -tr171: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st88 - st88: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof88 - } - st_case_88: -//line plugins/parsers/influx/machine.go:9602 - switch ( m.data)[( m.p)] { - case 9: - goto tr181 - case 11: - goto tr182 - case 12: - goto tr60 - case 32: - goto tr181 - case 34: - goto tr183 - case 44: - goto tr184 - case 61: - goto tr25 - case 92: - goto tr185 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto tr180 -tr180: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st89 - st89: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof89 - } - st_case_89: -//line plugins/parsers/influx/machine.go:9636 - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr189 - case 44: - goto tr190 - case 61: - goto st7 - case 92: - goto st103 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st89 -tr187: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st90 -tr181: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st90 - st90: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof90 - } - st_case_90: -//line plugins/parsers/influx/machine.go:9680 - switch ( m.data)[( m.p)] { - case 9: - goto st90 - case 11: - goto tr194 - case 12: - goto st3 - case 32: - goto st90 - case 34: - goto tr95 - case 44: - goto st7 - case 61: - goto st7 - case 92: - goto tr195 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto tr192 -tr192: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st91 - st91: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof91 - } - st_case_91: -//line plugins/parsers/influx/machine.go:9714 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr5 - case 32: - goto st7 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st91 -tr197: -//line plugins/parsers/influx/machine.go.rl:84 - - key = m.text() - - goto st92 - st92: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof92 - } - st_case_92: -//line plugins/parsers/influx/machine.go:9746 + st_case_63: +//line plugins/parsers/influx/machine.go:9457 
switch ( m.data)[( m.p)] { case 10: - goto tr5 - case 34: - goto tr103 - case 45: - goto tr125 - case 46: - goto tr126 - case 48: - goto tr127 - case 70: - goto tr129 - case 84: - goto tr130 - case 92: - goto st11 - case 102: - goto tr131 - case 116: goto tr132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr128 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 -tr195: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st93 - st93: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof93 - } - st_case_93: -//line plugins/parsers/influx/machine.go:9789 - switch ( m.data)[( m.p)] { - case 34: - goto st91 + case 11: + goto tr68 + case 13: + goto tr132 + case 32: + goto tr60 + case 44: + goto tr62 + case 61: + goto tr12 case 92: - goto st91 + goto st21 } switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st343 } case ( m.data)[( m.p)] >= 9: - goto tr5 - } - goto st4 -tr194: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st94 - st94: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof94 - } - st_case_94: -//line plugins/parsers/influx/machine.go:9816 - switch ( m.data)[( m.p)] { - case 9: - goto st90 - case 11: - goto tr194 - case 12: - goto st3 - case 32: - goto st90 - case 34: - goto tr95 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto tr195 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto tr192 -tr188: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st95 -tr182: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st95 - st95: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof95 - } - st_case_95: -//line plugins/parsers/influx/machine.go:9860 - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr200 - case 12: goto tr60 - case 32: - goto tr187 - case 34: - goto tr201 - case 44: - goto tr190 - case 61: - goto st7 - case 92: - goto tr202 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto tr199 -tr199: -//line plugins/parsers/influx/machine.go.rl:18 + goto st19 +tr552: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p - goto st96 - st96: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof96 - } - st_case_96: -//line plugins/parsers/influx/machine.go:9894 - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr204 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr205 - case 44: - goto tr190 - case 61: - goto tr197 - case 92: - goto st105 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st96 -tr204: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st97 -tr200: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st97 - st97: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof97 - } - st_case_97: -//line plugins/parsers/influx/machine.go:9938 - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr200 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr201 - case 44: - goto tr190 - case 61: - goto tr197 - case 92: - goto tr202 - 
} - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto tr199 -tr201: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st343 -tr205: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - goto st343 st343: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof343 } st_case_343: -//line plugins/parsers/influx/machine.go:9982 +//line plugins/parsers/influx/machine.go:9494 switch ( m.data)[( m.p)] { case 10: - goto tr357 + goto tr451 case 11: - goto tr515 + goto tr554 case 13: - goto tr357 + goto tr453 case 32: - goto tr514 + goto tr553 case 44: - goto tr516 + goto tr62 case 61: - goto tr14 + goto tr12 case 92: - goto st35 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr514 - } - goto st33 -tr515: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st344 -tr517: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st344 - st344: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof344 - } - st_case_344: -//line plugins/parsers/influx/machine.go:10024 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto tr517 - case 13: - goto tr357 - case 32: - goto tr514 - case 44: - goto tr63 - case 45: - goto tr518 - case 61: - goto tr14 - case 92: - goto tr67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr519 - } - case ( m.data)[( m.p)] >= 9: - goto tr514 - } - goto tr65 -tr518: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st98 - st98: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof98 - } - st_case_98: -//line plugins/parsers/influx/machine.go:10063 - switch ( m.data)[( m.p)] { - case 10: - goto tr207 - case 11: - goto tr69 - case 13: - goto tr207 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 + goto st21 } switch { case ( m.data)[( m.p)] > 12: @@ -10082,36 +9513,125 @@ tr518: goto st345 } case ( m.data)[( m.p)] >= 9: - goto tr60 + goto tr553 } - goto st33 -tr519: -//line plugins/parsers/influx/machine.go.rl:18 + goto st19 +tr557: + ( m.cs) = 344 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p - goto st345 + goto _again +tr554: + ( m.cs) = 344 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st344: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof344 + } + st_case_344: +//line plugins/parsers/influx/machine.go:9565 + switch ( m.data)[( m.p)] { + case 10: + goto st262 + case 11: + goto tr557 + case 13: + goto st34 + case 32: + goto tr556 + case 44: + goto tr62 + case 61: + goto tr12 + case 92: + goto tr66 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr556 + } + goto tr64 st345: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof345 } st_case_345: -//line plugins/parsers/influx/machine.go:10100 switch ( m.data)[( m.p)] { 
case 10: - goto tr362 + goto tr451 case 11: - goto tr521 + goto tr554 case 13: - goto tr362 + goto tr453 case 32: - goto tr520 + goto tr553 case 44: - goto tr63 + goto tr62 case 61: - goto tr14 + goto tr12 case 92: - goto st35 + goto st21 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st346 + } + case ( m.data)[( m.p)] >= 9: + goto tr553 + } + goto st19 + st346: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof346 + } + st_case_346: + switch ( m.data)[( m.p)] { + case 10: + goto tr451 + case 11: + goto tr554 + case 13: + goto tr453 + case 32: + goto tr553 + case 44: + goto tr62 + case 61: + goto tr12 + case 92: + goto st21 } switch { case ( m.data)[( m.p)] > 12: @@ -10119,55 +9639,9 @@ tr519: goto st347 } case ( m.data)[( m.p)] >= 9: - goto tr520 + goto tr553 } - goto st33 -tr524: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st346 -tr521: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st346 - st346: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof346 - } - st_case_346: -//line plugins/parsers/influx/machine.go:10151 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto tr524 - case 13: - goto tr357 - case 32: - goto tr523 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto tr67 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr523 - } - goto tr65 + goto st19 st347: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof347 @@ -10175,19 +9649,19 @@ tr521: st_case_347: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr521 + goto tr554 case 13: - goto tr362 + goto tr453 case 32: - goto tr520 + goto tr553 case 44: - goto tr63 + goto tr62 case 61: - goto tr14 + goto tr12 case 92: - goto st35 + goto st21 } switch { case ( m.data)[( m.p)] > 12: @@ -10195,9 +9669,9 @@ tr521: goto st348 } case ( m.data)[( m.p)] >= 9: - goto tr520 + goto tr553 } - goto st33 + goto st19 st348: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof348 @@ -10205,19 +9679,19 @@ tr521: st_case_348: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr521 + goto tr554 case 13: - goto tr362 + goto tr453 case 32: - goto tr520 + goto tr553 case 44: - goto tr63 + goto tr62 case 61: - goto tr14 + goto tr12 case 92: - goto st35 + goto st21 } switch { case ( m.data)[( m.p)] > 12: @@ -10225,9 +9699,9 @@ tr521: goto st349 } case ( m.data)[( m.p)] >= 9: - goto tr520 + goto tr553 } - goto st33 + goto st19 st349: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof349 @@ -10235,19 +9709,19 @@ tr521: st_case_349: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr521 + goto tr554 case 13: - goto tr362 + goto tr453 case 32: - goto tr520 + goto tr553 case 44: - goto tr63 + goto tr62 case 61: - goto tr14 + goto tr12 case 92: - goto st35 + goto st21 } switch { case ( m.data)[( m.p)] > 12: @@ -10255,9 +9729,9 @@ tr521: goto st350 } case ( m.data)[( m.p)] >= 9: - goto tr520 + goto tr553 } - goto st33 + goto st19 st350: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof350 @@ -10265,19 +9739,19 @@ tr521: st_case_350: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr521 + goto tr554 case 13: - goto tr362 + goto tr453 case 32: - goto tr520 + goto tr553 case 44: - goto tr63 + goto tr62 case 61: 
- goto tr14 + goto tr12 case 92: - goto st35 + goto st21 } switch { case ( m.data)[( m.p)] > 12: @@ -10285,9 +9759,9 @@ tr521: goto st351 } case ( m.data)[( m.p)] >= 9: - goto tr520 + goto tr553 } - goto st33 + goto st19 st351: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof351 @@ -10295,19 +9769,19 @@ tr521: st_case_351: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr521 + goto tr554 case 13: - goto tr362 + goto tr453 case 32: - goto tr520 + goto tr553 case 44: - goto tr63 + goto tr62 case 61: - goto tr14 + goto tr12 case 92: - goto st35 + goto st21 } switch { case ( m.data)[( m.p)] > 12: @@ -10315,9 +9789,9 @@ tr521: goto st352 } case ( m.data)[( m.p)] >= 9: - goto tr520 + goto tr553 } - goto st33 + goto st19 st352: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof352 @@ -10325,19 +9799,19 @@ tr521: st_case_352: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr521 + goto tr554 case 13: - goto tr362 + goto tr453 case 32: - goto tr520 + goto tr553 case 44: - goto tr63 + goto tr62 case 61: - goto tr14 + goto tr12 case 92: - goto st35 + goto st21 } switch { case ( m.data)[( m.p)] > 12: @@ -10345,9 +9819,9 @@ tr521: goto st353 } case ( m.data)[( m.p)] >= 9: - goto tr520 + goto tr553 } - goto st33 + goto st19 st353: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof353 @@ -10355,19 +9829,19 @@ tr521: st_case_353: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr521 + goto tr554 case 13: - goto tr362 + goto tr453 case 32: - goto tr520 + goto tr553 case 44: - goto tr63 + goto tr62 case 61: - goto tr14 + goto tr12 case 92: - goto st35 + goto st21 } switch { case ( m.data)[( m.p)] > 12: @@ -10375,9 +9849,9 @@ tr521: goto st354 } case ( m.data)[( m.p)] >= 9: - goto tr520 + goto tr553 } - goto st33 + goto st19 st354: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof354 @@ -10385,19 +9859,19 @@ tr521: st_case_354: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr521 + goto tr554 case 13: - goto tr362 + goto tr453 case 32: - goto tr520 + goto tr553 case 44: - goto tr63 + goto tr62 case 61: - goto tr14 + goto tr12 case 92: - goto st35 + goto st21 } switch { case ( m.data)[( m.p)] > 12: @@ -10405,9 +9879,9 @@ tr521: goto st355 } case ( m.data)[( m.p)] >= 9: - goto tr520 + goto tr553 } - goto st33 + goto st19 st355: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof355 @@ -10415,19 +9889,19 @@ tr521: st_case_355: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr521 + goto tr554 case 13: - goto tr362 + goto tr453 case 32: - goto tr520 + goto tr553 case 44: - goto tr63 + goto tr62 case 61: - goto tr14 + goto tr12 case 92: - goto st35 + goto st21 } switch { case ( m.data)[( m.p)] > 12: @@ -10435,9 +9909,9 @@ tr521: goto st356 } case ( m.data)[( m.p)] >= 9: - goto tr520 + goto tr553 } - goto st33 + goto st19 st356: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof356 @@ -10445,19 +9919,19 @@ tr521: st_case_356: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr521 + goto tr554 case 13: - goto tr362 + goto tr453 case 32: - goto tr520 + goto tr553 case 44: - goto tr63 + goto tr62 case 61: - goto tr14 + goto tr12 case 92: - goto st35 + goto st21 } switch { case ( m.data)[( m.p)] > 12: @@ -10465,9 +9939,9 @@ tr521: goto st357 } case ( m.data)[( m.p)] >= 9: - goto tr520 + goto tr553 } - goto st33 + goto st19 st357: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof357 @@ -10475,19 +9949,19 @@ tr521: st_case_357: switch ( m.data)[( m.p)] { case 
10: - goto tr362 + goto tr451 case 11: - goto tr521 + goto tr554 case 13: - goto tr362 + goto tr453 case 32: - goto tr520 + goto tr553 case 44: - goto tr63 + goto tr62 case 61: - goto tr14 + goto tr12 case 92: - goto st35 + goto st21 } switch { case ( m.data)[( m.p)] > 12: @@ -10495,9 +9969,9 @@ tr521: goto st358 } case ( m.data)[( m.p)] >= 9: - goto tr520 + goto tr553 } - goto st33 + goto st19 st358: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof358 @@ -10505,19 +9979,19 @@ tr521: st_case_358: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr521 + goto tr554 case 13: - goto tr362 + goto tr453 case 32: - goto tr520 + goto tr553 case 44: - goto tr63 + goto tr62 case 61: - goto tr14 + goto tr12 case 92: - goto st35 + goto st21 } switch { case ( m.data)[( m.p)] > 12: @@ -10525,9 +9999,9 @@ tr521: goto st359 } case ( m.data)[( m.p)] >= 9: - goto tr520 + goto tr553 } - goto st33 + goto st19 st359: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof359 @@ -10535,19 +10009,19 @@ tr521: st_case_359: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr521 + goto tr554 case 13: - goto tr362 + goto tr453 case 32: - goto tr520 + goto tr553 case 44: - goto tr63 + goto tr62 case 61: - goto tr14 + goto tr12 case 92: - goto st35 + goto st21 } switch { case ( m.data)[( m.p)] > 12: @@ -10555,9 +10029,9 @@ tr521: goto st360 } case ( m.data)[( m.p)] >= 9: - goto tr520 + goto tr553 } - goto st33 + goto st19 st360: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof360 @@ -10565,19 +10039,19 @@ tr521: st_case_360: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr521 + goto tr554 case 13: - goto tr362 + goto tr453 case 32: - goto tr520 + goto tr553 case 44: - goto tr63 + goto tr62 case 61: - goto tr14 + goto tr12 case 92: - goto st35 + goto st21 } switch { case ( m.data)[( m.p)] > 12: @@ -10585,9 +10059,9 @@ tr521: goto st361 } case ( m.data)[( m.p)] >= 9: - goto tr520 + goto tr553 } - goto st33 + goto st19 st361: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof361 @@ -10595,19 +10069,19 @@ tr521: st_case_361: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr521 + goto tr554 case 13: - goto tr362 + goto tr453 case 32: - goto tr520 + goto tr553 case 44: - goto tr63 + goto tr62 case 61: - goto tr14 + goto tr12 case 92: - goto st35 + goto st21 } switch { case ( m.data)[( m.p)] > 12: @@ -10615,9 +10089,9 @@ tr521: goto st362 } case ( m.data)[( m.p)] >= 9: - goto tr520 + goto tr553 } - goto st33 + goto st19 st362: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof362 @@ -10625,269 +10099,928 @@ tr521: st_case_362: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr521 + goto tr554 case 13: - goto tr362 + goto tr453 case 32: - goto tr520 + goto tr553 case 44: - goto tr63 + goto tr62 case 61: - goto tr14 + goto tr12 case 92: - goto st35 + goto st21 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr553 + } + goto st19 +tr153: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st64 + st64: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof64 + } + st_case_64: +//line plugins/parsers/influx/machine.go:10132 + switch ( m.data)[( m.p)] { + case 34: + goto st49 + case 92: + goto st65 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st363 + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr47 } case ( m.data)[( m.p)] >= 9: - goto tr520 + goto 
tr47 } - goto st33 + goto st17 + st65: +//line plugins/parsers/influx/machine.go.rl:234 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof65 + } + st_case_65: +//line plugins/parsers/influx/machine.go:10156 + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto st7 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 92: + goto st64 + } + goto st49 +tr156: + ( m.cs) = 66 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr150: + ( m.cs) = 66 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again + st66: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof66 + } + st_case_66: +//line plugins/parsers/influx/machine.go:10215 + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto st7 + case 11: + goto tr203 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr155 + case 34: + goto tr204 + case 44: + goto tr158 + case 61: + goto st6 + case 92: + goto tr205 + } + goto tr202 +tr202: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st67 + st67: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof67 + } + st_case_67: +//line plugins/parsers/influx/machine.go:10250 + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto st7 + case 11: + goto tr207 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr155 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + goto st67 +tr207: + ( m.cs) = 68 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr203: + ( m.cs) = 68 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again + st68: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof68 + } + st_case_68: +//line plugins/parsers/influx/machine.go:10309 + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto st7 + case 11: + goto tr203 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr155 + case 34: + goto tr204 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto tr205 + } + goto tr202 +tr204: + ( m.cs) = 363 +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr208: + ( m.cs) = 363 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st363: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof363 } st_case_363: +//line plugins/parsers/influx/machine.go:10368 switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto st262 case 11: - goto tr521 + goto tr575 case 13: - goto tr362 + goto st34 case 32: - goto tr520 + goto tr547 case 44: - goto tr63 + 
goto tr549 case 61: - goto tr14 + goto tr12 case 92: - goto st35 + goto st21 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr547 + } + goto st19 +tr205: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st69 + st69: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof69 + } + st_case_69: +//line plugins/parsers/influx/machine.go:10400 + switch ( m.data)[( m.p)] { + case 34: + goto st67 + case 92: + goto st70 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st364 + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr47 } case ( m.data)[( m.p)] >= 9: - goto tr520 + goto tr47 } - goto st33 + goto st19 + st70: +//line plugins/parsers/influx/machine.go.rl:234 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof70 + } + st_case_70: +//line plugins/parsers/influx/machine.go:10424 + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto st7 + case 11: + goto tr207 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr155 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + goto st67 +tr193: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st71 + st71: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof71 + } + st_case_71: +//line plugins/parsers/influx/machine.go:10459 + switch ( m.data)[( m.p)] { + case 34: + goto st60 + case 92: + goto st72 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr47 + } + case ( m.data)[( m.p)] >= 9: + goto tr47 + } + goto st15 + st72: +//line plugins/parsers/influx/machine.go.rl:234 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof72 + } + st_case_72: +//line plugins/parsers/influx/machine.go:10483 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto st7 + case 12: + goto tr47 + case 13: + goto st8 + case 32: + goto st6 + case 34: + goto tr195 + case 44: + goto st6 + case 61: + goto tr196 + case 92: + goto st71 + } + goto st60 +tr189: +//line plugins/parsers/influx/machine.go.rl:99 + + key = m.text() + + goto st73 +tr346: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:99 + + key = m.text() + + goto st73 + st73: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof73 + } + st_case_73: +//line plugins/parsers/influx/machine.go:10526 + switch ( m.data)[( m.p)] { + case 9: + goto tr180 + case 10: + goto st7 + case 11: + goto tr181 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr180 + case 34: + goto tr212 + case 44: + goto tr182 + case 45: + goto tr213 + case 46: + goto tr214 + case 48: + goto tr215 + case 70: + goto tr217 + case 84: + goto tr218 + case 92: + goto st157 + case 102: + goto tr219 + case 116: + goto tr220 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr216 + } + goto st55 +tr212: + ( m.cs) = 364 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st364: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof364 } st_case_364: +//line plugins/parsers/influx/machine.go:10583 switch ( m.data)[( m.p)] { + case 9: + goto tr576 case 10: - goto tr362 + goto tr475 case 11: - goto tr521 + goto tr577 + case 12: + goto tr482 case 13: - goto tr362 + goto tr476 case 32: - goto tr520 - case 44: - goto tr63 - 
case 61: - goto tr14 - case 92: - goto st35 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr520 - } - goto st33 -tr190: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st99 -tr184: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st99 - st99: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof99 - } - st_case_99: -//line plugins/parsers/influx/machine.go:10728 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr61 - case 32: - goto st7 + goto tr576 case 34: - goto tr210 + goto tr85 case 44: - goto st7 - case 61: - goto st7 + goto tr578 case 92: - goto tr211 + goto tr87 } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto tr209 -tr209: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr82 +tr607: + ( m.cs) = 365 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr576: + ( m.cs) = 365 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p - goto st100 - st100: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof100 - } - st_case_100: -//line plugins/parsers/influx/machine.go:10760 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr61 - case 32: - goto st7 - case 34: - goto tr213 - case 44: - goto st7 - case 61: - goto tr214 - case 92: - goto st104 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st100 -tr210: -//line plugins/parsers/influx/machine.go.rl:18 + goto _again +tr749: + ( m.cs) = 365 +//line plugins/parsers/influx/machine.go.rl:90 - m.pb = m.p + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- -//line plugins/parsers/influx/machine.go.rl:104 + ( m.cs) = 247; + {( m.p)++; goto _out } + } - m.handler.AddString(key, m.text()) + goto _again +tr619: + ( m.cs) = 365 +//line plugins/parsers/influx/machine.go.rl:77 - goto st365 -tr213: -//line plugins/parsers/influx/machine.go.rl:104 + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - m.handler.AddString(key, m.text()) + ( m.cs) = 247; + {( m.p)++; goto _out } + } - goto st365 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr745: + ( m.cs) = 365 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr777: + ( m.cs) = 365 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr783: + ( m.cs) = 365 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + 
if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr789: + ( m.cs) = 365 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr802: + ( m.cs) = 365 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr807: + ( m.cs) = 365 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr812: + ( m.cs) = 365 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st365: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof365 } st_case_365: -//line plugins/parsers/influx/machine.go:10802 +//line plugins/parsers/influx/machine.go:10837 switch ( m.data)[( m.p)] { + case 9: + goto st365 case 10: - goto tr357 + goto st288 case 11: - goto st366 + goto tr580 + case 12: + goto st290 case 13: - goto tr357 + goto st74 case 32: - goto st207 + goto st365 + case 34: + goto tr97 case 44: - goto st9 + goto st6 + case 45: + goto tr581 case 61: - goto tr55 + goto st6 case 92: - goto st37 + goto tr98 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st207 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr582 } - goto st29 + goto tr94 +tr580: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st366 st366: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof366 } st_case_366: +//line plugins/parsers/influx/machine.go:10877 switch ( m.data)[( m.p)] { + case 9: + goto st365 case 10: - goto tr357 + goto st288 case 11: - goto st366 + goto tr580 + case 12: + goto st290 case 13: - goto tr357 + goto st74 case 32: - goto st207 + goto st365 + case 34: + goto tr97 case 44: - goto tr216 + goto st6 case 45: - goto tr543 + goto tr581 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto tr98 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr544 - } - case ( m.data)[( m.p)] >= 9: - goto st207 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr582 } - goto st29 -tr543: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr94 +tr476: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p - goto st101 - st101: + goto st74 +tr586: + ( m.cs) = 74 +//line plugins/parsers/influx/machine.go.rl:148 + + err = 
m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr623: + ( m.cs) = 74 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr780: + ( m.cs) = 74 +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr786: + ( m.cs) = 74 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr792: + ( m.cs) = 74 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st74: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof101 + goto _test_eof74 } - st_case_101: -//line plugins/parsers/influx/machine.go:10866 + st_case_74: +//line plugins/parsers/influx/machine.go:10982 + if ( m.data)[( m.p)] == 10 { + goto st288 + } + goto tr8 +tr581: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st75 + st75: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof75 + } + st_case_75: +//line plugins/parsers/influx/machine.go:10998 switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto st7 + case 12: + goto tr105 + case 13: + goto st8 case 32: - goto tr216 + goto st6 + case 34: + goto tr100 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto st77 } - switch { - case ( m.data)[( m.p)] < 12: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr216 - } - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st367 - } - default: - goto tr216 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st367 } - goto st29 -tr544: -//line plugins/parsers/influx/machine.go.rl:18 + goto st33 +tr582: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -10897,4037 +11030,2055 @@ tr544: goto _test_eof367 } st_case_367: -//line plugins/parsers/influx/machine.go:10901 +//line plugins/parsers/influx/machine.go:11034 switch ( m.data)[( m.p)] { + case 9: + goto tr583 case 10: - goto tr362 + goto tr584 case 11: - goto tr545 + goto tr585 + case 12: + goto tr450 case 13: - goto tr362 + goto tr586 case 32: - goto tr361 + goto tr583 + case 34: + goto tr100 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto st77 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st369 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st370 } - goto st29 -tr545: -//line plugins/parsers/influx/machine.go.rl:108 + goto st33 +tr583: + ( m.cs) = 368 +//line plugins/parsers/influx/machine.go.rl:148 - m.handler.SetTimestamp(m.text()) + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- - goto st368 + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st368: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof368 } st_case_368: -//line plugins/parsers/influx/machine.go:10938 +//line plugins/parsers/influx/machine.go:11079 switch ( m.data)[( m.p)] { case 10: - goto tr357 - case 11: - goto st368 + goto st288 + case 12: + goto st266 case 13: - goto tr357 + 
goto st74 case 32: - goto st210 - case 44: - goto tr52 - case 61: - goto tr55 + goto st368 + case 34: + goto tr31 case 92: - goto st37 + goto st76 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st210 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto st368 } - goto st29 + goto st6 +tr27: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st76 + st76: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof76 + } + st_case_76: +//line plugins/parsers/influx/machine.go:11109 + switch ( m.data)[( m.p)] { + case 34: + goto st6 + case 92: + goto st6 + } + goto tr8 +tr585: + ( m.cs) = 369 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st369: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof369 } st_case_369: +//line plugins/parsers/influx/machine.go:11135 switch ( m.data)[( m.p)] { + case 9: + goto st368 case 10: - goto tr362 + goto st288 case 11: - goto tr545 + goto st369 + case 12: + goto st266 case 13: - goto tr362 + goto st74 case 32: - goto tr361 + goto st368 + case 34: + goto tr100 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto st77 + } + goto st33 +tr98: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st77 + st77: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof77 + } + st_case_77: +//line plugins/parsers/influx/machine.go:11170 + switch ( m.data)[( m.p)] { + case 34: + goto st33 + case 92: + goto st33 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st370 + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr8 } - goto st29 + goto st3 st370: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof370 } st_case_370: switch ( m.data)[( m.p)] { + case 9: + goto tr583 case 10: - goto tr362 + goto tr584 case 11: - goto tr545 + goto tr585 + case 12: + goto tr450 case 13: - goto tr362 + goto tr586 case 32: - goto tr361 + goto tr583 + case 34: + goto tr100 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto st77 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st371 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st371 } - goto st29 + goto st33 st371: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof371 } st_case_371: switch ( m.data)[( m.p)] { + case 9: + goto tr583 case 10: - goto tr362 + goto tr584 case 11: - goto tr545 + goto tr585 + case 12: + goto tr450 case 13: - goto tr362 + goto tr586 case 32: - goto tr361 + goto tr583 + case 34: + goto tr100 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto st77 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st372 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st372 } - goto st29 + goto st33 st372: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof372 } st_case_372: switch ( m.data)[( m.p)] { + case 9: + goto tr583 case 10: - goto tr362 + goto tr584 case 11: - goto tr545 + goto tr585 + case 12: + goto tr450 case 13: - goto tr362 + goto tr586 case 32: - goto tr361 + goto tr583 + case 34: + goto tr100 case 44: - goto tr216 + 
goto st6 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto st77 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st373 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st373 } - goto st29 + goto st33 st373: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof373 } st_case_373: switch ( m.data)[( m.p)] { + case 9: + goto tr583 case 10: - goto tr362 + goto tr584 case 11: - goto tr545 + goto tr585 + case 12: + goto tr450 case 13: - goto tr362 + goto tr586 case 32: - goto tr361 + goto tr583 + case 34: + goto tr100 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto st77 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st374 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st374 } - goto st29 + goto st33 st374: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof374 } st_case_374: switch ( m.data)[( m.p)] { + case 9: + goto tr583 case 10: - goto tr362 + goto tr584 case 11: - goto tr545 + goto tr585 + case 12: + goto tr450 case 13: - goto tr362 + goto tr586 case 32: - goto tr361 + goto tr583 + case 34: + goto tr100 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto st77 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st375 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st375 } - goto st29 + goto st33 st375: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof375 } st_case_375: switch ( m.data)[( m.p)] { + case 9: + goto tr583 case 10: - goto tr362 + goto tr584 case 11: - goto tr545 + goto tr585 + case 12: + goto tr450 case 13: - goto tr362 + goto tr586 case 32: - goto tr361 + goto tr583 + case 34: + goto tr100 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto st77 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st376 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st376 } - goto st29 + goto st33 st376: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof376 } st_case_376: switch ( m.data)[( m.p)] { + case 9: + goto tr583 case 10: - goto tr362 + goto tr584 case 11: - goto tr545 + goto tr585 + case 12: + goto tr450 case 13: - goto tr362 + goto tr586 case 32: - goto tr361 + goto tr583 + case 34: + goto tr100 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto st77 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st377 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st377 } - goto st29 + goto st33 st377: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof377 } st_case_377: switch ( m.data)[( m.p)] { + case 9: + goto tr583 case 10: - goto tr362 + goto tr584 case 11: - goto tr545 + goto tr585 + case 12: + goto tr450 case 13: - goto tr362 + goto tr586 case 32: - goto tr361 + goto tr583 + case 34: + goto tr100 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto st77 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st378 - } - 
case ( m.data)[( m.p)] >= 9: - goto tr361 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st378 } - goto st29 + goto st33 st378: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof378 } st_case_378: switch ( m.data)[( m.p)] { + case 9: + goto tr583 case 10: - goto tr362 + goto tr584 case 11: - goto tr545 + goto tr585 + case 12: + goto tr450 case 13: - goto tr362 + goto tr586 case 32: - goto tr361 + goto tr583 + case 34: + goto tr100 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto st77 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st379 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st379 } - goto st29 + goto st33 st379: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof379 } st_case_379: switch ( m.data)[( m.p)] { + case 9: + goto tr583 case 10: - goto tr362 + goto tr584 case 11: - goto tr545 + goto tr585 + case 12: + goto tr450 case 13: - goto tr362 + goto tr586 case 32: - goto tr361 + goto tr583 + case 34: + goto tr100 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto st77 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st380 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st380 } - goto st29 + goto st33 st380: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof380 } st_case_380: switch ( m.data)[( m.p)] { + case 9: + goto tr583 case 10: - goto tr362 + goto tr584 case 11: - goto tr545 + goto tr585 + case 12: + goto tr450 case 13: - goto tr362 + goto tr586 case 32: - goto tr361 + goto tr583 + case 34: + goto tr100 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto st77 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st381 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st381 } - goto st29 + goto st33 st381: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof381 } st_case_381: switch ( m.data)[( m.p)] { + case 9: + goto tr583 case 10: - goto tr362 + goto tr584 case 11: - goto tr545 + goto tr585 + case 12: + goto tr450 case 13: - goto tr362 + goto tr586 case 32: - goto tr361 + goto tr583 + case 34: + goto tr100 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto st77 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st382 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st382 } - goto st29 + goto st33 st382: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof382 } st_case_382: switch ( m.data)[( m.p)] { + case 9: + goto tr583 case 10: - goto tr362 + goto tr584 case 11: - goto tr545 + goto tr585 + case 12: + goto tr450 case 13: - goto tr362 + goto tr586 case 32: - goto tr361 + goto tr583 + case 34: + goto tr100 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto st77 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st383 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st383 } - goto st29 + goto st33 st383: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof383 } 
st_case_383: switch ( m.data)[( m.p)] { + case 9: + goto tr583 case 10: - goto tr362 + goto tr584 case 11: - goto tr545 + goto tr585 + case 12: + goto tr450 case 13: - goto tr362 + goto tr586 case 32: - goto tr361 + goto tr583 + case 34: + goto tr100 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto st77 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st384 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st384 } - goto st29 + goto st33 st384: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof384 } st_case_384: switch ( m.data)[( m.p)] { + case 9: + goto tr583 case 10: - goto tr362 + goto tr584 case 11: - goto tr545 + goto tr585 + case 12: + goto tr450 case 13: - goto tr362 + goto tr586 case 32: - goto tr361 + goto tr583 + case 34: + goto tr100 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto st77 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st385 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st385 } - goto st29 + goto st33 st385: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof385 } st_case_385: switch ( m.data)[( m.p)] { + case 9: + goto tr583 case 10: - goto tr362 + goto tr584 case 11: - goto tr545 + goto tr585 + case 12: + goto tr450 case 13: - goto tr362 + goto tr586 case 32: - goto tr361 + goto tr583 + case 34: + goto tr100 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr101 case 92: - goto st37 + goto st77 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st386 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st386 } - goto st29 + goto st33 st386: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof386 } st_case_386: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr545 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr216 - case 61: - goto tr55 - case 92: - goto st37 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr361 - } - goto st29 -tr214: -//line plugins/parsers/influx/machine.go.rl:76 - - key = m.text() - - goto st102 - st102: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof102 - } - st_case_102: -//line plugins/parsers/influx/machine.go:11505 switch ( m.data)[( m.p)] { case 9: - goto st7 + goto tr583 case 10: - goto tr61 + goto tr584 + case 11: + goto tr585 + case 12: + goto tr450 + case 13: + goto tr586 case 32: - goto st7 + goto tr583 case 34: - goto tr183 + goto tr100 case 44: - goto st7 + goto st6 case 61: - goto st7 + goto tr101 case 92: - goto tr185 + goto st77 } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st387 } - goto tr180 -tr183: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st387 -tr189: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st387 + goto st33 st387: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof387 } st_case_387: -//line plugins/parsers/influx/machine.go:11547 switch ( m.data)[( m.p)] { + case 9: + goto tr583 case 10: - goto tr357 + goto tr584 case 11: - goto tr565 + goto 
tr585 + case 12: + goto tr450 case 13: - goto tr357 + goto tr586 case 32: - goto tr514 + goto tr583 + case 34: + goto tr100 case 44: - goto tr516 + goto st6 case 61: - goto tr207 + goto tr101 case 92: - goto st36 + goto st77 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr514 - } - goto st31 -tr565: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st388 -tr567: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st388 -tr573: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st388 + goto st33 tr577: -//line plugins/parsers/influx/machine.go.rl:80 + ( m.cs) = 388 +//line plugins/parsers/influx/machine.go.rl:77 - m.handler.AddTag(key, m.text()) + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- -//line plugins/parsers/influx/machine.go.rl:92 + ( m.cs) = 247; + {( m.p)++; goto _out } + } - m.handler.AddUint(key, m.text()) +//line plugins/parsers/influx/machine.go.rl:19 - goto st388 -tr581: -//line plugins/parsers/influx/machine.go.rl:80 + m.pb = m.p - m.handler.AddTag(key, m.text()) + goto _again +tr621: + ( m.cs) = 388 +//line plugins/parsers/influx/machine.go.rl:77 -//line plugins/parsers/influx/machine.go.rl:100 + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - m.handler.AddBool(key, m.text()) + ( m.cs) = 247; + {( m.p)++; goto _out } + } - goto st388 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr803: + ( m.cs) = 388 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr808: + ( m.cs) = 388 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr813: + ( m.cs) = 388 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st388: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof388 } st_case_388: -//line plugins/parsers/influx/machine.go:11619 +//line plugins/parsers/influx/machine.go:11855 switch ( m.data)[( m.p)] { + case 9: + goto tr607 case 10: - goto tr357 + goto st288 case 11: - goto tr517 + goto tr608 + case 12: + goto tr482 case 13: - goto tr357 + goto st74 case 32: - goto tr514 + goto tr607 + case 34: + goto tr124 case 44: - goto tr63 + goto tr92 case 45: - goto tr518 + goto tr609 case 61: - goto tr207 + goto st31 case 92: - goto tr67 + goto tr125 } - switch { - case ( m.data)[( m.p)] > 12: - 
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr519 - } - case ( m.data)[( m.p)] >= 9: - goto tr514 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr610 } - goto tr65 -tr185: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr121 +tr608: + ( m.cs) = 389 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p - goto st103 - st103: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof103 - } - st_case_103: -//line plugins/parsers/influx/machine.go:11658 - switch ( m.data)[( m.p)] { - case 34: - goto st89 - case 92: - goto st89 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] >= 9: - goto tr61 - } - goto st31 -tr211: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st104 - st104: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof104 - } - st_case_104: -//line plugins/parsers/influx/machine.go:11685 - switch ( m.data)[( m.p)] { - case 34: - goto st100 - case 92: - goto st100 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] >= 9: - goto tr61 - } - goto st29 -tr202: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st105 - st105: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof105 - } - st_case_105: -//line plugins/parsers/influx/machine.go:11712 - switch ( m.data)[( m.p)] { - case 34: - goto st96 - case 92: - goto st96 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] >= 9: - goto tr61 - } - goto st33 -tr172: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st106 - st106: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof106 - } - st_case_106: -//line plugins/parsers/influx/machine.go:11739 - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 46: - goto st107 - case 48: - goto st391 - case 61: - goto tr61 - case 92: - goto st36 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st394 - } - case ( m.data)[( m.p)] >= 9: - goto tr60 - } - goto st31 -tr173: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st107 - st107: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof107 - } - st_case_107: -//line plugins/parsers/influx/machine.go:11780 - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr61 - case 92: - goto st36 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st389 - } - case ( m.data)[( m.p)] >= 9: - goto tr60 - } - goto st31 + goto _again st389: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof389 } st_case_389: +//line plugins/parsers/influx/machine.go:11906 switch ( m.data)[( m.p)] { + case 9: + goto tr607 case 10: - goto tr383 + goto st288 case 11: - goto tr567 + goto tr608 + case 12: + goto tr482 case 13: - goto tr383 + goto st74 case 32: - goto tr566 - case 44: - goto tr568 - case 61: - goto tr207 - case 69: - goto st108 - case 92: - goto st36 - case 101: - goto st108 - } - 
switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st389 - } - case ( m.data)[( m.p)] >= 9: - goto tr566 - } - goto st31 - st108: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof108 - } - st_case_108: - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 + goto tr607 case 34: - goto st109 + goto tr124 case 44: - goto tr63 + goto tr92 + case 45: + goto tr609 case 61: - goto tr61 + goto tr129 case 92: - goto st36 + goto tr125 } - switch { - case ( m.data)[( m.p)] < 43: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st390 - } - default: - goto st109 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr610 } - goto st31 - st109: + goto tr121 +tr92: + ( m.cs) = 78 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr86: + ( m.cs) = 78 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again +tr233: + ( m.cs) = 78 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st78: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof109 + goto _test_eof78 } - st_case_109: + st_case_78: +//line plugins/parsers/influx/machine.go:11983 switch ( m.data)[( m.p)] { + case 9: + goto st6 case 10: - goto tr61 - case 11: - goto tr62 + goto st7 + case 12: + goto tr47 case 13: - goto tr61 + goto st8 case 32: - goto tr60 + goto st6 + case 34: + goto tr192 case 44: - goto tr63 + goto st6 case 61: - goto tr61 + goto st6 case 92: - goto st36 + goto tr224 + } + goto tr223 +tr223: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st79 + st79: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof79 + } + st_case_79: +//line plugins/parsers/influx/machine.go:12016 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto st7 + case 12: + goto tr47 + case 13: + goto st8 + case 32: + goto st6 + case 34: + goto tr195 + case 44: + goto st6 + case 61: + goto tr226 + case 92: + goto st89 + } + goto st79 +tr226: +//line plugins/parsers/influx/machine.go.rl:86 + + key = m.text() + + goto st80 + st80: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof80 + } + st_case_80: +//line plugins/parsers/influx/machine.go:12049 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto st7 + case 12: + goto tr47 + case 13: + goto st8 + case 32: + goto st6 + case 34: + goto tr151 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr229 + } + goto tr228 +tr228: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st81 + st81: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof81 + } + st_case_81: +//line plugins/parsers/influx/machine.go:12082 + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto st7 + case 11: + goto tr232 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr231 + case 34: + goto tr157 + case 44: + goto tr233 + case 61: + goto st6 + case 92: + goto st87 + } + goto st81 +tr232: + ( m.cs) = 82 +//line 
plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st82: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof82 + } + st_case_82: +//line plugins/parsers/influx/machine.go:12124 + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto st7 + case 11: + goto tr236 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr231 + case 34: + goto tr204 + case 44: + goto tr233 + case 61: + goto st6 + case 92: + goto tr237 + } + goto tr235 +tr235: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st83 + st83: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof83 + } + st_case_83: +//line plugins/parsers/influx/machine.go:12159 + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto st7 + case 11: + goto tr239 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr231 + case 34: + goto tr208 + case 44: + goto tr233 + case 61: + goto tr101 + case 92: + goto st85 + } + goto st83 +tr239: + ( m.cs) = 84 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr236: + ( m.cs) = 84 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again + st84: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof84 + } + st_case_84: +//line plugins/parsers/influx/machine.go:12218 + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto st7 + case 11: + goto tr236 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr231 + case 34: + goto tr204 + case 44: + goto tr233 + case 61: + goto tr101 + case 92: + goto tr237 + } + goto tr235 +tr237: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st85 + st85: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof85 + } + st_case_85: +//line plugins/parsers/influx/machine.go:12253 + switch ( m.data)[( m.p)] { + case 34: + goto st83 + case 92: + goto st86 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st390 + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr47 } case ( m.data)[( m.p)] >= 9: - goto tr60 + goto tr47 } - goto st31 + goto st19 + st86: +//line plugins/parsers/influx/machine.go.rl:234 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof86 + } + st_case_86: +//line plugins/parsers/influx/machine.go:12277 + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto st7 + case 11: + goto tr239 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr231 + case 34: + goto tr208 + case 44: + goto tr233 + case 61: + goto tr101 + case 92: + goto st85 + } + goto st83 +tr229: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st87 + st87: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof87 + } + st_case_87: +//line plugins/parsers/influx/machine.go:12312 + switch ( m.data)[( m.p)] { + case 34: + goto st81 + case 92: + goto st88 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr47 + } + case ( m.data)[( m.p)] >= 9: + goto tr47 + } + goto st17 + st88: +//line plugins/parsers/influx/machine.go.rl:234 + ( m.p)-- + + if ( 
m.p)++; ( m.p) == ( m.pe) { + goto _test_eof88 + } + st_case_88: +//line plugins/parsers/influx/machine.go:12336 + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto st7 + case 11: + goto tr232 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr231 + case 34: + goto tr157 + case 44: + goto tr233 + case 61: + goto st6 + case 92: + goto st87 + } + goto st81 +tr224: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st89 + st89: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof89 + } + st_case_89: +//line plugins/parsers/influx/machine.go:12371 + switch ( m.data)[( m.p)] { + case 34: + goto st79 + case 92: + goto st90 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr47 + } + case ( m.data)[( m.p)] >= 9: + goto tr47 + } + goto st15 + st90: +//line plugins/parsers/influx/machine.go.rl:234 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof90 + } + st_case_90: +//line plugins/parsers/influx/machine.go:12395 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto st7 + case 12: + goto tr47 + case 13: + goto st8 + case 32: + goto st6 + case 34: + goto tr195 + case 44: + goto st6 + case 61: + goto tr226 + case 92: + goto st89 + } + goto st79 +tr609: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st91 + st91: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof91 + } + st_case_91: +//line plugins/parsers/influx/machine.go:12428 + switch ( m.data)[( m.p)] { + case 9: + goto tr89 + case 10: + goto st7 + case 11: + goto tr127 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr89 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st94 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st390 + } + goto st42 +tr610: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st390 st390: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof390 } st_case_390: +//line plugins/parsers/influx/machine.go:12466 switch ( m.data)[( m.p)] { + case 9: + goto tr611 case 10: - goto tr383 + goto tr584 case 11: - goto tr567 + goto tr612 + case 12: + goto tr490 case 13: - goto tr383 + goto tr586 case 32: - goto tr566 + goto tr611 + case 34: + goto tr128 case 44: - goto tr568 + goto tr92 case 61: - goto tr207 + goto tr129 case 92: - goto st36 + goto st94 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st390 - } - case ( m.data)[( m.p)] >= 9: - goto tr566 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st534 } - goto st31 + goto st42 +tr616: + ( m.cs) = 391 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr756: + ( m.cs) = 391 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr611: + ( m.cs) = 391 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr753: + ( m.cs) = 391 +//line plugins/parsers/influx/machine.go.rl:90 + + err = 
m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st391: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof391 } st_case_391: +//line plugins/parsers/influx/machine.go:12570 switch ( m.data)[( m.p)] { + case 9: + goto st391 case 10: - goto tr383 + goto st288 case 11: - goto tr567 + goto tr615 + case 12: + goto st294 case 13: - goto tr383 + goto st74 case 32: - goto tr566 + goto st391 + case 34: + goto tr97 case 44: - goto tr568 - case 46: - goto st389 + goto st6 case 61: - goto tr207 - case 69: - goto st108 + goto st6 case 92: - goto st36 - case 101: - goto st108 - case 105: - goto st393 + goto tr98 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st392 - } - case ( m.data)[( m.p)] >= 9: - goto tr566 - } - goto st31 + goto tr94 +tr615: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st392 st392: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof392 } st_case_392: +//line plugins/parsers/influx/machine.go:12605 switch ( m.data)[( m.p)] { + case 9: + goto st391 case 10: - goto tr383 + goto st288 case 11: - goto tr567 + goto tr615 + case 12: + goto st294 case 13: - goto tr383 + goto st74 case 32: - goto tr566 + goto st391 + case 34: + goto tr97 case 44: - goto tr568 - case 46: - goto st389 + goto st6 case 61: - goto tr207 - case 69: - goto st108 + goto tr101 case 92: - goto st36 - case 101: - goto st108 + goto tr98 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st392 - } - case ( m.data)[( m.p)] >= 9: - goto tr566 - } - goto st31 + goto tr94 +tr617: + ( m.cs) = 393 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again +tr612: + ( m.cs) = 393 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st393: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof393 } st_case_393: +//line plugins/parsers/influx/machine.go:12674 switch ( m.data)[( m.p)] { + case 9: + goto tr616 case 10: - goto tr389 + goto st288 case 11: - goto tr573 + goto tr617 + case 12: + goto tr495 case 13: - goto tr389 + goto st74 case 32: - goto tr572 + goto tr616 + case 34: + goto tr124 case 44: - goto tr574 + goto tr92 case 61: - goto tr207 + goto tr129 case 92: - goto st36 + goto tr125 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr572 + goto tr121 +tr129: +//line plugins/parsers/influx/machine.go.rl:99 + + key = m.text() + + goto st92 +tr374: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:99 + + key = m.text() + + goto st92 + st92: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof92 + } + st_case_92: +//line plugins/parsers/influx/machine.go:12719 + switch ( m.data)[( m.p)] { + case 9: + goto tr89 + case 10: + goto st7 + case 11: + goto tr90 + case 12: + goto tr1 + case 13: + 
goto st8 + case 32: + goto tr89 + case 34: + goto tr212 + case 44: + goto tr92 + case 45: + goto tr245 + case 46: + goto tr246 + case 48: + goto tr247 + case 70: + goto tr249 + case 84: + goto tr250 + case 92: + goto st142 + case 102: + goto tr251 + case 116: + goto tr252 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr248 } goto st31 +tr90: + ( m.cs) = 93 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr84: + ( m.cs) = 93 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again + st93: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof93 + } + st_case_93: +//line plugins/parsers/influx/machine.go:12793 + switch ( m.data)[( m.p)] { + case 9: + goto tr89 + case 10: + goto st7 + case 11: + goto tr131 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr89 + case 34: + goto tr124 + case 44: + goto tr92 + case 61: + goto st31 + case 92: + goto tr125 + } + goto tr121 +tr125: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st94 + st94: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof94 + } + st_case_94: +//line plugins/parsers/influx/machine.go:12828 + switch ( m.data)[( m.p)] { + case 34: + goto st42 + case 92: + goto st42 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 + } + case ( m.data)[( m.p)] >= 9: + goto tr8 + } + goto st12 +tr245: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st95 + st95: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof95 + } + st_case_95: +//line plugins/parsers/influx/machine.go:12855 + switch ( m.data)[( m.p)] { + case 9: + goto tr89 + case 10: + goto st7 + case 11: + goto tr90 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr89 + case 34: + goto tr91 + case 44: + goto tr92 + case 46: + goto st97 + case 48: + goto st522 + case 92: + goto st142 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st525 + } + goto st31 +tr85: + ( m.cs) = 394 +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr91: + ( m.cs) = 394 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr118: + ( m.cs) = 394 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again st394: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof394 } st_case_394: +//line plugins/parsers/influx/machine.go:12936 switch ( m.data)[( m.p)] { case 10: - goto tr383 + goto st262 case 11: - goto tr567 + goto tr618 case 13: - goto tr383 - case 32: - goto tr566 - case 44: - goto tr568 - case 46: - goto st389 - case 61: - goto tr207 - case 69: - goto st108 - case 92: - goto st36 - case 101: - goto st108 - case 105: - goto st393 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( 
m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st394 - } - case ( m.data)[( m.p)] >= 9: - goto tr566 - } - goto st31 -tr174: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st395 - st395: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof395 - } - st_case_395: -//line plugins/parsers/influx/machine.go:12084 - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 11: - goto tr567 - case 13: - goto tr383 - case 32: - goto tr566 - case 44: - goto tr568 - case 46: - goto st389 - case 61: - goto tr207 - case 69: - goto st108 - case 92: - goto st36 - case 101: - goto st108 - case 105: - goto st393 - case 117: - goto st396 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st392 - } - case ( m.data)[( m.p)] >= 9: - goto tr566 - } - goto st31 - st396: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof396 - } - st_case_396: - switch ( m.data)[( m.p)] { - case 10: - goto tr393 - case 11: - goto tr577 - case 13: - goto tr393 - case 32: - goto tr576 - case 44: - goto tr578 - case 61: - goto tr207 - case 92: - goto st36 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr576 - } - goto st31 -tr175: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st397 - st397: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof397 - } - st_case_397: -//line plugins/parsers/influx/machine.go:12156 - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 11: - goto tr567 - case 13: - goto tr383 - case 32: - goto tr566 - case 44: - goto tr568 - case 46: - goto st389 - case 61: - goto tr207 - case 69: - goto st108 - case 92: - goto st36 - case 101: - goto st108 - case 105: - goto st393 - case 117: - goto st396 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st397 - } - case ( m.data)[( m.p)] >= 9: - goto tr566 - } - goto st31 -tr176: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st398 - st398: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof398 - } - st_case_398: -//line plugins/parsers/influx/machine.go:12203 - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 11: - goto tr581 - case 13: - goto tr397 - case 32: - goto tr580 - case 44: - goto tr582 - case 61: - goto tr207 - case 65: - goto st110 - case 92: - goto st36 - case 97: - goto st113 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr580 - } - goto st31 - st110: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof110 - } - st_case_110: - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr61 - case 76: - goto st111 - case 92: - goto st36 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st31 - st111: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof111 - } - st_case_111: - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr61 - case 83: - goto st112 - case 92: - goto st36 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st31 - st112: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof112 - } - st_case_112: - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr61 - case 69: - goto st399 - case 92: - goto st36 - } - if 
9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st31 - st399: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof399 - } - st_case_399: - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 11: - goto tr581 - case 13: - goto tr397 - case 32: - goto tr580 - case 44: - goto tr582 - case 61: - goto tr207 - case 92: - goto st36 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr580 - } - goto st31 - st113: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof113 - } - st_case_113: - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr61 - case 92: - goto st36 - case 108: - goto st114 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st31 - st114: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof114 - } - st_case_114: - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr61 - case 92: - goto st36 - case 115: - goto st115 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st31 - st115: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof115 - } - st_case_115: - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr61 - case 92: - goto st36 - case 101: - goto st399 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st31 -tr177: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st400 - st400: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof400 - } - st_case_400: -//line plugins/parsers/influx/machine.go:12426 - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 11: - goto tr581 - case 13: - goto tr397 - case 32: - goto tr580 - case 44: - goto tr582 - case 61: - goto tr207 - case 82: - goto st116 - case 92: - goto st36 - case 114: - goto st117 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr580 - } - goto st31 - st116: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof116 - } - st_case_116: - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr61 - case 85: - goto st112 - case 92: - goto st36 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st31 - st117: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof117 - } - st_case_117: - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr61 - case 92: - goto st36 - case 117: - goto st115 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st31 -tr178: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st401 - st401: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof401 - } - st_case_401: -//line plugins/parsers/influx/machine.go:12516 - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 11: - goto tr581 - case 13: - goto tr397 - case 32: - goto tr580 - case 44: - goto tr582 - case 61: - goto tr207 - case 92: - goto st36 - case 97: - goto st113 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr580 - } - goto st31 -tr179: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st402 - 
st402: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof402 - } - st_case_402: -//line plugins/parsers/influx/machine.go:12550 - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 11: - goto tr581 - case 13: - goto tr397 - case 32: - goto tr580 - case 44: - goto tr582 - case 61: - goto tr207 - case 92: - goto st36 - case 114: - goto st117 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr580 - } - goto st31 -tr167: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st118 - st118: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof118 - } - st_case_118: -//line plugins/parsers/influx/machine.go:12584 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] >= 9: - goto tr61 - } - goto st86 -tr90: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st119 -tr84: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st119 -tr239: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st119 - st119: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof119 - } - st_case_119: -//line plugins/parsers/influx/machine.go:12621 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr61 - case 32: - goto st7 - case 34: - goto tr210 - case 44: - goto st7 - case 61: - goto st7 - case 92: - goto tr230 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto tr229 -tr229: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st120 - st120: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof120 - } - st_case_120: -//line plugins/parsers/influx/machine.go:12653 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr61 - case 32: - goto st7 - case 34: - goto tr213 - case 44: - goto st7 - case 61: - goto tr232 - case 92: - goto st128 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st120 -tr232: -//line plugins/parsers/influx/machine.go.rl:76 - - key = m.text() - - goto st121 - st121: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof121 - } - st_case_121: -//line plugins/parsers/influx/machine.go:12685 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr61 - case 32: - goto st7 - case 34: - goto tr183 - case 44: - goto st7 - case 61: - goto st7 - case 92: - goto tr235 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto tr234 -tr234: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st122 - st122: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof122 - } - st_case_122: -//line plugins/parsers/influx/machine.go:12717 - switch ( m.data)[( m.p)] { - case 9: - goto tr237 - case 11: - goto tr238 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr189 - case 44: - goto tr239 - case 61: - goto st7 - case 92: - goto st127 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st122 -tr238: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st123 - st123: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof123 - } - st_case_123: -//line plugins/parsers/influx/machine.go:12751 - switch ( m.data)[( m.p)] { - case 9: - goto tr237 - case 11: - goto tr242 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr201 - case 44: - goto 
tr239 - case 61: - goto st7 - case 92: - goto tr243 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto tr241 -tr241: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st124 - st124: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof124 - } - st_case_124: -//line plugins/parsers/influx/machine.go:12785 - switch ( m.data)[( m.p)] { - case 9: - goto tr237 - case 11: - goto tr245 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr205 - case 44: - goto tr239 - case 61: - goto tr99 - case 92: - goto st126 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st124 -tr245: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st125 -tr242: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st125 - st125: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof125 - } - st_case_125: -//line plugins/parsers/influx/machine.go:12829 - switch ( m.data)[( m.p)] { - case 9: - goto tr237 - case 11: - goto tr242 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr201 - case 44: - goto tr239 - case 61: - goto tr99 - case 92: - goto tr243 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto tr241 -tr243: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st126 - st126: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof126 - } - st_case_126: -//line plugins/parsers/influx/machine.go:12863 - switch ( m.data)[( m.p)] { - case 34: - goto st124 - case 92: - goto st124 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] >= 9: - goto tr61 - } - goto st33 -tr235: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st127 - st127: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof127 - } - st_case_127: -//line plugins/parsers/influx/machine.go:12890 - switch ( m.data)[( m.p)] { - case 34: - goto st122 - case 92: - goto st122 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] >= 9: - goto tr61 - } - goto st31 -tr230: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st128 - st128: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof128 - } - st_case_128: -//line plugins/parsers/influx/machine.go:12917 - switch ( m.data)[( m.p)] { - case 34: - goto st120 - case 92: - goto st120 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] >= 9: - goto tr61 - } - goto st29 -tr163: -//line plugins/parsers/influx/machine.go.rl:84 - - key = m.text() - - goto st129 - st129: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof129 - } - st_case_129: -//line plugins/parsers/influx/machine.go:12944 - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr88 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr247 - case 44: - goto tr90 - case 45: - goto tr248 - case 46: - goto tr249 - case 48: - goto tr250 - case 70: - goto tr252 - case 84: - goto tr253 - case 92: - goto st170 - case 102: - goto tr254 - case 116: - goto tr255 - } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr251 - } - case ( m.data)[( m.p)] >= 10: - goto tr5 - 
} - goto st40 -tr247: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st403 - st403: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof403 - } - st_case_403: -//line plugins/parsers/influx/machine.go:12995 - switch ( m.data)[( m.p)] { - case 9: - goto tr587 - case 11: - goto tr588 - case 12: - goto tr482 - case 32: - goto tr587 - case 34: - goto tr83 - case 44: - goto tr589 - case 92: - goto tr85 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 - } - goto tr80 -tr614: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st404 -tr587: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st404 -tr746: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st404 -tr742: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st404 -tr774: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st404 -tr778: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st404 -tr782: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st404 -tr789: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st404 -tr798: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st404 -tr803: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st404 -tr808: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st404 - st404: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof404 - } - st_case_404: -//line plugins/parsers/influx/machine.go:13123 - switch ( m.data)[( m.p)] { - case 9: - goto st404 - case 11: - goto tr591 - case 12: - goto st318 - case 32: - goto st404 - case 34: - goto tr95 - case 44: - goto st7 - case 45: - goto tr592 - case 61: - goto st7 - case 92: - goto tr96 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr593 - } - case ( m.data)[( m.p)] >= 10: - goto tr357 - } - goto tr92 -tr591: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st405 - st405: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof405 - } - st_case_405: -//line plugins/parsers/influx/machine.go:13164 - switch ( m.data)[( m.p)] { - case 9: - goto st404 - case 11: - goto tr591 - case 12: - goto st318 - case 32: - goto st404 - case 34: - goto tr95 - case 44: - goto st7 - case 45: - goto tr592 - case 61: - goto tr99 - case 92: - goto tr96 - } - switch { - case ( m.data)[( m.p)] > 13: - 
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr593 - } - case ( m.data)[( m.p)] >= 10: - goto tr357 - } - goto tr92 -tr592: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st130 - st130: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof130 - } - st_case_130: -//line plugins/parsers/influx/machine.go:13205 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr101 - case 32: - goto st7 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st406 - } - case ( m.data)[( m.p)] >= 12: - goto tr101 - } - goto st42 -tr593: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st406 - st406: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof406 - } - st_case_406: -//line plugins/parsers/influx/machine.go:13242 - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st408 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 -tr594: -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st407 - st407: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof407 - } - st_case_407: -//line plugins/parsers/influx/machine.go:13281 - switch ( m.data)[( m.p)] { - case 9: - goto st268 - case 11: - goto st407 - case 12: - goto st210 - case 32: - goto st268 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 - } - goto st42 - st408: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof408 - } - st_case_408: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st409 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st409: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof409 - } - st_case_409: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st410 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st410: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof410 - } - st_case_410: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st411 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st411: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof411 - } - st_case_411: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - 
case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st412 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st412: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof412 - } - st_case_412: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st413 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st413: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof413 - } - st_case_413: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st414 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st414: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof414 - } - st_case_414: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st415 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st415: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof415 - } - st_case_415: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st416 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st416: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof416 - } - st_case_416: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st417 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st417: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof417 - } - st_case_417: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st418 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st418: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof418 - } - st_case_418: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st419 - } - case 
( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st419: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof419 - } - st_case_419: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st420 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st420: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof420 - } - st_case_420: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st421 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st421: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof421 - } - st_case_421: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st422 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st422: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof422 - } - st_case_422: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st423 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st423: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof423 - } - st_case_423: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st424 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st424: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof424 - } - st_case_424: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st425 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st425: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof425 - } - st_case_425: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr362 - } - goto st42 -tr588: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st426 -tr790: -//line plugins/parsers/influx/machine.go.rl:72 
- - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st426 -tr799: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st426 -tr804: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st426 -tr809: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st426 - st426: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof426 - } - st_case_426: -//line plugins/parsers/influx/machine.go:13930 - switch ( m.data)[( m.p)] { - case 9: - goto tr614 - case 11: - goto tr615 - case 12: - goto tr482 - case 32: - goto tr614 - case 34: - goto tr158 - case 44: - goto tr90 - case 45: - goto tr616 - case 61: - goto st40 - case 92: - goto tr159 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr617 - } - case ( m.data)[( m.p)] >= 10: - goto tr357 - } - goto tr156 -tr615: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st427 - st427: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof427 - } - st_case_427: -//line plugins/parsers/influx/machine.go:13975 - switch ( m.data)[( m.p)] { - case 9: - goto tr614 - case 11: - goto tr615 - case 12: - goto tr482 - case 32: - goto tr614 - case 34: - goto tr158 - case 44: - goto tr90 - case 45: - goto tr616 - case 61: - goto tr163 - case 92: - goto tr159 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr617 - } - case ( m.data)[( m.p)] >= 10: - goto tr357 - } - goto tr156 -tr616: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st131 - st131: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof131 - } - st_case_131: -//line plugins/parsers/influx/machine.go:14016 - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr161 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st428 - } - case ( m.data)[( m.p)] >= 10: - goto tr101 - } - goto st81 -tr617: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st428 - st428: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof428 - } - st_case_428: -//line plugins/parsers/influx/machine.go:14055 - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st432 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 -tr623: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st429 -tr753: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st429 -tr618: -//line plugins/parsers/influx/machine.go.rl:72 - - 
m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st429 -tr750: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st429 - st429: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof429 - } - st_case_429: -//line plugins/parsers/influx/machine.go:14120 - switch ( m.data)[( m.p)] { - case 9: - goto st429 - case 11: - goto tr622 - case 12: - goto st322 - case 32: - goto st429 - case 34: - goto tr95 - case 44: - goto st7 - case 61: - goto st7 - case 92: - goto tr96 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 - } - goto tr92 -tr622: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st430 - st430: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof430 - } - st_case_430: -//line plugins/parsers/influx/machine.go:14154 - switch ( m.data)[( m.p)] { - case 9: - goto st429 - case 11: - goto tr622 - case 12: - goto st322 - case 32: - goto st429 - case 34: - goto tr95 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto tr96 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 - } - goto tr92 -tr624: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st431 -tr619: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st431 - st431: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof431 - } - st_case_431: -//line plugins/parsers/influx/machine.go:14202 - switch ( m.data)[( m.p)] { - case 9: - goto tr623 - case 11: - goto tr624 - case 12: - goto tr495 - case 32: - goto tr623 - case 34: - goto tr158 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto tr159 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 - } - goto tr156 -tr159: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st132 - st132: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof132 - } - st_case_132: -//line plugins/parsers/influx/machine.go:14236 - switch ( m.data)[( m.p)] { - case 34: - goto st81 - case 92: - goto st81 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - case ( m.data)[( m.p)] >= 9: - goto tr5 - } - goto st26 - st432: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof432 - } - st_case_432: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st433 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st433: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof433 - } - st_case_433: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st434 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st434: - 
if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof434 - } - st_case_434: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st435 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st435: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof435 - } - st_case_435: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st436 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st436: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof436 - } - st_case_436: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st437 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st437: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof437 - } - st_case_437: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st438 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st438: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof438 - } - st_case_438: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st439 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st439: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof439 - } - st_case_439: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st440 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st440: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof440 - } - st_case_440: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st441 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st441: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof441 - } - st_case_441: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto 
tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st442 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st442: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof442 - } - st_case_442: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st443 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st443: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof443 - } - st_case_443: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st444 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st444: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof444 - } - st_case_444: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st445 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st445: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof445 - } - st_case_445: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st446 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st446: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof446 - } - st_case_446: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st447 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st447: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof447 - } - st_case_447: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st448 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st448: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof448 - } - st_case_448: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 
13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st449 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st449: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof449 - } - st_case_449: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr362 - } - goto st81 -tr83: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st450 -tr89: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st450 - st450: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof450 - } - st_case_450: -//line plugins/parsers/influx/machine.go:14844 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto tr642 - case 13: - goto tr357 + goto st34 case 32: goto tr482 case 44: goto tr484 case 92: - goto st133 + goto st96 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr482 } - goto st2 -tr642: -//line plugins/parsers/influx/machine.go.rl:72 + goto st1 +tr618: + ( m.cs) = 395 +//line plugins/parsers/influx/machine.go.rl:77 - m.handler.SetMeasurement(m.text()) + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - goto st451 -tr794: -//line plugins/parsers/influx/machine.go.rl:72 + ( m.cs) = 247; + {( m.p)++; goto _out } + } - m.handler.SetMeasurement(m.text()) + goto _again +tr798: + ( m.cs) = 395 +//line plugins/parsers/influx/machine.go.rl:77 -//line plugins/parsers/influx/machine.go.rl:96 + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - m.handler.AddFloat(key, m.text()) + ( m.cs) = 247; + {( m.p)++; goto _out } + } - goto st451 -tr819: -//line plugins/parsers/influx/machine.go.rl:72 +//line plugins/parsers/influx/machine.go.rl:121 - m.handler.SetMeasurement(m.text()) + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- -//line plugins/parsers/influx/machine.go.rl:88 + ( m.cs) = 247; + {( m.p)++; goto _out } + } - m.handler.AddInt(key, m.text()) + goto _again +tr981: + ( m.cs) = 395 +//line plugins/parsers/influx/machine.go.rl:77 - goto st451 -tr822: -//line plugins/parsers/influx/machine.go.rl:72 + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - m.handler.SetMeasurement(m.text()) + ( m.cs) = 247; + {( m.p)++; goto _out } + } -//line plugins/parsers/influx/machine.go.rl:92 +//line plugins/parsers/influx/machine.go.rl:103 - m.handler.AddUint(key, m.text()) + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- - goto st451 -tr825: -//line plugins/parsers/influx/machine.go.rl:72 + ( m.cs) = 247; + {( m.p)++; goto _out } + } - m.handler.SetMeasurement(m.text()) + goto _again +tr984: + ( m.cs) = 395 +//line plugins/parsers/influx/machine.go.rl:77 -//line plugins/parsers/influx/machine.go.rl:100 + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - m.handler.AddBool(key, m.text()) + ( m.cs) = 247; + {( m.p)++; goto _out } + } - goto st451 - st451: +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr987: + ( m.cs) = 395 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + 
( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st395: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof451 + goto _test_eof395 } - st_case_451: -//line plugins/parsers/influx/machine.go:14914 + st_case_395: +//line plugins/parsers/influx/machine.go:13065 switch ( m.data)[( m.p)] { case 10: - goto tr357 + goto st262 case 11: goto tr487 case 13: - goto tr357 + goto st34 case 32: goto tr482 case 44: - goto tr7 + goto tr4 case 45: goto tr488 case 61: - goto st2 + goto st1 case 92: - goto tr46 + goto tr45 } switch { case ( m.data)[( m.p)] > 12: @@ -14937,916 +13088,1185 @@ tr825: case ( m.data)[( m.p)] >= 9: goto tr482 } - goto tr44 -tr2: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr41 +tr37: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p - goto st133 - st133: + goto st96 +tr441: +//line plugins/parsers/influx/machine.go.rl:73 + + foundMetric = true + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st96 + st96: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof133 + goto _test_eof96 } - st_case_133: -//line plugins/parsers/influx/machine.go:14953 + st_case_96: +//line plugins/parsers/influx/machine.go:13114 switch { case ( m.data)[( m.p)] > 10: if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr1 + goto st0 } case ( m.data)[( m.p)] >= 9: - goto tr1 + goto st0 } - goto st2 -tr589: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 + goto st1 +tr246: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p - goto st134 -tr744: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st134 -tr776: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st134 -tr780: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st134 -tr784: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st134 -tr792: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st134 -tr801: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st134 -tr806: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st134 -tr811: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st134 - st134: + goto st97 + st97: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof134 + goto _test_eof97 } - st_case_134: -//line plugins/parsers/influx/machine.go:15058 + st_case_97: +//line 
plugins/parsers/influx/machine.go:13135 switch ( m.data)[( m.p)] { case 9: - goto st7 + goto tr89 case 10: - goto tr61 - case 32: goto st7 - case 34: - goto tr259 - case 44: - goto st7 - case 61: - goto st7 - case 92: - goto tr260 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto tr258 -tr258: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st135 - st135: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof135 - } - st_case_135: -//line plugins/parsers/influx/machine.go:15090 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr61 - case 32: - goto st7 - case 34: - goto tr262 - case 44: - goto st7 - case 61: - goto tr263 - case 92: - goto st169 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st135 -tr259: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st452 -tr262: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st452 - st452: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof452 - } - st_case_452: -//line plugins/parsers/influx/machine.go:15132 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 case 11: - goto st453 + goto tr90 + case 12: + goto tr1 case 13: - goto tr357 + goto st8 case 32: - goto st207 + goto tr89 + case 34: + goto tr91 case 44: - goto st9 - case 61: - goto tr169 + goto tr92 case 92: - goto st118 + goto st142 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st396 + } + goto st31 + st396: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof396 + } + st_case_396: + switch ( m.data)[( m.p)] { + case 9: + goto tr619 + case 10: + goto tr620 + case 11: + goto tr621 + case 12: + goto tr622 + case 13: + goto tr623 + case 32: + goto tr619 + case 34: + goto tr91 + case 44: + goto tr624 + case 69: + goto st140 + case 92: + goto st142 + case 101: + goto st140 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st396 + } + goto st31 +tr578: + ( m.cs) = 98 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again +tr624: + ( m.cs) = 98 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr747: + ( m.cs) = 98 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr781: + ( m.cs) = 98 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr787: + ( m.cs) = 98 +//line 
plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr793: + ( m.cs) = 98 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr805: + ( m.cs) = 98 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr810: + ( m.cs) = 98 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr815: + ( m.cs) = 98 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st98: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof98 + } + st_case_98: +//line plugins/parsers/influx/machine.go:13399 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto st7 + case 12: + goto tr47 + case 13: + goto st8 + case 32: + goto st6 + case 34: + goto tr258 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr259 + } + goto tr257 +tr257: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st99 + st99: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof99 + } + st_case_99: +//line plugins/parsers/influx/machine.go:13432 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto st7 + case 12: + goto tr47 + case 13: + goto st8 + case 32: + goto st6 + case 34: + goto tr261 + case 44: + goto st6 + case 61: + goto tr262 + case 92: + goto st138 + } + goto st99 +tr258: + ( m.cs) = 397 +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr261: + ( m.cs) = 397 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st397: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof397 + } + st_case_397: +//line plugins/parsers/influx/machine.go:13489 + switch ( m.data)[( m.p)] { + case 10: + goto st262 + case 11: + goto st398 + case 13: + goto st34 + case 32: + goto st261 + case 44: + goto st37 + case 61: + goto tr137 + case 92: + goto st101 } if 9 <= ( m.data)[( m.p)] && ( 
m.data)[( m.p)] <= 12 { - goto st207 + goto st261 } - goto st86 - st453: + goto st46 + st398: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof453 + goto _test_eof398 } - st_case_453: + st_case_398: switch ( m.data)[( m.p)] { case 10: - goto tr357 + goto st262 case 11: - goto st453 + goto st398 case 13: - goto tr357 + goto st34 case 32: - goto st207 + goto st261 case 44: - goto tr207 + goto tr132 case 45: - goto tr644 + goto tr627 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr645 + goto tr628 } case ( m.data)[( m.p)] >= 9: - goto st207 + goto st261 } - goto st86 -tr644: -//line plugins/parsers/influx/machine.go.rl:18 + goto st46 +tr627: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p - goto st136 - st136: + goto st100 + st100: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof136 + goto _test_eof100 } - st_case_136: -//line plugins/parsers/influx/machine.go:15196 + st_case_100: +//line plugins/parsers/influx/machine.go:13553 switch ( m.data)[( m.p)] { case 32: - goto tr207 + goto tr132 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } switch { case ( m.data)[( m.p)] < 12: if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr207 + goto tr132 } case ( m.data)[( m.p)] > 13: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st454 + goto st399 } default: - goto tr207 + goto tr132 } - goto st86 -tr645: -//line plugins/parsers/influx/machine.go.rl:18 + goto st46 +tr628: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p - goto st454 - st454: + goto st399 + st399: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof454 + goto _test_eof399 } - st_case_454: -//line plugins/parsers/influx/machine.go:15231 + st_case_399: +//line plugins/parsers/influx/machine.go:13588 switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr646 + goto tr629 case 13: - goto tr362 + goto tr453 case 32: - goto tr361 + goto tr450 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st456 + goto st401 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr450 } - goto st86 -tr646: -//line plugins/parsers/influx/machine.go.rl:108 + goto st46 +tr629: + ( m.cs) = 400 +//line plugins/parsers/influx/machine.go.rl:148 - m.handler.SetTimestamp(m.text()) + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- - goto st455 - st455: + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st400: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof455 + goto _test_eof400 } - st_case_455: -//line plugins/parsers/influx/machine.go:15268 + st_case_400: +//line plugins/parsers/influx/machine.go:13632 switch ( m.data)[( m.p)] { case 10: - goto tr357 + goto st262 case 11: - goto st455 + goto st400 case 13: - goto tr357 + goto st34 case 32: - goto st210 + goto st266 case 44: - goto tr61 + goto tr47 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st210 + goto st266 } - goto st86 - st456: + goto st46 +tr135: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st101 + st101: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof456 + goto _test_eof101 } - st_case_456: + st_case_101: +//line plugins/parsers/influx/machine.go:13664 + 
if ( m.data)[( m.p)] == 92 { + goto st102 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr47 + } + case ( m.data)[( m.p)] >= 9: + goto tr47 + } + goto st46 + st102: +//line plugins/parsers/influx/machine.go.rl:234 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof102 + } + st_case_102: +//line plugins/parsers/influx/machine.go:13685 + switch ( m.data)[( m.p)] { + case 32: + goto tr47 + case 44: + goto tr47 + case 61: + goto tr137 + case 92: + goto st101 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr47 + } + case ( m.data)[( m.p)] >= 9: + goto tr47 + } + goto st46 + st401: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof401 + } + st_case_401: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr646 + goto tr629 case 13: - goto tr362 + goto tr453 case 32: - goto tr361 + goto tr450 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st457 + goto st402 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr450 } - goto st86 - st457: + goto st46 + st402: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof457 + goto _test_eof402 } - st_case_457: + st_case_402: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr646 + goto tr629 case 13: - goto tr362 + goto tr453 case 32: - goto tr361 + goto tr450 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st458 + goto st403 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr450 } - goto st86 - st458: + goto st46 + st403: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof458 + goto _test_eof403 } - st_case_458: + st_case_403: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr646 + goto tr629 case 13: - goto tr362 + goto tr453 case 32: - goto tr361 + goto tr450 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st459 + goto st404 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr450 } - goto st86 - st459: + goto st46 + st404: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof459 + goto _test_eof404 } - st_case_459: + st_case_404: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr646 + goto tr629 case 13: - goto tr362 + goto tr453 case 32: - goto tr361 + goto tr450 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st460 + goto st405 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr450 } - goto st86 - st460: + goto st46 + st405: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof460 + goto _test_eof405 } - st_case_460: + st_case_405: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr646 + goto tr629 case 13: - goto tr362 + goto tr453 case 32: - goto tr361 + goto tr450 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( 
m.p)] <= 57 { - goto st461 + goto st406 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr450 } - goto st86 - st461: + goto st46 + st406: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof461 + goto _test_eof406 } - st_case_461: + st_case_406: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr646 + goto tr629 case 13: - goto tr362 + goto tr453 case 32: - goto tr361 + goto tr450 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st462 + goto st407 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr450 } - goto st86 - st462: + goto st46 + st407: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof462 + goto _test_eof407 } - st_case_462: + st_case_407: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr646 + goto tr629 case 13: - goto tr362 + goto tr453 case 32: - goto tr361 + goto tr450 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st463 + goto st408 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr450 } - goto st86 - st463: + goto st46 + st408: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof463 + goto _test_eof408 } - st_case_463: + st_case_408: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr646 + goto tr629 case 13: - goto tr362 + goto tr453 case 32: - goto tr361 + goto tr450 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st464 + goto st409 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr450 } - goto st86 - st464: + goto st46 + st409: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof464 + goto _test_eof409 } - st_case_464: + st_case_409: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr646 + goto tr629 case 13: - goto tr362 + goto tr453 case 32: - goto tr361 + goto tr450 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st465 + goto st410 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr450 } - goto st86 - st465: + goto st46 + st410: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof465 + goto _test_eof410 } - st_case_465: + st_case_410: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr646 + goto tr629 case 13: - goto tr362 + goto tr453 case 32: - goto tr361 + goto tr450 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st466 + goto st411 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr450 } - goto st86 - st466: + goto st46 + st411: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof466 + goto _test_eof411 } - st_case_466: + st_case_411: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr646 + goto tr629 case 13: - goto tr362 + goto tr453 case 32: - goto tr361 + goto tr450 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } switch { case ( m.data)[( m.p)] > 
12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st467 + goto st412 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr450 } - goto st86 - st467: + goto st46 + st412: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof467 + goto _test_eof412 } - st_case_467: + st_case_412: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr646 + goto tr629 case 13: - goto tr362 + goto tr453 case 32: - goto tr361 + goto tr450 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st468 + goto st413 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr450 } - goto st86 - st468: + goto st46 + st413: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof468 + goto _test_eof413 } - st_case_468: + st_case_413: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr646 + goto tr629 case 13: - goto tr362 + goto tr453 case 32: - goto tr361 + goto tr450 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st469 + goto st414 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr450 } - goto st86 - st469: + goto st46 + st414: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof469 + goto _test_eof414 } - st_case_469: + st_case_414: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr646 + goto tr629 case 13: - goto tr362 + goto tr453 case 32: - goto tr361 + goto tr450 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st470 + goto st415 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr450 } - goto st86 - st470: + goto st46 + st415: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof470 + goto _test_eof415 } - st_case_470: + st_case_415: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr646 + goto tr629 case 13: - goto tr362 + goto tr453 case 32: - goto tr361 + goto tr450 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st471 + goto st416 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr450 } - goto st86 - st471: + goto st46 + st416: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof471 + goto _test_eof416 } - st_case_471: + st_case_416: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr646 + goto tr629 case 13: - goto tr362 + goto tr453 case 32: - goto tr361 + goto tr450 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st472 + goto st417 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr450 } - goto st86 - st472: + goto st46 + st417: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof472 + goto _test_eof417 } - st_case_472: + st_case_417: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr646 + goto tr629 case 13: - goto tr362 + goto tr453 case 32: - goto tr361 + goto tr450 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + 
goto st101 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st473 + goto st418 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr450 } - goto st86 - st473: + goto st46 + st418: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof473 + goto _test_eof418 } - st_case_473: + st_case_418: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr451 case 11: - goto tr646 + goto tr629 case 13: - goto tr362 + goto tr453 case 32: - goto tr361 + goto tr450 case 44: - goto tr207 + goto tr132 case 61: - goto tr169 + goto tr137 case 92: - goto st118 + goto st101 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr361 + goto tr450 } - goto st86 -tr263: -//line plugins/parsers/influx/machine.go.rl:76 + goto st46 +tr262: +//line plugins/parsers/influx/machine.go.rl:86 key = m.text() -//line plugins/parsers/influx/machine.go.rl:84 +//line plugins/parsers/influx/machine.go.rl:99 key = m.text() - goto st137 - st137: + goto st103 + st103: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof137 + goto _test_eof103 } - st_case_137: -//line plugins/parsers/influx/machine.go:15839 + st_case_103: +//line plugins/parsers/influx/machine.go:14255 switch ( m.data)[( m.p)] { case 9: - goto st7 + goto st6 case 10: - goto tr61 - case 32: goto st7 + case 12: + goto tr47 + case 13: + goto st8 + case 32: + goto st6 case 34: goto tr266 case 44: - goto st7 + goto st6 case 45: goto tr267 case 46: @@ -15854,236 +14274,3038 @@ tr263: case 48: goto tr269 case 61: - goto st7 + goto st6 case 70: goto tr271 case 84: goto tr272 case 92: - goto tr235 + goto tr229 case 102: goto tr273 case 116: goto tr274 } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr270 - } - case ( m.data)[( m.p)] >= 12: - goto tr61 + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr270 } - goto tr234 + goto tr228 tr266: -//line plugins/parsers/influx/machine.go.rl:18 + ( m.cs) = 419 +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:104 +//line plugins/parsers/influx/machine.go.rl:139 - m.handler.AddString(key, m.text()) + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- - goto st474 + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st419: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof419 + } + st_case_419: +//line plugins/parsers/influx/machine.go:14316 + switch ( m.data)[( m.p)] { + case 9: + goto tr649 + case 10: + goto tr650 + case 11: + goto tr651 + case 12: + goto tr547 + case 13: + goto tr652 + case 32: + goto tr649 + case 34: + goto tr151 + case 44: + goto tr653 + case 61: + goto tr23 + case 92: + goto tr153 + } + goto tr148 +tr841: + ( m.cs) = 420 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr682: + ( m.cs) = 420 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr649: + ( m.cs) = 420 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again +tr837: + ( m.cs) = 420 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if 
err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr710: + ( m.cs) = 420 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr721: + ( m.cs) = 420 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr728: + ( m.cs) = 420 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr735: + ( m.cs) = 420 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr869: + ( m.cs) = 420 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr873: + ( m.cs) = 420 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr877: + ( m.cs) = 420 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st420: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof420 + } + st_case_420: +//line plugins/parsers/influx/machine.go:14572 + switch ( m.data)[( m.p)] { + case 9: + goto st420 + case 10: + goto st317 + case 11: + goto tr655 + case 12: + goto st290 + case 13: + goto st104 + case 32: + goto st420 + case 34: + goto tr97 + case 44: + goto st6 + case 45: + goto tr656 + case 61: + goto st6 + case 92: + goto tr163 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr657 + } + goto tr160 +tr655: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st421 + st421: + if ( m.p)++; ( m.p) == ( m.pe) { 
+ goto _test_eof421 + } + st_case_421: +//line plugins/parsers/influx/machine.go:14612 + switch ( m.data)[( m.p)] { + case 9: + goto st420 + case 10: + goto st317 + case 11: + goto tr655 + case 12: + goto st290 + case 13: + goto st104 + case 32: + goto st420 + case 34: + goto tr97 + case 44: + goto st6 + case 45: + goto tr656 + case 61: + goto tr165 + case 92: + goto tr163 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr657 + } + goto tr160 +tr652: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st104 +tr661: + ( m.cs) = 104 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr517: + ( m.cs) = 104 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr725: + ( m.cs) = 104 +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr732: + ( m.cs) = 104 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr739: + ( m.cs) = 104 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st104: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof104 + } + st_case_104: +//line plugins/parsers/influx/machine.go:14717 + if ( m.data)[( m.p)] == 10 { + goto st317 + } + goto tr8 +tr656: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st105 + st105: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof105 + } + st_case_105: +//line plugins/parsers/influx/machine.go:14733 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto st7 + case 12: + goto tr105 + case 13: + goto st8 + case 32: + goto st6 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st422 + } + goto st51 +tr657: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st422 + st422: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof422 + } + st_case_422: +//line plugins/parsers/influx/machine.go:14769 + switch ( m.data)[( m.p)] { + case 9: + goto tr658 + case 10: + goto tr659 + case 11: + goto tr660 + case 12: + goto tr450 + case 13: + goto tr661 + case 32: + goto tr658 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st425 + } + goto st51 +tr658: + ( m.cs) = 423 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st423: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof423 + } + st_case_423: +//line plugins/parsers/influx/machine.go:14814 + switch ( m.data)[( m.p)] { + case 10: + goto st317 + case 12: + goto st266 + case 13: + goto st104 + case 32: + goto st423 + case 34: + goto tr31 + case 92: + goto st76 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto st423 + } + goto st6 +tr660: + ( m.cs) = 424 +//line 
plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st424: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof424 + } + st_case_424: +//line plugins/parsers/influx/machine.go:14851 + switch ( m.data)[( m.p)] { + case 9: + goto st423 + case 10: + goto st317 + case 11: + goto st424 + case 12: + goto st266 + case 13: + goto st104 + case 32: + goto st423 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + goto st51 +tr163: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st106 + st106: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof106 + } + st_case_106: +//line plugins/parsers/influx/machine.go:14886 + switch ( m.data)[( m.p)] { + case 34: + goto st51 + case 92: + goto st51 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 + } + case ( m.data)[( m.p)] >= 9: + goto tr8 + } + goto st3 + st425: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof425 + } + st_case_425: + switch ( m.data)[( m.p)] { + case 9: + goto tr658 + case 10: + goto tr659 + case 11: + goto tr660 + case 12: + goto tr450 + case 13: + goto tr661 + case 32: + goto tr658 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st426 + } + goto st51 + st426: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof426 + } + st_case_426: + switch ( m.data)[( m.p)] { + case 9: + goto tr658 + case 10: + goto tr659 + case 11: + goto tr660 + case 12: + goto tr450 + case 13: + goto tr661 + case 32: + goto tr658 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st427 + } + goto st51 + st427: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof427 + } + st_case_427: + switch ( m.data)[( m.p)] { + case 9: + goto tr658 + case 10: + goto tr659 + case 11: + goto tr660 + case 12: + goto tr450 + case 13: + goto tr661 + case 32: + goto tr658 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st428 + } + goto st51 + st428: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof428 + } + st_case_428: + switch ( m.data)[( m.p)] { + case 9: + goto tr658 + case 10: + goto tr659 + case 11: + goto tr660 + case 12: + goto tr450 + case 13: + goto tr661 + case 32: + goto tr658 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st429 + } + goto st51 + st429: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof429 + } + st_case_429: + switch ( m.data)[( m.p)] { + case 9: + goto tr658 + case 10: + goto tr659 + case 11: + goto tr660 + case 12: + goto tr450 + case 13: + goto tr661 + case 32: + goto tr658 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st430 + } + goto st51 + st430: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof430 + } + st_case_430: + switch ( m.data)[( m.p)] { + case 9: + goto tr658 + case 10: + goto tr659 + case 11: + goto tr660 + case 12: + goto tr450 + case 13: + goto tr661 + case 32: + goto tr658 + case 34: + goto tr100 + case 44: + goto 
st6 + case 61: + goto tr165 + case 92: + goto st106 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st431 + } + goto st51 + st431: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof431 + } + st_case_431: + switch ( m.data)[( m.p)] { + case 9: + goto tr658 + case 10: + goto tr659 + case 11: + goto tr660 + case 12: + goto tr450 + case 13: + goto tr661 + case 32: + goto tr658 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st432 + } + goto st51 + st432: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof432 + } + st_case_432: + switch ( m.data)[( m.p)] { + case 9: + goto tr658 + case 10: + goto tr659 + case 11: + goto tr660 + case 12: + goto tr450 + case 13: + goto tr661 + case 32: + goto tr658 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st433 + } + goto st51 + st433: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof433 + } + st_case_433: + switch ( m.data)[( m.p)] { + case 9: + goto tr658 + case 10: + goto tr659 + case 11: + goto tr660 + case 12: + goto tr450 + case 13: + goto tr661 + case 32: + goto tr658 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st434 + } + goto st51 + st434: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof434 + } + st_case_434: + switch ( m.data)[( m.p)] { + case 9: + goto tr658 + case 10: + goto tr659 + case 11: + goto tr660 + case 12: + goto tr450 + case 13: + goto tr661 + case 32: + goto tr658 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st435 + } + goto st51 + st435: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof435 + } + st_case_435: + switch ( m.data)[( m.p)] { + case 9: + goto tr658 + case 10: + goto tr659 + case 11: + goto tr660 + case 12: + goto tr450 + case 13: + goto tr661 + case 32: + goto tr658 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st436 + } + goto st51 + st436: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof436 + } + st_case_436: + switch ( m.data)[( m.p)] { + case 9: + goto tr658 + case 10: + goto tr659 + case 11: + goto tr660 + case 12: + goto tr450 + case 13: + goto tr661 + case 32: + goto tr658 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st437 + } + goto st51 + st437: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof437 + } + st_case_437: + switch ( m.data)[( m.p)] { + case 9: + goto tr658 + case 10: + goto tr659 + case 11: + goto tr660 + case 12: + goto tr450 + case 13: + goto tr661 + case 32: + goto tr658 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st438 + } + goto st51 + st438: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof438 + } + st_case_438: + switch ( m.data)[( m.p)] { + case 9: + goto tr658 + case 10: + goto tr659 + case 11: + goto tr660 + case 12: + goto tr450 + case 13: + goto tr661 + case 32: + goto tr658 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 
92: + goto st106 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st439 + } + goto st51 + st439: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof439 + } + st_case_439: + switch ( m.data)[( m.p)] { + case 9: + goto tr658 + case 10: + goto tr659 + case 11: + goto tr660 + case 12: + goto tr450 + case 13: + goto tr661 + case 32: + goto tr658 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st440 + } + goto st51 + st440: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof440 + } + st_case_440: + switch ( m.data)[( m.p)] { + case 9: + goto tr658 + case 10: + goto tr659 + case 11: + goto tr660 + case 12: + goto tr450 + case 13: + goto tr661 + case 32: + goto tr658 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st441 + } + goto st51 + st441: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof441 + } + st_case_441: + switch ( m.data)[( m.p)] { + case 9: + goto tr658 + case 10: + goto tr659 + case 11: + goto tr660 + case 12: + goto tr450 + case 13: + goto tr661 + case 32: + goto tr658 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st442 + } + goto st51 + st442: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof442 + } + st_case_442: + switch ( m.data)[( m.p)] { + case 9: + goto tr658 + case 10: + goto tr659 + case 11: + goto tr660 + case 12: + goto tr450 + case 13: + goto tr661 + case 32: + goto tr658 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st106 + } + goto st51 +tr651: + ( m.cs) = 443 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again +tr711: + ( m.cs) = 443 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr723: + ( m.cs) = 443 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr730: + ( m.cs) = 443 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr737: + ( m.cs) = 443 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out 
} + } + + goto _again + st443: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof443 + } + st_case_443: +//line plugins/parsers/influx/machine.go:15571 + switch ( m.data)[( m.p)] { + case 9: + goto tr682 + case 10: + goto st317 + case 11: + goto tr683 + case 12: + goto tr547 + case 13: + goto st104 + case 32: + goto tr682 + case 34: + goto tr204 + case 44: + goto tr158 + case 45: + goto tr684 + case 61: + goto st6 + case 92: + goto tr205 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr685 + } + goto tr202 +tr683: + ( m.cs) = 444 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again + st444: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof444 + } + st_case_444: +//line plugins/parsers/influx/machine.go:15622 + switch ( m.data)[( m.p)] { + case 9: + goto tr682 + case 10: + goto st317 + case 11: + goto tr683 + case 12: + goto tr547 + case 13: + goto st104 + case 32: + goto tr682 + case 34: + goto tr204 + case 44: + goto tr158 + case 45: + goto tr684 + case 61: + goto tr165 + case 92: + goto tr205 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr685 + } + goto tr202 +tr684: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st107 + st107: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof107 + } + st_case_107: +//line plugins/parsers/influx/machine.go:15662 + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto st7 + case 11: + goto tr207 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr155 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st445 + } + goto st67 +tr685: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st445 + st445: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof445 + } + st_case_445: +//line plugins/parsers/influx/machine.go:15700 + switch ( m.data)[( m.p)] { + case 9: + goto tr686 + case 10: + goto tr659 + case 11: + goto tr687 + case 12: + goto tr553 + case 13: + goto tr661 + case 32: + goto tr686 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st449 + } + goto st67 +tr848: + ( m.cs) = 446 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr691: + ( m.cs) = 446 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr845: + ( m.cs) = 446 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr686: + ( m.cs) = 446 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if 
err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st446: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof446 + } + st_case_446: +//line plugins/parsers/influx/machine.go:15804 + switch ( m.data)[( m.p)] { + case 9: + goto st446 + case 10: + goto st317 + case 11: + goto tr690 + case 12: + goto st294 + case 13: + goto st104 + case 32: + goto st446 + case 34: + goto tr97 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr163 + } + goto tr160 +tr690: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st447 + st447: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof447 + } + st_case_447: +//line plugins/parsers/influx/machine.go:15839 + switch ( m.data)[( m.p)] { + case 9: + goto st446 + case 10: + goto st317 + case 11: + goto tr690 + case 12: + goto st294 + case 13: + goto st104 + case 32: + goto st446 + case 34: + goto tr97 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto tr163 + } + goto tr160 +tr692: + ( m.cs) = 448 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again +tr687: + ( m.cs) = 448 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st448: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof448 + } + st_case_448: +//line plugins/parsers/influx/machine.go:15908 + switch ( m.data)[( m.p)] { + case 9: + goto tr691 + case 10: + goto st317 + case 11: + goto tr692 + case 12: + goto tr556 + case 13: + goto st104 + case 32: + goto tr691 + case 34: + goto tr204 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto tr205 + } + goto tr202 + st449: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof449 + } + st_case_449: + switch ( m.data)[( m.p)] { + case 9: + goto tr686 + case 10: + goto tr659 + case 11: + goto tr687 + case 12: + goto tr553 + case 13: + goto tr661 + case 32: + goto tr686 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st450 + } + goto st67 + st450: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof450 + } + st_case_450: + switch ( m.data)[( m.p)] { + case 9: + goto tr686 + case 10: + goto tr659 + case 11: + goto tr687 + case 12: + goto tr553 + case 13: + goto tr661 + case 32: + goto tr686 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st451 + } + goto st67 + st451: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof451 + } + st_case_451: + switch ( m.data)[( m.p)] { + case 9: + goto tr686 + case 10: + goto tr659 + case 11: + goto tr687 + case 12: + goto tr553 + case 13: + goto tr661 + case 32: + goto tr686 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st452 + } + goto st67 + st452: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof452 + } + st_case_452: + switch ( m.data)[( m.p)] { + case 9: + goto tr686 + case 10: + goto tr659 + case 
11: + goto tr687 + case 12: + goto tr553 + case 13: + goto tr661 + case 32: + goto tr686 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st453 + } + goto st67 + st453: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof453 + } + st_case_453: + switch ( m.data)[( m.p)] { + case 9: + goto tr686 + case 10: + goto tr659 + case 11: + goto tr687 + case 12: + goto tr553 + case 13: + goto tr661 + case 32: + goto tr686 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st454 + } + goto st67 + st454: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof454 + } + st_case_454: + switch ( m.data)[( m.p)] { + case 9: + goto tr686 + case 10: + goto tr659 + case 11: + goto tr687 + case 12: + goto tr553 + case 13: + goto tr661 + case 32: + goto tr686 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st455 + } + goto st67 + st455: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof455 + } + st_case_455: + switch ( m.data)[( m.p)] { + case 9: + goto tr686 + case 10: + goto tr659 + case 11: + goto tr687 + case 12: + goto tr553 + case 13: + goto tr661 + case 32: + goto tr686 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st456 + } + goto st67 + st456: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof456 + } + st_case_456: + switch ( m.data)[( m.p)] { + case 9: + goto tr686 + case 10: + goto tr659 + case 11: + goto tr687 + case 12: + goto tr553 + case 13: + goto tr661 + case 32: + goto tr686 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st457 + } + goto st67 + st457: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof457 + } + st_case_457: + switch ( m.data)[( m.p)] { + case 9: + goto tr686 + case 10: + goto tr659 + case 11: + goto tr687 + case 12: + goto tr553 + case 13: + goto tr661 + case 32: + goto tr686 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st458 + } + goto st67 + st458: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof458 + } + st_case_458: + switch ( m.data)[( m.p)] { + case 9: + goto tr686 + case 10: + goto tr659 + case 11: + goto tr687 + case 12: + goto tr553 + case 13: + goto tr661 + case 32: + goto tr686 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st459 + } + goto st67 + st459: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof459 + } + st_case_459: + switch ( m.data)[( m.p)] { + case 9: + goto tr686 + case 10: + goto tr659 + case 11: + goto tr687 + case 12: + goto tr553 + case 13: + goto tr661 + case 32: + goto tr686 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st460 + } + goto st67 + st460: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof460 + } + st_case_460: + switch ( m.data)[( m.p)] { + case 9: + goto tr686 + case 10: + goto tr659 + case 11: + goto tr687 + case 
12: + goto tr553 + case 13: + goto tr661 + case 32: + goto tr686 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st461 + } + goto st67 + st461: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof461 + } + st_case_461: + switch ( m.data)[( m.p)] { + case 9: + goto tr686 + case 10: + goto tr659 + case 11: + goto tr687 + case 12: + goto tr553 + case 13: + goto tr661 + case 32: + goto tr686 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st462 + } + goto st67 + st462: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof462 + } + st_case_462: + switch ( m.data)[( m.p)] { + case 9: + goto tr686 + case 10: + goto tr659 + case 11: + goto tr687 + case 12: + goto tr553 + case 13: + goto tr661 + case 32: + goto tr686 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st463 + } + goto st67 + st463: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof463 + } + st_case_463: + switch ( m.data)[( m.p)] { + case 9: + goto tr686 + case 10: + goto tr659 + case 11: + goto tr687 + case 12: + goto tr553 + case 13: + goto tr661 + case 32: + goto tr686 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st464 + } + goto st67 + st464: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof464 + } + st_case_464: + switch ( m.data)[( m.p)] { + case 9: + goto tr686 + case 10: + goto tr659 + case 11: + goto tr687 + case 12: + goto tr553 + case 13: + goto tr661 + case 32: + goto tr686 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st465 + } + goto st67 + st465: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof465 + } + st_case_465: + switch ( m.data)[( m.p)] { + case 9: + goto tr686 + case 10: + goto tr659 + case 11: + goto tr687 + case 12: + goto tr553 + case 13: + goto tr661 + case 32: + goto tr686 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st466 + } + goto st67 + st466: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof466 + } + st_case_466: + switch ( m.data)[( m.p)] { + case 9: + goto tr686 + case 10: + goto tr659 + case 11: + goto tr687 + case 12: + goto tr553 + case 13: + goto tr661 + case 32: + goto tr686 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st69 + } + goto st67 +tr653: + ( m.cs) = 108 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again +tr839: + ( m.cs) = 108 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr713: + ( m.cs) = 108 +//line plugins/parsers/influx/machine.go.rl:90 + + 
err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr726: + ( m.cs) = 108 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr733: + ( m.cs) = 108 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr740: + ( m.cs) = 108 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr871: + ( m.cs) = 108 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr875: + ( m.cs) = 108 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr879: + ( m.cs) = 108 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st108: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof108 + } + st_case_108: +//line plugins/parsers/influx/machine.go:16693 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto st7 + case 12: + goto tr47 + case 13: + goto st8 + case 32: + goto st6 + case 34: + goto tr258 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr279 + } + goto tr278 +tr278: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st109 + st109: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof109 + } + st_case_109: +//line plugins/parsers/influx/machine.go:16726 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto st7 + case 12: + goto tr47 + case 13: + goto st8 + case 32: + goto st6 + case 34: + goto tr261 + case 44: + goto st6 + case 61: + goto tr281 + case 92: + goto st123 + } + goto st109 +tr281: +//line plugins/parsers/influx/machine.go.rl:86 + + key = m.text() + +//line plugins/parsers/influx/machine.go.rl:99 + + 
key = m.text() + + goto st110 + st110: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof110 + } + st_case_110: +//line plugins/parsers/influx/machine.go:16763 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto st7 + case 12: + goto tr47 + case 13: + goto st8 + case 32: + goto st6 + case 34: + goto tr266 + case 44: + goto st6 + case 45: + goto tr283 + case 46: + goto tr284 + case 48: + goto tr285 + case 61: + goto st6 + case 70: + goto tr287 + case 84: + goto tr288 + case 92: + goto tr153 + case 102: + goto tr289 + case 116: + goto tr290 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr286 + } + goto tr148 +tr283: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st111 + st111: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof111 + } + st_case_111: +//line plugins/parsers/influx/machine.go:16813 + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto st7 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 46: + goto st112 + case 48: + goto st471 + case 61: + goto st6 + case 92: + goto st64 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st474 + } + goto st49 +tr284: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st112 + st112: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof112 + } + st_case_112: +//line plugins/parsers/influx/machine.go:16855 + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto st7 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 92: + goto st64 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st467 + } + goto st49 + st467: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof467 + } + st_case_467: + switch ( m.data)[( m.p)] { + case 9: + goto tr710 + case 10: + goto tr515 + case 11: + goto tr711 + case 12: + goto tr712 + case 13: + goto tr517 + case 32: + goto tr710 + case 34: + goto tr157 + case 44: + goto tr713 + case 61: + goto st6 + case 69: + goto st113 + case 92: + goto st64 + case 101: + goto st113 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st467 + } + goto st49 + st113: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof113 + } + st_case_113: + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto st7 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr155 + case 34: + goto tr295 + case 44: + goto tr158 + case 61: + goto st6 + case 92: + goto st64 + } + switch { + case ( m.data)[( m.p)] > 45: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st470 + } + case ( m.data)[( m.p)] >= 43: + goto st114 + } + goto st49 +tr295: + ( m.cs) = 468 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st468: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof468 + } + st_case_468: +//line plugins/parsers/influx/machine.go:16971 + switch ( m.data)[( m.p)] { + case 10: + goto st262 + case 11: + goto tr548 + case 13: + goto st34 + case 32: + goto tr547 + case 44: + goto tr549 + case 61: + goto tr132 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st469 + } + case ( m.data)[( m.p)] >= 9: + goto tr547 + 
} + goto st17 + st469: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof469 + } + st_case_469: + switch ( m.data)[( m.p)] { + case 10: + goto tr715 + case 11: + goto tr716 + case 13: + goto tr717 + case 32: + goto tr712 + case 44: + goto tr718 + case 61: + goto tr132 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st469 + } + case ( m.data)[( m.p)] >= 9: + goto tr712 + } + goto st17 + st114: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof114 + } + st_case_114: + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto st7 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 92: + goto st64 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st470 + } + goto st49 + st470: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof470 + } + st_case_470: + switch ( m.data)[( m.p)] { + case 9: + goto tr710 + case 10: + goto tr515 + case 11: + goto tr711 + case 12: + goto tr712 + case 13: + goto tr517 + case 32: + goto tr710 + case 34: + goto tr157 + case 44: + goto tr713 + case 61: + goto st6 + case 92: + goto st64 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st470 + } + goto st49 + st471: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof471 + } + st_case_471: + switch ( m.data)[( m.p)] { + case 9: + goto tr710 + case 10: + goto tr515 + case 11: + goto tr711 + case 12: + goto tr712 + case 13: + goto tr517 + case 32: + goto tr710 + case 34: + goto tr157 + case 44: + goto tr713 + case 46: + goto st467 + case 61: + goto st6 + case 69: + goto st113 + case 92: + goto st64 + case 101: + goto st113 + case 105: + goto st473 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st472 + } + goto st49 + st472: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof472 + } + st_case_472: + switch ( m.data)[( m.p)] { + case 9: + goto tr710 + case 10: + goto tr515 + case 11: + goto tr711 + case 12: + goto tr712 + case 13: + goto tr517 + case 32: + goto tr710 + case 34: + goto tr157 + case 44: + goto tr713 + case 46: + goto st467 + case 61: + goto st6 + case 69: + goto st113 + case 92: + goto st64 + case 101: + goto st113 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st472 + } + goto st49 + st473: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof473 + } + st_case_473: + switch ( m.data)[( m.p)] { + case 9: + goto tr721 + case 10: + goto tr722 + case 11: + goto tr723 + case 12: + goto tr724 + case 13: + goto tr725 + case 32: + goto tr721 + case 34: + goto tr157 + case 44: + goto tr726 + case 61: + goto st6 + case 92: + goto st64 + } + goto st49 st474: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof474 } st_case_474: -//line plugins/parsers/influx/machine.go:15894 switch ( m.data)[( m.p)] { case 9: - goto tr666 + goto tr710 + case 10: + goto tr515 case 11: - goto tr667 + goto tr711 case 12: - goto tr514 + goto tr712 + case 13: + goto tr517 case 32: - goto tr666 + goto tr710 case 34: - goto tr183 + goto tr157 case 44: - goto tr668 + goto tr713 + case 46: + goto st467 case 61: - goto tr25 + goto st6 + case 69: + goto st113 case 92: - goto tr185 + goto st64 + case 101: + goto st113 + case 105: + goto st473 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st474 } - goto tr180 -tr693: -//line plugins/parsers/influx/machine.go.rl:80 - - 
m.handler.AddTag(key, m.text()) - - goto st475 -tr666: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 + goto st49 +tr285: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p - goto st475 -tr721: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st475 -tr727: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st475 -tr731: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st475 -tr735: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - goto st475 st475: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof475 } st_case_475: -//line plugins/parsers/influx/machine.go:15978 +//line plugins/parsers/influx/machine.go:17243 switch ( m.data)[( m.p)] { case 9: - goto st475 + goto tr710 + case 10: + goto tr515 case 11: - goto tr670 + goto tr711 case 12: - goto st318 + goto tr712 + case 13: + goto tr517 case 32: - goto st475 + goto tr710 case 34: - goto tr95 + goto tr157 case 44: - goto st7 - case 45: - goto tr671 + goto tr713 + case 46: + goto st467 case 61: - goto st7 + goto st6 + case 69: + goto st113 case 92: - goto tr195 + goto st64 + case 101: + goto st113 + case 105: + goto st473 + case 117: + goto st476 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr672 - } - case ( m.data)[( m.p)] >= 10: - goto tr357 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st472 } - goto tr192 -tr670: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st476 + goto st49 st476: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof476 } st_case_476: -//line plugins/parsers/influx/machine.go:16019 switch ( m.data)[( m.p)] { case 9: - goto st475 - case 11: - goto tr670 - case 12: - goto st318 - case 32: - goto st475 - case 34: - goto tr95 - case 44: - goto st7 - case 45: - goto tr671 - case 61: - goto tr197 - case 92: - goto tr195 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr672 - } - case ( m.data)[( m.p)] >= 10: - goto tr357 - } - goto tr192 -tr671: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st138 - st138: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof138 - } - st_case_138: -//line plugins/parsers/influx/machine.go:16060 - switch ( m.data)[( m.p)] { - case 9: - goto st7 + goto tr728 case 10: - goto tr101 + goto tr729 + case 11: + goto tr730 + case 12: + goto tr731 + case 13: + goto tr732 case 32: - goto st7 + goto tr728 case 34: - goto tr98 + goto tr157 case 44: - goto st7 + goto tr733 case 61: - goto tr197 + goto st6 case 92: - goto st93 + goto st64 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st477 - } - case ( m.data)[( m.p)] >= 12: - goto tr101 - } - goto st91 -tr672: -//line plugins/parsers/influx/machine.go.rl:18 + goto st49 +tr286: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -16093,38 +17315,47 @@ tr672: goto _test_eof477 } st_case_477: -//line 
plugins/parsers/influx/machine.go:16097 +//line plugins/parsers/influx/machine.go:17319 switch ( m.data)[( m.p)] { case 9: - goto tr431 + goto tr710 + case 10: + goto tr515 case 11: - goto tr673 + goto tr711 case 12: - goto tr361 + goto tr712 + case 13: + goto tr517 case 32: - goto tr431 + goto tr710 case 34: - goto tr98 + goto tr157 case 44: - goto st7 + goto tr713 + case 46: + goto st467 case 61: - goto tr197 + goto st6 + case 69: + goto st113 case 92: - goto st93 + goto st64 + case 101: + goto st113 + case 105: + goto st473 + case 117: + goto st476 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st479 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st477 } - goto st91 -tr673: -//line plugins/parsers/influx/machine.go.rl:108 + goto st49 +tr287: +//line plugins/parsers/influx/machine.go.rl:19 - m.handler.SetTimestamp(m.text()) + m.pb = m.p goto st478 st478: @@ -16132,29 +17363,124 @@ tr673: goto _test_eof478 } st_case_478: -//line plugins/parsers/influx/machine.go:16136 +//line plugins/parsers/influx/machine.go:17367 switch ( m.data)[( m.p)] { case 9: - goto st268 + goto tr735 + case 10: + goto tr736 case 11: - goto st478 + goto tr737 case 12: - goto st210 + goto tr738 + case 13: + goto tr739 case 32: - goto st268 + goto tr735 case 34: - goto tr98 + goto tr157 case 44: - goto st7 + goto tr740 case 61: - goto tr197 + goto st6 + case 65: + goto st115 case 92: - goto st93 + goto st64 + case 97: + goto st118 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 + goto st49 + st115: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof115 } - goto st91 + st_case_115: + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto st7 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 76: + goto st116 + case 92: + goto st64 + } + goto st49 + st116: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof116 + } + st_case_116: + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto st7 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 83: + goto st117 + case 92: + goto st64 + } + goto st49 + st117: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof117 + } + st_case_117: + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto st7 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 69: + goto st479 + case 92: + goto st64 + } + goto st49 st479: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof479 @@ -16162,127 +17488,427 @@ tr673: st_case_479: switch ( m.data)[( m.p)] { case 9: - goto tr431 + goto tr735 + case 10: + goto tr736 case 11: - goto tr673 + goto tr737 case 12: - goto tr361 + goto tr738 + case 13: + goto tr739 case 32: - goto tr431 + goto tr735 case 34: - goto tr98 + goto tr157 case 44: - goto st7 + goto tr740 case 61: - goto tr197 + goto st6 case 92: - goto st93 + goto st64 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st480 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + goto st49 + st118: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof118 } - goto st91 + st_case_118: + switch ( m.data)[( m.p)] { + 
case 9: + goto tr155 + case 10: + goto st7 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 92: + goto st64 + case 108: + goto st119 + } + goto st49 + st119: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof119 + } + st_case_119: + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto st7 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 92: + goto st64 + case 115: + goto st120 + } + goto st49 + st120: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof120 + } + st_case_120: + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto st7 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 92: + goto st64 + case 101: + goto st479 + } + goto st49 +tr288: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st480 st480: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof480 } st_case_480: +//line plugins/parsers/influx/machine.go:17614 switch ( m.data)[( m.p)] { case 9: - goto tr431 + goto tr735 + case 10: + goto tr736 case 11: - goto tr673 + goto tr737 case 12: - goto tr361 + goto tr738 + case 13: + goto tr739 case 32: - goto tr431 + goto tr735 case 34: - goto tr98 + goto tr157 case 44: - goto st7 + goto tr740 case 61: - goto tr197 + goto st6 + case 82: + goto st121 case 92: - goto st93 + goto st64 + case 114: + goto st122 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st481 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + goto st49 + st121: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof121 } - goto st91 + st_case_121: + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto st7 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 85: + goto st117 + case 92: + goto st64 + } + goto st49 + st122: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof122 + } + st_case_122: + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto st7 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 92: + goto st64 + case 117: + goto st120 + } + goto st49 +tr289: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st481 st481: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof481 } st_case_481: +//line plugins/parsers/influx/machine.go:17713 switch ( m.data)[( m.p)] { case 9: - goto tr431 + goto tr735 + case 10: + goto tr736 case 11: - goto tr673 + goto tr737 case 12: - goto tr361 + goto tr738 + case 13: + goto tr739 case 32: - goto tr431 + goto tr735 case 34: - goto tr98 + goto tr157 case 44: - goto st7 + goto tr740 case 61: - goto tr197 + goto st6 case 92: - goto st93 + goto st64 + case 97: + goto st118 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st482 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st91 + goto st49 +tr290: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st482 st482: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof482 } st_case_482: +//line 
plugins/parsers/influx/machine.go:17750 switch ( m.data)[( m.p)] { case 9: - goto tr431 + goto tr735 + case 10: + goto tr736 case 11: - goto tr673 + goto tr737 case 12: - goto tr361 + goto tr738 + case 13: + goto tr739 case 32: - goto tr431 + goto tr735 case 34: - goto tr98 + goto tr157 case 44: - goto st7 + goto tr740 case 61: - goto tr197 + goto st6 case 92: - goto st93 + goto st64 + case 114: + goto st122 + } + goto st49 +tr279: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st123 + st123: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof123 + } + st_case_123: +//line plugins/parsers/influx/machine.go:17787 + switch ( m.data)[( m.p)] { + case 34: + goto st109 + case 92: + goto st124 } switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st483 + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr47 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr47 } - goto st91 + goto st46 + st124: +//line plugins/parsers/influx/machine.go.rl:234 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof124 + } + st_case_124: +//line plugins/parsers/influx/machine.go:17811 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto st7 + case 12: + goto tr47 + case 13: + goto st8 + case 32: + goto st6 + case 34: + goto tr261 + case 44: + goto st6 + case 61: + goto tr281 + case 92: + goto st123 + } + goto st109 +tr267: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st125 + st125: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof125 + } + st_case_125: +//line plugins/parsers/influx/machine.go:17844 + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto st7 + case 11: + goto tr232 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr231 + case 34: + goto tr157 + case 44: + goto tr233 + case 46: + goto st126 + case 48: + goto st507 + case 61: + goto st6 + case 92: + goto st87 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st510 + } + goto st81 +tr268: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st126 + st126: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof126 + } + st_case_126: +//line plugins/parsers/influx/machine.go:17886 + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto st7 + case 11: + goto tr232 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr231 + case 34: + goto tr157 + case 44: + goto tr233 + case 61: + goto st6 + case 92: + goto st87 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st483 + } + goto st81 st483: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof483 @@ -16290,159 +17916,356 @@ tr673: st_case_483: switch ( m.data)[( m.p)] { case 9: - goto tr431 + goto tr745 + case 10: + goto tr620 case 11: - goto tr673 + goto tr746 case 12: - goto tr361 + goto tr712 + case 13: + goto tr623 case 32: - goto tr431 + goto tr745 case 34: - goto tr98 + goto tr157 case 44: - goto st7 + goto tr747 case 61: - goto tr197 + goto st6 + case 69: + goto st128 case 92: - goto st93 + goto st87 + case 101: + goto st128 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st484 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st483 } - goto st91 + goto st81 +tr746: + ( m.cs) = 484 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, 
m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr779: + ( m.cs) = 484 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr785: + ( m.cs) = 484 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr791: + ( m.cs) = 484 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st484: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof484 } st_case_484: +//line plugins/parsers/influx/machine.go:18045 switch ( m.data)[( m.p)] { case 9: - goto tr431 + goto tr749 + case 10: + goto st288 case 11: - goto tr673 + goto tr750 case 12: - goto tr361 + goto tr547 + case 13: + goto st74 case 32: - goto tr431 + goto tr749 case 34: - goto tr98 + goto tr204 case 44: - goto st7 + goto tr233 + case 45: + goto tr751 case 61: - goto tr197 + goto st6 case 92: - goto st93 + goto tr237 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st485 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr752 } - goto st91 + goto tr235 +tr750: + ( m.cs) = 485 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again st485: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof485 } st_case_485: +//line plugins/parsers/influx/machine.go:18096 switch ( m.data)[( m.p)] { case 9: - goto tr431 + goto tr749 + case 10: + goto st288 case 11: - goto tr673 + goto tr750 case 12: - goto tr361 + goto tr547 + case 13: + goto st74 case 32: - goto tr431 + goto tr749 case 34: - goto tr98 + goto tr204 case 44: - goto st7 + goto tr233 + case 45: + goto tr751 case 61: - goto tr197 + goto tr101 case 92: - goto st93 + goto tr237 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st486 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr752 } - goto st91 + goto tr235 +tr751: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st127 + st127: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof127 + } + st_case_127: +//line plugins/parsers/influx/machine.go:18136 + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto st7 + case 11: + goto tr239 + case 12: + goto tr60 + case 13: + goto st8 + 
case 32: + goto tr231 + case 34: + goto tr208 + case 44: + goto tr233 + case 61: + goto tr101 + case 92: + goto st85 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st486 + } + goto st83 +tr752: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st486 st486: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof486 } st_case_486: +//line plugins/parsers/influx/machine.go:18174 switch ( m.data)[( m.p)] { case 9: - goto tr431 + goto tr753 + case 10: + goto tr584 case 11: - goto tr673 + goto tr754 case 12: - goto tr361 + goto tr553 + case 13: + goto tr586 case 32: - goto tr431 + goto tr753 case 34: - goto tr98 + goto tr208 case 44: - goto st7 + goto tr233 case 61: - goto tr197 + goto tr101 case 92: - goto st93 + goto st85 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st487 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st488 } - goto st91 + goto st83 +tr757: + ( m.cs) = 487 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again +tr754: + ( m.cs) = 487 +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st487: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof487 } st_case_487: +//line plugins/parsers/influx/machine.go:18246 switch ( m.data)[( m.p)] { case 9: - goto tr431 + goto tr756 + case 10: + goto st288 case 11: - goto tr673 + goto tr757 case 12: - goto tr361 + goto tr556 + case 13: + goto st74 case 32: - goto tr431 + goto tr756 case 34: - goto tr98 + goto tr204 case 44: - goto st7 + goto tr233 case 61: - goto tr197 + goto tr101 case 92: - goto st93 + goto tr237 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st488 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st91 + goto tr235 st488: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof488 @@ -16450,31 +18273,30 @@ tr673: st_case_488: switch ( m.data)[( m.p)] { case 9: - goto tr431 + goto tr753 + case 10: + goto tr584 case 11: - goto tr673 + goto tr754 case 12: - goto tr361 + goto tr553 + case 13: + goto tr586 case 32: - goto tr431 + goto tr753 case 34: - goto tr98 + goto tr208 case 44: - goto st7 + goto tr233 case 61: - goto tr197 + goto tr101 case 92: - goto st93 + goto st85 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st489 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st489 } - goto st91 + goto st83 st489: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof489 @@ -16482,31 +18304,30 @@ tr673: st_case_489: switch ( m.data)[( m.p)] { case 9: - goto tr431 + goto tr753 + case 10: + goto tr584 case 11: - goto tr673 + goto tr754 case 12: - goto tr361 + goto tr553 + case 13: + goto tr586 case 32: - goto tr431 + goto tr753 case 34: - goto tr98 + goto tr208 case 44: - goto st7 + goto tr233 case 61: - goto tr197 + goto tr101 case 92: - goto st93 + goto st85 } - switch { - case ( m.data)[( m.p)] > 13: - if 
48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st490 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st490 } - goto st91 + goto st83 st490: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof490 @@ -16514,31 +18335,30 @@ tr673: st_case_490: switch ( m.data)[( m.p)] { case 9: - goto tr431 + goto tr753 + case 10: + goto tr584 case 11: - goto tr673 + goto tr754 case 12: - goto tr361 + goto tr553 + case 13: + goto tr586 case 32: - goto tr431 + goto tr753 case 34: - goto tr98 + goto tr208 case 44: - goto st7 + goto tr233 case 61: - goto tr197 + goto tr101 case 92: - goto st93 + goto st85 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st491 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st491 } - goto st91 + goto st83 st491: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof491 @@ -16546,31 +18366,30 @@ tr673: st_case_491: switch ( m.data)[( m.p)] { case 9: - goto tr431 + goto tr753 + case 10: + goto tr584 case 11: - goto tr673 + goto tr754 case 12: - goto tr361 + goto tr553 + case 13: + goto tr586 case 32: - goto tr431 + goto tr753 case 34: - goto tr98 + goto tr208 case 44: - goto st7 + goto tr233 case 61: - goto tr197 + goto tr101 case 92: - goto st93 + goto st85 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st492 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st492 } - goto st91 + goto st83 st492: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof492 @@ -16578,31 +18397,30 @@ tr673: st_case_492: switch ( m.data)[( m.p)] { case 9: - goto tr431 + goto tr753 + case 10: + goto tr584 case 11: - goto tr673 + goto tr754 case 12: - goto tr361 + goto tr553 + case 13: + goto tr586 case 32: - goto tr431 + goto tr753 case 34: - goto tr98 + goto tr208 case 44: - goto st7 + goto tr233 case 61: - goto tr197 + goto tr101 case 92: - goto st93 + goto st85 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st493 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st493 } - goto st91 + goto st83 st493: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof493 @@ -16610,31 +18428,30 @@ tr673: st_case_493: switch ( m.data)[( m.p)] { case 9: - goto tr431 + goto tr753 + case 10: + goto tr584 case 11: - goto tr673 + goto tr754 case 12: - goto tr361 + goto tr553 + case 13: + goto tr586 case 32: - goto tr431 + goto tr753 case 34: - goto tr98 + goto tr208 case 44: - goto st7 + goto tr233 case 61: - goto tr197 + goto tr101 case 92: - goto st93 + goto st85 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st494 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st494 } - goto st91 + goto st83 st494: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof494 @@ -16642,31 +18459,30 @@ tr673: st_case_494: switch ( m.data)[( m.p)] { case 9: - goto tr431 + goto tr753 + case 10: + goto tr584 case 11: - goto tr673 + goto tr754 case 12: - goto tr361 + goto tr553 + case 13: + goto tr586 case 32: - goto tr431 + goto tr753 case 34: - goto tr98 + goto tr208 case 44: - goto st7 + goto tr233 case 61: - goto tr197 + goto tr101 case 92: - goto st93 + goto st85 } - switch { - case ( m.data)[( 
m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st495 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st495 } - goto st91 + goto st83 st495: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof495 @@ -16674,31 +18490,30 @@ tr673: st_case_495: switch ( m.data)[( m.p)] { case 9: - goto tr431 + goto tr753 + case 10: + goto tr584 case 11: - goto tr673 + goto tr754 case 12: - goto tr361 + goto tr553 + case 13: + goto tr586 case 32: - goto tr431 + goto tr753 case 34: - goto tr98 + goto tr208 case 44: - goto st7 + goto tr233 case 61: - goto tr197 + goto tr101 case 92: - goto st93 + goto st85 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st496 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st496 } - goto st91 + goto st83 st496: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof496 @@ -16706,360 +18521,216 @@ tr673: st_case_496: switch ( m.data)[( m.p)] { case 9: - goto tr431 + goto tr753 + case 10: + goto tr584 case 11: - goto tr673 + goto tr754 case 12: - goto tr361 + goto tr553 + case 13: + goto tr586 case 32: - goto tr431 + goto tr753 case 34: - goto tr98 + goto tr208 case 44: - goto st7 + goto tr233 case 61: - goto tr197 + goto tr101 case 92: - goto st93 + goto st85 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st497 } - goto st91 -tr667: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st497 -tr722: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st497 -tr728: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st497 -tr732: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st497 -tr736: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st497 + goto st83 st497: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof497 } st_case_497: -//line plugins/parsers/influx/machine.go:16785 switch ( m.data)[( m.p)] { case 9: - goto tr693 + goto tr753 + case 10: + goto tr584 case 11: - goto tr694 + goto tr754 case 12: - goto tr514 + goto tr553 + case 13: + goto tr586 case 32: - goto tr693 + goto tr753 case 34: - goto tr201 + goto tr208 case 44: - goto tr190 - case 45: - goto tr695 + goto tr233 case 61: - goto st7 + goto tr101 case 92: - goto tr202 + goto st85 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr696 - } - case ( m.data)[( m.p)] >= 10: - goto tr357 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st498 } - goto tr199 -tr694: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st498 + goto st83 st498: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof498 } st_case_498: -//line 
plugins/parsers/influx/machine.go:16830 switch ( m.data)[( m.p)] { case 9: - goto tr693 + goto tr753 + case 10: + goto tr584 case 11: - goto tr694 + goto tr754 case 12: - goto tr514 + goto tr553 + case 13: + goto tr586 case 32: - goto tr693 + goto tr753 case 34: - goto tr201 + goto tr208 case 44: - goto tr190 - case 45: - goto tr695 + goto tr233 case 61: - goto tr197 + goto tr101 case 92: - goto tr202 + goto st85 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr696 - } - case ( m.data)[( m.p)] >= 10: - goto tr357 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st499 } - goto tr199 -tr695: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st139 - st139: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof139 - } - st_case_139: -//line plugins/parsers/influx/machine.go:16871 - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr204 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr205 - case 44: - goto tr190 - case 61: - goto tr197 - case 92: - goto st105 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st499 - } - case ( m.data)[( m.p)] >= 10: - goto tr207 - } - goto st96 -tr696: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st499 + goto st83 st499: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof499 } st_case_499: -//line plugins/parsers/influx/machine.go:16910 switch ( m.data)[( m.p)] { case 9: - goto tr697 + goto tr753 + case 10: + goto tr584 case 11: - goto tr698 + goto tr754 case 12: - goto tr520 + goto tr553 + case 13: + goto tr586 case 32: - goto tr697 + goto tr753 case 34: - goto tr205 + goto tr208 case 44: - goto tr190 + goto tr233 case 61: - goto tr197 + goto tr101 case 92: - goto st105 + goto st85 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st503 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st500 } - goto st96 -tr702: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st500 -tr697: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st500 + goto st83 st500: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof500 } st_case_500: -//line plugins/parsers/influx/machine.go:16959 switch ( m.data)[( m.p)] { case 9: - goto st500 + goto tr753 + case 10: + goto tr584 case 11: - goto tr701 + goto tr754 case 12: - goto st322 + goto tr553 + case 13: + goto tr586 case 32: - goto st500 + goto tr753 case 34: - goto tr95 + goto tr208 case 44: - goto st7 + goto tr233 case 61: - goto st7 + goto tr101 case 92: - goto tr195 + goto st85 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st501 } - goto tr192 -tr701: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st501 + goto st83 st501: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof501 } st_case_501: -//line plugins/parsers/influx/machine.go:16993 switch ( m.data)[( m.p)] { case 9: - goto st500 + goto tr753 + case 10: + goto tr584 case 11: - goto tr701 + goto tr754 case 12: - goto st322 + goto tr553 + case 13: + goto tr586 case 32: - goto st500 + goto tr753 case 34: - goto tr95 + goto tr208 case 44: - goto st7 + goto tr233 case 
61: - goto tr197 + goto tr101 case 92: - goto tr195 + goto st85 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st502 } - goto tr192 -tr703: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st502 -tr698: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st502 + goto st83 st502: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof502 } st_case_502: -//line plugins/parsers/influx/machine.go:17041 switch ( m.data)[( m.p)] { case 9: - goto tr702 + goto tr753 + case 10: + goto tr584 case 11: - goto tr703 + goto tr754 case 12: - goto tr523 + goto tr553 + case 13: + goto tr586 case 32: - goto tr702 + goto tr753 case 34: - goto tr201 + goto tr208 case 44: - goto tr190 + goto tr233 case 61: - goto tr197 + goto tr101 case 92: - goto tr202 + goto st85 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st503 } - goto tr199 + goto st83 st503: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof503 @@ -17067,31 +18738,30 @@ tr698: st_case_503: switch ( m.data)[( m.p)] { case 9: - goto tr697 + goto tr753 + case 10: + goto tr584 case 11: - goto tr698 + goto tr754 case 12: - goto tr520 + goto tr553 + case 13: + goto tr586 case 32: - goto tr697 + goto tr753 case 34: - goto tr205 + goto tr208 case 44: - goto tr190 + goto tr233 case 61: - goto tr197 + goto tr101 case 92: - goto st105 + goto st85 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st504 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st504 } - goto st96 + goto st83 st504: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof504 @@ -17099,31 +18769,30 @@ tr698: st_case_504: switch ( m.data)[( m.p)] { case 9: - goto tr697 + goto tr753 + case 10: + goto tr584 case 11: - goto tr698 + goto tr754 case 12: - goto tr520 + goto tr553 + case 13: + goto tr586 case 32: - goto tr697 + goto tr753 case 34: - goto tr205 + goto tr208 case 44: - goto tr190 + goto tr233 case 61: - goto tr197 + goto tr101 case 92: - goto st105 + goto st85 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st505 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st505 } - goto st96 + goto st83 st505: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof505 @@ -17131,31 +18800,94 @@ tr698: st_case_505: switch ( m.data)[( m.p)] { case 9: - goto tr697 + goto tr753 + case 10: + goto tr584 case 11: - goto tr698 + goto tr754 case 12: - goto tr520 + goto tr553 + case 13: + goto tr586 case 32: - goto tr697 + goto tr753 case 34: - goto tr205 + goto tr208 case 44: - goto tr190 + goto tr233 case 61: - goto tr197 + goto tr101 case 92: - goto st105 + goto st85 + } + goto st83 + st128: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof128 + } + st_case_128: + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto st7 + case 11: + goto tr232 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr231 + case 34: + goto tr295 + case 44: + goto tr233 + case 61: + goto st6 + case 92: + goto st87 } switch { - case ( m.data)[( m.p)] > 13: + case ( 
m.data)[( m.p)] > 45: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st506 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 43: + goto st129 } - goto st96 + goto st81 + st129: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof129 + } + st_case_129: + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto st7 + case 11: + goto tr232 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr231 + case 34: + goto tr157 + case 44: + goto tr233 + case 61: + goto st6 + case 92: + goto st87 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st506 + } + goto st81 st506: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof506 @@ -17163,31 +18895,30 @@ tr698: st_case_506: switch ( m.data)[( m.p)] { case 9: - goto tr697 + goto tr745 + case 10: + goto tr620 case 11: - goto tr698 + goto tr746 case 12: - goto tr520 + goto tr712 + case 13: + goto tr623 case 32: - goto tr697 + goto tr745 case 34: - goto tr205 + goto tr157 case 44: - goto tr190 + goto tr747 case 61: - goto tr197 + goto st6 case 92: - goto st105 + goto st87 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st507 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st506 } - goto st96 + goto st81 st507: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof507 @@ -17195,31 +18926,38 @@ tr698: st_case_507: switch ( m.data)[( m.p)] { case 9: - goto tr697 + goto tr745 + case 10: + goto tr620 case 11: - goto tr698 + goto tr746 case 12: - goto tr520 + goto tr712 + case 13: + goto tr623 case 32: - goto tr697 + goto tr745 case 34: - goto tr205 + goto tr157 case 44: - goto tr190 + goto tr747 + case 46: + goto st483 case 61: - goto tr197 + goto st6 + case 69: + goto st128 case 92: - goto st105 + goto st87 + case 101: + goto st128 + case 105: + goto st509 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st508 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st508 } - goto st96 + goto st81 st508: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof508 @@ -17227,31 +18965,36 @@ tr698: st_case_508: switch ( m.data)[( m.p)] { case 9: - goto tr697 + goto tr745 + case 10: + goto tr620 case 11: - goto tr698 + goto tr746 case 12: - goto tr520 + goto tr712 + case 13: + goto tr623 case 32: - goto tr697 + goto tr745 case 34: - goto tr205 + goto tr157 case 44: - goto tr190 + goto tr747 + case 46: + goto st483 case 61: - goto tr197 + goto st6 + case 69: + goto st128 case 92: - goto st105 + goto st87 + case 101: + goto st128 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st509 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st508 } - goto st96 + goto st81 st509: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof509 @@ -17259,31 +19002,27 @@ tr698: st_case_509: switch ( m.data)[( m.p)] { case 9: - goto tr697 + goto tr777 + case 10: + goto tr778 case 11: - goto tr698 + goto tr779 case 12: - goto tr520 + goto tr724 + case 13: + goto tr780 case 32: - goto tr697 + goto tr777 case 34: - goto tr205 + goto tr157 case 44: - goto tr190 + goto tr781 case 61: - goto tr197 + goto st6 case 92: - goto st105 + goto st87 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st510 - } - case ( m.data)[( 
m.p)] >= 10: - goto tr362 - } - goto st96 + goto st81 st510: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof510 @@ -17291,63 +19030,86 @@ tr698: st_case_510: switch ( m.data)[( m.p)] { case 9: - goto tr697 + goto tr745 + case 10: + goto tr620 case 11: - goto tr698 + goto tr746 case 12: - goto tr520 + goto tr712 + case 13: + goto tr623 case 32: - goto tr697 + goto tr745 case 34: - goto tr205 + goto tr157 case 44: - goto tr190 + goto tr747 + case 46: + goto st483 case 61: - goto tr197 + goto st6 + case 69: + goto st128 case 92: - goto st105 + goto st87 + case 101: + goto st128 + case 105: + goto st509 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st511 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st510 } - goto st96 + goto st81 +tr269: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st511 st511: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof511 } st_case_511: +//line plugins/parsers/influx/machine.go:19077 switch ( m.data)[( m.p)] { case 9: - goto tr697 + goto tr745 + case 10: + goto tr620 case 11: - goto tr698 + goto tr746 case 12: - goto tr520 + goto tr712 + case 13: + goto tr623 case 32: - goto tr697 + goto tr745 case 34: - goto tr205 + goto tr157 case 44: - goto tr190 + goto tr747 + case 46: + goto st483 case 61: - goto tr197 + goto st6 + case 69: + goto st128 case 92: - goto st105 + goto st87 + case 101: + goto st128 + case 105: + goto st509 + case 117: + goto st512 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st512 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st508 } - goto st96 + goto st81 st512: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof512 @@ -17355,95 +19117,204 @@ tr698: st_case_512: switch ( m.data)[( m.p)] { case 9: - goto tr697 + goto tr783 + case 10: + goto tr784 case 11: - goto tr698 + goto tr785 case 12: - goto tr520 + goto tr731 + case 13: + goto tr786 case 32: - goto tr697 + goto tr783 case 34: - goto tr205 + goto tr157 case 44: - goto tr190 + goto tr787 case 61: - goto tr197 + goto st6 case 92: - goto st105 + goto st87 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st513 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st96 + goto st81 +tr270: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st513 st513: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof513 } st_case_513: +//line plugins/parsers/influx/machine.go:19153 switch ( m.data)[( m.p)] { case 9: - goto tr697 + goto tr745 + case 10: + goto tr620 case 11: - goto tr698 + goto tr746 case 12: - goto tr520 + goto tr712 + case 13: + goto tr623 case 32: - goto tr697 + goto tr745 case 34: - goto tr205 + goto tr157 case 44: - goto tr190 + goto tr747 + case 46: + goto st483 case 61: - goto tr197 + goto st6 + case 69: + goto st128 case 92: - goto st105 + goto st87 + case 101: + goto st128 + case 105: + goto st509 + case 117: + goto st512 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st514 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st513 } - goto st96 + goto st81 +tr271: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st514 st514: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof514 } 
st_case_514: +//line plugins/parsers/influx/machine.go:19201 switch ( m.data)[( m.p)] { case 9: - goto tr697 + goto tr789 + case 10: + goto tr790 case 11: - goto tr698 + goto tr791 case 12: - goto tr520 + goto tr738 + case 13: + goto tr792 case 32: - goto tr697 + goto tr789 case 34: - goto tr205 + goto tr157 case 44: - goto tr190 + goto tr793 case 61: - goto tr197 + goto st6 + case 65: + goto st130 case 92: - goto st105 + goto st87 + case 97: + goto st133 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st515 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + goto st81 + st130: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof130 } - goto st96 + st_case_130: + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto st7 + case 11: + goto tr232 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr231 + case 34: + goto tr157 + case 44: + goto tr233 + case 61: + goto st6 + case 76: + goto st131 + case 92: + goto st87 + } + goto st81 + st131: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof131 + } + st_case_131: + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto st7 + case 11: + goto tr232 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr231 + case 34: + goto tr157 + case 44: + goto tr233 + case 61: + goto st6 + case 83: + goto st132 + case 92: + goto st87 + } + goto st81 + st132: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof132 + } + st_case_132: + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto st7 + case 11: + goto tr232 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr231 + case 34: + goto tr157 + case 44: + goto tr233 + case 61: + goto st6 + case 69: + goto st515 + case 92: + goto st87 + } + goto st81 st515: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof515 @@ -17451,431 +19322,480 @@ tr698: st_case_515: switch ( m.data)[( m.p)] { case 9: - goto tr697 + goto tr789 + case 10: + goto tr790 case 11: - goto tr698 + goto tr791 case 12: - goto tr520 + goto tr738 + case 13: + goto tr792 case 32: - goto tr697 + goto tr789 case 34: - goto tr205 + goto tr157 case 44: - goto tr190 + goto tr793 case 61: - goto tr197 + goto st6 case 92: - goto st105 + goto st87 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st516 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + goto st81 + st133: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof133 } - goto st96 + st_case_133: + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto st7 + case 11: + goto tr232 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr231 + case 34: + goto tr157 + case 44: + goto tr233 + case 61: + goto st6 + case 92: + goto st87 + case 108: + goto st134 + } + goto st81 + st134: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof134 + } + st_case_134: + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto st7 + case 11: + goto tr232 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr231 + case 34: + goto tr157 + case 44: + goto tr233 + case 61: + goto st6 + case 92: + goto st87 + case 115: + goto st135 + } + goto st81 + st135: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof135 + } + st_case_135: + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto st7 + case 11: + goto tr232 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr231 + case 34: + goto tr157 + case 44: + goto tr233 + case 61: + goto st6 + case 92: + goto st87 + 
case 101: + goto st515 + } + goto st81 +tr272: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st516 st516: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof516 } st_case_516: +//line plugins/parsers/influx/machine.go:19448 switch ( m.data)[( m.p)] { case 9: - goto tr697 + goto tr789 + case 10: + goto tr790 case 11: - goto tr698 + goto tr791 case 12: - goto tr520 + goto tr738 + case 13: + goto tr792 case 32: - goto tr697 + goto tr789 case 34: - goto tr205 + goto tr157 case 44: - goto tr190 + goto tr793 case 61: - goto tr197 + goto st6 + case 82: + goto st136 case 92: - goto st105 + goto st87 + case 114: + goto st137 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st517 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + goto st81 + st136: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof136 } - goto st96 + st_case_136: + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto st7 + case 11: + goto tr232 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr231 + case 34: + goto tr157 + case 44: + goto tr233 + case 61: + goto st6 + case 85: + goto st132 + case 92: + goto st87 + } + goto st81 + st137: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof137 + } + st_case_137: + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto st7 + case 11: + goto tr232 + case 12: + goto tr60 + case 13: + goto st8 + case 32: + goto tr231 + case 34: + goto tr157 + case 44: + goto tr233 + case 61: + goto st6 + case 92: + goto st87 + case 117: + goto st135 + } + goto st81 +tr273: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st517 st517: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof517 } st_case_517: +//line plugins/parsers/influx/machine.go:19547 switch ( m.data)[( m.p)] { case 9: - goto tr697 + goto tr789 + case 10: + goto tr790 case 11: - goto tr698 + goto tr791 case 12: - goto tr520 + goto tr738 + case 13: + goto tr792 case 32: - goto tr697 + goto tr789 case 34: - goto tr205 + goto tr157 case 44: - goto tr190 + goto tr793 case 61: - goto tr197 + goto st6 case 92: - goto st105 + goto st87 + case 97: + goto st133 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st518 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st96 + goto st81 +tr274: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st518 st518: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof518 } st_case_518: +//line plugins/parsers/influx/machine.go:19584 switch ( m.data)[( m.p)] { case 9: - goto tr697 + goto tr789 + case 10: + goto tr790 case 11: - goto tr698 + goto tr791 case 12: - goto tr520 + goto tr738 + case 13: + goto tr792 case 32: - goto tr697 + goto tr789 case 34: - goto tr205 + goto tr157 case 44: - goto tr190 + goto tr793 case 61: - goto tr197 + goto st6 case 92: - goto st105 + goto st87 + case 114: + goto st137 + } + goto st81 +tr259: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st138 + st138: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof138 + } + st_case_138: +//line plugins/parsers/influx/machine.go:19621 + switch ( m.data)[( m.p)] { + case 34: + goto st99 + case 92: + goto st139 } switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st519 + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr47 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 
9: + goto tr47 } - goto st96 + goto st46 + st139: +//line plugins/parsers/influx/machine.go.rl:234 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof139 + } + st_case_139: +//line plugins/parsers/influx/machine.go:19645 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto st7 + case 12: + goto tr47 + case 13: + goto st8 + case 32: + goto st6 + case 34: + goto tr261 + case 44: + goto st6 + case 61: + goto tr262 + case 92: + goto st138 + } + goto st99 + st140: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof140 + } + st_case_140: + switch ( m.data)[( m.p)] { + case 9: + goto tr89 + case 10: + goto st7 + case 11: + goto tr90 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr89 + case 34: + goto tr317 + case 44: + goto tr92 + case 92: + goto st142 + } + switch { + case ( m.data)[( m.p)] > 45: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st521 + } + case ( m.data)[( m.p)] >= 43: + goto st141 + } + goto st31 +tr317: + ( m.cs) = 519 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddString(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st519: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof519 } st_case_519: +//line plugins/parsers/influx/machine.go:19719 switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto st262 case 11: - goto tr698 - case 12: - goto tr520 + goto tr618 + case 13: + goto st34 case 32: - goto tr697 - case 34: - goto tr205 + goto tr482 case 44: - goto tr190 - case 61: - goto tr197 + goto tr484 case 92: - goto st105 + goto st96 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st520 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr482 } - goto st96 + goto st1 st520: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof520 } st_case_520: switch ( m.data)[( m.p)] { - case 9: - goto tr697 - case 11: - goto tr698 - case 12: - goto tr520 - case 32: - goto tr697 - case 34: - goto tr205 - case 44: - goto tr190 - case 61: - goto tr197 - case 92: - goto st105 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr362 - } - goto st96 -tr668: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st140 -tr723: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st140 -tr729: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st140 -tr733: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st140 -tr737: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st140 - st140: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof140 - } - st_case_140: -//line plugins/parsers/influx/machine.go:17690 - switch ( m.data)[( m.p)] { - case 9: - goto st7 case 10: - goto tr61 + goto tr715 + case 11: + goto tr798 + case 13: + goto tr717 case 32: - goto st7 - case 34: - goto tr259 + goto tr622 case 44: - goto st7 - 
case 61: - goto st7 + goto tr799 case 92: - goto tr278 + goto st96 } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st520 + } + case ( m.data)[( m.p)] >= 9: + goto tr622 } - goto tr277 -tr277: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st141 + goto st1 st141: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof141 } st_case_141: -//line plugins/parsers/influx/machine.go:17722 switch ( m.data)[( m.p)] { case 9: - goto st7 + goto tr89 case 10: - goto tr61 - case 32: goto st7 - case 34: - goto tr262 - case 44: - goto st7 - case 61: - goto tr280 - case 92: - goto st155 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st141 -tr280: -//line plugins/parsers/influx/machine.go.rl:76 - - key = m.text() - -//line plugins/parsers/influx/machine.go.rl:84 - - key = m.text() - - goto st142 - st142: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof142 - } - st_case_142: -//line plugins/parsers/influx/machine.go:17758 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr61 - case 32: - goto st7 - case 34: - goto tr266 - case 44: - goto st7 - case 45: - goto tr282 - case 46: - goto tr283 - case 48: - goto tr284 - case 61: - goto st7 - case 70: - goto tr286 - case 84: - goto tr287 - case 92: - goto tr185 - case 102: - goto tr288 - case 116: - goto tr289 - } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr285 - } - case ( m.data)[( m.p)] >= 12: - goto tr61 - } - goto tr180 -tr282: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st143 - st143: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof143 - } - st_case_143: -//line plugins/parsers/influx/machine.go:17809 - switch ( m.data)[( m.p)] { - case 9: - goto tr187 case 11: - goto tr188 + goto tr90 case 12: - goto tr60 + goto tr1 + case 13: + goto st8 case 32: - goto tr187 + goto tr89 case 34: - goto tr189 + goto tr91 case 44: - goto tr190 - case 46: - goto st144 - case 48: - goto st524 - case 61: - goto st7 + goto tr92 case 92: - goto st103 + goto st142 } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st527 - } - case ( m.data)[( m.p)] >= 10: - goto tr61 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st521 } - goto st89 -tr283: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st144 - st144: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof144 - } - st_case_144: -//line plugins/parsers/influx/machine.go:17852 - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr189 - case 44: - goto tr190 - case 61: - goto st7 - case 92: - goto st103 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st521 - } - case ( m.data)[( m.p)] >= 10: - goto tr61 - } - goto st89 + goto st31 st521: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof521 @@ -17883,140 +19803,92 @@ tr283: st_case_521: switch ( m.data)[( m.p)] { case 9: - goto tr721 + goto tr619 + case 10: + goto tr620 case 11: - goto tr722 + goto tr621 case 12: - goto tr566 + goto tr622 + case 13: + goto tr623 case 32: - goto tr721 + goto tr619 case 34: - goto tr189 + goto tr91 case 44: - goto tr723 - case 61: - goto st7 - case 69: - goto st145 + goto tr624 case 92: - goto st103 - case 101: - 
goto st145 + goto st142 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st521 - } - case ( m.data)[( m.p)] >= 10: - goto tr383 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st521 } - goto st89 - st145: + goto st31 +tr87: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st142 + st142: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof145 + goto _test_eof142 } - st_case_145: + st_case_142: +//line plugins/parsers/influx/machine.go:19840 switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 case 34: - goto tr294 - case 44: - goto tr190 - case 61: - goto st7 + goto st31 case 92: - goto st103 + goto st31 } switch { - case ( m.data)[( m.p)] < 43: - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st523 - } - default: - goto st146 + case ( m.data)[( m.p)] >= 9: + goto tr8 } - goto st89 -tr294: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st522 + goto st1 st522: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof522 } st_case_522: -//line plugins/parsers/influx/machine.go:17963 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto tr565 - case 13: - goto tr357 - case 32: - goto tr514 - case 44: - goto tr516 - case 61: - goto tr207 - case 92: - goto st36 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st390 - } - case ( m.data)[( m.p)] >= 9: - goto tr514 - } - goto st31 - st146: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof146 - } - st_case_146: switch ( m.data)[( m.p)] { case 9: - goto tr187 + goto tr619 + case 10: + goto tr620 case 11: - goto tr188 + goto tr621 case 12: - goto tr60 + goto tr622 + case 13: + goto tr623 case 32: - goto tr187 + goto tr619 case 34: - goto tr189 + goto tr91 case 44: - goto tr190 - case 61: - goto st7 + goto tr624 + case 46: + goto st396 + case 69: + goto st140 case 92: - goto st103 + goto st142 + case 101: + goto st140 + case 105: + goto st524 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st523 - } - case ( m.data)[( m.p)] >= 10: - goto tr61 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st523 } - goto st89 + goto st31 st523: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof523 @@ -18024,31 +19896,34 @@ tr294: st_case_523: switch ( m.data)[( m.p)] { case 9: - goto tr721 + goto tr619 + case 10: + goto tr620 case 11: - goto tr722 + goto tr621 case 12: - goto tr566 + goto tr622 + case 13: + goto tr623 case 32: - goto tr721 + goto tr619 case 34: - goto tr189 + goto tr91 case 44: - goto tr723 - case 61: - goto st7 + goto tr624 + case 46: + goto st396 + case 69: + goto st140 case 92: - goto st103 + goto st142 + case 101: + goto st140 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st523 - } - case ( m.data)[( m.p)] >= 10: - goto tr383 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st523 } - goto st89 + goto st31 st524: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof524 @@ -18056,39 +19931,25 @@ tr294: st_case_524: switch ( m.data)[( m.p)] { case 9: - goto tr721 + goto tr802 + case 10: + goto tr778 case 11: - 
goto tr722 + goto tr803 case 12: - goto tr566 + goto tr804 + case 13: + goto tr780 case 32: - goto tr721 + goto tr802 case 34: - goto tr189 + goto tr91 case 44: - goto tr723 - case 46: - goto st521 - case 61: - goto st7 - case 69: - goto st145 + goto tr805 case 92: - goto st103 - case 101: - goto st145 - case 105: - goto st526 + goto st142 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st525 - } - case ( m.data)[( m.p)] >= 10: - goto tr383 - } - goto st89 + goto st31 st525: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof525 @@ -18096,64 +19957,82 @@ tr294: st_case_525: switch ( m.data)[( m.p)] { case 9: - goto tr721 + goto tr619 + case 10: + goto tr620 case 11: - goto tr722 + goto tr621 case 12: - goto tr566 + goto tr622 + case 13: + goto tr623 case 32: - goto tr721 + goto tr619 case 34: - goto tr189 + goto tr91 case 44: - goto tr723 + goto tr624 case 46: - goto st521 - case 61: - goto st7 + goto st396 case 69: - goto st145 + goto st140 case 92: - goto st103 + goto st142 case 101: - goto st145 + goto st140 + case 105: + goto st524 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st525 - } - case ( m.data)[( m.p)] >= 10: - goto tr383 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st525 } - goto st89 + goto st31 +tr247: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st526 st526: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof526 } st_case_526: +//line plugins/parsers/influx/machine.go:20002 switch ( m.data)[( m.p)] { case 9: - goto tr727 + goto tr619 + case 10: + goto tr620 case 11: - goto tr728 + goto tr621 case 12: - goto tr572 + goto tr622 + case 13: + goto tr623 case 32: - goto tr727 + goto tr619 case 34: - goto tr189 + goto tr91 case 44: - goto tr729 - case 61: - goto st7 + goto tr624 + case 46: + goto st396 + case 69: + goto st140 case 92: - goto st103 + goto st142 + case 101: + goto st140 + case 105: + goto st524 + case 117: + goto st527 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr389 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st523 } - goto st89 + goto st31 st527: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof527 @@ -18161,41 +20040,27 @@ tr294: st_case_527: switch ( m.data)[( m.p)] { case 9: - goto tr721 + goto tr807 + case 10: + goto tr784 case 11: - goto tr722 + goto tr808 case 12: - goto tr566 + goto tr809 + case 13: + goto tr786 case 32: - goto tr721 + goto tr807 case 34: - goto tr189 + goto tr91 case 44: - goto tr723 - case 46: - goto st521 - case 61: - goto st7 - case 69: - goto st145 + goto tr810 case 92: - goto st103 - case 101: - goto st145 - case 105: - goto st526 + goto st142 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st527 - } - case ( m.data)[( m.p)] >= 10: - goto tr383 - } - goto st89 -tr284: -//line plugins/parsers/influx/machine.go.rl:18 + goto st31 +tr248: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -18205,122 +20070,274 @@ tr284: goto _test_eof528 } st_case_528: -//line plugins/parsers/influx/machine.go:18209 +//line plugins/parsers/influx/machine.go:20074 switch ( m.data)[( m.p)] { case 9: - goto tr721 + goto tr619 + case 10: + goto tr620 case 11: - goto tr722 + goto tr621 case 12: - goto tr566 + goto tr622 + case 13: + goto tr623 case 32: - goto tr721 + goto tr619 case 34: - goto tr189 + goto tr91 case 44: - goto tr723 + goto tr624 case 46: - goto st521 - case 61: - goto 
st7 + goto st396 case 69: - goto st145 + goto st140 case 92: - goto st103 + goto st142 case 101: - goto st145 + goto st140 case 105: - goto st526 + goto st524 case 117: - goto st529 + goto st527 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st525 - } - case ( m.data)[( m.p)] >= 10: - goto tr383 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st528 } - goto st89 + goto st31 +tr249: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st529 st529: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof529 } st_case_529: +//line plugins/parsers/influx/machine.go:20120 switch ( m.data)[( m.p)] { case 9: - goto tr731 + goto tr812 + case 10: + goto tr790 case 11: - goto tr732 + goto tr813 case 12: - goto tr576 + goto tr814 + case 13: + goto tr792 case 32: - goto tr731 + goto tr812 case 34: - goto tr189 + goto tr91 case 44: - goto tr733 - case 61: - goto st7 + goto tr815 + case 65: + goto st143 case 92: - goto st103 + goto st142 + case 97: + goto st146 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr393 + goto st31 + st143: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof143 } - goto st89 -tr285: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st530 + st_case_143: + switch ( m.data)[( m.p)] { + case 9: + goto tr89 + case 10: + goto st7 + case 11: + goto tr90 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr89 + case 34: + goto tr91 + case 44: + goto tr92 + case 76: + goto st144 + case 92: + goto st142 + } + goto st31 + st144: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof144 + } + st_case_144: + switch ( m.data)[( m.p)] { + case 9: + goto tr89 + case 10: + goto st7 + case 11: + goto tr90 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr89 + case 34: + goto tr91 + case 44: + goto tr92 + case 83: + goto st145 + case 92: + goto st142 + } + goto st31 + st145: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof145 + } + st_case_145: + switch ( m.data)[( m.p)] { + case 9: + goto tr89 + case 10: + goto st7 + case 11: + goto tr90 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr89 + case 34: + goto tr91 + case 44: + goto tr92 + case 69: + goto st530 + case 92: + goto st142 + } + goto st31 st530: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof530 } st_case_530: -//line plugins/parsers/influx/machine.go:18285 switch ( m.data)[( m.p)] { case 9: - goto tr721 + goto tr812 + case 10: + goto tr790 case 11: - goto tr722 + goto tr813 case 12: - goto tr566 + goto tr814 + case 13: + goto tr792 case 32: - goto tr721 + goto tr812 case 34: - goto tr189 + goto tr91 case 44: - goto tr723 - case 46: - goto st521 - case 61: - goto st7 - case 69: - goto st145 + goto tr815 case 92: - goto st103 + goto st142 + } + goto st31 + st146: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof146 + } + st_case_146: + switch ( m.data)[( m.p)] { + case 9: + goto tr89 + case 10: + goto st7 + case 11: + goto tr90 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr89 + case 34: + goto tr91 + case 44: + goto tr92 + case 92: + goto st142 + case 108: + goto st147 + } + goto st31 + st147: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof147 + } + st_case_147: + switch ( m.data)[( m.p)] { + case 9: + goto tr89 + case 10: + goto st7 + case 11: + goto tr90 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr89 + case 34: + goto tr91 + case 44: + goto tr92 + case 92: + goto st142 + case 115: + goto st148 + } + goto st31 + st148: + if ( 
m.p)++; ( m.p) == ( m.pe) { + goto _test_eof148 + } + st_case_148: + switch ( m.data)[( m.p)] { + case 9: + goto tr89 + case 10: + goto st7 + case 11: + goto tr90 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr89 + case 34: + goto tr91 + case 44: + goto tr92 + case 92: + goto st142 case 101: - goto st145 - case 105: - goto st526 - case 117: - goto st529 + goto st530 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st530 - } - case ( m.data)[( m.p)] >= 10: - goto tr383 - } - goto st89 -tr286: -//line plugins/parsers/influx/machine.go.rl:18 + goto st31 +tr250: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -18330,91 +20347,32 @@ tr286: goto _test_eof531 } st_case_531: -//line plugins/parsers/influx/machine.go:18334 +//line plugins/parsers/influx/machine.go:20351 switch ( m.data)[( m.p)] { case 9: - goto tr735 + goto tr812 + case 10: + goto tr790 case 11: - goto tr736 + goto tr813 case 12: - goto tr580 + goto tr814 + case 13: + goto tr792 case 32: - goto tr735 + goto tr812 case 34: - goto tr189 + goto tr91 case 44: - goto tr737 - case 61: - goto st7 - case 65: - goto st147 - case 92: - goto st103 - case 97: - goto st150 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 - } - goto st89 - st147: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof147 - } - st_case_147: - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr189 - case 44: - goto tr190 - case 61: - goto st7 - case 76: - goto st148 - case 92: - goto st103 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st89 - st148: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof148 - } - st_case_148: - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr189 - case 44: - goto tr190 - case 61: - goto st7 - case 83: + goto tr815 + case 82: goto st149 case 92: - goto st103 + goto st142 + case 114: + goto st150 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st89 + goto st31 st149: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof149 @@ -18422,55 +20380,27 @@ tr286: st_case_149: switch ( m.data)[( m.p)] { case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr189 - case 44: - goto tr190 - case 61: + goto tr89 + case 10: goto st7 - case 69: - goto st532 - case 92: - goto st103 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st89 - st532: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof532 - } - st_case_532: - switch ( m.data)[( m.p)] { - case 9: - goto tr735 case 11: - goto tr736 + goto tr90 case 12: - goto tr580 + goto tr1 + case 13: + goto st8 case 32: - goto tr735 + goto tr89 case 34: - goto tr189 + goto tr91 case 44: - goto tr737 - case 61: - goto st7 + goto tr92 + case 85: + goto st145 case 92: - goto st103 + goto st142 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 - } - goto st89 + goto st31 st150: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof150 @@ -18478,88 +20408,64 @@ tr286: st_case_150: switch ( m.data)[( m.p)] { case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr189 - case 44: - goto tr190 - case 61: + goto tr89 + case 10: goto st7 + case 11: + goto tr90 + case 12: + goto tr1 + case 13: + goto st8 
+ case 32: + goto tr89 + case 34: + goto tr91 + case 44: + goto tr92 case 92: - goto st103 - case 108: - goto st151 + goto st142 + case 117: + goto st148 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st89 - st151: + goto st31 +tr251: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st532 + st532: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof151 + goto _test_eof532 } - st_case_151: + st_case_532: +//line plugins/parsers/influx/machine.go:20444 switch ( m.data)[( m.p)] { case 9: - goto tr187 + goto tr812 + case 10: + goto tr790 case 11: - goto tr188 + goto tr813 case 12: - goto tr60 + goto tr814 + case 13: + goto tr792 case 32: - goto tr187 + goto tr812 case 34: - goto tr189 + goto tr91 case 44: - goto tr190 - case 61: - goto st7 + goto tr815 case 92: - goto st103 - case 115: - goto st152 + goto st142 + case 97: + goto st146 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st89 - st152: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof152 - } - st_case_152: - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr189 - case 44: - goto tr190 - case 61: - goto st7 - case 92: - goto st103 - case 101: - goto st532 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st89 -tr287: -//line plugins/parsers/influx/machine.go.rl:18 + goto st31 +tr252: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -18569,235 +20475,1692 @@ tr287: goto _test_eof533 } st_case_533: -//line plugins/parsers/influx/machine.go:18573 +//line plugins/parsers/influx/machine.go:20479 switch ( m.data)[( m.p)] { case 9: - goto tr735 + goto tr812 + case 10: + goto tr790 case 11: - goto tr736 + goto tr813 case 12: - goto tr580 + goto tr814 + case 13: + goto tr792 case 32: - goto tr735 + goto tr812 case 34: - goto tr189 + goto tr91 case 44: - goto tr737 - case 61: - goto st7 - case 82: - goto st153 + goto tr815 case 92: - goto st103 + goto st142 case 114: - goto st154 + goto st150 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 - } - goto st89 - st153: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof153 - } - st_case_153: - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr189 - case 44: - goto tr190 - case 61: - goto st7 - case 85: - goto st149 - case 92: - goto st103 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st89 - st154: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof154 - } - st_case_154: - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr189 - case 44: - goto tr190 - case 61: - goto st7 - case 92: - goto st103 - case 117: - goto st152 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st89 -tr288: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st534 + goto st31 st534: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof534 } st_case_534: -//line plugins/parsers/influx/machine.go:18669 switch ( m.data)[( m.p)] { case 9: - goto tr735 + goto tr611 + case 10: + goto tr584 case 11: - goto tr736 + goto tr612 case 12: - goto tr580 + goto tr490 + case 13: + goto tr586 case 32: - goto tr735 + goto tr611 case 34: - goto tr189 + goto tr128 case 44: - goto tr737 + goto tr92 case 61: - goto st7 + goto tr129 
case 92: - goto st103 - case 97: - goto st150 + goto st94 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st535 } - goto st89 -tr289: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st535 + goto st42 st535: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof535 } st_case_535: -//line plugins/parsers/influx/machine.go:18705 switch ( m.data)[( m.p)] { case 9: - goto tr735 + goto tr611 + case 10: + goto tr584 case 11: - goto tr736 + goto tr612 case 12: - goto tr580 + goto tr490 + case 13: + goto tr586 case 32: - goto tr735 + goto tr611 case 34: - goto tr189 + goto tr128 case 44: - goto tr737 + goto tr92 case 61: - goto st7 + goto tr129 case 92: - goto st103 - case 114: - goto st154 + goto st94 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st536 } - goto st89 -tr278: -//line plugins/parsers/influx/machine.go.rl:18 + goto st42 + st536: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof536 + } + st_case_536: + switch ( m.data)[( m.p)] { + case 9: + goto tr611 + case 10: + goto tr584 + case 11: + goto tr612 + case 12: + goto tr490 + case 13: + goto tr586 + case 32: + goto tr611 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st94 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st537 + } + goto st42 + st537: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof537 + } + st_case_537: + switch ( m.data)[( m.p)] { + case 9: + goto tr611 + case 10: + goto tr584 + case 11: + goto tr612 + case 12: + goto tr490 + case 13: + goto tr586 + case 32: + goto tr611 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st94 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st538 + } + goto st42 + st538: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof538 + } + st_case_538: + switch ( m.data)[( m.p)] { + case 9: + goto tr611 + case 10: + goto tr584 + case 11: + goto tr612 + case 12: + goto tr490 + case 13: + goto tr586 + case 32: + goto tr611 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st94 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st539 + } + goto st42 + st539: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof539 + } + st_case_539: + switch ( m.data)[( m.p)] { + case 9: + goto tr611 + case 10: + goto tr584 + case 11: + goto tr612 + case 12: + goto tr490 + case 13: + goto tr586 + case 32: + goto tr611 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st94 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st540 + } + goto st42 + st540: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof540 + } + st_case_540: + switch ( m.data)[( m.p)] { + case 9: + goto tr611 + case 10: + goto tr584 + case 11: + goto tr612 + case 12: + goto tr490 + case 13: + goto tr586 + case 32: + goto tr611 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st94 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st541 + } + goto st42 + st541: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof541 + } + st_case_541: + switch ( m.data)[( m.p)] { + case 9: + goto tr611 + case 10: + goto tr584 + case 11: + goto tr612 + case 12: + goto tr490 + case 13: + goto tr586 + case 32: + goto tr611 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + 
goto tr129 + case 92: + goto st94 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st542 + } + goto st42 + st542: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof542 + } + st_case_542: + switch ( m.data)[( m.p)] { + case 9: + goto tr611 + case 10: + goto tr584 + case 11: + goto tr612 + case 12: + goto tr490 + case 13: + goto tr586 + case 32: + goto tr611 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st94 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st543 + } + goto st42 + st543: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof543 + } + st_case_543: + switch ( m.data)[( m.p)] { + case 9: + goto tr611 + case 10: + goto tr584 + case 11: + goto tr612 + case 12: + goto tr490 + case 13: + goto tr586 + case 32: + goto tr611 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st94 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st544 + } + goto st42 + st544: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof544 + } + st_case_544: + switch ( m.data)[( m.p)] { + case 9: + goto tr611 + case 10: + goto tr584 + case 11: + goto tr612 + case 12: + goto tr490 + case 13: + goto tr586 + case 32: + goto tr611 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st94 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st545 + } + goto st42 + st545: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof545 + } + st_case_545: + switch ( m.data)[( m.p)] { + case 9: + goto tr611 + case 10: + goto tr584 + case 11: + goto tr612 + case 12: + goto tr490 + case 13: + goto tr586 + case 32: + goto tr611 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st94 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st546 + } + goto st42 + st546: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof546 + } + st_case_546: + switch ( m.data)[( m.p)] { + case 9: + goto tr611 + case 10: + goto tr584 + case 11: + goto tr612 + case 12: + goto tr490 + case 13: + goto tr586 + case 32: + goto tr611 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st94 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st547 + } + goto st42 + st547: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof547 + } + st_case_547: + switch ( m.data)[( m.p)] { + case 9: + goto tr611 + case 10: + goto tr584 + case 11: + goto tr612 + case 12: + goto tr490 + case 13: + goto tr586 + case 32: + goto tr611 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st94 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st548 + } + goto st42 + st548: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof548 + } + st_case_548: + switch ( m.data)[( m.p)] { + case 9: + goto tr611 + case 10: + goto tr584 + case 11: + goto tr612 + case 12: + goto tr490 + case 13: + goto tr586 + case 32: + goto tr611 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st94 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st549 + } + goto st42 + st549: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof549 + } + st_case_549: + switch ( m.data)[( m.p)] { + case 9: + goto tr611 + case 10: + goto tr584 + case 11: + goto tr612 + case 12: + goto tr490 + case 13: + goto tr586 + case 32: + goto tr611 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st94 
+ } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st550 + } + goto st42 + st550: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof550 + } + st_case_550: + switch ( m.data)[( m.p)] { + case 9: + goto tr611 + case 10: + goto tr584 + case 11: + goto tr612 + case 12: + goto tr490 + case 13: + goto tr586 + case 32: + goto tr611 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st94 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st551 + } + goto st42 + st551: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof551 + } + st_case_551: + switch ( m.data)[( m.p)] { + case 9: + goto tr611 + case 10: + goto tr584 + case 11: + goto tr612 + case 12: + goto tr490 + case 13: + goto tr586 + case 32: + goto tr611 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st94 + } + goto st42 +tr213: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p - goto st155 + goto st151 + st151: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof151 + } + st_case_151: +//line plugins/parsers/influx/machine.go:21069 + switch ( m.data)[( m.p)] { + case 9: + goto tr180 + case 10: + goto st7 + case 11: + goto tr181 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr180 + case 34: + goto tr91 + case 44: + goto tr182 + case 46: + goto st152 + case 48: + goto st576 + case 92: + goto st157 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st579 + } + goto st55 +tr214: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st152 + st152: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof152 + } + st_case_152: +//line plugins/parsers/influx/machine.go:21109 + switch ( m.data)[( m.p)] { + case 9: + goto tr180 + case 10: + goto st7 + case 11: + goto tr181 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr180 + case 34: + goto tr91 + case 44: + goto tr182 + case 92: + goto st157 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st552 + } + goto st55 + st552: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof552 + } + st_case_552: + switch ( m.data)[( m.p)] { + case 9: + goto tr837 + case 10: + goto tr515 + case 11: + goto tr838 + case 12: + goto tr622 + case 13: + goto tr517 + case 32: + goto tr837 + case 34: + goto tr91 + case 44: + goto tr839 + case 69: + goto st155 + case 92: + goto st157 + case 101: + goto st155 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st552 + } + goto st55 +tr838: + ( m.cs) = 553 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr870: + ( m.cs) = 553 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr874: + ( m.cs) = 553 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, 
m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr878: + ( m.cs) = 553 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again + st553: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof553 + } + st_case_553: +//line plugins/parsers/influx/machine.go:21264 + switch ( m.data)[( m.p)] { + case 9: + goto tr841 + case 10: + goto st317 + case 11: + goto tr842 + case 12: + goto tr482 + case 13: + goto st104 + case 32: + goto tr841 + case 34: + goto tr124 + case 44: + goto tr182 + case 45: + goto tr843 + case 61: + goto st55 + case 92: + goto tr186 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr844 + } + goto tr184 +tr842: + ( m.cs) = 554 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again + st554: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof554 + } + st_case_554: +//line plugins/parsers/influx/machine.go:21315 + switch ( m.data)[( m.p)] { + case 9: + goto tr841 + case 10: + goto st317 + case 11: + goto tr842 + case 12: + goto tr482 + case 13: + goto st104 + case 32: + goto tr841 + case 34: + goto tr124 + case 44: + goto tr182 + case 45: + goto tr843 + case 61: + goto tr189 + case 92: + goto tr186 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr844 + } + goto tr184 +tr843: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st153 + st153: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof153 + } + st_case_153: +//line plugins/parsers/influx/machine.go:21355 + switch ( m.data)[( m.p)] { + case 9: + goto tr180 + case 10: + goto st7 + case 11: + goto tr188 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr180 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st555 + } + goto st57 +tr844: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st555 + st555: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof555 + } + st_case_555: +//line plugins/parsers/influx/machine.go:21393 + switch ( m.data)[( m.p)] { + case 9: + goto tr845 + case 10: + goto tr659 + case 11: + goto tr846 + case 12: + goto tr490 + case 13: + goto tr661 + case 32: + goto tr845 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st557 + } + goto st57 +tr849: + ( m.cs) = 556 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto _again +tr846: + ( m.cs) = 556 +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( 
m.p)++; goto _out } + } + + goto _again + st556: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof556 + } + st_case_556: +//line plugins/parsers/influx/machine.go:21465 + switch ( m.data)[( m.p)] { + case 9: + goto tr848 + case 10: + goto st317 + case 11: + goto tr849 + case 12: + goto tr495 + case 13: + goto st104 + case 32: + goto tr848 + case 34: + goto tr124 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto tr186 + } + goto tr184 +tr186: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st154 + st154: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof154 + } + st_case_154: +//line plugins/parsers/influx/machine.go:21500 + switch ( m.data)[( m.p)] { + case 34: + goto st57 + case 92: + goto st57 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 + } + case ( m.data)[( m.p)] >= 9: + goto tr8 + } + goto st12 + st557: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof557 + } + st_case_557: + switch ( m.data)[( m.p)] { + case 9: + goto tr845 + case 10: + goto tr659 + case 11: + goto tr846 + case 12: + goto tr490 + case 13: + goto tr661 + case 32: + goto tr845 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st558 + } + goto st57 + st558: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof558 + } + st_case_558: + switch ( m.data)[( m.p)] { + case 9: + goto tr845 + case 10: + goto tr659 + case 11: + goto tr846 + case 12: + goto tr490 + case 13: + goto tr661 + case 32: + goto tr845 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st559 + } + goto st57 + st559: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof559 + } + st_case_559: + switch ( m.data)[( m.p)] { + case 9: + goto tr845 + case 10: + goto tr659 + case 11: + goto tr846 + case 12: + goto tr490 + case 13: + goto tr661 + case 32: + goto tr845 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st560 + } + goto st57 + st560: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof560 + } + st_case_560: + switch ( m.data)[( m.p)] { + case 9: + goto tr845 + case 10: + goto tr659 + case 11: + goto tr846 + case 12: + goto tr490 + case 13: + goto tr661 + case 32: + goto tr845 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st561 + } + goto st57 + st561: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof561 + } + st_case_561: + switch ( m.data)[( m.p)] { + case 9: + goto tr845 + case 10: + goto tr659 + case 11: + goto tr846 + case 12: + goto tr490 + case 13: + goto tr661 + case 32: + goto tr845 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st562 + } + goto st57 + st562: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof562 + } + st_case_562: + switch ( m.data)[( m.p)] { + case 9: + goto tr845 + case 10: + goto tr659 + case 11: + goto tr846 + case 12: + goto tr490 + case 13: + goto tr661 + case 32: + goto tr845 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto 
st563 + } + goto st57 + st563: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof563 + } + st_case_563: + switch ( m.data)[( m.p)] { + case 9: + goto tr845 + case 10: + goto tr659 + case 11: + goto tr846 + case 12: + goto tr490 + case 13: + goto tr661 + case 32: + goto tr845 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st564 + } + goto st57 + st564: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof564 + } + st_case_564: + switch ( m.data)[( m.p)] { + case 9: + goto tr845 + case 10: + goto tr659 + case 11: + goto tr846 + case 12: + goto tr490 + case 13: + goto tr661 + case 32: + goto tr845 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st565 + } + goto st57 + st565: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof565 + } + st_case_565: + switch ( m.data)[( m.p)] { + case 9: + goto tr845 + case 10: + goto tr659 + case 11: + goto tr846 + case 12: + goto tr490 + case 13: + goto tr661 + case 32: + goto tr845 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st566 + } + goto st57 + st566: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof566 + } + st_case_566: + switch ( m.data)[( m.p)] { + case 9: + goto tr845 + case 10: + goto tr659 + case 11: + goto tr846 + case 12: + goto tr490 + case 13: + goto tr661 + case 32: + goto tr845 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st567 + } + goto st57 + st567: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof567 + } + st_case_567: + switch ( m.data)[( m.p)] { + case 9: + goto tr845 + case 10: + goto tr659 + case 11: + goto tr846 + case 12: + goto tr490 + case 13: + goto tr661 + case 32: + goto tr845 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st568 + } + goto st57 + st568: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof568 + } + st_case_568: + switch ( m.data)[( m.p)] { + case 9: + goto tr845 + case 10: + goto tr659 + case 11: + goto tr846 + case 12: + goto tr490 + case 13: + goto tr661 + case 32: + goto tr845 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st569 + } + goto st57 + st569: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof569 + } + st_case_569: + switch ( m.data)[( m.p)] { + case 9: + goto tr845 + case 10: + goto tr659 + case 11: + goto tr846 + case 12: + goto tr490 + case 13: + goto tr661 + case 32: + goto tr845 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st570 + } + goto st57 + st570: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof570 + } + st_case_570: + switch ( m.data)[( m.p)] { + case 9: + goto tr845 + case 10: + goto tr659 + case 11: + goto tr846 + case 12: + goto tr490 + case 13: + goto tr661 + case 32: + goto tr845 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st571 + } + goto 
st57 + st571: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof571 + } + st_case_571: + switch ( m.data)[( m.p)] { + case 9: + goto tr845 + case 10: + goto tr659 + case 11: + goto tr846 + case 12: + goto tr490 + case 13: + goto tr661 + case 32: + goto tr845 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st572 + } + goto st57 + st572: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof572 + } + st_case_572: + switch ( m.data)[( m.p)] { + case 9: + goto tr845 + case 10: + goto tr659 + case 11: + goto tr846 + case 12: + goto tr490 + case 13: + goto tr661 + case 32: + goto tr845 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st573 + } + goto st57 + st573: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof573 + } + st_case_573: + switch ( m.data)[( m.p)] { + case 9: + goto tr845 + case 10: + goto tr659 + case 11: + goto tr846 + case 12: + goto tr490 + case 13: + goto tr661 + case 32: + goto tr845 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st574 + } + goto st57 + st574: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof574 + } + st_case_574: + switch ( m.data)[( m.p)] { + case 9: + goto tr845 + case 10: + goto tr659 + case 11: + goto tr846 + case 12: + goto tr490 + case 13: + goto tr661 + case 32: + goto tr845 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st154 + } + goto st57 st155: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof155 } st_case_155: -//line plugins/parsers/influx/machine.go:18741 switch ( m.data)[( m.p)] { + case 9: + goto tr180 + case 10: + goto st7 + case 11: + goto tr181 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr180 case 34: - goto st141 + goto tr317 + case 44: + goto tr182 case 92: - goto st141 + goto st157 } switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 + case ( m.data)[( m.p)] > 45: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st575 } - case ( m.data)[( m.p)] >= 9: - goto tr61 + case ( m.data)[( m.p)] >= 43: + goto st156 } - goto st86 -tr267: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st156 + goto st55 st156: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof156 } st_case_156: -//line plugins/parsers/influx/machine.go:18768 switch ( m.data)[( m.p)] { case 9: - goto tr237 - case 11: - goto tr238 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr189 - case 44: - goto tr239 - case 46: - goto st157 - case 48: - goto st560 - case 61: + goto tr180 + case 10: goto st7 + case 11: + goto tr181 + case 12: + goto tr1 + case 13: + goto st8 + case 32: + goto tr180 + case 34: + goto tr91 + case 44: + goto tr182 case 92: - goto st127 + goto st157 } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st563 - } - case ( m.data)[( m.p)] >= 10: - goto tr61 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st575 } - goto st122 -tr268: -//line plugins/parsers/influx/machine.go.rl:18 + goto st55 + st575: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof575 + } + st_case_575: + switch ( m.data)[( m.p)] { + case 9: + goto tr837 + case 10: + goto tr515 + case 11: + goto tr838 + case 
12: + goto tr622 + case 13: + goto tr517 + case 32: + goto tr837 + case 34: + goto tr91 + case 44: + goto tr839 + case 92: + goto st157 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st575 + } + goto st55 +tr340: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -18807,1987 +22170,22 @@ tr268: goto _test_eof157 } st_case_157: -//line plugins/parsers/influx/machine.go:18811 - switch ( m.data)[( m.p)] { - case 9: - goto tr237 - case 11: - goto tr238 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr189 - case 44: - goto tr239 - case 61: - goto st7 - case 92: - goto st127 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st536 - } - case ( m.data)[( m.p)] >= 10: - goto tr61 - } - goto st122 - st536: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof536 - } - st_case_536: - switch ( m.data)[( m.p)] { - case 9: - goto tr742 - case 11: - goto tr743 - case 12: - goto tr566 - case 32: - goto tr742 - case 34: - goto tr189 - case 44: - goto tr744 - case 61: - goto st7 - case 69: - goto st159 - case 92: - goto st127 - case 101: - goto st159 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st536 - } - case ( m.data)[( m.p)] >= 10: - goto tr383 - } - goto st122 -tr743: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st537 -tr775: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st537 -tr779: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st537 -tr783: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st537 - st537: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof537 - } - st_case_537: -//line plugins/parsers/influx/machine.go:18920 - switch ( m.data)[( m.p)] { - case 9: - goto tr746 - case 11: - goto tr747 - case 12: - goto tr514 - case 32: - goto tr746 - case 34: - goto tr201 - case 44: - goto tr239 - case 45: - goto tr748 - case 61: - goto st7 - case 92: - goto tr243 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr749 - } - case ( m.data)[( m.p)] >= 10: - goto tr357 - } - goto tr241 -tr747: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st538 - st538: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof538 - } - st_case_538: -//line plugins/parsers/influx/machine.go:18965 - switch ( m.data)[( m.p)] { - case 9: - goto tr746 - case 11: - goto tr747 - case 12: - goto tr514 - case 32: - goto tr746 - case 34: - goto tr201 - case 44: - goto tr239 - case 45: - goto tr748 - case 61: - goto tr99 - case 92: - goto tr243 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr749 - } - case ( m.data)[( m.p)] >= 10: - goto tr357 - } - goto tr241 -tr748: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st158 - st158: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof158 
- } - st_case_158: -//line plugins/parsers/influx/machine.go:19006 - switch ( m.data)[( m.p)] { - case 9: - goto tr237 - case 11: - goto tr245 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr205 - case 44: - goto tr239 - case 61: - goto tr99 - case 92: - goto st126 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st539 - } - case ( m.data)[( m.p)] >= 10: - goto tr207 - } - goto st124 -tr749: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st539 - st539: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof539 - } - st_case_539: -//line plugins/parsers/influx/machine.go:19045 - switch ( m.data)[( m.p)] { - case 9: - goto tr750 - case 11: - goto tr751 - case 12: - goto tr520 - case 32: - goto tr750 - case 34: - goto tr205 - case 44: - goto tr239 - case 61: - goto tr99 - case 92: - goto st126 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st541 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st124 -tr754: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st540 -tr751: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st540 - st540: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof540 - } - st_case_540: -//line plugins/parsers/influx/machine.go:19098 - switch ( m.data)[( m.p)] { - case 9: - goto tr753 - case 11: - goto tr754 - case 12: - goto tr523 - case 32: - goto tr753 - case 34: - goto tr201 - case 44: - goto tr239 - case 61: - goto tr99 - case 92: - goto tr243 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 - } - goto tr241 - st541: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof541 - } - st_case_541: - switch ( m.data)[( m.p)] { - case 9: - goto tr750 - case 11: - goto tr751 - case 12: - goto tr520 - case 32: - goto tr750 - case 34: - goto tr205 - case 44: - goto tr239 - case 61: - goto tr99 - case 92: - goto st126 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st542 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st124 - st542: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof542 - } - st_case_542: - switch ( m.data)[( m.p)] { - case 9: - goto tr750 - case 11: - goto tr751 - case 12: - goto tr520 - case 32: - goto tr750 - case 34: - goto tr205 - case 44: - goto tr239 - case 61: - goto tr99 - case 92: - goto st126 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st543 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st124 - st543: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof543 - } - st_case_543: - switch ( m.data)[( m.p)] { - case 9: - goto tr750 - case 11: - goto tr751 - case 12: - goto tr520 - case 32: - goto tr750 - case 34: - goto tr205 - case 44: - goto tr239 - case 61: - goto tr99 - case 92: - goto st126 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st544 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st124 - st544: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof544 - } - st_case_544: - switch ( m.data)[( m.p)] { - case 9: - goto tr750 - case 11: - goto tr751 - case 12: - goto tr520 - case 32: - goto 
tr750 - case 34: - goto tr205 - case 44: - goto tr239 - case 61: - goto tr99 - case 92: - goto st126 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st545 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st124 - st545: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof545 - } - st_case_545: - switch ( m.data)[( m.p)] { - case 9: - goto tr750 - case 11: - goto tr751 - case 12: - goto tr520 - case 32: - goto tr750 - case 34: - goto tr205 - case 44: - goto tr239 - case 61: - goto tr99 - case 92: - goto st126 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st546 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st124 - st546: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof546 - } - st_case_546: - switch ( m.data)[( m.p)] { - case 9: - goto tr750 - case 11: - goto tr751 - case 12: - goto tr520 - case 32: - goto tr750 - case 34: - goto tr205 - case 44: - goto tr239 - case 61: - goto tr99 - case 92: - goto st126 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st547 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st124 - st547: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof547 - } - st_case_547: - switch ( m.data)[( m.p)] { - case 9: - goto tr750 - case 11: - goto tr751 - case 12: - goto tr520 - case 32: - goto tr750 - case 34: - goto tr205 - case 44: - goto tr239 - case 61: - goto tr99 - case 92: - goto st126 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st548 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st124 - st548: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof548 - } - st_case_548: - switch ( m.data)[( m.p)] { - case 9: - goto tr750 - case 11: - goto tr751 - case 12: - goto tr520 - case 32: - goto tr750 - case 34: - goto tr205 - case 44: - goto tr239 - case 61: - goto tr99 - case 92: - goto st126 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st549 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st124 - st549: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof549 - } - st_case_549: - switch ( m.data)[( m.p)] { - case 9: - goto tr750 - case 11: - goto tr751 - case 12: - goto tr520 - case 32: - goto tr750 - case 34: - goto tr205 - case 44: - goto tr239 - case 61: - goto tr99 - case 92: - goto st126 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st550 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st124 - st550: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof550 - } - st_case_550: - switch ( m.data)[( m.p)] { - case 9: - goto tr750 - case 11: - goto tr751 - case 12: - goto tr520 - case 32: - goto tr750 - case 34: - goto tr205 - case 44: - goto tr239 - case 61: - goto tr99 - case 92: - goto st126 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st551 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st124 - st551: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof551 - } - st_case_551: - switch ( m.data)[( m.p)] { - case 9: - goto tr750 - case 11: - goto tr751 - case 12: - goto tr520 - case 32: - goto tr750 - case 34: - goto tr205 - case 44: - goto tr239 - case 61: - goto tr99 - case 92: - goto st126 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( 
[Ragel-generated state tables elided: the remaining diff lines of plugins/parsers/influx/machine.go, regenerated from plugins/parsers/influx/machine.go.rl. The hunks are mechanical renumbering of parser states (st124–st631) and transition labels (tr…) — byte-value switch/goto tables for digits, quotes, backslashes and whitespace — plus the embedded actions carried over from the .rl source: token marking (m.pb = m.p, machine.go.rl:19), SetMeasurement (machine.go.rl:77), field-key capture (key = m.text(), machine.go.rl:99), AddInt (machine.go.rl:103), AddUint (machine.go.rl:112), AddFloat (machine.go.rl:121), AddBool (machine.go.rl:130) and AddString (machine.go.rl:139). On a handler error each action rewinds one byte (( m.p)--), sets ( m.cs) = 247 and jumps to _out.]
goto tr4 + goto tr60 } - goto st2 + goto st17 + st632: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof632 + } + st_case_632: + switch ( m.data)[( m.p)] { + case 10: + goto tr935 + case 11: + goto tr936 + case 13: + goto tr937 + case 32: + goto tr738 + case 44: + goto tr938 + case 61: + goto tr132 + case 92: + goto st23 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr738 + } + goto st17 st194: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof194 @@ -22108,88 +25163,26 @@ tr77: st_case_194: switch ( m.data)[( m.p)] { case 10: - goto tr5 + goto tr47 case 11: - goto tr6 + goto tr61 case 13: - goto tr5 + goto tr47 case 32: - goto tr4 + goto tr60 case 44: - goto tr7 + goto tr62 + case 61: + goto tr47 case 92: - goto st133 - case 117: - goto st192 + goto st23 + case 108: + goto st195 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr4 + goto tr60 } - goto st2 -tr78: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st599 - st599: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof599 - } - st_case_599: -//line plugins/parsers/influx/machine.go:22141 - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 11: - goto tr825 - case 13: - goto tr397 - case 32: - goto tr810 - case 44: - goto tr826 - case 92: - goto st133 - case 97: - goto st190 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr810 - } - goto st2 -tr79: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st600 - st600: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof600 - } - st_case_600: -//line plugins/parsers/influx/machine.go:22173 - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 11: - goto tr825 - case 13: - goto tr397 - case 32: - goto tr810 - case 44: - goto tr826 - case 92: - goto st133 - case 114: - goto st194 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr810 - } - goto st2 + goto st17 st195: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof195 @@ -22197,90 +25190,89 @@ tr79: st_case_195: switch ( m.data)[( m.p)] { case 10: - goto tr338 + goto tr47 + case 11: + goto tr61 case 13: - goto tr338 + goto tr47 + case 32: + goto tr60 + case 44: + goto tr62 + case 61: + goto tr47 + case 92: + goto st23 + case 115: + goto st196 } - goto st195 -tr338: -//line plugins/parsers/influx/machine.go.rl:68 - - {goto st196 } - - goto st601 - st601: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof601 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr60 } - st_case_601: -//line plugins/parsers/influx/machine.go:22217 - goto st0 + goto st17 st196: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof196 } st_case_196: switch ( m.data)[( m.p)] { + case 10: + goto tr47 case 11: - goto tr341 + goto tr61 + case 13: + goto tr47 case 32: - goto st196 - case 35: - goto st197 + goto tr60 case 44: - goto st0 + goto tr62 + case 61: + goto tr47 case 92: - goto st198 + goto st23 + case 101: + goto st632 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st196 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr60 } - goto tr339 -tr339: -//line plugins/parsers/influx/machine.go.rl:63 + goto st17 +tr145: +//line plugins/parsers/influx/machine.go.rl:19 - ( m.p)-- + m.pb = m.p - {goto st1 } - - goto st602 - st602: + goto st633 + st633: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof602 + goto _test_eof633 } - st_case_602: -//line plugins/parsers/influx/machine.go:22253 - goto st0 -tr341: -//line plugins/parsers/influx/machine.go.rl:63 - - ( m.p)-- - - {goto st1 } - - goto st603 - 
st603: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof603 - } - st_case_603: -//line plugins/parsers/influx/machine.go:22268 + st_case_633: +//line plugins/parsers/influx/machine.go:25252 switch ( m.data)[( m.p)] { + case 10: + goto tr935 case 11: - goto tr341 + goto tr936 + case 13: + goto tr937 case 32: - goto st196 - case 35: - goto st197 + goto tr738 case 44: - goto st0 + goto tr938 + case 61: + goto tr132 + case 82: + goto st197 case 92: + goto st23 + case 114: goto st198 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st196 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr738 } - goto tr339 + goto st17 st197: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof197 @@ -22288,196 +25280,221 @@ tr341: st_case_197: switch ( m.data)[( m.p)] { case 10: - goto st196 + goto tr47 + case 11: + goto tr61 case 13: - goto st196 + goto tr47 + case 32: + goto tr60 + case 44: + goto tr62 + case 61: + goto tr47 + case 85: + goto st193 + case 92: + goto st23 } - goto st197 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr60 + } + goto st17 st198: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof198 } st_case_198: - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st0 - } - case ( m.data)[( m.p)] >= 9: - goto st0 + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr61 + case 13: + goto tr47 + case 32: + goto tr60 + case 44: + goto tr62 + case 61: + goto tr47 + case 92: + goto st23 + case 117: + goto st196 } - goto tr339 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr60 + } + goto st17 +tr146: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st634 + st634: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof634 + } + st_case_634: +//line plugins/parsers/influx/machine.go:25342 + switch ( m.data)[( m.p)] { + case 10: + goto tr935 + case 11: + goto tr936 + case 13: + goto tr937 + case 32: + goto tr738 + case 44: + goto tr938 + case 61: + goto tr132 + case 92: + goto st23 + case 97: + goto st194 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr738 + } + goto st17 +tr147: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st635 + st635: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof635 + } + st_case_635: +//line plugins/parsers/influx/machine.go:25376 + switch ( m.data)[( m.p)] { + case 10: + goto tr935 + case 11: + goto tr936 + case 13: + goto tr937 + case 32: + goto tr738 + case 44: + goto tr938 + case 61: + goto tr132 + case 92: + goto st23 + case 114: + goto st198 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr738 + } + goto st17 +tr123: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st199 +tr373: + ( m.cs) = 199 +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st199: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof199 } st_case_199: +//line plugins/parsers/influx/machine.go:25427 switch ( m.data)[( m.p)] { + case 9: + goto tr119 + case 10: + goto st7 + case 11: + goto tr373 + case 12: + goto tr38 + case 13: + goto st8 case 32: - goto st0 - case 35: - goto st0 + goto tr119 + case 34: + goto tr124 case 44: - goto st0 + goto tr92 + case 61: + goto tr374 case 92: - goto tr346 + goto tr125 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( 
m.p)] && ( m.data)[( m.p)] <= 13 { - goto st0 - } - case ( m.data)[( m.p)] >= 9: - goto st0 - } - goto tr345 -tr345: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr121 +tr120: + ( m.cs) = 200 +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p - goto st604 -tr833: -//line plugins/parsers/influx/machine.go.rl:72 +//line plugins/parsers/influx/machine.go.rl:77 - m.handler.SetMeasurement(m.text()) + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - goto st604 - st604: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof604 - } - st_case_604: -//line plugins/parsers/influx/machine.go:22352 - switch ( m.data)[( m.p)] { - case 10: - goto tr832 - case 11: - goto tr833 - case 13: - goto tr832 - case 32: - goto tr831 - case 44: - goto tr834 - case 92: - goto st205 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr831 - } - goto st604 -tr831: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st605 -tr838: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st605 - st605: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof605 - } - st_case_605: -//line plugins/parsers/influx/machine.go:22388 - switch ( m.data)[( m.p)] { - case 10: - goto tr837 - case 13: - goto tr837 - case 32: - goto st605 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st605 - } - goto st0 -tr837: - m.cs = 606 -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; goto _out } + ( m.cs) = 247; + {( m.p)++; goto _out } + } goto _again -tr832: - m.cs = 606 -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; goto _out } - - goto _again -tr839: - m.cs = 606 -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; goto _out } - - goto _again - st606: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof606 - } - st_case_606: -//line plugins/parsers/influx/machine.go:22441 - goto st0 -tr834: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st200 -tr841: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st200 st200: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof200 } st_case_200: -//line plugins/parsers/influx/machine.go:22460 +//line plugins/parsers/influx/machine.go:25473 switch ( m.data)[( m.p)] { + case 9: + goto tr119 + case 10: + goto st7 + case 11: + goto tr373 + case 12: + goto tr38 + case 13: + goto st8 case 32: - goto tr52 + goto tr119 + case 34: + goto tr124 case 44: - goto tr52 + goto tr92 case 61: - goto tr52 + goto tr82 case 92: - goto tr348 + goto tr125 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 - } - case ( m.data)[( m.p)] >= 9: - goto tr52 - } - goto tr347 -tr347: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr121 +tr480: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -22487,97 +25504,644 @@ tr347: goto _test_eof201 } st_case_201: -//line plugins/parsers/influx/machine.go:22491 +//line plugins/parsers/influx/machine.go:25508 switch ( m.data)[( m.p)] { - case 32: - goto tr52 - case 44: - goto tr52 - case 61: - goto tr350 + case 10: + goto st7 + case 12: + goto tr105 + case 13: + goto st8 + case 34: 
+ goto tr31 case 92: - goto st204 + goto st76 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st636 + } + goto st6 +tr481: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st636 + st636: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof636 + } + st_case_636: +//line plugins/parsers/influx/machine.go:25536 + switch ( m.data)[( m.p)] { + case 10: + goto tr584 + case 12: + goto tr450 + case 13: + goto tr586 + case 32: + goto tr583 + case 34: + goto tr31 + case 92: + goto st76 } switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st637 } case ( m.data)[( m.p)] >= 9: - goto tr52 + goto tr583 } - goto st201 -tr350: -//line plugins/parsers/influx/machine.go.rl:76 + goto st6 + st637: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof637 + } + st_case_637: + switch ( m.data)[( m.p)] { + case 10: + goto tr584 + case 12: + goto tr450 + case 13: + goto tr586 + case 32: + goto tr583 + case 34: + goto tr31 + case 92: + goto st76 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st638 + } + case ( m.data)[( m.p)] >= 9: + goto tr583 + } + goto st6 + st638: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof638 + } + st_case_638: + switch ( m.data)[( m.p)] { + case 10: + goto tr584 + case 12: + goto tr450 + case 13: + goto tr586 + case 32: + goto tr583 + case 34: + goto tr31 + case 92: + goto st76 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st639 + } + case ( m.data)[( m.p)] >= 9: + goto tr583 + } + goto st6 + st639: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof639 + } + st_case_639: + switch ( m.data)[( m.p)] { + case 10: + goto tr584 + case 12: + goto tr450 + case 13: + goto tr586 + case 32: + goto tr583 + case 34: + goto tr31 + case 92: + goto st76 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st640 + } + case ( m.data)[( m.p)] >= 9: + goto tr583 + } + goto st6 + st640: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof640 + } + st_case_640: + switch ( m.data)[( m.p)] { + case 10: + goto tr584 + case 12: + goto tr450 + case 13: + goto tr586 + case 32: + goto tr583 + case 34: + goto tr31 + case 92: + goto st76 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st641 + } + case ( m.data)[( m.p)] >= 9: + goto tr583 + } + goto st6 + st641: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof641 + } + st_case_641: + switch ( m.data)[( m.p)] { + case 10: + goto tr584 + case 12: + goto tr450 + case 13: + goto tr586 + case 32: + goto tr583 + case 34: + goto tr31 + case 92: + goto st76 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st642 + } + case ( m.data)[( m.p)] >= 9: + goto tr583 + } + goto st6 + st642: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof642 + } + st_case_642: + switch ( m.data)[( m.p)] { + case 10: + goto tr584 + case 12: + goto tr450 + case 13: + goto tr586 + case 32: + goto tr583 + case 34: + goto tr31 + case 92: + goto st76 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st643 + } + case ( m.data)[( m.p)] >= 9: + goto tr583 + } + goto st6 + st643: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof643 + } + 
st_case_643: + switch ( m.data)[( m.p)] { + case 10: + goto tr584 + case 12: + goto tr450 + case 13: + goto tr586 + case 32: + goto tr583 + case 34: + goto tr31 + case 92: + goto st76 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st644 + } + case ( m.data)[( m.p)] >= 9: + goto tr583 + } + goto st6 + st644: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof644 + } + st_case_644: + switch ( m.data)[( m.p)] { + case 10: + goto tr584 + case 12: + goto tr450 + case 13: + goto tr586 + case 32: + goto tr583 + case 34: + goto tr31 + case 92: + goto st76 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st645 + } + case ( m.data)[( m.p)] >= 9: + goto tr583 + } + goto st6 + st645: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof645 + } + st_case_645: + switch ( m.data)[( m.p)] { + case 10: + goto tr584 + case 12: + goto tr450 + case 13: + goto tr586 + case 32: + goto tr583 + case 34: + goto tr31 + case 92: + goto st76 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st646 + } + case ( m.data)[( m.p)] >= 9: + goto tr583 + } + goto st6 + st646: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof646 + } + st_case_646: + switch ( m.data)[( m.p)] { + case 10: + goto tr584 + case 12: + goto tr450 + case 13: + goto tr586 + case 32: + goto tr583 + case 34: + goto tr31 + case 92: + goto st76 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st647 + } + case ( m.data)[( m.p)] >= 9: + goto tr583 + } + goto st6 + st647: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof647 + } + st_case_647: + switch ( m.data)[( m.p)] { + case 10: + goto tr584 + case 12: + goto tr450 + case 13: + goto tr586 + case 32: + goto tr583 + case 34: + goto tr31 + case 92: + goto st76 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st648 + } + case ( m.data)[( m.p)] >= 9: + goto tr583 + } + goto st6 + st648: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof648 + } + st_case_648: + switch ( m.data)[( m.p)] { + case 10: + goto tr584 + case 12: + goto tr450 + case 13: + goto tr586 + case 32: + goto tr583 + case 34: + goto tr31 + case 92: + goto st76 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st649 + } + case ( m.data)[( m.p)] >= 9: + goto tr583 + } + goto st6 + st649: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof649 + } + st_case_649: + switch ( m.data)[( m.p)] { + case 10: + goto tr584 + case 12: + goto tr450 + case 13: + goto tr586 + case 32: + goto tr583 + case 34: + goto tr31 + case 92: + goto st76 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st650 + } + case ( m.data)[( m.p)] >= 9: + goto tr583 + } + goto st6 + st650: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof650 + } + st_case_650: + switch ( m.data)[( m.p)] { + case 10: + goto tr584 + case 12: + goto tr450 + case 13: + goto tr586 + case 32: + goto tr583 + case 34: + goto tr31 + case 92: + goto st76 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st651 + } + case ( m.data)[( m.p)] >= 9: + goto tr583 + } + goto st6 + st651: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof651 + } + st_case_651: + switch ( m.data)[( m.p)] { + case 10: + goto tr584 + case 12: + 
goto tr450 + case 13: + goto tr586 + case 32: + goto tr583 + case 34: + goto tr31 + case 92: + goto st76 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st652 + } + case ( m.data)[( m.p)] >= 9: + goto tr583 + } + goto st6 + st652: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof652 + } + st_case_652: + switch ( m.data)[( m.p)] { + case 10: + goto tr584 + case 12: + goto tr450 + case 13: + goto tr586 + case 32: + goto tr583 + case 34: + goto tr31 + case 92: + goto st76 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st653 + } + case ( m.data)[( m.p)] >= 9: + goto tr583 + } + goto st6 + st653: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof653 + } + st_case_653: + switch ( m.data)[( m.p)] { + case 10: + goto tr584 + case 12: + goto tr450 + case 13: + goto tr586 + case 32: + goto tr583 + case 34: + goto tr31 + case 92: + goto st76 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st654 + } + case ( m.data)[( m.p)] >= 9: + goto tr583 + } + goto st6 + st654: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof654 + } + st_case_654: + switch ( m.data)[( m.p)] { + case 10: + goto tr584 + case 12: + goto tr450 + case 13: + goto tr586 + case 32: + goto tr583 + case 34: + goto tr31 + case 92: + goto st76 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr583 + } + goto st6 +tr477: +//line plugins/parsers/influx/machine.go.rl:19 - key = m.text() + m.pb = m.p goto st202 +tr962: + ( m.cs) = 202 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr967: + ( m.cs) = 202 +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr970: + ( m.cs) = 202 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again +tr973: + ( m.cs) = 202 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; goto _out } + } + + goto _again st202: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof202 } st_case_202: -//line plugins/parsers/influx/machine.go:22522 - switch ( m.data)[( m.p)] { - case 32: - goto tr52 - case 44: - goto tr52 - case 61: - goto tr52 - case 92: - goto tr353 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 - } - case ( m.data)[( m.p)] >= 9: - goto tr52 - } - goto tr352 -tr352: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st607 -tr840: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st607 - st607: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof607 - } - st_case_607: -//line plugins/parsers/influx/machine.go:22559 +//line plugins/parsers/influx/machine.go:26122 switch ( m.data)[( m.p)] { + case 9: + goto st6 case 10: - goto tr839 - case 11: - goto tr840 + goto st7 + case 12: + goto tr8 case 13: - goto tr839 + goto st8 case 32: - goto tr838 + goto st6 + case 34: + goto tr377 case 44: - goto tr841 + goto st6 case 61: - goto tr52 + goto st6 case 92: - goto st203 + goto tr378 } - if 9 <= ( 
m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr838 - } - goto st607 -tr353: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr376 +tr376: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -22587,20 +26151,32 @@ tr353: goto _test_eof203 } st_case_203: -//line plugins/parsers/influx/machine.go:22591 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 - } - case ( m.data)[( m.p)] >= 9: - goto tr52 +//line plugins/parsers/influx/machine.go:26155 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto st7 + case 12: + goto tr8 + case 13: + goto st8 + case 32: + goto st6 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr380 + case 92: + goto st217 } - goto st607 -tr348: -//line plugins/parsers/influx/machine.go.rl:18 + goto st203 +tr380: +//line plugins/parsers/influx/machine.go.rl:99 - m.pb = m.p + key = m.text() goto st204 st204: @@ -22608,18 +26184,39 @@ tr348: goto _test_eof204 } st_case_204: -//line plugins/parsers/influx/machine.go:22612 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 - } - case ( m.data)[( m.p)] >= 9: - goto tr52 +//line plugins/parsers/influx/machine.go:26188 + switch ( m.data)[( m.p)] { + case 10: + goto st7 + case 12: + goto tr8 + case 13: + goto st8 + case 34: + goto tr353 + case 45: + goto tr108 + case 46: + goto tr109 + case 48: + goto tr110 + case 70: + goto tr112 + case 84: + goto tr113 + case 92: + goto st76 + case 102: + goto tr114 + case 116: + goto tr115 } - goto st201 -tr346: -//line plugins/parsers/influx/machine.go.rl:18 + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr111 + } + goto st6 +tr108: +//line plugins/parsers/influx/machine.go.rl:19 m.pb = m.p @@ -22629,7 +26226,3075 @@ tr346: goto _test_eof205 } st_case_205: -//line plugins/parsers/influx/machine.go:22633 +//line plugins/parsers/influx/machine.go:26230 + switch ( m.data)[( m.p)] { + case 10: + goto st7 + case 12: + goto tr8 + case 13: + goto st8 + case 34: + goto tr31 + case 46: + goto st206 + case 48: + goto st657 + case 92: + goto st76 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st660 + } + goto st6 +tr109: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st206 + st206: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof206 + } + st_case_206: +//line plugins/parsers/influx/machine.go:26262 + switch ( m.data)[( m.p)] { + case 10: + goto st7 + case 12: + goto tr8 + case 13: + goto st8 + case 34: + goto tr31 + case 92: + goto st76 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st655 + } + goto st6 + st655: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof655 + } + st_case_655: + switch ( m.data)[( m.p)] { + case 10: + goto tr620 + case 12: + goto tr516 + case 13: + goto tr623 + case 32: + goto tr961 + case 34: + goto tr31 + case 44: + goto tr962 + case 69: + goto st207 + case 92: + goto st76 + case 101: + goto st207 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st655 + } + case ( m.data)[( m.p)] >= 9: + goto tr961 + } + goto st6 + st207: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof207 + } + st_case_207: + switch ( m.data)[( m.p)] { + case 10: + goto st7 + case 12: + goto tr8 + case 13: + goto st8 + case 34: + goto tr354 + case 43: + goto st208 + case 45: + goto st208 + case 92: + goto st76 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st656 
+ } + goto st6 + st208: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof208 + } + st_case_208: + switch ( m.data)[( m.p)] { + case 10: + goto st7 + case 12: + goto tr8 + case 13: + goto st8 + case 34: + goto tr31 + case 92: + goto st76 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st656 + } + goto st6 + st656: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof656 + } + st_case_656: + switch ( m.data)[( m.p)] { + case 10: + goto tr620 + case 12: + goto tr516 + case 13: + goto tr623 + case 32: + goto tr961 + case 34: + goto tr31 + case 44: + goto tr962 + case 92: + goto st76 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st656 + } + case ( m.data)[( m.p)] >= 9: + goto tr961 + } + goto st6 + st657: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof657 + } + st_case_657: + switch ( m.data)[( m.p)] { + case 10: + goto tr620 + case 12: + goto tr516 + case 13: + goto tr623 + case 32: + goto tr961 + case 34: + goto tr31 + case 44: + goto tr962 + case 46: + goto st655 + case 69: + goto st207 + case 92: + goto st76 + case 101: + goto st207 + case 105: + goto st659 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st658 + } + case ( m.data)[( m.p)] >= 9: + goto tr961 + } + goto st6 + st658: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof658 + } + st_case_658: + switch ( m.data)[( m.p)] { + case 10: + goto tr620 + case 12: + goto tr516 + case 13: + goto tr623 + case 32: + goto tr961 + case 34: + goto tr31 + case 44: + goto tr962 + case 46: + goto st655 + case 69: + goto st207 + case 92: + goto st76 + case 101: + goto st207 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st658 + } + case ( m.data)[( m.p)] >= 9: + goto tr961 + } + goto st6 + st659: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof659 + } + st_case_659: + switch ( m.data)[( m.p)] { + case 10: + goto tr778 + case 12: + goto tr909 + case 13: + goto tr780 + case 32: + goto tr966 + case 34: + goto tr31 + case 44: + goto tr967 + case 92: + goto st76 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr966 + } + goto st6 + st660: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof660 + } + st_case_660: + switch ( m.data)[( m.p)] { + case 10: + goto tr620 + case 12: + goto tr516 + case 13: + goto tr623 + case 32: + goto tr961 + case 34: + goto tr31 + case 44: + goto tr962 + case 46: + goto st655 + case 69: + goto st207 + case 92: + goto st76 + case 101: + goto st207 + case 105: + goto st659 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st660 + } + case ( m.data)[( m.p)] >= 9: + goto tr961 + } + goto st6 +tr110: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st661 + st661: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof661 + } + st_case_661: +//line plugins/parsers/influx/machine.go:26537 + switch ( m.data)[( m.p)] { + case 10: + goto tr620 + case 12: + goto tr516 + case 13: + goto tr623 + case 32: + goto tr961 + case 34: + goto tr31 + case 44: + goto tr962 + case 46: + goto st655 + case 69: + goto st207 + case 92: + goto st76 + case 101: + goto st207 + case 105: + goto st659 + case 117: + goto st662 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st658 + } + case ( m.data)[( m.p)] >= 9: + goto tr961 + } + goto st6 + st662: + if ( m.p)++; ( m.p) == ( m.pe) 
{ + goto _test_eof662 + } + st_case_662: + switch ( m.data)[( m.p)] { + case 10: + goto tr784 + case 12: + goto tr912 + case 13: + goto tr786 + case 32: + goto tr969 + case 34: + goto tr31 + case 44: + goto tr970 + case 92: + goto st76 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr969 + } + goto st6 +tr111: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st663 + st663: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof663 + } + st_case_663: +//line plugins/parsers/influx/machine.go:26609 + switch ( m.data)[( m.p)] { + case 10: + goto tr620 + case 12: + goto tr516 + case 13: + goto tr623 + case 32: + goto tr961 + case 34: + goto tr31 + case 44: + goto tr962 + case 46: + goto st655 + case 69: + goto st207 + case 92: + goto st76 + case 101: + goto st207 + case 105: + goto st659 + case 117: + goto st662 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st663 + } + case ( m.data)[( m.p)] >= 9: + goto tr961 + } + goto st6 +tr112: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st664 + st664: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof664 + } + st_case_664: +//line plugins/parsers/influx/machine.go:26656 + switch ( m.data)[( m.p)] { + case 10: + goto tr790 + case 12: + goto tr916 + case 13: + goto tr792 + case 32: + goto tr972 + case 34: + goto tr31 + case 44: + goto tr973 + case 65: + goto st209 + case 92: + goto st76 + case 97: + goto st212 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr972 + } + goto st6 + st209: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof209 + } + st_case_209: + switch ( m.data)[( m.p)] { + case 10: + goto st7 + case 12: + goto tr8 + case 13: + goto st8 + case 34: + goto tr31 + case 76: + goto st210 + case 92: + goto st76 + } + goto st6 + st210: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof210 + } + st_case_210: + switch ( m.data)[( m.p)] { + case 10: + goto st7 + case 12: + goto tr8 + case 13: + goto st8 + case 34: + goto tr31 + case 83: + goto st211 + case 92: + goto st76 + } + goto st6 + st211: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof211 + } + st_case_211: + switch ( m.data)[( m.p)] { + case 10: + goto st7 + case 12: + goto tr8 + case 13: + goto st8 + case 34: + goto tr31 + case 69: + goto st665 + case 92: + goto st76 + } + goto st6 + st665: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof665 + } + st_case_665: + switch ( m.data)[( m.p)] { + case 10: + goto tr790 + case 12: + goto tr916 + case 13: + goto tr792 + case 32: + goto tr972 + case 34: + goto tr31 + case 44: + goto tr973 + case 92: + goto st76 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr972 + } + goto st6 + st212: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof212 + } + st_case_212: + switch ( m.data)[( m.p)] { + case 10: + goto st7 + case 12: + goto tr8 + case 13: + goto st8 + case 34: + goto tr31 + case 92: + goto st76 + case 108: + goto st213 + } + goto st6 + st213: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof213 + } + st_case_213: + switch ( m.data)[( m.p)] { + case 10: + goto st7 + case 12: + goto tr8 + case 13: + goto st8 + case 34: + goto tr31 + case 92: + goto st76 + case 115: + goto st214 + } + goto st6 + st214: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof214 + } + st_case_214: + switch ( m.data)[( m.p)] { + case 10: + goto st7 + case 12: + goto tr8 + case 13: + goto st8 + case 34: + goto tr31 + case 92: + goto st76 + case 101: + goto st665 + } + goto st6 +tr113: +//line 
plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st666 + st666: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof666 + } + st_case_666: +//line plugins/parsers/influx/machine.go:26837 + switch ( m.data)[( m.p)] { + case 10: + goto tr790 + case 12: + goto tr916 + case 13: + goto tr792 + case 32: + goto tr972 + case 34: + goto tr31 + case 44: + goto tr973 + case 82: + goto st215 + case 92: + goto st76 + case 114: + goto st216 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr972 + } + goto st6 + st215: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof215 + } + st_case_215: + switch ( m.data)[( m.p)] { + case 10: + goto st7 + case 12: + goto tr8 + case 13: + goto st8 + case 34: + goto tr31 + case 85: + goto st211 + case 92: + goto st76 + } + goto st6 + st216: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof216 + } + st_case_216: + switch ( m.data)[( m.p)] { + case 10: + goto st7 + case 12: + goto tr8 + case 13: + goto st8 + case 34: + goto tr31 + case 92: + goto st76 + case 117: + goto st214 + } + goto st6 +tr114: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st667 + st667: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof667 + } + st_case_667: +//line plugins/parsers/influx/machine.go:26913 + switch ( m.data)[( m.p)] { + case 10: + goto tr790 + case 12: + goto tr916 + case 13: + goto tr792 + case 32: + goto tr972 + case 34: + goto tr31 + case 44: + goto tr973 + case 92: + goto st76 + case 97: + goto st212 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr972 + } + goto st6 +tr115: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st668 + st668: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof668 + } + st_case_668: +//line plugins/parsers/influx/machine.go:26947 + switch ( m.data)[( m.p)] { + case 10: + goto tr790 + case 12: + goto tr916 + case 13: + goto tr792 + case 32: + goto tr972 + case 34: + goto tr31 + case 44: + goto tr973 + case 92: + goto st76 + case 114: + goto st216 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr972 + } + goto st6 +tr378: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st217 + st217: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof217 + } + st_case_217: +//line plugins/parsers/influx/machine.go:26981 + switch ( m.data)[( m.p)] { + case 34: + goto st203 + case 92: + goto st203 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 + } + case ( m.data)[( m.p)] >= 9: + goto tr8 + } + goto st3 +tr96: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st218 + st218: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof218 + } + st_case_218: +//line plugins/parsers/influx/machine.go:27008 + switch ( m.data)[( m.p)] { + case 9: + goto st32 + case 10: + goto st7 + case 11: + goto tr96 + case 12: + goto st2 + case 13: + goto st8 + case 32: + goto st32 + case 34: + goto tr97 + case 44: + goto st6 + case 61: + goto tr101 + case 92: + goto tr98 + } + goto tr94 +tr74: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st219 + st219: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof219 + } + st_case_219: +//line plugins/parsers/influx/machine.go:27043 + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr3 + case 13: + goto tr47 + case 32: + goto tr1 + case 44: + goto tr4 + case 46: + goto st220 + case 48: + goto st670 + case 92: + goto st96 + } + switch { + case ( m.data)[( m.p)] > 12: + if 49 <= 
( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st673 + } + case ( m.data)[( m.p)] >= 9: + goto tr1 + } + goto st1 +tr75: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st220 + st220: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof220 + } + st_case_220: +//line plugins/parsers/influx/machine.go:27082 + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr3 + case 13: + goto tr47 + case 32: + goto tr1 + case 44: + goto tr4 + case 92: + goto st96 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st669 + } + case ( m.data)[( m.p)] >= 9: + goto tr1 + } + goto st1 + st669: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof669 + } + st_case_669: + switch ( m.data)[( m.p)] { + case 10: + goto tr715 + case 11: + goto tr798 + case 13: + goto tr717 + case 32: + goto tr622 + case 44: + goto tr799 + case 69: + goto st221 + case 92: + goto st96 + case 101: + goto st221 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st669 + } + case ( m.data)[( m.p)] >= 9: + goto tr622 + } + goto st1 + st221: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof221 + } + st_case_221: + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr3 + case 13: + goto tr47 + case 32: + goto tr1 + case 34: + goto st222 + case 44: + goto tr4 + case 92: + goto st96 + } + switch { + case ( m.data)[( m.p)] < 43: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + case ( m.data)[( m.p)] > 45: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st520 + } + default: + goto st222 + } + goto st1 + st222: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof222 + } + st_case_222: + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr3 + case 13: + goto tr47 + case 32: + goto tr1 + case 44: + goto tr4 + case 92: + goto st96 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st520 + } + case ( m.data)[( m.p)] >= 9: + goto tr1 + } + goto st1 + st670: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof670 + } + st_case_670: + switch ( m.data)[( m.p)] { + case 10: + goto tr715 + case 11: + goto tr798 + case 13: + goto tr717 + case 32: + goto tr622 + case 44: + goto tr799 + case 46: + goto st669 + case 69: + goto st221 + case 92: + goto st96 + case 101: + goto st221 + case 105: + goto st672 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st671 + } + case ( m.data)[( m.p)] >= 9: + goto tr622 + } + goto st1 + st671: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof671 + } + st_case_671: + switch ( m.data)[( m.p)] { + case 10: + goto tr715 + case 11: + goto tr798 + case 13: + goto tr717 + case 32: + goto tr622 + case 44: + goto tr799 + case 46: + goto st669 + case 69: + goto st221 + case 92: + goto st96 + case 101: + goto st221 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st671 + } + case ( m.data)[( m.p)] >= 9: + goto tr622 + } + goto st1 + st672: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof672 + } + st_case_672: + switch ( m.data)[( m.p)] { + case 10: + goto tr925 + case 11: + goto tr981 + case 13: + goto tr927 + case 32: + goto tr804 + case 44: + goto tr982 + case 92: + goto st96 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr804 + } + goto st1 + st673: + if ( m.p)++; ( m.p) == ( m.pe) { + 
goto _test_eof673 + } + st_case_673: + switch ( m.data)[( m.p)] { + case 10: + goto tr715 + case 11: + goto tr798 + case 13: + goto tr717 + case 32: + goto tr622 + case 44: + goto tr799 + case 46: + goto st669 + case 69: + goto st221 + case 92: + goto st96 + case 101: + goto st221 + case 105: + goto st672 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st673 + } + case ( m.data)[( m.p)] >= 9: + goto tr622 + } + goto st1 +tr76: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st674 + st674: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof674 + } + st_case_674: +//line plugins/parsers/influx/machine.go:27340 + switch ( m.data)[( m.p)] { + case 10: + goto tr715 + case 11: + goto tr798 + case 13: + goto tr717 + case 32: + goto tr622 + case 44: + goto tr799 + case 46: + goto st669 + case 69: + goto st221 + case 92: + goto st96 + case 101: + goto st221 + case 105: + goto st672 + case 117: + goto st675 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st671 + } + case ( m.data)[( m.p)] >= 9: + goto tr622 + } + goto st1 + st675: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof675 + } + st_case_675: + switch ( m.data)[( m.p)] { + case 10: + goto tr930 + case 11: + goto tr984 + case 13: + goto tr932 + case 32: + goto tr809 + case 44: + goto tr985 + case 92: + goto st96 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr809 + } + goto st1 +tr77: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st676 + st676: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof676 + } + st_case_676: +//line plugins/parsers/influx/machine.go:27408 + switch ( m.data)[( m.p)] { + case 10: + goto tr715 + case 11: + goto tr798 + case 13: + goto tr717 + case 32: + goto tr622 + case 44: + goto tr799 + case 46: + goto st669 + case 69: + goto st221 + case 92: + goto st96 + case 101: + goto st221 + case 105: + goto st672 + case 117: + goto st675 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st676 + } + case ( m.data)[( m.p)] >= 9: + goto tr622 + } + goto st1 +tr78: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st677 + st677: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof677 + } + st_case_677: +//line plugins/parsers/influx/machine.go:27453 + switch ( m.data)[( m.p)] { + case 10: + goto tr935 + case 11: + goto tr987 + case 13: + goto tr937 + case 32: + goto tr814 + case 44: + goto tr988 + case 65: + goto st223 + case 92: + goto st96 + case 97: + goto st226 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr814 + } + goto st1 + st223: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof223 + } + st_case_223: + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr3 + case 13: + goto tr47 + case 32: + goto tr1 + case 44: + goto tr4 + case 76: + goto st224 + case 92: + goto st96 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto st1 + st224: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof224 + } + st_case_224: + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr3 + case 13: + goto tr47 + case 32: + goto tr1 + case 44: + goto tr4 + case 83: + goto st225 + case 92: + goto st96 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto st1 + st225: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof225 + } + st_case_225: + switch ( 
m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr3 + case 13: + goto tr47 + case 32: + goto tr1 + case 44: + goto tr4 + case 69: + goto st678 + case 92: + goto st96 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto st1 + st678: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof678 + } + st_case_678: + switch ( m.data)[( m.p)] { + case 10: + goto tr935 + case 11: + goto tr987 + case 13: + goto tr937 + case 32: + goto tr814 + case 44: + goto tr988 + case 92: + goto st96 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr814 + } + goto st1 + st226: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof226 + } + st_case_226: + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr3 + case 13: + goto tr47 + case 32: + goto tr1 + case 44: + goto tr4 + case 92: + goto st96 + case 108: + goto st227 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto st1 + st227: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof227 + } + st_case_227: + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr3 + case 13: + goto tr47 + case 32: + goto tr1 + case 44: + goto tr4 + case 92: + goto st96 + case 115: + goto st228 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto st1 + st228: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof228 + } + st_case_228: + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr3 + case 13: + goto tr47 + case 32: + goto tr1 + case 44: + goto tr4 + case 92: + goto st96 + case 101: + goto st678 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto st1 +tr79: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st679 + st679: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof679 + } + st_case_679: +//line plugins/parsers/influx/machine.go:27660 + switch ( m.data)[( m.p)] { + case 10: + goto tr935 + case 11: + goto tr987 + case 13: + goto tr937 + case 32: + goto tr814 + case 44: + goto tr988 + case 82: + goto st229 + case 92: + goto st96 + case 114: + goto st230 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr814 + } + goto st1 + st229: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof229 + } + st_case_229: + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr3 + case 13: + goto tr47 + case 32: + goto tr1 + case 44: + goto tr4 + case 85: + goto st225 + case 92: + goto st96 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto st1 + st230: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof230 + } + st_case_230: + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr3 + case 13: + goto tr47 + case 32: + goto tr1 + case 44: + goto tr4 + case 92: + goto st96 + case 117: + goto st228 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto st1 +tr80: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st680 + st680: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof680 + } + st_case_680: +//line plugins/parsers/influx/machine.go:27744 + switch ( m.data)[( m.p)] { + case 10: + goto tr935 + case 11: + goto tr987 + case 13: + goto tr937 + case 32: + goto tr814 + case 44: + goto tr988 + case 92: + goto st96 + case 97: + goto st226 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr814 + } + goto st1 +tr81: +//line plugins/parsers/influx/machine.go.rl:19 + + m.pb = m.p + + goto st681 + st681: + if ( m.p)++; ( m.p) == ( 
m.pe) {
+			goto _test_eof681
+		}
+	st_case_681:
+//line plugins/parsers/influx/machine.go:27776
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr935
+		case 11:
+			goto tr987
+		case 13:
+			goto tr937
+		case 32:
+			goto tr814
+		case 44:
+			goto tr988
+		case 92:
+			goto st96
+		case 114:
+			goto st230
+		}
+		if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+			goto tr814
+		}
+		goto st1
+tr44:
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+	goto st231
+tr405:
+	( m.cs) = 231
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+//line plugins/parsers/influx/machine.go.rl:77
+
+	err = m.handler.SetMeasurement(m.text())
+	if err != nil {
+		( m.p)--
+
+		( m.cs) = 247;
+		{( m.p)++; goto _out }
+	}
+
+	goto _again
+	st231:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof231
+		}
+	st_case_231:
+//line plugins/parsers/influx/machine.go:27825
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr404
+		case 11:
+			goto tr405
+		case 13:
+			goto tr404
+		case 32:
+			goto tr38
+		case 44:
+			goto tr4
+		case 61:
+			goto tr406
+		case 92:
+			goto tr45
+		}
+		if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+			goto tr38
+		}
+		goto tr41
+tr40:
+	( m.cs) = 232
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+//line plugins/parsers/influx/machine.go.rl:77
+
+	err = m.handler.SetMeasurement(m.text())
+	if err != nil {
+		( m.p)--
+
+		( m.cs) = 247;
+		{( m.p)++; goto _out }
+	}
+
+	goto _again
+	st232:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof232
+		}
+	st_case_232:
+//line plugins/parsers/influx/machine.go:27868
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr404
+		case 11:
+			goto tr405
+		case 13:
+			goto tr404
+		case 32:
+			goto tr38
+		case 44:
+			goto tr4
+		case 61:
+			goto tr33
+		case 92:
+			goto tr45
+		}
+		if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+			goto tr38
+		}
+		goto tr41
+tr445:
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+	goto st233
+	st233:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof233
+		}
+	st_case_233:
+//line plugins/parsers/influx/machine.go:27900
+		if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+			goto st682
+		}
+		goto tr407
+tr446:
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+	goto st682
+	st682:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof682
+		}
+	st_case_682:
+//line plugins/parsers/influx/machine.go:27916
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr451
+		case 13:
+			goto tr453
+		case 32:
+			goto tr450
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st683
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr450
+		}
+		goto tr407
+	st683:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof683
+		}
+	st_case_683:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr451
+		case 13:
+			goto tr453
+		case 32:
+			goto tr450
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st684
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr450
+		}
+		goto tr407
+	st684:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof684
+		}
+	st_case_684:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr451
+		case 13:
+			goto tr453
+		case 32:
+			goto tr450
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st685
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr450
+		}
+		goto tr407
+	st685:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof685
+		}
+	st_case_685:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr451
+		case 13:
+			goto tr453
+		case 32:
+			goto tr450
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st686
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr450
+		}
+		goto tr407
+	st686:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof686
+		}
+	st_case_686:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr451
+		case 13:
+			goto tr453
+		case 32:
+			goto tr450
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st687
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr450
+		}
+		goto tr407
+	st687:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof687
+		}
+	st_case_687:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr451
+		case 13:
+			goto tr453
+		case 32:
+			goto tr450
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st688
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr450
+		}
+		goto tr407
+	st688:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof688
+		}
+	st_case_688:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr451
+		case 13:
+			goto tr453
+		case 32:
+			goto tr450
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st689
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr450
+		}
+		goto tr407
+	st689:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof689
+		}
+	st_case_689:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr451
+		case 13:
+			goto tr453
+		case 32:
+			goto tr450
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st690
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr450
+		}
+		goto tr407
+	st690:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof690
+		}
+	st_case_690:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr451
+		case 13:
+			goto tr453
+		case 32:
+			goto tr450
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st691
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr450
+		}
+		goto tr407
+	st691:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof691
+		}
+	st_case_691:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr451
+		case 13:
+			goto tr453
+		case 32:
+			goto tr450
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st692
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr450
+		}
+		goto tr407
+	st692:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof692
+		}
+	st_case_692:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr451
+		case 13:
+			goto tr453
+		case 32:
+			goto tr450
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st693
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr450
+		}
+		goto tr407
+	st693:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof693
+		}
+	st_case_693:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr451
+		case 13:
+			goto tr453
+		case 32:
+			goto tr450
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st694
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr450
+		}
+		goto tr407
+	st694:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof694
+		}
+	st_case_694:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr451
+		case 13:
+			goto tr453
+		case 32:
+			goto tr450
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st695
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr450
+		}
+		goto tr407
+	st695:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof695
+		}
+	st_case_695:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr451
+		case 13:
+			goto tr453
+		case 32:
+			goto tr450
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st696
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr450
+		}
+		goto tr407
+	st696:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof696
+		}
+	st_case_696:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr451
+		case 13:
+			goto tr453
+		case 32:
+			goto tr450
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st697
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr450
+		}
+		goto tr407
+	st697:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof697
+		}
+	st_case_697:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr451
+		case 13:
+			goto tr453
+		case 32:
+			goto tr450
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st698
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr450
+		}
+		goto tr407
+	st698:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof698
+		}
+	st_case_698:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr451
+		case 13:
+			goto tr453
+		case 32:
+			goto tr450
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st699
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr450
+		}
+		goto tr407
+	st699:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof699
+		}
+	st_case_699:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr451
+		case 13:
+			goto tr453
+		case 32:
+			goto tr450
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st700
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr450
+		}
+		goto tr407
+	st700:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof700
+		}
+	st_case_700:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr451
+		case 13:
+			goto tr453
+		case 32:
+			goto tr450
+		}
+		if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+			goto tr450
+		}
+		goto tr407
+tr15:
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+	goto st234
+	st234:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof234
+		}
+	st_case_234:
+//line plugins/parsers/influx/machine.go:28336
+		switch ( m.data)[( m.p)] {
+		case 46:
+			goto st235
+		case 48:
+			goto st702
+		}
+		if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+			goto st705
+		}
+		goto tr8
+tr16:
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+	goto st235
+	st235:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof235
+		}
+	st_case_235:
+//line plugins/parsers/influx/machine.go:28358
+		if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+			goto st701
+		}
+		goto tr8
+	st701:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof701
+		}
+	st_case_701:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr715
+		case 13:
+			goto tr717
+		case 32:
+			goto tr516
+		case 44:
+			goto tr907
+		case 69:
+			goto st236
+		case 101:
+			goto st236
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st701
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr516
+		}
+		goto tr105
+	st236:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof236
+		}
+	st_case_236:
+		switch ( m.data)[( m.p)] {
+		case 34:
+			goto st237
+		case 43:
+			goto st237
+		case 45:
+			goto st237
+		}
+		if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+			goto st611
+		}
+		goto tr8
+	st237:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof237
+		}
+	st_case_237:
+		if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+			goto st611
+		}
+		goto tr8
+	st702:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof702
+		}
+	st_case_702:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr715
+		case 13:
+			goto tr717
+		case 32:
+			goto tr516
+		case 44:
+			goto tr907
+		case 46:
+			goto st701
+		case 69:
+			goto st236
+		case 101:
+			goto st236
+		case 105:
+			goto st704
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st703
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr516
+		}
+		goto tr105
+	st703:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof703
+		}
+	st_case_703:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr715
+		case 13:
+			goto tr717
+		case 32:
+			goto tr516
+		case 44:
+			goto tr907
+		case 46:
+			goto st701
+		case 69:
+			goto st236
+		case 101:
+			goto st236
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st703
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr516
+		}
+		goto tr105
+	st704:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof704
+		}
+	st_case_704:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr925
+		case 13:
+			goto tr927
+		case 32:
+			goto tr909
+		case 44:
+			goto tr1014
+		}
+		if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+			goto tr909
+		}
+		goto tr105
+	st705:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof705
+		}
+	st_case_705:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr715
+		case 13:
+			goto tr717
+		case 32:
+			goto tr516
+		case 44:
+			goto tr907
+		case 46:
+			goto st701
+		case 69:
+			goto st236
+		case 101:
+			goto st236
+		case 105:
+			goto st704
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st705
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr516
+		}
+		goto tr105
+tr17:
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+	goto st706
+	st706:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof706
+		}
+	st_case_706:
+//line plugins/parsers/influx/machine.go:28541
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr715
+		case 13:
+			goto tr717
+		case 32:
+			goto tr516
+		case 44:
+			goto tr907
+		case 46:
+			goto st701
+		case 69:
+			goto st236
+		case 101:
+			goto st236
+		case 105:
+			goto st704
+		case 117:
+			goto st707
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st703
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr516
+		}
+		goto tr105
+	st707:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof707
+		}
+	st_case_707:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr930
+		case 13:
+			goto tr932
+		case 32:
+			goto tr912
+		case 44:
+			goto tr1016
+		}
+		if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+			goto tr912
+		}
+		goto tr105
+tr18:
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+	goto st708
+	st708:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof708
+		}
+	st_case_708:
+//line plugins/parsers/influx/machine.go:28601
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr715
+		case 13:
+			goto tr717
+		case 32:
+			goto tr516
+		case 44:
+			goto tr907
+		case 46:
+			goto st701
+		case 69:
+			goto st236
+		case 101:
+			goto st236
+		case 105:
+			goto st704
+		case 117:
+			goto st707
+		}
+		switch {
+		case ( m.data)[( m.p)] > 12:
+			if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 {
+				goto st708
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr516
+		}
+		goto tr105
+tr19:
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+	goto st709
+	st709:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof709
+		}
+	st_case_709:
+//line plugins/parsers/influx/machine.go:28642
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr935
+		case 13:
+			goto tr937
+		case 32:
+			goto tr916
+		case 44:
+			goto tr1018
+		case 65:
+			goto st238
+		case 97:
+			goto st241
+		}
+		if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+			goto tr916
+		}
+		goto tr105
+	st238:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof238
+		}
+	st_case_238:
+		if ( m.data)[( m.p)] == 76 {
+			goto st239
+		}
+		goto tr8
+	st239:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof239
+		}
+	st_case_239:
+		if ( m.data)[( m.p)] == 83 {
+			goto st240
+		}
+		goto tr8
+	st240:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof240
+		}
+	st_case_240:
+		if ( m.data)[( m.p)] == 69 {
+			goto st710
+		}
+		goto tr8
+	st710:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof710
+		}
+	st_case_710:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr935
+		case 13:
+			goto tr937
+		case 32:
+			goto tr916
+		case 44:
+			goto tr1018
+		}
+		if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+			goto tr916
+		}
+		goto tr105
+	st241:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof241
+		}
+	st_case_241:
+		if ( m.data)[( m.p)] == 108 {
+			goto st242
+		}
+		goto tr8
+	st242:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof242
+		}
+	st_case_242:
+		if ( m.data)[( m.p)] == 115 {
+			goto st243
+		}
+		goto tr8
+	st243:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof243
+		}
+	st_case_243:
+		if ( m.data)[( m.p)] == 101 {
+			goto st710
+		}
+		goto tr8
+tr20:
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+	goto st711
+	st711:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof711
+		}
+	st_case_711:
+//line plugins/parsers/influx/machine.go:28745
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr935
+		case 13:
+			goto tr937
+		case 32:
+			goto tr916
+		case 44:
+			goto tr1018
+		case 82:
+			goto st244
+		case 114:
+			goto st245
+		}
+		if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+			goto tr916
+		}
+		goto tr105
+	st244:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof244
+		}
+	st_case_244:
+		if ( m.data)[( m.p)] == 85 {
+			goto st240
+		}
+		goto tr8
+	st245:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof245
+		}
+	st_case_245:
+		if ( m.data)[( m.p)] == 117 {
+			goto st243
+		}
+		goto tr8
+tr21:
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+	goto st712
+	st712:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof712
+		}
+	st_case_712:
+//line plugins/parsers/influx/machine.go:28793
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr935
+		case 13:
+			goto tr937
+		case 32:
+			goto tr916
+		case 44:
+			goto tr1018
+		case 97:
+			goto st241
+		}
+		if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+			goto tr916
+		}
+		goto tr105
+tr22:
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+	goto st713
+	st713:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof713
+		}
+	st_case_713:
+//line plugins/parsers/influx/machine.go:28821
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr935
+		case 13:
+			goto tr937
+		case 32:
+			goto tr916
+		case 44:
+			goto tr1018
+		case 114:
+			goto st245
+		}
+		if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+			goto tr916
+		}
+		goto tr105
+tr9:
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+	goto st246
+	st246:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof246
+		}
+	st_case_246:
+//line plugins/parsers/influx/machine.go:28849
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto tr8
+		case 11:
+			goto tr9
+		case 13:
+			goto tr8
+		case 32:
+			goto st2
+		case 44:
+			goto tr8
+		case 61:
+			goto tr12
+		case 92:
+			goto tr10
+		}
+		if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+			goto st2
+		}
+		goto tr6
+	st247:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof247
+		}
+	st_case_247:
+		if ( m.data)[( m.p)] == 10 {
+			goto tr421
+		}
+		goto st247
+tr421:
+//line plugins/parsers/influx/machine.go.rl:69
+
+	{goto st715 }
+
+	goto st714
+	st714:
+//line plugins/parsers/influx/machine.go.rl:157
+
+	m.lineno++
+	m.sol = m.p
+	m.sol++ // next char will be the first column in the line
+
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof714
+		}
+	st_case_714:
+//line plugins/parsers/influx/machine.go:28896
+		goto st0
+	st250:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof250
+		}
+	st_case_250:
+		switch ( m.data)[( m.p)] {
+		case 32:
+			goto tr35
+		case 35:
+			goto tr35
+		case 44:
+			goto tr35
+		case 92:
+			goto tr425
+		}
+		switch {
+		case ( m.data)[( m.p)] > 10:
+			if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+				goto tr35
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr35
+		}
+		goto tr424
+tr424:
+//line plugins/parsers/influx/machine.go.rl:73
+
+	foundMetric = true
+
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+	goto st717
+	st717:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof717
+		}
+	st_case_717:
+//line plugins/parsers/influx/machine.go:28937
+		switch ( m.data)[( m.p)] {
+		case 9:
+			goto tr2
+		case 10:
+			goto tr1026
+		case 12:
+			goto tr2
+		case 13:
+			goto tr1027
+		case 32:
+			goto tr2
+		case 44:
+			goto tr1028
+		case 92:
+			goto st258
+		}
+		goto st717
+tr1026:
+	( m.cs) = 718
+//line plugins/parsers/influx/machine.go.rl:77
+
+	err = m.handler.SetMeasurement(m.text())
+	if err != nil {
+		( m.p)--
+
+		( m.cs) = 247;
+		{( m.p)++; goto _out }
+	}
+
+	goto _again
+tr1030:
+	( m.cs) = 718
+//line plugins/parsers/influx/machine.go.rl:90
+
+	err = m.handler.AddTag(key, m.text())
+	if err != nil {
+		( m.p)--
+
+		( m.cs) = 247;
+		{( m.p)++; goto _out }
+	}
+
+	goto _again
+	st718:
+//line plugins/parsers/influx/machine.go.rl:157
+
+	m.lineno++
+	m.sol = m.p
+	m.sol++ // next char will be the first column in the line
+
+//line plugins/parsers/influx/machine.go.rl:163
+
+	( m.cs) = 715;
+	{( m.p)++; goto _out }
+
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof718
+		}
+	st_case_718:
+//line plugins/parsers/influx/machine.go:28997
+		goto st0
+tr1027:
+	( m.cs) = 251
+//line plugins/parsers/influx/machine.go.rl:77
+
+	err = m.handler.SetMeasurement(m.text())
+	if err != nil {
+		( m.p)--
+
+		( m.cs) = 247;
+		{( m.p)++; goto _out }
+	}
+
+	goto _again
+tr1031:
+	( m.cs) = 251
+//line plugins/parsers/influx/machine.go.rl:90
+
+	err = m.handler.AddTag(key, m.text())
+	if err != nil {
+		( m.p)--
+
+		( m.cs) = 247;
+		{( m.p)++; goto _out }
+	}
+
+	goto _again
+	st251:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof251
+		}
+	st_case_251:
+//line plugins/parsers/influx/machine.go:29030
+		if ( m.data)[( m.p)] == 10 {
+			goto st718
+		}
+		goto st0
+tr1028:
+	( m.cs) = 252
+//line plugins/parsers/influx/machine.go.rl:77
+
+	err = m.handler.SetMeasurement(m.text())
+	if err != nil {
+		( m.p)--
+
+		( m.cs) = 247;
+		{( m.p)++; goto _out }
+	}
+
+	goto _again
+tr1032:
+	( m.cs) = 252
+//line plugins/parsers/influx/machine.go.rl:90
+
+	err = m.handler.AddTag(key, m.text())
+	if err != nil {
+		( m.p)--
+
+		( m.cs) = 247;
+		{( m.p)++; goto _out }
+	}
+
+	goto _again
+	st252:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof252
+		}
+	st_case_252:
+//line plugins/parsers/influx/machine.go:29066
+		switch ( m.data)[( m.p)] {
+		case 32:
+			goto tr2
+		case 44:
+			goto tr2
+		case 61:
+			goto tr2
+		case 92:
+			goto tr428
+		}
+		switch {
+		case ( m.data)[( m.p)] > 10:
+			if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+				goto tr2
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr2
+		}
+		goto tr427
+tr427:
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+	goto st253
+	st253:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof253
+		}
+	st_case_253:
+//line plugins/parsers/influx/machine.go:29097
+		switch ( m.data)[( m.p)] {
+		case 32:
+			goto tr2
+		case 44:
+			goto tr2
+		case 61:
+			goto tr430
+		case 92:
+			goto st256
+		}
+		switch {
+		case ( m.data)[( m.p)] > 10:
+			if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+				goto tr2
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr2
+		}
+		goto st253
+tr430:
+//line plugins/parsers/influx/machine.go.rl:86
+
+	key = m.text()
+
+	goto st254
+	st254:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof254
+		}
+	st_case_254:
+//line plugins/parsers/influx/machine.go:29128
+		switch ( m.data)[( m.p)] {
+		case 32:
+			goto tr2
+		case 44:
+			goto tr2
+		case 61:
+			goto tr2
+		case 92:
+			goto tr433
+		}
+		switch {
+		case ( m.data)[( m.p)] > 10:
+			if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+				goto tr2
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr2
+		}
+		goto tr432
+tr432:
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+	goto st719
+	st719:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof719
+		}
+	st_case_719:
+//line plugins/parsers/influx/machine.go:29159
+		switch ( m.data)[( m.p)] {
+		case 9:
+			goto tr2
+		case 10:
+			goto tr1030
+		case 12:
+			goto tr2
+		case 13:
+			goto tr1031
+		case 32:
+			goto tr2
+		case 44:
+			goto tr1032
+		case 61:
+			goto tr2
+		case 92:
+			goto st255
+		}
+		goto st719
+tr433:
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+	goto st255
+	st255:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof255
+		}
+	st_case_255:
+//line plugins/parsers/influx/machine.go:29190
+		if ( m.data)[( m.p)] == 92 {
+			goto st720
+		}
+		switch {
+		case ( m.data)[( m.p)] > 10:
+			if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+				goto tr2
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr2
+		}
+		goto st719
+	st720:
+//line plugins/parsers/influx/machine.go.rl:234
+	( m.p)--
+
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof720
+		}
+	st_case_720:
+//line plugins/parsers/influx/machine.go:29211
+		switch ( m.data)[( m.p)] {
+		case 9:
+			goto tr2
+		case 10:
+			goto tr1030
+		case 12:
+			goto tr2
+		case 13:
+			goto tr1031
+		case 32:
+			goto tr2
+		case 44:
+			goto tr1032
+		case 61:
+			goto tr2
+		case 92:
+			goto st255
+		}
+		goto st719
+tr428:
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+	goto st256
+	st256:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof256
+		}
+	st_case_256:
+//line plugins/parsers/influx/machine.go:29242
+		if ( m.data)[( m.p)] == 92 {
+			goto st257
+		}
+		switch {
+		case ( m.data)[( m.p)] > 10:
+			if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+				goto tr2
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr2
+		}
+		goto st253
+	st257:
+//line plugins/parsers/influx/machine.go.rl:234
+	( m.p)--
+
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof257
+		}
+	st_case_257:
+//line plugins/parsers/influx/machine.go:29263
+		switch ( m.data)[( m.p)] {
+		case 32:
+			goto tr2
+		case 44:
+			goto tr2
+		case 61:
+			goto tr430
+		case 92:
+			goto st256
+		}
+		switch {
+		case ( m.data)[( m.p)] > 10:
+			if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
+				goto tr2
+			}
+		case ( m.data)[( m.p)] >= 9:
+			goto tr2
+		}
+		goto st253
+tr425:
+//line plugins/parsers/influx/machine.go.rl:73
+
+	foundMetric = true
+
+//line plugins/parsers/influx/machine.go.rl:19
+
+	m.pb = m.p
+
+	goto st258
+	st258:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof258
+		}
+	st_case_258:
+//line plugins/parsers/influx/machine.go:29298
 		switch {
 		case ( m.data)[( m.p)] > 10:
 			if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 {
@@ -22638,907 +29303,1495 @@ tr346:
 		case ( m.data)[( m.p)] >= 9:
 			goto st0
 		}
-		goto st604
+		goto st717
+	st715:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof715
+		}
+	st_case_715:
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto st716
+		case 13:
+			goto st248
+		case 32:
+			goto st715
+		case 35:
+			goto st249
+		}
+		if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+			goto st715
+		}
+		goto tr1023
+	st716:
+//line plugins/parsers/influx/machine.go.rl:157
+
+	m.lineno++
+	m.sol = m.p
+	m.sol++ // next char will be the first column in the line
+
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof716
+		}
+	st_case_716:
+//line plugins/parsers/influx/machine.go:29338
+		switch ( m.data)[( m.p)] {
+		case 10:
+			goto st716
+		case 13:
+			goto st248
+		case 32:
+			goto st715
+		case 35:
+			goto st249
+		}
+		if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 {
+			goto st715
+		}
+		goto tr1023
+	st248:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof248
+		}
+	st_case_248:
+		if ( m.data)[( m.p)] == 10 {
+			goto st716
+		}
+		goto st0
+	st249:
+		if ( m.p)++; ( m.p) == ( m.pe) {
+			goto _test_eof249
+		}
+	st_case_249:
+		if ( m.data)[( m.p)] == 10 {
+			goto st716
+		}
+		goto st249
 	st_out:
-	_test_eof1: m.cs = 1; goto _test_eof
-	_test_eof2: m.cs = 2; goto _test_eof
-	_test_eof3: m.cs = 3; goto _test_eof
-	_test_eof4: m.cs = 4; goto _test_eof
-	_test_eof5: m.cs = 5; goto _test_eof
-	_test_eof6: m.cs = 6; goto _test_eof
-	_test_eof7: m.cs = 7; goto _test_eof
-	_test_eof206: m.cs = 206; goto _test_eof
-	_test_eof207: m.cs = 207; goto _test_eof
-	_test_eof208: m.cs = 208; goto _test_eof
-	_test_eof8: m.cs = 8; goto _test_eof
-	_test_eof209: m.cs = 209; goto _test_eof
-	_test_eof210: m.cs = 210; goto _test_eof
-	_test_eof211: m.cs = 211; goto _test_eof
-	_test_eof212: m.cs = 212; goto _test_eof
-	_test_eof213: m.cs = 213; goto _test_eof
-	_test_eof214: m.cs = 214; goto _test_eof
-	_test_eof215: m.cs = 215; goto _test_eof
-	_test_eof216: m.cs = 216; goto _test_eof
-	_test_eof217: m.cs = 217; goto _test_eof
-	_test_eof218: m.cs = 218; goto _test_eof
-	_test_eof219: m.cs = 219; goto _test_eof
-	_test_eof220: m.cs = 220; goto _test_eof
-	_test_eof221: m.cs = 221; goto _test_eof
-	_test_eof222: m.cs = 222; goto _test_eof
-	_test_eof223: m.cs = 223; goto _test_eof
-	_test_eof224: m.cs = 224; goto _test_eof
-	_test_eof225: m.cs = 225; goto _test_eof
-	_test_eof226: m.cs = 226; goto _test_eof
-	_test_eof227: m.cs = 227; goto _test_eof
-	_test_eof228: m.cs = 228; goto _test_eof
-	_test_eof9: m.cs = 9; goto _test_eof
-	_test_eof10: m.cs = 10; goto _test_eof
-	_test_eof11: m.cs = 11; goto _test_eof
-	_test_eof12: m.cs = 12; goto _test_eof
-	_test_eof13: m.cs = 13; goto _test_eof
-	_test_eof229: m.cs = 229; goto _test_eof
-	_test_eof14: m.cs = 14; goto _test_eof
-	_test_eof15: m.cs = 15; goto _test_eof
-	_test_eof230: m.cs = 230; goto _test_eof
-	_test_eof231: m.cs = 231; goto _test_eof
-	_test_eof232: m.cs = 232; goto _test_eof
-	_test_eof233: m.cs = 233; goto _test_eof
-	_test_eof234: m.cs = 234; goto _test_eof
-	_test_eof235: m.cs = 235; goto _test_eof
-	_test_eof236: m.cs = 236; goto _test_eof
-	_test_eof237: m.cs = 237; goto _test_eof
-	_test_eof238: m.cs = 238; goto _test_eof
-	_test_eof16: m.cs = 16; goto _test_eof
-	_test_eof17: 
m.cs = 17; goto _test_eof - _test_eof18: m.cs = 18; goto _test_eof - _test_eof239: m.cs = 239; goto _test_eof - _test_eof19: m.cs = 19; goto _test_eof - _test_eof20: m.cs = 20; goto _test_eof - _test_eof21: m.cs = 21; goto _test_eof - _test_eof240: m.cs = 240; goto _test_eof - _test_eof22: m.cs = 22; goto _test_eof - _test_eof23: m.cs = 23; goto _test_eof - _test_eof241: m.cs = 241; goto _test_eof - _test_eof242: m.cs = 242; goto _test_eof - _test_eof24: m.cs = 24; goto _test_eof - _test_eof25: m.cs = 25; goto _test_eof - _test_eof26: m.cs = 26; goto _test_eof - _test_eof27: m.cs = 27; goto _test_eof - _test_eof28: m.cs = 28; goto _test_eof - _test_eof29: m.cs = 29; goto _test_eof - _test_eof30: m.cs = 30; goto _test_eof - _test_eof31: m.cs = 31; goto _test_eof - _test_eof32: m.cs = 32; goto _test_eof - _test_eof33: m.cs = 33; goto _test_eof - _test_eof34: m.cs = 34; goto _test_eof - _test_eof35: m.cs = 35; goto _test_eof - _test_eof36: m.cs = 36; goto _test_eof - _test_eof37: m.cs = 37; goto _test_eof - _test_eof38: m.cs = 38; goto _test_eof - _test_eof39: m.cs = 39; goto _test_eof - _test_eof40: m.cs = 40; goto _test_eof - _test_eof41: m.cs = 41; goto _test_eof - _test_eof42: m.cs = 42; goto _test_eof - _test_eof243: m.cs = 243; goto _test_eof - _test_eof244: m.cs = 244; goto _test_eof - _test_eof43: m.cs = 43; goto _test_eof - _test_eof245: m.cs = 245; goto _test_eof - _test_eof246: m.cs = 246; goto _test_eof - _test_eof247: m.cs = 247; goto _test_eof - _test_eof248: m.cs = 248; goto _test_eof - _test_eof249: m.cs = 249; goto _test_eof - _test_eof250: m.cs = 250; goto _test_eof - _test_eof251: m.cs = 251; goto _test_eof - _test_eof252: m.cs = 252; goto _test_eof - _test_eof253: m.cs = 253; goto _test_eof - _test_eof254: m.cs = 254; goto _test_eof - _test_eof255: m.cs = 255; goto _test_eof - _test_eof256: m.cs = 256; goto _test_eof - _test_eof257: m.cs = 257; goto _test_eof - _test_eof258: m.cs = 258; goto _test_eof - _test_eof259: m.cs = 259; goto _test_eof - _test_eof260: m.cs = 260; goto _test_eof - _test_eof261: m.cs = 261; goto _test_eof - _test_eof262: m.cs = 262; goto _test_eof - _test_eof263: m.cs = 263; goto _test_eof - _test_eof264: m.cs = 264; goto _test_eof - _test_eof44: m.cs = 44; goto _test_eof - _test_eof265: m.cs = 265; goto _test_eof - _test_eof266: m.cs = 266; goto _test_eof - _test_eof45: m.cs = 45; goto _test_eof - _test_eof267: m.cs = 267; goto _test_eof - _test_eof268: m.cs = 268; goto _test_eof - _test_eof269: m.cs = 269; goto _test_eof - _test_eof270: m.cs = 270; goto _test_eof - _test_eof271: m.cs = 271; goto _test_eof - _test_eof272: m.cs = 272; goto _test_eof - _test_eof273: m.cs = 273; goto _test_eof - _test_eof274: m.cs = 274; goto _test_eof - _test_eof275: m.cs = 275; goto _test_eof - _test_eof276: m.cs = 276; goto _test_eof - _test_eof277: m.cs = 277; goto _test_eof - _test_eof278: m.cs = 278; goto _test_eof - _test_eof279: m.cs = 279; goto _test_eof - _test_eof280: m.cs = 280; goto _test_eof - _test_eof281: m.cs = 281; goto _test_eof - _test_eof282: m.cs = 282; goto _test_eof - _test_eof283: m.cs = 283; goto _test_eof - _test_eof284: m.cs = 284; goto _test_eof - _test_eof285: m.cs = 285; goto _test_eof - _test_eof286: m.cs = 286; goto _test_eof - _test_eof46: m.cs = 46; goto _test_eof - _test_eof47: m.cs = 47; goto _test_eof - _test_eof48: m.cs = 48; goto _test_eof - _test_eof287: m.cs = 287; goto _test_eof - _test_eof49: m.cs = 49; goto _test_eof - _test_eof50: m.cs = 50; goto _test_eof - _test_eof51: m.cs = 51; goto _test_eof - _test_eof52: m.cs = 52; 
goto _test_eof - _test_eof53: m.cs = 53; goto _test_eof - _test_eof288: m.cs = 288; goto _test_eof - _test_eof54: m.cs = 54; goto _test_eof - _test_eof289: m.cs = 289; goto _test_eof - _test_eof55: m.cs = 55; goto _test_eof - _test_eof290: m.cs = 290; goto _test_eof - _test_eof291: m.cs = 291; goto _test_eof - _test_eof292: m.cs = 292; goto _test_eof - _test_eof293: m.cs = 293; goto _test_eof - _test_eof294: m.cs = 294; goto _test_eof - _test_eof295: m.cs = 295; goto _test_eof - _test_eof296: m.cs = 296; goto _test_eof - _test_eof297: m.cs = 297; goto _test_eof - _test_eof298: m.cs = 298; goto _test_eof - _test_eof56: m.cs = 56; goto _test_eof - _test_eof57: m.cs = 57; goto _test_eof - _test_eof58: m.cs = 58; goto _test_eof - _test_eof299: m.cs = 299; goto _test_eof - _test_eof59: m.cs = 59; goto _test_eof - _test_eof60: m.cs = 60; goto _test_eof - _test_eof61: m.cs = 61; goto _test_eof - _test_eof300: m.cs = 300; goto _test_eof - _test_eof62: m.cs = 62; goto _test_eof - _test_eof63: m.cs = 63; goto _test_eof - _test_eof301: m.cs = 301; goto _test_eof - _test_eof302: m.cs = 302; goto _test_eof - _test_eof64: m.cs = 64; goto _test_eof - _test_eof65: m.cs = 65; goto _test_eof - _test_eof66: m.cs = 66; goto _test_eof - _test_eof303: m.cs = 303; goto _test_eof - _test_eof67: m.cs = 67; goto _test_eof - _test_eof68: m.cs = 68; goto _test_eof - _test_eof304: m.cs = 304; goto _test_eof - _test_eof305: m.cs = 305; goto _test_eof - _test_eof306: m.cs = 306; goto _test_eof - _test_eof307: m.cs = 307; goto _test_eof - _test_eof308: m.cs = 308; goto _test_eof - _test_eof309: m.cs = 309; goto _test_eof - _test_eof310: m.cs = 310; goto _test_eof - _test_eof311: m.cs = 311; goto _test_eof - _test_eof312: m.cs = 312; goto _test_eof - _test_eof69: m.cs = 69; goto _test_eof - _test_eof70: m.cs = 70; goto _test_eof - _test_eof71: m.cs = 71; goto _test_eof - _test_eof313: m.cs = 313; goto _test_eof - _test_eof72: m.cs = 72; goto _test_eof - _test_eof73: m.cs = 73; goto _test_eof - _test_eof74: m.cs = 74; goto _test_eof - _test_eof314: m.cs = 314; goto _test_eof - _test_eof75: m.cs = 75; goto _test_eof - _test_eof76: m.cs = 76; goto _test_eof - _test_eof315: m.cs = 315; goto _test_eof - _test_eof316: m.cs = 316; goto _test_eof - _test_eof77: m.cs = 77; goto _test_eof - _test_eof78: m.cs = 78; goto _test_eof - _test_eof79: m.cs = 79; goto _test_eof - _test_eof80: m.cs = 80; goto _test_eof - _test_eof81: m.cs = 81; goto _test_eof - _test_eof82: m.cs = 82; goto _test_eof - _test_eof317: m.cs = 317; goto _test_eof - _test_eof318: m.cs = 318; goto _test_eof - _test_eof319: m.cs = 319; goto _test_eof - _test_eof320: m.cs = 320; goto _test_eof - _test_eof83: m.cs = 83; goto _test_eof - _test_eof321: m.cs = 321; goto _test_eof - _test_eof322: m.cs = 322; goto _test_eof - _test_eof323: m.cs = 323; goto _test_eof - _test_eof324: m.cs = 324; goto _test_eof - _test_eof84: m.cs = 84; goto _test_eof - _test_eof325: m.cs = 325; goto _test_eof - _test_eof326: m.cs = 326; goto _test_eof - _test_eof327: m.cs = 327; goto _test_eof - _test_eof328: m.cs = 328; goto _test_eof - _test_eof329: m.cs = 329; goto _test_eof - _test_eof330: m.cs = 330; goto _test_eof - _test_eof331: m.cs = 331; goto _test_eof - _test_eof332: m.cs = 332; goto _test_eof - _test_eof333: m.cs = 333; goto _test_eof - _test_eof334: m.cs = 334; goto _test_eof - _test_eof335: m.cs = 335; goto _test_eof - _test_eof336: m.cs = 336; goto _test_eof - _test_eof337: m.cs = 337; goto _test_eof - _test_eof338: m.cs = 338; goto _test_eof - _test_eof339: m.cs = 339; goto 
_test_eof - _test_eof340: m.cs = 340; goto _test_eof - _test_eof341: m.cs = 341; goto _test_eof - _test_eof342: m.cs = 342; goto _test_eof - _test_eof85: m.cs = 85; goto _test_eof - _test_eof86: m.cs = 86; goto _test_eof - _test_eof87: m.cs = 87; goto _test_eof - _test_eof88: m.cs = 88; goto _test_eof - _test_eof89: m.cs = 89; goto _test_eof - _test_eof90: m.cs = 90; goto _test_eof - _test_eof91: m.cs = 91; goto _test_eof - _test_eof92: m.cs = 92; goto _test_eof - _test_eof93: m.cs = 93; goto _test_eof - _test_eof94: m.cs = 94; goto _test_eof - _test_eof95: m.cs = 95; goto _test_eof - _test_eof96: m.cs = 96; goto _test_eof - _test_eof97: m.cs = 97; goto _test_eof - _test_eof343: m.cs = 343; goto _test_eof - _test_eof344: m.cs = 344; goto _test_eof - _test_eof98: m.cs = 98; goto _test_eof - _test_eof345: m.cs = 345; goto _test_eof - _test_eof346: m.cs = 346; goto _test_eof - _test_eof347: m.cs = 347; goto _test_eof - _test_eof348: m.cs = 348; goto _test_eof - _test_eof349: m.cs = 349; goto _test_eof - _test_eof350: m.cs = 350; goto _test_eof - _test_eof351: m.cs = 351; goto _test_eof - _test_eof352: m.cs = 352; goto _test_eof - _test_eof353: m.cs = 353; goto _test_eof - _test_eof354: m.cs = 354; goto _test_eof - _test_eof355: m.cs = 355; goto _test_eof - _test_eof356: m.cs = 356; goto _test_eof - _test_eof357: m.cs = 357; goto _test_eof - _test_eof358: m.cs = 358; goto _test_eof - _test_eof359: m.cs = 359; goto _test_eof - _test_eof360: m.cs = 360; goto _test_eof - _test_eof361: m.cs = 361; goto _test_eof - _test_eof362: m.cs = 362; goto _test_eof - _test_eof363: m.cs = 363; goto _test_eof - _test_eof364: m.cs = 364; goto _test_eof - _test_eof99: m.cs = 99; goto _test_eof - _test_eof100: m.cs = 100; goto _test_eof - _test_eof365: m.cs = 365; goto _test_eof - _test_eof366: m.cs = 366; goto _test_eof - _test_eof101: m.cs = 101; goto _test_eof - _test_eof367: m.cs = 367; goto _test_eof - _test_eof368: m.cs = 368; goto _test_eof - _test_eof369: m.cs = 369; goto _test_eof - _test_eof370: m.cs = 370; goto _test_eof - _test_eof371: m.cs = 371; goto _test_eof - _test_eof372: m.cs = 372; goto _test_eof - _test_eof373: m.cs = 373; goto _test_eof - _test_eof374: m.cs = 374; goto _test_eof - _test_eof375: m.cs = 375; goto _test_eof - _test_eof376: m.cs = 376; goto _test_eof - _test_eof377: m.cs = 377; goto _test_eof - _test_eof378: m.cs = 378; goto _test_eof - _test_eof379: m.cs = 379; goto _test_eof - _test_eof380: m.cs = 380; goto _test_eof - _test_eof381: m.cs = 381; goto _test_eof - _test_eof382: m.cs = 382; goto _test_eof - _test_eof383: m.cs = 383; goto _test_eof - _test_eof384: m.cs = 384; goto _test_eof - _test_eof385: m.cs = 385; goto _test_eof - _test_eof386: m.cs = 386; goto _test_eof - _test_eof102: m.cs = 102; goto _test_eof - _test_eof387: m.cs = 387; goto _test_eof - _test_eof388: m.cs = 388; goto _test_eof - _test_eof103: m.cs = 103; goto _test_eof - _test_eof104: m.cs = 104; goto _test_eof - _test_eof105: m.cs = 105; goto _test_eof - _test_eof106: m.cs = 106; goto _test_eof - _test_eof107: m.cs = 107; goto _test_eof - _test_eof389: m.cs = 389; goto _test_eof - _test_eof108: m.cs = 108; goto _test_eof - _test_eof109: m.cs = 109; goto _test_eof - _test_eof390: m.cs = 390; goto _test_eof - _test_eof391: m.cs = 391; goto _test_eof - _test_eof392: m.cs = 392; goto _test_eof - _test_eof393: m.cs = 393; goto _test_eof - _test_eof394: m.cs = 394; goto _test_eof - _test_eof395: m.cs = 395; goto _test_eof - _test_eof396: m.cs = 396; goto _test_eof - _test_eof397: m.cs = 397; goto _test_eof - 
_test_eof398: m.cs = 398; goto _test_eof - _test_eof110: m.cs = 110; goto _test_eof - _test_eof111: m.cs = 111; goto _test_eof - _test_eof112: m.cs = 112; goto _test_eof - _test_eof399: m.cs = 399; goto _test_eof - _test_eof113: m.cs = 113; goto _test_eof - _test_eof114: m.cs = 114; goto _test_eof - _test_eof115: m.cs = 115; goto _test_eof - _test_eof400: m.cs = 400; goto _test_eof - _test_eof116: m.cs = 116; goto _test_eof - _test_eof117: m.cs = 117; goto _test_eof - _test_eof401: m.cs = 401; goto _test_eof - _test_eof402: m.cs = 402; goto _test_eof - _test_eof118: m.cs = 118; goto _test_eof - _test_eof119: m.cs = 119; goto _test_eof - _test_eof120: m.cs = 120; goto _test_eof - _test_eof121: m.cs = 121; goto _test_eof - _test_eof122: m.cs = 122; goto _test_eof - _test_eof123: m.cs = 123; goto _test_eof - _test_eof124: m.cs = 124; goto _test_eof - _test_eof125: m.cs = 125; goto _test_eof - _test_eof126: m.cs = 126; goto _test_eof - _test_eof127: m.cs = 127; goto _test_eof - _test_eof128: m.cs = 128; goto _test_eof - _test_eof129: m.cs = 129; goto _test_eof - _test_eof403: m.cs = 403; goto _test_eof - _test_eof404: m.cs = 404; goto _test_eof - _test_eof405: m.cs = 405; goto _test_eof - _test_eof130: m.cs = 130; goto _test_eof - _test_eof406: m.cs = 406; goto _test_eof - _test_eof407: m.cs = 407; goto _test_eof - _test_eof408: m.cs = 408; goto _test_eof - _test_eof409: m.cs = 409; goto _test_eof - _test_eof410: m.cs = 410; goto _test_eof - _test_eof411: m.cs = 411; goto _test_eof - _test_eof412: m.cs = 412; goto _test_eof - _test_eof413: m.cs = 413; goto _test_eof - _test_eof414: m.cs = 414; goto _test_eof - _test_eof415: m.cs = 415; goto _test_eof - _test_eof416: m.cs = 416; goto _test_eof - _test_eof417: m.cs = 417; goto _test_eof - _test_eof418: m.cs = 418; goto _test_eof - _test_eof419: m.cs = 419; goto _test_eof - _test_eof420: m.cs = 420; goto _test_eof - _test_eof421: m.cs = 421; goto _test_eof - _test_eof422: m.cs = 422; goto _test_eof - _test_eof423: m.cs = 423; goto _test_eof - _test_eof424: m.cs = 424; goto _test_eof - _test_eof425: m.cs = 425; goto _test_eof - _test_eof426: m.cs = 426; goto _test_eof - _test_eof427: m.cs = 427; goto _test_eof - _test_eof131: m.cs = 131; goto _test_eof - _test_eof428: m.cs = 428; goto _test_eof - _test_eof429: m.cs = 429; goto _test_eof - _test_eof430: m.cs = 430; goto _test_eof - _test_eof431: m.cs = 431; goto _test_eof - _test_eof132: m.cs = 132; goto _test_eof - _test_eof432: m.cs = 432; goto _test_eof - _test_eof433: m.cs = 433; goto _test_eof - _test_eof434: m.cs = 434; goto _test_eof - _test_eof435: m.cs = 435; goto _test_eof - _test_eof436: m.cs = 436; goto _test_eof - _test_eof437: m.cs = 437; goto _test_eof - _test_eof438: m.cs = 438; goto _test_eof - _test_eof439: m.cs = 439; goto _test_eof - _test_eof440: m.cs = 440; goto _test_eof - _test_eof441: m.cs = 441; goto _test_eof - _test_eof442: m.cs = 442; goto _test_eof - _test_eof443: m.cs = 443; goto _test_eof - _test_eof444: m.cs = 444; goto _test_eof - _test_eof445: m.cs = 445; goto _test_eof - _test_eof446: m.cs = 446; goto _test_eof - _test_eof447: m.cs = 447; goto _test_eof - _test_eof448: m.cs = 448; goto _test_eof - _test_eof449: m.cs = 449; goto _test_eof - _test_eof450: m.cs = 450; goto _test_eof - _test_eof451: m.cs = 451; goto _test_eof - _test_eof133: m.cs = 133; goto _test_eof - _test_eof134: m.cs = 134; goto _test_eof - _test_eof135: m.cs = 135; goto _test_eof - _test_eof452: m.cs = 452; goto _test_eof - _test_eof453: m.cs = 453; goto _test_eof - _test_eof136: m.cs = 136; 
goto _test_eof - _test_eof454: m.cs = 454; goto _test_eof - _test_eof455: m.cs = 455; goto _test_eof - _test_eof456: m.cs = 456; goto _test_eof - _test_eof457: m.cs = 457; goto _test_eof - _test_eof458: m.cs = 458; goto _test_eof - _test_eof459: m.cs = 459; goto _test_eof - _test_eof460: m.cs = 460; goto _test_eof - _test_eof461: m.cs = 461; goto _test_eof - _test_eof462: m.cs = 462; goto _test_eof - _test_eof463: m.cs = 463; goto _test_eof - _test_eof464: m.cs = 464; goto _test_eof - _test_eof465: m.cs = 465; goto _test_eof - _test_eof466: m.cs = 466; goto _test_eof - _test_eof467: m.cs = 467; goto _test_eof - _test_eof468: m.cs = 468; goto _test_eof - _test_eof469: m.cs = 469; goto _test_eof - _test_eof470: m.cs = 470; goto _test_eof - _test_eof471: m.cs = 471; goto _test_eof - _test_eof472: m.cs = 472; goto _test_eof - _test_eof473: m.cs = 473; goto _test_eof - _test_eof137: m.cs = 137; goto _test_eof - _test_eof474: m.cs = 474; goto _test_eof - _test_eof475: m.cs = 475; goto _test_eof - _test_eof476: m.cs = 476; goto _test_eof - _test_eof138: m.cs = 138; goto _test_eof - _test_eof477: m.cs = 477; goto _test_eof - _test_eof478: m.cs = 478; goto _test_eof - _test_eof479: m.cs = 479; goto _test_eof - _test_eof480: m.cs = 480; goto _test_eof - _test_eof481: m.cs = 481; goto _test_eof - _test_eof482: m.cs = 482; goto _test_eof - _test_eof483: m.cs = 483; goto _test_eof - _test_eof484: m.cs = 484; goto _test_eof - _test_eof485: m.cs = 485; goto _test_eof - _test_eof486: m.cs = 486; goto _test_eof - _test_eof487: m.cs = 487; goto _test_eof - _test_eof488: m.cs = 488; goto _test_eof - _test_eof489: m.cs = 489; goto _test_eof - _test_eof490: m.cs = 490; goto _test_eof - _test_eof491: m.cs = 491; goto _test_eof - _test_eof492: m.cs = 492; goto _test_eof - _test_eof493: m.cs = 493; goto _test_eof - _test_eof494: m.cs = 494; goto _test_eof - _test_eof495: m.cs = 495; goto _test_eof - _test_eof496: m.cs = 496; goto _test_eof - _test_eof497: m.cs = 497; goto _test_eof - _test_eof498: m.cs = 498; goto _test_eof - _test_eof139: m.cs = 139; goto _test_eof - _test_eof499: m.cs = 499; goto _test_eof - _test_eof500: m.cs = 500; goto _test_eof - _test_eof501: m.cs = 501; goto _test_eof - _test_eof502: m.cs = 502; goto _test_eof - _test_eof503: m.cs = 503; goto _test_eof - _test_eof504: m.cs = 504; goto _test_eof - _test_eof505: m.cs = 505; goto _test_eof - _test_eof506: m.cs = 506; goto _test_eof - _test_eof507: m.cs = 507; goto _test_eof - _test_eof508: m.cs = 508; goto _test_eof - _test_eof509: m.cs = 509; goto _test_eof - _test_eof510: m.cs = 510; goto _test_eof - _test_eof511: m.cs = 511; goto _test_eof - _test_eof512: m.cs = 512; goto _test_eof - _test_eof513: m.cs = 513; goto _test_eof - _test_eof514: m.cs = 514; goto _test_eof - _test_eof515: m.cs = 515; goto _test_eof - _test_eof516: m.cs = 516; goto _test_eof - _test_eof517: m.cs = 517; goto _test_eof - _test_eof518: m.cs = 518; goto _test_eof - _test_eof519: m.cs = 519; goto _test_eof - _test_eof520: m.cs = 520; goto _test_eof - _test_eof140: m.cs = 140; goto _test_eof - _test_eof141: m.cs = 141; goto _test_eof - _test_eof142: m.cs = 142; goto _test_eof - _test_eof143: m.cs = 143; goto _test_eof - _test_eof144: m.cs = 144; goto _test_eof - _test_eof521: m.cs = 521; goto _test_eof - _test_eof145: m.cs = 145; goto _test_eof - _test_eof522: m.cs = 522; goto _test_eof - _test_eof146: m.cs = 146; goto _test_eof - _test_eof523: m.cs = 523; goto _test_eof - _test_eof524: m.cs = 524; goto _test_eof - _test_eof525: m.cs = 525; goto _test_eof - 
_test_eof526: m.cs = 526; goto _test_eof - _test_eof527: m.cs = 527; goto _test_eof - _test_eof528: m.cs = 528; goto _test_eof - _test_eof529: m.cs = 529; goto _test_eof - _test_eof530: m.cs = 530; goto _test_eof - _test_eof531: m.cs = 531; goto _test_eof - _test_eof147: m.cs = 147; goto _test_eof - _test_eof148: m.cs = 148; goto _test_eof - _test_eof149: m.cs = 149; goto _test_eof - _test_eof532: m.cs = 532; goto _test_eof - _test_eof150: m.cs = 150; goto _test_eof - _test_eof151: m.cs = 151; goto _test_eof - _test_eof152: m.cs = 152; goto _test_eof - _test_eof533: m.cs = 533; goto _test_eof - _test_eof153: m.cs = 153; goto _test_eof - _test_eof154: m.cs = 154; goto _test_eof - _test_eof534: m.cs = 534; goto _test_eof - _test_eof535: m.cs = 535; goto _test_eof - _test_eof155: m.cs = 155; goto _test_eof - _test_eof156: m.cs = 156; goto _test_eof - _test_eof157: m.cs = 157; goto _test_eof - _test_eof536: m.cs = 536; goto _test_eof - _test_eof537: m.cs = 537; goto _test_eof - _test_eof538: m.cs = 538; goto _test_eof - _test_eof158: m.cs = 158; goto _test_eof - _test_eof539: m.cs = 539; goto _test_eof - _test_eof540: m.cs = 540; goto _test_eof - _test_eof541: m.cs = 541; goto _test_eof - _test_eof542: m.cs = 542; goto _test_eof - _test_eof543: m.cs = 543; goto _test_eof - _test_eof544: m.cs = 544; goto _test_eof - _test_eof545: m.cs = 545; goto _test_eof - _test_eof546: m.cs = 546; goto _test_eof - _test_eof547: m.cs = 547; goto _test_eof - _test_eof548: m.cs = 548; goto _test_eof - _test_eof549: m.cs = 549; goto _test_eof - _test_eof550: m.cs = 550; goto _test_eof - _test_eof551: m.cs = 551; goto _test_eof - _test_eof552: m.cs = 552; goto _test_eof - _test_eof553: m.cs = 553; goto _test_eof - _test_eof554: m.cs = 554; goto _test_eof - _test_eof555: m.cs = 555; goto _test_eof - _test_eof556: m.cs = 556; goto _test_eof - _test_eof557: m.cs = 557; goto _test_eof - _test_eof558: m.cs = 558; goto _test_eof - _test_eof159: m.cs = 159; goto _test_eof - _test_eof160: m.cs = 160; goto _test_eof - _test_eof559: m.cs = 559; goto _test_eof - _test_eof560: m.cs = 560; goto _test_eof - _test_eof561: m.cs = 561; goto _test_eof - _test_eof562: m.cs = 562; goto _test_eof - _test_eof563: m.cs = 563; goto _test_eof - _test_eof564: m.cs = 564; goto _test_eof - _test_eof565: m.cs = 565; goto _test_eof - _test_eof566: m.cs = 566; goto _test_eof - _test_eof567: m.cs = 567; goto _test_eof - _test_eof161: m.cs = 161; goto _test_eof - _test_eof162: m.cs = 162; goto _test_eof - _test_eof163: m.cs = 163; goto _test_eof - _test_eof568: m.cs = 568; goto _test_eof - _test_eof164: m.cs = 164; goto _test_eof - _test_eof165: m.cs = 165; goto _test_eof - _test_eof166: m.cs = 166; goto _test_eof - _test_eof569: m.cs = 569; goto _test_eof - _test_eof167: m.cs = 167; goto _test_eof - _test_eof168: m.cs = 168; goto _test_eof - _test_eof570: m.cs = 570; goto _test_eof - _test_eof571: m.cs = 571; goto _test_eof - _test_eof169: m.cs = 169; goto _test_eof - _test_eof170: m.cs = 170; goto _test_eof - _test_eof171: m.cs = 171; goto _test_eof - _test_eof172: m.cs = 172; goto _test_eof - _test_eof572: m.cs = 572; goto _test_eof - _test_eof173: m.cs = 173; goto _test_eof - _test_eof573: m.cs = 573; goto _test_eof - _test_eof574: m.cs = 574; goto _test_eof - _test_eof174: m.cs = 174; goto _test_eof - _test_eof575: m.cs = 575; goto _test_eof - _test_eof576: m.cs = 576; goto _test_eof - _test_eof577: m.cs = 577; goto _test_eof - _test_eof578: m.cs = 578; goto _test_eof - _test_eof579: m.cs = 579; goto _test_eof - _test_eof580: m.cs = 580; 
goto _test_eof - _test_eof581: m.cs = 581; goto _test_eof - _test_eof582: m.cs = 582; goto _test_eof - _test_eof583: m.cs = 583; goto _test_eof - _test_eof175: m.cs = 175; goto _test_eof - _test_eof176: m.cs = 176; goto _test_eof - _test_eof177: m.cs = 177; goto _test_eof - _test_eof584: m.cs = 584; goto _test_eof - _test_eof178: m.cs = 178; goto _test_eof - _test_eof179: m.cs = 179; goto _test_eof - _test_eof180: m.cs = 180; goto _test_eof - _test_eof585: m.cs = 585; goto _test_eof - _test_eof181: m.cs = 181; goto _test_eof - _test_eof182: m.cs = 182; goto _test_eof - _test_eof586: m.cs = 586; goto _test_eof - _test_eof587: m.cs = 587; goto _test_eof - _test_eof183: m.cs = 183; goto _test_eof - _test_eof184: m.cs = 184; goto _test_eof - _test_eof588: m.cs = 588; goto _test_eof - _test_eof185: m.cs = 185; goto _test_eof - _test_eof186: m.cs = 186; goto _test_eof - _test_eof589: m.cs = 589; goto _test_eof - _test_eof590: m.cs = 590; goto _test_eof - _test_eof591: m.cs = 591; goto _test_eof - _test_eof592: m.cs = 592; goto _test_eof - _test_eof593: m.cs = 593; goto _test_eof - _test_eof594: m.cs = 594; goto _test_eof - _test_eof595: m.cs = 595; goto _test_eof - _test_eof596: m.cs = 596; goto _test_eof - _test_eof187: m.cs = 187; goto _test_eof - _test_eof188: m.cs = 188; goto _test_eof - _test_eof189: m.cs = 189; goto _test_eof - _test_eof597: m.cs = 597; goto _test_eof - _test_eof190: m.cs = 190; goto _test_eof - _test_eof191: m.cs = 191; goto _test_eof - _test_eof192: m.cs = 192; goto _test_eof - _test_eof598: m.cs = 598; goto _test_eof - _test_eof193: m.cs = 193; goto _test_eof - _test_eof194: m.cs = 194; goto _test_eof - _test_eof599: m.cs = 599; goto _test_eof - _test_eof600: m.cs = 600; goto _test_eof - _test_eof195: m.cs = 195; goto _test_eof - _test_eof601: m.cs = 601; goto _test_eof - _test_eof196: m.cs = 196; goto _test_eof - _test_eof602: m.cs = 602; goto _test_eof - _test_eof603: m.cs = 603; goto _test_eof - _test_eof197: m.cs = 197; goto _test_eof - _test_eof198: m.cs = 198; goto _test_eof - _test_eof199: m.cs = 199; goto _test_eof - _test_eof604: m.cs = 604; goto _test_eof - _test_eof605: m.cs = 605; goto _test_eof - _test_eof606: m.cs = 606; goto _test_eof - _test_eof200: m.cs = 200; goto _test_eof - _test_eof201: m.cs = 201; goto _test_eof - _test_eof202: m.cs = 202; goto _test_eof - _test_eof607: m.cs = 607; goto _test_eof - _test_eof203: m.cs = 203; goto _test_eof - _test_eof204: m.cs = 204; goto _test_eof - _test_eof205: m.cs = 205; goto _test_eof + _test_eof259: ( m.cs) = 259; goto _test_eof + _test_eof1: ( m.cs) = 1; goto _test_eof + _test_eof2: ( m.cs) = 2; goto _test_eof + _test_eof3: ( m.cs) = 3; goto _test_eof + _test_eof4: ( m.cs) = 4; goto _test_eof + _test_eof5: ( m.cs) = 5; goto _test_eof + _test_eof6: ( m.cs) = 6; goto _test_eof + _test_eof7: ( m.cs) = 7; goto _test_eof + _test_eof8: ( m.cs) = 8; goto _test_eof + _test_eof260: ( m.cs) = 260; goto _test_eof + _test_eof261: ( m.cs) = 261; goto _test_eof + _test_eof262: ( m.cs) = 262; goto _test_eof + _test_eof9: ( m.cs) = 9; goto _test_eof + _test_eof10: ( m.cs) = 10; goto _test_eof + _test_eof11: ( m.cs) = 11; goto _test_eof + _test_eof12: ( m.cs) = 12; goto _test_eof + _test_eof13: ( m.cs) = 13; goto _test_eof + _test_eof14: ( m.cs) = 14; goto _test_eof + _test_eof15: ( m.cs) = 15; goto _test_eof + _test_eof16: ( m.cs) = 16; goto _test_eof + _test_eof17: ( m.cs) = 17; goto _test_eof + _test_eof18: ( m.cs) = 18; goto _test_eof + _test_eof19: ( m.cs) = 19; goto _test_eof + _test_eof20: ( m.cs) = 20; goto _test_eof 
+ _test_eof21: ( m.cs) = 21; goto _test_eof + _test_eof22: ( m.cs) = 22; goto _test_eof + _test_eof23: ( m.cs) = 23; goto _test_eof + _test_eof24: ( m.cs) = 24; goto _test_eof + _test_eof25: ( m.cs) = 25; goto _test_eof + _test_eof26: ( m.cs) = 26; goto _test_eof + _test_eof27: ( m.cs) = 27; goto _test_eof + _test_eof28: ( m.cs) = 28; goto _test_eof + _test_eof29: ( m.cs) = 29; goto _test_eof + _test_eof30: ( m.cs) = 30; goto _test_eof + _test_eof31: ( m.cs) = 31; goto _test_eof + _test_eof32: ( m.cs) = 32; goto _test_eof + _test_eof33: ( m.cs) = 33; goto _test_eof + _test_eof263: ( m.cs) = 263; goto _test_eof + _test_eof264: ( m.cs) = 264; goto _test_eof + _test_eof34: ( m.cs) = 34; goto _test_eof + _test_eof35: ( m.cs) = 35; goto _test_eof + _test_eof265: ( m.cs) = 265; goto _test_eof + _test_eof266: ( m.cs) = 266; goto _test_eof + _test_eof267: ( m.cs) = 267; goto _test_eof + _test_eof36: ( m.cs) = 36; goto _test_eof + _test_eof268: ( m.cs) = 268; goto _test_eof + _test_eof269: ( m.cs) = 269; goto _test_eof + _test_eof270: ( m.cs) = 270; goto _test_eof + _test_eof271: ( m.cs) = 271; goto _test_eof + _test_eof272: ( m.cs) = 272; goto _test_eof + _test_eof273: ( m.cs) = 273; goto _test_eof + _test_eof274: ( m.cs) = 274; goto _test_eof + _test_eof275: ( m.cs) = 275; goto _test_eof + _test_eof276: ( m.cs) = 276; goto _test_eof + _test_eof277: ( m.cs) = 277; goto _test_eof + _test_eof278: ( m.cs) = 278; goto _test_eof + _test_eof279: ( m.cs) = 279; goto _test_eof + _test_eof280: ( m.cs) = 280; goto _test_eof + _test_eof281: ( m.cs) = 281; goto _test_eof + _test_eof282: ( m.cs) = 282; goto _test_eof + _test_eof283: ( m.cs) = 283; goto _test_eof + _test_eof284: ( m.cs) = 284; goto _test_eof + _test_eof285: ( m.cs) = 285; goto _test_eof + _test_eof37: ( m.cs) = 37; goto _test_eof + _test_eof38: ( m.cs) = 38; goto _test_eof + _test_eof286: ( m.cs) = 286; goto _test_eof + _test_eof287: ( m.cs) = 287; goto _test_eof + _test_eof288: ( m.cs) = 288; goto _test_eof + _test_eof39: ( m.cs) = 39; goto _test_eof + _test_eof40: ( m.cs) = 40; goto _test_eof + _test_eof41: ( m.cs) = 41; goto _test_eof + _test_eof42: ( m.cs) = 42; goto _test_eof + _test_eof43: ( m.cs) = 43; goto _test_eof + _test_eof289: ( m.cs) = 289; goto _test_eof + _test_eof290: ( m.cs) = 290; goto _test_eof + _test_eof291: ( m.cs) = 291; goto _test_eof + _test_eof292: ( m.cs) = 292; goto _test_eof + _test_eof44: ( m.cs) = 44; goto _test_eof + _test_eof293: ( m.cs) = 293; goto _test_eof + _test_eof294: ( m.cs) = 294; goto _test_eof + _test_eof295: ( m.cs) = 295; goto _test_eof + _test_eof296: ( m.cs) = 296; goto _test_eof + _test_eof297: ( m.cs) = 297; goto _test_eof + _test_eof298: ( m.cs) = 298; goto _test_eof + _test_eof299: ( m.cs) = 299; goto _test_eof + _test_eof300: ( m.cs) = 300; goto _test_eof + _test_eof301: ( m.cs) = 301; goto _test_eof + _test_eof302: ( m.cs) = 302; goto _test_eof + _test_eof303: ( m.cs) = 303; goto _test_eof + _test_eof304: ( m.cs) = 304; goto _test_eof + _test_eof305: ( m.cs) = 305; goto _test_eof + _test_eof306: ( m.cs) = 306; goto _test_eof + _test_eof307: ( m.cs) = 307; goto _test_eof + _test_eof308: ( m.cs) = 308; goto _test_eof + _test_eof309: ( m.cs) = 309; goto _test_eof + _test_eof310: ( m.cs) = 310; goto _test_eof + _test_eof311: ( m.cs) = 311; goto _test_eof + _test_eof312: ( m.cs) = 312; goto _test_eof + _test_eof313: ( m.cs) = 313; goto _test_eof + _test_eof314: ( m.cs) = 314; goto _test_eof + _test_eof45: ( m.cs) = 45; goto _test_eof + _test_eof46: ( m.cs) = 46; goto _test_eof + _test_eof47: ( 
m.cs) = 47; goto _test_eof + _test_eof48: ( m.cs) = 48; goto _test_eof + _test_eof49: ( m.cs) = 49; goto _test_eof + _test_eof50: ( m.cs) = 50; goto _test_eof + _test_eof51: ( m.cs) = 51; goto _test_eof + _test_eof52: ( m.cs) = 52; goto _test_eof + _test_eof53: ( m.cs) = 53; goto _test_eof + _test_eof54: ( m.cs) = 54; goto _test_eof + _test_eof315: ( m.cs) = 315; goto _test_eof + _test_eof316: ( m.cs) = 316; goto _test_eof + _test_eof317: ( m.cs) = 317; goto _test_eof + _test_eof55: ( m.cs) = 55; goto _test_eof + _test_eof56: ( m.cs) = 56; goto _test_eof + _test_eof57: ( m.cs) = 57; goto _test_eof + _test_eof58: ( m.cs) = 58; goto _test_eof + _test_eof59: ( m.cs) = 59; goto _test_eof + _test_eof60: ( m.cs) = 60; goto _test_eof + _test_eof318: ( m.cs) = 318; goto _test_eof + _test_eof319: ( m.cs) = 319; goto _test_eof + _test_eof61: ( m.cs) = 61; goto _test_eof + _test_eof320: ( m.cs) = 320; goto _test_eof + _test_eof321: ( m.cs) = 321; goto _test_eof + _test_eof322: ( m.cs) = 322; goto _test_eof + _test_eof323: ( m.cs) = 323; goto _test_eof + _test_eof324: ( m.cs) = 324; goto _test_eof + _test_eof325: ( m.cs) = 325; goto _test_eof + _test_eof326: ( m.cs) = 326; goto _test_eof + _test_eof327: ( m.cs) = 327; goto _test_eof + _test_eof328: ( m.cs) = 328; goto _test_eof + _test_eof329: ( m.cs) = 329; goto _test_eof + _test_eof330: ( m.cs) = 330; goto _test_eof + _test_eof331: ( m.cs) = 331; goto _test_eof + _test_eof332: ( m.cs) = 332; goto _test_eof + _test_eof333: ( m.cs) = 333; goto _test_eof + _test_eof334: ( m.cs) = 334; goto _test_eof + _test_eof335: ( m.cs) = 335; goto _test_eof + _test_eof336: ( m.cs) = 336; goto _test_eof + _test_eof337: ( m.cs) = 337; goto _test_eof + _test_eof338: ( m.cs) = 338; goto _test_eof + _test_eof339: ( m.cs) = 339; goto _test_eof + _test_eof62: ( m.cs) = 62; goto _test_eof + _test_eof340: ( m.cs) = 340; goto _test_eof + _test_eof341: ( m.cs) = 341; goto _test_eof + _test_eof342: ( m.cs) = 342; goto _test_eof + _test_eof63: ( m.cs) = 63; goto _test_eof + _test_eof343: ( m.cs) = 343; goto _test_eof + _test_eof344: ( m.cs) = 344; goto _test_eof + _test_eof345: ( m.cs) = 345; goto _test_eof + _test_eof346: ( m.cs) = 346; goto _test_eof + _test_eof347: ( m.cs) = 347; goto _test_eof + _test_eof348: ( m.cs) = 348; goto _test_eof + _test_eof349: ( m.cs) = 349; goto _test_eof + _test_eof350: ( m.cs) = 350; goto _test_eof + _test_eof351: ( m.cs) = 351; goto _test_eof + _test_eof352: ( m.cs) = 352; goto _test_eof + _test_eof353: ( m.cs) = 353; goto _test_eof + _test_eof354: ( m.cs) = 354; goto _test_eof + _test_eof355: ( m.cs) = 355; goto _test_eof + _test_eof356: ( m.cs) = 356; goto _test_eof + _test_eof357: ( m.cs) = 357; goto _test_eof + _test_eof358: ( m.cs) = 358; goto _test_eof + _test_eof359: ( m.cs) = 359; goto _test_eof + _test_eof360: ( m.cs) = 360; goto _test_eof + _test_eof361: ( m.cs) = 361; goto _test_eof + _test_eof362: ( m.cs) = 362; goto _test_eof + _test_eof64: ( m.cs) = 64; goto _test_eof + _test_eof65: ( m.cs) = 65; goto _test_eof + _test_eof66: ( m.cs) = 66; goto _test_eof + _test_eof67: ( m.cs) = 67; goto _test_eof + _test_eof68: ( m.cs) = 68; goto _test_eof + _test_eof363: ( m.cs) = 363; goto _test_eof + _test_eof69: ( m.cs) = 69; goto _test_eof + _test_eof70: ( m.cs) = 70; goto _test_eof + _test_eof71: ( m.cs) = 71; goto _test_eof + _test_eof72: ( m.cs) = 72; goto _test_eof + _test_eof73: ( m.cs) = 73; goto _test_eof + _test_eof364: ( m.cs) = 364; goto _test_eof + _test_eof365: ( m.cs) = 365; goto _test_eof + _test_eof366: ( m.cs) = 366; goto 
_test_eof + _test_eof74: ( m.cs) = 74; goto _test_eof + _test_eof75: ( m.cs) = 75; goto _test_eof + _test_eof367: ( m.cs) = 367; goto _test_eof + _test_eof368: ( m.cs) = 368; goto _test_eof + _test_eof76: ( m.cs) = 76; goto _test_eof + _test_eof369: ( m.cs) = 369; goto _test_eof + _test_eof77: ( m.cs) = 77; goto _test_eof + _test_eof370: ( m.cs) = 370; goto _test_eof + _test_eof371: ( m.cs) = 371; goto _test_eof + _test_eof372: ( m.cs) = 372; goto _test_eof + _test_eof373: ( m.cs) = 373; goto _test_eof + _test_eof374: ( m.cs) = 374; goto _test_eof + _test_eof375: ( m.cs) = 375; goto _test_eof + _test_eof376: ( m.cs) = 376; goto _test_eof + _test_eof377: ( m.cs) = 377; goto _test_eof + _test_eof378: ( m.cs) = 378; goto _test_eof + _test_eof379: ( m.cs) = 379; goto _test_eof + _test_eof380: ( m.cs) = 380; goto _test_eof + _test_eof381: ( m.cs) = 381; goto _test_eof + _test_eof382: ( m.cs) = 382; goto _test_eof + _test_eof383: ( m.cs) = 383; goto _test_eof + _test_eof384: ( m.cs) = 384; goto _test_eof + _test_eof385: ( m.cs) = 385; goto _test_eof + _test_eof386: ( m.cs) = 386; goto _test_eof + _test_eof387: ( m.cs) = 387; goto _test_eof + _test_eof388: ( m.cs) = 388; goto _test_eof + _test_eof389: ( m.cs) = 389; goto _test_eof + _test_eof78: ( m.cs) = 78; goto _test_eof + _test_eof79: ( m.cs) = 79; goto _test_eof + _test_eof80: ( m.cs) = 80; goto _test_eof + _test_eof81: ( m.cs) = 81; goto _test_eof + _test_eof82: ( m.cs) = 82; goto _test_eof + _test_eof83: ( m.cs) = 83; goto _test_eof + _test_eof84: ( m.cs) = 84; goto _test_eof + _test_eof85: ( m.cs) = 85; goto _test_eof + _test_eof86: ( m.cs) = 86; goto _test_eof + _test_eof87: ( m.cs) = 87; goto _test_eof + _test_eof88: ( m.cs) = 88; goto _test_eof + _test_eof89: ( m.cs) = 89; goto _test_eof + _test_eof90: ( m.cs) = 90; goto _test_eof + _test_eof91: ( m.cs) = 91; goto _test_eof + _test_eof390: ( m.cs) = 390; goto _test_eof + _test_eof391: ( m.cs) = 391; goto _test_eof + _test_eof392: ( m.cs) = 392; goto _test_eof + _test_eof393: ( m.cs) = 393; goto _test_eof + _test_eof92: ( m.cs) = 92; goto _test_eof + _test_eof93: ( m.cs) = 93; goto _test_eof + _test_eof94: ( m.cs) = 94; goto _test_eof + _test_eof95: ( m.cs) = 95; goto _test_eof + _test_eof394: ( m.cs) = 394; goto _test_eof + _test_eof395: ( m.cs) = 395; goto _test_eof + _test_eof96: ( m.cs) = 96; goto _test_eof + _test_eof97: ( m.cs) = 97; goto _test_eof + _test_eof396: ( m.cs) = 396; goto _test_eof + _test_eof98: ( m.cs) = 98; goto _test_eof + _test_eof99: ( m.cs) = 99; goto _test_eof + _test_eof397: ( m.cs) = 397; goto _test_eof + _test_eof398: ( m.cs) = 398; goto _test_eof + _test_eof100: ( m.cs) = 100; goto _test_eof + _test_eof399: ( m.cs) = 399; goto _test_eof + _test_eof400: ( m.cs) = 400; goto _test_eof + _test_eof101: ( m.cs) = 101; goto _test_eof + _test_eof102: ( m.cs) = 102; goto _test_eof + _test_eof401: ( m.cs) = 401; goto _test_eof + _test_eof402: ( m.cs) = 402; goto _test_eof + _test_eof403: ( m.cs) = 403; goto _test_eof + _test_eof404: ( m.cs) = 404; goto _test_eof + _test_eof405: ( m.cs) = 405; goto _test_eof + _test_eof406: ( m.cs) = 406; goto _test_eof + _test_eof407: ( m.cs) = 407; goto _test_eof + _test_eof408: ( m.cs) = 408; goto _test_eof + _test_eof409: ( m.cs) = 409; goto _test_eof + _test_eof410: ( m.cs) = 410; goto _test_eof + _test_eof411: ( m.cs) = 411; goto _test_eof + _test_eof412: ( m.cs) = 412; goto _test_eof + _test_eof413: ( m.cs) = 413; goto _test_eof + _test_eof414: ( m.cs) = 414; goto _test_eof + _test_eof415: ( m.cs) = 415; goto _test_eof + 
_test_eof416: ( m.cs) = 416; goto _test_eof + _test_eof417: ( m.cs) = 417; goto _test_eof + _test_eof418: ( m.cs) = 418; goto _test_eof + _test_eof103: ( m.cs) = 103; goto _test_eof + _test_eof419: ( m.cs) = 419; goto _test_eof + _test_eof420: ( m.cs) = 420; goto _test_eof + _test_eof421: ( m.cs) = 421; goto _test_eof + _test_eof104: ( m.cs) = 104; goto _test_eof + _test_eof105: ( m.cs) = 105; goto _test_eof + _test_eof422: ( m.cs) = 422; goto _test_eof + _test_eof423: ( m.cs) = 423; goto _test_eof + _test_eof424: ( m.cs) = 424; goto _test_eof + _test_eof106: ( m.cs) = 106; goto _test_eof + _test_eof425: ( m.cs) = 425; goto _test_eof + _test_eof426: ( m.cs) = 426; goto _test_eof + _test_eof427: ( m.cs) = 427; goto _test_eof + _test_eof428: ( m.cs) = 428; goto _test_eof + _test_eof429: ( m.cs) = 429; goto _test_eof + _test_eof430: ( m.cs) = 430; goto _test_eof + _test_eof431: ( m.cs) = 431; goto _test_eof + _test_eof432: ( m.cs) = 432; goto _test_eof + _test_eof433: ( m.cs) = 433; goto _test_eof + _test_eof434: ( m.cs) = 434; goto _test_eof + _test_eof435: ( m.cs) = 435; goto _test_eof + _test_eof436: ( m.cs) = 436; goto _test_eof + _test_eof437: ( m.cs) = 437; goto _test_eof + _test_eof438: ( m.cs) = 438; goto _test_eof + _test_eof439: ( m.cs) = 439; goto _test_eof + _test_eof440: ( m.cs) = 440; goto _test_eof + _test_eof441: ( m.cs) = 441; goto _test_eof + _test_eof442: ( m.cs) = 442; goto _test_eof + _test_eof443: ( m.cs) = 443; goto _test_eof + _test_eof444: ( m.cs) = 444; goto _test_eof + _test_eof107: ( m.cs) = 107; goto _test_eof + _test_eof445: ( m.cs) = 445; goto _test_eof + _test_eof446: ( m.cs) = 446; goto _test_eof + _test_eof447: ( m.cs) = 447; goto _test_eof + _test_eof448: ( m.cs) = 448; goto _test_eof + _test_eof449: ( m.cs) = 449; goto _test_eof + _test_eof450: ( m.cs) = 450; goto _test_eof + _test_eof451: ( m.cs) = 451; goto _test_eof + _test_eof452: ( m.cs) = 452; goto _test_eof + _test_eof453: ( m.cs) = 453; goto _test_eof + _test_eof454: ( m.cs) = 454; goto _test_eof + _test_eof455: ( m.cs) = 455; goto _test_eof + _test_eof456: ( m.cs) = 456; goto _test_eof + _test_eof457: ( m.cs) = 457; goto _test_eof + _test_eof458: ( m.cs) = 458; goto _test_eof + _test_eof459: ( m.cs) = 459; goto _test_eof + _test_eof460: ( m.cs) = 460; goto _test_eof + _test_eof461: ( m.cs) = 461; goto _test_eof + _test_eof462: ( m.cs) = 462; goto _test_eof + _test_eof463: ( m.cs) = 463; goto _test_eof + _test_eof464: ( m.cs) = 464; goto _test_eof + _test_eof465: ( m.cs) = 465; goto _test_eof + _test_eof466: ( m.cs) = 466; goto _test_eof + _test_eof108: ( m.cs) = 108; goto _test_eof + _test_eof109: ( m.cs) = 109; goto _test_eof + _test_eof110: ( m.cs) = 110; goto _test_eof + _test_eof111: ( m.cs) = 111; goto _test_eof + _test_eof112: ( m.cs) = 112; goto _test_eof + _test_eof467: ( m.cs) = 467; goto _test_eof + _test_eof113: ( m.cs) = 113; goto _test_eof + _test_eof468: ( m.cs) = 468; goto _test_eof + _test_eof469: ( m.cs) = 469; goto _test_eof + _test_eof114: ( m.cs) = 114; goto _test_eof + _test_eof470: ( m.cs) = 470; goto _test_eof + _test_eof471: ( m.cs) = 471; goto _test_eof + _test_eof472: ( m.cs) = 472; goto _test_eof + _test_eof473: ( m.cs) = 473; goto _test_eof + _test_eof474: ( m.cs) = 474; goto _test_eof + _test_eof475: ( m.cs) = 475; goto _test_eof + _test_eof476: ( m.cs) = 476; goto _test_eof + _test_eof477: ( m.cs) = 477; goto _test_eof + _test_eof478: ( m.cs) = 478; goto _test_eof + _test_eof115: ( m.cs) = 115; goto _test_eof + _test_eof116: ( m.cs) = 116; goto _test_eof + 
_test_eof117: ( m.cs) = 117; goto _test_eof + _test_eof479: ( m.cs) = 479; goto _test_eof + _test_eof118: ( m.cs) = 118; goto _test_eof + _test_eof119: ( m.cs) = 119; goto _test_eof + _test_eof120: ( m.cs) = 120; goto _test_eof + _test_eof480: ( m.cs) = 480; goto _test_eof + _test_eof121: ( m.cs) = 121; goto _test_eof + _test_eof122: ( m.cs) = 122; goto _test_eof + _test_eof481: ( m.cs) = 481; goto _test_eof + _test_eof482: ( m.cs) = 482; goto _test_eof + _test_eof123: ( m.cs) = 123; goto _test_eof + _test_eof124: ( m.cs) = 124; goto _test_eof + _test_eof125: ( m.cs) = 125; goto _test_eof + _test_eof126: ( m.cs) = 126; goto _test_eof + _test_eof483: ( m.cs) = 483; goto _test_eof + _test_eof484: ( m.cs) = 484; goto _test_eof + _test_eof485: ( m.cs) = 485; goto _test_eof + _test_eof127: ( m.cs) = 127; goto _test_eof + _test_eof486: ( m.cs) = 486; goto _test_eof + _test_eof487: ( m.cs) = 487; goto _test_eof + _test_eof488: ( m.cs) = 488; goto _test_eof + _test_eof489: ( m.cs) = 489; goto _test_eof + _test_eof490: ( m.cs) = 490; goto _test_eof + _test_eof491: ( m.cs) = 491; goto _test_eof + _test_eof492: ( m.cs) = 492; goto _test_eof + _test_eof493: ( m.cs) = 493; goto _test_eof + _test_eof494: ( m.cs) = 494; goto _test_eof + _test_eof495: ( m.cs) = 495; goto _test_eof + _test_eof496: ( m.cs) = 496; goto _test_eof + _test_eof497: ( m.cs) = 497; goto _test_eof + _test_eof498: ( m.cs) = 498; goto _test_eof + _test_eof499: ( m.cs) = 499; goto _test_eof + _test_eof500: ( m.cs) = 500; goto _test_eof + _test_eof501: ( m.cs) = 501; goto _test_eof + _test_eof502: ( m.cs) = 502; goto _test_eof + _test_eof503: ( m.cs) = 503; goto _test_eof + _test_eof504: ( m.cs) = 504; goto _test_eof + _test_eof505: ( m.cs) = 505; goto _test_eof + _test_eof128: ( m.cs) = 128; goto _test_eof + _test_eof129: ( m.cs) = 129; goto _test_eof + _test_eof506: ( m.cs) = 506; goto _test_eof + _test_eof507: ( m.cs) = 507; goto _test_eof + _test_eof508: ( m.cs) = 508; goto _test_eof + _test_eof509: ( m.cs) = 509; goto _test_eof + _test_eof510: ( m.cs) = 510; goto _test_eof + _test_eof511: ( m.cs) = 511; goto _test_eof + _test_eof512: ( m.cs) = 512; goto _test_eof + _test_eof513: ( m.cs) = 513; goto _test_eof + _test_eof514: ( m.cs) = 514; goto _test_eof + _test_eof130: ( m.cs) = 130; goto _test_eof + _test_eof131: ( m.cs) = 131; goto _test_eof + _test_eof132: ( m.cs) = 132; goto _test_eof + _test_eof515: ( m.cs) = 515; goto _test_eof + _test_eof133: ( m.cs) = 133; goto _test_eof + _test_eof134: ( m.cs) = 134; goto _test_eof + _test_eof135: ( m.cs) = 135; goto _test_eof + _test_eof516: ( m.cs) = 516; goto _test_eof + _test_eof136: ( m.cs) = 136; goto _test_eof + _test_eof137: ( m.cs) = 137; goto _test_eof + _test_eof517: ( m.cs) = 517; goto _test_eof + _test_eof518: ( m.cs) = 518; goto _test_eof + _test_eof138: ( m.cs) = 138; goto _test_eof + _test_eof139: ( m.cs) = 139; goto _test_eof + _test_eof140: ( m.cs) = 140; goto _test_eof + _test_eof519: ( m.cs) = 519; goto _test_eof + _test_eof520: ( m.cs) = 520; goto _test_eof + _test_eof141: ( m.cs) = 141; goto _test_eof + _test_eof521: ( m.cs) = 521; goto _test_eof + _test_eof142: ( m.cs) = 142; goto _test_eof + _test_eof522: ( m.cs) = 522; goto _test_eof + _test_eof523: ( m.cs) = 523; goto _test_eof + _test_eof524: ( m.cs) = 524; goto _test_eof + _test_eof525: ( m.cs) = 525; goto _test_eof + _test_eof526: ( m.cs) = 526; goto _test_eof + _test_eof527: ( m.cs) = 527; goto _test_eof + _test_eof528: ( m.cs) = 528; goto _test_eof + _test_eof529: ( m.cs) = 529; goto _test_eof + 
_test_eof143: ( m.cs) = 143; goto _test_eof + _test_eof144: ( m.cs) = 144; goto _test_eof + _test_eof145: ( m.cs) = 145; goto _test_eof + _test_eof530: ( m.cs) = 530; goto _test_eof + _test_eof146: ( m.cs) = 146; goto _test_eof + _test_eof147: ( m.cs) = 147; goto _test_eof + _test_eof148: ( m.cs) = 148; goto _test_eof + _test_eof531: ( m.cs) = 531; goto _test_eof + _test_eof149: ( m.cs) = 149; goto _test_eof + _test_eof150: ( m.cs) = 150; goto _test_eof + _test_eof532: ( m.cs) = 532; goto _test_eof + _test_eof533: ( m.cs) = 533; goto _test_eof + _test_eof534: ( m.cs) = 534; goto _test_eof + _test_eof535: ( m.cs) = 535; goto _test_eof + _test_eof536: ( m.cs) = 536; goto _test_eof + _test_eof537: ( m.cs) = 537; goto _test_eof + _test_eof538: ( m.cs) = 538; goto _test_eof + _test_eof539: ( m.cs) = 539; goto _test_eof + _test_eof540: ( m.cs) = 540; goto _test_eof + _test_eof541: ( m.cs) = 541; goto _test_eof + _test_eof542: ( m.cs) = 542; goto _test_eof + _test_eof543: ( m.cs) = 543; goto _test_eof + _test_eof544: ( m.cs) = 544; goto _test_eof + _test_eof545: ( m.cs) = 545; goto _test_eof + _test_eof546: ( m.cs) = 546; goto _test_eof + _test_eof547: ( m.cs) = 547; goto _test_eof + _test_eof548: ( m.cs) = 548; goto _test_eof + _test_eof549: ( m.cs) = 549; goto _test_eof + _test_eof550: ( m.cs) = 550; goto _test_eof + _test_eof551: ( m.cs) = 551; goto _test_eof + _test_eof151: ( m.cs) = 151; goto _test_eof + _test_eof152: ( m.cs) = 152; goto _test_eof + _test_eof552: ( m.cs) = 552; goto _test_eof + _test_eof553: ( m.cs) = 553; goto _test_eof + _test_eof554: ( m.cs) = 554; goto _test_eof + _test_eof153: ( m.cs) = 153; goto _test_eof + _test_eof555: ( m.cs) = 555; goto _test_eof + _test_eof556: ( m.cs) = 556; goto _test_eof + _test_eof154: ( m.cs) = 154; goto _test_eof + _test_eof557: ( m.cs) = 557; goto _test_eof + _test_eof558: ( m.cs) = 558; goto _test_eof + _test_eof559: ( m.cs) = 559; goto _test_eof + _test_eof560: ( m.cs) = 560; goto _test_eof + _test_eof561: ( m.cs) = 561; goto _test_eof + _test_eof562: ( m.cs) = 562; goto _test_eof + _test_eof563: ( m.cs) = 563; goto _test_eof + _test_eof564: ( m.cs) = 564; goto _test_eof + _test_eof565: ( m.cs) = 565; goto _test_eof + _test_eof566: ( m.cs) = 566; goto _test_eof + _test_eof567: ( m.cs) = 567; goto _test_eof + _test_eof568: ( m.cs) = 568; goto _test_eof + _test_eof569: ( m.cs) = 569; goto _test_eof + _test_eof570: ( m.cs) = 570; goto _test_eof + _test_eof571: ( m.cs) = 571; goto _test_eof + _test_eof572: ( m.cs) = 572; goto _test_eof + _test_eof573: ( m.cs) = 573; goto _test_eof + _test_eof574: ( m.cs) = 574; goto _test_eof + _test_eof155: ( m.cs) = 155; goto _test_eof + _test_eof156: ( m.cs) = 156; goto _test_eof + _test_eof575: ( m.cs) = 575; goto _test_eof + _test_eof157: ( m.cs) = 157; goto _test_eof + _test_eof576: ( m.cs) = 576; goto _test_eof + _test_eof577: ( m.cs) = 577; goto _test_eof + _test_eof578: ( m.cs) = 578; goto _test_eof + _test_eof579: ( m.cs) = 579; goto _test_eof + _test_eof580: ( m.cs) = 580; goto _test_eof + _test_eof581: ( m.cs) = 581; goto _test_eof + _test_eof582: ( m.cs) = 582; goto _test_eof + _test_eof583: ( m.cs) = 583; goto _test_eof + _test_eof158: ( m.cs) = 158; goto _test_eof + _test_eof159: ( m.cs) = 159; goto _test_eof + _test_eof160: ( m.cs) = 160; goto _test_eof + _test_eof584: ( m.cs) = 584; goto _test_eof + _test_eof161: ( m.cs) = 161; goto _test_eof + _test_eof162: ( m.cs) = 162; goto _test_eof + _test_eof163: ( m.cs) = 163; goto _test_eof + _test_eof585: ( m.cs) = 585; goto _test_eof + 
_test_eof164: ( m.cs) = 164; goto _test_eof + _test_eof165: ( m.cs) = 165; goto _test_eof + _test_eof586: ( m.cs) = 586; goto _test_eof + _test_eof587: ( m.cs) = 587; goto _test_eof + _test_eof166: ( m.cs) = 166; goto _test_eof + _test_eof167: ( m.cs) = 167; goto _test_eof + _test_eof168: ( m.cs) = 168; goto _test_eof + _test_eof169: ( m.cs) = 169; goto _test_eof + _test_eof170: ( m.cs) = 170; goto _test_eof + _test_eof171: ( m.cs) = 171; goto _test_eof + _test_eof588: ( m.cs) = 588; goto _test_eof + _test_eof589: ( m.cs) = 589; goto _test_eof + _test_eof590: ( m.cs) = 590; goto _test_eof + _test_eof591: ( m.cs) = 591; goto _test_eof + _test_eof592: ( m.cs) = 592; goto _test_eof + _test_eof593: ( m.cs) = 593; goto _test_eof + _test_eof594: ( m.cs) = 594; goto _test_eof + _test_eof595: ( m.cs) = 595; goto _test_eof + _test_eof596: ( m.cs) = 596; goto _test_eof + _test_eof597: ( m.cs) = 597; goto _test_eof + _test_eof598: ( m.cs) = 598; goto _test_eof + _test_eof599: ( m.cs) = 599; goto _test_eof + _test_eof600: ( m.cs) = 600; goto _test_eof + _test_eof601: ( m.cs) = 601; goto _test_eof + _test_eof602: ( m.cs) = 602; goto _test_eof + _test_eof603: ( m.cs) = 603; goto _test_eof + _test_eof604: ( m.cs) = 604; goto _test_eof + _test_eof605: ( m.cs) = 605; goto _test_eof + _test_eof606: ( m.cs) = 606; goto _test_eof + _test_eof172: ( m.cs) = 172; goto _test_eof + _test_eof173: ( m.cs) = 173; goto _test_eof + _test_eof174: ( m.cs) = 174; goto _test_eof + _test_eof607: ( m.cs) = 607; goto _test_eof + _test_eof608: ( m.cs) = 608; goto _test_eof + _test_eof609: ( m.cs) = 609; goto _test_eof + _test_eof175: ( m.cs) = 175; goto _test_eof + _test_eof610: ( m.cs) = 610; goto _test_eof + _test_eof611: ( m.cs) = 611; goto _test_eof + _test_eof176: ( m.cs) = 176; goto _test_eof + _test_eof612: ( m.cs) = 612; goto _test_eof + _test_eof613: ( m.cs) = 613; goto _test_eof + _test_eof614: ( m.cs) = 614; goto _test_eof + _test_eof615: ( m.cs) = 615; goto _test_eof + _test_eof616: ( m.cs) = 616; goto _test_eof + _test_eof177: ( m.cs) = 177; goto _test_eof + _test_eof178: ( m.cs) = 178; goto _test_eof + _test_eof179: ( m.cs) = 179; goto _test_eof + _test_eof617: ( m.cs) = 617; goto _test_eof + _test_eof180: ( m.cs) = 180; goto _test_eof + _test_eof181: ( m.cs) = 181; goto _test_eof + _test_eof182: ( m.cs) = 182; goto _test_eof + _test_eof618: ( m.cs) = 618; goto _test_eof + _test_eof183: ( m.cs) = 183; goto _test_eof + _test_eof184: ( m.cs) = 184; goto _test_eof + _test_eof619: ( m.cs) = 619; goto _test_eof + _test_eof620: ( m.cs) = 620; goto _test_eof + _test_eof185: ( m.cs) = 185; goto _test_eof + _test_eof621: ( m.cs) = 621; goto _test_eof + _test_eof622: ( m.cs) = 622; goto _test_eof + _test_eof186: ( m.cs) = 186; goto _test_eof + _test_eof187: ( m.cs) = 187; goto _test_eof + _test_eof188: ( m.cs) = 188; goto _test_eof + _test_eof623: ( m.cs) = 623; goto _test_eof + _test_eof189: ( m.cs) = 189; goto _test_eof + _test_eof190: ( m.cs) = 190; goto _test_eof + _test_eof624: ( m.cs) = 624; goto _test_eof + _test_eof625: ( m.cs) = 625; goto _test_eof + _test_eof626: ( m.cs) = 626; goto _test_eof + _test_eof627: ( m.cs) = 627; goto _test_eof + _test_eof628: ( m.cs) = 628; goto _test_eof + _test_eof629: ( m.cs) = 629; goto _test_eof + _test_eof630: ( m.cs) = 630; goto _test_eof + _test_eof631: ( m.cs) = 631; goto _test_eof + _test_eof191: ( m.cs) = 191; goto _test_eof + _test_eof192: ( m.cs) = 192; goto _test_eof + _test_eof193: ( m.cs) = 193; goto _test_eof + _test_eof632: ( m.cs) = 632; goto _test_eof + 
_test_eof194: ( m.cs) = 194; goto _test_eof + _test_eof195: ( m.cs) = 195; goto _test_eof + _test_eof196: ( m.cs) = 196; goto _test_eof + _test_eof633: ( m.cs) = 633; goto _test_eof + _test_eof197: ( m.cs) = 197; goto _test_eof + _test_eof198: ( m.cs) = 198; goto _test_eof + _test_eof634: ( m.cs) = 634; goto _test_eof + _test_eof635: ( m.cs) = 635; goto _test_eof + _test_eof199: ( m.cs) = 199; goto _test_eof + _test_eof200: ( m.cs) = 200; goto _test_eof + _test_eof201: ( m.cs) = 201; goto _test_eof + _test_eof636: ( m.cs) = 636; goto _test_eof + _test_eof637: ( m.cs) = 637; goto _test_eof + _test_eof638: ( m.cs) = 638; goto _test_eof + _test_eof639: ( m.cs) = 639; goto _test_eof + _test_eof640: ( m.cs) = 640; goto _test_eof + _test_eof641: ( m.cs) = 641; goto _test_eof + _test_eof642: ( m.cs) = 642; goto _test_eof + _test_eof643: ( m.cs) = 643; goto _test_eof + _test_eof644: ( m.cs) = 644; goto _test_eof + _test_eof645: ( m.cs) = 645; goto _test_eof + _test_eof646: ( m.cs) = 646; goto _test_eof + _test_eof647: ( m.cs) = 647; goto _test_eof + _test_eof648: ( m.cs) = 648; goto _test_eof + _test_eof649: ( m.cs) = 649; goto _test_eof + _test_eof650: ( m.cs) = 650; goto _test_eof + _test_eof651: ( m.cs) = 651; goto _test_eof + _test_eof652: ( m.cs) = 652; goto _test_eof + _test_eof653: ( m.cs) = 653; goto _test_eof + _test_eof654: ( m.cs) = 654; goto _test_eof + _test_eof202: ( m.cs) = 202; goto _test_eof + _test_eof203: ( m.cs) = 203; goto _test_eof + _test_eof204: ( m.cs) = 204; goto _test_eof + _test_eof205: ( m.cs) = 205; goto _test_eof + _test_eof206: ( m.cs) = 206; goto _test_eof + _test_eof655: ( m.cs) = 655; goto _test_eof + _test_eof207: ( m.cs) = 207; goto _test_eof + _test_eof208: ( m.cs) = 208; goto _test_eof + _test_eof656: ( m.cs) = 656; goto _test_eof + _test_eof657: ( m.cs) = 657; goto _test_eof + _test_eof658: ( m.cs) = 658; goto _test_eof + _test_eof659: ( m.cs) = 659; goto _test_eof + _test_eof660: ( m.cs) = 660; goto _test_eof + _test_eof661: ( m.cs) = 661; goto _test_eof + _test_eof662: ( m.cs) = 662; goto _test_eof + _test_eof663: ( m.cs) = 663; goto _test_eof + _test_eof664: ( m.cs) = 664; goto _test_eof + _test_eof209: ( m.cs) = 209; goto _test_eof + _test_eof210: ( m.cs) = 210; goto _test_eof + _test_eof211: ( m.cs) = 211; goto _test_eof + _test_eof665: ( m.cs) = 665; goto _test_eof + _test_eof212: ( m.cs) = 212; goto _test_eof + _test_eof213: ( m.cs) = 213; goto _test_eof + _test_eof214: ( m.cs) = 214; goto _test_eof + _test_eof666: ( m.cs) = 666; goto _test_eof + _test_eof215: ( m.cs) = 215; goto _test_eof + _test_eof216: ( m.cs) = 216; goto _test_eof + _test_eof667: ( m.cs) = 667; goto _test_eof + _test_eof668: ( m.cs) = 668; goto _test_eof + _test_eof217: ( m.cs) = 217; goto _test_eof + _test_eof218: ( m.cs) = 218; goto _test_eof + _test_eof219: ( m.cs) = 219; goto _test_eof + _test_eof220: ( m.cs) = 220; goto _test_eof + _test_eof669: ( m.cs) = 669; goto _test_eof + _test_eof221: ( m.cs) = 221; goto _test_eof + _test_eof222: ( m.cs) = 222; goto _test_eof + _test_eof670: ( m.cs) = 670; goto _test_eof + _test_eof671: ( m.cs) = 671; goto _test_eof + _test_eof672: ( m.cs) = 672; goto _test_eof + _test_eof673: ( m.cs) = 673; goto _test_eof + _test_eof674: ( m.cs) = 674; goto _test_eof + _test_eof675: ( m.cs) = 675; goto _test_eof + _test_eof676: ( m.cs) = 676; goto _test_eof + _test_eof677: ( m.cs) = 677; goto _test_eof + _test_eof223: ( m.cs) = 223; goto _test_eof + _test_eof224: ( m.cs) = 224; goto _test_eof + _test_eof225: ( m.cs) = 225; goto _test_eof + 
_test_eof678: ( m.cs) = 678; goto _test_eof + _test_eof226: ( m.cs) = 226; goto _test_eof + _test_eof227: ( m.cs) = 227; goto _test_eof + _test_eof228: ( m.cs) = 228; goto _test_eof + _test_eof679: ( m.cs) = 679; goto _test_eof + _test_eof229: ( m.cs) = 229; goto _test_eof + _test_eof230: ( m.cs) = 230; goto _test_eof + _test_eof680: ( m.cs) = 680; goto _test_eof + _test_eof681: ( m.cs) = 681; goto _test_eof + _test_eof231: ( m.cs) = 231; goto _test_eof + _test_eof232: ( m.cs) = 232; goto _test_eof + _test_eof233: ( m.cs) = 233; goto _test_eof + _test_eof682: ( m.cs) = 682; goto _test_eof + _test_eof683: ( m.cs) = 683; goto _test_eof + _test_eof684: ( m.cs) = 684; goto _test_eof + _test_eof685: ( m.cs) = 685; goto _test_eof + _test_eof686: ( m.cs) = 686; goto _test_eof + _test_eof687: ( m.cs) = 687; goto _test_eof + _test_eof688: ( m.cs) = 688; goto _test_eof + _test_eof689: ( m.cs) = 689; goto _test_eof + _test_eof690: ( m.cs) = 690; goto _test_eof + _test_eof691: ( m.cs) = 691; goto _test_eof + _test_eof692: ( m.cs) = 692; goto _test_eof + _test_eof693: ( m.cs) = 693; goto _test_eof + _test_eof694: ( m.cs) = 694; goto _test_eof + _test_eof695: ( m.cs) = 695; goto _test_eof + _test_eof696: ( m.cs) = 696; goto _test_eof + _test_eof697: ( m.cs) = 697; goto _test_eof + _test_eof698: ( m.cs) = 698; goto _test_eof + _test_eof699: ( m.cs) = 699; goto _test_eof + _test_eof700: ( m.cs) = 700; goto _test_eof + _test_eof234: ( m.cs) = 234; goto _test_eof + _test_eof235: ( m.cs) = 235; goto _test_eof + _test_eof701: ( m.cs) = 701; goto _test_eof + _test_eof236: ( m.cs) = 236; goto _test_eof + _test_eof237: ( m.cs) = 237; goto _test_eof + _test_eof702: ( m.cs) = 702; goto _test_eof + _test_eof703: ( m.cs) = 703; goto _test_eof + _test_eof704: ( m.cs) = 704; goto _test_eof + _test_eof705: ( m.cs) = 705; goto _test_eof + _test_eof706: ( m.cs) = 706; goto _test_eof + _test_eof707: ( m.cs) = 707; goto _test_eof + _test_eof708: ( m.cs) = 708; goto _test_eof + _test_eof709: ( m.cs) = 709; goto _test_eof + _test_eof238: ( m.cs) = 238; goto _test_eof + _test_eof239: ( m.cs) = 239; goto _test_eof + _test_eof240: ( m.cs) = 240; goto _test_eof + _test_eof710: ( m.cs) = 710; goto _test_eof + _test_eof241: ( m.cs) = 241; goto _test_eof + _test_eof242: ( m.cs) = 242; goto _test_eof + _test_eof243: ( m.cs) = 243; goto _test_eof + _test_eof711: ( m.cs) = 711; goto _test_eof + _test_eof244: ( m.cs) = 244; goto _test_eof + _test_eof245: ( m.cs) = 245; goto _test_eof + _test_eof712: ( m.cs) = 712; goto _test_eof + _test_eof713: ( m.cs) = 713; goto _test_eof + _test_eof246: ( m.cs) = 246; goto _test_eof + _test_eof247: ( m.cs) = 247; goto _test_eof + _test_eof714: ( m.cs) = 714; goto _test_eof + _test_eof250: ( m.cs) = 250; goto _test_eof + _test_eof717: ( m.cs) = 717; goto _test_eof + _test_eof718: ( m.cs) = 718; goto _test_eof + _test_eof251: ( m.cs) = 251; goto _test_eof + _test_eof252: ( m.cs) = 252; goto _test_eof + _test_eof253: ( m.cs) = 253; goto _test_eof + _test_eof254: ( m.cs) = 254; goto _test_eof + _test_eof719: ( m.cs) = 719; goto _test_eof + _test_eof255: ( m.cs) = 255; goto _test_eof + _test_eof720: ( m.cs) = 720; goto _test_eof + _test_eof256: ( m.cs) = 256; goto _test_eof + _test_eof257: ( m.cs) = 257; goto _test_eof + _test_eof258: ( m.cs) = 258; goto _test_eof + _test_eof715: ( m.cs) = 715; goto _test_eof + _test_eof716: ( m.cs) = 716; goto _test_eof + _test_eof248: ( m.cs) = 248; goto _test_eof + _test_eof249: ( m.cs) = 249; goto _test_eof _test_eof: {} if ( m.p) == ( m.eof) { - switch m.cs { - case 
206, 207, 208, 210, 243, 244, 246, 265, 266, 268, 287, 289, 317, 318, 319, 320, 322, 323, 324, 343, 344, 346, 365, 366, 368, 387, 388, 403, 404, 405, 407, 426, 427, 429, 430, 431, 450, 451, 452, 453, 455, 474, 475, 476, 478, 497, 498, 500, 501, 502, 522, 537, 538, 540, 573, 602, 603, 605, 606: -//line plugins/parsers/influx/machine.go.rl:22 + switch ( m.cs) { + case 9, 250: +//line plugins/parsers/influx/machine.go.rl:23 - yield = true - m.cs = 196; - {( m.p)++; m.cs = 0; goto _out } - - case 1, 133: -//line plugins/parsers/influx/machine.go.rl:56 - - m.err = ErrParse + err = ErrNameParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } - case 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 38, 39, 40, 41, 42, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 90, 91, 92, 93, 94, 129, 132, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194: -//line plugins/parsers/influx/machine.go.rl:35 + case 2, 3, 4, 5, 6, 7, 8, 29, 32, 33, 36, 37, 38, 50, 51, 52, 53, 54, 74, 76, 77, 94, 104, 106, 142, 154, 157, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246: +//line plugins/parsers/influx/machine.go.rl:30 - m.err = ErrFieldParse + err = ErrFieldParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:56 + case 14, 15, 16, 23, 25, 26, 252, 253, 254, 255, 256, 257: +//line plugins/parsers/influx/machine.go.rl:37 - m.err = ErrParse + err = ErrTagParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } - case 28, 29, 30, 36, 37, 200, 201, 202, 203, 204: -//line plugins/parsers/influx/machine.go.rl:42 + case 233: +//line plugins/parsers/influx/machine.go.rl:44 - m.err = ErrTagParse + err = ErrTimestampParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:56 + case 259: +//line plugins/parsers/influx/machine.go.rl:73 - m.err = ErrParse + foundMetric = true + + case 289, 292, 296, 364, 388, 389, 393, 394, 395, 519, 553, 554, 556, 717: +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + + case 340, 341, 342, 344, 363, 419, 443, 444, 448, 468, 484, 485, 487, 719, 720: +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + + case 613, 659, 704: +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + + case 614, 662, 707: +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + + case 315, 608, 609, 611, 612, 615, 621, 622, 655, 656, 657, 658, 660, 661, 663, 701, 702, 703, 705, 
706, 708: +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + + case 616, 617, 618, 619, 620, 664, 665, 666, 667, 668, 709, 710, 711, 712, 713: +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + + case 265, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 320, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 367, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 399, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 422, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604, 605, 606, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700: +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + + case 11, 39, 41, 166, 168: +//line plugins/parsers/influx/machine.go.rl:23 + + err = ErrNameParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } - case 8: -//line plugins/parsers/influx/machine.go.rl:49 +//line plugins/parsers/influx/machine.go.rl:30 - m.err = ErrTimestampParse + err = ErrFieldParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:56 + case 35, 75, 105, 171, 201: +//line plugins/parsers/influx/machine.go.rl:30 - m.err = ErrParse + err = ErrFieldParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } - case 604: -//line plugins/parsers/influx/machine.go.rl:72 +//line plugins/parsers/influx/machine.go.rl:44 - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; m.cs = 0; goto _out } - - case 607: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; m.cs = 0; goto _out } - - case 233, 293, 307, 393, 526, 562, 578, 591: -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; m.cs = 0; goto _out } - - case 236, 296, 310, 396, 529, 565, 581, 594: -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; m.cs = 0; goto _out } - - case 229, 230, 231, 232, 234, 235, 237, 288, 290, 291, 292, 294, 295, 297, 303, 304, 305, 306, 308, 309, 311, 389, 390, 391, 392, 394, 395, 397, 521, 523, 524, 525, 527, 528, 530, 536, 559, 560, 561, 563, 564, 566, 572, 574, 575, 576, 577, 579, 580, 582, 588, 589, 590, 592, 593, 595: -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - -//line 
plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; m.cs = 0; goto _out } - - case 238, 239, 240, 241, 242, 298, 299, 300, 301, 302, 312, 313, 314, 315, 316, 398, 399, 400, 401, 402, 531, 532, 533, 534, 535, 567, 568, 569, 570, 571, 583, 584, 585, 586, 587, 596, 597, 598, 599, 600: -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; m.cs = 0; goto _out } - - case 209, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 245, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 267, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 321, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 345, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 367, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 406, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 428, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 454, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 477, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 499, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 539, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558: -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; m.cs = 0; goto _out } - - case 43, 45, 83, 130, 131, 138: -//line plugins/parsers/influx/machine.go.rl:35 - - m.err = ErrFieldParse + err = ErrTimestampParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:49 + case 21, 45, 46, 47, 59, 60, 62, 64, 69, 71, 72, 78, 79, 80, 85, 87, 89, 90, 98, 99, 101, 102, 103, 108, 109, 110, 123, 124, 138, 139: +//line plugins/parsers/influx/machine.go.rl:37 - m.err = ErrTimestampParse + err = ErrTagParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:56 +//line plugins/parsers/influx/machine.go.rl:30 - m.err = ErrParse + err = ErrFieldParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } - case 31, 32, 33, 34, 35, 85, 86, 87, 88, 89, 95, 96, 97, 99, 100, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 134, 135, 137, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169: -//line plugins/parsers/influx/machine.go.rl:42 + case 61: +//line plugins/parsers/influx/machine.go.rl:37 - m.err = ErrTagParse + err = ErrTagParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:35 +//line plugins/parsers/influx/machine.go.rl:44 - m.err = ErrFieldParse + err = ErrTimestampParse ( m.p)-- - m.cs = 195; 
- {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:56 + case 1: +//line plugins/parsers/influx/machine.go.rl:77 - m.err = ErrParse + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:37 + + err = ErrTagParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } - case 101: -//line plugins/parsers/influx/machine.go.rl:42 + case 524, 578, 672: +//line plugins/parsers/influx/machine.go.rl:77 - m.err = ErrTagParse + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + + case 527, 581, 675: +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + + case 396, 520, 521, 522, 523, 525, 526, 528, 552, 575, 576, 577, 579, 580, 582, 669, 670, 671, 673, 674, 676: +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + + case 529, 530, 531, 532, 533, 583, 584, 585, 586, 587, 677, 678, 679, 680, 681: +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + + case 293, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 390, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 555, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574: +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + + case 17, 24: +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:37 + + err = ErrTagParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:49 + case 473, 509, 626: +//line plugins/parsers/influx/machine.go.rl:90 - m.err = ErrTimestampParse + err = m.handler.AddTag(key, m.text()) 
+ if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:103 + + err = m.handler.AddInt(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + + case 476, 512, 629: +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddUint(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + + case 467, 469, 470, 471, 472, 474, 475, 477, 483, 506, 507, 508, 510, 511, 513, 623, 624, 625, 627, 628, 630: +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddFloat(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + + case 478, 479, 480, 481, 482, 514, 515, 516, 517, 518, 631, 632, 633, 634, 635: +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddBool(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + + case 343, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 445, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 486, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505: +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + + case 10: +//line plugins/parsers/influx/machine.go.rl:23 + + err = ErrNameParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:56 +//line plugins/parsers/influx/machine.go.rl:77 - m.err = ErrParse + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:37 + + err = ErrTagParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } - case 98, 136, 139, 158: -//line plugins/parsers/influx/machine.go.rl:42 + case 100: +//line plugins/parsers/influx/machine.go.rl:37 - m.err = ErrTagParse + err = ErrTagParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:35 +//line plugins/parsers/influx/machine.go.rl:30 - m.err = ErrFieldParse + err = ErrFieldParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:49 +//line plugins/parsers/influx/machine.go.rl:44 - m.err = ErrTimestampParse + err = 
ErrTimestampParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:56 + case 12, 13, 27, 28, 30, 31, 42, 43, 55, 56, 57, 58, 73, 92, 93, 95, 97, 140, 141, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 155, 156, 158, 159, 160, 161, 162, 163, 164, 165, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230: +//line plugins/parsers/influx/machine.go.rl:77 - m.err = ErrParse + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:37 + + err = ErrTagParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go:23507 +//line plugins/parsers/influx/machine.go.rl:30 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + + case 18, 19, 20, 22, 48, 49, 65, 66, 67, 68, 70, 81, 82, 83, 84, 86, 88, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 125, 126, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198: +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:37 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:30 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + + case 40, 167, 169, 170, 199, 200, 231, 232: +//line plugins/parsers/influx/machine.go.rl:23 + + err = ErrNameParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:37 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:30 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + + case 44, 91, 153: +//line plugins/parsers/influx/machine.go.rl:77 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:37 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:30 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:44 + + err = ErrTimestampParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + + case 63, 107, 127: +//line plugins/parsers/influx/machine.go.rl:90 + + err = m.handler.AddTag(key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:37 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:30 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:44 + + err = 
ErrTimestampParse + ( m.p)-- + + ( m.cs) = 247; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go:30741 } } _out: {} } -//line plugins/parsers/influx/machine.go.rl:308 +//line plugins/parsers/influx/machine.go.rl:390 - // Even if there was an error, return true. On the next call to this - // function we will attempt to scan to the next line of input and recover. - if m.err != nil { - return true + if err != nil { + return err } - // Don't check the error state in the case that we just yielded, because - // the yield indicates we just completed parsing a line. - if !yield && m.cs == LineProtocol_error { - m.err = ErrParse - return true + // This would indicate an error in the machine that was reported with a + // more specific error. We return a generic error but this should + // possibly be a panic. + if m.cs == 0 { + m.cs = LineProtocol_en_discard_line + return ErrParse } - return true + // If we haven't found a metric line yet and we reached the EOF, report it + // now. This happens when the data ends with a comment or whitespace. + // + // Otherwise we have successfully parsed a metric line, so if we are at + // the EOF we will report it the next call. + if !foundMetric && m.p == m.pe && m.pe == m.eof { + return EOF + } + + return nil } -// Err returns the error that occurred on the last call to ParseLine. If the -// result is nil, then the line was parsed successfully. -func (m *machine) Err() error { - return m.err -} - -// Position returns the current position into the input. +// Position returns the current byte offset into the data. func (m *machine) Position() int { return m.p } +// LineOffset returns the byte offset of the current line. +func (m *machine) LineOffset() int { + return m.sol +} + +// LineNumber returns the current line number. Lines are counted based on the +// regular expression `\r?\n`. +func (m *machine) LineNumber() int { + return m.lineno +} + +// Column returns the current column. 
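// Taken together, the blocks above define the contract of the new Next
// method: nil means one metric line was handed to the Handler, EOF means
// the input is exhausted, and any other error names the first token that
// failed to parse; the machine skips the remainder of a bad line on the
// following call, so callers can simply keep iterating to recover. A
// minimal caller sketch under those assumptions (the handler and data
// variables are hypothetical):
//
//	m := NewMachine(handler)
//	m.SetData(data)
//	for {
//		err := m.Next()
//		if err == EOF {
//			break // all input consumed
//		}
//		if err != nil {
//			// Parse error; report it and continue so the machine can
//			// recover on the next line.
//			log.Printf("line %d col %d: %v", m.LineNumber(), m.Column(), err)
//			continue
//		}
//		// A full metric line was delivered to the Handler callbacks.
//	}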
+func (m *machine) Column() int { + lineOffset := m.p - m.sol + return lineOffset + 1 +} + func (m *machine) text() []byte { return m.data[m.pb:m.p] } diff --git a/plugins/parsers/influx/machine.go.rl b/plugins/parsers/influx/machine.go.rl index c8cf0bee9..52b32b2b8 100644 --- a/plugins/parsers/influx/machine.go.rl +++ b/plugins/parsers/influx/machine.go.rl @@ -10,6 +10,7 @@ var ( ErrTagParse = errors.New("expected tag") ErrTimestampParse = errors.New("expected timestamp") ErrParse = errors.New("parse error") + EOF = errors.New("EOF") ) %%{ @@ -19,58 +20,67 @@ action begin { m.pb = m.p } -action yield { - yield = true - fnext align; - fbreak; -} - action name_error { - m.err = ErrNameParse + err = ErrNameParse fhold; fnext discard_line; fbreak; } action field_error { - m.err = ErrFieldParse + err = ErrFieldParse fhold; fnext discard_line; fbreak; } action tagset_error { - m.err = ErrTagParse + err = ErrTagParse fhold; fnext discard_line; fbreak; } action timestamp_error { - m.err = ErrTimestampParse + err = ErrTimestampParse fhold; fnext discard_line; fbreak; } action parse_error { - m.err = ErrParse + err = ErrParse fhold; fnext discard_line; fbreak; } +action align_error { + err = ErrParse + fnext discard_line; + fbreak; +} + action hold_recover { fhold; fgoto main; } -action discard { +action goto_align { fgoto align; } +action found_metric { + foundMetric = true +} + action name { - m.handler.SetMeasurement(m.text()) + err = m.handler.SetMeasurement(m.text()) + if err != nil { + fhold; + fnext discard_line; + fbreak; + } } action tagkey { @@ -78,7 +88,12 @@ action tagkey { } action tagvalue { - m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(key, m.text()) + if err != nil { + fhold; + fnext discard_line; + fbreak; + } } action fieldkey { @@ -86,32 +101,76 @@ action fieldkey { } action integer { - m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(key, m.text()) + if err != nil { + fhold; + fnext discard_line; + fbreak; + } } action unsigned { - m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(key, m.text()) + if err != nil { + fhold; + fnext discard_line; + fbreak; + } } action float { - m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(key, m.text()) + if err != nil { + fhold; + fnext discard_line; + fbreak; + } } action bool { - m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(key, m.text()) + if err != nil { + fhold; + fnext discard_line; + fbreak; + } } action string { - m.handler.AddString(key, m.text()) + err = m.handler.AddString(key, m.text()) + if err != nil { + fhold; + fnext discard_line; + fbreak; + } } action timestamp { - m.handler.SetTimestamp(m.text()) + err = m.handler.SetTimestamp(m.text()) + if err != nil { + fhold; + fnext discard_line; + fbreak; + } +} + +action incr_newline { + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line +} + +action eol { + fnext align; + fbreak; } ws = [\t\v\f ]; +newline = + '\r'? 
'\n' %to(incr_newline); + non_zero_digit = [1-9]; @@ -155,7 +214,7 @@ fieldbool = (true | false) >begin %bool; fieldstringchar = - [^\n\f\r\\"] | '\\' [\\"]; + [^\f\r\n\\"] | '\\' [\\"] | newline; fieldstring = fieldstringchar* >begin %string; @@ -172,16 +231,16 @@ fieldset = field ( ',' field )*; tagchar = - [^\t\n\f\r ,=\\] | ( '\\' [^\t\n\f\r] ); + [^\t\n\f\r ,=\\] | ( '\\' [^\t\n\f\r\\] ) | '\\\\' %to{ fhold; }; tagkey = tagchar+ >begin %tagkey; tagvalue = - tagchar+ >begin %tagvalue; + tagchar+ >begin %eof(tagvalue) %tagvalue; tagset = - (',' (tagkey '=' tagvalue) $err(tagset_error))*; + ((',' tagkey '=' tagvalue) $err(tagset_error))*; measurement_chars = [^\t\n\f\r ,\\] | ( '\\' [^\t\n\f\r] ); @@ -190,52 +249,71 @@ measurement_start = measurement_chars - '#'; measurement = - (measurement_start measurement_chars*) >begin %name; + (measurement_start measurement_chars*) >begin %eof(name) %name; -newline = - [\r\n]; +eol_break = + newline %to(eol) + ; -comment = - '#' (any -- newline)* newline; - -eol = - ws* newline? >yield %eof(yield); - -line = - measurement +metric = + measurement >err(name_error) tagset - (ws+ fieldset) $err(field_error) + ws+ fieldset $err(field_error) (ws+ timestamp)? $err(timestamp_error) - eol; + ; -# The main machine parses a single line of line protocol. -main := line $err(parse_error); +line_with_term = + ws* metric ws* eol_break + ; + +line_without_term = + ws* metric ws* + ; + +main := + (line_with_term* + (line_with_term | line_without_term?) + ) >found_metric + ; # The discard_line machine discards the current line. Useful for recovering # on the next line when an error occurs. discard_line := - (any - newline)* newline @discard; + (any -- newline)* newline @goto_align; + +commentline = + ws* '#' (any -- newline)* newline; + +emptyline = + ws* newline; # The align machine scans forward to the start of the next line. This machine # is used to skip over whitespace and comments, keeping this logic out of the # main machine. +# +# Skip valid lines that don't contain line protocol, any other data will move +# control to the main parser via the err action. align := - (space* comment)* space* measurement_start @hold_recover %eof(yield); + (emptyline | commentline | ws+)* %err(hold_recover); -series := measurement tagset $err(parse_error) eol; +# Series is a machine for matching measurement+tagset +series := + (measurement >err(name_error) tagset eol_break?) 
+ >found_metric + ; }%% %% write data; type Handler interface { - SetMeasurement(name []byte) - AddTag(key []byte, value []byte) - AddInt(key []byte, value []byte) - AddUint(key []byte, value []byte) - AddFloat(key []byte, value []byte) - AddString(key []byte, value []byte) - AddBool(key []byte, value []byte) - SetTimestamp(tm []byte) + SetMeasurement(name []byte) error + AddTag(key []byte, value []byte) error + AddInt(key []byte, value []byte) error + AddUint(key []byte, value []byte) error + AddFloat(key []byte, value []byte) error + AddString(key []byte, value []byte) error + AddBool(key []byte, value []byte) error + SetTimestamp(tm []byte) error } type machine struct { @@ -243,9 +321,10 @@ type machine struct { cs int p, pe, eof int pb int + lineno int + sol int handler Handler initState int - err error } func NewMachine(handler Handler) *machine { @@ -256,6 +335,7 @@ func NewMachine(handler Handler) *machine { %% access m.; %% variable p m.p; + %% variable cs m.cs; %% variable pe m.pe; %% variable eof m.eof; %% variable data m.data; @@ -284,55 +364,76 @@ func (m *machine) SetData(data []byte) { m.data = data m.p = 0 m.pb = 0 + m.lineno = 1 + m.sol = 0 m.pe = len(data) m.eof = len(data) - m.err = nil %% write init; m.cs = m.initState } -// ParseLine parses a line of input and returns true if more data can be -// parsed. -func (m *machine) ParseLine() bool { - if m.data == nil || m.p >= m.pe { - m.err = nil - return false +// Next parses the next metric line and returns nil if it was successfully +// processed. If the line contains a syntax error an error is returned, +// otherwise if the end of file is reached before finding a metric line then +// EOF is returned. +func (m *machine) Next() error { + if m.p == m.pe && m.pe == m.eof { + return EOF } - m.err = nil + var err error var key []byte - var yield bool + foundMetric := false %% write exec; - // Even if there was an error, return true. On the next call to this - // function we will attempt to scan to the next line of input and recover. - if m.err != nil { - return true + if err != nil { + return err } - // Don't check the error state in the case that we just yielded, because - // the yield indicates we just completed parsing a line. - if !yield && m.cs == LineProtocol_error { - m.err = ErrParse - return true + // This would indicate an error in the machine that was reported with a + // more specific error. We return a generic error but this should + // possibly be a panic. + if m.cs == %%{ write error; }%% { + m.cs = LineProtocol_en_discard_line + return ErrParse } - return true + // If we haven't found a metric line yet and we reached the EOF, report it + // now. This happens when the data ends with a comment or whitespace. + // + // Otherwise we have successfully parsed a metric line, so if we are at + // the EOF we will report it the next call. + if !foundMetric && m.p == m.pe && m.pe == m.eof { + return EOF + } + + return nil } -// Err returns the error that occurred on the last call to ParseLine. If the -// result is nil, then the line was parsed successfully. -func (m *machine) Err() error { - return m.err -} - -// Position returns the current position into the input. +// Position returns the current byte offset into the data. func (m *machine) Position() int { return m.p } +// LineOffset returns the byte offset of the current line. +func (m *machine) LineOffset() int { + return m.sol +} + +// LineNumber returns the current line number. Lines are counted based on the +// regular expression `\r?\n`. 
+func (m *machine) LineNumber() int { + return m.lineno +} + +// Column returns the current column. +func (m *machine) Column() int { + lineOffset := m.p - m.sol + return lineOffset + 1 +} + func (m *machine) text() []byte { return m.data[m.pb:m.p] } diff --git a/plugins/parsers/influx/machine_test.go b/plugins/parsers/influx/machine_test.go index 1a9cb196e..a1c921ef1 100644 --- a/plugins/parsers/influx/machine_test.go +++ b/plugins/parsers/influx/machine_test.go @@ -1,9 +1,11 @@ -package influx +package influx_test import ( + "errors" "fmt" "testing" + "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/stretchr/testify/require" ) @@ -11,15 +13,16 @@ type TestingHandler struct { results []Result } -func (h *TestingHandler) SetMeasurement(name []byte) { +func (h *TestingHandler) SetMeasurement(name []byte) error { mname := Result{ Name: Measurement, Value: name, } h.results = append(h.results, mname) + return nil } -func (h *TestingHandler) AddTag(key []byte, value []byte) { +func (h *TestingHandler) AddTag(key []byte, value []byte) error { tagkey := Result{ Name: TagKey, Value: key, @@ -29,9 +32,10 @@ func (h *TestingHandler) AddTag(key []byte, value []byte) { Value: value, } h.results = append(h.results, tagkey, tagvalue) + return nil } -func (h *TestingHandler) AddInt(key []byte, value []byte) { +func (h *TestingHandler) AddInt(key []byte, value []byte) error { fieldkey := Result{ Name: FieldKey, Value: key, @@ -41,9 +45,10 @@ func (h *TestingHandler) AddInt(key []byte, value []byte) { Value: value, } h.results = append(h.results, fieldkey, fieldvalue) + return nil } -func (h *TestingHandler) AddUint(key []byte, value []byte) { +func (h *TestingHandler) AddUint(key []byte, value []byte) error { fieldkey := Result{ Name: FieldKey, Value: key, @@ -53,9 +58,10 @@ func (h *TestingHandler) AddUint(key []byte, value []byte) { Value: value, } h.results = append(h.results, fieldkey, fieldvalue) + return nil } -func (h *TestingHandler) AddFloat(key []byte, value []byte) { +func (h *TestingHandler) AddFloat(key []byte, value []byte) error { fieldkey := Result{ Name: FieldKey, Value: key, @@ -65,9 +71,10 @@ func (h *TestingHandler) AddFloat(key []byte, value []byte) { Value: value, } h.results = append(h.results, fieldkey, fieldvalue) + return nil } -func (h *TestingHandler) AddString(key []byte, value []byte) { +func (h *TestingHandler) AddString(key []byte, value []byte) error { fieldkey := Result{ Name: FieldKey, Value: key, @@ -77,9 +84,10 @@ func (h *TestingHandler) AddString(key []byte, value []byte) { Value: value, } h.results = append(h.results, fieldkey, fieldvalue) + return nil } -func (h *TestingHandler) AddBool(key []byte, value []byte) { +func (h *TestingHandler) AddBool(key []byte, value []byte) error { fieldkey := Result{ Name: FieldKey, Value: key, @@ -89,58 +97,70 @@ func (h *TestingHandler) AddBool(key []byte, value []byte) { Value: value, } h.results = append(h.results, fieldkey, fieldvalue) + return nil } -func (h *TestingHandler) SetTimestamp(tm []byte) { +func (h *TestingHandler) SetTimestamp(tm []byte) error { timestamp := Result{ Name: Timestamp, Value: tm, } h.results = append(h.results, timestamp) + return nil } -func (h *TestingHandler) Reset() { +func (h *TestingHandler) Result(err error) { + var res Result + if err == nil { + res = Result{ + Name: Success, + } + } else { + res = Result{ + Name: Error, + err: err, + } + } + h.results = append(h.results, res) } func (h *TestingHandler) Results() []Result { return h.results } -func (h *TestingHandler) 
AddError(err error) { - e := Result{ - err: err, - } - h.results = append(h.results, e) -} - type BenchmarkingHandler struct { } -func (h *BenchmarkingHandler) SetMeasurement(name []byte) { +func (h *BenchmarkingHandler) SetMeasurement(name []byte) error { + return nil } -func (h *BenchmarkingHandler) AddTag(key []byte, value []byte) { +func (h *BenchmarkingHandler) AddTag(key []byte, value []byte) error { + return nil } -func (h *BenchmarkingHandler) AddInt(key []byte, value []byte) { +func (h *BenchmarkingHandler) AddInt(key []byte, value []byte) error { + return nil } -func (h *BenchmarkingHandler) AddUint(key []byte, value []byte) { +func (h *BenchmarkingHandler) AddUint(key []byte, value []byte) error { + return nil } -func (h *BenchmarkingHandler) AddFloat(key []byte, value []byte) { +func (h *BenchmarkingHandler) AddFloat(key []byte, value []byte) error { + return nil } -func (h *BenchmarkingHandler) AddString(key []byte, value []byte) { +func (h *BenchmarkingHandler) AddString(key []byte, value []byte) error { + return nil } -func (h *BenchmarkingHandler) AddBool(key []byte, value []byte) { +func (h *BenchmarkingHandler) AddBool(key []byte, value []byte) error { + return nil } -func (h *BenchmarkingHandler) SetTimestamp(tm []byte) { -} - -func (h *BenchmarkingHandler) Reset() { +func (h *BenchmarkingHandler) SetTimestamp(tm []byte) error { + return nil } type TokenType int @@ -161,6 +181,8 @@ const ( EOF Punc WhiteSpace + Success + Error ) func (t TokenType) String() string { @@ -195,6 +217,10 @@ func (t TokenType) String() string { return "Punc" case WhiteSpace: return "WhiteSpace" + case Success: + return "Success" + case Error: + return "Error" default: panic("Unknown TokenType") } @@ -246,6 +272,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { @@ -264,6 +293,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { @@ -286,6 +318,9 @@ var tests = []struct { Name: Timestamp, Value: []byte("1516241192000000000"), }, + { + Name: Success, + }, }, }, { @@ -304,6 +339,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { @@ -322,6 +360,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { @@ -340,6 +381,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { @@ -358,6 +402,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { @@ -376,6 +423,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { @@ -394,6 +444,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42e0"), }, + { + Name: Success, + }, }, }, { @@ -412,6 +465,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("-42e0"), }, + { + Name: Success, + }, }, }, { @@ -430,6 +486,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42e-1"), }, + { + Name: Success, + }, }, }, { @@ -448,6 +507,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42E0"), }, + { + Name: Success, + }, }, }, { @@ -459,7 +521,8 @@ var tests = []struct { Value: []byte("cpu"), }, { - err: ErrFieldParse, + Name: Error, + err: influx.ErrFieldParse, }, }, }, @@ -479,6 +542,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42.2"), }, + { + Name: Success, + }, }, }, { @@ -497,6 +563,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("-42"), }, + { + Name: Success, + }, 
}, }, { @@ -515,6 +584,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte(".42"), }, + { + Name: Success, + }, }, }, { @@ -533,6 +605,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("-.42"), }, + { + Name: Success, + }, }, }, { @@ -551,6 +626,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("00.42"), }, + { + Name: Success, + }, }, }, { @@ -562,7 +640,8 @@ var tests = []struct { Value: []byte("cpu"), }, { - err: ErrFieldParse, + Name: Error, + err: influx.ErrFieldParse, }, }, }, @@ -590,6 +669,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { @@ -608,6 +690,9 @@ var tests = []struct { Name: FieldInt, Value: []byte("42i"), }, + { + Name: Success, + }, }, }, { @@ -626,6 +711,9 @@ var tests = []struct { Name: FieldInt, Value: []byte("-42i"), }, + { + Name: Success, + }, }, }, { @@ -644,6 +732,9 @@ var tests = []struct { Name: FieldInt, Value: []byte("0i"), }, + { + Name: Success, + }, }, }, { @@ -662,6 +753,30 @@ var tests = []struct { Name: FieldInt, Value: []byte("-0i"), }, + { + Name: Success, + }, + }, + }, + { + name: "integer field overflow okay", + input: []byte("cpu value=9223372036854775808i"), + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldInt, + Value: []byte("9223372036854775808i"), + }, + { + Name: Success, + }, }, }, { @@ -673,13 +788,14 @@ var tests = []struct { Value: []byte("cpu"), }, { - err: ErrFieldParse, + Name: Error, + err: influx.ErrFieldParse, }, }, }, { name: "string field", - input: []byte(`cpu value="42"`), + input: []byte("cpu value=\"42\""), results: []Result{ { Name: Measurement, @@ -693,11 +809,35 @@ var tests = []struct { Name: FieldString, Value: []byte("42"), }, + { + Name: Success, + }, + }, + }, + { + name: "newline in string field", + input: []byte("cpu value=\"4\n2\""), + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldString, + Value: []byte("4\n2"), + }, + { + Name: Success, + }, }, }, { name: "bool field", - input: []byte(`cpu value=true`), + input: []byte("cpu value=true"), results: []Result{ { Name: Measurement, @@ -711,11 +851,14 @@ var tests = []struct { Name: FieldBool, Value: []byte("true"), }, + { + Name: Success, + }, }, }, { name: "tag", - input: []byte(`cpu,host=localhost value=42`), + input: []byte("cpu,host=localhost value=42"), results: []Result{ { Name: Measurement, @@ -737,11 +880,14 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "tag key escape space", - input: []byte(`cpu,h\ ost=localhost value=42`), + input: []byte("cpu,h\\ ost=localhost value=42"), results: []Result{ { Name: Measurement, @@ -763,11 +909,14 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "tag key escape comma", - input: []byte(`cpu,h\,ost=localhost value=42`), + input: []byte("cpu,h\\,ost=localhost value=42"), results: []Result{ { Name: Measurement, @@ -789,11 +938,14 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "tag key escape equal", - input: []byte(`cpu,h\=ost=localhost value=42`), + input: []byte("cpu,h\\=ost=localhost value=42"), results: []Result{ { Name: Measurement, @@ -815,11 +967,14 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "multiple 
tags", - input: []byte(`cpu,host=localhost,cpu=cpu0 value=42`), + input: []byte("cpu,host=localhost,cpu=cpu0 value=42"), results: []Result{ { Name: Measurement, @@ -849,6 +1004,96 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, + }, + }, + { + name: "tag value escape space", + input: []byte(`cpu,host=two\ words value=42`), + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: TagKey, + Value: []byte("host"), + }, + { + Name: TagValue, + Value: []byte(`two\ words`), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldFloat, + Value: []byte("42"), + }, + { + Name: Success, + }, + }, + }, + { + name: "tag value double escape space", + input: []byte(`cpu,host=two\\ words value=42`), + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: TagKey, + Value: []byte("host"), + }, + { + Name: TagValue, + Value: []byte(`two\\ words`), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldFloat, + Value: []byte("42"), + }, + { + Name: Success, + }, + }, + }, + { + name: "tag value triple escape space", + input: []byte(`cpu,host=two\\\ words value=42`), + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: TagKey, + Value: []byte("host"), + }, + { + Name: TagValue, + Value: []byte(`two\\\ words`), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldFloat, + Value: []byte("42"), + }, + { + Name: Success, + }, }, }, { @@ -860,7 +1105,8 @@ var tests = []struct { Value: []byte("cpu"), }, { - err: ErrTagParse, + Name: Error, + err: influx.ErrTagParse, }, }, }, @@ -873,7 +1119,8 @@ var tests = []struct { Value: []byte("cpu"), }, { - err: ErrTagParse, + Name: Error, + err: influx.ErrTagParse, }, }, }, @@ -886,7 +1133,8 @@ var tests = []struct { Value: []byte("cpu"), }, { - err: ErrTagParse, + Name: Error, + err: influx.ErrTagParse, }, }, }, @@ -899,7 +1147,8 @@ var tests = []struct { Value: []byte("cpu"), }, { - err: ErrTagParse, + Name: Error, + err: influx.ErrTagParse, }, }, }, @@ -912,7 +1161,8 @@ var tests = []struct { Value: []byte("cpu"), }, { - err: ErrTagParse, + Name: Error, + err: influx.ErrTagParse, }, }, }, @@ -936,6 +1186,9 @@ var tests = []struct { Name: Timestamp, Value: []byte("-1"), }, + { + Name: Success, + }, }, }, { @@ -958,11 +1211,14 @@ var tests = []struct { Name: Timestamp, Value: []byte("0"), }, + { + Name: Success, + }, }, }, { name: "multiline", - input: []byte("cpu value=42\n\n\ncpu value=43\n"), + input: []byte("cpu value=42\n\n\n\ncpu value=43"), results: []Result{ { Name: Measurement, @@ -976,6 +1232,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, { Name: Measurement, Value: []byte("cpu"), @@ -988,21 +1247,26 @@ var tests = []struct { Name: FieldFloat, Value: []byte("43"), }, + { + Name: Success, + }, }, }, { name: "error recovery", - input: []byte("cpu value=howdy\ncpu\ncpu value=42\n"), + input: []byte("cpu value=howdy,value2=42\ncpu\ncpu value=42"), results: []Result{ { Name: Measurement, Value: []byte("cpu"), }, { - err: ErrFieldParse, + Name: Error, + err: influx.ErrFieldParse, }, { - err: ErrFieldParse, + Name: Error, + err: influx.ErrTagParse, }, { Name: Measurement, @@ -1016,6 +1280,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { @@ -1038,6 +1305,9 @@ var tests = []struct { Name: Timestamp, Value: []byte("1516241192000000000"), }, + { + Name: Success, + }, { 
Name: Measurement, Value: []byte("cpu"), @@ -1050,6 +1320,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { @@ -1068,6 +1341,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { @@ -1079,7 +1355,8 @@ var tests = []struct { Value: []byte("cpu"), }, { - err: ErrFieldParse, + Name: Error, + err: influx.ErrFieldParse, }, }, }, @@ -1092,7 +1369,8 @@ var tests = []struct { Value: []byte("cpu"), }, { - err: ErrFieldParse, + Name: Error, + err: influx.ErrFieldParse, }, }, }, @@ -1101,16 +1379,22 @@ var tests = []struct { input: []byte("cpu"), results: []Result{ { - err: ErrFieldParse, + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: Error, + err: influx.ErrTagParse, }, }, }, { - name: "invalid measurement only eol", - input: []byte("cpu\n"), + name: "invalid measurement char", + input: []byte(","), results: []Result{ { - err: ErrFieldParse, + Name: Error, + err: influx.ErrNameParse, }, }, }, @@ -1123,7 +1407,8 @@ var tests = []struct { Value: []byte("cpu"), }, { - err: ErrTagParse, + Name: Error, + err: influx.ErrTagParse, }, }, }, @@ -1144,7 +1429,8 @@ var tests = []struct { Value: []byte("y"), }, { - err: ErrFieldParse, + Name: Error, + err: influx.ErrFieldParse, }, }, }, @@ -1165,7 +1451,8 @@ var tests = []struct { Value: []byte("42"), }, { - err: ErrTimestampParse, + Name: Error, + err: influx.ErrTimestampParse, }, }, }, @@ -1186,7 +1473,8 @@ var tests = []struct { Value: []byte("42"), }, { - err: ErrTimestampParse, + Name: Error, + err: influx.ErrTimestampParse, }, }, }, @@ -1199,42 +1487,28 @@ var tests = []struct { Value: []byte("cpu"), }, { - err: ErrFieldParse, - }, - }, - }, - { - name: "invalid newline in string field", - input: []byte("cpu value=\"4\n2\""), - results: []Result{ - { - Name: Measurement, - Value: []byte("cpu"), - }, - { - err: ErrFieldParse, - }, - { - err: ErrFieldParse, + Name: Error, + err: influx.ErrFieldParse, }, }, }, { name: "invalid field value", - input: []byte(`cpu value=howdy`), + input: []byte("cpu value=howdy"), results: []Result{ { Name: Measurement, Value: []byte("cpu"), }, { - err: ErrFieldParse, + Name: Error, + err: influx.ErrFieldParse, }, }, }, { name: "invalid quoted timestamp", - input: []byte(`cpu value=42 "12345678901234567890"`), + input: []byte("cpu value=42 \"12345678901234567890\""), results: []Result{ { Name: Measurement, @@ -1249,10 +1523,16 @@ var tests = []struct { Value: []byte("42"), }, { - err: ErrTimestampParse, + Name: Error, + err: influx.ErrTimestampParse, }, }, }, + { + name: "comment only", + input: []byte("# blah blah"), + results: []Result(nil), + }, { name: "commented line", input: []byte("# blah blah\ncpu value=42"), @@ -1269,6 +1549,45 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, + }, + }, + { + name: "middle comment", + input: []byte("cpu value=42\n# blah blah\ncpu value=42"), + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldFloat, + Value: []byte("42"), + }, + { + Name: Success, + }, + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldFloat, + Value: []byte("42"), + }, + { + Name: Success, + }, }, }, { @@ -1287,6 +1606,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { @@ -1305,6 +1627,9 @@ var tests = []struct { Name: FieldFloat, Value: 
[]byte("42"), }, + { + Name: Success, + }, }, }, { @@ -1323,6 +1648,9 @@ var tests = []struct { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, } @@ -1331,22 +1659,15 @@ func TestMachine(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { handler := &TestingHandler{} - fsm := NewMachine(handler) + fsm := influx.NewMachine(handler) fsm.SetData(tt.input) - count := 0 - for fsm.ParseLine() { - if fsm.Err() != nil { - handler.AddError(fsm.Err()) - } - count++ - if count > 20 { + for i := 0; i < 20; i++ { + err := fsm.Next() + if err != nil && err == influx.EOF { break } - } - - if fsm.Err() != nil { - handler.AddError(fsm.Err()) + handler.Result(err) } results := handler.Results() @@ -1355,16 +1676,96 @@ func TestMachine(t *testing.T) { } } +func TestMachinePosition(t *testing.T) { + var tests = []struct { + name string + input []byte + lineno int + column int + }{ + { + name: "empty string", + input: []byte(""), + lineno: 1, + column: 1, + }, + { + name: "minimal", + input: []byte("cpu value=42"), + lineno: 1, + column: 13, + }, + { + name: "one newline", + input: []byte("cpu value=42\ncpu value=42"), + lineno: 2, + column: 13, + }, + { + name: "several newlines", + input: []byte("cpu value=42\n\n\n"), + lineno: 4, + column: 1, + }, + { + name: "error on second line", + input: []byte("cpu value=42\ncpu value=invalid"), + lineno: 2, + column: 11, + }, + { + name: "error after comment line", + input: []byte("cpu value=42\n# comment\ncpu value=invalid"), + lineno: 3, + column: 11, + }, + { + name: "dos line endings", + input: []byte("cpu value=42\r\ncpu value=invalid"), + lineno: 2, + column: 11, + }, + { + name: "mac line endings not supported", + input: []byte("cpu value=42\rcpu value=invalid"), + lineno: 1, + column: 14, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + handler := &TestingHandler{} + fsm := influx.NewMachine(handler) + fsm.SetData(tt.input) + + // Parse until an error or eof + for i := 0; i < 20; i++ { + err := fsm.Next() + if err != nil { + break + } + } + + require.Equal(t, tt.lineno, fsm.LineNumber(), "lineno") + require.Equal(t, tt.column, fsm.Column(), "column") + }) + } +} + func BenchmarkMachine(b *testing.B) { for _, tt := range tests { b.Run(tt.name, func(b *testing.B) { handler := &BenchmarkingHandler{} - fsm := NewMachine(handler) + fsm := influx.NewMachine(handler) for n := 0; n < b.N; n++ { fsm.SetData(tt.input) - for fsm.ParseLine() { + for { + err := fsm.Next() + if err != nil { + break + } } } }) @@ -1374,19 +1775,27 @@ func BenchmarkMachine(b *testing.B) { func TestMachineProcstat(t *testing.T) { input := []byte("procstat,exe=bash,process_name=bash 
voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,cpu_time_stolen=0,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000") handler := &TestingHandler{} - fsm := NewMachine(handler) + fsm := influx.NewMachine(handler) fsm.SetData(input) - for fsm.ParseLine() { + for { + err := fsm.Next() + if err != nil { + break + } } } func BenchmarkMachineProcstat(b *testing.B) { input := []byte("procstat,exe=bash,process_name=bash voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,cpu_time_stolen=0,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000") handler := &BenchmarkingHandler{} - fsm := NewMachine(handler) + fsm := influx.NewMachine(handler) for n := 0; n < b.N; n++ { fsm.SetData(input) - for fsm.ParseLine() { + for { + err := fsm.Next() + if err != nil { + break + } } } } @@ -1411,6 +1820,9 @@ func TestSeriesMachine(t *testing.T) { Name: Measurement, Value: []byte("cpu"), }, + { + Name: Success, + }, }, }, { @@ -1437,6 +1849,9 @@ func TestSeriesMachine(t *testing.T) { Name: TagValue, Value: []byte("y"), }, + { + Name: Success, + }, }, }, } @@ -1444,22 +1859,15 @@ func TestSeriesMachine(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { 
handler := &TestingHandler{} - fsm := NewSeriesMachine(handler) + fsm := influx.NewSeriesMachine(handler) fsm.SetData(tt.input) - count := 0 - for fsm.ParseLine() { - if fsm.Err() != nil { - handler.AddError(fsm.Err()) - } - count++ - if count > 20 { + for { + err := fsm.Next() + if err != nil { break } - } - - if fsm.Err() != nil { - handler.AddError(fsm.Err()) + handler.Result(err) } results := handler.Results() @@ -1467,3 +1875,206 @@ func TestSeriesMachine(t *testing.T) { }) } } + +type MockHandler struct { + SetMeasurementF func(name []byte) error + AddTagF func(key []byte, value []byte) error + AddIntF func(key []byte, value []byte) error + AddUintF func(key []byte, value []byte) error + AddFloatF func(key []byte, value []byte) error + AddStringF func(key []byte, value []byte) error + AddBoolF func(key []byte, value []byte) error + SetTimestampF func(tm []byte) error + + TestingHandler +} + +func (h *MockHandler) SetMeasurement(name []byte) error { + h.TestingHandler.SetMeasurement(name) + return h.SetMeasurementF(name) +} + +func (h *MockHandler) AddTag(name, value []byte) error { + return h.AddTagF(name, value) +} + +func (h *MockHandler) AddInt(name, value []byte) error { + err := h.AddIntF(name, value) + if err != nil { + return err + } + h.TestingHandler.AddInt(name, value) + return nil +} + +func (h *MockHandler) AddUint(name, value []byte) error { + err := h.AddUintF(name, value) + if err != nil { + return err + } + h.TestingHandler.AddUint(name, value) + return nil +} + +func (h *MockHandler) AddFloat(name, value []byte) error { + return h.AddFloatF(name, value) +} + +func (h *MockHandler) AddString(name, value []byte) error { + return h.AddStringF(name, value) +} + +func (h *MockHandler) AddBool(name, value []byte) error { + return h.AddBoolF(name, value) +} + +func (h *MockHandler) SetTimestamp(tm []byte) error { + return h.SetTimestampF(tm) +} + +func TestHandlerErrorRecovery(t *testing.T) { + var tests = []struct { + name string + input []byte + handler *MockHandler + results []Result + }{ + { + name: "integer", + input: []byte("cpu value=43i\ncpu value=42i"), + handler: &MockHandler{ + SetMeasurementF: func(name []byte) error { + return nil + }, + AddIntF: func(name, value []byte) error { + if string(value) != "42i" { + return errors.New("handler error") + } + return nil + }, + }, + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: Error, + err: errors.New("handler error"), + }, + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldInt, + Value: []byte("42i"), + }, + { + Name: Success, + }, + }, + }, + { + name: "integer with timestamp", + input: []byte("cpu value=43i 1516241192000000000\ncpu value=42i"), + handler: &MockHandler{ + SetMeasurementF: func(name []byte) error { + return nil + }, + AddIntF: func(name, value []byte) error { + if string(value) != "42i" { + return errors.New("handler error") + } + return nil + }, + }, + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: Error, + err: errors.New("handler error"), + }, + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldInt, + Value: []byte("42i"), + }, + { + Name: Success, + }, + }, + }, + { + name: "unsigned", + input: []byte("cpu value=43u\ncpu value=42u"), + handler: &MockHandler{ + SetMeasurementF: func(name []byte) error { + return nil + }, + AddUintF: func(name, value []byte) error { + if 
string(value) != "42u" { + return errors.New("handler error") + } + return nil + }, + }, + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: Error, + err: errors.New("handler error"), + }, + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldUint, + Value: []byte("42u"), + }, + { + Name: Success, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fsm := influx.NewMachine(tt.handler) + fsm.SetData(tt.input) + + for i := 0; i < 20; i++ { + err := fsm.Next() + if err != nil && err == influx.EOF { + break + } + tt.handler.Result(err) + } + + results := tt.handler.Results() + require.Equal(t, tt.results, results) + }) + } +} diff --git a/plugins/parsers/influx/parser.go b/plugins/parsers/influx/parser.go index b236a6f10..8b9272b69 100644 --- a/plugins/parsers/influx/parser.go +++ b/plugins/parsers/influx/parser.go @@ -3,6 +3,7 @@ package influx import ( "errors" "fmt" + "strings" "sync" "github.com/influxdata/telegraf" @@ -17,17 +18,24 @@ var ( ) type ParseError struct { - Offset int - msg string - buf string + Offset int + LineOffset int + LineNumber int + Column int + msg string + buf string } func (e *ParseError) Error() string { - buffer := e.buf + buffer := e.buf[e.LineOffset:] + eol := strings.IndexAny(buffer, "\r\n") + if eol >= 0 { + buffer = buffer[:eol] + } if len(buffer) > maxErrorBufferSize { buffer = buffer[:maxErrorBufferSize] + "..." } - return fmt.Sprintf("metric parse error: %s at offset %d: %q", e.msg, e.Offset, buffer) + return fmt.Sprintf("metric parse error: %s at %d:%d: %q", e.msg, e.LineNumber, e.Column, buffer) } type Parser struct { @@ -60,14 +68,20 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { metrics := make([]telegraf.Metric, 0) p.machine.SetData(input) - for p.machine.ParseLine() { - err := p.machine.Err() + for { + err := p.machine.Next() + if err == EOF { + break + } + if err != nil { - p.handler.Reset() return nil, &ParseError{ - Offset: p.machine.Position(), - msg: err.Error(), - buf: string(input), + Offset: p.machine.Position(), + LineOffset: p.machine.LineOffset(), + LineNumber: p.machine.LineNumber(), + Column: p.machine.Column(), + msg: err.Error(), + buf: string(input), } } @@ -75,7 +89,11 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { if err != nil { return nil, err } - p.handler.Reset() + + if metric == nil { + continue + } + metrics = append(metrics, metric) } @@ -84,7 +102,7 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { } func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line + "\n")) + metrics, err := p.Parse([]byte(line)) if err != nil { return nil, err } diff --git a/plugins/parsers/influx/parser_test.go b/plugins/parsers/influx/parser_test.go index 05a797442..4d30eeb0b 100644 --- a/plugins/parsers/influx/parser_test.go +++ b/plugins/parsers/influx/parser_test.go @@ -1,6 +1,8 @@ package influx import ( + "strconv" + "strings" "testing" "time" @@ -173,6 +175,63 @@ var ptests = []struct { }, err: nil, }, + { + name: "tag value escape space", + input: []byte(`cpu,host=two\ words value=42`), + metrics: []telegraf.Metric{ + Metric( + metric.New( + "cpu", + map[string]string{ + "host": "two words", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), + ), + ), + }, + err: nil, + }, + { + name: "tag value double escape space", + input: []byte(`cpu,host=two\\ words value=42`), + 
metrics: []telegraf.Metric{ + Metric( + metric.New( + "cpu", + map[string]string{ + "host": `two\ words`, + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), + ), + ), + }, + err: nil, + }, + { + name: "tag value triple escape space", + input: []byte(`cpu,host=two\\\ words value=42`), + metrics: []telegraf.Metric{ + Metric( + metric.New( + "cpu", + map[string]string{ + "host": `two\\ words`, + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), + ), + ), + }, + err: nil, + }, { name: "field key escape not escapable", input: []byte(`cpu va\lue=42`), @@ -259,19 +318,16 @@ var ptests = []struct { err: nil, }, { - name: "field int overflow dropped", - input: []byte("cpu value=9223372036854775808i"), - metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{}, - time.Unix(42, 0), - ), - ), + name: "field int overflow", + input: []byte("cpu value=9223372036854775808i"), + metrics: nil, + err: &ParseError{ + Offset: 30, + LineNumber: 1, + Column: 31, + msg: strconv.ErrRange.Error(), + buf: "cpu value=9223372036854775808i", }, - err: nil, }, { name: "field int max value", @@ -308,19 +364,16 @@ var ptests = []struct { err: nil, }, { - name: "field uint overflow dropped", - input: []byte("cpu value=18446744073709551616u"), - metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{}, - time.Unix(42, 0), - ), - ), + name: "field uint overflow", + input: []byte("cpu value=18446744073709551616u"), + metrics: nil, + err: &ParseError{ + Offset: 31, + LineNumber: 1, + Column: 32, + msg: strconv.ErrRange.Error(), + buf: "cpu value=18446744073709551616u", }, - err: nil, }, { name: "field uint max value", @@ -407,6 +460,23 @@ var ptests = []struct { }, err: nil, }, + { + name: "field string newline", + input: []byte("cpu value=\"4\n2\""), + metrics: []telegraf.Metric{ + Metric( + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": "4\n2", + }, + time.Unix(42, 0), + ), + ), + }, + err: nil, + }, { name: "no timestamp", input: []byte("cpu value=42"), @@ -497,9 +567,11 @@ var ptests = []struct { input: []byte("cpu"), metrics: nil, err: &ParseError{ - Offset: 3, - msg: ErrFieldParse.Error(), - buf: "cpu", + Offset: 3, + LineNumber: 1, + Column: 4, + msg: ErrTagParse.Error(), + buf: "cpu", }, }, { @@ -667,9 +739,11 @@ func TestSeriesParser(t *testing.T) { input: []byte("cpu,a="), metrics: []telegraf.Metric{}, err: &ParseError{ - Offset: 6, - msg: ErrTagParse.Error(), - buf: "cpu,a=", + Offset: 6, + LineNumber: 1, + Column: 7, + msg: ErrTagParse.Error(), + buf: "cpu,a=", }, }, } @@ -696,3 +770,37 @@ func TestSeriesParser(t *testing.T) { }) } } + +func TestParserErrorString(t *testing.T) { + var ptests = []struct { + name string + input []byte + errString string + }{ + { + name: "multiple line error", + input: []byte("cpu value=42\ncpu value=invalid\ncpu value=42"), + errString: `metric parse error: expected field at 2:11: "cpu value=invalid"`, + }, + { + name: "handler error", + input: []byte("cpu value=9223372036854775808i\ncpu value=42"), + errString: `metric parse error: value out of range at 1:31: "cpu value=9223372036854775808i"`, + }, + { + name: "buffer too long", + input: []byte("cpu " + strings.Repeat("ab", maxErrorBufferSize) + "=invalid\ncpu value=42"), + errString: "metric parse error: expected field at 1:2054: \"cpu " + strings.Repeat("ab", maxErrorBufferSize)[:maxErrorBufferSize-4] + "...\"", + }, + } + + for _, tt := range 
ptests { + t.Run(tt.name, func(t *testing.T) { + handler := NewMetricHandler() + parser := NewParser(handler) + + _, err := parser.Parse(tt.input) + require.Equal(t, tt.errString, err.Error()) + }) + } +} diff --git a/plugins/serializers/influx/escape.go b/plugins/serializers/influx/escape.go index 27caa6bb3..9320eb7fa 100644 --- a/plugins/serializers/influx/escape.go +++ b/plugins/serializers/influx/escape.go @@ -29,10 +29,6 @@ var ( ) stringFieldEscaper = strings.NewReplacer( - "\t", `\t`, - "\n", `\n`, - "\f", `\f`, - "\r", `\r`, `"`, `\"`, `\`, `\\`, ) diff --git a/plugins/serializers/influx/influx_test.go b/plugins/serializers/influx/influx_test.go index e3526428e..8102bd973 100644 --- a/plugins/serializers/influx/influx_test.go +++ b/plugins/serializers/influx/influx_test.go @@ -335,7 +335,7 @@ var tests = []struct { time.Unix(0, 0), ), ), - output: []byte("cpu value=\"x\\ny\" 0\n"), + output: []byte("cpu value=\"x\ny\" 0\n"), }, { name: "need more space", From 931fac9b8603f5bc5eef3d4f49b312b5207ad937 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 Feb 2019 12:44:42 -0800 Subject: [PATCH 0638/1815] Fix issue link in changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6fa79c6a0..f63396dac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -71,7 +71,7 @@ - [#5437](https://github.com/influxdata/telegraf/issues/5437): Host not added when using custom arguments in ping plugin. - [#5438](https://github.com/influxdata/telegraf/issues/5438): Fix InfluxDB output UDP line splitting. - [#5456](https://github.com/influxdata/telegraf/issues/5456): Disable results by row in azuredb query. -- [#5277](https://github.com/influxdata/telegraf/issues/5456): Add backwards compatibility fields in ceph usage and pool stats. +- [#5277](https://github.com/influxdata/telegraf/issues/5277): Add backwards compatibility fields in ceph usage and pool stats. ## v1.9.4 [2019-02-05] From c9597a2463407c37f2f818e8919fb7c2d1273dca Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 Feb 2019 12:11:02 -0800 Subject: [PATCH 0639/1815] Set release date for 1.9.5 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f63396dac..99b9d7c49 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,7 +59,7 @@ - [#5304](https://github.com/influxdata/telegraf/issues/5304): Fix x509_cert input stops checking certs after first error. - [#5404](https://github.com/influxdata/telegraf/issues/5404): Group stackdriver requests to send one point per timeseries. -## v1.9.5 [unreleased] +## v1.9.5 [2019-02-26] #### Bugfixes From 9740e956ca3176774ae23a312e5d498f4c20ef5a Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 26 Feb 2019 15:03:25 -0700 Subject: [PATCH 0640/1815] Log permission error and ignore in filecount input (#5483) --- plugins/inputs/filecount/README.md | 10 +++++----- plugins/inputs/filecount/filecount.go | 16 ++++++++++++++-- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/filecount/README.md b/plugins/inputs/filecount/README.md index a6836ffc3..49e28caa6 100644 --- a/plugins/inputs/filecount/README.md +++ b/plugins/inputs/filecount/README.md @@ -1,6 +1,6 @@ # Filecount Input Plugin -Counts files in directories that match certain criteria. +Reports the number and total size of files in specified directories. ### Configuration: @@ -8,7 +8,7 @@ Counts files in directories that match certain criteria. 
 [[inputs.filecount]]
   ## Directory to gather stats about.
   ##   deprecated in 1.9; use the directories option
-  directory = "/var/cache/apt/archives"
+  # directory = "/var/cache/apt/archives"
 
   ## Directories to gather stats about.
   ## This accepts standard unix glob matching rules, but with the addition of
   ## ** as a "super asterisk". ie:
   ##   /var/log/**    -> recursively find all directories in /var/log and count files in each directory
   ##   /var/log/*/*   -> find all directories with a parent dir in /var/log and count files in each directory
   ##   /var/log       -> count all files in /var/log and all of its subdirectories
-  directories = ["/var/cache/apt/archives"]
+  directories = ["/var/cache/apt", "/tmp"]
 
   ## Only count files that match the name pattern. Defaults to "*".
-  name = "*.deb"
+  name = "*"
 
   ## Count files in subdirectories. Defaults to true.
-  recursive = false
+  recursive = true
 
   ## Only count regular files. Defaults to true.
   regular_only = true
diff --git a/plugins/inputs/filecount/filecount.go b/plugins/inputs/filecount/filecount.go
index f8840721b..1fd7041ff 100644
--- a/plugins/inputs/filecount/filecount.go
+++ b/plugins/inputs/filecount/filecount.go
@@ -1,22 +1,25 @@
 package filecount
 
 import (
+	"log"
 	"os"
 	"path/filepath"
 	"strings"
 	"time"
 
+	"github.com/karrick/godirwalk"
+	"github.com/pkg/errors"
+
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/internal/globpath"
 	"github.com/influxdata/telegraf/plugins/inputs"
-	"github.com/karrick/godirwalk"
 )
 
 const sampleConfig = `
   ## Directory to gather stats about.
   ##   deprecated in 1.9; use the directories option
-  directory = "/var/cache/apt/archives"
+  # directory = "/var/cache/apt/archives"
 
   ## Directories to gather stats about.
   ## This accepts standard unix glob matching rules, but with the addition of
@@ -152,6 +155,7 @@ func (fc *FileCount) initFileFilters() {
 func (fc *FileCount) count(acc telegraf.Accumulator, basedir string, glob globpath.GlobPath) {
 	childCount := make(map[string]int64)
 	childSize := make(map[string]int64)
+
 	walkFn := func(path string, de *godirwalk.Dirent) error {
 		if path == basedir {
 			return nil
 		}
@@ -178,6 +182,7 @@
 		}
 		return nil
 	}
+
 	postChildrenFn := func(path string, de *godirwalk.Dirent) error {
 		if glob.MatchString(path) {
 			gauge := map[string]interface{}{
@@ -203,6 +208,13 @@
 		Callback:             walkFn,
 		PostChildrenCallback: postChildrenFn,
 		Unsorted:             true,
+		ErrorCallback: func(osPathname string, err error) godirwalk.ErrorAction {
+			if os.IsPermission(errors.Cause(err)) {
+				log.Println("D! [inputs.filecount]", err)
+				return godirwalk.SkipNode
+			}
+			return godirwalk.Halt
+		},
 	})
 	if err != nil {
 		acc.AddError(err)

From ec746cc32a152aed57d46cfe562d7b212e1e9ea8 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 26 Feb 2019 14:04:36 -0800
Subject: [PATCH 0641/1815] Update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 99b9d7c49..8da1f79fe 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -58,6 +58,7 @@
 - [#5316](https://github.com/influxdata/telegraf/pull/5316): Remove auth from /ping route in influxdb_listener.
 - [#5304](https://github.com/influxdata/telegraf/issues/5304): Fix x509_cert input stops checking certs after first error.
 - [#5404](https://github.com/influxdata/telegraf/issues/5404): Group stackdriver requests to send one point per timeseries.
+- [#5449](https://github.com/influxdata/telegraf/issues/5449): Log permission error and ignore in filecount input.
 
 ## v1.9.5 [2019-02-26]
 
From 8d90609198ab90b8ee07a2268b8fcd8994bf19f3 Mon Sep 17 00:00:00 2001
From: Greg Linton
Date: Tue, 26 Feb 2019 17:03:13 -0700
Subject: [PATCH 0642/1815] Remove 'inputs.' prefix when logging loaded inputs

---
 internal/config/config.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/internal/config/config.go b/internal/config/config.go
index 4388d658d..557fdd5fa 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -151,7 +151,7 @@ type AgentConfig struct {
 func (c *Config) InputNames() []string {
 	var name []string
 	for _, input := range c.Inputs {
-		name = append(name, input.Name())
+		name = append(name, input.Config.Name)
 	}
 	return name
 }

From 85617887c4db6733a90334fccb61a7155c6a1c6a Mon Sep 17 00:00:00 2001
From: Greg <2653109+glinton@users.noreply.github.com>
Date: Tue, 26 Feb 2019 18:35:57 -0700
Subject: [PATCH 0643/1815] Add option to disable timestamp adjustment in grok
 parser (#5488)

---
 internal/config/config.go      |  9 +++++++++
 plugins/parsers/grok/README.md |  3 +++
 plugins/parsers/grok/parser.go | 11 +++++++++++
 plugins/parsers/registry.go    | 12 +++++++-----
 4 files changed, 30 insertions(+), 5 deletions(-)

diff --git a/internal/config/config.go b/internal/config/config.go
index 4388d658d..0e49572ee 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -1508,6 +1508,14 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) {
 		}
 	}
 
+	if node, ok := tbl.Fields["grok_unique_timestamp"]; ok {
+		if kv, ok := node.(*ast.KeyValue); ok {
+			if str, ok := kv.Value.(*ast.String); ok {
+				c.GrokUniqueTimestamp = str.Value
+			}
+		}
+	}
+
 	//for csv parser
 	if node, ok := tbl.Fields["csv_column_names"]; ok {
 		if kv, ok := node.(*ast.KeyValue); ok {
@@ -1661,6 +1669,7 @@
 	delete(tbl.Fields, "grok_custom_patterns")
 	delete(tbl.Fields, "grok_custom_pattern_files")
 	delete(tbl.Fields, "grok_timezone")
+	delete(tbl.Fields, "grok_unique_timestamp")
 	delete(tbl.Fields, "csv_column_names")
 	delete(tbl.Fields, "csv_column_types")
 	delete(tbl.Fields, "csv_comment")
diff --git a/plugins/parsers/grok/README.md b/plugins/parsers/grok/README.md
index 4ebbbd3f2..1570bfb28 100644
--- a/plugins/parsers/grok/README.md
+++ b/plugins/parsers/grok/README.md
@@ -110,6 +110,9 @@ you will find the https://grokdebug.herokuapp.com application quite useful!
   ## 2. "Canada/Eastern"  -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
   ## 3. UTC               -- or blank/unspecified, will return timestamp in UTC
   grok_timezone = "Canada/Eastern"
+
+  ## When grok_unique_timestamp is set to "disable", timestamp will not be incremented if there is a duplicate. Default is "auto"
+  # grok_unique_timestamp = "auto"
 ```
 
 #### Timestamp Examples
diff --git a/plugins/parsers/grok/parser.go b/plugins/parsers/grok/parser.go
index c1ebf9003..eb1d1e71c 100644
--- a/plugins/parsers/grok/parser.go
+++ b/plugins/parsers/grok/parser.go
@@ -86,6 +86,9 @@ type Parser struct {
 	Timezone string
 	loc      *time.Location
 
+ UniqueTimestamp string + // typeMap is a map of patterns -> capture name -> modifier, // ie, { // "%{TESTLOG}": @@ -134,6 +137,10 @@ func (p *Parser) Compile() error { return err } + if p.UniqueTimestamp == "" { + p.UniqueTimestamp = "auto" + } + // Give Patterns fake names so that they can be treated as named // "custom patterns" p.NamedPatterns = make([]string, 0, len(p.Patterns)) @@ -358,6 +365,10 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { return nil, fmt.Errorf("grok: must have one or more fields") } + if p.UniqueTimestamp != "auto" { + return metric.New(p.Measurement, tags, fields, timestamp) + } + return metric.New(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp)) } diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index ffa7d142f..ad54e35ad 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -126,6 +126,7 @@ type Config struct { GrokCustomPatterns string `toml:"grok_custom_patterns"` GrokCustomPatternFiles []string `toml:"grok_custom_pattern_files"` GrokTimezone string `toml:"grok_timezone"` + GrokUniqueTimestamp string `toml:"grok_unique_timestamp"` //csv configuration CSVColumnNames []string `toml:"csv_column_names"` @@ -189,7 +190,8 @@ func NewParser(config *Config) (Parser, error) { config.GrokNamedPatterns, config.GrokCustomPatterns, config.GrokCustomPatternFiles, - config.GrokTimezone) + config.GrokTimezone, + config.GrokUniqueTimestamp) case "csv": parser, err = newCSVParser(config.MetricName, config.CSVHeaderRowCount, @@ -298,10 +300,9 @@ func newJSONParser( //Deprecated: Use NewParser to get a JSONParser object func newGrokParser(metricName string, - patterns []string, - nPatterns []string, - cPatterns string, - cPatternFiles []string, tZone string) (Parser, error) { + patterns []string, nPatterns []string, + cPatterns string, cPatternFiles []string, + tZone string, uniqueTimestamp string) (Parser, error) { parser := grok.Parser{ Measurement: metricName, Patterns: patterns, @@ -309,6 +310,7 @@ func newGrokParser(metricName string, CustomPatterns: cPatterns, CustomPatternFiles: cPatternFiles, Timezone: tZone, + UniqueTimestamp: uniqueTimestamp, } err := parser.Compile() From a3f83afe4a4d4da5980dd8fa839b9110ac0c51cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Edstr=C3=B6m?= <108799+Legogris@users.noreply.github.com> Date: Wed, 27 Feb 2019 02:40:21 +0100 Subject: [PATCH 0644/1815] Move capacity check for stackdriver output plugin (#5479) --- plugins/outputs/stackdriver/stackdriver.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index d57675bc3..572cdb4c7 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -199,7 +199,7 @@ func (s *Stackdriver) Write(metrics []telegraf.Metric) error { for len(buckets) != 0 { // can send up to 200 time series to stackdriver timeSeries := make([]*monitoringpb.TimeSeries, 0, 200) - for i := 0; i < len(keys); i++ { + for i := 0; i < len(keys) && len(timeSeries) < cap(timeSeries); i++ { k := keys[i] s := buckets[k] timeSeries = append(timeSeries, s[0]) @@ -212,10 +212,6 @@ func (s *Stackdriver) Write(metrics []telegraf.Metric) error { s = s[1:] buckets[k] = s - - if len(timeSeries) == cap(timeSeries) { - break - } } // Prepare time series request. 
From 4df0cc006c6b540ee6e90fb852b1d82eab8367b6 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 26 Feb 2019 17:37:46 -0800
Subject: [PATCH 0645/1815] Update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8da1f79fe..b113cae90 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -50,6 +50,7 @@
 - [#5453](https://github.com/influxdata/telegraf/pull/5453): Support Azure Sovereign Environments with endpoint_url option.
 - [#5472](https://github.com/influxdata/telegraf/pull/5472): Support configuring a default timezone in JSON parser.
 - [#5482](https://github.com/influxdata/telegraf/pull/5482): Add ceph_health metrics to ceph input.
+- [#5488](https://github.com/influxdata/telegraf/pull/5488): Add option to disable unique timestamp adjustment in grok parser.
 
 #### Bugfixes

From c023ffe0a5800865b723ded149cd72ee63427045 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 26 Feb 2019 18:04:45 -0800
Subject: [PATCH 0646/1815] Add unique_timestamp option from grok parser to
 logparser grok

---
 plugins/inputs/logparser/logparser.go | 7 ++++++-
 plugins/parsers/grok/README.md        | 3 ++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go
index eb23e2b74..a7fd97e8e 100644
--- a/plugins/inputs/logparser/logparser.go
+++ b/plugins/inputs/logparser/logparser.go
@@ -8,7 +8,6 @@ import (
 	"sync"
 
 	"github.com/influxdata/tail"
-
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/internal/globpath"
 	"github.com/influxdata/telegraf/plugins/inputs"
@@ -28,6 +27,7 @@ type GrokConfig struct {
 	CustomPatterns     string
 	CustomPatternFiles []string
 	Timezone           string
+	UniqueTimestamp    string
 }
 
 type logEntry struct {
@@ -100,6 +100,10 @@ const sampleConfig = `
   ## 2. "Canada/Eastern"  -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
   ## 3. UTC               -- or blank/unspecified, will return timestamp in UTC
   # timezone = "Canada/Eastern"
+
+  ## When set to "disable", timestamp will not be incremented if there is a
+  ## duplicate.
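+  ## With the default of "auto", duplicate timestamps are adjusted slightly
+  ## so that each parsed metric still receives a unique timestamp.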
# grok_unique_timestamp = "auto" ``` From 03f40b35884e957665bcc07eeeb0eea6ac2e2d38 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 Feb 2019 18:22:12 -0800 Subject: [PATCH 0647/1815] Remove prefix from aggregators name --- internal/config/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/config/config.go b/internal/config/config.go index 557fdd5fa..28d683cc5 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -160,7 +160,7 @@ func (c *Config) InputNames() []string { func (c *Config) AggregatorNames() []string { var name []string for _, aggregator := range c.Aggregators { - name = append(name, aggregator.Name()) + name = append(name, aggregator.Config.Name) } return name } From c6612a4e4a2797ab268b224b92487426e3a3d160 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 26 Feb 2019 19:25:42 -0700 Subject: [PATCH 0648/1815] Add cloud pubsub push input plugin (#5442) --- plugins/inputs/all/all.go | 1 + plugins/inputs/cloud_pubsub_push/README.md | 72 ++++ .../inputs/cloud_pubsub_push/pubsub_push.go | 323 ++++++++++++++++++ .../cloud_pubsub_push/pubsub_push_test.go | 216 ++++++++++++ 4 files changed, 612 insertions(+) create mode 100644 plugins/inputs/cloud_pubsub_push/README.md create mode 100644 plugins/inputs/cloud_pubsub_push/pubsub_push.go create mode 100644 plugins/inputs/cloud_pubsub_push/pubsub_push_test.go diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index e03648036..765505c3e 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -15,6 +15,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/cgroup" _ "github.com/influxdata/telegraf/plugins/inputs/chrony" _ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub" + _ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub_push" _ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch" _ "github.com/influxdata/telegraf/plugins/inputs/conntrack" _ "github.com/influxdata/telegraf/plugins/inputs/consul" diff --git a/plugins/inputs/cloud_pubsub_push/README.md b/plugins/inputs/cloud_pubsub_push/README.md new file mode 100644 index 000000000..76725c997 --- /dev/null +++ b/plugins/inputs/cloud_pubsub_push/README.md @@ -0,0 +1,72 @@ +# Google Cloud PubSub Push Input Service Plugin + +The Google Cloud PubSub Push listener is a service input plugin that listens for messages sent via an HTTP POST from [Google Cloud PubSub][pubsub]. +The plugin expects messages in Google's Pub/Sub JSON Format ONLY. +The intent of the plugin is to allow Telegraf to serve as an endpoint of the Google Pub/Sub 'Push' service. +Google's PubSub service will **only** send over HTTPS/TLS so this plugin must be behind a valid proxy or must be configured to use TLS. + +Enable TLS by specifying the file names of a service TLS certificate and key. + +Enable mutually authenticated TLS and authorize client connections by signing certificate authority by including a list of allowed CA certificate file names in `tls_allowed_cacerts`. + + +### Configuration: + +This is a sample configuration for the plugin. + +```toml +[[inputs.cloud_pubsub_push]] + ## Address and port to host HTTP listener on + service_address = ":8080" + + ## Application secret to verify messages originate from Cloud Pub/Sub + # token = "" + + ## Path to listen to. + # path = "/" + + ## Maximum duration before timing out read of the request + # read_timeout = "10s" + ## Maximum duration before timing out write of the response. 
This should be set to a value
+  ## large enough that you can send at least 'metric_batch_size' number of messages within the
+  ## duration.
+  # write_timeout = "10s"
+
+  ## Maximum allowed http request body size in bytes.
+  ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
+  # max_body_size = "500MB"
+
+  ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag.
+  # add_meta = false
+
+  ## Optional. Maximum messages to read from PubSub that have not been written
+  ## to an output. Defaults to 1000.
+  ## For best throughput set based on the number of metrics within
+  ## each message and the size of the output's metric_batch_size.
+  ##
+  ## For example, if each message contains 10 metrics and the output
+  ## metric_batch_size is 1000, setting this to 100 will ensure that a
+  ## full batch is collected and the write is triggered immediately without
+  ## waiting until the next flush_interval.
+  # max_undelivered_messages = 1000
+
+  ## Set one or more allowed client CA certificate file names to
+  ## enable mutually authenticated TLS connections
+  # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+
+  ## Add service certificate and key
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "influx"
+```
+
+This plugin assumes you have already created a PUSH subscription for a given
+PubSub topic.
+
+[pubsub]: https://cloud.google.com/pubsub
+[input data formats]: /docs/DATA_FORMATS_INPUT.md
diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push.go b/plugins/inputs/cloud_pubsub_push/pubsub_push.go
new file mode 100644
index 000000000..8b83a440d
--- /dev/null
+++ b/plugins/inputs/cloud_pubsub_push/pubsub_push.go
@@ -0,0 +1,323 @@
+package cloud_pubsub_push
+
+import (
+	"context"
+	"crypto/subtle"
+	"encoding/base64"
+	"encoding/json"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	tlsint "github.com/influxdata/telegraf/internal/tls"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"github.com/influxdata/telegraf/plugins/parsers"
+)
+
+// defaultMaxBodySize is the default maximum request body size, in bytes.
+// If the request body is over this size, we will return an HTTP 413 error.
+// 500 MB
+const defaultMaxBodySize = 500 * 1024 * 1024
+const defaultMaxUndeliveredMessages = 1000
+
+type PubSubPush struct {
+	ServiceAddress string
+	Token          string
+	Path           string
+	ReadTimeout    internal.Duration
+	WriteTimeout   internal.Duration
+	MaxBodySize    internal.Size
+	AddMeta        bool
+
+	MaxUndeliveredMessages int `toml:"max_undelivered_messages"`
+
+	tlsint.ServerConfig
+	parsers.Parser
+
+	listener net.Listener
+	server   *http.Server
+	acc      telegraf.TrackingAccumulator
+	ctx      context.Context
+	cancel   context.CancelFunc
+	wg       *sync.WaitGroup
+	mu       *sync.Mutex
+
+	undelivered map[telegraf.TrackingID]chan bool
+	sem         chan struct{}
+}
+
+// Message defines the structure of a Google Pub/Sub message.
+type Message struct {
+	Atts map[string]string `json:"attributes"`
+	Data string            `json:"data"` // Data is base64 encoded data
+}
+
+// Payload is the received Google Pub/Sub data.
(https://cloud.google.com/pubsub/docs/push)
+type Payload struct {
+	Msg          Message `json:"message"`
+	Subscription string  `json:"subscription"`
+}
+
+const sampleConfig = `
+  ## Address and port to host HTTP listener on
+  service_address = ":8080"
+
+  ## Application secret to verify messages originate from Cloud Pub/Sub
+  # token = ""
+
+  ## Path to listen to.
+  # path = "/"
+
+  ## Maximum duration before timing out read of the request
+  # read_timeout = "10s"
+  ## Maximum duration before timing out write of the response. This should be set to a value
+  ## large enough that you can send at least 'metric_batch_size' number of messages within the
+  ## duration.
+  # write_timeout = "10s"
+
+  ## Maximum allowed http request body size in bytes.
+  ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
+  # max_body_size = "500MB"
+
+  ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag.
+  # add_meta = false
+
+  ## Optional. Maximum messages to read from PubSub that have not been written
+  ## to an output. Defaults to 1000.
+  ## For best throughput set based on the number of metrics within
+  ## each message and the size of the output's metric_batch_size.
+  ##
+  ## For example, if each message contains 10 metrics and the output
+  ## metric_batch_size is 1000, setting this to 100 will ensure that a
+  ## full batch is collected and the write is triggered immediately without
+  ## waiting until the next flush_interval.
+  # max_undelivered_messages = 1000
+
+  ## Set one or more allowed client CA certificate file names to
+  ## enable mutually authenticated TLS connections
+  # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+
+  ## Add service certificate and key
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "influx"
+`
+
+func (p *PubSubPush) SampleConfig() string {
+	return sampleConfig
+}
+
+func (p *PubSubPush) Description() string {
+	return "Google Cloud Pub/Sub Push HTTP listener"
+}
+
+func (p *PubSubPush) Gather(_ telegraf.Accumulator) error {
+	return nil
+}
+
+func (p *PubSubPush) SetParser(parser parsers.Parser) {
+	p.Parser = parser
+}
+
+// Start starts the http listener service.
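+// It also wires up delivery tracking: the semaphore and the tracking
+// accumulator are both sized to MaxUndeliveredMessages, which bounds how many
+// message groups may be in flight between the HTTP handlers and the outputs.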
+func (p *PubSubPush) Start(acc telegraf.Accumulator) error { + if p.MaxBodySize.Size == 0 { + p.MaxBodySize.Size = defaultMaxBodySize + } + + if p.ReadTimeout.Duration < time.Second { + p.ReadTimeout.Duration = time.Second * 10 + } + if p.WriteTimeout.Duration < time.Second { + p.WriteTimeout.Duration = time.Second * 10 + } + + tlsConf, err := p.ServerConfig.TLSConfig() + if err != nil { + return err + } + + p.server = &http.Server{ + Addr: p.ServiceAddress, + Handler: http.TimeoutHandler(p, p.WriteTimeout.Duration, "timed out processing metric"), + ReadTimeout: p.ReadTimeout.Duration, + TLSConfig: tlsConf, + } + + p.ctx, p.cancel = context.WithCancel(context.Background()) + p.wg = &sync.WaitGroup{} + p.acc = acc.WithTracking(p.MaxUndeliveredMessages) + p.sem = make(chan struct{}, p.MaxUndeliveredMessages) + p.undelivered = make(map[telegraf.TrackingID]chan bool) + p.mu = &sync.Mutex{} + + p.wg.Add(1) + go func() { + defer p.wg.Done() + p.receiveDelivered() + }() + + p.wg.Add(1) + go func() { + defer p.wg.Done() + if tlsConf != nil { + p.server.ListenAndServeTLS("", "") + } else { + p.server.ListenAndServe() + } + }() + + return nil +} + +// Stop cleans up all resources +func (p *PubSubPush) Stop() { + p.cancel() + p.server.Shutdown(p.ctx) + p.wg.Wait() +} + +func (p *PubSubPush) ServeHTTP(res http.ResponseWriter, req *http.Request) { + if req.URL.Path == p.Path { + p.AuthenticateIfSet(p.serveWrite, res, req) + } else { + p.AuthenticateIfSet(http.NotFound, res, req) + } +} + +func (p *PubSubPush) serveWrite(res http.ResponseWriter, req *http.Request) { + select { + case <-req.Context().Done(): + res.WriteHeader(http.StatusServiceUnavailable) + return + case <-p.ctx.Done(): + res.WriteHeader(http.StatusServiceUnavailable) + return + case p.sem <- struct{}{}: + break + } + + // Check that the content length is not too large for us to handle. + if req.ContentLength > p.MaxBodySize.Size { + res.WriteHeader(http.StatusRequestEntityTooLarge) + return + } + + if req.Method != http.MethodPost { + res.WriteHeader(http.StatusMethodNotAllowed) + return + } + + body := http.MaxBytesReader(res, req.Body, p.MaxBodySize.Size) + bytes, err := ioutil.ReadAll(body) + if err != nil { + res.WriteHeader(http.StatusRequestEntityTooLarge) + return + } + + var payload Payload + if err = json.Unmarshal(bytes, &payload); err != nil { + log.Printf("E! [inputs.cloud_pubsub_push] Error decoding payload %s", err.Error()) + res.WriteHeader(http.StatusBadRequest) + return + } + + sDec, err := base64.StdEncoding.DecodeString(payload.Msg.Data) + if err != nil { + log.Printf("E! [inputs.cloud_pubsub_push] Base64-Decode Failed %s", err.Error()) + res.WriteHeader(http.StatusBadRequest) + return + } + + metrics, err := p.Parse(sDec) + if err != nil { + log.Println("D! 
[inputs.cloud_pubsub_push] " + err.Error()) + res.WriteHeader(http.StatusBadRequest) + return + } + + if p.AddMeta { + for i := range metrics { + for k, v := range payload.Msg.Atts { + metrics[i].AddTag(k, v) + } + metrics[i].AddTag("subscription", payload.Subscription) + } + } + + ch := make(chan bool, 1) + p.mu.Lock() + p.undelivered[p.acc.AddTrackingMetricGroup(metrics)] = ch + p.mu.Unlock() + + select { + case <-req.Context().Done(): + res.WriteHeader(http.StatusServiceUnavailable) + return + case success := <-ch: + if success { + res.WriteHeader(http.StatusNoContent) + } else { + res.WriteHeader(http.StatusInternalServerError) + } + } +} + +func (p *PubSubPush) receiveDelivered() { + for { + select { + case <-p.ctx.Done(): + return + case info := <-p.acc.Delivered(): + <-p.sem + + p.mu.Lock() + ch, ok := p.undelivered[info.ID()] + if !ok { + p.mu.Unlock() + continue + } + + delete(p.undelivered, info.ID()) + p.mu.Unlock() + + if info.Delivered() { + ch <- true + } else { + ch <- false + log.Println("D! [inputs.cloud_pubsub_push] Metric group failed to process") + } + } + } +} + +func (p *PubSubPush) AuthenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) { + if p.Token != "" { + if subtle.ConstantTimeCompare([]byte(req.FormValue("token")), []byte(p.Token)) != 1 { + http.Error(res, "Unauthorized.", http.StatusUnauthorized) + return + } + } + + handler(res, req) +} + +func init() { + inputs.Add("cloud_pubsub_push", func() telegraf.Input { + return &PubSubPush{ + ServiceAddress: ":8080", + Path: "/", + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + } + }) +} diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go new file mode 100644 index 000000000..57734c705 --- /dev/null +++ b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go @@ -0,0 +1,216 @@ +package cloud_pubsub_push + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/agent" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/models" + "github.com/influxdata/telegraf/plugins/parsers" +) + +func TestServeHTTP(t *testing.T) { + tests := []struct { + name string + method string + path string + body io.Reader + status int + maxsize int64 + expected string + fail bool + full bool + }{ + { + name: "bad method get", + method: "GET", + path: "/", + status: http.StatusMethodNotAllowed, + }, + { + name: "post not found", + method: "POST", + path: "/allthings", + status: http.StatusNotFound, + }, + { + name: "post large date", + method: "POST", + path: "/", + status: http.StatusRequestEntityTooLarge, + body: strings.NewReader(`{"message":{"attributes":{"deviceId":"myPi","deviceNumId":"2808946627307959","deviceRegistryId":"my-registry","deviceRegistryLocation":"us-central1","projectId":"conference-demos","subFolder":""},"data":"dGVzdGluZ0dvb2dsZSxzZW5zb3I9Ym1lXzI4MCB0ZW1wX2M9MjMuOTUsaHVtaWRpdHk9NjIuODMgMTUzNjk1Mjk3NDU1MzUxMDIzMQ==","messageId":"204004313210337","message_id":"204004313210337","publishTime":"2018-09-14T19:22:54.587Z","publish_time":"2018-09-14T19:22:54.587Z"},"subscription":"projects/conference-demos/subscriptions/my-subscription"}`), + }, + { + name: "post valid data", + method: "POST", + path: "/", + maxsize: 500 * 1024 * 1024, + status: http.StatusNoContent, + body: 
strings.NewReader(`{"message":{"attributes":{"deviceId":"myPi","deviceNumId":"2808946627307959","deviceRegistryId":"my-registry","deviceRegistryLocation":"us-central1","projectId":"conference-demos","subFolder":""},"data":"dGVzdGluZ0dvb2dsZSxzZW5zb3I9Ym1lXzI4MCB0ZW1wX2M9MjMuOTUsaHVtaWRpdHk9NjIuODMgMTUzNjk1Mjk3NDU1MzUxMDIzMQ==","messageId":"204004313210337","message_id":"204004313210337","publishTime":"2018-09-14T19:22:54.587Z","publish_time":"2018-09-14T19:22:54.587Z"},"subscription":"projects/conference-demos/subscriptions/my-subscription"}`), + }, + { + name: "fail write", + method: "POST", + path: "/", + maxsize: 500 * 1024 * 1024, + status: http.StatusServiceUnavailable, + body: strings.NewReader(`{"message":{"attributes":{"deviceId":"myPi","deviceNumId":"2808946627307959","deviceRegistryId":"my-registry","deviceRegistryLocation":"us-central1","projectId":"conference-demos","subFolder":""},"data":"dGVzdGluZ0dvb2dsZSxzZW5zb3I9Ym1lXzI4MCB0ZW1wX2M9MjMuOTUsaHVtaWRpdHk9NjIuODMgMTUzNjk1Mjk3NDU1MzUxMDIzMQ==","messageId":"204004313210337","message_id":"204004313210337","publishTime":"2018-09-14T19:22:54.587Z","publish_time":"2018-09-14T19:22:54.587Z"},"subscription":"projects/conference-demos/subscriptions/my-subscription"}`), + fail: true, + }, + { + name: "full buffer", + method: "POST", + path: "/", + maxsize: 500 * 1024 * 1024, + status: http.StatusServiceUnavailable, + body: strings.NewReader(`{"message":{"attributes":{"deviceId":"myPi","deviceNumId":"2808946627307959","deviceRegistryId":"my-registry","deviceRegistryLocation":"us-central1","projectId":"conference-demos","subFolder":""},"data":"dGVzdGluZ0dvb2dsZSxzZW5zb3I9Ym1lXzI4MCB0ZW1wX2M9MjMuOTUsaHVtaWRpdHk9NjIuODMgMTUzNjk1Mjk3NDU1MzUxMDIzMQ==","messageId":"204004313210337","message_id":"204004313210337","publishTime":"2018-09-14T19:22:54.587Z","publish_time":"2018-09-14T19:22:54.587Z"},"subscription":"projects/conference-demos/subscriptions/my-subscription"}`), + full: true, + }, + { + name: "post invalid body", + method: "POST", + path: "/", + maxsize: 500 * 1024 * 1024, + status: http.StatusBadRequest, + body: strings.NewReader(`invalid body`), + }, + { + name: "post invalid data", + method: "POST", + path: "/", + maxsize: 500 * 1024 * 1024, + status: http.StatusBadRequest, + body: strings.NewReader(`{"message":{"attributes":{"deviceId":"myPi","deviceNumId":"2808946627307959","deviceRegistryId":"my-registry","deviceRegistryLocation":"us-central1","projectId":"conference-demos","subFolder":""},"data":"not base 64 encoded data","messageId":"204004313210337","message_id":"204004313210337","publishTime":"2018-09-14T19:22:54.587Z","publish_time":"2018-09-14T19:22:54.587Z"},"subscription":"projects/conference-demos/subscriptions/my-subscription"}`), + }, + { + name: "post invalid data format", + method: "POST", + path: "/", + maxsize: 500 * 1024 * 1024, + status: http.StatusBadRequest, + body: strings.NewReader(`{"message":{"attributes":{"deviceId":"myPi","deviceNumId":"2808946627307959","deviceRegistryId":"my-registry","deviceRegistryLocation":"us-central1","projectId":"conference-demos","subFolder":""},"data":"bm90IHZhbGlkIGZvcm1hdHRlZCBkYXRh","messageId":"204004313210337","message_id":"204004313210337","publishTime":"2018-09-14T19:22:54.587Z","publish_time":"2018-09-14T19:22:54.587Z"},"subscription":"projects/conference-demos/subscriptions/my-subscription"}`), + }, + { + name: "post invalid structured body", + method: "POST", + path: "/", + maxsize: 500 * 1024 * 1024, + status: http.StatusBadRequest, + body: 
strings.NewReader(`{"message":{"attributes":{"thing":1},"data":"bm90IHZhbGlkIGZvcm1hdHRlZCBkYXRh"},"subscription":"projects/conference-demos/subscriptions/my-subscription"}`), + }, + } + + for _, test := range tests { + wg := &sync.WaitGroup{} + req, err := http.NewRequest(test.method, test.path, test.body) + require.NoError(t, err) + + rr := httptest.NewRecorder() + pubPush := &PubSubPush{ + Path: "/", + MaxBodySize: internal.Size{ + Size: test.maxsize, + }, + sem: make(chan struct{}, 1), + undelivered: make(map[telegraf.TrackingID]chan bool), + mu: &sync.Mutex{}, + WriteTimeout: internal.Duration{Duration: time.Second * 1}, + } + + pubPush.ctx, pubPush.cancel = context.WithCancel(context.Background()) + + if test.full { + // fill buffer with fake message + pubPush.sem <- struct{}{} + } + + p, _ := parsers.NewParser(&parsers.Config{ + MetricName: "cloud_pubsub_push", + DataFormat: "influx", + }) + pubPush.SetParser(p) + + dst := make(chan telegraf.Metric, 1) + ro := models.NewRunningOutput("test", &testOutput{failWrite: test.fail}, &models.OutputConfig{}, 1, 1) + pubPush.acc = agent.NewAccumulator(&testMetricMaker{}, dst).WithTracking(1) + + wg.Add(1) + go func() { + defer wg.Done() + pubPush.receiveDelivered() + }() + + wg.Add(1) + go func(status int, d chan telegraf.Metric) { + defer wg.Done() + for m := range d { + ro.AddMetric(m) + ro.Write() + } + }(test.status, dst) + + ctx, cancel := context.WithTimeout(req.Context(), pubPush.WriteTimeout.Duration) + req = req.WithContext(ctx) + + pubPush.ServeHTTP(rr, req) + require.Equal(t, test.status, rr.Code, test.name) + + if test.expected != "" { + require.Equal(t, test.expected, rr.Body.String(), test.name) + } + + pubPush.cancel() + cancel() + close(dst) + wg.Wait() + } +} + +type testMetricMaker struct{} + +func (tm *testMetricMaker) Name() string { + return "TestPlugin" +} + +func (tm *testMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric { + return metric +} + +type testOutput struct { + // if true, mock a write failure + failWrite bool +} + +func (*testOutput) Connect() error { + return nil +} + +func (*testOutput) Close() error { + return nil +} + +func (*testOutput) Description() string { + return "" +} + +func (*testOutput) SampleConfig() string { + return "" +} + +func (t *testOutput) Write(metrics []telegraf.Metric) error { + if t.failWrite { + return fmt.Errorf("failed write") + } + return nil +} From 33773890178392472f1bd21387bff9d7db49dada Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 Feb 2019 18:30:54 -0800 Subject: [PATCH 0649/1815] Add cloud_pubsub_push to readme/changelog --- CHANGELOG.md | 1 + README.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b113cae90..4e9811c8f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ #### New Inputs - [cloud_pubsub](/plugins/inputs/cloud_pubsub/README.md) - Contributed by @emilymye +- [cloud_pubsub_push](/plugins/inputs/cloud_pubsub_push/README.md) - Contributed by @influxdata - [kinesis_consumer](/plugins/inputs/kinesis_consumer/README.md) - Contributed by @influxdata - [kube_inventory](/plugins/inputs/kube_inventory/README.md) - Contributed by @influxdata - [neptune_apex](/plugins/inputs/neptune_apex/README.md) - Contributed by @MaxRenaud diff --git a/README.md b/README.md index 62fe04afd..e8f3d613e 100644 --- a/README.md +++ b/README.md @@ -146,6 +146,7 @@ For documentation on the latest development code see the [documentation index][d * [cgroup](./plugins/inputs/cgroup) * [chrony](./plugins/inputs/chrony) * 
[cloud_pubsub](./plugins/inputs/cloud_pubsub) Google Cloud Pub/Sub +* [cloud_pubsub_push](./plugins/inputs/cloud_pubsub_push) Google Cloud Pub/Sub push endpoint * [conntrack](./plugins/inputs/conntrack) * [consul](./plugins/inputs/consul) * [couchbase](./plugins/inputs/couchbase) From d84e501ab6bbbeaabfeec94e1695d02cf4894f54 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 Feb 2019 18:38:24 -0800 Subject: [PATCH 0650/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4e9811c8f..84a4c05af 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,6 +52,7 @@ - [#5472](https://github.com/influxdata/telegraf/pull/5472): Support configuring a default timezone in JSON parser. - [#5482](https://github.com/influxdata/telegraf/pull/5482): Add ceph_health metrics to ceph input. - [#5488](https://github.com/influxdata/telegraf/pull/5488): Add option to disable unique timestamp adjustment in grok parser. +- [#5473](https://github.com/influxdata/telegraf/pull/5473): Add mutual TLS support to prometheus_client output. #### Bugfixes From 50c1103657a0113a51a95e9f2c0f1e07787d32a8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 Feb 2019 18:42:18 -0800 Subject: [PATCH 0651/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 84a4c05af..1b74a3fbb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -53,6 +53,7 @@ - [#5482](https://github.com/influxdata/telegraf/pull/5482): Add ceph_health metrics to ceph input. - [#5488](https://github.com/influxdata/telegraf/pull/5488): Add option to disable unique timestamp adjustment in grok parser. - [#5473](https://github.com/influxdata/telegraf/pull/5473): Add mutual TLS support to prometheus_client output. +- [#4308](https://github.com/influxdata/telegraf/pull/4308): Add additional metrics to rabbitmq input. 
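Aside: a quick way to exercise the new cloud_pubsub_push input end to end is to hand-build the push envelope the handler above decodes (a base64 `data` field inside a `message` object, plus the `subscription` name, exactly the shape used in the test fixtures). The sketch below is illustrative only: it assumes the plugin is running with the defaults from its `init` (service address `:8080`, path `/`, no token configured); the metric line and subscription name are made up.

```go
package main

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// An influx-format metric, base64-encoded the way Pub/Sub delivers message data.
	line := "testing,sensor=bme_280 temp_c=23.95 1536952974553510231"
	envelope := map[string]interface{}{
		"message": map[string]interface{}{
			// Attributes are added as tags when the plugin's AddMeta option is enabled.
			"attributes": map[string]string{"deviceId": "myPi"},
			"data":       base64.StdEncoding.EncodeToString([]byte(line)),
		},
		"subscription": "projects/demo/subscriptions/my-subscription",
	}
	body, err := json.Marshal(envelope)
	if err != nil {
		panic(err)
	}

	// The handler only accepts POSTs on the configured path.
	resp, err := http.Post("http://localhost:8080/", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```

Note that the handler replies `204 No Content` only after the tracked metric group is actually delivered to outputs; failures come back as `500`, which is what lets Pub/Sub redeliver the message.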
#### Bugfixes From 1872356103a6022e2c206b6af14892d693774b1f Mon Sep 17 00:00:00 2001 From: Matthew Crenshaw <3420325+sgtsquiggs@users.noreply.github.com> Date: Wed, 27 Feb 2019 13:43:39 -0500 Subject: [PATCH 0652/1815] Add multicast support to socket_listener input --- .../inputs/socket_listener/socket_listener.go | 31 +++++++++++++++++-- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go index d81c45994..391427da9 100644 --- a/plugins/inputs/socket_listener/socket_listener.go +++ b/plugins/inputs/socket_listener/socket_listener.go @@ -114,7 +114,7 @@ func (ssl *streamSocketListener) read(c net.Conn) { metrics, err := ssl.Parse(scnr.Bytes()) if err != nil { ssl.AddError(fmt.Errorf("unable to parse incoming line: %s", err)) - //TODO rate limit + // TODO rate limit continue } for _, m := range metrics { @@ -150,7 +150,7 @@ func (psl *packetSocketListener) listen() { metrics, err := psl.Parse(buf[:n]) if err != nil { psl.AddError(fmt.Errorf("unable to parse incoming packet: %s", err)) - //TODO rate limit + // TODO rate limit continue } for _, m := range metrics { @@ -284,7 +284,7 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { sl.Closer = ssl go ssl.listen() case "udp", "udp4", "udp6", "ip", "ip4", "ip6", "unixgram": - pc, err := net.ListenPacket(protocol, addr) + pc, err := udpListen(protocol, addr) if err != nil { return err } @@ -317,6 +317,31 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { return nil } +func udpListen(network string, address string) (net.PacketConn, error) { + switch network { + case "udp", "udp4", "udp6": + var addr *net.UDPAddr + var err error + var ifi *net.Interface + if spl := strings.SplitN(address, "%", 2); len(spl) == 2 { + address = spl[0] + ifi, err = net.InterfaceByName(spl[1]) + if err != nil { + return nil, err + } + } + addr, err = net.ResolveUDPAddr(network, address) + if err != nil { + return nil, err + } + if addr.IP.IsMulticast() { + return net.ListenMulticastUDP(network, ifi, addr) + } + return net.ListenUDP(network, addr) + } + return net.ListenPacket(network, address) +} + func (sl *SocketListener) Stop() { if sl.Closer != nil { sl.Close() From 65b76dc74604d48236d5b4c2bb69719b419254cd Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 27 Feb 2019 10:54:02 -0800 Subject: [PATCH 0653/1815] Add tag based routing in influxdb/influxdb_v2 outputs (#5490) --- plugins/outputs/influxdb/README.md | 4 + plugins/outputs/influxdb/http.go | 202 ++++++++++-------- plugins/outputs/influxdb/http_test.go | 68 +++--- plugins/outputs/influxdb/influxdb.go | 58 ++--- plugins/outputs/influxdb/influxdb_test.go | 42 ++-- plugins/outputs/influxdb/udp.go | 4 +- plugins/outputs/influxdb/udp_test.go | 18 +- plugins/outputs/influxdb_v2/README.md | 4 + plugins/outputs/influxdb_v2/http.go | 63 ++++-- .../outputs/influxdb_v2/http_internal_test.go | 12 -- plugins/outputs/influxdb_v2/influxdb.go | 6 + 11 files changed, 273 insertions(+), 208 deletions(-) diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md index 8a9f1a5b8..48ab3d51b 100644 --- a/plugins/outputs/influxdb/README.md +++ b/plugins/outputs/influxdb/README.md @@ -19,6 +19,10 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP ser ## For UDP url endpoint database needs to be configured on server side. # database = "telegraf" + ## The value of this tag will be used to determine the database. 
If this + ## tag is not set the 'database' option is used as the default. + # database_tag = "" + ## If true, no CREATE DATABASE queries will be sent. Set to true when using ## Telegraf with a user without permissions to create databases or when the ## database already exists. diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 5a589dc0e..43aa55ea8 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -19,13 +19,6 @@ import ( "github.com/influxdata/telegraf/plugins/serializers/influx" ) -type APIErrorType int - -const ( - _ APIErrorType = iota - DatabaseNotFound -) - const ( defaultRequestTimeout = time.Second * 5 defaultDatabase = "telegraf" @@ -37,7 +30,6 @@ const ( ) var ( - // Escape an identifier in InfluxQL. escapeIdentifier = strings.NewReplacer( "\n", `\n`, @@ -46,12 +38,11 @@ var ( ) ) -// APIError is an error reported by the InfluxDB server +// APIError is a general error reported by the InfluxDB server type APIError struct { StatusCode int Title string Description string - Type APIErrorType } func (e APIError) Error() string { @@ -61,6 +52,11 @@ func (e APIError) Error() string { return e.Title } +type DatabaseNotFoundError struct { + APIError + Database string +} + // QueryResponse is the response body from the /query endpoint type QueryResponse struct { Results []QueryResult `json:"results"` @@ -87,51 +83,42 @@ func (r WriteResponse) Error() string { } type HTTPConfig struct { - URL *url.URL - UserAgent string - Timeout time.Duration - Username string - Password string - TLSConfig *tls.Config - Proxy *url.URL - Headers map[string]string - ContentEncoding string - Database string - RetentionPolicy string - Consistency string + URL *url.URL + UserAgent string + Timeout time.Duration + Username string + Password string + TLSConfig *tls.Config + Proxy *url.URL + Headers map[string]string + ContentEncoding string + Database string + DatabaseTag string + RetentionPolicy string + Consistency string + SkipDatabaseCreation bool InfluxUintSupport bool `toml:"influx_uint_support"` Serializer *influx.Serializer } type httpClient struct { - WriteURL string - QueryURL string - ContentEncoding string - Timeout time.Duration - Username string - Password string - Headers map[string]string - - client *http.Client - serializer *influx.Serializer - url *url.URL - database string + client *http.Client + config HTTPConfig + createdDatabases map[string]bool } -func NewHTTPClient(config *HTTPConfig) (*httpClient, error) { +func NewHTTPClient(config HTTPConfig) (*httpClient, error) { if config.URL == nil { return nil, ErrMissingURL } - database := config.Database - if database == "" { - database = defaultDatabase + if config.Database == "" { + config.Database = defaultDatabase } - timeout := config.Timeout - if timeout == 0 { - timeout = defaultRequestTimeout + if config.Timeout == 0 { + config.Timeout = defaultRequestTimeout } userAgent := config.UserAgent @@ -139,10 +126,12 @@ func NewHTTPClient(config *HTTPConfig) (*httpClient, error) { userAgent = "Telegraf/" + internal.Version() } - var headers = make(map[string]string, len(config.Headers)+1) - headers["User-Agent"] = userAgent + if config.Headers == nil { + config.Headers = make(map[string]string) + } + config.Headers["User-Agent"] = userAgent for k, v := range config.Headers { - headers[k] = v + config.Headers[k] = v } var proxy func(*http.Request) (*url.URL, error) @@ -152,22 +141,8 @@ func NewHTTPClient(config *HTTPConfig) (*httpClient, error) { proxy = 
http.ProxyFromEnvironment } - serializer := config.Serializer - if serializer == nil { - serializer = influx.NewSerializer() - } - - writeURL, err := makeWriteURL( - config.URL, - database, - config.RetentionPolicy, - config.Consistency) - if err != nil { - return nil, err - } - queryURL, err := makeQueryURL(config.URL) - if err != nil { - return nil, err + if config.Serializer == nil { + config.Serializer = influx.NewSerializer() } var transport *http.Transport @@ -192,40 +167,32 @@ func NewHTTPClient(config *HTTPConfig) (*httpClient, error) { } client := &httpClient{ - serializer: serializer, client: &http.Client{ - Timeout: timeout, + Timeout: config.Timeout, Transport: transport, }, - database: database, - url: config.URL, - WriteURL: writeURL, - QueryURL: queryURL, - ContentEncoding: config.ContentEncoding, - Timeout: timeout, - Username: config.Username, - Password: config.Password, - Headers: headers, + createdDatabases: make(map[string]bool), + config: config, } return client, nil } // URL returns the origin URL that this client connects too. func (c *httpClient) URL() string { - return c.url.String() + return c.config.URL.String() } -// URL returns the database that this client connects too. +// Database returns the default database that this client connects too. func (c *httpClient) Database() string { - return c.database + return c.config.Database } // CreateDatabase attempts to create a new database in the InfluxDB server. // Note that some names are not allowed by the server, notably those with // non-printable characters or slashes. -func (c *httpClient) CreateDatabase(ctx context.Context) error { +func (c *httpClient) CreateDatabase(ctx context.Context, database string) error { query := fmt.Sprintf(`CREATE DATABASE "%s"`, - escapeIdentifier.Replace(c.database)) + escapeIdentifier.Replace(database)) req, err := c.makeQueryRequest(query) @@ -241,6 +208,7 @@ func (c *httpClient) CreateDatabase(ctx context.Context) error { if err != nil { if resp.StatusCode == 200 { + c.createdDatabases[database] = true return nil } @@ -252,6 +220,7 @@ func (c *httpClient) CreateDatabase(ctx context.Context) error { // Even with a 200 response there can be an error if resp.StatusCode == http.StatusOK && queryResp.Error() == "" { + c.createdDatabases[database] = true return nil } @@ -264,10 +233,52 @@ func (c *httpClient) CreateDatabase(ctx context.Context) error { // Write sends the metrics to InfluxDB func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error { - var err error + batches := make(map[string][]telegraf.Metric) + if c.config.DatabaseTag == "" { + err := c.writeBatch(ctx, c.config.Database, metrics) + if err != nil { + return err + } + } else { + for _, metric := range metrics { + db, ok := metric.GetTag(c.config.DatabaseTag) + if !ok { + db = c.config.Database + } - reader := influx.NewReader(metrics, c.serializer) - req, err := c.makeWriteRequest(reader) + if _, ok := batches[db]; !ok { + batches[db] = make([]telegraf.Metric, 0) + } + + batches[db] = append(batches[db], metric) + } + + for db, batch := range batches { + if !c.config.SkipDatabaseCreation && !c.createdDatabases[db] { + err := c.CreateDatabase(ctx, db) + if err != nil { + log.Printf("W! 
[outputs.influxdb] when writing to [%s]: database %q creation failed: %v", + c.config.URL, db, err) + } + } + + err := c.writeBatch(ctx, db, batch) + if err != nil { + return err + } + } + } + return nil +} + +func (c *httpClient) writeBatch(ctx context.Context, db string, metrics []telegraf.Metric) error { + url, err := makeWriteURL(c.config.URL, db, c.config.RetentionPolicy, c.config.Consistency) + if err != nil { + return err + } + + reader := influx.NewReader(metrics, c.config.Serializer) + req, err := c.makeWriteRequest(url, reader) if err != nil { return err } @@ -292,11 +303,13 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error } if strings.Contains(desc, errStringDatabaseNotFound) { - return &APIError{ - StatusCode: resp.StatusCode, - Title: resp.Status, - Description: desc, - Type: DatabaseNotFound, + return &DatabaseNotFoundError{ + APIError: APIError{ + StatusCode: resp.StatusCode, + Title: resp.Status, + Description: desc, + }, + Database: db, } } @@ -340,11 +353,16 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error } func (c *httpClient) makeQueryRequest(query string) (*http.Request, error) { + queryURL, err := makeQueryURL(c.config.URL) + if err != nil { + return nil, err + } + params := url.Values{} params.Set("q", query) form := strings.NewReader(params.Encode()) - req, err := http.NewRequest("POST", c.QueryURL, form) + req, err := http.NewRequest("POST", queryURL, form) if err != nil { return nil, err } @@ -355,16 +373,16 @@ func (c *httpClient) makeQueryRequest(query string) (*http.Request, error) { return req, nil } -func (c *httpClient) makeWriteRequest(body io.Reader) (*http.Request, error) { +func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request, error) { var err error - if c.ContentEncoding == "gzip" { + if c.config.ContentEncoding == "gzip" { body, err = internal.CompressWithGzip(body) if err != nil { return nil, err } } - req, err := http.NewRequest("POST", c.WriteURL, body) + req, err := http.NewRequest("POST", url, body) if err != nil { return nil, err } @@ -372,7 +390,7 @@ func (c *httpClient) makeWriteRequest(body io.Reader) (*http.Request, error) { req.Header.Set("Content-Type", "text/plain; charset=utf-8") c.addHeaders(req) - if c.ContentEncoding == "gzip" { + if c.config.ContentEncoding == "gzip" { req.Header.Set("Content-Encoding", "gzip") } @@ -380,11 +398,11 @@ func (c *httpClient) makeWriteRequest(body io.Reader) (*http.Request, error) { } func (c *httpClient) addHeaders(req *http.Request) { - if c.Username != "" || c.Password != "" { - req.SetBasicAuth(c.Username, c.Password) + if c.config.Username != "" || c.config.Password != "" { + req.SetBasicAuth(c.config.Username, c.config.Password) } - for header, value := range c.Headers { + for header, value := range c.config.Headers { req.Header.Set(header, value) } } diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index fa648f0f8..2b6b45eef 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -33,14 +33,14 @@ func getHTTPURL() *url.URL { } func TestHTTP_EmptyConfig(t *testing.T) { - config := &influxdb.HTTPConfig{} + config := influxdb.HTTPConfig{} _, err := influxdb.NewHTTPClient(config) require.Error(t, err) require.Contains(t, err.Error(), influxdb.ErrMissingURL.Error()) } func TestHTTP_MinimalConfig(t *testing.T) { - config := &influxdb.HTTPConfig{ + config := influxdb.HTTPConfig{ URL: getHTTPURL(), } _, err := 
influxdb.NewHTTPClient(config) @@ -48,7 +48,7 @@ func TestHTTP_MinimalConfig(t *testing.T) { } func TestHTTP_UnsupportedScheme(t *testing.T) { - config := &influxdb.HTTPConfig{ + config := influxdb.HTTPConfig{ URL: &url.URL{ Scheme: "foo", Host: "localhost", @@ -69,14 +69,14 @@ func TestHTTP_CreateDatabase(t *testing.T) { tests := []struct { name string - config *influxdb.HTTPConfig + config influxdb.HTTPConfig database string queryHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) errFunc func(t *testing.T, err error) }{ { name: "success", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "xyzzy", }, @@ -88,7 +88,7 @@ func TestHTTP_CreateDatabase(t *testing.T) { }, { name: "send basic auth", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Username: "guy", Password: "smiley", @@ -106,7 +106,7 @@ func TestHTTP_CreateDatabase(t *testing.T) { }, { name: "send user agent", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Headers: map[string]string{ "A": "B", @@ -124,7 +124,7 @@ func TestHTTP_CreateDatabase(t *testing.T) { }, { name: "send headers", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Headers: map[string]string{ "A": "B", @@ -141,7 +141,7 @@ func TestHTTP_CreateDatabase(t *testing.T) { }, { name: "database default", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { @@ -152,7 +152,7 @@ func TestHTTP_CreateDatabase(t *testing.T) { }, { name: "database name is escaped", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: `a " b`, }, @@ -164,7 +164,7 @@ func TestHTTP_CreateDatabase(t *testing.T) { }, { name: "invalid database name creates api error", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: `a \\ b`, }, @@ -185,7 +185,7 @@ func TestHTTP_CreateDatabase(t *testing.T) { }, { name: "error with no response body", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", }, @@ -203,7 +203,7 @@ func TestHTTP_CreateDatabase(t *testing.T) { }, { name: "ok with no response body", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", }, @@ -230,7 +230,7 @@ func TestHTTP_CreateDatabase(t *testing.T) { client, err := influxdb.NewHTTPClient(tt.config) require.NoError(t, err) - err = client.CreateDatabase(ctx) + err = client.CreateDatabase(ctx, client.Database()) if tt.errFunc != nil { tt.errFunc(t, err) } else { @@ -251,14 +251,14 @@ func TestHTTP_Write(t *testing.T) { tests := []struct { name string - config *influxdb.HTTPConfig + config influxdb.HTTPConfig queryHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) errFunc func(t *testing.T, err error) logFunc func(t *testing.T, str string) }{ { name: "success", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", }, @@ -272,7 +272,7 @@ func TestHTTP_Write(t *testing.T) { }, { name: "send basic auth", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", Username: "guy", @@ -288,7 +288,7 @@ func TestHTTP_Write(t *testing.T) { }, { name: "send user agent", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", UserAgent: "telegraf", @@ -300,7 +300,7 @@ func TestHTTP_Write(t *testing.T) { }, { name: "default user agent", - config: &influxdb.HTTPConfig{ + config: 
influxdb.HTTPConfig{ URL: u, Database: "telegraf", }, @@ -311,7 +311,7 @@ func TestHTTP_Write(t *testing.T) { }, { name: "default database", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { @@ -321,7 +321,7 @@ func TestHTTP_Write(t *testing.T) { }, { name: "send headers", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Headers: map[string]string{ "A": "B", @@ -336,7 +336,7 @@ func TestHTTP_Write(t *testing.T) { }, { name: "send retention policy", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", RetentionPolicy: "foo", @@ -348,7 +348,7 @@ func TestHTTP_Write(t *testing.T) { }, { name: "send consistency", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", Consistency: "all", @@ -360,7 +360,7 @@ func TestHTTP_Write(t *testing.T) { }, { name: "hinted handoff not empty no log no error", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", }, @@ -374,7 +374,7 @@ func TestHTTP_Write(t *testing.T) { }, { name: "partial write errors are logged no error", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", }, @@ -388,7 +388,7 @@ func TestHTTP_Write(t *testing.T) { }, { name: "parse errors are logged no error", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", }, @@ -402,7 +402,7 @@ func TestHTTP_Write(t *testing.T) { }, { name: "http error", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", }, @@ -419,7 +419,7 @@ func TestHTTP_Write(t *testing.T) { }, { name: "http error with desc", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", }, @@ -520,14 +520,14 @@ func TestHTTP_WritePathPrefix(t *testing.T) { require.NoError(t, err) metrics := []telegraf.Metric{m} - config := &influxdb.HTTPConfig{ + config := influxdb.HTTPConfig{ URL: u, Database: "telegraf", } client, err := influxdb.NewHTTPClient(config) require.NoError(t, err) - err = client.CreateDatabase(ctx) + err = client.CreateDatabase(ctx, config.Database) require.NoError(t, err) err = client.Write(ctx, metrics) require.NoError(t, err) @@ -573,7 +573,7 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) { require.NoError(t, err) metrics := []telegraf.Metric{m} - config := &influxdb.HTTPConfig{ + config := influxdb.HTTPConfig{ URL: u, Database: "telegraf", ContentEncoding: "gzip", @@ -605,7 +605,7 @@ func TestHTTP_UnixSocket(t *testing.T) { tests := []struct { name string - config *influxdb.HTTPConfig + config influxdb.HTTPConfig database string queryHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) writeHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) @@ -613,7 +613,7 @@ func TestHTTP_UnixSocket(t *testing.T) { }{ { name: "success", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: &url.URL{Scheme: "unix", Path: sock}, Database: "xyzzy", }, @@ -649,7 +649,7 @@ func TestHTTP_UnixSocket(t *testing.T) { client, err := influxdb.NewHTTPClient(tt.config) require.NoError(t, err) - err = client.CreateDatabase(ctx) + err = client.CreateDatabase(ctx, tt.config.Database) if tt.errFunc != nil { tt.errFunc(t, err) } else { diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index a3f2fd003..3b3e80206 100644 --- 
a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -24,10 +24,9 @@ var ( type Client interface { Write(context.Context, []telegraf.Metric) error - CreateDatabase(ctx context.Context) error - - URL() string + CreateDatabase(ctx context.Context, database string) error Database() string + URL() string } // InfluxDB struct is the primary data structure for the plugin @@ -37,6 +36,7 @@ type InfluxDB struct { Username string Password string Database string + DatabaseTag string `toml:"database_tag"` UserAgent string RetentionPolicy string WriteConsistency string @@ -72,6 +72,10 @@ var sampleConfig = ` ## For UDP url endpoint database needs to be configured on server side. # database = "telegraf" + ## The value of this tag will be used to determine the database. If this + ## tag is not set the 'database' option is used as the default. + # database_tag = "" + ## If true, no CREATE DATABASE queries will be sent. Set to true when using ## Telegraf with a user without permissions to create databases or when the ## database already exists. @@ -205,14 +209,12 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { } switch apiError := err.(type) { - case *APIError: + case *DatabaseNotFoundError: if !i.SkipDatabaseCreation { - if apiError.Type == DatabaseNotFound { - err := client.CreateDatabase(ctx) - if err != nil { - log.Printf("E! [outputs.influxdb] when writing to [%s]: database %q not found and failed to recreate", - client.URL(), client.Database()) - } + err := client.CreateDatabase(ctx, apiError.Database) + if err != nil { + log.Printf("E! [outputs.influxdb] when writing to [%s]: database %q not found and failed to recreate", + client.URL(), apiError.Database) } } } @@ -245,19 +247,21 @@ func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL) } config := &HTTPConfig{ - URL: url, - Timeout: i.Timeout.Duration, - TLSConfig: tlsConfig, - UserAgent: i.UserAgent, - Username: i.Username, - Password: i.Password, - Proxy: proxy, - ContentEncoding: i.ContentEncoding, - Headers: i.HTTPHeaders, - Database: i.Database, - RetentionPolicy: i.RetentionPolicy, - Consistency: i.WriteConsistency, - Serializer: i.serializer, + URL: url, + Timeout: i.Timeout.Duration, + TLSConfig: tlsConfig, + UserAgent: i.UserAgent, + Username: i.Username, + Password: i.Password, + Proxy: proxy, + ContentEncoding: i.ContentEncoding, + Headers: i.HTTPHeaders, + Database: i.Database, + DatabaseTag: i.DatabaseTag, + SkipDatabaseCreation: i.SkipDatabaseCreation, + RetentionPolicy: i.RetentionPolicy, + Consistency: i.WriteConsistency, + Serializer: i.serializer, } c, err := i.CreateHTTPClientF(config) @@ -266,10 +270,10 @@ func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL) } if !i.SkipDatabaseCreation { - err = c.CreateDatabase(ctx) + err = c.CreateDatabase(ctx, c.Database()) if err != nil { log.Printf("W! 
[outputs.influxdb] when writing to [%s]: database %q creation failed: %v", - c.URL(), c.Database(), err) + c.URL(), i.Database, err) } } @@ -281,10 +285,10 @@ func init() { return &InfluxDB{ Timeout: internal.Duration{Duration: time.Second * 5}, CreateHTTPClientF: func(config *HTTPConfig) (Client, error) { - return NewHTTPClient(config) + return NewHTTPClient(*config) }, CreateUDPClientF: func(config *UDPConfig) (Client, error) { - return NewUDPClient(config) + return NewUDPClient(*config) }, } }) diff --git a/plugins/outputs/influxdb/influxdb_test.go b/plugins/outputs/influxdb/influxdb_test.go index 63ecc47be..2f47d8134 100644 --- a/plugins/outputs/influxdb/influxdb_test.go +++ b/plugins/outputs/influxdb/influxdb_test.go @@ -16,25 +16,25 @@ import ( type MockClient struct { URLF func() string - DatabaseF func() string WriteF func(context.Context, []telegraf.Metric) error - CreateDatabaseF func(ctx context.Context) error + CreateDatabaseF func(ctx context.Context, database string) error + DatabaseF func() string } func (c *MockClient) URL() string { return c.URLF() } -func (c *MockClient) Database() string { - return c.DatabaseF() -} - func (c *MockClient) Write(ctx context.Context, metrics []telegraf.Metric) error { return c.WriteF(ctx, metrics) } -func (c *MockClient) CreateDatabase(ctx context.Context) error { - return c.CreateDatabaseF(ctx) +func (c *MockClient) CreateDatabase(ctx context.Context, database string) error { + return c.CreateDatabaseF(ctx, database) +} + +func (c *MockClient) Database() string { + return c.DatabaseF() } func TestDeprecatedURLSupport(t *testing.T) { @@ -58,7 +58,10 @@ func TestDefaultURL(t *testing.T) { CreateHTTPClientF: func(config *influxdb.HTTPConfig) (influxdb.Client, error) { actual = config return &MockClient{ - CreateDatabaseF: func(ctx context.Context) error { + DatabaseF: func() string { + return "telegraf" + }, + CreateDatabaseF: func(ctx context.Context, database string) error { return nil }, }, nil @@ -113,7 +116,10 @@ func TestConnectHTTPConfig(t *testing.T) { CreateHTTPClientF: func(config *influxdb.HTTPConfig) (influxdb.Client, error) { actual = config return &MockClient{ - CreateDatabaseF: func(ctx context.Context) error { + DatabaseF: func() string { + return "telegraf" + }, + CreateDatabaseF: func(ctx context.Context, database string) error { return nil }, }, nil @@ -145,15 +151,19 @@ func TestWriteRecreateDatabaseIfDatabaseNotFound(t *testing.T) { CreateHTTPClientF: func(config *influxdb.HTTPConfig) (influxdb.Client, error) { return &MockClient{ - CreateDatabaseF: func(ctx context.Context) error { + DatabaseF: func() string { + return "telegraf" + }, + CreateDatabaseF: func(ctx context.Context, database string) error { return nil }, WriteF: func(ctx context.Context, metrics []telegraf.Metric) error { - return &influxdb.APIError{ - StatusCode: http.StatusNotFound, - Title: "404 Not Found", - Description: `database not found "telegraf"`, - Type: influxdb.DatabaseNotFound, + return &influxdb.DatabaseNotFoundError{ + APIError: influxdb.APIError{ + StatusCode: http.StatusNotFound, + Title: "404 Not Found", + Description: `database not found "telegraf"`, + }, } }, URLF: func() string { diff --git a/plugins/outputs/influxdb/udp.go b/plugins/outputs/influxdb/udp.go index 8e636d340..31c854def 100644 --- a/plugins/outputs/influxdb/udp.go +++ b/plugins/outputs/influxdb/udp.go @@ -34,7 +34,7 @@ type UDPConfig struct { Dialer Dialer } -func NewUDPClient(config *UDPConfig) (*udpClient, error) { +func NewUDPClient(config UDPConfig) (*udpClient, error) 
{ if config.URL == nil { return nil, ErrMissingURL } @@ -113,7 +113,7 @@ func (c *udpClient) Write(ctx context.Context, metrics []telegraf.Metric) error return nil } -func (c *udpClient) CreateDatabase(ctx context.Context) error { +func (c *udpClient) CreateDatabase(ctx context.Context, database string) error { return nil } diff --git a/plugins/outputs/influxdb/udp_test.go b/plugins/outputs/influxdb/udp_test.go index 2d21fd7bf..136ebb787 100644 --- a/plugins/outputs/influxdb/udp_test.go +++ b/plugins/outputs/influxdb/udp_test.go @@ -66,14 +66,14 @@ func (d *MockDialer) DialContext(ctx context.Context, network string, address st } func TestUDP_NewUDPClientNoURL(t *testing.T) { - config := &influxdb.UDPConfig{} + config := influxdb.UDPConfig{} _, err := influxdb.NewUDPClient(config) require.Equal(t, err, influxdb.ErrMissingURL) } func TestUDP_URL(t *testing.T) { u := getURL() - config := &influxdb.UDPConfig{ + config := influxdb.UDPConfig{ URL: u, } @@ -86,7 +86,7 @@ func TestUDP_URL(t *testing.T) { func TestUDP_Simple(t *testing.T) { var buffer bytes.Buffer - config := &influxdb.UDPConfig{ + config := influxdb.UDPConfig{ URL: getURL(), Dialer: &MockDialer{ DialContextF: func(network, address string) (influxdb.Conn, error) { @@ -117,7 +117,7 @@ func TestUDP_DialError(t *testing.T) { u, err := url.Parse("invalid://127.0.0.1:9999") require.NoError(t, err) - config := &influxdb.UDPConfig{ + config := influxdb.UDPConfig{ URL: u, Dialer: &MockDialer{ DialContextF: func(network, address string) (influxdb.Conn, error) { @@ -137,7 +137,7 @@ func TestUDP_DialError(t *testing.T) { func TestUDP_WriteError(t *testing.T) { closed := false - config := &influxdb.UDPConfig{ + config := influxdb.UDPConfig{ URL: getURL(), Dialer: &MockDialer{ DialContextF: func(network, address string) (influxdb.Conn, error) { @@ -167,13 +167,13 @@ func TestUDP_WriteError(t *testing.T) { func TestUDP_ErrorLogging(t *testing.T) { tests := []struct { name string - config *influxdb.UDPConfig + config influxdb.UDPConfig metrics []telegraf.Metric logContains string }{ { name: "logs need more space", - config: &influxdb.UDPConfig{ + config: influxdb.UDPConfig{ MaxPayloadSize: 1, URL: getURL(), Dialer: &MockDialer{ @@ -188,7 +188,7 @@ func TestUDP_ErrorLogging(t *testing.T) { }, { name: "logs series name", - config: &influxdb.UDPConfig{ + config: influxdb.UDPConfig{ URL: getURL(), Dialer: &MockDialer{ DialContextF: func(network, address string) (influxdb.Conn, error) { @@ -258,7 +258,7 @@ func TestUDP_WriteWithRealConn(t *testing.T) { u, err := url.Parse(fmt.Sprintf("%s://%s", addr.Network(), addr)) require.NoError(t, err) - config := &influxdb.UDPConfig{ + config := influxdb.UDPConfig{ URL: u, } client, err := influxdb.NewUDPClient(config) diff --git a/plugins/outputs/influxdb_v2/README.md b/plugins/outputs/influxdb_v2/README.md index 245391d48..830e70b41 100644 --- a/plugins/outputs/influxdb_v2/README.md +++ b/plugins/outputs/influxdb_v2/README.md @@ -22,6 +22,10 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v2.x] HTTP service. ## Destination bucket to write into. bucket = "" + ## The value of this tag will be used to determine the bucket. If this + ## tag is not set the 'bucket' option is used as the default. + # bucket_tag = "" + ## Timeout for HTTP messages. 
# timeout = "5s" diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index 8709a9b84..cdc40c148 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -20,13 +20,10 @@ import ( "github.com/influxdata/telegraf/plugins/serializers/influx" ) -type APIErrorType int - type APIError struct { StatusCode int Title string Description string - Type APIErrorType } func (e APIError) Error() string { @@ -47,6 +44,7 @@ type HTTPConfig struct { Token string Organization string Bucket string + BucketTag string Timeout time.Duration Headers map[string]string Proxy *url.URL @@ -58,10 +56,12 @@ type HTTPConfig struct { } type httpClient struct { - WriteURL string ContentEncoding string Timeout time.Duration Headers map[string]string + Organization string + Bucket string + BucketTag string client *http.Client serializer *influx.Serializer @@ -103,14 +103,6 @@ func NewHTTPClient(config *HTTPConfig) (*httpClient, error) { serializer = influx.NewSerializer() } - writeURL, err := makeWriteURL( - *config.URL, - config.Organization, - config.Bucket) - if err != nil { - return nil, err - } - var transport *http.Transport switch config.URL.Scheme { case "http", "https": @@ -139,10 +131,12 @@ func NewHTTPClient(config *HTTPConfig) (*httpClient, error) { Transport: transport, }, url: config.URL, - WriteURL: writeURL, ContentEncoding: config.ContentEncoding, Timeout: timeout, Headers: headers, + Organization: config.Organization, + Bucket: config.Bucket, + BucketTag: config.BucketTag, } return client, nil } @@ -173,8 +167,45 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error if c.retryTime.After(time.Now()) { return errors.New("Retry time has not elapsed") } + + batches := make(map[string][]telegraf.Metric) + if c.BucketTag == "" { + err := c.writeBatch(ctx, c.Bucket, metrics) + if err != nil { + return err + } + } else { + for _, metric := range metrics { + bucket, ok := metric.GetTag(c.BucketTag) + if !ok { + bucket = c.Bucket + } + + if _, ok := batches[bucket]; !ok { + batches[bucket] = make([]telegraf.Metric, 0) + } + + batches[bucket] = append(batches[bucket], metric) + } + + for bucket, batch := range batches { + err := c.writeBatch(ctx, bucket, batch) + if err != nil { + return err + } + } + } + return nil +} + +func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []telegraf.Metric) error { + url, err := makeWriteURL(*c.url, c.Organization, bucket) + if err != nil { + return err + } + reader := influx.NewReader(metrics, c.serializer) - req, err := c.makeWriteRequest(reader) + req, err := c.makeWriteRequest(url, reader) if err != nil { return err } @@ -227,7 +258,7 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error } } -func (c *httpClient) makeWriteRequest(body io.Reader) (*http.Request, error) { +func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request, error) { var err error if c.ContentEncoding == "gzip" { body, err = internal.CompressWithGzip(body) @@ -236,7 +267,7 @@ func (c *httpClient) makeWriteRequest(body io.Reader) (*http.Request, error) { } } - req, err := http.NewRequest("POST", c.WriteURL, body) + req, err := http.NewRequest("POST", url, body) if err != nil { return nil, err } diff --git a/plugins/outputs/influxdb_v2/http_internal_test.go b/plugins/outputs/influxdb_v2/http_internal_test.go index 748519a7b..e9685da12 100644 --- a/plugins/outputs/influxdb_v2/http_internal_test.go +++ 
b/plugins/outputs/influxdb_v2/http_internal_test.go @@ -1,7 +1,6 @@ package influxdb_v2 import ( - "io" "net/url" "testing" @@ -46,14 +45,3 @@ func TestMakeWriteURL(t *testing.T) { } } } - -func TestMakeWriteRequest(t *testing.T) { - reader, _ := io.Pipe() - cli := httpClient{ - WriteURL: "http://localhost:9999/v2/write?bucket=telegraf&org=influx", - ContentEncoding: "gzip", - Headers: map[string]string{"x": "y"}, - } - _, err := cli.makeWriteRequest(reader) - require.NoError(t, err) -} diff --git a/plugins/outputs/influxdb_v2/influxdb.go b/plugins/outputs/influxdb_v2/influxdb.go index a3722a046..d0d6800a6 100644 --- a/plugins/outputs/influxdb_v2/influxdb.go +++ b/plugins/outputs/influxdb_v2/influxdb.go @@ -38,6 +38,10 @@ var sampleConfig = ` ## Destination bucket to write into. bucket = "" + ## The value of this tag will be used to determine the bucket. If this + ## tag is not set the 'bucket' option is used as the default. + # bucket_tag = "" + ## Timeout for HTTP messages. # timeout = "5s" @@ -77,6 +81,7 @@ type InfluxDB struct { Token string `toml:"token"` Organization string `toml:"organization"` Bucket string `toml:"bucket"` + BucketTag string `toml:"bucket_tag"` Timeout internal.Duration `toml:"timeout"` HTTPHeaders map[string]string `toml:"http_headers"` HTTPProxy string `toml:"http_proxy"` @@ -174,6 +179,7 @@ func (i *InfluxDB) getHTTPClient(ctx context.Context, url *url.URL, proxy *url.U Token: i.Token, Organization: i.Organization, Bucket: i.Bucket, + BucketTag: i.BucketTag, Timeout: i.Timeout.Duration, Headers: i.HTTPHeaders, Proxy: proxy, From a4b4dd82958987b90142a37774e7fa5d793b31a1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 27 Feb 2019 10:46:11 -0800 Subject: [PATCH 0654/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b74a3fbb..075d5aa63 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,6 +54,8 @@ - [#5488](https://github.com/influxdata/telegraf/pull/5488): Add option to disable unique timestamp adjustment in grok parser. - [#5473](https://github.com/influxdata/telegraf/pull/5473): Add mutual TLS support to prometheus_client output. - [#4308](https://github.com/influxdata/telegraf/pull/4308): Add additional metrics to rabbitmq input. +- [#5388](https://github.com/influxdata/telegraf/pull/5388): Add multicast support to socket_listener input. +- [#5490](https://github.com/influxdata/telegraf/pull/5490): Add tag based routing in influxdb/influxdb_v2 outputs. #### Bugfixes From 2d2abe295b332da2e1be8905ad0090dfdd0422d3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 27 Feb 2019 11:10:54 -0800 Subject: [PATCH 0655/1815] Update sample telegraf.conf --- etc/telegraf.conf | 2621 +++++++-------------------------------------- 1 file changed, 400 insertions(+), 2221 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 7cf955c4e..18466692d 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -47,8 +47,8 @@ ## same time, which can have a measurable effect on the system. collection_jitter = "0s" - ## Default flushing interval for all outputs. Maximum flush_interval will be - ## flush_interval + flush_jitter + ## Default flushing interval for all outputs. You shouldn't set this below + ## interval. Maximum flush_interval will be flush_interval + flush_jitter flush_interval = "10s" ## Jitter the flush interval by a random amount. This is primarily to avoid ## large write spikes for users running a large number of telegraf instances. 
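Worth calling out from the influxdb/influxdb_v2 changes above: both outputs now implement the same routing pattern, partitioning each write batch by the value of `database_tag`/`bucket_tag` and falling back to the static option when the tag is absent. A minimal, self-contained sketch of that pattern follows; `Metric` here is a stand-in type, not `telegraf.Metric`, and the tag values are invented.

```go
package main

import "fmt"

type Metric struct {
	Name string
	Tags map[string]string
}

// batchByTag groups metrics by the value of tagKey, using fallback as the
// destination for metrics that do not carry the tag. This mirrors the
// batching loop in the outputs' Write methods.
func batchByTag(metrics []Metric, tagKey, fallback string) map[string][]Metric {
	batches := make(map[string][]Metric)
	for _, m := range metrics {
		dest, ok := m.Tags[tagKey]
		if !ok {
			dest = fallback
		}
		batches[dest] = append(batches[dest], m)
	}
	return batches
}

func main() {
	ms := []Metric{
		{Name: "cpu", Tags: map[string]string{"database": "app1"}},
		{Name: "mem", Tags: map[string]string{}},
	}
	for db, batch := range batchByTag(ms, "database", "telegraf") {
		fmt.Printf("%s -> %d metric(s)\n", db, len(batch))
	}
}
```

Grouping up front keeps one HTTP request per destination rather than one per metric, which is why the diff builds the `batches` map before calling `writeBatch` instead of writing as it iterates.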
@@ -82,69 +82,48 @@ # OUTPUT PLUGINS # ############################################################################### -# Configuration for sending metrics to InfluxDB +# Configuration for influxdb server to send metrics to [[outputs.influxdb]] ## The full HTTP or UDP URL for your InfluxDB instance. ## - ## Multiple URLs can be specified for a single cluster, only ONE of the - ## urls will be written to each interval. - # urls = ["unix:///var/run/influxdb.sock"] - # urls = ["udp://127.0.0.1:8089"] - # urls = ["http://127.0.0.1:8086"] - - ## The target database for metrics; will be created as needed. - ## For UDP url endpoint database needs to be configured on server side. - # database = "telegraf" - - ## If true, no CREATE DATABASE queries will be sent. Set to true when using - ## Telegraf with a user without permissions to create databases or when the - ## database already exists. - # skip_database_creation = false + ## Multiple urls can be specified as part of the same cluster, + ## this means that only ONE of the urls will be written to each interval. + # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example + urls = ["http://127.0.0.1:8086"] # required + ## The target database for metrics (telegraf will create it if not exists). + database = "telegraf" # required ## Name of existing retention policy to write to. Empty string writes to - ## the default retention policy. Only takes effect when using HTTP. - # retention_policy = "" + ## the default retention policy. + retention_policy = "" + ## Write consistency (clusters only), can be: "any", "one", "quorum", "all" + write_consistency = "any" - ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". - ## Only takes effect when using HTTP. - # write_consistency = "any" - - ## Timeout for HTTP messages. - # timeout = "5s" - - ## HTTP Basic Auth + ## Write timeout (for the InfluxDB client), formatted as a string. + ## If not provided, will default to 5s. 0s means no timeout (not recommended). + timeout = "5s" # username = "telegraf" # password = "metricsmetricsmetricsmetrics" - - ## HTTP User-Agent + ## Set the user agent for HTTP POSTs (can be useful for log differentiation) # user_agent = "telegraf" + ## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) + # udp_payload = 512 - ## UDP payload size is the maximum packet size to send. - # udp_payload = "512B" - - ## Optional TLS Config for use on HTTP connections. - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification + ## Optional SSL Config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ## HTTP Proxy override, if unset values the standard proxy environment - ## variables are consulted to determine which proxy, if any, should be used. + ## HTTP Proxy Config # http_proxy = "http://corporate.proxy:3128" - ## Additional HTTP headers + ## Optional HTTP headers # http_headers = {"X-Special-Header" = "Special-Value"} - ## HTTP Content-Encoding for write request body, can be set to "gzip" to - ## compress body or "identity" to apply no encoding. - # content_encoding = "identity" - - ## When true, Telegraf will output unsigned integers as unsigned values, - ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned - ## integer values. 
Enabling this option will result in field type errors if - ## existing data has been written. - # influx_uint_support = false + ## Compress each HTTP request payload using GZIP. + # content_encoding = "gzip" # # Configuration for Amon Server to send metrics to. @@ -159,187 +138,44 @@ # # timeout = "5s" -# # Publishes metrics to an AMQP broker +# # Configuration for the AMQP server to send metrics to # [[outputs.amqp]] -# ## Broker to publish to. -# ## deprecated in 1.7; use the brokers option -# # url = "amqp://localhost:5672/influxdb" -# -# ## Brokers to publish to. If multiple brokers are specified a random broker -# ## will be selected anytime a connection is established. This can be -# ## helpful for load balancing when not using a dedicated load balancer. -# brokers = ["amqp://localhost:5672/influxdb"] -# -# ## Maximum messages to send over a connection. Once this is reached, the -# ## connection is closed and a new connection is made. This can be helpful for -# ## load balancing when not using a dedicated load balancer. -# # max_messages = 0 -# -# ## Exchange to declare and publish to. +# ## AMQP url +# url = "amqp://localhost:5672/influxdb" +# ## AMQP exchange # exchange = "telegraf" -# -# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". -# # exchange_type = "topic" -# -# ## If true, exchange will be passively declared. -# # exchange_declare_passive = false -# -# ## Exchange durability can be either "transient" or "durable". -# # exchange_durability = "durable" -# -# ## Additional exchange arguments. -# # exchange_arguments = { } -# # exchange_arguments = {"hash_propery" = "timestamp"} -# -# ## Authentication credentials for the PLAIN auth_method. -# # username = "" -# # password = "" -# # ## Auth method. PLAIN and EXTERNAL are supported # ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as # ## described here: https://www.rabbitmq.com/plugins.html # # auth_method = "PLAIN" +# ## Telegraf tag to use as a routing key +# ## ie, if this tag exists, its value will be used as the routing key +# routing_tag = "host" +# ## Delivery Mode controls if a published message is persistent +# ## Valid options are "transient" and "persistent". default: "transient" +# delivery_mode = "transient" # -# ## Metric tag to use as a routing key. -# ## ie, if this tag exists, its value will be used as the routing key -# # routing_tag = "host" -# -# ## Static routing key. Used when no routing_tag is set or as a fallback -# ## when the tag specified in routing tag is not found. -# # routing_key = "" -# # routing_key = "telegraf" -# -# ## Delivery Mode controls if a published message is persistent. -# ## One of "transient" or "persistent". -# # delivery_mode = "transient" -# -# ## InfluxDB database added as a message header. -# ## deprecated in 1.7; use the headers option +# ## InfluxDB retention policy +# # retention_policy = "default" +# ## InfluxDB database # # database = "telegraf" # -# ## InfluxDB retention policy added as a message header -# ## deprecated in 1.7; use the headers option -# # retention_policy = "default" -# -# ## Static headers added to each published message. -# # headers = { } -# # headers = {"database" = "telegraf", "retention_policy" = "default"} -# -# ## Connection timeout. If not provided, will default to 5s. 0s means no -# ## timeout (not recommended). +# ## Write timeout, formatted as a string. If not provided, will default +# ## to 5s. 0s means no timeout (not recommended). 
# # timeout = "5s" # -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification # # insecure_skip_verify = false # -# ## If true use batch serialization format instead of line based delimiting. -# ## Only applies to data formats which are not line based such as JSON. -# ## Recommended to set to true. -# # use_batch_format = false -# # ## Data format to output. # ## Each data format has its own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md -# # data_format = "influx" - - -# # Send metrics to Azure Application Insights -# [[outputs.application_insights]] -# ## Instrumentation key of the Application Insights resource. -# instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx" -# -# ## Timeout for closing (default: 5s). -# # timeout = "5s" -# -# ## Enable additional diagnostic logging. -# # enable_diagnostic_logging = false -# -# ## Context Tag Sources add Application Insights context tags to a tag value. -# ## -# ## For list of allowed context tag keys see: -# ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go -# # [outputs.application_insights.context_tag_sources] -# # "ai.cloud.role" = "kubernetes_container_name" -# # "ai.cloud.roleInstance" = "kubernetes_pod_name" - - -# # Send aggregate metrics to Azure Monitor -# [[outputs.azure_monitor]] -# ## Timeout for HTTP writes. -# # timeout = "20s" -# -# ## Set the namespace prefix, defaults to "Telegraf/". -# # namespace_prefix = "Telegraf/" -# -# ## Azure Monitor doesn't have a string value type, so convert string -# ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows -# ## a maximum of 10 dimensions so Telegraf will only send the first 10 -# ## alphanumeric dimensions. -# # strings_as_dimensions = false -# -# ## Both region and resource_id must be set or be available via the -# ## Instance Metadata service on Azure Virtual Machines. -# # -# ## Azure Region to publish metrics against. -# ## ex: region = "southcentralus" -# # region = "" -# # -# ## The Azure Resource ID against which metric will be logged, e.g. -# ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" -# # resource_id = "" - - -# # Publish Telegraf metrics to a Google Cloud PubSub topic -# [[outputs.cloud_pubsub]] -# ## Required. Name of Google Cloud Platform (GCP) Project that owns -# ## the given PubSub subscription. -# project = "my-project" -# -# ## Required. Name of PubSub subscription to ingest metrics from. -# subscription = "my-subscription" -# -# ## Required. Data format to consume. -# ## Each data format has its own unique set of configuration options. -# ## Read more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" -# -# ## Optional. Filepath for GCP credentials JSON file to authorize calls to -# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use -# ## Application Default Credentials, which is preferred. -# # credentials_file = "path/to/my/creds.json" -# -# ## Optional. If true, will send all metrics per write in one PubSub message. 
-# # send_batched = true -# -# ## The following publish_* parameters specifically configures batching -# ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read -# ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings -# -# ## Optional. Send a request to PubSub (i.e. actually publish a batch) -# ## when it has this many PubSub messages. If send_batched is true, -# ## this is ignored and treated as if it were 1. -# # publish_count_threshold = 1000 -# -# ## Optional. Send a request to PubSub (i.e. actually publish a batch) -# ## when it has this many PubSub messages. If send_batched is true, -# ## this is ignored and treated as if it were 1 -# # publish_byte_threshold = 1000000 -# -# ## Optional. Specifically configures requests made to the PubSub API. -# # publish_num_go_routines = 2 -# -# ## Optional. Specifies a timeout for requests to the PubSub API. -# # publish_timeout = "30s" -# -# ## Optional. PubSub attributes to add to metrics. -# # [[inputs.pubsub.attributes]] -# # my_attr = "tag_value" # # Configuration for AWS CloudWatch output. @@ -362,22 +198,8 @@ # #profile = "" # #shared_credential_file = "" # -# ## Endpoint to make request against, the correct endpoint is automatically -# ## determined and this option should only be set if you wish to override the -# ## default. -# ## ex: endpoint_url = "http://localhost:8000" -# # endpoint_url = "" -# # ## Namespace for the CloudWatch MetricDatums # namespace = "InfluxData/Telegraf" -# -# ## If you have a large amount of metrics, you should consider to send statistic -# ## values instead of raw metrics which could not only improve performance but -# ## also save AWS API cost. If enable this flag, this plugin would parse the required -# ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. -# ## You could use basicstats aggregator to calculate those fields. If not all statistic -# ## fields are available, all fields would still be sent as raw metrics. -# # write_statistics = false # # Configuration for CrateDB to send metrics to. @@ -398,9 +220,6 @@ # ## Datadog API key # apikey = "my-secret-key" # required. # -# # The base endpoint URL can optionally be specified but it defaults to: -# #url = "https://app.datadoghq.com/api/v1/series" -# # ## Connection timeout. # # timeout = "5s" @@ -445,11 +264,11 @@ # # default_tag_value = "none" # index_name = "telegraf-%Y.%m.%d" # required. 
# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification # # insecure_skip_verify = false # # ## Template Config @@ -485,18 +304,14 @@ # ## Graphite output template # ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md # template = "host.tags.measurement.field" -# -# ## Enable Graphite tags support -# # graphite_tag_support = false -# # ## timeout in seconds for the write connection to graphite # timeout = 2 # -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification # # insecure_skip_verify = false @@ -506,95 +321,6 @@ # servers = ["127.0.0.1:12201", "192.168.1.1:12201"] -# # A plugin that can transmit metrics over HTTP -# [[outputs.http]] -# ## URL is the address to send metrics to -# url = "http://127.0.0.1:8080/metric" -# -# ## Timeout for HTTP message -# # timeout = "5s" -# -# ## HTTP method, one of: "POST" or "PUT" -# # method = "POST" -# -# ## HTTP Basic Auth credentials -# # username = "username" -# # password = "pa$$word" -# -# ## OAuth2 Client Credentials Grant -# # client_id = "clientid" -# # client_secret = "secret" -# # token_url = "https://indentityprovider/oauth2/v1/token" -# # scopes = ["urn:opc:idm:__myscopes__"] -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Data format to output. -# ## Each data format has it's own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md -# # data_format = "influx" -# -# ## Additional HTTP headers -# # [outputs.http.headers] -# # # Should be set manually to "application/json" for json data_format -# # Content-Type = "text/plain; charset=utf-8" -# -# ## HTTP Content-Encoding for write request body, can be set to "gzip" to -# ## compress body or "identity" to apply no encoding. -# # content_encoding = "identity" - - -# # Configuration for sending metrics to InfluxDB -# [[outputs.influxdb_v2]] -# ## The URLs of the InfluxDB cluster nodes. -# ## -# ## Multiple URLs can be specified for a single cluster, only ONE of the -# ## urls will be written to each interval. -# urls = ["http://127.0.0.1:9999"] -# -# ## Token for authentication. -# token = "" -# -# ## Organization is the name of the organization you wish to write to; must exist. -# organization = "" -# -# ## Destination bucket to write into. -# bucket = "" -# -# ## Timeout for HTTP messages. -# # timeout = "5s" -# -# ## Additional HTTP headers -# # http_headers = {"X-Special-Header" = "Special-Value"} -# -# ## HTTP Proxy override, if unset values the standard proxy environment -# ## variables are consulted to determine which proxy, if any, should be used. 
-# # http_proxy = "http://corporate.proxy:3128" -# -# ## HTTP User-Agent -# # user_agent = "telegraf" -# -# ## Content-Encoding for write request body, can be set to "gzip" to -# ## compress body or "identity" to apply no encoding. -# # content_encoding = "gzip" -# -# ## Enable or disable uint support for writing uints influxdb 2.0. -# # influx_uint_support = false -# -# ## Optional TLS Config for use on HTTP connections. -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - # # Configuration for sending metrics to an Instrumental project # [[outputs.instrumental]] # ## Project API Token (required) @@ -617,15 +343,6 @@ # ## Kafka topic for producer messages # topic = "telegraf" # -# ## Optional Client id -# # client_id = "Telegraf" -# -# ## Set the minimal supported Kafka version. Setting this enables the use of new -# ## Kafka features and APIs. Of particular interest, lz4 compression -# ## requires at least version 0.10.0.0. -# ## ex: version = "1.1.0" -# # version = "" -# # ## Optional topic suffix configuration. # ## If the section is omitted, no suffix is used. # ## Following topic suffix methods are supported: @@ -657,20 +374,12 @@ # ## ie, if this tag exists, its value will be used as the routing key # routing_tag = "host" # -# ## Static routing key. Used when no routing_tag is set or as a fallback -# ## when the tag specified in routing tag is not found. If set to "random", -# ## a random value will be generated for each message. -# ## ex: routing_key = "random" -# ## routing_key = "telegraf" -# # routing_key = "" -# # ## CompressionCodec represents the various compression codecs recognized by # ## Kafka in messages. # ## 0 : No compression # ## 1 : Gzip compression # ## 2 : Snappy compression -# ## 3 : LZ4 compression -# # compression_codec = 0 +# compression_codec = 0 # # ## RequiredAcks is used in Produce Requests to tell the broker how many # ## replica acknowledgements it must see before responding @@ -686,21 +395,16 @@ # ## received the data. This option provides the best durability, we # ## guarantee that no messages will be lost as long as at least one in # ## sync replica remains. -# # required_acks = -1 +# required_acks = -1 # -# ## The maximum number of times to retry sending a metric before failing -# ## until the next flush. -# # max_retry = 3 +# ## The total number of times to retry sending a message +# max_retry = 3 # -# ## The maximum permitted size of a message. Should be set equal to or -# ## smaller than the broker's 'message.max.bytes'. -# # max_message_bytes = 1000000 -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification # # insecure_skip_verify = false # # ## Optional SASL Config @@ -711,7 +415,7 @@ # ## Each data format has its own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md -# # data_format = "influx" +# data_format = "influx" # # Configuration for the AWS Kinesis output. 
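# ## For reference, a minimal producer wired from the kafka options above
# ## might look like the sketch below; the broker address and topic are
# ## placeholder values for illustration, not shipped defaults.
# ##   [[outputs.kafka]]
# ##     brokers = ["localhost:9092"]
# ##     topic = "telegraf"
# ##     ## 0 = none, 1 = gzip, 2 = snappy
# ##     compression_codec = 0
# ##     ## -1 waits for all in-sync replicas to acknowledge each write
# ##     required_acks = -1
# ##     max_retry = 3
# ##     data_format = "influx"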
@@ -734,12 +438,6 @@ # #profile = "" # #shared_credential_file = "" # -# ## Endpoint to make request against, the correct endpoint is automatically -# ## determined and this option should only be set if you wish to override the -# ## default. -# ## ex: endpoint_url = "http://localhost:8000" -# # endpoint_url = "" -# # ## Kinesis StreamName must exist prior to starting telegraf. # streamname = "StreamName" # ## DEPRECATED: PartitionKey as used for sharding data. @@ -764,11 +462,10 @@ # # method = "measurement" # # # ## Use the value of a tag for all writes, if the tag is not set the empty -# ## default option will be used. When no default, defaults to "telegraf" +# ## string will be used: # # [outputs.kinesis.partition] # # method = "tag" # # key = "host" -# # default = "mykey" # # # ## Data format to output. @@ -809,32 +506,22 @@ # ## ex: prefix/web01.example.com/mem # topic_prefix = "telegraf" # -# ## QoS policy for messages -# ## 0 = at most once -# ## 1 = at least once -# ## 2 = exactly once -# # qos = 2 -# # ## username and password to connect MQTT server. # # username = "telegraf" # # password = "metricsmetricsmetricsmetrics" # -# ## client ID, if not set a random ID is generated -# # client_id = "" -# # ## Timeout for write operations. default: 5s # # timeout = "5s" # -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false +# ## client ID, if not set a random ID is generated +# # client_id = "" # -# ## When true, metrics will be sent in one MQTT message per flush. Otherwise, -# ## metrics are written one metric per MQTT message. -# # batch = false +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false # # ## Data format to output. # ## Each data format has its own unique set of configuration options, read @@ -853,11 +540,11 @@ # ## NATS subject for producer messages # subject = "telegraf" # -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification # # insecure_skip_verify = false # # ## Data format to output. @@ -896,11 +583,7 @@ # # ## Number of data points to send to OpenTSDB in Http requests. # ## Not used with telnet API. -# http_batch_size = 50 -# -# ## URI Path for Http requests to OpenTSDB. -# ## Used in cases where OpenTSDB is located behind a reverse proxy. -# http_path = "/api/put" +# httpBatchSize = 50 # # ## Debug true - Prints OpenTSDB communication # debug = false @@ -912,36 +595,14 @@ # # Configuration for the Prometheus client to spawn # [[outputs.prometheus_client]] # ## Address to listen on -# listen = ":9273" +# # listen = ":9273" # -# ## Use HTTP Basic Authentication. -# # basic_username = "Foo" -# # basic_password = "Bar" -# -# ## If set, the IP Ranges which are allowed to access metrics. -# ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"] -# # ip_range = [] -# -# ## Path to publish the metrics on. -# # path = "/metrics" -# -# ## Expiration interval for each metric. 
0 == no expiration
+# ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration
# # expiration_interval = "60s"
#
# ## Collectors to enable, valid entries are "gocollector" and "process".
# ## If unset, both are enabled.
-# # collectors_exclude = ["gocollector", "process"]
-#
-# ## Send string metrics as Prometheus labels.
-# ## Unless set to false all string metrics will be sent as labels.
-# # string_as_label = true
-#
-# ## If set, enable TLS with the given certificate.
-# # tls_cert = "/etc/ssl/telegraf.crt"
-# # tls_key = "/etc/ssl/telegraf.key"
-#
-# ## Export metric collection time.
-# # export_timestamp = false
+# collectors_exclude = ["gocollector", "process"]


# # Configuration for the Riemann server to send metrics to
@@ -1002,13 +663,6 @@
# # address = "unix:///tmp/telegraf.sock"
# # address = "unixgram:///tmp/telegraf.sock"
#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-#
# ## Period between keep alive probes.
# ## Only applies to TCP sockets.
# ## 0 disables keep alive probes.
@@ -1022,55 +676,40 @@
# # data_format = "influx"


-# # Configuration for Google Cloud Stackdriver to send metrics to
-# [[outputs.stackdriver]]
-# # GCP Project
-# project = "erudite-bloom-151019"
-#
-# # The namespace for the metric descriptor
-# namespace = "telegraf"
-
-
# # Configuration for Wavefront server to send metrics to
# [[outputs.wavefront]]
-# ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy
-# ## If using Wavefront Proxy, also specify port. example: http://proxyserver:2878
-# url = "https://metrics.wavefront.com"
+# ## DNS name of the wavefront proxy server
+# host = "wavefront.example.com"
#
-# ## Authentication Token for Wavefront. Only required if using Direct Ingestion
-# #token = "DUMMY_TOKEN"
-#
-# ## DNS name of the wavefront proxy server. Do not use if url is specified
-# #host = "wavefront.example.com"
-#
-# ## Port that the Wavefront proxy server listens on. Do not use if url is specified
-# #port = 2878
+# ## Port that the Wavefront proxy server listens on
+# port = 2878
#
# ## prefix for metrics keys
# #prefix = "my.specific.prefix."
#
-# ## whether to use "value" for name of simple fields. default is false
+# ## whether to use "value" for name of simple fields
# #simple_fields = false
#
-# ## character to use between metric and field name. default is . (dot)
+# ## character to use between metric and field name. defaults to . (dot)
# #metric_separator = "."
#
-# ## Convert metric name paths to use metricSeparator character
-# ## When true will convert all _ (underscore) characters in final metric name. default is true
+# ## Convert metric name paths to use metricSeparator character
+# ## When true (default) will convert all _ (underscore) characters in final metric name
# #convert_paths = true
#
# ## Use Regex to sanitize metric and tag names from invalid characters
-# ## Regex is more thorough, but significantly slower. default is false
+# ## Regex is more thorough, but significantly slower
# #use_regex = false
#
# ## point tags to use as the source name for Wavefront (if none found, host will be used)
-# #source_override = ["hostname", "address", "agent_host", "node_host"]
+# #source_override = ["hostname", "snmp_host", "node_host"]
#
-# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0.
default is true +# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default true # #convert_bool = true # # ## Define a mapping, namespaced by metric prefix, from string values to numeric values -# ## deprecated in 1.9; use the enum processor plugin +# ## The example below maps "green" -> 1.0, "yellow" -> 0.5, "red" -> 0.0 for +# ## any metrics beginning with "elasticsearch" # #[[outputs.wavefront.string_to_number.elasticsearch]] # # green = 1.0 # # yellow = 0.5 @@ -1082,217 +721,10 @@ # PROCESSOR PLUGINS # ############################################################################### -# # Convert values to another metric value type -# [[processors.converter]] -# ## Tags to convert -# ## -# ## The table key determines the target type, and the array of key-values -# ## select the keys to convert. The array may contain globs. -# ## = [...] -# [processors.converter.tags] -# string = [] -# integer = [] -# unsigned = [] -# boolean = [] -# float = [] -# -# ## Fields to convert -# ## -# ## The table key determines the target type, and the array of key-values -# ## select the keys to convert. The array may contain globs. -# ## = [...] -# [processors.converter.fields] -# tag = [] -# string = [] -# integer = [] -# unsigned = [] -# boolean = [] -# float = [] - - -# # Map enum values according to given table. -# [[processors.enum]] -# [[processors.enum.mapping]] -# ## Name of the field to map -# field = "status" -# -# ## Destination field to be used for the mapped value. By default the source -# ## field is used, overwriting the original value. -# # dest = "status_code" -# -# ## Default value to be used for all values not contained in the mapping -# ## table. When unset, the unmodified value for the field will be used if no -# ## match is found. -# # default = 0 -# -# ## Table of mappings -# [processors.enum.mapping.value_mappings] -# green = 1 -# yellow = 2 -# red = 3 - - -# # Apply metric modifications using override semantics. -# [[processors.override]] -# ## All modifications on inputs and aggregators can be overridden: -# # name_override = "new_name" -# # name_prefix = "new_name_prefix" -# # name_suffix = "new_name_suffix" -# -# ## Tags to be added (all values must be strings) -# # [processors.override.tags] -# # additional_tag = "tag_value" - - -# # Parse a value in a specified field/tag(s) and add the result in a new metric -# [[processors.parser]] -# ## The name of the fields whose value will be parsed. -# parse_fields = [] -# -# ## If true, incoming metrics are not emitted. -# drop_original = false -# -# ## If set to override, emitted metrics will be merged by overriding the -# ## original metric using the newly parsed metrics. -# merge = "override" -# -# ## The dataformat to be read from files -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - # # Print all metrics that pass through this filter. 
# [[processors.printer]] -# # Transforms tag and field values with regex pattern -# [[processors.regex]] -# ## Tag and field conversions defined in a separate sub-tables -# # [[processors.regex.tags]] -# # ## Tag to change -# # key = "resp_code" -# # ## Regular expression to match on a tag value -# # pattern = "^(\\d)\\d\\d$" -# # ## Pattern for constructing a new value (${1} represents first subgroup) -# # replacement = "${1}xx" -# -# # [[processors.regex.fields]] -# # key = "request" -# # ## All the power of the Go regular expressions available here -# # ## For example, named subgroups -# # pattern = "^/api(?P/[\\w/]+)\\S*" -# # replacement = "${method}" -# # ## If result_key is present, a new field will be created -# # ## instead of changing existing field -# # result_key = "method" -# -# ## Multiple conversions may be applied for one field sequentially -# ## Let's extract one more value -# # [[processors.regex.fields]] -# # key = "request" -# # pattern = ".*category=(\\w+).*" -# # replacement = "${1}" -# # result_key = "search_category" - - -# # Rename measurements, tags, and fields that pass through this filter. -# [[processors.rename]] - - -# # Perform string processing on tags, fields, and measurements -# [[processors.strings]] -# ## Convert a tag value to uppercase -# # [[processors.strings.uppercase]] -# # tag = "method" -# -# ## Convert a field value to lowercase and store in a new field -# # [[processors.strings.lowercase]] -# # field = "uri_stem" -# # dest = "uri_stem_normalised" -# -# ## Trim leading and trailing whitespace using the default cutset -# # [[processors.strings.trim]] -# # field = "message" -# -# ## Trim leading characters in cutset -# # [[processors.strings.trim_left]] -# # field = "message" -# # cutset = "\t" -# -# ## Trim trailing characters in cutset -# # [[processors.strings.trim_right]] -# # field = "message" -# # cutset = "\r\n" -# -# ## Trim the given prefix from the field -# # [[processors.strings.trim_prefix]] -# # field = "my_value" -# # prefix = "my_" -# -# ## Trim the given suffix from the field -# # [[processors.strings.trim_suffix]] -# # field = "read_count" -# # suffix = "_count" -# -# ## Replace all non-overlapping instances of old with new -# # [[processors.strings.replace]] -# # measurement = "*" -# # old = ":" -# # new = "_" - - -# # Print all metrics that pass through this filter. -# [[processors.topk]] -# ## How many seconds between aggregations -# # period = 10 -# -# ## How many top metrics to return -# # k = 10 -# -# ## Over which tags should the aggregation be done. Globs can be specified, in -# ## which case any tag matching the glob will aggregated over. If set to an -# ## empty list is no aggregation over tags is done -# # group_by = ['*'] -# -# ## Over which fields are the top k are calculated -# # fields = ["value"] -# -# ## What aggregation to use. Options: sum, mean, min, max -# # aggregation = "mean" -# -# ## Instead of the top k largest metrics, return the bottom k lowest metrics -# # bottomk = false -# -# ## The plugin assigns each metric a GroupBy tag generated from its name and -# ## tags. If this setting is different than "" the plugin will add a -# ## tag (which name will be the value of this setting) to each metric with -# ## the value of the calculated GroupBy tag. Useful for debugging -# # add_groupby_tag = "" -# -# ## These settings provide a way to know the position of each metric in -# ## the top k. The 'add_rank_field' setting allows to specify for which -# ## fields the position is required. 
If the list is non empty, then a field
-# ## will be added to each and every metric for each string present in this
-# ## setting. This field will contain the ranking of the group that
-# ## the metric belonged to when aggregated over that field.
-# ## The name of the field will be set to the name of the aggregation field,
-# ## suffixed with the string '_topk_rank'
-# # add_rank_fields = []
-#
-# ## These settings provide a way to know what values the plugin is generating
-# ## when aggregating metrics. The 'add_agregate_field' setting allows to
-# ## specify for which fields the final aggregation value is required. If the
-# ## list is non empty, then a field will be added to each every metric for
-# ## each field present in this setting. This field will contain
-# ## the computed aggregation for the group that the metric belonged to when
-# ## aggregated over that field.
-# ## The name of the field will be set to the name of the aggregation field,
-# ## suffixed with the string '_topk_aggregate'
-# # add_aggregate_fields = []
-
-
###############################################################################
#                            AGGREGATOR PLUGINS                               #
@@ -1300,14 +732,12 @@

# # Keep the aggregate basicstats of each metric passing through.
# [[aggregators.basicstats]]
+# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# period = "30s"
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
-#
-# ## Configures which basic stats to push as fields
-# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]


# # Create aggregate histograms.
@@ -1346,18 +776,6 @@
# drop_original = false


-# # Count the occurrence of values in fields.
-# [[aggregators.valuecounter]]
-# ## General Aggregator Arguments:
-# ## The period on which to flush & clear the aggregator.
-# period = "30s"
-# ## If true, the original metric will be dropped by the
-# ## aggregator and will not get sent to the output plugins.
-# drop_original = false
-# ## The fields for which the values will be counted
-# fields = []
-
-
###############################################################################
#                            INPUT PLUGINS                                    #
@@ -1377,12 +795,13 @@

# Read metrics about disk usage by mount point
[[inputs.disk]]
-  ## By default stats will be gathered for all mount points.
-  ## Set mount_points will restrict the stats to only the specified mount points.
+  ## By default, telegraf gathers stats for all mountpoints.
+  ## Setting mountpoints will restrict the stats to the specified mountpoints.
  # mount_points = ["/"]

-  ## Ignore mount points by filesystem type.
-  ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
+  ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
+  ## present on /run, /var/run, /dev/shm or /dev).
+  ignore_fs = ["tmpfs", "devtmpfs", "devfs"]


# Read metrics about disk IO by device
@@ -1390,7 +809,7 @@
  ## By default, telegraf will gather stats for all devices including
  ## disk partitions.
  ## Setting devices will restrict the stats to the specified devices.
-  # devices = ["sda", "sdb", "vd*"]
+  # devices = ["sda", "sdb"]
  ## Uncomment the following line if you need disk serial numbers.
  # skip_serial_number = false
  #
@@ -1399,8 +818,6 @@
  ## Currently only Linux is supported via udev properties. You can view
  ## available properties for a device by running:
  ## 'udevadm info -q property -n /dev/sda'
-  ## Note: Most, but not all, udev properties can be accessed this way.
Properties - ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] # ## Using the same metadata source as device_tags, you can also customize the @@ -1439,48 +856,12 @@ # no configuration -# # Gather ActiveMQ metrics -# [[inputs.activemq]] -# ## Required ActiveMQ Endpoint -# # server = "192.168.50.10" -# -# ## Required ActiveMQ port -# # port = 8161 -# -# ## Credentials for basic HTTP authentication -# # username = "admin" -# # password = "admin" -# -# ## Required ActiveMQ webadmin root path -# # webadmin = "admin" -# -# ## Maximum time to receive response. -# # response_timeout = "5s" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification - - # # Read stats from aerospike server(s) # [[inputs.aerospike]] # ## Aerospike servers to connect to (with port) # ## This plugin will query all namespaces the aerospike # ## server has configured and get stats for them. # servers = ["localhost:3000"] -# -# # username = "telegraf" -# # password = "pa$$word" -# -# ## Optional TLS Config -# # enable_tls = false -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## If false, skip chain & host verification -# # insecure_skip_verify = true # # Read Apache status information (mod_status) @@ -1497,37 +878,11 @@ # ## Maximum time to receive response. # # response_timeout = "5s" # -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - -# # Gather metrics from Apache Aurora schedulers -# [[inputs.aurora]] -# ## Schedulers are the base addresses of your Aurora Schedulers -# schedulers = ["http://127.0.0.1:8081"] -# -# ## Set of role types to collect metrics from. -# ## -# ## The scheduler roles are checked each interval by contacting the -# ## scheduler nodes; zookeeper is not contacted. -# # roles = ["leader", "follower"] -# -# ## Timeout is the max time for total network operations. -# # timeout = "5s" -# -# ## Username and password are sent using HTTP Basic Auth. -# # username = "username" -# # password = "pa$$word" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification # # insecure_skip_verify = false @@ -1543,16 +898,6 @@ # bcacheDevs = ["bcache0"] -# # Collects Beanstalkd server and tubes stats -# [[inputs.beanstalkd]] -# ## Server to collect data from -# server = "localhost:11300" -# -# ## List of tubes to gather stats about. -# ## If no tubes specified then data gathered for each tube on server reported by list-tubes command -# tubes = ["notifications"] - - # # Collect bond interface status, slaves statuses and failures count # [[inputs.bond]] # ## Sets 'proc' directory path @@ -1565,47 +910,22 @@ # # bond_interfaces = ["bond0"] -# # Collect Kafka topics and consumers status from Burrow HTTP API. -# [[inputs.burrow]] -# ## Burrow API endpoints in format "schema://host:port". -# ## Default is "http://localhost:8000". 
-# servers = ["http://localhost:8000"]
-#
-# ## Override Burrow API prefix.
-# ## Useful when Burrow is behind reverse-proxy.
-# # api_prefix = "/v3/kafka"
-#
-# ## Maximum time to receive response.
-# # response_timeout = "5s"
-#
-# ## Limit per-server concurrent connections.
-# ## Useful in case of large number of topics or consumer groups.
-# # concurrent_connections = 20
-#
-# ## Filter clusters, default is no filtering.
-# ## Values can be specified as glob patterns.
-# # clusters_include = []
-# # clusters_exclude = []
-#
-# ## Filter consumer groups, default is no filtering.
-# ## Values can be specified as glob patterns.
-# # groups_include = []
-# # groups_exclude = []
-#
-# ## Filter topics, default is no filtering.
-# ## Values can be specified as glob patterns.
-# # topics_include = []
-# # topics_exclude = []
-#
-# ## Credentials for basic HTTP authentication.
-# # username = ""
-# # password = ""
-#
-# ## Optional SSL config
-# # ssl_ca = "/etc/telegraf/ca.pem"
-# # ssl_cert = "/etc/telegraf/cert.pem"
-# # ssl_key = "/etc/telegraf/key.pem"
-# # insecure_skip_verify = false
+# # Read Cassandra metrics through Jolokia
+# [[inputs.cassandra]]
+# # This is the context root used to compose the jolokia url
+# context = "/jolokia/read"
+# ## List of cassandra servers exposing jolokia read service
+# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
+# ## List of metrics collected on above servers
+# ## Each metric consists of a jmx path.
+# ## This will collect all heap memory usage metrics from the jvm and
+# ## ReadLatency metrics for all keyspaces and tables.
+# ## "type=Table" in the query works with Cassandra3.0. Older versions might
+# ## need to use "type=ColumnFamily"
+# metrics = [
+# "/java.lang:type=Memory/HeapMemoryUsage",
+# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
+# ]


# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
@@ -1684,12 +1004,6 @@
# #profile = ""
# #shared_credential_file = ""
#
-# ## Endpoint to make request against, the correct endpoint is automatically
-# ## determined and this option should only be set if you wish to override the
-# ## default.
-# ## ex: endpoint_url = "http://localhost:8000"
-# # endpoint_url = ""
-#
# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
# # metrics are made available to the 1 minute period. Some are collected at
# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
@@ -1726,9 +1040,7 @@
# #[[inputs.cloudwatch.metrics]]
# # names = ["Latency", "RequestCount"]
# #
-# # ## Dimension filters for Metric. These are optional however all dimensions
-# # ## defined for the metric names must be specified in order to retrieve
-# # ## the metric statistics.
+# # ## Dimension filters for Metric (optional)
# # [[inputs.cloudwatch.metrics.dimensions]]
# # name = "LoadBalancerName"
# # value = "p-example"
@@ -1752,33 +1064,19 @@


# # Gather health check statuses from services registered in Consul
# [[inputs.consul]]
-# ## Consul server address
+# ## Most of these values default to the ones configured at the Consul agent level.
+# ## Optional Consul server address (default: "localhost") # # address = "localhost" -# -# ## URI scheme for the Consul server, one of "http", "https" +# ## Optional URI scheme for the Consul server (default: "http") # # scheme = "http" -# -# ## ACL token used in every request +# ## Optional ACL token used in every request (default: "") # # token = "" -# -# ## HTTP Basic Authentication username and password. +# ## Optional username used for request HTTP Basic Authentication (default: "") # # username = "" +# ## Optional password used for HTTP Basic Authentication (default: "") # # password = "" -# -# ## Data center to query the health checks from -# # datacenter = "" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = true -# -# ## Consul checks' tag splitting -# # When tags are formatted like "key:value" with ":" as a delimiter then -# # they will be splitted and reported as proper key:value in Telegraf -# # tag_delimiter = ":" +# ## Optional data centre to query the health checks from (default: "") +# # datacentre = "" # # Read metrics from one or many couchbase clusters @@ -1798,12 +1096,8 @@ # # Read CouchDB Stats from one or more servers # [[inputs.couchdb]] # ## Works with CouchDB stats endpoints out of the box -# ## Multiple Hosts from which to read CouchDB stats: +# ## Multiple HOSTs from which to read CouchDB stats: # hosts = ["http://localhost:8086/_stats"] -# -# ## Use HTTP Basic Authentication. -# # basic_username = "telegraf" -# # basic_password = "p@ssw0rd" # # Input plugin for DC/OS metrics @@ -1838,10 +1132,10 @@ # ## Maximum time to receive a response from cluster. # # response_timeout = "20s" # -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" # ## If false, skip chain & host verification # # insecure_skip_verify = true # @@ -1905,11 +1199,6 @@ # container_name_include = [] # container_name_exclude = [] # -# ## Container states to include and exclude. Globs accepted. -# ## When empty only containers in the "running" state will be captured. -# # container_state_include = [] -# # container_state_exclude = [] -# # ## Timeout for docker list, info, and stats commands # timeout = "5s" # @@ -1926,11 +1215,11 @@ # docker_label_include = [] # docker_label_exclude = [] # -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification # # insecure_skip_verify = false @@ -1973,22 +1262,20 @@ # ## - cluster # # cluster_health_level = "indices" # -# ## Set cluster_stats to true when you want to also obtain cluster stats. +# ## Set cluster_stats to true when you want to also obtain cluster stats from the +# ## Master node. # cluster_stats = false # -# ## Only gather cluster_stats from the master node. To work this require local = true -# cluster_stats_only_from_master = true -# # ## node_stats is a list of sub-stats that you want to have gathered. 
Valid options # ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http", -# ## "breaker". Per default, all stats are gathered. +# ## "breakers". Per default, all stats are gathered. # # node_stats = ["jvm", "http"] # -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification # # insecure_skip_verify = false @@ -2020,72 +1307,6 @@ # use_sudo = false -# # Read devices value(s) from a Fibaro controller -# [[inputs.fibaro]] -# ## Required Fibaro controller address/hostname. -# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available -# url = "http://:80" -# -# ## Required credentials to access the API (http://) -# username = "" -# password = "" -# -# ## Amount of time allowed to complete the HTTP request -# # timeout = "5s" - - -# # Reload and gather from file[s] on telegraf's interval. -# [[inputs.file]] -# ## Files to parse each interval. -# ## These accept standard unix glob matching rules, but with the addition of -# ## ** as a "super asterisk". ie: -# ## /var/log/**.log -> recursively find all .log files in /var/log -# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log -# ## /var/log/apache.log -> only read the apache log file -# files = ["/var/log/apache/access.log"] -# -# ## The dataformat to be read from files -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - -# # Count files in a directory -# [[inputs.filecount]] -# ## Directory to gather stats about. -# ## deprecated in 1.9; use the directories option -# directory = "/var/cache/apt/archives" -# -# ## Directories to gather stats about. -# ## This accept standard unit glob matching rules, but with the addition of -# ## ** as a "super asterisk". ie: -# ## /var/log/** -> recursively find all directories in /var/log and count files in each directories -# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directories -# ## /var/log -> count all files in /var/log and all of its subdirectories -# directories = ["/var/cache/apt/archives"] -# -# ## Only count files that match the name pattern. Defaults to "*". -# name = "*.deb" -# -# ## Count files in subdirectories. Defaults to true. -# recursive = false -# -# ## Only count regular files. Defaults to true. -# regular_only = true -# -# ## Only count files that are at least this size. If size is -# ## a negative number, only count files that are smaller than the -# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... -# ## Without quotes and units, interpreted as size in bytes. -# size = "0B" -# -# ## Only count files that have not been touched for at least this -# ## duration. If mtime is negative, only count files that have been -# ## touched in this duration. Defaults to "0s". -# mtime = "0s" - - # # Read stats about given file(s) # [[inputs.filestat]] # ## Files to gather stats about. 
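# ## For reference, a minimal input wired from the elasticsearch options
# ## above might look like the sketch below; the server URL is a
# ## placeholder value for illustration.
# ##   [[inputs.elasticsearch]]
# ##     servers = ["http://localhost:9200"]
# ##     ## gather cluster-wide health plus per-index detail
# ##     cluster_health = true
# ##     cluster_health_level = "indices"
# ##     ## restrict node stats to a subset instead of gathering all of them
# ##     node_stats = ["jvm", "http"]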
@@ -2147,11 +1368,11 @@ # username = "" # password = "" # -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification # # insecure_skip_verify = false @@ -2165,10 +1386,6 @@ # ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats # servers = ["http://myhaproxy.com:1936/haproxy?stats"] # -# ## Credentials for basic HTTP authentication -# # username = "admin" -# # password = "admin" -# # ## You can also use local socket with standard wildcard globbing. # ## Server address not starting with 'http' will be treated as a possible # ## socket, so both examples below are valid. @@ -2177,13 +1394,13 @@ # ## By default, some of the fields are renamed from what haproxy calls them. # ## Setting this option to true results in the plugin keeping the original # ## field names. -# # keep_field_names = false +# # keep_field_names = true # -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification # # insecure_skip_verify = false @@ -2200,55 +1417,11 @@ # # devices = ["sda", "*"] -# # Read formatted metrics from one or more HTTP endpoints -# [[inputs.http]] -# ## One or more URLs from which to read formatted metrics -# urls = [ -# "http://localhost/metrics" -# ] -# -# ## HTTP method -# # method = "GET" -# -# ## Optional HTTP headers -# # headers = {"X-Special-Header" = "Special-Value"} -# -# ## Optional HTTP Basic Auth Credentials -# # username = "username" -# # password = "pa$$word" -# -# ## HTTP entity-body to send with POST/PUT requests. -# # body = "" -# -# ## HTTP Content-Encoding for write request body, can be set to "gzip" to -# ## compress body or "identity" to apply no encoding. -# # content_encoding = "identity" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Amount of time allowed to complete the HTTP request -# # timeout = "5s" -# -# ## Data format to consume. 
-# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# # data_format = "influx" - - # # HTTP/HTTPS request given an address a method and a timeout # [[inputs.http_response]] # ## Server address (default http://localhost) # # address = "http://localhost" # -# ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set) -# # http_proxy = "http://localhost:8888" -# # ## Set response_timeout (default 5 seconds) # # response_timeout = "5s" # @@ -2268,11 +1441,11 @@ # # response_string_match = "ok" # # response_string_match = "\".*_status\".?:.?\"up\"" # -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification # # insecure_skip_verify = false # # ## HTTP Request Headers (all values must be strings) @@ -2308,13 +1481,6 @@ # # "my_tag_2" # # ] # -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# # ## HTTP parameters (all values must be strings). For "GET" requests, data # ## will be included in the query. For "POST" requests, data will be included # ## in the request body as "x-www-form-urlencoded". @@ -2326,29 +1492,13 @@ # # [inputs.httpjson.headers] # # X-Auth-Token = "my-xauth-token" # # apiVersion = "v1" - - -# # Gather Icinga2 status -# [[inputs.icinga2]] -# ## Required Icinga2 server address (default: "https://localhost:5665") -# # server = "https://localhost:5665" # -# ## Required Icinga2 object type ("services" or "hosts, default "services") -# # object_type = "services" -# -# ## Credentials for basic HTTP authentication -# # username = "admin" -# # password = "admin" -# -# ## Maximum time to receive response. -# # response_timeout = "5s" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = true +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false # # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints @@ -2363,11 +1513,11 @@ # "http://localhost:8086/debug/vars" # ] # -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification # # insecure_skip_verify = false # # ## http request & header timeout @@ -2382,27 +1532,16 @@ # # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. # [[inputs.interrupts]] -# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is -# ## stored as a field. 
-# ## -# ## The default is false for backwards compatibility, and will be changed to -# ## true in a future version. It is recommended to set to true on new -# ## deployments. -# # cpu_as_tag = false -# # ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. # # [inputs.interrupts.tagdrop] -# # irq = [ "NET_RX", "TASKLET" ] +# # irq = [ "NET_RX", "TASKLET" ] # # Read metrics from the bare metal servers via IPMI # [[inputs.ipmi_sensor]] # ## optionally specify the path to the ipmitool executable # # path = "/usr/bin/ipmitool" -# ## -# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR -# # privilege = "ADMINISTRATOR" -# ## +# # # ## optionally specify one or more servers via a url matching # ## [username[:password]@][protocol[(address)]] # ## e.g. @@ -2418,20 +1557,6 @@ # # ## Timeout for the ipmitool command to complete # timeout = "20s" -# -# ## Schema Version: (Optional, defaults to version 1) -# metric_version = 2 - - -# # Gather packets and bytes counters from Linux ipsets -# [[inputs.ipset]] -# ## By default, we only show sets which have already matched at least 1 packet. -# ## set include_unmatched_sets = true to gather them all. -# include_unmatched_sets = false -# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save") -# use_sudo = false -# ## The default timeout of 1s for ipset execution can be overridden here: -# # timeout = "1s" # # Gather packets and bytes throughput from iptables @@ -2444,8 +1569,6 @@ # ## Setting 'use_lock' to true runs iptables with the "-w" option. # ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl") # use_lock = false -# ## Define an alternate executable, such as "ip6tables". Default is "iptables". -# # binary = "ip6tables" # ## defines the table to monitor: # table = "filter" # ## defines the chains to monitor. @@ -2454,55 +1577,6 @@ # chains = [ "INPUT" ] -# # Collect virtual and real server stats from Linux IPVS -# [[inputs.ipvs]] -# # no configuration - - -# # Read jobs and cluster metrics from Jenkins instances -# [[inputs.jenkins]] -# ## The Jenkins URL -# url = "http://my-jenkins-instance:8080" -# # username = "admin" -# # password = "admin" -# -# ## Set response_timeout -# response_timeout = "5s" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Optional Max Job Build Age filter -# ## Default 1 hour, ignore builds older than max_build_age -# # max_build_age = "1h" -# -# ## Optional Sub Job Depth filter -# ## Jenkins can have unlimited layer of sub jobs -# ## This config will limit the layers of pulling, default value 0 means -# ## unlimited pulling until no more sub jobs -# # max_subjob_depth = 0 -# -# ## Optional Sub Job Per Layer -# ## In workflow-multibranch-plugin, each branch will be created as a sub job. 
-# ## This config will limit to call only the lasted branches in each layer, -# ## empty will use default value 10 -# # max_subjob_per_layer = 10 -# -# ## Jobs to exclude from gathering -# # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"] -# -# ## Nodes to exclude from gathering -# # node_exclude = [ "node1", "node2" ] -# -# ## Worker pool for jenkins plugin only -# ## Empty this field will use default value 5 -# # max_connections = 5 - - # # Read JMX metrics through Jolokia # [[inputs.jolokia]] # # DEPRECATED: the jolokia plugin has been deprecated in favor of the @@ -2583,10 +1657,10 @@ # # password = "" # # response_timeout = "5s" # -# ## Optional TLS config -# # tls_ca = "/var/private/ca.pem" -# # tls_cert = "/var/private/client.pem" -# # tls_key = "/var/private/client-key.pem" +# ## Optional SSL config +# # ssl_ca = "/var/private/ca.pem" +# # ssl_cert = "/var/private/client.pem" +# # ssl_key = "/var/private/client-key.pem" # # insecure_skip_verify = false # # ## Add metrics to read @@ -2608,22 +1682,22 @@ # # password = "" # # response_timeout = "5s" # -# ## Optional TLS config -# # tls_ca = "/var/private/ca.pem" -# # tls_cert = "/var/private/client.pem" -# # tls_key = "/var/private/client-key.pem" +# ## Optional SSL config +# # ssl_ca = "/var/private/ca.pem" +# # ssl_cert = "/var/private/client.pem" +# # ssl_key = "/var/private/client-key.pem" # # insecure_skip_verify = false # # ## Add proxy targets to query # # default_target_username = "" # # default_target_password = "" -# [[inputs.jolokia2_proxy.target]] +# [[inputs.jolokia_proxy.target]] # url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi" -# # username = "" -# # password = "" +# # username = "" +# # password = "" # # ## Add metrics to read -# [[inputs.jolokia2_proxy.metric]] +# [[inputs.jolokia_proxy.metric]] # name = "java_runtime" # mbean = "java.lang:type=Runtime" # paths = ["Uptime"] @@ -2639,13 +1713,6 @@ # # ## Time limit for http requests # timeout = "5s" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false # # Get kernel statistics from /proc/vmstat @@ -2653,44 +1720,22 @@ # # no configuration -# # Read status information from one or more Kibana servers -# [[inputs.kibana]] -# ## specify a list of one or more Kibana servers -# servers = ["http://localhost:5601"] -# -# ## Timeout for HTTP requests -# timeout = "5s" -# -# ## HTTP Basic Auth credentials -# # username = "username" -# # password = "pa$$word" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - # # Read metrics from the kubernetes kubelet api # [[inputs.kubernetes]] # ## URL for the kubelet -# url = "http://127.0.0.1:10255" +# url = "http://1.1.1.1:10255" # -# ## Use bearer token for authorization. 
('bearer_token' takes priority) -# # bearer_token = "/path/to/bearer/token" -# ## OR -# # bearer_token_string = "abc_123" +# ## Use bearer token for authorization +# # bearer_token = /path/to/bearer/token # # ## Set response_timeout (default 5 seconds) # # response_timeout = "5s" # -# ## Optional TLS Config -# # tls_ca = /path/to/cafile -# # tls_cert = /path/to/certfile -# # tls_key = /path/to/keyfile -# ## Use TLS but skip chain & host verification +# ## Optional SSL Config +# # ssl_ca = /path/to/cafile +# # ssl_cert = /path/to/certfile +# # ssl_key = /path/to/keyfile +# ## Use SSL but skip chain & host verification # # insecure_skip_verify = false @@ -2734,16 +1779,6 @@ # # campaign_id = "" -# # Read metrics from one or many mcrouter servers -# [[inputs.mcrouter]] -# ## An array of address to gather stats about. Specify an ip or hostname -# ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc. -# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"] -# -# ## Timeout for metric collections from all servers. Minimum timeout is "1s". -# # timeout = "5s" - - # # Read metrics from one or many memcached servers # [[inputs.memcached]] # ## An array of address to gather stats about. Specify an ip on hostname @@ -2757,7 +1792,7 @@ # ## Timeout, in ms. # timeout = 100 # ## A list of Mesos masters. -# masters = ["http://localhost:5050"] +# masters = ["localhost:5050"] # ## Master metrics groups to be collected, by default, all enabled. # master_collections = [ # "resources", @@ -2781,13 +1816,6 @@ # # "tasks", # # "messages", # # ] -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false # # Collects scores from a minecraft server's scoreboard using the RCON protocol @@ -2808,43 +1836,16 @@ # ## mongodb://user:auth_key@10.10.3.30:27017, # ## mongodb://10.10.3.33:18832, # servers = ["mongodb://127.0.0.1:27017"] +# gather_perdb_stats = false # -# ## When true, collect per database stats -# # gather_perdb_stats = false -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification # # insecure_skip_verify = false -# # Aggregates the contents of multiple files into a single point -# [[inputs.multifile]] -# ## Base directory where telegraf will look for files. -# ## Omit this option to use absolute paths. -# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0" -# -# ## If true, Telegraf discard all data when a single file can't be read. -# ## Else, Telegraf omits the field generated from this file. -# # fail_early = true -# -# ## Files to parse each interval. 
-# [[inputs.multifile.file]] -# file = "in_pressure_input" -# dest = "pressure" -# conversion = "float" -# [[inputs.multifile.file]] -# file = "in_temp_input" -# dest = "temperature" -# conversion = "float(3)" -# [[inputs.multifile.file]] -# file = "in_humidityrelative_input" -# dest = "humidityrelative" -# conversion = "float(3)" - - # # Read metrics from one or many mysql servers # [[inputs.mysql]] # ## specify servers via a url matching: @@ -2856,20 +1857,6 @@ # # # ## If no servers are specified, then localhost is used as the host. # servers = ["tcp(127.0.0.1:3306)/"] -# -# ## Selects the metric output format. -# ## -# ## This option exists to maintain backwards compatibility, if you have -# ## existing metrics do not set or change this value until you are ready to -# ## migrate to the new format. -# ## -# ## If you do not have existing metrics from this plugin set to the latest -# ## version. -# ## -# ## Telegraf >=1.6: metric_version = 2 -# ## <1.6: metric_version = 1 (or unset) -# metric_version = 2 -# # ## the limits for metrics form perf_events_statements # perf_events_statements_digest_text_limit = 120 # perf_events_statements_limit = 250 @@ -2884,7 +1871,7 @@ # ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST # gather_process_list = true # # -# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS +# ## gather thread state counts from INFORMATION_SCHEMA.USER_STATISTICS # gather_user_statistics = true # # # ## gather auto_increment columns and max values from information schema @@ -2920,36 +1907,10 @@ # ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) # interval_slow = "30m" # -# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri) -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - -# # Provides metrics about the state of a NATS server -# [[inputs.nats]] -# ## The address of the monitoring endpoint of the NATS server -# server = "http://localhost:8222" -# -# ## Maximum time to receive response -# # response_timeout = "5s" - - -# # Neptune Apex data collector -# [[inputs.neptune_apex]] -# ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex. -# ## Measurements will be logged under "apex". -# -# ## The base URL of the local Apex(es). If you specify more than one server, they will -# ## be differentiated by the "source" tag. -# servers = [ -# "http://apex.local", -# ] -# -# ## The response_timeout specifies how long to wait for a reply from the Apex. -# #response_timeout = "5s" +# ## Optional SSL Config (will be used if tls=custom parameter specified in server uri) +# ssl_ca = "/etc/telegraf/ca.pem" +# ssl_cert = "/etc/telegraf/cert.pem" +# ssl_key = "/etc/telegraf/key.pem" # # Read metrics about network interface usage @@ -2959,15 +1920,9 @@ # ## regardless of status. # ## # # interfaces = ["eth0"] -# ## -# ## On linux systems telegraf also collects protocol stats. -# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. 
-# ## -# # ignore_protocol_stats = false -# ## -# # Collect response time of a TCP or UDP connection +# # TCP or UDP 'ping' given url and collect response time in seconds # [[inputs.net_response]] # ## Protocol, must be "tcp" or "udp" # ## NOTE: because the "udp" protocol does not respond to requests, it requires @@ -2975,12 +1930,11 @@ # protocol = "tcp" # ## Server address (default localhost) # address = "localhost:80" -# # ## Set timeout -# # timeout = "1s" +# timeout = "1s" # # ## Set read timeout (only used if expecting a response) -# # read_timeout = "1s" +# read_timeout = "1s" # # ## The following options are required for UDP checks. For TCP, they are # ## optional. The plugin will send the given string to the server and then @@ -2989,9 +1943,6 @@ # # send = "ssh" # ## expected string in answer # # expect = "ssh" -# -# ## Uncomment to remove deprecated fields -# # fieldexclude = ["result_type", "string_found"] # # Read TCP metrics such as established, time wait and sockets counts. @@ -3004,11 +1955,10 @@ # # An array of Nginx stub_status URI to gather stats. # urls = ["http://localhost/server_status"] # -# ## Optional TLS Config -# tls_ca = "/etc/telegraf/ca.pem" -# tls_cert = "/etc/telegraf/cert.cer" -# tls_key = "/etc/telegraf/key.key" -# ## Use TLS but skip chain & host verification +# # TLS/SSL configuration +# ssl_ca = "/etc/telegraf/ca.pem" +# ssl_cert = "/etc/telegraf/cert.cer" +# ssl_key = "/etc/telegraf/key.key" # insecure_skip_verify = false # # # HTTP response timeout (default: 5s) @@ -3024,68 +1974,10 @@ # response_timeout = "5s" -# # Read Nginx Plus Api documentation -# [[inputs.nginx_plus_api]] -# ## An array of API URI to gather stats. -# urls = ["http://localhost/api"] -# -# # Nginx API version, default: 3 -# # api_version = 3 -# -# # HTTP response timeout (default: 5s) -# response_timeout = "5s" - - -# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module) -# [[inputs.nginx_upstream_check]] -# ## An URL where Nginx Upstream check module is enabled -# ## It should be set to return a JSON formatted response -# url = "http://127.0.0.1/status?format=json" -# -# ## HTTP method -# # method = "GET" -# -# ## Optional HTTP headers -# # headers = {"X-Special-Header" = "Special-Value"} -# -# ## Override HTTP "Host" header -# # host_header = "check.example.com" -# -# ## Timeout for HTTP requests -# timeout = "5s" -# -# ## Optional HTTP Basic Auth credentials -# # username = "username" -# # password = "pa$$word" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - -# # Read Nginx virtual host traffic status module information (nginx-module-vts) -# [[inputs.nginx_vts]] -# ## An array of ngx_http_status_module or status URI to gather stats. -# urls = ["http://localhost/status"] -# -# ## HTTP response timeout (default: 5s) -# response_timeout = "5s" - - # # Read NSQ topic and channel statistics. 
# [[inputs.nsq]] # ## An array of NSQD HTTP API endpoints -# endpoints = ["http://localhost:4151"] -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false +# endpoints = ["http://localhost:4151"] # # Collect kernel snmp counters and network interface statistics @@ -3106,15 +1998,6 @@ # dns_lookup = true -# # Pulls statistics from nvidia GPUs attached to the host -# [[inputs.nvidia_smi]] -# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath -# # bin_path = "/usr/bin/nvidia-smi" -# -# ## Optional: timeout for GPU polling -# # timeout = "5s" - - # # OpenLDAP cn=Monitor plugin # [[inputs.openldap]] # host = "localhost" @@ -3123,21 +2006,17 @@ # # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption. # # note that port will likely need to be changed to 636 for ldaps # # valid options: "" | "starttls" | "ldaps" -# tls = "" +# ssl = "" # # # skip peer certificate verification. Default is false. # insecure_skip_verify = false # # # Path to PEM-encoded Root certificate to use to verify server certificate -# tls_ca = "/etc/ssl/certs.pem" +# ssl_ca = "/etc/ssl/certs.pem" # # # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed. # bind_dn = "" # bind_password = "" -# -# # Reverse metric names so they sort more naturally. Recommended. -# # This defaults to false if unset, but is set to true when generating a new config -# reverse_metric_names = true # # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver @@ -3200,32 +2079,19 @@ # # Ping given url(s) and return statistics # [[inputs.ping]] +# ## NOTE: this plugin forks the ping command. You may need to set capabilities +# ## via setcap cap_net_raw+p /bin/ping +# # # ## List of urls to ping -# urls = ["example.org"] -# -# ## Number of pings to send per collection (ping -c ) +# urls = ["www.google.com"] # required +# ## number of pings to send per collection (ping -c ) # # count = 1 -# -# ## Interval, in s, at which to ping. 0 == default (ping -i ) -# ## Not available in Windows. +# ## interval, in s, at which to ping. 0 == default (ping -i ) # # ping_interval = 1.0 -# -# ## Per-ping timeout, in s. 0 == no timeout (ping -W ) +# ## per-ping timeout, in s. 0 == no timeout (ping -W ) # # timeout = 1.0 -# -# ## Total-ping deadline, in s. 0 == no deadline (ping -w ) -# # deadline = 10 -# -# ## Interface or source address to send ping from (ping -I ) -# ## on Darwin and Freebsd only source address possible: (ping -S ) +# ## interface to send ping from (ping -I ) # # interface = "" -# -# ## Specify the ping executable binary, default is "ping" -# # binary = "ping" -# -# ## Arguments for ping command -# ## when arguments is not empty, other options (ping_interval, timeout, etc) will be ignored -# # arguments = ["-c", "3"] # # Measure postfix queue statistics @@ -3235,6 +2101,90 @@ # # queue_directory = "/var/spool/postfix" +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# ## +# ## All connection parameters are optional. 
+# ## +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# ## +# address = "host=localhost user=postgres sslmode=disable" +# +# ## A list of databases to explicitly ignore. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'databases' option. +# # ignored_databases = ["postgres", "template0", "template1"] +# +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'ignored_databases' option. +# # databases = ["app_production", "testing"] + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql_extensible]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# # +# ## All connection parameters are optional. # +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# # +# address = "host=localhost user=postgres sslmode=disable" +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. +# ## databases = ["app_production", "testing"] +# # +# # outputaddress = "db01" +# ## A custom name for the database that will be used as the "server" tag in the +# ## measurement output. If not specified, a default one generated from +# ## the connection address is used. +# # +# ## Define the toml config where the sql queries are stored +# ## New queries can be added, if the withdbname is set to true and there is no +# ## databases defined in the 'databases field', the sql query is ended by a +# ## 'is not null' in order to make the query succeed. +# ## Example : +# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become +# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" +# ## because the databases variable was set to ['postgres', 'pgbench' ] and the +# ## withdbname was true. Be careful that if the withdbname is set to false you +# ## don't have to define the where clause (aka with the dbname) the tagvalue +# ## field is used to define custom tags (separated by commas) +# ## The optional "measurement" value can be used to override the default +# ## output measurement name ("postgresql"). +# # +# ## Structure : +# ## [[inputs.postgresql_extensible.query]] +# ## sqlquery string +# ## version string +# ## withdbname boolean +# ## tagvalue string (comma separated) +# ## measurement string +# [[inputs.postgresql_extensible.query]] +# sqlquery="SELECT * FROM pg_stat_database" +# version=901 +# withdbname=false +# tagvalue="" +# measurement="" +# [[inputs.postgresql_extensible.query]] +# sqlquery="SELECT * FROM pg_stat_bgwriter" +# version=901 +# withdbname=false +# tagvalue="postgresql.stats" + + # # Read metrics from one or many PowerDNS servers # [[inputs.powerdns]] # ## An array of sockets to gather stats about. 
@@ -3244,6 +2194,7 @@ # # Monitor process cpu and memory usage # [[inputs.procstat]] +# ## Must specify one of: pid_file, exe, or pattern # ## PID file to monitor process # pid_file = "/var/run/nginx.pid" # ## executable name (ie, pgrep ) @@ -3257,26 +2208,37 @@ # ## CGroup name or path # # cgroup = "systemd/system.slice/nginx.service" # -# ## Windows service name -# # win_service = "" -# # ## override for process_name # ## This is optional; default is sourced from /proc//status # # process_name = "bar" -# # ## Field name prefix -# # prefix = "" +# prefix = "" +# ## comment this out if you want raw cpu_time stats +# fielddrop = ["cpu_time_*"] +# ## This is optional; moves pid into a tag instead of a field +# pid_tag = false + + +# # Read metrics from one or many prometheus clients +# [[inputs.prometheus]] +# ## An array of urls to scrape metrics from. +# urls = ["http://localhost:9100/metrics"] # -# ## Add PID as a tag instead of a field; useful to differentiate between -# ## processes whose tags are otherwise the same. Can create a large number -# ## of series, use judiciously. -# # pid_tag = false +# ## An array of Kubernetes services to scrape metrics from. +# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] # -# ## Method to use when finding process IDs. Can be one of 'pgrep', or -# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while -# ## the native finder performs the search directly in a manor dependent on the -# ## platform. Default is 'pgrep' -# # pid_finder = "pgrep" +# ## Use bearer token for authorization +# # bearer_token = /path/to/bearer/token +# +# ## Specify timeout duration for slower prometheus clients (default is 3s) +# # response_timeout = "3s" +# +# ## Optional SSL Config +# # ssl_ca = /path/to/cafile +# # ssl_cert = /path/to/certfile +# # ssl_key = /path/to/keyfile +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false # # Reads last_run_summary.yaml file and converts to measurments @@ -3295,11 +2257,11 @@ # # username = "guest" # # password = "guest" # -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification # # insecure_skip_verify = false # # ## Optional request timeouts @@ -3319,15 +2281,6 @@ # ## A list of queues to gather as the rabbitmq_queue measurement. If not # ## specified, metrics for all queues are gathered. # # queues = ["telegraf"] -# -# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not -# ## specified, metrics for all exchanges are gathered. -# # exchanges = ["telegraf"] -# -# ## Queues to include and exclude. Globs accepted. -# ## Note that an empty array for both will include all queues -# queue_name_include = [] -# queue_name_exclude = [] # # Read raindrops stats (raindrops - real-time stats for preforking Rack servers) @@ -3348,16 +2301,6 @@ # ## If no servers are specified, then localhost is used as the host. 
# ## If no port is specified, 6379 is used # servers = ["tcp://localhost:6379"] -# -# ## specify server password -# # password = "s#cr@t%" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = true # # Read metrics from one or many RethinkDB servers @@ -3409,9 +2352,6 @@ # ## Remove numbers from field names. # ## If true, a field name like 'temp1_input' will be changed to 'temp_input'. # # remove_numbers = true -# -# ## Timeout is the maximum amount of time that the sensors command can run. -# # timeout = "5s" # # Read metrics from storage devices supporting S.M.A.R.T. @@ -3631,28 +2571,6 @@ # # servers = [ # # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", # # ] -# -# ## Optional parameter, setting this to 2 will use a new version -# ## of the collection queries that break compatibility with the original -# ## dashboards. -# query_version = 2 -# -# ## If you are using AzureDB, setting this to true will gather resource utilization metrics -# # azuredb = false -# -# ## If you would like to exclude some of the metrics queries, list them here -# ## Possible choices: -# ## - PerformanceCounters -# ## - WaitStatsCategorized -# ## - DatabaseIO -# ## - DatabaseProperties -# ## - CPUHistory -# ## - DatabaseSize -# ## - DatabaseStats -# ## - MemoryClerk -# ## - VolumeSpace -# ## - PerformanceMetrics -# # exclude_query = [ 'DatabaseIO' ] # # Sysstat metrics collector @@ -3729,27 +2647,6 @@ # # virtual_servers = [1] -# # Read metrics about temperature -# [[inputs.temp]] -# # no configuration - - -# # Read Tengine's basic status information (ngx_http_reqstat_module) -# [[inputs.tengine]] -# # An array of Tengine reqstat module URI to gather stats. -# urls = ["http://127.0.0.1/us"] -# -# # HTTP response timeout (default: 5s) -# # response_timeout = "5s" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.cer" -# # tls_key = "/etc/telegraf/key.key" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - # # Gather metrics from the Tomcat server status page. 
# [[inputs.tomcat]] # ## URL of the Tomcat server status @@ -3762,11 +2659,11 @@ # ## Request timeout # # timeout = "5s" # -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification # # insecure_skip_verify = false @@ -3784,27 +2681,19 @@ # pools = ["redis_pool", "mc_pool"] -# # A plugin to collect stats from the Unbound DNS resolver +# # A plugin to collect stats from Unbound - a validating, recursive, and caching DNS resolver # [[inputs.unbound]] -# ## Address of server to connect to, read from unbound conf default, optionally ':port' -# ## Will lookup IP if given a hostname -# server = "127.0.0.1:8953" -# # ## If running as a restricted user you can prepend sudo for additional access: -# # use_sudo = false +# #use_sudo = false # # ## The default location of the unbound-control binary can be overridden with: -# # binary = "/usr/sbin/unbound-control" +# binary = "/usr/sbin/unbound-control" # # ## The default timeout of 1s can be overriden with: -# # timeout = "1s" +# timeout = "1s" # -# ## When set to true, thread metrics are tagged with the thread id. -# ## -# ## The default is false for backwards compatibility, and will be changed to -# ## true in a future version. It is recommended to set to true on new -# ## deployments. -# thread_as_tag = false +# ## Use the builtin fielddrop/fieldpass telegraf filters in order to keep/remove specific fields +# fieldpass = ["total_*", "num_*","time_up", "mem_*"] # # A plugin to collect stats from Varnish HTTP Cache @@ -3823,34 +2712,7 @@ # # ## Optional name for the varnish instance (or working directory) to query # ## Usually appened after -n in varnish cli -# # instance_name = instanceName -# -# ## Timeout for varnishstat command -# # timeout = "1s" - - -# # Monitor wifi signal strength and quality -# [[inputs.wireless]] -# ## Sets 'proc' directory path -# ## If not specified, then default is /proc -# # host_proc = "/proc" - - -# # Reads metrics from a SSL certificate -# [[inputs.x509_cert]] -# ## List certificate sources -# sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"] -# -# ## Timeout for SSL connection -# # timeout = "5s" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false +# #name = instanceName # # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools @@ -3862,9 +2724,7 @@ # ## By default, telegraf gather all zfs stats # ## If not specified, then default is: # # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] -# ## For Linux, the default is: -# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats", -# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] +# # ## By default, don't gather zpool stats # # poolMetrics = false @@ -3877,17 +2737,6 @@ # ## If no servers are specified, then localhost is used as the host. # ## If no port is specified, 2181 is used # servers = [":2181"] -# -# ## Timeout for metric collections from all servers. Minimum timeout is "1s". 
-# # timeout = "5s" -# -# ## Optional TLS Config -# # enable_tls = true -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## If false, skip chain & host verification -# # insecure_skip_verify = true @@ -3897,67 +2746,28 @@ # # AMQP consumer plugin # [[inputs.amqp_consumer]] -# ## Broker to consume from. -# ## deprecated in 1.7; use the brokers option -# # url = "amqp://localhost:5672/influxdb" -# -# ## Brokers to consume from. If multiple brokers are specified a random broker -# ## will be selected anytime a connection is established. This can be -# ## helpful for load balancing when not using a dedicated load balancer. -# brokers = ["amqp://localhost:5672/influxdb"] -# -# ## Authentication credentials for the PLAIN auth_method. -# # username = "" -# # password = "" -# -# ## Exchange to declare and consume from. +# ## AMQP url +# url = "amqp://localhost:5672/influxdb" +# ## AMQP exchange # exchange = "telegraf" -# -# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". -# # exchange_type = "topic" -# -# ## If true, exchange will be passively declared. -# # exchange_passive = false -# -# ## Exchange durability can be either "transient" or "durable". -# # exchange_durability = "durable" -# -# ## Additional exchange arguments. -# # exchange_arguments = { } -# # exchange_arguments = {"hash_propery" = "timestamp"} -# -# ## AMQP queue name. +# ## AMQP queue name # queue = "telegraf" -# -# ## AMQP queue durability can be "transient" or "durable". -# queue_durability = "durable" -# -# ## Binding Key. +# ## Binding Key # binding_key = "#" # # ## Maximum number of messages server should give to the worker. -# # prefetch_count = 50 -# -# ## Maximum messages to read from the broker that have not been written by an -# ## output. For best throughput set based on the number of metrics within -# ## each message and the size of the output's metric_batch_size. -# ## -# ## For example, if each message from the queue contains 10 metrics and the -# ## output metric_batch_size is 1000, setting this to 100 will ensure that a -# ## full batch is collected and the write is triggered immediately without -# ## waiting until the next flush_interval. -# # max_undelivered_messages = 1000 +# prefetch_count = 50 # # ## Auth method. PLAIN and EXTERNAL are supported # ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as # ## described here: https://www.rabbitmq.com/plugins.html # # auth_method = "PLAIN" # -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification # # insecure_skip_verify = false # # ## Data format to consume. @@ -3967,93 +2777,6 @@ # data_format = "influx" -# # Read Cassandra metrics through Jolokia -# [[inputs.cassandra]] -# ## DEPRECATED: The cassandra plugin has been deprecated. Please use the -# ## jolokia2 plugin instead. 
-# ## -# ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 -# -# context = "/jolokia/read" -# ## List of cassandra servers exposing jolokia read service -# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"] -# ## List of metrics collected on above servers -# ## Each metric consists of a jmx path. -# ## This will collect all heap memory usage metrics from the jvm and -# ## ReadLatency metrics for all keyspaces and tables. -# ## "type=Table" in the query works with Cassandra3.0. Older versions might -# ## need to use "type=ColumnFamily" -# metrics = [ -# "/java.lang:type=Memory/HeapMemoryUsage", -# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency" -# ] - - -# # Read metrics from Google PubSub -# [[inputs.cloud_pubsub]] -# ## Required. Name of Google Cloud Platform (GCP) Project that owns -# ## the given PubSub subscription. -# project = "my-project" -# -# ## Required. Name of PubSub subscription to ingest metrics from. -# subscription = "my-subscription" -# -# ## Required. Data format to consume. -# ## Each data format has its own unique set of configuration options. -# ## Read more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" -# -# ## Optional. Filepath for GCP credentials JSON file to authorize calls to -# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use -# ## Application Default Credentials, which is preferred. -# # credentials_file = "path/to/my/creds.json" -# -# ## Optional. Maximum byte length of a message to consume. -# ## Larger messages are dropped with an error. If less than 0 or unspecified, -# ## treated as no limit. -# # max_message_len = 1000000 -# -# ## Optional. Maximum messages to read from PubSub that have not been written -# ## to an output. Defaults to 1000. -# ## For best throughput set based on the number of metrics within -# ## each message and the size of the output's metric_batch_size. -# ## -# ## For example, if each message contains 10 metrics and the output -# ## metric_batch_size is 1000, setting this to 100 will ensure that a -# ## full batch is collected and the write is triggered immediately without -# ## waiting until the next flush_interval. -# # max_undelivered_messages = 1000 -# -# ## The following are optional Subscription ReceiveSettings in PubSub. -# ## Read more about these values: -# ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings -# -# ## Optional. Maximum number of seconds for which a PubSub subscription -# ## should auto-extend the PubSub ACK deadline for each message. If less than -# ## 0, auto-extension is disabled. -# # max_extension = 0 -# -# ## Optional. Maximum number of unprocessed messages in PubSub -# ## (unacknowledged but not yet expired in PubSub). -# ## A value of 0 is treated as the default PubSub value. -# ## Negative values will be treated as unlimited. -# # max_outstanding_messages = 0 -# -# ## Optional. Maximum size in bytes of unprocessed messages in PubSub -# ## (unacknowledged but not yet expired in PubSub). -# ## A value of 0 is treated as the default PubSub value. -# ## Negative values will be treated as unlimited. -# # max_outstanding_bytes = 0 -# -# ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn -# ## to pull messages from PubSub concurrently. This limit applies to each -# ## subscription separately and is treated as the PubSub default if less than -# ## 1. 
Note this setting does not limit the number of messages that can be -# ## processed concurrently (use "max_outstanding_messages" instead). -# # max_receiver_go_routines = 0 - - # # Influx HTTP write listener # [[inputs.http_listener]] # ## Address and port to host HTTP listener on @@ -4065,12 +2788,12 @@ # write_timeout = "10s" # # ## Maximum allowed http request body size in bytes. -# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) -# max_body_size = "500MiB" +# ## 0 means to use the default of 536,870,912 bytes (500 mebibytes) +# max_body_size = 0 # # ## Maximum line size allowed to be sent in bytes. # ## 0 means to use the default of 65536 bytes (64 kibibytes) -# max_line_size = "64KiB" +# max_line_size = 0 # # ## Set one or more allowed client CA certificate file names to # ## enable mutually authenticated TLS connections @@ -4079,130 +2802,6 @@ # ## Add service certificate and key # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" -# -# ## Optional username and password to accept for HTTP basic authentication. -# ## You probably want to make sure you have TLS configured above for this. -# # basic_username = "foobar" -# # basic_password = "barfoo" - - -# # Generic HTTP write listener -# [[inputs.http_listener_v2]] -# ## Address and port to host HTTP listener on -# service_address = ":8080" -# -# ## Path to listen to. -# # path = "/telegraf" -# -# ## HTTP methods to accept. -# # methods = ["POST", "PUT"] -# -# ## maximum duration before timing out read of the request -# # read_timeout = "10s" -# ## maximum duration before timing out write of the response -# # write_timeout = "10s" -# -# ## Maximum allowed http request body size in bytes. -# ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) -# # max_body_size = "500MB" -# -# ## Set one or more allowed client CA certificate file names to -# ## enable mutually authenticated TLS connections -# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] -# -# ## Add service certificate and key -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# -# ## Optional username and password to accept for HTTP basic authentication. -# ## You probably want to make sure you have TLS configured above for this. -# # basic_username = "foobar" -# # basic_password = "barfoo" -# -# ## Data format to consume. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - -# # Influx HTTP write listener -# [[inputs.influxdb_listener]] -# ## Address and port to host HTTP listener on -# service_address = ":8186" -# -# ## maximum duration before timing out read of the request -# read_timeout = "10s" -# ## maximum duration before timing out write of the response -# write_timeout = "10s" -# -# ## Maximum allowed http request body size in bytes. -# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) -# max_body_size = "500MiB" -# -# ## Maximum line size allowed to be sent in bytes. 
-# ## 0 means to use the default of 65536 bytes (64 kibibytes) -# max_line_size = "64KiB" -# -# ## Set one or more allowed client CA certificate file names to -# ## enable mutually authenticated TLS connections -# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] -# -# ## Add service certificate and key -# tls_cert = "/etc/telegraf/cert.pem" -# tls_key = "/etc/telegraf/key.pem" -# -# ## Optional username and password to accept for HTTP basic authentication. -# ## You probably want to make sure you have TLS configured above for this. -# # basic_username = "foobar" -# # basic_password = "barfoo" - - -# # Read JTI OpenConfig Telemetry from listed sensors -# [[inputs.jti_openconfig_telemetry]] -# ## List of device addresses to collect telemetry from -# servers = ["localhost:1883"] -# -# ## Authentication details. Username and password are must if device expects -# ## authentication. Client ID must be unique when connecting from multiple instances -# ## of telegraf to the same device -# username = "user" -# password = "pass" -# client_id = "telegraf" -# -# ## Frequency to get data -# sample_frequency = "1000ms" -# -# ## Sensors to subscribe for -# ## A identifier for each sensor can be provided in path by separating with space -# ## Else sensor path will be used as identifier -# ## When identifier is used, we can provide a list of space separated sensors. -# ## A single subscription will be created with all these sensors and data will -# ## be saved to measurement with this identifier name -# sensors = [ -# "/interfaces/", -# "collection /components/ /lldp", -# ] -# -# ## We allow specifying sensor group level reporting rate. To do this, specify the -# ## reporting rate in Duration at the beginning of sensor paths / collection -# ## name. For entries without reporting rate, we use configured sample frequency -# sensors = [ -# "1000ms customReporting /interfaces /lldp", -# "2000ms collection /components", -# "/interfaces", -# ] -# -# ## x509 Certificate to use with TLS connection. If it is not provided, an insecure -# ## channel will be opened with server -# ssl_cert = "/etc/telegraf/cert.pem" -# -# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms. -# ## Failed streams/calls will not be retried if 0 is provided -# retry_delay = "1000ms" -# -# ## To treat all string values as tags, set this to true -# str_as_tags = false # # Read metrics from Kafka topic(s) @@ -4211,23 +2810,12 @@ # brokers = ["localhost:9092"] # ## topic(s) to consume # topics = ["telegraf"] -# ## Add topic as tag if topic_tag is not empty -# # topic_tag = "" # -# ## Optional Client id -# # client_id = "Telegraf" -# -# ## Set the minimal supported Kafka version. Setting this enables the use of new -# ## Kafka features and APIs. Of particular interest, lz4 compression -# ## requires at least version 0.10.0.0. 
-# ## ex: version = "1.1.0" -# # version = "" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification # # insecure_skip_verify = false # # ## Optional SASL Config @@ -4238,25 +2826,16 @@ # consumer_group = "telegraf_metrics_consumers" # ## Offset (must be either "oldest" or "newest") # offset = "oldest" -# ## Maximum length of a message to consume, in bytes (default 0/unlimited); -# ## larger messages are dropped -# max_message_len = 1000000 -# -# ## Maximum messages to read from the broker that have not been written by an -# ## output. For best throughput set based on the number of metrics within -# ## each message and the size of the output's metric_batch_size. -# ## -# ## For example, if each message from the queue contains 10 metrics and the -# ## output metric_batch_size is 1000, setting this to 100 will ensure that a -# ## full batch is collected and the write is triggered immediately without -# ## waiting until the next flush_interval. -# # max_undelivered_messages = 1000 # # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 65536 # # Read metrics from Kafka topic(s) @@ -4302,6 +2881,7 @@ # # watch_method = "inotify" # # ## Parse logstash-style "grok" patterns: +# ## Telegraf built-in parsing patterns: https://goo.gl/dkay10 # [inputs.logparser.grok] # ## This is a list of patterns to check the given log file(s) for. # ## Note that adding patterns here increases processing time. The most @@ -4319,7 +2899,6 @@ # # ## Custom patterns can also be defined here. Put one pattern per line. # custom_patterns = ''' -# ''' # # ## Timezone allows you to provide an override for timestamps that # ## don't already include an offset @@ -4330,7 +2909,8 @@ # ## 1. Local -- interpret based on machine localtime # ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones # ## 3. UTC -- or blank/unspecified, will return timestamp in UTC -# # timezone = "Canada/Eastern" +# timezone = "Canada/Eastern" +# ''' # # Read metrics from MQTT topic(s) @@ -4339,28 +2919,11 @@ # ## schema can be tcp, ssl, or ws. # servers = ["tcp://localhost:1883"] # -# ## QoS policy for messages -# ## 0 = at most once -# ## 1 = at least once -# ## 2 = exactly once -# ## -# ## When using a QoS of 1 or 2, you should enable persistent_session to allow -# ## resuming unacknowledged messages. +# ## MQTT QoS, must be 0, 1, or 2 # qos = 0 -# # ## Connection timeout for initial connection in seconds # connection_timeout = "30s" # -# ## Maximum messages to read from the broker that have not been written by an -# ## output. For best throughput set based on the number of metrics within -# ## each message and the size of the output's metric_batch_size. 
-# ## -# ## For example, if each message from the queue contains 10 metrics and the -# ## output metric_batch_size is 1000, setting this to 100 will ensure that a -# ## full batch is collected and the write is triggered immediately without -# ## waiting until the next flush_interval. -# # max_undelivered_messages = 1000 -# # ## Topics to subscribe to # topics = [ # "telegraf/host01/cpu", @@ -4379,11 +2942,11 @@ # # username = "telegraf" # # password = "metricsmetricsmetricsmetrics" # -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification # # insecure_skip_verify = false # # ## Data format to consume. @@ -4396,29 +2959,19 @@ # # Read metrics from NATS subject(s) # [[inputs.nats_consumer]] # ## urls of NATS servers -# servers = ["nats://localhost:4222"] +# # servers = ["nats://localhost:4222"] # ## Use Transport Layer Security -# secure = false +# # secure = false # ## subject(s) to consume -# subjects = ["telegraf"] +# # subjects = ["telegraf"] # ## name a queue group -# queue_group = "telegraf_consumers" +# # queue_group = "telegraf_consumers" # # ## Sets the limits for pending msgs and bytes for each subscription # ## These shouldn't need to be adjusted except in very high throughput scenarios # # pending_message_limit = 65536 # # pending_bytes_limit = 67108864 # -# ## Maximum messages to read from the broker that have not been written by an -# ## output. For best throughput set based on the number of metrics within -# ## each message and the size of the output's metric_batch_size. -# ## -# ## For example, if each message from the queue contains 10 metrics and the -# ## output metric_batch_size is 1000, setting this to 100 will ensure that a -# ## full batch is collected and the write is triggered immediately without -# ## waiting until the next flush_interval. -# # max_undelivered_messages = 1000 -# # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -4438,16 +2991,6 @@ # channel = "consumer" # max_in_flight = 100 # -# ## Maximum messages to read from the broker that have not been written by an -# ## output. For best throughput set based on the number of metrics within -# ## each message and the size of the output's metric_batch_size. -# ## -# ## For example, if each message from the queue contains 10 metrics and the -# ## output metric_batch_size is 1000, setting this to 100 will ensure that a -# ## full batch is collected and the write is triggered immediately without -# ## waiting until the next flush_interval. -# # max_undelivered_messages = 1000 -# # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -4455,153 +2998,6 @@ # data_format = "influx" -# # Read metrics from one or many pgbouncer servers -# [[inputs.pgbouncer]] -# ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ -# ## ?sslmode=[disable|verify-ca|verify-full] -# ## or a simple string: -# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production -# ## -# ## All connection parameters are optional. 
-# ## -# address = "host=localhost user=pgbouncer sslmode=disable" - - -# # Read metrics from one or many postgresql servers -# [[inputs.postgresql]] -# ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ -# ## ?sslmode=[disable|verify-ca|verify-full] -# ## or a simple string: -# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production -# ## -# ## All connection parameters are optional. -# ## -# ## Without the dbname parameter, the driver will default to a database -# ## with the same name as the user. This dbname is just for instantiating a -# ## connection with the server and doesn't restrict the databases we are trying -# ## to grab metrics for. -# ## -# address = "host=localhost user=postgres sslmode=disable" -# ## A custom name for the database that will be used as the "server" tag in the -# ## measurement output. If not specified, a default one generated from -# ## the connection address is used. -# # outputaddress = "db01" -# -# ## connection configuration. -# ## maxlifetime - specify the maximum lifetime of a connection. -# ## default is forever (0s) -# max_lifetime = "0s" -# -# ## A list of databases to explicitly ignore. If not specified, metrics for all -# ## databases are gathered. Do NOT use with the 'databases' option. -# # ignored_databases = ["postgres", "template0", "template1"] -# -# ## A list of databases to pull metrics about. If not specified, metrics for all -# ## databases are gathered. Do NOT use with the 'ignored_databases' option. -# # databases = ["app_production", "testing"] - - -# # Read metrics from one or many postgresql servers -# [[inputs.postgresql_extensible]] -# ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ -# ## ?sslmode=[disable|verify-ca|verify-full] -# ## or a simple string: -# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production -# # -# ## All connection parameters are optional. # -# ## Without the dbname parameter, the driver will default to a database -# ## with the same name as the user. This dbname is just for instantiating a -# ## connection with the server and doesn't restrict the databases we are trying -# ## to grab metrics for. -# # -# address = "host=localhost user=postgres sslmode=disable" -# -# ## connection configuration. -# ## maxlifetime - specify the maximum lifetime of a connection. -# ## default is forever (0s) -# max_lifetime = "0s" -# -# ## A list of databases to pull metrics about. If not specified, metrics for all -# ## databases are gathered. -# ## databases = ["app_production", "testing"] -# # -# ## A custom name for the database that will be used as the "server" tag in the -# ## measurement output. If not specified, a default one generated from -# ## the connection address is used. -# # outputaddress = "db01" -# # -# ## Define the toml config where the sql queries are stored -# ## New queries can be added, if the withdbname is set to true and there is no -# ## databases defined in the 'databases field', the sql query is ended by a -# ## 'is not null' in order to make the query succeed. -# ## Example : -# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become -# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" -# ## because the databases variable was set to ['postgres', 'pgbench' ] and the -# ## withdbname was true. 
Be careful that if the withdbname is set to false you -# ## don't have to define the where clause (aka with the dbname) the tagvalue -# ## field is used to define custom tags (separated by commas) -# ## The optional "measurement" value can be used to override the default -# ## output measurement name ("postgresql"). -# # -# ## Structure : -# ## [[inputs.postgresql_extensible.query]] -# ## sqlquery string -# ## version string -# ## withdbname boolean -# ## tagvalue string (comma separated) -# ## measurement string -# [[inputs.postgresql_extensible.query]] -# sqlquery="SELECT * FROM pg_stat_database" -# version=901 -# withdbname=false -# tagvalue="" -# measurement="" -# [[inputs.postgresql_extensible.query]] -# sqlquery="SELECT * FROM pg_stat_bgwriter" -# version=901 -# withdbname=false -# tagvalue="postgresql.stats" - - -# # Read metrics from one or many prometheus clients -# [[inputs.prometheus]] -# ## An array of urls to scrape metrics from. -# urls = ["http://localhost:9100/metrics"] -# -# ## An array of Kubernetes services to scrape metrics from. -# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] -# -# ## Kubernetes config file to create client from. -# # kube_config = "/path/to/kubernetes.config" -# -# ## Scrape Kubernetes pods for the following prometheus annotations: -# ## - prometheus.io/scrape: Enable scraping for this pod -# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to -# ## set this to 'https' & most likely set the tls config. -# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. -# ## - prometheus.io/port: If port is not 9102 use this annotation -# # monitor_kubernetes_pods = true -# -# ## Use bearer token for authorization. ('bearer_token' takes priority) -# # bearer_token = "/path/to/bearer/token" -# ## OR -# # bearer_token_string = "abc_123" -# -# ## Specify timeout duration for slower prometheus clients (default is 3s) -# # response_timeout = "3s" -# -# ## Optional TLS Config -# # tls_ca = /path/to/cafile -# # tls_cert = /path/to/certfile -# # tls_key = /path/to/keyfile -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - # # Generic socket listener capable of handling multiple socket types. # [[inputs.socket_listener]] # ## URL to listen on @@ -4626,18 +3022,11 @@ # ## 0 (default) is unlimited. # # read_timeout = "30s" # -# ## Optional TLS configuration. -# ## Only applies to stream sockets (e.g. TCP). -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Enables client authentication if set. -# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] -# -# ## Maximum socket buffer size (in bytes when no unit specified). +# ## Maximum socket buffer size in bytes. # ## For stream sockets, once the buffer fills up, the sender will start backing up. # ## For datagram sockets, once the buffer fills up, metrics will start dropping. # ## Defaults to the OS default. -# # read_buffer_size = "64KiB" +# # read_buffer_size = 65535 # # ## Period between keep alive probes. # ## Only applies to TCP sockets. @@ -4660,14 +3049,6 @@ # ## MaxTCPConnection - applicable when protocol is set to tcp (default=250) # max_tcp_connections = 250 # -# ## Enable TCP keep alive probes (default=false) -# tcp_keep_alive = false -# -# ## Specifies the keep-alive period for an active network connection. -# ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false. -# ## Defaults to the OS configuration. 
-# # tcp_keep_alive_period = "2h" -# # ## Address and port to host UDP listener on # service_address = ":8125" # @@ -4694,7 +3075,7 @@ # parse_data_dog_tags = false # # ## Statsd data translation templates, more info can be read here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite # # templates = [ # # "cpu.* measurement*" # # ] @@ -4709,56 +3090,6 @@ # percentile_limit = 1000 -# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587 -# [[inputs.syslog]] -# ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514 -# ## Protocol, address and port to host the syslog receiver. -# ## If no host is specified, then localhost is used. -# ## If no port is specified, 6514 is used (RFC5425#section-4.1). -# server = "tcp://:6514" -# -# ## TLS Config -# # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"] -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# -# ## Period between keep alive probes. -# ## 0 disables keep alive probes. -# ## Defaults to the OS configuration. -# ## Only applies to stream sockets (e.g. TCP). -# # keep_alive_period = "5m" -# -# ## Maximum number of concurrent connections (default = 0). -# ## 0 means unlimited. -# ## Only applies to stream sockets (e.g. TCP). -# # max_connections = 1024 -# -# ## Read timeout is the maximum time allowed for reading a single message (default = 5s). -# ## 0 means unlimited. -# # read_timeout = "5s" -# -# ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). -# ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), -# ## or the non-transparent framing technique (RFC6587#section-3.4.2). -# ## Must be one of "octect-counting", "non-transparent". -# # framing = "octet-counting" -# -# ## The trailer to be expected in case of non-trasparent framing (default = "LF"). -# ## Must be one of "LF", or "NUL". -# # trailer = "LF" -# -# ## Whether to parse in best effort mode or not (default = false). -# ## By default best effort parsing is off. -# # best_effort = false -# -# ## Character to prepend to SD-PARAMs (default = "_"). -# ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. -# ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] -# ## For each combination a field is created. -# ## Its name is created concatenating identifier, sdparam_separator, and parameter name. -# # sdparam_separator = "_" - - # # Stream a log file, like the tail -f command # [[inputs.tail]] # ## files to tail. @@ -4800,158 +3131,6 @@ # # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener -# # Read metrics from VMware vCenter -# [[inputs.vsphere]] -# ## List of vCenter URLs to be monitored. These three lines must be uncommented -# ## and edited for the plugin to work. 
-# vcenters = [ "https://vcenter.local/sdk" ] -# username = "user@corp.local" -# password = "secret" -# -# ## VMs -# ## Typical VM metrics (if omitted or empty, all metrics are collected) -# vm_metric_include = [ -# "cpu.demand.average", -# "cpu.idle.summation", -# "cpu.latency.average", -# "cpu.readiness.average", -# "cpu.ready.summation", -# "cpu.run.summation", -# "cpu.usagemhz.average", -# "cpu.used.summation", -# "cpu.wait.summation", -# "mem.active.average", -# "mem.granted.average", -# "mem.latency.average", -# "mem.swapin.average", -# "mem.swapinRate.average", -# "mem.swapout.average", -# "mem.swapoutRate.average", -# "mem.usage.average", -# "mem.vmmemctl.average", -# "net.bytesRx.average", -# "net.bytesTx.average", -# "net.droppedRx.summation", -# "net.droppedTx.summation", -# "net.usage.average", -# "power.power.average", -# "virtualDisk.numberReadAveraged.average", -# "virtualDisk.numberWriteAveraged.average", -# "virtualDisk.read.average", -# "virtualDisk.readOIO.latest", -# "virtualDisk.throughput.usage.average", -# "virtualDisk.totalReadLatency.average", -# "virtualDisk.totalWriteLatency.average", -# "virtualDisk.write.average", -# "virtualDisk.writeOIO.latest", -# "sys.uptime.latest", -# ] -# # vm_metric_exclude = [] ## Nothing is excluded by default -# # vm_instances = true ## true by default -# -# ## Hosts -# ## Typical host metrics (if omitted or empty, all metrics are collected) -# host_metric_include = [ -# "cpu.coreUtilization.average", -# "cpu.costop.summation", -# "cpu.demand.average", -# "cpu.idle.summation", -# "cpu.latency.average", -# "cpu.readiness.average", -# "cpu.ready.summation", -# "cpu.swapwait.summation", -# "cpu.usage.average", -# "cpu.usagemhz.average", -# "cpu.used.summation", -# "cpu.utilization.average", -# "cpu.wait.summation", -# "disk.deviceReadLatency.average", -# "disk.deviceWriteLatency.average", -# "disk.kernelReadLatency.average", -# "disk.kernelWriteLatency.average", -# "disk.numberReadAveraged.average", -# "disk.numberWriteAveraged.average", -# "disk.read.average", -# "disk.totalReadLatency.average", -# "disk.totalWriteLatency.average", -# "disk.write.average", -# "mem.active.average", -# "mem.latency.average", -# "mem.state.latest", -# "mem.swapin.average", -# "mem.swapinRate.average", -# "mem.swapout.average", -# "mem.swapoutRate.average", -# "mem.totalCapacity.average", -# "mem.usage.average", -# "mem.vmmemctl.average", -# "net.bytesRx.average", -# "net.bytesTx.average", -# "net.droppedRx.summation", -# "net.droppedTx.summation", -# "net.errorsRx.summation", -# "net.errorsTx.summation", -# "net.usage.average", -# "power.power.average", -# "storageAdapter.numberReadAveraged.average", -# "storageAdapter.numberWriteAveraged.average", -# "storageAdapter.read.average", -# "storageAdapter.write.average", -# "sys.uptime.latest", -# ] -# # host_metric_exclude = [] ## Nothing excluded by default -# # host_instances = true ## true by default -# -# ## Clusters -# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected -# # cluster_metric_exclude = [] ## Nothing excluded by default -# # cluster_instances = false ## false by default -# -# ## Datastores -# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected -# # datastore_metric_exclude = [] ## Nothing excluded by default -# # datastore_instances = false ## false by default for Datastores only -# -# ## Datacenters -# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected -# datacenter_metric_exclude = [ "*" ] ## 
Datacenters are not collected by default. -# # datacenter_instances = false ## false by default for Datastores only -# -# ## Plugin Settings -# ## separator character to use for measurement and field names (default: "_") -# # separator = "_" -# -# ## number of objects to retreive per query for realtime resources (vms and hosts) -# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) -# # max_query_objects = 256 -# -# ## number of metrics to retreive per query for non-realtime resources (clusters and datastores) -# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) -# # max_query_metrics = 256 -# -# ## number of go routines to use for collection and discovery of objects and metrics -# # collect_concurrency = 1 -# # discover_concurrency = 1 -# -# ## whether or not to force discovery of new objects on initial gather call before collecting metrics -# ## when true for large environments this may cause errors for time elapsed while collecting metrics -# ## when false (default) the first collection cycle may result in no or limited metrics while objects are discovered -# # force_discover_on_init = false -# -# ## the interval before (re)discovering objects subject to metrics collection (default: 300s) -# # object_discovery_interval = "300s" -# -# ## timeout applies to any of the api request made to vcenter -# # timeout = "60s" -# -# ## Optional SSL Config -# # ssl_ca = "/path/to/cafile" -# # ssl_cert = "/path/to/certfile" -# # ssl_key = "/path/to/keyfile" -# ## Use SSL but skip chain & host verification -# # insecure_skip_verify = false - - # # A Webhooks Event collector # [[inputs.webhooks]] # ## Address and port to host Webhook listener on From bfc8758db62565cfcf0f1898e1c2b5a8e0cc8b36 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 27 Feb 2019 11:32:58 -0800 Subject: [PATCH 0656/1815] Update build.py next_version --- scripts/build.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/build.py b/scripts/build.py index 58b684f82..e7a402be5 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -95,7 +95,7 @@ supported_packages = { "freebsd": [ "tar" ] } -next_version = '1.10.0' +next_version = '1.11.0' ################ #### Telegraf Functions From 7787ea2dcc55f37f1baffe1a89266aa93a7cf14c Mon Sep 17 00:00:00 2001 From: scier Date: Wed, 27 Feb 2019 16:33:38 -0800 Subject: [PATCH 0657/1815] Create log file in append mode (#5497) --- logger/logger.go | 14 ++++---------- logger/logger_test.go | 23 +++++++++++++++++++++++ 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/logger/logger.go b/logger/logger.go index 7ad1c8069..6250dedd6 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -50,16 +50,10 @@ func SetupLogging(debug, quiet bool, logfile string) { var oFile *os.File if logfile != "" { - if _, err := os.Stat(logfile); os.IsNotExist(err) { - if oFile, err = os.Create(logfile); err != nil { - log.Printf("E! Unable to create %s (%s), using stderr", logfile, err) - oFile = os.Stderr - } - } else { - if oFile, err = os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY, os.ModeAppend); err != nil { - log.Printf("E! Unable to append to %s (%s), using stderr", logfile, err) - oFile = os.Stderr - } + var err error + if oFile, err = os.OpenFile(logfile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModeAppend|0644); err != nil { + log.Printf("E! 
Unable to open %s (%s), using stderr", logfile, err) + oFile = os.Stderr } } else { oFile = os.Stderr diff --git a/logger/logger_test.go b/logger/logger_test.go index 09c7c82eb..a721cbba7 100644 --- a/logger/logger_test.go +++ b/logger/logger_test.go @@ -64,6 +64,29 @@ func TestAddDefaultLogLevel(t *testing.T) { assert.Equal(t, f[19:], []byte("Z I! TEST\n")) } +func TestWriteToTruncatedFile(t *testing.T) { + tmpfile, err := ioutil.TempFile("", "") + assert.NoError(t, err) + defer func() { os.Remove(tmpfile.Name()) }() + + SetupLogging(true, false, tmpfile.Name()) + log.Printf("TEST") + + f, err := ioutil.ReadFile(tmpfile.Name()) + assert.NoError(t, err) + assert.Equal(t, f[19:], []byte("Z I! TEST\n")) + + tmpf, err := os.OpenFile(tmpfile.Name(), os.O_TRUNC, 0644) + assert.NoError(t, err) + assert.NoError(t, tmpf.Close()) + + log.Printf("SHOULD BE FIRST") + + f, err = ioutil.ReadFile(tmpfile.Name()) + assert.NoError(t, err) + assert.Equal(t, f[19:], []byte("Z I! SHOULD BE FIRST\n")) +} + func BenchmarkTelegrafLogWrite(b *testing.B) { var msg = []byte("test") var buf bytes.Buffer From 41286d10c2abe0c0e8d6d458f4fc89def30236b4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 27 Feb 2019 16:34:49 -0800 Subject: [PATCH 0658/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 075d5aa63..b9760b9d0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -65,6 +65,7 @@ - [#5304](https://github.com/influxdata/telegraf/issues/5304): Fix x509_cert input stops checking certs after first error. - [#5404](https://github.com/influxdata/telegraf/issues/5404): Group stackdriver requests to send one point per timeseries. - [#5449](https://github.com/influxdata/telegraf/issues/5449): Log permission error and ignore in filecount input. +- [#5497](https://github.com/influxdata/telegraf/pull/5497): Create log file in append mode. 
## v1.9.5 [2019-02-26] From 2c09010f72a6d25dd41968263e0e0564c2309c95 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 1 Mar 2019 11:19:31 -0800 Subject: [PATCH 0659/1815] Listen before returning from Connect in prometheus output (#5509) --- .../prometheus_client/prometheus_client.go | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index b37718ab7..190b0d882 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -3,6 +3,7 @@ package prometheus_client import ( "context" "crypto/subtle" + "crypto/tls" "fmt" "log" "net" @@ -16,7 +17,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/internal/tls" + tlsint "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -66,7 +67,7 @@ type PrometheusClient struct { StringAsLabel bool `toml:"string_as_label"` ExportTimestamp bool `toml:"export_timestamp"` - tls.ServerConfig + tlsint.ServerConfig server *http.Server @@ -199,13 +200,18 @@ func (p *PrometheusClient) Connect() error { TLSConfig: tlsConfig, } + var listener net.Listener + if tlsConfig != nil { + listener, err = tls.Listen("tcp", p.Listen, tlsConfig) + } else { + listener, err = net.Listen("tcp", p.Listen) + } + if err != nil { + return err + } + go func() { - var err error - if p.TLSCert != "" && p.TLSKey != "" { - err = p.server.ListenAndServeTLS("", "") - } else { - err = p.server.ListenAndServe() - } + err := p.server.Serve(listener) if err != nil && err != http.ErrServerClosed { log.Printf("E! Error creating prometheus metric endpoint, err: %s\n", err.Error()) From c57f2d9d4861ad799f19a7e779c7e93204e13ad6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 1 Mar 2019 11:21:31 -0800 Subject: [PATCH 0660/1815] Ignore tracking for metrics added to aggregator (#5508) --- internal/models/running_aggregator.go | 27 +++++++++++++-------------- metric/metric.go | 22 ++++++++++++++++++++++ 2 files changed, 35 insertions(+), 14 deletions(-) diff --git a/internal/models/running_aggregator.go b/internal/models/running_aggregator.go index b1fa3637b..f54b5266e 100644 --- a/internal/models/running_aggregator.go +++ b/internal/models/running_aggregator.go @@ -5,6 +5,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/selfstat" ) @@ -96,39 +97,37 @@ func (r *RunningAggregator) MakeMetric(metric telegraf.Metric) telegraf.Metric { return m } -func (r *RunningAggregator) metricFiltered(metric telegraf.Metric) { - r.MetricsFiltered.Incr(1) - metric.Accept() -} - func (r *RunningAggregator) metricDropped(metric telegraf.Metric) { r.MetricsDropped.Incr(1) - metric.Accept() } // Add a metric to the aggregator and return true if the original metric // should be dropped. 
-func (r *RunningAggregator) Add(metric telegraf.Metric) bool { - if ok := r.Config.Filter.Select(metric); !ok { +func (r *RunningAggregator) Add(m telegraf.Metric) bool { + if ok := r.Config.Filter.Select(m); !ok { return false } - metric = metric.Copy() + // Make a copy of the metric but don't retain tracking; it doesn't make + // sense to fail a metric's delivery due to the aggregation not being + // sent because we can't create aggregations of historical data. + m = metric.FromMetric(m) - r.Config.Filter.Modify(metric) - if len(metric.FieldList()) == 0 { + r.Config.Filter.Modify(m) + if len(m.FieldList()) == 0 { + r.metricDropped(m) return r.Config.DropOriginal } r.Lock() defer r.Unlock() - if r.periodStart.IsZero() || metric.Time().After(r.periodEnd) { - r.metricDropped(metric) + if r.periodStart.IsZero() || m.Time().After(r.periodEnd) { + r.metricDropped(m) return r.Config.DropOriginal } - r.Aggregator.Add(metric) + r.Aggregator.Add(m) return r.Config.DropOriginal } diff --git a/metric/metric.go b/metric/metric.go index f2a49957e..de4af500b 100644 --- a/metric/metric.go +++ b/metric/metric.go @@ -62,6 +62,28 @@ func New( return m, nil } +// FromMetric returns a deep copy of the metric with any tracking information +// removed. +func FromMetric(other telegraf.Metric) telegraf.Metric { + m := &metric{ + name: other.Name(), + tags: make([]*telegraf.Tag, len(other.TagList())), + fields: make([]*telegraf.Field, len(other.FieldList())), + tm: other.Time(), + tp: other.Type(), + aggregate: other.IsAggregate(), + } + + for i, tag := range other.TagList() { + m.tags[i] = &telegraf.Tag{Key: tag.Key, Value: tag.Value} + } + + for i, field := range other.FieldList() { + m.fields[i] = &telegraf.Field{Key: field.Key, Value: field.Value} + } + return m +} + func (m *metric) String() string { return fmt.Sprintf("%s %v %v %d", m.name, m.Tags(), m.Fields(), m.tm.UnixNano()) } From 9bd48d4673aa89249ead664fdd0e5e318437c348 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 1 Mar 2019 11:27:47 -0800 Subject: [PATCH 0661/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b9760b9d0..ddb6ad9cf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -66,6 +66,7 @@ - [#5404](https://github.com/influxdata/telegraf/issues/5404): Group stackdriver requests to send one point per timeseries. - [#5449](https://github.com/influxdata/telegraf/issues/5449): Log permission error and ignore in filecount input. - [#5497](https://github.com/influxdata/telegraf/pull/5497): Create log file in append mode. +- [#5325](https://github.com/influxdata/telegraf/issues/5325): Ignore tracking for metrics added to aggregator. 
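The comment added to Add captures the subtlety behind this change: tracking metrics carry delivery accounting, so if the aggregator held a tracked copy, an aggregation period that never produces output could fail the original metric's delivery. metric.FromMetric therefore deep-copies the name, tags, fields, time, and type while deliberately leaving the tracking state behind. A self-contained analogue of that copy, using simplified stand-in types rather than telegraf's actual interfaces:

    package main

    import "fmt"

    type Tag struct{ Key, Value string }

    type Field struct {
        Key   string
        Value interface{}
    }

    // Metric is a stripped-down stand-in for telegraf.Metric; the tracked flag
    // stands in for the delivery-tracking state that FromMetric discards.
    type Metric struct {
        Name    string
        Tags    []*Tag
        Fields  []*Field
        tracked bool
    }

    // FromMetric returns a deep copy with tracking removed, so mutating or
    // dropping the copy can never affect the original's delivery accounting.
    func FromMetric(other *Metric) *Metric {
        m := &Metric{
            Name:   other.Name,
            Tags:   make([]*Tag, len(other.Tags)),
            Fields: make([]*Field, len(other.Fields)),
        }
        for i, tag := range other.Tags {
            m.Tags[i] = &Tag{Key: tag.Key, Value: tag.Value}
        }
        for i, field := range other.Fields {
            m.Fields[i] = &Field{Key: field.Key, Value: field.Value}
        }
        return m
    }

    func main() {
        orig := &Metric{Name: "cpu", tracked: true,
            Fields: []*Field{{Key: "usage", Value: 42.0}}}
        dup := FromMetric(orig)
        dup.Fields[0].Value = 99.0 // the original is unaffected
        fmt.Println(orig.Fields[0].Value, dup.Fields[0].Value, dup.tracked)
    }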
## v1.9.5 [2019-02-26] From 30fcaf09875f28f3f80fbbe3ded946382f166857 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Fri, 1 Mar 2019 14:26:11 -0700 Subject: [PATCH 0662/1815] Improve error handling in prometheus output (#5512) --- plugins/outputs/prometheus_client/prometheus_client.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 190b0d882..f919b6a71 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -176,7 +176,10 @@ func (p *PrometheusClient) Connect() error { } } - registry.Register(p) + err := registry.Register(p) + if err != nil { + return err + } if p.Listen == "" { p.Listen = "localhost:9273" @@ -301,6 +304,7 @@ func (p *PrometheusClient) Collect(ch chan<- prometheus.Metric) { log.Printf("E! Error creating prometheus metric, "+ "key: %s, labels: %v,\nerr: %s\n", name, labels, err.Error()) + continue } if p.ExportTimestamp { From d09c2135623215761240621593a8d60f69d72e8b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Sat, 2 Mar 2019 21:07:05 -0800 Subject: [PATCH 0663/1815] Fix sample configuration; generated with wrong version --- etc/telegraf.conf | 2890 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 2486 insertions(+), 404 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 18466692d..5c978aa59 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -47,8 +47,8 @@ ## same time, which can have a measurable effect on the system. collection_jitter = "0s" - ## Default flushing interval for all outputs. You shouldn't set this below - ## interval. Maximum flush_interval will be flush_interval + flush_jitter + ## Default flushing interval for all outputs. Maximum flush_interval will be + ## flush_interval + flush_jitter flush_interval = "10s" ## Jitter the flush interval by a random amount. This is primarily to avoid ## large write spikes for users running a large number of telegraf instances. @@ -82,48 +82,73 @@ # OUTPUT PLUGINS # ############################################################################### -# Configuration for influxdb server to send metrics to +# Configuration for sending metrics to InfluxDB [[outputs.influxdb]] ## The full HTTP or UDP URL for your InfluxDB instance. ## - ## Multiple urls can be specified as part of the same cluster, - ## this means that only ONE of the urls will be written to each interval. - # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example - urls = ["http://127.0.0.1:8086"] # required - ## The target database for metrics (telegraf will create it if not exists). - database = "telegraf" # required + ## Multiple URLs can be specified for a single cluster, only ONE of the + ## urls will be written to each interval. + # urls = ["unix:///var/run/influxdb.sock"] + # urls = ["udp://127.0.0.1:8089"] + # urls = ["http://127.0.0.1:8086"] + + ## The target database for metrics; will be created as needed. + ## For UDP url endpoint database needs to be configured on server side. + # database = "telegraf" + + ## The value of this tag will be used to determine the database. If this + ## tag is not set the 'database' option is used as the default. + # database_tag = "" + + ## If true, no CREATE DATABASE queries will be sent. Set to true when using + ## Telegraf with a user without permissions to create databases or when the + ## database already exists. 
+ # skip_database_creation = false ## Name of existing retention policy to write to. Empty string writes to - ## the default retention policy. - retention_policy = "" - ## Write consistency (clusters only), can be: "any", "one", "quorum", "all" - write_consistency = "any" + ## the default retention policy. Only takes effect when using HTTP. + # retention_policy = "" - ## Write timeout (for the InfluxDB client), formatted as a string. - ## If not provided, will default to 5s. 0s means no timeout (not recommended). - timeout = "5s" + ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". + ## Only takes effect when using HTTP. + # write_consistency = "any" + + ## Timeout for HTTP messages. + # timeout = "5s" + + ## HTTP Basic Auth # username = "telegraf" # password = "metricsmetricsmetricsmetrics" - ## Set the user agent for HTTP POSTs (can be useful for log differentiation) - # user_agent = "telegraf" - ## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) - # udp_payload = 512 - ## Optional SSL Config - # ssl_ca = "/etc/telegraf/ca.pem" - # ssl_cert = "/etc/telegraf/cert.pem" - # ssl_key = "/etc/telegraf/key.pem" - ## Use SSL but skip chain & host verification + ## HTTP User-Agent + # user_agent = "telegraf" + + ## UDP payload size is the maximum packet size to send. + # udp_payload = "512B" + + ## Optional TLS Config for use on HTTP connections. + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification # insecure_skip_verify = false - ## HTTP Proxy Config + ## HTTP Proxy override, if unset values the standard proxy environment + ## variables are consulted to determine which proxy, if any, should be used. # http_proxy = "http://corporate.proxy:3128" - ## Optional HTTP headers + ## Additional HTTP headers # http_headers = {"X-Special-Header" = "Special-Value"} - ## Compress each HTTP request payload using GZIP. - # content_encoding = "gzip" + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "identity" + + ## When true, Telegraf will output unsigned integers as unsigned values, + ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned + ## integer values. Enabling this option will result in field type errors if + ## existing data has been written. + # influx_uint_support = false # # Configuration for Amon Server to send metrics to. @@ -138,44 +163,192 @@ # # timeout = "5s" -# # Configuration for the AMQP server to send metrics to +# # Publishes metrics to an AMQP broker # [[outputs.amqp]] -# ## AMQP url -# url = "amqp://localhost:5672/influxdb" -# ## AMQP exchange +# ## Broker to publish to. +# ## deprecated in 1.7; use the brokers option +# # url = "amqp://localhost:5672/influxdb" +# +# ## Brokers to publish to. If multiple brokers are specified a random broker +# ## will be selected anytime a connection is established. This can be +# ## helpful for load balancing when not using a dedicated load balancer. +# brokers = ["amqp://localhost:5672/influxdb"] +# +# ## Maximum messages to send over a connection. Once this is reached, the +# ## connection is closed and a new connection is made. This can be helpful for +# ## load balancing when not using a dedicated load balancer. +# # max_messages = 0 +# +# ## Exchange to declare and publish to. 
# exchange = "telegraf" +# +# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". +# # exchange_type = "topic" +# +# ## If true, exchange will be passively declared. +# # exchange_declare_passive = false +# +# ## Exchange durability can be either "transient" or "durable". +# # exchange_durability = "durable" +# +# ## Additional exchange arguments. +# # exchange_arguments = { } +# # exchange_arguments = {"hash_propery" = "timestamp"} +# +# ## Authentication credentials for the PLAIN auth_method. +# # username = "" +# # password = "" +# # ## Auth method. PLAIN and EXTERNAL are supported # ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as # ## described here: https://www.rabbitmq.com/plugins.html # # auth_method = "PLAIN" -# ## Telegraf tag to use as a routing key -# ## ie, if this tag exists, its value will be used as the routing key -# routing_tag = "host" -# ## Delivery Mode controls if a published message is persistent -# ## Valid options are "transient" and "persistent". default: "transient" -# delivery_mode = "transient" # -# ## InfluxDB retention policy -# # retention_policy = "default" -# ## InfluxDB database +# ## Metric tag to use as a routing key. +# ## ie, if this tag exists, its value will be used as the routing key +# # routing_tag = "host" +# +# ## Static routing key. Used when no routing_tag is set or as a fallback +# ## when the tag specified in routing tag is not found. +# # routing_key = "" +# # routing_key = "telegraf" +# +# ## Delivery Mode controls if a published message is persistent. +# ## One of "transient" or "persistent". +# # delivery_mode = "transient" +# +# ## InfluxDB database added as a message header. +# ## deprecated in 1.7; use the headers option # # database = "telegraf" # -# ## Write timeout, formatted as a string. If not provided, will default -# ## to 5s. 0s means no timeout (not recommended). +# ## InfluxDB retention policy added as a message header +# ## deprecated in 1.7; use the headers option +# # retention_policy = "default" +# +# ## Static headers added to each published message. +# # headers = { } +# # headers = {"database" = "telegraf", "retention_policy" = "default"} +# +# ## Connection timeout. If not provided, will default to 5s. 0s means no +# ## timeout (not recommended). # # timeout = "5s" # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # +# ## If true use batch serialization format instead of line based delimiting. +# ## Only applies to data formats which are not line based such as JSON. +# ## Recommended to set to true. +# # use_batch_format = false +# # ## Data format to output. # ## Each data format has its own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Send metrics to Azure Application Insights +# [[outputs.application_insights]] +# ## Instrumentation key of the Application Insights resource. +# instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx" +# +# ## Timeout for closing (default: 5s). 
+# # timeout = "5s" +# +# ## Enable additional diagnostic logging. +# # enable_diagnostic_logging = false +# +# ## Context Tag Sources add Application Insights context tags to a tag value. +# ## +# ## For list of allowed context tag keys see: +# ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go +# # [outputs.application_insights.context_tag_sources] +# # "ai.cloud.role" = "kubernetes_container_name" +# # "ai.cloud.roleInstance" = "kubernetes_pod_name" + + +# # Send aggregate metrics to Azure Monitor +# [[outputs.azure_monitor]] +# ## Timeout for HTTP writes. +# # timeout = "20s" +# +# ## Set the namespace prefix, defaults to "Telegraf/". +# # namespace_prefix = "Telegraf/" +# +# ## Azure Monitor doesn't have a string value type, so convert string +# ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows +# ## a maximum of 10 dimensions so Telegraf will only send the first 10 +# ## alphanumeric dimensions. +# # strings_as_dimensions = false +# +# ## Both region and resource_id must be set or be available via the +# ## Instance Metadata service on Azure Virtual Machines. +# # +# ## Azure Region to publish metrics against. +# ## ex: region = "southcentralus" +# # region = "" +# # +# ## The Azure Resource ID against which metric will be logged, e.g. +# ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" +# # resource_id = "" +# +# ## Optionally, if in Azure US Government, China or other sovereign +# ## cloud environment, set appropriate REST endpoint for receiving +# ## metrics. (Note: region may be unused in this context) +# # endpoint_url = "https://monitoring.core.usgovcloudapi.net" + + +# # Publish Telegraf metrics to a Google Cloud PubSub topic +# [[outputs.cloud_pubsub]] +# ## Required. Name of Google Cloud Platform (GCP) Project that owns +# ## the given PubSub topic. +# project = "my-project" +# +# ## Required. Name of PubSub topic to publish metrics to. +# topic = "my-topic" +# +# ## Required. Data format to consume. +# ## Each data format has its own unique set of configuration options. +# ## Read more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" +# +# ## Optional. If true, will send all metrics per write in one PubSub message. +# # send_batched = true +# +# ## The following publish_* parameters specifically configures batching +# ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read +# ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings +# +# ## Optional. Send a request to PubSub (i.e. actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1. +# # publish_count_threshold = 1000 +# +# ## Optional. Send a request to PubSub (i.e. actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1 +# # publish_byte_threshold = 1000000 +# +# ## Optional. Specifically configures requests made to the PubSub API. +# # publish_num_go_routines = 2 +# +# ## Optional. Specifies a timeout for requests to the PubSub API. 
+# # publish_timeout = "30s" +# +# ## Optional. PubSub attributes to add to metrics. +# # [[inputs.pubsub.attributes]] +# # my_attr = "tag_value" # # Configuration for AWS CloudWatch output. @@ -198,8 +371,22 @@ # #profile = "" # #shared_credential_file = "" # +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# # ## Namespace for the CloudWatch MetricDatums # namespace = "InfluxData/Telegraf" +# +# ## If you have a large amount of metrics, you should consider to send statistic +# ## values instead of raw metrics which could not only improve performance but +# ## also save AWS API cost. If enable this flag, this plugin would parse the required +# ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. +# ## You could use basicstats aggregator to calculate those fields. If not all statistic +# ## fields are available, all fields would still be sent as raw metrics. +# # write_statistics = false # # Configuration for CrateDB to send metrics to. @@ -220,6 +407,9 @@ # ## Datadog API key # apikey = "my-secret-key" # required. # +# # The base endpoint URL can optionally be specified but it defaults to: +# #url = "https://app.datadoghq.com/api/v1/series" +# # ## Connection timeout. # # timeout = "5s" @@ -264,11 +454,11 @@ # # default_tag_value = "none" # index_name = "telegraf-%Y.%m.%d" # required. # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # # ## Template Config @@ -304,14 +494,18 @@ # ## Graphite output template # ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md # template = "host.tags.measurement.field" +# +# ## Enable Graphite tags support +# # graphite_tag_support = false +# # ## timeout in seconds for the write connection to graphite # timeout = 2 # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = false @@ -321,6 +515,99 @@ # servers = ["127.0.0.1:12201", "192.168.1.1:12201"] +# # A plugin that can transmit metrics over HTTP +# [[outputs.http]] +# ## URL is the address to send metrics to +# url = "http://127.0.0.1:8080/metric" +# +# ## Timeout for HTTP message +# # timeout = "5s" +# +# ## HTTP method, one of: "POST" or "PUT" +# # method = "POST" +# +# ## HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## OAuth2 Client Credentials Grant +# # client_id = "clientid" +# # client_secret = "secret" +# # token_url = "https://indentityprovider/oauth2/v1/token" +# # scopes = ["urn:opc:idm:__myscopes__"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & 
host verification +# # insecure_skip_verify = false +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" +# +# ## Additional HTTP headers +# # [outputs.http.headers] +# # # Should be set manually to "application/json" for json data_format +# # Content-Type = "text/plain; charset=utf-8" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" + + +# # Configuration for sending metrics to InfluxDB +# [[outputs.influxdb_v2]] +# ## The URLs of the InfluxDB cluster nodes. +# ## +# ## Multiple URLs can be specified for a single cluster, only ONE of the +# ## urls will be written to each interval. +# urls = ["http://127.0.0.1:9999"] +# +# ## Token for authentication. +# token = "" +# +# ## Organization is the name of the organization you wish to write to; must exist. +# organization = "" +# +# ## Destination bucket to write into. +# bucket = "" +# +# ## The value of this tag will be used to determine the bucket. If this +# ## tag is not set the 'bucket' option is used as the default. +# # bucket_tag = "" +# +# ## Timeout for HTTP messages. +# # timeout = "5s" +# +# ## Additional HTTP headers +# # http_headers = {"X-Special-Header" = "Special-Value"} +# +# ## HTTP Proxy override, if unset values the standard proxy environment +# ## variables are consulted to determine which proxy, if any, should be used. +# # http_proxy = "http://corporate.proxy:3128" +# +# ## HTTP User-Agent +# # user_agent = "telegraf" +# +# ## Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "gzip" +# +# ## Enable or disable uint support for writing uints influxdb 2.0. +# # influx_uint_support = false +# +# ## Optional TLS Config for use on HTTP connections. +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Configuration for sending metrics to an Instrumental project # [[outputs.instrumental]] # ## Project API Token (required) @@ -343,6 +630,15 @@ # ## Kafka topic for producer messages # topic = "telegraf" # +# ## Optional Client id +# # client_id = "Telegraf" +# +# ## Set the minimal supported Kafka version. Setting this enables the use of new +# ## Kafka features and APIs. Of particular interest, lz4 compression +# ## requires at least version 0.10.0.0. +# ## ex: version = "1.1.0" +# # version = "" +# # ## Optional topic suffix configuration. # ## If the section is omitted, no suffix is used. # ## Following topic suffix methods are supported: @@ -374,12 +670,20 @@ # ## ie, if this tag exists, its value will be used as the routing key # routing_tag = "host" # +# ## Static routing key. Used when no routing_tag is set or as a fallback +# ## when the tag specified in routing tag is not found. If set to "random", +# ## a random value will be generated for each message. +# ## ex: routing_key = "random" +# ## routing_key = "telegraf" +# # routing_key = "" +# # ## CompressionCodec represents the various compression codecs recognized by # ## Kafka in messages. 
# ## 0 : No compression # ## 1 : Gzip compression # ## 2 : Snappy compression -# compression_codec = 0 +# ## 3 : LZ4 compression +# # compression_codec = 0 # # ## RequiredAcks is used in Produce Requests to tell the broker how many # ## replica acknowledgements it must see before responding @@ -395,16 +699,21 @@ # ## received the data. This option provides the best durability, we # ## guarantee that no messages will be lost as long as at least one in # ## sync replica remains. -# required_acks = -1 +# # required_acks = -1 # -# ## The total number of times to retry sending a message -# max_retry = 3 +# ## The maximum number of times to retry sending a metric before failing +# ## until the next flush. +# # max_retry = 3 # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification +# ## The maximum permitted size of a message. Should be set equal to or +# ## smaller than the broker's 'message.max.bytes'. +# # max_message_bytes = 1000000 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # # ## Optional SASL Config @@ -415,7 +724,7 @@ # ## Each data format has its own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md -# data_format = "influx" +# # data_format = "influx" # # Configuration for the AWS Kinesis output. @@ -438,6 +747,12 @@ # #profile = "" # #shared_credential_file = "" # +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# # ## Kinesis StreamName must exist prior to starting telegraf. # streamname = "StreamName" # ## DEPRECATED: PartitionKey as used for sharding data. @@ -462,10 +777,11 @@ # # method = "measurement" # # # ## Use the value of a tag for all writes, if the tag is not set the empty -# ## string will be used: +# ## default option will be used. When no default, defaults to "telegraf" # # [outputs.kinesis.partition] # # method = "tag" # # key = "host" +# # default = "mykey" # # # ## Data format to output. @@ -506,23 +822,37 @@ # ## ex: prefix/web01.example.com/mem # topic_prefix = "telegraf" # +# ## QoS policy for messages +# ## 0 = at most once +# ## 1 = at least once +# ## 2 = exactly once +# # qos = 2 +# # ## username and password to connect MQTT server. # # username = "telegraf" # # password = "metricsmetricsmetricsmetrics" # -# ## Timeout for write operations. default: 5s -# # timeout = "5s" -# # ## client ID, if not set a random ID is generated # # client_id = "" # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification +# ## Timeout for write operations. default: 5s +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # +# ## When true, metrics will be sent in one MQTT message per flush. 
Otherwise, +# ## metrics are written one metric per MQTT message. +# # batch = false +# +# ## When true, metric will have RETAIN flag set, making broker cache entries until someone +# ## actually reads it +# # retain = false +# # ## Data format to output. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -540,11 +870,11 @@ # ## NATS subject for producer messages # subject = "telegraf" # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # # ## Data format to output. @@ -583,7 +913,11 @@ # # ## Number of data points to send to OpenTSDB in Http requests. # ## Not used with telnet API. -# httpBatchSize = 50 +# http_batch_size = 50 +# +# ## URI Path for Http requests to OpenTSDB. +# ## Used in cases where OpenTSDB is located behind a reverse proxy. +# http_path = "/api/put" # # ## Debug true - Prints OpenTSDB communication # debug = false @@ -595,14 +929,40 @@ # # Configuration for the Prometheus client to spawn # [[outputs.prometheus_client]] # ## Address to listen on -# # listen = ":9273" +# listen = ":9273" # -# ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration +# ## Use HTTP Basic Authentication. +# # basic_username = "Foo" +# # basic_password = "Bar" +# +# ## If set, the IP Ranges which are allowed to access metrics. +# ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"] +# # ip_range = [] +# +# ## Path to publish the metrics on. +# # path = "/metrics" +# +# ## Expiration interval for each metric. 0 == no expiration # # expiration_interval = "60s" # # ## Collectors to enable, valid entries are "gocollector" and "process". # ## If unset, both are enabled. -# collectors_exclude = ["gocollector", "process"] +# # collectors_exclude = ["gocollector", "process"] +# +# ## Send string metrics as Prometheus labels. +# ## Unless set to false all string metrics will be sent as labels. +# # string_as_label = true +# +# ## If set, enable TLS with the given certificate. +# # tls_cert = "/etc/ssl/telegraf.crt" +# # tls_key = "/etc/ssl/telegraf.key" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Export metric collection time. +# # export_timestamp = false # # Configuration for the Riemann server to send metrics to @@ -663,6 +1023,13 @@ # # address = "unix:///tmp/telegraf.sock" # # address = "unixgram:///tmp/telegraf.sock" # +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# # ## Period between keep alive probes. # ## Only applies to TCP sockets. # ## 0 disables keep alive probes. 
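For context on the keep alive options just above: a keep alive period of this kind typically maps onto Go's net package roughly as follows. This is an illustrative sketch with an assumed address and period, not the socket_writer implementation:

    package main

    import (
        "log"
        "net"
        "time"
    )

    // dialWithKeepAlive opens a TCP connection and applies a keep alive period;
    // a zero period disables keep alive probes, matching the option above.
    func dialWithKeepAlive(address string, period time.Duration) (net.Conn, error) {
        conn, err := net.DialTimeout("tcp", address, 5*time.Second)
        if err != nil {
            return nil, err
        }
        tcp, ok := conn.(*net.TCPConn)
        if !ok {
            return conn, nil // keep alive only applies to TCP sockets
        }
        if period == 0 {
            return conn, tcp.SetKeepAlive(false)
        }
        if err := tcp.SetKeepAlive(true); err != nil {
            return conn, err
        }
        return conn, tcp.SetKeepAlivePeriod(period)
    }

    func main() {
        // 127.0.0.1:8094 is an arbitrary example address.
        conn, err := dialWithKeepAlive("127.0.0.1:8094", 5*time.Minute)
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
    }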
@@ -676,40 +1043,64 @@ # # data_format = "influx" +# # Configuration for Google Cloud Stackdriver to send metrics to +# [[outputs.stackdriver]] +# ## GCP Project +# project = "erudite-bloom-151019" +# +# ## The namespace for the metric descriptor +# namespace = "telegraf" +# +# ## Custom resource type +# # resource_type = "generic_node" +# +# ## Additonal resource labels +# # [outputs.stackdriver.resource_labels] +# # node_id = "$HOSTNAME" +# # namespace = "myapp" +# # location = "eu-north0" + + # # Configuration for Wavefront server to send metrics to # [[outputs.wavefront]] -# ## DNS name of the wavefront proxy server -# host = "wavefront.example.com" +# ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy +# ## If using Wavefront Proxy, also specify port. example: http://proxyserver:2878 +# url = "https://metrics.wavefront.com" # -# ## Port that the Wavefront proxy server listens on -# port = 2878 +# ## Authentication Token for Wavefront. Only required if using Direct Ingestion +# #token = "DUMMY_TOKEN" +# +# ## DNS name of the wavefront proxy server. Do not use if url is specified +# #host = "wavefront.example.com" +# +# ## Port that the Wavefront proxy server listens on. Do not use if url is specified +# #port = 2878 # # ## prefix for metrics keys # #prefix = "my.specific.prefix." # -# ## whether to use "value" for name of simple fields +# ## whether to use "value" for name of simple fields. default is false # #simple_fields = false # -# ## character to use between metric and field name. defaults to . (dot) +# ## character to use between metric and field name. default is . (dot) # #metric_separator = "." # -# ## Convert metric name paths to use metricSeperator character -# ## When true (default) will convert all _ (underscore) chartacters in final metric name +# ## Convert metric name paths to use metricSeparator character +# ## When true will convert all _ (underscore) characters in final metric name. default is true # #convert_paths = true # # ## Use Regex to sanitize metric and tag names from invalid characters -# ## Regex is more thorough, but significantly slower +# ## Regex is more thorough, but significantly slower. default is false # #use_regex = false # # ## point tags to use as the source name for Wavefront (if none found, host will be used) -# #source_override = ["hostname", "snmp_host", "node_host"] +# #source_override = ["hostname", "address", "agent_host", "node_host"] # -# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default true +# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true # #convert_bool = true # # ## Define a mapping, namespaced by metric prefix, from string values to numeric values -# ## The example below maps "green" -> 1.0, "yellow" -> 0.5, "red" -> 0.0 for -# ## any metrics beginning with "elasticsearch" +# ## deprecated in 1.9; use the enum processor plugin # #[[outputs.wavefront.string_to_number.elasticsearch]] # # green = 1.0 # # yellow = 0.5 @@ -721,10 +1112,217 @@ # PROCESSOR PLUGINS # ############################################################################### +# # Convert values to another metric value type +# [[processors.converter]] +# ## Tags to convert +# ## +# ## The table key determines the target type, and the array of key-values +# ## select the keys to convert. The array may contain globs. +# ## = [...] 
+# [processors.converter.tags] +# string = [] +# integer = [] +# unsigned = [] +# boolean = [] +# float = [] +# +# ## Fields to convert +# ## +# ## The table key determines the target type, and the array of key-values +# ## select the keys to convert. The array may contain globs. +# ## = [...] +# [processors.converter.fields] +# tag = [] +# string = [] +# integer = [] +# unsigned = [] +# boolean = [] +# float = [] + + +# # Map enum values according to given table. +# [[processors.enum]] +# [[processors.enum.mapping]] +# ## Name of the field to map +# field = "status" +# +# ## Destination field to be used for the mapped value. By default the source +# ## field is used, overwriting the original value. +# # dest = "status_code" +# +# ## Default value to be used for all values not contained in the mapping +# ## table. When unset, the unmodified value for the field will be used if no +# ## match is found. +# # default = 0 +# +# ## Table of mappings +# [processors.enum.mapping.value_mappings] +# green = 1 +# yellow = 2 +# red = 3 + + +# # Apply metric modifications using override semantics. +# [[processors.override]] +# ## All modifications on inputs and aggregators can be overridden: +# # name_override = "new_name" +# # name_prefix = "new_name_prefix" +# # name_suffix = "new_name_suffix" +# +# ## Tags to be added (all values must be strings) +# # [processors.override.tags] +# # additional_tag = "tag_value" + + +# # Parse a value in a specified field/tag(s) and add the result in a new metric +# [[processors.parser]] +# ## The name of the fields whose value will be parsed. +# parse_fields = [] +# +# ## If true, incoming metrics are not emitted. +# drop_original = false +# +# ## If set to override, emitted metrics will be merged by overriding the +# ## original metric using the newly parsed metrics. +# merge = "override" +# +# ## The dataformat to be read from files +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + # # Print all metrics that pass through this filter. # [[processors.printer]] +# # Transforms tag and field values with regex pattern +# [[processors.regex]] +# ## Tag and field conversions defined in a separate sub-tables +# # [[processors.regex.tags]] +# # ## Tag to change +# # key = "resp_code" +# # ## Regular expression to match on a tag value +# # pattern = "^(\\d)\\d\\d$" +# # ## Pattern for constructing a new value (${1} represents first subgroup) +# # replacement = "${1}xx" +# +# # [[processors.regex.fields]] +# # key = "request" +# # ## All the power of the Go regular expressions available here +# # ## For example, named subgroups +# # pattern = "^/api(?P/[\\w/]+)\\S*" +# # replacement = "${method}" +# # ## If result_key is present, a new field will be created +# # ## instead of changing existing field +# # result_key = "method" +# +# ## Multiple conversions may be applied for one field sequentially +# ## Let's extract one more value +# # [[processors.regex.fields]] +# # key = "request" +# # pattern = ".*category=(\\w+).*" +# # replacement = "${1}" +# # result_key = "search_category" + + +# # Rename measurements, tags, and fields that pass through this filter. 
+# [[processors.rename]] + + +# # Perform string processing on tags, fields, and measurements +# [[processors.strings]] +# ## Convert a tag value to uppercase +# # [[processors.strings.uppercase]] +# # tag = "method" +# +# ## Convert a field value to lowercase and store in a new field +# # [[processors.strings.lowercase]] +# # field = "uri_stem" +# # dest = "uri_stem_normalised" +# +# ## Trim leading and trailing whitespace using the default cutset +# # [[processors.strings.trim]] +# # field = "message" +# +# ## Trim leading characters in cutset +# # [[processors.strings.trim_left]] +# # field = "message" +# # cutset = "\t" +# +# ## Trim trailing characters in cutset +# # [[processors.strings.trim_right]] +# # field = "message" +# # cutset = "\r\n" +# +# ## Trim the given prefix from the field +# # [[processors.strings.trim_prefix]] +# # field = "my_value" +# # prefix = "my_" +# +# ## Trim the given suffix from the field +# # [[processors.strings.trim_suffix]] +# # field = "read_count" +# # suffix = "_count" +# +# ## Replace all non-overlapping instances of old with new +# # [[processors.strings.replace]] +# # measurement = "*" +# # old = ":" +# # new = "_" + + +# # Print all metrics that pass through this filter. +# [[processors.topk]] +# ## How many seconds between aggregations +# # period = 10 +# +# ## How many top metrics to return +# # k = 10 +# +# ## Over which tags should the aggregation be done. Globs can be specified, in +# ## which case any tag matching the glob will aggregated over. If set to an +# ## empty list is no aggregation over tags is done +# # group_by = ['*'] +# +# ## Over which fields are the top k are calculated +# # fields = ["value"] +# +# ## What aggregation to use. Options: sum, mean, min, max +# # aggregation = "mean" +# +# ## Instead of the top k largest metrics, return the bottom k lowest metrics +# # bottomk = false +# +# ## The plugin assigns each metric a GroupBy tag generated from its name and +# ## tags. If this setting is different than "" the plugin will add a +# ## tag (which name will be the value of this setting) to each metric with +# ## the value of the calculated GroupBy tag. Useful for debugging +# # add_groupby_tag = "" +# +# ## These settings provide a way to know the position of each metric in +# ## the top k. The 'add_rank_field' setting allows to specify for which +# ## fields the position is required. If the list is non empty, then a field +# ## will be added to each and every metric for each string present in this +# ## setting. This field will contain the ranking of the group that +# ## the metric belonged to when aggregated over that field. +# ## The name of the field will be set to the name of the aggregation field, +# ## suffixed with the string '_topk_rank' +# # add_rank_fields = [] +# +# ## These settings provide a way to know what values the plugin is generating +# ## when aggregating metrics. The 'add_agregate_field' setting allows to +# ## specify for which fields the final aggregation value is required. If the +# ## list is non empty, then a field will be added to each every metric for +# ## each field present in this setting. This field will contain +# ## the computed aggregation for the group that the metric belonged to when +# ## aggregated over that field. 
+# ## The name of the field will be set to the name of the aggregation field, +# ## suffixed with the string '_topk_aggregate' +# # add_aggregate_fields = [] + + ############################################################################### # AGGREGATOR PLUGINS # @@ -732,12 +1330,14 @@ # # Keep the aggregate basicstats of each metric passing through. # [[aggregators.basicstats]] -# ## General Aggregator Arguments: # ## The period on which to flush & clear the aggregator. # period = "30s" # ## If true, the original metric will be dropped by the # ## aggregator and will not get sent to the output plugins. # drop_original = false +# +# ## Configures which basic stats to push as fields +# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"] # # Create aggregate histograms. @@ -776,6 +1376,18 @@ # drop_original = false +# # Count the occurrence of values in fields. +# [[aggregators.valuecounter]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# ## The fields for which the values will be counted +# fields = [] + + ############################################################################### # INPUT PLUGINS # @@ -795,13 +1407,12 @@ # Read metrics about disk usage by mount point [[inputs.disk]] - ## By default, telegraf gather stats for all mountpoints. - ## Setting mountpoints will restrict the stats to the specified mountpoints. + ## By default stats will be gathered for all mount points. + ## Set mount_points will restrict the stats to only the specified mount points. # mount_points = ["/"] - ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually - ## present on /run, /var/run, /dev/shm or /dev). - ignore_fs = ["tmpfs", "devtmpfs", "devfs"] + ## Ignore mount points by filesystem type. + ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] # Read metrics about disk IO by device @@ -809,7 +1420,7 @@ ## By default, telegraf will gather stats for all devices including ## disk partitions. ## Setting devices will restrict the stats to the specified devices. - # devices = ["sda", "sdb"] + # devices = ["sda", "sdb", "vd*"] ## Uncomment the following line if you need disk serial numbers. # skip_serial_number = false # @@ -818,6 +1429,8 @@ ## Currently only Linux is supported via udev properties. You can view ## available properties for a device by running: ## 'udevadm info -q property -n /dev/sda' + ## Note: Most, but not all, udev properties can be accessed this way. Properties + ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] # ## Using the same metadata source as device_tags, you can also customize the @@ -856,12 +1469,48 @@ # no configuration +# # Gather ActiveMQ metrics +# [[inputs.activemq]] +# ## Required ActiveMQ Endpoint +# # server = "192.168.50.10" +# +# ## Required ActiveMQ port +# # port = 8161 +# +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# +# ## Required ActiveMQ webadmin root path +# # webadmin = "admin" +# +# ## Maximum time to receive response. 
+# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification + + # # Read stats from aerospike server(s) # [[inputs.aerospike]] # ## Aerospike servers to connect to (with port) # ## This plugin will query all namespaces the aerospike # ## server has configured and get stats for them. # servers = ["localhost:3000"] +# +# # username = "telegraf" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true # # Read Apache status information (mod_status) @@ -878,11 +1527,37 @@ # ## Maximum time to receive response. # # response_timeout = "5s" # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Gather metrics from Apache Aurora schedulers +# [[inputs.aurora]] +# ## Schedulers are the base addresses of your Aurora Schedulers +# schedulers = ["http://127.0.0.1:8081"] +# +# ## Set of role types to collect metrics from. +# ## +# ## The scheduler roles are checked each interval by contacting the +# ## scheduler nodes; zookeeper is not contacted. +# # roles = ["leader", "follower"] +# +# ## Timeout is the max time for total network operations. +# # timeout = "5s" +# +# ## Username and password are sent using HTTP Basic Auth. +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = false @@ -898,6 +1573,16 @@ # bcacheDevs = ["bcache0"] +# # Collects Beanstalkd server and tubes stats +# [[inputs.beanstalkd]] +# ## Server to collect data from +# server = "localhost:11300" +# +# ## List of tubes to gather stats about. +# ## If no tubes specified then data gathered for each tube on server reported by list-tubes command +# tubes = ["notifications"] + + # # Collect bond interface status, slaves statuses and failures count # [[inputs.bond]] # ## Sets 'proc' directory path @@ -910,22 +1595,47 @@ # # bond_interfaces = ["bond0"] -# # Read Cassandra metrics through Jolokia -# [[inputs.cassandra]] -# # This is the context root used to compose the jolokia url -# context = "/jolokia/read" -# ## List of cassandra servers exposing jolokia read service -# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"] -# ## List of metrics collected on above servers -# ## Each metric consists of a jmx path. -# ## This will collect all heap memory usage metrics from the jvm and -# ## ReadLatency metrics for all keyspaces and tables. -# ## "type=Table" in the query works with Cassandra3.0. 
Older versions might -# ## need to use "type=ColumnFamily" -# metrics = [ -# "/java.lang:type=Memory/HeapMemoryUsage", -# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency" -# ] +# # Collect Kafka topics and consumers status from Burrow HTTP API. +# [[inputs.burrow]] +# ## Burrow API endpoints in format "schema://host:port". +# ## Default is "http://localhost:8000". +# servers = ["http://localhost:8000"] +# +# ## Override Burrow API prefix. +# ## Useful when Burrow is behind reverse-proxy. +# # api_prefix = "/v3/kafka" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Limit per-server concurrent connections. +# ## Useful in case of large number of topics or consumer groups. +# # concurrent_connections = 20 +# +# ## Filter clusters, default is no filtering. +# ## Values can be specified as glob patterns. +# # clusters_include = [] +# # clusters_exclude = [] +# +# ## Filter consumer groups, default is no filtering. +# ## Values can be specified as glob patterns. +# # groups_include = [] +# # groups_exclude = [] +# +# ## Filter topics, default is no filtering. +# ## Values can be specified as glob patterns. +# # topics_include = [] +# # topics_exclude = [] +# +# ## Credentials for basic HTTP authentication. +# # username = "" +# # password = "" +# +# ## Optional SSL config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# # insecure_skip_verify = false # # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. @@ -1004,6 +1714,12 @@ # #profile = "" # #shared_credential_file = "" # +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# # # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all # # metrics are made available to the 1 minute period. Some are collected at # # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. @@ -1040,7 +1756,9 @@ # #[[inputs.cloudwatch.metrics]] # # names = ["Latency", "RequestCount"] # # -# # ## Dimension filters for Metric (optional) +# # ## Dimension filters for Metric. These are optional however all dimensions +# # ## defined for the metric names must be specified in order to retrieve +# # ## the metric statistics. # # [[inputs.cloudwatch.metrics.dimensions]] # # name = "LoadBalancerName" # # value = "p-example" @@ -1064,19 +1782,33 @@ # # Gather health check statuses from services registered in Consul # [[inputs.consul]] -# ## Most of these values defaults to the one configured on a Consul's agent level. -# ## Optional Consul server address (default: "localhost") +# ## Consul server address # # address = "localhost" -# ## Optional URI scheme for the Consul server (default: "http") +# +# ## URI scheme for the Consul server, one of "http", "https" # # scheme = "http" -# ## Optional ACL token used in every request (default: "") +# +# ## ACL token used in every request # # token = "" -# ## Optional username used for request HTTP Basic Authentication (default: "") +# +# ## HTTP Basic Authentication username and password. 
# # username = "" -# ## Optional password used for HTTP Basic Authentication (default: "") # # password = "" -# ## Optional data centre to query the health checks from (default: "") -# # datacentre = "" +# +# ## Data center to query the health checks from +# # datacenter = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true +# +# ## Consul checks' tag splitting +# # When tags are formatted like "key:value" with ":" as a delimiter then +# # they will be splitted and reported as proper key:value in Telegraf +# # tag_delimiter = ":" # # Read metrics from one or many couchbase clusters @@ -1096,8 +1828,12 @@ # # Read CouchDB Stats from one or more servers # [[inputs.couchdb]] # ## Works with CouchDB stats endpoints out of the box -# ## Multiple HOSTs from which to read CouchDB stats: +# ## Multiple Hosts from which to read CouchDB stats: # hosts = ["http://localhost:8086/_stats"] +# +# ## Use HTTP Basic Authentication. +# # basic_username = "telegraf" +# # basic_password = "p@ssw0rd" # # Input plugin for DC/OS metrics @@ -1132,10 +1868,10 @@ # ## Maximum time to receive a response from cluster. # # response_timeout = "20s" # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" # ## If false, skip chain & host verification # # insecure_skip_verify = true # @@ -1199,6 +1935,11 @@ # container_name_include = [] # container_name_exclude = [] # +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. +# # container_state_include = [] +# # container_state_exclude = [] +# # ## Timeout for docker list, info, and stats commands # timeout = "5s" # @@ -1215,11 +1956,11 @@ # docker_label_include = [] # docker_label_exclude = [] # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = false @@ -1262,20 +2003,22 @@ # ## - cluster # # cluster_health_level = "indices" # -# ## Set cluster_stats to true when you want to also obtain cluster stats from the -# ## Master node. +# ## Set cluster_stats to true when you want to also obtain cluster stats. # cluster_stats = false # +# ## Only gather cluster_stats from the master node. To work this require local = true +# cluster_stats_only_from_master = true +# # ## node_stats is a list of sub-stats that you want to have gathered. Valid options # ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http", -# ## "breakers". Per default, all stats are gathered. +# ## "breaker". Per default, all stats are gathered. 
# # node_stats = ["jvm", "http"] # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = false @@ -1307,6 +2050,72 @@ # use_sudo = false +# # Read devices value(s) from a Fibaro controller +# [[inputs.fibaro]] +# ## Required Fibaro controller address/hostname. +# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available +# url = "http://:80" +# +# ## Required credentials to access the API (http://) +# username = "" +# password = "" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" + + +# # Reload and gather from file[s] on telegraf's interval. +# [[inputs.file]] +# ## Files to parse each interval. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## /var/log/**.log -> recursively find all .log files in /var/log +# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log +# ## /var/log/apache.log -> only read the apache log file +# files = ["/var/log/apache/access.log"] +# +# ## The dataformat to be read from files +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Count files in a directory +# [[inputs.filecount]] +# ## Directory to gather stats about. +# ## deprecated in 1.9; use the directories option +# # directory = "/var/cache/apt/archives" +# +# ## Directories to gather stats about. +# ## This accept standard unit glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## /var/log/** -> recursively find all directories in /var/log and count files in each directories +# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directories +# ## /var/log -> count all files in /var/log and all of its subdirectories +# directories = ["/var/cache/apt/archives"] +# +# ## Only count files that match the name pattern. Defaults to "*". +# name = "*.deb" +# +# ## Count files in subdirectories. Defaults to true. +# recursive = false +# +# ## Only count regular files. Defaults to true. +# regular_only = true +# +# ## Only count files that are at least this size. If size is +# ## a negative number, only count files that are smaller than the +# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... +# ## Without quotes and units, interpreted as size in bytes. +# size = "0B" +# +# ## Only count files that have not been touched for at least this +# ## duration. If mtime is negative, only count files that have been +# ## touched in this duration. Defaults to "0s". +# mtime = "0s" + + # # Read stats about given file(s) # [[inputs.filestat]] # ## Files to gather stats about. 
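The filecount options shown above (name pattern, regular_only, size, mtime) amount to a predicate evaluated during a recursive directory walk. A rough standalone sketch of that filtering follows; the directory and pattern are taken from the sample config above, the threshold handling is simplified, and none of this is the plugin's actual code:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "time"
    )

    // countFiles walks dir recursively and counts regular files that match the
    // name pattern, are at least minSize bytes, and are older than minAge.
    func countFiles(dir, pattern string, minSize int64, minAge time.Duration) (int64, error) {
        var count int64
        err := filepath.Walk(dir, func(path string, info os.FileInfo, walkErr error) error {
            if walkErr != nil {
                return nil // skip unreadable entries instead of aborting the walk
            }
            if !info.Mode().IsRegular() {
                return nil // regular_only = true
            }
            if ok, _ := filepath.Match(pattern, info.Name()); !ok {
                return nil // name = "*.deb"
            }
            if info.Size() < minSize || time.Since(info.ModTime()) < minAge {
                return nil // size and mtime thresholds
            }
            count++
            return nil
        })
        return count, err
    }

    func main() {
        n, err := countFiles("/var/cache/apt/archives", "*.deb", 0, 0)
        if err != nil {
            fmt.Println("walk error:", err)
        }
        fmt.Println("matching files:", n)
    }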
@@ -1368,11 +2177,11 @@ # username = "" # password = "" # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = false @@ -1386,6 +2195,10 @@ # ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats # servers = ["http://myhaproxy.com:1936/haproxy?stats"] # +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# # ## You can also use local socket with standard wildcard globbing. # ## Server address not starting with 'http' will be treated as a possible # ## socket, so both examples below are valid. @@ -1394,13 +2207,13 @@ # ## By default, some of the fields are renamed from what haproxy calls them. # ## Setting this option to true results in the plugin keeping the original # ## field names. -# # keep_field_names = true +# # keep_field_names = false # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = false @@ -1417,11 +2230,55 @@ # # devices = ["sda", "*"] +# # Read formatted metrics from one or more HTTP endpoints +# [[inputs.http]] +# ## One or more URLs from which to read formatted metrics +# urls = [ +# "http://localhost/metrics" +# ] +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" +# +# ## HTTP entity-body to send with POST/PUT requests. +# # body = "" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# # data_format = "influx" + + # # HTTP/HTTPS request given an address a method and a timeout # [[inputs.http_response]] # ## Server address (default http://localhost) # # address = "http://localhost" # +# ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set) +# # http_proxy = "http://localhost:8888" +# # ## Set response_timeout (default 5 seconds) # # response_timeout = "5s" # @@ -1441,11 +2298,11 @@ # # response_string_match = "ok" # # response_string_match = "\".*_status\".?:.?\"up\"" # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # # ## HTTP Request Headers (all values must be strings) @@ -1481,6 +2338,13 @@ # # "my_tag_2" # # ] # +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# # ## HTTP parameters (all values must be strings). For "GET" requests, data # ## will be included in the query. For "POST" requests, data will be included # ## in the request body as "x-www-form-urlencoded". @@ -1492,13 +2356,29 @@ # # [inputs.httpjson.headers] # # X-Auth-Token = "my-xauth-token" # # apiVersion = "v1" + + +# # Gather Icinga2 status +# [[inputs.icinga2]] +# ## Required Icinga2 server address (default: "https://localhost:5665") +# # server = "https://localhost:5665" # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification -# # insecure_skip_verify = false +# ## Required Icinga2 object type ("services" or "hosts, default "services") +# # object_type = "services" +# +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true # # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints @@ -1513,11 +2393,11 @@ # "http://localhost:8086/debug/vars" # ] # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # # ## http request & header timeout @@ -1532,16 +2412,27 @@ # # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. # [[inputs.interrupts]] +# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is +# ## stored as a field. 
+# ## +# ## The default is false for backwards compatibility, and will be changed to +# ## true in a future version. It is recommended to set to true on new +# ## deployments. +# # cpu_as_tag = false +# # ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. # # [inputs.interrupts.tagdrop] -# # irq = [ "NET_RX", "TASKLET" ] +# # irq = [ "NET_RX", "TASKLET" ] # # Read metrics from the bare metal servers via IPMI # [[inputs.ipmi_sensor]] # ## optionally specify the path to the ipmitool executable # # path = "/usr/bin/ipmitool" -# # +# ## +# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR +# # privilege = "ADMINISTRATOR" +# ## # ## optionally specify one or more servers via a url matching # ## [username[:password]@][protocol[(address)]] # ## e.g. @@ -1557,6 +2448,20 @@ # # ## Timeout for the ipmitool command to complete # timeout = "20s" +# +# ## Schema Version: (Optional, defaults to version 1) +# metric_version = 2 + + +# # Gather packets and bytes counters from Linux ipsets +# [[inputs.ipset]] +# ## By default, we only show sets which have already matched at least 1 packet. +# ## set include_unmatched_sets = true to gather them all. +# include_unmatched_sets = false +# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save") +# use_sudo = false +# ## The default timeout of 1s for ipset execution can be overridden here: +# # timeout = "1s" # # Gather packets and bytes throughput from iptables @@ -1569,6 +2474,8 @@ # ## Setting 'use_lock' to true runs iptables with the "-w" option. # ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl") # use_lock = false +# ## Define an alternate executable, such as "ip6tables". Default is "iptables". +# # binary = "ip6tables" # ## defines the table to monitor: # table = "filter" # ## defines the chains to monitor. @@ -1577,6 +2484,55 @@ # chains = [ "INPUT" ] +# # Collect virtual and real server stats from Linux IPVS +# [[inputs.ipvs]] +# # no configuration + + +# # Read jobs and cluster metrics from Jenkins instances +# [[inputs.jenkins]] +# ## The Jenkins URL +# url = "http://my-jenkins-instance:8080" +# # username = "admin" +# # password = "admin" +# +# ## Set response_timeout +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional Max Job Build Age filter +# ## Default 1 hour, ignore builds older than max_build_age +# # max_build_age = "1h" +# +# ## Optional Sub Job Depth filter +# ## Jenkins can have unlimited layer of sub jobs +# ## This config will limit the layers of pulling, default value 0 means +# ## unlimited pulling until no more sub jobs +# # max_subjob_depth = 0 +# +# ## Optional Sub Job Per Layer +# ## In workflow-multibranch-plugin, each branch will be created as a sub job. 
+# ## This config will limit to call only the lasted branches in each layer, +# ## empty will use default value 10 +# # max_subjob_per_layer = 10 +# +# ## Jobs to exclude from gathering +# # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"] +# +# ## Nodes to exclude from gathering +# # node_exclude = [ "node1", "node2" ] +# +# ## Worker pool for jenkins plugin only +# ## Empty this field will use default value 5 +# # max_connections = 5 + + # # Read JMX metrics through Jolokia # [[inputs.jolokia]] # # DEPRECATED: the jolokia plugin has been deprecated in favor of the @@ -1657,10 +2613,10 @@ # # password = "" # # response_timeout = "5s" # -# ## Optional SSL config -# # ssl_ca = "/var/private/ca.pem" -# # ssl_cert = "/var/private/client.pem" -# # ssl_key = "/var/private/client-key.pem" +# ## Optional TLS config +# # tls_ca = "/var/private/ca.pem" +# # tls_cert = "/var/private/client.pem" +# # tls_key = "/var/private/client-key.pem" # # insecure_skip_verify = false # # ## Add metrics to read @@ -1682,22 +2638,22 @@ # # password = "" # # response_timeout = "5s" # -# ## Optional SSL config -# # ssl_ca = "/var/private/ca.pem" -# # ssl_cert = "/var/private/client.pem" -# # ssl_key = "/var/private/client-key.pem" +# ## Optional TLS config +# # tls_ca = "/var/private/ca.pem" +# # tls_cert = "/var/private/client.pem" +# # tls_key = "/var/private/client-key.pem" # # insecure_skip_verify = false # # ## Add proxy targets to query # # default_target_username = "" # # default_target_password = "" -# [[inputs.jolokia_proxy.target]] +# [[inputs.jolokia2_proxy.target]] # url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi" -# # username = "" -# # password = "" +# # username = "" +# # password = "" # # ## Add metrics to read -# [[inputs.jolokia_proxy.metric]] +# [[inputs.jolokia2_proxy.metric]] # name = "java_runtime" # mbean = "java.lang:type=Runtime" # paths = ["Uptime"] @@ -1713,6 +2669,13 @@ # # ## Time limit for http requests # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false # # Get kernel statistics from /proc/vmstat @@ -1720,22 +2683,78 @@ # # no configuration -# # Read metrics from the kubernetes kubelet api -# [[inputs.kubernetes]] -# ## URL for the kubelet -# url = "http://1.1.1.1:10255" +# # Read status information from one or more Kibana servers +# [[inputs.kibana]] +# ## specify a list of one or more Kibana servers +# servers = ["http://localhost:5601"] # -# ## Use bearer token for authorization -# # bearer_token = /path/to/bearer/token +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from the Kubernetes api +# [[inputs.kube_inventory]] +# ## URL for the Kubernetes API +# url = "https://127.0.0.1" +# +# ## Namespace to use. Set to "" to use all namespaces. +# # namespace = "default" +# +# ## Use bearer token for authorization. 
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
#
# ## Set response_timeout (default 5 seconds)
# # response_timeout = "5s"
#
-# ## Optional SSL Config
-# # ssl_ca = /path/to/cafile
-# # ssl_cert = /path/to/certfile
-# # ssl_key = /path/to/keyfile
-# ## Use SSL but skip chain & host verification
+# ## Optional Resources to exclude from gathering
+# ## Leave this blank to gather everything available.
+# ## Values can be - "daemonsets", "deployments", "nodes", "persistentvolumes",
+# ## "persistentvolumeclaims", "pods", "statefulsets"
+# # resource_exclude = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## Optional Resources to include when gathering
+# ## Overrides resource_exclude if both set.
+# # resource_include = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## Optional TLS Config
+# # tls_ca = "/path/to/cafile"
+# # tls_cert = "/path/to/certfile"
+# # tls_key = "/path/to/keyfile"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from the kubernetes kubelet api
+# [[inputs.kubernetes]]
+# ## URL for the kubelet
+# url = "http://127.0.0.1:10255"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = /path/to/cafile
+# # tls_cert = /path/to/certfile
+# # tls_key = /path/to/keyfile
+# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
@@ -1779,6 +2798,16 @@
# # campaign_id = ""
+
+
+# # Read metrics from one or many mcrouter servers
+# [[inputs.mcrouter]]
+# ## An array of addresses to gather stats about. Specify an ip or hostname
+# ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc.
+# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"]
+#
+# ## Timeout for metric collections from all servers. Minimum timeout is "1s".
+# # timeout = "5s"


# # Read metrics from one or many memcached servers
# [[inputs.memcached]]
# ## An array of address to gather stats about. Specify an ip on hostname
@@ -1792,7 +2821,7 @@
# ## Timeout, in ms.
# timeout = 100
# ## A list of Mesos masters.
-# masters = ["localhost:5050"]
+# masters = ["http://localhost:5050"]
# ## Master metrics groups to be collected, by default, all enabled.
# master_collections = [ # "resources", @@ -1816,6 +2845,13 @@ # # "tasks", # # "messages", # # ] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false # # Collects scores from a minecraft server's scoreboard using the RCON protocol @@ -1836,16 +2872,43 @@ # ## mongodb://user:auth_key@10.10.3.30:27017, # ## mongodb://10.10.3.33:18832, # servers = ["mongodb://127.0.0.1:27017"] -# gather_perdb_stats = false # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification +# ## When true, collect per database stats +# # gather_perdb_stats = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = false +# # Aggregates the contents of multiple files into a single point +# [[inputs.multifile]] +# ## Base directory where telegraf will look for files. +# ## Omit this option to use absolute paths. +# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0" +# +# ## If true, Telegraf discard all data when a single file can't be read. +# ## Else, Telegraf omits the field generated from this file. +# # fail_early = true +# +# ## Files to parse each interval. +# [[inputs.multifile.file]] +# file = "in_pressure_input" +# dest = "pressure" +# conversion = "float" +# [[inputs.multifile.file]] +# file = "in_temp_input" +# dest = "temperature" +# conversion = "float(3)" +# [[inputs.multifile.file]] +# file = "in_humidityrelative_input" +# dest = "humidityrelative" +# conversion = "float(3)" + + # # Read metrics from one or many mysql servers # [[inputs.mysql]] # ## specify servers via a url matching: @@ -1857,6 +2920,20 @@ # # # ## If no servers are specified, then localhost is used as the host. # servers = ["tcp(127.0.0.1:3306)/"] +# +# ## Selects the metric output format. +# ## +# ## This option exists to maintain backwards compatibility, if you have +# ## existing metrics do not set or change this value until you are ready to +# ## migrate to the new format. +# ## +# ## If you do not have existing metrics from this plugin set to the latest +# ## version. 
+# ## +# ## Telegraf >=1.6: metric_version = 2 +# ## <1.6: metric_version = 1 (or unset) +# metric_version = 2 +# # ## the limits for metrics form perf_events_statements # perf_events_statements_digest_text_limit = 120 # perf_events_statements_limit = 250 @@ -1871,7 +2948,7 @@ # ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST # gather_process_list = true # # -# ## gather thread state counts from INFORMATION_SCHEMA.USER_STATISTICS +# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS # gather_user_statistics = true # # # ## gather auto_increment columns and max values from information schema @@ -1907,10 +2984,36 @@ # ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) # interval_slow = "30m" # -# ## Optional SSL Config (will be used if tls=custom parameter specified in server uri) -# ssl_ca = "/etc/telegraf/ca.pem" -# ssl_cert = "/etc/telegraf/cert.pem" -# ssl_key = "/etc/telegraf/key.pem" +# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri) +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Provides metrics about the state of a NATS server +# [[inputs.nats]] +# ## The address of the monitoring endpoint of the NATS server +# server = "http://localhost:8222" +# +# ## Maximum time to receive response +# # response_timeout = "5s" + + +# # Neptune Apex data collector +# [[inputs.neptune_apex]] +# ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex. +# ## Measurements will be logged under "apex". +# +# ## The base URL of the local Apex(es). If you specify more than one server, they will +# ## be differentiated by the "source" tag. +# servers = [ +# "http://apex.local", +# ] +# +# ## The response_timeout specifies how long to wait for a reply from the Apex. +# #response_timeout = "5s" # # Read metrics about network interface usage @@ -1920,9 +3023,15 @@ # ## regardless of status. # ## # # interfaces = ["eth0"] +# ## +# ## On linux systems telegraf also collects protocol stats. +# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. +# ## +# # ignore_protocol_stats = false +# ## -# # TCP or UDP 'ping' given url and collect response time in seconds +# # Collect response time of a TCP or UDP connection # [[inputs.net_response]] # ## Protocol, must be "tcp" or "udp" # ## NOTE: because the "udp" protocol does not respond to requests, it requires @@ -1930,11 +3039,12 @@ # protocol = "tcp" # ## Server address (default localhost) # address = "localhost:80" +# # ## Set timeout -# timeout = "1s" +# # timeout = "1s" # # ## Set read timeout (only used if expecting a response) -# read_timeout = "1s" +# # read_timeout = "1s" # # ## The following options are required for UDP checks. For TCP, they are # ## optional. The plugin will send the given string to the server and then @@ -1943,6 +3053,9 @@ # # send = "ssh" # ## expected string in answer # # expect = "ssh" +# +# ## Uncomment to remove deprecated fields +# # fieldexclude = ["result_type", "string_found"] # # Read TCP metrics such as established, time wait and sockets counts. @@ -1955,10 +3068,11 @@ # # An array of Nginx stub_status URI to gather stats. 
# urls = ["http://localhost/server_status"] # -# # TLS/SSL configuration -# ssl_ca = "/etc/telegraf/ca.pem" -# ssl_cert = "/etc/telegraf/cert.cer" -# ssl_key = "/etc/telegraf/key.key" +# ## Optional TLS Config +# tls_ca = "/etc/telegraf/ca.pem" +# tls_cert = "/etc/telegraf/cert.cer" +# tls_key = "/etc/telegraf/key.key" +# ## Use TLS but skip chain & host verification # insecure_skip_verify = false # # # HTTP response timeout (default: 5s) @@ -1974,10 +3088,68 @@ # response_timeout = "5s" +# # Read Nginx Plus Api documentation +# [[inputs.nginx_plus_api]] +# ## An array of API URI to gather stats. +# urls = ["http://localhost/api"] +# +# # Nginx API version, default: 3 +# # api_version = 3 +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" + + +# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module) +# [[inputs.nginx_upstream_check]] +# ## An URL where Nginx Upstream check module is enabled +# ## It should be set to return a JSON formatted response +# url = "http://127.0.0.1/status?format=json" +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "check.example.com" +# +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx virtual host traffic status module information (nginx-module-vts) +# [[inputs.nginx_vts]] +# ## An array of ngx_http_status_module or status URI to gather stats. +# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" + + # # Read NSQ topic and channel statistics. # [[inputs.nsq]] # ## An array of NSQD HTTP API endpoints -# endpoints = ["http://localhost:4151"] +# endpoints = ["http://localhost:4151"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false # # Collect kernel snmp counters and network interface statistics @@ -1998,6 +3170,15 @@ # dns_lookup = true +# # Pulls statistics from nvidia GPUs attached to the host +# [[inputs.nvidia_smi]] +# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/usr/bin/nvidia-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" + + # # OpenLDAP cn=Monitor plugin # [[inputs.openldap]] # host = "localhost" @@ -2006,17 +3187,21 @@ # # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption. # # note that port will likely need to be changed to 636 for ldaps # # valid options: "" | "starttls" | "ldaps" -# ssl = "" +# tls = "" # # # skip peer certificate verification. Default is false. # insecure_skip_verify = false # # # Path to PEM-encoded Root certificate to use to verify server certificate -# ssl_ca = "/etc/ssl/certs.pem" +# tls_ca = "/etc/ssl/certs.pem" # # # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed. # bind_dn = "" # bind_password = "" +# +# # Reverse metric names so they sort more naturally. Recommended. 
+# # This defaults to false if unset, but is set to true when generating a new config +# reverse_metric_names = true # # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver @@ -2079,19 +3264,32 @@ # # Ping given url(s) and return statistics # [[inputs.ping]] -# ## NOTE: this plugin forks the ping command. You may need to set capabilities -# ## via setcap cap_net_raw+p /bin/ping -# # # ## List of urls to ping -# urls = ["www.google.com"] # required -# ## number of pings to send per collection (ping -c ) +# urls = ["example.org"] +# +# ## Number of pings to send per collection (ping -c ) # # count = 1 -# ## interval, in s, at which to ping. 0 == default (ping -i ) +# +# ## Interval, in s, at which to ping. 0 == default (ping -i ) +# ## Not available in Windows. # # ping_interval = 1.0 -# ## per-ping timeout, in s. 0 == no timeout (ping -W ) +# +# ## Per-ping timeout, in s. 0 == no timeout (ping -W ) # # timeout = 1.0 -# ## interface to send ping from (ping -I ) +# +# ## Total-ping deadline, in s. 0 == no deadline (ping -w ) +# # deadline = 10 +# +# ## Interface or source address to send ping from (ping -I ) +# ## on Darwin and Freebsd only source address possible: (ping -S ) # # interface = "" +# +# ## Specify the ping executable binary, default is "ping" +# # binary = "ping" +# +# ## Arguments for ping command +# ## when arguments is not empty, other options (ping_interval, timeout, etc) will be ignored +# # arguments = ["-c", "3"] # # Measure postfix queue statistics @@ -2101,90 +3299,6 @@ # # queue_directory = "/var/spool/postfix" -# # Read metrics from one or many postgresql servers -# [[inputs.postgresql]] -# ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ -# ## ?sslmode=[disable|verify-ca|verify-full] -# ## or a simple string: -# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production -# ## -# ## All connection parameters are optional. -# ## -# ## Without the dbname parameter, the driver will default to a database -# ## with the same name as the user. This dbname is just for instantiating a -# ## connection with the server and doesn't restrict the databases we are trying -# ## to grab metrics for. -# ## -# address = "host=localhost user=postgres sslmode=disable" -# -# ## A list of databases to explicitly ignore. If not specified, metrics for all -# ## databases are gathered. Do NOT use with the 'databases' option. -# # ignored_databases = ["postgres", "template0", "template1"] -# -# ## A list of databases to pull metrics about. If not specified, metrics for all -# ## databases are gathered. Do NOT use with the 'ignored_databases' option. -# # databases = ["app_production", "testing"] - - -# # Read metrics from one or many postgresql servers -# [[inputs.postgresql_extensible]] -# ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ -# ## ?sslmode=[disable|verify-ca|verify-full] -# ## or a simple string: -# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production -# # -# ## All connection parameters are optional. # -# ## Without the dbname parameter, the driver will default to a database -# ## with the same name as the user. This dbname is just for instantiating a -# ## connection with the server and doesn't restrict the databases we are trying -# ## to grab metrics for. -# # -# address = "host=localhost user=postgres sslmode=disable" -# ## A list of databases to pull metrics about. 
If not specified, metrics for all -# ## databases are gathered. -# ## databases = ["app_production", "testing"] -# # -# # outputaddress = "db01" -# ## A custom name for the database that will be used as the "server" tag in the -# ## measurement output. If not specified, a default one generated from -# ## the connection address is used. -# # -# ## Define the toml config where the sql queries are stored -# ## New queries can be added, if the withdbname is set to true and there is no -# ## databases defined in the 'databases field', the sql query is ended by a -# ## 'is not null' in order to make the query succeed. -# ## Example : -# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become -# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" -# ## because the databases variable was set to ['postgres', 'pgbench' ] and the -# ## withdbname was true. Be careful that if the withdbname is set to false you -# ## don't have to define the where clause (aka with the dbname) the tagvalue -# ## field is used to define custom tags (separated by commas) -# ## The optional "measurement" value can be used to override the default -# ## output measurement name ("postgresql"). -# # -# ## Structure : -# ## [[inputs.postgresql_extensible.query]] -# ## sqlquery string -# ## version string -# ## withdbname boolean -# ## tagvalue string (comma separated) -# ## measurement string -# [[inputs.postgresql_extensible.query]] -# sqlquery="SELECT * FROM pg_stat_database" -# version=901 -# withdbname=false -# tagvalue="" -# measurement="" -# [[inputs.postgresql_extensible.query]] -# sqlquery="SELECT * FROM pg_stat_bgwriter" -# version=901 -# withdbname=false -# tagvalue="postgresql.stats" - - # # Read metrics from one or many PowerDNS servers # [[inputs.powerdns]] # ## An array of sockets to gather stats about. @@ -2194,7 +3308,6 @@ # # Monitor process cpu and memory usage # [[inputs.procstat]] -# ## Must specify one of: pid_file, exe, or pattern # ## PID file to monitor process # pid_file = "/var/run/nginx.pid" # ## executable name (ie, pgrep ) @@ -2208,37 +3321,26 @@ # ## CGroup name or path # # cgroup = "systemd/system.slice/nginx.service" # +# ## Windows service name +# # win_service = "" +# # ## override for process_name # ## This is optional; default is sourced from /proc//status # # process_name = "bar" +# # ## Field name prefix -# prefix = "" -# ## comment this out if you want raw cpu_time stats -# fielddrop = ["cpu_time_*"] -# ## This is optional; moves pid into a tag instead of a field -# pid_tag = false - - -# # Read metrics from one or many prometheus clients -# [[inputs.prometheus]] -# ## An array of urls to scrape metrics from. -# urls = ["http://localhost:9100/metrics"] +# # prefix = "" # -# ## An array of Kubernetes services to scrape metrics from. -# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] +# ## Add PID as a tag instead of a field; useful to differentiate between +# ## processes whose tags are otherwise the same. Can create a large number +# ## of series, use judiciously. 
+# # pid_tag = false # -# ## Use bearer token for authorization -# # bearer_token = /path/to/bearer/token -# -# ## Specify timeout duration for slower prometheus clients (default is 3s) -# # response_timeout = "3s" -# -# ## Optional SSL Config -# # ssl_ca = /path/to/cafile -# # ssl_cert = /path/to/certfile -# # ssl_key = /path/to/keyfile -# ## Use SSL but skip chain & host verification -# # insecure_skip_verify = false +# ## Method to use when finding process IDs. Can be one of 'pgrep', or +# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while +# ## the native finder performs the search directly in a manor dependent on the +# ## platform. Default is 'pgrep' +# # pid_finder = "pgrep" # # Reads last_run_summary.yaml file and converts to measurments @@ -2257,11 +3359,11 @@ # # username = "guest" # # password = "guest" # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # # ## Optional request timeouts @@ -2281,6 +3383,15 @@ # ## A list of queues to gather as the rabbitmq_queue measurement. If not # ## specified, metrics for all queues are gathered. # # queues = ["telegraf"] +# +# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not +# ## specified, metrics for all exchanges are gathered. +# # exchanges = ["telegraf"] +# +# ## Queues to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all queues +# queue_name_include = [] +# queue_name_exclude = [] # # Read raindrops stats (raindrops - real-time stats for preforking Rack servers) @@ -2301,6 +3412,16 @@ # ## If no servers are specified, then localhost is used as the host. # ## If no port is specified, 6379 is used # servers = ["tcp://localhost:6379"] +# +# ## specify server password +# # password = "s#cr@t%" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true # # Read metrics from one or many RethinkDB servers @@ -2352,6 +3473,9 @@ # ## Remove numbers from field names. # ## If true, a field name like 'temp1_input' will be changed to 'temp_input'. # # remove_numbers = true +# +# ## Timeout is the maximum amount of time that the sensors command can run. +# # timeout = "5s" # # Read metrics from storage devices supporting S.M.A.R.T. @@ -2571,6 +3695,108 @@ # # servers = [ # # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", # # ] +# +# ## Optional parameter, setting this to 2 will use a new version +# ## of the collection queries that break compatibility with the original +# ## dashboards. 
+# query_version = 2 +# +# ## If you are using AzureDB, setting this to true will gather resource utilization metrics +# # azuredb = false +# +# ## If you would like to exclude some of the metrics queries, list them here +# ## Possible choices: +# ## - PerformanceCounters +# ## - WaitStatsCategorized +# ## - DatabaseIO +# ## - DatabaseProperties +# ## - CPUHistory +# ## - DatabaseSize +# ## - DatabaseStats +# ## - MemoryClerk +# ## - VolumeSpace +# ## - PerformanceMetrics +# # exclude_query = [ 'DatabaseIO' ] + + +# # Gather timeseries from Google Cloud Platform v3 monitoring API +# [[inputs.stackdriver]] +# ## GCP Project +# project = "erudite-bloom-151019" +# +# ## Include timeseries that start with the given metric type. +# metric_type_prefix_include = [ +# "compute.googleapis.com/", +# ] +# +# ## Exclude timeseries that start with the given metric type. +# # metric_type_prefix_exclude = [] +# +# ## Many metrics are updated once per minute; it is recommended to override +# ## the agent level interval with a value of 1m or greater. +# interval = "1m" +# +# ## Maximum number of API calls to make per second. The quota for accounts +# ## varies, it can be viewed on the API dashboard: +# ## https://cloud.google.com/monitoring/quotas#quotas_and_limits +# # rate_limit = 14 +# +# ## The delay and window options control the number of points selected on +# ## each gather. When set, metrics are gathered between: +# ## start: now() - delay - window +# ## end: now() - delay +# # +# ## Collection delay; if set too low metrics may not yet be available. +# # delay = "5m" +# # +# ## If unset, the window will start at 1m and be updated dynamically to span +# ## the time between calls (approximately the length of the plugin interval). +# # window = "1m" +# +# ## TTL for cached list of metric types. This is the maximum amount of time +# ## it may take to discover new metrics. +# # cache_ttl = "1h" +# +# ## If true, raw bucket counts are collected for distribution value types. +# ## For a more lightweight collection, you may wish to disable and use +# ## distribution_aggregation_aligners instead. +# # gather_raw_distribution_buckets = true +# +# ## Aggregate functions to be used for metrics whose value type is +# ## distribution. These aggregate values are recorded in in addition to raw +# ## bucket counts; if they are enabled. +# ## +# ## For a list of aligner strings see: +# ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner +# # distribution_aggregation_aligners = [ +# # "ALIGN_PERCENTILE_99", +# # "ALIGN_PERCENTILE_95", +# # "ALIGN_PERCENTILE_50", +# # ] +# +# ## Filters can be added to reduce the number of time series matched. All +# ## functions are supported: starts_with, ends_with, has_substring, and +# ## one_of. Only the '=' operator is supported. +# ## +# ## The logical operators when combining filters are defined statically using +# ## the following values: +# ## filter ::= {AND } +# ## resource_labels ::= {OR } +# ## metric_labels ::= {OR } +# ## +# ## For more details, see https://cloud.google.com/monitoring/api/v3/filters +# # +# ## Resource labels refine the time series selection with the following expression: +# ## resource.labels. = +# # [[inputs.stackdriver.filter.resource_labels]] +# # key = "instance_name" +# # value = 'starts_with("localhost")' +# # +# ## Metric labels refine the time series selection with the following expression: +# ## metric.labels. 
= +# # [[inputs.stackdriver.filter.metric_labels]] +# # key = "device_name" +# # value = 'one_of("sda", "sdb")' # # Sysstat metrics collector @@ -2647,6 +3873,27 @@ # # virtual_servers = [1] +# # Read metrics about temperature +# [[inputs.temp]] +# # no configuration + + +# # Read Tengine's basic status information (ngx_http_reqstat_module) +# [[inputs.tengine]] +# # An array of Tengine reqstat module URI to gather stats. +# urls = ["http://127.0.0.1/us"] +# +# # HTTP response timeout (default: 5s) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.cer" +# # tls_key = "/etc/telegraf/key.key" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Gather metrics from the Tomcat server status page. # [[inputs.tomcat]] # ## URL of the Tomcat server status @@ -2659,11 +3906,11 @@ # ## Request timeout # # timeout = "5s" # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = false @@ -2681,19 +3928,27 @@ # pools = ["redis_pool", "mc_pool"] -# # A plugin to collect stats from Unbound - a validating, recursive, and caching DNS resolver +# # A plugin to collect stats from the Unbound DNS resolver # [[inputs.unbound]] +# ## Address of server to connect to, read from unbound conf default, optionally ':port' +# ## Will lookup IP if given a hostname +# server = "127.0.0.1:8953" +# # ## If running as a restricted user you can prepend sudo for additional access: -# #use_sudo = false +# # use_sudo = false # # ## The default location of the unbound-control binary can be overridden with: -# binary = "/usr/sbin/unbound-control" +# # binary = "/usr/sbin/unbound-control" # # ## The default timeout of 1s can be overriden with: -# timeout = "1s" +# # timeout = "1s" # -# ## Use the builtin fielddrop/fieldpass telegraf filters in order to keep/remove specific fields -# fieldpass = ["total_*", "num_*","time_up", "mem_*"] +# ## When set to true, thread metrics are tagged with the thread id. +# ## +# ## The default is false for backwards compatibility, and will be changed to +# ## true in a future version. It is recommended to set to true on new +# ## deployments. 
+# thread_as_tag = false # # A plugin to collect stats from Varnish HTTP Cache @@ -2712,7 +3967,34 @@ # # ## Optional name for the varnish instance (or working directory) to query # ## Usually appened after -n in varnish cli -# #name = instanceName +# # instance_name = instanceName +# +# ## Timeout for varnishstat command +# # timeout = "1s" + + +# # Monitor wifi signal strength and quality +# [[inputs.wireless]] +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" + + +# # Reads metrics from a SSL certificate +# [[inputs.x509_cert]] +# ## List certificate sources +# sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"] +# +# ## Timeout for SSL connection +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false # # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools @@ -2724,7 +4006,9 @@ # ## By default, telegraf gather all zfs stats # ## If not specified, then default is: # # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] -# +# ## For Linux, the default is: +# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats", +# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] # ## By default, don't gather zpool stats # # poolMetrics = false @@ -2737,6 +4021,17 @@ # ## If no servers are specified, then localhost is used as the host. # ## If no port is specified, 2181 is used # servers = [":2181"] +# +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". +# # timeout = "5s" +# +# ## Optional TLS Config +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true @@ -2746,28 +4041,67 @@ # # AMQP consumer plugin # [[inputs.amqp_consumer]] -# ## AMQP url -# url = "amqp://localhost:5672/influxdb" -# ## AMQP exchange +# ## Broker to consume from. +# ## deprecated in 1.7; use the brokers option +# # url = "amqp://localhost:5672/influxdb" +# +# ## Brokers to consume from. If multiple brokers are specified a random broker +# ## will be selected anytime a connection is established. This can be +# ## helpful for load balancing when not using a dedicated load balancer. +# brokers = ["amqp://localhost:5672/influxdb"] +# +# ## Authentication credentials for the PLAIN auth_method. +# # username = "" +# # password = "" +# +# ## Exchange to declare and consume from. # exchange = "telegraf" -# ## AMQP queue name +# +# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". +# # exchange_type = "topic" +# +# ## If true, exchange will be passively declared. +# # exchange_passive = false +# +# ## Exchange durability can be either "transient" or "durable". +# # exchange_durability = "durable" +# +# ## Additional exchange arguments. +# # exchange_arguments = { } +# # exchange_arguments = {"hash_propery" = "timestamp"} +# +# ## AMQP queue name. # queue = "telegraf" -# ## Binding Key +# +# ## AMQP queue durability can be "transient" or "durable". +# queue_durability = "durable" +# +# ## Binding Key. # binding_key = "#" # # ## Maximum number of messages server should give to the worker. 
-# prefetch_count = 50 +# # prefetch_count = 50 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 # # ## Auth method. PLAIN and EXTERNAL are supported # ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as # ## described here: https://www.rabbitmq.com/plugins.html # # auth_method = "PLAIN" # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # # ## Data format to consume. @@ -2777,6 +4111,150 @@ # data_format = "influx" +# # Read Cassandra metrics through Jolokia +# [[inputs.cassandra]] +# ## DEPRECATED: The cassandra plugin has been deprecated. Please use the +# ## jolokia2 plugin instead. +# ## +# ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 +# +# context = "/jolokia/read" +# ## List of cassandra servers exposing jolokia read service +# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"] +# ## List of metrics collected on above servers +# ## Each metric consists of a jmx path. +# ## This will collect all heap memory usage metrics from the jvm and +# ## ReadLatency metrics for all keyspaces and tables. +# ## "type=Table" in the query works with Cassandra3.0. Older versions might +# ## need to use "type=ColumnFamily" +# metrics = [ +# "/java.lang:type=Memory/HeapMemoryUsage", +# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency" +# ] + + +# # Read metrics from Google PubSub +# [[inputs.cloud_pubsub]] +# ## Required. Name of Google Cloud Platform (GCP) Project that owns +# ## the given PubSub subscription. +# project = "my-project" +# +# ## Required. Name of PubSub subscription to ingest metrics from. +# subscription = "my-subscription" +# +# ## Required. Data format to consume. +# ## Each data format has its own unique set of configuration options. +# ## Read more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" +# +# ## Optional. Number of seconds to wait before attempting to restart the +# ## PubSub subscription receiver after an unexpected error. +# ## If the streaming pull for a PubSub Subscription fails (receiver), +# ## the agent attempts to restart receiving messages after this many seconds. +# # retry_delay_seconds = 5 +# +# ## Optional. Maximum byte length of a message to consume. +# ## Larger messages are dropped with an error. If less than 0 or unspecified, +# ## treated as no limit. 
+# # max_message_len = 1000000 +# +# ## Optional. Maximum messages to read from PubSub that have not been written +# ## to an output. Defaults to 1000. +# ## For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message contains 10 metrics and the output +# ## metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## The following are optional Subscription ReceiveSettings in PubSub. +# ## Read more about these values: +# ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings +# +# ## Optional. Maximum number of seconds for which a PubSub subscription +# ## should auto-extend the PubSub ACK deadline for each message. If less than +# ## 0, auto-extension is disabled. +# # max_extension = 0 +# +# ## Optional. Maximum number of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_messages = 0 +# +# ## Optional. Maximum size in bytes of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_bytes = 0 +# +# ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn +# ## to pull messages from PubSub concurrently. This limit applies to each +# ## subscription separately and is treated as the PubSub default if less than +# ## 1. Note this setting does not limit the number of messages that can be +# ## processed concurrently (use "max_outstanding_messages" instead). +# # max_receiver_go_routines = 0 + + +# # Google Cloud Pub/Sub Push HTTP listener +# [[inputs.cloud_pubsub_push]] +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Application secret to verify messages originate from Cloud Pub/Sub +# # token = "" +# +# ## Path to listen to. +# # path = "/" +# +# ## Maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## Maximum duration before timing out write of the response. This should be set to a value +# ## large enough that you can send at least 'metric_batch_size' number of messages within the +# ## duration. +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag. +# # add_meta = false +# +# ## Optional. Maximum messages to read from PubSub that have not been written +# ## to an output. Defaults to 1000. +# ## For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message contains 10 metrics and the output +# ## metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. 
+# # max_undelivered_messages = 1000 +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + # # Influx HTTP write listener # [[inputs.http_listener]] # ## Address and port to host HTTP listener on @@ -2788,12 +4266,12 @@ # write_timeout = "10s" # # ## Maximum allowed http request body size in bytes. -# ## 0 means to use the default of 536,870,912 bytes (500 mebibytes) -# max_body_size = 0 +# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) +# max_body_size = "500MiB" # # ## Maximum line size allowed to be sent in bytes. # ## 0 means to use the default of 65536 bytes (64 kibibytes) -# max_line_size = 0 +# max_line_size = "64KiB" # # ## Set one or more allowed client CA certificate file names to # ## enable mutually authenticated TLS connections @@ -2802,6 +4280,130 @@ # ## Add service certificate and key # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" + + +# # Generic HTTP write listener +# [[inputs.http_listener_v2]] +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Path to listen to. +# # path = "/telegraf" +# +# ## HTTP methods to accept. +# # methods = ["POST", "PUT"] +# +# ## maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Influx HTTP write listener +# [[inputs.influxdb_listener]] +# ## Address and port to host HTTP listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) +# max_body_size = "500MiB" +# +# ## Maximum line size allowed to be sent in bytes. 
+# ## 0 means to use the default of 65536 bytes (64 kibibytes) +# max_line_size = "64KiB" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" + + +# # Read JTI OpenConfig Telemetry from listed sensors +# [[inputs.jti_openconfig_telemetry]] +# ## List of device addresses to collect telemetry from +# servers = ["localhost:1883"] +# +# ## Authentication details. Username and password are must if device expects +# ## authentication. Client ID must be unique when connecting from multiple instances +# ## of telegraf to the same device +# username = "user" +# password = "pass" +# client_id = "telegraf" +# +# ## Frequency to get data +# sample_frequency = "1000ms" +# +# ## Sensors to subscribe for +# ## A identifier for each sensor can be provided in path by separating with space +# ## Else sensor path will be used as identifier +# ## When identifier is used, we can provide a list of space separated sensors. +# ## A single subscription will be created with all these sensors and data will +# ## be saved to measurement with this identifier name +# sensors = [ +# "/interfaces/", +# "collection /components/ /lldp", +# ] +# +# ## We allow specifying sensor group level reporting rate. To do this, specify the +# ## reporting rate in Duration at the beginning of sensor paths / collection +# ## name. For entries without reporting rate, we use configured sample frequency +# sensors = [ +# "1000ms customReporting /interfaces /lldp", +# "2000ms collection /components", +# "/interfaces", +# ] +# +# ## x509 Certificate to use with TLS connection. If it is not provided, an insecure +# ## channel will be opened with server +# ssl_cert = "/etc/telegraf/cert.pem" +# +# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms. +# ## Failed streams/calls will not be retried if 0 is provided +# retry_delay = "1000ms" +# +# ## To treat all string values as tags, set this to true +# str_as_tags = false # # Read metrics from Kafka topic(s) @@ -2810,12 +4412,23 @@ # brokers = ["localhost:9092"] # ## topic(s) to consume # topics = ["telegraf"] +# ## Add topic as tag if topic_tag is not empty +# # topic_tag = "" # -# ## Optional SSL Config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification +# ## Optional Client id +# # client_id = "Telegraf" +# +# ## Set the minimal supported Kafka version. Setting this enables the use of new +# ## Kafka features and APIs. Of particular interest, lz4 compression +# ## requires at least version 0.10.0.0. 
+# ## ex: version = "1.1.0" +# # version = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # # ## Optional SASL Config @@ -2826,16 +4439,25 @@ # consumer_group = "telegraf_metrics_consumers" # ## Offset (must be either "oldest" or "newest") # offset = "oldest" +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 1000000 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 # # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" -# -# ## Maximum length of a message to consume, in bytes (default 0/unlimited); -# ## larger messages are dropped -# max_message_len = 65536 # # Read metrics from Kafka topic(s) @@ -2862,6 +4484,62 @@ # max_message_len = 65536 +# # Configuration for the AWS Kinesis input. +# [[inputs.kinesis_consumer]] +# ## Amazon REGION of kinesis endpoint. +# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Assumed credentials via STS if role_arn is specified +# ## 2) explicit credentials from 'access_key' and 'secret_key' +# ## 3) shared profile from 'profile' +# ## 4) environment variables +# ## 5) shared credentials file +# ## 6) EC2 Instance Profile +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # profile = "" +# # shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# +# ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported) +# # shard_iterator_type = "TRIM_HORIZON" +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## Optional
+# ## Configuration for a dynamodb checkpoint
+# [inputs.kinesis_consumer.checkpoint_dynamodb]
+# ## unique name for this consumer
+# app_name = "default"
+# table_name = "default"
+
+
 # # Stream and parse log file(s).
 # [[inputs.logparser]]
 # ## Log files to parse.
@@ -2881,7 +4559,6 @@
 # # watch_method = "inotify"
 #
 # ## Parse logstash-style "grok" patterns:
-# ## Telegraf built-in parsing patterns: https://goo.gl/dkay10
 # [inputs.logparser.grok]
 # ## This is a list of patterns to check the given log file(s) for.
 # ## Note that adding patterns here increases processing time. The most
@@ -2899,6 +4576,7 @@
 #
 # ## Custom patterns can also be defined here. Put one pattern per line.
 # custom_patterns = '''
+# '''
 #
 # ## Timezone allows you to provide an override for timestamps that
 # ## don't already include an offset
@@ -2909,8 +4587,11 @@
 # ## 1. Local -- interpret based on machine localtime
 # ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
 # ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
-# timezone = "Canada/Eastern"
-# '''
+# # timezone = "Canada/Eastern"
+#
+# ## When set to "disable", the timestamp will not be incremented if there is a
+# ## duplicate.
+# # unique_timestamp = "auto"
 
 
 # # Read metrics from MQTT topic(s)
@@ -2919,11 +4600,28 @@
 # ## scheme can be tcp, ssl, or ws.
 # servers = ["tcp://localhost:1883"]
 #
-# ## MQTT QoS, must be 0, 1, or 2
+# ## QoS policy for messages
+# ## 0 = at most once
+# ## 1 = at least once
+# ## 2 = exactly once
+# ##
+# ## When using a QoS of 1 or 2, you should enable persistent_session to allow
+# ## resuming unacknowledged messages.
 # qos = 0
+#
 # ## Connection timeout for initial connection in seconds
 # connection_timeout = "30s"
 #
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
 # ## Topics to subscribe to
 # topics = [
 # "telegraf/host01/cpu",
@@ -2942,11 +4640,11 @@
 # # username = "telegraf"
 # # password = "metricsmetricsmetricsmetrics"
 #
-# ## Optional SSL Config
-# # ssl_ca = "/etc/telegraf/ca.pem"
-# # ssl_cert = "/etc/telegraf/cert.pem"
-# # ssl_key = "/etc/telegraf/key.pem"
-# ## Use SSL but skip chain & host verification
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
 # # insecure_skip_verify = false
 #
 # ## Data format to consume.
@@ -2959,19 +4657,29 @@ # # Read metrics from NATS subject(s) # [[inputs.nats_consumer]] # ## urls of NATS servers -# # servers = ["nats://localhost:4222"] +# servers = ["nats://localhost:4222"] # ## Use Transport Layer Security -# # secure = false +# secure = false # ## subject(s) to consume -# # subjects = ["telegraf"] +# subjects = ["telegraf"] # ## name a queue group -# # queue_group = "telegraf_consumers" +# queue_group = "telegraf_consumers" # # ## Sets the limits for pending msgs and bytes for each subscription # ## These shouldn't need to be adjusted except in very high throughput scenarios # # pending_message_limit = 65536 # # pending_bytes_limit = 67108864 # +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -2991,6 +4699,16 @@ # channel = "consumer" # max_in_flight = 100 # +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -2998,6 +4716,153 @@ # data_format = "influx" +# # Read metrics from one or many pgbouncer servers +# [[inputs.pgbouncer]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# address = "host=localhost user=pgbouncer sslmode=disable" + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# ## +# address = "host=localhost user=postgres sslmode=disable" +# ## A custom name for the database that will be used as the "server" tag in the +# ## measurement output. If not specified, a default one generated from +# ## the connection address is used. 
+# # outputaddress = "db01"
+#
+# ## connection configuration.
+# ## maxlifetime - specify the maximum lifetime of a connection.
+# ## default is forever (0s)
+# max_lifetime = "0s"
+#
+# ## A list of databases to explicitly ignore. If not specified, metrics for all
+# ## databases are gathered. Do NOT use with the 'databases' option.
+# # ignored_databases = ["postgres", "template0", "template1"]
+#
+# ## A list of databases to pull metrics about. If not specified, metrics for all
+# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
+# # databases = ["app_production", "testing"]
+
+
+# # Read metrics from one or many postgresql servers
+# [[inputs.postgresql_extensible]]
+# ## specify address via a url matching:
+# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
+# ## ?sslmode=[disable|verify-ca|verify-full]
+# ## or a simple string:
+# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
+# #
+# ## All connection parameters are optional. #
+# ## Without the dbname parameter, the driver will default to a database
+# ## with the same name as the user. This dbname is just for instantiating a
+# ## connection with the server and doesn't restrict the databases we are trying
+# ## to grab metrics for.
+# #
+# address = "host=localhost user=postgres sslmode=disable"
+#
+# ## connection configuration.
+# ## maxlifetime - specify the maximum lifetime of a connection.
+# ## default is forever (0s)
+# max_lifetime = "0s"
+#
+# ## A list of databases to pull metrics about. If not specified, metrics for all
+# ## databases are gathered.
+# ## databases = ["app_production", "testing"]
+# #
+# ## A custom name for the database that will be used as the "server" tag in the
+# ## measurement output. If not specified, a default one generated from
+# ## the connection address is used.
+# # outputaddress = "db01"
+# #
+# ## Define the toml config where the sql queries are stored
+# ## New queries can be added, if the withdbname is set to true and there are no
+# ## databases defined in the 'databases' field, the sql query is ended by an
+# ## 'is not null' in order to make the query succeed.
+# ## Example :
+# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" becomes
+# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
+# ## because the databases variable was set to ['postgres', 'pgbench' ] and the
+# ## withdbname was true. Be careful that if the withdbname is set to false you
+# ## don't have to define the where clause (aka with the dbname); the tagvalue
+# ## field is used to define custom tags (separated by commas).
+# ## The optional "measurement" value can be used to override the default
+# ## output measurement name ("postgresql").
+# #
+# ## Structure :
+# ## [[inputs.postgresql_extensible.query]]
+# ## sqlquery string
+# ## version string
+# ## withdbname boolean
+# ## tagvalue string (comma separated)
+# ## measurement string
+# [[inputs.postgresql_extensible.query]]
+# sqlquery="SELECT * FROM pg_stat_database"
+# version=901
+# withdbname=false
+# tagvalue=""
+# measurement=""
+# [[inputs.postgresql_extensible.query]]
+# sqlquery="SELECT * FROM pg_stat_bgwriter"
+# version=901
+# withdbname=false
+# tagvalue="postgresql.stats"
+
+
+# # Read metrics from one or many prometheus clients
+# [[inputs.prometheus]]
+# ## An array of urls to scrape metrics from.
+# urls = ["http://localhost:9100/metrics"]
+#
+# ## An array of Kubernetes services to scrape metrics from.
+# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
+#
+# ## Kubernetes config file to create client from.
+# # kube_config = "/path/to/kubernetes.config"
+#
+# ## Scrape Kubernetes pods for the following prometheus annotations:
+# ## - prometheus.io/scrape: Enable scraping for this pod
+# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
+# ## set this to 'https' & most likely set the tls config.
+# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
+# ## - prometheus.io/port: If port is not 9102 use this annotation
+# # monitor_kubernetes_pods = true
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## Specify timeout duration for slower prometheus clients (default is 3s)
+# # response_timeout = "3s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/path/to/cafile"
+# # tls_cert = "/path/to/certfile"
+# # tls_key = "/path/to/keyfile"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
 # # Generic socket listener capable of handling multiple socket types.
 # [[inputs.socket_listener]]
 # ## URL to listen on
@@ -3022,11 +4887,18 @@
 # ## 0 (default) is unlimited.
 # # read_timeout = "30s"
 #
-# ## Maximum socket buffer size in bytes.
+# ## Optional TLS configuration.
+# ## Only applies to stream sockets (e.g. TCP).
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Enables client authentication if set.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Maximum socket buffer size (in bytes when no unit specified).
 # ## For stream sockets, once the buffer fills up, the sender will start backing up.
 # ## For datagram sockets, once the buffer fills up, metrics will start dropping.
 # ## Defaults to the OS default.
-# # read_buffer_size = 65535
+# # read_buffer_size = "64KiB"
 #
 # ## Period between keep alive probes.
 # ## Only applies to TCP sockets.
@@ -3049,6 +4921,14 @@
 # ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
 # max_tcp_connections = 250
 #
+# ## Enable TCP keep alive probes (default=false)
+# tcp_keep_alive = false
+#
+# ## Specifies the keep-alive period for an active network connection.
+# ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
+# ## Defaults to the OS configuration.
+# # tcp_keep_alive_period = "2h"
+#
 # ## Address and port to host UDP listener on
 # service_address = ":8125"
 #
@@ -3075,7 +4955,7 @@
 # parse_data_dog_tags = false
 #
 # ## Statsd data translation templates, more info can be read here:
-# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
+# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
 # # templates = [
 # # "cpu.* measurement*"
 # # ]
@@ -3090,6 +4970,56 @@
 # percentile_limit = 1000
+
+
+# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587
+# [[inputs.syslog]]
+# ## Specify an IP or hostname with port - e.g., tcp://localhost:6514, tcp://10.0.0.1:6514
+# ## Protocol, address and port to host the syslog receiver.
+# ## If no host is specified, then localhost is used.
+# ## If no port is specified, 6514 is used (RFC5425#section-4.1).
+# server = "tcp://:6514"
+#
+# ## TLS Config
+# # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Period between keep alive probes.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# ## Only applies to stream sockets (e.g. TCP).
+# # keep_alive_period = "5m"
+#
+# ## Maximum number of concurrent connections (default = 0).
+# ## 0 means unlimited.
+# ## Only applies to stream sockets (e.g. TCP).
+# # max_connections = 1024
+#
+# ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
+# ## 0 means unlimited.
+# # read_timeout = "5s"
+#
+# ## The framing technique with which it is expected that messages are transported (default = "octet-counting").
+# ## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
+# ## or the non-transparent framing technique (RFC6587#section-3.4.2).
+# ## Must be one of "octet-counting", "non-transparent".
+# # framing = "octet-counting"
+#
+# ## The trailer to be expected in case of non-transparent framing (default = "LF").
+# ## Must be one of "LF", or "NUL".
+# # trailer = "LF"
+#
+# ## Whether to parse in best effort mode or not (default = false).
+# ## By default best effort parsing is off.
+# # best_effort = false
+#
+# ## Character to prepend to SD-PARAMs (default = "_").
+# ## A syslog message can contain multiple parameters and multiple identifiers within structured data section.
+# ## E.g., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
+# ## For each combination a field is created.
+# ## Its name is created by concatenating identifier, sdparam_separator, and parameter name.
+# # sdparam_separator = "_"
+
+
 # # Stream a log file, like the tail -f command
 # [[inputs.tail]]
 # ## files to tail.
@@ -3131,6 +5061,158 @@
 # # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
+
+
+# # Read metrics from VMware vCenter
+# [[inputs.vsphere]]
+# ## List of vCenter URLs to be monitored. These three lines must be uncommented
+# ## and edited for the plugin to work.
+# vcenters = [ "https://vcenter.local/sdk" ] +# username = "user@corp.local" +# password = "secret" +# +# ## VMs +# ## Typical VM metrics (if omitted or empty, all metrics are collected) +# vm_metric_include = [ +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.run.summation", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.wait.summation", +# "mem.active.average", +# "mem.granted.average", +# "mem.latency.average", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.usage.average", +# "power.power.average", +# "virtualDisk.numberReadAveraged.average", +# "virtualDisk.numberWriteAveraged.average", +# "virtualDisk.read.average", +# "virtualDisk.readOIO.latest", +# "virtualDisk.throughput.usage.average", +# "virtualDisk.totalReadLatency.average", +# "virtualDisk.totalWriteLatency.average", +# "virtualDisk.write.average", +# "virtualDisk.writeOIO.latest", +# "sys.uptime.latest", +# ] +# # vm_metric_exclude = [] ## Nothing is excluded by default +# # vm_instances = true ## true by default +# +# ## Hosts +# ## Typical host metrics (if omitted or empty, all metrics are collected) +# host_metric_include = [ +# "cpu.coreUtilization.average", +# "cpu.costop.summation", +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.swapwait.summation", +# "cpu.usage.average", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.utilization.average", +# "cpu.wait.summation", +# "disk.deviceReadLatency.average", +# "disk.deviceWriteLatency.average", +# "disk.kernelReadLatency.average", +# "disk.kernelWriteLatency.average", +# "disk.numberReadAveraged.average", +# "disk.numberWriteAveraged.average", +# "disk.read.average", +# "disk.totalReadLatency.average", +# "disk.totalWriteLatency.average", +# "disk.write.average", +# "mem.active.average", +# "mem.latency.average", +# "mem.state.latest", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.totalCapacity.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.errorsRx.summation", +# "net.errorsTx.summation", +# "net.usage.average", +# "power.power.average", +# "storageAdapter.numberReadAveraged.average", +# "storageAdapter.numberWriteAveraged.average", +# "storageAdapter.read.average", +# "storageAdapter.write.average", +# "sys.uptime.latest", +# ] +# # host_metric_exclude = [] ## Nothing excluded by default +# # host_instances = true ## true by default +# +# ## Clusters +# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected +# # cluster_metric_exclude = [] ## Nothing excluded by default +# # cluster_instances = false ## false by default +# +# ## Datastores +# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected +# # datastore_metric_exclude = [] ## Nothing excluded by default +# # datastore_instances = false ## false by default for Datastores only +# +# ## Datacenters +# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected +# datacenter_metric_exclude = [ "*" ] ## 
Datacenters are not collected by default.
+# # datacenter_instances = false ## false by default for Datastores only
+#
+# ## Plugin Settings
+# ## separator character to use for measurement and field names (default: "_")
+# # separator = "_"
+#
+# ## number of objects to retrieve per query for realtime resources (vms and hosts)
+# ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
+# # max_query_objects = 256
+#
+# ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores)
+# ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
+# # max_query_metrics = 256
+#
+# ## number of go routines to use for collection and discovery of objects and metrics
+# # collect_concurrency = 1
+# # discover_concurrency = 1
+#
+# ## whether or not to force discovery of new objects on initial gather call before collecting metrics
+# ## when true for large environments this may cause errors for time elapsed while collecting metrics
+# ## when false (default) the first collection cycle may result in no or limited metrics while objects are discovered
+# # force_discover_on_init = false
+#
+# ## the interval before (re)discovering objects subject to metrics collection (default: 300s)
+# # object_discovery_interval = "300s"
+#
+# ## timeout applies to any of the api requests made to vcenter
+# # timeout = "60s"
+#
+# ## Optional SSL Config
+# # ssl_ca = "/path/to/cafile"
+# # ssl_cert = "/path/to/certfile"
+# # ssl_key = "/path/to/keyfile"
+# ## Use SSL but skip chain & host verification
+# # insecure_skip_verify = false
+
+
 # # A Webhooks Event collector
 # [[inputs.webhooks]]
 # ## Address and port to host Webhook listener on
From c0e0da7ef6364ee682174e8cd3b218d6ff75ab69 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 4 Mar 2019 12:35:57 -0800
Subject: [PATCH 0664/1815] Fix conversion from string float to integer (#5518)

---
 plugins/processors/converter/converter.go     | 28 +++++++++++----
 .../processors/converter/converter_test.go    | 34 ++++++++++++-------
 2 files changed, 43 insertions(+), 19 deletions(-)

diff --git a/plugins/processors/converter/converter.go b/plugins/processors/converter/converter.go
index 50fd195e0..3c9b74f3f 100644
--- a/plugins/processors/converter/converter.go
+++ b/plugins/processors/converter/converter.go
@@ -336,7 +336,7 @@ func toInteger(v interface{}) (int64, bool) {
 		} else if value > float64(math.MaxInt64) {
 			return math.MaxInt64, true
 		} else {
-			return int64(value), true
+			return int64(Round(value)), true
 		}
 	case bool:
 		if value {
@@ -345,8 +345,11 @@
 		} else {
 			return 0, true
 		}
 	case string:
-		result, err := strconv.ParseInt(value, 10, 64)
-		return result, err == nil
+		result, err := strconv.ParseFloat(value, 64)
+		if err != nil {
+			return 0, false
+		}
+		return toInteger(result)
 	}
 	return 0, false
 }
@@ -367,7 +370,7 @@ func toUnsigned(v interface{}) (uint64, bool) {
 		} else if value > float64(math.MaxUint64) {
 			return math.MaxUint64, true
 		} else {
-			return uint64(value), true
+			return uint64(Round(value)), true
 		}
 	case bool:
 		if value {
@@ -376,8 +379,11 @@
 		} else {
 			return 0, true
 		}
 	case string:
-		result, err := strconv.ParseUint(value, 10, 64)
-		return result, err == nil
+		result, err := strconv.ParseFloat(value, 64)
+		if err != nil {
+			return 0, false
+		}
+		return toUnsigned(result)
 	}
 	return 0, false
 }
@@ -419,6 +425,16 @@ func toString(v interface{}) (string, bool) {
 	return "", false
 }
 
+// math.Round was not added until Go 1.10, can be removed when support for Go
+// 1.9 is
dropped. +func Round(x float64) float64 { + t := math.Trunc(x) + if math.Abs(x-t) >= 0.5 { + return t + math.Copysign(1, x) + } + return t +} + func logPrintf(format string, v ...interface{}) { log.Printf("D! [processors.converter] "+format, v...) } diff --git a/plugins/processors/converter/converter_test.go b/plugins/processors/converter/converter_test.go index 76839760d..1b00cedf9 100644 --- a/plugins/processors/converter/converter_test.go +++ b/plugins/processors/converter/converter_test.go @@ -129,8 +129,8 @@ func TestConverter(t *testing.T) { converter: &Converter{ Fields: &Conversion{ String: []string{"a"}, - Integer: []string{"b"}, - Unsigned: []string{"c"}, + Integer: []string{"b", "b1", "b2"}, + Unsigned: []string{"c", "c1", "c2"}, Boolean: []string{"d"}, Float: []string{"e"}, Tag: []string{"f"}, @@ -141,12 +141,16 @@ func TestConverter(t *testing.T) { "cpu", map[string]string{}, map[string]interface{}{ - "a": "howdy", - "b": "42", - "c": "42", - "d": "true", - "e": "42.0", - "f": "foo", + "a": "howdy", + "b": "42", + "b1": "42.2", + "b2": "42.5", + "c": "42", + "c1": "42.2", + "c2": "42.5", + "d": "true", + "e": "42.0", + "f": "foo", }, time.Unix(0, 0), ), @@ -158,11 +162,15 @@ func TestConverter(t *testing.T) { "f": "foo", }, map[string]interface{}{ - "a": "howdy", - "b": int64(42), - "c": uint64(42), - "d": true, - "e": 42.0, + "a": "howdy", + "b": int64(42), + "b1": int64(42), + "b2": int64(43), + "c": uint64(42), + "c1": uint64(42), + "c2": uint64(43), + "d": true, + "e": 42.0, }, time.Unix(0, 0), ), From 0b5811e19338ead14a535aba499b211f9c08c299 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 4 Mar 2019 12:36:19 -0800 Subject: [PATCH 0665/1815] Fix panic when rejecting empty batch (#5524) --- internal/models/buffer.go | 7 ++++++- internal/models/buffer_test.go | 12 ++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/internal/models/buffer.go b/internal/models/buffer.go index 7b27e6686..5d036c728 100644 --- a/internal/models/buffer.go +++ b/internal/models/buffer.go @@ -182,6 +182,10 @@ func (b *Buffer) Reject(batch []telegraf.Metric) { b.Lock() defer b.Unlock() + if len(batch) == 0 { + return + } + older := b.dist(b.first, b.batchFirst) free := b.cap - b.size restore := min(len(batch), free+older) @@ -191,7 +195,8 @@ func (b *Buffer) Reject(batch []telegraf.Metric) { rp := b.last re := b.nextby(rp, restore) b.last = re - for rb != rp { + + for rb != rp && rp != re { rp = b.prev(rp) re = b.prev(re) diff --git a/internal/models/buffer_test.go b/internal/models/buffer_test.go index 892af8bd4..bc19680d1 100644 --- a/internal/models/buffer_test.go +++ b/internal/models/buffer_test.go @@ -714,3 +714,15 @@ func TestBuffer_AddOverwriteAndRejectOffset(t *testing.T) { require.Equal(t, 13, reject) require.Equal(t, 5, accept) } + +func TestBuffer_RejectEmptyBatch(t *testing.T) { + b := setup(NewBuffer("test", 5)) + batch := b.Batch(2) + b.Add(MetricTime(1)) + b.Reject(batch) + b.Add(MetricTime(2)) + batch = b.Batch(2) + for _, m := range batch { + require.NotNil(t, m) + } +} From b21864fc17b922c12be625c31e6d7b2d04d285f9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 4 Mar 2019 12:55:35 -0800 Subject: [PATCH 0666/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ddb6ad9cf..d32174d55 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,6 +67,8 @@ - [#5449](https://github.com/influxdata/telegraf/issues/5449): Log permission error and ignore in filecount input. 
- [#5497](https://github.com/influxdata/telegraf/pull/5497): Create log file in append mode. - [#5325](https://github.com/influxdata/telegraf/issues/5325): Ignore tracking for metrics added to aggregator. +- [#5514](https://github.com/influxdata/telegraf/issues/5514): Fix panic when rejecting empty batch. +- [#5518](https://github.com/influxdata/telegraf/pull/5518): Fix conversion from string float to integer. ## v1.9.5 [2019-02-26] From 77a2de2bc1319028ca3afa8c93a84a2ea9a78f94 Mon Sep 17 00:00:00 2001 From: scier Date: Mon, 4 Mar 2019 13:00:22 -0800 Subject: [PATCH 0667/1815] Add a link to x509_cert plugin in the README (#5515) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index e8f3d613e..8d08c6dfd 100644 --- a/README.md +++ b/README.md @@ -282,6 +282,7 @@ For documentation on the latest development code see the [documentation index][d * [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters) * [win_services](./plugins/inputs/win_services) * [wireless](./plugins/inputs/wireless) +* [x509_cert](./plugins/inputs/x509_cert) * [zfs](./plugins/inputs/zfs) * [zipkin](./plugins/inputs/zipkin) * [zookeeper](./plugins/inputs/zookeeper) From 98e922123a1b55811c428258918f447e7425a9d6 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 4 Mar 2019 14:34:52 -0700 Subject: [PATCH 0668/1815] Dereference pointer fields in metrics (#5525) --- metric/metric.go | 61 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/metric/metric.go b/metric/metric.go index de4af500b..29345e63c 100644 --- a/metric/metric.go +++ b/metric/metric.go @@ -312,7 +312,68 @@ func convertField(v interface{}) interface{} { return uint64(v) case float32: return float64(v) + case *float64: + if v != nil { + return *v + } + case *int64: + if v != nil { + return *v + } + case *string: + if v != nil { + return *v + } + case *bool: + if v != nil { + return *v + } + case *int: + if v != nil { + return int64(*v) + } + case *uint: + if v != nil { + return uint64(*v) + } + case *uint64: + if v != nil { + return uint64(*v) + } + case *[]byte: + if v != nil { + return string(*v) + } + case *int32: + if v != nil { + return int64(*v) + } + case *int16: + if v != nil { + return int64(*v) + } + case *int8: + if v != nil { + return int64(*v) + } + case *uint32: + if v != nil { + return uint64(*v) + } + case *uint16: + if v != nil { + return uint64(*v) + } + case *uint8: + if v != nil { + return uint64(*v) + } + case *float32: + if v != nil { + return float64(*v) + } default: return nil } + return nil } From 5108e82a5dc21f129645315f48c8d94fc042aead Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 4 Mar 2019 17:17:35 -0800 Subject: [PATCH 0669/1815] Remove unused dependencies --- Gopkg.lock | 22 +--------------------- docs/LICENSE_OF_DEPENDENCIES.md | 2 -- 2 files changed, 1 insertion(+), 23 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 6a215cc60..03c4e4ab4 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -116,17 +116,6 @@ revision = "1dc8cf203d24cd454e71ce40ab4cd0bf3112df90" version = "v1.27.0" -[[projects]] - branch = "master" - digest = "1:a74730e052a45a3fab1d310fdef2ec17ae3d6af16228421e238320846f2aaec8" - name = "github.com/alecthomas/template" - packages = [ - ".", - "parse", - ] - pruneopts = "" - revision = "a0175ee3bccc567396460bf5acd36800cb10c49c" - [[projects]] branch = "master" digest = "1:8483994d21404c8a1d489f6be756e25bfccd3b45d65821f25695577791a08e68" @@ -952,7 +941,6 @@ 
packages = [ "expfmt", "internal/bitbucket.org/ww/goautoneg", - "log", "model", ] pruneopts = "" @@ -1413,14 +1401,6 @@ revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" version = "v1.13.0" -[[projects]] - digest = "1:15d017551627c8bb091bde628215b2861bed128855343fdd570c62d08871f6e1" - name = "gopkg.in/alecthomas/kingpin.v2" - packages = ["."] - pruneopts = "" - revision = "947dcec5ba9c011838740e680966fd7087a71d0d" - version = "v2.2.6" - [[projects]] digest = "1:3cad99e0d1f94b8c162787c12e59d0a0b9df1ef75590eb145cdd625479091efe" name = "gopkg.in/asn1-ber.v1" @@ -1597,11 +1577,11 @@ "github.com/nsqio/go-nsq", "github.com/openzipkin/zipkin-go-opentracing", "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore", + "github.com/pkg/errors", "github.com/prometheus/client_golang/prometheus", "github.com/prometheus/client_golang/prometheus/promhttp", "github.com/prometheus/client_model/go", "github.com/prometheus/common/expfmt", - "github.com/prometheus/common/log", "github.com/satori/go.uuid", "github.com/shirou/gopsutil/cpu", "github.com/shirou/gopsutil/disk", diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 2bd6ea01d..485b758a4 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -8,7 +8,6 @@ following works: - collectd.org [MIT License](https://git.octo.it/?p=collectd.git;a=blob;f=COPYING;hb=HEAD) - contrib.go.opencensus.io/exporter/stackdriver [Apache License 2.0](https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/LICENSE) - github.com/aerospike/aerospike-client-go [Apache License 2.0](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE) -- github.com/alecthomas/template [BSD 3-Clause "New" or "Revised" License](https://github.com/alecthomas/template/blob/master/LICENSE) - github.com/alecthomas/units [MIT License](https://github.com/alecthomas/units/blob/master/COPYING) - github.com/amir/raidman [The Unlicense](https://github.com/amir/raidman/blob/master/UNLICENSE) - github.com/apache/thrift [Apache License 2.0](https://github.com/apache/thrift/blob/master/LICENSE) @@ -124,7 +123,6 @@ following works: - google.golang.org/appengine [Apache License 2.0](https://github.com/golang/appengine/blob/master/LICENSE) - google.golang.org/genproto [Apache License 2.0](https://github.com/google/go-genproto/blob/master/LICENSE) - google.golang.org/grpc [Apache License 2.0](https://github.com/grpc/grpc-go/blob/master/LICENSE) -- gopkg.in/alecthomas/kingpin.v2 [MIT License](https://github.com/alecthomas/kingpin/blob/v2.2.6/COPYING) - gopkg.in/asn1-ber.v1 [MIT License](https://github.com/go-asn1-ber/asn1-ber/blob/v1.3/LICENSE) - gopkg.in/fatih/pool.v2 [MIT License](https://github.com/fatih/pool/blob/v2.0.0/LICENSE) - gopkg.in/fsnotify.v1 [BSD 3-Clause "New" or "Revised" License](https://github.com/fsnotify/fsnotify/blob/v1.4.7/LICENSE) From 6088c7f9697d2fac339c49a75dea2e7c6a752836 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 4 Mar 2019 17:22:24 -0800 Subject: [PATCH 0670/1815] Add dep check to CI tests --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 100883198..8f6c5da91 100644 --- a/Makefile +++ b/Makefile @@ -87,6 +87,7 @@ vet: .PHONY: check check: fmtcheck vet + dep check .PHONY: test-all test-all: fmtcheck vet From 3d206d28971c85fe7cd6b773ad42b4d8246a15e8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 5 Mar 2019 10:42:25 -0800 Subject: [PATCH 0671/1815] Exclude dep check on windows CI --- .circleci/config.yml | 1 
+ Makefile | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 63cbf2549..4f8dc3d2d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -21,6 +21,7 @@ jobs: - restore_cache: key: vendor-{{ checksum "Gopkg.lock" }} - run: 'make deps' + - run: 'dep check' - save_cache: name: 'vendored deps' key: vendor-{{ checksum "Gopkg.lock" }} diff --git a/Makefile b/Makefile index 8f6c5da91..100883198 100644 --- a/Makefile +++ b/Makefile @@ -87,7 +87,6 @@ vet: .PHONY: check check: fmtcheck vet - dep check .PHONY: test-all test-all: fmtcheck vet From 1dcfecdb590e3c49cd60ee8474b532f9c4169749 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 5 Mar 2019 10:49:53 -0800 Subject: [PATCH 0672/1815] Build with Go 1.11.5 on Windows CI --- appveyor.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index dfdf31d50..15cdd5664 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -12,11 +12,11 @@ platform: x64 install: - IF NOT EXIST "C:\Cache" mkdir C:\Cache - - IF NOT EXIST "C:\Cache\go1.11.4.msi" curl -o "C:\Cache\go1.11.4.msi" https://storage.googleapis.com/golang/go1.11.4.windows-amd64.msi + - IF NOT EXIST "C:\Cache\go1.11.5.msi" curl -o "C:\Cache\go1.11.5.msi" https://storage.googleapis.com/golang/go1.11.5.windows-amd64.msi - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip - IF EXIST "C:\Go" rmdir /S /Q C:\Go - - msiexec.exe /i "C:\Cache\go1.11.4.msi" /quiet + - msiexec.exe /i "C:\Cache\go1.11.5.msi" /quiet - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y - go get -d github.com/golang/dep From a0527db0370633ce93c5e3d91006a73e4642d3c5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 5 Mar 2019 11:07:39 -0800 Subject: [PATCH 0673/1815] Allow grok to produce metrics with no fields (#5533) --- plugins/parsers/grok/parser.go | 7 +-- plugins/parsers/grok/parser_test.go | 87 ++++++++++++++++++----------- plugins/parsers/registry.go | 1 - 3 files changed, 55 insertions(+), 40 deletions(-) diff --git a/plugins/parsers/grok/parser.go b/plugins/parsers/grok/parser.go index eb1d1e71c..5984e288e 100644 --- a/plugins/parsers/grok/parser.go +++ b/plugins/parsers/grok/parser.go @@ -11,10 +11,9 @@ import ( "strings" "time" - "github.com/vjeantet/grok" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/vjeantet/grok" ) var timeLayouts = map[string]string{ @@ -361,10 +360,6 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } } - if len(fields) == 0 { - return nil, fmt.Errorf("grok: must have one or more fields") - } - if p.UniqueTimestamp != "auto" { return metric.New(p.Measurement, tags, fields, timestamp) } diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go index 22007971b..23af0af44 100644 --- a/plugins/parsers/grok/parser_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -571,61 +572,81 @@ func TestCompileErrors(t *testing.T) { assert.Error(t, p.Compile()) } -func TestParseErrors(t *testing.T) { - // Parse fails because the pattern doesn't exist +func 
TestParseErrors_MissingPattern(t *testing.T) { p := &Parser{ - Patterns: []string{"%{TEST_LOG_B}"}, + Measurement: "grok", + Patterns: []string{"%{TEST_LOG_B}"}, CustomPatterns: ` TEST_LOG_A %{HTTPDATE:ts:ts-httpd} %{WORD:myword:int} %{} `, } - assert.Error(t, p.Compile()) + require.Error(t, p.Compile()) _, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] notnumber 200 192.168.1.1 5.432µs 101`) - assert.Error(t, err) + require.Error(t, err) +} - // Parse fails because myword is not an int - p = &Parser{ - Patterns: []string{"%{TEST_LOG_A}"}, +func TestParseErrors_WrongIntegerType(t *testing.T) { + p := &Parser{ + Measurement: "grok", + Patterns: []string{"%{TEST_LOG_A}"}, CustomPatterns: ` - TEST_LOG_A %{HTTPDATE:ts:ts-httpd} %{WORD:myword:int} + TEST_LOG_A %{NUMBER:ts:ts-epoch} %{WORD:myword:int} `, } - assert.NoError(t, p.Compile()) - _, err = p.ParseLine(`04/Jun/2016:12:41:45 +0100 notnumber`) - assert.Error(t, err) + require.NoError(t, p.Compile()) + m, err := p.ParseLine(`0 notnumber`) + require.NoError(t, err) + testutil.RequireMetricEqual(t, + m, + testutil.MustMetric("grok", map[string]string{}, map[string]interface{}{}, time.Unix(0, 0))) +} - // Parse fails because myword is not a float - p = &Parser{ - Patterns: []string{"%{TEST_LOG_A}"}, +func TestParseErrors_WrongFloatType(t *testing.T) { + p := &Parser{ + Measurement: "grok", + Patterns: []string{"%{TEST_LOG_A}"}, CustomPatterns: ` - TEST_LOG_A %{HTTPDATE:ts:ts-httpd} %{WORD:myword:float} + TEST_LOG_A %{NUMBER:ts:ts-epoch} %{WORD:myword:float} `, } - assert.NoError(t, p.Compile()) - _, err = p.ParseLine(`04/Jun/2016:12:41:45 +0100 notnumber`) - assert.Error(t, err) + require.NoError(t, p.Compile()) + m, err := p.ParseLine(`0 notnumber`) + require.NoError(t, err) + testutil.RequireMetricEqual(t, + m, + testutil.MustMetric("grok", map[string]string{}, map[string]interface{}{}, time.Unix(0, 0))) +} - // Parse fails because myword is not a duration - p = &Parser{ - Patterns: []string{"%{TEST_LOG_A}"}, +func TestParseErrors_WrongDurationType(t *testing.T) { + p := &Parser{ + Measurement: "grok", + Patterns: []string{"%{TEST_LOG_A}"}, CustomPatterns: ` - TEST_LOG_A %{HTTPDATE:ts:ts-httpd} %{WORD:myword:duration} + TEST_LOG_A %{NUMBER:ts:ts-epoch} %{WORD:myword:duration} `, } - assert.NoError(t, p.Compile()) - _, err = p.ParseLine(`04/Jun/2016:12:41:45 +0100 notnumber`) - assert.Error(t, err) + require.NoError(t, p.Compile()) + m, err := p.ParseLine(`0 notnumber`) + require.NoError(t, err) + testutil.RequireMetricEqual(t, + m, + testutil.MustMetric("grok", map[string]string{}, map[string]interface{}{}, time.Unix(0, 0))) +} - // Parse fails because the time layout is wrong. 
- p = &Parser{ - Patterns: []string{"%{TEST_LOG_A}"}, +func TestParseErrors_WrongTimeLayout(t *testing.T) { + p := &Parser{ + Measurement: "grok", + Patterns: []string{"%{TEST_LOG_A}"}, CustomPatterns: ` - TEST_LOG_A %{HTTPDATE:ts:ts-unix} %{WORD:myword:duration} + TEST_LOG_A %{NUMBER:ts:ts-epoch} %{WORD:myword:duration} `, } - assert.NoError(t, p.Compile()) - _, err = p.ParseLine(`04/Jun/2016:12:41:45 +0100 notnumber`) - assert.Error(t, err) + require.NoError(t, p.Compile()) + m, err := p.ParseLine(`0 notnumber`) + require.NoError(t, err) + testutil.RequireMetricEqual(t, + m, + testutil.MustMetric("grok", map[string]string{}, map[string]interface{}{}, time.Unix(0, 0))) } func TestTsModder(t *testing.T) { diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index ad54e35ad..e6e15469f 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -298,7 +298,6 @@ func newJSONParser( return parser } -//Deprecated: Use NewParser to get a JSONParser object func newGrokParser(metricName string, patterns []string, nPatterns []string, cPatterns string, cPatternFiles []string, From b5adaff07f902626017f03196843768bb3008b4d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 5 Mar 2019 11:08:02 -0800 Subject: [PATCH 0674/1815] Sort metrics by timestamp in prometheus output (#5534) --- .../prometheus_client/prometheus_client.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index f919b6a71..5611a0a9e 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -364,13 +364,27 @@ func (p *PrometheusClient) addMetricFamily(point telegraf.Metric, sample *Sample addSample(fam, sample, sampleID) } +// Sorted returns a copy of the metrics in time ascending order. A copy is +// made to avoid modifying the input metric slice since doing so is not +// allowed. +func sorted(metrics []telegraf.Metric) []telegraf.Metric { + batch := make([]telegraf.Metric, 0, len(metrics)) + for i := len(metrics) - 1; i >= 0; i-- { + batch = append(batch, metrics[i]) + } + sort.Slice(batch, func(i, j int) bool { + return batch[i].Time().Before(batch[j].Time()) + }) + return batch +} + func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { p.Lock() defer p.Unlock() now := p.now() - for _, point := range metrics { + for _, point := range sorted(metrics) { tags := point.Tags() sampleID := CreateSampleID(tags) From f2aa35e258184d5cfdf9433b3d5fc53045b5a5f0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 5 Mar 2019 11:10:10 -0800 Subject: [PATCH 0675/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d32174d55..febd52ad9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -56,6 +56,7 @@ - [#4308](https://github.com/influxdata/telegraf/pull/4308): Add additional metrics to rabbitmq input. - [#5388](https://github.com/influxdata/telegraf/pull/5388): Add multicast support to socket_listener input. - [#5490](https://github.com/influxdata/telegraf/pull/5490): Add tag based routing in influxdb/influxdb_v2 outputs. +- [#5533](https://github.com/influxdata/telegraf/pull/5533): Allow grok parser to produce metrics with no fields. #### Bugfixes @@ -69,6 +70,7 @@ - [#5325](https://github.com/influxdata/telegraf/issues/5325): Ignore tracking for metrics added to aggregator. 
- [#5514](https://github.com/influxdata/telegraf/issues/5514): Fix panic when rejecting empty batch. - [#5518](https://github.com/influxdata/telegraf/pull/5518): Fix conversion from string float to integer. +- [#5431](https://github.com/influxdata/telegraf/pull/5431): Sort metrics by timestamp in prometheus output. ## v1.9.5 [2019-02-26] From 51cc0fe6d89ea9f0fa6bebdc2f0dac0d1ec2d2d6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 5 Mar 2019 11:12:27 -0800 Subject: [PATCH 0676/1815] Set 1.10.0 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index febd52ad9..a759dd630 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## v1.10 [unreleased] +## v1.10 [2019-03-05] #### New Inputs From dd67144660cfd7f3eb55cc9c5c8ba5ef734ff391 Mon Sep 17 00:00:00 2001 From: emily Date: Wed, 6 Mar 2019 17:34:47 -0800 Subject: [PATCH 0677/1815] Add Base64-encoding/decoding for Google Cloud PubSub plugins (#5543) --- plugins/inputs/cloud_pubsub/README.md | 5 ++ plugins/inputs/cloud_pubsub/pubsub.go | 20 +++++++- plugins/inputs/cloud_pubsub/pubsub_test.go | 45 +++++++++++++++++ plugins/outputs/cloud_pubsub/README.md | 3 ++ plugins/outputs/cloud_pubsub/pubsub.go | 17 +++++++ plugins/outputs/cloud_pubsub/pubsub_test.go | 50 ++++++++++++++++--- plugins/outputs/cloud_pubsub/topic_stubbed.go | 12 ++++- 7 files changed, 143 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/cloud_pubsub/README.md b/plugins/inputs/cloud_pubsub/README.md index 6bf3fa29e..460cf4b82 100644 --- a/plugins/inputs/cloud_pubsub/README.md +++ b/plugins/inputs/cloud_pubsub/README.md @@ -75,6 +75,11 @@ and creates metrics using one of the supported [input data formats][]. ## 1. Note this setting does not limit the number of messages that can be ## processed concurrently (use "max_outstanding_messages" instead). # max_receiver_go_routines = 0 + + ## Optional. If true, Telegraf will attempt to base64 decode the + ## PubSub message data before parsing. Many GCP services that + ## output JSON to Google PubSub base64-encode the JSON payload. + # base64_data = false ``` ### Multiple Subscriptions and Topics diff --git a/plugins/inputs/cloud_pubsub/pubsub.go b/plugins/inputs/cloud_pubsub/pubsub.go index 9f7125126..845711e7d 100644 --- a/plugins/inputs/cloud_pubsub/pubsub.go +++ b/plugins/inputs/cloud_pubsub/pubsub.go @@ -6,6 +6,7 @@ import ( "sync" "cloud.google.com/go/pubsub" + "encoding/base64" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" @@ -40,6 +41,8 @@ type PubSub struct { MaxUndeliveredMessages int `toml:"max_undelivered_messages"` RetryReceiveDelaySeconds int `toml:"retry_delay_seconds"` + Base64Data bool `toml:"base64_data"` + sub subscription stubSub func() subscription @@ -169,7 +172,18 @@ func (ps *PubSub) onMessage(ctx context.Context, msg message) error { return fmt.Errorf("message longer than max_message_len (%d > %d)", len(msg.Data()), ps.MaxMessageLen) } - metrics, err := ps.parser.Parse(msg.Data()) + var data []byte + if ps.Base64Data { + strData, err := base64.StdEncoding.DecodeString(string(msg.Data())) + if err != nil { + return fmt.Errorf("unable to base64 decode message: %v", err) + } + data = []byte(strData) + } else { + data = msg.Data() + } + + metrics, err := ps.parser.Parse(data) if err != nil { msg.Ack() return err @@ -345,4 +359,8 @@ const sampleConfig = ` ## 1. 
Note this setting does not limit the number of messages that can be ## processed concurrently (use "max_outstanding_messages" instead). # max_receiver_go_routines = 0 + + ## Optional. If true, Telegraf will attempt to base64 decode the + ## PubSub message data before parsing + # base64_data = false ` diff --git a/plugins/inputs/cloud_pubsub/pubsub_test.go b/plugins/inputs/cloud_pubsub/pubsub_test.go index be6070d15..6233546aa 100644 --- a/plugins/inputs/cloud_pubsub/pubsub_test.go +++ b/plugins/inputs/cloud_pubsub/pubsub_test.go @@ -1,6 +1,7 @@ package cloud_pubsub import ( + "encoding/base64" "errors" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" @@ -55,6 +56,50 @@ func TestRunParse(t *testing.T) { validateTestInfluxMetric(t, metric) } +// Test ingesting InfluxDB-format PubSub message +func TestRunBase64(t *testing.T) { + subId := "sub-run-base64" + + testParser, _ := parsers.NewInfluxParser() + + sub := &stubSub{ + id: subId, + messages: make(chan *testMsg, 100), + } + sub.receiver = testMessagesReceive(sub) + + ps := &PubSub{ + parser: testParser, + stubSub: func() subscription { return sub }, + Project: "projectIDontMatterForTests", + Subscription: subId, + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + Base64Data: true, + } + + acc := &testutil.Accumulator{} + if err := ps.Start(acc); err != nil { + t.Fatalf("test PubSub failed to start: %s", err) + } + defer ps.Stop() + + if ps.sub == nil { + t.Fatal("expected plugin subscription to be non-nil") + } + + testTracker := &testTracker{} + msg := &testMsg{ + value: base64.StdEncoding.EncodeToString([]byte(msgInflux)), + tracker: testTracker, + } + sub.messages <- msg + + acc.Wait(1) + assert.Equal(t, acc.NFields(), 1) + metric := acc.Metrics[0] + validateTestInfluxMetric(t, metric) +} + func TestRunInvalidMessages(t *testing.T) { subId := "sub-invalid-messages" diff --git a/plugins/outputs/cloud_pubsub/README.md b/plugins/outputs/cloud_pubsub/README.md index 873f3c9b3..3a4088b61 100644 --- a/plugins/outputs/cloud_pubsub/README.md +++ b/plugins/outputs/cloud_pubsub/README.md @@ -52,6 +52,9 @@ generate it using `telegraf --usage cloud_pubsub`. ## Optional. Specifies a timeout for requests to the PubSub API. # publish_timeout = "30s" + ## Optional. If true, published PubSub message data will be base64-encoded. + # base64_data = false + ## Optional. PubSub attributes to add to metrics. # [[inputs.pubsub.attributes]] # my_attr = "tag_value" diff --git a/plugins/outputs/cloud_pubsub/pubsub.go b/plugins/outputs/cloud_pubsub/pubsub.go index ee1611d3f..c8fbf242d 100644 --- a/plugins/outputs/cloud_pubsub/pubsub.go +++ b/plugins/outputs/cloud_pubsub/pubsub.go @@ -6,6 +6,7 @@ import ( "sync" "cloud.google.com/go/pubsub" + "encoding/base64" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" @@ -56,6 +57,9 @@ const sampleConfig = ` ## Optional. Specifies a timeout for requests to the PubSub API. # publish_timeout = "30s" + ## Optional. If true, published PubSub message data will be base64-encoded. + # base64_data = false + ## Optional. PubSub attributes to add to metrics. 
# [[inputs.pubsub.attributes]] # my_attr = "tag_value" @@ -72,6 +76,7 @@ type PubSub struct { PublishByteThreshold int `toml:"publish_byte_threshold"` PublishNumGoroutines int `toml:"publish_num_go_routines"` PublishTimeout internal.Duration `toml:"publish_timeout"` + Base64Data bool `toml:"base64_data"` t topic c *pubsub.Client @@ -207,6 +212,12 @@ func (ps *PubSub) toMessages(metrics []telegraf.Metric) ([]*pubsub.Message, erro if err != nil { return nil, err } + + if ps.Base64Data { + encoded := base64.StdEncoding.EncodeToString(b) + b = []byte(encoded) + } + msg := &pubsub.Message{Data: b} if ps.Attributes != nil { msg.Attributes = ps.Attributes @@ -220,6 +231,12 @@ func (ps *PubSub) toMessages(metrics []telegraf.Metric) ([]*pubsub.Message, erro if err != nil { return nil, err } + + if ps.Base64Data { + encoded := base64.StdEncoding.EncodeToString(b) + b = []byte(encoded) + } + msgs[i] = &pubsub.Message{ Data: b, } diff --git a/plugins/outputs/cloud_pubsub/pubsub_test.go b/plugins/outputs/cloud_pubsub/pubsub_test.go index eb993b37c..76eb518d7 100644 --- a/plugins/outputs/cloud_pubsub/pubsub_test.go +++ b/plugins/outputs/cloud_pubsub/pubsub_test.go @@ -4,6 +4,7 @@ import ( "testing" "cloud.google.com/go/pubsub" + "encoding/base64" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" @@ -26,7 +27,7 @@ func TestPubSub_WriteSingle(t *testing.T) { } for _, testM := range testMetrics { - verifyMetricPublished(t, testM.m, topic.published) + verifyRawMetricPublished(t, testM.m, topic.published) } } @@ -48,7 +49,7 @@ func TestPubSub_WriteWithAttribute(t *testing.T) { } for _, testM := range testMetrics { - msg := verifyMetricPublished(t, testM.m, topic.published) + msg := verifyRawMetricPublished(t, testM.m, topic.published) assert.Equalf(t, "bar1", msg.Attributes["foo1"], "expected attribute foo1=bar1") assert.Equalf(t, "bar2", msg.Attributes["foo2"], "expected attribute foo2=bar2") } @@ -70,7 +71,7 @@ func TestPubSub_WriteMultiple(t *testing.T) { } for _, testM := range testMetrics { - verifyMetricPublished(t, testM.m, topic.published) + verifyRawMetricPublished(t, testM.m, topic.published) } assert.Equalf(t, 1, topic.bundleCount, "unexpected bundle count") } @@ -94,7 +95,7 @@ func TestPubSub_WriteOverCountThreshold(t *testing.T) { } for _, testM := range testMetrics { - verifyMetricPublished(t, testM.m, topic.published) + verifyRawMetricPublished(t, testM.m, topic.published) } assert.Equalf(t, 2, topic.bundleCount, "unexpected bundle count") } @@ -117,11 +118,33 @@ func TestPubSub_WriteOverByteThreshold(t *testing.T) { } for _, testM := range testMetrics { - verifyMetricPublished(t, testM.m, topic.published) + verifyRawMetricPublished(t, testM.m, topic.published) } assert.Equalf(t, 2, topic.bundleCount, "unexpected bundle count") } +func TestPubSub_WriteBase64Single(t *testing.T) { + + testMetrics := []testMetric{ + {testutil.TestMetric("value_1", "test"), false /*return error */}, + {testutil.TestMetric("value_2", "test"), false}, + } + + settings := pubsub.DefaultPublishSettings + settings.CountThreshold = 1 + ps, topic, metrics := getTestResources(t, settings, testMetrics) + ps.Base64Data = true + + err := ps.Write(metrics) + if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + + for _, testM := range testMetrics { + verifyMetricPublished(t, testM.m, topic.published, true /* base64encoded */) + } +} + func TestPubSub_Error(t *testing.T) { testMetrics := []testMetric{ // Force this batch to return error @@ 
-141,7 +164,11 @@ func TestPubSub_Error(t *testing.T) { } } -func verifyMetricPublished(t *testing.T, m telegraf.Metric, published map[string]*pubsub.Message) *pubsub.Message { +func verifyRawMetricPublished(t *testing.T, m telegraf.Metric, published map[string]*pubsub.Message) *pubsub.Message { + return verifyMetricPublished(t, m, published, false) +} + +func verifyMetricPublished(t *testing.T, m telegraf.Metric, published map[string]*pubsub.Message, base64Encoded bool) *pubsub.Message { p, _ := parsers.NewInfluxParser() v, _ := m.GetField("value") @@ -150,7 +177,16 @@ func verifyMetricPublished(t *testing.T, m telegraf.Metric, published map[string t.Fatalf("expected metric to get published (value: %s)", v.(string)) } - parsed, err := p.Parse(psMsg.Data) + data := psMsg.Data + if base64Encoded { + v, err := base64.StdEncoding.DecodeString(string(psMsg.Data)) + if err != nil { + t.Fatalf("Unable to decode expected base64-encoded message: %s", err) + } + data = []byte(v) + } + + parsed, err := p.Parse(data) if err != nil { t.Fatalf("could not parse influxdb metric from published message: %s", string(psMsg.Data)) } diff --git a/plugins/outputs/cloud_pubsub/topic_stubbed.go b/plugins/outputs/cloud_pubsub/topic_stubbed.go index 55f2e5a0a..d78d4fbd4 100644 --- a/plugins/outputs/cloud_pubsub/topic_stubbed.go +++ b/plugins/outputs/cloud_pubsub/topic_stubbed.go @@ -10,6 +10,7 @@ import ( "time" "cloud.google.com/go/pubsub" + "encoding/base64" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/parsers" @@ -180,8 +181,17 @@ func (t *stubTopic) parseIDs(msg *pubsub.Message) []string { p, _ := parsers.NewInfluxParser() metrics, err := p.Parse(msg.Data) if err != nil { - t.Fatalf("unexpected parsing error: %v", err) + // Just attempt to base64-decode first before returning error. + d, err := base64.StdEncoding.DecodeString(string(msg.Data)) + if err != nil { + t.Errorf("unable to base64-decode potential test message: %v", err) + } + metrics, err = p.Parse(d) + if err != nil { + t.Fatalf("unexpected parsing error: %v", err) + } } + ids := make([]string, len(metrics)) for i, met := range metrics { id, _ := met.GetField("value") From c3e793bb4edba0130fa9ae5c15a86cd6e06e065f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 6 Mar 2019 17:37:44 -0800 Subject: [PATCH 0678/1815] Return any errors when creating tls config (#5541) --- plugins/inputs/socket_listener/socket_listener.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go index 391427da9..ed007a00a 100644 --- a/plugins/inputs/socket_listener/socket_listener.go +++ b/plugins/inputs/socket_listener/socket_listener.go @@ -261,7 +261,7 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { tlsCfg, err := sl.ServerConfig.TLSConfig() if err != nil { - return nil + return err } if tlsCfg == nil { From d4c2d4548cda923319c05a03a835847858466427 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 6 Mar 2019 17:40:16 -0800 Subject: [PATCH 0679/1815] Update changelog --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a759dd630..dfd1b769c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## v1.10.1 [unreleased] + +#### Bugfixes + +- [#5448](https://github.com/influxdata/telegraf/pull/5448): Show error when TLS configuration cannot be loaded. 
+- [#5543](https://github.com/influxdata/telegraf/pull/5543): Add Base64-encoding/decoding for Google Cloud PubSub plugins. + ## v1.10 [2019-03-05] #### New Inputs From 9378de942d9292563f20e3f126c289fdcfa21d37 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 7 Mar 2019 12:53:25 -0800 Subject: [PATCH 0680/1815] Update gopsutil to v2.19.02 (#5552) --- Gopkg.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 03c4e4ab4..47feeb386 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -984,7 +984,7 @@ version = "v1.2.0" [[projects]] - digest = "1:066c1020d667e25a0614b56aee1f9ac47e75c77de98eddfb51e9be02c68c1577" + digest = "1:d77a85cf43b70ae61fa2543d402d782b40dca0f5f41413839b5f916782b0fab9" name = "github.com/shirou/gopsutil" packages = [ "cpu", @@ -997,8 +997,8 @@ "process", ] pruneopts = "" - revision = "071446942108a03a13cf0717275ad3bdbcb691b4" - version = "v2.19.01" + revision = "6c6abd6d1666d6b27f1c261e0f850441ba22aa3a" + version = "v2.19.02" [[projects]] branch = "master" From 03920075e4b3c7851e65978f7a9240eca87d46f3 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Fri, 8 Mar 2019 12:25:20 -0700 Subject: [PATCH 0681/1815] Fix links and add config to webhook readme (#5558) --- plugins/inputs/webhooks/README.md | 33 +++++++++++++++++++-- plugins/inputs/webhooks/filestack/README.md | 2 +- plugins/inputs/webhooks/mandrill/README.md | 2 +- plugins/inputs/webhooks/particle/README.md | 2 +- plugins/inputs/webhooks/rollbar/README.md | 2 +- plugins/inputs/webhooks/webhooks.go | 2 +- 6 files changed, 36 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/webhooks/README.md b/plugins/inputs/webhooks/README.md index 13141fc4b..c6c7daf35 100644 --- a/plugins/inputs/webhooks/README.md +++ b/plugins/inputs/webhooks/README.md @@ -13,7 +13,36 @@ $ cp config.conf.new /etc/telegraf/telegraf.conf $ sudo service telegraf start ``` -## Available webhooks + +### Configuration: + +```toml +[[inputs.webhooks]] + ## Address and port to host Webhook listener on + service_address = ":1619" + + [inputs.webhooks.filestack] + path = "/filestack" + + [inputs.webhooks.github] + path = "/github" + # secret = "" + + [inputs.webhooks.mandrill] + path = "/mandrill" + + [inputs.webhooks.rollbar] + path = "/rollbar" + + [inputs.webhooks.papertrail] + path = "/papertrail" + + [inputs.webhooks.particle] + path = "/particle" +``` + + +### Available webhooks - [Filestack](filestack/) - [Github](github/) @@ -23,7 +52,7 @@ $ sudo service telegraf start - [Particle](particle/) -## Adding new webhooks plugin +### Adding new webhooks plugin 1. Add your webhook plugin inside the `webhooks` folder 1. Your plugin must implement the `Webhook` interface diff --git a/plugins/inputs/webhooks/filestack/README.md b/plugins/inputs/webhooks/filestack/README.md index 585e6f202..7af2a780d 100644 --- a/plugins/inputs/webhooks/filestack/README.md +++ b/plugins/inputs/webhooks/filestack/README.md @@ -1,6 +1,6 @@ # Filestack webhook -You should configure your Filestack's Webhooks to point at the `webhooks` service. To do this go to `filestack.com/`, select your app and click `Credentials > Webhooks`. In the resulting page, set the `URL` to `http://:1619/filestack`, and click on `Add`. +You should configure your Filestack's Webhooks to point at the `webhooks` service. To do this go to [filestack.com](https://www.filestack.com/), select your app and click `Credentials > Webhooks`. 
In the resulting page, set the `URL` to `http://<my_ip>:1619/filestack`, and click on `Add`.
 
 ## Events
 
diff --git a/plugins/inputs/webhooks/mandrill/README.md b/plugins/inputs/webhooks/mandrill/README.md
index 2fb4914e1..9c4f3a58c 100644
--- a/plugins/inputs/webhooks/mandrill/README.md
+++ b/plugins/inputs/webhooks/mandrill/README.md
@@ -1,6 +1,6 @@
 # mandrill webhook
 
-You should configure your Mandrill's Webhooks to point at the `webhooks` service. To do this go to `mandrillapp.com/` and click `Settings > Webhooks`. In the resulting page, click on `Add a Webhook`, select all events, and set the `URL` to `http://<my_ip>:1619/mandrill`, and click on `Create Webhook`.
+You should configure your Mandrill's Webhooks to point at the `webhooks` service. To do this go to [mandrillapp.com](https://mandrillapp.com) and click `Settings > Webhooks`. In the resulting page, click on `Add a Webhook`, select all events, and set the `URL` to `http://<my_ip>:1619/mandrill`, and click on `Create Webhook`.
 
 ## Events
 
diff --git a/plugins/inputs/webhooks/particle/README.md b/plugins/inputs/webhooks/particle/README.md
index 4e3426da5..688898db0 100644
--- a/plugins/inputs/webhooks/particle/README.md
+++ b/plugins/inputs/webhooks/particle/README.md
@@ -1,7 +1,7 @@
 # particle webhooks
 
-You should configure your Particle.io's Webhooks to point at the `webhooks` service. To do this go to `(https://console.particle.io/)[https://console.particle.io]` and click `Integrations > New Integration > Webhook`. In the resulting page set `URL` to `http://<my_ip>:1619/particle`, and under `Advanced Settings` click on `JSON` and add:
+You should configure your Particle.io's Webhooks to point at the `webhooks` service. To do this go to [https://console.particle.io](https://console.particle.io/) and click `Integrations > New Integration > Webhook`. In the resulting page set `URL` to `http://<my_ip>:1619/particle`, and under `Advanced Settings` click on `JSON` and add:
 
 ```
 {
diff --git a/plugins/inputs/webhooks/rollbar/README.md b/plugins/inputs/webhooks/rollbar/README.md
index b3f1bfeaa..471dc9fd0 100644
--- a/plugins/inputs/webhooks/rollbar/README.md
+++ b/plugins/inputs/webhooks/rollbar/README.md
@@ -1,6 +1,6 @@
 # rollbar webhooks
 
-You should configure your Rollbar's Webhooks to point at the `webhooks` service. To do this go to `rollbar.com/` and click `Settings > Notifications > Webhook`. In the resulting page set `URL` to `http://<my_ip>:1619/rollbar`, and click on `Enable Webhook Integration`.
+You should configure your Rollbar's Webhooks to point at the `webhooks` service. To do this go to [rollbar.com](https://rollbar.com/) and click `Settings > Notifications > Webhook`. In the resulting page set `URL` to `http://<my_ip>:1619/rollbar`, and click on `Enable Webhook Integration`.
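For a quick local check that a configured endpoint responds, one can POST a dummy body to the listener. A hypothetical sketch against the default `:1619` address and the `/rollbar` path (the payload is a placeholder, not a real Rollbar event):

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Placeholder body; a real Rollbar payload would be a full event JSON.
	body := bytes.NewBufferString(`{"event_name": "test"}`)

	resp, err := http.Post("http://localhost:1619/rollbar", "application/json", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```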
## Events diff --git a/plugins/inputs/webhooks/webhooks.go b/plugins/inputs/webhooks/webhooks.go index fa31ec490..4baaf6ffb 100644 --- a/plugins/inputs/webhooks/webhooks.go +++ b/plugins/inputs/webhooks/webhooks.go @@ -67,7 +67,7 @@ func (wb *Webhooks) SampleConfig() string { [inputs.webhooks.particle] path = "/particle" - ` +` } func (wb *Webhooks) Description() string { From 7da57fe5f5ab019b59db74945276b8c604b73ba7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 8 Mar 2019 14:10:15 -0800 Subject: [PATCH 0682/1815] Fix incorrect option in net_response sample config --- plugins/inputs/net_response/README.md | 2 +- plugins/inputs/net_response/net_response.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/net_response/README.md b/plugins/inputs/net_response/README.md index 1982ced0c..dcfb341d5 100644 --- a/plugins/inputs/net_response/README.md +++ b/plugins/inputs/net_response/README.md @@ -30,7 +30,7 @@ verify text in the response. # expect = "ssh" ## Uncomment to remove deprecated fields; recommended for new deploys - # fieldexclude = ["result_type", "string_found"] + # fielddrop = ["result_type", "string_found"] ``` ### Metrics: diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go index 55ee0e00d..e411aa647 100644 --- a/plugins/inputs/net_response/net_response.go +++ b/plugins/inputs/net_response/net_response.go @@ -63,7 +63,7 @@ var sampleConfig = ` # expect = "ssh" ## Uncomment to remove deprecated fields - # fieldexclude = ["result_type", "string_found"] + # fielddrop = ["result_type", "string_found"] ` // SampleConfig will return a complete configuration example with details about each field. From 80155029c4cf41dd42088cddcddb1638dcc364ca Mon Sep 17 00:00:00 2001 From: Dheeraj Dwivedi Date: Sat, 9 Mar 2019 04:00:38 +0530 Subject: [PATCH 0683/1815] Add TTL field to ping input (#5556) --- plugins/inputs/ping/README.md | 3 +- plugins/inputs/ping/ping.go | 92 +++++++++++++++++++++----------- plugins/inputs/ping/ping_test.go | 42 +++++++++++++-- 3 files changed, 100 insertions(+), 37 deletions(-) diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 1083c0074..f59a6c947 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -70,6 +70,7 @@ LimitNOFILE=4096 - packets_transmitted (integer) - packets_received (integer) - percent_packets_loss (float) + - ttl (integer, Not available on Windows) - average_response_ms (integer) - minimum_response_ms (integer) - maximum_response_ms (integer) @@ -92,5 +93,5 @@ ping,url=example.org result_code=0i,average_response_ms=7i,maximum_response_ms=9 **Linux:** ``` -ping,url=example.org average_response_ms=23.066,maximum_response_ms=24.64,minimum_response_ms=22.451,packets_received=5i,packets_transmitted=5i,percent_packet_loss=0,result_code=0i,standard_deviation_ms=0.809 1535747258000000000 +ping,url=example.org average_response_ms=23.066,ttl=63,maximum_response_ms=24.64,minimum_response_ms=22.451,packets_received=5i,packets_transmitted=5i,percent_packet_loss=0,result_code=0i,standard_deviation_ms=0.809 1535747258000000000 ``` diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 69db140ae..28e967a85 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -7,6 +7,7 @@ import ( "fmt" "net" "os/exec" + "regexp" "runtime" "strconv" "strings" @@ -151,7 +152,7 @@ func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) { } } - trans, rec, min, avg, max, stddev, err := 
processPingOutput(out)
+	trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(out)
 	if err != nil {
 		// fatal error
 		acc.AddError(fmt.Errorf("%s: %s", err, u))
@@ -164,6 +165,9 @@ func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) {
 	fields["packets_transmitted"] = trans
 	fields["packets_received"] = rec
 	fields["percent_packet_loss"] = loss
+	if ttl >= 0 {
+		fields["ttl"] = ttl
+	}
 	if min >= 0 {
 		fields["minimum_response_ms"] = min
 	}
@@ -253,50 +257,74 @@ func (p *Ping) args(url string, system string) []string {
 // round-trip min/avg/max/stddev = 34.843/43.508/52.172/8.664 ms
 //
 // It returns (<transmitted packets>, <received packets>, <average response>)
-func processPingOutput(out string) (int, int, float64, float64, float64, float64, error) {
-	var trans, recv int
+func processPingOutput(out string) (int, int, int, float64, float64, float64, float64, error) {
+	var trans, recv, ttl int = 0, 0, -1
 	var min, avg, max, stddev float64 = -1.0, -1.0, -1.0, -1.0
 	// Set this error to nil if we find a 'transmitted' line
 	err := errors.New("Fatal error processing ping output")
 	lines := strings.Split(out, "\n")
 	for _, line := range lines {
-		if strings.Contains(line, "transmitted") &&
+		// Reading only first TTL, ignoring other TTL messages
+		if ttl == -1 && strings.Contains(line, "ttl=") {
+			ttl, err = getTTL(line)
+		} else if strings.Contains(line, "transmitted") &&
 			strings.Contains(line, "received") {
-			stats := strings.Split(line, ", ")
-			// Transmitted packets
-			trans, err = strconv.Atoi(strings.Split(stats[0], " ")[0])
+			trans, recv, err = getPacketStats(line, trans, recv)
 			if err != nil {
-				return trans, recv, min, avg, max, stddev, err
-			}
-			// Received packets
-			recv, err = strconv.Atoi(strings.Split(stats[1], " ")[0])
-			if err != nil {
-				return trans, recv, min, avg, max, stddev, err
+				return trans, recv, ttl, min, avg, max, stddev, err
 			}
 		} else if strings.Contains(line, "min/avg/max") {
-			stats := strings.Split(line, " ")[3]
-			data := strings.Split(stats, "/")
-			min, err = strconv.ParseFloat(data[0], 64)
+			min, avg, max, stddev, err = checkRoundTripTimeStats(line, min, avg, max, stddev)
 			if err != nil {
-				return trans, recv, min, avg, max, stddev, err
-			}
-			avg, err = strconv.ParseFloat(data[1], 64)
-			if err != nil {
-				return trans, recv, min, avg, max, stddev, err
-			}
-			max, err = strconv.ParseFloat(data[2], 64)
-			if err != nil {
-				return trans, recv, min, avg, max, stddev, err
-			}
-			if len(data) == 4 {
-				stddev, err = strconv.ParseFloat(data[3], 64)
-				if err != nil {
-					return trans, recv, min, avg, max, stddev, err
-				}
+				return trans, recv, ttl, min, avg, max, stddev, err
 			}
 		}
 	}
-	return trans, recv, min, avg, max, stddev, err
+	return trans, recv, ttl, min, avg, max, stddev, err
+}
+
+func getPacketStats(line string, trans, recv int) (int, int, error) {
+	stats := strings.Split(line, ", ")
+	// Transmitted packets
+	trans, err := strconv.Atoi(strings.Split(stats[0], " ")[0])
+	if err != nil {
+		return trans, recv, err
+	}
+	// Received packets
+	recv, err = strconv.Atoi(strings.Split(stats[1], " ")[0])
+	return trans, recv, err
+}
+
+func getTTL(line string) (int, error) {
+	ttlLine := regexp.MustCompile(`ttl=(\d+)`)
+	ttlMatch := ttlLine.FindStringSubmatch(line)
+	return strconv.Atoi(ttlMatch[1])
+}
+
+func checkRoundTripTimeStats(line string, min, avg, max,
+	stddev float64) (float64, float64, float64, float64, error) {
+	stats := strings.Split(line, " ")[3]
+	data := strings.Split(stats, "/")
+
+	min, err := strconv.ParseFloat(data[0], 64)
+	if err != nil {
+		return min, avg, max, stddev, err
+	}
+	avg, err = 
strconv.ParseFloat(data[1], 64) + if err != nil { + return min, avg, max, stddev, err + } + max, err = strconv.ParseFloat(data[2], 64) + if err != nil { + return min, avg, max, stddev, err + } + if len(data) == 4 { + stddev, err = strconv.ParseFloat(data[3], 64) + if err != nil { + return min, avg, max, stddev, err + } + } + return min, avg, max, stddev, err } func init() { diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index ad6fa306a..8870d4156 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -61,8 +61,9 @@ ping: -i interval too short: Operation not permitted // Test that ping command output is processed properly func TestProcessPingOutput(t *testing.T) { - trans, rec, min, avg, max, stddev, err := processPingOutput(bsdPingOutput) + trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(bsdPingOutput) assert.NoError(t, err) + assert.Equal(t, 55, ttl, "ttl value is 55") assert.Equal(t, 5, trans, "5 packets were transmitted") assert.Equal(t, 5, rec, "5 packets were transmitted") assert.InDelta(t, 15.087, min, 0.001) @@ -70,8 +71,9 @@ func TestProcessPingOutput(t *testing.T) { assert.InDelta(t, 27.263, max, 0.001) assert.InDelta(t, 4.076, stddev, 0.001) - trans, rec, min, avg, max, stddev, err = processPingOutput(linuxPingOutput) + trans, rec, ttl, min, avg, max, stddev, err = processPingOutput(linuxPingOutput) assert.NoError(t, err) + assert.Equal(t, 63, ttl, "ttl value is 63") assert.Equal(t, 5, trans, "5 packets were transmitted") assert.Equal(t, 5, rec, "5 packets were transmitted") assert.InDelta(t, 35.225, min, 0.001) @@ -79,8 +81,9 @@ func TestProcessPingOutput(t *testing.T) { assert.InDelta(t, 51.806, max, 0.001) assert.InDelta(t, 5.325, stddev, 0.001) - trans, rec, min, avg, max, stddev, err = processPingOutput(busyBoxPingOutput) + trans, rec, ttl, min, avg, max, stddev, err = processPingOutput(busyBoxPingOutput) assert.NoError(t, err) + assert.Equal(t, 56, ttl, "ttl value is 56") assert.Equal(t, 4, trans, "4 packets were transmitted") assert.Equal(t, 4, rec, "4 packets were transmitted") assert.InDelta(t, 15.810, min, 0.001) @@ -89,10 +92,37 @@ func TestProcessPingOutput(t *testing.T) { assert.InDelta(t, -1.0, stddev, 0.001) } +// Linux ping output with varying TTL +var linuxPingOutputWithVaryingTTL = ` +PING www.google.com (216.58.218.164) 56(84) bytes of data. 
+64 bytes from host.net (216.58.218.164): icmp_seq=1 ttl=63 time=35.2 ms +64 bytes from host.net (216.58.218.164): icmp_seq=2 ttl=255 time=42.3 ms +64 bytes from host.net (216.58.218.164): icmp_seq=3 ttl=64 time=45.1 ms +64 bytes from host.net (216.58.218.164): icmp_seq=4 ttl=64 time=43.5 ms +64 bytes from host.net (216.58.218.164): icmp_seq=5 ttl=255 time=51.8 ms + +--- www.google.com ping statistics --- +5 packets transmitted, 5 received, 0% packet loss, time 4010ms +rtt min/avg/max/mdev = 35.225/43.628/51.806/5.325 ms +` + +// Test that ping command output is processed properly +func TestProcessPingOutputWithVaryingTTL(t *testing.T) { + trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(linuxPingOutputWithVaryingTTL) + assert.NoError(t, err) + assert.Equal(t, 63, ttl, "ttl value is 63") + assert.Equal(t, 5, trans, "5 packets were transmitted") + assert.Equal(t, 5, rec, "5 packets were transmitted") + assert.InDelta(t, 35.225, min, 0.001) + assert.InDelta(t, 43.628, avg, 0.001) + assert.InDelta(t, 51.806, max, 0.001) + assert.InDelta(t, 5.325, stddev, 0.001) +} + // Test that processPingOutput returns an error when 'ping' fails to run, such // as when an invalid argument is provided func TestErrorProcessPingOutput(t *testing.T) { - _, _, _, _, _, _, err := processPingOutput(fatalPingOutput) + _, _, _, _, _, _, _, err := processPingOutput(fatalPingOutput) assert.Error(t, err, "Error was expected from processPingOutput") } @@ -160,6 +190,7 @@ func TestPingGather(t *testing.T) { "packets_transmitted": 5, "packets_received": 5, "percent_packet_loss": 0.0, + "ttl": 63, "minimum_response_ms": 35.225, "average_response_ms": 43.628, "maximum_response_ms": 51.806, @@ -201,6 +232,7 @@ func TestLossyPingGather(t *testing.T) { "packets_transmitted": 5, "packets_received": 3, "percent_packet_loss": 40.0, + "ttl": 63, "minimum_response_ms": 35.225, "average_response_ms": 44.033, "maximum_response_ms": 51.806, @@ -262,6 +294,8 @@ func TestFatalPingGather(t *testing.T) { "Fatal ping should not have packet measurements") assert.False(t, acc.HasMeasurement("percent_packet_loss"), "Fatal ping should not have packet measurements") + assert.False(t, acc.HasMeasurement("ttl"), + "Fatal ping should not have packet measurements") assert.False(t, acc.HasMeasurement("minimum_response_ms"), "Fatal ping should not have packet measurements") assert.False(t, acc.HasMeasurement("average_response_ms"), From bdb9d5c842736608e9789da6ac5c2a41377360b2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 8 Mar 2019 14:32:46 -0800 Subject: [PATCH 0684/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index dfd1b769c..eac3abe75 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +## v1.11 [unreleased] + +#### Features + +- [#5556](https://github.com/influxdata/telegraf/pull/5556): Add TTL field to ping input. 
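The TTL parsing added in this patch comes down to a single regex capture over the first line that carries a `ttl=` token. A minimal sketch using one of the fixture lines above:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	line := "64 bytes from host.net (216.58.218.164): icmp_seq=1 ttl=63 time=35.2 ms"

	// Same pattern the plugin compiles in getTTL.
	ttlRe := regexp.MustCompile(`ttl=(\d+)`)
	match := ttlRe.FindStringSubmatch(line)
	if match == nil {
		panic("no ttl found")
	}

	ttl, err := strconv.Atoi(match[1])
	if err != nil {
		panic(err)
	}
	fmt.Println(ttl) // 63
}
```

Because only the first match is kept, output with varying TTLs (as in the fixture with `ttl=255` lines) still reports the TTL of the first reply.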
+ ## v1.10.1 [unreleased] #### Bugfixes From 91cd17fd4094a15fdbea3121c73757b8d33f15d2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 8 Mar 2019 14:54:16 -0800 Subject: [PATCH 0685/1815] Use random available port in prometheus output tests (#5555) --- .../prometheus_client/prometheus_client.go | 26 ++++++++++++++++++- .../prometheus_client_tls_test.go | 23 +++++++--------- 2 files changed, 35 insertions(+), 14 deletions(-) diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 5611a0a9e..db7b0c207 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -8,6 +8,7 @@ import ( "log" "net" "net/http" + "net/url" "regexp" "sort" "strconv" @@ -70,6 +71,7 @@ type PrometheusClient struct { tlsint.ServerConfig server *http.Server + url string sync.Mutex // fam is the non-expired MetricFamily by Prometheus metric name. @@ -107,7 +109,7 @@ var sampleConfig = ` ## If set, enable TLS with the given certificate. # tls_cert = "/etc/ssl/telegraf.crt" # tls_key = "/etc/ssl/telegraf.key" - + ## Set one or more allowed client CA certificate file names to ## enable mutually authenticated TLS connections # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] @@ -213,6 +215,8 @@ func (p *PrometheusClient) Connect() error { return err } + p.url = createURL(tlsConfig, listener, p.Path) + go func() { err := p.server.Serve(listener) if err != nil && err != http.ErrServerClosed { @@ -224,11 +228,31 @@ func (p *PrometheusClient) Connect() error { return nil } +// Address returns the address the plugin is listening on. If not listening +// an empty string is returned. +func (p *PrometheusClient) URL() string { + return p.url +} + +func createURL(tlsConfig *tls.Config, listener net.Listener, path string) string { + u := url.URL{ + Scheme: "http", + Host: listener.Addr().String(), + Path: path, + } + + if tlsConfig != nil { + u.Scheme = "https" + } + return u.String() +} + func (p *PrometheusClient) Close() error { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() err := p.server.Shutdown(ctx) prometheus.Unregister(p) + p.url = "" return err } diff --git a/plugins/outputs/prometheus_client/prometheus_client_tls_test.go b/plugins/outputs/prometheus_client/prometheus_client_tls_test.go index d7484d61f..bcf6b4381 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_tls_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_tls_test.go @@ -3,25 +3,26 @@ package prometheus_client_test import ( "crypto/tls" "fmt" + "net/http" + "testing" + "github.com/influxdata/telegraf/plugins/outputs/prometheus_client" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" "github.com/stretchr/testify/require" - "net/http" - "testing" ) var pki = testutil.NewPKI("../../../testutil/pki") var configWithTLS = fmt.Sprintf(` - listen = "127.0.0.1:9090" + listen = "127.0.0.1:0" tls_allowed_cacerts = ["%s"] tls_cert = "%s" tls_key = "%s" `, pki.TLSServerConfig().TLSAllowedCACerts[0], pki.TLSServerConfig().TLSCert, pki.TLSServerConfig().TLSKey) var configWithoutTLS = ` - listen = "127.0.0.1:9090" + listen = "127.0.0.1:0" ` type PrometheusClientTestContext struct { @@ -33,11 +34,10 @@ type PrometheusClientTestContext struct { func TestWorksWithoutTLS(t *testing.T) { tc := buildTestContext(t, []byte(configWithoutTLS)) err := tc.Output.Connect() + require.NoError(t, err) defer tc.Output.Close() - require.NoError(t, 
err) - - response, err := tc.Client.Get("http://localhost:9090/metrics") + response, err := tc.Client.Get(tc.Output.URL()) require.NoError(t, err) require.NoError(t, err) @@ -47,24 +47,21 @@ func TestWorksWithoutTLS(t *testing.T) { func TestWorksWithTLS(t *testing.T) { tc := buildTestContext(t, []byte(configWithTLS)) err := tc.Output.Connect() - defer tc.Output.Close() require.NoError(t, err) + defer tc.Output.Close() - response, err := tc.Client.Get("https://localhost:9090/metrics") + response, err := tc.Client.Get(tc.Output.URL()) require.NoError(t, err) require.NoError(t, err) require.Equal(t, response.StatusCode, http.StatusOK) - response, err = tc.Client.Get("http://localhost:9090/metrics") - require.Error(t, err) - tr := &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, } client := &http.Client{Transport: tr} - response, err = client.Get("https://localhost:9090/metrics") + response, err = client.Get(tc.Output.URL()) require.Error(t, err) } From 92b01ab4f9543d17bcf87648dd5555cd44cfb65b Mon Sep 17 00:00:00 2001 From: Soulou Date: Mon, 11 Mar 2019 01:21:30 +0100 Subject: [PATCH 0686/1815] Fix how major and minor identifiers of block devices are read. The current implementation assure that the major and the minor are coded on one byte. But they are not: ``` brw-rw---- 1 root disk 252, 290 Feb 25 11:36 dm-290 ``` 290 as minor in this example is over 1 byte. So after wondering why all my devices iops weren't correctly stored, I found out that several points were added for some disks. For `dm-290` it was overriding `252:34`, instead of getting udev stats for `252:290`. The solution is here: https://sites.uclouvain.be/SystInfo/usr/include/sys/sysmacros.h.html The implementation is directly taken from this, fixing my bug. --- plugins/inputs/diskio/diskio_linux.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/diskio/diskio_linux.go b/plugins/inputs/diskio/diskio_linux.go index d27fd3b46..2d3e5f6b0 100644 --- a/plugins/inputs/diskio/diskio_linux.go +++ b/plugins/inputs/diskio/diskio_linux.go @@ -36,7 +36,7 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { } major := stat.Rdev >> 8 & 0xff - minor := stat.Rdev & 0xff + minor := (stat.Rdev & 0xff) | (stat.Rdev>>12)&^0xff udevDataPath := fmt.Sprintf("%s/b%d:%d", udevPath, major, minor) di := map[string]string{} From 1752619e356d88b43903c06a16d315938eb1ab8d Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Mon, 11 Mar 2019 14:08:53 -0400 Subject: [PATCH 0687/1815] Remove calls to destroy on ViewManager (#5557) --- plugins/inputs/vsphere/finder.go | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/inputs/vsphere/finder.go b/plugins/inputs/vsphere/finder.go index 372aa5e3b..599655402 100644 --- a/plugins/inputs/vsphere/finder.go +++ b/plugins/inputs/vsphere/finder.go @@ -79,7 +79,6 @@ func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference, } m := view.NewManager(f.client.Client.Client) - defer m.Destroy(ctx) v, err := m.CreateContainerView(ctx, root, ct, false) if err != nil { return err From 19988a94debe312f4626b2c99675bc7803a4da3a Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Mon, 11 Mar 2019 14:16:32 -0400 Subject: [PATCH 0688/1815] Add use_int_samples option for backwards compatibility (#5563) --- plugins/inputs/vsphere/endpoint.go | 17 +++++++++++++++-- plugins/inputs/vsphere/vsphere.go | 8 ++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go 
index 192a4a487..694efb574 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "log" + "math" "math/rand" "net/url" "regexp" @@ -948,7 +949,7 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, e.populateTags(&objectRef, resourceType, res, t, &v) nValues := 0 - alignedInfo, alignedValues := alignSamples(em.SampleInfo, v.Value, interval) // TODO: Estimate interval + alignedInfo, alignedValues := alignSamples(em.SampleInfo, v.Value, interval) for idx, sample := range alignedInfo { // According to the docs, SampleInfo and Value should have the same length, but we've seen corrupted @@ -981,7 +982,11 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, if info.UnitInfo.GetElementDescription().Key == "percent" { bucket.fields[fn] = float64(v) / 100.0 } else { - bucket.fields[fn] = v + if e.Parent.UseIntSamples { + bucket.fields[fn] = int64(round(v)) + } else { + bucket.fields[fn] = v + } } count++ @@ -1082,3 +1087,11 @@ func cleanDiskTag(disk string) string { // Remove enclosing "<>" return strings.TrimSuffix(strings.TrimPrefix(disk, "<"), ">") } + +func round(x float64) float64 { + t := math.Trunc(x) + if math.Abs(x-t) >= 0.5 { + return t + math.Copysign(1, x) + } + return t +} diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go index 809026e3e..852dd5e25 100644 --- a/plugins/inputs/vsphere/vsphere.go +++ b/plugins/inputs/vsphere/vsphere.go @@ -40,6 +40,7 @@ type VSphere struct { DatastoreMetricExclude []string DatastoreInclude []string Separator string + UseIntSamples bool MaxQueryObjects int MaxQueryMetrics int @@ -199,6 +200,12 @@ var sampleConfig = ` ## timeout applies to any of the api request made to vcenter # timeout = "60s" + ## When set to true, all samples are sent as integers. This makes the output data types backwards compatible + ## with Telegraf 1.9 or lower. Normally all samples from vCenter, with the exception of percentages, are + ## integer values, but under some conditions, some averaging takes place internally in the plugin. Setting this + ## flag to "false" will send values as floats to preserve the full precision when averaging takes place. + # use_int_samples = true + ## Optional SSL Config # ssl_ca = "/path/to/cafile" # ssl_cert = "/path/to/certfile" @@ -312,6 +319,7 @@ func init() { DatastoreMetricExclude: nil, DatastoreInclude: []string{"/*/datastore/**"}, Separator: "_", + UseIntSamples: true, MaxQueryObjects: 256, MaxQueryMetrics: 256, From f298f87f0ccc6555b8622c6b4b7d555613526575 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 11 Mar 2019 11:55:20 -0700 Subject: [PATCH 0689/1815] Update changelog; vsphere readme --- CHANGELOG.md | 4 ++- plugins/inputs/vsphere/README.md | 58 ++++++++++++++++++-------------- 2 files changed, 36 insertions(+), 26 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index eac3abe75..6411fe8bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,8 +8,10 @@ #### Bugfixes -- [#5448](https://github.com/influxdata/telegraf/pull/5448): Show error when TLS configuration cannot be loaded. +- [#5448](https://github.com/influxdata/telegraf/issues/5448): Show error when TLS configuration cannot be loaded. - [#5543](https://github.com/influxdata/telegraf/pull/5543): Add Base64-encoding/decoding for Google Cloud PubSub plugins. +- [#5565](https://github.com/influxdata/telegraf/issues/5565): Fix type compatibility in vsphere plugin with use_int_samples option. 
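The `round` helper added to endpoint.go above implements half-away-from-zero rounding (the same semantics as `math.Round`); a quick standalone check of its behavior:

```go
package main

import (
	"fmt"
	"math"
)

// Same logic as the helper added to endpoint.go above: truncate, then
// step one unit toward the sign of x when the fraction is at least 0.5.
func round(x float64) float64 {
	t := math.Trunc(x)
	if math.Abs(x-t) >= 0.5 {
		return t + math.Copysign(1, x)
	}
	return t
}

func main() {
	fmt.Println(round(42.4))  // 42
	fmt.Println(round(42.5))  // 43
	fmt.Println(round(-42.5)) // -43
}
```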
+- [#5492](https://github.com/influxdata/telegraf/issues/5492): Fix vsphere input shows failed task in vCenter. ## v1.10 [2019-03-05] diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md index e14de2cee..ae7cdc37b 100644 --- a/plugins/inputs/vsphere/README.md +++ b/plugins/inputs/vsphere/README.md @@ -1,7 +1,7 @@ # VMware vSphere Input Plugin The VMware vSphere plugin uses the vSphere API to gather metrics from multiple vCenter servers. - + * Clusters * Hosts * VMs @@ -9,7 +9,7 @@ The VMware vSphere plugin uses the vSphere API to gather metrics from multiple v ## Configuration -NOTE: To disable collection of a specific resource type, simply exclude all metrics using the XX_metric_exclude. +NOTE: To disable collection of a specific resource type, simply exclude all metrics using the XX_metric_exclude. For example, to disable collection of VMs, add this: ``` @@ -52,7 +52,7 @@ vm_metric_exclude = [ "*" ] "net.droppedRx.summation", "net.droppedTx.summation", "net.usage.average", - "power.power.average", + "power.power.average", "virtualDisk.numberReadAveraged.average", "virtualDisk.numberWriteAveraged.average", "virtualDisk.read.average", @@ -67,7 +67,7 @@ vm_metric_exclude = [ "*" ] # vm_metric_exclude = [] ## Nothing is excluded by default # vm_instances = true ## true by default - ## Hosts + ## Hosts ## Typical host metrics (if omitted or empty, all metrics are collected) # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected) host_metric_include = [ @@ -121,25 +121,25 @@ vm_metric_exclude = [ "*" ] # host_metric_exclude = [] ## Nothing excluded by default # host_instances = true ## true by default - ## Clusters + ## Clusters # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) # cluster_metric_include = [] ## if omitted or empty, all metrics are collected # cluster_metric_exclude = [] ## Nothing excluded by default - # cluster_instances = false ## false by default + # cluster_instances = false ## false by default - ## Datastores + ## Datastores # cluster_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected) # datastore_metric_include = [] ## if omitted or empty, all metrics are collected # datastore_metric_exclude = [] ## Nothing excluded by default - # datastore_instances = false ## false by default + # datastore_instances = false ## false by default ## Datacenters # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) datacenter_metric_include = [] ## if omitted or empty, all metrics are collected datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. - # datacenter_instances = false ## false by default + # datacenter_instances = false ## false by default - ## Plugin Settings + ## Plugin Settings ## separator character to use for measurement and field names (default: "_") # separator = "_" @@ -166,17 +166,25 @@ vm_metric_exclude = [ "*" ] ## timeout applies to any of the api request made to vcenter # timeout = "60s" + ## When set to true, all samples are sent as integers. This makes the output + ## data types backwards compatible with Telegraf 1.9 or lower. Normally all + ## samples from vCenter, with the exception of percentages, are integer + ## values, but under some conditions, some averaging takes place internally in + ## the plugin. 
Setting this flag to "false" will send values as floats to + ## preserve the full precision when averaging takes place. + # use_int_samples = true + ## Optional SSL Config # ssl_ca = "/path/to/cafile" # ssl_cert = "/path/to/certfile" # ssl_key = "/path/to/keyfile" ## Use SSL but skip chain & host verification # insecure_skip_verify = false -``` +``` ### Objects and Metrics Per Query -By default, in vCenter's configuration a limit is set to the number of entities that are included in a performance chart query. Default settings for vCenter 6.5 and above is 256. Prior versions of vCenter have this set to 64. +By default, in vCenter's configuration a limit is set to the number of entities that are included in a performance chart query. Default settings for vCenter 6.5 and above is 256. Prior versions of vCenter have this set to 64. A vCenter administrator can change this setting, see this [VMware KB article](https://kb.vmware.com/s/article/2107096) for more information. Any modification should be reflected in this plugin by modifying the parameter `max_query_objects` @@ -233,23 +241,23 @@ to a file system. A vSphere inventory has a structure similar to this: #### Using Inventory Paths Using familiar UNIX-style paths, one could select e.g. VM2 with the path ```/DC0/vm/VM2```. -Often, we want to select a group of resource, such as all the VMs in a folder. We could use the path ```/DC0/vm/Folder1/*``` for that. +Often, we want to select a group of resource, such as all the VMs in a folder. We could use the path ```/DC0/vm/Folder1/*``` for that. Another possibility is to select objects using a partial name, such as ```/DC0/vm/Folder1/hadoop*``` yielding all vms in Folder1 with a name starting with "hadoop". Finally, due to the arbitrary nesting of the folder structure, we need a "recursive wildcard" for traversing multiple folders. We use the "**" symbol for that. If we want to look for a VM with a name starting with "hadoop" in any folder, we could use the following path: ```/DC0/vm/**/hadoop*``` #### Multiple paths to VMs -As we can see from the example tree above, VMs appear both in its on folder under the datacenter, as well as under the hosts. This is useful when you like to select VMs on a specific host. For example, ```/DC0/host/Cluster1/Host1/hadoop*``` selects all VMs with a name starting with "hadoop" that are running on Host1. +As we can see from the example tree above, VMs appear both in its on folder under the datacenter, as well as under the hosts. This is useful when you like to select VMs on a specific host. For example, ```/DC0/host/Cluster1/Host1/hadoop*``` selects all VMs with a name starting with "hadoop" that are running on Host1. We can extend this to looking at a cluster level: ```/DC0/host/Cluster1/*/hadoop*```. This selects any VM matching "hadoop*" on any host in Cluster1. ## Performance Considerations ### Realtime vs. historical metrics -vCenter keeps two different kinds of metrics, known as realtime and historical metrics. +vCenter keeps two different kinds of metrics, known as realtime and historical metrics. -* Realtime metrics: Avaialable at a 20 second granularity. These metrics are stored in memory and are very fast and cheap to query. Our tests have shown that a complete set of realtime metrics for 7000 virtual machines can be obtained in less than 20 seconds. Realtime metrics are only available on **ESXi hosts** and **virtual machine** resources. Realtime metrics are only stored for 1 hour in vCenter. +* Realtime metrics: Avaialable at a 20 second granularity. 
These metrics are stored in memory and are very fast and cheap to query. Our tests have shown that a complete set of realtime metrics for 7000 virtual machines can be obtained in less than 20 seconds. Realtime metrics are only available on **ESXi hosts** and **virtual machine** resources. Realtime metrics are only stored for 1 hour in vCenter. * Historical metrics: Available at a 5 minute, 30 minutes, 2 hours and 24 hours rollup levels. The vSphere Telegraf plugin only uses the 5 minute rollup. These metrics are stored in the vCenter database and can be expensive and slow to query. Historical metrics are the only type of metrics available for **clusters**, **datastores** and **datacenters**. For more information, refer to the vSphere documentation here: https://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch16_Performance.18.2.html @@ -293,7 +301,7 @@ This will disrupt the metric collection and can result in missed samples. The be host_metric_exclude = ["*"] # Exclude realtime metrics vm_metric_exclude = ["*"] # Exclude realtime metrics - max_query_metrics = 256 + max_query_metrics = 256 collect_concurrency = 3 ``` @@ -303,7 +311,7 @@ The ```max_query_metrics``` determines the maximum number of metrics to attempt ```2019-01-21T03:24:18Z W! [input.vsphere] Configured max_query_metrics is 256, but server limits it to 64. Reducing.``` -You may ask a vCenter administrator to increase this limit to help boost performance. +You may ask a vCenter administrator to increase this limit to help boost performance. ### Cluster metrics and the max_query_metrics setting @@ -313,7 +321,7 @@ Cluster metrics are handled a bit differently by vCenter. They are aggregated fr There are two ways of addressing this: * Ask your vCenter administrator to set ```config.vpxd.stats.maxQueryMetrics``` to a number that's higher than the total number of virtual machines managed by a vCenter instance. -* Exclude the cluster metrics and use either the basicstats aggregator to calculate sums and averages per cluster or use queries in the visualization tool to obtain the same result. +* Exclude the cluster metrics and use either the basicstats aggregator to calculate sums and averages per cluster or use queries in the visualization tool to obtain the same result. ### Concurrency settings @@ -321,7 +329,7 @@ The vSphere plugin allows you to specify two concurrency settings: * ```collect_concurrency```: The maximum number of simultaneous queries for performance metrics allowed per resource type. * ```discover_concurrency```: The maximum number of simultaneous queries for resource discovery allowed. -While a higher level of concurrency typically has a positive impact on performance, increasing these numbers too much can cause performance issues at the vCenter server. A rule of thumb is to set these parameters to the number of virtual machines divided by 1500 and rounded up to the nearest integer. +While a higher level of concurrency typically has a positive impact on performance, increasing these numbers too much can cause performance issues at the vCenter server. A rule of thumb is to set these parameters to the number of virtual machines divided by 1500 and rounded up to the nearest integer. 
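As a worked example of the rule of thumb above (assuming a hypothetical fleet of 4000 VMs), the suggested concurrency is the VM count divided by 1500, rounded up:

```go
package main

import "fmt"

func main() {
	vms := 4000

	// Ceiling division: VMs / 1500, rounded up to the nearest integer.
	concurrency := (vms + 1499) / 1500
	fmt.Println(concurrency) // 3
}
```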
## Measurements & Fields @@ -342,7 +350,7 @@ While a higher level of concurrency typically has a positive impact on performan - Storage Path: commands, latency, # reads/writes - System Resources: cpu active, cpu max, cpu running, cpu usage, mem allocated, mem consumed, mem shared, swap - System: uptime - - Flash Module: active VMDKs + - Flash Module: active VMDKs - VM Stats: - CPU: demand, usage, readiness, cost, mhz - Datastore: latency, # reads/writes @@ -352,12 +360,12 @@ While a higher level of concurrency typically has a positive impact on performan - Power: energy, usage - Res CPU: active, max, running - System: operating system uptime, uptime - - Virtual Disk: seeks, # reads/writes, latency, load + - Virtual Disk: seeks, # reads/writes, latency, load - Datastore stats: - - Disk: Capacity, provisioned, used + - Disk: Capacity, provisioned, used For a detailed list of commonly available metrics, please refer to [METRICS.md](METRICS.md) - + ## Tags - all metrics @@ -380,7 +388,7 @@ For a detailed list of commonly available metrics, please refer to [METRICS.md]( - interface (name of network interface) - storageAdapter stats for Host - adapter (name of storage adapter) -- storagePath stats for Host +- storagePath stats for Host - path (id of storage path) - sys.resource* stats for Host - resource (resource type) From 9db15651ea2b3bf1816885afe5c3780edbbd4771 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 11 Mar 2019 12:00:20 -0700 Subject: [PATCH 0690/1815] Fix ClusterRole aggregation documentation in kube_inventory --- plugins/inputs/kube_inventory/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md index db13c86b9..7bcb63d14 100644 --- a/plugins/inputs/kube_inventory/README.md +++ b/plugins/inputs/kube_inventory/README.md @@ -83,6 +83,7 @@ aggregationRule: clusterRoleSelectors: - matchLabels: rbac.authorization.k8s.io/aggregate-view-telegraf: "true" + - matchLabels: rbac.authorization.k8s.io/aggregate-to-view: "true" rules: [] # Rules are automatically filled in by the controller manager. 
``` From c61c48e10b443358bf6bb7725da6dd8b466ad1a2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 11 Mar 2019 12:31:48 -0700 Subject: [PATCH 0691/1815] Remove trailing whitespace and wordwrap readme in vsphere --- plugins/inputs/vsphere/vsphere.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go index 852dd5e25..d64b5273d 100644 --- a/plugins/inputs/vsphere/vsphere.go +++ b/plugins/inputs/vsphere/vsphere.go @@ -90,7 +90,7 @@ var sampleConfig = ` "net.droppedRx.summation", "net.droppedTx.summation", "net.usage.average", - "power.power.average", + "power.power.average", "virtualDisk.numberReadAveraged.average", "virtualDisk.numberWriteAveraged.average", "virtualDisk.read.average", @@ -105,7 +105,7 @@ var sampleConfig = ` # vm_metric_exclude = [] ## Nothing is excluded by default # vm_instances = true ## true by default - ## Hosts + ## Hosts ## Typical host metrics (if omitted or empty, all metrics are collected) host_metric_include = [ "cpu.coreUtilization.average", @@ -158,12 +158,12 @@ var sampleConfig = ` # host_metric_exclude = [] ## Nothing excluded by default # host_instances = true ## true by default - ## Clusters + ## Clusters # cluster_metric_include = [] ## if omitted or empty, all metrics are collected # cluster_metric_exclude = [] ## Nothing excluded by default # cluster_instances = false ## false by default - ## Datastores + ## Datastores # datastore_metric_include = [] ## if omitted or empty, all metrics are collected # datastore_metric_exclude = [] ## Nothing excluded by default # datastore_instances = false ## false by default for Datastores only @@ -173,7 +173,7 @@ var sampleConfig = ` datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. # datacenter_instances = false ## false by default for Datastores only - ## Plugin Settings + ## Plugin Settings ## separator character to use for measurement and field names (default: "_") # separator = "_" @@ -200,10 +200,12 @@ var sampleConfig = ` ## timeout applies to any of the api request made to vcenter # timeout = "60s" - ## When set to true, all samples are sent as integers. This makes the output data types backwards compatible - ## with Telegraf 1.9 or lower. Normally all samples from vCenter, with the exception of percentages, are - ## integer values, but under some conditions, some averaging takes place internally in the plugin. Setting this - ## flag to "false" will send values as floats to preserve the full precision when averaging takes place. + ## When set to true, all samples are sent as integers. This makes the output + ## data types backwards compatible with Telegraf 1.9 or lower. Normally all + ## samples from vCenter, with the exception of percentages, are integer + ## values, but under some conditions, some averaging takes place internally in + ## the plugin. Setting this flag to "false" will send values as floats to + ## preserve the full precision when averaging takes place. # use_int_samples = true ## Optional SSL Config From 2566210df1e1ffba552b3379a09b443f3ddc3e3f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 11 Mar 2019 12:32:45 -0700 Subject: [PATCH 0692/1815] Update telegraf.conf --- etc/telegraf.conf | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 5c978aa59..4c3de469c 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -346,6 +346,9 @@ # ## Optional. 
Specifies a timeout for requests to the PubSub API. # # publish_timeout = "30s" # +# ## Optional. If true, published PubSub message data will be base64-encoded. +# # base64_data = false +# # ## Optional. PubSub attributes to add to metrics. # # [[inputs.pubsub.attributes]] # # my_attr = "tag_value" @@ -3055,7 +3058,7 @@ # # expect = "ssh" # # ## Uncomment to remove deprecated fields -# # fieldexclude = ["result_type", "string_found"] +# # fielddrop = ["result_type", "string_found"] # # Read TCP metrics such as established, time wait and sockets counts. @@ -4202,6 +4205,10 @@ # ## 1. Note this setting does not limit the number of messages that can be # ## processed concurrently (use "max_outstanding_messages" instead). # # max_receiver_go_routines = 0 +# +# ## Optional. If true, Telegraf will attempt to base64 decode the +# ## PubSub message data before parsing +# # base64_data = false # # Google Cloud Pub/Sub Push HTTP listener @@ -5205,6 +5212,14 @@ # ## timeout applies to any of the api request made to vcenter # # timeout = "60s" # +# ## When set to true, all samples are sent as integers. This makes the output +# ## data types backwards compatible with Telegraf 1.9 or lower. Normally all +# ## samples from vCenter, with the exception of percentages, are integer +# ## values, but under some conditions, some averaging takes place internally in +# ## the plugin. Setting this flag to "false" will send values as floats to +# ## preserve the full precision when averaging takes place. +# # use_int_samples = true +# # ## Optional SSL Config # # ssl_ca = "/path/to/cafile" # # ssl_cert = "/path/to/certfile" From 88e0cb16e15361122474f86cbf80bb0ccff6a695 Mon Sep 17 00:00:00 2001 From: Dimitri Saingre Date: Mon, 11 Mar 2019 23:36:38 +0100 Subject: [PATCH 0693/1815] Add hexadecimal string to integer conversion to converter processor (#5569) --- plugins/processors/converter/converter.go | 22 ++++++++++++++----- .../processors/converter/converter_test.go | 8 +++++-- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/plugins/processors/converter/converter.go b/plugins/processors/converter/converter.go index 3c9b74f3f..db240abf4 100644 --- a/plugins/processors/converter/converter.go +++ b/plugins/processors/converter/converter.go @@ -345,11 +345,16 @@ func toInteger(v interface{}) (int64, bool) { return 0, true } case string: - result, err := strconv.ParseFloat(value, 64) + result, err := strconv.ParseInt(value, 0, 64) + if err != nil { - return 0, false + result, err := strconv.ParseFloat(value, 64) + if err != nil { + return 0, false + } + return toInteger(result) } - return toInteger(result) + return result, true } return 0, false } @@ -379,11 +384,16 @@ func toUnsigned(v interface{}) (uint64, bool) { return 0, true } case string: - result, err := strconv.ParseFloat(value, 64) + result, err := strconv.ParseUint(value, 0, 64) + if err != nil { - return 0, false + result, err := strconv.ParseFloat(value, 64) + if err != nil { + return 0, false + } + return toUnsigned(result) } - return toUnsigned(result) + return result, true } return 0, false } diff --git a/plugins/processors/converter/converter_test.go b/plugins/processors/converter/converter_test.go index 1b00cedf9..1d60a40fb 100644 --- a/plugins/processors/converter/converter_test.go +++ b/plugins/processors/converter/converter_test.go @@ -129,8 +129,8 @@ func TestConverter(t *testing.T) { converter: &Converter{ Fields: &Conversion{ String: []string{"a"}, - Integer: []string{"b", "b1", "b2"}, - Unsigned: []string{"c", "c1", "c2"}, + 
Integer: []string{"b", "b1", "b2", "b3"}, + Unsigned: []string{"c", "c1", "c2", "c3"}, Boolean: []string{"d"}, Float: []string{"e"}, Tag: []string{"f"}, @@ -145,9 +145,11 @@ func TestConverter(t *testing.T) { "b": "42", "b1": "42.2", "b2": "42.5", + "b3": "0x2A", "c": "42", "c1": "42.2", "c2": "42.5", + "c3": "0x2A", "d": "true", "e": "42.0", "f": "foo", @@ -166,9 +168,11 @@ func TestConverter(t *testing.T) { "b": int64(42), "b1": int64(42), "b2": int64(43), + "b3": int64(42), "c": uint64(42), "c1": uint64(42), "c2": uint64(43), + "c3": uint64(42), "d": true, "e": 42.0, }, From 2a0ae109405138d6ac378d30d11dd052b695a23a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 11 Mar 2019 15:37:34 -0700 Subject: [PATCH 0694/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6411fe8bf..b99ab556a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,8 @@ #### Features - [#5556](https://github.com/influxdata/telegraf/pull/5556): Add TTL field to ping input. +- [#5569](https://github.com/influxdata/telegraf/pull/5569): Add hexadecimal string to integer conversion to converter processor. + ## v1.10.1 [unreleased] From 45b1247d9b24cc8760d56ae914492ff7c9247005 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 12 Mar 2019 20:49:19 -0600 Subject: [PATCH 0695/1815] Use valid measurement names in csv parser (#5577) If an empty column is present in parsed csv data, a `recordFields` is set to the key of "". If no `MeasurementColumn` is defined, this empty value was being used as the measurement name. By only setting the measurementName if the value is not empty, we avoid this error. Since an empty column is a valid column, skip values must account for them. --- plugins/parsers/csv/parser.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go index 5f4fcc640..f8bf93e70 100644 --- a/plugins/parsers/csv/parser.go +++ b/plugins/parsers/csv/parser.go @@ -204,7 +204,7 @@ outer: // will default to plugin name measurementName := p.MetricName - if recordFields[p.MeasurementColumn] != nil { + if recordFields[p.MeasurementColumn] != nil && recordFields[p.MeasurementColumn] != "" { measurementName = fmt.Sprintf("%v", recordFields[p.MeasurementColumn]) } From 96ded740985d2cd7941b4dff6e8b958ab19826c5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 12 Mar 2019 19:57:10 -0700 Subject: [PATCH 0696/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b99ab556a..f7abf32ca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ - [#5543](https://github.com/influxdata/telegraf/pull/5543): Add Base64-encoding/decoding for Google Cloud PubSub plugins. - [#5565](https://github.com/influxdata/telegraf/issues/5565): Fix type compatibility in vsphere plugin with use_int_samples option. - [#5492](https://github.com/influxdata/telegraf/issues/5492): Fix vsphere input shows failed task in vCenter. +- [#5530](https://github.com/influxdata/telegraf/issues/5530): Fix invalid measurement name and skip column in csv parser. 
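The converter change above relies on `strconv.ParseInt` with base 0, which infers the base from the string prefix, so `0x`-prefixed values parse as hexadecimal before the float fallback is attempted. A minimal illustration:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Base 0: "0x" prefix -> hex, leading "0" -> octal, otherwise decimal.
	v, err := strconv.ParseInt("0x2A", 0, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 42

	// Plain decimals and floats still take the ParseFloat fallback path.
	f, _ := strconv.ParseFloat("42.5", 64)
	fmt.Println(f) // 42.5
}
```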
## v1.10 [2019-03-05] From 2118fbe78a0f267c88368d3dc312b2481a9d65e2 Mon Sep 17 00:00:00 2001 From: Soulou Date: Fri, 15 Mar 2019 14:36:56 +0100 Subject: [PATCH 0697/1815] Use unix.Major/unix.Minor instead of custom implementation --- plugins/inputs/diskio/diskio_linux.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/diskio/diskio_linux.go b/plugins/inputs/diskio/diskio_linux.go index 2d3e5f6b0..73eba361c 100644 --- a/plugins/inputs/diskio/diskio_linux.go +++ b/plugins/inputs/diskio/diskio_linux.go @@ -35,8 +35,8 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { return ic.values, nil } - major := stat.Rdev >> 8 & 0xff - minor := (stat.Rdev & 0xff) | (stat.Rdev>>12)&^0xff + major := unix.Major(stat.Rdev) + minor := unix.Minor(stat.Rdev) udevDataPath := fmt.Sprintf("%s/b%d:%d", udevPath, major, minor) di := map[string]string{} From 6a97b259ebdd6e483c4a5aa38a60f097a88d43fb Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 18 Mar 2019 12:07:44 -0600 Subject: [PATCH 0698/1815] Minimize call to collect uptime in system input (#5592) --- plugins/inputs/system/system.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/system/system.go b/plugins/inputs/system/system.go index ad17c56ed..55ebbc59e 100644 --- a/plugins/inputs/system/system.go +++ b/plugins/inputs/system/system.go @@ -47,22 +47,22 @@ func (_ *SystemStats) Gather(acc telegraf.Accumulator) error { now := time.Now() acc.AddGauge("system", fields, nil, now) - hostinfo, err := host.Info() + uptime, err := host.Uptime() if err != nil { return err } acc.AddCounter("system", map[string]interface{}{ - "uptime": hostinfo.Uptime, + "uptime": uptime, }, nil, now) acc.AddFields("system", map[string]interface{}{ - "uptime_format": format_uptime(hostinfo.Uptime), + "uptime_format": formatUptime(uptime), }, nil, now) return nil } -func format_uptime(uptime uint64) string { +func formatUptime(uptime uint64) string { buf := new(bytes.Buffer) w := bufio.NewWriter(buf) From 6b2137a4a18ce62658bc36b65c9e98cd5a5a0c9a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 18 Mar 2019 11:10:02 -0700 Subject: [PATCH 0699/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f7abf32ca..d0e0005ad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ - [#5565](https://github.com/influxdata/telegraf/issues/5565): Fix type compatibility in vsphere plugin with use_int_samples option. - [#5492](https://github.com/influxdata/telegraf/issues/5492): Fix vsphere input shows failed task in vCenter. - [#5530](https://github.com/influxdata/telegraf/issues/5530): Fix invalid measurement name and skip column in csv parser. +- [#5589](https://github.com/influxdata/telegraf/issues/5589): Fix system input causing high cpu usage on Raspbian. ## v1.10 [2019-03-05] From eeb0e094c291049aac22c577800fff49663b46c7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 18 Mar 2019 12:47:47 -0700 Subject: [PATCH 0700/1815] Remove test directory with .. 
prefix --- .../subconfig/..4984_10_04_08_28_06.119/invalid-config.conf | 4 ---- 1 file changed, 4 deletions(-) delete mode 100644 internal/config/testdata/subconfig/..4984_10_04_08_28_06.119/invalid-config.conf diff --git a/internal/config/testdata/subconfig/..4984_10_04_08_28_06.119/invalid-config.conf b/internal/config/testdata/subconfig/..4984_10_04_08_28_06.119/invalid-config.conf deleted file mode 100644 index aee9abdfe..000000000 --- a/internal/config/testdata/subconfig/..4984_10_04_08_28_06.119/invalid-config.conf +++ /dev/null @@ -1,4 +0,0 @@ -# This invalid config file should be skipped during testing -# as it is an ..data folder - -[[outputs.influxdb From 5e88824c153e9dbd5750eb0a46d769b1d01782ac Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 18 Mar 2019 17:54:12 -0700 Subject: [PATCH 0701/1815] Improve stability of appveyor builds (#5578) --- Gopkg.lock | 6 ++++++ appveyor.yml | 1 + 2 files changed, 7 insertions(+) diff --git a/Gopkg.lock b/Gopkg.lock index 47feeb386..b889e6f8c 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1202,6 +1202,7 @@ ] pruneopts = "" revision = "a2144134853fc9a27a7b1e3eb4f19f1a76df13c9" + source = "https://github.com/golang/crypto.git" [[projects]] branch = "master" @@ -1230,6 +1231,7 @@ ] pruneopts = "" revision = "a680a1efc54dd51c040b3b5ce4939ea3cf2ea0d1" + source = "https://github.com/golang/net.git" [[projects]] branch = "master" @@ -1245,6 +1247,7 @@ ] pruneopts = "" revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9" + source = "https://github.com/golang/oauth2.git" [[projects]] branch = "master" @@ -1256,6 +1259,7 @@ ] pruneopts = "" revision = "42b317875d0fa942474b76e1b46a6060d720ae6e" + source = "https://github.com/golang/sync.git" [[projects]] branch = "master" @@ -1272,6 +1276,7 @@ ] pruneopts = "" revision = "7c4c994c65f702f41ed7d6620a2cb34107576a77" + source = "https://github.com/golang/sys.git" [[projects]] digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4" @@ -1307,6 +1312,7 @@ pruneopts = "" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" + source = "https://github.com/golang/text.git" [[projects]] branch = "master" diff --git a/appveyor.yml b/appveyor.yml index 15cdd5664..39ec04425 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -2,6 +2,7 @@ version: "{build}" cache: - C:\Cache + - C:\gopath\pkg\dep\sources -> Gopkg.lock clone_folder: C:\gopath\src\github.com\influxdata\telegraf From 3b1ab6f3624a10c6a21a05fa877078023b58fff8 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 19 Mar 2019 14:39:42 -0600 Subject: [PATCH 0702/1815] Don't add empty healthcheck tags to consul input (#5575) --- plugins/inputs/consul/consul.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/consul/consul.go b/plugins/inputs/consul/consul.go index 4662b54b0..4b5ee4b1c 100644 --- a/plugins/inputs/consul/consul.go +++ b/plugins/inputs/consul/consul.go @@ -5,6 +5,7 @@ import ( "strings" "github.com/hashicorp/consul/api" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -126,12 +127,12 @@ func (c *Consul) GatherHealthCheck(acc telegraf.Accumulator, checks []*api.Healt for _, checkTag := range check.ServiceTags { if c.TagDelimiter != "" { splittedTag := strings.SplitN(checkTag, c.TagDelimiter, 2) - if len(splittedTag) == 1 { + if len(splittedTag) == 1 && checkTag != "" { tags[checkTag] = checkTag - } else if len(splittedTag) == 2 { + } else if 
len(splittedTag) == 2 && splittedTag[1] != "" { tags[splittedTag[0]] = splittedTag[1] } - } else { + } else if checkTag != "" { tags[checkTag] = checkTag } } From b74660163e5cf1d552d7067f04e6c7bad0125e96 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 19 Mar 2019 13:45:19 -0700 Subject: [PATCH 0703/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d0e0005ad..cd35f06ce 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ - [#5492](https://github.com/influxdata/telegraf/issues/5492): Fix vsphere input shows failed task in vCenter. - [#5530](https://github.com/influxdata/telegraf/issues/5530): Fix invalid measurement name and skip column in csv parser. - [#5589](https://github.com/influxdata/telegraf/issues/5589): Fix system input causing high cpu usage on Raspbian. +- [#5575](https://github.com/influxdata/telegraf/issues/5575): Don't add empty healthcheck tags to consul input. ## v1.10.1 [2019-03-19] From 5d9b8297760dd02cd5410e320e934368e4d9a260 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 19 Mar 2019 13:46:26 -0700 Subject: [PATCH 0704/1815] Set Telegraf 1.10.1 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cd35f06ce..bd56d24a0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,7 @@ - [#5569](https://github.com/influxdata/telegraf/pull/5569): Add hexadecimal string to integer conversion to converter processor. -## v1.10.1 [unreleased] +## v1.10.1 [2019-03-19] #### Bugfixes From 33ee309fd1949dab8bffe752210c2bdccce8f7f1 Mon Sep 17 00:00:00 2001 From: Adrián López Date: Wed, 20 Mar 2019 18:36:51 +0100 Subject: [PATCH 0705/1815] Fix deadlock when Telegraf is aligning aggregators (#5612) --- agent/agent.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index ec9aa7f32..338d418f7 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -378,7 +378,10 @@ func (a *Agent) runAggregators( for _, agg := range a.Config.Aggregators { wg.Add(1) go func(agg *models.RunningAggregator) { - defer wg.Done() + defer func() { + wg.Done() + close(aggregations) + }() if a.Config.Agent.RoundInterval { // Aggregators are aligned to the agent interval regardless of @@ -394,7 +397,6 @@ func (a *Agent) runAggregators( acc := NewAccumulator(agg, aggregations) acc.SetPrecision(precision, interval) a.push(ctx, agg, acc) - close(aggregations) }(agg) } From 51409c876860b342f0fc3a43557360313f731859 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 20 Mar 2019 11:38:20 -0700 Subject: [PATCH 0706/1815] Update changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bd56d24a0..bbea3ac33 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,9 @@ - [#5556](https://github.com/influxdata/telegraf/pull/5556): Add TTL field to ping input. - [#5569](https://github.com/influxdata/telegraf/pull/5569): Add hexadecimal string to integer conversion to converter processor. +## v1.10.2 [unreleased] + +- [#5612](https://github.com/influxdata/telegraf/pull/5612): Fix deadlock when Telegraf is aligning aggregators. 
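For illustration, a minimal standalone sketch of the pattern behind the deadlock fix above (a hypothetical reduction, not the agent's actual code): a consumer ranging over a channel only terminates once the channel is closed, so moving `close` into a `defer` guarantees it runs on every exit path of the producer goroutine, including early returns that previously skipped the close at the end of the function.

```go
package main

import "fmt"

func main() {
	ch := make(chan int)

	go func() {
		// Deferring the close means it runs even if this goroutine
		// returns early; without it, the range below blocks forever.
		defer close(ch)
		for i := 0; i < 3; i++ {
			ch <- i
		}
	}()

	for v := range ch { // would deadlock if ch were never closed
		fmt.Println(v)
	}
}
```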
## v1.10.1 [2019-03-19] From fa65a82ef36f0783f11bbad1f6cf2a2cbf7910a7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 20 Mar 2019 12:53:55 -0700 Subject: [PATCH 0707/1815] Update changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bbea3ac33..0a275a923 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,10 @@ ## v1.10.2 [unreleased] +#### Bugfixes + - [#5612](https://github.com/influxdata/telegraf/pull/5612): Fix deadlock when Telegraf is aligning aggregators. +- [#5523](https://github.com/influxdata/telegraf/issues/5523): Fix missing cluster stats in ceph input. ## v1.10.1 [2019-03-19] From 205de66dd645dafc7193700154105d84c981b5d0 Mon Sep 17 00:00:00 2001 From: Soulou Date: Thu, 21 Mar 2019 12:00:20 +0100 Subject: [PATCH 0708/1815] [diskio] Force type of stat.Rdev uint64 for mipsle compatibility For most platforms, stat.Rdev is already a uint64, so this has no effect. For linux,mipsle, unix.Stat_t.Rdev is a uint32, but the way major and minor are computed doesn't change, and casting the uint32 has no impact either. --- plugins/inputs/diskio/diskio_linux.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/diskio/diskio_linux.go b/plugins/inputs/diskio/diskio_linux.go index 73eba361c..c727f485b 100644 --- a/plugins/inputs/diskio/diskio_linux.go +++ b/plugins/inputs/diskio/diskio_linux.go @@ -35,8 +35,8 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { return ic.values, nil } - major := unix.Major(stat.Rdev) - minor := unix.Minor(stat.Rdev) + major := unix.Major(uint64(stat.Rdev)) + minor := unix.Minor(uint64(stat.Rdev)) udevDataPath := fmt.Sprintf("%s/b%d:%d", udevPath, major, minor) di := map[string]string{} From 99a390b8e62c863d42a32865e9d4fb63f4af536f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 22 Mar 2019 13:59:30 -0700 Subject: [PATCH 0709/1815] Fix open error handling in file output (#5540) --- agent/agent.go | 11 +++-------- internal/models/running_output.go | 7 +++++++ plugins/outputs/file/file.go | 22 +++++++--------------- 3 files changed, 17 insertions(+), 23 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 338d418f7..d2bc69ad0 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -129,10 +129,7 @@ func (a *Agent) Run(ctx context.Context) error { wg.Wait() log.Printf("D! [agent] Closing outputs") - err = a.closeOutputs() - if err != nil { - return err - } + a.closeOutputs() log.Printf("D! [agent] Stopped Successfully") return nil @@ -589,12 +586,10 @@ func (a *Agent) connectOutputs(ctx context.Context) error { } // closeOutputs closes all outputs. -func (a *Agent) closeOutputs() error { - var err error +func (a *Agent) closeOutputs() { for _, output := range a.Config.Outputs { - err = output.Output.Close() + output.Close() } - return err } // startServiceInputs starts all service inputs. diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 531a3065b..4cec18cc8 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -180,6 +180,13 @@ func (ro *RunningOutput) WriteBatch() error { return nil } +func (ro *RunningOutput) Close() { + err := ro.Output.Close() + if err != nil { + log.Printf("E! 
[outputs.%s] Error closing output: %v", ro.Name, err) + } +} + func (ro *RunningOutput) write(metrics []telegraf.Metric) error { start := time.Now() err := ro.Output.Write(metrics) diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go index 0bbff2f64..0ef61df51 100644 --- a/plugins/outputs/file/file.go +++ b/plugins/outputs/file/file.go @@ -43,17 +43,11 @@ func (f *File) Connect() error { if file == "stdout" { f.writers = append(f.writers, os.Stdout) } else { - var of *os.File - var err error - if _, err := os.Stat(file); os.IsNotExist(err) { - of, err = os.Create(file) - } else { - of, err = os.OpenFile(file, os.O_APPEND|os.O_WRONLY, os.ModeAppend) - } - + of, err := os.OpenFile(file, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModeAppend|0644) if err != nil { return err } + f.writers = append(f.writers, of) f.closers = append(f.closers, of) } @@ -62,16 +56,14 @@ func (f *File) Connect() error { } func (f *File) Close() error { - var errS string + var err error for _, c := range f.closers { - if err := c.Close(); err != nil { - errS += err.Error() + "\n" + errClose := c.Close() + if errClose != nil { + err = errClose } } - if errS != "" { - return fmt.Errorf(errS) - } - return nil + return err } func (f *File) SampleConfig() string { From 68b8db4a64e9068f2c9ef008d689ded14ef8a697 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 22 Mar 2019 14:00:58 -0700 Subject: [PATCH 0710/1815] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0a275a923..933b6e9a7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,10 @@ - [#5556](https://github.com/influxdata/telegraf/pull/5556): Add TTL field to ping input. - [#5569](https://github.com/influxdata/telegraf/pull/5569): Add hexadecimal string to integer conversion to converter processor. +#### Bugfixes + +- [#5540](https://github.com/influxdata/telegraf/pull/5540): Fix open file error handling in file output. 
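A minimal sketch of the open pattern adopted in #5540 (illustrative file name and payload, not the plugin itself): a single `os.OpenFile` call with `O_CREATE|O_APPEND` replaces the earlier stat-then-create sequence, whose inner `:=` shadowed the outer `err` and could leave open failures unreported.

```go
package main

import (
	"log"
	"os"
)

func main() {
	// O_CREATE|O_APPEND|O_WRONLY creates the file if missing and appends
	// otherwise, so no separate os.Stat/os.Create branch is needed and the
	// returned err is checked on every path.
	f, err := os.OpenFile("metrics.out", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if _, err := f.WriteString("cpu usage_idle=99.5\n"); err != nil {
		log.Fatal(err)
	}
}
```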
+ ## v1.10.2 [unreleased] #### Bugfixes From 72d4f00082704933fb1f7570bf875bdd5ae46a27 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 22 Mar 2019 14:02:15 -0700 Subject: [PATCH 0711/1815] Remove string trimming from grok parser (#5608) --- plugins/parsers/grok/influx_patterns.go | 2 +- plugins/parsers/grok/parser.go | 2 +- plugins/parsers/grok/parser_test.go | 21 +++++++++++++++++++++ 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/plugins/parsers/grok/influx_patterns.go b/plugins/parsers/grok/influx_patterns.go index b7853c742..92b12731f 100644 --- a/plugins/parsers/grok/influx_patterns.go +++ b/plugins/parsers/grok/influx_patterns.go @@ -69,7 +69,7 @@ COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NOTSPACE:ident} %{NOTSPACE:auth} \[%{HTT # Combined log format is the same as the common log format but with the addition # of two quoted strings at the end for "referrer" and "agent" # See Examples at http://httpd.apache.org/docs/current/mod/mod_log_config.html -COMBINED_LOG_FORMAT %{COMMON_LOG_FORMAT} %{QS:referrer} %{QS:agent} +COMBINED_LOG_FORMAT %{COMMON_LOG_FORMAT} "%{DATA:referrer}" "%{DATA:agent}" # HTTPD log formats HTTPD20_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{LOGLEVEL:loglevel:tag}\] (?:\[client %{IPORHOST:clientip}\] ){0,1}%{GREEDYDATA:errormsg} diff --git a/plugins/parsers/grok/parser.go b/plugins/parsers/grok/parser.go index 5984e288e..cecb69f94 100644 --- a/plugins/parsers/grok/parser.go +++ b/plugins/parsers/grok/parser.go @@ -271,7 +271,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { case TAG: tags[k] = v case STRING: - fields[k] = strings.Trim(v, `"`) + fields[k] = v case EPOCH: parts := strings.SplitN(v, ".", 2) if len(parts) == 0 { diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go index 23af0af44..2b8815264 100644 --- a/plugins/parsers/grok/parser_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -1047,3 +1047,24 @@ func TestEmptyYearInTimestamp(t *testing.T) { require.NotNil(t, m) require.Equal(t, time.Now().Year(), m.Time().Year()) } + +func TestTrimRegression(t *testing.T) { + // https://github.com/influxdata/telegraf/issues/4998 + p := &Parser{ + Patterns: []string{`%{GREEDYDATA:message:string}`}, + } + require.NoError(t, p.Compile()) + + actual, err := p.ParseLine(`level=info msg="ok"`) + require.NoError(t, err) + + expected := testutil.MustMetric( + "", + map[string]string{}, + map[string]interface{}{ + "message": `level=info msg="ok"`, + }, + actual.Time(), + ) + require.Equal(t, expected, actual) +} From 417c5c1de894f935b62d10d4605374847d670c89 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 22 Mar 2019 14:02:45 -0700 Subject: [PATCH 0712/1815] Add owned directories to rpm package (#5607) --- scripts/build.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/build.py b/scripts/build.py index e7a402be5..5869bf1ed 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -647,7 +647,7 @@ def package(build_output, pkg_name, version, nightly=False, iteration=1, static= package_build_root, current_location) if package_type == "rpm": - fpm_command += "--depends coreutils --depends shadow-utils --rpm-posttrans {}".format(POSTINST_SCRIPT) + fpm_command += "--directories /var/log/telegraf --directories /etc/telegraf --depends coreutils --depends shadow-utils --rpm-posttrans {}".format(POSTINST_SCRIPT) out = run(fpm_command, shell=True) matches = re.search(':path=>"(.*)"', out) outfile = None From 135166323b57aa01c6fe34dbd0f51df693ee7787 Mon Sep 17 00:00:00 
2001 From: Daniel Nelson Date: Fri, 22 Mar 2019 14:14:38 -0700 Subject: [PATCH 0713/1815] Update changelog --- CHANGELOG.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 933b6e9a7..fd751268e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,10 +11,19 @@ ## v1.10.2 [unreleased] +#### Release Notes + +- String fields no longer have leading and trailing quotation marks removed in + the grok parser. If you are capturing quoted strings you may need to update + the patterns. + #### Bugfixes - [#5612](https://github.com/influxdata/telegraf/pull/5612): Fix deadlock when Telegraf is aligning aggregators. - [#5523](https://github.com/influxdata/telegraf/issues/5523): Fix missing cluster stats in ceph input. +- [#5566](https://github.com/influxdata/telegraf/pull/5566): Fix reading major and minor block devices identifiers in diskio input. +- [#5607](https://github.com/influxdata/telegraf/pull/5607): Add owned directories to rpm package spec. +- [#5608](https://github.com/influxdata/telegraf/pull/5607): Fix last character removed from string field in grok parser. ## v1.10.1 [2019-03-19] From 888d847a053bdee85405d901d96fe04430e0d3c6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 25 Mar 2019 12:09:04 -0700 Subject: [PATCH 0714/1815] Add link to vjeantet/grok to grok parser docs --- plugins/parsers/grok/README.md | 14 +++++---- plugins/parsers/grok/influx_patterns.go | 39 ------------------------- 2 files changed, 9 insertions(+), 44 deletions(-) diff --git a/plugins/parsers/grok/README.md b/plugins/parsers/grok/README.md index a694735de..c7f2325b3 100644 --- a/plugins/parsers/grok/README.md +++ b/plugins/parsers/grok/README.md @@ -59,11 +59,15 @@ To match a comma decimal point you can use a period. For example `%{TIMESTAMP:t To match a comma decimal point you can use a period in the pattern string. See https://golang.org/pkg/time/#Parse for more details. -Telegraf has many of its own [built-in patterns](/plugins/parsers/grok/influx_patterns.go), -as well as support for most of -[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns). -_Golang regular expressions do not support lookahead or lookbehind. -logstash patterns that depend on these are not supported._ +Telegraf has many of its own [built-in patterns][] as well as support for most +of the Logstash builtin patterns using [these Go compatible patterns][grok-patterns]. + +**Note** Golang regular expressions do not support lookahead or lookbehind. +Logstash patterns that use these features may not supported, or may use a Go +friendly pattern that does is not always completely compatible with Logstash. +[built-in patterns]: /plugins/parsers/grok/influx_patterns.go +[grok-patterns]: https://github.com/vjeantet/grok/blob/master/patterns/grok-patterns If you need help building patterns to match your logs, you will find the https://grokdebug.herokuapp.com application quite useful! diff --git a/plugins/parsers/grok/influx_patterns.go b/plugins/parsers/grok/influx_patterns.go index 92b12731f..282c28111 100644 --- a/plugins/parsers/grok/influx_patterns.go +++ b/plugins/parsers/grok/influx_patterns.go @@ -1,45 +1,6 @@ package grok const DEFAULT_PATTERNS = ` -# Captures are a slightly modified version of logstash "grok" patterns, with -# the format %{[:][:]} -# By default all named captures are converted into string fields. -# If a pattern does not have a semantic name it will not be captured. 
-# Modifiers can be used to convert captures to other types or tags. -# Timestamp modifiers can be used to convert captures to the timestamp of the -# parsed metric. - -# View logstash grok pattern docs here: -# https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html -# All default logstash patterns are supported, these can be viewed here: -# https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns - -# Available modifiers: -# string (default if nothing is specified) -# int -# float -# duration (ie, 5.23ms gets converted to int nanoseconds) -# tag (converts the field into a tag) -# drop (drops the field completely) -# Timestamp modifiers: -# ts-ansic ("Mon Jan _2 15:04:05 2006") -# ts-unix ("Mon Jan _2 15:04:05 MST 2006") -# ts-ruby ("Mon Jan 02 15:04:05 -0700 2006") -# ts-rfc822 ("02 Jan 06 15:04 MST") -# ts-rfc822z ("02 Jan 06 15:04 -0700") -# ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST") -# ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST") -# ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700") -# ts-rfc3339 ("2006-01-02T15:04:05Z07:00") -# ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00") -# ts-httpd ("02/Jan/2006:15:04:05 -0700") -# ts-epoch (seconds since unix epoch) -# ts-epochnano (nanoseconds since unix epoch) -# ts-"CUSTOM" -# CUSTOM time layouts must be within quotes and be the representation of the -# "reference time", which is Mon Jan 2 15:04:05 -0700 MST 2006 -# See https://golang.org/pkg/time/#Parse for more details. - # Example log file pattern, example log looks like this: # [04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs # Breakdown of the DURATION pattern below: From e793a69533122bb9263d141b991c524985483c2a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 25 Mar 2019 12:11:59 -0700 Subject: [PATCH 0715/1815] Fix grammar --- plugins/parsers/grok/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/parsers/grok/README.md b/plugins/parsers/grok/README.md index c7f2325b3..6263eecc9 100644 --- a/plugins/parsers/grok/README.md +++ b/plugins/parsers/grok/README.md @@ -62,9 +62,9 @@ See https://golang.org/pkg/time/#Parse for more details. Telegraf has many of its own [built-in patterns][] as well as support for most of the Logstash builtin patterns using [these Go compatible patterns][grok-patterns]. -**Note** Golang regular expressions do not support lookahead or lookbehind. -Logstash patterns that use these features may not supported, or may use a Go -friendly pattern that does is not always completely compatible with Logstash. +**Note:** Golang regular expressions do not support lookahead or lookbehind. +Logstash patterns that use these features may not be supported, or may use a Go +friendly pattern that is not fully compatible with the Logstash pattern. 
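A quick standalone demonstration of the limitation described in the note above: Go's RE2-based `regexp` package rejects lookahead at compile time, which is why Logstash patterns that rely on it need a Go-friendly rewrite.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Perl-style lookahead is not part of RE2, so compilation fails.
	if _, err := regexp.Compile(`foo(?=bar)`); err != nil {
		fmt.Println("lookahead rejected:", err)
	}

	// A Go-friendly pattern matches the literal sequence instead.
	re := regexp.MustCompile(`foo(bar)`)
	fmt.Println(re.FindStringSubmatch("foobar"))
}
```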
[built-in patterns]: /plugins/parsers/grok/influx_patterns.go [grok-patterns]: https://github.com/vjeantet/grok/blob/master/patterns/grok-patterns From 60027cf902c39957ea8a7cc682388704ec04d0b2 Mon Sep 17 00:00:00 2001 From: scier Date: Mon, 25 Mar 2019 16:24:42 -0700 Subject: [PATCH 0716/1815] Add support for multiple line text and perfdata to nagios parser (#5601) --- plugins/inputs/exec/exec.go | 114 +++--- plugins/inputs/exec/exec_test.go | 87 ++++- plugins/parsers/nagios/parser.go | 148 ++++++- plugins/parsers/nagios/parser_test.go | 542 +++++++++++++++++++++----- 4 files changed, 708 insertions(+), 183 deletions(-) diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index 9cb86c3cd..615736b3c 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -3,12 +3,12 @@ package exec import ( "bytes" "fmt" + "log" "os/exec" "path/filepath" "runtime" "strings" "sync" - "syscall" "time" "github.com/kballard/go-shellquote" @@ -61,39 +61,18 @@ func NewExec() *Exec { } type Runner interface { - Run(*Exec, string, telegraf.Accumulator) ([]byte, error) + Run(string, time.Duration) ([]byte, []byte, error) } type CommandRunner struct{} -func AddNagiosState(exitCode error, acc telegraf.Accumulator) error { - nagiosState := 0 - if exitCode != nil { - exiterr, ok := exitCode.(*exec.ExitError) - if ok { - status, ok := exiterr.Sys().(syscall.WaitStatus) - if ok { - nagiosState = status.ExitStatus() - } else { - return fmt.Errorf("exec: unable to get nagios plugin exit code") - } - } else { - return fmt.Errorf("exec: unable to get nagios plugin exit code") - } - } - fields := map[string]interface{}{"state": nagiosState} - acc.AddFields("nagios_state", fields, nil) - return nil -} - func (c CommandRunner) Run( - e *Exec, command string, - acc telegraf.Accumulator, -) ([]byte, error) { + timeout time.Duration, +) ([]byte, []byte, error) { split_cmd, err := shellquote.Split(command) if err != nil || len(split_cmd) == 0 { - return nil, fmt.Errorf("exec: unable to parse command, %s", err) + return nil, nil, fmt.Errorf("exec: unable to parse command, %s", err) } cmd := exec.Command(split_cmd[0], split_cmd[1:]...) @@ -105,44 +84,35 @@ func (c CommandRunner) Run( cmd.Stdout = &out cmd.Stderr = &stderr - if err := internal.RunTimeout(cmd, e.Timeout.Duration); err != nil { - switch e.parser.(type) { - case *nagios.NagiosParser: - AddNagiosState(err, acc) - default: - var errMessage = "" - if stderr.Len() > 0 { - stderr = removeCarriageReturns(stderr) - // Limit the number of bytes. - didTruncate := false - if stderr.Len() > MaxStderrBytes { - stderr.Truncate(MaxStderrBytes) - didTruncate = true - } - if i := bytes.IndexByte(stderr.Bytes(), '\n'); i > 0 { - // Only show truncation if the newline wasn't the last character. - if i < stderr.Len()-1 { - didTruncate = true - } - stderr.Truncate(i) - } - if didTruncate { - stderr.WriteString("...") - } - - errMessage = fmt.Sprintf(": %s", stderr.String()) - } - return nil, fmt.Errorf("exec: %s for command '%s'%s", err, command, errMessage) - } - } else { - switch e.parser.(type) { - case *nagios.NagiosParser: - AddNagiosState(nil, acc) - } - } + runErr := internal.RunTimeout(cmd, timeout) out = removeCarriageReturns(out) - return out.Bytes(), nil + if stderr.Len() > 0 { + stderr = removeCarriageReturns(stderr) + stderr = truncate(stderr) + } + + return out.Bytes(), stderr.Bytes(), runErr +} + +func truncate(buf bytes.Buffer) bytes.Buffer { + // Limit the number of bytes. 
+ didTruncate := false + if buf.Len() > MaxStderrBytes { + buf.Truncate(MaxStderrBytes) + didTruncate = true + } + if i := bytes.IndexByte(buf.Bytes(), '\n'); i > 0 { + // Only show truncation if the newline wasn't the last character. + if i < buf.Len()-1 { + didTruncate = true + } + buf.Truncate(i) + } + if didTruncate { + buf.WriteString("...") + } + return buf } // removeCarriageReturns removes all carriage returns from the input if the @@ -173,9 +143,11 @@ func removeCarriageReturns(b bytes.Buffer) bytes.Buffer { func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync.WaitGroup) { defer wg.Done() + _, isNagios := e.parser.(*nagios.NagiosParser) - out, err := e.runner.Run(e, command, acc) - if err != nil { + out, errbuf, runErr := e.runner.Run(command, e.Timeout.Duration) + if !isNagios && runErr != nil { + err := fmt.Errorf("exec: %s for command '%s': %s", runErr, command, string(errbuf)) acc.AddError(err) return } @@ -183,11 +155,19 @@ func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync metrics, err := e.parser.Parse(out) if err != nil { acc.AddError(err) - } else { - for _, metric := range metrics { - acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time()) + return + } + + if isNagios { + metrics, err = nagios.TryAddState(runErr, metrics) + if err != nil { + log.Printf("E! [inputs.exec] failed to add nagios state: %s", err) } } + + for _, m := range metrics { + acc.AddMetric(m) + } } func (e *Exec) SampleConfig() string { diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index 0bfeece54..5aaef8961 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -5,8 +5,8 @@ import ( "fmt" "runtime" "testing" + "time" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" @@ -74,22 +74,21 @@ var crTests = []CarriageReturnTest{ } type runnerMock struct { - out []byte - err error + out []byte + errout []byte + err error } -func newRunnerMock(out []byte, err error) Runner { +func newRunnerMock(out []byte, errout []byte, err error) Runner { return &runnerMock{ - out: out, - err: err, + out: out, + errout: errout, + err: err, } } -func (r runnerMock) Run(e *Exec, command string, acc telegraf.Accumulator) ([]byte, error) { - if r.err != nil { - return nil, r.err - } - return r.out, nil +func (r runnerMock) Run(command string, _ time.Duration) ([]byte, []byte, error) { + return r.out, r.errout, r.err } func TestExec(t *testing.T) { @@ -98,7 +97,7 @@ func TestExec(t *testing.T) { MetricName: "exec", }) e := &Exec{ - runner: newRunnerMock([]byte(validJson), nil), + runner: newRunnerMock([]byte(validJson), nil, nil), Commands: []string{"testcommand arg1"}, parser: parser, } @@ -127,7 +126,7 @@ func TestExecMalformed(t *testing.T) { MetricName: "exec", }) e := &Exec{ - runner: newRunnerMock([]byte(malformedJson), nil), + runner: newRunnerMock([]byte(malformedJson), nil, nil), Commands: []string{"badcommand arg1"}, parser: parser, } @@ -143,7 +142,7 @@ func TestCommandError(t *testing.T) { MetricName: "exec", }) e := &Exec{ - runner: newRunnerMock(nil, fmt.Errorf("exit status code 1")), + runner: newRunnerMock(nil, nil, fmt.Errorf("exit status code 1")), Commands: []string{"badcommand"}, parser: parser, } @@ -201,6 +200,66 @@ func TestExecCommandWithoutGlobAndPath(t *testing.T) { acc.AssertContainsFields(t, "metric", fields) } +func TestTruncate(t *testing.T) { + tests := []struct { + name string + 
bufF func() *bytes.Buffer + expF func() *bytes.Buffer + }{ + { + name: "should not truncate", + bufF: func() *bytes.Buffer { + var b bytes.Buffer + b.WriteString("hello world") + return &b + }, + expF: func() *bytes.Buffer { + var b bytes.Buffer + b.WriteString("hello world") + return &b + }, + }, + { + name: "should truncate up to the new line", + bufF: func() *bytes.Buffer { + var b bytes.Buffer + b.WriteString("hello world\nand all the people") + return &b + }, + expF: func() *bytes.Buffer { + var b bytes.Buffer + b.WriteString("hello world...") + return &b + }, + }, + { + name: "should truncate to the MaxStderrBytes", + bufF: func() *bytes.Buffer { + var b bytes.Buffer + for i := 0; i < 2*MaxStderrBytes; i++ { + b.WriteByte('b') + } + return &b + }, + expF: func() *bytes.Buffer { + var b bytes.Buffer + for i := 0; i < MaxStderrBytes; i++ { + b.WriteByte('b') + } + b.WriteString("...") + return &b + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + res := truncate(*tt.bufF()) + require.Equal(t, tt.expF().Bytes(), res.Bytes()) + }) + } +} + func TestRemoveCarriageReturns(t *testing.T) { if runtime.GOOS == "windows" { // Test that all carriage returns are removed diff --git a/plugins/parsers/nagios/parser.go b/plugins/parsers/nagios/parser.go index 858f5082c..e4058852b 100644 --- a/plugins/parsers/nagios/parser.go +++ b/plugins/parsers/nagios/parser.go @@ -1,17 +1,78 @@ package nagios import ( + "bufio" + "bytes" "errors" + "fmt" "log" + "os/exec" "regexp" "strconv" "strings" + "syscall" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" ) +// getExitCode get the exit code from an error value which is the result +// of running a command through exec package api. +func getExitCode(err error) (int, error) { + if err == nil { + return 0, nil + } + + ee, ok := err.(*exec.ExitError) + if !ok { + // If it is not an *exec.ExitError, then it must be + // an io error, but docs do not say anything about the + // exit code in this case. + return 0, errors.New("expected *exec.ExitError") + } + + ws, ok := ee.Sys().(syscall.WaitStatus) + if !ok { + return 0, errors.New("expected syscall.WaitStatus") + } + + return ws.ExitStatus(), nil +} + +// TryAddState attempts to add a state derived from the runErr. +// If any error occurs, it is guaranteed to be returned along with +// the initial metric slice. 
+func TryAddState(runErr error, metrics []telegraf.Metric) ([]telegraf.Metric, error) { + state, err := getExitCode(runErr) + if err != nil { + return metrics, fmt.Errorf("exec: get exit code: %s", err) + } + + for _, m := range metrics { + if m.Name() == "nagios_state" { + m.AddField("state", state) + return metrics, nil + } + } + + var ts time.Time + if len(metrics) != 0 { + ts = metrics[0].Time() + } else { + ts = time.Now().UTC() + } + f := map[string]interface{}{ + "state": state, + } + m, err := metric.New("nagios_state", nil, f, ts) + if err != nil { + return metrics, err + } + metrics = append(metrics, m) + return metrics, nil +} + type NagiosParser struct { MetricName string DefaultTags map[string]string @@ -34,27 +95,88 @@ func (p *NagiosParser) SetDefaultTags(tags map[string]string) { } func (p *NagiosParser) Parse(buf []byte) ([]telegraf.Metric, error) { + ts := time.Now().UTC() + + s := bufio.NewScanner(bytes.NewReader(buf)) + + var msg bytes.Buffer + var longmsg bytes.Buffer + metrics := make([]telegraf.Metric, 0) - lines := strings.Split(strings.TrimSpace(string(buf)), "\n") - for _, line := range lines { - data_splitted := strings.Split(line, "|") - - if len(data_splitted) != 2 { - // got human readable output only or bad line - continue - } - m, err := parsePerfData(data_splitted[1]) + // Scan the first line. + if !s.Scan() && s.Err() != nil { + return nil, s.Err() + } + parts := bytes.Split(s.Bytes(), []byte{'|'}) + switch len(parts) { + case 2: + ms, err := parsePerfData(string(parts[1]), ts) if err != nil { log.Printf("E! [parser.nagios] failed to parse performance data: %s\n", err.Error()) - continue } - metrics = append(metrics, m...) + metrics = append(metrics, ms...) + fallthrough + case 1: + msg.Write(bytes.TrimSpace(parts[0])) + default: + return nil, errors.New("illegal output format") } + + // Read long output. + for s.Scan() { + if bytes.Contains(s.Bytes(), []byte{'|'}) { + parts := bytes.Split(s.Bytes(), []byte{'|'}) + if longmsg.Len() != 0 { + longmsg.WriteByte('\n') + } + longmsg.Write(bytes.TrimSpace(parts[0])) + + ms, err := parsePerfData(string(parts[1]), ts) + if err != nil { + log.Printf("E! [parser.nagios] failed to parse performance data: %s\n", err.Error()) + } + metrics = append(metrics, ms...) + break + } + if longmsg.Len() != 0 { + longmsg.WriteByte('\n') + } + longmsg.Write(bytes.TrimSpace((s.Bytes()))) + } + + // Parse extra performance data. + for s.Scan() { + ms, err := parsePerfData(s.Text(), ts) + if err != nil { + log.Printf("E! [parser.nagios] failed to parse performance data: %s\n", err.Error()) + } + metrics = append(metrics, ms...) + } + + if s.Err() != nil { + log.Printf("D! [parser.nagios] unexpected io error: %s\n", s.Err()) + } + + // Create nagios state. + fields := map[string]interface{}{ + "service_output": msg.String(), + } + if longmsg.Len() != 0 { + fields["long_service_output"] = longmsg.String() + } + + m, err := metric.New("nagios_state", nil, fields, ts) + if err == nil { + metrics = append(metrics, m) + } else { + log.Printf("E! 
[parser.nagios] failed to add nagios_state: %s\n", err) + } + return metrics, nil } -func parsePerfData(perfdatas string) ([]telegraf.Metric, error) { +func parsePerfData(perfdatas string, timestamp time.Time) ([]telegraf.Metric, error) { metrics := make([]telegraf.Metric, 0) for _, unParsedPerf := range perfSplitRegExp.FindAllString(perfdatas, -1) { @@ -125,7 +247,7 @@ func parsePerfData(perfdatas string) ([]telegraf.Metric, error) { } // Create metric - metric, err := metric.New("nagios", tags, fields, time.Now().UTC()) + metric, err := metric.New("nagios", tags, fields, timestamp) if err != nil { return nil, err } diff --git a/plugins/parsers/nagios/parser_test.go b/plugins/parsers/nagios/parser_test.go index a4da30030..7f5b5937e 100644 --- a/plugins/parsers/nagios/parser_test.go +++ b/plugins/parsers/nagios/parser_test.go @@ -1,112 +1,476 @@ package nagios import ( + "errors" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" ) -const validOutput1 = `PING OK - Packet loss = 0%, RTA = 0.30 ms|rta=0.298000ms;4000.000000;6000.000000;0.000000 pl=0%;80;90;0;100 -This is a long output -with three lines -` -const validOutput2 = "TCP OK - 0.008 second response time on port 80|time=0.008457s;;;0.000000;10.000000" -const validOutput3 = "TCP OK - 0.008 second response time on port 80|time=0.008457" -const validOutput4 = "OK: Load average: 0.00, 0.01, 0.05 | 'load1'=0.00;~:4;@0:6;0; 'load5'=0.01;3;0:5;0; 'load15'=0.05;0:2;0:4;0;" -const invalidOutput3 = "PING OK - Packet loss = 0%, RTA = 0.30 ms" -const invalidOutput4 = "PING OK - Packet loss = 0%, RTA = 0.30 ms| =3;;;; dgasdg =;;;; sff=;;;;" - -func TestParseValidOutput(t *testing.T) { - parser := NagiosParser{ - MetricName: "nagios_test", +func TestGetExitCode(t *testing.T) { + tests := []struct { + name string + errF func() error + expCode int + expErr error + }{ + { + name: "nil error passed is ok", + errF: func() error { + return nil + }, + expCode: 0, + expErr: nil, + }, + { + name: "unexpected error type", + errF: func() error { + return errors.New("I am not *exec.ExitError") + }, + expCode: 0, + expErr: errors.New("expected *exec.ExitError"), + }, } - // Output1 - metrics, err := parser.Parse([]byte(validOutput1)) - require.NoError(t, err) - require.Len(t, metrics, 2) - // rta - assert.Equal(t, "rta", metrics[0].Tags()["perfdata"]) - assert.Equal(t, map[string]interface{}{ - "value": float64(0.298), - "warning_lt": float64(0), - "warning_gt": float64(4000), - "critical_lt": float64(0), - "critical_gt": float64(6000), - "min": float64(0), - }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"unit": "ms", "perfdata": "rta"}, metrics[0].Tags()) - // pl - assert.Equal(t, "pl", metrics[1].Tags()["perfdata"]) - assert.Equal(t, map[string]interface{}{ - "value": float64(0), - "warning_lt": float64(0), - "warning_gt": float64(80), - "critical_lt": float64(0), - "critical_gt": float64(90), - "min": float64(0), - "max": float64(100), - }, metrics[1].Fields()) - assert.Equal(t, map[string]string{"unit": "%", "perfdata": "pl"}, metrics[1].Tags()) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := tt.errF() + code, err := getExitCode(e) - // Output2 - metrics, err = parser.Parse([]byte(validOutput2)) - require.NoError(t, err) - require.Len(t, metrics, 1) - // time - assert.Equal(t, "time", metrics[0].Tags()["perfdata"]) - assert.Equal(t, 
map[string]interface{}{ - "value": float64(0.008457), - "min": float64(0), - "max": float64(10), - }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"unit": "s", "perfdata": "time"}, metrics[0].Tags()) - - // Output3 - metrics, err = parser.Parse([]byte(validOutput3)) - require.NoError(t, err) - require.Len(t, metrics, 1) - // time - assert.Equal(t, "time", metrics[0].Tags()["perfdata"]) - assert.Equal(t, map[string]interface{}{ - "value": float64(0.008457), - }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"perfdata": "time"}, metrics[0].Tags()) - - // Output4 - metrics, err = parser.Parse([]byte(validOutput4)) - require.NoError(t, err) - require.Len(t, metrics, 3) - // load - // const validOutput4 = "OK: Load average: 0.00, 0.01, 0.05 | 'load1'=0.00;0:4;0:6;0; 'load5'=0.01;0:3;0:5;0; 'load15'=0.05;0:2;0:4;0;" - assert.Equal(t, map[string]interface{}{ - "value": float64(0.00), - "warning_lt": MinFloat64, - "warning_gt": float64(4), - "critical_le": float64(0), - "critical_ge": float64(6), - "min": float64(0), - }, metrics[0].Fields()) - - assert.Equal(t, map[string]string{"perfdata": "load1"}, metrics[0].Tags()) + require.Equal(t, tt.expCode, code) + require.Equal(t, tt.expErr, err) + }) + } } -func TestParseInvalidOutput(t *testing.T) { +type metricBuilder struct { + name string + tags map[string]string + fields map[string]interface{} + timestamp time.Time +} + +func mb() *metricBuilder { + return &metricBuilder{} +} + +func (b *metricBuilder) n(v string) *metricBuilder { + b.name = v + return b +} + +func (b *metricBuilder) t(k, v string) *metricBuilder { + if b.tags == nil { + b.tags = make(map[string]string) + } + b.tags[k] = v + return b +} + +func (b *metricBuilder) f(k string, v interface{}) *metricBuilder { + if b.fields == nil { + b.fields = make(map[string]interface{}) + } + b.fields[k] = v + return b +} + +func (b *metricBuilder) ts(v time.Time) *metricBuilder { + b.timestamp = v + return b +} + +func (b *metricBuilder) b() telegraf.Metric { + m, err := metric.New(b.name, b.tags, b.fields, b.timestamp) + if err != nil { + panic(err) + } + return m +} + +// assertEqual asserts two slices to be equal. Note, that the order +// of the entries matters. +func assertEqual(t *testing.T, exp, actual []telegraf.Metric) { + require.Equal(t, len(exp), len(actual)) + for i := 0; i < len(exp); i++ { + ok := testutil.MetricEqual(exp[i], actual[i]) + require.True(t, ok) + } +} + +func TestTryAddState(t *testing.T) { + tests := []struct { + name string + runErrF func() error + metrics []telegraf.Metric + assertF func(*testing.T, []telegraf.Metric, error) + }{ + { + name: "should append state=0 field to existing metric", + runErrF: func() error { + return nil + }, + metrics: []telegraf.Metric{ + mb(). + n("nagios"). + f("perfdata", 0).b(), + mb(). + n("nagios_state"). + f("service_output", "OK: system working").b(), + }, + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + exp := []telegraf.Metric{ + mb(). + n("nagios"). + f("perfdata", 0).b(), + mb(). + n("nagios_state"). + f("service_output", "OK: system working"). + f("state", 0).b(), + } + assertEqual(t, exp, metrics) + require.NoError(t, err) + }, + }, + { + name: "should create 'nagios_state state=0' and same timestamp as others", + runErrF: func() error { + return nil + }, + metrics: []telegraf.Metric{ + mb(). + n("nagios"). + f("perfdata", 0).b(), + }, + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + exp := []telegraf.Metric{ + mb(). + n("nagios"). 
+ f("perfdata", 0).b(), + mb(). + n("nagios_state"). + f("state", 0).b(), + } + assertEqual(t, exp, metrics) + require.NoError(t, err) + }, + }, + { + name: "should create 'nagios_state state=0' and recent timestamp", + runErrF: func() error { + return nil + }, + metrics: []telegraf.Metric{}, + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + require.Len(t, metrics, 1) + m := metrics[0] + require.Equal(t, "nagios_state", m.Name()) + s, ok := m.GetField("state") + require.True(t, ok) + require.Equal(t, int64(0), s) + require.WithinDuration(t, time.Now().UTC(), m.Time(), 10*time.Second) + require.NoError(t, err) + }, + }, + { + name: "should return original metrics and an error", + runErrF: func() error { + return errors.New("non parsable error") + }, + metrics: []telegraf.Metric{ + mb(). + n("nagios"). + f("perfdata", 0).b(), + }, + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + exp := []telegraf.Metric{ + mb(). + n("nagios"). + f("perfdata", 0).b(), + } + expErr := "exec: get exit code: expected *exec.ExitError" + + assertEqual(t, exp, metrics) + require.Equal(t, expErr, err.Error()) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metrics, err := TryAddState(tt.runErrF(), tt.metrics) + tt.assertF(t, metrics, err) + }) + } +} + +func assertNagiosState(t *testing.T, m telegraf.Metric, f map[string]interface{}) { + assert.Equal(t, map[string]string{}, m.Tags()) + assert.Equal(t, f, m.Fields()) +} + +func TestParse(t *testing.T) { parser := NagiosParser{ MetricName: "nagios_test", } - // invalidOutput3 - metrics, err := parser.Parse([]byte(invalidOutput3)) - require.NoError(t, err) - require.Len(t, metrics, 0) + tests := []struct { + name string + input string + assertF func(*testing.T, []telegraf.Metric, error) + }{ + { + name: "valid output 1", + input: `PING OK - Packet loss = 0%, RTA = 0.30 ms|rta=0.298000ms;4000.000000;6000.000000;0.000000 pl=0%;80;90;0;100 +This is a long output +with three lines +`, + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + require.NoError(t, err) + require.Len(t, metrics, 3) + // rta + assert.Equal(t, map[string]string{ + "unit": "ms", + "perfdata": "rta", + }, metrics[0].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(0.298), + "warning_lt": float64(0), + "warning_gt": float64(4000), + "critical_lt": float64(0), + "critical_gt": float64(6000), + "min": float64(0), + }, metrics[0].Fields()) - // invalidOutput4 - metrics, err = parser.Parse([]byte(invalidOutput4)) - require.NoError(t, err) - require.Len(t, metrics, 0) + // pl + assert.Equal(t, map[string]string{ + "unit": "%", + "perfdata": "pl", + }, metrics[1].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(0), + "warning_lt": float64(0), + "warning_gt": float64(80), + "critical_lt": float64(0), + "critical_gt": float64(90), + "min": float64(0), + "max": float64(100), + }, metrics[1].Fields()) + assertNagiosState(t, metrics[2], map[string]interface{}{ + "service_output": "PING OK - Packet loss = 0%, RTA = 0.30 ms", + "long_service_output": "This is a long output\nwith three lines", + }) + }, + }, + { + name: "valid output 2", + input: "TCP OK - 0.008 second response time on port 80|time=0.008457s;;;0.000000;10.000000", + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + require.NoError(t, err) + require.Len(t, metrics, 2) + // time + assert.Equal(t, map[string]string{ + "unit": "s", + "perfdata": "time", + }, metrics[0].Tags()) + assert.Equal(t, 
map[string]interface{}{ + "value": float64(0.008457), + "min": float64(0), + "max": float64(10), + }, metrics[0].Fields()) + + assertNagiosState(t, metrics[1], map[string]interface{}{ + "service_output": "TCP OK - 0.008 second response time on port 80", + }) + }, + }, + { + name: "valid output 3", + input: "TCP OK - 0.008 second response time on port 80|time=0.008457", + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + require.NoError(t, err) + require.Len(t, metrics, 2) + // time + assert.Equal(t, map[string]string{ + "perfdata": "time", + }, metrics[0].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(0.008457), + }, metrics[0].Fields()) + + assertNagiosState(t, metrics[1], map[string]interface{}{ + "service_output": "TCP OK - 0.008 second response time on port 80", + }) + }, + }, + { + name: "valid output 4", + input: "OK: Load average: 0.00, 0.01, 0.05 | 'load1'=0.00;~:4;@0:6;0; 'load5'=0.01;3;0:5;0; 'load15'=0.05;0:2;0:4;0;", + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + require.NoError(t, err) + require.Len(t, metrics, 4) + // load1 + assert.Equal(t, map[string]string{ + "perfdata": "load1", + }, metrics[0].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(0.00), + "warning_lt": MinFloat64, + "warning_gt": float64(4), + "critical_le": float64(0), + "critical_ge": float64(6), + "min": float64(0), + }, metrics[0].Fields()) + + // load5 + assert.Equal(t, map[string]string{ + "perfdata": "load5", + }, metrics[1].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(0.01), + "warning_gt": float64(3), + "warning_lt": float64(0), + "critical_lt": float64(0), + "critical_gt": float64(5), + "min": float64(0), + }, metrics[1].Fields()) + + // load15 + assert.Equal(t, map[string]string{ + "perfdata": "load15", + }, metrics[2].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(0.05), + "warning_lt": float64(0), + "warning_gt": float64(2), + "critical_lt": float64(0), + "critical_gt": float64(4), + "min": float64(0), + }, metrics[2].Fields()) + + assertNagiosState(t, metrics[3], map[string]interface{}{ + "service_output": "OK: Load average: 0.00, 0.01, 0.05", + }) + }, + }, + { + name: "no perf data", + input: "PING OK - Packet loss = 0%, RTA = 0.30 ms", + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + require.NoError(t, err) + require.Len(t, metrics, 1) + + assertNagiosState(t, metrics[0], map[string]interface{}{ + "service_output": "PING OK - Packet loss = 0%, RTA = 0.30 ms", + }) + }, + }, + { + name: "malformed perf data", + input: "PING OK - Packet loss = 0%, RTA = 0.30 ms| =3;;;; dgasdg =;;;; sff=;;;;", + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + require.NoError(t, err) + require.Len(t, metrics, 1) + + assertNagiosState(t, metrics[0], map[string]interface{}{ + "service_output": "PING OK - Packet loss = 0%, RTA = 0.30 ms", + }) + }, + }, + { + name: "from https://assets.nagios.com/downloads/nagioscore/docs/nagioscore/3/en/pluginapi.html", + input: `DISK OK - free space: / 3326 MB (56%); | /=2643MB;5948;5958;0;5968 +/ 15272 MB (77%); +/boot 68 MB (69%); +/home 69357 MB (27%); +/var/log 819 MB (84%); | /boot=68MB;88;93;0;98 +/home=69357MB;253404;253409;0;253414 +/var/log=818MB;970;975;0;980 +`, + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + require.NoError(t, err) + require.Len(t, metrics, 5) + // /=2643MB;5948;5958;0;5968 + assert.Equal(t, map[string]string{ + "unit": "MB", + "perfdata": "/", + }, 
metrics[0].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(2643), + "warning_lt": float64(0), + "warning_gt": float64(5948), + "critical_lt": float64(0), + "critical_gt": float64(5958), + "min": float64(0), + "max": float64(5968), + }, metrics[0].Fields()) + + // /boot=68MB;88;93;0;98 + assert.Equal(t, map[string]string{ + "unit": "MB", + "perfdata": "/boot", + }, metrics[1].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(68), + "warning_lt": float64(0), + "warning_gt": float64(88), + "critical_lt": float64(0), + "critical_gt": float64(93), + "min": float64(0), + "max": float64(98), + }, metrics[1].Fields()) + + // /home=69357MB;253404;253409;0;253414 + assert.Equal(t, map[string]string{ + "unit": "MB", + "perfdata": "/home", + }, metrics[2].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(69357), + "warning_lt": float64(0), + "warning_gt": float64(253404), + "critical_lt": float64(0), + "critical_gt": float64(253409), + "min": float64(0), + "max": float64(253414), + }, metrics[2].Fields()) + + // /var/log=818MB;970;975;0;980 + assert.Equal(t, map[string]string{ + "unit": "MB", + "perfdata": "/var/log", + }, metrics[3].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(818), + "warning_lt": float64(0), + "warning_gt": float64(970), + "critical_lt": float64(0), + "critical_gt": float64(975), + "min": float64(0), + "max": float64(980), + }, metrics[3].Fields()) + + assertNagiosState(t, metrics[4], map[string]interface{}{ + "service_output": "DISK OK - free space: / 3326 MB (56%);", + "long_service_output": "/ 15272 MB (77%);\n/boot 68 MB (69%);\n/home 69357 MB (27%);\n/var/log 819 MB (84%);", + }) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metrics, err := parser.Parse([]byte(tt.input)) + tt.assertF(t, metrics, err) + }) + } } func TestParseThreshold(t *testing.T) { From 0f21373439115ba0c34b8f470672184f035b900d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 25 Mar 2019 16:26:03 -0700 Subject: [PATCH 0717/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fd751268e..1a2f55483 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ - [#5556](https://github.com/influxdata/telegraf/pull/5556): Add TTL field to ping input. - [#5569](https://github.com/influxdata/telegraf/pull/5569): Add hexadecimal string to integer conversion to converter processor. +- [#5601](https://github.com/influxdata/telegraf/pull/5601): Add support for multiple line text and perfdata to nagios parser. 
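For reference, a standalone sketch of the check-output layout the reworked parser handles (hypothetical helper code mirroring the test data above, not the parser's API): the first line carries the service output with optional perfdata after `|`, subsequent lines are long output, and a `|` there switches the remainder of the stream to perfdata.

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	out := "DISK OK - free space: / 3326 MB (56%); | /=2643MB;5948;5958;0;5968\n" +
		"/boot 68 MB (69%);\n" +
		"/home 69357 MB (27%); | /boot=68MB;88;93;0;98\n" +
		"/home=69357MB;253404;253409;0;253414"

	s := bufio.NewScanner(strings.NewReader(out))

	// First line: service output, optionally followed by perfdata after '|'.
	s.Scan()
	parts := strings.SplitN(s.Text(), "|", 2)
	fmt.Printf("service_output: %q\n", strings.TrimSpace(parts[0]))
	if len(parts) == 2 {
		fmt.Printf("perfdata: %q\n", strings.TrimSpace(parts[1]))
	}

	// Remaining lines: long output until a '|' flips the stream to perfdata.
	inPerfdata := false
	for s.Scan() {
		line := s.Text()
		if !inPerfdata {
			if p := strings.SplitN(line, "|", 2); len(p) == 2 {
				fmt.Printf("long_service_output: %q\n", strings.TrimSpace(p[0]))
				fmt.Printf("perfdata: %q\n", strings.TrimSpace(p[1]))
				inPerfdata = true
				continue
			}
			fmt.Printf("long_service_output: %q\n", strings.TrimSpace(line))
		} else {
			fmt.Printf("perfdata: %q\n", strings.TrimSpace(line))
		}
	}
}
```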
#### Bugfixes From 5e6374c19b8d9c44258e444ad04e7f4670f8e8b9 Mon Sep 17 00:00:00 2001 From: John Hu Date: Wed, 27 Mar 2019 09:01:50 +0800 Subject: [PATCH 0718/1815] Fix drop tracking of aggregator drop_original metrics (#5632) --- agent/agent.go | 2 ++ docker-compose.yml | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index d2bc69ad0..d83748811 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -364,6 +364,8 @@ func (a *Agent) runAggregators( if !dropOriginal { dst <- metric + } else { + metric.Drop() } } cancel() diff --git a/docker-compose.yml b/docker-compose.yml index 5ac47089d..ca5fa3836 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -17,7 +17,7 @@ services: - KAFKA_ADVERTISED_HOST_NAME=localhost - KAFKA_ADVERTISED_PORT=9092 - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 - - KAFKA_CREATE_TOPICS="test:1:1" + - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 - JAVA_OPTS="-Xms256m -Xmx256m" ports: - "9092:9092" @@ -38,7 +38,7 @@ services: - "3306:3306" memcached: image: memcached - ports: + ports: - "11211:11211" pgbouncer: image: mbed/pgbouncer From 3bb1548414ff92cd0cf781fbc984c58714606ada Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 Mar 2019 18:09:35 -0700 Subject: [PATCH 0719/1815] Fix plugin name in influxdb_v2 output logging (#5627) --- plugins/outputs/influxdb_v2/influxdb.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/influxdb_v2/influxdb.go b/plugins/outputs/influxdb_v2/influxdb.go index d0d6800a6..dca02b0cb 100644 --- a/plugins/outputs/influxdb_v2/influxdb.go +++ b/plugins/outputs/influxdb_v2/influxdb.go @@ -162,7 +162,7 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { return nil } - log.Printf("E! [outputs.influxdb] when writing to [%s]: %v", client.URL(), err) + log.Printf("E! [outputs.influxdb_v2] when writing to [%s]: %v", client.URL(), err) } return errors.New("could not write any address") From 22ab6492619a9d277dc5e7d3f4508f202745e6bc Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 Mar 2019 18:11:56 -0700 Subject: [PATCH 0720/1815] Listen before leaving start in statsd (#5628) --- plugins/inputs/statsd/statsd.go | 77 ++++++++++++++++++++------------- 1 file changed, 46 insertions(+), 31 deletions(-) diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 6b0dd0b78..8b5e15502 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -13,11 +13,10 @@ import ( "sync" "time" - "github.com/influxdata/telegraf/plugins/parsers/graphite" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers/graphite" "github.com/influxdata/telegraf/selfstat" ) @@ -338,38 +337,64 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error { s.MetricSeparator = defaultSeparator } - s.wg.Add(2) - // Start the UDP listener if s.isUDP() { - go s.udpListen() + address, err := net.ResolveUDPAddr(s.Protocol, s.ServiceAddress) + if err != nil { + return err + } + + conn, err := net.ListenUDP(s.Protocol, address) + if err != nil { + return err + } + + log.Println("I! 
Statsd UDP listener listening on: ", conn.LocalAddr().String()) + s.UDPlistener = conn + + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.udpListen(conn) + }() } else { - go s.tcpListen() + address, err := net.ResolveTCPAddr("tcp", s.ServiceAddress) + if err != nil { + return err + } + listener, err := net.ListenTCP("tcp", address) + if err != nil { + return err + } + + log.Println("I! TCP Statsd listening on: ", listener.Addr().String()) + s.TCPlistener = listener + + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.tcpListen(listener) + }() } + // Start the line parser - go s.parser() + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.parser() + }() log.Printf("I! Started the statsd service on %s\n", s.ServiceAddress) return nil } // tcpListen() starts listening for tcp connections on the configured port. -func (s *Statsd) tcpListen() error { - defer s.wg.Done() - // Start listener - var err error - address, _ := net.ResolveTCPAddr("tcp", s.ServiceAddress) - s.TCPlistener, err = net.ListenTCP("tcp", address) - if err != nil { - log.Fatalf("ERROR: ListenTCP - %s", err) - return err - } - log.Println("I! TCP Statsd listening on: ", s.TCPlistener.Addr().String()) +func (s *Statsd) tcpListen(listener *net.TCPListener) error { for { select { case <-s.done: return nil default: // Accept connection: - conn, err := s.TCPlistener.AcceptTCP() + conn, err := listener.AcceptTCP() if err != nil { return err } @@ -403,16 +428,7 @@ } // udpListen starts listening for udp packets on the configured port. -func (s *Statsd) udpListen() error { - defer s.wg.Done() - var err error - address, _ := net.ResolveUDPAddr(s.Protocol, s.ServiceAddress) - s.UDPlistener, err = net.ListenUDP(s.Protocol, address) - if err != nil { - log.Fatalf("ERROR: ListenUDP - %s", err) - } - log.Println("I! Statsd UDP listener listening on: ", s.UDPlistener.LocalAddr().String()) - +func (s *Statsd) udpListen(conn *net.UDPConn) error { if s.ReadBufferSize > 0 { s.UDPlistener.SetReadBuffer(s.ReadBufferSize) } @@ -423,7 +439,7 @@ case <-s.done: return nil default: - n, _, err := s.UDPlistener.ReadFromUDP(buf) + n, _, err := conn.ReadFromUDP(buf) if err != nil && !strings.Contains(err.Error(), "closed network") { log.Printf("E! Error READ: %s\n", err.Error()) continue @@ -448,7 +464,6 @@ // packet into statsd strings and then calls parseStatsdLine, which parses a // single statsd metric into a struct. 
func (s *Statsd) parser() error { - defer s.wg.Done() for { select { case <-s.done: From 5f74c0da0d8fcb8e3cff88d582a856e20bbddacd Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 Mar 2019 18:12:40 -0700 Subject: [PATCH 0721/1815] Fix basedir check and parent dir extraction in filecount input (#5630) --- plugins/inputs/filecount/filecount.go | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/filecount/filecount.go b/plugins/inputs/filecount/filecount.go index 1fd7041ff..c0072e0d8 100644 --- a/plugins/inputs/filecount/filecount.go +++ b/plugins/inputs/filecount/filecount.go @@ -4,16 +4,14 @@ import ( "log" "os" "path/filepath" - "strings" "time" - "github.com/karrick/godirwalk" - "github.com/pkg/errors" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/karrick/godirwalk" + "github.com/pkg/errors" ) const sampleConfig = ` @@ -157,7 +155,8 @@ func (fc *FileCount) count(acc telegraf.Accumulator, basedir string, glob globpa childSize := make(map[string]int64) walkFn := func(path string, de *godirwalk.Dirent) error { - if path == basedir { + rel, err := filepath.Rel(basedir, path) + if err == nil && rel == "." { return nil } file, err := os.Stat(path) @@ -173,7 +172,7 @@ func (fc *FileCount) count(acc telegraf.Accumulator, basedir string, glob globpa return nil } if match { - parent := path[:strings.LastIndex(path, "/")] + parent := filepath.Dir(path) childCount[parent]++ childSize[parent] += file.Size() } @@ -194,7 +193,7 @@ func (fc *FileCount) count(acc telegraf.Accumulator, basedir string, glob globpa "directory": path, }) } - parent := path[:strings.LastIndex(path, "/")] + parent := filepath.Dir(path) if fc.Recursive { childCount[parent] += childCount[path] childSize[parent] += childSize[path] From be26ef6f9b5d7f9f38f6831a79f2c5050d2c559e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 Mar 2019 18:06:33 -0700 Subject: [PATCH 0722/1815] Update changelog --- CHANGELOG.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1a2f55483..b4fe41d62 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,8 +8,6 @@ #### Bugfixes -- [#5540](https://github.com/influxdata/telegraf/pull/5540): Fix open file error handling in file output. - ## v1.10.2 [unreleased] #### Release Notes @@ -25,6 +23,11 @@ - [#5566](https://github.com/influxdata/telegraf/pull/5566): Fix reading major and minor block devices identifiers in diskio input. - [#5607](https://github.com/influxdata/telegraf/pull/5607): Add owned directories to rpm package spec. - [#5608](https://github.com/influxdata/telegraf/pull/5607): Fix last character removed from string field in grok parser. +- [#5632](https://github.com/influxdata/telegraf/pull/5632): Fix drop tracking of metrics removed with aggregator drop_original. +- [#5540](https://github.com/influxdata/telegraf/pull/5540): Fix open file error handling in file output. +- [#5627](https://github.com/influxdata/telegraf/pull/5627): Fix plugin name in influxdb_v2 output logging. +- [#5630](https://github.com/influxdata/telegraf/pull/5630): Fix basedir check and parent dir extraction in filecount input. +- [#5628](https://github.com/influxdata/telegraf/pull/5628): Listen before leaving start in statsd. 
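A minimal standalone sketch of the restructuring in #5628 (hypothetical type, not the plugin itself): the socket is bound synchronously inside Start so configuration errors are returned to the caller instead of aborting from within a goroutine, and only the blocking read loop runs asynchronously on the already-listening conn.

```go
package main

import (
	"fmt"
	"net"
)

type server struct {
	conn *net.UDPConn
}

// Start resolves and binds the address before returning, so any error
// surfaces to the caller; only the blocking read loop is started async.
func (s *server) Start(addr string) error {
	a, err := net.ResolveUDPAddr("udp", addr)
	if err != nil {
		return err
	}
	conn, err := net.ListenUDP("udp", a) // fails here, not inside the goroutine
	if err != nil {
		return err
	}
	s.conn = conn
	go s.listen()
	return nil
}

func (s *server) listen() {
	buf := make([]byte, 64*1024)
	for {
		n, _, err := s.conn.ReadFromUDP(buf)
		if err != nil {
			return // e.g. the conn was closed during shutdown
		}
		fmt.Printf("received %d bytes\n", n)
	}
}

func main() {
	s := &server{}
	if err := s.Start("127.0.0.1:0"); err != nil {
		fmt.Println("startup error:", err)
		return
	}
	fmt.Println("listening on", s.conn.LocalAddr())
}
```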
## v1.10.1 [2019-03-19] From c57ba6110d0f01b61f694a8046d5bfd63f92d72d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 Mar 2019 18:17:27 -0700 Subject: [PATCH 0723/1815] Exit after running --test without requiring --console (#5631) --- cmd/telegraf/telegraf.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index a3fae740c..5dd29cef7 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -342,7 +342,7 @@ func main() { log.Println("Telegraf version already configured to: " + internal.Version()) } - if runtime.GOOS == "windows" && !(*fRunAsConsole) { + if runtime.GOOS == "windows" && windowsRunAsService() { svcConfig := &service.Config{ Name: *fServiceName, DisplayName: "Telegraf Data Collector Service", @@ -392,3 +392,16 @@ func main() { ) } } + +// Return true if Telegraf should create a Windows service. +func windowsRunAsService() bool { + if *fService != "" { + return true + } + + if *fRunAsConsole { + return false + } + + return !service.Interactive() +} From 5109847be1f24e958be54599049217793612554c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 Mar 2019 18:20:52 -0700 Subject: [PATCH 0724/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b4fe41d62..a4202e8ab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,8 @@ #### Bugfixes +- [#5631](https://github.com/influxdata/telegraf/pull/5631): Create Windows service only when specified or in service manager. + ## v1.10.2 [unreleased] #### Release Notes From 3045ffbbe3aadd46ccbf97644083a56008a4eac8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 27 Mar 2019 12:25:07 -0700 Subject: [PATCH 0725/1815] Add github source for golang/x to Gopkg.toml Prevents removal of source in lock file. 
--- Gopkg.lock | 2 +- Gopkg.toml | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/Gopkg.lock b/Gopkg.lock index b889e6f8c..cc00c787c 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1311,8 +1311,8 @@ ] pruneopts = "" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" source = "https://github.com/golang/text.git" + version = "v0.3.0" [[projects]] branch = "master" diff --git a/Gopkg.toml b/Gopkg.toml index cd7825ccb..d9475a504 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -189,10 +189,12 @@ [[constraint]] name = "golang.org/x/net" branch = "master" + source = "https://github.com/golang/net.git" [[constraint]] name = "golang.org/x/sys" branch = "master" + source = "https://github.com/golang/sys.git" [[constraint]] name = "google.golang.org/grpc" @@ -237,6 +239,7 @@ [[constraint]] branch = "master" name = "golang.org/x/oauth2" + source = "https://github.com/golang/oauth2.git" [[constraint]] branch = "master" @@ -265,3 +268,15 @@ [[constraint]] name = "github.com/go-logfmt/logfmt" version = "0.4.0" + +[[override]] + name = "golang.org/x/crypto" + source = "https://github.com/golang/crypto.git" + +[[override]] + name = "golang.org/x/sync" + source = "https://github.com/golang/sync.git" + +[[override]] + name = "golang.org/x/text" + source = "https://github.com/golang/text.git" From 4e3244c57513e0599cf6ff53d780ce649f5c31d5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 29 Mar 2019 15:40:33 -0700 Subject: [PATCH 0726/1815] Fix aggregator window and shutdown of multiple aggregators (#5644) --- accumulator.go | 9 +- agent/accumulator.go | 17 +-- agent/accumulator_test.go | 7 +- agent/agent.go | 129 ++++++++++++++------- agent/agent_test.go | 65 ++++++++++- internal/internal.go | 2 + internal/internal_test.go | 34 ++++++ internal/models/running_aggregator.go | 35 +++--- internal/models/running_aggregator_test.go | 10 +- testutil/accumulator.go | 3 +- 10 files changed, 218 insertions(+), 93 deletions(-) diff --git a/accumulator.go b/accumulator.go index 825455c4c..1ea5737a8 100644 --- a/accumulator.go +++ b/accumulator.go @@ -41,11 +41,10 @@ type Accumulator interface { // AddMetric adds an metric to the accumulator. AddMetric(Metric) - // SetPrecision takes two time.Duration objects. If the first is non-zero, - // it sets that as the precision. Otherwise, it takes the second argument - // as the order of time that the metrics should be rounded to, with the - // maximum being 1s. - SetPrecision(precision, interval time.Duration) + // SetPrecision sets the timestamp rounding precision. All metrics + // added to the accumulator will have their timestamp rounded to the + // nearest multiple of precision. + SetPrecision(precision time.Duration) // Report an error. AddError(err error) diff --git a/agent/accumulator.go b/agent/accumulator.go index 0533a06e2..9e0bb11ca 100644 --- a/agent/accumulator.go +++ b/agent/accumulator.go @@ -114,21 +114,8 @@ func (ac *accumulator) AddError(err error) { log.Printf("E! 
[%s]: Error in plugin: %v", ac.maker.Name(), err) } -func (ac *accumulator) SetPrecision(precision, interval time.Duration) { - if precision > 0 { - ac.precision = precision - return - } - switch { - case interval >= time.Second: - ac.precision = time.Second - case interval >= time.Millisecond: - ac.precision = time.Millisecond - case interval >= time.Microsecond: - ac.precision = time.Microsecond - default: - ac.precision = time.Nanosecond - } +func (ac *accumulator) SetPrecision(precision time.Duration) { + ac.precision = precision } func (ac *accumulator) getTime(t []time.Time) time.Time { diff --git a/agent/accumulator_test.go b/agent/accumulator_test.go index 316ad124b..933821701 100644 --- a/agent/accumulator_test.go +++ b/agent/accumulator_test.go @@ -74,7 +74,6 @@ func TestSetPrecision(t *testing.T) { name string unset bool precision time.Duration - interval time.Duration timestamp time.Time expected time.Time }{ @@ -86,13 +85,13 @@ func TestSetPrecision(t *testing.T) { }, { name: "second interval", - interval: time.Second, + precision: time.Second, timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC), expected: time.Date(2006, time.February, 10, 12, 0, 0, 0, time.UTC), }, { name: "microsecond interval", - interval: time.Microsecond, + precision: time.Microsecond, timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC), expected: time.Date(2006, time.February, 10, 12, 0, 0, 82913000, time.UTC), }, @@ -109,7 +108,7 @@ func TestSetPrecision(t *testing.T) { a := NewAccumulator(&TestMetricMaker{}, metrics) if !tt.unset { - a.SetPrecision(tt.precision, tt.interval) + a.SetPrecision(tt.precision) } a.AddFields("acctest", diff --git a/agent/agent.go b/agent/agent.go index d83748811..ae2de85bf 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -180,8 +180,7 @@ func (a *Agent) Test(ctx context.Context) error { } acc := NewAccumulator(input, metricC) - acc.SetPrecision(a.Config.Agent.Precision.Duration, - a.Config.Agent.Interval.Duration) + acc.SetPrecision(a.Precision()) input.SetDefaultTags(a.Config.Tags) // Special instructions for some inputs. cpu, for example, needs to be @@ -189,8 +188,7 @@ func (a *Agent) Test(ctx context.Context) error { switch input.Name() { case "inputs.cpu", "inputs.mongodb", "inputs.procstat": nulAcc := NewAccumulator(input, nulC) - nulAcc.SetPrecision(a.Config.Agent.Precision.Duration, - a.Config.Agent.Interval.Duration) + nulAcc.SetPrecision(a.Precision()) if err := input.Input.Gather(nulAcc); err != nil { return err } @@ -222,7 +220,6 @@ func (a *Agent) runInputs( var wg sync.WaitGroup for _, input := range a.Config.Inputs { interval := a.Config.Agent.Interval.Duration - precision := a.Config.Agent.Precision.Duration jitter := a.Config.Agent.CollectionJitter.Duration // Overwrite agent interval if this plugin has its own. @@ -231,7 +228,7 @@ func (a *Agent) runInputs( } acc := NewAccumulator(input, dst) - acc.SetPrecision(precision, interval) + acc.SetPrecision(a.Precision()) wg.Add(1) go func(input *models.RunningInput) { @@ -339,10 +336,27 @@ func (a *Agent) applyProcessors(m telegraf.Metric) []telegraf.Metric { return metrics } -// runAggregators triggers the periodic push for Aggregators. 
+func updateWindow(start time.Time, roundInterval bool, period time.Duration) (time.Time, time.Time) { + var until time.Time + if roundInterval { + until = internal.AlignTime(start, period) + if until == start { + until = internal.AlignTime(start.Add(time.Nanosecond), period) + } + } else { + until = start.Add(period) + } + + since := until.Add(-period) + + return since, until +} + +// runAggregators adds metrics to the aggregators and triggers their periodic +// push call. // -// When the context is done a final push will occur and then this function -// will return. +// Runs until src is closed and all metrics have been processed. Will call +// push one final time before returning. func (a *Agent) runAggregators( startTime time.Time, src <-chan telegraf.Metric, ) error { ctx, cancel := context.WithCancel(context.Background()) + // Before calling Add, initialize the aggregation window. This ensures + // that any metric created after start time will be aggregated. + for _, agg := range a.Config.Aggregators { + since, until := updateWindow(startTime, a.Config.Agent.RoundInterval, agg.Period()) + agg.UpdateWindow(since, until) + } + var wg sync.WaitGroup wg.Add(1) go func() { @@ -371,33 +392,26 @@ cancel() }() - precision := a.Config.Agent.Precision.Duration - interval := a.Config.Agent.Interval.Duration aggregations := make(chan telegraf.Metric, 100) - for _, agg := range a.Config.Aggregators { - wg.Add(1) - go func(agg *models.RunningAggregator) { - defer func() { - wg.Done() - close(aggregations) - }() + wg.Add(1) + go func() { + defer wg.Done() - if a.Config.Agent.RoundInterval { - // Aggregators are aligned to the agent interval regardless of - // their period. - err := internal.SleepContext(ctx, internal.AlignDuration(startTime, interval)) - if err != nil { - return - } - } + var aggWg sync.WaitGroup + for _, agg := range a.Config.Aggregators { + aggWg.Add(1) + go func(agg *models.RunningAggregator) { + defer aggWg.Done() - agg.SetPeriodStart(startTime) + acc := NewAccumulator(agg, aggregations) + acc.SetPrecision(a.Precision()) + a.push(ctx, agg, acc) + }(agg) + } - acc := NewAccumulator(agg, aggregations) - acc.SetPrecision(precision, interval) - a.push(ctx, agg, acc) - }(agg) - } + aggWg.Wait() + close(aggregations) + }() for metric := range aggregations { metrics := a.applyProcessors(metric) @@ -405,39 +422,40 @@ dst <- metric } } wg.Wait() return nil } -// push runs the push for a single aggregator every period. More simple than -// the output/input version as timeout should be less likely.... not really -// because the output channel can block for now. +// push runs the push for a single aggregator every period. func (a *Agent) push( ctx context.Context, aggregator *models.RunningAggregator, acc telegraf.Accumulator, ) { - ticker := time.NewTicker(aggregator.Period()) - defer ticker.Stop() - for { + // Ensures that Push will be called for each period, even if it has + // already elapsed before this function is called. This is guaranteed + // so long as only Push updates the EndPeriod. This method + // also avoids drift by not using a ticker.
+ until := time.Until(aggregator.EndPeriod()) + select { - case <-ticker.C: + case <-time.After(until): + aggregator.Push(acc) break case <-ctx.Done(): aggregator.Push(acc) return } - - aggregator.Push(acc) } } // runOutputs triggers the periodic write for Outputs. // -// When the context is done, outputs continue to run until their buffer is -// closed, afterwich they run flush once more. +// Runs until src is closed and all metrics have been processed. Will call +// Write one final time before returning. func (a *Agent) runOutputs( startTime time.Time, src <-chan telegraf.Metric, @@ -608,7 +628,7 @@ // Gather() accumulator does apply rounding according to the // precision agent setting. acc := NewAccumulator(input, dst) - acc.SetPrecision(time.Nanosecond, 0) + acc.SetPrecision(time.Nanosecond) err := si.Start(acc) if err != nil { @@ -638,6 +658,27 @@ } } +// Returns the rounding precision for metrics. +func (a *Agent) Precision() time.Duration { + precision := a.Config.Agent.Precision.Duration + interval := a.Config.Agent.Interval.Duration + + if precision > 0 { + return precision + } + + switch { + case interval >= time.Second: + return time.Second + case interval >= time.Millisecond: + return time.Millisecond + case interval >= time.Microsecond: + return time.Microsecond + default: + return time.Nanosecond + } +} + // panicRecover displays an error if an input panics. func panicRecover(input *models.RunningInput) { if err := recover(); err != nil { diff --git a/agent/agent_test.go b/agent/agent_test.go index a5920ce1c..c822a236b 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -2,15 +2,13 @@ package agent import ( "testing" + "time" "github.com/influxdata/telegraf/internal/config" - - // needing to load the plugins _ "github.com/influxdata/telegraf/plugins/inputs/all" - // needing to load the outputs _ "github.com/influxdata/telegraf/plugins/outputs/all" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestAgent_OmitHostname(t *testing.T) { @@ -109,3 +107,62 @@ func TestAgent_LoadOutput(t *testing.T) { a, _ = NewAgent(c) assert.Equal(t, 3, len(a.Config.Outputs)) } + +func TestWindow(t *testing.T) { + parse := func(s string) time.Time { + tm, err := time.Parse(time.RFC3339, s) + if err != nil { + panic(err) + } + return tm + } + + tests := []struct { + name string + start time.Time + roundInterval bool + period time.Duration + since time.Time + until time.Time + }{ + { + name: "round with exact alignment", + start: parse("2018-03-27T00:00:00Z"), + roundInterval: true, + period: 30 * time.Second, + since: parse("2018-03-27T00:00:00Z"), + until: parse("2018-03-27T00:00:30Z"), + }, + { + name: "round with alignment needed", + start: parse("2018-03-27T00:00:05Z"), + roundInterval: true, + period: 30 * time.Second, + since: parse("2018-03-27T00:00:00Z"), + until: parse("2018-03-27T00:00:30Z"), + }, + { + name: "no round with exact alignment", + start: parse("2018-03-27T00:00:00Z"), + roundInterval: false, + period: 30 * time.Second, + since: parse("2018-03-27T00:00:00Z"), + until: parse("2018-03-27T00:00:30Z"), + }, + { + name: "no round with alignment needed", + start: parse("2018-03-27T00:00:05Z"), + roundInterval: false, + period: 30 * time.Second, + since: parse("2018-03-27T00:00:05Z"), + until: parse("2018-03-27T00:00:35Z"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + since, until := updateWindow(tt.start, tt.roundInterval, 
tt.period) + require.Equal(t, tt.since, since, "since") + require.Equal(t, tt.until, until, "until") + }) + } +} diff --git a/internal/internal.go b/internal/internal.go index b373c9c35..133b19e9b 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -288,11 +288,13 @@ func SleepContext(ctx context.Context, duration time.Duration) error { } // AlignDuration returns the duration until next aligned interval. +// If the current time is aligned a 0 duration is returned. func AlignDuration(tm time.Time, interval time.Duration) time.Duration { return AlignTime(tm, interval).Sub(tm) } // AlignTime returns the time of the next aligned interval. +// If the current time is aligned the current time is returned. func AlignTime(tm time.Time, interval time.Duration) time.Time { truncated := tm.Truncate(interval) if truncated == tm { diff --git a/internal/internal_test.go b/internal/internal_test.go index 681e1f808..da2fe01c5 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -271,6 +271,40 @@ func TestAlignDuration(t *testing.T) { } } +func TestAlignTime(t *testing.T) { + rfc3339 := func(value string) time.Time { + t, _ := time.Parse(time.RFC3339, value) + return t + } + + tests := []struct { + name string + now time.Time + interval time.Duration + expected time.Time + }{ + { + name: "aligned", + now: rfc3339("2018-01-01T01:01:00Z"), + interval: 10 * time.Second, + expected: rfc3339("2018-01-01T01:01:00Z"), + }, + { + name: "aligned", + now: rfc3339("2018-01-01T01:01:01Z"), + interval: 10 * time.Second, + expected: rfc3339("2018-01-01T01:01:10Z"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := AlignTime(tt.now, tt.interval) + require.Equal(t, tt.expected, actual) + }) + } +} + func TestParseTimestamp(t *testing.T) { time, err := ParseTimestamp("2019-02-20 21:50:34.029665", "2006-01-02 15:04:05.000000") assert.Nil(t, err) diff --git a/internal/models/running_aggregator.go b/internal/models/running_aggregator.go index f54b5266e..8a2cd576a 100644 --- a/internal/models/running_aggregator.go +++ b/internal/models/running_aggregator.go @@ -1,6 +1,7 @@ package models import ( + "log" "sync" "time" @@ -74,9 +75,14 @@ func (r *RunningAggregator) Period() time.Duration { return r.Config.Period } -func (r *RunningAggregator) SetPeriodStart(start time.Time) { +func (r *RunningAggregator) EndPeriod() time.Time { + return r.periodEnd +} + +func (r *RunningAggregator) UpdateWindow(start, until time.Time) { r.periodStart = start - r.periodEnd = r.periodStart.Add(r.Config.Period).Add(r.Config.Delay) + r.periodEnd = until + log.Printf("D! [%s] Updated aggregation range [%s, %s]", r.Name(), start, until) } func (r *RunningAggregator) MakeMetric(metric telegraf.Metric) telegraf.Metric { @@ -97,10 +103,6 @@ func (r *RunningAggregator) MakeMetric(metric telegraf.Metric) telegraf.Metric { return m } -func (r *RunningAggregator) metricDropped(metric telegraf.Metric) { - r.MetricsDropped.Incr(1) -} - // Add a metric to the aggregator and return true if the original metric // should be dropped. func (r *RunningAggregator) Add(m telegraf.Metric) bool { @@ -108,22 +110,25 @@ func (r *RunningAggregator) Add(m telegraf.Metric) bool { return false } - // Make a copy of the metric but don't retain tracking; it doesn't make - // sense to fail a metric's delivery due to the aggregation not being - // sent because we can't create aggregations of historical data. + // Make a copy of the metric but don't retain tracking. 
We do not fail a + // delivery due to the aggregation not being sent because we can't create + // aggregations of historical data. Additionally, waiting for the + // aggregation to be pushed would introduce a hefty latency to delivery. m = metric.FromMetric(m) r.Config.Filter.Modify(m) if len(m.FieldList()) == 0 { - r.metricDropped(m) + r.MetricsFiltered.Incr(1) return r.Config.DropOriginal } r.Lock() defer r.Unlock() - if r.periodStart.IsZero() || m.Time().After(r.periodEnd) { - r.metricDropped(m) + if m.Time().Before(r.periodStart) || m.Time().After(r.periodEnd.Add(r.Config.Delay)) { + log.Printf("D! [%s] metric is outside aggregation window; discarding. %s: m: %s e: %s", + r.Name(), m.Time(), r.periodStart, r.periodEnd) + r.MetricsDropped.Incr(1) return r.Config.DropOriginal } @@ -135,8 +140,10 @@ func (r *RunningAggregator) Push(acc telegraf.Accumulator) { r.Lock() defer r.Unlock() - r.periodStart = r.periodEnd - r.periodEnd = r.periodStart.Add(r.Config.Period).Add(r.Config.Delay) + since := r.periodEnd + until := r.periodEnd.Add(r.Config.Period) + r.UpdateWindow(since, until) + r.push(acc) r.Aggregator.Reset() } diff --git a/internal/models/running_aggregator_test.go b/internal/models/running_aggregator_test.go index 76c7e4e5d..19476eecf 100644 --- a/internal/models/running_aggregator_test.go +++ b/internal/models/running_aggregator_test.go @@ -23,7 +23,7 @@ func TestAdd(t *testing.T) { acc := testutil.Accumulator{} now := time.Now() - ra.SetPeriodStart(now) + ra.UpdateWindow(now, now.Add(ra.Config.Period)) m := testutil.MustMetric("RITest", map[string]string{}, @@ -51,7 +51,7 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) { require.NoError(t, ra.Config.Filter.Compile()) acc := testutil.Accumulator{} now := time.Now() - ra.SetPeriodStart(now) + ra.UpdateWindow(now, now.Add(ra.Config.Period)) m := testutil.MustMetric("RITest", map[string]string{}, @@ -86,7 +86,7 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) { ra.Push(&acc) require.Equal(t, 1, len(acc.Metrics)) - require.Equal(t, int64(202), acc.Metrics[0].Fields["sum"]) + require.Equal(t, int64(101), acc.Metrics[0].Fields["sum"]) } func TestAddAndPushOnePeriod(t *testing.T) { @@ -102,7 +102,7 @@ func TestAddAndPushOnePeriod(t *testing.T) { acc := testutil.Accumulator{} now := time.Now() - ra.SetPeriodStart(now) + ra.UpdateWindow(now, now.Add(ra.Config.Period)) m := testutil.MustMetric("RITest", map[string]string{}, @@ -129,7 +129,7 @@ func TestAddDropOriginal(t *testing.T) { require.NoError(t, ra.Config.Filter.Compile()) now := time.Now() - ra.SetPeriodStart(now) + ra.UpdateWindow(now, now.Add(ra.Config.Period)) m := testutil.MustMetric("RITest", map[string]string{}, diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 3fe291699..a7b9fe8f6 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -10,7 +10,6 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/stretchr/testify/assert" ) @@ -204,7 +203,7 @@ func (a *Accumulator) AddError(err error) { a.Unlock() } -func (a *Accumulator) SetPrecision(precision, interval time.Duration) { +func (a *Accumulator) SetPrecision(precision time.Duration) { return } From aac013f8ab90a73cfd7ed86c72b5640a9990119e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 29 Mar 2019 15:52:36 -0700 Subject: [PATCH 0727/1815] Update changelog --- CHANGELOG.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a4202e8ab..56efbbbc3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,12 
+24,14 @@ - [#5523](https://github.com/influxdata/telegraf/issues/5523): Fix missing cluster stats in ceph input. - [#5566](https://github.com/influxdata/telegraf/pull/5566): Fix reading major and minor block devices identifiers in diskio input. - [#5607](https://github.com/influxdata/telegraf/pull/5607): Add owned directories to rpm package spec. -- [#5608](https://github.com/influxdata/telegraf/pull/5607): Fix last character removed from string field in grok parser. +- [#4998](https://github.com/influxdata/telegraf/issues/4998): Fix last character removed from string field in grok parser. - [#5632](https://github.com/influxdata/telegraf/pull/5632): Fix drop tracking of metrics removed with aggregator drop_original. - [#5540](https://github.com/influxdata/telegraf/pull/5540): Fix open file error handling in file output. -- [#5627](https://github.com/influxdata/telegraf/pull/5627): Fix plugin name in influxdb_v2 output logging. -- [#5630](https://github.com/influxdata/telegraf/pull/5630): Fix basedir check and parent dir extraction in filecount input. -- [#5628](https://github.com/influxdata/telegraf/pull/5628): Listen before leaving start in statsd. +- [#5626](https://github.com/influxdata/telegraf/issues/5626): Fix plugin name in influxdb_v2 output logging. +- [#5621](https://github.com/influxdata/telegraf/issues/5621): Fix basedir check and parent dir extraction in filecount input. +- [#5618](https://github.com/influxdata/telegraf/issues/5618): Listen before leaving start in statsd. +- [#5595](https://github.com/influxdata/telegraf/issues/5595): Fix aggregator window alignment. +- [#5637](https://github.com/influxdata/telegraf/issues/5637): Fix panic during shutdown of multiple aggregators. ## v1.10.1 [2019-03-19] From 6feb6c18534557804de1e1674eb0ac7af25d8a5b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 29 Mar 2019 16:02:10 -0700 Subject: [PATCH 0728/1815] Add support for ${} env vars in configuration file (#5648) --- docs/CONFIGURATION.md | 16 +++++------ internal/config/config.go | 27 ++++++++++++++----- internal/config/config_test.go | 3 +-- .../testdata/single_plugin_env_vars.toml | 2 +- 4 files changed, 30 insertions(+), 18 deletions(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 157ad023c..9e016af62 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -34,10 +34,10 @@ configuration files. ### Environment Variables -Environment variables can be used anywhere in the config file, simply prepend -them with `$`. Replacement occurs before file parsing. For strings -the variable must be within quotes, e.g., `"$STR_VAR"`, for numbers and booleans -they should be unquoted, e.g., `$INT_VAR`, `$BOOL_VAR`. +Environment variables can be used anywhere in the config file, simply surround +them with `${}`. Replacement occurs before file parsing. For strings +the variable must be within quotes, e.g., `"${STR_VAR}"`, for numbers and booleans +they should be unquoted, e.g., `${INT_VAR}`, `${BOOL_VAR}`. When using the `.deb` or `.rpm` packages, you can define environment variables in the `/etc/default/telegraf` file. 
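The replacement that consumes this `${}` syntax is regex-driven; the real implementation lives in `internal/config/config.go` (shown in the next hunk), but its core can be sketched in isolation. The variable names below are hypothetical, and the sketch skips the quote-escaping the loader applies to values:

```go
package main

import (
	"fmt"
	"os"
	"regexp"
)

// Matches both forms patch 0728 accepts: submatch 1 is set for ${VAR},
// submatch 2 for bare $VAR. Substitution runs before TOML parsing.
var envVarRe = regexp.MustCompile(`\$\{(\w+)\}|\$(\w+)`)

func expand(contents string) string {
	return envVarRe.ReplaceAllStringFunc(contents, func(match string) string {
		groups := envVarRe.FindStringSubmatch(match)
		name := groups[1]
		if name == "" {
			name = groups[2]
		}
		if value, ok := os.LookupEnv(name); ok {
			return value
		}
		return match // unset variables are left untouched, as in the loader
	})
}

func main() {
	os.Setenv("INFLUX_URL", "http://localhost:8086")
	fmt.Println(expand(`urls = ["${INFLUX_URL}"]`)) // urls = ["http://localhost:8086"]
	fmt.Println(expand(`password = "$UNSET_VAR"`))  // unchanged
}
```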
@@ -55,14 +55,14 @@ INFLUX_PASSWORD="monkey123" `/etc/telegraf.conf`: ```toml [global_tags] - user = "$USER" + user = "${USER}" [[inputs.mem]] [[outputs.influxdb]] - urls = ["$INFLUX_URL"] - skip_database_creation = $INFLUX_SKIP_DATABASE_CREATION - password = "$INFLUX_PASSWORD" + urls = ["${INFLUX_URL}"] + skip_database_creation = ${INFLUX_SKIP_DATABASE_CREATION} + password = "${INFLUX_PASSWORD}" ``` The above files will produce the following effective configuration file to be diff --git a/internal/config/config.go b/internal/config/config.go index 1c47b1535..939cb4c75 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -40,7 +40,7 @@ var ( outputDefaults = []string{"influxdb"} // envVarRe is a regex to find environment variables in the config file - envVarRe = regexp.MustCompile(`\$\w+`) + envVarRe = regexp.MustCompile(`\$\{(\w+)\}|\$(\w+)`) envVarEscaper = strings.NewReplacer( `"`, `\"`, @@ -208,9 +208,9 @@ var header = `# Telegraf Configuration # Use 'telegraf -config telegraf.conf -test' to see what metrics a config # file would generate. # -# Environment variables can be used anywhere in this config file, simply prepend -# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), -# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) +# Environment variables can be used anywhere in this config file, simply surround +# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"), +# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) # Global tags can be specified here in key="value" format. @@ -787,12 +787,25 @@ func fetchConfig(u *url.URL) ([]byte, error) { func parseConfig(contents []byte) (*ast.Table, error) { contents = trimBOM(contents) - env_vars := envVarRe.FindAll(contents, -1) - for _, env_var := range env_vars { + parameters := envVarRe.FindAllSubmatch(contents, -1) + for _, parameter := range parameters { + if len(parameter) != 3 { + continue + } + + var env_var []byte + if parameter[1] != nil { + env_var = parameter[1] + } else if parameter[2] != nil { + env_var = parameter[2] + } else { + continue + } + env_val, ok := os.LookupEnv(strings.TrimPrefix(string(env_var), "$")) if ok { env_val = escapeEnv(env_val) - contents = bytes.Replace(contents, env_var, []byte(env_val), 1) + contents = bytes.Replace(contents, parameter[0], []byte(env_val), 1) } } diff --git a/internal/config/config_test.go b/internal/config/config_test.go index cd7d2301c..77b0dffd4 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -11,7 +11,6 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/memcached" "github.com/influxdata/telegraf/plugins/inputs/procstat" "github.com/influxdata/telegraf/plugins/parsers" - "github.com/stretchr/testify/assert" ) @@ -28,7 +27,7 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { filter := models.Filter{ NameDrop: []string{"metricname2"}, - NamePass: []string{"metricname1"}, + NamePass: []string{"metricname1", "ip_192.168.1.1_name"}, FieldDrop: []string{"other", "stuff"}, FieldPass: []string{"some", "strings"}, TagDrop: []models.TagFilter{ diff --git a/internal/config/testdata/single_plugin_env_vars.toml b/internal/config/testdata/single_plugin_env_vars.toml index 6600a77b3..b1f71ea8a 100644 --- a/internal/config/testdata/single_plugin_env_vars.toml +++ b/internal/config/testdata/single_plugin_env_vars.toml @@ -1,6 +1,6 @@ [[inputs.memcached]] servers = ["$MY_TEST_SERVER"] - namepass = ["metricname1"] + namepass = 
["metricname1", "ip_${MY_TEST_SERVER}_name"] namedrop = ["metricname2"] fieldpass = ["some", "strings"] fielddrop = ["other", "stuff"] From db1e902c81375277fad3481ba1bf41f4157425a1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 29 Mar 2019 16:03:34 -0700 Subject: [PATCH 0729/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 56efbbbc3..4cec21e25 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ - [#5556](https://github.com/influxdata/telegraf/pull/5556): Add TTL field to ping input. - [#5569](https://github.com/influxdata/telegraf/pull/5569): Add hexadecimal string to integer conversion to converter processor. - [#5601](https://github.com/influxdata/telegraf/pull/5601): Add support for multiple line text and perfdata to nagios parser. +- [#5648](https://github.com/influxdata/telegraf/pull/5648): Allow env vars ${} expansion syntax in configuration file. #### Bugfixes From 13a00eeca54c90c70c76a475c92abd1dfa37e500 Mon Sep 17 00:00:00 2001 From: Olli-Pekka Lehto Date: Mon, 1 Apr 2019 13:53:50 -0500 Subject: [PATCH 0730/1815] Add option to reset buckets on flush to histogram aggregator (#5641) --- plugins/aggregators/histogram/README.md | 9 ++++- plugins/aggregators/histogram/histogram.go | 19 ++++++++-- .../aggregators/histogram/histogram_test.go | 37 ++++++++++++++++--- 3 files changed, 53 insertions(+), 12 deletions(-) diff --git a/plugins/aggregators/histogram/README.md b/plugins/aggregators/histogram/README.md index b4525681e..f9dafd789 100644 --- a/plugins/aggregators/histogram/README.md +++ b/plugins/aggregators/histogram/README.md @@ -7,8 +7,9 @@ Values added to a bucket are also added to the larger buckets in the distribution. This creates a [cumulative histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg). Like other Telegraf aggregators, the metric is emitted every `period` seconds. -Bucket counts however are not reset between periods and will be non-strictly -increasing while Telegraf is running. +By default bucket counts are not reset between periods and will be non-strictly +increasing while Telegraf is running. This behavior can be changed by setting the +`reset` parameter to true. #### Design @@ -34,6 +35,10 @@ of the algorithm which is implemented in the Prometheus ## aggregator and will not get sent to the output plugins. drop_original = false + ## If true, the histogram will be reset on flush instead + ## of accumulating the results. + reset = false + ## Example config that aggregates all fields of the metric. # [[aggregators.histogram.config]] # ## The set of buckets. diff --git a/plugins/aggregators/histogram/histogram.go b/plugins/aggregators/histogram/histogram.go index a60cede3d..a565d8902 100644 --- a/plugins/aggregators/histogram/histogram.go +++ b/plugins/aggregators/histogram/histogram.go @@ -16,7 +16,8 @@ const bucketInf = "+Inf" // HistogramAggregator is aggregator with histogram configs and particular histograms for defined metrics type HistogramAggregator struct { - Configs []config `toml:"config"` + Configs []config `toml:"config"` + ResetBuckets bool `toml:"reset"` buckets bucketsByMetrics cache map[uint64]metricHistogramCollection @@ -72,6 +73,10 @@ var sampleConfig = ` ## aggregator and will not get sent to the output plugins. drop_original = false + ## If true, the histogram will be reset on flush instead + ## of accumulating the results. + reset = false + ## Example config that aggregates all fields of the metric. 
# [[aggregators.histogram.config]] # ## The set of buckets. @@ -201,9 +206,15 @@ func (h *HistogramAggregator) groupField( ) } -// Reset does nothing, because we need to collect counts for a long time, otherwise if config parameter 'reset' has -// small value, we will get a histogram with a small amount of the distribution. -func (h *HistogramAggregator) Reset() {} +// Reset does nothing by default, because counts typically need to accumulate over a long time. +// If the config parameter 'reset' is set to 'true', the buckets are cleared on every flush, +// so each emitted histogram covers only the distribution of the last period. +func (h *HistogramAggregator) Reset() { + if h.ResetBuckets { + h.resetCache() + h.buckets = make(bucketsByMetrics) + } +} // resetCache resets cached counts(hits) in the buckets func (h *HistogramAggregator) resetCache() { diff --git a/plugins/aggregators/histogram/histogram_test.go b/plugins/aggregators/histogram/histogram_test.go index 8c4a2b9d3..694235831 100644 --- a/plugins/aggregators/histogram/histogram_test.go +++ b/plugins/aggregators/histogram/histogram_test.go @@ -12,8 +12,8 @@ ) // NewTestHistogram creates new test histogram aggregation with specified config -func NewTestHistogram(cfg []config) telegraf.Aggregator { - htm := &HistogramAggregator{Configs: cfg} +func NewTestHistogram(cfg []config, reset bool) telegraf.Aggregator { + htm := &HistogramAggregator{Configs: cfg, ResetBuckets: reset} htm.buckets = make(bucketsByMetrics) htm.resetCache() @@ -69,11 +69,12 @@ func BenchmarkApply(b *testing.B) { func TestHistogramWithPeriodAndOneField(t *testing.T) { var cfg []config cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) - histogram := NewTestHistogram(cfg) + histogram := NewTestHistogram(cfg, false) acc := &testutil.Accumulator{} histogram.Add(firstMetric1) + histogram.Reset() histogram.Add(firstMetric2) histogram.Push(acc) @@ -88,12 +89,36 @@ assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, bucketInf) } +// TestHistogramWithReset tests that bucket counts do not accumulate across periods when reset is enabled +func TestHistogramWithReset(t *testing.T) { + var cfg []config + cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) + histogram := NewTestHistogram(cfg, true) + + acc := &testutil.Accumulator{} + + histogram.Add(firstMetric1) + histogram.Reset() + histogram.Add(firstMetric2) + histogram.Push(acc) + + if len(acc.Metrics) != 6 { + assert.Fail(t, "Incorrect number of metrics") + } + assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "0") + assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "10") + assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1)}, "20") + assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1)}, "30") + assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1)}, "40") + assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1)}, bucketInf) +} + // TestHistogramWithPeriodAndAllFields tests two metrics for one period and for all fields func TestHistogramWithPeriodAndAllFields(t *testing.T) { var cfg []config cfg = 
append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}}) cfg = append(cfg, config{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}}) - histogram := NewTestHistogram(cfg) + histogram := NewTestHistogram(cfg, false) acc := &testutil.Accumulator{} @@ -127,7 +152,7 @@ func TestHistogramDifferentPeriodsAndAllFields(t *testing.T) { var cfg []config cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) - histogram := NewTestHistogram(cfg) + histogram := NewTestHistogram(cfg, false) acc := &testutil.Accumulator{} histogram.Add(firstMetric1) @@ -166,7 +191,7 @@ func TestWrongBucketsOrder(t *testing.T) { var cfg []config cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 90.0, 20.0, 30.0, 40.0}}) - histogram := NewTestHistogram(cfg) + histogram := NewTestHistogram(cfg, false) histogram.Add(firstMetric2) } From 782280345d62f3e466918023a30d98bf507c5ac5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 1 Apr 2019 11:54:43 -0700 Subject: [PATCH 0731/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4cec21e25..b9ebd4d88 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ - [#5569](https://github.com/influxdata/telegraf/pull/5569): Add hexadecimal string to integer conversion to converter processor. - [#5601](https://github.com/influxdata/telegraf/pull/5601): Add support for multiple line text and perfdata to nagios parser. - [#5648](https://github.com/influxdata/telegraf/pull/5648): Allow env vars ${} expansion syntax in configuration file. +- [#5641](https://github.com/influxdata/telegraf/pull/5641): Add option to reset buckets on flush to histogram aggregator. 
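Patch 0730 above leaves the aggregator's cumulative-bucket design unchanged and only adds the flush-time reset. A simplified sketch of that interaction, not the plugin's actual types: each value increments every bucket whose upper bound it does not exceed, and `reset` decides whether those counts survive a flush:

```go
package main

import (
	"fmt"
	"sort"
)

type histogram struct {
	bounds []float64 // sorted bucket upper bounds
	counts []int64
	reset  bool
}

func newHistogram(bounds []float64, reset bool) *histogram {
	sort.Float64s(bounds)
	return &histogram{bounds: bounds, counts: make([]int64, len(bounds)), reset: reset}
}

// add increments every bucket the value falls into, which is what makes
// the emitted histogram cumulative.
func (h *histogram) add(v float64) {
	for i, ub := range h.bounds {
		if v <= ub {
			h.counts[i]++
		}
	}
}

// flush returns the current counts and, when reset is enabled, clears them
// so the next period starts from zero.
func (h *histogram) flush() []int64 {
	out := append([]int64(nil), h.counts...)
	if h.reset {
		h.counts = make([]int64, len(h.bounds))
	}
	return out
}

func main() {
	h := newHistogram([]float64{10, 20, 30}, true)
	h.add(15)
	fmt.Println(h.flush()) // [0 1 1]
	h.add(15)
	fmt.Println(h.flush()) // [0 1 1] again; with reset=false this would be [0 2 2]
}
```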
#### Bugfixes From 3b80d8a7fd8a9d77381cedac14e1d0088578d146 Mon Sep 17 00:00:00 2001 From: liispon Date: Wed, 3 Apr 2019 02:04:37 +0800 Subject: [PATCH 0732/1815] Use github.com/ghodss/yaml to parse k8s config (#5643) --- Gopkg.lock | 9 +++++++++ Gopkg.toml | 4 ++++ plugins/inputs/prometheus/kubernetes.go | 2 +- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/Gopkg.lock b/Gopkg.lock index cc00c787c..67654d523 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -403,6 +403,14 @@ revision = "d1bbc0cffaf9849ddcae7b9efffae33e2dd52e9a" version = "v1.2.0" +[[projects]] + branch = "master" + digest = "1:ec95c1c49fbec27ab5383b9c47fae5c2fe1d97ac5b41d36d78e17588a44e9f3f" + name = "github.com/ghodss/yaml" + packages = ["."] + pruneopts = "" + revision = "25d852aebe32c875e9c044af3eef9c7dc6bc777f" + [[projects]] digest = "1:858b7fe7b0f4bc7ef9953926828f2816ea52d01a88d72d1c45bc8c108f23c356" name = "github.com/go-ini/ini" @@ -1547,6 +1555,7 @@ "github.com/ericchiang/k8s/apis/meta/v1", "github.com/ericchiang/k8s/apis/resource", "github.com/ericchiang/k8s/util/intstr", + "github.com/ghodss/yaml", "github.com/go-logfmt/logfmt", "github.com/go-redis/redis", "github.com/go-sql-driver/mysql", diff --git a/Gopkg.toml b/Gopkg.toml index d9475a504..bfc854450 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -269,6 +269,10 @@ name = "github.com/go-logfmt/logfmt" version = "0.4.0" +[[constraint]] + branch = "master" + name = "github.com/ghodss/yaml" + [[override]] name = "golang.org/x/crypto" source = "https://github.com/golang/crypto.git" diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index 87db15ffe..0d86ad91e 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -14,7 +14,7 @@ import ( "github.com/ericchiang/k8s" corev1 "github.com/ericchiang/k8s/apis/core/v1" - "gopkg.in/yaml.v2" + "github.com/ghodss/yaml" ) type payload struct { From 0d00f0af3cd273fb06fef1dd1ba99a5ff932ce8b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 2 Apr 2019 11:06:44 -0700 Subject: [PATCH 0733/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b9ebd4d88..19b2b0132 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,7 @@ - [#5618](https://github.com/influxdata/telegraf/issues/5618): Listen before leaving start in statsd. - [#5595](https://github.com/influxdata/telegraf/issues/5595): Fix aggregator window alignment. - [#5637](https://github.com/influxdata/telegraf/issues/5637): Fix panic during shutdown of multiple aggregators. +- [#5642](https://github.com/influxdata/telegraf/issues/5642): Fix parsing of kube config certificate-authority-data in prometheus input. 
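The one-line import swap in patch 0732 above matters because ghodss/yaml routes YAML through encoding/json, so the kube config's json-tagged fields behave as they would for JSON; in particular a `[]byte` field is base64-decoded, which is how `certificate-authority-data` is stored. A minimal illustration with a stand-in struct (not the real client type):

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

type cluster struct {
	Server string `json:"server"`
	// encoding/json base64-decodes into []byte; gopkg.in/yaml.v2 would
	// ignore the json tag and leave this field empty.
	CertificateAuthorityData []byte `json:"certificate-authority-data"`
}

func main() {
	doc := []byte("server: https://example.invalid:6443\ncertificate-authority-data: aGVsbG8=\n")
	var c cluster
	if err := yaml.Unmarshal(doc, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Server, string(c.CertificateAuthorityData)) // ... hello
}
```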
## v1.10.1 [2019-03-19] From c283e5992a3c4a83a6e4b07b6938f2a5254acf3f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 2 Apr 2019 11:14:56 -0700 Subject: [PATCH 0734/1815] Fix tags applied to wrong metric on parse error (#5650) --- plugins/parsers/influx/handler.go | 4 ++++ plugins/parsers/influx/parser.go | 2 ++ 2 files changed, 6 insertions(+) diff --git a/plugins/parsers/influx/handler.go b/plugins/parsers/influx/handler.go index c488a9c98..928671cc9 100644 --- a/plugins/parsers/influx/handler.go +++ b/plugins/parsers/influx/handler.go @@ -118,3 +118,7 @@ func (h *MetricHandler) SetTimestamp(tm []byte) error { h.builder.SetTime(time.Unix(0, ns)) return nil } + +func (h *MetricHandler) Reset() { + h.builder.Reset() +} diff --git a/plugins/parsers/influx/parser.go b/plugins/parsers/influx/parser.go index 8b9272b69..f1cd9a032 100644 --- a/plugins/parsers/influx/parser.go +++ b/plugins/parsers/influx/parser.go @@ -75,6 +75,7 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { } if err != nil { + p.handler.Reset() return nil, &ParseError{ Offset: p.machine.Position(), LineOffset: p.machine.LineOffset(), @@ -87,6 +88,7 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { metric, err := p.handler.Metric() if err != nil { + p.handler.Reset() return nil, err } From fb01b8ba281641637b98c0c12fdaaa28374ef1ff Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 2 Apr 2019 11:17:52 -0700 Subject: [PATCH 0735/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 19b2b0132..75fd4885c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ - [#5595](https://github.com/influxdata/telegraf/issues/5595): Fix aggregator window alignment. - [#5637](https://github.com/influxdata/telegraf/issues/5637): Fix panic during shutdown of multiple aggregators. - [#5642](https://github.com/influxdata/telegraf/issues/5642): Fix parsing of kube config certificate-authority-data in prometheus input. +- [#5636](https://github.com/influxdata/telegraf/issues/5636): Fix tags applied to wrong metric on parse error. ## v1.10.1 [2019-03-19] From ff81811720fa493c9ff9e8c17b73df2640e23c66 Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Tue, 2 Apr 2019 14:47:25 -0400 Subject: [PATCH 0736/1815] Add option to use strict sanitization rules to wavefront output (#5664) --- plugins/outputs/wavefront/README.md | 4 ++ plugins/outputs/wavefront/wavefront.go | 17 ++++++++ plugins/outputs/wavefront/wavefront_test.go | 47 +++++++++++++++++++++ 3 files changed, 68 insertions(+) diff --git a/plugins/outputs/wavefront/README.md b/plugins/outputs/wavefront/README.md index bc2156b13..71a760900 100644 --- a/plugins/outputs/wavefront/README.md +++ b/plugins/outputs/wavefront/README.md @@ -32,6 +32,10 @@ This plugin writes to a [Wavefront](https://www.wavefront.com) proxy, in Wavefro ## When true will convert all _ (underscore) characters in final metric name. default is true #convert_paths = true + ## Use Strict rules to sanitize metric and tag names from invalid characters + ## When enabled forward slash (/) and comma (,) will be accpeted + #use_strict = false + ## Use Regex to sanitize metric and tag names from invalid characters ## Regex is more thorough, but significantly slower. 
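Patch 0734 above fixes a state leak: the line-protocol parser reuses one metric builder, and a failed parse used to leave its partially-set tags behind for the next metric. A simplified stand-in for that bug class and the `Reset` fix (not the real parser types):

```go
package main

import "fmt"

type builder struct {
	name string
	tags map[string]string
}

// Reset clears everything a failed parse may have half-built, which is
// what the added handler.Reset() calls guarantee.
func (b *builder) Reset() {
	b.name = ""
	b.tags = map[string]string{}
}

func main() {
	b := &builder{tags: map[string]string{}}

	// A malformed line fails after the name and one tag were consumed.
	b.name = "cpu"
	b.tags["host"] = "a"
	parseFailed := true

	if parseFailed {
		b.Reset() // without this, host=a would attach to the next metric
	}

	b.name = "mem"
	fmt.Println(b.name, b.tags) // mem map[]
}
```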
default is false #use_regex = false diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index 257c5512e..65666d627 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -22,6 +22,7 @@ type Wavefront struct { ConvertPaths bool ConvertBool bool UseRegex bool + UseStrict bool SourceOverride []string StringToNumber map[string][]map[string]float64 @@ -37,6 +38,14 @@ var sanitizedChars = strings.NewReplacer( "=", "-", ) +// catch many of the invalid chars that could appear in a metric or tag name +var strictSanitizedChars = strings.NewReplacer( + "!", "-", "@", "-", "#", "-", "$", "-", "%", "-", "^", "-", "&", "-", + "*", "-", "(", "-", ")", "-", "+", "-", "`", "-", "'", "-", "\"", "-", + "[", "-", "]", "-", "{", "-", "}", "-", ":", "-", ";", "-", "<", "-", + ">", "-", "?", "-", "\\", "-", "|", "-", " ", "-", "=", "-", +) + // instead of Replacer which may miss some special characters we can use a regex pattern, but this is significantly slower than Replacer var sanitizedRegex = regexp.MustCompile("[^a-zA-Z\\d_.-]") @@ -71,6 +80,10 @@ var sampleConfig = ` ## When true will convert all _ (underscore) characters in final metric name. default is true #convert_paths = true + ## Use Strict rules to sanitize metric and tag names from invalid characters + ## When enabled forward slash (/) and comma (,) will be accpeted + #use_strict = false + ## Use Regex to sanitize metric and tag names from invalid characters ## Regex is more thorough, but significantly slower. default is false #use_regex = false @@ -163,6 +176,8 @@ func buildMetrics(m telegraf.Metric, w *Wavefront) []*MetricPoint { if w.UseRegex { name = sanitizedRegex.ReplaceAllLiteralString(name, "-") + } else if w.UseStrict { + name = strictSanitizedChars.Replace(name) } else { name = sanitizedChars.Replace(name) } @@ -238,6 +253,8 @@ func buildTags(mTags map[string]string, w *Wavefront) (string, map[string]string var key string if w.UseRegex { key = sanitizedRegex.ReplaceAllLiteralString(k, "-") + } else if w.UseStrict { + key = strictSanitizedChars.Replace(k) } else { key = sanitizedChars.Replace(k) } diff --git a/plugins/outputs/wavefront/wavefront_test.go b/plugins/outputs/wavefront/wavefront_test.go index 1fda6c7ae..776c3698f 100644 --- a/plugins/outputs/wavefront/wavefront_test.go +++ b/plugins/outputs/wavefront/wavefront_test.go @@ -50,6 +50,13 @@ func TestBuildMetrics(t *testing.T) { {Metric: w.Prefix + "testing.metric2", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag1": "value1"}}, }, }, + { + testutil.TestMetric(float64(1), "testing_just/another,metric:float", "metric2"), + []MetricPoint{ + {Metric: w.Prefix + "testing.just-another-metric-float", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag1": "value1"}}, + {Metric: w.Prefix + "testing.metric2", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag1": "value1"}}, + }, + }, { testMetric1, []MetricPoint{{Metric: w.Prefix + "test.simple.metric", Value: 123, Timestamp: timestamp, Source: "testHost", Tags: map[string]string{"tag1": "value1"}}}, @@ -67,6 +74,46 @@ func TestBuildMetrics(t *testing.T) { } +func TestBuildMetricsStrict(t *testing.T) { + w := defaultWavefront() + w.Prefix = "testthis." 
+ w.UseStrict = true + + pathReplacer = strings.NewReplacer("_", w.MetricSeparator) + + var timestamp int64 = 1257894000 + + var metricTests = []struct { + metric telegraf.Metric + metricPoints []MetricPoint + }{ + { + testutil.TestMetric(float64(1), "testing_just*a%metric:float", "metric2"), + []MetricPoint{ + {Metric: w.Prefix + "testing.just-a-metric-float", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag1": "value1"}}, + {Metric: w.Prefix + "testing.metric2", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag1": "value1"}}, + }, + }, + { + testutil.TestMetric(float64(1), "testing_just/another,metric:float", "metric2"), + []MetricPoint{ + {Metric: w.Prefix + "testing.just/another,metric-float", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag/1": "value1", "tag,2": "value2"}}, + {Metric: w.Prefix + "testing.metric2", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag/1": "value1", "tag,2": "value2"}}, + }, + }, + } + + for _, mt := range metricTests { + ml := buildMetrics(mt.metric, w) + for i, line := range ml { + if mt.metricPoints[i].Metric != line.Metric || mt.metricPoints[i].Value != line.Value { + t.Errorf("\nexpected\t%+v %+v\nreceived\t%+v %+v\n", mt.metricPoints[i].Metric, mt.metricPoints[i].Value, line.Metric, line.Value) + } + } + } + +} + func TestBuildMetricsWithSimpleFields(t *testing.T) { w := defaultWavefront() w.Prefix = "testthis." From b092aec103e7bd61a7ca19be5e227b8290163f83 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 2 Apr 2019 11:49:00 -0700 Subject: [PATCH 0737/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 75fd4885c..86f378f43 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ - [#5601](https://github.com/influxdata/telegraf/pull/5601): Add support for multiple line text and perfdata to nagios parser. - [#5648](https://github.com/influxdata/telegraf/pull/5648): Allow env vars ${} expansion syntax in configuration file. - [#5641](https://github.com/influxdata/telegraf/pull/5641): Add option to reset buckets on flush to histogram aggregator. +- [#5664](https://github.com/influxdata/telegraf/pull/5664): Add option to use strict sanitization rules to wavefront output. #### Bugfixes From 346ac519b6169bc241b15320aaabd879e9201925 Mon Sep 17 00:00:00 2001 From: David McKay Date: Tue, 2 Apr 2019 19:51:40 +0100 Subject: [PATCH 0738/1815] Fix CrateDB port in docker compose file (#5667) --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index ca5fa3836..a5991434b 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -90,7 +90,7 @@ services: ports: - "4200:4200" - "4230:4230" - - "5432:5432" + - "6543:5432" command: - crate - -Cnetwork.host=0.0.0.0 From 42cc84c2621305f9a53e92cd8bc52c50c3ac3f8c Mon Sep 17 00:00:00 2001 From: David McKay Date: Tue, 2 Apr 2019 19:53:11 +0100 Subject: [PATCH 0739/1815] Fix command for running integration test containers (#5660) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0015cd5eb..badf71c12 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -51,7 +51,7 @@ make test Running the integration tests requires several docker containers to be running. 
You can start the containers with: ``` -make docker-run +docker-compose up ``` And run the full test suite with: From d738892c0ba3f3e72504c2850f5e7e6a4fd9178d Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 2 Apr 2019 13:42:48 -0600 Subject: [PATCH 0740/1815] Remove tags that would create invalid label names in prometheus output (#5663) --- .../prometheus_client/prometheus_client.go | 33 ++++++++++++++++--- 1 file changed, 29 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index db7b0c207..da051daf9 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -24,7 +24,10 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" ) -var invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) +var ( + invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) + validNameCharRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*`) +) // SampleID uniquely identifies a Sample type SampleID string @@ -343,6 +346,10 @@ func sanitize(value string) string { return invalidNameCharRE.ReplaceAllString(value, "_") } +func isValidTagName(tag string) bool { + return validNameCharRE.MatchString(tag) +} + func getPromValueType(tt telegraf.ValueType) prometheus.ValueType { switch tt { case telegraf.Counter: @@ -414,7 +421,11 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { labels := make(map[string]string) for k, v := range tags { - labels[sanitize(k)] = v + tName := sanitize(k) + if !isValidTagName(tName) { + continue + } + labels[tName] = v } // Prometheus doesn't have a string value type, so convert string @@ -423,7 +434,11 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { for fn, fv := range point.Fields() { switch fv := fv.(type) { case string: - labels[sanitize(fn)] = fv + tName := sanitize(fn) + if !isValidTagName(tName) { + continue + } + labels[tName] = fv } } } @@ -469,6 +484,10 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { } mname = sanitize(point.Name()) + if !isValidTagName(mname) { + continue + } + p.addMetricFamily(point, sample, mname, sampleID) case telegraf.Histogram: @@ -511,6 +530,10 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { } mname = sanitize(point.Name()) + if !isValidTagName(mname) { + continue + } + p.addMetricFamily(point, sample, mname, sampleID) default: @@ -555,7 +578,9 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { mname = sanitize(fmt.Sprintf("%s_%s", point.Name(), fn)) } } - + if !isValidTagName(mname) { + continue + } p.addMetricFamily(point, sample, mname, sampleID) } From eba13426fc45157bb2b562d3d87b86b1ef5f4578 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 2 Apr 2019 12:43:37 -0700 Subject: [PATCH 0741/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 86f378f43..22e928482 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,7 @@ - [#5637](https://github.com/influxdata/telegraf/issues/5637): Fix panic during shutdown of multiple aggregators. - [#5642](https://github.com/influxdata/telegraf/issues/5642): Fix parsing of kube config certificate-authority-data in prometheus input. - [#5636](https://github.com/influxdata/telegraf/issues/5636): Fix tags applied to wrong metric on parse error. 
+- [#5522](https://github.com/influxdata/telegraf/issues/5522): Remove tags that would create invalid label names in prometheus output. ## v1.10.1 [2019-03-19] From 286eeb117e248b23433875aa4b95b45ebfd52a6b Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 2 Apr 2019 14:06:44 -0600 Subject: [PATCH 0742/1815] Remove unused config option from logfmt parser docs (#5669) --- plugins/parsers/logfmt/README.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/plugins/parsers/logfmt/README.md b/plugins/parsers/logfmt/README.md index fb3a565b3..d3e8ab66f 100644 --- a/plugins/parsers/logfmt/README.md +++ b/plugins/parsers/logfmt/README.md @@ -15,10 +15,6 @@ The `logfmt` data format parses data in [logfmt] format. ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "logfmt" - - ## Set the name of the created metric, if unset the name of the plugin will - ## be used. - metric_name = "logfmt" ``` ### Metrics From 1d965f11d3ef47ee0a297890b93da9ceba4e3ee4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 2 Apr 2019 13:08:26 -0700 Subject: [PATCH 0743/1815] Set release date for 1.10.2 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 22e928482..42cd7fae5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,7 @@ - [#5631](https://github.com/influxdata/telegraf/pull/5631): Create Windows service only when specified or in service manager. -## v1.10.2 [unreleased] +## v1.10.2 [2019-04-02] #### Release Notes From 0e50a3977d1d9d7dfbb55efb622add88d7e15aa0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 2 Apr 2019 14:14:37 -0700 Subject: [PATCH 0744/1815] Fix typo --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 42cd7fae5..2ca1f04a5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,7 +17,7 @@ #### Release Notes -- String fields no longer have leading and trailing quotating marks removed in +- String fields no longer have leading and trailing quotation marks removed in the grok parser. If you are capturing quoted strings you may need to update the patterns. 
From 0a68c8468b1cc285316979c9c24fcb0368af0400 Mon Sep 17 00:00:00 2001 From: David McKay Date: Wed, 3 Apr 2019 00:06:15 +0100 Subject: [PATCH 0745/1815] Add github input plugin (#5587) --- Gopkg.lock | 17 +++ Gopkg.toml | 4 + plugins/inputs/all/all.go | 1 + plugins/inputs/github/README.md | 47 +++++++ plugins/inputs/github/github.go | 184 +++++++++++++++++++++++++++ plugins/inputs/github/github_test.go | 119 +++++++++++++++++ 6 files changed, 372 insertions(+) create mode 100644 plugins/inputs/github/README.md create mode 100644 plugins/inputs/github/github.go create mode 100644 plugins/inputs/github/github_test.go diff --git a/Gopkg.lock b/Gopkg.lock index 67654d523..1521eb2cd 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -527,6 +527,22 @@ revision = "3af367b6b30c263d47e8895973edcca9a49cf029" version = "v0.2.0" +[[projects]] + digest = "1:e38ad2825940d58bd8425be40bcd4211099d0c1988c158c35828197413b3cf85" + name = "github.com/google/go-github" + packages = ["github"] + pruneopts = "" + revision = "7462feb2032c2da9e3b85e9b04e6853a6e9e14ca" + version = "v24.0.1" + +[[projects]] + digest = "1:cea4aa2038169ee558bf507d5ea02c94ca85bcca28a4c7bb99fd59b31e43a686" + name = "github.com/google/go-querystring" + packages = ["query"] + pruneopts = "" + revision = "44c6ddd0a2342c386950e880b658017258da92fc" + version = "v1.0.0" + [[projects]] digest = "1:c1d7e883c50a26ea34019320d8ae40fad86c9e5d56e63a1ba2cb618cef43e986" name = "github.com/google/uuid" @@ -1565,6 +1581,7 @@ "github.com/golang/protobuf/ptypes/empty", "github.com/golang/protobuf/ptypes/timestamp", "github.com/google/go-cmp/cmp", + "github.com/google/go-github/github", "github.com/gorilla/mux", "github.com/harlow/kinesis-consumer", "github.com/harlow/kinesis-consumer/checkpoint/ddb", diff --git a/Gopkg.toml b/Gopkg.toml index bfc854450..057af5e3b 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -284,3 +284,7 @@ [[override]] name = "golang.org/x/text" source = "https://github.com/golang/text.git" + +[[constraint]] + name = "github.com/google/go-github" + version = "24.0.1" diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 765505c3e..5f1ba4759 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -38,6 +38,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/filecount" _ "github.com/influxdata/telegraf/plugins/inputs/filestat" _ "github.com/influxdata/telegraf/plugins/inputs/fluentd" + _ "github.com/influxdata/telegraf/plugins/inputs/github" _ "github.com/influxdata/telegraf/plugins/inputs/graylog" _ "github.com/influxdata/telegraf/plugins/inputs/haproxy" _ "github.com/influxdata/telegraf/plugins/inputs/hddtemp" diff --git a/plugins/inputs/github/README.md b/plugins/inputs/github/README.md new file mode 100644 index 000000000..dc5a161cd --- /dev/null +++ b/plugins/inputs/github/README.md @@ -0,0 +1,47 @@ +# GitHub Input Plugin + +The [GitHub](https://www.github.com) input plugin gathers statistics from GitHub repositories. + +### Configuration: + +```toml +[[inputs.github]] + ## List of repositories to monitor + ## ex: repositories = ["influxdata/telegraf"] + # repositories = [] + + ## Optional: Unauthenticated requests are limited to 60 per hour. + # access_token = "" + + ## Optional: Default 5s. 
+ # http_timeout = "5s" +``` + +### Metrics: + +- github_repository + - tags: + - `name` - The repository name + - `owner` - The owner of the repository + - `language` - The primary language of the repository + - `license` - The license set for the repository + - fields: + - `stars` (int) + - `forks` (int) + - `open_issues` (int) + - `size` (int) + +* github_rate_limit + - tags: + - `access_token` - An obfusticated reference to the configured access token or "Unauthenticated" + - fields: + - `limit` - How many requests you are limited to (per hour) + - `remaining` - How many requests you have remaining (per hour) + - `blocks` - How many requests have been blocked due to rate limit + +### Example Output: + +``` +github,full_name=influxdata/telegraf,name=telegraf,owner=influxdata,language=Go,license=MIT\ License stars=6401i,forks=2421i,open_issues=722i,size=22611i 1552651811000000000 +internal_github,access_token=Unauthenticated rate_limit_remaining=59i,rate_limit_limit=60i,rate_limit_blocks=0i 1552653551000000000 +``` diff --git a/plugins/inputs/github/github.go b/plugins/inputs/github/github.go new file mode 100644 index 000000000..cf709e69a --- /dev/null +++ b/plugins/inputs/github/github.go @@ -0,0 +1,184 @@ +package github + +import ( + "context" + "fmt" + "net/http" + "strings" + "sync" + "time" + + "github.com/google/go-github/github" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/selfstat" + "golang.org/x/oauth2" +) + +// GitHub - plugin main structure +type GitHub struct { + Repositories []string `toml:"repositories"` + AccessToken string `toml:"access_token"` + HTTPTimeout internal.Duration `toml:"http_timeout"` + githubClient *github.Client + + obfusticatedToken string + + RateLimit selfstat.Stat + RateLimitErrors selfstat.Stat + RateRemaining selfstat.Stat +} + +const sampleConfig = ` + ## List of repositories to monitor + ## ex: repositories = ["influxdata/telegraf"] + # repositories = [] + + ## Optional: Unauthenticated requests are limited to 60 per hour. + # access_token = "" + + ## Optional: Default 5s. + # http_timeout = "5s" +` + +// SampleConfig returns sample configuration for this plugin. +func (g *GitHub) SampleConfig() string { + return sampleConfig +} + +// Description returns the plugin description. +func (g *GitHub) Description() string { + return "Read repository information from GitHub, including forks, stars, and more." +} + +// Create GitHub Client +func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error) { + httpClient := &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + }, + Timeout: g.HTTPTimeout.Duration, + } + + g.obfusticatedToken = "Unauthenticated" + + if g.AccessToken != "" { + tokenSource := oauth2.StaticTokenSource( + &oauth2.Token{AccessToken: g.AccessToken}, + ) + oauthClient := oauth2.NewClient(ctx, tokenSource) + ctx = context.WithValue(ctx, oauth2.HTTPClient, oauthClient) + + g.obfusticatedToken = g.AccessToken[0:4] + "..." 
+ g.AccessToken[len(g.AccessToken)-3:] + + return github.NewClient(oauthClient), nil + } + + return github.NewClient(httpClient), nil +} + +// Gather GitHub Metrics +func (g *GitHub) Gather(acc telegraf.Accumulator) error { + ctx := context.Background() + + if g.githubClient == nil { + githubClient, err := g.createGitHubClient(ctx) + + if err != nil { + return err + } + + g.githubClient = githubClient + + tokenTags := map[string]string{ + "access_token": g.obfusticatedToken, + } + + g.RateLimitErrors = selfstat.Register("github", "rate_limit_blocks", tokenTags) + g.RateLimit = selfstat.Register("github", "rate_limit_limit", tokenTags) + g.RateRemaining = selfstat.Register("github", "rate_limit_remaining", tokenTags) + } + + var wg sync.WaitGroup + wg.Add(len(g.Repositories)) + + for _, repository := range g.Repositories { + go func(repositoryName string, acc telegraf.Accumulator) { + defer wg.Done() + + owner, repository, err := splitRepositoryName(repositoryName) + if err != nil { + acc.AddError(err) + return + } + + repositoryInfo, response, err := g.githubClient.Repositories.Get(ctx, owner, repository) + + if _, ok := err.(*github.RateLimitError); ok { + g.RateLimitErrors.Incr(1) + } + + if err != nil { + acc.AddError(err) + return + } + + g.RateLimit.Set(int64(response.Rate.Limit)) + g.RateRemaining.Set(int64(response.Rate.Remaining)) + + now := time.Now() + tags := getTags(repositoryInfo) + fields := getFields(repositoryInfo) + + acc.AddFields("github_repository", fields, tags, now) + }(repository, acc) + } + + wg.Wait() + return nil +} + +func splitRepositoryName(repositoryName string) (string, string, error) { + splits := strings.SplitN(repositoryName, "/", 2) + + if len(splits) != 2 { + return "", "", fmt.Errorf("%v is not of format 'owner/repository'", repositoryName) + } + + return splits[0], splits[1], nil +} + +func getLicense(repositoryInfo *github.Repository) string { + if repositoryInfo.GetLicense() != nil { + return *repositoryInfo.License.Name + } + + return "None" +} + +func getTags(repositoryInfo *github.Repository) map[string]string { + return map[string]string{ + "owner": *repositoryInfo.Owner.Login, + "name": *repositoryInfo.Name, + "language": *repositoryInfo.Language, + "license": getLicense(repositoryInfo), + } +} + +func getFields(repositoryInfo *github.Repository) map[string]interface{} { + return map[string]interface{}{ + "stars": *repositoryInfo.StargazersCount, + "forks": *repositoryInfo.ForksCount, + "open_issues": *repositoryInfo.OpenIssuesCount, + "size": *repositoryInfo.Size, + } +} + +func init() { + inputs.Add("github", func() telegraf.Input { + return &GitHub{ + HTTPTimeout: internal.Duration{Duration: time.Second * 5}, + } + }) +} diff --git a/plugins/inputs/github/github_test.go b/plugins/inputs/github/github_test.go new file mode 100644 index 000000000..0ebae3a67 --- /dev/null +++ b/plugins/inputs/github/github_test.go @@ -0,0 +1,119 @@ +package github + +import ( + "reflect" + "testing" + + gh "github.com/google/go-github/github" + "github.com/stretchr/testify/require" +) + +func TestSplitRepositoryNameWithWorkingExample(t *testing.T) { + var validRepositoryNames = []struct { + fullName string + owner string + repository string + }{ + {"influxdata/telegraf", "influxdata", "telegraf"}, + {"influxdata/influxdb", "influxdata", "influxdb"}, + {"rawkode/saltstack-dotfiles", "rawkode", "saltstack-dotfiles"}, + } + + for _, tt := range validRepositoryNames { + t.Run(tt.fullName, func(t *testing.T) { + owner, repository, _ := 
splitRepositoryName(tt.fullName) + + require.Equal(t, tt.owner, owner) + require.Equal(t, tt.repository, repository) + }) + } +} + +func TestSplitRepositoryNameWithNoSlash(t *testing.T) { + var invalidRepositoryNames = []string{ + "influxdata-influxdb", + } + + for _, tt := range invalidRepositoryNames { + t.Run(tt, func(t *testing.T) { + _, _, err := splitRepositoryName(tt) + + require.NotNil(t, err) + }) + } +} + +func TestGetLicenseWhenExists(t *testing.T) { + licenseName := "MIT" + license := gh.License{Name: &licenseName} + repository := gh.Repository{License: &license} + + getLicenseReturn := getLicense(&repository) + + require.Equal(t, "MIT", getLicenseReturn) +} + +func TestGetLicenseWhenMissing(t *testing.T) { + repository := gh.Repository{} + + getLicenseReturn := getLicense(&repository) + + require.Equal(t, "None", getLicenseReturn) +} + +func TestGetTags(t *testing.T) { + licenseName := "MIT" + license := gh.License{Name: &licenseName} + + ownerName := "influxdata" + owner := gh.User{Login: &ownerName} + + fullName := "influxdata/influxdb" + repositoryName := "influxdb" + + language := "Go" + + repository := gh.Repository{ + FullName: &fullName, + Name: &repositoryName, + License: &license, + Owner: &owner, + Language: &language, + } + + getTagsReturn := getTags(&repository) + + correctTagsReturn := map[string]string{ + "owner": ownerName, + "name": repositoryName, + "language": language, + "license": licenseName, + } + + require.Equal(t, true, reflect.DeepEqual(getTagsReturn, correctTagsReturn)) +} + +func TestGetFields(t *testing.T) { + stars := 1 + forks := 2 + openIssues := 3 + size := 4 + + repository := gh.Repository{ + StargazersCount: &stars, + ForksCount: &forks, + OpenIssuesCount: &openIssues, + Size: &size, + } + + getFieldsReturn := getFields(&repository) + + correctFieldReturn := make(map[string]interface{}) + + correctFieldReturn["stars"] = 1 + correctFieldReturn["forks"] = 2 + correctFieldReturn["open_issues"] = 3 + correctFieldReturn["size"] = 4 + + require.Equal(t, true, reflect.DeepEqual(getFieldsReturn, correctFieldReturn)) +} From 1bcbc3eea73d9426c7129303a007e0c0f23f6440 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 2 Apr 2019 16:27:45 -0700 Subject: [PATCH 0746/1815] Update docs and changelog for github input --- CHANGELOG.md | 4 +++ README.md | 1 + docs/LICENSE_OF_DEPENDENCIES.md | 2 ++ plugins/inputs/github/README.md | 50 +++++++++++++++++++-------------- plugins/inputs/github/github.go | 11 ++++---- 5 files changed, 41 insertions(+), 27 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2ca1f04a5..065680958 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## v1.11 [unreleased] +#### New Inputs + +- [github](/plugins/inputs/github/README.md) - Contributed by @influxdata + #### Features - [#5556](https://github.com/influxdata/telegraf/pull/5556): Add TTL field to ping input. 
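Before the documentation diffs below, a standalone sketch of the go-github call the new plugin is built around (unauthenticated, so subject to the 60 requests/hour limit; a minimal illustration, not the plugin itself):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	// A nil *http.Client means unauthenticated requests (60 per hour).
	client := github.NewClient(nil)

	repo, resp, err := client.Repositories.Get(context.Background(), "influxdata", "telegraf")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("stars:", repo.GetStargazersCount())
	fmt.Println("forks:", repo.GetForksCount())
	fmt.Println("rate remaining:", resp.Rate.Remaining)
}
```

The plugin's Gather method makes this same Repositories.Get call once per configured repository, and records the rate-limit counters via selfstat so they surface through the internal plugin.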
diff --git a/README.md b/README.md index 8d08c6dfd..5d85ae5d6 100644 --- a/README.md +++ b/README.md @@ -168,6 +168,7 @@ For documentation on the latest development code see the [documentation index][d * [filestat](./plugins/inputs/filestat) * [filecount](./plugins/inputs/filecount) * [fluentd](./plugins/inputs/fluentd) +* [github](./plugins/inputs/github) * [graylog](./plugins/inputs/graylog) * [haproxy](./plugins/inputs/haproxy) * [hddtemp](./plugins/inputs/hddtemp) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 485b758a4..5b6faf4c9 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -43,6 +43,8 @@ following works: - github.com/golang/protobuf [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/protobuf/blob/master/LICENSE) - github.com/golang/snappy [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/snappy/blob/master/LICENSE) - github.com/google/go-cmp [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-cmp/blob/master/LICENSE) +- github.com/google/go-github [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-github/blob/master/LICENSE) +- github.com/google/go-querystring [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-querystring/blob/master/LICENSE) - github.com/google/uuid [BSD 3-Clause "New" or "Revised" License](https://github.com/google/uuid/blob/master/LICENSE) - github.com/googleapis/gax-go [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/gax-go/blob/master/LICENSE) - github.com/gorilla/context [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/context/blob/master/LICENSE) diff --git a/plugins/inputs/github/README.md b/plugins/inputs/github/README.md index dc5a161cd..524d1d0e7 100644 --- a/plugins/inputs/github/README.md +++ b/plugins/inputs/github/README.md @@ -1,47 +1,55 @@ # GitHub Input Plugin -The [GitHub](https://www.github.com) input plugin gathers statistics from GitHub repositories. +Gather repository information from [GitHub][] hosted repositories. -### Configuration: +**Note:** Telegraf also contains the [webhook][] input which can be used as an +alternative method for collecting repository information. + +### Configuration ```toml [[inputs.github]] ## List of repositories to monitor - ## ex: repositories = ["influxdata/telegraf"] - # repositories = [] + repositories = ["influxdata/telegraf"] - ## Optional: Unauthenticated requests are limited to 60 per hour. + ## Github API access token. Unauthenticated requests are limited to 60 per hour. # access_token = "" - ## Optional: Default 5s. + ## Timeout for HTTP requests. 
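  ## Accepts Go duration strings, e.g. "5s" or "1m".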
# http_timeout = "5s" ``` -### Metrics: +### Metrics - github_repository - tags: - - `name` - The repository name - - `owner` - The owner of the repository - - `language` - The primary language of the repository - - `license` - The license set for the repository + - name - The repository name + - owner - The owner of the repository + - language - The primary language of the repository + - license - The license set for the repository - fields: - - `stars` (int) - - `forks` (int) - - `open_issues` (int) - - `size` (int) + - stars (int) + - forks (int) + - open_issues (int) + - size (int) -* github_rate_limit +When the [internal][] input is enabled: + ++ internal_github - tags: - - `access_token` - An obfusticated reference to the configured access token or "Unauthenticated" + - access_token - An obfusticated reference to the configured access token or "Unauthenticated" - fields: - - `limit` - How many requests you are limited to (per hour) - - `remaining` - How many requests you have remaining (per hour) - - `blocks` - How many requests have been blocked due to rate limit + - limit - How many requests you are limited to (per hour) + - remaining - How many requests you have remaining (per hour) + - blocks - How many requests have been blocked due to rate limit -### Example Output: +### Example Output ``` github,full_name=influxdata/telegraf,name=telegraf,owner=influxdata,language=Go,license=MIT\ License stars=6401i,forks=2421i,open_issues=722i,size=22611i 1552651811000000000 internal_github,access_token=Unauthenticated rate_limit_remaining=59i,rate_limit_limit=60i,rate_limit_blocks=0i 1552653551000000000 ``` + +[GitHub]: https://www.github.com +[internal]: /plugins/inputs/internal +[webhook]: /plugins/inputs/webhooks/github diff --git a/plugins/inputs/github/github.go b/plugins/inputs/github/github.go index cf709e69a..ff497e55b 100644 --- a/plugins/inputs/github/github.go +++ b/plugins/inputs/github/github.go @@ -31,14 +31,13 @@ type GitHub struct { } const sampleConfig = ` - ## List of repositories to monitor - ## ex: repositories = ["influxdata/telegraf"] - # repositories = [] + ## List of repositories to monitor. + repositories = ["influxdata/telegraf"] - ## Optional: Unauthenticated requests are limited to 60 per hour. + ## Github API access token. Unauthenticated requests are limited to 60 per hour. # access_token = "" - ## Optional: Default 5s. + ## Timeout for HTTP requests. # http_timeout = "5s" ` @@ -49,7 +48,7 @@ func (g *GitHub) SampleConfig() string { // Description returns the plugin description. func (g *GitHub) Description() string { - return "Read repository information from GitHub, including forks, stars, and more." + return "Gather repository information from GitHub hosted repositories." 
} // Create GitHub Client From a61cb4dca54ff9501211d97c994a3efac6ef597e Mon Sep 17 00:00:00 2001 From: Daniel Fenert Date: Thu, 4 Apr 2019 00:59:47 +0200 Subject: [PATCH 0747/1815] Add bind input plugin (#5653) --- README.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/bind/README.md | 118 +++ plugins/inputs/bind/bind.go | 87 ++ plugins/inputs/bind/bind_test.go | 581 ++++++++++++ plugins/inputs/bind/json_stats.go | 166 ++++ plugins/inputs/bind/testdata/json/v1/mem | 133 +++ plugins/inputs/bind/testdata/json/v1/net | 241 +++++ plugins/inputs/bind/testdata/json/v1/server | 141 +++ plugins/inputs/bind/testdata/xml/v2 | 926 ++++++++++++++++++++ plugins/inputs/bind/testdata/xml/v3/mem | 142 +++ plugins/inputs/bind/testdata/xml/v3/net | 156 ++++ plugins/inputs/bind/testdata/xml/v3/server | 328 +++++++ plugins/inputs/bind/xml_stats_v2.go | 168 ++++ plugins/inputs/bind/xml_stats_v3.go | 161 ++++ 15 files changed, 3350 insertions(+) create mode 100644 plugins/inputs/bind/README.md create mode 100644 plugins/inputs/bind/bind.go create mode 100644 plugins/inputs/bind/bind_test.go create mode 100644 plugins/inputs/bind/json_stats.go create mode 100644 plugins/inputs/bind/testdata/json/v1/mem create mode 100644 plugins/inputs/bind/testdata/json/v1/net create mode 100644 plugins/inputs/bind/testdata/json/v1/server create mode 100644 plugins/inputs/bind/testdata/xml/v2 create mode 100644 plugins/inputs/bind/testdata/xml/v3/mem create mode 100644 plugins/inputs/bind/testdata/xml/v3/net create mode 100644 plugins/inputs/bind/testdata/xml/v3/server create mode 100644 plugins/inputs/bind/xml_stats_v2.go create mode 100644 plugins/inputs/bind/xml_stats_v3.go diff --git a/README.md b/README.md index 5d85ae5d6..de54c706a 100644 --- a/README.md +++ b/README.md @@ -139,6 +139,7 @@ For documentation on the latest development code see the [documentation index][d * [aws cloudwatch](./plugins/inputs/cloudwatch) * [bcache](./plugins/inputs/bcache) * [beanstalkd](./plugins/inputs/beanstalkd) +* [bind](./plugins/inputs/bind) * [bond](./plugins/inputs/bond) * [burrow](./plugins/inputs/burrow) * [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 5f1ba4759..7c592e925 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -8,6 +8,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/aurora" _ "github.com/influxdata/telegraf/plugins/inputs/bcache" _ "github.com/influxdata/telegraf/plugins/inputs/beanstalkd" + _ "github.com/influxdata/telegraf/plugins/inputs/bind" _ "github.com/influxdata/telegraf/plugins/inputs/bond" _ "github.com/influxdata/telegraf/plugins/inputs/burrow" _ "github.com/influxdata/telegraf/plugins/inputs/cassandra" diff --git a/plugins/inputs/bind/README.md b/plugins/inputs/bind/README.md new file mode 100644 index 000000000..34d419d3a --- /dev/null +++ b/plugins/inputs/bind/README.md @@ -0,0 +1,118 @@ +# BIND 9 Nameserver Statistics Input Plugin + +This plugin decodes the JSON or XML statistics provided by BIND 9 nameservers. + +### XML Statistics Channel + +Version 2 statistics (BIND 9.6 - 9.9) and version 3 statistics (BIND 9.9+) are supported. Note that +for BIND 9.9 to support version 3 statistics, it must be built with the `--enable-newstats` compile +flag, and it must be specifically requested via the correct URL. Version 3 statistics are the +default (and only) XML format in BIND 9.10+. 
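+
+Both statistics channels are plain HTTP endpoints, so they can be probed with nothing but the
+standard library. A minimal sketch that reads the memory summary via the JSON channel described
+in the next section (it assumes a statistics channel configured on localhost:8053):
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/http"
+)
+
+// memStats captures just the memory summary from the /json/v1/mem response.
+type memStats struct {
+	Memory struct {
+		TotalUse int
+		InUse    int
+	} `json:"memory"`
+}
+
+func main() {
+	resp, err := http.Get("http://localhost:8053/json/v1/mem")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer resp.Body.Close()
+
+	var m memStats
+	if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
+		log.Fatal(err)
+	}
+	fmt.Printf("in use: %d of %d bytes allocated\n", m.Memory.InUse, m.Memory.TotalUse)
+}
+```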
+ +### JSON Statistics Channel + +JSON statistics schema version 1 (BIND 9.10+) is supported. As of writing, some distros still do +not enable support for JSON statistics in their BIND packages. + +### Configuration: + +- **urls** []string: List of BIND statistics channel URLs to collect from. Do not include a + trailing slash in the URL. Default is "http://localhost:8053/xml/v3". +- **gather_memory_contexts** bool: Report per-context memory statistics. +- **gather_views** bool: Report per-view query statistics. + +The following table summarizes the URL formats which should be used, depending on your BIND +version and configured statistics channel. + +| BIND Version | Statistics Format | Example URL | +| ------------ | ----------------- | ----------------------------- | +| 9.6 - 9.8 | XML v2 | http://localhost:8053 | +| 9.9 | XML v2 | http://localhost:8053/xml/v2 | +| 9.9+ | XML v3 | http://localhost:8053/xml/v3 | +| 9.10+ | JSON v1 | http://localhost:8053/json/v1 | + +#### Configuration of BIND Daemon + +Add the following to your named.conf if running Telegraf on the same host as the BIND daemon: +``` +statistics-channels { + inet 127.0.0.1 port 8053; +}; +``` + +Alternatively, specify a wildcard address (e.g., 0.0.0.0) or specific IP address of an interface to +configure the BIND daemon to listen on that address. Note that you should secure the statistics +channel with an ACL if it is publicly reachable. Consult the BIND Administrator Reference Manual +for more information. + +### Measurements & Fields: + +- bind_counter + - name=value (multiple) +- bind_memory + - total_use + - in_use + - block_size + - context_size + - lost +- bind_memory_context + - total + - in_use + +### Tags: + +- All measurements + - url + - source + - port +- bind_counter + - type + - view (optional) +- bind_memory_context + - id + - name + +### Sample Queries: + +These are some useful queries (to generate dashboards or other) to run against data from this +plugin: + +``` +SELECT non_negative_derivative(mean(/^A$|^PTR$/), 5m) FROM bind_counter \ +WHERE "url" = 'localhost:8053' AND "type" = 'qtype' AND time > now() - 1h \ +GROUP BY time(5m), "type" +``` + +``` +name: bind_counter +tags: type=qtype +time non_negative_derivative_A non_negative_derivative_PTR +---- ------------------------- --------------------------- +1553862000000000000 254.99444444430992 1388.311111111194 +1553862300000000000 354 2135.716666666791 +1553862600000000000 316.8666666666977 2130.133333333768 +1553862900000000000 309.05000000004657 2126.75 +1553863200000000000 315.64999999990687 2128.483333332464 +1553863500000000000 308.9166666667443 2132.350000000559 +1553863800000000000 302.64999999990687 2131.1833333335817 +1553864100000000000 310.85000000009313 2132.449999999255 +1553864400000000000 314.3666666666977 2136.216666666791 +1553864700000000000 303.2333333331626 2133.8166666673496 +1553865000000000000 304.93333333334886 2127.333333333023 +1553865300000000000 317.93333333334886 2130.3166666664183 +1553865600000000000 280.6666666667443 1807.9071428570896 +``` + +### Example Output + +Here is example output of this plugin: + +``` +bind_memory,host=LAP,port=8053,source=localhost,url=localhost:8053 block_size=12058624i,context_size=4575056i,in_use=4113717i,lost=0i,total_use=16663252i 1554276619000000000 +bind_counter,host=LAP,port=8053,source=localhost,type=opcode,url=localhost:8053 IQUERY=0i,NOTIFY=0i,QUERY=9i,STATUS=0i,UPDATE=0i 1554276619000000000 +bind_counter,host=LAP,port=8053,source=localhost,type=rcode,url=localhost:8053 
17=0i,18=0i,19=0i,20=0i,21=0i,22=0i,BADCOOKIE=0i,BADVERS=0i,FORMERR=0i,NOERROR=7i,NOTAUTH=0i,NOTIMP=0i,NOTZONE=0i,NXDOMAIN=0i,NXRRSET=0i,REFUSED=0i,RESERVED11=0i,RESERVED12=0i,RESERVED13=0i,RESERVED14=0i,RESERVED15=0i,SERVFAIL=2i,YXDOMAIN=0i,YXRRSET=0i 1554276619000000000 +bind_counter,host=LAP,port=8053,source=localhost,type=qtype,url=localhost:8053 A=1i,ANY=1i,NS=1i,PTR=5i,SOA=1i 1554276619000000000 +bind_counter,host=LAP,port=8053,source=localhost,type=nsstat,url=localhost:8053 AuthQryRej=0i,CookieBadSize=0i,CookieBadTime=0i,CookieIn=9i,CookieMatch=0i,CookieNew=9i,CookieNoMatch=0i,DNS64=0i,ECSOpt=0i,ExpireOpt=0i,KeyTagOpt=0i,NSIDOpt=0i,OtherOpt=0i,QryAuthAns=7i,QryBADCOOKIE=0i,QryDropped=0i,QryDuplicate=0i,QryFORMERR=0i,QryFailure=0i,QryNXDOMAIN=0i,QryNXRedir=0i,QryNXRedirRLookup=0i,QryNoauthAns=0i,QryNxrrset=1i,QryRecursion=2i,QryReferral=0i,QrySERVFAIL=2i,QrySuccess=6i,QryTCP=1i,QryUDP=8i,RPZRewrites=0i,RateDropped=0i,RateSlipped=0i,RecQryRej=0i,RecursClients=0i,ReqBadEDNSVer=0i,ReqBadSIG=0i,ReqEdns0=9i,ReqSIG0=0i,ReqTCP=1i,ReqTSIG=0i,Requestv4=9i,Requestv6=0i,RespEDNS0=9i,RespSIG0=0i,RespTSIG=0i,Response=9i,TruncatedResp=0i,UpdateBadPrereq=0i,UpdateDone=0i,UpdateFail=0i,UpdateFwdFail=0i,UpdateRej=0i,UpdateReqFwd=0i,UpdateRespFwd=0i,XfrRej=0i,XfrReqDone=0i 1554276619000000000 +bind_counter,host=LAP,port=8053,source=localhost,type=zonestat,url=localhost:8053 AXFRReqv4=0i,AXFRReqv6=0i,IXFRReqv4=0i,IXFRReqv6=0i,NotifyInv4=0i,NotifyInv6=0i,NotifyOutv4=0i,NotifyOutv6=0i,NotifyRej=0i,SOAOutv4=0i,SOAOutv6=0i,XfrFail=0i,XfrSuccess=0i 1554276619000000000 +bind_counter,host=LAP,port=8053,source=localhost,type=sockstat,url=localhost:8053 FDWatchClose=0i,FDwatchConn=0i,FDwatchConnFail=0i,FDwatchRecvErr=0i,FDwatchSendErr=0i,FdwatchBindFail=0i,RawActive=1i,RawClose=0i,RawOpen=1i,RawOpenFail=0i,RawRecvErr=0i,TCP4Accept=6i,TCP4AcceptFail=0i,TCP4Active=9i,TCP4BindFail=0i,TCP4Close=5i,TCP4Conn=0i,TCP4ConnFail=0i,TCP4Open=8i,TCP4OpenFail=0i,TCP4RecvErr=0i,TCP4SendErr=0i,TCP6Accept=0i,TCP6AcceptFail=0i,TCP6Active=2i,TCP6BindFail=0i,TCP6Close=0i,TCP6Conn=0i,TCP6ConnFail=0i,TCP6Open=2i,TCP6OpenFail=0i,TCP6RecvErr=0i,TCP6SendErr=0i,UDP4Active=18i,UDP4BindFail=14i,UDP4Close=14i,UDP4Conn=0i,UDP4ConnFail=0i,UDP4Open=32i,UDP4OpenFail=0i,UDP4RecvErr=0i,UDP4SendErr=0i,UDP6Active=3i,UDP6BindFail=0i,UDP6Close=6i,UDP6Conn=0i,UDP6ConnFail=6i,UDP6Open=9i,UDP6OpenFail=0i,UDP6RecvErr=0i,UDP6SendErr=0i,UnixAccept=0i,UnixAcceptFail=0i,UnixActive=0i,UnixBindFail=0i,UnixClose=0i,UnixConn=0i,UnixConnFail=0i,UnixOpen=0i,UnixOpenFail=0i,UnixRecvErr=0i,UnixSendErr=0i 1554276619000000000 +``` diff --git a/plugins/inputs/bind/bind.go b/plugins/inputs/bind/bind.go new file mode 100644 index 000000000..967c9031a --- /dev/null +++ b/plugins/inputs/bind/bind.go @@ -0,0 +1,87 @@ +package bind + +import ( + "fmt" + "net/http" + "net/url" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type Bind struct { + Urls []string + GatherMemoryContexts bool + GatherViews bool +} + +var sampleConfig = ` + ## An array of BIND XML statistics URI to gather stats. + ## Default is "http://localhost:8053/xml/v3". 
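+  ## Do not include a trailing slash in the URL.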
+ # urls = ["http://localhost:8053/xml/v3"] + # gather_memory_contexts = false + # gather_views = false +` + +var client = &http.Client{ + Timeout: time.Duration(4 * time.Second), +} + +func (b *Bind) Description() string { + return "Read BIND nameserver XML statistics" +} + +func (b *Bind) SampleConfig() string { + return sampleConfig +} + +func (b *Bind) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + + if len(b.Urls) == 0 { + b.Urls = []string{"http://localhost:8053/xml/v3"} + } + + for _, u := range b.Urls { + addr, err := url.Parse(u) + if err != nil { + acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + continue + } + + wg.Add(1) + go func(addr *url.URL) { + defer wg.Done() + acc.AddError(b.gatherUrl(addr, acc)) + }(addr) + } + + wg.Wait() + return nil +} + +func (b *Bind) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { + switch addr.Path { + case "": + // BIND 9.6 - 9.8 + return b.readStatsXMLv2(addr, acc) + case "/json/v1": + // BIND 9.10+ + return b.readStatsJSON(addr, acc) + case "/xml/v2": + // BIND 9.9 + return b.readStatsXMLv2(addr, acc) + case "/xml/v3": + // BIND 9.9+ + return b.readStatsXMLv3(addr, acc) + default: + return fmt.Errorf("URL %s is ambiguous. Please check plugin documentation for supported URL formats.", + addr) + } +} + +func init() { + inputs.Add("bind", func() telegraf.Input { return &Bind{} }) +} diff --git a/plugins/inputs/bind/bind_test.go b/plugins/inputs/bind/bind_test.go new file mode 100644 index 000000000..b961d549d --- /dev/null +++ b/plugins/inputs/bind/bind_test.go @@ -0,0 +1,581 @@ +package bind + +import ( + "net" + "net/http" + "net/http/httptest" + "testing" + + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/assert" +) + +func TestBindJsonStats(t *testing.T) { + ts := httptest.NewServer(http.FileServer(http.Dir("testdata"))) + url := ts.Listener.Addr().String() + host, port, _ := net.SplitHostPort(url) + defer ts.Close() + + b := Bind{ + Urls: []string{ts.URL + "/json/v1"}, + GatherMemoryContexts: true, + GatherViews: true, + } + + var acc testutil.Accumulator + err := acc.GatherError(b.Gather) + + assert.Nil(t, err) + + // Use subtests for counters, since they are similar structure + type fieldSet struct { + fieldKey string + fieldValue int64 + } + + testCases := []struct { + counterType string + values []fieldSet + }{ + { + "opcode", + []fieldSet{ + {"NOTIFY", 0}, + {"UPDATE", 0}, + {"IQUERY", 0}, + {"QUERY", 13}, + {"STATUS", 0}, + }, + }, + { + "qtype", + []fieldSet{ + {"A", 2}, + {"AAAA", 2}, + {"PTR", 7}, + {"SRV", 2}, + }, + }, + { + "nsstat", + []fieldSet{ + {"QrySuccess", 6}, + {"QryRecursion", 12}, + {"Requestv4", 13}, + {"QryNXDOMAIN", 4}, + {"QryAuthAns", 1}, + {"QryNxrrset", 1}, + {"QryNoauthAns", 10}, + {"QryUDP", 13}, + {"QryDuplicate", 1}, + {"QrySERVFAIL", 1}, + {"Response", 12}, + }, + }, + { + "sockstat", + []fieldSet{ + {"TCP4Open", 118}, + {"UDP6Close", 112}, + {"UDP4Close", 333}, + {"TCP4Close", 119}, + {"TCP6Active", 2}, + {"UDP4Active", 2}, + {"UDP4RecvErr", 1}, + {"UDP4Open", 335}, + {"TCP4Active", 10}, + {"RawActive", 1}, + {"UDP6ConnFail", 112}, + {"TCP4Conn", 114}, + {"UDP6Active", 1}, + {"UDP6Open", 113}, + {"UDP4Conn", 333}, + {"UDP6SendErr", 112}, + {"RawOpen", 1}, + {"TCP4Accept", 6}, + {"TCP6Open", 2}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.counterType, func(t *testing.T) { + tags := map[string]string{ + "url": url, + "type": tc.counterType, + "source": host, + "port": port, + } + + fields := 
map[string]interface{}{} + + for _, val := range tc.values { + fields[val.fieldKey] = val.fieldValue + } + + acc.AssertContainsTaggedFields(t, "bind_counter", fields, tags) + }) + } + + // Subtest for memory stats + t.Run("memory", func(t *testing.T) { + tags := map[string]string{ + "url": url, + "source": host, + "port": port, + } + + fields := map[string]interface{}{ + "block_size": 13893632, + "context_size": 3685480, + "in_use": 3064368, + "lost": 0, + "total_use": 18206566, + } + + acc.AssertContainsTaggedFields(t, "bind_memory", fields, tags) + }) + + // Subtest for per-context memory stats + t.Run("memory_context", func(t *testing.T) { + assert.True(t, acc.HasIntField("bind_memory_context", "total")) + assert.True(t, acc.HasIntField("bind_memory_context", "in_use")) + }) +} + +func TestBindXmlStatsV2(t *testing.T) { + ts := httptest.NewServer(http.FileServer(http.Dir("testdata"))) + url := ts.Listener.Addr().String() + host, port, _ := net.SplitHostPort(url) + defer ts.Close() + + b := Bind{ + Urls: []string{ts.URL + "/xml/v2"}, + GatherMemoryContexts: true, + GatherViews: true, + } + + var acc testutil.Accumulator + err := acc.GatherError(b.Gather) + + assert.Nil(t, err) + + // Use subtests for counters, since they are similar structure + type fieldSet struct { + fieldKey string + fieldValue int64 + } + + testCases := []struct { + counterType string + values []fieldSet + }{ + { + "opcode", + []fieldSet{ + {"UPDATE", 238}, + {"QUERY", 102312374}, + }, + }, + { + "qtype", + []fieldSet{ + {"ANY", 7}, + {"DNSKEY", 452}, + {"SSHFP", 2987}, + {"SOA", 100415}, + {"AAAA", 37786321}, + {"MX", 441155}, + {"IXFR", 157}, + {"CNAME", 531}, + {"NS", 1999}, + {"TXT", 34628}, + {"A", 58951432}, + {"SRV", 741082}, + {"PTR", 4211487}, + {"NAPTR", 39137}, + {"DS", 584}, + }, + }, + { + "nsstat", + []fieldSet{ + {"XfrReqDone", 157}, + {"ReqEdns0", 441758}, + {"ReqTSIG", 0}, + {"UpdateRespFwd", 0}, + {"RespEDNS0", 441748}, + {"QryDropped", 16}, + {"RPZRewrites", 0}, + {"XfrRej", 0}, + {"RecQryRej", 0}, + {"QryNxrrset", 24423133}, + {"QryFORMERR", 0}, + {"ReqTCP", 1548156}, + {"UpdateDone", 0}, + {"QrySERVFAIL", 14422}, + {"QryRecursion", 2104239}, + {"Requestv4", 102312611}, + {"UpdateFwdFail", 0}, + {"QryReferral", 3}, + {"Response", 102301560}, + {"RespTSIG", 0}, + {"QrySuccess", 63811668}, + {"QryFailure", 0}, + {"RespSIG0", 0}, + {"ReqSIG0", 0}, + {"UpdateRej", 238}, + {"QryAuthAns", 72180718}, + {"UpdateFail", 0}, + {"QryDuplicate", 10879}, + {"RateDropped", 0}, + {"QryNoauthAns", 30106182}, + {"QryNXDOMAIN", 14052096}, + {"ReqBadSIG", 0}, + {"UpdateReqFwd", 0}, + {"RateSlipped", 0}, + {"TruncatedResp", 3787}, + {"Requestv6", 1}, + {"UpdateBadPrereq", 0}, + {"AuthQryRej", 0}, + {"ReqBadEDNSVer", 0}, + }, + }, + { + "sockstat", + []fieldSet{ + {"FdwatchBindFail", 0}, + {"UDP6Open", 238269}, + {"UDP6SendErr", 238250}, + {"TCP4ConnFail", 0}, + {"TCP4Conn", 590}, + {"TCP6AcceptFail", 0}, + {"UDP4SendErr", 0}, + {"FDwatchConn", 0}, + {"TCP4RecvErr", 1}, + {"TCP4OpenFail", 0}, + {"UDP4OpenFail", 0}, + {"UDP6OpenFail", 0}, + {"TCP4Close", 1548268}, + {"TCP6BindFail", 0}, + {"TCP4AcceptFail", 0}, + {"UnixConn", 0}, + {"UDP4Open", 3765532}, + {"TCP6Close", 0}, + {"FDwatchRecvErr", 0}, + {"UDP4Conn", 3764828}, + {"UnixConnFail", 0}, + {"TCP6Conn", 0}, + {"TCP6OpenFail", 0}, + {"TCP6SendErr", 0}, + {"TCP6RecvErr", 0}, + {"FDwatchSendErr", 0}, + {"UDP4RecvErr", 1650}, + {"UDP4ConnFail", 0}, + {"UDP6Close", 238267}, + {"FDWatchClose", 0}, + {"TCP4Accept", 1547672}, + {"UnixAccept", 0}, + {"TCP4Open", 602}, + 
{"UDP4BindFail", 219}, + {"UDP6ConnFail", 238250}, + {"UnixClose", 0}, + {"TCP4BindFail", 0}, + {"UnixOpenFail", 0}, + {"UDP6BindFail", 16}, + {"UnixOpen", 0}, + {"UnixAcceptFail", 0}, + {"UnixRecvErr", 0}, + {"UDP6RecvErr", 0}, + {"TCP6ConnFail", 0}, + {"FDwatchConnFail", 0}, + {"TCP4SendErr", 0}, + {"UDP4Close", 3765528}, + {"UnixSendErr", 0}, + {"TCP6Open", 2}, + {"UDP6Conn", 1}, + {"TCP6Accept", 0}, + {"UnixBindFail", 0}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.counterType, func(t *testing.T) { + tags := map[string]string{ + "url": url, + "type": tc.counterType, + "source": host, + "port": port, + } + + fields := map[string]interface{}{} + + for _, val := range tc.values { + fields[val.fieldKey] = val.fieldValue + } + + acc.AssertContainsTaggedFields(t, "bind_counter", fields, tags) + }) + } + + // Subtest for memory stats + t.Run("memory", func(t *testing.T) { + tags := map[string]string{ + "url": url, + "source": host, + "port": port, + } + + fields := map[string]interface{}{ + "block_size": 77070336, + "context_size": 6663840, + "in_use": 20772579, + "lost": 0, + "total_use": 81804609, + } + + acc.AssertContainsTaggedFields(t, "bind_memory", fields, tags) + }) + + // Subtest for per-context memory stats + t.Run("memory_context", func(t *testing.T) { + assert.True(t, acc.HasIntField("bind_memory_context", "total")) + assert.True(t, acc.HasIntField("bind_memory_context", "in_use")) + }) +} + +func TestBindXmlStatsV3(t *testing.T) { + ts := httptest.NewServer(http.FileServer(http.Dir("testdata"))) + url := ts.Listener.Addr().String() + host, port, _ := net.SplitHostPort(url) + defer ts.Close() + + b := Bind{ + Urls: []string{ts.URL + "/xml/v3"}, + GatherMemoryContexts: true, + GatherViews: true, + } + + var acc testutil.Accumulator + err := acc.GatherError(b.Gather) + + assert.Nil(t, err) + + // Use subtests for counters, since they are similar structure + type fieldSet struct { + fieldKey string + fieldValue int64 + } + + testCases := []struct { + counterType string + values []fieldSet + }{ + { + "opcode", + []fieldSet{ + {"NOTIFY", 0}, + {"UPDATE", 0}, + {"IQUERY", 0}, + {"QUERY", 74941}, + {"STATUS", 0}, + }, + }, + { + "qtype", + []fieldSet{ + {"ANY", 22}, + {"SOA", 18}, + {"AAAA", 5735}, + {"MX", 618}, + {"NS", 373}, + {"TXT", 970}, + {"A", 63672}, + {"SRV", 139}, + {"PTR", 3393}, + {"RRSIG", 1}, + }, + }, + { + "nsstat", + []fieldSet{ + {"DNS64", 0}, + {"ExpireOpt", 0}, + {"NSIDOpt", 0}, + {"OtherOpt", 59}, + {"XfrReqDone", 0}, + {"ReqEdns0", 9250}, + {"ReqTSIG", 0}, + {"UpdateRespFwd", 0}, + {"RespEDNS0", 9250}, + {"QryDropped", 11}, + {"RPZRewrites", 0}, + {"XfrRej", 0}, + {"RecQryRej", 35}, + {"QryNxrrset", 2452}, + {"QryFORMERR", 0}, + {"ReqTCP", 260}, + {"QryTCP", 258}, + {"QryUDP", 74648}, + {"UpdateDone", 0}, + {"QrySERVFAIL", 122}, + {"QryRecursion", 53750}, + {"RecursClients", 0}, + {"Requestv4", 74942}, + {"UpdateFwdFail", 0}, + {"QryReferral", 0}, + {"Response", 63264}, + {"RespTSIG", 0}, + {"QrySuccess", 49044}, + {"QryFailure", 35}, + {"RespSIG0", 0}, + {"ReqSIG0", 0}, + {"UpdateRej", 0}, + {"QryAuthAns", 2752}, + {"UpdateFail", 0}, + {"QryDuplicate", 11667}, + {"RateDropped", 0}, + {"QryNoauthAns", 60354}, + {"QryNXDOMAIN", 11610}, + {"ReqBadSIG", 0}, + {"UpdateReqFwd", 0}, + {"RateSlipped", 0}, + {"TruncatedResp", 365}, + {"Requestv6", 0}, + {"UpdateBadPrereq", 0}, + {"AuthQryRej", 0}, + {"ReqBadEDNSVer", 0}, + {"SitBadSize", 0}, + {"SitBadTime", 0}, + {"SitMatch", 0}, + {"SitNew", 0}, + {"SitNoMatch", 0}, + {"SitOpt", 0}, + {"TruncatedResp", 
365}, + }, + }, + { + "sockstat", + []fieldSet{ + {"FDwatchConnFail", 0}, + {"UnixClose", 0}, + {"TCP6OpenFail", 0}, + {"TCP6Active", 0}, + {"UDP4RecvErr", 14}, + {"TCP6Conn", 0}, + {"FDWatchClose", 0}, + {"TCP4ConnFail", 0}, + {"UnixConn", 0}, + {"UnixSendErr", 0}, + {"UDP6Close", 0}, + {"UnixOpen", 0}, + {"UDP4Conn", 92535}, + {"TCP4Close", 336}, + {"UnixAcceptFail", 0}, + {"UnixAccept", 0}, + {"TCP6AcceptFail", 0}, + {"UDP6Open", 0}, + {"UDP6BindFail", 0}, + {"UDP6RecvErr", 0}, + {"RawOpenFail", 0}, + {"TCP4Accept", 293}, + {"UDP6SendErr", 0}, + {"UDP6Conn", 0}, + {"TCP4SendErr", 0}, + {"UDP4BindFail", 1}, + {"UDP4Active", 4}, + {"TCP4Active", 297}, + {"UnixConnFail", 0}, + {"UnixOpenFail", 0}, + {"UDP6ConnFail", 0}, + {"TCP6Accept", 0}, + {"UnixRecvErr", 0}, + {"RawActive", 1}, + {"UDP6OpenFail", 0}, + {"RawClose", 0}, + {"UnixBindFail", 0}, + {"UnixActive", 0}, + {"FdwatchBindFail", 0}, + {"UDP4SendErr", 0}, + {"RawRecvErr", 0}, + {"TCP6Close", 0}, + {"FDwatchRecvErr", 0}, + {"TCP4BindFail", 0}, + {"TCP4AcceptFail", 0}, + {"TCP4OpenFail", 0}, + {"UDP4Open", 92542}, + {"UDP4ConnFail", 0}, + {"TCP4Conn", 44}, + {"TCP6ConnFail", 0}, + {"FDwatchConn", 0}, + {"UDP6Active", 0}, + {"RawOpen", 1}, + {"TCP6BindFail", 0}, + {"UDP4Close", 92538}, + {"TCP6Open", 0}, + {"TCP6SendErr", 0}, + {"TCP4Open", 48}, + {"FDwatchSendErr", 0}, + {"TCP6RecvErr", 0}, + {"UDP4OpenFail", 0}, + {"TCP4RecvErr", 0}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.counterType, func(t *testing.T) { + tags := map[string]string{ + "url": url, + "type": tc.counterType, + "source": host, + "port": port, + } + + fields := map[string]interface{}{} + + for _, val := range tc.values { + fields[val.fieldKey] = val.fieldValue + } + + acc.AssertContainsTaggedFields(t, "bind_counter", fields, tags) + }) + } + + // Subtest for memory stats + t.Run("memory", func(t *testing.T) { + tags := map[string]string{ + "url": url, + "source": host, + "port": port, + } + + fields := map[string]interface{}{ + "block_size": 45875200, + "context_size": 10037400, + "in_use": 6000232, + "lost": 0, + "total_use": 777821909, + } + + acc.AssertContainsTaggedFields(t, "bind_memory", fields, tags) + }) + + // Subtest for per-context memory stats + t.Run("memory_context", func(t *testing.T) { + assert.True(t, acc.HasIntField("bind_memory_context", "total")) + assert.True(t, acc.HasIntField("bind_memory_context", "in_use")) + }) +} + +func TestBindUnparseableURL(t *testing.T) { + b := Bind{ + Urls: []string{"://example.com"}, + } + + var acc testutil.Accumulator + err := acc.GatherError(b.Gather) + assert.Contains(t, err.Error(), "Unable to parse address") +} diff --git a/plugins/inputs/bind/json_stats.go b/plugins/inputs/bind/json_stats.go new file mode 100644 index 000000000..95c7e6fe8 --- /dev/null +++ b/plugins/inputs/bind/json_stats.go @@ -0,0 +1,166 @@ +package bind + +import ( + "encoding/json" + "fmt" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +type jsonStats struct { + OpCodes map[string]int + QTypes map[string]int + NSStats map[string]int + SockStats map[string]int + Views map[string]jsonView + Memory jsonMemory +} + +type jsonMemory struct { + TotalUse int + InUse int + BlockSize int + ContextSize int + Lost int + Contexts []struct { + Id string + Name string + Total int + InUse int + } +} + +type jsonView struct { + Resolver map[string]map[string]int +} + +// addJSONCounter adds a counter array to a Telegraf Accumulator, with the 
specified tags.
+func addJSONCounter(acc telegraf.Accumulator, commonTags map[string]string, stats map[string]int) {
+	grouper := metric.NewSeriesGrouper()
+	ts := time.Now()
+	for name, value := range stats {
+		if commonTags["type"] == "opcode" && strings.HasPrefix(name, "RESERVED") {
+			continue
+		}
+
+		tags := make(map[string]string)
+
+		// Create local copy of tags since maps are reference types
+		for k, v := range commonTags {
+			tags[k] = v
+		}
+
+		grouper.Add("bind_counter", tags, ts, name, value)
+	}
+
+	// Add grouped metrics
+	for _, metric := range grouper.Metrics() {
+		acc.AddMetric(metric)
+	}
+}
+
+// addStatsJSON walks a jsonStats struct and adds the values to the telegraf.Accumulator.
+func (b *Bind) addStatsJSON(stats jsonStats, acc telegraf.Accumulator, urlTag string) {
+	grouper := metric.NewSeriesGrouper()
+	ts := time.Now()
+	tags := map[string]string{"url": urlTag}
+	host, port, _ := net.SplitHostPort(urlTag)
+	tags["source"] = host
+	tags["port"] = port
+
+	// Opcodes
+	tags["type"] = "opcode"
+	addJSONCounter(acc, tags, stats.OpCodes)
+
+	// Query RDATA types
+	tags["type"] = "qtype"
+	addJSONCounter(acc, tags, stats.QTypes)
+
+	// Nameserver stats
+	tags["type"] = "nsstat"
+	addJSONCounter(acc, tags, stats.NSStats)
+
+	// Socket statistics
+	tags["type"] = "sockstat"
+	addJSONCounter(acc, tags, stats.SockStats)
+
+	// Memory stats
+	fields := map[string]interface{}{
+		"total_use":    stats.Memory.TotalUse,
+		"in_use":       stats.Memory.InUse,
+		"block_size":   stats.Memory.BlockSize,
+		"context_size": stats.Memory.ContextSize,
+		"lost":         stats.Memory.Lost,
+	}
+	acc.AddGauge("bind_memory", fields, map[string]string{"url": urlTag, "source": host, "port": port})
+
+	// Detailed, per-context memory stats
+	if b.GatherMemoryContexts {
+		for _, c := range stats.Memory.Contexts {
+			tags := map[string]string{"url": urlTag, "id": c.Id, "name": c.Name, "source": host, "port": port}
+			fields := map[string]interface{}{"total": c.Total, "in_use": c.InUse}
+
+			acc.AddGauge("bind_memory_context", fields, tags)
+		}
+	}
+
+	// Detailed, per-view stats
+	if b.GatherViews {
+		for vName, view := range stats.Views {
+			for cntrType, counters := range view.Resolver {
+				for cntrName, value := range counters {
+					tags := map[string]string{
+						"url":    urlTag,
+						"source": host,
+						"port":   port,
+						"view":   vName,
+						"type":   cntrType,
+					}
+
+					grouper.Add("bind_counter", tags, ts, cntrName, value)
+				}
+			}
+		}
+	}
+
+	// Add grouped metrics
+	for _, metric := range grouper.Metrics() {
+		acc.AddMetric(metric)
+	}
+}
+
+// readStatsJSON takes a base URL to probe, and requests the individual statistics blobs that we
+// are interested in. These individual blobs have a combined size which is significantly smaller
+// than if we requested everything at once (e.g. taskmgr and socketmgr can be omitted).
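+// Each response decodes into the same jsonStats value; the three blobs populate disjoint
+// fields, so successive decodes accumulate results rather than overwriting them.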
+func (b *Bind) readStatsJSON(addr *url.URL, acc telegraf.Accumulator) error { + var stats jsonStats + + // Progressively build up full jsonStats struct by parsing the individual HTTP responses + for _, suffix := range [...]string{"/server", "/net", "/mem"} { + scrapeUrl := addr.String() + suffix + + resp, err := client.Get(scrapeUrl) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status: %s", scrapeUrl, resp.Status) + } + + if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil { + return fmt.Errorf("Unable to decode JSON blob: %s", err) + } + } + + b.addStatsJSON(stats, acc, addr.Host) + return nil +} diff --git a/plugins/inputs/bind/testdata/json/v1/mem b/plugins/inputs/bind/testdata/json/v1/mem new file mode 100644 index 000000000..8872344e1 --- /dev/null +++ b/plugins/inputs/bind/testdata/json/v1/mem @@ -0,0 +1,133 @@ +{ + "json-stats-version":"1.2", + "boot-time":"2017-07-28T13:24:53Z", + "config-time":"2017-07-28T13:24:53Z", + "current-time":"2017-07-28T15:33:07Z", + "memory":{ + "TotalUse":18206566, + "InUse":3064368, + "BlockSize":13893632, + "ContextSize":3685480, + "Lost":0, + "contexts":[ + { + "id":"0x55fb2e042de0", + "name":"main", + "references":202, + "total":2693003, + "inuse":1454904, + "maxinuse":1508072, + "blocksize":786432, + "pools":40, + "hiwater":0, + "lowater":0 + }, + { + "id":"0x55fb2e0507e0", + "name":"dst", + "references":1, + "total":387478, + "inuse":91776, + "maxinuse":97208, + "pools":0, + "hiwater":0, + "lowater":0 + }, + { + "id":"0x55fb2e0938e0", + "name":"zonemgr-pool", + "references":113, + "total":742986, + "inuse":143776, + "maxinuse":313961, + "blocksize":262144, + "pools":0, + "hiwater":0, + "lowater":0 + }, + { + "id":"0x7f19d00017d0", + "name":"threadkey", + "references":1, + "total":0, + "inuse":0, + "maxinuse":0, + "pools":0, + "hiwater":0, + "lowater":0 + }, + { + "id":"0x7f19d00475f0", + "name":"client", + "references":3, + "total":267800, + "inuse":8760, + "maxinuse":8760, + "blocksize":262144, + "pools":2, + "hiwater":0, + "lowater":0 + }, + { + "id":"0x7f19d00dfca0", + "name":"cache", + "references":8, + "total":288938, + "inuse":83650, + "maxinuse":83842, + "blocksize":262144, + "pools":0, + "hiwater":0, + "lowater":0 + }, + { + "id":"0x7f19d00eaa30", + "name":"cache_heap", + "references":18, + "total":393216, + "inuse":132096, + "maxinuse":132096, + "blocksize":262144, + "pools":0, + "hiwater":0, + "lowater":0 + }, + { + "id":"0x7f19d01094e0", + "name":"res0", + "references":1, + "total":262144, + "inuse":0, + "maxinuse":22048, + "blocksize":262144, + "pools":0, + "hiwater":0, + "lowater":0 + }, + { + "id":"0x7f19d0114270", + "name":"res1", + "references":1, + "total":0, + "inuse":0, + "maxinuse":0, + "blocksize":0, + "pools":0, + "hiwater":0, + "lowater":0 + }, + { + "id":"0x7f19d011f000", + "name":"res2", + "references":1, + "total":0, + "inuse":0, + "maxinuse":0, + "blocksize":0, + "pools":0, + "hiwater":0, + "lowater":0 + } + ] + } +} \ No newline at end of file diff --git a/plugins/inputs/bind/testdata/json/v1/net b/plugins/inputs/bind/testdata/json/v1/net new file mode 100644 index 000000000..0bbd41429 --- /dev/null +++ b/plugins/inputs/bind/testdata/json/v1/net @@ -0,0 +1,241 @@ +{ + "json-stats-version":"1.2", + "boot-time":"2017-07-28T13:24:53Z", + "config-time":"2017-07-28T13:24:53Z", + "current-time":"2017-07-28T15:33:07Z", + "sockstats":{ + "UDP4Open":335, + "UDP6Open":113, + "TCP4Open":118, + "TCP6Open":2, + "RawOpen":1, 
+ "UDP4Close":333, + "UDP6Close":112, + "TCP4Close":119, + "UDP6ConnFail":112, + "UDP4Conn":333, + "TCP4Conn":114, + "TCP4Accept":6, + "UDP6SendErr":112, + "UDP4RecvErr":1, + "UDP4Active":2, + "UDP6Active":1, + "TCP4Active":10, + "TCP6Active":2, + "RawActive":1 + }, + "socketmgr":{ + "sockets":[ + { + "id":"0x7f19dd849010", + "references":1, + "type":"not-initialized", + "local-address":"", + "states":[ + "bound" + ] + }, + { + "id":"0x7f19dd849268", + "references":1, + "type":"tcp", + "local-address":"0.0.0.0#8053", + "states":[ + "listener", + "bound" + ] + }, + { + "id":"0x7f19dd849718", + "references":2, + "type":"udp", + "local-address":"::#53", + "states":[ + "bound" + ] + }, + { + "id":"0x7f19dd849970", + "references":2, + "type":"tcp", + "local-address":"::#53", + "states":[ + "listener", + "bound" + ] + }, + { + "id":"0x7f19dd849bc8", + "references":2, + "type":"udp", + "local-address":"127.0.0.1#53", + "states":[ + "bound" + ] + }, + { + "id":"0x7f19dd6f4010", + "references":2, + "type":"tcp", + "local-address":"127.0.0.1#53", + "states":[ + "listener", + "bound" + ] + }, + { + "id":"0x7f19dd6f4718", + "references":1, + "type":"tcp", + "local-address":"127.0.0.1#953", + "states":[ + "listener", + "bound" + ] + }, + { + "id":"0x7f19dd6f4bc8", + "references":1, + "type":"tcp", + "local-address":"::1#953", + "states":[ + "listener", + "bound" + ] + }, + { + "id":"0x7f19d4fb7970", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fb7bc8", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fc7010", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fc74c0", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fc7718", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fc7bc8", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd1010", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd1268", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd14c0", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd1718", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd1970", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd1bc8", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd9010", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fda4c0", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd9bc8", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fda268", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd9970", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fda010", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd9718", + "references":1, + "type":"udp", + "states":[ + ] + } + ] + } +} \ No newline at end of file diff --git a/plugins/inputs/bind/testdata/json/v1/server b/plugins/inputs/bind/testdata/json/v1/server new file mode 100644 index 000000000..53acd9067 --- /dev/null +++ b/plugins/inputs/bind/testdata/json/v1/server @@ -0,0 +1,141 @@ +{ + "json-stats-version":"1.2", + "boot-time":"2017-07-28T13:24:53Z", + "config-time":"2017-07-28T13:24:53Z", + "current-time":"2017-07-28T15:33:07Z", + "opcodes":{ + "QUERY":13, + "IQUERY":0, + "STATUS":0, + "RESERVED3":0, + "NOTIFY":0, + "UPDATE":0, + "RESERVED6":0, + "RESERVED7":0, 
+ "RESERVED8":0, + "RESERVED9":0, + "RESERVED10":0, + "RESERVED11":0, + "RESERVED12":0, + "RESERVED13":0, + "RESERVED14":0, + "RESERVED15":0 + }, + "qtypes":{ + "A":2, + "PTR":7, + "AAAA":2, + "SRV":2 + }, + "nsstats":{ + "Requestv4":13, + "Response":12, + "QrySuccess":6, + "QryAuthAns":1, + "QryNoauthAns":10, + "QryNxrrset":1, + "QrySERVFAIL":1, + "QryNXDOMAIN":4, + "QryRecursion":12, + "QryDuplicate":1, + "QryUDP":13 + }, + "views":{ + "_default":{ + "resolver":{ + "stats":{ + "Queryv4":447, + "Queryv6":112, + "Responsev4":444, + "NXDOMAIN":3, + "Truncated":114, + "Retry":242, + "QueryTimeout":3, + "GlueFetchv4":61, + "GlueFetchv6":68, + "GlueFetchv6Fail":24, + "ValAttempt":36, + "ValOk":27, + "ValNegOk":9, + "QryRTT100":287, + "QryRTT500":152, + "QryRTT800":4, + "BucketSize":31 + }, + "qtypes":{ + "A":220, + "NS":19, + "PTR":22, + "AAAA":233, + "SRV":14, + "DS":27, + "DNSKEY":24 + }, + "cache":{ + "A":150, + "NS":44, + "PTR":3, + "AAAA":104, + "DS":23, + "RRSIG":94, + "NSEC":8, + "DNSKEY":7, + "!AAAA":23, + "!DS":5, + "NXDOMAIN":1 + }, + "cachestats":{ + "CacheHits":1675, + "CacheMisses":44, + "QueryHits":17, + "QueryMisses":12, + "DeleteLRU":0, + "DeleteTTL":16, + "CacheNodes":219, + "CacheBuckets":129, + "TreeMemTotal":551082, + "TreeMemInUse":150704, + "HeapMemMax":132096, + "HeapMemTotal":393216, + "HeapMemInUse":132096 + }, + "adb":{ + "nentries":1021, + "entriescnt":254, + "nnames":1021, + "namescnt":195 + } + } + }, + "_bind":{ + "resolver":{ + "stats":{ + "BucketSize":31 + }, + "qtypes":{ + }, + "cache":{ + }, + "cachestats":{ + "CacheHits":0, + "CacheMisses":0, + "QueryHits":0, + "QueryMisses":0, + "DeleteLRU":0, + "DeleteTTL":0, + "CacheNodes":0, + "CacheBuckets":64, + "TreeMemTotal":287392, + "TreeMemInUse":29608, + "HeapMemMax":1024, + "HeapMemTotal":262144, + "HeapMemInUse":1024 + }, + "adb":{ + "nentries":1021, + "nnames":1021 + } + } + } + } +} \ No newline at end of file diff --git a/plugins/inputs/bind/testdata/xml/v2 b/plugins/inputs/bind/testdata/xml/v2 new file mode 100644 index 000000000..e16c53dbc --- /dev/null +++ b/plugins/inputs/bind/testdata/xml/v2 @@ -0,0 +1,926 @@ + + + + + + + + _default + + A + 2936881 + + + NS + 28994 + + + CNAME + 26 + + + SOA + 15131 + + + PTR + 47924 + + + MX + 1884 + + + TXT + 6486 + + + AAAA + 949781 + + + SRV + 14740 + + + NAPTR + 1606 + + + DS + 25 + + + SSHFP + 185 + + + DNSKEY + 13 + + + ANY + 1 + + + Queryv4 + 3765426 + + + Queryv6 + 238251 + + + Responsev4 + 3716142 + + + Responsev6 + 1 + + + NXDOMAIN + 100052 + + + SERVFAIL + 5894 + + + FORMERR + 2041 + + + OtherError + 14801 + + + EDNS0Fail + 2615 + + + Mismatch + 0 + + + Truncated + 598 + + + Lame + 117 + + + Retry + 383343 + + + QueryAbort + 0 + + + QuerySockFail + 0 + + + QueryTimeout + 50874 + + + GlueFetchv4 + 260749 + + + GlueFetchv6 + 225310 + + + GlueFetchv4Fail + 5756 + + + GlueFetchv6Fail + 141500 + + + ValAttempt + 0 + + + ValOk + 0 + + + ValNegOk + 0 + + + ValFail + 0 + + + QryRTT10 + 458176 + + + QryRTT100 + 3010133 + + + QryRTT500 + 244312 + + + QryRTT800 + 1275 + + + QryRTT1600 + 361 + + + QryRTT1600+ + 236 + + + + A + 2700 + + + NS + 759 + + + CNAME + 486 + + + SOA + 2 + + + PTR + 6 + + + TXT + 2 + + + AAAA + 629 + + + SRV + 1 + + + DS + 48 + + + RRSIG + 203 + + + NSEC + 22 + + + DNSKEY + 1 + + + !A + 6 + + + !SOA + 26 + + + !AAAA + 84 + + + !NAPTR + 3 + + + NXDOMAIN + 143 + + + + + _bind + + Queryv4 + 0 + + + Queryv6 + 0 + + + Responsev4 + 0 + + + Responsev6 + 0 + + + NXDOMAIN + 0 + + + SERVFAIL + 0 + + + FORMERR + 0 + + + OtherError + 0 + + + EDNS0Fail + 0 + 
+ + Mismatch + 0 + + + Truncated + 0 + + + Lame + 0 + + + Retry + 0 + + + QueryAbort + 0 + + + QuerySockFail + 0 + + + QueryTimeout + 0 + + + GlueFetchv4 + 0 + + + GlueFetchv6 + 0 + + + GlueFetchv4Fail + 0 + + + GlueFetchv6Fail + 0 + + + ValAttempt + 0 + + + ValOk + 0 + + + ValNegOk + 0 + + + ValFail + 0 + + + QryRTT10 + 0 + + + QryRTT100 + 0 + + + QryRTT500 + 0 + + + QryRTT800 + 0 + + + QryRTT1600 + 0 + + + QryRTT1600+ + 0 + + + + + + 2016-10-02T18:45:00Z + 2016-10-23T19:27:48Z + + + QUERY + 102312374 + + + UPDATE + 238 + + + + + A + 58951432 + + + NS + 1999 + + + CNAME + 531 + + + SOA + 100415 + + + PTR + 4211487 + + + MX + 441155 + + + TXT + 34628 + + + AAAA + 37786321 + + + SRV + 741082 + + + NAPTR + 39137 + + + DS + 584 + + + SSHFP + 2987 + + + DNSKEY + 452 + + + IXFR + 157 + + + ANY + 7 + + + + Requestv4 + 102312611 + + + Requestv6 + 1 + + + ReqEdns0 + 441758 + + + ReqBadEDNSVer + 0 + + + ReqTSIG + 0 + + + ReqSIG0 + 0 + + + ReqBadSIG + 0 + + + ReqTCP + 1548156 + + + AuthQryRej + 0 + + + RecQryRej + 0 + + + XfrRej + 0 + + + UpdateRej + 238 + + + Response + 102301560 + + + TruncatedResp + 3787 + + + RespEDNS0 + 441748 + + + RespTSIG + 0 + + + RespSIG0 + 0 + + + QrySuccess + 63811668 + + + QryAuthAns + 72180718 + + + QryNoauthAns + 30106182 + + + QryReferral + 3 + + + QryNxrrset + 24423133 + + + QrySERVFAIL + 14422 + + + QryFORMERR + 0 + + + QryNXDOMAIN + 14052096 + + + QryRecursion + 2104239 + + + QryDuplicate + 10879 + + + QryDropped + 16 + + + QryFailure + 0 + + + XfrReqDone + 157 + + + UpdateReqFwd + 0 + + + UpdateRespFwd + 0 + + + UpdateFwdFail + 0 + + + UpdateDone + 0 + + + UpdateFail + 0 + + + UpdateBadPrereq + 0 + + + RPZRewrites + 0 + + + RateDropped + 0 + + + RateSlipped + 0 + + + NotifyOutv4 + 663 + + + NotifyOutv6 + 0 + + + NotifyInv4 + 0 + + + NotifyInv6 + 0 + + + NotifyRej + 0 + + + SOAOutv4 + 386 + + + SOAOutv6 + 0 + + + AXFRReqv4 + 0 + + + AXFRReqv6 + 0 + + + IXFRReqv4 + 0 + + + IXFRReqv6 + 0 + + + XfrSuccess + 0 + + + XfrFail + 0 + + + Mismatch + 2 + + + UDP4Open + 3765532 + + + UDP6Open + 238269 + + + TCP4Open + 602 + + + TCP6Open + 2 + + + UnixOpen + 0 + + + UDP4OpenFail + 0 + + + UDP6OpenFail + 0 + + + TCP4OpenFail + 0 + + + TCP6OpenFail + 0 + + + UnixOpenFail + 0 + + + UDP4Close + 3765528 + + + UDP6Close + 238267 + + + TCP4Close + 1548268 + + + TCP6Close + 0 + + + UnixClose + 0 + + + FDWatchClose + 0 + + + UDP4BindFail + 219 + + + UDP6BindFail + 16 + + + TCP4BindFail + 0 + + + TCP6BindFail + 0 + + + UnixBindFail + 0 + + + FdwatchBindFail + 0 + + + UDP4ConnFail + 0 + + + UDP6ConnFail + 238250 + + + TCP4ConnFail + 0 + + + TCP6ConnFail + 0 + + + UnixConnFail + 0 + + + FDwatchConnFail + 0 + + + UDP4Conn + 3764828 + + + UDP6Conn + 1 + + + TCP4Conn + 590 + + + TCP6Conn + 0 + + + UnixConn + 0 + + + FDwatchConn + 0 + + + TCP4AcceptFail + 0 + + + TCP6AcceptFail + 0 + + + UnixAcceptFail + 0 + + + TCP4Accept + 1547672 + + + TCP6Accept + 0 + + + UnixAccept + 0 + + + UDP4SendErr + 0 + + + UDP6SendErr + 238250 + + + TCP4SendErr + 0 + + + TCP6SendErr + 0 + + + UnixSendErr + 0 + + + FDwatchSendErr + 0 + + + UDP4RecvErr + 1650 + + + UDP6RecvErr + 0 + + + TCP4RecvErr + 1 + + + TCP6RecvErr + 0 + + + UnixRecvErr + 0 + + + FDwatchRecvErr + 0 + + + + + + 0x7f8a94e061d0 + main + 229 + 5002528 + 3662792 + 4848264 + 2359296 + 75 + 0 + 0 + + + 0x7f8a94e13830 + dst + 1 + 133486 + 96456 + 102346 + - + 0 + 0 + 0 + + + 0x7f8a94e401c0 + zonemgr-pool + 501 + 6339848 + 4384240 + 5734049 + 6029312 + 0 + 0 + 0 + + + + 81804609 + 20772579 + 77070336 + 6663840 + 0 + + + + + diff --git 
a/plugins/inputs/bind/testdata/xml/v3/mem b/plugins/inputs/bind/testdata/xml/v3/mem new file mode 100644 index 000000000..493708d7d --- /dev/null +++ b/plugins/inputs/bind/testdata/xml/v3/mem @@ -0,0 +1,142 @@ + + + + + 2017-07-21T11:53:28Z + 2017-07-21T11:53:28Z + 2017-07-25T23:47:08Z + + + + + + + 0x55fb2e042de0 + main + 202 + 2706043 + 1454904 + 1508072 + 786432 + 40 + 0 + 0 + + + 0x55fb2e0507e0 + dst + 1 + 387478 + 91776 + 97208 + - + 0 + 0 + 0 + + + 0x55fb2e0938e0 + zonemgr-pool + 113 + 742986 + 143776 + 313961 + 262144 + 0 + 0 + 0 + + + 0x7f19d00017d0 + threadkey + 1 + 0 + 0 + 0 + - + 0 + 0 + 0 + + + 0x7f19d00475f0 + client + 3 + 267800 + 8760 + 8760 + 262144 + 2 + 0 + 0 + + + 0x7f19d00dfca0 + cache + 8 + 288938 + 83650 + 83842 + 262144 + 0 + 0 + 0 + + + 0x7f19d00eaa30 + cache_heap + 18 + 393216 + 132096 + 132096 + 262144 + 0 + 0 + 0 + + + 0x7f19d01094e0 + res0 + 1 + 262144 + 0 + 22048 + 262144 + 0 + 0 + 0 + + + 0x7f19d0114270 + res1 + 1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0x7f19d011f000 + res2 + 1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + + 777821909 + 6000232 + 45875200 + 10037400 + 0 + + + diff --git a/plugins/inputs/bind/testdata/xml/v3/net b/plugins/inputs/bind/testdata/xml/v3/net new file mode 100644 index 000000000..50f713447 --- /dev/null +++ b/plugins/inputs/bind/testdata/xml/v3/net @@ -0,0 +1,156 @@ + + + + + 2017-07-21T11:53:28Z + 2017-07-21T11:53:28Z + 2017-07-25T23:47:08Z + + 92542 + 0 + 48 + 0 + 0 + 1 + 0 + 0 + 0 + 0 + 0 + 0 + 92538 + 0 + 336 + 0 + 0 + 0 + 0 + 1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 92535 + 0 + 44 + 0 + 0 + 0 + 0 + 0 + 0 + 293 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 14 + 0 + 0 + 0 + 0 + 0 + 0 + 4 + 0 + 297 + 0 + 0 + 1 + + + + + + + + 0x7f19dd849010 + 1 + not-initialized + <unknown address, family 16> + + bound + + + + 0x7f19dd849268 + 1 + tcp + 0.0.0.0#8053 + + listener + bound + + + + 0x7f19dd849718 + 2 + udp + ::#53 + + bound + + + + 0x7f19dd849970 + 2 + tcp + ::#53 + + listener + bound + + + + 0x7f19dd849bc8 + 2 + udp + 127.0.0.1#53 + + bound + + + + 0x7f19dd6f4010 + 2 + tcp + 127.0.0.1#53 + + listener + bound + + + + 0x7f19dd6f4718 + 1 + tcp + 127.0.0.1#953 + + listener + bound + + + + 0x7f19dd6f4bc8 + 1 + tcp + ::1#953 + + listener + bound + + + + + diff --git a/plugins/inputs/bind/testdata/xml/v3/server b/plugins/inputs/bind/testdata/xml/v3/server new file mode 100644 index 000000000..0d9206c69 --- /dev/null +++ b/plugins/inputs/bind/testdata/xml/v3/server @@ -0,0 +1,328 @@ + + + + + 2017-07-21T11:53:28Z + 2017-07-21T11:53:28Z + 2017-07-25T23:47:08Z + + 74941 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 63672 + 373 + 18 + 3393 + 618 + 970 + 5735 + 139 + 1 + 22 + + + 74942 + 0 + 9250 + 0 + 0 + 0 + 0 + 260 + 0 + 35 + 0 + 0 + 63264 + 365 + 9250 + 0 + 0 + 49044 + 2752 + 60354 + 0 + 2452 + 122 + 0 + 11610 + 53750 + 11667 + 11 + 35 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 74648 + 258 + 0 + 0 + 59 + 0 + 0 + 0 + 0 + 0 + 0 + + + 2 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + + + + + 61568 + 9126 + 1249 + 286 + 942 + 3933 + 21 + 13749 + 1699 + + + 92573 + 0 + 92135 + 0 + 8182 + 318 + 0 + 0 + 0 + 0 + 42 + 12 + 800 + 0 + 0 + 0 + 0 + 490 + 1398 + 0 + 3 + 0 + 90256 + 67322 + 22850 + 6 + 0 + 45760 + 45543 + 743 + 75 + 0 + 0 + 31 + 34 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + + A + 195 + + + NS + 42 + + + CNAME + 7 + + + PTR + 48 + + + MX + 7 + + + TXT + 6 + + + AAAA + 4 + + + DS + 97 + + + RRSIG + 258 + + + NSEC + 89 + + + DNSKEY + 60 + + + !DS + 29 + + + NXDOMAIN + 25 + + + + 1021 + 314 + 1021 + 316 + + + 1904593 + 96 + 336094 + 369336 + 
0 + 47518 + 769 + 519 + 1464363 + 392128 + 828966 + 393216 + 132096 + 132096 + + + + + + 0 + + + 0 + + + 0 + + + 0 + + + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 31 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + + 1021 + 0 + 1021 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 64 + 287392 + 29608 + 29608 + 262144 + 1024 + 1024 + + + + diff --git a/plugins/inputs/bind/xml_stats_v2.go b/plugins/inputs/bind/xml_stats_v2.go new file mode 100644 index 000000000..45071bdc0 --- /dev/null +++ b/plugins/inputs/bind/xml_stats_v2.go @@ -0,0 +1,168 @@ +package bind + +import ( + "encoding/xml" + "fmt" + "net" + "net/http" + "net/url" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +type v2Root struct { + XMLName xml.Name + Version string `xml:"version,attr"` + Statistics v2Statistics `xml:"bind>statistics"` +} + +// Omitted branches: socketmgr, taskmgr +type v2Statistics struct { + Version string `xml:"version,attr"` + Views []struct { + // Omitted branches: zones + Name string `xml:"name"` + RdTypes []v2Counter `xml:"rdtype"` + ResStats []v2Counter `xml:"resstat"` + Caches []struct { + Name string `xml:"name,attr"` + RRSets []v2Counter `xml:"rrset"` + } `xml:"cache"` + } `xml:"views>view"` + Server struct { + OpCodes []v2Counter `xml:"requests>opcode"` + RdTypes []v2Counter `xml:"queries-in>rdtype"` + NSStats []v2Counter `xml:"nsstat"` + ZoneStats []v2Counter `xml:"zonestat"` + ResStats []v2Counter `xml:"resstat"` + SockStats []v2Counter `xml:"sockstat"` + } `xml:"server"` + Memory struct { + Contexts []struct { + // Omitted nodes: references, maxinuse, blocksize, pools, hiwater, lowater + Id string `xml:"id"` + Name string `xml:"name"` + Total int `xml:"total"` + InUse int `xml:"inuse"` + } `xml:"contexts>context"` + Summary struct { + TotalUse int + InUse int + BlockSize int + ContextSize int + Lost int + } `xml:"summary"` + } `xml:"memory"` +} + +// BIND statistics v2 counter struct used throughout +type v2Counter struct { + Name string `xml:"name"` + Value int `xml:"counter"` +} + +// addXMLv2Counter adds a v2Counter array to a Telegraf Accumulator, with the specified tags +func addXMLv2Counter(acc telegraf.Accumulator, commonTags map[string]string, stats []v2Counter) { + grouper := metric.NewSeriesGrouper() + ts := time.Now() + for _, c := range stats { + tags := make(map[string]string) + + // Create local copy of tags since maps are reference types + for k, v := range commonTags { + tags[k] = v + } + + grouper.Add("bind_counter", tags, ts, c.Name, c.Value) + } + + //Add grouped metrics + for _, metric := range grouper.Metrics() { + acc.AddMetric(metric) + } +} + +// readStatsXMLv2 decodes a BIND9 XML statistics version 2 document. Unlike the XML v3 statistics +// format, the v2 format does not support broken-out subsets. 
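The `v2Counter` mapping above is the workhorse of this decoder: every `<name>`/`<counter>` element pair in the statistics document becomes one struct value. As a minimal standalone sketch of that decode step — the input below is a hypothetical fragment for illustration, not one of this patch's fixtures — the same tag layout can be exercised directly:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Same shape as the plugin's v2Counter: one named integer value.
type counter struct {
	Name  string `xml:"name"`
	Value int    `xml:"counter"`
}

type doc struct {
	NSStats []counter `xml:"server>nsstat"`
}

func main() {
	in := `<statistics><server>
	  <nsstat><name>Requestv4</name><counter>74941</counter></nsstat>
	  <nsstat><name>ReqEdns0</name><counter>9250</counter></nsstat>
	</server></statistics>`

	var d doc
	if err := xml.Unmarshal([]byte(in), &d); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", d.NSStats)
	// [{Name:Requestv4 Value:74941} {Name:ReqEdns0 Value:9250}]
}
```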
+func (b *Bind) readStatsXMLv2(addr *url.URL, acc telegraf.Accumulator) error { + var stats v2Root + + resp, err := client.Get(addr.String()) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status: %s", addr, resp.Status) + } + + if err := xml.NewDecoder(resp.Body).Decode(&stats); err != nil { + return fmt.Errorf("Unable to decode XML document: %s", err) + } + + tags := map[string]string{"url": addr.Host} + host, port, _ := net.SplitHostPort(addr.Host) + tags["source"] = host + tags["port"] = port + + // Opcodes + tags["type"] = "opcode" + addXMLv2Counter(acc, tags, stats.Statistics.Server.OpCodes) + + // Query RDATA types + tags["type"] = "qtype" + addXMLv2Counter(acc, tags, stats.Statistics.Server.RdTypes) + + // Nameserver stats + tags["type"] = "nsstat" + addXMLv2Counter(acc, tags, stats.Statistics.Server.NSStats) + + // Zone stats + tags["type"] = "zonestat" + addXMLv2Counter(acc, tags, stats.Statistics.Server.ZoneStats) + + // Socket statistics + tags["type"] = "sockstat" + addXMLv2Counter(acc, tags, stats.Statistics.Server.SockStats) + + // Memory stats + fields := map[string]interface{}{ + "total_use": stats.Statistics.Memory.Summary.TotalUse, + "in_use": stats.Statistics.Memory.Summary.InUse, + "block_size": stats.Statistics.Memory.Summary.BlockSize, + "context_size": stats.Statistics.Memory.Summary.ContextSize, + "lost": stats.Statistics.Memory.Summary.Lost, + } + acc.AddGauge("bind_memory", fields, map[string]string{"url": addr.Host, "source": host, "port": port}) + + // Detailed, per-context memory stats + if b.GatherMemoryContexts { + for _, c := range stats.Statistics.Memory.Contexts { + tags := map[string]string{"url": addr.Host, "id": c.Id, "name": c.Name, "source": host, "port": port} + fields := map[string]interface{}{"total": c.Total, "in_use": c.InUse} + + acc.AddGauge("bind_memory_context", fields, tags) + } + } + + // Detailed, per-view stats + if b.GatherViews { + for _, v := range stats.Statistics.Views { + tags := map[string]string{"url": addr.Host, "view": v.Name} + + // Query RDATA types + tags["type"] = "qtype" + addXMLv2Counter(acc, tags, v.RdTypes) + + // Resolver stats + tags["type"] = "resstats" + addXMLv2Counter(acc, tags, v.ResStats) + } + } + + return nil +} diff --git a/plugins/inputs/bind/xml_stats_v3.go b/plugins/inputs/bind/xml_stats_v3.go new file mode 100644 index 000000000..ed2cc1b7f --- /dev/null +++ b/plugins/inputs/bind/xml_stats_v3.go @@ -0,0 +1,161 @@ +package bind + +import ( + "encoding/xml" + "fmt" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +// XML path: //statistics +// Omitted branches: socketmgr, taskmgr +type v3Stats struct { + Server v3Server `xml:"server"` + Views []v3View `xml:"views>view"` + Memory v3Memory `xml:"memory"` +} + +// XML path: //statistics/memory +type v3Memory struct { + Contexts []struct { + // Omitted nodes: references, maxinuse, blocksize, pools, hiwater, lowater + Id string `xml:"id"` + Name string `xml:"name"` + Total int `xml:"total"` + InUse int `xml:"inuse"` + } `xml:"contexts>context"` + Summary struct { + TotalUse int + InUse int + BlockSize int + ContextSize int + Lost int + } `xml:"summary"` +} + +// XML path: //statistics/server +type v3Server struct { + CounterGroups []v3CounterGroup `xml:"counters"` +} + +// XML path: //statistics/views/view +type v3View struct { + // Omitted branches: zones + Name string 
`xml:"name,attr"` + CounterGroups []v3CounterGroup `xml:"counters"` + Caches []struct { + Name string `xml:"name,attr"` + RRSets []struct { + Name string `xml:"name"` + Value int `xml:"counter"` + } `xml:"rrset"` + } `xml:"cache"` +} + +// Generic XML v3 doc fragment used in multiple places +type v3CounterGroup struct { + Type string `xml:"type,attr"` + Counters []struct { + Name string `xml:"name,attr"` + Value int `xml:",chardata"` + } `xml:"counter"` +} + +// addStatsXMLv3 walks a v3Stats struct and adds the values to the telegraf.Accumulator. +func (b *Bind) addStatsXMLv3(stats v3Stats, acc telegraf.Accumulator, hostPort string) { + grouper := metric.NewSeriesGrouper() + ts := time.Now() + host, port, _ := net.SplitHostPort(hostPort) + // Counter groups + for _, cg := range stats.Server.CounterGroups { + for _, c := range cg.Counters { + if cg.Type == "opcode" && strings.HasPrefix(c.Name, "RESERVED") { + continue + } + + tags := map[string]string{"url": hostPort, "source": host, "port": port, "type": cg.Type} + + grouper.Add("bind_counter", tags, ts, c.Name, c.Value) + } + } + + // Memory stats + fields := map[string]interface{}{ + "total_use": stats.Memory.Summary.TotalUse, + "in_use": stats.Memory.Summary.InUse, + "block_size": stats.Memory.Summary.BlockSize, + "context_size": stats.Memory.Summary.ContextSize, + "lost": stats.Memory.Summary.Lost, + } + acc.AddGauge("bind_memory", fields, map[string]string{"url": hostPort, "source": host, "port": port}) + + // Detailed, per-context memory stats + if b.GatherMemoryContexts { + for _, c := range stats.Memory.Contexts { + tags := map[string]string{"url": hostPort, "source": host, "port": port, "id": c.Id, "name": c.Name} + fields := map[string]interface{}{"total": c.Total, "in_use": c.InUse} + + acc.AddGauge("bind_memory_context", fields, tags) + } + } + + // Detailed, per-view stats + if b.GatherViews { + for _, v := range stats.Views { + for _, cg := range v.CounterGroups { + for _, c := range cg.Counters { + tags := map[string]string{ + "url": hostPort, + "source": host, + "port": port, + "view": v.Name, + "type": cg.Type, + } + + grouper.Add("bind_counter", tags, ts, c.Name, c.Value) + } + } + } + } + + //Add grouped metrics + for _, metric := range grouper.Metrics() { + acc.AddMetric(metric) + } +} + +// readStatsXMLv3 takes a base URL to probe, and requests the individual statistics documents that +// we are interested in. These individual documents have a combined size which is significantly +// smaller than if we requested everything at once (e.g. taskmgr and socketmgr can be omitted). 
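The comment above hinges on a convenient property of `encoding/xml`: decoding several partial documents into the same struct merges them, because fields absent from a given document are left untouched and slice fields are appended to rather than replaced. A self-contained sketch of that accumulation, with simplified hypothetical documents standing in for the `/server` and `/mem` responses:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

type stats struct {
	// Populated by the first ("server") document.
	Counters []struct {
		Name  string `xml:"name,attr"`
		Value int    `xml:",chardata"`
	} `xml:"server>counters>counter"`
	// Populated by the second ("mem") document.
	InUse int `xml:"memory>summary>InUse"`
}

func main() {
	var s stats
	docs := []string{
		`<statistics><server><counters><counter name="QUERY">74941</counter></counters></server></statistics>`,
		`<statistics><memory><summary><InUse>6000232</InUse></summary></memory></statistics>`,
	}
	// Each decode only touches the branches present in its document,
	// so the struct ends up holding the union of all responses.
	for _, d := range docs {
		if err := xml.Unmarshal([]byte(d), &s); err != nil {
			panic(err)
		}
	}
	fmt.Printf("%+v\n", s)
	// {Counters:[{Name:QUERY Value:74941}] InUse:6000232}
}
```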
+func (b *Bind) readStatsXMLv3(addr *url.URL, acc telegraf.Accumulator) error { + var stats v3Stats + + // Progressively build up full v3Stats struct by parsing the individual HTTP responses + for _, suffix := range [...]string{"/server", "/net", "/mem"} { + scrapeUrl := addr.String() + suffix + + resp, err := client.Get(scrapeUrl) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status: %s", scrapeUrl, resp.Status) + } + + if err := xml.NewDecoder(resp.Body).Decode(&stats); err != nil { + return fmt.Errorf("Unable to decode XML document: %s", err) + } + } + + b.addStatsXMLv3(stats, acc, addr.Host) + return nil +} From dc5db8fc38a00b8b81a3b35c946984acfa7decbd Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 3 Apr 2019 16:02:21 -0700 Subject: [PATCH 0748/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 065680958..d2b2f180d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ #### New Inputs +- [bind](/plugins/inputs/bind/README.md) - Contributed by @dswarbrick & @danielllek - [github](/plugins/inputs/github/README.md) - Contributed by @influxdata #### Features From e5215d74db5e0276550b2eefbc32ad8410f67a17 Mon Sep 17 00:00:00 2001 From: Robert Sullivan Date: Thu, 4 Apr 2019 17:38:33 -0600 Subject: [PATCH 0749/1815] Allow colons in metric names in prometheus_client output (#5680) --- plugins/outputs/prometheus_client/prometheus_client.go | 2 +- plugins/outputs/prometheus_client/prometheus_client_test.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index da051daf9..32dcdbb89 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -25,7 +25,7 @@ import ( ) var ( - invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) + invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_:]`) validNameCharRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*`) ) diff --git a/plugins/outputs/prometheus_client/prometheus_client_test.go b/plugins/outputs/prometheus_client/prometheus_client_test.go index b6bbe35fd..211e24030 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_test.go @@ -186,15 +186,15 @@ func TestWrite_Sanitize(t *testing.T) { client := NewClient() p1, err := metric.New( - "foo.bar", + "foo.bar:colon", map[string]string{"tag-with-dash": "localhost.local"}, - map[string]interface{}{"field-with-dash": 42}, + map[string]interface{}{"field-with-dash-and:colon": 42}, time.Now(), telegraf.Counter) err = client.Write([]telegraf.Metric{p1}) require.NoError(t, err) - fam, ok := client.fam["foo_bar_field_with_dash"] + fam, ok := client.fam["foo_bar:colon_field_with_dash_and:colon"] require.True(t, ok) require.Equal(t, map[string]int{"tag_with_dash": 1}, fam.LabelSet) From 991e83c35f9c31db0110ca920d4d102c9f9aa724 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 4 Apr 2019 16:40:12 -0700 Subject: [PATCH 0750/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d2b2f180d..8626379f7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,12 @@ - [#5631](https://github.com/influxdata/telegraf/pull/5631): Create Windows service only when specified or in service 
manager. +## v1.10.3 [unreleased] + +#### Bugfixes + +- [#5680](https://github.com/influxdata/telegraf/pull/5680): Allow colons in metric names in prometheus_client output. + ## v1.10.2 [2019-04-02] #### Release Notes From 267a9f182bc1a4cf95dd5e8e3873514e7776a4c6 Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Fri, 5 Apr 2019 17:46:12 -0400 Subject: [PATCH 0751/1815] Add wavefront serializer plugin (#5670) --- docs/DATA_FORMATS_OUTPUT.md | 1 + internal/config/config.go | 26 ++ plugins/serializers/registry.go | 14 + plugins/serializers/wavefront/README.md | 47 +++ plugins/serializers/wavefront/wavefront.go | 202 ++++++++++++ .../serializers/wavefront/wavefront_test.go | 295 ++++++++++++++++++ 6 files changed, 585 insertions(+) create mode 100644 plugins/serializers/wavefront/README.md create mode 100755 plugins/serializers/wavefront/wavefront.go create mode 100755 plugins/serializers/wavefront/wavefront_test.go diff --git a/docs/DATA_FORMATS_OUTPUT.md b/docs/DATA_FORMATS_OUTPUT.md index 3ee16524d..f3ac028b9 100644 --- a/docs/DATA_FORMATS_OUTPUT.md +++ b/docs/DATA_FORMATS_OUTPUT.md @@ -9,6 +9,7 @@ plugins. 1. [Graphite](/plugins/serializers/graphite) 1. [SplunkMetric](/plugins/serializers/splunkmetric) 1. [Carbon2](/plugins/serializers/carbon2) +1. [Wavefront](/plugins/serializers/wavefront) You will be able to identify the plugins with support by the presence of a `data_format` config option, for example, in the `file` output plugin: diff --git a/internal/config/config.go b/internal/config/config.go index 939cb4c75..a0fc45a3c 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1810,6 +1810,30 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error } } + if node, ok := tbl.Fields["wavefront_source_override"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + c.WavefrontSourceOverride = append(c.WavefrontSourceOverride, str.Value) + } + } + } + } + } + + if node, ok := tbl.Fields["wavefront_use_strict"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if b, ok := kv.Value.(*ast.Boolean); ok { + var err error + c.WavefrontUseStrict, err = b.Boolean() + if err != nil { + return nil, err + } + } + } + } + delete(tbl.Fields, "influx_max_line_bytes") delete(tbl.Fields, "influx_sort_fields") delete(tbl.Fields, "influx_uint_support") @@ -1819,6 +1843,8 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error delete(tbl.Fields, "template") delete(tbl.Fields, "json_timestamp_units") delete(tbl.Fields, "splunkmetric_hec_routing") + delete(tbl.Fields, "wavefront_source_override") + delete(tbl.Fields, "wavefront_use_strict") return serializers.NewSerializer(c) } diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index cbc5981a6..ecac63323 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -12,6 +12,7 @@ import ( "github.com/influxdata/telegraf/plugins/serializers/json" "github.com/influxdata/telegraf/plugins/serializers/nowmetric" "github.com/influxdata/telegraf/plugins/serializers/splunkmetric" + "github.com/influxdata/telegraf/plugins/serializers/wavefront" ) // SerializerOutput is an interface for output plugins that are able to @@ -66,6 +67,13 @@ type Config struct { // Include HEC routing fields for splunkmetric output HecRouting bool + + // Point tags to use as the source name for Wavefront (if none found, host will be used). 
+ WavefrontSourceOverride []string + + // Use Strict rules to sanitize metric and tag names from invalid characters for Wavefront + // When enabled forward slash (/) and comma (,) will be accepted + WavefrontUseStrict bool } // NewSerializer a Serializer interface based on the given config. @@ -85,12 +93,18 @@ func NewSerializer(config *Config) (Serializer, error) { serializer, err = NewNowSerializer() case "carbon2": serializer, err = NewCarbon2Serializer() + case "wavefront": + serializer, err = NewWavefrontSerializer(config.Prefix, config.WavefrontUseStrict, config.WavefrontSourceOverride) default: err = fmt.Errorf("Invalid data format: %s", config.DataFormat) } return serializer, err } +func NewWavefrontSerializer(prefix string, useStrict bool, sourceOverride []string) (Serializer, error) { + return wavefront.NewSerializer(prefix, useStrict, sourceOverride) +} + func NewJsonSerializer(timestampUnits time.Duration) (Serializer, error) { return json.NewSerializer(timestampUnits) } diff --git a/plugins/serializers/wavefront/README.md b/plugins/serializers/wavefront/README.md new file mode 100644 index 000000000..2b3be1f78 --- /dev/null +++ b/plugins/serializers/wavefront/README.md @@ -0,0 +1,47 @@ +# Example + +The `wavefront` serializer translates the Telegraf metric format to the [Wavefront Data Format](https://docs.wavefront.com/wavefront_data_format.html). + +### Configuration + +```toml +[[outputs.file]] + files = ["stdout"] + + ## Use Strict rules to sanitize metric and tag names from invalid characters + ## When enabled forward slash (/) and comma (,) will be accpeted + # use_strict = false + + ## point tags to use as the source name for Wavefront (if none found, host will be used) + # source_override = ["hostname", "address", "agent_host", "node_host"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "wavefront" +``` + +### Metrics + +A Wavefront metric is equivalent to a single field value of a Telegraf measurement. +The Wavefront metric name will be: `.` +If a prefix is specified it will be honored. +Only boolean and numeric metrics will be serialized, all other types will generate +an error. 
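For instance — borrowing the values from this patch's own serializer tests — a `usage_idle` field on the `cpu` measurement, serialized with a configured prefix of `telegraf.`, produces the metric name `telegraf.cpu.usage.idle`; underscores in the assembled name are converted to dots:

```
"telegraf.cpu.usage.idle" 91.000000 1234567890 source="realHost" "cpu"="cpu0"
```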
+ +### Example + +The following Telegraf metric + +``` +cpu,cpu=cpu0,host=testHost user=12,idle=88,system=0 1234567890 +``` + +will serialize into the following Wavefront metrics + +``` +"cpu.user" 12.000000 1234567890 source="testHost" "cpu"="cpu0" +"cpu.idle" 88.000000 1234567890 source="testHost" "cpu"="cpu0" +"cpu.system" 0.000000 1234567890 source="testHost" "cpu"="cpu0" +``` diff --git a/plugins/serializers/wavefront/wavefront.go b/plugins/serializers/wavefront/wavefront.go new file mode 100755 index 000000000..70b87512f --- /dev/null +++ b/plugins/serializers/wavefront/wavefront.go @@ -0,0 +1,202 @@ +package wavefront + +import ( + "bytes" + "fmt" + "log" + "strconv" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs/wavefront" +) + +// WavefrontSerializer : WavefrontSerializer struct +type WavefrontSerializer struct { + Prefix string + UseStrict bool + SourceOverride []string +} + +// catch many of the invalid chars that could appear in a metric or tag name +var sanitizedChars = strings.NewReplacer( + "!", "-", "@", "-", "#", "-", "$", "-", "%", "-", "^", "-", "&", "-", + "*", "-", "(", "-", ")", "-", "+", "-", "`", "-", "'", "-", "\"", "-", + "[", "-", "]", "-", "{", "-", "}", "-", ":", "-", ";", "-", "<", "-", + ">", "-", ",", "-", "?", "-", "/", "-", "\\", "-", "|", "-", " ", "-", + "=", "-", +) + +// catch many of the invalid chars that could appear in a metric or tag name +var strictSanitizedChars = strings.NewReplacer( + "!", "-", "@", "-", "#", "-", "$", "-", "%", "-", "^", "-", "&", "-", + "*", "-", "(", "-", ")", "-", "+", "-", "`", "-", "'", "-", "\"", "-", + "[", "-", "]", "-", "{", "-", "}", "-", ":", "-", ";", "-", "<", "-", + ">", "-", "?", "-", "\\", "-", "|", "-", " ", "-", "=", "-", +) + +var tagValueReplacer = strings.NewReplacer("\"", "\\\"", "*", "-") + +var pathReplacer = strings.NewReplacer("_", ".") + +func NewSerializer(prefix string, useStrict bool, sourceOverride []string) (*WavefrontSerializer, error) { + s := &WavefrontSerializer{ + Prefix: prefix, + UseStrict: useStrict, + SourceOverride: sourceOverride, + } + return s, nil +} + +// Serialize : Serialize based on Wavefront format +func (s *WavefrontSerializer) Serialize(m telegraf.Metric) ([]byte, error) { + out := []byte{} + metricSeparator := "." + + for fieldName, value := range m.Fields() { + var name string + + if fieldName == "value" { + name = fmt.Sprintf("%s%s", s.Prefix, m.Name()) + } else { + name = fmt.Sprintf("%s%s%s%s", s.Prefix, m.Name(), metricSeparator, fieldName) + } + + if s.UseStrict { + name = strictSanitizedChars.Replace(name) + } else { + name = sanitizedChars.Replace(name) + } + + name = pathReplacer.Replace(name) + + metric := &wavefront.MetricPoint{ + Metric: name, + Timestamp: m.Time().Unix(), + } + + metricValue, buildError := buildValue(value, metric.Metric) + if buildError != nil { + // bad value continue to next metric + continue + } + metric.Value = metricValue + + source, tags := buildTags(m.Tags(), s) + metric.Source = source + metric.Tags = tags + + out = append(out, formatMetricPoint(metric, s)...) 
+ } + return out, nil +} + +func (s *WavefrontSerializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { + var batch bytes.Buffer + for _, m := range metrics { + buf, err := s.Serialize(m) + if err != nil { + return nil, err + } + _, err = batch.Write(buf) + if err != nil { + return nil, err + } + } + return batch.Bytes(), nil +} + +func buildTags(mTags map[string]string, s *WavefrontSerializer) (string, map[string]string) { + + // Remove all empty tags. + for k, v := range mTags { + if v == "" { + delete(mTags, k) + } + } + + var source string + + if src, ok := mTags["source"]; ok { + source = src + delete(mTags, "source") + } else { + sourceTagFound := false + for _, src := range s.SourceOverride { + for k, v := range mTags { + if k == src { + source = v + mTags["telegraf_host"] = mTags["host"] + sourceTagFound = true + delete(mTags, k) + break + } + } + if sourceTagFound { + break + } + } + + if !sourceTagFound { + source = mTags["host"] + } + } + + delete(mTags, "host") + + return tagValueReplacer.Replace(source), mTags +} + +func buildValue(v interface{}, name string) (float64, error) { + switch p := v.(type) { + case bool: + if p { + return 1, nil + } else { + return 0, nil + } + case int64: + return float64(v.(int64)), nil + case uint64: + return float64(v.(uint64)), nil + case float64: + return v.(float64), nil + case string: + // return an error but don't log + return 0, fmt.Errorf("string type not supported") + default: + // return an error and log a debug message + err := fmt.Errorf("unexpected type: %T, with value: %v, for :%s", v, v, name) + log.Printf("D! Serializer [wavefront] %s\n", err.Error()) + return 0, err + } +} + +func formatMetricPoint(metricPoint *wavefront.MetricPoint, s *WavefrontSerializer) []byte { + var buffer bytes.Buffer + buffer.WriteString("\"") + buffer.WriteString(metricPoint.Metric) + buffer.WriteString("\" ") + buffer.WriteString(strconv.FormatFloat(metricPoint.Value, 'f', 6, 64)) + buffer.WriteString(" ") + buffer.WriteString(strconv.FormatInt(metricPoint.Timestamp, 10)) + buffer.WriteString(" source=\"") + buffer.WriteString(metricPoint.Source) + buffer.WriteString("\"") + + for k, v := range metricPoint.Tags { + buffer.WriteString(" \"") + if s.UseStrict { + buffer.WriteString(strictSanitizedChars.Replace(k)) + } else { + buffer.WriteString(sanitizedChars.Replace(k)) + } + buffer.WriteString("\"=\"") + buffer.WriteString(tagValueReplacer.Replace(v)) + buffer.WriteString("\"") + } + + buffer.WriteString("\n") + + return buffer.Bytes() +} diff --git a/plugins/serializers/wavefront/wavefront_test.go b/plugins/serializers/wavefront/wavefront_test.go new file mode 100755 index 000000000..3230ce515 --- /dev/null +++ b/plugins/serializers/wavefront/wavefront_test.go @@ -0,0 +1,295 @@ +package wavefront + +import ( + "fmt" + "reflect" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/outputs/wavefront" + "github.com/stretchr/testify/assert" +) + +func TestBuildTags(t *testing.T) { + var tagTests = []struct { + ptIn map[string]string + outTags map[string]string + outSource string + }{ + { + map[string]string{"one": "two", "three": "four", "host": "testHost"}, + map[string]string{"one": "two", "three": "four"}, + "testHost", + }, + { + map[string]string{"aaa": "bbb", "host": "testHost"}, + map[string]string{"aaa": "bbb"}, + "testHost", + }, + { + map[string]string{"bbb": "789", "aaa": "123", "host": "testHost"}, + map[string]string{"aaa": "123", "bbb": "789"}, + "testHost", + }, 
+ { + map[string]string{"host": "aaa", "dc": "bbb"}, + map[string]string{"dc": "bbb"}, + "aaa", + }, + { + map[string]string{"instanceid": "i-0123456789", "host": "aaa", "dc": "bbb"}, + map[string]string{"dc": "bbb", "telegraf_host": "aaa"}, + "i-0123456789", + }, + { + map[string]string{"instance-id": "i-0123456789", "host": "aaa", "dc": "bbb"}, + map[string]string{"dc": "bbb", "telegraf_host": "aaa"}, + "i-0123456789", + }, + { + map[string]string{"instanceid": "i-0123456789", "host": "aaa", "hostname": "ccc", "dc": "bbb"}, + map[string]string{"dc": "bbb", "hostname": "ccc", "telegraf_host": "aaa"}, + "i-0123456789", + }, + { + map[string]string{"instanceid": "i-0123456789", "host": "aaa", "snmp_host": "ccc", "dc": "bbb"}, + map[string]string{"dc": "bbb", "snmp_host": "ccc", "telegraf_host": "aaa"}, + "i-0123456789", + }, + { + map[string]string{"host": "aaa", "snmp_host": "ccc", "dc": "bbb"}, + map[string]string{"dc": "bbb", "telegraf_host": "aaa"}, + "ccc", + }, + } + s := WavefrontSerializer{SourceOverride: []string{"instanceid", "instance-id", "hostname", "snmp_host", "node_host"}} + + for _, tt := range tagTests { + source, tags := buildTags(tt.ptIn, &s) + if !reflect.DeepEqual(tags, tt.outTags) { + t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", tt.outTags, tags) + } + if source != tt.outSource { + t.Errorf("\nexpected\t%s\nreceived\t%s\n", tt.outSource, source) + } + } +} + +func TestBuildTagsHostTag(t *testing.T) { + var tagTests = []struct { + ptIn map[string]string + outTags map[string]string + outSource string + }{ + { + map[string]string{"one": "two", "host": "testHost", "snmp_host": "snmpHost"}, + map[string]string{"telegraf_host": "testHost", "one": "two"}, + "snmpHost", + }, + } + s := WavefrontSerializer{SourceOverride: []string{"snmp_host"}} + + for _, tt := range tagTests { + source, tags := buildTags(tt.ptIn, &s) + if !reflect.DeepEqual(tags, tt.outTags) { + t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", tt.outTags, tags) + } + if source != tt.outSource { + t.Errorf("\nexpected\t%s\nreceived\t%s\n", tt.outSource, source) + } + } +} + +func TestFormatMetricPoint(t *testing.T) { + var pointTests = []struct { + ptIn *wavefront.MetricPoint + out string + }{ + { + &wavefront.MetricPoint{ + Metric: "cpu.idle", + Value: 1, + Timestamp: 1554172967, + Source: "testHost", + Tags: map[string]string{"aaa": "bbb"}, + }, + "\"cpu.idle\" 1.000000 1554172967 source=\"testHost\" \"aaa\"=\"bbb\"\n", + }, + { + &wavefront.MetricPoint{ + Metric: "cpu.idle", + Value: 1, + Timestamp: 1554172967, + Source: "testHost", + Tags: map[string]string{"sp&c!al/chars,": "get*replaced"}, + }, + "\"cpu.idle\" 1.000000 1554172967 source=\"testHost\" \"sp-c-al-chars-\"=\"get-replaced\"\n", + }, + } + + s := WavefrontSerializer{} + + for _, pt := range pointTests { + bout := formatMetricPoint(pt.ptIn, &s) + sout := string(bout[:]) + if sout != pt.out { + t.Errorf("\nexpected\t%s\nreceived\t%s\n", pt.out, sout) + } + } +} + +func TestUseStrict(t *testing.T) { + var pointTests = []struct { + ptIn *wavefront.MetricPoint + out string + }{ + { + &wavefront.MetricPoint{ + Metric: "cpu.idle", + Value: 1, + Timestamp: 1554172967, + Source: "testHost", + Tags: map[string]string{"sp&c!al/chars,": "get*replaced"}, + }, + "\"cpu.idle\" 1.000000 1554172967 source=\"testHost\" \"sp-c-al/chars,\"=\"get-replaced\"\n", + }, + } + + s := WavefrontSerializer{UseStrict: true} + + for _, pt := range pointTests { + bout := formatMetricPoint(pt.ptIn, &s) + sout := string(bout[:]) + if sout != pt.out { + 
t.Errorf("\nexpected\t%s\nreceived\t%s\n", pt.out, sout) + } + } +} + +func TestSerializeMetricFloat(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + "host": "realHost", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s := WavefrontSerializer{} + buf, _ := s.Serialize(m) + mS := strings.Split(strings.TrimSpace(string(buf)), "\n") + assert.NoError(t, err) + + expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 91.500000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} + assert.Equal(t, expS, mS) +} + +func TestSerializeMetricInt(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + "host": "realHost", + } + fields := map[string]interface{}{ + "usage_idle": int64(91), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s := WavefrontSerializer{} + buf, _ := s.Serialize(m) + mS := strings.Split(strings.TrimSpace(string(buf)), "\n") + assert.NoError(t, err) + + expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 91.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} + assert.Equal(t, expS, mS) +} + +func TestSerializeMetricBoolTrue(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + "host": "realHost", + } + fields := map[string]interface{}{ + "usage_idle": true, + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s := WavefrontSerializer{} + buf, _ := s.Serialize(m) + mS := strings.Split(strings.TrimSpace(string(buf)), "\n") + assert.NoError(t, err) + + expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 1.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} + assert.Equal(t, expS, mS) +} + +func TestSerializeMetricBoolFalse(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + "host": "realHost", + } + fields := map[string]interface{}{ + "usage_idle": false, + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s := WavefrontSerializer{} + buf, _ := s.Serialize(m) + mS := strings.Split(strings.TrimSpace(string(buf)), "\n") + assert.NoError(t, err) + + expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 0.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} + assert.Equal(t, expS, mS) +} + +func TestSerializeMetricFieldValue(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + "host": "realHost", + } + fields := map[string]interface{}{ + "value": int64(91), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s := WavefrontSerializer{} + buf, _ := s.Serialize(m) + mS := strings.Split(strings.TrimSpace(string(buf)), "\n") + assert.NoError(t, err) + + expS := []string{fmt.Sprintf("\"cpu\" 91.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} + assert.Equal(t, expS, mS) +} + +func TestSerializeMetricPrefix(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + "host": "realHost", + } + fields := map[string]interface{}{ + "usage_idle": int64(91), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s := WavefrontSerializer{Prefix: "telegraf."} + buf, _ := s.Serialize(m) + mS := strings.Split(strings.TrimSpace(string(buf)), "\n") + assert.NoError(t, err) + + expS := []string{fmt.Sprintf("\"telegraf.cpu.usage.idle\" 91.000000 %d source=\"realHost\" 
\"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} + assert.Equal(t, expS, mS) +} From 5bc60ca79d2a63ffba7157b80c763fe16c9f43cb Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 5 Apr 2019 14:48:24 -0700 Subject: [PATCH 0752/1815] Update changelog --- CHANGELOG.md | 4 ++++ README.md | 1 + 2 files changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8626379f7..ede9ec836 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,10 @@ - [bind](/plugins/inputs/bind/README.md) - Contributed by @dswarbrick & @danielllek - [github](/plugins/inputs/github/README.md) - Contributed by @influxdata +#### New Serializers + +- [wavefront](/plugins/serializers/wavefront/README.md) - Contributed by @puckpuck + #### Features - [#5556](https://github.com/influxdata/telegraf/pull/5556): Add TTL field to ping input. diff --git a/README.md b/README.md index de54c706a..758d7acb0 100644 --- a/README.md +++ b/README.md @@ -311,6 +311,7 @@ For documentation on the latest development code see the [documentation index][d - [ServiceNow](/plugins/serializers/nowmetric) - [SplunkMetric](/plugins/serializers/splunkmetric) - [Carbon2](/plugins/serializers/carbon2) +- [Wavefront](/plugins/serializers/wavefront) ## Processor Plugins From ca99569e6f19502894409de960d504fded9f7b9a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 5 Apr 2019 15:06:46 -0700 Subject: [PATCH 0753/1815] Fix wavefront serializer option names in README --- plugins/serializers/wavefront/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/serializers/wavefront/README.md b/plugins/serializers/wavefront/README.md index 2b3be1f78..7a6594da3 100644 --- a/plugins/serializers/wavefront/README.md +++ b/plugins/serializers/wavefront/README.md @@ -1,6 +1,6 @@ # Example -The `wavefront` serializer translates the Telegraf metric format to the [Wavefront Data Format](https://docs.wavefront.com/wavefront_data_format.html). +The `wavefront` serializer translates the Telegraf metric format to the [Wavefront Data Format](https://docs.wavefront.com/wavefront_data_format.html). ### Configuration @@ -10,10 +10,10 @@ The `wavefront` serializer translates the Telegraf metric format to the [Wavefro ## Use Strict rules to sanitize metric and tag names from invalid characters ## When enabled forward slash (/) and comma (,) will be accpeted - # use_strict = false - + # wavefront_use_strict = false + ## point tags to use as the source name for Wavefront (if none found, host will be used) - # source_override = ["hostname", "address", "agent_host", "node_host"] + # wavefront_source_override = ["hostname", "address", "agent_host", "node_host"] ## Data format to output. ## Each data format has its own unique set of configuration options, read From 90593a07b87b06e03b85c6a2a5879ae957006ad1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 8 Apr 2019 15:42:28 -0700 Subject: [PATCH 0754/1815] Clarify supported ping utils --- plugins/inputs/ping/README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index f59a6c947..5d3904e92 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -2,8 +2,9 @@ Sends a ping message by executing the system ping command and reports the results. -Currently there is no support for GNU Inetutils, use with iputils-ping -instead: +Most ping command implementations are supported, one notable exception being +that there is currently no support for GNU Inetutils ping. 
You may instead +use the iputils-ping implementation: ``` apt-get install iputils-ping ``` From b2baa2fdd572fd93574896037f1e12d1b407aec8 Mon Sep 17 00:00:00 2001 From: Benjamin Fuller Date: Wed, 10 Apr 2019 15:52:46 -0600 Subject: [PATCH 0755/1815] Add optional namespace restriction to prometheus input plugin (#5697) --- plugins/inputs/prometheus/README.md | 5 +++++ plugins/inputs/prometheus/kubernetes.go | 2 +- plugins/inputs/prometheus/prometheus.go | 6 +++++- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index 9208f54be..c1f50bb96 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -24,6 +24,9 @@ in Prometheus format. ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. ## - prometheus.io/port: If port is not 9102 use this annotation # monitor_kubernetes_pods = true + ## Restricts Kubernetes monitoring to a single namespace + ## ex: monitor_kubernetes_pods_namespace = "default" + # monitor_kubernetes_pods_namespace = "" ## Use bearer token for authorization. ('bearer_token' takes priority) # bearer_token = "/path/to/bearer/token" @@ -64,6 +67,8 @@ Currently the following annotation are supported: * `prometheus.io/path` Override the path for the metrics endpoint on the service. (default '/metrics') * `prometheus.io/port` Used to override the port. (default 9102) +Using the `monitor_kubernetes_pods_namespace` option allows you to limit which pods you are scraping. + #### Bearer Token If set, the file specified by the `bearer_token` parameter will be read on diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index 0d86ad91e..d92d90ead 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -83,7 +83,7 @@ func (p *Prometheus) start(ctx context.Context) error { // directed to do so by K8s. func (p *Prometheus) watch(ctx context.Context, client *k8s.Client) error { pod := &corev1.Pod{} - watcher, err := client.Watch(ctx, "", &corev1.Pod{}) + watcher, err := client.Watch(ctx, p.PodNamespace, &corev1.Pod{}) if err != nil { return err } diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 879af4567..a4409c5b0 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -41,7 +41,8 @@ type Prometheus struct { client *http.Client // Should we scrape Kubernetes services for prometheus annotations - MonitorPods bool `toml:"monitor_kubernetes_pods"` + MonitorPods bool `toml:"monitor_kubernetes_pods"` + PodNamespace string `toml:"monitor_kubernetes_pods_namespace"` lock sync.Mutex kubernetesPods map[string]URLAndAddress cancel context.CancelFunc @@ -65,6 +66,9 @@ var sampleConfig = ` ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. ## - prometheus.io/port: If port is not 9102 use this annotation # monitor_kubernetes_pods = true + ## Restricts Kubernetes monitoring to a single namespace + ## ex: monitor_kubernetes_pods_namespace = "default" + # monitor_kubernetes_pods_namespace = "" ## Use bearer token for authorization. 
('bearer_token' takes priority) # bearer_token = "/path/to/bearer/token" From 4079e4605fc809923cb4c2c4ee836e5b65cd658d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 10 Apr 2019 17:18:48 -0700 Subject: [PATCH 0756/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ede9ec836..1bb2ebd9a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ - [#5648](https://github.com/influxdata/telegraf/pull/5648): Allow env vars ${} expansion syntax in configuration file. - [#5641](https://github.com/influxdata/telegraf/pull/5641): Add option to reset buckets on flush to histogram aggregator. - [#5664](https://github.com/influxdata/telegraf/pull/5664): Add option to use strict sanitization rules to wavefront output. +- [#5697](https://github.com/influxdata/telegraf/pull/5697): Add namespace restriction to prometheus input plugin. #### Bugfixes From 9ea7cdd319c5edeb4bcf907d33129c26b00cb123 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 10 Apr 2019 17:19:29 -0700 Subject: [PATCH 0757/1815] Deprecate uptime_format field in system input (#5708) --- plugins/inputs/system/README.md | 2 +- plugins/inputs/system/system.go | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/system/README.md b/plugins/inputs/system/README.md index bea9bd2d9..efaa8a17f 100644 --- a/plugins/inputs/system/README.md +++ b/plugins/inputs/system/README.md @@ -25,7 +25,7 @@ the `telegraf` user to be added to the `utmp` group on some systems. - n_users (integer) - n_cpus (integer) - uptime (integer, seconds) - - uptime_format (string) + - uptime_format (string, deprecated in 1.10, use `uptime` field) ### Example Output: diff --git a/plugins/inputs/system/system.go b/plugins/inputs/system/system.go index 55ebbc59e..5c68870bb 100644 --- a/plugins/inputs/system/system.go +++ b/plugins/inputs/system/system.go @@ -9,11 +9,10 @@ import ( "strings" "time" - "github.com/shirou/gopsutil/host" - "github.com/shirou/gopsutil/load" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/shirou/gopsutil/host" + "github.com/shirou/gopsutil/load" ) type SystemStats struct{} @@ -22,7 +21,12 @@ func (_ *SystemStats) Description() string { return "Read metrics about system load & uptime" } -func (_ *SystemStats) SampleConfig() string { return "" } +func (_ *SystemStats) SampleConfig() string { + return ` + ## Uncomment to remove deprecated metrics. + # fielddrop = ["uptime_format"] +` +} func (_ *SystemStats) Gather(acc telegraf.Accumulator) error { loadavg, err := load.Avg() From 7f8bf56670f4b13b6c7acc29c2a63221d04c1b73 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 10 Apr 2019 17:20:48 -0700 Subject: [PATCH 0758/1815] Update changelog --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1bb2ebd9a..49fc51c54 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ ## v1.11 [unreleased] +#### Release Notes + +- The `uptime_format` field in the system input has been deprecated, use the + `uptime` field instead. 
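Until the field is removed outright, users can silence it with the `fielddrop` hinted at in the updated sample configuration — roughly:

```toml
[[inputs.system]]
  ## Remove the deprecated metric ahead of its eventual removal.
  fielddrop = ["uptime_format"]
```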
+ #### New Inputs - [bind](/plugins/inputs/bind/README.md) - Contributed by @dswarbrick & @danielllek From d2666d0db6927d324422dcc3ec7e9c38e2b3704b Mon Sep 17 00:00:00 2001 From: scottprichard <46452427+scottprichard@users.noreply.github.com> Date: Wed, 10 Apr 2019 21:42:38 -0400 Subject: [PATCH 0759/1815] Add cmdline tag to procstat input (#5681) --- plugins/inputs/procstat/README.md | 4 ++++ plugins/inputs/procstat/process.go | 1 + plugins/inputs/procstat/procstat.go | 14 ++++++++++++++ plugins/inputs/procstat/procstat_test.go | 4 ++++ 4 files changed, 23 insertions(+) diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index 0dd631b05..dfe95291a 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -41,6 +41,9 @@ Processes can be selected for monitoring using one of several methods: ## Field name prefix # prefix = "" + ## When true add the full cmdline as a tag. + # cmdline_tag = false + ## Add PID as a tag instead of a field; useful to differentiate between ## processes whose tags are otherwise the same. Can create a large number ## of series, use judiciously. @@ -72,6 +75,7 @@ implemented as a WMI query. The pattern allows fuzzy matching using only - procstat - tags: - pid (when `pid_tag` is true) + - cmdline (when 'cmdline_tag' is true) - process_name - pidfile (when defined) - exe (when defined) diff --git a/plugins/inputs/procstat/process.go b/plugins/inputs/procstat/process.go index 30e8f182f..94a57c192 100644 --- a/plugins/inputs/procstat/process.go +++ b/plugins/inputs/procstat/process.go @@ -15,6 +15,7 @@ type Process interface { IOCounters() (*process.IOCountersStat, error) MemoryInfo() (*process.MemoryInfoStat, error) Name() (string, error) + Cmdline() (string, error) NumCtxSwitches() (*process.NumCtxSwitchesStat, error) NumFDs() (int32, error) NumThreads() (int32, error) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 8424cd674..55552bb4a 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -27,6 +27,7 @@ type Procstat struct { Exe string Pattern string Prefix string + CmdLineTag bool `toml:"cmdline_tag"` ProcessName string User string SystemdUnit string @@ -65,6 +66,9 @@ var sampleConfig = ` ## Field name prefix # prefix = "" + ## When true add the full cmdline as a tag. + # cmdline_tag = false + ## Add PID as a tag instead of a field; useful to differentiate between ## processes whose tags are otherwise the same. Can create a large number ## of series, use judiciously. 
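Combined with the README addition above, a minimal configuration exercising the new option might look like the following (illustrative only; `nginx` is just an example pattern):

```toml
[[inputs.procstat]]
  pattern = "nginx"
  ## Add the full command line of each matched process as a tag.
  cmdline_tag = true
```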
@@ -170,6 +174,16 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) { fields["pid"] = int32(proc.PID()) } + //If cmd_line tag is true and it is not already set add cmdline as a tag + if p.CmdLineTag { + if _, ok := proc.Tags()["cmdline"]; !ok { + Cmdline, err := proc.Cmdline() + if err == nil { + proc.Tags()["cmdline"] = Cmdline + } + } + } + numThreads, err := proc.NumThreads() if err == nil { fields[prefix+"num_threads"] = numThreads diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index 7a2eaf9ee..191c056ea 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -76,6 +76,10 @@ func (pg *testPgrep) PidFile(path string) ([]PID, error) { return pg.pids, pg.err } +func (p *testProc) Cmdline() (string, error) { + return "test_proc", nil +} + func (pg *testPgrep) Pattern(pattern string) ([]PID, error) { return pg.pids, pg.err } From 2e5165d416b216286d5a243bcad613eae53de91d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 10 Apr 2019 18:44:28 -0700 Subject: [PATCH 0760/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 49fc51c54..fe4fbbabb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ - [#5641](https://github.com/influxdata/telegraf/pull/5641): Add option to reset buckets on flush to histogram aggregator. - [#5664](https://github.com/influxdata/telegraf/pull/5664): Add option to use strict sanitization rules to wavefront output. - [#5697](https://github.com/influxdata/telegraf/pull/5697): Add namespace restriction to prometheus input plugin. +- [#5681](https://github.com/influxdata/telegraf/pull/5681): Add cmdline tag to procstat input. #### Bugfixes From 24391a8b5e571f6b79c81d55dc0780bb94a675be Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 11 Apr 2019 11:48:23 -0700 Subject: [PATCH 0761/1815] Remove debug print statements --- agent/agent.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index ae2de85bf..2687bbc0f 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -405,14 +405,11 @@ func (a *Agent) runAggregators( acc := NewAccumulator(agg, aggregations) acc.SetPrecision(a.Precision()) - fmt.Println(1) a.push(ctx, agg, acc) - fmt.Println(2) }(agg) } aggWg.Wait() - fmt.Println(3) close(aggregations) }() @@ -422,10 +419,8 @@ func (a *Agent) runAggregators( dst <- metric } } - fmt.Println(4) wg.Wait() - fmt.Println(5) return nil } From 776e06c76961661e305fdad63945289b02ded1f3 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Thu, 11 Apr 2019 14:15:38 -0600 Subject: [PATCH 0762/1815] Support verbose query param in ping endpoint of influxdb_listener (#5704) --- plugins/inputs/influxdb_listener/http_listener.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/influxdb_listener/http_listener.go b/plugins/inputs/influxdb_listener/http_listener.go index 2857ae9c9..7e5544786 100644 --- a/plugins/inputs/influxdb_listener/http_listener.go +++ b/plugins/inputs/influxdb_listener/http_listener.go @@ -5,6 +5,7 @@ import ( "compress/gzip" "crypto/subtle" "crypto/tls" + "encoding/json" "fmt" "io" "log" @@ -231,8 +232,16 @@ func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) { case "/ping": h.PingsRecv.Incr(1) defer h.PingsServed.Incr(1) + verbose := req.URL.Query().Get("verbose") + // respond to ping requests - res.WriteHeader(http.StatusNoContent) 
+ if verbose != "" && verbose != "0" && verbose != "false" { + res.WriteHeader(http.StatusOK) + b, _ := json.Marshal(map[string]string{"version": "1.0"}) // based on header set above + res.Write(b) + } else { + res.WriteHeader(http.StatusNoContent) + } default: defer h.NotFoundsServed.Incr(1) // Don't know how to respond to calls to other endpoints From e9fe26f17b839272f32b8c7c8942c9d49b1c913f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 11 Apr 2019 13:16:19 -0700 Subject: [PATCH 0763/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fe4fbbabb..70edb6e83 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ - [#5664](https://github.com/influxdata/telegraf/pull/5664): Add option to use strict sanitization rules to wavefront output. - [#5697](https://github.com/influxdata/telegraf/pull/5697): Add namespace restriction to prometheus input plugin. - [#5681](https://github.com/influxdata/telegraf/pull/5681): Add cmdline tag to procstat input. +- [#5704](https://github.com/influxdata/telegraf/pull/5704): Support verbose query param in ping endpoint of influxdb_listener. #### Bugfixes From 5bf793bb945124d259db46204f2bf6bd9190490c Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Fri, 12 Apr 2019 13:22:16 -0600 Subject: [PATCH 0764/1815] Update issue templates to include code block and comments (#5721) --- .github/ISSUE_TEMPLATE/Bug_report.md | 52 ++++++++++++----------- .github/ISSUE_TEMPLATE/Feature_request.md | 34 +++++++-------- 2 files changed, 45 insertions(+), 41 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/Bug_report.md b/.github/ISSUE_TEMPLATE/Bug_report.md index b84aad767..49cfdefe3 100644 --- a/.github/ISSUE_TEMPLATE/Bug_report.md +++ b/.github/ISSUE_TEMPLATE/Bug_report.md @@ -1,24 +1,28 @@ ---- -name: Bug report -about: Create a report to help us improve - ---- - -### Relevant telegraf.conf: - -### System info: - -[Include Telegraf version, operating system name, and other relevant details] - -### Steps to reproduce: - -1. ... -2. ... - -### Expected behavior: - -### Actual behavior: - -### Additional info: - -[Include gist of relevant config, logs, etc.] +--- +name: Bug report +about: Create a report to help us improve + +--- + +### Relevant telegraf.conf: + +```toml + +``` + +### System info: + + + +### Steps to reproduce: + +1. ... +2. ... + +### Expected behavior: + +### Actual behavior: + +### Additional info: + + diff --git a/.github/ISSUE_TEMPLATE/Feature_request.md b/.github/ISSUE_TEMPLATE/Feature_request.md index 84d45fcd6..20aba04be 100644 --- a/.github/ISSUE_TEMPLATE/Feature_request.md +++ b/.github/ISSUE_TEMPLATE/Feature_request.md @@ -1,17 +1,17 @@ ---- -name: Feature request -about: Suggest an idea for this project - ---- - -## Feature Request - -Opening a feature request kicks off a discussion. - -### Proposal: - -### Current behavior: - -### Desired behavior: - -### Use case: [Why is this important (helps with prioritizing requests)] +--- +name: Feature request +about: Suggest an idea for this project + +--- + +## Feature Request + +Opening a feature request kicks off a discussion. 
+ +### Proposal: + +### Current behavior: + +### Desired behavior: + +### Use case: From 37441e9eb1f65c14ec0d58ccd76dd3271424815b Mon Sep 17 00:00:00 2001 From: Lorenzo Affetti Date: Tue, 16 Apr 2019 01:07:47 +0200 Subject: [PATCH 0765/1815] Set log directory attributes in rpm spec (#5716) --- scripts/build.py | 4 ++++ scripts/post-install.sh | 12 ++++++++---- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/scripts/build.py b/scripts/build.py index 5869bf1ed..85e1724a5 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -18,6 +18,8 @@ import argparse # Packaging variables PACKAGE_NAME = "telegraf" +USER = "telegraf" +GROUP = "telegraf" INSTALL_ROOT_DIR = "/usr/bin" LOG_DIR = "/var/log/telegraf" SCRIPT_DIR = "/usr/lib/telegraf/scripts" @@ -66,6 +68,7 @@ fpm_common_args = "-f -s dir --log error \ --before-install {} \ --after-remove {} \ --before-remove {} \ + --rpm-attr 755,{},{}:{} \ --description \"{}\"".format( VENDOR, PACKAGE_URL, @@ -77,6 +80,7 @@ fpm_common_args = "-f -s dir --log error \ PREINST_SCRIPT, POSTREMOVE_SCRIPT, PREREMOVE_SCRIPT, + USER, GROUP, LOG_DIR, DESCRIPTION) targets = { diff --git a/scripts/post-install.sh b/scripts/post-install.sh index 822a4e4de..9972364bc 100644 --- a/scripts/post-install.sh +++ b/scripts/post-install.sh @@ -32,10 +32,6 @@ if ! id telegraf &>/dev/null; then useradd -r -M telegraf -s /bin/false -d /etc/telegraf -g telegraf fi -test -d $LOG_DIR || mkdir -p $LOG_DIR -chown -R -L telegraf:telegraf $LOG_DIR -chmod 755 $LOG_DIR - # Remove legacy symlink, if it exists if [[ -L /etc/init.d/telegraf ]]; then rm -f /etc/init.d/telegraf @@ -72,6 +68,14 @@ if [[ -f /etc/redhat-release ]] || [[ -f /etc/SuSE-release ]]; then fi elif [[ -f /etc/debian_version ]]; then # Debian/Ubuntu logic + + # Ownership for RH-based platforms is set in build.py via the `rmp-attr` option. + # We perform ownership change only for Debian-based systems. + # Moving these lines out of this if statement would make `rmp -V` fail after installation. + test -d $LOG_DIR || mkdir -p $LOG_DIR + chown -R -L telegraf:telegraf $LOG_DIR + chmod 755 $LOG_DIR + if [[ "$(readlink /proc/1/exe)" == */systemd ]]; then install_systemd /lib/systemd/system/telegraf.service deb-systemd-invoke restart telegraf.service || echo "WARNING: systemd not running." From 396d44546d5737def24795f303e04a8c1a58a421 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 15 Apr 2019 16:09:37 -0700 Subject: [PATCH 0766/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 70edb6e83..562cd4afc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ #### Bugfixes - [#5680](https://github.com/influxdata/telegraf/pull/5680): Allow colons in metric names in prometheus_client output. +- [#5716](https://github.com/influxdata/telegraf/pull/5716): Set log directory attributes in rpm spec. ## v1.10.2 [2019-04-02] From 08080bbc1fe0125f18e4aae1f91e4a4521bde0bf Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 16 Apr 2019 12:35:08 -0700 Subject: [PATCH 0767/1815] Set release date for 1.10.3 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 562cd4afc..cbb6e4ff2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,7 +30,7 @@ - [#5631](https://github.com/influxdata/telegraf/pull/5631): Create Windows service only when specified or in service manager. 
-## v1.10.3 [unreleased] +## v1.10.3 [2019-04-16] #### Bugfixes From 72695228b3ea967c09ee8f7184b69203344b87bd Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 16 Apr 2019 18:56:56 -0600 Subject: [PATCH 0768/1815] Enhance HTTP connection options for phpfpm input plugin (#5713) --- plugins/inputs/phpfpm/README.md | 10 ++++++++++ plugins/inputs/phpfpm/phpfpm.go | 31 +++++++++++++++++++++++++++---- 2 files changed, 37 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/phpfpm/README.md b/plugins/inputs/phpfpm/README.md index 531edae24..e2f4e0c2f 100644 --- a/plugins/inputs/phpfpm/README.md +++ b/plugins/inputs/phpfpm/README.md @@ -27,6 +27,16 @@ Get phpfpm stats using either HTTP status page or fpm socket. ## Example of multiple gathering from local socket and remote host ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] urls = ["http://localhost/status"] + + ## Duration allowed to complete HTTP requests. + # timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ``` When using `unixsocket`, you have to ensure that telegraf runs on same diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index e40dae174..ed205e6e7 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -13,6 +13,8 @@ import ( "sync" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -35,7 +37,9 @@ type metric map[string]int64 type poolStat map[string]metric type phpfpm struct { - Urls []string + Urls []string + Timeout internal.Duration + tls.ClientConfig client *http.Client } @@ -58,9 +62,19 @@ var sampleConfig = ` ## "fcgi://10.0.0.12:9000/status" ## "cgi://10.0.10.12:9001/status" ## - ## Example of multiple gathering from local socket and remove host + ## Example of multiple gathering from local socket and remote host ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] urls = ["http://localhost/status"] + + ## Duration allowed to complete HTTP requests. 
+ # timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ` func (r *phpfpm) SampleConfig() string { @@ -96,8 +110,17 @@ func (g *phpfpm) Gather(acc telegraf.Accumulator) error { // Request status page to get stat raw data and import it func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { if g.client == nil { - client := &http.Client{} - g.client = client + tlsCfg, err := g.ClientConfig.TLSConfig() + if err != nil { + return err + } + tr := &http.Transport{ + TLSClientConfig: tlsCfg, + } + g.client = &http.Client{ + Transport: tr, + Timeout: g.Timeout.Duration, + } } if strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") { From 6a7d0c142eda31baeeb6f97427994cb573d69e94 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 16 Apr 2019 17:59:13 -0700 Subject: [PATCH 0769/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cbb6e4ff2..4b7a3c47e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ - [#5697](https://github.com/influxdata/telegraf/pull/5697): Add namespace restriction to prometheus input plugin. - [#5681](https://github.com/influxdata/telegraf/pull/5681): Add cmdline tag to procstat input. - [#5704](https://github.com/influxdata/telegraf/pull/5704): Support verbose query param in ping endpoint of influxdb_listener. +- [#5713](https://github.com/influxdata/telegraf/pull/5713): Enhance HTTP connection options for phpfpm input plugin. #### Bugfixes From 2faf37e5c104be15359164fef654f2d011789a42 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 17 Apr 2019 15:46:20 -0700 Subject: [PATCH 0770/1815] Fix docs for metric buffer limit to reflect current behavior (#5741) --- docs/CONFIGURATION.md | 5 +- etc/telegraf.conf | 61 ++++++++++++++++++++---- etc/telegraf_windows.conf | 5 +- internal/config/config.go | 5 +- plugins/inputs/vsphere/vsphere_test.go | 64 -------------------------- 5 files changed, 55 insertions(+), 85 deletions(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 9e016af62..edb334145 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -112,10 +112,7 @@ The agent table configures Telegraf and the defaults used across all plugins. This controls the size of writes that Telegraf sends to output plugins. - **metric_buffer_limit**: - For failed writes, telegraf will cache metric_buffer_limit metrics for each - output, and will flush this buffer on a successful write. Oldest metrics - are dropped first when this buffer fills. - This buffer only fills when writes fail to output plugin(s). + Maximum number of unwritten metrics per output. - **collection_jitter**: Collection jitter is used to jitter the collection by a random [interval][]. diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 4c3de469c..8e3264a84 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -9,9 +9,9 @@ # Use 'telegraf -config telegraf.conf -test' to see what metrics a config # file would generate. # -# Environment variables can be used anywhere in this config file, simply prepend -# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), -# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) +# Environment variables can be used anywhere in this config file, simply surround +# them with ${}. 
For strings the variable must be within quotes (ie, "${STR_VAR}"), +# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) # Global tags can be specified here in key="value" format. @@ -35,10 +35,7 @@ ## This controls the size of writes that Telegraf sends to output plugins. metric_batch_size = 1000 - ## For failed writes, telegraf will cache metric_buffer_limit metrics for each - ## output, and will flush this buffer on a successful write. Oldest metrics - ## are dropped first when this buffer fills. - ## This buffer only fills when writes fail to output plugin(s). + ## Maximum number of unwritten metrics per output. metric_buffer_limit = 10000 ## Collection jitter is used to jitter the collection by a random amount. @@ -1092,6 +1089,10 @@ # ## When true will convert all _ (underscore) characters in final metric name. default is true # #convert_paths = true # +# ## Use Strict rules to sanitize metric and tag names from invalid characters +# ## When enabled forward slash (/) and comma (,) will be accpeted +# #use_strict = false +# # ## Use Regex to sanitize metric and tag names from invalid characters # ## Regex is more thorough, but significantly slower. default is false # #use_regex = false @@ -1352,6 +1353,10 @@ # ## aggregator and will not get sent to the output plugins. # drop_original = false # +# ## If true, the histogram will be reset on flush instead +# ## of accumulating the results. +# reset = false +# # ## Example config that aggregates all fields of the metric. # # [[aggregators.histogram.config]] # # ## The set of buckets. @@ -1469,7 +1474,8 @@ # Read metrics about system load & uptime [[inputs.system]] - # no configuration + ## Uncomment to remove deprecated metrics. + # fielddrop = ["uptime_format"] # # Gather ActiveMQ metrics @@ -1586,6 +1592,15 @@ # tubes = ["notifications"] +# # Read BIND nameserver XML statistics +# [[inputs.bind]] +# ## An array of BIND XML statistics URI to gather stats. +# ## Default is "http://localhost:8053/xml/v3". +# # urls = ["http://localhost:8053/xml/v3"] +# # gather_memory_contexts = false +# # gather_views = false + + # # Collect bond interface status, slaves statuses and failures count # [[inputs.bond]] # ## Sets 'proc' directory path @@ -2151,6 +2166,18 @@ # ] +# # Gather repository information from GitHub hosted repositories. +# [[inputs.github]] +# ## List of repositories to monitor. +# repositories = ["influxdata/telegraf"] +# +# ## Github API access token. Unauthenticated requests are limited to 60 per hour. +# # access_token = "" +# +# ## Timeout for HTTP requests. +# # http_timeout = "5s" + + # # Read flattened metrics from one or more GrayLog HTTP endpoints # [[inputs.graylog]] # ## API endpoint, currently supported API: @@ -3260,9 +3287,19 @@ # ## "fcgi://10.0.0.12:9000/status" # ## "cgi://10.0.10.12:9001/status" # ## -# ## Example of multiple gathering from local socket and remove host +# ## Example of multiple gathering from local socket and remote host # ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] # urls = ["http://localhost/status"] +# +# ## Duration allowed to complete HTTP requests. +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false # # Ping given url(s) and return statistics @@ -3334,6 +3371,9 @@ # ## Field name prefix # # prefix = "" # +# ## When true add the full cmdline as a tag. 
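+# ## Distinct cmdlines create distinct series, so this may increase cardinality.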
+# # cmdline_tag = false +# # ## Add PID as a tag instead of a field; useful to differentiate between # ## processes whose tags are otherwise the same. Can create a large number # ## of series, use judiciously. @@ -4853,6 +4893,9 @@ # ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. # ## - prometheus.io/port: If port is not 9102 use this annotation # # monitor_kubernetes_pods = true +# ## Restricts Kubernetes monitoring to a single namespace +# ## ex: monitor_kubernetes_pods_namespace = "default" +# # monitor_kubernetes_pods_namespace = "" # # ## Use bearer token for authorization. ('bearer_token' takes priority) # # bearer_token = "/path/to/bearer/token" diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index f0bfbdba0..3263eea11 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -35,10 +35,7 @@ ## This controls the size of writes that Telegraf sends to output plugins. metric_batch_size = 1000 - ## For failed writes, telegraf will cache metric_buffer_limit metrics for each - ## output, and will flush this buffer on a successful write. Oldest metrics - ## are dropped first when this buffer fills. - ## This buffer only fills when writes fail to output plugin(s). + ## Maximum number of unwritten metrics per output. metric_buffer_limit = 10000 ## Collection jitter is used to jitter the collection by a random amount. diff --git a/internal/config/config.go b/internal/config/config.go index a0fc45a3c..4f747113f 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -234,10 +234,7 @@ var header = `# Telegraf Configuration ## This controls the size of writes that Telegraf sends to output plugins. metric_batch_size = 1000 - ## For failed writes, telegraf will cache metric_buffer_limit metrics for each - ## output, and will flush this buffer on a successful write. Oldest metrics - ## are dropped first when this buffer fills. - ## This buffer only fills when writes fail to output plugin(s). + ## Maximum number of unwritten metrics per output. metric_buffer_limit = 10000 ## Collection jitter is used to jitter the collection by a random amount. diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go index eff56a89d..73956b542 100644 --- a/plugins/inputs/vsphere/vsphere_test.go +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -25,83 +25,19 @@ import ( ) var configHeader = ` -# Telegraf Configuration -# -# Telegraf is entirely plugin driven. All metrics are gathered from the -# declared inputs, and sent to the declared outputs. -# -# Plugins must be declared in here to be active. -# To deactivate a plugin, comment out the name and any variables. -# -# Use 'telegraf -config telegraf.conf -test' to see what metrics a config -# file would generate. -# -# Environment variables can be used anywhere in this config file, simply prepend -# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), -# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) - - -# Global tags can be specified here in key="value" format. -[global_tags] - # dc = "us-east-1" # will tag all metrics with dc=us-east-1 - # rack = "1a" - ## Environment variables can be used as tags, and throughout the config file - # user = "$USER" - - -# Configuration for telegraf agent [agent] - ## Default data collection interval for all inputs interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. 
round_interval = true - - ## Telegraf will send metrics to outputs in batches of at most - ## metric_batch_size metrics. - ## This controls the size of writes that Telegraf sends to output plugins. metric_batch_size = 1000 - - ## For failed writes, telegraf will cache metric_buffer_limit metrics for each - ## output, and will flush this buffer on a successful write. Oldest metrics - ## are dropped first when this buffer fills. - ## This buffer only fills when writes fail to output plugin(s). metric_buffer_limit = 10000 - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. collection_jitter = "0s" - - ## Default flushing interval for all outputs. You shouldn't set this below - ## interval. Maximum flush_interval will be flush_interval + flush_jitter flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s flush_jitter = "0s" - - ## By default or when set to "0s", precision will be set to the same - ## timestamp order as the collection interval, with the maximum being 1s. - ## ie, when interval = "10s", precision will be "1s" - ## when interval = "250ms", precision will be "1ms" - ## Precision will NOT be used for service inputs. It is up to each individual - ## service input to set the timestamp at the appropriate precision. - ## Valid time units are "ns", "us" (or "µs"), "ms", "s". precision = "" - - ## Logging configuration: - ## Run telegraf with debug log messages. debug = false - ## Run telegraf in quiet mode (error log messages only). quiet = false - ## Specify the log file name. The empty string means to log to stderr. logfile = "" - - ## Override default hostname, if empty use os.Hostname() hostname = "" - ## If set to true, do no set the "host" tag in the telegraf agent. omit_hostname = false ` From bc95a2a2b50f33e5405d522d86463ba72e40b517 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 17 Apr 2019 15:47:03 -0700 Subject: [PATCH 0771/1815] Don't start telegraf when stale pidfile found (#5731) --- scripts/init.sh | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/scripts/init.sh b/scripts/init.sh index 67236d8c7..fc71536f9 100755 --- a/scripts/init.sh +++ b/scripts/init.sh @@ -120,13 +120,13 @@ confdir=/etc/telegraf/telegraf.d case $1 in start) # Checked the PID file exists and check the actual status of process - if [ -e $pidfile ]; then - pidofproc -p $pidfile $daemon > /dev/null 2>&1 && status="0" || status="$?" - # If the status is SUCCESS then don't need to start again. - if [ "x$status" = "x0" ]; then - log_failure_msg "$name process is running" - exit 0 # Exit - fi + if [ -e "$pidfile" ]; then + if pidofproc -p $pidfile $daemon > /dev/null; then + log_failure_msg "$name process is running" + else + log_failure_msg "$name pidfile has no corresponding process; ensure $name is stopped and remove $pidfile" + fi + exit 0 fi # Bump the file limits, before launching the daemon. These will carry over to @@ -150,8 +150,7 @@ case $1 in stop) # Stop the daemon. if [ -e $pidfile ]; then - pidofproc -p $pidfile $daemon > /dev/null 2>&1 && status="0" || status="$?" 
- if [ "$status" = 0 ]; then + if pidofproc -p $pidfile $daemon > /dev/null; then # periodically signal until process exists while true; do if ! pidofproc -p $pidfile $daemon > /dev/null; then @@ -172,8 +171,7 @@ case $1 in reload) # Reload the daemon. if [ -e $pidfile ]; then - pidofproc -p $pidfile $daemon > /dev/null 2>&1 && status="0" || status="$?" - if [ "$status" = 0 ]; then + if pidofproc -p $pidfile $daemon > /dev/null; then if killproc -p $pidfile SIGHUP; then log_success_msg "$name process was reloaded" else From 58a6209a76c9d6c8ad220deec763ae7d875393de Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 17 Apr 2019 15:49:22 -0700 Subject: [PATCH 0772/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b7a3c47e..40faa609c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ #### Bugfixes - [#5631](https://github.com/influxdata/telegraf/pull/5631): Create Windows service only when specified or in service manager. +- [#5730](https://github.com/influxdata/telegraf/pull/5730): Don't start telegraf when stale pidfile found. ## v1.10.3 [2019-04-16] From e3348304584eb89d04238b79b81561caca399c7f Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 22 Apr 2019 18:36:46 -0600 Subject: [PATCH 0773/1815] Collect cloudwatch stats using GetMetricData (#5544) --- plugins/inputs/cloudwatch/README.md | 84 ++- plugins/inputs/cloudwatch/cloudwatch.go | 616 ++++++++++++------- plugins/inputs/cloudwatch/cloudwatch_test.go | 181 ++++-- 3 files changed, 582 insertions(+), 299 deletions(-) diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md index dfb5bf95d..fab3cc295 100644 --- a/plugins/inputs/cloudwatch/README.md +++ b/plugins/inputs/cloudwatch/README.md @@ -17,7 +17,7 @@ API endpoint. In the following order the plugin will attempt to authenticate. ```toml [[inputs.cloudwatch]] - ## Amazon Region (required) + ## Amazon Region region = "us-east-1" ## Amazon Credentials @@ -28,12 +28,12 @@ API endpoint. In the following order the plugin will attempt to authenticate. ## 4) environment variables ## 5) shared credentials file ## 6) EC2 Instance Profile - #access_key = "" - #secret_key = "" - #token = "" - #role_arn = "" - #profile = "" - #shared_credential_file = "" + # access_key = "" + # secret_key = "" + # token = "" + # role_arn = "" + # profile = "" + # shared_credential_file = "" ## Endpoint to make request against, the correct endpoint is automatically ## determined and this option should only be set if you wish to override the @@ -54,32 +54,43 @@ API endpoint. In the following order the plugin will attempt to authenticate. ## Collection Delay (required - must account for metrics availability via CloudWatch API) delay = "5m" - ## Override global run interval (optional - defaults to global interval) - ## Recomended: use metric 'interval' that is a multiple of 'period' to avoid + ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid ## gaps or overlap in pulled data interval = "5m" + ## Configure the TTL for the internal cache of metrics. + # cache_ttl = "1h" + ## Metric Statistic Namespace (required) namespace = "AWS/ELB" ## Maximum requests per second. Note that the global default AWS rate limit is - ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a - ## maximum of 400. Optional - default value is 200. 
+ ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a + ## maximum of 50. ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html - ratelimit = 200 + # ratelimit = 25 - ## Metrics to Pull (optional) + ## Namespace-wide statistic filters. These allow fewer queries to be made to + ## cloudwatch. + # statistic_include = [ "average", "sum", minimum", "maximum", sample_count" ] + # statistic_exclude = [] + + ## Metrics to Pull ## Defaults to all Metrics in Namespace if nothing is provided ## Refreshes Namespace available metrics every 1h - [[inputs.cloudwatch.metrics]] - names = ["Latency", "RequestCount"] - - ## Dimension filters for Metric. These are optional however all dimensions - ## defined for the metric names must be specified in order to retrieve - ## the metric statistics. - [[inputs.cloudwatch.metrics.dimensions]] - name = "LoadBalancerName" - value = "p-example" + #[[inputs.cloudwatch.metrics]] + # names = ["Latency", "RequestCount"] + # + # ## Statistic filters for Metric. These allow for retrieving specific + # ## statistics for an individual metric. + # # statistic_include = [ "average", "sum", minimum", "maximum", sample_count" ] + # # statistic_exclude = [] + # + # ## Dimension filters for Metric. All dimensions defined for the metric names + # ## must be specified in order to retrieve the metric statistics. + # [[inputs.cloudwatch.metrics.dimensions]] + # name = "LoadBalancerName" + # value = "p-example" ``` #### Requirements and Terminology @@ -97,17 +108,21 @@ wildcard dimension is ignored. Example: ``` -[[inputs.cloudwatch.metrics]] - names = ["Latency"] +[[inputs.cloudwatch]] + period = "1m" + interval = "5m" - ## Dimension filters for Metric (optional) - [[inputs.cloudwatch.metrics.dimensions]] - name = "LoadBalancerName" - value = "p-example" + [[inputs.cloudwatch.metrics]] + names = ["Latency"] - [[inputs.cloudwatch.metrics.dimensions]] - name = "AvailabilityZone" - value = "*" + ## Dimension filters for Metric (optional) + [[inputs.cloudwatch.metrics.dimensions]] + name = "LoadBalancerName" + value = "p-example" + + [[inputs.cloudwatch.metrics.dimensions]] + name = "AvailabilityZone" + value = "*" ``` If the following ELBs are available: @@ -124,9 +139,11 @@ Then 2 metrics will be output: If the `AvailabilityZone` wildcard dimension was omitted, then a single metric (name: `p-example`) would be exported containing the aggregate values of the ELB across availability zones. +To maximize efficiency and savings, consider making fewer requests by increasing `interval` but keeping `period` at the duration you would like metrics to be reported. The above example will request metrics from Cloudwatch every 5 minutes but will output five metrics timestamped one minute apart. + #### Restrictions and Limitations - CloudWatch metrics are not available instantly via the CloudWatch API. 
You should adjust your collection `delay` to account for this lag in metrics availability based on your [monitoring subscription level](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html) -- CloudWatch API usage incurs cost - see [GetMetricStatistics Pricing](https://aws.amazon.com/cloudwatch/pricing/) +- CloudWatch API usage incurs cost - see [GetMetricData Pricing](https://aws.amazon.com/cloudwatch/pricing/) ### Measurements & Fields: @@ -147,7 +164,6 @@ Tag Dimension names are represented in [snake case](https://en.wikipedia.org/wik - All measurements have the following tags: - region (CloudWatch Region) - - unit (CloudWatch Metric Unit) - {dimension-name} (Cloudwatch Dimension value - one for each metric dimension) ### Troubleshooting: @@ -168,5 +184,5 @@ aws cloudwatch get-metric-statistics --namespace AWS/EC2 --region us-east-1 --pe ``` $ ./telegraf --config telegraf.conf --input-filter cloudwatch --test -> cloudwatch_aws_elb,load_balancer_name=p-example,region=us-east-1,unit=seconds latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875,latency_sample_count=4029,latency_sum=19.382705211639404 1459542420000000000 +> cloudwatch_aws_elb,load_balancer_name=p-example,region=us-east-1 latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875,latency_sample_count=4029,latency_sum=19.382705211639404 1459542420000000000 ``` diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index 626511e2f..4b6469e2d 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -1,67 +1,83 @@ package cloudwatch import ( + "errors" "fmt" + "strconv" "strings" "sync" "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatch" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" internalaws "github.com/influxdata/telegraf/internal/config/aws" "github.com/influxdata/telegraf/internal/limiter" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/inputs" ) type ( + // CloudWatch contains the configuration and cache for the cloudwatch plugin. 
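+	// A single API client is created lazily on the first Gather call and
+	// reused; listed metrics and generated queries are cached for CacheTTL.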
CloudWatch struct { - Region string `toml:"region"` - AccessKey string `toml:"access_key"` - SecretKey string `toml:"secret_key"` - RoleARN string `toml:"role_arn"` - Profile string `toml:"profile"` - Filename string `toml:"shared_credential_file"` - Token string `toml:"token"` - EndpointURL string `toml:"endpoint_url"` + Region string `toml:"region"` + AccessKey string `toml:"access_key"` + SecretKey string `toml:"secret_key"` + RoleARN string `toml:"role_arn"` + Profile string `toml:"profile"` + CredentialPath string `toml:"shared_credential_file"` + Token string `toml:"token"` + EndpointURL string `toml:"endpoint_url"` + StatisticExclude []string `toml:"statistic_exclude"` + StatisticInclude []string `toml:"statistic_include"` - Period internal.Duration `toml:"period"` - Delay internal.Duration `toml:"delay"` - Namespace string `toml:"namespace"` - Metrics []*Metric `toml:"metrics"` - CacheTTL internal.Duration `toml:"cache_ttl"` - RateLimit int `toml:"ratelimit"` - client cloudwatchClient - metricCache *MetricCache - windowStart time.Time - windowEnd time.Time + Period internal.Duration `toml:"period"` + Delay internal.Duration `toml:"delay"` + Namespace string `toml:"namespace"` + Metrics []*Metric `toml:"metrics"` + CacheTTL internal.Duration `toml:"cache_ttl"` + RateLimit int `toml:"ratelimit"` + + client cloudwatchClient + statFilter filter.Filter + metricCache *metricCache + queryDimensions map[string]*map[string]string + windowStart time.Time + windowEnd time.Time } + // Metric defines a simplified Cloudwatch metric. Metric struct { - MetricNames []string `toml:"names"` - Dimensions []*Dimension `toml:"dimensions"` + StatisticExclude *[]string `toml:"statistic_exclude"` + StatisticInclude *[]string `toml:"statistic_include"` + MetricNames []string `toml:"names"` + Dimensions []*Dimension `toml:"dimensions"` } + // Dimension defines a simplified Cloudwatch dimension (provides metric filtering). Dimension struct { Name string `toml:"name"` Value string `toml:"value"` } - MetricCache struct { - TTL time.Duration - Fetched time.Time - Metrics []*cloudwatch.Metric + // metricCache caches metrics, their filters, and generated queries. + metricCache struct { + ttl time.Duration + built time.Time + metrics []filteredMetric + queries []*cloudwatch.MetricDataQuery } cloudwatchClient interface { ListMetrics(*cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) - GetMetricStatistics(*cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) + GetMetricData(*cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) } ) +// SampleConfig returns the default configuration of the Cloudwatch input plugin. func (c *CloudWatch) SampleConfig() string { return ` ## Amazon Region @@ -75,12 +91,12 @@ func (c *CloudWatch) SampleConfig() string { ## 4) environment variables ## 5) shared credentials file ## 6) EC2 Instance Profile - #access_key = "" - #secret_key = "" - #token = "" - #role_arn = "" - #profile = "" - #shared_credential_file = "" + # access_key = "" + # secret_key = "" + # token = "" + # role_arn = "" + # profile = "" + # shared_credential_file = "" ## Endpoint to make request against, the correct endpoint is automatically ## determined and this option should only be set if you wish to override the @@ -106,44 +122,155 @@ func (c *CloudWatch) SampleConfig() string { interval = "5m" ## Configure the TTL for the internal cache of metrics. 
- ## Defaults to 1 hr if not specified - #cache_ttl = "10m" + # cache_ttl = "1h" ## Metric Statistic Namespace (required) namespace = "AWS/ELB" ## Maximum requests per second. Note that the global default AWS rate limit is - ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a - ## maximum of 400. Optional - default value is 200. + ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a + ## maximum of 50. ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html - ratelimit = 200 + # ratelimit = 25 - ## Metrics to Pull (optional) + ## Namespace-wide statistic filters. These allow fewer queries to be made to + ## cloudwatch. + # statistic_include = [ "average", "sum", minimum", "maximum", sample_count" ] + # statistic_exclude = [] + + ## Metrics to Pull ## Defaults to all Metrics in Namespace if nothing is provided ## Refreshes Namespace available metrics every 1h #[[inputs.cloudwatch.metrics]] # names = ["Latency", "RequestCount"] # - # ## Dimension filters for Metric. These are optional however all dimensions - # ## defined for the metric names must be specified in order to retrieve - # ## the metric statistics. + # ## Statistic filters for Metric. These allow for retrieving specific + # ## statistics for an individual metric. + # # statistic_include = [ "average", "sum", minimum", "maximum", sample_count" ] + # # statistic_exclude = [] + # + # ## Dimension filters for Metric. All dimensions defined for the metric names + # ## must be specified in order to retrieve the metric statistics. # [[inputs.cloudwatch.metrics.dimensions]] # name = "LoadBalancerName" # value = "p-example" ` } +// Description returns a one-sentence description on the Cloudwatch input plugin. func (c *CloudWatch) Description() string { return "Pull Metric Statistics from Amazon CloudWatch" } -func SelectMetrics(c *CloudWatch) ([]*cloudwatch.Metric, error) { - var metrics []*cloudwatch.Metric +// Gather takes in an accumulator and adds the metrics that the Input +// gathers. This is called every "interval". +func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { + if c.statFilter == nil { + var err error + // Set config level filter (won't change throughout life of plugin). + c.statFilter, err = filter.NewIncludeExcludeFilter(c.StatisticInclude, c.StatisticExclude) + if err != nil { + return err + } + } + + if c.client == nil { + c.initializeCloudWatch() + } + + filteredMetrics, err := getFilteredMetrics(c) + if err != nil { + return err + } + + err = c.updateWindow(time.Now()) + if err != nil { + return err + } + + // Get all of the possible queries so we can send groups of 100. + queries, err := c.getDataQueries(filteredMetrics) + if err != nil { + return err + } + + // Limit concurrency or we can easily exhaust user connection limit. + // See cloudwatch API request limits: + // http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html + lmtr := limiter.NewRateLimiter(c.RateLimit, time.Second) + defer lmtr.Stop() + wg := sync.WaitGroup{} + rLock := sync.Mutex{} + + results := []*cloudwatch.MetricDataResult{} + + // 100 is the maximum number of metric data queries a `GetMetricData` request can contain. 
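+	// Queries are sliced into batches of at most that size, and each batch is
+	// fetched concurrently, gated by the rate limiter above.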
+ batchSize := 100 + var batches [][]*cloudwatch.MetricDataQuery + + for batchSize < len(queries) { + queries, batches = queries[batchSize:], append(batches, queries[0:batchSize:batchSize]) + } + batches = append(batches, queries) + + for i := range batches { + wg.Add(1) + <-lmtr.C + go func(inm []*cloudwatch.MetricDataQuery) { + defer wg.Done() + result, err := c.gatherMetrics(c.getDataInputs(inm)) + if err != nil { + acc.AddError(err) + return + } + + rLock.Lock() + results = append(results, result...) + rLock.Unlock() + }(batches[i]) + } + + wg.Wait() + + return c.aggregateMetrics(acc, results) +} + +func (c *CloudWatch) initializeCloudWatch() error { + credentialConfig := &internalaws.CredentialConfig{ + Region: c.Region, + AccessKey: c.AccessKey, + SecretKey: c.SecretKey, + RoleARN: c.RoleARN, + Profile: c.Profile, + Filename: c.CredentialPath, + Token: c.Token, + EndpointURL: c.EndpointURL, + } + configProvider := credentialConfig.Credentials() + + cfg := &aws.Config{} + loglevel := aws.LogOff + c.client = cloudwatch.New(configProvider, cfg.WithLogLevel(loglevel)) + return nil +} + +type filteredMetric struct { + metrics []*cloudwatch.Metric + statFilter filter.Filter +} + +// getFilteredMetrics returns metrics specified in the config file or metrics listed from Cloudwatch. +func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) { + if c.metricCache != nil && c.metricCache.isValid() { + return c.metricCache.metrics, nil + } + + fMetrics := []filteredMetric{} // check for provided metric filter if c.Metrics != nil { - metrics = []*cloudwatch.Metric{} for _, m := range c.Metrics { + metrics := []*cloudwatch.Metric{} if !hasWilcard(m.Dimensions) { dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions)) for k, d := range m.Dimensions { @@ -176,51 +303,71 @@ func SelectMetrics(c *CloudWatch) ([]*cloudwatch.Metric, error) { } } } + + if m.StatisticExclude == nil { + m.StatisticExclude = &c.StatisticExclude + } + if m.StatisticInclude == nil { + m.StatisticInclude = &c.StatisticInclude + } + statFilter, err := filter.NewIncludeExcludeFilter(*m.StatisticInclude, *m.StatisticExclude) + if err != nil { + return nil, err + } + + fMetrics = append(fMetrics, filteredMetric{ + metrics: metrics, + statFilter: statFilter, + }) } } else { - var err error - metrics, err = c.fetchNamespaceMetrics() + metrics, err := c.fetchNamespaceMetrics() if err != nil { return nil, err } + + fMetrics = []filteredMetric{{ + metrics: metrics, + statFilter: c.statFilter, + }} } - return metrics, nil + + c.metricCache = &metricCache{ + metrics: fMetrics, + built: time.Now(), + ttl: c.CacheTTL.Duration, + } + + return fMetrics, nil } -func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { - if c.client == nil { - c.initializeCloudWatch() +// fetchNamespaceMetrics retrieves available metrics for a given CloudWatch namespace. +func (c *CloudWatch) fetchNamespaceMetrics() ([]*cloudwatch.Metric, error) { + metrics := []*cloudwatch.Metric{} + + var token *string + params := &cloudwatch.ListMetricsInput{ + Namespace: aws.String(c.Namespace), + Dimensions: []*cloudwatch.DimensionFilter{}, + NextToken: token, + MetricName: nil, } - metrics, err := SelectMetrics(c) - if err != nil { - return err + for { + resp, err := c.client.ListMetrics(params) + if err != nil { + return nil, err + } + + metrics = append(metrics, resp.Metrics...) 
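+		// Keep paging until ListMetrics stops returning a continuation token.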
+ if resp.NextToken == nil { + break + } + + params.NextToken = resp.NextToken } - now := time.Now() - - err = c.updateWindow(now) - if err != nil { - return err - } - - // limit concurrency or we can easily exhaust user connection limit - // see cloudwatch API request limits: - // http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html - lmtr := limiter.NewRateLimiter(c.RateLimit, time.Second) - defer lmtr.Stop() - var wg sync.WaitGroup - wg.Add(len(metrics)) - for _, m := range metrics { - <-lmtr.C - go func(inm *cloudwatch.Metric) { - defer wg.Done() - acc.AddError(c.gatherMetric(acc, inm)) - }(m) - } - wg.Wait() - - return nil + return metrics, nil } func (c *CloudWatch) updateWindow(relativeTo time.Time) error { @@ -239,168 +386,197 @@ func (c *CloudWatch) updateWindow(relativeTo time.Time) error { return nil } +// getDataQueries gets all of the possible queries so we can maximize the request payload. +func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) ([]*cloudwatch.MetricDataQuery, error) { + if c.metricCache != nil && c.metricCache.queries != nil && c.metricCache.isValid() { + return c.metricCache.queries, nil + } + + c.queryDimensions = map[string]*map[string]string{} + + dataQueries := []*cloudwatch.MetricDataQuery{} + for i, filtered := range filteredMetrics { + for j, metric := range filtered.metrics { + id := strconv.Itoa(j) + "_" + strconv.Itoa(i) + dimension := ctod(metric.Dimensions) + if filtered.statFilter.Match("average") { + c.queryDimensions["average_"+id] = dimension + dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + Id: aws.String("average_" + id), + Label: aws.String(snakeCase(*metric.MetricName + "_average")), + MetricStat: &cloudwatch.MetricStat{ + Metric: metric, + Period: aws.Int64(int64(c.Period.Duration.Seconds())), + Stat: aws.String(cloudwatch.StatisticAverage), + }, + }) + } + if filtered.statFilter.Match("maximum") { + c.queryDimensions["maximum_"+id] = dimension + dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + Id: aws.String("maximum_" + id), + Label: aws.String(snakeCase(*metric.MetricName + "_maximum")), + MetricStat: &cloudwatch.MetricStat{ + Metric: metric, + Period: aws.Int64(int64(c.Period.Duration.Seconds())), + Stat: aws.String(cloudwatch.StatisticMaximum), + }, + }) + } + if filtered.statFilter.Match("minimum") { + c.queryDimensions["minimum_"+id] = dimension + dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + Id: aws.String("minimum_" + id), + Label: aws.String(snakeCase(*metric.MetricName + "_minimum")), + MetricStat: &cloudwatch.MetricStat{ + Metric: metric, + Period: aws.Int64(int64(c.Period.Duration.Seconds())), + Stat: aws.String(cloudwatch.StatisticMinimum), + }, + }) + } + if filtered.statFilter.Match("sum") { + c.queryDimensions["sum_"+id] = dimension + dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + Id: aws.String("sum_" + id), + Label: aws.String(snakeCase(*metric.MetricName + "_sum")), + MetricStat: &cloudwatch.MetricStat{ + Metric: metric, + Period: aws.Int64(int64(c.Period.Duration.Seconds())), + Stat: aws.String(cloudwatch.StatisticSum), + }, + }) + } + if filtered.statFilter.Match("sample_count") { + c.queryDimensions["sample_count_"+id] = dimension + dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + Id: aws.String("sample_count_" + id), + Label: aws.String(snakeCase(*metric.MetricName + "_sample_count")), + MetricStat: &cloudwatch.MetricStat{ + Metric: metric, + Period: 
aws.Int64(int64(c.Period.Duration.Seconds())), + Stat: aws.String(cloudwatch.StatisticSampleCount), + }, + }) + } + } + } + + if len(dataQueries) == 0 { + return nil, errors.New("no metrics found to collect") + } + + if c.metricCache == nil { + c.metricCache = &metricCache{ + queries: dataQueries, + built: time.Now(), + ttl: c.CacheTTL.Duration, + } + } else { + c.metricCache.queries = dataQueries + } + + return dataQueries, nil +} + +// gatherMetrics gets metric data from Cloudwatch. +func (c *CloudWatch) gatherMetrics( + params *cloudwatch.GetMetricDataInput, +) ([]*cloudwatch.MetricDataResult, error) { + results := []*cloudwatch.MetricDataResult{} + + for { + resp, err := c.client.GetMetricData(params) + if err != nil { + return nil, fmt.Errorf("failed to get metric data: %v", err) + } + + results = append(results, resp.MetricDataResults...) + if resp.NextToken == nil { + break + } + params.NextToken = resp.NextToken + } + + return results, nil +} + +func (c *CloudWatch) aggregateMetrics( + acc telegraf.Accumulator, + metricDataResults []*cloudwatch.MetricDataResult, +) error { + var ( + grouper = metric.NewSeriesGrouper() + namespace = sanitizeMeasurement(c.Namespace) + ) + + for _, result := range metricDataResults { + tags := map[string]string{} + + if dimensions, ok := c.queryDimensions[*result.Id]; ok { + tags = *dimensions + } + tags["region"] = c.Region + + for i := range result.Values { + grouper.Add(namespace, tags, *result.Timestamps[i], *result.Label, *result.Values[i]) + } + } + + for _, metric := range grouper.Metrics() { + acc.AddMetric(metric) + } + + return nil +} + func init() { inputs.Add("cloudwatch", func() telegraf.Input { - ttl, _ := time.ParseDuration("1hr") return &CloudWatch{ - CacheTTL: internal.Duration{Duration: ttl}, - RateLimit: 200, + CacheTTL: internal.Duration{Duration: time.Hour}, + RateLimit: 25, } }) } -/* - * Initialize CloudWatch client - */ -func (c *CloudWatch) initializeCloudWatch() error { - credentialConfig := &internalaws.CredentialConfig{ - Region: c.Region, - AccessKey: c.AccessKey, - SecretKey: c.SecretKey, - RoleARN: c.RoleARN, - Profile: c.Profile, - Filename: c.Filename, - Token: c.Token, - EndpointURL: c.EndpointURL, - } - configProvider := credentialConfig.Credentials() - - c.client = cloudwatch.New(configProvider) - return nil -} - -/* - * Fetch available metrics for given CloudWatch Namespace - */ -func (c *CloudWatch) fetchNamespaceMetrics() ([]*cloudwatch.Metric, error) { - if c.metricCache != nil && c.metricCache.IsValid() { - return c.metricCache.Metrics, nil - } - - metrics := []*cloudwatch.Metric{} - - var token *string - for more := true; more; { - params := &cloudwatch.ListMetricsInput{ - Namespace: aws.String(c.Namespace), - Dimensions: []*cloudwatch.DimensionFilter{}, - NextToken: token, - MetricName: nil, - } - - resp, err := c.client.ListMetrics(params) - if err != nil { - return nil, err - } - - metrics = append(metrics, resp.Metrics...) 
- - token = resp.NextToken - more = token != nil - } - - c.metricCache = &MetricCache{ - Metrics: metrics, - Fetched: time.Now(), - TTL: c.CacheTTL.Duration, - } - - return metrics, nil -} - -/* - * Gather given Metric and emit any error - */ -func (c *CloudWatch) gatherMetric( - acc telegraf.Accumulator, - metric *cloudwatch.Metric, -) error { - params := c.getStatisticsInput(metric) - resp, err := c.client.GetMetricStatistics(params) - if err != nil { - return err - } - - for _, point := range resp.Datapoints { - tags := map[string]string{ - "region": c.Region, - "unit": snakeCase(*point.Unit), - } - - for _, d := range metric.Dimensions { - tags[snakeCase(*d.Name)] = *d.Value - } - - // record field for each statistic - fields := map[string]interface{}{} - - if point.Average != nil { - fields[formatField(*metric.MetricName, cloudwatch.StatisticAverage)] = *point.Average - } - if point.Maximum != nil { - fields[formatField(*metric.MetricName, cloudwatch.StatisticMaximum)] = *point.Maximum - } - if point.Minimum != nil { - fields[formatField(*metric.MetricName, cloudwatch.StatisticMinimum)] = *point.Minimum - } - if point.SampleCount != nil { - fields[formatField(*metric.MetricName, cloudwatch.StatisticSampleCount)] = *point.SampleCount - } - if point.Sum != nil { - fields[formatField(*metric.MetricName, cloudwatch.StatisticSum)] = *point.Sum - } - - acc.AddFields(formatMeasurement(c.Namespace), fields, tags, *point.Timestamp) - } - - return nil -} - -/* - * Formatting helpers - */ -func formatField(metricName string, statistic string) string { - return fmt.Sprintf("%s_%s", snakeCase(metricName), snakeCase(statistic)) -} - -func formatMeasurement(namespace string) string { +func sanitizeMeasurement(namespace string) string { namespace = strings.Replace(namespace, "/", "_", -1) namespace = snakeCase(namespace) - return fmt.Sprintf("cloudwatch_%s", namespace) + return "cloudwatch_" + namespace } func snakeCase(s string) string { s = internal.SnakeCase(s) + s = strings.Replace(s, " ", "_", -1) s = strings.Replace(s, "__", "_", -1) return s } -/* - * Map Metric to *cloudwatch.GetMetricStatisticsInput for given timeframe - */ -func (c *CloudWatch) getStatisticsInput(metric *cloudwatch.Metric) *cloudwatch.GetMetricStatisticsInput { - input := &cloudwatch.GetMetricStatisticsInput{ - StartTime: aws.Time(c.windowStart), - EndTime: aws.Time(c.windowEnd), - MetricName: metric.MetricName, - Namespace: metric.Namespace, - Period: aws.Int64(int64(c.Period.Duration.Seconds())), - Dimensions: metric.Dimensions, - Statistics: []*string{ - aws.String(cloudwatch.StatisticAverage), - aws.String(cloudwatch.StatisticMaximum), - aws.String(cloudwatch.StatisticMinimum), - aws.String(cloudwatch.StatisticSum), - aws.String(cloudwatch.StatisticSampleCount)}, - } - return input +type dimension struct { + name string + value string } -/* - * Check Metric Cache validity - */ -func (c *MetricCache) IsValid() bool { - return c.Metrics != nil && time.Since(c.Fetched) < c.TTL +// ctod converts cloudwatch dimensions to regular dimensions. 
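+// The keys are snake_cased dimension names, so the result can be used
+// directly as metric tags.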
+func ctod(cDimensions []*cloudwatch.Dimension) *map[string]string { + dimensions := map[string]string{} + for i := range cDimensions { + dimensions[snakeCase(*cDimensions[i].Name)] = *cDimensions[i].Value + } + return &dimensions +} + +func (c *CloudWatch) getDataInputs(dataQueries []*cloudwatch.MetricDataQuery) *cloudwatch.GetMetricDataInput { + return &cloudwatch.GetMetricDataInput{ + StartTime: aws.Time(c.windowStart), + EndTime: aws.Time(c.windowEnd), + MetricDataQueries: dataQueries, + } +} + +// isValid checks the validity of the metric cache. +func (f *metricCache) isValid() bool { + return f.metrics != nil && time.Since(f.built) < f.ttl } func hasWilcard(dimensions []*Dimension) bool { diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index 9449cbead..f28473a57 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -6,46 +6,98 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) type mockGatherCloudWatchClient struct{} func (m *mockGatherCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) { - metric := &cloudwatch.Metric{ - Namespace: params.Namespace, - MetricName: aws.String("Latency"), - Dimensions: []*cloudwatch.Dimension{ + return &cloudwatch.ListMetricsOutput{ + Metrics: []*cloudwatch.Metric{ { - Name: aws.String("LoadBalancerName"), - Value: aws.String("p-example"), + Namespace: params.Namespace, + MetricName: aws.String("Latency"), + Dimensions: []*cloudwatch.Dimension{ + { + Name: aws.String("LoadBalancerName"), + Value: aws.String("p-example"), + }, + }, }, }, - } - - result := &cloudwatch.ListMetricsOutput{ - Metrics: []*cloudwatch.Metric{metric}, - } - return result, nil + }, nil } -func (m *mockGatherCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) { - dataPoint := &cloudwatch.Datapoint{ - Timestamp: params.EndTime, - Minimum: aws.Float64(0.1), - Maximum: aws.Float64(0.3), - Average: aws.Float64(0.2), - Sum: aws.Float64(123), - SampleCount: aws.Float64(100), - Unit: aws.String("Seconds"), - } - result := &cloudwatch.GetMetricStatisticsOutput{ - Label: aws.String("Latency"), - Datapoints: []*cloudwatch.Datapoint{dataPoint}, - } - return result, nil +func (m *mockGatherCloudWatchClient) GetMetricData(params *cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) { + return &cloudwatch.GetMetricDataOutput{ + MetricDataResults: []*cloudwatch.MetricDataResult{ + { + Id: aws.String("minimum_0_0"), + Label: aws.String("latency_minimum"), + StatusCode: aws.String("completed"), + Timestamps: []*time.Time{ + params.EndTime, + }, + Values: []*float64{ + aws.Float64(0.1), + }, + }, + { + Id: aws.String("maximum_0_0"), + Label: aws.String("latency_maximum"), + StatusCode: aws.String("completed"), + Timestamps: []*time.Time{ + params.EndTime, + }, + Values: []*float64{ + aws.Float64(0.3), + }, + }, + { + Id: aws.String("average_0_0"), + Label: aws.String("latency_average"), + StatusCode: aws.String("completed"), + Timestamps: []*time.Time{ + params.EndTime, + }, + Values: []*float64{ + aws.Float64(0.2), + }, + }, + { + Id: 
aws.String("sum_0_0"), + Label: aws.String("latency_sum"), + StatusCode: aws.String("completed"), + Timestamps: []*time.Time{ + params.EndTime, + }, + Values: []*float64{ + aws.Float64(123), + }, + }, + { + Id: aws.String("sample_count_0_0"), + Label: aws.String("latency_sample_count"), + StatusCode: aws.String("completed"), + Timestamps: []*time.Time{ + params.EndTime, + }, + Values: []*float64{ + aws.Float64(100), + }, + }, + }, + }, nil +} + +func TestSnakeCase(t *testing.T) { + assert.Equal(t, "cluster_name", snakeCase("Cluster Name")) + assert.Equal(t, "broker_id", snakeCase("Broker ID")) } func TestGather(t *testing.T) { @@ -64,7 +116,7 @@ func TestGather(t *testing.T) { var acc testutil.Accumulator c.client = &mockGatherCloudWatchClient{} - acc.GatherError(c.Gather) + assert.NoError(t, acc.GatherError(c.Gather)) fields := map[string]interface{}{} fields["latency_minimum"] = 0.1 @@ -74,13 +126,11 @@ func TestGather(t *testing.T) { fields["latency_sample_count"] = 100.0 tags := map[string]string{} - tags["unit"] = "seconds" tags["region"] = "us-east-1" tags["load_balancer_name"] = "p-example" assert.True(t, acc.HasMeasurement("cloudwatch_aws_elb")) acc.AssertContainsTaggedFields(t, "cloudwatch_aws_elb", fields, tags) - } type mockSelectMetricsCloudWatchClient struct{} @@ -132,7 +182,7 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListM return result, nil } -func (m *mockSelectMetricsCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) { +func (m *mockSelectMetricsCloudWatchClient) GetMetricData(params *cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) { return nil, nil } @@ -164,10 +214,10 @@ func TestSelectMetrics(t *testing.T) { }, } c.client = &mockSelectMetricsCloudWatchClient{} - metrics, err := SelectMetrics(c) + filtered, err := getFilteredMetrics(c) // We've asked for 2 (out of 4) metrics, over all 3 load balancers in all 2 // AZs. We should get 12 metrics. 
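	// (2 metric names x 3 load balancers x 2 availability zones = 12)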
- assert.Equal(t, 12, len(metrics)) + assert.Equal(t, 12, len(filtered[0].metrics)) assert.Nil(t, err) } @@ -199,25 +249,66 @@ func TestGenerateStatisticsInputParams(t *testing.T) { c.updateWindow(now) - params := c.getStatisticsInput(m) + statFilter, _ := filter.NewIncludeExcludeFilter(nil, nil) + queries, _ := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}}) + params := c.getDataInputs(queries) assert.EqualValues(t, *params.EndTime, now.Add(-c.Delay.Duration)) assert.EqualValues(t, *params.StartTime, now.Add(-c.Period.Duration).Add(-c.Delay.Duration)) - assert.Len(t, params.Dimensions, 1) - assert.Len(t, params.Statistics, 5) - assert.EqualValues(t, *params.Period, 60) + require.Len(t, params.MetricDataQueries, 5) + assert.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1) + assert.EqualValues(t, *params.MetricDataQueries[0].MetricStat.Period, 60) +} + +func TestGenerateStatisticsInputParamsFiltered(t *testing.T) { + d := &cloudwatch.Dimension{ + Name: aws.String("LoadBalancerName"), + Value: aws.String("p-example"), + } + + m := &cloudwatch.Metric{ + MetricName: aws.String("Latency"), + Dimensions: []*cloudwatch.Dimension{d}, + } + + duration, _ := time.ParseDuration("1m") + internalDuration := internal.Duration{ + Duration: duration, + } + + c := &CloudWatch{ + Namespace: "AWS/ELB", + Delay: internalDuration, + Period: internalDuration, + } + + c.initializeCloudWatch() + + now := time.Now() + + c.updateWindow(now) + + statFilter, _ := filter.NewIncludeExcludeFilter([]string{"average", "sample_count"}, nil) + queries, _ := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}}) + params := c.getDataInputs(queries) + + assert.EqualValues(t, *params.EndTime, now.Add(-c.Delay.Duration)) + assert.EqualValues(t, *params.StartTime, now.Add(-c.Period.Duration).Add(-c.Delay.Duration)) + require.Len(t, params.MetricDataQueries, 2) + assert.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1) + assert.EqualValues(t, *params.MetricDataQueries[0].MetricStat.Period, 60) } func TestMetricsCacheTimeout(t *testing.T) { - cache := &MetricCache{ - Metrics: []*cloudwatch.Metric{}, - Fetched: time.Now(), - TTL: time.Minute, + cache := &metricCache{ + metrics: []filteredMetric{}, + built: time.Now(), + ttl: time.Minute, } - assert.True(t, cache.IsValid()) - cache.Fetched = time.Now().Add(-time.Minute) - assert.False(t, cache.IsValid()) + assert.True(t, cache.isValid()) + cache.built = time.Now().Add(-time.Minute) + assert.False(t, cache.isValid()) } func TestUpdateWindow(t *testing.T) { From c7b4f9feaafc9ba436a0df201b926b974af55cb9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 22 Apr 2019 17:39:11 -0700 Subject: [PATCH 0774/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 40faa609c..3fea83579 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,8 @@ - [#5681](https://github.com/influxdata/telegraf/pull/5681): Add cmdline tag to procstat input. - [#5704](https://github.com/influxdata/telegraf/pull/5704): Support verbose query param in ping endpoint of influxdb_listener. - [#5713](https://github.com/influxdata/telegraf/pull/5713): Enhance HTTP connection options for phpfpm input plugin. +- [#5544](https://github.com/influxdata/telegraf/pull/5544): Use more efficient GetMetricData API to collect cloudwatch metrics. 
+- [#5544](https://github.com/influxdata/telegraf/pull/5544): Allow selection of collected statistic types in cloudwatch input. #### Bugfixes From 17a772d7ae5dd5f0de38bea30e90ff599bdbb0ff Mon Sep 17 00:00:00 2001 From: Pavel Sviderski Date: Tue, 23 Apr 2019 10:40:55 +1000 Subject: [PATCH 0775/1815] Add link to Telegraf playground in README (#5745) --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 758d7acb0..e858955f5 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,10 @@ Telegraf is plugin-driven and has the concept of 4 distinct plugin types: New plugins are designed to be easy to contribute, we'll eagerly accept pull requests and will manage the set of plugins that Telegraf supports. +## Try in Browser :rocket: + +You can try Telegraf right in your browser in the [Telegraf playground](https://rootnroll.com/d/telegraf/). + ## Contributing There are many ways to contribute: From 01eecee8cfab43ceb2eae99ceb2ead434503a2e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Tue, 23 Apr 2019 20:13:14 +0200 Subject: [PATCH 0776/1815] Speed up interface stats in net input (#5757) --- plugins/inputs/net/net.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/net/net.go b/plugins/inputs/net/net.go index 35d4a2448..f91501860 100644 --- a/plugins/inputs/net/net.go +++ b/plugins/inputs/net/net.go @@ -54,6 +54,15 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error { } } + interfaces, err := net.Interfaces() + if err != nil { + return fmt.Errorf("error getting list of interfaces: %s", err) + } + interfacesByName := map[string]net.Interface{} + for _, iface := range interfaces { + interfacesByName[iface.Name] = iface + } + for _, io := range netio { if len(s.Interfaces) != 0 { var found bool @@ -66,8 +75,8 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error { continue } } else if !s.skipChecks { - iface, err := net.InterfaceByName(io.Name) - if err != nil { + iface, ok := interfacesByName[io.Name] + if !ok { continue } From 3c57dafece321f0584fc569b9b8a751e82ec8214 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 23 Apr 2019 11:14:35 -0700 Subject: [PATCH 0777/1815] Support Minecraft server 1.13 and newer (#5733) --- plugins/inputs/minecraft/README.md | 102 +++--- plugins/inputs/minecraft/client.go | 205 +++++++++++ plugins/inputs/minecraft/client_test.go | 195 +++++++++++ plugins/inputs/minecraft/minecraft.go | 137 +++----- plugins/inputs/minecraft/minecraft_test.go | 318 ++++++------------ plugins/inputs/minecraft/rcon.go | 112 ------ .../minecraft/rcon_disconnect_error_test.go | 36 -- plugins/inputs/minecraft/rcon_test.go | 68 ---- testutil/accumulator.go | 16 + 9 files changed, 623 insertions(+), 566 deletions(-) create mode 100644 plugins/inputs/minecraft/client.go create mode 100644 plugins/inputs/minecraft/client_test.go delete mode 100644 plugins/inputs/minecraft/rcon.go delete mode 100644 plugins/inputs/minecraft/rcon_disconnect_error_test.go delete mode 100644 plugins/inputs/minecraft/rcon_test.go diff --git a/plugins/inputs/minecraft/README.md b/plugins/inputs/minecraft/README.md index 726f9a29e..933d8bb05 100644 --- a/plugins/inputs/minecraft/README.md +++ b/plugins/inputs/minecraft/README.md @@ -1,66 +1,84 @@ -# Minecraft Plugin +# Minecraft Input Plugin -This plugin uses the RCON protocol to collect [statistics](http://minecraft.gamepedia.com/Statistics) from a [scoreboard](http://minecraft.gamepedia.com/Scoreboard) on a -Minecraft server. 
+The `minecraft` plugin connects to a Minecraft server using the RCON protocol
+to collect scores from the server [scoreboard][].
 
-To enable [RCON](http://wiki.vg/RCON) on the minecraft server, add this to your server configuration in the `server.properties` file:
+This plugin is known to support Minecraft Java Edition versions 1.11 - 1.14.
+When using a version of Minecraft earlier than 1.13, be aware that the values
+for some criteria have changed and may need to be modified.
 
-```
+#### Server Setup
+
+Enable [RCON][] on the Minecraft server, add this to your server configuration
+in the [server.properties][] file:
+
+```conf
 enable-rcon=true
 rcon.password=<password>
 rcon.port=<1-65535>
 ```
 
-To create a new scoreboard objective called `jump` on a minecraft server tracking the `stat.jump` criteria, run this command
-in the Minecraft console:
+Scoreboard [Objectives][] must be added using the server console for the
+plugin to collect. These can be added in game by players with op status,
+from the server console, or over an RCON connection.
 
-`/scoreboard objectives add jump stat.jump`
-
-Stats are collected with the following RCON command, issued by the plugin:
-
-`/scoreboard players list *`
-
-### Configuration:
+When getting started, pick an easy-to-test objective. This command will add an
+objective that counts the number of times a player has jumped:
 
 ```
+/scoreboard objectives add jumps minecraft.custom:minecraft.jump
+```
+
+Once a player has triggered the event they will be added to the scoreboard,
+you can then list all players with recorded scores:
+```
+/scoreboard players list
+```
+
+View the current scores with a command, substituting your player name:
+```
+/scoreboard players list Etho
+```
+
+### Configuration
+
+```toml
 [[inputs.minecraft]]
-  # server address for minecraft
-  server = "localhost"
-  # port for RCON
-  port = "25575"
-  # password RCON for mincraft server
-  password = "replace_me"
+  ## Address of the Minecraft server.
+  # server = "localhost"
+
+  ## Server RCON Port.
+  # port = "25575"
+
+  ## Server RCON Password.
+  password = ""
 ```
 
-### Measurements & Fields:
+### Metrics
 
-*This plugin uses only one measurement, titled* `minecraft`
-
-- The field name is the scoreboard objective name.
-- The field value is the count of the scoreboard objective
-
-- `minecraft`
+- minecraft
+  - tags:
+    - player
+    - port (port of the server)
+    - server (hostname:port, deprecated in 1.11; use `source` and `port` tags)
+    - source (hostname of the server)
+  - fields:
+    - `<objective_name>` (integer, count)
 
-### Tags:
-
-- The `minecraft` measurement:
-    - `server`: the Minecraft RCON server
-    - `player`: the Minecraft player
-
-
 ### Sample Queries:
 
 Get the number of jumps per player in the last hour:
 ```
-SELECT SPREAD("jump") FROM "minecraft" WHERE time > now() - 1h GROUP BY "player"
+SELECT SPREAD("jumps") FROM "minecraft" WHERE time > now() - 1h GROUP BY "player"
 ```
 
 ### Example Output:
 
+```
+minecraft,player=notch,source=127.0.0.1,port=25575 jumps=178i 1498261397000000000
+minecraft,player=dinnerbone,source=127.0.0.1,port=25575 deaths=1i,jumps=1999i,cow_kills=1i 1498261397000000000
+minecraft,player=jeb,source=127.0.0.1,port=25575 d_pickaxe=1i,damage_dealt=80i,d_sword=2i,hunger=20i,health=20i,kills=1i,level=33i,jumps=264i,armor=15i 1498261397000000000
+```
 
-```
-$ telegraf --input-filter minecraft --test
-* Plugin: inputs.minecraft, Collection 1
-> minecraft,player=notch,server=127.0.0.1:25575 jumps=178i 1498261397000000000
-> minecraft,player=dinnerbone,server=127.0.0.1:25575 deaths=1i,jumps=1999i,cow_kills=1i 1498261397000000000
-> minecraft,player=jeb,server=127.0.0.1:25575 d_pickaxe=1i,damage_dealt=80i,d_sword=2i,hunger=20i,health=20i,kills=1i,level=33i,jumps=264i,armor=15i 1498261397000000000
-```
+[server.properties]: https://minecraft.gamepedia.com/Server.properties
+[scoreboard]: http://minecraft.gamepedia.com/Scoreboard
+[objectives]: https://minecraft.gamepedia.com/Scoreboard#Objectives
+[rcon]: http://wiki.vg/RCON
diff --git a/plugins/inputs/minecraft/client.go b/plugins/inputs/minecraft/client.go
new file mode 100644
index 000000000..a46709993
--- /dev/null
+++ b/plugins/inputs/minecraft/client.go
@@ -0,0 +1,205 @@
+package minecraft
+
+import (
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/influxdata/telegraf/plugins/inputs/minecraft/internal/rcon"
+)
+
+var (
+	scoreboardRegexLegacy = regexp.MustCompile(`(?U):\s(?P<value>\d+)\s\((?P<name>.*)\)`)
+	scoreboardRegex       = regexp.MustCompile(`\[(?P<name>[^\]]+)\]: (?P<value>\d+)`)
+)
+
+// Connection is an established connection to the Minecraft server.
+type Connection interface {
+	// Execute runs a command.
+	Execute(command string) (string, error)
+}
+
+// Connector is used to create connections to the Minecraft server.
+type Connector interface {
+	// Connect establishes a connection to the server. 
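+	// Each call dials the server's RCON port and authorizes with the
+	// configured password, returning a ready-to-use Connection.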
+ Connect() (Connection, error) +} + +func NewConnector(hostname, port, password string) (*connector, error) { + return &connector{ + hostname: hostname, + port: port, + password: password, + }, nil +} + +type connector struct { + hostname string + port string + password string +} + +func (c *connector) Connect() (Connection, error) { + p, err := strconv.Atoi(c.port) + if err != nil { + return nil, err + } + + rcon, err := rcon.NewClient(c.hostname, p) + if err != nil { + return nil, err + } + + _, err = rcon.Authorize(c.password) + if err != nil { + return nil, err + } + + return &connection{rcon: rcon}, nil +} + +func NewClient(connector Connector) (*client, error) { + return &client{connector: connector}, nil +} + +type client struct { + connector Connector + conn Connection +} + +func (c *client) Connect() error { + conn, err := c.connector.Connect() + if err != nil { + return err + } + c.conn = conn + return nil +} + +func (c *client) Players() ([]string, error) { + if c.conn == nil { + err := c.Connect() + if err != nil { + return nil, err + } + } + + resp, err := c.conn.Execute("/scoreboard players list") + if err != nil { + c.conn = nil + return nil, err + } + + players, err := parsePlayers(resp) + if err != nil { + c.conn = nil + return nil, err + } + + return players, nil +} + +func (c *client) Scores(player string) ([]Score, error) { + if c.conn == nil { + err := c.Connect() + if err != nil { + return nil, err + } + } + + resp, err := c.conn.Execute("/scoreboard players list " + player) + if err != nil { + c.conn = nil + return nil, err + } + + scores, err := parseScores(resp) + if err != nil { + c.conn = nil + return nil, err + } + + return scores, nil +} + +type connection struct { + rcon *rcon.Client +} + +func (c *connection) Execute(command string) (string, error) { + packet, err := c.rcon.Execute(command) + if err != nil { + return "", err + } + return packet.Body, nil +} + +func parsePlayers(input string) ([]string, error) { + parts := strings.SplitAfterN(input, ":", 2) + if len(parts) != 2 { + return []string{}, nil + } + + names := strings.Split(parts[1], ",") + + // Detect Minecraft <= 1.12 + if strings.Contains(parts[0], "players on the scoreboard") && len(names) > 0 { + // Split the last two player names: ex: "notch and dinnerbone" + head := names[:len(names)-1] + tail := names[len(names)-1] + names = append(head, strings.SplitN(tail, " and ", 2)...) + } + + var players []string + for _, name := range names { + name := strings.TrimSpace(name) + if name == "" { + continue + } + players = append(players, name) + + } + return players, nil +} + +// Score is an individual tracked scoreboard stat. 
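+// Name is the objective name and Value is the count recorded for a player.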
+type Score struct { + Name string + Value int64 +} + +func parseScores(input string) ([]Score, error) { + if strings.Contains(input, "has no scores") { + return []Score{}, nil + } + + // Detect Minecraft <= 1.12 + var re *regexp.Regexp + if strings.Contains(input, "tracked objective") { + re = scoreboardRegexLegacy + } else { + re = scoreboardRegex + } + + var scores []Score + matches := re.FindAllStringSubmatch(input, -1) + for _, match := range matches { + score := Score{} + for i, subexp := range re.SubexpNames() { + switch subexp { + case "name": + score.Name = match[i] + case "value": + value, err := strconv.ParseInt(match[i], 10, 64) + if err != nil { + continue + } + score.Value = value + default: + continue + } + } + scores = append(scores, score) + } + return scores, nil +} diff --git a/plugins/inputs/minecraft/client_test.go b/plugins/inputs/minecraft/client_test.go new file mode 100644 index 000000000..7c1f871ac --- /dev/null +++ b/plugins/inputs/minecraft/client_test.go @@ -0,0 +1,195 @@ +package minecraft + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +type MockConnection struct { + commands map[string]string +} + +func (c *MockConnection) Execute(command string) (string, error) { + return c.commands[command], nil +} + +type MockConnector struct { + conn *MockConnection +} + +func (c *MockConnector) Connect() (Connection, error) { + return c.conn, nil +} + +func TestClient_Player(t *testing.T) { + tests := []struct { + name string + commands map[string]string + expected []string + }{ + { + name: "minecraft 1.12 no players", + commands: map[string]string{ + "/scoreboard players list": "There are no tracked players on the scoreboard", + }, + expected: []string{}, + }, + { + name: "minecraft 1.12 single player", + commands: map[string]string{ + "/scoreboard players list": "Showing 1 tracked players on the scoreboard:Etho", + }, + expected: []string{"Etho"}, + }, + { + name: "minecraft 1.12 two players", + commands: map[string]string{ + "/scoreboard players list": "Showing 2 tracked players on the scoreboard:Etho and torham", + }, + expected: []string{"Etho", "torham"}, + }, + { + name: "minecraft 1.12 three players", + commands: map[string]string{ + "/scoreboard players list": "Showing 3 tracked players on the scoreboard:Etho, notch and torham", + }, + expected: []string{"Etho", "notch", "torham"}, + }, + { + name: "minecraft 1.12 players space in username", + commands: map[string]string{ + "/scoreboard players list": "Showing 4 tracked players on the scoreboard:with space, Etho, notch and torham", + }, + expected: []string{"with space", "Etho", "notch", "torham"}, + }, + { + name: "minecraft 1.12 players and in username", + commands: map[string]string{ + "/scoreboard players list": "Showing 5 tracked players on the scoreboard:left and right, with space,Etho, notch and torham", + }, + expected: []string{"left and right", "with space", "Etho", "notch", "torham"}, + }, + { + name: "minecraft 1.13 no players", + commands: map[string]string{ + "/scoreboard players list": "There are no tracked entities", + }, + expected: []string{}, + }, + { + name: "minecraft 1.13 single player", + commands: map[string]string{ + "/scoreboard players list": "There are 1 tracked entities: torham", + }, + expected: []string{"torham"}, + }, + { + name: "minecraft 1.13 multiple player", + commands: map[string]string{ + "/scoreboard players list": "There are 3 tracked entities: Etho, notch, torham", + }, + expected: []string{"Etho", "notch", "torham"}, + }, + } + for _, tt := range 
tests { + t.Run(tt.name, func(t *testing.T) { + connector := &MockConnector{ + conn: &MockConnection{commands: tt.commands}, + } + + client, err := NewClient(connector) + require.NoError(t, err) + + actual, err := client.Players() + require.NoError(t, err) + + require.Equal(t, tt.expected, actual) + }) + } +} + +func TestClient_Scores(t *testing.T) { + tests := []struct { + name string + player string + commands map[string]string + expected []Score + }{ + { + name: "minecraft 1.12 player with no scores", + player: "Etho", + commands: map[string]string{ + "/scoreboard players list Etho": "Player Etho has no scores recorded", + }, + expected: []Score{}, + }, + { + name: "minecraft 1.12 player with one score", + player: "Etho", + commands: map[string]string{ + "/scoreboard players list Etho": "Showing 1 tracked objective(s) for Etho:- jump: 2 (jump)", + }, + expected: []Score{ + {Name: "jump", Value: 2}, + }, + }, + { + name: "minecraft 1.12 player with many scores", + player: "Etho", + commands: map[string]string{ + "/scoreboard players list Etho": "Showing 3 tracked objective(s) for Etho:- hopper: 2 (hopper)- dropper: 2 (dropper)- redstone: 1 (redstone)", + }, + expected: []Score{ + {Name: "hopper", Value: 2}, + {Name: "dropper", Value: 2}, + {Name: "redstone", Value: 1}, + }, + }, + { + name: "minecraft 1.13 player with no scores", + player: "Etho", + commands: map[string]string{ + "/scoreboard players list Etho": "Etho has no scores to show", + }, + expected: []Score{}, + }, + { + name: "minecraft 1.13 player with one score", + player: "Etho", + commands: map[string]string{ + "/scoreboard players list Etho": "Etho has 1 scores:[jumps]: 1", + }, + expected: []Score{ + {Name: "jumps", Value: 1}, + }, + }, + { + name: "minecraft 1.13 player with many scores", + player: "Etho", + commands: map[string]string{ + "/scoreboard players list Etho": "Etho has 3 scores:[hopper]: 2[dropper]: 2[redstone]: 1", + }, + expected: []Score{ + {Name: "hopper", Value: 2}, + {Name: "dropper", Value: 2}, + {Name: "redstone", Value: 1}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + connector := &MockConnector{ + conn: &MockConnection{commands: tt.commands}, + } + + client, err := NewClient(connector) + require.NoError(t, err) + + actual, err := client.Scores(tt.player) + require.NoError(t, err) + + require.Equal(t, tt.expected, actual) + }) + } +} diff --git a/plugins/inputs/minecraft/minecraft.go b/plugins/inputs/minecraft/minecraft.go index 6debbd25b..0de79d94a 100644 --- a/plugins/inputs/minecraft/minecraft.go +++ b/plugins/inputs/minecraft/minecraft.go @@ -1,95 +1,89 @@ package minecraft import ( - "fmt" - "regexp" - "strconv" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) const sampleConfig = ` - ## server address for minecraft + ## Address of the Minecraft server. # server = "localhost" - ## port for RCON + + ## Server RCON Port. # port = "25575" - ## password RCON for mincraft server - # password = "" + + ## Server RCON Password. + password = "" + + ## Uncomment to remove deprecated metric components. + # tagdrop = ["server"] ` -var ( - playerNameRegex = regexp.MustCompile(`for\s([^:]+):-`) - scoreboardRegex = regexp.MustCompile(`(?U):\s(\d+)\s\((.*)\)`) -) - -// Client is an interface for a client which gathers data from a minecraft server +// Client is a client for the Minecraft server. type Client interface { - Gather(producer RCONClientProducer) ([]string, error) + // Connect establishes a connection to the server. 
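+	// Connections are re-established lazily if a command fails.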
+	Connect() error
+
+	// Players returns the players on the scoreboard.
+	Players() ([]string, error)
+
+	// Scores returns the objective scores for a player.
+	Scores(player string) ([]Score, error)
 }
 
-// Minecraft represents a connection to a minecraft server
+// Minecraft is the plugin type.
 type Minecraft struct {
-	Server   string
-	Port     string
-	Password string
-	client   Client
-	clientSet bool
+	Server   string `toml:"server"`
+	Port     string `toml:"port"`
+	Password string `toml:"password"`
+
+	client Client
 }
 
-// Description gives a brief description.
 func (s *Minecraft) Description() string {
-	return "Collects scores from a minecraft server's scoreboard using the RCON protocol"
+	return "Collects scores from a Minecraft server's scoreboard using the RCON protocol"
 }
 
-// SampleConfig returns our sampleConfig.
 func (s *Minecraft) SampleConfig() string {
 	return sampleConfig
 }
 
-// Gather uses the RCON protocol to collect player and
-// scoreboard stats from a minecraft server.
-//var hasClient bool = false
 func (s *Minecraft) Gather(acc telegraf.Accumulator) error {
-	// can't simply compare s.client to nil, because comparing an interface
-	// to nil often does not produce the desired result
-	if !s.clientSet {
-		var err error
-		s.client, err = NewRCON(s.Server, s.Port, s.Password)
+	if s.client == nil {
+		connector, err := NewConnector(s.Server, s.Port, s.Password)
 		if err != nil {
 			return err
 		}
-		s.clientSet = true
+
+		client, err := NewClient(connector)
+		if err != nil {
+			return err
+		}
+
+		s.client = client
 	}
 
-	// (*RCON).Gather() takes an RCONClientProducer for testing purposes
-	d := defaultClientProducer{
-		Server: s.Server,
-		Port:   s.Port,
-	}
-
-	scores, err := s.client.Gather(d)
+	players, err := s.client.Players()
 	if err != nil {
 		return err
 	}
 
-	for _, score := range scores {
-		player, err := ParsePlayerName(score)
+	for _, player := range players {
+		scores, err := s.client.Scores(player)
 		if err != nil {
 			return err
 		}
+
 		tags := map[string]string{
 			"player": player,
 			"server": s.Server + ":" + s.Port,
+			"source": s.Server,
+			"port":   s.Port,
 		}
 
-		stats, err := ParseScoreboard(score)
-		if err != nil {
-			return err
-		}
-		var fields = make(map[string]interface{}, len(stats))
-		for _, stat := range stats {
-			fields[stat.Name] = stat.Value
+		var fields = make(map[string]interface{}, len(scores))
+		for _, score := range scores {
+			fields[score.Name] = score.Value
 		}
 
 		acc.AddFields("minecraft", fields, tags)
@@ -98,51 +92,6 @@ func (s *Minecraft) Gather(acc telegraf.Accumulator) error {
 	return nil
 }
 
-// ParsePlayerName takes an input string from rcon, to parse
-// the player.
-func ParsePlayerName(input string) (string, error) {
-	playerMatches := playerNameRegex.FindAllStringSubmatch(input, -1)
-	if playerMatches == nil {
-		return "", fmt.Errorf("no player was matched")
-	}
-	return playerMatches[0][1], nil
-}
-
-// Score is an individual tracked scoreboard stat.
-type Score struct {
-	Name  string
-	Value int
-}
-
-// ParseScoreboard takes an input string from rcon, to parse
-// scoreboard stats. 
-func ParseScoreboard(input string) ([]Score, error) { - scoreMatches := scoreboardRegex.FindAllStringSubmatch(input, -1) - if scoreMatches == nil { - return nil, fmt.Errorf("No scores found") - } - - var scores []Score - - for _, match := range scoreMatches { - number := match[1] - name := match[2] - n, err := strconv.Atoi(number) - // Not necessary in current state, because regex can only match integers, - // maybe become necessary if regex is modified to match more types of - // numbers - if err != nil { - return nil, fmt.Errorf("Failed to parse score") - } - s := Score{ - Name: name, - Value: n, - } - scores = append(scores, s) - } - return scores, nil -} - func init() { inputs.Add("minecraft", func() telegraf.Input { return &Minecraft{ diff --git a/plugins/inputs/minecraft/minecraft_test.go b/plugins/inputs/minecraft/minecraft_test.go index c0a9e6cf5..487f7d58a 100644 --- a/plugins/inputs/minecraft/minecraft_test.go +++ b/plugins/inputs/minecraft/minecraft_test.go @@ -1,234 +1,124 @@ package minecraft import ( - "fmt" - "reflect" "testing" + "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) -// TestParsePlayerName tests different Minecraft RCON inputs for players -func TestParsePlayerName(t *testing.T) { - // Test a valid input string to ensure player is extracted - input := "1 tracked objective(s) for divislight:- jumps: 178 (jumps)" - got, err := ParsePlayerName(input) - want := "divislight" - if err != nil { - t.Fatalf("player returned error. Error: %s\n", err) - } - if got != want { - t.Errorf("got %s\nwant %s\n", got, want) - } - - // Test an invalid input string to ensure error is returned - input = "" - got, err = ParsePlayerName(input) - want = "" - if err == nil { - t.Fatal("Expected error when player not present. No error found.") - } - if got != want { - t.Errorf("got %s\n want %s\n", got, want) - } - - // Test an invalid input string to ensure error is returned - input = "1 tracked objective(s) for 😂:- jumps: 178 (jumps)" - got, err = ParsePlayerName(input) - want = "😂" - if err != nil { - t.Fatalf("player returned error. Error: %s\n", err) - } - if got != want { - t.Errorf("got %s\n want %s\n", got, want) - } -} - -// TestParseScoreboard tests different Minecraft RCON inputs for scoreboard stats. -func TestParseScoreboard(t *testing.T) { - // test a valid input string to ensure stats are parsed correctly. - input := `1 tracked objective(s) for divislight:- jumps: 178 (jumps)- sword: 5 (sword)` - got, err := ParseScoreboard(input) - if err != nil { - t.Fatal("Unexpected error") - } - - want := []Score{ - { - Name: "jumps", - Value: 178, - }, - { - Name: "sword", - Value: 5, - }, - } - - if !reflect.DeepEqual(got, want) { - t.Errorf("Got: \n%#v\nWant: %#v", got, want) - } - - // Tests a partial input string. - input = `1 tracked objective(s) for divislight:- jumps: (jumps)- sword: 5 (sword)` - got, err = ParseScoreboard(input) - - if err != nil { - t.Fatal("Unexpected error") - } - - want = []Score{ - { - Name: "sword", - Value: 5, - }, - } - - if !reflect.DeepEqual(got, want) { - t.Errorf("Got: \n%#v\nWant:\n%#v", got, want) - } - - // Tests an empty string. - input = `` - _, err = ParseScoreboard(input) - if err == nil { - t.Fatal("Expected input error, but error was nil") - } - - // Tests when a number isn't an integer. 
- input = `1 tracked objective(s) for divislight:- jumps: 178.5 (jumps)- sword: 5 (sword)` - got, err = ParseScoreboard(input) - if err != nil { - t.Fatal("Unexpected error") - } - - want = []Score{ - { - Name: "sword", - Value: 5, - }, - } - - if !reflect.DeepEqual(got, want) { - t.Errorf("Got: \n%#v\nWant: %#v", got, want) - } - - //Testing a real life data scenario with unicode characters - input = `7 tracked objective(s) for mauxlaim:- total_kills: 39 (total_kills)- "howdy doody": 37 (dalevel)- howdy: 37 (lvl)- jumps: 1290 (jumps)- iron_pickaxe: 284 (iron_pickaxe)- cow_kills: 1 (cow_kills)- "asdf": 37 (😂)` - got, err = ParseScoreboard(input) - if err != nil { - t.Fatal("Unexpected error") - } - - want = []Score{ - { - Name: "total_kills", - Value: 39, - }, - { - Name: "dalevel", - Value: 37, - }, - { - Name: "lvl", - Value: 37, - }, - { - Name: "jumps", - Value: 1290, - }, - { - Name: "iron_pickaxe", - Value: 284, - }, - { - Name: "cow_kills", - Value: 1, - }, - { - Name: "😂", - Value: 37, - }, - } - - if !reflect.DeepEqual(got, want) { - t.Errorf("Got: \n%#v\nWant: %#v", got, want) - } - -} - type MockClient struct { - Result []string - Err error + ConnectF func() error + PlayersF func() ([]string, error) + ScoresF func(player string) ([]Score, error) } -func (m *MockClient) Gather(d RCONClientProducer) ([]string, error) { - return m.Result, m.Err +func (c *MockClient) Connect() error { + return c.ConnectF() +} + +func (c *MockClient) Players() ([]string, error) { + return c.PlayersF() +} + +func (c *MockClient) Scores(player string) ([]Score, error) { + return c.ScoresF(player) } func TestGather(t *testing.T) { - var acc testutil.Accumulator - testConfig := Minecraft{ - Server: "biffsgang.net", - Port: "25575", - client: &MockClient{ - Result: []string{ - `1 tracked objective(s) for divislight:- jumps: 178 (jumps)`, - `7 tracked objective(s) for mauxlaim:- total_kills: 39 (total_kills)- "howdy doody": 37 (dalevel)- howdy: 37 (lvl)- jumps: 1290 (jumps)- iron_pickaxe: 284 (iron_pickaxe)- cow_kills: 1 (cow_kills)- "asdf": 37 (😂)`, - `5 tracked objective(s) for torham:- total_kills: 29 (total_kills)- "howdy doody": 33 (dalevel)- howdy: 33 (lvl)- jumps: 263 (jumps)- "asdf": 33 (😂)`, + now := time.Unix(0, 0) + + tests := []struct { + name string + client *MockClient + metrics []telegraf.Metric + err error + }{ + { + name: "no players", + client: &MockClient{ + ConnectF: func() error { + return nil + }, + PlayersF: func() ([]string, error) { + return []string{}, nil + }, }, - Err: nil, + metrics: []telegraf.Metric{}, }, - clientSet: true, - } - - err := testConfig.Gather(&acc) - - if err != nil { - t.Fatalf("gather returned error. 
Error: %s\n", err) - } - - if !testConfig.clientSet { - t.Fatalf("clientSet should be true, client should be set") - } - - tags := map[string]string{ - "player": "divislight", - "server": "biffsgang.net:25575", - } - - assertContainsTaggedStat(t, &acc, "minecraft", "jumps", 178, tags) - tags["player"] = "mauxlaim" - assertContainsTaggedStat(t, &acc, "minecraft", "cow_kills", 1, tags) - tags["player"] = "torham" - assertContainsTaggedStat(t, &acc, "minecraft", "total_kills", 29, tags) - -} - -func assertContainsTaggedStat( - t *testing.T, - acc *testutil.Accumulator, - measurement string, - field string, - expectedValue int, - tags map[string]string, -) { - var actualValue int - for _, pt := range acc.Metrics { - if pt.Measurement == measurement && reflect.DeepEqual(pt.Tags, tags) { - for fieldname, value := range pt.Fields { - if fieldname == field { - actualValue = value.(int) - if value == expectedValue { - return + { + name: "one player without scores", + client: &MockClient{ + ConnectF: func() error { + return nil + }, + PlayersF: func() ([]string, error) { + return []string{"Etho"}, nil + }, + ScoresF: func(player string) ([]Score, error) { + switch player { + case "Etho": + return []Score{}, nil + default: + panic("unknown player") } - t.Errorf("Expected value %d\n got value %d\n", expectedValue, value) - } - } - } + }, + }, + metrics: []telegraf.Metric{}, + }, + { + name: "one player with scores", + client: &MockClient{ + ConnectF: func() error { + return nil + }, + PlayersF: func() ([]string, error) { + return []string{"Etho"}, nil + }, + ScoresF: func(player string) ([]Score, error) { + switch player { + case "Etho": + return []Score{{Name: "jumps", Value: 42}}, nil + default: + panic("unknown player") + } + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "minecraft", + map[string]string{ + "player": "Etho", + "server": "example.org:25575", + "source": "example.org", + "port": "25575", + }, + map[string]interface{}{ + "jumps": 42, + }, + now, + ), + }, + }, } - msg := fmt.Sprintf( - "Could not find measurement \"%s\" with requested tags within %s, Actual: %d", - measurement, field, actualValue) - t.Fatal(msg) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + plugin := &Minecraft{ + Server: "example.org", + Port: "25575", + Password: "xyzzy", + client: tt.client, + } + var acc testutil.Accumulator + acc.TimeFunc = func() time.Time { return now } + + err := plugin.Gather(&acc) + + require.Equal(t, tt.err, err) + testutil.RequireMetricsEqual(t, tt.metrics, acc.GetTelegrafMetrics()) + }) + } } diff --git a/plugins/inputs/minecraft/rcon.go b/plugins/inputs/minecraft/rcon.go deleted file mode 100644 index f42fc8ba4..000000000 --- a/plugins/inputs/minecraft/rcon.go +++ /dev/null @@ -1,112 +0,0 @@ -package minecraft - -import ( - "strconv" - "strings" - - "github.com/influxdata/telegraf/plugins/inputs/minecraft/internal/rcon" -) - -const ( - // NoMatches is a sentinel value returned when there are no statistics defined on the - //minecraft server - NoMatches = `All matches failed` - // ScoreboardPlayerList is the command to see all player statistics - ScoreboardPlayerList = `scoreboard players list *` -) - -// RCONClient is a representation of RCON command authorizaiton and exectution -type RCONClient interface { - Authorize(password string) (*rcon.Packet, error) - Execute(command string) (*rcon.Packet, error) -} - -// RCON represents a RCON server connection -type RCON struct { - Server string - Port string - Password string - client RCONClient -} - -// 
RCONClientProducer is an interface which defines how a new client will be -// produced in the event of a network disconnect. It exists mainly for -// testing purposes -type RCONClientProducer interface { - newClient() (RCONClient, error) -} - -type defaultClientProducer struct { - Server string - Port string -} - -func (d defaultClientProducer) newClient() (RCONClient, error) { - return newClient(d.Server, d.Port) -} - -// NewRCON creates a new RCON -func NewRCON(server, port, password string) (*RCON, error) { - client, err := newClient(server, port) - if err != nil { - return nil, err - } - - return &RCON{ - Server: server, - Port: port, - Password: password, - client: client, - }, nil -} - -func newClient(server, port string) (*rcon.Client, error) { - p, err := strconv.Atoi(port) - if err != nil { - return nil, err - } - - client, err := rcon.NewClient(server, p) - - // If we've encountered any error, the contents of `client` could be corrupted, - // so we must return nil, err - if err != nil { - return nil, err - } - return client, nil -} - -// Gather receives all player scoreboard information and returns it per player. -func (r *RCON) Gather(producer RCONClientProducer) ([]string, error) { - if r.client == nil { - var err error - r.client, err = producer.newClient() - if err != nil { - return nil, err - } - } - - if _, err := r.client.Authorize(r.Password); err != nil { - // Potentially a network problem where the client will need to be - // re-initialized - r.client = nil - return nil, err - } - - packet, err := r.client.Execute(ScoreboardPlayerList) - if err != nil { - // Potentially a network problem where the client will need to be - // re-initialized - r.client = nil - return nil, err - } - - if !strings.Contains(packet.Body, NoMatches) { - players := strings.Split(packet.Body, "Showing") - if len(players) > 1 { - return players[1:], nil - } - } - - return []string{}, nil -} diff --git a/plugins/inputs/minecraft/rcon_disconnect_error_test.go b/plugins/inputs/minecraft/rcon_disconnect_error_test.go deleted file mode 100644 index c89308e06..000000000 --- a/plugins/inputs/minecraft/rcon_disconnect_error_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package minecraft - -import ( - "errors" - "testing" -) - -type MockRCONProducer struct { - Err error -} - -func (m *MockRCONProducer) newClient() (RCONClient, error) { - return nil, m.Err -} - -func TestRCONErrorHandling(t *testing.T) { - m := &MockRCONProducer{ - Err: errors.New("Error: failed connection"), - } - c := &RCON{ - Server: "craftstuff.com", - Port: "2222", - Password: "pass", - //Force fetching of new client - client: nil, - } - - _, err := c.Gather(m) - if err == nil { - t.Errorf("Error nil, unexpected result") - } - - if c.client != nil { - t.Fatal("c.client should be nil, unexpected result") - } -} diff --git a/plugins/inputs/minecraft/rcon_test.go b/plugins/inputs/minecraft/rcon_test.go deleted file mode 100644 index 1660b53f9..000000000 --- a/plugins/inputs/minecraft/rcon_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package minecraft - -import ( - "testing" - - "github.com/influxdata/telegraf/plugins/inputs/minecraft/internal/rcon" -) - -type MockRCONClient struct { - Result *rcon.Packet - Err error -} - -func (m *MockRCONClient) Authorize(password string) (*rcon.Packet, error) { - return m.Result, m.Err -} -func (m *MockRCONClient) Execute(command string) (*rcon.Packet, error) { - return m.Result, m.Err -} - -// TestRCONGather test the RCON gather function -func TestRCONGather(t *testing.T) { - mock := &MockRCONClient{ - Result: 
&rcon.Packet{ - Body: `Showing 1 tracked objective(s) for divislight:- jumps: 178 (jumps)Showing 7 tracked objective(s) for mauxlaim:- total_kills: 39 (total_kills)- "howdy doody": 37 (dalevel)- howdy: 37 (lvl)- jumps: 1290 (jumps)- iron_pickaxe: 284 (iron_pickaxe)- cow_kills: 1 (cow_kills)- "asdf": 37 (😂)Showing 5 tracked objective(s) for torham:- total_kills: 29 (total_kills)- "howdy doody": 33 (dalevel)- howdy: 33 (lvl)- jumps: 263 (jumps)- "asdf": 33 (😂)`, - }, - Err: nil, - } - - want := []string{ - ` 1 tracked objective(s) for divislight:- jumps: 178 (jumps)`, - ` 7 tracked objective(s) for mauxlaim:- total_kills: 39 (total_kills)- "howdy doody": 37 (dalevel)- howdy: 37 (lvl)- jumps: 1290 (jumps)- iron_pickaxe: 284 (iron_pickaxe)- cow_kills: 1 (cow_kills)- "asdf": 37 (😂)`, - ` 5 tracked objective(s) for torham:- total_kills: 29 (total_kills)- "howdy doody": 33 (dalevel)- howdy: 33 (lvl)- jumps: 263 (jumps)- "asdf": 33 (😂)`, - } - - client := &RCON{ - Server: "craftstuff.com", - Port: "2222", - Password: "pass", - client: mock, - } - - d := defaultClientProducer{} - got, err := client.Gather(d) - if err != nil { - t.Fatalf("Gather returned an error. Error %s\n", err) - } - for i, s := range got { - if want[i] != s { - t.Fatalf("Got %s at index %d, want %s at index %d", s, i, want[i], i) - } - } - - client.client = &MockRCONClient{ - Result: &rcon.Packet{ - Body: "", - }, - Err: nil, - } - - got, err = client.Gather(defaultClientProducer{}) - if err != nil { - t.Fatalf("Gather returned an error. Error %s\n", err) - } - if len(got) != 0 { - t.Fatalf("Expected empty slice of length %d, got slice of length %d", 0, len(got)) - } -} diff --git a/testutil/accumulator.go b/testutil/accumulator.go index a7b9fe8f6..19acebe1c 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -45,12 +45,22 @@ type Accumulator struct { Errors []error debug bool delivered chan telegraf.DeliveryInfo + + TimeFunc func() time.Time } func (a *Accumulator) NMetrics() uint64 { return atomic.LoadUint64(&a.nMetrics) } +func (a *Accumulator) GetTelegrafMetrics() []telegraf.Metric { + metrics := []telegraf.Metric{} + for _, m := range a.Metrics { + metrics = append(metrics, FromTestMetric(m)) + } + return metrics +} + func (a *Accumulator) FirstError() error { if len(a.Errors) == 0 { return nil @@ -101,6 +111,12 @@ func (a *Accumulator) AddFields( t = timestamp[0] } else { t = time.Now() + if a.TimeFunc == nil { + t = time.Now() + } else { + t = a.TimeFunc() + } + } if a.debug { From c71827ecfa8cd7b6d8d7abc053a1e457d901e95d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 23 Apr 2019 11:16:51 -0700 Subject: [PATCH 0778/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3fea83579..3ad450470 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,11 +28,13 @@ - [#5713](https://github.com/influxdata/telegraf/pull/5713): Enhance HTTP connection options for phpfpm input plugin. - [#5544](https://github.com/influxdata/telegraf/pull/5544): Use more efficient GetMetricData API to collect cloudwatch metrics. - [#5544](https://github.com/influxdata/telegraf/pull/5544): Allow selection of collected statistic types in cloudwatch input. +- [#5757](https://github.com/influxdata/telegraf/pull/5757): Speed up interface stat collection in net input. #### Bugfixes - [#5631](https://github.com/influxdata/telegraf/pull/5631): Create Windows service only when specified or in service manager. 
- [#5730](https://github.com/influxdata/telegraf/pull/5730): Don't start telegraf when stale pidfile found.
+- [#5477](https://github.com/influxdata/telegraf/pull/5477): Support Minecraft server 1.13 and newer in minecraft input.
 
 ## v1.10.3 [2019-04-16]
 
From a0202744cf41a6957130f1b491242f8ff27d9c0d Mon Sep 17 00:00:00 2001
From: dupondje
Date: Wed, 24 Apr 2019 01:34:52 +0200
Subject: [PATCH 0779/1815] Add PowerDNS Recursor input plugin (#4545)

---
 README.md                                  |   1 +
 etc/telegraf.conf                          |  14 +
 plugins/inputs/all/all.go                  |   1 +
 plugins/inputs/powerdns_recursor/README.md | 139 +++++
 .../powerdns_recursor/powerdns_recursor.go | 156 +++++
 .../powerdns_recursor_test.go              | 545 ++++++++++++++++++
 6 files changed, 856 insertions(+)
 create mode 100644 plugins/inputs/powerdns_recursor/README.md
 create mode 100644 plugins/inputs/powerdns_recursor/powerdns_recursor.go
 create mode 100644 plugins/inputs/powerdns_recursor/powerdns_recursor_test.go

diff --git a/README.md b/README.md
index e858955f5..4446afd15 100644
--- a/README.md
+++ b/README.md
@@ -244,6 +244,7 @@ For documentation on the latest development code see the [documentation index][d
 * [postgresql_extensible](./plugins/inputs/postgresql_extensible)
 * [postgresql](./plugins/inputs/postgresql)
 * [powerdns](./plugins/inputs/powerdns)
+* [powerdns_recursor](./plugins/inputs/powerdns_recursor)
 * [processes](./plugins/inputs/processes)
 * [procstat](./plugins/inputs/procstat)
 * [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server))
diff --git a/etc/telegraf.conf b/etc/telegraf.conf
index 8e3264a84..c386d171f 100644
--- a/etc/telegraf.conf
+++ b/etc/telegraf.conf
@@ -3346,6 +3346,20 @@
 #   unix_sockets = ["/var/run/pdns.controlsocket"]
 
 
+# # Read metrics from one or many PowerDNS recursors
+# [[inputs.powerdns_recursor]]
+#   ## An array of sockets to gather stats about.
+#   ## Specify a path to unix socket.
+#   ##
+#   ## If no servers are specified, then '/var/run/pdns_recursor.controlsocket' is used as the path.
+#   unix_sockets = ["/var/run/pdns_recursor.controlsocket"]
+#
+#   ## Socket for Receive
+#   # socket_dir = "/var/run/"
+#   ## Socket permissions
+#   # socket_mode = "0666"
+
+
 # # Monitor process cpu and memory usage
 # [[inputs.procstat]]
 #   ## PID file to monitor process
diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 7c592e925..02002a4f0 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -109,6 +109,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/postgresql"
 	_ "github.com/influxdata/telegraf/plugins/inputs/postgresql_extensible"
 	_ "github.com/influxdata/telegraf/plugins/inputs/powerdns"
+	_ "github.com/influxdata/telegraf/plugins/inputs/powerdns_recursor"
 	_ "github.com/influxdata/telegraf/plugins/inputs/processes"
 	_ "github.com/influxdata/telegraf/plugins/inputs/procstat"
 	_ "github.com/influxdata/telegraf/plugins/inputs/prometheus"
diff --git a/plugins/inputs/powerdns_recursor/README.md b/plugins/inputs/powerdns_recursor/README.md
new file mode 100644
index 000000000..e653af930
--- /dev/null
+++ b/plugins/inputs/powerdns_recursor/README.md
@@ -0,0 +1,139 @@
+# PowerDNS Recursor Input Plugin
+
+The `powerdns_recursor` plugin gathers metrics about PowerDNS Recursor using a unix socket.
+
+### Configuration:
+
+```
+# Read metrics from one or many PowerDNS recursors
+[[inputs.powerdns_recursor]]
+  ## An array of sockets to gather stats about.
+  ## Specify a path to unix socket. 
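+  ## Multiple sockets may be listed; each one is gathered on every interval.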
+ ## + ## If no servers are specified, then '/var/run/pdns_recursor.controlsocket' is used as the path. + unix_sockets = ["/var/run/pdns_recursor.controlsocket"] + + ## Socket for Receive + # socket_dir = "/var/run/" + ## Socket permissions + # socket_mode = "0666" +``` + +### Measurements & Fields: + +- powerdns_recursor + - all-outqueries + - answers-slow + - answers0-1 + - answers1-10 + - answers10-100 + - answers100-1000 + - auth-zone-queries + - auth4-answers-slow + - auth4-answers0-1 + - auth4-answers1-10 + - auth4-answers10-100 + - auth4-answers100-1000 + - auth6-answers-slow + - auth6-answers0-1 + - auth6-answers1-10 + - auth6-answers10-100 + - auth6-answers100-1000 + - cache-entries + - cache-hits + - cache-misses + - case-mismatches + - chain-resends + - client-parse-errors + - concurrent-queries + - dlg-only-drops + - dnssec-queries + - dnssec-result-bogus + - dnssec-result-indeterminate + - dnssec-result-insecure + - dnssec-result-nta + - dnssec-result-secure + - dnssec-validations + - dont-outqueries + - ecs-queries + - ecs-responses + - edns-ping-matches + - edns-ping-mismatches + - failed-host-entries + - fd-usage + - ignored-packets + - ipv6-outqueries + - ipv6-questions + - malloc-bytes + - max-cache-entries + - max-mthread-stack + - max-packetcache-entries + - negcache-entries + - no-packet-error + - noedns-outqueries + - noerror-answers + - noping-outqueries + - nsset-invalidations + - nsspeeds-entries + - nxdomain-answers + - outgoing-timeouts + - outgoing4-timeouts + - outgoing6-timeouts + - over-capacity-drops + - packetcache-entries + - packetcache-hits + - packetcache-misses + - policy-drops + - policy-result-custom + - policy-result-drop + - policy-result-noaction + - policy-result-nodata + - policy-result-nxdomain + - policy-result-truncate + - qa-latency + - query-pipe-full-drops + - questions + - real-memory-usage + - resource-limits + - security-status + - server-parse-errors + - servfail-answers + - spoof-prevents + - sys-msec + - tcp-client-overflow + - tcp-clients + - tcp-outqueries + - tcp-questions + - throttle-entries + - throttled-out + - throttled-outqueries + - too-old-drops + - udp-in-errors + - udp-noport-errors + - udp-recvbuf-errors + - udp-sndbuf-errors + - unauthorized-tcp + - unauthorized-udp + - unexpected-packets + - unreachables + - uptime + - user-msec + - x-our-latency + - x-ourtime-slow + - x-ourtime0-1 + - x-ourtime1-2 + - x-ourtime16-32 + - x-ourtime2-4 + - x-ourtime4-8 + - x-ourtime8-16 + +### Tags: + +- tags: `server=socket` + +### Example Output: + +``` +$ ./telegraf --config telegraf.conf --input-filter powerdns_recursor --test +> powerdns_recursor,server=/var/run/pdns_recursor.controlsocket 
all-outqueries=3631810i,answers-slow=36863i,answers0-1=179612i,answers1-10=1223305i,answers10-100=1252199i,answers100-1000=408357i,auth-zone-queries=4i,auth4-answers-slow=44758i,auth4-answers0-1=59721i,auth4-answers1-10=1766787i,auth4-answers10-100=1329638i,auth4-answers100-1000=430372i,auth6-answers-slow=0i,auth6-answers0-1=0i,auth6-answers1-10=0i,auth6-answers10-100=0i,auth6-answers100-1000=0i,cache-entries=296689i,cache-hits=150654i,cache-misses=2949682i,case-mismatches=0i,chain-resends=420004i,client-parse-errors=0i,concurrent-queries=0i,dlg-only-drops=0i,dnssec-queries=152970i,dnssec-result-bogus=0i,dnssec-result-indeterminate=0i,dnssec-result-insecure=0i,dnssec-result-nta=0i,dnssec-result-secure=47i,dnssec-validations=47i,dont-outqueries=62i,ecs-queries=0i,ecs-responses=0i,edns-ping-matches=0i,edns-ping-mismatches=0i,failed-host-entries=21i,fd-usage=32i,ignored-packets=0i,ipv6-outqueries=0i,ipv6-questions=0i,malloc-bytes=0i,max-cache-entries=1000000i,max-mthread-stack=33747i,max-packetcache-entries=500000i,negcache-entries=100019i,no-packet-error=0i,noedns-outqueries=73341i,noerror-answers=25453808i,noping-outqueries=0i,nsset-invalidations=2398i,nsspeeds-entries=3966i,nxdomain-answers=3341302i,outgoing-timeouts=44384i,outgoing4-timeouts=44384i,outgoing6-timeouts=0i,over-capacity-drops=0i,packetcache-entries=78258i,packetcache-hits=25999027i,packetcache-misses=3100179i,policy-drops=0i,policy-result-custom=0i,policy-result-drop=0i,policy-result-noaction=3100336i,policy-result-nodata=0i,policy-result-nxdomain=0i,policy-result-truncate=0i,qa-latency=6553i,query-pipe-full-drops=0i,questions=29099363i,real-memory-usage=280494080i,resource-limits=0i,security-status=1i,server-parse-errors=0i,servfail-answers=304253i,spoof-prevents=0i,sys-msec=1312600i,tcp-client-overflow=0i,tcp-clients=0i,tcp-outqueries=116i,tcp-questions=133i,throttle-entries=21i,throttled-out=13296i,throttled-outqueries=13296i,too-old-drops=2i,udp-in-errors=4i,udp-noport-errors=2918i,udp-recvbuf-errors=0i,udp-sndbuf-errors=0i,unauthorized-tcp=0i,unauthorized-udp=0i,unexpected-packets=0i,unreachables=1708i,uptime=167482i,user-msec=1282640i,x-our-latency=19i,x-ourtime-slow=642i,x-ourtime0-1=3095566i,x-ourtime1-2=3401i,x-ourtime16-32=201i,x-ourtime2-4=304i,x-ourtime4-8=198i,x-ourtime8-16=24i 1533903879000000000 +``` diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor.go b/plugins/inputs/powerdns_recursor/powerdns_recursor.go new file mode 100644 index 000000000..bfd595597 --- /dev/null +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor.go @@ -0,0 +1,156 @@ +package powerdns_recursor + +import ( + "bufio" + "errors" + "fmt" + "log" + "math/rand" + "net" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type PowerdnsRecursor struct { + UnixSockets []string + + SocketDir string `toml:"socket_dir"` + SocketMode uint32 `toml:"socket_mode"` +} + +var defaultTimeout = 5 * time.Second + +var sampleConfig = ` + ## An array of sockets to gather stats about. + ## Specify a path to unix socket. 
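+	## If no sockets are specified, '/var/run/pdns_recursor.controlsocket' is used.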
+	unix_sockets = ["/var/run/pdns_recursor.controlsocket"]
+
+	## Socket for Receive
+	#socket_dir = "/var/run/"
+	## Socket permissions
+	#socket_mode = "0666"
+`

+func (p *PowerdnsRecursor) SampleConfig() string {
+	return sampleConfig
+}
+
+func (p *PowerdnsRecursor) Description() string {
+	return "Read metrics from one or many PowerDNS Recursor servers"
+}
+
+func (p *PowerdnsRecursor) Gather(acc telegraf.Accumulator) error {
+	if len(p.UnixSockets) == 0 {
+		return p.gatherServer("/var/run/pdns_recursor.controlsocket", acc)
+	}
+
+	for _, serverSocket := range p.UnixSockets {
+		if err := p.gatherServer(serverSocket, acc); err != nil {
+			acc.AddError(err)
+		}
+	}
+
+	return nil
+}
+
+func (p *PowerdnsRecursor) gatherServer(address string, acc telegraf.Accumulator) error {
+	randomNumber := rand.Int63()
+	recvSocket := filepath.Join("/", "var", "run", fmt.Sprintf("pdns_recursor_telegraf%d", randomNumber))
+	if p.SocketDir != "" {
+		recvSocket = filepath.Join(p.SocketDir, fmt.Sprintf("pdns_recursor_telegraf%d", randomNumber))
+	}
+
+	laddr, err := net.ResolveUnixAddr("unixgram", recvSocket)
+	if err != nil {
+		return err
+	}
+	defer os.Remove(recvSocket)
+	raddr, err := net.ResolveUnixAddr("unixgram", address)
+	if err != nil {
+		return err
+	}
+	conn, err := net.DialUnix("unixgram", laddr, raddr)
+	if err != nil {
+		return err
+	}
+	perm := uint32(0666)
+	if p.SocketMode > 0 {
+		perm = p.SocketMode
+	}
+	if err := os.Chmod(recvSocket, os.FileMode(perm)); err != nil {
+		return err
+	}
+	defer conn.Close()
+
+	conn.SetDeadline(time.Now().Add(defaultTimeout))
+
+	// Read and write buffer
+	rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
+
+	// Send command
+	if _, err := fmt.Fprint(rw, "get-all\n"); err != nil {
+		return err
+	}
+	if err := rw.Flush(); err != nil {
+		return err
+	}
+
+	// Read data
+	buf := make([]byte, 16384)
+	n, err := rw.Read(buf)
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return errors.New("no data received")
+	}
+
+	// Only the bytes actually read form the response.
+	metrics := string(buf[:n])
+
+	// Process data
+	fields := parseResponse(metrics)
+
+	// Add server socket as a tag
+	tags := map[string]string{"server": address}
+
+	acc.AddFields("powerdns_recursor", fields, tags)
+
+	conn.Close()
+
+	return nil
+}
+
+func parseResponse(metrics string) map[string]interface{} {
+	values := make(map[string]interface{})
+
+	s := strings.Split(metrics, "\n")
+
+	for _, metric := range s[:len(s)-1] {
+		m := strings.Split(metric, "\t")
+		if len(m) < 2 {
+			continue
+		}
+
+		i, err := strconv.ParseInt(m[1], 10, 64)
+		if err != nil {
+			log.Printf("E! 
[inputs.powerdns_recursor] Error parsing integer for metric [%s] %v", + metric, err) + continue + } + values[m[0]] = i + } + + return values +} + +func init() { + inputs.Add("powerdns_recursor", func() telegraf.Input { + return &PowerdnsRecursor{} + }) +} diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go new file mode 100644 index 000000000..b62a6224d --- /dev/null +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go @@ -0,0 +1,545 @@ +package powerdns_recursor + +import ( + "net" + "os" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type statServer struct{} + +var metrics = "all-outqueries\t3591637\nanswers-slow\t36451\nanswers0-1\t177297\nanswers1-10\t1209328\n" + + "answers10-100\t1238786\nanswers100-1000\t402917\nauth-zone-queries\t4\nauth4-answers-slow\t44248\n" + + "auth4-answers0-1\t59169\nauth4-answers1-10\t1747403\nauth4-answers10-100\t1315621\n" + + "auth4-answers100-1000\t424683\nauth6-answers-slow\t0\nauth6-answers0-1\t0\nauth6-answers1-10\t0\n" + + "auth6-answers10-100\t0\nauth6-answers100-1000\t0\ncache-entries\t295917\ncache-hits\t148630\n" + + "cache-misses\t2916149\ncase-mismatches\t0\nchain-resends\t418602\nclient-parse-errors\t0\n" + + "concurrent-queries\t0\ndlg-only-drops\t0\ndnssec-queries\t151536\ndnssec-result-bogus\t0\n" + + "dnssec-result-indeterminate\t0\ndnssec-result-insecure\t0\ndnssec-result-nta\t0\n" + + "dnssec-result-secure\t46\ndnssec-validations\t46\ndont-outqueries\t62\necs-queries\t0\n" + + "ecs-responses\t0\nedns-ping-matches\t0\nedns-ping-mismatches\t0\nfailed-host-entries\t33\n" + + "fd-usage\t32\nignored-packets\t0\nipv6-outqueries\t0\nipv6-questions\t0\nmalloc-bytes\t0\n" + + "max-cache-entries\t1000000\nmax-mthread-stack\t33747\nmax-packetcache-entries\t500000\n" + + "negcache-entries\t100070\nno-packet-error\t0\nnoedns-outqueries\t72409\nnoerror-answers\t25155259\n" + + "noping-outqueries\t0\nnsset-invalidations\t2385\nnsspeeds-entries\t3571\nnxdomain-answers\t3307768\n" + + "outgoing-timeouts\t43876\noutgoing4-timeouts\t43876\noutgoing6-timeouts\t0\nover-capacity-drops\t0\n" + + "packetcache-entries\t80756\npacketcache-hits\t25698497\npacketcache-misses\t3064625\npolicy-drops\t0\n" + + "policy-result-custom\t0\npolicy-result-drop\t0\npolicy-result-noaction\t3064779\npolicy-result-nodata\t0\n" + + "policy-result-nxdomain\t0\npolicy-result-truncate\t0\nqa-latency\t6587\nquery-pipe-full-drops\t0\n" + + "questions\t28763276\nreal-memory-usage\t280465408\nresource-limits\t0\nsecurity-status\t1\n" + + "server-parse-errors\t0\nservfail-answers\t300249\nspoof-prevents\t0\nsys-msec\t1296588\n" + + "tcp-client-overflow\t0\ntcp-clients\t0\ntcp-outqueries\t116\ntcp-questions\t130\nthrottle-entries\t33\n" + + "throttled-out\t13187\nthrottled-outqueries\t13187\ntoo-old-drops\t2\nudp-in-errors\t4\n" + + "udp-noport-errors\t2908\nudp-recvbuf-errors\t0\nudp-sndbuf-errors\t0\nunauthorized-tcp\t0\n" + + "unauthorized-udp\t0\nunexpected-packets\t0\nunreachables\t1695\nuptime\t165725\nuser-msec\t1266384\n" + + "x-our-latency\t19\nx-ourtime-slow\t632\nx-ourtime0-1\t3060079\nx-ourtime1-2\t3351\nx-ourtime16-32\t197\n" + + "x-ourtime2-4\t302\nx-ourtime4-8\t194\nx-ourtime8-16\t24\n" + +// first metric has no "\t" +var corruptMetrics = "all-outqueries3591637\nanswers-slow\t36451\nanswers0-1\t177297\nanswers1-10\t1209328\n" + + 
"answers10-100\t1238786\nanswers100-1000\t402917\nauth-zone-queries\t4\nauth4-answers-slow\t44248\n" + + "auth4-answers0-1\t59169\nauth4-answers1-10\t1747403\nauth4-answers10-100\t1315621\n" + + "auth4-answers100-1000\t424683\nauth6-answers-slow\t0\nauth6-answers0-1\t0\nauth6-answers1-10\t0\n" + + "auth6-answers10-100\t0\nauth6-answers100-1000\t0\ncache-entries\t295917\ncache-hits\t148630\n" + + "cache-misses\t2916149\ncase-mismatches\t0\nchain-resends\t418602\nclient-parse-errors\t0\n" + + "concurrent-queries\t0\ndlg-only-drops\t0\ndnssec-queries\t151536\ndnssec-result-bogus\t0\n" + + "dnssec-result-indeterminate\t0\ndnssec-result-insecure\t0\ndnssec-result-nta\t0\n" + + "dnssec-result-secure\t46\ndnssec-validations\t46\ndont-outqueries\t62\necs-queries\t0\n" + + "ecs-responses\t0\nedns-ping-matches\t0\nedns-ping-mismatches\t0\nfailed-host-entries\t33\n" + + "fd-usage\t32\nignored-packets\t0\nipv6-outqueries\t0\nipv6-questions\t0\nmalloc-bytes\t0\n" + + "max-cache-entries\t1000000\nmax-mthread-stack\t33747\nmax-packetcache-entries\t500000\n" + + "negcache-entries\t100070\nno-packet-error\t0\nnoedns-outqueries\t72409\nnoerror-answers\t25155259\n" + + "noping-outqueries\t0\nnsset-invalidations\t2385\nnsspeeds-entries\t3571\nnxdomain-answers\t3307768\n" + + "outgoing-timeouts\t43876\noutgoing4-timeouts\t43876\noutgoing6-timeouts\t0\nover-capacity-drops\t0\n" + + "packetcache-entries\t80756\npacketcache-hits\t25698497\npacketcache-misses\t3064625\npolicy-drops\t0\n" + + "policy-result-custom\t0\npolicy-result-drop\t0\npolicy-result-noaction\t3064779\npolicy-result-nodata\t0\n" + + "policy-result-nxdomain\t0\npolicy-result-truncate\t0\nqa-latency\t6587\nquery-pipe-full-drops\t0\n" + + "questions\t28763276\nreal-memory-usage\t280465408\nresource-limits\t0\nsecurity-status\t1\n" + + "server-parse-errors\t0\nservfail-answers\t300249\nspoof-prevents\t0\nsys-msec\t1296588\n" + + "tcp-client-overflow\t0\ntcp-clients\t0\ntcp-outqueries\t116\ntcp-questions\t130\nthrottle-entries\t33\n" + + "throttled-out\t13187\nthrottled-outqueries\t13187\ntoo-old-drops\t2\nudp-in-errors\t4\n" + + "udp-noport-errors\t2908\nudp-recvbuf-errors\t0\nudp-sndbuf-errors\t0\nunauthorized-tcp\t0\n" + + "unauthorized-udp\t0\nunexpected-packets\t0\nunreachables\t1695\nuptime\t165725\nuser-msec\t1266384\n" + + "x-our-latency\t19\nx-ourtime-slow\t632\nx-ourtime0-1\t3060079\nx-ourtime1-2\t3351\nx-ourtime16-32\t197\n" + + "x-ourtime2-4\t302\nx-ourtime4-8\t194\nx-ourtime8-16\t24\n" + +// integer overflow +var intOverflowMetrics = "all-outqueries\t18446744073709550195\nanswers-slow\t36451\nanswers0-1\t177297\nanswers1-10\t1209328\n" + + "answers10-100\t1238786\nanswers100-1000\t402917\nauth-zone-queries\t4\nauth4-answers-slow\t44248\n" + + "auth4-answers0-1\t59169\nauth4-answers1-10\t1747403\nauth4-answers10-100\t1315621\n" + + "auth4-answers100-1000\t424683\nauth6-answers-slow\t0\nauth6-answers0-1\t0\nauth6-answers1-10\t0\n" + + "auth6-answers10-100\t0\nauth6-answers100-1000\t0\ncache-entries\t295917\ncache-hits\t148630\n" + + "cache-misses\t2916149\ncase-mismatches\t0\nchain-resends\t418602\nclient-parse-errors\t0\n" + + "concurrent-queries\t0\ndlg-only-drops\t0\ndnssec-queries\t151536\ndnssec-result-bogus\t0\n" + + "dnssec-result-indeterminate\t0\ndnssec-result-insecure\t0\ndnssec-result-nta\t0\n" + + "dnssec-result-secure\t46\ndnssec-validations\t46\ndont-outqueries\t62\necs-queries\t0\n" + + "ecs-responses\t0\nedns-ping-matches\t0\nedns-ping-mismatches\t0\nfailed-host-entries\t33\n" + + 
"fd-usage\t32\nignored-packets\t0\nipv6-outqueries\t0\nipv6-questions\t0\nmalloc-bytes\t0\n" + + "max-cache-entries\t1000000\nmax-mthread-stack\t33747\nmax-packetcache-entries\t500000\n" + + "negcache-entries\t100070\nno-packet-error\t0\nnoedns-outqueries\t72409\nnoerror-answers\t25155259\n" + + "noping-outqueries\t0\nnsset-invalidations\t2385\nnsspeeds-entries\t3571\nnxdomain-answers\t3307768\n" + + "outgoing-timeouts\t43876\noutgoing4-timeouts\t43876\noutgoing6-timeouts\t0\nover-capacity-drops\t0\n" + + "packetcache-entries\t80756\npacketcache-hits\t25698497\npacketcache-misses\t3064625\npolicy-drops\t0\n" + + "policy-result-custom\t0\npolicy-result-drop\t0\npolicy-result-noaction\t3064779\npolicy-result-nodata\t0\n" + + "policy-result-nxdomain\t0\npolicy-result-truncate\t0\nqa-latency\t6587\nquery-pipe-full-drops\t0\n" + + "questions\t28763276\nreal-memory-usage\t280465408\nresource-limits\t0\nsecurity-status\t1\n" + + "server-parse-errors\t0\nservfail-answers\t300249\nspoof-prevents\t0\nsys-msec\t1296588\n" + + "tcp-client-overflow\t0\ntcp-clients\t0\ntcp-outqueries\t116\ntcp-questions\t130\nthrottle-entries\t33\n" + + "throttled-out\t13187\nthrottled-outqueries\t13187\ntoo-old-drops\t2\nudp-in-errors\t4\n" + + "udp-noport-errors\t2908\nudp-recvbuf-errors\t0\nudp-sndbuf-errors\t0\nunauthorized-tcp\t0\n" + + "unauthorized-udp\t0\nunexpected-packets\t0\nunreachables\t1695\nuptime\t165725\nuser-msec\t1266384\n" + + "x-our-latency\t19\nx-ourtime-slow\t632\nx-ourtime0-1\t3060079\nx-ourtime1-2\t3351\nx-ourtime16-32\t197\n" + + "x-ourtime2-4\t302\nx-ourtime4-8\t194\nx-ourtime8-16\t24\n" + +func (s statServer) serverSocket(l *net.UnixConn) { + + for { + go func(c *net.UnixConn) { + buf := make([]byte, 1024) + n, remote, _ := c.ReadFromUnix(buf) + + data := buf[:n] + if string(data) == "get-all\n" { + c.WriteToUnix([]byte(metrics), remote) + c.Close() + } + }(l) + } +} + +func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { + // We create a fake server to return test data + controlSocket := "/tmp/pdns5724354148158589552.controlsocket" + addr, err := net.ResolveUnixAddr("unixgram", controlSocket) + if err != nil { + t.Fatal("Cannot parse unix socket") + } + socket, err := net.ListenUnixgram("unixgram", addr) + if err != nil { + t.Fatal("Cannot initialize server on port ") + } + + defer socket.Close() + defer os.Remove(controlSocket) + + s := statServer{} + go s.serverSocket(socket) + + p := &PowerdnsRecursor{ + UnixSockets: []string{controlSocket}, + SocketDir: "/tmp", + } + + var acc testutil.Accumulator + + err = acc.GatherError(p.Gather) + require.NoError(t, err) + + intMetrics := []string{"all-outqueries", "answers-slow", "answers0-1", "answers1-10", + "answers10-100", "answers100-1000", "auth-zone-queries", "auth4-answers-slow", + "auth4-answers0-1", "auth4-answers1-10", "auth4-answers10-100", "auth4-answers100-1000", + "auth6-answers-slow", "auth6-answers0-1", "auth6-answers1-10", "auth6-answers10-100", + "auth6-answers100-1000", "cache-entries", "cache-hits", "cache-misses", "case-mismatches", + "chain-resends", "client-parse-errors", "concurrent-queries", "dlg-only-drops", "dnssec-queries", + "dnssec-result-bogus", "dnssec-result-indeterminate", "dnssec-result-insecure", "dnssec-result-nta", + "dnssec-result-secure", "dnssec-validations", "dont-outqueries", "ecs-queries", "ecs-responses", + "edns-ping-matches", "edns-ping-mismatches", "failed-host-entries", "fd-usage", "ignored-packets", + "ipv6-outqueries", "ipv6-questions", "malloc-bytes", "max-cache-entries", "max-mthread-stack", 
+ "max-packetcache-entries", "negcache-entries", "no-packet-error", "noedns-outqueries", + "noerror-answers", "noping-outqueries", "nsset-invalidations", "nsspeeds-entries", + "nxdomain-answers", "outgoing-timeouts", "outgoing4-timeouts", "outgoing6-timeouts", + "over-capacity-drops", "packetcache-entries", "packetcache-hits", "packetcache-misses", + "policy-drops", "policy-result-custom", "policy-result-drop", "policy-result-noaction", + "policy-result-nodata", "policy-result-nxdomain", "policy-result-truncate", "qa-latency", + "query-pipe-full-drops", "questions", "real-memory-usage", "resource-limits", "security-status", + "server-parse-errors", "servfail-answers", "spoof-prevents", "sys-msec", "tcp-client-overflow", + "tcp-clients", "tcp-outqueries", "tcp-questions", "throttle-entries", "throttled-out", "throttled-outqueries", + "too-old-drops", "udp-in-errors", "udp-noport-errors", "udp-recvbuf-errors", "udp-sndbuf-errors", + "unauthorized-tcp", "unauthorized-udp", "unexpected-packets", "unreachables", "uptime", "user-msec", + "x-our-latency", "x-ourtime-slow", "x-ourtime0-1", "x-ourtime1-2", "x-ourtime16-32", + "x-ourtime2-4", "x-ourtime4-8", "x-ourtime8-16"} + + for _, metric := range intMetrics { + assert.True(t, acc.HasInt64Field("powerdns_recursor", metric), metric) + } +} + +func TestPowerdnsRecursorParseMetrics(t *testing.T) { + values := parseResponse(metrics) + + tests := []struct { + key string + value int64 + }{ + {"all-outqueries", 3591637}, + {"answers-slow", 36451}, + {"answers0-1", 177297}, + {"answers1-10", 1209328}, + {"answers10-100", 1238786}, + {"answers100-1000", 402917}, + {"auth-zone-queries", 4}, + {"auth4-answers-slow", 44248}, + {"auth4-answers0-1", 59169}, + {"auth4-answers1-10", 1747403}, + {"auth4-answers10-100", 1315621}, + {"auth4-answers100-1000", 424683}, + {"auth6-answers-slow", 0}, + {"auth6-answers0-1", 0}, + {"auth6-answers1-10", 0}, + {"auth6-answers10-100", 0}, + {"auth6-answers100-1000", 0}, + {"cache-entries", 295917}, + {"cache-hits", 148630}, + {"cache-misses", 2916149}, + {"case-mismatches", 0}, + {"chain-resends", 418602}, + {"client-parse-errors", 0}, + {"concurrent-queries", 0}, + {"dlg-only-drops", 0}, + {"dnssec-queries", 151536}, + {"dnssec-result-bogus", 0}, + {"dnssec-result-indeterminate", 0}, + {"dnssec-result-insecure", 0}, + {"dnssec-result-nta", 0}, + {"dnssec-result-secure", 46}, + {"dnssec-validations", 46}, + {"dont-outqueries", 62}, + {"ecs-queries", 0}, + {"ecs-responses", 0}, + {"edns-ping-matches", 0}, + {"edns-ping-mismatches", 0}, + {"failed-host-entries", 33}, + {"fd-usage", 32}, + {"ignored-packets", 0}, + {"ipv6-outqueries", 0}, + {"ipv6-questions", 0}, + {"malloc-bytes", 0}, + {"max-cache-entries", 1000000}, + {"max-mthread-stack", 33747}, + {"max-packetcache-entries", 500000}, + {"negcache-entries", 100070}, + {"no-packet-error", 0}, + {"noedns-outqueries", 72409}, + {"noerror-answers", 25155259}, + {"noping-outqueries", 0}, + {"nsset-invalidations", 2385}, + {"nsspeeds-entries", 3571}, + {"nxdomain-answers", 3307768}, + {"outgoing-timeouts", 43876}, + {"outgoing4-timeouts", 43876}, + {"outgoing6-timeouts", 0}, + {"over-capacity-drops", 0}, + {"packetcache-entries", 80756}, + {"packetcache-hits", 25698497}, + {"packetcache-misses", 3064625}, + {"policy-drops", 0}, + {"policy-result-custom", 0}, + {"policy-result-drop", 0}, + {"policy-result-noaction", 3064779}, + {"policy-result-nodata", 0}, + {"policy-result-nxdomain", 0}, + {"policy-result-truncate", 0}, + {"qa-latency", 6587}, + {"query-pipe-full-drops", 0}, + 
{"questions", 28763276}, + {"real-memory-usage", 280465408}, + {"resource-limits", 0}, + {"security-status", 1}, + {"server-parse-errors", 0}, + {"servfail-answers", 300249}, + {"spoof-prevents", 0}, + {"sys-msec", 1296588}, + {"tcp-client-overflow", 0}, + {"tcp-clients", 0}, + {"tcp-outqueries", 116}, + {"tcp-questions", 130}, + {"throttle-entries", 33}, + {"throttled-out", 13187}, + {"throttled-outqueries", 13187}, + {"too-old-drops", 2}, + {"udp-in-errors", 4}, + {"udp-noport-errors", 2908}, + {"udp-recvbuf-errors", 0}, + {"udp-sndbuf-errors", 0}, + {"unauthorized-tcp", 0}, + {"unauthorized-udp", 0}, + {"unexpected-packets", 0}, + {"unreachables", 1695}, + {"uptime", 165725}, + {"user-msec", 1266384}, + {"x-our-latency", 19}, + {"x-ourtime-slow", 632}, + {"x-ourtime0-1", 3060079}, + {"x-ourtime1-2", 3351}, + {"x-ourtime16-32", 197}, + {"x-ourtime2-4", 302}, + {"x-ourtime4-8", 194}, + {"x-ourtime8-16", 24}, + } + + for _, test := range tests { + value, ok := values[test.key] + if !ok { + t.Errorf("Did not find key for metric %s in values", test.key) + continue + } + if value != test.value { + t.Errorf("Metric: %s, Expected: %d, actual: %d", + test.key, test.value, value) + } + } +} + +func TestPowerdnsRecursorParseCorruptMetrics(t *testing.T) { + values := parseResponse(corruptMetrics) + + tests := []struct { + key string + value int64 + }{ + {"answers-slow", 36451}, + {"answers0-1", 177297}, + {"answers1-10", 1209328}, + {"answers10-100", 1238786}, + {"answers100-1000", 402917}, + {"auth-zone-queries", 4}, + {"auth4-answers-slow", 44248}, + {"auth4-answers0-1", 59169}, + {"auth4-answers1-10", 1747403}, + {"auth4-answers10-100", 1315621}, + {"auth4-answers100-1000", 424683}, + {"auth6-answers-slow", 0}, + {"auth6-answers0-1", 0}, + {"auth6-answers1-10", 0}, + {"auth6-answers10-100", 0}, + {"auth6-answers100-1000", 0}, + {"cache-entries", 295917}, + {"cache-hits", 148630}, + {"cache-misses", 2916149}, + {"case-mismatches", 0}, + {"chain-resends", 418602}, + {"client-parse-errors", 0}, + {"concurrent-queries", 0}, + {"dlg-only-drops", 0}, + {"dnssec-queries", 151536}, + {"dnssec-result-bogus", 0}, + {"dnssec-result-indeterminate", 0}, + {"dnssec-result-insecure", 0}, + {"dnssec-result-nta", 0}, + {"dnssec-result-secure", 46}, + {"dnssec-validations", 46}, + {"dont-outqueries", 62}, + {"ecs-queries", 0}, + {"ecs-responses", 0}, + {"edns-ping-matches", 0}, + {"edns-ping-mismatches", 0}, + {"failed-host-entries", 33}, + {"fd-usage", 32}, + {"ignored-packets", 0}, + {"ipv6-outqueries", 0}, + {"ipv6-questions", 0}, + {"malloc-bytes", 0}, + {"max-cache-entries", 1000000}, + {"max-mthread-stack", 33747}, + {"max-packetcache-entries", 500000}, + {"negcache-entries", 100070}, + {"no-packet-error", 0}, + {"noedns-outqueries", 72409}, + {"noerror-answers", 25155259}, + {"noping-outqueries", 0}, + {"nsset-invalidations", 2385}, + {"nsspeeds-entries", 3571}, + {"nxdomain-answers", 3307768}, + {"outgoing-timeouts", 43876}, + {"outgoing4-timeouts", 43876}, + {"outgoing6-timeouts", 0}, + {"over-capacity-drops", 0}, + {"packetcache-entries", 80756}, + {"packetcache-hits", 25698497}, + {"packetcache-misses", 3064625}, + {"policy-drops", 0}, + {"policy-result-custom", 0}, + {"policy-result-drop", 0}, + {"policy-result-noaction", 3064779}, + {"policy-result-nodata", 0}, + {"policy-result-nxdomain", 0}, + {"policy-result-truncate", 0}, + {"qa-latency", 6587}, + {"query-pipe-full-drops", 0}, + {"questions", 28763276}, + {"real-memory-usage", 280465408}, + {"resource-limits", 0}, + {"security-status", 1}, + 
{"server-parse-errors", 0}, + {"servfail-answers", 300249}, + {"spoof-prevents", 0}, + {"sys-msec", 1296588}, + {"tcp-client-overflow", 0}, + {"tcp-clients", 0}, + {"tcp-outqueries", 116}, + {"tcp-questions", 130}, + {"throttle-entries", 33}, + {"throttled-out", 13187}, + {"throttled-outqueries", 13187}, + {"too-old-drops", 2}, + {"udp-in-errors", 4}, + {"udp-noport-errors", 2908}, + {"udp-recvbuf-errors", 0}, + {"udp-sndbuf-errors", 0}, + {"unauthorized-tcp", 0}, + {"unauthorized-udp", 0}, + {"unexpected-packets", 0}, + {"unreachables", 1695}, + {"uptime", 165725}, + {"user-msec", 1266384}, + {"x-our-latency", 19}, + {"x-ourtime-slow", 632}, + {"x-ourtime0-1", 3060079}, + {"x-ourtime1-2", 3351}, + {"x-ourtime16-32", 197}, + {"x-ourtime2-4", 302}, + {"x-ourtime4-8", 194}, + {"x-ourtime8-16", 24}, + } + + for _, test := range tests { + value, ok := values[test.key] + if !ok { + t.Errorf("Did not find key for metric %s in values", test.key) + continue + } + if value != test.value { + t.Errorf("Metric: %s, Expected: %d, actual: %d", + test.key, test.value, value) + } + } +} + +func TestPowerdnsRecursorParseIntOverflowMetrics(t *testing.T) { + values := parseResponse(intOverflowMetrics) + + tests := []struct { + key string + value int64 + }{ + {"answers-slow", 36451}, + {"answers0-1", 177297}, + {"answers1-10", 1209328}, + {"answers10-100", 1238786}, + {"answers100-1000", 402917}, + {"auth-zone-queries", 4}, + {"auth4-answers-slow", 44248}, + {"auth4-answers0-1", 59169}, + {"auth4-answers1-10", 1747403}, + {"auth4-answers10-100", 1315621}, + {"auth4-answers100-1000", 424683}, + {"auth6-answers-slow", 0}, + {"auth6-answers0-1", 0}, + {"auth6-answers1-10", 0}, + {"auth6-answers10-100", 0}, + {"auth6-answers100-1000", 0}, + {"cache-entries", 295917}, + {"cache-hits", 148630}, + {"cache-misses", 2916149}, + {"case-mismatches", 0}, + {"chain-resends", 418602}, + {"client-parse-errors", 0}, + {"concurrent-queries", 0}, + {"dlg-only-drops", 0}, + {"dnssec-queries", 151536}, + {"dnssec-result-bogus", 0}, + {"dnssec-result-indeterminate", 0}, + {"dnssec-result-insecure", 0}, + {"dnssec-result-nta", 0}, + {"dnssec-result-secure", 46}, + {"dnssec-validations", 46}, + {"dont-outqueries", 62}, + {"ecs-queries", 0}, + {"ecs-responses", 0}, + {"edns-ping-matches", 0}, + {"edns-ping-mismatches", 0}, + {"failed-host-entries", 33}, + {"fd-usage", 32}, + {"ignored-packets", 0}, + {"ipv6-outqueries", 0}, + {"ipv6-questions", 0}, + {"malloc-bytes", 0}, + {"max-cache-entries", 1000000}, + {"max-mthread-stack", 33747}, + {"max-packetcache-entries", 500000}, + {"negcache-entries", 100070}, + {"no-packet-error", 0}, + {"noedns-outqueries", 72409}, + {"noerror-answers", 25155259}, + {"noping-outqueries", 0}, + {"nsset-invalidations", 2385}, + {"nsspeeds-entries", 3571}, + {"nxdomain-answers", 3307768}, + {"outgoing-timeouts", 43876}, + {"outgoing4-timeouts", 43876}, + {"outgoing6-timeouts", 0}, + {"over-capacity-drops", 0}, + {"packetcache-entries", 80756}, + {"packetcache-hits", 25698497}, + {"packetcache-misses", 3064625}, + {"policy-drops", 0}, + {"policy-result-custom", 0}, + {"policy-result-drop", 0}, + {"policy-result-noaction", 3064779}, + {"policy-result-nodata", 0}, + {"policy-result-nxdomain", 0}, + {"policy-result-truncate", 0}, + {"qa-latency", 6587}, + {"query-pipe-full-drops", 0}, + {"questions", 28763276}, + {"real-memory-usage", 280465408}, + {"resource-limits", 0}, + {"security-status", 1}, + {"server-parse-errors", 0}, + {"servfail-answers", 300249}, + {"spoof-prevents", 0}, + {"sys-msec", 1296588}, + 
{"tcp-client-overflow", 0}, + {"tcp-clients", 0}, + {"tcp-outqueries", 116}, + {"tcp-questions", 130}, + {"throttle-entries", 33}, + {"throttled-out", 13187}, + {"throttled-outqueries", 13187}, + {"too-old-drops", 2}, + {"udp-in-errors", 4}, + {"udp-noport-errors", 2908}, + {"udp-recvbuf-errors", 0}, + {"udp-sndbuf-errors", 0}, + {"unauthorized-tcp", 0}, + {"unauthorized-udp", 0}, + {"unexpected-packets", 0}, + {"unreachables", 1695}, + {"uptime", 165725}, + {"user-msec", 1266384}, + {"x-our-latency", 19}, + {"x-ourtime-slow", 632}, + {"x-ourtime0-1", 3060079}, + {"x-ourtime1-2", 3351}, + {"x-ourtime16-32", 197}, + {"x-ourtime2-4", 302}, + {"x-ourtime4-8", 194}, + {"x-ourtime8-16", 24}, + } + + for _, test := range tests { + value, ok := values[test.key] + if !ok { + t.Errorf("Did not find key for metric %s in values", test.key) + continue + } + if value != test.value { + t.Errorf("Metric: %s, Expected: %d, actual: %d", + test.key, test.value, value) + } + } +} From 2c3fa9abdbbe15e2ac8da88a22fc427670deeb17 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 23 Apr 2019 16:36:25 -0700 Subject: [PATCH 0780/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3ad450470..df57b84af 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ - [bind](/plugins/inputs/bind/README.md) - Contributed by @dswarbrick & @danielllek - [github](/plugins/inputs/github/README.md) - Contributed by @influxdata +- [powerdns_recursor](/plugins/inputs/powerdns_recursor/README.md) - Contributed by @dupondje #### New Serializers From f5b44fd0bdbe14ad6671b06fc84fdc0dce0c7efc Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 24 Apr 2019 13:51:21 -0700 Subject: [PATCH 0781/1815] Wait for server socket to close in pdns_recursor test --- .../powerdns_recursor_test.go | 51 +++++++++++-------- 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go index b62a6224d..629fe81c8 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go @@ -3,7 +3,9 @@ package powerdns_recursor import ( "net" "os" + "sync" "testing" + "time" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" @@ -95,22 +97,6 @@ var intOverflowMetrics = "all-outqueries\t18446744073709550195\nanswers-slow\t36 "x-our-latency\t19\nx-ourtime-slow\t632\nx-ourtime0-1\t3060079\nx-ourtime1-2\t3351\nx-ourtime16-32\t197\n" + "x-ourtime2-4\t302\nx-ourtime4-8\t194\nx-ourtime8-16\t24\n" -func (s statServer) serverSocket(l *net.UnixConn) { - - for { - go func(c *net.UnixConn) { - buf := make([]byte, 1024) - n, remote, _ := c.ReadFromUnix(buf) - - data := buf[:n] - if string(data) == "get-all\n" { - c.WriteToUnix([]byte(metrics), remote) - c.Close() - } - }(l) - } -} - func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { // We create a fake server to return test data controlSocket := "/tmp/pdns5724354148158589552.controlsocket" @@ -120,14 +106,35 @@ func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { } socket, err := net.ListenUnixgram("unixgram", addr) if err != nil { - t.Fatal("Cannot initialize server on port ") + t.Fatal("Cannot initialize server on port") } - defer socket.Close() - defer os.Remove(controlSocket) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer func() { + socket.Close() + os.Remove(controlSocket) + wg.Done() + }() - s := 
statServer{} - go s.serverSocket(socket) + for { + buf := make([]byte, 1024) + n, remote, err := socket.ReadFromUnix(buf) + if err != nil { + socket.Close() + return + } + + data := buf[:n] + if string(data) == "get-all\n" { + socket.WriteToUnix([]byte(metrics), remote) + socket.Close() + } + + time.Sleep(100 * time.Millisecond) + } + }() p := &PowerdnsRecursor{ UnixSockets: []string{controlSocket}, @@ -139,6 +146,8 @@ func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { err = acc.GatherError(p.Gather) require.NoError(t, err) + wg.Wait() + intMetrics := []string{"all-outqueries", "answers-slow", "answers0-1", "answers1-10", "answers10-100", "answers100-1000", "auth-zone-queries", "auth4-answers-slow", "auth4-answers0-1", "auth4-answers1-10", "auth4-answers10-100", "auth4-answers100-1000", From f32b064d6a6f38c6c809b09ca8ae4ef53937e9d8 Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Thu, 25 Apr 2019 15:22:48 -0400 Subject: [PATCH 0782/1815] Fix race condition in the Wavefront parser (#5764) --- plugins/parsers/wavefront/parser.go | 69 +++++++++++++++++++---------- 1 file changed, 46 insertions(+), 23 deletions(-) diff --git a/plugins/parsers/wavefront/parser.go b/plugins/parsers/wavefront/parser.go index f5fc88dbf..62fe8f5ef 100644 --- a/plugins/parsers/wavefront/parser.go +++ b/plugins/parsers/wavefront/parser.go @@ -6,6 +6,7 @@ import ( "io" "log" "strconv" + "sync" "time" "github.com/influxdata/telegraf" @@ -22,7 +23,12 @@ type Point struct { Tags map[string]string } -// Parser represents a parser. +type WavefrontParser struct { + parsers *sync.Pool + defaultTags map[string]string +} + +// PointParser is a thread-unsafe parser and must be kept in a pool. type PointParser struct { s *PointScanner buf struct { @@ -30,10 +36,10 @@ type PointParser struct { lit []string // last read n literals n int // unscanned buffer size (max=2) } - scanBuf bytes.Buffer // buffer reused for scanning tokens - writeBuf bytes.Buffer // buffer reused for parsing elements - Elements []ElementParser - defaultTags map[string]string + scanBuf bytes.Buffer // buffer reused for scanning tokens + writeBuf bytes.Buffer // buffer reused for parsing elements + Elements []ElementParser + parent *WavefrontParser } // Returns a slice of ElementParser's for the Graphite format @@ -47,9 +53,40 @@ func NewWavefrontElements() []ElementParser { return elements } -func NewWavefrontParser(defaultTags map[string]string) *PointParser { +func NewWavefrontParser(defaultTags map[string]string) *WavefrontParser { + wp := &WavefrontParser{defaultTags: defaultTags} + wp.parsers = &sync.Pool{ + New: func() interface{} { + return NewPointParser(wp) + }, + } + return wp +} + +func NewPointParser(parent *WavefrontParser) *PointParser { elements := NewWavefrontElements() - return &PointParser{Elements: elements, defaultTags: defaultTags} + return &PointParser{Elements: elements, parent: parent} +} + +func (p *WavefrontParser) ParseLine(line string) (telegraf.Metric, error) { + buf := []byte(line) + + metrics, err := p.Parse(buf) + if err != nil { + return nil, err + } + + if len(metrics) > 0 { + return metrics[0], nil + } + + return nil, nil +} + +func (p *WavefrontParser) Parse(buf []byte) ([]telegraf.Metric, error) { + pp := p.parsers.Get().(*PointParser) + defer p.parsers.Put(pp) + return pp.Parse(buf) } func (p *PointParser) Parse(buf []byte) ([]telegraf.Metric, error) { @@ -91,21 +128,7 @@ func (p *PointParser) Parse(buf []byte) ([]telegraf.Metric, error) { return metrics, nil } -func (p *PointParser) ParseLine(line string) 
(telegraf.Metric, error) { - buf := []byte(line) - metrics, err := p.Parse(buf) - if err != nil { - return nil, err - } - - if len(metrics) > 0 { - return metrics[0], nil - } - - return nil, nil -} - -func (p *PointParser) SetDefaultTags(tags map[string]string) { +func (p *WavefrontParser) SetDefaultTags(tags map[string]string) { p.defaultTags = tags } @@ -119,7 +142,7 @@ func (p *PointParser) convertPointToTelegrafMetric(points []Point) ([]telegraf.M tags[k] = v } // apply default tags after parsed tags - for k, v := range p.defaultTags { + for k, v := range p.parent.defaultTags { tags[k] = v } From 10b2a3de30cfa93af9bab39bb2449869f42ea807 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 25 Apr 2019 12:24:34 -0700 Subject: [PATCH 0783/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index df57b84af..3dc3f5aab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,12 @@ - [#5730](https://github.com/influxdata/telegraf/pull/5730): Don't start telegraf when stale pidfile found. - [#5477](https://github.com/influxdata/telegraf/pull/5477): Support Minecraft server 1.13 and newer in minecraft input. +## v1.10.4 [unreleased] + +#### Bugfixes + +- [#5764](https://github.com/influxdata/telegraf/pull/5764): Fix race condition in the Wavefront parser. + ## v1.10.3 [2019-04-16] #### Bugfixes From 9c3af1e6ac1c6e619cef0b56fa53798c4f9369d5 Mon Sep 17 00:00:00 2001 From: Matthew Crenshaw <3420325+sgtsquiggs@users.noreply.github.com> Date: Thu, 25 Apr 2019 20:21:02 -0400 Subject: [PATCH 0784/1815] Add pagefault data to procstat input plugin (#5769) --- plugins/inputs/procstat/README.md | 4 ++++ plugins/inputs/procstat/process.go | 1 + plugins/inputs/procstat/procstat.go | 8 ++++++++ plugins/inputs/procstat/procstat_test.go | 4 ++++ 4 files changed, 17 insertions(+) diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index dfe95291a..277ec2c56 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -85,6 +85,8 @@ implemented as a WMI query. The pattern allows fuzzy matching using only - cgroup (when defined) - win_service (when defined) - fields: + - child_major_faults (int) + - child_minor_faults (int) - cpu_time (int) - cpu_time_guest (float) - cpu_time_guest_nice (float) @@ -99,12 +101,14 @@ implemented as a WMI query. 
The pattern allows fuzzy matching using only
     - cpu_time_user (float)
     - cpu_usage (float)
     - involuntary_context_switches (int)
+    - major_faults (int)
     - memory_data (int)
     - memory_locked (int)
     - memory_rss (int)
     - memory_stack (int)
     - memory_swap (int)
     - memory_vms (int)
+    - minor_faults (int)
     - nice_priority (int)
     - num_fds (int, *telegraf* may need to be run as **root**)
     - num_threads (int)
diff --git a/plugins/inputs/procstat/process.go b/plugins/inputs/procstat/process.go
index 94a57c192..7e8c4859d 100644
--- a/plugins/inputs/procstat/process.go
+++ b/plugins/inputs/procstat/process.go
@@ -12,6 +12,7 @@ type Process interface {
 	PID() PID
 	Tags() map[string]string
 
+	PageFaults() (*process.PageFaultsStat, error)
 	IOCounters() (*process.IOCountersStat, error)
 	MemoryInfo() (*process.MemoryInfoStat, error)
 	Name() (string, error)
diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go
index 55552bb4a..2eab899c9 100644
--- a/plugins/inputs/procstat/procstat.go
+++ b/plugins/inputs/procstat/procstat.go
@@ -200,6 +200,14 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) {
 		fields[prefix+"involuntary_context_switches"] = ctx.Involuntary
 	}
 
+	faults, err := proc.PageFaults()
+	if err == nil {
+		fields[prefix+"minor_faults"] = faults.MinorFaults
+		fields[prefix+"major_faults"] = faults.MajorFaults
+		fields[prefix+"child_minor_faults"] = faults.ChildMinorFaults
+		fields[prefix+"child_major_faults"] = faults.ChildMajorFaults
+	}
+
 	io, err := proc.IOCounters()
 	if err == nil {
 		fields[prefix+"read_count"] = io.ReadCount
diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go
index 191c056ea..bf03f7599 100644
--- a/plugins/inputs/procstat/procstat_test.go
+++ b/plugins/inputs/procstat/procstat_test.go
@@ -116,6 +116,10 @@ func (p *testProc) Tags() map[string]string {
 	return p.tags
 }
 
+func (p *testProc) PageFaults() (*process.PageFaultsStat, error) {
+	return &process.PageFaultsStat{}, nil
+}
+
 func (p *testProc) IOCounters() (*process.IOCountersStat, error) {
 	return &process.IOCountersStat{}, nil
 }

From 4ad813aecd599d3f17e006cda0d6bbe66c975141 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Thu, 25 Apr 2019 17:21:48 -0700
Subject: [PATCH 0785/1815] Update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3dc3f5aab..f13be70b1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -30,6 +30,7 @@
 - [#5544](https://github.com/influxdata/telegraf/pull/5544): Use more efficient GetMetricData API to collect cloudwatch metrics.
 - [#5544](https://github.com/influxdata/telegraf/pull/5544): Allow selection of collected statistic types in cloudwatch input.
 - [#5757](https://github.com/influxdata/telegraf/pull/5757): Speed up interface stat collection in net input.
+- [#5769](https://github.com/influxdata/telegraf/pull/5769): Add pagefault data to procstat input plugin.
#### Bugfixes

From 66153625fb944226a4bfb05e1bfed829ae89b224 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Thu, 25 Apr 2019 20:06:39 -0700
Subject: [PATCH 0786/1815] Set socket permissions for unix domain sockets
 (#5760)

---
 plugins/inputs/socket_listener/README.md      |  7 +++++
 .../inputs/socket_listener/socket_listener.go | 31 +++++++++++++++++++
 2 files changed, 38 insertions(+)

diff --git a/plugins/inputs/socket_listener/README.md b/plugins/inputs/socket_listener/README.md
index 2f1a0572e..1740d8bcf 100644
--- a/plugins/inputs/socket_listener/README.md
+++ b/plugins/inputs/socket_listener/README.md
@@ -25,6 +25,13 @@ This is a sample configuration for the plugin.
   # service_address = "unix:///tmp/telegraf.sock"
   # service_address = "unixgram:///tmp/telegraf.sock"
 
+  ## Change the file mode bits on unix sockets. These permissions may not be
+  ## respected by some platforms; to safely restrict write permissions, it is best
+  ## to place the socket into a directory that has previously been created
+  ## with the desired permissions.
+  ## ex: socket_mode = "777"
+  # socket_mode = ""
+
   ## Maximum number of concurrent connections.
   ## Only applies to stream sockets (e.g. TCP).
   ## 0 (default) is unlimited.
diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go
index ed007a00a..d29cff582 100644
--- a/plugins/inputs/socket_listener/socket_listener.go
+++ b/plugins/inputs/socket_listener/socket_listener.go
@@ -8,6 +8,7 @@ import (
 	"log"
 	"net"
 	"os"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -165,6 +166,7 @@ type SocketListener struct {
 	ReadBufferSize  internal.Size      `toml:"read_buffer_size"`
 	ReadTimeout     *internal.Duration `toml:"read_timeout"`
 	KeepAlivePeriod *internal.Duration `toml:"keep_alive_period"`
+	SocketMode      string             `toml:"socket_mode"`
 	tlsint.ServerConfig
 
 	parsers.Parser
@@ -190,6 +192,13 @@ func (sl *SocketListener) SampleConfig() string {
   # service_address = "unix:///tmp/telegraf.sock"
   # service_address = "unixgram:///tmp/telegraf.sock"
 
+  ## Change the file mode bits on unix sockets. These permissions may not be
+  ## respected by some platforms; to safely restrict write permissions, it is best
+  ## to place the socket into a directory that has previously been created
+  ## with the desired permissions.
+  ## ex: socket_mode = "777"
+  # socket_mode = ""
+
   ## Maximum number of concurrent connections.
   ## Only applies to stream sockets (e.g. TCP).
   ## 0 (default) is unlimited.
@@ -275,6 +284,17 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error {
 	log.Printf("I!
[inputs.socket_listener] Listening on %s://%s", protocol, l.Addr()) + // Set permissions on socket + if (spl[0] == "unix" || spl[0] == "unixpacket") && sl.SocketMode != "" { + // Convert from octal in string to int + i, err := strconv.ParseUint(sl.SocketMode, 8, 32) + if err != nil { + return err + } + + os.Chmod(spl[1], os.FileMode(uint32(i))) + } + ssl := &streamSocketListener{ Listener: l, SocketListener: sl, @@ -289,6 +309,17 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { return err } + // Set permissions on socket + if spl[0] == "unixgram" && sl.SocketMode != "" { + // Convert from octal in string to int + i, err := strconv.ParseUint(sl.SocketMode, 8, 32) + if err != nil { + return err + } + + os.Chmod(spl[1], os.FileMode(uint32(i))) + } + if sl.ReadBufferSize.Size > 0 { if srb, ok := pc.(setReadBufferer); ok { srb.SetReadBuffer(int(sl.ReadBufferSize.Size)) From af8137eab724a0050a74e0fbdc2d1825b8058698 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 25 Apr 2019 20:08:17 -0700 Subject: [PATCH 0787/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f13be70b1..7ae83c40f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ - [#5544](https://github.com/influxdata/telegraf/pull/5544): Allow selection of collected statistic types in cloudwatch input. - [#5757](https://github.com/influxdata/telegraf/pull/5757): Speed up interface stat collection in net input. - [#5769](https://github.com/influxdata/telegraf/pull/5769): Add pagefault data to procstat input plugin. +- [#5760](https://github.com/influxdata/telegraf/pull/5760): Add option to set permissions for unix domain sockets to socket_listener. #### Bugfixes From 32f0cb16f5ae8e66da4fbeee17e63200e5bb2640 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Thu, 25 Apr 2019 21:19:58 -0600 Subject: [PATCH 0788/1815] Update naoina/toml library dependency (#5513) --- Gopkg.lock | 6 +- Gopkg.toml | 2 +- internal/config/config_test.go | 75 +++++++++++++++++++ internal/config/testdata/inline_table.toml | 7 ++ internal/config/testdata/invalid_field.toml | 2 + internal/config/testdata/non_slice_slice.toml | 4 + internal/config/testdata/slice_comment.toml | 5 ++ internal/config/testdata/special_types.toml | 9 +++ .../config/testdata/wrong_field_type.toml | 2 + .../config/testdata/wrong_field_type2.toml | 2 + 10 files changed, 110 insertions(+), 4 deletions(-) create mode 100644 internal/config/testdata/inline_table.toml create mode 100644 internal/config/testdata/invalid_field.toml create mode 100644 internal/config/testdata/non_slice_slice.toml create mode 100644 internal/config/testdata/slice_comment.toml create mode 100644 internal/config/testdata/special_types.toml create mode 100644 internal/config/testdata/wrong_field_type.toml create mode 100644 internal/config/testdata/wrong_field_type2.toml diff --git a/Gopkg.lock b/Gopkg.lock index 1521eb2cd..79bb78c10 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -654,15 +654,15 @@ revision = "c43482518d410361b6c383d7aebce33d0471d7bc" [[projects]] - branch = "master" - digest = "1:7fb6cc9607eaa6ef309edebc42b57f704244bd4b9ab23bff128829c4ad09b95d" + branch = "telegraf" + digest = "1:65e98c3d449a34fe4644b503148d3a7244ceabe13f8bf71c2cfecfc2bdce05e9" name = "github.com/influxdata/toml" packages = [ ".", "ast", ] pruneopts = "" - revision = "2a2e3012f7cfbef64091cc79776311e65dfa211b" + revision = "270119a8ce653b297f12189c9099ef1409979f2b" [[projects]] branch = "master" diff 
--git a/Gopkg.toml b/Gopkg.toml index 057af5e3b..568f9fda7 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -80,7 +80,7 @@ [[constraint]] name = "github.com/influxdata/toml" - branch = "master" + branch = "telegraf" [[constraint]] name = "github.com/influxdata/wlog" diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 77b0dffd4..f05419eef 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -5,13 +5,17 @@ import ( "testing" "time" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/models" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/exec" + "github.com/influxdata/telegraf/plugins/inputs/http_listener_v2" "github.com/influxdata/telegraf/plugins/inputs/memcached" "github.com/influxdata/telegraf/plugins/inputs/procstat" + httpOut "github.com/influxdata/telegraf/plugins/outputs/http" "github.com/influxdata/telegraf/plugins/parsers" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { @@ -176,3 +180,74 @@ func TestConfig_LoadDirectory(t *testing.T) { assert.Equal(t, pConfig, c.Inputs[3].Config, "Merged Testdata did not produce correct procstat metadata.") } + +func TestConfig_LoadSpecialTypes(t *testing.T) { + c := NewConfig() + err := c.LoadConfig("./testdata/special_types.toml") + assert.NoError(t, err) + require.Equal(t, 1, len(c.Inputs)) + + inputHTTPListener, ok := c.Inputs[0].Input.(*http_listener_v2.HTTPListenerV2) + assert.Equal(t, true, ok) + // Tests telegraf duration parsing. + assert.Equal(t, internal.Duration{Duration: time.Second}, inputHTTPListener.WriteTimeout) + // Tests telegraf size parsing. + assert.Equal(t, internal.Size{Size: 1024 * 1024}, inputHTTPListener.MaxBodySize) + // Tests toml multiline basic strings. 
+ assert.Equal(t, "/path/to/my/cert\n", inputHTTPListener.TLSCert) +} + +func TestConfig_FieldNotDefined(t *testing.T) { + c := NewConfig() + err := c.LoadConfig("./testdata/invalid_field.toml") + require.Error(t, err, "invalid field name") + assert.Equal(t, "Error parsing ./testdata/invalid_field.toml, line 2: field corresponding to `not_a_field' is not defined in http_listener_v2.HTTPListenerV2", err.Error()) + +} + +func TestConfig_WrongFieldType(t *testing.T) { + c := NewConfig() + err := c.LoadConfig("./testdata/wrong_field_type.toml") + require.Error(t, err, "invalid field type") + assert.Equal(t, "Error parsing ./testdata/wrong_field_type.toml, line 2: (http_listener_v2.HTTPListenerV2.Port) cannot unmarshal TOML string into int", err.Error()) + + c = NewConfig() + err = c.LoadConfig("./testdata/wrong_field_type2.toml") + require.Error(t, err, "invalid field type2") + assert.Equal(t, "Error parsing ./testdata/wrong_field_type2.toml, line 2: (http_listener_v2.HTTPListenerV2.Methods) cannot unmarshal TOML string into []string", err.Error()) +} + +func TestConfig_InlineTables(t *testing.T) { + // #4098 + c := NewConfig() + err := c.LoadConfig("./testdata/inline_table.toml") + assert.NoError(t, err) + require.Equal(t, 2, len(c.Outputs)) + + outputHTTP, ok := c.Outputs[1].Output.(*httpOut.HTTP) + assert.Equal(t, true, ok) + assert.Equal(t, map[string]string{"Authorization": "Token $TOKEN", "Content-Type": "application/json"}, outputHTTP.Headers) + assert.Equal(t, []string{"org_id"}, c.Outputs[0].Config.Filter.TagInclude) +} + +func TestConfig_SliceComment(t *testing.T) { + t.Skipf("Skipping until #3642 is resolved") + + c := NewConfig() + err := c.LoadConfig("./testdata/slice_comment.toml") + assert.NoError(t, err) + require.Equal(t, 1, len(c.Outputs)) + + outputHTTP, ok := c.Outputs[0].Output.(*httpOut.HTTP) + assert.Equal(t, []string{"test"}, outputHTTP.Scopes) + assert.Equal(t, true, ok) +} + +func TestConfig_BadOrdering(t *testing.T) { + // #3444: when not using inline tables, care has to be taken so subsequent configuration + // doesn't become part of the table. This is not a bug, but TOML syntax. 
+ c := NewConfig() + err := c.LoadConfig("./testdata/non_slice_slice.toml") + require.Error(t, err, "bad ordering") + assert.Equal(t, "Error parsing ./testdata/non_slice_slice.toml, line 4: cannot unmarshal TOML array into string (need slice)", err.Error()) +} diff --git a/internal/config/testdata/inline_table.toml b/internal/config/testdata/inline_table.toml new file mode 100644 index 000000000..525fdce17 --- /dev/null +++ b/internal/config/testdata/inline_table.toml @@ -0,0 +1,7 @@ +[[outputs.http]] + headers = { Authorization = "Token $TOKEN",Content-Type = "application/json" } + taginclude = ["org_id"] + +[[outputs.http]] + headers = { Authorization = "Token $TOKEN",Content-Type = "application/json" } + taginclude = ["org_id"] diff --git a/internal/config/testdata/invalid_field.toml b/internal/config/testdata/invalid_field.toml new file mode 100644 index 000000000..4c718d7bb --- /dev/null +++ b/internal/config/testdata/invalid_field.toml @@ -0,0 +1,2 @@ +[[inputs.http_listener_v2]] + not_a_field = true diff --git a/internal/config/testdata/non_slice_slice.toml b/internal/config/testdata/non_slice_slice.toml new file mode 100644 index 000000000..f92edcc0b --- /dev/null +++ b/internal/config/testdata/non_slice_slice.toml @@ -0,0 +1,4 @@ +[[outputs.http]] + [outputs.http.headers] + Content-Type = "application/json" + taginclude = ["org_id"] diff --git a/internal/config/testdata/slice_comment.toml b/internal/config/testdata/slice_comment.toml new file mode 100644 index 000000000..1177e5f89 --- /dev/null +++ b/internal/config/testdata/slice_comment.toml @@ -0,0 +1,5 @@ +[[outputs.http]] + scopes = [ + # comment + "test" # comment + ] diff --git a/internal/config/testdata/special_types.toml b/internal/config/testdata/special_types.toml new file mode 100644 index 000000000..24b73ae45 --- /dev/null +++ b/internal/config/testdata/special_types.toml @@ -0,0 +1,9 @@ +[[inputs.http_listener_v2]] + write_timeout = "1s" + max_body_size = "1MiB" + tls_cert = """ +/path/to/my/cert +""" + tls_key = ''' +/path/to/my/key +''' diff --git a/internal/config/testdata/wrong_field_type.toml b/internal/config/testdata/wrong_field_type.toml new file mode 100644 index 000000000..237176e7e --- /dev/null +++ b/internal/config/testdata/wrong_field_type.toml @@ -0,0 +1,2 @@ +[[inputs.http_listener_v2]] + port = "80" diff --git a/internal/config/testdata/wrong_field_type2.toml b/internal/config/testdata/wrong_field_type2.toml new file mode 100644 index 000000000..6f3def792 --- /dev/null +++ b/internal/config/testdata/wrong_field_type2.toml @@ -0,0 +1,2 @@ +[[inputs.http_listener_v2]] + methods = "POST" From c11327ee3410ba05ff332d6e39fdcea062a76463 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 25 Apr 2019 20:28:04 -0700 Subject: [PATCH 0789/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ae83c40f..3c56c4240 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,8 @@ - [#5631](https://github.com/influxdata/telegraf/pull/5631): Create Windows service only when specified or in service manager. - [#5730](https://github.com/influxdata/telegraf/pull/5730): Don't start telegraf when stale pidfile found. - [#5477](https://github.com/influxdata/telegraf/pull/5477): Support Minecraft server 1.13 and newer in minecraft input. +- [#4098](https://github.com/influxdata/telegraf/issues/4098): Fix inline table support in configuration file. 
+- [#1598](https://github.com/influxdata/telegraf/issues/1598): Fix multi-line basic strings support in configuration file. ## v1.10.4 [unreleased] From 643600555306bc1181c3e790569b35cc9badca37 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Thu, 25 Apr 2019 21:34:40 -0600 Subject: [PATCH 0790/1815] Add cli support for outputting sections of the config (#5585) --- cmd/telegraf/telegraf.go | 9 ++- internal/config/config.go | 144 +++++++++++++++++++++++++------------- 2 files changed, 104 insertions(+), 49 deletions(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 5dd29cef7..6d4121ad7 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -41,6 +41,8 @@ var fVersion = flag.Bool("version", false, "display the version and exit") var fSampleConfig = flag.Bool("sample-config", false, "print out full sample configuration") var fPidfile = flag.String("pidfile", "", "file to write our pid to") +var fSectionFilters = flag.String("section-filter", "", + "filter the sections to print, separator is ':'. Valid values are 'agent', 'global_tags', 'outputs', 'processors', 'aggregators' and 'inputs'") var fInputFilters = flag.String("input-filter", "", "filter the inputs to enable, separator is :") var fInputList = flag.Bool("input-list", false, @@ -249,7 +251,10 @@ func main() { flag.Parse() args := flag.Args() - inputFilters, outputFilters := []string{}, []string{} + sectionFilters, inputFilters, outputFilters := []string{}, []string{}, []string{} + if *fSectionFilters != "" { + sectionFilters = strings.Split(":"+strings.TrimSpace(*fSectionFilters)+":", ":") + } if *fInputFilters != "" { inputFilters = strings.Split(":"+strings.TrimSpace(*fInputFilters)+":", ":") } @@ -289,6 +294,7 @@ func main() { return case "config": config.PrintSampleConfig( + sectionFilters, inputFilters, outputFilters, aggregatorFilters, @@ -317,6 +323,7 @@ func main() { return case *fSampleConfig: config.PrintSampleConfig( + sectionFilters, inputFilters, outputFilters, aggregatorFilters, diff --git a/internal/config/config.go b/internal/config/config.go index 4f747113f..263481ebd 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -32,6 +32,10 @@ import ( ) var ( + // Default sections + sectionDefaults = []string{"agent", "global_tags", "outputs", + "processors", "aggregators", "inputs"} + // Default input plugins inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel", "processes", "disk", "diskio"} @@ -212,7 +216,8 @@ var header = `# Telegraf Configuration # them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"), # for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) - +` +var globalTagsConfig = ` # Global tags can be specified here in key="value" format. [global_tags] # dc = "us-east-1" # will tag all metrics with dc=us-east-1 @@ -220,7 +225,8 @@ var header = `# Telegraf Configuration ## Environment variables can be used as tags, and throughout the config file # user = "$USER" - +` +var agentConfig = ` # Configuration for telegraf agent [agent] ## Default data collection interval for all inputs @@ -273,106 +279,137 @@ var header = `# Telegraf Configuration ## If set to true, do no set the "host" tag in the telegraf agent. 
omit_hostname = false +` +var outputHeader = ` ############################################################################### # OUTPUT PLUGINS # ############################################################################### + ` var processorHeader = ` - ############################################################################### # PROCESSOR PLUGINS # ############################################################################### + ` var aggregatorHeader = ` - ############################################################################### # AGGREGATOR PLUGINS # ############################################################################### + ` var inputHeader = ` - ############################################################################### # INPUT PLUGINS # ############################################################################### + ` var serviceInputHeader = ` - ############################################################################### # SERVICE INPUT PLUGINS # ############################################################################### + ` // PrintSampleConfig prints the sample config func PrintSampleConfig( + sectionFilters []string, inputFilters []string, outputFilters []string, aggregatorFilters []string, processorFilters []string, ) { + // print headers fmt.Printf(header) + if len(sectionFilters) == 0 { + sectionFilters = sectionDefaults + } + printFilteredGlobalSections(sectionFilters) + // print output plugins - if len(outputFilters) != 0 { - printFilteredOutputs(outputFilters, false) - } else { - printFilteredOutputs(outputDefaults, false) - // Print non-default outputs, commented - var pnames []string - for pname := range outputs.Outputs { - if !sliceContains(pname, outputDefaults) { - pnames = append(pnames, pname) + if sliceContains("outputs", sectionFilters) { + if len(outputFilters) != 0 { + if len(outputFilters) >= 3 && outputFilters[1] != "none" { + fmt.Printf(outputHeader) } + printFilteredOutputs(outputFilters, false) + } else { + fmt.Printf(outputHeader) + printFilteredOutputs(outputDefaults, false) + // Print non-default outputs, commented + var pnames []string + for pname := range outputs.Outputs { + if !sliceContains(pname, outputDefaults) { + pnames = append(pnames, pname) + } + } + sort.Strings(pnames) + printFilteredOutputs(pnames, true) } - sort.Strings(pnames) - printFilteredOutputs(pnames, true) } // print processor plugins - fmt.Printf(processorHeader) - if len(processorFilters) != 0 { - printFilteredProcessors(processorFilters, false) - } else { - pnames := []string{} - for pname := range processors.Processors { - pnames = append(pnames, pname) + if sliceContains("processors", sectionFilters) { + if len(processorFilters) != 0 { + if len(processorFilters) >= 3 && processorFilters[1] != "none" { + fmt.Printf(processorHeader) + } + printFilteredProcessors(processorFilters, false) + } else { + fmt.Printf(processorHeader) + pnames := []string{} + for pname := range processors.Processors { + pnames = append(pnames, pname) + } + sort.Strings(pnames) + printFilteredProcessors(pnames, true) } - sort.Strings(pnames) - printFilteredProcessors(pnames, true) } - // pring aggregator plugins - fmt.Printf(aggregatorHeader) - if len(aggregatorFilters) != 0 { - printFilteredAggregators(aggregatorFilters, false) - } else { - pnames := []string{} - for pname := range aggregators.Aggregators { - pnames = append(pnames, pname) + // print aggregator plugins + if sliceContains("aggregators", sectionFilters) { + if len(aggregatorFilters) != 0 { + if 
len(aggregatorFilters) >= 3 && aggregatorFilters[1] != "none" { + fmt.Printf(aggregatorHeader) + } + printFilteredAggregators(aggregatorFilters, false) + } else { + fmt.Printf(aggregatorHeader) + pnames := []string{} + for pname := range aggregators.Aggregators { + pnames = append(pnames, pname) + } + sort.Strings(pnames) + printFilteredAggregators(pnames, true) } - sort.Strings(pnames) - printFilteredAggregators(pnames, true) } // print input plugins - fmt.Printf(inputHeader) - if len(inputFilters) != 0 { - printFilteredInputs(inputFilters, false) - } else { - printFilteredInputs(inputDefaults, false) - // Print non-default inputs, commented - var pnames []string - for pname := range inputs.Inputs { - if !sliceContains(pname, inputDefaults) { - pnames = append(pnames, pname) + if sliceContains("inputs", sectionFilters) { + if len(inputFilters) != 0 { + if len(inputFilters) >= 3 && inputFilters[1] != "none" { + fmt.Printf(inputHeader) } + printFilteredInputs(inputFilters, false) + } else { + fmt.Printf(inputHeader) + printFilteredInputs(inputDefaults, false) + // Print non-default inputs, commented + var pnames []string + for pname := range inputs.Inputs { + if !sliceContains(pname, inputDefaults) { + pnames = append(pnames, pname) + } + } + sort.Strings(pnames) + printFilteredInputs(pnames, true) } - sort.Strings(pnames) - printFilteredInputs(pnames, true) } } @@ -447,6 +484,7 @@ func printFilteredInputs(inputFilters []string, commented bool) { return } sort.Strings(servInputNames) + fmt.Printf(serviceInputHeader) for _, name := range servInputNames { printConfig(name, servInputs[name], "inputs", commented) @@ -471,6 +509,16 @@ func printFilteredOutputs(outputFilters []string, commented bool) { } } +func printFilteredGlobalSections(sectionFilters []string) { + if sliceContains("agent", sectionFilters) { + fmt.Printf(agentConfig) + } + + if sliceContains("global_tags", sectionFilters) { + fmt.Printf(globalTagsConfig) + } +} + type printer interface { Description() string SampleConfig() string From d2fbf2414a88919212dea88dc17de037c7f308c8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 25 Apr 2019 20:39:29 -0700 Subject: [PATCH 0791/1815] Add --section-filter to usage --- internal/usage.go | 3 +++ internal/usage_windows.go | 3 +++ 2 files changed, 6 insertions(+) diff --git a/internal/usage.go b/internal/usage.go index a49021b43..c783da3f4 100644 --- a/internal/usage.go +++ b/internal/usage.go @@ -25,6 +25,9 @@ The commands & flags are: --pprof-addr
pprof address to listen on, don't activate pprof if empty --processor-filter filter the processors to enable, separator is : --quiet run in quiet mode + --section-filter filter config sections to output, separator is : + Valid values are 'agent', 'global_tags', 'outputs', + 'processors', 'aggregators' and 'inputs' --sample-config print out full sample configuration --test gather metrics, print them out, and exit; processors, aggregators, and outputs are not run diff --git a/internal/usage_windows.go b/internal/usage_windows.go index 0bdd73026..70842f5ab 100644 --- a/internal/usage_windows.go +++ b/internal/usage_windows.go @@ -26,6 +26,9 @@ The commands & flags are: --processor-filter filter the processors to enable, separator is : --quiet run in quiet mode --sample-config print out full sample configuration + --section-filter filter config sections to output, separator is : + Valid values are 'agent', 'global_tags', 'outputs', + 'processors', 'aggregators' and 'inputs' --test gather metrics, print them out, and exit; processors, aggregators, and outputs are not run --usage print usage for a plugin, ie, 'telegraf --usage mysql' From 52a00b4300aa63e50a0fd44af4bbb24d623a8b5f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 25 Apr 2019 20:40:19 -0700 Subject: [PATCH 0792/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c56c4240..646dac60e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ - [#5757](https://github.com/influxdata/telegraf/pull/5757): Speed up interface stat collection in net input. - [#5769](https://github.com/influxdata/telegraf/pull/5769): Add pagefault data to procstat input plugin. - [#5760](https://github.com/influxdata/telegraf/pull/5760): Add option to set permissions for unix domain sockets to socket_listener. +- [#5585](https://github.com/influxdata/telegraf/pull/5585): Add cli support for outputting sections of the config. 
#### Bugfixes From 2fb62d4aecc6583d863b66f24488f6166afb6837 Mon Sep 17 00:00:00 2001 From: marcv81 Date: Sat, 27 Apr 2019 01:50:06 +0800 Subject: [PATCH 0793/1815] No longer requires dep to be in $GOPATH/bin (#5763) --- scripts/build.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/scripts/build.py b/scripts/build.py index 85e1724a5..86813f8bd 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -160,8 +160,7 @@ def go_get(branch, update=False, no_uncommitted=False): logging.error("There are uncommitted changes in the current directory.") return False logging.info("Retrieving dependencies with `dep`...") - run("{}/bin/dep ensure -v -vendor-only".format(os.environ.get("GOPATH", - os.path.expanduser("~/go")))) + run("dep ensure -v -vendor-only") return True def run_tests(race, parallel, timeout, no_vet): From cb4387df3f9328e4ff38b8299fe5fbb169576235 Mon Sep 17 00:00:00 2001 From: Ferdi Date: Tue, 30 Apr 2019 00:32:10 +0200 Subject: [PATCH 0794/1815] Verify a process passed by pid_file exists (#5768) --- plugins/inputs/procstat/procstat.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 2eab899c9..c36970421 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -295,6 +295,10 @@ func (p *Procstat) updateProcesses(pids []PID, tags map[string]string, prevInfo for _, pid := range pids { info, ok := prevInfo[pid] if ok { + // Assumption: if a process has no name, it probably does not exist + if name, _ := info.Name(); name == "" { + continue + } procs[pid] = info } else { proc, err := p.createProcess(pid) @@ -302,6 +306,10 @@ func (p *Procstat) updateProcesses(pids []PID, tags map[string]string, prevInfo // No problem; process may have ended after we found it continue } + // Assumption: if a process has no name, it probably does not exist + if name, _ := proc.Name(); name == "" { + continue + } procs[pid] = proc // Add initial tags From 76660e22a9ec0501fb3480a0f8a306a47f5d72c3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 29 Apr 2019 15:34:12 -0700 Subject: [PATCH 0795/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 646dac60e..11710435c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,7 @@ - [#5477](https://github.com/influxdata/telegraf/pull/5477): Support Minecraft server 1.13 and newer in minecraft input. - [#4098](https://github.com/influxdata/telegraf/issues/4098): Fix inline table support in configuration file. - [#1598](https://github.com/influxdata/telegraf/issues/1598): Fix multi-line basic strings support in configuration file. +- [#5746](https://github.com/influxdata/telegraf/issues/5746): Verify a process passed by pid_file exists in procstat input. ## v1.10.4 [unreleased] From 2c3c3773342ee268d6e6802f126b5a009e911195 Mon Sep 17 00:00:00 2001 From: Lorenzo Affetti Date: Tue, 30 Apr 2019 20:04:45 +0200 Subject: [PATCH 0796/1815] Create telegraf user in pre-install rpm scriptlet (#5783) --- scripts/post-install.sh | 8 -------- scripts/pre-install.sh | 8 ++++++++ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/scripts/post-install.sh b/scripts/post-install.sh index 9972364bc..50bef1081 100644 --- a/scripts/post-install.sh +++ b/scripts/post-install.sh @@ -24,14 +24,6 @@ function install_chkconfig { chkconfig --add telegraf } -if ! grep "^telegraf:" /etc/group &>/dev/null; then - groupadd -r telegraf -fi - -if ! 
id telegraf &>/dev/null; then - useradd -r -M telegraf -s /bin/false -d /etc/telegraf -g telegraf -fi - # Remove legacy symlink, if it exists if [[ -L /etc/init.d/telegraf ]]; then rm -f /etc/init.d/telegraf diff --git a/scripts/pre-install.sh b/scripts/pre-install.sh index b371f462d..3fad54f61 100644 --- a/scripts/pre-install.sh +++ b/scripts/pre-install.sh @@ -1,5 +1,13 @@ #!/bin/bash +if ! grep "^telegraf:" /etc/group &>/dev/null; then + groupadd -r telegraf +fi + +if ! id telegraf &>/dev/null; then + useradd -r -M telegraf -s /bin/false -d /etc/telegraf -g telegraf +fi + if [[ -d /etc/opt/telegraf ]]; then # Legacy configuration found if [[ ! -d /etc/telegraf ]]; then From e8248fccf9350f2de8bdd024d2c999df07590482 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 30 Apr 2019 11:08:54 -0700 Subject: [PATCH 0797/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 11710435c..033f250b8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ #### Bugfixes - [#5764](https://github.com/influxdata/telegraf/pull/5764): Fix race condition in the Wavefront parser. +- [#5783](https://github.com/influxdata/telegraf/pull/5783): Create telegraf user in pre-install rpm scriptlet. ## v1.10.3 [2019-04-16] From 0db31c9da7d30afae70945d8ac3ea046d3b54120 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 30 Apr 2019 16:11:18 -0700 Subject: [PATCH 0798/1815] Use non-release revision of pgx for pgbouncer fix (#5772) --- Gopkg.lock | 5 ++--- Gopkg.toml | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 79bb78c10..b76092f87 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -673,7 +673,7 @@ revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec" [[projects]] - digest = "1:5544f7badae00bc5b9ec6829857bc08f88fce4d3ef73fb616ee57d49abbf7f48" + digest = "1:518822813d0d9e252f7abdbb6dd8939f171e2af6e001563fbee710e71e922ff2" name = "github.com/jackc/pgx" packages = [ ".", @@ -685,8 +685,7 @@ "stdlib", ] pruneopts = "" - revision = "89f1e6ac7276b61d885db5e5aed6fcbedd1c7e31" - version = "v3.2.0" + revision = "051e69d512355b5d5dd6f8b92970105ee36e0579" [[projects]] digest = "1:6f49eae0c1e5dab1dafafee34b207aeb7a42303105960944828c2079b92fc88e" diff --git a/Gopkg.toml b/Gopkg.toml index 568f9fda7..72a6e1cd2 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -88,7 +88,7 @@ [[constraint]] name = "github.com/jackc/pgx" - version = "3.2.0" + revision = "051e69d512355b5d5dd6f8b92970105ee36e0579" [[constraint]] name = "github.com/kardianos/service" From 801c2859585b41de26a8b58d2d824503c9c10738 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 30 Apr 2019 16:13:09 -0700 Subject: [PATCH 0799/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 033f250b8..ee69c867d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,6 +42,7 @@ - [#4098](https://github.com/influxdata/telegraf/issues/4098): Fix inline table support in configuration file. - [#1598](https://github.com/influxdata/telegraf/issues/1598): Fix multi-line basic strings support in configuration file. - [#5746](https://github.com/influxdata/telegraf/issues/5746): Verify a process passed by pid_file exists in procstat input. +- [#5455](https://github.com/influxdata/telegraf/issues/5455): Fix unsupported pkt type error in pgbouncer. 
## v1.10.4 [unreleased] From 0aa25e2b6c735ccc228e56429066a8c1b6a606f6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 30 Apr 2019 18:14:31 -0700 Subject: [PATCH 0800/1815] Use struct tags in plugin tutorials --- docs/INPUTS.md | 2 +- docs/OUTPUTS.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/INPUTS.md b/docs/INPUTS.md index 32eb9b9f5..2f4cce3b6 100644 --- a/docs/INPUTS.md +++ b/docs/INPUTS.md @@ -38,7 +38,7 @@ import ( ) type Simple struct { - Ok bool + Ok bool `toml:"ok"` } func (s *Simple) Description() string { diff --git a/docs/OUTPUTS.md b/docs/OUTPUTS.md index 306b9ea6f..8bba4687e 100644 --- a/docs/OUTPUTS.md +++ b/docs/OUTPUTS.md @@ -30,7 +30,7 @@ import ( ) type Simple struct { - Ok bool + Ok bool `toml:"ok"` } func (s *Simple) Description() string { From 7978ebef40da7b809a7b9e3a5f3ca7d4f643d1f0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 30 Apr 2019 18:34:19 -0700 Subject: [PATCH 0801/1815] Add --service-display-name option for use with Windows service (#5770) --- cmd/telegraf/telegraf.go | 3 ++- internal/usage_windows.go | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 6d4121ad7..55ff58473 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -60,6 +60,7 @@ var fUsage = flag.String("usage", "", var fService = flag.String("service", "", "operate on the service (windows only)") var fServiceName = flag.String("service-name", "telegraf", "service name (windows only)") +var fServiceDisplayName = flag.String("service-display-name", "Telegraf Data Collector Service", "service display name (windows only)") var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)") var ( @@ -352,7 +353,7 @@ func main() { if runtime.GOOS == "windows" && windowsRunAsService() { svcConfig := &service.Config{ Name: *fServiceName, - DisplayName: "Telegraf Data Collector Service", + DisplayName: *fServiceDisplayName, Description: "Collects data using a series of plugins and publishes it to" + "another series of plugins.", Arguments: []string{"--config", "C:\\Program Files\\Telegraf\\telegraf.conf"}, diff --git a/internal/usage_windows.go b/internal/usage_windows.go index 70842f5ab..6e3c17835 100644 --- a/internal/usage_windows.go +++ b/internal/usage_windows.go @@ -37,6 +37,7 @@ The commands & flags are: --console run as console application (windows only) --service operate on the service (windows only) --service-name service name (windows only) + --service-display-name service display name (windows only) Examples: @@ -65,5 +66,5 @@ Examples: telegraf --service install --config "C:\Program Files\Telegraf\telegraf.conf" # install telegraf service with custom name - telegraf --service install --service-name=my-telegraf + telegraf --service install --service-name=my-telegraf --service-display-name="My Telegraf" ` From 597f3a679ce2dd31d9be1467c6a93de65deb11af Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 30 Apr 2019 18:35:17 -0700 Subject: [PATCH 0802/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ee69c867d..50ff39e86 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,7 @@ - [#5769](https://github.com/influxdata/telegraf/pull/5769): Add pagefault data to procstat input plugin. - [#5760](https://github.com/influxdata/telegraf/pull/5760): Add option to set permissions for unix domain sockets to socket_listener. 
- [#5585](https://github.com/influxdata/telegraf/pull/5585): Add cli support for outputting sections of the config. +- [#5770](https://github.com/influxdata/telegraf/pull/5770): Add service-display-name option for use with Windows service. #### Bugfixes From 2e6701b44ed0fb13b5dd0dab4f14e279c8ef1122 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Wed, 1 May 2019 14:09:32 -0600 Subject: [PATCH 0803/1815] Update cloudwatch input readme with getMetricData command (#5786) --- plugins/inputs/cloudwatch/README.md | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md index fab3cc295..bd9feaade 100644 --- a/plugins/inputs/cloudwatch/README.md +++ b/plugins/inputs/cloudwatch/README.md @@ -177,7 +177,29 @@ aws cloudwatch list-metrics --namespace AWS/EC2 --region us-east-1 --metric-name If the expected metrics are not returned, you can try getting them manually for a short period of time: ``` -aws cloudwatch get-metric-statistics --namespace AWS/EC2 --region us-east-1 --period 300 --start-time 2018-07-01T00:00:00Z --end-time 2018-07-01T00:15:00Z --statistics Average --metric-name CPUCreditBalance --dimensions Name=InstanceId,Value=i-deadbeef +aws cloudwatch get-metric-data \ + --start-time 2018-07-01T00:00:00Z \ + --end-time 2018-07-01T00:15:00Z \ + --metric-data-queries '[ + { + "Id": "avgCPUCreditBalance", + "MetricStat": { + "Metric": { + "Namespace": "AWS/EC2", + "MetricName": "CPUCreditBalance", + "Dimensions": [ + { + "Name": "InstanceId", + "Value": "i-deadbeef" + } + ] + }, + "Period": 300, + "Stat": "Average" + }, + "Label": "avgCPUCreditBalance" + } +]' ``` ### Example Output: From 3592433b0620c5826dc9200db65045c0d97c8a30 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Wed, 1 May 2019 17:46:52 -0600 Subject: [PATCH 0804/1815] Buffer metrics from failed writes in influxdb2 output if token is invalid (#5792) --- plugins/outputs/influxdb_v2/http.go | 5 +++-- plugins/outputs/influxdb_v2/influxdb.go | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index cdc40c148..7bc9a4770 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -228,10 +228,11 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te } switch resp.StatusCode { - case http.StatusBadRequest, http.StatusUnauthorized, - http.StatusForbidden, http.StatusRequestEntityTooLarge: + case http.StatusBadRequest, http.StatusRequestEntityTooLarge: log.Printf("E! [outputs.influxdb_v2] Failed to write metric: %s\n", desc) return nil + case http.StatusUnauthorized, http.StatusForbidden: + return fmt.Errorf("failed to write metric: %s", desc) case http.StatusTooManyRequests, http.StatusServiceUnavailable: retryAfter := resp.Header.Get("Retry-After") retry, err := strconv.Atoi(retryAfter) diff --git a/plugins/outputs/influxdb_v2/influxdb.go b/plugins/outputs/influxdb_v2/influxdb.go index dca02b0cb..ff621fe9a 100644 --- a/plugins/outputs/influxdb_v2/influxdb.go +++ b/plugins/outputs/influxdb_v2/influxdb.go @@ -165,7 +165,7 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { log.Printf("E! 
[outputs.influxdb_v2] when writing to [%s]: %v", client.URL(), err) } - return errors.New("could not write any address") + return err } func (i *InfluxDB) getHTTPClient(ctx context.Context, url *url.URL, proxy *url.URL) (Client, error) { From 6a73ad56ae733953e7c1108d9f9b5e6fd699c0b1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 1 May 2019 16:49:20 -0700 Subject: [PATCH 0805/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 50ff39e86..ce3404311 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,6 +51,7 @@ - [#5764](https://github.com/influxdata/telegraf/pull/5764): Fix race condition in the Wavefront parser. - [#5783](https://github.com/influxdata/telegraf/pull/5783): Create telegraf user in pre-install rpm scriptlet. +- [#5792](https://github.com/influxdata/telegraf/pull/5792): Don't discard metrics on forbidden error in influxdb_v2 output. ## v1.10.3 [2019-04-16] From bae7f59bbf5647707730063215f933f026750d11 Mon Sep 17 00:00:00 2001 From: Alberto del Barrio Date: Fri, 3 May 2019 19:05:06 +0200 Subject: [PATCH 0806/1815] Fix syntax error in cloudwatch sample config (#5797) --- plugins/inputs/cloudwatch/README.md | 4 ++-- plugins/inputs/cloudwatch/cloudwatch.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md index bd9feaade..369eadbc1 100644 --- a/plugins/inputs/cloudwatch/README.md +++ b/plugins/inputs/cloudwatch/README.md @@ -72,7 +72,7 @@ API endpoint. In the following order the plugin will attempt to authenticate. ## Namespace-wide statistic filters. These allow fewer queries to be made to ## cloudwatch. - # statistic_include = [ "average", "sum", minimum", "maximum", sample_count" ] + # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ] # statistic_exclude = [] ## Metrics to Pull @@ -83,7 +83,7 @@ API endpoint. In the following order the plugin will attempt to authenticate. # # ## Statistic filters for Metric. These allow for retrieving specific # ## statistics for an individual metric. - # # statistic_include = [ "average", "sum", minimum", "maximum", sample_count" ] + # # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ] # # statistic_exclude = [] # # ## Dimension filters for Metric. All dimensions defined for the metric names diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index 4b6469e2d..7aad67f5b 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -135,7 +135,7 @@ func (c *CloudWatch) SampleConfig() string { ## Namespace-wide statistic filters. These allow fewer queries to be made to ## cloudwatch. - # statistic_include = [ "average", "sum", minimum", "maximum", sample_count" ] + # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ] # statistic_exclude = [] ## Metrics to Pull @@ -146,7 +146,7 @@ func (c *CloudWatch) SampleConfig() string { # # ## Statistic filters for Metric. These allow for retrieving specific # ## statistics for an individual metric. - # # statistic_include = [ "average", "sum", minimum", "maximum", sample_count" ] + # # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ] # # statistic_exclude = [] # # ## Dimension filters for Metric. 
All dimensions defined for the metric names From bcf7516a23fc45987fe5460c6563fde674faad75 Mon Sep 17 00:00:00 2001 From: javicrespo Date: Fri, 3 May 2019 19:25:28 +0200 Subject: [PATCH 0807/1815] Add in process log rotation (#5778) --- cmd/telegraf/telegraf.go | 17 ++- docs/CONFIGURATION.md | 8 ++ internal/config/config.go | 20 +++ internal/rotate/file_writer.go | 183 ++++++++++++++++++++++++++++ internal/rotate/file_writer_test.go | 115 +++++++++++++++++ logger/logger.go | 66 +++++++--- logger/logger_test.go | 51 ++++++-- 7 files changed, 428 insertions(+), 32 deletions(-) create mode 100644 internal/rotate/file_writer.go create mode 100644 internal/rotate/file_writer_test.go diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 55ff58473..dcc8f29fa 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -115,7 +115,7 @@ func runAgent(ctx context.Context, ) error { // Setup default logging. This may need to change after reading the config // file, but we can configure it to use our logger implementation now. - logger.SetupLogging(false, false, "") + logger.SetupLogging(logger.LogConfig{}) log.Printf("I! Starting Telegraf %s", version) // If no other options are specified, load the config file and run. @@ -156,11 +156,16 @@ func runAgent(ctx context.Context, } // Setup logging as configured. - logger.SetupLogging( - ag.Config.Agent.Debug || *fDebug, - ag.Config.Agent.Quiet || *fQuiet, - ag.Config.Agent.Logfile, - ) + logConfig := logger.LogConfig{ + Debug: ag.Config.Agent.Debug || *fDebug, + Quiet: ag.Config.Agent.Quiet || *fQuiet, + Logfile: ag.Config.Agent.Logfile, + RotationInterval: ag.Config.Agent.LogfileRotationInterval, + RotationMaxSize: ag.Config.Agent.LogfileRotationMaxSize, + RotationMaxArchives: ag.Config.Agent.LogfileRotationMaxArchives, + } + + logger.SetupLogging(logConfig) if *fTest { return ag.Test(ctx) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index edb334145..0baf2e033 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -141,6 +141,14 @@ The agent table configures Telegraf and the defaults used across all plugins. Run telegraf in quiet mode (error log messages only). - **logfile**: Specify the log file name. The empty string means to log to stderr. +- **logfile_rotation_interval**: + Log file rotation time [interval][], e.g. "1d" means logs will rotated every day. Default is 0 => no rotation based on time. +- **logfile_rotation_max_size**: + The log file max [size][]. Log files will be rotated when they exceed this size. Default is 0 => no rotation based on file size. +- **logfile_rotation_max_archives**: + Maximum number of archives (rotated) files to keep. Older log files are deleted first. + This setting is only applicable if `logfile_rotation_interval` and/or `logfile_rotation_max_size` settings have been specified (otherwise there is no rotation) + Default is 0 => all rotated files are deleted. Use -1 to keep all archives. - **hostname**: Override default hostname, if empty use os.Hostname() diff --git a/internal/config/config.go b/internal/config/config.go index 263481ebd..fd73657df 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -145,6 +145,15 @@ type AgentConfig struct { // Logfile specifies the file to send logs to Logfile string + // The log file rotation interval + LogfileRotationInterval internal.Duration + + // The log file max size. Logs will rotated when they exceed this size. 
+ LogfileRotationMaxSize internal.Size + + // The max number of log archives to keep + LogfileRotationMaxArchives int + // Quiet is the option for running in quiet mode Quiet bool Hostname string @@ -273,6 +282,17 @@ var agentConfig = ` quiet = false ## Specify the log file name. The empty string means to log to stderr. logfile = "" + ## Rotation settings, only applicable when log file name is specified. + ## Log file rotation time interval, e.g. "1d" means logs will rotated every day. Default is 0 => no rotation based on time. + # logfile_rotation_interval = "1d" + ## The log file max size. Log files will be rotated when they exceed this size. Default is 0 => no rotation based on file size. + # logfile_rotation_max_size = "10 MB" + ## Maximum number of archives (rotated) files to keep. Older log files are deleted first. + ## This setting is only applicable if logfile_rotation_interval and/or logfile_rotation_max_size settings have been specified (otherwise there is no rotation) + ## Default is 0 => all rotated files are deleted. + ## Use -1 to keep all archives. + ## Analogous to logrotate "rotate" setting http://man7.org/linux/man-pages/man8/logrotate.8.html + # logfile_rotation_max_archives = 0 ## Override default hostname, if empty use os.Hostname() hostname = "" diff --git a/internal/rotate/file_writer.go b/internal/rotate/file_writer.go new file mode 100644 index 000000000..fe8c2fd71 --- /dev/null +++ b/internal/rotate/file_writer.go @@ -0,0 +1,183 @@ +package rotate + +// Rotating things +import ( + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +// FilePerm defines the permissions that Writer will use for all +// the files it creates. +const ( + FilePerm = os.FileMode(0644) + DateFormat = "2006-01-02" +) + +// FileWriter implements the io.Writer interface and writes to the +// filename specified. +// Will rotate at the specified interval and/or when the current file size exceeds maxSizeInBytes +// At rotation time, current file is renamed and a new file is created. +// If the number of archives exceeds maxArchives, older files are deleted. +type FileWriter struct { + filename string + filenameRotationTemplate string + current *os.File + interval time.Duration + maxSizeInBytes int64 + maxArchives int + expireTime time.Time + bytesWritten int64 + sync.Mutex +} + +// NewFileWriter creates a new file writer. +func NewFileWriter(filename string, interval time.Duration, maxSizeInBytes int64, maxArchives int) (io.WriteCloser, error) { + if interval == 0 && maxSizeInBytes <= 0 { + // No rotation needed so a basic io.Writer will do the trick + return openFile(filename) + } + + w := &FileWriter{ + filename: filename, + interval: interval, + maxSizeInBytes: maxSizeInBytes, + maxArchives: maxArchives, + filenameRotationTemplate: getFilenameRotationTemplate(filename), + } + + if err := w.openCurrent(); err != nil { + return nil, err + } + + return w, nil +} + +func openFile(filename string) (*os.File, error) { + return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_APPEND, FilePerm) +} + +func getFilenameRotationTemplate(filename string) string { + // Extract the file extension + fileExt := filepath.Ext(filename) + // Remove the file extension from the filename (if any) + stem := strings.TrimSuffix(filename, fileExt) + return stem + ".%s-%s" + fileExt +} + +// Write writes p to the current file, then checks to see if +// rotation is necessary. 
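+// Note that the size check runs after the bytes have been written, so the
+// current file can exceed maxSizeInBytes by up to one write; rotation then
+// happens within the same call, before Write returns.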
+func (w *FileWriter) Write(p []byte) (n int, err error) { + w.Lock() + defer w.Unlock() + if n, err = w.current.Write(p); err != nil { + return 0, err + } + w.bytesWritten += int64(n) + + if err = w.rotateIfNeeded(); err != nil { + return 0, err + } + + return n, nil +} + +// Close closes the current file. Writer is unusable after this +// is called. +func (w *FileWriter) Close() (err error) { + w.Lock() + defer w.Unlock() + + // Rotate before closing + if err = w.rotate(); err != nil { + return err + } + + if err = w.current.Close(); err != nil { + return err + } + w.current = nil + return nil +} + +func (w *FileWriter) openCurrent() (err error) { + // In case ModTime() fails, we use time.Now() + w.expireTime = time.Now().Add(w.interval) + w.bytesWritten = 0 + w.current, err = openFile(w.filename) + + if err != nil { + return err + } + + // Goal here is to rotate old pre-existing files. + // For that we use fileInfo.ModTime, instead of time.Now(). + // Example: telegraf is restarted every 23 hours and + // the rotation interval is set to 24 hours. + // With time.now() as a reference we'd never rotate the file. + if fileInfo, err := w.current.Stat(); err == nil { + w.expireTime = fileInfo.ModTime().Add(w.interval) + } + return nil +} + +func (w *FileWriter) rotateIfNeeded() error { + if (w.interval > 0 && time.Now().After(w.expireTime)) || + (w.maxSizeInBytes > 0 && w.bytesWritten >= w.maxSizeInBytes) { + if err := w.rotate(); err != nil { + //Ignore rotation errors and keep the log open + fmt.Printf("unable to rotate the file '%s', %s", w.filename, err.Error()) + } + return w.openCurrent() + } + return nil +} + +func (w *FileWriter) rotate() (err error) { + if err = w.current.Close(); err != nil { + return err + } + + // Use year-month-date for readability, unix time to make the file name unique with second precision + now := time.Now() + rotatedFilename := fmt.Sprintf(w.filenameRotationTemplate, now.Format(DateFormat), strconv.FormatInt(now.Unix(), 10)) + if err = os.Rename(w.filename, rotatedFilename); err != nil { + return err + } + + if err = w.purgeArchivesIfNeeded(); err != nil { + return err + } + + return nil +} + +func (w *FileWriter) purgeArchivesIfNeeded() (err error) { + if w.maxArchives == -1 { + //Skip archiving + return nil + } + + var matches []string + if matches, err = filepath.Glob(fmt.Sprintf(w.filenameRotationTemplate, "*", "*")); err != nil { + return err + } + + //if there are more archives than the configured maximum, then purge older files + if len(matches) > w.maxArchives { + //sort files alphanumerically to delete older files first + sort.Strings(matches) + for _, filename := range matches[:len(matches)-w.maxArchives] { + if err = os.Remove(filename); err != nil { + return err + } + } + } + return nil +} diff --git a/internal/rotate/file_writer_test.go b/internal/rotate/file_writer_test.go new file mode 100644 index 000000000..88ba94b9d --- /dev/null +++ b/internal/rotate/file_writer_test.go @@ -0,0 +1,115 @@ +package rotate + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFileWriter_NoRotation(t *testing.T) { + tempDir, err := ioutil.TempDir("", "RotationNo") + require.NoError(t, err) + writer, err := NewFileWriter(filepath.Join(tempDir, "test"), 0, 0, 0) + require.NoError(t, err) + defer func() { writer.Close(); os.RemoveAll(tempDir) }() + + _, err = writer.Write([]byte("Hello World")) + require.NoError(t, err) + _, err = 
writer.Write([]byte("Hello World 2")) + require.NoError(t, err) + files, _ := ioutil.ReadDir(tempDir) + assert.Equal(t, 1, len(files)) +} + +func TestFileWriter_TimeRotation(t *testing.T) { + tempDir, err := ioutil.TempDir("", "RotationTime") + require.NoError(t, err) + interval, _ := time.ParseDuration("1s") + writer, err := NewFileWriter(filepath.Join(tempDir, "test"), interval, 0, -1) + require.NoError(t, err) + defer func() { writer.Close(); os.RemoveAll(tempDir) }() + + _, err = writer.Write([]byte("Hello World")) + require.NoError(t, err) + time.Sleep(1 * time.Second) + _, err = writer.Write([]byte("Hello World 2")) + require.NoError(t, err) + files, _ := ioutil.ReadDir(tempDir) + assert.Equal(t, 2, len(files)) +} + +func TestFileWriter_SizeRotation(t *testing.T) { + tempDir, err := ioutil.TempDir("", "RotationSize") + require.NoError(t, err) + maxSize := int64(9) + writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1) + require.NoError(t, err) + defer func() { writer.Close(); os.RemoveAll(tempDir) }() + + _, err = writer.Write([]byte("Hello World")) + require.NoError(t, err) + _, err = writer.Write([]byte("World 2")) + require.NoError(t, err) + files, _ := ioutil.ReadDir(tempDir) + assert.Equal(t, 2, len(files)) +} + +func TestFileWriter_DeleteArchives(t *testing.T) { + tempDir, err := ioutil.TempDir("", "RotationDeleteArchives") + require.NoError(t, err) + maxSize := int64(5) + writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, 2) + require.NoError(t, err) + defer func() { writer.Close(); os.RemoveAll(tempDir) }() + + _, err = writer.Write([]byte("First file")) + require.NoError(t, err) + // File names include the date with second precision + // So, to force rotation with different file names + // we need to wait + time.Sleep(1 * time.Second) + _, err = writer.Write([]byte("Second file")) + require.NoError(t, err) + time.Sleep(1 * time.Second) + _, err = writer.Write([]byte("Third file")) + require.NoError(t, err) + + files, _ := ioutil.ReadDir(tempDir) + assert.Equal(t, 3, len(files)) + + for _, tempFile := range files { + var bytes []byte + var err error + path := filepath.Join(tempDir, tempFile.Name()) + if bytes, err = ioutil.ReadFile(path); err != nil { + t.Error(err.Error()) + return + } + contents := string(bytes) + + if contents != "" && contents != "Second file" && contents != "Third file" { + t.Error("Should have deleted the eldest log file") + return + } + } +} + +func TestFileWriter_CloseRotates(t *testing.T) { + tempDir, err := ioutil.TempDir("", "RotationClose") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + maxSize := int64(9) + writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1) + require.NoError(t, err) + + writer.Close() + + files, _ := ioutil.ReadDir(tempDir) + assert.Equal(t, 1, len(files)) + assert.Regexp(t, "^test\\.[^\\.]+\\.log$", files[0].Name()) +} diff --git a/logger/logger.go b/logger/logger.go index 6250dedd6..a7b32b6e0 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -1,12 +1,15 @@ package logger import ( + "errors" "io" "log" "os" "regexp" "time" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/rotate" "github.com/influxdata/wlog" ) @@ -15,12 +18,32 @@ var prefixRegex = regexp.MustCompile("^[DIWE]!") // newTelegrafWriter returns a logging-wrapped writer. 
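+// The wlog layer performs the log level filtering, while the original
+// writer is retained so the log output can be closed again via
+// telegrafLog.Close.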
func newTelegrafWriter(w io.Writer) io.Writer {
 	return &telegrafLog{
-		writer: wlog.NewWriter(w),
+		writer:         wlog.NewWriter(w),
+		internalWriter: w,
 	}
 }
 
+// LogConfig contains the log configuration settings
+type LogConfig struct {
+	// will set the log level to DEBUG
+	Debug bool
+	// will set the log level to ERROR
+	Quiet bool
+	// will direct the logging output to a file. Empty string is
+	// interpreted as stderr. If there is an error opening the file the
+	// logger will fall back to stderr
+	Logfile string
+	// will rotate the current file at the specified time interval
+	RotationInterval internal.Duration
+	// will rotate when the current file size exceeds this parameter.
+	RotationMaxSize internal.Size
+	// maximum rotated files to keep (older ones will be deleted)
+	RotationMaxArchives int
+}
+
 type telegrafLog struct {
-	writer io.Writer
+	writer         io.Writer
+	internalWriter io.Writer
 }
 
 func (t *telegrafLog) Write(b []byte) (n int, err error) {
@@ -33,31 +56,40 @@ func (t *telegrafLog) Write(b []byte) (n int, err error) {
 	return t.writer.Write(line)
 }
 
+func (t *telegrafLog) Close() error {
+	closer, isCloser := t.internalWriter.(io.Closer)
+	if !isCloser {
+		return errors.New("the underlying writer cannot be closed")
+	}
+	return closer.Close()
+}
+
 // SetupLogging configures the logging output.
-// debug will set the log level to DEBUG
-// quiet will set the log level to ERROR
-// logfile will direct the logging output to a file. Empty string is
-// interpreted as stderr. If there is an error opening the file the
-// logger will fallback to stderr.
-func SetupLogging(debug, quiet bool, logfile string) {
+func SetupLogging(config LogConfig) {
+	newLogWriter(config)
+}
+
+func newLogWriter(config LogConfig) io.Writer {
 	log.SetFlags(0)
-	if debug {
+	if config.Debug {
 		wlog.SetLevel(wlog.DEBUG)
 	}
-	if quiet {
+	if config.Quiet {
 		wlog.SetLevel(wlog.ERROR)
 	}
 
-	var oFile *os.File
-	if logfile != "" {
+	var writer io.Writer
+	if config.Logfile != "" {
 		var err error
-		if oFile, err = os.OpenFile(logfile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModeAppend|0644); err != nil {
-			log.Printf("E! Unable to open %s (%s), using stderr", logfile, err)
-			oFile = os.Stderr
+		if writer, err = rotate.NewFileWriter(config.Logfile, config.RotationInterval.Duration, config.RotationMaxSize.Size, config.RotationMaxArchives); err != nil {
+			log.Printf("E! Unable to open %s (%s), using stderr", config.Logfile, err)
+			writer = os.Stderr
 		}
 	} else {
-		oFile = os.Stderr
+		writer = os.Stderr
 	}
 
-	log.SetOutput(newTelegrafWriter(oFile))
+	telegrafLog := newTelegrafWriter(writer)
+	log.SetOutput(telegrafLog)
+	return telegrafLog
 }
diff --git a/logger/logger_test.go b/logger/logger_test.go
index a721cbba7..504e9a4bb 100644
--- a/logger/logger_test.go
+++ b/logger/logger_test.go
@@ -2,12 +2,16 @@ package logger
 
 import (
 	"bytes"
+	"io"
 	"io/ioutil"
 	"log"
 	"os"
+	"path/filepath"
 	"testing"
 
+	"github.com/influxdata/telegraf/internal"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestWriteLogToFile(t *testing.T) {
@@ -15,7 +19,8 @@ func TestWriteLogToFile(t *testing.T) {
 	assert.NoError(t, err)
 	defer func() { os.Remove(tmpfile.Name()) }()
 
-	SetupLogging(false, false, tmpfile.Name())
+	config := createBasicLogConfig(tmpfile.Name())
+	SetupLogging(config)
 	log.Printf("I! TEST")
 	log.Printf("D! 
TEST") // <- should be ignored

@@ -28,8 +33,9 @@ func TestDebugWriteLogToFile(t *testing.T) {
 	tmpfile, err := ioutil.TempFile("", "")
 	assert.NoError(t, err)
 	defer func() { os.Remove(tmpfile.Name()) }()
-
-	SetupLogging(true, false, tmpfile.Name())
+	config := createBasicLogConfig(tmpfile.Name())
+	config.Debug = true
+	SetupLogging(config)
 	log.Printf("D! TEST")
 
 	f, err := ioutil.ReadFile(tmpfile.Name())
@@ -41,8 +47,9 @@ func TestErrorWriteLogToFile(t *testing.T) {
 	tmpfile, err := ioutil.TempFile("", "")
 	assert.NoError(t, err)
 	defer func() { os.Remove(tmpfile.Name()) }()
-
-	SetupLogging(false, true, tmpfile.Name())
+	config := createBasicLogConfig(tmpfile.Name())
+	config.Quiet = true
+	SetupLogging(config)
 	log.Printf("E! TEST")
 	log.Printf("I! TEST") // <- should be ignored
 
@@ -55,8 +62,9 @@ func TestAddDefaultLogLevel(t *testing.T) {
 	tmpfile, err := ioutil.TempFile("", "")
 	assert.NoError(t, err)
 	defer func() { os.Remove(tmpfile.Name()) }()
-
-	SetupLogging(true, false, tmpfile.Name())
+	config := createBasicLogConfig(tmpfile.Name())
+	config.Debug = true
+	SetupLogging(config)
 	log.Printf("TEST")
 
 	f, err := ioutil.ReadFile(tmpfile.Name())
@@ -68,8 +76,9 @@ func TestWriteToTruncatedFile(t *testing.T) {
 	tmpfile, err := ioutil.TempFile("", "")
 	assert.NoError(t, err)
 	defer func() { os.Remove(tmpfile.Name()) }()
-
-	SetupLogging(true, false, tmpfile.Name())
+	config := createBasicLogConfig(tmpfile.Name())
+	config.Debug = true
+	SetupLogging(config)
 	log.Printf("TEST")
 
 	f, err := ioutil.ReadFile(tmpfile.Name())
@@ -87,6 +96,23 @@ func TestWriteToTruncatedFile(t *testing.T) {
 	assert.Equal(t, f[19:], []byte("Z I! SHOULD BE FIRST\n"))
 }
 
+func TestWriteToFileInRotation(t *testing.T) {
+	tempDir, err := ioutil.TempDir("", "LogRotation")
+	require.NoError(t, err)
+	config := createBasicLogConfig(filepath.Join(tempDir, "test.log"))
+	config.RotationMaxSize = internal.Size{Size: int64(30)}
+	writer := newLogWriter(config)
+	// Close the writer here, otherwise the temp folder cannot be deleted because the current log file is in use.
+	closer, isCloser := writer.(io.Closer)
+	assert.True(t, isCloser)
+	defer func() { closer.Close(); os.RemoveAll(tempDir) }()
+
+	log.Printf("I! TEST 1") // Writes 31 bytes, will rotate
+	log.Printf("I! TEST")   // Writes 29 bytes, no rotation expected
+	files, _ := ioutil.ReadDir(tempDir)
+	assert.Equal(t, 2, len(files))
+}
+
 func BenchmarkTelegrafLogWrite(b *testing.B) {
 	var msg = []byte("test")
 	var buf bytes.Buffer
@@ -96,3 +122,10 @@ func BenchmarkTelegrafLogWrite(b *testing.B) {
 		w.Write(msg)
 	}
 }
+
+func createBasicLogConfig(filename string) LogConfig {
+	return LogConfig{
+		Logfile:             filename,
+		RotationMaxArchives: -1,
+	}
+}
From 74c9d7ace2bf4b009c4568c2dded38e0c31e9812 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Fri, 3 May 2019 10:55:11 -0700
Subject: [PATCH 0808/1815] Update logfile documentation

---
 docs/CONFIGURATION.md     | 22 +++++++-----
 internal/config/config.go | 76 +++++++++++++++++++++------------------
 2 files changed, 55 insertions(+), 43 deletions(-)

diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md
index 0baf2e033..dd2512ef3 100644
--- a/docs/CONFIGURATION.md
+++ b/docs/CONFIGURATION.md
@@ -136,19 +136,25 @@ The agent table configures Telegraf and the defaults used across all plugins.
   service input to set the timestamp at the appropriate precision.
 
 - **debug**:
-  Run telegraf with debug log messages.
+  Log at debug level.
+
 - **quiet**:
-  Run telegraf in quiet mode (error log messages only).
+  Log only error level messages. 
+
 - **logfile**:
-  Specify the log file name. The empty string means to log to stderr.
+  Log file name, the empty string means to log to stderr.
+
 - **logfile_rotation_interval**:
-  Log file rotation time [interval][], e.g. "1d" means logs will rotated every day. Default is 0 => no rotation based on time.
+  The logfile will be rotated after the time interval specified. When set to
+  0 no time based rotation is performed.
+
 - **logfile_rotation_max_size**:
-  The log file max [size][]. Log files will be rotated when they exceed this size. Default is 0 => no rotation based on file size.
+  The logfile will be rotated when it becomes larger than the specified size.
+  When set to 0 no size based rotation is performed.
+
 - **logfile_rotation_max_archives**:
-  Maximum number of archives (rotated) files to keep. Older log files are deleted first.
-  This setting is only applicable if `logfile_rotation_interval` and/or `logfile_rotation_max_size` settings have been specified (otherwise there is no rotation)
-  Default is 0 => all rotated files are deleted. Use -1 to keep all archives.
+  Maximum number of rotated archives to keep, any older logs are deleted. If
+  set to -1, no archives are removed.
 
 - **hostname**:
   Override default hostname, if empty use os.Hostname()
diff --git a/internal/config/config.go b/internal/config/config.go
index fd73657df..7f0ab8484 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -72,9 +72,10 @@ func NewConfig() *Config {
 	c := &Config{
 		// Agent defaults:
 		Agent: &AgentConfig{
-			Interval:      internal.Duration{Duration: 10 * time.Second},
-			RoundInterval: true,
-			FlushInterval: internal.Duration{Duration: 10 * time.Second},
+			Interval:                   internal.Duration{Duration: 10 * time.Second},
+			RoundInterval:              true,
+			FlushInterval:              internal.Duration{Duration: 10 * time.Second},
+			LogfileRotationMaxArchives: 5,
 		},
 		Tags: make(map[string]string),
@@ -140,22 +141,26 @@ type AgentConfig struct {
 	UTC bool `toml:"utc"`
 
 	// Debug is the option for running in debug mode
-	Debug bool
-
-	// Logfile specifies the file to send logs to
-	Logfile string
-
-	// The log file rotation interval
-	LogfileRotationInterval internal.Duration
-
-	// The log file max size. Logs will rotated when they exceed this size.
-	LogfileRotationMaxSize internal.Size
-
-	// The max number of log archives to keep
-	LogfileRotationMaxArchives int
+	Debug bool `toml:"debug"`
 
 	// Quiet is the option for running in quiet mode
-	Quiet bool
+	Quiet bool `toml:"quiet"`
+
+	// Log file name, the empty string means to log to stderr.
+	Logfile string `toml:"logfile"`
+
+	// The logfile will be rotated after the time interval specified. When
+	// set to 0 no time based rotation is performed.
+	LogfileRotationInterval internal.Duration `toml:"logfile_rotation_interval"`
+
+	// The logfile will be rotated when it becomes larger than the specified
+	// size. When set to 0 no size based rotation is performed.
+	LogfileRotationMaxSize internal.Size `toml:"logfile_rotation_max_size"`
+
+	// Maximum number of rotated archives to keep, any older logs are deleted.
+	// If set to -1, no archives are removed.
+	LogfileRotationMaxArchives int `toml:"logfile_rotation_max_archives"`
+
 	Hostname     string
 	OmitHostname bool
 }
@@ -275,24 +280,25 @@ var agentConfig = `
   ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
   precision = ""
 
-  ## Logging configuration:
-  ## Run telegraf with debug log messages.
-  debug = false
-  ## Run telegraf in quiet mode (error log messages only).
-  quiet = false
-  ## Specify the log file name. 
The empty string means to log to stderr. - logfile = "" - ## Rotation settings, only applicable when log file name is specified. - ## Log file rotation time interval, e.g. "1d" means logs will rotated every day. Default is 0 => no rotation based on time. - # logfile_rotation_interval = "1d" - ## The log file max size. Log files will be rotated when they exceed this size. Default is 0 => no rotation based on file size. - # logfile_rotation_max_size = "10 MB" - ## Maximum number of archives (rotated) files to keep. Older log files are deleted first. - ## This setting is only applicable if logfile_rotation_interval and/or logfile_rotation_max_size settings have been specified (otherwise there is no rotation) - ## Default is 0 => all rotated files are deleted. - ## Use -1 to keep all archives. - ## Analogous to logrotate "rotate" setting http://man7.org/linux/man-pages/man8/logrotate.8.html - # logfile_rotation_max_archives = 0 + ## Log at debug level. + # debug = false + ## Log only error level messages. + # quiet = false + + ## Log file name, the empty string means to log to stderr. + # logfile = "" + + ## The logfile will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. + # logfile_rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # logfile_rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # logfile_rotation_max_archives = 5 ## Override default hostname, if empty use os.Hostname() hostname = "" From 6b5162d0d2953c52bd373d526b0561c68bca68ef Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 3 May 2019 10:57:13 -0700 Subject: [PATCH 0809/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ce3404311..a73abd893 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,7 @@ - [#5760](https://github.com/influxdata/telegraf/pull/5760): Add option to set permissions for unix domain sockets to socket_listener. - [#5585](https://github.com/influxdata/telegraf/pull/5585): Add cli support for outputting sections of the config. - [#5770](https://github.com/influxdata/telegraf/pull/5770): Add service-display-name option for use with Windows service. +- [#5778](https://github.com/influxdata/telegraf/pull/5778): Add support for log rotation. 
#### Bugfixes From 93be5759d594d20b6ce12bd21649197954bea09b Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 6 May 2019 13:13:51 -0600 Subject: [PATCH 0810/1815] Set host header if configured on http output (#5810) --- plugins/outputs/http/http.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index abcea74b5..8ef77976f 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -193,6 +193,9 @@ func (h *HTTP) write(reqBody []byte) error { req.Header.Set("Content-Encoding", "gzip") } for k, v := range h.Headers { + if strings.ToLower(k) == "host" { + req.Host = v + } req.Header.Set(k, v) } From 901c50b9d4a143f1d7cbe6a549694ac96ed3382c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 6 May 2019 12:15:46 -0700 Subject: [PATCH 0811/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a73abd893..2391e40c4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -53,6 +53,7 @@ - [#5764](https://github.com/influxdata/telegraf/pull/5764): Fix race condition in the Wavefront parser. - [#5783](https://github.com/influxdata/telegraf/pull/5783): Create telegraf user in pre-install rpm scriptlet. - [#5792](https://github.com/influxdata/telegraf/pull/5792): Don't discard metrics on forbidden error in influxdb_v2 output. +- [#5803](https://github.com/influxdata/telegraf/issues/5803): Fix http output cannot set Host header. ## v1.10.3 [2019-04-16] From 1e1fa1a5805c23fd3621bc69d8f122efc0448d96 Mon Sep 17 00:00:00 2001 From: matthewwiesen Date: Mon, 6 May 2019 16:06:22 -0400 Subject: [PATCH 0812/1815] Add iso9660 to telegraf disk input ignore_fs (#5800) --- plugins/inputs/disk/README.md | 2 +- plugins/inputs/disk/disk.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/disk/README.md b/plugins/inputs/disk/README.md index 2979a5f2e..b0a8ac05a 100644 --- a/plugins/inputs/disk/README.md +++ b/plugins/inputs/disk/README.md @@ -15,7 +15,7 @@ https://en.wikipedia.org/wiki/Df_(Unix) for more details. # mount_points = ["/"] ## Ignore mount points by filesystem type. - ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] + ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] ``` #### Docker container diff --git a/plugins/inputs/disk/disk.go b/plugins/inputs/disk/disk.go index 5a30dbecf..86aefc60f 100644 --- a/plugins/inputs/disk/disk.go +++ b/plugins/inputs/disk/disk.go @@ -29,7 +29,7 @@ var diskSampleConfig = ` # mount_points = ["/"] ## Ignore mount points by filesystem type. - ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] + ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] ` func (_ *DiskStats) SampleConfig() string { From 8abf8c10a7e75bab3195b86fda8f84061de90398 Mon Sep 17 00:00:00 2001 From: frroberts Date: Tue, 7 May 2019 02:57:01 +0300 Subject: [PATCH 0813/1815] Fix only one job per storage target reported in lustre2 input (#5771) --- plugins/inputs/lustre2/lustre2.go | 100 ++++++++++++++----------- plugins/inputs/lustre2/lustre2_test.go | 96 ++++++++++++++++++++++-- 2 files changed, 148 insertions(+), 48 deletions(-) diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index 8ef9223b5..4ccb90115 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -9,23 +9,27 @@ for HPC environments. 
It stores statistics about its activity in
package lustre2

import (
+	"io/ioutil"
 	"path/filepath"
 	"strconv"
 	"strings"
 
 	"github.com/influxdata/telegraf"
-	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
+type tags struct {
+	name, job string
+}
+
 // Lustre proc files can change between versions, so we want to future-proof
 // by letting people choose what to look at.
 type Lustre2 struct {
-	Ost_procfiles []string
-	Mds_procfiles []string
+	Ost_procfiles []string `toml:"ost_procfiles"`
+	Mds_procfiles []string `toml:"mds_procfiles"`
 
 	// allFields maps an OST name to the metric fields associated with that OST
-	allFields map[string]map[string]interface{}
+	allFields map[tags]map[string]interface{}
 }
 
 var sampleConfig = `
@@ -353,7 +357,7 @@ var wanted_mdt_jobstats_fields = []*mapping{
 	},
 }
 
-func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, acc telegraf.Accumulator) error {
+func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, acc telegraf.Accumulator) error {
 	files, err := filepath.Glob(fileglob)
 	if err != nil {
 		return err
@@ -367,43 +371,56 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping,
 	 */
 	path := strings.Split(file, "/")
 	name := path[len(path)-2]
-	var fields map[string]interface{}
-	fields, ok := l.allFields[name]
-	if !ok {
-		fields = make(map[string]interface{})
-		l.allFields[name] = fields
-	}
 
-	lines, err := internal.ReadLines(file)
+	//lines, err := internal.ReadLines(file)
+	wholeFile, err := ioutil.ReadFile(file)
 	if err != nil {
 		return err
 	}
+	jobs := strings.Split(string(wholeFile), "-")
+	for _, job := range jobs {
+		lines := strings.Split(string(job), "\n")
+		jobid := ""
 
-	for _, line := range lines {
-		parts := strings.Fields(line)
-		if strings.HasPrefix(line, "- job_id:") {
-			// Set the job_id explicitly if present
-			fields["jobid"] = parts[2]
+		// figure out if the data should be tagged with job_id here
+		parts := strings.Fields(lines[0])
+		if strings.TrimSuffix(parts[0], ":") == "job_id" {
+			jobid = parts[1]
 		}
 
-		for _, wanted := range wanted_fields {
-			var data uint64
-			if strings.TrimSuffix(parts[0], ":") == wanted.inProc {
-				wanted_field := wanted.field
-				// if not set, assume field[1]. 
Shouldn't be field[0], as + // that's a string + if wantedField == 0 { + wantedField = 1 + } + data, err = strconv.ParseUint(strings.TrimSuffix((parts[wantedField]), ","), 10, 64) + if err != nil { + return err + } + reportName := wanted.inProc + if wanted.reportAs != "" { + reportName = wanted.reportAs + } + fields[reportName] = data } - data, err = strconv.ParseUint(strings.TrimSuffix((parts[wanted_field]), ","), 10, 64) - if err != nil { - return err - } - report_name := wanted.inProc - if wanted.reportAs != "" { - report_name = wanted.reportAs - } - fields[report_name] = data } } } @@ -423,7 +440,8 @@ func (l *Lustre2) Description() string { // Gather reads stats from all lustre targets func (l *Lustre2) Gather(acc telegraf.Accumulator) error { - l.allFields = make(map[string]map[string]interface{}) + //l.allFields = make(map[string]map[string]interface{}) + l.allFields = make(map[tags]map[string]interface{}) if len(l.Ost_procfiles) == 0 { // read/write bytes are in obdfilter//stats @@ -483,15 +501,13 @@ func (l *Lustre2) Gather(acc telegraf.Accumulator) error { } } - for name, fields := range l.allFields { + for tgs, fields := range l.allFields { + tags := map[string]string{ - "name": name, + "name": tgs.name, } - if _, ok := fields["jobid"]; ok { - if jobid, ok := fields["jobid"].(string); ok { - tags["jobid"] = jobid - } - delete(fields, "jobid") + if len(tgs.job) > 0 { + tags["jobid"] = tgs.job } acc.AddFields("lustre2", fields, tags) } diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go index 5cc9c0e43..6d0fd61f5 100644 --- a/plugins/inputs/lustre2/lustre2_test.go +++ b/plugins/inputs/lustre2/lustre2_test.go @@ -53,6 +53,20 @@ const obdfilterJobStatsContents = `job_stats: get_info: { samples: 0, unit: reqs } set_info: { samples: 0, unit: reqs } quotactl: { samples: 0, unit: reqs } +- job_id: testjob2 + snapshot_time: 1461772761 + read_bytes: { samples: 1, unit: bytes, min: 1024, max: 1024, sum: 1024 } + write_bytes: { samples: 25, unit: bytes, min: 2048, max: 2048, sum: 51200 } + getattr: { samples: 0, unit: reqs } + setattr: { samples: 0, unit: reqs } + punch: { samples: 1, unit: reqs } + sync: { samples: 0, unit: reqs } + destroy: { samples: 0, unit: reqs } + create: { samples: 0, unit: reqs } + statfs: { samples: 0, unit: reqs } + get_info: { samples: 0, unit: reqs } + set_info: { samples: 0, unit: reqs } + quotactl: { samples: 0, unit: reqs } ` const mdtProcContents = `snapshot_time 1438693238.20113 secs.usecs @@ -93,6 +107,24 @@ const mdtJobStatsContents = `job_stats: sync: { samples: 2, unit: reqs } samedir_rename: { samples: 705, unit: reqs } crossdir_rename: { samples: 200, unit: reqs } +- job_id: testjob2 + snapshot_time: 1461772761 + open: { samples: 6, unit: reqs } + close: { samples: 7, unit: reqs } + mknod: { samples: 8, unit: reqs } + link: { samples: 9, unit: reqs } + unlink: { samples: 20, unit: reqs } + mkdir: { samples: 200, unit: reqs } + rmdir: { samples: 210, unit: reqs } + rename: { samples: 8, unit: reqs } + getattr: { samples: 10, unit: reqs } + setattr: { samples: 2, unit: reqs } + getxattr: { samples: 4, unit: reqs } + setxattr: { samples: 5, unit: reqs } + statfs: { samples: 1207, unit: reqs } + sync: { samples: 3, unit: reqs } + samedir_rename: { samples: 706, unit: reqs } + crossdir_rename: { samples: 201, unit: reqs } ` func TestLustre2GeneratesMetrics(t *testing.T) { @@ -172,7 +204,7 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/" ost_name := 
"OST0001" - job_name := "testjob1" + job_names := []string{"testjob1", "testjob2"} mdtdir := tempdir + "/mdt/" err := os.MkdirAll(mdtdir+"/"+ost_name, 0755) @@ -199,12 +231,23 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { err = m.Gather(&acc) require.NoError(t, err) - tags := map[string]string{ - "name": ost_name, - "jobid": job_name, + // make this two tags + // and even further make this dependent on summing per OST + tags := []map[string]string{ + { + "name": ost_name, + "jobid": job_names[0], + }, + { + "name": ost_name, + "jobid": job_names[1], + }, } - fields := map[string]interface{}{ + // make this for two tags + var fields []map[string]interface{} + + fields = append(fields, map[string]interface{}{ "jobstats_read_calls": uint64(1), "jobstats_read_min_size": uint64(4096), "jobstats_read_max_size": uint64(4096), @@ -239,9 +282,50 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { "jobstats_sync": uint64(2), "jobstats_samedir_rename": uint64(705), "jobstats_crossdir_rename": uint64(200), + }) + + fields = append(fields, map[string]interface{}{ + "jobstats_read_calls": uint64(1), + "jobstats_read_min_size": uint64(1024), + "jobstats_read_max_size": uint64(1024), + "jobstats_read_bytes": uint64(1024), + "jobstats_write_calls": uint64(25), + "jobstats_write_min_size": uint64(2048), + "jobstats_write_max_size": uint64(2048), + "jobstats_write_bytes": uint64(51200), + "jobstats_ost_getattr": uint64(0), + "jobstats_ost_setattr": uint64(0), + "jobstats_punch": uint64(1), + "jobstats_ost_sync": uint64(0), + "jobstats_destroy": uint64(0), + "jobstats_create": uint64(0), + "jobstats_ost_statfs": uint64(0), + "jobstats_get_info": uint64(0), + "jobstats_set_info": uint64(0), + "jobstats_quotactl": uint64(0), + "jobstats_open": uint64(6), + "jobstats_close": uint64(7), + "jobstats_mknod": uint64(8), + "jobstats_link": uint64(9), + "jobstats_unlink": uint64(20), + "jobstats_mkdir": uint64(200), + "jobstats_rmdir": uint64(210), + "jobstats_rename": uint64(8), + "jobstats_getattr": uint64(10), + "jobstats_setattr": uint64(2), + "jobstats_getxattr": uint64(4), + "jobstats_setxattr": uint64(5), + "jobstats_statfs": uint64(1207), + "jobstats_sync": uint64(3), + "jobstats_samedir_rename": uint64(706), + "jobstats_crossdir_rename": uint64(201), + }) + + for index := 0; index < len(fields); index++ { + acc.AssertContainsTaggedFields(t, "lustre2", fields[index], tags[index]) } - acc.AssertContainsTaggedFields(t, "lustre2", fields, tags) + // run this over both tags err = os.RemoveAll(os.TempDir() + "/telegraf") require.NoError(t, err) From a1c4b9fa47bbd336ab3a13ed6abae417fdcc25eb Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 6 May 2019 16:58:55 -0700 Subject: [PATCH 0814/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2391e40c4..49a5687b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,7 @@ - [#1598](https://github.com/influxdata/telegraf/issues/1598): Fix multi-line basic strings support in configuration file. - [#5746](https://github.com/influxdata/telegraf/issues/5746): Verify a process passed by pid_file exists in procstat input. - [#5455](https://github.com/influxdata/telegraf/issues/5455): Fix unsupported pkt type error in pgbouncer. +- [#5771](https://github.com/influxdata/telegraf/pull/5771): Fix only one job per storage target reported in lustre2 input. 
## v1.10.4 [unreleased] From c2643d5f7e49881f522a178f5279bcf77ddc9df8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 6 May 2019 17:31:08 -0700 Subject: [PATCH 0815/1815] Add README for lustre2 input plugin --- plugins/inputs/lustre2/README.md | 133 +++++++++++++++++++++++++++++++ 1 file changed, 133 insertions(+) create mode 100644 plugins/inputs/lustre2/README.md diff --git a/plugins/inputs/lustre2/README.md b/plugins/inputs/lustre2/README.md new file mode 100644 index 000000000..dbdf58f73 --- /dev/null +++ b/plugins/inputs/lustre2/README.md @@ -0,0 +1,133 @@ +# Lustre Input Plugin + +The [Lustre][]® file system is an open-source, parallel file system that supports +many requirements of leadership class HPC simulation environments. + +This plugin monitors the Lustre file system using its entries in the proc filesystem. + +### Configuration + +```toml +# Read metrics from local Lustre service on OST, MDS +[[inputs.lustre2]] + ## An array of /proc globs to search for Lustre stats + ## If not specified, the default will work on Lustre 2.5.x + ## + # ost_procfiles = [ + # "/proc/fs/lustre/obdfilter/*/stats", + # "/proc/fs/lustre/osd-ldiskfs/*/stats", + # "/proc/fs/lustre/obdfilter/*/job_stats", + # ] + # mds_procfiles = [ + # "/proc/fs/lustre/mdt/*/md_stats", + # "/proc/fs/lustre/mdt/*/job_stats", + # ] +``` + +### Metrics + +From `/proc/fs/lustre/obdfilter/*/stats` and `/proc/fs/lustre/osd-ldiskfs/*/stats`: + +- lustre2 + - tags: + - name + - fields: + - write_bytes + - write_calls + - read_bytes + - read_calls + - cache_hit + - cache_miss + - cache_access + +From `/proc/fs/lustre/obdfilter/*/job_stats`: + +- lustre2 + - tags: + - name + - jobid + - fields: + - jobstats_ost_getattr + - jobstats_ost_setattr + - jobstats_ost_sync + - jobstats_punch + - jobstats_destroy + - jobstats_create + - jobstats_ost_statfs + - jobstats_get_info + - jobstats_set_info + - jobstats_quotactl + - jobstats_read_bytes + - jobstats_read_calls + - jobstats_read_max_size + - jobstats_read_min_size + - jobstats_write_bytes + - jobstats_write_calls + - jobstats_write_max_size + - jobstats_write_min_size + +From `/proc/fs/lustre/mdt/*/md_stats`: + +- lustre2 + - tags: + - name + - fields: + - open + - close + - mknod + - link + - unlink + - mkdir + - rmdir + - rename + - getattr + - setattr + - getxattr + - setxattr + - statfs + - sync + - samedir_rename + - crossdir_rename + +From `/proc/fs/lustre/mdt/*/job_stats`: + +- lustre2 + - tags: + - name + - jobid + - fields: + - jobstats_close + - jobstats_crossdir_rename + - jobstats_getattr + - jobstats_getxattr + - jobstats_link + - jobstats_mkdir + - jobstats_mknod + - jobstats_open + - jobstats_rename + - jobstats_rmdir + - jobstats_samedir_rename + - jobstats_setattr + - jobstats_setxattr + - jobstats_statfs + - jobstats_sync + - jobstats_unlink + + +### Troubleshooting + +Check for the default or custom procfiles in the proc filesystem, and reference +the [Lustre Monitoring and Statistics Guide][guide]. This plugin does not +report all information from these files, only a limited set of items +corresponding to the above metric fields. 
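+
+For instance, to confirm that the default globs match anything on a given
+node, you can read one of the monitored files directly (exact paths vary
+with the Lustre version):
+
+```
+cat /proc/fs/lustre/obdfilter/*/stats
+cat /proc/fs/lustre/mdt/*/md_stats
+```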
+ +### Example Output + +``` +lustre2,host=oss2,jobid=42990218,name=wrk-OST0041 jobstats_ost_setattr=0i,jobstats_ost_sync=0i,jobstats_punch=0i,jobstats_read_bytes=4096i,jobstats_read_calls=1i,jobstats_read_max_size=4096i,jobstats_read_min_size=4096i,jobstats_write_bytes=310206488i,jobstats_write_calls=7423i,jobstats_write_max_size=53048i,jobstats_write_min_size=8820i 1556525847000000000 +lustre2,host=mds1,jobid=42992017,name=wrk-MDT0000 jobstats_close=31798i,jobstats_crossdir_rename=0i,jobstats_getattr=34146i,jobstats_getxattr=15i,jobstats_link=0i,jobstats_mkdir=658i,jobstats_mknod=0i,jobstats_open=31797i,jobstats_rename=0i,jobstats_rmdir=0i,jobstats_samedir_rename=0i,jobstats_setattr=1788i,jobstats_setxattr=0i,jobstats_statfs=0i,jobstats_sync=0i,jobstats_unlink=0i 1556525828000000000 + +``` + +[lustre]: http://lustre.org/ +[guide]: http://wiki.lustre.org/Lustre_Monitoring_and_Statistics_Guide From aac4c29dc6c5cffa1f2f94df4bd81991475ee464 Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Tue, 7 May 2019 14:52:24 -0400 Subject: [PATCH 0816/1815] Fix interval estimation in vsphere input (#5726) --- plugins/inputs/vsphere/endpoint.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index 694efb574..411bdc965 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -806,9 +806,17 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc localNow := time.Now() estInterval := time.Duration(time.Minute) if !res.lastColl.IsZero() { - estInterval = localNow.Sub(res.lastColl).Truncate(time.Duration(res.sampling) * time.Second) + s := time.Duration(res.sampling) * time.Second + rawInterval := localNow.Sub(res.lastColl) + paddedInterval := rawInterval + time.Duration(res.sampling/2)*time.Second + estInterval = paddedInterval.Truncate(s) + if estInterval < s { + estInterval = s + } + log.Printf("D! [inputs.vsphere] Raw interval %s, padded: %s, estimated: %s", rawInterval, paddedInterval, estInterval) } log.Printf("D! [inputs.vsphere] Interval estimated to %s", estInterval) + res.lastColl = localNow latest := res.latestSample if !latest.IsZero() { From 7a07b827b5ffed350dc99b2649ee260bfbcb381e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 7 May 2019 11:53:32 -0700 Subject: [PATCH 0817/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 49a5687b5..755b60fb4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -55,6 +55,7 @@ - [#5783](https://github.com/influxdata/telegraf/pull/5783): Create telegraf user in pre-install rpm scriptlet. - [#5792](https://github.com/influxdata/telegraf/pull/5792): Don't discard metrics on forbidden error in influxdb_v2 output. - [#5803](https://github.com/influxdata/telegraf/issues/5803): Fix http output cannot set Host header. +- [#5619](https://github.com/influxdata/telegraf/issues/5619): Fix interval estimation in vsphere input. 
## v1.10.3 [2019-04-16] From 23b98754623abcf4172464da362d0ff5a9f63a07 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 7 May 2019 14:15:12 -0700 Subject: [PATCH 0818/1815] Require github.com/jackc/pgx v3.4.0 (#5814) --- Gopkg.lock | 9 +++++++-- Gopkg.toml | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index b76092f87..505bbaa39 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -673,7 +673,7 @@ revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec" [[projects]] - digest = "1:518822813d0d9e252f7abdbb6dd8939f171e2af6e001563fbee710e71e922ff2" + digest = "1:a7998e19ebb78fdd341cdaf3825fded9030ae27af9c70d298c05d88744e16a0b" name = "github.com/jackc/pgx" packages = [ ".", @@ -685,7 +685,8 @@ "stdlib", ] pruneopts = "" - revision = "051e69d512355b5d5dd6f8b92970105ee36e0579" + revision = "8faa4453fc7051d1076053f8854077753ab912f2" + version = "v3.4.0" [[projects]] digest = "1:6f49eae0c1e5dab1dafafee34b207aeb7a42303105960944828c2079b92fc88e" @@ -1305,6 +1306,7 @@ digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4" name = "golang.org/x/text" packages = [ + "cases", "collate", "collate/build", "encoding", @@ -1317,6 +1319,7 @@ "encoding/simplifiedchinese", "encoding/traditionalchinese", "encoding/unicode", + "internal", "internal/colltab", "internal/gen", "internal/tag", @@ -1326,11 +1329,13 @@ "language", "runes", "secure/bidirule", + "secure/precis", "transform", "unicode/bidi", "unicode/cldr", "unicode/norm", "unicode/rangetable", + "width", ] pruneopts = "" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" diff --git a/Gopkg.toml b/Gopkg.toml index 72a6e1cd2..4e50eb11b 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -88,7 +88,7 @@ [[constraint]] name = "github.com/jackc/pgx" - revision = "051e69d512355b5d5dd6f8b92970105ee36e0579" + version = "3.4.0" [[constraint]] name = "github.com/kardianos/service" From 61c2cc97a20ae895d903dd217e416d966b5244d2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 7 May 2019 14:15:30 -0700 Subject: [PATCH 0819/1815] Set default timeout of 5s in fibaro input (#5813) --- plugins/inputs/fibaro/fibaro.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/fibaro/fibaro.go b/plugins/inputs/fibaro/fibaro.go index 6eacb3ee6..187b74a50 100644 --- a/plugins/inputs/fibaro/fibaro.go +++ b/plugins/inputs/fibaro/fibaro.go @@ -5,12 +5,15 @@ import ( "fmt" "net/http" "strconv" + "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) +const defaultTimeout = 5 * time.Second + const sampleConfig = ` ## Required Fibaro controller address/hostname. 
## Note: at the time of writing this plugin, Fibaro only implemented http - no https available @@ -28,13 +31,13 @@ const description = "Read devices value(s) from a Fibaro controller" // Fibaro contains connection information type Fibaro struct { - URL string + URL string `toml:"url"` // HTTP Basic Auth Credentials - Username string - Password string + Username string `toml:"username"` + Password string `toml:"password"` - Timeout internal.Duration + Timeout internal.Duration `toml:"timeout"` client *http.Client } @@ -212,6 +215,8 @@ func (f *Fibaro) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("fibaro", func() telegraf.Input { - return &Fibaro{} + return &Fibaro{ + Timeout: internal.Duration{Duration: defaultTimeout}, + } }) } From a0f4c49fb4a8c7694c0edfcee4a142c870e739a8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 7 May 2019 14:16:36 -0700 Subject: [PATCH 0820/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 755b60fb4..2cfd36ab0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,7 @@ - [#5746](https://github.com/influxdata/telegraf/issues/5746): Verify a process passed by pid_file exists in procstat input. - [#5455](https://github.com/influxdata/telegraf/issues/5455): Fix unsupported pkt type error in pgbouncer. - [#5771](https://github.com/influxdata/telegraf/pull/5771): Fix only one job per storage target reported in lustre2 input. +- [#5796](https://github.com/influxdata/telegraf/issues/5796): Set default timeout of 5s in fibaro input. ## v1.10.4 [unreleased] From 3011a009e5ec1ba9a16bee5c4b2edc420f4d2fbd Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 7 May 2019 14:54:43 -0700 Subject: [PATCH 0821/1815] Skip lines with missing refid in ntpq input (#5782) --- plugins/inputs/ntpq/ntpq.go | 6 ++ plugins/inputs/ntpq/ntpq_test.go | 102 ++++++++++++++++++++----------- 2 files changed, 73 insertions(+), 35 deletions(-) diff --git a/plugins/inputs/ntpq/ntpq.go b/plugins/inputs/ntpq/ntpq.go index ce7bb96d7..d7b1b4f5b 100644 --- a/plugins/inputs/ntpq/ntpq.go +++ b/plugins/inputs/ntpq/ntpq.go @@ -75,6 +75,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { } lineCounter := 0 + numColumns := 0 scanner := bufio.NewScanner(bytes.NewReader(out)) for scanner.Scan() { line := scanner.Text() @@ -96,6 +97,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { // If lineCounter == 0, then this is the header line if lineCounter == 0 { + numColumns = len(fields) for i, field := range fields { // Check if field is a tag: if tagKey, ok := tagHeaders[field]; ok { @@ -116,6 +118,10 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { } } } else { + if len(fields) != numColumns { + continue + } + mFields := make(map[string]interface{}) // Get tags from output diff --git a/plugins/inputs/ntpq/ntpq_test.go b/plugins/inputs/ntpq/ntpq_test.go index 47b8cf8f4..016a9e5bd 100644 --- a/plugins/inputs/ntpq/ntpq_test.go +++ b/plugins/inputs/ntpq/ntpq_test.go @@ -3,10 +3,12 @@ package ntpq import ( "fmt" "testing" + "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestSingleNTPQ(t *testing.T) { @@ -39,35 +41,6 @@ func TestSingleNTPQ(t *testing.T) { acc.AssertContainsTaggedFields(t, "ntpq", fields, tags) } -func TestMissingJitterField(t *testing.T) { - tt := tester{ - ret: []byte(missingJitterField), - err: nil, - } - n := &NTPQ{ - runQ: tt.runqTest, - } - - acc 
:= testutil.Accumulator{}
-	assert.NoError(t, acc.GatherError(n.Gather))
-
-	fields := map[string]interface{}{
-		"when":   int64(101),
-		"poll":   int64(256),
-		"reach":  int64(37),
-		"delay":  float64(51.016),
-		"offset": float64(233.010),
-	}
-	tags := map[string]string{
-		"remote":       "uschi5-ntp-002.",
-		"state_prefix": "*",
-		"refid":        "10.177.80.46",
-		"stratum":      "2",
-		"type":         "u",
-	}
-	acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
-}
-
 func TestBadIntNTPQ(t *testing.T) {
 	tt := tester{
 		ret: []byte(badIntParseNTPQ),
@@ -428,6 +401,62 @@ func TestFailedNTPQ(t *testing.T) {
 	assert.Error(t, acc.GatherError(n.Gather))
 }
 
+// It is possible for the output of ntpq to be missing the refid column. This
+// is believed to be http://bugs.ntp.org/show_bug.cgi?id=3484 which is fixed
+// in ntp-4.2.8p12 (included first in Debian Buster).
+func TestNoRefID(t *testing.T) {
+	now := time.Now()
+	expected := []telegraf.Metric{
+		testutil.MustMetric("ntpq",
+			map[string]string{
+				"refid":   "10.177.80.37",
+				"remote":  "83.137.98.96",
+				"stratum": "2",
+				"type":    "u",
+			},
+			map[string]interface{}{
+				"delay":  float64(54.033),
+				"jitter": float64(449514),
+				"offset": float64(243.426),
+				"poll":   int64(1024),
+				"reach":  int64(377),
+				"when":   int64(740),
+			},
+			now),
+		testutil.MustMetric("ntpq",
+			map[string]string{
+				"refid":   "10.177.80.37",
+				"remote":  "131.188.3.221",
+				"stratum": "2",
+				"type":    "u",
+			},
+			map[string]interface{}{
+				"delay":  float64(111.820),
+				"jitter": float64(449528),
+				"offset": float64(261.921),
+				"poll":   int64(1024),
+				"reach":  int64(377),
+				"when":   int64(783),
+			},
+			now),
+	}
+
+	tt := tester{
+		ret: []byte(noRefID),
+		err: nil,
+	}
+	n := &NTPQ{
+		runQ: tt.runqTest,
+	}
+
+	acc := testutil.Accumulator{
+		TimeFunc: func() time.Time { return now },
+	}
+
+	require.NoError(t, acc.GatherError(n.Gather))
+	testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
+}
+
 type tester struct {
 	ret []byte
 	err error
@@ -474,11 +503,6 @@ var singleNTPQ = ` remote refid st t when poll reach delay
 *uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462
 `
-var missingJitterField = ` remote refid st t when poll reach delay offset jitter
-==============================================================================
-*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010
-`
 var badHeaderNTPQ = `remote refid foobar t when poll reach delay offset jitter
 ==============================================================================
 *uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462
 `
@@ -527,6 +551,7 @@ var multiNTPQ = ` remote refid st t when poll reach delay
 5.9.29.107 10.177.80.37 2 u 703 1024 377 205.704 160.406 449602.
 91.189.94.4 10.177.80.37 2 u 673 1024 377 143.047 274.726 449445.
`
+
var multiParserNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*SHM(0) .PPS. 1 u 60 64 377 0.000 0.045 1.012
+37.58.57.238 ( 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101
-SHM(1) .GPS. 1 u 121 128 377 0.000 10.105 2.012
`
+
+var noRefID = ` remote refid st t when poll reach delay offset jitter
+==============================================================================
+ 83.137.98.96 10.177.80.37 2 u 740 1024 377 54.033 243.426 449514.
+ 91.189.94.4 2 u 673 1024 377 143.047 274.726 449445.
+ 131.188.3.221 10.177.80.37 2 u 783 1024 377 111.820 261.921 449528. 
+`

From 67394709a92694176bd705bcef856704250d6dde Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 7 May 2019 14:56:25 -0700
Subject: [PATCH 0822/1815] Update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2cfd36ab0..cffd537c4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -57,6 +57,7 @@
 - [#5792](https://github.com/influxdata/telegraf/pull/5792): Don't discard metrics on forbidden error in influxdb_v2 output.
 - [#5803](https://github.com/influxdata/telegraf/issues/5803): Fix http output cannot set Host header.
 - [#5619](https://github.com/influxdata/telegraf/issues/5619): Fix interval estimation in vsphere input.
+- [#5782](https://github.com/influxdata/telegraf/pull/5782): Skip lines with missing refid in ntpq input.

 ## v1.10.3 [2019-04-16]

From 0d66ed70f84e18266bf641749c9ee4964215a044 Mon Sep 17 00:00:00 2001
From: Greg <2653109+glinton@users.noreply.github.com>
Date: Tue, 7 May 2019 16:20:03 -0600
Subject: [PATCH 0823/1815] Update smart input plugin to support more drive types (#5765)

---
 plugins/inputs/smart/README.md     |  18 +-
 plugins/inputs/smart/smart.go      | 218 +++++++---
 plugins/inputs/smart/smart_test.go | 670 ++++++++++++++++++++++++-----
 3 files changed, 735 insertions(+), 171 deletions(-)

diff --git a/plugins/inputs/smart/README.md b/plugins/inputs/smart/README.md
index c60e11e35..127397f1e 100644
--- a/plugins/inputs/smart/README.md
+++ b/plugins/inputs/smart/README.md
@@ -31,29 +31,27 @@ smartctl -s on
 [[inputs.smart]]
   ## Optionally specify the path to the smartctl executable
   # path = "/usr/bin/smartctl"
-  #
+
   ## On most platforms smartctl requires root access.
   ## Setting 'use_sudo' to true will make use of sudo to run smartctl.
   ## Sudo must be configured to allow the telegraf user to run smartctl
-  ## with out password.
+  ## without a password.
   # use_sudo = false
-  #
+
   ## Skip checking disks in this power mode. Defaults to
   ## "standby" to not wake up disks that have stopped rotating.
-  ## See --nockeck in the man pages for smartctl.
+  ## See --nocheck in the man pages for smartctl.
   ## smartctl version 5.41 and 5.42 have faulty detection of
   ## power mode and might require changing this value to
-  ## "never" depending on your storage device.
+  ## "never" depending on your disks.
   # nocheck = "standby"
-  #
+
   ## Gather detailed metrics for each SMART Attribute.
-  ## Defaults to "false"
-  ##
   # attributes = false
-  #
+
   ## Optionally specify devices to exclude from reporting.
   # excludes = [ "/dev/pass6" ]
-  #
+
   ## Optionally specify devices and device type, if unset
   ## a scan (smartctl --scan) for S.M.A.R.T. devices will be
   ## done and all found will be included except for the
diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go
index 46912d487..8bec2581f 100644
--- a/plugins/inputs/smart/smart.go
+++ b/plugins/inputs/smart/smart.go
@@ -3,6 +3,7 @@ package smart
 import (
 	"bufio"
 	"fmt"
+	"log"
 	"os/exec"
 	"path"
 	"regexp"
@@ -18,31 +19,46 @@ import (
 )

 var (
-	execCommand = exec.Command // execCommand is used to mock commands in tests.
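+	// The patterns below are matched line by line against `smartctl --info`
+	// and `--health` output; together they cover ATA, SAS/SCSI and NVMe devices.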
-	// Device Model: APPLE SSD SM256E
-	modelInInfo = regexp.MustCompile("^Device Model:\\s+(.*)$")
+	// Product: HUH721212AL5204
+	// Model Number: TS128GMTE850
+	modelInfo = regexp.MustCompile("^(Device Model|Product|Model Number):\\s+(.*)$")
 	// Serial Number: S0X5NZBC422720
-	serialInInfo = regexp.MustCompile("^Serial Number:\\s+(.*)$")
+	serialInfo = regexp.MustCompile("^Serial Number:\\s+(.*)$")
 	// LU WWN Device Id: 5 002538 655584d30
-	wwnInInfo = regexp.MustCompile("^LU WWN Device Id:\\s+(.*)$")
+	wwnInfo = regexp.MustCompile("^LU WWN Device Id:\\s+(.*)$")
 	// User Capacity: 251,000,193,024 bytes [251 GB]
-	usercapacityInInfo = regexp.MustCompile("^User Capacity:\\s+([0-9,]+)\\s+bytes.*$")
+	usercapacityInfo = regexp.MustCompile("^User Capacity:\\s+([0-9,]+)\\s+bytes.*$")
 	// SMART support is: Enabled
-	smartEnabledInInfo = regexp.MustCompile("^SMART support is:\\s+(\\w+)$")
+	smartEnabledInfo = regexp.MustCompile("^SMART support is:\\s+(\\w+)$")
 	// SMART overall-health self-assessment test result: PASSED
+	// SMART Health Status: OK
 	// PASSED, FAILED, UNKNOWN
-	smartOverallHealth = regexp.MustCompile("^SMART overall-health self-assessment test result:\\s+(\\w+).*$")
+	smartOverallHealth = regexp.MustCompile("^(SMART overall-health self-assessment test result|SMART Health Status):\\s+(\\w+).*$")
+
+	// Accumulated start-stop cycles: 7
+	sasStartStopAttr = regexp.MustCompile("^Accumulated start-stop cycles:\\s+(.*)$")
+	// Accumulated load-unload cycles: 39
+	sasLoadCycleAttr = regexp.MustCompile("^Accumulated load-unload cycles:\\s+(.*)$")
+	// Current Drive Temperature: 34 C
+	sasTempAttr = regexp.MustCompile("^Current Drive Temperature:\\s+(.*)\\s+C(.*)$")
+	// Temperature: 38 Celsius
+	nvmeTempAttr = regexp.MustCompile("^Temperature:\\s+(.*)\\s+(.*)$")
+	// Power Cycles: 472
+	nvmePowerCycleAttr = regexp.MustCompile("^Power Cycles:\\s+(.*)$")
+	// Power On Hours: 6,038
+	nvmePowerOnAttr = regexp.MustCompile("^Power On Hours:\\s+(.*)$")

 	// ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE
 	// 1 Raw_Read_Error_Rate -O-RC- 200 200 000 - 0
 	// 5 Reallocated_Sector_Ct PO--CK 100 100 000 - 0
 	// 192 Power-Off_Retract_Count -O--C- 097 097 000 - 14716
-	attribute = regexp.MustCompile("^\\s*([0-9]+)\\s(\\S+)\\s+([-P][-O][-S][-R][-C][-K])\\s+([0-9]+)\\s+([0-9]+)\\s+([0-9]+)\\s+([-\\w]+)\\s+([\\w\\+\\.]+).*$")
+	attribute = regexp.MustCompile("^\\s*([0-9]+)\\s(\\S+)\\s+([-P][-O][-S][-R][-C][-K])\\s+([0-9]+)\\s+([0-9]+)\\s+([0-9-]+)\\s+([-\\w]+)\\s+([\\w\\+\\.]+).*$")

 	deviceFieldIds = map[string]string{
 		"1": "read_error_rate",
 		"7": "seek_error_rate",
+		"190": "temp_c",
 		"194": "temp_c",
 		"199": "udma_crc_errors",
 	}
@@ -60,13 +76,13 @@ type Smart struct {
 var sampleConfig = `
   ## Optionally specify the path to the smartctl executable
   # path = "/usr/bin/smartctl"
-  #
+
   ## On most platforms smartctl requires root access.
   ## Setting 'use_sudo' to true will make use of sudo to run smartctl.
   ## Sudo must be configured to allow the telegraf user to run smartctl
-  ## with out password.
+  ## without a password.
-  #
+
   ## Skip checking disks in this power mode. Defaults to
   ## "standby" to not wake up disks that have stopped rotating.
   ## See --nocheck in the man pages for smartctl.
@@ -74,15 +90,13 @@ var sampleConfig = `
   ## power mode and might require changing this value to
   ## "never" depending on your disks.
   # nocheck = "standby"
-  #
+
   ## Gather detailed metrics for each SMART Attribute.
-  ## Defaults to "false"
-  ##
   # attributes = false
-  #
+
   ## Optionally specify devices to exclude from reporting.
   # excludes = [ "/dev/pass6" ]
-  #
+
   ## Optionally specify devices and device type, if unset
   ## a scan (smartctl --scan) for S.M.A.R.T. devices will be
   ## done and all found will be included except for the
@@ -111,34 +125,36 @@ func (m *Smart) Gather(acc telegraf.Accumulator) error {
 			return err
 		}
 	}
+	log.Printf("D! [inputs.smart] devices: %+#v", devices)
 	m.getAttributes(acc, devices)
 	return nil
 }

 // Wrap with sudo
-func sudo(sudo bool, command string, args ...string) *exec.Cmd {
+var runCmd = func(sudo bool, command string, args ...string) ([]byte, error) {
+	cmd := exec.Command(command, args...)
 	if sudo {
-		return execCommand("sudo", append([]string{"-n", command}, args...)...)
+		cmd = exec.Command("sudo", append([]string{"-n", command}, args...)...)
 	}
-
-	return execCommand(command, args...)
+	return internal.CombinedOutputTimeout(cmd, time.Second*5)
 }

 // Scan for S.M.A.R.T. devices
 func (m *Smart) scan() ([]string, error) {
-
-	cmd := sudo(m.UseSudo, m.Path, "--scan")
-	out, err := internal.CombinedOutputTimeout(cmd, time.Second*5)
+	out, err := runCmd(m.UseSudo, m.Path, "--scan")
 	if err != nil {
-		return []string{}, fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
+		return []string{}, fmt.Errorf("failed to run command '%s --scan': %s - %s", m.Path, err, string(out))
 	}

 	devices := []string{}
 	for _, line := range strings.Split(string(out), "\n") {
 		dev := strings.Split(line, " ")
 		if len(dev) > 1 && !excludedDev(m.Excludes, strings.TrimSpace(dev[0])) {
+			log.Printf("D! [inputs.smart] adding device: %+#v", dev)
 			devices = append(devices, strings.TrimSpace(dev[0]))
+		} else {
+			log.Printf("D! [inputs.smart] skipping device: %+#v", dev)
 		}
 	}
 	return devices, nil
@@ -158,7 +174,6 @@ func excludedDev(excludes []string, deviceLine string) bool {

 // Get info and attributes for each S.M.A.R.T. device
 func (m *Smart) getAttributes(acc telegraf.Accumulator, devices []string) {
-
 	var wg sync.WaitGroup
 	wg.Add(len(devices))

@@ -180,79 +195,77 @@ func exitStatus(err error) (int, error) {
 	return 0, err
 }

-func gatherDisk(acc telegraf.Accumulator, usesudo, attributes bool, smartctl, nockeck, device string, wg *sync.WaitGroup) {
-
+func gatherDisk(acc telegraf.Accumulator, usesudo, collectAttributes bool, smartctl, nocheck, device string, wg *sync.WaitGroup) {
 	defer wg.Done()
 	// smartctl 5.41 & 5.42 are broken regarding handling of --nocheck/-n
-	args := []string{"--info", "--health", "--attributes", "--tolerance=verypermissive", "-n", nockeck, "--format=brief"}
+	args := []string{"--info", "--health", "--attributes", "--tolerance=verypermissive", "-n", nocheck, "--format=brief"}
 	args = append(args, strings.Split(device, " ")...)
-	cmd := sudo(usesudo, smartctl, args...)
-	out, e := internal.CombinedOutputTimeout(cmd, time.Second*5)
+	out, e := runCmd(usesudo, smartctl, args...)
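+	// runCmd (defined above) shells out to smartctl, optionally via "sudo -n",
+	// and enforces a five-second timeout on the combined output.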
outStr := string(out) // Ignore all exit statuses except if it is a command line parse error exitStatus, er := exitStatus(e) if er != nil { - acc.AddError(fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), e, outStr)) + acc.AddError(fmt.Errorf("failed to run command '%s %s': %s - %s", smartctl, strings.Join(args, " "), e, outStr)) return } - device_tags := map[string]string{} - device_node := strings.Split(device, " ")[0] - device_tags["device"] = path.Base(device_node) - device_fields := make(map[string]interface{}) - device_fields["exit_status"] = exitStatus + deviceTags := map[string]string{} + deviceNode := strings.Split(device, " ")[0] + deviceTags["device"] = path.Base(deviceNode) + deviceFields := make(map[string]interface{}) + deviceFields["exit_status"] = exitStatus + + log.Printf("D! [inputs.smart] gatherDisk '%s'", deviceNode) scanner := bufio.NewScanner(strings.NewReader(outStr)) for scanner.Scan() { line := scanner.Text() - model := modelInInfo.FindStringSubmatch(line) - if len(model) > 1 { - device_tags["model"] = model[1] + model := modelInfo.FindStringSubmatch(line) + if len(model) > 2 { + deviceTags["model"] = model[2] } - serial := serialInInfo.FindStringSubmatch(line) + serial := serialInfo.FindStringSubmatch(line) if len(serial) > 1 { - device_tags["serial_no"] = serial[1] + deviceTags["serial_no"] = serial[1] } - wwn := wwnInInfo.FindStringSubmatch(line) + wwn := wwnInfo.FindStringSubmatch(line) if len(wwn) > 1 { - device_tags["wwn"] = strings.Replace(wwn[1], " ", "", -1) + deviceTags["wwn"] = strings.Replace(wwn[1], " ", "", -1) } - capacity := usercapacityInInfo.FindStringSubmatch(line) + capacity := usercapacityInfo.FindStringSubmatch(line) if len(capacity) > 1 { - device_tags["capacity"] = strings.Replace(capacity[1], ",", "", -1) + deviceTags["capacity"] = strings.Replace(capacity[1], ",", "", -1) } - enabled := smartEnabledInInfo.FindStringSubmatch(line) + enabled := smartEnabledInfo.FindStringSubmatch(line) if len(enabled) > 1 { - device_tags["enabled"] = enabled[1] + deviceTags["enabled"] = enabled[1] } health := smartOverallHealth.FindStringSubmatch(line) - if len(health) > 1 { - device_fields["health_ok"] = (health[1] == "PASSED") + if len(health) > 2 { + deviceFields["health_ok"] = (health[2] == "PASSED" || health[2] == "OK") } + tags := map[string]string{} + fields := make(map[string]interface{}) + attr := attribute.FindStringSubmatch(line) - if len(attr) > 1 { + if collectAttributes { + deviceNode := strings.Split(device, " ")[0] + tags["device"] = path.Base(deviceNode) - if attributes { - tags := map[string]string{} - fields := make(map[string]interface{}) - - device_node := strings.Split(device, " ")[0] - tags["device"] = path.Base(device_node) - - if serial, ok := device_tags["serial_no"]; ok { + if serial, ok := deviceTags["serial_no"]; ok { tags["serial_no"] = serial } - if wwn, ok := device_tags["wwn"]; ok { + if wwn, ok := deviceTags["wwn"]; ok { tags["wwn"] = wwn } tags["id"] = attr[1] @@ -282,16 +295,95 @@ func gatherDisk(acc telegraf.Accumulator, usesudo, attributes bool, smartctl, no // save the raw value to a field. 
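+			// Only attribute IDs listed in deviceFieldIds (read/seek error
+			// rates, temperature and UDMA CRC errors) are copied through to
+			// the smart_device measurement.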
if field, ok := deviceFieldIds[attr[1]]; ok { if val, err := parseRawValue(attr[8]); err == nil { - device_fields[field] = val + deviceFields[field] = val + } + } + } else { + if collectAttributes { + if startStop := sasStartStopAttr.FindStringSubmatch(line); len(startStop) > 1 { + tags["id"] = "4" + tags["name"] = "Start_Stop_Count" + i, err := strconv.ParseInt(strings.Replace(startStop[1], ",", "", -1), 10, 64) + if err != nil { + continue + } + fields["raw_value"] = i + + acc.AddFields("smart_attribute", fields, tags) + continue + } + + if powerCycle := nvmePowerCycleAttr.FindStringSubmatch(line); len(powerCycle) > 1 { + tags["id"] = "12" + tags["name"] = "Power_Cycle_Count" + i, err := strconv.ParseInt(strings.Replace(powerCycle[1], ",", "", -1), 10, 64) + if err != nil { + continue + } + fields["raw_value"] = i + + acc.AddFields("smart_attribute", fields, tags) + continue + } + + if powerOn := nvmePowerOnAttr.FindStringSubmatch(line); len(powerOn) > 1 { + tags["id"] = "9" + tags["name"] = "Power_On_Hours" + i, err := strconv.ParseInt(strings.Replace(powerOn[1], ",", "", -1), 10, 64) + if err != nil { + continue + } + fields["raw_value"] = i + + acc.AddFields("smart_attribute", fields, tags) + continue + } + + if loadCycle := sasLoadCycleAttr.FindStringSubmatch(line); len(loadCycle) > 1 { + tags["id"] = "193" + tags["name"] = "Load_Cycle_Count" + i, err := strconv.ParseInt(strings.Replace(loadCycle[1], ",", "", -1), 10, 64) + if err != nil { + continue + } + fields["raw_value"] = i + + acc.AddFields("smart_attribute", fields, tags) + continue + } + + if temp := sasTempAttr.FindStringSubmatch(line); len(temp) > 1 { + tags["id"] = "194" + tags["name"] = "Temperature_Celsius" + tempC, err := strconv.ParseInt(temp[1], 10, 64) + if err != nil { + continue + } + fields["raw_value"] = tempC + deviceFields["temp_c"] = tempC + + acc.AddFields("smart_attribute", fields, tags) + } + + if temp := nvmeTempAttr.FindStringSubmatch(line); len(temp) > 1 { + tags["id"] = "194" + tags["name"] = "Temperature_Celsius" + tempC, err := strconv.ParseInt(temp[1], 10, 64) + if err != nil { + continue + } + fields["raw_value"] = tempC + deviceFields["temp_c"] = tempC + + acc.AddFields("smart_attribute", fields, tags) } } } } - acc.AddFields("smart_device", device_fields, device_tags) + acc.AddFields("smart_device", deviceFields, deviceTags) } func parseRawValue(rawVal string) (int64, error) { - // Integer if i, err := strconv.ParseInt(rawVal, 10, 64); err == nil { return i, nil diff --git a/plugins/inputs/smart/smart_test.go b/plugins/inputs/smart/smart_test.go index da658f5f9..525d99e3b 100644 --- a/plugins/inputs/smart/smart_test.go +++ b/plugins/inputs/smart/smart_test.go @@ -1,9 +1,8 @@ package smart import ( - "fmt" - "os" - "os/exec" + "errors" + "sync" "testing" "github.com/influxdata/telegraf/testutil" @@ -11,66 +10,24 @@ import ( "github.com/stretchr/testify/require" ) -var ( - mockScanData = `/dev/ada0 -d atacam # /dev/ada0, ATA device -` - mockInfoAttributeData = `smartctl 6.5 2016-05-07 r4318 [Darwin 16.4.0 x86_64] (local build) -Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org - -CHECK POWER MODE not implemented, ignoring -n option -=== START OF INFORMATION SECTION === -Model Family: Apple SD/SM/TS...E/F SSDs -Device Model: APPLE SSD SM256E -Serial Number: S0X5NZBC422720 -LU WWN Device Id: 5 002538 043584d30 -Firmware Version: CXM09A1Q -User Capacity: 251,000,193,024 bytes [251 GB] -Sector Sizes: 512 bytes logical, 4096 bytes physical -Rotation Rate: Solid State Device 
-Device is: In smartctl database [for details use: -P show] -ATA Version is: ATA8-ACS T13/1699-D revision 4c -SATA Version is: SATA 3.0, 6.0 Gb/s (current: 6.0 Gb/s) -Local Time is: Thu Feb 9 16:48:45 2017 CET -SMART support is: Available - device has SMART capability. -SMART support is: Enabled - -=== START OF READ SMART DATA SECTION === -SMART overall-health self-assessment test result: PASSED - -=== START OF READ SMART DATA SECTION === -SMART Attributes Data Structure revision number: 1 -Vendor Specific SMART Attributes with Thresholds: -ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE - 1 Raw_Read_Error_Rate -O-RC- 200 200 000 - 0 - 5 Reallocated_Sector_Ct PO--CK 100 100 000 - 0 - 9 Power_On_Hours -O--CK 099 099 000 - 2988 - 12 Power_Cycle_Count -O--CK 085 085 000 - 14879 -169 Unknown_Attribute PO--C- 253 253 010 - 2044932921600 -173 Wear_Leveling_Count -O--CK 185 185 100 - 957808640337 -190 Airflow_Temperature_Cel -O---K 055 040 045 Past 45 (Min/Max 43/57 #2689) -192 Power-Off_Retract_Count -O--C- 097 097 000 - 14716 -194 Temperature_Celsius -O---K 066 021 000 - 34 (Min/Max 14/79) -197 Current_Pending_Sector -O---K 100 100 000 - 0 -199 UDMA_CRC_Error_Count -O-RC- 200 200 000 - 0 -240 Head_Flying_Hours ------ 100 253 000 - 6585h+55m+23.234s - ||||||_ K auto-keep - |||||__ C event count - ||||___ R error rate - |||____ S speed/performance - ||_____ O updated online - |______ P prefailure warning -` -) - func TestGatherAttributes(t *testing.T) { s := &Smart{ Path: "smartctl", Attributes: true, } - // overwriting exec commands with mock commands - execCommand = fakeExecCommand var acc testutil.Accumulator + runCmd = func(sudo bool, command string, args ...string) ([]byte, error) { + if len(args) > 0 { + if args[0] == "--scan" { + return []byte(mockScanData), nil + } else if args[0] == "--info" { + return []byte(mockInfoAttributeData), nil + } + } + return nil, errors.New("command not found") + } + err := s.Gather(&acc) require.NoError(t, err) @@ -302,8 +259,6 @@ func TestGatherAttributes(t *testing.T) { acc.AssertContainsTaggedFields(t, "smart_attribute", test.fields, test.tags) } - // tags = map[string]string{} - var testsAda0Device = []struct { fields map[string]interface{} tags map[string]string @@ -330,7 +285,6 @@ func TestGatherAttributes(t *testing.T) { for _, test := range testsAda0Device { acc.AssertContainsTaggedFields(t, "smart_device", test.fields, test.tags) } - } func TestGatherNoAttributes(t *testing.T) { @@ -339,7 +293,6 @@ func TestGatherNoAttributes(t *testing.T) { Attributes: false, } // overwriting exec commands with mock commands - execCommand = fakeExecCommand var acc testutil.Accumulator err := s.Gather(&acc) @@ -348,8 +301,6 @@ func TestGatherNoAttributes(t *testing.T) { assert.Equal(t, 5, acc.NFields(), "Wrong number of fields gathered") acc.AssertDoesNotContainMeasurement(t, "smart_attribute") - // tags = map[string]string{} - var testsAda0Device = []struct { fields map[string]interface{} tags map[string]string @@ -376,51 +327,574 @@ func TestGatherNoAttributes(t *testing.T) { for _, test := range testsAda0Device { acc.AssertContainsTaggedFields(t, "smart_device", test.fields, test.tags) } - } func TestExcludedDev(t *testing.T) { assert.Equal(t, true, excludedDev([]string{"/dev/pass6"}, "/dev/pass6 -d atacam"), "Should be excluded.") assert.Equal(t, false, excludedDev([]string{}, "/dev/pass6 -d atacam"), "Shouldn't be excluded.") assert.Equal(t, false, excludedDev([]string{"/dev/pass6"}, "/dev/pass1 -d atacam"), "Shouldn't be excluded.") - } -// 
fackeExecCommand is a helper function that mock -// the exec.Command call (and call the test binary) -func fakeExecCommand(command string, args ...string) *exec.Cmd { - cs := []string{"-test.run=TestHelperProcess", "--", command} - cs = append(cs, args...) - cmd := exec.Command(os.Args[0], cs...) - cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} - return cmd -} - -// TestHelperProcess isn't a real test. It's used to mock exec.Command -// For example, if you run: -// GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- --scan -// it returns below mockScanData. -func TestHelperProcess(t *testing.T) { - if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { - return +func TestGatherSATAInfo(t *testing.T) { + runCmd = func(sudo bool, command string, args ...string) ([]byte, error) { + return []byte(hgstSATAInfoData), nil } - args := os.Args + var ( + acc = &testutil.Accumulator{} + wg = &sync.WaitGroup{} + ) - // Previous arguments are tests stuff, that looks like : - // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- - cmd, arg1, args := args[3], args[4], args[5:] - - if cmd == "smartctl" { - if arg1 == "--scan" { - fmt.Fprint(os.Stdout, mockScanData) - } - if arg1 == "--info" { - fmt.Fprint(os.Stdout, mockInfoAttributeData) - } - } else { - fmt.Fprint(os.Stdout, "command not found") - os.Exit(1) - } - os.Exit(0) + wg.Add(1) + gatherDisk(acc, true, true, "", "", "", wg) + assert.Equal(t, 101, acc.NFields(), "Wrong number of fields gathered") + assert.Equal(t, uint64(20), acc.NMetrics(), "Wrong number of metrics gathered") } + +func TestGatherSATAInfo65(t *testing.T) { + runCmd = func(sudo bool, command string, args ...string) ([]byte, error) { + return []byte(hgstSATAInfoData65), nil + } + + var ( + acc = &testutil.Accumulator{} + wg = &sync.WaitGroup{} + ) + + wg.Add(1) + gatherDisk(acc, true, true, "", "", "", wg) + assert.Equal(t, 91, acc.NFields(), "Wrong number of fields gathered") + assert.Equal(t, uint64(18), acc.NMetrics(), "Wrong number of metrics gathered") +} + +func TestGatherHgstSAS(t *testing.T) { + runCmd = func(sudo bool, command string, args ...string) ([]byte, error) { + return []byte(hgstSASInfoData), nil + } + + var ( + acc = &testutil.Accumulator{} + wg = &sync.WaitGroup{} + ) + + wg.Add(1) + gatherDisk(acc, true, true, "", "", "", wg) + assert.Equal(t, 6, acc.NFields(), "Wrong number of fields gathered") + assert.Equal(t, uint64(4), acc.NMetrics(), "Wrong number of metrics gathered") +} + +func TestGatherHtSAS(t *testing.T) { + runCmd = func(sudo bool, command string, args ...string) ([]byte, error) { + return []byte(htSASInfoData), nil + } + + var ( + acc = &testutil.Accumulator{} + wg = &sync.WaitGroup{} + ) + + wg.Add(1) + gatherDisk(acc, true, true, "", "", "", wg) + assert.Equal(t, 5, acc.NFields(), "Wrong number of fields gathered") + assert.Equal(t, uint64(3), acc.NMetrics(), "Wrong number of metrics gathered") +} + +func TestGatherSSD(t *testing.T) { + runCmd = func(sudo bool, command string, args ...string) ([]byte, error) { + return []byte(ssdInfoData), nil + } + + var ( + acc = &testutil.Accumulator{} + wg = &sync.WaitGroup{} + ) + + wg.Add(1) + gatherDisk(acc, true, true, "", "", "", wg) + assert.Equal(t, 105, acc.NFields(), "Wrong number of fields gathered") + assert.Equal(t, uint64(26), acc.NMetrics(), "Wrong number of metrics gathered") +} + +func TestGatherSSDRaid(t *testing.T) { + runCmd = func(sudo bool, command string, args ...string) ([]byte, error) { + return []byte(ssdRaidInfoData), nil + } + + var ( + acc = 
&testutil.Accumulator{} + wg = &sync.WaitGroup{} + ) + + wg.Add(1) + gatherDisk(acc, true, true, "", "", "", wg) + assert.Equal(t, 74, acc.NFields(), "Wrong number of fields gathered") + assert.Equal(t, uint64(15), acc.NMetrics(), "Wrong number of metrics gathered") +} + +func TestGatherNvme(t *testing.T) { + runCmd = func(sudo bool, command string, args ...string) ([]byte, error) { + return []byte(nvmeInfoData), nil + } + + var ( + acc = &testutil.Accumulator{} + wg = &sync.WaitGroup{} + ) + + wg.Add(1) + gatherDisk(acc, true, true, "", "", "", wg) + assert.Equal(t, 6, acc.NFields(), "Wrong number of fields gathered") + assert.Equal(t, uint64(4), acc.NMetrics(), "Wrong number of metrics gathered") +} + +// smartctl output +var ( + // smartctl --scan + mockScanData = `/dev/ada0 -d atacam # /dev/ada0, ATA device +` + // smartctl --info --health --attributes --tolerance=verypermissive -n standby --format=brief [DEVICE] + mockInfoAttributeData = `smartctl 6.5 2016-05-07 r4318 [Darwin 16.4.0 x86_64] (local build) +Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org + +CHECK POWER MODE not implemented, ignoring -n option +=== START OF INFORMATION SECTION === +Model Family: Apple SD/SM/TS...E/F SSDs +Device Model: APPLE SSD SM256E +Serial Number: S0X5NZBC422720 +LU WWN Device Id: 5 002538 043584d30 +Firmware Version: CXM09A1Q +User Capacity: 251,000,193,024 bytes [251 GB] +Sector Sizes: 512 bytes logical, 4096 bytes physical +Rotation Rate: Solid State Device +Device is: In smartctl database [for details use: -P show] +ATA Version is: ATA8-ACS T13/1699-D revision 4c +SATA Version is: SATA 3.0, 6.0 Gb/s (current: 6.0 Gb/s) +Local Time is: Thu Feb 9 16:48:45 2017 CET +SMART support is: Available - device has SMART capability. +SMART support is: Enabled + +=== START OF READ SMART DATA SECTION === +SMART overall-health self-assessment test result: PASSED + +=== START OF READ SMART DATA SECTION === +SMART Attributes Data Structure revision number: 1 +Vendor Specific SMART Attributes with Thresholds: +ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE + 1 Raw_Read_Error_Rate -O-RC- 200 200 000 - 0 + 5 Reallocated_Sector_Ct PO--CK 100 100 000 - 0 + 9 Power_On_Hours -O--CK 099 099 000 - 2988 + 12 Power_Cycle_Count -O--CK 085 085 000 - 14879 +169 Unknown_Attribute PO--C- 253 253 010 - 2044932921600 +173 Wear_Leveling_Count -O--CK 185 185 100 - 957808640337 +190 Airflow_Temperature_Cel -O---K 055 040 045 Past 45 (Min/Max 43/57 #2689) +192 Power-Off_Retract_Count -O--C- 097 097 000 - 14716 +194 Temperature_Celsius -O---K 066 021 000 - 34 (Min/Max 14/79) +197 Current_Pending_Sector -O---K 100 100 000 - 0 +199 UDMA_CRC_Error_Count -O-RC- 200 200 000 - 0 +240 Head_Flying_Hours ------ 100 253 000 - 6585h+55m+23.234s + ||||||_ K auto-keep + |||||__ C event count + ||||___ R error rate + |||____ S speed/performance + ||_____ O updated online + |______ P prefailure warning +` + + htSASInfoData = `smartctl 6.6 2016-05-31 r4324 [x86_64-linux-4.15.18-12-pve] (local build) +Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smar$montools.org + +=== START OF INFORMATION SECTION === +Vendor: HITACHI +Product: HUC103030CSS600 +Revision: J350 +Compliance: SPC-4 +User Capacity: 300,$00,000,000 bytes [300 GB] +Logical block size: 512 bytes +Rotation Rate: 10020 rpm +Form Factor: 2.5 inches +Logical Unit id: 0x5000cca00a4bdbc8 +Serial number: PDWAR9GE +Devicetype: disk +Transport protocol: SAS (SPL-3) +Local Time is: Wed Apr 17 15:01:28 2019 PDT +SMART support is: Available - device 
has SMART capability. +SMART support is: Enabled +Temp$rature Warning: Disabled or Not Supported + +=== START OF READ SMART DATA SECTION === +SMART Health Status: OK + +Current Drive Temperature: 36 C +Drive Trip Temperature: 85 C + +Manufactured in $eek 52 of year 2009 +Specified cycle count over device lifetime: 50000 +Accumulated start-stop cycles: 47 +Elements in grown defect list: 0 + +Vendor (Seagate) cache information + Blocks sent to initiator= 7270983270400000 +` + + hgstSASInfoData = `smartctl 6.6 2016-05-31 r4324 [x86_64-linux-4.15.0-46-generic] (local build) +Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org + +=== START OF INFORMATION SECTION === +Vendor: HGST +Product: HUH721212AL5204 +Revision: C3Q1 +Compliance: SPC-4 +User Capacity: 12,000,138,625,024 bytes [12.0 TB] +Logical block size: 512 bytes +Physical block size: 4096 bytes +LU is fully provisioned +Rotation Rate: 7200 rpm +Form Factor: 3.5 inches +Logical Unit id: 0x5000cca27076bfe8 +Serial number: 8HJ39K3H +Device type: disk +Transport protocol: SAS (SPL-3) +Local Time is: Thu Apr 18 13:25:03 2019 MSK +SMART support is: Available - device has SMART capability. +SMART support is: Enabled +Temperature Warning: Enabled + +=== START OF READ SMART DATA SECTION === +SMART Health Status: OK + +Current Drive Temperature: 34 C +Drive Trip Temperature: 85 C + +Manufactured in week 35 of year 2018 +Specified cycle count over device lifetime: 50000 +Accumulated start-stop cycles: 7 +Specified load-unload count over device lifetime: 600000 +Accumulated load-unload cycles: 39 +Elements in grown defect list: 0 + +Vendor (Seagate) cache information + Blocks sent to initiator = 544135446528 +` + + hgstSATAInfoData = `smartctl 6.6 2016-05-31 r4324 [x86_64-linux-4.15.0-46-generic] (local build) +Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org + +=== START OF INFORMATION SECTION === +Model Family: Hitachi/HGST Travelstar Z7K500 +Device Model: HGST HTE725050A7E630 +Serial Number: RCE50G20G81S9S +LU WWN Device Id: 5 000cca 90bc3a98b +Firmware Version: GS2OA3E0 +User Capacity: 500,107,862,016 bytes [500 GB] +Sector Sizes: 512 bytes logical, 4096 bytes physical +Rotation Rate: 7200 rpm +Form Factor: 2.5 inches +Device is: In smartctl database [for details use: -P show] +ATA Version is: ATA8-ACS T13/1699-D revision 6 +SATA Version is: SATA 2.6, 6.0 Gb/s (current: 6.0 Gb/s) +Local Time is: Thu Apr 18 13:27:51 2019 MSK +SMART support is: Available - device has SMART capability. 
+SMART support is: Enabled +Power mode is: ACTIVE or IDLE + +=== START OF READ SMART DATA SECTION === +SMART overall-health self-assessment test result: PASSED + +SMART Attributes Data Structure revision number: 16 +Vendor Specific SMART Attributes with Thresholds: +ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE + 1 Raw_Read_Error_Rate PO-R-- 100 100 062 - 0 + 2 Throughput_Performance P-S--- 100 100 040 - 0 + 3 Spin_Up_Time POS--- 100 100 033 - 1 + 4 Start_Stop_Count -O--C- 100 100 000 - 4 + 5 Reallocated_Sector_Ct PO--CK 100 100 005 - 0 + 7 Seek_Error_Rate PO-R-- 100 100 067 - 0 + 8 Seek_Time_Performance P-S--- 100 100 040 - 0 + 9 Power_On_Hours -O--C- 099 099 000 - 743 + 10 Spin_Retry_Count PO--C- 100 100 060 - 0 + 12 Power_Cycle_Count -O--CK 100 100 000 - 4 +191 G-Sense_Error_Rate -O-R-- 100 100 000 - 0 +192 Power-Off_Retract_Count -O--CK 100 100 000 - 2 +193 Load_Cycle_Count -O--C- 100 100 000 - 13 +194 Temperature_Celsius -O---- 250 250 000 - 24 (Min/Max 15/29) +196 Reallocated_Event_Count -O--CK 100 100 000 - 0 +197 Current_Pending_Sector -O---K 100 100 000 - 0 +198 Offline_Uncorrectable ---R-- 100 100 000 - 0 +199 UDMA_CRC_Error_Count -O-R-- 200 200 000 - 0 +223 Load_Retry_Count -O-R-- 100 100 000 - 0 + ||||||_ K auto-keep + |||||__ C event count + ||||___ R error rate + |||____ S speed/performance + ||_____ O updated online + |______ P prefailure warning +` + + hgstSATAInfoData65 = `smartctl 6.5 2016-01-24 r4214 [x86_64-linux-4.4.0-145-generic] (local build) +Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org + +=== START OF INFORMATION SECTION === +Model Family: HGST Deskstar NAS +Device Model: HGST HDN724040ALE640 +Serial Number: PK1334PEK49SBS +LU WWN Device Id: 5 000cca 250ec3c9c +Firmware Version: MJAOA5E0 +User Capacity: 4,000,787,030,016 bytes [4.00 TB] +Sector Sizes: 512 bytes logical, 4096 bytes physical +Rotation Rate: 7200 rpm +Form Factor: 3.5 inches +Device is: In smartctl database [for details use: -P show] +ATA Version is: ATA8-ACS T13/1699-D revision 4 +SATA Version is: SATA 3.0, 6.0 Gb/s (current: 6.0 Gb/s) +Local Time is: Wed Apr 17 15:14:27 2019 PDT +SMART support is: Available - device has SMART capability. 
+SMART support is: Enabled +Power mode is: ACTIVE or IDLE + +=== START OF READ SMART DATA SECTION === +SMART overall-health self-assessment test result: PASSED + +SMART Attributes Data Structure revision number: 16 +Vendor Specific SMART Attributes with Thresholds: +ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE + 1 Raw_Read_Error_Rate PO-R-- 100 100 016 - 0 + 2 Throughput_Performance P-S--- 135 135 054 - 84 + 3 Spin_Up_Time POS--- 125 125 024 - 621 (Average 619) + 4 Start_Stop_Count -O--C- 100 100 000 - 33 + 5 Reallocated_Sector_Ct PO--CK 100 100 005 - 0 + 7 Seek_Error_Rate PO-R-- 100 100 067 - 0 + 8 Seek_Time_Performance P-S--- 119 119 020 - 35 + 9 Power_On_Hours -O--C- 098 098 000 - 19371 + 10 Spin_Retry_Count PO--C- 100 100 060 - 0 + 12 Power_Cycle_Count -O--CK 100 100 000 - 33 +192 Power-Off_Retract_Count -O--CK 100 100 000 - 764 +193 Load_Cycle_Count -O--C- 100 100 000 - 764 +194 Temperature_Celsius -O---- 176 176 000 - 34 (Min/Max 21/53) +196 Reallocated_Event_Count -O--CK 100 100 000 - 0 +197 Current_Pending_Sector -O---K 100 100 000 - 0 +198 Offline_Uncorrectable ---R-- 100 100 000 - 0 +199 UDMA_CRC_Error_Count -O-R-- 200 200 000 - 0 + ||||||_ K auto-keep + |||||__ C event count + ||||___ R error rate + |||____ S speed/performance + ||_____ O updated online + |______ P prefailure warning +` + + ssdInfoData = `smartctl 6.6 2016-05-31 r4324 [x86_64-linux-4.15.0-33-generic] (local build) +Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org + +=== START OF INFORMATION SECTION === +Device Model: SanDisk Ultra II 240GB +Serial Number: XXXXXXXX +LU WWN Device Id: XXXXXXXX +Firmware Version: XXXXXXX +User Capacity: 240.057.409.536 bytes [240 GB] +Sector Size: 512 bytes logical/physical +Rotation Rate: Solid State Device +Form Factor: 2.5 inches +Device is: Not in smartctl database [for details use: -P showall] +ATA Version is: ACS-2 T13/2015-D revision 3 +SATA Version is: SATA 3.2, 6.0 Gb/s (current: 6.0 Gb/s) +Local Time is: Mon Sep 17 13:22:19 2018 CEST +SMART support is: Available - device has SMART capability. 
+SMART support is: Enabled +Power mode is: ACTIVE or IDLE + +=== START OF READ SMART DATA SECTION === +SMART overall-health self-assessment test result: PASSED + +SMART Attributes Data Structure revision number: 4 +Vendor Specific SMART Attributes with Thresholds: +ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE + 5 Reallocated_Sector_Ct -O--CK 100 100 --- - 0 + 9 Power_On_Hours -O--CK 100 100 --- - 6383 + 12 Power_Cycle_Count -O--CK 100 100 --- - 19 +165 Unknown_Attribute -O--CK 100 100 --- - 59310806 +166 Unknown_Attribute -O--CK 100 100 --- - 1 +167 Unknown_Attribute -O--CK 100 100 --- - 57 +168 Unknown_Attribute -O--CK 100 100 --- - 43 +169 Unknown_Attribute -O--CK 100 100 --- - 221 +170 Unknown_Attribute -O--CK 100 100 --- - 0 +171 Unknown_Attribute -O--CK 100 100 --- - 0 +172 Unknown_Attribute -O--CK 100 100 --- - 0 +173 Unknown_Attribute -O--CK 100 100 --- - 13 +174 Unknown_Attribute -O--CK 100 100 --- - 4 +184 End-to-End_Error -O--CK 100 100 --- - 0 +187 Reported_Uncorrect -O--CK 100 100 --- - 0 +188 Command_Timeout -O--CK 100 100 --- - 0 +194 Temperature_Celsius -O---K 066 065 --- - 34 (Min/Max 19/65) +199 UDMA_CRC_Error_Count -O--CK 100 100 --- - 0 +230 Unknown_SSD_Attribute -O--CK 100 100 --- - 2229110374919 +232 Available_Reservd_Space PO--CK 100 100 004 - 100 +233 Media_Wearout_Indicator -O--CK 100 100 --- - 3129 +234 Unknown_Attribute -O--CK 100 100 --- - 7444 +241 Total_LBAs_Written ----CK 253 253 --- - 4812 +242 Total_LBAs_Read ----CK 253 253 --- - 671 +244 Unknown_Attribute -O--CK 000 100 --- - 0 + ||||||_ K auto-keep + |||||__ C event count + ||||___ R error rate + |||____ S speed/performance + ||_____ O updated online + |______ P prefailure warning +` + ssdRaidInfoData = `smartctl 6.6 2017-11-05 r4594 [FreeBSD 11.1-RELEASE-p13 amd64] (local build) +Copyright (C) 2002-17, Bruce Allen, Christian Franke, www.smartmontools.org + +CHECK POWER MODE: incomplete response, ATA output registers missing +CHECK POWER MODE not implemented, ignoring -n option +=== START OF INFORMATION SECTION === +Model Family: Samsung based SSDs +Device Model: Samsung SSD 850 PRO 256GB +Serial Number: S251NX0H869353L +LU WWN Device Id: 5 002538 84027f72f +Firmware Version: EXM02B6Q +User Capacity: 256 060 514 304 bytes [256 GB] +Sector Size: 512 bytes logical/physical +Rotation Rate: Solid State Device +Device is: In smartctl database [for details use: -P show] +ATA Version is: ACS-2, ATA8-ACS T13/1699-D revision 4c +SATA Version is: SATA 3.1, 6.0 Gb/s (current: 6.0 Gb/s) +Local Time is: Fri Sep 21 17:49:16 2018 CEST +SMART support is: Available - device has SMART capability. +SMART support is: Enabled + +=== START OF READ SMART DATA SECTION === +SMART Status not supported: Incomplete response, ATA output registers missing +SMART overall-health self-assessment test result: PASSED +Warning: This result is based on an Attribute check. + +General SMART Values: +Offline data collection status: (0x00) Offline data collection activity + was never started. + Auto Offline Data Collection: Disabled. +Self-test execution status: ( 0) The previous self-test routine completed + without error or no self-test has ever + been run. +Total time to complete Offline +data collection: ( 0) seconds. +Offline data collection +capabilities: (0x53) SMART execute Offline immediate. + Auto Offline data collection on/off support. + Suspend Offline collection upon new + command. + No Offline surface scan supported. + Self-test supported. + No Conveyance Self-test supported. + Selective Self-test supported. 
+SMART capabilities: (0x0003) Saves SMART data before entering + power-saving mode. + Supports SMART auto save timer. +Error logging capability: (0x01) Error logging supported. + General Purpose Logging supported. +Short self-test routine +recommended polling time: ( 2) minutes. +Extended self-test routine +recommended polling time: ( 136) minutes. +SCT capabilities: (0x003d) SCT Status supported. + SCT Error Recovery Control supported. + SCT Feature Control supported. + SCT Data Table supported. + +SMART Attributes Data Structure revision number: 1 +Vendor Specific SMART Attributes with Thresholds: +ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE + 5 Reallocated_Sector_Ct PO--CK 099 099 010 - 1 + 9 Power_On_Hours -O--CK 094 094 000 - 26732 + 12 Power_Cycle_Count -O--CK 099 099 000 - 51 +177 Wear_Leveling_Count PO--C- 001 001 000 - 7282 +179 Used_Rsvd_Blk_Cnt_Tot PO--C- 099 099 010 - 1 +181 Program_Fail_Cnt_Total -O--CK 100 100 010 - 0 +182 Erase_Fail_Count_Total -O--CK 099 099 010 - 1 +183 Runtime_Bad_Block PO--C- 099 099 010 - 1 +187 Uncorrectable_Error_Cnt -O--CK 100 100 000 - 0 +190 Airflow_Temperature_Cel -O--CK 081 069 000 - 19 +195 ECC_Error_Rate -O-RC- 200 200 000 - 0 +199 CRC_Error_Count -OSRCK 100 100 000 - 0 +235 POR_Recovery_Count -O--C- 099 099 000 - 50 +241 Total_LBAs_Written -O--CK 099 099 000 - 61956393677 + ||||||_ K auto-keep + |||||__ C event count + ||||___ R error rate + |||____ S speed/performance + ||_____ O updated online + |______ P prefailure warning + +SMART Error Log Version: 1 +No Errors Logged + +SMART Self-test log structure revision number 1 +Num Test_Description Status Remaining LifeTime(hours) LBA_of_first_error +# 1 Short offline Completed without error 00% 26717 - +# 2 Short offline Completed without error 00% 26693 - +# 3 Short offline Completed without error 00% 26669 - +# 4 Short offline Completed without error 00% 26645 - +# 5 Short offline Completed without error 00% 26621 - +# 6 Short offline Completed without error 00% 26596 - +# 7 Extended offline Completed without error 00% 26574 - +# 8 Short offline Completed without error 00% 26572 - +# 9 Short offline Completed without error 00% 26548 - +#10 Short offline Completed without error 00% 26524 - +#11 Short offline Completed without error 00% 26500 - +#12 Short offline Completed without error 00% 26476 - +#13 Short offline Completed without error 00% 26452 - +#14 Short offline Completed without error 00% 26428 - +#15 Extended offline Completed without error 00% 26406 - +#16 Short offline Completed without error 00% 26404 - +#17 Short offline Completed without error 00% 26380 - +#18 Short offline Completed without error 00% 26356 - +#19 Short offline Completed without error 00% 26332 - +#20 Short offline Completed without error 00% 26308 - + +SMART Selective self-test log data structure revision number 1 + SPAN MIN_LBA MAX_LBA CURRENT_TEST_STATUS + 1 0 0 Not_testing + 2 0 0 Not_testing + 3 0 0 Not_testing + 4 0 0 Not_testing + 5 0 0 Not_testing +Selective self-test flags (0x0): + After scanning selected spans, do NOT read-scan remainder of disk. +If Selective self-test is pending on power-up, resume after 0 minute delay. +` + + nvmeInfoData = `smartctl 6.5 2016-05-07 r4318 [x86_64-linux-4.1.27-gvt-yocto-standard] (local build) +Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org + +=== START OF INFORMATION SECTION === +Model Number: TS128GMTE850 +Serial Number: D704940282? 
+Firmware Version: C2.3.13 +PCI Vendor/Subsystem ID: 0x126f +IEEE OUI Identifier: 0x000000 +Controller ID: 1 +Number of Namespaces: 1 +Namespace 1 Size/Capacity: 128,035,676,160 [128 GB] +Namespace 1 Formatted LBA Size: 512 +Local Time is: Fri Jun 15 11:41:35 2018 UTC + +=== START OF SMART DATA SECTION === +SMART overall-health self-assessment test result: PASSED + +SMART/Health Information (NVMe Log 0x02, NSID 0xffffffff) +Critical Warning: 0x00 +Temperature: 38 Celsius +Available Spare: 100% +Available Spare Threshold: 10% +Percentage Used: 16% +Data Units Read: 11,836,935 [6.06 TB] +Data Units Written: 62,288,091 [31.8 TB] +Host Read Commands: 135,924,188 +Host Write Commands: 7,715,573,429 +Controller Busy Time: 4,042 +Power Cycles: 472 +Power On Hours: 6,038 +Unsafe Shutdowns: 355 +Media and Data Integrity Errors: 0 +Error Information Log Entries: 119,699 +Warning Comp. Temperature Time: 0 +Critical Comp. Temperature Time: 0 +` +) From a1513e6235f6376e86742a5a2d25fb8f93f1dc1d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 7 May 2019 15:22:10 -0700 Subject: [PATCH 0824/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cffd537c4..ee65d69e5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ - [#5585](https://github.com/influxdata/telegraf/pull/5585): Add cli support for outputting sections of the config. - [#5770](https://github.com/influxdata/telegraf/pull/5770): Add service-display-name option for use with Windows service. - [#5778](https://github.com/influxdata/telegraf/pull/5778): Add support for log rotation. +- [#5765](https://github.com/influxdata/telegraf/pull/5765): Support more drive types in smart input. #### Bugfixes From b22bf01fdfd83dd7e44bdd1a907a256a46afbe7d Mon Sep 17 00:00:00 2001 From: Alirie Gray Date: Tue, 7 May 2019 15:42:44 -0700 Subject: [PATCH 0825/1815] Add support for hex values to ipmi_sensor input (#5816) --- plugins/inputs/ipmi_sensor/ipmi.go | 14 ++- plugins/inputs/ipmi_sensor/ipmi_test.go | 108 ++++++++++++++++++++++++ 2 files changed, 120 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go index e4832cc65..2ec51525b 100644 --- a/plugins/inputs/ipmi_sensor/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi.go @@ -150,9 +150,19 @@ func parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_ fields["status"] = 0 } - if strings.Index(ipmiFields["description"], " ") > 0 { + description := ipmiFields["description"] + + // handle hex description field + if strings.HasPrefix(description, "0x") { + descriptionInt, err := strconv.ParseInt(description, 0, 64) + if err != nil { + continue + } + + fields["value"] = float64(descriptionInt) + } else if strings.Index(description, " ") > 0 { // split middle column into value and unit - valunit := strings.SplitN(ipmiFields["description"], " ", 2) + valunit := strings.SplitN(description, " ", 2) var err error fields["value"], err = aToFloat(valunit[0]) if err != nil { diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go index a66cabfeb..9d448435d 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -610,3 +610,111 @@ Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected extractFieldsFromRegex(re_v2_parse_line, tests[i]) } } + +func Test_parseV1(t *testing.T) { + type args struct { + hostname string + cmdOut []byte + measuredAt time.Time + } + tests := 
[]struct { + name string + args args + wantFields map[string]interface{} + wantErr bool + }{ + { + name: "Test correct V1 parsing with hex code", + args: args{ + hostname: "host", + measuredAt: time.Now(), + cmdOut: []byte("PS1 Status | 0x02 | ok"), + }, + wantFields: map[string]interface{}{"value": float64(2), "status": 1}, + wantErr: false, + }, + { + name: "Test correct V1 parsing with value with unit", + args: args{ + hostname: "host", + measuredAt: time.Now(), + cmdOut: []byte("Avg Power | 210 Watts | ok"), + }, + wantFields: map[string]interface{}{"value": float64(210), "status": 1}, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + + if err := parseV1(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { + t.Errorf("parseV1() error = %v, wantErr %v", err, tt.wantErr) + } + + acc.AssertContainsFields(t, "ipmi_sensor", tt.wantFields) + }) + } +} + +func Test_parseV2(t *testing.T) { + type args struct { + hostname string + cmdOut []byte + measuredAt time.Time + } + tests := []struct { + name string + args args + wantFields map[string]interface{} + wantTags map[string]string + wantErr bool + }{ + { + name: "Test correct V2 parsing with analog value with unit", + args: args{ + hostname: "host", + cmdOut: []byte("Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected"), + measuredAt: time.Now(), + }, + wantFields: map[string]interface{}{"value": float64(110)}, + wantTags: map[string]string{ + "name": "power_supply_1", + "status_code": "ok", + "server": "host", + "entity_id": "10.1", + "unit": "watts", + "status_desc": "presence_detected", + }, + wantErr: false, + }, + { + name: "Test correct V2 parsing without analog value", + args: args{ + hostname: "host", + cmdOut: []byte("Intrusion | 73h | ok | 7.1 |"), + measuredAt: time.Now(), + }, + wantFields: map[string]interface{}{"value": float64(0)}, + wantTags: map[string]string{ + "name": "intrusion", + "status_code": "ok", + "server": "host", + "entity_id": "7.1", + "status_desc": "ok", + }, + wantErr: false, + }, + } + for _, tt := range tests { + var acc testutil.Accumulator + + t.Run(tt.name, func(t *testing.T) { + if err := parseV2(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { + t.Errorf("parseV2() error = %v, wantErr %v", err, tt.wantErr) + } + }) + + acc.AssertContainsTaggedFields(t, "ipmi_sensor", tt.wantFields, tt.wantTags) + } +} From dfb1387771c75f9feac79bfbe66a7ff711097e52 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 7 May 2019 15:43:41 -0700 Subject: [PATCH 0826/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ee65d69e5..585b503fa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ - [#5803](https://github.com/influxdata/telegraf/issues/5803): Fix http output cannot set Host header. - [#5619](https://github.com/influxdata/telegraf/issues/5619): Fix interval estimation in vsphere input. - [#5782](https://github.com/influxdata/telegraf/pull/5782): Skip lines with missing refid in ntpq input. +- [#5755](https://github.com/influxdata/telegraf/issues/5755): Add support for hex values to ipmi_sensor input. 
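The hex support added to `parseV1` above rests on `strconv.ParseInt` with base 0, which infers the radix from a `0x` prefix. A minimal standalone sketch of the same technique (the sample line is illustrative only, not taken from the patch):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Middle column of a hypothetical ipmitool V1 status line.
	line := "PS1 Status | 0x02 | ok"
	description := strings.TrimSpace(strings.Split(line, "|")[1])

	if strings.HasPrefix(description, "0x") {
		// Base 0 lets ParseInt auto-detect hexadecimal from the 0x prefix.
		value, err := strconv.ParseInt(description, 0, 64)
		if err == nil {
			fmt.Println(float64(value)) // prints 2
		}
	}
}
```

In the plugin itself, the `0x` prefix check keeps plain numeric descriptions such as `210 Watts` on the existing value-plus-unit parsing path.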
## v1.10.3 [2019-04-16] From 761705c29902821239bcd8b8d601c5c7281fd8e1 Mon Sep 17 00:00:00 2001 From: javicrespo Date: Wed, 8 May 2019 20:21:51 +0200 Subject: [PATCH 0827/1815] Add syslog output plugin (#5802) --- .../inputs => internal}/syslog/framing.go | 0 .../syslog/framing_test.go | 0 plugins/inputs/syslog/commons_test.go | 3 +- plugins/inputs/syslog/nontransparent_test.go | 5 +- plugins/inputs/syslog/octetcounting_test.go | 5 +- plugins/inputs/syslog/syslog.go | 7 +- plugins/outputs/all/all.go | 1 + plugins/outputs/syslog/README.md | 101 ++++++++ plugins/outputs/syslog/syslog.go | 245 ++++++++++++++++++ plugins/outputs/syslog/syslog_mapper.go | 199 ++++++++++++++ plugins/outputs/syslog/syslog_mapper_test.go | 200 ++++++++++++++ plugins/outputs/syslog/syslog_test.go | 205 +++++++++++++++ 12 files changed, 963 insertions(+), 8 deletions(-) rename {plugins/inputs => internal}/syslog/framing.go (100%) rename {plugins/inputs => internal}/syslog/framing_test.go (100%) create mode 100644 plugins/outputs/syslog/README.md create mode 100644 plugins/outputs/syslog/syslog.go create mode 100644 plugins/outputs/syslog/syslog_mapper.go create mode 100644 plugins/outputs/syslog/syslog_mapper_test.go create mode 100644 plugins/outputs/syslog/syslog_test.go diff --git a/plugins/inputs/syslog/framing.go b/internal/syslog/framing.go similarity index 100% rename from plugins/inputs/syslog/framing.go rename to internal/syslog/framing.go diff --git a/plugins/inputs/syslog/framing_test.go b/internal/syslog/framing_test.go similarity index 100% rename from plugins/inputs/syslog/framing_test.go rename to internal/syslog/framing_test.go diff --git a/plugins/inputs/syslog/commons_test.go b/plugins/inputs/syslog/commons_test.go index f55d080a1..5d5562fc7 100644 --- a/plugins/inputs/syslog/commons_test.go +++ b/plugins/inputs/syslog/commons_test.go @@ -2,6 +2,7 @@ package syslog import ( "github.com/influxdata/telegraf/internal" + framing "github.com/influxdata/telegraf/internal/syslog" "github.com/influxdata/telegraf/testutil" "time" ) @@ -37,7 +38,7 @@ func newUDPSyslogReceiver(address string, bestEffort bool) *Syslog { } } -func newTCPSyslogReceiver(address string, keepAlive *internal.Duration, maxConn int, bestEffort bool, f Framing) *Syslog { +func newTCPSyslogReceiver(address string, keepAlive *internal.Duration, maxConn int, bestEffort bool, f framing.Framing) *Syslog { d := &internal.Duration{ Duration: defaultReadTimeout, } diff --git a/plugins/inputs/syslog/nontransparent_test.go b/plugins/inputs/syslog/nontransparent_test.go index 1dea84144..2bf6aa4ef 100644 --- a/plugins/inputs/syslog/nontransparent_test.go +++ b/plugins/inputs/syslog/nontransparent_test.go @@ -11,6 +11,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/influxdata/telegraf/internal" + framing "github.com/influxdata/telegraf/internal/syslog" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -138,7 +139,7 @@ func testStrictNonTransparent(t *testing.T, protocol string, address string, wan for _, tc := range getTestCasesForNonTransparent() { t.Run(tc.name, func(t *testing.T) { // Creation of a strict mode receiver - receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, false, NonTransparent) + receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, false, framing.NonTransparent) require.NotNil(t, receiver) if wantTLS { receiver.ServerConfig = *pki.TLSServerConfig() @@ -200,7 +201,7 @@ func testBestEffortNonTransparent(t *testing.T, protocol string, address string, 
for _, tc := range getTestCasesForNonTransparent() {
 		t.Run(tc.name, func(t *testing.T) {
 			// Creation of a best effort mode receiver
-			receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, true, NonTransparent)
+			receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, true, framing.NonTransparent)
 			require.NotNil(t, receiver)
 			if wantTLS {
 				receiver.ServerConfig = *pki.TLSServerConfig()
diff --git a/plugins/inputs/syslog/octetcounting_test.go b/plugins/inputs/syslog/octetcounting_test.go
index c61805131..4f8f2d278 100644
--- a/plugins/inputs/syslog/octetcounting_test.go
+++ b/plugins/inputs/syslog/octetcounting_test.go
@@ -12,6 +12,7 @@ import (
 	"github.com/google/go-cmp/cmp"
 	"github.com/influxdata/telegraf/internal"
+	framing "github.com/influxdata/telegraf/internal/syslog"
 	"github.com/influxdata/telegraf/testutil"
 	"github.com/stretchr/testify/require"
 )
@@ -338,7 +339,7 @@ func testStrictOctetCounting(t *testing.T, protocol string, address string, want
 	for _, tc := range getTestCasesForOctetCounting() {
 		t.Run(tc.name, func(t *testing.T) {
 			// Creation of a strict mode receiver
-			receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, false, OctetCounting)
+			receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, false, framing.OctetCounting)
 			require.NotNil(t, receiver)
 			if wantTLS {
 				receiver.ServerConfig = *pki.TLSServerConfig()
@@ -400,7 +401,7 @@ func testBestEffortOctetCounting(t *testing.T, protocol string, address string,
 	for _, tc := range getTestCasesForOctetCounting() {
 		t.Run(tc.name, func(t *testing.T) {
 			// Creation of a best effort mode receiver
-			receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, true, OctetCounting)
+			receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, true, framing.OctetCounting)
 			require.NotNil(t, receiver)
 			if wantTLS {
 				receiver.ServerConfig = *pki.TLSServerConfig()
diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go
index 51d2ee455..e1e918759 100644
--- a/plugins/inputs/syslog/syslog.go
+++ b/plugins/inputs/syslog/syslog.go
@@ -18,6 +18,7 @@ import (
 	"github.com/influxdata/go-syslog/rfc5424"
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/internal"
+	framing "github.com/influxdata/telegraf/internal/syslog"
 	tlsConfig "github.com/influxdata/telegraf/internal/tls"
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
@@ -32,7 +33,7 @@ type Syslog struct {
 	KeepAlivePeriod *internal.Duration
 	MaxConnections int
 	ReadTimeout *internal.Duration
-	Framing Framing
+	Framing framing.Framing
 	Trailer nontransparent.TrailerType
 	BestEffort bool
 	Separator string `toml:"sdparam_separator"`
@@ -313,7 +314,7 @@ func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) {
 	}

 	// Select the parser to use depending on transport framing
-	if s.Framing == OctetCounting {
+	if s.Framing == framing.OctetCounting {
 		// Octet counting transparent framing
 		p = octetcounting.NewParser(opts...)
} else {
@@ -445,7 +446,7 @@ func init() {
 		ReadTimeout: &internal.Duration{
 			Duration: defaultReadTimeout,
 		},
-		Framing: OctetCounting,
+		Framing: framing.OctetCounting,
 		Trailer: nontransparent.LF,
 		Separator: "_",
 	}
diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go
index a5d2a44da..c29d05efb 100644
--- a/plugins/outputs/all/all.go
+++ b/plugins/outputs/all/all.go
@@ -30,5 +30,6 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/outputs/riemann_legacy"
 	_ "github.com/influxdata/telegraf/plugins/outputs/socket_writer"
 	_ "github.com/influxdata/telegraf/plugins/outputs/stackdriver"
+	_ "github.com/influxdata/telegraf/plugins/outputs/syslog"
 	_ "github.com/influxdata/telegraf/plugins/outputs/wavefront"
 )
diff --git a/plugins/outputs/syslog/README.md b/plugins/outputs/syslog/README.md
new file mode 100644
index 000000000..8655cbd6a
--- /dev/null
+++ b/plugins/outputs/syslog/README.md
@@ -0,0 +1,101 @@
+# Syslog Output Plugin
+
+The syslog output plugin sends syslog messages over
+[UDP](https://tools.ietf.org/html/rfc5426) or
+[TCP](https://tools.ietf.org/html/rfc6587) or
+[TLS](https://tools.ietf.org/html/rfc5425), with or without octet counting framing.
+
+Syslog messages are formatted according to
+[RFC 5424](https://tools.ietf.org/html/rfc5424).
+
+### Configuration
+
+```toml
+[[outputs.syslog]]
+  ## URL to connect to
+  ## ex: address = "tcp://127.0.0.1:8094"
+  ## ex: address = "tcp4://127.0.0.1:8094"
+  ## ex: address = "tcp6://127.0.0.1:8094"
+  ## ex: address = "tcp6://[2001:db8::1]:8094"
+  ## ex: address = "udp://127.0.0.1:8094"
+  ## ex: address = "udp4://127.0.0.1:8094"
+  ## ex: address = "udp6://127.0.0.1:8094"
+  address = "tcp://127.0.0.1:8094"
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+
+  ## Period between keep alive probes.
+  ## Only applies to TCP sockets.
+  ## 0 disables keep alive probes.
+  ## Defaults to the OS configuration.
+  # keep_alive_period = "5m"
+
+  ## The framing technique with which messages are transmitted (default = "octet-counting").
+  ## Messages are sent using either the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1)
+  ## or the non-transparent framing technique (RFC6587#section-3.4.2).
+  ## Must be one of "octet-counting", "non-transparent".
+  # framing = "octet-counting"
+
+  ## The trailer to be expected in case of non-transparent framing (default = "LF").
+  ## Must be one of "LF", or "NUL".
+  # trailer = "LF"
+
+  ### SD-PARAMs settings
+  ### A syslog message can contain multiple parameters and multiple identifiers within its structured data section.
+  ### A syslog message can contain multiple structured data sections.
+  ### For each unrecognised metric tag/field an SD-PARAM can be created.
+  ### Example
+  ### Configuration =>
+  ### sdparam_separator = "_"
+  ### default_sdid = "default@32473"
+  ### sdids = ["foo@123", "bar@456"]
+  ### input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1
+  ### output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y]
+
+  ## SD-PARAMs separator between the sdid and tag/field key (default = "_")
+  # sdparam_separator = "_"
+
+  ## Default sdid used for tags/fields that don't contain a prefix defined in the explicit sdids setting below
+  ## If no default is specified, no SD-PARAMs will be used for unrecognised fields.
+ # default_sdid = "default@32473" + + ##List of explicit prefixes to extract from tag/field keys and use as the SDID, if they match (see above example for more details): + # sdids = ["foo@123", "bar@456"] + ### + + ## Default severity value. Severity and Facility are used to calculate the message PRI value (RFC5424#section-6.2.1) + ## Used when no metric field with key "severity_code" is defined. + ## If unset, 5 (notice) is the default + # default_severity_code = 5 + + ## Default facility value. Facility and Severity are used to calculate the message PRI value (RFC5424#section-6.2.1) + ## Used when no metric field with key "facility_code" is defined. + ## If unset, 1 (user-level) is the default + # default_facility_code = 1 + + ## Default APP-NAME value (RFC5424#section-6.2.5) + ## Used when no metric tag with key "appname" is defined. + ## If unset, "Telegraf" is the default + # default_appname = "Telegraf" +``` + +### Metric mapping +The output plugin expects syslog metrics tags and fields to match up with the ones created in the [syslog input plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/syslog#metrics). + +The following table shows the metric tags, field and defaults used to format syslog messages. + +| Syslog field | Metric Tag | Metric Field | Default value | +| --- | --- | --- | --- | +| APP-NAME | appname | - | default_appname = "Telegraf" | +| TIMESTAMP | - | timestamp | Metric's own timestamp | +| VERSION | - | version | 1 | +| PRI | - | serverity_code + (8 * facility_code)| default_severity_code=5 (notice), default_facility_code=1 (user-level)| +| HOSTNAME | hostname OR source OR host | - | os.Hostname() | +| MSGID | - | msgid | Metric name | +| PROCID | - | procid | - | +| MSG | - | msg | - | \ No newline at end of file diff --git a/plugins/outputs/syslog/syslog.go b/plugins/outputs/syslog/syslog.go new file mode 100644 index 000000000..684806b85 --- /dev/null +++ b/plugins/outputs/syslog/syslog.go @@ -0,0 +1,245 @@ +package syslog + +import ( + "crypto/tls" + "fmt" + "log" + "net" + "strconv" + "strings" + + "github.com/influxdata/go-syslog/nontransparent" + "github.com/influxdata/go-syslog/rfc5424" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + framing "github.com/influxdata/telegraf/internal/syslog" + tlsint "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/outputs" +) + +type Syslog struct { + Address string + KeepAlivePeriod *internal.Duration + DefaultSdid string + DefaultSeverityCode uint8 + DefaultFacilityCode uint8 + DefaultAppname string + Sdids []string + Separator string `toml:"sdparam_separator"` + Framing framing.Framing + Trailer nontransparent.TrailerType + net.Conn + tlsint.ClientConfig + mapper *SyslogMapper +} + +var sampleConfig = ` + ## URL to connect to + ## ex: address = "tcp://127.0.0.1:8094" + ## ex: address = "tcp4://127.0.0.1:8094" + ## ex: address = "tcp6://127.0.0.1:8094" + ## ex: address = "tcp6://[2001:db8::1]:8094" + ## ex: address = "udp://127.0.0.1:8094" + ## ex: address = "udp4://127.0.0.1:8094" + ## ex: address = "udp6://127.0.0.1:8094" + address = "tcp://127.0.0.1:8094" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Period between keep alive probes. + ## Only applies to TCP sockets. + ## 0 disables keep alive probes. + ## Defaults to the OS configuration. 
+ # keep_alive_period = "5m" + + ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). + ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), + ## or the non-transparent framing technique (RFC6587#section-3.4.2). + ## Must be one of "octect-counting", "non-transparent". + # framing = "octet-counting" + + ## The trailer to be expected in case of non-trasparent framing (default = "LF"). + ## Must be one of "LF", or "NUL". + # trailer = "LF" + + ### SD-PARAMs settings + ### A syslog message can contain multiple parameters and multiple identifiers within structured data section + ### A syslog message can contain multiple structured data sections. + ### For each unrecognised metric tag/field a SD-PARAMS can be created. + ### Example + ### Configuration => + ### sdparam_separator = "_" + ### default_sdid = "default@32473" + ### sdids = ["foo@123", "bar@456"] + ### input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1 + ### output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y] + + ## SD-PARAMs separator between the sdid and tag/field key (default = "_") + # sdparam_separator = "_" + + ## Default sdid used for tags/fields that don't contain a prefix defined in the explict sdids setting below + ## If no default is specified, no SD-PARAMs will be used for unrecognised field. + # default_sdid = "default@32473" + + ##List of explicit prefixes to extract from tag/field keys and use as the SDID, if they match (see above example for more details): + # sdids = ["foo@123", "bar@456"] + ### + + ## Default severity value. Severity and Facility are used to calculate the message PRI value (RFC5424#section-6.2.1) + ## Used when no metric field with key "severity_code" is defined. + ## If unset, 5 (notice) is the default + # default_severity_code = 5 + + ## Default facility value. Facility and Severity are used to calculate the message PRI value (RFC5424#section-6.2.1) + ## Used when no metric field with key "facility_code" is defined. + ## If unset, 1 (user-level) is the default + # default_facility_code = 1 + + ## Default APP-NAME value (RFC5424#section-6.2.5) + ## Used when no metric tag with key "appname" is defined. 
+ ## If unset, "Telegraf" is the default + # default_appname = "Telegraf" +` + +func (s *Syslog) Connect() error { + s.initializeSyslogMapper() + + spl := strings.SplitN(s.Address, "://", 2) + if len(spl) != 2 { + return fmt.Errorf("invalid address: %s", s.Address) + } + + tlsCfg, err := s.ClientConfig.TLSConfig() + if err != nil { + return err + } + + var c net.Conn + if tlsCfg == nil { + c, err = net.Dial(spl[0], spl[1]) + } else { + c, err = tls.Dial(spl[0], spl[1], tlsCfg) + } + if err != nil { + return err + } + + if err := s.setKeepAlive(c); err != nil { + log.Printf("unable to configure keep alive (%s): %s", s.Address, err) + } + + s.Conn = c + return nil +} + +func (s *Syslog) setKeepAlive(c net.Conn) error { + if s.KeepAlivePeriod == nil { + return nil + } + tcpc, ok := c.(*net.TCPConn) + if !ok { + return fmt.Errorf("cannot set keep alive on a %s socket", strings.SplitN(s.Address, "://", 2)[0]) + } + if s.KeepAlivePeriod.Duration == 0 { + return tcpc.SetKeepAlive(false) + } + if err := tcpc.SetKeepAlive(true); err != nil { + return err + } + return tcpc.SetKeepAlivePeriod(s.KeepAlivePeriod.Duration) +} + +func (s *Syslog) Close() error { + if s.Conn == nil { + return nil + } + err := s.Conn.Close() + s.Conn = nil + return err +} + +func (s *Syslog) SampleConfig() string { + return sampleConfig +} + +func (s *Syslog) Description() string { + return "Configuration for Syslog server to send metrics to" +} + +func (s *Syslog) Write(metrics []telegraf.Metric) (err error) { + if s.Conn == nil { + // previous write failed with permanent error and socket was closed. + if err = s.Connect(); err != nil { + return err + } + } + for _, metric := range metrics { + var msg *rfc5424.SyslogMessage + if msg, err = s.mapper.MapMetricToSyslogMessage(metric); err != nil { + log.Printf("E! [outputs.syslog] Failed to create syslog message: %v", err) + continue + } + var msgBytesWithFraming []byte + if msgBytesWithFraming, err = s.getSyslogMessageBytesWithFraming(msg); err != nil { + log.Printf("E! 
[outputs.syslog] Failed to convert syslog message with framing: %v", err) + continue + } + if _, err = s.Conn.Write(msgBytesWithFraming); err != nil { + if netErr, ok := err.(net.Error); !ok || !netErr.Temporary() { + s.Close() + s.Conn = nil + return fmt.Errorf("closing connection: %v", netErr) + } + return err + } + } + return nil +} + +func (s *Syslog) getSyslogMessageBytesWithFraming(msg *rfc5424.SyslogMessage) ([]byte, error) { + var msgString string + var err error + if msgString, err = msg.String(); err != nil { + return nil, err + } + msgBytes := []byte(msgString) + + if s.Framing == framing.OctetCounting { + return append([]byte(strconv.Itoa(len(msgBytes))+" "), msgBytes...), nil + } + // Non-transparent framing + return append(msgBytes, byte(s.Trailer)), nil +} + +func (s *Syslog) initializeSyslogMapper() { + if s.mapper != nil { + return + } + s.mapper = newSyslogMapper() + s.mapper.DefaultFacilityCode = s.DefaultFacilityCode + s.mapper.DefaultSeverityCode = s.DefaultSeverityCode + s.mapper.DefaultAppname = s.DefaultAppname + s.mapper.Separator = s.Separator + s.mapper.DefaultSdid = s.DefaultSdid + s.mapper.Sdids = s.Sdids +} + +func newSyslog() *Syslog { + return &Syslog{ + Framing: framing.OctetCounting, + Trailer: nontransparent.LF, + Separator: "_", + DefaultSeverityCode: uint8(5), // notice + DefaultFacilityCode: uint8(1), // user-level + DefaultAppname: "Telegraf", + } +} + +func init() { + outputs.Add("syslog", func() telegraf.Output { return newSyslog() }) +} diff --git a/plugins/outputs/syslog/syslog_mapper.go b/plugins/outputs/syslog/syslog_mapper.go new file mode 100644 index 000000000..ba6b0d660 --- /dev/null +++ b/plugins/outputs/syslog/syslog_mapper.go @@ -0,0 +1,199 @@ +package syslog + +import ( + "errors" + "math" + "os" + "strconv" + "strings" + "time" + + "github.com/influxdata/go-syslog/rfc5424" + "github.com/influxdata/telegraf" +) + +type SyslogMapper struct { + DefaultSdid string + DefaultSeverityCode uint8 + DefaultFacilityCode uint8 + DefaultAppname string + Sdids []string + Separator string + reservedKeys map[string]bool +} + +// MapMetricToSyslogMessage maps metrics tags/fields to syslog messages +func (sm *SyslogMapper) MapMetricToSyslogMessage(metric telegraf.Metric) (*rfc5424.SyslogMessage, error) { + msg := &rfc5424.SyslogMessage{} + + sm.mapPriority(metric, msg) + sm.mapStructuredData(metric, msg) + sm.mapAppname(metric, msg) + mapHostname(metric, msg) + mapTimestamp(metric, msg) + mapMsgID(metric, msg) + mapVersion(metric, msg) + mapProcID(metric, msg) + mapMsg(metric, msg) + + if !msg.Valid() { + return nil, errors.New("metric could not produce valid syslog message") + } + return msg, nil +} + +func (sm *SyslogMapper) mapStructuredData(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { + for _, tag := range metric.TagList() { + sm.mapStructuredDataItem(tag.Key, tag.Value, msg) + } + for _, field := range metric.FieldList() { + sm.mapStructuredDataItem(field.Key, formatValue(field.Value), msg) + } +} + +func (sm *SyslogMapper) mapStructuredDataItem(key string, value string, msg *rfc5424.SyslogMessage) { + if sm.reservedKeys[key] { + return + } + isExplicitSdid := false + for _, sdid := range sm.Sdids { + k := strings.TrimLeft(key, sdid+sm.Separator) + if len(key) > len(k) { + isExplicitSdid = true + msg.SetParameter(sdid, k, value) + break + } + } + if !isExplicitSdid && len(sm.DefaultSdid) > 0 { + k := strings.TrimPrefix(key, sm.DefaultSdid+sm.Separator) + msg.SetParameter(sm.DefaultSdid, k, value) + } +} + +func (sm *SyslogMapper) 
mapAppname(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { + if value, ok := metric.GetTag("appname"); ok { + msg.SetAppname(formatValue(value)) + } else { + //Use default appname + msg.SetAppname(sm.DefaultAppname) + } +} + +func mapMsgID(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { + if value, ok := metric.GetField("msgid"); ok { + msg.SetMsgID(formatValue(value)) + } else { + // We default to metric name + msg.SetMsgID(metric.Name()) + } +} + +func mapVersion(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { + if value, ok := metric.GetField("version"); ok { + switch v := value.(type) { + case uint64: + msg.SetVersion(uint16(v)) + return + } + } + msg.SetVersion(1) +} + +func mapMsg(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { + if value, ok := metric.GetField("msg"); ok { + msg.SetMessage(formatValue(value)) + } +} + +func mapProcID(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { + if value, ok := metric.GetField("procid"); ok { + msg.SetProcID(formatValue(value)) + } +} + +func (sm *SyslogMapper) mapPriority(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { + severityCode := sm.DefaultSeverityCode + facilityCode := sm.DefaultFacilityCode + + if value, ok := getFieldCode(metric, "severity_code"); ok { + severityCode = *value + } + + if value, ok := getFieldCode(metric, "facility_code"); ok { + facilityCode = *value + } + + priority := (8 * facilityCode) + severityCode + msg.SetPriority(priority) +} + +func mapHostname(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { + // Try with hostname, then with source, then with host tags, then take OS Hostname + if value, ok := metric.GetTag("hostname"); ok { + msg.SetHostname(formatValue(value)) + } else if value, ok := metric.GetTag("source"); ok { + msg.SetHostname(formatValue(value)) + } else if value, ok := metric.GetTag("host"); ok { + msg.SetHostname(formatValue(value)) + } else if value, err := os.Hostname(); err == nil { + msg.SetHostname(value) + } +} + +func mapTimestamp(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { + timestamp := metric.Time() + if value, ok := metric.GetField("timestamp"); ok { + switch v := value.(type) { + case int64: + timestamp = time.Unix(0, v).UTC() + } + } + msg.SetTimestamp(timestamp.Format(time.RFC3339)) +} + +func formatValue(value interface{}) string { + switch v := value.(type) { + case string: + return v + case bool: + if v { + return "1" + } + return "0" + case uint64: + return strconv.FormatUint(v, 10) + case int64: + return strconv.FormatInt(v, 10) + case float64: + if math.IsNaN(v) { + return "" + } + + if math.IsInf(v, 0) { + return "" + } + return strconv.FormatFloat(v, 'f', -1, 64) + } + + return "" +} + +func getFieldCode(metric telegraf.Metric, fieldKey string) (*uint8, bool) { + if value, ok := metric.GetField(fieldKey); ok { + if v, err := strconv.ParseUint(formatValue(value), 10, 8); err == nil { + r := uint8(v) + return &r, true + } + } + return nil, false +} + +func newSyslogMapper() *SyslogMapper { + return &SyslogMapper{ + reservedKeys: map[string]bool{ + "version": true, "severity_code": true, "facility_code": true, + "procid": true, "msgid": true, "msg": true, "timestamp": true, "sdid": true, + "hostname": true, "source": true, "host": true, "severity": true, + "facility": true, "appname": true}, + } +} diff --git a/plugins/outputs/syslog/syslog_mapper_test.go b/plugins/outputs/syslog/syslog_mapper_test.go new file mode 100644 index 000000000..300d5fcab --- /dev/null +++ b/plugins/outputs/syslog/syslog_mapper_test.go @@ -0,0 +1,200 @@ 
+package syslog + +import ( + "os" + "testing" + "time" + + "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSyslogMapperWithDefaults(t *testing.T) { + s := newSyslog() + s.initializeSyslogMapper() + + // Init metrics + m1, _ := metric.New( + "testmetric", + map[string]string{}, + map[string]interface{}{}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + hostname, err := os.Hostname() + assert.NoError(t, err) + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) + require.NoError(t, err) + str, _ := syslogMessage.String() + assert.Equal(t, "<13>1 2010-11-10T23:00:00Z "+hostname+" Telegraf - testmetric -", str, "Wrong syslog message") +} + +func TestSyslogMapperWithHostname(t *testing.T) { + s := newSyslog() + s.initializeSyslogMapper() + + // Init metrics + m1, _ := metric.New( + "testmetric", + map[string]string{ + "hostname": "testhost", + "source": "sourcevalue", + "host": "hostvalue", + }, + map[string]interface{}{}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) + require.NoError(t, err) + str, _ := syslogMessage.String() + assert.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -", str, "Wrong syslog message") +} +func TestSyslogMapperWithHostnameSourceFallback(t *testing.T) { + s := newSyslog() + s.initializeSyslogMapper() + + // Init metrics + m1, _ := metric.New( + "testmetric", + map[string]string{ + "source": "sourcevalue", + "host": "hostvalue", + }, + map[string]interface{}{}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) + require.NoError(t, err) + str, _ := syslogMessage.String() + assert.Equal(t, "<13>1 2010-11-10T23:00:00Z sourcevalue Telegraf - testmetric -", str, "Wrong syslog message") +} + +func TestSyslogMapperWithHostnameHostFallback(t *testing.T) { + s := newSyslog() + s.initializeSyslogMapper() + + // Init metrics + m1, _ := metric.New( + "testmetric", + map[string]string{ + "host": "hostvalue", + }, + map[string]interface{}{}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) + require.NoError(t, err) + str, _ := syslogMessage.String() + assert.Equal(t, "<13>1 2010-11-10T23:00:00Z hostvalue Telegraf - testmetric -", str, "Wrong syslog message") +} + +func TestSyslogMapperWithDefaultSdid(t *testing.T) { + s := newSyslog() + s.DefaultSdid = "default@32473" + s.initializeSyslogMapper() + + // Init metrics + m1, _ := metric.New( + "testmetric", + map[string]string{ + "appname": "testapp", + "hostname": "testhost", + "tag1": "bar", + "default@32473_tag2": "foobar", + }, + map[string]interface{}{ + "severity_code": uint64(3), + "facility_code": uint64(3), + "msg": "Test message", + "procid": uint64(25), + "version": uint16(2), + "msgid": int64(555), + "timestamp": time.Date(2010, time.November, 10, 23, 30, 0, 0, time.UTC).UnixNano(), + "value1": int64(2), + "default@32473_value2": "foo", + "value3": float64(1.2), + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) + require.NoError(t, err) + str, _ := syslogMessage.String() + assert.Equal(t, "<27>2 2010-11-10T23:30:00Z testhost testapp 25 555 [default@32473 tag1=\"bar\" tag2=\"foobar\" value1=\"2\" value2=\"foo\" value3=\"1.2\"] Test message", str, "Wrong syslog message") +} + 
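[Editor's note] The `<27>` header asserted in the test above (and the `<13>` in the default-config tests) falls straight out of the RFC 5424 section 6.2.1 PRI formula that `mapPriority` implements: PRI = 8 × facility + severity. A minimal standalone sketch of that derivation (illustration only, not the plugin code):

```go
package main

import "fmt"

// pri mirrors the RFC 5424 section 6.2.1 calculation used by mapPriority.
func pri(facility, severity uint8) uint8 {
	return 8*facility + severity
}

func main() {
	fmt.Printf("<%d>\n", pri(3, 3)) // facility_code=3, severity_code=3 => <27>, as asserted above
	fmt.Printf("<%d>\n", pri(1, 5)) // plugin defaults (user-level, notice) => <13>
}
```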
+func TestSyslogMapperWithDefaultSdidAndOtherSdids(t *testing.T) { + s := newSyslog() + s.DefaultSdid = "default@32473" + s.Sdids = []string{"bar@123", "foo@456"} + s.initializeSyslogMapper() + + // Init metrics + m1, _ := metric.New( + "testmetric", + map[string]string{ + "appname": "testapp", + "hostname": "testhost", + "tag1": "bar", + "default@32473_tag2": "foobar", + "bar@123_tag3": "barfoobar", + }, + map[string]interface{}{ + "severity_code": uint64(1), + "facility_code": uint64(3), + "msg": "Test message", + "procid": uint64(25), + "version": uint16(2), + "msgid": int64(555), + "timestamp": time.Date(2010, time.November, 10, 23, 30, 0, 0, time.UTC).UnixNano(), + "value1": int64(2), + "default@32473_value2": "default", + "bar@123_value3": int64(2), + "foo@456_value4": "foo", + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) + require.NoError(t, err) + str, _ := syslogMessage.String() + assert.Equal(t, "<25>2 2010-11-10T23:30:00Z testhost testapp 25 555 [bar@123 tag3=\"barfoobar\" value3=\"2\"][default@32473 tag1=\"bar\" tag2=\"foobar\" value1=\"2\" value2=\"default\"][foo@456 value4=\"foo\"] Test message", str, "Wrong syslog message") +} + +func TestSyslogMapperWithNoSdids(t *testing.T) { + // Init mapper + s := newSyslog() + s.initializeSyslogMapper() + + // Init metrics + m1, _ := metric.New( + "testmetric", + map[string]string{ + "appname": "testapp", + "hostname": "testhost", + "tag1": "bar", + "default@32473_tag2": "foobar", + "bar@123_tag3": "barfoobar", + "foo@456_tag4": "foobarfoo", + }, + map[string]interface{}{ + "severity_code": uint64(2), + "facility_code": uint64(3), + "msg": "Test message", + "procid": uint64(25), + "version": uint16(2), + "msgid": int64(555), + "timestamp": time.Date(2010, time.November, 10, 23, 30, 0, 0, time.UTC).UnixNano(), + "value1": int64(2), + "default@32473_value2": "default", + "bar@123_value3": int64(2), + "foo@456_value4": "foo", + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) + require.NoError(t, err) + str, _ := syslogMessage.String() + assert.Equal(t, "<26>2 2010-11-10T23:30:00Z testhost testapp 25 555 - Test message", str, "Wrong syslog message") +} diff --git a/plugins/outputs/syslog/syslog_test.go b/plugins/outputs/syslog/syslog_test.go new file mode 100644 index 000000000..7581a7b53 --- /dev/null +++ b/plugins/outputs/syslog/syslog_test.go @@ -0,0 +1,205 @@ +package syslog + +import ( + "net" + "sync" + "testing" + "time" + + "github.com/influxdata/telegraf" + framing "github.com/influxdata/telegraf/internal/syslog" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGetSyslogMessageWithFramingOctectCounting(t *testing.T) { + // Init plugin + s := newSyslog() + s.initializeSyslogMapper() + + // Init metrics + m1, _ := metric.New( + "testmetric", + map[string]string{ + "hostname": "testhost", + }, + map[string]interface{}{}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) + require.NoError(t, err) + messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage) + require.NoError(t, err) + + assert.Equal(t, "59 <13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -", string(messageBytesWithFraming), "Incorrect Octect counting framing") +} + 
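[Editor's note] Both framing techniques amount to a few bytes of bookkeeping around the serialized message; the `59 ` prefix asserted above is simply the byte length of the RFC 5424 message. A self-contained sketch of the two transforms that `getSyslogMessageBytesWithFraming` performs (a simplified re-statement, not the plugin code itself):

```go
package main

import (
	"fmt"
	"strconv"
)

// Octet counting (RFC 6587 section 3.4.1): prefix the message with its
// length in bytes, followed by a space.
func frameOctetCounting(msg []byte) []byte {
	return append([]byte(strconv.Itoa(len(msg))+" "), msg...)
}

// Non-transparent framing (RFC 6587 section 3.4.2): append a trailer byte.
func frameNonTransparent(msg []byte, trailer byte) []byte {
	return append(msg, trailer)
}

func main() {
	m := []byte("<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -")
	fmt.Printf("%s\n", frameOctetCounting(m))        // "59 <13>1 ..." — the message is 59 bytes
	fmt.Printf("%q\n", frameNonTransparent(m, 0x00)) // trailer byte, as in the NUL-terminated assertion below
}
```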
+func TestGetSyslogMessageWithFramingNonTransparent(t *testing.T) { + // Init plugin + s := newSyslog() + s.initializeSyslogMapper() + s.Framing = framing.NonTransparent + + // Init metrics + m1, _ := metric.New( + "testmetric", + map[string]string{ + "hostname": "testhost", + }, + map[string]interface{}{}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) + require.NoError(t, err) + messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage) + require.NoError(t, err) + + assert.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -\x00", string(messageBytesWithFraming), "Incorrect Octect counting framing") +} + +func TestSyslogWriteWithTcp(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + s := newSyslog() + s.Address = "tcp://" + listener.Addr().String() + + err = s.Connect() + require.NoError(t, err) + + lconn, err := listener.Accept() + require.NoError(t, err) + + testSyslogWriteWithStream(t, s, lconn) +} + +func TestSyslogWriteWithUdp(t *testing.T) { + listener, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + + s := newSyslog() + s.Address = "udp://" + listener.LocalAddr().String() + + err = s.Connect() + require.NoError(t, err) + + testSyslogWriteWithPacket(t, s, listener) +} + +func testSyslogWriteWithStream(t *testing.T, s *Syslog, lconn net.Conn) { + metrics := []telegraf.Metric{} + m1, _ := metric.New( + "testmetric", + map[string]string{}, + map[string]interface{}{}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)) + + metrics = append(metrics, m1) + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(metrics[0]) + require.NoError(t, err) + messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage) + require.NoError(t, err) + + err = s.Write(metrics) + require.NoError(t, err) + + buf := make([]byte, 256) + n, err := lconn.Read(buf) + require.NoError(t, err) + assert.Equal(t, string(messageBytesWithFraming), string(buf[:n])) +} + +func testSyslogWriteWithPacket(t *testing.T, s *Syslog, lconn net.PacketConn) { + s.Framing = framing.NonTransparent + metrics := []telegraf.Metric{} + m1, _ := metric.New( + "testmetric", + map[string]string{}, + map[string]interface{}{}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)) + + metrics = append(metrics, m1) + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(metrics[0]) + require.NoError(t, err) + messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage) + require.NoError(t, err) + + err = s.Write(metrics) + require.NoError(t, err) + + buf := make([]byte, 256) + n, _, err := lconn.ReadFrom(buf) + require.NoError(t, err) + assert.Equal(t, string(messageBytesWithFraming), string(buf[:n])) +} + +func TestSyslogWriteErr(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + s := newSyslog() + s.Address = "tcp://" + listener.Addr().String() + + err = s.Connect() + require.NoError(t, err) + s.Conn.(*net.TCPConn).SetReadBuffer(256) + + lconn, err := listener.Accept() + require.NoError(t, err) + lconn.(*net.TCPConn).SetWriteBuffer(256) + + metrics := []telegraf.Metric{testutil.TestMetric(1, "testerr")} + + // close the socket to generate an error + lconn.Close() + s.Conn.Close() + err = s.Write(metrics) + require.Error(t, err) + assert.Nil(t, s.Conn) +} + +func TestSyslogWriteReconnect(t *testing.T) { + listener, err := net.Listen("tcp", 
"127.0.0.1:0") + require.NoError(t, err) + + s := newSyslog() + s.Address = "tcp://" + listener.Addr().String() + + err = s.Connect() + require.NoError(t, err) + s.Conn.(*net.TCPConn).SetReadBuffer(256) + + lconn, err := listener.Accept() + require.NoError(t, err) + lconn.(*net.TCPConn).SetWriteBuffer(256) + lconn.Close() + s.Conn = nil + + wg := sync.WaitGroup{} + wg.Add(1) + var lerr error + go func() { + lconn, lerr = listener.Accept() + wg.Done() + }() + + metrics := []telegraf.Metric{testutil.TestMetric(1, "testerr")} + err = s.Write(metrics) + require.NoError(t, err) + + wg.Wait() + assert.NoError(t, lerr) + + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(metrics[0]) + require.NoError(t, err) + messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage) + require.NoError(t, err) + buf := make([]byte, 256) + n, err := lconn.Read(buf) + require.NoError(t, err) + assert.Equal(t, string(messageBytesWithFraming), string(buf[:n])) +} From a0a9da371eb848ebe2e4b02da377da71c469f46b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 8 May 2019 11:39:03 -0700 Subject: [PATCH 0828/1815] Reformat syslog output documentation --- plugins/inputs/syslog/syslog.go | 2 +- plugins/outputs/syslog/README.md | 65 ++++++++++++++++++-------------- plugins/outputs/syslog/syslog.go | 58 +++++++++++++++------------- 3 files changed, 68 insertions(+), 57 deletions(-) diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go index e1e918759..43d02de5e 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -84,7 +84,7 @@ var sampleConfig = ` ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), ## or the non-transparent framing technique (RFC6587#section-3.4.2). - ## Must be one of "octect-counting", "non-transparent". + ## Must be one of "octet-counting", "non-transparent". # framing = "octet-counting" ## The trailer to be expected in case of non-trasparent framing (default = "LF"). diff --git a/plugins/outputs/syslog/README.md b/plugins/outputs/syslog/README.md index 8655cbd6a..65f038f57 100644 --- a/plugins/outputs/syslog/README.md +++ b/plugins/outputs/syslog/README.md @@ -35,57 +35,62 @@ Syslog messages are formatted according to ## Defaults to the OS configuration. # keep_alive_period = "5m" - ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). - ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), - ## or the non-transparent framing technique (RFC6587#section-3.4.2). - ## Must be one of "octect-counting", "non-transparent". + ## The framing technique with which it is expected that messages are + ## transported (default = "octet-counting"). Whether the messages come + ## using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), + ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must + ## be one of "octet-counting", "non-transparent". # framing = "octet-counting" ## The trailer to be expected in case of non-trasparent framing (default = "LF"). ## Must be one of "LF", or "NUL". # trailer = "LF" - ### SD-PARAMs settings - ### A syslog message can contain multiple parameters and multiple identifiers within structured data section - ### A syslog message can contain multiple structured data sections. 
- ### For each unrecognised metric tag/field a SD-PARAMS can be created. - ### Example - ### Configuration => - ### sdparam_separator = "_" - ### default_sdid = "default@32473" - ### sdids = ["foo@123", "bar@456"] - ### input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1 - ### output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y] + ## SD-PARAMs settings + ## Syslog messages can contain key/value pairs within zero or more + ## structured data sections. For each unrecognised metric tag/field a + ## SD-PARAMS is created. + ## + ## Example: + ## [[outputs.syslog]] + ## sdparam_separator = "_" + ## default_sdid = "default@32473" + ## sdids = ["foo@123", "bar@456"] + ## + ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1 + ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y] - ## SD-PARAMs separator between the sdid and tag/field key (default = "_") + ## SD-PARAMs separator between the sdid and tag/field key (default = "_") # sdparam_separator = "_" - ## Default sdid used for tags/fields that don't contain a prefix defined in the explict sdids setting below - ## If no default is specified, no SD-PARAMs will be used for unrecognised field. + ## Default sdid used for tags/fields that don't contain a prefix defined in + ## the explict sdids setting below If no default is specified, no SD-PARAMs + ## will be used for unrecognised field. # default_sdid = "default@32473" - ##List of explicit prefixes to extract from tag/field keys and use as the SDID, if they match (see above example for more details): + ## List of explicit prefixes to extract from tag/field keys and use as the + ## SDID, if they match (see above example for more details): # sdids = ["foo@123", "bar@456"] - ### - ## Default severity value. Severity and Facility are used to calculate the message PRI value (RFC5424#section-6.2.1) - ## Used when no metric field with key "severity_code" is defined. - ## If unset, 5 (notice) is the default + ## Default severity value. Severity and Facility are used to calculate the + ## message PRI value (RFC5424#section-6.2.1). Used when no metric field + ## with key "severity_code" is defined. If unset, 5 (notice) is the default # default_severity_code = 5 - ## Default facility value. Facility and Severity are used to calculate the message PRI value (RFC5424#section-6.2.1) - ## Used when no metric field with key "facility_code" is defined. - ## If unset, 1 (user-level) is the default + ## Default facility value. Facility and Severity are used to calculate the + ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with + ## key "facility_code" is defined. If unset, 1 (user-level) is the default # default_facility_code = 1 - ## Default APP-NAME value (RFC5424#section-6.2.5) + ## Default APP-NAME value (RFC5424#section-6.2.5) ## Used when no metric tag with key "appname" is defined. ## If unset, "Telegraf" is the default # default_appname = "Telegraf" ``` ### Metric mapping -The output plugin expects syslog metrics tags and fields to match up with the ones created in the [syslog input plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/syslog#metrics). +The output plugin expects syslog metrics tags and fields to match up with the +ones created in the [syslog input][]. The following table shows the metric tags, field and defaults used to format syslog messages. 
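[Editor's note] Read end to end, the mapping table amounts to: reserved tags/fields fill the RFC 5424 header slots, defaults cover anything absent, and everything else becomes structured data. An illustrative round trip with values chosen to mirror the mapper tests earlier in this series (no default_sdid configured, so structured data is the nil value `-`):

```
input (line protocol) => testmetric,hostname=testhost msg="Test message" 1289430000000000000
output (syslog)       => <13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric - Test message
```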
@@ -98,4 +103,6 @@ The following table shows the metric tags, field and defaults used to format sys | HOSTNAME | hostname OR source OR host | - | os.Hostname() | | MSGID | - | msgid | Metric name | | PROCID | - | procid | - | -| MSG | - | msg | - | \ No newline at end of file +| MSG | - | msg | - | + +[syslog input]: /plugins/inputs/syslog#metrics diff --git a/plugins/outputs/syslog/syslog.go b/plugins/outputs/syslog/syslog.go index 684806b85..013db94a1 100644 --- a/plugins/outputs/syslog/syslog.go +++ b/plugins/outputs/syslog/syslog.go @@ -57,50 +57,54 @@ var sampleConfig = ` ## Defaults to the OS configuration. # keep_alive_period = "5m" - ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). - ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), - ## or the non-transparent framing technique (RFC6587#section-3.4.2). - ## Must be one of "octect-counting", "non-transparent". + ## The framing technique with which it is expected that messages are + ## transported (default = "octet-counting"). Whether the messages come + ## using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), + ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must + ## be one of "octet-counting", "non-transparent". # framing = "octet-counting" ## The trailer to be expected in case of non-trasparent framing (default = "LF"). ## Must be one of "LF", or "NUL". # trailer = "LF" - ### SD-PARAMs settings - ### A syslog message can contain multiple parameters and multiple identifiers within structured data section - ### A syslog message can contain multiple structured data sections. - ### For each unrecognised metric tag/field a SD-PARAMS can be created. - ### Example - ### Configuration => - ### sdparam_separator = "_" - ### default_sdid = "default@32473" - ### sdids = ["foo@123", "bar@456"] - ### input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1 - ### output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y] + ## SD-PARAMs settings + ## Syslog messages can contain key/value pairs within zero or more + ## structured data sections. For each unrecognised metric tag/field a + ## SD-PARAMS is created. + ## + ## Example: + ## [[outputs.syslog]] + ## sdparam_separator = "_" + ## default_sdid = "default@32473" + ## sdids = ["foo@123", "bar@456"] + ## + ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1 + ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y] - ## SD-PARAMs separator between the sdid and tag/field key (default = "_") + ## SD-PARAMs separator between the sdid and tag/field key (default = "_") # sdparam_separator = "_" - ## Default sdid used for tags/fields that don't contain a prefix defined in the explict sdids setting below - ## If no default is specified, no SD-PARAMs will be used for unrecognised field. + ## Default sdid used for tags/fields that don't contain a prefix defined in + ## the explict sdids setting below If no default is specified, no SD-PARAMs + ## will be used for unrecognised field. 
# default_sdid = "default@32473" - ##List of explicit prefixes to extract from tag/field keys and use as the SDID, if they match (see above example for more details): + ## List of explicit prefixes to extract from tag/field keys and use as the + ## SDID, if they match (see above example for more details): # sdids = ["foo@123", "bar@456"] - ### - ## Default severity value. Severity and Facility are used to calculate the message PRI value (RFC5424#section-6.2.1) - ## Used when no metric field with key "severity_code" is defined. - ## If unset, 5 (notice) is the default + ## Default severity value. Severity and Facility are used to calculate the + ## message PRI value (RFC5424#section-6.2.1). Used when no metric field + ## with key "severity_code" is defined. If unset, 5 (notice) is the default # default_severity_code = 5 - ## Default facility value. Facility and Severity are used to calculate the message PRI value (RFC5424#section-6.2.1) - ## Used when no metric field with key "facility_code" is defined. - ## If unset, 1 (user-level) is the default + ## Default facility value. Facility and Severity are used to calculate the + ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with + ## key "facility_code" is defined. If unset, 1 (user-level) is the default # default_facility_code = 1 - ## Default APP-NAME value (RFC5424#section-6.2.5) + ## Default APP-NAME value (RFC5424#section-6.2.5) ## Used when no metric tag with key "appname" is defined. ## If unset, "Telegraf" is the default # default_appname = "Telegraf" From 495a5e9f99bc4acb6bc45c00fdcf0726acb98928 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 8 May 2019 11:39:30 -0700 Subject: [PATCH 0829/1815] Update changelog --- CHANGELOG.md | 4 ++++ README.md | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 585b503fa..66ae0f579 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,10 @@ - [github](/plugins/inputs/github/README.md) - Contributed by @influxdata - [powerdns_recursor](/plugins/inputs/powerdns_recursor/README.md) - Contributed by @dupondje +#### New Outputs + +- [syslog](/plugins/outputs/syslog/README.md) - Contributed by @javicrespo + #### New Serializers - [wavefront](/plugins/serializers/wavefront/README.md) - Contributed by @puckpuck diff --git a/README.md b/README.md index 4446afd15..42fe91286 100644 --- a/README.md +++ b/README.md @@ -340,7 +340,7 @@ For documentation on the latest development code see the [documentation index][d ## Output Plugins * [influxdb](./plugins/outputs/influxdb) (InfluxDB 1.x) -* [influxdb_v2](./plugins/outputs/influxdb_v2) ([InfluxDB 2.x](https://github.com/influxdata/platform)) +* [influxdb_v2](./plugins/outputs/influxdb_v2) ([InfluxDB 2.x](https://github.com/influxdata/influxdb)) * [amon](./plugins/outputs/amon) * [amqp](./plugins/outputs/amqp) (rabbitmq) * [application_insights](./plugins/outputs/application_insights) @@ -368,6 +368,7 @@ For documentation on the latest development code see the [documentation index][d * [riemann_legacy](./plugins/outputs/riemann_legacy) * [socket_writer](./plugins/outputs/socket_writer) * [stackdriver](./plugins/outputs/stackdriver) +* [syslog](./plugins/outputs/syslog) * [tcp](./plugins/outputs/socket_writer) * [udp](./plugins/outputs/socket_writer) * [wavefront](./plugins/outputs/wavefront) From 9b3523a91b7f868770623b820cb62f43120fc876 Mon Sep 17 00:00:00 2001 From: frizner Date: Fri, 10 May 2019 19:20:37 -0400 Subject: [PATCH 0830/1815] Add support for HTTP basic auth to solr 
input (#5832) --- plugins/inputs/solr/README.md | 4 ++++ plugins/inputs/solr/solr.go | 19 ++++++++++++++++++- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/solr/README.md b/plugins/inputs/solr/README.md index 67f4e06ae..458572825 100644 --- a/plugins/inputs/solr/README.md +++ b/plugins/inputs/solr/README.md @@ -16,6 +16,10 @@ Tested from 3.5 to 7.* ## ## specify a list of one or more Solr cores (default - all) # cores = ["main"] + ## + ## Optional HTTP Basic Auth Credentials + # username = "username" + # password = "pa$$word" ``` ### Example output of gathered metrics: diff --git a/plugins/inputs/solr/solr.go b/plugins/inputs/solr/solr.go index 9b5ce9299..a9257c987 100644 --- a/plugins/inputs/solr/solr.go +++ b/plugins/inputs/solr/solr.go @@ -28,12 +28,18 @@ const sampleConfig = ` ## specify a list of one or more Solr cores (default - all) # cores = ["main"] + + ## Optional HTTP Basic Auth Credentials + # username = "username" + # password = "pa$$word" ` // Solr is a plugin to read stats from one or many Solr servers type Solr struct { Local bool Servers []string + Username string + Password string HTTPTimeout internal.Duration Cores []string client *http.Client @@ -471,7 +477,18 @@ func (s *Solr) createHTTPClient() *http.Client { } func (s *Solr) gatherData(url string, v interface{}) error { - r, err := s.client.Get(url) + req, reqErr := http.NewRequest(http.MethodGet, url, nil) + if reqErr != nil { + return reqErr + } + + if s.Username != "" { + req.SetBasicAuth(s.Username, s.Password) + } + + req.Header.Set("User-Agent", "Telegraf/"+internal.Version()) + + r, err := s.client.Do(req) if err != nil { return err } From ecb56f19bf7f6a70fb87fec4d3185218cf5a55e9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 10 May 2019 16:22:01 -0700 Subject: [PATCH 0831/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 66ae0f579..675b20067 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ - [#5770](https://github.com/influxdata/telegraf/pull/5770): Add service-display-name option for use with Windows service. - [#5778](https://github.com/influxdata/telegraf/pull/5778): Add support for log rotation. - [#5765](https://github.com/influxdata/telegraf/pull/5765): Support more drive types in smart input. +- [#5829](https://github.com/influxdata/telegraf/pull/5829): Add support for HTTP basic auth to solr input. #### Bugfixes From 3e0efdac398201a310b44849f3dac72f1fb9c929 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 13 May 2019 10:40:48 -0700 Subject: [PATCH 0832/1815] Reword note about merging pull requests --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 42fe91286..3f4a70d13 100644 --- a/README.md +++ b/README.md @@ -13,8 +13,8 @@ Telegraf is plugin-driven and has the concept of 4 distinct plugin types: 3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.) 4. [Output Plugins](#output-plugins) write metrics to various destinations -New plugins are designed to be easy to contribute, we'll eagerly accept pull -requests and will manage the set of plugins that Telegraf supports. +New plugins are designed to be easy to contribute, pull requests are welcomed +and we work to incorporate as many pull requests as possible. 
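[Editor's note] The solr patch above replaces the bare `client.Get(url)` with an explicitly constructed request so that credentials and a User-Agent header can be attached before sending. The pattern in isolation looks like this (the endpoint URL and version string here are placeholders, not taken from the plugin):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Placeholder endpoint; the plugin builds its own admin/mbeans URLs.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:8983/solr/admin/cores?action=STATUS&wt=json", nil)
	if err != nil {
		panic(err)
	}
	// Only attach credentials when a username is configured, as the patch does.
	req.SetBasicAuth("username", "pa$$word")
	req.Header.Set("User-Agent", "Telegraf/dev") // placeholder version string

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```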
## Try in Browser :rocket: From e52f7056bab51a2a0cd43134d5d2b1dbf8e87440 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 14 May 2019 11:29:44 -0700 Subject: [PATCH 0833/1815] Fix parse of unix timestamp with more than ns precision (#5826) --- internal/internal.go | 25 +++++++++++++------------ plugins/parsers/csv/parser.go | 20 ++++++++------------ plugins/parsers/csv/parser_test.go | 28 ++++++++++++++++++++++++++++ 3 files changed, 49 insertions(+), 24 deletions(-) diff --git a/internal/internal.go b/internal/internal.go index 133b19e9b..ebb69db8a 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -348,25 +348,18 @@ func ParseTimestamp(timestamp interface{}, format string) (time.Time, error) { // format = "unix_ns": epoch is assumed to be in nanoseconds and can come as number or string. Cannot have a decimal part. func ParseTimestampWithLocation(timestamp interface{}, format string, location string) (time.Time, error) { timeInt, timeFractional := int64(0), int64(0) - timeEpochStr, ok := timestamp.(string) - var err error - if !ok { - timeEpochFloat, ok := timestamp.(float64) - if !ok { - return time.Time{}, fmt.Errorf("time: %v could not be converted to string nor float64", timestamp) - } - intPart, frac := math.Modf(timeEpochFloat) - timeInt, timeFractional = int64(intPart), int64(frac*1e9) - } else { - splitted := regexp.MustCompile("[.,]").Split(timeEpochStr, 2) + switch ts := timestamp.(type) { + case string: + var err error + splitted := regexp.MustCompile("[.,]").Split(ts, 2) timeInt, err = strconv.ParseInt(splitted[0], 10, 64) if err != nil { loc, err := time.LoadLocation(location) if err != nil { return time.Time{}, fmt.Errorf("location: %s could not be loaded as a location", location) } - return time.ParseInLocation(format, timeEpochStr, loc) + return time.ParseInLocation(format, ts, loc) } if len(splitted) == 2 { @@ -380,7 +373,15 @@ func ParseTimestampWithLocation(timestamp interface{}, format string, location s return time.Time{}, err } } + case int64: + timeInt = ts + case float64: + intPart, frac := math.Modf(ts) + timeInt, timeFractional = int64(intPart), int64(frac*1e9) + default: + return time.Time{}, fmt.Errorf("time: %v could not be converted to string nor float64", timestamp) } + if strings.EqualFold(format, "unix") { return time.Unix(timeInt, timeFractional).UTC(), nil } else if strings.EqualFold(format, "unix_ms") { diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go index f8bf93e70..8fa1dfab1 100644 --- a/plugins/parsers/csv/parser.go +++ b/plugins/parsers/csv/parser.go @@ -225,29 +225,25 @@ outer: // to the format. 
func parseTimestamp(timeFunc func() time.Time, recordFields map[string]interface{}, timestampColumn, timestampFormat string, -) (metricTime time.Time, err error) { - metricTime = timeFunc() - +) (time.Time, error) { if timestampColumn != "" { if recordFields[timestampColumn] == nil { - err = fmt.Errorf("timestamp column: %v could not be found", timestampColumn) - return + return time.Time{}, fmt.Errorf("timestamp column: %v could not be found", timestampColumn) } - tStr := fmt.Sprintf("%v", recordFields[timestampColumn]) - switch timestampFormat { case "": - err = fmt.Errorf("timestamp format must be specified") - return + return time.Time{}, fmt.Errorf("timestamp format must be specified") default: - metricTime, err = internal.ParseTimestamp(tStr, timestampFormat) + metricTime, err := internal.ParseTimestamp(recordFields[timestampColumn], timestampFormat) if err != nil { - return + return time.Time{}, err } + return metricTime, err } } - return + + return timeFunc(), nil } // SetDefaultTags set the DefaultTags diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go index 93ae6bcdd..6a10c0834 100644 --- a/plugins/parsers/csv/parser_test.go +++ b/plugins/parsers/csv/parser_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -322,3 +323,30 @@ func TestParseStream(t *testing.T) { DefaultTime(), ), metric) } + +func TestTimestampUnixFloatPrecision(t *testing.T) { + p := Parser{ + MetricName: "csv", + ColumnNames: []string{"time", "value"}, + TimestampColumn: "time", + TimestampFormat: "unix", + TimeFunc: DefaultTime, + } + data := `1551129661.95456123352050781250,42` + + expected := []telegraf.Metric{ + testutil.MustMetric( + "csv", + map[string]string{}, + map[string]interface{}{ + "value": 42, + "time": 1551129661.954561233, + }, + time.Unix(1551129661, 954561233), + ), + } + + metrics, err := p.Parse([]byte(data)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, metrics) +} From 74948edf23fe4df868bdf76687042fb09b6af804 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 14 May 2019 11:30:51 -0700 Subject: [PATCH 0834/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 675b20067..0b428ac26 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -65,6 +65,7 @@ - [#5619](https://github.com/influxdata/telegraf/issues/5619): Fix interval estimation in vsphere input. - [#5782](https://github.com/influxdata/telegraf/pull/5782): Skip lines with missing refid in ntpq input. - [#5755](https://github.com/influxdata/telegraf/issues/5755): Add support for hex values to ipmi_sensor input. +- [#5824](https://github.com/influxdata/telegraf/issues/5824): Fix parse of unix timestamp with more than ns precision. 
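[Editor's note] The precision bug fixed above comes down to float64: once a long decimal epoch is forced through a float, anything finer than a couple hundred nanoseconds at current epoch magnitudes is unrepresentable, which is why the parser now keeps string inputs as strings and splits on the decimal point. A simplified standalone sketch of the difference (an approximation of the idea, not the exact `internal.ParseTimestamp` code):

```go
package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
	"time"
)

func main() {
	raw := "1551129661.95456123352050781250" // the value from TestTimestampUnixFloatPrecision

	// String route: parse seconds and fraction separately, so the
	// nanosecond digits survive intact.
	parts := strings.SplitN(raw, ".", 2)
	sec, _ := strconv.ParseInt(parts[0], 10, 64)
	frac, _ := strconv.ParseFloat("0."+parts[1], 64)
	fmt.Println(time.Unix(sec, int64(frac*1e9)).UTC()) // ...954561233 ns, as the test expects

	// Float route: the whole value squeezed into one float64 first.
	// Around 1.5e9 seconds a float64 step is roughly 200ns, so the
	// trailing fractional digits can no longer be trusted.
	f, _ := strconv.ParseFloat(raw, 64)
	ip, fp := math.Modf(f)
	fmt.Println(time.Unix(int64(ip), int64(fp*1e9)).UTC())
}
```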
## v1.10.3 [2019-04-16] From 2d44a88b19826eee3b3de115929dc51262e91a98 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 14 May 2019 12:56:19 -0600 Subject: [PATCH 0835/1815] Restore field name case in interrupts input (#5850) --- plugins/inputs/interrupts/interrupts.go | 2 +- plugins/inputs/interrupts/interrupts_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/interrupts/interrupts.go b/plugins/inputs/interrupts/interrupts.go index 5b0ca374c..39b3020dd 100644 --- a/plugins/inputs/interrupts/interrupts.go +++ b/plugins/inputs/interrupts/interrupts.go @@ -102,7 +102,7 @@ func gatherTagsFields(irq IRQ) (map[string]string, map[string]interface{}) { tags := map[string]string{"irq": irq.ID, "type": irq.Type, "device": irq.Device} fields := map[string]interface{}{"total": irq.Total} for i := 0; i < len(irq.Cpus); i++ { - cpu := fmt.Sprintf("cpu%d", i) + cpu := fmt.Sprintf("CPU%d", i) fields[cpu] = irq.Cpus[i] } return tags, fields diff --git a/plugins/inputs/interrupts/interrupts_test.go b/plugins/inputs/interrupts/interrupts_test.go index 2579d926d..63ff765b6 100644 --- a/plugins/inputs/interrupts/interrupts_test.go +++ b/plugins/inputs/interrupts/interrupts_test.go @@ -23,7 +23,7 @@ func expectCpuAsFields(m *testutil.Accumulator, t *testing.T, measurement string fields := map[string]interface{}{} total := int64(0) for idx, count := range irq.Cpus { - fields[fmt.Sprintf("cpu%d", idx)] = count + fields[fmt.Sprintf("CPU%d", idx)] = count total += count } fields["total"] = total From 44de622f9e89f9a20f5f76b28cd7427fc9a730ef Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 14 May 2019 11:57:33 -0700 Subject: [PATCH 0836/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b428ac26..c97304dca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -66,6 +66,7 @@ - [#5782](https://github.com/influxdata/telegraf/pull/5782): Skip lines with missing refid in ntpq input. - [#5755](https://github.com/influxdata/telegraf/issues/5755): Add support for hex values to ipmi_sensor input. - [#5824](https://github.com/influxdata/telegraf/issues/5824): Fix parse of unix timestamp with more than ns precision. +- [#5836](https://github.com/influxdata/telegraf/issues/5836): Restore field name case in interrupts input. ## v1.10.3 [2019-04-16] From 9318d47a385b7bcba510563896bbdb615666016b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 14 May 2019 12:04:19 -0700 Subject: [PATCH 0837/1815] Set release date for 1.10.4 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c97304dca..b1e845e4a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,7 +54,7 @@ - [#5771](https://github.com/influxdata/telegraf/pull/5771): Fix only one job per storage target reported in lustre2 input. - [#5796](https://github.com/influxdata/telegraf/issues/5796): Set default timeout of 5s in fibaro input. 
-## v1.10.4 [unreleased] +## v1.10.4 [2019-05-14] #### Bugfixes From 8f3ed4579774bee772b89889f0e7f24c7cc50a68 Mon Sep 17 00:00:00 2001 From: Jorge Landivar Date: Tue, 14 May 2019 18:20:35 -0500 Subject: [PATCH 0838/1815] Add support for datadog events to statsd input (#5791) --- docs/LICENSE_OF_DEPENDENCIES.md | 3 + plugins/inputs/statsd/README.md | 7 + plugins/inputs/statsd/datadog.go | 174 +++++++++ plugins/inputs/statsd/datadog_test.go | 478 +++++++++++++++++++++++++ plugins/inputs/statsd/running_stats.go | 2 +- plugins/inputs/statsd/statsd.go | 143 ++++---- plugins/inputs/statsd/statsd_test.go | 222 ++++++------ 7 files changed, 852 insertions(+), 177 deletions(-) create mode 100644 plugins/inputs/statsd/datadog.go create mode 100644 plugins/inputs/statsd/datadog_test.go diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 5b6faf4c9..e0531210e 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -135,3 +135,6 @@ following works: - gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE) - gopkg.in/tomb.v1 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v1/LICENSE) - gopkg.in/yaml.v2 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v2.2.2/LICENSE) + +## telegraf used and modified code from these projects +- github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE) \ No newline at end of file diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index c1093bf39..a33480f61 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -42,8 +42,14 @@ ## Parses tags in the datadog statsd format ## http://docs.datadoghq.com/guides/dogstatsd/ + ## deprecated in 1.10; use datadog_extensions option instead parse_data_dog_tags = false + ## Parses extensions to statsd in the datadog statsd format + ## currently supports metrics and datadog tags. + ## http://docs.datadoghq.com/guides/dogstatsd/ + datadog_extensions = false + ## Statsd data translation templates, more info can be read here: ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md # templates = [ @@ -185,6 +191,7 @@ the accuracy of percentiles but also increases the memory usage and cpu time. - **templates** []string: Templates for transforming statsd buckets into influx measurements and tags. 
- **parse_data_dog_tags** boolean: Enable parsing of tags in DataDog's dogstatsd format (http://docs.datadoghq.com/guides/dogstatsd/) +- **datadog_extensions** boolean: Enable parsing of DataDog's extensions to dogstatsd format (http://docs.datadoghq.com/guides/dogstatsd/) ### Statsd bucket -> InfluxDB line-protocol Templates diff --git a/plugins/inputs/statsd/datadog.go b/plugins/inputs/statsd/datadog.go new file mode 100644 index 000000000..f2785ff38 --- /dev/null +++ b/plugins/inputs/statsd/datadog.go @@ -0,0 +1,174 @@ +package statsd + +// this is adapted from datadog's apache licensed version at +// https://github.com/DataDog/datadog-agent/blob/fcfc74f106ab1bd6991dfc6a7061c558d934158a/pkg/dogstatsd/parser.go#L173 + +import ( + "errors" + "fmt" + "strconv" + "strings" + "time" +) + +const ( + priorityNormal = "normal" + priorityLow = "low" + + eventInfo = "info" + eventWarning = "warning" + eventError = "error" + eventSuccess = "success" +) + +var uncommenter = strings.NewReplacer("\\n", "\n") + +func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostname string) error { + // _e{title.length,text.length}:title|text + // [ + // |d:date_happened + // |p:priority + // |h:hostname + // |t:alert_type + // |s:source_type_nam + // |#tag1,tag2 + // ] + // + // + // tag is key:value + messageRaw := strings.SplitN(message, ":", 2) + if len(messageRaw) < 2 || len(messageRaw[0]) < 7 || len(messageRaw[1]) < 3 { + return fmt.Errorf("Invalid message format") + } + header := messageRaw[0] + message = messageRaw[1] + + rawLen := strings.SplitN(header[3:], ",", 2) + if len(rawLen) != 2 { + return fmt.Errorf("Invalid message format") + } + + titleLen, err := strconv.ParseInt(rawLen[0], 10, 64) + if err != nil { + return fmt.Errorf("Invalid message format, could not parse title.length: '%s'", rawLen[0]) + } + if len(rawLen[1]) < 1 { + return fmt.Errorf("Invalid message format, could not parse text.length: '%s'", rawLen[0]) + } + textLen, err := strconv.ParseInt(rawLen[1][:len(rawLen[1])-1], 10, 64) + if err != nil { + return fmt.Errorf("Invalid message format, could not parse text.length: '%s'", rawLen[0]) + } + if titleLen+textLen+1 > int64(len(message)) { + return fmt.Errorf("Invalid message format, title.length and text.length exceed total message length") + } + + rawTitle := message[:titleLen] + rawText := message[titleLen+1 : titleLen+1+textLen] + message = message[titleLen+1+textLen:] + + if len(rawTitle) == 0 || len(rawText) == 0 { + return fmt.Errorf("Invalid event message format: empty 'title' or 'text' field") + } + + name := rawTitle + tags := make(map[string]string, strings.Count(message, ",")+2) // allocate for the approximate number of tags + fields := make(map[string]interface{}, 9) + fields["alert_type"] = eventInfo // default event type + fields["text"] = uncommenter.Replace(string(rawText)) + tags["source"] = defaultHostname // Use source tag because host is reserved tag key in Telegraf. 
+ fields["priority"] = priorityNormal + ts := now + if len(message) < 2 { + s.acc.AddFields(name, fields, tags, ts) + return nil + } + + rawMetadataFields := strings.Split(message[1:], "|") + for i := range rawMetadataFields { + if len(rawMetadataFields[i]) < 2 { + return errors.New("too short metadata field") + } + switch rawMetadataFields[i][:2] { + case "d:": + ts, err := strconv.ParseInt(rawMetadataFields[i][2:], 10, 64) + if err != nil { + continue + } + fields["ts"] = ts + case "p:": + switch rawMetadataFields[i][2:] { + case priorityLow: + fields["priority"] = priorityLow + case priorityNormal: // we already used this as a default + default: + continue + } + case "h:": + tags["source"] = rawMetadataFields[i][2:] + case "t:": + switch rawMetadataFields[i][2:] { + case eventError, eventWarning, eventSuccess, eventInfo: + fields["alert_type"] = rawMetadataFields[i][2:] // already set for info + default: + continue + } + case "k:": + tags["aggregation_key"] = rawMetadataFields[i][2:] + case "s:": + fields["source_type_name"] = rawMetadataFields[i][2:] + default: + if rawMetadataFields[i][0] == '#' { + parseDataDogTags(tags, rawMetadataFields[i][1:]) + } else { + return fmt.Errorf("unknown metadata type: '%s'", rawMetadataFields[i]) + } + } + } + // Use source tag because host is reserved tag key in Telegraf. + // In datadog the host tag and `h:` are interchangable, so we have to chech for the host tag. + if host, ok := tags["host"]; ok { + delete(tags, "host") + tags["source"] = host + } + s.acc.AddFields(name, fields, tags, ts) + return nil +} + +func parseDataDogTags(tags map[string]string, message string) { + start, i := 0, 0 + var k string + var inVal bool // check if we are parsing the value part of the tag + for i = range message { + if message[i] == ',' { + if k == "" { + k = message[start:i] + tags[k] = "true" // this is because influx doesn't support empty tags + start = i + 1 + continue + } + v := message[start:i] + if v == "" { + v = "true" + } + tags[k] = v + start = i + 1 + k, inVal = "", false // reset state vars + } else if message[i] == ':' && !inVal { + k = message[start:i] + start = i + 1 + inVal = true + } + } + if k == "" && start < i+1 { + tags[message[start:i+1]] = "true" + } + // grab the last value + if k != "" { + if start < i+1 { + tags[k] = message[start : i+1] + return + } + tags[k] = "true" + } +} diff --git a/plugins/inputs/statsd/datadog_test.go b/plugins/inputs/statsd/datadog_test.go new file mode 100644 index 000000000..61762a2c4 --- /dev/null +++ b/plugins/inputs/statsd/datadog_test.go @@ -0,0 +1,478 @@ +package statsd + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestEventGather(t *testing.T) { + now := time.Now() + type expected struct { + title string + tags map[string]string + fields map[string]interface{} + } + tests := []struct { + name string + message string + hostname string + now time.Time + err bool + expected expected + }{{ + name: "basic", + message: "_e{10,9}:test title|test text", + hostname: "default-hostname", + now: now, + err: false, + expected: expected{ + title: "test title", + tags: map[string]string{"source": "default-hostname"}, + fields: map[string]interface{}{ + "priority": priorityNormal, + "alert_type": "info", + "text": "test text", + }, + }, + }, + { + name: "escape some stuff", + message: "_e{10,24}:test title|test\\line1\\nline2\\nline3", + hostname: "default-hostname", + now: now.Add(1), + err: false, + expected: expected{ + title: "test 
title", + tags: map[string]string{"source": "default-hostname"}, + fields: map[string]interface{}{ + "priority": priorityNormal, + "alert_type": "info", + "text": "test\\line1\nline2\nline3", + }, + }, + }, + { + name: "custom time", + message: "_e{10,9}:test title|test text|d:21", + hostname: "default-hostname", + now: now.Add(2), + err: false, + expected: expected{ + title: "test title", + tags: map[string]string{"source": "default-hostname"}, + fields: map[string]interface{}{ + "priority": priorityNormal, + "alert_type": "info", + "text": "test text", + "ts": int64(21), + }, + }, + }, + } + acc := &testutil.Accumulator{} + s := NewTestStatsd() + s.acc = acc + + for i := range tests { + t.Run(tests[i].name, func(t *testing.T) { + err := s.parseEventMessage(tests[i].now, tests[i].message, tests[i].hostname) + if tests[i].err { + require.NotNil(t, err) + } else { + require.Nil(t, err) + } + require.Equal(t, uint64(i+1), acc.NMetrics()) + + require.Nil(t, err) + require.Equal(t, tests[i].expected.title, acc.Metrics[i].Measurement) + require.Equal(t, tests[i].expected.tags, acc.Metrics[i].Tags) + require.Equal(t, tests[i].expected.fields, acc.Metrics[i].Fields) + }) + } +} + +// These tests adapted from tests in +// https://github.com/DataDog/datadog-agent/blob/master/pkg/dogstatsd/parser_test.go +// to ensure compatibility with the datadog-agent parser + +func TestEvents(t *testing.T) { + now := time.Now() + type args struct { + now time.Time + message string + hostname string + } + type expected struct { + title string + text interface{} + now time.Time + ts interface{} + priority string + source string + alertType interface{} + aggregationKey string + sourceTypeName interface{} + checkTags map[string]string + } + + tests := []struct { + name string + args args + expected expected + }{ + { + name: "event minimal", + args: args{ + now: now, + message: "_e{10,9}:test title|test text", + hostname: "default-hostname", + }, + expected: expected{ + title: "test title", + text: "test text", + now: now, + priority: priorityNormal, + source: "default-hostname", + alertType: eventInfo, + aggregationKey: "", + }, + }, + { + name: "event multilines text", + args: args{ + now: now.Add(1), + message: "_e{10,24}:test title|test\\line1\\nline2\\nline3", + hostname: "default-hostname", + }, + expected: expected{ + title: "test title", + text: "test\\line1\nline2\nline3", + now: now.Add(1), + priority: priorityNormal, + source: "default-hostname", + alertType: eventInfo, + aggregationKey: "", + }, + }, + { + name: "event pipe in title", + args: args{ + now: now.Add(2), + message: "_e{10,24}:test|title|test\\line1\\nline2\\nline3", + hostname: "default-hostname", + }, + expected: expected{ + title: "test|title", + text: "test\\line1\nline2\nline3", + now: now.Add(2), + priority: priorityNormal, + source: "default-hostname", + alertType: eventInfo, + aggregationKey: "", + }, + }, + { + name: "event metadata timestamp", + args: args{ + now: now.Add(3), + message: "_e{10,9}:test title|test text|d:21", + hostname: "default-hostname", + }, + expected: expected{ + title: "test title", + text: "test text", + now: now.Add(3), + priority: priorityNormal, + source: "default-hostname", + alertType: eventInfo, + aggregationKey: "", + ts: int64(21), + }, + }, + { + name: "event metadata priority", + args: args{ + now: now.Add(4), + message: "_e{10,9}:test title|test text|p:low", + hostname: "default-hostname", + }, + expected: expected{ + title: "test title", + text: "test text", + now: now.Add(4), + priority: 
priorityLow,
+ source: "default-hostname",
+ alertType: eventInfo,
+ },
+ },
+ {
+ name: "event metadata hostname",
+ args: args{
+ now: now.Add(5),
+ message: "_e{10,9}:test title|test text|h:localhost",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(5),
+ priority: priorityNormal,
+ source: "localhost",
+ alertType: eventInfo,
+ },
+ },
+ {
+ name: "event metadata hostname in tag",
+ args: args{
+ now: now.Add(6),
+ message: "_e{10,9}:test title|test text|#host:localhost",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(6),
+ priority: priorityNormal,
+ source: "localhost",
+ alertType: eventInfo,
+ },
+ },
+ {
+ name: "event metadata empty host tag",
+ args: args{
+ now: now.Add(7),
+ message: "_e{10,9}:test title|test text|#host:,other:tag",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(7),
+ priority: priorityNormal,
+ source: "true",
+ alertType: eventInfo,
+ checkTags: map[string]string{"other": "tag", "source": "true"},
+ },
+ },
+ {
+ name: "event metadata alert type",
+ args: args{
+ now: now.Add(8),
+ message: "_e{10,9}:test title|test text|t:warning",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(8),
+ priority: priorityNormal,
+ source: "default-hostname",
+ alertType: eventWarning,
+ },
+ },
+ {
+ name: "event metadata aggregation key",
+ args: args{
+ now: now.Add(9),
+ message: "_e{10,9}:test title|test text|k:some aggregation key",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(9),
+ priority: priorityNormal,
+ source: "default-hostname",
+ alertType: eventInfo,
+ aggregationKey: "some aggregation key",
+ },
+ },
+ {
+ name: "event metadata aggregation key",
+ args: args{
+ now: now.Add(10),
+ message: "_e{10,9}:test title|test text|k:some aggregation key",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(10),
+ priority: priorityNormal,
+ source: "default-hostname",
+ alertType: eventInfo,
+ aggregationKey: "some aggregation key",
+ },
+ },
+ {
+ name: "event metadata source type",
+ args: args{
+ now: now.Add(11),
+ message: "_e{10,9}:test title|test text|s:this is the source",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(11),
+ priority: priorityNormal,
+ source: "default-hostname",
+ sourceTypeName: "this is the source",
+ alertType: eventInfo,
+ },
+ },
+ {
+ name: "event metadata source tags",
+ args: args{
+ now: now.Add(11),
+ message: "_e{10,9}:test title|test text|#tag1,tag2:test",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(11),
+ priority: priorityNormal,
+ source: "default-hostname",
+ alertType: eventInfo,
+ checkTags: map[string]string{"tag1": "true", "tag2": "test", "source": "default-hostname"},
+ },
+ },
+ 
{
+ name: "event metadata multiple",
+ args: args{
+ now: now.Add(11),
+ message: "_e{10,9}:test title|test text|t:warning|d:12345|p:low|h:some.host|k:aggKey|s:source test|#tag1,tag2:test",
+ hostname: "default-hostname",
+ },
+ expected: expected{
+ title: "test title",
+ text: "test text",
+ now: now.Add(11),
+ priority: priorityLow,
+ source: "some.host",
+ ts: int64(12345),
+ alertType: eventWarning,
+ aggregationKey: "aggKey",
+ sourceTypeName: "source test",
+ checkTags: map[string]string{"aggregation_key": "aggKey", "tag1": "true", "tag2": "test", "source": "some.host"},
+ },
+ },
+ }
+ for i := range tests {
+ t.Run(tests[i].name, func(t *testing.T) {
+ s := NewTestStatsd()
+ acc := &testutil.Accumulator{}
+ s.acc = acc
+ err := s.parseEventMessage(tests[i].args.now, tests[i].args.message, tests[i].args.hostname)
+ require.Nil(t, err)
+ m := acc.Metrics[0]
+ require.Equal(t, tests[i].expected.title, m.Measurement)
+ require.Equal(t, tests[i].expected.text, m.Fields["text"])
+ require.Equal(t, tests[i].expected.now, m.Time)
+ require.Equal(t, tests[i].expected.ts, m.Fields["ts"])
+ require.Equal(t, tests[i].expected.priority, m.Fields["priority"])
+ require.Equal(t, tests[i].expected.source, m.Tags["source"])
+ require.Equal(t, tests[i].expected.alertType, m.Fields["alert_type"])
+ require.Equal(t, tests[i].expected.aggregationKey, m.Tags["aggregation_key"])
+ require.Equal(t, tests[i].expected.sourceTypeName, m.Fields["source_type_name"])
+ if tests[i].expected.checkTags != nil {
+ require.Equal(t, tests[i].expected.checkTags, m.Tags)
+ }
+ })
+ }
+}
+
+func TestEventError(t *testing.T) {
+ now := time.Now()
+ s := NewTestStatsd()
+ s.acc = &testutil.Accumulator{}
+ // missing length header
+ err := s.parseEventMessage(now, "_e:title|text", "default-hostname")
+ require.Error(t, err)
+
+ // greater length than packet
+ err = s.parseEventMessage(now, "_e{10,10}:title|text", "default-hostname")
+ require.Error(t, err)
+
+ // zero length
+ err = s.parseEventMessage(now, "_e{0,0}:a|a", "default-hostname")
+ require.Error(t, err)
+
+ // missing title or text length
+ err = s.parseEventMessage(now, "_e{5555:title|text", "default-hostname")
+ require.Error(t, err)
+
+ // wrong len format
+ err = s.parseEventMessage(now, "_e{a,1}:title|text", "default-hostname")
+ require.Error(t, err)
+
+ err = s.parseEventMessage(now, "_e{1,a}:title|text", "default-hostname")
+ require.Error(t, err)
+
+ // missing title or text length
+ err = s.parseEventMessage(now, "_e{5,}:title|text", "default-hostname")
+ require.Error(t, err)
+
+ err = s.parseEventMessage(now, "_e{100,:title|text", "default-hostname")
+ require.Error(t, err)
+
+ err = s.parseEventMessage(now, "_e,100:title|text", "default-hostname")
+ require.Error(t, err)
+
+ err = s.parseEventMessage(now, "_e{,4}:title|text", "default-hostname")
+ require.Error(t, err)
+
+ err = s.parseEventMessage(now, "_e{}:title|text", "default-hostname")
+ require.Error(t, err)
+
+ err = s.parseEventMessage(now, "_e{,}:title|text", "default-hostname")
+ require.Error(t, err)
+
+ // not enough information
+ err = s.parseEventMessage(now, "_e|text", "default-hostname")
+ require.Error(t, err)
+
+ err = s.parseEventMessage(now, "_e:|text", "default-hostname")
+ require.Error(t, err)
+
+ // invalid timestamp
+ err = s.parseEventMessage(now, "_e{5,4}:title|text|d:abc", "default-hostname")
+ require.NoError(t, err)
+
+ // invalid priority
+ err = s.parseEventMessage(now, "_e{5,4}:title|text|p:urgent", "default-hostname")
+ require.NoError(t, err)
+
+ // 
invalid alert type
+ err = s.parseEventMessage(now, "_e{5,4}:title|text|t:test", "default-hostname")
+ require.NoError(t, err)
+
+ // unknown metadata
+ err = s.parseEventMessage(now, "_e{5,4}:title|text|x:1234", "default-hostname")
+ require.Error(t, err)
+}
diff --git a/plugins/inputs/statsd/running_stats.go b/plugins/inputs/statsd/running_stats.go
index 2395ab143..6f8045b42 100644
--- a/plugins/inputs/statsd/running_stats.go
+++ b/plugins/inputs/statsd/running_stats.go
@@ -49,7 +49,7 @@ func (rs *RunningStats) AddValue(v float64) {
 }

 // These are used for the running mean and variance
- rs.n += 1
+ rs.n++

 rs.ex += v - rs.k
 rs.ex2 += (v - rs.k) * (v - rs.k)

diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go
index 8b5e15502..7408482b6 100644
--- a/plugins/inputs/statsd/statsd.go
+++ b/plugins/inputs/statsd/statsd.go
@@ -7,6 +7,7 @@ import (
 "fmt"
 "log"
 "net"
+ "net/url"
 "sort"
 "strconv"
 "strings"
@@ -21,7 +22,7 @@ import (
 )

 const (
- // UDP packet limit, see
+ // UDP_MAX_PACKET_SIZE is the UDP packet limit, see
 // https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
 UDP_MAX_PACKET_SIZE int = 64 * 1024

@@ -34,13 +35,14 @@ const (
 MaxTCPConnections = 250
 )

-var dropwarn = "E! Error: statsd message queue full. " +
+var dropwarn = "E! [inputs.statsd] Error: statsd message queue full. " +
 "We have dropped %d messages so far. " +
 "You may want to increase allowed_pending_messages in the config\n"

-var malformedwarn = "E! Statsd over TCP has received %d malformed packets" +
+var malformedwarn = "E! [inputs.statsd] Statsd over TCP has received %d malformed packets" +
 " thus far."

+// Statsd allows the importing of statsd and dogstatsd data.
 type Statsd struct {
 // Protocol used on listener - udp or tcp
 Protocol string `toml:"protocol"`
@@ -67,7 +69,12 @@ type Statsd struct {
 MetricSeparator string
 // This flag enables parsing of tags in the dogstatsd extension to the
 // statsd protocol (http://docs.datadoghq.com/guides/dogstatsd/)
- ParseDataDogTags bool
+ ParseDataDogTags bool // deprecated in 1.10; use datadog_extensions
+
+ // Parses extensions to statsd in the datadog statsd format
+ // currently supports metrics and datadog tags. 
+ // http://docs.datadoghq.com/guides/dogstatsd/
+ DataDogExtensions bool `toml:"datadog_extensions"`

 // UDPPacketSize is deprecated, it's only here for legacy support
 // we now always create 1 max size buffer and then copy only what we need
@@ -91,7 +98,7 @@ type Statsd struct {
 malformed int

 // Channel for all incoming statsd packets
- in chan *bytes.Buffer
+ in chan input

 done chan struct{}

 // Cache gauges, counters & sets so they can be aggregated as they arrive
@@ -131,6 +138,12 @@ type Statsd struct {
 bufPool sync.Pool
 }

+type input struct {
+ *bytes.Buffer
+ time.Time
+ Addr string
+}
+
 // One statsd metric, form is <bucket>:<value>|<mtype>|@<samplerate>
 type metric struct {
 name string
@@ -214,6 +227,9 @@ const sampleConfig = `
 ## http://docs.datadoghq.com/guides/dogstatsd/
 parse_data_dog_tags = false

+ ## Parses datadog extensions to the statsd format
+ datadog_extensions = false
+
 ## Statsd data translation templates, more info can be read here:
 ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
 # templates = [
@@ -239,12 +255,12 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error {
 defer s.Unlock()
 now := time.Now()

- for _, metric := range s.timings {
+ for _, m := range s.timings {
 // Defining a template to parse field names for timers allows us to split
 // out multiple fields per timer. In this case we prefix each stat with the
 // field name and store these all in a single measurement.
 fields := make(map[string]interface{})
- for fieldName, stats := range metric.fields {
+ for fieldName, stats := range m.fields {
 var prefix string
 if fieldName != defaultFieldName {
 prefix = fieldName + "_"
@@ -261,41 +277,44 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error {
 }
 }

- acc.AddFields(metric.name, fields, metric.tags, now)
+ acc.AddFields(m.name, fields, m.tags, now)
 }
 if s.DeleteTimings {
 s.timings = make(map[string]cachedtimings)
 }

- for _, metric := range s.gauges {
- acc.AddGauge(metric.name, metric.fields, metric.tags, now)
+ for _, m := range s.gauges {
+ acc.AddGauge(m.name, m.fields, m.tags, now)
 }
 if s.DeleteGauges {
 s.gauges = make(map[string]cachedgauge)
 }

- for _, metric := range s.counters {
- acc.AddCounter(metric.name, metric.fields, metric.tags, now)
+ for _, m := range s.counters {
+ acc.AddCounter(m.name, m.fields, m.tags, now)
 }
 if s.DeleteCounters {
 s.counters = make(map[string]cachedcounter)
 }

- for _, metric := range s.sets {
+ for _, m := range s.sets {
 fields := make(map[string]interface{})
- for field, set := range metric.fields {
+ for field, set := range m.fields {
 fields[field] = int64(len(set))
 }
- acc.AddFields(metric.name, fields, metric.tags, now)
+ acc.AddFields(m.name, fields, m.tags, now)
 }
 if s.DeleteSets {
 s.sets = make(map[string]cachedset)
 }
-
 return nil
 }

 func (s *Statsd) Start(_ telegraf.Accumulator) error {
+ if s.ParseDataDogTags {
+ s.DataDogExtensions = true
+ log.Printf("W! 
[inputs.statsd] The parse_data_dog_tags option is deprecated, use datadog_extensions instead.") + } // Make data structures s.gauges = make(map[string]cachedgauge) s.counters = make(map[string]cachedcounter) @@ -315,7 +334,7 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error { s.PacketsRecv = selfstat.Register("statsd", "tcp_packets_received", tags) s.BytesRecv = selfstat.Register("statsd", "tcp_bytes_received", tags) - s.in = make(chan *bytes.Buffer, s.AllowedPendingMessages) + s.in = make(chan input, s.AllowedPendingMessages) s.done = make(chan struct{}) s.accept = make(chan bool, s.MaxTCPConnections) s.conns = make(map[string]*net.TCPConn) @@ -329,7 +348,7 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error { } if s.ConvertNames { - log.Printf("I! WARNING statsd: convert_names config option is deprecated," + + log.Printf("W! [inputs.statsd] statsd: convert_names config option is deprecated," + " please use metric_separator instead") } @@ -348,7 +367,7 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error { return err } - log.Println("I! Statsd UDP listener listening on: ", conn.LocalAddr().String()) + log.Println("I! [inputs.statsd] Statsd UDP listener listening on: ", conn.LocalAddr().String()) s.UDPlistener = conn s.wg.Add(1) @@ -366,7 +385,7 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error { return err } - log.Println("I! TCP Statsd listening on: ", listener.Addr().String()) + log.Println("I! [inputs.statsd] TCP Statsd listening on: ", listener.Addr().String()) s.TCPlistener = listener s.wg.Add(1) @@ -382,7 +401,7 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error { defer s.wg.Done() s.parser() }() - log.Printf("I! Started the statsd service on %s\n", s.ServiceAddress) + log.Printf("I! [inputs.statsd] Started the statsd service on %s\n", s.ServiceAddress) return nil } @@ -439,17 +458,22 @@ func (s *Statsd) udpListen(conn *net.UDPConn) error { case <-s.done: return nil default: - n, _, err := conn.ReadFromUDP(buf) - if err != nil && !strings.Contains(err.Error(), "closed network") { - log.Printf("E! Error READ: %s\n", err.Error()) - continue + n, addr, err := conn.ReadFromUDP(buf) + if err != nil { + if !strings.Contains(err.Error(), "closed network") { + log.Printf("E! 
[inputs.statsd] Error READ: %s\n", err.Error()) + continue + } + return err } b := s.bufPool.Get().(*bytes.Buffer) b.Reset() b.Write(buf[:n]) - select { - case s.in <- b: + case s.in <- input{ + Buffer: b, + Time: time.Now(), + Addr: addr.IP.String()}: default: s.drops++ if s.drops == 1 || s.AllowedPendingMessages == 0 || s.drops%s.AllowedPendingMessages == 0 { @@ -468,12 +492,16 @@ func (s *Statsd) parser() error { select { case <-s.done: return nil - case buf := <-s.in: - lines := strings.Split(buf.String(), "\n") - s.bufPool.Put(buf) + case in := <-s.in: + lines := strings.Split(in.Buffer.String(), "\n") + s.bufPool.Put(in.Buffer) for _, line := range lines { line = strings.TrimSpace(line) - if line != "" { + switch { + case line == "": + case s.DataDogExtensions && strings.HasPrefix(line, "_e"): + s.parseEventMessage(in.Time, line, in.Addr) + default: s.parseStatsdLine(line) } } @@ -488,7 +516,7 @@ func (s *Statsd) parseStatsdLine(line string) error { defer s.Unlock() lineTags := make(map[string]string) - if s.ParseDataDogTags { + if s.DataDogExtensions { recombinedSegments := make([]string, 0) // datadog tags look like this: // users.online:1|c|@0.5|#country:china,environment:production @@ -499,24 +527,7 @@ func (s *Statsd) parseStatsdLine(line string) error { for _, segment := range pipesplit { if len(segment) > 0 && segment[0] == '#' { // we have ourselves a tag; they are comma separated - tagstr := segment[1:] - tags := strings.Split(tagstr, ",") - for _, tag := range tags { - ts := strings.SplitN(tag, ":", 2) - var k, v string - switch len(ts) { - case 1: - // just a tag - k = ts[0] - v = "" - case 2: - k = ts[0] - v = ts[1] - } - if k != "" { - lineTags[k] = v - } - } + parseDataDogTags(lineTags, segment[1:]) } else { recombinedSegments = append(recombinedSegments, segment) } @@ -527,7 +538,7 @@ func (s *Statsd) parseStatsdLine(line string) error { // Validate splitting the line on ":" bits := strings.Split(line, ":") if len(bits) < 2 { - log.Printf("E! Error: splitting ':', Unable to parse metric: %s\n", line) + log.Printf("E! [inputs.statsd] Error: splitting ':', Unable to parse metric: %s\n", line) return errors.New("Error Parsing statsd line") } @@ -543,11 +554,11 @@ func (s *Statsd) parseStatsdLine(line string) error { // Validate splitting the bit on "|" pipesplit := strings.Split(bit, "|") if len(pipesplit) < 2 { - log.Printf("E! Error: splitting '|', Unable to parse metric: %s\n", line) + log.Printf("E! [inputs.statsd] Error: splitting '|', Unable to parse metric: %s\n", line) return errors.New("Error Parsing statsd line") } else if len(pipesplit) > 2 { sr := pipesplit[2] - errmsg := "E! Error: parsing sample rate, %s, it must be in format like: " + + errmsg := "E! [inputs.statsd] parsing sample rate, %s, it must be in format like: " + "@0.1, @0.5, etc. Ignoring sample rate for line: %s\n" if strings.Contains(sr, "@") && len(sr) > 1 { samplerate, err := strconv.ParseFloat(sr[1:], 64) @@ -567,14 +578,14 @@ func (s *Statsd) parseStatsdLine(line string) error { case "g", "c", "s", "ms", "h": m.mtype = pipesplit[1] default: - log.Printf("E! Error: Statsd Metric type %s unsupported", pipesplit[1]) + log.Printf("E! [inputs.statsd] Error: Statsd Metric type %s unsupported", pipesplit[1]) return errors.New("Error Parsing statsd line") } // Parse the value if strings.HasPrefix(pipesplit[0], "-") || strings.HasPrefix(pipesplit[0], "+") { if m.mtype != "g" && m.mtype != "c" { - log.Printf("E! Error: +- values are only supported for gauges & counters: %s\n", line) + log.Printf("E! 
[inputs.statsd] Error: +- values are only supported for gauges & counters: %s\n", line)
 return errors.New("Error Parsing statsd line")
 }
 m.additive = true
@@ -584,7 +595,7 @@
 case "g", "ms", "h":
 v, err := strconv.ParseFloat(pipesplit[0], 64)
 if err != nil {
- log.Printf("E! Error: parsing value to float64: %s\n", line)
+ log.Printf("E! [inputs.statsd] Error: parsing value to float64: %s\n", line)
 return errors.New("Error Parsing statsd line")
 }
 m.floatvalue = v
@@ -594,7 +605,7 @@
 if err != nil {
 v2, err2 := strconv.ParseFloat(pipesplit[0], 64)
 if err2 != nil {
- log.Printf("E! Error: parsing value to int64: %s\n", line)
+ log.Printf("E! [inputs.statsd] Error: parsing value to int64: %s\n", line)
 return errors.New("Error Parsing statsd line")
 }
 v = int64(v2)
@@ -622,7 +633,6 @@
 case "h":
 m.tags["metric_type"] = "histogram"
 }
-
 if len(lineTags) > 0 {
 for k, v := range lineTags {
 m.tags[k] = v
@@ -807,7 +817,14 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) {
 s.forget(id)
 s.CurrentConnections.Incr(-1)
 }()
-
+ addr := conn.RemoteAddr()
+ parsedURL, err := url.Parse(addr.String())
+ if err != nil {
+ // this should never happen because the conn handler should give us parsable addresses,
+ // but if it does we will know
+ log.Printf("E! [inputs.statsd] failed to parse %s\n", addr)
+ return // close the connection and return
+ }
 var n int
 scanner := bufio.NewScanner(conn)
 for {
@@ -831,7 +848,7 @@
 b.WriteByte('\n')

 select {
- case s.in <- b:
+ case s.in <- input{Buffer: b, Time: time.Now(), Addr: parsedURL.Host}:
 default:
 s.drops++
 if s.drops == 1 || s.drops%s.AllowedPendingMessages == 0 {
@@ -845,8 +862,8 @@

 // refuser refuses a TCP connection
 func (s *Statsd) refuser(conn *net.TCPConn) {
 conn.Close()
- log.Printf("I! Refused TCP Connection from %s", conn.RemoteAddr())
- log.Printf("I! WARNING: Maximum TCP Connections reached, you may want to" +
+ log.Printf("I! [inputs.statsd] Refused TCP Connection from %s", conn.RemoteAddr())
+ log.Printf("I! [inputs.statsd] WARNING: Maximum TCP Connections reached, you may want to" +
 " adjust max_tcp_connections")
 }
@@ -866,7 +883,7 @@ func (s *Statsd) remember(id string, conn *net.TCPConn) {

 func (s *Statsd) Stop() {
 s.Lock()
- log.Println("I! Stopping the statsd service")
+ log.Println("I! 
[inputs.statsd] Stopping the statsd service") close(s.done) if s.isUDP() { s.UDPlistener.Close() diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index 1e50c8341..4a856902d 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -1,8 +1,6 @@ package statsd import ( - "bytes" - "errors" "fmt" "net" "testing" @@ -17,8 +15,8 @@ const ( testMsg = "test.tcp.msg:100|c" ) -func newTestTcpListener() (*Statsd, chan *bytes.Buffer) { - in := make(chan *bytes.Buffer, 1500) +func newTestTCPListener() (*Statsd, chan input) { + in := make(chan input, 1500) listener := &Statsd{ Protocol: "tcp", ServiceAddress: "localhost:8125", @@ -35,7 +33,7 @@ func NewTestStatsd() *Statsd { // Make data structures s.done = make(chan struct{}) - s.in = make(chan *bytes.Buffer, s.AllowedPendingMessages) + s.in = make(chan input, s.AllowedPendingMessages) s.gauges = make(map[string]cachedgauge) s.counters = make(map[string]cachedcounter) s.sets = make(map[string]cachedset) @@ -189,7 +187,7 @@ func BenchmarkTCP(b *testing.B) { // Valid lines should be parsed and their values should be cached func TestParse_ValidLines(t *testing.T) { s := NewTestStatsd() - valid_lines := []string{ + validLines := []string{ "valid:45|c", "valid:45|s", "valid:45|g", @@ -197,7 +195,7 @@ func TestParse_ValidLines(t *testing.T) { "valid.timer:45|h", } - for _, line := range valid_lines { + for _, line := range validLines { err := s.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) @@ -210,7 +208,7 @@ func TestParse_Gauges(t *testing.T) { s := NewTestStatsd() // Test that gauge +- values work - valid_lines := []string{ + validLines := []string{ "plus.minus:100|g", "plus.minus:-10|g", "plus.minus:+30|g", @@ -228,7 +226,7 @@ func TestParse_Gauges(t *testing.T) { "scientific.notation.minus:4.7E-5|g", } - for _, line := range valid_lines { + for _, line := range validLines { err := s.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) @@ -274,7 +272,7 @@ func TestParse_Gauges(t *testing.T) { } for _, test := range validations { - err := test_validate_gauge(test.name, test.value, s.gauges) + err := testValidateGauge(test.name, test.value, s.gauges) if err != nil { t.Error(err.Error()) } @@ -286,7 +284,7 @@ func TestParse_Sets(t *testing.T) { s := NewTestStatsd() // Test that sets work - valid_lines := []string{ + validLines := []string{ "unique.user.ids:100|s", "unique.user.ids:100|s", "unique.user.ids:100|s", @@ -306,7 +304,7 @@ func TestParse_Sets(t *testing.T) { "string.sets:bar|s", } - for _, line := range valid_lines { + for _, line := range validLines { err := s.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) @@ -336,7 +334,7 @@ func TestParse_Sets(t *testing.T) { } for _, test := range validations { - err := test_validate_set(test.name, test.value, s.sets) + err := testValidateSet(test.name, test.value, s.sets) if err != nil { t.Error(err.Error()) } @@ -348,7 +346,7 @@ func TestParse_Counters(t *testing.T) { s := NewTestStatsd() // Test that counters work - valid_lines := []string{ + validLines := []string{ "small.inc:1|c", "big.inc:100|c", "big.inc:1|c", @@ -363,7 +361,7 @@ func TestParse_Counters(t *testing.T) { "negative.test:-5|c", } - for _, line := range valid_lines { + for _, line := range validLines { err := s.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should 
not have resulted in an error\n", line) @@ -401,7 +399,7 @@ func TestParse_Counters(t *testing.T) { } for _, test := range validations { - err := test_validate_counter(test.name, test.value, s.counters) + err := testValidateCounter(test.name, test.value, s.counters) if err != nil { t.Error(err.Error()) } @@ -415,7 +413,7 @@ func TestParse_Timings(t *testing.T) { acc := &testutil.Accumulator{} // Test that counters work - valid_lines := []string{ + validLines := []string{ "test.timing:1|ms", "test.timing:11|ms", "test.timing:1|ms", @@ -423,7 +421,7 @@ func TestParse_Timings(t *testing.T) { "test.timing:1|ms", } - for _, line := range valid_lines { + for _, line := range validLines { err := s.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) @@ -464,7 +462,7 @@ func TestParseScientificNotation(t *testing.T) { // Invalid lines should return an error func TestParse_InvalidLines(t *testing.T) { s := NewTestStatsd() - invalid_lines := []string{ + invalidLines := []string{ "i.dont.have.a.pipe:45g", "i.dont.have.a.colon45|c", "invalid.metric.type:45|e", @@ -475,7 +473,7 @@ func TestParse_InvalidLines(t *testing.T) { "invalid.value:d11|c", "invalid.value:1d1|c", } - for _, line := range invalid_lines { + for _, line := range invalidLines { err := s.parseStatsdLine(line) if err == nil { t.Errorf("Parsing line %s should have resulted in an error\n", line) @@ -486,21 +484,21 @@ func TestParse_InvalidLines(t *testing.T) { // Invalid sample rates should be ignored and not applied func TestParse_InvalidSampleRate(t *testing.T) { s := NewTestStatsd() - invalid_lines := []string{ + invalidLines := []string{ "invalid.sample.rate:45|c|0.1", "invalid.sample.rate.2:45|c|@foo", "invalid.sample.rate:45|g|@0.1", "invalid.sample.rate:45|s|@0.1", } - for _, line := range invalid_lines { + for _, line := range invalidLines { err := s.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) } } - counter_validations := []struct { + counterValidations := []struct { name string value int64 cache map[string]cachedcounter @@ -517,19 +515,19 @@ func TestParse_InvalidSampleRate(t *testing.T) { }, } - for _, test := range counter_validations { - err := test_validate_counter(test.name, test.value, test.cache) + for _, test := range counterValidations { + err := testValidateCounter(test.name, test.value, test.cache) if err != nil { t.Error(err.Error()) } } - err := test_validate_gauge("invalid_sample_rate", 45, s.gauges) + err := testValidateGauge("invalid_sample_rate", 45, s.gauges) if err != nil { t.Error(err.Error()) } - err = test_validate_set("invalid_sample_rate", 1, s.sets) + err = testValidateSet("invalid_sample_rate", 1, s.sets) if err != nil { t.Error(err.Error()) } @@ -538,12 +536,12 @@ func TestParse_InvalidSampleRate(t *testing.T) { // Names should be parsed like . 
-> _ func TestParse_DefaultNameParsing(t *testing.T) { s := NewTestStatsd() - valid_lines := []string{ + validLines := []string{ "valid:1|c", "valid.foo-bar:11|c", } - for _, line := range valid_lines { + for _, line := range validLines { err := s.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) @@ -565,7 +563,7 @@ func TestParse_DefaultNameParsing(t *testing.T) { } for _, test := range validations { - err := test_validate_counter(test.name, test.value, s.counters) + err := testValidateCounter(test.name, test.value, s.counters) if err != nil { t.Error(err.Error()) } @@ -607,7 +605,7 @@ func TestParse_Template(t *testing.T) { // Validate counters for _, test := range validations { - err := test_validate_counter(test.name, test.value, s.counters) + err := testValidateCounter(test.name, test.value, s.counters) if err != nil { t.Error(err.Error()) } @@ -649,7 +647,7 @@ func TestParse_TemplateFilter(t *testing.T) { // Validate counters for _, test := range validations { - err := test_validate_counter(test.name, test.value, s.counters) + err := testValidateCounter(test.name, test.value, s.counters) if err != nil { t.Error(err.Error()) } @@ -687,7 +685,7 @@ func TestParse_TemplateSpecificity(t *testing.T) { // Validate counters for _, test := range validations { - err := test_validate_counter(test.name, test.value, s.counters) + err := testValidateCounter(test.name, test.value, s.counters) if err != nil { t.Error(err.Error()) } @@ -723,7 +721,7 @@ func TestParse_TemplateFields(t *testing.T) { } } - counter_tests := []struct { + counterTests := []struct { name string value int64 field string @@ -745,14 +743,14 @@ func TestParse_TemplateFields(t *testing.T) { }, } // Validate counters - for _, test := range counter_tests { - err := test_validate_counter(test.name, test.value, s.counters, test.field) + for _, test := range counterTests { + err := testValidateCounter(test.name, test.value, s.counters, test.field) if err != nil { t.Error(err.Error()) } } - gauge_tests := []struct { + gaugeTests := []struct { name string value float64 field string @@ -769,14 +767,14 @@ func TestParse_TemplateFields(t *testing.T) { }, } // Validate gauges - for _, test := range gauge_tests { - err := test_validate_gauge(test.name, test.value, s.gauges, test.field) + for _, test := range gaugeTests { + err := testValidateGauge(test.name, test.value, s.gauges, test.field) if err != nil { t.Error(err.Error()) } } - set_tests := []struct { + setTests := []struct { name string value int64 field string @@ -793,8 +791,8 @@ func TestParse_TemplateFields(t *testing.T) { }, } // Validate sets - for _, test := range set_tests { - err := test_validate_set(test.name, test.value, s.sets, test.field) + for _, test := range setTests { + err := testValidateSet(test.name, test.value, s.sets, test.field) if err != nil { t.Error(err.Error()) } @@ -864,7 +862,7 @@ func TestParse_Tags(t *testing.T) { // Test that DataDog tags are parsed func TestParse_DataDogTags(t *testing.T) { s := NewTestStatsd() - s.ParseDataDogTags = true + s.DataDogExtensions = true lines := []string{ "my_counter:1|c|#host:localhost,environment:prod,endpoint:/:tenant?/oauth/ro", @@ -873,24 +871,28 @@ func TestParse_DataDogTags(t *testing.T) { "my_timer:3|ms|@0.1|#live,host:localhost", } - testTags := map[string]map[string]string{ + expectedTags := map[string]map[string]string{ "my_counter": { "host": "localhost", "environment": "prod", "endpoint": "/:tenant?/oauth/ro", + "metric_type": "counter", }, 
"my_gauge": { - "live": "", + "live": "true", + "metric_type": "gauge", }, "my_set": { - "host": "localhost", + "host": "localhost", + "metric_type": "set", }, "my_timer": { - "live": "", - "host": "localhost", + "live": "true", + "host": "localhost", + "metric_type": "timing", }, } @@ -901,18 +903,16 @@ func TestParse_DataDogTags(t *testing.T) { } } - sourceTags := map[string]map[string]string{ + actualTags := map[string]map[string]string{ "my_gauge": tagsForItem(s.gauges), "my_counter": tagsForItem(s.counters), "my_set": tagsForItem(s.sets), "my_timer": tagsForItem(s.timings), } - - for statName, tags := range testTags { - for k, v := range tags { - otherValue := sourceTags[statName][k] - if sourceTags[statName][k] != v { - t.Errorf("Error with %s, tag %s: %s != %s", statName, k, v, otherValue) + for name, tags := range expectedTags { + for expectedK, expectedV := range tags { + if expectedV != actualTags[name][expectedK] { + t.Errorf("failed: expected: %#v != %#v", tags, actualTags[name]) } } } @@ -945,8 +945,8 @@ func TestParseName(t *testing.T) { s := NewTestStatsd() tests := []struct { - in_name string - out_name string + inName string + outName string }{ { "foobar", @@ -963,9 +963,9 @@ func TestParseName(t *testing.T) { } for _, test := range tests { - name, _, _ := s.parseName(test.in_name) - if name != test.out_name { - t.Errorf("Expected: %s, got %s", test.out_name, name) + name, _, _ := s.parseName(test.inName) + if name != test.outName { + t.Errorf("Expected: %s, got %s", test.outName, name) } } @@ -973,8 +973,8 @@ func TestParseName(t *testing.T) { s.MetricSeparator = "." tests = []struct { - in_name string - out_name string + inName string + outName string }{ { "foobar", @@ -991,9 +991,9 @@ func TestParseName(t *testing.T) { } for _, test := range tests { - name, _, _ := s.parseName(test.in_name) - if name != test.out_name { - t.Errorf("Expected: %s, got %s", test.out_name, name) + name, _, _ := s.parseName(test.inName) + if name != test.outName { + t.Errorf("Expected: %s, got %s", test.outName, name) } } } @@ -1004,12 +1004,12 @@ func TestParse_MeasurementsWithSameName(t *testing.T) { s := NewTestStatsd() // Test that counters work - valid_lines := []string{ + validLines := []string{ "test.counter,host=localhost:1|c", "test.counter,host=localhost,region=west:1|c", } - for _, line := range valid_lines { + for _, line := range validLines { err := s.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) @@ -1024,7 +1024,7 @@ func TestParse_MeasurementsWithSameName(t *testing.T) { // Test that measurements with multiple bits, are treated as different outputs // but are equal to their single-measurement representation func TestParse_MeasurementsWithMultipleValues(t *testing.T) { - single_lines := []string{ + singleLines := []string{ "valid.multiple:0|ms|@0.1", "valid.multiple:0|ms|", "valid.multiple:1|ms", @@ -1050,7 +1050,7 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) { "valid.multiple.mixed:1|g", } - multiple_lines := []string{ + multipleLines := []string{ "valid.multiple:0|ms|@0.1:0|ms|:1|ms", "valid.multiple.duplicate:1|c:1|c:2|c:1|c", "valid.multiple.duplicate:1|h:1|h:2|h:1|h", @@ -1059,28 +1059,28 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) { "valid.multiple.mixed:1|c:1|ms:2|s:1|g", } - s_single := NewTestStatsd() - s_multiple := NewTestStatsd() + sSingle := NewTestStatsd() + sMultiple := NewTestStatsd() - for _, line := range single_lines { - err := s_single.parseStatsdLine(line) + 
for _, line := range singleLines { + err := sSingle.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) } } - for _, line := range multiple_lines { - err := s_multiple.parseStatsdLine(line) + for _, line := range multipleLines { + err := sMultiple.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) } } - if len(s_single.timings) != 3 { - t.Errorf("Expected 3 measurement, found %d", len(s_single.timings)) + if len(sSingle.timings) != 3 { + t.Errorf("Expected 3 measurement, found %d", len(sSingle.timings)) } - if cachedtiming, ok := s_single.timings["metric_type=timingvalid_multiple"]; !ok { + if cachedtiming, ok := sSingle.timings["metric_type=timingvalid_multiple"]; !ok { t.Errorf("Expected cached measurement with hash 'metric_type=timingvalid_multiple' not found") } else { if cachedtiming.name != "valid_multiple" { @@ -1100,60 +1100,60 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) { } } - // test if s_single and s_multiple did compute the same stats for valid.multiple.duplicate - if err := test_validate_set("valid_multiple_duplicate", 2, s_single.sets); err != nil { + // test if sSingle and sMultiple did compute the same stats for valid.multiple.duplicate + if err := testValidateSet("valid_multiple_duplicate", 2, sSingle.sets); err != nil { t.Error(err.Error()) } - if err := test_validate_set("valid_multiple_duplicate", 2, s_multiple.sets); err != nil { + if err := testValidateSet("valid_multiple_duplicate", 2, sMultiple.sets); err != nil { t.Error(err.Error()) } - if err := test_validate_counter("valid_multiple_duplicate", 5, s_single.counters); err != nil { + if err := testValidateCounter("valid_multiple_duplicate", 5, sSingle.counters); err != nil { t.Error(err.Error()) } - if err := test_validate_counter("valid_multiple_duplicate", 5, s_multiple.counters); err != nil { + if err := testValidateCounter("valid_multiple_duplicate", 5, sMultiple.counters); err != nil { t.Error(err.Error()) } - if err := test_validate_gauge("valid_multiple_duplicate", 1, s_single.gauges); err != nil { + if err := testValidateGauge("valid_multiple_duplicate", 1, sSingle.gauges); err != nil { t.Error(err.Error()) } - if err := test_validate_gauge("valid_multiple_duplicate", 1, s_multiple.gauges); err != nil { + if err := testValidateGauge("valid_multiple_duplicate", 1, sMultiple.gauges); err != nil { t.Error(err.Error()) } - // test if s_single and s_multiple did compute the same stats for valid.multiple.mixed - if err := test_validate_set("valid_multiple_mixed", 1, s_single.sets); err != nil { + // test if sSingle and sMultiple did compute the same stats for valid.multiple.mixed + if err := testValidateSet("valid_multiple_mixed", 1, sSingle.sets); err != nil { t.Error(err.Error()) } - if err := test_validate_set("valid_multiple_mixed", 1, s_multiple.sets); err != nil { + if err := testValidateSet("valid_multiple_mixed", 1, sMultiple.sets); err != nil { t.Error(err.Error()) } - if err := test_validate_counter("valid_multiple_mixed", 1, s_single.counters); err != nil { + if err := testValidateCounter("valid_multiple_mixed", 1, sSingle.counters); err != nil { t.Error(err.Error()) } - if err := test_validate_counter("valid_multiple_mixed", 1, s_multiple.counters); err != nil { + if err := testValidateCounter("valid_multiple_mixed", 1, sMultiple.counters); err != nil { t.Error(err.Error()) } - if err := test_validate_gauge("valid_multiple_mixed", 1, s_single.gauges); err != nil { 
+ if err := testValidateGauge("valid_multiple_mixed", 1, sSingle.gauges); err != nil { t.Error(err.Error()) } - if err := test_validate_gauge("valid_multiple_mixed", 1, s_multiple.gauges); err != nil { + if err := testValidateGauge("valid_multiple_mixed", 1, sMultiple.gauges); err != nil { t.Error(err.Error()) } } // Tests low-level functionality of timings when multiple fields is enabled // and a measurement template has been defined which can parse field names -func TestParse_Timings_MultipleFieldsWithTemplate(t *testing.T) { +func TestParse_TimingsMultipleFieldsWithTemplate(t *testing.T) { s := NewTestStatsd() s.Templates = []string{"measurement.field"} s.Percentiles = []int{90} @@ -1204,7 +1204,7 @@ func TestParse_Timings_MultipleFieldsWithTemplate(t *testing.T) { // Tests low-level functionality of timings when multiple fields is enabled // but a measurement template hasn't been defined so we can't parse field names // In this case the behaviour should be the same as normal behaviour -func TestParse_Timings_MultipleFieldsWithoutTemplate(t *testing.T) { +func TestParse_TimingsMultipleFieldsWithoutTemplate(t *testing.T) { s := NewTestStatsd() s.Templates = []string{} s.Percentiles = []int{90} @@ -1420,14 +1420,14 @@ func TestParse_Gauges_Delete(t *testing.T) { t.Errorf("Parsing line %s should not have resulted in an error\n", line) } - err = test_validate_gauge("current_users", 100, s.gauges) + err = testValidateGauge("current_users", 100, s.gauges) if err != nil { t.Error(err.Error()) } s.Gather(fakeacc) - err = test_validate_gauge("current_users", 100, s.gauges) + err = testValidateGauge("current_users", 100, s.gauges) if err == nil { t.Error("current_users_gauge metric should have been deleted") } @@ -1446,14 +1446,14 @@ func TestParse_Sets_Delete(t *testing.T) { t.Errorf("Parsing line %s should not have resulted in an error\n", line) } - err = test_validate_set("unique_user_ids", 1, s.sets) + err = testValidateSet("unique_user_ids", 1, s.sets) if err != nil { t.Error(err.Error()) } s.Gather(fakeacc) - err = test_validate_set("unique_user_ids", 1, s.sets) + err = testValidateSet("unique_user_ids", 1, s.sets) if err == nil { t.Error("unique_user_ids_set metric should have been deleted") } @@ -1472,14 +1472,14 @@ func TestParse_Counters_Delete(t *testing.T) { t.Errorf("Parsing line %s should not have resulted in an error\n", line) } - err = test_validate_counter("total_users", 100, s.counters) + err = testValidateCounter("total_users", 100, s.counters) if err != nil { t.Error(err.Error()) } s.Gather(fakeacc) - err = test_validate_counter("total_users", 100, s.counters) + err = testValidateCounter("total_users", 100, s.counters) if err == nil { t.Error("total_users_counter metric should have been deleted") } @@ -1504,8 +1504,7 @@ func TestParseKeyValue(t *testing.T) { } // Test utility functions - -func test_validate_set( +func testValidateSet( name string, value int64, cache map[string]cachedset, @@ -1527,17 +1526,16 @@ func test_validate_set( } } if !found { - return errors.New(fmt.Sprintf("Test Error: Metric name %s not found\n", name)) + return fmt.Errorf("test Error: Metric name %s not found", name) } if value != int64(len(metric.fields[f])) { - return errors.New(fmt.Sprintf("Measurement: %s, expected %d, actual %d\n", - name, value, len(metric.fields[f]))) + return fmt.Errorf("measurement: %s, expected %d, actual %d", name, value, len(metric.fields[f])) } return nil } -func test_validate_counter( +func testValidateCounter( name string, valueExpected int64, cache 
map[string]cachedcounter,
@@ -1559,17 +1557,16 @@
 }
 }
 if !found {
- return errors.New(fmt.Sprintf("Test Error: Metric name %s not found\n", name))
+ return fmt.Errorf("test Error: Metric name %s not found", name)
 }

 if valueExpected != valueActual {
- return errors.New(fmt.Sprintf("Measurement: %s, expected %d, actual %d\n",
- name, valueExpected, valueActual))
+ return fmt.Errorf("measurement: %s, expected %d, actual %d", name, valueExpected, valueActual)
 }
 return nil
 }

-func test_validate_gauge(
+func testValidateGauge(
 name string,
 valueExpected float64,
 cache map[string]cachedgauge,
@@ -1591,12 +1588,11 @@
 }
 }
 if !found {
- return errors.New(fmt.Sprintf("Test Error: Metric name %s not found\n", name))
+ return fmt.Errorf("test Error: Metric name %s not found", name)
 }
 if valueExpected != valueActual {
- return errors.New(fmt.Sprintf("Measurement: %s, expected %f, actual %f\n",
- name, valueExpected, valueActual))
+ return fmt.Errorf("measurement: %s, expected %f, actual %f", name, valueExpected, valueActual)
 }
 return nil
 }

From 3b91542985a435388ef8623308263b7e14faac1a Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 14 May 2019 16:21:30 -0700
Subject: [PATCH 0839/1815] Update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b1e845e4a..b190d60c6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -41,6 +41,7 @@
 - [#5778](https://github.com/influxdata/telegraf/pull/5778): Add support for log rotation.
 - [#5765](https://github.com/influxdata/telegraf/pull/5765): Support more drive types in smart input.
 - [#5829](https://github.com/influxdata/telegraf/pull/5829): Add support for HTTP basic auth to solr input.
+- [#5791](https://github.com/influxdata/telegraf/pull/5791): Add support for datadog events to statsd input. 
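
As a quick illustration of the tag parser added in the statsd patch above, the sketch below mirrors the `#live,host:localhost`-style cases exercised in `TestParse_DataDogTags`: a value-less tag is stored with the value `"true"`, and only the first `:` outside a value splits key from value. The test name is invented for illustration and is not part of the patch; `parseDataDogTags` itself is the helper defined in `datadog.go` above.

```go
package statsd

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Illustrative sketch only, not part of the patch: value-less datadog
// tags default to "true", and ":" characters inside a value are kept.
func TestParseDataDogTagsSketch(t *testing.T) {
	tags := map[string]string{}
	parseDataDogTags(tags, "live,host:localhost,endpoint:/:tenant?/oauth/ro")
	require.Equal(t, map[string]string{
		"live":     "true",
		"host":     "localhost",
		"endpoint": "/:tenant?/oauth/ro",
	}, tags)
}
```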
#### Bugfixes From 12831f43d4d15000ede21728e4da289643cf6c25 Mon Sep 17 00:00:00 2001 From: omgold Date: Wed, 15 May 2019 21:21:55 +0200 Subject: [PATCH 0840/1815] Allow devices option to match against devlinks (#5817) --- plugins/inputs/diskio/diskio.go | 42 ++++++++++++++++------ plugins/inputs/diskio/diskio_linux_test.go | 3 +- 2 files changed, 34 insertions(+), 11 deletions(-) diff --git a/plugins/inputs/diskio/diskio.go b/plugins/inputs/diskio/diskio.go index e0c6243bb..053765b4e 100644 --- a/plugins/inputs/diskio/diskio.go +++ b/plugins/inputs/diskio/diskio.go @@ -103,15 +103,32 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error { } for _, io := range diskio { - if s.deviceFilter != nil && !s.deviceFilter.Match(io.Name) { - continue + + match := false + if s.deviceFilter != nil && s.deviceFilter.Match(io.Name) { + match = true } tags := map[string]string{} - tags["name"] = s.diskName(io.Name) + var devLinks []string + tags["name"], devLinks = s.diskName(io.Name) + + if s.deviceFilter != nil && !match { + for _, devLink := range devLinks { + if s.deviceFilter.Match(devLink) { + match = true + break + } + } + if !match { + continue + } + } + for t, v := range s.diskTags(io.Name) { tags[t] = v } + if !s.SkipSerialNumber { if len(io.SerialNumber) != 0 { tags["serial"] = io.SerialNumber @@ -137,15 +154,20 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error { return nil } -func (s *DiskIO) diskName(devName string) string { - if len(s.NameTemplates) == 0 { - return devName +func (s *DiskIO) diskName(devName string) (string, []string) { + di, err := s.diskInfo(devName) + devLinks := strings.Split(di["DEVLINKS"], " ") + for i, devLink := range devLinks { + devLinks[i] = strings.TrimPrefix(devLink, "/dev/") + } + + if len(s.NameTemplates) == 0 { + return devName, devLinks } - di, err := s.diskInfo(devName) if err != nil { log.Printf("W! Error gathering disk info: %s", err) - return devName + return devName, devLinks } for _, nt := range s.NameTemplates { @@ -163,11 +185,11 @@ func (s *DiskIO) diskName(devName string) string { }) if !miss { - return name + return name, devLinks } } - return devName + return devName, devLinks } func (s *DiskIO) diskTags(devName string) map[string]string { diff --git a/plugins/inputs/diskio/diskio_linux_test.go b/plugins/inputs/diskio/diskio_linux_test.go index 9e79be165..1cb031bdc 100644 --- a/plugins/inputs/diskio/diskio_linux_test.go +++ b/plugins/inputs/diskio/diskio_linux_test.go @@ -88,7 +88,8 @@ func TestDiskIOStats_diskName(t *testing.T) { s := DiskIO{ NameTemplates: tc.templates, } - assert.Equal(t, tc.expected, s.diskName("null"), "Templates: %#v", tc.templates) + name, _ := s.diskName("null") + assert.Equal(t, tc.expected, name, "Templates: %#v", tc.templates) } } From d645e0303f771746d911ae7e63047a01753162c6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 15 May 2019 12:22:33 -0700 Subject: [PATCH 0841/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b190d60c6..67b221049 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,6 +42,7 @@ - [#5765](https://github.com/influxdata/telegraf/pull/5765): Support more drive types in smart input. - [#5829](https://github.com/influxdata/telegraf/pull/5829): Add support for HTTP basic auth to solr input. - [#5791](https://github.com/influxdata/telegraf/pull/5791): Add support for datadog events to statsd input. +- [#5817](https://github.com/influxdata/telegraf/pull/5817): Allow devices option to match against devlinks. 
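
To summarize the diskio change above: a device now passes the filter if either its kernel name or any of its udev devlinks (with the `/dev/` prefix trimmed) matches. A standalone sketch of that ordering follows; `matchesDevice` is a hypothetical helper written for illustration, not part of the patch, which keeps the filter logic the patch adds to `Gather` and `diskName`.

```go
package main

import (
	"fmt"
	"strings"
)

// matchesDevice mirrors the filter logic added to Gather: try the kernel
// name first, then each DEVLINKS entry with its "/dev/" prefix trimmed.
func matchesDevice(match func(string) bool, name, devlinks string) bool {
	if match(name) {
		return true
	}
	for _, link := range strings.Split(devlinks, " ") {
		if match(strings.TrimPrefix(link, "/dev/")) {
			return true
		}
	}
	return false
}

func main() {
	byLabel := func(s string) bool { return s == "disk/by-label/data" }
	// "sda1" itself does not match, but its by-label devlink does.
	fmt.Println(matchesDevice(byLabel, "sda1", "/dev/disk/by-label/data /dev/disk/by-uuid/abcd"))
}
```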
#### Bugfixes

From a724bf487f7c46d5e5b97ea8abd02e2480643aa1 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 15 May 2019 14:46:28 -0700
Subject: [PATCH 0842/1815] Add final aggregator (#5820)

---
 Gopkg.lock | 2 +
 plugins/aggregators/all/all.go | 1 +
 plugins/aggregators/final/README.md | 48 ++++++++
 plugins/aggregators/final/final.go | 72 ++++++++++++
 plugins/aggregators/final/final_test.go | 144 ++++++++++++++++++++++++
 testutil/metric.go | 87 +++++++++++++-
 testutil/metric_test.go | 52 ++++++++-
 7 files changed, 402 insertions(+), 4 deletions(-)
 create mode 100644 plugins/aggregators/final/README.md
 create mode 100644 plugins/aggregators/final/final.go
 create mode 100644 plugins/aggregators/final/final_test.go

diff --git a/Gopkg.lock b/Gopkg.lock
index 505bbaa39..76c5deb62 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -519,6 +519,7 @@
 name = "github.com/google/go-cmp"
 packages = [
 "cmp",
+ "cmp/cmpopts",
 "cmp/internal/diff",
 "cmp/internal/function",
 "cmp/internal/value",
@@ -1585,6 +1586,7 @@
 "github.com/golang/protobuf/ptypes/empty",
 "github.com/golang/protobuf/ptypes/timestamp",
 "github.com/google/go-cmp/cmp",
+ "github.com/google/go-cmp/cmp/cmpopts",
 "github.com/google/go-github/github",
 "github.com/gorilla/mux",
 "github.com/harlow/kinesis-consumer",

diff --git a/plugins/aggregators/all/all.go b/plugins/aggregators/all/all.go
index ff1bbfc70..ec04c0aaf 100644
--- a/plugins/aggregators/all/all.go
+++ b/plugins/aggregators/all/all.go
@@ -2,6 +2,7 @@ package all

 import (
 _ "github.com/influxdata/telegraf/plugins/aggregators/basicstats"
+ _ "github.com/influxdata/telegraf/plugins/aggregators/final"
 _ "github.com/influxdata/telegraf/plugins/aggregators/histogram"
 _ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
 _ "github.com/influxdata/telegraf/plugins/aggregators/valuecounter"

diff --git a/plugins/aggregators/final/README.md b/plugins/aggregators/final/README.md
new file mode 100644
index 000000000..444746d78
--- /dev/null
+++ b/plugins/aggregators/final/README.md
@@ -0,0 +1,48 @@
+# Final Aggregator Plugin
+
+The final aggregator emits the last metric of a contiguous series. A
+contiguous series is defined as a series which receives updates within the
+time period in `series_timeout`. The contiguous series may be longer than the
+time interval defined by `period`.
+
+This is useful for getting the final value for data sources that produce
+discrete time series such as procstat, cgroup, kubernetes, etc.
+
+When a series has not been updated within the time defined in
+`series_timeout`, the last metric is emitted with the `_final` suffix appended.
+
+### Configuration
+
+```toml
+[[aggregators.final]]
+ ## The period on which to flush & clear the aggregator.
+ period = "30s"
+ ## If true, the original metric will be dropped by the
+ ## aggregator and will not get sent to the output plugins.
+ drop_original = false
+
+ ## The time that a series is not updated until considering it final.
+ series_timeout = "5m"
+```
+
+### Metrics
+
+Measurement and tags are unchanged, fields are emitted with the suffix
+`_final`. 
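
A minimal sketch of the flush rule this README describes (the helper name is illustrative; the actual check in `final.go` below is `time.Since(metric.Time()) > m.SeriesTimeout.Duration`):

```go
package main

import (
	"fmt"
	"time"
)

// isFinal sketches when a series is flushed: once its most recent point
// is older than series_timeout, its fields are emitted with "_final".
func isFinal(lastUpdate time.Time, seriesTimeout time.Duration) bool {
	return time.Since(lastUpdate) > seriesTimeout
}

func main() {
	fmt.Println(isFinal(time.Now().Add(-6*time.Minute), 5*time.Minute))  // true
	fmt.Println(isFinal(time.Now().Add(-90*time.Second), 5*time.Minute)) // false
}
```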
+ +### Example Output + +``` +counter,host=bar i_final=3,j_final=6 1554281635115090133 +counter,host=foo i_final=3,j_final=6 1554281635112992012 +``` + +Original input: +``` +counter,host=bar i=1,j=4 1554281633101153300 +counter,host=foo i=1,j=4 1554281633099323601 +counter,host=bar i=2,j=5 1554281634107980073 +counter,host=foo i=2,j=5 1554281634105931116 +counter,host=bar i=3,j=6 1554281635115090133 +counter,host=foo i=3,j=6 1554281635112992012 +``` diff --git a/plugins/aggregators/final/final.go b/plugins/aggregators/final/final.go new file mode 100644 index 000000000..53ad0a47c --- /dev/null +++ b/plugins/aggregators/final/final.go @@ -0,0 +1,72 @@ +package final + +import ( + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/aggregators" +) + +var sampleConfig = ` + ## The period on which to flush & clear the aggregator. + period = "30s" + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = false + + ## The time that a series is not updated until considering it final. + series_timeout = "5m" +` + +type Final struct { + SeriesTimeout internal.Duration `toml:"series_timeout"` + + // The last metric for all series which are active + metricCache map[uint64]telegraf.Metric +} + +func NewFinal() *Final { + return &Final{ + SeriesTimeout: internal.Duration{Duration: 5 * time.Minute}, + metricCache: make(map[uint64]telegraf.Metric), + } +} + +func (m *Final) SampleConfig() string { + return sampleConfig +} + +func (m *Final) Description() string { + return "Report the final metric of a series" +} + +func (m *Final) Add(in telegraf.Metric) { + id := in.HashID() + m.metricCache[id] = in +} + +func (m *Final) Push(acc telegraf.Accumulator) { + // Preserve timestamp of original metric + acc.SetPrecision(time.Nanosecond) + + for id, metric := range m.metricCache { + if time.Since(metric.Time()) > m.SeriesTimeout.Duration { + fields := map[string]interface{}{} + for _, field := range metric.FieldList() { + fields[field.Key+"_final"] = field.Value + } + acc.AddFields(metric.Name(), fields, metric.Tags(), metric.Time()) + delete(m.metricCache, id) + } + } +} + +func (m *Final) Reset() { +} + +func init() { + aggregators.Add("final", func() telegraf.Aggregator { + return NewFinal() + }) +} diff --git a/plugins/aggregators/final/final_test.go b/plugins/aggregators/final/final_test.go new file mode 100644 index 000000000..1b3367fa5 --- /dev/null +++ b/plugins/aggregators/final/final_test.go @@ -0,0 +1,144 @@ +package final + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" +) + +func TestSimple(t *testing.T) { + acc := testutil.Accumulator{} + final := NewFinal() + + tags := map[string]string{"foo": "bar"} + m1, _ := metric.New("m1", + tags, + map[string]interface{}{"a": int64(1)}, + time.Unix(1530939936, 0)) + m2, _ := metric.New("m1", + tags, + map[string]interface{}{"a": int64(2)}, + time.Unix(1530939937, 0)) + m3, _ := metric.New("m1", + tags, + map[string]interface{}{"a": int64(3)}, + time.Unix(1530939938, 0)) + final.Add(m1) + final.Add(m2) + final.Add(m3) + final.Push(&acc) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "m1", + tags, + map[string]interface{}{ + "a_final": 3, + }, + time.Unix(1530939938, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, 
		acc.GetTelegrafMetrics())
+}
+
+func TestTwoTags(t *testing.T) {
+	acc := testutil.Accumulator{}
+	final := NewFinal()
+
+	tags1 := map[string]string{"foo": "bar"}
+	tags2 := map[string]string{"foo": "baz"}
+
+	m1, _ := metric.New("m1",
+		tags1,
+		map[string]interface{}{"a": int64(1)},
+		time.Unix(1530939936, 0))
+	m2, _ := metric.New("m1",
+		tags2,
+		map[string]interface{}{"a": int64(2)},
+		time.Unix(1530939937, 0))
+	m3, _ := metric.New("m1",
+		tags1,
+		map[string]interface{}{"a": int64(3)},
+		time.Unix(1530939938, 0))
+	final.Add(m1)
+	final.Add(m2)
+	final.Add(m3)
+	final.Push(&acc)
+
+	expected := []telegraf.Metric{
+		testutil.MustMetric(
+			"m1",
+			tags2,
+			map[string]interface{}{
+				"a_final": 2,
+			},
+			time.Unix(1530939937, 0),
+		),
+		testutil.MustMetric(
+			"m1",
+			tags1,
+			map[string]interface{}{
+				"a_final": 3,
+			},
+			time.Unix(1530939938, 0),
+		),
+	}
+	testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.SortMetrics())
+}
+
+func TestLongDifference(t *testing.T) {
+	acc := testutil.Accumulator{}
+	final := NewFinal()
+	final.SeriesTimeout = internal.Duration{Duration: 30 * time.Second}
+	tags := map[string]string{"foo": "bar"}
+
+	now := time.Now()
+
+	m1, _ := metric.New("m",
+		tags,
+		map[string]interface{}{"a": int64(1)},
+		now.Add(time.Second*-290))
+	m2, _ := metric.New("m",
+		tags,
+		map[string]interface{}{"a": int64(2)},
+		now.Add(time.Second*-275))
+	m3, _ := metric.New("m",
+		tags,
+		map[string]interface{}{"a": int64(3)},
+		now.Add(time.Second*-100))
+	m4, _ := metric.New("m",
+		tags,
+		map[string]interface{}{"a": int64(4)},
+		now.Add(time.Second*-20))
+	final.Add(m1)
+	final.Add(m2)
+	final.Push(&acc)
+	final.Add(m3)
+	final.Push(&acc)
+	final.Add(m4)
+	final.Push(&acc)
+
+	expected := []telegraf.Metric{
+		testutil.MustMetric(
+			"m",
+			tags,
+			map[string]interface{}{
+				"a_final": 2,
+			},
+			now.Add(time.Second*-275),
+		),
+		testutil.MustMetric(
+			"m",
+			tags,
+			map[string]interface{}{
+				"a_final": 3,
+			},
+			now.Add(time.Second*-100),
+		),
+	}
+	testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.SortMetrics())
+}
diff --git a/testutil/metric.go b/testutil/metric.go
index afb3de7fe..b92c724f1 100644
--- a/testutil/metric.go
+++ b/testutil/metric.go
@@ -1,11 +1,13 @@
 package testutil
 
 import (
+	"reflect"
 	"sort"
 	"testing"
 	"time"
 
 	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/metric"
 )
@@ -18,6 +20,77 @@ type metricDiff struct {
 	Time time.Time
 }
 
+func lessFunc(lhs, rhs *metricDiff) bool {
+	if lhs.Measurement != rhs.Measurement {
+		return lhs.Measurement < rhs.Measurement
+	}
+
+	for i := 0; ; i++ {
+		if i >= len(lhs.Tags) && i >= len(rhs.Tags) {
+			break
+		} else if i >= len(lhs.Tags) {
+			return true
+		} else if i >= len(rhs.Tags) {
+			return false
+		}
+
+		if lhs.Tags[i].Key != rhs.Tags[i].Key {
+			return lhs.Tags[i].Key < rhs.Tags[i].Key
+		}
+		if lhs.Tags[i].Value != rhs.Tags[i].Value {
+			return lhs.Tags[i].Value < rhs.Tags[i].Value
+		}
+	}
+
+	for i := 0; ; i++ {
+		if i >= len(lhs.Fields) && i >= len(rhs.Fields) {
+			break
+		} else if i >= len(lhs.Fields) {
+			return true
+		} else if i >= len(rhs.Fields) {
+			return false
+		}
+
+		if lhs.Fields[i].Key != rhs.Fields[i].Key {
+			return lhs.Fields[i].Key < rhs.Fields[i].Key
+		}
+
+		if lhs.Fields[i].Value != rhs.Fields[i].Value {
+			ltype := reflect.TypeOf(lhs.Fields[i].Value)
+			rtype := reflect.TypeOf(rhs.Fields[i].Value)
+
+			if ltype.Kind() != rtype.Kind() {
+				return ltype.Kind() < rtype.Kind()
+			}
+
+			switch v := lhs.Fields[i].Value.(type) {
+			case int64:
+				return v < rhs.Fields[i].Value.(int64)
+			case uint64:
+				return v < rhs.Fields[i].Value.(uint64)
+			case float64:
+				return v < rhs.Fields[i].Value.(float64)
+			case string:
+				return v < rhs.Fields[i].Value.(string)
+			case bool:
+				return !v
+			default:
+				panic("unknown type")
+			}
+		}
+	}
+
+	if lhs.Type != rhs.Type {
+		return lhs.Type < rhs.Type
+	}
+
+	if lhs.Time.UnixNano() != rhs.Time.UnixNano() {
+		return lhs.Time.UnixNano() < rhs.Time.UnixNano()
+	}
+
+	return false
+}
+
 func newMetricDiff(metric telegraf.Metric) *metricDiff {
 	if metric == nil {
 		return nil
@@ -45,6 +118,12 @@ func newMetricDiff(metric telegraf.Metric) *metricDiff {
 	return m
 }
 
+// SortMetrics enables sorting metrics before comparison.
+func SortMetrics() cmp.Option {
+	return cmpopts.SortSlices(lessFunc)
+}
+
+// MetricEqual returns true if the metrics are equal.
 func MetricEqual(expected, actual telegraf.Metric) bool {
 	var lhs, rhs *metricDiff
 	if expected != nil {
@@ -57,6 +136,8 @@ func MetricEqual(expected, actual telegraf.Metric) bool {
 	return cmp.Equal(lhs, rhs)
 }
 
+// RequireMetricEqual halts the test with an error if the metrics are not
+// equal.
 func RequireMetricEqual(t *testing.T, expected, actual telegraf.Metric) {
 	t.Helper()
 
@@ -73,7 +154,9 @@ func RequireMetricEqual(t *testing.T, expected, actual telegraf.Metric) {
 	}
 }
 
-func RequireMetricsEqual(t *testing.T, expected, actual []telegraf.Metric) {
+// RequireMetricsEqual halts the test with an error if the arrays of metrics
+// are not equal.
+func RequireMetricsEqual(t *testing.T, expected, actual []telegraf.Metric, opts ...cmp.Option) {
 	t.Helper()
 
 	lhs := make([]*metricDiff, 0, len(expected))
@@ -84,7 +167,7 @@ func RequireMetricsEqual(t *testing.T, expected, actual []telegraf.Metric) {
 	for _, m := range actual {
 		rhs = append(rhs, newMetricDiff(m))
 	}
-	if diff := cmp.Diff(lhs, rhs); diff != "" {
+	if diff := cmp.Diff(lhs, rhs, opts...); diff != "" {
 		t.Fatalf("[]telegraf.Metric\n--- expected\n+++ actual\n%s", diff)
 	}
 }
diff --git a/testutil/metric_test.go b/testutil/metric_test.go
index 5b5ef01f4..0c999185a 100644
--- a/testutil/metric_test.go
+++ b/testutil/metric_test.go
@@ -4,18 +4,19 @@ import (
 	"testing"
 	"time"
 
+	"github.com/google/go-cmp/cmp"
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/metric"
 )
 
-func TestRequireMetricsEqual(t *testing.T) {
+func TestRequireMetricEqual(t *testing.T) {
 	tests := []struct {
 		name string
 		got  telegraf.Metric
 		want telegraf.Metric
 	}{
 		{
-			name: "telegraf and testutil metrics should be equal",
+			name: "equal metrics should be equal",
 			got: func() telegraf.Metric {
 				m, _ := metric.New(
 					"test",
@@ -56,3 +57,50 @@ func TestRequireMetricsEqual(t *testing.T) {
 		})
 	}
 }
+
+func TestRequireMetricsEqual(t *testing.T) {
+	tests := []struct {
+		name string
+		got  []telegraf.Metric
+		want []telegraf.Metric
+		opts []cmp.Option
+	}{
+		{
+			name: "sort metrics option sorts by name",
+			got: []telegraf.Metric{
+				MustMetric(
+					"cpu",
+					map[string]string{},
+					map[string]interface{}{},
+					time.Unix(0, 0),
+				),
+				MustMetric(
+					"net",
+					map[string]string{},
+					map[string]interface{}{},
+					time.Unix(0, 0),
+				),
+			},
+			want: []telegraf.Metric{
+				MustMetric(
+					"net",
+					map[string]string{},
+					map[string]interface{}{},
+					time.Unix(0, 0),
+				),
+				MustMetric(
+					"cpu",
+					map[string]string{},
+					map[string]interface{}{},
+					time.Unix(0, 0),
+				),
+			},
+			opts: []cmp.Option{SortMetrics()},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			RequireMetricsEqual(t,
tt.want, tt.got, tt.opts...) + }) + } +} From 1b3ca655c60f5c576610de6d73f0d1b8e48a1680 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 15 May 2019 14:50:25 -0700 Subject: [PATCH 0843/1815] Update changelog and readme --- CHANGELOG.md | 4 ++++ README.md | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 67b221049..35200c279 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,10 @@ - [github](/plugins/inputs/github/README.md) - Contributed by @influxdata - [powerdns_recursor](/plugins/inputs/powerdns_recursor/README.md) - Contributed by @dupondje +#### New Aggregators + +- [final](/plugins/aggregators/final/README.md) - Contributed by @oplehto + #### New Outputs - [syslog](/plugins/outputs/syslog/README.md) - Contributed by @javicrespo diff --git a/README.md b/README.md index 3f4a70d13..1bd96896b 100644 --- a/README.md +++ b/README.md @@ -333,8 +333,9 @@ For documentation on the latest development code see the [documentation index][d ## Aggregator Plugins * [basicstats](./plugins/aggregators/basicstats) -* [minmax](./plugins/aggregators/minmax) +* [final](./plugins/aggregators/final) * [histogram](./plugins/aggregators/histogram) +* [minmax](./plugins/aggregators/minmax) * [valuecounter](./plugins/aggregators/valuecounter) ## Output Plugins From 43c6d13c33f2dc314d2b7f13ed1ca8616be93db6 Mon Sep 17 00:00:00 2001 From: Charlie Vieth Date: Wed, 15 May 2019 19:35:07 -0400 Subject: [PATCH 0844/1815] Refactor templateSpec less method (#5840) --- internal/templating/template.go | 17 ++++++----------- internal/templating/template_test.go | 14 ++++++++++++++ 2 files changed, 20 insertions(+), 11 deletions(-) create mode 100644 internal/templating/template_test.go diff --git a/internal/templating/template.go b/internal/templating/template.go index 472bd2686..235d2f2a5 100644 --- a/internal/templating/template.go +++ b/internal/templating/template.go @@ -124,21 +124,16 @@ type templateSpecs []templateSpec // Less reports whether the element with // index j should sort before the element with index k. func (e templateSpecs) Less(j, k int) bool { - if len(e[j].filter) == 0 && len(e[k].filter) == 0 { - jlength := len(strings.Split(e[j].template, e[j].separator)) - klength := len(strings.Split(e[k].template, e[k].separator)) - return jlength < klength - } - if len(e[j].filter) == 0 { + jlen := len(e[j].filter) + klen := len(e[k].filter) + if jlen == 0 && klen != 0 { return true } - if len(e[k].filter) == 0 { + if klen == 0 && jlen != 0 { return false } - - jlength := len(strings.Split(e[j].template, e[j].separator)) - klength := len(strings.Split(e[k].template, e[k].separator)) - return jlength < klength + return strings.Count(e[j].template, e[j].separator) < + strings.Count(e[k].template, e[k].separator) } // Swap swaps the elements with indexes i and j. 
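The refactor above relies on the identity that, for a non-empty separator, `strings.Count(s, sep) == len(strings.Split(s, sep)) - 1`, so ordering template specs by separator count matches the old ordering by split length while skipping the slice allocation that `strings.Split` performs on every comparison. A minimal sketch of that equivalence (the sample templates are invented for illustration, not taken from the patch):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, tmpl := range []string{"aa", "aa|bb|cc", "aa|bb|cc|dd|ee|ff"} {
		splitLen := len(strings.Split(tmpl, "|")) // allocates a []string per call
		sepCount := strings.Count(tmpl, "|")      // counts separators, no allocation
		// len(Split) == Count+1 for any non-empty separator, so sorting by
		// either quantity produces the same order.
		fmt.Println(tmpl, splitLen, sepCount+1, splitLen == sepCount+1)
	}
}
```

This is also why the benchmark added in template_test.go below exercises only `Less`: the win is avoiding per-comparison allocations while sorting.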
diff --git a/internal/templating/template_test.go b/internal/templating/template_test.go new file mode 100644 index 000000000..0a1aae5bc --- /dev/null +++ b/internal/templating/template_test.go @@ -0,0 +1,14 @@ +package templating + +import "testing" + +func BenchmarkTemplateLess(b *testing.B) { + a := templateSpec{ + template: "aa|bb|cc|dd|ee|ff", + separator: "|", + } + specs := templateSpecs{a, a} + for i := 0; i < b.N; i++ { + specs.Less(0, 1) + } +} From 10fd5b35f0fc5dc2e91126a377d770ab45894726 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Thu, 16 May 2019 16:59:19 -0600 Subject: [PATCH 0845/1815] Support tags in enum processor (#5855) --- plugins/processors/enum/README.md | 17 +++++----- plugins/processors/enum/enum.go | 47 +++++++++++++++++++++++----- plugins/processors/enum/enum_test.go | 19 +++++++++++ 3 files changed, 68 insertions(+), 15 deletions(-) diff --git a/plugins/processors/enum/README.md b/plugins/processors/enum/README.md index 20c6110a1..29821e83d 100644 --- a/plugins/processors/enum/README.md +++ b/plugins/processors/enum/README.md @@ -1,13 +1,13 @@ # Enum Processor Plugin -The Enum Processor allows the configuration of value mappings for metric fields. +The Enum Processor allows the configuration of value mappings for metric tags or fields. The main use-case for this is to rewrite status codes such as _red_, _amber_ and _green_ by numeric values such as 0, 1, 2. The plugin supports string and bool -types for the field values. Multiple Fields can be configured with separate -value mappings for each field. Default mapping values can be configured to be +types for the field values. Multiple tags or fields can be configured with separate +value mappings for each. Default mapping values can be configured to be used for all values, which are not contained in the value_mappings. The -processor supports explicit configuration of a destination field. By default the -source field is overwritten. +processor supports explicit configuration of a destination tag or field. By default the +source tag or field is overwritten. ### Configuration: @@ -17,8 +17,11 @@ source field is overwritten. ## Name of the field to map field = "status" - ## Destination field to be used for the mapped value. By default the source - ## field is used, overwriting the original value. + ## Name of the tag to map + # tag = "status" + + ## Destination tag or field to be used for the mapped value. By default the + ## source tag or field is used, overwriting the original value. dest = "status_code" ## Default value to be used for all values not contained in the mapping diff --git a/plugins/processors/enum/enum.go b/plugins/processors/enum/enum.go index b08307f09..427b7fb43 100644 --- a/plugins/processors/enum/enum.go +++ b/plugins/processors/enum/enum.go @@ -1,6 +1,7 @@ package enum import ( + "fmt" "strconv" "github.com/influxdata/telegraf" @@ -12,9 +13,12 @@ var sampleConfig = ` ## Name of the field to map field = "status" - ## Destination field to be used for the mapped value. By default the source - ## field is used, overwriting the original value. - # dest = "status_code" + ## Name of the tag to map + # tag = "status" + + ## Destination tag or field to be used for the mapped value. By default the + ## source tag or field is used, overwriting the original value. + dest = "status_code" ## Default value to be used for all values not contained in the mapping ## table. 
When unset, the unmodified value for the field will be used if no @@ -24,7 +28,7 @@ var sampleConfig = ` ## Table of mappings [processors.enum.mapping.value_mappings] green = 1 - yellow = 2 + amber = 2 red = 3 ` @@ -33,6 +37,7 @@ type EnumMapper struct { } type Mapping struct { + Tag string Field string Dest string Default interface{} @@ -56,10 +61,24 @@ func (mapper *EnumMapper) Apply(in ...telegraf.Metric) []telegraf.Metric { func (mapper *EnumMapper) applyMappings(metric telegraf.Metric) telegraf.Metric { for _, mapping := range mapper.Mappings { - if originalValue, isPresent := metric.GetField(mapping.Field); isPresent == true { - if adjustedValue, isString := adjustBoolValue(originalValue).(string); isString == true { - if mappedValue, isMappedValuePresent := mapping.mapValue(adjustedValue); isMappedValuePresent == true { - writeField(metric, mapping.getDestination(), mappedValue) + if mapping.Field != "" { + if originalValue, isPresent := metric.GetField(mapping.Field); isPresent { + if adjustedValue, isString := adjustBoolValue(originalValue).(string); isString { + if mappedValue, isMappedValuePresent := mapping.mapValue(adjustedValue); isMappedValuePresent { + writeField(metric, mapping.getDestination(), mappedValue) + } + } + } + } + if mapping.Tag != "" { + if originalValue, isPresent := metric.GetTag(mapping.Tag); isPresent { + if mappedValue, isMappedValuePresent := mapping.mapValue(originalValue); isMappedValuePresent { + switch val := mappedValue.(type) { + case string: + writeTag(metric, mapping.getDestinationTag(), val) + default: + writeTag(metric, mapping.getDestinationTag(), fmt.Sprintf("%v", val)) + } } } } @@ -91,11 +110,23 @@ func (mapping *Mapping) getDestination() string { return mapping.Field } +func (mapping *Mapping) getDestinationTag() string { + if mapping.Dest != "" { + return mapping.Dest + } + return mapping.Tag +} + func writeField(metric telegraf.Metric, name string, value interface{}) { metric.RemoveField(name) metric.AddField(name, value) } +func writeTag(metric telegraf.Metric, name string, value string) { + metric.RemoveTag(name) + metric.AddTag(name, value) +} + func init() { processors.Add("enum", func() telegraf.Processor { return &EnumMapper{} diff --git a/plugins/processors/enum/enum_test.go b/plugins/processors/enum/enum_test.go index d8c0e26de..06204523d 100644 --- a/plugins/processors/enum/enum_test.go +++ b/plugins/processors/enum/enum_test.go @@ -27,12 +27,23 @@ func calculateProcessedValues(mapper EnumMapper, metric telegraf.Metric) map[str return processed[0].Fields() } +func calculateProcessedTags(mapper EnumMapper, metric telegraf.Metric) map[string]string { + processed := mapper.Apply(metric) + return processed[0].Tags() +} + func assertFieldValue(t *testing.T, expected interface{}, field string, fields map[string]interface{}) { value, present := fields[field] assert.True(t, present, "value of field '"+field+"' was not present") assert.EqualValues(t, expected, value) } +func assertTagValue(t *testing.T, expected interface{}, tag string, tags map[string]string) { + value, present := tags[tag] + assert.True(t, present, "value of tag '"+tag+"' was not present") + assert.EqualValues(t, expected, value) +} + func TestRetainsMetric(t *testing.T) { mapper := EnumMapper{} source := createTestMetric() @@ -56,6 +67,14 @@ func TestMapsSingleStringValue(t *testing.T) { assertFieldValue(t, 1, "string_value", fields) } +func TestMapsSingleStringValueTag(t *testing.T) { + mapper := EnumMapper{Mappings: []Mapping{{Tag: "tag", ValueMappings: 
map[string]interface{}{"tag_value": "valuable"}}}} + + tags := calculateProcessedTags(mapper, createTestMetric()) + + assertTagValue(t, "valuable", "tag", tags) +} + func TestNoFailureOnMappingsOnNonStringValuedFields(t *testing.T) { mapper := EnumMapper{Mappings: []Mapping{{Field: "int_value", ValueMappings: map[string]interface{}{"13i": int64(7)}}}} From f76cca3af5f31661ba7d9fcd332899e4bb013dd6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 16 May 2019 16:00:31 -0700 Subject: [PATCH 0846/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 35200c279..2265ec838 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,7 @@ - [#5829](https://github.com/influxdata/telegraf/pull/5829): Add support for HTTP basic auth to solr input. - [#5791](https://github.com/influxdata/telegraf/pull/5791): Add support for datadog events to statsd input. - [#5817](https://github.com/influxdata/telegraf/pull/5817): Allow devices option to match against devlinks. +- [#5855](https://github.com/influxdata/telegraf/pull/5855): Support tags in enum processor. #### Bugfixes From 633a468ff960710efeb455a5d84d222dd8b7d6b2 Mon Sep 17 00:00:00 2001 From: Jon McKenzie Date: Fri, 17 May 2019 14:02:22 -0400 Subject: [PATCH 0847/1815] Improve Docker image identifier parsing (#5838) --- plugins/inputs/docker/docker.go | 45 ++++++++++++++++++------ plugins/inputs/docker/docker_test.go | 51 ++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+), 11 deletions(-) diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 6d9d56372..10759fc3e 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -346,6 +346,39 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { return nil } +func parseImage(image string) (string, string) { + // Adapts some of the logic from the actual Docker library's image parsing + // routines: + // https://github.com/docker/distribution/blob/release/2.7/reference/normalize.go + domain := "" + remainder := "" + + i := strings.IndexRune(image, '/') + + if i == -1 || (!strings.ContainsAny(image[:i], ".:") && image[:i] != "localhost") { + remainder = image + } else { + domain, remainder = image[:i], image[i+1:] + } + + imageName := "" + imageVersion := "unknown" + + i = strings.LastIndex(remainder, ":") + if i > -1 { + imageVersion = remainder[i+1:] + imageName = remainder[:i] + } else { + imageName = remainder + } + + if domain != "" { + imageName = domain + "/" + imageName + } + + return imageName, imageVersion +} + func (d *Docker) gatherContainer( container types.Container, acc telegraf.Accumulator, @@ -366,17 +399,7 @@ func (d *Docker) gatherContainer( return nil } - // the image name sometimes has a version part, or a private repo - // ie, rabbitmq:3-management or docker.someco.net:4443/rabbitmq:3-management - imageName := "" - imageVersion := "unknown" - i := strings.LastIndex(container.Image, ":") // index of last ':' character - if i > -1 { - imageVersion = container.Image[i+1:] - imageName = container.Image[:i] - } else { - imageName = container.Image - } + imageName, imageVersion := parseImage(container.Image) tags := map[string]string{ "engine_host": d.engine_host, diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index ac95b5ccd..9209c6008 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -815,3 +815,54 @@ func TestContainerName(t *testing.T) { }) } } + +func 
TestParseImage(t *testing.T) { + tests := []struct { + image string + parsedName string + parsedVersion string + }{ + { + image: "postgres", + parsedName: "postgres", + parsedVersion: "unknown", + }, + { + image: "postgres:latest", + parsedName: "postgres", + parsedVersion: "latest", + }, + { + image: "coreos/etcd", + parsedName: "coreos/etcd", + parsedVersion: "unknown", + }, + { + image: "coreos/etcd:latest", + parsedName: "coreos/etcd", + parsedVersion: "latest", + }, + { + image: "quay.io/postgres", + parsedName: "quay.io/postgres", + parsedVersion: "unknown", + }, + { + image: "quay.io:4443/coreos/etcd", + parsedName: "quay.io:4443/coreos/etcd", + parsedVersion: "unknown", + }, + { + image: "quay.io:4443/coreos/etcd:latest", + parsedName: "quay.io:4443/coreos/etcd", + parsedVersion: "latest", + }, + } + for _, tt := range tests { + t.Run("parse name "+tt.image, func(t *testing.T) { + imageName, imageVersion := parseImage(tt.image) + require.Equal(t, tt.parsedName, imageName) + require.Equal(t, tt.parsedVersion, imageVersion) + }) + } +} From 27dec2c2c1d90d9d95efb4821579a0b735af05a8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 17 May 2019 11:04:22 -0700 Subject: [PATCH 0848/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2265ec838..5f05e7a7d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -60,6 +60,7 @@ - [#5455](https://github.com/influxdata/telegraf/issues/5455): Fix unsupported pkt type error in pgbouncer. - [#5771](https://github.com/influxdata/telegraf/pull/5771): Fix only one job per storage target reported in lustre2 input. - [#5796](https://github.com/influxdata/telegraf/issues/5796): Set default timeout of 5s in fibaro input. +- [#5835](https://github.com/influxdata/telegraf/issues/5835): Fix docker input does not parse image name correctly. ## v1.10.4 [2019-05-14] From dc75f7bd3252f79ddab9f605abca60c0e193ec23 Mon Sep 17 00:00:00 2001 From: urusha Date: Fri, 17 May 2019 23:27:11 +0300 Subject: [PATCH 0849/1815] Add postgresql dsn connection string example (#5869) --- plugins/inputs/pgbouncer/README.md | 6 +++++- plugins/inputs/postgresql/README.md | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/pgbouncer/README.md b/plugins/inputs/pgbouncer/README.md index 2a841c45a..dd8224e3e 100644 --- a/plugins/inputs/pgbouncer/README.md +++ b/plugins/inputs/pgbouncer/README.md @@ -5,7 +5,11 @@ This PgBouncer plugin provides metrics for your PgBouncer load balancer. 
More information about the meaning of these metrics can be found in the [PgBouncer Documentation](https://pgbouncer.github.io/usage.html) ## Configuration -Specify address via a url matching: +Specify address via a postgresql connection string: + + `host=/run/postgresql port=6432 user=telegraf database=pgbouncer` + +Or via an url matching: `postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]` diff --git a/plugins/inputs/postgresql/README.md b/plugins/inputs/postgresql/README.md index a873ddac0..21f9097aa 100644 --- a/plugins/inputs/postgresql/README.md +++ b/plugins/inputs/postgresql/README.md @@ -31,7 +31,11 @@ _* value ignored and therefore not recorded._ More information about the meaning of these metrics can be found in the [PostgreSQL Documentation](http://www.postgresql.org/docs/9.2/static/monitoring-stats.html#PG-STAT-DATABASE-VIEW) ## Configuration -Specify address via a url matching: +Specify address via a postgresql connection string: + + `host=/run/postgresql user=telegraf database=telegraf` + +Or via an url matching: `postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]` From d25fa3ced30bdb2b8d769576ce3dead1bcf48a7b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 17 May 2019 13:44:08 -0700 Subject: [PATCH 0850/1815] Document that serializers are not thread-safe --- plugins/serializers/registry.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index ecac63323..e21e9205c 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -5,7 +5,6 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/serializers/carbon2" "github.com/influxdata/telegraf/plugins/serializers/graphite" "github.com/influxdata/telegraf/plugins/serializers/influx" @@ -24,6 +23,9 @@ type SerializerOutput interface { // Serializer is an interface defining functions that a serializer plugin must // satisfy. +// +// Implementations of this interface should be reentrant but are not required +// to be thread-safe. type Serializer interface { // Serialize takes a single telegraf metric and turns it into a byte buffer. // separate metrics should be separated by a newline, and there should be From 5bb6e4603de06ce389198706176a8f18125d6615 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 17 May 2019 13:46:13 -0700 Subject: [PATCH 0851/1815] Fix publishing of direct exchange routing key (#5868) --- plugins/outputs/amqp/amqp.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index dd45f72dc..56e1e13ef 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -12,7 +12,6 @@ import ( "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" - "github.com/streadway/amqp" ) @@ -206,8 +205,8 @@ func (q *AMQP) routingKey(metric telegraf.Metric) string { func (q *AMQP) Write(metrics []telegraf.Metric) error { batches := make(map[string][]telegraf.Metric) - if q.ExchangeType == "direct" || q.ExchangeType == "header" { - // Since the routing_key is ignored for these exchange types send as a + if q.ExchangeType == "header" { + // Since the routing_key is ignored for this exchange type send as a // single batch. 
batches[""] = metrics } else { From e6dd8536914d932cee5e50909b2eaf7b3f049054 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 17 May 2019 13:47:33 -0700 Subject: [PATCH 0852/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5f05e7a7d..3a0c10f89 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -61,6 +61,7 @@ - [#5771](https://github.com/influxdata/telegraf/pull/5771): Fix only one job per storage target reported in lustre2 input. - [#5796](https://github.com/influxdata/telegraf/issues/5796): Set default timeout of 5s in fibaro input. - [#5835](https://github.com/influxdata/telegraf/issues/5835): Fix docker input does not parse image name correctly. +- [#5661](https://github.com/influxdata/telegraf/issues/5661): Fix direct exchange routing key in amqp output. ## v1.10.4 [2019-05-14] From 9cdf1ea56e0140b4235017e5ce0131f02f90335d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 20 May 2019 14:30:31 -0700 Subject: [PATCH 0853/1815] Log actual url on wrong status code (#5811) --- plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go index 5583670e4..68be31e12 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go @@ -30,11 +30,11 @@ func (n *NginxPlusApi) gatherUrl(addr *url.URL, path string) ([]byte, error) { resp, err := n.client.Get(url) if err != nil { - return nil, fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) + return nil, fmt.Errorf("error making HTTP request to %s: %s", url, err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status) + return nil, fmt.Errorf("%s returned HTTP status %s", url, resp.Status) } contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] switch contentType { From ad877fdd919fbb0fced86c303d42ef0d1b339a44 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 20 May 2019 14:32:04 -0700 Subject: [PATCH 0854/1815] Fix scale set resource id with azure_monitor output (#5821) --- .../outputs/azure_monitor/azure_monitor.go | 62 ++++++++++++------- 1 file changed, 39 insertions(+), 23 deletions(-) diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go index 408976c53..9039d4aa4 100644 --- a/plugins/outputs/azure_monitor/azure_monitor.go +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -43,6 +43,35 @@ type AzureMonitor struct { MetricOutsideWindow selfstat.Stat } +// VirtualMachineMetadata contains information about a VM from the metadata service +type virtualMachineMetadata struct { + Compute struct { + Location string `json:"location"` + Name string `json:"name"` + ResourceGroupName string `json:"resourceGroupName"` + SubscriptionID string `json:"subscriptionId"` + VMScaleSetName string `json:"vmScaleSetName"` + } `json:"compute"` +} + +func (m *virtualMachineMetadata) ResourceID() string { + if m.Compute.VMScaleSetName != "" { + return fmt.Sprintf( + resourceIDScaleSetTemplate, + m.Compute.SubscriptionID, + m.Compute.ResourceGroupName, + m.Compute.VMScaleSetName, + ) + } else { + return fmt.Sprintf( + resourceIDTemplate, + m.Compute.SubscriptionID, + m.Compute.ResourceGroupName, + m.Compute.Name, + ) + } +} + type dimension struct { name 
string value string @@ -63,11 +92,12 @@ const ( defaultNamespacePrefix = "Telegraf/" defaultAuthResource = "https://monitoring.azure.com/" - vmInstanceMetadataURL = "http://169.254.169.254/metadata/instance?api-version=2017-12-01" - resourceIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s" - urlTemplate = "https://%s.monitoring.azure.com%s/metrics" - urlOverrideTemplate = "%s%s/metrics" - maxRequestBodySize = 4000000 + vmInstanceMetadataURL = "http://169.254.169.254/metadata/instance?api-version=2017-12-01" + resourceIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s" + resourceIDScaleSetTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s" + urlTemplate = "https://%s.monitoring.azure.com%s/metrics" + urlOverrideTemplate = "%s%s/metrics" + maxRequestBodySize = 4000000 ) var sampleConfig = ` @@ -200,31 +230,17 @@ func vmInstanceMetadata(c *http.Client) (string, string, error) { return "", "", err } if resp.StatusCode >= 300 || resp.StatusCode < 200 { - return "", "", fmt.Errorf("unable to fetch instance metadata: [%v] %s", resp.StatusCode, body) + return "", "", fmt.Errorf("unable to fetch instance metadata: [%s] %d", + vmInstanceMetadataURL, resp.StatusCode) } - // VirtualMachineMetadata contains information about a VM from the metadata service - type VirtualMachineMetadata struct { - Compute struct { - Location string `json:"location"` - Name string `json:"name"` - ResourceGroupName string `json:"resourceGroupName"` - SubscriptionID string `json:"subscriptionId"` - } `json:"compute"` - } - - var metadata VirtualMachineMetadata + var metadata virtualMachineMetadata if err := json.Unmarshal(body, &metadata); err != nil { return "", "", err } region := metadata.Compute.Location - resourceID := fmt.Sprintf( - resourceIDTemplate, - metadata.Compute.SubscriptionID, - metadata.Compute.ResourceGroupName, - metadata.Compute.Name, - ) + resourceID := metadata.ResourceID() return region, resourceID, nil } From 1b2773a762339dc2706c2cfc715f3922bcb0eeb0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 20 May 2019 14:35:01 -0700 Subject: [PATCH 0855/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a0c10f89..f907080c9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -62,6 +62,7 @@ - [#5796](https://github.com/influxdata/telegraf/issues/5796): Set default timeout of 5s in fibaro input. - [#5835](https://github.com/influxdata/telegraf/issues/5835): Fix docker input does not parse image name correctly. - [#5661](https://github.com/influxdata/telegraf/issues/5661): Fix direct exchange routing key in amqp output. +- [#5819](https://github.com/influxdata/telegraf/issues/5819): Fix scale set resource id with azure_monitor output. 
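For context on the scale set fix above, the two resource ID shapes that `ResourceID()` can emit differ only in the provider suffix. A small sketch using the templates from the patch (the subscription, resource group, and instance names are invented for illustration):

```go
package main

import "fmt"

const (
	resourceIDTemplate         = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s"
	resourceIDScaleSetTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s"
)

func main() {
	// Standalone VM: the instance metadata service returns an empty vmScaleSetName.
	fmt.Printf(resourceIDTemplate+"\n", "00000000-0000-0000-0000-000000000000", "example-rg", "example-vm")
	// Scale set member: vmScaleSetName is set, so the scale set form is used.
	fmt.Printf(resourceIDScaleSetTemplate+"\n", "00000000-0000-0000-0000-000000000000", "example-rg", "example-vmss")
}
```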
## v1.10.4 [2019-05-14] From b5cd9a9ff2e317dbf503e08b940d54ca9f49572a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 20 May 2019 14:36:23 -0700 Subject: [PATCH 0856/1815] Add support for gzip compression to amqp plugins (#5830) --- internal/content_coding.go | 122 ++++++++++++++++++ internal/content_coding_test.go | 58 +++++++++ plugins/inputs/amqp_consumer/README.md | 4 + plugins/inputs/amqp_consumer/amqp_consumer.go | 35 ++++- plugins/outputs/amqp/README.md | 8 ++ plugins/outputs/amqp/amqp.go | 25 +++- plugins/outputs/amqp/client.go | 10 +- 7 files changed, 250 insertions(+), 12 deletions(-) create mode 100644 internal/content_coding.go create mode 100644 internal/content_coding_test.go diff --git a/internal/content_coding.go b/internal/content_coding.go new file mode 100644 index 000000000..936dd9562 --- /dev/null +++ b/internal/content_coding.go @@ -0,0 +1,122 @@ +package internal + +import ( + "bytes" + "compress/gzip" + "errors" + "io" +) + +// NewContentEncoder returns a ContentEncoder for the encoding type. +func NewContentEncoder(encoding string) (ContentEncoder, error) { + switch encoding { + case "gzip": + return NewGzipEncoder() + + case "identity", "": + return NewIdentityEncoder(), nil + default: + return nil, errors.New("invalid value for content_encoding") + } +} + +// NewContentDecoder returns a ContentDecoder for the encoding type. +func NewContentDecoder(encoding string) (ContentDecoder, error) { + switch encoding { + case "gzip": + return NewGzipDecoder() + case "identity", "": + return NewIdentityDecoder(), nil + default: + return nil, errors.New("invalid value for content_encoding") + } +} + +// ContentEncoder applies a wrapper encoding to byte buffers. +type ContentEncoder interface { + Encode([]byte) ([]byte, error) +} + +// GzipEncoder compresses the buffer using gzip at the default level. +type GzipEncoder struct { + writer *gzip.Writer + buf *bytes.Buffer +} + +func NewGzipEncoder() (*GzipEncoder, error) { + var buf bytes.Buffer + return &GzipEncoder{ + writer: gzip.NewWriter(&buf), + buf: &buf, + }, nil +} + +func (e *GzipEncoder) Encode(data []byte) ([]byte, error) { + e.buf.Reset() + e.writer.Reset(e.buf) + + _, err := e.writer.Write(data) + if err != nil { + return nil, err + } + err = e.writer.Close() + if err != nil { + return nil, err + } + return e.buf.Bytes(), nil +} + +// IdentityEncoder is a null encoder that applies no transformation. +type IdentityEncoder struct{} + +func NewIdentityEncoder() *IdentityEncoder { + return &IdentityEncoder{} +} + +func (*IdentityEncoder) Encode(data []byte) ([]byte, error) { + return data, nil +} + +// ContentDecoder removes a wrapper encoding from byte buffers. +type ContentDecoder interface { + Decode([]byte) ([]byte, error) +} + +// GzipDecoder decompresses buffers with gzip compression. +type GzipDecoder struct { + reader *gzip.Reader + buf *bytes.Buffer +} + +func NewGzipDecoder() (*GzipDecoder, error) { + return &GzipDecoder{ + reader: new(gzip.Reader), + buf: new(bytes.Buffer), + }, nil +} + +func (d *GzipDecoder) Decode(data []byte) ([]byte, error) { + d.reader.Reset(bytes.NewBuffer(data)) + d.buf.Reset() + + _, err := d.buf.ReadFrom(d.reader) + if err != nil && err != io.EOF { + return nil, err + } + err = d.reader.Close() + if err != nil { + return nil, err + } + return d.buf.Bytes(), nil +} + +// IdentityDecoder is a null decoder that returns the input. 
+type IdentityDecoder struct{}
+
+func NewIdentityDecoder() *IdentityDecoder {
+	return &IdentityDecoder{}
+}
+
+func (*IdentityDecoder) Decode(data []byte) ([]byte, error) {
+	return data, nil
+}
diff --git a/internal/content_coding_test.go b/internal/content_coding_test.go
new file mode 100644
index 000000000..031633112
--- /dev/null
+++ b/internal/content_coding_test.go
@@ -0,0 +1,58 @@
+package internal
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestGzipEncodeDecode(t *testing.T) {
+	enc, err := NewGzipEncoder()
+	require.NoError(t, err)
+	dec, err := NewGzipDecoder()
+	require.NoError(t, err)
+
+	payload, err := enc.Encode([]byte("howdy"))
+	require.NoError(t, err)
+
+	actual, err := dec.Decode(payload)
+	require.NoError(t, err)
+
+	require.Equal(t, "howdy", string(actual))
+}
+
+func TestGzipReuse(t *testing.T) {
+	enc, err := NewGzipEncoder()
+	require.NoError(t, err)
+	dec, err := NewGzipDecoder()
+	require.NoError(t, err)
+
+	payload, err := enc.Encode([]byte("howdy"))
+	require.NoError(t, err)
+
+	actual, err := dec.Decode(payload)
+	require.NoError(t, err)
+
+	require.Equal(t, "howdy", string(actual))
+
+	payload, err = enc.Encode([]byte("doody"))
+	require.NoError(t, err)
+
+	actual, err = dec.Decode(payload)
+	require.NoError(t, err)
+
+	require.Equal(t, "doody", string(actual))
+}
+
+func TestIdentityEncodeDecode(t *testing.T) {
+	enc := NewIdentityEncoder()
+	dec := NewIdentityDecoder()
+
+	payload, err := enc.Encode([]byte("howdy"))
+	require.NoError(t, err)
+
+	actual, err := dec.Decode(payload)
+	require.NoError(t, err)
+
+	require.Equal(t, "howdy", string(actual))
+}
diff --git a/plugins/inputs/amqp_consumer/README.md b/plugins/inputs/amqp_consumer/README.md
index ca1af800c..84371ba4d 100644
--- a/plugins/inputs/amqp_consumer/README.md
+++ b/plugins/inputs/amqp_consumer/README.md
@@ -77,6 +77,10 @@ The following defaults are known to work with RabbitMQ:
   ## Use TLS but skip chain & host verification
   # insecure_skip_verify = false
 
+  ## Content encoding for message payloads, can be set to "gzip" or
+  ## "identity" to apply no encoding.
+  # content_encoding = "identity"
+
   ## Data format to consume.
   ## Each data format has its own unique set of configuration options, read
   ## more about them here:
diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go
index d80a3683b..994a3736a 100644
--- a/plugins/inputs/amqp_consumer/amqp_consumer.go
+++ b/plugins/inputs/amqp_consumer/amqp_consumer.go
@@ -11,6 +11,7 @@ import (
 	"time"
 
 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/internal/tls"
 	"github.com/influxdata/telegraf/plugins/inputs"
 	"github.com/influxdata/telegraf/plugins/parsers"
@@ -52,12 +53,15 @@ type AMQPConsumer struct {
 	AuthMethod string
 	tls.ClientConfig
 
+	ContentEncoding string `toml:"content_encoding"`
+
 	deliveries map[telegraf.TrackingID]amqp.Delivery
 
-	parser parsers.Parser
-	conn   *amqp.Connection
-	wg     *sync.WaitGroup
-	cancel context.CancelFunc
+	parser  parsers.Parser
+	conn    *amqp.Connection
+	wg      *sync.WaitGroup
+	cancel  context.CancelFunc
+	decoder internal.ContentDecoder
 }
 
 type externalAuth struct{}
@@ -147,6 +151,10 @@ func (a *AMQPConsumer) SampleConfig() string {
   ## Use TLS but skip chain & host verification
   # insecure_skip_verify = false
 
+  ## Content encoding for message payloads, can be set to "gzip" or
+  ## "identity" to apply no encoding.
+  # content_encoding = "identity"
+
   ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
@@ -201,6 +209,11 @@ func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error {
 		return err
 	}
 
+	a.decoder, err = internal.NewContentDecoder(a.ContentEncoding)
+	if err != nil {
+		return err
+	}
+
 	msgs, err := a.connect(amqpConf)
 	if err != nil {
 		return err
@@ -428,8 +441,7 @@ func (a *AMQPConsumer) process(ctx context.Context, msgs <-chan amqp.Delivery, a
 }
 
 func (a *AMQPConsumer) onMessage(acc telegraf.TrackingAccumulator, d amqp.Delivery) error {
-	metrics, err := a.parser.Parse(d.Body)
-	if err != nil {
+	onError := func() {
 		// Discard the message from the queue; will never be able to process
 		// this message.
 		rejErr := d.Ack(false)
@@ -438,6 +450,17 @@ func (a *AMQPConsumer) onMessage(acc telegraf.TrackingAccumulator, d amqp.Delive
 			d.DeliveryTag, rejErr)
 		a.conn.Close()
 	}
+	}
+
+	body, err := a.decoder.Decode(d.Body)
+	if err != nil {
+		onError()
+		return err
+	}
+
+	metrics, err := a.parser.Parse(body)
+	if err != nil {
+		onError()
 		return err
 	}
diff --git a/plugins/outputs/amqp/README.md b/plugins/outputs/amqp/README.md
index fe44ea4ed..68470a2c0 100644
--- a/plugins/outputs/amqp/README.md
+++ b/plugins/outputs/amqp/README.md
@@ -92,6 +92,14 @@ For an introduction to AMQP see:
   ## Recommended to set to true.
   # use_batch_format = false
 
+  ## Content encoding for message payloads, can be set to "gzip" or
+  ## "identity" to apply no encoding.
+  ##
+  ## Please note that when use_batch_format = false each amqp message contains
+  ## only a single metric; it is recommended to use compression with batch
+  ## format for best results.
+  # content_encoding = "identity"
+
   ## Data format to output.
   ## Each data format has its own unique set of configuration options, read
   ## more about them here:
diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go
index 56e1e13ef..4350f2e74 100644
--- a/plugins/outputs/amqp/amqp.go
+++ b/plugins/outputs/amqp/amqp.go
@@ -54,6 +54,7 @@ type AMQP struct {
 	Headers         map[string]string `toml:"headers"`
 	Timeout         internal.Duration `toml:"timeout"`
 	UseBatchFormat  bool              `toml:"use_batch_format"`
+	ContentEncoding string            `toml:"content_encoding"`
 	tls.ClientConfig
 
 	serializer   serializers.Serializer
@@ -61,6 +62,7 @@ type AMQP struct {
 	client       Client
 	config       *ClientConfig
 	sentMessages int
+	encoder      internal.ContentEncoder
 }
 
 type Client interface {
@@ -149,6 +151,14 @@ var sampleConfig = `
   ## Recommended to set to true.
   # use_batch_format = false
 
+  ## Content encoding for message payloads, can be set to "gzip" or
+  ## "identity" to apply no encoding.
+  ##
+  ## Please note that when use_batch_format = false each amqp message contains
+  ## only a single metric; it is recommended to use compression with batch
+  ## format for best results.
+  # content_encoding = "identity"
+
   ## Data format to output.
## Each data format has its own unique set of configuration options, read ## more about them here: @@ -177,11 +187,16 @@ func (q *AMQP) Connect() error { q.config = config } - client, err := q.connect(q.config) + var err error + q.encoder, err = internal.NewContentEncoder(q.ContentEncoding) + if err != nil { + return err + } + + q.client, err = q.connect(q.config) if err != nil { return err } - q.client = client return nil } @@ -227,6 +242,11 @@ func (q *AMQP) Write(metrics []telegraf.Metric) error { return err } + body, err = q.encoder.Encode(body) + if err != nil { + return err + } + err = q.publish(key, body) if err != nil { // If this is the first attempt to publish and the connection is @@ -298,6 +318,7 @@ func (q *AMQP) makeClientConfig() (*ClientConfig, error) { exchange: q.Exchange, exchangeType: q.ExchangeType, exchangePassive: q.ExchangePassive, + encoding: q.ContentEncoding, timeout: q.Timeout.Duration, } diff --git a/plugins/outputs/amqp/client.go b/plugins/outputs/amqp/client.go index 0ee45d950..5e0dc3b49 100644 --- a/plugins/outputs/amqp/client.go +++ b/plugins/outputs/amqp/client.go @@ -19,6 +19,7 @@ type ClientConfig struct { exchangePassive bool exchangeDurable bool exchangeArguments amqp.Table + encoding string headers amqp.Table deliveryMode uint8 tlsConfig *tls.Config @@ -114,10 +115,11 @@ func (c *client) Publish(key string, body []byte) error { false, // mandatory false, // immediate amqp.Publishing{ - Headers: c.config.headers, - ContentType: "text/plain", - Body: body, - DeliveryMode: c.config.deliveryMode, + Headers: c.config.headers, + ContentType: "text/plain", + ContentEncoding: c.config.encoding, + Body: body, + DeliveryMode: c.config.deliveryMode, }) } From e141518cf05177ef0f6a8efb3608ec0c1acbf49f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 20 May 2019 14:38:35 -0700 Subject: [PATCH 0857/1815] Support passive queue declaration in amqp_consumer (#5831) --- plugins/inputs/amqp_consumer/README.md | 8 +- plugins/inputs/amqp_consumer/amqp_consumer.go | 136 +++++++++++------- plugins/outputs/amqp/README.md | 2 +- plugins/outputs/amqp/amqp.go | 2 +- plugins/outputs/amqp/client.go | 4 + 5 files changed, 100 insertions(+), 52 deletions(-) diff --git a/plugins/inputs/amqp_consumer/README.md b/plugins/inputs/amqp_consumer/README.md index 84371ba4d..53fca513d 100644 --- a/plugins/inputs/amqp_consumer/README.md +++ b/plugins/inputs/amqp_consumer/README.md @@ -27,7 +27,7 @@ The following defaults are known to work with RabbitMQ: # username = "" # password = "" - ## Exchange to declare and consume from. + ## Name of the exchange to declare. If unset, no exchange will be declared. exchange = "telegraf" ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". @@ -49,7 +49,11 @@ The following defaults are known to work with RabbitMQ: ## AMQP queue durability can be "transient" or "durable". queue_durability = "durable" - ## Binding Key + ## If true, queue will be passively declared. + # queue_passive = false + + ## A binding between the exchange and queue using this binding key is + ## created. If unset, no binding is created. binding_key = "#" ## Maximum number of messages server should give to the worker. 
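As a usage sketch for the passive declaration options documented above (broker address and queue name are illustrative, not from the patch), a consumer attached to a queue that is managed outside Telegraf would leave `exchange` and `binding_key` unset and declare the queue passively:

```toml
[[inputs.amqp_consumer]]
  brokers = ["amqp://localhost:5672/influxdb"]

  ## With exchange unset no exchange is declared, and with binding_key unset
  ## no binding is created; the queue is expected to already exist, so it is
  ## only declared passively.
  queue = "telegraf"
  queue_durability = "durable"
  queue_passive = true

  data_format = "influx"
```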
diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go index 994a3736a..6cf6004f5 100644 --- a/plugins/inputs/amqp_consumer/amqp_consumer.go +++ b/plugins/inputs/amqp_consumer/amqp_consumer.go @@ -41,6 +41,7 @@ type AMQPConsumer struct { // Queue Name Queue string `toml:"queue"` QueueDurability string `toml:"queue_durability"` + QueuePassive bool `toml:"queue_passive"` // Binding Key BindingKey string `toml:"binding_key"` @@ -101,7 +102,7 @@ func (a *AMQPConsumer) SampleConfig() string { # username = "" # password = "" - ## Exchange to declare and consume from. + ## Name of the exchange to declare. If unset, no exchange will be declared. exchange = "telegraf" ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". @@ -123,7 +124,11 @@ func (a *AMQPConsumer) SampleConfig() string { ## AMQP queue durability can be "transient" or "durable". queue_durability = "durable" - ## Binding Key. + ## If true, queue will be passively declared. + # queue_passive = false + + ## A binding between the exchange and queue using this binding key is + ## created. If unset, no binding is created. binding_key = "#" ## Maximum number of messages server should give to the worker. @@ -286,59 +291,52 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err return nil, fmt.Errorf("Failed to open a channel: %s", err) } - var exchangeDurable = true - switch a.ExchangeDurability { - case "transient": - exchangeDurable = false - default: - exchangeDurable = true + if a.Exchange != "" { + var exchangeDurable = true + switch a.ExchangeDurability { + case "transient": + exchangeDurable = false + default: + exchangeDurable = true + } + + exchangeArgs := make(amqp.Table, len(a.ExchangeArguments)) + for k, v := range a.ExchangeArguments { + exchangeArgs[k] = v + } + + err = declareExchange( + ch, + a.Exchange, + a.ExchangeType, + a.ExchangePassive, + exchangeDurable, + exchangeArgs) + if err != nil { + return nil, err + } } - exchangeArgs := make(amqp.Table, len(a.ExchangeArguments)) - for k, v := range a.ExchangeArguments { - exchangeArgs[k] = v - } - - err = declareExchange( + q, err := declareQueue( ch, - a.Exchange, - a.ExchangeType, - a.ExchangePassive, - exchangeDurable, - exchangeArgs) + a.Queue, + a.QueueDurability, + a.QueuePassive) if err != nil { return nil, err } - var queueDurable = true - switch a.QueueDurability { - case "transient": - queueDurable = false - default: - queueDurable = true - } - - q, err := ch.QueueDeclare( - a.Queue, // queue - queueDurable, // durable - false, // delete when unused - false, // exclusive - false, // no-wait - nil, // arguments - ) - if err != nil { - return nil, fmt.Errorf("Failed to declare a queue: %s", err) - } - - err = ch.QueueBind( - q.Name, // queue - a.BindingKey, // binding-key - a.Exchange, // exchange - false, - nil, - ) - if err != nil { - return nil, fmt.Errorf("Failed to bind a queue: %s", err) + if a.BindingKey != "" { + err = ch.QueueBind( + q.Name, // queue + a.BindingKey, // binding-key + a.Exchange, // exchange + false, + nil, + ) + if err != nil { + return nil, fmt.Errorf("Failed to bind a queue: %s", err) + } } err = ch.Qos( @@ -402,6 +400,48 @@ func declareExchange( return nil } +func declareQueue( + channel *amqp.Channel, + queueName string, + queueDurability string, + queuePassive bool, +) (*amqp.Queue, error) { + var queue amqp.Queue + var err error + + var queueDurable = true + switch queueDurability { + case "transient": + queueDurable 
= false + default: + queueDurable = true + } + + if queuePassive { + queue, err = channel.QueueDeclarePassive( + queueName, // queue + queueDurable, // durable + false, // delete when unused + false, // exclusive + false, // no-wait + nil, // arguments + ) + } else { + queue, err = channel.QueueDeclare( + queueName, // queue + queueDurable, // durable + false, // delete when unused + false, // exclusive + false, // no-wait + nil, // arguments + ) + } + if err != nil { + return nil, fmt.Errorf("error declaring queue: %v", err) + } + return &queue, nil +} + // Read messages from queue and add them to the Accumulator func (a *AMQPConsumer) process(ctx context.Context, msgs <-chan amqp.Delivery, ac telegraf.Accumulator) { a.deliveries = make(map[telegraf.TrackingID]amqp.Delivery) diff --git a/plugins/outputs/amqp/README.md b/plugins/outputs/amqp/README.md index 68470a2c0..f810a0a7b 100644 --- a/plugins/outputs/amqp/README.md +++ b/plugins/outputs/amqp/README.md @@ -33,7 +33,7 @@ For an introduction to AMQP see: # exchange_type = "topic" ## If true, exchange will be passively declared. - # exchange_declare_passive = false + # exchange_passive = false ## Exchange durability can be either "transient" or "durable". # exchange_durability = "durable" diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index 4350f2e74..f82faef64 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -92,7 +92,7 @@ var sampleConfig = ` # exchange_type = "topic" ## If true, exchange will be passively declared. - # exchange_declare_passive = false + # exchange_passive = false ## Exchange durability can be either "transient" or "durable". # exchange_durability = "durable" diff --git a/plugins/outputs/amqp/client.go b/plugins/outputs/amqp/client.go index 5e0dc3b49..8c230b706 100644 --- a/plugins/outputs/amqp/client.go +++ b/plugins/outputs/amqp/client.go @@ -78,6 +78,10 @@ func Connect(config *ClientConfig) (*client, error) { } func (c *client) DeclareExchange() error { + if c.config.exchange == "" { + return nil + } + var err error if c.config.exchangePassive { err = c.channel.ExchangeDeclarePassive( From 3c451a1f2577cd5837438e95b8f74ea160e7a6dc Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 20 May 2019 14:37:16 -0700 Subject: [PATCH 0858/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f907080c9..8eb567487 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,8 @@ - [#5791](https://github.com/influxdata/telegraf/pull/5791): Add support for datadog events to statsd input. - [#5817](https://github.com/influxdata/telegraf/pull/5817): Allow devices option to match against devlinks. - [#5855](https://github.com/influxdata/telegraf/pull/5855): Support tags in enum processor. +- [#5830](https://github.com/influxdata/telegraf/pull/5830): Add support for gzip compression to amqp plugins. +- [#5831](https://github.com/influxdata/telegraf/pull/5831): Support passive queue declaration in amqp_consumer. 
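The gzip entry above builds on the `internal` content-coding helpers added in the earlier patch; a minimal round-trip sketch of that API (the payload contents are arbitrary):

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	enc, err := internal.NewContentEncoder("gzip")
	if err != nil {
		log.Fatal(err)
	}
	dec, err := internal.NewContentDecoder("gzip")
	if err != nil {
		log.Fatal(err)
	}

	// Encode compresses the payload; Decode restores the original bytes.
	payload, err := enc.Encode([]byte("cpu usage_idle=42"))
	if err != nil {
		log.Fatal(err)
	}
	original, err := dec.Decode(payload)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(original))
}
```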
#### Bugfixes From 0535dc92ed5c102ca09742826ddc0baae256a65e Mon Sep 17 00:00:00 2001 From: Max Renaud Date: Wed, 22 May 2019 10:33:09 -0700 Subject: [PATCH 0859/1815] Omit power_failed and power_restored when the Apex provides invalid timestamps (#5896) --- plugins/inputs/neptune_apex/README.md | 4 +-- plugins/inputs/neptune_apex/neptune_apex.go | 22 +++++------- .../inputs/neptune_apex/neptune_apex_test.go | 34 +++++++++++++++++-- 3 files changed, 42 insertions(+), 18 deletions(-) diff --git a/plugins/inputs/neptune_apex/README.md b/plugins/inputs/neptune_apex/README.md index 5531d3fa9..61919a5c6 100644 --- a/plugins/inputs/neptune_apex/README.md +++ b/plugins/inputs/neptune_apex/README.md @@ -59,8 +59,8 @@ programming. These tags are clearly marked in the list below and should be consi - amp (float, Ampere) is the amount of current flowing through the 120V outlet. - watt (float, Watt) represents the amount of energy flowing through the 120V outlet. - xstatus (string) indicates the xstatus of an outlet. Found on wireless Vortech devices. - - power_failed (int64, Unix epoch in ns) when the controller last lost power. - - power_restored (int64, Unix epoch in ns) when the controller last powered on. + - power_failed (int64, Unix epoch in ns) when the controller last lost power. Omitted if the apex reports it as "none" + - power_restored (int64, Unix epoch in ns) when the controller last powered on. Omitted if the apex reports it as "none" - serial (string, serial number) - time: - The time used for the metric is parsed from the status.xml page. This helps when cross-referencing events with diff --git a/plugins/inputs/neptune_apex/neptune_apex.go b/plugins/inputs/neptune_apex/neptune_apex.go index 370407a41..8161ac7b4 100644 --- a/plugins/inputs/neptune_apex/neptune_apex.go +++ b/plugins/inputs/neptune_apex/neptune_apex.go @@ -110,27 +110,21 @@ func (n *NeptuneApex) parseXML(acc telegraf.Accumulator, data []byte) error { err, data) } + mainFields := map[string]interface{}{ + "serial": r.Serial, + } var reportTime time.Time - var powerFailed, powerRestored int64 + if reportTime, err = parseTime(r.Date, r.Timezone); err != nil { return err } - if val, err := parseTime(r.PowerFailed, r.Timezone); err != nil { - return err - } else { - powerFailed = val.UnixNano() + if val, err := parseTime(r.PowerFailed, r.Timezone); err == nil { + mainFields["power_failed"] = val.UnixNano() } - if val, err := parseTime(r.PowerRestored, r.Timezone); err != nil { - return err - } else { - powerRestored = val.UnixNano() + if val, err := parseTime(r.PowerRestored, r.Timezone); err == nil { + mainFields["power_restored"] = val.UnixNano() } - mainFields := map[string]interface{}{ - "serial": r.Serial, - "power_failed": powerFailed, - "power_restored": powerRestored, - } acc.AddFields(Measurement, mainFields, map[string]string{ "source": r.Hostname, diff --git a/plugins/inputs/neptune_apex/neptune_apex_test.go b/plugins/inputs/neptune_apex/neptune_apex_test.go index 4a3cc6458..cefa5fad1 100644 --- a/plugins/inputs/neptune_apex/neptune_apex_test.go +++ b/plugins/inputs/neptune_apex/neptune_apex_test.go @@ -226,7 +226,22 @@ func TestParseXML(t *testing.T) { `12/22/2018 21:55:37 -8.0a 12/22/2018 22:55:37`), - wantErr: true, + wantMetrics: []*testutil.Metric{ + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "", + "type": "controller", + "hardware": "", + "software": "", + }, + Fields: map[string]interface{}{ + "serial": "", + "power_restored": int64(1545548137000000000), + }, + }, + 
}, }, { name: "Power restored time failure", @@ -234,7 +249,22 @@ func TestParseXML(t *testing.T) { `12/22/2018 21:55:37 -8.0a 12/22/2018 22:55:37`), - wantErr: true, + wantMetrics: []*testutil.Metric{ + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "", + "type": "controller", + "hardware": "", + "software": "", + }, + Fields: map[string]interface{}{ + "serial": "", + "power_failed": int64(1545548137000000000), + }, + }, + }, }, { name: "Power failed failure", From 6ed2b64b6c1010530c2d5de4dfe395dd9edf8007 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 22 May 2019 10:35:54 -0700 Subject: [PATCH 0860/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8eb567487..67598335f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -65,6 +65,7 @@ - [#5835](https://github.com/influxdata/telegraf/issues/5835): Fix docker input does not parse image name correctly. - [#5661](https://github.com/influxdata/telegraf/issues/5661): Fix direct exchange routing key in amqp output. - [#5819](https://github.com/influxdata/telegraf/issues/5819): Fix scale set resource id with azure_monitor output. +- [#5883](https://github.com/influxdata/telegraf/issues/5883): Skip invalid power times in apex_neptune input. ## v1.10.4 [2019-05-14] From adc32002da4a7dcbcf9b0ac15f34922b10372fdc Mon Sep 17 00:00:00 2001 From: Arno den Uijl Date: Wed, 22 May 2019 22:13:19 +0200 Subject: [PATCH 0861/1815] Fix sqlserver connection closing on error (#5897) --- plugins/inputs/sqlserver/sqlserver.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index dc57c87a4..8fae93d15 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -151,12 +151,6 @@ func (s *SQLServer) gatherServer(server string, query Query, acc telegraf.Accumu if err != nil { return err } - // verify that a connection can be made before making a query - err = conn.Ping() - if err != nil { - // Handle error - return err - } defer conn.Close() // execute query From 597814ee83db1bbd66aad4f52552da46f759e380 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 22 May 2019 13:36:13 -0700 Subject: [PATCH 0862/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 67598335f..767c3a226 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -66,6 +66,7 @@ - [#5661](https://github.com/influxdata/telegraf/issues/5661): Fix direct exchange routing key in amqp output. - [#5819](https://github.com/influxdata/telegraf/issues/5819): Fix scale set resource id with azure_monitor output. - [#5883](https://github.com/influxdata/telegraf/issues/5883): Skip invalid power times in apex_neptune input. +- [#3485](https://github.com/influxdata/telegraf/issues/3485): Fix sqlserver connection closing on error. 
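The sqlserver change above works because the removed `Ping` could return before `defer conn.Close()` was registered, so a failed health check leaked the handle, and the query itself surfaces the same connectivity errors anyway. A sketch of the corrected pattern (the DSN and query are placeholders; in the real plugin a SQL Server driver such as github.com/denisenkom/go-mssqldb is imported for its side effects, which this sketch omits):

```go
package main

import (
	"database/sql"
	"fmt"
)

func gatherServer(dsn string) error {
	conn, err := sql.Open("sqlserver", dsn)
	if err != nil {
		return err
	}
	// Close is deferred immediately after Open, so every later return path
	// releases the handle; the removed Ping returned before this point on
	// failure and left the connection open.
	defer conn.Close()

	rows, err := conn.Query("SELECT 1")
	if err != nil {
		return err
	}
	defer rows.Close()
	return rows.Err()
}

func main() {
	// Without a registered driver this reports an error, which is expected
	// for the sketch.
	if err := gatherServer("server=localhost;user id=telegraf"); err != nil {
		fmt.Println(err)
	}
}
```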
## v1.10.4 [2019-05-14] From adb7a52b93eb5acbe78ed1ee8cde6be7f922d747 Mon Sep 17 00:00:00 2001 From: Javier Kohen Date: Fri, 24 May 2019 14:24:14 -0400 Subject: [PATCH 0863/1815] Set user agent in stackdriver output (#5901) --- plugins/outputs/stackdriver/stackdriver.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index 572cdb4c7..fbb946fbd 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -12,7 +12,9 @@ import ( monitoring "cloud.google.com/go/monitoring/apiv3" // Imports the Stackdriver Monitoring client package. googlepb "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" + "google.golang.org/api/option" metricpb "google.golang.org/genproto/googleapis/api/metric" monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" @@ -88,7 +90,7 @@ func (s *Stackdriver) Connect() error { if s.client == nil { ctx := context.Background() - client, err := monitoring.NewMetricClient(ctx) + client, err := monitoring.NewMetricClient(ctx, option.WithUserAgent(internal.ProductToken())) if err != nil { return err } From fd57bb17755c4e0e2b752daf5428ffeb0e20edd5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 24 May 2019 11:25:34 -0700 Subject: [PATCH 0864/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 767c3a226..a2355eb6c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -50,6 +50,7 @@ - [#5855](https://github.com/influxdata/telegraf/pull/5855): Support tags in enum processor. - [#5830](https://github.com/influxdata/telegraf/pull/5830): Add support for gzip compression to amqp plugins. - [#5831](https://github.com/influxdata/telegraf/pull/5831): Support passive queue declaration in amqp_consumer. +- [#5901](https://github.com/influxdata/telegraf/pull/5901): Set user agent in stackdriver output. 
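The stackdriver patch above is a one-argument change: `monitoring.NewMetricClient` accepts variadic `google.golang.org/api/option` values, and `option.WithUserAgent` makes telegraf identify itself in Google's request logs and quota accounting. A sketch with a literal token standing in for `internal.ProductToken()`:

```go
package stackdriversketch

import (
	"context"

	monitoring "cloud.google.com/go/monitoring/apiv3" // Stackdriver Monitoring client, as in the plugin
	"google.golang.org/api/option"
)

// newClient is a sketch, not the plugin's Connect: it shows how a user agent
// option threads through the client constructor. The literal token below is
// illustrative; the plugin derives it from internal.ProductToken(),
// e.g. "Telegraf/1.11.0".
func newClient(ctx context.Context) (*monitoring.MetricClient, error) {
	return monitoring.NewMetricClient(ctx, option.WithUserAgent("Telegraf/1.11.0"))
}
```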
#### Bugfixes From 43c3ceec96253168028dac8b12628e2b5c7b749a Mon Sep 17 00:00:00 2001 From: dupondje Date: Sat, 25 May 2019 09:33:22 +0200 Subject: [PATCH 0865/1815] Fix setfacl command in postfix docs (#5875) --- plugins/inputs/postfix/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/postfix/README.md b/plugins/inputs/postfix/README.md index 3dab2b39d..57abd0f5b 100644 --- a/plugins/inputs/postfix/README.md +++ b/plugins/inputs/postfix/README.md @@ -29,7 +29,7 @@ $ sudo chmod g+r /var/spool/postfix/maildrop Posix ACL: ```sh -$ sudo setfacl -Rdm u:telegraf:rX /var/spool/postfix/{active,hold,incoming,deferred,maildrop} +$ sudo setfacl -Rdm g:telegraf:rX /var/spool/postfix/{active,hold,incoming,deferred,maildrop} ``` ### Measurements & Fields: From 77659f33bc0cfc1ca1b161b2e2a118c3a4fa671d Mon Sep 17 00:00:00 2001 From: Aaron Wood Date: Sun, 26 May 2019 19:02:09 -0700 Subject: [PATCH 0866/1815] Extend metrics collected from Nvidia GPUs (#5885) --- plugins/inputs/nvidia_smi/README.md | 9 +++++++++ plugins/inputs/nvidia_smi/nvidia_smi.go | 11 ++++++++++- plugins/inputs/nvidia_smi/nvidia_smi_test.go | 8 ++++---- 3 files changed, 23 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/nvidia_smi/README.md b/plugins/inputs/nvidia_smi/README.md index b59f2ee6a..c3bac8da5 100644 --- a/plugins/inputs/nvidia_smi/README.md +++ b/plugins/inputs/nvidia_smi/README.md @@ -35,6 +35,15 @@ On Windows, `nvidia-smi` is generally located at `C:\Program Files\NVIDIA Corpor - `temperature_gpu` (integer, degrees C) - `utilization_gpu` (integer, percentage) - `utilization_memory` (integer, percentage) + - `pcie_link_gen_current` (integer) + - `pcie_link_width_current` (integer) + - `encoder_stats_session_count` (integer) + - `encoder_stats_average_fps` (integer) + - `encoder_stats_average_latency` (integer) + - `clocks_current_graphics` (integer, MHz) + - `clocks_current_sm` (integer, MHz) + - `clocks_current_memory` (integer, MHz) + - `clocks_current_video` (integer, MHz) ### Sample Query diff --git a/plugins/inputs/nvidia_smi/nvidia_smi.go b/plugins/inputs/nvidia_smi/nvidia_smi.go index ea708f24f..37dde689a 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi.go @@ -16,7 +16,7 @@ import ( var ( measurement = "nvidia_smi" - metrics = "fan.speed,memory.total,memory.used,memory.free,pstate,temperature.gpu,name,uuid,compute_mode,utilization.gpu,utilization.memory,index,power.draw" + metrics = "fan.speed,memory.total,memory.used,memory.free,pstate,temperature.gpu,name,uuid,compute_mode,utilization.gpu,utilization.memory,index,power.draw,pcie.link.gen.current,pcie.link.width.current,encoder.stats.sessionCount,encoder.stats.averageFps,encoder.stats.averageLatency,clocks.current.graphics,clocks.current.sm,clocks.current.memory,clocks.current.video" metricNames = [][]string{ {"fan_speed", "integer"}, {"memory_total", "integer"}, @@ -31,6 +31,15 @@ var ( {"utilization_memory", "integer"}, {"index", "tag"}, {"power_draw", "float"}, + {"pcie_link_gen_current", "integer"}, + {"pcie_link_width_current", "integer"}, + {"encoder_stats_session_count", "integer"}, + {"encoder_stats_average_fps", "integer"}, + {"encoder_stats_average_latency", "integer"}, + {"clocks_current_graphics", "integer"}, + {"clocks_current_sm", "integer"}, + {"clocks_current_memory", "integer"}, + {"clocks_current_video", "integer"}, } ) diff --git a/plugins/inputs/nvidia_smi/nvidia_smi_test.go b/plugins/inputs/nvidia_smi/nvidia_smi_test.go index 87785fe87..4e0cc8eac 
100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi_test.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi_test.go @@ -7,15 +7,15 @@ import ( ) func TestParseLineStandard(t *testing.T) { - line := "85, 8114, 553, 7561, P2, 61, GeForce GTX 1070 Ti, GPU-d1911b8a-f5c8-5e66-057c-486561269de8, Default, 100, 93, 1, 0.0\n" + line := "41, 11264, 1074, 10190, P8, 32, GeForce RTX 2080 Ti, GPU-c97b7f88-c06d-650f-5339-f8dd0c1315c0, Default, 1, 4, 0, 24.33, 1, 16, 0, 0, 0, 300, 300, 405, 540\n" tags, fields, err := parseLine(line) if err != nil { t.Fail() } - if tags["name"] != "GeForce GTX 1070 Ti" { + if tags["name"] != "GeForce RTX 2080 Ti" { t.Fail() } - if temp, ok := fields["temperature_gpu"].(int); ok && temp == 61 { + if temp, ok := fields["temperature_gpu"].(int); ok && temp != 32 { t.Fail() } } @@ -37,7 +37,7 @@ func TestParseLineBad(t *testing.T) { } func TestParseLineNotSupported(t *testing.T) { - line := "[Not Supported], 7606, 0, 7606, P0, 38, Tesla P4, GPU-xxx, Default, 0, 0, 0, 0.0\n" + line := "[Not Supported], 11264, 1074, 10190, P8, 32, GeForce RTX 2080 Ti, GPU-c97b7f88-c06d-650f-5339-f8dd0c1315c0, Default, 1, 4, 0, 24.33, 1, 16, 0, 0, 0, 300, 300, 405, 540\n" _, fields, err := parseLine(line) require.NoError(t, err) require.Equal(t, nil, fields["fan_speed"]) From 91a82b1e73707391774f97f2514958fb336e7117 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Sun, 26 May 2019 19:04:07 -0700 Subject: [PATCH 0867/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a2355eb6c..b1b3b5f12 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,6 +51,7 @@ - [#5830](https://github.com/influxdata/telegraf/pull/5830): Add support for gzip compression to amqp plugins. - [#5831](https://github.com/influxdata/telegraf/pull/5831): Support passive queue declaration in amqp_consumer. - [#5901](https://github.com/influxdata/telegraf/pull/5901): Set user agent in stackdriver output. +- [#5885](https://github.com/influxdata/telegraf/pull/5885): Extend metrics collected from Nvidia GPUs. 
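The nvidia_smi patch above extends a purely positional schema: the plugin asks `nvidia-smi` for a fixed comma-separated field list and zips the reply against the `metricNames` name/type table, which is why the query string and the table must stay in lockstep (the updated test lines now carry 22 columns). A hedged sketch of that zip, using a trimmed four-column schema rather than the plugin's full table:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// schema is a hypothetical subset of the plugin's metricNames table; each
// position must match the --query-gpu list order exactly.
var schema = [][2]string{
	{"fan_speed", "integer"},
	{"name", "tag"},
	{"temperature_gpu", "integer"},
	{"power_draw", "float"},
}

func parseLine(line string) (map[string]string, map[string]interface{}, error) {
	tags := map[string]string{}
	fields := map[string]interface{}{}

	parts := strings.Split(strings.TrimSpace(line), ", ")
	if len(parts) != len(schema) {
		return nil, nil, fmt.Errorf("expected %d fields, got %d", len(schema), len(parts))
	}
	for i, col := range schema {
		name, kind, raw := col[0], col[1], parts[i]
		if raw == "[Not Supported]" {
			continue // leave the field unset instead of failing the line
		}
		switch kind {
		case "tag":
			tags[name] = raw
		case "integer":
			if v, err := strconv.Atoi(raw); err == nil {
				fields[name] = v
			}
		case "float":
			if v, err := strconv.ParseFloat(raw, 64); err == nil {
				fields[name] = v
			}
		}
	}
	return tags, fields, nil
}

func main() {
	tags, fields, err := parseLine("41, GeForce RTX 2080 Ti, 32, 24.33\n")
	if err != nil {
		panic(err)
	}
	fmt.Println(tags, fields)
}
```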
#### Bugfixes From dfb83778ea2f4054fa7b94c3db37025253dedf38 Mon Sep 17 00:00:00 2001 From: Evan Baker Date: Sun, 26 May 2019 22:01:02 -0500 Subject: [PATCH 0868/1815] Add ecs/fargate input plugin (#5121) --- README.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/docker/docker.go | 6 +- plugins/inputs/docker/stats_helpers.go | 8 +- plugins/inputs/ecs/README.md | 64 ++ plugins/inputs/ecs/client.go | 124 ++++ plugins/inputs/ecs/client_test.go | 211 ++++++ plugins/inputs/ecs/ecs.go | 251 +++++++ plugins/inputs/ecs/ecs_test.go | 767 ++++++++++++++++++++ plugins/inputs/ecs/stats.go | 295 ++++++++ plugins/inputs/ecs/stats_test.go | 226 ++++++ plugins/inputs/ecs/testdata/metadata.golden | 78 ++ plugins/inputs/ecs/testdata/stats.golden | 663 +++++++++++++++++ plugins/inputs/ecs/types.go | 75 ++ plugins/inputs/ecs/types_test.go | 61 ++ 15 files changed, 2824 insertions(+), 7 deletions(-) create mode 100644 plugins/inputs/ecs/README.md create mode 100644 plugins/inputs/ecs/client.go create mode 100644 plugins/inputs/ecs/client_test.go create mode 100644 plugins/inputs/ecs/ecs.go create mode 100644 plugins/inputs/ecs/ecs_test.go create mode 100644 plugins/inputs/ecs/stats.go create mode 100644 plugins/inputs/ecs/stats_test.go create mode 100644 plugins/inputs/ecs/testdata/metadata.golden create mode 100644 plugins/inputs/ecs/testdata/stats.golden create mode 100644 plugins/inputs/ecs/types.go create mode 100644 plugins/inputs/ecs/types_test.go diff --git a/README.md b/README.md index 1bd96896b..6b3931f42 100644 --- a/README.md +++ b/README.md @@ -165,6 +165,7 @@ For documentation on the latest development code see the [documentation index][d * [dns query time](./plugins/inputs/dns_query) * [docker](./plugins/inputs/docker) * [dovecot](./plugins/inputs/dovecot) +* [ecs](./plugins/inputs/ecs) * [elasticsearch](./plugins/inputs/elasticsearch) * [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios) * [fail2ban](./plugins/inputs/fail2ban) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 02002a4f0..47f977f32 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -31,6 +31,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/dns_query" _ "github.com/influxdata/telegraf/plugins/inputs/docker" _ "github.com/influxdata/telegraf/plugins/inputs/dovecot" + _ "github.com/influxdata/telegraf/plugins/inputs/ecs" _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch" _ "github.com/influxdata/telegraf/plugins/inputs/exec" _ "github.com/influxdata/telegraf/plugins/inputs/fail2ban" diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 10759fc3e..117aabfb4 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -549,10 +549,10 @@ func parseContainerStats( memfields["limit"] = stat.MemoryStats.Limit memfields["max_usage"] = stat.MemoryStats.MaxUsage - mem := calculateMemUsageUnixNoCache(stat.MemoryStats) + mem := CalculateMemUsageUnixNoCache(stat.MemoryStats) memLimit := float64(stat.MemoryStats.Limit) memfields["usage"] = uint64(mem) - memfields["usage_percent"] = calculateMemPercentUnixNoCache(memLimit, mem) + memfields["usage_percent"] = CalculateMemPercentUnixNoCache(memLimit, mem) } else { memfields["commit_bytes"] = stat.MemoryStats.Commit memfields["commit_peak_bytes"] = stat.MemoryStats.CommitPeak @@ -575,7 +575,7 @@ func parseContainerStats( if daemonOSType != "windows" { previousCPU := stat.PreCPUStats.CPUUsage.TotalUsage previousSystem 
:= stat.PreCPUStats.SystemUsage - cpuPercent := calculateCPUPercentUnix(previousCPU, previousSystem, stat) + cpuPercent := CalculateCPUPercentUnix(previousCPU, previousSystem, stat) cpufields["usage_percent"] = cpuPercent } else { cpuPercent := calculateCPUPercentWindows(stat) diff --git a/plugins/inputs/docker/stats_helpers.go b/plugins/inputs/docker/stats_helpers.go index b4c91e2fc..93ea2f219 100644 --- a/plugins/inputs/docker/stats_helpers.go +++ b/plugins/inputs/docker/stats_helpers.go @@ -4,7 +4,7 @@ package docker import "github.com/docker/docker/api/types" -func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { +func CalculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { var ( cpuPercent = 0.0 // calculate the change for the cpu usage of the container in between readings @@ -39,13 +39,13 @@ func calculateCPUPercentWindows(v *types.StatsJSON) float64 { return 0.00 } -// calculateMemUsageUnixNoCache calculate memory usage of the container. +// CalculateMemUsageUnixNoCache calculate memory usage of the container. // Page cache is intentionally excluded to avoid misinterpretation of the output. -func calculateMemUsageUnixNoCache(mem types.MemoryStats) float64 { +func CalculateMemUsageUnixNoCache(mem types.MemoryStats) float64 { return float64(mem.Usage - mem.Stats["cache"]) } -func calculateMemPercentUnixNoCache(limit float64, usedNoCache float64) float64 { +func CalculateMemPercentUnixNoCache(limit float64, usedNoCache float64) float64 { // MemoryStats.Limit will never be 0 unless the container is not running and we haven't // got any data from cgroup if limit != 0 { diff --git a/plugins/inputs/ecs/README.md b/plugins/inputs/ecs/README.md new file mode 100644 index 000000000..411322959 --- /dev/null +++ b/plugins/inputs/ecs/README.md @@ -0,0 +1,64 @@ +# ECS Input Plugin + +ECS, Fargate compatible, input plugin which uses the [ECS v2 metadata and stats API](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v2.html) +endpoints to gather stats on running containers in a Task. + +The telegraf container must be run in the same Task as the workload it is inspecting. + +This is similar to (and reuses a few pieces of) the [Docker](../docker/README.md) input plugin, with some ECS specific modifications for AWS metadata and stats formats. + + +### Configuration: + +```toml +# Read metrics about ECS containers +[[inputs.ecs]] + # endpoint_url = http:// + ## Containers to include and exclude. Globs accepted. + ## Note that an empty array for both will include all containers + container_name_include = [] + container_name_exclude = [] + + ## Container states to include and exclude. Globs accepted. + ## When empty only containers in the "running" state will be captured. + # container_status_include = [] + # container_status_exclude = [] + + ## ecs labels to include and exclude as tags. Globs accepted. 
+ ## Note that an empty array for both will include all labels as tags + ecs_label_include = [ "com.amazonaws.ecs.*" ] + ecs_label_exclude = [] + + ## Timeout for docker list, info, and stats commands + timeout = "5s" +``` + +#### Environment Configuration + +The ECS client can optionally also be configured with the following env vars: + +``` +ECS_TIMEOUT +``` + + +### Example Output: + +``` +ecs_task_status,cluster=test,family=nginx,host=c4b301d4a123,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a revision="2",desired_status="RUNNING",known_status="RUNNING",limit_cpu=0.5,limit_mem=512 1542641488000000000 +ecs_container_mem,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a active_anon=40960i,active_file=8192i,cache=790528i,pgpgin=1243i,total_pgfault=1298i,total_rss=40960i,limit=1033658368i,max_usage=4825088i,hierarchical_memory_limit=536870912i,rss=40960i,total_active_file=8192i,total_mapped_file=618496i,usage_percent=0.05349543109392212,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",pgfault=1298i,pgmajfault=6i,pgpgout=1040i,total_active_anon=40960i,total_inactive_file=782336i,total_pgpgin=1243i,usage=552960i,inactive_file=782336i,mapped_file=618496i,total_cache=790528i,total_pgpgout=1040i 1542642001000000000 +ecs_container_cpu,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,cpu=cpu-total,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a usage_in_kernelmode=0i,throttling_throttled_periods=0i,throttling_periods=0i,throttling_throttled_time=0i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",usage_percent=0,usage_total=26426156i,usage_in_usermode=20000000i,usage_system=2336100000000i 1542642001000000000 +ecs_container_cpu,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,cpu=cpu0,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",usage_total=26426156i 1542642001000000000 
+ecs_container_net,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,network=eth0,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a rx_errors=0i,rx_packets=36i,tx_errors=0i,tx_bytes=648i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",rx_dropped=0i,rx_bytes=5338i,tx_packets=8i,tx_dropped=0i 1542642001000000000 +ecs_container_net,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,network=eth5,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a rx_errors=0i,tx_packets=9i,rx_packets=26i,tx_errors=0i,rx_bytes=4641i,tx_dropped=0i,tx_bytes=690i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",rx_dropped=0i 1542642001000000000 +ecs_container_net,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,network=total,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a rx_dropped=0i,rx_bytes=9979i,rx_errors=0i,rx_packets=62i,tx_bytes=1338i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",tx_packets=17i,tx_dropped=0i,tx_errors=0i 1542642001000000000 +ecs_container_blkio,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,device=253:1,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a io_service_bytes_recursive_sync=790528i,io_service_bytes_recursive_total=790528i,io_serviced_recursive_sync=10i,io_serviced_recursive_write=0i,io_serviced_recursive_async=0i,io_serviced_recursive_total=10i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",io_service_bytes_recursive_read=790528i,io_service_bytes_recursive_write=0i,io_service_bytes_recursive_async=0i,io_serviced_recursive_read=10i 1542642001000000000 
+ecs_container_blkio,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,device=253:2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a io_service_bytes_recursive_sync=790528i,io_service_bytes_recursive_total=790528i,io_serviced_recursive_async=0i,io_serviced_recursive_total=10i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",io_service_bytes_recursive_read=790528i,io_service_bytes_recursive_write=0i,io_service_bytes_recursive_async=0i,io_serviced_recursive_read=10i,io_serviced_recursive_write=0i,io_serviced_recursive_sync=10i 1542642001000000000 +ecs_container_blkio,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,device=253:4,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a io_service_bytes_recursive_write=0i,io_service_bytes_recursive_sync=790528i,io_service_bytes_recursive_async=0i,io_service_bytes_recursive_total=790528i,io_serviced_recursive_async=0i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",io_service_bytes_recursive_read=790528i,io_serviced_recursive_read=10i,io_serviced_recursive_write=0i,io_serviced_recursive_sync=10i,io_serviced_recursive_total=10i 1542642001000000000 +ecs_container_blkio,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,device=202:26368,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a io_serviced_recursive_read=10i,io_serviced_recursive_write=0i,io_serviced_recursive_sync=10i,io_serviced_recursive_async=0i,io_serviced_recursive_total=10i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",io_service_bytes_recursive_sync=790528i,io_service_bytes_recursive_total=790528i,io_service_bytes_recursive_async=0i,io_service_bytes_recursive_read=790528i,io_service_bytes_recursive_write=0i 1542642001000000000 
+ecs_container_blkio,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,device=total,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a io_serviced_recursive_async=0i,io_serviced_recursive_read=40i,io_serviced_recursive_sync=40i,io_serviced_recursive_write=0i,io_serviced_recursive_total=40i,io_service_bytes_recursive_read=3162112i,io_service_bytes_recursive_write=0i,io_service_bytes_recursive_async=0i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",io_service_bytes_recursive_sync=3162112i,io_service_bytes_recursive_total=3162112i 1542642001000000000 +ecs_container_meta,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a limit_mem=0,type="CNI_PAUSE",container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",docker_name="ecs-nginx-2-internalecspause",limit_cpu=0,known_status="RESOURCES_PROVISIONED",image="amazon/amazon-ecs-pause:0.1.0",image_id="",desired_status="RESOURCES_PROVISIONED" 1542642001000000000 +``` + +### Notes: +- the amazon-ecs-agent (though it _is_ a container running on the host) is not present in the metadata/stats endpoints. 
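Before the client code below, it helps to see how small the surface really is: from inside a task, the v2 metadata and stats documents are plain unauthenticated HTTP GETs against a fixed link-local address. A minimal sketch that fetches and decodes a few metadata fields (the plugin's `Task` type carries the full document):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Only reachable from inside an ECS task; the address is the fixed
	// link-local metadata endpoint from the plugin's default config.
	client := &http.Client{Timeout: 5 * time.Second}

	resp, err := client.Get("http://169.254.170.2/v2/metadata")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// A few of the fields the plugin's Task type decodes in full.
	var task struct {
		Cluster       string
		Family        string
		Revision      string
		KnownStatus   string
	}
	if err := json.NewDecoder(resp.Body).Decode(&task); err != nil {
		panic(err)
	}
	fmt.Printf("%s rev %s (%s) on cluster %s\n",
		task.Family, task.Revision, task.KnownStatus, task.Cluster)

	// /v2/stats returns a JSON map of container ID -> Docker StatsJSON,
	// which is why the plugin can reuse the docker input's calculations.
}
```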
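The docker helpers exported earlier in this patch (`CalculateCPUPercentUnix`, `CalculateMemUsageUnixNoCache`, `CalculateMemPercentUnixNoCache`) are what let this plugin compute container CPU and memory exactly the way the docker input does. The CPU formula, reduced to plain numbers under the same semantics (deltas between consecutive reads, scaled by online CPUs), is sketched below:

```go
package ecssketch

// cpuPercent mirrors the shape of docker.CalculateCPUPercentUnix: the
// container's usage delta over the system usage delta, scaled by the CPU
// count, as a percentage. Zero or negative deltas report 0.
func cpuPercent(prevContainer, prevSystem, container, system uint64, onlineCPUs int) float64 {
	containerDelta := float64(container) - float64(prevContainer)
	systemDelta := float64(system) - float64(prevSystem)
	if containerDelta <= 0 || systemDelta <= 0 {
		return 0
	}
	return (containerDelta / systemDelta) * float64(onlineCPUs) * 100
}
```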
\ No newline at end of file diff --git a/plugins/inputs/ecs/client.go b/plugins/inputs/ecs/client.go new file mode 100644 index 000000000..eba0b0856 --- /dev/null +++ b/plugins/inputs/ecs/client.go @@ -0,0 +1,124 @@ +package ecs + +import ( + "log" + "net/http" + "net/url" + "os" + "time" + + "github.com/docker/docker/api/types" +) + +var ( + ecsMetadataPath, _ = url.Parse("/v2/metadata") + ecsMetaStatsPath, _ = url.Parse("/v2/stats") +) + +// Client is the ECS client contract +type Client interface { + Task() (*Task, error) + ContainerStats() (map[string]types.StatsJSON, error) +} + +type httpClient interface { + Do(req *http.Request) (*http.Response, error) +} + +// NewEnvClient configures a new Client from the env +func NewEnvClient() (*EcsClient, error) { + timeout := 5 * time.Second + if t := os.Getenv("ECS_TIMEOUT"); t != "" { + if d, err := time.ParseDuration(t); err == nil { + timeout = d + } + } + + return NewClient( + timeout, + ) +} + +// NewClient constructs an ECS client with the passed configuration params +func NewClient(timeout time.Duration) (*EcsClient, error) { + c := &http.Client{ + Timeout: timeout, + } + + return &EcsClient{ + client: c, + }, nil +} + +// EcsClient contains ECS connection config +type EcsClient struct { + client httpClient + BaseURL *url.URL + taskURL string + statsURL string +} + +// Task calls the ECS metadata endpoint and returns a populated Task +func (c *EcsClient) Task() (*Task, error) { + if c.taskURL == "" { + c.taskURL = c.BaseURL.ResolveReference(ecsMetadataPath).String() + } + + req, _ := http.NewRequest("GET", c.taskURL, nil) + resp, err := c.client.Do(req) + + if err != nil { + log.Println("failed to GET metadata endpoint", err) + return nil, err + } + + task, err := unmarshalTask(resp.Body) + if err != nil { + log.Println("failed to decode response from metadata endpoint", err) + return nil, err + } + + return task, nil +} + +// ContainerStats calls the ECS stats endpoint and returns a populated container stats map +func (c *EcsClient) ContainerStats() (map[string]types.StatsJSON, error) { + if c.statsURL == "" { + c.statsURL = c.BaseURL.ResolveReference(ecsMetaStatsPath).String() + } + + req, _ := http.NewRequest("GET", c.statsURL, nil) + resp, err := c.client.Do(req) + + if err != nil { + log.Println("failed to GET stats endpoint", err) + return map[string]types.StatsJSON{}, err + } + + statsMap, err := unmarshalStats(resp.Body) + if err != nil { + log.Println("failed to decode response from stats endpoint") + return map[string]types.StatsJSON{}, err + } + + return statsMap, nil +} + +// PollSync executes Task and ContainerStats in parallel. If both succeed, both structs are returned. +// If either errors, a single error is returned. 
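+// Despite the name, the two requests below are issued back-to-back rather
+// than concurrently, so the snapshots describe roughly the same instant and
+// an error from either endpoint aborts the poll.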
+func PollSync(c Client) (*Task, map[string]types.StatsJSON, error) { + + var task *Task + var stats map[string]types.StatsJSON + var err error + + if stats, err = c.ContainerStats(); err != nil { + return nil, nil, err + } + + if task, err = c.Task(); err != nil { + return nil, nil, err + } + + return task, stats, nil +} diff --git a/plugins/inputs/ecs/client_test.go b/plugins/inputs/ecs/client_test.go new file mode 100644 index 000000000..d6fbd1165 --- /dev/null +++ b/plugins/inputs/ecs/client_test.go @@ -0,0 +1,211 @@ +package ecs + +import ( + "bytes" + "errors" + "io/ioutil" + "net/http" + "os" + "testing" + + "github.com/docker/docker/api/types" + "github.com/stretchr/testify/assert" +) + +type pollMock struct { + task func() (*Task, error) + stats func() (map[string]types.StatsJSON, error) +} + +func (p *pollMock) Task() (*Task, error) { + return p.task() +} + +func (p *pollMock) ContainerStats() (map[string]types.StatsJSON, error) { + return p.stats() +} + +func TestEcsClient_PollSync(t *testing.T) { + + tests := []struct { + name string + mock *pollMock + want *Task + want1 map[string]types.StatsJSON + wantErr bool + }{ + { + name: "success", + mock: &pollMock{ + task: func() (*Task, error) { + return &validMeta, nil + }, + stats: func() (map[string]types.StatsJSON, error) { + return validStats, nil + }, + }, + want: &validMeta, + want1: validStats, + }, + { + name: "task err", + mock: &pollMock{ + task: func() (*Task, error) { + return nil, errors.New("err") + }, + stats: func() (map[string]types.StatsJSON, error) { + return validStats, nil + }, + }, + wantErr: true, + }, + { + name: "stats err", + mock: &pollMock{ + task: func() (*Task, error) { + return &validMeta, nil + }, + stats: func() (map[string]types.StatsJSON, error) { + return nil, errors.New("err") + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, got1, err := PollSync(tt.mock) + + if (err != nil) != tt.wantErr { + t.Errorf("EcsClient.PollSync() error = %v, wantErr %v", err, tt.wantErr) + return + } + assert.Equal(t, tt.want, got, "EcsClient.PollSync() got = %v, want %v", got, tt.want) + assert.Equal(t, tt.want1, got1, "EcsClient.PollSync() got1 = %v, want %v", got1, tt.want1) + }) + } +} + +type mockDo struct { + do func(req *http.Request) (*http.Response, error) +} + +func (m mockDo) Do(req *http.Request) (*http.Response, error) { + return m.do(req) +} + +func TestEcsClient_Task(t *testing.T) { + rc, _ := os.Open("testdata/metadata.golden") + tests := []struct { + name string + client httpClient + want *Task + wantErr bool + }{ + { + name: "happy", + client: mockDo{ + do: func(req *http.Request) (*http.Response, error) { + return &http.Response{ + Body: ioutil.NopCloser(rc), + }, nil + }, + }, + want: &validMeta, + }, + { + name: "do err", + client: mockDo{ + do: func(req *http.Request) (*http.Response, error) { + return nil, errors.New("err") + }, + }, + wantErr: true, + }, + { + name: "malformed resp", + client: mockDo{ + do: func(req *http.Request) (*http.Response, error) { + return &http.Response{ + Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + }, nil + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &EcsClient{ + client: tt.client, + taskURL: "abc", + } + got, err := c.Task() + if (err != nil) != tt.wantErr { + t.Errorf("EcsClient.Task() error = %v, wantErr %v", err, tt.wantErr) + return + } + assert.Equal(t, tt.want, got, "EcsClient.Task() = %v, want %v", got, tt.want) + }) + } 
+} + +func TestEcsClient_ContainerStats(t *testing.T) { + rc, _ := os.Open("testdata/stats.golden") + tests := []struct { + name string + client httpClient + want map[string]types.StatsJSON + wantErr bool + }{ + { + name: "happy", + client: mockDo{ + do: func(req *http.Request) (*http.Response, error) { + return &http.Response{ + Body: ioutil.NopCloser(rc), + }, nil + }, + }, + want: validStats, + }, + { + name: "do err", + client: mockDo{ + do: func(req *http.Request) (*http.Response, error) { + return nil, errors.New("err") + }, + }, + want: map[string]types.StatsJSON{}, + wantErr: true, + }, + { + name: "malformed resp", + client: mockDo{ + do: func(req *http.Request) (*http.Response, error) { + return &http.Response{ + Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + }, nil + }, + }, + want: map[string]types.StatsJSON{}, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &EcsClient{ + client: tt.client, + statsURL: "abc", + } + got, err := c.ContainerStats() + if (err != nil) != tt.wantErr { + t.Errorf("EcsClient.ContainerStats() error = %v, wantErr %v", err, tt.wantErr) + return + } + assert.Equal(t, tt.want, got, "EcsClient.ContainerStats() = %v, want %v", got, tt.want) + }) + } +} diff --git a/plugins/inputs/ecs/ecs.go b/plugins/inputs/ecs/ecs.go new file mode 100644 index 000000000..36a51229a --- /dev/null +++ b/plugins/inputs/ecs/ecs.go @@ -0,0 +1,251 @@ +package ecs + +import ( + "log" + "net/url" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Ecs config object +type Ecs struct { + EndpointURL string `toml:"endpoint_url"` + EnvCfg bool `toml:"envcfg"` + Timeout internal.Duration + + ContainerNameInclude []string `toml:"container_name_include"` + ContainerNameExclude []string `toml:"container_name_exclude"` + + ContainerStatusInclude []string `toml:"container_status_include"` + ContainerStatusExclude []string `toml:"container_status_exclude"` + + LabelInclude []string `toml:"ecs_label_include"` + LabelExclude []string `toml:"ecs_label_exclude"` + + newEnvClient func() (*EcsClient, error) + newClient func(timeout time.Duration) (*EcsClient, error) + + client Client + filtersCreated bool + labelFilter filter.Filter + containerNameFilter filter.Filter + statusFilter filter.Filter +} + +const ( + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB +) + +var sampleConfig = ` + ## ECS metadata url + # endpoint_url = "http://169.254.170.2" + + ## Set to true to configure from env vars + envcfg = false + + ## Containers to include and exclude. Globs accepted. + ## Note that an empty array for both will include all containers + container_name_include = [] + container_name_exclude = [] + + ## Container states to include and exclude. Globs accepted. + ## When empty only containers in the "running" state will be captured. + # container_status_include = [] + # container_status_exclude = [] + + ## ecs labels to include and exclude as tags. Globs accepted. + ## Note that an empty array for both will include all labels as tags + ecs_label_include = [ "com.amazonaws.ecs.*" ] + ecs_label_exclude = [] + + ## Timeout for docker list, info, and stats commands + timeout = "5s" +` + +// Description describes ECS plugin +func (ecs *Ecs) Description() string { + return "Read metrics about docker containers from Fargate/ECS v2 meta endpoints." 
+} + +// SampleConfig returns the ECS example config +func (ecs *Ecs) SampleConfig() string { + return sampleConfig +} + +// Gather is the entrypoint for telegraf metrics collection +func (ecs *Ecs) Gather(acc telegraf.Accumulator) error { + err := initSetup(ecs) + if err != nil { + return err + } + + task, stats, err := PollSync(ecs.client) + if err != nil { + return err + } + + mergeTaskStats(task, stats) + + taskTags := map[string]string{ + "cluster": task.Cluster, + "task_arn": task.TaskARN, + "family": task.Family, + "revision": task.Revision, + } + + // accumulate metrics + ecs.accTask(task, taskTags, acc) + ecs.accContainers(task, taskTags, acc) + + return nil +} + +func initSetup(ecs *Ecs) error { + if ecs.client == nil { + var c *EcsClient + var err error + if ecs.EnvCfg { + c, err = ecs.newEnvClient() + } else { + c, err = ecs.newClient(ecs.Timeout.Duration) + } + if err != nil { + return err + } + + c.BaseURL, err = url.Parse(ecs.EndpointURL) + if err != nil { + return err + } + + ecs.client = c + } + + // Create filters + if !ecs.filtersCreated { + err := ecs.createContainerNameFilters() + if err != nil { + return err + } + err = ecs.createContainerStatusFilters() + if err != nil { + return err + } + err = ecs.createLabelFilters() + if err != nil { + return err + } + ecs.filtersCreated = true + } + + return nil +} + +func (ecs *Ecs) accTask(task *Task, tags map[string]string, acc telegraf.Accumulator) { + taskFields := map[string]interface{}{ + "revision": task.Revision, + "desired_status": task.DesiredStatus, + "known_status": task.KnownStatus, + "limit_cpu": task.Limits["CPU"], + "limit_mem": task.Limits["Memory"], + } + + acc.AddFields("ecs_task", taskFields, tags, task.PullStoppedAt) +} + +func (ecs *Ecs) accContainers(task *Task, taskTags map[string]string, acc telegraf.Accumulator) { + for _, c := range task.Containers { + if !ecs.containerNameFilter.Match(c.Name) { + log.Printf("container %v did not match name filter", c.ID) + continue + } + + if !ecs.statusFilter.Match(c.KnownStatus) { + log.Printf("container %v did not match status filter", c.ID) + continue + } + + // add matching ECS container Labels + containerTags := map[string]string{ + "id": c.ID, + "name": c.Name, + } + for k, v := range c.Labels { + if ecs.labelFilter.Match(k) { + containerTags[k] = v + } + } + tags := mergeTags(taskTags, containerTags) + + parseContainerStats(c, acc, tags) + } +} + +// returns a new map with the same content values as the input map +func copyTags(in map[string]string) map[string]string { + out := make(map[string]string) + for k, v := range in { + out[k] = v + } + return out +} + +// returns a new map with the merged content values of the two input maps +func mergeTags(a map[string]string, b map[string]string) map[string]string { + c := copyTags(a) + for k, v := range b { + c[k] = v + } + return c +} + +func (ecs *Ecs) createContainerNameFilters() error { + filter, err := filter.NewIncludeExcludeFilter(ecs.ContainerNameInclude, ecs.ContainerNameExclude) + if err != nil { + return err + } + ecs.containerNameFilter = filter + return nil +} + +func (ecs *Ecs) createLabelFilters() error { + filter, err := filter.NewIncludeExcludeFilter(ecs.LabelInclude, ecs.LabelExclude) + if err != nil { + return err + } + ecs.labelFilter = filter + return nil +} + +func (ecs *Ecs) createContainerStatusFilters() error { + if len(ecs.ContainerStatusInclude) == 0 && len(ecs.ContainerStatusExclude) == 0 { + ecs.ContainerStatusInclude = []string{"running"} + } + filter, err := 
filter.NewIncludeExcludeFilter(ecs.ContainerStatusInclude, ecs.ContainerStatusExclude) + if err != nil { + return err + } + ecs.statusFilter = filter + return nil +} + +func init() { + inputs.Add("ecs", func() telegraf.Input { + return &Ecs{ + EndpointURL: "http://169.254.170.2", + Timeout: internal.Duration{Duration: 5 * time.Second}, + EnvCfg: true, + newEnvClient: NewEnvClient, + newClient: NewClient, + filtersCreated: false, + } + }) +} diff --git a/plugins/inputs/ecs/ecs_test.go b/plugins/inputs/ecs/ecs_test.go new file mode 100644 index 000000000..b105a433f --- /dev/null +++ b/plugins/inputs/ecs/ecs_test.go @@ -0,0 +1,767 @@ +package ecs + +import ( + "time" + + "github.com/docker/docker/api/types" +) + +// codified golden objects for tests + +// stats +const pauseStatsKey = "e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba" +const nginxStatsKey = "fffe894e232d46c76475cfeabf4907f712e8b92618a37fca3ef0805bbbfb0299" + +var pauseStatsRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:40:00.936081344Z") +var pauseStatsPreRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:39:59.933000984Z") + +var nginxStatsRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:40:00.93733207Z") +var nginxStatsPreRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:39:59.934291009Z") + +var validStats = map[string]types.StatsJSON{ + pauseStatsKey: { + Stats: types.Stats{ + Read: pauseStatsRead, + PreRead: pauseStatsPreRead, + BlkioStats: types.BlkioStats{ + IoServiceBytesRecursive: []types.BlkioStatEntry{ + { + Major: 202, + Minor: 26368, + Op: "Read", + Value: 790528, + }, + { + Major: 202, + Minor: 26368, + Op: "Write", + }, + { + Major: 202, + Minor: 26368, + Op: "Sync", + Value: 790528, + }, + { + Major: 202, + Minor: 26368, + Op: "Async", + }, + { + Major: 202, + Minor: 26368, + Op: "Total", + Value: 790528, + }, + { + Major: 253, + Minor: 1, + Op: "Read", + Value: 790528, + }, + { + Major: 253, + Minor: 1, + Op: "Write", + }, + { + Major: 253, + Minor: 1, + Op: "Sync", + Value: 790528, + }, + { + Major: 253, + Minor: 1, + Op: "Async", + }, + { + Major: 253, + Minor: 1, + Op: "Total", + Value: 790528, + }, + { + Major: 253, + Minor: 2, + Op: "Read", + Value: 790528, + }, + { + Major: 253, + Minor: 2, + Op: "Write", + }, + { + Major: 253, + Minor: 2, + Op: "Sync", + Value: 790528, + }, + { + Major: 253, + Minor: 2, + Op: "Async", + }, + { + Major: 253, + Minor: 2, + Op: "Total", + Value: 790528, + }, + { + Major: 253, + Minor: 4, + Op: "Read", + Value: 790528, + }, + { + Major: 253, + Minor: 4, + Op: "Write", + }, + { + Major: 253, + Minor: 4, + Op: "Sync", + Value: 790528, + }, + { + Major: 253, + Minor: 4, + Op: "Async", + }, + { + Major: 253, + Minor: 4, + Op: "Total", + Value: 790528, + }, + }, + IoServicedRecursive: []types.BlkioStatEntry{ + { + Major: 202, + Minor: 26368, + Op: "Read", + Value: 10, + }, + { + Major: 202, + Minor: 26368, + Op: "Write", + }, + { + Major: 202, + Minor: 26368, + Op: "Sync", + Value: 10, + }, + { + Major: 202, + Minor: 26368, + Op: "Async", + }, + { + Major: 202, + Minor: 26368, + Op: "Total", + Value: 10, + }, + { + Major: 253, + Minor: 1, + Op: "Read", + Value: 10, + }, + { + Major: 253, + Minor: 1, + Op: "Write", + }, + { + Major: 253, + Minor: 1, + Op: "Sync", + Value: 10, + }, + { + Major: 253, + Minor: 1, + Op: "Async", + }, + { + Major: 253, + Minor: 1, + Op: "Total", + Value: 10, + }, + { + Major: 253, + Minor: 2, + Op: "Read", + Value: 10, + }, + { + Major: 253, + Minor: 2, + Op: "Write", + }, + { + Major: 253, + Minor: 2, + Op: "Sync", 
+ Value: 10, + }, + { + Major: 253, + Minor: 2, + Op: "Async", + }, + { + Major: 253, + Minor: 2, + Op: "Total", + Value: 10, + }, + { + Major: 253, + Minor: 4, + Op: "Read", + Value: 10, + }, + { + Major: 253, + Minor: 4, + Op: "Write", + }, + { + Major: 253, + Minor: 4, + Op: "Sync", + Value: 10, + }, + { + Major: 253, + Minor: 4, + Op: "Async", + }, + { + Major: 253, + Minor: 4, + Op: "Total", + Value: 10, + }, + }, + }, + CPUStats: types.CPUStats{ + CPUUsage: types.CPUUsage{ + PercpuUsage: []uint64{ + 26426156, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + }, + UsageInUsermode: 20000000, + TotalUsage: 26426156, + }, + SystemUsage: 2336100000000, + OnlineCPUs: 1, + ThrottlingData: types.ThrottlingData{}, + }, + PreCPUStats: types.CPUStats{ + CPUUsage: types.CPUUsage{ + PercpuUsage: []uint64{ + 26426156, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + }, + UsageInUsermode: 20000000, + TotalUsage: 26426156, + }, + SystemUsage: 2335090000000, + OnlineCPUs: 1, + ThrottlingData: types.ThrottlingData{}, + }, + MemoryStats: types.MemoryStats{ + Stats: map[string]uint64{ + "cache": 790528, + "mapped_file": 618496, + "total_inactive_file": 782336, + "pgpgout": 1040, + "rss": 40960, + "total_mapped_file": 618496, + "pgpgin": 1243, + "pgmajfault": 6, + "total_rss": 40960, + "hierarchical_memory_limit": 536870912, + "total_pgfault": 1298, + "total_active_file": 8192, + "active_anon": 40960, + "total_active_anon": 40960, + "total_pgpgout": 1040, + "total_cache": 790528, + "active_file": 8192, + "pgfault": 1298, + "inactive_file": 782336, + "total_pgpgin": 1243, + "hierarchical_memsw_limit": 9223372036854772000, + }, + MaxUsage: 4825088, + Usage: 1343488, + Limit: 1033658368, + }, + }, + Networks: map[string]types.NetworkStats{ + "eth0": { + RxBytes: uint64(5338), + RxDropped: uint64(0), + RxErrors: uint64(0), + RxPackets: uint64(36), + TxBytes: uint64(648), + TxDropped: uint64(0), + TxErrors: uint64(0), + TxPackets: uint64(8), + }, + "eth5": { + RxBytes: uint64(4641), + RxDropped: uint64(0), + RxErrors: uint64(0), + RxPackets: uint64(26), + TxBytes: uint64(690), + TxDropped: uint64(0), + TxErrors: uint64(0), + TxPackets: uint64(9), + }, + }, + }, + nginxStatsKey: { + Stats: types.Stats{ + Read: nginxStatsRead, + PreRead: nginxStatsPreRead, + BlkioStats: types.BlkioStats{ + IoServiceBytesRecursive: []types.BlkioStatEntry{ + { + Major: 202, + Minor: 26368, + Op: "Read", + Value: 5730304, + }, + { + Major: 202, + Minor: 26368, + Op: "Write", + }, + { + Major: 202, + Minor: 26368, + Op: "Sync", + Value: 5730304, + }, + { + Major: 202, + Minor: 26368, + Op: "Async", + }, + { + Major: 202, + Minor: 26368, + Op: "Total", + Value: 5730304, + }, + { + Major: 253, + Minor: 1, + Op: "Read", + Value: 5730304, + }, + { + Major: 253, + Minor: 1, + Op: "Write", + }, + { + Major: 253, + Minor: 1, + Op: "Sync", + Value: 5730304, + }, + { + Major: 253, + Minor: 1, + Op: "Async", + }, + { + Major: 253, + Minor: 1, + Op: "Total", + Value: 5730304, + }, + { + Major: 253, + Minor: 2, + Op: "Read", + Value: 5730304, + }, + { + Major: 253, + Minor: 2, + Op: "Write", + }, + { + Major: 253, + Minor: 2, + Op: "Sync", + Value: 5730304, + }, + { + Major: 253, + Minor: 2, + Op: "Async", + }, + { + Major: 253, + Minor: 2, + Op: "Total", + Value: 5730304, + }, + { + Major: 253, + Minor: 5, + Op: "Read", + Value: 5730304, + }, + { + Major: 253, + Minor: 5, + Op: "Write", + }, + { + Major: 253, + Minor: 5, + Op: "Sync", + Value: 5730304, + }, + { + Major: 253, + Minor: 5, + Op: 
"Async", + }, + { + Major: 253, + Minor: 5, + Op: "Total", + Value: 5730304, + }, + }, + IoServicedRecursive: []types.BlkioStatEntry{ + { + Major: 202, + Minor: 26368, + Op: "Read", + Value: 156, + }, + { + Major: 202, + Minor: 26368, + Op: "Write", + }, + { + Major: 202, + Minor: 26368, + Op: "Sync", + Value: 156, + }, + { + Major: 202, + Minor: 26368, + Op: "Async", + }, + { + Major: 202, + Minor: 26368, + Op: "Total", + Value: 156, + }, + { + Major: 253, + Minor: 1, + Op: "Read", + Value: 156, + }, + { + Major: 253, + Minor: 1, + Op: "Write", + }, + { + Major: 253, + Minor: 1, + Op: "Sync", + Value: 156, + }, + { + Major: 253, + Minor: 1, + Op: "Async", + }, + { + Major: 253, + Minor: 1, + Op: "Total", + Value: 156, + }, + { + Major: 253, + Minor: 2, + Op: "Read", + Value: 156, + }, + { + Major: 253, + Minor: 2, + Op: "Write", + }, + { + Major: 253, + Minor: 2, + Op: "Sync", + Value: 156, + }, + { + Major: 253, + Minor: 2, + Op: "Async", + }, + { + Major: 253, + Minor: 2, + Op: "Total", + Value: 156, + }, + { + Major: 253, + Minor: 5, + Op: "Read", + Value: 147, + }, + { + Major: 253, + Minor: 5, + Op: "Write", + }, + { + Major: 253, + Minor: 5, + Op: "Sync", + Value: 147, + }, + { + Major: 253, + Minor: 5, + Op: "Async", + }, + { + Major: 253, + Minor: 5, + Op: "Total", + Value: 147, + }, + }, + }, + CPUStats: types.CPUStats{ + CPUUsage: types.CPUUsage{ + PercpuUsage: []uint64{ + 65599511, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + }, + UsageInUsermode: 40000000, + TotalUsage: 65599511, + UsageInKernelmode: 10000000, + }, + SystemUsage: 2336100000000, + OnlineCPUs: 1, + ThrottlingData: types.ThrottlingData{}, + }, + PreCPUStats: types.CPUStats{ + CPUUsage: types.CPUUsage{ + PercpuUsage: []uint64{ + 65599511, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + }, + UsageInUsermode: 40000000, + TotalUsage: 65599511, + UsageInKernelmode: 10000000, + }, + SystemUsage: 2335090000000, + OnlineCPUs: 1, + ThrottlingData: types.ThrottlingData{}, + }, + MemoryStats: types.MemoryStats{ + Stats: map[string]uint64{ + "cache": 5787648, + "mapped_file": 3616768, + "total_inactive_file": 4321280, + "pgpgout": 1674, + "rss": 1597440, + "total_mapped_file": 3616768, + "pgpgin": 3477, + "pgmajfault": 40, + "total_rss": 1597440, + "total_inactive_anon": 4096, + "hierarchical_memory_limit": 536870912, + "total_pgfault": 2924, + "total_active_file": 1462272, + "active_anon": 1597440, + "total_active_anon": 1597440, + "total_pgpgout": 1674, + "total_cache": 5787648, + "inactive_anon": 4096, + "active_file": 1462272, + "pgfault": 2924, + "inactive_file": 4321280, + "total_pgpgin": 3477, + "hierarchical_memsw_limit": 9223372036854772000, + }, + MaxUsage: 8667136, + Usage: 8179712, + Limit: 1033658368, + }, + }, + }, +} + +// meta +var metaPauseCreated, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:26.641964373Z") +var metaPauseStarted, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.035698679Z") +var metaCreated, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.614884084Z") +var metaStarted, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.975996351Z") +var metaPullStart, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.197327103Z") +var metaPullStop, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.609089471Z") + +var validMeta = Task{ + Cluster: "test", + TaskARN: "arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a", + Family: "nginx", + Revision: "2", + DesiredStatus: "RUNNING", + KnownStatus: 
"RUNNING", + Containers: []Container{ + { + ID: pauseStatsKey, + Name: "~internal~ecs~pause", + DockerName: "ecs-nginx-2-internalecspause", + Image: "amazon/amazon-ecs-pause:0.1.0", + ImageID: "", + Labels: map[string]string{ + "com.amazonaws.ecs.cluster": "test", + "com.amazonaws.ecs.container-name": "~internal~ecs~pause", + "com.amazonaws.ecs.task-arn": "arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a", + "com.amazonaws.ecs.task-definition-family": "nginx", + "com.amazonaws.ecs.task-definition-version": "2", + }, + DesiredStatus: "RESOURCES_PROVISIONED", + KnownStatus: "RESOURCES_PROVISIONED", + Limits: map[string]float64{ + "CPU": 0, + "Memory": 0, + }, + CreatedAt: metaPauseCreated, + StartedAt: metaPauseStarted, + Type: "CNI_PAUSE", + Networks: []Network{ + { + NetworkMode: "awsvpc", + IPv4Addresses: []string{ + "172.31.25.181", + }, + }, + }, + }, + { + ID: nginxStatsKey, + Name: "nginx", + DockerName: "ecs-nginx-2-nginx", + Image: "nginx:alpine", + ImageID: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + Labels: map[string]string{ + "com.amazonaws.ecs.cluster": "test", + "com.amazonaws.ecs.container-name": "nginx", + "com.amazonaws.ecs.task-arn": "arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a", + "com.amazonaws.ecs.task-definition-family": "nginx", + "com.amazonaws.ecs.task-definition-version": "2", + }, + DesiredStatus: "RUNNING", + KnownStatus: "RUNNING", + Limits: map[string]float64{ + "CPU": 0, + "Memory": 0, + }, + CreatedAt: metaCreated, + StartedAt: metaStarted, + Type: "NORMAL", + Networks: []Network{ + { + NetworkMode: "awsvpc", + IPv4Addresses: []string{ + "172.31.25.181", + }, + }, + }, + }, + }, + Limits: map[string]float64{ + "CPU": 0.5, + "Memory": 512, + }, + PullStartedAt: metaPullStart, + PullStoppedAt: metaPullStop, +} diff --git a/plugins/inputs/ecs/stats.go b/plugins/inputs/ecs/stats.go new file mode 100644 index 000000000..d2a8ee5d3 --- /dev/null +++ b/plugins/inputs/ecs/stats.go @@ -0,0 +1,295 @@ +package ecs + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs/docker" +) + +func parseContainerStats(c Container, acc telegraf.Accumulator, tags map[string]string) { + id := c.ID + stats := c.Stats + tm := stats.Read + + if tm.Before(time.Unix(0, 0)) { + tm = time.Now() + } + + metastats(id, c, acc, tags, tm) + memstats(id, stats, acc, tags, tm) + cpustats(id, stats, acc, tags, tm) + netstats(id, stats, acc, tags, tm) + blkstats(id, stats, acc, tags, tm) +} + +func metastats(id string, c Container, acc telegraf.Accumulator, tags map[string]string, tm time.Time) { + metafields := map[string]interface{}{ + "container_id": id, + "docker_name": c.DockerName, + "image": c.Image, + "image_id": c.ImageID, + "desired_status": c.DesiredStatus, + "known_status": c.KnownStatus, + "limit_cpu": c.Limits["CPU"], + "limit_mem": c.Limits["Memory"], + "created_at": c.CreatedAt, + "started_at": c.StartedAt, + "type": c.Type, + } + + acc.AddFields("ecs_container_meta", metafields, tags, tm) +} + +func memstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) { + memfields := map[string]interface{}{ + "container_id": id, + } + + memstats := []string{ + "active_anon", + "active_file", + "cache", + "hierarchical_memory_limit", + "inactive_anon", + "inactive_file", + "mapped_file", + "pgfault", + "pgmajfault", + "pgpgin", + "pgpgout", 
+ "rss", + "rss_huge", + "total_active_anon", + "total_active_file", + "total_cache", + "total_inactive_anon", + "total_inactive_file", + "total_mapped_file", + "total_pgfault", + "total_pgmajfault", + "total_pgpgin", + "total_pgpgout", + "total_rss", + "total_rss_huge", + "total_unevictable", + "total_writeback", + "unevictable", + "writeback", + } + + for _, field := range memstats { + if value, ok := stats.MemoryStats.Stats[field]; ok { + memfields[field] = value + } + } + if stats.MemoryStats.Failcnt != 0 { + memfields["fail_count"] = stats.MemoryStats.Failcnt + } + + memfields["limit"] = stats.MemoryStats.Limit + memfields["max_usage"] = stats.MemoryStats.MaxUsage + + mem := docker.CalculateMemUsageUnixNoCache(stats.MemoryStats) + memLimit := float64(stats.MemoryStats.Limit) + memfields["usage"] = uint64(mem) + memfields["usage_percent"] = docker.CalculateMemPercentUnixNoCache(memLimit, mem) + + acc.AddFields("ecs_container_mem", memfields, tags, tm) +} + +func cpustats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) { + cpufields := map[string]interface{}{ + "usage_total": stats.CPUStats.CPUUsage.TotalUsage, + "usage_in_usermode": stats.CPUStats.CPUUsage.UsageInUsermode, + "usage_in_kernelmode": stats.CPUStats.CPUUsage.UsageInKernelmode, + "usage_system": stats.CPUStats.SystemUsage, + "throttling_periods": stats.CPUStats.ThrottlingData.Periods, + "throttling_throttled_periods": stats.CPUStats.ThrottlingData.ThrottledPeriods, + "throttling_throttled_time": stats.CPUStats.ThrottlingData.ThrottledTime, + "container_id": id, + } + + previousCPU := stats.PreCPUStats.CPUUsage.TotalUsage + previousSystem := stats.PreCPUStats.SystemUsage + cpuPercent := docker.CalculateCPUPercentUnix(previousCPU, previousSystem, &stats) + cpufields["usage_percent"] = cpuPercent + + cputags := copyTags(tags) + cputags["cpu"] = "cpu-total" + acc.AddFields("ecs_container_cpu", cpufields, cputags, tm) + + // If we have OnlineCPUs field, then use it to restrict stats gathering to only Online CPUs + // (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400) + var percpuusage []uint64 + if stats.CPUStats.OnlineCPUs > 0 { + percpuusage = stats.CPUStats.CPUUsage.PercpuUsage[:stats.CPUStats.OnlineCPUs] + } else { + percpuusage = stats.CPUStats.CPUUsage.PercpuUsage + } + + for i, percpu := range percpuusage { + percputags := copyTags(tags) + percputags["cpu"] = fmt.Sprintf("cpu%d", i) + fields := map[string]interface{}{ + "usage_total": percpu, + "container_id": id, + } + acc.AddFields("ecs_container_cpu", fields, percputags, tm) + } +} + +func netstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) { + totalNetworkStatMap := make(map[string]interface{}) + for network, netstats := range stats.Networks { + netfields := map[string]interface{}{ + "rx_dropped": netstats.RxDropped, + "rx_bytes": netstats.RxBytes, + "rx_errors": netstats.RxErrors, + "tx_packets": netstats.TxPackets, + "tx_dropped": netstats.TxDropped, + "rx_packets": netstats.RxPackets, + "tx_errors": netstats.TxErrors, + "tx_bytes": netstats.TxBytes, + "container_id": id, + } + + nettags := copyTags(tags) + nettags["network"] = network + acc.AddFields("ecs_container_net", netfields, nettags, tm) + + for field, value := range netfields { + if field == "container_id" { + continue + } + + var uintV uint64 + switch v := value.(type) { + case uint64: + uintV = v + case int64: + uintV = uint64(v) + default: + continue + } + + _, ok := 
totalNetworkStatMap[field] + if ok { + totalNetworkStatMap[field] = totalNetworkStatMap[field].(uint64) + uintV + } else { + totalNetworkStatMap[field] = uintV + } + } + } + + // totalNetworkStatMap could be empty if container is running with --net=host. + if len(totalNetworkStatMap) != 0 { + nettags := copyTags(tags) + nettags["network"] = "total" + totalNetworkStatMap["container_id"] = id + acc.AddFields("ecs_container_net", totalNetworkStatMap, nettags, tm) + } +} + +func blkstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) { + blkioStats := stats.BlkioStats + // Make a map of devices to their block io stats + deviceStatMap := make(map[string]map[string]interface{}) + + for _, metric := range blkioStats.IoServiceBytesRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + _, ok := deviceStatMap[device] + if !ok { + deviceStatMap[device] = make(map[string]interface{}) + } + + field := fmt.Sprintf("io_service_bytes_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IoServicedRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + _, ok := deviceStatMap[device] + if !ok { + deviceStatMap[device] = make(map[string]interface{}) + } + + field := fmt.Sprintf("io_serviced_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IoQueuedRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("io_queue_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IoServiceTimeRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("io_service_time_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IoWaitTimeRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("io_wait_time_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IoMergedRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("io_merged_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IoTimeRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + deviceStatMap[device]["io_time_recursive"] = metric.Value + } + + for _, metric := range blkioStats.SectorsRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + deviceStatMap[device]["sectors_recursive"] = metric.Value + } + + totalStatMap := make(map[string]interface{}) + for device, fields := range deviceStatMap { + fields["container_id"] = id + + iotags := copyTags(tags) + iotags["device"] = device + acc.AddFields("ecs_container_blkio", fields, iotags, tm) + + for field, value := range fields { + if field == "container_id" { + continue + } + + var uintV uint64 + switch v := value.(type) { + case uint64: + uintV = v + case int64: + uintV = uint64(v) + default: + continue + } + + _, ok := totalStatMap[field] + if ok { + totalStatMap[field] = totalStatMap[field].(uint64) + uintV + } else { + totalStatMap[field] = uintV + } + + } + } + + totalStatMap["container_id"] = id + iotags := copyTags(tags) + iotags["device"] = "total" + 
acc.AddFields("ecs_container_blkio", totalStatMap, iotags, tm) +} diff --git a/plugins/inputs/ecs/stats_test.go b/plugins/inputs/ecs/stats_test.go new file mode 100644 index 000000000..04632ac61 --- /dev/null +++ b/plugins/inputs/ecs/stats_test.go @@ -0,0 +1,226 @@ +package ecs + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" +) + +func Test_metastats(t *testing.T) { + var mockAcc testutil.Accumulator + + tags := map[string]string{ + "test_tag": "test", + } + tm := time.Now() + + metastats(nginxStatsKey, validMeta.Containers[1], &mockAcc, tags, tm) + mockAcc.AssertContainsTaggedFields( + t, + "ecs_container_meta", + map[string]interface{}{ + "container_id": nginxStatsKey, + "docker_name": "ecs-nginx-2-nginx", + "image": "nginx:alpine", + "image_id": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "desired_status": "RUNNING", + "known_status": "RUNNING", + "limit_cpu": float64(0), + "limit_mem": float64(0), + "created_at": metaCreated, + "started_at": metaStarted, + "type": "NORMAL", + }, + tags, + ) +} + +func Test_memstats(t *testing.T) { + var mockAcc testutil.Accumulator + + tags := map[string]string{ + "test_tag": "test", + } + tm := time.Now() + + memstats(nginxStatsKey, validStats[nginxStatsKey], &mockAcc, tags, tm) + mockAcc.AssertContainsTaggedFields( + t, + "ecs_container_mem", + map[string]interface{}{ + "active_anon": uint64(1597440), + "active_file": uint64(1462272), + "cache": uint64(5787648), + "container_id": nginxStatsKey, + "hierarchical_memory_limit": uint64(536870912), + "inactive_anon": uint64(4096), + "inactive_file": uint64(4321280), + "limit": uint64(1033658368), + "mapped_file": uint64(3616768), + "max_usage": uint64(8667136), + "pgmajfault": uint64(40), + "pgpgin": uint64(3477), + "pgpgout": uint64(1674), + "pgfault": uint64(2924), + "rss": uint64(1597440), + "total_active_anon": uint64(1597440), + "total_active_file": uint64(1462272), + "total_cache": uint64(5787648), + "total_inactive_anon": uint64(4096), + "total_inactive_file": uint64(4321280), + "total_mapped_file": uint64(3616768), + "total_pgfault": uint64(2924), + "total_pgpgout": uint64(1674), + "total_pgpgin": uint64(3477), + "total_rss": uint64(1597440), + "usage": uint64(2392064), + "usage_percent": float64(0.23141727228778164), + }, + map[string]string{ + "test_tag": "test", + }, + ) +} + +func Test_cpustats(t *testing.T) { + var mockAcc testutil.Accumulator + + tags := map[string]string{ + "test_tag": "test", + } + tm := time.Now() + + cpustats(nginxStatsKey, validStats[nginxStatsKey], &mockAcc, tags, tm) + mockAcc.AssertContainsTaggedFields( + t, + "ecs_container_cpu", + map[string]interface{}{ + "container_id": nginxStatsKey, + "throttling_periods": uint64(0), + "throttling_throttled_periods": uint64(0), + "throttling_throttled_time": uint64(0), + "usage_in_usermode": uint64(40000000), + "usage_in_kernelmode": uint64(10000000), + "usage_percent": float64(0), + "usage_system": uint64(2336100000000), + "usage_total": uint64(65599511), + }, + map[string]string{ + "test_tag": "test", + "cpu": "cpu-total", + }, + ) + mockAcc.AssertContainsTaggedFields( + t, + "ecs_container_cpu", + map[string]interface{}{ + "container_id": nginxStatsKey, + "usage_total": uint64(65599511), + }, + map[string]string{ + "test_tag": "test", + "cpu": "cpu0", + }, + ) +} + +func Test_netstats(t *testing.T) { + var mockAcc testutil.Accumulator + + tags := map[string]string{ + "test_tag": "test", + } + tm := time.Now() + + netstats(pauseStatsKey, 
validStats[pauseStatsKey], &mockAcc, tags, tm) + mockAcc.AssertContainsTaggedFields( + t, + "ecs_container_net", + map[string]interface{}{ + "container_id": pauseStatsKey, + "rx_bytes": uint64(5338), + "rx_dropped": uint64(0), + "rx_errors": uint64(0), + "rx_packets": uint64(36), + "tx_bytes": uint64(648), + "tx_dropped": uint64(0), + "tx_errors": uint64(0), + "tx_packets": uint64(8), + }, + map[string]string{ + "test_tag": "test", + "network": "eth0", + }, + ) + mockAcc.AssertContainsTaggedFields( + t, + "ecs_container_net", + map[string]interface{}{ + "container_id": pauseStatsKey, + "rx_bytes": uint64(4641), + "rx_dropped": uint64(0), + "rx_errors": uint64(0), + "rx_packets": uint64(26), + "tx_bytes": uint64(690), + "tx_dropped": uint64(0), + "tx_errors": uint64(0), + "tx_packets": uint64(9), + }, + map[string]string{ + "test_tag": "test", + "network": "eth5", + }, + ) + mockAcc.AssertContainsTaggedFields( + t, + "ecs_container_net", + map[string]interface{}{ + "container_id": pauseStatsKey, + "rx_bytes": uint64(9979), + "rx_dropped": uint64(0), + "rx_errors": uint64(0), + "rx_packets": uint64(62), + "tx_bytes": uint64(1338), + "tx_dropped": uint64(0), + "tx_errors": uint64(0), + "tx_packets": uint64(17), + }, + map[string]string{ + "test_tag": "test", + "network": "total", + }, + ) +} + +func Test_blkstats(t *testing.T) { + var mockAcc testutil.Accumulator + + tags := map[string]string{ + "test_tag": "test", + } + tm := time.Now() + + blkstats(nginxStatsKey, validStats[nginxStatsKey], &mockAcc, tags, tm) + mockAcc.AssertContainsTaggedFields( + t, + "ecs_container_blkio", + map[string]interface{}{ + "container_id": nginxStatsKey, + "io_service_bytes_recursive_read": uint64(5730304), + "io_service_bytes_recursive_write": uint64(0), + "io_service_bytes_recursive_sync": uint64(5730304), + "io_service_bytes_recursive_async": uint64(0), + "io_service_bytes_recursive_total": uint64(5730304), + "io_serviced_recursive_read": uint64(156), + "io_serviced_recursive_write": uint64(0), + "io_serviced_recursive_sync": uint64(156), + "io_serviced_recursive_async": uint64(0), + "io_serviced_recursive_total": uint64(156), + }, + map[string]string{ + "test_tag": "test", + "device": "202:26368", + }, + ) +} diff --git a/plugins/inputs/ecs/testdata/metadata.golden b/plugins/inputs/ecs/testdata/metadata.golden new file mode 100644 index 000000000..6823d7e5e --- /dev/null +++ b/plugins/inputs/ecs/testdata/metadata.golden @@ -0,0 +1,78 @@ +{ + "Cluster": "test", + "TaskARN": "arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a", + "Family": "nginx", + "Revision": "2", + "DesiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "Containers": [ + { + "DockerId": "e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba", + "Name": "~internal~ecs~pause", + "DockerName": "ecs-nginx-2-internalecspause", + "Image": "amazon/amazon-ecs-pause:0.1.0", + "ImageID": "", + "Labels": { + "com.amazonaws.ecs.cluster": "test", + "com.amazonaws.ecs.container-name": "~internal~ecs~pause", + "com.amazonaws.ecs.task-arn": "arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a", + "com.amazonaws.ecs.task-definition-family": "nginx", + "com.amazonaws.ecs.task-definition-version": "2" + }, + "DesiredStatus": "RESOURCES_PROVISIONED", + "KnownStatus": "RESOURCES_PROVISIONED", + "Limits": { + "CPU": 0, + "Memory": 0 + }, + "CreatedAt": "2018-11-19T15:31:26.641964373Z", + "StartedAt": "2018-11-19T15:31:27.035698679Z", + "Type": "CNI_PAUSE", + "Networks": [ + { + 
"NetworkMode": "awsvpc", + "IPv4Addresses": [ + "172.31.25.181" + ] + } + ] + }, + { + "DockerId": "fffe894e232d46c76475cfeabf4907f712e8b92618a37fca3ef0805bbbfb0299", + "Name": "nginx", + "DockerName": "ecs-nginx-2-nginx", + "Image": "nginx:alpine", + "ImageID": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "Labels": { + "com.amazonaws.ecs.cluster": "test", + "com.amazonaws.ecs.container-name": "nginx", + "com.amazonaws.ecs.task-arn": "arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a", + "com.amazonaws.ecs.task-definition-family": "nginx", + "com.amazonaws.ecs.task-definition-version": "2" + }, + "DesiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "Limits": { + "CPU": 0, + "Memory": 0 + }, + "CreatedAt": "2018-11-19T15:31:27.614884084Z", + "StartedAt": "2018-11-19T15:31:27.975996351Z", + "Type": "NORMAL", + "Networks": [ + { + "NetworkMode": "awsvpc", + "IPv4Addresses": [ + "172.31.25.181" + ] + } + ] + } + ], + "Limits": { + "CPU": 0.5, + "Memory": 512 + }, + "PullStartedAt": "2018-11-19T15:31:27.197327103Z", + "PullStoppedAt": "2018-11-19T15:31:27.609089471Z" +} \ No newline at end of file diff --git a/plugins/inputs/ecs/testdata/stats.golden b/plugins/inputs/ecs/testdata/stats.golden new file mode 100644 index 000000000..791f4f0b3 --- /dev/null +++ b/plugins/inputs/ecs/testdata/stats.golden @@ -0,0 +1,663 @@ +{ + "e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba": { + "read": "2018-11-19T15:40:00.936081344Z", + "preread": "2018-11-19T15:39:59.933000984Z", + "num_procs": 0, + "pids_stats": {}, + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats": { + "stats": { + "cache": 790528, + "mapped_file": 618496, + "total_inactive_file": 782336, + "pgpgout": 1040, + "rss": 40960, + "total_mapped_file": 618496, + "pgpgin": 1243, + "pgmajfault": 6, + "total_rss": 40960, + "hierarchical_memory_limit": 536870912, + "total_pgfault": 1298, + "total_active_file": 8192, + "active_anon": 40960, + "total_active_anon": 40960, + "total_pgpgout": 1040, + "total_cache": 790528, + "active_file": 8192, + "pgfault": 1298, + "inactive_file": 782336, + "total_pgpgin": 1243, + "hierarchical_memsw_limit": 9223372036854772000 + }, + "max_usage": 4825088, + "usage": 1343488, + "limit": 1033658368 + }, + "blkio_stats": { + "io_service_bytes_recursive": [ + { + "major": 202, + "minor": 26368, + "op": "Read", + "value": 790528 + }, + { + "major": 202, + "minor": 26368, + "op": "Write" + }, + { + "major": 202, + "minor": 26368, + "op": "Sync", + "value": 790528 + }, + { + "major": 202, + "minor": 26368, + "op": "Async" + }, + { + "major": 202, + "minor": 26368, + "op": "Total", + "value": 790528 + }, + { + "major": 253, + "minor": 1, + "op": "Read", + "value": 790528 + }, + { + "major": 253, + "minor": 1, + "op": "Write" + }, + { + "major": 253, + "minor": 1, + "op": "Sync", + "value": 790528 + }, + { + "major": 253, + "minor": 1, + "op": "Async" + }, + { + "major": 253, + "minor": 1, + "op": "Total", + "value": 790528 + }, + { + "major": 253, + "minor": 2, + "op": "Read", + "value": 790528 + }, + { + "major": 253, + "minor": 2, + "op": "Write" + }, + { + "major": 253, + "minor": 2, + "op": "Sync", + "value": 790528 + }, + { + 
"major": 253, + "minor": 2, + "op": "Async" + }, + { + "major": 253, + "minor": 2, + "op": "Total", + "value": 790528 + }, + { + "major": 253, + "minor": 4, + "op": "Read", + "value": 790528 + }, + { + "major": 253, + "minor": 4, + "op": "Write" + }, + { + "major": 253, + "minor": 4, + "op": "Sync", + "value": 790528 + }, + { + "major": 253, + "minor": 4, + "op": "Async" + }, + { + "major": 253, + "minor": 4, + "op": "Total", + "value": 790528 + } + ], + "io_serviced_recursive": [ + { + "major": 202, + "minor": 26368, + "op": "Read", + "value": 10 + }, + { + "major": 202, + "minor": 26368, + "op": "Write" + }, + { + "major": 202, + "minor": 26368, + "op": "Sync", + "value": 10 + }, + { + "major": 202, + "minor": 26368, + "op": "Async" + }, + { + "major": 202, + "minor": 26368, + "op": "Total", + "value": 10 + }, + { + "major": 253, + "minor": 1, + "op": "Read", + "value": 10 + }, + { + "major": 253, + "minor": 1, + "op": "Write" + }, + { + "major": 253, + "minor": 1, + "op": "Sync", + "value": 10 + }, + { + "major": 253, + "minor": 1, + "op": "Async" + }, + { + "major": 253, + "minor": 1, + "op": "Total", + "value": 10 + }, + { + "major": 253, + "minor": 2, + "op": "Read", + "value": 10 + }, + { + "major": 253, + "minor": 2, + "op": "Write" + }, + { + "major": 253, + "minor": 2, + "op": "Sync", + "value": 10 + }, + { + "major": 253, + "minor": 2, + "op": "Async" + }, + { + "major": 253, + "minor": 2, + "op": "Total", + "value": 10 + }, + { + "major": 253, + "minor": 4, + "op": "Read", + "value": 10 + }, + { + "major": 253, + "minor": 4, + "op": "Write" + }, + { + "major": 253, + "minor": 4, + "op": "Sync", + "value": 10 + }, + { + "major": 253, + "minor": 4, + "op": "Async" + }, + { + "major": 253, + "minor": 4, + "op": "Total", + "value": 10 + } + ] + }, + "cpu_stats": { + "cpu_usage": { + "percpu_usage": [ + 26426156, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "usage_in_usermode": 20000000, + "total_usage": 26426156 + }, + "system_cpu_usage": 2336100000000, + "online_cpus": 1, + "throttling_data": {} + }, + "precpu_stats": { + "cpu_usage": { + "percpu_usage": [ + 26426156, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "usage_in_usermode": 20000000, + "total_usage": 26426156 + }, + "system_cpu_usage": 2335090000000, + "online_cpus": 1, + "throttling_data": {} + }, + "storage_stats": {} + }, + "fffe894e232d46c76475cfeabf4907f712e8b92618a37fca3ef0805bbbfb0299": { + "read": "2018-11-19T15:40:00.93733207Z", + "preread": "2018-11-19T15:39:59.934291009Z", + "num_procs": 0, + "pids_stats": {}, + "network": {}, + "memory_stats": { + "stats": { + "cache": 5787648, + "mapped_file": 3616768, + "total_inactive_file": 4321280, + "pgpgout": 1674, + "rss": 1597440, + "total_mapped_file": 3616768, + "pgpgin": 3477, + "pgmajfault": 40, + "total_rss": 1597440, + "total_inactive_anon": 4096, + "hierarchical_memory_limit": 536870912, + "total_pgfault": 2924, + "total_active_file": 1462272, + "active_anon": 1597440, + "total_active_anon": 1597440, + "total_pgpgout": 1674, + "total_cache": 5787648, + "inactive_anon": 4096, + "active_file": 1462272, + "pgfault": 2924, + "inactive_file": 4321280, + "total_pgpgin": 3477, + "hierarchical_memsw_limit": 9223372036854772000 + }, + "max_usage": 8667136, + "usage": 8179712, + "limit": 1033658368 + }, + "blkio_stats": { + "io_service_bytes_recursive": [ + { + "major": 202, + "minor": 26368, + "op": "Read", + "value": 5730304 + }, + { + "major": 202, + "minor": 26368, + "op": "Write" + }, + { + "major": 202, 
+ "minor": 26368, + "op": "Sync", + "value": 5730304 + }, + { + "major": 202, + "minor": 26368, + "op": "Async" + }, + { + "major": 202, + "minor": 26368, + "op": "Total", + "value": 5730304 + }, + { + "major": 253, + "minor": 1, + "op": "Read", + "value": 5730304 + }, + { + "major": 253, + "minor": 1, + "op": "Write" + }, + { + "major": 253, + "minor": 1, + "op": "Sync", + "value": 5730304 + }, + { + "major": 253, + "minor": 1, + "op": "Async" + }, + { + "major": 253, + "minor": 1, + "op": "Total", + "value": 5730304 + }, + { + "major": 253, + "minor": 2, + "op": "Read", + "value": 5730304 + }, + { + "major": 253, + "minor": 2, + "op": "Write" + }, + { + "major": 253, + "minor": 2, + "op": "Sync", + "value": 5730304 + }, + { + "major": 253, + "minor": 2, + "op": "Async" + }, + { + "major": 253, + "minor": 2, + "op": "Total", + "value": 5730304 + }, + { + "major": 253, + "minor": 5, + "op": "Read", + "value": 5730304 + }, + { + "major": 253, + "minor": 5, + "op": "Write" + }, + { + "major": 253, + "minor": 5, + "op": "Sync", + "value": 5730304 + }, + { + "major": 253, + "minor": 5, + "op": "Async" + }, + { + "major": 253, + "minor": 5, + "op": "Total", + "value": 5730304 + } + ], + "io_serviced_recursive": [ + { + "major": 202, + "minor": 26368, + "op": "Read", + "value": 156 + }, + { + "major": 202, + "minor": 26368, + "op": "Write" + }, + { + "major": 202, + "minor": 26368, + "op": "Sync", + "value": 156 + }, + { + "major": 202, + "minor": 26368, + "op": "Async" + }, + { + "major": 202, + "minor": 26368, + "op": "Total", + "value": 156 + }, + { + "major": 253, + "minor": 1, + "op": "Read", + "value": 156 + }, + { + "major": 253, + "minor": 1, + "op": "Write" + }, + { + "major": 253, + "minor": 1, + "op": "Sync", + "value": 156 + }, + { + "major": 253, + "minor": 1, + "op": "Async" + }, + { + "major": 253, + "minor": 1, + "op": "Total", + "value": 156 + }, + { + "major": 253, + "minor": 2, + "op": "Read", + "value": 156 + }, + { + "major": 253, + "minor": 2, + "op": "Write" + }, + { + "major": 253, + "minor": 2, + "op": "Sync", + "value": 156 + }, + { + "major": 253, + "minor": 2, + "op": "Async" + }, + { + "major": 253, + "minor": 2, + "op": "Total", + "value": 156 + }, + { + "major": 253, + "minor": 5, + "op": "Read", + "value": 147 + }, + { + "major": 253, + "minor": 5, + "op": "Write" + }, + { + "major": 253, + "minor": 5, + "op": "Sync", + "value": 147 + }, + { + "major": 253, + "minor": 5, + "op": "Async" + }, + { + "major": 253, + "minor": 5, + "op": "Total", + "value": 147 + } + ] + }, + "cpu_stats": { + "cpu_usage": { + "percpu_usage": [ + 65599511, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "usage_in_usermode": 40000000, + "total_usage": 65599511, + "usage_in_kernelmode": 10000000 + }, + "system_cpu_usage": 2336100000000, + "online_cpus": 1, + "throttling_data": {} + }, + "precpu_stats": { + "cpu_usage": { + "percpu_usage": [ + 65599511, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "usage_in_usermode": 40000000, + "total_usage": 65599511, + "usage_in_kernelmode": 10000000 + }, + "system_cpu_usage": 2335090000000, + "online_cpus": 1, + "throttling_data": {} + }, + "storage_stats": {} + } +} diff --git a/plugins/inputs/ecs/types.go b/plugins/inputs/ecs/types.go new file mode 100644 index 000000000..0b9b402f6 --- /dev/null +++ b/plugins/inputs/ecs/types.go @@ -0,0 +1,75 @@ +package ecs + +import ( + "encoding/json" + "io" + "strings" + "time" + + "github.com/docker/docker/api/types" +) + +// Task is the ECS task 
representation +type Task struct { + Cluster string + TaskARN string + Family string + Revision string + DesiredStatus string + KnownStatus string + Containers []Container + Limits map[string]float64 + PullStartedAt time.Time + PullStoppedAt time.Time +} + +// Container is the ECS metadata container representation +type Container struct { + ID string `json:"DockerId"` + Name string + DockerName string + Image string + ImageID string + Labels map[string]string + DesiredStatus string + KnownStatus string + Limits map[string]float64 + CreatedAt time.Time + StartedAt time.Time + Stats types.StatsJSON + Type string + Networks []Network +} + +// Network is a docker network configuration +type Network struct { + NetworkMode string + IPv4Addresses []string +} + +func unmarshalTask(r io.Reader) (*Task, error) { + task := &Task{} + err := json.NewDecoder(r).Decode(task) + return task, err +} + +// docker parsers +func unmarshalStats(r io.Reader) (map[string]types.StatsJSON, error) { + var statsMap map[string]types.StatsJSON + err := json.NewDecoder(r).Decode(&statsMap) + return statsMap, err +} + +// interleaves Stats into the Container objects in the Task +func mergeTaskStats(task *Task, stats map[string]types.StatsJSON) { + for i, c := range task.Containers { + if strings.Trim(c.ID, " ") == "" { + continue + } + stat, ok := stats[c.ID] + if !ok { + continue + } + task.Containers[i].Stats = stat + } +} diff --git a/plugins/inputs/ecs/types_test.go b/plugins/inputs/ecs/types_test.go new file mode 100644 index 000000000..d62ac6b40 --- /dev/null +++ b/plugins/inputs/ecs/types_test.go @@ -0,0 +1,61 @@ +package ecs + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_parseTask(t *testing.T) { + r, err := os.Open("testdata/metadata.golden") + if err != nil { + t.Errorf("error opening test files") + } + parsed, err := unmarshalTask(r) + if err != nil { + t.Errorf("error parsing task %v", err) + } + assert.Equal(t, validMeta, *parsed, "Got = %v, want = %v", parsed, validMeta) +} + +func Test_parseStats(t *testing.T) { + r, err := os.Open("testdata/stats.golden") + if err != nil { + t.Errorf("error opening test files") + } + + parsed, err := unmarshalStats(r) + if err != nil { + t.Errorf("error parsing stats %v", err) + } + assert.Equal(t, validStats, parsed, "Got = %v, want = %v", parsed, validStats) +} + +func Test_mergeTaskStats(t *testing.T) { + metadata, err := os.Open("testdata/metadata.golden") + if err != nil { + t.Errorf("error opening test files") + } + + parsedMetadata, err := unmarshalTask(metadata) + if err != nil { + t.Errorf("error parsing task %v", err) + } + + stats, err := os.Open("testdata/stats.golden") + if err != nil { + t.Errorf("error opening test files") + } + + parsedStats, err := unmarshalStats(stats) + if err != nil { + t.Errorf("error parsing stats %v", err) + } + + mergeTaskStats(parsedMetadata, parsedStats) + + for _, cont := range parsedMetadata.Containers { + assert.Equal(t, validStats[cont.ID], cont.Stats, "Got = %v, want = %v", cont.Stats, validStats[cont.ID]) + } +} From 980b1746877d4c11575d4a57f8afd22d46b68e21 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Sun, 26 May 2019 20:01:11 -0700 Subject: [PATCH 0869/1815] Tidy ECS readme and make review changes --- CHANGELOG.md | 1 + README.md | 2 +- plugins/inputs/ecs/README.md | 186 +++++++++++++++++++++++++++---- plugins/inputs/ecs/client.go | 20 ---- plugins/inputs/ecs/ecs.go | 24 +--- plugins/inputs/ecs/types_test.go | 42 +++---- 6 files changed, 187 insertions(+), 88 deletions(-)
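The `mergeTaskStats` helper shown just above is the pivot of the plugin's data flow: the stats endpoint returns a map keyed by Docker ID, the metadata endpoint returns a Task with a container list, and the two are joined on that ID. Before the review changes below, here is a minimal, self-contained sketch of the same join (not part of the patch; the structs are simplified stand-ins for the plugin's real types, which embed docker's `types.StatsJSON`):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Simplified stand-ins for the plugin's Task/Container types.
type container struct {
	ID    string `json:"DockerId"`
	Name  string
	Stats map[string]interface{} `json:"-"`
}

type task struct {
	Containers []container
}

// Same join as mergeTaskStats: match stats to containers by Docker ID.
// Assignment goes through t.Containers[i] because ranging yields a copy,
// so a write to the loop variable would be lost.
func merge(t *task, stats map[string]map[string]interface{}) {
	for i, c := range t.Containers {
		if strings.TrimSpace(c.ID) == "" {
			continue
		}
		if s, ok := stats[c.ID]; ok {
			t.Containers[i].Stats = s
		}
	}
}

func main() {
	meta := []byte(`{"Containers":[{"DockerId":"abc123","Name":"nginx"}]}`)
	stats := []byte(`{"abc123":{"cpu_total":65599511}}`)

	var t task
	if err := json.Unmarshal(meta, &t); err != nil {
		panic(err)
	}

	var s map[string]map[string]interface{}
	if err := json.Unmarshal(stats, &s); err != nil {
		panic(err)
	}

	merge(&t, s)
	fmt.Println(t.Containers[0].Name, t.Containers[0].Stats["cpu_total"])
}
```

The index-based assignment mirrors the plugin's own `task.Containers[i].Stats = stat`; containers with no matching stats entry (or a blank ID) are simply skipped rather than treated as errors.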
diff --git a/CHANGELOG.md b/CHANGELOG.md index b1b3b5f12..30e2c7646 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ #### New Inputs - [bind](/plugins/inputs/bind/README.md) - Contributed by @dswarbrick & @danielllek +- [ecs](/plugins/inputs/ecs/README.md) - Contributed by @rbtr - [github](/plugins/inputs/github/README.md) - Contributed by @influxdata - [powerdns_recursor](/plugins/inputs/powerdns_recursor/README.md) - Contributed by @dupondje diff --git a/README.md b/README.md index 6b3931f42..5d0843e03 100644 --- a/README.md +++ b/README.md @@ -165,7 +165,7 @@ For documentation on the latest development code see the [documentation index][d * [dns query time](./plugins/inputs/dns_query) * [docker](./plugins/inputs/docker) * [dovecot](./plugins/inputs/dovecot) -* [ecs](./plugins/inputs/ecs) +* [ecs](./plugins/inputs/ecs) (Amazon Elastic Container Service, Fargate) * [elasticsearch](./plugins/inputs/elasticsearch) * [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios) * [fail2ban](./plugins/inputs/fail2ban) diff --git a/plugins/inputs/ecs/README.md b/plugins/inputs/ecs/README.md index 411322959..e6407a58b 100644 --- a/plugins/inputs/ecs/README.md +++ b/plugins/inputs/ecs/README.md @@ -1,23 +1,31 @@ # ECS Input Plugin -ECS, Fargate compatible, input plugin which uses the [ECS v2 metadata and stats API](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v2.html) -endpoints to gather stats on running containers in a Task. +ECS, Fargate compatible, input plugin which uses the [ECS v2 metadata and +stats API][task-metadata-endpoint-v2] endpoints to gather stats on running +containers in a Task. -The telegraf container must be run in the same Task as the workload it is inspecting. +The telegraf container must be run in the same Task as the workload it is +inspecting. -This is similar to (and reuses a few pieces of) the [Docker](../docker/README.md) input plugin, with some ECS specific modifications for AWS metadata and stats formats. +This is similar to (and reuses a few pieces of) the [Docker][docker-input] +input plugin, with some ECS specific modifications for AWS metadata and stats +formats. +The amazon-ecs-agent (though it _is_ a container running on the host) is not +present in the metadata/stats endpoints. -### Configuration: +### Configuration ```toml # Read metrics about ECS containers [[inputs.ecs]] - # endpoint_url = http:// + ## ECS metadata url + # endpoint_url = "http://169.254.170.2" + ## Containers to include and exclude. Globs accepted. ## Note that an empty array for both will include all containers - container_name_include = [] - container_name_exclude = [] + # container_name_include = [] + # container_name_exclude = [] ## Container states to include and exclude. Globs accepted. ## When empty only containers in the "running" state will be captured. 
@@ -33,19 +41,157 @@ This is similar to (and reuses a few pieces of) the [Docker](../docker/README.md timeout = "5s" ``` -#### Environment Configuration +### Metrics -The ECS client can optionally also be configured with the following env vars: +- ecs_task + - tags: + - cluster + - task_arn + - family + - revision + - id + - name + - fields: + - revision (string) + - desired_status (string) + - known_status (string) + - limit_cpu (float) + - limit_mem (float) + ++ ecs_container_mem + - tags: + - cluster + - task_arn + - family + - revision + - id + - name + - fields: + - container_id + - active_anon + - active_file + - cache + - hierarchical_memory_limit + - inactive_anon + - inactive_file + - mapped_file + - pgfault + - pgmajfault + - pgpgin + - pgpgout + - rss + - rss_huge + - total_active_anon + - total_active_file + - total_cache + - total_inactive_anon + - total_inactive_file + - total_mapped_file + - total_pgfault + - total_pgmajfault + - total_pgpgin + - total_pgpgout + - total_rss + - total_rss_huge + - total_unevictable + - total_writeback + - unevictable + - writeback + - fail_count + - limit + - max_usage + - usage + - usage_percent + +- ecs_container_cpu + - tags: + - cluster + - task_arn + - family + - revision + - id + - name + - cpu + - fields: + - container_id + - usage_total + - usage_in_usermode + - usage_in_kernelmode + - usage_system + - throttling_periods + - throttling_throttled_periods + - throttling_throttled_time + - usage_percent + ++ ecs_container_net + - tags: + - cluster + - task_arn + - family + - revision + - id + - name + - network + - fields: + - container_id + - rx_packets + - rx_dropped + - rx_bytes + - rx_errors + - tx_packets + - tx_dropped + - tx_bytes + - tx_errors + +- ecs_container_blkio + - tags: + - cluster + - task_arn + - family + - revision + - id + - name + - device + - fields: + - container_id + - io_service_bytes_recursive_async + - io_service_bytes_recursive_read + - io_service_bytes_recursive_sync + - io_service_bytes_recursive_total + - io_service_bytes_recursive_write + - io_serviced_recursive_async + - io_serviced_recursive_read + - io_serviced_recursive_sync + - io_serviced_recursive_total + - io_serviced_recursive_write + ++ ecs_container_meta + - tags: + - cluster + - task_arn + - family + - revision + - id + - name + - fields: + - container_id + - docker_name + - image + - image_id + - desired_status + - known_status + - limit_cpu + - limit_mem + - created_at + - started_at + - type + + +### Example Output ``` -ECS_TIMEOUT -``` - - -### Example Output: - -``` -ecs_task_status,cluster=test,family=nginx,host=c4b301d4a123,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a revision="2",desired_status="RUNNING",known_status="RUNNING",limit_cpu=0.5,limit_mem=512 1542641488000000000 +ecs_task,cluster=test,family=nginx,host=c4b301d4a123,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a revision="2",desired_status="RUNNING",known_status="RUNNING",limit_cpu=0.5,limit_mem=512 1542641488000000000
ecs_container_mem,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a active_anon=40960i,active_file=8192i,cache=790528i,pgpgin=1243i,total_pgfault=1298i,total_rss=40960i,limit=1033658368i,max_usage=4825088i,hierarchical_memory_limit=536870912i,rss=40960i,total_active_file=8192i,total_mapped_file=618496i,usage_percent=0.05349543109392212,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",pgfault=1298i,pgmajfault=6i,pgpgout=1040i,total_active_anon=40960i,total_inactive_file=782336i,total_pgpgin=1243i,usage=552960i,inactive_file=782336i,mapped_file=618496i,total_cache=790528i,total_pgpgout=1040i 1542642001000000000 ecs_container_cpu,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,cpu=cpu-total,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a usage_in_kernelmode=0i,throttling_throttled_periods=0i,throttling_periods=0i,throttling_throttled_time=0i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",usage_percent=0,usage_total=26426156i,usage_in_usermode=20000000i,usage_system=2336100000000i 1542642001000000000 ecs_container_cpu,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,cpu=cpu0,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",usage_total=26426156i 1542642001000000000 @@ -60,5 +206,5 @@ ecs_container_blkio,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ec ecs_container_meta,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a 
limit_mem=0,type="CNI_PAUSE",container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",docker_name="ecs-nginx-2-internalecspause",limit_cpu=0,known_status="RESOURCES_PROVISIONED",image="amazon/amazon-ecs-pause:0.1.0",image_id="",desired_status="RESOURCES_PROVISIONED" 1542642001000000000 ``` -### Notes: -- the amazon-ecs-agent (though it _is_ a container running on the host) is not present in the metadata/stats endpoints. \ No newline at end of file +[docker-input]: /plugins/inputs/docker/README.md +[task-metadata-endpoint-v2]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v2.html diff --git a/plugins/inputs/ecs/client.go b/plugins/inputs/ecs/client.go index eba0b0856..d1d92f097 100644 --- a/plugins/inputs/ecs/client.go +++ b/plugins/inputs/ecs/client.go @@ -1,10 +1,8 @@ package ecs import ( - "log" "net/http" "net/url" - "os" "time" "github.com/docker/docker/api/types" @@ -25,20 +23,6 @@ type httpClient interface { Do(req *http.Request) (*http.Response, error) } -// NewEnvClient configures a new Client from the env -func NewEnvClient() (*EcsClient, error) { - timeout := 5 * time.Second - if t := os.Getenv("ECS_TIMEOUT"); t != "" { - if d, err := time.ParseDuration(t); err == nil { - timeout = d - } - } - - return NewClient( - timeout, - ) -} - // NewClient constructs an ECS client with the passed configuration params func NewClient(timeout time.Duration) (*EcsClient, error) { c := &http.Client{ @@ -68,13 +52,11 @@ func (c *EcsClient) Task() (*Task, error) { resp, err := c.client.Do(req) if err != nil { - log.Println("failed to GET metadata endpoint", err) return nil, err } task, err := unmarshalTask(resp.Body) if err != nil { - log.Println("failed to decode response from metadata endpoint", err) return nil, err } @@ -91,13 +73,11 @@ func (c *EcsClient) ContainerStats() (map[string]types.StatsJSON, error) { resp, err := c.client.Do(req) if err != nil { - log.Println("failed to GET stats endpoint", err) return map[string]types.StatsJSON{}, err } statsMap, err := unmarshalStats(resp.Body) if err != nil { - log.Println("failed to decode response from stats endpoint") return map[string]types.StatsJSON{}, err } diff --git a/plugins/inputs/ecs/ecs.go b/plugins/inputs/ecs/ecs.go index 36a51229a..916b45bb2 100644 --- a/plugins/inputs/ecs/ecs.go +++ b/plugins/inputs/ecs/ecs.go @@ -1,7 +1,6 @@ package ecs import ( - "log" "net/url" "time" @@ -14,7 +13,6 @@ import ( // Ecs config object type Ecs struct { EndpointURL string `toml:"endpoint_url"` - EnvCfg bool `toml:"envcfg"` Timeout internal.Duration ContainerNameInclude []string `toml:"container_name_include"` @@ -26,8 +24,7 @@ type Ecs struct { LabelInclude []string `toml:"ecs_label_include"` LabelExclude []string `toml:"ecs_label_exclude"` - newEnvClient func() (*EcsClient, error) - newClient func(timeout time.Duration) (*EcsClient, error) + newClient func(timeout time.Duration) (*EcsClient, error) client Client filtersCreated bool @@ -48,13 +45,10 @@ var sampleConfig = ` ## ECS metadata url # endpoint_url = "http://169.254.170.2" - ## Set to true to configure from env vars - envcfg = false - ## Containers to include and exclude. Globs accepted. ## Note that an empty array for both will include all containers - container_name_include = [] - container_name_exclude = [] + # container_name_include = [] + # container_name_exclude = [] ## Container states to include and exclude. Globs accepted. ## When empty only containers in the "running" state will be captured. 
@@ -110,13 +104,9 @@ func (ecs *Ecs) Gather(acc telegraf.Accumulator) error { func initSetup(ecs *Ecs) error { if ecs.client == nil { - var c *EcsClient var err error - if ecs.EnvCfg { - c, err = ecs.newEnvClient() - } else { - c, err = ecs.newClient(ecs.Timeout.Duration) - } + var c *EcsClient + c, err = ecs.newClient(ecs.Timeout.Duration) if err != nil { return err } @@ -164,12 +154,10 @@ func (ecs *Ecs) accTask(task *Task, tags map[string]string, acc telegraf.Accumul func (ecs *Ecs) accContainers(task *Task, taskTags map[string]string, acc telegraf.Accumulator) { for _, c := range task.Containers { if !ecs.containerNameFilter.Match(c.Name) { - log.Printf("container %v did not match name filter", c.ID) continue } if !ecs.statusFilter.Match(c.KnownStatus) { - log.Printf("container %v did not match status filter", c.ID) continue } @@ -242,8 +230,6 @@ func init() { return &Ecs{ EndpointURL: "http://169.254.170.2", Timeout: internal.Duration{Duration: 5 * time.Second}, - EnvCfg: true, - newEnvClient: NewEnvClient, newClient: NewClient, filtersCreated: false, } diff --git a/plugins/inputs/ecs/types_test.go b/plugins/inputs/ecs/types_test.go index d62ac6b40..e68e9711e 100644 --- a/plugins/inputs/ecs/types_test.go +++ b/plugins/inputs/ecs/types_test.go @@ -4,58 +4,44 @@ import ( "os" "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func Test_parseTask(t *testing.T) { r, err := os.Open("testdata/metadata.golden") - if err != nil { - t.Errorf("error opening test files") - } + require.NoError(t, err) + parsed, err := unmarshalTask(r) - if err != nil { - t.Errorf("error parsing task %v", err) - } - assert.Equal(t, validMeta, *parsed, "Got = %v, want = %v", parsed, validMeta) + require.NoError(t, err) + + require.Equal(t, validMeta, *parsed) } func Test_parseStats(t *testing.T) { r, err := os.Open("testdata/stats.golden") - if err != nil { - t.Errorf("error opening test files") - } + require.NoError(t, err) parsed, err := unmarshalStats(r) - if err != nil { - t.Errorf("error parsing stats %v", err) - } - assert.Equal(t, validStats, parsed, "Got = %v, want = %v", parsed, validStats) + require.NoError(t, err) + require.Equal(t, validStats, parsed) } func Test_mergeTaskStats(t *testing.T) { metadata, err := os.Open("testdata/metadata.golden") - if err != nil { - t.Errorf("error opening test files") - } + require.NoError(t, err) parsedMetadata, err := unmarshalTask(metadata) - if err != nil { - t.Errorf("error parsing task %v", err) - } + require.NoError(t, err) stats, err := os.Open("testdata/stats.golden") - if err != nil { - t.Errorf("error opening test files") - } + require.NoError(t, err) parsedStats, err := unmarshalStats(stats) - if err != nil { - t.Errorf("error parsing stats %v", err) - } + require.NoError(t, err) mergeTaskStats(parsedMetadata, parsedStats) for _, cont := range parsedMetadata.Containers { - assert.Equal(t, validStats[cont.ID], cont.Stats, "Got = %v, want = %v", cont.Stats, validStats[cont.ID]) + require.Equal(t, validStats[cont.ID], cont.Stats) } } From eb225b818fc258401444399d111a4a41b27e5384 Mon Sep 17 00:00:00 2001 From: Dmitry Ilyin Date: Tue, 28 May 2019 22:54:25 +0300 Subject: [PATCH 0870/1815] Fix toml option name in nginx_upstream_check (#5917) --- plugins/inputs/nginx_upstream_check/nginx_upstream_check.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go index e5a2e096d..1293f946e 100644 --- 
a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go @@ -44,7 +44,7 @@ const sampleConfig = ` const description = "Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module)" type NginxUpstreamCheck struct { - URL string `toml:"uls"` + URL string `toml:"url"` Username string `toml:"username"` Password string `toml:"password"` From ce2d501e919327df5610173a3ed2b38820079494 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 28 May 2019 16:13:19 -0700 Subject: [PATCH 0871/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 30e2c7646..7e864cc4f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -70,6 +70,7 @@ - [#5819](https://github.com/influxdata/telegraf/issues/5819): Fix scale set resource id with azure_monitor output. - [#5883](https://github.com/influxdata/telegraf/issues/5883): Skip invalid power times in apex_neptune input. - [#3485](https://github.com/influxdata/telegraf/issues/3485): Fix sqlserver connection closing on error. +- [#5917](https://github.com/influxdata/telegraf/issues/5917): Fix toml option name in nginx_upstream_check. ## v1.10.4 [2019-05-14] From fa492e0840c7f9cf341e0892748bf05078f729de Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Tue, 28 May 2019 21:08:44 -0400 Subject: [PATCH 0872/1815] Fixed datastore name mapping in vsphere input (#5920) --- plugins/inputs/vsphere/endpoint.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index 411bdc965..27bad51ca 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -369,7 +369,6 @@ func (e *Endpoint) discover(ctx context.Context) error { } log.Printf("D! [inputs.vsphere]: Discover new objects for %s", e.URL.Host) - resourceKinds := make(map[string]resourceKind) dcNameCache := make(map[string]string) numRes := int64(0) @@ -418,9 +417,9 @@ func (e *Endpoint) discover(ctx context.Context) error { } // Build lun2ds map - dss := resourceKinds["datastore"] + dss := newObjects["datastore"] l2d := make(map[string]string) - for _, ds := range dss.objects { + for _, ds := range dss { url := ds.altID m := isolateLUN.FindStringSubmatch(url) if m != nil { From dd09f238e14c0454833cbe49c8a85a58561d5ddb Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 28 May 2019 18:10:48 -0700 Subject: [PATCH 0873/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7e864cc4f..a1737d25a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -71,6 +71,7 @@ - [#5883](https://github.com/influxdata/telegraf/issues/5883): Skip invalid power times in apex_neptune input. - [#3485](https://github.com/influxdata/telegraf/issues/3485): Fix sqlserver connection closing on error. - [#5917](https://github.com/influxdata/telegraf/issues/5917): Fix toml option name in nginx_upstream_check. +- [#5920](https://github.com/influxdata/telegraf/issues/5920): Fixed datastore name mapping in vsphere input. 
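The nginx_upstream_check fix a few hunks above is a single-character struct-tag change (`toml:"uls"` to `toml:"url"`), but the failure mode it corrects is easy to miss: tag-driven decoders do not complain about keys they cannot match, so a mistyped tag simply leaves the field at its zero value and the `url` option appears to be silently ignored. Here is a small illustration of the mechanism, using `encoding/json` from the standard library as a stand-in for the TOML decoder (the sample URL is made up):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// The typo'd tag: the decoder looks for a key named "uls", never finds
// it, and leaves URL at its zero value without reporting an error.
type withTypo struct {
	URL string `json:"uls"`
}

// The corrected tag matches the key actually present in the input.
type fixed struct {
	URL string `json:"url"`
}

func main() {
	data := []byte(`{"url": "http://127.0.0.1/status?format=json"}`)

	var a withTypo
	if err := json.Unmarshal(data, &a); err != nil {
		panic(err)
	}
	fmt.Printf("typo:  %q\n", a.URL) // typo:  ""

	var b fixed
	if err := json.Unmarshal(data, &b); err != nil {
		panic(err)
	}
	fmt.Printf("fixed: %q\n", b.URL) // fixed: "http://127.0.0.1/status?format=json"
}
```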
## v1.10.4 [2019-05-14] From aaaad4d217058e273f3a5cfad04d4f8f9128628c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 29 May 2019 18:31:06 -0700 Subject: [PATCH 0874/1815] Add health output plugin (#5882) --- internal/http.go | 45 ++++ internal/internal.go | 3 +- plugins/outputs/all/all.go | 1 + plugins/outputs/health/README.md | 61 ++++++ plugins/outputs/health/compares.go | 77 +++++++ plugins/outputs/health/compares_test.go | 268 ++++++++++++++++++++++++ plugins/outputs/health/contains.go | 19 ++ plugins/outputs/health/contains_test.go | 68 ++++++ plugins/outputs/health/health.go | 252 ++++++++++++++++++++++ plugins/outputs/health/health_test.go | 124 +++++++++++ 10 files changed, 917 insertions(+), 1 deletion(-) create mode 100644 internal/http.go create mode 100644 plugins/outputs/health/README.md create mode 100644 plugins/outputs/health/compares.go create mode 100644 plugins/outputs/health/compares_test.go create mode 100644 plugins/outputs/health/contains.go create mode 100644 plugins/outputs/health/contains_test.go create mode 100644 plugins/outputs/health/health.go create mode 100644 plugins/outputs/health/health_test.go diff --git a/internal/http.go b/internal/http.go new file mode 100644 index 000000000..230fdf2b7 --- /dev/null +++ b/internal/http.go @@ -0,0 +1,45 @@ +package internal + +import ( + "crypto/subtle" + "net/http" +) + +// ErrorFunc is a callback for writing an error response. +type ErrorFunc func(rw http.ResponseWriter, code int) + +// AuthHandler returns an http handler that requires HTTP basic auth +// credentials to match the given username and password. +func AuthHandler(username, password string, onError ErrorFunc) func(h http.Handler) http.Handler { + return func(h http.Handler) http.Handler { + return &basicAuthHandler{ + username: username, + password: password, + onError: onError, + next: h, + } + } + +} + +type basicAuthHandler struct { + username string + password string + onError ErrorFunc + next http.Handler +} + +func (h *basicAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + if h.username != "" || h.password != "" { + reqUsername, reqPassword, ok := req.BasicAuth() + if !ok || + subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.username)) != 1 || + subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.password)) != 1 { + + h.onError(rw, http.StatusUnauthorized) + return + } + } + + h.next.ServeHTTP(rw, req) +} diff --git a/internal/internal.go b/internal/internal.go index ebb69db8a..c191eac94 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -64,7 +64,8 @@ func Version() string { // ProductToken returns a tag for Telegraf that can be used in user agents.
func ProductToken() string { - return fmt.Sprintf("Telegraf/%s Go/%s", Version(), runtime.Version()) + return fmt.Sprintf("Telegraf/%s Go/%s", + Version(), strings.TrimPrefix(runtime.Version(), "go")) } // UnmarshalTOML parses the duration from the TOML config file diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index c29d05efb..f9dd73c44 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -14,6 +14,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/file" _ "github.com/influxdata/telegraf/plugins/outputs/graphite" _ "github.com/influxdata/telegraf/plugins/outputs/graylog" + _ "github.com/influxdata/telegraf/plugins/outputs/health" _ "github.com/influxdata/telegraf/plugins/outputs/http" _ "github.com/influxdata/telegraf/plugins/outputs/influxdb" _ "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2" diff --git a/plugins/outputs/health/README.md b/plugins/outputs/health/README.md new file mode 100644 index 000000000..5ef30fd57 --- /dev/null +++ b/plugins/outputs/health/README.md @@ -0,0 +1,61 @@ +# Health Output Plugin + +The health plugin provides an HTTP health check resource that can be configured +to return a failure status code based on the value of a metric. + +When the plugin is healthy it will return a 200 response; when unhealthy it +will return a 503 response. The default state is healthy; one or more checks +must fail in order for the resource to enter the failed state. + +### Configuration +```toml +[[outputs.health]] + ## Address and port to listen on. + ## ex: service_address = "tcp://localhost:8080" + ## service_address = "unix:///var/run/telegraf-health.sock" + # service_address = "tcp://:8080" + + ## The maximum duration for reading the entire request. + # read_timeout = "5s" + ## The maximum duration for writing the entire response. + # write_timeout = "5s" + + ## Username and password to accept for HTTP basic authentication. + # basic_username = "user1" + # basic_password = "secret" + + ## Allowed CA certificates for client certificates. + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## TLS server certificate and private key. + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## One or more check sub-tables should be defined, it is also recommended to + ## use metric filtering to limit the metrics that flow into this output. + ## + ## When using the default buffer sizes, this example will fail when the + ## metric buffer is half full. + ## + ## namepass = ["internal_write"] + ## tagpass = { output = ["influxdb"] } + ## + ## [[outputs.health.compares]] + ## field = "buffer_size" + ## lt = 5000.0 + ## + ## [[outputs.health.contains]] + ## field = "buffer_size" +``` + +#### compares + +The `compares` check is used to assert basic mathematical relationships. Use +it by choosing a field key and one or more comparisons. All comparisons must +be true on all metrics for the check to pass. If the field is not found on a +metric, no comparison will be made. + +#### contains + +The `contains` check can be used to require a field key to exist on at least +one metric.
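To make the 200/503 contract from the README concrete, here is a minimal probe (illustrative only, not part of the patch). It assumes the default `service_address` of `tcp://:8080` and that no basic auth or TLS is configured:

```go
package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	// Default listen address from the sample config; adjust as needed.
	resp, err := http.Get("http://localhost:8080")
	if err != nil {
		fmt.Fprintln(os.Stderr, "health endpoint unreachable:", err)
		os.Exit(2)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK:
		fmt.Println("healthy: all configured checks passed")
	case http.StatusServiceUnavailable:
		fmt.Println("unhealthy: at least one check failed on the last batch")
		os.Exit(1)
	default:
		fmt.Println("unexpected status:", resp.Status)
		os.Exit(1)
	}
}
```

A load balancer or orchestrator liveness probe pointed at the same address consumes the identical signal with no extra code.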
diff --git a/plugins/outputs/health/compares.go b/plugins/outputs/health/compares.go new file mode 100644 index 000000000..9228bd2df --- /dev/null +++ b/plugins/outputs/health/compares.go @@ -0,0 +1,77 @@ +package health + +import ( + "github.com/influxdata/telegraf" +) + +type Compares struct { + Field string `toml:"field"` + GT *float64 `toml:"gt"` + GE *float64 `toml:"ge"` + LT *float64 `toml:"lt"` + LE *float64 `toml:"le"` + EQ *float64 `toml:"eq"` + NE *float64 `toml:"ne"` +} + +func (c *Compares) runChecks(fv float64) bool { + if c.GT != nil && !(fv > *c.GT) { + return false + } + if c.GE != nil && !(fv >= *c.GE) { + return false + } + if c.LT != nil && !(fv < *c.LT) { + return false + } + if c.LE != nil && !(fv <= *c.LE) { + return false + } + if c.EQ != nil && !(fv == *c.EQ) { + return false + } + if c.NE != nil && !(fv != *c.NE) { + return false + } + return true +} + +func (c *Compares) Check(metrics []telegraf.Metric) bool { + success := true + for _, m := range metrics { + fv, ok := m.GetField(c.Field) + if !ok { + continue + } + + f, ok := asFloat(fv) + if !ok { + return false + } + + result := c.runChecks(f) + if !result { + success = false + } + } + return success +} + +func asFloat(fv interface{}) (float64, bool) { + switch v := fv.(type) { + case int64: + return float64(v), true + case float64: + return v, true + case uint64: + return float64(v), true + case bool: + if v { + return 1.0, true + } else { + return 0.0, true + } + default: + return 0.0, false + } +} diff --git a/plugins/outputs/health/compares_test.go b/plugins/outputs/health/compares_test.go new file mode 100644 index 000000000..26f0dc1e1 --- /dev/null +++ b/plugins/outputs/health/compares_test.go @@ -0,0 +1,268 @@ +package health_test + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs/health" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func addr(v float64) *float64 { + return &v +} + +func TestFieldNotFoundIsSuccess(t *testing.T) { + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{}, + time.Now()), + } + + compares := &health.Compares{ + Field: "time_idle", + GT: addr(42.0), + } + result := compares.Check(metrics) + require.True(t, result) +} + +func TestStringFieldIsFailure(t *testing.T) { + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": "foo", + }, + time.Now()), + } + + compares := &health.Compares{ + Field: "time_idle", + GT: addr(42.0), + } + result := compares.Check(metrics) + require.False(t, result) +} + +func TestFloatConvert(t *testing.T) { + tests := []struct { + name string + metrics []telegraf.Metric + expected bool + }{ + { + name: "int64 field", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": int64(42.0), + }, + time.Now()), + }, + expected: true, + }, + { + name: "uint64 field", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": uint64(42.0), + }, + time.Now()), + }, + expected: true, + }, + { + name: "float64 field", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": float64(42.0), + }, + time.Now()), + }, + expected: true, + }, + { + name: "bool field true", + metrics: []telegraf.Metric{ + testutil.MustMetric( + 
"cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": true, + }, + time.Now()), + }, + expected: true, + }, + { + name: "bool field false", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": false, + }, + time.Now()), + }, + expected: false, + }, + { + name: "string field", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": "42.0", + }, + time.Now()), + }, + expected: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + compares := &health.Compares{ + Field: "time_idle", + GT: addr(0.0), + } + actual := compares.Check(tt.metrics) + require.Equal(t, tt.expected, actual) + }) + } +} + +func TestOperators(t *testing.T) { + tests := []struct { + name string + compares *health.Compares + expected bool + }{ + { + name: "gt", + compares: &health.Compares{ + Field: "time_idle", + GT: addr(41.0), + }, + expected: true, + }, + { + name: "not gt", + compares: &health.Compares{ + Field: "time_idle", + GT: addr(42.0), + }, + expected: false, + }, + { + name: "ge", + compares: &health.Compares{ + Field: "time_idle", + GE: addr(42.0), + }, + expected: true, + }, + { + name: "not ge", + compares: &health.Compares{ + Field: "time_idle", + GE: addr(43.0), + }, + expected: false, + }, + { + name: "lt", + compares: &health.Compares{ + Field: "time_idle", + LT: addr(43.0), + }, + expected: true, + }, + { + name: "not lt", + compares: &health.Compares{ + Field: "time_idle", + LT: addr(42.0), + }, + expected: false, + }, + { + name: "le", + compares: &health.Compares{ + Field: "time_idle", + LE: addr(42.0), + }, + expected: true, + }, + { + name: "not le", + compares: &health.Compares{ + Field: "time_idle", + LE: addr(41.0), + }, + expected: false, + }, + { + name: "eq", + compares: &health.Compares{ + Field: "time_idle", + EQ: addr(42.0), + }, + expected: true, + }, + { + name: "not eq", + compares: &health.Compares{ + Field: "time_idle", + EQ: addr(41.0), + }, + expected: false, + }, + { + name: "ne", + compares: &health.Compares{ + Field: "time_idle", + NE: addr(41.0), + }, + expected: true, + }, + { + name: "not ne", + compares: &health.Compares{ + Field: "time_idle", + NE: addr(42.0), + }, + expected: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Now()), + } + actual := tt.compares.Check(metrics) + require.Equal(t, tt.expected, actual) + }) + } +} diff --git a/plugins/outputs/health/contains.go b/plugins/outputs/health/contains.go new file mode 100644 index 000000000..ff03667e0 --- /dev/null +++ b/plugins/outputs/health/contains.go @@ -0,0 +1,19 @@ +package health + +import "github.com/influxdata/telegraf" + +type Contains struct { + Field string `toml:"field"` +} + +func (c *Contains) Check(metrics []telegraf.Metric) bool { + success := false + for _, m := range metrics { + ok := m.HasField(c.Field) + if ok { + success = true + } + } + + return success +} diff --git a/plugins/outputs/health/contains_test.go b/plugins/outputs/health/contains_test.go new file mode 100644 index 000000000..2337dd867 --- /dev/null +++ b/plugins/outputs/health/contains_test.go @@ -0,0 +1,68 @@ +package health_test + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs/health" + 
"github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestFieldFound(t *testing.T) { + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Now()), + } + + contains := &health.Contains{ + Field: "time_idle", + } + result := contains.Check(metrics) + require.True(t, result) +} + +func TestFieldNotFound(t *testing.T) { + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{}, + time.Now()), + } + + contains := &health.Contains{ + Field: "time_idle", + } + result := contains.Check(metrics) + require.False(t, result) +} + +func TestOneMetricWithFieldIsSuccess(t *testing.T) { + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{}, + time.Now()), + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Now()), + } + + contains := &health.Contains{ + Field: "time_idle", + } + result := contains.Check(metrics) + require.True(t, result) +} diff --git a/plugins/outputs/health/health.go b/plugins/outputs/health/health.go new file mode 100644 index 000000000..c7c2cc547 --- /dev/null +++ b/plugins/outputs/health/health.go @@ -0,0 +1,252 @@ +package health + +import ( + "context" + "crypto/tls" + "log" + "net" + "net/http" + "net/url" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + tlsint "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/outputs" +) + +const ( + defaultServiceAddress = "tcp://:8080" + defaultReadTimeout = 5 * time.Second + defaultWriteTimeout = 5 * time.Second +) + +var sampleConfig = ` + ## Address and port to listen on. + ## ex: service_address = "tcp://localhost:8080" + ## service_address = "unix:///var/run/telegraf-health.sock" + # service_address = "tcp://:8080" + + ## The maximum duration for reading the entire request. + # read_timeout = "5s" + ## The maximum duration for writing the entire response. + # write_timeout = "5s" + + ## Username and password to accept for HTTP basic authentication. + # basic_username = "user1" + # basic_password = "secret" + + ## Allowed CA certificates for client certificates. + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## TLS server certificate and private key. + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## One or more check sub-tables should be defined, it is also recommended to + ## use metric filtering to limit the metrics that flow into this output. + ## + ## When using the default buffer sizes, this example will fail when the + ## metric buffer is half full. + ## + ## namepass = ["internal_write"] + ## tagpass = { output = ["influxdb"] } + ## + ## [[outputs.health.compares]] + ## field = "buffer_size" + ## lt = 5000.0 + ## + ## [[outputs.health.contains]] + ## field = "buffer_size" +` + +type Checker interface { + // Check returns true if the metrics meet its criteria. 
+ Check(metrics []telegraf.Metric) bool +} + +type Health struct { + ServiceAddress string `toml:"service_address"` + ReadTimeout internal.Duration `toml:"read_timeout"` + WriteTimeout internal.Duration `toml:"write_timeout"` + BasicUsername string `toml:"basic_username"` + BasicPassword string `toml:"basic_password"` + tlsint.ServerConfig + + Compares []*Compares `toml:"compares"` + Contains []*Contains `toml:"contains"` + checkers []Checker + + wg sync.WaitGroup + server *http.Server + origin string + + mu sync.Mutex + healthy bool +} + +func (h *Health) SampleConfig() string { + return sampleConfig +} + +func (h *Health) Description() string { + return "Configurable HTTP health check resource based on metrics" +} + +// Connect starts the HTTP server. +func (h *Health) Connect() error { + h.checkers = make([]Checker, 0) + for i := range h.Compares { + h.checkers = append(h.checkers, h.Compares[i]) + } + for i := range h.Contains { + h.checkers = append(h.checkers, h.Contains[i]) + } + + tlsConf, err := h.ServerConfig.TLSConfig() + if err != nil { + return err + } + + authHandler := internal.AuthHandler(h.BasicUsername, h.BasicPassword, onAuthError) + + h.server = &http.Server{ + Addr: h.ServiceAddress, + Handler: authHandler(h), + ReadTimeout: h.ReadTimeout.Duration, + WriteTimeout: h.WriteTimeout.Duration, + TLSConfig: tlsConf, + } + + listener, err := h.listen(tlsConf) + if err != nil { + return err + } + + h.origin = h.getOrigin(listener, tlsConf) + + log.Printf("I! [outputs.health] Listening on %s", h.origin) + + h.wg.Add(1) + go func() { + defer h.wg.Done() + err := h.server.Serve(listener) + if err != http.ErrServerClosed { + log.Printf("E! [outputs.health] Serve error on %s: %v", h.origin, err) + } + h.origin = "" + }() + + return nil +} + +func onAuthError(rw http.ResponseWriter, code int) { + http.Error(rw, http.StatusText(code), code) +} + +func (h *Health) listen(tlsConf *tls.Config) (net.Listener, error) { + u, err := url.Parse(h.ServiceAddress) + if err != nil { + return nil, err + } + + network := "tcp" + address := u.Host + if u.Host == "" { + network = "unix" + address = u.Path + } + + if tlsConf != nil { + return tls.Listen(network, address, tlsConf) + } else { + return net.Listen(network, address) + } + +} + +func (h *Health) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + var code = http.StatusOK + if !h.isHealthy() { + code = http.StatusServiceUnavailable + } + + rw.Header().Set("Server", internal.ProductToken()) + http.Error(rw, http.StatusText(code), code) +} + +// Write runs all checks over the metric batch and adjusts health state. +func (h *Health) Write(metrics []telegraf.Metric) error { + healthy := true + for _, checker := range h.checkers { + success := checker.Check(metrics) + if !success { + healthy = false + } + } + + h.setHealthy(healthy) + return nil +} + +// Close shuts down the HTTP server. +func (h *Health) Close() error { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + h.server.Shutdown(ctx) + h.wg.Wait() + return nil +} + +// Origin returns the URL of the HTTP server. 
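+// The origin is cleared again once the listener stops serving.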
+func (h *Health) Origin() string { + return h.origin +} + +func (h *Health) getOrigin(listener net.Listener, tlsConf *tls.Config) string { + switch listener.Addr().Network() { + case "tcp": + scheme := "http" + if tlsConf != nil { + scheme = "https" + } + origin := &url.URL{ + Scheme: scheme, + Host: listener.Addr().String(), + } + return origin.String() + case "unix": + return listener.Addr().String() + default: + return "" + } +} + +func (h *Health) setHealthy(healthy bool) { + h.mu.Lock() + defer h.mu.Unlock() + h.healthy = healthy +} + +func (h *Health) isHealthy() bool { + h.mu.Lock() + defer h.mu.Unlock() + return h.healthy +} + +func NewHealth() *Health { + return &Health{ + ServiceAddress: defaultServiceAddress, + ReadTimeout: internal.Duration{Duration: defaultReadTimeout}, + WriteTimeout: internal.Duration{Duration: defaultWriteTimeout}, + healthy: true, + } +} + +func init() { + outputs.Add("health", func() telegraf.Output { + return NewHealth() + }) +} diff --git a/plugins/outputs/health/health_test.go b/plugins/outputs/health/health_test.go new file mode 100644 index 000000000..234b0251c --- /dev/null +++ b/plugins/outputs/health/health_test.go @@ -0,0 +1,124 @@ +package health_test + +import ( + "io/ioutil" + "net/http" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs/health" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestHealth(t *testing.T) { + type Options struct { + Compares []*health.Compares `toml:"compares"` + Contains []*health.Contains `toml:"contains"` + } + + now := time.Now() + tests := []struct { + name string + options Options + metrics []telegraf.Metric + expectedCode int + }{ + { + name: "healthy on startup", + expectedCode: 200, + }, + { + name: "check passes", + options: Options{ + Compares: []*health.Compares{ + { + Field: "time_idle", + GT: func() *float64 { v := 0.0; return &v }(), + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42, + }, + now), + }, + expectedCode: 200, + }, + { + name: "check fails", + options: Options{ + Compares: []*health.Compares{ + { + Field: "time_idle", + LT: func() *float64 { v := 0.0; return &v }(), + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42, + }, + now), + }, + expectedCode: 503, + }, + { + name: "mixed check fails", + options: Options{ + Compares: []*health.Compares{ + { + Field: "time_idle", + LT: func() *float64 { v := 0.0; return &v }(), + }, + }, + Contains: []*health.Contains{ + { + Field: "foo", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42, + }, + now), + }, + expectedCode: 503, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + output := health.NewHealth() + output.ServiceAddress = "tcp://127.0.0.1:0" + output.Compares = tt.options.Compares + output.Contains = tt.options.Contains + + err := output.Connect() + require.NoError(t, err) + err = output.Write(tt.metrics) + require.NoError(t, err) + + resp, err := http.Get(output.Origin()) + require.NoError(t, err) + require.Equal(t, tt.expectedCode, resp.StatusCode) + + _, err = ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + err = output.Close() + require.NoError(t, err) + }) + } +} From dd6a3dbafc5b78c5a3c33a77ea8b6b1a060c404a Mon Sep 17 00:00:00 2001 From: Daniel 
Nelson Date: Wed, 29 May 2019 18:36:18 -0700 Subject: [PATCH 0875/1815] Update changelog and readme --- CHANGELOG.md | 1 + README.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a1737d25a..8b7ce0377 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ #### New Outputs - [syslog](/plugins/outputs/syslog/README.md) - Contributed by @javicrespo +- [health](/plugins/outputs/health/README.md) - Contributed by @influxdata #### New Serializers diff --git a/README.md b/README.md index 5d0843e03..45edae1e0 100644 --- a/README.md +++ b/README.md @@ -357,6 +357,7 @@ For documentation on the latest development code see the [documentation index][d * [file](./plugins/outputs/file) * [graphite](./plugins/outputs/graphite) * [graylog](./plugins/outputs/graylog) +* [health](./plugins/outputs/health) * [http](./plugins/outputs/http) * [instrumental](./plugins/outputs/instrumental) * [kafka](./plugins/outputs/kafka) From 59d646a8e897cddd30adc0c433befba4e6fc3b31 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 29 May 2019 18:54:44 -0700 Subject: [PATCH 0876/1815] Test and build official packages with Go 1.12 (#5923) --- .circleci/config.yml | 46 +++++++++++++++++++++++++++++++----------- Makefile | 9 +++++++-- appveyor.yml | 4 ++-- scripts/ci-1.11.docker | 2 +- scripts/ci-1.12.docker | 28 +++++++++++++++++++++++++ 5 files changed, 72 insertions(+), 17 deletions(-) create mode 100644 scripts/ci-1.12.docker diff --git a/.circleci/config.yml b/.circleci/config.yml index 4f8dc3d2d..f068ae108 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -10,12 +10,15 @@ defaults: - image: 'quay.io/influxdb/telegraf-ci:1.10.8' go-1_11: &go-1_11 docker: - - image: 'quay.io/influxdb/telegraf-ci:1.11.5' + - image: 'quay.io/influxdb/telegraf-ci:1.11.10' + go-1_12: &go-1_12 + docker: + - image: 'quay.io/influxdb/telegraf-ci:1.12.5' version: 2 jobs: deps: - <<: [ *defaults, *go-1_11 ] + <<: [ *defaults, *go-1_12 ] steps: - checkout - restore_cache: @@ -55,8 +58,15 @@ jobs: at: '/go/src' - run: 'make check' - run: 'make test' - test-go-1.11-386: - <<: [ *defaults, *go-1_11 ] + test-go-1.12: + <<: [ *defaults, *go-1_12 ] + steps: + - attach_workspace: + at: '/go/src' + - run: 'make check' + - run: 'make test' + test-go-1.12-386: + <<: [ *defaults, *go-1_12 ] steps: - attach_workspace: at: '/go/src' @@ -64,7 +74,7 @@ - run: 'GOARCH=386 make check' - run: 'GOARCH=386 make test' package: - <<: [ *defaults, *go-1_11 ] + <<: [ *defaults, *go-1_12 ] steps: - attach_workspace: at: '/go/src' @@ -73,7 +83,7 @@ path: './build' destination: 'build' release: - <<: [ *defaults, *go-1_11 ] + <<: [ *defaults, *go-1_12 ] steps: - attach_workspace: at: '/go/src' @@ -82,7 +92,7 @@ path: './build' destination: 'build' nightly: - <<: [ *defaults, *go-1_11 ] + <<: [ *defaults, *go-1_12 ] steps: - attach_workspace: at: '/go/src' @@ -117,7 +127,13 @@ workflows: filters: tags: only: /.*/ - - 'test-go-1.11-386': + - 'test-go-1.12': + requires: + - 'deps' + filters: + tags: + only: /.*/ + - 'test-go-1.12-386': requires: - 'deps' filters: @@ -128,13 +144,15 @@ - 'test-go-1.9' - 'test-go-1.10' - 'test-go-1.11' - - 'test-go-1.11-386' + - 'test-go-1.12' + - 'test-go-1.12-386' - 'release': requires: - 'test-go-1.9' - 'test-go-1.10' - 'test-go-1.11' - - 'test-go-1.11-386' + - 'test-go-1.12' + - 'test-go-1.12-386' filters: tags: only: /.*/ @@ -152,7 +170,10 @@ - 'test-go-1.11': requires: - 'deps' - - 'test-go-1.11-386': + - 'test-go-1.12': + requires: + - 'deps' + - 
'test-go-1.12-386': requires: - 'deps' - 'nightly': @@ -160,7 +181,8 @@ workflows: - 'test-go-1.9' - 'test-go-1.10' - 'test-go-1.11' - - 'test-go-1.11-386' + - 'test-go-1.12' + - 'test-go-1.12-386' triggers: - schedule: cron: "0 7 * * *" diff --git a/Makefile b/Makefile index 100883198..3b5e01f30 100644 --- a/Makefile +++ b/Makefile @@ -131,10 +131,15 @@ plugin-%: @echo "Starting dev environment for $${$(@)} input plugin..." @docker-compose -f plugins/inputs/$${$(@)}/dev/docker-compose.yml up +.PHONY: ci-1.12 +ci-1.11: + docker build -t quay.io/influxdb/telegraf-ci:1.12.5 - < scripts/ci-1.12.docker + docker push quay.io/influxdb/telegraf-ci:1.12.5 + .PHONY: ci-1.11 ci-1.11: - docker build -t quay.io/influxdb/telegraf-ci:1.11.5 - < scripts/ci-1.11.docker - docker push quay.io/influxdb/telegraf-ci:1.11.5 + docker build -t quay.io/influxdb/telegraf-ci:1.11.10 - < scripts/ci-1.11.docker + docker push quay.io/influxdb/telegraf-ci:1.11.10 .PHONY: ci-1.10 ci-1.10: diff --git a/appveyor.yml b/appveyor.yml index 39ec04425..46dcf97ba 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -13,11 +13,11 @@ platform: x64 install: - IF NOT EXIST "C:\Cache" mkdir C:\Cache - - IF NOT EXIST "C:\Cache\go1.11.5.msi" curl -o "C:\Cache\go1.11.5.msi" https://storage.googleapis.com/golang/go1.11.5.windows-amd64.msi + - IF NOT EXIST "C:\Cache\go1.12.5.msi" curl -o "C:\Cache\go1.12.5.msi" https://storage.googleapis.com/golang/go1.12.5.windows-amd64.msi - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip - IF EXIST "C:\Go" rmdir /S /Q C:\Go - - msiexec.exe /i "C:\Cache\go1.11.5.msi" /quiet + - msiexec.exe /i "C:\Cache\go1.12.5.msi" /quiet - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y - go get -d github.com/golang/dep diff --git a/scripts/ci-1.11.docker b/scripts/ci-1.11.docker index 823b3dadf..5e4cb5662 100644 --- a/scripts/ci-1.11.docker +++ b/scripts/ci-1.11.docker @@ -1,4 +1,4 @@ -FROM golang:1.11.5 +FROM golang:1.11.10 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/ci-1.12.docker b/scripts/ci-1.12.docker new file mode 100644 index 000000000..760c50a44 --- /dev/null +++ b/scripts/ci-1.12.docker @@ -0,0 +1,28 @@ +FROM golang:1.12.5 + +RUN chmod -R 755 "$GOPATH" + +RUN DEBIAN_FRONTEND=noninteractive \ + apt update && apt install -y --no-install-recommends \ + autoconf \ + git \ + libtool \ + locales \ + make \ + python-boto \ + rpm \ + ruby \ + ruby-dev \ + zip && \ + rm -rf /var/lib/apt/lists/* + +RUN ln -sf /usr/share/zoneinfo/Etc/UTC /etc/localtime +RUN locale-gen C.UTF-8 || true +ENV LANG=C.UTF-8 + +RUN gem install fpm + +RUN go get -d github.com/golang/dep && \ + cd src/github.com/golang/dep && \ + git checkout -q v0.5.0 && \ + go install -ldflags="-X main.version=v0.5.0" ./cmd/dep From 484122b7d44e20aea76d3a53cb234a755c7e5921 Mon Sep 17 00:00:00 2001 From: Sebastien Leger Date: Fri, 31 May 2019 00:17:04 +0200 Subject: [PATCH 0877/1815] Add open_weather_map input plugin (#5125) --- plugins/inputs/all/all.go | 1 + plugins/inputs/openweathermap/README.md | 73 +++ .../inputs/openweathermap/openweathermap.go | 305 +++++++++++ .../openweathermap/openweathermap_test.go | 482 ++++++++++++++++++ testutil/accumulator.go | 26 +- 5 files changed, 883 insertions(+), 4 deletions(-) create mode 100644 plugins/inputs/openweathermap/README.md create mode 
100644 plugins/inputs/openweathermap/openweathermap.go create mode 100644 plugins/inputs/openweathermap/openweathermap_test.go diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 47f977f32..a626fce92 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -101,6 +101,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/nvidia_smi" _ "github.com/influxdata/telegraf/plugins/inputs/openldap" _ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd" + _ "github.com/influxdata/telegraf/plugins/inputs/openweathermap" _ "github.com/influxdata/telegraf/plugins/inputs/passenger" _ "github.com/influxdata/telegraf/plugins/inputs/pf" _ "github.com/influxdata/telegraf/plugins/inputs/pgbouncer" diff --git a/plugins/inputs/openweathermap/README.md b/plugins/inputs/openweathermap/README.md new file mode 100644 index 000000000..7b781b129 --- /dev/null +++ b/plugins/inputs/openweathermap/README.md @@ -0,0 +1,73 @@ +# Telegraf Plugin: openweathermap + +OpenWeatherMap provides the current weather and forecasts for more than 200,000 cities. To use this plugin you will need an API token (`app_id`). For more information [click here](https://openweathermap.org/appid). + +Find city identifiers in this [list](http://bulk.openweathermap.org/sample/city.list.json.gz). You can also use this [URL](https://openweathermap.org/find) as an alternative to downloading a file. The ID is in the URL of the city: `https://openweathermap.org/city/2643743` + +### Configuration: + +```toml +[[inputs.openweathermap]] + ## Root url of API to pull stats + # base_url = "https://api.openweathermap.org/data/2.5/" + ## Your personal user token from openweathermap.org + # app_id = "xxxxxxxxxxxxxxxxxxxxxxx" + ## List of city identifiers + # city_id = ["2988507", "519188"] + ## HTTP response timeout (default: 5s) + # response_timeout = "5s" + ## Query the current weather and future forecast + # fetch = ["weather", "forecast"] + ## For temperature in Fahrenheit use units=imperial + ## For temperature in Celsius use units=metric (default) + # units = "metric" +``` + +### Metrics: + ++ weather + - fields: + - humidity (int, Humidity percentage) + - temperature (float, Unit: Celsius) + - pressure (float, Atmospheric pressure in hPa) + - rain (float, Rain volume for the last 3 hours, mm) + - wind_speed (float, Wind speed. 
Unit Default: meter/sec) + - wind_degrees (float, Wind direction, degrees) + - tags: + - city_id + - forecast + +### Example Output: + +Using this configuration: +```toml +[[inputs.openweathermap]] + base_url = "https://api.openweathermap.org/data/2.5/" + app_id = "change_this_with_your_appid" + city_id = ["2988507", "519188"] + response_timeout = "5s" + fetch = ["weather", "forecast"] + units = "metric" +``` + +When run with: +``` +./telegraf -config telegraf.conf -input-filter openweathermap -test +``` + +It produces data similar to: +``` +> weather,city_id=4303602,forecast=* humidity=51i,pressure=1012,rain=0,temperature=16.410000000000025,wind_degrees=170,wind_speed=2.6 1556393944000000000 +> weather,city_id=2988507,forecast=* humidity=87i,pressure=1020,rain=0,temperature=7.110000000000014,wind_degrees=260,wind_speed=5.1 1556393841000000000 +> weather,city_id=2988507,forecast=3h humidity=69i,pressure=1020.38,rain=0,temperature=5.650000000000034,wind_degrees=268.456,wind_speed=5.83 1556398800000000000 +> weather,city_id=2988507,forecast=* humidity=69i,pressure=1020.38,rain=0,temperature=5.650000000000034,wind_degrees=268.456,wind_speed=5.83 1556398800000000000 +> weather,city_id=2988507,forecast=6h humidity=74i,pressure=1020.87,rain=0,temperature=5.810000000000002,wind_degrees=261.296,wind_speed=5.43 1556409600000000000 +> weather,city_id=2988507,forecast=* humidity=74i,pressure=1020.87,rain=0,temperature=5.810000000000002,wind_degrees=261.296,wind_speed=5.43 1556409600000000000 +> weather,city_id=4303602,forecast=9h humidity=66i,pressure=1010.63,rain=0,temperature=14.740000000000009,wind_degrees=196.264,wind_speed=4.3 1556398800000000000 +> weather,city_id=4303602,forecast=* humidity=66i,pressure=1010.63,rain=0,temperature=14.740000000000009,wind_degrees=196.264,wind_speed=4.3 1556398800000000000 +``` + + + + + diff --git a/plugins/inputs/openweathermap/openweathermap.go b/plugins/inputs/openweathermap/openweathermap.go new file mode 100644 index 000000000..1c246d0b6 --- /dev/null +++ b/plugins/inputs/openweathermap/openweathermap.go @@ -0,0 +1,305 @@ +package openweathermap + +import ( + "bufio" + "encoding/json" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type OpenWeatherMap struct { + BaseUrl string + AppId string + CityId []string + + client *http.Client + + ResponseTimeout internal.Duration + Fetch []string + Units string +} + +// https://openweathermap.org/current#severalid +// Call for several city IDs +// The limit of locations is 20. +const owmRequestSeveralCityId int = 20 +const defaultResponseTimeout time.Duration = time.Second * 5 +const defaultUnits string = "metric" + +var sampleConfig = ` + ## Root url of weather map REST API + base_url = "https://api.openweathermap.org/" + ## Your personal user token from openweathermap.org + app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + city_id = ["2988507", "2988588"] + + ## HTTP response timeout (default: 5s) + response_timeout = "5s" + fetch = ["weather", "forecast"] + units = "metric" + ## Limit OpenWeatherMap query interval. 
See calls per minute info at: https://openweathermap.org/price + interval = "10m" +` + +func (n *OpenWeatherMap) SampleConfig() string { + return sampleConfig +} + +func (n *OpenWeatherMap) Description() string { + return "Read current weather and forecasts data from openweathermap.org" +} + +func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + var strs []string + + base, err := url.Parse(n.BaseUrl) + if err != nil { + return err + } + + // Create an HTTP client that is re-used for each + // collection interval + + if n.client == nil { + client, err := n.createHttpClient() + if err != nil { + return err + } + n.client = client + } + units := n.Units + if units == "" { + units = defaultUnits + } + for _, fetch := range n.Fetch { + if fetch == "forecast" { + var u *url.URL + var addr *url.URL + + for _, city := range n.CityId { + u, err = url.Parse(fmt.Sprintf("/data/2.5/forecast?id=%s&APPID=%s&units=%s", city, n.AppId, units)) + if err != nil { + acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + continue + } + addr = base.ResolveReference(u) + wg.Add(1) + go func(addr *url.URL) { + defer wg.Done() + acc.AddError(n.gatherUrl(addr, acc, true)) + }(addr) + } + } else if fetch == "weather" { + j := 0 + for j < len(n.CityId) { + var u *url.URL + var addr *url.URL + strs = make([]string, 0) + for i := 0; j < len(n.CityId) && i < owmRequestSeveralCityId; i++ { + strs = append(strs, n.CityId[j]) + j++ + } + cities := strings.Join(strs, ",") + + u, err = url.Parse(fmt.Sprintf("/data/2.5/group?id=%s&APPID=%s&units=%s", cities, n.AppId, units)) + if err != nil { + acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + continue + } + + addr = base.ResolveReference(u) + wg.Add(1) + go func(addr *url.URL) { + defer wg.Done() + acc.AddError(n.gatherUrl(addr, acc, false)) + }(addr) + } + + } + } + + wg.Wait() + return nil +} + +func (n *OpenWeatherMap) createHttpClient() (*http.Client, error) { + + if n.ResponseTimeout.Duration < time.Second { + n.ResponseTimeout.Duration = defaultResponseTimeout + } + + client := &http.Client{ + Transport: &http.Transport{}, + Timeout: n.ResponseTimeout.Duration, + } + + return client, nil +} + +func (n *OpenWeatherMap) gatherUrl(addr *url.URL, acc telegraf.Accumulator, forecast bool) error { + resp, err := n.client.Get(addr.String()) + + if err != nil { + return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status) + } + contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] + switch contentType { + case "application/json": + err = gatherWeatherUrl(bufio.NewReader(resp.Body), forecast, acc) + return err + default: + return fmt.Errorf("%s returned unexpected content type %s", addr.String(), contentType) + } +} + +type WeatherEntry struct { + Dt int64 `json:"dt"` + Dttxt string `json:"dt_txt"` // empty for weather/ + Clouds struct { + All int64 `json:"all"` + } `json:"clouds"` + Main struct { + GrndLevel float64 `json:"grnd_level"` // empty for weather/ + Humidity int64 `json:"humidity"` + SeaLevel float64 `json:"sea_level"` // empty for weather/ + Pressure float64 `json:"pressure"` + Temp float64 `json:"temp"` + TempMax float64 `json:"temp_max"` + TempMin float64 `json:"temp_min"` + } `json:"main"` + Rain struct { + Rain3 float64 `json:"3h"` + } `json:"rain"` + Sys struct { + Pod string `json:"pod"` + Country string 
`json:"country"` + Message float64 `json:"message"` + Id int64 `json:"id"` + Type int64 `json:"type"` + Sunrise int64 `json:"sunrise"` + Sunset int64 `json:"sunset"` + } `json:"sys"` + Wind struct { + Deg float64 `json:"deg"` + Speed float64 `json:"speed"` + } `json:"wind"` + Weather []struct { + Description string `json:"description"` + Icon string `json:"icon"` + Id int64 `json:"id"` + Main string `json:"main"` + } `json:"weather"` + + // Additional entries for weather/ + Id int64 `json:"id"` + Name string `json:"name"` + Coord struct { + Lat float64 `json:"lat"` + Lon float64 `json:"lon"` + } `json:"coord"` + Visibility int64 `json:"visibility"` +} + +type Status struct { + City struct { + Coord struct { + Lat float64 `json:"lat"` + Lon float64 `json:"lon"` + } `json:"coord"` + Country string `json:"country"` + Id int64 `json:"id"` + Name string `json:"name"` + } `json:"city"` + List []WeatherEntry `json:"list"` +} + +func gatherWeatherUrl(r *bufio.Reader, forecast bool, acc telegraf.Accumulator) error { + dec := json.NewDecoder(r) + status := &Status{} + if err := dec.Decode(status); err != nil { + return fmt.Errorf("Error while decoding JSON response: %s", err) + } + status.Gather(forecast, acc) + return nil +} + +func (s *Status) Gather(forecast bool, acc telegraf.Accumulator) { + tags := map[string]string{ + "city_id": strconv.FormatInt(s.City.Id, 10), + "forecast": "*", + } + + for i, e := range s.List { + tm := time.Unix(e.Dt, 0) + if e.Id > 0 { + tags["city_id"] = strconv.FormatInt(e.Id, 10) + } + if forecast { + tags["forecast"] = fmt.Sprintf("%dh", (i+1)*3) + } + acc.AddFields( + "weather", + map[string]interface{}{ + "rain": e.Rain.Rain3, + "wind_degrees": e.Wind.Deg, + "wind_speed": e.Wind.Speed, + "humidity": e.Main.Humidity, + "pressure": e.Main.Pressure, + "temperature": e.Main.Temp, + }, + tags, + tm) + } + if forecast { + // intentional: overwrite future data points + // under the * tag + tags := map[string]string{ + "city_id": strconv.FormatInt(s.City.Id, 10), + "forecast": "*", + } + for _, e := range s.List { + tm := time.Unix(e.Dt, 0) + if e.Id > 0 { + tags["city_id"] = strconv.FormatInt(e.Id, 10) + } + acc.AddFields( + "weather", + map[string]interface{}{ + "rain": e.Rain.Rain3, + "wind_degrees": e.Wind.Deg, + "wind_speed": e.Wind.Speed, + "humidity": e.Main.Humidity, + "pressure": e.Main.Pressure, + "temperature": e.Main.Temp, + }, + tags, + tm) + } + } +} + +func init() { + inputs.Add("openweathermap", func() telegraf.Input { + tmout := internal.Duration{ + Duration: defaultResponseTimeout, + } + return &OpenWeatherMap{ + ResponseTimeout: tmout, + Units: defaultUnits, + } + }) +} diff --git a/plugins/inputs/openweathermap/openweathermap_test.go b/plugins/inputs/openweathermap/openweathermap_test.go new file mode 100644 index 000000000..98f0a64a2 --- /dev/null +++ b/plugins/inputs/openweathermap/openweathermap_test.go @@ -0,0 +1,482 @@ +package openweathermap + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +const sampleNoContent = ` +{ +} +` + +const sampleStatusResponse = ` +{ + "city": { + "coord": { + "lat": 48.8534, + "lon": 2.3488 + }, + "country": "FR", + "id": 2988507, + "name": "Paris" + }, + "cnt": 40, + "cod": "200", + "list": [ + { + "clouds": { + "all": 88 + }, + "dt": 1543622400, + "dt_txt": "2018-12-01 00:00:00", + "main": { + "grnd_level": 1018.65, + "humidity": 91, + "pressure": 1018.65, + "sea_level": 1030.99, + "temp": 6.71, + 
"temp_kf": -2.14 + }, + "rain": { + "3h": 0.035 + }, + "sys": { + "pod": "n" + }, + "weather": [ + { + "description": "light rain", + "icon": "10n", + "id": 500, + "main": "Rain" + } + ], + "wind": { + "deg": 228.501, + "speed": 3.76 + } + }, + { + "clouds": { + "all": 92 + }, + "dt": 1544043600, + "dt_txt": "2018-12-05 21:00:00", + "main": { + "grnd_level": 1032.18, + "humidity": 98, + "pressure": 1032.18, + "sea_level": 1044.78, + "temp": 6.38, + "temp_kf": 0 + }, + "rain": { + "3h": 0.049999999999997 + }, + "sys": { + "pod": "n" + }, + "weather": [ + { + "description": "light rain", + "icon": "10n", + "id": 500, + "main": "Rain" + } + ], + "wind": { + "deg": 335.005, + "speed": 2.66 + } + } + ], + "message": 0.0025 +} +` + +const groupWeatherResponse = ` +{ + "cnt": 1, + "list": [{ + "coord": { + "lat": 48.85, + "lon": 2.35 + }, + "dt": 1544194800, + "id": 2988507, + "main": { + "humidity": 87, + "pressure": 1007, + "temp": 9.25 + }, + "name": "Paris", + "sys": { + "country": "FR", + "id": 6550, + "message": 0.002, + "sunrise": 1544167818, + "sunset": 1544198047, + "type": 1 + }, + "visibility": 10000, + "weather": [ + { + "description": "light intensity drizzle", + "icon": "09d", + "id": 300, + "main": "Drizzle" + } + ], + "wind": { + "deg": 290, + "speed": 8.7 + } + }] +} +` + +const batchWeatherResponse = ` +{ + "cnt": 3, + "list": [{ + "coord": { + "lon": 37.62, + "lat": 55.75 + }, + "sys": { + "type": 1, + "id": 9029, + "message": 0.0061, + "country": "RU", + "sunrise": 1556416455, + "sunset": 1556470779 + }, + "weather": [{ + "id": 802, + "main": "Clouds", + "description": "scattered clouds", + "icon": "03d" + }], + "main": { + "temp": 9.57, + "pressure": 1014, + "humidity": 46 + }, + "visibility": 10000, + "wind": { + "speed": 5, + "deg": 60 + }, + "clouds": { + "all": 40 + }, + "dt": 1556444155, + "id": 524901, + "name": "Moscow" + }, { + "coord": { + "lon": 30.52, + "lat": 50.43 + }, + "sys": { + "type": 1, + "id": 8903, + "message": 0.0076, + "country": "UA", + "sunrise": 1556419155, + "sunset": 1556471486 + }, + "weather": [{ + "id": 520, + "main": "Rain", + "description": "light intensity shower rain", + "icon": "09d" + }], + "main": { + "temp": 19.29, + "pressure": 1009, + "humidity": 63 + }, + "visibility": 10000, + "wind": { + "speed": 1 + }, + "clouds": { + "all": 0 + }, + "dt": 1556444155, + "id": 703448, + "name": "Kiev" + }, { + "coord": { + "lon": -0.13, + "lat": 51.51 + }, + "sys": { + "type": 1, + "id": 1414, + "message": 0.0088, + "country": "GB", + "sunrise": 1556426319, + "sunset": 1556479032 + }, + "weather": [{ + "id": 803, + "main": "Clouds", + "description": "broken clouds", + "icon": "04d" + }], + "main": { + "temp": 10.62, + "pressure": 1019, + "humidity": 66 + }, + "visibility": 10000, + "wind": { + "speed": 6.2, + "deg": 290 + }, + "rain": { + "3h": 0.072 + }, + "clouds": { + "all": 75 + }, + "dt": 1556444155, + "id": 2643743, + "name": "London" + }] +} +` + +func TestForecastGeneratesMetrics(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var rsp string + if r.URL.Path == "/data/2.5/forecast" { + rsp = sampleStatusResponse + w.Header()["Content-Type"] = []string{"application/json"} + } else if r.URL.Path == "/data/2.5/group" { + rsp = sampleNoContent + } else { + panic("Cannot handle request") + } + + fmt.Fprintln(w, rsp) + })) + defer ts.Close() + + n := &OpenWeatherMap{ + BaseUrl: ts.URL, + AppId: "noappid", + CityId: []string{"2988507"}, + Fetch: []string{"weather", "forecast"}, + Units: 
"metric", + } + + var acc testutil.Accumulator + + err_openweathermap := n.Gather(&acc) + require.NoError(t, err_openweathermap) + for _, forecast_tag := range []string{"*", "3h"} { + acc.AssertContainsTaggedFields( + t, + "weather", + map[string]interface{}{ + "humidity": int64(91), + "pressure": 1018.65, + "temperature": 6.71, + "rain": 0.035, + "wind_degrees": 228.501, + "wind_speed": 3.76, + }, + map[string]string{ + "city_id": "2988507", + "forecast": forecast_tag, + }) + } + for _, forecast_tag := range []string{"*", "6h"} { + acc.AssertContainsTaggedFields( + t, + "weather", + map[string]interface{}{ + "humidity": int64(98), + "pressure": 1032.18, + "temperature": 6.38, + "rain": 0.049999999999997, + "wind_degrees": 335.005, + "wind_speed": 2.66, + }, + map[string]string{ + "city_id": "2988507", + "forecast": forecast_tag, + }) + } +} + +func TestWeatherGeneratesMetrics(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var rsp string + if r.URL.Path == "/data/2.5/group" { + rsp = groupWeatherResponse + w.Header()["Content-Type"] = []string{"application/json"} + } else if r.URL.Path == "/data/2.5/forecast" { + rsp = sampleNoContent + } else { + panic("Cannot handle request") + } + + fmt.Fprintln(w, rsp) + })) + defer ts.Close() + + n := &OpenWeatherMap{ + BaseUrl: ts.URL, + AppId: "noappid", + CityId: []string{"2988507"}, + Fetch: []string{"weather"}, + Units: "metric", + } + + var acc testutil.Accumulator + + err_openweathermap := n.Gather(&acc) + + require.NoError(t, err_openweathermap) + + acc.AssertContainsTaggedFields( + t, + "weather", + map[string]interface{}{ + "humidity": int64(87), + "pressure": 1007.0, + "temperature": 9.25, + "wind_degrees": 290.0, + "wind_speed": 8.7, + "rain": 0.0, + }, + map[string]string{ + "city_id": "2988507", + "forecast": "*", + }) +} + +func TestBatchWeatherGeneratesMetrics(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var rsp string + if r.URL.Path == "/data/2.5/group" { + rsp = batchWeatherResponse + w.Header()["Content-Type"] = []string{"application/json"} + } else if r.URL.Path == "/data/2.5/forecast" { + rsp = sampleNoContent + } else { + panic("Cannot handle request") + } + + fmt.Fprintln(w, rsp) + })) + defer ts.Close() + + n := &OpenWeatherMap{ + BaseUrl: ts.URL, + AppId: "noappid", + CityId: []string{"524901", "703448", "2643743"}, + Fetch: []string{"weather"}, + Units: "metric", + } + + var acc testutil.Accumulator + + err_openweathermap := n.Gather(&acc) + + require.NoError(t, err_openweathermap) + + acc.AssertContainsTaggedFields( + t, + "weather", + map[string]interface{}{ + "humidity": int64(46), + "pressure": 1014.0, + "temperature": 9.57, + "wind_degrees": 60.0, + "wind_speed": 5.0, + "rain": 0.0, + }, + map[string]string{ + "city_id": "524901", + "forecast": "*", + }) + acc.AssertContainsTaggedFields( + t, + "weather", + map[string]interface{}{ + "humidity": int64(63), + "pressure": 1009.0, + "temperature": 19.29, + "wind_degrees": 0.0, + "wind_speed": 1.0, + "rain": 0.0, + }, + map[string]string{ + "city_id": "703448", + "forecast": "*", + }) + acc.AssertContainsTaggedFields( + t, + "weather", + map[string]interface{}{ + "humidity": int64(66), + "pressure": 1019.0, + "temperature": 10.62, + "wind_degrees": 290.0, + "wind_speed": 6.2, + "rain": 0.072, + }, + map[string]string{ + "city_id": "2643743", + "forecast": "*", + }) +} + +func TestResponseTimeout(t *testing.T) { + ts := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var rsp string + if r.URL.Path == "/data/2.5/group" { + rsp = groupWeatherResponse + w.Header()["Content-Type"] = []string{"application/json"} + } else if r.URL.Path == "/data/2.5/forecast" { + rsp = sampleStatusResponse + w.Header()["Content-Type"] = []string{"application/json"} + } else { + panic("Cannot handle request") + } + + time.Sleep(time.Second * 6) // Cause timeout + fmt.Fprintln(w, rsp) + })) + defer ts.Close() + + n := &OpenWeatherMap{ + BaseUrl: ts.URL, + AppId: "noappid", + CityId: []string{"2988507"}, + Fetch: []string{"weather"}, + Units: "metric", + } + + var acc testutil.Accumulator + + err_openweathermap := n.Gather(&acc) + + require.NoError(t, err_openweathermap) + acc.AssertDoesNotContainMeasurement( + t, + "weather", + ) +} diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 19acebe1c..b5021010a 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -320,6 +320,26 @@ func (a *Accumulator) WaitError(n int) { a.Unlock() } +func (a *Accumulator) assertContainsTaggedFields( + t *testing.T, + measurement string, + fields map[string]interface{}, + tags map[string]string, +) { + for _, p := range a.Metrics { + if !reflect.DeepEqual(tags, p.Tags) { + continue + } + + if p.Measurement == measurement { + assert.Equal(t, fields, p.Fields) + return + } + } + msg := fmt.Sprintf("unknown measurement %s with tags %v", measurement, tags) + assert.Fail(t, msg) +} + func (a *Accumulator) AssertContainsTaggedFields( t *testing.T, measurement string, @@ -333,13 +353,11 @@ func (a *Accumulator) AssertContainsTaggedFields( continue } - if p.Measurement == measurement { - assert.Equal(t, fields, p.Fields) + if p.Measurement == measurement && reflect.DeepEqual(fields, p.Fields) { return } } - msg := fmt.Sprintf("unknown measurement %s with tags %v", measurement, tags) - assert.Fail(t, msg) + a.assertContainsTaggedFields(t, measurement, fields, tags) } func (a *Accumulator) AssertDoesNotContainsTaggedFields( From 4ac2ef1c7f6e69eb14da43494e848290868d472d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 30 May 2019 15:21:25 -0700 Subject: [PATCH 0878/1815] Fix duplicate makefile target --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3b5e01f30..9bf1e342b 100644 --- a/Makefile +++ b/Makefile @@ -132,7 +132,7 @@ plugin-%: @docker-compose -f plugins/inputs/$${$(@)}/dev/docker-compose.yml up .PHONY: ci-1.12 -ci-1.11: +ci-1.12: docker build -t quay.io/influxdb/telegraf-ci:1.12.5 - < scripts/ci-1.12.docker docker push quay.io/influxdb/telegraf-ci:1.12.5 From a0213d9c4f8b5968d4d66407d5a18acc0259074e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 30 May 2019 15:21:53 -0700 Subject: [PATCH 0879/1815] Update copyright date --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 057cf997d..886dcef0b 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2015-2018 InfluxData Inc. +Copyright (c) 2015-2019 InfluxData Inc. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal From 0ca8ea1724e2b5be6facf61eec9420f32c4a95c8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 31 May 2019 16:22:37 -0700 Subject: [PATCH 0880/1815] Don't overwrite forecast points (#5930) --- CHANGELOG.md | 1 + README.md | 1 + plugins/inputs/openweathermap/README.md | 105 ++++---- .../inputs/openweathermap/openweathermap.go | 250 +++++++++--------- .../openweathermap/openweathermap_test.go | 248 ++++++++--------- testutil/accumulator.go | 23 +- 6 files changed, 315 insertions(+), 313 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8b7ce0377..966620754 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ - [bind](/plugins/inputs/bind/README.md) - Contributed by @dswarbrick & @danielllek - [ecs](/plugins/inputs/ecs/README.md) - Contributed by @rbtr - [github](/plugins/inputs/github/README.md) - Contributed by @influxdata +- [openweathermap](/plugins/inputs/openweathermap/README.md) - Contributed by @regel - [powerdns_recursor](/plugins/inputs/powerdns_recursor/README.md) - Contributed by @dupondje #### New Aggregators diff --git a/README.md b/README.md index 45edae1e0..d3adf1e2d 100644 --- a/README.md +++ b/README.md @@ -236,6 +236,7 @@ For documentation on the latest development code see the [documentation index][d * [nvidia_smi](./plugins/inputs/nvidia_smi) * [openldap](./plugins/inputs/openldap) * [opensmtpd](./plugins/inputs/opensmtpd) +* [openweathermap](./plugins/inputs/openweathermap) * [pf](./plugins/inputs/pf) * [pgbouncer](./plugins/inputs/pgbouncer) * [phpfpm](./plugins/inputs/phpfpm) diff --git a/plugins/inputs/openweathermap/README.md b/plugins/inputs/openweathermap/README.md index 7b781b129..d79699049 100644 --- a/plugins/inputs/openweathermap/README.md +++ b/plugins/inputs/openweathermap/README.md @@ -1,73 +1,68 @@ -# Telegraf Plugin: openweathermap +# OpenWeatherMap Input Plugin -OpenWeatherMap provides the current weather and forecasts for more than 200,000 cities. To use this plugin you will need a token. For more information [click here](https://openweathermap.org/appid). +Collect current weather and forecast data from OpenWeatherMap. -Find city identifiers in this [list](http://bulk.openweathermap.org/sample/city.list.json.gz). You can also use this [url](https://openweathermap.org/find) as an alternative to downloading a file. The ID is in the url of the city: `https://openweathermap.org/city/2643743` +To use this plugin you will need an [api key][] (app_id). -### Configuration: +City identifiers can be found in the [city list][]. Alternately you can +[search][] by name; the `city_id` can be found as the last digits of the URL: +https://openweathermap.org/city/2643743 + +### Configuration ```toml [[inputs.openweathermap]] - ## Root url of API to pull stats - # base_url = "https://api.openweathermap.org/data/2.5/" - ## Your personal user token from openweathermap.org - # app_id = "xxxxxxxxxxxxxxxxxxxxxxx" - ## List of city identifiers - # city_id = ["2988507", "519188"] - ## HTTP response timeout (default: 5s) + ## OpenWeatherMap API key. + app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + + ## City ID's to collect weather data from. + city_id = ["5391959"] + + ## APIs to fetch; can contain "weather" or "forecast". + fetch = ["weather", "forecast"] + + ## OpenWeatherMap base URL + # base_url = "https://api.openweathermap.org/" + + ## Timeout for HTTP response. 
# response_timeout = "5s" - ## Query the current weather and future forecast - # fetch = ["weather", "forecast"] - ## For temperature in Fahrenheit use units=imperial - ## For temperature in Celsius use units=metric (default) + + ## Preferred unit system for temperature and wind speed. Can be one of + ## "metric", "imperial", or "standard". # units = "metric" + + ## Query interval; OpenWeatherMap weather data is updated every 10 + ## minutes. + interval = "10m" ``` -### Metrics: +### Metrics -+ weather - - fields: - - humidity (int, Humidity percentage) - - temperature (float, Unit: Celcius) - - pressure (float, Atmospheric pressure in hPa) - - rain (float, Rain volume for the last 3 hours, mm) - - wind_speed (float, Wind speed. Unit Default: meter/sec) - - wind_degrees (float, Wind direction, degrees) +- weather - tags: - city_id - forecast + - fields: + - cloudiness (int, percent) + - humidity (int, percent) + - pressure (float, atmospheric pressure hPa) + - rain (float, rain volume for the last 3 hours in mm) + - sunrise (int, nanoseconds since unix epoch) + - sunset (int, nanoseconds since unix epoch) + - temperature (float, degrees) + - visibility (int, meters, not available on forecast data) + - wind_degrees (float, wind direction in degrees) + - wind_speed (float, wind speed in meters/sec or miles/sec) -### Example Output: -Using this configuration: -```toml -[[inputs.openweathermap]] - base_url = "https://api.openweathermap.org/data/2.5/" - app_id = "change_this_with_your_appid" - city_id = ["2988507", "519188"] - response_timeout = "5s" - fetch = ["weather", "forecast"] - units = "metric" +### Example Output + +``` +> weather,city=San\ Francisco,city_id=5391959,country=US,forecast=* cloudiness=40i,humidity=72i,pressure=1013,rain=0,sunrise=1559220629000000000i,sunset=1559273058000000000i,temperature=13.31,visibility=16093i,wind_degrees=280,wind_speed=4.6 1559268695000000000 +> weather,city=San\ Francisco,city_id=5391959,country=US,forecast=3h cloudiness=0i,humidity=86i,pressure=1012.03,rain=0,temperature=10.69,wind_degrees=222.855,wind_speed=2.76 1559271600000000000 +> weather,city=San\ Francisco,city_id=5391959,country=US,forecast=6h cloudiness=11i,humidity=93i,pressure=1012.79,rain=0,temperature=9.34,wind_degrees=212.685,wind_speed=1.85 1559282400000000000 ``` -When run with: -``` -./telegraf -config telegraf.conf -input-filter openweathermap -test -``` - -It produces data similar to: -``` -> weather,city_id=4303602,forecast=* humidity=51i,pressure=1012,rain=0,temperature=16.410000000000025,wind_degrees=170,wind_speed=2.6 1556393944000000000 -> weather,city_id=2988507,forecast=* humidity=87i,pressure=1020,rain=0,temperature=7.110000000000014,wind_degrees=260,wind_speed=5.1 1556393841000000000 -> weather,city_id=2988507,forecast=3h humidity=69i,pressure=1020.38,rain=0,temperature=5.650000000000034,wind_degrees=268.456,wind_speed=5.83 1556398800000000000 -> weather,city_id=2988507,forecast=* humidity=69i,pressure=1020.38,rain=0,temperature=5.650000000000034,wind_degrees=268.456,wind_speed=5.83 1556398800000000000 -> weather,city_id=2988507,forecast=6h humidity=74i,pressure=1020.87,rain=0,temperature=5.810000000000002,wind_degrees=261.296,wind_speed=5.43 1556409600000000000 -> weather,city_id=2988507,forecast=* humidity=74i,pressure=1020.87,rain=0,temperature=5.810000000000002,wind_degrees=261.296,wind_speed=5.43 1556409600000000000 -> weather,city_id=4303602,forecast=9h humidity=66i,pressure=1010.63,rain=0,temperature=14.740000000000009,wind_degrees=196.264,wind_speed=4.3 
1556398800000000000 -> weather,city_id=4303602,forecast=* humidity=66i,pressure=1010.63,rain=0,temperature=14.740000000000009,wind_degrees=196.264,wind_speed=4.3 1556398800000000000 -``` - - - - - +[api key]: https://openweathermap.org/appid +[city list]: http://bulk.openweathermap.org/sample/city.list.json.gz +[search]: https://openweathermap.org/find diff --git a/plugins/inputs/openweathermap/openweathermap.go b/plugins/inputs/openweathermap/openweathermap.go index 1c246d0b6..c15ee3832 100644 --- a/plugins/inputs/openweathermap/openweathermap.go +++ b/plugins/inputs/openweathermap/openweathermap.go @@ -1,9 +1,10 @@ package openweathermap import ( - "bufio" "encoding/json" "fmt" + "io" + "mime" "net/http" "net/url" "strconv" @@ -16,37 +17,50 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +const ( + // https://openweathermap.org/current#severalid + // Call for several city IDs + // The limit of locations is 20. + owmRequestSeveralCityId int = 20 + + defaultBaseURL = "https://api.openweathermap.org/" + defaultResponseTimeout time.Duration = time.Second * 5 + defaultUnits string = "metric" +) + type OpenWeatherMap struct { - BaseUrl string - AppId string - CityId []string + AppId string `toml:"app_id"` + CityId []string `toml:"city_id"` + Fetch []string `toml:"fetch"` + BaseUrl string `toml:"base_url"` + ResponseTimeout internal.Duration `toml:"response_timeout"` + Units string `toml:"units"` client *http.Client - - ResponseTimeout internal.Duration - Fetch []string - Units string } -// https://openweathermap.org/current#severalid -// Call for several city IDs -// The limit of locations is 20. -const owmRequestSeveralCityId int = 20 -const defaultResponseTimeout time.Duration = time.Second * 5 -const defaultUnits string = "metric" - var sampleConfig = ` - ## Root url of weather map REST API - base_url = "https://api.openweathermap.org/" - ## Your personal user token from openweathermap.org + ## OpenWeatherMap API key. app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" - city_id = ["2988507", "2988588"] - ## HTTP response timeout (default: 5s) - response_timeout = "5s" + ## City ID's to collect weather data from. + city_id = ["5391959"] + + ## APIs to fetch; can contain "weather" or "forecast". fetch = ["weather", "forecast"] - units = "metric" - ## Limit OpenWeatherMap query interval. See calls per minute info at: https://openweathermap.org/price + + ## OpenWeatherMap base URL + # base_url = "https://api.openweathermap.org/" + + ## Timeout for HTTP response. + # response_timeout = "5s" + + ## Preferred unit system for temperature and wind speed. Can be one of + ## "metric", "imperial", or "standard". + # units = "metric" + + ## Query interval; OpenWeatherMap updates their weather data every 10 + ## minutes. 
interval = "10m" ` @@ -69,7 +83,6 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { // Create an HTTP client that is re-used for each // collection interval - if n.client == nil { client, err := n.createHttpClient() if err != nil { @@ -77,33 +90,43 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { } n.client = client } + units := n.Units - if units == "" { + switch n.Units { + case "imperial", "standard": + break + default: units = defaultUnits } + for _, fetch := range n.Fetch { if fetch == "forecast" { var u *url.URL - var addr *url.URL for _, city := range n.CityId { u, err = url.Parse(fmt.Sprintf("/data/2.5/forecast?id=%s&APPID=%s&units=%s", city, n.AppId, units)) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + acc.AddError(fmt.Errorf("unable to parse address '%s': %s", u, err)) continue } - addr = base.ResolveReference(u) + + addr := base.ResolveReference(u).String() wg.Add(1) - go func(addr *url.URL) { + go func() { defer wg.Done() - acc.AddError(n.gatherUrl(addr, acc, true)) - }(addr) + status, err := n.gatherUrl(addr) + if err != nil { + acc.AddError(err) + return + } + + gatherForecast(acc, status) + }() } } else if fetch == "weather" { j := 0 for j < len(n.CityId) { var u *url.URL - var addr *url.URL strs = make([]string, 0) for i := 0; j < len(n.CityId) && i < owmRequestSeveralCityId; i++ { strs = append(strs, n.CityId[j]) @@ -117,12 +140,18 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { continue } - addr = base.ResolveReference(u) + addr := base.ResolveReference(u).String() wg.Add(1) - go func(addr *url.URL) { + go func() { defer wg.Done() - acc.AddError(n.gatherUrl(addr, acc, false)) - }(addr) + status, err := n.gatherUrl(addr) + if err != nil { + acc.AddError(err) + return + } + + gatherWeather(acc, status) + }() } } @@ -133,7 +162,6 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { } func (n *OpenWeatherMap) createHttpClient() (*http.Client, error) { - if n.ResponseTimeout.Duration < time.Second { n.ResponseTimeout.Duration = defaultResponseTimeout } @@ -146,65 +174,51 @@ func (n *OpenWeatherMap) createHttpClient() (*http.Client, error) { return client, nil } -func (n *OpenWeatherMap) gatherUrl(addr *url.URL, acc telegraf.Accumulator, forecast bool) error { - resp, err := n.client.Get(addr.String()) - +func (n *OpenWeatherMap) gatherUrl(addr string) (*Status, error) { + resp, err := n.client.Get(addr) if err != nil { - return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) + return nil, fmt.Errorf("error making HTTP request to %s: %s", addr, err) } defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { - return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status) + return nil, fmt.Errorf("%s returned HTTP status %s", addr, resp.Status) } - contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] - switch contentType { - case "application/json": - err = gatherWeatherUrl(bufio.NewReader(resp.Body), forecast, acc) - return err - default: - return fmt.Errorf("%s returned unexpected content type %s", addr.String(), contentType) + + mediaType, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) + if err != nil { + return nil, err } + + if mediaType != "application/json" { + return nil, fmt.Errorf("%s returned unexpected content type %s", addr, mediaType) + } + + return gatherWeatherUrl(resp.Body) } type WeatherEntry struct { - Dt int64 `json:"dt"` - Dttxt string `json:"dt_txt"` // empty for 
weather/ + Dt int64 `json:"dt"` Clouds struct { All int64 `json:"all"` } `json:"clouds"` Main struct { - GrndLevel float64 `json:"grnd_level"` // empty for weather/ - Humidity int64 `json:"humidity"` - SeaLevel float64 `json:"sea_level"` // empty for weather/ - Pressure float64 `json:"pressure"` - Temp float64 `json:"temp"` - TempMax float64 `json:"temp_max"` - TempMin float64 `json:"temp_min"` + Humidity int64 `json:"humidity"` + Pressure float64 `json:"pressure"` + Temp float64 `json:"temp"` } `json:"main"` Rain struct { Rain3 float64 `json:"3h"` } `json:"rain"` Sys struct { - Pod string `json:"pod"` - Country string `json:"country"` - Message float64 `json:"message"` - Id int64 `json:"id"` - Type int64 `json:"type"` - Sunrise int64 `json:"sunrise"` - Sunset int64 `json:"sunset"` + Country string `json:"country"` + Sunrise int64 `json:"sunrise"` + Sunset int64 `json:"sunset"` } `json:"sys"` Wind struct { Deg float64 `json:"deg"` Speed float64 `json:"speed"` } `json:"wind"` - Weather []struct { - Description string `json:"description"` - Icon string `json:"icon"` - Id int64 `json:"id"` - Main string `json:"main"` - } `json:"weather"` - - // Additional entries for weather/ Id int64 `json:"id"` Name string `json:"name"` Coord struct { @@ -227,69 +241,66 @@ type Status struct { List []WeatherEntry `json:"list"` } -func gatherWeatherUrl(r *bufio.Reader, forecast bool, acc telegraf.Accumulator) error { +func gatherWeatherUrl(r io.Reader) (*Status, error) { dec := json.NewDecoder(r) status := &Status{} if err := dec.Decode(status); err != nil { - return fmt.Errorf("Error while decoding JSON response: %s", err) + return nil, fmt.Errorf("error while decoding JSON response: %s", err) } - status.Gather(forecast, acc) - return nil + return status, nil } -func (s *Status) Gather(forecast bool, acc telegraf.Accumulator) { - tags := map[string]string{ - "city_id": strconv.FormatInt(s.City.Id, 10), - "forecast": "*", - } - - for i, e := range s.List { +func gatherWeather(acc telegraf.Accumulator, status *Status) { + for _, e := range status.List { tm := time.Unix(e.Dt, 0) - if e.Id > 0 { - tags["city_id"] = strconv.FormatInt(e.Id, 10) - } - if forecast { - tags["forecast"] = fmt.Sprintf("%dh", (i+1)*3) - } acc.AddFields( "weather", map[string]interface{}{ - "rain": e.Rain.Rain3, - "wind_degrees": e.Wind.Deg, - "wind_speed": e.Wind.Speed, + "cloudiness": e.Clouds.All, "humidity": e.Main.Humidity, "pressure": e.Main.Pressure, + "rain": e.Rain.Rain3, + "sunrise": time.Unix(e.Sys.Sunrise, 0).UnixNano(), + "sunset": time.Unix(e.Sys.Sunset, 0).UnixNano(), "temperature": e.Main.Temp, + "visibility": e.Visibility, + "wind_degrees": e.Wind.Deg, + "wind_speed": e.Wind.Speed, + }, + map[string]string{ + "city": e.Name, + "city_id": strconv.FormatInt(e.Id, 10), + "country": e.Sys.Country, + "forecast": "*", + }, + tm) + } +} + +func gatherForecast(acc telegraf.Accumulator, status *Status) { + tags := map[string]string{ + "city_id": strconv.FormatInt(status.City.Id, 10), + "forecast": "*", + "city": status.City.Name, + "country": status.City.Country, + } + for i, e := range status.List { + tm := time.Unix(e.Dt, 0) + tags["forecast"] = fmt.Sprintf("%dh", (i+1)*3) + acc.AddFields( + "weather", + map[string]interface{}{ + "cloudiness": e.Clouds.All, + "humidity": e.Main.Humidity, + "pressure": e.Main.Pressure, + "rain": e.Rain.Rain3, + "temperature": e.Main.Temp, + "wind_degrees": e.Wind.Deg, + "wind_speed": e.Wind.Speed, }, tags, tm) } - if forecast { - // intentional: overwrite future data points - // under the * tag 
- tags := map[string]string{ - "city_id": strconv.FormatInt(s.City.Id, 10), - "forecast": "*", - } - for _, e := range s.List { - tm := time.Unix(e.Dt, 0) - if e.Id > 0 { - tags["city_id"] = strconv.FormatInt(e.Id, 10) - } - acc.AddFields( - "weather", - map[string]interface{}{ - "rain": e.Rain.Rain3, - "wind_degrees": e.Wind.Deg, - "wind_speed": e.Wind.Speed, - "humidity": e.Main.Humidity, - "pressure": e.Main.Pressure, - "temperature": e.Main.Temp, - }, - tags, - tm) - } - } } func init() { @@ -300,6 +311,7 @@ func init() { return &OpenWeatherMap{ ResponseTimeout: tmout, Units: defaultUnits, + BaseUrl: defaultBaseURL, } }) } diff --git a/plugins/inputs/openweathermap/openweathermap_test.go b/plugins/inputs/openweathermap/openweathermap_test.go index 98f0a64a2..d59766dd6 100644 --- a/plugins/inputs/openweathermap/openweathermap_test.go +++ b/plugins/inputs/openweathermap/openweathermap_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -105,6 +106,9 @@ const groupWeatherResponse = ` { "cnt": 1, "list": [{ + "clouds": { + "all": 0 + }, "coord": { "lat": 48.85, "lon": 2.35 @@ -282,13 +286,20 @@ func TestForecastGeneratesMetrics(t *testing.T) { var acc testutil.Accumulator - err_openweathermap := n.Gather(&acc) - require.NoError(t, err_openweathermap) - for _, forecast_tag := range []string{"*", "3h"} { - acc.AssertContainsTaggedFields( - t, + err := n.Gather(&acc) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( "weather", + map[string]string{ + "city_id": "2988507", + "forecast": "3h", + "city": "Paris", + "country": "FR", + }, map[string]interface{}{ + "cloudiness": int64(88), "humidity": int64(91), "pressure": 1018.65, "temperature": 6.71, @@ -296,16 +307,18 @@ func TestForecastGeneratesMetrics(t *testing.T) { "wind_degrees": 228.501, "wind_speed": 3.76, }, + time.Unix(1543622400, 0), + ), + testutil.MustMetric( + "weather", map[string]string{ "city_id": "2988507", - "forecast": forecast_tag, - }) - } - for _, forecast_tag := range []string{"*", "6h"} { - acc.AssertContainsTaggedFields( - t, - "weather", + "forecast": "6h", + "city": "Paris", + "country": "FR", + }, map[string]interface{}{ + "cloudiness": int64(92), "humidity": int64(98), "pressure": 1032.18, "temperature": 6.38, @@ -313,11 +326,13 @@ func TestForecastGeneratesMetrics(t *testing.T) { "wind_degrees": 335.005, "wind_speed": 2.66, }, - map[string]string{ - "city_id": "2988507", - "forecast": forecast_tag, - }) + time.Unix(1544043600, 0), + ), } + + testutil.RequireMetricsEqual(t, + expected, acc.GetTelegrafMetrics(), + testutil.SortMetrics()) } func TestWeatherGeneratesMetrics(t *testing.T) { @@ -346,25 +361,34 @@ func TestWeatherGeneratesMetrics(t *testing.T) { var acc testutil.Accumulator - err_openweathermap := n.Gather(&acc) + err := n.Gather(&acc) + require.NoError(t, err) - require.NoError(t, err_openweathermap) - - acc.AssertContainsTaggedFields( - t, - "weather", - map[string]interface{}{ - "humidity": int64(87), - "pressure": 1007.0, - "temperature": 9.25, - "wind_degrees": 290.0, - "wind_speed": 8.7, - "rain": 0.0, - }, - map[string]string{ - "city_id": "2988507", - "forecast": "*", - }) + expected := []telegraf.Metric{ + testutil.MustMetric( + "weather", + map[string]string{ + "city_id": "2988507", + "forecast": "*", + "city": "Paris", + "country": "FR", + }, + map[string]interface{}{ + "cloudiness": int64(0), + "humidity": int64(87), + "pressure": 1007.0, + 
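+ // expected values come from the canned groupWeatherResponse JSON above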
"temperature": 9.25, + "rain": 0.0, + "sunrise": int64(1544167818000000000), + "sunset": int64(1544198047000000000), + "wind_degrees": 290.0, + "wind_speed": 8.7, + "visibility": 10000, + }, + time.Unix(1544194800, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) } func TestBatchWeatherGeneratesMetrics(t *testing.T) { @@ -393,90 +417,78 @@ func TestBatchWeatherGeneratesMetrics(t *testing.T) { var acc testutil.Accumulator - err_openweathermap := n.Gather(&acc) + err := n.Gather(&acc) + require.NoError(t, err) - require.NoError(t, err_openweathermap) - - acc.AssertContainsTaggedFields( - t, - "weather", - map[string]interface{}{ - "humidity": int64(46), - "pressure": 1014.0, - "temperature": 9.57, - "wind_degrees": 60.0, - "wind_speed": 5.0, - "rain": 0.0, - }, - map[string]string{ - "city_id": "524901", - "forecast": "*", - }) - acc.AssertContainsTaggedFields( - t, - "weather", - map[string]interface{}{ - "humidity": int64(63), - "pressure": 1009.0, - "temperature": 19.29, - "wind_degrees": 0.0, - "wind_speed": 1.0, - "rain": 0.0, - }, - map[string]string{ - "city_id": "703448", - "forecast": "*", - }) - acc.AssertContainsTaggedFields( - t, - "weather", - map[string]interface{}{ - "humidity": int64(66), - "pressure": 1019.0, - "temperature": 10.62, - "wind_degrees": 290.0, - "wind_speed": 6.2, - "rain": 0.072, - }, - map[string]string{ - "city_id": "2643743", - "forecast": "*", - }) -} - -func TestResponseTimeout(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var rsp string - if r.URL.Path == "/data/2.5/group" { - rsp = groupWeatherResponse - w.Header()["Content-Type"] = []string{"application/json"} - } else if r.URL.Path == "/data/2.5/forecast" { - rsp = sampleStatusResponse - w.Header()["Content-Type"] = []string{"application/json"} - } else { - panic("Cannot handle request") - } - - time.Sleep(time.Second * 6) // Cause timeout - fmt.Fprintln(w, rsp) - })) - defer ts.Close() - - n := &OpenWeatherMap{ - BaseUrl: ts.URL, - AppId: "noappid", - CityId: []string{"2988507"}, - Fetch: []string{"weather"}, - Units: "metric", + expected := []telegraf.Metric{ + testutil.MustMetric( + "weather", + map[string]string{ + "city_id": "524901", + "forecast": "*", + "city": "Moscow", + "country": "RU", + }, + map[string]interface{}{ + "cloudiness": 40, + "humidity": int64(46), + "pressure": 1014.0, + "temperature": 9.57, + "wind_degrees": 60.0, + "wind_speed": 5.0, + "rain": 0.0, + "sunrise": int64(1556416455000000000), + "sunset": int64(1556470779000000000), + "visibility": 10000, + }, + time.Unix(1556444155, 0), + ), + testutil.MustMetric( + "weather", + map[string]string{ + "city_id": "703448", + "forecast": "*", + "city": "Kiev", + "country": "UA", + }, + map[string]interface{}{ + "cloudiness": 0, + "humidity": int64(63), + "pressure": 1009.0, + "temperature": 19.29, + "wind_degrees": 0.0, + "wind_speed": 1.0, + "rain": 0.0, + "sunrise": int64(1556419155000000000), + "sunset": int64(1556471486000000000), + "visibility": 10000, + }, + time.Unix(1556444155, 0), + ), + testutil.MustMetric( + "weather", + map[string]string{ + "city_id": "2643743", + "forecast": "*", + "city": "London", + "country": "GB", + }, + map[string]interface{}{ + "cloudiness": 75, + "humidity": int64(66), + "pressure": 1019.0, + "temperature": 10.62, + "wind_degrees": 290.0, + "wind_speed": 6.2, + "rain": 0.072, + "sunrise": int64(1556426319000000000), + "sunset": int64(1556479032000000000), + "visibility": 10000, + }, + 
time.Unix(1556444155, 0), + ), } - - var acc testutil.Accumulator - - err_openweathermap := n.Gather(&acc) - - require.NoError(t, err_openweathermap) - acc.AssertDoesNotContainMeasurement( - t, - "weather", - ) + testutil.RequireMetricsEqual(t, + expected, acc.GetTelegrafMetrics(), + testutil.SortMetrics()) } diff --git a/testutil/accumulator.go b/testutil/accumulator.go index b5021010a..e33959a83 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -320,26 +320,6 @@ func (a *Accumulator) WaitError(n int) { a.Unlock() } -func (a *Accumulator) assertContainsTaggedFields( - t *testing.T, - measurement string, - fields map[string]interface{}, - tags map[string]string, -) { - for _, p := range a.Metrics { - if !reflect.DeepEqual(tags, p.Tags) { - continue - } - - if p.Measurement == measurement { - assert.Equal(t, fields, p.Fields) - return - } - } - msg := fmt.Sprintf("unknown measurement %s with tags %v", measurement, tags) - assert.Fail(t, msg) -} - func (a *Accumulator) AssertContainsTaggedFields( t *testing.T, measurement string, @@ -357,7 +337,8 @@ func (a *Accumulator) AssertContainsTaggedFields( return } } - a.assertContainsTaggedFields(t, measurement, fields, tags) + msg := fmt.Sprintf("unknown measurement %s with tags %v", measurement, tags) + assert.Fail(t, msg) } func (a *Accumulator) AssertDoesNotContainsTaggedFields( From 3d7a71889d5f9a50452f778e467cc9d1a4f0cd05 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 31 May 2019 16:50:44 -0700 Subject: [PATCH 0881/1815] Ignore context canceled error when reloading/stopping agent --- cmd/telegraf/telegraf.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index dcc8f29fa..3678387cd 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -103,7 +103,7 @@ func reloadLoop( }() err := runAgent(ctx, inputFilters, outputFilters) - if err != nil { + if err != nil && err != context.Canceled { log.Fatalf("E! [telegraf] Error running agent: %v", err) } } From 9cc3b3d234806873b6b949964efcb4e5129f1495 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 31 May 2019 16:54:55 -0700 Subject: [PATCH 0882/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 966620754..d456954b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -74,6 +74,7 @@ - [#3485](https://github.com/influxdata/telegraf/issues/3485): Fix sqlserver connection closing on error. - [#5917](https://github.com/influxdata/telegraf/issues/5917): Fix toml option name in nginx_upstream_check. - [#5920](https://github.com/influxdata/telegraf/issues/5920): Fixed datastore name mapping in vsphere input. +- [#5879](https://github.com/influxdata/telegraf/issues/5879): Fix multiple SIGHUP causes Telegraf to shutdown. 
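The agent fix above works because `context.Canceled` is a sentinel error: when a reload or stop cancels the root context, `runAgent` surfaces that sentinel, and it should be treated as a clean exit rather than logged fatally. The same pattern in miniature (names invented):

```go
package main

import (
	"context"
	"fmt"
	"log"
)

// run stands in for runAgent: on shutdown it returns the context's error.
func run(ctx context.Context) error {
	<-ctx.Done()
	return ctx.Err()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate a SIGHUP/SIGTERM driven shutdown

	if err := run(ctx); err != nil && err != context.Canceled {
		log.Fatalf("E! [telegraf] Error running agent: %v", err)
	}
	fmt.Println("clean shutdown")
}
```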
## v1.10.4 [2019-05-14] From 17d66b864c3c432366a773176bd9731a7845c512 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 31 May 2019 16:55:31 -0700 Subject: [PATCH 0883/1815] Close idle connections in influxdb outputs when reloading (#5912) --- internal/http_go1.11.go | 15 +++++++++++++++ internal/http_go1.12.go | 9 +++++++++ plugins/outputs/influxdb/http.go | 4 ++++ plugins/outputs/influxdb/influxdb.go | 4 ++++ plugins/outputs/influxdb/influxdb_test.go | 5 +++++ plugins/outputs/influxdb/udp.go | 3 +++ plugins/outputs/influxdb_v2/http.go | 4 ++++ plugins/outputs/influxdb_v2/influxdb.go | 4 ++++ 8 files changed, 48 insertions(+) create mode 100644 internal/http_go1.11.go create mode 100644 internal/http_go1.12.go diff --git a/internal/http_go1.11.go b/internal/http_go1.11.go new file mode 100644 index 000000000..d1a1ae31a --- /dev/null +++ b/internal/http_go1.11.go @@ -0,0 +1,15 @@ +// +build !go1.12 + +package internal + +import "net/http" + +func CloseIdleConnections(c *http.Client) { + type closeIdler interface { + CloseIdleConnections() + } + + if tr, ok := c.Transport.(closeIdler); ok { + tr.CloseIdleConnections() + } +} diff --git a/internal/http_go1.12.go b/internal/http_go1.12.go new file mode 100644 index 000000000..d5b1a847f --- /dev/null +++ b/internal/http_go1.12.go @@ -0,0 +1,9 @@ +// +build go1.12 + +package internal + +import "net/http" + +func CloseIdleConnections(c *http.Client) { + c.CloseIdleConnections() +} diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 43aa55ea8..794eee8b8 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -448,3 +448,7 @@ func makeQueryURL(loc *url.URL) (string, error) { } return u.String(), nil } + +func (c *httpClient) Close() { + internal.CloseIdleConnections(c.client) +} diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 3b3e80206..b2d1a9026 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -27,6 +27,7 @@ type Client interface { CreateDatabase(ctx context.Context, database string) error Database() string URL() string + Close() } // InfluxDB struct is the primary data structure for the plugin @@ -183,6 +184,9 @@ func (i *InfluxDB) Connect() error { } func (i *InfluxDB) Close() error { + for _, client := range i.clients { + client.Close() + } return nil } diff --git a/plugins/outputs/influxdb/influxdb_test.go b/plugins/outputs/influxdb/influxdb_test.go index 2f47d8134..73f481e9a 100644 --- a/plugins/outputs/influxdb/influxdb_test.go +++ b/plugins/outputs/influxdb/influxdb_test.go @@ -19,6 +19,7 @@ type MockClient struct { WriteF func(context.Context, []telegraf.Metric) error CreateDatabaseF func(ctx context.Context, database string) error DatabaseF func() string + CloseF func() } func (c *MockClient) URL() string { @@ -37,6 +38,10 @@ func (c *MockClient) Database() string { return c.DatabaseF() } +func (c *MockClient) Close() { + c.CloseF() +} + func TestDeprecatedURLSupport(t *testing.T) { var actual *influxdb.UDPConfig output := influxdb.InfluxDB{ diff --git a/plugins/outputs/influxdb/udp.go b/plugins/outputs/influxdb/udp.go index 31c854def..a33b98563 100644 --- a/plugins/outputs/influxdb/udp.go +++ b/plugins/outputs/influxdb/udp.go @@ -136,3 +136,6 @@ func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) { } return 0, nil, nil } + +func (c *udpClient) Close() { +} diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go 
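The two `internal/http_go1.*.go` files above use build constraints so that `internal.CloseIdleConnections` compiles on both sides of the Go 1.12 boundary: on 1.12+ it calls `http.Client.CloseIdleConnections` directly, while on older toolchains it type-asserts the transport. A sketch of how an output's client wrapper is expected to use the helper (the wrapper type here is illustrative, but mirrors the `httpClient.Close` methods added in this patch):

```go
package example

import (
	"net/http"
	"time"

	"github.com/influxdata/telegraf/internal"
)

type httpClient struct {
	client *http.Client
}

func newHTTPClient() *httpClient {
	return &httpClient{client: &http.Client{Timeout: 5 * time.Second}}
}

// Close releases kept-alive connections so a config reload does not leak them.
func (c *httpClient) Close() {
	internal.CloseIdleConnections(c.client)
}
```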
index 7bc9a4770..a57a1bc67 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -307,3 +307,7 @@ func makeWriteURL(loc url.URL, org, bucket string) (string, error) { loc.RawQuery = params.Encode() return loc.String(), nil } + +func (c *httpClient) Close() { + internal.CloseIdleConnections(c.client) +} diff --git a/plugins/outputs/influxdb_v2/influxdb.go b/plugins/outputs/influxdb_v2/influxdb.go index ff621fe9a..8998ba3c7 100644 --- a/plugins/outputs/influxdb_v2/influxdb.go +++ b/plugins/outputs/influxdb_v2/influxdb.go @@ -74,6 +74,7 @@ type Client interface { Write(context.Context, []telegraf.Metric) error URL() string // for logging + Close() } type InfluxDB struct { @@ -137,6 +138,9 @@ func (i *InfluxDB) Connect() error { } func (i *InfluxDB) Close() error { + for _, client := range i.clients { + client.Close() + } return nil } From d0beb192042f55dff1c468ec99e8972da5b3175b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 31 May 2019 16:56:40 -0700 Subject: [PATCH 0884/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d456954b7..7a2e68d0c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -75,6 +75,7 @@ - [#5917](https://github.com/influxdata/telegraf/issues/5917): Fix toml option name in nginx_upstream_check. - [#5920](https://github.com/influxdata/telegraf/issues/5920): Fixed datastore name mapping in vsphere input. - [#5879](https://github.com/influxdata/telegraf/issues/5879): Fix multiple SIGHUP causes Telegraf to shutdown. +- [#5891](https://github.com/influxdata/telegraf/issues/5891): Fix connection leak in influxdb outputs on reload. ## v1.10.4 [2019-05-14] From 1c0d3a0eb9bb2c4bbb274bb463ae0b41ed5a2cc4 Mon Sep 17 00:00:00 2001 From: Kristoffer Berdal Date: Sun, 2 Jun 2019 02:11:47 +0200 Subject: [PATCH 0885/1815] Add file rotation based on file age to file output plugin (#5547) --- plugins/outputs/file/README.md | 3 +++ plugins/outputs/file/file.go | 43 +++++++++++++++++++++++++--------- 2 files changed, 35 insertions(+), 11 deletions(-) diff --git a/plugins/outputs/file/README.md b/plugins/outputs/file/README.md index de577eacf..a6c4ac53f 100644 --- a/plugins/outputs/file/README.md +++ b/plugins/outputs/file/README.md @@ -8,6 +8,9 @@ This plugin writes telegraf metrics to files ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] + ## If this is defined, files will be rotated by the time.Duration specified + # rotate_max_age = "1m" + ## Data format to output. ## Each data format has its own unique set of configuration options, read ## more about them here: diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go index 0ef61df51..89380ae7e 100644 --- a/plugins/outputs/file/file.go +++ b/plugins/outputs/file/file.go @@ -4,16 +4,19 @@ import ( "fmt" "io" "os" + "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/rotate" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" ) type File struct { - Files []string + Files []string + RotateMaxAge string - writers []io.Writer + writer io.Writer closers []io.Closer serializer serializers.Serializer @@ -23,6 +26,9 @@ var sampleConfig = ` ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] + ## If this is defined, files will be rotated by the time.Duration specified + # rotate_max_age = "1m" + ## Data format to output. 
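Patch 0885 starts routing every file handle through `internal/rotate`; judging by the calls in this patch, `rotate.NewFileWriter(path, interval, maxSize, maxArchives)` returns an `io.WriteCloser` that transparently rotates the underlying file. A hedged sketch of opening an age-rotated writer on its own (path invented, argument order inferred from the calls below):

```go
package main

import (
	"log"
	"time"

	"github.com/influxdata/telegraf/internal/rotate"
)

func main() {
	// Rotate every minute, no size limit (0), archives set to -1,
	// matching the rotate_max_age = "1m" defaults used in this patch.
	w, err := rotate.NewFileWriter("/tmp/metrics.out", time.Minute, 0, -1)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if _, err := w.Write([]byte("cpu usage_idle=99\n")); err != nil {
		log.Fatal(err)
	}
}
```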
## Each data format has its own unique set of configuration options, read ## more about them here: @@ -35,23 +41,38 @@ func (f *File) SetSerializer(serializer serializers.Serializer) { } func (f *File) Connect() error { + writers := []io.Writer{} + if len(f.Files) == 0 { f.Files = []string{"stdout"} } for _, file := range f.Files { if file == "stdout" { - f.writers = append(f.writers, os.Stdout) + writers = append(writers, os.Stdout) } else { - of, err := os.OpenFile(file, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModeAppend|0644) + var of io.WriteCloser + var err error + if f.RotateMaxAge != "" { + maxAge, err := time.ParseDuration(f.RotateMaxAge) + if err != nil { + return err + } + + // Only rotate by file age for now, keep no archives. + of, err = rotate.NewFileWriter(file, maxAge, 0, -1) + } else { + // Just open a normal file + of, err = rotate.NewFileWriter(file, 0, 0, -1) + } if err != nil { return err } - - f.writers = append(f.writers, of) + writers = append(writers, of) f.closers = append(f.closers, of) } } + f.writer = io.MultiWriter(writers...) return nil } @@ -76,19 +97,19 @@ func (f *File) Description() string { func (f *File) Write(metrics []telegraf.Metric) error { var writeErr error = nil + for _, metric := range metrics { b, err := f.serializer.Serialize(metric) if err != nil { return fmt.Errorf("failed to serialize message: %s", err) } - for _, writer := range f.writers { - _, err = writer.Write(b) - if err != nil && writer != os.Stdout { - writeErr = fmt.Errorf("E! failed to write message: %s, %s", b, err) - } + _, err = f.writer.Write(b) + if err != nil { + writeErr = fmt.Errorf("E! failed to write message: %s, %s", b, err) } } + return writeErr } From 411f67d2b85ec7f8d47d3459b62a96e1db52b58f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Sat, 1 Jun 2019 17:57:11 -0700 Subject: [PATCH 0886/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7a2e68d0c..ea7493b6a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -55,6 +55,7 @@ - [#5831](https://github.com/influxdata/telegraf/pull/5831): Support passive queue declaration in amqp_consumer. - [#5901](https://github.com/influxdata/telegraf/pull/5901): Set user agent in stackdriver output. - [#5885](https://github.com/influxdata/telegraf/pull/5885): Extend metrics collected from Nvidia GPUs. +- [#5547](https://github.com/influxdata/telegraf/pull/5547): Add file rotation support to the file output. #### Bugfixes From 3c83a53d51363644663e3da68e5e37395a8ba8a0 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 3 Jun 2019 12:31:20 -0600 Subject: [PATCH 0887/1815] Remove verbose debug logs from smart input (#5948) --- plugins/inputs/smart/smart.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go index 8bec2581f..b606b6f38 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -125,7 +125,6 @@ func (m *Smart) Gather(acc telegraf.Accumulator) error { return err } } - log.Printf("D! [inputs.smart] devices: %+#v", devices) m.getAttributes(acc, devices) return nil @@ -216,8 +215,6 @@ func gatherDisk(acc telegraf.Accumulator, usesudo, collectAttributes bool, smart deviceFields := make(map[string]interface{}) deviceFields["exit_status"] = exitStatus - log.Printf("D! 
[inputs.smart] gatherDisk '%s'", deviceNode) - scanner := bufio.NewScanner(strings.NewReader(outStr)) for scanner.Scan() { From 4197426a734d901f7a13d0bdbb29d6555a340f18 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 3 Jun 2019 17:34:48 -0700 Subject: [PATCH 0888/1815] Ignore errors serializing single metrics (#5943) --- plugins/outputs/amqp/amqp.go | 3 ++- plugins/outputs/cloud_pubsub/pubsub.go | 6 ++++-- plugins/outputs/file/file.go | 3 ++- plugins/outputs/instrumental/instrumental.go | 3 ++- plugins/outputs/kafka/kafka.go | 6 +++--- plugins/outputs/kinesis/kinesis.go | 6 +++--- plugins/outputs/mqtt/mqtt.go | 5 +++-- plugins/outputs/nats/nats.go | 7 ++++--- plugins/outputs/nsq/nsq.go | 7 ++++--- plugins/outputs/socket_writer/socket_writer.go | 7 +++---- plugins/serializers/influx/influx.go | 3 +++ plugins/serializers/influx/reader.go | 9 +++++---- plugins/serializers/registry.go | 5 ++++- 13 files changed, 42 insertions(+), 28 deletions(-) diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index f82faef64..cb4cc4501 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -301,7 +301,8 @@ func (q *AMQP) serialize(metrics []telegraf.Metric) ([]byte, error) { for _, metric := range metrics { octets, err := q.serializer.Serialize(metric) if err != nil { - return nil, err + log.Printf("D! [outputs.amqp] Could not serialize metric: %v", err) + continue } _, err = buf.Write(octets) if err != nil { diff --git a/plugins/outputs/cloud_pubsub/pubsub.go b/plugins/outputs/cloud_pubsub/pubsub.go index c8fbf242d..5abb04afb 100644 --- a/plugins/outputs/cloud_pubsub/pubsub.go +++ b/plugins/outputs/cloud_pubsub/pubsub.go @@ -2,11 +2,12 @@ package cloud_pubsub import ( "context" + "encoding/base64" "fmt" + "log" "sync" "cloud.google.com/go/pubsub" - "encoding/base64" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" @@ -229,7 +230,8 @@ func (ps *PubSub) toMessages(metrics []telegraf.Metric) ([]*pubsub.Message, erro for i, m := range metrics { b, err := ps.serializer.Serialize(m) if err != nil { - return nil, err + log.Printf("D! [outputs.cloud_pubsub] Could not serialize metric: %v", err) + continue } if ps.Base64Data { diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go index 89380ae7e..a5eb422b7 100644 --- a/plugins/outputs/file/file.go +++ b/plugins/outputs/file/file.go @@ -3,6 +3,7 @@ package file import ( "fmt" "io" + "log" "os" "time" @@ -101,7 +102,7 @@ func (f *File) Write(metrics []telegraf.Metric) error { for _, metric := range metrics { b, err := f.serializer.Serialize(metric) if err != nil { - return fmt.Errorf("failed to serialize message: %s", err) + log.Printf("D! [outputs.file] Could not serialize metric: %v", err) } _, err = f.writer.Write(b) diff --git a/plugins/outputs/instrumental/instrumental.go b/plugins/outputs/instrumental/instrumental.go index 117c9d434..f142705a5 100644 --- a/plugins/outputs/instrumental/instrumental.go +++ b/plugins/outputs/instrumental/instrumental.go @@ -110,7 +110,8 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error { buf, err := s.Serialize(m) if err != nil { - log.Printf("E! Error serializing a metric to Instrumental: %s", err) + log.Printf("D! 
[outputs.instrumental] Could not serialize metric: %v", err) + continue } switch metricType { diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index f2951e6d5..3df5a3a67 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -6,13 +6,12 @@ import ( "log" "strings" + "github.com/Shopify/sarama" "github.com/influxdata/telegraf" tlsint "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" uuid "github.com/satori/go.uuid" - - "github.com/Shopify/sarama" ) var ValidTopicSuffixMethods = []string{ @@ -294,7 +293,8 @@ func (k *Kafka) Write(metrics []telegraf.Metric) error { for _, metric := range metrics { buf, err := k.serializer.Serialize(metric) if err != nil { - return err + log.Printf("D! [outputs.kafka] Could not serialize metric: %v", err) + continue } m := &sarama.ProducerMessage{ diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index 497676486..1b7b747e9 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -6,12 +6,11 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/satori/go.uuid" - "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/internal/config/aws" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" + "github.com/satori/go.uuid" ) type ( @@ -221,7 +220,8 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { values, err := k.serializer.Serialize(metric) if err != nil { - return err + log.Printf("D! [outputs.kinesis] Could not serialize metric: %v", err) + continue } partitionKey := k.getPartitionKey(metric) diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index bacdd3b0e..f6fba5501 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -2,6 +2,7 @@ package mqtt import ( "fmt" + "log" "strings" "sync" "time" @@ -150,9 +151,9 @@ func (m *MQTT) Write(metrics []telegraf.Metric) error { metricsmap[topic] = append(metricsmap[topic], metric) } else { buf, err := m.serializer.Serialize(metric) - if err != nil { - return err + log.Printf("D! [outputs.mqtt] Could not serialize metric: %v", err) + continue } err = m.publish(topic, buf) diff --git a/plugins/outputs/nats/nats.go b/plugins/outputs/nats/nats.go index d9fdb0e88..ef2c4bbf2 100644 --- a/plugins/outputs/nats/nats.go +++ b/plugins/outputs/nats/nats.go @@ -2,13 +2,13 @@ package nats import ( "fmt" - - nats_client "github.com/nats-io/go-nats" + "log" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" + nats_client "github.com/nats-io/go-nats" ) type NATS struct { @@ -108,7 +108,8 @@ func (n *NATS) Write(metrics []telegraf.Metric) error { for _, metric := range metrics { buf, err := n.serializer.Serialize(metric) if err != nil { - return err + log.Printf("D! 
[outputs.nats] Could not serialize metric: %v", err) + continue } err = n.conn.Publish(n.Subject, buf) diff --git a/plugins/outputs/nsq/nsq.go b/plugins/outputs/nsq/nsq.go index c826ab648..a9e2d94ac 100644 --- a/plugins/outputs/nsq/nsq.go +++ b/plugins/outputs/nsq/nsq.go @@ -2,12 +2,12 @@ package nsq import ( "fmt" - - "github.com/nsqio/go-nsq" + "log" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" + "github.com/nsqio/go-nsq" ) type NSQ struct { @@ -68,7 +68,8 @@ func (n *NSQ) Write(metrics []telegraf.Metric) error { for _, metric := range metrics { buf, err := n.serializer.Serialize(metric) if err != nil { - return err + log.Printf("D! [outputs.nsq] Could not serialize metric: %v", err) + continue } err = n.producer.Publish(n.Topic, buf) diff --git a/plugins/outputs/socket_writer/socket_writer.go b/plugins/outputs/socket_writer/socket_writer.go index 8b0f56acc..833122dfc 100644 --- a/plugins/outputs/socket_writer/socket_writer.go +++ b/plugins/outputs/socket_writer/socket_writer.go @@ -1,13 +1,12 @@ package socket_writer import ( + "crypto/tls" "fmt" "log" "net" "strings" - "crypto/tls" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/internal/tls" @@ -128,8 +127,8 @@ func (sw *SocketWriter) Write(metrics []telegraf.Metric) error { for _, m := range metrics { bs, err := sw.Serialize(m) if err != nil { - //TODO log & keep going with remaining metrics - return err + log.Printf("D! [outputs.socket_writer] Could not serialize metric: %v", err) + continue } if _, err := sw.Conn.Write(bs); err != nil { //TODO log & keep going with remaining strings diff --git a/plugins/serializers/influx/influx.go b/plugins/serializers/influx/influx.go index e7063cbd2..a675add4b 100644 --- a/plugins/serializers/influx/influx.go +++ b/plugins/serializers/influx/influx.go @@ -113,6 +113,9 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { for _, m := range metrics { _, err := s.Write(&s.buf, m) if err != nil { + if _, ok := err.(*MetricError); ok { + continue + } return nil, err } } diff --git a/plugins/serializers/influx/reader.go b/plugins/serializers/influx/reader.go index d0dad8eeb..55b6c2b41 100644 --- a/plugins/serializers/influx/reader.go +++ b/plugins/serializers/influx/reader.go @@ -53,12 +53,13 @@ func (r *reader) Read(p []byte) (int, error) { r.offset += 1 if err != nil { r.buf.Reset() - if err != nil { - // Since we are serializing multiple metrics, don't fail the - // the entire batch just because of one unserializable metric. - log.Printf("E! [serializers.influx] could not serialize metric: %v; discarding metric", err) + if _, ok := err.(*MetricError); ok { continue } + // Since we are serializing multiple metrics, don't fail the + // the entire batch just because of one unserializable metric. + log.Printf("E! [serializers.influx] could not serialize metric: %v; discarding metric", err) + continue } break } diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index e21e9205c..cfdb784cc 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -30,6 +30,9 @@ type Serializer interface { // Serialize takes a single telegraf metric and turns it into a byte buffer. // separate metrics should be separated by a newline, and there should be // a newline at the end of the buffer. 
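Patch 0888 applies one pattern across all of these outputs: a metric that cannot be serialized is logged at debug level and skipped, so a single bad metric no longer fails the whole batch (the influx serializer signals this case with a `MetricError`). The shared loop shape, reduced to a sketch (the sink function is invented):

```go
package example

import (
	"log"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/serializers"
)

func writeAll(s serializers.Serializer, sink func([]byte) error, metrics []telegraf.Metric) error {
	for _, m := range metrics {
		buf, err := s.Serialize(m)
		if err != nil {
			// Skip only this metric; the rest of the batch still goes out.
			log.Printf("D! [outputs.example] Could not serialize metric: %v", err)
			continue
		}
		if err := sink(buf); err != nil {
			return err
		}
	}
	return nil
}
```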
+ // + // New plugins should use SerializeBatch instead to allow for non-line + // delimited metrics. Serialize(metric telegraf.Metric) ([]byte, error) // SerializeBatch takes an array of telegraf metric and serializes it into @@ -41,7 +44,7 @@ type Serializer interface { // Config is a struct that covers the data types needed for all serializer types, // and can be used to instantiate _any_ of the serializers. type Config struct { - // Dataformat can be one of: influx, graphite, or json + // Dataformat can be one of the serializer types listed in NewSerializer. DataFormat string // Support tags in graphite protocol From 6c3534a66ee97cfd6b03da7887eaa2579954891d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 3 Jun 2019 17:38:21 -0700 Subject: [PATCH 0889/1815] Add support for remaining file rotation options (#5944) --- internal/config/config.go | 8 +++--- plugins/outputs/file/README.md | 18 +++++++++++--- plugins/outputs/file/file.go | 45 +++++++++++++++++----------------- 3 files changed, 40 insertions(+), 31 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index 7f0ab8484..21f5db336 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -149,12 +149,12 @@ type AgentConfig struct { // Log file name, the empty string means to log to stderr. Logfile string `toml:"logfile"` - // The logfile will be rotated when it becomes larger than the specified - // size. When set to 0 no size based rotation is performed. + // The file will be rotated after the time interval specified. When set + // to 0 no time based rotation is performed. LogfileRotationInterval internal.Duration `toml:"logfile_rotation_interval"` - // Maximum number of rotated archives to keep, any older logs are deleted. - // If set to -1, no archives are removed. + // The logfile will be rotated when it becomes larger than the specified + // size. When set to 0 no size based rotation is performed. LogfileRotationMaxSize internal.Size `toml:"logfile_rotation_max_size"` // Maximum number of rotated archives to keep, any older logs are deleted. diff --git a/plugins/outputs/file/README.md b/plugins/outputs/file/README.md index a6c4ac53f..e34f80807 100644 --- a/plugins/outputs/file/README.md +++ b/plugins/outputs/file/README.md @@ -1,15 +1,25 @@ -# file Output Plugin +# File Output Plugin This plugin writes telegraf metrics to files ### Configuration -``` + +```toml [[outputs.file]] ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] - ## If this is defined, files will be rotated by the time.Duration specified - # rotate_max_age = "1m" + ## The file will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. + # rotation_interval = "0h" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # rotation_max_archives = 5 ## Data format to output. 
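The comment fix in `internal/config/config.go` concerns the agent-level counterparts of these rotation settings. For comparison, the corresponding `[agent]` options look roughly like this (path and values illustrative; the interval and size keys are taken from the struct tags above, and the archives key is assumed to follow the same naming):

```toml
[agent]
  ## Log to a file and rotate it by time and by size.
  logfile = "/var/log/telegraf/telegraf.log"
  logfile_rotation_interval = "24h"
  logfile_rotation_max_size = "10MB"
  logfile_rotation_max_archives = 5
```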
## Each data format has its own unique set of configuration options, read diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go index a5eb422b7..11793bc4f 100644 --- a/plugins/outputs/file/file.go +++ b/plugins/outputs/file/file.go @@ -5,21 +5,22 @@ import ( "io" "log" "os" - "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/rotate" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" ) type File struct { - Files []string - RotateMaxAge string - - writer io.Writer - closers []io.Closer + Files []string `toml:"files"` + RotationInterval internal.Duration `toml:"rotation_interval"` + RotationMaxSize internal.Size `toml:"rotation_max_size"` + RotationMaxArchives int `toml:"rotation_max_archives"` + writer io.Writer + closers []io.Closer serializer serializers.Serializer } @@ -27,8 +28,17 @@ var sampleConfig = ` ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] - ## If this is defined, files will be rotated by the time.Duration specified - # rotate_max_age = "1m" + ## The file will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. + # rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # rotation_max_archives = 5 ## Data format to output. ## Each data format has its own unique set of configuration options, read @@ -52,23 +62,12 @@ func (f *File) Connect() error { if file == "stdout" { writers = append(writers, os.Stdout) } else { - var of io.WriteCloser - var err error - if f.RotateMaxAge != "" { - maxAge, err := time.ParseDuration(f.RotateMaxAge) - if err != nil { - return err - } - - // Only rotate by file age for now, keep no archives. - of, err = rotate.NewFileWriter(file, maxAge, 0, -1) - } else { - // Just open a normal file - of, err = rotate.NewFileWriter(file, 0, 0, -1) - } + of, err := rotate.NewFileWriter( + file, f.RotationInterval.Duration, f.RotationMaxSize.Size, f.RotationMaxArchives) if err != nil { return err } + writers = append(writers, of) f.closers = append(f.closers, of) } @@ -107,7 +106,7 @@ func (f *File) Write(metrics []telegraf.Metric) error { _, err = f.writer.Write(b) if err != nil { - writeErr = fmt.Errorf("E! failed to write message: %s, %s", b, err) + writeErr = fmt.Errorf("E! [outputs.file] failed to write message: %v", err) } } From 25471f672287a7b00d57e15c3cb627d15bf05ccf Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 3 Jun 2019 17:37:50 -0700 Subject: [PATCH 0890/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ea7493b6a..6c1028dda 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -77,6 +77,7 @@ - [#5920](https://github.com/influxdata/telegraf/issues/5920): Fixed datastore name mapping in vsphere input. - [#5879](https://github.com/influxdata/telegraf/issues/5879): Fix multiple SIGHUP causes Telegraf to shutdown. - [#5891](https://github.com/influxdata/telegraf/issues/5891): Fix connection leak in influxdb outputs on reload. +- [#5858](https://github.com/influxdata/telegraf/issues/5858): Fix batch fails when single metric is unserializable. 
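With patch 0889 applied, the file output exposes the full rotation triple rather than only an age limit. A concrete configuration that rotates hourly or at 50MB, keeping five archives (values illustrative):

```toml
[[outputs.file]]
  files = ["/var/log/telegraf/metrics.out"]
  rotation_interval = "1h"
  rotation_max_size = "50MB"
  rotation_max_archives = 5
  data_format = "influx"
```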
## v1.10.4 [2019-05-14] From e18393fabfca6cc099bbafb48c8d6c738610c601 Mon Sep 17 00:00:00 2001 From: Steven Barth Date: Tue, 4 Jun 2019 23:39:46 +0200 Subject: [PATCH 0891/1815] Add Cisco model-driven telemetry & GNMI inputs (#5852) --- Gopkg.lock | 26 + Gopkg.toml | 8 + README.md | 2 + docs/LICENSE_OF_DEPENDENCIES.md | 2 + plugins/inputs/all/all.go | 2 + plugins/inputs/cisco_telemetry_gnmi/README.md | 75 +++ .../cisco_telemetry_gnmi.go | 516 ++++++++++++++++++ .../cisco_telemetry_gnmi_test.go | 247 +++++++++ plugins/inputs/cisco_telemetry_mdt/README.md | 41 ++ .../cisco_telemetry_mdt.go | 391 +++++++++++++ .../cisco_telemetry_mdt_test.go | 362 ++++++++++++ 11 files changed, 1672 insertions(+) create mode 100644 plugins/inputs/cisco_telemetry_gnmi/README.md create mode 100644 plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go create mode 100644 plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go create mode 100644 plugins/inputs/cisco_telemetry_mdt/README.md create mode 100644 plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go create mode 100644 plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go diff --git a/Gopkg.lock b/Gopkg.lock index 76c5deb62..1f52f0087 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -219,6 +219,17 @@ revision = "2ea60e5f094469f9e65adb9cd103795b73ae743e" version = "v2.0.0" +[[projects]] + branch = "master" + digest = "1:ed5e77e0626ed76b7e7a2554bc4586aae768612381c5f62738f16a2dfa48763b" + name = "github.com/cisco-ie/nx-telemetry-proto" + packages = [ + "mdt_dialout", + "telemetry_bis", + ] + pruneopts = "" + revision = "82441e232cf6af9be0f808bf0c6421ee8519880e" + [[projects]] branch = "master" digest = "1:298e42868718da06fc0899ae8fdb99c48a14477045234c9274d81caa79af6a8f" @@ -858,6 +869,17 @@ revision = "eee57a3ac4174c55924125bb15eeeda8cffb6e6f" version = "v1.0.7" +[[projects]] + branch = "master" + digest = "1:06ee57a6252cc9c3a1650be9888e8df796d86947ec75bff7e2c4ac5689baa086" + name = "github.com/openconfig/gnmi" + packages = [ + "proto/gnmi", + "proto/gnmi_ext", + ] + pruneopts = "" + revision = "33a1865c302903e7a2e06f35960e6bc31e84b9f6" + [[projects]] digest = "1:5d9b668b0b4581a978f07e7d2e3314af18eb27b3fb5d19b70185b7c575723d11" name = "github.com/opencontainers/go-digest" @@ -1558,6 +1580,8 @@ "github.com/aws/aws-sdk-go/service/dynamodb", "github.com/aws/aws-sdk-go/service/kinesis", "github.com/bsm/sarama-cluster", + "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout", + "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis", "github.com/couchbase/go-couchbase", "github.com/denisenkom/go-mssqldb", "github.com/dgrijalva/jwt-go", @@ -1613,6 +1637,7 @@ "github.com/nats-io/gnatsd/server", "github.com/nats-io/go-nats", "github.com/nsqio/go-nsq", + "github.com/openconfig/gnmi/proto/gnmi", "github.com/openzipkin/zipkin-go-opentracing", "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore", "github.com/pkg/errors", @@ -1669,6 +1694,7 @@ "google.golang.org/grpc/codes", "google.golang.org/grpc/credentials", "google.golang.org/grpc/metadata", + "google.golang.org/grpc/peer", "google.golang.org/grpc/status", "gopkg.in/gorethink/gorethink.v3", "gopkg.in/ldap.v2", diff --git a/Gopkg.toml b/Gopkg.toml index 4e50eb11b..c817c1865 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -288,3 +288,11 @@ [[constraint]] name = "github.com/google/go-github" version = "24.0.1" + +[[constraint]] + branch = "master" + name = "github.com/openconfig/gnmi" + +[[constraint]] + branch = "master" + name = "github.com/cisco-ie/nx-telemetry-proto" diff --git 
a/README.md b/README.md index d3adf1e2d..60f349e47 100644 --- a/README.md +++ b/README.md @@ -150,6 +150,8 @@ For documentation on the latest development code see the [documentation index][d * [ceph](./plugins/inputs/ceph) * [cgroup](./plugins/inputs/cgroup) * [chrony](./plugins/inputs/chrony) +* [cisco_telemetry_gnmi](./plugins/inputs/cisco_telemetry_gnmi) +* [cisco_telemetry_mdt](./plugins/inputs/cisco_telemetry_mdt) * [cloud_pubsub](./plugins/inputs/cloud_pubsub) Google Cloud Pub/Sub * [cloud_pubsub_push](./plugins/inputs/cloud_pubsub_push) Google Cloud Pub/Sub push endpoint * [conntrack](./plugins/inputs/conntrack) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index e0531210e..17bac0a1a 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -16,6 +16,7 @@ following works: - github.com/beorn7/perks [MIT License](https://github.com/beorn7/perks/blob/master/LICENSE) - github.com/bsm/sarama-cluster [MIT License](https://github.com/bsm/sarama-cluster/blob/master/LICENSE) - github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE) +- github.com/cisco-ie/nx-telemetry-proto [Apache License 2.0](https://github.com/cisco-ie/nx-telemetry-proto/blob/master/LICENSE) - github.com/couchbase/go-couchbase [MIT License](https://github.com/couchbase/go-couchbase/blob/master/LICENSE) - github.com/couchbase/gomemcached [MIT License](https://github.com/couchbase/gomemcached/blob/master/LICENSE) - github.com/couchbase/goutils [COUCHBASE INC. COMMUNITY EDITION LICENSE](https://github.com/couchbase/goutils/blob/master/LICENSE.md) @@ -80,6 +81,7 @@ following works: - github.com/nats-io/go-nats [Apache License 2.0](https://github.com/nats-io/go-nats/blob/master/LICENSE) - github.com/nats-io/nuid [Apache License 2.0](https://github.com/nats-io/nuid/blob/master/LICENSE) - github.com/nsqio/go-nsq [MIT License](https://github.com/nsqio/go-nsq/blob/master/LICENSE) +- github.com/openconfig/gnmi [Apache License 2.0](https://github.com/openconfig/gnmi/blob/master/LICENSE) - github.com/opencontainers/go-digest [Apache License 2.0](https://github.com/opencontainers/go-digest/blob/master/LICENSE) - github.com/opencontainers/image-spec [Apache License 2.0](https://github.com/opencontainers/image-spec/blob/master/LICENSE) - github.com/opentracing-contrib/go-observer [Apache License 2.0](https://github.com/opentracing-contrib/go-observer/blob/master/LICENSE) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index a626fce92..ef032fe47 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -15,6 +15,8 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/ceph" _ "github.com/influxdata/telegraf/plugins/inputs/cgroup" _ "github.com/influxdata/telegraf/plugins/inputs/chrony" + _ "github.com/influxdata/telegraf/plugins/inputs/cisco_telemetry_gnmi" + _ "github.com/influxdata/telegraf/plugins/inputs/cisco_telemetry_mdt" _ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub" _ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub_push" _ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch" diff --git a/plugins/inputs/cisco_telemetry_gnmi/README.md b/plugins/inputs/cisco_telemetry_gnmi/README.md new file mode 100644 index 000000000..3e53cf0e5 --- /dev/null +++ b/plugins/inputs/cisco_telemetry_gnmi/README.md @@ -0,0 +1,75 @@ +# Cisco GNMI telemetry + +Cisco GNMI telemetry is an input plugin that consumes telemetry data similar to the [GNMI 
specification](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md).
+This GRPC-based protocol can utilize TLS for authentication and encryption.
+
+This plugin has been developed to support GNMI telemetry as produced by Cisco IOS XR (64-bit) version 6.5.1 and later.
+
+
+### Configuration:
+
+This is a sample configuration for the plugin.
+
+```toml
+[[inputs.cisco_telemetry_gnmi]]
+  ## Address and port of the GNMI GRPC server
+  addresses = ["10.49.234.114:57777"]
+
+  ## define credentials
+  username = "cisco"
+  password = "cisco"
+
+  ## GNMI encoding requested (one of: "proto", "json", "json_ietf")
+  # encoding = "proto"
+
+  ## redial in case of failures after
+  redial = "10s"
+
+  ## enable client-side TLS and define CA to authenticate the device
+  # enable_tls = true
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # insecure_skip_verify = true
+
+  ## define client-side TLS certificate & key to authenticate to the device
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+
+  ## GNMI subscription prefix (optional, can usually be left empty)
+  ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
+  # origin = ""
+  # prefix = ""
+  # target = ""
+
+  ## Define additional aliases to map telemetry encoding paths to simple measurement names
+  # [inputs.cisco_telemetry_gnmi.aliases]
+  #   ifcounters = "openconfig:/interfaces/interface/state/counters"
+
+  [[inputs.cisco_telemetry_gnmi.subscription]]
+    ## Name of the measurement that will be emitted
+    name = "ifcounters"
+
+    ## Origin and path of the subscription
+    ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
+    ##
+    ## origin usually refers to a (YANG) data model implemented by the device
+    ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath)
+    ## YANG models can be found e.g. 
here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr + origin = "openconfig-interfaces" + path = "/interfaces/interface/state/counters" + + # Subscription mode (one of: "target_defined", "sample", "on_change") and interval + subscription_mode = "sample" + sample_interval = "10s" + + ## Suppress redundant transmissions when measured values are unchanged + # suppress_redundant = false + + ## If suppression is enabled, send updates at least every X seconds anyway + # heartbeat_interval = "60s" +``` + +### Example Output +``` +ifcounters,path=openconfig-interfaces:/interfaces/interface/state/counters,host=linux,name=MgmtEth0/RP0/CPU0/0,source=10.49.234.115 in-multicast-pkts=0i,out-multicast-pkts=0i,out-errors=0i,out-discards=0i,in-broadcast-pkts=0i,out-broadcast-pkts=0i,in-discards=0i,in-unknown-protos=0i,in-errors=0i,out-unicast-pkts=0i,in-octets=0i,out-octets=0i,last-clear="2019-05-22T16:53:21Z",in-unicast-pkts=0i 1559145777425000000 +ifcounters,path=openconfig-interfaces:/interfaces/interface/state/counters,host=linux,name=GigabitEthernet0/0/0/0,source=10.49.234.115 out-multicast-pkts=0i,out-broadcast-pkts=0i,in-errors=0i,out-errors=0i,in-discards=0i,out-octets=0i,in-unknown-protos=0i,in-unicast-pkts=0i,in-octets=0i,in-multicast-pkts=0i,in-broadcast-pkts=0i,last-clear="2019-05-22T16:54:50Z",out-unicast-pkts=0i,out-discards=0i 1559145777425000000 +``` diff --git a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go new file mode 100644 index 000000000..69495d6f6 --- /dev/null +++ b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go @@ -0,0 +1,516 @@ +package cisco_telemetry_gnmi + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "log" + "net" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf/metric" + + "google.golang.org/grpc/credentials" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + internaltls "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" + jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" + "github.com/openconfig/gnmi/proto/gnmi" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +// CiscoTelemetryGNMI plugin instance +type CiscoTelemetryGNMI struct { + Addresses []string `toml:"addresses"` + Subscriptions []Subscription `toml:"subscription"` + Aliases map[string]string `toml:"aliases"` + + // Optional subscription configuration + Encoding string + Origin string + Prefix string + Target string + UpdatesOnly bool `toml:"updates_only"` + + // Cisco IOS XR credentials + Username string + Password string + + // Redial + Redial internal.Duration + + // GRPC TLS settings + EnableTLS bool `toml:"enable_tls"` + internaltls.ClientConfig + + // Internal state + aliases map[string]string + acc telegraf.Accumulator + cancel context.CancelFunc + wg sync.WaitGroup +} + +// Subscription for a GNMI client +type Subscription struct { + Name string + Origin string + Path string + + // Subscription mode and interval + SubscriptionMode string `toml:"subscription_mode"` + SampleInterval internal.Duration `toml:"sample_interval"` + + // Duplicate suppression + SuppressRedundant bool `toml:"suppress_redundant"` + HeartbeatInterval internal.Duration `toml:"heartbeat_interval"` +} + +// Start the http listener service +func (c *CiscoTelemetryGNMI) Start(acc telegraf.Accumulator) error { + var err error + var ctx context.Context + var tlscfg *tls.Config + 
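Subscription paths are XPath-like: list keys are written in square brackets and end up in the `Key` map of a `gnmi.PathElem`. A sketch of the structure the plugin's `parsePath` (defined further below) produces for a keyed interface path (interface name invented):

```go
package main

import (
	"fmt"

	"github.com/openconfig/gnmi/proto/gnmi"
)

func main() {
	// Equivalent of parsing "/interfaces/interface[name=eth0]/state/counters".
	path := &gnmi.Path{
		Origin: "openconfig-interfaces",
		Elem: []*gnmi.PathElem{
			{Name: "interfaces"},
			{Name: "interface", Key: map[string]string{"name": "eth0"}},
			{Name: "state"},
			{Name: "counters"},
		},
	}
	fmt.Println(path.Elem[1].Key["name"]) // eth0
}
```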
var request *gnmi.SubscribeRequest + c.acc = acc + ctx, c.cancel = context.WithCancel(context.Background()) + + // Validate configuration + if request, err = c.newSubscribeRequest(); err != nil { + return err + } else if c.Redial.Duration.Nanoseconds() <= 0 { + return fmt.Errorf("redial duration must be positive") + } + + // Parse TLS config + if c.EnableTLS { + if tlscfg, err = c.ClientConfig.TLSConfig(); err != nil { + return err + } + } + + if len(c.Username) > 0 { + ctx = metadata.AppendToOutgoingContext(ctx, "username", c.Username, "password", c.Password) + } + + // Invert explicit alias list and prefill subscription names + c.aliases = make(map[string]string, len(c.Subscriptions)+len(c.Aliases)) + for _, subscription := range c.Subscriptions { + path := subscription.Path + if len(subscription.Origin) > 0 { + path = subscription.Origin + ":" + path + } + + name := subscription.Name + if len(name) == 0 { + name = path[strings.LastIndexByte(path, '/')+1:] + } + if len(name) > 0 { + c.aliases[path] = name + } + } + for alias, path := range c.Aliases { + c.aliases[path] = alias + } + + // Create a goroutine for each device, dial and subscribe + c.wg.Add(len(c.Addresses)) + for _, addr := range c.Addresses { + go func(address string) { + defer c.wg.Done() + for ctx.Err() == nil { + if err := c.subscribeGNMI(ctx, address, tlscfg, request); err != nil && ctx.Err() == nil { + acc.AddError(err) + } + + select { + case <-ctx.Done(): + case <-time.After(c.Redial.Duration): + } + } + }(addr) + } + return nil +} + +// Create a new GNMI SubscribeRequest +func (c *CiscoTelemetryGNMI) newSubscribeRequest() (*gnmi.SubscribeRequest, error) { + // Create subscription objects + subscriptions := make([]*gnmi.Subscription, len(c.Subscriptions)) + for i, subscription := range c.Subscriptions { + gnmiPath, err := parsePath(subscription.Origin, subscription.Path, "") + if err != nil { + return nil, err + } + mode, ok := gnmi.SubscriptionMode_value[strings.ToUpper(subscription.SubscriptionMode)] + if !ok { + return nil, fmt.Errorf("invalid subscription mode %s", subscription.SubscriptionMode) + } + subscriptions[i] = &gnmi.Subscription{ + Path: gnmiPath, + Mode: gnmi.SubscriptionMode(mode), + SampleInterval: uint64(subscription.SampleInterval.Duration.Nanoseconds()), + SuppressRedundant: subscription.SuppressRedundant, + HeartbeatInterval: uint64(subscription.HeartbeatInterval.Duration.Nanoseconds()), + } + } + + // Construct subscribe request + gnmiPath, err := parsePath(c.Origin, c.Prefix, c.Target) + if err != nil { + return nil, err + } + + if c.Encoding != "proto" && c.Encoding != "json" && c.Encoding != "json_ietf" { + return nil, fmt.Errorf("unsupported encoding %s", c.Encoding) + } + + return &gnmi.SubscribeRequest{ + Request: &gnmi.SubscribeRequest_Subscribe{ + Subscribe: &gnmi.SubscriptionList{ + Prefix: gnmiPath, + Mode: gnmi.SubscriptionList_STREAM, + Encoding: gnmi.Encoding(gnmi.Encoding_value[strings.ToUpper(c.Encoding)]), + Subscription: subscriptions, + UpdatesOnly: c.UpdatesOnly, + }, + }, + }, nil +} + +// SubscribeGNMI and extract telemetry data +func (c *CiscoTelemetryGNMI) subscribeGNMI(ctx context.Context, address string, tlscfg *tls.Config, request *gnmi.SubscribeRequest) error { + var opt grpc.DialOption + if tlscfg != nil { + opt = grpc.WithTransportCredentials(credentials.NewTLS(tlscfg)) + } else { + opt = grpc.WithInsecure() + } + + client, err := grpc.DialContext(ctx, address, opt) + if err != nil { + return fmt.Errorf("failed to dial: %v", err) + } + defer client.Close() + + 
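`Start` above inverts the configured aliases into a path-to-measurement map; `handleSubscribeResponse` below then remembers the longest alias prefix seen while walking a GNMI path and uses the remainder as the field key. A toy illustration of that lookup (paths shortened):

```go
package main

import "fmt"

func main() {
	// Inverted alias table, as built in Start(): "origin:/path" -> name.
	aliases := map[string]string{
		"openconfig-interfaces:/interfaces/interface/state/counters": "ifcounters",
	}

	aliasPath := "openconfig-interfaces:/interfaces/interface/state/counters"
	fieldPath := aliasPath + "/in-octets"

	name, ok := aliases[aliasPath]
	if !ok {
		name = aliasPath // fall back to the full encoding path
	}
	// The field key is the path relative to the alias, as in handleSubscribeResponse.
	fmt.Println(name, fieldPath[len(aliasPath)+1:]) // ifcounters in-octets
}
```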
subscribeClient, err := gnmi.NewGNMIClient(client).Subscribe(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to setup subscription: %v", err)
+	}
+
+	if err = subscribeClient.Send(request); err != nil {
+		return fmt.Errorf("failed to send subscription request: %v", err)
+	}
+
+	log.Printf("D! [inputs.cisco_telemetry_gnmi]: Connection to GNMI device %s established", address)
+	defer log.Printf("D! [inputs.cisco_telemetry_gnmi]: Connection to GNMI device %s closed", address)
+	for ctx.Err() == nil {
+		var reply *gnmi.SubscribeResponse
+		if reply, err = subscribeClient.Recv(); err != nil {
+			if err != io.EOF && ctx.Err() == nil {
+				return fmt.Errorf("aborted GNMI subscription: %v", err)
+			}
+			break
+		}
+
+		c.handleSubscribeResponse(address, reply)
+	}
+	return nil
+}
+
+// HandleSubscribeResponse message from GNMI and parse contained telemetry data
+func (c *CiscoTelemetryGNMI) handleSubscribeResponse(address string, reply *gnmi.SubscribeResponse) {
+	// Check if response is a GNMI Update and if we have a prefix to derive the measurement name
+	response, ok := reply.Response.(*gnmi.SubscribeResponse_Update)
+	if !ok {
+		return
+	}
+
+	var prefix, prefixAliasPath string
+	grouper := metric.NewSeriesGrouper()
+	timestamp := time.Unix(0, response.Update.Timestamp)
+	prefixTags := make(map[string]string)
+
+	if response.Update.Prefix != nil {
+		prefix, prefixAliasPath = c.handlePath(response.Update.Prefix, prefixTags, "")
+	}
+	prefixTags["source"], _, _ = net.SplitHostPort(address)
+	prefixTags["path"] = prefix
+
+	// Parse individual Update message and create measurements
+	var name, lastAliasPath string
+	for _, update := range response.Update.Update {
+		// Prepare tags from prefix
+		tags := make(map[string]string, len(prefixTags))
+		for key, val := range prefixTags {
+			tags[key] = val
+		}
+		aliasPath, fields := c.handleTelemetryField(update, tags, prefix)
+
+		// Inherit valid alias from prefix parsing
+		if len(prefixAliasPath) > 0 && len(aliasPath) == 0 {
+			aliasPath = prefixAliasPath
+		}
+
+		// Lookup alias if alias-path has changed
+		if aliasPath != lastAliasPath {
+			name = prefix
+			if alias, ok := c.aliases[aliasPath]; ok {
+				name = alias
+			} else {
+				log.Printf("D! 
[inputs.cisco_telemetry_gnmi]: No measurement alias for GNMI path: %s", name) + } + } + + // Group metrics + for key, val := range fields { + grouper.Add(name, tags, timestamp, key[len(aliasPath)+1:], val) + } + + lastAliasPath = aliasPath + } + + // Add grouped measurements + for _, metric := range grouper.Metrics() { + c.acc.AddMetric(metric) + } +} + +// HandleTelemetryField and add it to a measurement +func (c *CiscoTelemetryGNMI) handleTelemetryField(update *gnmi.Update, tags map[string]string, prefix string) (string, map[string]interface{}) { + path, aliasPath := c.handlePath(update.Path, tags, prefix) + + var value interface{} + var jsondata []byte + + switch val := update.Val.Value.(type) { + case *gnmi.TypedValue_AsciiVal: + value = val.AsciiVal + case *gnmi.TypedValue_BoolVal: + value = val.BoolVal + case *gnmi.TypedValue_BytesVal: + value = val.BytesVal + case *gnmi.TypedValue_DecimalVal: + value = val.DecimalVal + case *gnmi.TypedValue_FloatVal: + value = val.FloatVal + case *gnmi.TypedValue_IntVal: + value = val.IntVal + case *gnmi.TypedValue_StringVal: + value = val.StringVal + case *gnmi.TypedValue_UintVal: + value = val.UintVal + case *gnmi.TypedValue_JsonIetfVal: + jsondata = val.JsonIetfVal + case *gnmi.TypedValue_JsonVal: + jsondata = val.JsonVal + } + + name := strings.Replace(path, "-", "_", -1) + fields := make(map[string]interface{}) + if value != nil { + fields[name] = value + } else if jsondata != nil { + if err := json.Unmarshal(jsondata, &value); err != nil { + c.acc.AddError(fmt.Errorf("failed to parse JSON value: %v", err)) + } else { + flattener := jsonparser.JSONFlattener{Fields: fields} + flattener.FullFlattenJSON(name, value, true, true) + } + } + return aliasPath, fields +} + +// Parse path to path-buffer and tag-field +func (c *CiscoTelemetryGNMI) handlePath(path *gnmi.Path, tags map[string]string, prefix string) (string, string) { + var aliasPath string + builder := bytes.NewBufferString(prefix) + + // Prefix with origin + if len(path.Origin) > 0 { + builder.WriteString(path.Origin) + builder.WriteRune(':') + } + + // Parse generic keys from prefix + for _, elem := range path.Elem { + builder.WriteRune('/') + builder.WriteString(elem.Name) + name := builder.String() + + if _, exists := c.aliases[name]; exists { + aliasPath = name + } + + for key, val := range elem.Key { + key = strings.Replace(key, "-", "_", -1) + + // Use short-form of key if possible + if _, exists := tags[key]; exists { + tags[name+"/"+key] = val + } else { + tags[key] = val + } + } + } + + return builder.String(), aliasPath +} + +//ParsePath from XPath-like string to GNMI path structure +func parsePath(origin string, path string, target string) (*gnmi.Path, error) { + var err error + gnmiPath := gnmi.Path{Origin: origin, Target: target} + + if len(path) > 0 && path[0] != '/' { + return nil, fmt.Errorf("path does not start with a '/': %s", path) + } + + elem := &gnmi.PathElem{} + start, name, value, end := 0, -1, -1, -1 + + path = path + "/" + + for i := 0; i < len(path); i++ { + if path[i] == '[' { + if name >= 0 { + break + } + if end < 0 { + end = i + elem.Key = make(map[string]string) + } + name = i + 1 + } else if path[i] == '=' { + if name <= 0 || value >= 0 { + break + } + value = i + 1 + } else if path[i] == ']' { + if name <= 0 || value <= name { + break + } + elem.Key[path[name:value-1]] = strings.Trim(path[value:i], "'\"") + name, value = -1, -1 + } else if path[i] == '/' { + if name < 0 { + if end < 0 { + end = i + } + + if end > start { + elem.Name = path[start:end] + 
gnmiPath.Elem = append(gnmiPath.Elem, elem)
+					gnmiPath.Element = append(gnmiPath.Element, path[start:i])
+				}
+
+				start, name, value, end = i+1, -1, -1, -1
+				elem = &gnmi.PathElem{}
+			}
+		}
+	}
+
+	if name >= 0 || value >= 0 {
+		err = fmt.Errorf("Invalid GNMI path: %s", path)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return &gnmiPath, nil
+}
+
+// Stop listener and cleanup
+func (c *CiscoTelemetryGNMI) Stop() {
+	c.cancel()
+	c.wg.Wait()
+}
+
+const sampleConfig = `
+  ## Address and port of the GNMI GRPC server
+  addresses = ["10.49.234.114:57777"]
+
+  ## define credentials
+  username = "cisco"
+  password = "cisco"
+
+  ## GNMI encoding requested (one of: "proto", "json", "json_ietf")
+  # encoding = "proto"
+
+  ## redial in case of failures after
+  redial = "10s"
+
+  ## enable client-side TLS and define CA to authenticate the device
+  # enable_tls = true
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # insecure_skip_verify = true
+
+  ## define client-side TLS certificate & key to authenticate to the device
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+
+  ## GNMI subscription prefix (optional, can usually be left empty)
+  ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
+  # origin = ""
+  # prefix = ""
+  # target = ""
+
+  ## Define additional aliases to map telemetry encoding paths to simple measurement names
+  #[inputs.cisco_telemetry_gnmi.aliases]
+  # ifcounters = "openconfig:/interfaces/interface/state/counters"
+
+  [[inputs.cisco_telemetry_gnmi.subscription]]
+    ## Name of the measurement that will be emitted
+    name = "ifcounters"
+
+    ## Origin and path of the subscription
+    ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
+    ##
+    ## origin usually refers to a (YANG) data model implemented by the device
+    ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath)
+    ## YANG models can be found e.g. 
here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr + origin = "openconfig-interfaces" + path = "/interfaces/interface/state/counters" + + # Subscription mode (one of: "target_defined", "sample", "on_change") and interval + subscription_mode = "sample" + sample_interval = "10s" + + ## Suppress redundant transmissions when measured values are unchanged + # suppress_redundant = false + + ## If suppression is enabled, send updates at least every X seconds anyway + # heartbeat_interval = "60s" +` + +// SampleConfig of plugin +func (c *CiscoTelemetryGNMI) SampleConfig() string { + return sampleConfig +} + +// Description of plugin +func (c *CiscoTelemetryGNMI) Description() string { + return "Cisco GNMI telemetry input plugin based on GNMI telemetry data produced in IOS XR" +} + +// Gather plugin measurements (unused) +func (c *CiscoTelemetryGNMI) Gather(_ telegraf.Accumulator) error { + return nil +} + +func init() { + inputs.Add("cisco_telemetry_gnmi", func() telegraf.Input { + return &CiscoTelemetryGNMI{ + Encoding: "proto", + Redial: internal.Duration{Duration: 10 * time.Second}, + } + }) +} diff --git a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go new file mode 100644 index 000000000..7e6b527b9 --- /dev/null +++ b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go @@ -0,0 +1,247 @@ +package cisco_telemetry_gnmi + +import ( + "context" + "errors" + "fmt" + "net" + "testing" + "time" + + "google.golang.org/grpc/metadata" + + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" + "google.golang.org/grpc" + + "github.com/openconfig/gnmi/proto/gnmi" + "github.com/stretchr/testify/assert" +) + +func TestParsePath(t *testing.T) { + path := "/foo/bar/bla[shoo=woo][shoop=/woop/]/z" + parsed, err := parsePath("theorigin", path, "thetarget") + + assert.Nil(t, err) + assert.Equal(t, parsed.Origin, "theorigin") + assert.Equal(t, parsed.Target, "thetarget") + assert.Equal(t, parsed.Element, []string{"foo", "bar", "bla[shoo=woo][shoop=/woop/]", "z"}) + assert.Equal(t, parsed.Elem, []*gnmi.PathElem{{Name: "foo"}, {Name: "bar"}, + {Name: "bla", Key: map[string]string{"shoo": "woo", "shoop": "/woop/"}}, {Name: "z"}}) + + parsed, err = parsePath("", "", "") + assert.Nil(t, err) + assert.Equal(t, *parsed, gnmi.Path{}) + + parsed, err = parsePath("", "/foo[[", "") + assert.Nil(t, parsed) + assert.Equal(t, errors.New("Invalid GNMI path: /foo[[/"), err) +} + +type mockGNMIServer struct { + t *testing.T + acc *testutil.Accumulator + server *grpc.Server + scenario int +} + +func (m *mockGNMIServer) Capabilities(context.Context, *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) { + return nil, nil +} + +func (m *mockGNMIServer) Get(context.Context, *gnmi.GetRequest) (*gnmi.GetResponse, error) { + return nil, nil +} + +func (m *mockGNMIServer) Set(context.Context, *gnmi.SetRequest) (*gnmi.SetResponse, error) { + return nil, nil +} + +func (m *mockGNMIServer) Subscribe(server gnmi.GNMI_SubscribeServer) error { + // Avoid race conditions + go func() { + if m.scenario == 0 { + m.acc.WaitError(1) + } else if m.scenario == 1 || m.scenario == 3 { + m.acc.Wait(4) + } else if m.scenario == 2 { + m.acc.Wait(2) + } + if m.scenario >= 0 { + m.server.Stop() + } + }() + + metadata, ok := metadata.FromIncomingContext(server.Context()) + assert.Equal(m.t, ok, true) + assert.Equal(m.t, metadata.Get("username"), []string{"theuser"}) + assert.Equal(m.t, 
metadata.Get("password"), []string{"thepassword"}) + + switch m.scenario { + case 0: + return fmt.Errorf("testerror") + case 1: + notification := mockGNMINotification() + server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) + server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}) + notification.Prefix.Elem[0].Key["foo"] = "bar2" + notification.Update[0].Path.Elem[1].Key["name"] = "str2" + notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_JsonVal{JsonVal: []byte{'"', '1', '2', '3', '"'}}} + server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) + return nil + case 2: + notification := mockGNMINotification() + server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) + return nil + case 3: + notification := mockGNMINotification() + notification.Prefix.Elem[0].Key["foo"] = "bar2" + notification.Update[0].Path.Elem[1].Key["name"] = "str2" + notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_BoolVal{BoolVal: false}} + server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) + return nil + default: + return fmt.Errorf("test not implemented ;)") + } +} + +func TestGNMIError(t *testing.T) { + listener, _ := net.Listen("tcp", "127.0.0.1:57003") + server := grpc.NewServer() + acc := &testutil.Accumulator{} + gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 0, server: server, acc: acc}) + + c := &CiscoTelemetryGNMI{Addresses: []string{"127.0.0.1:57003"}, + Username: "theuser", Password: "thepassword", Encoding: "proto", + Redial: internal.Duration{Duration: 1 * time.Second}} + + assert.Nil(t, c.Start(acc)) + server.Serve(listener) + c.Stop() + + assert.Contains(t, acc.Errors, errors.New("aborted GNMI subscription: rpc error: code = Unknown desc = testerror")) +} + +func mockGNMINotification() *gnmi.Notification { + return &gnmi.Notification{ + Timestamp: 1543236572000000000, + Prefix: &gnmi.Path{ + Origin: "type", + Elem: []*gnmi.PathElem{ + { + Name: "model", + Key: map[string]string{"foo": "bar"}, + }, + }, + Target: "subscription", + }, + Update: []*gnmi.Update{ + { + Path: &gnmi.Path{ + Elem: []*gnmi.PathElem{ + {Name: "some"}, + { + Name: "path", + Key: map[string]string{"name": "str", "uint64": "1234"}}, + }, + }, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_IntVal{IntVal: 5678}}, + }, + { + Path: &gnmi.Path{ + Elem: []*gnmi.PathElem{ + {Name: "other"}, + {Name: "path"}, + }, + }, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_StringVal{StringVal: "foobar"}}, + }, + { + Path: &gnmi.Path{ + Elem: []*gnmi.PathElem{ + {Name: "other"}, + {Name: "this"}, + }, + }, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_StringVal{StringVal: "that"}}, + }, + }, + } +} + +func TestGNMIMultiple(t *testing.T) { + listener, _ := net.Listen("tcp", "127.0.0.1:57004") + server := grpc.NewServer() + acc := &testutil.Accumulator{} + gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 1, server: server, acc: acc}) + + c := &CiscoTelemetryGNMI{Addresses: []string{"127.0.0.1:57004"}, + Username: "theuser", Password: "thepassword", Encoding: "proto", + Redial: internal.Duration{Duration: 1 * time.Second}, + Subscriptions: []Subscription{{Name: "alias", Origin: "type", Path: "/model", SubscriptionMode: "sample"}}, + } + + assert.Nil(t, c.Start(acc)) + + server.Serve(listener) + c.Stop() + + assert.Empty(t, acc.Errors) + + tags 
:= map[string]string{"path": "type:/model", "source": "127.0.0.1", "foo": "bar", "name": "str", "uint64": "1234"} + fields := map[string]interface{}{"some/path": int64(5678)} + acc.AssertContainsTaggedFields(t, "alias", fields, tags) + + tags = map[string]string{"path": "type:/model", "source": "127.0.0.1", "foo": "bar"} + fields = map[string]interface{}{"other/path": "foobar", "other/this": "that"} + acc.AssertContainsTaggedFields(t, "alias", fields, tags) + + tags = map[string]string{"path": "type:/model", "foo": "bar2", "source": "127.0.0.1", "name": "str2", "uint64": "1234"} + fields = map[string]interface{}{"some/path": "123"} + acc.AssertContainsTaggedFields(t, "alias", fields, tags) + + tags = map[string]string{"path": "type:/model", "source": "127.0.0.1", "foo": "bar2"} + fields = map[string]interface{}{"other/path": "foobar", "other/this": "that"} + acc.AssertContainsTaggedFields(t, "alias", fields, tags) +} + +func TestGNMIMultipleRedial(t *testing.T) { + listener, _ := net.Listen("tcp", "127.0.0.1:57004") + server := grpc.NewServer() + acc := &testutil.Accumulator{} + gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 2, server: server, acc: acc}) + + c := &CiscoTelemetryGNMI{Addresses: []string{"127.0.0.1:57004"}, + Username: "theuser", Password: "thepassword", Encoding: "proto", + Redial: internal.Duration{Duration: 500 * time.Millisecond}, + Subscriptions: []Subscription{{Name: "alias", Origin: "type", Path: "/model", SubscriptionMode: "sample"}}, + } + + assert.Nil(t, c.Start(acc)) + server.Serve(listener) + + listener, _ = net.Listen("tcp", "127.0.0.1:57004") + server = grpc.NewServer() + gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 3, server: server, acc: acc}) + + server.Serve(listener) + c.Stop() + + assert.Empty(t, acc.Errors) + + tags := map[string]string{"path": "type:/model", "source": "127.0.0.1", "foo": "bar", "name": "str", "uint64": "1234"} + fields := map[string]interface{}{"some/path": int64(5678)} + acc.AssertContainsTaggedFields(t, "alias", fields, tags) + + tags = map[string]string{"path": "type:/model", "source": "127.0.0.1", "foo": "bar"} + fields = map[string]interface{}{"other/path": "foobar", "other/this": "that"} + acc.AssertContainsTaggedFields(t, "alias", fields, tags) + + tags = map[string]string{"path": "type:/model", "foo": "bar2", "source": "127.0.0.1", "name": "str2", "uint64": "1234"} + fields = map[string]interface{}{"some/path": false} + acc.AssertContainsTaggedFields(t, "alias", fields, tags) + + tags = map[string]string{"path": "type:/model", "source": "127.0.0.1", "foo": "bar2"} + fields = map[string]interface{}{"other/path": "foobar", "other/this": "that"} + acc.AssertContainsTaggedFields(t, "alias", fields, tags) +} diff --git a/plugins/inputs/cisco_telemetry_mdt/README.md b/plugins/inputs/cisco_telemetry_mdt/README.md new file mode 100644 index 000000000..4672f4036 --- /dev/null +++ b/plugins/inputs/cisco_telemetry_mdt/README.md @@ -0,0 +1,41 @@ +# Cisco model-driven telemetry (MDT) + +Cisco model-driven telemetry (MDT) is an input plugin that consumes +telemetry data from Cisco IOS XR, IOS XE and NX-OS platforms. It supports TCP & GRPC dialout transports. +GRPC-based transport can utilize TLS for authentication and encryption. +Telemetry data is expected to be GPB-KV (self-describing-gpb) encoded. + +The GRPC dialout transport is supported on various IOS XR (64-bit) 6.1.x and later, IOS XE 16.10 and later, as well as NX-OS 7.x and later platforms. 
+
+The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and later.
+
+
+### Configuration:
+
+This is a sample configuration for the plugin.
+
+```toml
+[[inputs.cisco_telemetry_mdt]]
+  ## Telemetry transport (one of: tcp, grpc)
+  transport = "grpc"
+
+  ## Address and port to host telemetry listener
+  service_address = ":57000"
+
+  ## Enable TLS for GRPC transport
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+
+  ## Enable TLS client authentication and define allowed CA certificates
+  # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+
+  ## Define aliases to map telemetry encoding paths to simple measurement names
+  [inputs.cisco_telemetry_mdt.aliases]
+    ifstats = "ietf-interfaces:interfaces-state/interface/statistics"
+```
+
+### Example Output:
+```
+ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet2,source=csr1kv,subscription=101 in-unicast-pkts=27i,in-multicast-pkts=0i,discontinuity-time="2019-05-23T07:40:23.000362+00:00",in-octets=5233i,in-errors=0i,out-multicast-pkts=0i,out-discards=0i,in-broadcast-pkts=0i,in-discards=0i,in-unknown-protos=0i,out-unicast-pkts=0i,out-broadcast-pkts=0i,out-octets=0i,out-errors=0i 1559150462624000000
+ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet1,source=csr1kv,subscription=101 in-octets=3394770806i,in-broadcast-pkts=0i,in-multicast-pkts=0i,out-broadcast-pkts=0i,in-unknown-protos=0i,out-octets=350212i,in-unicast-pkts=9477273i,in-discards=0i,out-unicast-pkts=2726i,out-discards=0i,discontinuity-time="2019-05-23T07:40:23.000363+00:00",in-errors=30i,out-multicast-pkts=0i,out-errors=0i 1559150462624000000
+```
\ No newline at end of file
diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go
new file mode 100644
index 000000000..fc018a31e
--- /dev/null
+++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go
@@ -0,0 +1,391 @@
+package cisco_telemetry_mdt
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout"
+	telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis"
+	"github.com/golang/protobuf/proto"
+	"github.com/influxdata/telegraf"
+	internaltls "github.com/influxdata/telegraf/internal/tls"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/peer"
+)
+
+const (
+	// Maximum telemetry payload size (in bytes) to accept for TCP dialout transport
+	tcpMaxMsgLen uint32 = 1024 * 1024
+)
+
+// CiscoTelemetryMDT plugin for IOS XR, IOS XE and NX-OS platforms
+type CiscoTelemetryMDT struct {
+	// Common configuration
+	Transport      string
+	ServiceAddress string            `toml:"service_address"`
+	MaxMsgSize     int               `toml:"max_msg_size"`
+	Aliases        map[string]string `toml:"aliases"`
+
+	// GRPC TLS settings
+	internaltls.ServerConfig
+
+	// Internal listener / client handle
+	grpcServer *grpc.Server
+	listener   net.Listener
+
+	// Internal state
+	aliases map[string]string
+	acc     telegraf.Accumulator
+	wg      sync.WaitGroup
+}
+
+// Start the Cisco MDT service
+func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error {
+	var err error
+	c.acc = acc
+	c.listener, err = net.Listen("tcp", c.ServiceAddress)
+	if err != nil {
+		return err
+	}
+
+	// Invert aliases list
+	c.aliases = make(map[string]string, len(c.Aliases))
+	for alias, path := range c.Aliases {
+		c.aliases[path] = alias
+	}
+
+	switch c.Transport {
+	case "tcp":
+		// TCP dialout server accept routine
+		c.wg.Add(1)
+		go func() {
+			c.acceptTCPClients()
+			c.wg.Done()
+		}()
+
+	case "grpc":
+		var opts []grpc.ServerOption
+		tlsConfig, err := c.ServerConfig.TLSConfig()
+		if err != nil {
+			return err
+		} else if tlsConfig != nil {
+			opts = append(opts, grpc.Creds(credentials.NewTLS(tlsConfig)))
+		}
+
+		if c.MaxMsgSize > 0 {
+			opts = append(opts, grpc.MaxRecvMsgSize(c.MaxMsgSize))
+		}
+
+		c.grpcServer = grpc.NewServer(opts...)
+		dialout.RegisterGRPCMdtDialoutServer(c.grpcServer, c)
+
+		c.wg.Add(1)
+		go func() {
+			c.grpcServer.Serve(c.listener)
+			c.wg.Done()
+		}()
+
+	default:
+		c.listener.Close()
+		return fmt.Errorf("invalid Cisco MDT transport: %s", c.Transport)
+	}
+
+	return nil
+}
+
+// acceptTCPClients defines the TCP dialout server main routine
+func (c *CiscoTelemetryMDT) acceptTCPClients() {
+	// Keep track of all active connections, so we can close them if necessary
+	var mutex sync.Mutex
+	clients := make(map[net.Conn]struct{})
+
+	for {
+		conn, err := c.listener.Accept()
+		if neterr, ok := err.(*net.OpError); ok && (neterr.Timeout() || neterr.Temporary()) {
+			continue
+		} else if err != nil {
+			break // Stop() will close the connection so Accept() will fail here
+		}
+
+		mutex.Lock()
+		clients[conn] = struct{}{}
+		mutex.Unlock()
+
+		// Individual client connection routine
+		c.wg.Add(1)
+		go func() {
+			log.Printf("D! [inputs.cisco_telemetry_mdt]: Accepted Cisco MDT TCP dialout connection from %s", conn.RemoteAddr())
+			if err := c.handleTCPClient(conn); err != nil {
+				c.acc.AddError(err)
+			}
+			log.Printf("D! [inputs.cisco_telemetry_mdt]: Closed Cisco MDT TCP dialout connection from %s", conn.RemoteAddr())
+
+			mutex.Lock()
+			delete(clients, conn)
+			mutex.Unlock()
+
+			conn.Close()
+			c.wg.Done()
+		}()
+	}
+
+	// Close all remaining client connections
+	mutex.Lock()
+	for client := range clients {
+		if err := client.Close(); err != nil {
+			log.Printf("E! [inputs.cisco_telemetry_mdt]: Failed to close TCP dialout client: %v", err)
+		}
+	}
+	mutex.Unlock()
+}
+
+// Handle a TCP telemetry client
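+// The framing is a 12-byte big-endian header (message type, encapsulation,
+// header version, flags, payload length) followed by MsgLen bytes of
+// GPB-KV payload.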
+func (c *CiscoTelemetryMDT) handleTCPClient(conn net.Conn) error {
+	// TCP Dialout telemetry framing header
+	var hdr struct {
+		MsgType       uint16
+		MsgEncap      uint16
+		MsgHdrVersion uint16
+		MsgFlags      uint16
+		MsgLen        uint32
+	}
+
+	var payload bytes.Buffer
+
+	for {
+		// Read and validate dialout telemetry header
+		if err := binary.Read(conn, binary.BigEndian, &hdr); err != nil {
+			return err
+		}
+
+		maxMsgSize := tcpMaxMsgLen
+		if c.MaxMsgSize > 0 {
+			maxMsgSize = uint32(c.MaxMsgSize)
+		}
+
+		if hdr.MsgLen > maxMsgSize {
+			return fmt.Errorf("dialout packet too long: %v", hdr.MsgLen)
+		} else if hdr.MsgFlags != 0 {
+			return fmt.Errorf("invalid dialout flags: %v", hdr.MsgFlags)
+		}
+
+		// Read and handle telemetry packet
+		payload.Reset()
+		if size, err := payload.ReadFrom(io.LimitReader(conn, int64(hdr.MsgLen))); size != int64(hdr.MsgLen) {
+			if err != nil {
+				return err
+			}
+			return fmt.Errorf("TCP dialout premature EOF")
+		}
+
+		c.handleTelemetry(payload.Bytes())
+	}
+}
+
+// MdtDialout RPC server method for grpc-dialout transport
+func (c *CiscoTelemetryMDT) MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutServer) error {
+	peer, peerOK := peer.FromContext(stream.Context())
+	if peerOK {
+		log.Printf("D!
[inputs.cisco_telemetry_mdt]: Accepted Cisco MDT GRPC dialout connection from %s", peer.Addr) + } + + for { + packet, err := stream.Recv() + if err != nil { + if err != io.EOF { + c.acc.AddError(fmt.Errorf("GRPC dialout receive error: %v", err)) + } + break + } + + if len(packet.Data) == 0 && len(packet.Errors) != 0 { + c.acc.AddError(fmt.Errorf("GRPC dialout error: %s", packet.Errors)) + break + } + + c.handleTelemetry(packet.Data) + } + + if peerOK { + log.Printf("D! [inputs.cisco_telemetry_mdt]: Closed Cisco MDT GRPC dialout connection from %s", peer.Addr) + } + + return nil +} + +// Handle telemetry packet from any transport, decode and add as measurement +func (c *CiscoTelemetryMDT) handleTelemetry(data []byte) { + var namebuf bytes.Buffer + telemetry := &telemetry.Telemetry{} + err := proto.Unmarshal(data, telemetry) + if err != nil { + c.acc.AddError(fmt.Errorf("Cisco MDT failed to decode: %v", err)) + return + } + + for _, gpbkv := range telemetry.DataGpbkv { + var fields map[string]interface{} + + // Produce metadata tags + var tags map[string]string + + // Top-level field may have measurement timestamp, if not use message timestamp + measured := gpbkv.Timestamp + if measured == 0 { + measured = telemetry.MsgTimestamp + } + + timestamp := time.Unix(int64(measured/1000), int64(measured%1000)*1000000) + + // Populate tags and fields from toplevel GPBKV fields "keys" and "content" + for _, field := range gpbkv.Fields { + switch field.Name { + case "keys": + tags = make(map[string]string, len(field.Fields)+2) + tags["source"] = telemetry.GetNodeIdStr() + tags["subscription"] = telemetry.GetSubscriptionIdStr() + for _, subfield := range field.Fields { + c.parseGPBKVField(subfield, &namebuf, telemetry.EncodingPath, timestamp, tags, nil) + } + case "content": + fields = make(map[string]interface{}, len(field.Fields)) + for _, subfield := range field.Fields { + c.parseGPBKVField(subfield, &namebuf, telemetry.EncodingPath, timestamp, tags, fields) + } + default: + log.Printf("I! [inputs.cisco_telemetry_mdt]: Unexpected top-level MDT field: %s", field.Name) + } + } + + // Find best alias for encoding path and emit measurement + if len(fields) > 0 && len(tags) > 0 && len(telemetry.EncodingPath) > 0 { + name := telemetry.EncodingPath + if alias, ok := c.aliases[name]; ok { + tags["path"] = name + name = alias + } else { + log.Printf("D! 
[inputs.cisco_telemetry_mdt]: No measurement alias for encoding path: %s", name) + } + c.acc.AddFields(name, fields, tags, timestamp) + } else { + c.acc.AddError(fmt.Errorf("empty encoding path or measurement")) + } + } +} + +// Recursively parse GPBKV field structure into fields or tags +func (c *CiscoTelemetryMDT) parseGPBKVField(field *telemetry.TelemetryField, namebuf *bytes.Buffer, + path string, timestamp time.Time, tags map[string]string, fields map[string]interface{}) { + + namelen := namebuf.Len() + if namelen > 0 { + namebuf.WriteRune('/') + } + namebuf.WriteString(strings.Replace(field.Name, "-", "_", -1)) + + // Decode Telemetry field value if set + var value interface{} + switch val := field.ValueByType.(type) { + case *telemetry.TelemetryField_BytesValue: + value = val.BytesValue + case *telemetry.TelemetryField_StringValue: + value = val.StringValue + case *telemetry.TelemetryField_BoolValue: + value = val.BoolValue + case *telemetry.TelemetryField_Uint32Value: + value = val.Uint32Value + case *telemetry.TelemetryField_Uint64Value: + value = val.Uint64Value + case *telemetry.TelemetryField_Sint32Value: + value = val.Sint32Value + case *telemetry.TelemetryField_Sint64Value: + value = val.Sint64Value + case *telemetry.TelemetryField_DoubleValue: + value = val.DoubleValue + case *telemetry.TelemetryField_FloatValue: + value = val.FloatValue + } + + if value != nil { + // Distinguish between tags (keys) and fields (data) to write to + if fields != nil { + fields[namebuf.String()] = value + } else { + if _, exists := tags[field.Name]; !exists { // Use short keys whenever possible + tags[field.Name] = fmt.Sprint(value) + } else { + tags[namebuf.String()] = fmt.Sprint(value) + } + } + } + + for _, subfield := range field.Fields { + c.parseGPBKVField(subfield, namebuf, path, timestamp, tags, fields) + } + + namebuf.Truncate(namelen) +} + +// Stop listener and cleanup +func (c *CiscoTelemetryMDT) Stop() { + if c.grpcServer != nil { + // Stop server and terminate all running dialout routines + c.grpcServer.Stop() + } + if c.listener != nil { + c.listener.Close() + } + c.wg.Wait() +} + +const sampleConfig = ` + ## Telemetry transport (one of: tcp, grpc) + transport = "grpc" + + ## Address and port to host telemetry listener + service_address = ":57000" + + ## Enable TLS for GRPC transport + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Enable TLS client authentication and define allowed CA certificates + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Define aliases to map telemetry encoding paths to simple measurement names + [inputs.cisco_telemetry_mdt.aliases] + ifstats = "ietf-interfaces:interfaces-state/interface/statistics" +` + +// SampleConfig of plugin +func (c *CiscoTelemetryMDT) SampleConfig() string { + return sampleConfig +} + +// Description of plugin +func (c *CiscoTelemetryMDT) Description() string { + return "Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms" +} + +// Gather plugin measurements (unused) +func (c *CiscoTelemetryMDT) Gather(_ telegraf.Accumulator) error { + return nil +} + +func init() { + inputs.Add("cisco_telemetry_mdt", func() telegraf.Input { + return &CiscoTelemetryMDT{ + Transport: "grpc", + ServiceAddress: "127.0.0.1:57000", + } + }) +} diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go new file mode 100644 index 000000000..d2c686c69 --- /dev/null +++ 
b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go @@ -0,0 +1,362 @@ +package cisco_telemetry_mdt + +import ( + "context" + "encoding/binary" + "errors" + "net" + "testing" + + "github.com/golang/protobuf/proto" + + dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" + telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/assert" + "google.golang.org/grpc" +) + +func TestHandleTelemetryEmpty(t *testing.T) { + c := &CiscoTelemetryMDT{Transport: "dummy"} + acc := &testutil.Accumulator{} + c.Start(acc) + + telemetry := &telemetry.Telemetry{ + DataGpbkv: []*telemetry.TelemetryField{ + {}, + }, + } + data, _ := proto.Marshal(telemetry) + + c.handleTelemetry(data) + assert.Contains(t, acc.Errors, errors.New("empty encoding path or measurement")) + assert.Empty(t, acc.Metrics) +} + +func TestHandleTelemetryTwoSimple(t *testing.T) { + c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"alias": "type:model/some/path"}} + acc := &testutil.Accumulator{} + c.Start(acc) + + telemetry := &telemetry.Telemetry{ + MsgTimestamp: 1543236572000, + EncodingPath: "type:model/some/path", + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "keys", + Fields: []*telemetry.TelemetryField{ + { + Name: "name", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str"}, + }, + { + Name: "uint64", + ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 1234}, + }, + }, + }, + { + Name: "content", + Fields: []*telemetry.TelemetryField{ + { + Name: "bool", + ValueByType: &telemetry.TelemetryField_BoolValue{BoolValue: true}, + }, + }, + }, + }, + }, + { + Fields: []*telemetry.TelemetryField{ + { + Name: "keys", + Fields: []*telemetry.TelemetryField{ + { + Name: "name", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str2"}, + }, + }, + }, + { + Name: "content", + Fields: []*telemetry.TelemetryField{ + { + Name: "bool", + ValueByType: &telemetry.TelemetryField_BoolValue{BoolValue: false}, + }, + }, + }, + }, + }, + }, + } + data, _ := proto.Marshal(telemetry) + + c.handleTelemetry(data) + assert.Empty(t, acc.Errors) + + tags := map[string]string{"path": "type:model/some/path", "name": "str", "uint64": "1234", "source": "hostname", "subscription": "subscription"} + fields := map[string]interface{}{"bool": true} + acc.AssertContainsTaggedFields(t, "alias", fields, tags) + + tags = map[string]string{"path": "type:model/some/path", "name": "str2", "source": "hostname", "subscription": "subscription"} + fields = map[string]interface{}{"bool": false} + acc.AssertContainsTaggedFields(t, "alias", fields, tags) +} + +func TestHandleTelemetrySingleNested(t *testing.T) { + c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"nested": "type:model/nested/path"}} + acc := &testutil.Accumulator{} + c.Start(acc) + + telemetry := &telemetry.Telemetry{ + MsgTimestamp: 1543236572000, + EncodingPath: "type:model/nested/path", + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "keys", + Fields: []*telemetry.TelemetryField{ + { + Name: "nested", + Fields: 
[]*telemetry.TelemetryField{ + { + Name: "key", + Fields: []*telemetry.TelemetryField{ + { + Name: "level", + ValueByType: &telemetry.TelemetryField_DoubleValue{DoubleValue: 3}, + }, + }, + }, + }, + }, + }, + }, + { + Name: "content", + Fields: []*telemetry.TelemetryField{ + { + Name: "nested", + Fields: []*telemetry.TelemetryField{ + { + Name: "value", + Fields: []*telemetry.TelemetryField{ + { + Name: "foo", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + data, _ := proto.Marshal(telemetry) + + c.handleTelemetry(data) + assert.Empty(t, acc.Errors) + + tags := map[string]string{"path": "type:model/nested/path", "level": "3", "source": "hostname", "subscription": "subscription"} + fields := map[string]interface{}{"nested/value/foo": "bar"} + acc.AssertContainsTaggedFields(t, "nested", fields, tags) +} + +func TestTCPDialoutOverflow(t *testing.T) { + c := &CiscoTelemetryMDT{Transport: "tcp", ServiceAddress: "127.0.0.1:57000"} + acc := &testutil.Accumulator{} + assert.Nil(t, c.Start(acc)) + + hdr := struct { + MsgType uint16 + MsgEncap uint16 + MsgHdrVersion uint16 + MsgFlags uint16 + MsgLen uint32 + }{MsgLen: uint32(1000000000)} + + conn, _ := net.Dial("tcp", "127.0.0.1:57000") + binary.Write(conn, binary.BigEndian, hdr) + conn.Read([]byte{0}) + conn.Close() + + c.Stop() + + assert.Contains(t, acc.Errors, errors.New("dialout packet too long: 1000000000")) +} + +func mockTelemetryMessage() *telemetry.Telemetry { + return &telemetry.Telemetry{ + MsgTimestamp: 1543236572000, + EncodingPath: "type:model/some/path", + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "keys", + Fields: []*telemetry.TelemetryField{ + { + Name: "name", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str"}, + }, + }, + }, + { + Name: "content", + Fields: []*telemetry.TelemetryField{ + { + Name: "value", + ValueByType: &telemetry.TelemetryField_Sint64Value{Sint64Value: -1}, + }, + }, + }, + }, + }, + }, + } +} + +func TestTCPDialoutMultiple(t *testing.T) { + c := &CiscoTelemetryMDT{Transport: "tcp", ServiceAddress: "127.0.0.1:57000", Aliases: map[string]string{ + "some": "type:model/some/path", "parallel": "type:model/parallel/path", "other": "type:model/other/path"}} + acc := &testutil.Accumulator{} + assert.Nil(t, c.Start(acc)) + + telemetry := mockTelemetryMessage() + + hdr := struct { + MsgType uint16 + MsgEncap uint16 + MsgHdrVersion uint16 + MsgFlags uint16 + MsgLen uint32 + }{} + + conn, _ := net.Dial("tcp", "127.0.0.1:57000") + + data, _ := proto.Marshal(telemetry) + hdr.MsgLen = uint32(len(data)) + binary.Write(conn, binary.BigEndian, hdr) + conn.Write(data) + + conn2, _ := net.Dial("tcp", "127.0.0.1:57000") + telemetry.EncodingPath = "type:model/parallel/path" + data, _ = proto.Marshal(telemetry) + hdr.MsgLen = uint32(len(data)) + binary.Write(conn2, binary.BigEndian, hdr) + conn2.Write(data) + conn2.Write([]byte{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0}) + conn2.Read([]byte{0}) + conn2.Close() + + telemetry.EncodingPath = "type:model/other/path" + data, _ = proto.Marshal(telemetry) + hdr.MsgLen = uint32(len(data)) + binary.Write(conn, binary.BigEndian, hdr) + conn.Write(data) + conn.Write([]byte{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0}) + conn.Read([]byte{0}) + c.Stop() + conn.Close() + + // We use the invalid dialout 
flags to let the server close the connection + assert.Equal(t, acc.Errors, []error{errors.New("invalid dialout flags: 257"), errors.New("invalid dialout flags: 257")}) + + tags := map[string]string{"path": "type:model/some/path", "name": "str", "source": "hostname", "subscription": "subscription"} + fields := map[string]interface{}{"value": int64(-1)} + acc.AssertContainsTaggedFields(t, "some", fields, tags) + + tags = map[string]string{"path": "type:model/parallel/path", "name": "str", "source": "hostname", "subscription": "subscription"} + fields = map[string]interface{}{"value": int64(-1)} + acc.AssertContainsTaggedFields(t, "parallel", fields, tags) + + tags = map[string]string{"path": "type:model/other/path", "name": "str", "source": "hostname", "subscription": "subscription"} + fields = map[string]interface{}{"value": int64(-1)} + acc.AssertContainsTaggedFields(t, "other", fields, tags) +} + +func TestGRPCDialoutError(t *testing.T) { + c := &CiscoTelemetryMDT{Transport: "grpc", ServiceAddress: "127.0.0.1:57001"} + acc := &testutil.Accumulator{} + assert.Nil(t, c.Start(acc)) + + conn, _ := grpc.Dial("127.0.0.1:57001", grpc.WithInsecure()) + client := dialout.NewGRPCMdtDialoutClient(conn) + stream, _ := client.MdtDialout(context.Background()) + + args := &dialout.MdtDialoutArgs{Errors: "foobar"} + stream.Send(args) + + // Wait for the server to close + stream.Recv() + c.Stop() + + assert.Equal(t, acc.Errors, []error{errors.New("GRPC dialout error: foobar")}) +} + +func TestGRPCDialoutMultiple(t *testing.T) { + c := &CiscoTelemetryMDT{Transport: "grpc", ServiceAddress: "127.0.0.1:57001", Aliases: map[string]string{ + "some": "type:model/some/path", "parallel": "type:model/parallel/path", "other": "type:model/other/path"}} + acc := &testutil.Accumulator{} + assert.Nil(t, c.Start(acc)) + telemetry := mockTelemetryMessage() + + conn, _ := grpc.Dial("127.0.0.1:57001", grpc.WithInsecure(), grpc.WithBlock()) + client := dialout.NewGRPCMdtDialoutClient(conn) + stream, _ := client.MdtDialout(context.TODO()) + + data, _ := proto.Marshal(telemetry) + args := &dialout.MdtDialoutArgs{Data: data, ReqId: 456} + stream.Send(args) + + conn2, _ := grpc.Dial("127.0.0.1:57001", grpc.WithInsecure(), grpc.WithBlock()) + client2 := dialout.NewGRPCMdtDialoutClient(conn2) + stream2, _ := client2.MdtDialout(context.TODO()) + + telemetry.EncodingPath = "type:model/parallel/path" + data, _ = proto.Marshal(telemetry) + args = &dialout.MdtDialoutArgs{Data: data} + stream2.Send(args) + stream2.Send(&dialout.MdtDialoutArgs{Errors: "testclose"}) + stream2.Recv() + conn2.Close() + + telemetry.EncodingPath = "type:model/other/path" + data, _ = proto.Marshal(telemetry) + args = &dialout.MdtDialoutArgs{Data: data} + stream.Send(args) + stream.Send(&dialout.MdtDialoutArgs{Errors: "testclose"}) + stream.Recv() + + c.Stop() + conn.Close() + + assert.Equal(t, acc.Errors, []error{errors.New("GRPC dialout error: testclose"), errors.New("GRPC dialout error: testclose")}) + + tags := map[string]string{"path": "type:model/some/path", "name": "str", "source": "hostname", "subscription": "subscription"} + fields := map[string]interface{}{"value": int64(-1)} + acc.AssertContainsTaggedFields(t, "some", fields, tags) + + tags = map[string]string{"path": "type:model/parallel/path", "name": "str", "source": "hostname", "subscription": "subscription"} + fields = map[string]interface{}{"value": int64(-1)} + acc.AssertContainsTaggedFields(t, "parallel", fields, tags) + + tags = map[string]string{"path": "type:model/other/path", "name": 
"str", "source": "hostname", "subscription": "subscription"} + fields = map[string]interface{}{"value": int64(-1)} + acc.AssertContainsTaggedFields(t, "other", fields, tags) + +} From 476f7fb9c5fbe8814904ce6fcfbb3e56a028a7d6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 4 Jun 2019 15:04:14 -0700 Subject: [PATCH 0892/1815] Update changelog --- CHANGELOG.md | 2 ++ plugins/inputs/cisco_telemetry_gnmi/README.md | 8 +++----- plugins/inputs/cisco_telemetry_mdt/README.md | 4 +--- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6c1028dda..cbbcc495b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,8 @@ #### New Inputs - [bind](/plugins/inputs/bind/README.md) - Contributed by @dswarbrick & @danielllek +- [cisco_telemetry_gnmi](/plugins/inputs/cisco_telemetry_gnmi/README.md) - Contributed by @sbyx +- [cisco_telemetry_mdt](/plugins/inputs/cisco_telemetry_mdt/README.md) - Contributed by @sbyx - [ecs](/plugins/inputs/ecs/README.md) - Contributed by @rbtr - [github](/plugins/inputs/github/README.md) - Contributed by @influxdata - [openweathermap](/plugins/inputs/openweathermap/README.md) - Contributed by @regel diff --git a/plugins/inputs/cisco_telemetry_gnmi/README.md b/plugins/inputs/cisco_telemetry_gnmi/README.md index 3e53cf0e5..d70d66157 100644 --- a/plugins/inputs/cisco_telemetry_gnmi/README.md +++ b/plugins/inputs/cisco_telemetry_gnmi/README.md @@ -1,14 +1,12 @@ -# Cisco GNMI telemetry +# Cisco GNMI Telemetry -Cisco GNMI telemetry is an input plugin that consumes telemetry data similar to the [GNMI specification](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md). +Cisco GNMI Telemetry is an input plugin that consumes telemetry data similar to the [GNMI specification](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md). This GRPC-based protocol can utilize TLS for authentication and encryption. This plugin has been developed to support GNMI telemetry as produced by Cisco IOS XR (64-bit) version 6.5.1 and later. -### Configuration: - -This is a sample configuration for the plugin. +### Configuration ```toml [[inputs.cisco_telemetry_gnmi]] diff --git a/plugins/inputs/cisco_telemetry_mdt/README.md b/plugins/inputs/cisco_telemetry_mdt/README.md index 4672f4036..08f180b2e 100644 --- a/plugins/inputs/cisco_telemetry_mdt/README.md +++ b/plugins/inputs/cisco_telemetry_mdt/README.md @@ -12,8 +12,6 @@ The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and l ### Configuration: -This is a sample configuration for the plugin. - ```toml [[inputs.cisco_telemetry_mdt]] ## Telemetry transport (one of: tcp, grpc) @@ -38,4 +36,4 @@ This is a sample configuration for the plugin. 
``` ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet2,source=csr1kv,subscription=101 in-unicast-pkts=27i,in-multicast-pkts=0i,discontinuity-time="2019-05-23T07:40:23.000362+00:00",in-octets=5233i,in-errors=0i,out-multicast-pkts=0i,out-discards=0i,in-broadcast-pkts=0i,in-discards=0i,in-unknown-protos=0i,out-unicast-pkts=0i,out-broadcast-pkts=0i,out-octets=0i,out-errors=0i 1559150462624000000 ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet1,source=csr1kv,subscription=101 in-octets=3394770806i,in-broadcast-pkts=0i,in-multicast-pkts=0i,out-broadcast-pkts=0i,in-unknown-protos=0i,out-octets=350212i,in-unicast-pkts=9477273i,in-discards=0i,out-unicast-pkts=2726i,out-discards=0i,discontinuity-time="2019-05-23T07:40:23.000363+00:00",in-errors=30i,out-multicast-pkts=0i,out-errors=0i 1559150462624000000 -``` \ No newline at end of file +``` From ba0b0c02f7110381989cace466c48538339f22ea Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 4 Jun 2019 23:00:24 -0700 Subject: [PATCH 0893/1815] Fix race conditions in gnmi telemetry tests (#5953) --- .../cisco_telemetry_gnmi.go | 6 +- .../cisco_telemetry_gnmi_test.go | 72 +++++++++---------- 2 files changed, 34 insertions(+), 44 deletions(-) diff --git a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go index 69495d6f6..bb8a0dd7c 100644 --- a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go +++ b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go @@ -13,17 +13,15 @@ import ( "sync" "time" - "github.com/influxdata/telegraf/metric" - - "google.golang.org/grpc/credentials" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" internaltls "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/inputs" jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" "github.com/openconfig/gnmi/proto/gnmi" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" ) diff --git a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go index 7e6b527b9..f0adc8f1f 100644 --- a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go +++ b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go @@ -8,14 +8,13 @@ import ( "testing" "time" - "google.golang.org/grpc/metadata" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" - "google.golang.org/grpc" - "github.com/openconfig/gnmi/proto/gnmi" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" ) func TestParsePath(t *testing.T) { @@ -58,24 +57,10 @@ func (m *mockGNMIServer) Set(context.Context, *gnmi.SetRequest) (*gnmi.SetRespon } func (m *mockGNMIServer) Subscribe(server gnmi.GNMI_SubscribeServer) error { - // Avoid race conditions - go func() { - if m.scenario == 0 { - m.acc.WaitError(1) - } else if m.scenario == 1 || m.scenario == 3 { - m.acc.Wait(4) - } else if m.scenario == 2 { - m.acc.Wait(2) - } - if m.scenario >= 0 { - m.server.Stop() - } - }() - metadata, ok := metadata.FromIncomingContext(server.Context()) - assert.Equal(m.t, ok, true) - assert.Equal(m.t, metadata.Get("username"), []string{"theuser"}) - assert.Equal(m.t, metadata.Get("password"), []string{"thepassword"}) + 
require.Equal(m.t, ok, true) + require.Equal(m.t, metadata.Get("username"), []string{"theuser"}) + require.Equal(m.t, metadata.Get("password"), []string{"thepassword"}) switch m.scenario { case 0: @@ -106,20 +91,22 @@ func (m *mockGNMIServer) Subscribe(server gnmi.GNMI_SubscribeServer) error { } func TestGNMIError(t *testing.T) { - listener, _ := net.Listen("tcp", "127.0.0.1:57003") + listener, _ := net.Listen("tcp", "127.0.0.1:0") server := grpc.NewServer() acc := &testutil.Accumulator{} gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 0, server: server, acc: acc}) - c := &CiscoTelemetryGNMI{Addresses: []string{"127.0.0.1:57003"}, + c := &CiscoTelemetryGNMI{Addresses: []string{listener.Addr().String()}, Username: "theuser", Password: "thepassword", Encoding: "proto", Redial: internal.Duration{Duration: 1 * time.Second}} - assert.Nil(t, c.Start(acc)) - server.Serve(listener) + require.Nil(t, c.Start(acc)) + go server.Serve(listener) + acc.WaitError(1) c.Stop() + server.Stop() - assert.Contains(t, acc.Errors, errors.New("aborted GNMI subscription: rpc error: code = Unknown desc = testerror")) + require.Contains(t, acc.Errors, errors.New("aborted GNMI subscription: rpc error: code = Unknown desc = testerror")) } func mockGNMINotification() *gnmi.Notification { @@ -170,23 +157,25 @@ func mockGNMINotification() *gnmi.Notification { } func TestGNMIMultiple(t *testing.T) { - listener, _ := net.Listen("tcp", "127.0.0.1:57004") + listener, _ := net.Listen("tcp", "127.0.0.1:0") server := grpc.NewServer() acc := &testutil.Accumulator{} gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 1, server: server, acc: acc}) - c := &CiscoTelemetryGNMI{Addresses: []string{"127.0.0.1:57004"}, + c := &CiscoTelemetryGNMI{Addresses: []string{listener.Addr().String()}, Username: "theuser", Password: "thepassword", Encoding: "proto", Redial: internal.Duration{Duration: 1 * time.Second}, Subscriptions: []Subscription{{Name: "alias", Origin: "type", Path: "/model", SubscriptionMode: "sample"}}, } - assert.Nil(t, c.Start(acc)) + require.Nil(t, c.Start(acc)) - server.Serve(listener) + go server.Serve(listener) + acc.Wait(4) c.Stop() + server.Stop() - assert.Empty(t, acc.Errors) + require.Empty(t, acc.Errors) tags := map[string]string{"path": "type:/model", "source": "127.0.0.1", "foo": "bar", "name": "str", "uint64": "1234"} fields := map[string]interface{}{"some/path": int64(5678)} @@ -206,28 +195,31 @@ func TestGNMIMultiple(t *testing.T) { } func TestGNMIMultipleRedial(t *testing.T) { - listener, _ := net.Listen("tcp", "127.0.0.1:57004") + listener, _ := net.Listen("tcp", "127.0.0.1:0") server := grpc.NewServer() acc := &testutil.Accumulator{} gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 2, server: server, acc: acc}) - c := &CiscoTelemetryGNMI{Addresses: []string{"127.0.0.1:57004"}, + c := &CiscoTelemetryGNMI{Addresses: []string{listener.Addr().String()}, Username: "theuser", Password: "thepassword", Encoding: "proto", - Redial: internal.Duration{Duration: 500 * time.Millisecond}, + Redial: internal.Duration{Duration: 10 * time.Millisecond}, Subscriptions: []Subscription{{Name: "alias", Origin: "type", Path: "/model", SubscriptionMode: "sample"}}, } - assert.Nil(t, c.Start(acc)) - server.Serve(listener) + require.Nil(t, c.Start(acc)) - listener, _ = net.Listen("tcp", "127.0.0.1:57004") + go server.Serve(listener) + acc.Wait(2) + server.Stop() + + listener, _ = net.Listen("tcp", listener.Addr().String()) server = grpc.NewServer() gnmi.RegisterGNMIServer(server, 
&mockGNMIServer{t: t, scenario: 3, server: server, acc: acc}) - server.Serve(listener) + go server.Serve(listener) + acc.Wait(4) c.Stop() - - assert.Empty(t, acc.Errors) + server.Stop() tags := map[string]string{"path": "type:/model", "source": "127.0.0.1", "foo": "bar", "name": "str", "uint64": "1234"} fields := map[string]interface{}{"some/path": int64(5678)} From 8bc768b2391504e79c7ebcfda5c40f4530147fbb Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 5 Jun 2019 03:00:59 -0700 Subject: [PATCH 0894/1815] Make case insensitive container status comparison (#5954) --- plugins/inputs/ecs/README.md | 8 +++++--- plugins/inputs/ecs/ecs.go | 22 +++++++++++++++++----- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/ecs/README.md b/plugins/inputs/ecs/README.md index e6407a58b..f391a6b9c 100644 --- a/plugins/inputs/ecs/README.md +++ b/plugins/inputs/ecs/README.md @@ -28,7 +28,9 @@ present in the metadata/stats endpoints. # container_name_exclude = [] ## Container states to include and exclude. Globs accepted. - ## When empty only containers in the "running" state will be captured. + ## When empty only containers in the "RUNNING" state will be captured. + ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING", + ## "RESOURCES_PROVISIONED", "STOPPED". # container_status_include = [] # container_status_exclude = [] @@ -37,8 +39,8 @@ present in the metadata/stats endpoints. ecs_label_include = [ "com.amazonaws.ecs.*" ] ecs_label_exclude = [] - ## Timeout for docker list, info, and stats commands - timeout = "5s" + ## Timeout for queries. + # timeout = "5s" ``` ### Metrics diff --git a/plugins/inputs/ecs/ecs.go b/plugins/inputs/ecs/ecs.go index 916b45bb2..b3fe5f347 100644 --- a/plugins/inputs/ecs/ecs.go +++ b/plugins/inputs/ecs/ecs.go @@ -2,6 +2,7 @@ package ecs import ( "net/url" + "strings" "time" "github.com/influxdata/telegraf" @@ -51,7 +52,9 @@ var sampleConfig = ` # container_name_exclude = [] ## Container states to include and exclude. Globs accepted. - ## When empty only containers in the "running" state will be captured. + ## When empty only containers in the "RUNNING" state will be captured. + ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING", + ## "RESOURCES_PROVISIONED", "STOPPED". # container_status_include = [] # container_status_exclude = [] @@ -60,8 +63,8 @@ var sampleConfig = ` ecs_label_include = [ "com.amazonaws.ecs.*" ] ecs_label_exclude = [] - ## Timeout for docker list, info, and stats commands - timeout = "5s" + ## Timeout for queries. + # timeout = "5s" ` // Description describes ECS plugin @@ -157,7 +160,7 @@ func (ecs *Ecs) accContainers(task *Task, taskTags map[string]string, acc telegr continue } - if !ecs.statusFilter.Match(c.KnownStatus) { + if !ecs.statusFilter.Match(strings.ToUpper(c.KnownStatus)) { continue } @@ -215,8 +218,17 @@ func (ecs *Ecs) createLabelFilters() error { func (ecs *Ecs) createContainerStatusFilters() error { if len(ecs.ContainerStatusInclude) == 0 && len(ecs.ContainerStatusExclude) == 0 { - ecs.ContainerStatusInclude = []string{"running"} + ecs.ContainerStatusInclude = []string{"RUNNING"} } + + // ECS uses uppercase status names, normalizing for comparison. 
+ for i, include := range ecs.ContainerStatusInclude { + ecs.ContainerStatusInclude[i] = strings.ToUpper(include) + } + for i, exclude := range ecs.ContainerStatusExclude { + ecs.ContainerStatusExclude[i] = strings.ToUpper(exclude) + } + filter, err := filter.NewIncludeExcludeFilter(ecs.ContainerStatusInclude, ecs.ContainerStatusExclude) if err != nil { return err From 77cac557baabadc18f16b08249779798c15002a9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 5 Jun 2019 12:34:45 -0700 Subject: [PATCH 0895/1815] Log a warning on write if the metric buffer has overflowed (#5959) --- internal/models/buffer.go | 15 +++++++++++---- internal/models/running_output.go | 11 ++++++++++- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/internal/models/buffer.go b/internal/models/buffer.go index 5d036c728..f7f343e46 100644 --- a/internal/models/buffer.go +++ b/internal/models/buffer.go @@ -99,10 +99,12 @@ func (b *Buffer) metricDropped(metric telegraf.Metric) { metric.Reject() } -func (b *Buffer) add(m telegraf.Metric) { +func (b *Buffer) add(m telegraf.Metric) int { + dropped := 0 // Check if Buffer is full if b.size == b.cap { b.metricDropped(b.buf[b.last]) + dropped++ if b.last == b.batchFirst && b.batchSize > 0 { b.batchSize-- @@ -120,18 +122,23 @@ func (b *Buffer) add(m telegraf.Metric) { } b.size = min(b.size+1, b.cap) + return dropped } -// Add adds metrics to the buffer -func (b *Buffer) Add(metrics ...telegraf.Metric) { +// Add adds metrics to the buffer and returns number of dropped metrics. +func (b *Buffer) Add(metrics ...telegraf.Metric) int { b.Lock() defer b.Unlock() + dropped := 0 for i := range metrics { - b.add(metrics[i]) + if n := b.add(metrics[i]); n != 0 { + dropped += n + } } b.BufferSize.Set(int64(b.length())) + return dropped } // Batch returns a slice containing up to batchSize of the most recently added diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 4cec18cc8..ff2b88e2a 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -32,6 +32,7 @@ type OutputConfig struct { type RunningOutput struct { // Must be 64-bit aligned newMetricsCount int64 + droppedMetrics int64 Name string Output telegraf.Output @@ -118,7 +119,8 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { return } - ro.buffer.Add(metric) + dropped := ro.buffer.Add(metric) + atomic.AddInt64(&ro.droppedMetrics, int64(dropped)) count := atomic.AddInt64(&ro.newMetricsCount, 1) if count == int64(ro.MetricBatchSize) { @@ -188,6 +190,13 @@ func (ro *RunningOutput) Close() { } func (ro *RunningOutput) write(metrics []telegraf.Metric) error { + dropped := atomic.LoadInt64(&ro.droppedMetrics) + if dropped > 0 { + log.Printf("W! 
[outputs.%s] Metric buffer overflow; %d metrics have been dropped", + ro.Name, dropped) + atomic.StoreInt64(&ro.droppedMetrics, 0) + } + start := time.Now() err := ro.Output.Write(metrics) elapsed := time.Since(start) From 7be74816a250f6cca7fc095108446c9e97d8ebe5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 5 Jun 2019 12:47:17 -0700 Subject: [PATCH 0896/1815] Add source tag to hddtemp plugin (#5955) --- plugins/inputs/hddtemp/README.md | 54 +++++++++++++------------- plugins/inputs/hddtemp/hddtemp.go | 9 ++++- plugins/inputs/hddtemp/hddtemp_test.go | 3 ++ 3 files changed, 37 insertions(+), 29 deletions(-) diff --git a/plugins/inputs/hddtemp/README.md b/plugins/inputs/hddtemp/README.md index 3bafb4f21..d2d3e4f13 100644 --- a/plugins/inputs/hddtemp/README.md +++ b/plugins/inputs/hddtemp/README.md @@ -1,43 +1,41 @@ -# Hddtemp Input Plugin +# HDDtemp Input Plugin -This plugin reads data from hddtemp daemon +This plugin reads data from hddtemp daemon. -## Requirements +Hddtemp should be installed and its daemon running. -Hddtemp should be installed and its daemon running - -## Configuration +### Configuration ```toml [[inputs.hddtemp]] -## By default, telegraf gathers temps data from all disks detected by the -## hddtemp. -## -## Only collect temps from the selected disks. -## -## A * as the device name will return the temperature values of all disks. -## -# address = "127.0.0.1:7634" -# devices = ["sda", "*"] + ## By default, telegraf gathers temps data from all disks detected by the + ## hddtemp. + ## + ## Only collect temps from the selected disks. + ## + ## A * as the device name will return the temperature values of all disks. + ## + # address = "127.0.0.1:7634" + # devices = ["sda", "*"] ``` -## Measurements +### Metrics - hddtemp - - temperature - -Tags: -- device -- model -- unit -- status + - tags: + - device + - model + - unit + - status + - source + - fields: + - temperature - -## Example output +### Example output ``` -> hddtemp,unit=C,status=,host=server1,device=sdb,model=WDC\ WD740GD-00FLA1 temperature=43i 1481655647000000000 -> hddtemp,device=sdc,model=SAMSUNG\ HD103UI,unit=C,status=,host=server1 temperature=38i 148165564700000000 -> hddtemp,device=sdd,model=SAMSUNG\ HD103UI,unit=C,status=,host=server1 temperature=36i 1481655647000000000 +hddtemp,source=server1,unit=C,status=,device=sdb,model=WDC\ WD740GD-00FLA1 temperature=43i 1481655647000000000 +hddtemp,device=sdc,model=SAMSUNG\ HD103UI,unit=C,source=server1,status= temperature=38i 148165564700000000 +hddtemp,device=sdd,model=SAMSUNG\ HD103UI,unit=C,source=server1,status= temperature=36i 1481655647000000000 ``` diff --git a/plugins/inputs/hddtemp/hddtemp.go b/plugins/inputs/hddtemp/hddtemp.go index dd4622df4..0f084ac21 100644 --- a/plugins/inputs/hddtemp/hddtemp.go +++ b/plugins/inputs/hddtemp/hddtemp.go @@ -1,6 +1,8 @@ package hddtemp import ( + "net" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" gohddtemp "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp" @@ -42,8 +44,12 @@ func (h *HDDTemp) Gather(acc telegraf.Accumulator) error { if h.fetcher == nil { h.fetcher = gohddtemp.New() } - disks, err := h.fetcher.Fetch(h.Address) + source, _, err := net.SplitHostPort(h.Address) + if err != nil { + source = h.Address + } + disks, err := h.fetcher.Fetch(h.Address) if err != nil { return err } @@ -56,6 +62,7 @@ func (h *HDDTemp) Gather(acc telegraf.Accumulator) error { "model": disk.Model, "unit": disk.Unit, "status": disk.Status, + "source": source, } fields := 
map[string]interface{}{ diff --git a/plugins/inputs/hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/hddtemp_test.go index e09e833e7..f299c2ac6 100644 --- a/plugins/inputs/hddtemp/hddtemp_test.go +++ b/plugins/inputs/hddtemp/hddtemp_test.go @@ -36,6 +36,7 @@ func newMockFetcher() *mockFetcher { func TestFetch(t *testing.T) { hddtemp := &HDDTemp{ fetcher: newMockFetcher(), + Address: "localhost", Devices: []string{"*"}, } @@ -58,6 +59,7 @@ func TestFetch(t *testing.T) { "model": "Model1", "unit": "C", "status": "", + "source": "localhost", }, }, { @@ -69,6 +71,7 @@ func TestFetch(t *testing.T) { "model": "Model2", "unit": "C", "status": "", + "source": "localhost", }, }, } From 0fb9040a64e5a53cad7845078856d6788f526244 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 5 Jun 2019 13:55:21 -0700 Subject: [PATCH 0897/1815] Fix double close error with log rotation (#5960) --- internal/rotate/file_writer.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/internal/rotate/file_writer.go b/internal/rotate/file_writer.go index fe8c2fd71..03f837f93 100644 --- a/internal/rotate/file_writer.go +++ b/internal/rotate/file_writer.go @@ -99,9 +99,6 @@ func (w *FileWriter) Close() (err error) { return err } - if err = w.current.Close(); err != nil { - return err - } w.current = nil return nil } From 4e147919d374374ad2731ef4541bd14ff57bc43d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 5 Jun 2019 13:59:10 -0700 Subject: [PATCH 0898/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cbbcc495b..c59858ca3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,6 +58,7 @@ - [#5901](https://github.com/influxdata/telegraf/pull/5901): Set user agent in stackdriver output. - [#5885](https://github.com/influxdata/telegraf/pull/5885): Extend metrics collected from Nvidia GPUs. - [#5547](https://github.com/influxdata/telegraf/pull/5547): Add file rotation support to the file output. +- [#5955](https://github.com/influxdata/telegraf/pull/5955): Add source tag to hddtemp plugin. #### Bugfixes @@ -80,6 +81,7 @@ - [#5879](https://github.com/influxdata/telegraf/issues/5879): Fix multiple SIGHUP causes Telegraf to shutdown. - [#5891](https://github.com/influxdata/telegraf/issues/5891): Fix connection leak in influxdb outputs on reload. - [#5858](https://github.com/influxdata/telegraf/issues/5858): Fix batch fails when single metric is unserializable. +- [#5536](https://github.com/influxdata/telegraf/issues/5536): Log a warning on write if the metric buffer has overflowed. 
## v1.10.4 [2019-05-14] From f2b3b356404c934f1da10f8a70d85151f0696a53 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 5 Jun 2019 14:07:02 -0700 Subject: [PATCH 0899/1815] Print global_tags first in sample configuration --- internal/config/config.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index 21f5db336..ab50c7df8 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -33,7 +33,7 @@ import ( var ( // Default sections - sectionDefaults = []string{"agent", "global_tags", "outputs", + sectionDefaults = []string{"global_tags", "agent", "outputs", "processors", "aggregators", "inputs"} // Default input plugins @@ -536,13 +536,13 @@ func printFilteredOutputs(outputFilters []string, commented bool) { } func printFilteredGlobalSections(sectionFilters []string) { - if sliceContains("agent", sectionFilters) { - fmt.Printf(agentConfig) - } - if sliceContains("global_tags", sectionFilters) { fmt.Printf(globalTagsConfig) } + + if sliceContains("agent", sectionFilters) { + fmt.Printf(agentConfig) + } } type printer interface { From 9e3f918db5f4735a3c13eac4ba0bacc05b398bd8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 5 Jun 2019 14:07:30 -0700 Subject: [PATCH 0900/1815] Update sample configurations --- etc/telegraf.conf | 433 +++++++++++++++++++++++++++++++++----- etc/telegraf_windows.conf | 12 ++ 2 files changed, 392 insertions(+), 53 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index c386d171f..cf2a0d933 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -61,13 +61,25 @@ ## Valid time units are "ns", "us" (or "µs"), "ms", "s". precision = "" - ## Logging configuration: - ## Run telegraf with debug log messages. - debug = false - ## Run telegraf in quiet mode (error log messages only). - quiet = false - ## Specify the log file name. The empty string means to log to stderr. - logfile = "" + ## Log at debug level. + # debug = false + ## Log only error level messages. + # quiet = false + + ## Log file name, the empty string means to log to stderr. + # logfile = "" + + ## The logfile will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. + # logfile_rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # logfile_rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # logfile_rotation_max_archives = 5 ## Override default hostname, if empty use os.Hostname() hostname = "" @@ -79,6 +91,7 @@ # OUTPUT PLUGINS # ############################################################################### + # Configuration for sending metrics to InfluxDB [[outputs.influxdb]] ## The full HTTP or UDP URL for your InfluxDB instance. @@ -183,7 +196,7 @@ # # exchange_type = "topic" # # ## If true, exchange will be passively declared. -# # exchange_declare_passive = false +# # exchange_passive = false # # ## Exchange durability can be either "transient" or "durable". # # exchange_durability = "durable" @@ -242,6 +255,14 @@ # ## Recommended to set to true. # # use_batch_format = false # +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. 
+# ## +# ## Please note that when use_batch_format = false each amqp message contains only +# ## a single metric; it is recommended to use compression with batch format +# ## for best results. +# # content_encoding = "identity" +# # ## Data format to output. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -476,6 +497,18 @@ # ## Files to write to, "stdout" is a specially handled file. # files = ["stdout", "/tmp/metrics.out"] # +# ## The file will be rotated after the time interval specified. When set +# ## to 0 no time based rotation is performed. +# # rotation_interval = "0d" +# +# ## The file will be rotated when it becomes larger than the specified +# ## size. When set to 0 no size based rotation is performed. +# # rotation_max_size = "0MB" +# +# ## Maximum number of rotated archives to keep, any older logs are deleted. +# ## If set to -1, no archives are removed. +# # rotation_max_archives = 5 +# # ## Data format to output. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -515,6 +548,46 @@ # servers = ["127.0.0.1:12201", "192.168.1.1:12201"] +# # Configurable HTTP health check resource based on metrics +# [[outputs.health]] +# ## Address and port to listen on. +# ## ex: service_address = "tcp://localhost:8080" +# ## service_address = "unix:///var/run/telegraf-health.sock" +# # service_address = "tcp://:8080" +# +# ## The maximum duration for reading the entire request. +# # read_timeout = "5s" +# ## The maximum duration for writing the entire response. +# # write_timeout = "5s" +# +# ## Username and password to accept for HTTP basic authentication. +# # basic_username = "user1" +# # basic_password = "secret" +# +# ## Allowed CA certificates for client certificates. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## TLS server certificate and private key. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## One or more check sub-tables should be defined; it is also recommended to +# ## use metric filtering to limit the metrics that flow into this output. +# ## +# ## When using the default buffer sizes, this example will fail when the +# ## metric buffer is half full. +# ## +# ## namepass = ["internal_write"] +# ## tagpass = { output = ["influxdb"] } +# ## +# ## [[outputs.health.compares]] +# ## field = "buffer_size" +# ## lt = 5000.0 +# ## +# ## [[outputs.health.contains]] +# ## field = "buffer_size" + + # # A plugin that can transmit metrics over HTTP # [[outputs.http]] # ## URL is the address to send metrics to @@ -1061,6 +1134,84 @@ # # location = "eu-north0" + +# # Configuration for Syslog server to send metrics to +# [[outputs.syslog]] +# ## URL to connect to +# ## ex: address = "tcp://127.0.0.1:8094" +# ## ex: address = "tcp4://127.0.0.1:8094" +# ## ex: address = "tcp6://127.0.0.1:8094" +# ## ex: address = "tcp6://[2001:db8::1]:8094" +# ## ex: address = "udp://127.0.0.1:8094" +# ## ex: address = "udp4://127.0.0.1:8094" +# ## ex: address = "udp6://127.0.0.1:8094" +# address = "tcp://127.0.0.1:8094" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m" +# +# ## The framing technique with which it is expected that messages are +# ## transported (default = "octet-counting"). Whether the messages come +# ## using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), +# ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must +# ## be one of "octet-counting", "non-transparent". +# # framing = "octet-counting" +# +# ## The trailer to be expected in case of non-transparent framing (default = "LF"). +# ## Must be one of "LF", or "NUL". +# # trailer = "LF" +# +# ## SD-PARAMs settings +# ## Syslog messages can contain key/value pairs within zero or more +# ## structured data sections. For each unrecognised metric tag/field an +# ## SD-PARAM is created. +# ## +# ## Example: +# ## [[outputs.syslog]] +# ## sdparam_separator = "_" +# ## default_sdid = "default@32473" +# ## sdids = ["foo@123", "bar@456"] +# ## +# ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1 +# ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y] +# +# ## SD-PARAMs separator between the sdid and tag/field key (default = "_") +# # sdparam_separator = "_" +# +# ## Default sdid used for tags/fields that don't contain a prefix defined in +# ## the explicit sdids setting below. If no default is specified, no SD-PARAMs +# ## will be used for unrecognised fields. +# # default_sdid = "default@32473" +# +# ## List of explicit prefixes to extract from tag/field keys and use as the +# ## SDID, if they match (see above example for more details): +# # sdids = ["foo@123", "bar@456"] +# +# ## Default severity value. Severity and Facility are used to calculate the +# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field +# ## with key "severity_code" is defined. If unset, 5 (notice) is the default +# # default_severity_code = 5 +# +# ## Default facility value. Facility and Severity are used to calculate the +# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with +# ## key "facility_code" is defined. If unset, 1 (user-level) is the default +# # default_facility_code = 1 +# +# ## Default APP-NAME value (RFC5424#section-6.2.5) +# ## Used when no metric tag with key "appname" is defined. +# ## If unset, "Telegraf" is the default +# # default_appname = "Telegraf" + + # # Configuration for Wavefront server to send metrics to # [[outputs.wavefront]] # ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy @@ -1111,11 +1262,11 @@ # # red = 0.0 - ############################################################################### # PROCESSOR PLUGINS # ############################################################################### + # # Convert values to another metric value type # [[processors.converter]] # ## Tags to convert @@ -1150,9 +1301,12 @@ # ## Name of the field to map # field = "status" # -# ## Destination field to be used for the mapped value. By default the source -# ## field is used, overwriting the original value. -# # dest = "status_code" +# ## Name of the tag to map +# # tag = "status" +# +# ## Destination tag or field to be used for the mapped value. By default the +# ## source tag or field is used, overwriting the original value. +# dest = "status_code" # # ## Default value to be used for all values not contained in the mapping # ## table.
When unset, the unmodified value for the field will be used if no @@ -1162,7 +1316,7 @@ # ## Table of mappings # [processors.enum.mapping.value_mappings] # green = 1 -# yellow = 2 +# amber = 2 # red = 3 @@ -1327,11 +1481,11 @@ # # add_aggregate_fields = [] - ############################################################################### # AGGREGATOR PLUGINS # ############################################################################### + # # Keep the aggregate basicstats of each metric passing through. # [[aggregators.basicstats]] # ## The period on which to flush & clear the aggregator. @@ -1344,6 +1498,18 @@ # # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"] + +# # Report the final metric of a series +# [[aggregators.final]] +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## The time that a series is not updated until considering it final. +# series_timeout = "5m" + + # # Create aggregate histograms. # [[aggregators.histogram]] # ## The period in which to flush the aggregator. @@ -1396,11 +1562,11 @@ # fields = [] - ############################################################################### # INPUT PLUGINS # ############################################################################### + # Read metrics about cpu usage [[inputs.cpu]] ## Whether to report per-cpu stats or not @@ -1420,7 +1586,7 @@ # mount_points = ["/"] ## Ignore mount points by filesystem type. - ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] + ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] # Read metrics about disk IO by device @@ -1725,12 +1891,12 @@ # ## 4) environment variables # ## 5) shared credentials file # ## 6) EC2 Instance Profile -# #access_key = "" -# #secret_key = "" -# #token = "" -# #role_arn = "" -# #profile = "" -# #shared_credential_file = "" +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # profile = "" +# # shared_credential_file = "" # # ## Endpoint to make request against, the correct endpoint is automatically # ## determined and this option should only be set if you wish to override the @@ -1756,27 +1922,35 @@ # interval = "5m" # # ## Configure the TTL for the internal cache of metrics. -# ## Defaults to 1 hr if not specified -# #cache_ttl = "10m" +# # cache_ttl = "1h" # # ## Metric Statistic Namespace (required) # namespace = "AWS/ELB" # # ## Maximum requests per second. Note that the global default AWS rate limit is -# ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a -# ## maximum of 400. Optional - default value is 200. +# ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a +# ## maximum of 50. # ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html -# ratelimit = 200 +# # ratelimit = 25 # -# ## Metrics to Pull (optional) +# ## Namespace-wide statistic filters. These allow fewer queries to be made to +# ## cloudwatch. +# # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ] +# # statistic_exclude = [] +# +# ## Metrics to Pull # ## Defaults to all Metrics in Namespace if nothing is provided # ## Refreshes Namespace available metrics every 1h # #[[inputs.cloudwatch.metrics]] # # names = ["Latency", "RequestCount"] # # -# # ## Dimension filters for Metric.
These are optional however all dimensions # # ## defined for the metric names must be specified in order to retrieve # # ## the metric statistics. +# # ## Statistic filters for Metric. These allow for retrieving specific +# # ## statistics for an individual metric. +# # # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ] +# # # statistic_exclude = [] +# # +# # ## Dimension filters for Metric. All dimensions defined for the metric names +# # ## must be specified in order to retrieve the metric statistics. # # [[inputs.cloudwatch.metrics.dimensions]] # # name = "LoadBalancerName" # # value = "p-example" @@ -1997,6 +2171,32 @@ # filters = [""] + +# # Read metrics about docker containers from Fargate/ECS v2 meta endpoints. +# [[inputs.ecs]] +# ## ECS metadata url +# # endpoint_url = "http://169.254.170.2" +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# # container_name_include = [] +# # container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "RUNNING" state will be captured. +# ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING", +# ## "RESOURCES_PROVISIONED", "STOPPED". +# # container_status_include = [] +# # container_status_exclude = [] +# +# ## ecs labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# ecs_label_include = [ "com.amazonaws.ecs.*" ] +# ecs_label_exclude = [] +# +# ## Timeout for queries. +# # timeout = "5s" + + # # Read stats from one or more Elasticsearch servers or clusters # [[inputs.elasticsearch]] # ## specify a list of one or more Elasticsearch servers @@ -2884,14 +3084,19 @@ # # insecure_skip_verify = false -# # Collects scores from a minecraft server's scoreboard using the RCON protocol +# # Collects scores from a Minecraft server's scoreboard using the RCON protocol # [[inputs.minecraft]] -# ## server address for minecraft +# ## Address of the Minecraft server. # # server = "localhost" -# ## port for RCON +# +# ## Server RCON Port. # # port = "25575" -# ## password RCON for mincraft server -# # password = "" +# +# ## Server RCON Password. +# password = "" +# +# ## Uncomment to remove deprecated metric components. +# # tagdrop = ["server"] # # Read metrics from one or many MongoDB servers @@ -3246,6 +3451,32 @@ # timeout = 1000 + +# # Read current weather and forecasts data from openweathermap.org +# [[inputs.openweathermap]] +# ## OpenWeatherMap API key. +# app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +# +# ## City ID's to collect weather data from. +# city_id = ["5391959"] +# +# ## APIs to fetch; can contain "weather" or "forecast". +# fetch = ["weather", "forecast"] +# +# ## OpenWeatherMap base URL +# # base_url = "https://api.openweathermap.org/" +# +# ## Timeout for HTTP response. +# # response_timeout = "5s" +# +# ## Preferred unit system for temperature and wind speed. Can be one of +# ## "metric", "imperial", or "standard". +# # units = "metric" +# +# ## Query interval; OpenWeatherMap updates their weather data every 10 +# ## minutes. +# interval = "10m" + + # # Read metrics of passenger using passenger-status # [[inputs.passenger]] # ## Path of passenger-status.
@@ -3346,18 +3577,16 @@ # unix_sockets = ["/var/run/pdns.controlsocket"] -# # Read metrics from one or many PowerDNS recursors +# # Read metrics from one or many PowerDNS Recursor servers # [[inputs.powerdns_recursor]] # ## An array of sockets to gather stats about. # ## Specify a path to unix socket. -# ## -# ## If no servers are specified, then '/var/run/pdns_recursor.controlsocket' is used as the path. # unix_sockets = ["/var/run/pdns_recursor.controlsocket"] # # ## Socket for Receive -# # socket_dir = "/var/run/" +# #socket_dir = "/var/run/" # ## Socket permissions -# # socket_mode = "0666" +# #socket_mode = "0666" # # Monitor process cpu and memory usage @@ -3539,13 +3768,13 @@ # [[inputs.smart]] # ## Optionally specify the path to the smartctl executable # # path = "/usr/bin/smartctl" -# # +# # ## On most platforms smartctl requires root access. # ## Setting 'use_sudo' to true will make use of sudo to run smartctl. # ## Sudo must be configured to allow the telegraf user to run smartctl -# ## with out password. +# ## without a password. # # use_sudo = false -# # +# # ## Skip checking disks in this power mode. Defaults to # ## "standby" to not wake up disks that have stopped rotating. # ## See --nocheck in the man pages for smartctl. @@ -3553,15 +3782,13 @@ # ## power mode and might require changing this value to # ## "never" depending on your disks. # # nocheck = "standby" -# # +# # ## Gather detailed metrics for each SMART Attribute. -# ## Defaults to "false" -# ## # # attributes = false -# # +# # ## Optionally specify devices to exclude from reporting. # # excludes = [ "/dev/pass6" ] -# # +# # ## Optionally specify devices and device type, if unset # ## a scan (smartctl --scan) for S.M.A.R.T. devices will # ## be done and all found will be included except for the @@ -3739,6 +3966,10 @@ # # ## specify a list of one or more Solr cores (default - all) # # cores = ["main"] +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" # # Read metrics from Microsoft SQL Server @@ -4091,11 +4322,11 @@ # # insecure_skip_verify = true - ############################################################################### # SERVICE INPUT PLUGINS # ############################################################################### + # # AMQP consumer plugin # [[inputs.amqp_consumer]] # ## Broker to consume from. @@ -4111,7 +4342,7 @@ # # username = "" # # password = "" # -# ## Exchange to declare and consume from. +# ## Name of the exchange to declare. If unset, no exchange will be declared. # exchange = "telegraf" # # ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". @@ -4133,7 +4364,11 @@ # ## AMQP queue durability can be "transient" or "durable". # queue_durability = "durable" # -# ## Binding Key. +# ## If true, queue will be passively declared. +# # queue_passive = false +# +# ## A binding between the exchange and queue using this binding key is +# ## created. If unset, no binding is created. # binding_key = "#" # # ## Maximum number of messages server should give to the worker. @@ -4161,6 +4396,10 @@ # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # +# ## Content encoding for message payloads, can be set to "gzip" or +# ## "identity" to apply no encoding. +# # content_encoding = "identity" +# # ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -4190,6 +4429,84 @@ # ] + +# # Cisco GNMI telemetry input plugin based on GNMI telemetry data produced in IOS XR +# [[inputs.cisco_telemetry_gnmi]] +# ## Address and port of the GNMI GRPC server +# addresses = ["10.49.234.114:57777"] +# +# ## define credentials +# username = "cisco" +# password = "cisco" +# +# ## GNMI encoding requested (one of: "proto", "json", "json_ietf") +# # encoding = "proto" +# +# ## redial in case of failures after +# redial = "10s" +# +# ## enable client-side TLS and define CA to authenticate the device +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # insecure_skip_verify = true +# +# ## define client-side TLS certificate & key to authenticate to the device +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## GNMI subscription prefix (optional, can usually be left empty) +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# # origin = "" +# # prefix = "" +# # target = "" +# +# ## Define additional aliases to map telemetry encoding paths to simple measurement names +# #[inputs.cisco_telemetry_gnmi.aliases] +# # ifcounters = "openconfig:/interfaces/interface/state/counters" +# +# [[inputs.cisco_telemetry_gnmi.subscription]] +# ## Name of the measurement that will be emitted +# name = "ifcounters" +# +# ## Origin and path of the subscription +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# ## +# ## origin usually refers to a (YANG) data model implemented by the device +# ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath) +# ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr +# origin = "openconfig-interfaces" +# path = "/interfaces/interface/state/counters" +# +# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval +# subscription_mode = "sample" +# sample_interval = "10s" +# +# ## Suppress redundant transmissions when measured values are unchanged +# # suppress_redundant = false +# +# ## If suppression is enabled, send updates at least every X seconds anyway +# # heartbeat_interval = "60s" + + +# # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms +# [[inputs.cisco_telemetry_mdt]] +# ## Telemetry transport (one of: tcp, grpc) +# transport = "grpc" +# +# ## Address and port to host telemetry listener +# service_address = ":57000" +# +# ## Enable TLS for GRPC transport +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Enable TLS client authentication and define allowed CA certificates +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Define aliases to map telemetry encoding paths to simple measurement names +# [inputs.cisco_telemetry_mdt.aliases] +# ifstats = "ietf-interfaces:interfaces-state/interface/statistics" + + # # Read metrics from Google PubSub # [[inputs.cloud_pubsub]] # ## Required. Name of Google Cloud Platform (GCP) Project that owns @@ -4941,6 +5258,13 @@ # # service_address = "unix:///tmp/telegraf.sock" # # service_address = "unixgram:///tmp/telegraf.sock" # +# ## Change the file mode bits on unix sockets.
These permissions may not be +# ## respected by some platforms; to safely restrict write permissions it is best +# ## to place the socket into a directory that has previously been created +# ## with the desired permissions. +# ## ex: socket_mode = "777" +# # socket_mode = "" +# # ## Maximum number of concurrent connections. # ## Only applies to stream sockets (e.g. TCP). # ## 0 (default) is unlimited. @@ -5018,6 +5342,9 @@ # ## http://docs.datadoghq.com/guides/dogstatsd/ # parse_data_dog_tags = false # +# ## Parses datadog extensions to the statsd format +# datadog_extensions = false +# # ## Statsd data translation templates, more info can be read here: # ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md # # templates = [ @@ -5065,7 +5392,7 @@ # ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). # ## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), # ## or the non-transparent framing technique (RFC6587#section-3.4.2). -# ## Must be one of "octect-counting", "non-transparent". +# ## Must be one of "octet-counting", "non-transparent". # # framing = "octet-counting" # # ## The trailer to be expected in case of non-transparent framing (default = "LF"). diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index 3263eea11..b02a6e1e8 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -69,6 +69,18 @@ ## Specify the log file name. The empty string means to log to stderr. logfile = "/Program Files/Telegraf/telegraf.log" + ## The logfile will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. + # logfile_rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # logfile_rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # logfile_rotation_max_archives = 5 + ## Override default hostname, if empty use os.Hostname() hostname = "" ## If set to true, do not set the "host" tag in the telegraf agent.
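The `logfile_rotation_*` agent settings and the file output's `rotation_*` options added throughout PATCH 0900 describe two independent triggers: rotate when a write would push the file past a size limit, and rotate when an interval elapses (0 disables either rule). A rough sketch of those rules as documented — the type and field names below are hypothetical stand-ins, not Telegraf's actual `internal/rotate` implementation:

```go
package main

import (
	"fmt"
	"time"
)

// rotationPolicy is a hypothetical stand-in for the rotating writer's
// bookkeeping; the field names are illustrative only.
type rotationPolicy struct {
	bytesWritten int64         // bytes written to the current file
	maxSize      int64         // e.g. logfile_rotation_max_size; 0 disables size-based rotation
	openedAt     time.Time     // when the current file was opened
	interval     time.Duration // e.g. logfile_rotation_interval; 0 disables time-based rotation
}

// shouldRotate applies the two documented rules: rotate when a pending write
// would push the file past the size limit, or when the interval has elapsed.
func (p *rotationPolicy) shouldRotate(pending int64, now time.Time) bool {
	if p.maxSize > 0 && p.bytesWritten+pending > p.maxSize {
		return true
	}
	if p.interval > 0 && now.Sub(p.openedAt) >= p.interval {
		return true
	}
	return false
}

func main() {
	p := &rotationPolicy{
		bytesWritten: 9 << 20,  // ~9MB already in the current file
		maxSize:      10 << 20, // "10MB"
		openedAt:     time.Now(),
	}
	fmt.Println(p.shouldRotate(2<<20, time.Now())) // true: the write would exceed 10MB
}
```

`logfile_rotation_max_archives` / `rotation_max_archives` then bounds how many rotated files are kept, with -1 keeping all of them.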
From f22e7a146596f5d3ce9fd65fae1ed74635772f92 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 5 Jun 2019 14:09:59 -0700 Subject: [PATCH 0901/1815] Update next version for builds --- scripts/build.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/build.py b/scripts/build.py index 86813f8bd..5f2bcf6b5 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -99,7 +99,7 @@ supported_packages = { "freebsd": [ "tar" ] } -next_version = '1.11.0' +next_version = '1.12.0' ################ #### Telegraf Functions From d31f1735d9eb5eabe0ada5be31aef281aab3056c Mon Sep 17 00:00:00 2001 From: Oleg Kovalov Date: Fri, 7 Jun 2019 21:24:26 +0200 Subject: [PATCH 0902/1815] Use fmt.Errorf for creating error with formatting (#5968) --- plugins/inputs/dns_query/dns_query.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/dns_query/dns_query.go b/plugins/inputs/dns_query/dns_query.go index 3fcf4a0b8..b33e508ea 100644 --- a/plugins/inputs/dns_query/dns_query.go +++ b/plugins/inputs/dns_query/dns_query.go @@ -1,7 +1,6 @@ package dns_query import ( - "errors" "fmt" "net" "strconv" @@ -162,7 +161,7 @@ func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, int, func (d *DnsQuery) parseRecordType() (uint16, error) { var recordType uint16 - var error error + var err error switch d.RecordType { case "A": @@ -188,10 +187,10 @@ func (d *DnsQuery) parseRecordType() (uint16, error) { case "TXT": recordType = dns.TypeTXT default: - error = errors.New(fmt.Sprintf("Record type %s not recognized", d.RecordType)) + err = fmt.Errorf("Record type %s not recognized", d.RecordType) } - return recordType, error + return recordType, err } func setResult(result ResultType, fields map[string]interface{}, tags map[string]string) { From eeb036911fcf66ec75180350ca14076d50aa8817 Mon Sep 17 00:00:00 2001 From: Oleg Kovalov Date: Fri, 7 Jun 2019 21:25:55 +0200 Subject: [PATCH 0903/1815] Compile regexp once in zookeeper input (#5969) --- plugins/inputs/zookeeper/zookeeper.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/zookeeper/zookeeper.go b/plugins/inputs/zookeeper/zookeeper.go index ad990f28c..9c9a2fa77 100644 --- a/plugins/inputs/zookeeper/zookeeper.go +++ b/plugins/inputs/zookeeper/zookeeper.go @@ -17,6 +17,8 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +var zookeeperFormatRE = regexp.MustCompile(`^zk_(\w+)\s+([\w\.\-]+)`) + // Zookeeper is a zookeeper plugin type Zookeeper struct { Servers []string @@ -136,9 +138,7 @@ func (z *Zookeeper) gatherServer(ctx context.Context, address string, acc telegr fields := make(map[string]interface{}) for scanner.Scan() { line := scanner.Text() - - re := regexp.MustCompile(`^zk_(\w+)\s+([\w\.\-]+)`) - parts := re.FindStringSubmatch(string(line)) + parts := zookeeperFormatRE.FindStringSubmatch(string(line)) if len(parts) != 3 { return fmt.Errorf("unexpected line in mntr response: %q", line) From 07d3cd4ad79ad3c1bc6268a44bba8e9d9810b0b7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 11 Jun 2019 11:14:54 -0700 Subject: [PATCH 0904/1815] Update release notes --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c59858ca3..7aa8c202b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,9 @@ - The `uptime_format` field in the system input has been deprecated, use the `uptime` field instead. 
+- The `cloudwatch` input has been updated to use a more efficient API, it now + requires `GetMetricData` permissions instead of `GetMetricStatistics`. The + `units` tag is not available from this API and is no longer collected. #### New Inputs From df8faab68b637445701f77a85c8aada9ac0ba49f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 11 Jun 2019 11:18:08 -0700 Subject: [PATCH 0905/1815] Set Telegraf 1.11.0 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7aa8c202b..436b33083 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## v1.11 [unreleased] +## v1.11 [2019-06-11] #### Release Notes From 968714054aabc67239aee3fb1ef9698c4dce3d9f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 11 Jun 2019 13:06:55 -0700 Subject: [PATCH 0906/1815] Fix title in wavefront serializer readme --- plugins/serializers/wavefront/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/serializers/wavefront/README.md b/plugins/serializers/wavefront/README.md index 7a6594da3..8ab77148d 100644 --- a/plugins/serializers/wavefront/README.md +++ b/plugins/serializers/wavefront/README.md @@ -1,4 +1,4 @@ -# Example +# Wavefront The `wavefront` serializer translates the Telegraf metric format to the [Wavefront Data Format](https://docs.wavefront.com/wavefront_data_format.html). From 5de7bdf9060bac02bc55e6ec3da387b271fba5a2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 12 Jun 2019 10:11:23 -0700 Subject: [PATCH 0907/1815] Fix race condition in cisco telemetry tests (#5979) --- .../cisco_telemetry_gnmi_test.go | 44 ++++++++++++++----- 1 file changed, 32 insertions(+), 12 deletions(-) diff --git a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go index f0adc8f1f..32ad714fd 100644 --- a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go +++ b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go @@ -62,6 +62,13 @@ func (m *mockGNMIServer) Subscribe(server gnmi.GNMI_SubscribeServer) error { require.Equal(m.t, metadata.Get("username"), []string{"theuser"}) require.Equal(m.t, metadata.Get("password"), []string{"thepassword"}) + // Must read request before sending a response; even though we don't check + // the request itself currently. 
+ _, err := server.Recv() + if err != nil { + panic(err) + } + switch m.scenario { case 0: return fmt.Errorf("testerror") @@ -91,7 +98,8 @@ func (m *mockGNMIServer) Subscribe(server gnmi.GNMI_SubscribeServer) error { } func TestGNMIError(t *testing.T) { - listener, _ := net.Listen("tcp", "127.0.0.1:0") + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) server := grpc.NewServer() acc := &testutil.Accumulator{} gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 0, server: server, acc: acc}) @@ -100,8 +108,11 @@ func TestGNMIError(t *testing.T) { Username: "theuser", Password: "thepassword", Encoding: "proto", Redial: internal.Duration{Duration: 1 * time.Second}} - require.Nil(t, c.Start(acc)) - go server.Serve(listener) + require.NoError(t, c.Start(acc)) + go func() { + err := server.Serve(listener) + require.NoError(t, err) + }() acc.WaitError(1) c.Stop() server.Stop() @@ -157,7 +168,8 @@ func mockGNMINotification() *gnmi.Notification { } func TestGNMIMultiple(t *testing.T) { - listener, _ := net.Listen("tcp", "127.0.0.1:0") + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) server := grpc.NewServer() acc := &testutil.Accumulator{} gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 1, server: server, acc: acc}) @@ -168,9 +180,11 @@ func TestGNMIMultiple(t *testing.T) { Subscriptions: []Subscription{{Name: "alias", Origin: "type", Path: "/model", SubscriptionMode: "sample"}}, } - require.Nil(t, c.Start(acc)) - - go server.Serve(listener) + require.NoError(t, c.Start(acc)) + go func() { + err := server.Serve(listener) + require.NoError(t, err) + }() acc.Wait(4) c.Stop() server.Stop() @@ -195,7 +209,8 @@ func TestGNMIMultiple(t *testing.T) { } func TestGNMIMultipleRedial(t *testing.T) { - listener, _ := net.Listen("tcp", "127.0.0.1:0") + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) server := grpc.NewServer() acc := &testutil.Accumulator{} gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 2, server: server, acc: acc}) @@ -206,9 +221,11 @@ func TestGNMIMultipleRedial(t *testing.T) { Subscriptions: []Subscription{{Name: "alias", Origin: "type", Path: "/model", SubscriptionMode: "sample"}}, } - require.Nil(t, c.Start(acc)) - - go server.Serve(listener) + require.NoError(t, c.Start(acc)) + go func() { + err := server.Serve(listener) + require.NoError(t, err) + }() acc.Wait(2) server.Stop() @@ -216,7 +233,10 @@ func TestGNMIMultipleRedial(t *testing.T) { server = grpc.NewServer() gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 3, server: server, acc: acc}) - go server.Serve(listener) + go func() { + err := server.Serve(listener) + require.NoError(t, err) + }() acc.Wait(4) c.Stop() server.Stop() From 8a34d2b27df785e2e3351f925c46eee8735f2fa7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 12 Jun 2019 10:52:04 -0700 Subject: [PATCH 0908/1815] Fix setting mount_points option in disk input (#5982) --- plugins/inputs/disk/disk.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/disk/disk.go b/plugins/inputs/disk/disk.go index 86aefc60f..b2c7e5400 100644 --- a/plugins/inputs/disk/disk.go +++ b/plugins/inputs/disk/disk.go @@ -13,9 +13,9 @@ type DiskStats struct { ps system.PS // Legacy support - Mountpoints []string + Mountpoints []string `toml:"mountpoints"` - MountPoints []string + MountPoints []string `toml:"mount_points"` IgnoreFS []string `toml:"ignore_fs"` } From 1d682b847c1e71d1e473d3f10731f4e7b2afc2ec Mon Sep 17 
00:00:00 2001 From: Daniel Nelson Date: Wed, 12 Jun 2019 11:11:04 -0700 Subject: [PATCH 0909/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 436b33083..aa51a4e32 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +## v1.11.1 [unreleased] + +#### Bugfixes + +- [#5980](https://github.com/influxdata/telegraf/issues/5980): Cannot set mount_points option in disk input. + ## v1.11 [2019-06-11] #### Release Notes From 1a647fb6ba0725e48f60d1a727175a5696056369 Mon Sep 17 00:00:00 2001 From: Charlie Vieth Date: Wed, 12 Jun 2019 14:59:51 -0400 Subject: [PATCH 0910/1815] Improve performance of wavefront serializer (#5842) --- plugins/serializers/wavefront/wavefront.go | 200 ++++++++++-------- .../serializers/wavefront/wavefront_test.go | 49 ++++- 2 files changed, 155 insertions(+), 94 deletions(-) diff --git a/plugins/serializers/wavefront/wavefront.go b/plugins/serializers/wavefront/wavefront.go index 70b87512f..67fa1ae3a 100755 --- a/plugins/serializers/wavefront/wavefront.go +++ b/plugins/serializers/wavefront/wavefront.go @@ -1,11 +1,10 @@ package wavefront import ( - "bytes" - "fmt" "log" "strconv" "strings" + "sync" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs/wavefront" @@ -16,6 +15,8 @@ type WavefrontSerializer struct { Prefix string UseStrict bool SourceOverride []string + scratch buffer + mu sync.Mutex // buffer mutex } // catch many of the invalid chars that could appear in a metric or tag name @@ -48,18 +49,16 @@ func NewSerializer(prefix string, useStrict bool, sourceOverride []string) (*Wav return s, nil } -// Serialize : Serialize based on Wavefront format -func (s *WavefrontSerializer) Serialize(m telegraf.Metric) ([]byte, error) { - out := []byte{} - metricSeparator := "." +func (s *WavefrontSerializer) serialize(buf *buffer, m telegraf.Metric) { + const metricSeparator = "." for fieldName, value := range m.Fields() { var name string if fieldName == "value" { - name = fmt.Sprintf("%s%s", s.Prefix, m.Name()) + name = s.Prefix + m.Name() } else { - name = fmt.Sprintf("%s%s%s%s", s.Prefix, m.Name(), metricSeparator, fieldName) + name = s.Prefix + m.Name() + metricSeparator + fieldName } if s.UseStrict { @@ -70,133 +69,150 @@ func (s *WavefrontSerializer) Serialize(m telegraf.Metric) ([]byte, error) { name = pathReplacer.Replace(name) - metric := &wavefront.MetricPoint{ - Metric: name, - Timestamp: m.Time().Unix(), - } - - metricValue, buildError := buildValue(value, metric.Metric) - if buildError != nil { + metricValue, valid := buildValue(value, name) + if !valid { // bad value continue to next metric continue } - metric.Value = metricValue - source, tags := buildTags(m.Tags(), s) - metric.Source = source - metric.Tags = tags - - out = append(out, formatMetricPoint(metric, s)...) 
+ metric := wavefront.MetricPoint{ + Metric: name, + Timestamp: m.Time().Unix(), + Value: metricValue, + Source: source, + Tags: tags, + } + formatMetricPoint(&s.scratch, &metric, s) } +} + +// Serialize : Serialize based on Wavefront format +func (s *WavefrontSerializer) Serialize(m telegraf.Metric) ([]byte, error) { + s.mu.Lock() + s.scratch.Reset() + s.serialize(&s.scratch, m) + out := s.scratch.Copy() + s.mu.Unlock() return out, nil } func (s *WavefrontSerializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { - var batch bytes.Buffer + s.mu.Lock() + s.scratch.Reset() for _, m := range metrics { - buf, err := s.Serialize(m) - if err != nil { - return nil, err - } - _, err = batch.Write(buf) - if err != nil { - return nil, err + s.serialize(&s.scratch, m) + } + out := s.scratch.Copy() + s.mu.Unlock() + return out, nil +} + +func findSourceTag(mTags map[string]string, s *WavefrontSerializer) string { + if src, ok := mTags["source"]; ok { + delete(mTags, "source") + return src + } + for _, src := range s.SourceOverride { + if source, ok := mTags[src]; ok { + delete(mTags, src) + mTags["telegraf_host"] = mTags["host"] + return source } } - return batch.Bytes(), nil + return mTags["host"] } func buildTags(mTags map[string]string, s *WavefrontSerializer) (string, map[string]string) { - // Remove all empty tags. for k, v := range mTags { if v == "" { delete(mTags, k) } } - - var source string - - if src, ok := mTags["source"]; ok { - source = src - delete(mTags, "source") - } else { - sourceTagFound := false - for _, src := range s.SourceOverride { - for k, v := range mTags { - if k == src { - source = v - mTags["telegraf_host"] = mTags["host"] - sourceTagFound = true - delete(mTags, k) - break - } - } - if sourceTagFound { - break - } - } - - if !sourceTagFound { - source = mTags["host"] - } - } - + source := findSourceTag(mTags, s) delete(mTags, "host") - return tagValueReplacer.Replace(source), mTags } -func buildValue(v interface{}, name string) (float64, error) { +func buildValue(v interface{}, name string) (val float64, valid bool) { switch p := v.(type) { case bool: if p { - return 1, nil - } else { - return 0, nil + return 1, true } + return 0, true case int64: - return float64(v.(int64)), nil + return float64(p), true case uint64: - return float64(v.(uint64)), nil + return float64(p), true case float64: - return v.(float64), nil + return p, true case string: - // return an error but don't log - return 0, fmt.Errorf("string type not supported") + // return false but don't log + return 0, false default: - // return an error and log a debug message - err := fmt.Errorf("unexpected type: %T, with value: %v, for :%s", v, v, name) - log.Printf("D! Serializer [wavefront] %s\n", err.Error()) - return 0, err + // log a debug message + log.Printf("D! 
Serializer [wavefront] unexpected type: %T, with value: %v, for :%s\n", + v, v, name) + return 0, false } } -func formatMetricPoint(metricPoint *wavefront.MetricPoint, s *WavefrontSerializer) []byte { - var buffer bytes.Buffer - buffer.WriteString("\"") - buffer.WriteString(metricPoint.Metric) - buffer.WriteString("\" ") - buffer.WriteString(strconv.FormatFloat(metricPoint.Value, 'f', 6, 64)) - buffer.WriteString(" ") - buffer.WriteString(strconv.FormatInt(metricPoint.Timestamp, 10)) - buffer.WriteString(" source=\"") - buffer.WriteString(metricPoint.Source) - buffer.WriteString("\"") +func formatMetricPoint(b *buffer, metricPoint *wavefront.MetricPoint, s *WavefrontSerializer) []byte { + b.WriteChar('"') + b.WriteString(metricPoint.Metric) + b.WriteString(`" `) + b.WriteFloat64(metricPoint.Value) + b.WriteChar(' ') + b.WriteUint64(uint64(metricPoint.Timestamp)) + b.WriteString(` source="`) + b.WriteString(metricPoint.Source) + b.WriteChar('"') for k, v := range metricPoint.Tags { - buffer.WriteString(" \"") + b.WriteString(` "`) if s.UseStrict { - buffer.WriteString(strictSanitizedChars.Replace(k)) + b.WriteString(strictSanitizedChars.Replace(k)) } else { - buffer.WriteString(sanitizedChars.Replace(k)) + b.WriteString(sanitizedChars.Replace(k)) } - buffer.WriteString("\"=\"") - buffer.WriteString(tagValueReplacer.Replace(v)) - buffer.WriteString("\"") + b.WriteString(`"="`) + b.WriteString(tagValueReplacer.Replace(v)) + b.WriteChar('"') } - buffer.WriteString("\n") + b.WriteChar('\n') - return buffer.Bytes() + return *b +} + +type buffer []byte + +func (b *buffer) Reset() { *b = (*b)[:0] } + +func (b *buffer) Copy() []byte { + p := make([]byte, len(*b)) + copy(p, *b) + return p +} + +func (b *buffer) WriteString(s string) { + *b = append(*b, s...) +} + +// This is named WriteChar instead of WriteByte because the 'stdmethods' check +// of 'go vet' wants WriteByte to have the signature: +// +// func (b *buffer) WriteByte(c byte) error { ... 
} +// +func (b *buffer) WriteChar(c byte) { + *b = append(*b, c) +} + +func (b *buffer) WriteUint64(val uint64) { + *b = strconv.AppendUint(*b, val, 10) +} + +func (b *buffer) WriteFloat64(val float64) { + *b = strconv.AppendFloat(*b, val, 'f', 6, 64) } diff --git a/plugins/serializers/wavefront/wavefront_test.go b/plugins/serializers/wavefront/wavefront_test.go index 3230ce515..548326e70 100755 --- a/plugins/serializers/wavefront/wavefront_test.go +++ b/plugins/serializers/wavefront/wavefront_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs/wavefront" "github.com/stretchr/testify/assert" @@ -132,7 +133,7 @@ func TestFormatMetricPoint(t *testing.T) { s := WavefrontSerializer{} for _, pt := range pointTests { - bout := formatMetricPoint(pt.ptIn, &s) + bout := formatMetricPoint(new(buffer), pt.ptIn, &s) sout := string(bout[:]) if sout != pt.out { t.Errorf("\nexpected\t%s\nreceived\t%s\n", pt.out, sout) @@ -160,7 +161,7 @@ func TestUseStrict(t *testing.T) { s := WavefrontSerializer{UseStrict: true} for _, pt := range pointTests { - bout := formatMetricPoint(pt.ptIn, &s) + bout := formatMetricPoint(new(buffer), pt.ptIn, &s) sout := string(bout[:]) if sout != pt.out { t.Errorf("\nexpected\t%s\nreceived\t%s\n", pt.out, sout) @@ -293,3 +294,47 @@ func TestSerializeMetricPrefix(t *testing.T) { expS := []string{fmt.Sprintf("\"telegraf.cpu.usage.idle\" 91.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} assert.Equal(t, expS, mS) } + +func benchmarkMetrics(b *testing.B) [4]telegraf.Metric { + b.Helper() + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + "host": "realHost", + } + newMetric := func(v interface{}) telegraf.Metric { + fields := map[string]interface{}{ + "usage_idle": v, + } + m, err := metric.New("cpu", tags, fields, now) + if err != nil { + b.Fatal(err) + } + return m + } + return [4]telegraf.Metric{ + newMetric(91.5), + newMetric(91), + newMetric(true), + newMetric(false), + } +} + +func BenchmarkSerialize(b *testing.B) { + var s WavefrontSerializer + metrics := benchmarkMetrics(b) + b.ResetTimer() + for i := 0; i < b.N; i++ { + s.Serialize(metrics[i%len(metrics)]) + } +} + +func BenchmarkSerializeBatch(b *testing.B) { + var s WavefrontSerializer + m := benchmarkMetrics(b) + metrics := m[:] + b.ResetTimer() + for i := 0; i < b.N; i++ { + s.SerializeBatch(metrics) + } +} From 4b5df84b3d5f33ace1e81395d447df210e004762 Mon Sep 17 00:00:00 2001 From: guitoulefoux <51740847+guitoulefoux@users.noreply.github.com> Date: Thu, 13 Jun 2019 12:48:06 +0200 Subject: [PATCH 0911/1815] Fix subtable name in jolokia java.conf (#5985) --- plugins/inputs/jolokia2/examples/java.conf | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/jolokia2/examples/java.conf b/plugins/inputs/jolokia2/examples/java.conf index 32a68195c..aa9bc6852 100644 --- a/plugins/inputs/jolokia2/examples/java.conf +++ b/plugins/inputs/jolokia2/examples/java.conf @@ -23,17 +23,17 @@ mbean = "java.lang:name=G1 Young Generation,type=GarbageCollector" paths = ["LastGcInfo/duration", "LastGcInfo/GcThreadCount", "LastGcInfo/memoryUsageAfterGc"] - [[inputs.jolokia2_agent.metrics]] + [[inputs.jolokia2_agent.metric]] name = "java_threading" mbean = "java.lang:type=Threading" paths = ["TotalStartedThreadCount", "ThreadCount", "DaemonThreadCount", "PeakThreadCount"] - [[inputs.jolokia2_agent.metrics]] + [[inputs.jolokia2_agent.metric]] name = 
"java_class_loading" mbean = "java.lang:type=ClassLoading" paths = ["LoadedClassCount", "UnloadedClassCount", "TotalLoadedClassCount"] - [[inputs.jolokia2_agent.metrics]] + [[inputs.jolokia2_agent.metric]] name = "java_memory_pool" mbean = "java.lang:name=*,type=MemoryPool" paths = ["Usage", "PeakUsage", "CollectionUsage"] From ae1aee320754c595b9aea1a5754ac42b1d27758a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 13 Jun 2019 13:33:26 -0700 Subject: [PATCH 0912/1815] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index aa51a4e32..6880430bc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## v1.11.1 [unreleased] +#### Features + +- [#5842](https://github.com/influxdata/telegraf/pull/5842): Improve performance of wavefront serializer. + #### Bugfixes - [#5980](https://github.com/influxdata/telegraf/issues/5980): Cannot set mount_points option in disk input. From 58e6eb6f07e0b4976b5a7046045a4561bb25fc13 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 13 Jun 2019 10:27:17 -0700 Subject: [PATCH 0913/1815] Clarify replacement string behavior in regex processor --- plugins/processors/regex/README.md | 4 +++- plugins/processors/regex/regex.go | 6 ++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/plugins/processors/regex/README.md b/plugins/processors/regex/README.md index c9eec037b..d37b1ea88 100644 --- a/plugins/processors/regex/README.md +++ b/plugins/processors/regex/README.md @@ -14,10 +14,12 @@ The `regex` plugin transforms tag and field values with regex pattern. If `resul key = "resp_code" ## Regular expression to match on a tag value pattern = "^(\\d)\\d\\d$" - ## Pattern for constructing a new value (${1} represents first subgroup) + ## Matches of the pattern will be replaced with this string. Use ${1} + ## notation to use the text of the first submatch. replacement = "${1}xx" [[processors.regex.fields]] + ## Field to change key = "request" ## All the power of the Go regular expressions available here ## For example, named subgroups diff --git a/plugins/processors/regex/regex.go b/plugins/processors/regex/regex.go index f73ed06b6..b922cd2d5 100644 --- a/plugins/processors/regex/regex.go +++ b/plugins/processors/regex/regex.go @@ -27,14 +27,16 @@ const sampleConfig = ` # key = "resp_code" # ## Regular expression to match on a tag value # pattern = "^(\\d)\\d\\d$" - # ## Pattern for constructing a new value (${1} represents first subgroup) + # ## Matches of the pattern will be replaced with this string. Use ${1} + # ## notation to use the text of the first submatch. 
# replacement = "${1}xx" # [[processors.regex.fields]] + # ## Field to change # key = "request" # ## All the power of the Go regular expressions available here # ## For example, named subgroups - # pattern = "^/api(?P<method>/[\\w/]+)\\S*" + # pattern = "^/api(?P<method>/[\\w/]+)\\S*" # replacement = "${method}" # ## If result_key is present, a new field will be created # ## instead of changing existing field From d260437318184d77abf1e5f73ee939cbbb47a78a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 13 Jun 2019 13:30:38 -0700 Subject: [PATCH 0914/1815] Add example output from the collectd parser --- plugins/parsers/collectd/README.md | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/plugins/parsers/collectd/README.md b/plugins/parsers/collectd/README.md index 06f14d6d4..cc7daa4f6 100644 --- a/plugins/parsers/collectd/README.md +++ b/plugins/parsers/collectd/README.md @@ -20,8 +20,8 @@ You can also change the path to the typesdb or add additional typesdb using ### Configuration ```toml -[[inputs.file]] - files = ["example"] +[[inputs.socket_listener]] + service_address = "udp://:25826" ## Data format to consume. ## Each data format has its own unique set of configuration options, read @@ -42,3 +42,16 @@ You can also change the path to the typesdb or add additional typesdb using ## "split" is the default behavior for backward compatibility with previous versions of influxdb. collectd_parse_multivalue = "split" ``` + +### Example Output + +``` +memory,type=memory,type_instance=buffered value=2520051712 1560455990829955922 +memory,type=memory,type_instance=used value=3710791680 1560455990829955922 +memory,type=memory,type_instance=buffered value=2520047616 1560455980830417318 +memory,type=memory,type_instance=cached value=9472626688 1560455980830417318 +memory,type=memory,type_instance=slab_recl value=2088894464 1560455980830417318 +memory,type=memory,type_instance=slab_unrecl value=146984960 1560455980830417318 +memory,type=memory,type_instance=free value=2978258944 1560455980830417318 +memory,type=memory,type_instance=used value=3707047936 1560455980830417318 +``` From 4b6e791908daa744ffe31898b08b2dd4497a17c8 Mon Sep 17 00:00:00 2001 From: Marc Venturini Date: Sat, 15 Jun 2019 02:27:24 +0800 Subject: [PATCH 0915/1815] Fix sensor read error stops reporting of all sensors in temp input (#5941) --- CHANGELOG.md | 1 + Gopkg.lock | 6 +++--- plugins/inputs/procstat/README.md | 5 ++--- plugins/inputs/procstat/procstat.go | 1 - plugins/inputs/system/ps.go | 9 ++++++++- plugins/parsers/influx/machine_test.go | 4 ++-- plugins/parsers/influx/parser_test.go | 3 +-- plugins/serializers/influx/influx_test.go | 3 +-- 8 files changed, 18 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6880430bc..5e05d835e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -75,6 +75,7 @@ #### Bugfixes +- [#5692](https://github.com/influxdata/telegraf/pull/5692): Temperature input plugin stops working when WiFi is turned off. - [#5631](https://github.com/influxdata/telegraf/pull/5631): Create Windows service only when specified or in service manager. - [#5730](https://github.com/influxdata/telegraf/pull/5730): Don't start telegraf when stale pidfile found. - [#5477](https://github.com/influxdata/telegraf/pull/5477): Support Minecraft server 1.13 and newer in minecraft input.
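The named-subgroup sample config in PATCH 0913 above maps directly onto Go's `regexp` syntax: `(?P<method>...)` captures a group that the `replacement` string can reference as `${method}`. A small runnable sketch of what that replacement does (the input URL is just an illustrative value):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern and replacement as the regex processor sample config.
	re := regexp.MustCompile(`^/api(?P<method>/[\w/]+)\S*`)

	// The named group captures "/search/v2"; the trailing \S* swallows the
	// query string, so only the captured path survives the replacement.
	fmt.Println(re.ReplaceAllString("/api/search/v2?q=telegraf", "${method}"))
	// Output: /search/v2
}
```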
diff --git a/Gopkg.lock b/Gopkg.lock index 1f52f0087..8f3de4211 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1031,7 +1031,7 @@ version = "v1.2.0" [[projects]] - digest = "1:d77a85cf43b70ae61fa2543d402d782b40dca0f5f41413839b5f916782b0fab9" + digest = "1:8b478b1d29180a608666c36ea8dc160000e3466abb5a5354e571429679e972c0" name = "github.com/shirou/gopsutil" packages = [ "cpu", @@ -1044,8 +1044,8 @@ "process", ] pruneopts = "" - revision = "6c6abd6d1666d6b27f1c261e0f850441ba22aa3a" - version = "v2.19.02" + revision = "5335e3fd506df4cb63de0a8239c7461d23063be6" + version = "v2.19.05" [[projects]] branch = "master" diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index 277ec2c56..8ce834a70 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -96,7 +96,6 @@ implemented as a WMI query. The pattern allows fuzzy matching using only - cpu_time_nice (float) - cpu_time_soft_irq (float) - cpu_time_steal (float) - - cpu_time_stolen (float) - cpu_time_system (float) - cpu_time_user (float) - cpu_usage (float) @@ -164,6 +163,6 @@ implemented as a WMI query. The pattern allows fuzzy matching using only ### Example Output: ``` -procstat,pidfile=/var/run/lxc/dnsmasq.pid,process_name=dnsmasq rlimit_file_locks_soft=2147483647i,rlimit_signals_pending_hard=1758i,voluntary_context_switches=478i,read_bytes=307200i,cpu_time_user=0.01,cpu_time_guest=0,memory_swap=0i,memory_locked=0i,rlimit_num_fds_hard=4096i,rlimit_nice_priority_hard=0i,num_fds=11i,involuntary_context_switches=20i,read_count=23i,memory_rss=1388544i,rlimit_memory_rss_soft=2147483647i,rlimit_memory_rss_hard=2147483647i,nice_priority=20i,rlimit_cpu_time_hard=2147483647i,cpu_time=0i,write_bytes=0i,cpu_time_idle=0,cpu_time_nice=0,memory_data=229376i,memory_stack=135168i,rlimit_cpu_time_soft=2147483647i,rlimit_memory_data_hard=2147483647i,rlimit_memory_locked_hard=65536i,rlimit_signals_pending_soft=1758i,write_count=11i,cpu_time_iowait=0,cpu_time_steal=0,cpu_time_stolen=0,rlimit_memory_stack_soft=8388608i,cpu_time_system=0.02,cpu_time_guest_nice=0,rlimit_memory_locked_soft=65536i,rlimit_memory_vms_soft=2147483647i,rlimit_file_locks_hard=2147483647i,rlimit_realtime_priority_hard=0i,pid=828i,num_threads=1i,cpu_time_soft_irq=0,rlimit_memory_vms_hard=2147483647i,rlimit_realtime_priority_soft=0i,memory_vms=15884288i,rlimit_memory_stack_hard=2147483647i,cpu_time_irq=0,rlimit_memory_data_soft=2147483647i,rlimit_num_fds_soft=1024i,signals_pending=0i,rlimit_nice_priority_soft=0i,realtime_priority=0i -procstat,exe=influxd,process_name=influxd 
rlimit_num_fds_hard=16384i,rlimit_signals_pending_hard=1758i,realtime_priority=0i,rlimit_memory_vms_hard=2147483647i,rlimit_signals_pending_soft=1758i,cpu_time_stolen=0,rlimit_memory_stack_hard=2147483647i,rlimit_realtime_priority_hard=0i,cpu_time=0i,pid=500i,voluntary_context_switches=975i,cpu_time_idle=0,memory_rss=3072000i,memory_locked=0i,rlimit_nice_priority_soft=0i,signals_pending=0i,nice_priority=20i,read_bytes=823296i,cpu_time_soft_irq=0,rlimit_memory_data_hard=2147483647i,rlimit_memory_locked_soft=65536i,write_count=8i,cpu_time_irq=0,memory_vms=33501184i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,rlimit_memory_vms_soft=2147483647i,rlimit_nice_priority_hard=0i,num_fds=29i,memory_data=229376i,rlimit_cpu_time_soft=2147483647i,rlimit_file_locks_soft=2147483647i,num_threads=1i,write_bytes=0i,cpu_time_steal=0,rlimit_memory_rss_hard=2147483647i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_usage=0,rlimit_memory_locked_hard=65536i,rlimit_file_locks_hard=2147483647i,involuntary_context_switches=38i,read_count=16851i,memory_swap=0i,rlimit_memory_data_soft=2147483647i,cpu_time_user=0.11,rlimit_cpu_time_hard=2147483647i,rlimit_num_fds_soft=16384i,rlimit_realtime_priority_soft=0i,cpu_time_system=0.27,cpu_time_nice=0,memory_stack=135168i,rlimit_memory_rss_soft=2147483647i +procstat,pidfile=/var/run/lxc/dnsmasq.pid,process_name=dnsmasq rlimit_file_locks_soft=2147483647i,rlimit_signals_pending_hard=1758i,voluntary_context_switches=478i,read_bytes=307200i,cpu_time_user=0.01,cpu_time_guest=0,memory_swap=0i,memory_locked=0i,rlimit_num_fds_hard=4096i,rlimit_nice_priority_hard=0i,num_fds=11i,involuntary_context_switches=20i,read_count=23i,memory_rss=1388544i,rlimit_memory_rss_soft=2147483647i,rlimit_memory_rss_hard=2147483647i,nice_priority=20i,rlimit_cpu_time_hard=2147483647i,cpu_time=0i,write_bytes=0i,cpu_time_idle=0,cpu_time_nice=0,memory_data=229376i,memory_stack=135168i,rlimit_cpu_time_soft=2147483647i,rlimit_memory_data_hard=2147483647i,rlimit_memory_locked_hard=65536i,rlimit_signals_pending_soft=1758i,write_count=11i,cpu_time_iowait=0,cpu_time_steal=0,rlimit_memory_stack_soft=8388608i,cpu_time_system=0.02,cpu_time_guest_nice=0,rlimit_memory_locked_soft=65536i,rlimit_memory_vms_soft=2147483647i,rlimit_file_locks_hard=2147483647i,rlimit_realtime_priority_hard=0i,pid=828i,num_threads=1i,cpu_time_soft_irq=0,rlimit_memory_vms_hard=2147483647i,rlimit_realtime_priority_soft=0i,memory_vms=15884288i,rlimit_memory_stack_hard=2147483647i,cpu_time_irq=0,rlimit_memory_data_soft=2147483647i,rlimit_num_fds_soft=1024i,signals_pending=0i,rlimit_nice_priority_soft=0i,realtime_priority=0i +procstat,exe=influxd,process_name=influxd 
rlimit_num_fds_hard=16384i,rlimit_signals_pending_hard=1758i,realtime_priority=0i,rlimit_memory_vms_hard=2147483647i,rlimit_signals_pending_soft=1758i,rlimit_memory_stack_hard=2147483647i,rlimit_realtime_priority_hard=0i,cpu_time=0i,pid=500i,voluntary_context_switches=975i,cpu_time_idle=0,memory_rss=3072000i,memory_locked=0i,rlimit_nice_priority_soft=0i,signals_pending=0i,nice_priority=20i,read_bytes=823296i,cpu_time_soft_irq=0,rlimit_memory_data_hard=2147483647i,rlimit_memory_locked_soft=65536i,write_count=8i,cpu_time_irq=0,memory_vms=33501184i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,rlimit_memory_vms_soft=2147483647i,rlimit_nice_priority_hard=0i,num_fds=29i,memory_data=229376i,rlimit_cpu_time_soft=2147483647i,rlimit_file_locks_soft=2147483647i,num_threads=1i,write_bytes=0i,cpu_time_steal=0,rlimit_memory_rss_hard=2147483647i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_usage=0,rlimit_memory_locked_hard=65536i,rlimit_file_locks_hard=2147483647i,involuntary_context_switches=38i,read_count=16851i,memory_swap=0i,rlimit_memory_data_soft=2147483647i,cpu_time_user=0.11,rlimit_cpu_time_hard=2147483647i,rlimit_num_fds_soft=16384i,rlimit_realtime_priority_soft=0i,cpu_time_system=0.27,cpu_time_nice=0,memory_stack=135168i,rlimit_memory_rss_soft=2147483647i ``` diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index c36970421..a31f4340e 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -226,7 +226,6 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) { fields[prefix+"cpu_time_irq"] = cpu_time.Irq fields[prefix+"cpu_time_soft_irq"] = cpu_time.Softirq fields[prefix+"cpu_time_steal"] = cpu_time.Steal - fields[prefix+"cpu_time_stolen"] = cpu_time.Stolen fields[prefix+"cpu_time_guest"] = cpu_time.Guest fields[prefix+"cpu_time_guest_nice"] = cpu_time.GuestNice } diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go index 256aca059..824dbe446 100644 --- a/plugins/inputs/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -171,7 +171,14 @@ func (s *SystemPS) SwapStat() (*mem.SwapMemoryStat, error) { } func (s *SystemPS) Temperature() ([]host.TemperatureStat, error) { - return host.SensorsTemperatures() + temp, err := host.SensorsTemperatures() + if err != nil { + _, ok := err.(*host.Warnings) + if !ok { + return temp, err + } + } + return temp, nil } func (s *SystemPSDisk) Partitions(all bool) ([]disk.PartitionStat, error) { diff --git a/plugins/parsers/influx/machine_test.go b/plugins/parsers/influx/machine_test.go index a1c921ef1..725634ae8 100644 --- a/plugins/parsers/influx/machine_test.go +++ b/plugins/parsers/influx/machine_test.go @@ -1773,7 +1773,7 @@ func BenchmarkMachine(b *testing.B) { } func TestMachineProcstat(t *testing.T) { - input := []byte("procstat,exe=bash,process_name=bash 
voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,cpu_time_stolen=0,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000") + input := []byte("procstat,exe=bash,process_name=bash voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000") handler := &TestingHandler{} fsm := influx.NewMachine(handler) fsm.SetData(input) @@ -1786,7 +1786,7 @@ func TestMachineProcstat(t *testing.T) { } func BenchmarkMachineProcstat(b *testing.B) { - input := []byte("procstat,exe=bash,process_name=bash 
voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,cpu_time_stolen=0,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000") + input := []byte("procstat,exe=bash,process_name=bash voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000") handler := &BenchmarkingHandler{} fsm := influx.NewMachine(handler) for n := 0; n < b.N; n++ { diff --git a/plugins/parsers/influx/parser_test.go b/plugins/parsers/influx/parser_test.go index 4d30eeb0b..50ab1e10f 100644 --- a/plugins/parsers/influx/parser_test.go +++ b/plugins/parsers/influx/parser_test.go @@ -576,7 +576,7 @@ var ptests = []struct { }, { name: "procstat", - input: []byte("procstat,exe=bash,process_name=bash 
voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,cpu_time_stolen=0,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000"), + input: []byte("procstat,exe=bash,process_name=bash voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000"), metrics: []telegraf.Metric{ Metric( metric.New( @@ -595,7 +595,6 @@ var ptests = []struct { "cpu_time_nice": float64(0), "cpu_time_soft_irq": float64(0), "cpu_time_steal": float64(0), - "cpu_time_stolen": float64(0), "cpu_time_system": float64(0), "cpu_time_user": float64(0.02), "cpu_usage": float64(0), diff --git a/plugins/serializers/influx/influx_test.go b/plugins/serializers/influx/influx_test.go index 8102bd973..eae3d755c 100644 --- a/plugins/serializers/influx/influx_test.go +++ b/plugins/serializers/influx/influx_test.go @@ -385,7 +385,6 @@ var tests = []struct { "cpu_time_nice": float64(0), "cpu_time_soft_irq": float64(0), "cpu_time_steal": float64(0), - "cpu_time_stolen": float64(0), "cpu_time_system": float64(0), "cpu_time_user": float64(0.02), "cpu_usage": float64(0), @@ -433,7 +432,7 @@ var tests = []struct { time.Unix(0, 1517620624000000000), ), ), - output: 
[]byte("procstat,exe=bash,process_name=bash cpu_time=0i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_time_idle=0,cpu_time_iowait=0,cpu_time_irq=0,cpu_time_nice=0,cpu_time_soft_irq=0,cpu_time_steal=0,cpu_time_stolen=0,cpu_time_system=0,cpu_time_user=0.02,cpu_usage=0,involuntary_context_switches=2i,memory_data=1576960i,memory_locked=0i,memory_rss=5103616i,memory_stack=139264i,memory_swap=0i,memory_vms=21659648i,nice_priority=20i,num_fds=4i,num_threads=1i,pid=29417i,read_bytes=0i,read_count=259i,realtime_priority=0i,rlimit_cpu_time_hard=2147483647i,rlimit_cpu_time_soft=2147483647i,rlimit_file_locks_hard=2147483647i,rlimit_file_locks_soft=2147483647i,rlimit_memory_data_hard=2147483647i,rlimit_memory_data_soft=2147483647i,rlimit_memory_locked_hard=65536i,rlimit_memory_locked_soft=65536i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_rss_soft=2147483647i,rlimit_memory_stack_hard=2147483647i,rlimit_memory_stack_soft=8388608i,rlimit_memory_vms_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_nice_priority_hard=0i,rlimit_nice_priority_soft=0i,rlimit_num_fds_hard=4096i,rlimit_num_fds_soft=1024i,rlimit_realtime_priority_hard=0i,rlimit_realtime_priority_soft=0i,rlimit_signals_pending_hard=78994i,rlimit_signals_pending_soft=78994i,signals_pending=0i,voluntary_context_switches=42i,write_bytes=106496i,write_count=35i 1517620624000000000\n"), + output: []byte("procstat,exe=bash,process_name=bash cpu_time=0i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_time_idle=0,cpu_time_iowait=0,cpu_time_irq=0,cpu_time_nice=0,cpu_time_soft_irq=0,cpu_time_steal=0,cpu_time_system=0,cpu_time_user=0.02,cpu_usage=0,involuntary_context_switches=2i,memory_data=1576960i,memory_locked=0i,memory_rss=5103616i,memory_stack=139264i,memory_swap=0i,memory_vms=21659648i,nice_priority=20i,num_fds=4i,num_threads=1i,pid=29417i,read_bytes=0i,read_count=259i,realtime_priority=0i,rlimit_cpu_time_hard=2147483647i,rlimit_cpu_time_soft=2147483647i,rlimit_file_locks_hard=2147483647i,rlimit_file_locks_soft=2147483647i,rlimit_memory_data_hard=2147483647i,rlimit_memory_data_soft=2147483647i,rlimit_memory_locked_hard=65536i,rlimit_memory_locked_soft=65536i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_rss_soft=2147483647i,rlimit_memory_stack_hard=2147483647i,rlimit_memory_stack_soft=8388608i,rlimit_memory_vms_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_nice_priority_hard=0i,rlimit_nice_priority_soft=0i,rlimit_num_fds_hard=4096i,rlimit_num_fds_soft=1024i,rlimit_realtime_priority_hard=0i,rlimit_realtime_priority_soft=0i,rlimit_signals_pending_hard=78994i,rlimit_signals_pending_soft=78994i,signals_pending=0i,voluntary_context_switches=42i,write_bytes=106496i,write_count=35i 1517620624000000000\n"), }, } From 4cfd70b6c011661a5a08bc59f57e60606900065c Mon Sep 17 00:00:00 2001 From: Steven Barth Date: Fri, 14 Jun 2019 20:29:06 +0200 Subject: [PATCH 0916/1815] Omit keys when creating measurement names for GNMI telemetry (#5986) --- .../cisco_telemetry_gnmi.go | 32 ++++++++++++------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go index bb8a0dd7c..9ab920bed 100644 --- a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go +++ b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go @@ -101,12 +101,16 @@ func (c *CiscoTelemetryGNMI) Start(acc telegraf.Accumulator) error { // Invert explicit alias list and prefill subscription names c.aliases = make(map[string]string, 
len(c.Subscriptions)+len(c.Aliases)) for _, subscription := range c.Subscriptions { - path := subscription.Path - if len(subscription.Origin) > 0 { - path = subscription.Origin + ":" + path + // Build the subscription path without keys + gnmiPath, err := parsePath(subscription.Origin, subscription.Path, "") + if err != nil { + return err } + path, _ := c.handlePath(gnmiPath, nil, "") name := subscription.Name + + // If the user didn't provide a measurement name, use last path element if len(name) == 0 { name = path[strings.LastIndexByte(path, '/')+1:] } @@ -269,7 +273,10 @@ func (c *CiscoTelemetryGNMI) handleSubscribeResponse(address string, reply *gnmi // Group metrics for key, val := range fields { - grouper.Add(name, tags, timestamp, key[len(aliasPath)+1:], val) + if len(aliasPath) > 0 { + key = key[len(aliasPath)+1:] + } + grouper.Add(name, tags, timestamp, key, val) } lastAliasPath = aliasPath @@ -347,14 +354,17 @@ func (c *CiscoTelemetryGNMI) handlePath(path *gnmi.Path, tags map[string]string, aliasPath = name } - for key, val := range elem.Key { - key = strings.Replace(key, "-", "_", -1) + if tags != nil { + for key, val := range elem.Key { + key = strings.Replace(key, "-", "_", -1) + + // Use short-form of key if possible + if _, exists := tags[key]; exists { + tags[name+"/"+key] = val + } else { + tags[key] = val + } - // Use short-form of key if possible - if _, exists := tags[key]; exists { - tags[name+"/"+key] = val - } else { - tags[key] = val } } } From 63916ae1cf34c3ac6cb492027cb5161983aff50a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 14 Jun 2019 11:29:58 -0700 Subject: [PATCH 0917/1815] Add default url for http output (#5976) --- plugins/outputs/http/README.md | 10 +++++----- plugins/outputs/http/http.go | 15 ++++++++++----- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/plugins/outputs/http/README.md b/plugins/outputs/http/README.md index 5697b6030..0229c0e6a 100644 --- a/plugins/outputs/http/README.md +++ b/plugins/outputs/http/README.md @@ -9,7 +9,7 @@ data formats. For data_formats that support batching, metrics are sent in batch # A plugin that can transmit metrics over HTTP [[outputs.http]] ## URL is the address to send metrics to - url = "http://127.0.0.1:8080/metric" + url = "http://127.0.0.1:8080/telegraf" ## Timeout for HTTP message # timeout = "5s" @@ -40,12 +40,12 @@ data formats. For data_formats that support batching, metrics are sent in batch ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md # data_format = "influx" + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "identity" + ## Additional HTTP headers # [outputs.http.headers] # # Should be set manually to "application/json" for json data_format # Content-Type = "text/plain; charset=utf-8" - - ## HTTP Content-Encoding for write request body, can be set to "gzip" to - ## compress body or "identity" to apply no encoding. 
- # content_encoding = "identity" ``` diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 8ef77976f..1967b6ef9 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -19,9 +19,13 @@ import ( "golang.org/x/oauth2/clientcredentials" ) +const ( + defaultURL = "http://127.0.0.1:8080/telegraf" +) + var sampleConfig = ` ## URL is the address to send metrics to - url = "http://127.0.0.1:8080/metric" + url = "http://127.0.0.1:8080/telegraf" ## Timeout for HTTP message # timeout = "5s" @@ -52,14 +56,14 @@ var sampleConfig = ` ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md # data_format = "influx" + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "identity" + ## Additional HTTP headers # [outputs.http.headers] # # Should be set manually to "application/json" for json data_format # Content-Type = "text/plain; charset=utf-8" - - ## HTTP Content-Encoding for write request body, can be set to "gzip" to - ## compress body or "identity" to apply no encoding. - # content_encoding = "identity" ` const ( @@ -218,6 +222,7 @@ func init() { return &HTTP{ Timeout: internal.Duration{Duration: defaultClientTimeout}, Method: defaultMethod, + URL: defaultURL, } }) } From d38f7600f805fec869b2280e6bc13a6aabc08f9b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 14 Jun 2019 11:36:18 -0700 Subject: [PATCH 0918/1815] Update changelog --- CHANGELOG.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5e05d835e..cbd58710a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## v1.11.1 [unreleased] +## v1.12 [unreleased] #### Features @@ -6,7 +6,14 @@ #### Bugfixes +- [#5692](https://github.com/influxdata/telegraf/issues/5692): Fix sensor read error stops reporting of all sensors in temp input. + +## v1.11.1 [unreleased] + +#### Bugfixes + - [#5980](https://github.com/influxdata/telegraf/issues/5980): Cannot set mount_points option in disk input. +- [#5983](https://github.com/influxdata/telegraf/issues/5983): Omit keys when creating measurement names for GNMI telemetry. ## v1.11 [2019-06-11] From cc2f3b29e1d86b7305919aa818997e8e736cc069 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Moreno=20Hanshing?= Date: Fri, 14 Jun 2019 15:00:14 -0400 Subject: [PATCH 0919/1815] Don't consider pid of 0 when using systemd lookup in procstat (#5972) --- plugins/inputs/procstat/procstat.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index a31f4340e..5bbb11d45 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -397,7 +397,7 @@ func (p *Procstat) systemdUnitPIDs() ([]PID, error) { if !bytes.Equal(kv[0], []byte("MainPID")) { continue } - if len(kv[1]) == 0 { + if len(kv[1]) == 0 || bytes.Equal(kv[1], []byte("0")) { return nil, nil } pid, err := strconv.Atoi(string(kv[1])) From bf03b43d2a2ce4ca20cc5f31ad96975c96ba1baa Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 14 Jun 2019 12:01:17 -0700 Subject: [PATCH 0920/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cbd58710a..ada0c5c44 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ - [#5980](https://github.com/influxdata/telegraf/issues/5980): Cannot set mount_points option in disk input. 
- [#5983](https://github.com/influxdata/telegraf/issues/5983): Omit keys when creating measurement names for GNMI telemetry. +- [#5972](https://github.com/influxdata/telegraf/issues/5972): Don't consider pid of 0 when using systemd lookup in procstat. ## v1.11 [2019-06-11] From fec1b3ec19bdb3c2b8cf2642dbf7caaf757a8321 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Fri, 14 Jun 2019 13:03:44 -0600 Subject: [PATCH 0921/1815] Support array of addresses in http_response input (#5975) --- plugins/inputs/http_response/README.md | 4 ++ plugins/inputs/http_response/http_response.go | 69 ++++++++++++------- 2 files changed, 48 insertions(+), 25 deletions(-) diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md index 7c66928e2..54e229b30 100644 --- a/plugins/inputs/http_response/README.md +++ b/plugins/inputs/http_response/README.md @@ -7,9 +7,13 @@ This input plugin checks HTTP/HTTPS connections. ``` # HTTP/HTTPS request given an address, a method, and a timeout [[inputs.http_response]] + ## Deprecated in 1.12, use 'urls' ## Server address (default http://localhost) # address = "http://localhost" + ## List of urls to query. + # urls = ["http://localhost"] + ## Set http_proxy (telegraf uses the system wide proxy settings if it is not set) # http_proxy = "http://localhost:8888" diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index 1f1f68707..7dbe47b0d 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -22,8 +22,9 @@ import ( // HTTPResponse struct type HTTPResponse struct { - Address string - HTTPProxy string `toml:"http_proxy"` + Address string // deprecated in 1.12 + URLs []string `toml:"urls"` + HTTPProxy string `toml:"http_proxy"` Body string Method string ResponseTimeout internal.Duration @@ -42,9 +43,13 @@ func (h *HTTPResponse) Description() string { } var sampleConfig = ` + ## Deprecated in 1.12, use 'urls' ## Server address (default http://localhost) # address = "http://localhost" + ## List of urls to query. + # urls = ["http://localhost"] + ## Set http_proxy (telegraf uses the system wide proxy settings if it is not set) # http_proxy = "http://localhost:8888" @@ -171,16 +176,16 @@ func setError(err error, fields map[string]interface{}, tags map[string]string) } // HTTPGather gathers all fields and returns any errors it encounters -func (h *HTTPResponse) httpGather() (map[string]interface{}, map[string]string, error) { +func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string]string, error) { // Prepare fields and tags fields := make(map[string]interface{}) - tags := map[string]string{"server": h.Address, "method": h.Method} + tags := map[string]string{"server": u, "method": h.Method} var body io.Reader if h.Body != "" { body = strings.NewReader(h.Body) } - request, err := http.NewRequest(h.Method, h.Address, body) + request, err := http.NewRequest(h.Method, u, body) if err != nil { return nil, nil, err } @@ -201,7 +206,7 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, map[string]string, // HTTP error codes do not generate errors in the net/http library if err != nil { // Log error - log.Printf("D! 
Network error while polling %s: %s", u, err.Error()) // Get error details netErr := setError(err, fields, tags) @@ -284,20 +289,15 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { if h.Method == "" { h.Method = "GET" } - if h.Address == "" { - h.Address = "http://localhost" - } - addr, err := url.Parse(h.Address) - if err != nil { - return err - } - if addr.Scheme != "http" && addr.Scheme != "https" { - return errors.New("Only http and https are supported") - } - // Prepare data - var fields map[string]interface{} - var tags map[string]string + if len(h.URLs) == 0 { + if h.Address == "" { + h.URLs = []string{"http://localhost"} + } else { + log.Printf("W! [inputs.http_response] 'address' deprecated in telegraf 1.12, please use 'urls'") + h.URLs = []string{h.Address} + } + } if h.client == nil { client, err := h.createHttpClient() @@ -307,14 +307,33 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { h.client = client } - // Gather data - fields, tags, err = h.httpGather() - if err != nil { - return err + for _, u := range h.URLs { + addr, err := url.Parse(u) + if err != nil { + acc.AddError(err) + continue + } + + if addr.Scheme != "http" && addr.Scheme != "https" { + acc.AddError(errors.New("Only http and https are supported")) + continue + } + + // Prepare data + var fields map[string]interface{} + var tags map[string]string + + // Gather data + fields, tags, err = h.httpGather(u) + if err != nil { + acc.AddError(err) + continue + } + + // Add metrics + acc.AddFields("http_response", fields, tags) } - // Add metrics - acc.AddFields("http_response", fields, tags) return nil } From de096428be2b14a740a7369b9522620098b75244 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 14 Jun 2019 12:06:25 -0700 Subject: [PATCH 0922/1815] Run ServiceInputs during test mode; add --test-wait option (#5911) --- agent/agent.go | 74 ++++++++++++++++++++++++--------------- cmd/telegraf/telegraf.go | 9 +++-- internal/usage.go | 2 ++ internal/usage_windows.go | 2 ++ 4 files changed, 56 insertions(+), 31 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 2687bbc0f..e6e982c02 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -136,7 +136,7 @@ func (a *Agent) Run(ctx context.Context) error { } // Test runs the inputs once and prints the output to stdout in line protocol. -func (a *Agent) Test(ctx context.Context) error { +func (a *Agent) Test(ctx context.Context, waitDuration time.Duration) error { var wg sync.WaitGroup metricC := make(chan telegraf.Metric) nulC := make(chan telegraf.Metric) @@ -156,8 +156,8 @@ func (a *Agent) Test(ctx context.Context) error { octets, err := s.Serialize(metric) if err == nil { fmt.Print("> ", string(octets)) - } + metric.Reject() } }() @@ -168,43 +168,61 @@ func (a *Agent) Test(ctx context.Context) error { } }() + hasServiceInputs := false + for _, input := range a.Config.Inputs { + if _, ok := input.Input.(telegraf.ServiceInput); ok { + hasServiceInputs = true + break + } + } + + if hasServiceInputs { + log.Printf("D! [agent] Starting service inputs") + err := a.startServiceInputs(ctx, metricC) + if err != nil { + return err + } + } + for _, input := range a.Config.Inputs { select { case <-ctx.Done(): return nil default: - if _, ok := input.Input.(telegraf.ServiceInput); ok { - log.Printf("W!: [agent] skipping plugin [[%s]]: service inputs not supported in --test mode", - input.Name()) - continue + break + } + + acc := NewAccumulator(input, metricC) + acc.SetPrecision(a.Precision()) + + // Special instructions for some inputs. 
cpu, for example, needs to be + // run twice in order to return cpu usage percentages. + switch input.Name() { + case "inputs.cpu", "inputs.mongodb", "inputs.procstat": + nulAcc := NewAccumulator(input, nulC) + nulAcc.SetPrecision(a.Precision()) + if err := input.Input.Gather(nulAcc); err != nil { + acc.AddError(err) } - acc := NewAccumulator(input, metricC) - acc.SetPrecision(a.Precision()) - input.SetDefaultTags(a.Config.Tags) - - // Special instructions for some inputs. cpu, for example, needs to be - // run twice in order to return cpu usage percentages. - switch input.Name() { - case "inputs.cpu", "inputs.mongodb", "inputs.procstat": - nulAcc := NewAccumulator(input, nulC) - nulAcc.SetPrecision(a.Precision()) - if err := input.Input.Gather(nulAcc); err != nil { - return err - } - - time.Sleep(500 * time.Millisecond) - if err := input.Input.Gather(acc); err != nil { - return err - } - default: - if err := input.Input.Gather(acc); err != nil { - return err - } + time.Sleep(500 * time.Millisecond) + if err := input.Input.Gather(acc); err != nil { + acc.AddError(err) + } + default: + if err := input.Input.Gather(acc); err != nil { + acc.AddError(err) } } } + if hasServiceInputs { + log.Printf("D! [agent] Waiting for service inputs") + internal.SleepContext(ctx, waitDuration) + log.Printf("D! [agent] Stopping service inputs") + a.stopServiceInputs() + } + return nil } diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 3678387cd..4545833a7 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -13,6 +13,7 @@ import ( "runtime" "strings" "syscall" + "time" "github.com/influxdata/telegraf/agent" "github.com/influxdata/telegraf/internal" @@ -33,7 +34,8 @@ var pprofAddr = flag.String("pprof-addr", "", "pprof address to listen on, not activate pprof if empty") var fQuiet = flag.Bool("quiet", false, "run in quiet mode") -var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit") +var fTest = flag.Bool("test", false, "enable test mode: gather metrics, print them out, and exit") +var fTestWait = flag.Int("test-wait", 0, "wait up to this many seconds for service inputs to complete in test mode") var fConfig = flag.String("config", "", "configuration file to load") var fConfigDirectory = flag.String("config-directory", "", "directory containing additional *.conf files") @@ -167,8 +169,9 @@ func runAgent(ctx context.Context, logger.SetupLogging(logConfig) - if *fTest { - return ag.Test(ctx) + if *fTest || *fTestWait != 0 { + testWaitDuration := time.Duration(*fTestWait) * time.Second + return ag.Test(ctx, testWaitDuration) } log.Printf("I! 
Loaded inputs: %s", strings.Join(c.InputNames(), " ")) diff --git a/internal/usage.go b/internal/usage.go index c783da3f4..7909d3558 100644 --- a/internal/usage.go +++ b/internal/usage.go @@ -31,6 +31,8 @@ The commands & flags are: --sample-config print out full sample configuration --test gather metrics, print them out, and exit; processors, aggregators, and outputs are not run + --test-wait wait up to this many seconds for service + inputs to complete in test mode --usage print usage for a plugin, ie, 'telegraf --usage mysql' --version display the version and exit diff --git a/internal/usage_windows.go b/internal/usage_windows.go index 6e3c17835..af2506ec1 100644 --- a/internal/usage_windows.go +++ b/internal/usage_windows.go @@ -31,6 +31,8 @@ The commands & flags are: 'processors', 'aggregators' and 'inputs' --test gather metrics, print them out, and exit; processors, aggregators, and outputs are not run + --test-wait wait up to this many seconds for service + inputs to complete in test mode --usage print usage for a plugin, ie, 'telegraf --usage mysql' --version display the version and exit From 3e5cfad2b051b454c50e248e544791d1aecd6586 Mon Sep 17 00:00:00 2001 From: Anaisdg <30506042+Anaisdg@users.noreply.github.com> Date: Fri, 14 Jun 2019 14:08:10 -0500 Subject: [PATCH 0923/1815] Add Date Processor Plugin (#5895) --- plugins/processors/all/all.go | 1 + plugins/processors/date/README.md | 31 +++++++++++++ plugins/processors/date/date.go | 69 ++++++++++++++++++++++++++++ plugins/processors/date/date_test.go | 67 +++++++++++++++++++++++++++ 4 files changed, 168 insertions(+) create mode 100644 plugins/processors/date/README.md create mode 100644 plugins/processors/date/date.go create mode 100644 plugins/processors/date/date_test.go diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go index 41e2707d3..65580a46f 100644 --- a/plugins/processors/all/all.go +++ b/plugins/processors/all/all.go @@ -2,6 +2,7 @@ package all import ( _ "github.com/influxdata/telegraf/plugins/processors/converter" + _ "github.com/influxdata/telegraf/plugins/processors/date" _ "github.com/influxdata/telegraf/plugins/processors/enum" _ "github.com/influxdata/telegraf/plugins/processors/override" _ "github.com/influxdata/telegraf/plugins/processors/parser" diff --git a/plugins/processors/date/README.md b/plugins/processors/date/README.md new file mode 100644 index 000000000..e2bd245e5 --- /dev/null +++ b/plugins/processors/date/README.md @@ -0,0 +1,31 @@ +# Date Processor Plugin + +The `date` processor adds the months and years as tags to your data. + +Provides the ability to group by months or years. + +A few example usecases include: +1) consumption data for utilities on per month basis +2) bandwith capacity per month +3) compare energy production or sales on a yearly or monthly basis + + +### Configuration: + +```toml +[[processors.date]] + ##Specify the date tags to add rename operation. + tagKey = "month" + dateFormat = "Jan" +``` + +### Tags: + +Tags are applied by this processor. 
+ +### Example processing: + +``` +- throughput, hostname=example.com lower=10i,upper=1000i,mean=500i 1502489900000000000 ++ throughput,host=backend.example.com,month=Mar min=10i,max=1000i,mean=500i 1502489900000000000 +``` diff --git a/plugins/processors/date/date.go b/plugins/processors/date/date.go new file mode 100644 index 000000000..844f99cc7 --- /dev/null +++ b/plugins/processors/date/date.go @@ -0,0 +1,69 @@ +package date + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" +) + +const sampleConfig = ` +##Specify the date tags to add +tagKey = "month" +dateFormat = "%m" + +` + +type Date struct { + TagKey string `toml:"tagKey"` + DateFormat string `toml:"dateFormat"` +} + +func (d *Date) SampleConfig() string { + return sampleConfig +} + +func (d *Date) Description() string { + return "Dates measurements, tags, and fields that pass through this filter." +} + +func (d *Date) Apply(in ...telegraf.Metric) []telegraf.Metric { + for _, point := range in { + point.AddTag(d.TagKey, point.Time().Format(d.DateFormat)) + + } + + return in + +} + +func init() { + processors.Add("date", func() telegraf.Processor { + return &Date{} + }) +} + +/** + * + +[processors.date] + jdfj + + ##Set Months to True or False + tagKey = "month" + dateFormat = "%m" // January + +[processors.date] + jdfj + + ##Set Months to True or False + tagKey = "day_of_week" + dateFormat = "%d" // Wednesday + + + # [[processors.regex.fields]] + # key = "request" + # pattern = ".*category=(\\w+).*" + # replacement = "${1}" + # result_key = "search_category" + + +*/ diff --git a/plugins/processors/date/date_test.go b/plugins/processors/date/date_test.go new file mode 100644 index 000000000..98d88b351 --- /dev/null +++ b/plugins/processors/date/date_test.go @@ -0,0 +1,67 @@ +package date + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/assert" +) + +func MustMetric(name string, tags map[string]string, fields map[string]interface{}, metricTime time.Time) telegraf.Metric { + if tags == nil { + tags = map[string]string{} + } + if fields == nil { + fields = map[string]interface{}{} + } + m, _ := metric.New(name, tags, fields, metricTime) + return m +} + +func TestMonthTag(t *testing.T) { + dateFormatMonth := Date{ + TagKey: "month", + DateFormat: "Jan", + } + + currentTime := time.Now() + month := currentTime.Format("Jan") + + m1 := MustMetric("foo", nil, nil, currentTime) + m2 := MustMetric("bar", nil, nil, currentTime) + m3 := MustMetric("baz", nil, nil, currentTime) + monthApply := dateFormatMonth.Apply(m1, m2, m3) + assert.Equal(t, map[string]string{"month": month}, monthApply[0].Tags(), "should add tag 'month'") + assert.Equal(t, map[string]string{"month": month}, monthApply[1].Tags(), "should add tag 'month'") + assert.Equal(t, map[string]string{"month": month}, monthApply[2].Tags(), "should add tag 'month'") +} + +func TestYearTag(t *testing.T) { + dateFormatYear := Date{ + TagKey: "year", + DateFormat: "2006", + } + currentTime := time.Now() + year := currentTime.Format("2006") + + m4 := MustMetric("foo", nil, nil, currentTime) + m5 := MustMetric("bar", nil, nil, currentTime) + m6 := MustMetric("baz", nil, nil, currentTime) + yearApply := dateFormatYear.Apply(m4, m5, m6) + assert.Equal(t, map[string]string{"year": year}, yearApply[0].Tags(), "should add tag 'year'") + assert.Equal(t, map[string]string{"year": year}, yearApply[1].Tags(), "should add tag 'year'") + assert.Equal(t, 
map[string]string{"year": year}, yearApply[2].Tags(), "should add tag 'year'") +} + +func TestOldDateTag(t *testing.T) { + dateFormatYear := Date{ + TagKey: "year", + DateFormat: "2006", + } + + m7 := MustMetric("foo", nil, nil, time.Date(1993, 05, 27, 0, 0, 0, 0, time.UTC)) + customDateApply := dateFormatYear.Apply(m7) + assert.Equal(t, map[string]string{"year": "1993"}, customDateApply[0].Tags(), "should add tag 'year'") +} From a276ddfe976861ab6b00d0af56a0d77fa28daf31 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 14 Jun 2019 12:26:47 -0700 Subject: [PATCH 0924/1815] Tidy date processor --- plugins/processors/date/README.md | 34 ++++++++++++------------- plugins/processors/date/date.go | 41 ++++++------------------------- 2 files changed, 23 insertions(+), 52 deletions(-) diff --git a/plugins/processors/date/README.md b/plugins/processors/date/README.md index e2bd245e5..1a68119e1 100644 --- a/plugins/processors/date/README.md +++ b/plugins/processors/date/README.md @@ -1,31 +1,29 @@ # Date Processor Plugin -The `date` processor adds the months and years as tags to your data. +Use the `date` processor to add the metric timestamp as a human readable tag. -Provides the ability to group by months or years. +A common use is to add a tag that can be used to group by month or year. -A few example usecases include: -1) consumption data for utilities on per month basis +A few example usecases include: +1) consumption data for utilities on per month basis 2) bandwith capacity per month -3) compare energy production or sales on a yearly or monthly basis +3) compare energy production or sales on a yearly or monthly basis - -### Configuration: +### Configuration ```toml [[processors.date]] - ##Specify the date tags to add rename operation. - tagKey = "month" - dateFormat = "Jan" + ## New tag to create + tag_key = "month" + + ## Date format string, must be a representation of the Go "reference time" + ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". + date_format = "Jan" ``` -### Tags: - -Tags are applied by this processor. - -### Example processing: +### Example -``` -- throughput, hostname=example.com lower=10i,upper=1000i,mean=500i 1502489900000000000 -+ throughput,host=backend.example.com,month=Mar min=10i,max=1000i,mean=500i 1502489900000000000 +```diff +- throughput lower=10i,upper=1000i,mean=500i 1560540094000000000 ++ throughput,month=Jun lower=10i,upper=1000i,mean=500i 1560540094000000000 ``` diff --git a/plugins/processors/date/date.go b/plugins/processors/date/date.go index 844f99cc7..479106ef2 100644 --- a/plugins/processors/date/date.go +++ b/plugins/processors/date/date.go @@ -6,15 +6,17 @@ import ( ) const sampleConfig = ` -##Specify the date tags to add -tagKey = "month" -dateFormat = "%m" + ## New tag to create + tag_key = "month" + ## Date format string, must be a representation of the Go "reference time" + ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". 
+ date_format = "Jan" ` type Date struct { - TagKey string `toml:"tagKey"` - DateFormat string `toml:"dateFormat"` + TagKey string `toml:"tag_key"` + DateFormat string `toml:"date_format"` } func (d *Date) SampleConfig() string { @@ -28,11 +30,9 @@ func (d *Date) Description() string { func (d *Date) Apply(in ...telegraf.Metric) []telegraf.Metric { for _, point := range in { point.AddTag(d.TagKey, point.Time().Format(d.DateFormat)) - } return in - } func init() { @@ -40,30 +40,3 @@ func init() { return &Date{} }) } - -/** - * - -[processors.date] - jdfj - - ##Set Months to True or False - tagKey = "month" - dateFormat = "%m" // January - -[processors.date] - jdfj - - ##Set Months to True or False - tagKey = "day_of_week" - dateFormat = "%d" // Wednesday - - - # [[processors.regex.fields]] - # key = "request" - # pattern = ".*category=(\\w+).*" - # replacement = "${1}" - # result_key = "search_category" - - -*/ From 1f2cb853542cd04e52d897de2cb238ce89dcbeeb Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 14 Jun 2019 12:27:57 -0700 Subject: [PATCH 0925/1815] Update changelog and readme --- CHANGELOG.md | 4 ++++ README.md | 1 + 2 files changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ada0c5c44..9136b696b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## v1.12 [unreleased] +#### New Processors + +- [date](/plugins/processors/date/README.md) - Contributed by @influxdata + #### Features - [#5842](https://github.com/influxdata/telegraf/pull/5842): Improve performance of wavefront serializer. diff --git a/README.md b/README.md index 60f349e47..1bae79ba1 100644 --- a/README.md +++ b/README.md @@ -325,6 +325,7 @@ For documentation on the latest development code see the [documentation index][d ## Processor Plugins * [converter](./plugins/processors/converter) +* [date](./plugins/processors/date) * [enum](./plugins/processors/enum) * [override](./plugins/processors/override) * [parser](./plugins/processors/parser) From b35beb2fbab3c1331d21ace5acc510141d3dee98 Mon Sep 17 00:00:00 2001 From: Charlie Vieth Date: Fri, 14 Jun 2019 15:45:07 -0400 Subject: [PATCH 0926/1815] Reduce the cpu/memory used by the graphite parser (#5841) --- plugins/parsers/graphite/parser.go | 55 +++++++++++++----------------- 1 file changed, 24 insertions(+), 31 deletions(-) diff --git a/plugins/parsers/graphite/parser.go b/plugins/parsers/graphite/parser.go index fc32bd83d..75c0475e3 100644 --- a/plugins/parsers/graphite/parser.go +++ b/plugins/parsers/graphite/parser.go @@ -1,10 +1,9 @@ package graphite import ( - "bufio" "bytes" + "errors" "fmt" - "io" "math" "strconv" "strings" @@ -63,42 +62,36 @@ func NewGraphiteParser( func (p *GraphiteParser) Parse(buf []byte) ([]telegraf.Metric, error) { // parse even if the buffer begins with a newline - buf = bytes.TrimPrefix(buf, []byte("\n")) - // add newline to end if not exists: - if len(buf) > 0 && !bytes.HasSuffix(buf, []byte("\n")) { - buf = append(buf, []byte("\n")...) + if len(buf) != 0 && buf[0] == '\n' { + buf = buf[1:] } - metrics := make([]telegraf.Metric, 0) + var metrics []telegraf.Metric + var errs []string - var errStr string - buffer := bytes.NewBuffer(buf) - reader := bufio.NewReader(buffer) for { - // Read up to the next newline. 
- buf, err := reader.ReadBytes('\n') - if err == io.EOF { + n := bytes.IndexByte(buf, '\n') + var line []byte + if n >= 0 { + line = bytes.TrimSpace(buf[:n:n]) + } else { + line = bytes.TrimSpace(buf) // last line + } + if len(line) != 0 { + metric, err := p.ParseLine(string(line)) + if err == nil { + metrics = append(metrics, metric) + } else { + errs = append(errs, err.Error()) + } + } + if n < 0 { break } - if err != nil && err != io.EOF { - return metrics, err - } - - // Trim the buffer, even though there should be no padding - line := strings.TrimSpace(string(buf)) - if line == "" { - continue - } - metric, err := p.ParseLine(line) - if err == nil { - metrics = append(metrics, metric) - } else { - errStr += err.Error() + "\n" - } + buf = buf[n+1:] } - - if errStr != "" { - return metrics, fmt.Errorf(strings.TrimSpace(errStr)) + if len(errs) != 0 { + return metrics, errors.New(strings.Join(errs, "\n")) } return metrics, nil } From 0ff9c8ef88366aac3a4dceb5860151415f932678 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 14 Jun 2019 15:12:27 -0700 Subject: [PATCH 0927/1815] Add call to optional Init function for all plugins (#5899) --- agent/agent.go | 46 ++++++++++++++++++++++++++- docs/AGGREGATORS.md | 4 +++ docs/INPUTS.md | 4 +++ docs/OUTPUTS.md | 4 +++ docs/PROCESSORS.md | 4 +++ input.go | 9 ++++++ internal/models/running_aggregator.go | 10 ++++++ internal/models/running_input.go | 10 ++++++ internal/models/running_output.go | 10 ++++++ internal/models/running_processor.go | 10 ++++++ plugins/inputs/http/http.go | 35 ++++++++++---------- plugins/inputs/http/http_test.go | 24 +++----------- 12 files changed, 131 insertions(+), 39 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index e6e982c02..542154388 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -39,8 +39,14 @@ func (a *Agent) Run(ctx context.Context) error { return ctx.Err() } + log.Printf("D! [agent] Initializing plugins") + err := a.initPlugins() + if err != nil { + return err + } + log.Printf("D! [agent] Connecting outputs") - err := a.connectOutputs(ctx) + err = a.connectOutputs(ctx) if err != nil { return err } @@ -185,6 +191,11 @@ func (a *Agent) Test(ctx context.Context, waitDuration time.Duration) error { } for _, input := range a.Config.Inputs { + err := input.Init() + if err != nil { + return err + } + select { case <-ctx.Done(): return nil @@ -596,6 +607,39 @@ func (a *Agent) flushOnce( } +// initPlugins runs the Init function on plugins. +func (a *Agent) initPlugins() error { + for _, input := range a.Config.Inputs { + err := input.Init() + if err != nil { + return fmt.Errorf("could not initialize input %s: %v", + input.Config.Name, err) + } + } + for _, processor := range a.Config.Processors { + err := processor.Init() + if err != nil { + return fmt.Errorf("could not initialize processor %s: %v", + processor.Config.Name, err) + } + } + for _, aggregator := range a.Config.Aggregators { + err := aggregator.Init() + if err != nil { + return fmt.Errorf("could not initialize aggregator %s: %v", + aggregator.Config.Name, err) + } + } + for _, output := range a.Config.Outputs { + err := output.Init() + if err != nil { + return fmt.Errorf("could not initialize output %s: %v", + output.Config.Name, err) + } + } + return nil +} + // connectOutputs connects to all outputs. 
func (a *Agent) connectOutputs(ctx context.Context) error { for _, output := range a.Config.Outputs { diff --git a/docs/AGGREGATORS.md b/docs/AGGREGATORS.md index eee5b1de5..a5930a3e0 100644 --- a/docs/AGGREGATORS.md +++ b/docs/AGGREGATORS.md @@ -52,6 +52,10 @@ var sampleConfig = ` drop_original = false ` +func (m *Min) Init() error { + return nil +} + func (m *Min) SampleConfig() string { return sampleConfig } diff --git a/docs/INPUTS.md b/docs/INPUTS.md index 2f4cce3b6..f8e906f31 100644 --- a/docs/INPUTS.md +++ b/docs/INPUTS.md @@ -52,6 +52,10 @@ func (s *Simple) SampleConfig() string { ` } +func (s *Simple) Init() error { + return nil +} + func (s *Simple) Gather(acc telegraf.Accumulator) error { if s.Ok { acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, nil) diff --git a/docs/OUTPUTS.md b/docs/OUTPUTS.md index 8bba4687e..9d89491cc 100644 --- a/docs/OUTPUTS.md +++ b/docs/OUTPUTS.md @@ -43,6 +43,10 @@ func (s *Simple) SampleConfig() string { ` } +func (s *Simple) Init() error { + return nil +} + func (s *Simple) Connect() error { // Make a connection to the URL here return nil diff --git a/docs/PROCESSORS.md b/docs/PROCESSORS.md index 4f18b2d55..6ea82fdae 100644 --- a/docs/PROCESSORS.md +++ b/docs/PROCESSORS.md @@ -46,6 +46,10 @@ func (p *Printer) Description() string { return "Print all metrics that pass through this filter." } +func (p *Printer) Init() error { + return nil +} + func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric { for _, metric := range in { fmt.Println(metric.String()) diff --git a/input.go b/input.go index 071ab7d9d..ee47bc347 100644 --- a/input.go +++ b/input.go @@ -1,5 +1,14 @@ package telegraf +// Initializer is an interface that all plugin types: Inputs, Outputs, +// Processors, and Aggregators can optionally implement to initialize the +// plugin. +type Initializer interface { + // Init performs one time setup of the plugin and returns an error if the + // configuration is invalid. + Init() error +} + type Input interface { // SampleConfig returns the default configuration of the Input SampleConfig() string diff --git a/internal/models/running_aggregator.go b/internal/models/running_aggregator.go index 8a2cd576a..8bd983eef 100644 --- a/internal/models/running_aggregator.go +++ b/internal/models/running_aggregator.go @@ -71,6 +71,16 @@ func (r *RunningAggregator) Name() string { return "aggregators." 
+ r.Config.Name } +func (r *RunningAggregator) Init() error { + if p, ok := r.Aggregator.(telegraf.Initializer); ok { + err := p.Init() + if err != nil { + return err + } + } + return nil +} + func (r *RunningAggregator) Period() time.Duration { return r.Config.Period } diff --git a/internal/models/running_input.go b/internal/models/running_input.go index 08a804c40..73c14fc0f 100644 --- a/internal/models/running_input.go +++ b/internal/models/running_input.go @@ -56,6 +56,16 @@ func (r *RunningInput) metricFiltered(metric telegraf.Metric) { metric.Drop() } +func (r *RunningInput) Init() error { + if p, ok := r.Input.(telegraf.Initializer); ok { + err := p.Init() + if err != nil { + return err + } + } + return nil +} + func (r *RunningInput) MakeMetric(metric telegraf.Metric) telegraf.Metric { if ok := r.Config.Filter.Select(metric); !ok { r.metricFiltered(metric) diff --git a/internal/models/running_output.go b/internal/models/running_output.go index ff2b88e2a..438ecd480 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -97,6 +97,16 @@ func (ro *RunningOutput) metricFiltered(metric telegraf.Metric) { metric.Drop() } +func (ro *RunningOutput) Init() error { + if p, ok := ro.Output.(telegraf.Initializer); ok { + err := p.Init() + if err != nil { + return err + } + } + return nil +} + // AddMetric adds a metric to the output. // // Takes ownership of metric diff --git a/internal/models/running_processor.go b/internal/models/running_processor.go index 38369d03b..90d32fde5 100644 --- a/internal/models/running_processor.go +++ b/internal/models/running_processor.go @@ -40,6 +40,16 @@ func containsMetric(item telegraf.Metric, metrics []telegraf.Metric) bool { return false } +func (rp *RunningProcessor) Init() error { + if p, ok := rp.Processor.(telegraf.Initializer); ok { + err := p.Init() + if err != nil { + return err + } + } + return nil +} + func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric { rp.Lock() defer rp.Unlock() diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index 6d2d528ba..34db9d287 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -1,7 +1,6 @@ package http import ( - "errors" "fmt" "io" "io/ioutil" @@ -89,27 +88,25 @@ func (*HTTP) Description() string { return "Read formatted metrics from one or more HTTP endpoints" } +func (h *HTTP) Init() error { + tlsCfg, err := h.ClientConfig.TLSConfig() + if err != nil { + return err + } + + h.client = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + Proxy: http.ProxyFromEnvironment, + }, + Timeout: h.Timeout.Duration, + } + return nil +} + // Gather takes in an accumulator and adds the metrics that the Input // gathers. 
This is called every "interval" func (h *HTTP) Gather(acc telegraf.Accumulator) error { - if h.parser == nil { - return errors.New("Parser is not set") - } - - if h.client == nil { - tlsCfg, err := h.ClientConfig.TLSConfig() - if err != nil { - return err - } - h.client = &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsCfg, - Proxy: http.ProxyFromEnvironment, - }, - Timeout: h.Timeout.Duration, - } - } - var wg sync.WaitGroup for _, u := range h.URLs { wg.Add(1) diff --git a/plugins/inputs/http/http_test.go b/plugins/inputs/http/http_test.go index 7ac05e135..21eff6265 100644 --- a/plugins/inputs/http/http_test.go +++ b/plugins/inputs/http/http_test.go @@ -37,6 +37,7 @@ func TestHTTPwithJSONFormat(t *testing.T) { plugin.SetParser(p) var acc testutil.Accumulator + plugin.Init() require.NoError(t, acc.GatherError(plugin.Gather)) require.Len(t, acc.Metrics, 1) @@ -78,6 +79,7 @@ func TestHTTPHeaders(t *testing.T) { plugin.SetParser(p) var acc testutil.Accumulator + plugin.Init() require.NoError(t, acc.GatherError(plugin.Gather)) } @@ -100,6 +102,7 @@ func TestInvalidStatusCode(t *testing.T) { plugin.SetParser(p) var acc testutil.Accumulator + plugin.Init() require.Error(t, acc.GatherError(plugin.Gather)) } @@ -125,28 +128,10 @@ func TestMethod(t *testing.T) { plugin.SetParser(p) var acc testutil.Accumulator + plugin.Init() require.NoError(t, acc.GatherError(plugin.Gather)) } -func TestParserNotSet(t *testing.T) { - fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte(simpleJSON)) - } else { - w.WriteHeader(http.StatusNotFound) - } - })) - defer fakeServer.Close() - - url := fakeServer.URL + "/endpoint" - plugin := &plugin.HTTP{ - URLs: []string{url}, - } - - var acc testutil.Accumulator - require.Error(t, acc.GatherError(plugin.Gather)) -} - const simpleJSON = ` { "a": 1.2 @@ -237,6 +222,7 @@ func TestBodyAndContentEncoding(t *testing.T) { tt.plugin.SetParser(parser) var acc testutil.Accumulator + tt.plugin.Init() err = tt.plugin.Gather(&acc) require.NoError(t, err) }) From d3af8fd873caa7d982df30df727b5540312bc222 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n=20L=C3=B3pez?= Date: Sat, 15 Jun 2019 00:23:54 +0200 Subject: [PATCH 0928/1815] Allow regexp processor to mix different tags (#5863) --- plugins/processors/regex/README.md | 2 ++ plugins/processors/regex/regex.go | 6 ++++++ plugins/processors/regex/regex_test.go | 14 ++++++++++++++ 3 files changed, 22 insertions(+) diff --git a/plugins/processors/regex/README.md b/plugins/processors/regex/README.md index d37b1ea88..a6cef82a0 100644 --- a/plugins/processors/regex/README.md +++ b/plugins/processors/regex/README.md @@ -2,6 +2,8 @@ The `regex` plugin transforms tag and field values with a regex pattern. If the `result_key` parameter is present, it can produce new tags and fields from existing ones. +For tag transforms, if `append` is set to `true`, the transformed value is appended to the existing tag value instead of overwriting it. 
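+
+For example, a tag converter that appends the matched value to an existing
+`resp_code` tag could look like this (a sketch; the key names and pattern are
+illustrative, mirroring the test case added below):
+
+```toml
+[[processors.regex]]
+  [[processors.regex.tags]]
+    key = "verb"
+    pattern = "^(.*)$"
+    replacement = " (${1})"
+    result_key = "resp_code"
+    append = true
+```
+
+With tags `verb=GET` and `resp_code=200`, the result is `resp_code=200 (GET)`.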
+ ### Configuration: ```toml diff --git a/plugins/processors/regex/regex.go b/plugins/processors/regex/regex.go index b922cd2d5..47b53546f 100644 --- a/plugins/processors/regex/regex.go +++ b/plugins/processors/regex/regex.go @@ -18,6 +18,7 @@ type converter struct { Pattern string Replacement string ResultKey string + Append bool } const sampleConfig = ` @@ -70,6 +71,11 @@ func (r *Regex) Apply(in ...telegraf.Metric) []telegraf.Metric { for _, converter := range r.Tags { if value, ok := metric.GetTag(converter.Key); ok { if key, newValue := r.convert(converter, value); newValue != "" { + if converter.Append { + if v, ok := metric.GetTag(key); ok { + newValue = v + newValue + } + } metric.AddTag(key, newValue) } } diff --git a/plugins/processors/regex/regex_test.go b/plugins/processors/regex/regex_test.go index f16ef7f5c..b0ddf47d0 100644 --- a/plugins/processors/regex/regex_test.go +++ b/plugins/processors/regex/regex_test.go @@ -108,6 +108,20 @@ func TestTagConversions(t *testing.T) { "resp_code": "2xx", }, }, + { + message: "Should append to existing tag", + converter: converter{ + Key: "verb", + Pattern: "^(.*)$", + Replacement: " (${1})", + ResultKey: "resp_code", + Append: true, + }, + expectedTags: map[string]string{ + "verb": "GET", + "resp_code": "200 (GET)", + }, + }, { message: "Should add new tag", converter: converter{ From 7f04511c302f5a195f39a487217604d285120868 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 14 Jun 2019 15:25:56 -0700 Subject: [PATCH 0929/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9136b696b..0fce7bcab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ #### Features - [#5842](https://github.com/influxdata/telegraf/pull/5842): Improve performance of wavefront serializer. +- [#5863](https://github.com/influxdata/telegraf/pull/5863): Allow regex processor to append tag values. 
#### Bugfixes From 1ea7863b9b041f282a19c49c5d24bb8d9dca61fe Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 14 Jun 2019 15:26:56 -0700 Subject: [PATCH 0930/1815] Add pivot and unpivot processors (#5991) --- metric/metric.go | 4 +- plugins/processors/all/all.go | 2 + plugins/processors/pivot/README.md | 30 ++++++ plugins/processors/pivot/pivot.go | 54 ++++++++++ plugins/processors/pivot/pivot_test.go | 111 +++++++++++++++++++++ plugins/processors/unpivot/README.md | 26 +++++ plugins/processors/unpivot/unpivot.go | 71 +++++++++++++ plugins/processors/unpivot/unpivot_test.go | 90 +++++++++++++++++ 8 files changed, 386 insertions(+), 2 deletions(-) create mode 100644 plugins/processors/pivot/README.md create mode 100644 plugins/processors/pivot/pivot.go create mode 100644 plugins/processors/pivot/pivot_test.go create mode 100644 plugins/processors/unpivot/README.md create mode 100644 plugins/processors/unpivot/unpivot.go create mode 100644 plugins/processors/unpivot/unpivot_test.go diff --git a/metric/metric.go b/metric/metric.go index 29345e63c..4f1418b35 100644 --- a/metric/metric.go +++ b/metric/metric.go @@ -240,11 +240,11 @@ func (m *metric) Copy() telegraf.Metric { } for i, tag := range m.tags { - m2.tags[i] = tag + m2.tags[i] = &telegraf.Tag{Key: tag.Key, Value: tag.Value} } for i, field := range m.fields { - m2.fields[i] = field + m2.fields[i] = &telegraf.Field{Key: field.Key, Value: field.Value} } return m2 } diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go index 65580a46f..5a61a2e80 100644 --- a/plugins/processors/all/all.go +++ b/plugins/processors/all/all.go @@ -6,9 +6,11 @@ import ( _ "github.com/influxdata/telegraf/plugins/processors/enum" _ "github.com/influxdata/telegraf/plugins/processors/override" _ "github.com/influxdata/telegraf/plugins/processors/parser" + _ "github.com/influxdata/telegraf/plugins/processors/pivot" _ "github.com/influxdata/telegraf/plugins/processors/printer" _ "github.com/influxdata/telegraf/plugins/processors/regex" _ "github.com/influxdata/telegraf/plugins/processors/rename" _ "github.com/influxdata/telegraf/plugins/processors/strings" _ "github.com/influxdata/telegraf/plugins/processors/topk" + _ "github.com/influxdata/telegraf/plugins/processors/unpivot" ) diff --git a/plugins/processors/pivot/README.md b/plugins/processors/pivot/README.md new file mode 100644 index 000000000..7d2fa91b4 --- /dev/null +++ b/plugins/processors/pivot/README.md @@ -0,0 +1,30 @@ +# Pivot Processor + +You can use the `pivot` processor to rotate single valued metrics into a multi +field metric. This transformation often results in data that is easier to +apply mathematical operators and comparisons to, and that flattens into a +more compact representation for write operations with some output data +formats. + +To perform the reverse operation use the [unpivot] processor. + +### Configuration + +```toml +[[processors.pivot]] + ## Tag to use for naming the new field. + tag_key = "name" + ## Field to use as the value of the new field. 
+ value_key = "value" +``` + +### Example + +```diff +- cpu,cpu=cpu0,name=time_idle value=42i +- cpu,cpu=cpu0,name=time_user value=43i ++ cpu,cpu=cpu0 time_idle=42i ++ cpu,cpu=cpu0 time_user=42i +``` + +[unpivot]: /plugins/processors/unpivot/README.md diff --git a/plugins/processors/pivot/pivot.go b/plugins/processors/pivot/pivot.go new file mode 100644 index 000000000..b20c7f758 --- /dev/null +++ b/plugins/processors/pivot/pivot.go @@ -0,0 +1,54 @@ +package pivot + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" +) + +const ( + description = "Rotate a single valued metric into a multi field metric" + sampleConfig = ` + ## Tag to use for naming the new field. + tag_key = "name" + ## Field to use as the value of the new field. + value_key = "value" +` +) + +type Pivot struct { + TagKey string `toml:"tag_key"` + ValueKey string `toml:"value_key"` +} + +func (p *Pivot) SampleConfig() string { + return sampleConfig +} + +func (p *Pivot) Description() string { + return description +} + +func (p *Pivot) Apply(metrics ...telegraf.Metric) []telegraf.Metric { + for _, m := range metrics { + key, ok := m.GetTag(p.TagKey) + if !ok { + continue + } + + value, ok := m.GetField(p.ValueKey) + if !ok { + continue + } + + m.RemoveTag(p.TagKey) + m.RemoveField(p.ValueKey) + m.AddField(key, value) + } + return metrics +} + +func init() { + processors.Add("pivot", func() telegraf.Processor { + return &Pivot{} + }) +} diff --git a/plugins/processors/pivot/pivot_test.go b/plugins/processors/pivot/pivot_test.go new file mode 100644 index 000000000..34924f8fa --- /dev/null +++ b/plugins/processors/pivot/pivot_test.go @@ -0,0 +1,111 @@ +package pivot + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" +) + +func TestPivot(t *testing.T) { + now := time.Now() + tests := []struct { + name string + pivot *Pivot + metrics []telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "simple", + pivot: &Pivot{ + TagKey: "name", + ValueKey: "value", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "name": "idle_time", + }, + map[string]interface{}{ + "value": int64(42), + }, + now, + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "idle_time": int64(42), + }, + now, + ), + }, + }, + { + name: "missing tag", + pivot: &Pivot{ + TagKey: "name", + ValueKey: "value", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "foo": "idle_time", + }, + map[string]interface{}{ + "value": int64(42), + }, + now, + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "foo": "idle_time", + }, + map[string]interface{}{ + "value": int64(42), + }, + now, + ), + }, + }, + { + name: "missing field", + pivot: &Pivot{ + TagKey: "name", + ValueKey: "value", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "name": "idle_time", + }, + map[string]interface{}{ + "foo": int64(42), + }, + now, + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "name": "idle_time", + }, + map[string]interface{}{ + "foo": int64(42), + }, + now, + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := tt.pivot.Apply(tt.metrics...) 
+			testutil.RequireMetricsEqual(t, tt.expected, actual)
+		})
+	}
+}
diff --git a/plugins/processors/unpivot/README.md b/plugins/processors/unpivot/README.md
new file mode 100644
index 000000000..beee6c276
--- /dev/null
+++ b/plugins/processors/unpivot/README.md
@@ -0,0 +1,26 @@
+# Unpivot Processor
+
+You can use the `unpivot` processor to rotate a multi field series into single valued metrics. This transformation often results in data that is easier to aggregate across fields.
+
+To perform the reverse operation use the [pivot] processor.
+
+### Configuration
+
+```toml
+[[processors.unpivot]]
+  ## Tag to use for the name.
+  tag_key = "name"
+  ## Field to use for the name of the value.
+  value_key = "value"
+```
+
+### Example
+
+```diff
+- cpu,cpu=cpu0 time_idle=42i,time_user=43i
++ cpu,cpu=cpu0,name=time_idle value=42i
++ cpu,cpu=cpu0,name=time_user value=43i
+```
+
+[pivot]: /plugins/processors/pivot/README.md
+
diff --git a/plugins/processors/unpivot/unpivot.go b/plugins/processors/unpivot/unpivot.go
new file mode 100644
index 000000000..4a081a428
--- /dev/null
+++ b/plugins/processors/unpivot/unpivot.go
@@ -0,0 +1,71 @@
+package unpivot
+
+import (
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/processors"
+)
+
+const (
+	description  = "Rotate multi field metric into several single field metrics"
+	sampleConfig = `
+  ## Tag to use for the name.
+  tag_key = "name"
+  ## Field to use for the name of the value.
+  value_key = "value"
+`
+)
+
+type Unpivot struct {
+	TagKey   string `toml:"tag_key"`
+	ValueKey string `toml:"value_key"`
+}
+
+func (p *Unpivot) SampleConfig() string {
+	return sampleConfig
+}
+
+func (p *Unpivot) Description() string {
+	return description
+}
+
+func copyWithoutFields(metric telegraf.Metric) telegraf.Metric {
+	m := metric.Copy()
+
+	fieldKeys := make([]string, 0, len(m.FieldList()))
+	for _, field := range m.FieldList() {
+		fieldKeys = append(fieldKeys, field.Key)
+	}
+
+	for _, fk := range fieldKeys {
+		m.RemoveField(fk)
+	}
+
+	return m
+}
+
+func (p *Unpivot) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
+	fieldCount := 0
+	for _, m := range metrics {
+		fieldCount += len(m.FieldList())
+	}
+
+	results := make([]telegraf.Metric, 0, fieldCount)
+
+	for _, m := range metrics {
+		base := copyWithoutFields(m)
+		for _, field := range m.FieldList() {
+			newMetric := base.Copy()
+			newMetric.AddField(p.ValueKey, field.Value)
+			newMetric.AddTag(p.TagKey, field.Key)
+			results = append(results, newMetric)
+		}
+		m.Accept()
+	}
+	return results
+}
+
+func init() {
+	processors.Add("unpivot", func() telegraf.Processor {
+		return &Unpivot{}
+	})
+}
diff --git a/plugins/processors/unpivot/unpivot_test.go b/plugins/processors/unpivot/unpivot_test.go
new file mode 100644
index 000000000..a3a538503
--- /dev/null
+++ b/plugins/processors/unpivot/unpivot_test.go
@@ -0,0 +1,90 @@
+package unpivot
+
+import (
+	"testing"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/testutil"
+)
+
+func TestUnpivot(t *testing.T) {
+	now := time.Now()
+	tests := []struct {
+		name     string
+		unpivot  *Unpivot
+		metrics  []telegraf.Metric
+		expected []telegraf.Metric
+	}{
+		{
+			name: "simple",
+			unpivot: &Unpivot{
+				TagKey:   "name",
+				ValueKey: "value",
+			},
+			metrics: []telegraf.Metric{
+				testutil.MustMetric("cpu",
+					map[string]string{},
+					map[string]interface{}{
+						"idle_time": int64(42),
+					},
+					now,
+				),
+			},
+			expected: []telegraf.Metric{
+				testutil.MustMetric("cpu",
+					map[string]string{
+						"name": "idle_time",
+					},
+
map[string]interface{}{ + "value": int64(42), + }, + now, + ), + }, + }, + { + name: "multi fields", + unpivot: &Unpivot{ + TagKey: "name", + ValueKey: "value", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "idle_time": int64(42), + "idle_user": int64(43), + }, + now, + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "name": "idle_time", + }, + map[string]interface{}{ + "value": int64(42), + }, + now, + ), + testutil.MustMetric("cpu", + map[string]string{ + "name": "idle_user", + }, + map[string]interface{}{ + "value": int64(43), + }, + now, + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := tt.unpivot.Apply(tt.metrics...) + testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.SortMetrics()) + }) + } +} From 1dcfcdbad31cbbc626c8adccdc085f8387df4fe2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 14 Jun 2019 15:27:57 -0700 Subject: [PATCH 0931/1815] Update changelog and readme --- CHANGELOG.md | 2 ++ README.md | 2 ++ 2 files changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0fce7bcab..460ffb7e8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,8 @@ #### New Processors - [date](/plugins/processors/date/README.md) - Contributed by @influxdata +- [pivot](/plugins/processors/pivot/README.md) - Contributed by @influxdata +- [unpivot](/plugins/processors/unpivot/README.md) - Contributed by @influxdata #### Features diff --git a/README.md b/README.md index 1bae79ba1..cb01c303f 100644 --- a/README.md +++ b/README.md @@ -329,11 +329,13 @@ For documentation on the latest development code see the [documentation index][d * [enum](./plugins/processors/enum) * [override](./plugins/processors/override) * [parser](./plugins/processors/parser) +* [pivot](./plugins/processors/pivot) * [printer](./plugins/processors/printer) * [regex](./plugins/processors/regex) * [rename](./plugins/processors/rename) * [strings](./plugins/processors/strings) * [topk](./plugins/processors/topk) +* [unpivot](./plugins/processors/unpivot) ## Aggregator Plugins From 31291f5590671cc84a38d7a2eb13791abceafea4 Mon Sep 17 00:00:00 2001 From: dupondje Date: Mon, 17 Jun 2019 21:55:09 +0200 Subject: [PATCH 0932/1815] Add starttime to php-fpm metrics (#5997) --- plugins/inputs/phpfpm/phpfpm.go | 4 +++- plugins/inputs/phpfpm/phpfpm_test.go | 4 ++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index ed205e6e7..2d2806261 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -21,6 +21,7 @@ import ( const ( PF_POOL = "pool" PF_PROCESS_MANAGER = "process manager" + PF_START_SINCE = "start since" PF_ACCEPTED_CONN = "accepted conn" PF_LISTEN_QUEUE = "listen queue" PF_MAX_LISTEN_QUEUE = "max listen queue" @@ -242,7 +243,8 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) (poolStat, // Start to parse metric for current pool switch fieldName { - case PF_ACCEPTED_CONN, + case PF_START_SINCE, + PF_ACCEPTED_CONN, PF_LISTEN_QUEUE, PF_MAX_LISTEN_QUEUE, PF_LISTEN_QUEUE_LEN, diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index ba24b0f36..f449b4649 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -44,6 +44,7 @@ func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) { } fields := map[string]interface{}{ + "start_since": int64(1991), 
"accepted_conn": int64(3), "listen_queue": int64(1), "max_listen_queue": int64(0), @@ -85,6 +86,7 @@ func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) { } fields := map[string]interface{}{ + "start_since": int64(1991), "accepted_conn": int64(3), "listen_queue": int64(1), "max_listen_queue": int64(0), @@ -130,6 +132,7 @@ func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) { } fields := map[string]interface{}{ + "start_since": int64(1991), "accepted_conn": int64(3), "listen_queue": int64(1), "max_listen_queue": int64(0), @@ -175,6 +178,7 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { } fields := map[string]interface{}{ + "start_since": int64(1991), "accepted_conn": int64(3), "listen_queue": int64(1), "max_listen_queue": int64(0), From 22366f2a419e320daf184c455a60ab42c7b1898d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 17 Jun 2019 12:56:58 -0700 Subject: [PATCH 0933/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 460ffb7e8..678e7597f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ - [#5842](https://github.com/influxdata/telegraf/pull/5842): Improve performance of wavefront serializer. - [#5863](https://github.com/influxdata/telegraf/pull/5863): Allow regex processor to append tag values. +- [#5997](https://github.com/influxdata/telegraf/pull/5997): Add starttime field to phpfpm input. #### Bugfixes From 1775e1cdc692ae08cfad7668b02807cea3bd4ed3 Mon Sep 17 00:00:00 2001 From: dupondje Date: Mon, 17 Jun 2019 22:31:15 +0200 Subject: [PATCH 0934/1815] Add cluster name tag to elasticsearch indices (#5998) --- plugins/inputs/elasticsearch/elasticsearch.go | 4 +++- plugins/inputs/elasticsearch/elasticsearch_test.go | 4 ++-- plugins/inputs/elasticsearch/testdata_test.go | 3 +++ 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 479bfcfda..13c567b30 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -50,6 +50,7 @@ type clusterHealth struct { RelocatingShards int `json:"relocating_shards"` InitializingShards int `json:"initializing_shards"` UnassignedShards int `json:"unassigned_shards"` + DelayedUnassignedShards int `json:"delayed_unassigned_shards"` NumberOfPendingTasks int `json:"number_of_pending_tasks"` TaskMaxWaitingInQueueMillis int `json:"task_max_waiting_in_queue_millis"` ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"` @@ -340,6 +341,7 @@ func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator "relocating_shards": healthStats.RelocatingShards, "initializing_shards": healthStats.InitializingShards, "unassigned_shards": healthStats.UnassignedShards, + "delayed_unassigned_shards": healthStats.DelayedUnassignedShards, "number_of_pending_tasks": healthStats.NumberOfPendingTasks, "task_max_waiting_in_queue_millis": healthStats.TaskMaxWaitingInQueueMillis, "active_shards_percent_as_number": healthStats.ActiveShardsPercentAsNumber, @@ -366,7 +368,7 @@ func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator acc.AddFields( "elasticsearch_indices", indexFields, - map[string]string{"index": name}, + map[string]string{"index": name, "name": healthStats.ClusterName}, measurementTime, ) } diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go index 
1616bfeb2..ec6951fbd 100644 --- a/plugins/inputs/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -190,11 +190,11 @@ func TestGatherClusterHealthAlsoIndicesHealth(t *testing.T) { acc.AssertContainsTaggedFields(t, "elasticsearch_indices", v1IndexExpected, - map[string]string{"index": "v1"}) + map[string]string{"index": "v1", "name": "elasticsearch_telegraf"}) acc.AssertContainsTaggedFields(t, "elasticsearch_indices", v2IndexExpected, - map[string]string{"index": "v2"}) + map[string]string{"index": "v2", "name": "elasticsearch_telegraf"}) } func TestGatherClusterStatsMaster(t *testing.T) { diff --git a/plugins/inputs/elasticsearch/testdata_test.go b/plugins/inputs/elasticsearch/testdata_test.go index 622abeaf8..df50d0a2b 100644 --- a/plugins/inputs/elasticsearch/testdata_test.go +++ b/plugins/inputs/elasticsearch/testdata_test.go @@ -12,6 +12,7 @@ const clusterHealthResponse = ` "relocating_shards": 0, "initializing_shards": 0, "unassigned_shards": 0, + "delayed_unassigned_shards": 0, "number_of_pending_tasks": 0, "task_max_waiting_in_queue_millis": 0, "active_shards_percent_as_number": 100.0 @@ -30,6 +31,7 @@ const clusterHealthResponseWithIndices = ` "relocating_shards": 0, "initializing_shards": 0, "unassigned_shards": 0, + "delayed_unassigned_shards": 0, "number_of_pending_tasks": 0, "task_max_waiting_in_queue_millis": 0, "active_shards_percent_as_number": 100.0, @@ -69,6 +71,7 @@ var clusterHealthExpected = map[string]interface{}{ "relocating_shards": 0, "initializing_shards": 0, "unassigned_shards": 0, + "delayed_unassigned_shards": 0, "number_of_pending_tasks": 0, "task_max_waiting_in_queue_millis": 0, "active_shards_percent_as_number": 100.0, From 1da81799cb1e87a0bc163ab1fa66a6124f62637c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 17 Jun 2019 13:31:53 -0700 Subject: [PATCH 0935/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 678e7597f..348b382da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ - [#5842](https://github.com/influxdata/telegraf/pull/5842): Improve performance of wavefront serializer. - [#5863](https://github.com/influxdata/telegraf/pull/5863): Allow regex processor to append tag values. - [#5997](https://github.com/influxdata/telegraf/pull/5997): Add starttime field to phpfpm input. +- [#5998](https://github.com/influxdata/telegraf/pull/5998): Add cluster name tag to elasticsearch indices. 
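A short illustrative aside on the entry above (not part of the patch): because `elasticsearch_indices` now carries the cluster name in the `name` tag alongside `index`, downstream consumers can key series on both tags, so two clusters that each expose an index with the same name no longer collide. A minimal sketch, assuming plain tag maps; the helper name is hypothetical:

```go
package main

import "fmt"

// indexKey groups elasticsearch_indices series by (cluster, index), which the
// new "name" tag added above makes possible.
func indexKey(tags map[string]string) string {
	return tags["name"] + "/" + tags["index"]
}

func main() {
	fmt.Println(indexKey(map[string]string{"name": "elasticsearch_telegraf", "index": "v1"}))
	// Output: elasticsearch_telegraf/v1
}
```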
#### Bugfixes From fd9abd21662a2e019e1f1b08cb8bf276ac2ddad2 Mon Sep 17 00:00:00 2001 From: Boris Yonchev Date: Mon, 17 Jun 2019 23:34:54 +0300 Subject: [PATCH 0936/1815] Add formdata parser (#5749) --- internal/config/config.go | 13 ++ plugins/inputs/http_listener_v2/README.md | 15 +- .../http_listener_v2/http_listener_v2.go | 102 ++++++++--- .../http_listener_v2/http_listener_v2_test.go | 47 +++++ plugins/parsers/formdata/README.md | 76 ++++++++ plugins/parsers/formdata/parser.go | 130 +++++++++++++ plugins/parsers/formdata/parser_test.go | 172 ++++++++++++++++++ plugins/parsers/registry.go | 22 +++ 8 files changed, 551 insertions(+), 26 deletions(-) create mode 100644 plugins/parsers/formdata/README.md create mode 100644 plugins/parsers/formdata/parser.go create mode 100644 plugins/parsers/formdata/parser_test.go diff --git a/internal/config/config.go b/internal/config/config.go index ab50c7df8..0e4c6f23f 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1726,6 +1726,18 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { } } + if node, ok := tbl.Fields["form_data_tag_keys"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + c.FormDataTagKeys = append(c.FormDataTagKeys, str.Value) + } + } + } + } + } + c.MetricName = name delete(tbl.Fields, "data_format") @@ -1767,6 +1779,7 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { delete(tbl.Fields, "csv_timestamp_column") delete(tbl.Fields, "csv_timestamp_format") delete(tbl.Fields, "csv_trim_space") + delete(tbl.Fields, "form_data_tag_keys") return c, nil } diff --git a/plugins/inputs/http_listener_v2/README.md b/plugins/inputs/http_listener_v2/README.md index f5a853189..4829e044d 100644 --- a/plugins/inputs/http_listener_v2/README.md +++ b/plugins/inputs/http_listener_v2/README.md @@ -1,7 +1,7 @@ # HTTP Listener v2 Input Plugin HTTP Listener v2 is a service input plugin that listens for metrics sent via -HTTP. Metrics may be sent in any supported [data format][data_format]. +HTTP. Metrics may be sent in any supported [data format][data_format]. **Note:** The plugin previously known as `http_listener` has been renamed `influxdb_listener`. If you would like Telegraf to act as a proxy/relay for @@ -49,11 +49,17 @@ This is a sample configuration for the plugin. ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + + ## Part of the request to consume. + ## Available options are "body" and "query". + ## Note that the data source and data format are independent properties. + ## To consume standard query params and POST forms - use "formdata" as a data_format. + # data_source = "body" ``` ### Metrics: -Metrics are created from the request body and are dependant on the value of `data_format`. +Metrics are collected from the part of the request specified by the `data_source` param and are parsed depending on the value of `data_format`. 
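The new formdata parser itself (`plugins/parsers/formdata/parser.go`) is not reproduced in this excerpt. As a rough sketch of the idea only, assuming semantics consistent with the listener tests later in this patch: URL-encoded key/value pairs are split into tags (for keys listed in `form_data_tag_keys`) and numeric fields. The helper below is illustrative, not the plugin's actual code:

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// parseFormData sketches the concept: keys named in tagKeys become tags and
// the remaining values are parsed as float fields, matching the behavior
// exercised by inputs like "tagKey=tagValue&fieldKey=42" in the tests below.
func parseFormData(raw string, tagKeys map[string]bool) (map[string]string, map[string]interface{}, error) {
	values, err := url.ParseQuery(raw)
	if err != nil {
		return nil, nil, err
	}
	tags := map[string]string{}
	fields := map[string]interface{}{}
	for key, vals := range values {
		if len(vals) == 0 {
			continue
		}
		if tagKeys[key] {
			tags[key] = vals[0]
		} else if f, err := strconv.ParseFloat(vals[0], 64); err == nil {
			fields[key] = f
		}
	}
	return tags, fields, nil
}

func main() {
	tags, fields, _ := parseFormData("tagKey=tagValue&fieldKey=42", map[string]bool{"tagKey": true})
	fmt.Println(tags, fields) // map[tagKey:tagValue] map[fieldKey:42]
}
```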
### Troubleshooting: @@ -67,5 +73,10 @@ curl -i -XPOST 'http://localhost:8080/telegraf' --data-binary 'cpu_load_short,ho curl -i -XPOST 'http://localhost:8080/telegraf' --data-binary '{"value1": 42, "value2": 42}' ``` +**Send query params** +``` +curl -i -XGET 'http://localhost:8080/telegraf?host=server01&value=0.42' +``` + [data_format]: /docs/DATA_FORMATS_INPUT.md [influxdb_listener]: /plugins/inputs/influxdb_listener/README.md diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index 3fd8989f9..44581f11b 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -8,6 +8,8 @@ import ( "log" "net" "net/http" + "net/url" + "strings" "sync" "time" @@ -23,16 +25,25 @@ import ( // 500 MB const defaultMaxBodySize = 500 * 1024 * 1024 +const ( + body = "body" + query = "query" +) + +// TimeFunc provides a timestamp for the metrics type TimeFunc func() time.Time +// HTTPListenerV2 is an input plugin that collects external metrics sent via HTTP type HTTPListenerV2 struct { ServiceAddress string Path string Methods []string - ReadTimeout internal.Duration - WriteTimeout internal.Duration - MaxBodySize internal.Size - Port int + DataSource string + + ReadTimeout internal.Duration + WriteTimeout internal.Duration + MaxBodySize internal.Size + Port int tlsint.ServerConfig @@ -86,6 +97,12 @@ const sampleConfig = ` ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + + ## Part of the request to consume. + ## Available options are "body" and "query". + ## Note that the data source and data format are independent properties. + ## To consume standard query params and POST forms - use "formdata" as a data_format. + # data_source = "body" ` func (h *HTTPListenerV2) SampleConfig() string { @@ -164,11 +181,13 @@ func (h *HTTPListenerV2) Stop() { } func (h *HTTPListenerV2) ServeHTTP(res http.ResponseWriter, req *http.Request) { - if req.URL.Path == h.Path { - h.AuthenticateIfSet(h.serveWrite, res, req) - } else { - h.AuthenticateIfSet(http.NotFound, res, req) + handler := h.serveWrite + + if req.URL.Path != h.Path { + handler = http.NotFound } + + h.authenticateIfSet(handler, res, req) } func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) { @@ -191,23 +210,17 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) return } - // Handle gzip request bodies - body := req.Body - if req.Header.Get("Content-Encoding") == "gzip" { - var err error - body, err = gzip.NewReader(req.Body) - if err != nil { - log.Println("D! 
" + err.Error()) - badRequest(res) - return - } - defer body.Close() + var bytes []byte + var ok bool + + switch strings.ToLower(h.DataSource) { + case query: + bytes, ok = h.collectQuery(res, req) + default: + bytes, ok = h.collectBody(res, req) } - body = http.MaxBytesReader(res, body, h.MaxBodySize.Size) - bytes, err := ioutil.ReadAll(body) - if err != nil { - tooLarge(res) + if !ok { return } @@ -217,12 +230,52 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) badRequest(res) return } + for _, m := range metrics { h.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) } + res.WriteHeader(http.StatusNoContent) } +func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) ([]byte, bool) { + body := req.Body + + // Handle gzip request bodies + if req.Header.Get("Content-Encoding") == "gzip" { + var err error + body, err = gzip.NewReader(req.Body) + if err != nil { + log.Println("D! " + err.Error()) + badRequest(res) + return nil, false + } + defer body.Close() + } + + body = http.MaxBytesReader(res, body, h.MaxBodySize.Size) + bytes, err := ioutil.ReadAll(body) + if err != nil { + tooLarge(res) + return nil, false + } + + return bytes, true +} + +func (h *HTTPListenerV2) collectQuery(res http.ResponseWriter, req *http.Request) ([]byte, bool) { + rawQuery := req.URL.RawQuery + + query, err := url.QueryUnescape(rawQuery) + if err != nil { + log.Println("D! " + err.Error()) + badRequest(res) + return nil, false + } + + return []byte(query), true +} + func tooLarge(res http.ResponseWriter) { res.Header().Set("Content-Type", "application/json") res.WriteHeader(http.StatusRequestEntityTooLarge) @@ -246,7 +299,7 @@ func badRequest(res http.ResponseWriter) { res.Write([]byte(`{"error":"http: bad request"}`)) } -func (h *HTTPListenerV2) AuthenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) { +func (h *HTTPListenerV2) authenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) { if h.BasicUsername != "" && h.BasicPassword != "" { reqUsername, reqPassword, ok := req.BasicAuth() if !ok || @@ -269,6 +322,7 @@ func init() { TimeFunc: time.Now, Path: "/telegraf", Methods: []string{"POST", "PUT"}, + DataSource: body, } }) } diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go index ab0c89f81..3b647905d 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2_test.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go @@ -53,6 +53,7 @@ func newTestHTTPListenerV2() *HTTPListenerV2 { Parser: parser, TimeFunc: time.Now, MaxBodySize: internal.Size{Size: 70000}, + DataSource: "body", } return listener } @@ -377,5 +378,51 @@ func TestWriteHTTPEmpty(t *testing.T) { require.EqualValues(t, 204, resp.StatusCode) } +func TestWriteHTTPQueryParams(t *testing.T) { + parser, _ := parsers.NewFormDataParser("query_measurement", nil, []string{"tagKey"}) + listener := newTestHTTPListenerV2() + listener.DataSource = "query" + listener.Parser = parser + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + resp, err := http.Post(createURL(listener, "http", "/write", "tagKey=tagValue&fieldKey=42"), "", bytes.NewBuffer([]byte(emptyMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "query_measurement", + map[string]interface{}{"fieldKey": float64(42)}, + 
map[string]string{"tagKey": "tagValue"}, + ) +} + +func TestWriteHTTPFormData(t *testing.T) { + parser, _ := parsers.NewFormDataParser("query_measurement", nil, []string{"tagKey"}) + listener := newTestHTTPListenerV2() + listener.Parser = parser + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + resp, err := http.PostForm(createURL(listener, "http", "/write", ""), url.Values{ + "tagKey": {"tagValue"}, + "fieldKey": {"42"}, + }) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "query_measurement", + map[string]interface{}{"fieldKey": float64(42)}, + map[string]string{"tagKey": "tagValue"}, + ) +} + const hugeMetric = `super_long_metric,foo=bar clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0
,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i
,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i
,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragme
ntation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_co
nnections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted
_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,r
epl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user
=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels
=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i ` diff --git a/plugins/parsers/formdata/README.md b/plugins/parsers/formdata/README.md new file mode 100644 index 000000000..4d3c6af31 --- /dev/null +++ b/plugins/parsers/formdata/README.md @@ -0,0 +1,76 @@ +# FormData + +The FormData data format parses [query string][query_string] (`application/x-www-form-urlencoded`) data into metric fields. + +A common use case is to pair it with the http_listener_v2 input plugin to parse the request body or query params. + +### Configuration + +```toml +[[inputs.http_listener_v2]] + ## Address and port to host HTTP listener on + service_address = ":8080" + + ## Part of the request to consume. + ## Available options are "body" and "query". + ## To consume standard query params or application/x-www-form-urlencoded body, + ## set the data_format option to "formdata". + data_source = "body" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "formdata" + + ## Array of key names which should be collected as tags. + ## By default, keys with string values are ignored if not marked as tags. + form_data_tag_keys = ["tag1"] +``` + +### Examples + +#### Basic parsing Config: ```toml [[inputs.http_listener_v2]] + service_address = ":8080" + data_source = "query" + data_format = "formdata" + name_override = "mymetric" +``` + +Request: ```bash curl -i -XGET 'http://localhost:8080/telegraf?field=0.42' ``` + +Output: ``` +mymetric field=0.42 +``` + +#### Tags and key filter + +Config: ```toml +[[inputs.http_listener_v2]] + service_address = ":8080" + data_source = "query" + data_format = "formdata" + name_override = "mymetric" + fielddrop = ["tag2", "field2"] + form_data_tag_keys = ["tag1"] +``` + +Request: ```bash +curl -i -XGET 'http://localhost:8080/telegraf?tag1=foo&tag2=bar&field1=42&field2=69' +``` + +Output: ``` +mymetric,tag1=foo field1=42 +``` + +[query_string]: https://en.wikipedia.org/wiki/Query_string diff --git a/plugins/parsers/formdata/parser.go b/plugins/parsers/formdata/parser.go new file mode 100644 index 000000000..0fc4c572b --- /dev/null +++ b/plugins/parsers/formdata/parser.go @@ -0,0 +1,130 @@ +package formdata + +import ( + "bytes" + "fmt" + "net/url" + "strconv" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +var ( + // ErrNoMetric is returned when no metric is found in the input line + ErrNoMetric = fmt.Errorf("no metric in line") +) + +// Parser decodes "application/x-www-form-urlencoded" data into metrics +type Parser struct { + MetricName string + DefaultTags map[string]string + TagKeys []string + AllowedKeys []string +} + +// Parse converts a slice of bytes in "application/x-www-form-urlencoded" format into metrics +func (p Parser) Parse(buf []byte) ([]telegraf.Metric, error) { + buf = bytes.TrimSpace(buf) + if len(buf) == 0 { + return make([]telegraf.Metric, 0), nil + } + + values, err := url.ParseQuery(string(buf)) + + if err != nil { + return nil, err + } + + if len(p.AllowedKeys) > 0 { + values = 
p.filterAllowedKeys(values) + } + + tags := p.extractTags(values) + fields := p.parseFields(values) + + for key, value := range p.DefaultTags { + tags[key] = value + } + + metric, err := metric.New(p.MetricName, tags, fields, time.Now().UTC()) + + if err != nil { + return nil, err + } + + return []telegraf.Metric{metric}, nil +} + +// ParseLine delegates a single line of text to the Parse function +func (p Parser) ParseLine(line string) (telegraf.Metric, error) { + metrics, err := p.Parse([]byte(line)) + + if err != nil { + return nil, err + } + + if len(metrics) < 1 { + return nil, ErrNoMetric + } + + return metrics[0], nil +} + +// SetDefaultTags sets the default tags for every metric +func (p *Parser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} + +func (p Parser) filterAllowedKeys(original url.Values) url.Values { + result := make(url.Values) + + for _, key := range p.AllowedKeys { + value, exists := original[key] + + if !exists { + continue + } + + result[key] = value + } + + return result +} + +func (p Parser) extractTags(values url.Values) map[string]string { + tags := make(map[string]string) + for _, key := range p.TagKeys { + value, exists := values[key] + + if !exists || len(key) == 0 { + continue + } + + tags[key] = value[0] + delete(values, key) + } + + return tags +} + +func (p Parser) parseFields(values url.Values) map[string]interface{} { + fields := make(map[string]interface{}) + + for key, value := range values { + if len(key) == 0 || len(value) == 0 { + continue + } + + field, err := strconv.ParseFloat(value[0], 64) + + if err != nil { + continue + } + + fields[key] = field + } + + return fields +} diff --git a/plugins/parsers/formdata/parser_test.go b/plugins/parsers/formdata/parser_test.go new file mode 100644 index 000000000..cd837a057 --- /dev/null +++ b/plugins/parsers/formdata/parser_test.go @@ -0,0 +1,172 @@ +package formdata + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +const ( + validFormData = "tag1=foo&tag2=bar&tag3=baz&field1=42&field2=69" + encodedFormData = "tag1=%24%24%24&field1=1e%2B3" + notEscapedProperlyFormData = "invalid=%Y5" + blankKeyFormData = "=42&field2=69" + emptyFormData = "" +) + +func TestParseValidFormData(t *testing.T) { + parser := Parser{ + MetricName: "formdata_test", + } + + metrics, err := parser.Parse([]byte(validFormData)) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "formdata_test", metrics[0].Name()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]interface{}{ + "field1": float64(42), + "field2": float64(69), + }, metrics[0].Fields()) +} + +func TestParseLineValidFormData(t *testing.T) { + parser := Parser{ + MetricName: "formdata_test", + } + + metric, err := parser.ParseLine(validFormData) + require.NoError(t, err) + require.Equal(t, "formdata_test", metric.Name()) + require.Equal(t, map[string]string{}, metric.Tags()) + require.Equal(t, map[string]interface{}{ + "field1": float64(42), + "field2": float64(69), + }, metric.Fields()) +} + +func TestParseValidFormDataWithTags(t *testing.T) { + parser := Parser{ + MetricName: "formdata_test", + TagKeys: []string{"tag1", "tag2"}, + } + + metrics, err := parser.Parse([]byte(validFormData)) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "formdata_test", metrics[0].Name()) + require.Equal(t, map[string]string{ + "tag1": "foo", + "tag2": "bar", + }, metrics[0].Tags()) + require.Equal(t, map[string]interface{}{ + "field1": float64(42), + "field2": 
float64(69), + }, metrics[0].Fields()) +} + +func TestParseValidFormDataDefaultTags(t *testing.T) { + parser := Parser{ + MetricName: "formdata_test", + TagKeys: []string{"tag1", "tag2"}, + DefaultTags: map[string]string{"tag4": "default"}, + } + + metrics, err := parser.Parse([]byte(validFormData)) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "formdata_test", metrics[0].Name()) + require.Equal(t, map[string]string{ + "tag1": "foo", + "tag2": "bar", + "tag4": "default", + }, metrics[0].Tags()) + require.Equal(t, map[string]interface{}{ + "field1": float64(42), + "field2": float64(69), + }, metrics[0].Fields()) +} + +func TestParseValidFormDataDefaultTagsOverride(t *testing.T) { + parser := Parser{ + MetricName: "formdata_test", + TagKeys: []string{"tag1", "tag2"}, + DefaultTags: map[string]string{"tag1": "default"}, + } + + metrics, err := parser.Parse([]byte(validFormData)) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "formdata_test", metrics[0].Name()) + require.Equal(t, map[string]string{ + "tag1": "default", + "tag2": "bar", + }, metrics[0].Tags()) + require.Equal(t, map[string]interface{}{ + "field1": float64(42), + "field2": float64(69), + }, metrics[0].Fields()) +} + +func TestParseEncodedFormData(t *testing.T) { + parser := Parser{ + MetricName: "formdata_test", + TagKeys: []string{"tag1"}, + } + + metrics, err := parser.Parse([]byte(encodedFormData)) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "formdata_test", metrics[0].Name()) + require.Equal(t, map[string]string{ + "tag1": "$$$", + }, metrics[0].Tags()) + require.Equal(t, map[string]interface{}{ + "field1": float64(1000), + }, metrics[0].Fields()) +} + +func TestParseInvalidFormDataError(t *testing.T) { + parser := Parser{ + MetricName: "formdata_test", + } + + metrics, err := parser.Parse([]byte(notEscapedProperlyFormData)) + require.Error(t, err) + require.Len(t, metrics, 0) +} + +func TestParseInvalidFormDataEmptyKey(t *testing.T) { + parser := Parser{ + MetricName: "formdata_test", + } + + // Empty key for field + metrics, err := parser.Parse([]byte(blankKeyFormData)) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]interface{}{ + "field2": float64(69), + }, metrics[0].Fields()) + + // Empty key for tag + parser.TagKeys = []string{""} + metrics, err = parser.Parse([]byte(blankKeyFormData)) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]interface{}{ + "field2": float64(69), + }, metrics[0].Fields()) +} + +func TestParseInvalidFormDataEmptyString(t *testing.T) { + parser := Parser{ + MetricName: "formdata_test", + } + + metrics, err := parser.Parse([]byte(emptyFormData)) + require.NoError(t, err) + require.Len(t, metrics, 0) +} diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index e6e15469f..3511a99d7 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -8,6 +8,7 @@ import ( "github.com/influxdata/telegraf/plugins/parsers/collectd" "github.com/influxdata/telegraf/plugins/parsers/csv" "github.com/influxdata/telegraf/plugins/parsers/dropwizard" + "github.com/influxdata/telegraf/plugins/parsers/formdata" "github.com/influxdata/telegraf/plugins/parsers/graphite" "github.com/influxdata/telegraf/plugins/parsers/grok" "github.com/influxdata/telegraf/plugins/parsers/influx" @@ -141,6 +142,9 @@ type Config struct 
{ CSVTimestampColumn string `toml:"csv_timestamp_column"` CSVTimestampFormat string `toml:"csv_timestamp_format"` CSVTrimSpace bool `toml:"csv_trim_space"` + + // FormData configuration + FormDataTagKeys []string `toml:"form_data_tag_keys"` } // NewParser returns a Parser interface based on the given config. @@ -209,6 +213,12 @@ func NewParser(config *Config) (Parser, error) { config.DefaultTags) case "logfmt": parser, err = NewLogFmtParser(config.MetricName, config.DefaultTags) + case "formdata": + parser, err = NewFormDataParser( + config.MetricName, + config.DefaultTags, + config.FormDataTagKeys, + ) default: err = fmt.Errorf("Invalid data format: %s", config.DataFormat) } @@ -400,3 +410,15 @@ func NewLogFmtParser(metricName string, defaultTags map[string]string) (Parser, func NewWavefrontParser(defaultTags map[string]string) (Parser, error) { return wavefront.NewWavefrontParser(defaultTags), nil } + +func NewFormDataParser( + metricName string, + defaultTags map[string]string, + tagKeys []string, +) (Parser, error) { + return &formdata.Parser{ + MetricName: metricName, + DefaultTags: defaultTags, + TagKeys: tagKeys, + }, nil +} From 9b338410cb2451a8f08f51cd4e2e23ff96d714a8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 17 Jun 2019 14:44:25 -0700 Subject: [PATCH 0937/1815] Rename formdata parser to form_urlencoded --- internal/config/config.go | 6 +- plugins/inputs/http_listener_v2/README.md | 10 +-- .../http_listener_v2/http_listener_v2.go | 45 +++++------ plugins/parsers/form_urlencoded/README.md | 57 ++++++++++++++ .../{formdata => form_urlencoded}/parser.go | 7 +- .../parser_test.go | 32 ++++---- plugins/parsers/formdata/README.md | 76 ------------------- plugins/parsers/registry.go | 14 ++-- 8 files changed, 107 insertions(+), 140 deletions(-) create mode 100644 plugins/parsers/form_urlencoded/README.md rename plugins/parsers/{formdata => form_urlencoded}/parser.go (99%) rename plugins/parsers/{formdata => form_urlencoded}/parser_test.go (84%) delete mode 100644 plugins/parsers/formdata/README.md diff --git a/internal/config/config.go b/internal/config/config.go index 0e4c6f23f..a5315b9b6 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1726,12 +1726,12 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { } } - if node, ok := tbl.Fields["form_data_tag_keys"]; ok { + if node, ok := tbl.Fields["form_urlencoded_tag_keys"]; ok { if kv, ok := node.(*ast.KeyValue); ok { if ary, ok := kv.Value.(*ast.Array); ok { for _, elem := range ary.Value { if str, ok := elem.(*ast.String); ok { - c.FormDataTagKeys = append(c.FormDataTagKeys, str.Value) + c.FormUrlencodedTagKeys = append(c.FormUrlencodedTagKeys, str.Value) } } } @@ -1779,7 +1779,7 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { delete(tbl.Fields, "csv_timestamp_column") delete(tbl.Fields, "csv_timestamp_format") delete(tbl.Fields, "csv_trim_space") - delete(tbl.Fields, "form_data_tag_keys") + delete(tbl.Fields, "form_urlencoded_tag_keys") return c, nil } diff --git a/plugins/inputs/http_listener_v2/README.md b/plugins/inputs/http_listener_v2/README.md index 4829e044d..b40e3554f 100644 --- a/plugins/inputs/http_listener_v2/README.md +++ b/plugins/inputs/http_listener_v2/README.md @@ -31,6 +31,10 @@ This is a sample configuration for the plugin. ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) # max_body_size = "500MB" + ## Part of the request to consume. Available options are "body" and + ## "query". 
+ # data_source = "body" + ## Set one or more allowed client CA certificate file names to ## enable mutually authenticated TLS connections # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] @@ -49,12 +53,6 @@ This is a sample configuration for the plugin. ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" - - ## Part of the request to consume. - ## Available options are "body" and "query". - ## Note that the data source and data format are independent properties. - ## To consume standard query params and POST forms - use "formdata" as a data_format. - # data_source = "body" ``` ### Metrics: diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index 44581f11b..5427b384d 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -35,21 +35,18 @@ type TimeFunc func() time.Time // HTTPListenerV2 is an input plugin that collects external metrics sent via HTTP type HTTPListenerV2 struct { - ServiceAddress string - Path string - Methods []string - DataSource string - - ReadTimeout internal.Duration - WriteTimeout internal.Duration - MaxBodySize internal.Size - Port int - + ServiceAddress string `toml:"service_address"` + Path string `toml:"path"` + Methods []string `toml:"methods"` + DataSource string `toml:"data_source"` + ReadTimeout internal.Duration `toml:"read_timeout"` + WriteTimeout internal.Duration `toml:"write_timeout"` + MaxBodySize internal.Size `toml:"max_body_size"` + Port int `toml:"port"` + BasicUsername string `toml:"basic_username"` + BasicPassword string `toml:"basic_password"` tlsint.ServerConfig - BasicUsername string - BasicPassword string - TimeFunc wg sync.WaitGroup @@ -79,7 +76,11 @@ const sampleConfig = ` ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) # max_body_size = "500MB" - ## Set one or more allowed client CA certificate file names to + ## Part of the request to consume. Available options are "body" and + ## "query". + # data_source = "body" + + ## Set one or more allowed client CA certificate file names to ## enable mutually authenticated TLS connections # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] @@ -97,12 +98,6 @@ const sampleConfig = ` ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" - - ## Part of the request to consume. - ## Available options are "body" and "query". - ## Note that the data source and data format are independent properties. - ## To consume standard query params and POST forms - use "formdata" as a data_format. - # data_source = "body" ` func (h *HTTPListenerV2) SampleConfig() string { @@ -167,7 +162,7 @@ func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { server.Serve(h.listener) }() - log.Printf("I! Started HTTP listener V2 service on %s\n", h.ServiceAddress) + log.Printf("I! [inputs.http_listener_v2] Listening on %s", listener.Addr().String()) return nil } @@ -176,8 +171,6 @@ func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { func (h *HTTPListenerV2) Stop() { h.listener.Close() h.wg.Wait() - - log.Println("I! 
Stopped HTTP listener V2 service on ", h.ServiceAddress) } func (h *HTTPListenerV2) ServeHTTP(res http.ResponseWriter, req *http.Request) { @@ -226,13 +219,13 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) metrics, err := h.Parse(bytes) if err != nil { - log.Println("D! " + err.Error()) + log.Printf("D! [inputs.http_listener_v2] Parse error: %v", err) badRequest(res) return } for _, m := range metrics { - h.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) + h.acc.AddMetric(m) } res.WriteHeader(http.StatusNoContent) @@ -268,7 +261,7 @@ func (h *HTTPListenerV2) collectQuery(res http.ResponseWriter, req *http.Request query, err := url.QueryUnescape(rawQuery) if err != nil { - log.Println("D! " + err.Error()) + log.Printf("D! [inputs.http_listener_v2] Error parsing query: %v", err) badRequest(res) return nil, false } diff --git a/plugins/parsers/form_urlencoded/README.md b/plugins/parsers/form_urlencoded/README.md new file mode 100644 index 000000000..0a07b7b99 --- /dev/null +++ b/plugins/parsers/form_urlencoded/README.md @@ -0,0 +1,57 @@ +# Form Urlencoded + + +The `form_urlencoded` data format parses `application/x-www-form-urlencoded` +data, as commonly used in the [query string][query_string]. + +A common use case is to pair it with the [http_listener_v2][] input plugin to parse +the request body or query params. + +### Configuration + +```toml +[[inputs.http_listener_v2]] + ## Address and port to host HTTP listener on + service_address = ":8080" + + ## Part of the request to consume. Available options are "body" and + ## "query". + data_source = "body" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "form_urlencoded" + + ## Array of key names which should be collected as tags. + ## By default, keys with string values are ignored if not marked as tags. 
+ form_urlencoded_tag_keys = ["tag1"] +``` + +### Examples + +#### Basic parsing + +Config: +```toml +[[inputs.http_listener_v2]] + name_override = "mymetric" + service_address = ":8080" + data_source = "query" + data_format = "form_urlencoded" + form_urlencoded_tag_keys = ["tag1"] +``` + +Request: +```bash +curl -i -XGET 'http://localhost:8080/telegraf?tag1=foo&field1=0.42&field2=42' +``` + +Output: +``` +mymetric,tag1=foo field1=0.42,field2=42 +``` + +[query_string]: https://en.wikipedia.org/wiki/Query_string +[http_listener_v2]: /plugins/inputs/http_listener_v2 diff --git a/plugins/parsers/formdata/parser.go b/plugins/parsers/form_urlencoded/parser.go similarity index 99% rename from plugins/parsers/formdata/parser.go rename to plugins/parsers/form_urlencoded/parser.go index 0fc4c572b..f38d87a80 100644 --- a/plugins/parsers/formdata/parser.go +++ b/plugins/parsers/form_urlencoded/parser.go @@ -1,4 +1,4 @@ -package formdata +package form_urlencoded import ( "bytes" @@ -32,7 +32,6 @@ func (p Parser) Parse(buf []byte) ([]telegraf.Metric, error) { } values, err := url.ParseQuery(string(buf)) - if err != nil { return nil, err } @@ -49,7 +48,6 @@ func (p Parser) Parse(buf []byte) ([]telegraf.Metric, error) { } metric, err := metric.New(p.MetricName, tags, fields, time.Now().UTC()) - if err != nil { return nil, err } @@ -60,7 +58,6 @@ func (p Parser) Parse(buf []byte) ([]telegraf.Metric, error) { // ParseLine delegates a single line of text to the Parse function func (p Parser) ParseLine(line string) (telegraf.Metric, error) { metrics, err := p.Parse([]byte(line)) - if err != nil { return nil, err } @@ -82,7 +79,6 @@ func (p Parser) filterAllowedKeys(original url.Values) url.Values { for _, key := range p.AllowedKeys { value, exists := original[key] - if !exists { continue } @@ -118,7 +114,6 @@ func (p Parser) parseFields(values url.Values) map[string]interface{} { } field, err := strconv.ParseFloat(value[0], 64) - if err != nil { continue } diff --git a/plugins/parsers/formdata/parser_test.go b/plugins/parsers/form_urlencoded/parser_test.go similarity index 84% rename from plugins/parsers/formdata/parser_test.go rename to plugins/parsers/form_urlencoded/parser_test.go index cd837a057..931d5a4ca 100644 --- a/plugins/parsers/formdata/parser_test.go +++ b/plugins/parsers/form_urlencoded/parser_test.go @@ -1,4 +1,4 @@ -package formdata +package form_urlencoded import ( "testing" @@ -16,13 +16,13 @@ const ( func TestParseValidFormData(t *testing.T) { parser := Parser{ - MetricName: "formdata_test", + MetricName: "form_urlencoded_test", } metrics, err := parser.Parse([]byte(validFormData)) require.NoError(t, err) require.Len(t, metrics, 1) - require.Equal(t, "formdata_test", metrics[0].Name()) + require.Equal(t, "form_urlencoded_test", metrics[0].Name()) require.Equal(t, map[string]string{}, metrics[0].Tags()) require.Equal(t, map[string]interface{}{ "field1": float64(42), @@ -32,12 +32,12 @@ func TestParseValidFormData(t *testing.T) { func TestParseLineValidFormData(t *testing.T) { parser := Parser{ - MetricName: "formdata_test", + MetricName: "form_urlencoded_test", } metric, err := parser.ParseLine(validFormData) require.NoError(t, err) - require.Equal(t, "formdata_test", metric.Name()) + require.Equal(t, "form_urlencoded_test", metric.Name()) require.Equal(t, map[string]string{}, metric.Tags()) require.Equal(t, map[string]interface{}{ "field1": float64(42), @@ -47,14 +47,14 @@ func TestParseLineValidFormData(t *testing.T) { func TestParseValidFormDataWithTags(t *testing.T) { parser := Parser{ - 
MetricName: "formdata_test", + MetricName: "form_urlencoded_test", TagKeys: []string{"tag1", "tag2"}, } metrics, err := parser.Parse([]byte(validFormData)) require.NoError(t, err) require.Len(t, metrics, 1) - require.Equal(t, "formdata_test", metrics[0].Name()) + require.Equal(t, "form_urlencoded_test", metrics[0].Name()) require.Equal(t, map[string]string{ "tag1": "foo", "tag2": "bar", @@ -67,7 +67,7 @@ func TestParseValidFormDataWithTags(t *testing.T) { func TestParseValidFormDataDefaultTags(t *testing.T) { parser := Parser{ - MetricName: "formdata_test", + MetricName: "form_urlencoded_test", TagKeys: []string{"tag1", "tag2"}, DefaultTags: map[string]string{"tag4": "default"}, } @@ -75,7 +75,7 @@ func TestParseValidFormDataDefaultTags(t *testing.T) { metrics, err := parser.Parse([]byte(validFormData)) require.NoError(t, err) require.Len(t, metrics, 1) - require.Equal(t, "formdata_test", metrics[0].Name()) + require.Equal(t, "form_urlencoded_test", metrics[0].Name()) require.Equal(t, map[string]string{ "tag1": "foo", "tag2": "bar", @@ -89,7 +89,7 @@ func TestParseValidFormDataDefaultTags(t *testing.T) { func TestParseValidFormDataDefaultTagsOverride(t *testing.T) { parser := Parser{ - MetricName: "formdata_test", + MetricName: "form_urlencoded_test", TagKeys: []string{"tag1", "tag2"}, DefaultTags: map[string]string{"tag1": "default"}, } @@ -97,7 +97,7 @@ func TestParseValidFormDataDefaultTagsOverride(t *testing.T) { metrics, err := parser.Parse([]byte(validFormData)) require.NoError(t, err) require.Len(t, metrics, 1) - require.Equal(t, "formdata_test", metrics[0].Name()) + require.Equal(t, "form_urlencoded_test", metrics[0].Name()) require.Equal(t, map[string]string{ "tag1": "default", "tag2": "bar", @@ -110,14 +110,14 @@ func TestParseValidFormDataDefaultTagsOverride(t *testing.T) { func TestParseEncodedFormData(t *testing.T) { parser := Parser{ - MetricName: "formdata_test", + MetricName: "form_urlencoded_test", TagKeys: []string{"tag1"}, } metrics, err := parser.Parse([]byte(encodedFormData)) require.NoError(t, err) require.Len(t, metrics, 1) - require.Equal(t, "formdata_test", metrics[0].Name()) + require.Equal(t, "form_urlencoded_test", metrics[0].Name()) require.Equal(t, map[string]string{ "tag1": "$$$", }, metrics[0].Tags()) @@ -128,7 +128,7 @@ func TestParseEncodedFormData(t *testing.T) { func TestParseInvalidFormDataError(t *testing.T) { parser := Parser{ - MetricName: "formdata_test", + MetricName: "form_urlencoded_test", } metrics, err := parser.Parse([]byte(notEscapedProperlyFormData)) @@ -138,7 +138,7 @@ func TestParseInvalidFormDataError(t *testing.T) { func TestParseInvalidFormDataEmptyKey(t *testing.T) { parser := Parser{ - MetricName: "formdata_test", + MetricName: "form_urlencoded_test", } // Empty key for field @@ -163,7 +163,7 @@ func TestParseInvalidFormDataEmptyKey(t *testing.T) { func TestParseInvalidFormDataEmptyString(t *testing.T) { parser := Parser{ - MetricName: "formdata_test", + MetricName: "form_urlencoded_test", } metrics, err := parser.Parse([]byte(emptyFormData)) diff --git a/plugins/parsers/formdata/README.md b/plugins/parsers/formdata/README.md deleted file mode 100644 index 4d3c6af31..000000000 --- a/plugins/parsers/formdata/README.md +++ /dev/null @@ -1,76 +0,0 @@ -# FormData - -The FormData data format parses a [query string/x-www-form-urlencoded][query_string] data into metric fields. - -Common use case is to pair it with http listener input plugin to parse request body or query params. 
- -### Configuration - -```toml -[[inputs.http_listener_v2]] - ## Address and port to host HTTP listener on - service_address = ":8080" - - ## Part of the request to consume. - ## Available options are "body" and "query". - ## To consume standard query params or application/x-www-form-urlencoded body, - ## set the data_format option to "formdata". - data_source = "body" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "formdata" - - ## Array of key names which should be collected as tags. - ## By default, keys with string value are ignored if not marked as tags. - form_data_tag_keys = ["tag1"] -``` - -### Examples - -#### Basic parsing -Config: -```toml -[[inputs.http_listener_v2]] - service_address = ":8080" - data_source = "query" - data_format = "formdata" - name_override = "mymetric" -``` - -Request: -```bash -curl -i -XGET 'http://localhost:8080/telegraf?field=0.42' -``` - -Output: -``` -mymetric field=0.42 -``` - -#### Tags and key filter - -Config: -```toml -[[inputs.http_listener_v2]] - service_address = ":8080" - data_source = "query" - data_format = "formdata" - name_override = "mymetric" - fielddrop = ["tag2", "field2"] - form_data_tag_keys = ["tag1"] -``` - -Request: -```bash -curl -i -XGET 'http://localhost:8080/telegraf?tag1=foo&tag2=bar&field1=42&field2=69' -``` - -Output: -``` -mymetric,tag1=foo field1=42 -``` - -[query_string]: https://en.wikipedia.org/wiki/Query_string diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 3511a99d7..2e8d20819 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -8,7 +8,7 @@ import ( "github.com/influxdata/telegraf/plugins/parsers/collectd" "github.com/influxdata/telegraf/plugins/parsers/csv" "github.com/influxdata/telegraf/plugins/parsers/dropwizard" - "github.com/influxdata/telegraf/plugins/parsers/formdata" + "github.com/influxdata/telegraf/plugins/parsers/form_urlencoded" "github.com/influxdata/telegraf/plugins/parsers/graphite" "github.com/influxdata/telegraf/plugins/parsers/grok" "github.com/influxdata/telegraf/plugins/parsers/influx" @@ -144,7 +144,7 @@ type Config struct { CSVTrimSpace bool `toml:"csv_trim_space"` // FormData configuration - FormDataTagKeys []string `toml:"form_data_tag_keys"` + FormUrlencodedTagKeys []string `toml:"form_urlencoded_tag_keys"` } // NewParser returns a Parser interface based on the given config. 
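For context, a minimal, self-contained sketch of driving the renamed parser directly; it relies only on the `Parser` fields and the `Parse` method that appear in this patch (the `main` wrapper and the metric name are illustrative):

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/plugins/parsers/form_urlencoded"
)

func main() {
	parser := form_urlencoded.Parser{
		MetricName: "mymetric",
		TagKeys:    []string{"tag1"},
	}

	// Same payload as the query string in the README example.
	metrics, err := parser.Parse([]byte("tag1=foo&field1=0.42&field2=42"))
	if err != nil {
		panic(err)
	}

	for _, m := range metrics {
		fmt.Println(m.Name(), m.Tags(), m.Fields())
	}
}
```

This produces one metric equivalent to `mymetric,tag1=foo field1=0.42,field2=42`, matching the README example above.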
@@ -213,11 +213,11 @@ func NewParser(config *Config) (Parser, error) {
 			config.DefaultTags)
 	case "logfmt":
 		parser, err = NewLogFmtParser(config.MetricName, config.DefaultTags)
-	case "formdata":
-		parser, err = NewFormDataParser(
+	case "form_urlencoded":
+		parser, err = NewFormUrlencodedParser(
 			config.MetricName,
 			config.DefaultTags,
-			config.FormDataTagKeys,
+			config.FormUrlencodedTagKeys,
 		)
 	default:
 		err = fmt.Errorf("Invalid data format: %s", config.DataFormat)
@@ -411,12 +411,12 @@ func NewWavefrontParser(defaultTags map[string]string) (Parser, error) {
 	return wavefront.NewWavefrontParser(defaultTags), nil
 }

-func NewFormDataParser(
+func NewFormUrlencodedParser(
 	metricName string,
 	defaultTags map[string]string,
 	tagKeys []string,
 ) (Parser, error) {
-	return &formdata.Parser{
+	return &form_urlencoded.Parser{
 		MetricName:  metricName,
 		DefaultTags: defaultTags,
 		TagKeys:     tagKeys,

From 4199114e4cdde29a4775884fcb64bc6406de7c74 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 17 Jun 2019 14:46:49 -0700
Subject: [PATCH 0938/1815] Update changelog and readme

---
 CHANGELOG.md | 4 ++++
 README.md    | 1 +
 2 files changed, 5 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 348b382da..ccee1e38a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,9 @@
 ## v1.12 [unreleased]

+#### New Parsers
+
+- [form_urlencoded](/plugins/parsers/form_urlencoded/README.md) - Contributed by @byonchev
+
 #### New Processors

 - [date](/plugins/processors/date/README.md) - Contributed by @influxdata
diff --git a/README.md b/README.md
index cb01c303f..9580e2b31 100644
--- a/README.md
+++ b/README.md
@@ -304,6 +304,7 @@ For documentation on the latest development code see the [documentation index][d
 - [Collectd](/plugins/parsers/collectd)
 - [CSV](/plugins/parsers/csv)
 - [Dropwizard](/plugins/parsers/dropwizard)
+- [FormUrlencoded](/plugins/parsers/form_urlencoded)
 - [Graphite](/plugins/parsers/graphite)
 - [Grok](/plugins/parsers/grok)
 - [JSON](/plugins/parsers/json)

From 9f3c1c6ec79f2d181792f411c34a1348f13566ea Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 17 Jun 2019 16:20:09 -0700
Subject: [PATCH 0939/1815] Fix http_listener_v2 tests

---
 plugins/inputs/http_listener_v2/http_listener_v2_test.go | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go
index 3b647905d..c27b022b2 100644
--- a/plugins/inputs/http_listener_v2/http_listener_v2_test.go
+++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go
@@ -16,7 +16,6 @@ import (
 	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/plugins/parsers"
 	"github.com/influxdata/telegraf/testutil"
-
 	"github.com/stretchr/testify/require"
 )

@@ -379,7 +378,7 @@ func TestWriteHTTPEmpty(t *testing.T) {
 }

 func TestWriteHTTPQueryParams(t *testing.T) {
-	parser, _ := parsers.NewFormDataParser("query_measurement", nil, []string{"tagKey"})
+	parser, _ := parsers.NewFormUrlencodedParser("query_measurement", nil, []string{"tagKey"})
 	listener := newTestHTTPListenerV2()
 	listener.DataSource = "query"
 	listener.Parser = parser
@@ -401,7 +400,7 @@ func TestWriteHTTPQueryParams(t *testing.T) {
 }

 func TestWriteHTTPFormData(t *testing.T) {
-	parser, _ := parsers.NewFormDataParser("query_measurement", nil, []string{"tagKey"})
+	parser, _ := parsers.NewFormUrlencodedParser("query_measurement", nil, []string{"tagKey"})
 	listener := newTestHTTPListenerV2()
 	listener.Parser = parser

From f8bef1486095138bd3b8b4cfeb8d69e977b4c0ce Mon Sep 17 00:00:00 2001
From: prashanthjbabu
Date: Wed, 19 Jun 2019 04:26:55 +0530
Subject: [PATCH 0940/1815] Add docker log plugin (#4773)

---
 plugins/inputs/all/all.go                |   1 +
 plugins/inputs/docker_log/README.md      |  60 +++
 plugins/inputs/docker_log/client.go      |  63 +++
 plugins/inputs/docker_log/docker_logs.go | 472 +++++++++++++++++++++++
 4 files changed, 596 insertions(+)
 create mode 100644 plugins/inputs/docker_log/README.md
 create mode 100644 plugins/inputs/docker_log/client.go
 create mode 100644 plugins/inputs/docker_log/docker_logs.go

diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index ef032fe47..487f92b1f 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -32,6 +32,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/dmcache"
 	_ "github.com/influxdata/telegraf/plugins/inputs/dns_query"
 	_ "github.com/influxdata/telegraf/plugins/inputs/docker"
+	_ "github.com/influxdata/telegraf/plugins/inputs/docker_log"
 	_ "github.com/influxdata/telegraf/plugins/inputs/dovecot"
 	_ "github.com/influxdata/telegraf/plugins/inputs/ecs"
 	_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
diff --git a/plugins/inputs/docker_log/README.md b/plugins/inputs/docker_log/README.md
new file mode 100644
index 000000000..d04adba33
--- /dev/null
+++ b/plugins/inputs/docker_log/README.md
@@ -0,0 +1,60 @@
+# Docker Log Input Plugin
+
+The docker log plugin uses the Docker Engine API to gather logs from running
+docker containers.
+
+The plugin uses the [Official Docker Client](https://github.com/moby/moby/tree/master/client)
+to gather logs from the [Engine API](https://docs.docker.com/engine/api/v1.24/).
+
+Note: This plugin works only for containers with the `local`, `json-file`, or
+`journald` logging driver.
+
+### Configuration:
+
+```toml
+# Read logs from docker containers
+[[inputs.docker_log]]
+  ## Docker Endpoint
+  ## To use TCP, set endpoint = "tcp://[ip]:[port]"
+  ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
+  endpoint = "unix:///var/run/docker.sock"
+
+  ## Containers to include and exclude. Collect all if empty. Globs accepted.
+  container_name_include = []
+  container_name_exclude = []
+
+  ## Container states to include and exclude. Globs accepted.
+  ## When empty only containers in the "running" state will be captured.
+  # container_state_include = []
+  # container_state_exclude = []
+
+  ## docker labels to include and exclude as tags. Globs accepted.
+  ## Note that an empty array for both will include all labels as tags
+  docker_label_include = []
+  docker_label_exclude = []
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+```
+
+#### Environment Configuration
+
+When using the `"ENV"` endpoint, the connection is configured using the
+[cli Docker environment variables](https://godoc.org/github.com/moby/moby/client#NewEnvClient).
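+
+For reference, a minimal sketch of the client this resolves to, assuming the
+variables conventionally honored by the Docker client's `FromEnv` option
+(`DOCKER_HOST`, `DOCKER_API_VERSION`, `DOCKER_CERT_PATH`, `DOCKER_TLS_VERIFY`):
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/docker/docker/api/types"
+	docker "github.com/docker/docker/client"
+)
+
+func main() {
+	// FromEnv reads the environment variables listed above.
+	cli, err := docker.NewClientWithOpts(docker.FromEnv)
+	if err != nil {
+		panic(err)
+	}
+	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
+	if err != nil {
+		panic(err)
+	}
+	for _, c := range containers {
+		fmt.Println(c.ID, c.Names)
+	}
+}
+```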
+ +### Metrics: + +- docker_log + - tags: + - container_id + - container_name + - stream + - fields: + - message +### Example Output: + +``` +docker_log,com.docker.compose.config-hash=e19e13df8fd01ba2d7c1628158fca45cc91afbbe9661b2d30550547eb53a861e,com.docker.compose.container-number=1,com.docker.compose.oneoff=False,com.docker.compose.project=distribution,com.docker.compose.service=influxdb,com.docker.compose.version=1.21.2,containerId=fce475bbfa4c8380ff858d5d767f78622ca6de955b525477624c2b7896a5b8e4,containerName=aicon-influxdb,host=prash-laptop,logType=stderr log=" [httpd] 172.23.0.2 - aicon_admin [13/Apr/2019:08:35:53 +0000] \"POST /query?db=&q=SHOW+SUBSCRIPTIONS HTTP/1.1\" 200 232 \"-\" \"KapacitorInfluxDBClient\" 2661bc9c-5dc7-11e9-82f8-0242ac170007 1360\n" 1555144553541000000 +docker_log,com.docker.compose.config-hash=fd91b3b096c7ab346971c681b88fe1357c609dcc6850e4ea5b1287ad28a57e5d,com.docker.compose.container-number=1,com.docker.compose.oneoff=False,com.docker.compose.project=distribution,com.docker.compose.service=kapacitor,com.docker.compose.version=1.21.2,containerId=6514d1cf6d19e7ecfedf894941f0a2ea21b8aac5e6f48e64f19dbc9bb2805a25,containerName=aicon-kapacitor,host=prash-laptop,logType=stderr log=" ts=2019-04-13T08:36:00.019Z lvl=info msg=\"http request\" service=http host=172.23.0.7 username=- start=2019-04-13T08:36:00.013835165Z method=POST uri=/write?consistency=&db=_internal&precision=ns&rp=monitor protocol=HTTP/1.1 status=204 referer=- user-agent=InfluxDBClient request-id=2a3eb481-5dc7-11e9-825b-000000000000 duration=5.814404ms\n" 1555144560024000000 +``` diff --git a/plugins/inputs/docker_log/client.go b/plugins/inputs/docker_log/client.go new file mode 100644 index 000000000..7667c6e4d --- /dev/null +++ b/plugins/inputs/docker_log/client.go @@ -0,0 +1,63 @@ +package docker_log + +import ( + "context" + "crypto/tls" + "io" + "net/http" + + "github.com/docker/docker/api/types" + docker "github.com/docker/docker/client" +) + +/*This file is inherited from telegraf docker input plugin*/ +var ( + version = "1.24" + defaultHeaders = map[string]string{"User-Agent": "engine-api-cli-1.0"} +) + +type Client interface { + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) +} + +func NewEnvClient() (Client, error) { + client, err := docker.NewClientWithOpts(docker.FromEnv) + if err != nil { + return nil, err + } + return &SocketClient{client}, nil +} + +func NewClient(host string, tlsConfig *tls.Config) (Client, error) { + transport := &http.Transport{ + TLSClientConfig: tlsConfig, + } + httpClient := &http.Client{Transport: transport} + client, err := docker.NewClientWithOpts( + docker.WithHTTPHeaders(defaultHeaders), + docker.WithHTTPClient(httpClient), + docker.WithVersion(version), + docker.WithHost(host)) + + if err != nil { + return nil, err + } + return &SocketClient{client}, nil +} + +type SocketClient struct { + client *docker.Client +} + +func (c *SocketClient) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + return c.client.ContainerList(ctx, options) +} + +func (c *SocketClient) ContainerLogs(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + return c.client.ContainerLogs(ctx, containerID, options) +} +func (c 
*SocketClient) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + return c.client.ContainerInspect(ctx, containerID) +} diff --git a/plugins/inputs/docker_log/docker_logs.go b/plugins/inputs/docker_log/docker_logs.go new file mode 100644 index 000000000..813b868ee --- /dev/null +++ b/plugins/inputs/docker_log/docker_logs.go @@ -0,0 +1,472 @@ +package docker_log + +import ( + "context" + "crypto/tls" + "encoding/binary" + "errors" + "fmt" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/internal" + tlsint "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" + "io" + "strings" + "sync" + "time" +) + +type StdType byte + +const ( + Stdin StdType = iota + Stdout + Stderr + Systemerr + + stdWriterPrefixLen = 8 + stdWriterFdIndex = 0 + stdWriterSizeIndex = 4 + + startingBufLen = 32*1024 + stdWriterPrefixLen + 1 + + ERR_PREFIX = "E! [inputs.docker_log]" + defaultEndpoint = "unix:///var/run/docker.sock" + logBytesMax = 1000 +) + +type DockerLogs struct { + Endpoint string + + Timeout internal.Duration + + LabelInclude []string `toml:"docker_label_include"` + LabelExclude []string `toml:"docker_label_exclude"` + + ContainerInclude []string `toml:"container_name_include"` + ContainerExclude []string `toml:"container_name_exclude"` + + ContainerStateInclude []string `toml:"container_state_include"` + ContainerStateExclude []string `toml:"container_state_exclude"` + + tlsint.ClientConfig + + newEnvClient func() (Client, error) + newClient func(string, *tls.Config) (Client, error) + + client Client + filtersCreated bool + labelFilter filter.Filter + containerFilter filter.Filter + stateFilter filter.Filter + opts types.ContainerListOptions + wg sync.WaitGroup + mu sync.Mutex + containerList map[string]io.ReadCloser +} + +var ( + containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"} +) + +var sampleConfig = ` + ## Docker Endpoint + ## To use TCP, set endpoint = "tcp://[ip]:[port]" + ## To use environment variables (ie, docker-machine), set endpoint = "ENV" + endpoint = "unix:///var/run/docker.sock" + ## Containers to include and exclude. Globs accepted. + ## Note that an empty array for both will include all containers + container_name_include = [] + container_name_exclude = [] + ## Container states to include and exclude. Globs accepted. + ## When empty only containers in the "running" state will be captured. + # container_state_include = [] + # container_state_exclude = [] + + ## docker labels to include and exclude as tags. Globs accepted. 
+  ## Note that an empty array for both will include all labels as tags
+  docker_label_include = []
+  docker_label_exclude = []
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+`
+
+func (d *DockerLogs) Description() string {
+	return "Plugin to get docker logs"
+}
+
+func (d *DockerLogs) SampleConfig() string {
+	return sampleConfig
+}
+
+func (d *DockerLogs) Gather(acc telegraf.Accumulator) error {
+	/* Check to see if any new containers have been created since last time */
+	return d.containerListUpdate(acc)
+}
+
+/* The following few functions have been inherited from the telegraf docker input plugin */
+func (d *DockerLogs) createContainerFilters() error {
+	filter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude)
+	if err != nil {
+		return err
+	}
+	d.containerFilter = filter
+	return nil
+}
+
+func (d *DockerLogs) createLabelFilters() error {
+	filter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude)
+	if err != nil {
+		return err
+	}
+	d.labelFilter = filter
+	return nil
+}
+
+func (d *DockerLogs) createContainerStateFilters() error {
+	if len(d.ContainerStateInclude) == 0 && len(d.ContainerStateExclude) == 0 {
+		d.ContainerStateInclude = []string{"running"}
+	}
+	filter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude)
+	if err != nil {
+		return err
+	}
+	d.stateFilter = filter
+	return nil
+}
+
+func (d *DockerLogs) addToContainerList(containerId string, logReader io.ReadCloser) error {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	d.containerList[containerId] = logReader
+	return nil
+}
+
+func (d *DockerLogs) removeFromContainerList(containerId string) error {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	delete(d.containerList, containerId)
+	return nil
+}
+
+func (d *DockerLogs) containerInContainerList(containerId string) bool {
+	if _, ok := d.containerList[containerId]; ok {
+		return true
+	}
+	return false
+}
+
+func (d *DockerLogs) stopAllReaders() error {
+	for _, container := range d.containerList {
+		container.Close()
+	}
+	return nil
+}
+
+func (d *DockerLogs) containerListUpdate(acc telegraf.Accumulator) error {
+	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
+	defer cancel()
+	if d.client == nil {
+		return errors.New(fmt.Sprintf("%s : Docker client is nil", ERR_PREFIX))
+	}
+	containers, err := d.client.ContainerList(ctx, d.opts)
+	if err != nil {
+		return err
+	}
+	for _, container := range containers {
+		if d.containerInContainerList(container.ID) {
+			continue
+		}
+		d.wg.Add(1)
+		/* Start a new goroutine for every new container that has logs to collect */
+		go func(c types.Container) {
+			defer d.wg.Done()
+			logOptions := types.ContainerLogsOptions{
+				ShowStdout: true,
+				ShowStderr: true,
+				Timestamps: false,
+				Details:    true,
+				Follow:     true,
+				Tail:       "0",
+			}
+			logReader, err := d.client.ContainerLogs(context.Background(), c.ID, logOptions)
+			if err != nil {
+				acc.AddError(err)
+				return
+			}
+			d.addToContainerList(c.ID, logReader)
+			err = d.tailContainerLogs(c, logReader, acc)
+			if err != nil {
+				acc.AddError(err)
+			}
+			d.removeFromContainerList(c.ID)
+		}(container)
+	}
+	return nil
+}
+
+func (d *DockerLogs) tailContainerLogs(
+	container types.Container, logReader io.ReadCloser,
+	acc telegraf.Accumulator,
+) error {
+	c, err := d.client.ContainerInspect(context.Background(), container.ID)
+	if err != nil {
+		return err
+	}
+	/* Parse container name */
+	var cname string
+	for _, name := range container.Names {
+		trimmedName := strings.TrimPrefix(name, "/")
+		match := d.containerFilter.Match(trimmedName)
+		if match {
+			cname = trimmedName
+			break
+		}
+	}
+
+	if cname == "" {
+		return errors.New(fmt.Sprintf("%s : container name is empty", ERR_PREFIX))
+	}
+	imageName, imageVersion := parseImage(container.Image)
+	tags := map[string]string{
+		"container_name":    cname,
+		"container_image":   imageName,
+		"container_version": imageVersion,
+	}
+	fields := map[string]interface{}{}
+	fields["container_id"] = container.ID
+	// Add labels to tags
+	for k, label := range container.Labels {
+		if d.labelFilter.Match(k) {
+			tags[k] = label
+		}
+	}
+	if c.Config.Tty {
+		err = pushTtyLogs(acc, tags, fields, logReader)
+	} else {
+		_, err = pushLogs(acc, tags, fields, logReader)
+	}
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func pushTtyLogs(acc telegraf.Accumulator, tags map[string]string, fields map[string]interface{}, src io.Reader) (err error) {
+	tags["logType"] = "unknown" // in TTY mode we won't be able to differentiate between stdout and stderr, hence "unknown"
+	data := make([]byte, logBytesMax)
+	for {
+		num, err := src.Read(data)
+		if num > 0 {
+			fields["message"] = data[1:num]
+			acc.AddFields("docker_log", fields, tags)
+		}
+		if err == io.EOF {
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+	}
+}
+
+/* Inspired from https://github.com/moby/moby/blob/master/pkg/stdcopy/stdcopy.go */
+func pushLogs(acc telegraf.Accumulator, tags map[string]string, fields map[string]interface{}, src io.Reader) (written int64, err error) {
+	var (
+		buf       = make([]byte, startingBufLen)
+		bufLen    = len(buf)
+		nr        int
+		er        error
+		frameSize int
+	)
+	for {
+		// Make sure we have at least a full header
+		for nr < stdWriterPrefixLen {
+			var nr2 int
+			nr2, er = src.Read(buf[nr:])
+			nr += nr2
+			if er == io.EOF {
+				if nr < stdWriterPrefixLen {
+					return written, nil
+				}
+				break
+			}
+			if er != nil {
+				return 0, er
+			}
+		}
+		stream := StdType(buf[stdWriterFdIndex])
+		// Check the first byte to know where to write
+		var logType string
+		switch stream {
+		case Stdin:
+			logType = "stdin"
+		case Stdout:
+			logType = "stdout"
+		case Stderr:
+			logType = "stderr"
+		case Systemerr:
+			fallthrough
+		default:
+			return 0, fmt.Errorf("unrecognized input header: %d", buf[stdWriterFdIndex])
+		}
+		// Retrieve the size of the frame
+		frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))
+
+		// Check if the buffer is big enough to read the frame.
+		// Extend it if necessary.
+		if frameSize+stdWriterPrefixLen > bufLen {
+			buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...)
+			bufLen = len(buf)
+		}
+
+		// While the amount of bytes read is less than the size of the frame + header, we keep reading
+		for nr < frameSize+stdWriterPrefixLen {
+			var nr2 int
+			nr2, er = src.Read(buf[nr:])
+			nr += nr2
+			if er == io.EOF {
+				if nr < frameSize+stdWriterPrefixLen {
+					return written, nil
+				}
+				break
+			}
+			if er != nil {
+				return 0, er
+			}
+		}
+
+		// We might have an error from the source mixed up in our multiplexed
+		// stream. If we do, return it.
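+		// (Note: the switch above already returns an error when the header
+		// byte is Systemerr, so this branch is unreachable as written.)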
+		if stream == Systemerr {
+			return written, fmt.Errorf("error from daemon in stream: %s", string(buf[stdWriterPrefixLen:frameSize+stdWriterPrefixLen]))
+		}
+
+		tags["stream"] = logType
+		fields["message"] = buf[stdWriterPrefixLen+1 : frameSize+stdWriterPrefixLen]
+		acc.AddFields("docker_log", fields, tags)
+		written += int64(frameSize)
+
+		// Move the rest of the buffer to the beginning
+		copy(buf, buf[frameSize+stdWriterPrefixLen:])
+		// Move the index
+		nr -= frameSize + stdWriterPrefixLen
+	}
+}
+
+func (d *DockerLogs) Start(acc telegraf.Accumulator) error {
+	var c Client
+	var err error
+	if d.Endpoint == "ENV" {
+		c, err = d.newEnvClient()
+	} else {
+		var tlsConfig *tls.Config
+		tlsConfig, err = d.ClientConfig.TLSConfig()
+		if err != nil {
+			return err
+		}
+		c, err = d.newClient(d.Endpoint, tlsConfig)
+	}
+	if err != nil {
+		return err
+	}
+	d.client = c
+	// Create label filters if not already created
+	if !d.filtersCreated {
+		err := d.createLabelFilters()
+		if err != nil {
+			return err
+		}
+		err = d.createContainerFilters()
+		if err != nil {
+			return err
+		}
+		err = d.createContainerStateFilters()
+		if err != nil {
+			return err
+		}
+		d.filtersCreated = true
+	}
+	filterArgs := filters.NewArgs()
+	for _, state := range containerStates {
+		if d.stateFilter.Match(state) {
+			filterArgs.Add("status", state)
+		}
+	}
+
+	// All container states were excluded
+	if filterArgs.Len() == 0 {
+		return nil
+	}
+
+	d.opts = types.ContainerListOptions{
+		Filters: filterArgs,
+	}
+	return nil
+}
+
+/* Inspired from https://github.com/influxdata/telegraf/blob/master/plugins/inputs/docker/docker.go */
+func parseImage(image string) (string, string) {
+	// Adapts some of the logic from the actual Docker library's image parsing
+	// routines:
+	// https://github.com/docker/distribution/blob/release/2.7/reference/normalize.go
+	domain := ""
+	remainder := ""
+
+	i := strings.IndexRune(image, '/')
+
+	if i == -1 || (!strings.ContainsAny(image[:i], ".:") && image[:i] != "localhost") {
+		remainder = image
+	} else {
+		domain, remainder = image[:i], image[i+1:]
+	}
+
+	imageName := ""
+	imageVersion := "unknown"
+
+	i = strings.LastIndex(remainder, ":")
+	if i > -1 {
+		imageVersion = remainder[i+1:]
+		imageName = remainder[:i]
+	} else {
+		imageName = remainder
+	}
+
+	if domain != "" {
+		imageName = domain + "/" + imageName
+	}
+
+	return imageName, imageVersion
+}
+
+func (d *DockerLogs) Stop() {
+	d.mu.Lock()
+	d.stopAllReaders()
+	d.mu.Unlock()
+	d.wg.Wait()
+}
+
+func init() {
+	inputs.Add("docker_log", func() telegraf.Input {
+		return &DockerLogs{
+			Timeout:        internal.Duration{Duration: time.Second * 5},
+			Endpoint:       defaultEndpoint,
+			newEnvClient:   NewEnvClient,
+			newClient:      NewClient,
+			filtersCreated: false,
+			containerList:  make(map[string]io.ReadCloser),
+		}
+	})
+}

From 8d04cb76fd07f3b5998f4d1ea7fd7d29d99f1927 Mon Sep 17 00:00:00 2001
From: George
Date: Wed, 19 Jun 2019 21:40:53 +0100
Subject: [PATCH 0941/1815] Add support for interface field in http_response
 input plugin (#6006)

---
 plugins/inputs/http_response/README.md        |  3 +
 plugins/inputs/http_response/http_response.go | 38 ++++++++++-
 .../http_response/http_response_test.go       | 63 +++++++++++++++++++
 3 files changed, 103 insertions(+), 1 deletion(-)

diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md
index 54e229b30..38d527fb0 100644
--- a/plugins/inputs/http_response/README.md
+++ b/plugins/inputs/http_response/README.md
@@ -46,6 +46,9 @@ This input plugin checks HTTP/HTTPS connections.
## HTTP Request Headers (all values must be strings) # [inputs.http_response.headers] # Host = "github.com" + + ## Interface to use when dialing an address + # interface = "eth0" ``` ### Metrics: diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index 7dbe47b0d..a9d82f13d 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -31,6 +31,7 @@ type HTTPResponse struct { Headers map[string]string FollowRedirects bool ResponseStringMatch string + Interface string tls.ClientConfig compiledStringMatch *regexp.Regexp @@ -82,6 +83,9 @@ var sampleConfig = ` ## HTTP Request Headers (all values must be strings) # [inputs.http_response.headers] # Host = "github.com" + + ## Interface to use when dialing an address + # interface = "eth0" ` // SampleConfig returns the plugin SampleConfig @@ -108,16 +112,27 @@ func getProxyFunc(http_proxy string) func(*http.Request) (*url.URL, error) { } } -// CreateHttpClient creates an http client which will timeout at the specified +// createHttpClient creates an http client which will timeout at the specified // timeout period and can follow redirects if specified func (h *HTTPResponse) createHttpClient() (*http.Client, error) { tlsCfg, err := h.ClientConfig.TLSConfig() if err != nil { return nil, err } + + dialer := &net.Dialer{} + + if h.Interface != "" { + dialer.LocalAddr, err = localAddress(h.Interface) + if err != nil { + return nil, err + } + } + client := &http.Client{ Transport: &http.Transport{ Proxy: getProxyFunc(h.HTTPProxy), + DialContext: dialer.DialContext, DisableKeepAlives: true, TLSClientConfig: tlsCfg, }, @@ -132,6 +147,27 @@ func (h *HTTPResponse) createHttpClient() (*http.Client, error) { return client, nil } +func localAddress(interfaceName string) (net.Addr, error) { + i, err := net.InterfaceByName(interfaceName) + if err != nil { + return nil, err + } + + addrs, err := i.Addrs() + if err != nil { + return nil, err + } + + for _, addr := range addrs { + if naddr, ok := addr.(*net.IPNet); ok { + // leaving port set to zero to let kernel pick + return &net.TCPAddr{IP: naddr.IP}, nil + } + } + + return nil, fmt.Errorf("cannot create local address for interface %q", interfaceName) +} + func setResult(result_string string, fields map[string]interface{}, tags map[string]string) { result_codes := map[string]int{ "success": 0, diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index a33805db3..159eaa562 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -1,8 +1,10 @@ package http_response import ( + "errors" "fmt" "io/ioutil" + "net" "net/http" "net/http/httptest" "testing" @@ -210,6 +212,67 @@ func TestFields(t *testing.T) { checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) } +func findInterface() (net.Interface, error) { + potential, _ := net.Interfaces() + + for _, i := range potential { + // we are only interest in loopback interfaces which are up + if (i.Flags&net.FlagUp == 0) || (i.Flags&net.FlagLoopback == 0) { + continue + } + + if addrs, _ := i.Addrs(); len(addrs) > 0 { + // return interface if it has at least one unicast address + return i, nil + } + } + + return net.Interface{}, errors.New("cannot find suitable loopback interface") +} + +func TestInterface(t *testing.T) { + var ( + mux = setUpTestMux() + ts = httptest.NewServer(mux) + ) + + defer ts.Close() + + intf, err := 
findInterface() + require.NoError(t, err) + + h := &HTTPResponse{ + Address: ts.URL + "/good", + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + Headers: map[string]string{ + "Content-Type": "application/json", + }, + FollowRedirects: true, + Interface: intf.Name, + } + + var acc testutil.Accumulator + err = h.Gather(&acc) + require.NoError(t, err) + + expectedFields := map[string]interface{}{ + "http_response_code": http.StatusOK, + "result_type": "success", + "result_code": 0, + "response_time": nil, + } + expectedTags := map[string]interface{}{ + "server": nil, + "method": "GET", + "status_code": "200", + "result": "success", + } + absentFields := []string{"response_string_match"} + checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) +} + func TestRedirects(t *testing.T) { mux := setUpTestMux() ts := httptest.NewServer(mux) From 104db7c503cff2c49304737e7f35f7bd8b6e11f4 Mon Sep 17 00:00:00 2001 From: Chris Goller Date: Wed, 19 Jun 2019 16:28:00 -0500 Subject: [PATCH 0942/1815] Skip 404 error reporting in nginx_plus_api input (#6015) --- .../nginx_plus_api/nginx_plus_api_metrics.go | 46 ++++-- .../nginx_plus_api_metrics_test.go | 131 ++++++++++++++---- 2 files changed, 142 insertions(+), 35 deletions(-) diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go index 68be31e12..1936591c9 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go @@ -2,6 +2,7 @@ package nginx_plus_api import ( "encoding/json" + "errors" "fmt" "io/ioutil" "net" @@ -13,16 +14,33 @@ import ( "github.com/influxdata/telegraf" ) +var ( + // errNotFound signals that the NGINX API routes does not exist. + errNotFound = errors.New("not found") +) + func (n *NginxPlusApi) gatherMetrics(addr *url.URL, acc telegraf.Accumulator) { - acc.AddError(n.gatherProcessesMetrics(addr, acc)) - acc.AddError(n.gatherConnectionsMetrics(addr, acc)) - acc.AddError(n.gatherSslMetrics(addr, acc)) - acc.AddError(n.gatherHttpRequestsMetrics(addr, acc)) - acc.AddError(n.gatherHttpServerZonesMetrics(addr, acc)) - acc.AddError(n.gatherHttpUpstreamsMetrics(addr, acc)) - acc.AddError(n.gatherHttpCachesMetrics(addr, acc)) - acc.AddError(n.gatherStreamServerZonesMetrics(addr, acc)) - acc.AddError(n.gatherStreamUpstreamsMetrics(addr, acc)) + addError(acc, n.gatherProcessesMetrics(addr, acc)) + addError(acc, n.gatherConnectionsMetrics(addr, acc)) + addError(acc, n.gatherSslMetrics(addr, acc)) + addError(acc, n.gatherHttpRequestsMetrics(addr, acc)) + addError(acc, n.gatherHttpServerZonesMetrics(addr, acc)) + addError(acc, n.gatherHttpUpstreamsMetrics(addr, acc)) + addError(acc, n.gatherHttpCachesMetrics(addr, acc)) + addError(acc, n.gatherStreamServerZonesMetrics(addr, acc)) + addError(acc, n.gatherStreamUpstreamsMetrics(addr, acc)) +} + +func addError(acc telegraf.Accumulator, err error) { + // This plugin has hardcoded API resource paths it checks that may not + // be in the nginx.conf. Currently, this is to prevent logging of + // paths that are not configured. + // + // The correct solution is to do a GET to /api to get the available paths + // on the server rather than simply ignore. 
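+	// errNotFound is a sentinel error compared by identity, so gatherUrl
+	// below must return it unwrapped for this check to match.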
+ if err != errNotFound { + acc.AddError(err) + } } func (n *NginxPlusApi) gatherUrl(addr *url.URL, path string) ([]byte, error) { @@ -33,9 +51,17 @@ func (n *NginxPlusApi) gatherUrl(addr *url.URL, path string) ([]byte, error) { return nil, fmt.Errorf("error making HTTP request to %s: %s", url, err) } defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { + + switch resp.StatusCode { + case http.StatusOK: + case http.StatusNotFound: + // format as special error to catch and ignore as some nginx API + // features are either optional, or only available in some versions + return nil, errNotFound + default: return nil, fmt.Errorf("%s returned HTTP status %s", url, resp.Status) } + contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] switch contentType { case "application/json": diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go index 8105f35fb..da1806aac 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go @@ -448,11 +448,11 @@ const streamServerZonesPayload = ` ` func TestGatherProcessesMetrics(t *testing.T) { - ts, n := prepareEndpoint(processesPath, defaultApiVersion, processesPayload) + ts, n := prepareEndpoint(t, processesPath, defaultApiVersion, processesPayload) defer ts.Close() var acc testutil.Accumulator - addr, host, port := prepareAddr(ts) + addr, host, port := prepareAddr(t, ts) require.NoError(t, n.gatherProcessesMetrics(addr, &acc)) @@ -468,12 +468,12 @@ func TestGatherProcessesMetrics(t *testing.T) { }) } -func TestGatherConnectioinsMetrics(t *testing.T) { - ts, n := prepareEndpoint(connectionsPath, defaultApiVersion, connectionsPayload) +func TestGatherConnectionsMetrics(t *testing.T) { + ts, n := prepareEndpoint(t, connectionsPath, defaultApiVersion, connectionsPayload) defer ts.Close() var acc testutil.Accumulator - addr, host, port := prepareAddr(ts) + addr, host, port := prepareAddr(t, ts) require.NoError(t, n.gatherConnectionsMetrics(addr, &acc)) @@ -493,11 +493,11 @@ func TestGatherConnectioinsMetrics(t *testing.T) { } func TestGatherSslMetrics(t *testing.T) { - ts, n := prepareEndpoint(sslPath, defaultApiVersion, sslPayload) + ts, n := prepareEndpoint(t, sslPath, defaultApiVersion, sslPayload) defer ts.Close() var acc testutil.Accumulator - addr, host, port := prepareAddr(ts) + addr, host, port := prepareAddr(t, ts) require.NoError(t, n.gatherSslMetrics(addr, &acc)) @@ -516,11 +516,11 @@ func TestGatherSslMetrics(t *testing.T) { } func TestGatherHttpRequestsMetrics(t *testing.T) { - ts, n := prepareEndpoint(httpRequestsPath, defaultApiVersion, httpRequestsPayload) + ts, n := prepareEndpoint(t, httpRequestsPath, defaultApiVersion, httpRequestsPayload) defer ts.Close() var acc testutil.Accumulator - addr, host, port := prepareAddr(ts) + addr, host, port := prepareAddr(t, ts) require.NoError(t, n.gatherHttpRequestsMetrics(addr, &acc)) @@ -538,11 +538,11 @@ func TestGatherHttpRequestsMetrics(t *testing.T) { } func TestGatherHttpServerZonesMetrics(t *testing.T) { - ts, n := prepareEndpoint(httpServerZonesPath, defaultApiVersion, httpServerZonesPayload) + ts, n := prepareEndpoint(t, httpServerZonesPath, defaultApiVersion, httpServerZonesPayload) defer ts.Close() var acc testutil.Accumulator - addr, host, port := prepareAddr(ts) + addr, host, port := prepareAddr(t, ts) require.NoError(t, n.gatherHttpServerZonesMetrics(addr, &acc)) @@ -592,11 +592,11 @@ func 
TestGatherHttpServerZonesMetrics(t *testing.T) { } func TestHatherHttpUpstreamsMetrics(t *testing.T) { - ts, n := prepareEndpoint(httpUpstreamsPath, defaultApiVersion, httpUpstreamsPayload) + ts, n := prepareEndpoint(t, httpUpstreamsPath, defaultApiVersion, httpUpstreamsPayload) defer ts.Close() var acc testutil.Accumulator - addr, host, port := prepareAddr(ts) + addr, host, port := prepareAddr(t, ts) require.NoError(t, n.gatherHttpUpstreamsMetrics(addr, &acc)) @@ -764,11 +764,11 @@ func TestHatherHttpUpstreamsMetrics(t *testing.T) { } func TestGatherHttpCachesMetrics(t *testing.T) { - ts, n := prepareEndpoint(httpCachesPath, defaultApiVersion, httpCachesPayload) + ts, n := prepareEndpoint(t, httpCachesPath, defaultApiVersion, httpCachesPayload) defer ts.Close() var acc testutil.Accumulator - addr, host, port := prepareAddr(ts) + addr, host, port := prepareAddr(t, ts) require.NoError(t, n.gatherHttpCachesMetrics(addr, &acc)) @@ -842,11 +842,11 @@ func TestGatherHttpCachesMetrics(t *testing.T) { } func TestGatherStreamUpstreams(t *testing.T) { - ts, n := prepareEndpoint(streamUpstreamsPath, defaultApiVersion, streamUpstreamsPayload) + ts, n := prepareEndpoint(t, streamUpstreamsPath, defaultApiVersion, streamUpstreamsPayload) defer ts.Close() var acc testutil.Accumulator - addr, host, port := prepareAddr(ts) + addr, host, port := prepareAddr(t, ts) require.NoError(t, n.gatherStreamUpstreamsMetrics(addr, &acc)) @@ -984,12 +984,12 @@ func TestGatherStreamUpstreams(t *testing.T) { } -func TestGatherStreamServerZonesMatrics(t *testing.T) { - ts, n := prepareEndpoint(streamServerZonesPath, defaultApiVersion, streamServerZonesPayload) +func TestGatherStreamServerZonesMetrics(t *testing.T) { + ts, n := prepareEndpoint(t, streamServerZonesPath, defaultApiVersion, streamServerZonesPayload) defer ts.Close() var acc testutil.Accumulator - addr, host, port := prepareAddr(ts) + addr, host, port := prepareAddr(t, ts) require.NoError(t, n.gatherStreamServerZonesMetrics(addr, &acc)) @@ -1023,11 +1023,92 @@ func TestGatherStreamServerZonesMatrics(t *testing.T) { "zone": "dns", }) } +func TestUnavailableEndpoints(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer ts.Close() -func prepareAddr(ts *httptest.Server) (*url.URL, string, string) { + n := &NginxPlusApi{ + client: ts.Client(), + } + + addr, err := url.Parse(ts.URL) + if err != nil { + t.Fatal(err) + } + + var acc testutil.Accumulator + n.gatherMetrics(addr, &acc) + require.NoError(t, acc.FirstError()) +} + +func TestServerError(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + defer ts.Close() + + n := &NginxPlusApi{ + client: ts.Client(), + } + + addr, err := url.Parse(ts.URL) + if err != nil { + t.Fatal(err) + } + + var acc testutil.Accumulator + n.gatherMetrics(addr, &acc) + require.Error(t, acc.FirstError()) +} + +func TestMalformedJSON(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintln(w, "this is not JSON") + })) + defer ts.Close() + + n := &NginxPlusApi{ + client: ts.Client(), + } + + addr, err := url.Parse(ts.URL) + if err != nil { + t.Fatal(err) + } + + var acc testutil.Accumulator + n.gatherMetrics(addr, &acc) + require.Error(t, acc.FirstError()) +} + +func TestUnknownContentType(t 
*testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + })) + defer ts.Close() + + n := &NginxPlusApi{ + client: ts.Client(), + } + + addr, err := url.Parse(ts.URL) + if err != nil { + t.Fatal(err) + } + + var acc testutil.Accumulator + n.gatherMetrics(addr, &acc) + require.Error(t, acc.FirstError()) +} + +func prepareAddr(t *testing.T, ts *httptest.Server) (*url.URL, string, string) { + t.Helper() addr, err := url.Parse(fmt.Sprintf("%s/api", ts.URL)) if err != nil { - panic(err) + t.Fatal(err) } host, port, err := net.SplitHostPort(addr.Host) @@ -1046,7 +1127,7 @@ func prepareAddr(ts *httptest.Server) (*url.URL, string, string) { return addr, host, port } -func prepareEndpoint(path string, apiVersion int64, payload string) (*httptest.Server, *NginxPlusApi) { +func prepareEndpoint(t *testing.T, path string, apiVersion int64, payload string) (*httptest.Server, *NginxPlusApi) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string @@ -1054,7 +1135,7 @@ func prepareEndpoint(path string, apiVersion int64, payload string) (*httptest.S rsp = payload w.Header()["Content-Type"] = []string{"application/json"} } else { - panic("Cannot handle request") + t.Errorf("unknown request path") } fmt.Fprintln(w, rsp) @@ -1067,7 +1148,7 @@ func prepareEndpoint(path string, apiVersion int64, payload string) (*httptest.S client, err := n.createHttpClient() if err != nil { - panic(err) + t.Fatal(err) } n.client = client From 2aeaed2f27e0ea19ac7054d403cdf85bb84d414d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 19 Jun 2019 14:34:01 -0700 Subject: [PATCH 0943/1815] Update changelog and readme --- CHANGELOG.md | 6 ++++++ README.md | 1 + 2 files changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ccee1e38a..3cce0de28 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## v1.12 [unreleased] +#### New Inputs + +- [docker_log](/plugins/inputs/docker_log) - Contributed by @prashanthjbabu + #### New Parsers - [form_urlencoded](/plugins/processors/form_urlencoded/README.md) - Contributed by @byonchev @@ -16,6 +20,7 @@ - [#5863](https://github.com/influxdata/telegraf/pull/5863): Allow regex processor to append tag values. - [#5997](https://github.com/influxdata/telegraf/pull/5997): Add starttime field to phpfpm input. - [#5998](https://github.com/influxdata/telegraf/pull/5998): Add cluster name tag to elasticsearch indices. +- [#6006](https://github.com/influxdata/telegraf/pull/6006): Add support for interface field in http_response input plugin. #### Bugfixes @@ -28,6 +33,7 @@ - [#5980](https://github.com/influxdata/telegraf/issues/5980): Cannot set mount_points option in disk input. - [#5983](https://github.com/influxdata/telegraf/issues/5983): Omit keys when creating measurement names for GNMI telemetry. - [#5972](https://github.com/influxdata/telegraf/issues/5972): Don't consider pid of 0 when using systemd lookup in procstat. +- [#5807](https://github.com/influxdata/telegraf/issues/5807): Skip 404 error reporting in nginx_plus_api input. 
## v1.11 [2019-06-11] diff --git a/README.md b/README.md index 9580e2b31..3fed264ae 100644 --- a/README.md +++ b/README.md @@ -166,6 +166,7 @@ For documentation on the latest development code see the [documentation index][d * [dmcache](./plugins/inputs/dmcache) * [dns query time](./plugins/inputs/dns_query) * [docker](./plugins/inputs/docker) +* [docker_log](./plugins/inputs/docker_log) * [dovecot](./plugins/inputs/dovecot) * [ecs](./plugins/inputs/ecs) (Amazon Elastic Container Service, Fargate) * [elasticsearch](./plugins/inputs/elasticsearch) From c52e7d88d7aa6329a04855956cfd2b3372cb0573 Mon Sep 17 00:00:00 2001 From: George Date: Wed, 19 Jun 2019 23:37:10 +0100 Subject: [PATCH 0944/1815] Add container uptime_ns in docker input plugin (#5996) --- plugins/inputs/docker/README.md | 1 + plugins/inputs/docker/docker.go | 20 +++- plugins/inputs/docker/docker_test.go | 134 ++++++++++++++++++++++- plugins/inputs/docker/docker_testdata.go | 54 ++++----- 4 files changed, 175 insertions(+), 34 deletions(-) diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index 39fc7d6a6..a26b5763e 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -278,6 +278,7 @@ status if configured. - exitcode (integer) - started_at (integer) - finished_at (integer) + - uptime_ns (integer) - docker_swarm - tags: diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 117aabfb4..f9b538080 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -73,6 +73,7 @@ const ( var ( sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`) containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"} + now = time.Now ) var sampleConfig = ` @@ -462,14 +463,21 @@ func (d *Docker) gatherContainer( "pid": info.State.Pid, "exitcode": info.State.ExitCode, } - container_time, err := time.Parse(time.RFC3339, info.State.StartedAt) - if err == nil && !container_time.IsZero() { - statefields["started_at"] = container_time.UnixNano() + + finished, err := time.Parse(time.RFC3339, info.State.FinishedAt) + if err == nil && !finished.IsZero() { + statefields["finished_at"] = finished.UnixNano() + } else { + // set finished to now for use in uptime + finished = now() } - container_time, err = time.Parse(time.RFC3339, info.State.FinishedAt) - if err == nil && !container_time.IsZero() { - statefields["finished_at"] = container_time.UnixNano() + + started, err := time.Parse(time.RFC3339, info.State.StartedAt) + if err == nil && !started.IsZero() { + statefields["started_at"] = started.UnixNano() + statefields["uptime_ns"] = finished.Sub(started).Nanoseconds() } + acc.AddFields("docker_container_status", statefields, tags, time.Now()) if info.State.Health != nil { diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index 9209c6008..e1a425314 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -7,6 +7,7 @@ import ( "sort" "strings" "testing" + "time" "github.com/influxdata/telegraf/testutil" @@ -83,7 +84,7 @@ var baseClient = MockClient{ return containerStats(s), nil }, ContainerInspectF: func(context.Context, string) (types.ContainerJSON, error) { - return containerInspect, nil + return containerInspect(), nil }, ServiceListF: func(context.Context, types.ServiceListOptions) ([]swarm.Service, error) { return ServiceList, nil @@ -264,7 +265,7 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) { 
return containerStatsWindows(), nil }, ContainerInspectF: func(ctx context.Context, containerID string) (types.ContainerJSON, error) { - return containerInspect, nil + return containerInspect(), nil }, ServiceListF: func(context.Context, types.ServiceListOptions) ([]swarm.Service, error) { return ServiceList, nil @@ -538,6 +539,135 @@ func TestContainerNames(t *testing.T) { } } +func TestContainerStatus(t *testing.T) { + type expectation struct { + // tags + Status string + // fields + OOMKilled bool + Pid int + ExitCode int + StartedAt time.Time + FinishedAt time.Time + UptimeNs int64 + } + + var tests = []struct { + name string + now func() time.Time + inspect types.ContainerJSON + expect expectation + }{ + { + name: "finished_at is zero value", + now: func() time.Time { + return time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC) + }, + inspect: containerInspect(), + expect: expectation{ + Status: "running", + OOMKilled: false, + Pid: 1234, + ExitCode: 0, + StartedAt: time.Date(2018, 6, 14, 5, 48, 53, 266176036, time.UTC), + UptimeNs: int64(3 * time.Minute), + }, + }, + { + name: "finished_at is non-zero value", + inspect: func() types.ContainerJSON { + i := containerInspect() + i.ContainerJSONBase.State.FinishedAt = "2018-06-14T05:53:53.266176036Z" + return i + }(), + expect: expectation{ + Status: "running", + OOMKilled: false, + Pid: 1234, + ExitCode: 0, + StartedAt: time.Date(2018, 6, 14, 5, 48, 53, 266176036, time.UTC), + FinishedAt: time.Date(2018, 6, 14, 5, 53, 53, 266176036, time.UTC), + UptimeNs: int64(5 * time.Minute), + }, + }, + { + name: "started_at is zero value", + inspect: func() types.ContainerJSON { + i := containerInspect() + i.ContainerJSONBase.State.StartedAt = "" + i.ContainerJSONBase.State.FinishedAt = "2018-06-14T05:53:53.266176036Z" + return i + }(), + expect: expectation{ + Status: "running", + OOMKilled: false, + Pid: 1234, + ExitCode: 0, + FinishedAt: time.Date(2018, 6, 14, 5, 53, 53, 266176036, time.UTC), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + acc testutil.Accumulator + newClientFunc = func(string, *tls.Config) (Client, error) { + client := baseClient + client.ContainerListF = func(context.Context, types.ContainerListOptions) ([]types.Container, error) { + return containerList[:1], nil + } + client.ContainerInspectF = func(c context.Context, s string) (types.ContainerJSON, error) { + return tt.inspect, nil + } + + return &client, nil + } + d = Docker{newClient: newClientFunc} + ) + + // mock time + if tt.now != nil { + now = tt.now + } + defer func() { + now = time.Now + }() + + err := acc.GatherError(d.Gather) + require.NoError(t, err) + + fields := map[string]interface{}{ + "oomkilled": tt.expect.OOMKilled, + "pid": tt.expect.Pid, + "exitcode": tt.expect.ExitCode, + } + + if started := tt.expect.StartedAt; !started.IsZero() { + fields["started_at"] = started.UnixNano() + fields["uptime_ns"] = tt.expect.UptimeNs + } + + if finished := tt.expect.FinishedAt; !finished.IsZero() { + fields["finished_at"] = finished.UnixNano() + } + + acc.AssertContainsTaggedFields(t, + "docker_container_status", + fields, + map[string]string{ + "container_name": "etcd", + "container_image": "quay.io/coreos/etcd", + "container_version": "v2.2.2", + "engine_host": "absol", + "label1": "test_value_1", + "label2": "test_value_2", + "server_version": "17.09.0-ce", + "container_status": tt.expect.Status, + }) + }) + } +} + func TestDockerGatherInfo(t *testing.T) { var acc testutil.Accumulator d := Docker{ diff --git 
a/plugins/inputs/docker/docker_testdata.go b/plugins/inputs/docker/docker_testdata.go index 7302e219d..ba5c2ffa1 100644 --- a/plugins/inputs/docker/docker_testdata.go +++ b/plugins/inputs/docker/docker_testdata.go @@ -492,32 +492,34 @@ func containerStatsWindows() types.ContainerStats { return stat } -var containerInspect = types.ContainerJSON{ - Config: &container.Config{ - Env: []string{ - "ENVVAR1=loremipsum", - "ENVVAR1FOO=loremipsum", - "ENVVAR2=dolorsitamet", - "ENVVAR3==ubuntu:10.04", - "ENVVAR4", - "ENVVAR5=", - "ENVVAR6= ", - "ENVVAR7=ENVVAR8=ENVVAR9", - "PATH=/bin:/sbin", - }, - }, - ContainerJSONBase: &types.ContainerJSONBase{ - State: &types.ContainerState{ - Health: &types.Health{ - FailingStreak: 1, - Status: "Unhealthy", +func containerInspect() types.ContainerJSON { + return types.ContainerJSON{ + Config: &container.Config{ + Env: []string{ + "ENVVAR1=loremipsum", + "ENVVAR1FOO=loremipsum", + "ENVVAR2=dolorsitamet", + "ENVVAR3==ubuntu:10.04", + "ENVVAR4", + "ENVVAR5=", + "ENVVAR6= ", + "ENVVAR7=ENVVAR8=ENVVAR9", + "PATH=/bin:/sbin", }, - Status: "running", - OOMKilled: false, - Pid: 1234, - ExitCode: 0, - StartedAt: "2018-06-14T05:48:53.266176036Z", - FinishedAt: "0001-01-01T00:00:00Z", }, - }, + ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Health: &types.Health{ + FailingStreak: 1, + Status: "Unhealthy", + }, + Status: "running", + OOMKilled: false, + Pid: 1234, + ExitCode: 0, + StartedAt: "2018-06-14T05:48:53.266176036Z", + FinishedAt: "0001-01-01T00:00:00Z", + }, + }, + } } From ba39d7b6a8e311653272bd628a5064f6af6a139b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 19 Jun 2019 15:39:11 -0700 Subject: [PATCH 0945/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3cce0de28..51c995b60 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ - [#5997](https://github.com/influxdata/telegraf/pull/5997): Add starttime field to phpfpm input. - [#5998](https://github.com/influxdata/telegraf/pull/5998): Add cluster name tag to elasticsearch indices. - [#6006](https://github.com/influxdata/telegraf/pull/6006): Add support for interface field in http_response input plugin. +- [#5996](https://github.com/influxdata/telegraf/pull/5996): Add container uptime_ns in docker input plugin. #### Bugfixes From 80089c7caf7248ec2c54c3042600ed18569fbe81 Mon Sep 17 00:00:00 2001 From: Chris Goller Date: Wed, 19 Jun 2019 20:02:51 -0500 Subject: [PATCH 0946/1815] Add better user-facing errors for API timeouts (#6016) --- plugins/inputs/docker/docker.go | 71 ++++++++++++++++++++++++--------- plugins/inputs/docker/errors.go | 11 +++++ 2 files changed, 64 insertions(+), 18 deletions(-) create mode 100644 plugins/inputs/docker/errors.go diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index f9b538080..bc47583b7 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -51,7 +51,7 @@ type Docker struct { client Client httpClient *http.Client - engine_host string + engineHost string serverVersion string filtersCreated bool labelFilter filter.Filter @@ -122,12 +122,15 @@ var sampleConfig = ` # insecure_skip_verify = false ` +// SampleConfig returns the default Docker TOML configuration. +func (d *Docker) SampleConfig() string { return sampleConfig } + +// Description the metrics returned. 
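Making `now` a package-level `var now = time.Now` (rather than calling `time.Now()` inline) and turning `containerInspect` into a constructor are both test seams: the first lets `TestContainerStatus` pin the clock, the second gives each test case a fresh fixture to mutate. A reduced sketch of the clock seam, with illustrative names:

```go
package main

import (
	"fmt"
	"time"
)

// now is the seam: production code calls now() instead of time.Now(),
// so a test can substitute a fixed clock.
var now = time.Now

func uptime(started time.Time) time.Duration {
	return now().Sub(started)
}

func main() {
	// What the test does: pin the clock, and restore it when done.
	now = func() time.Time { return time.Date(2018, 6, 14, 5, 51, 53, 0, time.UTC) }
	defer func() { now = time.Now }()

	fmt.Println(uptime(time.Date(2018, 6, 14, 5, 48, 53, 0, time.UTC))) // 3m0s
}
```

The trade-off is that a package-level variable is shared state, so tests that override it cannot safely run in parallel.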
func (d *Docker) Description() string { return "Read metrics about docker containers" } -func (d *Docker) SampleConfig() string { return sampleConfig } - +// Gather metrics from the docker server. func (d *Docker) Gather(acc telegraf.Accumulator) error { if d.client == nil { c, err := d.getNewClient() @@ -185,7 +188,11 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { } ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) defer cancel() + containers, err := d.client.ContainerList(ctx, opts) + if err == context.DeadlineExceeded { + return errListTimeout + } if err != nil { return err } @@ -196,10 +203,8 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { for _, container := range containers { go func(c types.Container) { defer wg.Done() - err := d.gatherContainer(c, acc) - if err != nil { - acc.AddError(fmt.Errorf("E! Error gathering container %s stats: %s\n", - c.Names, err.Error())) + if err := d.gatherContainer(c, acc); err != nil { + acc.AddError(err) } }(container) } @@ -211,7 +216,11 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error { ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) defer cancel() + services, err := d.client.ServiceList(ctx, types.ServiceListOptions{}) + if err == context.DeadlineExceeded { + return errServiceTimeout + } if err != nil { return err } @@ -280,19 +289,24 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { dataFields := make(map[string]interface{}) metadataFields := make(map[string]interface{}) now := time.Now() + // Get info from docker daemon ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) defer cancel() + info, err := d.client.Info(ctx) + if err == context.DeadlineExceeded { + return errInfoTimeout + } if err != nil { return err } - d.engine_host = info.Name + d.engineHost = info.Name d.serverVersion = info.ServerVersion tags := map[string]string{ - "engine_host": d.engine_host, + "engine_host": d.engineHost, "server_version": d.serverVersion, } @@ -403,7 +417,7 @@ func (d *Docker) gatherContainer( imageName, imageVersion := parseImage(container.Image) tags := map[string]string{ - "engine_host": d.engine_host, + "engine_host": d.engineHost, "server_version": d.serverVersion, "container_name": cname, "container_image": imageName, @@ -412,17 +426,22 @@ func (d *Docker) gatherContainer( ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) defer cancel() + r, err := d.client.ContainerStats(ctx, container.ID, false) - if err != nil { - return fmt.Errorf("Error getting docker stats: %s", err.Error()) + if err == context.DeadlineExceeded { + return errStatsTimeout } + if err != nil { + return fmt.Errorf("error getting docker stats: %v", err) + } + defer r.Body.Close() dec := json.NewDecoder(r.Body) if err = dec.Decode(&v); err != nil { if err == io.EOF { return nil } - return fmt.Errorf("Error decoding: %s", err.Error()) + return fmt.Errorf("error decoding: %v", err) } daemonOSType := r.OSType @@ -438,19 +457,35 @@ func (d *Docker) gatherContainer( } } + return d.gatherContainerInspect(container, acc, tags, daemonOSType, v) +} + +func (d *Docker) gatherContainerInspect( + container types.Container, + acc telegraf.Accumulator, + tags map[string]string, + daemonOSType string, + v *types.StatsJSON, +) error { + ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + defer cancel() + info, err := d.client.ContainerInspect(ctx, 
container.ID) + if err == context.DeadlineExceeded { + return errInspectTimeout + } if err != nil { - return fmt.Errorf("Error inspecting docker container: %s", err.Error()) + return fmt.Errorf("error inspecting docker container: %v", err) } // Add whitelisted environment variables to tags if len(d.TagEnvironment) > 0 { for _, envvar := range info.Config.Env { for _, configvar := range d.TagEnvironment { - dock_env := strings.SplitN(envvar, "=", 2) + dockEnv := strings.SplitN(envvar, "=", 2) //check for presence of tag in whitelist - if len(dock_env) == 2 && len(strings.TrimSpace(dock_env[1])) != 0 && configvar == dock_env[0] { - tags[dock_env[0]] = dock_env[1] + if len(dockEnv) == 2 && len(strings.TrimSpace(dockEnv[1])) != 0 && configvar == dockEnv[0] { + tags[dockEnv[0]] = dockEnv[1] } } } @@ -800,7 +835,7 @@ func sliceContains(in string, sl []string) bool { func parseSize(sizeStr string) (int64, error) { matches := sizeRegex.FindStringSubmatch(sizeStr) if len(matches) != 4 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + return -1, fmt.Errorf("invalid size: %s", sizeStr) } size, err := strconv.ParseFloat(matches[1], 64) diff --git a/plugins/inputs/docker/errors.go b/plugins/inputs/docker/errors.go new file mode 100644 index 000000000..f3c0f76a5 --- /dev/null +++ b/plugins/inputs/docker/errors.go @@ -0,0 +1,11 @@ +package docker + +import "errors" + +var ( + errInfoTimeout = errors.New("timeout retrieving docker engine info") + errStatsTimeout = errors.New("timeout retrieving container stats") + errInspectTimeout = errors.New("timeout retrieving container environment") + errListTimeout = errors.New("timeout retrieving container list") + errServiceTimeout = errors.New("timeout retrieving swarm service list") +) From 791ea5e38edd08b65a3f2f4af15c3fafeb5e2f0f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 19 Jun 2019 18:04:05 -0700 Subject: [PATCH 0947/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 51c995b60..13c65423c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ - [#5998](https://github.com/influxdata/telegraf/pull/5998): Add cluster name tag to elasticsearch indices. - [#6006](https://github.com/influxdata/telegraf/pull/6006): Add support for interface field in http_response input plugin. - [#5996](https://github.com/influxdata/telegraf/pull/5996): Add container uptime_ns in docker input plugin. +- [#6016](https://github.com/influxdata/telegraf/pull/6016): Add better user-facing errors for API timeouts in docker input. #### Bugfixes From 049d364917a651ed41f3008f4daa4dee73bc5536 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 20 Jun 2019 11:51:41 -0700 Subject: [PATCH 0948/1815] Fix panic if pool_mode column does not exist (#6000) --- plugins/inputs/pgbouncer/pgbouncer.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/pgbouncer/pgbouncer.go b/plugins/inputs/pgbouncer/pgbouncer.go index 722648c48..edff10509 100644 --- a/plugins/inputs/pgbouncer/pgbouncer.go +++ b/plugins/inputs/pgbouncer/pgbouncer.go @@ -2,14 +2,12 @@ package pgbouncer import ( "bytes" - "github.com/influxdata/telegraf/plugins/inputs/postgresql" - - // register in driver. 
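The sentinel errors in errors.go above exist so that a plain `context.DeadlineExceeded` surfaces as a message naming which Engine API call timed out. A sketch of the wrapping pattern under stated assumptions — the slow call is simulated, and only `errListTimeout` is taken from the patch:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errListTimeout = errors.New("timeout retrieving container list")

// listContainers simulates a slow Engine API round trip and translates the
// bare deadline error into the user-facing sentinel, as the patch does.
func listContainers(ctx context.Context) error {
	select {
	case <-time.After(time.Second): // stand-in for the real API call
		return nil
	case <-ctx.Done():
		if ctx.Err() == context.DeadlineExceeded {
			return errListTimeout
		}
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	fmt.Println(listContainers(ctx)) // timeout retrieving container list
}
```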
- _ "github.com/jackc/pgx/stdlib" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/postgresql" + _ "github.com/jackc/pgx/stdlib" // register driver ) type PgBouncer struct { @@ -98,12 +96,16 @@ func (p *PgBouncer) Gather(acc telegraf.Accumulator) error { return err } - if s, ok := (*columnMap["user"]).(string); ok && s != "" { - tags["user"] = s + if user, ok := columnMap["user"]; ok { + if s, ok := (*user).(string); ok && s != "" { + tags["user"] = s + } } - if s, ok := (*columnMap["pool_mode"]).(string); ok && s != "" { - tags["pool_mode"] = s + if poolMode, ok := columnMap["pool_mode"]; ok { + if s, ok := (*poolMode).(string); ok && s != "" { + tags["pool_mode"] = s + } } fields := make(map[string]interface{}) From 29c3d42e7e56d962c335dacd72deb5a0596480e4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 20 Jun 2019 11:52:41 -0700 Subject: [PATCH 0949/1815] Fix double pct replacement in sysstat input (#6001) --- plugins/inputs/sysstat/sysstat.go | 1 + plugins/inputs/sysstat/sysstat_test.go | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go index 42ce89550..f1778fd6a 100644 --- a/plugins/inputs/sysstat/sysstat.go +++ b/plugins/inputs/sysstat/sysstat.go @@ -335,6 +335,7 @@ func (s *Sysstat) sadfOptions(activityOption string) []string { // escape removes % and / chars in field names func escape(dirty string) string { var fieldEscaper = strings.NewReplacer( + `%%`, "pct_", `%`, "pct_", `/`, "_per_", ) diff --git a/plugins/inputs/sysstat/sysstat_test.go b/plugins/inputs/sysstat/sysstat_test.go index 876e6d2c8..1674f2747 100644 --- a/plugins/inputs/sysstat/sysstat_test.go +++ b/plugins/inputs/sysstat/sysstat_test.go @@ -225,6 +225,10 @@ func TestEscape(t *testing.T) { "%util", "pct_util", }, + { + "%%util", + "pct_util", + }, { "bread/s", "bread_per_s", From a0c739eec7db7e928b8e73510b6666e325141d0f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 20 Jun 2019 11:54:12 -0700 Subject: [PATCH 0950/1815] Follow up work on docker_log input (#6008) --- Gopkg.lock | 2 + internal/docker/docker.go | 36 ++ internal/docker/docker_test.go | 59 +++ plugins/inputs/docker/docker.go | 37 +- plugins/inputs/docker/docker_test.go | 54 +-- plugins/inputs/docker_log/README.md | 60 ++- plugins/inputs/docker_log/docker_log.go | 429 +++++++++++++++++ plugins/inputs/docker_log/docker_log_test.go | 175 +++++++ plugins/inputs/docker_log/docker_logs.go | 472 ------------------- testutil/metric.go | 5 + 10 files changed, 752 insertions(+), 577 deletions(-) create mode 100644 internal/docker/docker.go create mode 100644 internal/docker/docker_test.go create mode 100644 plugins/inputs/docker_log/docker_log.go create mode 100644 plugins/inputs/docker_log/docker_log_test.go delete mode 100644 plugins/inputs/docker_log/docker_logs.go diff --git a/Gopkg.lock b/Gopkg.lock index 8f3de4211..bcdf6cd07 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -326,6 +326,7 @@ "api/types/versions", "api/types/volume", "client", + "pkg/stdcopy", ] pruneopts = "" revision = "ed7b6428c133e7c59404251a09b7d6b02fa83cc2" @@ -1591,6 +1592,7 @@ "github.com/docker/docker/api/types/registry", "github.com/docker/docker/api/types/swarm", "github.com/docker/docker/client", + "github.com/docker/docker/pkg/stdcopy", "github.com/docker/libnetwork/ipvs", "github.com/eclipse/paho.mqtt.golang", "github.com/ericchiang/k8s", diff --git a/internal/docker/docker.go 
b/internal/docker/docker.go new file mode 100644 index 000000000..1808944ae --- /dev/null +++ b/internal/docker/docker.go @@ -0,0 +1,36 @@ +package docker + +import "strings" + +// Adapts some of the logic from the actual Docker library's image parsing +// routines: +// https://github.com/docker/distribution/blob/release/2.7/reference/normalize.go +func ParseImage(image string) (string, string) { + domain := "" + remainder := "" + + i := strings.IndexRune(image, '/') + + if i == -1 || (!strings.ContainsAny(image[:i], ".:") && image[:i] != "localhost") { + remainder = image + } else { + domain, remainder = image[:i], image[i+1:] + } + + imageName := "" + imageVersion := "unknown" + + i = strings.LastIndex(remainder, ":") + if i > -1 { + imageVersion = remainder[i+1:] + imageName = remainder[:i] + } else { + imageName = remainder + } + + if domain != "" { + imageName = domain + "/" + imageName + } + + return imageName, imageVersion +} diff --git a/internal/docker/docker_test.go b/internal/docker/docker_test.go new file mode 100644 index 000000000..14591ab87 --- /dev/null +++ b/internal/docker/docker_test.go @@ -0,0 +1,59 @@ +package docker_test + +import ( + "testing" + + "github.com/influxdata/telegraf/internal/docker" + "github.com/stretchr/testify/require" +) + +func TestParseImage(t *testing.T) { + tests := []struct { + image string + parsedName string + parsedVersion string + }{ + { + image: "postgres", + parsedName: "postgres", + parsedVersion: "unknown", + }, + { + image: "postgres:latest", + parsedName: "postgres", + parsedVersion: "latest", + }, + { + image: "coreos/etcd", + parsedName: "coreos/etcd", + parsedVersion: "unknown", + }, + { + image: "coreos/etcd:latest", + parsedName: "coreos/etcd", + parsedVersion: "latest", + }, + { + image: "quay.io/postgres", + parsedName: "quay.io/postgres", + parsedVersion: "unknown", + }, + { + image: "quay.io:4443/coreos/etcd", + parsedName: "quay.io:4443/coreos/etcd", + parsedVersion: "unknown", + }, + { + image: "quay.io:4443/coreos/etcd:latest", + parsedName: "quay.io:4443/coreos/etcd", + parsedVersion: "latest", + }, + } + for _, tt := range tests { + t.Run("parse name "+tt.image, func(t *testing.T) { + imageName, imageVersion := docker.ParseImage(tt.image) + require.Equal(t, tt.parsedName, imageName) + require.Equal(t, tt.parsedVersion, imageVersion) + }) + } +} diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index bc47583b7..c57ed5c48 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -20,6 +20,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/docker" tlsint "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -361,44 +362,12 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { return nil } -func parseImage(image string) (string, string) { - // Adapts some of the logic from the actual Docker library's image parsing - // routines: - // https://github.com/docker/distribution/blob/release/2.7/reference/normalize.go - domain := "" - remainder := "" - - i := strings.IndexRune(image, '/') - - if i == -1 || (!strings.ContainsAny(image[:i], ".:") && image[:i] != "localhost") { - remainder = image - } else { - domain, remainder = image[:i], image[i+1:] - } - - imageName := "" - imageVersion := "unknown" - - i = strings.LastIndex(remainder, ":") - if i > -1 { - imageVersion = remainder[i+1:] - imageName = 
remainder[:i] - } else { - imageName = remainder - } - - if domain != "" { - imageName = domain + "/" + imageName - } - - return imageName, imageVersion -} - func (d *Docker) gatherContainer( container types.Container, acc telegraf.Accumulator, ) error { var v *types.StatsJSON + // Parse container name var cname string for _, name := range container.Names { @@ -414,7 +383,7 @@ func (d *Docker) gatherContainer( return nil } - imageName, imageVersion := parseImage(container.Image) + imageName, imageVersion := docker.ParseImage(container.Image) tags := map[string]string{ "engine_host": d.engineHost, diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index e1a425314..376d3ed0c 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -9,10 +9,9 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -945,54 +944,3 @@ func TestContainerName(t *testing.T) { }) } } - -func TestParseImage(t *testing.T) { - tests := []struct { - image string - parsedName string - parsedVersion string - }{ - { - image: "postgres", - parsedName: "postgres", - parsedVersion: "unknown", - }, - { - image: "postgres:latest", - parsedName: "postgres", - parsedVersion: "latest", - }, - { - image: "coreos/etcd", - parsedName: "coreos/etcd", - parsedVersion: "unknown", - }, - { - image: "coreos/etcd:latest", - parsedName: "coreos/etcd", - parsedVersion: "latest", - }, - { - image: "quay.io/postgres", - parsedName: "quay.io/postgres", - parsedVersion: "unknown", - }, - { - image: "quay.io:4443/coreos/etcd", - parsedName: "quay.io:4443/coreos/etcd", - parsedVersion: "unknown", - }, - { - image: "quay.io:4443/coreos/etcd:latest", - parsedName: "quay.io:4443/coreos/etcd", - parsedVersion: "latest", - }, - } - for _, tt := range tests { - t.Run("parse name "+tt.image, func(t *testing.T) { - imageName, imageVersion := parseImage(tt.image) - require.Equal(t, tt.parsedName, imageName) - require.Equal(t, tt.parsedVersion, imageVersion) - }) - } -} diff --git a/plugins/inputs/docker_log/README.md b/plugins/inputs/docker_log/README.md index d04adba33..02f44e14c 100644 --- a/plugins/inputs/docker_log/README.md +++ b/plugins/inputs/docker_log/README.md @@ -3,22 +3,35 @@ The docker log plugin uses the Docker Engine API to get logs on running docker containers. -The docker plugin uses the [Official Docker Client](https://github.com/moby/moby/tree/master/client) -to gather logs from the [Engine API](https://docs.docker.com/engine/api/v1.24/). -Note: This plugin works only for containers with the `local` or `json-file` or `journald` logging driver. -### Configuration: +The docker plugin uses the [Official Docker Client][] to gather logs from the +[Engine API][]. + +**Note:** This plugin works only for containers with the `local` or +`json-file` or `journald` logging driver. + +[Official Docker Client]: https://github.com/moby/moby/tree/master/client +[Engine API]: https://docs.docker.com/engine/api/v1.24/ + +### Configuration ```toml -# Read metrics about docker containers [[inputs.docker_log]] ## Docker Endpoint ## To use TCP, set endpoint = "tcp://[ip]:[port]" ## To use environment variables (ie, docker-machine), set endpoint = "ENV" - endpoint = "unix:///var/run/docker.sock" + # endpoint = "unix:///var/run/docker.sock" - ## Containers to include and exclude. Collect all if empty. 
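With `parseImage` promoted to the shared `internal/docker` package, the docker and docker_log inputs now resolve image names and versions identically. A usage sketch inside the telegraf module; the expected output follows the test table that moved with it:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/internal/docker"
)

func main() {
	// A registry host with a port keeps its domain prefix, and the tag
	// after the last colon becomes the version.
	name, version := docker.ParseImage("quay.io:4443/coreos/etcd:latest")
	fmt.Println(name, version) // quay.io:4443/coreos/etcd latest

	// Without a tag, the version is reported as "unknown".
	name, version = docker.ParseImage("postgres")
	fmt.Println(name, version) // postgres unknown
}
```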
Globs accepted. - container_name_include = [] - container_name_exclude = [] + ## When true, container logs are read from the beginning; otherwise + ## reading begins at the end of the log. + # from_beginning = false + + ## Timeout for Docker API calls. + # timeout = "5s" + + ## Containers to include and exclude. Globs accepted. + ## Note that an empty array for both will include all containers + # container_name_include = [] + # container_name_exclude = [] ## Container states to include and exclude. Globs accepted. ## When empty only containers in the "running" state will be captured. @@ -27,8 +40,8 @@ Note: This plugin works only for containers with the `local` or `json-file` or ## docker labels to include and exclude as tags. Globs accepted. ## Note that an empty array for both will include all labels as tags - docker_label_include = [] - docker_label_exclude = [] + # docker_label_include = [] + # docker_label_exclude = [] ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" @@ -41,20 +54,31 @@ Note: This plugin works only for containers with the `local` or `json-file` or #### Environment Configuration When using the `"ENV"` endpoint, the connection is configured using the -[cli Docker environment variables](https://godoc.org/github.com/moby/moby/client#NewEnvClient). +[CLI Docker environment variables][env] -### Metrics: +[env]: https://godoc.org/github.com/moby/moby/client#NewEnvClient + +### Metrics - docker_log - tags: - - container_id + - container_image + - container_version - container_name - - stream + - stream (stdout, stderr, or tty) - fields: + - container_id - message -### Example Output: + +### Example Output ``` -docker_log,com.docker.compose.config-hash=e19e13df8fd01ba2d7c1628158fca45cc91afbbe9661b2d30550547eb53a861e,com.docker.compose.container-number=1,com.docker.compose.oneoff=False,com.docker.compose.project=distribution,com.docker.compose.service=influxdb,com.docker.compose.version=1.21.2,containerId=fce475bbfa4c8380ff858d5d767f78622ca6de955b525477624c2b7896a5b8e4,containerName=aicon-influxdb,host=prash-laptop,logType=stderr log=" [httpd] 172.23.0.2 - aicon_admin [13/Apr/2019:08:35:53 +0000] \"POST /query?db=&q=SHOW+SUBSCRIPTIONS HTTP/1.1\" 200 232 \"-\" \"KapacitorInfluxDBClient\" 2661bc9c-5dc7-11e9-82f8-0242ac170007 1360\n" 1555144553541000000 -docker_log,com.docker.compose.config-hash=fd91b3b096c7ab346971c681b88fe1357c609dcc6850e4ea5b1287ad28a57e5d,com.docker.compose.container-number=1,com.docker.compose.oneoff=False,com.docker.compose.project=distribution,com.docker.compose.service=kapacitor,com.docker.compose.version=1.21.2,containerId=6514d1cf6d19e7ecfedf894941f0a2ea21b8aac5e6f48e64f19dbc9bb2805a25,containerName=aicon-kapacitor,host=prash-laptop,logType=stderr log=" ts=2019-04-13T08:36:00.019Z lvl=info msg=\"http request\" service=http host=172.23.0.7 username=- start=2019-04-13T08:36:00.013835165Z method=POST uri=/write?consistency=&db=_internal&precision=ns&rp=monitor protocol=HTTP/1.1 status=204 referer=- user-agent=InfluxDBClient request-id=2a3eb481-5dc7-11e9-825b-000000000000 duration=5.814404ms\n" 1555144560024000000 +docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! 
[agent] Config: Interval:10s, Quiet:false, Hostname:\"371ee5d3e587\", Flush Interval:10s" 1560913872000000000 +docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Tags enabled: host=371ee5d3e587" 1560913872000000000 +docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Loaded outputs: file" 1560913872000000000 +docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Loaded processors:" 1560913872000000000 +docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Loaded aggregators:" 1560913872000000000 +docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Loaded inputs: net" 1560913872000000000 +docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Using config file: /etc/telegraf/telegraf.conf" 1560913872000000000 +docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Starting Telegraf 1.10.4" 1560913872000000000 ``` diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go new file mode 100644 index 000000000..01a2f83da --- /dev/null +++ b/plugins/inputs/docker_log/docker_log.go @@ -0,0 +1,429 @@ +package docker_log + +import ( + "bufio" + "context" + "crypto/tls" + "io" + "strings" + "sync" + "time" + "unicode" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/pkg/stdcopy" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/docker" + tlsint "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +var sampleConfig = ` + ## Docker Endpoint + ## To use TCP, set endpoint = "tcp://[ip]:[port]" + ## To use environment variables (ie, docker-machine), set endpoint = "ENV" + # endpoint = "unix:///var/run/docker.sock" + + ## When true, container logs are read from the beginning; otherwise + ## reading begins at the end of the log. + # from_beginning = false + + ## Timeout for Docker API calls. + # timeout = "5s" + + ## Containers to include and exclude. Globs accepted. + ## Note that an empty array for both will include all containers + # container_name_include = [] + # container_name_exclude = [] + + ## Container states to include and exclude. Globs accepted. + ## When empty only containers in the "running" state will be captured. 
+ # container_state_include = [] + # container_state_exclude = [] + + ## docker labels to include and exclude as tags. Globs accepted. + ## Note that an empty array for both will include all labels as tags + # docker_label_include = [] + # docker_label_exclude = [] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +` + +const ( + defaultEndpoint = "unix:///var/run/docker.sock" + + // Maximum bytes of a log line before it will be split, size is mirroring + // docker code: + // https://github.com/moby/moby/blob/master/daemon/logger/copier.go#L21 + maxLineBytes = 16 * 1024 +) + +var ( + containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"} +) + +type DockerLogs struct { + Endpoint string `toml:"endpoint"` + FromBeginning bool `toml:"from_beginning"` + Timeout internal.Duration `toml:"timeout"` + LabelInclude []string `toml:"docker_label_include"` + LabelExclude []string `toml:"docker_label_exclude"` + ContainerInclude []string `toml:"container_name_include"` + ContainerExclude []string `toml:"container_name_exclude"` + ContainerStateInclude []string `toml:"container_state_include"` + ContainerStateExclude []string `toml:"container_state_exclude"` + + tlsint.ClientConfig + + newEnvClient func() (Client, error) + newClient func(string, *tls.Config) (Client, error) + + client Client + labelFilter filter.Filter + containerFilter filter.Filter + stateFilter filter.Filter + opts types.ContainerListOptions + wg sync.WaitGroup + mu sync.Mutex + containerList map[string]context.CancelFunc +} + +func (d *DockerLogs) Description() string { + return "Read logging output from the Docker engine" +} + +func (d *DockerLogs) SampleConfig() string { + return sampleConfig +} + +func (d *DockerLogs) Init() error { + var err error + if d.Endpoint == "ENV" { + d.client, err = d.newEnvClient() + if err != nil { + return err + } + } else { + tlsConfig, err := d.ClientConfig.TLSConfig() + if err != nil { + return err + } + d.client, err = d.newClient(d.Endpoint, tlsConfig) + if err != nil { + return err + } + } + + // Create filters + err = d.createLabelFilters() + if err != nil { + return err + } + err = d.createContainerFilters() + if err != nil { + return err + } + err = d.createContainerStateFilters() + if err != nil { + return err + } + + filterArgs := filters.NewArgs() + for _, state := range containerStates { + if d.stateFilter.Match(state) { + filterArgs.Add("status", state) + } + } + + if filterArgs.Len() != 0 { + d.opts = types.ContainerListOptions{ + Filters: filterArgs, + } + } + + return nil +} + +func (d *DockerLogs) addToContainerList(containerID string, cancel context.CancelFunc) error { + d.mu.Lock() + defer d.mu.Unlock() + d.containerList[containerID] = cancel + return nil +} + +func (d *DockerLogs) removeFromContainerList(containerID string) error { + d.mu.Lock() + defer d.mu.Unlock() + delete(d.containerList, containerID) + return nil +} + +func (d *DockerLogs) containerInContainerList(containerID string) bool { + d.mu.Lock() + defer d.mu.Unlock() + _, ok := d.containerList[containerID] + return ok +} + +func (d *DockerLogs) cancelTails() error { + d.mu.Lock() + defer d.mu.Unlock() + for _, cancel := range d.containerList { + cancel() + } + return nil +} + +func (d *DockerLogs) matchedContainerName(names []string) string { + // Check if all container names are filtered; in 
practice I believe + // this array is always of length 1. + for _, name := range names { + trimmedName := strings.TrimPrefix(name, "/") + match := d.containerFilter.Match(trimmedName) + if match { + return trimmedName + } + } + return "" +} + +func (d *DockerLogs) Gather(acc telegraf.Accumulator) error { + ctx := context.Background() + + ctx, cancel := context.WithTimeout(ctx, d.Timeout.Duration) + defer cancel() + containers, err := d.client.ContainerList(ctx, d.opts) + if err != nil { + return err + } + + for _, container := range containers { + if d.containerInContainerList(container.ID) { + continue + } + + containerName := d.matchedContainerName(container.Names) + if containerName == "" { + continue + } + + ctx, cancel := context.WithCancel(context.Background()) + d.addToContainerList(container.ID, cancel) + + // Start a new goroutine for every new container that has logs to collect + d.wg.Add(1) + go func(container types.Container) { + defer d.wg.Done() + defer d.removeFromContainerList(container.ID) + + err = d.tailContainerLogs(ctx, acc, container, containerName) + if err != nil { + acc.AddError(err) + } + }(container) + } + return nil +} + +func (d *DockerLogs) hasTTY(ctx context.Context, container types.Container) (bool, error) { + ctx, cancel := context.WithTimeout(ctx, d.Timeout.Duration) + defer cancel() + c, err := d.client.ContainerInspect(ctx, container.ID) + if err != nil { + return false, err + } + return c.Config.Tty, nil +} + +func (d *DockerLogs) tailContainerLogs( + ctx context.Context, + acc telegraf.Accumulator, + container types.Container, + containerName string, +) error { + imageName, imageVersion := docker.ParseImage(container.Image) + tags := map[string]string{ + "container_name": containerName, + "container_image": imageName, + "container_version": imageVersion, + } + + // Add matching container labels as tags + for k, label := range container.Labels { + if d.labelFilter.Match(k) { + tags[k] = label + } + } + + hasTTY, err := d.hasTTY(ctx, container) + if err != nil { + return err + } + + tail := "0" + if d.FromBeginning { + tail = "all" + } + + logOptions := types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Timestamps: false, + Details: false, + Follow: true, + Tail: tail, + } + + logReader, err := d.client.ContainerLogs(ctx, container.ID, logOptions) + if err != nil { + return err + } + + // If the container is using a TTY, there is only a single stream + // (stdout), and data is copied directly from the container output stream, + // no extra multiplexing or headers. + // + // If the container is *not* using a TTY, streams for stdout and stderr are + // multiplexed. + if hasTTY { + return tailStream(acc, tags, container.ID, logReader, "tty") + } else { + return tailMultiplexed(acc, tags, container.ID, logReader) + } +} + +func tailStream( + acc telegraf.Accumulator, + baseTags map[string]string, + containerID string, + reader io.ReadCloser, + stream string, +) error { + defer reader.Close() + + tags := make(map[string]string, len(baseTags)+1) + for k, v := range baseTags { + tags[k] = v + } + tags["stream"] = stream + + r := bufio.NewReaderSize(reader, 64*1024) + + var err error + var message string + for { + message, err = r.ReadString('\n') + + // Keep any leading space, but remove whitespace from end of line. + // This preserves space in, for example, stacktraces, while removing + // annoying end of line characters and is similar to how other logging + // plugins such as syslog behave. 
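`Gather` above starts one goroutine per newly seen container and records its `context.CancelFunc` in a mutex-guarded map, which is what lets `Stop` shut every tail down. A stripped-down sketch of that bookkeeping, with the log tail reduced to waiting on its context:

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

// tracker mirrors the plugin's containerList: one CancelFunc per container
// ID, guarded by a mutex shared with the gather goroutines.
type tracker struct {
	mu    sync.Mutex
	tails map[string]context.CancelFunc
}

func (t *tracker) add(id string, cancel context.CancelFunc) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.tails[id] = cancel
}

func (t *tracker) cancelAll() {
	t.mu.Lock()
	defer t.mu.Unlock()
	for _, cancel := range t.tails {
		cancel()
	}
}

func main() {
	t := &tracker{tails: make(map[string]context.CancelFunc)}
	var wg sync.WaitGroup

	ctx, cancel := context.WithCancel(context.Background())
	t.add("deadbeef", cancel)

	wg.Add(1)
	go func() {
		defer wg.Done()
		<-ctx.Done() // a real tail streams log lines until canceled
		fmt.Println("tail stopped:", ctx.Err())
	}()

	t.cancelAll() // what Stop does for every tracked container
	wg.Wait()
}
```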
+ message = strings.TrimRightFunc(message, unicode.IsSpace) + + if len(message) != 0 { + acc.AddFields("docker_log", map[string]interface{}{ + "container_id": containerID, + "message": message, + }, tags) + } + + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } +} + +func tailMultiplexed( + acc telegraf.Accumulator, + tags map[string]string, + containerID string, + src io.ReadCloser, +) error { + outReader, outWriter := io.Pipe() + errReader, errWriter := io.Pipe() + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + err := tailStream(acc, tags, containerID, outReader, "stdout") + if err != nil { + acc.AddError(err) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + err := tailStream(acc, tags, containerID, errReader, "stderr") + if err != nil { + acc.AddError(err) + } + }() + + _, err := stdcopy.StdCopy(outWriter, errWriter, src) + outWriter.Close() + errWriter.Close() + src.Close() + wg.Wait() + return err +} + +func (d *DockerLogs) Stop() { + d.cancelTails() + d.wg.Wait() +} + +// Following few functions have been inherited from telegraf docker input plugin +func (d *DockerLogs) createContainerFilters() error { + filter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude) + if err != nil { + return err + } + d.containerFilter = filter + return nil +} + +func (d *DockerLogs) createLabelFilters() error { + filter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude) + if err != nil { + return err + } + d.labelFilter = filter + return nil +} + +func (d *DockerLogs) createContainerStateFilters() error { + if len(d.ContainerStateInclude) == 0 && len(d.ContainerStateExclude) == 0 { + d.ContainerStateInclude = []string{"running"} + } + filter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude) + if err != nil { + return err + } + d.stateFilter = filter + return nil +} + +func init() { + inputs.Add("docker_log", func() telegraf.Input { + return &DockerLogs{ + Timeout: internal.Duration{Duration: time.Second * 5}, + Endpoint: defaultEndpoint, + newEnvClient: NewEnvClient, + newClient: NewClient, + containerList: make(map[string]context.CancelFunc), + } + }) +} diff --git a/plugins/inputs/docker_log/docker_log_test.go b/plugins/inputs/docker_log/docker_log_test.go new file mode 100644 index 000000000..ce61f6135 --- /dev/null +++ b/plugins/inputs/docker_log/docker_log_test.go @@ -0,0 +1,175 @@ +package docker_log + +import ( + "bytes" + "context" + "crypto/tls" + "io" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/stdcopy" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +type MockClient struct { + ContainerListF func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerInspectF func(ctx context.Context, containerID string) (types.ContainerJSON, error) + ContainerLogsF func(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error) +} + +func (c *MockClient) ContainerList( + ctx context.Context, + options types.ContainerListOptions, +) ([]types.Container, error) { + return c.ContainerListF(ctx, options) +} + +func (c *MockClient) ContainerInspect( + ctx context.Context, + containerID string, +) (types.ContainerJSON, error) { + return c.ContainerInspectF(ctx, containerID) +} + 
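`tailMultiplexed` above handles the non-TTY case: `stdcopy.StdCopy` splits Docker's framed stream into an stdout pipe and an stderr pipe, each drained by its own `tailStream` goroutine. A standard-library-only sketch of the pipe fan-out — the demultiplexing itself is faked here by writing to each pipe directly:

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
	"sync"
)

// drain plays the role of tailStream: read lines, trim trailing
// whitespace, and emit each non-empty line tagged with its stream.
func drain(r io.Reader, stream string, wg *sync.WaitGroup) {
	defer wg.Done()
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		line := strings.TrimRight(scanner.Text(), " \t\r")
		if line != "" {
			fmt.Printf("stream=%s message=%q\n", stream, line)
		}
	}
}

func main() {
	outR, outW := io.Pipe()
	errR, errW := io.Pipe()

	var wg sync.WaitGroup
	wg.Add(2)
	go drain(outR, "stdout", &wg)
	go drain(errR, "stderr", &wg)

	// In the plugin, stdcopy.StdCopy demultiplexes the container's log
	// stream into these two writers; here we write to them directly.
	io.WriteString(outW, "hello from stdout\n")
	io.WriteString(errW, "hello from stderr\n")
	outW.Close()
	errW.Close()
	wg.Wait()
}
```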
+func (c *MockClient) ContainerLogs( + ctx context.Context, + containerID string, + options types.ContainerLogsOptions, +) (io.ReadCloser, error) { + return c.ContainerLogsF(ctx, containerID, options) +} + +type Response struct { + io.Reader +} + +func (r *Response) Close() error { + return nil +} + +func Test(t *testing.T) { + tests := []struct { + name string + client *MockClient + expected []telegraf.Metric + }{ + { + name: "no containers", + client: &MockClient{ + ContainerListF: func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + return nil, nil + }, + }, + }, + { + name: "one container tty", + client: &MockClient{ + ContainerListF: func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + return []types.Container{ + { + ID: "deadbeef", + Names: []string{"/telegraf"}, + Image: "influxdata/telegraf:1.11.0", + }, + }, nil + }, + ContainerInspectF: func(ctx context.Context, containerID string) (types.ContainerJSON, error) { + return types.ContainerJSON{ + Config: &container.Config{ + Tty: true, + }, + }, nil + }, + ContainerLogsF: func(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + return &Response{Reader: bytes.NewBuffer([]byte("hello\n"))}, nil + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + }, + map[string]interface{}{ + "container_id": "deadbeef", + "message": "hello", + }, + time.Now(), + ), + }, + }, + { + name: "one container multiplex", + client: &MockClient{ + ContainerListF: func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + return []types.Container{ + { + ID: "deadbeef", + Names: []string{"/telegraf"}, + Image: "influxdata/telegraf:1.11.0", + }, + }, nil + }, + ContainerInspectF: func(ctx context.Context, containerID string) (types.ContainerJSON, error) { + return types.ContainerJSON{ + Config: &container.Config{ + Tty: false, + }, + }, nil + }, + ContainerLogsF: func(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + var buf bytes.Buffer + w := stdcopy.NewStdWriter(&buf, stdcopy.Stdout) + w.Write([]byte("hello from stdout")) + return &Response{Reader: &buf}, nil + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "stdout", + }, + map[string]interface{}{ + "container_id": "deadbeef", + "message": "hello from stdout", + }, + time.Now(), + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + plugin := &DockerLogs{ + Timeout: internal.Duration{Duration: time.Second * 5}, + newClient: func(string, *tls.Config) (Client, error) { return tt.client, nil }, + containerList: make(map[string]context.CancelFunc), + } + + err := plugin.Init() + require.NoError(t, err) + + err = plugin.Gather(&acc) + require.NoError(t, err) + + acc.Wait(len(tt.expected)) + plugin.Stop() + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + }) + } +} diff --git a/plugins/inputs/docker_log/docker_logs.go b/plugins/inputs/docker_log/docker_logs.go deleted file mode 100644 index 813b868ee..000000000 --- 
a/plugins/inputs/docker_log/docker_logs.go +++ /dev/null @@ -1,472 +0,0 @@ -package docker_log - -import ( - "context" - "crypto/tls" - "encoding/binary" - "errors" - "fmt" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" - tlsint "github.com/influxdata/telegraf/internal/tls" - "github.com/influxdata/telegraf/plugins/inputs" - "io" - "strings" - "sync" - "time" -) - -type StdType byte - -const ( - Stdin StdType = iota - Stdout - Stderr - Systemerr - - stdWriterPrefixLen = 8 - stdWriterFdIndex = 0 - stdWriterSizeIndex = 4 - - startingBufLen = 32*1024 + stdWriterPrefixLen + 1 - - ERR_PREFIX = "E! [inputs.docker_log]" - defaultEndpoint = "unix:///var/run/docker.sock" - logBytesMax = 1000 -) - -type DockerLogs struct { - Endpoint string - - Timeout internal.Duration - - LabelInclude []string `toml:"docker_label_include"` - LabelExclude []string `toml:"docker_label_exclude"` - - ContainerInclude []string `toml:"container_name_include"` - ContainerExclude []string `toml:"container_name_exclude"` - - ContainerStateInclude []string `toml:"container_state_include"` - ContainerStateExclude []string `toml:"container_state_exclude"` - - tlsint.ClientConfig - - newEnvClient func() (Client, error) - newClient func(string, *tls.Config) (Client, error) - - client Client - filtersCreated bool - labelFilter filter.Filter - containerFilter filter.Filter - stateFilter filter.Filter - opts types.ContainerListOptions - wg sync.WaitGroup - mu sync.Mutex - containerList map[string]io.ReadCloser -} - -var ( - containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"} -) - -var sampleConfig = ` - ## Docker Endpoint - ## To use TCP, set endpoint = "tcp://[ip]:[port]" - ## To use environment variables (ie, docker-machine), set endpoint = "ENV" - endpoint = "unix:///var/run/docker.sock" - ## Containers to include and exclude. Globs accepted. - ## Note that an empty array for both will include all containers - container_name_include = [] - container_name_exclude = [] - ## Container states to include and exclude. Globs accepted. - ## When empty only containers in the "running" state will be captured. - # container_state_include = [] - # container_state_exclude = [] - - ## docker labels to include and exclude as tags. Globs accepted. 
- ## Note that an empty array for both will include all labels as tags - docker_label_include = [] - docker_label_exclude = [] - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - -func (d *DockerLogs) Description() string { - return "Plugin to get docker logs" -} - -func (d *DockerLogs) SampleConfig() string { - return sampleConfig -} - -func (d *DockerLogs) Gather(acc telegraf.Accumulator) error { - /*Check to see if any new containers have been created since last time*/ - return d.containerListUpdate(acc) -} - -/*Following few functions have been inherited from telegraf docker input plugin*/ -func (d *DockerLogs) createContainerFilters() error { - filter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude) - if err != nil { - return err - } - d.containerFilter = filter - return nil -} - -func (d *DockerLogs) createLabelFilters() error { - filter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude) - if err != nil { - return err - } - d.labelFilter = filter - return nil -} - -func (d *DockerLogs) createContainerStateFilters() error { - if len(d.ContainerStateInclude) == 0 && len(d.ContainerStateExclude) == 0 { - d.ContainerStateInclude = []string{"running"} - } - filter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude) - if err != nil { - return err - } - d.stateFilter = filter - return nil -} - -func (d *DockerLogs) addToContainerList(containerId string, logReader io.ReadCloser) error { - d.mu.Lock() - defer d.mu.Unlock() - d.containerList[containerId] = logReader - return nil -} - -func (d *DockerLogs) removeFromContainerList(containerId string) error { - d.mu.Lock() - defer d.mu.Unlock() - delete(d.containerList, containerId) - return nil -} - -func (d *DockerLogs) containerInContainerList(containerId string) bool { - if _, ok := d.containerList[containerId]; ok { - return true - } - return false -} - -func (d *DockerLogs) stopAllReaders() error { - for _, container := range d.containerList { - container.Close() - } - return nil -} - -func (d *DockerLogs) containerListUpdate(acc telegraf.Accumulator) error { - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) - defer cancel() - if d.client == nil { - return errors.New(fmt.Sprintf("%s : Dock client is null", ERR_PREFIX)) - } - containers, err := d.client.ContainerList(ctx, d.opts) - if err != nil { - return err - } - for _, container := range containers { - if d.containerInContainerList(container.ID) { - continue - } - d.wg.Add(1) - /*Start a new goroutine for every new container that has logs to collect*/ - go func(c types.Container) { - defer d.wg.Done() - logOptions := types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - Timestamps: false, - Details: true, - Follow: true, - Tail: "0", - } - logReader, err := d.client.ContainerLogs(context.Background(), c.ID, logOptions) - if err != nil { - acc.AddError(err) - return - } - d.addToContainerList(c.ID, logReader) - err = d.tailContainerLogs(c, logReader, acc) - if err != nil { - acc.AddError(err) - } - d.removeFromContainerList(c.ID) - return - }(container) - } - return nil -} - -func (d *DockerLogs) tailContainerLogs( - container types.Container, logReader io.ReadCloser, - acc telegraf.Accumulator, -) error { - c, err := d.client.ContainerInspect(context.Background(), container.ID) - if err 
!= nil { - return err - } - /* Parse container name */ - var cname string - for _, name := range container.Names { - trimmedName := strings.TrimPrefix(name, "/") - match := d.containerFilter.Match(trimmedName) - if match { - cname = trimmedName - break - } - } - - if cname == "" { - return errors.New(fmt.Sprintf("%s : container name is null", ERR_PREFIX)) - } - imageName, imageVersion := parseImage(container.Image) - tags := map[string]string{ - "container_name": cname, - "container_image": imageName, - "container_version": imageVersion, - } - fields := map[string]interface{}{} - fields["container_id"] = container.ID - // Add labels to tags - for k, label := range container.Labels { - if d.labelFilter.Match(k) { - tags[k] = label - } - } - if c.Config.Tty { - err = pushTtyLogs(acc, tags, fields, logReader) - } else { - _, err = pushLogs(acc, tags, fields, logReader) - } - if err != nil { - return err - } - return nil -} -func pushTtyLogs(acc telegraf.Accumulator, tags map[string]string, fields map[string]interface{}, src io.Reader) (err error) { - tags["logType"] = "unknown" //in tty mode we wont be able to differentiate b/w stdout and stderr hence unknown - data := make([]byte, logBytesMax) - for { - num, err := src.Read(data) - if num > 0 { - fields["message"] = data[1:num] - acc.AddFields("docker_log", fields, tags) - } - if err == io.EOF { - fields["message"] = data[1:num] - acc.AddFields("docker_log", fields, tags) - return nil - } - if err != nil { - return err - } - } -} - -/* Inspired from https://github.com/moby/moby/blob/master/pkg/stdcopy/stdcopy.go */ -func pushLogs(acc telegraf.Accumulator, tags map[string]string, fields map[string]interface{}, src io.Reader) (written int64, err error) { - var ( - buf = make([]byte, startingBufLen) - bufLen = len(buf) - nr int - er error - frameSize int - ) - for { - // Make sure we have at least a full header - for nr < stdWriterPrefixLen { - var nr2 int - nr2, er = src.Read(buf[nr:]) - nr += nr2 - if er == io.EOF { - if nr < stdWriterPrefixLen { - return written, nil - } - break - } - if er != nil { - return 0, er - } - } - stream := StdType(buf[stdWriterFdIndex]) - // Check the first byte to know where to write - var logType string - switch stream { - case Stdin: - logType = "stdin" - break - case Stdout: - logType = "stdout" - break - case Stderr: - logType = "stderr" - break - case Systemerr: - fallthrough - default: - return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) - } - // Retrieve the size of the frame - frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) - - // Check if the buffer is big enough to read the frame. - // Extend it if necessary. - if frameSize+stdWriterPrefixLen > bufLen { - buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) - bufLen = len(buf) - } - - // While the amount of bytes read is less than the size of the frame + header, we keep reading - for nr < frameSize+stdWriterPrefixLen { - var nr2 int - nr2, er = src.Read(buf[nr:]) - nr += nr2 - if er == io.EOF { - if nr < frameSize+stdWriterPrefixLen { - return written, nil - } - break - } - if er != nil { - return 0, er - } - } - - // we might have an error from the source mixed up in our multiplexed - // stream. if we do, return it. 
- if stream == Systemerr { - return written, fmt.Errorf("error from daemon in stream: %s", string(buf[stdWriterPrefixLen:frameSize+stdWriterPrefixLen])) - } - - tags["stream"] = logType - fields["message"] = buf[stdWriterPrefixLen+1 : frameSize+stdWriterPrefixLen] - acc.AddFields("docker_log", fields, tags) - written += int64(frameSize) - - // Move the rest of the buffer to the beginning - copy(buf, buf[frameSize+stdWriterPrefixLen:]) - // Move the index - nr -= frameSize + stdWriterPrefixLen - } -} - -func (d *DockerLogs) Start(acc telegraf.Accumulator) error { - var c Client - var err error - if d.Endpoint == "ENV" { - c, err = d.newEnvClient() - } else { - tlsConfig, err := d.ClientConfig.TLSConfig() - if err != nil { - return err - } - c, err = d.newClient(d.Endpoint, tlsConfig) - } - if err != nil { - return err - } - d.client = c - // Create label filters if not already created - if !d.filtersCreated { - err := d.createLabelFilters() - if err != nil { - return err - } - err = d.createContainerFilters() - if err != nil { - return err - } - err = d.createContainerStateFilters() - if err != nil { - return err - } - d.filtersCreated = true - } - filterArgs := filters.NewArgs() - for _, state := range containerStates { - if d.stateFilter.Match(state) { - filterArgs.Add("status", state) - } - } - - // All container states were excluded - if filterArgs.Len() == 0 { - return nil - } - - d.opts = types.ContainerListOptions{ - Filters: filterArgs, - } - return nil -} - -/* Inspired from https://github.com/influxdata/telegraf/blob/master/plugins/inputs/docker/docker.go */ -func parseImage(image string) (string, string) { - // Adapts some of the logic from the actual Docker library's image parsing - // routines: - // https://github.com/docker/distribution/blob/release/2.7/reference/normalize.go - domain := "" - remainder := "" - - i := strings.IndexRune(image, '/') - - if i == -1 || (!strings.ContainsAny(image[:i], ".:") && image[:i] != "localhost") { - remainder = image - } else { - domain, remainder = image[:i], image[i+1:] - } - - imageName := "" - imageVersion := "unknown" - - i = strings.LastIndex(remainder, ":") - if i > -1 { - imageVersion = remainder[i+1:] - imageName = remainder[:i] - } else { - imageName = remainder - } - - if domain != "" { - imageName = domain + "/" + imageName - } - - return imageName, imageVersion -} - -func (d *DockerLogs) Stop() { - d.mu.Lock() - d.stopAllReaders() - d.mu.Unlock() - d.wg.Wait() -} - -func init() { - inputs.Add("docker_log", func() telegraf.Input { - return &DockerLogs{ - Timeout: internal.Duration{Duration: time.Second * 5}, - Endpoint: defaultEndpoint, - newEnvClient: NewEnvClient, - newClient: NewClient, - filtersCreated: false, - containerList: make(map[string]io.ReadCloser), - } - }) -} diff --git a/testutil/metric.go b/testutil/metric.go index b92c724f1..0dca9c641 100644 --- a/testutil/metric.go +++ b/testutil/metric.go @@ -123,6 +123,11 @@ func SortMetrics() cmp.Option { return cmpopts.SortSlices(lessFunc) } +// IgnoreTime disables comparison of timestamp. +func IgnoreTime() cmp.Option { + return cmpopts.IgnoreFields(metricDiff{}, "Time") +} + // MetricEqual returns true if the metrics are equal. 
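The new `testutil.IgnoreTime` option above leans on go-cmp's option mechanism, which can exclude named struct fields from a comparison; that is how the docker_log test compares freshly gathered metrics against expectations built with `time.Now()`. A minimal sketch of the same idea outside telegraf, assuming the google/go-cmp module and an illustrative `metric` type:

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type metric struct {
	Name string
	Time time.Time
}

func main() {
	// Skip the Time field, as testutil.IgnoreTime does for metricDiff.
	opt := cmpopts.IgnoreFields(metric{}, "Time")

	a := metric{Name: "docker_log", Time: time.Now()}
	b := metric{Name: "docker_log", Time: time.Unix(0, 0)}
	fmt.Println(cmp.Equal(a, b, opt)) // true: timestamps are not compared
}
```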
func MetricEqual(expected, actual telegraf.Metric) bool { var lhs, rhs *metricDiff From 0a40f5d55ba4c0ac1cc508fdc5458d186a662038 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 20 Jun 2019 11:56:30 -0700 Subject: [PATCH 0951/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 13c65423c..deb45ec3d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ #### Bugfixes - [#5692](https://github.com/influxdata/telegraf/issues/5692): Fix sensor read error stops reporting of all sensors in temp input. +- [#4356](https://github.com/influxdata/telegraf/issues/4356): Fix double pct replacement in sysstat input. ## v1.11.1 [unreleased] @@ -36,6 +37,7 @@ - [#5983](https://github.com/influxdata/telegraf/issues/5983): Omit keys when creating measurement names for GNMI telemetry. - [#5972](https://github.com/influxdata/telegraf/issues/5972): Don't consider pid of 0 when using systemd lookup in procstat. - [#5807](https://github.com/influxdata/telegraf/issues/5807): Skip 404 error reporting in nginx_plus_api input. +- [#5999](https://github.com/influxdata/telegraf/issues/5999): Fix panic if pool_mode column does not exist. ## v1.11 [2019-06-11] From beb64770803196a54f606fae7152528776c8da91 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 20 Jun 2019 13:29:51 -0700 Subject: [PATCH 0952/1815] Drop support for Go 1.9 (#6026) --- .circleci/config.yml | 23 ----------------------- README.md | 4 ++-- 2 files changed, 2 insertions(+), 25 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index f068ae108..df25a3749 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -2,9 +2,6 @@ defaults: defaults: &defaults working_directory: '/go/src/github.com/influxdata/telegraf' - go-1_9: &go-1_9 - docker: - - image: 'quay.io/influxdb/telegraf-ci:1.9.7' go-1_10: &go-1_10 docker: - image: 'quay.io/influxdb/telegraf-ci:1.10.8' @@ -35,14 +32,6 @@ jobs: paths: - '*' - test-go-1.9: - <<: [ *defaults, *go-1_9 ] - steps: - - attach_workspace: - at: '/go/src' - # disabled due to gofmt differences (1.10 vs 1.11). - #- run: 'make check' - - run: 'make test' test-go-1.10: <<: [ *defaults, *go-1_10 ] steps: @@ -109,12 +98,6 @@ workflows: filters: tags: only: /.*/ - - 'test-go-1.9': - requires: - - 'deps' - filters: - tags: - only: /.*/ - 'test-go-1.10': requires: - 'deps' @@ -141,14 +124,12 @@ workflows: only: /.*/ - 'package': requires: - - 'test-go-1.9' - 'test-go-1.10' - 'test-go-1.11' - 'test-go-1.12' - 'test-go-1.12-386' - 'release': requires: - - 'test-go-1.9' - 'test-go-1.10' - 'test-go-1.11' - 'test-go-1.12' @@ -161,9 +142,6 @@ workflows: nightly: jobs: - 'deps' - - 'test-go-1.9': - requires: - - 'deps' - 'test-go-1.10': requires: - 'deps' @@ -178,7 +156,6 @@ workflows: - 'deps' - 'nightly': requires: - - 'test-go-1.9' - 'test-go-1.10' - 'test-go-1.11' - 'test-go-1.12' diff --git a/README.md b/README.md index 3fed264ae..86f34738a 100644 --- a/README.md +++ b/README.md @@ -40,9 +40,9 @@ Ansible role: https://github.com/rossmcdonald/telegraf ### From Source: -Telegraf requires golang version 1.9 or newer, the Makefile requires GNU make. +Telegraf requires golang version 1.10 or newer, the Makefile requires GNU make. -1. [Install Go](https://golang.org/doc/install) >=1.9 (1.11 recommended) +1. [Install Go](https://golang.org/doc/install) >=1.10 (1.12 recommended) 2. [Install dep](https://golang.github.io/dep/docs/installation.html) ==v0.5.0 3. 
Download Telegraf source: ``` From 6738d566dd26e9224d853d0539676811445337b8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 21 Jun 2019 11:57:08 -0700 Subject: [PATCH 0953/1815] Update permission configuration example in postfix input --- plugins/inputs/postfix/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/postfix/README.md b/plugins/inputs/postfix/README.md index 57abd0f5b..a8d4a7537 100644 --- a/plugins/inputs/postfix/README.md +++ b/plugins/inputs/postfix/README.md @@ -29,7 +29,8 @@ $ sudo chmod g+r /var/spool/postfix/maildrop Posix ACL: ```sh -$ sudo setfacl -Rdm g:telegraf:rX /var/spool/postfix/{active,hold,incoming,deferred,maildrop} +$ sudo setfacl -m g:telegraf:rX /var/spool/postfix/{,active,hold,incoming,deferred,maildrop} +$ sudo setfacl -Rdm g:telegraf:rX /var/spool/postfix/{,active,hold,incoming,deferred,maildrop} ``` ### Measurements & Fields: From 587dd149f3f1e0a6ec6924f9ca014a6615f36a14 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 21 Jun 2019 12:14:14 -0700 Subject: [PATCH 0954/1815] Add troubleshooting section to smart input docs --- plugins/inputs/smart/README.md | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/smart/README.md b/plugins/inputs/smart/README.md index 127397f1e..f520308b0 100644 --- a/plugins/inputs/smart/README.md +++ b/plugins/inputs/smart/README.md @@ -24,7 +24,7 @@ To enable SMART on a storage device run: smartctl -s on ``` -### Configuration: +### Configuration ```toml # Read metrics from storage devices supporting S.M.A.R.T. @@ -59,7 +59,7 @@ smartctl -s on # devices = [ "/dev/ada0 -d atacam" ] ``` -### Permissions: +### Permissions It's important to note that this plugin references smartctl, which may require additional permissions to execute successfully. Depending on the user/group permissions of the telegraf user executing this plugin, you may need to use sudo. @@ -80,7 +80,7 @@ telegraf ALL=(ALL) NOPASSWD: SMARTCTL Defaults!SMARTCTL !logfile, !syslog, !pam_session ``` -### Metrics: +### Metrics - smart_device: - tags: @@ -142,7 +142,21 @@ devices can be referenced by the WWN in the following location: To run `smartctl` with `sudo` create a wrapper script and use `path` in the configuration to execute that. 
-### Output +### Troubleshooting + +If this plugin is not working as expected for your SMART enabled device, +please run these commands and include the output in a bug report: +``` +smartctl --scan +``` + +Run the following command replacing your configuration setting for NOCHECK and +the DEVICE from the previous command: +``` +smartctl --info --health --attributes --tolerance=verypermissive --nocheck NOCHECK --format=brief -d DEVICE +``` + +### Example Output ``` smart_device,enabled=Enabled,host=mbpro.local,device=rdisk0,model=APPLE\ SSD\ SM0512F,serial_no=S1K5NYCD964433,wwn=5002538655584d30,capacity=500277790720 udma_crc_errors=0i,exit_status=0i,health_ok=true,read_error_rate=0i,temp_c=40i 1502536854000000000 From e6d71bdb659c22249a75499e10557eae0379598c Mon Sep 17 00:00:00 2001 From: masuyama-ascade Date: Sat, 22 Jun 2019 04:20:35 +0900 Subject: [PATCH 0955/1815] Add container_id field to docker_container_status metrics (#6019) --- plugins/inputs/docker/README.md | 1 + plugins/inputs/docker/docker.go | 7 ++++--- plugins/inputs/docker/docker_test.go | 26 ++++++++++++++++++++++++++ 3 files changed, 31 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index a26b5763e..e8c8d6366 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -273,6 +273,7 @@ status if configured. - container_status - container_version - fields: + - container_id - oomkilled (boolean) - pid (integer) - exitcode (integer) diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index c57ed5c48..355b8cd8a 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -463,9 +463,10 @@ func (d *Docker) gatherContainerInspect( if info.State != nil { tags["container_status"] = info.State.Status statefields := map[string]interface{}{ - "oomkilled": info.State.OOMKilled, - "pid": info.State.Pid, - "exitcode": info.State.ExitCode, + "oomkilled": info.State.OOMKilled, + "pid": info.State.Pid, + "exitcode": info.State.ExitCode, + "container_id": container.ID, } finished, err := time.Parse(time.RFC3339, info.State.FinishedAt) diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index 376d3ed0c..bf2c26a30 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -756,6 +756,32 @@ func TestDockerGatherInfo(t *testing.T) { "container_status": "running", }, ) + + acc.AssertContainsTaggedFields(t, + "docker_container_status", + map[string]interface{}{ + "container_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173", + "exitcode": int(0), + "oomkilled": false, + "pid": int(1234), + "started_at": int64(1528955333266176036), + //"finished_at": float64(0), + }, + map[string]string{ + "engine_host": "absol", + "container_name": "etcd2", + "container_image": "quay.io:4443/coreos/etcd", + "container_version": "v2.2.2", + "ENVVAR1": "loremipsum", + "ENVVAR2": "dolorsitamet", + "ENVVAR3": "=ubuntu:10.04", + "ENVVAR7": "ENVVAR8=ENVVAR9", + "label1": "test_value_1", + "label2": "test_value_2", + "server_version": "17.09.0-ce", + "container_status": "running", + }, + ) } func TestDockerGatherSwarmInfo(t *testing.T) { From 773ed5e62269ad9e1db2fef9f43f7da0037fd404 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 21 Jun 2019 12:21:49 -0700 Subject: [PATCH 0956/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index deb45ec3d..497ecea49 100644 --- 
a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ - [#5972](https://github.com/influxdata/telegraf/issues/5972): Don't consider pid of 0 when using systemd lookup in procstat. - [#5807](https://github.com/influxdata/telegraf/issues/5807): Skip 404 error reporting in nginx_plus_api input. - [#5999](https://github.com/influxdata/telegraf/issues/5999): Fix panic if pool_mode column does not exist. +- [#6019](https://github.com/influxdata/telegraf/issues/6019): Add missing container_id field to docker_container_status metrics. ## v1.11 [2019-06-11] From 131f85db7348710a8b1eba4c746de56afe3e697b Mon Sep 17 00:00:00 2001 From: Mike Moein Date: Fri, 21 Jun 2019 15:25:45 -0400 Subject: [PATCH 0957/1815] Add TLS mutual auth support to jti_openconfig_telemetry plugin (#6027) --- etc/telegraf.conf | 9 ++-- .../inputs/jti_openconfig_telemetry/README.md | 9 ++-- .../openconfig_telemetry.go | 52 +++++++++++-------- 3 files changed, 43 insertions(+), 27 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index cf2a0d933..03427e913 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -4772,9 +4772,12 @@ # "/interfaces", # ] # -# ## x509 Certificate to use with TLS connection. If it is not provided, an insecure -# ## channel will be opened with server -# ssl_cert = "/etc/telegraf/cert.pem" +# ## Optional TLS Config +# tls_ca = "/etc/telegraf/ca.pem" +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = false # # ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms. # ## Failed streams/calls will not be retried if 0 is provided diff --git a/plugins/inputs/jti_openconfig_telemetry/README.md b/plugins/inputs/jti_openconfig_telemetry/README.md index 7c30aaa8d..c0cdf0168 100644 --- a/plugins/inputs/jti_openconfig_telemetry/README.md +++ b/plugins/inputs/jti_openconfig_telemetry/README.md @@ -41,9 +41,12 @@ This plugin reads Juniper Networks implementation of OpenConfig telemetry data f "/interfaces", ] - ## x509 Certificate to use with TLS connection. If it is not provided, an insecure - ## channel will be opened with server - ssl_cert = "/etc/telegraf/cert.pem" + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
## Failed streams/calls will not be retried if 0 is provided diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go index b721c4943..536383732 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go @@ -1,6 +1,7 @@ package jti_openconfig_telemetry import ( + "crypto/tls" "fmt" "log" "net" @@ -11,6 +12,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + internaltls "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/auth" "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/oc" @@ -28,13 +30,17 @@ type OpenConfigTelemetry struct { Password string ClientID string `toml:"client_id"` SampleFrequency internal.Duration `toml:"sample_frequency"` - SSLCert string `toml:"ssl_cert"` StrAsTags bool `toml:"str_as_tags"` RetryDelay internal.Duration `toml:"retry_delay"` - sensorsConfig []sensorConfig + sensorsConfig []sensorConfig + + // GRPC settings grpcClientConns []*grpc.ClientConn - wg *sync.WaitGroup + EnableTLS bool `toml:"enable_tls"` + internaltls.ClientConfig + + wg *sync.WaitGroup } var ( @@ -74,10 +80,13 @@ var ( "/interfaces", ] - ## x509 Certificate to use with TLS connection. If it is not provided, an insecure - ## channel will be opened with server - ssl_cert = "/etc/telegraf/cert.pem" - + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms. ## Failed streams/calls will not be retried if 0 is provided retry_delay = "1000ms" @@ -343,21 +352,27 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context, } func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error { + + var tlscfg *tls.Config + var opts []grpc.DialOption + var err error + // Build sensors config if m.splitSensorConfig() == 0 { return fmt.Errorf("E! No valid sensor configuration available") } - // If SSL certificate is provided, use transport credentials - var err error - var transportCredentials credentials.TransportCredentials - if m.SSLCert != "" { - transportCredentials, err = credentials.NewClientTLSFromFile(m.SSLCert, "") - if err != nil { - return fmt.Errorf("E! Failed to read certificate: %v", err) + // Parse TLS config + if m.EnableTLS { + if tlscfg, err = m.ClientConfig.TLSConfig(); err != nil { + return err } + } + + if tlscfg != nil { + opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(tlscfg))) } else { - transportCredentials = nil + opts = append(opts, grpc.WithInsecure()) } // Connect to given list of servers and start collecting data @@ -373,12 +388,7 @@ func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error { continue } - // If a certificate is provided, open a secure channel. Else open insecure one - if transportCredentials != nil { - grpcClientConn, err = grpc.Dial(server, grpc.WithTransportCredentials(transportCredentials)) - } else { - grpcClientConn, err = grpc.Dial(server, grpc.WithInsecure()) - } + grpcClientConn, err = grpc.Dial(server, opts...) if err != nil { log.Printf("E! 
Failed to connect to %s: %v", server, err) } else { From 1dc30282373862e0d4745fd3666dcbb22df2b777 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 21 Jun 2019 12:29:34 -0700 Subject: [PATCH 0958/1815] Add struct tags for jti_openconfig_telemetry plugin --- .../inputs/jti_openconfig_telemetry/README.md | 1 + .../openconfig_telemetry.go | 46 ++++++++----------- 2 files changed, 20 insertions(+), 27 deletions(-) diff --git a/plugins/inputs/jti_openconfig_telemetry/README.md b/plugins/inputs/jti_openconfig_telemetry/README.md index c0cdf0168..1a28b55ae 100644 --- a/plugins/inputs/jti_openconfig_telemetry/README.md +++ b/plugins/inputs/jti_openconfig_telemetry/README.md @@ -42,6 +42,7 @@ This plugin reads Juniper Networks implementation of OpenConfig telemetry data f ] ## Optional TLS Config + # enable_tls = true # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go index 536383732..c30ef9bf4 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go @@ -1,7 +1,6 @@ package jti_openconfig_telemetry import ( - "crypto/tls" "fmt" "log" "net" @@ -24,23 +23,20 @@ import ( ) type OpenConfigTelemetry struct { - Servers []string - Sensors []string - Username string - Password string + Servers []string `toml:"servers"` + Sensors []string `toml:"sensors"` + Username string `toml:"username"` + Password string `toml:"password"` ClientID string `toml:"client_id"` SampleFrequency internal.Duration `toml:"sample_frequency"` StrAsTags bool `toml:"str_as_tags"` RetryDelay internal.Duration `toml:"retry_delay"` - - sensorsConfig []sensorConfig - - // GRPC settings - grpcClientConns []*grpc.ClientConn - EnableTLS bool `toml:"enable_tls"` + EnableTLS bool `toml:"enable_tls"` internaltls.ClientConfig - wg *sync.WaitGroup + sensorsConfig []sensorConfig + grpcClientConns []*grpc.ClientConn + wg *sync.WaitGroup } var ( @@ -50,8 +46,8 @@ var ( ## List of device addresses to collect telemetry from servers = ["localhost:1883"] - ## Authentication details. Username and password are must if device expects - ## authentication. Client ID must be unique when connecting from multiple instances + ## Authentication details. Username and password are must if device expects + ## authentication. Client ID must be unique when connecting from multiple instances ## of telegraf to the same device username = "user" password = "pass" @@ -63,16 +59,16 @@ var ( ## Sensors to subscribe for ## A identifier for each sensor can be provided in path by separating with space ## Else sensor path will be used as identifier - ## When identifier is used, we can provide a list of space separated sensors. - ## A single subscription will be created with all these sensors and data will + ## When identifier is used, we can provide a list of space separated sensors. + ## A single subscription will be created with all these sensors and data will ## be saved to measurement with this identifier name sensors = [ "/interfaces/", "collection /components/ /lldp", ] - ## We allow specifying sensor group level reporting rate. To do this, specify the - ## reporting rate in Duration at the beginning of sensor paths / collection + ## We allow specifying sensor group level reporting rate. 
To do this, specify the + ## reporting rate in Duration at the beginning of sensor paths / collection ## name. For entries without reporting rate, we use configured sample frequency sensors = [ "1000ms customReporting /interfaces /lldp", @@ -81,12 +77,13 @@ var ( ] ## Optional TLS Config + # enable_tls = true # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false - + ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms. ## Failed streams/calls will not be retried if 0 is provided retry_delay = "1000ms" @@ -353,23 +350,18 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context, func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error { - // Build sensors config if m.splitSensorConfig() == 0 { return fmt.Errorf("E! No valid sensor configuration available") } // Parse TLS config + var opts []grpc.DialOption if m.EnableTLS { - tlscfg, err := m.ClientConfig.TLSConfig() + if err != nil { return err } - } - - if tlscfg != nil { opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(tlscfg))) } else { opts = append(opts, grpc.WithInsecure()) From 791d154ec1ddeb89c17c05903cedcc52b5fd1274 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 21 Jun 2019 12:31:19 -0700 Subject: [PATCH 0959/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 497ecea49..f936a2aaf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ - [#6006](https://github.com/influxdata/telegraf/pull/6006): Add support for interface field in http_response input plugin. - [#5996](https://github.com/influxdata/telegraf/pull/5996): Add container uptime_ns in docker input plugin. - [#6016](https://github.com/influxdata/telegraf/pull/6016): Add better user-facing errors for API timeouts in docker input. +- [#6027](https://github.com/influxdata/telegraf/pull/6027): Add TLS mutual auth support to jti_openconfig_telemetry input.
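Condensing the dialing logic that patches 0957 and 0958 above settle on: when `enable_tls` is set, the parsed `*tls.Config` (which carries the CA plus the client certificate and key needed for mutual auth) is wrapped in gRPC transport credentials; otherwise the plugin dials insecurely. A rough sketch with a hypothetical `dialOptions` helper, not the plugin's actual code:

```go
package example

import (
	"crypto/tls"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// dialOptions mirrors the branch in Start above: a nil *tls.Config means
// TLS was not enabled, so fall back to an insecure channel.
func dialOptions(tlscfg *tls.Config) []grpc.DialOption {
	if tlscfg == nil {
		return []grpc.DialOption{grpc.WithInsecure()}
	}
	return []grpc.DialOption{
		grpc.WithTransportCredentials(credentials.NewTLS(tlscfg)),
	}
}
```

Each server is then dialed with `grpc.Dial(server, dialOptions(tlscfg)...)`, matching the variadic call introduced in patch 0957.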
#### Bugfixes From f405bca034e9348360cc041108756eade5a2f63c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 21 Jun 2019 14:45:56 -0700 Subject: [PATCH 0960/1815] Fix docker input unit tests --- plugins/inputs/docker/docker_test.go | 85 +++++++++++----------------- 1 file changed, 32 insertions(+), 53 deletions(-) diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index bf2c26a30..e29c6afe9 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -543,12 +543,13 @@ func TestContainerStatus(t *testing.T) { // tags Status string // fields - OOMKilled bool - Pid int - ExitCode int - StartedAt time.Time - FinishedAt time.Time - UptimeNs int64 + ContainerID string + OOMKilled bool + Pid int + ExitCode int + StartedAt time.Time + FinishedAt time.Time + UptimeNs int64 } var tests = []struct { @@ -564,12 +565,13 @@ func TestContainerStatus(t *testing.T) { }, inspect: containerInspect(), expect: expectation{ - Status: "running", - OOMKilled: false, - Pid: 1234, - ExitCode: 0, - StartedAt: time.Date(2018, 6, 14, 5, 48, 53, 266176036, time.UTC), - UptimeNs: int64(3 * time.Minute), + ContainerID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb", + Status: "running", + OOMKilled: false, + Pid: 1234, + ExitCode: 0, + StartedAt: time.Date(2018, 6, 14, 5, 48, 53, 266176036, time.UTC), + UptimeNs: int64(3 * time.Minute), }, }, { @@ -580,13 +582,14 @@ func TestContainerStatus(t *testing.T) { return i }(), expect: expectation{ - Status: "running", - OOMKilled: false, - Pid: 1234, - ExitCode: 0, - StartedAt: time.Date(2018, 6, 14, 5, 48, 53, 266176036, time.UTC), - FinishedAt: time.Date(2018, 6, 14, 5, 53, 53, 266176036, time.UTC), - UptimeNs: int64(5 * time.Minute), + ContainerID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb", + Status: "running", + OOMKilled: false, + Pid: 1234, + ExitCode: 0, + StartedAt: time.Date(2018, 6, 14, 5, 48, 53, 266176036, time.UTC), + FinishedAt: time.Date(2018, 6, 14, 5, 53, 53, 266176036, time.UTC), + UptimeNs: int64(5 * time.Minute), }, }, { @@ -598,11 +601,12 @@ func TestContainerStatus(t *testing.T) { return i }(), expect: expectation{ - Status: "running", - OOMKilled: false, - Pid: 1234, - ExitCode: 0, - FinishedAt: time.Date(2018, 6, 14, 5, 53, 53, 266176036, time.UTC), + ContainerID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb", + Status: "running", + OOMKilled: false, + Pid: 1234, + ExitCode: 0, + FinishedAt: time.Date(2018, 6, 14, 5, 53, 53, 266176036, time.UTC), }, }, } @@ -636,9 +640,10 @@ func TestContainerStatus(t *testing.T) { require.NoError(t, err) fields := map[string]interface{}{ - "oomkilled": tt.expect.OOMKilled, - "pid": tt.expect.Pid, - "exitcode": tt.expect.ExitCode, + "oomkilled": tt.expect.OOMKilled, + "pid": tt.expect.Pid, + "exitcode": tt.expect.ExitCode, + "container_id": tt.expect.ContainerID, } if started := tt.expect.StartedAt; !started.IsZero() { @@ -756,32 +761,6 @@ func TestDockerGatherInfo(t *testing.T) { "container_status": "running", }, ) - - acc.AssertContainsTaggedFields(t, - "docker_container_status", - map[string]interface{}{ - "container_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173", - "exitcode": int(0), - "oomkilled": false, - "pid": int(1234), - "started_at": int64(1528955333266176036), - //"finished_at": float64(0), - }, - map[string]string{ - "engine_host": "absol", - "container_name": "etcd2", - "container_image": "quay.io:4443/coreos/etcd", - 
"container_version": "v2.2.2", - "ENVVAR1": "loremipsum", - "ENVVAR2": "dolorsitamet", - "ENVVAR3": "=ubuntu:10.04", - "ENVVAR7": "ENVVAR8=ENVVAR9", - "label1": "test_value_1", - "label2": "test_value_2", - "server_version": "17.09.0-ce", - "container_status": "running", - }, - ) } func TestDockerGatherSwarmInfo(t *testing.T) { From aa84011dc33ce1f5d4383d34df057f3d08178bec Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 21 Jun 2019 18:39:00 -0700 Subject: [PATCH 0961/1815] Remove flaky test for udp_listener --- .../inputs/udp_listener/udp_listener_test.go | 69 +++++++++---------- 1 file changed, 33 insertions(+), 36 deletions(-) diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go index ed206f173..345db62a4 100644 --- a/plugins/inputs/udp_listener/udp_listener_test.go +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -8,14 +8,11 @@ import ( "log" "net" "os" - "runtime" "strings" "testing" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -42,42 +39,42 @@ func newTestUdpListener() (*UdpListener, chan []byte) { return listener, in } -func TestHighTrafficUDP(t *testing.T) { - listener := UdpListener{ - ServiceAddress: ":8126", - AllowedPendingMessages: 100000, - } - var err error - listener.parser, err = parsers.NewInfluxParser() - require.NoError(t, err) - acc := &testutil.Accumulator{} +// func TestHighTrafficUDP(t *testing.T) { +// listener := UdpListener{ +// ServiceAddress: ":8126", +// AllowedPendingMessages: 100000, +// } +// var err error +// listener.parser, err = parsers.NewInfluxParser() +// require.NoError(t, err) +// acc := &testutil.Accumulator{} - // send multiple messages to socket - err = listener.Start(acc) - require.NoError(t, err) +// // send multiple messages to socket +// err = listener.Start(acc) +// require.NoError(t, err) - conn, err := net.Dial("udp", "127.0.0.1:8126") - require.NoError(t, err) - mlen := int64(len(testMsgs)) - var sent int64 - for i := 0; i < 20000; i++ { - for sent > listener.BytesRecv.Get()+32000 { - // more than 32kb sitting in OS buffer, let it drain - runtime.Gosched() - } - conn.Write([]byte(testMsgs)) - sent += mlen - } - for sent > listener.BytesRecv.Get() { - runtime.Gosched() - } - for len(listener.in) > 0 { - runtime.Gosched() - } - listener.Stop() +// conn, err := net.Dial("udp", "127.0.0.1:8126") +// require.NoError(t, err) +// mlen := int64(len(testMsgs)) +// var sent int64 +// for i := 0; i < 20000; i++ { +// for sent > listener.BytesRecv.Get()+32000 { +// // more than 32kb sitting in OS buffer, let it drain +// runtime.Gosched() +// } +// conn.Write([]byte(testMsgs)) +// sent += mlen +// } +// for sent > listener.BytesRecv.Get() { +// runtime.Gosched() +// } +// for len(listener.in) > 0 { +// runtime.Gosched() +// } +// listener.Stop() - assert.Equal(t, uint64(100000), acc.NMetrics()) -} +// assert.Equal(t, uint64(100000), acc.NMetrics()) +// } func TestConnectUDP(t *testing.T) { listener := UdpListener{ From bd9ddd8cb15a122214c9b67f45d270253828ae7b Mon Sep 17 00:00:00 2001 From: Nic Grobler Date: Mon, 24 Jun 2019 20:03:05 +0200 Subject: [PATCH 0962/1815] Fix filecount plugin size tests (#6038) --- plugins/inputs/filecount/filecount.go | 11 ++- plugins/inputs/filecount/filecount_test.go | 84 +++++++++++++---- .../inputs/filecount/filesystem_helpers.go | 73 +++++++++++++++ .../filecount/filesystem_helpers_test.go | 90 +++++++++++++++++++ 
4 files changed, 236 insertions(+), 22 deletions(-) create mode 100644 plugins/inputs/filecount/filesystem_helpers.go create mode 100644 plugins/inputs/filecount/filesystem_helpers_test.go diff --git a/plugins/inputs/filecount/filecount.go b/plugins/inputs/filecount/filecount.go index c0072e0d8..929ec66a7 100644 --- a/plugins/inputs/filecount/filecount.go +++ b/plugins/inputs/filecount/filecount.go @@ -58,6 +58,7 @@ type FileCount struct { MTime internal.Duration `toml:"mtime"` fileFilters []fileFilterFunc globPaths []globpath.GlobPath + Fs fileSystem } func (_ *FileCount) Description() string { @@ -159,7 +160,7 @@ func (fc *FileCount) count(acc telegraf.Accumulator, basedir string, glob globpa if err == nil && rel == "." { return nil } - file, err := os.Stat(path) + file, err := fc.Fs.Stat(path) if err != nil { if os.IsNotExist(err) { return nil @@ -244,7 +245,7 @@ func (fc *FileCount) Gather(acc telegraf.Accumulator) error { } for _, glob := range fc.globPaths { - for _, dir := range onlyDirectories(glob.GetRoots()) { + for _, dir := range fc.onlyDirectories(glob.GetRoots()) { fc.count(acc, dir, glob) } } @@ -252,10 +253,10 @@ func (fc *FileCount) Gather(acc telegraf.Accumulator) error { return nil } -func onlyDirectories(directories []string) []string { +func (fc *FileCount) onlyDirectories(directories []string) []string { out := make([]string, 0) for _, path := range directories { - info, err := os.Stat(path) + info, err := fc.Fs.Stat(path) if err == nil && info.IsDir() { out = append(out, path) } @@ -286,6 +287,7 @@ func (fc *FileCount) initGlobPaths(acc telegraf.Accumulator) { fc.globPaths = append(fc.globPaths, *glob) } } + } func NewFileCount() *FileCount { @@ -298,6 +300,7 @@ func NewFileCount() *FileCount { Size: internal.Size{Size: 0}, MTime: internal.Duration{Duration: 0}, fileFilters: nil, + Fs: osFS{}, } } diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go index 2294e8ce6..99213104b 100644 --- a/plugins/inputs/filecount/filecount_test.go +++ b/plugins/inputs/filecount/filecount_test.go @@ -2,7 +2,6 @@ package filecount import ( "os" - "path/filepath" "runtime" "strings" "testing" @@ -18,7 +17,7 @@ func TestNoFilters(t *testing.T) { matches := []string{"foo", "bar", "baz", "qux", "subdir/", "subdir/quux", "subdir/quuz", "subdir/nested2", "subdir/nested2/qux"} - fileCountEquals(t, fc, len(matches), 9084) + fileCountEquals(t, fc, len(matches), 5096) } func TestNoFiltersOnChildDir(t *testing.T) { @@ -30,9 +29,8 @@ func TestNoFiltersOnChildDir(t *testing.T) { tags := map[string]string{"directory": getTestdataDir() + "/subdir"} acc := testutil.Accumulator{} acc.GatherError(fc.Gather) - require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches)))) - require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(4542))) + require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(600))) } func TestNoRecursiveButSuperMeta(t *testing.T) { @@ -46,7 +44,7 @@ func TestNoRecursiveButSuperMeta(t *testing.T) { acc.GatherError(fc.Gather) require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches)))) - require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(4096))) + require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(200))) } func TestNameFilter(t *testing.T) { @@ -60,20 +58,22 @@ func TestNonRecursive(t *testing.T) { fc := getNoFilterFileCount() fc.Recursive = false matches := []string{"foo", "bar", "baz", "qux", "subdir"} - fileCountEquals(t, fc, len(matches), 4542) + 
+ fileCountEquals(t, fc, len(matches), 4496) } func TestDoubleAndSimpleStar(t *testing.T) { fc := getNoFilterFileCount() fc.Directories = []string{getTestdataDir() + "/**/*"} matches := []string{"qux"} + tags := map[string]string{"directory": getTestdataDir() + "/subdir/nested2"} acc := testutil.Accumulator{} acc.GatherError(fc.Gather) require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches)))) - require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(446))) + require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(400))) } func TestRegularOnlyFilter(t *testing.T) { @@ -82,7 +82,8 @@ func TestRegularOnlyFilter(t *testing.T) { matches := []string{ "foo", "bar", "baz", "qux", "subdir/quux", "subdir/quuz", "subdir/nested2/qux"} - fileCountEquals(t, fc, len(matches), 892) + + fileCountEquals(t, fc, len(matches), 800) } func TestSizeFilter(t *testing.T) { @@ -94,23 +95,22 @@ func TestSizeFilter(t *testing.T) { fc.Size = internal.Size{Size: 100} matches = []string{"qux", "subdir/nested2//qux"} - fileCountEquals(t, fc, len(matches), 892) + + fileCountEquals(t, fc, len(matches), 800) } func TestMTimeFilter(t *testing.T) { - oldFile := filepath.Join(getTestdataDir(), "baz") - mtime := time.Date(1979, time.December, 14, 18, 25, 5, 0, time.UTC) - if err := os.Chtimes(oldFile, mtime, mtime); err != nil { - t.Skip("skipping mtime filter test.") - } + + mtime := time.Date(2011, time.December, 14, 18, 25, 5, 0, time.UTC) fileAge := time.Since(mtime) - (60 * time.Second) fc := getNoFilterFileCount() fc.MTime = internal.Duration{Duration: -fileAge} matches := []string{"foo", "bar", "qux", "subdir/", "subdir/quux", "subdir/quuz", - "sbudir/nested2", "subdir/nested2/qux"} - fileCountEquals(t, fc, len(matches), 9084) + "subdir/nested2", "subdir/nested2/qux"} + + fileCountEquals(t, fc, len(matches), 5096) fc.MTime = internal.Duration{Duration: fileAge} matches = []string{"baz"} @@ -126,12 +126,60 @@ func getNoFilterFileCount() FileCount { Size: internal.Size{Size: 0}, MTime: internal.Duration{Duration: 0}, fileFilters: nil, + Fs: getFakeFileSystem(getTestdataDir()), } } func getTestdataDir() string { - _, filename, _, _ := runtime.Caller(1) - return strings.Replace(filename, "filecount_test.go", "testdata", 1) + dir, err := os.Getwd() + if err != nil { + // if we cannot even establish the test directory, further progress is meaningless + panic(err) + } + + var chunks []string + var testDirectory string + + if runtime.GOOS == "windows" { + chunks = strings.Split(dir, "\\") + testDirectory = strings.Join(chunks[:], "\\") + "\\testdata" + } else { + chunks = strings.Split(dir, "/") + testDirectory = strings.Join(chunks[:], "/") + "/testdata" + } + return testDirectory +} + +func getFakeFileSystem(basePath string) fakeFileSystem { + // create our desired "filesystem" object, complete with an internal map allowing our funcs to return meta data as requested + + mtime := time.Date(2015, time.December, 14, 18, 25, 5, 0, time.UTC) + olderMtime := time.Date(2010, time.December, 14, 18, 25, 5, 0, time.UTC) + + // set file permissions + var fmask uint32 = 0666 + var dmask uint32 = 0666 + + // set directory bit + dmask |= (1 << uint(32-1)) + + // create a lookup map for getting "files" from the "filesystem" + fileList := map[string]fakeFileInfo{ + basePath: {name: "testdata", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true}, + basePath + "/foo": {name: "foo", filemode: uint32(fmask), modtime: mtime}, + basePath + "/bar": {name: "bar", filemode:
uint32(fmask), modtime: mtime}, + basePath + "/baz": {name: "baz", filemode: uint32(fmask), modtime: olderMtime}, + basePath + "/qux": {name: "qux", size: int64(400), filemode: uint32(fmask), modtime: mtime}, + basePath + "/subdir": {name: "subdir", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true}, + basePath + "/subdir/quux": {name: "quux", filemode: uint32(fmask), modtime: mtime}, + basePath + "/subdir/quuz": {name: "quuz", filemode: uint32(fmask), modtime: mtime}, + basePath + "/subdir/nested2": {name: "nested2", size: int64(200), filemode: uint32(dmask), modtime: mtime, isdir: true}, + basePath + "/subdir/nested2/qux": {name: "qux", filemode: uint32(fmask), modtime: mtime, size: int64(400)}, + } + + fs := fakeFileSystem{files: fileList} + return fs + } func fileCountEquals(t *testing.T, fc FileCount, expectedCount int, expectedSize int) { diff --git a/plugins/inputs/filecount/filesystem_helpers.go b/plugins/inputs/filecount/filesystem_helpers.go new file mode 100644 index 000000000..2bd6c0951 --- /dev/null +++ b/plugins/inputs/filecount/filesystem_helpers.go @@ -0,0 +1,73 @@ +package filecount + +import ( + "errors" + "io" + "os" + "time" +) + +/* + The code below is lifted from numerous articles and originates from Andrew Gerrand's 10 things you (probably) don't know about Go. It allows for mocking a filesystem; this allows for consistent testing of this code across platforms (directory sizes reported + differently by different platforms, for example), while preserving the rest of the functionality as-is, without modification. +*/ + +type fileSystem interface { + Open(name string) (file, error) + Stat(name string) (os.FileInfo, error) +} + +type file interface { + io.Closer + io.Reader + io.ReaderAt + io.Seeker + Stat() (os.FileInfo, error) +} + +// osFS implements fileSystem using the local disk +type osFS struct{} + +func (osFS) Open(name string) (file, error) { return os.Open(name) } +func (osFS) Stat(name string) (os.FileInfo, error) { return os.Stat(name) } + +/* + The following are for mocking the filesystem - this allows us to mock Stat() files. This means that we can set file attributes, and know that they + will be the same regardless of the platform sitting underneath our tests (directory sizes vary; see https://github.com/influxdata/telegraf/issues/6011) + + NOTE: still need the on-disk file structure to mirror this because the 3rd party library ("github.com/karrick/godirwalk") uses its own + walk functions, which we cannot mock from here.
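+
+   For example, the filecount tests construct a FileCount with Fs set to a fakeFileSystem
+   (via getFakeFileSystem) in place of the default osFS{}, so Stat() returns the canned
+   metadata regardless of which platform the tests run on.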
+*/ + +type fakeFileSystem struct { + files map[string]fakeFileInfo +} + +type fakeFileInfo struct { + name string + size int64 + filemode uint32 + modtime time.Time + isdir bool + sys interface{} +} + +func (f fakeFileInfo) Name() string { return f.name } +func (f fakeFileInfo) Size() int64 { return f.size } +func (f fakeFileInfo) Mode() os.FileMode { return os.FileMode(f.filemode) } +func (f fakeFileInfo) ModTime() time.Time { return f.modtime } +func (f fakeFileInfo) IsDir() bool { return f.isdir } +func (f fakeFileInfo) Sys() interface{} { return f.sys } + +func (f fakeFileSystem) Open(name string) (file, error) { + return nil, &os.PathError{Op: "Open", Path: name, Err: errors.New("Not implemented by fake filesystem")} +} + +func (f fakeFileSystem) Stat(name string) (os.FileInfo, error) { + if fakeInfo, found := f.files[name]; found { + return fakeInfo, nil + } + return nil, &os.PathError{Op: "Stat", Path: name, Err: errors.New("No such file or directory")} + +} diff --git a/plugins/inputs/filecount/filesystem_helpers_test.go b/plugins/inputs/filecount/filesystem_helpers_test.go new file mode 100644 index 000000000..de028dcab --- /dev/null +++ b/plugins/inputs/filecount/filesystem_helpers_test.go @@ -0,0 +1,90 @@ +package filecount + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestMTime(t *testing.T) { + //this is the time our foo file should have + mtime := time.Date(2015, time.December, 14, 18, 25, 5, 0, time.UTC) + + fs := getTestFileSystem() + fileInfo, err := fs.Stat("/testdata/foo") + require.Nil(t, err) + require.Equal(t, mtime, fileInfo.ModTime()) +} + +func TestSize(t *testing.T) { + //this is the size our testdata directory should have + size := int64(4096) + fs := getTestFileSystem() + fileInfo, err := fs.Stat("/testdata") + require.Nil(t, err) + require.Equal(t, size, fileInfo.Size()) +} + +func TestIsDir(t *testing.T) { + //testdata should be flagged as a directory + dir := true + fs := getTestFileSystem() + fileInfo, err := fs.Stat("/testdata") + require.Nil(t, err) + require.Equal(t, dir, fileInfo.IsDir()) +} + +func TestRealFS(t *testing.T) { + //test that the default (non-test) empty FS causes expected behaviour + var fs fileSystem = osFS{} + //the following file exists on disk - and not in our fake fs + fileInfo, err := fs.Stat(getTestdataDir() + "/qux") + require.Nil(t, err) + require.Equal(t, false, fileInfo.IsDir()) + require.Equal(t, int64(446), fileInfo.Size()) + + // now swap out real, for fake filesystem + fs = getTestFileSystem() + // now, the same test as above will return an error as the file doesn't exist in our fake fs + expectedError := "Stat " + getTestdataDir() + "/qux: No such file or directory" + fileInfo, err = fs.Stat(getTestdataDir() + "/qux") + require.Equal(t, expectedError, err.Error()) + // and verify that what we DO expect to find, we do + fileInfo, err = fs.Stat("/testdata/foo") + require.Nil(t, err) +} + +func getTestFileSystem() fakeFileSystem { + /* + create our desired "filesystem" object, complete with an internal map allowing our funcs to return meta data as requested + + type FileInfo interface { + Name() string // base name of the file + Size() int64 // length in bytes of file + Mode() FileMode // file mode bits + ModTime() time.Time // modification time + IsDir() bool // returns bool indicating if a Dir or not + Sys() interface{} // underlying data source.
always nil (in this case) + } + + */ + + mtime := time.Date(2015, time.December, 14, 18, 25, 5, 0, time.UTC) + + // set file permissions + var fmask uint32 = 0666 + var dmask uint32 = 0666 + + // set directory bit + dmask |= (1 << uint(32-1)) + + fileList := map[string]fakeFileInfo{ + "/testdata": {name: "testdata", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true}, + "/testdata/foo": {name: "foo", filemode: uint32(fmask), modtime: mtime}, + } + + fs := fakeFileSystem{files: fileList} + return fs + +} From a5c94db6259e30938f37971f78dc0953881e56a5 Mon Sep 17 00:00:00 2001 From: Tim Ehlers Date: Mon, 24 Jun 2019 20:48:07 -0500 Subject: [PATCH 0963/1815] Ignore error when utmp is missing (#5742) --- plugins/inputs/system/README.md | 2 +- plugins/inputs/system/system.go | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/system/README.md b/plugins/inputs/system/README.md index efaa8a17f..d5bcd7b03 100644 --- a/plugins/inputs/system/README.md +++ b/plugins/inputs/system/README.md @@ -13,7 +13,7 @@ and number of users logged in. It is similar to the unix `uptime` command. #### Permissions: The `n_users` field requires read access to `/var/run/utmp`, and may require -the `telegraf` user to be added to the `utmp` group on some systems. +the `telegraf` user to be added to the `utmp` group on some systems. If this file does not exist, `n_users` will be skipped. ### Metrics: diff --git a/plugins/inputs/system/system.go b/plugins/inputs/system/system.go index 5c68870bb..faf44f03e 100644 --- a/plugins/inputs/system/system.go +++ b/plugins/inputs/system/system.go @@ -4,6 +4,7 @@ import ( "bufio" "bytes" "fmt" + "log" "os" "runtime" "strings" @@ -44,8 +45,10 @@ func (_ *SystemStats) Gather(acc telegraf.Accumulator) error { users, err := host.Users() if err == nil { fields["n_users"] = len(users) - } else if !os.IsPermission(err) { - return err + } else if os.IsNotExist(err) { + log.Printf("D! [inputs.system] Error reading users: %v", err) + } else if os.IsPermission(err) { + log.Printf("D! [inputs.system] %v", err) } now := time.Now() From e1cfc9f3a03a044ca16d6ca757537d411cf77797 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 24 Jun 2019 18:55:30 -0700 Subject: [PATCH 0964/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f936a2aaf..eae5176cd 100644 ---
## v1.11 [2019-06-11] From e8a596858c71fb06ae70e3bf918d1b276f268863 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 25 Jun 2019 11:51:51 -0700 Subject: [PATCH 0965/1815] Add device, serial_no, and wwn tags to synthetic attributes (#6040) --- plugins/inputs/smart/smart.go | 21 ++++++----- plugins/inputs/smart/smart_test.go | 59 +++++++++++++++++++++++++++++- 2 files changed, 69 insertions(+), 11 deletions(-) diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go index b606b6f38..93d4a0076 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -253,18 +253,21 @@ func gatherDisk(acc telegraf.Accumulator, usesudo, collectAttributes bool, smart tags := map[string]string{} fields := make(map[string]interface{}) + if collectAttributes { + deviceNode := strings.Split(device, " ")[0] + tags["device"] = path.Base(deviceNode) + + if serial, ok := deviceTags["serial_no"]; ok { + tags["serial_no"] = serial + } + if wwn, ok := deviceTags["wwn"]; ok { + tags["wwn"] = wwn + } + } + attr := attribute.FindStringSubmatch(line) if len(attr) > 1 { if collectAttributes { - deviceNode := strings.Split(device, " ")[0] - tags["device"] = path.Base(deviceNode) - - if serial, ok := deviceTags["serial_no"]; ok { - tags["serial_no"] = serial - } - if wwn, ok := deviceTags["wwn"]; ok { - tags["wwn"] = wwn - } tags["id"] = attr[1] tags["name"] = attr[2] tags["flags"] = attr[3] diff --git a/plugins/inputs/smart/smart_test.go b/plugins/inputs/smart/smart_test.go index 525d99e3b..c801c7a61 100644 --- a/plugins/inputs/smart/smart_test.go +++ b/plugins/inputs/smart/smart_test.go @@ -4,7 +4,9 @@ import ( "errors" "sync" "testing" + "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -443,8 +445,61 @@ func TestGatherNvme(t *testing.T) { wg.Add(1) gatherDisk(acc, true, true, "", "", "", wg) - assert.Equal(t, 6, acc.NFields(), "Wrong number of fields gathered") - assert.Equal(t, uint64(4), acc.NMetrics(), "Wrong number of metrics gathered") + + expected := []telegraf.Metric{ + testutil.MustMetric("smart_device", + map[string]string{ + "device": ".", + "model": "TS128GMTE850", + "serial_no": "D704940282?", + }, + map[string]interface{}{ + "exit_status": 0, + "health_ok": true, + "temp_c": 38, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": ".", + "id": "9", + "name": "Power_On_Hours", + "serial_no": "D704940282?", + }, + map[string]interface{}{ + "raw_value": 6038, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": ".", + "id": "12", + "name": "Power_Cycle_Count", + "serial_no": "D704940282?", + }, + map[string]interface{}{ + "raw_value": 472, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": ".", + "id": "194", + "name": "Temperature_Celsius", + "serial_no": "D704940282?", + }, + map[string]interface{}{ + "raw_value": 38, + }, + time.Now(), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), + testutil.SortMetrics(), testutil.IgnoreTime()) } // smartctl output From a231b3e79d7014a04879db0bb9538c5985cd00ca Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 25 Jun 2019 12:04:39 -0700 Subject: [PATCH 0966/1815] Fix parsing of remote tcp address in statsd input (#6031) --- plugins/inputs/statsd/datadog.go | 4 +- plugins/inputs/statsd/statsd.go | 15 +++---- plugins/inputs/statsd/statsd_test.go | 
60 ++++++++++++++++++++++------ testutil/metric.go | 4 +- 4 files changed, 58 insertions(+), 25 deletions(-) diff --git a/plugins/inputs/statsd/datadog.go b/plugins/inputs/statsd/datadog.go index f2785ff38..6cce2316e 100644 --- a/plugins/inputs/statsd/datadog.go +++ b/plugins/inputs/statsd/datadog.go @@ -76,7 +76,9 @@ func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostnam fields := make(map[string]interface{}, 9) fields["alert_type"] = eventInfo // default event type fields["text"] = uncommenter.Replace(string(rawText)) - tags["source"] = defaultHostname // Use source tag because host is reserved tag key in Telegraf. + if defaultHostname != "" { + tags["source"] = defaultHostname + } fields["priority"] = priorityNormal ts := now if len(message) < 2 { diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 7408482b6..89d67b1ee 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -7,7 +7,6 @@ import ( "fmt" "log" "net" - "net/url" "sort" "strconv" "strings" @@ -817,14 +816,12 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) { s.forget(id) s.CurrentConnections.Incr(-1) }() - addr := conn.RemoteAddr() - parsedURL, err := url.Parse(addr.String()) - if err != nil { - // this should never happen because the conn handler should give us parsable addresses, - // but if it does we will know - log.Printf("E! [inputs.statsd] failed to parse %s\n", addr) - return // close the connetion and return + + var remoteIP string + if addr, ok := conn.RemoteAddr().(*net.TCPAddr); ok { + remoteIP = addr.IP.String() } + var n int scanner := bufio.NewScanner(conn) for { @@ -848,7 +845,7 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) { b.WriteByte('\n') select { - case s.in <- input{Buffer: b, Time: time.Now(), Addr: parsedURL.Host}: + case s.in <- input{Buffer: b, Time: time.Now(), Addr: remoteIP}: default: s.drops++ if s.drops == 1 || s.drops%s.AllowedPendingMessages == 0 { diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index 4a856902d..9f760b9f9 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -15,19 +16,6 @@ const ( testMsg = "test.tcp.msg:100|c" ) -func newTestTCPListener() (*Statsd, chan input) { - in := make(chan input, 1500) - listener := &Statsd{ - Protocol: "tcp", - ServiceAddress: "localhost:8125", - AllowedPendingMessages: 10000, - MaxTCPConnections: 250, - in: in, - done: make(chan struct{}), - } - return listener, in -} - func NewTestStatsd() *Statsd { s := Statsd{} @@ -1596,3 +1584,49 @@ func testValidateGauge( } return nil } + +func TestTCP(t *testing.T) { + statsd := Statsd{ + Protocol: "tcp", + ServiceAddress: "localhost:0", + AllowedPendingMessages: 10000, + MaxTCPConnections: 2, + } + var acc testutil.Accumulator + require.NoError(t, statsd.Start(&acc)) + defer statsd.Stop() + + addr := statsd.TCPlistener.Addr().String() + + conn, err := net.Dial("tcp", addr) + _, err = conn.Write([]byte("cpu.time_idle:42|c\n")) + require.NoError(t, err) + err = conn.Close() + require.NoError(t, err) + + for { + err = statsd.Gather(&acc) + require.NoError(t, err) + + if len(acc.Metrics) > 0 { + break + } + } + + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + testutil.MustMetric( + "cpu_time_idle", + map[string]string{ + 
"metric_type": "counter", + }, + map[string]interface{}{ + "value": 42, + }, + time.Now(), + ), + }, + acc.GetTelegrafMetrics(), + testutil.IgnoreTime(), + ) +} diff --git a/testutil/metric.go b/testutil/metric.go index 0dca9c641..25e23fa20 100644 --- a/testutil/metric.go +++ b/testutil/metric.go @@ -143,7 +143,7 @@ func MetricEqual(expected, actual telegraf.Metric) bool { // RequireMetricEqual halts the test with an error if the metrics are not // equal. -func RequireMetricEqual(t *testing.T, expected, actual telegraf.Metric) { +func RequireMetricEqual(t *testing.T, expected, actual telegraf.Metric, opts ...cmp.Option) { t.Helper() var lhs, rhs *metricDiff @@ -154,7 +154,7 @@ func RequireMetricEqual(t *testing.T, expected, actual telegraf.Metric) { rhs = newMetricDiff(actual) } - if diff := cmp.Diff(lhs, rhs); diff != "" { + if diff := cmp.Diff(lhs, rhs, opts...); diff != "" { t.Fatalf("telegraf.Metric\n--- expected\n+++ actual\n%s", diff) } } From cb261be9034929dfe46e6be758cb59ef8b9eaf6f Mon Sep 17 00:00:00 2001 From: John Seekins Date: Tue, 25 Jun 2019 13:06:27 -0600 Subject: [PATCH 0967/1815] Fix typo in cassandra jolokia example config (#6044) --- plugins/inputs/jolokia2/examples/cassandra.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/jolokia2/examples/cassandra.conf b/plugins/inputs/jolokia2/examples/cassandra.conf index b8bb60980..bc9c97ff1 100644 --- a/plugins/inputs/jolokia2/examples/cassandra.conf +++ b/plugins/inputs/jolokia2/examples/cassandra.conf @@ -2,7 +2,7 @@ urls = ["http://localhost:8778/jolokia"] name_prefix = "java_" - [[inputs.jolokia2_agent.metrics]] + [[inputs.jolokia2_agent.metric]] name = "Memory" mbean = "java.lang:type=Memory" From f557af3077bca7d98edaa031477f53c702de563c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 25 Jun 2019 12:08:24 -0700 Subject: [PATCH 0968/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index eae5176cd..2708cd9d7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,8 @@ - [#5999](https://github.com/influxdata/telegraf/issues/5999): Fix panic if pool_mode column does not exist. - [#6019](https://github.com/influxdata/telegraf/issues/6019): Add missing container_id field to docker_container_status metrics. - [#5742](https://github.com/influxdata/telegraf/issues/5742): Ignore error when utmp is missing in system input. +- [#6032](https://github.com/influxdata/telegraf/issues/6032): Add device, serial_no, and wwn tags to synthetic attributes. +- [#6012](https://github.com/influxdata/telegraf/issues/6012): Fix parsing of remote tcp address in statsd input. ## v1.11 [2019-06-11] From 26ee42ce8d118cd9c4722f95e5bf6a2bdaa6ce7c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 25 Jun 2019 13:01:07 -0700 Subject: [PATCH 0969/1815] Set release date for 1.11.1 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2708cd9d7..c7c8fa59a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,7 +30,7 @@ - [#5692](https://github.com/influxdata/telegraf/issues/5692): Fix sensor read error stops reporting of all sensors in temp input. - [#4356](https://github.com/influxdata/telegraf/issues/4356): Fix double pct replacement in sysstat input. 
-## v1.11.1 [unreleased] +## v1.11.1 [2019-06-25] #### Bugfixes From 83c8d7be2a40bd83f671710f73cf0a6581bcd15d Mon Sep 17 00:00:00 2001 From: dupondje Date: Wed, 26 Jun 2019 01:16:15 +0200 Subject: [PATCH 0970/1815] Fix master check and move cluster health indices to separate measurement (#6004) --- plugins/inputs/elasticsearch/elasticsearch.go | 161 ++++--- .../elasticsearch/elasticsearch_test.go | 64 ++- plugins/inputs/elasticsearch/testdata_test.go | 413 ++++++++++++++++++ 3 files changed, 560 insertions(+), 78 deletions(-) diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 13c567b30..59b21f2cd 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -40,31 +40,32 @@ type nodeStat struct { } type clusterHealth struct { - ClusterName string `json:"cluster_name"` - Status string `json:"status"` - TimedOut bool `json:"timed_out"` - NumberOfNodes int `json:"number_of_nodes"` - NumberOfDataNodes int `json:"number_of_data_nodes"` ActivePrimaryShards int `json:"active_primary_shards"` ActiveShards int `json:"active_shards"` - RelocatingShards int `json:"relocating_shards"` - InitializingShards int `json:"initializing_shards"` - UnassignedShards int `json:"unassigned_shards"` - DelayedUnassignedShards int `json:"delayed_unassigned_shards"` - NumberOfPendingTasks int `json:"number_of_pending_tasks"` - TaskMaxWaitingInQueueMillis int `json:"task_max_waiting_in_queue_millis"` ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"` + ClusterName string `json:"cluster_name"` + DelayedUnassignedShards int `json:"delayed_unassigned_shards"` + InitializingShards int `json:"initializing_shards"` + NumberOfDataNodes int `json:"number_of_data_nodes"` + NumberOfInFlightFetch int `json:"number_of_in_flight_fetch"` + NumberOfNodes int `json:"number_of_nodes"` + NumberOfPendingTasks int `json:"number_of_pending_tasks"` + RelocatingShards int `json:"relocating_shards"` + Status string `json:"status"` + TaskMaxWaitingInQueueMillis int `json:"task_max_waiting_in_queue_millis"` + TimedOut bool `json:"timed_out"` + UnassignedShards int `json:"unassigned_shards"` Indices map[string]indexHealth `json:"indices"` } type indexHealth struct { - Status string `json:"status"` - NumberOfShards int `json:"number_of_shards"` - NumberOfReplicas int `json:"number_of_replicas"` ActivePrimaryShards int `json:"active_primary_shards"` ActiveShards int `json:"active_shards"` - RelocatingShards int `json:"relocating_shards"` InitializingShards int `json:"initializing_shards"` + NumberOfReplicas int `json:"number_of_replicas"` + NumberOfShards int `json:"number_of_shards"` + RelocatingShards int `json:"relocating_shards"` + Status string `json:"status"` UnassignedShards int `json:"unassigned_shards"` } @@ -137,9 +138,17 @@ type Elasticsearch struct { NodeStats []string tls.ClientConfig - client *http.Client - catMasterResponseTokens []string - isMaster bool + client *http.Client + serverInfo map[string]serverInfo + serverInfoMutex sync.Mutex +} +type serverInfo struct { + nodeID string + masterID string +} + +func (i serverInfo) isMaster() bool { + return i.nodeID == i.masterID } // NewElasticsearch return a new instance of Elasticsearch @@ -186,6 +195,40 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error { e.client = client } + if e.ClusterStats { + var wgC sync.WaitGroup + wgC.Add(len(e.Servers)) + + e.serverInfo = make(map[string]serverInfo) + for _, serv := range e.Servers { + 
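+			// Probe every server up front: gatherNodeID and getCatMaster fill in
+			// serverInfo so isMaster() can later compare this node's ID against the
+			// elected master's ID before emitting cluster stats.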
go func(s string, acc telegraf.Accumulator) { + defer wgC.Done() + info := serverInfo{} + + var err error + + // Gather node ID + if info.nodeID, err = e.gatherNodeID(s + "/_nodes/_local/name"); err != nil { + acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))) + return + } + + // get cat/master information here so NodeStats can determine + // whether this node is the Master + if info.masterID, err = e.getCatMaster(s + "/_cat/master"); err != nil { + acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))) + return + } + + e.serverInfoMutex.Lock() + e.serverInfo[s] = info + e.serverInfoMutex.Unlock() + + }(serv, acc) + } + wgC.Wait() + } + var wg sync.WaitGroup wg.Add(len(e.Servers)) @@ -193,18 +236,8 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error { go func(s string, acc telegraf.Accumulator) { defer wg.Done() url := e.nodeStatsUrl(s) - e.isMaster = false - if e.ClusterStats { - // get cat/master information here so NodeStats can determine - // whether this node is the Master - if err := e.setCatMaster(s + "/_cat/master"); err != nil { - acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))) - return - } - } - - // Always gather node states + // Always gather node stats if err := e.gatherNodeStats(url, acc); err != nil { acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))) return @@ -221,7 +254,7 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error { } } - if e.ClusterStats && (e.isMaster || !e.ClusterStatsOnlyFromMaster || !e.Local) { + if e.ClusterStats && (e.serverInfo[s].isMaster() || !e.ClusterStatsOnlyFromMaster || !e.Local) { if err := e.gatherClusterStats(s+"/_cluster/stats", acc); err != nil { acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))) return @@ -267,6 +300,22 @@ func (e *Elasticsearch) nodeStatsUrl(baseUrl string) string { return fmt.Sprintf("%s/%s", url, strings.Join(e.NodeStats, ",")) } +func (e *Elasticsearch) gatherNodeID(url string) (string, error) { + nodeStats := &struct { + ClusterName string `json:"cluster_name"` + Nodes map[string]*nodeStat `json:"nodes"` + }{} + if err := e.gatherJsonData(url, nodeStats); err != nil { + return "", err + } + + // Only 1 should be returned + for id := range nodeStats.Nodes { + return id, nil + } + return "", nil +} + func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error { nodeStats := &struct { ClusterName string `json:"cluster_name"` @@ -284,11 +333,6 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er "cluster_name": nodeStats.ClusterName, } - if e.ClusterStats { - // check for master - e.isMaster = (id == e.catMasterResponseTokens[0]) - } - for k, v := range n.Attributes { tags["node_attribute_"+k] = v } @@ -331,20 +375,21 @@ func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator } measurementTime := time.Now() clusterFields := map[string]interface{}{ - "status": healthStats.Status, - "status_code": mapHealthStatusToCode(healthStats.Status), - "timed_out": healthStats.TimedOut, - "number_of_nodes": healthStats.NumberOfNodes, - "number_of_data_nodes": healthStats.NumberOfDataNodes, "active_primary_shards": healthStats.ActivePrimaryShards, "active_shards": healthStats.ActiveShards, - "relocating_shards": healthStats.RelocatingShards, - "initializing_shards": healthStats.InitializingShards, - "unassigned_shards": healthStats.UnassignedShards, - 
"delayed_unassigned_shards": healthStats.DelayedUnassignedShards, - "number_of_pending_tasks": healthStats.NumberOfPendingTasks, - "task_max_waiting_in_queue_millis": healthStats.TaskMaxWaitingInQueueMillis, "active_shards_percent_as_number": healthStats.ActiveShardsPercentAsNumber, + "delayed_unassigned_shards": healthStats.DelayedUnassignedShards, + "initializing_shards": healthStats.InitializingShards, + "number_of_data_nodes": healthStats.NumberOfDataNodes, + "number_of_in_flight_fetch": healthStats.NumberOfInFlightFetch, + "number_of_nodes": healthStats.NumberOfNodes, + "number_of_pending_tasks": healthStats.NumberOfPendingTasks, + "relocating_shards": healthStats.RelocatingShards, + "status": healthStats.Status, + "status_code": mapHealthStatusToCode(healthStats.Status), + "task_max_waiting_in_queue_millis": healthStats.TaskMaxWaitingInQueueMillis, + "timed_out": healthStats.TimedOut, + "unassigned_shards": healthStats.UnassignedShards, } acc.AddFields( "elasticsearch_cluster_health", @@ -355,18 +400,18 @@ func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator for name, health := range healthStats.Indices { indexFields := map[string]interface{}{ - "status": health.Status, - "status_code": mapHealthStatusToCode(health.Status), - "number_of_shards": health.NumberOfShards, - "number_of_replicas": health.NumberOfReplicas, "active_primary_shards": health.ActivePrimaryShards, "active_shards": health.ActiveShards, - "relocating_shards": health.RelocatingShards, "initializing_shards": health.InitializingShards, + "number_of_replicas": health.NumberOfReplicas, + "number_of_shards": health.NumberOfShards, + "relocating_shards": health.RelocatingShards, + "status": health.Status, + "status_code": mapHealthStatusToCode(health.Status), "unassigned_shards": health.UnassignedShards, } acc.AddFields( - "elasticsearch_indices", + "elasticsearch_cluster_health_indices", indexFields, map[string]string{"index": name, "name": healthStats.ClusterName}, measurementTime, @@ -405,27 +450,27 @@ func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator) return nil } -func (e *Elasticsearch) setCatMaster(url string) error { +func (e *Elasticsearch) getCatMaster(url string) (string, error) { r, err := e.client.Get(url) if err != nil { - return err + return "", err } defer r.Body.Close() if r.StatusCode != http.StatusOK { // NOTE: we are not going to read/discard r.Body under the assumption we'd prefer // to let the underlying transport close the connection and re-establish a new one for // future calls. - return fmt.Errorf("elasticsearch: Unable to retrieve master node information. API responded with status-code %d, expected %d", r.StatusCode, http.StatusOK) + return "", fmt.Errorf("elasticsearch: Unable to retrieve master node information. 
API responded with status-code %d, expected %d", r.StatusCode, http.StatusOK) } response, err := ioutil.ReadAll(r.Body) if err != nil { - return err + return "", err } - e.catMasterResponseTokens = strings.Split(string(response), " ") + masterID := strings.Split(string(response), " ")[0] - return nil + return masterID, nil } func (e *Elasticsearch) gatherJsonData(url string, v interface{}) error { diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go index ec6951fbd..298784132 100644 --- a/plugins/inputs/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -9,6 +9,7 @@ import ( "github.com/influxdata/telegraf/testutil" "fmt" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -22,6 +23,9 @@ func defaultTags() map[string]string { "node_host": "test", } } +func defaultServerInfo() serverInfo { + return serverInfo{nodeID: "", masterID: "SDFsfSDFsdfFSDSDfSFDSDF"} +} type transportMock struct { statusCode int @@ -49,8 +53,8 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { func (t *transportMock) CancelRequest(_ *http.Request) { } -func checkIsMaster(es *Elasticsearch, expected bool, t *testing.T) { - if es.isMaster != expected { +func checkIsMaster(es *Elasticsearch, server string, expected bool, t *testing.T) { + if es.serverInfo[server].isMaster() != expected { msg := fmt.Sprintf("IsMaster set incorrectly") assert.Fail(t, msg) } @@ -73,13 +77,15 @@ func TestGather(t *testing.T) { es := newElasticsearchWithClient() es.Servers = []string{"http://example.com:9200"} es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse) + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator if err := acc.GatherError(es.Gather); err != nil { t.Fatal(err) } - checkIsMaster(es, false, t) + checkIsMaster(es, es.Servers[0], false, t) checkNodeStatsResult(t, &acc) } @@ -88,13 +94,15 @@ func TestGatherIndividualStats(t *testing.T) { es.Servers = []string{"http://example.com:9200"} es.NodeStats = []string{"jvm", "process"} es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponseJVMProcess) + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator if err := acc.GatherError(es.Gather); err != nil { t.Fatal(err) } - checkIsMaster(es, false, t) + checkIsMaster(es, es.Servers[0], false, t) tags := defaultTags() acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags) @@ -112,13 +120,15 @@ func TestGatherNodeStats(t *testing.T) { es := newElasticsearchWithClient() es.Servers = []string{"http://example.com:9200"} es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse) + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator if err := es.gatherNodeStats("junk", &acc); err != nil { t.Fatal(err) } - checkIsMaster(es, false, t) + checkIsMaster(es, es.Servers[0], false, t) checkNodeStatsResult(t, &acc) } @@ -128,21 +138,23 @@ func TestGatherClusterHealthEmptyClusterHealth(t *testing.T) { es.ClusterHealth = true es.ClusterHealthLevel = "" es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse) + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc 
testutil.Accumulator require.NoError(t, es.gatherClusterHealth("junk", &acc)) - checkIsMaster(es, false, t) + checkIsMaster(es, es.Servers[0], false, t) acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", clusterHealthExpected, map[string]string{"name": "elasticsearch_telegraf"}) - acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_cluster_health_indices", v1IndexExpected, map[string]string{"index": "v1"}) - acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_cluster_health_indices", v2IndexExpected, map[string]string{"index": "v2"}) } @@ -153,21 +165,23 @@ func TestGatherClusterHealthSpecificClusterHealth(t *testing.T) { es.ClusterHealth = true es.ClusterHealthLevel = "cluster" es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse) + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator require.NoError(t, es.gatherClusterHealth("junk", &acc)) - checkIsMaster(es, false, t) + checkIsMaster(es, es.Servers[0], false, t) acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", clusterHealthExpected, map[string]string{"name": "elasticsearch_telegraf"}) - acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_cluster_health_indices", v1IndexExpected, map[string]string{"index": "v1"}) - acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_cluster_health_indices", v2IndexExpected, map[string]string{"index": "v2"}) } @@ -178,21 +192,23 @@ func TestGatherClusterHealthAlsoIndicesHealth(t *testing.T) { es.ClusterHealth = true es.ClusterHealthLevel = "indices" es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponseWithIndices) + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator require.NoError(t, es.gatherClusterHealth("junk", &acc)) - checkIsMaster(es, false, t) + checkIsMaster(es, es.Servers[0], false, t) acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", clusterHealthExpected, map[string]string{"name": "elasticsearch_telegraf"}) - acc.AssertContainsTaggedFields(t, "elasticsearch_indices", + acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health_indices", v1IndexExpected, map[string]string{"index": "v1", "name": "elasticsearch_telegraf"}) - acc.AssertContainsTaggedFields(t, "elasticsearch_indices", + acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health_indices", v2IndexExpected, map[string]string{"index": "v2", "name": "elasticsearch_telegraf"}) } @@ -202,13 +218,18 @@ func TestGatherClusterStatsMaster(t *testing.T) { es := newElasticsearchWithClient() es.ClusterStats = true es.Servers = []string{"http://example.com:9200"} + es.serverInfo = make(map[string]serverInfo) + info := serverInfo{nodeID: "SDFsfSDFsdfFSDSDfSFDSDF", masterID: ""} // first get catMaster es.client.Transport = newTransportMock(http.StatusOK, IsMasterResult) - require.NoError(t, es.setCatMaster("junk")) + masterID, err := es.getCatMaster("junk") + require.NoError(t, err) + info.masterID = masterID + es.serverInfo["http://example.com:9200"] = info IsMasterResultTokens := strings.Split(string(IsMasterResult), " ") - if es.catMasterResponseTokens[0] != IsMasterResultTokens[0] { + if 
masterID != IsMasterResultTokens[0] { msg := fmt.Sprintf("catmaster is incorrect") assert.Fail(t, msg) } @@ -221,7 +242,7 @@ func TestGatherClusterStatsMaster(t *testing.T) { t.Fatal(err) } - checkIsMaster(es, true, t) + checkIsMaster(es, es.Servers[0], true, t) checkNodeStatsResult(t, &acc) // now test the clusterstats method @@ -243,13 +264,16 @@ func TestGatherClusterStatsNonMaster(t *testing.T) { es := newElasticsearchWithClient() es.ClusterStats = true es.Servers = []string{"http://example.com:9200"} + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = serverInfo{nodeID: "SDFsfSDFsdfFSDSDfSFDSDF", masterID: ""} // first get catMaster es.client.Transport = newTransportMock(http.StatusOK, IsNotMasterResult) - require.NoError(t, es.setCatMaster("junk")) + masterID, err := es.getCatMaster("junk") + require.NoError(t, err) IsNotMasterResultTokens := strings.Split(string(IsNotMasterResult), " ") - if es.catMasterResponseTokens[0] != IsNotMasterResultTokens[0] { + if masterID != IsNotMasterResultTokens[0] { msg := fmt.Sprintf("catmaster is incorrect") assert.Fail(t, msg) } @@ -263,7 +287,7 @@ func TestGatherClusterStatsNonMaster(t *testing.T) { } // ensure flag is clear so Cluster Stats would not be done - checkIsMaster(es, false, t) + checkIsMaster(es, es.Servers[0], false, t) checkNodeStatsResult(t, &acc) } diff --git a/plugins/inputs/elasticsearch/testdata_test.go b/plugins/inputs/elasticsearch/testdata_test.go index df50d0a2b..ffdf9559d 100644 --- a/plugins/inputs/elasticsearch/testdata_test.go +++ b/plugins/inputs/elasticsearch/testdata_test.go @@ -7,6 +7,7 @@ const clusterHealthResponse = ` "timed_out": false, "number_of_nodes": 3, "number_of_data_nodes": 3, + "number_of_in_flight_fetch": 0, "active_primary_shards": 5, "active_shards": 15, "relocating_shards": 0, @@ -26,6 +27,7 @@ const clusterHealthResponseWithIndices = ` "timed_out": false, "number_of_nodes": 3, "number_of_data_nodes": 3, + "number_of_in_flight_fetch": 0, "active_primary_shards": 5, "active_shards": 15, "relocating_shards": 0, @@ -66,6 +68,7 @@ var clusterHealthExpected = map[string]interface{}{ "timed_out": false, "number_of_nodes": 3, "number_of_data_nodes": 3, + "number_of_in_flight_fetch": 0, "active_primary_shards": 5, "active_shards": 15, "relocating_shards": 0, @@ -514,6 +517,416 @@ const nodeStatsResponse = ` "tripped": 0 } } + }, + "SDFsfSDFsdfFSDSDfSPOJUY": { + "timestamp": 1436365550135, + "name": "test.host.com", + "transport_address": "inet[/127.0.0.1:9300]", + "host": "test", + "ip": [ + "inet[/127.0.0.1:9300]", + "NONE" + ], + "attributes": { + "master": "true" + }, + "indices": { + "docs": { + "count": 29652, + "deleted": 5229 + }, + "store": { + "size_in_bytes": 37715234, + "throttle_time_in_millis": 215 + }, + "indexing": { + "index_total": 84790, + "index_time_in_millis": 29680, + "index_current": 0, + "delete_total": 13879, + "delete_time_in_millis": 1139, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 1, + "time_in_millis": 2, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 1, + "missing_time_in_millis": 2, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 1452, + "query_time_in_millis": 5695, + "query_current": 0, + "fetch_total": 414, + "fetch_time_in_millis": 146, + "fetch_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 133, + "total_time_in_millis": 21060, + 
"total_docs": 203672, + "total_size_in_bytes": 142900226 + }, + "refresh": { + "total": 1076, + "total_time_in_millis": 20078 + }, + "flush": { + "total": 115, + "total_time_in_millis": 2401 + }, + "warmer": { + "current": 0, + "total": 2319, + "total_time_in_millis": 448 + }, + "filter_cache": { + "memory_size_in_bytes": 7384, + "evictions": 0 + }, + "id_cache": { + "memory_size_in_bytes": 0 + }, + "fielddata": { + "memory_size_in_bytes": 12996, + "evictions": 0 + }, + "percolate": { + "total": 0, + "time_in_millis": 0, + "current": 0, + "memory_size_in_bytes": -1, + "memory_size": "-1b", + "queries": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 134, + "memory_in_bytes": 1285212, + "index_writer_memory_in_bytes": 0, + "index_writer_max_memory_in_bytes": 172368955, + "version_map_memory_in_bytes": 611844, + "fixed_bit_set_memory_in_bytes": 0 + }, + "translog": { + "operations": 17702, + "size_in_bytes": 17 + }, + "suggest": { + "total": 0, + "time_in_millis": 0, + "current": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "os": { + "timestamp": 1436460392944, + "load_average": [ + 0.01, + 0.04, + 0.05 + ], + "mem": { + "free_in_bytes": 477761536, + "used_in_bytes": 1621868544, + "free_percent": 74, + "used_percent": 25, + "actual_free_in_bytes": 1565470720, + "actual_used_in_bytes": 534159360 + }, + "swap": { + "used_in_bytes": 0, + "free_in_bytes": 487997440 + } + }, + "process": { + "timestamp": 1436460392945, + "open_file_descriptors": 160, + "cpu": { + "percent": 2, + "sys_in_millis": 1870, + "user_in_millis": 13610, + "total_in_millis": 15480 + }, + "mem": { + "total_virtual_in_bytes": 4747890688 + } + }, + "jvm": { + "timestamp": 1436460392945, + "uptime_in_millis": 202245, + "mem": { + "heap_used_in_bytes": 52709568, + "heap_used_percent": 5, + "heap_committed_in_bytes": 259522560, + "heap_max_in_bytes": 1038876672, + "non_heap_used_in_bytes": 39634576, + "non_heap_committed_in_bytes": 40841216, + "pools": { + "young": { + "used_in_bytes": 32685760, + "max_in_bytes": 279183360, + "peak_used_in_bytes": 71630848, + "peak_max_in_bytes": 279183360 + }, + "survivor": { + "used_in_bytes": 8912880, + "max_in_bytes": 34865152, + "peak_used_in_bytes": 8912888, + "peak_max_in_bytes": 34865152 + }, + "old": { + "used_in_bytes": 11110928, + "max_in_bytes": 724828160, + "peak_used_in_bytes": 14354608, + "peak_max_in_bytes": 724828160 + } + } + }, + "threads": { + "count": 44, + "peak_count": 45 + }, + "gc": { + "collectors": { + "young": { + "collection_count": 2, + "collection_time_in_millis": 98 + }, + "old": { + "collection_count": 1, + "collection_time_in_millis": 24 + } + } + }, + "buffer_pools": { + "direct": { + "count": 40, + "used_in_bytes": 6304239, + "total_capacity_in_bytes": 6304239 + }, + "mapped": { + "count": 0, + "used_in_bytes": 0, + "total_capacity_in_bytes": 0 + } + } + }, + "thread_pool": { + "percolate": { + "threads": 123, + "queue": 23, + "active": 13, + "rejected": 235, + "largest": 23, + "completed": 33 + }, + "fetch_shard_started": { + "threads": 3, + "queue": 1, + "active": 5, + "rejected": 6, + "largest": 4, + "completed": 54 + }, + "listener": { + "threads": 1, + "queue": 2, + "active": 4, + "rejected": 8, + "largest": 1, + "completed": 1 + }, + "index": { + "threads": 6, + "queue": 8, + "active": 4, + "rejected": 2, + "largest": 3, + "completed": 6 + }, + 
"refresh": { + "threads": 23, + "queue": 7, + "active": 3, + "rejected": 4, + "largest": 8, + "completed": 3 + }, + "suggest": { + "threads": 2, + "queue": 7, + "active": 2, + "rejected": 1, + "largest": 8, + "completed": 3 + }, + "generic": { + "threads": 1, + "queue": 4, + "active": 6, + "rejected": 3, + "largest": 2, + "completed": 27 + }, + "warmer": { + "threads": 2, + "queue": 7, + "active": 3, + "rejected": 2, + "largest": 3, + "completed": 1 + }, + "search": { + "threads": 5, + "queue": 7, + "active": 2, + "rejected": 7, + "largest": 2, + "completed": 4 + }, + "flush": { + "threads": 3, + "queue": 8, + "active": 0, + "rejected": 1, + "largest": 5, + "completed": 3 + }, + "optimize": { + "threads": 3, + "queue": 4, + "active": 1, + "rejected": 2, + "largest": 7, + "completed": 3 + }, + "fetch_shard_store": { + "threads": 1, + "queue": 7, + "active": 4, + "rejected": 2, + "largest": 4, + "completed": 1 + }, + "management": { + "threads": 2, + "queue": 3, + "active": 1, + "rejected": 6, + "largest": 2, + "completed": 22 + }, + "get": { + "threads": 1, + "queue": 8, + "active": 4, + "rejected": 3, + "largest": 2, + "completed": 1 + }, + "merge": { + "threads": 6, + "queue": 4, + "active": 5, + "rejected": 2, + "largest": 5, + "completed": 1 + }, + "bulk": { + "threads": 4, + "queue": 5, + "active": 7, + "rejected": 3, + "largest": 1, + "completed": 4 + }, + "snapshot": { + "threads": 8, + "queue": 5, + "active": 6, + "rejected": 2, + "largest": 1, + "completed": 0 + } + }, + "fs": { + "timestamp": 1436460392946, + "total": { + "total_in_bytes": 19507089408, + "free_in_bytes": 16909316096, + "available_in_bytes": 15894814720 + }, + "data": [ + { + "path": "/usr/share/elasticsearch/data/elasticsearch/nodes/0", + "mount": "/usr/share/elasticsearch/data", + "type": "ext4", + "total_in_bytes": 19507089408, + "free_in_bytes": 16909316096, + "available_in_bytes": 15894814720 + } + ] + }, + "transport": { + "server_open": 13, + "rx_count": 6, + "rx_size_in_bytes": 1380, + "tx_count": 6, + "tx_size_in_bytes": 1380 + }, + "http": { + "current_open": 3, + "total_opened": 3 + }, + "breakers": { + "fielddata": { + "limit_size_in_bytes": 623326003, + "limit_size": "594.4mb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1.03, + "tripped": 0 + }, + "request": { + "limit_size_in_bytes": 415550668, + "limit_size": "396.2mb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1.0, + "tripped": 0 + }, + "parent": { + "limit_size_in_bytes": 727213670, + "limit_size": "693.5mb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1.0, + "tripped": 0 + } + } } } } From 123e29c1f4f78b7c9fcbbda960442a4df2735a38 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 25 Jun 2019 17:28:47 -0700 Subject: [PATCH 0971/1815] Update elasticsearch input README --- plugins/inputs/elasticsearch/README.md | 857 ++++++++++++++++--------- 1 file changed, 560 insertions(+), 297 deletions(-) diff --git a/plugins/inputs/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md index d8e43da38..a9d4db0c8 100644 --- a/plugins/inputs/elasticsearch/README.md +++ b/plugins/inputs/elasticsearch/README.md @@ -1,13 +1,13 @@ -# Elasticsearch input plugin +# Elasticsearch Input Plugin The [elasticsearch](https://www.elastic.co/) plugin queries endpoints to obtain [node](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) and optionally 
[cluster-health](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html)
or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html)
metrics.

-### Configuration:
+### Configuration

-```
+```toml
 [[inputs.elasticsearch]]
   ## specify a list of one or more Elasticsearch servers
   servers = ["http://localhost:9200"]
@@ -17,7 +17,7 @@

   ## When local is true (the default), the node will read only its own stats.
   ## Set local to false when you want to read the node stats from all nodes
-  ## of the cluster.
+  ## of the cluster. 
   local = true

   ## Set cluster_health to true when you want to also obtain cluster health stats
@@ -48,306 +48,569 @@ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/curre
   # insecure_skip_verify = false
 ```

-### Status mappings
+### Metrics

-When reporting health (green/yellow/red), additional field `status_code`
-is reported. Field contains mapping from status:string to status_code:int
-with following rules:
+Emitted when `cluster_health = true`:

-* `green` - 1
-* `yellow` - 2
-* `red` - 3
-* `unknown` - 0
+- elasticsearch_cluster_health
+  - tags:
+    - name
+  - fields:
+    - active_primary_shards (integer)
+    - active_shards (integer)
+    - active_shards_percent_as_number (float)
+    - delayed_unassigned_shards (integer)
+    - initializing_shards (integer)
+    - number_of_data_nodes (integer)
+    - number_of_in_flight_fetch (integer)
+    - number_of_nodes (integer)
+    - number_of_pending_tasks (integer)
+    - relocating_shards (integer)
+    - status (string, one of green, yellow or red)
+    - status_code (integer, green = 1, yellow = 2, red = 3)
+    - task_max_waiting_in_queue_millis (integer)
+    - timed_out (boolean)
+    - unassigned_shards (integer)

-### Measurements & Fields:
+Emitted when `cluster_health = true` and `cluster_health_level = "indices"`:

-field data circuit breaker measurement names:
-- elasticsearch_breakers
-  - fielddata_estimated_size_in_bytes value=0
-  - fielddata_overhead value=1.03
-  - fielddata_tripped value=0
-  - fielddata_limit_size_in_bytes value=623326003
-  - request_estimated_size_in_bytes value=0
-  - request_overhead value=1.0
-  - request_tripped value=0
-  - request_limit_size_in_bytes value=415550668
-  - parent_overhead value=1.0
-  - parent_tripped value=0
-  - parent_limit_size_in_bytes value=727213670
-  - parent_estimated_size_in_bytes value=0
+- elasticsearch_cluster_health_indices
+  - tags:
+    - index
+    - name
+  - fields:
+    - active_primary_shards (integer)
+    - active_shards (integer)
+    - initializing_shards (integer)
+    - number_of_replicas (integer)
+    - number_of_shards (integer)
+    - relocating_shards (integer)
+    - status (string, one of green, yellow or red)
+    - status_code (integer, green = 1, yellow = 2, red = 3)
+    - unassigned_shards (integer)

-File system information, data path, free disk space, read/write measurement names:
-- elasticsearch_fs
-  - timestamp value=1436460392946
-  - total_free_in_bytes value=16909316096
-  - total_available_in_bytes value=15894814720
-  - total_total_in_bytes value=19507089408
+Emitted when `cluster_stats = true`:

-indices size, document count, indexing and deletion times, search times,
-field cache size, merges and flushes measurement names:
-- elasticsearch_indices
-  - id_cache_memory_size_in_bytes value=0
-  - completion_size_in_bytes value=0
-  - suggest_total value=0
-  - suggest_time_in_millis value=0
-  - suggest_current value=0
-  - 
query_cache_memory_size_in_bytes value=0 - - query_cache_evictions value=0 - - query_cache_hit_count value=0 - - query_cache_miss_count value=0 - - store_size_in_bytes value=37715234 - - store_throttle_time_in_millis value=215 - - merges_current_docs value=0 - - merges_current_size_in_bytes value=0 - - merges_total value=133 - - merges_total_time_in_millis value=21060 - - merges_total_docs value=203672 - - merges_total_size_in_bytes value=142900226 - - merges_current value=0 - - filter_cache_memory_size_in_bytes value=7384 - - filter_cache_evictions value=0 - - indexing_index_total value=84790 - - indexing_index_time_in_millis value=29680 - - indexing_index_current value=0 - - indexing_noop_update_total value=0 - - indexing_throttle_time_in_millis value=0 - - indexing_delete_tota value=13879 - - indexing_delete_time_in_millis value=1139 - - indexing_delete_current value=0 - - get_exists_time_in_millis value=0 - - get_missing_total value=1 - - get_missing_time_in_millis value=2 - - get_current value=0 - - get_total value=1 - - get_time_in_millis value=2 - - get_exists_total value=0 - - refresh_total value=1076 - - refresh_total_time_in_millis value=20078 - - percolate_current value=0 - - percolate_memory_size_in_bytes value=-1 - - percolate_queries value=0 - - percolate_total value=0 - - percolate_time_in_millis value=0 - - translog_operations value=17702 - - translog_size_in_bytes value=17 - - recovery_current_as_source value=0 - - recovery_current_as_target value=0 - - recovery_throttle_time_in_millis value=0 - - docs_count value=29652 - - docs_deleted value=5229 - - flush_total_time_in_millis value=2401 - - flush_total value=115 - - fielddata_memory_size_in_bytes value=12996 - - fielddata_evictions value=0 - - search_fetch_current value=0 - - search_open_contexts value=0 - - search_query_total value=1452 - - search_query_time_in_millis value=5695 - - search_query_current value=0 - - search_fetch_total value=414 - - search_fetch_time_in_millis value=146 - - warmer_current value=0 - - warmer_total value=2319 - - warmer_total_time_in_millis value=448 - - segments_count value=134 - - segments_memory_in_bytes value=1285212 - - segments_index_writer_memory_in_bytes value=0 - - segments_index_writer_max_memory_in_bytes value=172368955 - - segments_version_map_memory_in_bytes value=611844 - - segments_fixed_bit_set_memory_in_bytes value=0 +- elasticsearch_clusterstats_indices + - tags: + - cluster_name + - node_name + - status + - fields: + - completion_size_in_bytes (float) + - count (float) + - docs_count (float) + - docs_deleted (float) + - fielddata_evictions (float) + - fielddata_memory_size_in_bytes (float) + - query_cache_cache_count (float) + - query_cache_cache_size (float) + - query_cache_evictions (float) + - query_cache_hit_count (float) + - query_cache_memory_size_in_bytes (float) + - query_cache_miss_count (float) + - query_cache_total_count (float) + - segments_count (float) + - segments_doc_values_memory_in_bytes (float) + - segments_fixed_bit_set_memory_in_bytes (float) + - segments_index_writer_memory_in_bytes (float) + - segments_max_unsafe_auto_id_timestamp (float) + - segments_memory_in_bytes (float) + - segments_norms_memory_in_bytes (float) + - segments_points_memory_in_bytes (float) + - segments_stored_fields_memory_in_bytes (float) + - segments_term_vectors_memory_in_bytes (float) + - segments_terms_memory_in_bytes (float) + - segments_version_map_memory_in_bytes (float) + - shards_index_primaries_avg (float) + - shards_index_primaries_max (float) + - 
shards_index_primaries_min (float) + - shards_index_replication_avg (float) + - shards_index_replication_max (float) + - shards_index_replication_min (float) + - shards_index_shards_avg (float) + - shards_index_shards_max (float) + - shards_index_shards_min (float) + - shards_primaries (float) + - shards_replication (float) + - shards_total (float) + - store_size_in_bytes (float) -HTTP connection measurement names: -- elasticsearch_http - - current_open value=3 - - total_opened value=3 ++ elasticsearch_clusterstats_nodes + - tags: + - cluster_name + - node_name + - status + - fields: + - count_coordinating_only (float) + - count_data (float) + - count_ingest (float) + - count_master (float) + - count_total (float) + - fs_available_in_bytes (float) + - fs_free_in_bytes (float) + - fs_total_in_bytes (float) + - jvm_max_uptime_in_millis (float) + - jvm_mem_heap_max_in_bytes (float) + - jvm_mem_heap_used_in_bytes (float) + - jvm_threads (float) + - jvm_versions_0_count (float) + - jvm_versions_0_version (string) + - jvm_versions_0_vm_name (string) + - jvm_versions_0_vm_vendor (string) + - jvm_versions_0_vm_version (string) + - network_types_http_types_security4 (float) + - network_types_transport_types_security4 (float) + - os_allocated_processors (float) + - os_available_processors (float) + - os_mem_free_in_bytes (float) + - os_mem_free_percent (float) + - os_mem_total_in_bytes (float) + - os_mem_used_in_bytes (float) + - os_mem_used_percent (float) + - os_names_0_count (float) + - os_names_0_name (string) + - os_pretty_names_0_count (float) + - os_pretty_names_0_pretty_name (string) + - process_cpu_percent (float) + - process_open_file_descriptors_avg (float) + - process_open_file_descriptors_max (float) + - process_open_file_descriptors_min (float) + - versions_0 (string) -JVM stats, memory pool information, garbage collection, buffer pools measurement names: -- elasticsearch_jvm - - timestamp value=1436460392945 - - uptime_in_millis value=202245 - - mem_non_heap_used_in_bytes value=39634576 - - mem_non_heap_committed_in_bytes value=40841216 - - mem_pools_young_max_in_bytes value=279183360 - - mem_pools_young_peak_used_in_bytes value=71630848 - - mem_pools_young_peak_max_in_bytes value=279183360 - - mem_pools_young_used_in_bytes value=32685760 - - mem_pools_survivor_peak_used_in_bytes value=8912888 - - mem_pools_survivor_peak_max_in_bytes value=34865152 - - mem_pools_survivor_used_in_bytes value=8912880 - - mem_pools_survivor_max_in_bytes value=34865152 - - mem_pools_old_peak_max_in_bytes value=724828160 - - mem_pools_old_used_in_bytes value=11110928 - - mem_pools_old_max_in_bytes value=724828160 - - mem_pools_old_peak_used_in_bytes value=14354608 - - mem_heap_used_in_bytes value=52709568 - - mem_heap_used_percent value=5 - - mem_heap_committed_in_bytes value=259522560 - - mem_heap_max_in_bytes value=1038876672 - - threads_peak_count value=45 - - threads_count value=44 - - gc_collectors_young_collection_count value=2 - - gc_collectors_young_collection_time_in_millis value=98 - - gc_collectors_old_collection_count value=1 - - gc_collectors_old_collection_time_in_millis value=24 - - buffer_pools_direct_count value=40 - - buffer_pools_direct_used_in_bytes value=6304239 - - buffer_pools_direct_total_capacity_in_bytes value=6304239 - - buffer_pools_mapped_count value=0 - - buffer_pools_mapped_used_in_bytes value=0 - - buffer_pools_mapped_total_capacity_in_bytes value=0 +Emitted when the appropriate `node_stats` options are set. 
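+
+For example, a minimal sketch of a configuration that gathers only the `jvm`
+and `http` node stats categories (the other node stats measurements listed
+below are then skipped):
+
+```toml
+[[inputs.elasticsearch]]
+  servers = ["http://localhost:9200"]
+  node_stats = ["jvm", "http"]
+```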
-TCP information measurement names: -- elasticsearch_network - - tcp_in_errs value=0 - - tcp_passive_opens value=16 - - tcp_curr_estab value=29 - - tcp_in_segs value=113 - - tcp_out_segs value=97 - - tcp_retrans_segs value=0 - - tcp_attempt_fails value=0 - - tcp_active_opens value=13 - - tcp_estab_resets value=0 - - tcp_out_rsts value=0 - -Operating system stats, load average, cpu, mem, swap measurement names: -- elasticsearch_os - - swap_used_in_bytes value=0 - - swap_free_in_bytes value=487997440 - - timestamp value=1436460392944 - - uptime_in_millis value=25092 - - cpu_sys value=0 - - cpu_user value=0 - - cpu_idle value=99 - - cpu_usage value=0 - - cpu_stolen value=0 - - mem_free_percent value=74 - - mem_used_percent value=25 - - mem_actual_free_in_bytes value=1565470720 - - mem_actual_used_in_bytes value=534159360 - - mem_free_in_bytes value=477761536 - - mem_used_in_bytes value=1621868544 - -Process statistics, memory consumption, cpu usage, open file descriptors measurement names: -- elasticsearch_process - - mem_resident_in_bytes value=246382592 - - mem_share_in_bytes value=18747392 - - mem_total_virtual_in_bytes value=4747890688 - - timestamp value=1436460392945 - - open_file_descriptors value=160 - - cpu_total_in_millis value=15480 - - cpu_percent value=2 - - cpu_sys_in_millis value=1870 - - cpu_user_in_millis value=13610 - -Statistics about each thread pool, including current size, queue and rejected tasks measurement names: -- elasticsearch_thread_pool - - merge_threads value=6 - - merge_queue value=4 - - merge_active value=5 - - merge_rejected value=2 - - merge_largest value=5 - - merge_completed value=1 - - bulk_threads value=4 - - bulk_queue value=5 - - bulk_active value=7 - - bulk_rejected value=3 - - bulk_largest value=1 - - bulk_completed value=4 - - warmer_threads value=2 - - warmer_queue value=7 - - warmer_active value=3 - - warmer_rejected value=2 - - warmer_largest value=3 - - warmer_completed value=1 - - get_largest value=2 - - get_completed value=1 - - get_threads value=1 - - get_queue value=8 - - get_active value=4 - - get_rejected value=3 - - index_threads value=6 - - index_queue value=8 - - index_active value=4 - - index_rejected value=2 - - index_largest value=3 - - index_completed value=6 - - suggest_threads value=2 - - suggest_queue value=7 - - suggest_active value=2 - - suggest_rejected value=1 - - suggest_largest value=8 - - suggest_completed value=3 - - fetch_shard_store_queue value=7 - - fetch_shard_store_active value=4 - - fetch_shard_store_rejected value=2 - - fetch_shard_store_largest value=4 - - fetch_shard_store_completed value=1 - - fetch_shard_store_threads value=1 - - management_threads value=2 - - management_queue value=3 - - management_active value=1 - - management_rejected value=6 - - management_largest value=2 - - management_completed value=22 - - percolate_queue value=23 - - percolate_active value=13 - - percolate_rejected value=235 - - percolate_largest value=23 - - percolate_completed value=33 - - percolate_threads value=123 - - listener_active value=4 - - listener_rejected value=8 - - listener_largest value=1 - - listener_completed value=1 - - listener_threads value=1 - - listener_queue value=2 - - search_rejected value=7 - - search_largest value=2 - - search_completed value=4 - - search_threads value=5 - - search_queue value=7 - - search_active value=2 - - fetch_shard_started_threads value=3 - - fetch_shard_started_queue value=1 - - fetch_shard_started_active value=5 - - fetch_shard_started_rejected value=6 - - fetch_shard_started_largest 
value=4 - - fetch_shard_started_completed value=54 - - refresh_rejected value=4 - - refresh_largest value=8 - - refresh_completed value=3 - - refresh_threads value=23 - - refresh_queue value=7 - - refresh_active value=3 - - optimize_threads value=3 - - optimize_queue value=4 - - optimize_active value=1 - - optimize_rejected value=2 - - optimize_largest value=7 - - optimize_completed value=3 - - snapshot_largest value=1 - - snapshot_completed value=0 - - snapshot_threads value=8 - - snapshot_queue value=5 - - snapshot_active value=6 - - snapshot_rejected value=2 - - generic_threads value=1 - - generic_queue value=4 - - generic_active value=6 - - generic_rejected value=3 - - generic_largest value=2 - - generic_completed value=27 - - flush_threads value=3 - - flush_queue value=8 - - flush_active value=0 - - flush_rejected value=1 - - flush_largest value=5 - - flush_completed value=3 - -Transport statistics about sent and received bytes in cluster communication measurement names: - elasticsearch_transport - - server_open value=13 - - rx_count value=6 - - rx_size_in_bytes value=1380 - - tx_count value=6 - - tx_size_in_bytes value=1380 + - tags: + - cluster_name + - node_attribute_ml.enabled + - node_attribute_ml.machine_memory + - node_attribute_ml.max_open_jobs + - node_attribute_xpack.installed + - node_host + - node_id + - node_name + - fields: + - rx_count (float) + - rx_size_in_bytes (float) + - server_open (float) + - tx_count (float) + - tx_size_in_bytes (float) + ++ elasticsearch_breakers + - tags: + - cluster_name + - node_attribute_ml.enabled + - node_attribute_ml.machine_memory + - node_attribute_ml.max_open_jobs + - node_attribute_xpack.installed + - node_host + - node_id + - node_name + - fields: + - accounting_estimated_size_in_bytes (float) + - accounting_limit_size_in_bytes (float) + - accounting_overhead (float) + - accounting_tripped (float) + - fielddata_estimated_size_in_bytes (float) + - fielddata_limit_size_in_bytes (float) + - fielddata_overhead (float) + - fielddata_tripped (float) + - in_flight_requests_estimated_size_in_bytes (float) + - in_flight_requests_limit_size_in_bytes (float) + - in_flight_requests_overhead (float) + - in_flight_requests_tripped (float) + - parent_estimated_size_in_bytes (float) + - parent_limit_size_in_bytes (float) + - parent_overhead (float) + - parent_tripped (float) + - request_estimated_size_in_bytes (float) + - request_limit_size_in_bytes (float) + - request_overhead (float) + - request_tripped (float) + +- elasticsearch_fs + - tags: + - cluster_name + - node_attribute_ml.enabled + - node_attribute_ml.machine_memory + - node_attribute_ml.max_open_jobs + - node_attribute_xpack.installed + - node_host + - node_id + - node_name + - fields: + - data_0_available_in_bytes (float) + - data_0_free_in_bytes (float) + - data_0_total_in_bytes (float) + - io_stats_devices_0_operations (float) + - io_stats_devices_0_read_kilobytes (float) + - io_stats_devices_0_read_operations (float) + - io_stats_devices_0_write_kilobytes (float) + - io_stats_devices_0_write_operations (float) + - io_stats_total_operations (float) + - io_stats_total_read_kilobytes (float) + - io_stats_total_read_operations (float) + - io_stats_total_write_kilobytes (float) + - io_stats_total_write_operations (float) + - timestamp (float) + - total_available_in_bytes (float) + - total_free_in_bytes (float) + - total_total_in_bytes (float) + ++ elasticsearch_http + - tags: + - cluster_name + - node_attribute_ml.enabled + - node_attribute_ml.machine_memory + - 
node_attribute_ml.max_open_jobs + - node_attribute_xpack.installed + - node_host + - node_id + - node_name + - fields: + - current_open (float) + - total_opened (float) + +- elasticsearch_indices + - tags: + - cluster_name + - node_attribute_ml.enabled + - node_attribute_ml.machine_memory + - node_attribute_ml.max_open_jobs + - node_attribute_xpack.installed + - node_host + - node_id + - node_name + - fields: + - completion_size_in_bytes (float) + - docs_count (float) + - docs_deleted (float) + - fielddata_evictions (float) + - fielddata_memory_size_in_bytes (float) + - flush_periodic (float) + - flush_total (float) + - flush_total_time_in_millis (float) + - get_current (float) + - get_exists_time_in_millis (float) + - get_exists_total (float) + - get_missing_time_in_millis (float) + - get_missing_total (float) + - get_time_in_millis (float) + - get_total (float) + - indexing_delete_current (float) + - indexing_delete_time_in_millis (float) + - indexing_delete_total (float) + - indexing_index_current (float) + - indexing_index_failed (float) + - indexing_index_time_in_millis (float) + - indexing_index_total (float) + - indexing_noop_update_total (float) + - indexing_throttle_time_in_millis (float) + - merges_current (float) + - merges_current_docs (float) + - merges_current_size_in_bytes (float) + - merges_total (float) + - merges_total_auto_throttle_in_bytes (float) + - merges_total_docs (float) + - merges_total_size_in_bytes (float) + - merges_total_stopped_time_in_millis (float) + - merges_total_throttled_time_in_millis (float) + - merges_total_time_in_millis (float) + - query_cache_cache_count (float) + - query_cache_cache_size (float) + - query_cache_evictions (float) + - query_cache_hit_count (float) + - query_cache_memory_size_in_bytes (float) + - query_cache_miss_count (float) + - query_cache_total_count (float) + - recovery_current_as_source (float) + - recovery_current_as_target (float) + - recovery_throttle_time_in_millis (float) + - refresh_listeners (float) + - refresh_total (float) + - refresh_total_time_in_millis (float) + - request_cache_evictions (float) + - request_cache_hit_count (float) + - request_cache_memory_size_in_bytes (float) + - request_cache_miss_count (float) + - search_fetch_current (float) + - search_fetch_time_in_millis (float) + - search_fetch_total (float) + - search_open_contexts (float) + - search_query_current (float) + - search_query_time_in_millis (float) + - search_query_total (float) + - search_scroll_current (float) + - search_scroll_time_in_millis (float) + - search_scroll_total (float) + - search_suggest_current (float) + - search_suggest_time_in_millis (float) + - search_suggest_total (float) + - segments_count (float) + - segments_doc_values_memory_in_bytes (float) + - segments_fixed_bit_set_memory_in_bytes (float) + - segments_index_writer_memory_in_bytes (float) + - segments_max_unsafe_auto_id_timestamp (float) + - segments_memory_in_bytes (float) + - segments_norms_memory_in_bytes (float) + - segments_points_memory_in_bytes (float) + - segments_stored_fields_memory_in_bytes (float) + - segments_term_vectors_memory_in_bytes (float) + - segments_terms_memory_in_bytes (float) + - segments_version_map_memory_in_bytes (float) + - store_size_in_bytes (float) + - translog_earliest_last_modified_age (float) + - translog_operations (float) + - translog_size_in_bytes (float) + - translog_uncommitted_operations (float) + - translog_uncommitted_size_in_bytes (float) + - warmer_current (float) + - warmer_total (float) + - warmer_total_time_in_millis 
(float) + ++ elasticsearch_jvm + - tags: + - cluster_name + - node_attribute_ml.enabled + - node_attribute_ml.machine_memory + - node_attribute_ml.max_open_jobs + - node_attribute_xpack.installed + - node_host + - node_id + - node_name + - fields: + - buffer_pools_direct_count (float) + - buffer_pools_direct_total_capacity_in_bytes (float) + - buffer_pools_direct_used_in_bytes (float) + - buffer_pools_mapped_count (float) + - buffer_pools_mapped_total_capacity_in_bytes (float) + - buffer_pools_mapped_used_in_bytes (float) + - classes_current_loaded_count (float) + - classes_total_loaded_count (float) + - classes_total_unloaded_count (float) + - gc_collectors_old_collection_count (float) + - gc_collectors_old_collection_time_in_millis (float) + - gc_collectors_young_collection_count (float) + - gc_collectors_young_collection_time_in_millis (float) + - mem_heap_committed_in_bytes (float) + - mem_heap_max_in_bytes (float) + - mem_heap_used_in_bytes (float) + - mem_heap_used_percent (float) + - mem_non_heap_committed_in_bytes (float) + - mem_non_heap_used_in_bytes (float) + - mem_pools_old_max_in_bytes (float) + - mem_pools_old_peak_max_in_bytes (float) + - mem_pools_old_peak_used_in_bytes (float) + - mem_pools_old_used_in_bytes (float) + - mem_pools_survivor_max_in_bytes (float) + - mem_pools_survivor_peak_max_in_bytes (float) + - mem_pools_survivor_peak_used_in_bytes (float) + - mem_pools_survivor_used_in_bytes (float) + - mem_pools_young_max_in_bytes (float) + - mem_pools_young_peak_max_in_bytes (float) + - mem_pools_young_peak_used_in_bytes (float) + - mem_pools_young_used_in_bytes (float) + - threads_count (float) + - threads_peak_count (float) + - timestamp (float) + - uptime_in_millis (float) + +- elasticsearch_os + - tags: + - cluster_name + - node_attribute_ml.enabled + - node_attribute_ml.machine_memory + - node_attribute_ml.max_open_jobs + - node_attribute_xpack.installed + - node_host + - node_id + - node_name + - fields: + - cgroup_cpu_cfs_period_micros (float) + - cgroup_cpu_cfs_quota_micros (float) + - cgroup_cpu_stat_number_of_elapsed_periods (float) + - cgroup_cpu_stat_number_of_times_throttled (float) + - cgroup_cpu_stat_time_throttled_nanos (float) + - cgroup_cpuacct_usage_nanos (float) + - cpu_load_average_15m (float) + - cpu_load_average_1m (float) + - cpu_load_average_5m (float) + - cpu_percent (float) + - mem_free_in_bytes (float) + - mem_free_percent (float) + - mem_total_in_bytes (float) + - mem_used_in_bytes (float) + - mem_used_percent (float) + - swap_free_in_bytes (float) + - swap_total_in_bytes (float) + - swap_used_in_bytes (float) + - timestamp (float) + ++ elasticsearch_process + - tags: + - cluster_name + - node_attribute_ml.enabled + - node_attribute_ml.machine_memory + - node_attribute_ml.max_open_jobs + - node_attribute_xpack.installed + - node_host + - node_id + - node_name + - fields: + - cpu_percent (float) + - cpu_total_in_millis (float) + - max_file_descriptors (float) + - mem_total_virtual_in_bytes (float) + - open_file_descriptors (float) + - timestamp (float) + +- elasticsearch_thread_pool + - tags: + - cluster_name + - node_attribute_ml.enabled + - node_attribute_ml.machine_memory + - node_attribute_ml.max_open_jobs + - node_attribute_xpack.installed + - node_host + - node_id + - node_name + - fields: + - analyze_active (float) + - analyze_completed (float) + - analyze_largest (float) + - analyze_queue (float) + - analyze_rejected (float) + - analyze_threads (float) + - ccr_active (float) + - ccr_completed (float) + - ccr_largest (float) + - 
ccr_queue (float) + - ccr_rejected (float) + - ccr_threads (float) + - fetch_shard_started_active (float) + - fetch_shard_started_completed (float) + - fetch_shard_started_largest (float) + - fetch_shard_started_queue (float) + - fetch_shard_started_rejected (float) + - fetch_shard_started_threads (float) + - fetch_shard_store_active (float) + - fetch_shard_store_completed (float) + - fetch_shard_store_largest (float) + - fetch_shard_store_queue (float) + - fetch_shard_store_rejected (float) + - fetch_shard_store_threads (float) + - flush_active (float) + - flush_completed (float) + - flush_largest (float) + - flush_queue (float) + - flush_rejected (float) + - flush_threads (float) + - force_merge_active (float) + - force_merge_completed (float) + - force_merge_largest (float) + - force_merge_queue (float) + - force_merge_rejected (float) + - force_merge_threads (float) + - generic_active (float) + - generic_completed (float) + - generic_largest (float) + - generic_queue (float) + - generic_rejected (float) + - generic_threads (float) + - get_active (float) + - get_completed (float) + - get_largest (float) + - get_queue (float) + - get_rejected (float) + - get_threads (float) + - index_active (float) + - index_completed (float) + - index_largest (float) + - index_queue (float) + - index_rejected (float) + - index_threads (float) + - listener_active (float) + - listener_completed (float) + - listener_largest (float) + - listener_queue (float) + - listener_rejected (float) + - listener_threads (float) + - management_active (float) + - management_completed (float) + - management_largest (float) + - management_queue (float) + - management_rejected (float) + - management_threads (float) + - ml_autodetect_active (float) + - ml_autodetect_completed (float) + - ml_autodetect_largest (float) + - ml_autodetect_queue (float) + - ml_autodetect_rejected (float) + - ml_autodetect_threads (float) + - ml_datafeed_active (float) + - ml_datafeed_completed (float) + - ml_datafeed_largest (float) + - ml_datafeed_queue (float) + - ml_datafeed_rejected (float) + - ml_datafeed_threads (float) + - ml_utility_active (float) + - ml_utility_completed (float) + - ml_utility_largest (float) + - ml_utility_queue (float) + - ml_utility_rejected (float) + - ml_utility_threads (float) + - refresh_active (float) + - refresh_completed (float) + - refresh_largest (float) + - refresh_queue (float) + - refresh_rejected (float) + - refresh_threads (float) + - rollup_indexing_active (float) + - rollup_indexing_completed (float) + - rollup_indexing_largest (float) + - rollup_indexing_queue (float) + - rollup_indexing_rejected (float) + - rollup_indexing_threads (float) + - search_active (float) + - search_completed (float) + - search_largest (float) + - search_queue (float) + - search_rejected (float) + - search_threads (float) + - search_throttled_active (float) + - search_throttled_completed (float) + - search_throttled_largest (float) + - search_throttled_queue (float) + - search_throttled_rejected (float) + - search_throttled_threads (float) + - security-token-key_active (float) + - security-token-key_completed (float) + - security-token-key_largest (float) + - security-token-key_queue (float) + - security-token-key_rejected (float) + - security-token-key_threads (float) + - snapshot_active (float) + - snapshot_completed (float) + - snapshot_largest (float) + - snapshot_queue (float) + - snapshot_rejected (float) + - snapshot_threads (float) + - warmer_active (float) + - warmer_completed (float) + - warmer_largest (float) + - 
warmer_queue (float)
+    - warmer_rejected (float)
+    - warmer_threads (float)
+    - watcher_active (float)
+    - watcher_completed (float)
+    - watcher_largest (float)
+    - watcher_queue (float)
+    - watcher_rejected (float)
+    - watcher_threads (float)
+    - write_active (float)
+    - write_completed (float)
+    - write_largest (float)
+    - write_queue (float)
+    - write_rejected (float)
+    - write_threads (float)

From 1fe7301b951da4c5201f1cc24c94ebde3d27b212 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 25 Jun 2019 17:30:27 -0700
Subject: [PATCH 0972/1815] Update changelog

---
 CHANGELOG.md | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c7c8fa59a..0b6871042 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,12 @@
 ## v1.12 [unreleased]

+#### Release Notes
+
+- The cluster health related fields in the elasticsearch input have been split
+  out from the `elasticsearch_indices` measurement into the new
+  `elasticsearch_cluster_health_indices` measurement as they were originally
+  combined in error.
+
 #### New Inputs

 - [docker_log](/plugins/inputs/docker_log) - Contributed by @prashanthjbabu
@@ -29,6 +36,7 @@

 - [#5692](https://github.com/influxdata/telegraf/issues/5692): Fix sensor read error stops reporting of all sensors in temp input.
 - [#4356](https://github.com/influxdata/telegraf/issues/4356): Fix double pct replacement in sysstat input.
+- [#6004](https://github.com/influxdata/telegraf/issues/6004): Fix race in master node detection in elasticsearch input.

 ## v1.11.1 [2019-06-25]

From f7e85ebac253307d7e6ca60e30baca0abd54acf4 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 1 Jul 2019 12:14:30 -0700
Subject: [PATCH 0973/1815] Clarify that cisco mdt telemetry tcp transport does
 not support TLS

---
 plugins/inputs/cisco_telemetry_mdt/README.md | 26 ++++++++++---------
 .../cisco_telemetry_mdt.go                   |  8 +++---
 2 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/plugins/inputs/cisco_telemetry_mdt/README.md b/plugins/inputs/cisco_telemetry_mdt/README.md
index 08f180b2e..2848d0493 100644
--- a/plugins/inputs/cisco_telemetry_mdt/README.md
+++ b/plugins/inputs/cisco_telemetry_mdt/README.md
@@ -14,22 +14,24 @@ The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and l

 ```toml
 [[inputs.cisco_telemetry_mdt]]
-  ## Telemetry transport (one of: tcp, grpc)
-  transport = "grpc"
+  ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when
+  ## using the grpc transport.
+  transport = "grpc"

-  ## Address and port to host telemetry listener
-  service_address = ":57000"
+  ## Address and port to host telemetry listener
+  service_address = ":57000"

-  ## Enable TLS for GRPC transport
-  # tls_cert = "/etc/telegraf/cert.pem"
-  # tls_key = "/etc/telegraf/key.pem"
+  ## Enable TLS; grpc transport only.
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"

-  ## Enable TLS client authentication and define allowed CA certificates
-  # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+  ## Enable TLS client authentication and define allowed CA certificates; grpc
+  ## transport only. 
+ # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - ## Define aliases to map telemetry encoding paths to simple measurement names - [inputs.cisco_telemetry_mdt.aliases] - ifstats = "ietf-interfaces:interfaces-state/interface/statistics" + ## Define aliases to map telemetry encoding paths to simple measurement names + [inputs.cisco_telemetry_mdt.aliases] + ifstats = "ietf-interfaces:interfaces-state/interface/statistics" ``` ### Example Output: diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go index fc018a31e..74480cb8a 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -348,17 +348,19 @@ func (c *CiscoTelemetryMDT) Stop() { } const sampleConfig = ` - ## Telemetry transport (one of: tcp, grpc) + ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when + ## using the grpc transport. transport = "grpc" ## Address and port to host telemetry listener service_address = ":57000" - ## Enable TLS for GRPC transport + ## Enable TLS; grpc transport only. # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" - ## Enable TLS client authentication and define allowed CA certificates + ## Enable TLS client authentication and define allowed CA certificates; grpc + ## transport only. # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] ## Define aliases to map telemetry encoding paths to simple measurement names From 234a9460938bacc41bcd25fa09841d4832e53039 Mon Sep 17 00:00:00 2001 From: David Wahlund Date: Tue, 2 Jul 2019 00:14:03 +0200 Subject: [PATCH 0974/1815] Fix source address ping flag on BSD (#6056) --- plugins/inputs/ping/ping.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 28e967a85..efd1da32e 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -234,7 +234,7 @@ func (p *Ping) args(url string, system string) []string { case "darwin": args = append(args, "-I", p.Interface) case "freebsd", "netbsd", "openbsd": - args = append(args, "-s", p.Interface) + args = append(args, "-S", p.Interface) case "linux": args = append(args, "-I", p.Interface) default: From 5bad2c3a432f1290137ba2bf10f95fcf2844c298 Mon Sep 17 00:00:00 2001 From: Leandro Piccilli Date: Tue, 2 Jul 2019 01:22:17 +0200 Subject: [PATCH 0975/1815] Add support for ES 7.x to elasticsearch output (#6053) --- docker-compose.yml | 7 +- plugins/outputs/elasticsearch/README.md | 13 +- .../outputs/elasticsearch/elasticsearch.go | 165 +++++++++++------- 3 files changed, 111 insertions(+), 74 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index a5991434b..bce3f4922 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -24,12 +24,13 @@ services: depends_on: - zookeeper elasticsearch: - image: elasticsearch:5 + image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0 environment: - - JAVA_OPTS="-Xms256m -Xmx256m" + - "ES_JAVA_OPTS=-Xms256m -Xmx256m" + - discovery.type=single-node + - xpack.security.enabled=false ports: - "9200:9200" - - "9300:9300" mysql: image: mysql environment: diff --git a/plugins/outputs/elasticsearch/README.md b/plugins/outputs/elasticsearch/README.md index 11f3c1385..2ba46c87e 100644 --- a/plugins/outputs/elasticsearch/README.md +++ b/plugins/outputs/elasticsearch/README.md @@ -1,8 +1,8 @@ -## Elasticsearch Output Plugin for Telegraf +# Elasticsearch Output Plugin for Telegraf -This 
plugin writes to [Elasticsearch](https://www.elastic.co) via HTTP using Elastic (http://olivere.github.io/elastic/).
+This plugin writes to [Elasticsearch](https://www.elastic.co) via HTTP using Elastic (http://olivere.github.io/elastic/).

-Currently it only supports Elasticsearch 5.x series.
+It supports Elasticsearch releases from 5.x up to 7.x.

 ## Elasticsearch indexes and templates

@@ -22,7 +22,7 @@ For more information on how this works, see https://www.elastic.co/guide/en/elas

 This plugin can create a working template for use with telegraf metrics. It uses Elasticsearch dynamic templates feature to set proper types for the tags and metrics fields.
 If the template specified already exists, it will not overwrite unless you configure this plugin to do so. Thus you can customize this template after its creation if necessary.

-Example of an index template created by telegraf:
+Example of an index template created by telegraf on Elasticsearch 5.x:

 ```json
 {
@@ -35,6 +35,8 @@ Example of an index template created by telegraf:
           "limit": "5000"
         }
       },
+      "auto_expand_replicas" : "0-1",
+      "codec" : "best_compression",
       "refresh_interval": "10s"
     }
   },
@@ -159,7 +161,7 @@ This plugin will format the events in the following way:
   ## Set the interval to check if the Elasticsearch nodes are available
   ## Setting to "0s" will disable the health check (not recommended in production)
   health_check_interval = "10s"
-  ## HTTP basic authentication details (eg. when using Shield)
+  ## HTTP basic authentication details
   # username = "telegraf"
   # password = "mypassword"

@@ -209,6 +211,7 @@ This plugin will format the events in the following way:
   %H - hour (00..23)
   %V - week of the year (ISO week) (01..53)
 ```
+
 Additionally, you can specify dynamic index names by using tags with the notation ```{{tag_name}}```. This will store the metrics with different tag values in different indices. If the tag does not exist in a particular metric, the `default_tag_value` will be used instead.

 ### Optional parameters:

diff --git a/plugins/outputs/elasticsearch/elasticsearch.go b/plugins/outputs/elasticsearch/elasticsearch.go
index 56169135a..7c4d4755a 100644
--- a/plugins/outputs/elasticsearch/elasticsearch.go
+++ b/plugins/outputs/elasticsearch/elasticsearch.go
@@ -1,12 +1,14 @@
 package elasticsearch

 import (
+	"bytes"
 	"context"
 	"fmt"
 	"log"
 	"net/http"
 	"strconv"
 	"strings"
+	"text/template"
 	"time"

 	"github.com/influxdata/telegraf"
@@ -29,6 +31,7 @@ type Elasticsearch struct {
 	ManageTemplate     bool
 	TemplateName       string
 	OverwriteTemplate  bool
+	MajorReleaseNumber int
 	tls.ClientConfig

 	Client *elastic.Client
@@ -47,7 +50,7 @@ var sampleConfig = `
   ## Set the interval to check if the Elasticsearch nodes are available
   ## Setting to "0s" will disable the health check (not recommended in production)
   health_check_interval = "10s"
-  ## HTTP basic authentication details (eg.
when using Shield) + ## HTTP basic authentication details # username = "telegraf" # password = "mypassword" @@ -85,6 +88,81 @@ var sampleConfig = ` overwrite_template = false ` +const telegrafTemplate = ` +{ + {{ if (lt .Version 6) }} + "template": "{{.TemplatePattern}}", + {{ else }} + "index_patterns" : [ "{{.TemplatePattern}}" ], + {{ end }} + "settings": { + "index": { + "refresh_interval": "10s", + "mapping.total_fields.limit": 5000, + "auto_expand_replicas" : "0-1", + "codec" : "best_compression" + } + }, + "mappings" : { + {{ if (lt .Version 7) }} + "metrics" : { + {{ if (lt .Version 6) }} + "_all": { "enabled": false }, + {{ end }} + {{ end }} + "properties" : { + "@timestamp" : { "type" : "date" }, + "measurement_name" : { "type" : "keyword" } + }, + "dynamic_templates": [ + { + "tags": { + "match_mapping_type": "string", + "path_match": "tag.*", + "mapping": { + "ignore_above": 512, + "type": "keyword" + } + } + }, + { + "metrics_long": { + "match_mapping_type": "long", + "mapping": { + "type": "float", + "index": false + } + } + }, + { + "metrics_double": { + "match_mapping_type": "double", + "mapping": { + "type": "float", + "index": false + } + } + }, + { + "text_fields": { + "match": "*", + "mapping": { + "norms": false + } + } + } + ] + {{ if (lt .Version 7) }} + } + {{ end }} + } +}` + +type templatePart struct { + TemplatePattern string + Version int +} + func (a *Elasticsearch) Connect() error { if a.URLs == nil || a.IndexName == "" { return fmt.Errorf("Elasticsearch urls or index_name is not defined") @@ -142,14 +220,15 @@ func (a *Elasticsearch) Connect() error { } // quit if ES version is not supported - i, err := strconv.Atoi(strings.Split(esVersion, ".")[0]) - if err != nil || i < 5 { + majorReleaseNumber, err := strconv.Atoi(strings.Split(esVersion, ".")[0]) + if err != nil || majorReleaseNumber < 5 { return fmt.Errorf("Elasticsearch version not supported: %s", esVersion) } log.Println("I! Elasticsearch version: " + esVersion) a.Client = client + a.MajorReleaseNumber = majorReleaseNumber if a.ManageTemplate { err := a.manageTemplate(ctx) @@ -184,10 +263,13 @@ func (a *Elasticsearch) Write(metrics []telegraf.Metric) error { m["tag"] = metric.Tags() m[name] = metric.Fields() - bulkRequest.Add(elastic.NewBulkIndexRequest(). - Index(indexName). - Type("metrics"). 
- Doc(m)) + br := elastic.NewBulkIndexRequest().Index(indexName).Doc(m) + + if a.MajorReleaseNumber <= 6 { + br.Type("metrics") + } + + bulkRequest.Add(br) } @@ -237,65 +319,16 @@ func (a *Elasticsearch) manageTemplate(ctx context.Context) error { } if (a.OverwriteTemplate) || (!templateExists) || (templatePattern != "") { - // Create or update the template - tmpl := fmt.Sprintf(` - { - "template":"%s", - "settings": { - "index": { - "refresh_interval": "10s", - "mapping.total_fields.limit": 5000 - } - }, - "mappings" : { - "_default_" : { - "_all": { "enabled": false }, - "properties" : { - "@timestamp" : { "type" : "date" }, - "measurement_name" : { "type" : "keyword" } - }, - "dynamic_templates": [ - { - "tags": { - "match_mapping_type": "string", - "path_match": "tag.*", - "mapping": { - "ignore_above": 512, - "type": "keyword" - } - } - }, - { - "metrics_long": { - "match_mapping_type": "long", - "mapping": { - "type": "float", - "index": false - } - } - }, - { - "metrics_double": { - "match_mapping_type": "double", - "mapping": { - "type": "float", - "index": false - } - } - }, - { - "text_fields": { - "match": "*", - "mapping": { - "norms": false - } - } - } - ] - } - } - }`, templatePattern+"*") - _, errCreateTemplate := a.Client.IndexPutTemplate(a.TemplateName).BodyString(tmpl).Do(ctx) + tp := templatePart{ + TemplatePattern: templatePattern + "*", + Version: a.MajorReleaseNumber, + } + + t := template.Must(template.New("template").Parse(telegrafTemplate)) + var tmpl bytes.Buffer + + t.Execute(&tmpl, tp) + _, errCreateTemplate := a.Client.IndexPutTemplate(a.TemplateName).BodyString(tmpl.String()).Do(ctx) if errCreateTemplate != nil { return fmt.Errorf("Elasticsearch failed to create index template %s : %s", a.TemplateName, errCreateTemplate) From f7e85ebac253307d7e6ca60e30baca0abd54acf4 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 2 Jul 2019 12:14:48 -0600 Subject: [PATCH 0976/1815] Add basic auth to prometheus input plugin (#6062) --- plugins/inputs/prometheus/README.md | 5 +++++ plugins/inputs/prometheus/prometheus.go | 13 ++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index c1f50bb96..edc8a27d6 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -33,6 +33,11 @@ in Prometheus format. ## OR # bearer_token_string = "abc_123" + ## HTTP Basic Authentication username and password. ('bearer_token' and + ## 'bearer_token_string' take priority) + # username = "" + # password = "" + ## Specify timeout duration for slower prometheus clients (default is 3s) # response_timeout = "3s" diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index a4409c5b0..284114258 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -34,6 +34,10 @@ type Prometheus struct { BearerToken string `toml:"bearer_token"` BearerTokenString string `toml:"bearer_token_string"` + // Basic authentication credentials + Username string `toml:"username"` + Password string `toml:"password"` + ResponseTimeout internal.Duration `toml:"response_timeout"` tls.ClientConfig @@ -75,7 +79,12 @@ var sampleConfig = ` ## OR # bearer_token_string = "abc_123" - ## Specify timeout duration for slower prometheus clients (default is 3s) + ## HTTP Basic Authentication username and password. 
('bearer_token' and + ## 'bearer_token_string' take priority) + # username = "" + # password = "" + + ## Specify timeout duration for slower prometheus clients (default is 3s) # response_timeout = "3s" ## Optional TLS Config @@ -251,6 +260,8 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error req.Header.Set("Authorization", "Bearer "+string(token)) } else if p.BearerTokenString != "" { req.Header.Set("Authorization", "Bearer "+p.BearerTokenString) + } else if p.Username != "" || p.Password != "" { + req.SetBasicAuth(p.Username, p.Password) } var resp *http.Response From 9af39bbb7dfeb4a8e083a06ec93c5c6d756d22fc Mon Sep 17 00:00:00 2001 From: dupondje Date: Wed, 3 Jul 2019 22:04:07 +0200 Subject: [PATCH 0977/1815] Add node roles tag to elasticsearch input (#6064) --- plugins/inputs/elasticsearch/elasticsearch.go | 4 ++++ plugins/inputs/elasticsearch/elasticsearch_test.go | 1 + plugins/inputs/elasticsearch/testdata_test.go | 10 ++++++++++ 3 files changed, 15 insertions(+) diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 59b21f2cd..70377320f 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -6,6 +6,7 @@ import ( "io/ioutil" "net/http" "regexp" + "sort" "strings" "sync" "time" @@ -27,6 +28,7 @@ const statsPathLocal = "/_nodes/_local/stats" type nodeStat struct { Host string `json:"host"` Name string `json:"name"` + Roles []string `json:"roles"` Attributes map[string]string `json:"attributes"` Indices interface{} `json:"indices"` OS interface{} `json:"os"` @@ -326,11 +328,13 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er } for id, n := range nodeStats.Nodes { + sort.Strings(n.Roles) tags := map[string]string{ "node_id": id, "node_host": n.Host, "node_name": n.Name, "cluster_name": nodeStats.ClusterName, + "node_roles": strings.Join(n.Roles, ","), } for k, v := range n.Attributes { diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go index 298784132..4bf5c6a55 100644 --- a/plugins/inputs/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -21,6 +21,7 @@ func defaultTags() map[string]string { "node_id": "SDFsfSDFsdfFSDSDfSFDSDF", "node_name": "test.host.com", "node_host": "test", + "node_roles": "data,ingest,master", } } func defaultServerInfo() serverInfo { diff --git a/plugins/inputs/elasticsearch/testdata_test.go b/plugins/inputs/elasticsearch/testdata_test.go index ffdf9559d..c637bb9a9 100644 --- a/plugins/inputs/elasticsearch/testdata_test.go +++ b/plugins/inputs/elasticsearch/testdata_test.go @@ -117,6 +117,11 @@ const nodeStatsResponse = ` "inet[/127.0.0.1:9300]", "NONE" ], + "roles": [ + "master", + "data", + "ingest" + ], "attributes": { "master": "true" }, @@ -945,6 +950,11 @@ const nodeStatsResponseJVMProcess = ` "inet[/127.0.0.1:9300]", "NONE" ], + "roles": [ + "master", + "data", + "ingest" + ], "attributes": { "master": "true" }, From ad5d5844c720c846503cc49e903af81532a4c929 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 3 Jul 2019 16:28:11 -0700 Subject: [PATCH 0978/1815] Use int64 for fields in bind plugin (#6063) --- plugins/inputs/bind/bind_test.go | 44 ++++++++++++++--------------- plugins/inputs/bind/json_stats.go | 14 ++++----- plugins/inputs/bind/xml_stats_v2.go | 14 ++++----- plugins/inputs/bind/xml_stats_v3.go | 18 ++++++------ 4 files changed, 44 insertions(+), 46 
deletions(-) diff --git a/plugins/inputs/bind/bind_test.go b/plugins/inputs/bind/bind_test.go index b961d549d..40c32aee3 100644 --- a/plugins/inputs/bind/bind_test.go +++ b/plugins/inputs/bind/bind_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) @@ -127,20 +126,19 @@ func TestBindJsonStats(t *testing.T) { } fields := map[string]interface{}{ - "block_size": 13893632, - "context_size": 3685480, - "in_use": 3064368, - "lost": 0, - "total_use": 18206566, + "block_size": int64(13893632), + "context_size": int64(3685480), + "in_use": int64(3064368), + "lost": int64(0), + "total_use": int64(18206566), } - acc.AssertContainsTaggedFields(t, "bind_memory", fields, tags) }) // Subtest for per-context memory stats t.Run("memory_context", func(t *testing.T) { - assert.True(t, acc.HasIntField("bind_memory_context", "total")) - assert.True(t, acc.HasIntField("bind_memory_context", "in_use")) + assert.True(t, acc.HasInt64Field("bind_memory_context", "total")) + assert.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) }) } @@ -329,11 +327,11 @@ func TestBindXmlStatsV2(t *testing.T) { } fields := map[string]interface{}{ - "block_size": 77070336, - "context_size": 6663840, - "in_use": 20772579, - "lost": 0, - "total_use": 81804609, + "block_size": int64(77070336), + "context_size": int64(6663840), + "in_use": int64(20772579), + "lost": int64(0), + "total_use": int64(81804609), } acc.AssertContainsTaggedFields(t, "bind_memory", fields, tags) @@ -341,8 +339,8 @@ func TestBindXmlStatsV2(t *testing.T) { // Subtest for per-context memory stats t.Run("memory_context", func(t *testing.T) { - assert.True(t, acc.HasIntField("bind_memory_context", "total")) - assert.True(t, acc.HasIntField("bind_memory_context", "in_use")) + assert.True(t, acc.HasInt64Field("bind_memory_context", "total")) + assert.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) }) } @@ -553,11 +551,11 @@ func TestBindXmlStatsV3(t *testing.T) { } fields := map[string]interface{}{ - "block_size": 45875200, - "context_size": 10037400, - "in_use": 6000232, - "lost": 0, - "total_use": 777821909, + "block_size": int64(45875200), + "context_size": int64(10037400), + "in_use": int64(6000232), + "lost": int64(0), + "total_use": int64(777821909), } acc.AssertContainsTaggedFields(t, "bind_memory", fields, tags) @@ -565,8 +563,8 @@ func TestBindXmlStatsV3(t *testing.T) { // Subtest for per-context memory stats t.Run("memory_context", func(t *testing.T) { - assert.True(t, acc.HasIntField("bind_memory_context", "total")) - assert.True(t, acc.HasIntField("bind_memory_context", "in_use")) + assert.True(t, acc.HasInt64Field("bind_memory_context", "total")) + assert.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) }) } diff --git a/plugins/inputs/bind/json_stats.go b/plugins/inputs/bind/json_stats.go index 95c7e6fe8..cf595c1a3 100644 --- a/plugins/inputs/bind/json_stats.go +++ b/plugins/inputs/bind/json_stats.go @@ -23,16 +23,16 @@ type jsonStats struct { } type jsonMemory struct { - TotalUse int - InUse int - BlockSize int - ContextSize int - Lost int + TotalUse int64 + InUse int64 + BlockSize int64 + ContextSize int64 + Lost int64 Contexts []struct { Id string Name string - Total int - InUse int + Total int64 + InUse int64 } } diff --git a/plugins/inputs/bind/xml_stats_v2.go b/plugins/inputs/bind/xml_stats_v2.go index 45071bdc0..5e17851fb 100644 --- a/plugins/inputs/bind/xml_stats_v2.go +++ b/plugins/inputs/bind/xml_stats_v2.go @@ -44,15 +44,15 @@ type 
v2Statistics struct { // Omitted nodes: references, maxinuse, blocksize, pools, hiwater, lowater Id string `xml:"id"` Name string `xml:"name"` - Total int `xml:"total"` - InUse int `xml:"inuse"` + Total int64 `xml:"total"` + InUse int64 `xml:"inuse"` } `xml:"contexts>context"` Summary struct { - TotalUse int - InUse int - BlockSize int - ContextSize int - Lost int + TotalUse int64 + InUse int64 + BlockSize int64 + ContextSize int64 + Lost int64 } `xml:"summary"` } `xml:"memory"` } diff --git a/plugins/inputs/bind/xml_stats_v3.go b/plugins/inputs/bind/xml_stats_v3.go index ed2cc1b7f..89e4ea0b8 100644 --- a/plugins/inputs/bind/xml_stats_v3.go +++ b/plugins/inputs/bind/xml_stats_v3.go @@ -27,15 +27,15 @@ type v3Memory struct { // Omitted nodes: references, maxinuse, blocksize, pools, hiwater, lowater Id string `xml:"id"` Name string `xml:"name"` - Total int `xml:"total"` - InUse int `xml:"inuse"` + Total int64 `xml:"total"` + InUse int64 `xml:"inuse"` } `xml:"contexts>context"` Summary struct { - TotalUse int - InUse int - BlockSize int - ContextSize int - Lost int + TotalUse int64 + InUse int64 + BlockSize int64 + ContextSize int64 + Lost int64 } `xml:"summary"` } @@ -53,7 +53,7 @@ type v3View struct { Name string `xml:"name,attr"` RRSets []struct { Name string `xml:"name"` - Value int `xml:"counter"` + Value int64 `xml:"counter"` } `xml:"rrset"` } `xml:"cache"` } @@ -63,7 +63,7 @@ type v3CounterGroup struct { Type string `xml:"type,attr"` Counters []struct { Name string `xml:"name,attr"` - Value int `xml:",chardata"` + Value int64 `xml:",chardata"` } `xml:"counter"` } From 370d54b0239e142542bacf37d150c56e389833e6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 3 Jul 2019 16:37:45 -0700 Subject: [PATCH 0979/1815] Update changelog --- CHANGELOG.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b6871042..0ebe9f9c7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,9 @@ - [#5996](https://github.com/influxdata/telegraf/pull/5996): Add container uptime_ns in docker input plugin. - [#6016](https://github.com/influxdata/telegraf/pull/6016): Add better user-facing errors for API timeouts in docker input. - [#6027](https://github.com/influxdata/telegraf/pull/6027): Add TLS mutal auth support to jti_openconfig_telemetry input. +- [#6053](https://github.com/influxdata/telegraf/pull/6053): Add support for ES 7.x to elasticsearch output +- [#6062](https://github.com/influxdata/telegraf/pull/6062): Add basic auth to prometheus input plugin +- [#6064](https://github.com/influxdata/telegraf/pull/6064): Add node roles tag to elasticsearch input #### Bugfixes @@ -38,6 +41,13 @@ - [#4356](https://github.com/influxdata/telegraf/issues/4356): Fix double pct replacement in sysstat input. - [#6004](https://github.com/influxdata/telegraf/issues/6004): Fix race in master node detection in elasticsearch input. +## v1.11.2 [unreleased] + +#### Bugfixes + +- [#6056](https://github.com/influxdata/telegraf/pull/6056): Fix source address ping flag on BSD. +- [#6059](https://github.com/influxdata/telegraf/issues/6059): Fix value out of range error on 32-bit systems in bind input. 
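A quick note on why the `int` to `int64` change recorded above matters: Go's `int` is only 32 bits wide on 32-bit builds, so decoding a BIND counter above 2^31-1 fails with a range error, which is the symptom #6059 describes. The following is a minimal sketch of the failure mode, using `strconv.ParseInt` with explicit bit sizes to mimic 32-bit and 64-bit builds; the sample value is illustrative, not taken from the test data.

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// A counter larger than math.MaxInt32 (2147483647), as a busy BIND
	// server can plausibly report for byte counters.
	raw := "777821909999"

	// Mimics decoding into `int` on a 32-bit build: fails with
	// "value out of range".
	if _, err := strconv.ParseInt(raw, 10, 32); err != nil {
		fmt.Println("32-bit decode:", err)
	}

	// Mimics decoding into `int64`, which succeeds on every platform.
	if v, err := strconv.ParseInt(raw, 10, 64); err == nil {
		fmt.Println("64-bit decode:", v)
	}
}
```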
+ ## v1.11.1 [2019-06-25] #### Bugfixes From 04937d04988457c73cf14f4254da8cc57c8465d1 Mon Sep 17 00:00:00 2001 From: Alvaro Olmedo Rodriguez Date: Fri, 5 Jul 2019 21:03:52 +0200 Subject: [PATCH 0980/1815] Apply topic filter to partitions metrics in burrow input (#6070) --- plugins/inputs/burrow/burrow.go | 3 +++ plugins/inputs/burrow/burrow_test.go | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/burrow/burrow.go b/plugins/inputs/burrow/burrow.go index 9c532e3be..f08563dbd 100644 --- a/plugins/inputs/burrow/burrow.go +++ b/plugins/inputs/burrow/burrow.go @@ -432,6 +432,9 @@ func (b *burrow) genGroupStatusMetrics(r *apiResponse, cluster, group string, ac func (b *burrow) genGroupLagMetrics(r *apiResponse, cluster, group string, acc telegraf.Accumulator) { for _, partition := range r.Status.Partitions { + if !b.filterTopics.Match(partition.Topic) { + continue + } acc.AddFields( "burrow_partition", map[string]interface{}{ diff --git a/plugins/inputs/burrow/burrow_test.go b/plugins/inputs/burrow/burrow_test.go index 3847a5d7c..cafbcb940 100644 --- a/plugins/inputs/burrow/burrow_test.go +++ b/plugins/inputs/burrow/burrow_test.go @@ -262,7 +262,7 @@ func TestFilterGroups(t *testing.T) { acc := &testutil.Accumulator{} plugin.Gather(acc) - require.Exactly(t, 4, len(acc.Metrics)) + require.Exactly(t, 1, len(acc.Metrics)) require.Empty(t, acc.Errors) } From 5dea2175d29076a0b2d85a3ea2c327d95883dc4e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 8 Jul 2019 14:44:36 -0700 Subject: [PATCH 0981/1815] Fix panic with empty datadog tag string (#6088) --- plugins/inputs/statsd/datadog.go | 4 + plugins/inputs/statsd/statsd_test.go | 175 ++++++++++++++++----------- 2 files changed, 110 insertions(+), 69 deletions(-) diff --git a/plugins/inputs/statsd/datadog.go b/plugins/inputs/statsd/datadog.go index 6cce2316e..377db66e6 100644 --- a/plugins/inputs/statsd/datadog.go +++ b/plugins/inputs/statsd/datadog.go @@ -138,6 +138,10 @@ func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostnam } func parseDataDogTags(tags map[string]string, message string) { + if len(message) == 0 { + return + } + start, i := 0, 0 var k string var inVal bool // check if we are parsing the value part of the tag diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index 9f760b9f9..80b544234 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -847,85 +847,122 @@ func TestParse_Tags(t *testing.T) { } } -// Test that DataDog tags are parsed func TestParse_DataDogTags(t *testing.T) { - s := NewTestStatsd() - s.DataDogExtensions = true - - lines := []string{ - "my_counter:1|c|#host:localhost,environment:prod,endpoint:/:tenant?/oauth/ro", - "my_gauge:10.1|g|#live", - "my_set:1|s|#host:localhost", - "my_timer:3|ms|@0.1|#live,host:localhost", - } - - expectedTags := map[string]map[string]string{ - "my_counter": { - "host": "localhost", - "environment": "prod", - "endpoint": "/:tenant?/oauth/ro", - "metric_type": "counter", + tests := []struct { + name string + line string + expected []telegraf.Metric + }{ + { + name: "counter", + line: "my_counter:1|c|#host:localhost,environment:prod,endpoint:/:tenant?/oauth/ro", + expected: []telegraf.Metric{ + testutil.MustMetric( + "my_counter", + map[string]string{ + "endpoint": "/:tenant?/oauth/ro", + "environment": "prod", + "host": "localhost", + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 1, + }, + time.Now(), + ), + }, }, - - "my_gauge": 
{ - "live": "true", - "metric_type": "gauge", + { + name: "gauge", + line: "my_gauge:10.1|g|#live", + expected: []telegraf.Metric{ + testutil.MustMetric( + "my_gauge", + map[string]string{ + "live": "true", + "metric_type": "gauge", + }, + map[string]interface{}{ + "value": 10.1, + }, + time.Now(), + ), + }, }, - - "my_set": { - "host": "localhost", - "metric_type": "set", + { + name: "set", + line: "my_set:1|s|#host:localhost", + expected: []telegraf.Metric{ + testutil.MustMetric( + "my_set", + map[string]string{ + "host": "localhost", + "metric_type": "set", + }, + map[string]interface{}{ + "value": 1, + }, + time.Now(), + ), + }, }, - - "my_timer": { - "live": "true", - "host": "localhost", - "metric_type": "timing", + { + name: "timer", + line: "my_timer:3|ms|@0.1|#live,host:localhost", + expected: []telegraf.Metric{ + testutil.MustMetric( + "my_timer", + map[string]string{ + "host": "localhost", + "live": "true", + "metric_type": "timing", + }, + map[string]interface{}{ + "count": 10, + "lower": float64(3), + "mean": float64(3), + "stddev": float64(0), + "sum": float64(30), + "upper": float64(3), + }, + time.Now(), + ), + }, + }, + { + name: "empty tag set", + line: "cpu:42|c|#", + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 42, + }, + time.Now(), + ), + }, }, } - for _, line := range lines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } - } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator - actualTags := map[string]map[string]string{ - "my_gauge": tagsForItem(s.gauges), - "my_counter": tagsForItem(s.counters), - "my_set": tagsForItem(s.sets), - "my_timer": tagsForItem(s.timings), - } - for name, tags := range expectedTags { - for expectedK, expectedV := range tags { - if expectedV != actualTags[name][expectedK] { - t.Errorf("failed: expected: %#v != %#v", tags, actualTags[name]) - } - } - } -} + s := NewTestStatsd() + s.DataDogExtensions = true -func tagsForItem(m interface{}) map[string]string { - switch m.(type) { - case map[string]cachedcounter: - for _, v := range m.(map[string]cachedcounter) { - return v.tags - } - case map[string]cachedgauge: - for _, v := range m.(map[string]cachedgauge) { - return v.tags - } - case map[string]cachedset: - for _, v := range m.(map[string]cachedset) { - return v.tags - } - case map[string]cachedtimings: - for _, v := range m.(map[string]cachedtimings) { - return v.tags - } + err := s.parseStatsdLine(tt.line) + require.NoError(t, err) + err = s.Gather(&acc) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), + testutil.SortMetrics(), testutil.IgnoreTime()) + }) } - return nil } // Test that statsd buckets are parsed to measurement names properly From c5d8e63a0f420d3587bebaeb36df091844909298 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 8 Jul 2019 14:46:00 -0700 Subject: [PATCH 0982/1815] Fix path separator matching in filecount input (#6077) --- internal/globpath/globpath.go | 11 +++++++---- internal/globpath/globpath_test.go | 11 +++++++++++ 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/internal/globpath/globpath.go b/internal/globpath/globpath.go index b21d93520..d4e7ffd87 100644 --- a/internal/globpath/globpath.go +++ b/internal/globpath/globpath.go @@ -21,7 +21,7 @@ func Compile(path string) (*GlobPath, error) { out := GlobPath{ 
hasMeta: hasMeta(path), HasSuperMeta: hasSuperMeta(path), - path: path, + path: filepath.FromSlash(path), } // if there are no glob meta characters in the path, don't bother compiling @@ -41,8 +41,9 @@ func Compile(path string) (*GlobPath, error) { return &out, nil } -// Match returns all files matching the expression -// If it's a static path, returns path +// Match returns all files matching the expression. +// If it's a static path, returns path. +// All returned path will have the host platform separator. func (g *GlobPath) Match() []string { if !g.hasMeta { return []string{g.path} @@ -82,7 +83,8 @@ func (g *GlobPath) Match() []string { return out } -// MatchString test a string against the glob +// MatchString tests the path string against the glob. The path should contain +// the host platform separator. func (g *GlobPath) MatchString(path string) bool { if !g.HasSuperMeta { res, _ := filepath.Match(g.path, path) @@ -96,6 +98,7 @@ func (g *GlobPath) MatchString(path string) bool { // - any directory under these roots may contain a matching file // - no file outside of these roots can match the pattern // Note that it returns both files and directories. +// All returned path will have the host platform separator. func (g *GlobPath) GetRoots() []string { if !g.hasMeta { return []string{g.path} diff --git a/internal/globpath/globpath_test.go b/internal/globpath/globpath_test.go index 476ba9243..60562d8f8 100644 --- a/internal/globpath/globpath_test.go +++ b/internal/globpath/globpath_test.go @@ -87,3 +87,14 @@ func TestMatch_ErrPermission(t *testing.T) { require.Equal(t, test.expected, actual) } } + +func TestWindowsSeparator(t *testing.T) { + if runtime.GOOS != "windows" { + t.Skip("Skipping Windows only test") + } + + glob, err := Compile("testdata/nested1") + require.NoError(t, err) + ok := glob.MatchString("testdata\\nested1") + require.True(t, ok) +} From 601f4991265259e6859270dfd7974772f82f0247 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 8 Jul 2019 14:48:47 -0700 Subject: [PATCH 0983/1815] Remove tail cleanup call to avoid double decrement (#6089) --- plugins/inputs/logparser/logparser.go | 1 - plugins/inputs/tail/tail.go | 4 ---- 2 files changed, 5 deletions(-) diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index a7fd97e8e..e724f2d4b 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -294,7 +294,6 @@ func (l *LogParserPlugin) Stop() { if err != nil { log.Printf("E! 
Error stopping tail on file %s\n", t.Filename) } - t.Cleanup() } close(l.done) l.wg.Wait() diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index bdfa2de44..834d7cf8f 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -9,7 +9,6 @@ import ( "sync" "github.com/influxdata/tail" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/inputs" @@ -213,9 +212,6 @@ func (t *Tail) Stop() { } } - for _, tailer := range t.tailers { - tailer.Cleanup() - } t.wg.Wait() } From 130c5c5f12f85a62df7841f19632d3d8d2286a7b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 9 Jul 2019 10:40:14 -0700 Subject: [PATCH 0984/1815] Fix https support in activemq input (#6092) --- plugins/inputs/activemq/README.md | 60 ++++++----- plugins/inputs/activemq/activemq.go | 132 ++++++++++++++++------- plugins/inputs/activemq/activemq_test.go | 41 +++++++ 3 files changed, 163 insertions(+), 70 deletions(-) diff --git a/plugins/inputs/activemq/README.md b/plugins/inputs/activemq/README.md index b44d12d22..aba5a7f83 100644 --- a/plugins/inputs/activemq/README.md +++ b/plugins/inputs/activemq/README.md @@ -1,4 +1,4 @@ -# Telegraf Input Plugin: ActiveMQ +# ActiveMQ Input Plugin This plugin gather queues, topics & subscribers metrics using ActiveMQ Console API. @@ -7,12 +7,14 @@ This plugin gather queues, topics & subscribers metrics using ActiveMQ Console A ```toml # Description [[inputs.activemq]] - ## Required ActiveMQ Endpoint - # server = "192.168.50.10" + ## ActiveMQ WebConsole URL + url = "http://127.0.0.1:8161" - ## Required ActiveMQ port + ## Required ActiveMQ Endpoint + ## deprecated in 1.11; use the url option + # server = "192.168.50.10" # port = 8161 - + ## Credentials for basic HTTP authentication # username = "admin" # password = "admin" @@ -22,46 +24,41 @@ This plugin gather queues, topics & subscribers metrics using ActiveMQ Console A ## Maximum time to receive response. # response_timeout = "5s" - + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ``` -### Measurements & Fields: +### Metrics Every effort was made to preserve the names based on the XML response from the ActiveMQ Console API. 
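To make that sentence concrete, here is a hedged sketch of how `encoding/xml` maps a console-style `<queue>` element onto struct fields. The sample document and exact tag layout are illustrative rather than captured from a live broker, but they mirror the attribute-based shape the plugin's structs use.

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// queue mirrors the plugin's structs: field tags follow the attribute
// names emitted by the ActiveMQ web console XML endpoints.
type queue struct {
	Name  string `xml:"name,attr"`
	Stats struct {
		Size          int `xml:"size,attr"`
		ConsumerCount int `xml:"consumerCount,attr"`
		EnqueueCount  int `xml:"enqueueCount,attr"`
		DequeueCount  int `xml:"dequeueCount,attr"`
	} `xml:"stats"`
}

func main() {
	// Illustrative body; a real one would come from /admin/xml/queues.jsp.
	body := `<queue name="sandra"><stats size="3" consumerCount="1" enqueueCount="5" dequeueCount="2"/></queue>`

	var q queue
	if err := xml.Unmarshal([]byte(body), &q); err != nil {
		panic(err)
	}
	fmt.Printf("%s size=%d consumers=%d\n", q.Name, q.Stats.Size, q.Stats.ConsumerCount)
}
```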
-- activemq_queues: +- activemq_queues + - tags: + - name + - source + - port + - fields: - size - consumer_count - enqueue_count - dequeue_count - - activemq_topics: ++ activemq_topics + - tags: + - name + - source + - port + - fields: - size - consumer_count - enqueue_count - dequeue_count - - subscribers_metrics: - - pending_queue_size - - dispatched_queue_size - - dispatched_counter - - enqueue_counter - - dequeue_counter - -### Tags: - -- activemq_queues: - - name - - source - - port -- activemq_topics: - - name - - source - - port -- activemq_subscribers: +- activemq_subscribers + - tags: - client_id - subscription_name - connection_id @@ -70,11 +67,16 @@ Every effort was made to preserve the names based on the XML response from the A - active - source - port + - fields: + - pending_queue_size + - dispatched_queue_size + - dispatched_counter + - enqueue_counter + - dequeue_counter -### Example Output: +### Example Output ``` -$ ./telegraf -config telegraf.conf -input-filter activemq -test activemq_queues,name=sandra,host=88284b2fe51b,source=localhost,port=8161 consumer_count=0i,enqueue_count=0i,dequeue_count=0i,size=0i 1492610703000000000 activemq_queues,name=Test,host=88284b2fe51b,source=localhost,port=8161 dequeue_count=0i,size=0i,consumer_count=0i,enqueue_count=0i 1492610703000000000 activemq_topics,name=ActiveMQ.Advisory.MasterBroker\ ,host=88284b2fe51b,source=localhost,port=8161 size=0i,consumer_count=0i,enqueue_count=1i,dequeue_count=0i 1492610703000000000 diff --git a/plugins/inputs/activemq/activemq.go b/plugins/inputs/activemq/activemq.go index 9cc9037ed..9d08661b7 100644 --- a/plugins/inputs/activemq/activemq.go +++ b/plugins/inputs/activemq/activemq.go @@ -5,10 +5,11 @@ import ( "fmt" "io/ioutil" "net/http" + "net/url" + "path" "strconv" - "time" - "strings" + "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" @@ -17,15 +18,17 @@ import ( ) type ActiveMQ struct { - Server string `json:"server"` - Port int `json:"port"` - Username string `json:"username"` - Password string `json:"password"` - Webadmin string `json:"webadmin"` - ResponseTimeout internal.Duration + Server string `toml:"server"` + Port int `toml:"port"` + URL string `toml:"url"` + Username string `toml:"username"` + Password string `toml:"password"` + Webadmin string `toml:"webadmin"` + ResponseTimeout internal.Duration `toml:"response_timeout"` tls.ClientConfig - client *http.Client + client *http.Client + baseURL *url.URL } type Topics struct { @@ -79,17 +82,13 @@ type Stats struct { DequeueCounter int `xml:"dequeueCounter,attr"` } -const ( - QUEUES_STATS = "queues" - TOPICS_STATS = "topics" - SUBSCRIBERS_STATS = "subscribers" -) - var sampleConfig = ` - ## Required ActiveMQ Endpoint - # server = "192.168.50.10" + ## ActiveMQ WebConsole URL + url = "http://127.0.0.1:8161" - ## Required ActiveMQ port + ## Required ActiveMQ Endpoint + ## deprecated in 1.11; use the url option + # server = "127.0.0.1" # port = 8161 ## Credentials for basic HTTP authentication @@ -107,6 +106,7 @@ var sampleConfig = ` # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ` func (a *ActiveMQ) Description() string { @@ -133,32 +133,57 @@ func (a *ActiveMQ) createHttpClient() (*http.Client, error) { return client, nil } -func (a *ActiveMQ) GetMetrics(keyword string) ([]byte, error) { +func (a *ActiveMQ) Init() error { if a.ResponseTimeout.Duration < time.Second { a.ResponseTimeout.Duration = time.Second * 
5 } - if a.client == nil { - client, err := a.createHttpClient() + var err error + u := &url.URL{Scheme: "http", Host: a.Server + ":" + strconv.Itoa(a.Port)} + if a.URL != "" { + u, err = url.Parse(a.URL) if err != nil { - return nil, err + return err } - a.client = client } - url := fmt.Sprintf("http://%s:%d/%s/xml/%s.jsp", a.Server, a.Port, a.Webadmin, keyword) - req, err := http.NewRequest("GET", url, nil) + if !strings.HasPrefix(u.Scheme, "http") { + return fmt.Errorf("invalid scheme %q", u.Scheme) + } + + if u.Hostname() == "" { + return fmt.Errorf("invalid hostname %q", u.Hostname()) + } + + a.baseURL = u + + a.client, err = a.createHttpClient() + if err != nil { + return err + } + return nil +} + +func (a *ActiveMQ) GetMetrics(u string) ([]byte, error) { + req, err := http.NewRequest("GET", u, nil) if err != nil { return nil, err } - req.SetBasicAuth(a.Username, a.Password) + if a.Username != "" || a.Password != "" { + req.SetBasicAuth(a.Username, a.Password) + } + resp, err := a.client.Do(req) if err != nil { return nil, err } - defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("GET %s returned status %q", u, resp.Status) + } + return ioutil.ReadAll(resp.Body) } @@ -168,8 +193,8 @@ func (a *ActiveMQ) GatherQueuesMetrics(acc telegraf.Accumulator, queues Queues) tags := make(map[string]string) tags["name"] = strings.TrimSpace(queue.Name) - tags["source"] = a.Server - tags["port"] = strconv.Itoa(a.Port) + tags["source"] = a.baseURL.Hostname() + tags["port"] = a.baseURL.Port() records["size"] = queue.Stats.Size records["consumer_count"] = queue.Stats.ConsumerCount @@ -186,8 +211,8 @@ func (a *ActiveMQ) GatherTopicsMetrics(acc telegraf.Accumulator, topics Topics) tags := make(map[string]string) tags["name"] = topic.Name - tags["source"] = a.Server - tags["port"] = strconv.Itoa(a.Port) + tags["source"] = a.baseURL.Hostname() + tags["port"] = a.baseURL.Port() records["size"] = topic.Stats.Size records["consumer_count"] = topic.Stats.ConsumerCount @@ -209,8 +234,8 @@ func (a *ActiveMQ) GatherSubscribersMetrics(acc telegraf.Accumulator, subscriber tags["destination_name"] = subscriber.DestinationName tags["selector"] = subscriber.Selector tags["active"] = subscriber.Active - tags["source"] = a.Server - tags["port"] = strconv.Itoa(a.Port) + tags["source"] = a.baseURL.Hostname() + tags["port"] = a.baseURL.Port() records["pending_queue_size"] = subscriber.Stats.PendingQueueSize records["dispatched_queue_size"] = subscriber.Stats.DispatchedQueueSize @@ -223,25 +248,34 @@ func (a *ActiveMQ) GatherSubscribersMetrics(acc telegraf.Accumulator, subscriber } func (a *ActiveMQ) Gather(acc telegraf.Accumulator) error { - dataQueues, err := a.GetMetrics(QUEUES_STATS) + dataQueues, err := a.GetMetrics(a.QueuesURL()) + if err != nil { + return err + } queues := Queues{} err = xml.Unmarshal(dataQueues, &queues) if err != nil { - return err + return fmt.Errorf("queues XML unmarshal error: %v", err) } - dataTopics, err := a.GetMetrics(TOPICS_STATS) + dataTopics, err := a.GetMetrics(a.TopicsURL()) + if err != nil { + return err + } topics := Topics{} err = xml.Unmarshal(dataTopics, &topics) if err != nil { - return err + return fmt.Errorf("topics XML unmarshal error: %v", err) } - dataSubscribers, err := a.GetMetrics(SUBSCRIBERS_STATS) + dataSubscribers, err := a.GetMetrics(a.SubscribersURL()) + if err != nil { + return err + } subscribers := Subscribers{} err = xml.Unmarshal(dataSubscribers, &subscribers) if err != nil { - return err + return fmt.Errorf("subscribers 
XML unmarshal error: %v", err) } a.GatherQueuesMetrics(acc, queues) @@ -251,11 +285,27 @@ func (a *ActiveMQ) Gather(acc telegraf.Accumulator) error { return nil } +func (a *ActiveMQ) QueuesURL() string { + ref := url.URL{Path: path.Join("/", a.Webadmin, "/xml/queues.jsp")} + return a.baseURL.ResolveReference(&ref).String() +} + +func (a *ActiveMQ) TopicsURL() string { + ref := url.URL{Path: path.Join("/", a.Webadmin, "/xml/topics.jsp")} + return a.baseURL.ResolveReference(&ref).String() +} + +func (a *ActiveMQ) SubscribersURL() string { + ref := url.URL{Path: path.Join("/", a.Webadmin, "/xml/subscribers.jsp")} + return a.baseURL.ResolveReference(&ref).String() +} + func init() { inputs.Add("activemq", func() telegraf.Input { return &ActiveMQ{ - Server: "localhost", - Port: 8161, + Server: "localhost", + Port: 8161, + Webadmin: "admin", } }) } diff --git a/plugins/inputs/activemq/activemq_test.go b/plugins/inputs/activemq/activemq_test.go index c277af3c5..407a38177 100644 --- a/plugins/inputs/activemq/activemq_test.go +++ b/plugins/inputs/activemq/activemq_test.go @@ -2,9 +2,12 @@ package activemq import ( "encoding/xml" + "net/http" + "net/http/httptest" "testing" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestGatherQueuesMetrics(t *testing.T) { @@ -47,6 +50,7 @@ func TestGatherQueuesMetrics(t *testing.T) { activeMQ := new(ActiveMQ) activeMQ.Server = "localhost" activeMQ.Port = 8161 + activeMQ.Init() activeMQ.GatherQueuesMetrics(&acc, queues) acc.AssertContainsTaggedFields(t, "activemq_queues", records, tags) @@ -93,6 +97,7 @@ func TestGatherTopicsMetrics(t *testing.T) { activeMQ := new(ActiveMQ) activeMQ.Server = "localhost" activeMQ.Port = 8161 + activeMQ.Init() activeMQ.GatherTopicsMetrics(&acc, topics) acc.AssertContainsTaggedFields(t, "activemq_topics", records, tags) @@ -133,7 +138,43 @@ func TestGatherSubscribersMetrics(t *testing.T) { activeMQ := new(ActiveMQ) activeMQ.Server = "localhost" activeMQ.Port = 8161 + activeMQ.Init() activeMQ.GatherSubscribersMetrics(&acc, subscribers) acc.AssertContainsTaggedFields(t, "activemq_subscribers", records, tags) } + +func TestURLs(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/admin/xml/queues.jsp": + w.WriteHeader(http.StatusOK) + w.Write([]byte("")) + case "/admin/xml/topics.jsp": + w.WriteHeader(http.StatusOK) + w.Write([]byte("")) + case "/admin/xml/subscribers.jsp": + w.WriteHeader(http.StatusOK) + w.Write([]byte("")) + default: + w.WriteHeader(http.StatusNotFound) + t.Fatalf("unexpected path: " + r.URL.Path) + } + }) + + plugin := ActiveMQ{ + URL: "http://" + ts.Listener.Addr().String(), + Webadmin: "admin", + } + err := plugin.Init() + require.NoError(t, err) + + var acc testutil.Accumulator + err = plugin.Gather(&acc) + require.NoError(t, err) + + require.Len(t, acc.GetTelegrafMetrics(), 0) +} From adec1eba0c71e9599a91061840b3edf15adede80 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 9 Jul 2019 10:52:12 -0700 Subject: [PATCH 0985/1815] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0ebe9f9c7..129ea6329 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,10 @@ - [#6056](https://github.com/influxdata/telegraf/pull/6056): Fix source address ping flag on BSD. 
- [#6059](https://github.com/influxdata/telegraf/issues/6059): Fix value out of range error on 32-bit systems in bind input. +- [#3573](https://github.com/influxdata/telegraf/issues/3573): Fix tail and logparser stop working after reload. +- [#6077](https://github.com/influxdata/telegraf/pull/6077): Fix filecount path separator handling in Windows. +- [#6075](https://github.com/influxdata/telegraf/issues/6075): Fix panic with empty datadog tag string. +- [#6069](https://github.com/influxdata/telegraf/issues/6069): Apply topic filter to partition metrics in burrow input. ## v1.11.1 [2019-06-25] From f46a9c02c64f782a04d77c4b225807f628f95df3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 9 Jul 2019 11:10:02 -0700 Subject: [PATCH 0986/1815] Set 1.11.2 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 129ea6329..bc0829037 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,7 +41,7 @@ - [#4356](https://github.com/influxdata/telegraf/issues/4356): Fix double pct replacement in sysstat input. - [#6004](https://github.com/influxdata/telegraf/issues/6004): Fix race in master node detection in elasticsearch input. -## v1.11.2 [unreleased] +## v1.11.2 [2019-07-09] #### Bugfixes From 10c31ca2096fbf01e6e9d7031992e2c4b4f9d8c3 Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Tue, 9 Jul 2019 18:25:53 -0400 Subject: [PATCH 0987/1815] Fix reconnection when vCenter is rebooted (#6085) --- plugins/inputs/vsphere/client.go | 47 ++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 18 deletions(-) diff --git a/plugins/inputs/vsphere/client.go b/plugins/inputs/vsphere/client.go index ca7af5843..0d78cac01 100644 --- a/plugins/inputs/vsphere/client.go +++ b/plugins/inputs/vsphere/client.go @@ -61,28 +61,39 @@ func NewClientFactory(ctx context.Context, url *url.URL, parent *VSphere) *Clien func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) { cf.mux.Lock() defer cf.mux.Unlock() - if cf.client == nil { - var err error - if cf.client, err = NewClient(ctx, cf.url, cf.parent); err != nil { - return nil, err + retrying := false + for { + if cf.client == nil { + var err error + if cf.client, err = NewClient(ctx, cf.url, cf.parent); err != nil { + return nil, err + } } - } - // Execute a dummy call against the server to make sure the client is - // still functional. If not, try to log back in. If that doesn't work, - // we give up. - ctx1, cancel1 := context.WithTimeout(ctx, cf.parent.Timeout.Duration) - defer cancel1() - if _, err := methods.GetCurrentTime(ctx1, cf.client.Client); err != nil { - log.Printf("I! [inputs.vsphere]: Client session seems to have time out. Reauthenticating!") - ctx2, cancel2 := context.WithTimeout(ctx, cf.parent.Timeout.Duration) - defer cancel2() - if cf.client.Client.SessionManager.Login(ctx2, url.UserPassword(cf.parent.Username, cf.parent.Password)) != nil { - return nil, fmt.Errorf("Renewing authentication failed: %v", err) + // Execute a dummy call against the server to make sure the client is + // still functional. If not, try to log back in. If that doesn't work, + // we give up. + ctx1, cancel1 := context.WithTimeout(ctx, cf.parent.Timeout.Duration) + defer cancel1() + if _, err := methods.GetCurrentTime(ctx1, cf.client.Client); err != nil { + log.Printf("I! [inputs.vsphere]: Client session seems to have time out. 
Reauthenticating!") + ctx2, cancel2 := context.WithTimeout(ctx, cf.parent.Timeout.Duration) + defer cancel2() + if err := cf.client.Client.SessionManager.Login(ctx2, url.UserPassword(cf.parent.Username, cf.parent.Password)); err != nil { + if !retrying { + // The client went stale. Probably because someone rebooted vCenter. Clear it to + // force us to create a fresh one. We only get one chance at this. If we fail a second time + // we will simply skip this collection round and hope things have stabilized for the next one. + retrying = true + cf.client = nil + continue + } + return nil, fmt.Errorf("Renewing authentication failed: %v", err) + } } - } - return cf.client, nil + return cf.client, nil + } } // NewClient creates a new vSphere client based on the url and setting passed as parameters. From 1e12006ad603800428fff1b2911bc3c5ec3682c0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 9 Jul 2019 15:27:43 -0700 Subject: [PATCH 0988/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bc0829037..c0d9a57ff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,12 @@ - [#4356](https://github.com/influxdata/telegraf/issues/4356): Fix double pct replacement in sysstat input. - [#6004](https://github.com/influxdata/telegraf/issues/6004): Fix race in master node detection in elasticsearch input. +## v1.11.3 [unreleased] + +#### Bugfixes + +- [#6054](https://github.com/influxdata/telegraf/issues/6054): Fix unable to reconnect after vCenter reboot in vsphere input. + ## v1.11.2 [2019-07-09] #### Bugfixes From aa07b95e00e279cd61694984bc6586dda3d4c7f7 Mon Sep 17 00:00:00 2001 From: Aaron Wood Date: Tue, 9 Jul 2019 15:45:02 -0700 Subject: [PATCH 0989/1815] Handle unknown error in nvidia-smi output (#6073) --- plugins/inputs/nvidia_smi/README.md | 4 ++++ plugins/inputs/nvidia_smi/nvidia_smi.go | 11 ++++++++++- plugins/inputs/nvidia_smi/nvidia_smi_test.go | 7 +++++++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/nvidia_smi/README.md b/plugins/inputs/nvidia_smi/README.md index c3bac8da5..7fe0c077a 100644 --- a/plugins/inputs/nvidia_smi/README.md +++ b/plugins/inputs/nvidia_smi/README.md @@ -59,3 +59,7 @@ nvidia_smi,compute_mode=Default,host=8218cf,index=0,name=GeForce\ GTX\ 1070,psta nvidia_smi,compute_mode=Default,host=8218cf,index=1,name=GeForce\ GTX\ 1080,pstate=P2,uuid=GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665 fan_speed=100i,memory_free=7557i,memory_total=8114i,memory_used=557i,temperature_gpu=50i,utilization_gpu=100i,utilization_memory=85i 1523991122000000000 nvidia_smi,compute_mode=Default,host=8218cf,index=2,name=GeForce\ GTX\ 1080,pstate=P2,uuid=GPU-d4cfc28d-0481-8d07-b81a-ddfc63d74adf fan_speed=100i,memory_free=7557i,memory_total=8114i,memory_used=557i,temperature_gpu=58i,utilization_gpu=100i,utilization_memory=86i 1523991122000000000 ``` + +### Limitations +Note that there seems to be an issue with getting current memory clock values when the memory is overclocked. +This may or may not apply to everyone but it's confirmed to be an issue on an EVGA 2080 Ti. 
diff --git a/plugins/inputs/nvidia_smi/nvidia_smi.go b/plugins/inputs/nvidia_smi/nvidia_smi.go index 37dde689a..e2ec19959 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi.go @@ -143,7 +143,16 @@ func parseLine(line string) (map[string]string, map[string]interface{}, error) { continue } - if strings.Contains(col, "[Not Supported]") { + // In some cases we may not be able to get data. + // One such case is when the memory is overclocked. + // nvidia-smi reads the max supported memory clock from the stock value. + // If the current memory clock is greater than the max detected memory clock then we receive [Unknown Error] as a value. + + // For example, the stock max memory clock speed on a 2080 Ti is 7000 MHz which nvidia-smi detects. + // The user has overclocked their memory using an offset of +1000 so under load the memory clock reaches 8000 MHz. + // Now when nvidia-smi tries to read the current memory clock it fails and spits back [Unknown Error] as the value. + // This value will break the parsing logic below unless it is accounted for here. + if strings.Contains(col, "[Not Supported]") || strings.Contains(col, "[Unknown Error]") { continue } diff --git a/plugins/inputs/nvidia_smi/nvidia_smi_test.go b/plugins/inputs/nvidia_smi/nvidia_smi_test.go index 4e0cc8eac..a16447d69 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi_test.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi_test.go @@ -42,3 +42,10 @@ func TestParseLineNotSupported(t *testing.T) { require.NoError(t, err) require.Equal(t, nil, fields["fan_speed"]) } + +func TestParseLineUnknownError(t *testing.T) { + line := "[Unknown Error], 11264, 1074, 10190, P8, 32, GeForce RTX 2080 Ti, GPU-c97b7f88-c06d-650f-5339-f8dd0c1315c0, Default, 1, 4, 0, 24.33, 1, 16, 0, 0, 0, 300, 300, 405, 540\n" + _, fields, err := parseLine(line) + require.NoError(t, err) + require.Equal(t, nil, fields["fan_speed"]) +} From 70e2ccce75747eeb85e97b6d3e7ad561ee98588f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 9 Jul 2019 15:45:59 -0700 Subject: [PATCH 0990/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c0d9a57ff..a1d538ba0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,7 @@ #### Bugfixes - [#6054](https://github.com/influxdata/telegraf/issues/6054): Fix unable to reconnect after vCenter reboot in vsphere input. +- [#6073](https://github.com/influxdata/telegraf/issues/6073): Handle unknown error in nvidia-smi output. 
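The [#6054] entry above corresponds to the retrying loop added to `client.go` earlier in this series: probe the cached client, rebuild it exactly once if the probe fails, and give up for the round on a second failure. Here is a minimal, generic sketch of that shape with stand-in dial and probe functions; none of these names are the plugin's real API.

```go
package main

import "fmt"

// get returns a usable client, rebuilding the cached one at most once
// when the liveness probe fails, mirroring the vsphere reconnect fix.
func get(cached *int, dial func() int, probe func(int) bool) (int, bool) {
	retrying := false
	for {
		if *cached == 0 {
			*cached = dial()
		}
		if !probe(*cached) {
			if !retrying {
				retrying = true
				*cached = 0 // drop the stale client and try exactly once more
				continue
			}
			return 0, false // second failure: skip this collection round
		}
		return *cached, true
	}
}

func main() {
	dials := 0
	dial := func() int { dials++; return dials }
	probe := func(c int) bool { return c > 1 } // treat the first client as stale

	cached := 0
	if c, ok := get(&cached, dial, probe); ok {
		fmt.Println("using client", c) // prints: using client 2
	}
}
```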
## v1.11.2 [2019-07-09] From 72c2ac964875202f1c7d5d3e751e1464af04d719 Mon Sep 17 00:00:00 2001 From: Pitxyoki Date: Wed, 10 Jul 2019 00:50:20 +0100 Subject: [PATCH 0991/1815] Support floats in statsd percentiles (#5572) --- internal/internal.go | 14 ++++++++++++++ plugins/inputs/statsd/README.md | 4 ++-- plugins/inputs/statsd/running_stats.go | 12 ++++++------ plugins/inputs/statsd/running_stats_test.go | 15 +++++++++++++++ plugins/inputs/statsd/statsd.go | 8 ++++---- plugins/inputs/statsd/statsd_test.go | 7 ++++--- 6 files changed, 45 insertions(+), 15 deletions(-) diff --git a/internal/internal.go b/internal/internal.go index c191eac94..a38f7703a 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -48,6 +48,10 @@ type Size struct { Size int64 } +type Number struct { + Value float64 +} + // SetVersion sets the telegraf agent version func SetVersion(v string) error { if version != "" { @@ -124,6 +128,16 @@ func (s *Size) UnmarshalTOML(b []byte) error { return nil } +func (n *Number) UnmarshalTOML(b []byte) error { + value, err := strconv.ParseFloat(string(b), 64) + if err != nil { + return err + } + + n.Value = value + return nil +} + // ReadLines reads contents from a file and splits them by new lines. // A convenience wrapper to ReadLinesOffsetN(filename, 0, -1). func ReadLines(filename string) ([]string, error) { diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index a33480f61..79f759817 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -34,8 +34,8 @@ ## Reset timings & histograms every interval (default=true) delete_timings = true - ## Percentiles to calculate for timing & histogram stats - percentiles = [90] + ## Percentiles to calculate for timing & histogram stats. 
+ percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0] ## separator to use between elements of a statsd metric metric_separator = "_" diff --git a/plugins/inputs/statsd/running_stats.go b/plugins/inputs/statsd/running_stats.go index 6f8045b42..e33749b2c 100644 --- a/plugins/inputs/statsd/running_stats.go +++ b/plugins/inputs/statsd/running_stats.go @@ -99,7 +99,7 @@ func (rs *RunningStats) Count() int64 { return rs.n } -func (rs *RunningStats) Percentile(n int) float64 { +func (rs *RunningStats) Percentile(n float64) float64 { if n > 100 { n = 100 } @@ -109,16 +109,16 @@ func (rs *RunningStats) Percentile(n int) float64 { rs.sorted = true } - i := int(float64(len(rs.perc)) * float64(n) / float64(100)) + i := float64(len(rs.perc)) * n / float64(100) return rs.perc[clamp(i, 0, len(rs.perc)-1)] } -func clamp(i int, min int, max int) int { - if i < min { +func clamp(i float64, min int, max int) int { + if i < float64(min) { return min } - if i > max { + if i > float64(max) { return max } - return i + return int(i) } diff --git a/plugins/inputs/statsd/running_stats_test.go b/plugins/inputs/statsd/running_stats_test.go index 4571f76d7..a52209c56 100644 --- a/plugins/inputs/statsd/running_stats_test.go +++ b/plugins/inputs/statsd/running_stats_test.go @@ -26,6 +26,9 @@ func TestRunningStats_Single(t *testing.T) { if rs.Percentile(100) != 10.1 { t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(100)) } + if rs.Percentile(99.95) != 10.1 { + t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(99.95)) + } if rs.Percentile(90) != 10.1 { t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(90)) } @@ -67,6 +70,9 @@ func TestRunningStats_Duplicate(t *testing.T) { if rs.Percentile(100) != 10.1 { t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(100)) } + if rs.Percentile(99.95) != 10.1 { + t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(99.95)) + } if rs.Percentile(90) != 10.1 { t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(90)) } @@ -108,12 +114,21 @@ func TestRunningStats(t *testing.T) { if rs.Percentile(100) != 45 { t.Errorf("Expected %v, got %v", 45, rs.Percentile(100)) } + if rs.Percentile(99.98) != 45 { + t.Errorf("Expected %v, got %v", 45, rs.Percentile(99.98)) + } if rs.Percentile(90) != 32 { t.Errorf("Expected %v, got %v", 32, rs.Percentile(90)) } + if rs.Percentile(50.1) != 11 { + t.Errorf("Expected %v, got %v", 11, rs.Percentile(50.1)) + } if rs.Percentile(50) != 11 { t.Errorf("Expected %v, got %v", 11, rs.Percentile(50)) } + if rs.Percentile(49.9) != 10 { + t.Errorf("Expected %v, got %v", 10, rs.Percentile(49.9)) + } if rs.Percentile(0) != 5 { t.Errorf("Expected %v, got %v", 5, rs.Percentile(0)) } diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 89d67b1ee..8979b9c02 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -55,7 +55,7 @@ type Statsd struct { // Percentiles specifies the percentiles that will be calculated for timing // and histogram stats. 
- Percentiles []int + Percentiles []internal.Number PercentileLimit int DeleteGauges bool @@ -217,7 +217,7 @@ const sampleConfig = ` delete_timings = true ## Percentiles to calculate for timing & histogram stats - percentiles = [90] + percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0] ## separator to use between elements of a statsd metric metric_separator = "_" @@ -271,8 +271,8 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error { fields[prefix+"lower"] = stats.Lower() fields[prefix+"count"] = stats.Count() for _, percentile := range s.Percentiles { - name := fmt.Sprintf("%s%v_percentile", prefix, percentile) - fields[name] = stats.Percentile(percentile) + name := fmt.Sprintf("%s%v_percentile", prefix, percentile.Value) + fields[name] = stats.Percentile(percentile.Value) } } diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index 80b544234..e629f164f 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -397,7 +398,7 @@ func TestParse_Counters(t *testing.T) { // Tests low-level functionality of timings func TestParse_Timings(t *testing.T) { s := NewTestStatsd() - s.Percentiles = []int{90} + s.Percentiles = []internal.Number{{Value: 90.0}} acc := &testutil.Accumulator{} // Test that counters work @@ -1181,7 +1182,7 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) { func TestParse_TimingsMultipleFieldsWithTemplate(t *testing.T) { s := NewTestStatsd() s.Templates = []string{"measurement.field"} - s.Percentiles = []int{90} + s.Percentiles = []internal.Number{{Value: 90.0}} acc := &testutil.Accumulator{} validLines := []string{ @@ -1232,7 +1233,7 @@ func TestParse_TimingsMultipleFieldsWithTemplate(t *testing.T) { func TestParse_TimingsMultipleFieldsWithoutTemplate(t *testing.T) { s := NewTestStatsd() s.Templates = []string{} - s.Percentiles = []int{90} + s.Percentiles = []internal.Number{{Value: 90.0}} acc := &testutil.Accumulator{} validLines := []string{ From 1f3951d36f8f08ce831c61897ace293a253b6069 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 9 Jul 2019 16:52:13 -0700 Subject: [PATCH 0992/1815] Update changelog --- CHANGELOG.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a1d538ba0..c8ee654c3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,9 +31,10 @@ - [#5996](https://github.com/influxdata/telegraf/pull/5996): Add container uptime_ns in docker input plugin. - [#6016](https://github.com/influxdata/telegraf/pull/6016): Add better user-facing errors for API timeouts in docker input. - [#6027](https://github.com/influxdata/telegraf/pull/6027): Add TLS mutal auth support to jti_openconfig_telemetry input. -- [#6053](https://github.com/influxdata/telegraf/pull/6053): Add support for ES 7.x to elasticsearch output -- [#6062](https://github.com/influxdata/telegraf/pull/6062): Add basic auth to prometheus input plugin -- [#6064](https://github.com/influxdata/telegraf/pull/6064): Add node roles tag to elasticsearch input +- [#6053](https://github.com/influxdata/telegraf/pull/6053): Add support for ES 7.x to elasticsearch output. +- [#6062](https://github.com/influxdata/telegraf/pull/6062): Add basic auth to prometheus input plugin. 
+- [#6064](https://github.com/influxdata/telegraf/pull/6064): Add node roles tag to elasticsearch input. +- [#5572](https://github.com/influxdata/telegraf/pull/5572): Support floats in statsd percentiles. #### Bugfixes From c9107015b074c0ce1648584c7061ec13f1d21fea Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 11 Jul 2019 13:50:12 -0700 Subject: [PATCH 0993/1815] Emit sarama library log messages at debug level (#6091) --- plugins/outputs/kafka/kafka.go | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 3df5a3a67..7ba457c59 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -62,6 +62,26 @@ type ( } ) +// DebugLogger logs messages from sarama at the debug level. +type DebugLogger struct { +} + +func (*DebugLogger) Print(v ...interface{}) { + args := make([]interface{}, 0, len(v)+1) + args = append(args, "D! [sarama] ") + log.Print(v...) +} + +func (*DebugLogger) Printf(format string, v ...interface{}) { + log.Printf("D! [sarama] "+format, v...) +} + +func (*DebugLogger) Println(v ...interface{}) { + args := make([]interface{}, 0, len(v)+1) + args = append(args, "D! [sarama] ") + log.Println(args...) +} + var sampleConfig = ` ## URLs of kafka brokers brokers = ["localhost:9092"] @@ -327,6 +347,7 @@ func (k *Kafka) Write(metrics []telegraf.Metric) error { } func init() { + sarama.Logger = &DebugLogger{} outputs.Add("kafka", func() telegraf.Output { return &Kafka{ MaxRetry: 3, From ea6b398fa3cf61443d06e5e8eb2b0c67b1d2e21a Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Thu, 11 Jul 2019 16:07:58 -0600 Subject: [PATCH 0994/1815] Add native Go ping method to ping input plugin (#6050) --- Gopkg.lock | 10 + plugins/inputs/ping/README.md | 39 ++- plugins/inputs/ping/ping.go | 423 +++++++++++++------------ plugins/inputs/ping/ping_notwindows.go | 212 +++++++++++++ plugins/inputs/ping/ping_test.go | 23 +- plugins/inputs/ping/ping_windows.go | 106 +------ 6 files changed, 492 insertions(+), 321 deletions(-) create mode 100644 plugins/inputs/ping/ping_notwindows.go diff --git a/Gopkg.lock b/Gopkg.lock index bcdf6cd07..f5bc5a0cd 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -423,6 +423,14 @@ pruneopts = "" revision = "25d852aebe32c875e9c044af3eef9c7dc6bc777f" +[[projects]] + digest = "1:c6f371f2b02c751a83be83139a12a5467e55393feda16d4f8dfa95adfc4efede" + name = "github.com/glinton/ping" + packages = ["."] + pruneopts = "" + revision = "1983bc2fd5de3ea00aa5457bbc8774300e889db9" + version = "v0.1.1" + [[projects]] digest = "1:858b7fe7b0f4bc7ef9953926828f2816ea52d01a88d72d1c45bc8c108f23c356" name = "github.com/go-ini/ini" @@ -1266,6 +1274,7 @@ "http/httpguts", "http2", "http2/hpack", + "icmp", "idna", "internal/iana", "internal/socket", @@ -1603,6 +1612,7 @@ "github.com/ericchiang/k8s/apis/resource", "github.com/ericchiang/k8s/util/intstr", "github.com/ghodss/yaml", + "github.com/glinton/ping", "github.com/go-logfmt/logfmt", "github.com/go-redis/redis", "github.com/go-sql-driver/mysql", diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 5d3904e92..8f1e3cf6f 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -9,6 +9,10 @@ use the iputils-ping implementation: apt-get install iputils-ping ``` +When using `method = "native"` a ping is sent and the results are reported in pure go, eliminating the need to execute the system `ping` command. 
Not using the system binary allows the use of this plugin on non-English systems.
+
+There is currently no support for TTL on Windows with `"native"`; track progress at https://github.com/golang/go/issues/7175 and https://github.com/golang/go/issues/7174
+
 ### Configuration:
 
 ```toml
@@ -33,12 +37,18 @@ apt-get install iputils-ping
   ## on Darwin and Freebsd only source address possible: (ping -S )
   # interface = ""
 
+  ## How to ping. "native" doesn't have external dependencies, while "exec" depends on 'ping'.
+  # method = "exec"
+
   ## Specify the ping executable binary, default is "ping"
   # binary = "ping"
 
-  ## Arguments for ping command
-  ## when arguments is not empty, other options (ping_interval, timeout, etc) will be ignored
+  ## Arguments for ping command. When arguments is not empty, system binary will be used and
+  ## other options (ping_interval, timeout, etc) will be ignored
   # arguments = ["-c", "3"]
+
+  ## Use only ipv6 addresses when resolving hostnames.
+  # ipv6 = false
 ```
 
 #### File Limit
@@ -62,6 +72,21 @@ Set the file number limit:
 LimitNOFILE=4096
 ```
 
+#### Permission Caveat (non-Windows)
+
+It is preferred that this plugin listen on privileged ICMP sockets. To do so, telegraf can either be run as the root user, or the root user can grant telegraf the capability to access raw sockets by running the following command:
+
+```
+setcap cap_net_raw=eip /path/to/telegraf
+```
+
+Another option (which doesn't work as well in all circumstances) is to listen on unprivileged raw sockets (non-Windows only). The system group of the user running telegraf must be allowed to create ICMP Echo sockets. [See man pages icmp(7) for `ping_group_range`](http://man7.org/linux/man-pages/man7/icmp.7.html). On Linux hosts, run the following to give a group the proper permissions:
+
+```
+sudo sysctl -w net.ipv4.ping_group_range="GROUP_ID_LOW GROUP_ID_HIGH"
+```
+
+
 ### Metrics:
 
 - ping
@@ -75,15 +100,15 @@
   - average_response_ms (integer)
   - minimum_response_ms (integer)
   - maximum_response_ms (integer)
-  - standard_deviation_ms (integer, Not available on Windows)
+  - standard_deviation_ms (integer, Available on Windows only with native ping)
   - errors (float, Windows only)
-  - reply_received (integer, Windows only)
-  - percent_reply_loss (float, Windows only)
+  - reply_received (integer, Windows only*)
+  - percent_reply_loss (float, Windows only*)
   - result_code (int, success = 0, no such host = 1, ping error = 2)
 
 ##### reply_received vs packets_received
 
-On Windows systems, "Destination net unreachable" reply will increment `packets_received` but not `reply_received`.
+On Windows systems, "Destination net unreachable" reply will increment `packets_received` but not `reply_received`* ### Example Output: @@ -96,3 +121,5 @@ ping,url=example.org result_code=0i,average_response_ms=7i,maximum_response_ms=9 ``` ping,url=example.org average_response_ms=23.066,ttl=63,maximum_response_ms=24.64,minimum_response_ms=22.451,packets_received=5i,packets_transmitted=5i,percent_packet_loss=0,result_code=0i,standard_deviation_ms=0.809 1535747258000000000 ``` + +*not when `method = "native"` is used diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index efd1da32e..469859a34 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -1,20 +1,17 @@ -// +build !windows - package ping import ( + "context" "errors" - "fmt" + "math" "net" "os/exec" - "regexp" "runtime" - "strconv" - "strings" "sync" - "syscall" "time" + "github.com/glinton/ping" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" @@ -34,7 +31,7 @@ type Ping struct { // Number of pings to send (ping -c ) Count int - // Ping timeout, in seconds. 0 means no timeout (ping -W ) + // Per-ping timeout, in seconds. 0 means no timeout (ping -W ) Timeout float64 // Ping deadline, in seconds. 0 means no deadline. (ping -w ) @@ -46,18 +43,27 @@ type Ping struct { // URLs to ping Urls []string + // Method defines how to ping (native or exec) + Method string + // Ping executable binary Binary string - // Arguments for ping command. - // when `Arguments` is not empty, other options (ping_interval, timeout, etc) will be ignored + // Arguments for ping command. When arguments is not empty, system binary will be used and + // other options (ping_interval, timeout, etc) will be ignored Arguments []string + // Whether to resolve addresses using ipv6 or not. + IPv6 bool + // host ping function pingHost HostPinger + + // listenAddr is the address associated with the interface defined. + listenAddr string } -func (_ *Ping) Description() string { +func (*Ping) Description() string { return "Ping given url(s) and return statistics" } @@ -69,7 +75,6 @@ const sampleConfig = ` # count = 1 ## Interval, in s, at which to ping. 0 == default (ping -i ) - ## Not available in Windows. # ping_interval = 1.0 ## Per-ping timeout, in s. 0 == no timeout (ping -W ) @@ -78,27 +83,53 @@ const sampleConfig = ` ## Total-ping deadline, in s. 0 == no deadline (ping -w ) # deadline = 10 - ## Interface or source address to send ping from (ping -I ) - ## on Darwin and Freebsd only source address possible: (ping -S ) + ## Interface or source address to send ping from (ping -I[-S] ) # interface = "" - ## Specify the ping executable binary, default is "ping" - # binary = "ping" + ## How to ping. "native" doesn't have external dependencies, while "exec" depends on 'ping'. + # method = "exec" - ## Arguments for ping command - ## when arguments is not empty, other options (ping_interval, timeout, etc) will be ignored + ## Specify the ping executable binary, default is "ping" + # binary = "ping" + + ## Arguments for ping command. When arguments is not empty, system binary will be used and + ## other options (ping_interval, timeout, etc) will be ignored. # arguments = ["-c", "3"] + + ## Use only ipv6 addresses when resolving hostnames. 
+ # ipv6 = false ` -func (_ *Ping) SampleConfig() string { +func (*Ping) SampleConfig() string { return sampleConfig } func (p *Ping) Gather(acc telegraf.Accumulator) error { - // Spin off a go routine for each url to ping - for _, url := range p.Urls { - p.wg.Add(1) - go p.pingToURL(url, acc) + if p.Interface != "" && p.listenAddr != "" { + p.listenAddr = getAddr(p.Interface) + } + + for _, ip := range p.Urls { + _, err := net.LookupHost(ip) + if err != nil { + acc.AddFields("ping", map[string]interface{}{"result_code": 1}, map[string]string{"ip": ip}) + acc.AddError(err) + return nil + } + + if p.Method == "native" { + p.wg.Add(1) + go func(ip string) { + defer p.wg.Done() + p.pingToURLNative(ip, acc) + }(ip) + } else { + p.wg.Add(1) + go func(ip string) { + defer p.wg.Done() + p.pingToURL(ip, acc) + }(ip) + } } p.wg.Wait() @@ -106,81 +137,39 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error { return nil } -func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) { - defer p.wg.Done() - tags := map[string]string{"url": u} - fields := map[string]interface{}{"result_code": 0} - - _, err := net.LookupHost(u) - if err != nil { - acc.AddError(err) - fields["result_code"] = 1 - acc.AddFields("ping", fields, tags) - return +func getAddr(iface string) string { + if addr := net.ParseIP(iface); addr != nil { + return addr.String() } - args := p.args(u, runtime.GOOS) - totalTimeout := 60.0 - if len(p.Arguments) == 0 { - totalTimeout = float64(p.Count)*p.Timeout + float64(p.Count-1)*p.PingInterval + ifaces, err := net.Interfaces() + if err != nil { + return "" } - out, err := p.pingHost(p.Binary, totalTimeout, args...) - if err != nil { - // Some implementations of ping return a 1 exit code on - // timeout, if this occurs we will not exit and try to parse - // the output. 
- status := -1 - if exitError, ok := err.(*exec.ExitError); ok { - if ws, ok := exitError.Sys().(syscall.WaitStatus); ok { - status = ws.ExitStatus() - fields["result_code"] = status + var ip net.IP + for i := range ifaces { + if ifaces[i].Name == iface { + addrs, err := ifaces[i].Addrs() + if err != nil { + return "" + } + if len(addrs) > 0 { + switch v := addrs[0].(type) { + case *net.IPNet: + ip = v.IP + case *net.IPAddr: + ip = v.IP + } + if len(ip) == 0 { + return "" + } + return ip.String() } } - - if status != 1 { - // Combine go err + stderr output - out = strings.TrimSpace(out) - if len(out) > 0 { - acc.AddError(fmt.Errorf("host %s: %s, %s", u, out, err)) - } else { - acc.AddError(fmt.Errorf("host %s: %s", u, err)) - } - fields["result_code"] = 2 - acc.AddFields("ping", fields, tags) - return - } } - trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(out) - if err != nil { - // fatal error - acc.AddError(fmt.Errorf("%s: %s", err, u)) - fields["result_code"] = 2 - acc.AddFields("ping", fields, tags) - return - } - // Calculate packet loss percentage - loss := float64(trans-rec) / float64(trans) * 100.0 - fields["packets_transmitted"] = trans - fields["packets_received"] = rec - fields["percent_packet_loss"] = loss - if ttl >= 0 { - fields["ttl"] = ttl - } - if min >= 0 { - fields["minimum_response_ms"] = min - } - if avg >= 0 { - fields["average_response_ms"] = avg - } - if max >= 0 { - fields["maximum_response_ms"] = max - } - if stddev >= 0 { - fields["standard_deviation_ms"] = stddev - } - acc.AddFields("ping", fields, tags) + return "" } func hostPinger(binary string, timeout float64, args ...string) (string, error) { @@ -194,137 +183,156 @@ func hostPinger(binary string, timeout float64, args ...string) (string, error) return string(out), err } -// args returns the arguments for the 'ping' executable -func (p *Ping) args(url string, system string) []string { - if len(p.Arguments) > 0 { - return append(p.Arguments, url) +func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { + ctx := context.Background() + + network := "ip4" + if p.IPv6 { + network = "ip6" } - // build the ping command args based on toml config - args := []string{"-c", strconv.Itoa(p.Count), "-n", "-s", "16"} - if p.PingInterval > 0 { - args = append(args, "-i", strconv.FormatFloat(p.PingInterval, 'f', -1, 64)) + host, err := net.ResolveIPAddr(network, destination) + if err != nil { + acc.AddFields("ping", map[string]interface{}{"result_code": 1}, map[string]string{"url": destination}) + acc.AddError(err) + return } - if p.Timeout > 0 { - switch system { - case "darwin": - args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64)) - case "freebsd", "netbsd", "openbsd": - args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64)) - case "linux": - args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', -1, 64)) - default: - // Not sure the best option here, just assume GNU ping? 
- args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', -1, 64)) - } + + interval := p.PingInterval + if interval < 0.2 { + interval = 0.2 } + + timeout := p.Timeout + if timeout == 0 { + timeout = 5 + } + + tick := time.NewTicker(time.Duration(interval * float64(time.Second))) + defer tick.Stop() + if p.Deadline > 0 { - switch system { - case "darwin", "freebsd", "netbsd", "openbsd": - args = append(args, "-t", strconv.Itoa(p.Deadline)) - case "linux": - args = append(args, "-w", strconv.Itoa(p.Deadline)) - default: - // not sure the best option here, just assume gnu ping? - args = append(args, "-w", strconv.Itoa(p.Deadline)) + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, time.Duration(p.Deadline)*time.Second) + defer cancel() + } + + resps := make(chan *ping.Response) + rsps := []*ping.Response{} + + r := &sync.WaitGroup{} + r.Add(1) + go func() { + for res := range resps { + rsps = append(rsps, res) + } + r.Done() + }() + + wg := &sync.WaitGroup{} + c := ping.Client{} + + var i int + for i = 0; i < p.Count; i++ { + select { + case <-ctx.Done(): + goto finish + case <-tick.C: + ctx, cancel := context.WithTimeout(ctx, time.Duration(timeout*float64(time.Second))) + defer cancel() + + wg.Add(1) + go func(seq int) { + defer wg.Done() + resp, err := c.Do(ctx, &ping.Request{ + Dst: net.ParseIP(host.String()), + Src: net.ParseIP(p.listenAddr), + Seq: seq, + }) + if err != nil { + acc.AddFields("ping", map[string]interface{}{"result_code": 2}, map[string]string{"url": destination}) + acc.AddError(err) + return + } + + resps <- resp + }(i + 1) } } - if p.Interface != "" { - switch system { - case "darwin": - args = append(args, "-I", p.Interface) - case "freebsd", "netbsd", "openbsd": - args = append(args, "-S", p.Interface) - case "linux": - args = append(args, "-I", p.Interface) - default: - // not sure the best option here, just assume gnu ping? 
- args = append(args, "-i", p.Interface) + +finish: + wg.Wait() + close(resps) + + r.Wait() + tags, fields := onFin(i, rsps, destination) + acc.AddFields("ping", fields, tags) +} + +func onFin(packetsSent int, resps []*ping.Response, destination string) (map[string]string, map[string]interface{}) { + packetsRcvd := len(resps) + + tags := map[string]string{"url": destination} + fields := map[string]interface{}{ + "result_code": 0, + "packets_transmitted": packetsSent, + "packets_received": packetsRcvd, + } + + if packetsSent == 0 { + return tags, fields + } + + if packetsRcvd == 0 { + fields["percent_packet_loss"] = float64(100) + return tags, fields + } + + fields["percent_packet_loss"] = float64(packetsSent-packetsRcvd) / float64(packetsSent) * 100 + ttl := resps[0].TTL + + var min, max, avg, total time.Duration + min = resps[0].RTT + max = resps[0].RTT + + for _, res := range resps { + if res.RTT < min { + min = res.RTT } - } - args = append(args, url) - return args -} - -// processPingOutput takes in a string output from the ping command, like: -// -// ping www.google.com (173.194.115.84): 56 data bytes -// 64 bytes from 173.194.115.84: icmp_seq=0 ttl=54 time=52.172 ms -// 64 bytes from 173.194.115.84: icmp_seq=1 ttl=54 time=34.843 ms -// -// --- www.google.com ping statistics --- -// 2 packets transmitted, 2 packets received, 0.0% packet loss -// round-trip min/avg/max/stddev = 34.843/43.508/52.172/8.664 ms -// -// It returns (, , ) -func processPingOutput(out string) (int, int, int, float64, float64, float64, float64, error) { - var trans, recv, ttl int = 0, 0, -1 - var min, avg, max, stddev float64 = -1.0, -1.0, -1.0, -1.0 - // Set this error to nil if we find a 'transmitted' line - err := errors.New("Fatal error processing ping output") - lines := strings.Split(out, "\n") - for _, line := range lines { - // Reading only first TTL, ignoring other TTL messages - if ttl == -1 && strings.Contains(line, "ttl=") { - ttl, err = getTTL(line) - } else if strings.Contains(line, "transmitted") && - strings.Contains(line, "received") { - trans, recv, err = getPacketStats(line, trans, recv) - if err != nil { - return trans, recv, ttl, min, avg, max, stddev, err - } - } else if strings.Contains(line, "min/avg/max") { - min, avg, max, stddev, err = checkRoundTripTimeStats(line, min, avg, max, stddev) - if err != nil { - return trans, recv, ttl, min, avg, max, stddev, err - } + if res.RTT > max { + max = res.RTT } + total += res.RTT } - return trans, recv, ttl, min, avg, max, stddev, err + + avg = total / time.Duration(packetsRcvd) + var sumsquares time.Duration + for _, res := range resps { + sumsquares += (res.RTT - avg) * (res.RTT - avg) + } + stdDev := time.Duration(math.Sqrt(float64(sumsquares / time.Duration(packetsRcvd)))) + + // Set TTL only on supported platform. 
See golang.org/x/net/ipv4/payload_cmsg.go + switch runtime.GOOS { + case "aix", "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": + fields["ttl"] = ttl + } + + fields["minimum_response_ms"] = float64(min.Nanoseconds()) / float64(time.Millisecond) + fields["average_response_ms"] = float64(avg.Nanoseconds()) / float64(time.Millisecond) + fields["maximum_response_ms"] = float64(max.Nanoseconds()) / float64(time.Millisecond) + fields["standard_deviation_ms"] = float64(stdDev.Nanoseconds()) / float64(time.Millisecond) + + return tags, fields } -func getPacketStats(line string, trans, recv int) (int, int, error) { - stats := strings.Split(line, ", ") - // Transmitted packets - trans, err := strconv.Atoi(strings.Split(stats[0], " ")[0]) - if err != nil { - return trans, recv, err +// Init ensures the plugin is configured correctly. +func (p *Ping) Init() error { + if p.Count < 1 { + return errors.New("bad number of packets to transmit") } - // Received packets - recv, err = strconv.Atoi(strings.Split(stats[1], " ")[0]) - return trans, recv, err -} -func getTTL(line string) (int, error) { - ttlLine := regexp.MustCompile(`ttl=(\d+)`) - ttlMatch := ttlLine.FindStringSubmatch(line) - return strconv.Atoi(ttlMatch[1]) -} - -func checkRoundTripTimeStats(line string, min, avg, max, - stddev float64) (float64, float64, float64, float64, error) { - stats := strings.Split(line, " ")[3] - data := strings.Split(stats, "/") - - min, err := strconv.ParseFloat(data[0], 64) - if err != nil { - return min, avg, max, stddev, err - } - avg, err = strconv.ParseFloat(data[1], 64) - if err != nil { - return min, avg, max, stddev, err - } - max, err = strconv.ParseFloat(data[2], 64) - if err != nil { - return min, avg, max, stddev, err - } - if len(data) == 4 { - stddev, err = strconv.ParseFloat(data[3], 64) - if err != nil { - return min, avg, max, stddev, err - } - } - return min, avg, max, stddev, err + return nil } func init() { @@ -335,6 +343,7 @@ func init() { Count: 1, Timeout: 1.0, Deadline: 10, + Method: "exec", Binary: "ping", Arguments: []string{}, } diff --git a/plugins/inputs/ping/ping_notwindows.go b/plugins/inputs/ping/ping_notwindows.go new file mode 100644 index 000000000..b39ffdd8f --- /dev/null +++ b/plugins/inputs/ping/ping_notwindows.go @@ -0,0 +1,212 @@ +// +build !windows + +package ping + +import ( + "errors" + "fmt" + "os/exec" + "regexp" + "runtime" + "strconv" + "strings" + "syscall" + + "github.com/influxdata/telegraf" +) + +func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) { + tags := map[string]string{"url": u} + fields := map[string]interface{}{"result_code": 0} + + out, err := p.pingHost(p.Binary, 60.0, p.args(u, runtime.GOOS)...) + if err != nil { + // Some implementations of ping return a 1 exit code on + // timeout, if this occurs we will not exit and try to parse + // the output. 
+ status := -1 + if exitError, ok := err.(*exec.ExitError); ok { + if ws, ok := exitError.Sys().(syscall.WaitStatus); ok { + status = ws.ExitStatus() + fields["result_code"] = status + } + } + + if status != 1 { + // Combine go err + stderr output + out = strings.TrimSpace(out) + if len(out) > 0 { + acc.AddError(fmt.Errorf("host %s: %s, %s", u, out, err)) + } else { + acc.AddError(fmt.Errorf("host %s: %s", u, err)) + } + fields["result_code"] = 2 + acc.AddFields("ping", fields, tags) + return + } + } + trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(out) + if err != nil { + // fatal error + acc.AddError(fmt.Errorf("%s: %s", err, u)) + fields["result_code"] = 2 + acc.AddFields("ping", fields, tags) + return + } + + // Calculate packet loss percentage + loss := float64(trans-rec) / float64(trans) * 100.0 + + fields["packets_transmitted"] = trans + fields["packets_received"] = rec + fields["percent_packet_loss"] = loss + if ttl >= 0 { + fields["ttl"] = ttl + } + if min >= 0 { + fields["minimum_response_ms"] = min + } + if avg >= 0 { + fields["average_response_ms"] = avg + } + if max >= 0 { + fields["maximum_response_ms"] = max + } + if stddev >= 0 { + fields["standard_deviation_ms"] = stddev + } + acc.AddFields("ping", fields, tags) +} + +// args returns the arguments for the 'ping' executable +func (p *Ping) args(url string, system string) []string { + if len(p.Arguments) > 0 { + return append(p.Arguments, url) + } + + // build the ping command args based on toml config + args := []string{"-c", strconv.Itoa(p.Count), "-n", "-s", "16"} + if p.PingInterval > 0 { + args = append(args, "-i", strconv.FormatFloat(p.PingInterval, 'f', -1, 64)) + } + if p.Timeout > 0 { + switch system { + case "darwin": + args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64)) + case "freebsd", "netbsd", "openbsd": + args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64)) + case "linux": + args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', -1, 64)) + default: + // Not sure the best option here, just assume GNU ping? + args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', -1, 64)) + } + } + if p.Deadline > 0 { + switch system { + case "darwin", "freebsd", "netbsd", "openbsd": + args = append(args, "-t", strconv.Itoa(p.Deadline)) + case "linux": + args = append(args, "-w", strconv.Itoa(p.Deadline)) + default: + // not sure the best option here, just assume gnu ping? + args = append(args, "-w", strconv.Itoa(p.Deadline)) + } + } + if p.Interface != "" { + switch system { + case "darwin": + args = append(args, "-I", p.Interface) + case "freebsd", "netbsd", "openbsd": + args = append(args, "-S", p.Interface) + case "linux": + args = append(args, "-I", p.Interface) + default: + // not sure the best option here, just assume gnu ping? 
+ args = append(args, "-i", p.Interface) + } + } + args = append(args, url) + return args +} + +// processPingOutput takes in a string output from the ping command, like: +// +// ping www.google.com (173.194.115.84): 56 data bytes +// 64 bytes from 173.194.115.84: icmp_seq=0 ttl=54 time=52.172 ms +// 64 bytes from 173.194.115.84: icmp_seq=1 ttl=54 time=34.843 ms +// +// --- www.google.com ping statistics --- +// 2 packets transmitted, 2 packets received, 0.0% packet loss +// round-trip min/avg/max/stddev = 34.843/43.508/52.172/8.664 ms +// +// It returns (, , ) +func processPingOutput(out string) (int, int, int, float64, float64, float64, float64, error) { + var trans, recv, ttl int = 0, 0, -1 + var min, avg, max, stddev float64 = -1.0, -1.0, -1.0, -1.0 + // Set this error to nil if we find a 'transmitted' line + err := errors.New("Fatal error processing ping output") + lines := strings.Split(out, "\n") + for _, line := range lines { + // Reading only first TTL, ignoring other TTL messages + if ttl == -1 && strings.Contains(line, "ttl=") { + ttl, err = getTTL(line) + } else if strings.Contains(line, "transmitted") && + strings.Contains(line, "received") { + trans, recv, err = getPacketStats(line, trans, recv) + if err != nil { + return trans, recv, ttl, min, avg, max, stddev, err + } + } else if strings.Contains(line, "min/avg/max") { + min, avg, max, stddev, err = checkRoundTripTimeStats(line, min, avg, max, stddev) + if err != nil { + return trans, recv, ttl, min, avg, max, stddev, err + } + } + } + return trans, recv, ttl, min, avg, max, stddev, err +} + +func getPacketStats(line string, trans, recv int) (int, int, error) { + stats := strings.Split(line, ", ") + // Transmitted packets + trans, err := strconv.Atoi(strings.Split(stats[0], " ")[0]) + if err != nil { + return trans, recv, err + } + // Received packets + recv, err = strconv.Atoi(strings.Split(stats[1], " ")[0]) + return trans, recv, err +} + +func getTTL(line string) (int, error) { + ttlLine := regexp.MustCompile(`ttl=(\d+)`) + ttlMatch := ttlLine.FindStringSubmatch(line) + return strconv.Atoi(ttlMatch[1]) +} + +func checkRoundTripTimeStats(line string, min, avg, max, + stddev float64) (float64, float64, float64, float64, error) { + stats := strings.Split(line, " ")[3] + data := strings.Split(stats, "/") + + min, err := strconv.ParseFloat(data[0], 64) + if err != nil { + return min, avg, max, stddev, err + } + avg, err = strconv.ParseFloat(data[1], 64) + if err != nil { + return min, avg, max, stddev, err + } + max, err = strconv.ParseFloat(data[2], 64) + if err != nil { + return min, avg, max, stddev, err + } + if len(data) == 4 { + stddev, err = strconv.ParseFloat(data[3], 64) + if err != nil { + return min, avg, max, stddev, err + } + } + return min, avg, max, stddev, err +} diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 8870d4156..56303b1b2 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -180,12 +180,12 @@ func mockHostPinger(binary string, timeout float64, args ...string) (string, err func TestPingGather(t *testing.T) { var acc testutil.Accumulator p := Ping{ - Urls: []string{"www.google.com", "www.reddit.com"}, + Urls: []string{"localhost", "influxdata.com"}, pingHost: mockHostPinger, } acc.GatherError(p.Gather) - tags := map[string]string{"url": "www.google.com"} + tags := map[string]string{"url": "localhost"} fields := map[string]interface{}{ "packets_transmitted": 5, "packets_received": 5, @@ -199,7 +199,7 @@ func TestPingGather(t 
*testing.T) { } acc.AssertContainsTaggedFields(t, "ping", fields, tags) - tags = map[string]string{"url": "www.reddit.com"} + tags = map[string]string{"url": "influxdata.com"} acc.AssertContainsTaggedFields(t, "ping", fields, tags) } @@ -339,3 +339,20 @@ func TestPingBinary(t *testing.T) { } acc.GatherError(p.Gather) } + +// Test that Gather function works using native ping +func TestPingGatherNative(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test due to permission requirements.") + } + + var acc testutil.Accumulator + p := Ping{ + Urls: []string{"localhost", "127.0.0.2"}, + Method: "native", + Count: 5, + } + + assert.NoError(t, acc.GatherError(p.Gather)) + assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_transmitted", 5)) +} diff --git a/plugins/inputs/ping/ping_windows.go b/plugins/inputs/ping/ping_windows.go index 6064fabe4..adfd60480 100644 --- a/plugins/inputs/ping/ping_windows.go +++ b/plugins/inputs/ping/ping_windows.go @@ -5,103 +5,21 @@ package ping import ( "errors" "fmt" - "net" - "os/exec" "regexp" "strconv" "strings" - "sync" - "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/plugins/inputs" ) -// HostPinger is a function that runs the "ping" function using a list of -// passed arguments. This can be easily switched with a mocked ping function -// for unit test purposes (see ping_test.go) -type HostPinger func(binary string, timeout float64, args ...string) (string, error) - -type Ping struct { - wg sync.WaitGroup - - // Number of pings to send (ping -c ) - Count int - - // Ping timeout, in seconds. 0 means no timeout (ping -W ) - Timeout float64 - - // URLs to ping - Urls []string - - // Ping executable binary - Binary string - - // Arguments for ping command. - // when `Arguments` is not empty, other options (ping_interval, timeout, etc) will be ignored - Arguments []string - - // host ping function - pingHost HostPinger -} - -func (s *Ping) Description() string { - return "Ping given url(s) and return statistics" -} - -const sampleConfig = ` - ## List of urls to ping - urls = ["www.google.com"] - - ## number of pings to send per collection (ping -n ) - # count = 1 - - ## Ping timeout, in seconds. 
0.0 means default timeout (ping -w ) - # timeout = 0.0 - - ## Specify the ping executable binary, default is "ping" - # binary = "ping" - - ## Arguments for ping command - ## when arguments is not empty, other options (ping_interval, timeout, etc) will be ignored - # arguments = ["-c", "3"] -` - -func (s *Ping) SampleConfig() string { - return sampleConfig -} - -func (p *Ping) Gather(acc telegraf.Accumulator) error { +func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) { if p.Count < 1 { p.Count = 1 } - // Spin off a go routine for each url to ping - for _, url := range p.Urls { - p.wg.Add(1) - go p.pingToURL(url, acc) - } - - p.wg.Wait() - - return nil -} - -func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) { - defer p.wg.Done() - tags := map[string]string{"url": u} fields := map[string]interface{}{"result_code": 0} - _, err := net.LookupHost(u) - if err != nil { - acc.AddError(err) - fields["result_code"] = 1 - acc.AddFields("ping", fields, tags) - return - } - args := p.args(u) totalTimeout := 60.0 if len(p.Arguments) == 0 { @@ -151,17 +69,6 @@ func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) { acc.AddFields("ping", fields, tags) } -func hostPinger(binary string, timeout float64, args ...string) (string, error) { - bin, err := exec.LookPath(binary) - if err != nil { - return "", err - } - c := exec.Command(bin, args...) - out, err := internal.CombinedOutputTimeout(c, - time.Second*time.Duration(timeout+1)) - return string(out), err -} - // args returns the arguments for the 'ping' executable func (p *Ping) args(url string) []string { if len(p.Arguments) > 0 { @@ -246,14 +153,3 @@ func (p *Ping) timeout() float64 { } return 4 + 1 } - -func init() { - inputs.Add("ping", func() telegraf.Input { - return &Ping{ - pingHost: hostPinger, - Count: 1, - Binary: "ping", - Arguments: []string{}, - } - }) -} From a0fec3cd8211dec4d7a71900e0b60fbd145053dc Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 11 Jul 2019 15:09:31 -0700 Subject: [PATCH 0995/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c8ee654c3..a8a391593 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ - [#6062](https://github.com/influxdata/telegraf/pull/6062): Add basic auth to prometheus input plugin. - [#6064](https://github.com/influxdata/telegraf/pull/6064): Add node roles tag to elasticsearch input. - [#5572](https://github.com/influxdata/telegraf/pull/5572): Support floats in statsd percentiles. +- [#6050](https://github.com/influxdata/telegraf/pull/6050): Add native Go ping method to ping input plugin. 
#### Bugfixes From 981dd5bfc0ea72c8d9858aaff494f69d2298cbb1 Mon Sep 17 00:00:00 2001 From: Matthew Crenshaw <3420325+sgtsquiggs@users.noreply.github.com> Date: Thu, 11 Jul 2019 20:39:59 -0400 Subject: [PATCH 0996/1815] Resume from last known offset when reloading in tail input (#6074) --- plugins/inputs/logparser/logparser.go | 88 ++++++++++++++++++++++----- plugins/inputs/tail/tail.go | 78 +++++++++++++++++++----- plugins/inputs/tail/tail_test.go | 2 +- 3 files changed, 135 insertions(+), 33 deletions(-) diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index e724f2d4b..c132ba7a2 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -3,11 +3,13 @@ package logparser import ( + "fmt" "log" "strings" "sync" "github.com/influxdata/tail" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/inputs" @@ -19,6 +21,11 @@ const ( defaultWatchMethod = "inotify" ) +var ( + offsets = make(map[string]int64) + offsetsMutex = new(sync.Mutex) +) + // LogParser in the primary interface for the plugin type GrokConfig struct { MeasurementName string `toml:"measurement"` @@ -42,6 +49,7 @@ type LogParserPlugin struct { WatchMethod string tailers map[string]*tail.Tail + offsets map[string]int64 lines chan logEntry done chan struct{} wg sync.WaitGroup @@ -53,6 +61,20 @@ type LogParserPlugin struct { GrokConfig GrokConfig `toml:"grok"` } +func NewLogParser() *LogParserPlugin { + offsetsMutex.Lock() + offsetsCopy := make(map[string]int64, len(offsets)) + for k, v := range offsets { + offsetsCopy[k] = v + } + offsetsMutex.Unlock() + + return &LogParserPlugin{ + WatchMethod: defaultWatchMethod, + offsets: offsetsCopy, + } +} + const sampleConfig = ` ## Log files to parse. ## These accept standard unix glob matching rules, but with the addition of @@ -161,18 +183,21 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error { l.wg.Add(1) go l.parser() - return l.tailNewfiles(l.FromBeginning) + err = l.tailNewfiles(l.FromBeginning) + + // clear offsets + l.offsets = make(map[string]int64) + // assumption that once Start is called, all parallel plugins have already been initialized + offsetsMutex.Lock() + offsets = make(map[string]int64) + offsetsMutex.Unlock() + + return err } // check the globs against files on disk, and start tailing any new files. // Assumes l's lock is held! func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error { - var seek tail.SeekInfo - if !fromBeginning { - seek.Whence = 2 - seek.Offset = 0 - } - var poll bool if l.WatchMethod == "poll" { poll = true @@ -182,7 +207,7 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error { for _, filepath := range l.Files { g, err := globpath.Compile(filepath) if err != nil { - log.Printf("E! Error Glob %s failed to compile, %s", filepath, err) + log.Printf("E! [inputs.logparser] Error Glob %s failed to compile, %s", filepath, err) continue } files := g.Match() @@ -193,11 +218,27 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error { continue } + var seek *tail.SeekInfo + if !fromBeginning { + if offset, ok := l.offsets[file]; ok { + log.Printf("D! 
[inputs.tail] using offset %d for file: %v", offset, file) + seek = &tail.SeekInfo{ + Whence: 0, + Offset: offset, + } + } else { + seek = &tail.SeekInfo{ + Whence: 2, + Offset: 0, + } + } + } + tailer, err := tail.TailFile(file, tail.Config{ ReOpen: true, Follow: true, - Location: &seek, + Location: seek, MustExist: true, Poll: poll, Logger: tail.DiscardingLogger, @@ -228,7 +269,7 @@ func (l *LogParserPlugin) receiver(tailer *tail.Tail) { for line = range tailer.Lines { if line.Err != nil { - log.Printf("E! Error tailing file %s, Error: %s\n", + log.Printf("E! [inputs.logparser] Error tailing file %s, Error: %s", tailer.Filename, line.Err) continue } @@ -274,7 +315,7 @@ func (l *LogParserPlugin) parser() { l.acc.AddFields(m.Name(), m.Fields(), tags, m.Time()) } } else { - log.Println("E! Error parsing log line: " + err.Error()) + log.Println("E! [inputs.logparser] Error parsing log line: " + err.Error()) } } @@ -286,23 +327,38 @@ func (l *LogParserPlugin) Stop() { defer l.Unlock() for _, t := range l.tailers { + if !l.FromBeginning { + // store offset for resume + offset, err := t.Tell() + if err == nil { + l.offsets[t.Filename] = offset + log.Printf("D! [inputs.logparser] recording offset %d for file: %v", offset, t.Filename) + } else { + l.acc.AddError(fmt.Errorf("error recording offset for file %s", t.Filename)) + } + } err := t.Stop() //message for a stopped tailer - log.Printf("D! tail dropped for file: %v", t.Filename) + log.Printf("D! [inputs.logparser] tail dropped for file: %v", t.Filename) if err != nil { - log.Printf("E! Error stopping tail on file %s\n", t.Filename) + log.Printf("E! [inputs.logparser] Error stopping tail on file %s", t.Filename) } } close(l.done) l.wg.Wait() + + // persist offsets + offsetsMutex.Lock() + for k, v := range l.offsets { + offsets[k] = v + } + offsetsMutex.Unlock() } func init() { inputs.Add("logparser", func() telegraf.Input { - return &LogParserPlugin{ - WatchMethod: defaultWatchMethod, - } + return NewLogParser() }) } diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index 834d7cf8f..245010764 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -9,6 +9,7 @@ import ( "sync" "github.com/influxdata/tail" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/inputs" @@ -19,6 +20,11 @@ const ( defaultWatchMethod = "inotify" ) +var ( + offsets = make(map[string]int64) + offsetsMutex = new(sync.Mutex) +) + type Tail struct { Files []string FromBeginning bool @@ -26,6 +32,7 @@ type Tail struct { WatchMethod string tailers map[string]*tail.Tail + offsets map[string]int64 parserFunc parsers.ParserFunc wg sync.WaitGroup acc telegraf.Accumulator @@ -34,8 +41,16 @@ type Tail struct { } func NewTail() *Tail { + offsetsMutex.Lock() + offsetsCopy := make(map[string]int64, len(offsets)) + for k, v := range offsets { + offsetsCopy[k] = v + } + offsetsMutex.Unlock() + return &Tail{ FromBeginning: false, + offsets: offsetsCopy, } } @@ -87,18 +102,19 @@ func (t *Tail) Start(acc telegraf.Accumulator) error { t.acc = acc t.tailers = make(map[string]*tail.Tail) - return t.tailNewFiles(t.FromBeginning) + err := t.tailNewFiles(t.FromBeginning) + + // clear offsets + t.offsets = make(map[string]int64) + // assumption that once Start is called, all parallel plugins have already been initialized + offsetsMutex.Lock() + offsets = make(map[string]int64) + offsetsMutex.Unlock() + + return err } func (t *Tail) tailNewFiles(fromBeginning bool) error { - var 
seek *tail.SeekInfo - if !t.Pipe && !fromBeginning { - seek = &tail.SeekInfo{ - Whence: 2, - Offset: 0, - } - } - var poll bool if t.WatchMethod == "poll" { poll = true @@ -108,7 +124,7 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error { for _, filepath := range t.Files { g, err := globpath.Compile(filepath) if err != nil { - t.acc.AddError(fmt.Errorf("E! Error Glob %s failed to compile, %s", filepath, err)) + t.acc.AddError(fmt.Errorf("glob %s failed to compile, %s", filepath, err)) } for _, file := range g.Match() { if _, ok := t.tailers[file]; ok { @@ -116,6 +132,22 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error { continue } + var seek *tail.SeekInfo + if !t.Pipe && !fromBeginning { + if offset, ok := t.offsets[file]; ok { + log.Printf("D! [inputs.tail] using offset %d for file: %v", offset, file) + seek = &tail.SeekInfo{ + Whence: 0, + Offset: offset, + } + } else { + seek = &tail.SeekInfo{ + Whence: 2, + Offset: 0, + } + } + } + tailer, err := tail.TailFile(file, tail.Config{ ReOpen: true, @@ -159,8 +191,7 @@ func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) { var line *tail.Line for line = range tailer.Lines { if line.Err != nil { - t.acc.AddError(fmt.Errorf("E! Error tailing file %s, Error: %s\n", - tailer.Filename, err)) + t.acc.AddError(fmt.Errorf("error tailing file %s, Error: %s", tailer.Filename, err)) continue } // Fix up files with Windows line endings. @@ -188,7 +219,7 @@ func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) { t.acc.AddFields(m.Name(), m.Fields(), tags, m.Time()) } } else { - t.acc.AddError(fmt.Errorf("E! Malformed log line in %s: [%s], Error: %s\n", + t.acc.AddError(fmt.Errorf("malformed log line in %s: [%s], Error: %s", tailer.Filename, line.Text, err)) } } @@ -196,8 +227,7 @@ func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) { log.Printf("D! [inputs.tail] tail removed for file: %v", tailer.Filename) if err := tailer.Err(); err != nil { - t.acc.AddError(fmt.Errorf("E! Error tailing file %s, Error: %s\n", - tailer.Filename, err)) + t.acc.AddError(fmt.Errorf("error tailing file %s, Error: %s", tailer.Filename, err)) } } @@ -206,13 +236,29 @@ func (t *Tail) Stop() { defer t.Unlock() for _, tailer := range t.tailers { + if !t.Pipe && !t.FromBeginning { + // store offset for resume + offset, err := tailer.Tell() + if err == nil { + log.Printf("D! [inputs.tail] recording offset %d for file: %v", offset, tailer.Filename) + } else { + t.acc.AddError(fmt.Errorf("error recording offset for file %s", tailer.Filename)) + } + } err := tailer.Stop() if err != nil { - t.acc.AddError(fmt.Errorf("E! Error stopping tail on file %s\n", tailer.Filename)) + t.acc.AddError(fmt.Errorf("error stopping tail on file %s", tailer.Filename)) } } t.wg.Wait() + + // persist offsets + offsetsMutex.Lock() + for k, v := range t.offsets { + offsets[k] = v + } + offsetsMutex.Unlock() } func (t *Tail) SetParserFunc(fn parsers.ParserFunc) { diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index 06db2c172..fb5e05a76 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -108,7 +108,7 @@ func TestTailBadLine(t *testing.T) { require.NoError(t, err) acc.WaitError(1) - assert.Contains(t, acc.Errors[0].Error(), "E! 
Malformed log line") + assert.Contains(t, acc.Errors[0].Error(), "malformed log line") } func TestTailDosLineendings(t *testing.T) { From 55c9da028c11d21554c1a9a775794b2362181f3c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 11 Jul 2019 17:44:24 -0700 Subject: [PATCH 0997/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a8a391593..0cbad5f69 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ - [#6064](https://github.com/influxdata/telegraf/pull/6064): Add node roles tag to elasticsearch input. - [#5572](https://github.com/influxdata/telegraf/pull/5572): Support floats in statsd percentiles. - [#6050](https://github.com/influxdata/telegraf/pull/6050): Add native Go ping method to ping input plugin. +- [#6074](https://github.com/influxdata/telegraf/pull/6074): Resume from last known offset in tail inputwhen reloading Telegraf. #### Bugfixes From 149be55c6455b12ffa5cedfee71e2824b1eb34cf Mon Sep 17 00:00:00 2001 From: denzilribeiro Date: Fri, 12 Jul 2019 13:37:00 -0500 Subject: [PATCH 0998/1815] Add improved support for Azure SQL Database (#6111) --- plugins/inputs/sqlserver/README.md | 29 ++- plugins/inputs/sqlserver/sqlserver.go | 244 +++++++++++++++++++++----- 2 files changed, 222 insertions(+), 51 deletions(-) diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index e83aca304..16bb353c5 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -18,9 +18,21 @@ GRANT VIEW ANY DEFINITION TO [telegraf]; GO ``` +For Azure SQL Database, you require the View Database State permission and can create a user with a password directly in the database. +```sql +CREATE USER [telegraf] WITH PASSWORD = N'mystrongpassword'; +GO +GRANT VIEW DATABASE STATE TO [telegraf]; +GO +``` + ### Configuration: ```toml +[agent] + ## Default data collection interval for all inputs, can be changed as per collection interval needs + interval = "10s" + # Read metrics from Microsoft SQL Server [[inputs.sqlserver]] ## Specify instances to monitor with a list of connection strings. @@ -35,7 +47,7 @@ GO ## Optional parameter, setting this to 2 will use a new version ## of the collection queries that break compatibility with the original - ## dashboards. + ## dashboards. All new functionality is under V2 query_version = 2 ## If you are using AzureDB, setting this to true will gather resource utilization metrics @@ -52,7 +64,10 @@ GO ## - DatabaseStats ## - MemoryClerk ## - VolumeSpace - exclude_query = [ 'DatabaseIO' ] + ## - Schedulers + ## - AzureDBResourceStats + ## - AzureDBResourceGovernance + exclude_query = [ 'Schedulers' ] ``` ### Metrics: @@ -79,7 +94,6 @@ If you are using the original queries all stats have the following tags: #### Version 2: The new (version 2) metrics provide: -- *AzureDB*: AzureDB resource utilization from `sys.dm_db_resource_stats` - *Database IO*: IO stats from `sys.dm_io_virtual_file_stats` - *Memory Clerk*: Memory clerk breakdown from `sys.dm_os_memory_clerks`, most clerks have been given a friendly name. - *Performance Counters*: A select list of performance counters from `sys.dm_os_performance_counters`. 
Some of the important metrics included:
@@ -89,8 +103,9 @@ The new (version 2) metrics provide:
 - *Memory*: PLE, Page reads/sec, Page writes/sec, + more
 - *TempDB*: Free space, Version store usage, Active temp tables, temp table creation rate, + more
 - *Resource Governor*: CPU Usage, Requests/sec, Queued Requests, and Blocked tasks per workload group + more
-- *Server properties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version
+- *Server properties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. For Azure SQL, relevant properties such as Tier, #Vcores, and Memory are included.
 - *Wait stats*: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store.
+- *Schedulers*: This captures `sys.dm_os_schedulers`.
 - *Azure Managed Instances*
   - Stats from `sys.server_resource_stats`:
     - cpu_count
@@ -101,6 +116,11 @@ The new (version 2) metrics provide:
     - total_storage_mb
     - available_storage_mb
     - uptime
+  - Resource governance stats from `sys.dm_instance_resource_governance`
+- *Azure SQL Database*
+  - Stats from `sys.dm_db_wait_stats`
+  - Resource governance stats from `sys.dm_user_db_resource_governance`
+  - Stats from `sys.dm_db_resource_stats`
 
 The following metrics can be used directly, with no delta calculations:
 - SQLServer:Buffer Manager\Buffer cache hit ratio
@@ -140,3 +160,4 @@ The following metrics can be used directly, with no delta calculations:
 Version 2 queries have the following tags:
 - `sql_instance`: Physical host and instance name (hostname:instance)
+- `database_name`: For Azure SQL Database, `database_name` denotes the name of the Azure SQL Database, as the server name is a logical construct.
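+
+### Example Output:
+
+For illustration only, a point emitted by the version 2 queries might look like the following in InfluxDB line protocol. This is a hypothetical sketch, not captured output: the measurement, tag keys, and field keys come from the queries above, while the host name and field values are invented.
+
+```
+sqlserver_server_properties,sql_instance=myhost:MSSQLSERVER,database_name=master cpu_count=4i,server_memory=17055360i,engine_edition=3i,uptime=1860i 1562889600000000000
+```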
diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 8fae93d15..aaad6006c 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -66,7 +66,10 @@ var sampleConfig = ` ## - MemoryClerk ## - VolumeSpace ## - PerformanceMetrics - # exclude_query = [ 'DatabaseIO' ] + ## - Schedulers + ## - AzureDBResourceStats + ## - AzureDBResourceGovernance + exclude_query = [ 'Schedulers' ] ` // SampleConfig return the sample configuration @@ -88,7 +91,8 @@ func initQueries(s *SQLServer) { // If this is an AzureDB instance, grab some extra metrics if s.AzureDB { - queries["AzureDB"] = Query{Script: sqlAzureDB, ResultByRow: false} + queries["AzureDBResourceStats"] = Query{Script: sqlAzureDBResourceStats, ResultByRow: false} + queries["AzureDBResourceGovernance"] = Query{Script: sqlAzureDBResourceGovernance, ResultByRow: false} } // Decide if we want to run version 1 or version 2 queries @@ -98,6 +102,7 @@ func initQueries(s *SQLServer) { queries["DatabaseIO"] = Query{Script: sqlDatabaseIOV2, ResultByRow: false} queries["ServerProperties"] = Query{Script: sqlServerPropertiesV2, ResultByRow: false} queries["MemoryClerk"] = Query{Script: sqlMemoryClerkV2, ResultByRow: false} + queries["Schedulers"] = Query{Script: sqlServerSchedulersV2, ResultByRow: false} } else { queries["PerformanceCounters"] = Query{Script: sqlPerformanceCounters, ResultByRow: true} queries["WaitStatsCategorized"] = Query{Script: sqlWaitStatsCategorized, ResultByRow: false} @@ -240,6 +245,7 @@ const sqlMemoryClerkV2 = `SET DEADLOCK_PRIORITY -10; DECLARE @SQL NVARCHAR(MAX) = 'SELECT "sqlserver_memory_clerks" As [measurement], REPLACE(@@SERVERNAME,"\",":") AS [sql_instance], +DB_NAME() as [database_name], ISNULL(clerk_names.name,mc.type) AS clerk_type, SUM({pages_kb}) AS size_kb FROM @@ -341,6 +347,8 @@ ELSE EXEC(@SQL) ` +// Conditional check based on Azure SQL DB OR On-prem SQL Server +// EngineEdition=5 is Azure SQL DB const sqlDatabaseIOV2 = `SET DEADLOCK_PRIORITY -10; IF SERVERPROPERTY('EngineEdition') = 5 BEGIN @@ -382,6 +390,9 @@ inner join sys.master_files b on b.database_id = vfs.database_id and b.file_id = END ` +// Conditional check based on Azure SQL DB, Azure SQL Managed instance OR On-prem SQL Server +// EngineEdition=5 is Azure SQL DB, EngineEdition=8 is Managed instance + const sqlServerPropertiesV2 = `SET DEADLOCK_PRIORITY -10; DECLARE @sys_info TABLE ( cpu_count INT, @@ -394,41 +405,54 @@ DECLARE @sys_info TABLE ( uptime INT ) -IF OBJECT_ID('master.sys.dm_os_sys_info') IS NOT NULL -BEGIN - IF SERVERPROPERTY('EngineEdition') = 8 -- Managed Instance - INSERT INTO @sys_info ( cpu_count, server_memory, sku, engine_edition, hardware_type, total_storage_mb, available_storage_mb, uptime ) - SELECT TOP(1) - virtual_core_count AS cpu_count, - (SELECT process_memory_limit_mb FROM sys.dm_os_job_object) AS server_memory, - sku, - cast(SERVERPROPERTY('EngineEdition') as smallint) AS engine_edition, - hardware_generation AS hardware_type, - reserved_storage_mb AS total_storage_mb, - (reserved_storage_mb - storage_space_used_mb) AS available_storage_mb, - (select DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) from sys.dm_os_sys_info) as uptime - FROM sys.server_resource_stats - ORDER BY start_time DESC +IF SERVERPROPERTY('EngineEdition') = 8 -- Managed Instance + INSERT INTO @sys_info ( cpu_count, server_memory, sku, engine_edition, hardware_type, total_storage_mb, available_storage_mb, uptime ) + SELECT TOP(1) + virtual_core_count AS cpu_count, + (SELECT 
process_memory_limit_mb FROM sys.dm_os_job_object) AS server_memory, + sku, + cast(SERVERPROPERTY('EngineEdition') as smallint) AS engine_edition, + hardware_generation AS hardware_type, + reserved_storage_mb AS total_storage_mb, + (reserved_storage_mb - storage_space_used_mb) AS available_storage_mb, + (select DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) from sys.dm_os_sys_info) as uptime + FROM sys.server_resource_stats + ORDER BY start_time DESC - ELSE - BEGIN - INSERT INTO @sys_info ( cpu_count, server_memory, sku, engine_edition, hardware_type, total_storage_mb, available_storage_mb, uptime ) - SELECT cpu_count, - (SELECT total_physical_memory_kb FROM sys.dm_os_sys_memory) AS server_memory, - CAST(SERVERPROPERTY('Edition') AS NVARCHAR(64)) as sku, - CAST(SERVERPROPERTY('EngineEdition') as smallint) as engine_edition, - CASE virtual_machine_type_desc - WHEN 'NONE' THEN 'PHYSICAL Machine' - ELSE virtual_machine_type_desc - END AS hardware_type, - NULL, - NULL, - DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) - FROM sys.dm_os_sys_info - END +IF SERVERPROPERTY('EngineEdition') = 5 -- Azure SQL DB + INSERT INTO @sys_info ( cpu_count, server_memory, sku, engine_edition, hardware_type, total_storage_mb, available_storage_mb, uptime ) + SELECT TOP(1) + (SELECT count(*) FROM sys.dm_os_schedulers WHERE status = 'VISIBLE ONLINE') AS cpu_count, + (SELECT process_memory_limit_mb FROM sys.dm_os_job_object) AS server_memory, + slo.edition as sku, + cast(SERVERPROPERTY('EngineEdition') as smallint) AS engine_edition, + slo.service_objective AS hardware_type, + cast(DATABASEPROPERTYEX(DB_NAME(),'MaxSizeInBytes') as bigint)/(1024*1024) AS total_storage_mb, + NULL AS available_storage_mb, -- Can we find out storage? + NULL as uptime + FROM sys.databases d + JOIN sys.database_service_objectives slo + ON d.database_id = slo.database_id + +ELSE +BEGIN + INSERT INTO @sys_info ( cpu_count, server_memory, sku, engine_edition, hardware_type, total_storage_mb, available_storage_mb, uptime ) + SELECT cpu_count, + (SELECT total_physical_memory_kb FROM sys.dm_os_sys_memory) AS server_memory, + CAST(SERVERPROPERTY('Edition') AS NVARCHAR(64)) as sku, + CAST(SERVERPROPERTY('EngineEdition') as smallint) as engine_edition, + CASE virtual_machine_type_desc + WHEN 'NONE' THEN 'PHYSICAL Machine' + ELSE virtual_machine_type_desc + END AS hardware_type, + NULL, + NULL, + DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) + FROM sys.dm_os_sys_info END SELECT 'sqlserver_server_properties' AS [measurement], REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], + DB_NAME() as [database_name], s.cpu_count, s.server_memory, s.sku, @@ -457,7 +481,16 @@ FROM ( SELECT cpu_count, server_memory, sku, engine_edition, hardware_type, total_storage_mb, available_storage_mb, uptime FROM @sys_info ) AS s -OPTION( RECOMPILE ) +` + +//Recommend disabling this by default, but is useful to detect single CPU spikes/bottlenecks +const sqlServerSchedulersV2 string = `SET DEADLOCK_PRIORITY -10; +SELECT 'sqlserver_schedulers' AS [measurement], + REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], + DB_NAME() as [database_name], + cast(scheduler_id as varchar(4)) as scheduler_id, cast(cpu_id as varchar(4)) as cpu_id,is_online,is_idle,preemptive_switches_count,context_switches_count,current_tasks_count,runnable_tasks_count,current_workers_count + , active_workers_count,work_queue_count, pending_disk_io_count,load_factor,yield_count, total_cpu_usage_ms, total_scheduler_delay_ms +from sys.dm_os_schedulers ` const sqlPerformanceCountersV2 string = `SET 
DEADLOCK_PRIORITY -10; @@ -572,8 +605,7 @@ WHERE ( ) DECLARE @SQL NVARCHAR(MAX) -SET @SQL = REPLACE(' -SELECT +SET @SQL = REPLACE('SELECT "SQLServer:Workload Group Stats" AS object, counter, instance, @@ -605,6 +637,7 @@ EXEC( @SQL ) SELECT 'sqlserver_performance' AS [measurement], REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], + DB_NAME() as [database_name], pc.object_name AS [object], pc.counter_name AS [counter], CASE pc.instance_name WHEN '_Total' THEN 'Total' ELSE ISNULL(pc.instance_name,'') END AS [instance], @@ -622,10 +655,14 @@ WHERE pc.counter_name NOT LIKE '% base' OPTION(RECOMPILE); ` +// Conditional check based on Azure SQL DB v/s the rest aka (Azure SQL Managed instance OR On-prem SQL Server) +// EngineEdition=5 is Azure SQL DB const sqlWaitStatsCategorizedV2 string = `SET DEADLOCK_PRIORITY -10; +IF SERVERPROPERTY('EngineEdition') != 5 SELECT -'sqlserver_waitstats' AS [measurement], + 'sqlserver_waitstats' AS [measurement], REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], +DB_NAME() as [database_name], ws.wait_type, wait_time_ms, wait_time_ms - signal_wait_time_ms AS [resource_wait_ms], @@ -1178,16 +1215,68 @@ ws.wait_type NOT IN ( N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT') AND waiting_tasks_count > 0 -AND wait_time_ms > 100 -OPTION (RECOMPILE); +AND wait_time_ms > 100; + +ELSE + SELECT + 'sqlserver_azuredb_waitstats' AS [measurement], + REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], + DB_NAME() as [database_name'], + dbws.wait_type, + dbws.wait_time_ms, + dbws.wait_time_ms - signal_wait_time_ms AS [resource_wait_ms], + dbws.signal_wait_time_ms, + dbws.max_wait_time_ms, + dbws.waiting_tasks_count + FROM + sys.dm_db_wait_stats AS dbws WITH (NOLOCK) + WHERE + dbws.wait_type NOT IN ( + N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP', + N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE', + N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE', + N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_WORKER_QUEUE', + N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', + N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', + N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', + N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', + N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', + N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE', + N'PARALLEL_REDO_WORKER_WAIT_WORK', + N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', + N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', + N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', + N'PREEMPTIVE_OS_DEVICEOPS', + N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', + N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', + N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', + N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT', + N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP', + N'QDS_ASYNC_QUEUE', + N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH', + N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP', + N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', + N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', + N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', 
N'SP_SERVER_DIAGNOSTICS_SLEEP', + N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + N'SQLTRACE_WAIT_ENTRIES', + N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT', + N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', + N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN', + N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', + N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT') + AND waiting_tasks_count > 0 + AND wait_time_ms > 100; ` -const sqlAzureDB string = `SET DEADLOCK_PRIORITY -10; -IF OBJECT_ID('sys.dm_db_resource_stats') IS NOT NULL +// Only executed if AzureDB flag is set +const sqlAzureDBResourceStats string = `SET DEADLOCK_PRIORITY -10; +IF SERVERPROPERTY('EngineEdition') = 5 -- Is this Azure SQL DB? BEGIN SELECT TOP(1) 'sqlserver_azurestats' AS [measurement], REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], + DB_NAME() as [database_name], avg_cpu_percent, avg_data_io_percent, avg_log_write_percent, @@ -1197,18 +1286,79 @@ BEGIN max_session_percent, dtu_limit, avg_login_rate_percent, - end_time + end_time, + avg_instance_memory_percent, + avg_instance_cpu_percent FROM sys.dm_db_resource_stats WITH (NOLOCK) ORDER BY end_time DESC - OPTION (RECOMPILE) -END -ELSE -BEGIN - RAISERROR('This does not seem to be an AzureDB instance. Set "azureDB = false" in your telegraf configuration.',16,1) END` +//Only executed if AzureDB Flag is set +const sqlAzureDBResourceGovernance string = ` +IF SERVERPROPERTY('EngineEdition') = 5 -- Is this Azure SQL DB? +SELECT + 'sqlserver_db_resource_governance' AS [measurement], + server_name AS [sql_instance], + DB_NAME() as [database_name], + slo_name, + dtu_limit, + max_cpu, + cap_cpu, + instance_cap_cpu, + max_db_memory, + max_db_max_size_in_mb, + db_file_growth_in_mb, + log_size_in_mb, + instance_max_worker_threads, + primary_group_max_workers, + instance_max_log_rate, + primary_min_log_rate, + primary_max_log_rate, + primary_group_min_io, + primary_group_max_io, + primary_group_min_cpu, + primary_group_max_cpu, + primary_pool_max_workers, + pool_max_io, + checkpoint_rate_mbps, + checkpoint_rate_io, + volume_local_iops, + volume_managed_xstore_iops, + volume_external_xstore_iops, + volume_type_local_iops, + volume_type_managed_xstore_iops, + volume_type_external_xstore_iops, + volume_pfs_iops, + volume_type_pfs_iops + FROM + sys.dm_user_db_resource_governance WITH (NOLOCK); +ELSE + IF SERVERPROPERTY('EngineEdition') = 8 -- Is this Azure SQL Managed Instance? 
+ BEGIN + SELECT + 'sqlserver_instance_resource_governance' AS [measurement], + server_name AS [sql_instance], + instance_cap_cpu, + instance_max_log_rate, + instance_max_worker_threads, + volume_local_iops, + volume_external_xstore_iops, + volume_managed_xstore_iops, + volume_type_local_iops, + volume_type_managed_xstore_iops, + volume_type_external_xstore_iops, + volume_external_xstore_iops, + volume_local_max_oustanding_io, + volume_managed_xstore_max_oustanding_io, + volume_external_xstore_max_oustanding_io, + tempdb_log_file_number + from + sys.dm_instance_resource_governance + END; +` + // Queries V1 const sqlPerformanceMetrics string = `SET DEADLOCK_PRIORITY -10; SET NOCOUNT ON; From e5158107c15e00db70770e04ceda264bbc01a199 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 12 Jul 2019 11:41:05 -0700 Subject: [PATCH 0999/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0cbad5f69..9c842861b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,7 @@ - [#5572](https://github.com/influxdata/telegraf/pull/5572): Support floats in statsd percentiles. - [#6050](https://github.com/influxdata/telegraf/pull/6050): Add native Go ping method to ping input plugin. - [#6074](https://github.com/influxdata/telegraf/pull/6074): Resume from last known offset in tail inputwhen reloading Telegraf. +- [#6111](https://github.com/influxdata/telegraf/pull/6111): Add improved support for Azure SQL Database to sqlserver input. #### Bugfixes From 43c16aa898aee58598ddcbd42f334485eb015b48 Mon Sep 17 00:00:00 2001 From: George Date: Fri, 12 Jul 2019 22:25:45 +0100 Subject: [PATCH 1000/1815] Add extra attributes for NVMe devices to smart input (#6079) --- plugins/inputs/smart/smart.go | 187 +++++++++++++++-------------- plugins/inputs/smart/smart_test.go | 46 ++++++- 2 files changed, 145 insertions(+), 88 deletions(-) diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go index 93d4a0076..06bf21e10 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -36,18 +36,8 @@ var ( // PASSED, FAILED, UNKNOWN smartOverallHealth = regexp.MustCompile("^(SMART overall-health self-assessment test result|SMART Health Status):\\s+(\\w+).*$") - // Accumulated start-stop cycles: 7 - sasStartStopAttr = regexp.MustCompile("^Accumulated start-stop cycles:\\s+(.*)$") - // Accumulated load-unload cycles: 39 - sasLoadCycleAttr = regexp.MustCompile("^Accumulated load-unload cycles:\\s+(.*)$") - // Current Drive Temperature: 34 C - sasTempAttr = regexp.MustCompile("^Current Drive Temperature:\\s+(.*)\\s+C(.*)$") - // Temperature: 38 Celsius - nvmeTempAttr = regexp.MustCompile("^Temperature:\\s+(.*)\\s+(.*)$") - // Power Cycles: 472 - nvmePowerCycleAttr = regexp.MustCompile("^Power Cycles:\\s+(.*)$") - // Power On Hours: 6,038 - nvmePowerOnAttr = regexp.MustCompile("^Power On Hours:\\s+(.*)$") + // sasNvmeAttr is a SAS or NVME SMART attribute + sasNvmeAttr = regexp.MustCompile(`^([^:]+):\s+(.+)$`) // ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE // 1 Raw_Read_Error_Rate -O-RC- 200 200 000 - 0 @@ -62,6 +52,64 @@ var ( "194": "temp_c", "199": "udma_crc_errors", } + + sasNvmeAttributes = map[string]struct { + ID string + Name string + Parse func(fields, deviceFields map[string]interface{}, str string) error + }{ + "Accumulated start-stop cycles": { + ID: "4", + Name: "Start_Stop_Count", + }, + "Accumulated load-unload cycles": { + ID: "193", + Name: "Load_Cycle_Count", + }, + "Current Drive 
Temperature": { + ID: "194", + Name: "Temperature_Celsius", + Parse: parseTemperature, + }, + "Temperature": { + ID: "194", + Name: "Temperature_Celsius", + Parse: parseTemperature, + }, + "Power Cycles": { + ID: "12", + Name: "Power_Cycle_Count", + }, + "Power On Hours": { + ID: "9", + Name: "Power_On_Hours", + }, + "Media and Data Integrity Errors": { + Name: "Media_and_Data_Integrity_Errors", + }, + "Error Information Log Entries": { + Name: "Error_Information_Log_Entries", + }, + "Critical Warning": { + Name: "Critical_Warning", + Parse: func(fields, _ map[string]interface{}, str string) error { + var value int64 + if _, err := fmt.Sscanf(str, "0x%x", &value); err != nil { + return err + } + + fields["raw_value"] = value + + return nil + }, + }, + "Available Spare": { + Name: "Available_Spare", + Parse: func(fields, deviceFields map[string]interface{}, str string) error { + return parseCommaSeperatedInt(fields, deviceFields, strings.TrimSuffix(str, "%")) + }, + }, + } ) type Smart struct { @@ -300,82 +348,24 @@ func gatherDisk(acc telegraf.Accumulator, usesudo, collectAttributes bool, smart } } else { if collectAttributes { - if startStop := sasStartStopAttr.FindStringSubmatch(line); len(startStop) > 1 { - tags["id"] = "4" - tags["name"] = "Start_Stop_Count" - i, err := strconv.ParseInt(strings.Replace(startStop[1], ",", "", -1), 10, 64) - if err != nil { - continue + if matches := sasNvmeAttr.FindStringSubmatch(line); len(matches) > 2 { + if attr, ok := sasNvmeAttributes[matches[1]]; ok { + tags["name"] = attr.Name + if attr.ID != "" { + tags["id"] = attr.ID + } + + parse := parseCommaSeperatedInt + if attr.Parse != nil { + parse = attr.Parse + } + + if err := parse(fields, deviceFields, matches[2]); err != nil { + continue + } + + acc.AddFields("smart_attribute", fields, tags) } - fields["raw_value"] = i - - acc.AddFields("smart_attribute", fields, tags) - continue - } - - if powerCycle := nvmePowerCycleAttr.FindStringSubmatch(line); len(powerCycle) > 1 { - tags["id"] = "12" - tags["name"] = "Power_Cycle_Count" - i, err := strconv.ParseInt(strings.Replace(powerCycle[1], ",", "", -1), 10, 64) - if err != nil { - continue - } - fields["raw_value"] = i - - acc.AddFields("smart_attribute", fields, tags) - continue - } - - if powerOn := nvmePowerOnAttr.FindStringSubmatch(line); len(powerOn) > 1 { - tags["id"] = "9" - tags["name"] = "Power_On_Hours" - i, err := strconv.ParseInt(strings.Replace(powerOn[1], ",", "", -1), 10, 64) - if err != nil { - continue - } - fields["raw_value"] = i - - acc.AddFields("smart_attribute", fields, tags) - continue - } - - if loadCycle := sasLoadCycleAttr.FindStringSubmatch(line); len(loadCycle) > 1 { - tags["id"] = "193" - tags["name"] = "Load_Cycle_Count" - i, err := strconv.ParseInt(strings.Replace(loadCycle[1], ",", "", -1), 10, 64) - if err != nil { - continue - } - fields["raw_value"] = i - - acc.AddFields("smart_attribute", fields, tags) - continue - } - - if temp := sasTempAttr.FindStringSubmatch(line); len(temp) > 1 { - tags["id"] = "194" - tags["name"] = "Temperature_Celsius" - tempC, err := strconv.ParseInt(temp[1], 10, 64) - if err != nil { - continue - } - fields["raw_value"] = tempC - deviceFields["temp_c"] = tempC - - acc.AddFields("smart_attribute", fields, tags) - } - - if temp := nvmeTempAttr.FindStringSubmatch(line); len(temp) > 1 { - tags["id"] = "194" - tags["name"] = "Temperature_Celsius" - tempC, err := strconv.ParseInt(temp[1], 10, 64) - if err != nil { - continue - } - fields["raw_value"] = tempC - deviceFields["temp_c"] = tempC - - 
acc.AddFields("smart_attribute", fields, tags) } } } @@ -424,6 +414,29 @@ func parseInt(str string) int64 { return 0 } +func parseCommaSeperatedInt(fields, _ map[string]interface{}, str string) error { + i, err := strconv.ParseInt(strings.Replace(str, ",", "", -1), 10, 64) + if err != nil { + return err + } + + fields["raw_value"] = i + + return nil +} + +func parseTemperature(fields, deviceFields map[string]interface{}, str string) error { + var temp int64 + if _, err := fmt.Sscanf(str, "%d C", &temp); err != nil { + return err + } + + fields["raw_value"] = temp + deviceFields["temp_c"] = temp + + return nil +} + func init() { m := Smart{} path, _ := exec.LookPath("smartctl") diff --git a/plugins/inputs/smart/smart_test.go b/plugins/inputs/smart/smart_test.go index c801c7a61..b9886bb08 100644 --- a/plugins/inputs/smart/smart_test.go +++ b/plugins/inputs/smart/smart_test.go @@ -484,6 +484,39 @@ func TestGatherNvme(t *testing.T) { }, time.Now(), ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": ".", + "name": "Media_and_Data_Integrity_Errors", + "serial_no": "D704940282?", + }, + map[string]interface{}{ + "raw_value": 0, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": ".", + "name": "Error_Information_Log_Entries", + "serial_no": "D704940282?", + }, + map[string]interface{}{ + "raw_value": 119699, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": ".", + "name": "Available_Spare", + "serial_no": "D704940282?", + }, + map[string]interface{}{ + "raw_value": 100, + }, + time.Now(), + ), testutil.MustMetric("smart_attribute", map[string]string{ "device": ".", @@ -496,6 +529,17 @@ func TestGatherNvme(t *testing.T) { }, time.Now(), ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": ".", + "name": "Critical_Warning", + "serial_no": "D704940282?", + }, + map[string]interface{}{ + "raw_value": int64(9), + }, + time.Now(), + ), } testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), @@ -934,7 +978,7 @@ Local Time is: Fri Jun 15 11:41:35 2018 UTC SMART overall-health self-assessment test result: PASSED SMART/Health Information (NVMe Log 0x02, NSID 0xffffffff) -Critical Warning: 0x00 +Critical Warning: 0x09 Temperature: 38 Celsius Available Spare: 100% Available Spare Threshold: 10% From ad4edf4a1261fa76d78a11ab9bc4750249f0eae4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 12 Jul 2019 14:28:50 -0700 Subject: [PATCH 1001/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9c842861b..ab9975181 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ - [#6050](https://github.com/influxdata/telegraf/pull/6050): Add native Go ping method to ping input plugin. - [#6074](https://github.com/influxdata/telegraf/pull/6074): Resume from last known offset in tail inputwhen reloading Telegraf. - [#6111](https://github.com/influxdata/telegraf/pull/6111): Add improved support for Azure SQL Database to sqlserver input. +- [#6079](https://github.com/influxdata/telegraf/pull/6079): Add extra attributes for NVMe devices to smart input. 
#### Bugfixes From 93584f7ce7cbc34c170e28299c3decf8b2a823b7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 12 Jul 2019 16:12:30 -0700 Subject: [PATCH 1002/1815] Update go-mssqldb version to latest (#6100) --- Gopkg.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index f5bc5a0cd..495dc1445 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -270,14 +270,14 @@ [[projects]] branch = "master" - digest = "1:7fdc54859cd901c25b9d8db964410a4e0d98fa0dca267fe4cf49c0eede5e06c2" + digest = "1:44330613a423ea575a90180ee9bf6f49de87df42725488764da71e18865c1469" name = "github.com/denisenkom/go-mssqldb" packages = [ ".", "internal/cp", ] pruneopts = "" - revision = "1eb28afdf9b6e56cf673badd47545f844fe81103" + revision = "2be1aa521ff4499e74b7861a2779ba1e96e3e2c5" [[projects]] digest = "1:6098222470fe0172157ce9bbef5d2200df4edde17ee649c5d6e48330e4afa4c6" From 3f424b88c961abcb54aead979dc6ea23ebd834bd Mon Sep 17 00:00:00 2001 From: George Date: Mon, 15 Jul 2019 10:24:47 +0100 Subject: [PATCH 1003/1815] Add docker_devicemapper measurement to docker input plugin (#6084) --- plugins/inputs/docker/README.md | 39 ++++++++++----- plugins/inputs/docker/docker.go | 47 ++++++++++++++++++- plugins/inputs/docker/docker_test.go | 60 +++++++++++++++++++++++- plugins/inputs/docker/docker_testdata.go | 2 +- 4 files changed, 134 insertions(+), 14 deletions(-) diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index e8c8d6366..c909b6683 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -106,7 +106,7 @@ may prefer to exclude them: - unit - engine_host - server_version - - fields: + + fields: - n_used_file_descriptors - n_cpus - n_containers @@ -117,32 +117,49 @@ may prefer to exclude them: - n_goroutines - n_listener_events - memory_total - - pool_blocksize (requires devicemapper storage driver) + - pool_blocksize (requires devicemapper storage driver) (deprecated see: `docker_devicemapper`) The `docker_data` and `docker_metadata` measurements are available only for some storage drivers such as devicemapper. -- docker_data ++ docker_data (deprecated see: `docker_devicemapper`) - tags: - unit - engine_host - server_version - - fields: + + fields: - available - total - used -- docker_metadata +- docker_metadata (deprecated see: `docker_devicemapper`) - tags: - unit - engine_host - server_version - - fields: + + fields: - available - total - used -- docker_container_mem +The above measurements for the devicemapper storage driver can now be found in the new `docker_devicemapper` measurement + +- docker_devicemapper + - tags: + - engine_host + - server_version + - pool_name + + fields: + - pool_blocksize_bytes + - data_space_used_bytes + - data_space_total_bytes + - data_space_available_bytes + - metadata_space_used_bytes + - metadata_space_total_bytes + - metadata_space_available_bytes + - thin_pool_minimum_free_space_bytes + ++ docker_container_mem - tags: - engine_host - server_version @@ -150,7 +167,7 @@ some storage drivers such as devicemapper. - container_name - container_status - container_version - - fields: + + fields: - total_pgmafault - cache - mapped_file @@ -195,7 +212,7 @@ some storage drivers such as devicemapper. - container_status - container_version - cpu - - fields: + + fields: - throttling_periods - throttling_throttled_periods - throttling_throttled_time @@ -206,7 +223,7 @@ some storage drivers such as devicemapper. 
- usage_percent - container_id -- docker_container_net ++ docker_container_net - tags: - engine_host - server_version @@ -215,7 +232,7 @@ some storage drivers such as devicemapper. - container_status - container_version - network - - fields: + + fields: - rx_dropped - rx_bytes - rx_errors diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 355b8cd8a..00e6f58e7 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -322,21 +322,50 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { "n_goroutines": info.NGoroutines, "n_listener_events": info.NEventsListener, } + // Add metrics acc.AddFields("docker", fields, tags, now) acc.AddFields("docker", map[string]interface{}{"memory_total": info.MemTotal}, tags, now) + // Get storage metrics tags["unit"] = "bytes" + + var ( + // "docker_devicemapper" measurement fields + poolName string + deviceMapperFields = map[string]interface{}{} + ) + for _, rawData := range info.DriverStatus { + name := strings.ToLower(strings.Replace(rawData[0], " ", "_", -1)) + if name == "pool_name" { + poolName = rawData[1] + continue + } + // Try to convert string to int (bytes) value, err := parseSize(rawData[1]) if err != nil { continue } - name := strings.ToLower(strings.Replace(rawData[0], " ", "_", -1)) + + switch name { + case "pool_blocksize", + "base_device_size", + "data_space_used", + "data_space_total", + "data_space_available", + "metadata_space_used", + "metadata_space_total", + "metadata_space_available", + "thin_pool_minimum_free_space": + deviceMapperFields[name+"_bytes"] = value + } + + // Legacy devicemapper measurements if name == "pool_blocksize" { // pool blocksize acc.AddFields("docker", @@ -353,12 +382,28 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { metadataFields[fieldName] = value } } + if len(dataFields) > 0 { acc.AddFields("docker_data", dataFields, tags, now) } + if len(metadataFields) > 0 { acc.AddFields("docker_metadata", metadataFields, tags, now) } + + if len(deviceMapperFields) > 0 { + tags := map[string]string{ + "engine_host": d.engineHost, + "server_version": d.serverVersion, + } + + if poolName != "" { + tags["pool_name"] = poolName + } + + acc.AddFields("docker_devicemapper", deviceMapperFields, tags, now) + } + return nil } diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index e29c6afe9..77228b00c 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -702,6 +702,29 @@ func TestDockerGatherInfo(t *testing.T) { }, ) + acc.AssertContainsTaggedFields(t, + "docker", + map[string]interface{}{ + "memory_total": int64(3840757760), + }, + map[string]string{ + "engine_host": "absol", + "server_version": "17.09.0-ce", + }, + ) + + acc.AssertContainsTaggedFields(t, + "docker", + map[string]interface{}{ + "pool_blocksize": int64(65540), + }, + map[string]string{ + "engine_host": "absol", + "server_version": "17.09.0-ce", + "unit": "bytes", + }, + ) + acc.AssertContainsTaggedFields(t, "docker_data", map[string]interface{}{ @@ -710,11 +733,46 @@ func TestDockerGatherInfo(t *testing.T) { "available": int64(36530000000), }, map[string]string{ - "unit": "bytes", "engine_host": "absol", "server_version": "17.09.0-ce", + "unit": "bytes", }, ) + + acc.AssertContainsTaggedFields(t, + "docker_metadata", + map[string]interface{}{ + "used": int64(20970000), + "total": int64(2146999999), + "available": int64(2126999999), + }, + map[string]string{ + "engine_host": "absol", + 
"server_version": "17.09.0-ce", + "unit": "bytes", + }, + ) + + acc.AssertContainsTaggedFields(t, + "docker_devicemapper", + map[string]interface{}{ + "base_device_size_bytes": int64(10740000000), + "pool_blocksize_bytes": int64(65540), + "data_space_used_bytes": int64(17300000000), + "data_space_total_bytes": int64(107400000000), + "data_space_available_bytes": int64(36530000000), + "metadata_space_used_bytes": int64(20970000), + "metadata_space_total_bytes": int64(2146999999), + "metadata_space_available_bytes": int64(2126999999), + "thin_pool_minimum_free_space_bytes": int64(10740000000), + }, + map[string]string{ + "engine_host": "absol", + "server_version": "17.09.0-ce", + "pool_name": "docker-8:1-1182287-pool", + }, + ) + acc.AssertContainsTaggedFields(t, "docker_container_cpu", map[string]interface{}{ diff --git a/plugins/inputs/docker/docker_testdata.go b/plugins/inputs/docker/docker_testdata.go index ba5c2ffa1..d50b80b9a 100644 --- a/plugins/inputs/docker/docker_testdata.go +++ b/plugins/inputs/docker/docker_testdata.go @@ -47,7 +47,7 @@ var info = types.Info{ HTTPSProxy: "", Labels: []string{}, MemoryLimit: false, - DriverStatus: [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}}, + DriverStatus: [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Base Device Size", "10.74 GB"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}, {"Thin Pool Minimum Free Space", "10.74GB"}}, NFd: 19, HTTPProxy: "", Driver: "devicemapper", From 329179c0ff4013fe2a29a2056c0bb84ecddd5973 Mon Sep 17 00:00:00 2001 From: denzilribeiro Date: Mon, 15 Jul 2019 13:46:52 -0500 Subject: [PATCH 1004/1815] Fix sql_instance tag with sqlserver managed instance (#6116) --- plugins/inputs/sqlserver/sqlserver.go | 41 +++++++++++++-------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index aaad6006c..f491954f7 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -1300,7 +1300,7 @@ const sqlAzureDBResourceGovernance string = ` IF SERVERPROPERTY('EngineEdition') = 5 -- Is this Azure SQL DB? 
SELECT 'sqlserver_db_resource_governance' AS [measurement], - server_name AS [sql_instance], + @@servername AS [sql_instance], DB_NAME() as [database_name], slo_name, dtu_limit, @@ -1335,27 +1335,24 @@ SELECT FROM sys.dm_user_db_resource_governance WITH (NOLOCK); ELSE - IF SERVERPROPERTY('EngineEdition') = 8 -- Is this Azure SQL Managed Instance? - BEGIN - SELECT - 'sqlserver_instance_resource_governance' AS [measurement], - server_name AS [sql_instance], - instance_cap_cpu, - instance_max_log_rate, - instance_max_worker_threads, - volume_local_iops, - volume_external_xstore_iops, - volume_managed_xstore_iops, - volume_type_local_iops, - volume_type_managed_xstore_iops, - volume_type_external_xstore_iops, - volume_external_xstore_iops, - volume_local_max_oustanding_io, - volume_managed_xstore_max_oustanding_io, - volume_external_xstore_max_oustanding_io, - tempdb_log_file_number - from - sys.dm_instance_resource_governance +BEGIN + IF SERVERPROPERTY('EngineEdition') = 8 -- Is this Azure SQL Managed Instance? + SELECT + 'sqlserver_instance_resource_governance' AS [measurement], + @@SERVERNAME AS [sql_instance], + instance_cap_cpu, + instance_max_log_rate, + instance_max_worker_threads, + tempdb_log_file_number, + volume_local_iops, + volume_external_xstore_iops, + volume_managed_xstore_iops, + volume_type_local_iops as voltype_local_iops, + volume_type_managed_xstore_iops as voltype_man_xtore_iops, + volume_type_external_xstore_iops as voltype_ext_xtore_iops, + volume_external_xstore_iops as vol_ext_xtore_iops + from + sys.dm_instance_resource_governance END; ` From 66beeb6523b3fa988b8c4420e7f2fd2b9cc653ff Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 15 Jul 2019 17:41:29 -0600 Subject: [PATCH 1005/1815] Add basic auth support to elasticsearch input (#6122) --- plugins/inputs/elasticsearch/README.md | 4 +++ plugins/inputs/elasticsearch/elasticsearch.go | 28 +++++++++++++++++-- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md index a9d4db0c8..445e6f82e 100644 --- a/plugins/inputs/elasticsearch/README.md +++ b/plugins/inputs/elasticsearch/README.md @@ -40,6 +40,10 @@ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/curre ## "breaker". Per default, all stats are gathered. # node_stats = ["jvm", "http"] + ## HTTP Basic Authentication username and password. + # username = "" + # password = "" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 70377320f..71ef2a01a 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -119,6 +119,10 @@ const sampleConfig = ` ## "breaker". Per default, all stats are gathered. # node_stats = ["jvm", "http"] + ## HTTP Basic Authentication username and password. 
+ # username = "" + # password = "" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -138,6 +142,8 @@ type Elasticsearch struct { ClusterStats bool ClusterStatsOnlyFromMaster bool NodeStats []string + Username string `toml:"username"` + Password string `toml:"password"` tls.ClientConfig client *http.Client @@ -455,7 +461,16 @@ func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator) } func (e *Elasticsearch) getCatMaster(url string) (string, error) { - r, err := e.client.Get(url) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return "", err + } + + if e.Username != "" || e.Password != "" { + req.SetBasicAuth(e.Username, e.Password) + } + + r, err := e.client.Do(req) if err != nil { return "", err } @@ -478,7 +493,16 @@ func (e *Elasticsearch) getCatMaster(url string) (string, error) { } func (e *Elasticsearch) gatherJsonData(url string, v interface{}) error { - r, err := e.client.Get(url) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return err + } + + if e.Username != "" || e.Password != "" { + req.SetBasicAuth(e.Username, e.Password) + } + + r, err := e.client.Do(req) if err != nil { return err } From 96530c220f880a7522bff1dc7fcf95b9703e4e2b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 15 Jul 2019 16:45:25 -0700 Subject: [PATCH 1006/1815] Compile against gjson 1.3.0 (#6101) --- Gopkg.lock | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 495dc1445..5a4f12706 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1109,12 +1109,12 @@ version = "v1.2.2" [[projects]] - digest = "1:e139a0dfe24e723193005b291ed82a975041718cfcab9136aa6c9540df70a4ff" + digest = "1:d2e45c5ed1c65576448b7adca867fc826f0c4710299d560819f1fa376189b70f" name = "github.com/tidwall/gjson" packages = ["."] pruneopts = "" - revision = "f123b340873a0084cb27267eddd8ff615115fbff" - version = "v1.1.2" + revision = "d7c940e59395fdcaff4584cb442b2e7808f6711e" + version = "v1.3.0" [[projects]] branch = "master" @@ -1124,6 +1124,14 @@ pruneopts = "" revision = "1731857f09b1f38450e2c12409748407822dc6be" +[[projects]] + digest = "1:1d7cab09854959fe179fe2f209400626f3dda9ec8e8b719c661d7b2add7b54b5" + name = "github.com/tidwall/pretty" + packages = ["."] + pruneopts = "" + revision = "1166b9ac2b65e46a43d8618d30d1554f4652d49b" + version = "v1.0.0" + [[projects]] digest = "1:026b6ceaabbacaa147e94a63579efc3d3c73e00c73b67fa5c43ab46191ed04eb" name = "github.com/vishvananda/netlink" From 41176dd1f1123f83fa44953938940221b263d912 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 15 Jul 2019 16:48:19 -0700 Subject: [PATCH 1007/1815] Support string field glob matching in json parser (#6102) --- plugins/parsers/json/README.md | 2 +- plugins/parsers/json/parser.go | 119 ++++++++------ plugins/parsers/json/parser_test.go | 244 ++++++++++++++++++---------- plugins/parsers/registry.go | 62 ++----- 4 files changed, 245 insertions(+), 182 deletions(-) diff --git a/plugins/parsers/json/README.md b/plugins/parsers/json/README.md index 60e1f3f9e..10bcf21bd 100644 --- a/plugins/parsers/json/README.md +++ b/plugins/parsers/json/README.md @@ -31,7 +31,7 @@ ignored unless specified in the `tag_key` or `json_string_fields` options. "my_tag_2" ] - ## String fields is an array of keys that should be added as string fields. + ## Array of glob pattern strings keys that should be added as string fields. json_string_fields = [] ## Name key is the key to use as the measurement name. 
diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index ebe31fd23..6df5d9196 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -9,30 +9,61 @@ import ( "strings" "time" - "github.com/tidwall/gjson" - "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" + "github.com/tidwall/gjson" ) var ( utf8BOM = []byte("\xef\xbb\xbf") ) -type JSONParser struct { - MetricName string - TagKeys []string - StringFields []string - JSONNameKey string - JSONQuery string - JSONTimeKey string - JSONTimeFormat string - JSONTimezone string - DefaultTags map[string]string +type Config struct { + MetricName string + TagKeys []string + NameKey string + StringFields []string + Query string + TimeKey string + TimeFormat string + Timezone string + DefaultTags map[string]string } -func (p *JSONParser) parseArray(buf []byte) ([]telegraf.Metric, error) { +type Parser struct { + metricName string + tagKeys []string + stringFields filter.Filter + nameKey string + query string + timeKey string + timeFormat string + timezone string + defaultTags map[string]string +} + +func New(config *Config) (*Parser, error) { + stringFilter, err := filter.Compile(config.StringFields) + if err != nil { + return nil, err + } + + return &Parser{ + metricName: config.MetricName, + tagKeys: config.TagKeys, + nameKey: config.NameKey, + stringFields: stringFilter, + query: config.Query, + timeKey: config.TimeKey, + timeFormat: config.TimeFormat, + timezone: config.Timezone, + defaultTags: config.DefaultTags, + }, nil +} + +func (p *Parser) parseArray(buf []byte) ([]telegraf.Metric, error) { metrics := make([]telegraf.Metric, 0) var jsonOut []map[string]interface{} @@ -50,9 +81,9 @@ func (p *JSONParser) parseArray(buf []byte) ([]telegraf.Metric, error) { return metrics, nil } -func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]interface{}) ([]telegraf.Metric, error) { +func (p *Parser) parseObject(metrics []telegraf.Metric, jsonOut map[string]interface{}) ([]telegraf.Metric, error) { tags := make(map[string]string) - for k, v := range p.DefaultTags { + for k, v := range p.defaultTags { tags[k] = v } @@ -62,33 +93,35 @@ func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]i return nil, err } + name := p.metricName + //checks if json_name_key is set - if p.JSONNameKey != "" { - switch field := f.Fields[p.JSONNameKey].(type) { + if p.nameKey != "" { + switch field := f.Fields[p.nameKey].(type) { case string: - p.MetricName = field + name = field } } //if time key is specified, set it to nTime nTime := time.Now().UTC() - if p.JSONTimeKey != "" { - if p.JSONTimeFormat == "" { + if p.timeKey != "" { + if p.timeFormat == "" { err := fmt.Errorf("use of 'json_time_key' requires 'json_time_format'") return nil, err } - if f.Fields[p.JSONTimeKey] == nil { + if f.Fields[p.timeKey] == nil { err := fmt.Errorf("JSON time key could not be found") return nil, err } - nTime, err = internal.ParseTimestampWithLocation(f.Fields[p.JSONTimeKey], p.JSONTimeFormat, p.JSONTimezone) + nTime, err = internal.ParseTimestampWithLocation(f.Fields[p.timeKey], p.timeFormat, p.timezone) if err != nil { return nil, err } - delete(f.Fields, p.JSONTimeKey) + delete(f.Fields, p.timeKey) //if the year is 0, set to current year if nTime.Year() == 0 { @@ -97,7 +130,7 @@ func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]i } tags, nFields := 
p.switchFieldToTag(tags, f.Fields) - metric, err := metric.New(p.MetricName, tags, nFields, nTime) + metric, err := metric.New(name, tags, nFields, nTime) if err != nil { return nil, err } @@ -108,8 +141,8 @@ func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]i //search for TagKeys that match fieldnames and add them to tags //will delete any strings/bools that shouldn't be fields //assumes that any non-numeric values in TagKeys should be displayed as tags -func (p *JSONParser) switchFieldToTag(tags map[string]string, fields map[string]interface{}) (map[string]string, map[string]interface{}) { - for _, name := range p.TagKeys { +func (p *Parser) switchFieldToTag(tags map[string]string, fields map[string]interface{}) (map[string]string, map[string]interface{}) { + for _, name := range p.tagKeys { //switch any fields in tagkeys into tags if fields[name] == nil { continue @@ -130,31 +163,23 @@ func (p *JSONParser) switchFieldToTag(tags map[string]string, fields map[string] } //remove any additional string/bool values from fields - for k := range fields { - //check if field is in StringFields - sField := false - for _, v := range p.StringFields { - if v == k { - sField = true - } - } - if sField { - continue - } - - switch fields[k].(type) { + for fk := range fields { + switch fields[fk].(type) { case string: - delete(fields, k) + if p.stringFields != nil && p.stringFields.Match(fk) { + continue + } + delete(fields, fk) case bool: - delete(fields, k) + delete(fields, fk) } } return tags, fields } -func (p *JSONParser) Parse(buf []byte) ([]telegraf.Metric, error) { - if p.JSONQuery != "" { - result := gjson.GetBytes(buf, p.JSONQuery) +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { + if p.query != "" { + result := gjson.GetBytes(buf, p.query) buf = []byte(result.Raw) if !result.IsArray() && !result.IsObject() { err := fmt.Errorf("E! 
Query path must lead to a JSON object or array of objects, but lead to: %v", result.Type) @@ -181,7 +206,7 @@ func (p *JSONParser) Parse(buf []byte) ([]telegraf.Metric, error) { return p.parseArray(buf) } -func (p *JSONParser) ParseLine(line string) (telegraf.Metric, error) { +func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { metrics, err := p.Parse([]byte(line + "\n")) if err != nil { @@ -195,8 +220,8 @@ func (p *JSONParser) ParseLine(line string) (telegraf.Metric, error) { return metrics[0], nil } -func (p *JSONParser) SetDefaultTags(tags map[string]string) { - p.DefaultTags = tags +func (p *Parser) SetDefaultTags(tags map[string]string) { + p.defaultTags = tags } type JSONFlattener struct { diff --git a/plugins/parsers/json/parser_test.go b/plugins/parsers/json/parser_test.go index 2db9ad78f..f656a96a1 100644 --- a/plugins/parsers/json/parser_test.go +++ b/plugins/parsers/json/parser_test.go @@ -53,9 +53,10 @@ const validJSONArrayTags = ` ` func TestParseValidJSON(t *testing.T) { - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", - } + }) + require.NoError(t, err) // Most basic vanilla test metrics, err := parser.Parse([]byte(validJSON)) @@ -102,9 +103,10 @@ func TestParseValidJSON(t *testing.T) { } func TestParseLineValidJSON(t *testing.T) { - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", - } + }) + require.NoError(t, err) // Most basic vanilla test metric, err := parser.ParseLine(validJSON) @@ -138,11 +140,12 @@ func TestParseLineValidJSON(t *testing.T) { } func TestParseInvalidJSON(t *testing.T) { - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", - } + }) + require.NoError(t, err) - _, err := parser.Parse([]byte(invalidJSON)) + _, err = parser.Parse([]byte(invalidJSON)) require.Error(t, err) _, err = parser.Parse([]byte(invalidJSON2)) require.Error(t, err) @@ -152,10 +155,12 @@ func TestParseInvalidJSON(t *testing.T) { func TestParseWithTagKeys(t *testing.T) { // Test that strings not matching tag keys are ignored - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", TagKeys: []string{"wrongtagkey"}, - } + }) + require.NoError(t, err) + metrics, err := parser.Parse([]byte(validJSONTags)) require.NoError(t, err) require.Len(t, metrics, 1) @@ -167,10 +172,12 @@ func TestParseWithTagKeys(t *testing.T) { require.Equal(t, map[string]string{}, metrics[0].Tags()) // Test that single tag key is found and applied - parser = JSONParser{ + parser, err = New(&Config{ MetricName: "json_test", TagKeys: []string{"mytag"}, - } + }) + require.NoError(t, err) + metrics, err = parser.Parse([]byte(validJSONTags)) require.NoError(t, err) require.Len(t, metrics, 1) @@ -184,10 +191,11 @@ func TestParseWithTagKeys(t *testing.T) { }, metrics[0].Tags()) // Test that both tag keys are found and applied - parser = JSONParser{ + parser, err = New(&Config{ MetricName: "json_test", TagKeys: []string{"mytag", "othertag"}, - } + }) + require.NoError(t, err) metrics, err = parser.Parse([]byte(validJSONTags)) require.NoError(t, err) require.Len(t, metrics, 1) @@ -204,10 +212,11 @@ func TestParseWithTagKeys(t *testing.T) { func TestParseLineWithTagKeys(t *testing.T) { // Test that strings not matching tag keys are ignored - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", TagKeys: []string{"wrongtagkey"}, - } + }) + require.NoError(t, err) metric, err := parser.ParseLine(validJSONTags) require.NoError(t, err) require.Equal(t, "json_test", metric.Name()) @@ -218,10 
+227,12 @@ func TestParseLineWithTagKeys(t *testing.T) { require.Equal(t, map[string]string{}, metric.Tags()) // Test that single tag key is found and applied - parser = JSONParser{ + parser, err = New(&Config{ MetricName: "json_test", TagKeys: []string{"mytag"}, - } + }) + require.NoError(t, err) + metric, err = parser.ParseLine(validJSONTags) require.NoError(t, err) require.Equal(t, "json_test", metric.Name()) @@ -234,10 +245,12 @@ func TestParseLineWithTagKeys(t *testing.T) { }, metric.Tags()) // Test that both tag keys are found and applied - parser = JSONParser{ + parser, err = New(&Config{ MetricName: "json_test", TagKeys: []string{"mytag", "othertag"}, - } + }) + require.NoError(t, err) + metric, err = parser.ParseLine(validJSONTags) require.NoError(t, err) require.Equal(t, "json_test", metric.Name()) @@ -252,13 +265,14 @@ func TestParseLineWithTagKeys(t *testing.T) { } func TestParseValidJSONDefaultTags(t *testing.T) { - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", TagKeys: []string{"mytag"}, DefaultTags: map[string]string{ "t4g": "default", }, - } + }) + require.NoError(t, err) // Most basic vanilla test metrics, err := parser.Parse([]byte(validJSON)) @@ -288,13 +302,14 @@ func TestParseValidJSONDefaultTags(t *testing.T) { // Test that default tags are overridden by tag keys func TestParseValidJSONDefaultTagsOverride(t *testing.T) { - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", TagKeys: []string{"mytag"}, DefaultTags: map[string]string{ "mytag": "default", }, - } + }) + require.NoError(t, err) // Most basic vanilla test metrics, err := parser.Parse([]byte(validJSON)) @@ -323,9 +338,10 @@ func TestParseValidJSONDefaultTagsOverride(t *testing.T) { // Test that json arrays can be parsed func TestParseValidJSONArray(t *testing.T) { - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_array_test", - } + }) + require.NoError(t, err) // Most basic vanilla test metrics, err := parser.Parse([]byte(validJSONArray)) @@ -358,10 +374,12 @@ func TestParseValidJSONArray(t *testing.T) { func TestParseArrayWithTagKeys(t *testing.T) { // Test that strings not matching tag keys are ignored - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_array_test", TagKeys: []string{"wrongtagkey"}, - } + }) + require.NoError(t, err) + metrics, err := parser.Parse([]byte(validJSONArrayTags)) require.NoError(t, err) require.Len(t, metrics, 2) @@ -380,10 +398,12 @@ func TestParseArrayWithTagKeys(t *testing.T) { require.Equal(t, map[string]string{}, metrics[1].Tags()) // Test that single tag key is found and applied - parser = JSONParser{ + parser, err = New(&Config{ MetricName: "json_array_test", TagKeys: []string{"mytag"}, - } + }) + require.NoError(t, err) + metrics, err = parser.Parse([]byte(validJSONArrayTags)) require.NoError(t, err) require.Len(t, metrics, 2) @@ -406,10 +426,12 @@ func TestParseArrayWithTagKeys(t *testing.T) { }, metrics[1].Tags()) // Test that both tag keys are found and applied - parser = JSONParser{ + parser, err = New(&Config{ MetricName: "json_array_test", TagKeys: []string{"mytag", "othertag"}, - } + }) + require.NoError(t, err) + metrics, err = parser.Parse([]byte(validJSONArrayTags)) require.NoError(t, err) require.Len(t, metrics, 2) @@ -437,12 +459,13 @@ func TestParseArrayWithTagKeys(t *testing.T) { var jsonBOM = []byte("\xef\xbb\xbf[{\"value\":17}]") func TestHttpJsonBOM(t *testing.T) { - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", - } + }) + 
require.NoError(t, err) // Most basic vanilla test - _, err := parser.Parse(jsonBOM) + _, err = parser.Parse(jsonBOM) require.NoError(t, err) } @@ -466,15 +489,16 @@ func TestJSONParseNestedArray(t *testing.T) { } }` - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", TagKeys: []string{"total_devices", "total_threads", "shares_tester3_fun"}, - } + }) + require.NoError(t, err) metrics, err := parser.Parse([]byte(testString)) log.Printf("m[0] name: %v, tags: %v, fields: %v", metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields()) require.NoError(t, err) - require.Equal(t, len(parser.TagKeys), len(metrics[0].Tags())) + require.Equal(t, 3, len(metrics[0].Tags())) } func TestJSONQueryErrorOnArray(t *testing.T) { @@ -494,13 +518,14 @@ func TestJSONQueryErrorOnArray(t *testing.T) { } }` - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", TagKeys: []string{}, - JSONQuery: "shares.myArr", - } + Query: "shares.myArr", + }) + require.NoError(t, err) - _, err := parser.Parse([]byte(testString)) + _, err = parser.Parse([]byte(testString)) require.Error(t, err) } @@ -527,11 +552,12 @@ func TestArrayOfObjects(t *testing.T) { "more_stuff":"junk" }` - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", TagKeys: []string{"ice"}, - JSONQuery: "meta.shares", - } + Query: "meta.shares", + }) + require.NoError(t, err) metrics, err := parser.Parse([]byte(testString)) require.NoError(t, err) @@ -553,12 +579,13 @@ func TestUseCaseJSONQuery(t *testing.T) { } }` - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", StringFields: []string{"last"}, TagKeys: []string{"first"}, - JSONQuery: "obj.friends", - } + Query: "obj.friends", + }) + require.NoError(t, err) metrics, err := parser.Parse([]byte(testString)) require.NoError(t, err) @@ -588,11 +615,12 @@ func TestTimeParser(t *testing.T) { } ]` - parser := JSONParser{ - MetricName: "json_test", - JSONTimeKey: "b_time", - JSONTimeFormat: "02 Jan 06 15:04 MST", - } + parser, err := New(&Config{ + MetricName: "json_test", + TimeKey: "b_time", + TimeFormat: "02 Jan 06 15:04 MST", + }) + require.NoError(t, err) metrics, err := parser.Parse([]byte(testString)) require.NoError(t, err) require.Equal(t, 2, len(metrics)) @@ -604,12 +632,13 @@ func TestTimeParserWithTimezone(t *testing.T) { "time": "04 Jan 06 15:04" }` - parser := JSONParser{ - MetricName: "json_test", - JSONTimeKey: "time", - JSONTimeFormat: "02 Jan 06 15:04", - JSONTimezone: "America/New_York", - } + parser, err := New(&Config{ + MetricName: "json_test", + TimeKey: "time", + TimeFormat: "02 Jan 06 15:04", + Timezone: "America/New_York", + }) + require.NoError(t, err) metrics, err := parser.Parse([]byte(testString)) require.NoError(t, err) require.Equal(t, 1, len(metrics)) @@ -638,11 +667,13 @@ func TestUnixTimeParser(t *testing.T) { } ]` - parser := JSONParser{ - MetricName: "json_test", - JSONTimeKey: "b_time", - JSONTimeFormat: "unix", - } + parser, err := New(&Config{ + MetricName: "json_test", + TimeKey: "b_time", + TimeFormat: "unix", + }) + require.NoError(t, err) + metrics, err := parser.Parse([]byte(testString)) require.NoError(t, err) require.Equal(t, 2, len(metrics)) @@ -671,11 +702,13 @@ func TestUnixMsTimeParser(t *testing.T) { } ]` - parser := JSONParser{ - MetricName: "json_test", - JSONTimeKey: "b_time", - JSONTimeFormat: "unix_ms", - } + parser, err := New(&Config{ + MetricName: "json_test", + TimeKey: "b_time", + TimeFormat: "unix_ms", + }) + require.NoError(t, err) + metrics, err := 
parser.Parse([]byte(testString)) require.NoError(t, err) require.Equal(t, 2, len(metrics)) @@ -693,11 +726,12 @@ func TestTimeErrors(t *testing.T) { "my_tag_2": "baz" }` - parser := JSONParser{ - MetricName: "json_test", - JSONTimeKey: "b_time", - JSONTimeFormat: "02 January 06 15:04 MST", - } + parser, err := New(&Config{ + MetricName: "json_test", + TimeKey: "b_time", + TimeFormat: "02 January 06 15:04 MST", + }) + require.NoError(t, err) metrics, err := parser.Parse([]byte(testString)) require.Error(t, err) @@ -712,11 +746,12 @@ func TestTimeErrors(t *testing.T) { "my_tag_2": "baz" }` - parser = JSONParser{ - MetricName: "json_test", - JSONTimeKey: "b_time", - JSONTimeFormat: "02 January 06 15:04 MST", - } + parser, err = New(&Config{ + MetricName: "json_test", + TimeKey: "b_time", + TimeFormat: "02 January 06 15:04 MST", + }) + require.NoError(t, err) metrics, err = parser.Parse([]byte(testString2)) log.Printf("err: %v", err) @@ -736,9 +771,10 @@ func TestNameKey(t *testing.T) { "my_tag_2": "baz" }` - parser := JSONParser{ - JSONNameKey: "b_c", - } + parser, err := New(&Config{ + NameKey: "b_c", + }) + require.NoError(t, err) metrics, err := parser.Parse([]byte(testString)) require.NoError(t, err) @@ -751,11 +787,12 @@ func TestTimeKeyDelete(t *testing.T) { "value": 42 }` - parser := JSONParser{ - MetricName: "json", - JSONTimeKey: "timestamp", - JSONTimeFormat: "unix", - } + parser, err := New(&Config{ + MetricName: "json", + TimeKey: "timestamp", + TimeFormat: "unix", + }) + require.NoError(t, err) metrics, err := parser.Parse([]byte(data)) require.NoError(t, err) @@ -768,3 +805,38 @@ func TestTimeKeyDelete(t *testing.T) { testutil.RequireMetricsEqual(t, expected, metrics) } + +func TestStringFieldGlob(t *testing.T) { + data := ` +{ + "color": "red", + "status": "error", + "time": "1541183052" +} +` + + parser, err := New(&Config{ + MetricName: "json", + StringFields: []string{"*"}, + TimeKey: "time", + TimeFormat: "unix", + }) + require.NoError(t, err) + + actual, err := parser.Parse([]byte(data)) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "json", + map[string]string{}, + map[string]interface{}{ + "color": "red", + "status": "error", + }, + time.Unix(1541183052, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, actual) +} diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 2e8d20819..9e4ea2b1f 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -70,7 +70,7 @@ type Config struct { // TagKeys only apply to JSON data TagKeys []string `toml:"tag_keys"` - // FieldKeys only apply to JSON + // Array of glob pattern strings keys that should be added as string fields. 
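+	// For example, json_string_fields = ["message", "error_*"] keeps only matching keys.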
JSONStringFields []string `toml:"json_string_fields"` JSONNameKey string `toml:"json_name_key"` @@ -153,15 +153,19 @@ func NewParser(config *Config) (Parser, error) { var parser Parser switch config.DataFormat { case "json": - parser = newJSONParser(config.MetricName, - config.TagKeys, - config.JSONNameKey, - config.JSONStringFields, - config.JSONQuery, - config.JSONTimeKey, - config.JSONTimeFormat, - config.JSONTimezone, - config.DefaultTags) + parser, err = json.New( + &json.Config{ + MetricName: config.MetricName, + TagKeys: config.TagKeys, + NameKey: config.JSONNameKey, + StringFields: config.JSONStringFields, + Query: config.JSONQuery, + TimeKey: config.JSONTimeKey, + TimeFormat: config.JSONTimeFormat, + Timezone: config.JSONTimezone, + DefaultTags: config.DefaultTags, + }, + ) case "value": parser, err = NewValueParser(config.MetricName, config.DataType, config.DefaultTags) @@ -283,31 +287,6 @@ func newCSVParser(metricName string, return parser, nil } -func newJSONParser( - metricName string, - tagKeys []string, - jsonNameKey string, - stringFields []string, - jsonQuery string, - timeKey string, - timeFormat string, - timezone string, - defaultTags map[string]string, -) Parser { - parser := &json.JSONParser{ - MetricName: metricName, - TagKeys: tagKeys, - StringFields: stringFields, - JSONNameKey: jsonNameKey, - JSONQuery: jsonQuery, - JSONTimeKey: timeKey, - JSONTimeFormat: timeFormat, - JSONTimezone: timezone, - DefaultTags: defaultTags, - } - return parser -} - func newGrokParser(metricName string, patterns []string, nPatterns []string, cPatterns string, cPatternFiles []string, @@ -326,19 +305,6 @@ func newGrokParser(metricName string, return &parser, err } -func NewJSONParser( - metricName string, - tagKeys []string, - defaultTags map[string]string, -) (Parser, error) { - parser := &json.JSONParser{ - MetricName: metricName, - TagKeys: tagKeys, - DefaultTags: defaultTags, - } - return parser, nil -} - func NewNagiosParser() (Parser, error) { return &nagios.NagiosParser{}, nil } From f22f12c99e65cd9d2a8b3def4379634662d78605 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 15 Jul 2019 17:08:11 -0700 Subject: [PATCH 1008/1815] Update changelog --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ab9975181..fcfb36351 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,12 +39,17 @@ - [#6074](https://github.com/influxdata/telegraf/pull/6074): Resume from last known offset in tail input when reloading Telegraf. - [#6111](https://github.com/influxdata/telegraf/pull/6111): Add improved support for Azure SQL Database to sqlserver input. - [#6079](https://github.com/influxdata/telegraf/pull/6079): Add extra attributes for NVMe devices to smart input. +- [#6084](https://github.com/influxdata/telegraf/pull/6084): Add docker_devicemapper measurement to docker input plugin. +- [#6122](https://github.com/influxdata/telegraf/pull/6122): Add basic auth support to elasticsearch input. +- [#6102](https://github.com/influxdata/telegraf/pull/6102): Support string field glob matching in json parser. +- [#6101](https://github.com/influxdata/telegraf/pull/6101): Update gjson to allow multipath syntax in json parser. #### Bugfixes - [#5692](https://github.com/influxdata/telegraf/issues/5692): Fix sensor read error stops reporting of all sensors in temp input. - [#4356](https://github.com/influxdata/telegraf/issues/4356): Fix double pct replacement in sysstat input. 
- [#6004](https://github.com/influxdata/telegraf/issues/6004): Fix race in master node detection in elasticsearch input. +- [#6100](https://github.com/influxdata/telegraf/issues/6100): Fix SSPI authentication not working in sqlserver input. ## v1.11.3 [unreleased] From 169fd647882c1e1f78a1cace6677fa8348aab8ac Mon Sep 17 00:00:00 2001 From: kden416 Date: Tue, 16 Jul 2019 03:10:42 +0300 Subject: [PATCH 1009/1815] Add list of possible container states to docker docs (#6099) --- plugins/inputs/docker/README.md | 2 ++ plugins/inputs/docker/docker.go | 2 ++ 2 files changed, 4 insertions(+) diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index c909b6683..1816107ea 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -32,6 +32,8 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/) ## Container states to include and exclude. Globs accepted. ## When empty only containers in the "running" state will be captured. + ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] + ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] # container_state_include = [] # container_state_exclude = [] diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 00e6f58e7..3c92ca278 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -96,6 +96,8 @@ var sampleConfig = ` ## Container states to include and exclude. Globs accepted. ## When empty only containers in the "running" state will be captured. + ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] + ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] # container_state_include = [] # container_state_exclude = [] From b15fe4a28e74bf9f14a712eb91500c6c37f8c5e7 Mon Sep 17 00:00:00 2001 From: Cristofer Gonzales Date: Mon, 15 Jul 2019 21:23:56 -0400 Subject: [PATCH 1010/1815] Initialize accumulator in statsd during Start (#6121) --- plugins/inputs/statsd/datadog_test.go | 16 +++++++++++----- plugins/inputs/statsd/statsd.go | 5 ++++- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/statsd/datadog_test.go b/plugins/inputs/statsd/datadog_test.go index 61762a2c4..9800d9e67 100644 --- a/plugins/inputs/statsd/datadog_test.go +++ b/plugins/inputs/statsd/datadog_test.go @@ -74,7 +74,8 @@ func TestEventGather(t *testing.T) { } acc := &testutil.Accumulator{} s := NewTestStatsd() - s.acc = acc + require.NoError(t, s.Start(acc)) + defer s.Stop() for i := range tests { t.Run(tests[i].name, func(t *testing.T) { @@ -379,11 +380,13 @@ func TestEvents(t *testing.T) { }, }, } + s := NewTestStatsd() + acc := &testutil.Accumulator{} + require.NoError(t, s.Start(acc)) + defer s.Stop() for i := range tests { t.Run(tests[i].name, func(t *testing.T) { - s := NewTestStatsd() - acc := &testutil.Accumulator{} - s.acc = acc + acc.ClearMetrics() err := s.parseEventMessage(tests[i].args.now, tests[i].args.message, tests[i].args.hostname) require.Nil(t, err) m := acc.Metrics[0] @@ -406,7 +409,10 @@ func TestEvents(t *testing.T) { func TestEventError(t *testing.T) { now := time.Now() s := NewTestStatsd() - s.acc = &testutil.Accumulator{} + acc := &testutil.Accumulator{} + require.NoError(t, s.Start(acc)) + defer s.Stop() + // missing length header err := s.parseEventMessage(now, 
"_e:title|text", "default-hostname") require.Error(t, err) diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 8979b9c02..107a6e388 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -309,11 +309,14 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error { return nil } -func (s *Statsd) Start(_ telegraf.Accumulator) error { +func (s *Statsd) Start(ac telegraf.Accumulator) error { if s.ParseDataDogTags { s.DataDogExtensions = true log.Printf("W! [inputs.statsd] The parse_data_dog_tags option is deprecated, use datadog_extensions instead.") } + + s.acc = ac + // Make data structures s.gauges = make(map[string]cachedgauge) s.counters = make(map[string]cachedcounter) From 35d689401d30da4f0e2c86de5bd44254d6efc9f8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 15 Jul 2019 18:28:54 -0700 Subject: [PATCH 1011/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fcfb36351..2939a5e15 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,6 +57,7 @@ - [#6054](https://github.com/influxdata/telegraf/issues/6054): Fix unable to reconnect after vCenter reboot in vsphere input. - [#6073](https://github.com/influxdata/telegraf/issues/6073): Handle unknown error in nvidia-smi output. +- [#6121](https://github.com/influxdata/telegraf/pull/6121): Fix panic in statd input when processing datadog events. ## v1.11.2 [2019-07-09] From a07f29c02f2feca19286b4fee5f9874bd145dbf3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 17 Jul 2019 14:31:57 -0700 Subject: [PATCH 1012/1815] Add missing rcode and zonestat to bind input (#6123) --- plugins/inputs/bind/bind_test.go | 38 +++++++++++++++++++++ plugins/inputs/bind/json_stats.go | 10 ++++++ plugins/inputs/bind/testdata/json/v1/server | 33 +++++++++++++++++- 3 files changed, 80 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/bind/bind_test.go b/plugins/inputs/bind/bind_test.go index 40c32aee3..f2bfbbf66 100644 --- a/plugins/inputs/bind/bind_test.go +++ b/plugins/inputs/bind/bind_test.go @@ -47,6 +47,36 @@ func TestBindJsonStats(t *testing.T) { {"STATUS", 0}, }, }, + { + "rcode", + []fieldSet{ + {"NOERROR", 1732}, + {"FORMERR", 0}, + {"SERVFAIL", 6}, + {"NXDOMAIN", 200}, + {"NOTIMP", 0}, + {"REFUSED", 6}, + {"REFUSED", 0}, + {"YXDOMAIN", 0}, + {"YXRRSET", 0}, + {"NXRRSET", 0}, + {"NOTAUTH", 0}, + {"NOTZONE", 0}, + {"RESERVED11", 0}, + {"RESERVED12", 0}, + {"RESERVED13", 0}, + {"RESERVED14", 0}, + {"RESERVED15", 0}, + {"BADVERS", 0}, + {"17", 0}, + {"18", 0}, + {"19", 0}, + {"20", 0}, + {"21", 0}, + {"22", 0}, + {"BADCOOKIE", 0}, + }, + }, { "qtype", []fieldSet{ @@ -96,6 +126,14 @@ func TestBindJsonStats(t *testing.T) { {"TCP6Open", 2}, }, }, + { + "zonestat", + []fieldSet{ + {"NotifyOutv4", 8}, + {"NotifyInv4", 5}, + {"SOAOutv4", 5}, + }, + }, } for _, tc := range testCases { diff --git a/plugins/inputs/bind/json_stats.go b/plugins/inputs/bind/json_stats.go index cf595c1a3..87b6065e2 100644 --- a/plugins/inputs/bind/json_stats.go +++ b/plugins/inputs/bind/json_stats.go @@ -16,6 +16,8 @@ import ( type jsonStats struct { OpCodes map[string]int QTypes map[string]int + RCodes map[string]int + ZoneStats map[string]int NSStats map[string]int SockStats map[string]int Views map[string]jsonView @@ -78,6 +80,10 @@ func (b *Bind) addStatsJSON(stats jsonStats, acc telegraf.Accumulator, urlTag st tags["type"] = "opcode" addJSONCounter(acc, tags, stats.OpCodes) + // RCodes stats + tags["type"] = "rcode" + 
addJSONCounter(acc, tags, stats.RCodes) + // Query RDATA types tags["type"] = "qtype" addJSONCounter(acc, tags, stats.QTypes) @@ -90,6 +96,10 @@ func (b *Bind) addStatsJSON(stats jsonStats, acc telegraf.Accumulator, urlTag st tags["type"] = "sockstat" addJSONCounter(acc, tags, stats.SockStats) + // Zonestats + tags["type"] = "zonestat" + addJSONCounter(acc, tags, stats.ZoneStats) + // Memory stats fields := map[string]interface{}{ "total_use": stats.Memory.TotalUse, diff --git a/plugins/inputs/bind/testdata/json/v1/server b/plugins/inputs/bind/testdata/json/v1/server index 53acd9067..060fab6b1 100644 --- a/plugins/inputs/bind/testdata/json/v1/server +++ b/plugins/inputs/bind/testdata/json/v1/server @@ -21,6 +21,32 @@ "RESERVED14":0, "RESERVED15":0 }, + "rcodes":{ + "NOERROR":1732, + "FORMERR":0, + "SERVFAIL":6, + "NXDOMAIN":200, + "NOTIMP":0, + "REFUSED":0, + "YXDOMAIN":0, + "YXRRSET":0, + "NXRRSET":0, + "NOTAUTH":0, + "NOTZONE":0, + "RESERVED11":0, + "RESERVED12":0, + "RESERVED13":0, + "RESERVED14":0, + "RESERVED15":0, + "BADVERS":0, + "17":0, + "18":0, + "19":0, + "20":0, + "21":0, + "22":0, + "BADCOOKIE":0 + }, "qtypes":{ "A":2, "PTR":7, @@ -40,6 +66,11 @@ "QryDuplicate":1, "QryUDP":13 }, + "zonestats":{ + "NotifyOutv4":8, + "NotifyInv4":5, + "SOAOutv4":5 + }, "views":{ "_default":{ "resolver":{ @@ -138,4 +169,4 @@ } } } -} \ No newline at end of file +} From e576048e0211d00f01fe217643a29c65acb220af Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 17 Jul 2019 15:03:09 -0700 Subject: [PATCH 1013/1815] Treat empty array as successful parse (#6130) --- plugins/parsers/json/parser.go | 71 ++++++++++++++--------------- plugins/parsers/json/parser_test.go | 52 +++++++++++++++++++-- 2 files changed, 83 insertions(+), 40 deletions(-) diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index 6df5d9196..7606b7629 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -3,6 +3,7 @@ package json import ( "bytes" "encoding/json" + "errors" "fmt" "log" "strconv" @@ -17,7 +18,8 @@ import ( ) var ( - utf8BOM = []byte("\xef\xbb\xbf") + utf8BOM = []byte("\xef\xbb\xbf") + ErrWrongType = errors.New("must be an object or an array of objects") ) type Config struct { @@ -63,32 +65,34 @@ func New(config *Config) (*Parser, error) { }, nil } -func (p *Parser) parseArray(buf []byte) ([]telegraf.Metric, error) { - metrics := make([]telegraf.Metric, 0) +func (p *Parser) parseArray(data []interface{}) ([]telegraf.Metric, error) { + results := make([]telegraf.Metric, 0) + + for _, item := range data { + switch v := item.(type) { + case map[string]interface{}: + metrics, err := p.parseObject(v) + if err != nil { + return nil, err + } + results = append(results, metrics...) 
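+		// any element that is not a JSON object (a bare number, string,
+		// or nested array) aborts the whole parse with ErrWrongType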
+ default: + return nil, ErrWrongType - var jsonOut []map[string]interface{} - err := json.Unmarshal(buf, &jsonOut) - if err != nil { - err = fmt.Errorf("unable to parse out as JSON Array, %s", err) - return nil, err - } - for _, item := range jsonOut { - metrics, err = p.parseObject(metrics, item) - if err != nil { - return nil, err } } - return metrics, nil + + return results, nil } -func (p *Parser) parseObject(metrics []telegraf.Metric, jsonOut map[string]interface{}) ([]telegraf.Metric, error) { +func (p *Parser) parseObject(data map[string]interface{}) ([]telegraf.Metric, error) { tags := make(map[string]string) for k, v := range p.defaultTags { tags[k] = v } f := JSONFlattener{} - err := f.FullFlattenJSON("", jsonOut, true, true) + err := f.FullFlattenJSON("", data, true, true) if err != nil { return nil, err } @@ -134,7 +138,7 @@ func (p *Parser) parseObject(metrics []telegraf.Metric, jsonOut map[string]inter if err != nil { return nil, err } - return append(metrics, metric), nil + return []telegraf.Metric{metric}, nil } //will take in field map with strings and bools, @@ -193,17 +197,20 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { return make([]telegraf.Metric, 0), nil } - if !isarray(buf) { - metrics := make([]telegraf.Metric, 0) - var jsonOut map[string]interface{} - err := json.Unmarshal(buf, &jsonOut) - if err != nil { - err = fmt.Errorf("unable to parse out as JSON, %s", err) - return nil, err - } - return p.parseObject(metrics, jsonOut) + var data interface{} + err := json.Unmarshal(buf, &data) + if err != nil { + return nil, err + } + + switch v := data.(type) { + case map[string]interface{}: + return p.parseObject(v) + case []interface{}: + return p.parseArray(v) + default: + return nil, ErrWrongType } - return p.parseArray(buf) } func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { @@ -288,13 +295,3 @@ func (f *JSONFlattener) FullFlattenJSON( } return nil } - -func isarray(buf []byte) bool { - ia := bytes.IndexByte(buf, '[') - ib := bytes.IndexByte(buf, '{') - if ia > -1 && ia < ib { - return true - } else { - return false - } -} diff --git a/plugins/parsers/json/parser_test.go b/plugins/parsers/json/parser_test.go index f656a96a1..44ae73af5 100644 --- a/plugins/parsers/json/parser_test.go +++ b/plugins/parsers/json/parser_test.go @@ -2,7 +2,6 @@ package json import ( "fmt" - "log" "testing" "time" @@ -496,7 +495,7 @@ func TestJSONParseNestedArray(t *testing.T) { require.NoError(t, err) metrics, err := parser.Parse([]byte(testString)) - log.Printf("m[0] name: %v, tags: %v, fields: %v", metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields()) + require.Len(t, metrics, 1) require.NoError(t, err) require.Equal(t, 3, len(metrics[0].Tags())) } @@ -754,7 +753,6 @@ func TestTimeErrors(t *testing.T) { require.NoError(t, err) metrics, err = parser.Parse([]byte(testString2)) - log.Printf("err: %v", err) require.Error(t, err) require.Equal(t, 0, len(metrics)) require.Equal(t, fmt.Errorf("JSON time key could not be found"), err) @@ -840,3 +838,51 @@ func TestStringFieldGlob(t *testing.T) { testutil.RequireMetricsEqual(t, expected, actual) } + +func TestParseEmptyArray(t *testing.T) { + data := `[]` + + parser, err := New(&Config{}) + require.NoError(t, err) + + actual, err := parser.Parse([]byte(data)) + require.NoError(t, err) + + expected := []telegraf.Metric{} + testutil.RequireMetricsEqual(t, expected, actual) +} + +func TestParseSimpleArray(t *testing.T) { + data := `[{"answer": 42}]` + + parser, err := New(&Config{ + MetricName: "json", + 
}) + require.NoError(t, err) + + actual, err := parser.Parse([]byte(data)) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "json", + map[string]string{}, + map[string]interface{}{ + "answer": 42.0, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) +} + +func TestParseArrayWithWrongType(t *testing.T) { + data := `[{"answer": 42}, 123]` + + parser, err := New(&Config{}) + require.NoError(t, err) + + _, err = parser.Parse([]byte(data)) + require.Error(t, err) +} From f93441d2a45e40031ddbf24f3baacb838af0728a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 17 Jul 2019 15:23:51 -0700 Subject: [PATCH 1014/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2939a5e15..716ebd947 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,6 +58,8 @@ - [#6054](https://github.com/influxdata/telegraf/issues/6054): Fix unable to reconnect after vCenter reboot in vsphere input. - [#6073](https://github.com/influxdata/telegraf/issues/6073): Handle unknown error in nvidia-smi output. - [#6121](https://github.com/influxdata/telegraf/pull/6121): Fix panic in statd input when processing datadog events. +- [#6125](https://github.com/influxdata/telegraf/issues/6125): Treat empty array as successful parse in json parser. +- [#6094](https://github.com/influxdata/telegraf/issues/6094): Add missing rcode and zonestat to bind input. ## v1.11.2 [2019-07-09] From 56c6539a919879dbf9c40cec86e00e82b06d07d8 Mon Sep 17 00:00:00 2001 From: George Date: Thu, 18 Jul 2019 15:40:05 +0200 Subject: [PATCH 1015/1815] Fix lustre2 input plugin config parse regression (#6114) --- plugins/inputs/lustre2/lustre2.go | 4 +-- plugins/inputs/lustre2/lustre2_test.go | 38 ++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index 4ccb90115..026636dd2 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -25,8 +25,8 @@ type tags struct { // Lustre proc files can change between versions, so we want to future-proof // by letting people choose what to look at. 
type Lustre2 struct {
-	Ost_procfiles []string `toml:"ost_jobstat"`
-	Mds_procfiles []string `toml:"mds_jobstat"`
+	Ost_procfiles []string `toml:"ost_procfiles"`
+	Mds_procfiles []string `toml:"mds_procfiles"`
 
 	// allFields maps an OST name to the metric fields associated with that OST
 	allFields map[tags]map[string]interface{}
diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go
index 6d0fd61f5..67cf4216b 100644
--- a/plugins/inputs/lustre2/lustre2_test.go
+++ b/plugins/inputs/lustre2/lustre2_test.go
@@ -6,6 +6,9 @@ import (
 	"testing"
 
 	"github.com/influxdata/telegraf/testutil"
+	"github.com/influxdata/toml"
+	"github.com/influxdata/toml/ast"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
@@ -330,3 +333,38 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) {
 	err = os.RemoveAll(os.TempDir() + "/telegraf")
 	require.NoError(t, err)
 }
+
+func TestLustre2CanParseConfiguration(t *testing.T) {
+	config := []byte(`
+[[inputs.lustre2]]
+   ost_procfiles = [
+     "/proc/fs/lustre/obdfilter/*/stats",
+     "/proc/fs/lustre/osd-ldiskfs/*/stats",
+   ]
+   mds_procfiles = [
+     "/proc/fs/lustre/mdt/*/md_stats",
+   ]`)
+
+	table, err := toml.Parse(config)
+	require.NoError(t, err)
+
+	inputs, ok := table.Fields["inputs"]
+	require.True(t, ok)
+
+	lustre2, ok := inputs.(*ast.Table).Fields["lustre2"]
+	require.True(t, ok)
+
+	var plugin Lustre2
+
+	require.NoError(t, toml.UnmarshalTable(lustre2.([]*ast.Table)[0], &plugin))
+
+	assert.Equal(t, Lustre2{
+		Ost_procfiles: []string{
+			"/proc/fs/lustre/obdfilter/*/stats",
+			"/proc/fs/lustre/osd-ldiskfs/*/stats",
+		},
+		Mds_procfiles: []string{
+			"/proc/fs/lustre/mdt/*/md_stats",
+		},
+	}, plugin)
+}

From 5c9923a20a0ff0645ddbe64c08fb45526587f776 Mon Sep 17 00:00:00 2001
From: George MacRorie
Date: Thu, 18 Jul 2019 15:42:06 +0200
Subject: [PATCH 1016/1815] Update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 716ebd947..a87c1aca9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -60,6 +60,7 @@
 - [#6121](https://github.com/influxdata/telegraf/pull/6121): Fix panic in statsd input when processing datadog events.
 - [#6125](https://github.com/influxdata/telegraf/issues/6125): Treat empty array as successful parse in json parser.
 - [#6094](https://github.com/influxdata/telegraf/issues/6094): Add missing rcode and zonestat to bind input.
+- [#6114](https://github.com/influxdata/telegraf/issues/6114): Fix lustre2 input plugin config parse regression.
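
A hedged sketch of the #6114 fix above: with the corrected struct tags, a standard lustre2 configuration such as the following (the keys and glob paths are the ones exercised by the new test) binds to `Ost_procfiles` and `Mds_procfiles` again, where previously it was silently ignored:

```toml
[[inputs.lustre2]]
  ost_procfiles = ["/proc/fs/lustre/obdfilter/*/stats"]
  mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
```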
## v1.11.2 [2019-07-09] From 77b1a43539530215269495902cb0471009d6db79 Mon Sep 17 00:00:00 2001 From: George Date: Fri, 19 Jul 2019 22:14:15 +0200 Subject: [PATCH 1017/1815] Fix template pattern partial wildcard matching (#6135) --- internal/templating/engine_test.go | 55 ++++++++++++++++++++++++++++++ internal/templating/node.go | 38 ++++++++++++++------- 2 files changed, 80 insertions(+), 13 deletions(-) diff --git a/internal/templating/engine_test.go b/internal/templating/engine_test.go index b7dd23f38..0dfcb89d8 100644 --- a/internal/templating/engine_test.go +++ b/internal/templating/engine_test.go @@ -3,6 +3,7 @@ package templating import ( "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -20,3 +21,57 @@ func TestEngineAlternateSeparator(t *testing.T) { }, tags) require.Equal(t, "", field) } + +func TestEngineWithWildcardTemplate(t *testing.T) { + var ( + defaultTmpl, err = NewDefaultTemplateWithPattern("measurement*") + templates = []string{ + "taskmanagerTask.alarm-detector.Assign.alarmDefinitionId metricsType.process.nodeId.x.alarmDefinitionId.measurement.field rule=1", + "taskmanagerTask.*.*.*.* metricsType.process.nodeId.measurement rule=2", + } + ) + require.NoError(t, err) + + engine, err := NewEngine(".", defaultTmpl, templates) + require.NoError(t, err) + + for _, testCase := range []struct { + line string + measurement string + field string + tags map[string]string + }{ + { + line: "taskmanagerTask.alarm-detector.Assign.alarmDefinitionId.timeout_errors.duration.p75", + measurement: "duration", + field: "p75", + tags: map[string]string{ + "metricsType": "taskmanagerTask", + "process": "alarm-detector", + "nodeId": "Assign", + "x": "alarmDefinitionId", + "alarmDefinitionId": "timeout_errors", + "rule": "1", + }, + }, + { + line: "taskmanagerTask.alarm-detector.Assign.numRecordsInPerSecond.m5_rate", + measurement: "numRecordsInPerSecond", + tags: map[string]string{ + "metricsType": "taskmanagerTask", + "process": "alarm-detector", + "nodeId": "Assign", + "rule": "2", + }, + }, + } { + t.Run(testCase.line, func(t *testing.T) { + measurement, tags, field, err := engine.Apply(testCase.line) + require.NoError(t, err) + + assert.Equal(t, testCase.measurement, measurement) + assert.Equal(t, testCase.field, field) + assert.Equal(t, testCase.tags, tags) + }) + } +} diff --git a/internal/templating/node.go b/internal/templating/node.go index 83ab1a40c..53d028fd0 100644 --- a/internal/templating/node.go +++ b/internal/templating/node.go @@ -55,32 +55,44 @@ func (n *node) search(line string) *Template { // recursiveSearch performs the actual recursive search func (n *node) recursiveSearch(lineParts []string) *Template { - // Nothing to search + // nothing to search if len(lineParts) == 0 || len(n.children) == 0 { return n.template } - // If last element is a wildcard, don't include it in this search since it's sorted - // to the end but lexicographically it would not always be and sort.Search assumes - // the slice is sorted. - length := len(n.children) - if n.children[length-1].value == "*" { + var ( + hasWildcard bool + length = len(n.children) + ) + + // exclude last child from search if it is a wildcard. 
sort.Search expects
+	// a lexicographically sorted set of children and we have artificially sorted
+	// wildcards to the end of the child set
+	// wildcards will be searched separately if no exact match is found
+	if hasWildcard = n.children[length-1].value == "*"; hasWildcard {
 		length--
 	}
 
-	// Find the index of child with an exact match
 	i := sort.Search(length, func(i int) bool {
 		return n.children[i].value >= lineParts[0]
 	})
 
-	// Found an exact match, so search that child sub-tree
-	if i < len(n.children) && n.children[i].value == lineParts[0] {
-		return n.children[i].recursiveSearch(lineParts[1:])
+	// given an exact match is found within the children set
+	if i < length && n.children[i].value == lineParts[0] {
+		// descend into the matching node
+		if tmpl := n.children[i].recursiveSearch(lineParts[1:]); tmpl != nil {
+			// given a template is found return it
+			return tmpl
+		}
 	}
-	// Not an exact match, see if we have a wildcard child to search
-	if n.children[len(n.children)-1].value == "*" {
-		return n.children[len(n.children)-1].recursiveSearch(lineParts[1:])
+
+	// given no template is found and the last child is a wildcard
+	if hasWildcard {
+		// also search the wildcard child node
+		return n.children[length].recursiveSearch(lineParts[1:])
 	}
+
+	// fall back to returning the template at this node
 	return n.template
 }

From bdb4598b3f219f2245352e0c907d19be17fb521a Mon Sep 17 00:00:00 2001
From: Greg <2653109+glinton@users.noreply.github.com>
Date: Fri, 19 Jul 2019 14:16:54 -0600
Subject: [PATCH 1018/1815] Add support for field/tag keys to strings
 processor (#6129)

---
 plugins/processors/strings/README.md       |  52 +++-
 plugins/processors/strings/strings.go      |  52 ++++
 plugins/processors/strings/strings_test.go | 317 +++++++++++++++++++++
 3 files changed, 411 insertions(+), 10 deletions(-)

diff --git a/plugins/processors/strings/README.md b/plugins/processors/strings/README.md
index 06bffaee8..30d2cbb08 100644
--- a/plugins/processors/strings/README.md
+++ b/plugins/processors/strings/README.md
@@ -14,41 +14,50 @@ Implemented functions are:
 
 Please note that in this implementation these are processed in the order that they appear above.
 
-Specify the `measurement`, `tag` or `field` that you want processed in each section and optionally a `dest` if you want the result stored in a new tag or field. You can specify lots of transformations on data with a single strings processor.
+Specify the `measurement`, `tag`, `tag_key`, `field`, or `field_key` that you want processed in each section and optionally a `dest` if you want the result stored in a new tag or field. You can specify lots of transformations on data with a single strings processor.
 
-If you'd like to apply the change to every `tag`, `field`, or `measurement`, use the value "*" for each respective field. Note that the `dest` field will be ignored if "*" is used
+If you'd like to apply the change to every `tag`, `tag_key`, `field`, `field_key`, or `measurement`, use the value `"*"` for each respective field. Note that the `dest` field will be ignored if `"*"` is used.
+
+If you'd like to apply multiple transformations to the same `tag_key` or `field_key`, note the process order stated above. See [Example 2](#example-2) for an example.
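+
+As a minimal sketch of the wildcard form described above (the input metric is hypothetical):
+
+```toml
+[[processors.strings]]
+  [[processors.strings.uppercase]]
+    tag_key = "*"
+```
+
+This would turn `iis_log,method=get,uri_stem=/ping http_version=1.1` into `iis_log,METHOD=get,URI_STEM=/ping http_version=1.1`, transforming every tag key while leaving tag values untouched.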
### Configuration: ```toml [[processors.strings]] - # [[processors.strings.uppercase]] - # tag = "method" - + ## Convert a field value to lowercase and store in a new field # [[processors.strings.lowercase]] # field = "uri_stem" # dest = "uri_stem_normalised" - ## Convert a tag value to lowercase + ## Convert a tag value to uppercase + # [[processors.strings.uppercase]] + # tag = "method" + + ## Trim leading and trailing whitespace using the default cutset # [[processors.strings.trim]] # field = "message" + ## Trim leading characters in cutset # [[processors.strings.trim_left]] # field = "message" # cutset = "\t" + ## Trim trailing characters in cutset # [[processors.strings.trim_right]] # field = "message" # cutset = "\r\n" + ## Trim the given prefix from the field # [[processors.strings.trim_prefix]] # field = "my_value" # prefix = "my_" + ## Trim the given suffix from the field # [[processors.strings.trim_suffix]] # field = "read_count" # suffix = "_count" + ## Replace all non-overlapping instances of old with new # [[processors.strings.replace]] # measurement = "*" # old = ":" @@ -79,10 +88,10 @@ the operation and keep the old name. ```toml [[processors.strings]] [[processors.strings.lowercase]] - field = "uri-stem" + tag = "uri_stem" [[processors.strings.trim_prefix]] - field = "uri_stem" + tag = "uri_stem" prefix = "/api/" [[processors.strings.uppercase]] @@ -92,10 +101,33 @@ the operation and keep the old name. **Input** ``` -iis_log,method=get,uri_stem=/API/HealthCheck cs-host="MIXEDCASE_host",referrer="-",ident="-",http_version=1.1,agent="UserAgent",resp_bytes=270i 1519652321000000000 +iis_log,method=get,uri_stem=/API/HealthCheck cs-host="MIXEDCASE_host",http_version=1.1 1519652321000000000 ``` **Output** ``` -iis_log,method=get,uri_stem=healthcheck cs-host="MIXEDCASE_host",cs-host_normalised="MIXEDCASE_HOST",referrer="-",ident="-",http_version=1.1,agent="UserAgent",resp_bytes=270i 1519652321000000000 +iis_log,method=get,uri_stem=healthcheck cs-host="MIXEDCASE_host",http_version=1.1,cs-host_normalised="MIXEDCASE_HOST" 1519652321000000000 +``` + +### Example 2 +**Config** +```toml +[[processors.strings]] + [[processors.strings.lowercase]] + tag_key = "URI-Stem" + + [[processors.strings.replace]] + tag_key = "uri-stem" + old = "-" + new = "_" +``` + +**Input** +``` +iis_log,URI-Stem=/API/HealthCheck http_version=1.1 1519652321000000000 +``` + +**Output** +``` +iis_log,uri_stem=/API/HealthCheck http_version=1.1 1519652321000000000 ``` diff --git a/plugins/processors/strings/strings.go b/plugins/processors/strings/strings.go index 00c7d99b1..56bcf1b2c 100644 --- a/plugins/processors/strings/strings.go +++ b/plugins/processors/strings/strings.go @@ -26,7 +26,9 @@ type ConvertFunc func(s string) string type converter struct { Field string + FieldKey string Tag string + TagKey string Measurement string Dest string Cutset string @@ -109,6 +111,27 @@ func (c *converter) convertTag(metric telegraf.Metric) { } } +func (c *converter) convertTagKey(metric telegraf.Metric) { + var tags map[string]string + if c.TagKey == "*" { + tags = metric.Tags() + } else { + tags = make(map[string]string) + tv, ok := metric.GetTag(c.TagKey) + if !ok { + return + } + tags[c.TagKey] = tv + } + + for key, value := range tags { + if k := c.fn(key); k != "" { + metric.RemoveTag(key) + metric.AddTag(k, value) + } + } +} + func (c *converter) convertField(metric telegraf.Metric) { var fields map[string]interface{} if c.Field == "*" { @@ -133,6 +156,27 @@ func (c *converter) convertField(metric telegraf.Metric) { } 
} +func (c *converter) convertFieldKey(metric telegraf.Metric) { + var fields map[string]interface{} + if c.FieldKey == "*" { + fields = metric.Fields() + } else { + fields = make(map[string]interface{}) + fv, ok := metric.GetField(c.FieldKey) + if !ok { + return + } + fields[c.FieldKey] = fv + } + + for key, value := range fields { + if k := c.fn(key); k != "" { + metric.RemoveField(key) + metric.AddField(k, value) + } + } +} + func (c *converter) convertMeasurement(metric telegraf.Metric) { if metric.Name() != c.Measurement && c.Measurement != "*" { return @@ -146,10 +190,18 @@ func (c *converter) convert(metric telegraf.Metric) { c.convertField(metric) } + if c.FieldKey != "" { + c.convertFieldKey(metric) + } + if c.Tag != "" { c.convertTag(metric) } + if c.TagKey != "" { + c.convertTagKey(metric) + } + if c.Measurement != "" { c.convertMeasurement(metric) } diff --git a/plugins/processors/strings/strings_test.go b/plugins/processors/strings/strings_test.go index e108c04f7..c89ab7b66 100644 --- a/plugins/processors/strings/strings_test.go +++ b/plugins/processors/strings/strings_test.go @@ -25,6 +25,22 @@ func newM1() telegraf.Metric { return m1 } +func newM2() telegraf.Metric { + m1, _ := metric.New("IIS_log", + map[string]string{ + "verb": "GET", + "S-ComputerName": "MIXEDCASE_hostname", + }, + map[string]interface{}{ + "Request": "/mixed/CASE/paTH/?from=-1D&to=now", + "req/sec": 5, + " whitespace ": " whitespace\t", + }, + time.Now(), + ) + return m1 +} + func TestFieldConversions(t *testing.T) { tests := []struct { name string @@ -253,6 +269,226 @@ func TestFieldConversions(t *testing.T) { } } +func TestFieldKeyConversions(t *testing.T) { + tests := []struct { + name string + plugin *Strings + check func(t *testing.T, actual telegraf.Metric) + }{ + { + name: "Should change existing field key to lowercase", + plugin: &Strings{ + Lowercase: []converter{ + { + FieldKey: "Request", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv) + }, + }, + { + name: "Should change existing field key to uppercase", + plugin: &Strings{ + Uppercase: []converter{ + { + FieldKey: "Request", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("Request") + require.False(t, ok) + + fv, ok = actual.GetField("REQUEST") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv) + }, + }, + { + name: "Should trim from both sides", + plugin: &Strings{ + Trim: []converter{ + { + FieldKey: "Request", + Cutset: "eR", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("quest") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv) + }, + }, + { + name: "Should trim from both sides but not make lowercase", + plugin: &Strings{ + // Tag/field key multiple executions occur in the following order: (initOnce) + // Lowercase + // Uppercase + // Trim + // TrimLeft + // TrimRight + // TrimPrefix + // TrimSuffix + // Replace + Lowercase: []converter{ + { + FieldKey: "Request", + }, + }, + Trim: []converter{ + { + FieldKey: "request", + Cutset: "tse", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("requ") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv) + }, + }, + { + name: "Should trim from left side", + plugin: &Strings{ + TrimLeft: []converter{ + { + FieldKey: 
"req/sec", + Cutset: "req/", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("sec") + require.True(t, ok) + require.Equal(t, int64(5), fv) + }, + }, + { + name: "Should trim from right side", + plugin: &Strings{ + TrimRight: []converter{ + { + FieldKey: "req/sec", + Cutset: "req/", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("req/sec") + require.True(t, ok) + require.Equal(t, int64(5), fv) + }, + }, + { + name: "Should trim prefix 'req/'", + plugin: &Strings{ + TrimPrefix: []converter{ + { + FieldKey: "req/sec", + Prefix: "req/", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("sec") + require.True(t, ok) + require.Equal(t, int64(5), fv) + }, + }, + { + name: "Should trim suffix '/sec'", + plugin: &Strings{ + TrimSuffix: []converter{ + { + FieldKey: "req/sec", + Suffix: "/sec", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("req") + require.True(t, ok) + require.Equal(t, int64(5), fv) + }, + }, + { + name: "Trim without cutset removes whitespace", + plugin: &Strings{ + Trim: []converter{ + { + FieldKey: " whitespace ", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("whitespace") + require.True(t, ok) + require.Equal(t, " whitespace\t", fv) + }, + }, + { + name: "Trim left without cutset removes whitespace", + plugin: &Strings{ + TrimLeft: []converter{ + { + FieldKey: " whitespace ", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("whitespace ") + require.True(t, ok) + require.Equal(t, " whitespace\t", fv) + }, + }, + { + name: "Trim right without cutset removes whitespace", + plugin: &Strings{ + TrimRight: []converter{ + { + FieldKey: " whitespace ", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField(" whitespace") + require.True(t, ok) + require.Equal(t, " whitespace\t", fv) + }, + }, + { + name: "No change if field missing", + plugin: &Strings{ + Lowercase: []converter{ + { + FieldKey: "xyzzy", + Suffix: "-1D&to=now", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("Request") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metrics := tt.plugin.Apply(newM2()) + require.Len(t, metrics, 1) + tt.check(t, metrics[0]) + }) + } +} + func TestTagConversions(t *testing.T) { tests := []struct { name string @@ -337,6 +573,87 @@ func TestTagConversions(t *testing.T) { } } +func TestTagKeyConversions(t *testing.T) { + tests := []struct { + name string + plugin *Strings + check func(t *testing.T, actual telegraf.Metric) + }{ + { + name: "Should change existing tag key to lowercase", + plugin: &Strings{ + Lowercase: []converter{ + { + Tag: "S-ComputerName", + TagKey: "S-ComputerName", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + tv, ok := actual.GetTag("verb") + require.True(t, ok) + require.Equal(t, "GET", tv) + + tv, ok = actual.GetTag("s-computername") + require.True(t, ok) + require.Equal(t, "mixedcase_hostname", tv) + }, + }, + { + name: "Should add new lowercase tag key", + plugin: &Strings{ + Lowercase: []converter{ + { + TagKey: "S-ComputerName", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + tv, ok := actual.GetTag("verb") + require.True(t, 
ok) + require.Equal(t, "GET", tv) + + tv, ok = actual.GetTag("S-ComputerName") + require.False(t, ok) + + tv, ok = actual.GetTag("s-computername") + require.True(t, ok) + require.Equal(t, "MIXEDCASE_hostname", tv) + }, + }, + { + name: "Should add new uppercase tag key", + plugin: &Strings{ + Uppercase: []converter{ + { + TagKey: "S-ComputerName", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + tv, ok := actual.GetTag("verb") + require.True(t, ok) + require.Equal(t, "GET", tv) + + tv, ok = actual.GetTag("S-ComputerName") + require.False(t, ok) + + tv, ok = actual.GetTag("S-COMPUTERNAME") + require.True(t, ok) + require.Equal(t, "MIXEDCASE_hostname", tv) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metrics := tt.plugin.Apply(newM2()) + require.Len(t, metrics, 1) + tt.check(t, metrics[0]) + }) + } +} + func TestMeasurementConversions(t *testing.T) { tests := []struct { name string From 877c42362d9d3c8264add86a727af7b10f01c571 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Fri, 19 Jul 2019 14:18:50 -0600 Subject: [PATCH 1019/1815] Collect k8s endpoints, ingress, and services in kube_inventory plugin (#6105) --- Gopkg.lock | 2 + plugins/inputs/kube_inventory/client.go | 22 ++ plugins/inputs/kube_inventory/client_test.go | 8 + plugins/inputs/kube_inventory/endpoint.go | 82 ++++++++ .../inputs/kube_inventory/endpoint_test.go | 194 ++++++++++++++++++ plugins/inputs/kube_inventory/ingress.go | 60 ++++++ plugins/inputs/kube_inventory/ingress_test.go | 142 +++++++++++++ plugins/inputs/kube_inventory/kube_state.go | 6 + plugins/inputs/kube_inventory/service.go | 70 +++++++ plugins/inputs/kube_inventory/service_test.go | 123 +++++++++++ 10 files changed, 709 insertions(+) create mode 100644 plugins/inputs/kube_inventory/endpoint.go create mode 100644 plugins/inputs/kube_inventory/endpoint_test.go create mode 100644 plugins/inputs/kube_inventory/ingress.go create mode 100644 plugins/inputs/kube_inventory/ingress_test.go create mode 100644 plugins/inputs/kube_inventory/service.go create mode 100644 plugins/inputs/kube_inventory/service_test.go diff --git a/Gopkg.lock b/Gopkg.lock index 5a4f12706..ac3bb8165 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -403,6 +403,7 @@ "apis/apps/v1beta1", "apis/apps/v1beta2", "apis/core/v1", + "apis/extensions/v1beta1", "apis/meta/v1", "apis/policy/v1beta1", "apis/resource", @@ -1616,6 +1617,7 @@ "github.com/ericchiang/k8s/apis/apps/v1beta1", "github.com/ericchiang/k8s/apis/apps/v1beta2", "github.com/ericchiang/k8s/apis/core/v1", + "github.com/ericchiang/k8s/apis/extensions/v1beta1", "github.com/ericchiang/k8s/apis/meta/v1", "github.com/ericchiang/k8s/apis/resource", "github.com/ericchiang/k8s/util/intstr", diff --git a/plugins/inputs/kube_inventory/client.go b/plugins/inputs/kube_inventory/client.go index bf207b0ad..5bb2baf5c 100644 --- a/plugins/inputs/kube_inventory/client.go +++ b/plugins/inputs/kube_inventory/client.go @@ -8,6 +8,7 @@ import ( "github.com/ericchiang/k8s/apis/apps/v1beta1" "github.com/ericchiang/k8s/apis/apps/v1beta2" "github.com/ericchiang/k8s/apis/core/v1" + v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1" "github.com/influxdata/telegraf/internal/tls" ) @@ -61,6 +62,20 @@ func (c *client) getDeployments(ctx context.Context) (*v1beta1.DeploymentList, e return list, c.List(ctx, c.namespace, list) } +func (c *client) getEndpoints(ctx context.Context) (*v1.EndpointsList, error) { + list := new(v1.EndpointsList) + ctx, cancel := 
context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, c.namespace, list) +} + +func (c *client) getIngress(ctx context.Context) (*v1beta1EXT.IngressList, error) { + list := new(v1beta1EXT.IngressList) + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, c.namespace, list) +} + func (c *client) getNodes(ctx context.Context) (*v1.NodeList, error) { list := new(v1.NodeList) ctx, cancel := context.WithTimeout(ctx, c.timeout) @@ -89,6 +104,13 @@ func (c *client) getPods(ctx context.Context) (*v1.PodList, error) { return list, c.List(ctx, c.namespace, list) } +func (c *client) getServices(ctx context.Context) (*v1.ServiceList, error) { + list := new(v1.ServiceList) + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, c.namespace, list) +} + func (c *client) getStatefulSets(ctx context.Context) (*v1beta1.StatefulSetList, error) { list := new(v1beta1.StatefulSetList) ctx, cancel := context.WithTimeout(ctx, c.timeout) diff --git a/plugins/inputs/kube_inventory/client_test.go b/plugins/inputs/kube_inventory/client_test.go index 4f54755b0..3e4eaf752 100644 --- a/plugins/inputs/kube_inventory/client_test.go +++ b/plugins/inputs/kube_inventory/client_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/ericchiang/k8s/util/intstr" "github.com/influxdata/telegraf/internal/tls" ) @@ -27,6 +28,13 @@ func toBoolPtr(b bool) *bool { return &b } +func toIntStrPtrS(s string) *intstr.IntOrString { + return &intstr.IntOrString{StrVal: &s} +} + +func toIntStrPtrI(i int32) *intstr.IntOrString { + return &intstr.IntOrString{IntVal: &i} +} func TestNewClient(t *testing.T) { _, err := newClient("https://127.0.0.1:443/", "default", "abc123", time.Second, tls.ClientConfig{}) if err != nil { diff --git a/plugins/inputs/kube_inventory/endpoint.go b/plugins/inputs/kube_inventory/endpoint.go new file mode 100644 index 000000000..7298789da --- /dev/null +++ b/plugins/inputs/kube_inventory/endpoint.go @@ -0,0 +1,82 @@ +package kube_inventory + +import ( + "context" + "strings" + "time" + + "github.com/ericchiang/k8s/apis/core/v1" + + "github.com/influxdata/telegraf" +) + +func collectEndpoints(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getEndpoints(ctx) + if err != nil { + acc.AddError(err) + return + } + for _, i := range list.Items { + if err = ki.gatherEndpoint(*i, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherEndpoint(e v1.Endpoints, acc telegraf.Accumulator) error { + if e.Metadata.CreationTimestamp.GetSeconds() == 0 && e.Metadata.CreationTimestamp.GetNanos() == 0 { + return nil + } + + fields := map[string]interface{}{ + "created": time.Unix(e.Metadata.CreationTimestamp.GetSeconds(), int64(e.Metadata.CreationTimestamp.GetNanos())).UnixNano(), + "generation": e.Metadata.GetGeneration(), + } + + tags := map[string]string{ + "endpoint_name": e.Metadata.GetName(), + "namespace": e.Metadata.GetNamespace(), + } + + for _, endpoint := range e.GetSubsets() { + for _, readyAddr := range endpoint.GetAddresses() { + fields["ready"] = true + + tags["hostname"] = readyAddr.GetHostname() + tags["node_name"] = readyAddr.GetNodeName() + if readyAddr.TargetRef != nil { + tags[strings.ToLower(readyAddr.GetTargetRef().GetKind())] = readyAddr.GetTargetRef().GetName() + } + + for _, port := range endpoint.GetPorts() { + fields["port"] = port.GetPort() + + tags["port_name"] = port.GetName() + 
tags["port_protocol"] = port.GetProtocol() + + acc.AddFields(endpointMeasurement, fields, tags) + } + } + for _, notReadyAddr := range endpoint.GetNotReadyAddresses() { + fields["ready"] = false + + tags["hostname"] = notReadyAddr.GetHostname() + tags["node_name"] = notReadyAddr.GetNodeName() + if notReadyAddr.TargetRef != nil { + tags[strings.ToLower(notReadyAddr.GetTargetRef().GetKind())] = notReadyAddr.GetTargetRef().GetName() + } + + for _, port := range endpoint.GetPorts() { + fields["port"] = port.GetPort() + + tags["port_name"] = port.GetName() + tags["port_protocol"] = port.GetProtocol() + + acc.AddFields(endpointMeasurement, fields, tags) + } + } + } + + return nil +} diff --git a/plugins/inputs/kube_inventory/endpoint_test.go b/plugins/inputs/kube_inventory/endpoint_test.go new file mode 100644 index 000000000..b88c38816 --- /dev/null +++ b/plugins/inputs/kube_inventory/endpoint_test.go @@ -0,0 +1,194 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/apis/core/v1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + "github.com/influxdata/telegraf/testutil" +) + +func TestEndpoint(t *testing.T) { + cli := &client{} + + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no endpoints", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/endpoints/": &v1.EndpointsList{}, + }, + }, + hasError: false, + }, + { + name: "collect ready endpoints", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/endpoints/": &v1.EndpointsList{ + Items: []*v1.Endpoints{ + { + Subsets: []*v1.EndpointSubset{ + { + Addresses: []*v1.EndpointAddress{ + { + Hostname: toStrPtr("storage-6"), + NodeName: toStrPtr("b.storage.internal"), + TargetRef: &v1.ObjectReference{ + Kind: toStrPtr("pod"), + Name: toStrPtr("storage-6"), + }, + }, + }, + Ports: []*v1.EndpointPort{ + { + Name: toStrPtr("server"), + Protocol: toStrPtr("TCP"), + Port: toInt32Ptr(8080), + }, + }, + }, + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(12), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("storage"), + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Fields: map[string]interface{}{ + "ready": true, + "port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + Tags: map[string]string{ + "endpoint_name": "storage", + "namespace": "ns1", + "hostname": "storage-6", + "node_name": "b.storage.internal", + "port_name": "server", + "port_protocol": "TCP", + "pod": "storage-6", + }, + }, + }, + }, + hasError: false, + }, + { + name: "collect notready endpoints", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/endpoints/": &v1.EndpointsList{ + Items: []*v1.Endpoints{ + { + Subsets: []*v1.EndpointSubset{ + { + NotReadyAddresses: []*v1.EndpointAddress{ + { + Hostname: toStrPtr("storage-6"), + NodeName: toStrPtr("b.storage.internal"), + TargetRef: &v1.ObjectReference{ + Kind: toStrPtr("pod"), + Name: toStrPtr("storage-6"), + }, + }, + }, + Ports: []*v1.EndpointPort{ + { + Name: toStrPtr("server"), + Protocol: toStrPtr("TCP"), + Port: toInt32Ptr(8080), + }, + }, + }, + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(12), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("storage"), + CreationTimestamp: 
&metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Fields: map[string]interface{}{ + "ready": false, + "port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + Tags: map[string]string{ + "endpoint_name": "storage", + "namespace": "ns1", + "hostname": "storage-6", + "node_name": "b.storage.internal", + "port_name": "server", + "port_protocol": "TCP", + "pod": "storage-6", + }, + }, + }, + }, + hasError: false, + }, + } + + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) + for _, endpoint := range ((v.handler.responseMap["/endpoints/"]).(*v1.EndpointsList)).Items { + err := ks.gatherEndpoint(*endpoint, acc) + if err != nil { + t.Errorf("Failed to gather endpoint - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) + } + } + } + } + } +} diff --git a/plugins/inputs/kube_inventory/ingress.go b/plugins/inputs/kube_inventory/ingress.go new file mode 100644 index 000000000..6d5c80199 --- /dev/null +++ b/plugins/inputs/kube_inventory/ingress.go @@ -0,0 +1,60 @@ +package kube_inventory + +import ( + "context" + "time" + + v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1" + + "github.com/influxdata/telegraf" +) + +func collectIngress(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getIngress(ctx) + if err != nil { + acc.AddError(err) + return + } + for _, i := range list.Items { + if err = ki.gatherIngress(*i, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherIngress(i v1beta1EXT.Ingress, acc telegraf.Accumulator) error { + if i.Metadata.CreationTimestamp.GetSeconds() == 0 && i.Metadata.CreationTimestamp.GetNanos() == 0 { + return nil + } + + fields := map[string]interface{}{ + "created": time.Unix(i.Metadata.CreationTimestamp.GetSeconds(), int64(i.Metadata.CreationTimestamp.GetNanos())).UnixNano(), + "generation": i.Metadata.GetGeneration(), + } + + tags := map[string]string{ + "ingress_name": i.Metadata.GetName(), + "namespace": i.Metadata.GetNamespace(), + } + + for _, ingress := range i.GetStatus().GetLoadBalancer().GetIngress() { + tags["hostname"] = ingress.GetHostname() + tags["ip"] = ingress.GetIp() + + for _, rule := range i.GetSpec().GetRules() { + for _, path := range rule.GetIngressRuleValue().GetHttp().GetPaths() { + fields["backend_service_port"] = path.GetBackend().GetServicePort().GetIntVal() + fields["tls"] = i.GetSpec().GetTls() != nil + + tags["backend_service_name"] = path.GetBackend().GetServiceName() + tags["path"] = path.GetPath() + tags["host"] = rule.GetHost() + + acc.AddFields(ingressMeasurement, fields, tags) 
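+				// One point is emitted per load-balancer ingress, rule, and path; with
+				// the hypothetical fixture values from ingress_test.go below this is:
+				//   kubernetes_ingress,ingress_name=ui-lb,namespace=ns1,hostname=chron-1,ip=1.0.0.127,host=ui.internal,path=/,backend_service_name=chronografd tls=false,backend_service_port=8080i,generation=12i,created=...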
+ } + } + } + + return nil +} diff --git a/plugins/inputs/kube_inventory/ingress_test.go b/plugins/inputs/kube_inventory/ingress_test.go new file mode 100644 index 000000000..e3b44512c --- /dev/null +++ b/plugins/inputs/kube_inventory/ingress_test.go @@ -0,0 +1,142 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/apis/core/v1" + v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + "github.com/influxdata/telegraf/testutil" +) + +func TestIngress(t *testing.T) { + cli := &client{} + + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no ingress", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/ingress/": &v1beta1EXT.IngressList{}, + }, + }, + hasError: false, + }, + { + name: "collect ingress", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/ingress/": &v1beta1EXT.IngressList{ + Items: []*v1beta1EXT.Ingress{ + { + Status: &v1beta1EXT.IngressStatus{ + LoadBalancer: &v1.LoadBalancerStatus{ + Ingress: []*v1.LoadBalancerIngress{ + { + Hostname: toStrPtr("chron-1"), + Ip: toStrPtr("1.0.0.127"), + }, + }, + }, + }, + Spec: &v1beta1EXT.IngressSpec{ + Rules: []*v1beta1EXT.IngressRule{ + { + Host: toStrPtr("ui.internal"), + IngressRuleValue: &v1beta1EXT.IngressRuleValue{ + Http: &v1beta1EXT.HTTPIngressRuleValue{ + Paths: []*v1beta1EXT.HTTPIngressPath{ + { + Path: toStrPtr("/"), + Backend: &v1beta1EXT.IngressBackend{ + ServiceName: toStrPtr("chronografd"), + ServicePort: toIntStrPtrI(8080), + }, + }, + }, + }, + }, + }, + }, + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(12), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("ui-lb"), + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Fields: map[string]interface{}{ + "tls": false, + "backend_service_port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + Tags: map[string]string{ + "ingress_name": "ui-lb", + "namespace": "ns1", + "ip": "1.0.0.127", + "hostname": "chron-1", + "backend_service_name": "chronografd", + "host": "ui.internal", + "path": "/", + }, + }, + }, + }, + hasError: false, + }, + } + + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) + for _, ingress := range ((v.handler.responseMap["/ingress/"]).(*v1beta1EXT.IngressList)).Items { + err := ks.gatherIngress(*ingress, acc) + if err != nil { + t.Errorf("Failed to gather ingress - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, 
acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) + } + } + } + } + } +} diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_state.go index 57d31908d..9ffe0765e 100644 --- a/plugins/inputs/kube_inventory/kube_state.go +++ b/plugins/inputs/kube_inventory/kube_state.go @@ -111,10 +111,13 @@ func (ki *KubernetesInventory) Gather(acc telegraf.Accumulator) (err error) { var availableCollectors = map[string]func(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory){ "daemonsets": collectDaemonSets, "deployments": collectDeployments, + "endpoints": collectEndpoints, + "ingress": collectIngress, "nodes": collectNodes, "persistentvolumes": collectPersistentVolumes, "persistentvolumeclaims": collectPersistentVolumeClaims, "pods": collectPods, + "services": collectServices, "statefulsets": collectStatefulSets, } @@ -158,10 +161,13 @@ func convertQuantity(s string, m float64) int64 { var ( daemonSetMeasurement = "kubernetes_daemonset" deploymentMeasurement = "kubernetes_deployment" + endpointMeasurement = "kubernetes_endpoint" + ingressMeasurement = "kubernetes_ingress" nodeMeasurement = "kubernetes_node" persistentVolumeMeasurement = "kubernetes_persistentvolume" persistentVolumeClaimMeasurement = "kubernetes_persistentvolumeclaim" podContainerMeasurement = "kubernetes_pod_container" + serviceMeasurement = "kubernetes_service" statefulSetMeasurement = "kubernetes_statefulset" ) diff --git a/plugins/inputs/kube_inventory/service.go b/plugins/inputs/kube_inventory/service.go new file mode 100644 index 000000000..4b0cc0845 --- /dev/null +++ b/plugins/inputs/kube_inventory/service.go @@ -0,0 +1,70 @@ +package kube_inventory + +import ( + "context" + "time" + + "github.com/ericchiang/k8s/apis/core/v1" + + "github.com/influxdata/telegraf" +) + +func collectServices(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getServices(ctx) + if err != nil { + acc.AddError(err) + return + } + for _, i := range list.Items { + if err = ki.gatherService(*i, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherService(s v1.Service, acc telegraf.Accumulator) error { + if s.Metadata.CreationTimestamp.GetSeconds() == 0 && s.Metadata.CreationTimestamp.GetNanos() == 0 { + return nil + } + + fields := map[string]interface{}{ + "created": time.Unix(s.Metadata.CreationTimestamp.GetSeconds(), int64(s.Metadata.CreationTimestamp.GetNanos())).UnixNano(), + "generation": s.Metadata.GetGeneration(), + } + + tags := map[string]string{ + "service_name": s.Metadata.GetName(), + "namespace": s.Metadata.GetNamespace(), + } + + var getPorts = func() { + for _, port := range s.GetSpec().GetPorts() { + fields["port"] = port.GetPort() + fields["target_port"] = port.GetTargetPort().GetIntVal() + + tags["port_name"] = port.GetName() + tags["port_protocol"] = port.GetProtocol() + + if s.GetSpec().GetType() == "ExternalName" { + tags["external_name"] = s.GetSpec().GetExternalName() + } else { + tags["cluster_ip"] = s.GetSpec().GetClusterIP() + } + + acc.AddFields(serviceMeasurement, fields, tags) + } + } + + if externIPs := s.GetSpec().GetExternalIPs(); externIPs != nil { + for _, ip := range externIPs { + tags["ip"] = ip + + getPorts() + } + } else { + getPorts() + } + + return nil +} diff --git a/plugins/inputs/kube_inventory/service_test.go b/plugins/inputs/kube_inventory/service_test.go new file mode 100644 index 000000000..6c0c8787a --- /dev/null +++ 
b/plugins/inputs/kube_inventory/service_test.go @@ -0,0 +1,123 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/apis/core/v1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + "github.com/influxdata/telegraf/testutil" +) + +func TestService(t *testing.T) { + cli := &client{} + + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no service", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/service/": &v1.ServiceList{}, + }, + }, + hasError: false, + }, + { + name: "collect service", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/service/": &v1.ServiceList{ + Items: []*v1.Service{ + { + Spec: &v1.ServiceSpec{ + Ports: []*v1.ServicePort{ + { + Port: toInt32Ptr(8080), + TargetPort: toIntStrPtrI(1234), + Name: toStrPtr("diagnostic"), + Protocol: toStrPtr("TCP"), + }, + }, + ExternalIPs: []string{"1.0.0.127"}, + ClusterIP: toStrPtr("127.0.0.1"), + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(12), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("checker"), + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + }, + }, + + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Fields: map[string]interface{}{ + "port": int32(8080), + "target_port": int32(1234), + "generation": int64(12), + "created": now.UnixNano(), + }, + Tags: map[string]string{ + "service_name": "checker", + "namespace": "ns1", + "port_name": "diagnostic", + "port_protocol": "TCP", + "cluster_ip": "127.0.0.1", + "ip": "1.0.0.127", + }, + }, + }, + }, + hasError: false, + }, + } + + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) + for _, service := range ((v.handler.responseMap["/service/"]).(*v1.ServiceList)).Items { + err := ks.gatherService(*service, acc) + if err != nil { + t.Errorf("Failed to gather service - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) + } + } + } + } + } +} From 6f2e57ad6437871226deee8600abb12771bd204d Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 22 Jul 2019 15:14:23 -0600 Subject: [PATCH 1020/1815] Update paho.mqtt.golang (#6149) --- Gopkg.lock | 6 +++--- Gopkg.toml | 2 +- plugins/inputs/mqtt_consumer/mqtt_consumer_test.go | 4 ++++ 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index ac3bb8165..3bf568686 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -384,15 +384,15 @@ version = "v1.1.0" [[projects]] - digest = 
"1:3fa846cb3feb4e65371fe3c347c299de9b5bc3e71e256c0d940cd19b767a6ba0" + digest = "1:392ebbe504a822b15b41dd09cecc5baa98e9e0942502950dc14ba1f23c149e32" name = "github.com/eclipse/paho.mqtt.golang" packages = [ ".", "packets", ] pruneopts = "" - revision = "36d01c2b4cbeb3d2a12063e4880ce30800af9560" - version = "v1.1.1" + revision = "adca289fdcf8c883800aafa545bc263452290bae" + version = "v1.2.0" [[projects]] digest = "1:99a0607f79d36202b64b674c0464781549917cfc4bfb88037aaa98b31e124a18" diff --git a/Gopkg.toml b/Gopkg.toml index c817c1865..92457c626 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -40,7 +40,7 @@ [[constraint]] name = "github.com/eclipse/paho.mqtt.golang" - version = "~1.1.1" + version = "1" [[constraint]] name = "github.com/go-sql-driver/mysql" diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index 4209963bb..2d17c16c3 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -90,6 +90,10 @@ func (m *message) Duplicate() bool { return m.duplicate } +func (m *message) Ack() { + return +} + func (m *message) Qos() byte { return m.qos } From 109d1e1e15bd5017eb2798d43e6a5566a8aa0487 Mon Sep 17 00:00:00 2001 From: denzilribeiro Date: Mon, 22 Jul 2019 17:04:15 -0500 Subject: [PATCH 1021/1815] Add support for collecting SQL Requests to identify waits and blocking (#6144) --- plugins/inputs/sqlserver/README.md | 6 ++- plugins/inputs/sqlserver/sqlserver.go | 60 +++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index 16bb353c5..f04e0b6de 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -51,7 +51,7 @@ GO query_version = 2 ## If you are using AzureDB, setting this to true will gather resource utilization metrics - # azuredb = false + # azuredb = true ## If you would like to exclude some of the metrics queries, list them here ## Possible choices: @@ -67,7 +67,8 @@ GO ## - Schedulers ## - AzureDBResourceStats ## - AzureDBResourceGovernance - exclude_query = [ 'Schedulers' ] + ## - SqlRequests + exclude_query = [ 'Schedulers' , 'SqlRequests'] ``` ### Metrics: @@ -106,6 +107,7 @@ The new (version 2) metrics provide: - *Server properties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL relevent properties such as Tier, #Vcores, Memory etc. - *Wait stats*: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. - *Schedulers* - This captures sys.dm_os_schedulers. 
+- *SqlRequests* - This captures a snapshot of dm_exec_requests and dm_exec_sessions that gives you running requests as well as wait types and blocking sessions - *Azure Managed Instances* - Stats from `sys.server_resource_stats`: - cpu_count diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index f491954f7..e5de5202b 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -69,6 +69,7 @@ var sampleConfig = ` ## - Schedulers ## - AzureDBResourceStats ## - AzureDBResourceGovernance + ## - SqlRequests exclude_query = [ 'Schedulers' ] ` @@ -103,6 +104,7 @@ func initQueries(s *SQLServer) { queries["ServerProperties"] = Query{Script: sqlServerPropertiesV2, ResultByRow: false} queries["MemoryClerk"] = Query{Script: sqlMemoryClerkV2, ResultByRow: false} queries["Schedulers"] = Query{Script: sqlServerSchedulersV2, ResultByRow: false} + queries["SqlRequests"] = Query{Script: sqlServerRequestsV2, ResultByRow: false} } else { queries["PerformanceCounters"] = Query{Script: sqlPerformanceCounters, ResultByRow: true} queries["WaitStatsCategorized"] = Query{Script: sqlWaitStatsCategorized, ResultByRow: false} @@ -1356,6 +1358,64 @@ BEGIN END; ` +const sqlServerRequestsV2 string = ` +SET NOCOUNT ON; +SELECT blocking_session_id into #blockingSessions FROM sys.dm_exec_requests WHERE blocking_session_id != 0 +create index ix_blockingSessions_1 on #blockingSessions (blocking_session_id) +SELECT + 'sqlserver_requests' AS [measurement], + @@servername AS [sql_instance], + DB_NAME() as [database_name], + r.session_id + , r.request_id + , DB_NAME(s.database_id) as session_db_name + , r.status + , r.cpu_time as cpu_time_ms + , r.total_elapsed_time as total_elasped_time_ms + , r.logical_reads + , r.writes + , r.command + , wait_time as wait_time_ms + , wait_type + , wait_resource + , blocking_session_id + , s.program_name + , s.host_name + , s.nt_user_name + , r.open_transaction_count AS open_transaction + , LEFT (CASE COALESCE(r.transaction_isolation_level, s.transaction_isolation_level) + WHEN 0 THEN '0-Read Committed' + WHEN 1 THEN '1-Read Uncommitted (NOLOCK)' + WHEN 2 THEN '2-Read Committed' + WHEN 3 THEN '3-Repeatable Read' + WHEN 4 THEN '4-Serializable' + WHEN 5 THEN '5-Snapshot' + ELSE CONVERT (varchar(30), r.transaction_isolation_level) + '-UNKNOWN' + END, 30) AS transaction_isolation_level + ,r.granted_query_memory as granted_query_memory_pages + , r.percent_complete + , (SUBSTRING(qt.text, r.statement_start_offset / 2 + 1, + (CASE WHEN r.statement_end_offset = -1 + THEN LEN(CONVERT(NVARCHAR(MAX), qt.text)) * 2 + ELSE r.statement_end_offset + END - r.statement_start_offset) / 2) + ) AS statement_text + , qt.objectid + , QUOTENAME(OBJECT_SCHEMA_NAME(qt.objectid,qt.dbid)) + '.' 
+ QUOTENAME(OBJECT_NAME(qt.objectid,qt.dbid)) as stmt_object_name
+	, DB_NAME(qt.dbid) stmt_db_name
+	, r.query_hash
+	, r.query_plan_hash
+	FROM	sys.dm_exec_requests r
+	LEFT OUTER JOIN	sys.dm_exec_sessions s ON (s.session_id = r.session_id)
+	OUTER APPLY sys.dm_exec_sql_text(sql_handle) AS qt
+
+	WHERE	1=1
+	 AND (r.session_id IS NOT NULL AND (s.is_user_process = 1 OR r.status COLLATE Latin1_General_BIN NOT IN ('background', 'sleeping')))
+	 OR (s.session_id IN (SELECT blocking_session_id FROM #blockingSessions))
+	OPTION(MAXDOP 1)
+
+`
+
 // Queries V1
 const sqlPerformanceMetrics string = `SET DEADLOCK_PRIORITY -10;
 SET NOCOUNT ON;

From e098758d7891fedbb92d36a9fb1abd1cd651590a Mon Sep 17 00:00:00 2001
From: Lance O'Connor
Date: Mon, 22 Jul 2019 15:11:34 -0700
Subject: [PATCH 1022/1815] Add Fireboard Input Plugin (#6052)

---
 plugins/inputs/all/all.go                  |   1 +
 plugins/inputs/fireboard/README.md         |  62 ++++++++
 plugins/inputs/fireboard/fireboard.go      | 157 +++++++++++++++++++++
 plugins/inputs/fireboard/fireboard_test.go |  74 ++++++++++
 4 files changed, 294 insertions(+)
 create mode 100644 plugins/inputs/fireboard/README.md
 create mode 100644 plugins/inputs/fireboard/fireboard.go
 create mode 100644 plugins/inputs/fireboard/fireboard_test.go

diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 487f92b1f..0352e552a 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -42,6 +42,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/file"
 	_ "github.com/influxdata/telegraf/plugins/inputs/filecount"
 	_ "github.com/influxdata/telegraf/plugins/inputs/filestat"
+	_ "github.com/influxdata/telegraf/plugins/inputs/fireboard"
 	_ "github.com/influxdata/telegraf/plugins/inputs/fluentd"
 	_ "github.com/influxdata/telegraf/plugins/inputs/github"
 	_ "github.com/influxdata/telegraf/plugins/inputs/graylog"
diff --git a/plugins/inputs/fireboard/README.md b/plugins/inputs/fireboard/README.md
new file mode 100644
index 000000000..b4b8376ce
--- /dev/null
+++ b/plugins/inputs/fireboard/README.md
@@ -0,0 +1,62 @@
+# Fireboard Input Plugin
+
+The fireboard plugin gathers the real time temperature data from fireboard
+thermometers. In order to use this input plugin, you'll need to sign up to
+use their REST API; you can find more information on their website here:
+[https://docs.fireboard.io/reference/restapi.html]
+
+### Configuration
+
+This section contains the default TOML to configure the plugin. You can
+generate it using `telegraf --usage fireboard`.
+
+```toml
+[[inputs.fireboard]]
+  ## Specify auth token for your account
+  auth_token = "invalidAuthToken"
+  ## You can override the fireboard server URL if necessary
+  # url = "https://fireboard.io/api/v1/devices.json"
+  ## You can set a different http_timeout if you need to
+  # http_timeout = "4s"
+```
+
+#### auth_token
+
+In lieu of requiring a username and password, this plugin requires the
+authToken that you can generate using the Fireboard REST API as described
+in their docs [https://docs.fireboard.io/reference/restapi.html#Authentication]
+
+#### url
+
+While there should be no reason to override the URL, the option is available
+in case Fireboard changes their site, etc.
+
+#### http_timeout
+
+If you need to increase the HTTP timeout, you can do so here. Set it as a
+duration string, for example "12s" for 12 seconds; the default is four
+seconds ("4s").
+
+### Metrics
+
+The Fireboard REST API docs have good examples of the data that is available;
+currently this input only returns the real time temperatures.
+values are included if they are less than a minute old.
+
+- fireboard
+  - tags:
+    - channel
+    - scale (Celsius; Fahrenheit)
+    - title (name of the Fireboard)
+    - uuid (UUID of the Fireboard)
+  - fields:
+    - temperature (float, degrees)
+
+### Example Output
+
+This section shows example output in Line Protocol format. You can often use
+`telegraf --input-filter fireboard --test` or use the `file` output to get
+this information.
+
+```
+fireboard,channel=2,host=patas-mbp,scale=Fahrenheit,title=telegraf-FireBoard,uuid=b55e766c-b308-49b5-93a4-df89fe31efd0 temperature=78.2 1561690040000000000
+```
\ No newline at end of file
diff --git a/plugins/inputs/fireboard/fireboard.go b/plugins/inputs/fireboard/fireboard.go
new file mode 100644
index 000000000..2e9c7b025
--- /dev/null
+++ b/plugins/inputs/fireboard/fireboard.go
@@ -0,0 +1,157 @@
+package fireboard
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strconv"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// Fireboard gathers statistics from the fireboard.io servers
+type Fireboard struct {
+	AuthToken   string            `toml:"auth_token"`
+	URL         string            `toml:"url"`
+	HTTPTimeout internal.Duration `toml:"http_timeout"`
+
+	client *http.Client
+}
+
+// NewFireboard returns a new instance of Fireboard with a default http client
+func NewFireboard() *Fireboard {
+	tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
+	client := &http.Client{
+		Transport: tr,
+		Timeout:   time.Duration(4 * time.Second),
+	}
+	return &Fireboard{client: client}
+}
+
+// RTT represents the real time temperature data received from Fireboard
+type RTT struct {
+	Temp       float64 `json:"temp"`
+	Channel    int64   `json:"channel"`
+	Degreetype int     `json:"degreetype"`
+	Created    string  `json:"created"`
+}
+
+type fireboardStats struct {
+	Title       string `json:"title"`
+	UUID        string `json:"uuid"`
+	Latesttemps []RTT  `json:"latest_temps"`
+}
+
+// Sample configuration for the plugin
+const sampleConfig = `
+  ## Specify auth token for your account
+  auth_token = "invalidAuthToken"
+  ## You can override the fireboard server URL if necessary
+  # url = https://fireboard.io/api/v1/devices.json
+  ## You can set a different http_timeout if you need to
+  ## You should set a string using a number and time indicator
+  ## for example "12s" for 12 seconds.
+  # http_timeout = "4s"
+`
+
+// SampleConfig returns a sample configuration for the plugin
+func (r *Fireboard) SampleConfig() string {
+	return sampleConfig
+}
+
+// Description returns a description of the plugin
+func (r *Fireboard) Description() string {
+	return "Read real time temps from fireboard.io servers"
+}
+
+// Init validates the configuration and applies defaults
+func (r *Fireboard) Init() error {
+
+	if len(r.AuthToken) == 0 {
+		return fmt.Errorf("you must specify an authToken")
+	}
+	if len(r.URL) == 0 {
+		r.URL = "https://fireboard.io/api/v1/devices.json"
+	}
+	// Have a default timeout of 4s
+	if r.HTTPTimeout.Duration == 0 {
+		r.HTTPTimeout.Duration = time.Second * 4
+	}
+
+	r.client.Timeout = r.HTTPTimeout.Duration
+
+	return nil
+}
+
+// Gather reads stats from all configured servers.
+func (r *Fireboard) Gather(acc telegraf.Accumulator) error {
+
+	// Perform the GET request to the fireboard servers
+	req, err := http.NewRequest("GET", r.URL, nil)
+	if err != nil {
+		return err
+	}
+	req.Header.Set("Authorization", "Token "+r.AuthToken)
+	resp, err := r.client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	// Successful responses will always return status code 200
+	if resp.StatusCode != http.StatusOK {
+		if resp.StatusCode == http.StatusForbidden {
+			return fmt.Errorf("fireboard server responded with %d [Forbidden], verify your authToken", resp.StatusCode)
+		}
+		return fmt.Errorf("fireboard responded with unexpected status code %d", resp.StatusCode)
+	}
+	// Decode the response JSON into a new stats struct
+	var stats []fireboardStats
+	if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
+		return fmt.Errorf("unable to decode fireboard response: %s", err)
+	}
+	// Range over all devices, gathering stats from each one
+	for _, s := range stats {
+		r.gatherTemps(s, acc)
+	}
+	return nil
+}
+
+// Return text description of degree type (scale)
+func scale(n int) string {
+	switch n {
+	case 1:
+		return "Celsius"
+	case 2:
+		return "Fahrenheit"
+	default:
+		return ""
+	}
+}
+
+// Gathers stats from a single device, adding them to the accumulator
+func (r *Fireboard) gatherTemps(s fireboardStats, acc telegraf.Accumulator) {
+	// Emit one metric per channel in the latest temperature list
+	for _, t := range s.Latesttemps {
+		tags := map[string]string{
+			"title":   s.Title,
+			"uuid":    s.UUID,
+			"channel": strconv.FormatInt(t.Channel, 10),
+			"scale":   scale(t.Degreetype),
+		}
+		fields := map[string]interface{}{
+			"temperature": t.Temp,
+		}
+		acc.AddFields("fireboard", fields, tags)
+	}
+}
+
+func init() {
+	inputs.Add("fireboard", func() telegraf.Input {
+		return NewFireboard()
+	})
+}
diff --git a/plugins/inputs/fireboard/fireboard_test.go b/plugins/inputs/fireboard/fireboard_test.go
new file mode 100644
index 000000000..a5e93a453
--- /dev/null
+++ b/plugins/inputs/fireboard/fireboard_test.go
@@ -0,0 +1,74 @@
+package fireboard
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"strconv"
+	"testing"
+
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/require"
+)
+
+func TestFireboard(t *testing.T) {
+	// Create a test server with the const response JSON
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(http.StatusOK)
+		fmt.Fprintln(w, response)
+	}))
+	defer ts.Close()
+
+	// Parse the URL of the test server, used to verify the expected host
+	u, err := url.Parse(ts.URL)
+	require.NoError(t, err)
+
+	// Create a new fb instance with our given test server
+	fireboard := NewFireboard()
+	fireboard.AuthToken = "b4bb6e6a7b6231acb9f71b304edb2274693d8849"
+	fireboard.URL = u.String()
+
+	// Create a test accumulator
+	acc := &testutil.Accumulator{}
+
+	// Gather data from the test server
+	err = fireboard.Gather(acc)
+	require.NoError(t, err)
+
+	// Expect the correct values for all known keys
+	expectFields := map[string]interface{}{
+		"temperature": float64(79.9),
+	}
+	// Expect the correct values for all tags
+	expectTags := map[string]string{
+		"title":   "telegraf-FireBoard",
+		"uuid":    "b55e766c-b308-49b5-93a4-df89fe31efd0",
+		"channel": strconv.FormatInt(1, 10),
+		"scale":   "Fahrenheit",
+	}
+
+	acc.AssertContainsTaggedFields(t, "fireboard", expectFields, expectTags)
+}
+
+var response = `
+[{
+    "id": 99999,
+    "title": "telegraf-FireBoard",
+    
"created": "2019-03-23T16:48:32.152010Z", + "uuid": "b55e766c-b308-49b5-93a4-df89fe31efd0", + "hardware_id": "XXXXXXXXX", + "latest_temps": [ + { + "temp": 79.9, + "channel": 1, + "degreetype": 2, + "created": "2019-06-25T06:07:10Z" + } + ], + "last_templog": "2019-06-25T06:06:40Z", + "model": "FBX11E", + "channel_count": 6, + "degreetype": 2 + }] +` From d364abf870404ef227714154afdaf0bd738a2d09 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 22 Jul 2019 15:20:25 -0700 Subject: [PATCH 1023/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a87c1aca9..d6afd783c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ #### New Inputs - [docker_log](/plugins/inputs/docker_log) - Contributed by @prashanthjbabu +- [fireboard](/plugins/inputs/fireboard) - Contributed by @ronnocol #### New Parsers @@ -43,6 +44,9 @@ - [#6122](https://github.com/influxdata/telegraf/pull/6122): Add basic auth support to elasticsearch input. - [#6102](https://github.com/influxdata/telegraf/pull/6102): Support string field glob matching in json parser. - [#6101](https://github.com/influxdata/telegraf/pull/6101): Update gjson to allow multipath syntax in json parser. +- [#6144](https://github.com/influxdata/telegraf/pull/6144): Add support for collecting SQL Requests to identify waits and blocking to sqlserver input. +- [#6105](https://github.com/influxdata/telegraf/pull/6105): Collect k8s endpoints, ingress, and services in kube_inventory plugin. +- [#6129](https://github.com/influxdata/telegraf/pull/6129): Add support for field/tag keys to strings processor. #### Bugfixes @@ -50,6 +54,7 @@ - [#4356](https://github.com/influxdata/telegraf/issues/4356): Fix double pct replacement in sysstat input. - [#6004](https://github.com/influxdata/telegraf/issues/6004): Fix race in master node detection in elasticsearch input. - [#6100](https://github.com/influxdata/telegraf/issues/6100): Fix SSPI authentication not working in sqlserver input. +- [#6142](https://github.com/influxdata/telegraf/issues/6142): Fix memory error panic in mqtt input. ## v1.11.3 [unreleased] @@ -61,6 +66,7 @@ - [#6125](https://github.com/influxdata/telegraf/issues/6125): Treat empty array as successful parse in json parser. - [#6094](https://github.com/influxdata/telegraf/issues/6094): Add missing rcode and zonestat to bind input. - [#6114](https://github.com/influxdata/telegraf/issues/6114): Fix lustre2 input plugin config parse regression. +- [#5894](https://github.com/influxdata/telegraf/issues/5894): Fix template pattern partial wildcard matching. 
## v1.11.2 [2019-07-09]
From 92cabcd323f97739edfe3b1176935d3d30e76f64 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 22 Jul 2019 15:37:52 -0700
Subject: [PATCH 1024/1815] Add fireboard to top level README

---
 README.md                          |  1 +
 plugins/inputs/fireboard/README.md | 18 +++++++-----------
 2 files changed, 8 insertions(+), 11 deletions(-)

diff --git a/README.md b/README.md
index 86f34738a..ad3c3bc4e 100644
--- a/README.md
+++ b/README.md
@@ -176,6 +176,7 @@ For documentation on the latest development code see the [documentation index][d
 * [file](./plugins/inputs/file)
 * [filestat](./plugins/inputs/filestat)
 * [filecount](./plugins/inputs/filecount)
+* [fireboard](/plugins/inputs/fireboard)
 * [fluentd](./plugins/inputs/fluentd)
 * [github](./plugins/inputs/github)
 * [graylog](./plugins/inputs/graylog)
diff --git a/plugins/inputs/fireboard/README.md b/plugins/inputs/fireboard/README.md
index b4b8376ce..7e1f351fa 100644
--- a/plugins/inputs/fireboard/README.md
+++ b/plugins/inputs/fireboard/README.md
@@ -1,15 +1,11 @@
 # Fireboard Input Plugin
 
 The fireboard plugin gathers the real time temperature data from fireboard
-thermometers. In order to use this input plugin, you'll need to sign up
-to use their REST API, you can find more information on their website
-here [https://docs.fireboard.io/reference/restapi.html]
+thermometers. In order to use this input plugin, you'll need to sign up to use
+the [Fireboard REST API](https://docs.fireboard.io/reference/restapi.html).
 
 ### Configuration
 
-This section contains the default TOML to configure the plugin. You can
-generate it using `telegraf --usage fireboard`.
-
 ```toml
 [[inputs.fireboard]]
   ## Specify auth token for your account
@@ -22,9 +18,9 @@ generate it using `telegraf --usage fireboard`.
 
 #### auth_token
 
-In lieu of requiring a username and password, this plugin requires the
-authToken that you can generate using the Fireboard REST API as described
-in their docs [https://docs.fireboard.io/reference/restapi.html#Authentication]
+In lieu of requiring a username and password, this plugin requires an
+authentication token that you can generate using the [Fireboard REST
+API](https://docs.fireboard.io/reference/restapi.html#Authentication).
 
 #### url
 
@@ -39,7 +35,7 @@ value in seconds. The default value is four (4) seconds.
 
 ### Metrics
 
 The Fireboard REST API docs have good examples of the data that is available,
-currently this input only returns the real time temperatures.  Temperature
+currently this input only returns the real time temperatures. Temperature
 values are included if they are less than a minute old.
 
 - fireboard
@@ -59,4 +55,4 @@ this information.
 ```
 fireboard,channel=2,host=patas-mbp,scale=Fahrenheit,title=telegraf-FireBoard,uuid=b55e766c-b308-49b5-93a4-df89fe31efd0 temperature=78.2 1561690040000000000
-```
\ No newline at end of file
+```
From 3e50db904ab58b0090c29793cd1c53f2759271fc Mon Sep 17 00:00:00 2001
From: Greg <2653109+glinton@users.noreply.github.com>
Date: Mon, 22 Jul 2019 17:10:40 -0600
Subject: [PATCH 1025/1815] Add certificate verification status to x509_cert input (#6143)

---
 plugins/inputs/x509_cert/README.md         | 12 ++--
 plugins/inputs/x509_cert/dev/telegraf.conf |  3 +-
 plugins/inputs/x509_cert/x509_cert.go      | 71 ++++++++++++++++------
 plugins/inputs/x509_cert/x509_cert_test.go |  5 ++
 4 files changed, 66 insertions(+), 25 deletions(-)

diff --git a/plugins/inputs/x509_cert/README.md b/plugins/inputs/x509_cert/README.md
index a85d05463..450dd3d10 100644
--- a/plugins/inputs/x509_cert/README.md
+++ b/plugins/inputs/x509_cert/README.md
@@ -19,9 +19,6 @@ file or network connection.
   # tls_ca = "/etc/telegraf/ca.pem"
   # tls_cert = "/etc/telegraf/cert.pem"
   # tls_key = "/etc/telegraf/key.pem"
-
-  ## Use TLS but skip chain & host verification
-  # insecure_skip_verify = false
 ```
 
@@ -35,7 +32,10 @@ file or network connection.
   - country
   - province
   - locality
+  - verification
 - fields:
+  - verification_code (int)
+  - verification_error (string)
   - expiry (int, seconds)
   - age (int, seconds)
   - startdate (int, seconds)
@@ -45,6 +45,8 @@ file or network connection.
 
 ### Example output
 
 ```
-x509_cert,host=myhost,source=https://example.org age=1753627i,expiry=5503972i,startdate=1516092060i,enddate=1523349660i 1517845687000000000
-x509_cert,host=myhost,source=/etc/ssl/certs/ssl-cert-snakeoil.pem age=7522207i,expiry=308002732i,startdate=1510323480i,enddate=1825848420i 1517845687000000000
+x509_cert,common_name=ubuntu,source=/etc/ssl/certs/ssl-cert-snakeoil.pem,verification=valid age=7693222i,enddate=1871249033i,expiry=307666777i,startdate=1555889033i,verification_code=0i 1563582256000000000
+x509_cert,common_name=www.example.org,country=US,locality=Los\ Angeles,organization=Internet\ Corporation\ for\ Assigned\ Names\ and\ Numbers,organizational_unit=Technology,province=California,source=https://example.org:443,verification=invalid age=20219055i,enddate=1606910400i,expiry=43328144i,startdate=1543363200i,verification_code=1i,verification_error="x509: certificate signed by unknown authority" 1563582256000000000
+x509_cert,common_name=DigiCert\ SHA2\ Secure\ Server\ CA,country=US,organization=DigiCert\ Inc,source=https://example.org:443,verification=valid age=200838255i,enddate=1678276800i,expiry=114694544i,startdate=1362744000i,verification_code=0i 1563582256000000000
+x509_cert,common_name=DigiCert\ Global\ Root\ CA,country=US,organization=DigiCert\ Inc,organizational_unit=www.digicert.com,source=https://example.org:443,verification=valid age=400465455i,enddate=1952035200i,expiry=388452944i,startdate=1163116800i,verification_code=0i 1563582256000000000
 ```
diff --git a/plugins/inputs/x509_cert/dev/telegraf.conf b/plugins/inputs/x509_cert/dev/telegraf.conf
index 1eda94f02..7545997a4 100644
--- a/plugins/inputs/x509_cert/dev/telegraf.conf
+++ b/plugins/inputs/x509_cert/dev/telegraf.conf
@@ -1,5 +1,4 @@
 [[inputs.x509_cert]]
-  sources = ["https://www.influxdata.com:443"]
+  sources = ["https://expired.badssl.com:443", "https://wrong.host.badssl.com:443"]
 
 [[outputs.file]]
-  files = ["stdout"]
diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go
index 81bcb0d2c..8558378d1 100644
--- a/plugins/inputs/x509_cert/x509_cert.go
+++ b/plugins/inputs/x509_cert/x509_cert.go
@@ -30,9 +30,6 @@ const sampleConfig = `
   # tls_ca = "/etc/telegraf/ca.pem"
   # tls_cert = "/etc/telegraf/cert.pem"
   # tls_key = "/etc/telegraf/key.pem"
-
-  ## Use TLS but skip chain & host verification
-  # insecure_skip_verify = false
 `
 
 const description = "Reads metrics from a SSL certificate"
@@ -40,6 +37,7 @@ const description = "Reads metrics from a SSL certificate"
 
 type X509Cert struct {
 	Sources []string          `toml:"sources"`
 	Timeout internal.Duration `toml:"timeout"`
+	tlsCfg  *tls.Config
 	_tls.ClientConfig
 }
@@ -53,16 +51,20 @@ func (c *X509Cert) SampleConfig() string {
 	return sampleConfig
 }
 
-func (c *X509Cert) getCert(location string, timeout time.Duration) ([]*x509.Certificate, error) {
+func (c *X509Cert) locationToURL(location string) (*url.URL, error) {
 	if strings.HasPrefix(location, "/") {
 		location = "file://" + location
 	}
 
 	u, err := url.Parse(location)
 	if err != nil {
-		return nil, fmt.Errorf("failed to parse cert location - %s\n", err.Error())
+		return nil, fmt.Errorf("failed to parse cert location - %s", err.Error())
 	}
 
+	return u, nil
+}
+
+func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certificate, error) {
 	switch u.Scheme {
 	case "https":
 		u.Scheme = "tcp"
@@ -70,22 +72,15 @@ func (c *X509Cert) getCert(location string, timeout time.Duration) ([]*x509.Cert
 	case "udp", "udp4", "udp6":
 		fallthrough
 	case "tcp", "tcp4", "tcp6":
-		tlsCfg, err := c.ClientConfig.TLSConfig()
-		if err != nil {
-			return nil, err
-		}
-
 		ipConn, err := net.DialTimeout(u.Scheme, u.Host, timeout)
 		if err != nil {
 			return nil, err
 		}
 		defer ipConn.Close()
 
-		if tlsCfg == nil {
-			tlsCfg = &tls.Config{}
-		}
-		tlsCfg.ServerName = u.Hostname()
-		conn := tls.Client(ipConn, tlsCfg)
+		c.tlsCfg.ServerName = u.Hostname()
+		c.tlsCfg.InsecureSkipVerify = true
+		conn := tls.Client(ipConn, c.tlsCfg)
 		defer conn.Close()
 
 		hsErr := conn.Handshake()
@@ -114,7 +109,7 @@ func (c *X509Cert) getCert(location string, timeout time.Duration) ([]*x509.Cert
 
 		return []*x509.Certificate{cert}, nil
 	default:
-		return nil, fmt.Errorf("unsuported scheme '%s' in location %s\n", u.Scheme, location)
+		return nil, fmt.Errorf("unsupported scheme '%s' in location %s", u.Scheme, u.String())
 	}
 }
 
@@ -164,15 +159,41 @@ func (c *X509Cert) Gather(acc telegraf.Accumulator) error {
 	now := time.Now()
 
 	for _, location := range c.Sources {
-		certs, err := c.getCert(location, c.Timeout.Duration*time.Second)
+		u, err := c.locationToURL(location)
+		if err != nil {
+			acc.AddError(err)
+			return nil
+		}
+
+		certs, err := c.getCert(u, c.Timeout.Duration*time.Second)
 		if err != nil {
 			acc.AddError(fmt.Errorf("cannot get SSL cert '%s': %s", location, err.Error()))
 		}
 
-		for _, cert := range certs {
+		for i, cert := range certs {
 			fields := getFields(cert, now)
 			tags := getTags(cert.Subject, location)
 
+			// The first certificate is the leaf/end-entity certificate which needs DNS
+			// name validation against the URL hostname.
+ opts := x509.VerifyOptions{} + if i == 0 { + opts.DNSName = u.Hostname() + } + if c.tlsCfg.RootCAs != nil { + opts.Roots = c.tlsCfg.RootCAs + } + + _, err = cert.Verify(opts) + if err == nil { + tags["verification"] = "valid" + fields["verification_code"] = 0 + } else { + tags["verification"] = "invalid" + fields["verification_code"] = 1 + fields["verification_error"] = err.Error() + } + acc.AddFields("x509_cert", fields, tags) } } @@ -180,6 +201,20 @@ func (c *X509Cert) Gather(acc telegraf.Accumulator) error { return nil } +func (c *X509Cert) Init() error { + tlsCfg, err := c.ClientConfig.TLSConfig() + if err != nil { + return err + } + if tlsCfg == nil { + tlsCfg = &tls.Config{} + } + + c.tlsCfg = tlsCfg + + return nil +} + func init() { inputs.Add("x509_cert", func() telegraf.Input { return &X509Cert{ diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index 933676417..188b510d2 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -110,6 +110,7 @@ func TestGatherRemote(t *testing.T) { Sources: []string{test.server}, Timeout: internal.Duration{Duration: test.timeout}, } + sc.Init() sc.InsecureSkipVerify = true testErr := false @@ -169,6 +170,7 @@ func TestGatherLocal(t *testing.T) { sc := X509Cert{ Sources: []string{f.Name()}, } + sc.Init() error := false @@ -218,6 +220,7 @@ func TestGatherChain(t *testing.T) { sc := X509Cert{ Sources: []string{f.Name()}, } + sc.Init() error := false @@ -237,6 +240,7 @@ func TestGatherChain(t *testing.T) { func TestStrings(t *testing.T) { sc := X509Cert{} + sc.Init() tests := []struct { name string @@ -265,6 +269,7 @@ func TestGatherCert(t *testing.T) { m := &X509Cert{ Sources: []string{"https://www.influxdata.com:443"}, } + m.Init() var acc testutil.Accumulator err := m.Gather(&acc) From afe86c0f4642415760616f9f9fedf68d44ce8c94 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 22 Jul 2019 17:30:53 -0600 Subject: [PATCH 1026/1815] Avoid panic in github input (#6152) --- plugins/inputs/github/github.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/plugins/inputs/github/github.go b/plugins/inputs/github/github.go index ff497e55b..4cba9b2d2 100644 --- a/plugins/inputs/github/github.go +++ b/plugins/inputs/github/github.go @@ -148,9 +148,9 @@ func splitRepositoryName(repositoryName string) (string, string, error) { return splits[0], splits[1], nil } -func getLicense(repositoryInfo *github.Repository) string { - if repositoryInfo.GetLicense() != nil { - return *repositoryInfo.License.Name +func getLicense(rI *github.Repository) string { + if licenseName := rI.GetLicense().GetName(); licenseName != "" { + return licenseName } return "None" @@ -158,19 +158,19 @@ func getLicense(repositoryInfo *github.Repository) string { func getTags(repositoryInfo *github.Repository) map[string]string { return map[string]string{ - "owner": *repositoryInfo.Owner.Login, - "name": *repositoryInfo.Name, - "language": *repositoryInfo.Language, + "owner": repositoryInfo.GetOwner().GetLogin(), + "name": repositoryInfo.GetName(), + "language": repositoryInfo.GetLanguage(), "license": getLicense(repositoryInfo), } } func getFields(repositoryInfo *github.Repository) map[string]interface{} { return map[string]interface{}{ - "stars": *repositoryInfo.StargazersCount, - "forks": *repositoryInfo.ForksCount, - "open_issues": *repositoryInfo.OpenIssuesCount, - "size": *repositoryInfo.Size, + "stars": 
repositoryInfo.GetStargazersCount(), + "forks": repositoryInfo.GetForksCount(), + "open_issues": repositoryInfo.GetOpenIssuesCount(), + "size": repositoryInfo.GetSize(), } } From b9cb606ca0450a82aae4ba4b4c04b581f545e058 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 22 Jul 2019 16:34:41 -0700 Subject: [PATCH 1027/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d6afd783c..adb0367ed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,7 @@ - [#6144](https://github.com/influxdata/telegraf/pull/6144): Add support for collecting SQL Requests to identify waits and blocking to sqlserver input. - [#6105](https://github.com/influxdata/telegraf/pull/6105): Collect k8s endpoints, ingress, and services in kube_inventory plugin. - [#6129](https://github.com/influxdata/telegraf/pull/6129): Add support for field/tag keys to strings processor. +- [#6143](https://github.com/influxdata/telegraf/pull/6143): Add certificate verification status to x509_cert input. #### Bugfixes @@ -67,6 +68,7 @@ - [#6094](https://github.com/influxdata/telegraf/issues/6094): Add missing rcode and zonestat to bind input. - [#6114](https://github.com/influxdata/telegraf/issues/6114): Fix lustre2 input plugin config parse regression. - [#5894](https://github.com/influxdata/telegraf/issues/5894): Fix template pattern partial wildcard matching. +- [#6151](https://github.com/influxdata/telegraf/issues/6151): Fix panic in github input. ## v1.11.2 [2019-07-09] From aec231fbed4e487bd3574c63c386f4dd1d1a7c9c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 23 Jul 2019 10:43:39 -0700 Subject: [PATCH 1028/1815] Set 1.11.3 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index adb0367ed..da5fc7e2d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,7 +57,7 @@ - [#6100](https://github.com/influxdata/telegraf/issues/6100): Fix SSPI authentication not working in sqlserver input. - [#6142](https://github.com/influxdata/telegraf/issues/6142): Fix memory error panic in mqtt input. 
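The github input panic fix above relies on go-github's generated nil-safe accessors: `GetLicense()` returns a nil `*License` when the API omits the field, and `GetName()` on that nil receiver returns an empty string, so chained calls degrade gracefully where a direct dereference would panic. A small sketch of the difference, assuming the `github.com/google/go-github/github` package the plugin imports:

```go
package main

import (
	"fmt"

	"github.com/google/go-github/github"
)

func main() {
	// A repository with no license populated, as the GitHub API may return.
	repo := &github.Repository{}

	// Safe: GetLicense() returns nil, and GetName() on a nil *License
	// returns "", so this prints an empty string instead of panicking.
	fmt.Printf("license: %q\n", repo.GetLicense().GetName())

	// Unsafe: this would dereference a nil pointer and panic, which is
	// the failure mode the patch removes.
	// fmt.Println(*repo.License.Name)
}
```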
-## v1.11.3 [unreleased]
+## v1.11.3 [2019-07-23]
 
 #### Bugfixes
From ac107143329b15f52a98cdb1f40688e66e264a67 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 23 Jul 2019 13:20:39 -0700
Subject: [PATCH 1029/1815] Add Linux mips build (#6153)

---
 scripts/build.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/scripts/build.py b/scripts/build.py
index 5f2bcf6b5..dbca3a50d 100755
--- a/scripts/build.py
+++ b/scripts/build.py
@@ -89,7 +89,7 @@ targets = {
 supported_builds = {
     "windows": [ "amd64", "i386" ],
-    "linux": [ "amd64", "i386", "armhf", "armel", "arm64", "static_amd64", "s390x", "mipsel"],
+    "linux": [ "amd64", "i386", "armhf", "armel", "arm64", "static_amd64", "s390x", "mipsel", "mips"],
     "freebsd": [ "amd64", "i386" ]
 }
@@ -574,7 +574,7 @@ def package(build_output, pkg_name, version, nightly=False, iteration=1, static=
                     shutil.copy(fr, to)
 
             for package_type in supported_packages[platform]:
-                if package_type == "rpm" and arch == "mipsel":
+                if package_type == "rpm" and arch in ["mipsel", "mips"]:
                     continue
                 # Package the directory structure for each package type for the platform
                 logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type))
From eb0f493998a704ca0002e2d54e8b7d62fb8f7f57 Mon Sep 17 00:00:00 2001
From: George
Date: Wed, 24 Jul 2019 01:00:07 +0200
Subject: [PATCH 1030/1815] Add Start() function to DockerLogs input plugin (#6157)

---
 plugins/inputs/docker_log/docker_log.go | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go
index 01a2f83da..f2b5b5148 100644
--- a/plugins/inputs/docker_log/docker_log.go
+++ b/plugins/inputs/docker_log/docker_log.go
@@ -68,6 +68,8 @@ const (
 var (
 	containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"}
+	// ensure *DockerLogs implements telegraf.ServiceInput
+	_ telegraf.ServiceInput = (*DockerLogs)(nil)
 )
 
 type DockerLogs struct {
@@ -380,6 +382,12 @@ func tailMultiplexed(
 	return err
 }
 
+// Start is a noop which is required for a *DockerLogs to implement
+// the telegraf.ServiceInput interface
+func (d *DockerLogs) Start(telegraf.Accumulator) error {
+	return nil
+}
+
 func (d *DockerLogs) Stop() {
 	d.cancelTails()
 	d.wg.Wait()
From 4e59d51844dc2da4f2771eac4a9e33a986f8751b Mon Sep 17 00:00:00 2001
From: Greg <2653109+glinton@users.noreply.github.com>
Date: Tue, 23 Jul 2019 17:04:51 -0600
Subject: [PATCH 1031/1815] Add networks, subscribers, and watchers to github input (#6161)

---
 plugins/inputs/github/README.md      |  7 +++++--
 plugins/inputs/github/github.go      |  3 +++
 plugins/inputs/github/github_test.go | 16 ++++++++++++----
 3 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/plugins/inputs/github/README.md b/plugins/inputs/github/README.md
index 524d1d0e7..29eddf25d 100644
--- a/plugins/inputs/github/README.md
+++ b/plugins/inputs/github/README.md
@@ -28,10 +28,13 @@ alternative method for collecting repository information.
- language - The primary language of the repository - license - The license set for the repository - fields: - - stars (int) - forks (int) - open_issues (int) + - networks (int) - size (int) + - subscribers (int) + - stars (int) + - watchers (int) When the [internal][] input is enabled: @@ -46,7 +49,7 @@ When the [internal][] input is enabled: ### Example Output ``` -github,full_name=influxdata/telegraf,name=telegraf,owner=influxdata,language=Go,license=MIT\ License stars=6401i,forks=2421i,open_issues=722i,size=22611i 1552651811000000000 +github_repository,language=Go,license=MIT\ License,name=telegraf,owner=influxdata forks=2679i,networks=2679i,open_issues=794i,size=23263i,stars=7091i,subscribers=316i,watchers=7091i 1563901372000000000 internal_github,access_token=Unauthenticated rate_limit_remaining=59i,rate_limit_limit=60i,rate_limit_blocks=0i 1552653551000000000 ``` diff --git a/plugins/inputs/github/github.go b/plugins/inputs/github/github.go index 4cba9b2d2..906c99a20 100644 --- a/plugins/inputs/github/github.go +++ b/plugins/inputs/github/github.go @@ -168,6 +168,9 @@ func getTags(repositoryInfo *github.Repository) map[string]string { func getFields(repositoryInfo *github.Repository) map[string]interface{} { return map[string]interface{}{ "stars": repositoryInfo.GetStargazersCount(), + "subscribers": repositoryInfo.GetSubscribersCount(), + "watchers": repositoryInfo.GetWatchersCount(), + "networks": repositoryInfo.GetNetworkCount(), "forks": repositoryInfo.GetForksCount(), "open_issues": repositoryInfo.GetOpenIssuesCount(), "size": repositoryInfo.GetSize(), diff --git a/plugins/inputs/github/github_test.go b/plugins/inputs/github/github_test.go index 0ebae3a67..33abc1c3e 100644 --- a/plugins/inputs/github/github_test.go +++ b/plugins/inputs/github/github_test.go @@ -98,12 +98,17 @@ func TestGetFields(t *testing.T) { forks := 2 openIssues := 3 size := 4 + subscribers := 5 + watchers := 6 repository := gh.Repository{ - StargazersCount: &stars, - ForksCount: &forks, - OpenIssuesCount: &openIssues, - Size: &size, + StargazersCount: &stars, + ForksCount: &forks, + OpenIssuesCount: &openIssues, + Size: &size, + NetworkCount: &forks, + SubscribersCount: &subscribers, + WatchersCount: &watchers, } getFieldsReturn := getFields(&repository) @@ -112,8 +117,11 @@ func TestGetFields(t *testing.T) { correctFieldReturn["stars"] = 1 correctFieldReturn["forks"] = 2 + correctFieldReturn["networks"] = 2 correctFieldReturn["open_issues"] = 3 correctFieldReturn["size"] = 4 + correctFieldReturn["subscribers"] = 5 + correctFieldReturn["watchers"] = 6 require.Equal(t, true, reflect.DeepEqual(getFieldsReturn, correctFieldReturn)) } From 9fc8976c669ae211aa27cecbac38cd9f92dd096f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 23 Jul 2019 16:58:31 -0700 Subject: [PATCH 1032/1815] Correct error message when converting to a float --- plugins/processors/converter/converter.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/processors/converter/converter.go b/plugins/processors/converter/converter.go index db240abf4..bf9b851fb 100644 --- a/plugins/processors/converter/converter.go +++ b/plugins/processors/converter/converter.go @@ -240,7 +240,7 @@ func (p *Converter) convertFields(metric telegraf.Metric) { v, ok := toFloat(value) if !ok { metric.RemoveField(key) - logPrintf("error converting to integer [%T]: %v\n", value, value) + logPrintf("error converting to float [%T]: %v\n", value, value) continue } From 417740738d46ad806cc50add734b8046666f9f23 Mon Sep 17 00:00:00 2001 From: 
Mika Eloranta Date: Wed, 24 Jul 2019 23:52:44 +0300 Subject: [PATCH 1033/1815] Support percentage value parsing in redis input (#6163) --- plugins/inputs/redis/redis.go | 7 +++++-- plugins/inputs/redis/redis_test.go | 4 ++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index cd438397c..40f059ce8 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -253,8 +253,11 @@ func gatherInfoOutput( val := strings.TrimSpace(parts[1]) + // Some percentage values have a "%" suffix that we need to get rid of before int/float conversion + num_val := strings.TrimSuffix(val, "%") + // Try parsing as int - if ival, err := strconv.ParseInt(val, 10, 64); err == nil { + if ival, err := strconv.ParseInt(num_val, 10, 64); err == nil { switch name { case "keyspace_hits": keyspace_hits = ival @@ -269,7 +272,7 @@ func gatherInfoOutput( } // Try parsing as a float - if fval, err := strconv.ParseFloat(val, 64); err == nil { + if fval, err := strconv.ParseFloat(num_val, 64); err == nil { fields[metric] = fval continue } diff --git a/plugins/inputs/redis/redis_test.go b/plugins/inputs/redis/redis_test.go index fd16bbdd9..1257befca 100644 --- a/plugins/inputs/redis/redis_test.go +++ b/plugins/inputs/redis/redis_test.go @@ -49,6 +49,8 @@ func TestRedis_ParseMetrics(t *testing.T) { "used_memory_rss": int64(811008), "used_memory_peak": int64(1003936), "used_memory_lua": int64(33792), + "used_memory_peak_perc": float64(93.58), + "used_memory_dataset_perc": float64(20.27), "mem_fragmentation_ratio": float64(0.81), "loading": int64(0), "rdb_changes_since_last_save": int64(0), @@ -152,6 +154,8 @@ used_memory_peak_human:980.41K used_memory_lua:33792 mem_fragmentation_ratio:0.81 mem_allocator:libc +used_memory_peak_perc:93.58% +used_memory_dataset_perc:20.27% # Persistence loading:0 From 4f115437e6a1ea8d25e65d6e2d205ae927133414 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 24 Jul 2019 14:04:33 -0700 Subject: [PATCH 1034/1815] Use Go style conventions for variable name --- plugins/inputs/redis/redis.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index 40f059ce8..d17e7a845 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -254,10 +254,10 @@ func gatherInfoOutput( val := strings.TrimSpace(parts[1]) // Some percentage values have a "%" suffix that we need to get rid of before int/float conversion - num_val := strings.TrimSuffix(val, "%") + val = strings.TrimSuffix(val, "%") // Try parsing as int - if ival, err := strconv.ParseInt(num_val, 10, 64); err == nil { + if ival, err := strconv.ParseInt(val, 10, 64); err == nil { switch name { case "keyspace_hits": keyspace_hits = ival @@ -272,7 +272,7 @@ func gatherInfoOutput( } // Try parsing as a float - if fval, err := strconv.ParseFloat(num_val, 64); err == nil { + if fval, err := strconv.ParseFloat(val, 64); err == nil { fields[metric] = fval continue } From 7625b6f0895513000c6b9aff6fdb13caae3fa1b3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 24 Jul 2019 14:05:04 -0700 Subject: [PATCH 1035/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index da5fc7e2d..8f91570b8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ - [#6105](https://github.com/influxdata/telegraf/pull/6105): Collect k8s endpoints, ingress, and services in kube_inventory plugin. 
- [#6129](https://github.com/influxdata/telegraf/pull/6129): Add support for field/tag keys to strings processor. - [#6143](https://github.com/influxdata/telegraf/pull/6143): Add certificate verification status to x509_cert input. +- [#6163](https://github.com/influxdata/telegraf/pull/6163): Support percentage value parsing in redis input. #### Bugfixes From 785b76d3d1b104af2badc6e77f62abcf71db9098 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 25 Jul 2019 17:15:32 -0700 Subject: [PATCH 1036/1815] Update sample config --- etc/telegraf.conf | 170 ++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 140 insertions(+), 30 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 03427e913..febfe6454 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -454,7 +454,7 @@ # ## Set the interval to check if the Elasticsearch nodes are available # ## Setting to "0s" will disable the health check (not recommended in production) # health_check_interval = "10s" -# ## HTTP basic authentication details (eg. when using Shield) +# ## HTTP basic authentication details # # username = "telegraf" # # password = "mypassword" # @@ -591,7 +591,7 @@ # # A plugin that can transmit metrics over HTTP # [[outputs.http]] # ## URL is the address to send metrics to -# url = "http://127.0.0.1:8080/metric" +# url = "http://127.0.0.1:8080/telegraf" # # ## Timeout for HTTP message # # timeout = "5s" @@ -622,14 +622,14 @@ # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md # # data_format = "influx" # +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# # ## Additional HTTP headers # # [outputs.http.headers] # # # Should be set manually to "application/json" for json data_format # # Content-Type = "text/plain; charset=utf-8" -# -# ## HTTP Content-Encoding for write request body, can be set to "gzip" to -# ## compress body or "identity" to apply no encoding. -# # content_encoding = "identity" # # Configuration for sending metrics to InfluxDB @@ -1295,6 +1295,16 @@ # float = [] +# # Dates measurements, tags, and fields that pass through this filter. +# [[processors.date]] +# ## New tag to create +# tag_key = "month" +# +# ## Date format string, must be a representation of the Go "reference time" +# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". +# date_format = "Jan" + + # # Map enum values according to given table. # [[processors.enum]] # [[processors.enum.mapping]] @@ -1351,6 +1361,14 @@ # data_format = "influx" +# # Rotate a single valued metric into a multi field metric +# [[processors.pivot]] +# ## Tag to use for naming the new field. +# tag_key = "name" +# ## Field to use as the value of the new field. +# value_key = "value" + + # # Print all metrics that pass through this filter. # [[processors.printer]] @@ -1363,10 +1381,12 @@ # # key = "resp_code" # # ## Regular expression to match on a tag value # # pattern = "^(\\d)\\d\\d$" -# # ## Pattern for constructing a new value (${1} represents first subgroup) +# # ## Matches of the pattern will be replaced with this string. Use ${1} +# # ## notation to use the text of the first submatch. 
# #   replacement = "${1}xx"
# #
# #   [[processors.regex.fields]]
+# #     ## Field to change
# #     key = "request"
# #     ## All the power of the Go regular expressions available here
# #     ## For example, named subgroups
@@ -1481,6 +1501,14 @@
 # #   add_aggregate_fields = []
 
+# # Rotate multi field metric into several single field metrics
+# [[processors.unpivot]]
+#   ## Tag to use for the name.
+#   tag_key = "name"
+#   ## Field to use for the name of the value.
+#   value_key = "value"
+
 ###############################################################################
 #                            AGGREGATOR PLUGINS                               #
 ###############################################################################
@@ -1646,10 +1674,12 @@
 # # Gather ActiveMQ metrics
 # [[inputs.activemq]]
-#   ## Required ActiveMQ Endpoint
-#   # server = "192.168.50.10"
+#   ## ActiveMQ WebConsole URL
+#   url = "http://127.0.0.1:8161"
 #
-#   ## Required ActiveMQ port
+#   ## Required ActiveMQ Endpoint
+#   ## deprecated in 1.11; use the url option
+#   # server = "127.0.0.1"
 #   # port = 8161
 #
 #   ## Credentials for basic HTTP authentication
@@ -1667,6 +1697,7 @@
 #   # tls_cert = "/etc/telegraf/cert.pem"
 #   # tls_key = "/etc/telegraf/key.pem"
 #   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = false
 
 # # Read stats from aerospike server(s)
@@ -2129,6 +2160,8 @@
 #
 #   ## Container states to include and exclude. Globs accepted.
 #   ## When empty only containers in the "running" state will be captured.
+#   ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
+#   ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
 #   # container_state_include = []
 #   # container_state_exclude = []
 #
@@ -2232,6 +2265,10 @@
 #   ## "breaker". Per default, all stats are gathered.
 #   # node_stats = ["jvm", "http"]
 #
+#   ## HTTP Basic Authentication username and password.
+#   # username = ""
+#   # password = ""
+#
 #   ## Optional TLS Config
 #   # tls_ca = "/etc/telegraf/ca.pem"
 #   # tls_cert = "/etc/telegraf/cert.pem"
@@ -2350,6 +2387,18 @@
 #   md5 = false
 
+# # Read real time temps from fireboard.io servers
+# [[inputs.fireboard]]
+#   ## Specify auth token for your account
+#   auth_token = "invalidAuthToken"
+#   ## You can override the fireboard server URL if necessary
+#   # url = https://fireboard.io/api/v1/devices.json
+#   ## You can set a different http_timeout if you need to
+#   ## You should set a string using a number and time indicator
+#   ## for example "12s" for 12 seconds.
+#   # http_timeout = "4s"
+
+
 # # Read metrics exposed by fluentd in_monitor plugin
 # [[inputs.fluentd]]
 #   ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
@@ -2503,9 +2552,13 @@
 # # HTTP/HTTPS request given an address a method and a timeout
 # [[inputs.http_response]]
+#   ## Deprecated in 1.12, use 'urls'
 #   ## Server address (default http://localhost)
 #   # address = "http://localhost"
 #
+#   ## List of urls to query.
+#   # urls = ["http://localhost"]
+#
 #   ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set)
 #   # http_proxy = "http://localhost:8888"
 #
@@ -2538,6 +2591,9 @@
 #   ## HTTP Request Headers (all values must be strings)
 #   # [inputs.http_response.headers]
 #   #   Host = "github.com"
+#
+#   ## Interface to use when dialing an address
+#   # interface = "eth0"
 
 # # Read flattened metrics from one or more JSON HTTP endpoints
@@ -3542,7 +3598,6 @@
 #   # count = 1
 #
 #   ## Interval, in s, at which to ping. 0 == default (ping -i )
-#   ## Not available in Windows.
# # ping_interval = 1.0 # # ## Per-ping timeout, in s. 0 == no timeout (ping -W ) @@ -3551,16 +3606,21 @@ # ## Total-ping deadline, in s. 0 == no deadline (ping -w ) # # deadline = 10 # -# ## Interface or source address to send ping from (ping -I ) -# ## on Darwin and Freebsd only source address possible: (ping -S ) +# ## Interface or source address to send ping from (ping -I[-S] ) # # interface = "" # -# ## Specify the ping executable binary, default is "ping" -# # binary = "ping" +# ## How to ping. "native" doesn't have external dependencies, while "exec" depends on 'ping'. +# # method = "exec" # -# ## Arguments for ping command -# ## when arguments is not empty, other options (ping_interval, timeout, etc) will be ignored +# ## Specify the ping executable binary, default is "ping" +# # binary = "ping" +# +# ## Arguments for ping command. When arguments is not empty, system binary will be used and +# ## other options (ping_interval, timeout, etc) will be ignored. # # arguments = ["-c", "3"] +# +# ## Use only ipv6 addresses when resolving hostnames. +# # ipv6 = false # # Measure postfix queue statistics @@ -4004,7 +4064,11 @@ # ## - MemoryClerk # ## - VolumeSpace # ## - PerformanceMetrics -# # exclude_query = [ 'DatabaseIO' ] +# ## - Schedulers +# ## - AzureDBResourceStats +# ## - AzureDBResourceGovernance +# ## - SqlRequests +# exclude_query = [ 'Schedulers' ] # # Gather timeseries from Google Cloud Platform v3 monitoring API @@ -4280,9 +4344,6 @@ # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" -# -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false # # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools @@ -4489,17 +4550,19 @@ # # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms # [[inputs.cisco_telemetry_mdt]] -# ## Telemetry transport (one of: tcp, grpc) +# ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when +# ## using the grpc transport. # transport = "grpc" # # ## Address and port to host telemetry listener # service_address = ":57000" # -# ## Enable TLS for GRPC transport +# ## Enable TLS; grpc transport only. # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" # -# ## Enable TLS client authentication and define allowed CA certificates +# ## Enable TLS client authentication and define allowed CA certificates; grpc +# ## transport only. # # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] # # ## Define aliases to map telemetry encoding paths to simple measurement names @@ -4633,6 +4696,43 @@ # data_format = "influx" +# # Read logging output from the Docker engine +# [[inputs.docker_log]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# # endpoint = "unix:///var/run/docker.sock" +# +# ## When true, container logs are read from the beginning; otherwise +# ## reading begins at the end of the log. +# # from_beginning = false +# +# ## Timeout for Docker API calls. +# # timeout = "5s" +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# # container_name_include = [] +# # container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. 
+# # container_state_include = [] +# # container_state_exclude = [] +# +# ## docker labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# # docker_label_include = [] +# # docker_label_exclude = [] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Influx HTTP write listener # [[inputs.http_listener]] # ## Address and port to host HTTP listener on @@ -4685,6 +4785,10 @@ # ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) # # max_body_size = "500MB" # +# ## Part of the request to consume. Available options are "body" and +# ## "query". +# # data_source = "body" +# # ## Set one or more allowed client CA certificate file names to # ## enable mutually authenticated TLS connections # # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] @@ -4773,11 +4877,12 @@ # ] # # ## Optional TLS Config -# tls_ca = "/etc/telegraf/ca.pem" -# tls_cert = "/etc/telegraf/cert.pem" -# tls_key = "/etc/telegraf/key.pem" +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" # ## Use TLS but skip chain & host verification -# insecure_skip_verify = false +# # insecure_skip_verify = false # # ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms. # ## Failed streams/calls will not be retried if 0 is provided @@ -5236,7 +5341,12 @@ # ## OR # # bearer_token_string = "abc_123" # -# ## Specify timeout duration for slower prometheus clients (default is 3s) +# ## HTTP Basic Authentication username and password. ('bearer_token' and +# ## 'bearer_token_string' take priority) +# # username = "" +# # password = "" +# +# ## Specify timeout duration for slower prometheus clients (default is 3s) # # response_timeout = "3s" # # ## Optional TLS Config @@ -5336,7 +5446,7 @@ # delete_timings = true # # ## Percentiles to calculate for timing & histogram stats -# percentiles = [90] +# percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0] # # ## separator to use between elements of a statsd metric # metric_separator = "_" From a941779ea8e27750f3597688513e83d0d1f5988b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 25 Jul 2019 17:36:46 -0700 Subject: [PATCH 1037/1815] Call Init before Start in test mode (#6171) --- agent/agent.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 542154388..aa3e32a43 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -182,6 +182,12 @@ func (a *Agent) Test(ctx context.Context, waitDuration time.Duration) error { } } + log.Printf("D! [agent] Initializing plugins") + err := a.initPlugins() + if err != nil { + return err + } + if hasServiceInputs { log.Printf("D! 
[agent] Starting service inputs") err := a.startServiceInputs(ctx, metricC) @@ -191,11 +197,6 @@ func (a *Agent) Test(ctx context.Context, waitDuration time.Duration) error { } for _, input := range a.Config.Inputs { - err := input.Init() - if err != nil { - return err - } - select { case <-ctx.Done(): return nil From 4d73290e13ab966f32d54381f45e96f9ab5ec758 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Fri, 26 Jul 2019 16:01:17 -0600 Subject: [PATCH 1038/1815] Update links in fluentd input readme (#6175) --- plugins/inputs/fluentd/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/fluentd/README.md b/plugins/inputs/fluentd/README.md index 6c5bada3c..3fabbddb7 100644 --- a/plugins/inputs/fluentd/README.md +++ b/plugins/inputs/fluentd/README.md @@ -1,10 +1,10 @@ # Fluentd Input Plugin -The fluentd plugin gathers metrics from plugin endpoint provided by [in_monitor plugin](http://docs.fluentd.org/v0.12/articles/monitoring). +The fluentd plugin gathers metrics from plugin endpoint provided by [in_monitor plugin](https://docs.fluentd.org/input/monitor_agent). This plugin understands data provided by /api/plugin.json resource (/api/config.json is not covered). You might need to adjust your fluentd configuration, in order to reduce series cardinality in case your fluentd restarts frequently. Every time fluentd starts, `plugin_id` value is given a new random value. -According to [fluentd documentation](http://docs.fluentd.org/v0.12/articles/config-file), you are able to add `@id` parameter for each plugin to avoid this behaviour and define custom `plugin_id`. +According to [fluentd documentation](https://docs.fluentd.org/configuration/config-file#common-plugin-parameter), you are able to add `@id` parameter for each plugin to avoid this behaviour and define custom `plugin_id`. 
example configuration with `@id` parameter for http plugin: ``` From aea09b3a204126d5bf753f86c555c656ebab1b08 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 29 Jul 2019 20:41:12 -0700 Subject: [PATCH 1039/1815] Use sarama built in support for consumer groups (#6172) --- Gopkg.lock | 108 +++- Gopkg.toml | 4 - Makefile | 8 +- docs/LICENSE_OF_DEPENDENCIES.md | 3 +- plugins/inputs/kafka_consumer/README.md | 17 +- .../inputs/kafka_consumer/kafka_consumer.go | 427 +++++++++------ .../kafka_consumer_integration_test.go | 94 ---- .../kafka_consumer/kafka_consumer_test.go | 511 +++++++++++------- 8 files changed, 691 insertions(+), 481 deletions(-) delete mode 100644 plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go diff --git a/Gopkg.lock b/Gopkg.lock index 3bf568686..470c56cda 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -61,6 +61,14 @@ revision = "1f7cd6cfe0adea687ad44a512dfe76140f804318" version = "v10.12.0" +[[projects]] + digest = "1:82041ab48e5c76da656b723fdc13a2b9ec716cdc736f82adaac77f5c39d4fca8" + name = "github.com/DataDog/zstd" + packages = ["."] + pruneopts = "" + revision = "2347a397da4ee9c6b8226d4aff82c302d0e52773" + version = "v1.4.1" + [[projects]] branch = "master" digest = "1:298712a3ee36b59c3ca91f4183bd75d174d5eaa8b4aed5072831f126e2e752f6" @@ -81,12 +89,12 @@ version = "v0.4.9" [[projects]] - digest = "1:213b41361ad1cb4768add9d26c2e27794c65264eefdb24ed6ea34cdfeeff3f3c" + digest = "1:5dd52495eaf9fad11f4742f341166aa9eb68f70061fc1a9b546f9481b284b6d8" name = "github.com/Shopify/sarama" packages = ["."] pruneopts = "" - revision = "a6144ae922fd99dd0ea5046c8137acfb7fab0914" - version = "v1.18.0" + revision = "46c83074a05474240f9620fb7c70fb0d80ca401a" + version = "v1.23.1" [[projects]] digest = "1:f82b8ac36058904227087141017bb82f4b0fc58272990a4cdae3e2d6d222644e" @@ -195,14 +203,6 @@ pruneopts = "" revision = "3a771d992973f24aa725d07868b467d1ddfceafb" -[[projects]] - digest = "1:c5978131c797af795972c27c25396c81d1bf53b7b6e8e3e0259e58375765c071" - name = "github.com/bsm/sarama-cluster" - packages = ["."] - pruneopts = "" - revision = "cf455bc755fe41ac9bb2861e7a961833d9c2ecc3" - version = "v2.1.13" - [[projects]] digest = "1:e5691038f8e87e7da05280095d968e50c17d624e25cca095d4e4cd947a805563" name = "github.com/caio/go-tdigest" @@ -640,6 +640,14 @@ pruneopts = "" revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00" +[[projects]] + digest = "1:0038a7f43b51c8b2a8cd03b5372e73f8eadfe156484c2ae8185ae836f8ebc2cd" + name = "github.com/hashicorp/go-uuid" + packages = ["."] + pruneopts = "" + revision = "4f571afc59f3043a65f8fe6bf46d887b10a01d43" + version = "v1.0.1" + [[projects]] digest = "1:f72168ea995f398bab88e84bd1ff58a983466ba162fb8d50d47420666cd57fad" name = "github.com/hashicorp/serf" @@ -710,6 +718,17 @@ revision = "8faa4453fc7051d1076053f8854077753ab912f2" version = "v3.4.0" +[[projects]] + digest = "1:d45477e90c25c8c6d7d4237281167aa56079382fc042db4b44a8328071649bfa" + name = "github.com/jcmturner/gofork" + packages = [ + "encoding/asn1", + "x/crypto/pbkdf2", + ] + pruneopts = "" + revision = "dc7c13fece037a4a36e2b3c69db4991498d30692" + version = "v1.0.0" + [[projects]] digest = "1:6f49eae0c1e5dab1dafafee34b207aeb7a42303105960944828c2079b92fc88e" name = "github.com/jmespath/go-jmespath" @@ -1523,6 +1542,72 @@ revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" version = "v0.9.1" +[[projects]] + digest = "1:4777ba481cc12866b89aafb0a67529e7ac48b9aea06a25f3737b2cf5a3ffda12" + name = "gopkg.in/jcmturner/aescts.v1" + packages = ["."] + pruneopts = "" + revision = 
"f6abebb3171c4c1b1fea279cb7c7325020a26290" + version = "v1.0.1" + +[[projects]] + digest = "1:84c5b1392ef65ad1bb64da4b4d0beb2f204eefc769d6d96082347bb7057cb7b1" + name = "gopkg.in/jcmturner/dnsutils.v1" + packages = ["."] + pruneopts = "" + revision = "13eeb8d49ffb74d7a75784c35e4d900607a3943c" + version = "v1.0.1" + +[[projects]] + digest = "1:502ab576ba8c47c4de77fe3f2b2386adc1a1447bb5afae2ac7bf0edd2b6f7c52" + name = "gopkg.in/jcmturner/gokrb5.v7" + packages = [ + "asn1tools", + "client", + "config", + "credentials", + "crypto", + "crypto/common", + "crypto/etype", + "crypto/rfc3961", + "crypto/rfc3962", + "crypto/rfc4757", + "crypto/rfc8009", + "gssapi", + "iana", + "iana/addrtype", + "iana/adtype", + "iana/asnAppTag", + "iana/chksumtype", + "iana/errorcode", + "iana/etypeID", + "iana/flags", + "iana/keyusage", + "iana/msgtype", + "iana/nametype", + "iana/patype", + "kadmin", + "keytab", + "krberror", + "messages", + "pac", + "types", + ] + pruneopts = "" + revision = "363118e62befa8a14ff01031c025026077fe5d6d" + version = "v7.3.0" + +[[projects]] + digest = "1:f9956ccc103c6208cd50c71ee5191b6fdcc635972c12624ef949c9b20b2bb9d1" + name = "gopkg.in/jcmturner/rpc.v1" + packages = [ + "mstypes", + "ndr", + ] + pruneopts = "" + revision = "99a8ce2fbf8b8087b6ed12a37c61b10f04070043" + version = "v1.1.0" + [[projects]] digest = "1:367baf06b7dbd0ef0bbdd785f6a79f929c96b0c18e9d3b29c0eed1ac3f5db133" name = "gopkg.in/ldap.v2" @@ -1598,7 +1683,6 @@ "github.com/aws/aws-sdk-go/service/cloudwatch", "github.com/aws/aws-sdk-go/service/dynamodb", "github.com/aws/aws-sdk-go/service/kinesis", - "github.com/bsm/sarama-cluster", "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout", "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis", "github.com/couchbase/go-couchbase", diff --git a/Gopkg.toml b/Gopkg.toml index 92457c626..d1c7f4589 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -18,10 +18,6 @@ name = "github.com/aws/aws-sdk-go" version = "1.15.54" -[[constraint]] - name = "github.com/bsm/sarama-cluster" - version = "2.1.13" - [[constraint]] name = "github.com/couchbase/go-couchbase" branch = "master" diff --git a/Makefile b/Makefile index 9bf1e342b..6c9717c5d 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,8 @@ -ifeq ($(SHELL), cmd) - VERSION := $(shell git describe --exact-match --tags 2>nil) - HOME := $(HOMEPATH) -else ifeq ($(SHELL), sh.exe) +ifeq ($(OS), Windows_NT) VERSION := $(shell git describe --exact-match --tags 2>nil) HOME := $(HOMEPATH) + CGO_ENABLED ?= 0 + export CGO_ENABLED else VERSION := $(shell git describe --exact-match --tags 2>/dev/null) endif @@ -48,7 +47,6 @@ install: telegraf mkdir -p $(DESTDIR)$(PREFIX)/bin/ cp telegraf $(DESTDIR)$(PREFIX)/bin/ - .PHONY: test test: go test -short ./... 
diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 17bac0a1a..755fbbbae 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -14,7 +14,6 @@ following works: - github.com/aws/aws-sdk-go [Apache License 2.0](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) - github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE) - github.com/beorn7/perks [MIT License](https://github.com/beorn7/perks/blob/master/LICENSE) -- github.com/bsm/sarama-cluster [MIT License](https://github.com/bsm/sarama-cluster/blob/master/LICENSE) - github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE) - github.com/cisco-ie/nx-telemetry-proto [Apache License 2.0](https://github.com/cisco-ie/nx-telemetry-proto/blob/master/LICENSE) - github.com/couchbase/go-couchbase [MIT License](https://github.com/couchbase/go-couchbase/blob/master/LICENSE) @@ -139,4 +138,4 @@ following works: - gopkg.in/yaml.v2 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v2.2.2/LICENSE) ## telegraf used and modified code from these projects -- github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE) \ No newline at end of file +- github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE) diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index 56fc59245..26ebca39d 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -10,11 +10,13 @@ and use the old zookeeper connection method. ```toml [[inputs.kafka_consumer]] - ## kafka servers + ## Kafka brokers. brokers = ["localhost:9092"] - ## topic(s) to consume + + ## Topics to consume. topics = ["telegraf"] - ## Add topic as tag if topic_tag is not empty + + ## When set this tag will be added to all metrics with the topic as the value. # topic_tag = "" ## Optional Client id @@ -37,10 +39,11 @@ and use the old zookeeper connection method. # sasl_username = "kafka" # sasl_password = "secret" - ## the name of the consumer group - consumer_group = "telegraf_metrics_consumers" - ## Offset (must be either "oldest" or "newest") - offset = "oldest" + ## Name of the consumer group. + # consumer_group = "telegraf_metrics_consumers" + + ## Initial offset position; one of "oldest" or "newest". 
+ # offset = "oldest" ## Maximum length of a message to consume, in bytes (default 0/unlimited); ## larger messages are dropped diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 545e37f5a..10a6251be 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -8,61 +8,20 @@ import ( "sync" "github.com/Shopify/sarama" - cluster "github.com/bsm/sarama-cluster" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" ) -const ( - defaultMaxUndeliveredMessages = 1000 -) - -type empty struct{} -type semaphore chan empty - -type Consumer interface { - Errors() <-chan error - Messages() <-chan *sarama.ConsumerMessage - MarkOffset(msg *sarama.ConsumerMessage, metadata string) - Close() error -} - -type Kafka struct { - ConsumerGroup string `toml:"consumer_group"` - ClientID string `toml:"client_id"` - Topics []string `toml:"topics"` - Brokers []string `toml:"brokers"` - MaxMessageLen int `toml:"max_message_len"` - Version string `toml:"version"` - MaxUndeliveredMessages int `toml:"max_undelivered_messages"` - Offset string `toml:"offset"` - SASLUsername string `toml:"sasl_username"` - SASLPassword string `toml:"sasl_password"` - TopicTag string `toml:"topic_tag"` - - tls.ClientConfig - - cluster Consumer - parser parsers.Parser - wg *sync.WaitGroup - cancel context.CancelFunc - - // Unconfirmed messages - messages map[telegraf.TrackingID]*sarama.ConsumerMessage - - // doNotCommitMsgs tells the parser not to call CommitUpTo on the consumer - // this is mostly for test purposes, but there may be a use-case for it later. - doNotCommitMsgs bool -} - -var sampleConfig = ` - ## kafka servers +const sampleConfig = ` + ## Kafka brokers. brokers = ["localhost:9092"] - ## topic(s) to consume + + ## Topics to consume. topics = ["telegraf"] - ## Add topic as tag if topic_tag is not empty + + ## When set this tag will be added to all metrics with the topic as the value. # topic_tag = "" ## Optional Client id @@ -85,10 +44,12 @@ var sampleConfig = ` # sasl_username = "kafka" # sasl_password = "secret" - ## the name of the consumer group - consumer_group = "telegraf_metrics_consumers" - ## Offset (must be either "oldest" or "newest") - offset = "oldest" + ## Name of the consumer group. + # consumer_group = "telegraf_metrics_consumers" + + ## Initial offset position; one of "oldest" or "newest". 
+ # offset = "oldest" + ## Maximum length of a message to consume, in bytes (default 0/unlimited); ## larger messages are dropped max_message_len = 1000000 @@ -110,22 +71,77 @@ var sampleConfig = ` data_format = "influx" ` -func (k *Kafka) SampleConfig() string { +const ( + defaultMaxUndeliveredMessages = 1000 + defaultMaxMessageLen = 1000000 + defaultConsumerGroup = "telegraf_metrics_consumers" +) + +type empty struct{} +type semaphore chan empty + +type KafkaConsumer struct { + Brokers []string `toml:"brokers"` + ClientID string `toml:"client_id"` + ConsumerGroup string `toml:"consumer_group"` + MaxMessageLen int `toml:"max_message_len"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + Offset string `toml:"offset"` + Topics []string `toml:"topics"` + TopicTag string `toml:"topic_tag"` + Version string `toml:"version"` + SASLPassword string `toml:"sasl_password"` + SASLUsername string `toml:"sasl_username"` + + tls.ClientConfig + + ConsumerCreator ConsumerGroupCreator `toml:"-"` + consumer ConsumerGroup + config *sarama.Config + + parser parsers.Parser + wg sync.WaitGroup + cancel context.CancelFunc +} + +type ConsumerGroup interface { + Consume(ctx context.Context, topics []string, handler sarama.ConsumerGroupHandler) error + Errors() <-chan error + Close() error +} + +type ConsumerGroupCreator interface { + Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error) +} + +type SaramaCreator struct{} + +func (*SaramaCreator) Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error) { + return sarama.NewConsumerGroup(brokers, group, config) +} + +func (k *KafkaConsumer) SampleConfig() string { return sampleConfig } -func (k *Kafka) Description() string { - return "Read metrics from Kafka topic(s)" +func (k *KafkaConsumer) Description() string { + return "Read metrics from Kafka topics" } -func (k *Kafka) SetParser(parser parsers.Parser) { +func (k *KafkaConsumer) SetParser(parser parsers.Parser) { k.parser = parser } -func (k *Kafka) Start(acc telegraf.Accumulator) error { - var clusterErr error +func (k *KafkaConsumer) Init() error { + if k.MaxUndeliveredMessages == 0 { + k.MaxUndeliveredMessages = defaultMaxUndeliveredMessages + } + if k.ConsumerGroup == "" { + k.ConsumerGroup = defaultConsumerGroup + } - config := cluster.NewConfig() + config := sarama.NewConfig() + config.Consumer.Return.Errors = true if k.Version != "" { version, err := sarama.ParseKafkaVersion(k.Version) @@ -135,172 +151,255 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { config.Version = version } - config.Consumer.Return.Errors = true - tlsConfig, err := k.ClientConfig.TLSConfig() if err != nil { return err } + if tlsConfig != nil { + config.Net.TLS.Config = tlsConfig + config.Net.TLS.Enable = true + } + + if k.SASLUsername != "" && k.SASLPassword != "" { + config.Net.SASL.User = k.SASLUsername + config.Net.SASL.Password = k.SASLPassword + config.Net.SASL.Enable = true + } + if k.ClientID != "" { config.ClientID = k.ClientID } else { config.ClientID = "Telegraf" } - if tlsConfig != nil { - log.Printf("D! TLS Enabled") - config.Net.TLS.Config = tlsConfig - config.Net.TLS.Enable = true - } - if k.SASLUsername != "" && k.SASLPassword != "" { - log.Printf("D! 
Using SASL auth with username '%s',", - k.SASLUsername) - config.Net.SASL.User = k.SASLUsername - config.Net.SASL.Password = k.SASLPassword - config.Net.SASL.Enable = true - } - switch strings.ToLower(k.Offset) { case "oldest", "": config.Consumer.Offsets.Initial = sarama.OffsetOldest case "newest": config.Consumer.Offsets.Initial = sarama.OffsetNewest default: - log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'", - k.Offset) - config.Consumer.Offsets.Initial = sarama.OffsetOldest + return fmt.Errorf("invalid offset %q", k.Offset) } - if k.cluster == nil { - k.cluster, clusterErr = cluster.NewConsumer( - k.Brokers, - k.ConsumerGroup, - k.Topics, - config, - ) + if k.ConsumerCreator == nil { + k.ConsumerCreator = &SaramaCreator{} + } - if clusterErr != nil { - log.Printf("E! Error when creating Kafka Consumer, brokers: %v, topics: %v", - k.Brokers, k.Topics) - return clusterErr - } + k.config = config + return nil +} + +func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error { + var err error + k.consumer, err = k.ConsumerCreator.Create( + k.Brokers, + k.ConsumerGroup, + k.config, + ) + if err != nil { + return err } ctx, cancel := context.WithCancel(context.Background()) k.cancel = cancel // Start consumer goroutine - k.wg = &sync.WaitGroup{} k.wg.Add(1) go func() { defer k.wg.Done() - k.receiver(ctx, acc) + for ctx.Err() == nil { + handler := NewConsumerGroupHandler(acc, k.MaxUndeliveredMessages, k.parser) + handler.MaxMessageLen = k.MaxMessageLen + handler.TopicTag = k.TopicTag + err := k.consumer.Consume(ctx, k.Topics, handler) + if err != nil { + acc.AddError(err) + } + } + err = k.consumer.Close() + if err != nil { + acc.AddError(err) + } + }() + + k.wg.Add(1) + go func() { + defer k.wg.Done() + for err := range k.consumer.Errors() { + acc.AddError(err) + } }() - log.Printf("I! Started the kafka consumer service, brokers: %v, topics: %v", - k.Brokers, k.Topics) return nil } -// receiver() reads all incoming messages from the consumer, and parses them into -// influxdb metric points. -func (k *Kafka) receiver(ctx context.Context, ac telegraf.Accumulator) { - k.messages = make(map[telegraf.TrackingID]*sarama.ConsumerMessage) +func (k *KafkaConsumer) Gather(acc telegraf.Accumulator) error { + return nil +} - acc := ac.WithTracking(k.MaxUndeliveredMessages) - sem := make(semaphore, k.MaxUndeliveredMessages) +func (k *KafkaConsumer) Stop() { + k.cancel() + k.wg.Wait() +} +// Message is an aggregate type binding the Kafka message and the session so +// that offsets can be updated. +type Message struct { + message *sarama.ConsumerMessage + session sarama.ConsumerGroupSession +} + +func NewConsumerGroupHandler(acc telegraf.Accumulator, maxUndelivered int, parser parsers.Parser) *ConsumerGroupHandler { + handler := &ConsumerGroupHandler{ + acc: acc.WithTracking(maxUndelivered), + sem: make(chan empty, maxUndelivered), + undelivered: make(map[telegraf.TrackingID]Message, maxUndelivered), + parser: parser, + } + return handler +} + +// ConsumerGroupHandler is a sarama.ConsumerGroupHandler implementation. +type ConsumerGroupHandler struct { + MaxMessageLen int + TopicTag string + + acc telegraf.TrackingAccumulator + sem semaphore + parser parsers.Parser + wg sync.WaitGroup + cancel context.CancelFunc + + mu sync.Mutex + undelivered map[telegraf.TrackingID]Message +} + +// Setup is called once when a new session is opened. It setups up the handler +// and begins processing delivered messages. 
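The handler machinery that follows couples Telegraf's tracking accumulator to a buffered channel used as a counting semaphore: a slot is reserved before a message is parsed and released only when delivery is acknowledged, so at most max_undelivered_messages are in flight. A stripped-down sketch of that backpressure pattern, with stand-in channels instead of Telegraf's accumulator types:

```go
// Sketch of the in-flight cap used by the handler below: a channel acts
// as a counting semaphore so at most maxUndelivered messages await acks.
// All names here are illustrative, not Telegraf's.
package main

import "fmt"

type empty struct{}

func main() {
	const maxUndelivered = 3
	sem := make(chan empty, maxUndelivered)
	delivered := make(chan int)

	// Consumer side: reserve a slot before taking on a message.
	go func() {
		for id := 0; id < 10; id++ {
			sem <- empty{} // blocks once maxUndelivered messages are in flight
			fmt.Println("handling message", id)
			go func(id int) { delivered <- id }(id) // ack arrives asynchronously
		}
	}()

	// Delivery side: each ack frees a slot; this is the point where the
	// Kafka offset would be marked (at-least-once semantics).
	for i := 0; i < 10; i++ {
		id := <-delivered
		fmt.Println("delivered, marking offset for message", id)
		<-sem
	}
}
```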
+func (h *ConsumerGroupHandler) Setup(sarama.ConsumerGroupSession) error { + h.undelivered = make(map[telegraf.TrackingID]Message) + + ctx, cancel := context.WithCancel(context.Background()) + h.cancel = cancel + + h.wg.Add(1) + go func() { + defer h.wg.Done() + h.run(ctx) + }() + return nil +} + +// Run processes any delivered metrics during the lifetime of the session. +func (h *ConsumerGroupHandler) run(ctx context.Context) error { for { select { case <-ctx.Done(): - return - case track := <-acc.Delivered(): - <-sem - k.onDelivery(track) - case err := <-k.cluster.Errors(): - acc.AddError(err) - case sem <- empty{}: - select { - case <-ctx.Done(): - return - case track := <-acc.Delivered(): - // Once for the delivered message, once to leave the case - <-sem - <-sem - k.onDelivery(track) - case err := <-k.cluster.Errors(): - <-sem - acc.AddError(err) - case msg := <-k.cluster.Messages(): - err := k.onMessage(acc, msg) - if err != nil { - acc.AddError(err) - <-sem - } - } + return nil + case track := <-h.acc.Delivered(): + h.onDelivery(track) } } } -func (k *Kafka) markOffset(msg *sarama.ConsumerMessage) { - if !k.doNotCommitMsgs { - k.cluster.MarkOffset(msg, "") - } -} +func (h *ConsumerGroupHandler) onDelivery(track telegraf.DeliveryInfo) { + h.mu.Lock() + defer h.mu.Unlock() -func (k *Kafka) onMessage(acc telegraf.TrackingAccumulator, msg *sarama.ConsumerMessage) error { - if k.MaxMessageLen != 0 && len(msg.Value) > k.MaxMessageLen { - k.markOffset(msg) - return fmt.Errorf("Message longer than max_message_len (%d > %d)", - len(msg.Value), k.MaxMessageLen) - } - - metrics, err := k.parser.Parse(msg.Value) - if err != nil { - return err - } - if len(k.TopicTag) > 0 { - for _, metric := range metrics { - metric.AddTag(k.TopicTag, msg.Topic) - } - } - id := acc.AddTrackingMetricGroup(metrics) - k.messages[id] = msg - - return nil -} - -func (k *Kafka) onDelivery(track telegraf.DeliveryInfo) { - msg, ok := k.messages[track.ID()] + msg, ok := h.undelivered[track.ID()] if !ok { log.Printf("E! [inputs.kafka_consumer] Could not mark message delivered: %d", track.ID()) return } if track.Delivered() { - k.markOffset(msg) + msg.session.MarkMessage(msg.message, "") } - delete(k.messages, track.ID()) + + delete(h.undelivered, track.ID()) + <-h.sem } -func (k *Kafka) Stop() { - k.cancel() - k.wg.Wait() - - if err := k.cluster.Close(); err != nil { - log.Printf("E! [inputs.kafka_consumer] Error closing consumer: %v", err) +// Reserve blocks until there is an available slot for a new message. +func (h *ConsumerGroupHandler) Reserve(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + case h.sem <- empty{}: + return nil } } -func (k *Kafka) Gather(acc telegraf.Accumulator) error { +func (h *ConsumerGroupHandler) release() { + <-h.sem +} + +// Handle processes a message and if successful saves it to be acknowledged +// after delivery. 
+func (h *ConsumerGroupHandler) Handle(session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error { + if h.MaxMessageLen != 0 && len(msg.Value) > h.MaxMessageLen { + session.MarkMessage(msg, "") + h.release() + return fmt.Errorf("message exceeds max_message_len (actual %d, max %d)", + len(msg.Value), h.MaxMessageLen) + } + + metrics, err := h.parser.Parse(msg.Value) + if err != nil { + h.release() + return err + } + + if len(h.TopicTag) > 0 { + for _, metric := range metrics { + metric.AddTag(h.TopicTag, msg.Topic) + } + } + + id := h.acc.AddTrackingMetricGroup(metrics) + h.mu.Lock() + h.undelivered[id] = Message{session: session, message: msg} + h.mu.Unlock() + return nil +} + +// ConsumeClaim is called once each claim in a goroutine and must be +// thread-safe. Should run until the claim is closed. +func (h *ConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { + ctx := session.Context() + + for { + err := h.Reserve(ctx) + if err != nil { + return nil + } + + select { + case <-ctx.Done(): + return nil + case msg, ok := <-claim.Messages(): + if !ok { + return nil + } + err := h.Handle(session, msg) + if err != nil { + h.acc.AddError(err) + } + } + } +} + +// Cleanup stops the internal goroutine and is called after all ConsumeClaim +// functions have completed. +func (h *ConsumerGroupHandler) Cleanup(sarama.ConsumerGroupSession) error { + h.cancel() + h.wg.Wait() return nil } func init() { inputs.Add("kafka_consumer", func() telegraf.Input { - return &Kafka{ - MaxUndeliveredMessages: defaultMaxUndeliveredMessages, - } + return &KafkaConsumer{} }) } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go deleted file mode 100644 index 23f9e0f92..000000000 --- a/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package kafka_consumer - -import ( - "fmt" - "testing" - "time" - - "github.com/Shopify/sarama" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/influxdata/telegraf/plugins/parsers" -) - -func TestReadsMetricsFromKafka(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - - brokerPeers := []string{testutil.GetLocalHost() + ":9092"} - testTopic := fmt.Sprintf("telegraf_test_topic_%d", time.Now().Unix()) - - // Send a Kafka message to the kafka host - msg := "cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257\n" - producer, err := sarama.NewSyncProducer(brokerPeers, nil) - require.NoError(t, err) - _, _, err = producer.SendMessage( - &sarama.ProducerMessage{ - Topic: testTopic, - Value: sarama.StringEncoder(msg), - }) - require.NoError(t, err) - defer producer.Close() - - // Start the Kafka Consumer - k := &Kafka{ - ConsumerGroup: "telegraf_test_consumers", - Topics: []string{testTopic}, - Brokers: brokerPeers, - Offset: "oldest", - } - p, _ := parsers.NewInfluxParser() - k.SetParser(p) - - // Verify that we can now gather the sent message - var acc testutil.Accumulator - - // Sanity check - assert.Equal(t, 0, len(acc.Metrics), "There should not be any points") - if err := k.Start(&acc); err != nil { - t.Fatal(err.Error()) - } else { - defer k.Stop() - } - - waitForPoint(&acc, t) - - // Gather points - err = acc.GatherError(k.Gather) - require.NoError(t, err) - if len(acc.Metrics) == 1 { - point := acc.Metrics[0] 
- assert.Equal(t, "cpu_load_short", point.Measurement) - assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields) - assert.Equal(t, map[string]string{ - "host": "server01", - "direction": "in", - "region": "us-west", - }, point.Tags) - assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix()) - } else { - t.Errorf("No points found in accumulator, expected 1") - } -} - -// Waits for the metric that was sent to the kafka broker to arrive at the kafka -// consumer -func waitForPoint(acc *testutil.Accumulator, t *testing.T) { - // Give the kafka container up to 2 seconds to get the point to the consumer - ticker := time.NewTicker(5 * time.Millisecond) - counter := 0 - for { - select { - case <-ticker.C: - counter++ - if counter > 1000 { - t.Fatal("Waited for 5s, point never arrived to consumer") - } else if acc.NFields() == 1 { - return - } - } - } -} diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index a4d06efe6..3aa0efa50 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -2,219 +2,344 @@ package kafka_consumer import ( "context" - "strings" "testing" + "time" "github.com/Shopify/sarama" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/value" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -const ( - testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257\n" - testMsgGraphite = "cpu.load.short.graphite 23422 1454780029" - testMsgJSON = "{\"a\": 5, \"b\": {\"c\": 6}}\n" - invalidMsg = "cpu_load_short,host=server01 1422568543702900257\n" -) +type FakeConsumerGroup struct { + brokers []string + group string + config *sarama.Config -type TestConsumer struct { - errors chan error - messages chan *sarama.ConsumerMessage + handler sarama.ConsumerGroupHandler + errors chan error } -func (c *TestConsumer) Errors() <-chan error { - return c.errors -} - -func (c *TestConsumer) Messages() <-chan *sarama.ConsumerMessage { - return c.messages -} - -func (c *TestConsumer) MarkOffset(msg *sarama.ConsumerMessage, metadata string) { -} - -func (c *TestConsumer) Close() error { +func (g *FakeConsumerGroup) Consume(ctx context.Context, topics []string, handler sarama.ConsumerGroupHandler) error { + g.handler = handler + g.handler.Setup(nil) return nil } -func (c *TestConsumer) Inject(msg *sarama.ConsumerMessage) { - c.messages <- msg +func (g *FakeConsumerGroup) Errors() <-chan error { + return g.errors } -func newTestKafka() (*Kafka, *TestConsumer) { - consumer := &TestConsumer{ - errors: make(chan error), - messages: make(chan *sarama.ConsumerMessage, 1000), +func (g *FakeConsumerGroup) Close() error { + close(g.errors) + return nil +} + +type FakeCreator struct { + ConsumerGroup *FakeConsumerGroup +} + +func (c *FakeCreator) Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error) { + c.ConsumerGroup.brokers = brokers + c.ConsumerGroup.group = group + c.ConsumerGroup.config = config + return c.ConsumerGroup, nil +} + +func TestInit(t *testing.T) { + tests := []struct { + name string + plugin *KafkaConsumer + initError bool + check func(t *testing.T, plugin *KafkaConsumer) + }{ + { + name: "default config", + plugin: &KafkaConsumer{}, + check: func(t *testing.T, plugin *KafkaConsumer) { + require.Equal(t, 
plugin.ConsumerGroup, defaultConsumerGroup) + require.Equal(t, plugin.MaxUndeliveredMessages, defaultMaxUndeliveredMessages) + require.Equal(t, plugin.config.ClientID, "Telegraf") + require.Equal(t, plugin.config.Consumer.Offsets.Initial, sarama.OffsetOldest) + }, + }, + { + name: "parses valid version string", + plugin: &KafkaConsumer{ + Version: "1.0.0", + }, + check: func(t *testing.T, plugin *KafkaConsumer) { + require.Equal(t, plugin.config.Version, sarama.V1_0_0_0) + }, + }, + { + name: "invalid version string", + plugin: &KafkaConsumer{ + Version: "100", + }, + initError: true, + }, + { + name: "custom client_id", + plugin: &KafkaConsumer{ + ClientID: "custom", + }, + check: func(t *testing.T, plugin *KafkaConsumer) { + require.Equal(t, plugin.config.ClientID, "custom") + }, + }, + { + name: "custom offset", + plugin: &KafkaConsumer{ + Offset: "newest", + }, + check: func(t *testing.T, plugin *KafkaConsumer) { + require.Equal(t, plugin.config.Consumer.Offsets.Initial, sarama.OffsetNewest) + }, + }, + { + name: "invalid offset", + plugin: &KafkaConsumer{ + Offset: "middle", + }, + initError: true, + }, } - k := Kafka{ - cluster: consumer, - ConsumerGroup: "test", - Topics: []string{"telegraf"}, - Brokers: []string{"localhost:9092"}, - Offset: "oldest", - MaxUndeliveredMessages: defaultMaxUndeliveredMessages, - doNotCommitMsgs: true, - messages: make(map[telegraf.TrackingID]*sarama.ConsumerMessage), - } - return &k, consumer -} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cg := &FakeConsumerGroup{} + tt.plugin.ConsumerCreator = &FakeCreator{ConsumerGroup: cg} + err := tt.plugin.Init() + if tt.initError { + require.Error(t, err) + return + } -func newTestKafkaWithTopicTag() (*Kafka, *TestConsumer) { - consumer := &TestConsumer{ - errors: make(chan error), - messages: make(chan *sarama.ConsumerMessage, 1000), - } - k := Kafka{ - cluster: consumer, - ConsumerGroup: "test", - Topics: []string{"telegraf"}, - Brokers: []string{"localhost:9092"}, - Offset: "oldest", - MaxUndeliveredMessages: defaultMaxUndeliveredMessages, - doNotCommitMsgs: true, - messages: make(map[telegraf.TrackingID]*sarama.ConsumerMessage), - TopicTag: "topic", - } - return &k, consumer -} - -// Test that the parser parses kafka messages into points -func TestRunParser(t *testing.T) { - k, consumer := newTestKafka() - acc := testutil.Accumulator{} - ctx := context.Background() - - k.parser, _ = parsers.NewInfluxParser() - go k.receiver(ctx, &acc) - consumer.Inject(saramaMsg(testMsg)) - acc.Wait(1) - - assert.Equal(t, acc.NFields(), 1) -} - -// Test that the parser parses kafka messages into points -// and adds the topic tag -func TestRunParserWithTopic(t *testing.T) { - k, consumer := newTestKafkaWithTopicTag() - acc := testutil.Accumulator{} - ctx := context.Background() - - k.parser, _ = parsers.NewInfluxParser() - go k.receiver(ctx, &acc) - consumer.Inject(saramaMsgWithTopic(testMsg, "test_topic")) - acc.Wait(1) - - assert.Equal(t, acc.NFields(), 1) - assert.True(t, acc.HasTag("cpu_load_short", "topic")) -} - -// Test that the parser ignores invalid messages -func TestRunParserInvalidMsg(t *testing.T) { - k, consumer := newTestKafka() - acc := testutil.Accumulator{} - ctx := context.Background() - - k.parser, _ = parsers.NewInfluxParser() - go k.receiver(ctx, &acc) - consumer.Inject(saramaMsg(invalidMsg)) - acc.WaitError(1) - - assert.Equal(t, acc.NFields(), 0) -} - -// Test that overlong messages are dropped -func TestDropOverlongMsg(t *testing.T) { - const maxMessageLen = 64 * 1024 - k, 
consumer := newTestKafka() - k.MaxMessageLen = maxMessageLen - acc := testutil.Accumulator{} - ctx := context.Background() - overlongMsg := strings.Repeat("v", maxMessageLen+1) - - go k.receiver(ctx, &acc) - consumer.Inject(saramaMsg(overlongMsg)) - acc.WaitError(1) - - assert.Equal(t, acc.NFields(), 0) -} - -// Test that the parser parses kafka messages into points -func TestRunParserAndGather(t *testing.T) { - k, consumer := newTestKafka() - acc := testutil.Accumulator{} - ctx := context.Background() - - k.parser, _ = parsers.NewInfluxParser() - go k.receiver(ctx, &acc) - consumer.Inject(saramaMsg(testMsg)) - acc.Wait(1) - - acc.GatherError(k.Gather) - - assert.Equal(t, acc.NFields(), 1) - acc.AssertContainsFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(23422)}) -} - -// Test that the parser parses kafka messages into points -func TestRunParserAndGatherGraphite(t *testing.T) { - k, consumer := newTestKafka() - acc := testutil.Accumulator{} - ctx := context.Background() - - k.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) - go k.receiver(ctx, &acc) - consumer.Inject(saramaMsg(testMsgGraphite)) - acc.Wait(1) - - acc.GatherError(k.Gather) - - assert.Equal(t, acc.NFields(), 1) - acc.AssertContainsFields(t, "cpu_load_short_graphite", - map[string]interface{}{"value": float64(23422)}) -} - -// Test that the parser parses kafka messages into points -func TestRunParserAndGatherJSON(t *testing.T) { - k, consumer := newTestKafka() - acc := testutil.Accumulator{} - ctx := context.Background() - - k.parser, _ = parsers.NewParser(&parsers.Config{ - DataFormat: "json", - MetricName: "kafka_json_test", - }) - go k.receiver(ctx, &acc) - consumer.Inject(saramaMsg(testMsgJSON)) - acc.Wait(1) - - acc.GatherError(k.Gather) - - assert.Equal(t, acc.NFields(), 2) - acc.AssertContainsFields(t, "kafka_json_test", - map[string]interface{}{ - "a": float64(5), - "b_c": float64(6), + tt.check(t, tt.plugin) }) -} - -func saramaMsg(val string) *sarama.ConsumerMessage { - return &sarama.ConsumerMessage{ - Key: nil, - Value: []byte(val), - Offset: 0, - Partition: 0, } } -func saramaMsgWithTopic(val string, topic string) *sarama.ConsumerMessage { - return &sarama.ConsumerMessage{ - Key: nil, - Value: []byte(val), - Offset: 0, - Partition: 0, - Topic: topic, +func TestStartStop(t *testing.T) { + cg := &FakeConsumerGroup{errors: make(chan error)} + plugin := &KafkaConsumer{ + ConsumerCreator: &FakeCreator{ConsumerGroup: cg}, + } + err := plugin.Init() + require.NoError(t, err) + + var acc testutil.Accumulator + err = plugin.Start(&acc) + require.NoError(t, err) + + plugin.Stop() +} + +type FakeConsumerGroupSession struct { + ctx context.Context +} + +func (s *FakeConsumerGroupSession) Claims() map[string][]int32 { + panic("not implemented") +} + +func (s *FakeConsumerGroupSession) MemberID() string { + panic("not implemented") +} + +func (s *FakeConsumerGroupSession) GenerationID() int32 { + panic("not implemented") +} + +func (s *FakeConsumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) { + panic("not implemented") +} + +func (s *FakeConsumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) { + panic("not implemented") +} + +func (s *FakeConsumerGroupSession) MarkMessage(msg *sarama.ConsumerMessage, metadata string) { +} + +func (s *FakeConsumerGroupSession) Context() context.Context { + return s.ctx +} + +type FakeConsumerGroupClaim struct { + messages chan *sarama.ConsumerMessage +} + +func (c 
*FakeConsumerGroupClaim) Topic() string {
+	panic("not implemented")
+}
+
+func (c *FakeConsumerGroupClaim) Partition() int32 {
+	panic("not implemented")
+}
+
+func (c *FakeConsumerGroupClaim) InitialOffset() int64 {
+	panic("not implemented")
+}
+
+func (c *FakeConsumerGroupClaim) HighWaterMarkOffset() int64 {
+	panic("not implemented")
+}
+
+func (c *FakeConsumerGroupClaim) Messages() <-chan *sarama.ConsumerMessage {
+	return c.messages
+}
+
+func TestConsumerGroupHandler_Lifecycle(t *testing.T) {
+	acc := &testutil.Accumulator{}
+	parser := &value.ValueParser{MetricName: "cpu", DataType: "int"}
+	cg := NewConsumerGroupHandler(acc, 1, parser)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	session := &FakeConsumerGroupSession{
+		ctx: ctx,
+	}
+	var claim FakeConsumerGroupClaim
+	var err error
+
+	err = cg.Setup(session)
+	require.NoError(t, err)
+
+	cancel()
+	err = cg.ConsumeClaim(session, &claim)
+	require.NoError(t, err)
+
+	err = cg.Cleanup(session)
+	require.NoError(t, err)
+}
+
+func TestConsumerGroupHandler_ConsumeClaim(t *testing.T) {
+	acc := &testutil.Accumulator{}
+	parser := &value.ValueParser{MetricName: "cpu", DataType: "int"}
+	cg := NewConsumerGroupHandler(acc, 1, parser)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	session := &FakeConsumerGroupSession{ctx: ctx}
+	claim := &FakeConsumerGroupClaim{
+		messages: make(chan *sarama.ConsumerMessage, 1),
+	}
+
+	err := cg.Setup(session)
+	require.NoError(t, err)
+
+	claim.messages <- &sarama.ConsumerMessage{
+		Topic: "telegraf",
+		Value: []byte("42"),
+	}
+
+	go func() {
+		err = cg.ConsumeClaim(session, claim)
+		require.NoError(t, err)
+	}()
+
+	acc.Wait(1)
+	cancel()
+
+	err = cg.Cleanup(session)
+	require.NoError(t, err)
+
+	expected := []telegraf.Metric{
+		testutil.MustMetric(
+			"cpu",
+			map[string]string{},
+			map[string]interface{}{
+				"value": 42,
+			},
+			time.Now(),
+		),
+	}
+
+	testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
+}
+
+func TestConsumerGroupHandler_Handle(t *testing.T) {
+	tests := []struct {
+		name          string
+		maxMessageLen int
+		topicTag      string
+		msg           *sarama.ConsumerMessage
+		expected      []telegraf.Metric
+	}{
+		{
+			name: "happy path",
+			msg: &sarama.ConsumerMessage{
+				Topic: "telegraf",
+				Value: []byte("42"),
+			},
+			expected: []telegraf.Metric{
+				testutil.MustMetric(
+					"cpu",
+					map[string]string{},
+					map[string]interface{}{
+						"value": 42,
+					},
+					time.Now(),
+				),
+			},
+		},
+		{
+			name:          "message too long",
+			maxMessageLen: 4,
+			msg: &sarama.ConsumerMessage{
+				Topic: "telegraf",
+				Value: []byte("12345"),
+			},
+			expected: []telegraf.Metric{},
+		},
+		{
+			name: "parse error",
+			msg: &sarama.ConsumerMessage{
+				Topic: "telegraf",
+				Value: []byte("not an integer"),
+			},
+			expected: []telegraf.Metric{},
+		},
+		{
+			name:     "add topic tag",
+			topicTag: "topic",
+			msg: &sarama.ConsumerMessage{
+				Topic: "telegraf",
+				Value: []byte("42"),
+			},
+			expected: []telegraf.Metric{
+				testutil.MustMetric(
+					"cpu",
+					map[string]string{
+						"topic": "telegraf",
+					},
+					map[string]interface{}{
+						"value": 42,
+					},
+					time.Now(),
+				),
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			acc := &testutil.Accumulator{}
+			parser := &value.ValueParser{MetricName: "cpu", DataType: "int"}
+			cg := NewConsumerGroupHandler(acc, 1, parser)
+			cg.MaxMessageLen = tt.maxMessageLen
+			cg.TopicTag = tt.topicTag
+
+			ctx := context.Background()
+			session := &FakeConsumerGroupSession{ctx: ctx}
+
+			cg.Reserve(ctx)
+
cg.Handle(session, tt.msg) + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + }) } } From 5c7c9e40ee8f2c1296bbdaf72909ed1b458ff15f Mon Sep 17 00:00:00 2001 From: Preston Carpenter Date: Tue, 30 Jul 2019 00:34:03 -0400 Subject: [PATCH 1040/1815] Load external go plugins (#6024) --- cmd/telegraf/telegraf.go | 54 +++++++++++++++++++++++++++++++++++++++- internal/usage.go | 3 +++ 2 files changed, 56 insertions(+), 1 deletion(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 4545833a7..2da192da0 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -10,6 +10,9 @@ import ( _ "net/http/pprof" // Comment this line to disable pprof endpoint. "os" "os/signal" + "path" + "path/filepath" + "plugin" "runtime" "strings" "syscall" @@ -64,6 +67,8 @@ var fService = flag.String("service", "", var fServiceName = flag.String("service-name", "telegraf", "service name (windows only)") var fServiceDisplayName = flag.String("service-display-name", "Telegraf Data Collector Service", "service display name (windows only)") var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)") +var fPlugins = flag.String("plugin-directory", "", + "path to directory containing external plugins") var ( version string @@ -111,6 +116,45 @@ func reloadLoop( } } +// loadExternalPlugins loads external plugins from shared libraries (.so, .dll, etc.) +// in the specified directory. +func loadExternalPlugins(rootDir string) error { + return filepath.Walk(rootDir, func(pth string, info os.FileInfo, err error) error { + // Stop if there was an error. + if err != nil { + return err + } + + // Ignore directories. + if info.IsDir() { + return nil + } + + // Ignore files that aren't shared libraries. + ext := strings.ToLower(path.Ext(pth)) + if ext != ".so" && ext != ".dll" { + return nil + } + + // name will be the path to the plugin file beginning at the root + // directory, minus the extension. + // ie, if the plugin file is ./telegraf-plugins/foo.so, name + // will be "telegraf-plugins/foo" + name := strings.TrimPrefix(strings.TrimPrefix(pth, rootDir), string(os.PathSeparator)) + name = strings.TrimSuffix(name, filepath.Ext(pth)) + + // Load plugin. + _, err = plugin.Open(pth) + if err != nil { + errorMsg := fmt.Sprintf("error loading [%s]: %s", pth, err) + log.Printf(errorMsg) + return errors.New(errorMsg) + } + + return nil + }) +} + func runAgent(ctx context.Context, inputFilters []string, outputFilters []string, @@ -138,7 +182,7 @@ func runAgent(ctx context.Context, if !*fTest && len(c.Outputs) == 0 { return errors.New("Error: no outputs found, did you provide a valid config file?") } - if len(c.Inputs) == 0 { + if *fPlugins == "" && len(c.Inputs) == 0 { return errors.New("Error: no inputs found, did you provide a valid config file?") } @@ -279,6 +323,14 @@ func main() { processorFilters = strings.Split(":"+strings.TrimSpace(*fProcessorFilters)+":", ":") } + // Load external plugins, if requested. 
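The loader above works by side effect: plugin.Open runs the shared object's init functions, so an external plugin registers itself through the usual inputs.Add path and the returned *plugin.Plugin handle can be discarded. A hypothetical external input built with `go build -buildmode=plugin` might look like the following (note that Go's plugin package supports Linux and macOS only, so the .dll branch above is speculative):

```go
// Hypothetical external input compiled with `go build -buildmode=plugin`.
// Loading the resulting .so runs this init(), which is all the loader needs.
package main

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type Example struct{}

func (*Example) Description() string  { return "An externally loaded input" }
func (*Example) SampleConfig() string { return "" }

func (*Example) Gather(acc telegraf.Accumulator) error {
	// Emit a single constant field; a real plugin would collect something.
	acc.AddFields("example", map[string]interface{}{"value": 42}, nil)
	return nil
}

func init() {
	inputs.Add("example", func() telegraf.Input { return &Example{} })
}
```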
+ if *fPlugins != "" { + log.Printf("Loading external plugins from: %s\n", *fPlugins) + if err := loadExternalPlugins(*fPlugins); err != nil { + log.Fatal(err.Error()) + } + } + if *pprofAddr != "" { go func() { pprofHostPort := *pprofAddr diff --git a/internal/usage.go b/internal/usage.go index 7909d3558..90ea92986 100644 --- a/internal/usage.go +++ b/internal/usage.go @@ -16,6 +16,9 @@ The commands & flags are: --aggregator-filter filter the aggregators to enable, separator is : --config configuration file to load --config-directory directory containing additional *.conf files + --plugin-directory directory containing *.so files, this directory will be + searched recursively. Any Plugin found will be loaded + and namespaced. --debug turn on debug logging --input-filter filter the inputs to enable, separator is : --input-list print available input plugins. From 3f63c14179e57961b24890e4f5f59d8f0b87470a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 29 Jul 2019 21:47:49 -0700 Subject: [PATCH 1041/1815] Setup default logging before logging. --- cmd/telegraf/telegraf.go | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 2da192da0..6eceb5cdc 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -136,19 +136,10 @@ func loadExternalPlugins(rootDir string) error { return nil } - // name will be the path to the plugin file beginning at the root - // directory, minus the extension. - // ie, if the plugin file is ./telegraf-plugins/foo.so, name - // will be "telegraf-plugins/foo" - name := strings.TrimPrefix(strings.TrimPrefix(pth, rootDir), string(os.PathSeparator)) - name = strings.TrimSuffix(name, filepath.Ext(pth)) - // Load plugin. _, err = plugin.Open(pth) if err != nil { - errorMsg := fmt.Sprintf("error loading [%s]: %s", pth, err) - log.Printf(errorMsg) - return errors.New(errorMsg) + return fmt.Errorf("error loading %s: %s", pth, err) } return nil @@ -161,7 +152,6 @@ func runAgent(ctx context.Context, ) error { // Setup default logging. This may need to change after reading the config // file, but we can configure it to use our logger implementation now. - logger.SetupLogging(logger.LogConfig{}) log.Printf("I! Starting Telegraf %s", version) // If no other options are specified, load the config file and run. @@ -323,11 +313,13 @@ func main() { processorFilters = strings.Split(":"+strings.TrimSpace(*fProcessorFilters)+":", ":") } + logger.SetupLogging(logger.LogConfig{}) + // Load external plugins, if requested. if *fPlugins != "" { - log.Printf("Loading external plugins from: %s\n", *fPlugins) + log.Printf("I! Loading external plugins from: %s", *fPlugins) if err := loadExternalPlugins(*fPlugins); err != nil { - log.Fatal(err.Error()) + log.Fatal("E! 
" + err.Error()) } } From a1bff8f550667be66592ee93ab85bb8cdb483ae4 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 30 Jul 2019 15:16:51 -0600 Subject: [PATCH 1042/1815] Add ability to exclude db/bucket tag from influxdb outputs (#6184) --- plugins/outputs/influxdb/README.md | 3 ++ plugins/outputs/influxdb/http.go | 5 +++ plugins/outputs/influxdb/influxdb.go | 5 +++ plugins/outputs/influxdb_v2/README.md | 3 ++ plugins/outputs/influxdb_v2/http.go | 55 ++++++++++++++----------- plugins/outputs/influxdb_v2/influxdb.go | 51 ++++++++++++----------- 6 files changed, 75 insertions(+), 47 deletions(-) diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md index 48ab3d51b..1d11443ac 100644 --- a/plugins/outputs/influxdb/README.md +++ b/plugins/outputs/influxdb/README.md @@ -23,6 +23,9 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP ser ## tag is not set the 'database' option is used as the default. # database_tag = "" + ## If true, the database tag will not be added to the metric. + # exclude_database_tag = false + ## If true, no CREATE DATABASE queries will be sent. Set to true when using ## Telegraf with a user without permissions to create databases or when the ## database already exists. diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 794eee8b8..56576082f 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -94,6 +94,7 @@ type HTTPConfig struct { ContentEncoding string Database string DatabaseTag string + ExcludeDatabaseTag bool RetentionPolicy string Consistency string SkipDatabaseCreation bool @@ -250,6 +251,10 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error batches[db] = make([]telegraf.Metric, 0) } + if c.config.ExcludeDatabaseTag { + metric.RemoveTag(c.config.DatabaseTag) + } + batches[db] = append(batches[db], metric) } diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index b2d1a9026..b07d58fc2 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -38,6 +38,7 @@ type InfluxDB struct { Password string Database string DatabaseTag string `toml:"database_tag"` + ExcludeDatabaseTag bool `toml:"exclude_database_tag"` UserAgent string RetentionPolicy string WriteConsistency string @@ -77,6 +78,9 @@ var sampleConfig = ` ## tag is not set the 'database' option is used as the default. # database_tag = "" + ## If true, the database tag will not be added to the metric. + # exclude_database_tag = false + ## If true, no CREATE DATABASE queries will be sent. Set to true when using ## Telegraf with a user without permissions to create databases or when the ## database already exists. @@ -262,6 +266,7 @@ func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL) Headers: i.HTTPHeaders, Database: i.Database, DatabaseTag: i.DatabaseTag, + ExcludeDatabaseTag: i.ExcludeDatabaseTag, SkipDatabaseCreation: i.SkipDatabaseCreation, RetentionPolicy: i.RetentionPolicy, Consistency: i.WriteConsistency, diff --git a/plugins/outputs/influxdb_v2/README.md b/plugins/outputs/influxdb_v2/README.md index 830e70b41..226c3ab62 100644 --- a/plugins/outputs/influxdb_v2/README.md +++ b/plugins/outputs/influxdb_v2/README.md @@ -26,6 +26,9 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v2.x] HTTP service. ## tag is not set the 'bucket' option is used as the default. 
# bucket_tag = "" + ## If true, the bucket tag will not be added to the metric. + # exclude_bucket_tag = false + ## Timeout for HTTP messages. # timeout = "5s" diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index a57a1bc67..47b736844 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -40,28 +40,30 @@ const ( ) type HTTPConfig struct { - URL *url.URL - Token string - Organization string - Bucket string - BucketTag string - Timeout time.Duration - Headers map[string]string - Proxy *url.URL - UserAgent string - ContentEncoding string - TLSConfig *tls.Config + URL *url.URL + Token string + Organization string + Bucket string + BucketTag string + ExcludeBucketTag bool + Timeout time.Duration + Headers map[string]string + Proxy *url.URL + UserAgent string + ContentEncoding string + TLSConfig *tls.Config Serializer *influx.Serializer } type httpClient struct { - ContentEncoding string - Timeout time.Duration - Headers map[string]string - Organization string - Bucket string - BucketTag string + ContentEncoding string + Timeout time.Duration + Headers map[string]string + Organization string + Bucket string + BucketTag string + ExcludeBucketTag bool client *http.Client serializer *influx.Serializer @@ -130,13 +132,14 @@ func NewHTTPClient(config *HTTPConfig) (*httpClient, error) { Timeout: timeout, Transport: transport, }, - url: config.URL, - ContentEncoding: config.ContentEncoding, - Timeout: timeout, - Headers: headers, - Organization: config.Organization, - Bucket: config.Bucket, - BucketTag: config.BucketTag, + url: config.URL, + ContentEncoding: config.ContentEncoding, + Timeout: timeout, + Headers: headers, + Organization: config.Organization, + Bucket: config.Bucket, + BucketTag: config.BucketTag, + ExcludeBucketTag: config.ExcludeBucketTag, } return client, nil } @@ -185,6 +188,10 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error batches[bucket] = make([]telegraf.Metric, 0) } + if c.ExcludeBucketTag { + metric.RemoveTag(c.BucketTag) + } + batches[bucket] = append(batches[bucket], metric) } diff --git a/plugins/outputs/influxdb_v2/influxdb.go b/plugins/outputs/influxdb_v2/influxdb.go index 8998ba3c7..0f40a96e3 100644 --- a/plugins/outputs/influxdb_v2/influxdb.go +++ b/plugins/outputs/influxdb_v2/influxdb.go @@ -42,6 +42,9 @@ var sampleConfig = ` ## tag is not set the 'bucket' option is used as the default. # bucket_tag = "" + ## If true, the bucket tag will not be added to the metric. + # exclude_bucket_tag = false + ## Timeout for HTTP messages. 
# timeout = "5s" @@ -78,17 +81,18 @@ type Client interface { } type InfluxDB struct { - URLs []string `toml:"urls"` - Token string `toml:"token"` - Organization string `toml:"organization"` - Bucket string `toml:"bucket"` - BucketTag string `toml:"bucket_tag"` - Timeout internal.Duration `toml:"timeout"` - HTTPHeaders map[string]string `toml:"http_headers"` - HTTPProxy string `toml:"http_proxy"` - UserAgent string `toml:"user_agent"` - ContentEncoding string `toml:"content_encoding"` - UintSupport bool `toml:"influx_uint_support"` + URLs []string `toml:"urls"` + Token string `toml:"token"` + Organization string `toml:"organization"` + Bucket string `toml:"bucket"` + BucketTag string `toml:"bucket_tag"` + ExcludeBucketTag bool `toml:"exclude_bucket_tag"` + Timeout internal.Duration `toml:"timeout"` + HTTPHeaders map[string]string `toml:"http_headers"` + HTTPProxy string `toml:"http_proxy"` + UserAgent string `toml:"user_agent"` + ContentEncoding string `toml:"content_encoding"` + UintSupport bool `toml:"influx_uint_support"` tls.ClientConfig clients []Client @@ -179,18 +183,19 @@ func (i *InfluxDB) getHTTPClient(ctx context.Context, url *url.URL, proxy *url.U } config := &HTTPConfig{ - URL: url, - Token: i.Token, - Organization: i.Organization, - Bucket: i.Bucket, - BucketTag: i.BucketTag, - Timeout: i.Timeout.Duration, - Headers: i.HTTPHeaders, - Proxy: proxy, - UserAgent: i.UserAgent, - ContentEncoding: i.ContentEncoding, - TLSConfig: tlsConfig, - Serializer: i.serializer, + URL: url, + Token: i.Token, + Organization: i.Organization, + Bucket: i.Bucket, + BucketTag: i.BucketTag, + ExcludeBucketTag: i.ExcludeBucketTag, + Timeout: i.Timeout.Duration, + Headers: i.HTTPHeaders, + Proxy: proxy, + UserAgent: i.UserAgent, + ContentEncoding: i.ContentEncoding, + TLSConfig: tlsConfig, + Serializer: i.serializer, } c, err := NewHTTPClient(config) From 51c1659de8b3b6317d3e4134c633df405113aff5 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 30 Jul 2019 17:31:03 -0600 Subject: [PATCH 1043/1815] Add uWSGI input plugin (#6179) --- plugins/inputs/all/all.go | 1 + plugins/inputs/uwsgi/README.md | 92 +++++++++ plugins/inputs/uwsgi/uwsgi.go | 295 +++++++++++++++++++++++++++++ plugins/inputs/uwsgi/uwsgi_test.go | 185 ++++++++++++++++++ 4 files changed, 573 insertions(+) create mode 100644 plugins/inputs/uwsgi/README.md create mode 100644 plugins/inputs/uwsgi/uwsgi.go create mode 100644 plugins/inputs/uwsgi/uwsgi_test.go diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 0352e552a..8d2144df3 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -149,6 +149,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/twemproxy" _ "github.com/influxdata/telegraf/plugins/inputs/udp_listener" _ "github.com/influxdata/telegraf/plugins/inputs/unbound" + _ "github.com/influxdata/telegraf/plugins/inputs/uwsgi" _ "github.com/influxdata/telegraf/plugins/inputs/varnish" _ "github.com/influxdata/telegraf/plugins/inputs/vsphere" _ "github.com/influxdata/telegraf/plugins/inputs/webhooks" diff --git a/plugins/inputs/uwsgi/README.md b/plugins/inputs/uwsgi/README.md new file mode 100644 index 000000000..8053676c0 --- /dev/null +++ b/plugins/inputs/uwsgi/README.md @@ -0,0 +1,92 @@ +# uWSGI + +The uWSGI input plugin gathers metrics about uWSGI using its [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html). + +### Configuration + +```toml +[[inputs.uwsgi]] + ## List with urls of uWSGI Stats servers. 
URL must match pattern:
+  ## scheme://address[:port]
+  ##
+  ## For example:
+  ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"]
+  servers = ["tcp://127.0.0.1:1717"]
+
+  ## General connection timeout
+  # timeout = "5s"
+```
+
+
+### Metrics:
+
+ - uwsgi_overview
+   - tags:
+     - source
+     - uid
+     - gid
+     - version
+   - fields:
+     - listen_queue
+     - listen_queue_errors
+     - signal_queue
+     - load
+     - pid
+
++ uwsgi_workers
+   - tags:
+     - worker_id
+     - source
+   - fields:
+     - requests
+     - accepting
+     - delta_request
+     - exceptions
+     - harakiri_count
+     - pid
+     - signals
+     - signal_queue
+     - status
+     - rss
+     - vsz
+     - running_time
+     - last_spawn
+     - respawn_count
+     - tx
+     - avg_rt
+
+- uwsgi_apps
+   - tags:
+     - app_id
+     - worker_id
+     - source
+   - fields:
+     - modifier1
+     - requests
+     - startup_time
+     - exceptions
+
++ uwsgi_cores
+   - tags:
+     - core_id
+     - worker_id
+     - source
+   - fields:
+     - requests
+     - static_requests
+     - routed_requests
+     - offloaded_requests
+     - write_errors
+     - read_errors
+     - in_request
+
+
+### Example Output:
+
+```
+uwsgi_overview,gid=0,uid=0,source=172.17.0.2,version=2.0.18 listen_queue=0i,listen_queue_errors=0i,load=0i,pid=1i,signal_queue=0i 1564441407000000000
+uwsgi_workers,source=172.17.0.2,worker_id=1 accepting=1i,avg_rt=0i,delta_request=0i,exceptions=0i,harakiri_count=0i,last_spawn=1564441202i,pid=6i,requests=0i,respawn_count=1i,rss=0i,running_time=0i,signal_queue=0i,signals=0i,status="idle",tx=0i,vsz=0i 1564441407000000000
+uwsgi_apps,app_id=0,worker_id=1,source=172.17.0.2 exceptions=0i,modifier1=0i,requests=0i,startup_time=0i 1564441407000000000
+uwsgi_cores,core_id=0,worker_id=1,source=172.17.0.2 in_request=0i,offloaded_requests=0i,read_errors=0i,requests=0i,routed_requests=0i,static_requests=0i,write_errors=0i 1564441407000000000
+```
+
diff --git a/plugins/inputs/uwsgi/uwsgi.go b/plugins/inputs/uwsgi/uwsgi.go
new file mode 100644
index 000000000..15a9bbe22
--- /dev/null
+++ b/plugins/inputs/uwsgi/uwsgi.go
@@ -0,0 +1,295 @@
+// Package uwsgi implements a telegraf plugin for collecting uwsgi stats from
+// the uwsgi stats server.
+package uwsgi
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// Uwsgi server struct
+type Uwsgi struct {
+	Servers []string          `toml:"servers"`
+	Timeout internal.Duration `toml:"timeout"`
+
+	client *http.Client
+}
+
+// Description returns the plugin description
+func (u *Uwsgi) Description() string {
+	return "Read uWSGI metrics."
+}
+
+// SampleConfig returns the sample configuration
+func (u *Uwsgi) SampleConfig() string {
+	return `
+  ## List with urls of uWSGI Stats servers.
URL must match pattern: + ## scheme://address[:port] + ## + ## For example: + ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"] + servers = ["tcp://127.0.0.1:1717"] + + ## General connection timout + # timeout = "5s" +` +} + +// Gather collect data from uWSGI Server +func (u *Uwsgi) Gather(acc telegraf.Accumulator) error { + if u.client == nil { + u.client = &http.Client{ + Timeout: u.Timeout.Duration, + } + } + wg := &sync.WaitGroup{} + + for _, s := range u.Servers { + wg.Add(1) + go func(s string) { + defer wg.Done() + n, err := url.Parse(s) + if err != nil { + acc.AddError(fmt.Errorf("could not parse uWSGI Stats Server url '%s': %s", s, err.Error())) + return + } + + if err := u.gatherServer(acc, n); err != nil { + acc.AddError(err) + return + } + }(s) + } + + wg.Wait() + + return nil +} + +func (u *Uwsgi) gatherServer(acc telegraf.Accumulator, url *url.URL) error { + var err error + var r io.ReadCloser + var s StatsServer + + switch url.Scheme { + case "tcp": + r, err = net.DialTimeout(url.Scheme, url.Host, u.Timeout.Duration) + if err != nil { + return err + } + s.source = url.Host + case "unix": + r, err = net.DialTimeout(url.Scheme, url.Host, u.Timeout.Duration) + if err != nil { + return err + } + s.source, err = os.Hostname() + if err != nil { + s.source = url.Host + } + case "http": + resp, err := u.client.Get(url.String()) + if err != nil { + return err + } + r = resp.Body + s.source = url.Host + default: + return fmt.Errorf("'%s' is not a supported scheme", url.Scheme) + } + + defer r.Close() + + if err := json.NewDecoder(r).Decode(&s); err != nil { + return fmt.Errorf("failed to decode json payload from '%s': %s", url.String(), err.Error()) + } + + u.gatherStatServer(acc, &s) + + return err +} + +func (u *Uwsgi) gatherStatServer(acc telegraf.Accumulator, s *StatsServer) { + fields := map[string]interface{}{ + "listen_queue": s.ListenQueue, + "listen_queue_errors": s.ListenQueueErrors, + "signal_queue": s.SignalQueue, + "load": s.Load, + "pid": s.PID, + } + + tags := map[string]string{ + "source": s.source, + "uid": strconv.Itoa(s.UID), + "gid": strconv.Itoa(s.GID), + "version": s.Version, + } + acc.AddFields("uwsgi_overview", fields, tags) + + u.gatherWorkers(acc, s) + u.gatherApps(acc, s) + u.gatherCores(acc, s) +} + +func (u *Uwsgi) gatherWorkers(acc telegraf.Accumulator, s *StatsServer) { + for _, w := range s.Workers { + fields := map[string]interface{}{ + "requests": w.Requests, + "accepting": w.Accepting, + "delta_request": w.DeltaRequests, + "exceptions": w.Exceptions, + "harakiri_count": w.HarakiriCount, + "pid": w.PID, + "signals": w.Signals, + "signal_queue": w.SignalQueue, + "status": w.Status, + "rss": w.Rss, + "vsz": w.Vsz, + "running_time": w.RunningTime, + "last_spawn": w.LastSpawn, + "respawn_count": w.RespawnCount, + "tx": w.Tx, + "avg_rt": w.AvgRt, + } + tags := map[string]string{ + "worker_id": strconv.Itoa(w.WorkerID), + "source": s.source, + } + + acc.AddFields("uwsgi_workers", fields, tags) + } +} + +func (u *Uwsgi) gatherApps(acc telegraf.Accumulator, s *StatsServer) { + for _, w := range s.Workers { + for _, a := range w.Apps { + fields := map[string]interface{}{ + "modifier1": a.Modifier1, + "requests": a.Requests, + "startup_time": a.StartupTime, + "exceptions": a.Exceptions, + } + tags := map[string]string{ + "app_id": strconv.Itoa(a.AppID), + "worker_id": strconv.Itoa(w.WorkerID), + "source": s.source, + } + acc.AddFields("uwsgi_apps", fields, tags) + } + } +} + +func (u *Uwsgi) gatherCores(acc 
telegraf.Accumulator, s *StatsServer) { + for _, w := range s.Workers { + for _, c := range w.Cores { + fields := map[string]interface{}{ + "requests": c.Requests, + "static_requests": c.StaticRequests, + "routed_requests": c.RoutedRequests, + "offloaded_requests": c.OffloadedRequests, + "write_errors": c.WriteErrors, + "read_errors": c.ReadErrors, + "in_request": c.InRequest, + } + tags := map[string]string{ + "core_id": strconv.Itoa(c.CoreID), + "worker_id": strconv.Itoa(w.WorkerID), + "source": s.source, + } + acc.AddFields("uwsgi_cores", fields, tags) + } + + } +} + +func init() { + inputs.Add("uwsgi", func() telegraf.Input { + return &Uwsgi{ + Timeout: internal.Duration{Duration: 5 * time.Second}, + } + }) +} + +// StatsServer defines the stats server structure. +type StatsServer struct { + // Tags + source string + PID int `json:"pid"` + UID int `json:"uid"` + GID int `json:"gid"` + Version string `json:"version"` + + // Fields + ListenQueue int `json:"listen_queue"` + ListenQueueErrors int `json:"listen_queue_errors"` + SignalQueue int `json:"signal_queue"` + Load int `json:"load"` + + Workers []*Worker `json:"workers"` +} + +// Worker defines the worker metric structure. +type Worker struct { + // Tags + WorkerID int `json:"id"` + PID int `json:"pid"` + + // Fields + Accepting int `json:"accepting"` + Requests int `json:"requests"` + DeltaRequests int `json:"delta_requests"` + Exceptions int `json:"exceptions"` + HarakiriCount int `json:"harakiri_count"` + Signals int `json:"signals"` + SignalQueue int `json:"signal_queue"` + Status string `json:"status"` + Rss int `json:"rss"` + Vsz int `json:"vsz"` + RunningTime int `json:"running_time"` + LastSpawn int `json:"last_spawn"` + RespawnCount int `json:"respawn_count"` + Tx int `json:"tx"` + AvgRt int `json:"avg_rt"` + + Apps []*App `json:"apps"` + Cores []*Core `json:"cores"` +} + +// App defines the app metric structure. +type App struct { + // Tags + AppID int `json:"id"` + + // Fields + Modifier1 int `json:"modifier1"` + Requests int `json:"requests"` + StartupTime int `json:"startup_time"` + Exceptions int `json:"exceptions"` +} + +// Core defines the core metric structure. 
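The structs above and below map the uWSGI stats document onto Go types via encoding/json field tags, and gatherServer decodes exactly one such document per connection. A self-contained sketch of the same flow against a tcp:// stats socket — the address is a placeholder and only a few fields are mirrored:

```go
// Minimal sketch of the tcp:// path in gatherServer: dial the stats
// socket and decode the JSON document straight off the connection.
package main

import (
	"encoding/json"
	"fmt"
	"net"
	"time"
)

type Stats struct {
	Version     string `json:"version"`
	ListenQueue int    `json:"listen_queue"`
	Workers     []struct {
		ID       int    `json:"id"`
		Requests int    `json:"requests"`
		Status   string `json:"status"`
	} `json:"workers"`
}

func main() {
	conn, err := net.DialTimeout("tcp", "127.0.0.1:1717", 5*time.Second)
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()

	// uWSGI writes one JSON document per connection, so decoding the
	// first value is all that is needed.
	var s Stats
	if err := json.NewDecoder(conn).Decode(&s); err != nil {
		fmt.Println("decode:", err)
		return
	}
	for _, w := range s.Workers {
		fmt.Printf("worker %d: %d requests (%s)\n", w.ID, w.Requests, w.Status)
	}
}
```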
+type Core struct { + // Tags + CoreID int `json:"id"` + + // Fields + Requests int `json:"requests"` + StaticRequests int `json:"static_requests"` + RoutedRequests int `json:"routed_requests"` + OffloadedRequests int `json:"offloaded_requests"` + WriteErrors int `json:"write_errors"` + ReadErrors int `json:"read_errors"` + InRequest int `json:"in_request"` +} diff --git a/plugins/inputs/uwsgi/uwsgi_test.go b/plugins/inputs/uwsgi/uwsgi_test.go new file mode 100644 index 000000000..34581791e --- /dev/null +++ b/plugins/inputs/uwsgi/uwsgi_test.go @@ -0,0 +1,185 @@ +package uwsgi_test + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/influxdata/telegraf/plugins/inputs/uwsgi" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestBasic(t *testing.T) { + js := ` +{ + "version":"2.0.12", + "listen_queue":0, + "listen_queue_errors":0, + "signal_queue":0, + "load":0, + "pid":28372, + "uid":1000, + "gid":1000, + "cwd":"/opt/uwsgi", + "locks":[ + { + "user 0":0 + }, + { + "signal":0 + }, + { + "filemon":0 + }, + { + "timer":0 + }, + { + "rbtimer":0 + }, + { + "cron":0 + }, + { + "rpc":0 + }, + { + "snmp":0 + } + ], + "sockets":[ + { + "name":"127.0.0.1:47430", + "proto":"uwsgi", + "queue":0, + "max_queue":100, + "shared":0, + "can_offload":0 + } + ], + "workers":[ + { + "id":1, + "pid":28375, + "accepting":1, + "requests":0, + "delta_requests":0, + "exceptions":0, + "harakiri_count":0, + "signals":0, + "signal_queue":0, + "status":"idle", + "rss":0, + "vsz":0, + "running_time":0, + "last_spawn":1459942782, + "respawn_count":1, + "tx":0, + "avg_rt":0, + "apps":[ + { + "id":0, + "modifier1":0, + "mountpoint":"", + "startup_time":0, + "requests":0, + "exceptions":0, + "chdir":"" + } + ], + "cores":[ + { + "id":0, + "requests":0, + "static_requests":0, + "routed_requests":0, + "offloaded_requests":0, + "write_errors":0, + "read_errors":0, + "in_request":0, + "vars":[ + + ] + } + ] + } + ] +} +` + + fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/" { + _, _ = w.Write([]byte(js)) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + + defer fakeServer.Close() + + plugin := &uwsgi.Uwsgi{ + Servers: []string{fakeServer.URL + "/"}, + } + var acc testutil.Accumulator + plugin.Gather(&acc) + require.Equal(t, 0, len(acc.Errors)) +} + +func TestInvalidJSON(t *testing.T) { + js := ` +{ + "version":"2.0.12", + "listen_queue":0, + "listen_queue_errors":0, + "signal_queue":0, + "load":0, + "pid:28372 + "uid":10 +} +` + + fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/" { + _, _ = w.Write([]byte(js)) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + + defer fakeServer.Close() + + plugin := &uwsgi.Uwsgi{ + Servers: []string{fakeServer.URL + "/"}, + } + var acc testutil.Accumulator + plugin.Gather(&acc) + require.Equal(t, 1, len(acc.Errors)) +} + +func TestHttpError(t *testing.T) { + plugin := &uwsgi.Uwsgi{ + Servers: []string{"http://novalidurladress/"}, + } + var acc testutil.Accumulator + plugin.Gather(&acc) + require.Equal(t, 1, len(acc.Errors)) +} + +func TestTcpError(t *testing.T) { + plugin := &uwsgi.Uwsgi{ + Servers: []string{"tcp://novalidtcpadress/"}, + } + var acc testutil.Accumulator + plugin.Gather(&acc) + require.Equal(t, 1, len(acc.Errors)) +} + +func TestUnixSocketError(t *testing.T) { + plugin := &uwsgi.Uwsgi{ + Servers: []string{"unix:///novalidunixsocket"}, + } + var acc 
testutil.Accumulator + plugin.Gather(&acc) + require.Equal(t, 1, len(acc.Errors)) +} From 9bdb3992d58fcb81e5d7b324a4dade6d404ee144 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 30 Jul 2019 21:33:29 -0700 Subject: [PATCH 1044/1815] Require Kafka 0.10.2.0 or later in kafka_consumer (#6181) --- plugins/inputs/kafka_consumer/README.md | 3 +-- plugins/inputs/kafka_consumer/kafka_consumer.go | 7 +++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index 26ebca39d..efd3ffad6 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -23,8 +23,7 @@ and use the old zookeeper connection method. # client_id = "Telegraf" ## Set the minimal supported Kafka version. Setting this enables the use of new - ## Kafka features and APIs. Of particular interest, lz4 compression - ## requires at least version 0.10.0.0. + ## Kafka features and APIs. Must be 0.10.2.0 or greater. ## ex: version = "1.1.0" # version = "" diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 10a6251be..2703bb52d 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -28,8 +28,7 @@ const sampleConfig = ` # client_id = "Telegraf" ## Set the minimal supported Kafka version. Setting this enables the use of new - ## Kafka features and APIs. Of particular interest, lz4 compression - ## requires at least version 0.10.0.0. + ## Kafka features and APIs. Must be 0.10.2.0 or greater. ## ex: version = "1.1.0" # version = "" @@ -143,11 +142,15 @@ func (k *KafkaConsumer) Init() error { config := sarama.NewConfig() config.Consumer.Return.Errors = true + // Kafka version 0.10.2.0 is required for consumer groups. 
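+	// This default acts only as a floor; a user-supplied `version` option is
+	// parsed below with sarama.ParseKafkaVersion and overrides it.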
+	config.Version = sarama.V0_10_2_0
+
 	if k.Version != "" {
 		version, err := sarama.ParseKafkaVersion(k.Version)
 		if err != nil {
 			return err
 		}
+
 		config.Version = version
 	}

From ef9a1c0d5b7afb9f5d725df7f7fdbd86bdede601 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Thierry=20Sall=C3=A9?=
Date: Wed, 31 Jul 2019 06:36:19 +0200
Subject: [PATCH 1045/1815] Gather per collections stats in mongodb input plugin (#6137)
---
 etc/telegraf.conf                        |  5 +++
 plugins/inputs/mongodb/README.md         | 20 +++++++++
 plugins/inputs/mongodb/mongodb.go        |  9 +++-
 plugins/inputs/mongodb/mongodb_data.go   | 44 +++++++++++++++++++
 plugins/inputs/mongodb/mongodb_server.go | 49 ++++++++++++++++++++-
 plugins/inputs/mongodb/mongostat.go      | 54 ++++++++++++++++++++++++
 6 files changed, 179 insertions(+), 2 deletions(-)

diff --git a/etc/telegraf.conf b/etc/telegraf.conf
index febfe6454..d74c6a0a4 100644
--- a/etc/telegraf.conf
+++ b/etc/telegraf.conf
@@ -3166,6 +3166,11 @@
 #
 #   ## When true, collect per database stats
 #   # gather_perdb_stats = false
+#   ## When true, collect per collection stats
+#   # gather_col_stats = false
+#   ## List of db where collections stats are collected
+#   ## If empty, all db are concerned
+#   # col_stats_dbs = ["local"]
 #
 #   ## Optional TLS Config
 #   # tls_ca = "/etc/telegraf/ca.pem"
diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md
index 07ab133d4..c6c4abc15 100644
--- a/plugins/inputs/mongodb/README.md
+++ b/plugins/inputs/mongodb/README.md
@@ -13,6 +13,11 @@
   ## When true, collect per database stats
   # gather_perdb_stats = false
+  ## When true, collect per collection stats
+  # gather_col_stats = false
+  ## List of db where collections stats are collected
+  ## If empty, all db are concerned
+  # col_stats_dbs = ["local"]
 
   ## Optional TLS Config
   # tls_ca = "/etc/telegraf/ca.pem"
@@ -147,6 +152,20 @@ Error in input [mongodb]: not authorized on admin to execute command { serverSta
     - storage_size (integer)
     - type (string)
 
+- mongodb_col_stats
+  - tags:
+    - hostname
+    - collection
+    - db_name
+  - fields:
+    - size (integer)
+    - avg_obj_size (integer)
+    - storage_size (integer)
+    - total_index_size (integer)
+    - ok (integer)
+    - count (integer)
+    - type (string)
+
 - mongodb_shard_stats
   - tags:
     - hostname
@@ -161,5 +180,6 @@ Error in input [mongodb]: not authorized on admin to execute command { serverSta
 mongodb,hostname=127.0.0.1:27017 
active_reads=0i,active_writes=0i,commands=1335i,commands_per_sec=7i,connections_available=814i,connections_current=5i,connections_total_created=0i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=1i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=1i,deletes=0i,deletes_per_sec=0i,document_deleted=0i,document_inserted=0i,document_returned=13i,document_updated=0i,flushes=5i,flushes_per_sec=0i,getmores=269i,getmores_per_sec=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,member_status="PRI",net_in_bytes=986i,net_in_bytes_count=358006i,net_out_bytes=23906i,net_out_bytes_count=661507i,open_connections=5i,percent_cache_dirty=0,percent_cache_used=0,queries=18i,queries_per_sec=3i,queued_reads=0i,queued_writes=0i,repl_commands=0i,repl_commands_per_sec=0i,repl_deletes=0i,repl_deletes_per_sec=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=0i,repl_inserts_per_sec=0i,repl_lag=0i,repl_oplog_window_sec=24355215i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=0i,repl_updates_per_sec=0i,resident_megabytes=62i,state="PRIMARY",total_available=0i,total_created=0i,total_in_use=0i,total_refreshing=0i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=23i,ttl_passes_per_sec=0i,updates=0i,updates_per_sec=0i,vsize_megabytes=713i,wtcache_app_threads_page_read_count=13i,wtcache_app_threads_page_read_time=74i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=55271i,wtcache_bytes_written_from=125402i,wtcache_current_bytes=117050i,wtcache_max_bytes_configured=1073741824i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=0i,wtcache_worker_thread_evictingpages=0i 1547159491000000000 mongodb_db_stats,db_name=admin,hostname=127.0.0.1:27017 avg_obj_size=241,collections=2i,data_size=723i,index_size=49152i,indexes=3i,num_extents=0i,objects=3i,ok=1i,storage_size=53248i,type="db_stat" 1547159491000000000 mongodb_db_stats,db_name=local,hostname=127.0.0.1:27017 avg_obj_size=813.9705882352941,collections=6i,data_size=55350i,index_size=102400i,indexes=5i,num_extents=0i,objects=68i,ok=1i,storage_size=204800i,type="db_stat" 1547159491000000000 +mongodb_col_stats,collection=foo,db_name=local,hostname=127.0.0.1:27017 size=375005928i,avg_obj_size=5494,type="col_stat",storage_size=249307136i,total_index_size=2138112i,ok=1i,count=68251i 1547159491000000000 mongodb_shard_stats,hostname=127.0.0.1:27017,in_use=3i,available=3i,created=4i,refreshing=0i 1522799074000000000 ``` diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 895667dee..696d595e6 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -22,6 +22,8 @@ type MongoDB struct { Ssl Ssl mongos map[string]*Server GatherPerdbStats bool + GatherColStats bool + ColStatsDbs []string tlsint.ClientConfig } @@ -40,6 +42,11 @@ var sampleConfig = ` ## When true, collect per database stats # gather_perdb_stats = false + ## When true, collect per collection stats + # gather_col_stats = false + ## List of db where collections stats are collected + ## If empty, all db are concerned + # col_stats_dbs = ["local"] ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" @@ -164,7 +171,7 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error { } server.Session = sess } - return server.gatherData(acc, m.GatherPerdbStats) + return server.gatherData(acc, m.GatherPerdbStats, m.GatherColStats, m.ColStatsDbs) } func init() { diff 
--git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index 5fa0c237d..c218fd3ad 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -13,6 +13,7 @@ type MongodbData struct { Fields map[string]interface{} Tags map[string]string DbData []DbData + ColData []ColData ShardHostData []DbData } @@ -21,6 +22,12 @@ type DbData struct { Fields map[string]interface{} } +type ColData struct { + Name string + DbName string + Fields map[string]interface{} +} + func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData { return &MongodbData{ StatLine: statLine, @@ -159,6 +166,15 @@ var DbDataStats = map[string]string{ "ok": "Ok", } +var ColDataStats = map[string]string{ + "count": "Count", + "size": "Size", + "avg_obj_size": "AvgObjSize", + "storage_size": "StorageSize", + "total_index_size": "TotalIndexSize", + "ok": "Ok", +} + func (d *MongodbData) AddDbStats() { for _, dbstat := range d.StatLine.DbStatsLines { dbStatLine := reflect.ValueOf(&dbstat).Elem() @@ -175,6 +191,23 @@ func (d *MongodbData) AddDbStats() { } } +func (d *MongodbData) AddColStats() { + for _, colstat := range d.StatLine.ColStatsLines { + colStatLine := reflect.ValueOf(&colstat).Elem() + newColData := &ColData{ + Name: colstat.Name, + DbName: colstat.DbName, + Fields: make(map[string]interface{}), + } + newColData.Fields["type"] = "col_stat" + for key, value := range ColDataStats { + val := colStatLine.FieldByName(value).Interface() + newColData.Fields[key] = val + } + d.ColData = append(d.ColData, *newColData) + } +} + func (d *MongodbData) AddShardHostStats() { for host, hostStat := range d.StatLine.ShardHostStatsLines { hostStatLine := reflect.ValueOf(&hostStat).Elem() @@ -242,6 +275,17 @@ func (d *MongodbData) flush(acc telegraf.Accumulator) { ) db.Fields = make(map[string]interface{}) } + for _, col := range d.ColData { + d.Tags["collection"] = col.Name + d.Tags["db_name"] = col.DbName + acc.AddFields( + "mongodb_col_stats", + col.Fields, + d.Tags, + d.StatLine.Time, + ) + col.Fields = make(map[string]interface{}) + } for _, host := range d.ShardHostData { d.Tags["hostname"] = host.Name acc.AddFields( diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index 6ab236b58..5adc58d04 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -70,7 +70,7 @@ func (s *Server) gatherOplogStats() *OplogStats { return stats } -func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error { +func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool, gatherColStats bool, colStatsDbs []string) error { s.Session.SetMode(mgo.Eventual, true) s.Session.SetSocketTimeout(0) result_server := &ServerStatus{} @@ -147,11 +147,48 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error } } + result_col_stats := &ColStats{} + if gatherColStats == true { + names := []string{} + names, err = s.Session.DatabaseNames() + if err != nil { + log.Println("E! Error getting database names (" + err.Error() + ")") + } + for _, db_name := range names { + if stringInSlice(db_name, colStatsDbs) || len(colStatsDbs) == 0 { + var colls []string + colls, err = s.Session.DB(db_name).CollectionNames() + if err != nil { + log.Println("E! 
Error getting collection names (" + err.Error() + ")") + } + for _, col_name := range colls { + col_stat_line := &ColStatsData{} + err = s.Session.DB(db_name).Run(bson.D{ + { + Name: "collStats", + Value: col_name, + }, + }, col_stat_line) + if err != nil { + log.Println("E! Error getting col stats from " + col_name + "(" + err.Error() + ")") + } + collection := &Collection{ + Name: col_name, + DbName: db_name, + ColStatsData: col_stat_line, + } + result_col_stats.Collections = append(result_col_stats.Collections, *collection) + } + } + } + } + result := &MongoStatus{ ServerStatus: result_server, ReplSetStatus: result_repl, ClusterStatus: result_cluster, DbStats: result_db_stats, + ColStats: result_col_stats, ShardStats: resultShards, OplogStats: oplogStats, } @@ -173,8 +210,18 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error ) data.AddDefaultStats() data.AddDbStats() + data.AddColStats() data.AddShardHostStats() data.flush(acc) } return nil } + +func stringInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index b763631ca..709c074d7 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -34,6 +34,7 @@ type MongoStatus struct { ReplSetStatus *ReplSetStatus ClusterStatus *ClusterStatus DbStats *DbStats + ColStats *ColStats ShardStats *ShardStats OplogStats *OplogStats } @@ -92,6 +93,26 @@ type DbStatsData struct { GleStats interface{} `bson:"gleStats"` } +type ColStats struct { + Collections []Collection +} + +type Collection struct { + Name string + DbName string + ColStatsData *ColStatsData +} + +type ColStatsData struct { + Collection string `bson:"ns"` + Count int64 `bson:"count"` + Size int64 `bson:"size"` + AvgObjSize float64 `bson:"avgObjSize"` + StorageSize int64 `bson:"storageSize"` + TotalIndexSize int64 `bson:"totalIndexSize"` + Ok int64 `bson:"ok"` +} + // ClusterStatus stores information related to the whole cluster type ClusterStatus struct { JumboChunksCount int64 @@ -541,6 +562,9 @@ type StatLine struct { // DB stats field DbStatsLines []DbStatLine + // Col Stats field + ColStatsLines []ColStatLine + // Shard stats TotalInUse, TotalAvailable, TotalCreated, TotalRefreshing int64 @@ -560,6 +584,16 @@ type DbStatLine struct { IndexSize int64 Ok int64 } +type ColStatLine struct { + Name string + DbName string + Count int64 + Size int64 + AvgObjSize float64 + StorageSize int64 + TotalIndexSize int64 + Ok int64 +} type ShardHostStatLine struct { InUse int64 @@ -918,6 +952,26 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.DbStatsLines = append(returnVal.DbStatsLines, *dbStatLine) } + newColStats := *newMongo.ColStats + for _, col := range newColStats.Collections { + colStatsData := col.ColStatsData + // mongos doesn't have the db key, so setting the db name + if colStatsData.Collection == "" { + colStatsData.Collection = col.Name + } + colStatLine := &ColStatLine{ + Name: colStatsData.Collection, + DbName: col.DbName, + Count: colStatsData.Count, + Size: colStatsData.Size, + AvgObjSize: colStatsData.AvgObjSize, + StorageSize: colStatsData.StorageSize, + TotalIndexSize: colStatsData.TotalIndexSize, + Ok: colStatsData.Ok, + } + returnVal.ColStatsLines = append(returnVal.ColStatsLines, *colStatLine) + } + // Set shard stats newShardStats := *newMongo.ShardStats returnVal.TotalInUse = newShardStats.TotalInUse From 
df9023034115659a88a7f2eaba9f88be3d8831d1 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 30 Jul 2019 22:39:07 -0700
Subject: [PATCH 1046/1815] Update changelog
---
 CHANGELOG.md | 5 +++++
 README.md    | 1 +
 2 files changed, 6 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8f91570b8..be22fd47f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,7 @@
 
 - [docker_log](/plugins/inputs/docker_log) - Contributed by @prashanthjbabu
 - [fireboard](/plugins/inputs/fireboard) - Contributed by @ronnocol
+- [uwsgi](/plugins/inputs/uwsgi) - Contributed by @blaggacao
 
 #### New Parsers
 
@@ -49,6 +50,9 @@
 - [#6129](https://github.com/influxdata/telegraf/pull/6129): Add support for field/tag keys to strings processor.
 - [#6143](https://github.com/influxdata/telegraf/pull/6143): Add certificate verification status to x509_cert input.
 - [#6163](https://github.com/influxdata/telegraf/pull/6163): Support percentage value parsing in redis input.
+- [#6024](https://github.com/influxdata/telegraf/pull/6024): Load external Go plugins from --plugin-directory.
+- [#6184](https://github.com/influxdata/telegraf/pull/6184): Add ability to exclude db/bucket tag from influxdb outputs.
+- [#6137](https://github.com/influxdata/telegraf/pull/6137): Gather per collections stats in mongodb input plugin.
 
 #### Bugfixes
 
@@ -57,6 +61,7 @@
 - [#6004](https://github.com/influxdata/telegraf/issues/6004): Fix race in master node detection in elasticsearch input.
 - [#6100](https://github.com/influxdata/telegraf/issues/6100): Fix SSPI authentication not working in sqlserver input.
 - [#6142](https://github.com/influxdata/telegraf/issues/6142): Fix memory error panic in mqtt input.
+- [#6136](https://github.com/influxdata/telegraf/issues/6136): Support Kafka 2.3.0 consumer groups.
 
 ## v1.11.3 [2019-07-23]
 
diff --git a/README.md b/README.md
index ad3c3bc4e..fd4c4fc40 100644
--- a/README.md
+++ b/README.md
@@ -283,6 +283,7 @@ For documentation on the latest development code see the [documentation index][d
 * [twemproxy](./plugins/inputs/twemproxy)
 * [udp_listener](./plugins/inputs/socket_listener)
 * [unbound](./plugins/inputs/unbound)
+* [uwsgi](./plugins/inputs/uwsgi)
 * [varnish](./plugins/inputs/varnish)
 * [vsphere](./plugins/inputs/vsphere) VMware vSphere
 * [webhooks](./plugins/inputs/webhooks)
From 282c8ce0965e644d767cb7ca16aaddfa15e2049e Mon Sep 17 00:00:00 2001
From: Moritz Maisel
Date: Wed, 31 Jul 2019 08:50:49 +0200
Subject: [PATCH 1047/1815] Add diff and non_negative_diff to basicstats aggregator (#4435)
---
 plugins/aggregators/basicstats/README.md      | 12 +--
 plugins/aggregators/basicstats/basicstats.go  | 38 +++++++--
 .../aggregators/basicstats/basicstats_test.go | 81 +++++++++++++++++++
 3 files changed, 118 insertions(+), 13 deletions(-)

diff --git a/plugins/aggregators/basicstats/README.md b/plugins/aggregators/basicstats/README.md
index e9318036b..e6ae3b31a 100644
--- a/plugins/aggregators/basicstats/README.md
+++ b/plugins/aggregators/basicstats/README.md
@@ -1,6 +1,6 @@
 # BasicStats Aggregator Plugin
 
-The BasicStats aggregator plugin give us count,max,min,mean,sum,s2(variance), stdev for a set of values,
+The BasicStats aggregator plugin gives us count, diff, max, min, mean, non_negative_diff, sum, s2 (variance), and stdev for a set of values,
 emitting the aggregate every `period` seconds.
 
 ### Configuration:
@@ -15,20 +15,22 @@ emitting the aggregate every `period` seconds.
drop_original = false ## Configures which basic stats to push as fields - # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"] + # stats = ["count","diff","min","max","mean","non_negative_diff","stdev","s2","sum"] ``` - stats - - If not specified, then `count`, `min`, `max`, `mean`, `stdev`, and `s2` are aggregated and pushed as fields. `sum` is not aggregated by default to maintain backwards compatibility. + - If not specified, then `count`, `min`, `max`, `mean`, `stdev`, and `s2` are aggregated and pushed as fields. `sum`, `diff` and `non_negative_diff` are not aggregated by default to maintain backwards compatibility. - If empty array, no stats are aggregated ### Measurements & Fields: - measurement1 - field1_count + - field1_diff (difference) - field1_max - field1_min - field1_mean + - field1_non_negative_diff (non-negative difference) - field1_sum - field1_s2 (variance) - field1_stdev (standard deviation) @@ -43,8 +45,8 @@ No tags are applied by this aggregator. $ telegraf --config telegraf.conf --quiet system,host=tars load1=1 1475583980000000000 system,host=tars load1=1 1475583990000000000 -system,host=tars load1_count=2,load1_max=1,load1_min=1,load1_mean=1,load1_sum=2,load1_s2=0,load1_stdev=0 1475584010000000000 +system,host=tars load1_count=2,load1_diff=0,load1_max=1,load1_min=1,load1_mean=1,load1_sum=2,load1_s2=0,load1_stdev=0 1475584010000000000 system,host=tars load1=1 1475584020000000000 system,host=tars load1=3 1475584030000000000 -system,host=tars load1_count=2,load1_max=3,load1_min=1,load1_mean=2,load1_sum=4,load1_s2=2,load1_stdev=1.414162 1475584010000000000 +system,host=tars load1_count=2,load1_diff=2,load1_max=3,load1_min=1,load1_mean=2,load1_sum=4,load1_s2=2,load1_stdev=1.414162 1475584010000000000 ``` diff --git a/plugins/aggregators/basicstats/basicstats.go b/plugins/aggregators/basicstats/basicstats.go index d054f39f0..28d1f2741 100644 --- a/plugins/aggregators/basicstats/basicstats.go +++ b/plugins/aggregators/basicstats/basicstats.go @@ -16,13 +16,15 @@ type BasicStats struct { } type configuredStats struct { - count bool - min bool - max bool - mean bool - variance bool - stdev bool - sum bool + count bool + min bool + max bool + mean bool + variance bool + stdev bool + sum bool + diff bool + non_negative_diff bool } func NewBasicStats() *BasicStats { @@ -43,7 +45,9 @@ type basicstats struct { max float64 sum float64 mean float64 - M2 float64 //intermedia value for variance/stdev + diff float64 + M2 float64 //intermediate value for variance/stdev + LAST float64 //intermediate value for diff } var sampleConfig = ` @@ -82,7 +86,9 @@ func (m *BasicStats) Add(in telegraf.Metric) { max: fv, mean: fv, sum: fv, + diff: 0.0, M2: 0.0, + LAST: fv, } } } @@ -98,7 +104,9 @@ func (m *BasicStats) Add(in telegraf.Metric) { max: fv, mean: fv, sum: fv, + diff: 0.0, M2: 0.0, + LAST: fv, } continue } @@ -127,6 +135,8 @@ func (m *BasicStats) Add(in telegraf.Metric) { } //sum compute tmp.sum += fv + //diff compute + tmp.diff = fv - tmp.LAST //store final data m.cache[id].fields[field.Key] = tmp } @@ -167,6 +177,13 @@ func (m *BasicStats) Push(acc telegraf.Accumulator) { if config.stdev { fields[k+"_stdev"] = math.Sqrt(variance) } + if config.diff { + fields[k+"_diff"] = v.diff + } + if config.non_negative_diff && v.diff >= 0 { + fields[k+"_non_negative_diff"] = v.diff + } + } //if count == 1 StdDev = infinite => so I won't send data } @@ -199,6 +216,10 @@ func parseStats(names []string) *configuredStats { parsed.stdev = true case "sum": parsed.sum = true + case "diff": + 
parsed.diff = true + case "non_negative_diff": + parsed.non_negative_diff = true default: log.Printf("W! Unrecognized basic stat '%s', ignoring", name) @@ -219,6 +240,7 @@ func defaultStats() *configuredStats { defaults.variance = true defaults.stdev = true defaults.sum = false + defaults.non_negative_diff = false return defaults } diff --git a/plugins/aggregators/basicstats/basicstats_test.go b/plugins/aggregators/basicstats/basicstats_test.go index 040cb0b82..9965c5caa 100644 --- a/plugins/aggregators/basicstats/basicstats_test.go +++ b/plugins/aggregators/basicstats/basicstats_test.go @@ -17,6 +17,7 @@ var m1, _ = metric.New("m1", "b": int64(1), "c": float64(2), "d": float64(2), + "g": int64(3), }, time.Now(), ) @@ -31,6 +32,7 @@ var m2, _ = metric.New("m1", "f": uint64(200), "ignoreme": "string", "andme": true, + "g": int64(1), }, time.Now(), ) @@ -86,6 +88,12 @@ func TestBasicStatsWithPeriod(t *testing.T) { "f_max": float64(200), "f_min": float64(200), "f_mean": float64(200), + "g_count": float64(2), //g + "g_max": float64(3), + "g_min": float64(1), + "g_mean": float64(2), + "g_s2": float64(2), + "g_stdev": math.Sqrt(2), } expectedTags := map[string]string{ "foo": "bar", @@ -118,6 +126,10 @@ func TestBasicStatsDifferentPeriods(t *testing.T) { "d_max": float64(2), "d_min": float64(2), "d_mean": float64(2), + "g_count": float64(1), //g + "g_max": float64(3), + "g_min": float64(3), + "g_mean": float64(3), } expectedTags := map[string]string{ "foo": "bar", @@ -153,6 +165,10 @@ func TestBasicStatsDifferentPeriods(t *testing.T) { "f_max": float64(200), "f_min": float64(200), "f_mean": float64(200), + "g_count": float64(1), //g + "g_max": float64(1), + "g_min": float64(1), + "g_mean": float64(1), } expectedTags = map[string]string{ "foo": "bar", @@ -179,6 +195,7 @@ func TestBasicStatsWithOnlyCount(t *testing.T) { "d_count": float64(2), "e_count": float64(1), "f_count": float64(1), + "g_count": float64(2), } expectedTags := map[string]string{ "foo": "bar", @@ -205,6 +222,7 @@ func TestBasicStatsWithOnlyMin(t *testing.T) { "d_min": float64(2), "e_min": float64(200), "f_min": float64(200), + "g_min": float64(1), } expectedTags := map[string]string{ "foo": "bar", @@ -231,6 +249,7 @@ func TestBasicStatsWithOnlyMax(t *testing.T) { "d_max": float64(6), "e_max": float64(200), "f_max": float64(200), + "g_max": float64(3), } expectedTags := map[string]string{ "foo": "bar", @@ -257,6 +276,7 @@ func TestBasicStatsWithOnlyMean(t *testing.T) { "d_mean": float64(4), "e_mean": float64(200), "f_mean": float64(200), + "g_mean": float64(2), } expectedTags := map[string]string{ "foo": "bar", @@ -283,6 +303,7 @@ func TestBasicStatsWithOnlySum(t *testing.T) { "d_sum": float64(8), "e_sum": float64(200), "f_sum": float64(200), + "g_sum": float64(4), } expectedTags := map[string]string{ "foo": "bar", @@ -359,6 +380,7 @@ func TestBasicStatsWithOnlyVariance(t *testing.T) { "b_s2": float64(2), "c_s2": float64(2), "d_s2": float64(8), + "g_s2": float64(2), } expectedTags := map[string]string{ "foo": "bar", @@ -383,6 +405,7 @@ func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) { "b_stdev": math.Sqrt(2), "c_stdev": math.Sqrt(2), "d_stdev": math.Sqrt(8), + "g_stdev": math.Sqrt(2), } expectedTags := map[string]string{ "foo": "bar", @@ -415,6 +438,57 @@ func TestBasicStatsWithMinAndMax(t *testing.T) { "e_min": float64(200), "f_max": float64(200), //f "f_min": float64(200), + "g_max": float64(3), //g + "g_min": float64(1), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, 
"m1", expectedFields, expectedTags) +} + +// Test only aggregating diff +func TestBasicStatsWithDiff(t *testing.T) { + + aggregator := NewBasicStats() + aggregator.Stats = []string{"diff"} + + aggregator.Add(m1) + aggregator.Add(m2) + + acc := testutil.Accumulator{} + aggregator.Push(&acc) + + expectedFields := map[string]interface{}{ + "a_diff": float64(0), + "b_diff": float64(2), + "c_diff": float64(2), + "d_diff": float64(4), + "g_diff": float64(-2), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} + +// Test only aggregating non_negative_diff +func TestBasicStatsWithNonNegativeDiff(t *testing.T) { + + aggregator := NewBasicStats() + aggregator.Stats = []string{"non_negative_diff"} + + aggregator.Add(m1) + aggregator.Add(m2) + + acc := testutil.Accumulator{} + aggregator.Push(&acc) + + expectedFields := map[string]interface{}{ + "a_non_negative_diff": float64(0), + "b_non_negative_diff": float64(2), + "c_non_negative_diff": float64(2), + "d_non_negative_diff": float64(4), } expectedTags := map[string]string{ "foo": "bar", @@ -471,6 +545,13 @@ func TestBasicStatsWithAllStats(t *testing.T) { "f_min": float64(200), "f_mean": float64(200), "f_sum": float64(200), + "g_count": float64(2), //g + "g_max": float64(3), + "g_min": float64(1), + "g_mean": float64(2), + "g_s2": float64(2), + "g_stdev": math.Sqrt(2), + "g_sum": float64(4), } expectedTags := map[string]string{ "foo": "bar", From dc292b73a99ac345c5fbef11328ccbe9edf382d7 Mon Sep 17 00:00:00 2001 From: pberlowski Date: Wed, 31 Jul 2019 20:52:12 +0100 Subject: [PATCH 1048/1815] Add grace period for metrics late for aggregation (#6049) --- docs/CONFIGURATION.md | 4 ++ internal/config/config.go | 14 +++++ internal/models/running_aggregator.go | 7 +-- internal/models/running_aggregator_test.go | 62 ++++++++++++++++++++++ 4 files changed, 84 insertions(+), 3 deletions(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index dd2512ef3..1b101b02d 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -325,6 +325,10 @@ Parameters that can be used with any aggregator plugin: how long for aggregators to wait before receiving metrics from input plugins, in the case that aggregators are flushing and inputs are gathering on the same interval. +- **grace**: The duration when the metrics will still be aggregated + by the plugin, even though they're outside of the aggregation period. This + is needed in a situation when the agent is expected to receive late metrics + and it's acceptable to roll them up into next aggregation period. - **drop_original**: If true, the original metric will be dropped by the aggregator and will not get sent to the output plugins. - **name_override**: Override the base name of the measurement. 
(Default is diff --git a/internal/config/config.go b/internal/config/config.go index a5315b9b6..d8d545734 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1025,6 +1025,7 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err Name: name, Delay: time.Millisecond * 100, Period: time.Second * 30, + Grace: time.Second * 0, } if node, ok := tbl.Fields["period"]; ok { @@ -1053,6 +1054,18 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err } } + if node, ok := tbl.Fields["grace"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + dur, err := time.ParseDuration(str.Value) + if err != nil { + return nil, err + } + + conf.Grace = dur + } + } + } if node, ok := tbl.Fields["drop_original"]; ok { if kv, ok := node.(*ast.KeyValue); ok { if b, ok := kv.Value.(*ast.Boolean); ok { @@ -1100,6 +1113,7 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err delete(tbl.Fields, "period") delete(tbl.Fields, "delay") + delete(tbl.Fields, "grace") delete(tbl.Fields, "drop_original") delete(tbl.Fields, "name_prefix") delete(tbl.Fields, "name_suffix") diff --git a/internal/models/running_aggregator.go b/internal/models/running_aggregator.go index 8bd983eef..e029aad56 100644 --- a/internal/models/running_aggregator.go +++ b/internal/models/running_aggregator.go @@ -59,6 +59,7 @@ type AggregatorConfig struct { DropOriginal bool Period time.Duration Delay time.Duration + Grace time.Duration NameOverride string MeasurementPrefix string @@ -135,9 +136,9 @@ func (r *RunningAggregator) Add(m telegraf.Metric) bool { r.Lock() defer r.Unlock() - if m.Time().Before(r.periodStart) || m.Time().After(r.periodEnd.Add(r.Config.Delay)) { - log.Printf("D! [%s] metric is outside aggregation window; discarding. %s: m: %s e: %s", - r.Name(), m.Time(), r.periodStart, r.periodEnd) + if m.Time().Before(r.periodStart.Add(-r.Config.Grace)) || m.Time().After(r.periodEnd.Add(r.Config.Delay)) { + log.Printf("D! [%s] metric is outside aggregation window; discarding. 
%s: m: %s e: %s g: %s", + r.Name(), m.Time(), r.periodStart, r.periodEnd, r.Config.Grace) r.MetricsDropped.Incr(1) return r.Config.DropOriginal } diff --git a/internal/models/running_aggregator_test.go b/internal/models/running_aggregator_test.go index 19476eecf..83b9dea0a 100644 --- a/internal/models/running_aggregator_test.go +++ b/internal/models/running_aggregator_test.go @@ -89,6 +89,68 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) { require.Equal(t, int64(101), acc.Metrics[0].Fields["sum"]) } +func TestAddMetricsOutsideCurrentPeriodWithGrace(t *testing.T) { + a := &TestAggregator{} + ra := NewRunningAggregator(a, &AggregatorConfig{ + Name: "TestRunningAggregator", + Filter: Filter{ + NamePass: []string{"*"}, + }, + Period: time.Millisecond * 1500, + Grace: time.Millisecond * 500, + }) + require.NoError(t, ra.Config.Filter.Compile()) + acc := testutil.Accumulator{} + now := time.Now() + ra.UpdateWindow(now, now.Add(ra.Config.Period)) + + m := testutil.MustMetric("RITest", + map[string]string{}, + map[string]interface{}{ + "value": int64(101), + }, + now.Add(-time.Hour), + telegraf.Untyped, + ) + require.False(t, ra.Add(m)) + + // metric before current period (late) + m = testutil.MustMetric("RITest", + map[string]string{}, + map[string]interface{}{ + "value": int64(100), + }, + now.Add(-time.Millisecond*1000), + telegraf.Untyped, + ) + require.False(t, ra.Add(m)) + + // metric before current period, but within grace period (late) + m = testutil.MustMetric("RITest", + map[string]string{}, + map[string]interface{}{ + "value": int64(102), + }, + now.Add(-time.Millisecond*200), + telegraf.Untyped, + ) + require.False(t, ra.Add(m)) + + // "now" metric + m = testutil.MustMetric("RITest", + map[string]string{}, + map[string]interface{}{ + "value": int64(101), + }, + time.Now().Add(time.Millisecond*50), + telegraf.Untyped) + require.False(t, ra.Add(m)) + + ra.Push(&acc) + require.Equal(t, 1, len(acc.Metrics)) + require.Equal(t, int64(203), acc.Metrics[0].Fields["sum"]) +} + func TestAddAndPushOnePeriod(t *testing.T) { a := &TestAggregator{} ra := NewRunningAggregator(a, &AggregatorConfig{ From 28f1bdb696e9826f33adec09acec23cee93a3ccf Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 31 Jul 2019 13:30:30 -0700 Subject: [PATCH 1049/1815] Add note to configuration about when log rotation occurs --- etc/telegraf.conf | 50 ++++++++++++++++++++++++++++++--------- etc/telegraf_windows.conf | 17 ++++++------- internal/config/config.go | 3 ++- 3 files changed, 50 insertions(+), 20 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index d74c6a0a4..ecefb668e 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -70,7 +70,8 @@ # logfile = "" ## The logfile will be rotated after the time interval specified. When set - ## to 0 no time based rotation is performed. + ## to 0 no time based rotation is performed. Logs are rotated only when + ## written to, if there is no log activity rotation may be delayed. # logfile_rotation_interval = "0d" ## The logfile will be rotated when it becomes larger than the specified @@ -110,6 +111,9 @@ ## tag is not set the 'database' option is used as the default. # database_tag = "" + ## If true, the database tag will not be added to the metric. + # exclude_database_tag = false + ## If true, no CREATE DATABASE queries will be sent. Set to true when using ## Telegraf with a user without permissions to create databases or when the ## database already exists. @@ -653,6 +657,9 @@ # ## tag is not set the 'bucket' option is used as the default. 
# #   bucket_tag = ""
#
+# #   ## If true, the bucket tag will not be added to the metric.
+# #   # exclude_bucket_tag = false
+
# #   ## Timeout for HTTP messages.
# #   # timeout = "5s"
#
@@ -2277,6 +2284,11 @@
# #   insecure_skip_verify = false


+# # Example go-plugin for Telegraf
+# [[inputs.example]]
+#   value = 42
+
+
# # Read metrics from one or more commands that can output to stdout
# [[inputs.exec]]
#   ## Commands array
@@ -4308,6 +4320,19 @@
#   thread_as_tag = false


+# # Read uWSGI metrics.
+# [[inputs.uwsgi]]
+#   ## List with urls of uWSGI Stats servers. URL must match pattern:
+#   ## scheme://address[:port]
+#   ##
+#   ## For example:
+#   ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"]
+#   servers = ["tcp://127.0.0.1:1717"]
+#
+#   ## General connection timeout
+#   # timeout = "5s"
+
+
# # A plugin to collect stats from Varnish HTTP Cache
# [[inputs.varnish]]
#   ## If running as a restricted user you can prepend sudo for additional access:
@@ -4897,6 +4922,22 @@
#   str_as_tags = false


-# # Read metrics from Kafka topic(s)
+# # Read metrics from Kafka topics
# [[inputs.kafka_consumer]]
-#   ## kafka servers
+#   ## Kafka brokers.
#   brokers = ["localhost:9092"]
-#   ## topic(s) to consume
+#
+#   ## Topics to consume.
#   topics = ["telegraf"]
-#   ## Add topic as tag if topic_tag is not empty
+#
+#   ## When set this tag will be added to all metrics with the topic as the value.
#   # topic_tag = ""
#
#   ## Optional Client id
#   # client_id = "Telegraf"
#
#   ## Set the minimal supported Kafka version. Setting this enables the use of new
-#   ## Kafka features and APIs. Of particular interest, lz4 compression
-#   ## requires at least version 0.10.0.0.
+#   ## Kafka features and APIs. Must be 0.10.2.0 or greater.
#   ## ex: version = "1.1.0"
#   # version = ""
#
@@ -4926,10 +4952,12 @@
#   # sasl_username = "kafka"
#   # sasl_password = "secret"
#
-#   ## the name of the consumer group
-#   consumer_group = "telegraf_metrics_consumers"
-#   ## Offset (must be either "oldest" or "newest")
-#   offset = "oldest"
+#   ## Name of the consumer group.
+#   # consumer_group = "telegraf_metrics_consumers"
+#
+#   ## Initial offset position; one of "oldest" or "newest".
+#   # offset = "oldest"
+
#
#   ## Maximum length of a message to consume, in bytes (default 0/unlimited);
#   ## larger messages are dropped
#   max_message_len = 1000000
diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf
index b02a6e1e8..01bb21e28 100644
--- a/etc/telegraf_windows.conf
+++ b/etc/telegraf_windows.conf
@@ -61,16 +61,17 @@
   ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
   precision = ""

-  ## Logging configuration:
-  ## Run telegraf with debug log messages.
-  debug = false
-  ## Run telegraf in quiet mode (error log messages only).
-  quiet = false
-  ## Specify the log file name. The empty string means to log to stderr.
-  logfile = "/Program Files/Telegraf/telegraf.log"
+  ## Log at debug level.
+  # debug = false
+
+  ## Log only error level messages.
+  # quiet = false
+
+  ## Log file name, the empty string means to log to stderr.
+  # logfile = ""

   ## The logfile will be rotated after the time interval specified. When set
-  ## to 0 no time based rotation is performed.
+  ## to 0 no time based rotation is performed. Logs are rotated only when
+  ## written to, if there is no log activity rotation may be delayed.
# logfile_rotation_interval = "0d" ## The logfile will be rotated when it becomes larger than the specified diff --git a/internal/config/config.go b/internal/config/config.go index d8d545734..802e3152e 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -289,7 +289,8 @@ var agentConfig = ` # logfile = "" ## The logfile will be rotated after the time interval specified. When set - ## to 0 no time based rotation is performed. + ## to 0 no time based rotation is performed. Logs are rotated only when + ## written to, if there is no log activity rotation may be delayed. # logfile_rotation_interval = "0d" ## The logfile will be rotated when it becomes larger than the specified From ef4f4eed3a70c8f0fbac3a4fd739178439fc4e4b Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Wed, 31 Jul 2019 16:55:25 -0700 Subject: [PATCH 1050/1815] Add left function to string processor (#6189) --- plugins/processors/strings/README.md | 6 ++++ plugins/processors/strings/strings.go | 18 ++++++++++++ plugins/processors/strings/strings_test.go | 32 ++++++++++++++++++++++ 3 files changed, 56 insertions(+) diff --git a/plugins/processors/strings/README.md b/plugins/processors/strings/README.md index 30d2cbb08..367732c6f 100644 --- a/plugins/processors/strings/README.md +++ b/plugins/processors/strings/README.md @@ -11,6 +11,7 @@ Implemented functions are: - trim_prefix - trim_suffix - replace +- left Please note that in this implementation these are processed in the order that they appear above. @@ -62,6 +63,11 @@ If you'd like to apply multiple processings to the same `tag_key` or `field_key` # measurement = "*" # old = ":" # new = "_" + + ## Trims strings based on width + # [[processors.strings.left]] + # field = "message" + # width = 10 ``` #### Trim, TrimLeft, TrimRight diff --git a/plugins/processors/strings/strings.go b/plugins/processors/strings/strings.go index 56bcf1b2c..e185bdd3b 100644 --- a/plugins/processors/strings/strings.go +++ b/plugins/processors/strings/strings.go @@ -17,6 +17,7 @@ type Strings struct { TrimPrefix []converter `toml:"trim_prefix"` TrimSuffix []converter `toml:"trim_suffix"` Replace []converter `toml:"replace"` + Left []converter `toml:"left"` converters []converter init bool @@ -36,6 +37,7 @@ type converter struct { Prefix string Old string New string + Width int fn ConvertFunc } @@ -79,6 +81,11 @@ const sampleConfig = ` # measurement = "*" # old = ":" # new = "_" + + ## Trims strings based on width + # [[processors.strings.left]] + # field = "message" + # width = 10 ` func (s *Strings) SampleConfig() string { @@ -270,6 +277,17 @@ func (s *Strings) initOnce() { } s.converters = append(s.converters, c) } + for _, c := range s.Left { + c := c + c.fn = func(s string) string { + if len(s) < c.Width { + return s + } else { + return s[:c.Width] + } + } + s.converters = append(s.converters, c) + } s.init = true } diff --git a/plugins/processors/strings/strings_test.go b/plugins/processors/strings/strings_test.go index c89ab7b66..95d16c05e 100644 --- a/plugins/processors/strings/strings_test.go +++ b/plugins/processors/strings/strings_test.go @@ -479,6 +479,38 @@ func TestFieldKeyConversions(t *testing.T) { require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv) }, }, + { + name: "Should trim the existing field to 6 characters", + plugin: &Strings{ + Left: []converter{ + { + Field: "Request", + Width: 6, + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("Request") + require.True(t, ok) + require.Equal(t, "/mixed", fv) + }, + 
}, + { + name: "Should do nothing to the string", + plugin: &Strings{ + Left: []converter{ + { + Field: "Request", + Width: 600, + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("Request") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv) + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From 004b2cf5782d3b0ff6a5a3e35955cc31585b5571 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Wed, 31 Jul 2019 17:59:54 -0600 Subject: [PATCH 1051/1815] Add intermediates when verifying cert in x509 input (#6159) --- plugins/inputs/x509_cert/x509_cert.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index 8558378d1..825fd5eeb 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -176,9 +176,16 @@ func (c *X509Cert) Gather(acc telegraf.Accumulator) error { // The first certificate is the leaf/end-entity certificate which needs DNS // name validation against the URL hostname. - opts := x509.VerifyOptions{} + opts := x509.VerifyOptions{ + Intermediates: x509.NewCertPool(), + } if i == 0 { opts.DNSName = u.Hostname() + for j, cert := range certs { + if j != 0 { + opts.Intermediates.AddCert(cert) + } + } } if c.tlsCfg.RootCAs != nil { opts.Roots = c.tlsCfg.RootCAs From 1557e9094d0a476b7392b00eb9bae2889352376c Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Fri, 2 Aug 2019 10:48:40 -0700 Subject: [PATCH 1052/1815] Update smart input docs for attributes clarity (#6192) --- plugins/inputs/smart/README.md | 5 ++++- plugins/inputs/smart/smart.go | 3 ++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/smart/README.md b/plugins/inputs/smart/README.md index f520308b0..1051e3662 100644 --- a/plugins/inputs/smart/README.md +++ b/plugins/inputs/smart/README.md @@ -3,6 +3,8 @@ Get metrics using the command line utility `smartctl` for S.M.A.R.T. (Self-Monitoring, Analysis and Reporting Technology) storage devices. SMART is a monitoring system included in computer hard disk drives (HDDs) and solid-state drives (SSDs)[1] that detects and reports on various indicators of drive reliability, with the intent of enabling the anticipation of hardware failures. See smartmontools (https://www.smartmontools.org/). +SMART information is separated between different measurements: `smart_device` is used for general information, while `smart_attribute` stores the detailed attribute information if `attributes = true` is enabled in the plugin configuration. + If no devices are specified, the plugin will scan for SMART devices via the following command: ``` @@ -46,7 +48,8 @@ smartctl -s on ## "never" depending on your disks. # nocheck = "standby" - ## Gather detailed metrics for each SMART Attribute. + ## Gather all returned S.M.A.R.T. attribute metrics and the detailed + ## information from each drive into the `smart_attribute` measurement. # attributes = false ## Optionally specify devices to exclude from reporting. diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go index 06bf21e10..f022261df 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -139,7 +139,8 @@ var sampleConfig = ` ## "never" depending on your disks. # nocheck = "standby" - ## Gather detailed metrics for each SMART Attribute. + ## Gather all returned S.M.A.R.T. 
attribute metrics and the detailed
+  ## information from each drive into the 'smart_attribute' measurement.
   # attributes = false
 
   ## Optionally specify devices to exclude from reporting.
From 364bf38b4ae9a86555dac22306c41c9d6320a9be Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Fri, 2 Aug 2019 12:34:19 -0700
Subject: [PATCH 1053/1815] Update fail2ban readme
---
 plugins/inputs/fail2ban/README.md | 61 ++++++++++++++++---------------
 1 file changed, 32 insertions(+), 29 deletions(-)

diff --git a/plugins/inputs/fail2ban/README.md b/plugins/inputs/fail2ban/README.md
index 0b0e65414..1762bbaf2 100644
--- a/plugins/inputs/fail2ban/README.md
+++ b/plugins/inputs/fail2ban/README.md
@@ -1,31 +1,15 @@
 # Fail2ban Input Plugin
 
-The fail2ban plugin gathers the count of failed and banned ip addresses using [fail2ban](https://www.fail2ban.org).
+The fail2ban plugin gathers the count of failed and banned IP addresses using
+[fail2ban](https://www.fail2ban.org).
 
 This plugin runs the `fail2ban-client` command which generally requires root access.
 Acquiring the required permissions can be done using several methods:
 
-- Use sudo run fail2ban-client.
+- [Use sudo](#using-sudo) to run fail2ban-client.
 - Run telegraf as root. (not recommended)
 
-### Using sudo
-
-You will need the following in your telegraf config:
-```toml
-[[inputs.fail2ban]]
-  use_sudo = true
-```
-
-You will also need to update your sudoers file:
-```bash
-$ visudo
-# Add the following line:
-Cmnd_Alias FAIL2BAN = /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status *
-telegraf ALL=(root) NOEXEC: NOPASSWD: FAIL2BAN
-Defaults!FAIL2BAN !logfile, !syslog, !pam_session
-```
-
-### Configuration:
+### Configuration
 
 ```toml
 # Read metrics from fail2ban.
@@ -34,18 +18,37 @@ Defaults!FAIL2BAN !logfile, !syslog, !pam_session
   use_sudo = false
 ```
 
-### Measurements & Fields:
+### Using sudo
+
+Make sure to set `use_sudo = true` in your configuration file.
+
+You will also need to update your sudoers file. It is recommended to modify a
+file in the `/etc/sudoers.d` directory using `visudo`:
+
+```bash
+$ sudo visudo -f /etc/sudoers.d/telegraf
+```
+
+Add the following lines to the file; these commands allow the `telegraf` user
+to call `fail2ban-client` without needing to provide a password and disable
+logging of the call in the auth.log. Consult `man 8 visudo` and `man 5
+sudoers` for details. 
+``` +Cmnd_Alias FAIL2BAN = /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status * +telegraf ALL=(root) NOEXEC: NOPASSWD: FAIL2BAN +Defaults!FAIL2BAN !logfile, !syslog, !pam_session +``` + +### Metrics - fail2ban - - failed (integer, count) - - banned (integer, count) + - tags: + - jail + - fields: + - failed (integer, count) + - banned (integer, count) -### Tags: - -- All measurements have the following tags: - - jail - -### Example Output: +### Example Output ``` # fail2ban-client status sshd From 78d3b86581bab9df9758a3ffd9f47ea238c0421b Mon Sep 17 00:00:00 2001 From: dupondje Date: Fri, 2 Aug 2019 21:42:25 +0200 Subject: [PATCH 1054/1815] Add Indices stats to elasticsearch input (#6060) --- etc/telegraf.conf | 8 +- plugins/inputs/elasticsearch/README.md | 244 +- plugins/inputs/elasticsearch/elasticsearch.go | 191 +- .../elasticsearch/elasticsearch_test.go | 52 +- plugins/inputs/elasticsearch/testdata_test.go | 2169 ++++++++++++++++- 5 files changed, 2625 insertions(+), 39 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index ecefb668e..27d319c20 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -2267,6 +2267,12 @@ # ## Only gather cluster_stats from the master node. To work this require local = true # cluster_stats_only_from_master = true # +# ## Indices to collect; can be one or more indices names or _all +# indices_include = ["_all"] +# +# ## One of "shards", "cluster", "indices" +# indices_level = "shards" +# # ## node_stats is a list of sub-stats that you want to have gathered. Valid options # ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http", # ## "breaker". Per default, all stats are gathered. @@ -3512,7 +3518,7 @@ # reverse_metric_names = true -# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver +# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver # [[inputs.opensmtpd]] # ## If running as a restricted user you can prepend sudo for additional access: # #use_sudo = false diff --git a/plugins/inputs/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md index 445e6f82e..57c107cc2 100644 --- a/plugins/inputs/elasticsearch/README.md +++ b/plugins/inputs/elasticsearch/README.md @@ -1,15 +1,32 @@ # Elasticsearch Input Plugin The [elasticsearch](https://www.elastic.co/) plugin queries endpoints to obtain -[node](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) -and optionally [cluster-health](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) -or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) metrics. +[Node Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) +and optionally +[Cluster-Health](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) +metrics. 
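+
+As a quick way to inspect the raw data this plugin consumes, the node stats
+and cluster health endpoints can be queried directly with curl (a sketch
+only; the host and port are the defaults from the sample configuration below):
+
+```bash
+curl 'http://localhost:9200/_nodes/_local/stats'
+curl 'http://localhost:9200/_cluster/health?level=indices'
+```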
+
+In addition, the following optional queries are only made by the master node:
+  [Cluster Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html)
+  [Indices Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html)
+  [Shard Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html)
+
+Specific Elasticsearch endpoints that are queried:
+- Node: either /_nodes/stats or /_nodes/_local/stats depending on 'local' configuration setting
+- Cluster Health: /_cluster/health?level=indices
+- Cluster Stats: /_cluster/stats
+- Indices Stats: /_all/_stats
+- Shard Stats: /_all/_stats?level=shards
+
+Note that specific statistics information can change between Elasticsearch versions. In general, this plugin attempts to stay as version-generic as possible by tagging high-level categories only and using a generic JSON parser to build unique field names from whatever statistics names are provided at the mid-low level.
 
 ### Configuration
 
 ```toml
 [[inputs.elasticsearch]]
   ## specify a list of one or more Elasticsearch servers
+  ## you can add username and password to your url to use basic authentication:
+  ## servers = ["http://user:pass@localhost:9200"]
   servers = ["http://localhost:9200"]
 
   ## Timeout for HTTP requests to the elastic search server(s)
@@ -20,21 +37,28 @@ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/curre
   ## of the cluster.
   local = true
 
-  ## Set cluster_health to true when you want to also obtain cluster health stats
+  ## Set cluster_health to true when you want to obtain cluster health stats
   cluster_health = false
 
-  ## Adjust cluster_health_level when you want to also obtain detailed health stats
+  ## Adjust cluster_health_level when you want to obtain detailed health stats
   ## The options are
   ##  - indices (default)
   ##  - cluster
   # cluster_health_level = "indices"
 
-  ## Set cluster_stats to true when you want to also obtain cluster stats.
+  ## Set cluster_stats to true when you want to obtain cluster stats.
   cluster_stats = false
 
   ## Only gather cluster_stats from the master node. To work this require local = true
   cluster_stats_only_from_master = true
 
+  ## Indices to collect; can be one or more indices names or _all
+  indices_include = ["_all"]
+
+  ## One of "shards", "cluster", "indices"
+  ## Currently only "shards" is implemented
+  indices_level = "shards"
+
   ## node_stats is a list of sub-stats that you want to have gathered. Valid options
   ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
   ## "breaker". Per default, all stats are gathered.
@@ -618,3 +642,211 @@ Emitted when the appropriate `node_stats` options are set.
   - write_queue (float)
   - write_rejected (float)
   - write_threads (float)
+
+Emitted when the appropriate `indices_stats` options are set.
+ +- elasticsearch_indices_stats_(primaries|total) + - tags: + - index_name + - fields: + - completion_size_in_bytes (float) + - docs_count (float) + - docs_deleted (float) + - fielddata_evictions (float) + - fielddata_memory_size_in_bytes (float) + - flush_periodic (float) + - flush_total (float) + - flush_total_time_in_millis (float) + - get_current (float) + - get_exists_time_in_millis (float) + - get_exists_total (float) + - get_missing_time_in_millis (float) + - get_missing_total (float) + - get_time_in_millis (float) + - get_total (float) + - indexing_delete_current (float) + - indexing_delete_time_in_millis (float) + - indexing_delete_total (float) + - indexing_index_current (float) + - indexing_index_failed (float) + - indexing_index_time_in_millis (float) + - indexing_index_total (float) + - indexing_is_throttled (float) + - indexing_noop_update_total (float) + - indexing_throttle_time_in_millis (float) + - merges_current (float) + - merges_current_docs (float) + - merges_current_size_in_bytes (float) + - merges_total (float) + - merges_total_auto_throttle_in_bytes (float) + - merges_total_docs (float) + - merges_total_size_in_bytes (float) + - merges_total_stopped_time_in_millis (float) + - merges_total_throttled_time_in_millis (float) + - merges_total_time_in_millis (float) + - query_cache_cache_count (float) + - query_cache_cache_size (float) + - query_cache_evictions (float) + - query_cache_hit_count (float) + - query_cache_memory_size_in_bytes (float) + - query_cache_miss_count (float) + - query_cache_total_count (float) + - recovery_current_as_source (float) + - recovery_current_as_target (float) + - recovery_throttle_time_in_millis (float) + - refresh_external_total (float) + - refresh_external_total_time_in_millis (float) + - refresh_listeners (float) + - refresh_total (float) + - refresh_total_time_in_millis (float) + - request_cache_evictions (float) + - request_cache_hit_count (float) + - request_cache_memory_size_in_bytes (float) + - request_cache_miss_count (float) + - search_fetch_current (float) + - search_fetch_time_in_millis (float) + - search_fetch_total (float) + - search_open_contexts (float) + - search_query_current (float) + - search_query_time_in_millis (float) + - search_query_total (float) + - search_scroll_current (float) + - search_scroll_time_in_millis (float) + - search_scroll_total (float) + - search_suggest_current (float) + - search_suggest_time_in_millis (float) + - search_suggest_total (float) + - segments_count (float) + - segments_doc_values_memory_in_bytes (float) + - segments_fixed_bit_set_memory_in_bytes (float) + - segments_index_writer_memory_in_bytes (float) + - segments_max_unsafe_auto_id_timestamp (float) + - segments_memory_in_bytes (float) + - segments_norms_memory_in_bytes (float) + - segments_points_memory_in_bytes (float) + - segments_stored_fields_memory_in_bytes (float) + - segments_term_vectors_memory_in_bytes (float) + - segments_terms_memory_in_bytes (float) + - segments_version_map_memory_in_bytes (float) + - store_size_in_bytes (float) + - translog_earliest_last_modified_age (float) + - translog_operations (float) + - translog_size_in_bytes (float) + - translog_uncommitted_operations (float) + - translog_uncommitted_size_in_bytes (float) + - warmer_current (float) + - warmer_total (float) + - warmer_total_time_in_millis (float) + +Emitted when the appropriate `shards_stats` options are set. 
+ +- elasticsearch_indices_stats_shards_total + - fields: + - failed (float) + - successful (float) + - total (float) + +- elasticsearch_indices_stats_shards + - tags: + - index_name + - node_name + - shard_name + - type + - fields: + - commit_generation (float) + - commit_num_docs (float) + - completion_size_in_bytes (float) + - docs_count (float) + - docs_deleted (float) + - fielddata_evictions (float) + - fielddata_memory_size_in_bytes (float) + - flush_periodic (float) + - flush_total (float) + - flush_total_time_in_millis (float) + - get_current (float) + - get_exists_time_in_millis (float) + - get_exists_total (float) + - get_missing_time_in_millis (float) + - get_missing_total (float) + - get_time_in_millis (float) + - get_total (float) + - indexing_delete_current (float) + - indexing_delete_time_in_millis (float) + - indexing_delete_total (float) + - indexing_index_current (float) + - indexing_index_failed (float) + - indexing_index_time_in_millis (float) + - indexing_index_total (float) + - indexing_is_throttled (bool) + - indexing_noop_update_total (float) + - indexing_throttle_time_in_millis (float) + - merges_current (float) + - merges_current_docs (float) + - merges_current_size_in_bytes (float) + - merges_total (float) + - merges_total_auto_throttle_in_bytes (float) + - merges_total_docs (float) + - merges_total_size_in_bytes (float) + - merges_total_stopped_time_in_millis (float) + - merges_total_throttled_time_in_millis (float) + - merges_total_time_in_millis (float) + - query_cache_cache_count (float) + - query_cache_cache_size (float) + - query_cache_evictions (float) + - query_cache_hit_count (float) + - query_cache_memory_size_in_bytes (float) + - query_cache_miss_count (float) + - query_cache_total_count (float) + - recovery_current_as_source (float) + - recovery_current_as_target (float) + - recovery_throttle_time_in_millis (float) + - refresh_external_total (float) + - refresh_external_total_time_in_millis (float) + - refresh_listeners (float) + - refresh_total (float) + - refresh_total_time_in_millis (float) + - request_cache_evictions (float) + - request_cache_hit_count (float) + - request_cache_memory_size_in_bytes (float) + - request_cache_miss_count (float) + - retention_leases_primary_term (float) + - retention_leases_version (float) + - routing_state (int) (UNASSIGNED = 1, INITIALIZING = 2, STARTED = 3, RELOCATING = 4, other = 0) + - search_fetch_current (float) + - search_fetch_time_in_millis (float) + - search_fetch_total (float) + - search_open_contexts (float) + - search_query_current (float) + - search_query_time_in_millis (float) + - search_query_total (float) + - search_scroll_current (float) + - search_scroll_time_in_millis (float) + - search_scroll_total (float) + - search_suggest_current (float) + - search_suggest_time_in_millis (float) + - search_suggest_total (float) + - segments_count (float) + - segments_doc_values_memory_in_bytes (float) + - segments_fixed_bit_set_memory_in_bytes (float) + - segments_index_writer_memory_in_bytes (float) + - segments_max_unsafe_auto_id_timestamp (float) + - segments_memory_in_bytes (float) + - segments_norms_memory_in_bytes (float) + - segments_points_memory_in_bytes (float) + - segments_stored_fields_memory_in_bytes (float) + - segments_term_vectors_memory_in_bytes (float) + - segments_terms_memory_in_bytes (float) + - segments_version_map_memory_in_bytes (float) + - seq_no_global_checkpoint (float) + - seq_no_local_checkpoint (float) + - seq_no_max_seq_no (float) + - shard_path_is_custom_data_path (bool) + - 
+    - store_size_in_bytes (float)
+    - translog_earliest_last_modified_age (float)
+    - translog_operations (float)
+    - translog_size_in_bytes (float)
+    - translog_uncommitted_operations (float)
+    - translog_uncommitted_size_in_bytes (float)
+    - warmer_current (float)
+    - warmer_total (float)
+    - warmer_total_time_in_millis (float)
\ No newline at end of file
diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go
index 71ef2a01a..7cecb2357 100644
--- a/plugins/inputs/elasticsearch/elasticsearch.go
+++ b/plugins/inputs/elasticsearch/elasticsearch.go
@@ -79,10 +79,10 @@ type clusterStats struct {
 	Nodes       interface{} `json:"nodes"`
 }
 
-type catMaster struct {
-	NodeID   string `json:"id"`
-	NodeIP   string `json:"ip"`
-	NodeName string `json:"node"`
+type indexStat struct {
+	Primaries interface{}              `json:"primaries"`
+	Total     interface{}              `json:"total"`
+	Shards    map[string][]interface{} `json:"shards"`
 }
 
 const sampleConfig = `
@@ -114,6 +114,12 @@ const sampleConfig = `
   ## Only gather cluster_stats from the master node. For this to work, local = true is required.
   cluster_stats_only_from_master = true
 
+  ## Indices to collect; can be one or more index names or _all
+  indices_include = ["_all"]
+
+  ## One of "shards", "cluster", "indices"
+  indices_level = "shards"
+
   ## node_stats is a list of sub-stats that you want to have gathered. Valid options
   ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
   ## "breaker". Per default, all stats are gathered.
@@ -134,16 +140,18 @@ const sampleConfig = `
 // Elasticsearch is a plugin to read stats from one or many Elasticsearch
 // servers.
 type Elasticsearch struct {
-	Local                      bool
-	Servers                    []string
-	HttpTimeout                internal.Duration
-	ClusterHealth              bool
-	ClusterHealthLevel         string
-	ClusterStats               bool
-	ClusterStatsOnlyFromMaster bool
-	NodeStats                  []string
-	Username                   string `toml:"username"`
-	Password                   string `toml:"password"`
+	Local                      bool              `toml:"local"`
+	Servers                    []string          `toml:"servers"`
+	HTTPTimeout                internal.Duration `toml:"http_timeout"`
+	ClusterHealth              bool              `toml:"cluster_health"`
+	ClusterHealthLevel         string            `toml:"cluster_health_level"`
+	ClusterStats               bool              `toml:"cluster_stats"`
+	ClusterStatsOnlyFromMaster bool              `toml:"cluster_stats_only_from_master"`
+	IndicesInclude             []string          `toml:"indices_include"`
+	IndicesLevel               string            `toml:"indices_level"`
+	NodeStats                  []string          `toml:"node_stats"`
+	Username                   string            `toml:"username"`
+	Password                   string            `toml:"password"`
 	tls.ClientConfig
 
 	client *http.Client
@@ -162,7 +170,7 @@ func (i serverInfo) isMaster() bool {
 // NewElasticsearch returns a new instance of Elasticsearch
 func NewElasticsearch() *Elasticsearch {
 	return &Elasticsearch{
-		HttpTimeout:                internal.Duration{Duration: time.Second * 5},
+		HTTPTimeout:                internal.Duration{Duration: time.Second * 5},
 		ClusterStatsOnlyFromMaster: true,
 		ClusterHealthLevel:         "indices",
 	}
@@ -181,6 +189,21 @@ func mapHealthStatusToCode(s string) int {
 	return 0
 }
 
+// mapShardStatusToCode maps a shard routing state to a numeric code
+func mapShardStatusToCode(s string) int {
+	switch strings.ToUpper(s) {
+	case "UNASSIGNED":
+		return 1
+	case "INITIALIZING":
+		return 2
+	case "STARTED":
+		return 3
+	case "RELOCATING":
+		return 4
+	}
+	return 0
+}
+
 // SampleConfig returns sample configuration for this plugin.
 func (e *Elasticsearch) SampleConfig() string {
 	return sampleConfig
@@ -195,7 +218,7 @@ func (e *Elasticsearch) Description() string {
 // Gather reads the stats from Elasticsearch and writes it to the
 // Accumulator.
func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error { if e.client == nil { - client, err := e.createHttpClient() + client, err := e.createHTTPClient() if err != nil { return err @@ -203,7 +226,7 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error { e.client = client } - if e.ClusterStats { + if e.ClusterStats || len(e.IndicesInclude) > 0 || len(e.IndicesLevel) > 0 { var wgC sync.WaitGroup wgC.Add(len(e.Servers)) @@ -243,7 +266,7 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error { for _, serv := range e.Servers { go func(s string, acc telegraf.Accumulator) { defer wg.Done() - url := e.nodeStatsUrl(s) + url := e.nodeStatsURL(s) // Always gather node stats if err := e.gatherNodeStats(url, acc); err != nil { @@ -268,6 +291,20 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error { return } } + + if len(e.IndicesInclude) > 0 && (e.serverInfo[s].isMaster() || !e.ClusterStatsOnlyFromMaster || !e.Local) { + if e.IndicesLevel != "shards" { + if err := e.gatherIndicesStats(s+"/"+strings.Join(e.IndicesInclude, ",")+"/_stats", acc); err != nil { + acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))) + return + } + } else { + if err := e.gatherIndicesStats(s+"/"+strings.Join(e.IndicesInclude, ",")+"/_stats?level=shards", acc); err != nil { + acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))) + return + } + } + } }(serv, acc) } @@ -275,30 +312,30 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error { return nil } -func (e *Elasticsearch) createHttpClient() (*http.Client, error) { +func (e *Elasticsearch) createHTTPClient() (*http.Client, error) { tlsCfg, err := e.ClientConfig.TLSConfig() if err != nil { return nil, err } tr := &http.Transport{ - ResponseHeaderTimeout: e.HttpTimeout.Duration, + ResponseHeaderTimeout: e.HTTPTimeout.Duration, TLSClientConfig: tlsCfg, } client := &http.Client{ Transport: tr, - Timeout: e.HttpTimeout.Duration, + Timeout: e.HTTPTimeout.Duration, } return client, nil } -func (e *Elasticsearch) nodeStatsUrl(baseUrl string) string { +func (e *Elasticsearch) nodeStatsURL(baseURL string) string { var url string if e.Local { - url = baseUrl + statsPathLocal + url = baseURL + statsPathLocal } else { - url = baseUrl + statsPath + url = baseURL + statsPath } if len(e.NodeStats) == 0 { @@ -313,7 +350,7 @@ func (e *Elasticsearch) gatherNodeID(url string) (string, error) { ClusterName string `json:"cluster_name"` Nodes map[string]*nodeStat `json:"nodes"` }{} - if err := e.gatherJsonData(url, nodeStats); err != nil { + if err := e.gatherJSONData(url, nodeStats); err != nil { return "", err } @@ -329,7 +366,7 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er ClusterName string `json:"cluster_name"` Nodes map[string]*nodeStat `json:"nodes"` }{} - if err := e.gatherJsonData(url, nodeStats); err != nil { + if err := e.gatherJSONData(url, nodeStats); err != nil { return err } @@ -380,7 +417,7 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator) error { healthStats := &clusterHealth{} - if err := e.gatherJsonData(url, healthStats); err != nil { + if err := e.gatherJSONData(url, healthStats); err != nil { return err } measurementTime := time.Now() @@ -432,7 +469,7 @@ func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator) 
error {
 	clusterStats := &clusterStats{}
-	if err := e.gatherJsonData(url, clusterStats); err != nil {
+	if err := e.gatherJSONData(url, clusterStats); err != nil {
 		return err
 	}
 	now := time.Now()
@@ -460,6 +497,102 @@ func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator)
 	return nil
 }
 
+func (e *Elasticsearch) gatherIndicesStats(url string, acc telegraf.Accumulator) error {
+	indicesStats := &struct {
+		Shards  map[string]interface{} `json:"_shards"`
+		All     map[string]interface{} `json:"_all"`
+		Indices map[string]indexStat   `json:"indices"`
+	}{}
+
+	if err := e.gatherJSONData(url, indicesStats); err != nil {
+		return err
+	}
+	now := time.Now()
+
+	// Total Shards Stats
+	shardsStats := map[string]interface{}{}
+	for k, v := range indicesStats.Shards {
+		shardsStats[k] = v
+	}
+	acc.AddFields("elasticsearch_indices_stats_shards_total", shardsStats, map[string]string{}, now)
+
+	// All Stats
+	for m, s := range indicesStats.All {
+		// flatten the JSON, converting strings and bools into fields
+		jsonParser := jsonparser.JSONFlattener{}
+		err := jsonParser.FullFlattenJSON("_", s, true, true)
+		if err != nil {
+			return err
+		}
+		acc.AddFields("elasticsearch_indices_stats_"+m, jsonParser.Fields, map[string]string{"index_name": "_all"}, now)
+	}
+
+	// Individual Indices stats
+	for id, index := range indicesStats.Indices {
+		indexTag := map[string]string{"index_name": id}
+		stats := map[string]interface{}{
+			"primaries": index.Primaries,
+			"total":     index.Total,
+		}
+		for m, s := range stats {
+			f := jsonparser.JSONFlattener{}
+			// flatten the JSON, converting strings and bools into fields
+			err := f.FullFlattenJSON("", s, true, true)
+			if err != nil {
+				return err
+			}
+			acc.AddFields("elasticsearch_indices_stats_"+m, f.Fields, indexTag, now)
+		}
+
+		if e.IndicesLevel == "shards" {
+			for shardNumber, shard := range index.Shards {
+				if len(shard) > 0 {
+					// Get Shard Stats
+					flattened := jsonparser.JSONFlattener{}
+					err := flattened.FullFlattenJSON("", shard[0], true, true)
+					if err != nil {
+						return err
+					}
+
+					// determine shard tag and primary/replica designation
+					shardType := "replica"
+					if flattened.Fields["routing_primary"] == true {
+						shardType = "primary"
+					}
+					delete(flattened.Fields, "routing_primary")
+
+					routingState, ok := flattened.Fields["routing_state"].(string)
+					if ok {
+						flattened.Fields["routing_state"] = mapShardStatusToCode(routingState)
+					}
+
+					routingNode, _ := flattened.Fields["routing_node"].(string)
+					shardTags := map[string]string{
+						"index_name": id,
+						"node_id":    routingNode,
+						"shard_name": shardNumber,
+						"type":       shardType,
+					}
+
+					for key, field := range flattened.Fields {
+						switch field.(type) {
+						case string, bool:
+							delete(flattened.Fields, key)
+						}
+					}
+
+					acc.AddFields("elasticsearch_indices_stats_shards",
+						flattened.Fields,
+						shardTags,
+						now)
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
 func (e *Elasticsearch) getCatMaster(url string) (string, error) {
 	req, err := http.NewRequest("GET", url, nil)
 	if err != nil {
@@ -492,7 +625,7 @@ func (e *Elasticsearch) getCatMaster(url string) (string, error) {
 	return masterID, nil
 }
 
-func (e *Elasticsearch) gatherJsonData(url string, v interface{}) error {
+func (e *Elasticsearch) gatherJSONData(url string, v interface{}) error {
 	req, err := http.NewRequest("GET", url, nil)
 	if err != nil {
 		return err
diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go
index 4bf5c6a55..e70923bc0 100644
--- a/plugins/inputs/elasticsearch/elasticsearch_test.go
+++
b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -70,7 +70,7 @@ func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) { acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", nodestatsThreadPoolExpected, tags) acc.AssertContainsTaggedFields(t, "elasticsearch_fs", nodestatsFsExpected, tags) acc.AssertContainsTaggedFields(t, "elasticsearch_transport", nodestatsTransportExpected, tags) - acc.AssertContainsTaggedFields(t, "elasticsearch_http", nodestatsHttpExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_http", nodestatsHTTPExpected, tags) acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", nodestatsBreakersExpected, tags) } @@ -113,7 +113,7 @@ func TestGatherIndividualStats(t *testing.T) { acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_thread_pool", nodestatsThreadPoolExpected, tags) acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_fs", nodestatsFsExpected, tags) acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_transport", nodestatsTransportExpected, tags) - acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_http", nodestatsHttpExpected, tags) + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_http", nodestatsHTTPExpected, tags) acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_breakers", nodestatsBreakersExpected, tags) } @@ -292,6 +292,54 @@ func TestGatherClusterStatsNonMaster(t *testing.T) { checkNodeStatsResult(t, &acc) } +func TestGatherClusterIndicesStats(t *testing.T) { + es := newElasticsearchWithClient() + es.IndicesInclude = []string{"_all"} + es.Servers = []string{"http://example.com:9200"} + es.client.Transport = newTransportMock(http.StatusOK, clusterIndicesResponse) + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = defaultServerInfo() + + var acc testutil.Accumulator + if err := es.gatherIndicesStats("junk", &acc); err != nil { + t.Fatal(err) + } + + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "twitter"}) +} + +func TestGatherClusterIndiceShardsStats(t *testing.T) { + es := newElasticsearchWithClient() + es.IndicesLevel = "shards" + es.Servers = []string{"http://example.com:9200"} + es.client.Transport = newTransportMock(http.StatusOK, clusterIndicesShardsResponse) + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = defaultServerInfo() + + var acc testutil.Accumulator + if err := es.gatherIndicesStats("junk", &acc); err != nil { + t.Fatal(err) + } + + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "twitter"}) + + tags := map[string]string{ + "index_name": "twitter", + "node_id": "oqvR8I1dTpONvwRM30etww", + "shard_name": "1", + "type": "replica", + } + + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_shards", + clusterIndicesShardsExpected, + tags) + +} + func newElasticsearchWithClient() *Elasticsearch { es := NewElasticsearch() es.client = &http.Client{} diff --git a/plugins/inputs/elasticsearch/testdata_test.go b/plugins/inputs/elasticsearch/testdata_test.go index c637bb9a9..63c21a85c 100644 --- a/plugins/inputs/elasticsearch/testdata_test.go +++ b/plugins/inputs/elasticsearch/testdata_test.go @@ -1291,7 +1291,7 @@ var nodestatsTransportExpected = map[string]interface{}{ "tx_size_in_bytes": float64(1380), } -var nodestatsHttpExpected = map[string]interface{}{ +var nodestatsHTTPExpected = map[string]interface{}{ 
"current_open": float64(3), "total_opened": float64(3), } @@ -1580,3 +1580,2170 @@ var clusterstatsNodesExpected = map[string]interface{}{ const IsMasterResult = "SDFsfSDFsdfFSDSDfSFDSDF 10.206.124.66 10.206.124.66 test.host.com " const IsNotMasterResult = "junk 10.206.124.66 10.206.124.66 test.junk.com " + +const clusterIndicesResponse = ` +{ + "_shards": { + "total": 9, + "successful": 6, + "failed": 0 + }, + "_all": { + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + 
"current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "indices": { + "twitter": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { 
+ "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 
+ }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + } + } +}` + +var clusterIndicesExpected = map[string]interface{}{ + "completion_size_in_bytes": float64(0), + "docs_count": float64(999), + "docs_deleted": float64(0), + "fielddata_evictions": float64(0), + "fielddata_memory_size_in_bytes": float64(0), + "flush_periodic": float64(0), + "flush_total": float64(0), + "flush_total_time_in_millis": float64(0), + "get_current": float64(0), + "get_exists_time_in_millis": float64(0), + "get_exists_total": float64(0), + "get_missing_time_in_millis": float64(0), + "get_missing_total": float64(0), + "get_time_in_millis": float64(0), + "get_total": float64(0), + "indexing_delete_current": float64(0), + "indexing_delete_time_in_millis": float64(0), + "indexing_delete_total": float64(0), + "indexing_index_current": float64(0), + "indexing_index_failed": float64(0), + "indexing_index_time_in_millis": float64(548), + "indexing_index_total": float64(999), + "indexing_is_throttled": false, + "indexing_noop_update_total": float64(0), + "indexing_throttle_time_in_millis": float64(0), + "merges_current": float64(0), + "merges_current_docs": float64(0), + "merges_current_size_in_bytes": float64(0), + "merges_total": float64(0), + "merges_total_auto_throttle_in_bytes": float64(62914560), + "merges_total_docs": float64(0), + "merges_total_size_in_bytes": float64(0), + "merges_total_stopped_time_in_millis": float64(0), + "merges_total_throttled_time_in_millis": float64(0), + "merges_total_time_in_millis": float64(0), + "query_cache_cache_count": float64(0), + "query_cache_cache_size": float64(0), + "query_cache_evictions": float64(0), + "query_cache_hit_count": float64(0), + "query_cache_memory_size_in_bytes": float64(0), + "query_cache_miss_count": float64(0), + "query_cache_total_count": float64(0), + "recovery_current_as_source": float64(0), + "recovery_current_as_target": float64(0), + "recovery_throttle_time_in_millis": float64(0), + "refresh_external_total": float64(9), + "refresh_external_total_time_in_millis": float64(258), + "refresh_listeners": float64(0), + "refresh_total": float64(9), + "refresh_total_time_in_millis": float64(256), + "request_cache_evictions": float64(0), + "request_cache_hit_count": float64(0), + "request_cache_memory_size_in_bytes": float64(0), + "request_cache_miss_count": float64(0), + "search_fetch_current": float64(0), + "search_fetch_time_in_millis": float64(0), + "search_fetch_total": float64(0), + "search_open_contexts": float64(0), + "search_query_current": float64(0), + "search_query_time_in_millis": float64(0), + "search_query_total": float64(0), + "search_scroll_current": float64(0), + "search_scroll_time_in_millis": float64(0), + "search_scroll_total": float64(0), + "search_suggest_current": float64(0), + "search_suggest_time_in_millis": float64(0), + "search_suggest_total": float64(0), + "segments_count": float64(3), + "segments_doc_values_memory_in_bytes": float64(204), + "segments_fixed_bit_set_memory_in_bytes": float64(0), + "segments_index_writer_memory_in_bytes": float64(0), + "segments_max_unsafe_auto_id_timestamp": float64(-1), + "segments_memory_in_bytes": float64(12849), + "segments_norms_memory_in_bytes": float64(1152), + "segments_points_memory_in_bytes": float64(9), + "segments_stored_fields_memory_in_bytes": float64(904), + "segments_term_vectors_memory_in_bytes": float64(0), + 
"segments_terms_memory_in_bytes": float64(10580), + "segments_version_map_memory_in_bytes": float64(0), + "store_size_in_bytes": float64(267500), + "translog_earliest_last_modified_age": float64(0), + "translog_operations": float64(999), + "translog_size_in_bytes": float64(226444), + "translog_uncommitted_operations": float64(999), + "translog_uncommitted_size_in_bytes": float64(226444), + "warmer_current": float64(0), + "warmer_total": float64(6), + "warmer_total_time_in_millis": float64(0), +} + +const clusterIndicesShardsResponse = ` +{ + "_shards": { + "total": 9, + "successful": 6, + "failed": 0 + }, + "_all": { + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + 
"is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "indices": { + "twitter": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + 
"total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + 
"max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "shards": { + "0": [ + { + "routing": { + "state": "STARTED", + "primary": true, + "node": "oqvR8I1dTpONvwRM30etww", + "relocating_node": null + }, + "docs": { + "count": 340, + "deleted": 0 + }, + "store": { + "size_in_bytes": 90564 + }, + "indexing": { + "index_total": 340, + "index_time_in_millis": 176, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 20971520 + }, + "refresh": { + "total": 6, + "total_time_in_millis": 103, + "external_total": 4, + "external_total_time_in_millis": 105, + "listeners": 0 + }, + "flush": { + "total": 1, + "periodic": 0, + "total_time_in_millis": 32 + }, + "warmer": { + "current": 0, + "total": 3, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 1, + "memory_in_bytes": 4301, + "terms_memory_in_bytes": 3534, + "stored_fields_memory_in_bytes": 312, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 384, + "points_memory_in_bytes": 3, + "doc_values_memory_in_bytes": 68, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 340, + "size_in_bytes": 77158, + "uncommitted_operations": 0, + "uncommitted_size_in_bytes": 55, + "earliest_last_modified_age": 936870 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + }, + "commit": { + "id": "13gxQDHZ96BnNkzSgEdElQ==", + "generation": 4, + "user_data": { + "local_checkpoint": "339", + "max_unsafe_auto_id_timestamp": "-1", + "min_retained_seq_no": "340", + "translog_uuid": "4rp02VCQRTSJXgochWk3Hg", + "history_uuid": "-od5QvNmQlero8jatbG-5w", + "sync_id": "KKglZYafSaWN_MFUbpNviA", + "translog_generation": "3", + "max_seq_no": "339" + }, + 
"num_docs": 340 + }, + "seq_no": { + "max_seq_no": 339, + "local_checkpoint": 339, + "global_checkpoint": 339 + }, + "retention_leases": { + "primary_term": 1, + "version": 0, + "leases": [] + }, + "shard_path": { + "state_path": "/usr/share/elasticsearch/data/nodes/0", + "data_path": "/usr/share/elasticsearch/data/nodes/0", + "is_custom_data_path": false + } + }, + { + "routing": { + "state": "STARTED", + "primary": false, + "node": "0jfDeZxuTsGblcDGa39DzQ", + "relocating_node": null + }, + "docs": { + "count": 340, + "deleted": 0 + }, + "store": { + "size_in_bytes": 90564 + }, + "indexing": { + "index_total": 340, + "index_time_in_millis": 99, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 20971520 + }, + "refresh": { + "total": 6, + "total_time_in_millis": 139, + "external_total": 4, + "external_total_time_in_millis": 140, + "listeners": 0 + }, + "flush": { + "total": 1, + "periodic": 0, + "total_time_in_millis": 34 + }, + "warmer": { + "current": 0, + "total": 3, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 1, + "memory_in_bytes": 4301, + "terms_memory_in_bytes": 3534, + "stored_fields_memory_in_bytes": 312, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 384, + "points_memory_in_bytes": 3, + "doc_values_memory_in_bytes": 68, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 340, + "size_in_bytes": 77158, + "uncommitted_operations": 0, + "uncommitted_size_in_bytes": 55, + "earliest_last_modified_age": 936653 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + }, + "commit": { + "id": "A8QO9SiMWYX000riUOApBg==", + "generation": 5, + "user_data": { + "local_checkpoint": "339", + "max_unsafe_auto_id_timestamp": "-1", + "min_retained_seq_no": "340", + "translog_uuid": "9kWpEKQyQ3yIUwwEp4fP8A", + "history_uuid": "-od5QvNmQlero8jatbG-5w", + "sync_id": "KKglZYafSaWN_MFUbpNviA", + "translog_generation": "3", + "max_seq_no": "339" + }, + "num_docs": 340 + }, + "seq_no": { + "max_seq_no": 339, + "local_checkpoint": 339, + "global_checkpoint": 339 + }, + 
"retention_leases": { + "primary_term": 1, + "version": 0, + "leases": [] + }, + "shard_path": { + "state_path": "/usr/share/elasticsearch/data/nodes/0", + "data_path": "/usr/share/elasticsearch/data/nodes/0", + "is_custom_data_path": false + } + } + ], + "1": [ + { + "routing": { + "state": "STARTED", + "primary": false, + "node": "oqvR8I1dTpONvwRM30etww", + "relocating_node": null + }, + "docs": { + "count": 352, + "deleted": 0 + }, + "store": { + "size_in_bytes": 94584 + }, + "indexing": { + "index_total": 352, + "index_time_in_millis": 66, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 20971520 + }, + "refresh": { + "total": 6, + "total_time_in_millis": 104, + "external_total": 4, + "external_total_time_in_millis": 106, + "listeners": 0 + }, + "flush": { + "total": 1, + "periodic": 0, + "total_time_in_millis": 26 + }, + "warmer": { + "current": 0, + "total": 3, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 1, + "memory_in_bytes": 4280, + "terms_memory_in_bytes": 3529, + "stored_fields_memory_in_bytes": 296, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 384, + "points_memory_in_bytes": 3, + "doc_values_memory_in_bytes": 68, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 352, + "size_in_bytes": 79980, + "uncommitted_operations": 0, + "uncommitted_size_in_bytes": 55, + "earliest_last_modified_age": 936144 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + }, + "commit": { + "id": "13gxQDHZ96BnNkzSgEdEkg==", + "generation": 5, + "user_data": { + "local_checkpoint": "351", + "max_unsafe_auto_id_timestamp": "-1", + "min_retained_seq_no": "352", + "translog_uuid": "SjKxb5TIRqCinxWbqVBo-g", + "history_uuid": "3SAavs9KTPm-jhaioYg4UA", + "sync_id": "swZVzk6tShS0tcbBQt9AjA", + "translog_generation": "3", + "max_seq_no": "351" + }, + "num_docs": 352 + }, + "seq_no": { + "max_seq_no": 351, + "local_checkpoint": 351, + "global_checkpoint": 351 + }, + "retention_leases": { + "primary_term": 1, + "version": 0, + "leases": [] + }, + "shard_path": { + 
"state_path": "/usr/share/elasticsearch/data/nodes/0", + "data_path": "/usr/share/elasticsearch/data/nodes/0", + "is_custom_data_path": false + } + }, + { + "routing": { + "state": "STARTED", + "primary": true, + "node": "0jfDeZxuTsGblcDGa39DzQ", + "relocating_node": null + }, + "docs": { + "count": 352, + "deleted": 0 + }, + "store": { + "size_in_bytes": 94584 + }, + "indexing": { + "index_total": 352, + "index_time_in_millis": 154, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 20971520 + }, + "refresh": { + "total": 6, + "total_time_in_millis": 74, + "external_total": 4, + "external_total_time_in_millis": 74, + "listeners": 0 + }, + "flush": { + "total": 1, + "periodic": 0, + "total_time_in_millis": 29 + }, + "warmer": { + "current": 0, + "total": 3, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 1, + "memory_in_bytes": 4280, + "terms_memory_in_bytes": 3529, + "stored_fields_memory_in_bytes": 296, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 384, + "points_memory_in_bytes": 3, + "doc_values_memory_in_bytes": 68, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 352, + "size_in_bytes": 79980, + "uncommitted_operations": 0, + "uncommitted_size_in_bytes": 55, + "earliest_last_modified_age": 936839 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + }, + "commit": { + "id": "A8QO9SiMWYX000riUOApAw==", + "generation": 4, + "user_data": { + "local_checkpoint": "351", + "max_unsafe_auto_id_timestamp": "-1", + "min_retained_seq_no": "352", + "translog_uuid": "GpauXMbxQpWKUYGYqQUIdQ", + "history_uuid": "3SAavs9KTPm-jhaioYg4UA", + "sync_id": "swZVzk6tShS0tcbBQt9AjA", + "translog_generation": "3", + "max_seq_no": "351" + }, + "num_docs": 352 + }, + "seq_no": { + "max_seq_no": 351, + "local_checkpoint": 351, + "global_checkpoint": 351 + }, + "retention_leases": { + "primary_term": 1, + "version": 0, + "leases": [] + }, + "shard_path": { + "state_path": "/usr/share/elasticsearch/data/nodes/0", + "data_path": "/usr/share/elasticsearch/data/nodes/0", + 
"is_custom_data_path": false + } + } + ], + "2": [ + { + "routing": { + "state": "STARTED", + "primary": true, + "node": "oqvR8I1dTpONvwRM30etww", + "relocating_node": null + }, + "docs": { + "count": 307, + "deleted": 0 + }, + "store": { + "size_in_bytes": 82727 + }, + "indexing": { + "index_total": 307, + "index_time_in_millis": 218, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 20971520 + }, + "refresh": { + "total": 6, + "total_time_in_millis": 86, + "external_total": 4, + "external_total_time_in_millis": 87, + "listeners": 0 + }, + "flush": { + "total": 1, + "periodic": 0, + "total_time_in_millis": 33 + }, + "warmer": { + "current": 0, + "total": 3, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 1, + "memory_in_bytes": 4268, + "terms_memory_in_bytes": 3517, + "stored_fields_memory_in_bytes": 296, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 384, + "points_memory_in_bytes": 3, + "doc_values_memory_in_bytes": 68, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 307, + "size_in_bytes": 69471, + "uncommitted_operations": 0, + "uncommitted_size_in_bytes": 55, + "earliest_last_modified_age": 936881 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + }, + "commit": { + "id": "13gxQDHZ96BnNkzSgEdElg==", + "generation": 4, + "user_data": { + "local_checkpoint": "306", + "max_unsafe_auto_id_timestamp": "-1", + "min_retained_seq_no": "307", + "translog_uuid": "Y0a3bdIQTD2Ir6Ex9J3gSQ", + "history_uuid": "WmsCMyRyRaGz9mnR50wYFA", + "sync_id": "nvNppgfgTp63llS8r-Pwiw", + "translog_generation": "3", + "max_seq_no": "306" + }, + "num_docs": 307 + }, + "seq_no": { + "max_seq_no": 306, + "local_checkpoint": 306, + "global_checkpoint": 306 + }, + "retention_leases": { + "primary_term": 1, + "version": 0, + "leases": [] + }, + "shard_path": { + "state_path": "/usr/share/elasticsearch/data/nodes/0", + "data_path": "/usr/share/elasticsearch/data/nodes/0", + "is_custom_data_path": false + } + }, + { + "routing": { + "state": "STARTED", + "primary": false, + "node": 
"0jfDeZxuTsGblcDGa39DzQ", + "relocating_node": null + }, + "docs": { + "count": 307, + "deleted": 0 + }, + "store": { + "size_in_bytes": 82727 + }, + "indexing": { + "index_total": 307, + "index_time_in_millis": 80, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 20971520 + }, + "refresh": { + "total": 6, + "total_time_in_millis": 33, + "external_total": 4, + "external_total_time_in_millis": 30, + "listeners": 0 + }, + "flush": { + "total": 1, + "periodic": 0, + "total_time_in_millis": 37 + }, + "warmer": { + "current": 0, + "total": 3, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 1, + "memory_in_bytes": 4268, + "terms_memory_in_bytes": 3517, + "stored_fields_memory_in_bytes": 296, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 384, + "points_memory_in_bytes": 3, + "doc_values_memory_in_bytes": 68, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 307, + "size_in_bytes": 69471, + "uncommitted_operations": 0, + "uncommitted_size_in_bytes": 55, + "earliest_last_modified_age": 936696 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + }, + "commit": { + "id": "A8QO9SiMWYX000riUOApBw==", + "generation": 5, + "user_data": { + "local_checkpoint": "306", + "max_unsafe_auto_id_timestamp": "-1", + "min_retained_seq_no": "307", + "translog_uuid": "s62inR7FRA2p86axtAIvgA", + "history_uuid": "WmsCMyRyRaGz9mnR50wYFA", + "sync_id": "nvNppgfgTp63llS8r-Pwiw", + "translog_generation": "3", + "max_seq_no": "306" + }, + "num_docs": 307 + }, + "seq_no": { + "max_seq_no": 306, + "local_checkpoint": 306, + "global_checkpoint": 306 + }, + "retention_leases": { + "primary_term": 1, + "version": 0, + "leases": [] + }, + "shard_path": { + "state_path": "/usr/share/elasticsearch/data/nodes/0", + "data_path": "/usr/share/elasticsearch/data/nodes/0", + "is_custom_data_path": false + } + } + ] + } + } + } +}` + +var clusterIndicesShardsExpected = map[string]interface{}{ + "commit_generation": float64(5), + "commit_num_docs": float64(352), + "completion_size_in_bytes": float64(0), 
+ "docs_count": float64(352), + "docs_deleted": float64(0), + "fielddata_evictions": float64(0), + "fielddata_memory_size_in_bytes": float64(0), + "flush_periodic": float64(0), + "flush_total": float64(1), + "flush_total_time_in_millis": float64(26), + "get_current": float64(0), + "get_exists_time_in_millis": float64(0), + "get_exists_total": float64(0), + "get_missing_time_in_millis": float64(0), + "get_missing_total": float64(0), + "get_time_in_millis": float64(0), + "get_total": float64(0), + "indexing_delete_current": float64(0), + "indexing_delete_time_in_millis": float64(0), + "indexing_delete_total": float64(0), + "indexing_index_current": float64(0), + "indexing_index_failed": float64(0), + "indexing_index_time_in_millis": float64(66), + "indexing_index_total": float64(352), + "indexing_noop_update_total": float64(0), + "indexing_throttle_time_in_millis": float64(0), + "merges_current": float64(0), + "merges_current_docs": float64(0), + "merges_current_size_in_bytes": float64(0), + "merges_total": float64(0), + "merges_total_auto_throttle_in_bytes": float64(20971520), + "merges_total_docs": float64(0), + "merges_total_size_in_bytes": float64(0), + "merges_total_stopped_time_in_millis": float64(0), + "merges_total_throttled_time_in_millis": float64(0), + "merges_total_time_in_millis": float64(0), + "query_cache_cache_count": float64(0), + "query_cache_cache_size": float64(0), + "query_cache_evictions": float64(0), + "query_cache_hit_count": float64(0), + "query_cache_memory_size_in_bytes": float64(0), + "query_cache_miss_count": float64(0), + "query_cache_total_count": float64(0), + "recovery_current_as_source": float64(0), + "recovery_current_as_target": float64(0), + "recovery_throttle_time_in_millis": float64(0), + "refresh_external_total": float64(4), + "refresh_external_total_time_in_millis": float64(106), + "refresh_listeners": float64(0), + "refresh_total": float64(6), + "refresh_total_time_in_millis": float64(104), + "request_cache_evictions": float64(0), + "request_cache_hit_count": float64(0), + "request_cache_memory_size_in_bytes": float64(0), + "request_cache_miss_count": float64(0), + "retention_leases_primary_term": float64(1), + "retention_leases_version": float64(0), + "routing_state": int(3), + "search_fetch_current": float64(0), + "search_fetch_time_in_millis": float64(0), + "search_fetch_total": float64(0), + "search_open_contexts": float64(0), + "search_query_current": float64(0), + "search_query_time_in_millis": float64(0), + "search_query_total": float64(0), + "search_scroll_current": float64(0), + "search_scroll_time_in_millis": float64(0), + "search_scroll_total": float64(0), + "search_suggest_current": float64(0), + "search_suggest_time_in_millis": float64(0), + "search_suggest_total": float64(0), + "segments_count": float64(1), + "segments_doc_values_memory_in_bytes": float64(68), + "segments_fixed_bit_set_memory_in_bytes": float64(0), + "segments_index_writer_memory_in_bytes": float64(0), + "segments_max_unsafe_auto_id_timestamp": float64(-1), + "segments_memory_in_bytes": float64(4280), + "segments_norms_memory_in_bytes": float64(384), + "segments_points_memory_in_bytes": float64(3), + "segments_stored_fields_memory_in_bytes": float64(296), + "segments_term_vectors_memory_in_bytes": float64(0), + "segments_terms_memory_in_bytes": float64(3529), + "segments_version_map_memory_in_bytes": float64(0), + "seq_no_global_checkpoint": float64(351), + "seq_no_local_checkpoint": float64(351), + "seq_no_max_seq_no": float64(351), + "store_size_in_bytes": 
float64(94584), + "translog_earliest_last_modified_age": float64(936144), + "translog_operations": float64(352), + "translog_size_in_bytes": float64(79980), + "translog_uncommitted_operations": float64(0), + "translog_uncommitted_size_in_bytes": float64(55), + "warmer_current": float64(0), + "warmer_total": float64(3), + "warmer_total_time_in_millis": float64(0), +} From 3c811c15b3de9d11ac15dab2ed53857ae42dc05c Mon Sep 17 00:00:00 2001 From: shane Date: Fri, 2 Aug 2019 15:05:46 -0500 Subject: [PATCH 1055/1815] Add support for enterprise repos to github plugin (#6194) --- plugins/inputs/github/README.md | 3 +++ plugins/inputs/github/github.go | 21 ++++++++++++++++----- plugins/inputs/github/github_test.go | 13 +++++++++++++ 3 files changed, 32 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/github/README.md b/plugins/inputs/github/README.md index 29eddf25d..7227b167d 100644 --- a/plugins/inputs/github/README.md +++ b/plugins/inputs/github/README.md @@ -14,6 +14,9 @@ alternative method for collecting repository information. ## Github API access token. Unauthenticated requests are limited to 60 per hour. # access_token = "" + + ## Github API enterprise url. Github Enterprise accounts must specify their base url. + # enterprise_base_url = "" ## Timeout for HTTP requests. # http_timeout = "5s" diff --git a/plugins/inputs/github/github.go b/plugins/inputs/github/github.go index 906c99a20..5c9682e4a 100644 --- a/plugins/inputs/github/github.go +++ b/plugins/inputs/github/github.go @@ -18,10 +18,11 @@ import ( // GitHub - plugin main structure type GitHub struct { - Repositories []string `toml:"repositories"` - AccessToken string `toml:"access_token"` - HTTPTimeout internal.Duration `toml:"http_timeout"` - githubClient *github.Client + Repositories []string `toml:"repositories"` + AccessToken string `toml:"access_token"` + EnterpriseBaseURL string `toml:"enterprise_base_url"` + HTTPTimeout internal.Duration `toml:"http_timeout"` + githubClient *github.Client obfusticatedToken string @@ -36,6 +37,9 @@ const sampleConfig = ` ## Github API access token. Unauthenticated requests are limited to 60 per hour. # access_token = "" + + ## Github API enterprise url. Github Enterprise accounts must specify their base url. + # enterprise_base_url = "" ## Timeout for HTTP requests. # http_timeout = "5s" @@ -71,9 +75,16 @@ func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error) g.obfusticatedToken = g.AccessToken[0:4] + "..." 
+ g.AccessToken[len(g.AccessToken)-3:] - return github.NewClient(oauthClient), nil + return g.newGithubClient(oauthClient) } + return g.newGithubClient(httpClient) +} + +func (g *GitHub) newGithubClient(httpClient *http.Client) (*github.Client, error) { + if g.EnterpriseBaseURL != "" { + return github.NewEnterpriseClient(g.EnterpriseBaseURL, "", httpClient) + } return github.NewClient(httpClient), nil } diff --git a/plugins/inputs/github/github_test.go b/plugins/inputs/github/github_test.go index 33abc1c3e..3c346b7f8 100644 --- a/plugins/inputs/github/github_test.go +++ b/plugins/inputs/github/github_test.go @@ -1,6 +1,7 @@ package github import ( + "net/http" "reflect" "testing" @@ -8,6 +9,18 @@ import ( "github.com/stretchr/testify/require" ) +func TestNewGithubClient(t *testing.T) { + httpClient := &http.Client{} + g := &GitHub{} + client, err := g.newGithubClient(httpClient) + require.Nil(t, err) + require.Contains(t, client.BaseURL.String(), "api.github.com") + g.EnterpriseBaseURL = "api.example.com/" + enterpriseClient, err := g.newGithubClient(httpClient) + require.Nil(t, err) + require.Contains(t, enterpriseClient.BaseURL.String(), "api.example.com") +} + func TestSplitRepositoryNameWithWorkingExample(t *testing.T) { var validRepositoryNames = []struct { fullName string From 0732b41b4bbbb883b63ef004d43f627b8689a405 Mon Sep 17 00:00:00 2001 From: Mike Melnyk Date: Fri, 2 Aug 2019 16:10:14 -0400 Subject: [PATCH 1056/1815] Add TLS & credentials configuration for nats_consumer input plugin (#6195) --- plugins/inputs/nats_consumer/README.md | 13 +++++- plugins/inputs/nats_consumer/nats_consumer.go | 40 +++++++++++++++++-- 2 files changed, 48 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/nats_consumer/README.md b/plugins/inputs/nats_consumer/README.md index 8a89d90c5..9c3bfb2d7 100644 --- a/plugins/inputs/nats_consumer/README.md +++ b/plugins/inputs/nats_consumer/README.md @@ -12,13 +12,22 @@ instances of telegraf can read from a NATS cluster in parallel. 
[[inputs.nats_consumer]] ## urls of NATS servers servers = ["nats://localhost:4222"] - ## Use Transport Layer Security - secure = false ## subject(s) to consume subjects = ["telegraf"] ## name a queue group queue_group = "telegraf_consumers" + ## Optional credentials + # username = "" + # password = "" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + ## Sets the limits for pending msgs and bytes for each subscription ## These shouldn't need to be adjusted except in very high throughput scenarios # pending_message_limit = 65536 diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index 4411d8c3e..7ee05bc17 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -7,6 +7,7 @@ import ( "sync" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" nats "github.com/nats-io/go-nats" @@ -34,7 +35,11 @@ type natsConsumer struct { QueueGroup string `toml:"queue_group"` Subjects []string `toml:"subjects"` Servers []string `toml:"servers"` - Secure bool `toml:"secure"` + Username string `toml:"username"` + Password string `toml:"password"` + tls.ClientConfig + // Legacy; Should be deprecated + Secure bool `toml:"secure"` // Client pending limits: PendingMessageLimit int `toml:"pending_message_limit"` @@ -61,13 +66,24 @@ type natsConsumer struct { var sampleConfig = ` ## urls of NATS servers servers = ["nats://localhost:4222"] - ## Use Transport Layer Security + ## Deprecated: Use Transport Layer Security secure = false ## subject(s) to consume subjects = ["telegraf"] ## name a queue group queue_group = "telegraf_consumers" + ## Optional credentials + # username = "" + # password = "" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + ## Sets the limits for pending msgs and bytes for each subscription ## These shouldn't need to be adjusted except in very high throughput scenarios # pending_message_limit = 65536 @@ -125,7 +141,25 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error { // override servers if any were specified opts.Servers = n.Servers - opts.Secure = n.Secure + // override authentication, if any was specified + if n.Username != "" { + opts.User = n.Username + opts.Password = n.Password + } + + // override TLS, if it was specified + tlsConfig, err := n.ClientConfig.TLSConfig() + if err != nil { + return err + } + if tlsConfig != nil { + // set NATS connection TLS options + opts.Secure = true + opts.TLSConfig = tlsConfig + } else { + // should be deprecated; use TLS + opts.Secure = n.Secure + } if n.conn == nil || n.conn.IsClosed() { n.conn, connectErr = opts.Connect() From be7abd99590c148435054f3b3322ea3fc04dd65a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 2 Aug 2019 14:46:20 -0700 Subject: [PATCH 1057/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index be22fd47f..cb9d8558e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -53,6 +53,12 @@ - [#6024](https://github.com/influxdata/telegraf/pull/6024): Load external Go 
plugins from --plugin-directory. - [#6184](https://github.com/influxdata/telegraf/pull/6184): Add ability to exclude db/bucket tag from influxdb outputs. - [#6137](https://github.com/influxdata/telegraf/pull/6137): Gather per collections stats in mongodb input plugin. +- [#6195](https://github.com/influxdata/telegraf/pull/6195): Add TLS & credentials configuration for nats_consumer input plugin. +- [#6194](https://github.com/influxdata/telegraf/pull/6194): Add support for enterprise repos to github plugin. +- [#6060](https://github.com/influxdata/telegraf/pull/6060): Add Indices stats to elasticsearch input. +- [#6189](https://github.com/influxdata/telegraf/pull/6189): Add left function to string processor. +- [#6049](https://github.com/influxdata/telegraf/pull/6049): Add grace period for metrics late for aggregation. +- [#4435](https://github.com/influxdata/telegraf/pull/4435): Add diff and non_negative_diff to basicstats aggregator. #### Bugfixes From ffe9494663d0b88838c6a88131d70294b27c259d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 2 Aug 2019 14:59:28 -0700 Subject: [PATCH 1058/1815] Restore secure option to control tls in nats_consumer --- plugins/inputs/nats_consumer/README.md | 3 +++ plugins/inputs/nats_consumer/nats_consumer.go | 25 ++++++++----------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/plugins/inputs/nats_consumer/README.md b/plugins/inputs/nats_consumer/README.md index 9c3bfb2d7..205578a17 100644 --- a/plugins/inputs/nats_consumer/README.md +++ b/plugins/inputs/nats_consumer/README.md @@ -21,6 +21,9 @@ instances of telegraf can read from a NATS cluster in parallel. # username = "" # password = "" + ## Use Transport Layer Security + # secure = false + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index 7ee05bc17..b82e3f3a6 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -35,11 +35,10 @@ type natsConsumer struct { QueueGroup string `toml:"queue_group"` Subjects []string `toml:"subjects"` Servers []string `toml:"servers"` + Secure bool `toml:"secure"` Username string `toml:"username"` Password string `toml:"password"` tls.ClientConfig - // Legacy; Should be deprecated - Secure bool `toml:"secure"` // Client pending limits: PendingMessageLimit int `toml:"pending_message_limit"` @@ -66,8 +65,7 @@ type natsConsumer struct { var sampleConfig = ` ## urls of NATS servers servers = ["nats://localhost:4222"] - ## Deprecated: Use Transport Layer Security - secure = false + ## subject(s) to consume subjects = ["telegraf"] ## name a queue group @@ -77,6 +75,9 @@ var sampleConfig = ` # username = "" # password = "" + ## Use Transport Layer Security + # secure = false + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -147,18 +148,14 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error { opts.Password = n.Password } - // override TLS, if it was specified - tlsConfig, err := n.ClientConfig.TLSConfig() - if err != nil { - return err - } - if tlsConfig != nil { - // set NATS connection TLS options + if n.Secure { + tlsConfig, err := n.ClientConfig.TLSConfig() + if err != nil { + return err + } + opts.Secure = true opts.TLSConfig = tlsConfig - } else { - // should be deprecated; use TLS - opts.Secure = n.Secure } if n.conn == nil || n.conn.IsClosed() { From 
633dfe2a193e148a10c40c2e32380bdf5810921f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 2 Aug 2019 16:56:49 -0700 Subject: [PATCH 1059/1815] Add secure option to NATS output to mirror input --- plugins/outputs/nats/README.md | 21 +++++++-------------- plugins/outputs/nats/nats.go | 28 ++++++++++++++-------------- 2 files changed, 21 insertions(+), 28 deletions(-) diff --git a/plugins/outputs/nats/README.md b/plugins/outputs/nats/README.md index d9462650a..f6dc04f53 100644 --- a/plugins/outputs/nats/README.md +++ b/plugins/outputs/nats/README.md @@ -2,7 +2,7 @@ This plugin writes to a (list of) specified NATS instance(s). -``` +```toml [[outputs.nats]] ## URLs of NATS servers servers = ["nats://localhost:4222"] @@ -11,9 +11,14 @@ This plugin writes to a (list of) specified NATS instance(s). # password = "" ## NATS subject for producer messages subject = "telegraf" + + ## Use Transport Layer Security + # secure = false + ## Optional TLS Config - ## CA certificate used to self-sign NATS server(s) TLS certificate(s) # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false @@ -23,15 +28,3 @@ This plugin writes to a (list of) specified NATS instance(s). ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ``` - -### Required parameters: - -* `servers`: List of strings, this is for NATS clustering support. Each URL should start with `nats://`. -* `subject`: The NATS subject to publish to. - -### Optional parameters: - -* `username`: Username for NATS -* `password`: Password for NATS -* `tls_ca`: TLS CA -* `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false) diff --git a/plugins/outputs/nats/nats.go b/plugins/outputs/nats/nats.go index ef2c4bbf2..e4817d6c9 100644 --- a/plugins/outputs/nats/nats.go +++ b/plugins/outputs/nats/nats.go @@ -12,13 +12,11 @@ import ( ) type NATS struct { - // Servers is the NATS server pool to connect to - Servers []string - // Credentials - Username string - Password string - // NATS subject to publish metrics to - Subject string + Servers []string `toml:"servers"` + Secure bool `toml:"secure"` + Username string `toml:"username"` + Password string `toml:"password"` + Subject string `toml:"subject"` tls.ClientConfig conn *nats_client.Conn @@ -34,6 +32,9 @@ var sampleConfig = ` ## NATS subject for producer messages subject = "telegraf" + ## Use Transport Layer Security + # secure = false + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -70,13 +71,12 @@ func (n *NATS) Connect() error { opts.Password = n.Password } - // override TLS, if it was specified - tlsConfig, err := n.ClientConfig.TLSConfig() - if err != nil { - return err - } - if tlsConfig != nil { - // set NATS connection TLS options + if n.Secure { + tlsConfig, err := n.ClientConfig.TLSConfig() + if err != nil { + return err + } + opts.Secure = true opts.TLSConfig = tlsConfig } From 8a8125692ad387007f2a002264c003eaf461ef6f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 5 Aug 2019 11:21:26 -0700 Subject: [PATCH 1060/1815] Update Windows changelog --- etc/telegraf_windows.conf | 50 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index 01bb21e28..5befa7bf8 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -155,6 +155,56 @@ 
## existing data has been written.
# influx_uint_support = false

+# # Configuration for sending metrics to InfluxDB
+# [[outputs.influxdb_v2]]
+# ## The URLs of the InfluxDB cluster nodes.
+# ##
+# ## Multiple URLs can be specified for a single cluster; only ONE of the
+# ## URLs will be written to in each interval.
+# urls = ["http://127.0.0.1:9999"]
+#
+# ## Token for authentication.
+# token = ""
+#
+# ## Organization is the name of the organization you wish to write to; must exist.
+# organization = ""
+#
+# ## Destination bucket to write into.
+# bucket = ""
+#
+# ## The value of this tag will be used to determine the bucket. If this
+# ## tag is not set, the 'bucket' option is used as the default.
+# # bucket_tag = ""
+#
+# ## If true, the bucket tag will not be added to the metric.
+# # exclude_bucket_tag = false
+#
+# ## Timeout for HTTP messages.
+# # timeout = "5s"
+#
+# ## Additional HTTP headers
+# # http_headers = {"X-Special-Header" = "Special-Value"}
+#
+# ## HTTP Proxy override; if unset, the standard proxy environment
+# ## variables are consulted to determine which proxy, if any, should be used.
+# # http_proxy = "http://corporate.proxy:3128"
+#
+# ## HTTP User-Agent
+# # user_agent = "telegraf"
+#
+# ## Content-Encoding for write request body, can be set to "gzip" to
+# ## compress body or "identity" to apply no encoding.
+# # content_encoding = "gzip"
+#
+# ## Enable or disable uint support for writing uints to InfluxDB 2.0.
+# # influx_uint_support = false
+#
+# ## Optional TLS Config for use on HTTP connections.
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false

###############################################################################
#                            INPUTS                                           #

From f2503722a06eec3647ad9fad926832299b70d849 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 5 Aug 2019 13:56:03 -0700
Subject: [PATCH 1061/1815] Fix link in rabbitmq README

---
 plugins/inputs/rabbitmq/README.md | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/plugins/inputs/rabbitmq/README.md b/plugins/inputs/rabbitmq/README.md
index 0406df700..5d500afd1 100644
--- a/plugins/inputs/rabbitmq/README.md
+++ b/plugins/inputs/rabbitmq/README.md
@@ -1,8 +1,11 @@
 # RabbitMQ Input Plugin

-Reads metrics from RabbitMQ servers via the [Management Plugin](https://www.rabbitmq.com/management.html).
+Reads metrics from RabbitMQ servers via the [Management Plugin][management].

-For additional details reference the [RabbitMQ Management HTTP Stats](https://cdn.rawgit.com/rabbitmq/rabbitmq-management/master/priv/www/doc/stats.html).
+For additional details reference the [RabbitMQ Management HTTP Stats][management-reference].
+ +[management]: https://www.rabbitmq.com/management.html +[management-reference]: https://raw.githack.com/rabbitmq/rabbitmq-management/rabbitmq_v3_6_9/priv/www/api/index.html ### Configuration: From 7e793e87e30af268f450adc736e3c3bf86f83825 Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Mon, 5 Aug 2019 13:57:15 -0700 Subject: [PATCH 1062/1815] Clean up cassandra input README (#6206) --- plugins/inputs/cassandra/README.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/plugins/inputs/cassandra/README.md b/plugins/inputs/cassandra/README.md index 86c6a65a3..881bba3e0 100644 --- a/plugins/inputs/cassandra/README.md +++ b/plugins/inputs/cassandra/README.md @@ -39,19 +39,19 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster - [How to monitor Cassandra performance metrics](https://www.datadoghq.com/blog/how-to-monitor-cassandra-performance-metrics) - [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html) -####measurement = javaGarbageCollector +#### measurement = javaGarbageCollector - /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionTime - /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionCount - /java.lang:type=GarbageCollector,name=ParNew/CollectionTime - /java.lang:type=GarbageCollector,name=ParNew/CollectionCount -####measurement = javaMemory +#### measurement = javaMemory - /java.lang:type=Memory/HeapMemoryUsage - /java.lang:type=Memory/NonHeapMemoryUsage -####measurement = cassandraCache +#### measurement = cassandraCache - /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Hits - /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Requests @@ -64,11 +64,11 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster - /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Size - /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Capacity -####measurement = cassandraClient +#### measurement = cassandraClient - /org.apache.cassandra.metrics:type=Client,name=connectedNativeClients -####measurement = cassandraClientRequest +#### measurement = cassandraClientRequest - /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=TotalLatency - /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=TotalLatency @@ -81,24 +81,24 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster - /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Failures - /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Failures -####measurement = cassandraCommitLog +#### measurement = cassandraCommitLog - /org.apache.cassandra.metrics:type=CommitLog,name=PendingTasks - /org.apache.cassandra.metrics:type=CommitLog,name=TotalCommitLogSize -####measurement = cassandraCompaction +#### measurement = cassandraCompaction - /org.apache.cassandra.metrics:type=Compaction,name=CompletedTasks - /org.apache.cassandra.metrics:type=Compaction,name=PendingTasks - /org.apache.cassandra.metrics:type=Compaction,name=TotalCompactionsCompleted - /org.apache.cassandra.metrics:type=Compaction,name=BytesCompacted -####measurement = cassandraStorage +#### measurement = cassandraStorage - /org.apache.cassandra.metrics:type=Storage,name=Load -- /org.apache.cassandra.metrics:type=Storage,name=Exceptions +- /org.apache.cassandra.metrics:type=Storage,name=Exceptions -####measurement = cassandraTable +#### measurement = cassandraTable Using 
wildcards for "keyspace" and "scope" can create a lot of series as metrics
will be reported for every table and keyspace including internal system tables.
Specify a keyspace name and/or a table name to limit them.

 - /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=LiveDiskSpaceUsed
@@ -110,7 +110,7 @@ Using wildcards for "keyspace" and "scope" can create a lot of series as metrics

 - /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteTotalLatency

-####measurement = cassandraThreadPools
+#### measurement = cassandraThreadPools

 - /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=ActiveTasks
 - /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=ActiveTasks

From 374aa0b36bcd49b337b8f026c9cfdfc5b6138a0c Mon Sep 17 00:00:00 2001
From: Russ Savage
Date: Mon, 5 Aug 2019 13:58:35 -0700
Subject: [PATCH 1063/1815] Add README for disque input (#6208)

---
 plugins/inputs/disque/README.md | 38 +++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)
 create mode 100644 plugins/inputs/disque/README.md

diff --git a/plugins/inputs/disque/README.md b/plugins/inputs/disque/README.md
new file mode 100644
index 000000000..0df757061
--- /dev/null
+++ b/plugins/inputs/disque/README.md
@@ -0,0 +1,38 @@
+# Disque Input
+
+[Disque](https://github.com/antirez/disque) is an ongoing experiment to build a distributed, in-memory message broker.
+
+### Configuration:
+
+```toml
+[[inputs.disque]]
+  ## An array of URIs to gather stats about. Specify an IP or hostname
+  ## with optional port and password.
+  ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
+  ## If no servers are specified, then localhost is used as the host.
+  servers = ["localhost"]
+```
+
+### Metrics
+
+- disque
+  - disque_host
+  - uptime_in_seconds
+  - connected_clients
+  - blocked_clients
+  - used_memory
+  - used_memory_rss
+  - used_memory_peak
+  - total_connections_received
+  - total_commands_processed
+  - instantaneous_ops_per_sec
+  - latest_fork_usec
+  - mem_fragmentation_ratio
+  - used_cpu_sys
+  - used_cpu_user
+  - used_cpu_sys_children
+  - used_cpu_user_children
+  - registered_jobs
+  - registered_queues

From 6ecfd01f9bc7332973a30b1d1dd5980e2948d02f Mon Sep 17 00:00:00 2001
From: Rob Cowart
Date: Mon, 5 Aug 2019 23:04:41 +0200
Subject: [PATCH 1064/1815] Correct typo in kubernetes logsfs_available_bytes
 field (#6200)

---
 plugins/inputs/kubernetes/kubernetes.go      | 4 ++--
 plugins/inputs/kubernetes/kubernetes_test.go | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go
index fdeb78ec4..4e6e17ef1 100644
--- a/plugins/inputs/kubernetes/kubernetes.go
+++ b/plugins/inputs/kubernetes/kubernetes.go
@@ -156,7 +156,7 @@ func buildSystemContainerMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Ac
 	fields["memory_major_page_faults"] = container.Memory.MajorPageFaults
 	fields["rootfs_available_bytes"] = container.RootFS.AvailableBytes
 	fields["rootfs_capacity_bytes"] = container.RootFS.CapacityBytes
-	fields["logsfs_avaialble_bytes"] = container.LogsFS.AvailableBytes
+	fields["logsfs_available_bytes"] = container.LogsFS.AvailableBytes
 	fields["logsfs_capacity_bytes"] = container.LogsFS.CapacityBytes
 	acc.AddFields("kubernetes_system_container", fields, tags)
 }
@@ -208,7 +208,7 @@ func buildPodMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) {
 	fields["rootfs_available_bytes"] =
container.RootFS.AvailableBytes fields["rootfs_capacity_bytes"] = container.RootFS.CapacityBytes fields["rootfs_used_bytes"] = container.RootFS.UsedBytes - fields["logsfs_avaialble_bytes"] = container.LogsFS.AvailableBytes + fields["logsfs_available_bytes"] = container.LogsFS.AvailableBytes fields["logsfs_capacity_bytes"] = container.LogsFS.CapacityBytes fields["logsfs_used_bytes"] = container.LogsFS.UsedBytes acc.AddFields("kubernetes_pod_container", fields, tags) diff --git a/plugins/inputs/kubernetes/kubernetes_test.go b/plugins/inputs/kubernetes/kubernetes_test.go index 289e36ae4..081bca03a 100644 --- a/plugins/inputs/kubernetes/kubernetes_test.go +++ b/plugins/inputs/kubernetes/kubernetes_test.go @@ -35,7 +35,7 @@ func TestKubernetesStats(t *testing.T) { "memory_major_page_faults": int64(13), "rootfs_available_bytes": int64(84379979776), "rootfs_capacity_bytes": int64(105553100800), - "logsfs_avaialble_bytes": int64(84379979776), + "logsfs_available_bytes": int64(84379979776), "logsfs_capacity_bytes": int64(105553100800), } tags := map[string]string{ @@ -80,7 +80,7 @@ func TestKubernetesStats(t *testing.T) { "rootfs_available_bytes": int64(84379979776), "rootfs_capacity_bytes": int64(105553100800), "rootfs_used_bytes": int64(57344), - "logsfs_avaialble_bytes": int64(84379979776), + "logsfs_available_bytes": int64(84379979776), "logsfs_capacity_bytes": int64(105553100800), "logsfs_used_bytes": int64(24576), } @@ -103,7 +103,7 @@ func TestKubernetesStats(t *testing.T) { "rootfs_available_bytes": int64(0), "rootfs_capacity_bytes": int64(0), "rootfs_used_bytes": int64(0), - "logsfs_avaialble_bytes": int64(0), + "logsfs_available_bytes": int64(0), "logsfs_capacity_bytes": int64(0), "logsfs_used_bytes": int64(0), } From de6416ff829bf535a753baeca7c3f127a1c6f9ef Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 5 Aug 2019 14:13:34 -0700 Subject: [PATCH 1065/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cb9d8558e..7891d7949 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -69,6 +69,12 @@ - [#6142](https://github.com/influxdata/telegraf/issues/6142): Fix memory error panic in mqtt input. - [#6136](https://github.com/influxdata/telegraf/issues/6136): Support Kafka 2.3.0 consumer groups. +## v1.11.4 [unreleased] + +#### Bugfixes + +- [#6200](https://github.com/influxdata/telegraf/pull/6200): Correct typo in kubernetes logsfs_available_bytes field. + ## v1.11.3 [2019-07-23] #### Bugfixes From b5710a6a2128405f424b0f6670260b2e662fefc3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 5 Aug 2019 14:50:29 -0700 Subject: [PATCH 1066/1815] Skip floats that are NaN or Inf in Datadog output. 
(#6198) --- plugins/outputs/datadog/datadog.go | 17 ++++++--- plugins/outputs/datadog/datadog_test.go | 46 +++++++++++++++++++++++-- 2 files changed, 57 insertions(+), 6 deletions(-) diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go index 62e73f115..736570726 100644 --- a/plugins/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "log" + "math" "net/http" "net/url" "strings" @@ -63,9 +64,6 @@ func (d *Datadog) Connect() error { } func (d *Datadog) Write(metrics []telegraf.Metric) error { - if len(metrics) == 0 { - return nil - } ts := TimeSeries{} tempSeries := []*Metric{} metricCounter := 0 @@ -75,6 +73,10 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error { metricTags := buildTags(m.TagList()) host, _ := m.GetTag("host") + if len(dogMs) == 0 { + continue + } + for fieldName, dogM := range dogMs { // name of the datadog measurement var dname string @@ -98,6 +100,10 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error { } } + if len(tempSeries) == 0 { + return nil + } + redactedApiKey := "****************" ts.Series = make([]*Metric, metricCounter) copy(ts.Series, tempSeries[0:]) @@ -166,9 +172,12 @@ func buildTags(tagList []*telegraf.Tag) []string { } func verifyValue(v interface{}) bool { - switch v.(type) { + switch v := v.(type) { case string: return false + case float64: + // The payload will be encoded as JSON, which does not allow NaN or Inf. + return !math.IsNaN(v) && !math.IsInf(v, 0) } return true } diff --git a/plugins/outputs/datadog/datadog_test.go b/plugins/outputs/datadog/datadog_test.go index 7bbc91254..be8541ee8 100644 --- a/plugins/outputs/datadog/datadog_test.go +++ b/plugins/outputs/datadog/datadog_test.go @@ -3,15 +3,15 @@ package datadog import ( "encoding/json" "fmt" + "math" "net/http" "net/http/httptest" "reflect" "testing" "time" - "github.com/influxdata/telegraf/testutil" - "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -249,3 +249,45 @@ func TestVerifyValue(t *testing.T) { } } } + +func TestNaNIsSkipped(t *testing.T) { + plugin := &Datadog{ + Apikey: "testing", + URL: "", // No request will be sent because all fields are skipped + } + + err := plugin.Connect() + require.NoError(t, err) + + err = plugin.Write([]telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": math.NaN(), + }, + time.Now()), + }) + require.NoError(t, err) +} + +func TestInfIsSkipped(t *testing.T) { + plugin := &Datadog{ + Apikey: "testing", + URL: "", // No request will be sent because all fields are skipped + } + + err := plugin.Connect() + require.NoError(t, err) + + err = plugin.Write([]telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": math.Inf(0), + }, + time.Now()), + }) + require.NoError(t, err) +} From 61f6794846bd7c2f74ec9e7f7ff1e6b3ddac08b5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 5 Aug 2019 15:04:50 -0700 Subject: [PATCH 1067/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7891d7949..122973554 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -74,6 +74,7 @@ #### Bugfixes - [#6200](https://github.com/influxdata/telegraf/pull/6200): Correct typo in kubernetes logsfs_available_bytes field. 
+- [#6191](https://github.com/influxdata/telegraf/issues/6191): Skip floats that are NaN or Inf in Datadog output. ## v1.11.3 [2019-07-23] From 1c1c41c300174615915e3dfd7cb11de443995367 Mon Sep 17 00:00:00 2001 From: Marc Venturini Date: Tue, 6 Aug 2019 08:36:34 +0800 Subject: [PATCH 1068/1815] Add device tags to smart_attributes (#6201) --- plugins/inputs/smart/README.md | 10 ++++--- plugins/inputs/smart/smart.go | 13 ++++----- plugins/inputs/smart/smart_test.go | 43 ++++++++++++++++++++++++++++++ 3 files changed, 54 insertions(+), 12 deletions(-) diff --git a/plugins/inputs/smart/README.md b/plugins/inputs/smart/README.md index 1051e3662..b677bf7bd 100644 --- a/plugins/inputs/smart/README.md +++ b/plugins/inputs/smart/README.md @@ -89,9 +89,8 @@ Defaults!SMARTCTL !logfile, !syslog, !pam_session - tags: - capacity - device - - device_model - enabled - - health + - model - serial_no - wwn - fields: @@ -104,10 +103,13 @@ Defaults!SMARTCTL !logfile, !syslog, !pam_session - smart_attribute: - tags: + - capacity - device + - enabled - fail - flags - id + - model - name - serial_no - wwn @@ -163,6 +165,6 @@ smartctl --info --health --attributes --tolerance=verypermissive --nocheck NOCHE ``` smart_device,enabled=Enabled,host=mbpro.local,device=rdisk0,model=APPLE\ SSD\ SM0512F,serial_no=S1K5NYCD964433,wwn=5002538655584d30,capacity=500277790720 udma_crc_errors=0i,exit_status=0i,health_ok=true,read_error_rate=0i,temp_c=40i 1502536854000000000 -smart_attribute,serial_no=S1K5NYCD964433,wwn=5002538655584d30,id=199,name=UDMA_CRC_Error_Count,flags=-O-RC-,fail=-,host=mbpro.local,device=rdisk0 threshold=0i,raw_value=0i,exit_status=0i,value=200i,worst=200i 1502536854000000000 -smart_attribute,device=rdisk0,serial_no=S1K5NYCD964433,wwn=5002538655584d30,id=240,name=Unknown_SSD_Attribute,flags=-O---K,fail=-,host=mbpro.local exit_status=0i,value=100i,worst=100i,threshold=0i,raw_value=0i 1502536854000000000 +smart_attribute,capacity=500277790720,device=rdisk0,enabled=Enabled,fail=-,flags=-O-RC-,host=mbpro.local,id=199,model=APPLE\ SSD\ SM0512F,name=UDMA_CRC_Error_Count,serial_no=S1K5NYCD964433,wwn=5002538655584d30 exit_status=0i,raw_value=0i,threshold=0i,value=200i,worst=200i 1502536854000000000 +smart_attribute,capacity=500277790720,device=rdisk0,enabled=Enabled,fail=-,flags=-O---K,host=mbpro.local,id=199,model=APPLE\ SSD\ SM0512F,name=Unknown_SSD_Attribute,serial_no=S1K5NYCD964433,wwn=5002538655584d30 exit_status=0i,raw_value=0i,threshold=0i,value=100i,worst=100i 1502536854000000000 ``` diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go index f022261df..3e6620c8c 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -303,14 +303,11 @@ func gatherDisk(acc telegraf.Accumulator, usesudo, collectAttributes bool, smart fields := make(map[string]interface{}) if collectAttributes { - deviceNode := strings.Split(device, " ")[0] - tags["device"] = path.Base(deviceNode) - - if serial, ok := deviceTags["serial_no"]; ok { - tags["serial_no"] = serial - } - if wwn, ok := deviceTags["wwn"]; ok { - tags["wwn"] = wwn + keys := [...]string{"device", "model", "serial_no", "wwn", "capacity", "enabled"} + for _, key := range keys { + if value, ok := deviceTags[key]; ok { + tags[key] = value + } } } diff --git a/plugins/inputs/smart/smart_test.go b/plugins/inputs/smart/smart_test.go index b9886bb08..0b030366d 100644 --- a/plugins/inputs/smart/smart_test.go +++ b/plugins/inputs/smart/smart_test.go @@ -49,8 +49,11 @@ func TestGatherAttributes(t *testing.T) { }, map[string]string{ 
"device": "ada0", + "model": "APPLE SSD SM256E", "serial_no": "S0X5NZBC422720", "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", "id": "1", "name": "Raw_Read_Error_Rate", "flags": "-O-RC-", @@ -67,8 +70,11 @@ func TestGatherAttributes(t *testing.T) { }, map[string]string{ "device": "ada0", + "model": "APPLE SSD SM256E", "serial_no": "S0X5NZBC422720", "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", "id": "5", "name": "Reallocated_Sector_Ct", "flags": "PO--CK", @@ -85,8 +91,11 @@ func TestGatherAttributes(t *testing.T) { }, map[string]string{ "device": "ada0", + "model": "APPLE SSD SM256E", "serial_no": "S0X5NZBC422720", "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", "id": "9", "name": "Power_On_Hours", "flags": "-O--CK", @@ -103,8 +112,11 @@ func TestGatherAttributes(t *testing.T) { }, map[string]string{ "device": "ada0", + "model": "APPLE SSD SM256E", "serial_no": "S0X5NZBC422720", "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", "id": "12", "name": "Power_Cycle_Count", "flags": "-O--CK", @@ -121,8 +133,11 @@ func TestGatherAttributes(t *testing.T) { }, map[string]string{ "device": "ada0", + "model": "APPLE SSD SM256E", "serial_no": "S0X5NZBC422720", "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", "id": "169", "name": "Unknown_Attribute", "flags": "PO--C-", @@ -139,8 +154,11 @@ func TestGatherAttributes(t *testing.T) { }, map[string]string{ "device": "ada0", + "model": "APPLE SSD SM256E", "serial_no": "S0X5NZBC422720", "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", "id": "173", "name": "Wear_Leveling_Count", "flags": "-O--CK", @@ -157,8 +175,11 @@ func TestGatherAttributes(t *testing.T) { }, map[string]string{ "device": "ada0", + "model": "APPLE SSD SM256E", "serial_no": "S0X5NZBC422720", "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", "id": "190", "name": "Airflow_Temperature_Cel", "flags": "-O---K", @@ -175,8 +196,11 @@ func TestGatherAttributes(t *testing.T) { }, map[string]string{ "device": "ada0", + "model": "APPLE SSD SM256E", "serial_no": "S0X5NZBC422720", "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", "id": "192", "name": "Power-Off_Retract_Count", "flags": "-O--C-", @@ -193,8 +217,11 @@ func TestGatherAttributes(t *testing.T) { }, map[string]string{ "device": "ada0", + "model": "APPLE SSD SM256E", "serial_no": "S0X5NZBC422720", "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", "id": "194", "name": "Temperature_Celsius", "flags": "-O---K", @@ -211,8 +238,11 @@ func TestGatherAttributes(t *testing.T) { }, map[string]string{ "device": "ada0", + "model": "APPLE SSD SM256E", "serial_no": "S0X5NZBC422720", "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", "id": "197", "name": "Current_Pending_Sector", "flags": "-O---K", @@ -229,8 +259,11 @@ func TestGatherAttributes(t *testing.T) { }, map[string]string{ "device": "ada0", + "model": "APPLE SSD SM256E", "serial_no": "S0X5NZBC422720", "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", "id": "199", "name": "UDMA_CRC_Error_Count", "flags": "-O-RC-", @@ -247,8 +280,11 @@ func TestGatherAttributes(t *testing.T) { }, map[string]string{ "device": "ada0", + "model": "APPLE SSD SM256E", "serial_no": "S0X5NZBC422720", "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", 
"id": "240", "name": "Head_Flying_Hours", "flags": "------", @@ -466,6 +502,7 @@ func TestGatherNvme(t *testing.T) { "id": "9", "name": "Power_On_Hours", "serial_no": "D704940282?", + "model": "TS128GMTE850", }, map[string]interface{}{ "raw_value": 6038, @@ -478,6 +515,7 @@ func TestGatherNvme(t *testing.T) { "id": "12", "name": "Power_Cycle_Count", "serial_no": "D704940282?", + "model": "TS128GMTE850", }, map[string]interface{}{ "raw_value": 472, @@ -489,6 +527,7 @@ func TestGatherNvme(t *testing.T) { "device": ".", "name": "Media_and_Data_Integrity_Errors", "serial_no": "D704940282?", + "model": "TS128GMTE850", }, map[string]interface{}{ "raw_value": 0, @@ -500,6 +539,7 @@ func TestGatherNvme(t *testing.T) { "device": ".", "name": "Error_Information_Log_Entries", "serial_no": "D704940282?", + "model": "TS128GMTE850", }, map[string]interface{}{ "raw_value": 119699, @@ -511,6 +551,7 @@ func TestGatherNvme(t *testing.T) { "device": ".", "name": "Available_Spare", "serial_no": "D704940282?", + "model": "TS128GMTE850", }, map[string]interface{}{ "raw_value": 100, @@ -523,6 +564,7 @@ func TestGatherNvme(t *testing.T) { "id": "194", "name": "Temperature_Celsius", "serial_no": "D704940282?", + "model": "TS128GMTE850", }, map[string]interface{}{ "raw_value": 38, @@ -534,6 +576,7 @@ func TestGatherNvme(t *testing.T) { "device": ".", "name": "Critical_Warning", "serial_no": "D704940282?", + "model": "TS128GMTE850", }, map[string]interface{}{ "raw_value": int64(9), From 493510a5f7875f4f3d3f3aaf98cf6a0bcbb17602 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 5 Aug 2019 17:37:56 -0700 Subject: [PATCH 1069/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 122973554..d404d3f3a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ - [#6189](https://github.com/influxdata/telegraf/pull/6189): Add left function to string processor. - [#6049](https://github.com/influxdata/telegraf/pull/6049): Add grace period for metrics late for aggregation. - [#4435](https://github.com/influxdata/telegraf/pull/4435): Add diff and non_negative_diff to basicstats aggregator. +- [#6201](https://github.com/influxdata/telegraf/pull/6201): Add device tags to smart_attributes. 
#### Bugfixes From cd417c04790208e9ea747530a40c45dfa135d47f Mon Sep 17 00:00:00 2001 From: Greg Taylor Date: Mon, 5 Aug 2019 21:03:03 -0700 Subject: [PATCH 1070/1815] Upgrade aws-go-sdk (#5945) --- Gopkg.lock | 23 ++++++++++------------- Gopkg.toml | 2 +- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 470c56cda..dc6fee195 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -152,7 +152,7 @@ revision = "f2867c24984aa53edec54a138c03db934221bdea" [[projects]] - digest = "1:996727880e06dcf037f712c4d046e241d1b1b01844636fefb0fbaa480cfd230e" + digest = "1:f8bf2fcf62410b565b9caacb6a7a858302c22968f5738549c09a17dbe6ae306a" name = "github.com/aws/aws-sdk-go" packages = [ "aws", @@ -164,7 +164,9 @@ "aws/credentials", "aws/credentials/ec2rolecreds", "aws/credentials/endpointcreds", + "aws/credentials/processcreds", "aws/credentials/stscreds", + "aws/crr", "aws/csm", "aws/defaults", "aws/ec2metadata", @@ -172,11 +174,14 @@ "aws/request", "aws/session", "aws/signer/v4", + "internal/ini", "internal/sdkio", "internal/sdkrand", "internal/sdkuri", "internal/shareddefaults", "private/protocol", + "private/protocol/eventstream", + "private/protocol/eventstream/eventstreamapi", "private/protocol/json/jsonutil", "private/protocol/jsonrpc", "private/protocol/query", @@ -192,8 +197,8 @@ "service/sts", ] pruneopts = "" - revision = "bf8067ceb6e7f51e150c218972dccfeeed892b85" - version = "v1.15.54" + revision = "5312c8dac9067d339c4e68d7e0dd5507b2f01849" + version = "v1.19.41" [[projects]] branch = "master" @@ -432,14 +437,6 @@ revision = "1983bc2fd5de3ea00aa5457bbc8774300e889db9" version = "v0.1.1" -[[projects]] - digest = "1:858b7fe7b0f4bc7ef9953926828f2816ea52d01a88d72d1c45bc8c108f23c356" - name = "github.com/go-ini/ini" - packages = ["."] - pruneopts = "" - revision = "358ee7663966325963d4e8b2e1fbd570c5195153" - version = "v1.38.1" - [[projects]] digest = "1:df89444601379b2e1ee82bf8e6b72af9901cbeed4b469fa380a519c89c339310" name = "github.com/go-logfmt/logfmt" @@ -730,11 +727,11 @@ version = "v1.0.0" [[projects]] - digest = "1:6f49eae0c1e5dab1dafafee34b207aeb7a42303105960944828c2079b92fc88e" + digest = "1:13fe471d0ed891e8544eddfeeb0471fd3c9f2015609a1c000aefdedf52a19d40" name = "github.com/jmespath/go-jmespath" packages = ["."] pruneopts = "" - revision = "0b12d6b5" + revision = "c2b33e84" [[projects]] branch = "master" diff --git a/Gopkg.toml b/Gopkg.toml index d1c7f4589..028af3487 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -16,7 +16,7 @@ [[constraint]] name = "github.com/aws/aws-sdk-go" - version = "1.15.54" + version = "1.19.41" [[constraint]] name = "github.com/couchbase/go-couchbase" From e65324d2c1695b3998c6d64c7dcd2baa7d8e6cdd Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 5 Aug 2019 22:48:01 -0700 Subject: [PATCH 1071/1815] Update gopsutil (#6212) --- Gopkg.lock | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index dc6fee195..72ca0ede3 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1057,7 +1057,7 @@ version = "v1.2.0" [[projects]] - digest = "1:8b478b1d29180a608666c36ea8dc160000e3466abb5a5354e571429679e972c0" + digest = "1:2226ffdae873216a5bc8a0bab7a51ac670b27a4aed852007d77600f809aa04e3" name = "github.com/shirou/gopsutil" packages = [ "cpu", @@ -1070,8 +1070,8 @@ "process", ] pruneopts = "" - revision = "5335e3fd506df4cb63de0a8239c7461d23063be6" - version = "v2.19.05" + revision = "d80c43f9c984a48783daf22f4bd9278006ae483a" + version = "v2.19.7" [[projects]] branch = "master" @@ -1345,7 +1345,7 @@ [[projects]] 
branch = "master" - digest = "1:6a6eed3727d0e15703d9e930d8dbe333bea09eda309d75a015d3c6dc4e5c92a6" + digest = "1:0b5c2207c72f2d13995040f176feb6e3f453d6b01af2b9d57df76b05ded2e926" name = "golang.org/x/sys" packages = [ "unix", @@ -1357,7 +1357,7 @@ "windows/svc/mgr", ] pruneopts = "" - revision = "7c4c994c65f702f41ed7d6620a2cb34107576a77" + revision = "51ab0e2deafac1f46c46ad59cf0921be2f180c3d" source = "https://github.com/golang/sys.git" [[projects]] From 60c8f382be18b9e8a0c13e2e476d568e7e164edc Mon Sep 17 00:00:00 2001 From: Matthew Crenshaw <3420325+sgtsquiggs@users.noreply.github.com> Date: Tue, 6 Aug 2019 14:29:29 -0400 Subject: [PATCH 1072/1815] Fix reload panic in socket_listener input plugin (#6218) --- .../inputs/socket_listener/socket_listener.go | 27 ++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go index d29cff582..a127a0738 100644 --- a/plugins/inputs/socket_listener/socket_listener.go +++ b/plugins/inputs/socket_listener/socket_listener.go @@ -37,6 +37,8 @@ type streamSocketListener struct { func (ssl *streamSocketListener) listen() { ssl.connections = map[string]net.Conn{} + wg := sync.WaitGroup{} + for { c, err := ssl.Accept() if err != nil { @@ -67,7 +69,11 @@ func (ssl *streamSocketListener) listen() { ssl.AddError(fmt.Errorf("unable to configure keep alive (%s): %s", ssl.ServiceAddress, err)) } - go ssl.read(c) + wg.Add(1) + go func() { + defer wg.Done() + ssl.read(c) + }() } ssl.connectionsMtx.Lock() @@ -75,6 +81,8 @@ func (ssl *streamSocketListener) listen() { c.Close() } ssl.connectionsMtx.Unlock() + + wg.Wait() } func (ssl *streamSocketListener) setKeepAlive(c net.Conn) error { @@ -169,6 +177,8 @@ type SocketListener struct { SocketMode string `toml:"socket_mode"` tlsint.ServerConfig + wg sync.WaitGroup + parsers.Parser telegraf.Accumulator io.Closer @@ -302,7 +312,12 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { } sl.Closer = ssl - go ssl.listen() + sl.wg = sync.WaitGroup{} + sl.wg.Add(1) + go func() { + defer sl.wg.Done() + ssl.listen() + }() case "udp", "udp4", "udp6", "ip", "ip4", "ip6", "unixgram": pc, err := udpListen(protocol, addr) if err != nil { @@ -336,7 +351,12 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { } sl.Closer = psl - go psl.listen() + sl.wg = sync.WaitGroup{} + sl.wg.Add(1) + go func() { + defer sl.wg.Done() + psl.listen() + }() default: return fmt.Errorf("unknown protocol '%s' in '%s'", protocol, sl.ServiceAddress) } @@ -378,6 +398,7 @@ func (sl *SocketListener) Stop() { sl.Close() sl.Closer = nil } + sl.wg.Wait() } func newSocketListener() *SocketListener { From 7a01b45e96d5d3e064be024d1a349ee39ce79f03 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 6 Aug 2019 11:36:46 -0700 Subject: [PATCH 1073/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d404d3f3a..f2e25b0ec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -76,6 +76,7 @@ - [#6200](https://github.com/influxdata/telegraf/pull/6200): Correct typo in kubernetes logsfs_available_bytes field. - [#6191](https://github.com/influxdata/telegraf/issues/6191): Skip floats that are NaN or Inf in Datadog output. +- [#6209](https://github.com/influxdata/telegraf/issues/6209): Fix reload panic in socket_listener input plugin. 
## v1.11.3 [2019-07-23] From d7b69af9cd5ece5e791516e640edf5b3fe737a24 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 6 Aug 2019 11:40:05 -0700 Subject: [PATCH 1074/1815] Add 1.11.4 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f2e25b0ec..9398c7c48 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -70,7 +70,7 @@ - [#6142](https://github.com/influxdata/telegraf/issues/6142): Fix memory error panic in mqtt input. - [#6136](https://github.com/influxdata/telegraf/issues/6136): Support Kafka 2.3.0 consumer groups. -## v1.11.4 [unreleased] +## v1.11.4 [2019-08-06] #### Bugfixes From a3a6752f04d189a8ff8e87eb8fe08152261485c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n=20L=C3=B3pez?= Date: Wed, 7 Aug 2019 00:55:06 +0200 Subject: [PATCH 1075/1815] Update the number of logical CPUs dynamically in system plugin (#6214) --- plugins/inputs/system/README.md | 2 ++ plugins/inputs/system/system.go | 9 +++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/system/README.md b/plugins/inputs/system/README.md index d5bcd7b03..8b16c1de0 100644 --- a/plugins/inputs/system/README.md +++ b/plugins/inputs/system/README.md @@ -3,6 +3,8 @@ The system plugin gathers general stats on system load, uptime, and number of users logged in. It is similar to the unix `uptime` command. +Number of CPUs is obtained from the /proc/cpuinfo file. + ### Configuration: ```toml diff --git a/plugins/inputs/system/system.go b/plugins/inputs/system/system.go index faf44f03e..82e6b6db0 100644 --- a/plugins/inputs/system/system.go +++ b/plugins/inputs/system/system.go @@ -6,12 +6,12 @@ import ( "fmt" "log" "os" - "runtime" "strings" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/shirou/gopsutil/cpu" "github.com/shirou/gopsutil/host" "github.com/shirou/gopsutil/load" ) @@ -35,11 +35,16 @@ func (_ *SystemStats) Gather(acc telegraf.Accumulator) error { return err } + numCPUs, err := cpu.Counts(true) + if err != nil { + return err + } + fields := map[string]interface{}{ "load1": loadavg.Load1, "load5": loadavg.Load5, "load15": loadavg.Load15, - "n_cpus": runtime.NumCPU(), + "n_cpus": numCPUs, } users, err := host.Users() From f88004c62b051125810979f35a54735ab961ede3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 7 Aug 2019 13:50:21 -0700 Subject: [PATCH 1076/1815] Ignore context cancelled error in docker_log (#6221) --- plugins/inputs/docker_log/docker_log.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go index f2b5b5148..6a675219f 100644 --- a/plugins/inputs/docker_log/docker_log.go +++ b/plugins/inputs/docker_log/docker_log.go @@ -227,7 +227,7 @@ func (d *DockerLogs) Gather(acc telegraf.Accumulator) error { defer d.removeFromContainerList(container.ID) err = d.tailContainerLogs(ctx, acc, container, containerName) - if err != nil { + if err != nil && err != context.Canceled { acc.AddError(err) } }(container) From 17465b0aaf2489eac8ec1a5a9b72ed32f7d2ebf1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 8 Aug 2019 10:43:51 -0700 Subject: [PATCH 1077/1815] Skip mongodb collection on error --- plugins/inputs/mongodb/mongodb_server.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index 5adc58d04..45d39d6ae 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ 
b/plugins/inputs/mongodb/mongodb_server.go
@@ -171,6 +171,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool, gather
 			}, col_stat_line)
 			if err != nil {
 				log.Println("E! Error getting col stats from " + col_name + "(" + err.Error() + ")")
+				continue
 			}
 			collection := &Collection{
 				Name: col_name,

From eb8959272d5629c60cdf0823eb19a6741caeb186 Mon Sep 17 00:00:00 2001
From: Adrián López
Date: Thu, 8 Aug 2019 19:51:03 +0200
Subject: [PATCH 1078/1815] Add telegraf and go version to the internal input
 plugin (#6216)

---
 plugins/inputs/internal/README.md        | 21 ++++++++++++---------
 plugins/inputs/internal/internal.go      |  9 +++++++++
 plugins/inputs/internal/internal_test.go |  9 ++++++---
 3 files changed, 27 insertions(+), 12 deletions(-)

diff --git a/plugins/inputs/internal/README.md b/plugins/inputs/internal/README.md
index 73f0b018e..1f7fa645c 100644
--- a/plugins/inputs/internal/README.md
+++ b/plugins/inputs/internal/README.md
@@ -42,14 +42,16 @@ agent stats collect aggregate stats on all telegraf plugins.
   - metrics_written

 internal_gather stats collect aggregate stats on all input plugins
-that are of the same input type. They are tagged with `input=<plugin_name>`.
+that are of the same input type. They are tagged with `input=<plugin_name>`,
+`version=<telegraf_version>`, and `go_version=<go_version>`.

 - internal_gather
   - gather_time_ns
   - metrics_gathered

 internal_write stats collect aggregate stats on all output plugins
-that are of the same input type. They are tagged with `output=<plugin_name>`.
+that are of the same output type. They are tagged with `output=<plugin_name>`
+and `version=<telegraf_version>`.

 - internal_write
@@ -63,7 +65,7 @@ that are of the same input type. They are tagged with `output=<plugin_name>`.

 internal_<plugin_name> are metrics which are defined on a per-plugin basis, and
 usually contain tags which differentiate each instance of a particular type of
-plugin.
+plugin, as well as the `version=<telegraf_version>` tag.

 - internal_<plugin_name>
   - individual plugin-specific fields, such as requests counts.

 ### Tags:

 All measurements for specific plugins are tagged with information relevant
-to each particular plugin.
+to each particular plugin and with `version=<telegraf_version>`.
+ ### Example Output: ``` internal_memstats,host=tyrion alloc_bytes=4457408i,sys_bytes=10590456i,pointer_lookups=7i,mallocs=17642i,frees=7473i,heap_sys_bytes=6848512i,heap_idle_bytes=1368064i,heap_in_use_bytes=5480448i,heap_released_bytes=0i,total_alloc_bytes=6875560i,heap_alloc_bytes=4457408i,heap_objects_bytes=10169i,num_gc=2i 1480682800000000000 -internal_agent,host=tyrion metrics_written=18i,metrics_dropped=0i,metrics_gathered=19i,gather_errors=0i 1480682800000000000 -internal_write,output=file,host=tyrion buffer_limit=10000i,write_time_ns=636609i,metrics_added=18i,metrics_written=18i,buffer_size=0i 1480682800000000000 -internal_gather,input=internal,host=tyrion metrics_gathered=19i,gather_time_ns=442114i 1480682800000000000 -internal_gather,input=http_listener,host=tyrion metrics_gathered=0i,gather_time_ns=167285i 1480682800000000000 -internal_http_listener,address=:8186,host=tyrion queries_received=0i,writes_received=0i,requests_received=0i,buffers_created=0i,requests_served=0i,pings_received=0i,bytes_received=0i,not_founds_served=0i,pings_served=0i,queries_served=0i,writes_served=0i 1480682800000000000 +internal_agent,host=tyrion,go_version=1.12.7,version=1.99.0 metrics_written=18i,metrics_dropped=0i,metrics_gathered=19i,gather_errors=0i 1480682800000000000 +internal_write,output=file,host=tyrion,version=1.99.0 buffer_limit=10000i,write_time_ns=636609i,metrics_added=18i,metrics_written=18i,buffer_size=0i 1480682800000000000 +internal_gather,input=internal,host=tyrion,version=1.99.0 metrics_gathered=19i,gather_time_ns=442114i 1480682800000000000 +internal_gather,input=http_listener,host=tyrion,version=1.99.0 metrics_gathered=0i,gather_time_ns=167285i 1480682800000000000 +internal_http_listener,address=:8186,host=tyrion,version=1.99.0 queries_received=0i,writes_received=0i,requests_received=0i,buffers_created=0i,requests_served=0i,pings_received=0i,bytes_received=0i,not_founds_served=0i,pings_served=0i,queries_served=0i,writes_served=0i 1480682800000000000 ``` diff --git a/plugins/inputs/internal/internal.go b/plugins/inputs/internal/internal.go index 8b5286f56..2eb8b91c9 100644 --- a/plugins/inputs/internal/internal.go +++ b/plugins/inputs/internal/internal.go @@ -2,8 +2,10 @@ package internal import ( "runtime" + "strings" "github.com/influxdata/telegraf" + inter "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/selfstat" ) @@ -54,7 +56,14 @@ func (s *Self) Gather(acc telegraf.Accumulator) error { acc.AddFields("internal_memstats", fields, map[string]string{}) } + telegrafVersion := inter.Version() + goVersion := strings.TrimPrefix(runtime.Version(), "go") + for _, m := range selfstat.Metrics() { + if m.Name() == "internal_agent" { + m.AddTag("go_version", goVersion) + } + m.AddTag("version", telegrafVersion) acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) } diff --git a/plugins/inputs/internal/internal_test.go b/plugins/inputs/internal/internal_test.go index b17c53038..4cdba9099 100644 --- a/plugins/inputs/internal/internal_test.go +++ b/plugins/inputs/internal/internal_test.go @@ -26,7 +26,8 @@ func TestSelfPlugin(t *testing.T) { "test": int64(3), }, map[string]string{ - "test": "foo", + "test": "foo", + "version": "", }, ) acc.ClearMetrics() @@ -39,7 +40,8 @@ func TestSelfPlugin(t *testing.T) { "test": int64(101), }, map[string]string{ - "test": "foo", + "test": "foo", + "version": "", }, ) acc.ClearMetrics() @@ -56,7 +58,8 @@ func TestSelfPlugin(t *testing.T) { "test_ns": int64(150), }, 
map[string]string{ - "test": "foo", + "test": "foo", + "version": "", }, ) } From 13c0ff0a7cd4137cae848e678d8964fae7a53127 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 8 Aug 2019 17:24:04 -0700 Subject: [PATCH 1079/1815] Limit number of processes in CircleCI Hopefully this will reduce the amount of memory in use. --- .circleci/config.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index df25a3749..27da00e02 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -2,6 +2,8 @@ defaults: defaults: &defaults working_directory: '/go/src/github.com/influxdata/telegraf' + environment: + GOFLAGS: -p=8 go-1_10: &go-1_10 docker: - image: 'quay.io/influxdb/telegraf-ci:1.10.8' From 23cd2f058cff4f98c472153ac7f7b5613595da23 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 9 Aug 2019 13:56:14 -0700 Subject: [PATCH 1080/1815] Document permission setup for powerdns_recursor (#6231) --- plugins/inputs/powerdns_recursor/README.md | 268 ++++++++++-------- .../powerdns_recursor/powerdns_recursor.go | 12 +- 2 files changed, 152 insertions(+), 128 deletions(-) diff --git a/plugins/inputs/powerdns_recursor/README.md b/plugins/inputs/powerdns_recursor/README.md index e653af930..09192db35 100644 --- a/plugins/inputs/powerdns_recursor/README.md +++ b/plugins/inputs/powerdns_recursor/README.md @@ -1,139 +1,163 @@ # PowerDNS Recursor Input Plugin -The powerdns recursor plugin gathers metrics about PowerDNS Recursor using unix socket. +The `powerdns_recursor` plugin gathers metrics about PowerDNS Recursor using +the unix controlsocket. -### Configuration: +### Configuration -``` -# Read metrics from one or many PowerDNS recursors +```toml [[inputs.powerdns_recursor]] - ## An array of sockets to gather stats about. - ## Specify a path to unix socket. - ## - ## If no servers are specified, then '/var/run/pdns_recursor.controlsocket' is used as the path. + ## Path to the Recursor control socket. unix_sockets = ["/var/run/pdns_recursor.controlsocket"] - ## Socket for Receive + ## Directory to create receive socket. This default is likely not writable, + ## please reference the full plugin documentation for a recommended setup. # socket_dir = "/var/run/" - ## Socket permissions + ## Socket permissions for the receive socket. # socket_mode = "0666" ``` -### Measurements & Fields: +#### Permissions + +Telegraf will need read/write access to the control socket and to the +`socket_dir`. PowerDNS will need to be able to write to the `socket_dir`. + +The setup described below was tested on a Debian Stretch system and may need +to be adapted for other systems. + +First change permissions on the controlsocket in the PowerDNS recursor +configuration, usually in `/etc/powerdns/recursor.conf`: +``` +socket-mode = 660 +``` + +Then place the `telegraf` user into the `pdns` group: +``` +usermod telegraf -a -G pdns +``` + +Since `telegraf` cannot write to the default `/var/run` socket directory, +create a subdirectory and adjust permissions for this directory so that both +users can access it.
+```sh +$ mkdir /var/run/pdns +$ chown root:pdns /var/run/pdns +$ chmod 770 /var/run/pdns +``` + +### Metrics - powerdns_recursor - - all-outqueries - - answers-slow - - answers0-1 - - answers1-10 - - answers10-100 - - answers100-1000 - - auth-zone-queries - - auth4-answers-slow - - auth4-answers0-1 - - auth4-answers1-10 - - auth4-answers10-100 - - auth4-answers100-1000 - - auth6-answers-slow - - auth6-answers0-1 - - auth6-answers1-10 - - auth6-answers10-100 - - auth6-answers100-1000 - - cache-entries - - cache-hits - - cache-misses - - case-mismatches - - chain-resends - - client-parse-errors - - concurrent-queries - - dlg-only-drops - - dnssec-queries - - dnssec-result-bogus - - dnssec-result-indeterminate - - dnssec-result-insecure - - dnssec-result-nta - - dnssec-result-secure - - dnssec-validations - - dont-outqueries - - ecs-queries - - ecs-responses - - edns-ping-matches - - edns-ping-mismatches - - failed-host-entries - - fd-usage - - ignored-packets - - ipv6-outqueries - - ipv6-questions - - malloc-bytes - - max-cache-entries - - max-mthread-stack - - max-packetcache-entries - - negcache-entries - - no-packet-error - - noedns-outqueries - - noerror-answers - - noping-outqueries - - nsset-invalidations - - nsspeeds-entries - - nxdomain-answers - - outgoing-timeouts - - outgoing4-timeouts - - outgoing6-timeouts - - over-capacity-drops - - packetcache-entries - - packetcache-hits - - packetcache-misses - - policy-drops - - policy-result-custom - - policy-result-drop - - policy-result-noaction - - policy-result-nodata - - policy-result-nxdomain - - policy-result-truncate - - qa-latency - - query-pipe-full-drops - - questions - - real-memory-usage - - resource-limits - - security-status - - server-parse-errors - - servfail-answers - - spoof-prevents - - sys-msec - - tcp-client-overflow - - tcp-clients - - tcp-outqueries - - tcp-questions - - throttle-entries - - throttled-out - - throttled-outqueries - - too-old-drops - - udp-in-errors - - udp-noport-errors - - udp-recvbuf-errors - - udp-sndbuf-errors - - unauthorized-tcp - - unauthorized-udp - - unexpected-packets - - unreachables - - uptime - - user-msec - - x-our-latency - - x-ourtime-slow - - x-ourtime0-1 - - x-ourtime1-2 - - x-ourtime16-32 - - x-ourtime2-4 - - x-ourtime4-8 - - x-ourtime8-16 + - tags: + - server + - fields: + - all-outqueries + - answers-slow + - answers0-1 + - answers1-10 + - answers10-100 + - answers100-1000 + - auth-zone-queries + - auth4-answers-slow + - auth4-answers0-1 + - auth4-answers1-10 + - auth4-answers10-100 + - auth4-answers100-1000 + - auth6-answers-slow + - auth6-answers0-1 + - auth6-answers1-10 + - auth6-answers10-100 + - auth6-answers100-1000 + - cache-entries + - cache-hits + - cache-misses + - case-mismatches + - chain-resends + - client-parse-errors + - concurrent-queries + - dlg-only-drops + - dnssec-queries + - dnssec-result-bogus + - dnssec-result-indeterminate + - dnssec-result-insecure + - dnssec-result-nta + - dnssec-result-secure + - dnssec-validations + - dont-outqueries + - ecs-queries + - ecs-responses + - edns-ping-matches + - edns-ping-mismatches + - failed-host-entries + - fd-usage + - ignored-packets + - ipv6-outqueries + - ipv6-questions + - malloc-bytes + - max-cache-entries + - max-mthread-stack + - max-packetcache-entries + - negcache-entries + - no-packet-error + - noedns-outqueries + - noerror-answers + - noping-outqueries + - nsset-invalidations + - nsspeeds-entries + - nxdomain-answers + - outgoing-timeouts + - outgoing4-timeouts + - outgoing6-timeouts + - 
over-capacity-drops + - packetcache-entries + - packetcache-hits + - packetcache-misses + - policy-drops + - policy-result-custom + - policy-result-drop + - policy-result-noaction + - policy-result-nodata + - policy-result-nxdomain + - policy-result-truncate + - qa-latency + - query-pipe-full-drops + - questions + - real-memory-usage + - resource-limits + - security-status + - server-parse-errors + - servfail-answers + - spoof-prevents + - sys-msec + - tcp-client-overflow + - tcp-clients + - tcp-outqueries + - tcp-questions + - throttle-entries + - throttled-out + - throttled-outqueries + - too-old-drops + - udp-in-errors + - udp-noport-errors + - udp-recvbuf-errors + - udp-sndbuf-errors + - unauthorized-tcp + - unauthorized-udp + - unexpected-packets + - unreachables + - uptime + - user-msec + - x-our-latency + - x-ourtime-slow + - x-ourtime0-1 + - x-ourtime1-2 + - x-ourtime16-32 + - x-ourtime2-4 + - x-ourtime4-8 + - x-ourtime8-16 -### Tags: - -- tags: `server=socket` - -### Example Output: +### Example Output ``` -$ ./telegraf --config telegraf.conf --input-filter powerdns_recursor --test -> powerdns_recursor,server=/var/run/pdns_recursor.controlsocket all-outqueries=3631810i,answers-slow=36863i,answers0-1=179612i,answers1-10=1223305i,answers10-100=1252199i,answers100-1000=408357i,auth-zone-queries=4i,auth4-answers-slow=44758i,auth4-answers0-1=59721i,auth4-answers1-10=1766787i,auth4-answers10-100=1329638i,auth4-answers100-1000=430372i,auth6-answers-slow=0i,auth6-answers0-1=0i,auth6-answers1-10=0i,auth6-answers10-100=0i,auth6-answers100-1000=0i,cache-entries=296689i,cache-hits=150654i,cache-misses=2949682i,case-mismatches=0i,chain-resends=420004i,client-parse-errors=0i,concurrent-queries=0i,dlg-only-drops=0i,dnssec-queries=152970i,dnssec-result-bogus=0i,dnssec-result-indeterminate=0i,dnssec-result-insecure=0i,dnssec-result-nta=0i,dnssec-result-secure=47i,dnssec-validations=47i,dont-outqueries=62i,ecs-queries=0i,ecs-responses=0i,edns-ping-matches=0i,edns-ping-mismatches=0i,failed-host-entries=21i,fd-usage=32i,ignored-packets=0i,ipv6-outqueries=0i,ipv6-questions=0i,malloc-bytes=0i,max-cache-entries=1000000i,max-mthread-stack=33747i,max-packetcache-entries=500000i,negcache-entries=100019i,no-packet-error=0i,noedns-outqueries=73341i,noerror-answers=25453808i,noping-outqueries=0i,nsset-invalidations=2398i,nsspeeds-entries=3966i,nxdomain-answers=3341302i,outgoing-timeouts=44384i,outgoing4-timeouts=44384i,outgoing6-timeouts=0i,over-capacity-drops=0i,packetcache-entries=78258i,packetcache-hits=25999027i,packetcache-misses=3100179i,policy-drops=0i,policy-result-custom=0i,policy-result-drop=0i,policy-result-noaction=3100336i,policy-result-nodata=0i,policy-result-nxdomain=0i,policy-result-truncate=0i,qa-latency=6553i,query-pipe-full-drops=0i,questions=29099363i,real-memory-usage=280494080i,resource-limits=0i,security-status=1i,server-parse-errors=0i,servfail-answers=304253i,spoof-prevents=0i,sys-msec=1312600i,tcp-client-overflow=0i,tcp-clients=0i,tcp-outqueries=116i,tcp-questions=133i,throttle-entries=21i,throttled-out=13296i,throttled-outqueries=13296i,too-old-drops=2i,udp-in-errors=4i,udp-noport-errors=2918i,udp-recvbuf-errors=0i,udp-sndbuf-errors=0i,unauthorized-tcp=0i,unauthorized-udp=0i,unexpected-packets=0i,unreachables=1708i,uptime=167482i,user-msec=1282640i,x-our-latency=19i,x-ourtime-slow=642i,x-ourtime0-1=3095566i,x-ourtime1-2=3401i,x-ourtime16-32=201i,x-ourtime2-4=304i,x-ourtime4-8=198i,x-ourtime8-16=24i 1533903879000000000 +powerdns_recursor,server=/var/run/pdns_recursor.controlsocket 
all-outqueries=3631810i,answers-slow=36863i,answers0-1=179612i,answers1-10=1223305i,answers10-100=1252199i,answers100-1000=408357i,auth-zone-queries=4i,auth4-answers-slow=44758i,auth4-answers0-1=59721i,auth4-answers1-10=1766787i,auth4-answers10-100=1329638i,auth4-answers100-1000=430372i,auth6-answers-slow=0i,auth6-answers0-1=0i,auth6-answers1-10=0i,auth6-answers10-100=0i,auth6-answers100-1000=0i,cache-entries=296689i,cache-hits=150654i,cache-misses=2949682i,case-mismatches=0i,chain-resends=420004i,client-parse-errors=0i,concurrent-queries=0i,dlg-only-drops=0i,dnssec-queries=152970i,dnssec-result-bogus=0i,dnssec-result-indeterminate=0i,dnssec-result-insecure=0i,dnssec-result-nta=0i,dnssec-result-secure=47i,dnssec-validations=47i,dont-outqueries=62i,ecs-queries=0i,ecs-responses=0i,edns-ping-matches=0i,edns-ping-mismatches=0i,failed-host-entries=21i,fd-usage=32i,ignored-packets=0i,ipv6-outqueries=0i,ipv6-questions=0i,malloc-bytes=0i,max-cache-entries=1000000i,max-mthread-stack=33747i,max-packetcache-entries=500000i,negcache-entries=100019i,no-packet-error=0i,noedns-outqueries=73341i,noerror-answers=25453808i,noping-outqueries=0i,nsset-invalidations=2398i,nsspeeds-entries=3966i,nxdomain-answers=3341302i,outgoing-timeouts=44384i,outgoing4-timeouts=44384i,outgoing6-timeouts=0i,over-capacity-drops=0i,packetcache-entries=78258i,packetcache-hits=25999027i,packetcache-misses=3100179i,policy-drops=0i,policy-result-custom=0i,policy-result-drop=0i,policy-result-noaction=3100336i,policy-result-nodata=0i,policy-result-nxdomain=0i,policy-result-truncate=0i,qa-latency=6553i,query-pipe-full-drops=0i,questions=29099363i,real-memory-usage=280494080i,resource-limits=0i,security-status=1i,server-parse-errors=0i,servfail-answers=304253i,spoof-prevents=0i,sys-msec=1312600i,tcp-client-overflow=0i,tcp-clients=0i,tcp-outqueries=116i,tcp-questions=133i,throttle-entries=21i,throttled-out=13296i,throttled-outqueries=13296i,too-old-drops=2i,udp-in-errors=4i,udp-noport-errors=2918i,udp-recvbuf-errors=0i,udp-sndbuf-errors=0i,unauthorized-tcp=0i,unauthorized-udp=0i,unexpected-packets=0i,unreachables=1708i,uptime=167482i,user-msec=1282640i,x-our-latency=19i,x-ourtime-slow=642i,x-ourtime0-1=3095566i,x-ourtime1-2=3401i,x-ourtime16-32=201i,x-ourtime2-4=304i,x-ourtime4-8=198i,x-ourtime8-16=24i 1533903879000000000 ``` diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor.go b/plugins/inputs/powerdns_recursor/powerdns_recursor.go index bfd595597..85c7cbcca 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor.go @@ -27,14 +27,14 @@ type PowerdnsRecursor struct { var defaultTimeout = 5 * time.Second var sampleConfig = ` - ## An array of sockets to gather stats about. - ## Specify a path to unix socket. + ## Path to the Recursor control socket. unix_sockets = ["/var/run/pdns_recursor.controlsocket"] - ## Socket for Receive - #socket_dir = "/var/run/" - ## Socket permissions - #socket_mode = "0666" + ## Directory to create receive socket. This default is likely not writable, + ## please reference the full plugin documentation for a recommended setup. + # socket_dir = "/var/run/" + ## Socket permissions for the receive socket. 
+ # socket_mode = "0666" ` func (p *PowerdnsRecursor) SampleConfig() string { return sampleConfig } From 6512b7b7fb6c83b33183b83b2c064025389a821a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Th=C3=A9o?= <33425208+Theoooooo@users.noreply.github.com> Date: Sat, 10 Aug 2019 02:18:27 +0200 Subject: [PATCH 1081/1815] Document service account setup for the elasticsearch output (#6224) --- plugins/outputs/elasticsearch/README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/elasticsearch/README.md b/plugins/outputs/elasticsearch/README.md index 2ba46c87e..388183637 100644 --- a/plugins/outputs/elasticsearch/README.md +++ b/plugins/outputs/elasticsearch/README.md @@ -161,7 +161,11 @@ This plugin will format the events in the following way: ## Set the interval to check if the Elasticsearch nodes are available ## Setting to "0s" will disable the health check (not recommended in production) health_check_interval = "10s" - ## HTTP basic authentication details + ## HTTP basic authentication details. + ## If you are using authentication within your Elasticsearch cluster, + ## you need to create an account and a role with at least the manage role in the Cluster Privileges category. + ## Otherwise, your account will not be able to connect to your Elasticsearch cluster and send logs to your cluster. + ## After that, you need to add the "create_index" and "write" permissions to your specific index pattern. # username = "telegraf" # password = "mypassword" From 337a579dd0cf3f299779deae15c24961d202c112 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 9 Aug 2019 17:23:08 -0700 Subject: [PATCH 1082/1815] Move elasticsearch permission setup into new readme section --- plugins/outputs/elasticsearch/README.md | 26 ++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/plugins/outputs/elasticsearch/README.md b/plugins/outputs/elasticsearch/README.md index 388183637..cf8c4d9ca 100644 --- a/plugins/outputs/elasticsearch/README.md +++ b/plugins/outputs/elasticsearch/README.md @@ -1,10 +1,10 @@ -# Elasticsearch Output Plugin for Telegraf +# Elasticsearch Output Plugin This plugin writes to [Elasticsearch](https://www.elastic.co) via HTTP using Elastic ( It supports Elasticsearch releases from 5.x up to 7.x. -## Elasticsearch indexes and templates +### Elasticsearch indexes and templates ### Indexes per time-frame @@ -144,10 +144,9 @@ This plugin will format the events in the following way: } ``` -### Configuration: +### Configuration ```toml -# Configuration for Elasticsearch to send metrics to. [[outputs.elasticsearch]] ## The full HTTP endpoint URL for your Elasticsearch instance ## Multiple urls can be specified as part of the same cluster, @@ -162,10 +161,6 @@ This plugin will format the events in the following way: ## Setting to "0s" will disable the health check (not recommended in production) health_check_interval = "10s" ## HTTP basic authentication details. - ## If you are using authentication within your Elasticsearch cluster, - ## you need to create an account and a role with at least the manage role in the Cluster Privileges category. - ## Otherwise, your account will not be able to connect to your Elasticsearch cluster and send logs to your cluster. - ## After that, you need to add the "create_index" and "write" permissions to your specific index pattern.
# username = "telegraf" # password = "mypassword" @@ -203,7 +198,16 @@ This plugin will format the events in the following way: overwrite_template = false ``` -### Required parameters: +#### Permissions + +If you are using authentication within your Elasticsearch cluster, you need +to create an account and a role with at least the manage role in the +Cluster Privileges category. Otherwise, your account will not be able to +connect to your Elasticsearch cluster and send logs to your cluster. After +that, you need to add the "create_index" and "write" permissions to your +specific index pattern. + +#### Required parameters: * `urls`: A list containing the full HTTP URL of one or more nodes from your Elasticsearch instance. * `index_name`: The target index for metrics. You can use the date specifiers below to create indexes per time frame. @@ -218,7 +222,7 @@ This plugin will format the events in the following way: Additionally, you can specify dynamic index names by using tags with the notation ```{{tag_name}}```. This will store the metrics with different tag values in different indices. If the tag does not exist in a particular metric, the `default_tag_value` will be used instead. -### Optional parameters: +#### Optional parameters: * `timeout`: Elasticsearch client timeout, defaults to "5s" if not set. * `enable_sniffer`: Set to true to ask Elasticsearch a list of all cluster nodes, thus it is not necessary to list all nodes in the urls config option. @@ -229,7 +233,7 @@ This plugin will format the events in the following way: * `template_name`: The template name used for telegraf indexes. * `overwrite_template`: Set to true if you want telegraf to overwrite an existing template. -## Known issues +### Known issues Integer values collected that are bigger than 2^63 and smaller than 1e21 (or in this exact same window of their negative counterparts) are encoded by golang JSON encoder in decimal format and that is not fully supported by Elasticsearch dynamic field mapping. This causes the metrics with such values to be dropped in case a field mapping has not been created yet on the telegraf index.
If that's the case you will see an exception on Elasticsearch side like this: From f5a4d723822e129109ac0e7d7d479758066a1dfd Mon Sep 17 00:00:00 2001 From: Branden Rolston Date: Fri, 9 Aug 2019 17:27:59 -0700 Subject: [PATCH 1083/1815] Collect framework_offers and allocator metrics in mesos input (#5719) --- plugins/inputs/mesos/README.md | 72 +++++ plugins/inputs/mesos/mesos.go | 54 +++- plugins/inputs/mesos/mesos_test.go | 478 +++++++++++++++++------------ 3 files changed, 400 insertions(+), 204 deletions(-) diff --git a/plugins/inputs/mesos/README.md b/plugins/inputs/mesos/README.md index b18908b8a..b9a46eaa9 100644 --- a/plugins/inputs/mesos/README.md +++ b/plugins/inputs/mesos/README.md @@ -19,10 +19,12 @@ For more information, please check the [Mesos Observability Metrics](http://meso "system", "agents", "frameworks", + "framework_offers", "tasks", "messages", "evqueue", "registrar", + "allocator", ] ## A list of Mesos slaves, default is [] # slaves = [] @@ -100,6 +102,10 @@ Mesos master metric groups - master/slaves_connected - master/slaves_disconnected - master/slaves_inactive + - master/slave_unreachable_canceled + - master/slave_unreachable_completed + - master/slave_unreachable_scheduled + - master/slaves_unreachable - frameworks - master/frameworks_active @@ -108,6 +114,22 @@ Mesos master metric groups - master/frameworks_inactive - master/outstanding_offers +- framework offers + - master/frameworks/subscribed + - master/frameworks/calls_total + - master/frameworks/calls + - master/frameworks/events_total + - master/frameworks/events + - master/frameworks/operations_total + - master/frameworks/operations + - master/frameworks/tasks/active + - master/frameworks/tasks/terminal + - master/frameworks/offers/sent + - master/frameworks/offers/accepted + - master/frameworks/offers/declined + - master/frameworks/offers/rescinded + - master/frameworks/roles/suppressed + - tasks - master/tasks_error - master/tasks_failed @@ -117,6 +139,11 @@ Mesos master metric groups - master/tasks_running - master/tasks_staging - master/tasks_starting + - master/tasks_dropped + - master/tasks_gone + - master/tasks_gone_by_operator + - master/tasks_killing + - master/tasks_unreachable - messages - master/invalid_executor_to_framework_messages @@ -155,11 +182,17 @@ Mesos master metric groups - master/task_lost/source_master/reason_slave_removed - master/task_lost/source_slave/reason_executor_terminated - master/valid_executor_to_framework_messages + - master/invalid_operation_status_update_acknowledgements + - master/messages_operation_status_update_acknowledgement + - master/messages_reconcile_operations + - master/messages_suppress_offers + - master/valid_operation_status_update_acknowledgements - evqueue - master/event_queue_dispatches - master/event_queue_http_requests - master/event_queue_messages + - master/operator_event_stream_subscribers - registrar - registrar/state_fetch_ms @@ -172,6 +205,45 @@ Mesos master metric groups - registrar/state_store_ms/p99 - registrar/state_store_ms/p999 - registrar/state_store_ms/p9999 + - registrar/state_store_ms/count + - registrar/log/ensemble_size + - registrar/log/recovered + - registrar/queued_operations + - registrar/registry_size_bytes + +- allocator + - allocator/allocation_run_ms + - allocator/allocation_run_ms/count + - allocator/allocation_run_ms/max + - allocator/allocation_run_ms/min + - allocator/allocation_run_ms/p50 + - allocator/allocation_run_ms/p90 + - allocator/allocation_run_ms/p95 + - allocator/allocation_run_ms/p99 + - 
allocator/allocation_run_ms/p999 + - allocator/allocation_run_ms/p9999 + - allocator/allocation_runs + - allocator/allocation_run_latency_ms + - allocator/allocation_run_latency_ms/count + - allocator/allocation_run_latency_ms/max + - allocator/allocation_run_latency_ms/min + - allocator/allocation_run_latency_ms/p50 + - allocator/allocation_run_latency_ms/p90 + - allocator/allocation_run_latency_ms/p95 + - allocator/allocation_run_latency_ms/p99 + - allocator/allocation_run_latency_ms/p999 + - allocator/allocation_run_latency_ms/p9999 + - allocator/roles/shares/dominant + - allocator/event_queue_dispatches + - allocator/offer_filters/roles/active + - allocator/quota/roles/resources/offered_or_allocated + - allocator/quota/roles/resources/guarantee + - allocator/resources/cpus/offered_or_allocated + - allocator/resources/cpus/total + - allocator/resources/disk/offered_or_allocated + - allocator/resources/disk/total + - allocator/resources/mem/offered_or_allocated + - allocator/resources/mem/total Mesos slave metric groups - resources diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 9190ceae8..3e0e25691 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -42,7 +42,7 @@ type Mesos struct { } var allMetrics = map[Role][]string{ - MASTER: {"resources", "master", "system", "agents", "frameworks", "tasks", "messages", "evqueue", "registrar"}, + MASTER: {"resources", "master", "system", "agents", "frameworks", "framework_offers", "tasks", "messages", "evqueue", "registrar", "allocator"}, SLAVE: {"resources", "agent", "system", "executors", "tasks", "messages"}, } @@ -58,10 +58,12 @@ var sampleConfig = ` "system", "agents", "frameworks", + "framework_offers", "tasks", "messages", "evqueue", "registrar", + "allocator", ] ## A list of Mesos slaves, default is [] # slaves = [] @@ -305,6 +307,10 @@ func getMetrics(role Role, group string) []string { "master/slaves_connected", "master/slaves_disconnected", "master/slaves_inactive", + "master/slave_unreachable_canceled", + "master/slave_unreachable_completed", + "master/slave_unreachable_scheduled", + "master/slaves_unreachable", } m["frameworks"] = []string{ @@ -315,6 +321,12 @@ func getMetrics(role Role, group string) []string { "master/outstanding_offers", } + // framework_offers and allocator metrics have unpredictable names, so they can't be listed here. + // These empty groups are included to prevent the "unknown metrics group" info log below. + // filterMetrics() filters these metrics by looking for names with the corresponding prefix. 
+ m["framework_offers"] = []string{} + m["allocator"] = []string{} + m["tasks"] = []string{ "master/tasks_error", "master/tasks_failed", @@ -324,6 +336,11 @@ func getMetrics(role Role, group string) []string { "master/tasks_running", "master/tasks_staging", "master/tasks_starting", + "master/tasks_dropped", + "master/tasks_gone", + "master/tasks_gone_by_operator", + "master/tasks_killing", + "master/tasks_unreachable", } m["messages"] = []string{ @@ -363,12 +380,18 @@ func getMetrics(role Role, group string) []string { "master/task_lost/source_master/reason_slave_removed", "master/task_lost/source_slave/reason_executor_terminated", "master/valid_executor_to_framework_messages", + "master/invalid_operation_status_update_acknowledgements", + "master/messages_operation_status_update_acknowledgement", + "master/messages_reconcile_operations", + "master/messages_suppress_offers", + "master/valid_operation_status_update_acknowledgements", } m["evqueue"] = []string{ "master/event_queue_dispatches", "master/event_queue_http_requests", "master/event_queue_messages", + "master/operator_event_stream_subscribers", } m["registrar"] = []string{ @@ -382,6 +405,11 @@ func getMetrics(role Role, group string) []string { "registrar/state_store_ms/p99", "registrar/state_store_ms/p999", "registrar/state_store_ms/p9999", + "registrar/log/ensemble_size", + "registrar/log/recovered", + "registrar/queued_operations", + "registrar/registry_size_bytes", + "registrar/state_store_ms/count", } } else if role == SLAVE { m["resources"] = []string{ @@ -477,9 +505,27 @@ func (m *Mesos) filterMetrics(role Role, metrics *map[string]interface{}) { } for _, k := range metricsDiff(role, selectedMetrics) { - for _, v := range getMetrics(role, k) { - if _, ok = (*metrics)[v]; ok { - delete((*metrics), v) + switch k { + // allocator and framework_offers metrics have unpredictable names, so we have to identify them by name prefix. + case "allocator": + for m := range *metrics { + if strings.HasPrefix(m, "allocator/") { + delete((*metrics), m) + } + } + case "framework_offers": + for m := range *metrics { + if strings.HasPrefix(m, "master/frameworks/") || strings.HasPrefix(m, "frameworks/") { + delete((*metrics), m) + } + } + + // All other metrics have predictable names. We can use getMetrics() to retrieve them. 
+ default: + for _, v := range getMetrics(role, k) { + if _, ok = (*metrics)[v]; ok { + delete((*metrics), v) + } } } } diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index 905adb6e3..066d5b971 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -8,6 +8,7 @@ import ( "net/http/httptest" "net/url" "os" + "strings" "testing" "github.com/influxdata/telegraf/testutil" @@ -27,194 +28,262 @@ func randUUID() string { return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) } +// master metrics that will be returned by generateMetrics() +var masterMetricNames []string = []string{ + // resources + "master/cpus_percent", + "master/cpus_used", + "master/cpus_total", + "master/cpus_revocable_percent", + "master/cpus_revocable_total", + "master/cpus_revocable_used", + "master/disk_percent", + "master/disk_used", + "master/disk_total", + "master/disk_revocable_percent", + "master/disk_revocable_total", + "master/disk_revocable_used", + "master/gpus_percent", + "master/gpus_used", + "master/gpus_total", + "master/gpus_revocable_percent", + "master/gpus_revocable_total", + "master/gpus_revocable_used", + "master/mem_percent", + "master/mem_used", + "master/mem_total", + "master/mem_revocable_percent", + "master/mem_revocable_total", + "master/mem_revocable_used", + // master + "master/elected", + "master/uptime_secs", + // system + "system/cpus_total", + "system/load_15min", + "system/load_5min", + "system/load_1min", + "system/mem_free_bytes", + "system/mem_total_bytes", + // agents + "master/slave_registrations", + "master/slave_removals", + "master/slave_reregistrations", + "master/slave_shutdowns_scheduled", + "master/slave_shutdowns_canceled", + "master/slave_shutdowns_completed", + "master/slaves_active", + "master/slaves_connected", + "master/slaves_disconnected", + "master/slaves_inactive", + "master/slave_unreachable_canceled", + "master/slave_unreachable_completed", + "master/slave_unreachable_scheduled", + "master/slaves_unreachable", + // frameworks + "master/frameworks_active", + "master/frameworks_connected", + "master/frameworks_disconnected", + "master/frameworks_inactive", + "master/outstanding_offers", + // framework offers + "master/frameworks/marathon/abc-123/calls", + "master/frameworks/marathon/abc-123/calls/accept", + "master/frameworks/marathon/abc-123/events", + "master/frameworks/marathon/abc-123/events/error", + "master/frameworks/marathon/abc-123/offers/sent", + "master/frameworks/marathon/abc-123/operations", + "master/frameworks/marathon/abc-123/operations/create", + "master/frameworks/marathon/abc-123/roles/*/suppressed", + "master/frameworks/marathon/abc-123/subscribed", + "master/frameworks/marathon/abc-123/tasks/active/task_killing", + "master/frameworks/marathon/abc-123/tasks/active/task_dropped", + "master/frameworks/marathon/abc-123/tasks/terminal/task_dropped", + "master/frameworks/marathon/abc-123/unknown/unknown", // test case for unknown metric type + // tasks + "master/tasks_error", + "master/tasks_failed", + "master/tasks_finished", + "master/tasks_killed", + "master/tasks_lost", + "master/tasks_running", + "master/tasks_staging", + "master/tasks_starting", + "master/tasks_dropped", + "master/tasks_gone", + "master/tasks_gone_by_operator", + "master/tasks_killing", + "master/tasks_unreachable", + // messages + "master/invalid_executor_to_framework_messages", + "master/invalid_framework_to_executor_messages", + "master/invalid_status_update_acknowledgements", + 
"master/invalid_status_updates", + "master/dropped_messages", + "master/messages_authenticate", + "master/messages_deactivate_framework", + "master/messages_decline_offers", + "master/messages_executor_to_framework", + "master/messages_exited_executor", + "master/messages_framework_to_executor", + "master/messages_kill_task", + "master/messages_launch_tasks", + "master/messages_reconcile_tasks", + "master/messages_register_framework", + "master/messages_register_slave", + "master/messages_reregister_framework", + "master/messages_reregister_slave", + "master/messages_resource_request", + "master/messages_revive_offers", + "master/messages_status_update", + "master/messages_status_update_acknowledgement", + "master/messages_unregister_framework", + "master/messages_unregister_slave", + "master/messages_update_slave", + "master/recovery_slave_removals", + "master/slave_removals/reason_registered", + "master/slave_removals/reason_unhealthy", + "master/slave_removals/reason_unregistered", + "master/valid_framework_to_executor_messages", + "master/valid_status_update_acknowledgements", + "master/valid_status_updates", + "master/task_lost/source_master/reason_invalid_offers", + "master/task_lost/source_master/reason_slave_removed", + "master/task_lost/source_slave/reason_executor_terminated", + "master/valid_executor_to_framework_messages", + "master/invalid_operation_status_update_acknowledgements", + "master/messages_operation_status_update_acknowledgement", + "master/messages_reconcile_operations", + "master/messages_suppress_offers", + "master/valid_operation_status_update_acknowledgements", + // evgqueue + "master/event_queue_dispatches", + "master/event_queue_http_requests", + "master/event_queue_messages", + "master/operator_event_stream_subscribers", + // registrar + "registrar/log/ensemble_size", + "registrar/log/recovered", + "registrar/queued_operations", + "registrar/registry_size_bytes", + "registrar/state_fetch_ms", + "registrar/state_store_ms", + "registrar/state_store_ms/max", + "registrar/state_store_ms/min", + "registrar/state_store_ms/p50", + "registrar/state_store_ms/p90", + "registrar/state_store_ms/p95", + "registrar/state_store_ms/p99", + "registrar/state_store_ms/p999", + "registrar/state_store_ms/p9999", + "registrar/state_store_ms/count", + // allocator + "allocator/mesos/allocation_run_ms", + "allocator/mesos/allocation_run_ms/count", + "allocator/mesos/allocation_run_ms/max", + "allocator/mesos/allocation_run_ms/min", + "allocator/mesos/allocation_run_ms/p50", + "allocator/mesos/allocation_run_ms/p90", + "allocator/mesos/allocation_run_ms/p95", + "allocator/mesos/allocation_run_ms/p99", + "allocator/mesos/allocation_run_ms/p999", + "allocator/mesos/allocation_run_ms/p9999", + "allocator/mesos/allocation_runs", + "allocator/mesos/allocation_run_latency_ms", + "allocator/mesos/allocation_run_latency_ms/count", + "allocator/mesos/allocation_run_latency_ms/max", + "allocator/mesos/allocation_run_latency_ms/min", + "allocator/mesos/allocation_run_latency_ms/p50", + "allocator/mesos/allocation_run_latency_ms/p90", + "allocator/mesos/allocation_run_latency_ms/p95", + "allocator/mesos/allocation_run_latency_ms/p99", + "allocator/mesos/allocation_run_latency_ms/p999", + "allocator/mesos/allocation_run_latency_ms/p9999", + "allocator/mesos/roles/*/shares/dominant", + "allocator/mesos/event_queue_dispatches", + "allocator/mesos/offer_filters/roles/*/active", + "allocator/mesos/quota/roles/*/resources/disk/offered_or_allocated", + 
"allocator/mesos/quota/roles/*/resources/mem/guarantee", + "allocator/mesos/quota/roles/*/resources/disk/guarantee", + "allocator/mesos/resources/cpus/offered_or_allocated", + "allocator/mesos/resources/cpus/total", + "allocator/mesos/resources/disk/offered_or_allocated", + "allocator/mesos/resources/disk/total", + "allocator/mesos/resources/mem/offered_or_allocated", + "allocator/mesos/resources/mem/total", +} + +// slave metrics that will be returned by generateMetrics() +var slaveMetricNames []string = []string{ + // resources + "slave/cpus_percent", + "slave/cpus_used", + "slave/cpus_total", + "slave/cpus_revocable_percent", + "slave/cpus_revocable_total", + "slave/cpus_revocable_used", + "slave/disk_percent", + "slave/disk_used", + "slave/disk_total", + "slave/disk_revocable_percent", + "slave/disk_revocable_total", + "slave/disk_revocable_used", + "slave/gpus_percent", + "slave/gpus_used", + "slave/gpus_total", + "slave/gpus_revocable_percent", + "slave/gpus_revocable_total", + "slave/gpus_revocable_used", + "slave/mem_percent", + "slave/mem_used", + "slave/mem_total", + "slave/mem_revocable_percent", + "slave/mem_revocable_total", + "slave/mem_revocable_used", + // agent + "slave/registered", + "slave/uptime_secs", + // system + "system/cpus_total", + "system/load_15min", + "system/load_5min", + "system/load_1min", + "system/mem_free_bytes", + "system/mem_total_bytes", + // executors + "containerizer/mesos/container_destroy_errors", + "slave/container_launch_errors", + "slave/executors_preempted", + "slave/frameworks_active", + "slave/executor_directory_max_allowed_age_secs", + "slave/executors_registering", + "slave/executors_running", + "slave/executors_terminated", + "slave/executors_terminating", + "slave/recovery_errors", + // tasks + "slave/tasks_failed", + "slave/tasks_finished", + "slave/tasks_killed", + "slave/tasks_lost", + "slave/tasks_running", + "slave/tasks_staging", + "slave/tasks_starting", + // messages + "slave/invalid_framework_messages", + "slave/invalid_status_updates", + "slave/valid_framework_messages", + "slave/valid_status_updates", +} + func generateMetrics() { masterMetrics = make(map[string]interface{}) - - metricNames := []string{ - // resources - "master/cpus_percent", - "master/cpus_used", - "master/cpus_total", - "master/cpus_revocable_percent", - "master/cpus_revocable_total", - "master/cpus_revocable_used", - "master/disk_percent", - "master/disk_used", - "master/disk_total", - "master/disk_revocable_percent", - "master/disk_revocable_total", - "master/disk_revocable_used", - "master/gpus_percent", - "master/gpus_used", - "master/gpus_total", - "master/gpus_revocable_percent", - "master/gpus_revocable_total", - "master/gpus_revocable_used", - "master/mem_percent", - "master/mem_used", - "master/mem_total", - "master/mem_revocable_percent", - "master/mem_revocable_total", - "master/mem_revocable_used", - // master - "master/elected", - "master/uptime_secs", - // system - "system/cpus_total", - "system/load_15min", - "system/load_5min", - "system/load_1min", - "system/mem_free_bytes", - "system/mem_total_bytes", - // agents - "master/slave_registrations", - "master/slave_removals", - "master/slave_reregistrations", - "master/slave_shutdowns_scheduled", - "master/slave_shutdowns_canceled", - "master/slave_shutdowns_completed", - "master/slaves_active", - "master/slaves_connected", - "master/slaves_disconnected", - "master/slaves_inactive", - // frameworks - "master/frameworks_active", - "master/frameworks_connected", - "master/frameworks_disconnected", 
- "master/frameworks_inactive", - "master/outstanding_offers", - // tasks - "master/tasks_error", - "master/tasks_failed", - "master/tasks_finished", - "master/tasks_killed", - "master/tasks_lost", - "master/tasks_running", - "master/tasks_staging", - "master/tasks_starting", - // messages - "master/invalid_executor_to_framework_messages", - "master/invalid_framework_to_executor_messages", - "master/invalid_status_update_acknowledgements", - "master/invalid_status_updates", - "master/dropped_messages", - "master/messages_authenticate", - "master/messages_deactivate_framework", - "master/messages_decline_offers", - "master/messages_executor_to_framework", - "master/messages_exited_executor", - "master/messages_framework_to_executor", - "master/messages_kill_task", - "master/messages_launch_tasks", - "master/messages_reconcile_tasks", - "master/messages_register_framework", - "master/messages_register_slave", - "master/messages_reregister_framework", - "master/messages_reregister_slave", - "master/messages_resource_request", - "master/messages_revive_offers", - "master/messages_status_update", - "master/messages_status_update_acknowledgement", - "master/messages_unregister_framework", - "master/messages_unregister_slave", - "master/messages_update_slave", - "master/recovery_slave_removals", - "master/slave_removals/reason_registered", - "master/slave_removals/reason_unhealthy", - "master/slave_removals/reason_unregistered", - "master/valid_framework_to_executor_messages", - "master/valid_status_update_acknowledgements", - "master/valid_status_updates", - "master/task_lost/source_master/reason_invalid_offers", - "master/task_lost/source_master/reason_slave_removed", - "master/task_lost/source_slave/reason_executor_terminated", - "master/valid_executor_to_framework_messages", - // evgqueue - "master/event_queue_dispatches", - "master/event_queue_http_requests", - "master/event_queue_messages", - // registrar - "registrar/state_fetch_ms", - "registrar/state_store_ms", - "registrar/state_store_ms/max", - "registrar/state_store_ms/min", - "registrar/state_store_ms/p50", - "registrar/state_store_ms/p90", - "registrar/state_store_ms/p95", - "registrar/state_store_ms/p99", - "registrar/state_store_ms/p999", - "registrar/state_store_ms/p9999", - } - - for _, k := range metricNames { + for _, k := range masterMetricNames { masterMetrics[k] = rand.Float64() } slaveMetrics = make(map[string]interface{}) - - metricNames = []string{ - // resources - "slave/cpus_percent", - "slave/cpus_used", - "slave/cpus_total", - "slave/cpus_revocable_percent", - "slave/cpus_revocable_total", - "slave/cpus_revocable_used", - "slave/disk_percent", - "slave/disk_used", - "slave/disk_total", - "slave/disk_revocable_percent", - "slave/disk_revocable_total", - "slave/disk_revocable_used", - "slave/gpus_percent", - "slave/gpus_used", - "slave/gpus_total", - "slave/gpus_revocable_percent", - "slave/gpus_revocable_total", - "slave/gpus_revocable_used", - "slave/mem_percent", - "slave/mem_used", - "slave/mem_total", - "slave/mem_revocable_percent", - "slave/mem_revocable_total", - "slave/mem_revocable_used", - // agent - "slave/registered", - "slave/uptime_secs", - // system - "system/cpus_total", - "system/load_15min", - "system/load_5min", - "system/load_1min", - "system/mem_free_bytes", - "system/mem_total_bytes", - // executors - "containerizer/mesos/container_destroy_errors", - "slave/container_launch_errors", - "slave/executors_preempted", - "slave/frameworks_active", - "slave/executor_directory_max_allowed_age_secs", - 
"slave/executors_registering", - "slave/executors_running", - "slave/executors_terminated", - "slave/executors_terminating", - "slave/recovery_errors", - // tasks - "slave/tasks_failed", - "slave/tasks_finished", - "slave/tasks_killed", - "slave/tasks_lost", - "slave/tasks_running", - "slave/tasks_staging", - "slave/tasks_starting", - // messages - "slave/invalid_framework_messages", - "slave/invalid_status_updates", - "slave/valid_framework_messages", - "slave/valid_status_updates", - } - - for _, k := range metricNames { + for _, k := range slaveMetricNames { slaveMetrics[k] = rand.Float64() } @@ -296,7 +365,7 @@ func TestMesosMaster(t *testing.T) { func TestMasterFilter(t *testing.T) { m := Mesos{ MasterCols: []string{ - "resources", "master", "registrar", + "resources", "master", "registrar", "allocator", }, } b := []string{ @@ -306,6 +375,26 @@ func TestMasterFilter(t *testing.T) { m.filterMetrics(MASTER, &masterMetrics) + // Assert expected metrics are present. + for _, v := range m.MasterCols { + for _, x := range getMetrics(MASTER, v) { + if _, ok := masterMetrics[x]; !ok { + t.Errorf("Didn't find key %s, it should present.", x) + } + } + } + // m.MasterCols includes "allocator", so allocator metrics should be present. + // allocator metrics have unpredictable names, so we can't rely on the list of metrics returned from + // getMetrics(). We have to find them by checking name prefixes. + for _, x := range masterMetricNames { + if strings.HasPrefix(x, "allocator/") { + if _, ok := masterMetrics[x]; !ok { + t.Errorf("Didn't find key %s, it should be present.", x) + } + } + } + + // Assert unexpected metrics are not present. for _, v := range b { for _, x := range getMetrics(MASTER, v) { if _, ok := masterMetrics[x]; ok { @@ -313,11 +402,12 @@ func TestMasterFilter(t *testing.T) { } } } - for _, v := range m.MasterCols { - for _, x := range getMetrics(MASTER, v) { - if _, ok := masterMetrics[x]; !ok { - t.Errorf("Didn't find key %s, it should present.", x) - } + // m.MasterCols does not include "framework_offers", so framework_offers metrics should not be present. + // framework_offers metrics have unpredictable names, so we can't rely on the list of metrics returned from + // getMetrics(). We have to find them by checking name prefixes. 
+ for k := range masterMetrics { + if strings.HasPrefix(k, "master/frameworks/") || strings.HasPrefix(k, "frameworks/") { + t.Errorf("Found key %s, it should be gone.", k) } } } @@ -339,18 +429,6 @@ func TestMesosSlave(t *testing.T) { } acc.AssertContainsFields(t, "mesos", slaveMetrics) - - // expectedFields := make(map[string]interface{}, len(slaveTaskMetrics["statistics"].(map[string]interface{}))+1) - // for k, v := range slaveTaskMetrics["statistics"].(map[string]interface{}) { - // expectedFields[k] = v - // } - // expectedFields["executor_id"] = slaveTaskMetrics["executor_id"] - - // acc.AssertContainsTaggedFields( - // t, - // "mesos_tasks", - // expectedFields, - // map[string]string{"server": "127.0.0.1", "framework_id": slaveTaskMetrics["framework_id"].(string)}) } func TestSlaveFilter(t *testing.T) { From 23b86552fd588206f770ea429689a40f176c31c9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 9 Aug 2019 18:16:01 -0700 Subject: [PATCH 1084/1815] Update changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9398c7c48..15ce2ddda 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -60,6 +60,9 @@ - [#6049](https://github.com/influxdata/telegraf/pull/6049): Add grace period for metrics late for aggregation. - [#4435](https://github.com/influxdata/telegraf/pull/4435): Add diff and non_negative_diff to basicstats aggregator. - [#6201](https://github.com/influxdata/telegraf/pull/6201): Add device tags to smart_attributes. +- [#5719](https://github.com/influxdata/telegraf/pull/5719): Collect framework_offers and allocator metrics in mesos input. +- [#6216](https://github.com/influxdata/telegraf/pull/6216): Add telegraf and go version to the internal input plugin. +- [#6214](https://github.com/influxdata/telegraf/pull/6214): Update the number of logical CPUs dynamically in system plugin. #### Bugfixes From 5473872ac1f62e00e1c8ecc3bca0cb7fd6debfd1 Mon Sep 17 00:00:00 2001 From: Marc Venturini Date: Wed, 14 Aug 2019 01:24:44 +0800 Subject: [PATCH 1085/1815] Add configurable timeout setting to smart input (#6241) --- plugins/inputs/smart/README.md | 3 ++ plugins/inputs/smart/smart.go | 37 ++++++++++++++-------- plugins/inputs/smart/smart_test.go | 51 ++++++++++++++++-------------- 3 files changed, 54 insertions(+), 37 deletions(-) diff --git a/plugins/inputs/smart/README.md b/plugins/inputs/smart/README.md index b677bf7bd..47320aeac 100644 --- a/plugins/inputs/smart/README.md +++ b/plugins/inputs/smart/README.md @@ -60,6 +60,9 @@ smartctl -s on ## done and all found will be included except for the ## excluded in excludes. # devices = [ "/dev/ada0 -d atacam" ] + + ## Timeout for the smartctl command to complete. + # timeout = "30s" ``` ### Permissions diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go index 3e6620c8c..b17f979d3 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -119,6 +119,7 @@ type Smart struct { Excludes []string Devices []string UseSudo bool + Timeout internal.Duration } var sampleConfig = ` @@ -151,8 +152,17 @@ var sampleConfig = ` ## done and all found will be included except for the ## excluded in excludes. # devices = [ "/dev/ada0 -d atacam" ] + + ## Timeout for the smartctl command to complete. 
+ # timeout = "30s" ` +func NewSmart() *Smart { + return &Smart{ + Timeout: internal.Duration{Duration: time.Second * 30}, + } +} + func (m *Smart) SampleConfig() string { return sampleConfig } @@ -180,17 +190,17 @@ func (m *Smart) Gather(acc telegraf.Accumulator) error { } // Wrap with sudo -var runCmd = func(sudo bool, command string, args ...string) ([]byte, error) { +var runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { cmd := exec.Command(command, args...) if sudo { cmd = exec.Command("sudo", append([]string{"-n", command}, args...)...) } - return internal.CombinedOutputTimeout(cmd, time.Second*5) + return internal.CombinedOutputTimeout(cmd, timeout.Duration) } // Scan for S.M.A.R.T. devices func (m *Smart) scan() ([]string, error) { - out, err := runCmd(m.UseSudo, m.Path, "--scan") + out, err := runCmd(m.Timeout, m.UseSudo, m.Path, "--scan") if err != nil { return []string{}, fmt.Errorf("failed to run command '%s --scan': %s - %s", m.Path, err, string(out)) } @@ -226,7 +236,7 @@ func (m *Smart) getAttributes(acc telegraf.Accumulator, devices []string) { wg.Add(len(devices)) for _, device := range devices { - go gatherDisk(acc, m.UseSudo, m.Attributes, m.Path, m.Nocheck, device, &wg) + go gatherDisk(acc, m.Timeout, m.UseSudo, m.Attributes, m.Path, m.Nocheck, device, &wg) } wg.Wait() @@ -243,12 +253,12 @@ func exitStatus(err error) (int, error) { return 0, err } -func gatherDisk(acc telegraf.Accumulator, usesudo, collectAttributes bool, smartctl, nocheck, device string, wg *sync.WaitGroup) { +func gatherDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo, collectAttributes bool, smartctl, nocheck, device string, wg *sync.WaitGroup) { defer wg.Done() // smartctl 5.41 & 5.42 have are broken regarding handling of --nocheck/-n args := []string{"--info", "--health", "--attributes", "--tolerance=verypermissive", "-n", nocheck, "--format=brief"} args = append(args, strings.Split(device, " ")...) - out, e := runCmd(usesudo, smartctl, args...) + out, e := runCmd(timeout, usesudo, smartctl, args...) 
outStr := string(out) // Ignore all exit statuses except if it is a command line parse error @@ -436,14 +446,13 @@ func parseTemperature(fields, deviceFields map[string]interface{}, str string) e } func init() { - m := Smart{} - path, _ := exec.LookPath("smartctl") - if len(path) > 0 { - m.Path = path - } - m.Nocheck = "standby" - inputs.Add("smart", func() telegraf.Input { - return &m + m := NewSmart() + path, _ := exec.LookPath("smartctl") + if len(path) > 0 { + m.Path = path + } + m.Nocheck = "standby" + return m }) } diff --git a/plugins/inputs/smart/smart_test.go b/plugins/inputs/smart/smart_test.go index 0b030366d..d66a31fea 100644 --- a/plugins/inputs/smart/smart_test.go +++ b/plugins/inputs/smart/smart_test.go @@ -7,19 +7,22 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestGatherAttributes(t *testing.T) { - s := &Smart{ - Path: "smartctl", - Attributes: true, - } + s := NewSmart() + s.Path = "smartctl" + s.Attributes = true + + assert.Equal(t, time.Second*30, s.Timeout.Duration) + var acc testutil.Accumulator - runCmd = func(sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { if len(args) > 0 { if args[0] == "--scan" { return []byte(mockScanData), nil @@ -326,10 +329,12 @@ func TestGatherAttributes(t *testing.T) { } func TestGatherNoAttributes(t *testing.T) { - s := &Smart{ - Path: "smartctl", - Attributes: false, - } + s := NewSmart() + s.Path = "smartctl" + s.Attributes = false + + assert.Equal(t, time.Second*30, s.Timeout.Duration) + // overwriting exec commands with mock commands var acc testutil.Accumulator @@ -374,7 +379,7 @@ func TestExcludedDev(t *testing.T) { } func TestGatherSATAInfo(t *testing.T) { - runCmd = func(sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(hgstSATAInfoData), nil } @@ -384,13 +389,13 @@ func TestGatherSATAInfo(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, true, true, "", "", "", wg) + gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) assert.Equal(t, 101, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(20), acc.NMetrics(), "Wrong number of metrics gathered") } func TestGatherSATAInfo65(t *testing.T) { - runCmd = func(sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(hgstSATAInfoData65), nil } @@ -400,13 +405,13 @@ func TestGatherSATAInfo65(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, true, true, "", "", "", wg) + gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) assert.Equal(t, 91, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(18), acc.NMetrics(), "Wrong number of metrics gathered") } func TestGatherHgstSAS(t *testing.T) { - runCmd = func(sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(hgstSASInfoData), nil } @@ -416,13 +421,13 @@ func TestGatherHgstSAS(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, true, true, "", "", "", wg) + gatherDisk(acc, 
internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) assert.Equal(t, 6, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(4), acc.NMetrics(), "Wrong number of metrics gathered") } func TestGatherHtSAS(t *testing.T) { - runCmd = func(sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(htSASInfoData), nil } @@ -432,13 +437,13 @@ func TestGatherHtSAS(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, true, true, "", "", "", wg) + gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) assert.Equal(t, 5, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(3), acc.NMetrics(), "Wrong number of metrics gathered") } func TestGatherSSD(t *testing.T) { - runCmd = func(sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(ssdInfoData), nil } @@ -448,13 +453,13 @@ func TestGatherSSD(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, true, true, "", "", "", wg) + gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) assert.Equal(t, 105, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(26), acc.NMetrics(), "Wrong number of metrics gathered") } func TestGatherSSDRaid(t *testing.T) { - runCmd = func(sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(ssdRaidInfoData), nil } @@ -464,13 +469,13 @@ func TestGatherSSDRaid(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, true, true, "", "", "", wg) + gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) assert.Equal(t, 74, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(15), acc.NMetrics(), "Wrong number of metrics gathered") } func TestGatherNvme(t *testing.T) { - runCmd = func(sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(nvmeInfoData), nil } @@ -480,7 +485,7 @@ func TestGatherNvme(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, true, true, "", "", "", wg) + gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) expected := []telegraf.Metric{ testutil.MustMetric("smart_device", From e36639b15d597b56157c431bfc91464b0e9db52a Mon Sep 17 00:00:00 2001 From: Matthew Crenshaw <3420325+sgtsquiggs@users.noreply.github.com> Date: Wed, 14 Aug 2019 13:17:38 -0400 Subject: [PATCH 1086/1815] Add memory_usage field to procstat input plugin (#6249) --- plugins/inputs/procstat/process.go | 1 + plugins/inputs/procstat/procstat.go | 5 +++++ plugins/inputs/procstat/procstat_test.go | 4 ++++ 3 files changed, 10 insertions(+) diff --git a/plugins/inputs/procstat/process.go b/plugins/inputs/procstat/process.go index 7e8c4859d..7d3a9431d 100644 --- a/plugins/inputs/procstat/process.go +++ b/plugins/inputs/procstat/process.go @@ -21,6 +21,7 @@ type Process interface { NumFDs() (int32, error) NumThreads() (int32, error) Percent(interval time.Duration) (float64, error) + MemoryPercent() (float32, error) Times() (*cpu.TimesStat, error) RlimitUsage(bool) ([]process.RlimitStat, error) Username() (string, error) diff --git 
a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 5bbb11d45..995aa5bdd 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -245,6 +245,11 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) { fields[prefix+"memory_locked"] = mem.Locked } + mem_perc, err := proc.MemoryPercent() + if err == nil { + fields[prefix+"memory_usage"] = mem_perc + } + rlims, err := proc.RlimitUsage(true) if err == nil { for _, rlim := range rlims { diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index bf03f7599..22c8abb89 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -148,6 +148,10 @@ func (p *testProc) Percent(interval time.Duration) (float64, error) { return 0, nil } +func (p *testProc) MemoryPercent() (float32, error) { + return 0, nil +} + func (p *testProc) Times() (*cpu.TimesStat, error) { return &cpu.TimesStat{}, nil } From 71da67ec54e1ed25a60c617f743998b283da5975 Mon Sep 17 00:00:00 2001 From: Chris Goller Date: Wed, 14 Aug 2019 16:04:57 -0500 Subject: [PATCH 1087/1815] Add darwin (macOS) builds to the release (#6259) This will build a darwin tar.gz package for macOS. This allows people an easy way to download releases from github directly. --- README.md | 1 + scripts/build.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/README.md b/README.md index fd4c4fc40..395975052 100644 --- a/README.md +++ b/README.md @@ -62,6 +62,7 @@ version. ### Nightly Builds These builds are generated from the master branch: +- [telegraf-nightly_darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz) - [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb) - [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb) - [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm) diff --git a/scripts/build.py b/scripts/build.py index dbca3a50d..14545b9eb 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -88,12 +88,14 @@ targets = { } supported_builds = { + 'darwin': [ "amd64" ], "windows": [ "amd64", "i386" ], "linux": [ "amd64", "i386", "armhf", "armel", "arm64", "static_amd64", "s390x", "mipsel", "mips"], "freebsd": [ "amd64", "i386" ] } supported_packages = { + "darwin": [ "tar" ], "linux": [ "deb", "rpm", "tar" ], "windows": [ "zip" ], "freebsd": [ "tar" ] From 87d0e5c221925a172b23f2e02b020fb89095180f Mon Sep 17 00:00:00 2001 From: Chris Goller Date: Wed, 14 Aug 2019 16:20:59 -0500 Subject: [PATCH 1088/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 15ce2ddda..d096fb0b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -63,6 +63,7 @@ - [#5719](https://github.com/influxdata/telegraf/pull/5719): Collect framework_offers and allocator metrics in mesos input. - [#6216](https://github.com/influxdata/telegraf/pull/6216): Add telegraf and go version to the internal input plugin. - [#6214](https://github.com/influxdata/telegraf/pull/6214): Update the number of logical CPUs dynamically in system plugin. +- [#6259](https://github.com/influxdata/telegraf/pull/6259): Add darwin (macOS) builds to the release. 
#### Bugfixes

From 4ab29817a49851e82082022e4145e007b0a78138 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 14 Aug 2019 16:56:45 -0700
Subject: [PATCH 1089/1815] Add troubleshooting section to nvidia_smi input

---
 plugins/inputs/nvidia_smi/README.md | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/plugins/inputs/nvidia_smi/README.md b/plugins/inputs/nvidia_smi/README.md
index 7fe0c077a..8afa74538 100644
--- a/plugins/inputs/nvidia_smi/README.md
+++ b/plugins/inputs/nvidia_smi/README.md
@@ -53,6 +53,13 @@ The below query could be used to alert on the average temperature of your GP
 SELECT mean("temperature_gpu") FROM "nvidia_smi" WHERE time > now() - 5m GROUP BY time(1m), "index", "name", "host"
 ```

+### Troubleshooting
+
+As the `telegraf` user run the following command. Adjust the path to `nvidia-smi` if customized.
+```
+/usr/bin/nvidia-smi --format=noheader,nounits,csv --query-gpu=fan.speed,memory.total,memory.used,memory.free,pstate,temperature.gpu,name,uuid,compute_mode,utilization.gpu,utilization.memory,index,power.draw,pcie.link.gen.current,pcie.link.width.current,encoder.stats.sessionCount,encoder.stats.averageFps,encoder.stats.averageLatency,clocks.current.graphics,clocks.current.sm,clocks.current.memory,clocks.current.video
+```
+
 ### Example Output
 ```
 nvidia_smi,compute_mode=Default,host=8218cf,index=0,name=GeForce\ GTX\ 1070,pstate=P2,uuid=GPU-823bc202-6279-6f2c-d729-868a30f14d96 fan_speed=100i,memory_free=7563i,memory_total=8112i,memory_used=549i,temperature_gpu=53i,utilization_gpu=100i,utilization_memory=90i 1523991122000000000

From 5e0c63f2e6cf699cd1a1e7f125e90c65b519afad Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 14 Aug 2019 16:59:02 -0700
Subject: [PATCH 1090/1815] Fix and improve error handling in mongodb collection stats (#6230)

---
 plugins/inputs/mongodb/README.md         |   4 +
 plugins/inputs/mongodb/mongodb_server.go | 105 ++++++++++++-----------
 2 files changed, 60 insertions(+), 49 deletions(-)

diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md
index c6c4abc15..e1e4988e8 100644
--- a/plugins/inputs/mongodb/README.md
+++ b/plugins/inputs/mongodb/README.md
@@ -44,6 +44,10 @@ Telegraf logs similar to:
 Error in input [mongodb]: not authorized on admin to execute command { serverStatus: 1, recordStats: 0 }
 ```

+Some permission-related errors are logged at debug level. You can check these
+messages by setting `debug = true` in the agent section of the configuration or
+by running Telegraf with the `--debug` argument.
+
 ### Metrics:

 - mongodb
diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go
index 45d39d6ae..4df14c014 100644
--- a/plugins/inputs/mongodb/mongodb_server.go
+++ b/plugins/inputs/mongodb/mongodb_server.go
@@ -31,6 +31,14 @@ func IsAuthorization(err error) bool {
 	return strings.Contains(err.Error(), "not authorized")
 }

+func authLogLevel(err error) string {
+	if IsAuthorization(err) {
+		return "D!"
+	} else {
+		return "E!"
+	}
+}
+
 func (s *Server) gatherOplogStats() *OplogStats {
 	stats := &OplogStats{}
 	localdb := s.Session.DB("local")
@@ -44,22 +52,14 @@ func (s *Server) gatherOplogStats() *OplogStats {
 			if err == mgo.ErrNotFound {
 				continue
 			}
-			if IsAuthorization(err) {
-				log.Println("D! Error getting first oplog entry (" + err.Error() + ")")
-			} else {
-				log.Println("E! 
Error getting first oplog entry (" + err.Error() + ")") - } + log.Printf("%s [inputs.mongodb] Error getting first oplog entry: %v", authLogLevel(err), err) return stats } if err := localdb.C(collection_name).Find(query).Sort("-$natural").Limit(1).One(&op_last); err != nil { if err == mgo.ErrNotFound || IsAuthorization(err) { continue } - if IsAuthorization(err) { - log.Println("D! Error getting first oplog entry (" + err.Error() + ")") - } else { - log.Println("E! Error getting first oplog entry (" + err.Error() + ")") - } + log.Printf("%s [inputs.mongodb] Error getting first oplog entry: %v", authLogLevel(err), err) return stats } } @@ -70,6 +70,45 @@ func (s *Server) gatherOplogStats() *OplogStats { return stats } +func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error) { + names, err := s.Session.DatabaseNames() + if err != nil { + return nil, err + } + + results := &ColStats{} + for _, db_name := range names { + if stringInSlice(db_name, colStatsDbs) || len(colStatsDbs) == 0 { + var colls []string + colls, err = s.Session.DB(db_name).CollectionNames() + if err != nil { + log.Printf("E! [inputs.mongodb] Error getting collection names: %v", err) + continue + } + for _, col_name := range colls { + col_stat_line := &ColStatsData{} + err = s.Session.DB(db_name).Run(bson.D{ + { + Name: "collStats", + Value: col_name, + }, + }, col_stat_line) + if err != nil { + log.Printf("%s [inputs.mongodb] Error getting col stats from %q: %v", authLogLevel(err), col_name, err) + continue + } + collection := &Collection{ + Name: col_name, + DbName: db_name, + ColStatsData: col_stat_line, + } + results.Collections = append(results.Collections, *collection) + } + } + } + return results, nil +} + func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool, gatherColStats bool, colStatsDbs []string) error { s.Session.SetMode(mgo.Eventual, true) s.Session.SetSocketTimeout(0) @@ -112,9 +151,9 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool, gather }, &resultShards) if err != nil { if IsAuthorization(err) { - log.Println("D! Error getting database shard stats (" + err.Error() + ")") + log.Printf("D! [inputs.mongodb] Error getting database shard stats: %v", err) } else { - log.Println("E! Error getting database shard stats (" + err.Error() + ")") + log.Printf("E! [inputs.mongodb] Error getting database shard stats: %v", err) } } @@ -125,7 +164,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool, gather names := []string{} names, err = s.Session.DatabaseNames() if err != nil { - log.Println("E! Error getting database names (" + err.Error() + ")") + log.Printf("E! [inputs.mongodb] Error getting database names: %v", err) } for _, db_name := range names { db_stat_line := &DbStatsData{} @@ -136,7 +175,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool, gather }, }, db_stat_line) if err != nil { - log.Println("E! Error getting db stats from " + db_name + "(" + err.Error() + ")") + log.Printf("E! [inputs.mongodb] Error getting db stats from %q: %v", db_name, err) } db := &Db{ Name: db_name, @@ -147,41 +186,9 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool, gather } } - result_col_stats := &ColStats{} - if gatherColStats == true { - names := []string{} - names, err = s.Session.DatabaseNames() - if err != nil { - log.Println("E! 
Error getting database names (" + err.Error() + ")")
-		}
-		for _, db_name := range names {
-			if stringInSlice(db_name, colStatsDbs) || len(colStatsDbs) == 0 {
-				var colls []string
-				colls, err = s.Session.DB(db_name).CollectionNames()
-				if err != nil {
-					log.Println("E! Error getting collection names (" + err.Error() + ")")
-				}
-				for _, col_name := range colls {
-					col_stat_line := &ColStatsData{}
-					err = s.Session.DB(db_name).Run(bson.D{
-						{
-							Name:  "collStats",
-							Value: col_name,
-						},
-					}, col_stat_line)
-					if err != nil {
-						log.Println("E! Error getting col stats from " + col_name + "(" + err.Error() + ")")
-						continue
-					}
-					collection := &Collection{
-						Name:         col_name,
-						DbName:       db_name,
-						ColStatsData: col_stat_line,
-					}
-					result_col_stats.Collections = append(result_col_stats.Collections, *collection)
-				}
-			}
-		}
+	result_col_stats, err := s.gatherCollectionStats(colStatsDbs)
+	if err != nil {
+		return err
 	}

 	result := &MongoStatus{

From 27555950195a220c494c9760170419d5ec167d22 Mon Sep 17 00:00:00 2001
From: Pontus Rydin
Date: Wed, 14 Aug 2019 20:03:33 -0400
Subject: [PATCH 1091/1815] Add support for custom attributes to vsphere input (#5971)

---
 plugins/inputs/vsphere/README.md       |  17 ++-
 plugins/inputs/vsphere/client.go       |  15 +++
 plugins/inputs/vsphere/endpoint.go     | 173 +++++++++++++++++++++----
 plugins/inputs/vsphere/finder.go       |   5 +-
 plugins/inputs/vsphere/vsphere.go      |  18 ++-
 plugins/inputs/vsphere/vsphere_test.go |  34 +----
 6 files changed, 205 insertions(+), 57 deletions(-)

diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md
index ae7cdc37b..7689f45da 100644
--- a/plugins/inputs/vsphere/README.md
+++ b/plugins/inputs/vsphere/README.md
@@ -118,9 +118,13 @@ vm_metric_exclude = [ "*" ]
     "storageAdapter.write.average",
     "sys.uptime.latest",
   ]
+  ## Collect IP addresses? Valid values are "ipv4" and "ipv6"
+  # ip_addresses = ["ipv6", "ipv4" ]
+
   # host_metric_exclude = [] ## Nothing excluded by default
   # host_instances = true ## true by default
+
   ## Clusters
   # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
   # cluster_metric_include = [] ## if omitted or empty, all metrics are collected
@@ -173,6 +177,17 @@ vm_metric_exclude = [ "*" ]
   ## the plugin. Setting this flag to "false" will send values as floats to
   ## preserve the full precision when averaging takes place.
   # use_int_samples = true

+  ## Custom attributes from vCenter can be very useful for queries in order to slice the
+  ## metrics along different dimensions and for forming ad-hoc relationships. They are disabled
+  ## by default, since they can add a considerable amount of tags to the resulting metrics. To
+  ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
+  ## to select the attributes you want to include.
+  # custom_attribute_include = []
+  # custom_attribute_exclude = ["*"] # Default is to exclude everything

   ## Optional SSL Config
   # ssl_ca = "/path/to/cafile"
@@ -241,7 +256,7 @@ to a file system. A vSphere inventory has a structure similar to this:

 #### Using Inventory Paths
 Using familiar UNIX-style paths, one could select e.g. VM2 with the path ```/DC0/vm/VM2```.
-Often, we want to select a group of resource, such as all the VMs in a folder. We could use the path ```/DC0/vm/Folder1/*``` for that.
+Often, we want to select a group of resources, such as all the VMs in a folder. We could use the path ```/DC0/vm/Folder1/*``` for that.
 Another possibility is to select objects using a partial name, such as ```/DC0/vm/Folder1/hadoop*``` yielding all VMs in Folder1 with a name starting with "hadoop".
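As an editorial illustration of how these path patterns feed the plugin's include options, a hypothetical configuration scoped to a single datacenter might look like the sketch below (the datacenter name and vCenter URL are examples only):

```toml
[[inputs.vsphere]]
  vcenters = [ "https://vcenter.local/sdk" ]
  ## Collect only clusters and datastores under datacenter DC0.
  cluster_include = [ "/DC0/host/**" ]
  datastore_include = [ "/DC0/datastore/**" ]
```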
diff --git a/plugins/inputs/vsphere/client.go b/plugins/inputs/vsphere/client.go
index 0d78cac01..b514813ab 100644
--- a/plugins/inputs/vsphere/client.go
+++ b/plugins/inputs/vsphere/client.go
@@ -305,3 +305,18 @@ func (c *Client) ListResources(ctx context.Context, root *view.ContainerView, kind
 	defer cancel1()
 	return root.Retrieve(ctx1, kind, ps, dst)
 }
+
+func (c *Client) GetCustomFields(ctx context.Context) (map[int32]string, error) {
+	ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout)
+	defer cancel1()
+	cfm := object.NewCustomFieldsManager(c.Client.Client)
+	fields, err := cfm.Field(ctx1)
+	if err != nil {
+		return nil, err
+	}
+	r := make(map[int32]string)
+	for _, f := range fields {
+		r[f.Key] = f.Name
+	}
+	return r, nil
+}
diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go
index 27bad51ca..c361754ab 100644
--- a/plugins/inputs/vsphere/endpoint.go
+++ b/plugins/inputs/vsphere/endpoint.go
@@ -26,6 +26,10 @@ import (

 var isolateLUN = regexp.MustCompile(".*/([^/]+)/?$")

+var isIPv4 = regexp.MustCompile("^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$")
+
+var isIPv6 = regexp.MustCompile("^(?:[A-Fa-f0-9]{0,4}:){1,7}[A-Fa-f0-9]{1,4}$")
+
 const metricLookback = 3 // Number of time periods to look back at for non-realtime metrics

 const rtMetricLookback = 3 // Number of time periods to look back at for realtime metrics

 const maxMetadataSamples = 100 // Number of resources to sample for metric metadata

 // Endpoint is a high-level representation of a connected vCenter endpoint. It is backed by the lower
 // level Client type.
 type Endpoint struct {
-	Parent          *VSphere
-	URL             *url.URL
-	resourceKinds   map[string]*resourceKind
-	hwMarks         *TSCache
-	lun2ds          map[string]string
-	discoveryTicker *time.Ticker
-	collectMux      sync.RWMutex
-	initialized     bool
-	clientFactory   *ClientFactory
-	busy            sync.Mutex
+	Parent            *VSphere
+	URL               *url.URL
+	resourceKinds     map[string]*resourceKind
+	hwMarks           *TSCache
+	lun2ds            map[string]string
+	discoveryTicker   *time.Ticker
+	collectMux        sync.RWMutex
+	initialized       bool
+	clientFactory     *ClientFactory
+	busy              sync.Mutex
+	customFields      map[int32]string
+	customAttrFilter  filter.Filter
+	customAttrEnabled bool
 }

 type resourceKind struct {
@@ -80,12 +87,14 @@ type metricEntry struct {
 type objectMap map[string]objectRef

 type objectRef struct {
-	name      string
-	altID     string
-	ref       types.ManagedObjectReference
-	parentRef *types.ManagedObjectReference //Pointer because it must be nillable
-	guest     string
-	dcname    string
+	name         string
+	altID        string
+	ref          types.ManagedObjectReference
+	parentRef    *types.ManagedObjectReference //Pointer because it must be nillable
+	guest        string
+	dcname       string
+	customValues map[string]string
+	lookup       map[string]string
 }

 func (e *Endpoint) getParent(obj *objectRef, res *resourceKind) (*objectRef, bool) {
@@ -101,12 +110,14 @@ func (e *Endpoint) getParent(obj *objectRef, res *resourceKind) (*objectRef, boo
 // as parameters.
func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, error) { e := Endpoint{ - URL: url, - Parent: parent, - hwMarks: NewTSCache(1 * time.Hour), - lun2ds: make(map[string]string), - initialized: false, - clientFactory: NewClientFactory(ctx, url, parent), + URL: url, + Parent: parent, + hwMarks: NewTSCache(1 * time.Hour), + lun2ds: make(map[string]string), + initialized: false, + clientFactory: NewClientFactory(ctx, url, parent), + customAttrFilter: newFilterOrPanic(parent.CustomAttributeInclude, parent.CustomAttributeExclude), + customAttrEnabled: anythingEnabled(parent.CustomAttributeExclude), } e.resourceKinds = map[string]*resourceKind{ @@ -259,6 +270,20 @@ func (e *Endpoint) initalDiscovery(ctx context.Context) { } func (e *Endpoint) init(ctx context.Context) error { + client, err := e.clientFactory.GetClient(ctx) + if err != nil { + return err + } + + // Initial load of custom field metadata + if e.customAttrEnabled { + fields, err := client.GetCustomFields(ctx) + if err != nil { + log.Println("W! [inputs.vsphere] Could not load custom field metadata") + } else { + e.customFields = fields + } + } if e.Parent.ObjectDiscoveryInterval.Duration > 0 { @@ -427,6 +452,16 @@ func (e *Endpoint) discover(ctx context.Context) error { } } + // Load custom field metadata + var fields map[int32]string + if e.customAttrEnabled { + fields, err = client.GetCustomFields(ctx) + if err != nil { + log.Println("W! [inputs.vsphere] Could not load custom field metadata") + fields = nil + } + } + // Atomically swap maps e.collectMux.Lock() defer e.collectMux.Unlock() @@ -436,6 +471,10 @@ func (e *Endpoint) discover(ctx context.Context) error { } e.lun2ds = l2d + if fields != nil { + e.customFields = fields + } + sw.Stop() SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": "instance-total"}, numRes) return nil @@ -609,14 +648,77 @@ func getVMs(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap } guest := "unknown" uuid := "" + lookup := make(map[string]string) + + // Extract host name + if r.Guest != nil && r.Guest.HostName != "" { + lookup["guesthostname"] = r.Guest.HostName + } + + // Collect network information + for _, net := range r.Guest.Net { + if net.DeviceConfigId == -1 { + continue + } + if net.IpConfig == nil || net.IpConfig.IpAddress == nil { + continue + } + ips := make(map[string][]string) + for _, ip := range net.IpConfig.IpAddress { + addr := ip.IpAddress + for _, ipType := range e.Parent.IpAddresses { + if !(ipType == "ipv4" && isIPv4.MatchString(addr) || + ipType == "ipv6" && isIPv6.MatchString(addr)) { + continue + } + + // By convention, we want the preferred addresses to appear first in the array. + if _, ok := ips[ipType]; !ok { + ips[ipType] = make([]string, 0) + } + if ip.State == "preferred" { + ips[ipType] = append([]string{addr}, ips[ipType]...) + } else { + ips[ipType] = append(ips[ipType], addr) + } + } + } + for ipType, ipList := range ips { + lookup["nic/"+strconv.Itoa(int(net.DeviceConfigId))+"/"+ipType] = strings.Join(ipList, ",") + } + } + // Sometimes Config is unknown and returns a nil pointer - // if r.Config != nil { guest = cleanGuestID(r.Config.GuestId) uuid = r.Config.Uuid } + cvs := make(map[string]string) + if e.customAttrEnabled { + for _, cv := range r.Summary.CustomValue { + val := cv.(*types.CustomFieldStringValue) + if val.Value == "" { + continue + } + key, ok := e.customFields[val.Key] + if !ok { + log.Printf("W! [inputs.vsphere] Metadata for custom field %d not found. 
Skipping", val.Key) + continue + } + if e.customAttrFilter.Match(key) { + cvs[key] = val.Value + } + } + } m[r.ExtensibleManagedObject.Reference().Value] = objectRef{ - name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Runtime.Host, guest: guest, altID: uuid} + name: r.Name, + ref: r.ExtensibleManagedObject.Reference(), + parentRef: r.Runtime.Host, + guest: guest, + altID: uuid, + customValues: cvs, + lookup: lookup, + } } return m, nil } @@ -1032,6 +1134,9 @@ func (e *Endpoint) populateTags(objectRef *objectRef, resourceType string, resou if objectRef.guest != "" { t["guest"] = objectRef.guest } + if gh := objectRef.lookup["guesthostname"]; gh != "" { + t["guesthostname"] = gh + } if c, ok := e.resourceKinds["cluster"].objects[parent.parentRef.Value]; ok { t["clustername"] = c.name } @@ -1062,6 +1167,17 @@ func (e *Endpoint) populateTags(objectRef *objectRef, resourceType string, resou t["disk"] = cleanDiskTag(instance) } else if strings.HasPrefix(name, "net.") { t["interface"] = instance + + // Add IP addresses to NIC data. + if resourceType == "vm" && objectRef.lookup != nil { + key := "nic/" + t["interface"] + "/" + if ip, ok := objectRef.lookup[key+"ipv6"]; ok { + t["ipv6"] = ip + } + if ip, ok := objectRef.lookup[key+"ipv4"]; ok { + t["ipv4"] = ip + } + } } else if strings.HasPrefix(name, "storageAdapter.") { t["adapter"] = instance } else if strings.HasPrefix(name, "storagePath.") { @@ -1076,6 +1192,15 @@ func (e *Endpoint) populateTags(objectRef *objectRef, resourceType string, resou // default t["instance"] = v.Instance } + + // Fill in custom values if they exist + if objectRef.customValues != nil { + for k, v := range objectRef.customValues { + if v != "" { + t[k] = v + } + } + } } func (e *Endpoint) makeMetricIdentifier(prefix, metric string) (string, string) { diff --git a/plugins/inputs/vsphere/finder.go b/plugins/inputs/vsphere/finder.go index 599655402..228a942d9 100644 --- a/plugins/inputs/vsphere/finder.go +++ b/plugins/inputs/vsphere/finder.go @@ -231,8 +231,9 @@ func init() { } addFields = map[string][]string{ - "HostSystem": {"parent"}, - "VirtualMachine": {"runtime.host", "config.guestId", "config.uuid", "runtime.powerState"}, + "HostSystem": {"parent"}, + "VirtualMachine": {"runtime.host", "config.guestId", "config.uuid", "runtime.powerState", + "summary.customValue", "guest.net", "guest.hostName"}, "Datastore": {"parent", "info"}, "ClusterComputeResource": {"parent"}, "Datacenter": {"parent"}, diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go index d64b5273d..2f9f08cc6 100644 --- a/plugins/inputs/vsphere/vsphere.go +++ b/plugins/inputs/vsphere/vsphere.go @@ -40,7 +40,10 @@ type VSphere struct { DatastoreMetricExclude []string DatastoreInclude []string Separator string + CustomAttributeInclude []string + CustomAttributeExclude []string UseIntSamples bool + IpAddresses []string MaxQueryObjects int MaxQueryMetrics int @@ -155,6 +158,8 @@ var sampleConfig = ` "storageAdapter.write.average", "sys.uptime.latest", ] + ## Collect IP addresses? Valid values are "ipv4" and "ipv6" + # ip_addresses = ["ipv6", "ipv4" ] # host_metric_exclude = [] ## Nothing excluded by default # host_instances = true ## true by default @@ -173,7 +178,7 @@ var sampleConfig = ` datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. 
  # datacenter_instances = false ## false by default for Datastores only

-  ## Plugin Settings 
+  ## Plugin Settings
   ## separator character to use for measurement and field names (default: "_")
   # separator = "_"
@@ -208,6 +213,14 @@ var sampleConfig = `
   ## preserve the full precision when averaging takes place.
   # use_int_samples = true

+  ## Custom attributes from vCenter can be very useful for queries in order to slice the
+  ## metrics along different dimensions and for forming ad-hoc relationships. They are disabled
+  ## by default, since they can add a considerable amount of tags to the resulting metrics. To
+  ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
+  ## to select the attributes you want to include.
+  # custom_attribute_include = []
+  # custom_attribute_exclude = ["*"]
+
   ## Optional SSL Config
   # ssl_ca = "/path/to/cafile"
   # ssl_cert = "/path/to/certfile"
@@ -321,7 +334,10 @@ func init() {
 		DatastoreMetricExclude: nil,
 		DatastoreInclude:       []string{"/*/datastore/**"},
 		Separator:              "_",
+		CustomAttributeInclude: []string{},
+		CustomAttributeExclude: []string{"*"},
 		UseIntSamples:          true,
+		IpAddresses:            []string{},

 		MaxQueryObjects: 256,
 		MaxQueryMetrics: 256,
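To make the custom attribute options above concrete, here is a minimal sketch of a configuration that collects a single attribute as a tag (the "Owner" attribute is a hypothetical example assumed to be defined in vCenter):

```toml
[[inputs.vsphere]]
  vcenters = [ "https://vcenter.local/sdk" ]
  ## Turn attribute collection on, but keep only "Owner".
  custom_attribute_exclude = []
  custom_attribute_include = [ "Owner" ]
```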
diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go
index 73956b542..08e4405b3 100644
--- a/plugins/inputs/vsphere/vsphere_test.go
+++ b/plugins/inputs/vsphere/vsphere_test.go
@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"regexp"
 	"sort"
-	"strings"
 	"sync"
 	"sync/atomic"
 	"testing"
@@ -256,34 +255,6 @@ func TestThrottledExecutor(t *testing.T) {
 	require.Equal(t, int64(5), max, "Wrong number of goroutines spawned")
 }

-func TestTimeout(t *testing.T) {
-	// Don't run test on 32-bit machines due to bug in simulator.
-	// https://github.com/vmware/govmomi/issues/1330
-	var i int
-	if unsafe.Sizeof(i) < 8 {
-		return
-	}
-
-	m, s, err := createSim()
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer m.Remove()
-	defer s.Close()
-
-	v := defaultVSphere()
-	var acc testutil.Accumulator
-	v.Vcenters = []string{s.URL.String()}
-	v.Timeout = internal.Duration{Duration: 1 * time.Nanosecond}
-	require.NoError(t, v.Start(nil)) // We're not using the Accumulator, so it can be nil.
-	defer v.Stop()
-	err = v.Gather(&acc)
-
-	// The accumulator must contain exactly one error and it must be a deadline exceeded.
-	require.Equal(t, 1, len(acc.Errors))
-	require.True(t, strings.Contains(acc.Errors[0].Error(), "context deadline exceeded"))
-}
-
 func TestMaxQuery(t *testing.T) {
 	// Don't run test on 32-bit machines due to bug in simulator.
@@ -414,6 +385,11 @@ func TestFinder(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, 2, len(vm))

+	vm = []mo.VirtualMachine{}
+	err = f.Find(ctx, "VirtualMachine", "/DC0/**/DC0_H0_VM*", &vm)
+	require.NoError(t, err)
+	require.Equal(t, 2, len(vm))
+
 	vm = []mo.VirtualMachine{}
 	err = f.Find(ctx, "VirtualMachine", "/**/vm/**", &vm)
 	require.NoError(t, err)

From ffe135c7fe9d460cfc6f61a4748c6fe886cd87f2 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 14 Aug 2019 17:04:05 -0700
Subject: [PATCH 1092/1815] Update github.com/go-sql-driver/mysql to 1.4.1 (#6250)

---
 Gopkg.lock | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/Gopkg.lock b/Gopkg.lock
index 72ca0ede3..7ad06dccd 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -474,12 +474,12 @@
   version = "v6.12.0"

 [[projects]]
-  digest = "1:c07de423ca37dc2765396d6971599ab652a339538084b9b58c9f7fc533b28525"
+  digest = "1:e692d16fdfbddb94e9e4886aaf6c08bdbae5cb4ac80651445de9181b371c6e46"
   name = "github.com/go-sql-driver/mysql"
   packages = ["."]
   pruneopts = ""
-  revision = "d523deb1b23d913de5bdada721a6071e71283618"
-  version = "v1.4.0"
+  revision = "72cd26f257d44c1114970e19afddcd812016007e"
+  version = "v1.4.1"

 [[projects]]
   digest = "1:9ab1b1c637d7c8f49e39d8538a650d7eb2137b076790cff69d160823b505964c"

From 5e06e56785fa849de569accbcdf2e869ac1ae665 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 14 Aug 2019 17:05:34 -0700
Subject: [PATCH 1093/1815] Fix persistent session in mqtt_consumer (#6236)

---
 plugins/inputs/mqtt_consumer/README.md        |  39 +--
 plugins/inputs/mqtt_consumer/mqtt_consumer.go | 141 ++++++---
 .../mqtt_consumer/mqtt_consumer_test.go       | 287 +++++++++++++-----
 3 files changed, 316 insertions(+), 151 deletions(-)

diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md
index da3ce43f5..53476cb3d 100644
--- a/plugins/inputs/mqtt_consumer/README.md
+++ b/plugins/inputs/mqtt_consumer/README.md
@@ -3,13 +3,20 @@ The [MQTT][mqtt] consumer plugin reads from the specified MQTT topics
 and creates metrics using one of the supported [input data formats][].

-### Configuration:
+### Configuration

 ```toml
 [[inputs.mqtt_consumer]]
   ## MQTT broker URLs to be used. The format should be scheme://host:port,
   ## schema can be tcp, ssl, or ws.
-  servers = ["tcp://localhost:1883"]
+  servers = ["tcp://127.0.0.1:1883"]
+
+  ## Topics that will be subscribed to.
+  topics = [
+    "telegraf/host01/cpu",
+    "telegraf/+/mem",
+    "sensors/#",
+  ]

   ## QoS policy for messages
   ## 0 = at most once
   ## 1 = at least once
   ## 2 = exactly once
   ##
   ## When using a QoS of 1 or 2, you should enable persistent_session to allow
   ## resuming unacknowledged messages.
-  qos = 0
+  # qos = 0

   ## Connection timeout for initial connection in seconds
-  connection_timeout = "30s"
+  # connection_timeout = "30s"

   ## Maximum messages to read from the broker that have not been written by an
   ## output. For best throughput set based on the number of metrics within
@@ -33,21 +40,17 @@ and creates metrics using one of the supported [input data formats][].
   ## waiting until the next flush_interval.
   # max_undelivered_messages = 1000

-  ## Topics to subscribe to
-  topics = [
-    "telegraf/host01/cpu",
-    "telegraf/+/mem",
-    "sensors/#",
-  ]
+  ## Persistent session disables clearing of the client session on connection.
+  ## In order for this option to work you must also set client_id to identify
+  ## the client. To receive messages that arrived while the client is offline,
+  ## also set the qos option to 1 or 2 and don't forget to also set the QoS when
+  ## publishing.
+  # persistent_session = false

-  # if true, messages that can't be delivered while the subscriber is offline
-  # will be delivered when it comes back (such as on service restart).
-  # NOTE: if true, client_id MUST be set
-  persistent_session = false
-  # If empty, a random client ID will be generated.
-  client_id = ""
+  ## If unset, a random client ID will be generated.
+  # client_id = ""

-  ## username and password to connect MQTT server.
+  ## Username and password to connect MQTT server.
   # username = "telegraf"
   # password = "metricsmetricsmetricsmetrics"
@@ -65,7 +68,7 @@ and creates metrics using one of the supported [input data formats][].
   data_format = "influx"
 ```

-### Tags:
+### Metrics

 - All measurements are tagged with the incoming topic, ie `topic=telegraf/host01/cpu`
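As a concrete illustration of the session semantics described in this README, a minimal configuration that can resume undelivered messages after a restart might look like the following sketch (the client_id value is an arbitrary example):

```toml
[[inputs.mqtt_consumer]]
  servers = ["tcp://127.0.0.1:1883"]
  topics = ["sensors/#"]
  ## QoS 1 or 2 plus a stable client_id lets the broker retain the session.
  qos = 1
  persistent_session = true
  client_id = "telegraf-01"
  data_format = "influx"
```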
diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go
index da556159e..8a6d0d4de 100644
--- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go
+++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go
@@ -33,6 +33,15 @@ const (
 	Connected
 )

+type Client interface {
+	Connect() mqtt.Token
+	SubscribeMultiple(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token
+	AddRoute(topic string, callback mqtt.MessageHandler)
+	Disconnect(quiesce uint)
+}
+
+type ClientFactory func(o *mqtt.ClientOptions) Client
+
 type MQTTConsumer struct {
 	Servers []string
 	Topics  []string
@@ -51,12 +60,13 @@ type MQTTConsumer struct {
 	ClientID string `toml:"client_id"`
 	tls.ClientConfig

-	client     mqtt.Client
-	acc        telegraf.TrackingAccumulator
-	state      ConnectionState
-	subscribed bool
-	sem        semaphore
-	messages   map[telegraf.TrackingID]bool
+	clientFactory ClientFactory
+	client        Client
+	opts          *mqtt.ClientOptions
+	acc           telegraf.TrackingAccumulator
+	state         ConnectionState
+	sem           semaphore
+	messages      map[telegraf.TrackingID]bool

 	ctx    context.Context
 	cancel context.CancelFunc
 }

 var sampleConfig = `
   ## MQTT broker URLs to be used. The format should be scheme://host:port,
   ## schema can be tcp, ssl, or ws.
-  servers = ["tcp://localhost:1883"]
+  servers = ["tcp://127.0.0.1:1883"]
+
+  ## Topics that will be subscribed to.
+  topics = [
+    "telegraf/host01/cpu",
+    "telegraf/+/mem",
+    "sensors/#",
+  ]

   ## QoS policy for messages
   ## 0 = at most once
   ## 1 = at least once
   ## 2 = exactly once
   ##
   ## When using a QoS of 1 or 2, you should enable persistent_session to allow
   ## resuming unacknowledged messages.
-  qos = 0
+  # qos = 0

   ## Connection timeout for initial connection in seconds
-  connection_timeout = "30s"
+  # connection_timeout = "30s"

   ## Maximum messages to read from the broker that have not been written by an
   ## output. For best throughput set based on the number of metrics within
   ## each message and the size of the output's metric_batch_size.
   ##
   ## For example, if each message from the queue contains 10 metrics and the
   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
   ## full batch is collected and the write is triggered immediately without
   ## waiting until the next flush_interval.
   # max_undelivered_messages = 1000

-  ## Topics to subscribe to
-  topics = [
-    "telegraf/host01/cpu",
-    "telegraf/+/mem",
-    "sensors/#",
-  ]
+  ## Persistent session disables clearing of the client session on connection.
+  ## In order for this option to work you must also set client_id to identify
+  ## the client. To receive messages that arrived while the client is offline,
+  ## also set the qos option to 1 or 2 and don't forget to also set the QoS when
+  ## publishing.
+  # persistent_session = false

-  # If empty, a random client ID will be generated.
-  client_id = ""
+  ## If unset, a random client ID will be generated.
+  # client_id = ""

-  ## username and password to connect MQTT server.
+  ## Username and password to connect MQTT server.
   # username = "telegraf"
   # password = "metricsmetricsmetricsmetrics"
@@ -133,7 +146,7 @@ func (m *MQTTConsumer) SetParser(parser parsers.Parser) {
 	m.parser = parser
 }

-func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
+func (m *MQTTConsumer) Init() error {
 	m.state = Disconnected

 	if m.PersistentSession && m.ClientID == "" {
@@ -148,15 +161,32 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
 		return fmt.Errorf("connection_timeout must be greater than 1s: %s", m.ConnectionTimeout.Duration)
 	}

-	m.acc = acc.WithTracking(m.MaxUndeliveredMessages)
-	m.ctx, m.cancel = context.WithCancel(context.Background())
-
 	opts, err := m.createOpts()
 	if err != nil {
 		return err
 	}

-	m.client = mqtt.NewClient(opts)
+	m.opts = opts
+
+	return nil
+}
+
+func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
+	m.state = Disconnected
+
+	m.acc = acc.WithTracking(m.MaxUndeliveredMessages)
+	m.ctx, m.cancel = context.WithCancel(context.Background())
+
+	m.client = m.clientFactory(m.opts)
+
+	// AddRoute sets up the function for handling messages. These need to be
+	// added in case we find a persistent session containing subscriptions so we
+	// know where to dispatch persisted and new messages to. In the alternate
+	// case that we need to create the subscriptions these will be replaced.
+	for _, topic := range m.Topics {
+		m.client.AddRoute(topic, m.recvMessage)
+	}
+
 	m.state = Connecting
 	m.connect()

@@ -164,7 +194,8 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
 }

 func (m *MQTTConsumer) connect() error {
-	if token := m.client.Connect(); token.Wait() && token.Error() != nil {
+	token := m.client.Connect()
+	if token.Wait() && token.Error() != nil {
 		err := token.Error()
 		m.state = Disconnected
 		return err
@@ -175,22 +206,26 @@ func (m *MQTTConsumer) connect() error {
 	m.sem = make(semaphore, m.MaxUndeliveredMessages)
 	m.messages = make(map[telegraf.TrackingID]bool)

-	// Only subscribe on first connection when using persistent sessions. On
-	// subsequent connections the subscriptions should be stored in the
-	// session, but the proper way to do this is to check the connection
-	// response to ensure a session was found.
-	if !m.PersistentSession || !m.subscribed {
-		topics := make(map[string]byte)
-		for _, topic := range m.Topics {
-			topics[topic] = byte(m.QoS)
-		}
-		subscribeToken := m.client.SubscribeMultiple(topics, m.recvMessage)
-		subscribeToken.Wait()
-		if subscribeToken.Error() != nil {
-			m.acc.AddError(fmt.Errorf("subscription error: topics: %s: %v",
-				strings.Join(m.Topics[:], ","), subscribeToken.Error()))
-		}
-		m.subscribed = true
+	// Persistent sessions should skip subscription if a session is present, as
+	// the subscriptions are stored by the server.
+	type sessionPresent interface {
+		SessionPresent() bool
+	}
+	if t, ok := token.(sessionPresent); ok && t.SessionPresent() {
+		log.Printf("D! 
[inputs.mqtt_consumer] Session found %v", m.Servers) + return nil + } + + topics := make(map[string]byte) + for _, topic := range m.Topics { + topics[topic] = byte(m.QoS) + } + + subscribeToken := m.client.SubscribeMultiple(topics, m.recvMessage) + subscribeToken.Wait() + if subscribeToken.Error() != nil { + m.acc.AddError(fmt.Errorf("subscription error: topics: %s: %v", + strings.Join(m.Topics[:], ","), subscribeToken.Error())) } return nil @@ -316,12 +351,20 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { return opts, nil } +func New(factory ClientFactory) *MQTTConsumer { + return &MQTTConsumer{ + Servers: []string{"tcp://127.0.0.1:1883"}, + ConnectionTimeout: defaultConnectionTimeout, + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + clientFactory: factory, + state: Disconnected, + } +} + func init() { inputs.Add("mqtt_consumer", func() telegraf.Input { - return &MQTTConsumer{ - ConnectionTimeout: defaultConnectionTimeout, - MaxUndeliveredMessages: defaultMaxUndeliveredMessages, - state: Disconnected, - } + return New(func(o *mqtt.ClientOptions) Client { + return mqtt.NewClient(o) + }) }) } diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index 2d17c16c3..07d2015a8 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -2,114 +2,233 @@ package mqtt_consumer import ( "testing" + "time" "github.com/eclipse/paho.mqtt.golang" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -const ( - testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257\n" - invalidMsg = "cpu_load_short,host=server01 1422568543702900257\n" -) +type FakeClient struct { + ConnectF func() mqtt.Token + SubscribeMultipleF func(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token + AddRouteF func(topic string, callback mqtt.MessageHandler) + DisconnectF func(quiesce uint) -func newTestMQTTConsumer() *MQTTConsumer { - n := &MQTTConsumer{ - Topics: []string{"telegraf"}, - Servers: []string{"localhost:1883"}, - } + connectCallCount int + subscribeCallCount int + addRouteCallCount int + disconnectCallCount int +} - return n +func (c *FakeClient) Connect() mqtt.Token { + c.connectCallCount++ + return c.ConnectF() +} + +func (c *FakeClient) SubscribeMultiple(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token { + c.subscribeCallCount++ + return c.SubscribeMultipleF(filters, callback) +} + +func (c *FakeClient) AddRoute(topic string, callback mqtt.MessageHandler) { + c.addRouteCallCount++ + c.AddRouteF(topic, callback) +} + +func (c *FakeClient) Disconnect(quiesce uint) { + c.disconnectCallCount++ + c.DisconnectF(quiesce) +} + +type FakeParser struct { +} + +// FakeParser satisfies parsers.Parser +var _ parsers.Parser = &FakeParser{} + +func (p *FakeParser) Parse(buf []byte) ([]telegraf.Metric, error) { + panic("not implemented") +} + +func (p *FakeParser) ParseLine(line string) (telegraf.Metric, error) { + panic("not implemented") +} + +func (p *FakeParser) SetDefaultTags(tags map[string]string) { + panic("not implemented") +} + +type FakeToken struct { + sessionPresent bool +} + +// FakeToken satisfies mqtt.Token +var _ mqtt.Token = &FakeToken{} + +func (t *FakeToken) Wait() bool { + return true +} + +func (t *FakeToken) WaitTimeout(time.Duration) bool { + return 
true +} + +func (t *FakeToken) Error() error { + return nil +} + +func (t *FakeToken) SessionPresent() bool { + return t.sessionPresent +} + +// Test the basic lifecycle transitions of the plugin. +func TestLifecycleSanity(t *testing.T) { + var acc testutil.Accumulator + + plugin := New(func(o *mqtt.ClientOptions) Client { + return &FakeClient{ + ConnectF: func() mqtt.Token { + return &FakeToken{} + }, + AddRouteF: func(topic string, callback mqtt.MessageHandler) { + }, + SubscribeMultipleF: func(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token { + return &FakeToken{} + }, + DisconnectF: func(quiesce uint) { + }, + } + }) + plugin.Servers = []string{"tcp://127.0.0.1"} + + parser := &FakeParser{} + plugin.SetParser(parser) + + err := plugin.Init() + require.NoError(t, err) + + err = plugin.Start(&acc) + require.NoError(t, err) + + err = plugin.Gather(&acc) + require.NoError(t, err) + + plugin.Stop() } // Test that default client has random ID func TestRandomClientID(t *testing.T) { - m1 := &MQTTConsumer{ - Servers: []string{"localhost:1883"}} - opts, err := m1.createOpts() - assert.NoError(t, err) + var err error - m2 := &MQTTConsumer{ - Servers: []string{"localhost:1883"}} - opts2, err2 := m2.createOpts() - assert.NoError(t, err2) + m1 := New(nil) + err = m1.Init() + require.NoError(t, err) - assert.NotEqual(t, opts.ClientID, opts2.ClientID) + m2 := New(nil) + err = m2.Init() + require.NoError(t, err) + + require.NotEqual(t, m1.opts.ClientID, m2.opts.ClientID) } -// Test that default client has random ID -func TestClientID(t *testing.T) { - m1 := &MQTTConsumer{ - Servers: []string{"localhost:1883"}, - ClientID: "telegraf-test", - } - opts, err := m1.createOpts() - assert.NoError(t, err) - - m2 := &MQTTConsumer{ - Servers: []string{"localhost:1883"}, - ClientID: "telegraf-test", - } - opts2, err2 := m2.createOpts() - assert.NoError(t, err2) - - assert.Equal(t, "telegraf-test", opts2.ClientID) - assert.Equal(t, "telegraf-test", opts.ClientID) -} - -// Test that Start() fails if client ID is not set but persistent is +// PersistentSession requires ClientID func TestPersistentClientIDFail(t *testing.T) { - m1 := &MQTTConsumer{ - Servers: []string{"localhost:1883"}, - PersistentSession: true, + plugin := New(nil) + plugin.PersistentSession = true + + err := plugin.Init() + require.Error(t, err) +} + +func TestAddRouteCalledForEachTopic(t *testing.T) { + client := &FakeClient{ + ConnectF: func() mqtt.Token { + return &FakeToken{} + }, + AddRouteF: func(topic string, callback mqtt.MessageHandler) { + }, + SubscribeMultipleF: func(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token { + return &FakeToken{} + }, + DisconnectF: func(quiesce uint) { + }, } - acc := testutil.Accumulator{} - err := m1.Start(&acc) - assert.Error(t, err) + plugin := New(func(o *mqtt.ClientOptions) Client { + return client + }) + plugin.Topics = []string{"a", "b"} + + err := plugin.Init() + require.NoError(t, err) + + var acc testutil.Accumulator + err = plugin.Start(&acc) + require.NoError(t, err) + + plugin.Stop() + + require.Equal(t, client.addRouteCallCount, 2) } -func mqttMsg(val string) mqtt.Message { - return &message{ - topic: "telegraf/unit_test", - payload: []byte(val), +func TestSubscribeCalledIfNoSession(t *testing.T) { + client := &FakeClient{ + ConnectF: func() mqtt.Token { + return &FakeToken{} + }, + AddRouteF: func(topic string, callback mqtt.MessageHandler) { + }, + SubscribeMultipleF: func(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token { + return 
&FakeToken{} + }, + DisconnectF: func(quiesce uint) { + }, } + plugin := New(func(o *mqtt.ClientOptions) Client { + return client + }) + plugin.Topics = []string{"b"} + + err := plugin.Init() + require.NoError(t, err) + + var acc testutil.Accumulator + err = plugin.Start(&acc) + require.NoError(t, err) + + plugin.Stop() + + require.Equal(t, client.subscribeCallCount, 1) } -// Take the message struct from the paho mqtt client library for returning -// a test message interface. -type message struct { - duplicate bool - qos byte - retained bool - topic string - messageID uint16 - payload []byte -} +func TestSubscribeNotCalledIfSession(t *testing.T) { + client := &FakeClient{ + ConnectF: func() mqtt.Token { + return &FakeToken{sessionPresent: true} + }, + AddRouteF: func(topic string, callback mqtt.MessageHandler) { + }, + SubscribeMultipleF: func(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token { + return &FakeToken{} + }, + DisconnectF: func(quiesce uint) { + }, + } + plugin := New(func(o *mqtt.ClientOptions) Client { + return client + }) + plugin.Topics = []string{"b"} -func (m *message) Duplicate() bool { - return m.duplicate -} + err := plugin.Init() + require.NoError(t, err) -func (m *message) Ack() { - return -} + var acc testutil.Accumulator + err = plugin.Start(&acc) + require.NoError(t, err) -func (m *message) Qos() byte { - return m.qos -} + plugin.Stop() -func (m *message) Retained() bool { - return m.retained -} - -func (m *message) Topic() string { - return m.topic -} - -func (m *message) MessageID() uint16 { - return m.messageID -} - -func (m *message) Payload() []byte { - return m.payload + require.Equal(t, client.subscribeCallCount, 0) } From 73914ac920e6e95763fcbc0ccf378a60046c7d04 Mon Sep 17 00:00:00 2001 From: timhallinflux Date: Fri, 16 Aug 2019 10:32:29 -0700 Subject: [PATCH 1094/1815] Add security vulnerability reporting into to contributing doc (#6268) --- CONTRIBUTING.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index badf71c12..71e2b4520 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -13,6 +13,12 @@ 1. Ensure you have added proper unit tests and documentation. 1. Open a new [pull request][]. +#### Security Vulnerability Reporting +InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our +open source projects, please responsibly disclose it by contacting security@influxdata.com. More details about +security vulnerability reporting, +including our GPG key, [can be found here](https://www.influxdata.com/how-to-report-security-vulnerabilities/). + ### GoDoc Public interfaces for inputs, outputs, processors, aggregators, metrics, From 3ed25b1269215ce0d289d49a6762f4384200ad0e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 16 Aug 2019 14:41:15 -0700 Subject: [PATCH 1095/1815] Update changelog --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d096fb0b7..095a4e117 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -64,6 +64,9 @@ - [#6216](https://github.com/influxdata/telegraf/pull/6216): Add telegraf and go version to the internal input plugin. - [#6214](https://github.com/influxdata/telegraf/pull/6214): Update the number of logical CPUs dynamically in system plugin. - [#6259](https://github.com/influxdata/telegraf/pull/6259): Add darwin (macOS) builds to the release. 
+- [#6241](https://github.com/influxdata/telegraf/pull/6241): Add configurable timeout setting to smart input.
+- [#6249](https://github.com/influxdata/telegraf/pull/6249): Add memory_usage field to procstat input plugin.
+- [#5971](https://github.com/influxdata/telegraf/pull/5971): Add support for custom attributes to vsphere input.

 #### Bugfixes

 - [#5692](https://github.com/influxdata/telegraf/issues/5692): Fix sensor read error stops reporting of all sensors in temp input.
 - [#4356](https://github.com/influxdata/telegraf/issues/4356): Fix double pct replacement in sysstat input.
 - [#6004](https://github.com/influxdata/telegraf/issues/6004): Fix race in master node detection in elasticsearch input.
 - [#6100](https://github.com/influxdata/telegraf/issues/6100): Fix SSPI authentication not working in sqlserver input.
 - [#6142](https://github.com/influxdata/telegraf/issues/6142): Fix memory error panic in mqtt input.
 - [#6136](https://github.com/influxdata/telegraf/issues/6136): Support Kafka 2.3.0 consumer groups.
+- [#6232](https://github.com/influxdata/telegraf/issues/6232): Fix persistent session in mqtt_consumer.
+
+## v1.11.5 [unreleased]
+
+- [#6250](https://github.com/influxdata/telegraf/pull/6250): Update go-sql-driver/mysql driver to 1.4.1 to address auth issues.

 ## v1.11.4 [2019-08-06]

From a079e2d569b7e279b1a6cb32f5d366a36be08008 Mon Sep 17 00:00:00 2001
From: Frank Riley
Date: Fri, 16 Aug 2019 14:44:38 -0700
Subject: [PATCH 1096/1815] Return error status from --test if any of the input plugins produced an error (#6279)

---
 agent/agent.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/agent/agent.go b/agent/agent.go
index aa3e32a43..636c4ba68 100644
--- a/agent/agent.go
+++ b/agent/agent.go
@@ -235,6 +235,9 @@ func (a *Agent) Test(ctx context.Context, waitDuration time.Duration) error {
 		a.stopServiceInputs()
 	}

+	if NErrors.Get() > 0 {
+		return fmt.Errorf("One or more input plugins had an error")
+	}
 	return nil
 }

From ed23466a5335680c7c6c826be1223872dac08a92 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Fri, 16 Aug 2019 14:53:36 -0700
Subject: [PATCH 1097/1815] Update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 095a4e117..635a5a891 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -81,6 +81,7 @@
 ## v1.11.5 [unreleased]

 - [#6250](https://github.com/influxdata/telegraf/pull/6250): Update go-sql-driver/mysql driver to 1.4.1 to address auth issues.
+- [#6279](https://github.com/influxdata/telegraf/issues/6279): Return error status from --test if input plugins produce an error.

 ## v1.11.4 [2019-08-06]

From beb788bfc65e3d5426a25ef27d04bb1ee7509672 Mon Sep 17 00:00:00 2001
From: Randy Coburn
Date: Sat, 17 Aug 2019 00:05:08 +0200
Subject: [PATCH 1098/1815] Add database_tag option to influxdb_listener to add database from query string (#6257)

---
 plugins/inputs/influxdb_listener/README.md    |  8 ++++
 .../inputs/influxdb_listener/http_listener.go | 40 +++++++++++++------
 .../influxdb_listener/http_listener_test.go   | 33 ++++++++-------
 3 files changed, 54 insertions(+), 27 deletions(-)

diff --git a/plugins/inputs/influxdb_listener/README.md b/plugins/inputs/influxdb_listener/README.md
index 8b6d2ad51..5efa6baf1 100644
--- a/plugins/inputs/influxdb_listener/README.md
+++ b/plugins/inputs/influxdb_listener/README.md
@@ -46,6 +46,14 @@ submits data to InfluxDB determines the destination database.
   tls_cert = "/etc/telegraf/cert.pem"
   tls_key = "/etc/telegraf/key.pem"

+  ## Optional tag name used to store the database name.
+  ## If the write has a database in the query string then it will be kept in this tag name.
+  ## This tag can be used in downstream outputs.
+  ## If blank (the default), this feature is off and the database will not be recorded.
+  ## If you have a tag that is the same as the one specified below, and supply a database,
+  ## the tag will be overwritten with the database supplied.
+  # database_tag = ""
+
   ## Optional username and password to accept for HTTP basic authentication.
   ## You probably want to make sure you have TLS configured above for this.
   # basic_username = "foobar"
   # basic_password = "barfoo"
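To make the new option concrete: with `database_tag = "database"` configured, a write that names a database in its query string comes back out of Telegraf tagged with that database. A sketch, assuming the listener runs on its default port 8186 (host and values are examples only):

```
# POST to http://localhost:8186/write?db=mydb with body:
cpu_load_short,host=server01 value=12.0

# emitted by Telegraf as:
cpu_load_short,host=server01,database=mydb value=12
```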
diff --git a/plugins/inputs/influxdb_listener/http_listener.go b/plugins/inputs/influxdb_listener/http_listener.go
index 7e5544786..5383fd2aa 100644
--- a/plugins/inputs/influxdb_listener/http_listener.go
+++ b/plugins/inputs/influxdb_listener/http_listener.go
@@ -37,17 +37,18 @@ const (
 type TimeFunc func() time.Time

 type HTTPListener struct {
-	ServiceAddress string
-	ReadTimeout    internal.Duration
-	WriteTimeout   internal.Duration
-	MaxBodySize    internal.Size
-	MaxLineSize    internal.Size
-	Port           int
-
+	ServiceAddress string `toml:"service_address"`
+	// Port gets pulled out of ServiceAddress
+	Port int
 	tlsint.ServerConfig

-	BasicUsername string
-	BasicPassword string
+	ReadTimeout   internal.Duration `toml:"read_timeout"`
+	WriteTimeout  internal.Duration `toml:"write_timeout"`
+	MaxBodySize   internal.Size     `toml:"max_body_size"`
+	MaxLineSize   internal.Size     `toml:"max_line_size"`
+	BasicUsername string            `toml:"basic_username"`
+	BasicPassword string            `toml:"basic_password"`
+	DatabaseTag   string            `toml:"database_tag"`

 	TimeFunc
@@ -93,6 +94,13 @@ const sampleConfig = `
   ## Maximum line size allowed to be sent in bytes.
   ## 0 means to use the default of 65536 bytes (64 kibibytes)
   max_line_size = "64KiB"
+
+  ## Optional tag name used to store the database.
+  ## If the write has a database in the query string then it will be kept in this tag name.
+  ## This tag can be used in downstream outputs.
+  ## If blank (the default), this feature is off and the database will not be recorded.
+  # database_tag = ""

   ## Set one or more allowed client CA certificate file names to
   ## enable mutually authenticated TLS connections
@@ -258,6 +266,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
 	now := h.TimeFunc()

 	precision := req.URL.Query().Get("precision")
+	db := req.URL.Query().Get("db")

 	// Handle gzip request bodies
 	body := req.Body
@@ -315,7 +324,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
 		if err == io.ErrUnexpectedEOF {
 			// finished reading the request body
-			err = h.parse(buf[:n+bufStart], now, precision)
+			err = h.parse(buf[:n+bufStart], now, precision, db)
 			if err != nil {
 				log.Println("D! "+err.Error(), bufStart+n)
 				return400 = true
@@ -346,7 +355,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
 			bufStart = 0
 			continue
 		}
-		if err := h.parse(buf[:i+1], now, precision); err != nil {
+		if err := h.parse(buf[:i+1], now, precision, db); err != nil {
 			log.Println("D! " + err.Error())
 			return400 = true
 		}
@@ -359,7 +368,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
 	}
 }

-func (h *HTTPListener) parse(b []byte, t time.Time, precision string) error {
+func (h *HTTPListener) parse(b []byte, t time.Time, precision, db string) error {
 	h.mu.Lock()
 	defer h.mu.Unlock()
@@ -371,6 +380,13 @@ func (h *HTTPListener) parse(b []byte, t time.Time, precision string) error {
 	}

 	for _, m := range metrics {
+		// Decide whether to keep the database name from the query string.
+		// If a tag has been supplied to put the db in and we actually got a db query,
+		// then we write it in. This overwrites the database tag if one was sent.
+ // This makes it behave like the influx endpoint. + if h.DatabaseTag != "" && db != "" { + m.AddTag(h.DatabaseTag, db) + } h.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) } diff --git a/plugins/inputs/influxdb_listener/http_listener_test.go b/plugins/inputs/influxdb_listener/http_listener_test.go index 964295061..6d14e6539 100644 --- a/plugins/inputs/influxdb_listener/http_listener_test.go +++ b/plugins/inputs/influxdb_listener/http_listener_test.go @@ -146,8 +146,11 @@ func TestWriteHTTPBasicAuth(t *testing.T) { require.EqualValues(t, http.StatusNoContent, resp.StatusCode) } -func TestWriteHTTP(t *testing.T) { +func TestWriteHTTPKeepDatabase(t *testing.T) { + testMsgWithDB := "cpu_load_short,host=server01,database=wrongdb value=12.0 1422568543702900257\n" + listener := newTestHTTPListener() + listener.DatabaseTag = "database" acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) @@ -162,7 +165,19 @@ func TestWriteHTTP(t *testing.T) { acc.Wait(1) acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, - map[string]string{"host": "server01"}, + map[string]string{"host": "server01", "database": "mydb"}, + ) + + // post single message to listener with a database tag in it already. It should be clobbered. + resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgWithDB))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "database": "mydb"}, ) // post multiple message to listener @@ -177,21 +192,9 @@ func TestWriteHTTP(t *testing.T) { for _, hostTag := range hostTags { acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, - map[string]string{"host": hostTag}, + map[string]string{"host": hostTag, "database": "mydb"}, ) } - - // Post a gigantic metric to the listener and verify that an error is returned: - resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 400, resp.StatusCode) - - acc.Wait(3) - acc.AssertContainsTaggedFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(12)}, - map[string]string{"host": "server01"}, - ) } // http listener should add a newline at the end of the buffer if it's not there From 50dc8d56598c7edca1534b946b31c110c8ba0602 Mon Sep 17 00:00:00 2001 From: Pavel Frolov <34869051+invine@users.noreply.github.com> Date: Sat, 17 Aug 2019 01:45:20 +0300 Subject: [PATCH 1099/1815] Add content_length metric to http_response input plugin (#6261) --- plugins/inputs/http_response/README.md | 3 +- plugins/inputs/http_response/http_response.go | 27 +++--- .../http_response/http_response_test.go | 86 ++++++++++++++++++- 3 files changed, 98 insertions(+), 18 deletions(-) diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md index 38d527fb0..8f1427534 100644 --- a/plugins/inputs/http_response/README.md +++ b/plugins/inputs/http_response/README.md @@ -61,6 +61,7 @@ This input plugin checks HTTP/HTTPS connections. 
- result ([see below](#result--result_code)) - fields: - response_time (float, seconds) + - content_length (int, response body length) - response_string_match (int, 0 = mismatch / body read error, 1 = match) - http_response_code (int, response status code) - result_type (string, deprecated in 1.6: use `result` tag and `result_code` field) @@ -85,5 +86,5 @@ This tag is used to expose network and plugin errors. HTTP errors are considered ### Example Output: ``` -http_response,method=GET,server=http://www.github.com,status_code=200,result=success http_response_code=200i,response_time=6.223266528,result_type="success",result_code=0i 1459419354977857955 +http_response,method=GET,result=success,server=http://github.com,status_code=200 content_length=87878i,http_response_code=200i,response_time=0.937655534,result_code=0i,result_type="success" 1565839598000000000 ``` diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index a9d82f13d..acab62b94 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -272,26 +272,27 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] // This function closes the response body, as // required by the net/http library - defer func() { - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - }() + defer resp.Body.Close() // Set log the HTTP response code tags["status_code"] = strconv.Itoa(resp.StatusCode) fields["http_response_code"] = resp.StatusCode + bodyBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + log.Printf("D! Failed to read body of HTTP Response : %s", err) + setResult("body_read_error", fields, tags) + fields["content_length"] = len(bodyBytes) + if h.ResponseStringMatch != "" { + fields["response_string_match"] = 0 + } + return fields, tags, nil + } + + fields["content_length"] = len(bodyBytes) + // Check the response for a regex match. if h.ResponseStringMatch != "" { - - bodyBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - log.Printf("D! 
Failed to read body of HTTP Response : %s", err) - setResult("body_read_error", fields, tags) - fields["response_string_match"] = 0 - return fields, tags, nil - } - if h.compiledStringMatch.Match(bodyBytes) { setResult("success", fields, tags) fields["response_string_match"] = 1 diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 159eaa562..44563973b 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -165,6 +165,7 @@ func TestHeaders(t *testing.T) { "result_type": "success", "result_code": 0, "response_time": nil, + "content_length": nil, } expectedTags := map[string]interface{}{ "server": nil, @@ -201,6 +202,7 @@ func TestFields(t *testing.T) { "result_type": "success", "result_code": 0, "response_time": nil, + "content_length": nil, } expectedTags := map[string]interface{}{ "server": nil, @@ -262,6 +264,7 @@ func TestInterface(t *testing.T) { "result_type": "success", "result_code": 0, "response_time": nil, + "content_length": nil, } expectedTags := map[string]interface{}{ "server": nil, @@ -297,6 +300,7 @@ func TestRedirects(t *testing.T) { "result_type": "success", "result_code": 0, "response_time": nil, + "content_length": nil, } expectedTags := map[string]interface{}{ "server": nil, @@ -362,6 +366,7 @@ func TestMethod(t *testing.T) { "result_type": "success", "result_code": 0, "response_time": nil, + "content_length": nil, } expectedTags := map[string]interface{}{ "server": nil, @@ -391,6 +396,7 @@ func TestMethod(t *testing.T) { "result_type": "success", "result_code": 0, "response_time": nil, + "content_length": nil, } expectedTags = map[string]interface{}{ "server": nil, @@ -421,6 +427,7 @@ func TestMethod(t *testing.T) { "result_type": "success", "result_code": 0, "response_time": nil, + "content_length": nil, } expectedTags = map[string]interface{}{ "server": nil, @@ -456,6 +463,7 @@ func TestBody(t *testing.T) { "result_type": "success", "result_code": 0, "response_time": nil, + "content_length": nil, } expectedTags := map[string]interface{}{ "server": nil, @@ -520,6 +528,7 @@ func TestStringMatch(t *testing.T) { "result_type": "success", "result_code": 0, "response_time": nil, + "content_length": nil, } expectedTags := map[string]interface{}{ "server": nil, @@ -556,6 +565,7 @@ func TestStringMatchJson(t *testing.T) { "result_type": "success", "result_code": 0, "response_time": nil, + "content_length": nil, } expectedTags := map[string]interface{}{ "server": nil, @@ -593,6 +603,7 @@ func TestStringMatchFail(t *testing.T) { "result_type": "response_string_mismatch", "result_code": 1, "response_time": nil, + "content_length": nil, } expectedTags := map[string]interface{}{ "server": nil, @@ -635,7 +646,7 @@ func TestTimeout(t *testing.T) { "method": "GET", "result": "timeout", } - absentFields := []string{"http_response_code", "response_time", "response_string_match"} + absentFields := []string{"http_response_code", "response_time", "content_length", "response_string_match"} absentTags := []string{"status_code"} checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags) } @@ -662,7 +673,7 @@ func TestPluginErrors(t *testing.T) { err := h.Gather(&acc) require.Error(t, err) - absentFields := []string{"http_response_code", "response_time", "response_string_match", "result_type", "result_code"} + absentFields := []string{"http_response_code", "response_time", "content_length", "response_string_match", "result_type", 
"result_code"} absentTags := []string{"status_code", "result", "server", "method"} checkOutput(t, &acc, nil, nil, absentFields, absentTags) @@ -686,6 +697,7 @@ func TestPluginErrors(t *testing.T) { "result_type": "body_read_error", "result_code": 2, "response_time": nil, + "content_length": nil, } expectedTags := map[string]interface{}{ "server": nil, @@ -719,7 +731,7 @@ func TestNetworkErrors(t *testing.T) { "method": "GET", "result": "dns_error", } - absentFields := []string{"http_response_code", "response_time", "response_string_match"} + absentFields := []string{"http_response_code", "response_time", "content_length", "response_string_match"} absentTags := []string{"status_code"} checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags) @@ -745,7 +757,73 @@ func TestNetworkErrors(t *testing.T) { "method": "GET", "result": "connection_failed", } - absentFields = []string{"http_response_code", "response_time", "response_string_match"} + absentFields = []string{"http_response_code", "response_time", "content_length", "response_string_match"} absentTags = []string{"status_code"} checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags) } + +func TestContentLength(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + URLs: []string{ts.URL + "/good"}, + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + Headers: map[string]string{ + "Content-Type": "application/json", + }, + FollowRedirects: true, + } + var acc testutil.Accumulator + err := h.Gather(&acc) + require.NoError(t, err) + + expectedFields := map[string]interface{}{ + "http_response_code": http.StatusOK, + "result_type": "success", + "result_code": 0, + "response_time": nil, + "content_length": len([]byte("hit the good page!")), + } + expectedTags := map[string]interface{}{ + "server": nil, + "method": "GET", + "status_code": "200", + "result": "success", + } + absentFields := []string{"response_string_match"} + checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) + + h = &HTTPResponse{ + URLs: []string{ts.URL + "/musthaveabody"}, + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + Headers: map[string]string{ + "Content-Type": "application/json", + }, + FollowRedirects: true, + } + acc = testutil.Accumulator{} + err = h.Gather(&acc) + require.NoError(t, err) + + expectedFields = map[string]interface{}{ + "http_response_code": http.StatusOK, + "result_type": "success", + "result_code": 0, + "response_time": nil, + "content_length": len([]byte("sent a body!")), + } + expectedTags = map[string]interface{}{ + "server": nil, + "method": "GET", + "status_code": "200", + "result": "success", + } + absentFields = []string{"response_string_match"} + checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) +} From 96fa7fa6d24bb18fd5c873ee88f88dca10c3b904 Mon Sep 17 00:00:00 2001 From: Adam Flott Date: Fri, 16 Aug 2019 18:46:48 -0400 Subject: [PATCH 1100/1815] Add cmdstat metrics to redis input (#5926) --- plugins/inputs/redis/README.md | 15 +++++++++ plugins/inputs/redis/redis.go | 52 +++++++++++++++++++++++++++++- plugins/inputs/redis/redis_test.go | 20 ++++++++++++ 3 files changed, 86 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/redis/README.md b/plugins/inputs/redis/README.md index 79122f228..38d80b591 100644 --- a/plugins/inputs/redis/README.md +++ 
b/plugins/inputs/redis/README.md
@@ -120,6 +120,13 @@ Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) a
     - expires(int, number)
     - avg_ttl(int, number)
 
+- redis_cmdstat
+    Every Redis command that has been used will have 3 new fields:
+    - calls(int, number)
+    - usec(int, microseconds)
+    - usec_per_call(float, microseconds)
+
+
 ### Tags:
 
 - All measurements have the following tags:
@@ -130,6 +137,9 @@ Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) a
 - The redis_keyspace measurement has an additional database tag:
     - database
 
+- The redis_cmdstat measurement has an additional tag:
+    - command
+
 ### Example Output:
 
 Using this configuration:
@@ -161,3 +171,8 @@ redis_keyspace:
 ```
 > redis_keyspace,database=db1,host=host,server=localhost,port=6379,replication_role=master keys=1i,expires=0i,avg_ttl=0i 1493101350000000000
 ```
+
+redis_cmdstat:
+```
+> redis_cmdstat,command=publish,host=host,port=6379,replication_role=master,server=localhost calls=68113i,usec=325146i,usec_per_call=4.77 1559227136000000000
+```
diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go
index d17e7a845..e89aabb40 100644
--- a/plugins/inputs/redis/redis.go
+++ b/plugins/inputs/redis/redis.go
@@ -37,7 +37,7 @@ type RedisClient struct {
 }
 
 func (r *RedisClient) Info() *redis.StringCmd {
-	return r.client.Info()
+	return r.client.Info("ALL")
 }
 
 func (r *RedisClient) BaseTags() map[string]string {
@@ -248,6 +248,11 @@ func gatherInfoOutput(
 			gatherKeyspaceLine(name, kline, acc, tags)
 			continue
 		}
+		if section == "Commandstats" {
+			kline := strings.TrimSpace(parts[1])
+			gatherCommandstateLine(name, kline, acc, tags)
+			continue
+		}
 		metric = name
 	}
@@ -324,6 +329,51 @@ func gatherKeyspaceLine(
 	}
 }
 
+// Parse the special cmdstat lines.
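+// Each matching line becomes a single "redis_cmdstat" point tagged with the command name; lines without the "cmdstat" prefix are skipped.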
+// Example: +// cmdstat_publish:calls=33791,usec=208789,usec_per_call=6.18 +// Tag: cmdstat=publish; Fields: calls=33791i,usec=208789i,usec_per_call=6.18 +func gatherCommandstateLine( + name string, + line string, + acc telegraf.Accumulator, + global_tags map[string]string, +) { + if !strings.HasPrefix(name, "cmdstat") { + return + } + + fields := make(map[string]interface{}) + tags := make(map[string]string) + for k, v := range global_tags { + tags[k] = v + } + tags["command"] = strings.TrimPrefix(name, "cmdstat_") + parts := strings.Split(line, ",") + for _, part := range parts { + kv := strings.Split(part, "=") + if len(kv) != 2 { + continue + } + + switch kv[0] { + case "calls": + fallthrough + case "usec": + ival, err := strconv.ParseInt(kv[1], 10, 64) + if err == nil { + fields[kv[0]] = ival + } + case "usec_per_call": + fval, err := strconv.ParseFloat(kv[1], 64) + if err == nil { + fields[kv[0]] = fval + } + } + } + acc.AddFields("redis_cmdstat", fields, tags) +} + func init() { inputs.Add("redis", func() telegraf.Input { return &Redis{} diff --git a/plugins/inputs/redis/redis_test.go b/plugins/inputs/redis/redis_test.go index 1257befca..b1c3ca3d3 100644 --- a/plugins/inputs/redis/redis_test.go +++ b/plugins/inputs/redis/redis_test.go @@ -118,6 +118,22 @@ func TestRedis_ParseMetrics(t *testing.T) { } acc.AssertContainsTaggedFields(t, "redis", fields, tags) acc.AssertContainsTaggedFields(t, "redis_keyspace", keyspaceFields, keyspaceTags) + + cmdstatSetTags := map[string]string{"host": "redis.net", "replication_role": "master", "command": "set"} + cmdstatSetFields := map[string]interface{}{ + "calls": int64(261265), + "usec": int64(1634157), + "usec_per_call": float64(6.25), + } + acc.AssertContainsTaggedFields(t, "redis_cmdstat", cmdstatSetFields, cmdstatSetTags) + + cmdstatCommandTags := map[string]string{"host": "redis.net", "replication_role": "master", "command": "command"} + cmdstatCommandFields := map[string]interface{}{ + "calls": int64(1), + "usec": int64(990), + "usec_per_call": float64(990.0), + } + acc.AssertContainsTaggedFields(t, "redis_cmdstat", cmdstatCommandFields, cmdstatCommandTags) } const testOutput = `# Server @@ -209,6 +225,10 @@ used_cpu_user:0.05 used_cpu_sys_children:0.00 used_cpu_user_children:0.00 +# Commandstats +cmdstat_set:calls=261265,usec=1634157,usec_per_call=6.25 +cmdstat_command:calls=1,usec=990,usec_per_call=990.00 + # Keyspace db0:keys=2,expires=0,avg_ttl=0 From fbfaf767f197cf48811936958401658e83ac5df6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 16 Aug 2019 15:56:37 -0700 Subject: [PATCH 1101/1815] Update changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 635a5a891..e13427f18 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,6 +67,9 @@ - [#6241](https://github.com/influxdata/telegraf/pull/6241): Add configurable timeout setting to smart input. - [#6249](https://github.com/influxdata/telegraf/pull/6249): Add memory_usage field to procstat input plugin. - [#5971](https://github.com/influxdata/telegraf/pull/5971): Add support for custom attributes to vsphere input. +- [#5926](https://github.com/influxdata/telegraf/pull/5926): Add cmdstat metrics to redis input. +- [#6261](https://github.com/influxdata/telegraf/pull/6261): Add content_length metric to http_response input plugin. +- [#6257](https://github.com/influxdata/telegraf/pull/6257): Add database_tag option to influxdb_listener to add database from query string. 
#### Bugfixes From 149d2211915c485ed0244c4913a265d000a84b28 Mon Sep 17 00:00:00 2001 From: Stanislav Putrya Date: Tue, 20 Aug 2019 01:01:01 +0200 Subject: [PATCH 1102/1815] Add capability to limit TLS versions and cipher suites (#6246) --- docs/TLS.md | 44 +++++++++ internal/tls/common.go | 34 +++++++ internal/tls/common_go112.go | 12 +++ internal/tls/config.go | 36 ++++++++ internal/tls/config_test.go | 91 +++++++++++++++++++ internal/tls/utils.go | 30 ++++++ plugins/outputs/prometheus_client/README.md | 8 ++ .../prometheus_client/prometheus_client.go | 2 + .../prometheus_client_tls_test.go | 15 ++- testutil/tls.go | 15 +++ 10 files changed, 286 insertions(+), 1 deletion(-) create mode 100644 docs/TLS.md create mode 100644 internal/tls/common.go create mode 100644 internal/tls/common_go112.go create mode 100644 internal/tls/utils.go diff --git a/docs/TLS.md b/docs/TLS.md new file mode 100644 index 000000000..0af0c384b --- /dev/null +++ b/docs/TLS.md @@ -0,0 +1,44 @@ +# TLS settings + +TLS for output plugin will be used if you provide options `tls_cert` and `tls_key`. +Settings that can be used to configure TLS: + +- `tls_cert` - path to certificate. Type: `string`. Ex. `tls_cert = "/etc/ssl/telegraf.crt"` +- `tls_key` - path to key. Type: `string`, Ex. `tls_key = "/etc/ssl/telegraf.key"` +- `tls_allowed_cacerts` - Set one or more allowed client CA certificate file names to enable mutually authenticated TLS connections. Type: `list`. Ex. `tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]` +- `tls_cipher_suites`- Define list of ciphers that will be supported. If wasn't defined default will be used. Type: `list`. Ex. `tls_cipher_suites = ["TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_GCM_SHA256", "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_CBC_SHA256", "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA"]` +- `tls_min_version` - Minimum TLS version that is acceptable. If wasn't defined default (TLS 1.0) will be used. Type: `string`. Ex. `tls_min_version = "TLS11"` +- `tls_max_version` - Maximum SSL/TLS version that is acceptable. If not set, then the maximum version supported is used, which is currently TLS 1.2 (for go < 1.12) or TLS 1.3 (for go == 1.12). Ex. 
`tls_max_version = "TLS12"` + +tls ciphers are supported: +- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +- TLS_RSA_WITH_AES_128_GCM_SHA256 +- TLS_RSA_WITH_AES_256_GCM_SHA384 +- TLS_RSA_WITH_AES_128_CBC_SHA256 +- TLS_RSA_WITH_AES_128_CBC_SHA +- TLS_RSA_WITH_AES_256_CBC_SHA +- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +- TLS_RSA_WITH_3DES_EDE_CBC_SHA +- TLS_RSA_WITH_RC4_128_SHA +- TLS_ECDHE_RSA_WITH_RC4_128_SHA +- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +- TLS_AES_128_GCM_SHA256 (only if version go1.12 was used for make build) +- TLS_AES_256_GCM_SHA384 (only if version go1.12 was used for make build) +- TLS_CHACHA20_POLY1305_SHA256 (only if version go1.12 was used for make build) + +TLS versions are supported: +- TLS10 +- TLS11 +- TLS12 +- TLS13 (only if version go1.12 was used for make build) diff --git a/internal/tls/common.go b/internal/tls/common.go new file mode 100644 index 000000000..3100a73a1 --- /dev/null +++ b/internal/tls/common.go @@ -0,0 +1,34 @@ +package tls + +import "crypto/tls" + +var tlsVersionMap = map[string]uint16{ + "TLS10": tls.VersionTLS10, + "TLS11": tls.VersionTLS11, + "TLS12": tls.VersionTLS12, +} + +var tlsCipherMap = map[string]uint16{ + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, + "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, + "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, +} diff --git a/internal/tls/common_go112.go b/internal/tls/common_go112.go new file mode 100644 index 000000000..988d6f936 --- /dev/null +++ b/internal/tls/common_go112.go @@ -0,0 +1,12 @@ +// 
+build go1.12
+
+package tls
+
+import "crypto/tls"
+
+func init() {
+	tlsVersionMap["TLS13"] = tls.VersionTLS13
+	tlsCipherMap["TLS_AES_128_GCM_SHA256"] = tls.TLS_AES_128_GCM_SHA256
+	tlsCipherMap["TLS_AES_256_GCM_SHA384"] = tls.TLS_AES_256_GCM_SHA384
+	tlsCipherMap["TLS_CHACHA20_POLY1305_SHA256"] = tls.TLS_CHACHA20_POLY1305_SHA256
+}
diff --git a/internal/tls/config.go b/internal/tls/config.go
index ce7958343..185c92cd0 100644
--- a/internal/tls/config.go
+++ b/internal/tls/config.go
@@ -5,6 +5,7 @@ import (
 	"crypto/x509"
 	"fmt"
 	"io/ioutil"
+	"strings"
 )
 
 // ClientConfig represents the standard client TLS config.
@@ -25,6 +26,9 @@ type ServerConfig struct {
 	TLSCert           string   `toml:"tls_cert"`
 	TLSKey            string   `toml:"tls_key"`
 	TLSAllowedCACerts []string `toml:"tls_allowed_cacerts"`
+	TLSCipherSuites   []string `toml:"tls_cipher_suites"`
+	TLSMinVersion     string   `toml:"tls_min_version"`
+	TLSMaxVersion     string   `toml:"tls_max_version"`
 }
 
 // TLSConfig returns a tls.Config, may be nil without error if TLS is not
@@ -97,6 +101,38 @@ func (c *ServerConfig) TLSConfig() (*tls.Config, error) {
 		}
 	}
 
+	if len(c.TLSCipherSuites) != 0 {
+		cipherSuites, err := ParseCiphers(c.TLSCipherSuites)
+		if err != nil {
+			return nil, fmt.Errorf(
+				"could not parse server cipher suites %s: %v", strings.Join(c.TLSCipherSuites, ","), err)
+		}
+		tlsConfig.CipherSuites = cipherSuites
+	}
+
+	if c.TLSMaxVersion != "" {
+		version, err := ParseTLSVersion(c.TLSMaxVersion)
+		if err != nil {
+			return nil, fmt.Errorf(
+				"could not parse tls max version %q: %v", c.TLSMaxVersion, err)
+		}
+		tlsConfig.MaxVersion = version
+	}
+
+	if c.TLSMinVersion != "" {
+		version, err := ParseTLSVersion(c.TLSMinVersion)
+		if err != nil {
+			return nil, fmt.Errorf(
+				"could not parse tls min version %q: %v", c.TLSMinVersion, err)
+		}
+		tlsConfig.MinVersion = version
+	}
+
+	if tlsConfig.MinVersion != 0 && tlsConfig.MaxVersion != 0 && tlsConfig.MinVersion > tlsConfig.MaxVersion {
+		return nil, fmt.Errorf(
+			"tls min version %q can't be greater than tls max version %q", tlsConfig.MinVersion, tlsConfig.MaxVersion)
+	}
+
 	return tlsConfig, nil
 }
diff --git a/internal/tls/config_test.go b/internal/tls/config_test.go
index 31a70d9a1..d7d75236e 100644
--- a/internal/tls/config_test.go
+++ b/internal/tls/config_test.go
@@ -123,6 +123,47 @@ func TestServerConfig(t *testing.T) {
 				TLSCert:           pki.ServerCertPath(),
 				TLSKey:            pki.ServerKeyPath(),
 				TLSAllowedCACerts: []string{pki.CACertPath()},
+				TLSCipherSuites:   []string{pki.CipherSuite()},
+				TLSMinVersion:     pki.TLSMinVersion(),
+				TLSMaxVersion:     pki.TLSMaxVersion(),
+			},
+		},
+		{
+			name: "missing tls cipher suites is okay",
+			server: tls.ServerConfig{
+				TLSCert:           pki.ServerCertPath(),
+				TLSKey:            pki.ServerKeyPath(),
+				TLSAllowedCACerts: []string{pki.CACertPath()},
+				TLSCipherSuites:   []string{pki.CipherSuite()},
+			},
+		},
+		{
+			name: "missing tls max version is okay",
+			server: tls.ServerConfig{
+				TLSCert:           pki.ServerCertPath(),
+				TLSKey:            pki.ServerKeyPath(),
+				TLSAllowedCACerts: []string{pki.CACertPath()},
+				TLSCipherSuites:   []string{pki.CipherSuite()},
+				TLSMaxVersion:     pki.TLSMaxVersion(),
+			},
+		},
+		{
+			name: "missing tls min version is okay",
+			server: tls.ServerConfig{
+				TLSCert:           pki.ServerCertPath(),
+				TLSKey:            pki.ServerKeyPath(),
+				TLSAllowedCACerts: []string{pki.CACertPath()},
+				TLSCipherSuites:   []string{pki.CipherSuite()},
+				TLSMinVersion:     pki.TLSMinVersion(),
+			},
+		},
+		{
+			name: "missing tls min/max versions is okay",
+			server: tls.ServerConfig{
+				TLSCert:           pki.ServerCertPath(),
+				TLSKey:            pki.ServerKeyPath(),
+				TLSAllowedCACerts: []string{pki.CACertPath()},
+				TLSCipherSuites:   []string{pki.CipherSuite()},
+			},
+		},
 		{
@@ -172,6 +213,56 @@ func TestServerConfig(t *testing.T) {
 			expNil: true,
 			expErr: true,
 		},
+		{
+			name: "invalid cipher suites",
+			server: tls.ServerConfig{
+				TLSCert:           pki.ServerCertPath(),
+				TLSKey:            pki.ServerKeyPath(),
+				TLSAllowedCACerts: []string{pki.CACertPath()},
+				TLSCipherSuites:   []string{pki.CACertPath()},
+			},
+			expNil: true,
+			expErr: true,
+		},
+		{
+			name: "TLS Max Version less than TLS Min version",
+			server: tls.ServerConfig{
+				TLSCert:           pki.ServerCertPath(),
+				TLSKey:            pki.ServerKeyPath(),
+				TLSAllowedCACerts: []string{pki.CACertPath()},
+				TLSCipherSuites:   []string{pki.CACertPath()},
+				TLSMinVersion:     pki.TLSMaxVersion(),
+				TLSMaxVersion:     pki.TLSMinVersion(),
+			},
+			expNil: true,
+			expErr: true,
+		},
+		{
+			name: "invalid tls min version",
+			server: tls.ServerConfig{
+				TLSCert:           pki.ServerCertPath(),
+				TLSKey:            pki.ServerKeyPath(),
+				TLSAllowedCACerts: []string{pki.CACertPath()},
+				TLSCipherSuites:   []string{pki.CipherSuite()},
+				TLSMinVersion:     pki.ServerKeyPath(),
+				TLSMaxVersion:     pki.TLSMaxVersion(),
+			},
+			expNil: true,
+			expErr: true,
+		},
+		{
+			name: "invalid tls max version",
+			server: tls.ServerConfig{
+				TLSCert:           pki.ServerCertPath(),
+				TLSKey:            pki.ServerKeyPath(),
+				TLSAllowedCACerts: []string{pki.CACertPath()},
+				TLSCipherSuites:   []string{pki.CACertPath()},
+				TLSMinVersion:     pki.TLSMinVersion(),
+				TLSMaxVersion:     pki.ServerCertPath(),
+			},
+			expNil: true,
+			expErr: true,
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
diff --git a/internal/tls/utils.go b/internal/tls/utils.go
new file mode 100644
index 000000000..560d07ee2
--- /dev/null
+++ b/internal/tls/utils.go
@@ -0,0 +1,30 @@
+package tls
+
+import (
+	"fmt"
+)
+
+// ParseCiphers maps a list of cipher suite names to their `uint16` constants from crypto/tls.
+// If any of the requested ciphers is not supported, ParseCiphers returns nil and an error.
+func ParseCiphers(ciphers []string) ([]uint16, error) {
+	suites := []uint16{}
+
+	for _, cipher := range ciphers {
+		if v, ok := tlsCipherMap[cipher]; ok {
+			suites = append(suites, v)
+		} else {
+			return nil, fmt.Errorf("unsupported cipher %q", cipher)
+		}
+	}
+
+	return suites, nil
+}
+
+// ParseTLSVersion maps a TLS version string to its `uint16` constant from crypto/tls.
+// If the version isn't supported, ParseTLSVersion returns 0 and an error.
+func ParseTLSVersion(version string) (uint16, error) {
+	if v, ok := tlsVersionMap[version]; ok {
+		return v, nil
+	}
+	return 0, fmt.Errorf("unsupported version %q", version)
+}
diff --git a/plugins/outputs/prometheus_client/README.md b/plugins/outputs/prometheus_client/README.md
index d1b4a1b0e..967c01ee6 100644
--- a/plugins/outputs/prometheus_client/README.md
+++ b/plugins/outputs/prometheus_client/README.md
@@ -40,6 +40,14 @@ This plugin starts a [Prometheus](https://prometheus.io/) Client, it exposes all
   ## enable mutually authenticated TLS connections
   # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
 
+  ## contains the minimum SSL/TLS version that is acceptable.
+  ## If not set, then TLS 1.0 is taken as the minimum.
+  # tls_min_version = "TLS11"
+
+  ## contains the maximum SSL/TLS version that is acceptable.
+  ## If not set, then the maximum supported version is used.
+  # tls_max_version = "TLS12"
+
   ## Export metric collection time.
# export_timestamp = false ``` diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 32dcdbb89..f7b2ea966 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -117,6 +117,8 @@ var sampleConfig = ` ## enable mutually authenticated TLS connections # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + # tls_cipher_suites = ["TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_GCM_SHA256", "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_CBC_SHA256", "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA"] + ## Export metric collection time. # export_timestamp = false ` diff --git a/plugins/outputs/prometheus_client/prometheus_client_tls_test.go b/plugins/outputs/prometheus_client/prometheus_client_tls_test.go index bcf6b4381..bcbb4e70e 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_tls_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_tls_test.go @@ -6,6 +6,7 @@ import ( "net/http" "testing" + inttls "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/outputs/prometheus_client" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" @@ -19,7 +20,9 @@ var configWithTLS = fmt.Sprintf(` tls_allowed_cacerts = ["%s"] tls_cert = "%s" tls_key = "%s" -`, pki.TLSServerConfig().TLSAllowedCACerts[0], pki.TLSServerConfig().TLSCert, pki.TLSServerConfig().TLSKey) + tls_cipher_suites = ["%s"] + tls_min_version = "%s" +`, pki.TLSServerConfig().TLSAllowedCACerts[0], pki.TLSServerConfig().TLSCert, pki.TLSServerConfig().TLSKey, pki.CipherSuite(), pki.TLSMaxVersion()) var configWithoutTLS = ` listen = "127.0.0.1:0" @@ -50,12 +53,22 @@ func TestWorksWithTLS(t *testing.T) { require.NoError(t, err) defer tc.Output.Close() + serverCiphers, err := inttls.ParseCiphers(tc.Output.ServerConfig.TLSCipherSuites) + require.NoError(t, err) + require.Equal(t, 1, len(serverCiphers)) + + tlsVersion, err := inttls.ParseTLSVersion(tc.Output.ServerConfig.TLSMinVersion) + require.NoError(t, err) + response, err := tc.Client.Get(tc.Output.URL()) require.NoError(t, err) require.NoError(t, err) require.Equal(t, response.StatusCode, http.StatusOK) + require.Equal(t, response.TLS.CipherSuite, serverCiphers[0]) + require.Equal(t, response.TLS.Version, tlsVersion) + tr := &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, } diff --git a/testutil/tls.go b/testutil/tls.go index 4f7fc012a..333db3838 100644 --- a/testutil/tls.go +++ b/testutil/tls.go @@ -30,6 +30,9 @@ func (p *pki) TLSServerConfig() *tls.ServerConfig { TLSAllowedCACerts: []string{p.CACertPath()}, TLSCert: p.ServerCertPath(), TLSKey: p.ServerKeyPath(), + TLSCipherSuites: []string{p.CipherSuite()}, + TLSMinVersion: p.TLSMinVersion(), + TLSMaxVersion: p.TLSMaxVersion(), } } @@ -41,6 +44,18 @@ func (p *pki) CACertPath() string { return path.Join(p.path, "cacert.pem") } +func (p *pki) CipherSuite() string { + return "TLS_RSA_WITH_3DES_EDE_CBC_SHA" +} + 
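+// TLSMinVersion returns the minimum TLS version string used by the test server configuration.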
+func (p *pki) TLSMinVersion() string { + return "TLS11" +} + +func (p *pki) TLSMaxVersion() string { + return "TLS12" +} + func (p *pki) ReadClientCert() string { return readCertificate(p.ClientCertPath()) } From d5b41cfc9aa864f4eb580dcfad478e592a1de808 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 19 Aug 2019 17:40:32 -0700 Subject: [PATCH 1103/1815] Update TLS documentation --- docs/CONFIGURATION.md | 5 ++ docs/TLS.md | 149 ++++++++++++++++++++++++++++++------------ 2 files changed, 114 insertions(+), 40 deletions(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 1b101b02d..3440b0d30 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -550,6 +550,10 @@ output. The tag is removed in the outputs before writing. influxdb_database = "other" ``` +### Transport Layer Security (TLS) + +Reference the detailed [TLS][] documentation. + [TOML]: https://github.com/toml-lang/toml#toml [global tags]: #global-tags [interval]: #intervals @@ -561,3 +565,4 @@ output. The tag is removed in the outputs before writing. [aggregators]: #aggregator-plugins [metric filtering]: #metric-filtering [telegraf.conf]: /etc/telegraf.conf +[TLS]: /docs/TLS.md diff --git a/docs/TLS.md b/docs/TLS.md index 0af0c384b..363b0d968 100644 --- a/docs/TLS.md +++ b/docs/TLS.md @@ -1,44 +1,113 @@ -# TLS settings +# Transport Layer Security -TLS for output plugin will be used if you provide options `tls_cert` and `tls_key`. -Settings that can be used to configure TLS: +There is an ongoing effort to standardize TLS options across plugins. When +possible, plugins will provide the standard settings described below. With the +exception of the advanced configuration available TLS settings will be +documented in the sample configuration. -- `tls_cert` - path to certificate. Type: `string`. Ex. `tls_cert = "/etc/ssl/telegraf.crt"` -- `tls_key` - path to key. Type: `string`, Ex. `tls_key = "/etc/ssl/telegraf.key"` -- `tls_allowed_cacerts` - Set one or more allowed client CA certificate file names to enable mutually authenticated TLS connections. Type: `list`. Ex. `tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]` -- `tls_cipher_suites`- Define list of ciphers that will be supported. If wasn't defined default will be used. Type: `list`. Ex. `tls_cipher_suites = ["TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_GCM_SHA256", "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_CBC_SHA256", "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA"]` -- `tls_min_version` - Minimum TLS version that is acceptable. If wasn't defined default (TLS 1.0) will be used. Type: `string`. Ex. `tls_min_version = "TLS11"` -- `tls_max_version` - Maximum SSL/TLS version that is acceptable. If not set, then the maximum version supported is used, which is currently TLS 1.2 (for go < 1.12) or TLS 1.3 (for go == 1.12). Ex. 
`tls_max_version = "TLS12"` +### Client Configuration -tls ciphers are supported: -- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 -- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 -- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 -- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 -- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 -- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 -- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 -- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA -- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 -- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA -- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA -- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA -- TLS_RSA_WITH_AES_128_GCM_SHA256 -- TLS_RSA_WITH_AES_256_GCM_SHA384 -- TLS_RSA_WITH_AES_128_CBC_SHA256 -- TLS_RSA_WITH_AES_128_CBC_SHA -- TLS_RSA_WITH_AES_256_CBC_SHA -- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA -- TLS_RSA_WITH_3DES_EDE_CBC_SHA -- TLS_RSA_WITH_RC4_128_SHA -- TLS_ECDHE_RSA_WITH_RC4_128_SHA -- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA -- TLS_AES_128_GCM_SHA256 (only if version go1.12 was used for make build) -- TLS_AES_256_GCM_SHA384 (only if version go1.12 was used for make build) -- TLS_CHACHA20_POLY1305_SHA256 (only if version go1.12 was used for make build) +For client TLS support we have the following options: +```toml +## Root certificates for verifying server certificates encoded in PEM format. +# tls_ca = "/etc/telegraf/ca.pem" -TLS versions are supported: -- TLS10 -- TLS11 -- TLS12 -- TLS13 (only if version go1.12 was used for make build) +## The public and private keypairs for the client encoded in PEM format. May +## contain intermediate certificates. +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +## Skip TLS verification. +# insecure_skip_verify = false +``` + +#### Advanced Configuration + +For plugins using the standard client configuration you can also set several +advanced settings. These options are not included in the sample configuration +for the interest of brevity. + +```toml +## Define list of allowed ciphers suites. If not defined the default ciphers +## supported by Go will be used. +## ex: tls_cipher_suites = [ +## "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", +## "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", +## "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", +## "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", +## "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", +## "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", +## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", +## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", +## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", +## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", +## "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", +## "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", +## "TLS_RSA_WITH_AES_128_GCM_SHA256", +## "TLS_RSA_WITH_AES_256_GCM_SHA384", +## "TLS_RSA_WITH_AES_128_CBC_SHA256", +## "TLS_RSA_WITH_AES_128_CBC_SHA", +## "TLS_RSA_WITH_AES_256_CBC_SHA" +# ] +# tls_cipher_suites = [] + +## Minimum TLS version that is acceptable. +# tls_min_version = "TLS10" + +## Maximum SSL/TLS version that is acceptable. 
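+## If not set, the highest version supported by the Go runtime is used.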
+# tls_max_version = "TLS12" +``` + +Cipher suites for use with `tls_cipher_suites`: +- `TLS_RSA_WITH_RC4_128_SHA` +- `TLS_RSA_WITH_3DES_EDE_CBC_SHA` +- `TLS_RSA_WITH_AES_128_CBC_SHA` +- `TLS_RSA_WITH_AES_256_CBC_SHA` +- `TLS_RSA_WITH_AES_128_CBC_SHA256` +- `TLS_RSA_WITH_AES_128_GCM_SHA256` +- `TLS_RSA_WITH_AES_256_GCM_SHA384` +- `TLS_ECDHE_ECDSA_WITH_RC4_128_SHA` +- `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA` +- `TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA` +- `TLS_ECDHE_RSA_WITH_RC4_128_SHA` +- `TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA` +- `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA` +- `TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA` +- `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256` +- `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256` +- `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` +- `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` +- `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384` +- `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` +- `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` +- `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` + +TLS 1.3 cipher suites require Telegraf 1.12 and Go 1.12 or later: +- `TLS_AES_128_GCM_SHA256` +- `TLS_AES_256_GCM_SHA384` +- `TLS_CHACHA20_POLY1305_SHA256` + +TLS versions for use with `tls_min_version` or `tls_max_version`: +- `TLS10` +- `TLS11` +- `TLS12` +- `TLS13` (Telegraf 1.12 and Go 1.12 required, must enable TLS 1.3 using environment variables) + +### TLS 1.3 + +TLS 1.3 is available only on an opt-in basis in Go 1.12. To enable it, set the +GODEBUG environment variable (comma-separated key=value options) such that it +includes "tls13=1". + +### Server Configuration + +The server TLS configuration provides support for TLS mutual authentication: + +```toml +## Set one or more allowed client CA certificate file names to +## enable mutually authenticated TLS connections. +# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + +## Add service certificate and key. +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +``` From 328a2bf16fe287c6fb004260d1c48093d531598c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 19 Aug 2019 17:42:00 -0700 Subject: [PATCH 1104/1815] Remove advanced TLS conf from prometheus output sample config --- plugins/outputs/prometheus_client/README.md | 10 +--------- plugins/outputs/prometheus_client/prometheus_client.go | 2 -- 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/plugins/outputs/prometheus_client/README.md b/plugins/outputs/prometheus_client/README.md index 967c01ee6..49030bb3c 100644 --- a/plugins/outputs/prometheus_client/README.md +++ b/plugins/outputs/prometheus_client/README.md @@ -35,19 +35,11 @@ This plugin starts a [Prometheus](https://prometheus.io/) Client, it exposes all ## If set, enable TLS with the given certificate. # tls_cert = "/etc/ssl/telegraf.crt" # tls_key = "/etc/ssl/telegraf.key" - + ## Set one or more allowed client CA certificate file names to ## enable mutually authenticated TLS connections # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - ## contains the minimum SSL/TLS version that is acceptable. - ## If not set, then TLS 1.0 is taken as the minimum. - # tls_min_version = "TLS11" - - ## contains the maximum SSL/TLS version that is acceptable. - ## If not set, then the maximum supported version is used. - # tls_max_version = "TLS12" - ## Export metric collection time. 
# export_timestamp = false ``` diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index f7b2ea966..32dcdbb89 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -117,8 +117,6 @@ var sampleConfig = ` ## enable mutually authenticated TLS connections # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - # tls_cipher_suites = ["TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_GCM_SHA256", "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_CBC_SHA256", "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA"] - ## Export metric collection time. # export_timestamp = false ` From 3e4a9173791418aa4e46bd681d27105b9f7b682c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 19 Aug 2019 17:43:41 -0700 Subject: [PATCH 1105/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e13427f18..4db8ed9be 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -70,6 +70,7 @@ - [#5926](https://github.com/influxdata/telegraf/pull/5926): Add cmdstat metrics to redis input. - [#6261](https://github.com/influxdata/telegraf/pull/6261): Add content_length metric to http_response input plugin. - [#6257](https://github.com/influxdata/telegraf/pull/6257): Add database_tag option to influxdb_listener to add database from query string. +- [#6246](https://github.com/influxdata/telegraf/pull/6246): Add capability to limit TLS versions and cipher suites. #### Bugfixes From 065b92d490e6da74be875c68c43f190c6825883c Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Mon, 19 Aug 2019 14:55:35 -1000 Subject: [PATCH 1106/1815] Add readme for mailchimp input (#6210) --- plugins/inputs/mailchimp/README.md | 59 ++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 plugins/inputs/mailchimp/README.md diff --git a/plugins/inputs/mailchimp/README.md b/plugins/inputs/mailchimp/README.md new file mode 100644 index 000000000..cf82eb243 --- /dev/null +++ b/plugins/inputs/mailchimp/README.md @@ -0,0 +1,59 @@ +# Mailchimp Input + +Pulls campaign reports from the [Mailchimp API](https://developer.mailchimp.com/). + +### Configuration + +This section contains the default TOML to configure the plugin. You can +generate it using `telegraf --usage mailchimp`. + +```toml +[[inputs.mailchimp]] + ## MailChimp API key + ## get from https://admin.mailchimp.com/account/api/ + api_key = "" # required + + ## Reports for campaigns sent more than days_old ago will not be collected. + ## 0 means collect all and is the default value. 
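+  ## For example, days_old = 30 only collects reports for campaigns sent within the last 30 days.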
+  days_old = 0
+
+  ## Campaign ID to get; if empty, all campaigns are collected. This option overrides days_old.
+  # campaign_id = ""
+```
+
+### Metrics
+
+- mailchimp
+  - tags:
+    - id
+    - campaign_title
+  - fields:
+    - emails_sent (integer, emails)
+    - abuse_reports (integer, reports)
+    - unsubscribed (integer, unsubscribes)
+    - hard_bounces (integer, emails)
+    - soft_bounces (integer, emails)
+    - syntax_errors (integer, errors)
+    - forwards_count (integer, emails)
+    - forwards_opens (integer, emails)
+    - opens_total (integer, emails)
+    - unique_opens (integer, emails)
+    - open_rate (double, percentage)
+    - clicks_total (integer, clicks)
+    - unique_clicks (integer, clicks)
+    - unique_subscriber_clicks (integer, clicks)
+    - click_rate (double, percentage)
+    - facebook_recipient_likes (integer, likes)
+    - facebook_unique_likes (integer, likes)
+    - facebook_likes (integer, likes)
+    - industry_type (string, type)
+    - industry_open_rate (double, percentage)
+    - industry_click_rate (double, percentage)
+    - industry_bounce_rate (double, percentage)
+    - industry_unopen_rate (double, percentage)
+    - industry_unsub_rate (double, percentage)
+    - industry_abuse_rate (double, percentage)
+    - list_stats_sub_rate (double, percentage)
+    - list_stats_unsub_rate (double, percentage)
+    - list_stats_open_rate (double, percentage)
+    - list_stats_click_rate (double, percentage)

From bdf08d9cf79615d3dd8a0aa4ba3b32e957ca4ce4 Mon Sep 17 00:00:00 2001
From: Russ Savage
Date: Mon, 19 Aug 2019 15:08:46 -1000
Subject: [PATCH 1107/1815] Add readme for rethinkdb input (#6211)

---
 plugins/inputs/rethinkdb/README.md | 61 ++++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+)
 create mode 100644 plugins/inputs/rethinkdb/README.md

diff --git a/plugins/inputs/rethinkdb/README.md b/plugins/inputs/rethinkdb/README.md
new file mode 100644
index 000000000..3930146e9
--- /dev/null
+++ b/plugins/inputs/rethinkdb/README.md
@@ -0,0 +1,61 @@
+# RethinkDB Input
+
+Collect metrics from [RethinkDB](https://www.rethinkdb.com/).
+
+### Configuration
+
+This section contains the default TOML to configure the plugin. You can
+generate it using `telegraf --usage rethinkdb`.
+
+```toml
+[[inputs.rethinkdb]]
+  ## An array of URIs to gather stats about. Specify an ip or hostname
+  ## with optional port and password. i.e.,
+  ## rethinkdb://user:auth_key@10.10.3.30:28105,
+  ## rethinkdb://10.10.3.33:18832,
+  ## 10.0.0.1:10000, etc.
+  servers = ["127.0.0.1:28015"]
+
+  ## If you use actual rethinkdb of > 2.3.0 with username/password authorization,
+  ## protocol have to be named "rethinkdb2" - it will use 1_0 H.
+  # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
+
+  ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol
+  ## have to be named "rethinkdb".
+ # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"] +``` + +### Metrics + +- rethinkdb + - tags: + - type + - ns + - rethinkdb_host + - rethinkdb_hostname + - fields: + - cache_bytes_in_use (integer, bytes) + - disk_read_bytes_per_sec (integer, reads) + - disk_read_bytes_total (integer, bytes) + - disk_written_bytes_per_sec (integer, bytes) + - disk_written_bytes_total (integer, bytes) + - disk_usage_data_bytes (integer, bytes) + - disk_usage_garbage_bytes (integer, bytes) + - disk_usage_metadata_bytes (integer, bytes) + - disk_usage_preallocated_bytes (integer, bytes) + +- rethinkdb_engine + - tags: + - type + - ns + - rethinkdb_host + - rethinkdb_hostname + - fields: + - active_clients (integer, clients) + - clients (integer, clients) + - queries_per_sec (integer, queries) + - total_queries (integer, queries) + - read_docs_per_sec (integer, reads) + - total_reads (integer, reads) + - written_docs_per_sec (integer, writes) + - total_writes (integer, writes) From ef14131f1c20c30ed19c7ea34df27d65fafe5e0c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 19 Aug 2019 18:09:21 -0700 Subject: [PATCH 1108/1815] Alternate markdown list symbol --- plugins/inputs/rethinkdb/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/rethinkdb/README.md b/plugins/inputs/rethinkdb/README.md index 3930146e9..d10453ace 100644 --- a/plugins/inputs/rethinkdb/README.md +++ b/plugins/inputs/rethinkdb/README.md @@ -15,11 +15,11 @@ generate it using `telegraf --usage rethinkdb`. ## rethinkdb://10.10.3.33:18832, ## 10.0.0.1:10000, etc. servers = ["127.0.0.1:28015"] - + ## If you use actual rethinkdb of > 2.3.0 with username/password authorization, ## protocol have to be named "rethinkdb2" - it will use 1_0 H. # servers = ["rethinkdb2://username:password@127.0.0.1:28015"] - + ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol ## have to be named "rethinkdb". # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"] @@ -43,8 +43,8 @@ generate it using `telegraf --usage rethinkdb`. - disk_usage_garbage_bytes (integer, bytes) - disk_usage_metadata_bytes (integer, bytes) - disk_usage_preallocated_bytes (integer, bytes) - -- rethinkdb_engine + ++ rethinkdb_engine - tags: - type - ns From 8b2374143d0169bc22a533c026bf23334ec91f41 Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Mon, 19 Aug 2019 21:17:27 -0400 Subject: [PATCH 1109/1815] Fix finder inconsistencies in vsphere input (#6245) --- Gopkg.lock | 1 - plugins/inputs/vsphere/finder.go | 118 +++++++++++---------- plugins/inputs/vsphere/vsphere_test.go | 135 ++++++++++++++----------- 3 files changed, 140 insertions(+), 114 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 7ad06dccd..b884eb9b9 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1177,7 +1177,6 @@ name = "github.com/vmware/govmomi" packages = [ ".", - "find", "list", "nfc", "object", diff --git a/plugins/inputs/vsphere/finder.go b/plugins/inputs/vsphere/finder.go index 228a942d9..24427b205 100644 --- a/plugins/inputs/vsphere/finder.go +++ b/plugins/inputs/vsphere/finder.go @@ -16,6 +16,8 @@ var childTypes map[string][]string var addFields map[string][]string +var containers map[string]interface{} + // Finder allows callers to find resources in vCenter given a query string. 
type Finder struct { client *Client @@ -29,11 +31,6 @@ type ResourceFilter struct { paths []string } -type nameAndRef struct { - name string - ref types.ManagedObjectReference -} - // FindAll returns the union of resources found given the supplied resource type and paths. func (f *Finder) FindAll(ctx context.Context, resType string, paths []string, dst interface{}) error { for _, p := range paths { @@ -87,15 +84,18 @@ func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference, var content []types.ObjectContent fields := []string{"name"} + recurse := tokens[pos]["name"] == "**" + + types := ct if isLeaf { - // Special case: The last token is a recursive wildcard, so we can grab everything - // recursively in a single call. - if tokens[pos]["name"] == "**" { + if af, ok := addFields[resType]; ok { + fields = append(fields, af...) + } + if recurse { + // Special case: The last token is a recursive wildcard, so we can grab everything + // recursively in a single call. v2, err := m.CreateContainerView(ctx, root, []string{resType}, true) defer v2.Destroy(ctx) - if af, ok := addFields[resType]; ok { - fields = append(fields, af...) - } err = v2.Retrieve(ctx, []string{resType}, fields, &content) if err != nil { return err @@ -105,23 +105,16 @@ func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference, } return nil } - - if af, ok := addFields[resType]; ok { - fields = append(fields, af...) - } - err = v.Retrieve(ctx, []string{resType}, fields, &content) - if err != nil { - return err - } - } else { - err = v.Retrieve(ctx, ct, fields, &content) - if err != nil { - return err - } + types = []string{resType} // Only load wanted object type at leaf level + } + err = v.Retrieve(ctx, types, fields, &content) + if err != nil { + return err } + rerunAsLeaf := false for _, c := range content { - if !tokens[pos].MatchPropertyList(c.PropSet[:1]) { + if !matchName(tokens[pos], c.PropSet) { continue } @@ -137,44 +130,45 @@ func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference, } // Deal with recursive wildcards (**) - inc := 1 // Normally we advance one token. - if tokens[pos]["name"] == "**" { - if isLeaf { - inc = 0 // Can't advance past last token, so keep descending the tree - } else { - // Lookahead to next token. If it matches this child, we are out of - // the recursive wildcard handling and we can advance TWO tokens ahead, since - // the token that ended the recursive wildcard mode is now consumed. - if tokens[pos+1].MatchPropertyList(c.PropSet) { - if pos < len(tokens)-2 { + var inc int + if recurse { + inc = 0 // By default, we stay on this token + if !isLeaf { + // Lookahead to next token. + if matchName(tokens[pos+1], c.PropSet) { + // Are we looking ahead at a leaf node that has the wanted type? + // Rerun the entire level as a leaf. This is needed since all properties aren't loaded + // when we're processing non-leaf nodes. + if pos == len(tokens)-2 { + if c.Obj.Type == resType { + rerunAsLeaf = true + continue + } + } else if _, ok := containers[c.Obj.Type]; ok { + // Tokens match and we're looking ahead at a container type that's not a leaf + // Consume this token and the next. inc = 2 - } else { - // We found match and it's at a leaf! Grab it! - objs[c.Obj.String()] = c - continue } - } else { - // We didn't break out of recursicve wildcard mode yet, so stay on this token. 
- inc = 0 - } } + } else { + // The normal case: Advance to next token before descending + inc = 1 } err := f.descend(ctx, c.Obj, resType, tokens, pos+inc, objs) if err != nil { return err } } - return nil -} -func nameFromObjectContent(o types.ObjectContent) string { - for _, p := range o.PropSet { - if p.Name == "name" { - return p.Val.(string) - } + if rerunAsLeaf { + // We're at a "pseudo leaf", i.e. we looked ahead a token and found that this level contains leaf nodes. + // Rerun the entire level as a leaf to get those nodes. This will only be executed when pos is one token + // before the last, to pos+1 will always point to a leaf token. + return f.descend(ctx, root, resType, tokens, pos+1, objs) } - return "" + + return nil } func objectContentToTypedArray(objs map[string]types.ObjectContent, dst interface{}) error { @@ -214,11 +208,20 @@ func (r *ResourceFilter) FindAll(ctx context.Context, dst interface{}) error { return r.finder.FindAll(ctx, r.resType, r.paths, dst) } +func matchName(f property.Filter, props []types.DynamicProperty) bool { + for _, prop := range props { + if prop.Name == "name" { + return f.MatchProperty(prop) + } + } + return false +} + func init() { childTypes = map[string][]string{ "HostSystem": {"VirtualMachine"}, - "ComputeResource": {"HostSystem", "ResourcePool"}, - "ClusterComputeResource": {"HostSystem", "ResourcePool"}, + "ComputeResource": {"HostSystem", "ResourcePool", "VirtualApp"}, + "ClusterComputeResource": {"HostSystem", "ResourcePool", "VirtualApp"}, "Datacenter": {"Folder"}, "Folder": { "Folder", @@ -238,4 +241,13 @@ func init() { "ClusterComputeResource": {"parent"}, "Datacenter": {"parent"}, } + + containers = map[string]interface{}{ + "HostSystem": nil, + "ComputeResource": nil, + "Datacenter": nil, + "ResourcePool": nil, + "Folder": nil, + "VirtualApp": nil, + } } diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go index 08e4405b3..28c2c7934 100644 --- a/plugins/inputs/vsphere/vsphere_test.go +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -155,9 +155,13 @@ func defaultVSphere() *VSphere { } } -func createSim() (*simulator.Model, *simulator.Server, error) { +func createSim(folders int) (*simulator.Model, *simulator.Server, error) { model := simulator.VPX() + model.Folder = folders + model.Datacenter = 2 + //model.App = 1 + err := model.Create() if err != nil { return nil, nil, err @@ -262,7 +266,7 @@ func TestMaxQuery(t *testing.T) { if unsafe.Sizeof(i) < 8 { return } - m, s, err := createSim() + m, s, err := createSim(0) if err != nil { t.Fatal(err) } @@ -298,6 +302,20 @@ func TestMaxQuery(t *testing.T) { c2.close() } +func testLookupVM(ctx context.Context, t *testing.T, f *Finder, path string, expected int, expectedName string) { + poweredOn := types.VirtualMachinePowerState("poweredOn") + var vm []mo.VirtualMachine + err := f.Find(ctx, "VirtualMachine", path, &vm) + require.NoError(t, err) + require.Equal(t, expected, len(vm)) + if expectedName != "" { + require.Equal(t, expectedName, vm[0].Name) + } + for _, v := range vm { + require.Equal(t, poweredOn, v.Runtime.PowerState) + } +} + func TestFinder(t *testing.T) { // Don't run test on 32-bit machines due to bug in simulator. 
// https://github.com/vmware/govmomi/issues/1330 @@ -306,7 +324,7 @@ func TestFinder(t *testing.T) { return } - m, s, err := createSim() + m, s, err := createSim(0) if err != nil { t.Fatal(err) } @@ -320,13 +338,13 @@ func TestFinder(t *testing.T) { f := Finder{c} - dc := []mo.Datacenter{} + var dc []mo.Datacenter err = f.Find(ctx, "Datacenter", "/DC0", &dc) require.NoError(t, err) require.Equal(t, 1, len(dc)) require.Equal(t, "DC0", dc[0].Name) - host := []mo.HostSystem{} + var host []mo.HostSystem err = f.Find(ctx, "HostSystem", "/DC0/host/DC0_H0/DC0_H0", &host) require.NoError(t, err) require.Equal(t, 1, len(host)) @@ -343,67 +361,64 @@ func TestFinder(t *testing.T) { require.NoError(t, err) require.Equal(t, 3, len(host)) - vm := []mo.VirtualMachine{} - err = f.Find(ctx, "VirtualMachine", "/DC0/vm/DC0_H0_VM0", &vm) - require.NoError(t, err) - require.Equal(t, 1, len(dc)) - require.Equal(t, "DC0_H0_VM0", vm[0].Name) - - vm = []mo.VirtualMachine{} - err = f.Find(ctx, "VirtualMachine", "/DC0/vm/DC0_C0*", &vm) - require.NoError(t, err) - require.Equal(t, 1, len(dc)) - - vm = []mo.VirtualMachine{} - err = f.Find(ctx, "VirtualMachine", "/DC0/*/DC0_H0_VM0", &vm) - require.NoError(t, err) - require.Equal(t, 1, len(dc)) - require.Equal(t, "DC0_H0_VM0", vm[0].Name) - - vm = []mo.VirtualMachine{} - err = f.Find(ctx, "VirtualMachine", "/DC0/*/DC0_H0_*", &vm) - require.NoError(t, err) - require.Equal(t, 2, len(vm)) - - vm = []mo.VirtualMachine{} - err = f.Find(ctx, "VirtualMachine", "/DC0/**/DC0_H0_VM*", &vm) - require.NoError(t, err) - require.Equal(t, 2, len(vm)) - - vm = []mo.VirtualMachine{} - err = f.Find(ctx, "VirtualMachine", "/DC0/**", &vm) - require.NoError(t, err) - require.Equal(t, 4, len(vm)) - - vm = []mo.VirtualMachine{} - err = f.Find(ctx, "VirtualMachine", "/**", &vm) - require.NoError(t, err) - require.Equal(t, 4, len(vm)) - - vm = []mo.VirtualMachine{} - err = f.Find(ctx, "VirtualMachine", "/**/DC0_H0_VM*", &vm) - require.NoError(t, err) - require.Equal(t, 2, len(vm)) - - vm = []mo.VirtualMachine{} - err = f.Find(ctx, "VirtualMachine", "/DC0/**/DC0_H0_VM*", &vm) - require.NoError(t, err) - require.Equal(t, 2, len(vm)) - - vm = []mo.VirtualMachine{} - err = f.Find(ctx, "VirtualMachine", "/**/vm/**", &vm) - require.NoError(t, err) - require.Equal(t, 4, len(vm)) + var vm []mo.VirtualMachine + testLookupVM(ctx, t, &f, "/DC0/vm/DC0_H0_VM0", 1, "") + testLookupVM(ctx, t, &f, "/DC0/vm/DC0_C0*", 2, "") + testLookupVM(ctx, t, &f, "/DC0/*/DC0_H0_VM0", 1, "DC0_H0_VM0") + testLookupVM(ctx, t, &f, "/DC0/*/DC0_H0_*", 2, "") + testLookupVM(ctx, t, &f, "/DC0/**/DC0_H0_VM*", 2, "") + testLookupVM(ctx, t, &f, "/DC0/**", 4, "") + testLookupVM(ctx, t, &f, "/DC1/**", 4, "") + testLookupVM(ctx, t, &f, "/**", 8, "") + testLookupVM(ctx, t, &f, "/**/vm/**", 8, "") + testLookupVM(ctx, t, &f, "/*/host/**/*DC*", 8, "") + testLookupVM(ctx, t, &f, "/*/host/**/*DC*VM*", 8, "") + testLookupVM(ctx, t, &f, "/*/host/**/*DC*/*/*DC*", 4, "") vm = []mo.VirtualMachine{} err = f.FindAll(ctx, "VirtualMachine", []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, &vm) require.NoError(t, err) require.Equal(t, 4, len(vm)) - vm = []mo.VirtualMachine{} - err = f.FindAll(ctx, "VirtualMachine", []string{"/**"}, &vm) +} + +func TestFolders(t *testing.T) { + // Don't run test on 32-bit machines due to bug in simulator. 
+ // https://github.com/vmware/govmomi/issues/1330 + var i int + if unsafe.Sizeof(i) < 8 { + return + } + + m, s, err := createSim(1) + if err != nil { + t.Fatal(err) + } + defer m.Remove() + defer s.Close() + + v := defaultVSphere() + ctx := context.Background() + + c, err := NewClient(ctx, s.URL, v) + + f := Finder{c} + + var folder []mo.Folder + err = f.Find(ctx, "Folder", "/F0", &folder) require.NoError(t, err) - require.Equal(t, 4, len(vm)) + require.Equal(t, 1, len(folder)) + require.Equal(t, "F0", folder[0].Name) + + var dc []mo.Datacenter + err = f.Find(ctx, "Datacenter", "/F0/DC1", &dc) + require.NoError(t, err) + require.Equal(t, 1, len(dc)) + require.Equal(t, "DC1", dc[0].Name) + + testLookupVM(ctx, t, &f, "/F0/DC0/vm/**/F*", 0, "") + testLookupVM(ctx, t, &f, "/F0/DC1/vm/**/F*/*VM*", 4, "") + testLookupVM(ctx, t, &f, "/F0/DC1/vm/**/F*/**", 4, "") } func TestAll(t *testing.T) { @@ -414,7 +429,7 @@ func TestAll(t *testing.T) { return } - m, s, err := createSim() + m, s, err := createSim(0) if err != nil { t.Fatal(err) } From f45ba14f738b1d4f2c1895571ffb053c44501bde Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 19 Aug 2019 18:18:31 -0700 Subject: [PATCH 1110/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4db8ed9be..05e64a076 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -81,6 +81,7 @@ - [#6142](https://github.com/influxdata/telegraf/issues/6142): Fix memory error panic in mqtt input. - [#6136](https://github.com/influxdata/telegraf/issues/6136): Support Kafka 2.3.0 consumer groups. - [#6232](https://github.com/influxdata/telegraf/issues/6232): Fix persistent session in mqtt_consumer. +- [#6235](https://github.com/influxdata/telegraf/issues/6235): Fix finder inconsistencies in vsphere input. ## v1.11.5 [unreleased] From 92385a4630eca605d95a0e038bea357f337567fa Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 19 Aug 2019 19:05:22 -0700 Subject: [PATCH 1111/1815] Add topic_tag option to mqtt_consumer (#6266) --- plugins/inputs/mqtt_consumer/README.md | 4 + plugins/inputs/mqtt_consumer/mqtt_consumer.go | 27 +++- .../mqtt_consumer/mqtt_consumer_test.go | 136 ++++++++++++++++++ 3 files changed, 160 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md index 53476cb3d..9e60679f6 100644 --- a/plugins/inputs/mqtt_consumer/README.md +++ b/plugins/inputs/mqtt_consumer/README.md @@ -18,6 +18,10 @@ and creates metrics using one of the supported [input data formats][]. "sensors/#", ] + ## The message topic will be stored in a tag specified by this value. If set + ## to the empty string no topic tag will be created. 
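+  ## Defaults to "topic".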
+ # topic_tag = "topic" + ## QoS policy for messages ## 0 = at most once ## 1 = at least once diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 8a6d0d4de..7e3b43d44 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -43,10 +43,11 @@ type Client interface { type ClientFactory func(o *mqtt.ClientOptions) Client type MQTTConsumer struct { - Servers []string - Topics []string - Username string - Password string + Servers []string `toml:"servers"` + Topics []string `toml:"topics"` + TopicTag *string `toml:"topic_tag"` + Username string `toml:"username"` + Password string `toml:"password"` QoS int `toml:"qos"` ConnectionTimeout internal.Duration `toml:"connection_timeout"` MaxUndeliveredMessages int `toml:"max_undelivered_messages"` @@ -67,6 +68,7 @@ type MQTTConsumer struct { state ConnectionState sem semaphore messages map[telegraf.TrackingID]bool + topicTag string ctx context.Context cancel context.CancelFunc @@ -84,6 +86,10 @@ var sampleConfig = ` "sensors/#", ] + ## The message topic will be stored in a tag specified by this value. If set + ## to the empty string no topic tag will be created. + # topic_tag = "topic" + ## QoS policy for messages ## 0 = at most once ## 1 = at least once @@ -161,6 +167,11 @@ func (m *MQTTConsumer) Init() error { return fmt.Errorf("connection_timeout must be greater than 1s: %s", m.ConnectionTimeout.Duration) } + m.topicTag = "topic" + if m.TopicTag != nil { + m.topicTag = *m.TopicTag + } + opts, err := m.createOpts() if err != nil { return err @@ -267,9 +278,11 @@ func (m *MQTTConsumer) onMessage(acc telegraf.TrackingAccumulator, msg mqtt.Mess return err } - topic := msg.Topic() - for _, metric := range metrics { - metric.AddTag("topic", topic) + if m.topicTag != "" { + topic := msg.Topic() + for _, metric := range metrics { + metric.AddTag(m.topicTag, topic) + } } id := acc.AddTrackingMetricGroup(metrics) diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index 07d2015a8..cbc6ee986 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -143,6 +143,142 @@ func TestPersistentClientIDFail(t *testing.T) { require.Error(t, err) } +type Message struct { +} + +func (m *Message) Duplicate() bool { + panic("not implemented") +} + +func (m *Message) Qos() byte { + panic("not implemented") +} + +func (m *Message) Retained() bool { + panic("not implemented") +} + +func (m *Message) Topic() string { + return "telegraf" +} + +func (m *Message) MessageID() uint16 { + panic("not implemented") +} + +func (m *Message) Payload() []byte { + return []byte("cpu time_idle=42i") +} + +func (m *Message) Ack() { + panic("not implemented") +} + +func TestTopicTag(t *testing.T) { + tests := []struct { + name string + topicTag func() *string + expected []telegraf.Metric + }{ + { + name: "default topic when topic tag is unset for backwards compatibility", + topicTag: func() *string { + return nil + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "topic": "telegraf", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "use topic tag when set", + topicTag: func() *string { + tag := "topic_tag" + return &tag + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "topic_tag": "telegraf", + }, + 
map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "no topic tag is added when topic tag is set to the empty string", + topicTag: func() *string { + tag := "" + return &tag + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var handler mqtt.MessageHandler + client := &FakeClient{ + ConnectF: func() mqtt.Token { + return &FakeToken{} + }, + AddRouteF: func(topic string, callback mqtt.MessageHandler) { + handler = callback + }, + SubscribeMultipleF: func(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token { + return &FakeToken{} + }, + DisconnectF: func(quiesce uint) { + }, + } + + plugin := New(func(o *mqtt.ClientOptions) Client { + return client + }) + plugin.Topics = []string{"telegraf"} + plugin.TopicTag = tt.topicTag() + + parser, err := parsers.NewInfluxParser() + require.NoError(t, err) + plugin.SetParser(parser) + + err = plugin.Init() + require.NoError(t, err) + + var acc testutil.Accumulator + err = plugin.Start(&acc) + require.NoError(t, err) + + handler(nil, &Message{}) + + plugin.Stop() + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), + testutil.IgnoreTime()) + }) + } +} + func TestAddRouteCalledForEachTopic(t *testing.T) { client := &FakeClient{ ConnectF: func() mqtt.Token { From 47a41071df5dafd5f5b49346aeab1bf583871eb8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 19 Aug 2019 19:07:10 -0700 Subject: [PATCH 1112/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 05e64a076..6089c96c7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -71,6 +71,7 @@ - [#6261](https://github.com/influxdata/telegraf/pull/6261): Add content_length metric to http_response input plugin. - [#6257](https://github.com/influxdata/telegraf/pull/6257): Add database_tag option to influxdb_listener to add database from query string. - [#6246](https://github.com/influxdata/telegraf/pull/6246): Add capability to limit TLS versions and cipher suites. +- [#6266](https://github.com/influxdata/telegraf/pull/6266): Add topic_tag option to mqtt_consumer. #### Bugfixes From edb05b58a0d8258a6be54df89086108f2e66956b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 19 Aug 2019 19:08:38 -0700 Subject: [PATCH 1113/1815] Add multiple repository example to github input readme --- plugins/inputs/github/README.md | 7 +++++-- plugins/inputs/github/github.go | 7 +++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/github/README.md b/plugins/inputs/github/README.md index 7227b167d..65fda0301 100644 --- a/plugins/inputs/github/README.md +++ b/plugins/inputs/github/README.md @@ -10,11 +10,14 @@ alternative method for collecting repository information. ```toml [[inputs.github]] ## List of repositories to monitor - repositories = ["influxdata/telegraf"] + repositories = [ + "influxdata/telegraf", + "influxdata/influxdb" + ] ## Github API access token. Unauthenticated requests are limited to 60 per hour. # access_token = "" - + ## Github API enterprise url. Github Enterprise accounts must specify their base url. 
# enterprise_base_url = "" diff --git a/plugins/inputs/github/github.go b/plugins/inputs/github/github.go index 5c9682e4a..2f31a7268 100644 --- a/plugins/inputs/github/github.go +++ b/plugins/inputs/github/github.go @@ -33,11 +33,14 @@ type GitHub struct { const sampleConfig = ` ## List of repositories to monitor. - repositories = ["influxdata/telegraf"] + repositories = [ + "influxdata/telegraf", + "influxdata/influxdb" + ] ## Github API access token. Unauthenticated requests are limited to 60 per hour. # access_token = "" - + ## Github API enterprise url. Github Enterprise accounts must specify their base url. # enterprise_base_url = "" From 46b9000ef6dafc9241e49eddcdd73128ae4cd4eb Mon Sep 17 00:00:00 2001 From: memory Date: Mon, 19 Aug 2019 23:54:40 -0400 Subject: [PATCH 1114/1815] Add tag_limit processor (#6086) --- plugins/processors/all/all.go | 1 + plugins/processors/tag_limit/README.md | 27 ++++++ plugins/processors/tag_limit/tag_limit.go | 86 +++++++++++++++++++ .../processors/tag_limit/tag_limit_test.go | 86 +++++++++++++++++++ 4 files changed, 200 insertions(+) create mode 100644 plugins/processors/tag_limit/README.md create mode 100644 plugins/processors/tag_limit/tag_limit.go create mode 100644 plugins/processors/tag_limit/tag_limit_test.go diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go index 5a61a2e80..47ff83f54 100644 --- a/plugins/processors/all/all.go +++ b/plugins/processors/all/all.go @@ -11,6 +11,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/processors/regex" _ "github.com/influxdata/telegraf/plugins/processors/rename" _ "github.com/influxdata/telegraf/plugins/processors/strings" + _ "github.com/influxdata/telegraf/plugins/processors/tag_limit" _ "github.com/influxdata/telegraf/plugins/processors/topk" _ "github.com/influxdata/telegraf/plugins/processors/unpivot" ) diff --git a/plugins/processors/tag_limit/README.md b/plugins/processors/tag_limit/README.md new file mode 100644 index 000000000..b287f0f8d --- /dev/null +++ b/plugins/processors/tag_limit/README.md @@ -0,0 +1,27 @@ +# Tag Limit Processor Plugin + +Use the `tag_limit` processor to ensure that only a certain number of tags are +preserved for any given metric, and to choose the tags to preserve when the +number of tags appended by the data source is over the limit. + +This can be useful when dealing with output systems (e.g. Stackdriver) that +impose hard limits on the number of tags/labels per metric or where high +levels of cardinality are computationally and/or financially expensive. 
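The trimming itself is a two-step affair: the `keep` list is converted to a set for constant-time lookups, and surplus tags are then deleted until the metric is back at the limit. A minimal, self-contained sketch of that approach, using a plain map in place of Telegraf's metric interface (`trimTags` is an illustrative name, not part of the plugin):

```go
package main

import "fmt"

// trimTags drops surplus tags until at most limit remain, never removing
// tags named in keep. Unlike the plugin, which errors when len(keep) exceeds
// the limit, this sketch simply leaves the kept tags in place.
func trimTags(tags map[string]string, limit int, keep []string) {
	if len(tags) <= limit {
		return
	}
	// Convert the keep list to a set for constant-time membership checks.
	keepSet := make(map[string]struct{}, len(keep))
	for _, k := range keep {
		keepSet[k] = struct{}{}
	}
	for k := range tags {
		if len(tags) <= limit {
			break
		}
		if _, ok := keepSet[k]; !ok {
			delete(tags, k) // deleting during range is safe in Go
		}
	}
}

func main() {
	tags := map[string]string{
		"environment": "qa", "region": "us-east1", "month": "Jun", "grade": "a",
	}
	trimTags(tags, 3, []string{"environment", "region"})
	// environment and region always survive; one of the other tags is dropped.
	fmt.Println(tags)
}
```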
+ +### Configuration + +```toml +[[processors.tag_limit]] + ## Maximum number of tags to preserve + limit = 3 + + ## List of tags to preferentially preserve + keep = ["environment", "region"] +``` + +### Example + +```diff ++ throughput month=Jun,environment=qa,region=us-east1,lower=10i,upper=1000i,mean=500i 1560540094000000000 ++ throughput environment=qa,region=us-east1,lower=10i 1560540094000000000 +``` diff --git a/plugins/processors/tag_limit/tag_limit.go b/plugins/processors/tag_limit/tag_limit.go new file mode 100644 index 000000000..41353a8f8 --- /dev/null +++ b/plugins/processors/tag_limit/tag_limit.go @@ -0,0 +1,86 @@ +package taglimit + +import ( + "fmt" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" + "log" +) + +const sampleConfig = ` + ## Maximum number of tags to preserve + limit = 10 + + ## List of tags to preferentially preserve + keep = ["foo", "bar", "baz"] +` + +type TagLimit struct { + Limit int `toml:"limit"` + Keep []string `toml:"keep"` + init bool + keepTags map[string]string +} + +func (d *TagLimit) SampleConfig() string { + return sampleConfig +} + +func (d *TagLimit) Description() string { + return "Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit." +} + +func (d *TagLimit) initOnce() error { + if d.init { + return nil + } + if len(d.Keep) > d.Limit { + return fmt.Errorf("%d keep tags is greater than %d total tag limit", len(d.Keep), d.Limit) + } + d.keepTags = make(map[string]string) + // convert list of tags-to-keep to a map so we can do constant-time lookups + for _, tag_key := range d.Keep { + d.keepTags[tag_key] = "" + } + d.init = true + return nil +} + +func (d *TagLimit) Apply(in ...telegraf.Metric) []telegraf.Metric { + err := d.initOnce() + if err != nil { + log.Printf("E! 
[processors.tag_limit] could not create tag_limit processor: %v", err) + return in + } + for _, point := range in { + pointOriginalTags := point.TagList() + lenPointTags := len(pointOriginalTags) + if lenPointTags <= d.Limit { + continue + } + tagsToRemove := make([]string, lenPointTags-d.Limit) + removeIdx := 0 + // remove extraneous tags, stop once we're at the limit + for _, t := range pointOriginalTags { + if _, ok := d.keepTags[t.Key]; !ok { + tagsToRemove[removeIdx] = t.Key + removeIdx++ + lenPointTags-- + } + if lenPointTags <= d.Limit { + break + } + } + for _, t := range tagsToRemove { + point.RemoveTag(t) + } + } + + return in +} + +func init() { + processors.Add("tag_limit", func() telegraf.Processor { + return &TagLimit{} + }) +} diff --git a/plugins/processors/tag_limit/tag_limit_test.go b/plugins/processors/tag_limit/tag_limit_test.go new file mode 100644 index 000000000..9412d866b --- /dev/null +++ b/plugins/processors/tag_limit/tag_limit_test.go @@ -0,0 +1,86 @@ +package taglimit + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/assert" +) + +func MustMetric(name string, tags map[string]string, fields map[string]interface{}, metricTime time.Time) telegraf.Metric { + if tags == nil { + tags = map[string]string{} + } + if fields == nil { + fields = map[string]interface{}{} + } + m, _ := metric.New(name, tags, fields, metricTime) + return m +} + +func TestUnderLimit(t *testing.T) { + currentTime := time.Now() + + oneTags := make(map[string]string) + oneTags["foo"] = "bar" + + tenTags := make(map[string]string) + tenTags["a"] = "bar" + tenTags["b"] = "bar" + tenTags["c"] = "bar" + tenTags["d"] = "bar" + tenTags["e"] = "bar" + tenTags["f"] = "bar" + tenTags["g"] = "bar" + tenTags["h"] = "bar" + tenTags["i"] = "bar" + tenTags["j"] = "bar" + + tagLimitConfig := TagLimit{ + Limit: 10, + Keep: []string{"foo", "bar"}, + } + + m1 := MustMetric("foo", oneTags, nil, currentTime) + m2 := MustMetric("bar", tenTags, nil, currentTime) + limitApply := tagLimitConfig.Apply(m1, m2) + assert.Equal(t, oneTags, limitApply[0].Tags(), "one tag") + assert.Equal(t, tenTags, limitApply[1].Tags(), "ten tags") +} + +func TestTrim(t *testing.T) { + currentTime := time.Now() + + threeTags := make(map[string]string) + threeTags["a"] = "foo" + threeTags["b"] = "bar" + threeTags["z"] = "baz" + + tenTags := make(map[string]string) + tenTags["a"] = "foo" + tenTags["b"] = "bar" + tenTags["c"] = "baz" + tenTags["d"] = "abc" + tenTags["e"] = "def" + tenTags["f"] = "ghi" + tenTags["g"] = "jkl" + tenTags["h"] = "mno" + tenTags["i"] = "pqr" + tenTags["j"] = "stu" + + tagLimitConfig := TagLimit{ + Limit: 3, + Keep: []string{"a", "b"}, + } + + m1 := MustMetric("foo", threeTags, nil, currentTime) + m2 := MustMetric("bar", tenTags, nil, currentTime) + limitApply := tagLimitConfig.Apply(m1, m2) + assert.Equal(t, threeTags, limitApply[0].Tags(), "three tags") + trimmedTags := limitApply[1].Tags() + assert.Equal(t, 3, len(trimmedTags), "ten tags") + assert.Equal(t, "foo", trimmedTags["a"], "preserved: a") + assert.Equal(t, "bar", trimmedTags["b"], "preserved: b") +} From 54180dacb41cba0bbab8bff98f316fe637039968 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 19 Aug 2019 20:58:01 -0700 Subject: [PATCH 1115/1815] Add tag_limit to readme and changelog --- CHANGELOG.md | 1 + README.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6089c96c7..2d3088d22 100644 --- a/CHANGELOG.md +++ 
b/CHANGELOG.md @@ -21,6 +21,7 @@ - [date](/plugins/processors/date/README.md) - Contributed by @influxdata - [pivot](/plugins/processors/pivot/README.md) - Contributed by @influxdata +- [tag_limit](/plugins/processors/tag_limit/README.md) - Contributed by @memory - [unpivot](/plugins/processors/unpivot/README.md) - Contributed by @influxdata #### Features diff --git a/README.md b/README.md index 395975052..739c002a3 100644 --- a/README.md +++ b/README.md @@ -339,6 +339,7 @@ For documentation on the latest development code see the [documentation index][d * [regex](./plugins/processors/regex) * [rename](./plugins/processors/rename) * [strings](./plugins/processors/strings) +* [tag_limit](./plugins/processors/tag_limit) * [topk](./plugins/processors/topk) * [unpivot](./plugins/processors/unpivot) From 7bbed9e2a31a7fbca23d64e58ac22cce5ebe2db5 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Tue, 20 Aug 2019 15:44:16 -0600 Subject: [PATCH 1116/1815] Update logparser readme requirement to collect at least one field (#6288) --- plugins/inputs/logparser/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index 47edbd296..efd50952f 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -81,7 +81,8 @@ Timestamp modifiers can be used to convert captures to the timestamp of the parsed metric. If no timestamp is parsed the metric will be created using the current time. -You must capture at least one field per line. +**Note:** You must capture at least one field per line. +Patterns that convert all captures to tags will result in points that can't be written to InfluxDB. - Available modifiers: - string (default if nothing is specified) From 153dd585af014c475d6c6aa64560a1387d90a1f6 Mon Sep 17 00:00:00 2001 From: aromeyer Date: Wed, 21 Aug 2019 00:14:11 +0200 Subject: [PATCH 1117/1815] Add openntpd input plugin (#3627) --- README.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/openntpd/README.md | 96 +++++++ plugins/inputs/openntpd/openntpd.go | 223 +++++++++++++++ plugins/inputs/openntpd/openntpd_test.go | 329 +++++++++++++++++++++++ 5 files changed, 650 insertions(+) create mode 100644 plugins/inputs/openntpd/README.md create mode 100644 plugins/inputs/openntpd/openntpd.go create mode 100644 plugins/inputs/openntpd/openntpd_test.go diff --git a/README.md b/README.md index 739c002a3..6587ec3bc 100644 --- a/README.md +++ b/README.md @@ -240,6 +240,7 @@ For documentation on the latest development code see the [documentation index][d * [ntpq](./plugins/inputs/ntpq) * [nvidia_smi](./plugins/inputs/nvidia_smi) * [openldap](./plugins/inputs/openldap) +* [openntpd](./plugins/inputs/openntpd) * [opensmtpd](./plugins/inputs/opensmtpd) * [openweathermap](./plugins/inputs/openweathermap) * [pf](./plugins/inputs/pf) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 8d2144df3..bd8393c0b 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -104,6 +104,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/ntpq" _ "github.com/influxdata/telegraf/plugins/inputs/nvidia_smi" _ "github.com/influxdata/telegraf/plugins/inputs/openldap" + _ "github.com/influxdata/telegraf/plugins/inputs/openntpd" _ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd" _ "github.com/influxdata/telegraf/plugins/inputs/openweathermap" _ "github.com/influxdata/telegraf/plugins/inputs/passenger" diff --git a/plugins/inputs/openntpd/README.md 
b/plugins/inputs/openntpd/README.md new file mode 100644 index 000000000..d1bca049f --- /dev/null +++ b/plugins/inputs/openntpd/README.md @@ -0,0 +1,96 @@ +# OpenNTPD Input Plugin + +Get standard NTP query metrics from OpenNTPD ([OpenNTPD - a FREE, easy to use +implementation of the Network Time Protocol](http://www.openntpd.org/)). + +Below is the documentation of the various headers returned from the NTP query +command when running `ntpctl -s peers`. + +- remote – The remote peer or server being synced to. +- wt – the peer weight +- tl – the peer trust level +- st (stratum) – The remote peer or server Stratum +- next – number of seconds until the next poll +- poll – polling interval in seconds +- delay – Round trip communication delay to the remote peer +or server (milliseconds); +- offset – Mean offset (phase) in the times reported between this local host and +the remote peer or server (RMS, milliseconds); +- jitter – Mean deviation (jitter) in the time reported for that remote peer or +server (RMS of difference of multiple time samples, milliseconds); + +### Configuration: + +```toml +# Get standard NTP query metrics, requires ntpctls executable +# provided by openntpd packages +[[inputs.openntpd]] + ## If running as a restricted user you can prepend sudo for additional access: + #use_sudo = false + + ## The default location of the ntpctl binary can be overridden with: + binary = "/usr/sbin/ntpctl" + + ## The default timeout of 1000ms can be overriden with (in milliseconds): + #timeout = 1000 +``` + +### Measurements & Fields: + +- ntpctl + - delay (float, milliseconds) + - jitter (float, milliseconds) + - offset (float, milliseconds) + - poll (int, seconds) + - next (int,,seconds) + - wt (int) + - tl (int) + +### Tags: + +- All measurements have the following tags: + - remote + - stratum + +### Permissions: + +It's important to note that this plugin references ntpctl, which may require +additional permissions to execute successfully. +Depending on the user/group permissions of the telegraf user executing this +plugin, you may need to alter the group membership, set facls, or use sudo. + +**Group membership (Recommended)**: +```bash +$ groups telegraf +telegraf : telegraf + +$ usermod -a -G ntpd telegraf + +$ groups telegraf +telegraf : telegraf ntpd +``` + +**Sudo privileges**: +If you use this method, you will need the following in your telegraf config: +```toml +[[inputs.openntpd]] + use_sudo = true +``` + +You will also need to update your sudoers file: +```bash +$ visudo +# Add the following line: +telegraf ALL=(ALL) NOPASSWD: /usr/sbin/ntpctl +``` + +Please use the solution you see as most appropriate. 
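Each stats line is split on whitespace and the columns are picked out by position, following the header order `wt tl st next poll offset delay jitter`; unit suffixes (`s`, `ms`) are trimmed before numeric conversion. A minimal sketch of that column parsing for two representative fields, assuming any leading state prefix such as `*` has already been peeled off (`parsePeerStats` is an illustrative name, not part of the plugin):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePeerStats tokenizes one ntpctl stats line and converts two of the
// positional columns, trimming the unit suffix before parsing.
func parsePeerStats(line string) (map[string]interface{}, error) {
	f := strings.Fields(line)
	if len(f) < 8 {
		return nil, fmt.Errorf("expected 8 columns, got %d", len(f))
	}
	// Column 3 is "next" in seconds, e.g. "45s".
	next, err := strconv.ParseInt(strings.TrimSuffix(f[3], "s"), 10, 64)
	if err != nil {
		return nil, err
	}
	// Column 5 is "offset" in milliseconds, e.g. "-9.901ms".
	offset, err := strconv.ParseFloat(strings.TrimSuffix(f[5], "ms"), 64)
	if err != nil {
		return nil, err
	}
	return map[string]interface{}{"next": next, "offset": offset}, nil
}

func main() {
	m, err := parsePeerStats("1 10 2 45s 980s -9.901ms 67.573ms 29.350ms")
	fmt.Println(m, err) // map[next:45 offset:-9.901] <nil>
}
```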
+ +### Example Output: + +``` +$ telegraf --config ~/ws/telegraf.conf --input-filter openntpd --test +* Plugin: openntpd, Collection 1 +> openntpd,remote=194.57.169.1,stratum=2,host=localhost tl=10i,poll=1007i, +offset=2.295,jitter=3.896,delay=53.766,next=266i,wt=1i 1514454299000000000 +``` diff --git a/plugins/inputs/openntpd/openntpd.go b/plugins/inputs/openntpd/openntpd.go new file mode 100644 index 000000000..ed742ee00 --- /dev/null +++ b/plugins/inputs/openntpd/openntpd.go @@ -0,0 +1,223 @@ +package openntpd + +import ( + "bufio" + "bytes" + "fmt" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Mapping of ntpctl header names to tag keys +var tagHeaders = map[string]string{ + "st": "stratum", +} + +// Mapping of the ntpctl tag key to the index in the command output +var tagI = map[string]int{ + "stratum": 2, +} + +// Mapping of float metrics to their index in the command output +var floatI = map[string]int{ + "offset": 5, + "delay": 6, + "jitter": 7, +} + +// Mapping of int metrics to their index in the command output +var intI = map[string]int{ + "wt": 0, + "tl": 1, + "next": 3, + "poll": 4, +} + +type runner func(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) + +// Openntpd is used to store configuration values +type Openntpd struct { + Binary string + Timeout internal.Duration + UseSudo bool + + filter filter.Filter + run runner +} + +var defaultBinary = "/usr/sbin/ntpctl" +var defaultTimeout = internal.Duration{Duration: time.Second} + +func (n *Openntpd) Description() string { + return "Get standard NTP query metrics from OpenNTPD." +} + +func (n *Openntpd) SampleConfig() string { + return ` + ## If running as a restricted user you can prepend sudo for additional access: + #use_sudo = false + + ## The default location of the ntpctl binary can be overridden with: + binary = "/usr/sbin/ntpctl" + + ## The default timeout of 1000ms can be overriden with (in milliseconds): + timeout = 1000 + ` +} + +// Shell out to ntpctl and return the output +func openntpdRunner(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) { + cmdArgs := []string{"-s", "peers"} + + cmd := exec.Command(cmdName, cmdArgs...) + + if UseSudo { + cmdArgs = append([]string{cmdName}, cmdArgs...) + cmd = exec.Command("sudo", cmdArgs...) 
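+		// cmdArgs was re-prefixed with the ntpctl path above, so the
+		// effective invocation here is `sudo /usr/sbin/ntpctl -s peers`.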
+ } + + var out bytes.Buffer + cmd.Stdout = &out + err := internal.RunTimeout(cmd, Timeout.Duration) + if err != nil { + return &out, fmt.Errorf("error running ntpctl: %s", err) + } + + return &out, nil +} + +func (n *Openntpd) Gather(acc telegraf.Accumulator) error { + out, err := n.run(n.Binary, n.Timeout, n.UseSudo) + if err != nil { + return fmt.Errorf("error gathering metrics: %s", err) + } + + lineCounter := 0 + scanner := bufio.NewScanner(out) + for scanner.Scan() { + // skip first (peer) and second (field list) line + if lineCounter < 2 { + lineCounter++ + continue + } + + line := scanner.Text() + + fields := strings.Fields(line) + + mFields := make(map[string]interface{}) + tags := make(map[string]string) + + // Even line ---> ntp server info + if lineCounter%2 == 0 { + // DNS resolution error ---> keep DNS name as remote name + if fields[0] != "not" { + tags["remote"] = fields[0] + } else { + tags["remote"] = fields[len(fields)-1] + } + } + + // Read next line - Odd line ---> ntp server stats + scanner.Scan() + line = scanner.Text() + lineCounter++ + + fields = strings.Fields(line) + + // if there is an ntpctl state prefix, remove it and make it it's own tag + if strings.ContainsAny(string(fields[0]), "*") { + tags["state_prefix"] = string(fields[0]) + fields = append(fields[:0], fields[1:]...) + } + + // Get tags from output + for key, index := range tagI { + if len(fields) < index { + continue + } + tags[key] = fields[index] + } + + // Get integer metrics from output + for key, index := range intI { + if index >= len(fields) { + continue + } + if fields[index] == "-" { + continue + } + + if key == "next" || key == "poll" { + + m, err := strconv.ParseInt(strings.TrimSuffix(fields[index], "s"), 10, 64) + if err != nil { + acc.AddError(fmt.Errorf("integer value expected, got: %s", fields[index])) + continue + } + mFields[key] = m + + } else { + + m, err := strconv.ParseInt(fields[index], 10, 64) + if err != nil { + acc.AddError(fmt.Errorf("integer value expected, got: %s", fields[index])) + continue + } + mFields[key] = m + } + } + + // get float metrics from output + for key, index := range floatI { + if len(fields) <= index { + continue + } + if fields[index] == "-" || fields[index] == "----" || fields[index] == "peer" || fields[index] == "not" || fields[index] == "valid" { + continue + } + + if key == "offset" || key == "delay" || key == "jitter" { + + m, err := strconv.ParseFloat(strings.TrimSuffix(fields[index], "ms"), 64) + if err != nil { + acc.AddError(fmt.Errorf("float value expected, got: %s", fields[index])) + continue + } + mFields[key] = m + + } else { + + m, err := strconv.ParseFloat(fields[index], 64) + if err != nil { + acc.AddError(fmt.Errorf("float value expected, got: %s", fields[index])) + continue + } + mFields[key] = m + + } + } + acc.AddFields("openntpd", mFields, tags) + + lineCounter++ + } + return nil +} + +func init() { + inputs.Add("openntpd", func() telegraf.Input { + return &Openntpd{ + run: openntpdRunner, + Binary: defaultBinary, + Timeout: defaultTimeout, + UseSudo: false, + } + }) +} diff --git a/plugins/inputs/openntpd/openntpd_test.go b/plugins/inputs/openntpd/openntpd_test.go new file mode 100644 index 000000000..0c2d20142 --- /dev/null +++ b/plugins/inputs/openntpd/openntpd_test.go @@ -0,0 +1,329 @@ +package openntpd + +import ( + "bytes" + "testing" + "time" + + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +var TestTimeout = internal.Duration{Duration: 
time.Second} + +func OpenntpdCTL(output string, Timeout internal.Duration, useSudo bool) func(string, internal.Duration, bool) (*bytes.Buffer, error) { + return func(string, internal.Duration, bool) (*bytes.Buffer, error) { + return bytes.NewBuffer([]byte(output)), nil + } +} + +func TestParseSimpleOutput(t *testing.T) { + acc := &testutil.Accumulator{} + v := &Openntpd{ + run: OpenntpdCTL(simpleOutput, TestTimeout, false), + } + err := v.Gather(acc) + + assert.NoError(t, err) + assert.True(t, acc.HasMeasurement("openntpd")) + assert.Equal(t, acc.NMetrics(), uint64(1)) + + assert.Equal(t, acc.NFields(), 7) + + firstpeerfields := map[string]interface{}{ + "wt": int64(1), + "tl": int64(10), + "next": int64(56), + "poll": int64(63), + "offset": float64(9.271), + "delay": float64(44.662), + "jitter": float64(2.678), + } + + firstpeertags := map[string]string{ + "remote": "212.129.9.36", + "stratum": "3", + } + + acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags) +} + +func TestParseSimpleOutputwithStatePrefix(t *testing.T) { + acc := &testutil.Accumulator{} + v := &Openntpd{ + run: OpenntpdCTL(simpleOutputwithStatePrefix, TestTimeout, false), + } + err := v.Gather(acc) + + assert.NoError(t, err) + assert.True(t, acc.HasMeasurement("openntpd")) + assert.Equal(t, acc.NMetrics(), uint64(1)) + + assert.Equal(t, acc.NFields(), 7) + + firstpeerfields := map[string]interface{}{ + "wt": int64(1), + "tl": int64(10), + "next": int64(45), + "poll": int64(980), + "offset": float64(-9.901), + "delay": float64(67.573), + "jitter": float64(29.350), + } + + firstpeertags := map[string]string{ + "remote": "92.243.6.5", + "stratum": "2", + "state_prefix": "*", + } + + acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags) +} + +func TestParseSimpleOutputInavlidPeer(t *testing.T) { + acc := &testutil.Accumulator{} + v := &Openntpd{ + run: OpenntpdCTL(simpleOutputInvalidPeer, TestTimeout, false), + } + err := v.Gather(acc) + + assert.NoError(t, err) + assert.True(t, acc.HasMeasurement("openntpd")) + assert.Equal(t, acc.NMetrics(), uint64(1)) + + assert.Equal(t, acc.NFields(), 4) + + firstpeerfields := map[string]interface{}{ + "wt": int64(1), + "tl": int64(2), + "next": int64(203), + "poll": int64(300), + } + + firstpeertags := map[string]string{ + "remote": "178.33.111.49", + "stratum": "-", + } + + acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags) +} + +func TestParseSimpleOutputServersDNSError(t *testing.T) { + acc := &testutil.Accumulator{} + v := &Openntpd{ + run: OpenntpdCTL(simpleOutputServersDNSError, TestTimeout, false), + } + err := v.Gather(acc) + + assert.NoError(t, err) + assert.True(t, acc.HasMeasurement("openntpd")) + assert.Equal(t, acc.NMetrics(), uint64(1)) + + assert.Equal(t, acc.NFields(), 4) + + firstpeerfields := map[string]interface{}{ + "next": int64(2), + "poll": int64(15), + "wt": int64(1), + "tl": int64(2), + } + + firstpeertags := map[string]string{ + "remote": "pool.nl.ntp.org", + "stratum": "-", + } + + acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags) + + secondpeerfields := map[string]interface{}{ + "next": int64(2), + "poll": int64(15), + "wt": int64(1), + "tl": int64(2), + } + + secondpeertags := map[string]string{ + "remote": "pool.nl.ntp.org", + "stratum": "-", + } + + acc.AssertContainsTaggedFields(t, "openntpd", secondpeerfields, secondpeertags) +} + +func TestParseSimpleOutputServerDNSError(t *testing.T) { + acc := &testutil.Accumulator{} + v := &Openntpd{ + run: 
OpenntpdCTL(simpleOutputServerDNSError, TestTimeout, false), + } + err := v.Gather(acc) + + assert.NoError(t, err) + assert.True(t, acc.HasMeasurement("openntpd")) + assert.Equal(t, acc.NMetrics(), uint64(1)) + + assert.Equal(t, acc.NFields(), 4) + + firstpeerfields := map[string]interface{}{ + "next": int64(12), + "poll": int64(15), + "wt": int64(1), + "tl": int64(2), + } + + firstpeertags := map[string]string{ + "remote": "pool.fr.ntp.org", + "stratum": "-", + } + + acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags) +} + +func TestParseFullOutput(t *testing.T) { + acc := &testutil.Accumulator{} + v := &Openntpd{ + run: OpenntpdCTL(fullOutput, TestTimeout, false), + } + err := v.Gather(acc) + + assert.NoError(t, err) + assert.True(t, acc.HasMeasurement("openntpd")) + assert.Equal(t, acc.NMetrics(), uint64(20)) + + assert.Equal(t, acc.NFields(), 113) + + firstpeerfields := map[string]interface{}{ + "wt": int64(1), + "tl": int64(10), + "next": int64(56), + "poll": int64(63), + "offset": float64(9.271), + "delay": float64(44.662), + "jitter": float64(2.678), + } + + firstpeertags := map[string]string{ + "remote": "212.129.9.36", + "stratum": "3", + } + + acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags) + + secondpeerfields := map[string]interface{}{ + "wt": int64(1), + "tl": int64(10), + "next": int64(21), + "poll": int64(64), + "offset": float64(-0.103), + "delay": float64(53.199), + "jitter": float64(9.046), + } + + secondpeertags := map[string]string{ + "remote": "163.172.25.19", + "stratum": "2", + } + + acc.AssertContainsTaggedFields(t, "openntpd", secondpeerfields, secondpeertags) + + thirdpeerfields := map[string]interface{}{ + "wt": int64(1), + "tl": int64(10), + "next": int64(45), + "poll": int64(980), + "offset": float64(-9.901), + "delay": float64(67.573), + "jitter": float64(29.350), + } + + thirdpeertags := map[string]string{ + "remote": "92.243.6.5", + "stratum": "2", + "state_prefix": "*", + } + + acc.AssertContainsTaggedFields(t, "openntpd", thirdpeerfields, thirdpeertags) + + fourthpeerfields := map[string]interface{}{ + "wt": int64(1), + "tl": int64(2), + "next": int64(203), + "poll": int64(300), + } + + fourthpeertags := map[string]string{ + "remote": "178.33.111.49", + "stratum": "-", + } + + acc.AssertContainsTaggedFields(t, "openntpd", fourthpeerfields, fourthpeertags) +} + +var simpleOutput = `peer +wt tl st next poll offset delay jitter +212.129.9.36 from pool 0.debian.pool.ntp.org +1 10 3 56s 63s 9.271ms 44.662ms 2.678ms` + +var simpleOutputwithStatePrefix = `peer +wt tl st next poll offset delay jitter +92.243.6.5 from pool 0.debian.pool.ntp.org +* 1 10 2 45s 980s -9.901ms 67.573ms 29.350ms` + +var simpleOutputInvalidPeer = `peer +wt tl st next poll offset delay jitter +178.33.111.49 from pool 0.debian.pool.ntp.org +1 2 - 203s 300s ---- peer not valid ----` + +var simpleOutputServersDNSError = `peer +wt tl st next poll offset delay jitter +not resolved from pool pool.nl.ntp.org +1 2 - 2s 15s ---- peer not valid ---- +` +var simpleOutputServerDNSError = `peer +wt tl st next poll offset delay jitter +not resolved pool.fr.ntp.org +1 2 - 12s 15s ---- peer not valid ---- +` + +var fullOutput = `peer +wt tl st next poll offset delay jitter +212.129.9.36 from pool 0.debian.pool.ntp.org +1 10 3 56s 63s 9.271ms 44.662ms 2.678ms +163.172.25.19 from pool 0.debian.pool.ntp.org +1 10 2 21s 64s -0.103ms 53.199ms 9.046ms +92.243.6.5 from pool 0.debian.pool.ntp.org +* 1 10 2 45s 980s -9.901ms 67.573ms 29.350ms +178.33.111.49 
from pool 0.debian.pool.ntp.org +1 2 - 203s 300s ---- peer not valid ---- +62.210.122.129 from pool 1.debian.pool.ntp.org +1 10 3 4s 60s 5.372ms 53.690ms 14.700ms +163.172.225.159 from pool 1.debian.pool.ntp.org +1 10 3 38s 61s 12.276ms 40.631ms 1.282ms +5.196.192.58 from pool 1.debian.pool.ntp.org +1 2 - 0s 300s ---- peer not valid ---- +129.250.35.250 from pool 1.debian.pool.ntp.org +1 10 2 28s 63s 11.236ms 43.874ms 1.381ms +2001:41d0:a:5a7::1 from pool 2.debian.pool.ntp.org +1 2 - 5s 15s ---- peer not valid ---- +2001:41d0:8:188d::16 from pool 2.debian.pool.ntp.org +1 2 - 3s 15s ---- peer not valid ---- +2001:4b98:dc0:41:216:3eff:fe69:46e3 from pool 2.debian.pool.ntp.org +1 2 - 14s 15s ---- peer not valid ---- +2a01:e0d:1:3:58bf:fa61:0:1 from pool 2.debian.pool.ntp.org +1 2 - 9s 15s ---- peer not valid ---- +163.172.179.38 from pool 2.debian.pool.ntp.org +1 10 2 51s 65s -19.229ms 85.404ms 48.734ms +5.135.3.88 from pool 2.debian.pool.ntp.org +1 2 - 173s 300s ---- peer not valid ---- +195.154.41.195 from pool 2.debian.pool.ntp.org +1 10 2 84s 1004s -3.956ms 54.549ms 13.658ms +62.210.81.130 from pool 2.debian.pool.ntp.org +1 10 2 158s 1043s -42.593ms 124.353ms 94.230ms +149.202.97.123 from pool 3.debian.pool.ntp.org +1 2 - 205s 300s ---- peer not valid ---- +51.15.175.224 from pool 3.debian.pool.ntp.org +1 10 2 9s 64s 8.861ms 46.640ms 0.668ms +37.187.5.167 from pool 3.debian.pool.ntp.org +1 2 - 105s 300s ---- peer not valid ---- +194.57.169.1 from pool 3.debian.pool.ntp.org +1 10 2 32s 63s 6.589ms 52.051ms 2.057ms` From 1ad10c8a52214a547462382da0572b76e497fc8a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 20 Aug 2019 15:28:26 -0700 Subject: [PATCH 1118/1815] Update changelog and tidy openntpd input --- CHANGELOG.md | 1 + plugins/inputs/openntpd/README.md | 61 ++++++++++++++--------------- plugins/inputs/openntpd/openntpd.go | 18 ++++----- 3 files changed, 39 insertions(+), 41 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2d3088d22..470bc16d3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ - [docker_log](/plugins/inputs/docker_log) - Contributed by @prashanthjbabu - [fireboard](/plugins/inputs/fireboard) - Contributed by @ronnocol +- [openntpd](/plugins/inputs/openntpd) - Contributed by @aromeyer - [uwsgi](/plugins/inputs/uswgi) - Contributed by @blaggacao #### New Parsers diff --git a/plugins/inputs/openntpd/README.md b/plugins/inputs/openntpd/README.md index d1bca049f..877c3a460 100644 --- a/plugins/inputs/openntpd/README.md +++ b/plugins/inputs/openntpd/README.md @@ -1,7 +1,8 @@ # OpenNTPD Input Plugin -Get standard NTP query metrics from OpenNTPD ([OpenNTPD - a FREE, easy to use -implementation of the Network Time Protocol](http://www.openntpd.org/)). +Get standard NTP query metrics from [OpenNTPD][] using the ntpctl command. + +[OpenNTPD]: http://www.openntpd.org/ Below is the documentation of the various headers returned from the NTP query command when running `ntpctl -s peers`. @@ -19,40 +20,36 @@ the remote peer or server (RMS, milliseconds); - jitter – Mean deviation (jitter) in the time reported for that remote peer or server (RMS of difference of multiple time samples, milliseconds); -### Configuration: +### Configuration ```toml -# Get standard NTP query metrics, requires ntpctls executable -# provided by openntpd packages [[inputs.openntpd]] - ## If running as a restricted user you can prepend sudo for additional access: - #use_sudo = false + ## Run ntpctl binary with sudo. 
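+  ## Sudo use additionally requires a sudoers entry; see Permissions below.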
+  # use_sudo = false
 
-  ## The default location of the ntpctl binary can be overridden with:
-  binary = "/usr/sbin/ntpctl"
+  ## Location of the ntpctl binary.
+  # binary = "/usr/sbin/ntpctl"
 
-  ## The default timeout of 1000ms can be overriden with (in milliseconds):
-  #timeout = 1000
+  ## Maximum time the ntpctl binary is allowed to run.
+  # timeout = "5s"
 ```
 
-### Measurements & Fields:
+### Metrics
 
 - ntpctl
-  - delay (float, milliseconds)
-  - jitter (float, milliseconds)
-  - offset (float, milliseconds)
-  - poll (int, seconds)
-  - next (int,,seconds)
-  - wt (int)
-  - tl (int)
+  - tags:
+    - remote
+    - stratum
+  - fields:
+    - delay (float, milliseconds)
+    - jitter (float, milliseconds)
+    - offset (float, milliseconds)
+    - poll (int, seconds)
+    - next (int, seconds)
+    - wt (int)
+    - tl (int)
 
-### Tags:
-
-- All measurements have the following tags:
-  - remote
-  - stratum
-
-### Permissions:
+### Permissions
 
 It's important to note that this plugin references ntpctl, which may require
 additional permissions to execute successfully.
@@ -80,17 +77,17 @@ If you use this method, you will need the following in your telegraf config:
 You will also need to update your sudoers file:
 ```bash
 $ visudo
-# Add the following line:
-telegraf ALL=(ALL) NOPASSWD: /usr/sbin/ntpctl
+# Add the following lines:
+Cmnd_Alias NTPCTL = /usr/sbin/ntpctl
+telegraf ALL=(ALL) NOPASSWD: NTPCTL
+Defaults!NTPCTL !logfile, !syslog, !pam_session
 ```
 
 Please use the solution you see as most appropriate.
 
-### Example Output:
+### Example Output
 
 ```
-$ telegraf --config ~/ws/telegraf.conf --input-filter openntpd --test
-* Plugin: openntpd, Collection 1
-> openntpd,remote=194.57.169.1,stratum=2,host=localhost tl=10i,poll=1007i,
+openntpd,remote=194.57.169.1,stratum=2,host=localhost tl=10i,poll=1007i,
 offset=2.295,jitter=3.896,delay=53.766,next=266i,wt=1i 1514454299000000000
 ```
diff --git a/plugins/inputs/openntpd/openntpd.go b/plugins/inputs/openntpd/openntpd.go
index ed742ee00..e7723b480 100644
--- a/plugins/inputs/openntpd/openntpd.go
+++ b/plugins/inputs/openntpd/openntpd.go
@@ -53,7 +53,7 @@ type Openntpd struct {
 }
 
 var defaultBinary = "/usr/sbin/ntpctl"
-var defaultTimeout = internal.Duration{Duration: time.Second}
+var defaultTimeout = internal.Duration{Duration: 5 * time.Second}
 
 func (n *Openntpd) Description() string {
 	return "Get standard NTP query metrics from OpenNTPD."
@@ -61,14 +61,14 @@ func (n *Openntpd) Description() string {
 
 func (n *Openntpd) SampleConfig() string {
 	return `
-  ## If running as a restricted user you can prepend sudo for additional access:
-  #use_sudo = false
+  ## Run ntpctl binary with sudo.
+  # use_sudo = false
 
-  ## The default location of the ntpctl binary can be overridden with:
-  binary = "/usr/sbin/ntpctl"
+  ## Location of the ntpctl binary.
+  # binary = "/usr/sbin/ntpctl"
 
-  ## The default timeout of 1000ms can be overriden with (in milliseconds):
-  timeout = 1000
+  ## Maximum time the ntpctl binary is allowed to run.
+  # timeout = "5s"
 	`
 }
 
@@ -135,12 +135,12 @@ func (n *Openntpd) Gather(acc telegraf.Accumulator) error {
 		// if there is an ntpctl state prefix, remove it and make it it's own tag
 		if strings.ContainsAny(string(fields[0]), "*") {
 			tags["state_prefix"] = string(fields[0])
-			fields = append(fields[:0], fields[1:]...)
+			fields = fields[1:]
 		}
 
 		// Get tags from output
 		for key, index := range tagI {
-			if len(fields) < index {
+			if index >= len(fields) {
 				continue
 			}
 			tags[key] = fields[index]

From 819bf8e99d242ca975fb354a5ab180c6afb6e1ab Mon Sep 17 00:00:00 2001
From: Greg <2653109+glinton@users.noreply.github.com>
Date: Tue, 20 Aug 2019 18:07:24 -0600
Subject: [PATCH 1119/1815] Add exec output plugin (#6267)

---
 README.md                         |   1 +
 internal/internal.go              |  10 +-
 plugins/outputs/all/all.go        |   1 +
 plugins/outputs/exec/README.md    |  26 +++++
 plugins/outputs/exec/exec.go      | 153 ++++++++++++++++++++++++++++++
 plugins/outputs/exec/exec_test.go | 105 ++++++++++++++++++++
 6 files changed, 291 insertions(+), 5 deletions(-)
 create mode 100644 plugins/outputs/exec/README.md
 create mode 100644 plugins/outputs/exec/exec.go
 create mode 100644 plugins/outputs/exec/exec_test.go

diff --git a/README.md b/README.md
index 6587ec3bc..7e5f18954 100644
--- a/README.md
+++ b/README.md
@@ -367,6 +367,7 @@ For documentation on the latest development code see the [documentation index][d
 * [datadog](./plugins/outputs/datadog)
 * [discard](./plugins/outputs/discard)
 * [elasticsearch](./plugins/outputs/elasticsearch)
+* [exec](./plugins/outputs/exec)
 * [file](./plugins/outputs/file)
 * [graphite](./plugins/outputs/graphite)
 * [graylog](./plugins/outputs/graylog)
diff --git a/internal/internal.go b/internal/internal.go
index a38f7703a..6f135938a 100644
--- a/internal/internal.go
+++ b/internal/internal.go
@@ -230,17 +230,17 @@ func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
 	timer := time.AfterFunc(timeout, func() {
 		err := c.Process.Kill()
 		if err != nil {
-			log.Printf("E! FATAL error killing process: %s", err)
+			log.Printf("E! [agent] Error killing process: %s", err)
 			return
 		}
 	})
 
 	err := c.Wait()
-	isTimeout := timer.Stop()
+	if err == nil {
+		return nil
+	}
 
-	if err != nil {
-		return err
-	} else if isTimeout == false {
+	if !timer.Stop() {
 		return TimeoutErr
 	}
 
diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go
index f9dd73c44..e40230993 100644
--- a/plugins/outputs/all/all.go
+++ b/plugins/outputs/all/all.go
@@ -11,6 +11,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/outputs/datadog"
 	_ "github.com/influxdata/telegraf/plugins/outputs/discard"
 	_ "github.com/influxdata/telegraf/plugins/outputs/elasticsearch"
+	_ "github.com/influxdata/telegraf/plugins/outputs/exec"
 	_ "github.com/influxdata/telegraf/plugins/outputs/file"
 	_ "github.com/influxdata/telegraf/plugins/outputs/graphite"
 	_ "github.com/influxdata/telegraf/plugins/outputs/graylog"
diff --git a/plugins/outputs/exec/README.md b/plugins/outputs/exec/README.md
new file mode 100644
index 000000000..2b0c2d3f1
--- /dev/null
+++ b/plugins/outputs/exec/README.md
@@ -0,0 +1,26 @@
+# Exec Output Plugin
+
+This plugin sends telegraf metrics to an external application over stdin.
+
+The command should be defined similar to docker's `exec` form:
+
+    ["executable", "param1", "param2"]
+
+On non-zero exit stderr will be logged at error level.
+
+### Configuration
+
+```toml
+[[outputs.exec]]
+  ## Command to ingest metrics via stdin.
+  command = ["tee", "-a", "/dev/null"]
+
+  ## Timeout for command to complete.
+  # timeout = "5s"
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  # data_format = "influx"
+```
diff --git a/plugins/outputs/exec/exec.go b/plugins/outputs/exec/exec.go
new file mode 100644
index 000000000..583646bb5
--- /dev/null
+++ b/plugins/outputs/exec/exec.go
@@ -0,0 +1,153 @@
+package exec
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"log"
+	"os/exec"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/plugins/outputs"
+	"github.com/influxdata/telegraf/plugins/serializers"
+)
+
+const maxStderrBytes = 512
+
+// Exec defines the exec output plugin.
+type Exec struct {
+	Command []string          `toml:"command"`
+	Timeout internal.Duration `toml:"timeout"`
+
+	runner     Runner
+	serializer serializers.Serializer
+}
+
+var sampleConfig = `
+  ## Command to ingest metrics via stdin.
+  command = ["tee", "-a", "/dev/null"]
+
+  ## Timeout for command to complete.
+  # timeout = "5s"
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  # data_format = "influx"
+`
+
+// SetSerializer sets the serializer for the output.
+func (e *Exec) SetSerializer(serializer serializers.Serializer) {
+	e.serializer = serializer
+}
+
+// Connect satisfies the Output interface.
+func (e *Exec) Connect() error {
+	return nil
+}
+
+// Close satisfies the Output interface.
+func (e *Exec) Close() error {
+	return nil
+}
+
+// Description describes the plugin.
+func (e *Exec) Description() string {
+	return "Send metrics to command as input over stdin"
+}
+
+// SampleConfig returns a sample configuration.
+func (e *Exec) SampleConfig() string {
+	return sampleConfig
+}
+
+// Write writes the metrics to the configured command.
+func (e *Exec) Write(metrics []telegraf.Metric) error {
+	var buffer bytes.Buffer
+	for _, metric := range metrics {
+		value, err := e.serializer.Serialize(metric)
+		if err != nil {
+			return err
+		}
+		buffer.Write(value)
+	}
+
+	if buffer.Len() <= 0 {
+		return nil
+	}
+
+	return e.runner.Run(e.Timeout.Duration, e.Command, &buffer)
+}
+
+// Runner provides an interface for running exec.Cmd.
+type Runner interface {
+	Run(time.Duration, []string, io.Reader) error
+}
+
+// CommandRunner runs a command with the ability to kill the process before the timeout.
+type CommandRunner struct {
+	cmd *exec.Cmd
+}
+
+// Run runs the command.
+func (c *CommandRunner) Run(timeout time.Duration, command []string, buffer io.Reader) error {
+	cmd := exec.Command(command[0], command[1:]...)
+	cmd.Stdin = buffer
+	var stderr bytes.Buffer
+	cmd.Stderr = &stderr
+
+	err := internal.RunTimeout(cmd, timeout)
+	s := stderr
+
+	if err != nil {
+		if err == internal.TimeoutErr {
+			return fmt.Errorf("%q timed out and was killed", command)
+		}
+
+		if s.Len() > 0 {
+			log.Printf("E! [outputs.exec] Command error: %q", truncate(s))
+		}
+
+		if status, ok := internal.ExitStatus(err); ok {
+			return fmt.Errorf("%q exited %d with %s", command, status, err.Error())
+		}
+
+		return fmt.Errorf("%q failed with %s", command, err.Error())
+	}
+
+	c.cmd = cmd
+
+	return nil
+}
+
+func truncate(buf bytes.Buffer) string {
+	// Limit the number of bytes.
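+	// Output is capped at maxStderrBytes and additionally cut at the first
+	// newline so the logged stderr stays on a single line; "..." marks that
+	// a cut happened.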
+ didTruncate := false + if buf.Len() > maxStderrBytes { + buf.Truncate(maxStderrBytes) + didTruncate = true + } + if i := bytes.IndexByte(buf.Bytes(), '\n'); i > 0 { + // Only show truncation if the newline wasn't the last character. + if i < buf.Len()-1 { + didTruncate = true + } + buf.Truncate(i) + } + if didTruncate { + buf.WriteString("...") + } + return buf.String() +} + +func init() { + outputs.Add("exec", func() telegraf.Output { + return &Exec{ + runner: &CommandRunner{}, + Timeout: internal.Duration{Duration: time.Second * 5}, + } + }) +} diff --git a/plugins/outputs/exec/exec_test.go b/plugins/outputs/exec/exec_test.go new file mode 100644 index 000000000..850ba7328 --- /dev/null +++ b/plugins/outputs/exec/exec_test.go @@ -0,0 +1,105 @@ +package exec + +import ( + "bytes" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/serializers" + "github.com/influxdata/telegraf/testutil" +) + +func TestExec(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test due to OS/executable dependencies") + } + + tests := []struct { + name string + command []string + err bool + metrics []telegraf.Metric + }{ + { + name: "test success", + command: []string{"tee"}, + err: false, + metrics: testutil.MockMetrics(), + }, + { + name: "test doesn't accept stdin", + command: []string{"sleep", "5s"}, + err: true, + metrics: testutil.MockMetrics(), + }, + { + name: "test command not found", + command: []string{"/no/exist", "-h"}, + err: true, + metrics: testutil.MockMetrics(), + }, + { + name: "test no metrics output", + command: []string{"tee"}, + err: false, + metrics: []telegraf.Metric{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &Exec{ + Command: tt.command, + Timeout: internal.Duration{Duration: time.Second}, + runner: &CommandRunner{}, + } + + s, _ := serializers.NewInfluxSerializer() + e.SetSerializer(s) + + e.Connect() + + require.Equal(t, tt.err, e.Write(tt.metrics) != nil) + }) + } +} + +func TestTruncate(t *testing.T) { + tests := []struct { + name string + buf *bytes.Buffer + len int + }{ + { + name: "long out", + buf: bytes.NewBufferString(strings.Repeat("a", maxStderrBytes+100)), + len: maxStderrBytes + len("..."), + }, + { + name: "multiline out", + buf: bytes.NewBufferString("hola\ngato\n"), + len: len("hola") + len("..."), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := truncate(*tt.buf) + require.Equal(t, tt.len, len(s)) + }) + } +} + +func TestExecDocs(t *testing.T) { + e := &Exec{} + e.Description() + e.SampleConfig() + require.NoError(t, e.Close()) + + e = &Exec{runner: &CommandRunner{}} + require.NoError(t, e.Close()) +} From d3cf7d669b1d223f73237c4a34a065f52c8ca735 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 20 Aug 2019 18:10:25 -0600 Subject: [PATCH 1120/1815] Add apcupsd input plugin (#6226) --- Gopkg.lock | 8 + Gopkg.toml | 4 + README.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/apcupsd/README.md | 45 +++++ plugins/inputs/apcupsd/apcupsd.go | 108 ++++++++++++ plugins/inputs/apcupsd/apcupsd_test.go | 227 +++++++++++++++++++++++++ 7 files changed, 394 insertions(+) create mode 100644 plugins/inputs/apcupsd/README.md create mode 100644 plugins/inputs/apcupsd/apcupsd.go create mode 100644 plugins/inputs/apcupsd/apcupsd_test.go diff --git a/Gopkg.lock b/Gopkg.lock index b884eb9b9..248d55456 100644 --- 
a/Gopkg.lock
+++ b/Gopkg.lock
@@ -813,6 +813,13 @@
 revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
 version = "v1.0.1"
 
+[[projects]]
+  digest = "1:d4a3035a03b4612c714b993891c071706a64890e55ef64bcc42bc2b461cb2756"
+  name = "github.com/mdlayher/apcupsd"
+  packages = ["."]
+  pruneopts = ""
+  revision = "2fe55d9e1d0704d3c6f03f69a1fd9ebe2aef9df1"
+
 [[projects]]
   digest = "1:4c8d8358c45ba11ab7bb15df749d4df8664ff1582daead28bae58cf8cbe49890"
   name = "github.com/miekg/dns"
@@ -1734,6 +1741,7 @@
 "github.com/kballard/go-shellquote",
 "github.com/kubernetes/apimachinery/pkg/api/resource",
 "github.com/matttproud/golang_protobuf_extensions/pbutil",
+ "github.com/mdlayher/apcupsd",
 "github.com/miekg/dns",
 "github.com/multiplay/go-ts3",
 "github.com/nats-io/gnatsd/server",
diff --git a/Gopkg.toml b/Gopkg.toml
index 028af3487..2cc57dd71 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -220,6 +220,10 @@
 source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz"
 name = "gopkg.in/fsnotify.v1"
 
+[[constraint]]
+  name = "github.com/mdlayher/apcupsd"
+  revision = "2fe55d9e1d0704d3c6f03f69a1fd9ebe2aef9df1"
+
 [[constraint]]
   branch = "master"
   name = "google.golang.org/genproto"
diff --git a/README.md b/README.md
index 7e5f18954..7a9650e97 100644
--- a/README.md
+++ b/README.md
@@ -141,6 +141,7 @@ For documentation on the latest development code see the [documentation index][d
 * [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq)
 * [apache](./plugins/inputs/apache)
 * [aurora](./plugins/inputs/aurora)
+* [apcupsd](./plugins/inputs/apcupsd)
 * [aws cloudwatch](./plugins/inputs/cloudwatch)
 * [bcache](./plugins/inputs/bcache)
 * [beanstalkd](./plugins/inputs/beanstalkd)
diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index bd8393c0b..c3b134684 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -5,6 +5,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
 	_ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
 	_ "github.com/influxdata/telegraf/plugins/inputs/apache"
+	_ "github.com/influxdata/telegraf/plugins/inputs/apcupsd"
 	_ "github.com/influxdata/telegraf/plugins/inputs/aurora"
 	_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
 	_ "github.com/influxdata/telegraf/plugins/inputs/beanstalkd"
diff --git a/plugins/inputs/apcupsd/README.md b/plugins/inputs/apcupsd/README.md
new file mode 100644
index 000000000..52edeafe6
--- /dev/null
+++ b/plugins/inputs/apcupsd/README.md
@@ -0,0 +1,45 @@
+# apcupsd Input Plugin
+
+This plugin reads data from an apcupsd daemon over its NIS network protocol.
+
+### Requirements
+
+apcupsd should be installed and its daemon should be running.
+
+### Configuration
+
+```toml
+[[inputs.apcupsd]]
+  # A list of running apcupsd server to connect to.
+  # If not provided will default to tcp://127.0.0.1:3551
+  servers = ["tcp://127.0.0.1:3551"]
+
+  ## Timeout for dialing server.
+ timeout = "5s" +``` + +### Metrics + +- apcupsd + - tags: + - serial + - ups_name + - status + - fields: + - online + - input_voltage + - load_percent + - battery_charge_percent + - time_left_ns + - output_voltage + - internal_temp + - battery_voltage + - input_frequency + - time_on_battery_ns + + +### Example output + +``` +apcupsd,serial=AS1231515,ups_name=name1,host=server1 time_on_battery=0,load_percent=9.7,time_left_minutes=98,output_voltage=230.4,internal_temp=32.4,battery_voltage=27.4,input_frequency=50.2,online=true,input_voltage=230.4,battery_charge_percent=100 1490035922000000000 +``` diff --git a/plugins/inputs/apcupsd/apcupsd.go b/plugins/inputs/apcupsd/apcupsd.go new file mode 100644 index 000000000..9a73c454a --- /dev/null +++ b/plugins/inputs/apcupsd/apcupsd.go @@ -0,0 +1,108 @@ +package apcupsd + +import ( + "context" + "net/url" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/mdlayher/apcupsd" +) + +const defaultAddress = "tcp://127.0.0.1:3551" + +var defaultTimeout = internal.Duration{Duration: time.Duration(time.Second * 5)} + +type ApcUpsd struct { + Servers []string + Timeout internal.Duration +} + +func (*ApcUpsd) Description() string { + return "Monitor APC UPSes connected to apcupsd" +} + +var sampleConfig = ` + # A list of running apcupsd server to connect to. + # If not provided will default to tcp://127.0.0.1:3551 + servers = ["tcp://127.0.0.1:3551"] + + ## Timeout for dialing server. + timeout = "5s" +` + +func (*ApcUpsd) SampleConfig() string { + return sampleConfig +} + +func (h *ApcUpsd) Gather(acc telegraf.Accumulator) error { + ctx := context.Background() + + for _, addr := range h.Servers { + addrBits, err := url.Parse(addr) + if err != nil { + return err + } + if addrBits.Scheme == "" { + addrBits.Scheme = "tcp" + } + + ctx, cancel := context.WithTimeout(ctx, h.Timeout.Duration) + defer cancel() + + status, err := fetchStatus(ctx, addrBits) + if err != nil { + return err + } + + tags := map[string]string{ + "serial": status.SerialNumber, + "ups_name": status.UPSName, + "status": status.Status, + } + + flags, err := strconv.ParseUint(strings.Fields(status.StatusFlags)[0], 0, 64) + if err != nil { + return err + } + + fields := map[string]interface{}{ + "status_flags": flags, + "input_voltage": status.LineVoltage, + "load_percent": status.LoadPercent, + "battery_charge_percent": status.BatteryChargePercent, + "time_left_ns": status.TimeLeft.Nanoseconds(), + "output_voltage": status.OutputVoltage, + "internal_temp": status.InternalTemp, + "battery_voltage": status.BatteryVoltage, + "input_frequency": status.LineFrequency, + "time_on_battery_ns": status.TimeOnBattery.Nanoseconds(), + } + + acc.AddFields("apcupsd", fields, tags) + } + return nil +} + +func fetchStatus(ctx context.Context, addr *url.URL) (*apcupsd.Status, error) { + client, err := apcupsd.DialContext(ctx, addr.Scheme, addr.Host) + if err != nil { + return nil, err + } + defer client.Close() + + return client.Status() +} + +func init() { + inputs.Add("apcupsd", func() telegraf.Input { + return &ApcUpsd{ + Servers: []string{defaultAddress}, + Timeout: defaultTimeout, + } + }) +} diff --git a/plugins/inputs/apcupsd/apcupsd_test.go b/plugins/inputs/apcupsd/apcupsd_test.go new file mode 100644 index 000000000..2418faf85 --- /dev/null +++ b/plugins/inputs/apcupsd/apcupsd_test.go @@ -0,0 +1,227 @@ +// +build go1.11 + +package apcupsd + +import ( + "context" + 
"encoding/binary" + "net" + "testing" + "time" + + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestApcupsdDocs(t *testing.T) { + apc := &ApcUpsd{} + apc.Description() + apc.SampleConfig() +} + +func TestApcupsdInit(t *testing.T) { + input, ok := inputs.Inputs["apcupsd"] + if !ok { + t.Fatal("Input not defined") + } + + _ = input().(*ApcUpsd) +} + +func listen(ctx context.Context, t *testing.T, out [][]byte) (string, error) { + lc := net.ListenConfig{} + ln, err := lc.Listen(ctx, "tcp4", "127.0.0.1:0") + if err != nil { + return "", err + } + + go func() { + for ctx.Err() == nil { + defer ln.Close() + + conn, err := ln.Accept() + if err != nil { + continue + } + defer conn.Close() + conn.SetReadDeadline(time.Now().Add(time.Second)) + + in := make([]byte, 128) + n, err := conn.Read(in) + require.NoError(t, err, "failed to read from connection") + + status := []byte{0, 6, 's', 't', 'a', 't', 'u', 's'} + want, got := status, in[:n] + require.Equal(t, want, got) + + // Run against test function and append EOF to end of output bytes + out = append(out, []byte{0, 0}) + + for _, o := range out { + _, err := conn.Write(o) + require.NoError(t, err, "failed to write to connection") + } + } + }() + + return ln.Addr().String(), nil +} + +func TestConfig(t *testing.T) { + apc := &ApcUpsd{Timeout: defaultTimeout} + + var ( + tests = []struct { + name string + servers []string + err bool + }{ + { + name: "test listen address no scheme", + servers: []string{"127.0.0.1:1234"}, + err: true, + }, + { + name: "test no port", + servers: []string{"127.0.0.3"}, + err: true, + }, + } + + acc testutil.Accumulator + ) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + apc.Servers = tt.servers + + err := apc.Gather(&acc) + if tt.err { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } + +} + +func TestApcupsdGather(t *testing.T) { + apc := &ApcUpsd{Timeout: defaultTimeout} + + var ( + tests = []struct { + name string + err bool + tags map[string]string + fields map[string]interface{} + out func() [][]byte + }{ + { + name: "test listening server with output", + err: false, + tags: map[string]string{ + "serial": "ABC123", + "status": "ONLINE", + "ups_name": "BERTHA", + }, + fields: map[string]interface{}{ + "status_flags": uint64(8), + "battery_charge_percent": float64(0), + "battery_voltage": float64(0), + "input_frequency": float64(0), + "input_voltage": float64(0), + "internal_temp": float64(0), + "load_percent": float64(13), + "output_voltage": float64(0), + "time_left_ns": int64(2790000000000), + "time_on_battery_ns": int64(0), + }, + out: genOutput, + }, + { + name: "test with bad output", + err: true, + out: genBadOutput, + }, + } + + acc testutil.Accumulator + ) + + for _, tt := range tests { + + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + lAddr, err := listen(ctx, t, tt.out()) + if err != nil { + t.Fatal(err) + } + + apc.Servers = []string{"tcp://" + lAddr} + + err = apc.Gather(&acc) + if tt.err { + require.Error(t, err) + } else { + require.NoError(t, err) + acc.AssertContainsTaggedFields(t, "apcupsd", tt.fields, tt.tags) + } + cancel() + }) + } +} + +// The following functionality is straight from apcupsd tests. + +// kvBytes is a helper to generate length and key/value byte buffers. 
+func kvBytes(kv string) ([]byte, []byte) { + lenb := make([]byte, 2) + binary.BigEndian.PutUint16(lenb, uint16(len(kv))) + + return lenb, []byte(kv) +} + +func genOutput() [][]byte { + kvs := []string{ + "SERIALNO : ABC123", + "STATUS : ONLINE", + "STATFLAG : 0x08 Status Flag", + "UPSNAME : BERTHA", + "DATE : 2016-09-06 22:13:28 -0400", + "HOSTNAME : example", + "LOADPCT : 13.0 Percent Load Capacity", + "BATTDATE : 2016-09-06", + "TIMELEFT : 46.5 Minutes", + "TONBATT : 0 seconds", + "NUMXFERS : 0", + "SELFTEST : NO", + "NOMPOWER : 865 Watts", + } + + var out [][]byte + for _, kv := range kvs { + lenb, kvb := kvBytes(kv) + out = append(out, lenb) + out = append(out, kvb) + } + + return out +} + +func genBadOutput() [][]byte { + kvs := []string{ + "STATFLAG : 0x08Status Flag", + } + + var out [][]byte + for _, kv := range kvs { + lenb, kvb := kvBytes(kv) + out = append(out, lenb) + out = append(out, kvb) + } + + return out +} From ac66838f9fa58cc5b9632c1614db6f49c2fa4515 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 20 Aug 2019 17:12:21 -0700 Subject: [PATCH 1121/1815] Update win_perf_counters sample config (#6286) Remove extra percent from win_perf_counters sample config and have it mirror the config provided in the Windows package. --- etc/telegraf_windows.conf | 3 +- .../win_perf_counters/win_perf_counters.go | 81 ++++++++++++++++--- 2 files changed, 73 insertions(+), 11 deletions(-) diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index 5befa7bf8..0d72e79e8 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -244,8 +244,8 @@ "% Disk Time", "% Disk Read Time", "% Disk Write Time", - "Current Disk Queue Length", "% Free Space", + "Current Disk Queue Length", "Free Megabytes", ] Measurement = "win_disk" @@ -311,7 +311,6 @@ "Standby Cache Reserve Bytes", "Standby Cache Normal Priority Bytes", "Standby Cache Core Bytes", - ] # Use 6 x - to remove the Instance bit from the query. Instances = ["------"] diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index 2bf50e5cc..f858ba6e7 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -36,9 +36,12 @@ var sampleConfig = ` ObjectName = "Processor" Instances = ["*"] Counters = [ - "%% Idle Time", "%% Interrupt Time", - "%% Privileged Time", "%% User Time", - "%% Processor Time" + "% Idle Time", + "% Interrupt Time", + "% Privileged Time", + "% User Time", + "% Processor Time", + "% DPC Time", ] Measurement = "win_cpu" # Set to true to include _Total instance when querying for all (*). 
@@ -51,14 +54,56 @@ var sampleConfig = `
     ObjectName = "LogicalDisk"
     Instances = ["*"]
     Counters = [
-      "%% Idle Time", "%% Disk Time","%% Disk Read Time",
-      "%% Disk Write Time", "%% User Time", "Current Disk Queue Length"
+      "% Idle Time",
+      "% Disk Time",
+      "% Disk Read Time",
+      "% Disk Write Time",
+      "% User Time",
+      "% Free Space",
+      "Current Disk Queue Length",
+      "Free Megabytes",
     ]
     Measurement = "win_disk"
 
+  [[inputs.win_perf_counters.object]]
+    ObjectName = "PhysicalDisk"
+    Instances = ["*"]
+    Counters = [
+      "Disk Read Bytes/sec",
+      "Disk Write Bytes/sec",
+      "Current Disk Queue Length",
+      "Disk Reads/sec",
+      "Disk Writes/sec",
+      "% Disk Time",
+      "% Disk Read Time",
+      "% Disk Write Time",
+    ]
+    Measurement = "win_diskio"
+
+  [[inputs.win_perf_counters.object]]
+    ObjectName = "Network Interface"
+    Instances = ["*"]
+    Counters = [
+      "Bytes Received/sec",
+      "Bytes Sent/sec",
+      "Packets Received/sec",
+      "Packets Sent/sec",
+      "Packets Received Discarded",
+      "Packets Outbound Discarded",
+      "Packets Received Errors",
+      "Packets Outbound Errors",
+    ]
+    Measurement = "win_net"
+
+
   [[inputs.win_perf_counters.object]]
     ObjectName = "System"
-    Counters = ["Context Switches/sec","System Calls/sec"]
+    Counters = [
+      "Context Switches/sec",
+      "System Calls/sec",
+      "Processor Queue Length",
+      "System Up Time",
+    ]
     Instances = ["------"]
     Measurement = "win_system"
 
@@ -67,12 +112,30 @@ var sampleConfig = `
     # such as from the Memory object.
     ObjectName = "Memory"
     Counters = [
-      "Available Bytes", "Cache Faults/sec", "Demand Zero Faults/sec",
-      "Page Faults/sec", "Pages/sec", "Transition Faults/sec",
-      "Pool Nonpaged Bytes", "Pool Paged Bytes"
+      "Available Bytes",
+      "Cache Faults/sec",
+      "Demand Zero Faults/sec",
+      "Page Faults/sec",
+      "Pages/sec",
+      "Transition Faults/sec",
+      "Pool Nonpaged Bytes",
+      "Pool Paged Bytes",
+      "Standby Cache Reserve Bytes",
+      "Standby Cache Normal Priority Bytes",
+      "Standby Cache Core Bytes",
     ]
     Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath.
     Measurement = "win_mem"
+
+  [[inputs.win_perf_counters.object]]
+    # Example query where the aggregate "_Total" instance is selected,
+    # such as from the Paging File object.
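+    # "_Total" aggregates the counter across all instances of the object.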
+    ObjectName = "Paging File"
+    Counters = [
+      "% Usage",
+    ]
+    Instances = ["_Total"]
+    Measurement = "win_swap"
 `
 
 type Win_PerfCounters struct {

From 139fcc5805bf787fbe8409b85a001b856fbf82ad Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 21 Aug 2019 10:04:43 -0700
Subject: [PATCH 1122/1815] Update changelog and dependency licenses

---
 CHANGELOG.md                    | 13 +++++++++----
 docs/LICENSE_OF_DEPENDENCIES.md |  1 +
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 470bc16d3..29e803787 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,10 +9,11 @@

 #### New Inputs

-- [docker_log](/plugins/inputs/docker_log) - Contributed by @prashanthjbabu
-- [fireboard](/plugins/inputs/fireboard) - Contributed by @ronnocol
-- [openntpd](/plugins/inputs/openntpd) - Contributed by @aromeyer
-- [uwsgi](/plugins/inputs/uswgi) - Contributed by @blaggacao
+- [apcupsd](/plugins/inputs/apcupsd/README.md) - Contributed by @jonaz
+- [docker_log](/plugins/inputs/docker_log/README.md) - Contributed by @prashanthjbabu
+- [fireboard](/plugins/inputs/fireboard/README.md) - Contributed by @ronnocol
+- [openntpd](/plugins/inputs/openntpd/README.md) - Contributed by @aromeyer
+- [uwsgi](/plugins/inputs/uwsgi/README.md) - Contributed by @blaggacao

 #### New Parsers

@@ -25,6 +26,10 @@
 - [tag_limit](/plugins/processors/tag_limit/README.md) - Contributed by @memory
 - [unpivot](/plugins/processors/unpivot/README.md) - Contributed by @influxdata

+#### New Outputs
+
+- [exec](/plugins/outputs/exec/README.md) - Contributed by @Jaeyo
+
 #### Features

 - [#5842](https://github.com/influxdata/telegraf/pull/5842): Improve performance of wavefront serializer.
diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md
index 755fbbbae..81ecaac81 100644
--- a/docs/LICENSE_OF_DEPENDENCIES.md
+++ b/docs/LICENSE_OF_DEPENDENCIES.md
@@ -69,6 +69,7 @@ following works:
 - github.com/leodido/ragel-machinery [MIT License](https://github.com/leodido/ragel-machinery/blob/develop/LICENSE)
 - github.com/mailru/easyjson [MIT License](https://github.com/mailru/easyjson/blob/master/LICENSE)
 - github.com/matttproud/golang_protobuf_extensions [Apache License 2.0](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
+- github.com/mdlayher/apcupsd [MIT License](https://github.com/mdlayher/apcupsd/blob/master/LICENSE.md)
 - github.com/Microsoft/ApplicationInsights-Go [MIT License](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE)
 - github.com/Microsoft/go-winio [MIT License](https://github.com/Microsoft/go-winio/blob/master/LICENSE)
 - github.com/miekg/dns [BSD 3-Clause Clear License](https://github.com/miekg/dns/blob/master/LICENSE)

From 10671d2641f084d3ed96fd28fa76091bd9981878 Mon Sep 17 00:00:00 2001
From: Greg <2653109+glinton@users.noreply.github.com>
Date: Wed, 21 Aug 2019 12:13:38 -0600
Subject: [PATCH 1123/1815] Stop timer when command exits in WaitTimeout
 (#6296)

---
 internal/internal.go      |  1 +
 internal/internal_test.go | 25 +++++++++++++++++++++++++
 2 files changed, 26 insertions(+)

diff --git a/internal/internal.go b/internal/internal.go
index 6f135938a..893f34383 100644
--- a/internal/internal.go
+++ b/internal/internal.go
@@ -237,6 +237,7 @@ func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {

 	err := c.Wait()
 	if err == nil {
+		timer.Stop()
 		return nil
 	}

diff --git a/internal/internal_test.go b/internal/internal_test.go
index da2fe01c5..5e9b9a97c 100644
--- a/internal/internal_test.go
+++ b/internal/internal_test.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"compress/gzip"
 	"io/ioutil"
+	"log"
 	"os/exec"
 	"testing"
 	"time"
@@ -64,6 +65,30 @@ func TestRunTimeout(t *testing.T) {
 	assert.True(t, elapsed < time.Millisecond*75)
 }
 
+// Verifies behavior of a command that doesn't get killed.
+func TestRunTimeoutFastExit(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping test due to random failures.")
+	}
+	if echobin == "" {
+		t.Skip("'echo' binary not available on OS, skipping.")
+	}
+	cmd := exec.Command(echobin)
+	start := time.Now()
+	err := RunTimeout(cmd, time.Millisecond*20)
+	buf := &bytes.Buffer{}
+	log.SetOutput(buf)
+	elapsed := time.Since(start)
+
+	require.NoError(t, err)
+	// Verify that the command exited quickly, well within the 75ms bound.
+	assert.True(t, elapsed < time.Millisecond*75)
+
+	// Verify "process already finished" log doesn't occur.
+	time.Sleep(time.Millisecond * 75)
+	require.Equal(t, "", buf.String())
+}
+
 func TestCombinedOutputTimeout(t *testing.T) {
 	// TODO: Fix this test
 	t.Skip("Test failing too often, skip for now and revisit later.")

From 8c2b3addd32af7c6478c32dd63fb5411d492ae60 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 21 Aug 2019 16:30:55 -0700
Subject: [PATCH 1124/1815] Fix parsing multiple metrics on the first line of
 tailed file (#6289)

---
 plugins/inputs/tail/tail.go      |  75 ++++++++++---------
 plugins/inputs/tail/tail_test.go | 119 ++++++++++++++++++++++++++++++-
 2 files changed, 160 insertions(+), 34 deletions(-)

diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go
index 245010764..da5b81a60 100644
--- a/plugins/inputs/tail/tail.go
+++ b/plugins/inputs/tail/tail.go
@@ -9,11 +9,11 @@ import (
 	"sync"
 
 	"github.com/influxdata/tail"
-
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/internal/globpath"
 	"github.com/influxdata/telegraf/plugins/inputs"
 	"github.com/influxdata/telegraf/plugins/parsers"
+	"github.com/influxdata/telegraf/plugins/parsers/csv"
 )
 
 const (
@@ -172,55 +172,64 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error {
 
 			// create a goroutine for each "tailer"
 			t.wg.Add(1)
-			go t.receiver(parser, tailer)
+			go func() {
+				defer t.wg.Done()
+				t.receiver(parser, tailer)
+			}()
 			t.tailers[tailer.Filename] = tailer
 		}
 	}
 	return nil
 }
 
-// this is launched as a goroutine to continuously watch a tailed logfile
+// parseLine parses a line of text.
+func parseLine(parser parsers.Parser, line string, firstLine bool) ([]telegraf.Metric, error) {
+	switch parser.(type) {
+	case *csv.Parser:
+		// The csv parser parses headers in Parse and skips them in ParseLine.
+		// As a temporary solution call Parse only when getting the first
+		// line from the file.
+		if firstLine {
+			return parser.Parse([]byte(line))
+		} else {
+			m, err := parser.ParseLine(line)
+			if err != nil {
+				return nil, err
+			}
+
+			if m != nil {
+				return []telegraf.Metric{m}, nil
+			}
+			return []telegraf.Metric{}, nil
+		}
+	default:
+		return parser.Parse([]byte(line))
+	}
+}
+
+// receiver is launched as a goroutine to continuously watch a tailed logfile
 // for changes, parse any incoming msgs, and add to the accumulator.
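+// The first line is passed to parseLine with firstLine=true so that the csv
+// parser can consume its header row; see parseLine above for the details.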
func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) { - defer t.wg.Done() - var firstLine = true - var metrics []telegraf.Metric - var m telegraf.Metric - var err error - var line *tail.Line - for line = range tailer.Lines { + for line := range tailer.Lines { if line.Err != nil { - t.acc.AddError(fmt.Errorf("error tailing file %s, Error: %s", tailer.Filename, err)) + t.acc.AddError(fmt.Errorf("error tailing file %s, Error: %s", tailer.Filename, line.Err)) continue } // Fix up files with Windows line endings. text := strings.TrimRight(line.Text, "\r") - if firstLine { - metrics, err = parser.Parse([]byte(text)) - if err == nil { - if len(metrics) == 0 { - firstLine = false - continue - } else { - m = metrics[0] - } - } - firstLine = false - } else { - m, err = parser.ParseLine(text) - } - - if err == nil { - if m != nil { - tags := m.Tags() - tags["path"] = tailer.Filename - t.acc.AddFields(m.Name(), m.Fields(), tags, m.Time()) - } - } else { + metrics, err := parseLine(parser, text, firstLine) + if err != nil { t.acc.AddError(fmt.Errorf("malformed log line in %s: [%s], Error: %s", tailer.Filename, line.Text, err)) + continue + } + firstLine = false + + for _, metric := range metrics { + metric.AddTag("path", tailer.Filename) + t.acc.AddMetric(metric) } } diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index fb5e05a76..41db76cac 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -5,10 +5,13 @@ import ( "os" "runtime" "testing" + "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/csv" + "github.com/influxdata/telegraf/plugins/parsers/json" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -139,3 +142,117 @@ func TestTailDosLineendings(t *testing.T) { "usage_idle": float64(200), }) } + +// The csv parser should only parse the header line once per file. 
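+// Two data rows after the single header row should yield exactly two metrics.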
+func TestCSVHeadersParsedOnce(t *testing.T) { + tmpfile, err := ioutil.TempFile("", "") + require.NoError(t, err) + defer func() { + tmpfile.Close() + os.Remove(tmpfile.Name()) + }() + + _, err = tmpfile.WriteString(` +measurement,time_idle +cpu,42 +cpu,42 +`) + require.NoError(t, err) + + plugin := NewTail() + plugin.FromBeginning = true + plugin.Files = []string{tmpfile.Name()} + plugin.SetParserFunc(func() (parsers.Parser, error) { + return &csv.Parser{ + MeasurementColumn: "measurement", + HeaderRowCount: 1, + TimeFunc: func() time.Time { return time.Unix(0, 0) }, + }, nil + }) + defer plugin.Stop() + + acc := testutil.Accumulator{} + err = plugin.Start(&acc) + require.NoError(t, err) + err = plugin.Gather(&acc) + require.NoError(t, err) + acc.Wait(2) + plugin.Stop() + + expected := []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "path": tmpfile.Name(), + }, + map[string]interface{}{ + "time_idle": 42, + "measurement": "cpu", + }, + time.Unix(0, 0)), + testutil.MustMetric("cpu", + map[string]string{ + "path": tmpfile.Name(), + }, + map[string]interface{}{ + "time_idle": 42, + "measurement": "cpu", + }, + time.Unix(0, 0)), + } + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + +// Ensure that the first line can produce multiple metrics (#6138) +func TestMultipleMetricsOnFirstLine(t *testing.T) { + tmpfile, err := ioutil.TempFile("", "") + require.NoError(t, err) + defer func() { + tmpfile.Close() + os.Remove(tmpfile.Name()) + }() + + _, err = tmpfile.WriteString(` +[{"time_idle": 42}, {"time_idle": 42}] +`) + require.NoError(t, err) + + plugin := NewTail() + plugin.FromBeginning = true + plugin.Files = []string{tmpfile.Name()} + plugin.SetParserFunc(func() (parsers.Parser, error) { + return json.New( + &json.Config{ + MetricName: "cpu", + }) + }) + defer plugin.Stop() + + acc := testutil.Accumulator{} + err = plugin.Start(&acc) + require.NoError(t, err) + err = plugin.Gather(&acc) + require.NoError(t, err) + acc.Wait(2) + plugin.Stop() + + expected := []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "path": tmpfile.Name(), + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0)), + testutil.MustMetric("cpu", + map[string]string{ + "path": tmpfile.Name(), + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0)), + } + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), + testutil.IgnoreTime()) +} From 02174031c8fae33b57a88bffdc67758db8bd01bc Mon Sep 17 00:00:00 2001 From: Dmitry Ilyin Date: Thu, 22 Aug 2019 02:34:28 +0300 Subject: [PATCH 1125/1815] Add logstash input plugin (#4910) --- .gitignore | 5 + plugins/inputs/all/all.go | 1 + plugins/inputs/logstash/README.md | 189 +++++ plugins/inputs/logstash/logstash.go | 506 +++++++++++++ plugins/inputs/logstash/logstash_test.go | 726 +++++++++++++++++++ plugins/inputs/logstash/samples_logstash5.go | 156 ++++ plugins/inputs/logstash/samples_logstash6.go | 256 +++++++ 7 files changed, 1839 insertions(+) create mode 100644 plugins/inputs/logstash/README.md create mode 100644 plugins/inputs/logstash/logstash.go create mode 100644 plugins/inputs/logstash/logstash_test.go create mode 100644 plugins/inputs/logstash/samples_logstash5.go create mode 100644 plugins/inputs/logstash/samples_logstash6.go diff --git a/.gitignore b/.gitignore index 4176a0413..9e012aabd 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,10 @@ +# Build and binaries /build /telegraf /telegraf.exe /telegraf.gz /vendor + +# Editor files +*~ +.idea diff 
--git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index c3b134684..13b70d5ca 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -78,6 +78,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/leofs" _ "github.com/influxdata/telegraf/plugins/inputs/linux_sysctl_fs" _ "github.com/influxdata/telegraf/plugins/inputs/logparser" + _ "github.com/influxdata/telegraf/plugins/inputs/logstash" _ "github.com/influxdata/telegraf/plugins/inputs/lustre2" _ "github.com/influxdata/telegraf/plugins/inputs/mailchimp" _ "github.com/influxdata/telegraf/plugins/inputs/mcrouter" diff --git a/plugins/inputs/logstash/README.md b/plugins/inputs/logstash/README.md new file mode 100644 index 000000000..f54697c39 --- /dev/null +++ b/plugins/inputs/logstash/README.md @@ -0,0 +1,189 @@ +# Logstash Input Plugin + +This plugin reads metrics exposed by +[Logstash Monitoring API](https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html). + +### Configuration: + +```toml + ## This plugin reads metrics exposed by Logstash Monitoring API. + ## https://www.elastic.co/guide/en/logstash/current/monitoring.html + + ## The URL of the exposed Logstash API endpoint + url = "http://127.0.0.1:9600" + + ## Enable Logstash 6+ multi-pipeline statistics support + multi_pipeline = true + + ## Should the general process statistics be gathered + collect_process_stats = true + + ## Should the JVM specific statistics be gathered + collect_jvm_stats = true + + ## Should the event pipelines statistics be gathered + collect_pipelines_stats = true + + ## Should the plugin statistics be gathered + collect_plugins_stats = true + + ## Should the queue statistics be gathered + collect_queue_stats = true + + ## HTTP method + # method = "GET" + + ## Optional HTTP headers + # headers = {"X-Special-Header" = "Special-Value"} + + ## Override HTTP "Host" header + # host_header = "logstash.example.com" + + ## Timeout for HTTP requests + timeout = "5s" + + ## Optional HTTP Basic Auth credentials + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` + +### Measurements & Fields: + +- **logstash_jvm** + * Fields: + - threads_peak_count + - mem_pools_survivor_peak_max_in_bytes + - mem_pools_survivor_max_in_bytes + - mem_pools_old_peak_used_in_bytes + - mem_pools_young_used_in_bytes + - mem_non_heap_committed_in_bytes + - threads_count + - mem_pools_old_committed_in_bytes + - mem_pools_young_peak_max_in_bytes + - mem_heap_used_percent + - gc_collectors_young_collection_time_in_millis + - mem_pools_survivor_peak_used_in_bytes + - mem_pools_young_committed_in_bytes + - gc_collectors_old_collection_time_in_millis + - gc_collectors_old_collection_count + - mem_pools_survivor_used_in_bytes + - mem_pools_old_used_in_bytes + - mem_pools_young_max_in_bytes + - mem_heap_max_in_bytes + - mem_non_heap_used_in_bytes + - mem_pools_survivor_committed_in_bytes + - mem_pools_old_max_in_bytes + - mem_heap_committed_in_bytes + - mem_pools_old_peak_max_in_bytes + - mem_pools_young_peak_used_in_bytes + - mem_heap_used_in_bytes + - gc_collectors_young_collection_count + - uptime_in_millis + * Tags: + - node_id + - node_name + - node_host + - node_version + +- **logstash_process** + * Fields: + - open_file_descriptors + - cpu_load_average_1m + - cpu_load_average_5m + - cpu_load_average_15m + - 
cpu_total_in_millis
+    - cpu_percent
+    - peak_open_file_descriptors
+    - max_file_descriptors
+    - mem_total_virtual_in_bytes
+  * Tags:
+    - node_id
+    - node_name
+    - node_host
+    - node_version
+
+- **logstash_events**
+  * Fields:
+    - queue_push_duration_in_millis
+    - duration_in_millis
+    - in
+    - filtered
+    - out
+  * Tags:
+    - node_id
+    - node_name
+    - node_host
+    - node_version
+    - pipeline (for Logstash 6 only)
+
+- **logstash_plugins**
+  * Fields:
+    - queue_push_duration_in_millis (for input plugins only)
+    - duration_in_millis
+    - in
+    - out
+  * Tags:
+    - node_id
+    - node_name
+    - node_host
+    - node_version
+    - pipeline (for Logstash 6 only)
+    - plugin_id
+    - plugin_name
+    - plugin_type
+
+- **logstash_queue**
+  * Fields:
+    - events
+    - free_space_in_bytes
+    - max_queue_size_in_bytes
+    - max_unread_events
+    - page_capacity_in_bytes
+    - queue_size_in_bytes
+  * Tags:
+    - node_id
+    - node_name
+    - node_host
+    - node_version
+    - pipeline (for Logstash 6 only)
+    - queue_type
+
+### Tags description
+
+- node_id - The uuid of the logstash node. Randomly generated.
+- node_name - The name of the logstash node. Can be defined in the *logstash.yml* or defaults to the hostname.
+  Can be used to break apart metrics from different logstash instances of the same host.
+- node_host - The hostname of the logstash node.
+  Can be different from telegraf's host if a remote connection to the logstash instance is used.
+- node_version - The version of the logstash service running on this node.
+- pipeline (for Logstash 6 only) - The name of a pipeline if multi-pipeline is configured.
+  Defaults to "main" if there is only one pipeline; missing for Logstash 5.
+- plugin_id - The unique id of this plugin.
+  It will be a randomly generated string unless it's defined in the logstash pipeline config file.
+- plugin_name - The name of this plugin, e.g. file, elasticsearch, date, mutate.
+- plugin_type - The type of this plugin, i.e. input/filter/output.
+- queue_type - The type of the event queue (memory/persisted).
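+
+Note: the event and plugin counters (in, out, duration_in_millis, etc.) are
+cumulative totals since the node started, so per-interval rates have to be
+derived downstream.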
+ +### Example Output: + +``` +$ ./telegraf -config telegraf.conf -input-filter logstash -test + +> logstash_jvm,host=node-6,node_host=node-6,node_id=3044f675-21ce-4335-898a-8408aa678245,node_name=node-6-test,node_version=6.4.2 + gc_collectors_old_collection_count=5,gc_collectors_old_collection_time_in_millis=702,gc_collectors_young_collection_count=95,gc_collectors_young_collection_time_in_millis=4772,mem_heap_committed_in_bytes=360804352,mem_heap_max_in_bytes=8389328896,mem_heap_used_in_bytes=252629768,mem_heap_used_percent=3,mem_non_heap_committed_in_bytes=212144128,mem_non_heap_used_in_bytes=188726024,mem_pools_old_committed_in_bytes=280260608,mem_pools_old_max_in_bytes=6583418880,mem_pools_old_peak_max_in_bytes=6583418880,mem_pools_old_peak_used_in_bytes=235352976,mem_pools_old_used_in_bytes=194754608,mem_pools_survivor_committed_in_bytes=8912896,mem_pools_survivor_max_in_bytes=200605696,mem_pools_survivor_peak_max_in_bytes=200605696,mem_pools_survivor_peak_used_in_bytes=8912896,mem_pools_survivor_used_in_bytes=4476680,mem_pools_young_committed_in_bytes=71630848,mem_pools_young_max_in_bytes=1605304320,mem_pools_young_peak_max_in_bytes=1605304320,mem_pools_young_peak_used_in_bytes=71630848,mem_pools_young_used_in_bytes=53398480,threads_count=60,threads_peak_count=62,uptime_in_millis=10469014 1540289864000000000 +> logstash_process,host=node-6,node_host=node-6,node_id=3044f675-21ce-4335-898a-8408aa678245,node_name=node-6-test,node_version=6.4.2 + cpu_load_average_15m=39.84,cpu_load_average_1m=32.87,cpu_load_average_5m=39.23,cpu_percent=0,cpu_total_in_millis=389920,max_file_descriptors=262144,mem_total_virtual_in_bytes=17921355776,open_file_descriptors=132,peak_open_file_descriptors=140 1540289864000000000 +> logstash_events,host=node-6,node_host=node-6,node_id=3044f675-21ce-4335-898a-8408aa678245,node_name=node-6-test,node_version=6.4.2,pipeline=main + duration_in_millis=175144,filtered=4543,in=4543,out=4543,queue_push_duration_in_millis=19 1540289864000000000 +> logstash_plugins,host=node-6,node_host=node-6,node_id=3044f675-21ce-4335-898a-8408aa678245,node_name=node-6-test,node_version=6.4.2,pipeline=main,plugin_id=input-kafka,plugin_name=kafka,plugin_type=input + duration_in_millis=0,in=0,out=4543,queue_push_duration_in_millis=19 1540289864000000000 +``` diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go new file mode 100644 index 000000000..ba25fafd5 --- /dev/null +++ b/plugins/inputs/logstash/logstash.go @@ -0,0 +1,506 @@ +package logstash + +import ( + "encoding/json" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" + "net/http" + "net/url" + "time" + + jsonParser "github.com/influxdata/telegraf/plugins/parsers/json" +) + +// ##### Interface ##### + +const sampleConfig = ` + ## This plugin reads metrics exposed by Logstash Monitoring API. 
+  ## https://www.elastic.co/guide/en/logstash/current/monitoring.html
+
+  ## The URL of the exposed Logstash API endpoint
+  url = "http://127.0.0.1:9600"
+
+  ## Enable Logstash 6+ multi-pipeline statistics support
+  multi_pipeline = true
+
+  ## Should the general process statistics be gathered
+  collect_process_stats = true
+
+  ## Should the JVM specific statistics be gathered
+  collect_jvm_stats = true
+
+  ## Should the event pipelines statistics be gathered
+  collect_pipelines_stats = true
+
+  ## Should the plugin statistics be gathered
+  collect_plugins_stats = true
+
+  ## Should the queue statistics be gathered
+  collect_queue_stats = true
+
+  ## HTTP method
+  # method = "GET"
+
+  ## Optional HTTP headers
+  # headers = {"X-Special-Header" = "Special-Value"}
+
+  ## Override HTTP "Host" header
+  # host_header = "logstash.example.com"
+
+  ## Timeout for HTTP requests
+  timeout = "5s"
+
+  ## Optional HTTP Basic Auth credentials
+  # username = "username"
+  # password = "pa$$word"
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+`
+
+type Logstash struct {
+	URL string `toml:"url"`
+
+	MultiPipeline         bool `toml:"multi_pipeline"`
+	CollectProcessStats   bool `toml:"collect_process_stats"`
+	CollectJVMStats       bool `toml:"collect_jvm_stats"`
+	CollectPipelinesStats bool `toml:"collect_pipelines_stats"`
+	CollectPluginsStats   bool `toml:"collect_plugins_stats"`
+	CollectQueueStats     bool `toml:"collect_queue_stats"`
+
+	Username   string            `toml:"username"`
+	Password   string            `toml:"password"`
+	Method     string            `toml:"method"`
+	Headers    map[string]string `toml:"headers"`
+	HostHeader string            `toml:"host_header"`
+	Timeout    internal.Duration `toml:"timeout"`
+
+	tls.ClientConfig
+	client *http.Client
+}
+
+// NewLogstash creates an instance of the plugin with default settings
+func NewLogstash() *Logstash {
+	return &Logstash{
+		URL:                   "http://127.0.0.1:9600",
+		MultiPipeline:         true,
+		CollectProcessStats:   true,
+		CollectJVMStats:       true,
+		CollectPipelinesStats: true,
+		CollectPluginsStats:   true,
+		CollectQueueStats:     true,
+		Method:                "GET",
+		Headers:               make(map[string]string),
+		HostHeader:            "",
+		Timeout:               internal.Duration{Duration: time.Second * 5},
+	}
+}
+
+// init registers this plugin instance
+func init() {
+	inputs.Add("logstash", func() telegraf.Input {
+		return NewLogstash()
+	})
+}
+
+// Description returns short info about the plugin
+func (logstash *Logstash) Description() string {
+	return "Read metrics exposed by Logstash"
+}
+
+// SampleConfig returns details on how to configure the plugin
+func (logstash *Logstash) SampleConfig() string {
+	return sampleConfig
+}
+
+type ProcessStats struct {
+	ID      string      `json:"id"`
+	Process interface{} `json:"process"`
+	Name    string      `json:"name"`
+	Host    string      `json:"host"`
+	Version string      `json:"version"`
+}
+
+type JVMStats struct {
+	ID      string      `json:"id"`
+	JVM     interface{} `json:"jvm"`
+	Name    string      `json:"name"`
+	Host    string      `json:"host"`
+	Version string      `json:"version"`
+}
+
+type PipelinesStats struct {
+	ID        string              `json:"id"`
+	Pipelines map[string]Pipeline `json:"pipelines"`
+	Name      string              `json:"name"`
+	Host      string              `json:"host"`
+	Version   string              `json:"version"`
+}
+
+type PipelineStats struct {
+	ID       string   `json:"id"`
+	Pipeline Pipeline `json:"pipeline"`
+	Name     string   `json:"name"`
+	Host     string   `json:"host"`
+	Version  string   `json:"version"`
+}
+
+type Pipeline struct {
+	Events  interface{}     `json:"events"`
+	Plugins PipelinePlugins `json:"plugins"`
+	Reloads interface{}     `json:"reloads"`
+	Queue   PipelineQueue   `json:"queue"`
+}
+
+type Plugin struct {
+	ID     string      `json:"id"`
+	Events interface{} `json:"events"`
+	Name   string      `json:"name"`
+}
+
+type PipelinePlugins struct {
+	Inputs  []Plugin `json:"inputs"`
+	Filters []Plugin `json:"filters"`
+	Outputs []Plugin `json:"outputs"`
+}
+
+type PipelineQueue struct {
+	Events   float64     `json:"events"`
+	Type     string      `json:"type"`
+	Capacity interface{} `json:"capacity"`
+	Data     interface{} `json:"data"`
+}
+
+const jvmStats = "/_node/stats/jvm"
+const processStats = "/_node/stats/process"
+const pipelinesStats = "/_node/stats/pipelines"
+const pipelineStats = "/_node/stats/pipeline"
+
+// createHttpClient creates an HTTP client to access the API
+func (logstash *Logstash) createHttpClient() (*http.Client, error) {
+	tlsConfig, err := logstash.ClientConfig.TLSConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	client := &http.Client{
+		Transport: &http.Transport{
+			TLSClientConfig: tlsConfig,
+		},
+		Timeout: logstash.Timeout.Duration,
+	}
+
+	return client, nil
+}
+
+// gatherJsonData queries the data source and parses the response JSON
+func (logstash *Logstash) gatherJsonData(url string, value interface{}) error {
+
+	var method string
+	if logstash.Method != "" {
+		method = logstash.Method
+	} else {
+		method = "GET"
+	}
+
+	request, err := http.NewRequest(method, url, nil)
+	if err != nil {
+		return err
+	}
+
+	if (logstash.Username != "") || (logstash.Password != "") {
+		request.SetBasicAuth(logstash.Username, logstash.Password)
+	}
+	for header, value := range logstash.Headers {
+		request.Header.Add(header, value)
+	}
+	if logstash.HostHeader != "" {
+		request.Host = logstash.HostHeader
+	}
+
+	response, err := logstash.client.Do(request)
+	if err != nil {
+		return err
+	}
+
+	defer response.Body.Close()
+
+	err = json.NewDecoder(response.Body).Decode(value)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// gatherJVMStats gathers the JVM metrics and adds the results to the accumulator
+func (logstash *Logstash) gatherJVMStats(url string, accumulator telegraf.Accumulator) error {
+	jvmStats := &JVMStats{}
+
+	err := logstash.gatherJsonData(url, jvmStats)
+	if err != nil {
+		return err
+	}
+
+	tags := map[string]string{
+		"node_id":      jvmStats.ID,
+		"node_name":    jvmStats.Name,
+		"node_host":    jvmStats.Host,
+		"node_version": jvmStats.Version,
+	}
+
+	flattener := jsonParser.JSONFlattener{}
+	err = flattener.FlattenJSON("", jvmStats.JVM)
+	if err != nil {
+		return err
+	}
+	accumulator.AddFields("logstash_jvm", flattener.Fields, tags)
+
+	return nil
+}
+
+// gatherProcessStats gathers the process metrics and adds the results to the accumulator
+func (logstash *Logstash) gatherProcessStats(url string, accumulator telegraf.Accumulator) error {
+	processStats := &ProcessStats{}
+
+	err := logstash.gatherJsonData(url, processStats)
+	if err != nil {
+		return err
+	}
+
+	tags := map[string]string{
+		"node_id":      processStats.ID,
+		"node_name":    processStats.Name,
+		"node_host":    processStats.Host,
+		"node_version": processStats.Version,
+	}
+
+	flattener := jsonParser.JSONFlattener{}
+	err = flattener.FlattenJSON("", processStats.Process)
+	if err != nil {
+		return err
+	}
+	accumulator.AddFields("logstash_process", flattener.Fields, tags)
+
+	return nil
+}
+
+// gatherPluginsStats goes through a list of plugins and adds their metrics to the accumulator
+func (logstash *Logstash) gatherPluginsStats(
+	plugins []Plugin,
+	pluginType string,
+	tags map[string]string,
+	accumulator telegraf.Accumulator) error {
+
+	for _, plugin := range plugins {
+		pluginTags := map[string]string{
+			"plugin_name": plugin.Name,
+			"plugin_id":   plugin.ID,
+			"plugin_type": pluginType,
+		}
+		for tag, value := range tags {
+			pluginTags[tag] = value
+		}
+		flattener := jsonParser.JSONFlattener{}
+		err := flattener.FlattenJSON("", plugin.Events)
+		if err != nil {
+			return err
+		}
+		accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags)
+	}
+
+	return nil
+}
+
+func (logstash *Logstash) gatherQueueStats(
+	queue *PipelineQueue,
+	tags map[string]string,
+	accumulator telegraf.Accumulator) error {
+
+	var err error
+	queueTags := map[string]string{
+		"queue_type": queue.Type,
+	}
+	for tag, value := range tags {
+		queueTags[tag] = value
+	}
+
+	queueFields := map[string]interface{}{
+		"events": queue.Events,
+	}
+
+	if queue.Type != "memory" {
+		flattener := jsonParser.JSONFlattener{}
+		err = flattener.FlattenJSON("", queue.Capacity)
+		if err != nil {
+			return err
+		}
+		err = flattener.FlattenJSON("", queue.Data)
+		if err != nil {
+			return err
+		}
+		for field, value := range flattener.Fields {
+			queueFields[field] = value
+		}
+	}
+
+	accumulator.AddFields("logstash_queue", queueFields, queueTags)
+
+	return nil
+}
+
+// gatherPipelineStats gathers the pipeline metrics and adds the results to the accumulator (for Logstash < 6)
+func (logstash *Logstash) gatherPipelineStats(url string, accumulator telegraf.Accumulator) error {
+	pipelineStats := &PipelineStats{}
+
+	err := logstash.gatherJsonData(url, pipelineStats)
+	if err != nil {
+		return err
+	}
+
+	tags := map[string]string{
+		"node_id":      pipelineStats.ID,
+		"node_name":    pipelineStats.Name,
+		"node_host":    pipelineStats.Host,
+		"node_version": pipelineStats.Version,
+	}
+
+	flattener := jsonParser.JSONFlattener{}
+	err = flattener.FlattenJSON("", pipelineStats.Pipeline.Events)
+	if err != nil {
+		return err
+	}
+	accumulator.AddFields("logstash_events", flattener.Fields, tags)
+
+	if logstash.CollectPluginsStats {
+		err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Inputs, "input", tags, accumulator)
+		if err != nil {
+			return err
+		}
+		err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Filters, "filter", tags, accumulator)
+		if err != nil {
+			return err
+		}
+		err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Outputs, "output", tags, accumulator)
+		if err != nil {
+			return err
+		}
+	}
+
+	if logstash.CollectQueueStats {
+		err = logstash.gatherQueueStats(&pipelineStats.Pipeline.Queue, tags, accumulator)
+	}
+
+	return err
+}
+
+// gatherPipelinesStats gathers the pipelines metrics and adds the results to the accumulator (for Logstash >= 6)
+func (logstash *Logstash) gatherPipelinesStats(url string, accumulator telegraf.Accumulator) error {
+	pipelinesStats := &PipelinesStats{}
+
+	err := logstash.gatherJsonData(url, pipelinesStats)
+	if err != nil {
+		return err
+	}
+
+	for pipelineName, pipeline := range pipelinesStats.Pipelines {
+		tags := map[string]string{
+			"node_id":      pipelinesStats.ID,
+			"node_name":    pipelinesStats.Name,
+			"node_host":    pipelinesStats.Host,
+			"node_version": pipelinesStats.Version,
+			"pipeline":     pipelineName,
+		}
+
+		flattener := jsonParser.JSONFlattener{}
+		err := flattener.FlattenJSON("", pipeline.Events)
+		if err != nil {
+			return err
+		}
+		accumulator.AddFields("logstash_events", flattener.Fields, tags)
+
+		if logstash.CollectPluginsStats {
+			err = logstash.gatherPluginsStats(pipeline.Plugins.Inputs, "input", tags, accumulator)
+			if err != nil {
+				return err
+			}
+			err = logstash.gatherPluginsStats(pipeline.Plugins.Filters, "filter", tags, accumulator)
+			if err != nil {
+				return err
+			}
+			err = logstash.gatherPluginsStats(pipeline.Plugins.Outputs, "output", tags, accumulator)
+			if err != nil {
+				return err
+			}
+		}
+
+		if logstash.CollectQueueStats {
+			err = logstash.gatherQueueStats(&pipeline.Queue, tags, accumulator)
+			if err != nil {
+				return err
+			}
+		}
+
+	}
+
+	return nil
+}
+
+// Gather asks this plugin to start gathering metrics
+func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error {
+
+	if logstash.client == nil {
+		client, err := logstash.createHttpClient()
+
+		if err != nil {
+			return err
+		}
+		logstash.client = client
+	}
+
+	if logstash.CollectJVMStats {
+		jvmUrl, err := url.Parse(logstash.URL + jvmStats)
+		if err != nil {
+			return err
+		}
+		if err := logstash.gatherJVMStats(jvmUrl.String(), accumulator); err != nil {
+			return err
+		}
+	}
+
+	if logstash.CollectProcessStats {
+		processUrl, err := url.Parse(logstash.URL + processStats)
+		if err != nil {
+			return err
+		}
+		if err := logstash.gatherProcessStats(processUrl.String(), accumulator); err != nil {
+			return err
+		}
+	}
+
+	if logstash.CollectPipelinesStats {
+		if logstash.MultiPipeline {
+			pipelinesUrl, err := url.Parse(logstash.URL + pipelinesStats)
+			if err != nil {
+				return err
+			}
+			if err := logstash.gatherPipelinesStats(pipelinesUrl.String(), accumulator); err != nil {
+				return err
+			}
+		} else {
+			pipelineUrl, err := url.Parse(logstash.URL + pipelineStats)
+			if err != nil {
+				return err
+			}
+			if err := logstash.gatherPipelineStats(pipelineUrl.String(), accumulator); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/plugins/inputs/logstash/logstash_test.go b/plugins/inputs/logstash/logstash_test.go
new file mode 100644
index 000000000..c091be83c
--- /dev/null
+++ b/plugins/inputs/logstash/logstash_test.go
@@ -0,0 +1,726 @@
+package logstash
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"testing"
+
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/assert"
+)
+
+var logstashTest = NewLogstash()
+
+var (
+	logstash5accPipelineStats  testutil.Accumulator
+	logstash6accPipelinesStats testutil.Accumulator
+	logstash5accProcessStats   testutil.Accumulator
+	logstash6accProcessStats   testutil.Accumulator
+	logstash5accJVMStats       testutil.Accumulator
+	logstash6accJVMStats       testutil.Accumulator
+)
+
+func Test_Logstash5GatherProcessStats(test *testing.T) {
+	fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
+		writer.Header().Set("Content-Type", "application/json")
+		fmt.Fprintf(writer, "%s", string(logstash5ProcessJSON))
+	}))
+	requestURL, err := url.Parse(logstashTest.URL)
+	if err != nil {
+		test.Logf("Can't connect to: %s", logstashTest.URL)
+	}
+	fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port()))
+	fakeServer.Start()
+	defer fakeServer.Close()
+
+	if logstashTest.client == nil {
+		client, err := logstashTest.createHttpClient()
+
+		if err != nil {
+			test.Logf("Can't createHttpClient")
+		}
+		logstashTest.client = client
+	}
+
+	if err := logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash5accProcessStats); err != nil {
+		test.Logf("Can't gather Process stats")
+	}
+
+	logstash5accProcessStats.AssertContainsTaggedFields(
+		test,
+		"logstash_process",
+		map[string]interface{}{
+			"open_file_descriptors":      float64(89.0),
+			"max_file_descriptors":       float64(1.048576e+06),
+			"cpu_percent": 
float64(3.0), + "cpu_load_average_5m": float64(0.61), + "cpu_load_average_15m": float64(0.54), + "mem_total_virtual_in_bytes": float64(4.809506816e+09), + "cpu_total_in_millis": float64(1.5526e+11), + "cpu_load_average_1m": float64(0.49), + "peak_open_file_descriptors": float64(100.0), + }, + map[string]string{ + "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"), + "node_name": string("node-5-test"), + "node_host": string("node-5"), + "node_version": string("5.3.0"), + }, + ) +} + +func Test_Logstash6GatherProcessStats(test *testing.T) { + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + writer.Header().Set("Content-Type", "application/json") + fmt.Fprintf(writer, "%s", string(logstash6ProcessJSON)) + })) + requestURL, err := url.Parse(logstashTest.URL) + if err != nil { + test.Logf("Can't connect to: %s", logstashTest.URL) + } + fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + fakeServer.Start() + defer fakeServer.Close() + + if logstashTest.client == nil { + client, err := logstashTest.createHttpClient() + + if err != nil { + test.Logf("Can't createHttpClient") + } + logstashTest.client = client + } + + if err := logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash6accProcessStats); err != nil { + test.Logf("Can't gather Process stats") + } + + logstash6accProcessStats.AssertContainsTaggedFields( + test, + "logstash_process", + map[string]interface{}{ + "open_file_descriptors": float64(133.0), + "max_file_descriptors": float64(262144.0), + "cpu_percent": float64(0.0), + "cpu_load_average_5m": float64(42.4), + "cpu_load_average_15m": float64(38.95), + "mem_total_virtual_in_bytes": float64(17923452928.0), + "cpu_total_in_millis": float64(5841460), + "cpu_load_average_1m": float64(48.2), + "peak_open_file_descriptors": float64(145.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "node_host": string("node-6"), + "node_version": string("6.4.2"), + }, + ) +} + +func Test_Logstash5GatherPipelineStats(test *testing.T) { + //logstash5accPipelineStats.SetDebug(true) + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + writer.Header().Set("Content-Type", "application/json") + fmt.Fprintf(writer, "%s", string(logstash5PipelineJSON)) + })) + requestURL, err := url.Parse(logstashTest.URL) + if err != nil { + test.Logf("Can't connect to: %s", logstashTest.URL) + } + fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + fakeServer.Start() + defer fakeServer.Close() + + if logstashTest.client == nil { + client, err := logstashTest.createHttpClient() + + if err != nil { + test.Logf("Can't createHttpClient") + } + logstashTest.client = client + } + + if err := logstashTest.gatherPipelineStats(logstashTest.URL+pipelineStats, &logstash5accPipelineStats); err != nil { + test.Logf("Can't gather Pipeline stats") + } + + logstash5accPipelineStats.AssertContainsTaggedFields( + test, + "logstash_events", + map[string]interface{}{ + "duration_in_millis": float64(1151.0), + "in": float64(1269.0), + "filtered": float64(1269.0), + "out": float64(1269.0), + }, + map[string]string{ + "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"), + "node_name": string("node-5-test"), + "node_host": string("node-5"), + "node_version": string("5.3.0"), + }, + ) + + 
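+	// Input plugins (beats here) report only queue_push_duration_in_millis and out.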
fields := make(map[string]interface{}) + fields["queue_push_duration_in_millis"] = float64(32.0) + fields["out"] = float64(2.0) + + logstash5accPipelineStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + fields, + map[string]string{ + "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"), + "node_name": string("node-5-test"), + "node_host": string("node-5"), + "node_version": string("5.3.0"), + "plugin_name": string("beats"), + "plugin_id": string("a35197a509596954e905e38521bae12b1498b17d-1"), + "plugin_type": string("input"), + }, + ) + + logstash5accPipelineStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(360.0), + "in": float64(1269.0), + "out": float64(1269.0), + }, + map[string]string{ + "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"), + "node_name": string("node-5-test"), + "node_host": string("node-5"), + "node_version": string("5.3.0"), + "plugin_name": string("stdout"), + "plugin_id": string("582d5c2becb582a053e1e9a6bcc11d49b69a6dfd-2"), + "plugin_type": string("output"), + }, + ) + + logstash5accPipelineStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(228.0), + "in": float64(1269.0), + "out": float64(1269.0), + }, + map[string]string{ + "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"), + "node_name": string("node-5-test"), + "node_host": string("node-5"), + "node_version": string("5.3.0"), + "plugin_name": string("s3"), + "plugin_id": string("582d5c2becb582a053e1e9a6bcc11d49b69a6dfd-3"), + "plugin_type": string("output"), + }, + ) +} + +func Test_Logstash6GatherPipelinesStats(test *testing.T) { + //logstash6accPipelinesStats.SetDebug(true) + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + writer.Header().Set("Content-Type", "application/json") + fmt.Fprintf(writer, "%s", string(logstash6PipelinesJSON)) + })) + requestURL, err := url.Parse(logstashTest.URL) + if err != nil { + test.Logf("Can't connect to: %s", logstashTest.URL) + } + fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + fakeServer.Start() + defer fakeServer.Close() + + if logstashTest.client == nil { + client, err := logstashTest.createHttpClient() + + if err != nil { + test.Logf("Can't createHttpClient") + } + logstashTest.client = client + } + + if err := logstashTest.gatherPipelinesStats(logstashTest.URL+pipelineStats, &logstash6accPipelinesStats); err != nil { + test.Logf("Can't gather Pipeline stats") + } + + fields := make(map[string]interface{}) + fields["duration_in_millis"] = float64(8540751.0) + fields["queue_push_duration_in_millis"] = float64(366.0) + fields["in"] = float64(180659.0) + fields["filtered"] = float64(180659.0) + fields["out"] = float64(180659.0) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_events", + fields, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "node_host": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + }, + ) + + fields = make(map[string]interface{}) + fields["queue_push_duration_in_millis"] = float64(366.0) + fields["out"] = float64(180659.0) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + fields, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + 
"node_name": string("node-6-test"), + "node_host": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("kafka"), + "plugin_id": string("input-kafka"), + "plugin_type": string("input"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(2117.0), + "in": float64(27641.0), + "out": float64(27641.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "node_host": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("mutate"), + "plugin_id": string("155b0ad18abbf3df1e0cb7bddef0d77c5ba699efe5a0f8a28502d140549baf54"), + "plugin_type": string("filter"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(2117.0), + "in": float64(27641.0), + "out": float64(27641.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "node_host": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("mutate"), + "plugin_id": string("155b0ad18abbf3df1e0cb7bddef0d77c5ba699efe5a0f8a28502d140549baf54"), + "plugin_type": string("filter"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(13149.0), + "in": float64(180659.0), + "out": float64(177549.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "node_host": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("date"), + "plugin_id": string("d079424bb6b7b8c7c61d9c5e0ddae445e92fa9ffa2e8690b0a669f7c690542f0"), + "plugin_type": string("filter"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(2814.0), + "in": float64(76602.0), + "out": float64(76602.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "node_host": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("mutate"), + "plugin_id": string("25afa60ab6dc30512fe80efa3493e4928b5b1b109765b7dc46a3e4bbf293d2d4"), + "plugin_type": string("filter"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(9.0), + "in": float64(934.0), + "out": float64(934.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "node_host": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("mutate"), + "plugin_id": string("2d9fa8f74eeb137bfa703b8050bad7d76636fface729e4585b789b5fc9bed668"), + "plugin_type": string("filter"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(173.0), + "in": float64(3110.0), + "out": float64(0.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + 
"node_name": string("node-6-test"), + "node_host": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("drop"), + "plugin_id": string("4ed14c9ef0198afe16c31200041e98d321cb5c2e6027e30b077636b8c4842110"), + "plugin_type": string("filter"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(5605.0), + "in": float64(75482.0), + "out": float64(75482.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "node_host": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("mutate"), + "plugin_id": string("358ce1eb387de7cd5711c2fb4de64cd3b12e5ca9a4c45f529516bcb053a31df4"), + "plugin_type": string("filter"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(313992.0), + "in": float64(180659.0), + "out": float64(180659.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "node_host": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("csv"), + "plugin_id": string("82a9bbb02fff37a63c257c1f146b0a36273c7cbbebe83c0a51f086e5280bf7bb"), + "plugin_type": string("filter"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(0.0), + "in": float64(0.0), + "out": float64(0.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "node_host": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("mutate"), + "plugin_id": string("8fb13a8cdd4257b52724d326aa1549603ffdd4e4fde6d20720c96b16238c18c3"), + "plugin_type": string("filter"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(651386.0), + "in": float64(177549.0), + "out": float64(177549.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "node_host": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("elasticsearch"), + "plugin_id": string("output-elk"), + "plugin_type": string("output"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(186751.0), + "in": float64(177549.0), + "out": float64(177549.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "node_host": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("kafka"), + "plugin_id": string("output-kafka1"), + "plugin_type": string("output"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(7335196.0), + "in": float64(177549.0), + "out": float64(177549.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + 
"node_host": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("kafka"), + "plugin_id": string("output-kafka2"), + "plugin_type": string("output"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_queue", + map[string]interface{}{ + "events": float64(103), + "free_space_in_bytes": float64(36307369984), + "max_queue_size_in_bytes": float64(1073741824), + "max_unread_events": float64(0), + "page_capacity_in_bytes": float64(67108864), + "queue_size_in_bytes": float64(1872391), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "node_host": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "queue_type": string("persisted"), + }, + ) + +} + +func Test_Logstash5GatherJVMStats(test *testing.T) { + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + writer.Header().Set("Content-Type", "application/json") + fmt.Fprintf(writer, "%s", string(logstash5JvmJSON)) + })) + requestURL, err := url.Parse(logstashTest.URL) + if err != nil { + test.Logf("Can't connect to: %s", logstashTest.URL) + } + fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + fakeServer.Start() + defer fakeServer.Close() + + if logstashTest.client == nil { + client, err := logstashTest.createHttpClient() + + if err != nil { + test.Logf("Can't createHttpClient") + } + logstashTest.client = client + } + + if err := logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash5accJVMStats); err != nil { + test.Logf("Can't gather JVM stats") + } + + logstash5accJVMStats.AssertContainsTaggedFields( + test, + "logstash_jvm", + map[string]interface{}{ + "mem_pools_young_max_in_bytes": float64(5.5836672e+08), + "mem_pools_young_committed_in_bytes": float64(1.43261696e+08), + "mem_heap_committed_in_bytes": float64(5.1904512e+08), + "threads_count": float64(29.0), + "mem_pools_old_peak_used_in_bytes": float64(1.27900864e+08), + "mem_pools_old_peak_max_in_bytes": float64(7.2482816e+08), + "mem_heap_used_percent": float64(16.0), + "gc_collectors_young_collection_time_in_millis": float64(3235.0), + "mem_pools_survivor_committed_in_bytes": float64(1.7825792e+07), + "mem_pools_young_used_in_bytes": float64(7.6049384e+07), + "mem_non_heap_committed_in_bytes": float64(2.91487744e+08), + "mem_pools_survivor_peak_max_in_bytes": float64(3.4865152e+07), + "mem_pools_young_peak_max_in_bytes": float64(2.7918336e+08), + "uptime_in_millis": float64(4.803461e+06), + "mem_pools_survivor_peak_used_in_bytes": float64(8.912896e+06), + "mem_pools_survivor_max_in_bytes": float64(6.9730304e+07), + "gc_collectors_old_collection_count": float64(2.0), + "mem_pools_survivor_used_in_bytes": float64(9.419672e+06), + "mem_pools_old_used_in_bytes": float64(2.55801728e+08), + "mem_pools_old_max_in_bytes": float64(1.44965632e+09), + "mem_pools_young_peak_used_in_bytes": float64(7.1630848e+07), + "mem_heap_used_in_bytes": float64(3.41270784e+08), + "mem_heap_max_in_bytes": float64(2.077753344e+09), + "gc_collectors_young_collection_count": float64(616.0), + "threads_peak_count": float64(31.0), + "mem_pools_old_committed_in_bytes": float64(3.57957632e+08), + "gc_collectors_old_collection_time_in_millis": float64(114.0), + "mem_non_heap_used_in_bytes": float64(2.68905936e+08), + }, + map[string]string{ + "node_id": 
string("a360d8cf-6289-429d-8419-6145e324b574"), + "node_name": string("node-5-test"), + "node_host": string("node-5"), + "node_version": string("5.3.0"), + }, + ) + +} + +func Test_Logstash6GatherJVMStats(test *testing.T) { + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + writer.Header().Set("Content-Type", "application/json") + fmt.Fprintf(writer, "%s", string(logstash6JvmJSON)) + })) + requestURL, err := url.Parse(logstashTest.URL) + if err != nil { + test.Logf("Can't connect to: %s", logstashTest.URL) + } + fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + fakeServer.Start() + defer fakeServer.Close() + + if logstashTest.client == nil { + client, err := logstashTest.createHttpClient() + + if err != nil { + test.Logf("Can't createHttpClient") + } + logstashTest.client = client + } + + if err := logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash6accJVMStats); err != nil { + test.Logf("Can't gather JVM stats") + } + + logstash6accJVMStats.AssertContainsTaggedFields( + test, + "logstash_jvm", + map[string]interface{}{ + "mem_pools_young_max_in_bytes": float64(1605304320.0), + "mem_pools_young_committed_in_bytes": float64(71630848.0), + "mem_heap_committed_in_bytes": float64(824963072.0), + "threads_count": float64(60.0), + "mem_pools_old_peak_used_in_bytes": float64(696572600.0), + "mem_pools_old_peak_max_in_bytes": float64(6583418880.0), + "mem_heap_used_percent": float64(2.0), + "gc_collectors_young_collection_time_in_millis": float64(107321.0), + "mem_pools_survivor_committed_in_bytes": float64(8912896.0), + "mem_pools_young_used_in_bytes": float64(11775120.0), + "mem_non_heap_committed_in_bytes": float64(222986240.0), + "mem_pools_survivor_peak_max_in_bytes": float64(200605696), + "mem_pools_young_peak_max_in_bytes": float64(1605304320.0), + "uptime_in_millis": float64(281850926.0), + "mem_pools_survivor_peak_used_in_bytes": float64(8912896.0), + "mem_pools_survivor_max_in_bytes": float64(200605696.0), + "gc_collectors_old_collection_count": float64(37.0), + "mem_pools_survivor_used_in_bytes": float64(835008.0), + "mem_pools_old_used_in_bytes": float64(189750576.0), + "mem_pools_old_max_in_bytes": float64(6583418880.0), + "mem_pools_young_peak_used_in_bytes": float64(71630848.0), + "mem_heap_used_in_bytes": float64(202360704.0), + "mem_heap_max_in_bytes": float64(8389328896.0), + "gc_collectors_young_collection_count": float64(2094.0), + "threads_peak_count": float64(62.0), + "mem_pools_old_committed_in_bytes": float64(744419328.0), + "gc_collectors_old_collection_time_in_millis": float64(7492.0), + "mem_non_heap_used_in_bytes": float64(197878896.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "node_host": string("node-6"), + "node_version": string("6.4.2"), + }, + ) + +} + +func Test_LogstashRequests(test *testing.T) { + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + writer.Header().Set("Content-Type", "application/json") + fmt.Fprintf(writer, "%s", string(logstash6JvmJSON)) + assert.Equal(test, request.Host, "logstash.test.local") + assert.Equal(test, request.Method, "POST") + assert.Equal(test, request.Header.Get("X-Test"), "test-header") + })) + requestURL, err := url.Parse(logstashTest.URL) + if err != nil { + test.Logf("Can't connect to: %s", logstashTest.URL) + } + fakeServer.Listener, 
_ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + fakeServer.Start() + defer fakeServer.Close() + + if logstashTest.client == nil { + client, err := logstashTest.createHttpClient() + + if err != nil { + test.Logf("Can't createHttpClient") + } + logstashTest.client = client + } + + logstashTest.Method = "POST" + logstashTest.Headers["X-Test"] = "test-header" + logstashTest.HostHeader = "logstash.test.local" + + if err := logstashTest.gatherJsonData(logstashTest.URL+jvmStats, &logstash6accJVMStats); err != nil { + test.Logf("Can't gather JVM stats") + } +} diff --git a/plugins/inputs/logstash/samples_logstash5.go b/plugins/inputs/logstash/samples_logstash5.go new file mode 100644 index 000000000..598f6dab5 --- /dev/null +++ b/plugins/inputs/logstash/samples_logstash5.go @@ -0,0 +1,156 @@ +package logstash + +const logstash5ProcessJSON = ` +{ + "host" : "node-5", + "version" : "5.3.0", + "http_address" : "0.0.0.0:9600", + "id" : "a360d8cf-6289-429d-8419-6145e324b574", + "name" : "node-5-test", + "process" : { + "open_file_descriptors" : 89, + "peak_open_file_descriptors" : 100, + "max_file_descriptors" : 1048576, + "mem" : { + "total_virtual_in_bytes" : 4809506816 + }, + "cpu" : { + "total_in_millis" : 155260000000, + "percent" : 3, + "load_average" : { + "1m" : 0.49, + "5m" : 0.61, + "15m" : 0.54 + } + } + } +} +` + +const logstash5JvmJSON = ` +{ + "host" : "node-5", + "version" : "5.3.0", + "http_address" : "0.0.0.0:9600", + "id" : "a360d8cf-6289-429d-8419-6145e324b574", + "name" : "node-5-test", + "jvm" : { + "threads" : { + "count" : 29, + "peak_count" : 31 + }, + "mem" : { + "heap_used_in_bytes" : 341270784, + "heap_used_percent" : 16, + "heap_committed_in_bytes" : 519045120, + "heap_max_in_bytes" : 2077753344, + "non_heap_used_in_bytes" : 268905936, + "non_heap_committed_in_bytes" : 291487744, + "pools" : { + "survivor" : { + "peak_used_in_bytes" : 8912896, + "used_in_bytes" : 9419672, + "peak_max_in_bytes" : 34865152, + "max_in_bytes" : 69730304, + "committed_in_bytes" : 17825792 + }, + "old" : { + "peak_used_in_bytes" : 127900864, + "used_in_bytes" : 255801728, + "peak_max_in_bytes" : 724828160, + "max_in_bytes" : 1449656320, + "committed_in_bytes" : 357957632 + }, + "young" : { + "peak_used_in_bytes" : 71630848, + "used_in_bytes" : 76049384, + "peak_max_in_bytes" : 279183360, + "max_in_bytes" : 558366720, + "committed_in_bytes" : 143261696 + } + } + }, + "gc" : { + "collectors" : { + "old" : { + "collection_time_in_millis" : 114, + "collection_count" : 2 + }, + "young" : { + "collection_time_in_millis" : 3235, + "collection_count" : 616 + } + } + }, + "uptime_in_millis" : 4803461 + } +} +` + +const logstash5PipelineJSON = ` +{ + "host" : "node-5", + "version" : "5.3.0", + "http_address" : "0.0.0.0:9600", + "id" : "a360d8cf-6289-429d-8419-6145e324b574", + "name" : "node-5-test", + "pipeline" : { + "events" : { + "duration_in_millis" : 1151, + "in" : 1269, + "filtered" : 1269, + "out" : 1269 + }, + "plugins" : { + "inputs" : [ { + "id" : "a35197a509596954e905e38521bae12b1498b17d-1", + "events" : { + "out" : 2, + "queue_push_duration_in_millis" : 32 + }, + "name" : "beats" + } ], + "filters" : [ ], + "outputs" : [ { + "id" : "582d5c2becb582a053e1e9a6bcc11d49b69a6dfd-3", + "events" : { + "duration_in_millis" : 228, + "in" : 1269, + "out" : 1269 + }, + "name" : "s3" + }, { + "id" : "582d5c2becb582a053e1e9a6bcc11d49b69a6dfd-2", + "events" : { + "duration_in_millis" : 360, + "in" : 1269, + "out" : 1269 + }, + "name" : "stdout" + } ] + }, + "reloads" : { 
+ "last_error" : null, + "successes" : 0, + "last_success_timestamp" : null, + "last_failure_timestamp" : null, + "failures" : 0 + }, + "queue" : { + "events" : 208, + "type" : "persisted", + "capacity" : { + "page_capacity_in_bytes" : 262144000, + "max_queue_size_in_bytes" : 8589934592, + "max_unread_events" : 0 + }, + "data" : { + "path" : "/path/to/data/queue", + "free_space_in_bytes" : 89280552960, + "storage_type" : "hfs" + } + }, + "id" : "main" + } +} +` diff --git a/plugins/inputs/logstash/samples_logstash6.go b/plugins/inputs/logstash/samples_logstash6.go new file mode 100644 index 000000000..16df2b0fd --- /dev/null +++ b/plugins/inputs/logstash/samples_logstash6.go @@ -0,0 +1,256 @@ +package logstash + +const logstash6ProcessJSON = ` +{ + "host" : "node-6", + "version" : "6.4.2", + "http_address" : "127.0.0.1:9600", + "id" : "3044f675-21ce-4335-898a-8408aa678245", + "name" : "node-6-test", + "process" : { + "open_file_descriptors" : 133, + "peak_open_file_descriptors" : 145, + "max_file_descriptors" : 262144, + "mem" : { + "total_virtual_in_bytes" : 17923452928 + }, + "cpu" : { + "total_in_millis" : 5841460, + "percent" : 0, + "load_average" : { + "1m" : 48.2, + "5m" : 42.4, + "15m" : 38.95 + } + } + } +} +` +const logstash6JvmJSON = ` +{ + "host" : "node-6", + "version" : "6.4.2", + "http_address" : "127.0.0.1:9600", + "id" : "3044f675-21ce-4335-898a-8408aa678245", + "name" : "node-6-test", + "jvm" : { + "threads" : { + "count" : 60, + "peak_count" : 62 + }, + "mem" : { + "heap_used_percent" : 2, + "heap_committed_in_bytes" : 824963072, + "heap_max_in_bytes" : 8389328896, + "heap_used_in_bytes" : 202360704, + "non_heap_used_in_bytes" : 197878896, + "non_heap_committed_in_bytes" : 222986240, + "pools" : { + "survivor" : { + "peak_used_in_bytes" : 8912896, + "used_in_bytes" : 835008, + "peak_max_in_bytes" : 200605696, + "max_in_bytes" : 200605696, + "committed_in_bytes" : 8912896 + }, + "old" : { + "peak_used_in_bytes" : 696572600, + "used_in_bytes" : 189750576, + "peak_max_in_bytes" : 6583418880, + "max_in_bytes" : 6583418880, + "committed_in_bytes" : 744419328 + }, + "young" : { + "peak_used_in_bytes" : 71630848, + "used_in_bytes" : 11775120, + "peak_max_in_bytes" : 1605304320, + "max_in_bytes" : 1605304320, + "committed_in_bytes" : 71630848 + } + } + }, + "gc" : { + "collectors" : { + "old" : { + "collection_time_in_millis" : 7492, + "collection_count" : 37 + }, + "young" : { + "collection_time_in_millis" : 107321, + "collection_count" : 2094 + } + } + }, + "uptime_in_millis" : 281850926 + } +} +` + +const logstash6PipelinesJSON = ` +{ + "host" : "node-6", + "version" : "6.4.2", + "http_address" : "127.0.0.1:9600", + "id" : "3044f675-21ce-4335-898a-8408aa678245", + "name" : "node-6-test", + "pipelines" : { + "main" : { + "events" : { + "duration_in_millis" : 8540751, + "in" : 180659, + "out" : 180659, + "filtered" : 180659, + "queue_push_duration_in_millis" : 366 + }, + "plugins" : { + "inputs" : [ + { + "id" : "input-kafka", + "events" : { + "out" : 180659, + "queue_push_duration_in_millis" : 366 + }, + "name" : "kafka" + } + ], + "filters" : [ + { + "id" : "155b0ad18abbf3df1e0cb7bddef0d77c5ba699efe5a0f8a28502d140549baf54", + "events" : { + "duration_in_millis" : 2117, + "in" : 27641, + "out" : 27641 + }, + "name" : "mutate" + }, + { + "id" : "d079424bb6b7b8c7c61d9c5e0ddae445e92fa9ffa2e8690b0a669f7c690542f0", + "events" : { + "duration_in_millis" : 13149, + "in" : 180659, + "out" : 177549 + }, + "matches" : 177546, + "failures" : 2, + "name" : "date" + }, + { + "id" : 
"25afa60ab6dc30512fe80efa3493e4928b5b1b109765b7dc46a3e4bbf293d2d4", + "events" : { + "duration_in_millis" : 2814, + "in" : 76602, + "out" : 76602 + }, + "name" : "mutate" + }, + { + "id" : "2d9fa8f74eeb137bfa703b8050bad7d76636fface729e4585b789b5fc9bed668", + "events" : { + "duration_in_millis" : 9, + "in" : 934, + "out" : 934 + }, + "name" : "mutate" + }, + { + "id" : "4ed14c9ef0198afe16c31200041e98d321cb5c2e6027e30b077636b8c4842110", + "events" : { + "duration_in_millis" : 173, + "in" : 3110, + "out" : 0 + }, + "name" : "drop" + }, + { + "id" : "358ce1eb387de7cd5711c2fb4de64cd3b12e5ca9a4c45f529516bcb053a31df4", + "events" : { + "duration_in_millis" : 5605, + "in" : 75482, + "out" : 75482 + }, + "name" : "mutate" + }, + { + "id" : "82a9bbb02fff37a63c257c1f146b0a36273c7cbbebe83c0a51f086e5280bf7bb", + "events" : { + "duration_in_millis" : 313992, + "in" : 180659, + "out" : 180659 + }, + "name" : "csv" + }, + { + "id" : "8fb13a8cdd4257b52724d326aa1549603ffdd4e4fde6d20720c96b16238c18c3", + "events" : { + "duration_in_millis" : 0, + "in" : 0, + "out" : 0 + }, + "name" : "mutate" + } + ], + "outputs" : [ + { + "id" : "output-elk", + "documents" : { + "successes" : 221 + }, + "events" : { + "duration_in_millis" : 651386, + "in" : 177549, + "out" : 177549 + }, + "bulk_requests" : { + "successes" : 1, + "responses" : { + "200" : 748 + } + }, + "name" : "elasticsearch" + }, + { + "id" : "output-kafka1", + "events" : { + "duration_in_millis" : 186751, + "in" : 177549, + "out" : 177549 + }, + "name" : "kafka" + }, + { + "id" : "output-kafka2", + "events" : { + "duration_in_millis" : 7335196, + "in" : 177549, + "out" : 177549 + }, + "name" : "kafka" + } + ] + }, + "reloads" : { + "last_error" : null, + "successes" : 0, + "last_success_timestamp" : null, + "last_failure_timestamp" : null, + "failures" : 0 + }, + "queue": { + "events": 103, + "type": "persisted", + "capacity": { + "queue_size_in_bytes": 1872391, + "page_capacity_in_bytes": 67108864, + "max_queue_size_in_bytes": 1073741824, + "max_unread_events": 0 + }, + "data": { + "path": "/var/lib/logstash/queue/main", + "free_space_in_bytes": 36307369984, + "storage_type": "ext4" + } + } + } + } +} +` From bc52592c87d16a6f6ebb6a23ccb54c977e9c5beb Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Wed, 21 Aug 2019 17:39:57 -0600 Subject: [PATCH 1126/1815] Document additional collected Kubernetes resources (#6297) --- plugins/inputs/kube_inventory/README.md | 50 +++++++++++++++++++-- plugins/inputs/kube_inventory/kube_state.go | 4 +- 2 files changed, 49 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md index 7bcb63d14..a884e24bd 100644 --- a/plugins/inputs/kube_inventory/README.md +++ b/plugins/inputs/kube_inventory/README.md @@ -42,8 +42,8 @@ avoid cardinality issues: ## Optional Resources to exclude from gathering ## Leave them with blank with try to gather everything available. 
- ## Values can be - "daemonsets", deployments", "nodes", "persistentvolumes", - ## "persistentvolumeclaims", "pods", "statefulsets" + ## Values can be - "daemonsets", "deployments", "endpoints", "ingress", "nodes", + ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets" # resource_exclude = [ "deployments", "nodes", "statefulsets" ] ## Optional Resources to include when gathering @@ -131,6 +131,36 @@ subjects: - replicas_unavailable - created ++ kubernetes_endpoints + - tags: + - endpoint_name + - namespace + - hostname + - node_name + - port_name + - port_protocol + - kind (*varies) + - fields: + - created + - generation + - ready + - port + +- kubernetes_ingress + - tags: + - ingress_name + - namespace + - hostname + - ip + - backend_service_name + - path + - host + - fields: + - created + - generation + - backend_service_port + - tls + + kubernetes_node - tags: - node_name @@ -174,7 +204,21 @@ subjects: - resource_limits_cpu_units - resource_limits_memory_bytes -+ kubernetes_statefulset ++ kubernetes_service + - tags: + - service_name + - namespace + - port_name + - port_protocol + - external_name + - cluster_ip + - fields: + - created + - generation + - port + - target_port + +- kubernetes_statefulset - tags: - statefulset_name - namespace diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_state.go index 9ffe0765e..b69ad47ac 100644 --- a/plugins/inputs/kube_inventory/kube_state.go +++ b/plugins/inputs/kube_inventory/kube_state.go @@ -51,8 +51,8 @@ var sampleConfig = ` ## Optional Resources to exclude from gathering ## Leave them blank to gather everything available. - ## Values can be - "daemonsets", deployments", "nodes", "persistentvolumes", - ## "persistentvolumeclaims", "pods", "statefulsets" + ## Values can be - "daemonsets", "deployments", "endpoints", "ingress", "nodes", + ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets" # resource_exclude = [ "deployments", "nodes", "statefulsets" ] ## Optional Resources to include when gathering From 5c8d0e3ac9d888ef2851406db72c014d9cbbe974 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Wed, 21 Aug 2019 17:49:07 -0600 Subject: [PATCH 1127/1815] Add ability to label inputs for logging (#6207) --- agent/accumulator.go | 4 +- agent/accumulator_test.go | 4 + agent/agent.go | 30 +++----- input.go | 9 -- internal/config/config.go | 46 ++++++++-- internal/models/log.go | 87 +++++++++++++++++++ internal/models/log_test.go | 70 +++++++++++++++ internal/models/running_aggregator.go | 35 +++++--- internal/models/running_input.go | 23 +++-- internal/models/running_output.go | 78 ++++++++++------- internal/models/running_processor.go | 25 +++++- plugin.go | 30 +++++++ .../cloud_pubsub_push/pubsub_push_test.go | 4 + plugins/inputs/exec/exec.go | 8 +- plugins/inputs/exec/exec_test.go | 4 +- plugins/outputs/influxdb/http.go | 17 ++-- plugins/outputs/influxdb/http_test.go | 2 + plugins/outputs/influxdb/influxdb.go | 13 ++- plugins/outputs/influxdb/influxdb_test.go | 22 ++++- plugins/outputs/influxdb/udp.go | 10 ++- plugins/outputs/influxdb/udp_test.go | 7 ++ testutil/log.go | 50 +++++++++++ 22 files changed, 475 insertions(+), 103 deletions(-) create mode 100644 internal/models/log.go create mode 100644 internal/models/log_test.go create mode 100644 plugin.go create mode 100644 testutil/log.go diff --git a/agent/accumulator.go b/agent/accumulator.go index 9e0bb11ca..21146e3e2 100644 --- 
a/agent/accumulator.go +++ b/agent/accumulator.go @@ -14,7 +14,7 @@ var ( ) type MetricMaker interface { - Name() string + LogName() string MakeMetric(metric telegraf.Metric) telegraf.Metric } @@ -111,7 +111,7 @@ func (ac *accumulator) AddError(err error) { return } NErrors.Incr(1) - log.Printf("E! [%s]: Error in plugin: %v", ac.maker.Name(), err) + log.Printf("E! [%s] Error in plugin: %v", ac.maker.LogName(), err) } func (ac *accumulator) SetPrecision(precision time.Duration) { diff --git a/agent/accumulator_test.go b/agent/accumulator_test.go index 933821701..c84948ba9 100644 --- a/agent/accumulator_test.go +++ b/agent/accumulator_test.go @@ -147,6 +147,10 @@ func (tm *TestMetricMaker) Name() string { return "TestPlugin" } +func (tm *TestMetricMaker) LogName() string { + return tm.Name() +} + func (tm *TestMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric { return metric } diff --git a/agent/agent.go b/agent/agent.go index 636c4ba68..700bccb05 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -209,7 +209,7 @@ func (a *Agent) Test(ctx context.Context, waitDuration time.Duration) error { // Special instructions for some inputs. cpu, for example, needs to be // run twice in order to return cpu usage percentages. - switch input.Name() { + switch input.Config.Name { case "inputs.cpu", "inputs.mongodb", "inputs.procstat": nulAcc := NewAccumulator(input, nulC) nulAcc.SetPrecision(a.Precision()) @@ -337,8 +337,8 @@ func (a *Agent) gatherOnce( case err := <-done: return err case <-ticker.C: - log.Printf("W! [agent] input %q did not complete within its interval", - input.Name()) + log.Printf("W! [agent] [%s] did not complete within its interval", + input.LogName()) } } } @@ -551,7 +551,7 @@ func (a *Agent) flush( logError := func(err error) { if err != nil { - log.Printf("E! [agent] Error writing to output [%s]: %v", output.Name, err) + log.Printf("E! [agent] Error writing to %s: %v", output.LogName(), err) } } @@ -603,8 +603,8 @@ func (a *Agent) flushOnce( output.LogBufferStatus() return err case <-ticker.C: - log.Printf("W! [agent] output %q did not complete within its flush interval", - output.Name) + log.Printf("W! [agent] [%q] did not complete within its flush interval", + output.LogName()) output.LogBufferStatus() } } @@ -617,7 +617,7 @@ func (a *Agent) initPlugins() error { err := input.Init() if err != nil { return fmt.Errorf("could not initialize input %s: %v", - input.Config.Name, err) + input.LogName(), err) } } for _, processor := range a.Config.Processors { @@ -647,11 +647,11 @@ func (a *Agent) initPlugins() error { // connectOutputs connects to all outputs. func (a *Agent) connectOutputs(ctx context.Context) error { for _, output := range a.Config.Outputs { - log.Printf("D! [agent] Attempting connection to output: %s\n", output.Name) + log.Printf("D! [agent] Attempting connection to [%s]", output.LogName()) err := output.Output.Connect() if err != nil { - log.Printf("E! [agent] Failed to connect to output %s, retrying in 15s, "+ - "error was '%s' \n", output.Name, err) + log.Printf("E! [agent] Failed to connect to [%s], retrying in 15s, "+ + "error was '%s'", output.LogName(), err) err := internal.SleepContext(ctx, 15*time.Second) if err != nil { @@ -663,7 +663,7 @@ func (a *Agent) connectOutputs(ctx context.Context) error { return err } } - log.Printf("D! [agent] Successfully connected to output: %s\n", output.Name) + log.Printf("D! 
[agent] Successfully connected to %s", output.LogName()) } return nil } @@ -693,8 +693,8 @@ func (a *Agent) startServiceInputs( err := si.Start(acc) if err != nil { - log.Printf("E! [agent] Service for input %s failed to start: %v", - input.Name(), err) + log.Printf("E! [agent] Service for [%s] failed to start: %v", + input.LogName(), err) for _, si := range started { si.Stop() @@ -745,8 +745,8 @@ func panicRecover(input *models.RunningInput) { if err := recover(); err != nil { trace := make([]byte, 2048) runtime.Stack(trace, true) - log.Printf("E! FATAL: Input [%s] panicked: %s, Stack:\n%s\n", - input.Name(), err, trace) + log.Printf("E! FATAL: [%s] panicked: %s, Stack:\n%s", + input.LogName(), err, trace) log.Println("E! PLEASE REPORT THIS PANIC ON GITHUB with " + "stack trace, configuration, and OS information: " + "https://github.com/influxdata/telegraf/issues/new/choose") diff --git a/input.go b/input.go index ee47bc347..071ab7d9d 100644 --- a/input.go +++ b/input.go @@ -1,14 +1,5 @@ package telegraf -// Initializer is an interface that all plugin types: Inputs, Outputs, -// Processors, and Aggregators can optionally implement to initialize the -// plugin. -type Initializer interface { - // Init performs one time setup of the plugin and returns an error if the - // configuration is invalid. - Init() error -} - type Input interface { // SampleConfig returns the default configuration of the Input SampleConfig() string diff --git a/internal/config/config.go b/internal/config/config.go index 802e3152e..f2617e8b3 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -187,7 +187,7 @@ func (c *Config) AggregatorNames() []string { func (c *Config) ProcessorNames() []string { var name []string for _, processor := range c.Processors { - name = append(name, processor.Name) + name = append(name, processor.Config.Name) } return name } @@ -196,7 +196,7 @@ func (c *Config) ProcessorNames() []string { func (c *Config) OutputNames() []string { var name []string for _, output := range c.Outputs { - name = append(name, output.Name) + name = append(name, output.Config.Name) } return name } @@ -920,11 +920,7 @@ func (c *Config) addProcessor(name string, table *ast.Table) error { return err } - rf := &models.RunningProcessor{ - Name: name, - Processor: processor, - Config: processorConfig, - } + rf := models.NewRunningProcessor(processor, processorConfig) c.Processors = append(c.Processors, rf) return nil @@ -1103,6 +1099,14 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err } } + if node, ok := tbl.Fields["alias"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + conf.Alias = str.Value + } + } + } + conf.Tags = make(map[string]string) if node, ok := tbl.Fields["tags"]; ok { if subtbl, ok := node.(*ast.Table); ok { @@ -1119,6 +1123,7 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err delete(tbl.Fields, "name_prefix") delete(tbl.Fields, "name_suffix") delete(tbl.Fields, "name_override") + delete(tbl.Fields, "alias") delete(tbl.Fields, "tags") var err error conf.Filter, err = buildFilter(tbl) @@ -1146,6 +1151,15 @@ func buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error } } + if node, ok := tbl.Fields["alias"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + conf.Alias = str.Value + } + } + } + + delete(tbl.Fields, "alias") delete(tbl.Fields, "order") var err error conf.Filter, err = buildFilter(tbl) @@ 
-1334,6 +1348,14 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) { } } + if node, ok := tbl.Fields["alias"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + cp.Alias = str.Value + } + } + } + cp.Tags = make(map[string]string) if node, ok := tbl.Fields["tags"]; ok { if subtbl, ok := node.(*ast.Table); ok { @@ -1346,6 +1368,7 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) { delete(tbl.Fields, "name_prefix") delete(tbl.Fields, "name_suffix") delete(tbl.Fields, "name_override") + delete(tbl.Fields, "alias") delete(tbl.Fields, "interval") delete(tbl.Fields, "tags") var err error @@ -2007,9 +2030,18 @@ func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { } } + if node, ok := tbl.Fields["alias"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + oc.Alias = str.Value + } + } + } + delete(tbl.Fields, "flush_interval") delete(tbl.Fields, "metric_buffer_limit") delete(tbl.Fields, "metric_batch_size") + delete(tbl.Fields, "alias") return oc, nil } diff --git a/internal/models/log.go b/internal/models/log.go new file mode 100644 index 000000000..a99eb3212 --- /dev/null +++ b/internal/models/log.go @@ -0,0 +1,87 @@ +package models + +import ( + "log" + "reflect" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/selfstat" +) + +// Logger defines a logging structure for plugins. +type Logger struct { + Errs selfstat.Stat + Name string // Name is the plugin name, will be printed in the `[]`. +} + +// Errorf logs an error message, patterned after log.Printf. +func (l *Logger) Errorf(format string, args ...interface{}) { + l.Errs.Incr(1) + log.Printf("E! ["+l.Name+"] "+format, args...) +} + +// Error logs an error message, patterned after log.Print. +func (l *Logger) Error(args ...interface{}) { + l.Errs.Incr(1) + log.Print(append([]interface{}{"E! [" + l.Name + "] "}, args...)...) +} + +// Debugf logs a debug message, patterned after log.Printf. +func (l *Logger) Debugf(format string, args ...interface{}) { + log.Printf("D! ["+l.Name+"] "+format, args...) +} + +// Debug logs a debug message, patterned after log.Print. +func (l *Logger) Debug(args ...interface{}) { + log.Print(append([]interface{}{"D! [" + l.Name + "] "}, args...)...) +} + +// Warnf logs a warning message, patterned after log.Printf. +func (l *Logger) Warnf(format string, args ...interface{}) { + log.Printf("W! ["+l.Name+"] "+format, args...) +} + +// Warn logs a warning message, patterned after log.Print. +func (l *Logger) Warn(args ...interface{}) { + log.Print(append([]interface{}{"W! [" + l.Name + "] "}, args...)...) +} + +// Infof logs an information message, patterned after log.Printf. +func (l *Logger) Infof(format string, args ...interface{}) { + log.Printf("I! ["+l.Name+"] "+format, args...) +} + +// Info logs an information message, patterned after log.Print. +func (l *Logger) Info(args ...interface{}) { + log.Print(append([]interface{}{"I! [" + l.Name + "] "}, args...)...) +} + +// logName returns the log-friendly name/type. +func logName(pluginType, name, alias string) string { + if alias == "" { + return pluginType + "." + name + } + return pluginType + "." 
+ name + "::" + alias +} + +func setLogIfExist(i interface{}, log telegraf.Logger) { + valI := reflect.ValueOf(i) + + if valI.Type().Kind() != reflect.Ptr { + valI = reflect.New(reflect.TypeOf(i)) + } + + field := valI.Elem().FieldByName("Log") + if !field.IsValid() { + return + } + + switch field.Type().String() { + case "telegraf.Logger": + if field.CanSet() { + field.Set(reflect.ValueOf(log)) + } + } + + return +} diff --git a/internal/models/log_test.go b/internal/models/log_test.go new file mode 100644 index 000000000..d4bb6ca09 --- /dev/null +++ b/internal/models/log_test.go @@ -0,0 +1,70 @@ +package models + +import ( + "testing" + + "github.com/influxdata/telegraf/selfstat" + "github.com/stretchr/testify/require" +) + +func TestErrorCounting(t *testing.T) { + iLog := Logger{Name: "inputs.test", Errs: selfstat.Register( + "gather", + "errors", + map[string]string{"input": "test"}, + )} + iLog.Error("something went wrong") + iLog.Errorf("something went wrong") + + aLog := Logger{Name: "aggregators.test", Errs: selfstat.Register( + "aggregate", + "errors", + map[string]string{"aggregator": "test"}, + )} + aLog.Name = "aggregators.test" + aLog.Error("another thing happened") + + oLog := Logger{Name: "outputs.test", Errs: selfstat.Register( + "write", + "errors", + map[string]string{"output": "test"}, + )} + oLog.Error("another thing happened") + + pLog := Logger{Name: "processors.test", Errs: selfstat.Register( + "process", + "errors", + map[string]string{"processor": "test"}, + )} + pLog.Error("another thing happened") + + require.Equal(t, int64(2), iLog.Errs.Get()) + require.Equal(t, int64(1), aLog.Errs.Get()) + require.Equal(t, int64(1), oLog.Errs.Get()) + require.Equal(t, int64(1), pLog.Errs.Get()) +} + +func TestLogging(t *testing.T) { + log := Logger{Name: "inputs.test", Errs: selfstat.Register( + "gather", + "errors", + map[string]string{"input": "test"}, + )} + + log.Errs.Set(0) + + log.Debugf("something happened") + log.Debug("something happened") + + log.Warnf("something happened") + log.Warn("something happened") + require.Equal(t, int64(0), log.Errs.Get()) + + log.Infof("something happened") + log.Info("something happened") + require.Equal(t, int64(0), log.Errs.Get()) + + log.Errorf("something happened") + log.Error("something happened") + require.Equal(t, int64(2), log.Errs.Get()) +} diff --git a/internal/models/running_aggregator.go b/internal/models/running_aggregator.go index e029aad56..ee46e5b13 100644 --- a/internal/models/running_aggregator.go +++ b/internal/models/running_aggregator.go @@ -1,7 +1,6 @@ package models import ( - "log" "sync" "time" @@ -16,6 +15,7 @@ type RunningAggregator struct { Config *AggregatorConfig periodStart time.Time periodEnd time.Time + log telegraf.Logger MetricsPushed selfstat.Stat MetricsFiltered selfstat.Stat @@ -23,39 +23,46 @@ type RunningAggregator struct { PushTime selfstat.Stat } -func NewRunningAggregator( - aggregator telegraf.Aggregator, - config *AggregatorConfig, -) *RunningAggregator { +func NewRunningAggregator(aggregator telegraf.Aggregator, config *AggregatorConfig) *RunningAggregator { + logger := &Logger{ + Name: logName("aggregators", config.Name, config.Alias), + Errs: selfstat.Register("aggregate", "errors", + map[string]string{"input": config.Name, "alias": config.Alias}), + } + + setLogIfExist(aggregator, logger) + return &RunningAggregator{ Aggregator: aggregator, Config: config, MetricsPushed: selfstat.Register( "aggregate", "metrics_pushed", - map[string]string{"aggregator": config.Name}, + 
map[string]string{"aggregator": config.Name, "alias": config.Alias}, ), MetricsFiltered: selfstat.Register( "aggregate", "metrics_filtered", - map[string]string{"aggregator": config.Name}, + map[string]string{"aggregator": config.Name, "alias": config.Alias}, ), MetricsDropped: selfstat.Register( "aggregate", "metrics_dropped", - map[string]string{"aggregator": config.Name}, + map[string]string{"aggregator": config.Name, "alias": config.Alias}, ), PushTime: selfstat.Register( "aggregate", "push_time_ns", - map[string]string{"aggregator": config.Name}, + map[string]string{"aggregator": config.Name, "alias": config.Alias}, ), + log: logger, } } // AggregatorConfig is the common config for all aggregators. type AggregatorConfig struct { Name string + Alias string DropOriginal bool Period time.Duration Delay time.Duration @@ -68,8 +75,8 @@ type AggregatorConfig struct { Filter Filter } -func (r *RunningAggregator) Name() string { - return "aggregators." + r.Config.Name +func (r *RunningAggregator) LogName() string { + return logName("aggregators", r.Config.Name, r.Config.Alias) } func (r *RunningAggregator) Init() error { @@ -93,7 +100,7 @@ func (r *RunningAggregator) EndPeriod() time.Time { func (r *RunningAggregator) UpdateWindow(start, until time.Time) { r.periodStart = start r.periodEnd = until - log.Printf("D! [%s] Updated aggregation range [%s, %s]", r.Name(), start, until) + r.log.Debugf("Updated aggregation range [%s, %s]", start, until) } func (r *RunningAggregator) MakeMetric(metric telegraf.Metric) telegraf.Metric { @@ -137,8 +144,8 @@ func (r *RunningAggregator) Add(m telegraf.Metric) bool { defer r.Unlock() if m.Time().Before(r.periodStart.Add(-r.Config.Grace)) || m.Time().After(r.periodEnd.Add(r.Config.Delay)) { - log.Printf("D! [%s] metric is outside aggregation window; discarding. %s: m: %s e: %s g: %s", - r.Name(), m.Time(), r.periodStart, r.periodEnd, r.Config.Grace) + r.log.Debugf("metric is outside aggregation window; discarding. %s: m: %s e: %s g: %s", + m.Time(), r.periodStart, r.periodEnd, r.Config.Grace) r.MetricsDropped.Incr(1) return r.Config.DropOriginal } diff --git a/internal/models/running_input.go b/internal/models/running_input.go index 73c14fc0f..85f0afb81 100644 --- a/internal/models/running_input.go +++ b/internal/models/running_input.go @@ -13,6 +13,7 @@ type RunningInput struct { Input telegraf.Input Config *InputConfig + log telegraf.Logger defaultTags map[string]string MetricsGathered selfstat.Stat @@ -20,25 +21,35 @@ type RunningInput struct { } func NewRunningInput(input telegraf.Input, config *InputConfig) *RunningInput { + logger := &Logger{ + Name: logName("inputs", config.Name, config.Alias), + Errs: selfstat.Register("gather", "errors", + map[string]string{"input": config.Name, "alias": config.Alias}), + } + + setLogIfExist(input, logger) + return &RunningInput{ Input: input, Config: config, MetricsGathered: selfstat.Register( "gather", "metrics_gathered", - map[string]string{"input": config.Name}, + map[string]string{"input": config.Name, "alias": config.Alias}, ), GatherTime: selfstat.RegisterTiming( "gather", "gather_time_ns", - map[string]string{"input": config.Name}, + map[string]string{"input": config.Name, "alias": config.Alias}, ), + log: logger, } } // InputConfig is the common config for all inputs. type InputConfig struct { Name string + Alias string Interval time.Duration NameOverride string @@ -48,14 +59,14 @@ type InputConfig struct { Filter Filter } -func (r *RunningInput) Name() string { - return "inputs." 
+ r.Config.Name -} - func (r *RunningInput) metricFiltered(metric telegraf.Metric) { metric.Drop() } +func (r *RunningInput) LogName() string { + return logName("inputs", r.Config.Name, r.Config.Alias) +} + func (r *RunningInput) Init() error { if p, ok := r.Input.(telegraf.Initializer); ok { err := p.Init() diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 438ecd480..86e68f057 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -1,7 +1,6 @@ package models import ( - "log" "sync" "sync/atomic" "time" @@ -21,6 +20,7 @@ const ( // OutputConfig containing name and filter type OutputConfig struct { Name string + Alias string Filter Filter FlushInterval time.Duration @@ -34,7 +34,6 @@ type RunningOutput struct { newMetricsCount int64 droppedMetrics int64 - Name string Output telegraf.Output Config *OutputConfig MetricBufferLimit int @@ -46,6 +45,7 @@ type RunningOutput struct { BatchReady chan time.Time buffer *Buffer + log telegraf.Logger aggMutex sync.Mutex } @@ -53,56 +53,77 @@ type RunningOutput struct { func NewRunningOutput( name string, output telegraf.Output, - conf *OutputConfig, + config *OutputConfig, batchSize int, bufferLimit int, ) *RunningOutput { - if conf.MetricBufferLimit > 0 { - bufferLimit = conf.MetricBufferLimit + logger := &Logger{ + Name: logName("outputs", config.Name, config.Alias), + Errs: selfstat.Register("gather", "errors", + map[string]string{"output": config.Name, "alias": config.Alias}), + } + + setLogIfExist(output, logger) + + if config.MetricBufferLimit > 0 { + bufferLimit = config.MetricBufferLimit } if bufferLimit == 0 { bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT } - if conf.MetricBatchSize > 0 { - batchSize = conf.MetricBatchSize + if config.MetricBatchSize > 0 { + batchSize = config.MetricBatchSize } if batchSize == 0 { batchSize = DEFAULT_METRIC_BATCH_SIZE } + ro := &RunningOutput{ - Name: name, - buffer: NewBuffer(name, bufferLimit), + buffer: NewBuffer(config.LogName(), bufferLimit), BatchReady: make(chan time.Time, 1), Output: output, - Config: conf, + Config: config, MetricBufferLimit: bufferLimit, MetricBatchSize: batchSize, MetricsFiltered: selfstat.Register( "write", "metrics_filtered", - map[string]string{"output": name}, + map[string]string{"output": config.Name, "alias": config.Alias}, ), WriteTime: selfstat.RegisterTiming( "write", "write_time_ns", - map[string]string{"output": name}, + map[string]string{"output": config.Name, "alias": config.Alias}, ), + log: logger, } return ro } +func (c *OutputConfig) LogName() string { + if c.Alias == "" { + return c.Name + } + return c.Name + "::" + c.Alias +} + +func (r *RunningOutput) LogName() string { + return logName("outputs", r.Config.Name, r.Config.Alias) +} + func (ro *RunningOutput) metricFiltered(metric telegraf.Metric) { ro.MetricsFiltered.Incr(1) metric.Drop() } -func (ro *RunningOutput) Init() error { - if p, ok := ro.Output.(telegraf.Initializer); ok { +func (r *RunningOutput) Init() error { + if p, ok := r.Output.(telegraf.Initializer); ok { err := p.Init() if err != nil { return err } + } return nil } @@ -192,35 +213,32 @@ func (ro *RunningOutput) WriteBatch() error { return nil } -func (ro *RunningOutput) Close() { - err := ro.Output.Close() +func (r *RunningOutput) Close() { + err := r.Output.Close() if err != nil { - log.Printf("E! 
[outputs.%s] Error closing output: %v", ro.Name, err) + r.log.Errorf("Error closing output: %v", err) } } -func (ro *RunningOutput) write(metrics []telegraf.Metric) error { - dropped := atomic.LoadInt64(&ro.droppedMetrics) +func (r *RunningOutput) write(metrics []telegraf.Metric) error { + dropped := atomic.LoadInt64(&r.droppedMetrics) if dropped > 0 { - log.Printf("W! [outputs.%s] Metric buffer overflow; %d metrics have been dropped", - ro.Name, dropped) - atomic.StoreInt64(&ro.droppedMetrics, 0) + r.log.Warnf("Metric buffer overflow; %d metrics have been dropped", dropped) + atomic.StoreInt64(&r.droppedMetrics, 0) } start := time.Now() - err := ro.Output.Write(metrics) + err := r.Output.Write(metrics) elapsed := time.Since(start) - ro.WriteTime.Incr(elapsed.Nanoseconds()) + r.WriteTime.Incr(elapsed.Nanoseconds()) if err == nil { - log.Printf("D! [outputs.%s] wrote batch of %d metrics in %s\n", - ro.Name, len(metrics), elapsed) + r.log.Debugf("Wrote batch of %d metrics in %s", len(metrics), elapsed) } return err } -func (ro *RunningOutput) LogBufferStatus() { - nBuffer := ro.buffer.Len() - log.Printf("D! [outputs.%s] buffer fullness: %d / %d metrics. ", - ro.Name, nBuffer, ro.MetricBufferLimit) +func (r *RunningOutput) LogBufferStatus() { + nBuffer := r.buffer.Len() + r.log.Debugf("Buffer fullness: %d / %d metrics", nBuffer, r.MetricBufferLimit) } diff --git a/internal/models/running_processor.go b/internal/models/running_processor.go index 90d32fde5..5a12716e5 100644 --- a/internal/models/running_processor.go +++ b/internal/models/running_processor.go @@ -4,12 +4,12 @@ import ( "sync" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/selfstat" ) type RunningProcessor struct { - Name string - sync.Mutex + log telegraf.Logger Processor telegraf.Processor Config *ProcessorConfig } @@ -23,10 +23,27 @@ func (rp RunningProcessors) Less(i, j int) bool { return rp[i].Config.Order < rp // FilterConfig containing a name and filter type ProcessorConfig struct { Name string + Alias string Order int64 Filter Filter } +func NewRunningProcessor(processor telegraf.Processor, config *ProcessorConfig) *RunningProcessor { + logger := &Logger{ + Name: logName("processors", config.Name, config.Alias), + Errs: selfstat.Register("process", "errors", + map[string]string{"input": config.Name, "alias": config.Alias}), + } + + setLogIfExist(processor, logger) + + return &RunningProcessor{ + Processor: processor, + Config: config, + log: logger, + } +} + func (rp *RunningProcessor) metricFiltered(metric telegraf.Metric) { metric.Drop() } @@ -40,8 +57,8 @@ func containsMetric(item telegraf.Metric, metrics []telegraf.Metric) bool { return false } -func (rp *RunningProcessor) Init() error { - if p, ok := rp.Processor.(telegraf.Initializer); ok { +func (r *RunningProcessor) Init() error { + if p, ok := r.Processor.(telegraf.Initializer); ok { err := p.Init() if err != nil { return err diff --git a/plugin.go b/plugin.go new file mode 100644 index 000000000..f79721958 --- /dev/null +++ b/plugin.go @@ -0,0 +1,30 @@ +package telegraf + +// Initializer is an interface that all plugin types: Inputs, Outputs, +// Processors, and Aggregators can optionally implement to initialize the +// plugin. +type Initializer interface { + // Init performs one time setup of the plugin and returns an error if the + // configuration is invalid. + Init() error +} + +// Logger defines an interface for logging. +type Logger interface { + // Errorf logs an error message, patterned after log.Printf. 
+ Errorf(format string, args ...interface{}) + // Error logs an error message, patterned after log.Print. + Error(args ...interface{}) + // Debugf logs a debug message, patterned after log.Printf. + Debugf(format string, args ...interface{}) + // Debug logs a debug message, patterned after log.Print. + Debug(args ...interface{}) + // Warnf logs a warning message, patterned after log.Printf. + Warnf(format string, args ...interface{}) + // Warn logs a warning message, patterned after log.Print. + Warn(args ...interface{}) + // Infof logs an information message, patterned after log.Printf. + Infof(format string, args ...interface{}) + // Info logs an information message, patterned after log.Print. + Info(args ...interface{}) +} diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go index 57734c705..45a304e60 100644 --- a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go +++ b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go @@ -183,6 +183,10 @@ func (tm *testMetricMaker) Name() string { return "TestPlugin" } +func (tm *testMetricMaker) LogName() string { + return tm.Name() +} + func (tm *testMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric { return metric } diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index 615736b3c..2d3643ad0 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -3,7 +3,6 @@ package exec import ( "bytes" "fmt" - "log" "os/exec" "path/filepath" "runtime" @@ -51,6 +50,7 @@ type Exec struct { parser parsers.Parser runner Runner + log telegraf.Logger } func NewExec() *Exec { @@ -161,7 +161,7 @@ func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync if isNagios { metrics, err = nagios.TryAddState(runErr, metrics) if err != nil { - log.Printf("E! 
[inputs.exec] failed to add nagios state: %s", err) + e.log.Errorf("failed to add nagios state: %s", err) } } @@ -229,6 +229,10 @@ func (e *Exec) Gather(acc telegraf.Accumulator) error { return nil } +func (e *Exec) Init() error { + return nil +} + func init() { inputs.Add("exec", func() telegraf.Input { return NewExec() diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index 5aaef8961..0523a181d 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -8,7 +8,6 @@ import ( "time" "github.com/influxdata/telegraf/plugins/parsers" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -97,6 +96,7 @@ func TestExec(t *testing.T) { MetricName: "exec", }) e := &Exec{ + log: testutil.Logger{}, runner: newRunnerMock([]byte(validJson), nil, nil), Commands: []string{"testcommand arg1"}, parser: parser, @@ -126,6 +126,7 @@ func TestExecMalformed(t *testing.T) { MetricName: "exec", }) e := &Exec{ + log: testutil.Logger{}, runner: newRunnerMock([]byte(malformedJson), nil, nil), Commands: []string{"badcommand arg1"}, parser: parser, @@ -142,6 +143,7 @@ func TestCommandError(t *testing.T) { MetricName: "exec", }) e := &Exec{ + log: testutil.Logger{}, runner: newRunnerMock(nil, nil, fmt.Errorf("exit status code 1")), Commands: []string{"badcommand"}, parser: parser, diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 56576082f..c7c29a638 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "io" - "log" "net" "net/http" "net/url" @@ -101,12 +100,15 @@ type HTTPConfig struct { InfluxUintSupport bool `toml:"influx_uint_support"` Serializer *influx.Serializer + log telegraf.Logger } type httpClient struct { client *http.Client config HTTPConfig createdDatabases map[string]bool + + log telegraf.Logger } func NewHTTPClient(config HTTPConfig) (*httpClient, error) { @@ -174,6 +176,7 @@ func NewHTTPClient(config HTTPConfig) (*httpClient, error) { }, createdDatabases: make(map[string]bool), config: config, + log: config.log, } return client, nil } @@ -183,6 +186,10 @@ func (c *httpClient) URL() string { return c.config.URL.String() } +func (c *httpClient) SetLogger(log telegraf.Logger) { + c.log = log +} + // Database returns the default database that this client connects too. func (c *httpClient) Database() string { return c.config.Database @@ -262,7 +269,7 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error if !c.config.SkipDatabaseCreation && !c.createdDatabases[db] { err := c.CreateDatabase(ctx, db) if err != nil { - log.Printf("W! [outputs.influxdb] when writing to [%s]: database %q creation failed: %v", + c.log.Warnf("when writing to [%s]: database %q creation failed: %v", c.config.URL, db, err) } } @@ -328,7 +335,7 @@ func (c *httpClient) writeBatch(ctx context.Context, db string, metrics []telegr // discarded for being older than the retention policy. Usually this not // a cause for concern and we don't want to retry. if strings.Contains(desc, errStringPointsBeyondRP) { - log.Printf("W! [outputs.influxdb]: when writing to [%s]: received error %v", + c.log.Warnf("when writing to [%s]: received error %v", c.URL(), desc) return nil } @@ -337,7 +344,7 @@ func (c *httpClient) writeBatch(ctx context.Context, db string, metrics []telegr // correctable at this point and so the point is dropped instead of // retrying. 
if strings.Contains(desc, errStringPartialWrite) { - log.Printf("E! [outputs.influxdb]: when writing to [%s]: received error %v; discarding points", + c.log.Errorf("when writing to [%s]: received error %v; discarding points", c.URL(), desc) return nil } @@ -345,7 +352,7 @@ func (c *httpClient) writeBatch(ctx context.Context, db string, metrics []telegr // This error indicates a bug in either Telegraf line protocol // serialization, retries would not be successful. if strings.Contains(desc, errStringUnableToParse) { - log.Printf("E! [outputs.influxdb]: when writing to [%s]: received error %v; discarding points", + c.log.Errorf("when writing to [%s]: received error %v; discarding points", c.URL(), desc) return nil } diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index 2b6b45eef..98ec4ef5b 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -21,6 +21,7 @@ import ( "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs/influxdb" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -471,6 +472,7 @@ func TestHTTP_Write(t *testing.T) { client, err := influxdb.NewHTTPClient(tt.config) require.NoError(t, err) + client.SetLogger(testutil.Logger{}) err = client.Write(ctx, metrics) if tt.errFunc != nil { tt.errFunc(t, err) diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index b07d58fc2..0a6f66696 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "log" "math/rand" "net/url" "time" @@ -28,6 +27,7 @@ type Client interface { Database() string URL() string Close() + SetLogger(telegraf.Logger) } // InfluxDB struct is the primary data structure for the plugin @@ -59,6 +59,7 @@ type InfluxDB struct { CreateUDPClientF func(config *UDPConfig) (Client, error) serializer *influx.Serializer + Log telegraf.Logger } var sampleConfig = ` @@ -171,6 +172,8 @@ func (i *InfluxDB) Connect() error { return err } + c.SetLogger(i.Log) + i.clients = append(i.clients, c) case "http", "https", "unix": c, err := i.httpClient(ctx, parts, proxy) @@ -178,6 +181,8 @@ func (i *InfluxDB) Connect() error { return err } + c.SetLogger(i.Log) + i.clients = append(i.clients, c) default: return fmt.Errorf("unsupported scheme [%q]: %q", u, parts.Scheme) @@ -221,13 +226,13 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { if !i.SkipDatabaseCreation { err := client.CreateDatabase(ctx, apiError.Database) if err != nil { - log.Printf("E! [outputs.influxdb] when writing to [%s]: database %q not found and failed to recreate", + i.Log.Errorf("when writing to [%s]: database %q not found and failed to recreate", client.URL(), apiError.Database) } } } - log.Printf("E! [outputs.influxdb] when writing to [%s]: %v", client.URL(), err) + i.Log.Errorf("when writing to [%s]: %v", client.URL(), err) } return errors.New("could not write any address") @@ -281,7 +286,7 @@ func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL) if !i.SkipDatabaseCreation { err = c.CreateDatabase(ctx, c.Database()) if err != nil { - log.Printf("W! 
[outputs.influxdb] when writing to [%s]: database %q creation failed: %v", + i.Log.Warnf("when writing to [%s]: database %q creation failed: %v", c.URL(), i.Database, err) } } diff --git a/plugins/outputs/influxdb/influxdb_test.go b/plugins/outputs/influxdb/influxdb_test.go index 73f481e9a..4b86de4de 100644 --- a/plugins/outputs/influxdb/influxdb_test.go +++ b/plugins/outputs/influxdb/influxdb_test.go @@ -11,6 +11,7 @@ import ( "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs/influxdb" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -20,6 +21,8 @@ type MockClient struct { CreateDatabaseF func(ctx context.Context, database string) error DatabaseF func() string CloseF func() + + log telegraf.Logger } func (c *MockClient) URL() string { @@ -42,6 +45,10 @@ func (c *MockClient) Close() { c.CloseF() } +func (c *MockClient) SetLogger(log telegraf.Logger) { + c.log = log +} + func TestDeprecatedURLSupport(t *testing.T) { var actual *influxdb.UDPConfig output := influxdb.InfluxDB{ @@ -52,6 +59,9 @@ func TestDeprecatedURLSupport(t *testing.T) { return &MockClient{}, nil }, } + + output.Log = testutil.Logger{} + err := output.Connect() require.NoError(t, err) require.Equal(t, "udp://localhost:8089", actual.URL.String()) @@ -72,6 +82,9 @@ func TestDefaultURL(t *testing.T) { }, nil }, } + + output.Log = testutil.Logger{} + err := output.Connect() require.NoError(t, err) require.Equal(t, "http://localhost:8086", actual.URL.String()) @@ -89,6 +102,8 @@ func TestConnectUDPConfig(t *testing.T) { return &MockClient{}, nil }, } + output.Log = testutil.Logger{} + err := output.Connect() require.NoError(t, err) @@ -130,6 +145,9 @@ func TestConnectHTTPConfig(t *testing.T) { }, nil }, } + + output.Log = testutil.Logger{} + err := output.Connect() require.NoError(t, err) @@ -153,7 +171,6 @@ func TestConnectHTTPConfig(t *testing.T) { func TestWriteRecreateDatabaseIfDatabaseNotFound(t *testing.T) { output := influxdb.InfluxDB{ URLs: []string{"http://localhost:8086"}, - CreateHTTPClientF: func(config *influxdb.HTTPConfig) (influxdb.Client, error) { return &MockClient{ DatabaseF: func() string { @@ -173,12 +190,13 @@ func TestWriteRecreateDatabaseIfDatabaseNotFound(t *testing.T) { }, URLF: func() string { return "http://localhost:8086" - }, }, nil }, } + output.Log = testutil.Logger{} + err := output.Connect() require.NoError(t, err) diff --git a/plugins/outputs/influxdb/udp.go b/plugins/outputs/influxdb/udp.go index a33b98563..76fdb7862 100644 --- a/plugins/outputs/influxdb/udp.go +++ b/plugins/outputs/influxdb/udp.go @@ -5,7 +5,6 @@ import ( "bytes" "context" "fmt" - "log" "net" "net/url" @@ -32,6 +31,7 @@ type UDPConfig struct { URL *url.URL Serializer *influx.Serializer Dialer Dialer + Log telegraf.Logger } func NewUDPClient(config UDPConfig) (*udpClient, error) { @@ -69,12 +69,18 @@ type udpClient struct { dialer Dialer serializer *influx.Serializer url *url.URL + + log telegraf.Logger } func (c *udpClient) URL() string { return c.url.String() } +func (c *udpClient) SetLogger(log telegraf.Logger) { + c.log = log +} + func (c *udpClient) Database() string { return "" } @@ -93,7 +99,7 @@ func (c *udpClient) Write(ctx context.Context, metrics []telegraf.Metric) error if err != nil { // Since we are serializing multiple metrics, don't fail the // entire batch just because of one unserializable metric. - log.Printf("E! 
[outputs.influxdb] when writing to [%s] could not serialize metric: %v", + c.log.Errorf("when writing to [%s] could not serialize metric: %v", c.URL(), err) continue } diff --git a/plugins/outputs/influxdb/udp_test.go b/plugins/outputs/influxdb/udp_test.go index 136ebb787..61c64ff77 100644 --- a/plugins/outputs/influxdb/udp_test.go +++ b/plugins/outputs/influxdb/udp_test.go @@ -14,6 +14,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs/influxdb" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -78,6 +79,7 @@ func TestUDP_URL(t *testing.T) { } client, err := influxdb.NewUDPClient(config) + client.SetLogger(testutil.Logger{}) require.NoError(t, err) require.Equal(t, u.String(), client.URL()) @@ -101,6 +103,7 @@ func TestUDP_Simple(t *testing.T) { }, } client, err := influxdb.NewUDPClient(config) + client.SetLogger(testutil.Logger{}) require.NoError(t, err) ctx := context.Background() @@ -127,6 +130,7 @@ func TestUDP_DialError(t *testing.T) { }, } client, err := influxdb.NewUDPClient(config) + client.SetLogger(testutil.Logger{}) require.NoError(t, err) ctx := context.Background() @@ -156,6 +160,7 @@ func TestUDP_WriteError(t *testing.T) { }, } client, err := influxdb.NewUDPClient(config) + client.SetLogger(testutil.Logger{}) require.NoError(t, err) ctx := context.Background() @@ -219,6 +224,7 @@ func TestUDP_ErrorLogging(t *testing.T) { log.SetOutput(&b) client, err := influxdb.NewUDPClient(tt.config) + client.SetLogger(testutil.Logger{}) require.NoError(t, err) ctx := context.Background() @@ -262,6 +268,7 @@ func TestUDP_WriteWithRealConn(t *testing.T) { URL: u, } client, err := influxdb.NewUDPClient(config) + client.SetLogger(testutil.Logger{}) require.NoError(t, err) ctx := context.Background() diff --git a/testutil/log.go b/testutil/log.go new file mode 100644 index 000000000..5e0458dc7 --- /dev/null +++ b/testutil/log.go @@ -0,0 +1,50 @@ +package testutil + +import ( + "log" +) + +// Logger defines a logging structure for plugins. +type Logger struct { + Name string // Name is the plugin name, will be printed in the `[]`. +} + +// Errorf logs an error message, patterned after log.Printf. +func (l Logger) Errorf(format string, args ...interface{}) { + log.Printf("E! ["+l.Name+"] "+format, args...) +} + +// Error logs an error message, patterned after log.Print. +func (l Logger) Error(args ...interface{}) { + log.Print(append([]interface{}{"E! [" + l.Name + "] "}, args...)...) +} + +// Debugf logs a debug message, patterned after log.Printf. +func (l Logger) Debugf(format string, args ...interface{}) { + log.Printf("D! ["+l.Name+"] "+format, args...) +} + +// Debug logs a debug message, patterned after log.Print. +func (l Logger) Debug(args ...interface{}) { + log.Print(append([]interface{}{"D! [" + l.Name + "] "}, args...)...) +} + +// Warnf logs a warning message, patterned after log.Printf. +func (l Logger) Warnf(format string, args ...interface{}) { + log.Printf("W! ["+l.Name+"] "+format, args...) +} + +// Warn logs a warning message, patterned after log.Print. +func (l Logger) Warn(args ...interface{}) { + log.Print(append([]interface{}{"W! [" + l.Name + "] "}, args...)...) +} + +// Infof logs an information message, patterned after log.Printf. +func (l Logger) Infof(format string, args ...interface{}) { + log.Printf("I! ["+l.Name+"] "+format, args...) +} + +// Info logs an information message, patterned after log.Print. 
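+// For example, a hypothetical testutil.Logger{Name: "outputs.influxdb"} would +// print Info("connected") as a log line prefixed with "I! [outputs.influxdb]".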
+func (l Logger) Info(args ...interface{}) { + log.Print(append([]interface{}{"I! [" + l.Name + "] "}, args...)...) +} From 8b938f3bd43c83747db9cd3ab33ffde117c19c3c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 21 Aug 2019 18:04:30 -0700 Subject: [PATCH 1128/1815] Make review changes to logstash input (#6299) --- .gitignore | 5 - internal/choice/choice.go | 36 ++++ plugins/inputs/logstash/README.md | 167 +++++++--------- plugins/inputs/logstash/logstash.go | 233 ++++++++++------------- plugins/inputs/logstash/logstash_test.go | 81 +++----- 5 files changed, 226 insertions(+), 296 deletions(-) create mode 100644 internal/choice/choice.go diff --git a/.gitignore b/.gitignore index 9e012aabd..4176a0413 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,5 @@ -# Build and binaries /build /telegraf /telegraf.exe /telegraf.gz /vendor - -# Editor files -*~ -.idea diff --git a/internal/choice/choice.go b/internal/choice/choice.go new file mode 100644 index 000000000..33c26096d --- /dev/null +++ b/internal/choice/choice.go @@ -0,0 +1,36 @@ +// Package choice provides basic functions for working with +// plugin options that must be one of several values. +package choice + +import "fmt" + +// Contains returns true if the choice is in the list of choices. +func Contains(choice string, choices []string) bool { + for _, item := range choices { + if item == choice { + return true + } + } + return false +} + +// Check returns an error if the choice is not one of +// the available choices. +func Check(choice string, available []string) error { + if !Contains(choice, available) { + return fmt.Errorf("unknown choice %s", choice) + } + return nil +} + +// CheckSlice returns an error if choices is not a subset of +// available. +func CheckSlice(choices, available []string) error { + for _, choice := range choices { + err := Check(choice, available) + if err != nil { + return err + } + } + return nil +} diff --git a/plugins/inputs/logstash/README.md b/plugins/inputs/logstash/README.md index f54697c39..9571de5fd 100644 --- a/plugins/inputs/logstash/README.md +++ b/plugins/inputs/logstash/README.md @@ -3,62 +3,52 @@ This plugin reads metrics exposed by [Logstash Monitoring API](https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html). -### Configuration: +Logstash 5 and later is supported. + +### Configuration ```toml - ## This plugin reads metrics exposed by Logstash Monitoring API. - ## https://www.elastic.co/guide/en/logstash/current/monitoring.html - - ## The URL of the exposed Logstash API endpoint +[[inputs.logstash]] + ## The URL of the exposed Logstash API endpoint. url = "http://127.0.0.1:9600" - ## Enable Logstash 6+ multi-pipeline statistics support - multi_pipeline = true + ## Use Logstash 5 single pipeline API, set to true when monitoring + ## Logstash 5. + # single_pipeline = false - ## Should the general process statistics be gathered - collect_process_stats = true + ## Enable optional collection components. Can contain + ## "pipelines", "process", and "jvm". + # collect = ["pipelines", "process", "jvm"] - ## Should the JVM specific statistics be gathered - collect_jvm_stats = true + ## Timeout for HTTP requests.
+ # timeout = "5s" - ## Should the event pipelines statistics be gathered - collect_pipelines_stats = true - - ## Should the plugin statistics be gathered - collect_plugins_stats = true - - ## Should the queue statistics be gathered - collect_queue_stats = true - - ## HTTP method - # method = "GET" - - ## Optional HTTP headers - # headers = {"X-Special-Header" = "Special-Value"} - - ## Override HTTP "Host" header - # host_header = "logstash.example.com" - - ## Timeout for HTTP requests - timeout = "5s" - - ## Optional HTTP Basic Auth credentials + ## Optional HTTP Basic Auth credentials. # username = "username" # password = "pa$$word" - ## Optional TLS Config + ## Optional TLS Config. # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification + ## Use TLS but skip chain & host verification. # insecure_skip_verify = false + + ## Optional HTTP headers. + # [inputs.logstash.headers] + # "X-Special-Header" = "Special-Value" ``` -### Measurements & Fields: +### Metrics -- **logstash_jvm** - * Fields: +- logstash_jvm + - tags: + - node_id + - node_name + - node_host + - node_version + - fields: - threads_peak_count - mem_pools_survivor_peak_max_in_bytes - mem_pools_survivor_max_in_bytes @@ -87,14 +77,14 @@ This plugin reads metrics exposed by - mem_heap_used_in_bytes - gc_collectors_young_collection_count - uptime_in_millis - * Tags: + ++ logstash_process + - tags: - node_id - node_name - - node_host - - node_version - -- **logstash_process** - * Fields: + - source + - node_version + - fields: - open_file_descriptors - cpu_load_average_1m - cpu_load_average_5m @@ -105,85 +95,60 @@ This plugin reads metrics exposed by - max_file_descriptors - mem_total_virtual_in_bytes - mem_total_virtual_in_bytes - * Tags: + +- logstash_events + - tags: - node_id - node_name - - node_host - - node_version - -- **logstash_events** - * Fields: + - source + - node_version + - pipeline (for Logstash 6+) + - fields: - queue_push_duration_in_millis - duration_in_millis - in - filtered - out - * Tags: + ++ logstash_plugins + - tags: - node_id - node_name - - node_host - - node_version - - pipeline (for Logstash 6 only) - -- **logstash_plugins** - * Fields: + - source + - node_version + - pipeline (for Logstash 6+) + - plugin_id + - plugin_name + - plugin_type + - fields: - queue_push_duration_in_millis (for input plugins only) - duration_in_millis - in - out - * Tags: + +- logstash_queue + - tags: - node_id - node_name - - node_host - - node_version - - pipeline (for Logstash 6 only) - - plugin_id - - plugin_name - - plugin_type - -- **logstash_queue** - * Fields: + - source + - node_version + - pipeline (for Logstash 6+) + - queue_type + - fields: - events - free_space_in_bytes - max_queue_size_in_bytes - max_unread_events - page_capacity_in_bytes - queue_size_in_bytes - * Tags: - - node_id - - node_name - - node_host - - node_version - - pipeline (for Logstash 6 only) - - queue_type -### Tags description - -- node_id - The uuid of the logstash node. Randomly generated. -- node_name - The name of the logstash node. Can be defined in the *logstash.yml* or defaults to the hostname. - Can be used to break apart metrics from different logstash instances of the same host. -- node_host - The hostname of the logstash node. - Can be different from the telegraf's host if a remote connection to logstash instance is used. -- node_version - The version of logstash service running on this node. 
-- pipeline (for Logstash 6 only) - The name of a pipeline if multi-pipeline is configured. - Will defaults to "main" if there is only one pipeline and will be missing for logstash 5. -- plugin_id - The unique id of this plugin. - It will be a randomly generated string unless it's defined in the logstash pipeline config file. -- plugin_name - The name of this plugin. i.e. file, elasticsearch, date, mangle. -- plugin_type - The type of this plugin i.e. input/filter/output. -- queue_type - The type of the event queue (memory/persisted). - -### Example Output: +### Example Output ``` -$ ./telegraf -config telegraf.conf -input-filter logstash -test - -> logstash_jvm,host=node-6,node_host=node-6,node_id=3044f675-21ce-4335-898a-8408aa678245,node_name=node-6-test,node_version=6.4.2 - gc_collectors_old_collection_count=5,gc_collectors_old_collection_time_in_millis=702,gc_collectors_young_collection_count=95,gc_collectors_young_collection_time_in_millis=4772,mem_heap_committed_in_bytes=360804352,mem_heap_max_in_bytes=8389328896,mem_heap_used_in_bytes=252629768,mem_heap_used_percent=3,mem_non_heap_committed_in_bytes=212144128,mem_non_heap_used_in_bytes=188726024,mem_pools_old_committed_in_bytes=280260608,mem_pools_old_max_in_bytes=6583418880,mem_pools_old_peak_max_in_bytes=6583418880,mem_pools_old_peak_used_in_bytes=235352976,mem_pools_old_used_in_bytes=194754608,mem_pools_survivor_committed_in_bytes=8912896,mem_pools_survivor_max_in_bytes=200605696,mem_pools_survivor_peak_max_in_bytes=200605696,mem_pools_survivor_peak_used_in_bytes=8912896,mem_pools_survivor_used_in_bytes=4476680,mem_pools_young_committed_in_bytes=71630848,mem_pools_young_max_in_bytes=1605304320,mem_pools_young_peak_max_in_bytes=1605304320,mem_pools_young_peak_used_in_bytes=71630848,mem_pools_young_used_in_bytes=53398480,threads_count=60,threads_peak_count=62,uptime_in_millis=10469014 1540289864000000000 -> logstash_process,host=node-6,node_host=node-6,node_id=3044f675-21ce-4335-898a-8408aa678245,node_name=node-6-test,node_version=6.4.2 - cpu_load_average_15m=39.84,cpu_load_average_1m=32.87,cpu_load_average_5m=39.23,cpu_percent=0,cpu_total_in_millis=389920,max_file_descriptors=262144,mem_total_virtual_in_bytes=17921355776,open_file_descriptors=132,peak_open_file_descriptors=140 1540289864000000000 -> logstash_events,host=node-6,node_host=node-6,node_id=3044f675-21ce-4335-898a-8408aa678245,node_name=node-6-test,node_version=6.4.2,pipeline=main - duration_in_millis=175144,filtered=4543,in=4543,out=4543,queue_push_duration_in_millis=19 1540289864000000000 -> logstash_plugins,host=node-6,node_host=node-6,node_id=3044f675-21ce-4335-898a-8408aa678245,node_name=node-6-test,node_version=6.4.2,pipeline=main,plugin_id=input-kafka,plugin_name=kafka,plugin_type=input - duration_in_millis=0,in=0,out=4543,queue_push_duration_in_millis=19 1540289864000000000 +logstash_jvm,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,source=debian-stretch-logstash6.virt 
gc_collectors_old_collection_count=2,gc_collectors_old_collection_time_in_millis=100,gc_collectors_young_collection_count=26,gc_collectors_young_collection_time_in_millis=1028,mem_heap_committed_in_bytes=1056309248,mem_heap_max_in_bytes=1056309248,mem_heap_used_in_bytes=207216328,mem_heap_used_percent=19,mem_non_heap_committed_in_bytes=160878592,mem_non_heap_used_in_bytes=140838184,mem_pools_old_committed_in_bytes=899284992,mem_pools_old_max_in_bytes=899284992,mem_pools_old_peak_max_in_bytes=899284992,mem_pools_old_peak_used_in_bytes=189468088,mem_pools_old_used_in_bytes=189468088,mem_pools_survivor_committed_in_bytes=17432576,mem_pools_survivor_max_in_bytes=17432576,mem_pools_survivor_peak_max_in_bytes=17432576,mem_pools_survivor_peak_used_in_bytes=17432576,mem_pools_survivor_used_in_bytes=12572640,mem_pools_young_committed_in_bytes=139591680,mem_pools_young_max_in_bytes=139591680,mem_pools_young_peak_max_in_bytes=139591680,mem_pools_young_peak_used_in_bytes=139591680,mem_pools_young_used_in_bytes=5175600,threads_count=20,threads_peak_count=24,uptime_in_millis=739089 1566425244000000000 +logstash_process,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,source=debian-stretch-logstash6.virt cpu_load_average_15m=0.03,cpu_load_average_1m=0.01,cpu_load_average_5m=0.04,cpu_percent=0,cpu_total_in_millis=83230,max_file_descriptors=16384,mem_total_virtual_in_bytes=3689132032,open_file_descriptors=118,peak_open_file_descriptors=118 1566425244000000000 +logstash_events,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,pipeline=main,source=debian-stretch-logstash6.virt duration_in_millis=0,filtered=0,in=0,out=0,queue_push_duration_in_millis=0 1566425244000000000 +logstash_plugins,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,pipeline=main,plugin_id=2807cb8610ba7854efa9159814fcf44c3dda762b43bd088403b30d42c88e69ab,plugin_name=beats,plugin_type=input,source=debian-stretch-logstash6.virt out=0,queue_push_duration_in_millis=0 1566425244000000000 +logstash_plugins,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,pipeline=main,plugin_id=7a6c973366186a695727c73935634a00bccd52fceedf30d0746983fce572d50c,plugin_name=file,plugin_type=output,source=debian-stretch-logstash6.virt duration_in_millis=0,in=0,out=0 1566425244000000000 +logstash_queue,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,pipeline=main,queue_type=memory,source=debian-stretch-logstash6.virt events=0 1566425244000000000 ``` diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index ba25fafd5..b97600700 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -2,114 +2,78 @@ package logstash import ( "encoding/json" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/internal/tls" - "github.com/influxdata/telegraf/plugins/inputs" + "fmt" "net/http" "net/url" + "strings" "time" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" jsonParser "github.com/influxdata/telegraf/plugins/parsers/json" ) -// ##### Interface ##### - const sampleConfig = ` - ## This plugin reads 
metrics exposed by Logstash Monitoring API. - ## https://www.elastic.co/guide/en/logstash/current/monitoring.html - - ## The URL of the exposed Logstash API endpoint + ## The URL of the exposed Logstash API endpoint. url = "http://127.0.0.1:9600" - ## Enable Logstash 6+ multi-pipeline statistics support - multi_pipeline = true + ## Use Logstash 5 single pipeline API, set to true when monitoring + ## Logstash 5. + # single_pipeline = false - ## Should the general process statistics be gathered - collect_process_stats = true + ## Enable optional collection components. Can contain + ## "pipelines", "process", and "jvm". + # collect = ["pipelines", "process", "jvm"] - ## Should the JVM specific statistics be gathered - collect_jvm_stats = true + ## Timeout for HTTP requests. + # timeout = "5s" - ## Should the event pipelines statistics be gathered - collect_pipelines_stats = true - - ## Should the plugin statistics be gathered - collect_plugins_stats = true - - ## Should the queue statistics be gathered - collect_queue_stats = true - - ## HTTP method - # method = "GET" - - ## Optional HTTP headers - # headers = {"X-Special-Header" = "Special-Value"} - - ## Override HTTP "Host" header - # host_header = "logstash.example.com" - - ## Timeout for HTTP requests - timeout = "5s" - - ## Optional HTTP Basic Auth credentials + ## Optional HTTP Basic Auth credentials. # username = "username" # password = "pa$$word" - ## Optional TLS Config + ## Optional TLS Config. # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification + ## Use TLS but skip chain & host verification. # insecure_skip_verify = false + + ## Optional HTTP headers. + # [inputs.logstash.headers] + # "X-Special-Header" = "Special-Value" ` type Logstash struct { URL string `toml:"url"` - MultiPipeline bool `toml:"multi_pipeline"` - CollectProcessStats bool `toml:"collect_process_stats"` - CollectJVMStats bool `toml:"collect_jvm_stats"` - CollectPipelinesStats bool `toml:"collect_pipelines_stats"` - CollectPluginsStats bool `toml:"collect_plugins_stats"` - CollectQueueStats bool `toml:"collect_queue_stats"` - - Username string `toml:"username"` - Password string `toml:"password"` - Method string `toml:"method"` - Headers map[string]string `toml:"headers"` - HostHeader string `toml:"host_header"` - Timeout internal.Duration `toml:"timeout"` + SinglePipeline bool `toml:"single_pipeline"` + Collect []string `toml:"collect"` + Username string `toml:"username"` + Password string `toml:"password"` + Headers map[string]string `toml:"headers"` + Timeout internal.Duration `toml:"timeout"` tls.ClientConfig + client *http.Client } // NewLogstash create an instance of the plugin with default settings func NewLogstash() *Logstash { return &Logstash{ - URL: "http://127.0.0.1:9600", - MultiPipeline: true, - CollectProcessStats: true, - CollectJVMStats: true, - CollectPipelinesStats: true, - CollectPluginsStats: true, - CollectQueueStats: true, - Method: "GET", - Headers: make(map[string]string), - HostHeader: "", - Timeout: internal.Duration{Duration: time.Second * 5}, + URL: "http://127.0.0.1:9600", + SinglePipeline: false, + Collect: []string{"pipelines", "process", "jvm"}, + Headers: make(map[string]string), + Timeout: internal.Duration{Duration: time.Second * 5}, } } -// init initialise this plugin instance -func init() { - inputs.Add("logstash", func() telegraf.Input { - return NewLogstash() - }) -} - // Description returns short info about plugin func 
(logstash *Logstash) Description() string { return "Read metrics exposed by Logstash" @@ -183,6 +147,14 @@ const processStats = "/_node/stats/process" const pipelinesStats = "/_node/stats/pipelines" const pipelineStats = "/_node/stats/pipeline" +func (i *Logstash) Init() error { + err := choice.CheckSlice(i.Collect, []string{"pipelines", "process", "jvm"}) + if err != nil { + return fmt.Errorf(`cannot verify "collect" setting: %v`, err) + } + return nil +} + // createHttpClient create a clients to access API func (logstash *Logstash) createHttpClient() (*http.Client, error) { tlsConfig, err := logstash.ClientConfig.TLSConfig() @@ -202,15 +174,7 @@ func (logstash *Logstash) createHttpClient() (*http.Client, error) { // gatherJsonData query the data source and parse the response JSON func (logstash *Logstash) gatherJsonData(url string, value interface{}) error { - - var method string - if logstash.Method != "" { - method = logstash.Method - } else { - method = "GET" - } - - request, err := http.NewRequest(method, url, nil) + request, err := http.NewRequest("GET", url, nil) if err != nil { return err } @@ -218,11 +182,13 @@ func (logstash *Logstash) gatherJsonData(url string, value interface{}) error { if (logstash.Username != "") || (logstash.Password != "") { request.SetBasicAuth(logstash.Username, logstash.Password) } + for header, value := range logstash.Headers { - request.Header.Add(header, value) - } - if logstash.HostHeader != "" { - request.Host = logstash.HostHeader + if strings.ToLower(header) == "host" { + request.Host = value + } else { + request.Header.Add(header, value) + } } response, err := logstash.client.Do(request) @@ -252,8 +218,8 @@ func (logstash *Logstash) gatherJVMStats(url string, accumulator telegraf.Accumu tags := map[string]string{ "node_id": jvmStats.ID, "node_name": jvmStats.Name, - "node_host": jvmStats.Host, "node_version": jvmStats.Version, + "source": jvmStats.Host, } flattener := jsonParser.JSONFlattener{} @@ -278,8 +244,8 @@ func (logstash *Logstash) gatherProcessStats(url string, accumulator telegraf.Ac tags := map[string]string{ "node_id": processStats.ID, "node_name": processStats.Name, - "node_host": processStats.Host, "node_version": processStats.Version, + "source": processStats.Host, } flattener := jsonParser.JSONFlattener{} @@ -368,8 +334,8 @@ func (logstash *Logstash) gatherPipelineStats(url string, accumulator telegraf.A tags := map[string]string{ "node_id": pipelineStats.ID, "node_name": pipelineStats.Name, - "node_host": pipelineStats.Host, "node_version": pipelineStats.Version, + "source": pipelineStats.Host, } flattener := jsonParser.JSONFlattener{} @@ -379,23 +345,22 @@ func (logstash *Logstash) gatherPipelineStats(url string, accumulator telegraf.A } accumulator.AddFields("logstash_events", flattener.Fields, tags) - if logstash.CollectPluginsStats { - err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Inputs, "input", tags, accumulator) - if err != nil { - return err - } - err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Filters, "filter", tags, accumulator) - if err != nil { - return err - } - err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Outputs, "output", tags, accumulator) - if err != nil { - return err - } + err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Inputs, "input", tags, accumulator) + if err != nil { + return err + } + err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Filters, "filter", tags, accumulator) + if err != nil { + return err + } + err = 
logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Outputs, "output", tags, accumulator) + if err != nil { + return err } - if logstash.CollectQueueStats { - err = logstash.gatherQueueStats(&pipelineStats.Pipeline.Queue, tags, accumulator) + err = logstash.gatherQueueStats(&pipelineStats.Pipeline.Queue, tags, accumulator) + if err != nil { + return err } return nil @@ -414,9 +379,9 @@ func (logstash *Logstash) gatherPipelinesStats(url string, accumulator telegraf. tags := map[string]string{ "node_id": pipelinesStats.ID, "node_name": pipelinesStats.Name, - "node_host": pipelinesStats.Host, "node_version": pipelinesStats.Version, "pipeline": pipelineName, + "source": pipelinesStats.Host, } flattener := jsonParser.JSONFlattener{} @@ -426,25 +391,23 @@ func (logstash *Logstash) gatherPipelinesStats(url string, accumulator telegraf. } accumulator.AddFields("logstash_events", flattener.Fields, tags) - if logstash.CollectPluginsStats { - err = logstash.gatherPluginsStats(pipeline.Plugins.Inputs, "input", tags, accumulator) - if err != nil { - return err - } - err = logstash.gatherPluginsStats(pipeline.Plugins.Filters, "filter", tags, accumulator) - if err != nil { - return err - } - err = logstash.gatherPluginsStats(pipeline.Plugins.Outputs, "output", tags, accumulator) - if err != nil { - return err - } + err = logstash.gatherPluginsStats(pipeline.Plugins.Inputs, "input", tags, accumulator) + if err != nil { + return err + } + err = logstash.gatherPluginsStats(pipeline.Plugins.Filters, "filter", tags, accumulator) + if err != nil { + return err + } + err = logstash.gatherPluginsStats(pipeline.Plugins.Outputs, "output", tags, accumulator) + if err != nil { + return err } - if logstash.CollectQueueStats { - err = logstash.gatherQueueStats(&pipeline.Queue, tags, accumulator) + err = logstash.gatherQueueStats(&pipeline.Queue, tags, accumulator) + if err != nil { + return err } - } return nil @@ -452,7 +415,6 @@ func (logstash *Logstash) gatherPipelinesStats(url string, accumulator telegraf. 
// Gather ask this plugin to start gathering metrics func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error { - if logstash.client == nil { client, err := logstash.createHttpClient() @@ -462,7 +424,7 @@ func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error { logstash.client = client } - if logstash.CollectJVMStats { + if choice.Contains("jvm", logstash.Collect) { jvmUrl, err := url.Parse(logstash.URL + jvmStats) if err != nil { return err @@ -472,7 +434,7 @@ func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error { } } - if logstash.CollectProcessStats { + if choice.Contains("process", logstash.Collect) { processUrl, err := url.Parse(logstash.URL + processStats) if err != nil { return err @@ -482,16 +444,8 @@ func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error { } } - if logstash.CollectPipelinesStats { - if logstash.MultiPipeline { - pipelinesUrl, err := url.Parse(logstash.URL + pipelinesStats) - if err != nil { - return err - } - if err := logstash.gatherPipelinesStats(pipelinesUrl.String(), accumulator); err != nil { - return err - } - } else { + if choice.Contains("pipelines", logstash.Collect) { + if logstash.SinglePipeline { pipelineUrl, err := url.Parse(logstash.URL + pipelineStats) if err != nil { return err @@ -499,8 +453,23 @@ func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error { if err := logstash.gatherPipelineStats(pipelineUrl.String(), accumulator); err != nil { return err } + } else { + pipelinesUrl, err := url.Parse(logstash.URL + pipelinesStats) + if err != nil { + return err + } + if err := logstash.gatherPipelinesStats(pipelinesUrl.String(), accumulator); err != nil { + return err + } } } return nil } + +// init registers this plugin instance +func init() { + inputs.Add("logstash", func() telegraf.Input { + return NewLogstash() + }) +} diff --git a/plugins/inputs/logstash/logstash_test.go b/plugins/inputs/logstash/logstash_test.go index c091be83c..aeb4e46f8 100644 --- a/plugins/inputs/logstash/logstash_test.go +++ b/plugins/inputs/logstash/logstash_test.go @@ -9,7 +9,6 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) var logstashTest = NewLogstash() @@ -66,7 +65,7 @@ func Test_Logstash5GatherProcessStats(test *testing.T) { map[string]string{ "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"), "node_name": string("node-5-test"), - "node_host": string("node-5"), + "source": string("node-5"), "node_version": string("5.3.0"), }, ) @@ -115,7 +114,7 @@ func Test_Logstash6GatherProcessStats(test *testing.T) { map[string]string{ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), "node_name": string("node-6-test"), - "node_host": string("node-6"), + "source": string("node-6"), "node_version": string("6.4.2"), }, ) @@ -160,7 +159,7 @@ func Test_Logstash5GatherPipelineStats(test *testing.T) { map[string]string{ "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"), "node_name": string("node-5-test"), - "node_host": string("node-5"), + "source": string("node-5"), "node_version": string("5.3.0"), }, ) @@ -176,7 +175,7 @@ func Test_Logstash5GatherPipelineStats(test *testing.T) { map[string]string{ "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"), "node_name": string("node-5-test"), - "node_host": string("node-5"), + "source": string("node-5"), "node_version": string("5.3.0"), "plugin_name": string("beats"), "plugin_id": string("a35197a509596954e905e38521bae12b1498b17d-1"), @@ -195,7 +194,7 @@ func 
Test_Logstash5GatherPipelineStats(test *testing.T) { map[string]string{ "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"), "node_name": string("node-5-test"), - "node_host": string("node-5"), + "source": string("node-5"), "node_version": string("5.3.0"), "plugin_name": string("stdout"), "plugin_id": string("582d5c2becb582a053e1e9a6bcc11d49b69a6dfd-2"), @@ -214,7 +213,7 @@ func Test_Logstash5GatherPipelineStats(test *testing.T) { map[string]string{ "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"), "node_name": string("node-5-test"), - "node_host": string("node-5"), + "source": string("node-5"), "node_version": string("5.3.0"), "plugin_name": string("s3"), "plugin_id": string("582d5c2becb582a053e1e9a6bcc11d49b69a6dfd-3"), @@ -264,7 +263,7 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { map[string]string{ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), "node_name": string("node-6-test"), - "node_host": string("node-6"), + "source": string("node-6"), "node_version": string("6.4.2"), "pipeline": string("main"), }, @@ -281,7 +280,7 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { map[string]string{ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), "node_name": string("node-6-test"), - "node_host": string("node-6"), + "source": string("node-6"), "node_version": string("6.4.2"), "pipeline": string("main"), "plugin_name": string("kafka"), @@ -301,7 +300,7 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { map[string]string{ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), "node_name": string("node-6-test"), - "node_host": string("node-6"), + "source": string("node-6"), "node_version": string("6.4.2"), "pipeline": string("main"), "plugin_name": string("mutate"), @@ -321,7 +320,7 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { map[string]string{ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), "node_name": string("node-6-test"), - "node_host": string("node-6"), + "source": string("node-6"), "node_version": string("6.4.2"), "pipeline": string("main"), "plugin_name": string("mutate"), @@ -341,7 +340,7 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { map[string]string{ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), "node_name": string("node-6-test"), - "node_host": string("node-6"), + "source": string("node-6"), "node_version": string("6.4.2"), "pipeline": string("main"), "plugin_name": string("date"), @@ -361,7 +360,7 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { map[string]string{ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), "node_name": string("node-6-test"), - "node_host": string("node-6"), + "source": string("node-6"), "node_version": string("6.4.2"), "pipeline": string("main"), "plugin_name": string("mutate"), @@ -381,7 +380,7 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { map[string]string{ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), "node_name": string("node-6-test"), - "node_host": string("node-6"), + "source": string("node-6"), "node_version": string("6.4.2"), "pipeline": string("main"), "plugin_name": string("mutate"), @@ -401,7 +400,7 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { map[string]string{ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), "node_name": string("node-6-test"), - "node_host": string("node-6"), + "source": string("node-6"), "node_version": string("6.4.2"), "pipeline": string("main"), "plugin_name": string("drop"), @@ -421,7 +420,7 @@ func 
Test_Logstash6GatherPipelinesStats(test *testing.T) { map[string]string{ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), "node_name": string("node-6-test"), - "node_host": string("node-6"), + "source": string("node-6"), "node_version": string("6.4.2"), "pipeline": string("main"), "plugin_name": string("mutate"), @@ -441,7 +440,7 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { map[string]string{ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), "node_name": string("node-6-test"), - "node_host": string("node-6"), + "source": string("node-6"), "node_version": string("6.4.2"), "pipeline": string("main"), "plugin_name": string("csv"), @@ -461,7 +460,7 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { map[string]string{ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), "node_name": string("node-6-test"), - "node_host": string("node-6"), + "source": string("node-6"), "node_version": string("6.4.2"), "pipeline": string("main"), "plugin_name": string("mutate"), @@ -481,7 +480,7 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { map[string]string{ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), "node_name": string("node-6-test"), - "node_host": string("node-6"), + "source": string("node-6"), "node_version": string("6.4.2"), "pipeline": string("main"), "plugin_name": string("elasticsearch"), @@ -501,7 +500,7 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { map[string]string{ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), "node_name": string("node-6-test"), - "node_host": string("node-6"), + "source": string("node-6"), "node_version": string("6.4.2"), "pipeline": string("main"), "plugin_name": string("kafka"), @@ -521,7 +520,7 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { map[string]string{ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), "node_name": string("node-6-test"), - "node_host": string("node-6"), + "source": string("node-6"), "node_version": string("6.4.2"), "pipeline": string("main"), "plugin_name": string("kafka"), @@ -544,7 +543,7 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { map[string]string{ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), "node_name": string("node-6-test"), - "node_host": string("node-6"), + "source": string("node-6"), "node_version": string("6.4.2"), "pipeline": string("main"), "queue_type": string("persisted"), @@ -615,7 +614,7 @@ func Test_Logstash5GatherJVMStats(test *testing.T) { map[string]string{ "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"), "node_name": string("node-5-test"), - "node_host": string("node-5"), + "source": string("node-5"), "node_version": string("5.3.0"), }, ) @@ -684,43 +683,9 @@ func Test_Logstash6GatherJVMStats(test *testing.T) { map[string]string{ "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), "node_name": string("node-6-test"), - "node_host": string("node-6"), + "source": string("node-6"), "node_version": string("6.4.2"), }, ) } - -func Test_LogstashRequests(test *testing.T) { - fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { - writer.Header().Set("Content-Type", "application/json") - fmt.Fprintf(writer, "%s", string(logstash6JvmJSON)) - assert.Equal(test, request.Host, "logstash.test.local") - assert.Equal(test, request.Method, "POST") - assert.Equal(test, request.Header.Get("X-Test"), "test-header") - })) - requestURL, err := url.Parse(logstashTest.URL) - if err != nil { - test.Logf("Can't 
connect to: %s", logstashTest.URL) - } - fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) - fakeServer.Start() - defer fakeServer.Close() - - if logstashTest.client == nil { - client, err := logstashTest.createHttpClient() - - if err != nil { - test.Logf("Can't createHttpClient") - } - logstashTest.client = client - } - - logstashTest.Method = "POST" - logstashTest.Headers["X-Test"] = "test-header" - logstashTest.HostHeader = "logstash.test.local" - - if err := logstashTest.gatherJsonData(logstashTest.URL+jvmStats, &logstash6accJVMStats); err != nil { - test.Logf("Can't gather JVM stats") - } -} From 40bbd805b63a570af6258c36cabf8afdee538bde Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 21 Aug 2019 18:04:51 -0700 Subject: [PATCH 1129/1815] Add TLS support to nginx_plus, nginx_plus_api and nginx_vts (#6300) --- plugins/inputs/nginx_plus/nginx_plus.go | 26 +++++++++++++---- .../inputs/nginx_plus_api/nginx_plus_api.go | 28 ++++++++++++++----- plugins/inputs/nginx_vts/nginx_vts.go | 25 +++++++++++++---- 3 files changed, 61 insertions(+), 18 deletions(-) diff --git a/plugins/inputs/nginx_plus/nginx_plus.go b/plugins/inputs/nginx_plus/nginx_plus.go index 089ba7d93..ea3aeb28b 100644 --- a/plugins/inputs/nginx_plus/nginx_plus.go +++ b/plugins/inputs/nginx_plus/nginx_plus.go @@ -14,15 +14,16 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" ) type NginxPlus struct { - Urls []string + Urls []string `toml:"urls"` + ResponseTimeout internal.Duration `toml:"response_timeout"` + tls.ClientConfig client *http.Client - - ResponseTimeout internal.Duration } var sampleConfig = ` @@ -31,6 +32,13 @@ var sampleConfig = ` # HTTP response timeout (default: 5s) response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ` func (n *NginxPlus) SampleConfig() string { @@ -74,14 +82,20 @@ func (n *NginxPlus) Gather(acc telegraf.Accumulator) error { } func (n *NginxPlus) createHttpClient() (*http.Client, error) { - if n.ResponseTimeout.Duration < time.Second { n.ResponseTimeout.Duration = time.Second * 5 } + tlsConfig, err := n.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + client := &http.Client{ - Transport: &http.Transport{}, - Timeout: n.ResponseTimeout.Duration, + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + Timeout: n.ResponseTimeout.Duration, } return client, nil diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api.go b/plugins/inputs/nginx_plus_api/nginx_plus_api.go index d44f793f1..3487dd512 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api.go @@ -9,17 +9,17 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" ) type NginxPlusApi struct { - Urls []string - - ApiVersion int64 + Urls []string `toml:"urls"` + ApiVersion int64 `toml:"api_version"` + ResponseTimeout internal.Duration `toml:"response_timeout"` + tls.ClientConfig client *http.Client - - ResponseTimeout internal.Duration } const ( @@ -49,6 +49,13 @@ var sampleConfig = ` # HTTP response timeout (default: 5s) response_timeout = "5s" + + ## 
Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ` func (n *NginxPlusApi) SampleConfig() string { @@ -100,9 +107,16 @@ func (n *NginxPlusApi) createHttpClient() (*http.Client, error) { n.ResponseTimeout.Duration = time.Second * 5 } + tlsConfig, err := n.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + client := &http.Client{ - Transport: &http.Transport{}, - Timeout: n.ResponseTimeout.Duration, + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + Timeout: n.ResponseTimeout.Duration, } return client, nil diff --git a/plugins/inputs/nginx_vts/nginx_vts.go b/plugins/inputs/nginx_vts/nginx_vts.go index 66a16e6c1..f9372eabd 100644 --- a/plugins/inputs/nginx_vts/nginx_vts.go +++ b/plugins/inputs/nginx_vts/nginx_vts.go @@ -13,15 +13,16 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" ) type NginxVTS struct { - Urls []string + Urls []string `toml:"urls"` + ResponseTimeout internal.Duration `toml:"response_timeout"` + tls.ClientConfig client *http.Client - - ResponseTimeout internal.Duration } var sampleConfig = ` @@ -30,6 +31,13 @@ var sampleConfig = ` ## HTTP response timeout (default: 5s) response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ` func (n *NginxVTS) SampleConfig() string { @@ -77,9 +85,16 @@ func (n *NginxVTS) createHTTPClient() (*http.Client, error) { n.ResponseTimeout.Duration = time.Second * 5 } + tlsConfig, err := n.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + client := &http.Client{ - Transport: &http.Transport{}, - Timeout: n.ResponseTimeout.Duration, + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + Timeout: n.ResponseTimeout.Duration, } return client, nil From 3cdc6c32dd81b6668ba8fdd153adb42ddb0ca157 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 21 Aug 2019 18:02:30 -0700 Subject: [PATCH 1130/1815] Fix measurement name for write errors --- internal/models/running_output.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 86e68f057..c8167bac6 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -59,7 +59,7 @@ func NewRunningOutput( ) *RunningOutput { logger := &Logger{ Name: logName("outputs", config.Name, config.Alias), - Errs: selfstat.Register("gather", "errors", + Errs: selfstat.Register("write", "errors", map[string]string{"output": config.Name, "alias": config.Alias}), } From 94f68c06d7b091d6d5e2c98093f8c00ee8e65d95 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 21 Aug 2019 18:02:51 -0700 Subject: [PATCH 1131/1815] Use alias name in output metric buffer stats --- internal/models/buffer.go | 12 ++-- internal/models/buffer_test.go | 84 +++++++++++++-------------- internal/models/running_output.go | 9 +-- plugins/outputs/influxdb/http.go | 8 +-- plugins/outputs/influxdb/http_test.go | 17 +++++- plugins/outputs/influxdb/influxdb.go | 7 +-- plugins/outputs/influxdb/udp.go | 8 +-- plugins/outputs/influxdb/udp_test.go | 8 +-- 8 files changed, 73 insertions(+), 80 deletions(-) 
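The substance of this patch is that NewBuffer (and the write-error counter fixed above) now registers its selfstat series with both an `output` and an `alias` tag. Below is a minimal sketch of the effect, assuming only the selfstat.Register signature and the Stat.Incr/Get methods that appear in the diff; the alias values are hypothetical:

```go
// Illustrative sketch, not part of the patch: shows how the added "alias"
// tag keeps two instances of the same output type on separate internal series.
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/selfstat"
)

// registerAdded mirrors NewBuffer's registration of the metrics_added stat.
func registerAdded(name, alias string) selfstat.Stat {
	return selfstat.Register("write", "metrics_added",
		map[string]string{"output": name, "alias": alias})
}

func main() {
	primary := registerAdded("influxdb", "primary") // hypothetical alias
	replica := registerAdded("influxdb", "replica") // hypothetical alias
	primary.Incr(3)
	replica.Incr(5)
	fmt.Println(primary.Get(), replica.Get()) // 3 5: distinct per-alias counters
}
```

Without the alias tag, two outputs of the same type would register their buffer stats under identical tag sets and be indistinguishable in Telegraf's internal metrics.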
diff --git a/internal/models/buffer.go b/internal/models/buffer.go index f7f343e46..7769ac1e9 100644 --- a/internal/models/buffer.go +++ b/internal/models/buffer.go @@ -32,7 +32,7 @@ type Buffer struct { } // NewBuffer returns a new empty Buffer with the given capacity. -func NewBuffer(name string, capacity int) *Buffer { +func NewBuffer(name string, alias string, capacity int) *Buffer { b := &Buffer{ buf: make([]telegraf.Metric, capacity), first: 0, @@ -43,27 +43,27 @@ func NewBuffer(name string, capacity int) *Buffer { MetricsAdded: selfstat.Register( "write", "metrics_added", - map[string]string{"output": name}, + map[string]string{"output": name, "alias": alias}, ), MetricsWritten: selfstat.Register( "write", "metrics_written", - map[string]string{"output": name}, + map[string]string{"output": name, "alias": alias}, ), MetricsDropped: selfstat.Register( "write", "metrics_dropped", - map[string]string{"output": name}, + map[string]string{"output": name, "alias": alias}, ), BufferSize: selfstat.Register( "write", "buffer_size", - map[string]string{"output": name}, + map[string]string{"output": name, "alias": alias}, ), BufferLimit: selfstat.Register( "write", "buffer_limit", - map[string]string{"output": name}, + map[string]string{"output": name, "alias": alias}, ), } b.BufferSize.Set(int64(0)) diff --git a/internal/models/buffer_test.go b/internal/models/buffer_test.go index bc19680d1..fa8fb1668 100644 --- a/internal/models/buffer_test.go +++ b/internal/models/buffer_test.go @@ -49,7 +49,7 @@ func MetricTime(sec int64) telegraf.Metric { } func BenchmarkAddMetrics(b *testing.B) { - buf := NewBuffer("test", 10000) + buf := NewBuffer("test", "", 10000) m := Metric() for n := 0; n < b.N; n++ { buf.Add(m) @@ -64,14 +64,14 @@ func setup(b *Buffer) *Buffer { } func TestBuffer_LenEmpty(t *testing.T) { - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) require.Equal(t, 0, b.Len()) } func TestBuffer_LenOne(t *testing.T) { m := Metric() - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(m) require.Equal(t, 1, b.Len()) @@ -79,7 +79,7 @@ func TestBuffer_LenOne(t *testing.T) { func TestBuffer_LenFull(t *testing.T) { m := Metric() - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(m, m, m, m, m) require.Equal(t, 5, b.Len()) @@ -87,7 +87,7 @@ func TestBuffer_LenFull(t *testing.T) { func TestBuffer_LenOverfill(t *testing.T) { m := Metric() - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) setup(b) b.Add(m, m, m, m, m, m) @@ -95,14 +95,14 @@ func TestBuffer_LenOverfill(t *testing.T) { } func TestBuffer_BatchLenZero(t *testing.T) { - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) batch := b.Batch(0) require.Len(t, batch, 0) } func TestBuffer_BatchLenBufferEmpty(t *testing.T) { - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) batch := b.Batch(2) require.Len(t, batch, 0) @@ -110,7 +110,7 @@ func TestBuffer_BatchLenBufferEmpty(t *testing.T) { func TestBuffer_BatchLenUnderfill(t *testing.T) { m := Metric() - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(m) batch := b.Batch(2) @@ -119,7 +119,7 @@ func TestBuffer_BatchLenUnderfill(t *testing.T) { func TestBuffer_BatchLenFill(t *testing.T) { m := Metric() - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(m, m, m) batch := b.Batch(2) require.Len(t, batch, 2) @@ -127,7 +127,7 @@ func TestBuffer_BatchLenFill(t *testing.T) { func 
TestBuffer_BatchLenExact(t *testing.T) { m := Metric() - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(m, m) batch := b.Batch(2) require.Len(t, batch, 2) @@ -135,7 +135,7 @@ func TestBuffer_BatchLenExact(t *testing.T) { func TestBuffer_BatchLenLargerThanBuffer(t *testing.T) { m := Metric() - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(m, m, m, m, m) batch := b.Batch(6) require.Len(t, batch, 5) @@ -143,7 +143,7 @@ func TestBuffer_BatchLenLargerThanBuffer(t *testing.T) { func TestBuffer_BatchWrap(t *testing.T) { m := Metric() - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(m, m, m, m, m) batch := b.Batch(2) b.Accept(batch) @@ -153,7 +153,7 @@ func TestBuffer_BatchWrap(t *testing.T) { } func TestBuffer_BatchLatest(t *testing.T) { - b := setup(NewBuffer("test", 4)) + b := setup(NewBuffer("test", "", 4)) b.Add(MetricTime(1)) b.Add(MetricTime(2)) b.Add(MetricTime(3)) @@ -167,7 +167,7 @@ func TestBuffer_BatchLatest(t *testing.T) { } func TestBuffer_BatchLatestWrap(t *testing.T) { - b := setup(NewBuffer("test", 4)) + b := setup(NewBuffer("test", "", 4)) b.Add(MetricTime(1)) b.Add(MetricTime(2)) b.Add(MetricTime(3)) @@ -183,7 +183,7 @@ func TestBuffer_BatchLatestWrap(t *testing.T) { } func TestBuffer_MultipleBatch(t *testing.T) { - b := setup(NewBuffer("test", 10)) + b := setup(NewBuffer("test", "", 10)) b.Add(MetricTime(1)) b.Add(MetricTime(2)) b.Add(MetricTime(3)) @@ -209,7 +209,7 @@ func TestBuffer_MultipleBatch(t *testing.T) { } func TestBuffer_RejectWithRoom(t *testing.T) { - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(MetricTime(1)) b.Add(MetricTime(2)) b.Add(MetricTime(3)) @@ -232,7 +232,7 @@ func TestBuffer_RejectWithRoom(t *testing.T) { } func TestBuffer_RejectNothingNewFull(t *testing.T) { - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(MetricTime(1)) b.Add(MetricTime(2)) b.Add(MetricTime(3)) @@ -255,7 +255,7 @@ func TestBuffer_RejectNothingNewFull(t *testing.T) { } func TestBuffer_RejectNoRoom(t *testing.T) { - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(MetricTime(1)) b.Add(MetricTime(2)) @@ -284,7 +284,7 @@ func TestBuffer_RejectNoRoom(t *testing.T) { } func TestBuffer_RejectRoomExact(t *testing.T) { - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(MetricTime(1)) b.Add(MetricTime(2)) batch := b.Batch(2) @@ -308,7 +308,7 @@ func TestBuffer_RejectRoomExact(t *testing.T) { } func TestBuffer_RejectRoomOverwriteOld(t *testing.T) { - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(MetricTime(1)) b.Add(MetricTime(2)) b.Add(MetricTime(3)) @@ -333,7 +333,7 @@ func TestBuffer_RejectRoomOverwriteOld(t *testing.T) { } func TestBuffer_RejectPartialRoom(t *testing.T) { - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(MetricTime(1)) b.Add(MetricTime(2)) @@ -360,7 +360,7 @@ func TestBuffer_RejectPartialRoom(t *testing.T) { } func TestBuffer_RejectNewMetricsWrapped(t *testing.T) { - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(MetricTime(1)) b.Add(MetricTime(2)) b.Add(MetricTime(3)) @@ -403,7 +403,7 @@ func TestBuffer_RejectNewMetricsWrapped(t *testing.T) { } func TestBuffer_RejectWrapped(t *testing.T) { - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(MetricTime(1)) b.Add(MetricTime(2)) b.Add(MetricTime(3)) @@ -434,7 +434,7 @@ func 
TestBuffer_RejectWrapped(t *testing.T) { } func TestBuffer_RejectAdjustFirst(t *testing.T) { - b := setup(NewBuffer("test", 10)) + b := setup(NewBuffer("test", "", 10)) b.Add(MetricTime(1)) b.Add(MetricTime(2)) b.Add(MetricTime(3)) @@ -482,7 +482,7 @@ func TestBuffer_RejectAdjustFirst(t *testing.T) { func TestBuffer_AddDropsOverwrittenMetrics(t *testing.T) { m := Metric() - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(m, m, m, m, m) b.Add(m, m, m, m, m) @@ -493,7 +493,7 @@ func TestBuffer_AddDropsOverwrittenMetrics(t *testing.T) { func TestBuffer_AcceptRemovesBatch(t *testing.T) { m := Metric() - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(m, m, m) batch := b.Batch(2) b.Accept(batch) @@ -502,7 +502,7 @@ func TestBuffer_AcceptRemovesBatch(t *testing.T) { func TestBuffer_RejectLeavesBatch(t *testing.T) { m := Metric() - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(m, m, m) batch := b.Batch(2) b.Reject(batch) @@ -511,7 +511,7 @@ func TestBuffer_RejectLeavesBatch(t *testing.T) { func TestBuffer_AcceptWritesOverwrittenBatch(t *testing.T) { m := Metric() - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(m, m, m, m, m) batch := b.Batch(5) @@ -524,7 +524,7 @@ func TestBuffer_AcceptWritesOverwrittenBatch(t *testing.T) { func TestBuffer_BatchRejectDropsOverwrittenBatch(t *testing.T) { m := Metric() - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(m, m, m, m, m) batch := b.Batch(5) @@ -537,7 +537,7 @@ func TestBuffer_BatchRejectDropsOverwrittenBatch(t *testing.T) { func TestBuffer_MetricsOverwriteBatchAccept(t *testing.T) { m := Metric() - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(m, m, m, m, m) batch := b.Batch(3) @@ -549,7 +549,7 @@ func TestBuffer_MetricsOverwriteBatchAccept(t *testing.T) { func TestBuffer_MetricsOverwriteBatchReject(t *testing.T) { m := Metric() - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(m, m, m, m, m) batch := b.Batch(3) @@ -561,7 +561,7 @@ func TestBuffer_MetricsOverwriteBatchReject(t *testing.T) { func TestBuffer_MetricsBatchAcceptRemoved(t *testing.T) { m := Metric() - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(m, m, m, m, m) batch := b.Batch(3) @@ -573,7 +573,7 @@ func TestBuffer_MetricsBatchAcceptRemoved(t *testing.T) { func TestBuffer_WrapWithBatch(t *testing.T) { m := Metric() - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(m, m, m) b.Batch(3) @@ -584,7 +584,7 @@ func TestBuffer_WrapWithBatch(t *testing.T) { func TestBuffer_BatchNotRemoved(t *testing.T) { m := Metric() - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(m, m, m, m, m) b.Batch(2) require.Equal(t, 5, b.Len()) @@ -592,7 +592,7 @@ func TestBuffer_BatchNotRemoved(t *testing.T) { func TestBuffer_BatchRejectAcceptNoop(t *testing.T) { m := Metric() - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(m, m, m, m, m) batch := b.Batch(2) b.Reject(batch) @@ -608,7 +608,7 @@ func TestBuffer_AcceptCallsMetricAccept(t *testing.T) { accept++ }, } - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(mm, mm, mm) batch := b.Batch(2) b.Accept(batch) @@ -623,7 +623,7 @@ func TestBuffer_AddCallsMetricRejectWhenNoBatch(t *testing.T) { reject++ }, } - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) setup(b) b.Add(mm, mm, mm, mm, 
mm) b.Add(mm, mm) @@ -638,7 +638,7 @@ func TestBuffer_AddCallsMetricRejectWhenNotInBatch(t *testing.T) { reject++ }, } - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) setup(b) b.Add(mm, mm, mm, mm, mm) batch := b.Batch(2) @@ -656,7 +656,7 @@ func TestBuffer_RejectCallsMetricRejectWithOverwritten(t *testing.T) { reject++ }, } - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(mm, mm, mm, mm, mm) batch := b.Batch(5) b.Add(mm, mm) @@ -673,7 +673,7 @@ func TestBuffer_AddOverwriteAndReject(t *testing.T) { reject++ }, } - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(mm, mm, mm, mm, mm) batch := b.Batch(5) b.Add(mm, mm, mm, mm, mm) @@ -697,7 +697,7 @@ func TestBuffer_AddOverwriteAndRejectOffset(t *testing.T) { accept++ }, } - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) b.Add(mm, mm, mm) b.Add(mm, mm, mm, mm) require.Equal(t, 2, reject) @@ -716,7 +716,7 @@ func TestBuffer_AddOverwriteAndRejectOffset(t *testing.T) { } func TestBuffer_RejectEmptyBatch(t *testing.T) { - b := setup(NewBuffer("test", 5)) + b := setup(NewBuffer("test", "", 5)) batch := b.Batch(2) b.Add(MetricTime(1)) b.Reject(batch) diff --git a/internal/models/running_output.go b/internal/models/running_output.go index c8167bac6..282c2d23b 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -79,7 +79,7 @@ func NewRunningOutput( } ro := &RunningOutput{ - buffer: NewBuffer(config.LogName(), bufferLimit), + buffer: NewBuffer(config.Name, config.Alias, bufferLimit), BatchReady: make(chan time.Time, 1), Output: output, Config: config, @@ -101,13 +101,6 @@ func NewRunningOutput( return ro } -func (c *OutputConfig) LogName() string { - if c.Alias == "" { - return c.Name - } - return c.Name + "::" + c.Alias -} - func (r *RunningOutput) LogName() string { return logName("outputs", r.Config.Name, r.Config.Alias) } diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index c7c29a638..9497cadcc 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -100,7 +100,7 @@ type HTTPConfig struct { InfluxUintSupport bool `toml:"influx_uint_support"` Serializer *influx.Serializer - log telegraf.Logger + Log telegraf.Logger } type httpClient struct { @@ -176,7 +176,7 @@ func NewHTTPClient(config HTTPConfig) (*httpClient, error) { }, createdDatabases: make(map[string]bool), config: config, - log: config.log, + log: config.Log, } return client, nil } @@ -186,10 +186,6 @@ func (c *httpClient) URL() string { return c.config.URL.String() } -func (c *httpClient) SetLogger(log telegraf.Logger) { - c.log = log -} - // Database returns the default database that this client connects too. 
func (c *httpClient) Database() string { return c.config.Database diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index 98ec4ef5b..e4acb1641 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -262,6 +262,7 @@ func TestHTTP_Write(t *testing.T) { config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") @@ -278,6 +279,7 @@ func TestHTTP_Write(t *testing.T) { Database: "telegraf", Username: "guy", Password: "smiley", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { username, password, ok := r.BasicAuth() @@ -293,6 +295,7 @@ func TestHTTP_Write(t *testing.T) { URL: u, Database: "telegraf", UserAgent: "telegraf", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.Header.Get("User-Agent"), "telegraf") @@ -304,6 +307,7 @@ func TestHTTP_Write(t *testing.T) { config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.Header.Get("User-Agent"), "Telegraf/1.2.3") @@ -314,6 +318,7 @@ func TestHTTP_Write(t *testing.T) { name: "default database", config: influxdb.HTTPConfig{ URL: u, + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, "telegraf", r.FormValue("db")) @@ -328,6 +333,7 @@ func TestHTTP_Write(t *testing.T) { "A": "B", "C": "D", }, + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.Header.Get("A"), "B") @@ -341,6 +347,7 @@ func TestHTTP_Write(t *testing.T) { URL: u, Database: "telegraf", RetentionPolicy: "foo", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, "foo", r.FormValue("rp")) @@ -353,6 +360,7 @@ func TestHTTP_Write(t *testing.T) { URL: u, Database: "telegraf", Consistency: "all", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, "all", r.FormValue("consistency")) @@ -364,6 +372,7 @@ func TestHTTP_Write(t *testing.T) { config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusBadRequest) @@ -378,6 +387,7 @@ func TestHTTP_Write(t *testing.T) { config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusBadRequest) @@ -392,6 +402,7 @@ func TestHTTP_Write(t *testing.T) { config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusBadRequest) @@ -406,6 +417,7 @@ func TestHTTP_Write(t *testing.T) { config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusBadGateway) @@ -423,6 +435,7 @@ func TestHTTP_Write(t *testing.T) { config: influxdb.HTTPConfig{ URL: u, Database: 
"telegraf", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusServiceUnavailable) @@ -472,7 +485,6 @@ func TestHTTP_Write(t *testing.T) { client, err := influxdb.NewHTTPClient(tt.config) require.NoError(t, err) - client.SetLogger(testutil.Logger{}) err = client.Write(ctx, metrics) if tt.errFunc != nil { tt.errFunc(t, err) @@ -525,6 +537,7 @@ func TestHTTP_WritePathPrefix(t *testing.T) { config := influxdb.HTTPConfig{ URL: u, Database: "telegraf", + Log: testutil.Logger{}, } client, err := influxdb.NewHTTPClient(config) @@ -579,6 +592,7 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) { URL: u, Database: "telegraf", ContentEncoding: "gzip", + Log: testutil.Logger{}, } client, err := influxdb.NewHTTPClient(config) @@ -618,6 +632,7 @@ func TestHTTP_UnixSocket(t *testing.T) { config: influxdb.HTTPConfig{ URL: &url.URL{Scheme: "unix", Path: sock}, Database: "xyzzy", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, `CREATE DATABASE "xyzzy"`, r.FormValue("q")) diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 0a6f66696..6af6dc173 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -27,7 +27,6 @@ type Client interface { Database() string URL() string Close() - SetLogger(telegraf.Logger) } // InfluxDB struct is the primary data structure for the plugin @@ -172,8 +171,6 @@ func (i *InfluxDB) Connect() error { return err } - c.SetLogger(i.Log) - i.clients = append(i.clients, c) case "http", "https", "unix": c, err := i.httpClient(ctx, parts, proxy) @@ -181,8 +178,6 @@ func (i *InfluxDB) Connect() error { return err } - c.SetLogger(i.Log) - i.clients = append(i.clients, c) default: return fmt.Errorf("unsupported scheme [%q]: %q", u, parts.Scheme) @@ -243,6 +238,7 @@ func (i *InfluxDB) udpClient(url *url.URL) (Client, error) { URL: url, MaxPayloadSize: int(i.UDPPayload.Size), Serializer: i.serializer, + Log: i.Log, } c, err := i.CreateUDPClientF(config) @@ -276,6 +272,7 @@ func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL) RetentionPolicy: i.RetentionPolicy, Consistency: i.WriteConsistency, Serializer: i.serializer, + Log: i.Log, } c, err := i.CreateHTTPClientF(config) diff --git a/plugins/outputs/influxdb/udp.go b/plugins/outputs/influxdb/udp.go index 76fdb7862..a50516c97 100644 --- a/plugins/outputs/influxdb/udp.go +++ b/plugins/outputs/influxdb/udp.go @@ -60,6 +60,7 @@ func NewUDPClient(config UDPConfig) (*udpClient, error) { url: config.URL, serializer: serializer, dialer: dialer, + log: config.Log, } return client, nil } @@ -69,18 +70,13 @@ type udpClient struct { dialer Dialer serializer *influx.Serializer url *url.URL - - log telegraf.Logger + log telegraf.Logger } func (c *udpClient) URL() string { return c.url.String() } -func (c *udpClient) SetLogger(log telegraf.Logger) { - c.log = log -} - func (c *udpClient) Database() string { return "" } diff --git a/plugins/outputs/influxdb/udp_test.go b/plugins/outputs/influxdb/udp_test.go index 61c64ff77..2e60c586c 100644 --- a/plugins/outputs/influxdb/udp_test.go +++ b/plugins/outputs/influxdb/udp_test.go @@ -79,7 +79,6 @@ func TestUDP_URL(t *testing.T) { } client, err := influxdb.NewUDPClient(config) - client.SetLogger(testutil.Logger{}) require.NoError(t, err) require.Equal(t, u.String(), client.URL()) @@ -103,7 +102,6 @@ func TestUDP_Simple(t *testing.T) { }, } 
client, err := influxdb.NewUDPClient(config) - client.SetLogger(testutil.Logger{}) require.NoError(t, err) ctx := context.Background() @@ -130,7 +128,6 @@ func TestUDP_DialError(t *testing.T) { }, } client, err := influxdb.NewUDPClient(config) - client.SetLogger(testutil.Logger{}) require.NoError(t, err) ctx := context.Background() @@ -160,7 +157,6 @@ func TestUDP_WriteError(t *testing.T) { }, } client, err := influxdb.NewUDPClient(config) - client.SetLogger(testutil.Logger{}) require.NoError(t, err) ctx := context.Background() @@ -187,6 +183,7 @@ func TestUDP_ErrorLogging(t *testing.T) { return conn, nil }, }, + Log: testutil.Logger{}, }, metrics: []telegraf.Metric{getMetric()}, logContains: `could not serialize metric: "cpu": need more space`, @@ -201,6 +198,7 @@ func TestUDP_ErrorLogging(t *testing.T) { return conn, nil }, }, + Log: testutil.Logger{}, }, metrics: []telegraf.Metric{ func() telegraf.Metric { @@ -224,7 +222,6 @@ func TestUDP_ErrorLogging(t *testing.T) { log.SetOutput(&b) client, err := influxdb.NewUDPClient(tt.config) - client.SetLogger(testutil.Logger{}) require.NoError(t, err) ctx := context.Background() @@ -268,7 +265,6 @@ func TestUDP_WriteWithRealConn(t *testing.T) { URL: u, } client, err := influxdb.NewUDPClient(config) - client.SetLogger(testutil.Logger{}) require.NoError(t, err) ctx := context.Background() From 93b41457df77b0a23ba2de1ca757c8d59ca5f0f5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 21 Aug 2019 18:17:41 -0700 Subject: [PATCH 1132/1815] Update changelog --- CHANGELOG.md | 4 ++++ README.md | 1 + 2 files changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 29e803787..e98256c60 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ - [apcupsd](/plugins/inputs/apcupsd/README.md) - Contributed by @jonaz - [docker_log](/plugins/inputs/docker_log/README.md) - Contributed by @prashanthjbabu - [fireboard](/plugins/inputs/fireboard/README.md) - Contributed by @ronnocol +- [logstash](/plugins/inputs/logstash/README.md) - Contributed by @lkmcs @dmitryilyin @arkady-emelyanov - [openntpd](/plugins/inputs/openntpd/README.md) - Contributed by @aromeyer - [uwsgi](/plugins/inputs/uswgi/README.md) - Contributed by @blaggacao @@ -79,6 +80,8 @@ - [#6257](https://github.com/influxdata/telegraf/pull/6257): Add database_tag option to influxdb_listener to add database from query string. - [#6246](https://github.com/influxdata/telegraf/pull/6246): Add capability to limit TLS versions and cipher suites. - [#6266](https://github.com/influxdata/telegraf/pull/6266): Add topic_tag option to mqtt_consumer. +- [#6207](https://github.com/influxdata/telegraf/pull/6207): Add ability to label inputs for logging. +- [#6300](https://github.com/influxdata/telegraf/pull/6300): Add TLS support to nginx_plus, nginx_plus_api and nginx_vts. #### Bugfixes @@ -90,6 +93,7 @@ - [#6136](https://github.com/influxdata/telegraf/issues/6136): Support Kafka 2.3.0 consumer groups. - [#6232](https://github.com/influxdata/telegraf/issues/6232): Fix persistent session in mqtt_consumer. - [#6235](https://github.com/influxdata/telegraf/issues/6235): Fix finder inconsistencies in vsphere input. +- [#6138](https://github.com/influxdata/telegraf/issues/6138): Fix parsing multiple metrics on the first line of tailed file. 
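The #6207 feature entry above covers the buffer and logger plumbing visible at the start of this patch: `NewBuffer` now takes the plugin name and alias as separate arguments, and the removed `OutputConfig.LogName()` is replaced by a shared `logName` helper. A minimal sketch of how such a helper can compose the log prefix, assuming the `type.name::alias` convention implied by the removed `OutputConfig.LogName()` code; the exact shared implementation is not part of this excerpt:

```go
package main

import "fmt"

// logName sketches how a plugin's log prefix might be built from its
// type ("inputs"/"outputs"), name, and optional alias. The "::" separator
// follows the convention visible in the removed OutputConfig.LogName().
func logName(pluginType, name, alias string) string {
	if alias == "" {
		return pluginType + "." + name
	}
	return pluginType + "." + name + "::" + alias
}

func main() {
	fmt.Println(logName("outputs", "influxdb", ""))       // outputs.influxdb
	fmt.Println(logName("outputs", "influxdb", "backup")) // outputs.influxdb::backup
}
```

With this scheme, two outputs of the same type can be told apart in the logs purely by their configured alias.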
## v1.11.5 [unreleased]

diff --git a/README.md b/README.md
index 7a9650e97..cd61ef407 100644
--- a/README.md
+++ b/README.md
@@ -213,6 +213,7 @@ For documentation on the latest development code see the [documentation index][d
 * [leofs](./plugins/inputs/leofs)
 * [linux_sysctl_fs](./plugins/inputs/linux_sysctl_fs)
 * [logparser](./plugins/inputs/logparser)
+* [logstash](./plugins/inputs/logstash)
 * [lustre2](./plugins/inputs/lustre2)
 * [mailchimp](./plugins/inputs/mailchimp)
 * [mcrouter](./plugins/inputs/mcrouter)

From f0c26dbd995c9e80826bb7bc95d35ef6a8ac6ce2 Mon Sep 17 00:00:00 2001
From: Craig Hobbs
Date: Wed, 21 Aug 2019 18:22:00 -0700
Subject: [PATCH 1133/1815] Add Marklogic Input Plugin (#6193)

---
 plugins/inputs/all/all.go                  |    1 +
 plugins/inputs/marklogic/README.md         |   64 +
 plugins/inputs/marklogic/marklogic.go      |  260 ++++
 plugins/inputs/marklogic/marklogic_test.go | 1282 ++++++++++++++++++++
 4 files changed, 1607 insertions(+)
 create mode 100644 plugins/inputs/marklogic/README.md
 create mode 100644 plugins/inputs/marklogic/marklogic.go
 create mode 100644 plugins/inputs/marklogic/marklogic_test.go

diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 13b70d5ca..7381487d5 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -81,6 +81,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/logstash"
 	_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
 	_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
+	_ "github.com/influxdata/telegraf/plugins/inputs/marklogic"
 	_ "github.com/influxdata/telegraf/plugins/inputs/mcrouter"
 	_ "github.com/influxdata/telegraf/plugins/inputs/mem"
 	_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
diff --git a/plugins/inputs/marklogic/README.md b/plugins/inputs/marklogic/README.md
new file mode 100644
index 000000000..afbfb2824
--- /dev/null
+++ b/plugins/inputs/marklogic/README.md
@@ -0,0 +1,64 @@
+# MarkLogic Plugin
+
+The MarkLogic Telegraf plugin gathers health status metrics from one or more hosts.
+
+### Configuration:
+
+```toml
+[[inputs.marklogic]]
+  ## Base URL of the MarkLogic HTTP Server.
+  url = "http://localhost:8002"
+
+  ## List of specific hostnames to retrieve information from. At least one (1) is required.
+  # hosts = ["hostname1", "hostname2"]
+
+  ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges
+  # username = "myuser"
+  # password = "mypassword"
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+```
+
+### Metrics
+
+- marklogic
+  - tags:
+    - source (the hostname of the server address, ex. `ml1.local`)
+    - id (the host node unique id ex.
`2592913110757471141`) + - fields: + - online + - total_load + - total_rate + - ncpus + - ncores + - total_cpu_stat_user + - total_cpu_stat_system + - total_cpu_stat_idle + - total_cpu_stat_iowait + - memory_process_size + - memory_process_rss + - memory_system_total + - memory_system_free + - memory_process_swap_size + - memory_size + - host_size + - log_device_space + - data_dir_space + - query_read_bytes + - query_read_load + - merge_read_bytes + - merge_write_load + - http_server_receive_bytes + - http_server_send_bytes + +### Example Output: + +``` +$> marklogic,host=localhost,id=2592913110757471141,source=ml1.local total_cpu_stat_iowait=0.0125649003311992,memory_process_swap_size=0i,host_size=380i,data_dir_space=28216i,query_read_load=0i,ncpus=1i,log_device_space=28216i,query_read_bytes=13947332i,merge_write_load=0i,http_server_receive_bytes=225893i,online=true,ncores=4i,total_cpu_stat_user=0.150778993964195,total_cpu_stat_system=0.598927974700928,total_cpu_stat_idle=99.2210006713867,memory_system_total=3947i,memory_system_free=2669i,memory_size=4096i,total_rate=14.7697010040283,http_server_send_bytes=0i,memory_process_size=903i,memory_process_rss=486i,merge_read_load=0i,total_load=0.00502600101754069 1566373000000000000 + +``` diff --git a/plugins/inputs/marklogic/marklogic.go b/plugins/inputs/marklogic/marklogic.go new file mode 100644 index 000000000..b62d017de --- /dev/null +++ b/plugins/inputs/marklogic/marklogic.go @@ -0,0 +1,260 @@ +package marklogic + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "path" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Marklogic configuration toml +type Marklogic struct { + URL string `toml:"url"` + Hosts []string `toml:"hosts"` + Username string `toml:"username"` + Password string `toml:"password"` + Sources []string + + tls.ClientConfig + + client *http.Client +} + +type MlPointInt struct { + Value int `json:"value"` +} + +type MlPointFloat struct { + Value float64 `json:"value"` +} + +type MlPointBool struct { + Value bool `json:"value"` +} + +// MarkLogic v2 management api endpoints for hosts status +const statsPath = "/manage/v2/hosts/" +const viewFormat = "view=status&format=json" + +type MlHost struct { + HostStatus struct { + ID string `json:"id"` + Name string `json:"name"` + StatusProperties struct { + Online MlPointBool `json:"online"` + LoadProperties struct { + TotalLoad MlPointFloat `json:"total-load"` + } `json:"load-properties"` + RateProperties struct { + TotalRate MlPointFloat `json:"total-rate"` + } `json:"rate-properties"` + StatusDetail struct { + Cpus MlPointInt `json:"cpus"` + Cores MlPointInt `json:"cores"` + TotalCPUStatUser float64 `json:"total-cpu-stat-user"` + TotalCPUStatSystem float64 `json:"total-cpu-stat-system"` + TotalCPUStatIdle float64 `json:"total-cpu-stat-idle"` + TotalCPUStatIowait float64 `json:"total-cpu-stat-iowait"` + MemoryProcessSize MlPointInt `json:"memory-process-size"` + MemoryProcessRss MlPointInt `json:"memory-process-rss"` + MemorySystemTotal MlPointInt `json:"memory-system-total"` + MemorySystemFree MlPointInt `json:"memory-system-free"` + MemoryProcessSwapSize MlPointInt `json:"memory-process-swap-size"` + MemorySize MlPointInt `json:"memory-size"` + HostSize MlPointInt `json:"host-size"` + LogDeviceSpace MlPointInt `json:"log-device-space"` + DataDirSpace MlPointInt `json:"data-dir-space"` + QueryReadBytes MlPointInt `json:"query-read-bytes"` + QueryReadLoad 
MlPointInt `json:"query-read-load"`
+			MergeReadLoad          MlPointInt `json:"merge-read-load"`
+			MergeWriteLoad         MlPointInt `json:"merge-write-load"`
+			HTTPServerReceiveBytes MlPointInt `json:"http-server-receive-bytes"`
+			HTTPServerSendBytes    MlPointInt `json:"http-server-send-bytes"`
+		} `json:"status-detail"`
+	} `json:"status-properties"`
+} `json:"host-status"`
+}
+
+// Description returns the plugin description.
+func (c *Marklogic) Description() string {
+	return "Retrieves information on a specific host in a MarkLogic Cluster"
+}
+
+var sampleConfig = `
+  ## Base URL of the MarkLogic HTTP Server.
+  url = "http://localhost:8002"
+
+  ## List of specific hostnames to retrieve information from. At least one (1) is required.
+  # hosts = ["hostname1", "hostname2"]
+
+  ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges
+  # username = "myuser"
+  # password = "mypassword"
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+`
+
+// Init parses all source URLs and stores them on the Marklogic struct
+func (c *Marklogic) Init() error {
+
+	if len(c.URL) == 0 {
+		c.URL = "http://localhost:8002/"
+	}
+
+	for _, u := range c.Hosts {
+		base, err := url.Parse(c.URL)
+		if err != nil {
+			return err
+		}
+
+		base.Path = path.Join(base.Path, statsPath, u)
+		addr := base.ResolveReference(base)
+
+		addr.RawQuery = viewFormat
+		u := addr.String()
+		c.Sources = append(c.Sources, u)
+	}
+	return nil
+}
+
+// SampleConfig to gather stats from localhost, default port.
+func (c *Marklogic) SampleConfig() string {
+	return sampleConfig
+}
+
+// Gather metrics from HTTP Server.
+func (c *Marklogic) Gather(accumulator telegraf.Accumulator) error {
+	var wg sync.WaitGroup
+
+	if c.client == nil {
+		client, err := c.createHTTPClient()
+
+		if err != nil {
+			return err
+		}
+		c.client = client
+	}
+
+	// Range over all source URLs appended to the struct
+	for _, serv := range c.Sources {
+		//fmt.Printf("Encoded URL is %q\n", serv)
+		wg.Add(1)
+		go func(serv string) {
+			defer wg.Done()
+			if err := c.fetchAndInsertData(accumulator, serv); err != nil {
+				accumulator.AddError(fmt.Errorf("[host=%s]: %s", serv, err))
+			}
+		}(serv)
+	}
+
+	wg.Wait()
+
+	return nil
+}
+
+func (c *Marklogic) fetchAndInsertData(acc telegraf.Accumulator, url string) error {
+	ml := &MlHost{}
+	if err := c.gatherJSONData(url, ml); err != nil {
+		return err
+	}
+
+	// Build a map of tags
+	tags := map[string]string{
+		"source": ml.HostStatus.Name,
+		"id":     ml.HostStatus.ID,
+	}
+
+	// Build a map of field values
+	fields := map[string]interface{}{
+		"online":                ml.HostStatus.StatusProperties.Online.Value,
+		"total_load":            ml.HostStatus.StatusProperties.LoadProperties.TotalLoad.Value,
+		"total_rate":            ml.HostStatus.StatusProperties.RateProperties.TotalRate.Value,
+		"ncpus":                 ml.HostStatus.StatusProperties.StatusDetail.Cpus.Value,
+		"ncores":                ml.HostStatus.StatusProperties.StatusDetail.Cores.Value,
+		"total_cpu_stat_user":   ml.HostStatus.StatusProperties.StatusDetail.TotalCPUStatUser,
+		"total_cpu_stat_system": ml.HostStatus.StatusProperties.StatusDetail.TotalCPUStatSystem,
+		"total_cpu_stat_idle":   ml.HostStatus.StatusProperties.StatusDetail.TotalCPUStatIdle,
+		"total_cpu_stat_iowait": ml.HostStatus.StatusProperties.StatusDetail.TotalCPUStatIowait,
+		"memory_process_size":   ml.HostStatus.StatusProperties.StatusDetail.MemoryProcessSize.Value,
+		"memory_process_rss":
ml.HostStatus.StatusProperties.StatusDetail.MemoryProcessRss.Value, + "memory_system_total": ml.HostStatus.StatusProperties.StatusDetail.MemorySystemTotal.Value, + "memory_system_free": ml.HostStatus.StatusProperties.StatusDetail.MemorySystemFree.Value, + "memory_process_swap_size": ml.HostStatus.StatusProperties.StatusDetail.MemoryProcessSwapSize.Value, + "memory_size": ml.HostStatus.StatusProperties.StatusDetail.MemorySize.Value, + "host_size": ml.HostStatus.StatusProperties.StatusDetail.HostSize.Value, + "log_device_space": ml.HostStatus.StatusProperties.StatusDetail.LogDeviceSpace.Value, + "data_dir_space": ml.HostStatus.StatusProperties.StatusDetail.DataDirSpace.Value, + "query_read_bytes": ml.HostStatus.StatusProperties.StatusDetail.QueryReadBytes.Value, + "query_read_load": ml.HostStatus.StatusProperties.StatusDetail.QueryReadLoad.Value, + "merge_read_load": ml.HostStatus.StatusProperties.StatusDetail.MergeReadLoad.Value, + "merge_write_load": ml.HostStatus.StatusProperties.StatusDetail.MergeWriteLoad.Value, + "http_server_receive_bytes": ml.HostStatus.StatusProperties.StatusDetail.HTTPServerReceiveBytes.Value, + "http_server_send_bytes": ml.HostStatus.StatusProperties.StatusDetail.HTTPServerSendBytes.Value, + } + + // Accumulate the tags and values + acc.AddFields("marklogic", fields, tags) + + return nil +} + +func (c *Marklogic) createHTTPClient() (*http.Client, error) { + tlsCfg, err := c.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + }, + Timeout: time.Duration(5 * time.Second), + } + + return client, nil +} + +func (c *Marklogic) gatherJSONData(url string, v interface{}) error { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return err + } + + if c.Username != "" || c.Password != "" { + req.SetBasicAuth(c.Username, c.Password) + } + + response, err := c.client.Do(req) + if err != nil { + return err + } + defer response.Body.Close() + if response.StatusCode != http.StatusOK { + return fmt.Errorf("marklogic: API responded with status-code %d, expected %d", + response.StatusCode, http.StatusOK) + } + + if err = json.NewDecoder(response.Body).Decode(v); err != nil { + return err + } + + return nil +} + +func init() { + inputs.Add("marklogic", func() telegraf.Input { + return &Marklogic{} + }) +} diff --git a/plugins/inputs/marklogic/marklogic_test.go b/plugins/inputs/marklogic/marklogic_test.go new file mode 100644 index 000000000..34e4bbd6b --- /dev/null +++ b/plugins/inputs/marklogic/marklogic_test.go @@ -0,0 +1,1282 @@ +package marklogic + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestMarklogic(t *testing.T) { + // Create a test server with the const response JSON + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, response) + })) + defer ts.Close() + + // Parse the URL of the test server, used to verify the expected host + _, err := url.Parse(ts.URL) + require.NoError(t, err) + + // Create a new Marklogic instance with our given test server + + ml := &Marklogic{ + Hosts: []string{"example1"}, + URL: string(ts.URL), + //Sources: []string{"http://localhost:8002/manage/v2/hosts/hostname1?view=status&format=json"}, + } + + // Create a test accumulator + acc := &testutil.Accumulator{} + + // Init() call to parse all source URL's + err = 
ml.Init() + require.NoError(t, err) + + // Gather data from the test server + err = ml.Gather(acc) + require.NoError(t, err) + + // Expect the correct values for all known keys + expectFields := map[string]interface{}{ + "online": true, + "total_load": 0.00429263804107904, + "ncpus": 1, + "ncores": 4, + "total_rate": 15.6527042388916, + "total_cpu_stat_user": 0.276381999254227, + "total_cpu_stat_system": 0.636515974998474, + "total_cpu_stat_idle": 99.0578002929688, + "total_cpu_stat_iowait": 0.0125628001987934, + "memory_process_size": 1234, + "memory_process_rss": 815, + "memory_system_total": 3947, + "memory_system_free": 2761, + "memory_process_swap_size": 0, + "memory_size": 4096, + "host_size": 64, + "log_device_space": 34968, + "data_dir_space": 34968, + "query_read_bytes": 11492428, + "query_read_load": 0, + "merge_read_load": 0, + "merge_write_load": 0, + "http_server_receive_bytes": 285915, + "http_server_send_bytes": 0, + } + // Expect the correct values for all tags + expectTags := map[string]string{ + "source": "ml1.local", + "id": "2592913110757471141", + } + + acc.AssertContainsTaggedFields(t, "marklogic", expectFields, expectTags) + +} + +var response = ` +{ + "host-status": { + "id": "2592913110757471141", + "name": "ml1.local", + "version": "10.0-1", + "effective-version": 10000100, + "host-mode": "normal", + "host-mode-description": "", + "meta": { + "uri": "/manage/v2/hosts/ml1.local?view=status", + "current-time": "2019-07-28T22:32:19.056203Z", + "elapsed-time": { + "units": "sec", + "value": 0.013035 + } + }, + "relations": { + "relation-group": [ + { + "uriref": "/manage/v2/forests?view=status&host-id=ml1.local", + "typeref": "forests", + "relation": [ + { + "uriref": "/manage/v2/forests/App-Services", + "idref": "8573569457346659714", + "nameref": "App-Services" + }, + { + "uriref": "/manage/v2/forests/Documents", + "idref": "17189472171231792168", + "nameref": "Documents" + }, + { + "uriref": "/manage/v2/forests/Extensions", + "idref": "1510244530748962553", + "nameref": "Extensions" + }, + { + "uriref": "/manage/v2/forests/Fab", + "idref": "16221965829238302106", + "nameref": "Fab" + }, + { + "uriref": "/manage/v2/forests/Last-Login", + "idref": "1093671762706318022", + "nameref": "Last-Login" + }, + { + "uriref": "/manage/v2/forests/Meters", + "idref": "1573439446779995954", + "nameref": "Meters" + }, + { + "uriref": "/manage/v2/forests/Modules", + "idref": "18320951141685848719", + "nameref": "Modules" + }, + { + "uriref": "/manage/v2/forests/Schemas", + "idref": "18206720449696085936", + "nameref": "Schemas" + }, + { + "uriref": "/manage/v2/forests/Security", + "idref": "9348728036360382939", + "nameref": "Security" + }, + { + "uriref": "/manage/v2/forests/Triggers", + "idref": "10142793547905338229", + "nameref": "Triggers" + } + ] + }, + { + "typeref": "groups", + "relation": [ + { + "uriref": "/manage/v2/groups/Default?view=status", + "idref": "16808579782544283978", + "nameref": "Default" + } + ] + } + ] + }, + "status-properties": { + "online": { + "units": "bool", + "value": true + }, + "secure": { + "units": "bool", + "value": false + }, + "cache-properties": { + "cache-detail": { + "compressed-tree-cache-partition": [ + { + "partition-size": 64, + "partition-table": 3.40000009536743, + "partition-used": 29.7000007629395, + "partition-free": 70.1999969482422, + "partition-overhead": 0.100000001490116 + } + ], + "expanded-tree-cache-partition": [ + { + "partition-size": 128, + "partition-table": 6.19999980926514, + "partition-busy": 0, + "partition-used": 
87.3000030517578, + "partition-free": 12.3999996185303, + "partition-overhead": 0.300000011920929 + } + ], + "triple-cache-partition": [ + { + "partition-size": 64, + "partition-busy": 0, + "partition-used": 0, + "partition-free": 100 + } + ], + "triple-value-cache-partition": [ + { + "partition-size": 128, + "partition-busy": 0, + "partition-used": 0, + "partition-free": 100, + "value-count": 0, + "value-bytes-total": 0, + "value-bytes-average": 0 + } + ] + } + }, + "load-properties": { + "total-load": { + "units": "sec/sec", + "value": 0.00429263804107904 + }, + "load-detail": { + "query-read-load": { + "units": "sec/sec", + "value": 0 + }, + "journal-write-load": { + "units": "sec/sec", + "value": 0 + }, + "save-write-load": { + "units": "sec/sec", + "value": 0 + }, + "merge-read-load": { + "units": "sec/sec", + "value": 0 + }, + "merge-write-load": { + "units": "sec/sec", + "value": 0 + }, + "backup-read-load": { + "units": "sec/sec", + "value": 0 + }, + "backup-write-load": { + "units": "sec/sec", + "value": 0 + }, + "restore-read-load": { + "units": "sec/sec", + "value": 0 + }, + "restore-write-load": { + "units": "sec/sec", + "value": 0 + }, + "large-read-load": { + "units": "sec/sec", + "value": 0 + }, + "large-write-load": { + "units": "sec/sec", + "value": 0 + }, + "external-binary-read-load": { + "units": "sec/sec", + "value": 0 + }, + "xdqp-client-receive-load": { + "units": "sec/sec", + "value": 0 + }, + "xdqp-client-send-load": { + "units": "sec/sec", + "value": 0 + }, + "xdqp-server-receive-load": { + "units": "sec/sec", + "value": 0 + }, + "xdqp-server-send-load": { + "units": "sec/sec", + "value": 0 + }, + "foreign-xdqp-client-receive-load": { + "units": "sec/sec", + "value": 0 + }, + "foreign-xdqp-client-send-load": { + "units": "sec/sec", + "value": 0 + }, + "foreign-xdqp-server-receive-load": { + "units": "sec/sec", + "value": 0 + }, + "foreign-xdqp-server-send-load": { + "units": "sec/sec", + "value": 0 + }, + "read-lock-wait-load": { + "units": "sec/sec", + "value": 0 + }, + "read-lock-hold-load": { + "units": "sec/sec", + "value": 0 + }, + "write-lock-wait-load": { + "units": "sec/sec", + "value": 0 + }, + "write-lock-hold-load": { + "units": "sec/sec", + "value": 0.00429263804107904 + }, + "deadlock-wait-load": { + "units": "sec/sec", + "value": 0 + } + } + }, + "rate-properties": { + "total-rate": { + "units": "MB/sec", + "value": 15.6527042388916 + }, + "rate-detail": { + "memory-system-pagein-rate": { + "units": "MB/sec", + "value": 0 + }, + "memory-system-pageout-rate": { + "units": "MB/sec", + "value": 15.6420001983643 + }, + "memory-system-swapin-rate": { + "units": "MB/sec", + "value": 0 + }, + "memory-system-swapout-rate": { + "units": "MB/sec", + "value": 0 + }, + "query-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "journal-write-rate": { + "units": "MB/sec", + "value": 0.00372338597662747 + }, + "save-write-rate": { + "units": "MB/sec", + "value": 0.0024786819703877 + }, + "merge-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "merge-write-rate": { + "units": "MB/sec", + "value": 0 + }, + "backup-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "backup-write-rate": { + "units": "MB/sec", + "value": 0 + }, + "restore-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "restore-write-rate": { + "units": "MB/sec", + "value": 0 + }, + "large-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "large-write-rate": { + "units": "MB/sec", + "value": 0 + }, + "external-binary-read-rate": { + "units": "MB/sec", + "value": 0 + }, + 
"xdqp-client-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "xdqp-client-send-rate": { + "units": "MB/sec", + "value": 0.00293614692054689 + }, + "xdqp-server-receive-rate": { + "units": "MB/sec", + "value": 0.00156576896551996 + }, + "xdqp-server-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "foreign-xdqp-client-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "foreign-xdqp-client-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "foreign-xdqp-server-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "foreign-xdqp-server-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "read-lock-rate": { + "units": "MB/sec", + "value": 0 + }, + "write-lock-rate": { + "units": "MB/sec", + "value": 0.251882910728455 + }, + "deadlock-rate": { + "units": "MB/sec", + "value": 0 + } + } + }, + "status-detail": { + "bind-port": 7999, + "connect-port": 7999, + "ssl-fips-enabled": { + "units": "bool", + "value": true + }, + "foreign-bind-port": 7998, + "foreign-connect-port": 7998, + "background-io-limit": { + "units": "quantity", + "value": 0 + }, + "metering-enabled": { + "units": "bool", + "value": true + }, + "meters-database": { + "units": "quantity", + "value": "11952918530142281790" + }, + "performance-metering-enabled": { + "units": "bool", + "value": true + }, + "performance-metering-period": { + "units": "second", + "value": 60 + }, + "performance-metering-retain-raw": { + "units": "day", + "value": 7 + }, + "performance-metering-retain-hourly": { + "units": "day", + "value": 30 + }, + "performance-metering-retain-daily": { + "units": "day", + "value": 90 + }, + "last-startup": { + "units": "datetime", + "value": "2019-07-26T17:23:36.412644Z" + }, + "version": "10.0-1", + "effective-version": { + "units": "quantity", + "value": 10000100 + }, + "software-version": { + "units": "quantity", + "value": 10000100 + }, + "os-version": "NA", + "converters-version": "10.0-1", + "host-mode": { + "units": "enum", + "value": "normal" + }, + "architecture": "x86_64", + "platform": "linux", + "license-key": "000-000-000-000-000-000-000", + "licensee": "NA", + "license-key-expires": { + "units": "datetime", + "value": "2999-01-23T00:00:00Z" + }, + "license-key-cpus": { + "units": "quantity", + "value": 0 + }, + "license-key-cores": { + "units": "quantity", + "value": 0 + }, + "license-key-size": { + "units": "MB", + "value": 0 + }, + "license-key-option": [ + { + "units": "enum", + "value": "conversion" + }, + { + "units": "enum", + "value": "failover" + }, + { + "units": "enum", + "value": "alerting" + }, + { + "units": "enum", + "value": "geospatial" + }, + { + "units": "enum", + "value": "flexible replication" + }, + { + "units": "enum", + "value": "tiered storage" + }, + { + "units": "enum", + "value": "semantics" + }, + { + "units": "enum", + "value": "French" + }, + { + "units": "enum", + "value": "Italian" + }, + { + "units": "enum", + "value": "German" + }, + { + "units": "enum", + "value": "Spanish" + }, + { + "units": "enum", + "value": "Traditional Chinese" + }, + { + "units": "enum", + "value": "Simplified Chinese" + }, + { + "units": "enum", + "value": "Arabic" + }, + { + "units": "enum", + "value": "Russian" + }, + { + "units": "enum", + "value": "Dutch" + }, + { + "units": "enum", + "value": "Korean" + }, + { + "units": "enum", + "value": "Persian" + }, + { + "units": "enum", + "value": "Japanese" + }, + { + "units": "enum", + "value": "Portuguese" + }, + { + "units": "enum", + "value": "English" + } + ], + "edition": { + "units": "enum", + "value": 
"Enterprise Edition" + }, + "environment": { + "units": "enum", + "value": "developer" + }, + "cpus": { + "units": "quantity", + "value": 1 + }, + "cores": { + "units": "quantity", + "value": 4 + }, + "core-threads": { + "units": "quantity", + "value": 4 + }, + "total-cpu-stat-user": 0.276381999254227, + "total-cpu-stat-nice": 0, + "total-cpu-stat-system": 0.636515974998474, + "total-cpu-stat-idle": 99.0578002929688, + "total-cpu-stat-iowait": 0.0125628001987934, + "total-cpu-stat-irq": 0, + "total-cpu-stat-softirq": 0.0167504008859396, + "total-cpu-stat-steal": 0, + "total-cpu-stat-guest": 0, + "total-cpu-stat-guest-nice": 0, + "memory-process-size": { + "units": "fraction", + "value": 1234 + }, + "memory-process-rss": { + "units": "fraction", + "value": 815 + }, + "memory-process-anon": { + "units": "fraction", + "value": 743 + }, + "memory-process-rss-hwm": { + "units": "fraction", + "value": 1072 + }, + "memory-process-swap-size": { + "units": "fraction", + "value": 0 + }, + "memory-process-huge-pages-size": { + "units": "fraction", + "value": 0 + }, + "memory-system-total": { + "units": "fraction", + "value": 3947 + }, + "memory-system-free": { + "units": "fraction", + "value": 2761 + }, + "memory-system-pagein-rate": { + "units": "fraction", + "value": 0 + }, + "memory-system-pageout-rate": { + "units": "fraction", + "value": 15.6420001983643 + }, + "memory-system-swapin-rate": { + "units": "fraction", + "value": 0 + }, + "memory-system-swapout-rate": { + "units": "fraction", + "value": 0 + }, + "memory-size": { + "units": "quantity", + "value": 4096 + }, + "memory-file-size": { + "units": "quantity", + "value": 5 + }, + "memory-forest-size": { + "units": "quantity", + "value": 849 + }, + "memory-unclosed-size": { + "units": "quantity", + "value": 0 + }, + "memory-cache-size": { + "units": "quantity", + "value": 320 + }, + "memory-registry-size": { + "units": "quantity", + "value": 1 + }, + "memory-join-size": { + "units": "quantity", + "value": 0 + }, + "host-size": { + "units": "MB", + "value": 64 + }, + "host-large-data-size": { + "units": "MB", + "value": 0 + }, + "log-device-space": { + "units": "MB", + "value": 34968 + }, + "data-dir-space": { + "units": "MB", + "value": 34968 + }, + "query-read-bytes": { + "units": "bytes", + "value": 11492428 + }, + "query-read-time": { + "units": "time", + "value": "PT0.141471S" + }, + "query-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "query-read-load": { + "units": "", + "value": 0 + }, + "journal-write-bytes": { + "units": "bytes", + "value": 285717868 + }, + "journal-write-time": { + "units": "time", + "value": "PT17.300832S" + }, + "journal-write-rate": { + "units": "MB/sec", + "value": 0.00372338597662747 + }, + "journal-write-load": { + "units": "", + "value": 0 + }, + "save-write-bytes": { + "units": "bytes", + "value": 95818597 + }, + "save-write-time": { + "units": "time", + "value": "PT2.972855S" + }, + "save-write-rate": { + "units": "MB/sec", + "value": 0.0024786819703877 + }, + "save-write-load": { + "units": "", + "value": 0 + }, + "merge-read-bytes": { + "units": "bytes", + "value": 55374848 + }, + "merge-read-time": { + "units": "time", + "value": "PT0.535705S" + }, + "merge-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "merge-read-load": { + "units": "", + "value": 0 + }, + "merge-write-bytes": { + "units": "bytes", + "value": 146451731 + }, + "merge-write-time": { + "units": "time", + "value": "PT5.392288S" + }, + "merge-write-rate": { + "units": "MB/sec", + "value": 0 + }, + "merge-write-load": { + 
"units": "", + "value": 0 + }, + "backup-read-bytes": { + "units": "bytes", + "value": 0 + }, + "backup-read-time": { + "units": "time", + "value": "PT0S" + }, + "backup-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "backup-read-load": { + "units": "", + "value": 0 + }, + "backup-write-bytes": { + "units": "bytes", + "value": 0 + }, + "backup-write-time": { + "units": "time", + "value": "PT0S" + }, + "backup-write-rate": { + "units": "MB/sec", + "value": 0 + }, + "backup-write-load": { + "units": "", + "value": 0 + }, + "restore-read-bytes": { + "units": "bytes", + "value": 0 + }, + "restore-read-time": { + "units": "time", + "value": "PT0S" + }, + "restore-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "restore-read-load": { + "units": "", + "value": 0 + }, + "restore-write-bytes": { + "units": "bytes", + "value": 0 + }, + "restore-write-time": { + "units": "time", + "value": "PT0S" + }, + "restore-write-rate": { + "units": "MB/sec", + "value": 0 + }, + "restore-write-load": { + "units": "", + "value": 0 + }, + "large-read-bytes": { + "units": "bytes", + "value": 0 + }, + "large-read-time": { + "units": "time", + "value": "PT0S" + }, + "large-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "large-read-load": { + "units": "", + "value": 0 + }, + "large-write-bytes": { + "units": "bytes", + "value": 0 + }, + "large-write-time": { + "units": "time", + "value": "PT0S" + }, + "large-write-rate": { + "units": "MB/sec", + "value": 0 + }, + "large-write-load": { + "units": "", + "value": 0 + }, + "external-binary-read-bytes": { + "units": "bytes", + "value": 0 + }, + "external-binary-read-time": { + "units": "time", + "value": "PT0S" + }, + "external-binary-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "external-binary-read-load": { + "units": "", + "value": 0 + }, + "webDAV-server-receive-bytes": { + "units": "bytes", + "value": 0 + }, + "webDAV-server-receive-time": { + "units": "sec", + "value": "PT0S" + }, + "webDAV-server-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "webDAV-server-receive-load": { + "units": "", + "value": 0 + }, + "webDAV-server-send-bytes": { + "units": "bytes", + "value": 0 + }, + "webDAV-server-send-time": { + "units": "sec", + "value": "PT0S" + }, + "webDAV-server-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "webDAV-server-send-load": { + "units": "", + "value": 0 + }, + "http-server-receive-bytes": { + "units": "bytes", + "value": 285915 + }, + "http-server-receive-time": { + "units": "sec", + "value": "PT0.02028S" + }, + "http-server-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "http-server-receive-load": { + "units": "", + "value": 0 + }, + "http-server-send-bytes": { + "units": "bytes", + "value": 0 + }, + "http-server-send-time": { + "units": "sec", + "value": "PT0S" + }, + "http-server-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "http-server-send-load": { + "units": "", + "value": 0 + }, + "xdbc-server-receive-bytes": { + "units": "bytes", + "value": 0 + }, + "xdbc-server-receive-time": { + "units": "sec", + "value": "PT0S" + }, + "xdbc-server-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "xdbc-server-receive-load": { + "units": "", + "value": 0 + }, + "xdbc-server-send-bytes": { + "units": "bytes", + "value": 0 + }, + "xdbc-server-send-time": { + "units": "sec", + "value": "PT0S" + }, + "xdbc-server-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "xdbc-server-send-load": { + "units": "", + "value": 0 + }, + "odbc-server-receive-bytes": { + "units": "bytes", + 
"value": 0 + }, + "odbc-server-receive-time": { + "units": "sec", + "value": "PT0S" + }, + "odbc-server-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "odbc-server-receive-load": { + "units": "", + "value": 0 + }, + "odbc-server-send-bytes": { + "units": "bytes", + "value": 0 + }, + "odbc-server-send-time": { + "units": "sec", + "value": "PT0S" + }, + "odbc-server-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "odbc-server-send-load": { + "units": "", + "value": 0 + }, + "xdqp-client-receive-bytes": { + "units": "bytes", + "value": 3020032 + }, + "xdqp-client-receive-time": { + "units": "time", + "value": "PT0.046612S" + }, + "xdqp-client-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "xdqp-client-receive-load": { + "units": "", + "value": 0 + }, + "xdqp-client-send-bytes": { + "units": "bytes", + "value": 163513952 + }, + "xdqp-client-send-time": { + "units": "time", + "value": "PT22.700289S" + }, + "xdqp-client-send-rate": { + "units": "MB/sec", + "value": 0.00293614692054689 + }, + "xdqp-client-send-load": { + "units": "", + "value": 0 + }, + "xdqp-server-receive-bytes": { + "units": "bytes", + "value": 131973888 + }, + "xdqp-server-receive-time": { + "units": "time", + "value": "PT3.474521S" + }, + "xdqp-server-receive-rate": { + "units": "MB/sec", + "value": 0.00156576896551996 + }, + "xdqp-server-receive-load": { + "units": "", + "value": 0 + }, + "xdqp-server-send-bytes": { + "units": "bytes", + "value": 10035300 + }, + "xdqp-server-send-time": { + "units": "time", + "value": "PT4.275597S" + }, + "xdqp-server-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "xdqp-server-send-load": { + "units": "", + "value": 0 + }, + "xdqp-server-request-time": { + "units": "milliseconds", + "value": 0.743777990341187 + }, + "xdqp-server-request-rate": { + "units": "requests/sec", + "value": 0.371862411499023 + }, + "foreign-xdqp-client-receive-bytes": { + "units": "bytes", + "value": 0 + }, + "foreign-xdqp-client-receive-time": { + "units": "time", + "value": "PT0S" + }, + "foreign-xdqp-client-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "foreign-xdqp-client-receive-load": { + "units": "", + "value": 0 + }, + "foreign-xdqp-client-send-bytes": { + "units": "bytes", + "value": 0 + }, + "foreign-xdqp-client-send-time": { + "units": "time", + "value": "PT0S" + }, + "foreign-xdqp-client-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "foreign-xdqp-client-send-load": { + "units": "", + "value": 0 + }, + "foreign-xdqp-server-receive-bytes": { + "units": "bytes", + "value": 0 + }, + "foreign-xdqp-server-receive-time": { + "units": "time", + "value": "PT0S" + }, + "foreign-xdqp-server-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "foreign-xdqp-server-receive-load": { + "units": "", + "value": 0 + }, + "foreign-xdqp-server-send-bytes": { + "units": "bytes", + "value": 0 + }, + "foreign-xdqp-server-send-time": { + "units": "time", + "value": "PT0S" + }, + "foreign-xdqp-server-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "foreign-xdqp-server-send-load": { + "units": "", + "value": 0 + }, + "read-lock-count": { + "units": "locks", + "value": 104 + }, + "read-lock-wait-time": { + "units": "seconds", + "value": "PT0.001464S" + }, + "read-lock-hold-time": { + "units": "seconds", + "value": "PT3.022913S" + }, + "read-lock-rate": { + "units": "locks/sec", + "value": 0 + }, + "read-lock-wait-load": { + "units": "", + "value": 0 + }, + "read-lock-hold-load": { + "units": "", + "value": 0 + }, + "write-lock-count": { + "units": "locks", + 
"value": 15911 + }, + "write-lock-wait-time": { + "units": "seconds", + "value": "PT0.317098S" + }, + "write-lock-hold-time": { + "units": "seconds", + "value": "PT11M46.9923759S" + }, + "write-lock-rate": { + "units": "locks/sec", + "value": 0.251882910728455 + }, + "write-lock-wait-load": { + "units": "", + "value": 0 + }, + "write-lock-hold-load": { + "units": "", + "value": 0.00429263804107904 + }, + "deadlock-count": { + "units": "locks", + "value": 0 + }, + "deadlock-wait-time": { + "units": "seconds", + "value": "PT0S" + }, + "deadlock-rate": { + "units": "locks/sec", + "value": 0 + }, + "deadlock-wait-load": { + "units": "", + "value": 0 + }, + "external-kms-request-rate": { + "units": "requests/sec", + "value": 0 + }, + "external-kms-request-time": { + "units": "milliseconds", + "value": 0 + }, + "keystore-status": "normal", + "ldap-request-rate": { + "units": "requests/sec", + "value": 0 + }, + "ldap-request-time": { + "units": "milliseconds", + "value": 0 + } + } + }, + "related-views": { + "related-view": [ + { + "view-type": "item", + "view-name": "default", + "view-uri": "/manage/v2/hosts/example" + } + ] + } + } +} +` From 0a4d74c82714da656ea454969c2083ef1d1bacfa Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 21 Aug 2019 18:26:44 -0700 Subject: [PATCH 1134/1815] Update changelog --- CHANGELOG.md | 1 + README.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e98256c60..1fa6c91eb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ - [docker_log](/plugins/inputs/docker_log/README.md) - Contributed by @prashanthjbabu - [fireboard](/plugins/inputs/fireboard/README.md) - Contributed by @ronnocol - [logstash](/plugins/inputs/logstash/README.md) - Contributed by @lkmcs @dmitryilyin @arkady-emelyanov +- [marklogic](/plugins/inputs/marklogic/README.md) - Contributed by @influxdata - [openntpd](/plugins/inputs/openntpd/README.md) - Contributed by @aromeyer - [uwsgi](/plugins/inputs/uswgi/README.md) - Contributed by @blaggacao diff --git a/README.md b/README.md index cd61ef407..db4012f62 100644 --- a/README.md +++ b/README.md @@ -216,6 +216,7 @@ For documentation on the latest development code see the [documentation index][d * [logstash](./plugins/inputs/logstash) * [lustre2](./plugins/inputs/lustre2) * [mailchimp](./plugins/inputs/mailchimp) +* [marklogic](./plugins/inputs/marklogic) * [mcrouter](./plugins/inputs/mcrouter) * [memcached](./plugins/inputs/memcached) * [mem](./plugins/inputs/mem) From b578586d4a8c84bc5c5f8fdbb21c674313784d03 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 22 Aug 2019 10:50:02 -0700 Subject: [PATCH 1135/1815] Send TERM to exec processes before sending KILL signal (#6302) --- internal/exec.go | 30 +++++++++++++++++++++ internal/exec_unix.go | 58 ++++++++++++++++++++++++++++++++++++++++ internal/exec_windows.go | 41 ++++++++++++++++++++++++++++ internal/internal.go | 49 --------------------------------- 4 files changed, 129 insertions(+), 49 deletions(-) create mode 100644 internal/exec.go create mode 100644 internal/exec_unix.go create mode 100644 internal/exec_windows.go diff --git a/internal/exec.go b/internal/exec.go new file mode 100644 index 000000000..795822f46 --- /dev/null +++ b/internal/exec.go @@ -0,0 +1,30 @@ +package internal + +import ( + "bytes" + "os/exec" + "time" +) + +// CombinedOutputTimeout runs the given command with the given timeout and +// returns the combined output of stdout and stderr. +// If the command times out, it attempts to kill the process. 
+func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) { + var b bytes.Buffer + c.Stdout = &b + c.Stderr = &b + if err := c.Start(); err != nil { + return nil, err + } + err := WaitTimeout(c, timeout) + return b.Bytes(), err +} + +// RunTimeout runs the given command with the given timeout. +// If the command times out, it attempts to kill the process. +func RunTimeout(c *exec.Cmd, timeout time.Duration) error { + if err := c.Start(); err != nil { + return err + } + return WaitTimeout(c, timeout) +} diff --git a/internal/exec_unix.go b/internal/exec_unix.go new file mode 100644 index 000000000..d41aae825 --- /dev/null +++ b/internal/exec_unix.go @@ -0,0 +1,58 @@ +// +build !windows + +package internal + +import ( + "log" + "os/exec" + "syscall" + "time" +) + +// KillGrace is the amount of time we allow a process to shutdown before +// sending a SIGKILL. +const KillGrace = 5 * time.Second + +// WaitTimeout waits for the given command to finish with a timeout. +// It assumes the command has already been started. +// If the command times out, it attempts to kill the process. +func WaitTimeout(c *exec.Cmd, timeout time.Duration) error { + var kill *time.Timer + term := time.AfterFunc(timeout, func() { + err := c.Process.Signal(syscall.SIGTERM) + if err != nil { + log.Printf("E! [agent] Error terminating process: %s", err) + return + } + + kill = time.AfterFunc(KillGrace, func() { + err := c.Process.Kill() + if err != nil { + log.Printf("E! [agent] Error killing process: %s", err) + return + } + }) + }) + + err := c.Wait() + + // Shutdown all timers + if kill != nil { + kill.Stop() + } + termSent := !term.Stop() + + // If the process exited without error treat it as success. This allows a + // process to do a clean shutdown on signal. + if err == nil { + return nil + } + + // If SIGTERM was sent then treat any process error as a timeout. + if termSent { + return TimeoutErr + } + + // Otherwise there was an error unrelated to termination. + return err +} diff --git a/internal/exec_windows.go b/internal/exec_windows.go new file mode 100644 index 000000000..f010bdd96 --- /dev/null +++ b/internal/exec_windows.go @@ -0,0 +1,41 @@ +// +build windows + +package internal + +import ( + "log" + "os/exec" + "time" +) + +// WaitTimeout waits for the given command to finish with a timeout. +// It assumes the command has already been started. +// If the command times out, it attempts to kill the process. +func WaitTimeout(c *exec.Cmd, timeout time.Duration) error { + timer := time.AfterFunc(timeout, func() { + err := c.Process.Kill() + if err != nil { + log.Printf("E! [agent] Error killing process: %s", err) + return + } + }) + + err := c.Wait() + + // Shutdown all timers + termSent := !timer.Stop() + + // If the process exited without error treat it as success. This allows a + // process to do a clean shutdown on signal. + if err == nil { + return nil + } + + // If SIGTERM was sent then treat any process error as a timeout. + if termSent { + return TimeoutErr + } + + // Otherwise there was an error unrelated to termination. + return err +} diff --git a/internal/internal.go b/internal/internal.go index 893f34383..13c851a8d 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -9,7 +9,6 @@ import ( "errors" "fmt" "io" - "log" "math" "math/big" "os" @@ -200,54 +199,6 @@ func SnakeCase(in string) string { return string(out) } -// CombinedOutputTimeout runs the given command with the given timeout and -// returns the combined output of stdout and stderr. 
-// If the command times out, it attempts to kill the process. -func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) { - var b bytes.Buffer - c.Stdout = &b - c.Stderr = &b - if err := c.Start(); err != nil { - return nil, err - } - err := WaitTimeout(c, timeout) - return b.Bytes(), err -} - -// RunTimeout runs the given command with the given timeout. -// If the command times out, it attempts to kill the process. -func RunTimeout(c *exec.Cmd, timeout time.Duration) error { - if err := c.Start(); err != nil { - return err - } - return WaitTimeout(c, timeout) -} - -// WaitTimeout waits for the given command to finish with a timeout. -// It assumes the command has already been started. -// If the command times out, it attempts to kill the process. -func WaitTimeout(c *exec.Cmd, timeout time.Duration) error { - timer := time.AfterFunc(timeout, func() { - err := c.Process.Kill() - if err != nil { - log.Printf("E! [agent] Error killing process: %s", err) - return - } - }) - - err := c.Wait() - if err == nil { - timer.Stop() - return nil - } - - if !timer.Stop() { - return TimeoutErr - } - - return err -} - // RandomSleep will sleep for a random amount of time up to max. // If the shutdown channel is closed, it will return before it has finished // sleeping. From 1848adaf799bd56a1d04958b06cc313ebbef3af3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 22 Aug 2019 10:51:15 -0700 Subject: [PATCH 1136/1815] Update to Go 1.12.9 and 1.11.13 (#6303) --- .circleci/config.yml | 4 ++-- Makefile | 13 ++++--------- appveyor.yml | 4 ++-- scripts/ci-1.11.docker | 2 +- scripts/ci-1.12.docker | 2 +- 5 files changed, 10 insertions(+), 15 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 27da00e02..2c8713b19 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,10 +9,10 @@ defaults: - image: 'quay.io/influxdb/telegraf-ci:1.10.8' go-1_11: &go-1_11 docker: - - image: 'quay.io/influxdb/telegraf-ci:1.11.10' + - image: 'quay.io/influxdb/telegraf-ci:1.11.13' go-1_12: &go-1_12 docker: - - image: 'quay.io/influxdb/telegraf-ci:1.12.5' + - image: 'quay.io/influxdb/telegraf-ci:1.12.9' version: 2 jobs: diff --git a/Makefile b/Makefile index 6c9717c5d..3c0fb3952 100644 --- a/Makefile +++ b/Makefile @@ -131,20 +131,15 @@ plugin-%: .PHONY: ci-1.12 ci-1.12: - docker build -t quay.io/influxdb/telegraf-ci:1.12.5 - < scripts/ci-1.12.docker - docker push quay.io/influxdb/telegraf-ci:1.12.5 + docker build -t quay.io/influxdb/telegraf-ci:1.12.9 - < scripts/ci-1.12.docker + docker push quay.io/influxdb/telegraf-ci:1.12.9 .PHONY: ci-1.11 ci-1.11: - docker build -t quay.io/influxdb/telegraf-ci:1.11.10 - < scripts/ci-1.11.docker - docker push quay.io/influxdb/telegraf-ci:1.11.10 + docker build -t quay.io/influxdb/telegraf-ci:1.11.13 - < scripts/ci-1.11.docker + docker push quay.io/influxdb/telegraf-ci:1.11.13 .PHONY: ci-1.10 ci-1.10: docker build -t quay.io/influxdb/telegraf-ci:1.10.8 - < scripts/ci-1.10.docker docker push quay.io/influxdb/telegraf-ci:1.10.8 - -.PHONY: ci-1.9 -ci-1.9: - docker build -t quay.io/influxdb/telegraf-ci:1.9.7 - < scripts/ci-1.9.docker - docker push quay.io/influxdb/telegraf-ci:1.9.7 diff --git a/appveyor.yml b/appveyor.yml index 46dcf97ba..c2349dd32 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -13,11 +13,11 @@ platform: x64 install: - IF NOT EXIST "C:\Cache" mkdir C:\Cache - - IF NOT EXIST "C:\Cache\go1.12.5.msi" curl -o "C:\Cache\go1.12.5.msi" https://storage.googleapis.com/golang/go1.12.5.windows-amd64.msi + - IF NOT EXIST 
"C:\Cache\go1.12.9.msi" curl -o "C:\Cache\go1.12.9.msi" https://storage.googleapis.com/golang/go1.12.9.windows-amd64.msi - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip - IF EXIST "C:\Go" rmdir /S /Q C:\Go - - msiexec.exe /i "C:\Cache\go1.12.5.msi" /quiet + - msiexec.exe /i "C:\Cache\go1.12.9.msi" /quiet - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y - go get -d github.com/golang/dep diff --git a/scripts/ci-1.11.docker b/scripts/ci-1.11.docker index 5e4cb5662..93f2d64b6 100644 --- a/scripts/ci-1.11.docker +++ b/scripts/ci-1.11.docker @@ -1,4 +1,4 @@ -FROM golang:1.11.10 +FROM golang:1.11.13 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/ci-1.12.docker b/scripts/ci-1.12.docker index 760c50a44..f5b093413 100644 --- a/scripts/ci-1.12.docker +++ b/scripts/ci-1.12.docker @@ -1,4 +1,4 @@ -FROM golang:1.12.5 +FROM golang:1.12.9 RUN chmod -R 755 "$GOPATH" From 66d6b1f1d370116ba2859b1864d36d7269d40c10 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 22 Aug 2019 20:00:48 -0700 Subject: [PATCH 1137/1815] Split out -w argument in iptables input (#6304) --- plugins/inputs/iptables/iptables.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/iptables/iptables.go b/plugins/inputs/iptables/iptables.go index 21f6642a9..d2598cd0d 100644 --- a/plugins/inputs/iptables/iptables.go +++ b/plugins/inputs/iptables/iptables.go @@ -37,7 +37,7 @@ func (ipt *Iptables) SampleConfig() string { ## iptables can be restricted to only list command "iptables -nvL". use_sudo = false ## Setting 'use_lock' to true runs iptables with the "-w" option. - ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl") + ## Adjust your sudo settings appropriately if using this option ("iptables -w 5 -nvl") use_lock = false ## Define an alternate executable, such as "ip6tables". Default is "iptables". # binary = "ip6tables" @@ -89,11 +89,10 @@ func (ipt *Iptables) chainList(table, chain string) (string, error) { name = "sudo" args = append(args, iptablePath) } - iptablesBaseArgs := "-nvL" if ipt.UseLock { - iptablesBaseArgs = "-wnvL" + args = append(args, "-w", "5") } - args = append(args, iptablesBaseArgs, chain, "-t", table, "-x") + args = append(args, "-nvL", chain, "-t", table, "-x") c := exec.Command(name, args...) out, err := c.Output() return string(out), err From d52c733c3b50357e764d9e7ef6576bc3814b2011 Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Sat, 24 Aug 2019 00:55:56 -0400 Subject: [PATCH 1138/1815] Add supported versions to vsphere README (#6312) --- plugins/inputs/vsphere/README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md index 7689f45da..4009c8cde 100644 --- a/plugins/inputs/vsphere/README.md +++ b/plugins/inputs/vsphere/README.md @@ -7,6 +7,9 @@ The VMware vSphere plugin uses the vSphere API to gather metrics from multiple v * VMs * Datastores +## Supported versions of vSphere +This plugin supports vSphere version 5.5 through 6.7. + ## Configuration NOTE: To disable collection of a specific resource type, simply exclude all metrics using the XX_metric_exclude. 
From 718f60bb4a0d6400cc61820de667d746b4ba973a Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Fri, 23 Aug 2019 21:57:52 -0700
Subject: [PATCH 1139/1815] Add table name to jenkins example configuration

---
 plugins/inputs/jenkins/README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md
index 8d375f087..79f55e6aa 100644
--- a/plugins/inputs/jenkins/README.md
+++ b/plugins/inputs/jenkins/README.md
@@ -7,6 +7,7 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API

 ### Configuration:

 ```toml
+[[inputs.jenkins]]
   ## The Jenkins URL
   url = "http://my-jenkins-instance:8080"
 #   username = "admin"

From 628edfa9b42ec6260d7075c98eca6a2f97a3ff16 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 26 Aug 2019 16:05:56 -0700
Subject: [PATCH 1140/1815] Add support for parked process state on Linux
 (#6308)

---
 plugins/inputs/processes/README.md         |  1 +
 plugins/inputs/processes/processes.go      |  5 +++
 plugins/inputs/processes/processes_test.go | 52 ++++++++++++++++++++++
 3 files changed, 58 insertions(+)

diff --git a/plugins/inputs/processes/README.md b/plugins/inputs/processes/README.md
index 3c2e27291..4113f0d3a 100644
--- a/plugins/inputs/processes/README.md
+++ b/plugins/inputs/processes/README.md
@@ -32,6 +32,7 @@ Using the environment variable `HOST_PROC` the plugin will retrieve process info
     - wait (freebsd only)
     - idle (bsd and Linux 4+ only)
     - paging (linux only)
+    - parked (linux only)
     - total_threads (linux only)

 ### Process State Mappings

diff --git a/plugins/inputs/processes/processes.go b/plugins/inputs/processes/processes.go
index c71d72f50..379a9cb37 100644
--- a/plugins/inputs/processes/processes.go
+++ b/plugins/inputs/processes/processes.go
@@ -178,6 +178,11 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error {
 			fields["paging"] = fields["paging"].(int64) + int64(1)
 		case 'I':
 			fields["idle"] = fields["idle"].(int64) + int64(1)
+		case 'P':
+			if _, ok := fields["parked"]; !ok {
+				fields["parked"] = int64(0)
+			}
+			fields["parked"] = fields["parked"].(int64) + int64(1)
 		default:
 			log.Printf("I! 
processes: Unknown state [ %s ] in file %s", string(stats[0][0]), filename) diff --git a/plugins/inputs/processes/processes_test.go b/plugins/inputs/processes/processes_test.go index 27fdf76a1..f9bad4b60 100644 --- a/plugins/inputs/processes/processes_test.go +++ b/plugins/inputs/processes/processes_test.go @@ -6,7 +6,9 @@ import ( "fmt" "runtime" "testing" + "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -107,6 +109,56 @@ func TestFromProcFilesWithSpaceInCmd(t *testing.T) { acc.AssertContainsTaggedFields(t, "processes", fields, map[string]string{}) } +// Based on `man 5 proc`, parked processes an be found in a +// limited range of Linux versions: +// +// > P Parked (Linux 3.9 to 3.13 only) +// +// However, we have had reports of this process state on Ubuntu +// Bionic w/ Linux 4.15 (#6270) +func TestParkedProcess(t *testing.T) { + procstat := `88 (watchdog/13) P 2 0 0 0 -1 69238848 0 0 0 0 0 0 0 0 20 0 1 0 20 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 1 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +` + plugin := &Processes{ + readProcFile: func(string) ([]byte, error) { + return []byte(procstat), nil + }, + forceProc: true, + } + + var acc testutil.Accumulator + err := plugin.Gather(&acc) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "processes", + map[string]string{}, + map[string]interface{}{ + "blocked": 0, + "dead": 0, + "idle": 0, + "paging": 0, + "parked": 1, + "running": 0, + "sleeping": 0, + "stopped": 0, + "unknown": 0, + "zombies": 0, + }, + time.Unix(0, 0), + telegraf.Untyped, + ), + } + actual := acc.GetTelegrafMetrics() + for _, a := range actual { + a.RemoveField("total") + a.RemoveField("total_threads") + } + testutil.RequireMetricsEqual(t, expected, actual, + testutil.IgnoreTime()) +} + func testExecPS() ([]byte, error) { return []byte(testPSOut), nil } From e7b783d39713c99b81030fd9b36fe23374710d8a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 26 Aug 2019 16:16:44 -0700 Subject: [PATCH 1141/1815] Remove leading slash from rcon commands (#6315) This is required when using the Spigot Minecraft server and compatible with the vanilla server. 
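The change itself only rewrites the two `scoreboard` command literals in client.go. For illustration, a defensive variant sketched below strips any leading slash in one place so future call sites cannot reintroduce it; this assumes the same `Execute(string)` shape as the plugin's connection type and is not the committed approach:

```go
package main

import (
	"fmt"
	"strings"
)

// executor stands in for the plugin's rcon connection type; only the
// Execute method signature is assumed here.
type executor interface {
	Execute(command string) (string, error)
}

// run sends a command without a leading slash: Spigot does not accept
// slash-prefixed commands over RCON, while the vanilla server accepts
// both forms.
func run(conn executor, command string) (string, error) {
	return conn.Execute(strings.TrimPrefix(command, "/"))
}

type fakeConn struct{}

func (fakeConn) Execute(cmd string) (string, error) {
	return "sent: " + cmd, nil
}

func main() {
	out, _ := run(fakeConn{}, "/scoreboard players list")
	fmt.Println(out) // sent: scoreboard players list
}
```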
--- plugins/inputs/minecraft/client.go | 4 ++-- plugins/inputs/minecraft/client_test.go | 30 ++++++++++++------------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/plugins/inputs/minecraft/client.go b/plugins/inputs/minecraft/client.go index a46709993..30f56213a 100644 --- a/plugins/inputs/minecraft/client.go +++ b/plugins/inputs/minecraft/client.go @@ -84,7 +84,7 @@ func (c *client) Players() ([]string, error) { } } - resp, err := c.conn.Execute("/scoreboard players list") + resp, err := c.conn.Execute("scoreboard players list") if err != nil { c.conn = nil return nil, err @@ -107,7 +107,7 @@ func (c *client) Scores(player string) ([]Score, error) { } } - resp, err := c.conn.Execute("/scoreboard players list " + player) + resp, err := c.conn.Execute("scoreboard players list " + player) if err != nil { c.conn = nil return nil, err diff --git a/plugins/inputs/minecraft/client_test.go b/plugins/inputs/minecraft/client_test.go index 7c1f871ac..767a0c30e 100644 --- a/plugins/inputs/minecraft/client_test.go +++ b/plugins/inputs/minecraft/client_test.go @@ -31,63 +31,63 @@ func TestClient_Player(t *testing.T) { { name: "minecraft 1.12 no players", commands: map[string]string{ - "/scoreboard players list": "There are no tracked players on the scoreboard", + "scoreboard players list": "There are no tracked players on the scoreboard", }, expected: []string{}, }, { name: "minecraft 1.12 single player", commands: map[string]string{ - "/scoreboard players list": "Showing 1 tracked players on the scoreboard:Etho", + "scoreboard players list": "Showing 1 tracked players on the scoreboard:Etho", }, expected: []string{"Etho"}, }, { name: "minecraft 1.12 two players", commands: map[string]string{ - "/scoreboard players list": "Showing 2 tracked players on the scoreboard:Etho and torham", + "scoreboard players list": "Showing 2 tracked players on the scoreboard:Etho and torham", }, expected: []string{"Etho", "torham"}, }, { name: "minecraft 1.12 three players", commands: map[string]string{ - "/scoreboard players list": "Showing 3 tracked players on the scoreboard:Etho, notch and torham", + "scoreboard players list": "Showing 3 tracked players on the scoreboard:Etho, notch and torham", }, expected: []string{"Etho", "notch", "torham"}, }, { name: "minecraft 1.12 players space in username", commands: map[string]string{ - "/scoreboard players list": "Showing 4 tracked players on the scoreboard:with space, Etho, notch and torham", + "scoreboard players list": "Showing 4 tracked players on the scoreboard:with space, Etho, notch and torham", }, expected: []string{"with space", "Etho", "notch", "torham"}, }, { name: "minecraft 1.12 players and in username", commands: map[string]string{ - "/scoreboard players list": "Showing 5 tracked players on the scoreboard:left and right, with space,Etho, notch and torham", + "scoreboard players list": "Showing 5 tracked players on the scoreboard:left and right, with space,Etho, notch and torham", }, expected: []string{"left and right", "with space", "Etho", "notch", "torham"}, }, { name: "minecraft 1.13 no players", commands: map[string]string{ - "/scoreboard players list": "There are no tracked entities", + "scoreboard players list": "There are no tracked entities", }, expected: []string{}, }, { name: "minecraft 1.13 single player", commands: map[string]string{ - "/scoreboard players list": "There are 1 tracked entities: torham", + "scoreboard players list": "There are 1 tracked entities: torham", }, expected: []string{"torham"}, }, { name: "minecraft 1.13 
multiple player", commands: map[string]string{ - "/scoreboard players list": "There are 3 tracked entities: Etho, notch, torham", + "scoreboard players list": "There are 3 tracked entities: Etho, notch, torham", }, expected: []string{"Etho", "notch", "torham"}, }, @@ -120,7 +120,7 @@ func TestClient_Scores(t *testing.T) { name: "minecraft 1.12 player with no scores", player: "Etho", commands: map[string]string{ - "/scoreboard players list Etho": "Player Etho has no scores recorded", + "scoreboard players list Etho": "Player Etho has no scores recorded", }, expected: []Score{}, }, @@ -128,7 +128,7 @@ func TestClient_Scores(t *testing.T) { name: "minecraft 1.12 player with one score", player: "Etho", commands: map[string]string{ - "/scoreboard players list Etho": "Showing 1 tracked objective(s) for Etho:- jump: 2 (jump)", + "scoreboard players list Etho": "Showing 1 tracked objective(s) for Etho:- jump: 2 (jump)", }, expected: []Score{ {Name: "jump", Value: 2}, @@ -138,7 +138,7 @@ func TestClient_Scores(t *testing.T) { name: "minecraft 1.12 player with many scores", player: "Etho", commands: map[string]string{ - "/scoreboard players list Etho": "Showing 3 tracked objective(s) for Etho:- hopper: 2 (hopper)- dropper: 2 (dropper)- redstone: 1 (redstone)", + "scoreboard players list Etho": "Showing 3 tracked objective(s) for Etho:- hopper: 2 (hopper)- dropper: 2 (dropper)- redstone: 1 (redstone)", }, expected: []Score{ {Name: "hopper", Value: 2}, @@ -150,7 +150,7 @@ func TestClient_Scores(t *testing.T) { name: "minecraft 1.13 player with no scores", player: "Etho", commands: map[string]string{ - "/scoreboard players list Etho": "Etho has no scores to show", + "scoreboard players list Etho": "Etho has no scores to show", }, expected: []Score{}, }, @@ -158,7 +158,7 @@ func TestClient_Scores(t *testing.T) { name: "minecraft 1.13 player with one score", player: "Etho", commands: map[string]string{ - "/scoreboard players list Etho": "Etho has 1 scores:[jumps]: 1", + "scoreboard players list Etho": "Etho has 1 scores:[jumps]: 1", }, expected: []Score{ {Name: "jumps", Value: 1}, @@ -168,7 +168,7 @@ func TestClient_Scores(t *testing.T) { name: "minecraft 1.13 player with many scores", player: "Etho", commands: map[string]string{ - "/scoreboard players list Etho": "Etho has 3 scores:[hopper]: 2[dropper]: 2[redstone]: 1", + "scoreboard players list Etho": "Etho has 3 scores:[hopper]: 2[dropper]: 2[redstone]: 1", }, expected: []Score{ {Name: "hopper", Value: 2}, From 818d511749c48a100b33c756070759f35d9ce320 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 26 Aug 2019 16:25:35 -0700 Subject: [PATCH 1142/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1fa6c91eb..d82471115 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -95,11 +95,17 @@ - [#6232](https://github.com/influxdata/telegraf/issues/6232): Fix persistent session in mqtt_consumer. - [#6235](https://github.com/influxdata/telegraf/issues/6235): Fix finder inconsistencies in vsphere input. - [#6138](https://github.com/influxdata/telegraf/issues/6138): Fix parsing multiple metrics on the first line of tailed file. +- [#2526](https://github.com/influxdata/telegraf/issues/2526): Send TERM to exec processes before sending KILL signal. ## v1.11.5 [unreleased] - [#6250](https://github.com/influxdata/telegraf/pull/6250): Update go-sql-driver/mysql driver to 1.4.1 to address auth issues. 
- [#6279](https://github.com/influxdata/telegraf/issues/6279): Return error status from --test if input plugins produce an error.
+- [#6309](https://github.com/influxdata/telegraf/issues/6309): Fix smart input using only the last configuration when multiple instances are defined.
+- [#6303](https://github.com/influxdata/telegraf/pull/6303): Build official packages with Go 1.12.9.
+- [#6234](https://github.com/influxdata/telegraf/issues/6234): Split out -w argument in iptables input.
+- [#6270](https://github.com/influxdata/telegraf/issues/6270): Add support for parked process state on Linux.
+- [#6287](https://github.com/influxdata/telegraf/issues/6287): Remove leading slash from rcon command.

 ## v1.11.4 [2019-08-06]

From acedbe0633f799afd59ab70e8f9dd8fed9f74345 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 26 Aug 2019 16:29:45 -0700
Subject: [PATCH 1143/1815] Promote the use of http as the scheme over tcp in
 health output (#6311)

---
 plugins/outputs/health/README.md      |  13 ++--
 plugins/outputs/health/health.go      | 103 ++++++++++++++++----------
 plugins/outputs/health/health_test.go |  82 +++++++++++++++++++-
 3 files changed, 151 insertions(+), 47 deletions(-)

diff --git a/plugins/outputs/health/README.md b/plugins/outputs/health/README.md
index 5ef30fd57..0a56d5192 100644
--- a/plugins/outputs/health/README.md
+++ b/plugins/outputs/health/README.md
@@ -11,9 +11,9 @@ must fail in order for the resource to enter the failed state.
 ```toml
 [[outputs.health]]
   ## Address and port to listen on.
-  ##   ex: service_address = "tcp://localhost:8080"
+  ##   ex: service_address = "http://localhost:8080"
   ##       service_address = "unix:///var/run/telegraf-health.sock"
-  # service_address = "tcp://:8080"
+  # service_address = "http://:8080"

   ## The maximum duration for reading the entire request.
   # read_timeout = "5s"
@@ -51,11 +51,14 @@ must fail in order for the resource to enter the failed state.
 #### compares

 The `compares` check is used to assert basic mathematical relationships.  Use
-it by choosing a field key and one or more comparisons.  All comparisons must
-be true on all metrics for the check to pass.  If the field is not found on a
-metric no comparison will be made.
+it by choosing a field key and one or more comparisons that must hold true.  If
+the field is not found on a metric, no comparison will be made.
+
+Comparisons must hold true on all metrics for the check to pass.

 #### contains

 The `contains` check can be used to require a field key to exist on at least
 one metric.
+
+If the field is found on any metric, the check passes.

diff --git a/plugins/outputs/health/health.go b/plugins/outputs/health/health.go
index c7c2cc547..a6db63183 100644
--- a/plugins/outputs/health/health.go
+++ b/plugins/outputs/health/health.go
@@ -3,6 +3,7 @@ package health
 import (
 	"context"
 	"crypto/tls"
+	"errors"
 	"log"
 	"net"
 	"net/http"
@@ -24,9 +25,9 @@ const (
 var sampleConfig = `
   ## Address and port to listen on.
-  ##   ex: service_address = "tcp://localhost:8080"
+  ##   ex: service_address = "http://localhost:8080"
   ##       service_address = "unix:///var/run/telegraf-health.sock"
-  # service_address = "tcp://:8080"
+  # service_address = "http://:8080"

   ## The maximum duration for reading the entire request. 
# read_timeout = "5s" @@ -78,9 +79,12 @@ type Health struct { Contains []*Contains `toml:"contains"` checkers []Checker - wg sync.WaitGroup - server *http.Server - origin string + wg sync.WaitGroup + server *http.Server + origin string + network string + address string + tlsConf *tls.Config mu sync.Mutex healthy bool @@ -94,8 +98,31 @@ func (h *Health) Description() string { return "Configurable HTTP health check resource based on metrics" } -// Connect starts the HTTP server. -func (h *Health) Connect() error { +func (h *Health) Init() error { + u, err := url.Parse(h.ServiceAddress) + if err != nil { + return err + } + + switch u.Scheme { + case "http", "https": + h.network = "tcp" + h.address = u.Host + case "unix": + h.network = u.Scheme + h.address = u.Path + case "tcp4", "tcp6", "tcp": + h.network = u.Scheme + h.address = u.Host + default: + return errors.New("service_address contains invalid scheme") + } + + h.tlsConf, err = h.ServerConfig.TLSConfig() + if err != nil { + return err + } + h.checkers = make([]Checker, 0) for i := range h.Compares { h.checkers = append(h.checkers, h.Compares[i]) @@ -104,11 +131,11 @@ func (h *Health) Connect() error { h.checkers = append(h.checkers, h.Contains[i]) } - tlsConf, err := h.ServerConfig.TLSConfig() - if err != nil { - return err - } + return nil +} +// Connect starts the HTTP server. +func (h *Health) Connect() error { authHandler := internal.AuthHandler(h.BasicUsername, h.BasicPassword, onAuthError) h.server = &http.Server{ @@ -116,15 +143,15 @@ func (h *Health) Connect() error { Handler: authHandler(h), ReadTimeout: h.ReadTimeout.Duration, WriteTimeout: h.WriteTimeout.Duration, - TLSConfig: tlsConf, + TLSConfig: h.tlsConf, } - listener, err := h.listen(tlsConf) + listener, err := h.listen() if err != nil { return err } - h.origin = h.getOrigin(listener, tlsConf) + h.origin = h.getOrigin(listener) log.Printf("I! 
[outputs.health] Listening on %s", h.origin) @@ -145,25 +172,12 @@ func onAuthError(rw http.ResponseWriter, code int) { http.Error(rw, http.StatusText(code), code) } -func (h *Health) listen(tlsConf *tls.Config) (net.Listener, error) { - u, err := url.Parse(h.ServiceAddress) - if err != nil { - return nil, err - } - - network := "tcp" - address := u.Host - if u.Host == "" { - network = "unix" - address = u.Path - } - - if tlsConf != nil { - return tls.Listen(network, address, tlsConf) +func (h *Health) listen() (net.Listener, error) { + if h.tlsConf != nil { + return tls.Listen(h.network, h.address, h.tlsConf) } else { - return net.Listen(network, address) + return net.Listen(h.network, h.address) } - } func (h *Health) ServeHTTP(rw http.ResponseWriter, req *http.Request) { @@ -205,23 +219,30 @@ func (h *Health) Origin() string { return h.origin } -func (h *Health) getOrigin(listener net.Listener, tlsConf *tls.Config) string { - switch listener.Addr().Network() { - case "tcp": - scheme := "http" - if tlsConf != nil { - scheme = "https" +func (h *Health) getOrigin(listener net.Listener) string { + scheme := "http" + if h.tlsConf != nil { + scheme = "https" + } + if h.network == "unix" { + scheme = "unix" + } + + switch h.network { + case "unix": + origin := &url.URL{ + Scheme: scheme, + Path: listener.Addr().String(), } + return origin.String() + default: origin := &url.URL{ Scheme: scheme, Host: listener.Addr().String(), } return origin.String() - case "unix": - return listener.Addr().String() - default: - return "" } + } func (h *Health) setHealthy(healthy bool) { diff --git a/plugins/outputs/health/health_test.go b/plugins/outputs/health/health_test.go index 234b0251c..5bf35ad83 100644 --- a/plugins/outputs/health/health_test.go +++ b/plugins/outputs/health/health_test.go @@ -12,6 +12,8 @@ import ( "github.com/stretchr/testify/require" ) +var pki = testutil.NewPKI("../../../testutil/pki") + func TestHealth(t *testing.T) { type Options struct { Compares []*health.Compares `toml:"compares"` @@ -105,7 +107,11 @@ func TestHealth(t *testing.T) { output.Compares = tt.options.Compares output.Contains = tt.options.Contains - err := output.Connect() + err := output.Init() + require.NoError(t, err) + + err = output.Connect() + require.NoError(t, err) err = output.Write(tt.metrics) require.NoError(t, err) @@ -122,3 +128,77 @@ func TestHealth(t *testing.T) { }) } } + +func TestInitServiceAddress(t *testing.T) { + tests := []struct { + name string + plugin *health.Health + err bool + origin string + }{ + { + name: "port without scheme is not allowed", + plugin: &health.Health{ + ServiceAddress: ":8080", + }, + err: true, + }, + { + name: "path without scheme is not allowed", + plugin: &health.Health{ + ServiceAddress: "/tmp/telegraf", + }, + err: true, + }, + { + name: "tcp with port maps to http", + plugin: &health.Health{ + ServiceAddress: "tcp://:8080", + }, + }, + { + name: "tcp with tlsconf maps to https", + plugin: &health.Health{ + ServiceAddress: "tcp://:8080", + ServerConfig: *pki.TLSServerConfig(), + }, + }, + { + name: "tcp4 is allowed", + plugin: &health.Health{ + ServiceAddress: "tcp4://:8080", + }, + }, + { + name: "tcp6 is allowed", + plugin: &health.Health{ + ServiceAddress: "tcp6://:8080", + }, + }, + { + name: "http scheme", + plugin: &health.Health{ + ServiceAddress: "http://:8080", + }, + }, + { + name: "https scheme", + plugin: &health.Health{ + ServiceAddress: "https://:8080", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + output := 
health.NewHealth() + output.ServiceAddress = tt.plugin.ServiceAddress + + err := output.Init() + if tt.err { + require.Error(t, err) + return + } + require.NoError(t, err) + }) + } +} From 17a79e2d6f950a1b4f49804d8bba41625fd43dda Mon Sep 17 00:00:00 2001 From: Jesse Hanley Date: Mon, 26 Aug 2019 22:03:35 -0400 Subject: [PATCH 1144/1815] Allow jobs with dashes in the name in lustre2 input (#6313) --- plugins/inputs/lustre2/lustre2.go | 2 +- plugins/inputs/lustre2/lustre2_test.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index 026636dd2..4af999b71 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -377,7 +377,7 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, a if err != nil { return err } - jobs := strings.Split(string(wholeFile), "-") + jobs := strings.Split(string(wholeFile), "- ") for _, job := range jobs { lines := strings.Split(string(job), "\n") jobid := "" diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go index 67cf4216b..8e93da8e8 100644 --- a/plugins/inputs/lustre2/lustre2_test.go +++ b/plugins/inputs/lustre2/lustre2_test.go @@ -42,7 +42,7 @@ cache_miss 11653333250 samples [pages] 1 1 11653333250 ` const obdfilterJobStatsContents = `job_stats: -- job_id: testjob1 +- job_id: cluster-testjob1 snapshot_time: 1461772761 read_bytes: { samples: 1, unit: bytes, min: 4096, max: 4096, sum: 4096 } write_bytes: { samples: 25, unit: bytes, min: 1048576, max: 1048576, sum: 26214400 } @@ -92,7 +92,7 @@ crossdir_rename 369571 samples [reqs] ` const mdtJobStatsContents = `job_stats: -- job_id: testjob1 +- job_id: cluster-testjob1 snapshot_time: 1461772761 open: { samples: 5, unit: reqs } close: { samples: 4, unit: reqs } @@ -207,7 +207,7 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/" ost_name := "OST0001" - job_names := []string{"testjob1", "testjob2"} + job_names := []string{"cluster-testjob1", "testjob2"} mdtdir := tempdir + "/mdt/" err := os.MkdirAll(mdtdir+"/"+ost_name, 0755) From 701339b024e15ba3d24a4443a8c375f40f1fc5a2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 26 Aug 2019 19:04:59 -0700 Subject: [PATCH 1145/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d82471115..a6ca48e0b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -106,6 +106,7 @@ - [#6234](https://github.com/influxdata/telegraf/issues/6234): Split out -w argument in iptables input. - [#6270](https://github.com/influxdata/telegraf/issues/6270): Add support for parked process state on Linux. - [#6287](https://github.com/influxdata/telegraf/issues/6287): Remove leading slash from rcon command. +- [#6313](https://github.com/influxdata/telegraf/pull/6313): Allow jobs with dashes in the name in lustre2 input. 
## v1.11.4 [2019-08-06] From 2d2e793c907db577636bd4643e5a9824d531c949 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 27 Aug 2019 10:31:42 -0700 Subject: [PATCH 1146/1815] Query oplog only when connected to a replica set (#6307) --- agent/agent.go | 2 +- plugins/inputs/mongodb/mongodb_data.go | 6 +- plugins/inputs/mongodb/mongodb_data_test.go | 1 - plugins/inputs/mongodb/mongodb_server.go | 253 ++++++++++++-------- plugins/inputs/mongodb/mongostat.go | 111 +++++---- 5 files changed, 220 insertions(+), 153 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 700bccb05..e2ef79b84 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -210,7 +210,7 @@ func (a *Agent) Test(ctx context.Context, waitDuration time.Duration) error { // Special instructions for some inputs. cpu, for example, needs to be // run twice in order to return cpu usage percentages. switch input.Config.Name { - case "inputs.cpu", "inputs.mongodb", "inputs.procstat": + case "cpu", "mongodb", "procstat": nulAcc := NewAccumulator(input, nulC) nulAcc.SetPrecision(a.Precision()) if err := input.Input.Gather(nulAcc); err != nil { diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index c218fd3ad..6f999cbd7 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -101,7 +101,6 @@ var DefaultReplStats = map[string]string{ "member_status": "NodeType", "state": "NodeState", "repl_lag": "ReplLag", - "repl_oplog_window_sec": "OplogTimeDiff", } var DefaultClusterStats = map[string]string{ @@ -230,6 +229,11 @@ func (d *MongodbData) AddDefaultStats() { if d.StatLine.NodeType != "" { d.addStat(statLine, DefaultReplStats) } + + if d.StatLine.OplogStats != nil { + d.add("repl_oplog_window_sec", d.StatLine.OplogStats.TimeDiff) + } + d.addStat(statLine, DefaultClusterStats) d.addStat(statLine, DefaultShardStats) if d.StatLine.StorageEngine == "mmapv1" || d.StatLine.StorageEngine == "rocksdb" { diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index da50bdc9e..527e7ab93 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -232,7 +232,6 @@ func TestStateTag(t *testing.T) { "repl_updates": int64(0), "repl_updates_per_sec": int64(0), "repl_lag": int64(0), - "repl_oplog_window_sec": int64(0), "resident_megabytes": int64(0), "updates": int64(0), "updates_per_sec": int64(0), diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index 4df14c014..404fa8143 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -39,35 +39,116 @@ func authLogLevel(err error) string { } } -func (s *Server) gatherOplogStats() *OplogStats { - stats := &OplogStats{} - localdb := s.Session.DB("local") +func (s *Server) gatherServerStatus() (*ServerStatus, error) { + serverStatus := &ServerStatus{} + err := s.Session.DB("admin").Run(bson.D{ + { + Name: "serverStatus", + Value: 1, + }, + { + Name: "recordStats", + Value: 0, + }, + }, serverStatus) + if err != nil { + return nil, err + } + return serverStatus, nil +} - op_first := oplogEntry{} - op_last := oplogEntry{} - query := bson.M{"ts": bson.M{"$exists": true}} +func (s *Server) gatherReplSetStatus() (*ReplSetStatus, error) { + replSetStatus := &ReplSetStatus{} + err := s.Session.DB("admin").Run(bson.D{ + { + Name: "replSetGetStatus", + Value: 1, + }, + }, replSetStatus) + if err != nil { + return nil, err + } + return 
replSetStatus, nil +} - for _, collection_name := range []string{"oplog.rs", "oplog.$main"} { - if err := localdb.C(collection_name).Find(query).Sort("$natural").Limit(1).One(&op_first); err != nil { - if err == mgo.ErrNotFound { - continue - } - log.Printf("%s [inputs.mongodb] Error getting first oplog entry: %v", authLogLevel(err), err) - return stats - } - if err := localdb.C(collection_name).Find(query).Sort("-$natural").Limit(1).One(&op_last); err != nil { - if err == mgo.ErrNotFound || IsAuthorization(err) { - continue - } - log.Printf("%s [inputs.mongodb] Error getting first oplog entry: %v", authLogLevel(err), err) - return stats - } +func (s *Server) gatherClusterStatus() (*ClusterStatus, error) { + chunkCount, err := s.Session.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count() + if err != nil { + return nil, err } - op_first_time := time.Unix(int64(op_first.Timestamp>>32), 0) - op_last_time := time.Unix(int64(op_last.Timestamp>>32), 0) - stats.TimeDiff = int64(op_last_time.Sub(op_first_time).Seconds()) - return stats + return &ClusterStatus{ + JumboChunksCount: int64(chunkCount), + }, nil +} + +func (s *Server) gatherShardConnPoolStats() (*ShardStats, error) { + shardStats := &ShardStats{} + err := s.Session.DB("admin").Run(bson.D{ + { + Name: "shardConnPoolStats", + Value: 1, + }, + }, &shardStats) + if err != nil { + return nil, err + } + return shardStats, nil +} + +func (s *Server) gatherDBStats(name string) (*Db, error) { + stats := &DbStatsData{} + err := s.Session.DB(name).Run(bson.D{ + { + Name: "dbStats", + Value: 1, + }, + }, stats) + if err != nil { + return nil, err + } + + return &Db{ + Name: name, + DbStatsData: stats, + }, nil +} + +func (s *Server) getOplogReplLag(collection string) (*OplogStats, error) { + query := bson.M{"ts": bson.M{"$exists": true}} + + var first oplogEntry + err := s.Session.DB("local").C(collection).Find(query).Sort("$natural").Limit(1).One(&first) + if err != nil { + return nil, err + } + + var last oplogEntry + err = s.Session.DB("local").C(collection).Find(query).Sort("-$natural").Limit(1).One(&last) + if err != nil { + return nil, err + } + + firstTime := time.Unix(int64(first.Timestamp>>32), 0) + lastTime := time.Unix(int64(last.Timestamp>>32), 0) + stats := &OplogStats{ + TimeDiff: int64(lastTime.Sub(firstTime).Seconds()), + } + return stats, nil +} + +// The "oplog.rs" collection is stored on all replica set members. +// +// The "oplog.$main" collection is created on the master node of a +// master-slave replicated deployment. As of MongoDB 3.2, master-slave +// replication has been deprecated. 
+func (s *Server) gatherOplogStats() (*OplogStats, error) { + stats, err := s.getOplogReplLag("oplog.rs") + if err == nil { + return stats, nil + } + + return s.getOplogReplLag("oplog.$main") } func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error) { @@ -112,99 +193,71 @@ func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error) func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool, gatherColStats bool, colStatsDbs []string) error { s.Session.SetMode(mgo.Eventual, true) s.Session.SetSocketTimeout(0) - result_server := &ServerStatus{} - err := s.Session.DB("admin").Run(bson.D{ - { - Name: "serverStatus", - Value: 1, - }, - { - Name: "recordStats", - Value: 0, - }, - }, result_server) + + serverStatus, err := s.gatherServerStatus() if err != nil { return err } - result_repl := &ReplSetStatus{} - // ignore error because it simply indicates that the db is not a member - // in a replica set, which is fine. - _ = s.Session.DB("admin").Run(bson.D{ - { - Name: "replSetGetStatus", - Value: 1, - }, - }, result_repl) - jumbo_chunks, _ := s.Session.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count() - - result_cluster := &ClusterStatus{ - JumboChunksCount: int64(jumbo_chunks), - } - - resultShards := &ShardStats{} - err = s.Session.DB("admin").Run(bson.D{ - { - Name: "shardConnPoolStats", - Value: 1, - }, - }, &resultShards) + // Get replica set status, an error indicates that the server is not a + // member of a replica set. + replSetStatus, err := s.gatherReplSetStatus() if err != nil { - if IsAuthorization(err) { - log.Printf("D! [inputs.mongodb] Error getting database shard stats: %v", err) - } else { - log.Printf("E! [inputs.mongodb] Error getting database shard stats: %v", err) - } + log.Printf("D! [inputs.mongodb] Unable to gather replica set status: %v", err) } - oplogStats := s.gatherOplogStats() - - result_db_stats := &DbStats{} - if gatherDbStats == true { - names := []string{} - names, err = s.Session.DatabaseNames() + // Gather the oplog if we are a member of a replica set. Non-replica set + // members do not have the oplog collections. + var oplogStats *OplogStats + if replSetStatus != nil { + oplogStats, err = s.gatherOplogStats() if err != nil { - log.Printf("E! [inputs.mongodb] Error getting database names: %v", err) - } - for _, db_name := range names { - db_stat_line := &DbStatsData{} - err = s.Session.DB(db_name).Run(bson.D{ - { - Name: "dbStats", - Value: 1, - }, - }, db_stat_line) - if err != nil { - log.Printf("E! [inputs.mongodb] Error getting db stats from %q: %v", db_name, err) - } - db := &Db{ - Name: db_name, - DbStatsData: db_stat_line, - } - - result_db_stats.Dbs = append(result_db_stats.Dbs, *db) + return err } } - result_col_stats, err := s.gatherCollectionStats(colStatsDbs) + clusterStatus, err := s.gatherClusterStatus() + if err != nil { + log.Printf("D! [inputs.mongodb] Unable to gather cluster status: %v", err) + } + + shardStats, err := s.gatherShardConnPoolStats() + if err != nil { + log.Printf("%s [inputs.mongodb] Unable to gather shard connection pool stats: %v", + authLogLevel(err), err) + } + + collectionStats, err := s.gatherCollectionStats(colStatsDbs) if err != nil { return err } + dbStats := &DbStats{} + if gatherDbStats { + names, err := s.Session.DatabaseNames() + if err != nil { + return err + } + + for _, name := range names { + db, err := s.gatherDBStats(name) + if err != nil { + log.Printf("D! 
[inputs.mongodb] Error getting db stats from %q: %v", name, err) + } + dbStats.Dbs = append(dbStats.Dbs, *db) + } + } + result := &MongoStatus{ - ServerStatus: result_server, - ReplSetStatus: result_repl, - ClusterStatus: result_cluster, - DbStats: result_db_stats, - ColStats: result_col_stats, - ShardStats: resultShards, + ServerStatus: serverStatus, + ReplSetStatus: replSetStatus, + ClusterStatus: clusterStatus, + DbStats: dbStats, + ColStats: collectionStats, + ShardStats: shardStats, OplogStats: oplogStats, } - defer func() { - s.lastResult = result - }() - result.SampleTime = time.Now() if s.lastResult != nil && result != nil { duration := result.SampleTime.Sub(s.lastResult.SampleTime) @@ -222,6 +275,8 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool, gather data.AddShardHostStats() data.flush(acc) } + + s.lastResult = result return nil } diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 709c074d7..d82100974 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -541,7 +541,7 @@ type StatLine struct { GetMoreR, GetMoreRCnt int64 CommandR, CommandRCnt int64 ReplLag int64 - OplogTimeDiff int64 + OplogStats *OplogStats Flushes, FlushesCnt int64 FlushesTotalTime int64 Mapped, Virtual, Resident, NonMapped int64 @@ -890,66 +890,75 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.NumConnections = newStat.Connections.Current } - newReplStat := *newMongo.ReplSetStatus + if newMongo.ReplSetStatus != nil { + newReplStat := *newMongo.ReplSetStatus - if newReplStat.Members != nil { - myName := newStat.Repl.Me - // Find the master and myself - master := ReplSetMember{} - me := ReplSetMember{} - for _, member := range newReplStat.Members { - if member.Name == myName { - // Store my state string - returnVal.NodeState = member.StateStr - if member.State == 1 { - // I'm the master - returnVal.ReplLag = 0 - break - } else { - // I'm secondary - me = member + if newReplStat.Members != nil { + myName := newStat.Repl.Me + // Find the master and myself + master := ReplSetMember{} + me := ReplSetMember{} + for _, member := range newReplStat.Members { + if member.Name == myName { + // Store my state string + returnVal.NodeState = member.StateStr + if member.State == 1 { + // I'm the master + returnVal.ReplLag = 0 + break + } else { + // I'm secondary + me = member + } + } else if member.State == 1 { + // Master found + master = member } - } else if member.State == 1 { - // Master found - master = member } - } - if me.State == 2 { - // OptimeDate.Unix() type is int64 - lag := master.OptimeDate.Unix() - me.OptimeDate.Unix() - if lag < 0 { - returnVal.ReplLag = 0 - } else { - returnVal.ReplLag = lag + if me.State == 2 { + // OptimeDate.Unix() type is int64 + lag := master.OptimeDate.Unix() - me.OptimeDate.Unix() + if lag < 0 { + returnVal.ReplLag = 0 + } else { + returnVal.ReplLag = lag + } } } } - newClusterStat := *newMongo.ClusterStatus - returnVal.JumboChunksCount = newClusterStat.JumboChunksCount - returnVal.OplogTimeDiff = newMongo.OplogStats.TimeDiff + if newMongo.ClusterStatus != nil { + newClusterStat := *newMongo.ClusterStatus + returnVal.JumboChunksCount = newClusterStat.JumboChunksCount + } - newDbStats := *newMongo.DbStats - for _, db := range newDbStats.Dbs { - dbStatsData := db.DbStatsData - // mongos doesn't have the db key, so setting the db name - if dbStatsData.Db == "" { - dbStatsData.Db = db.Name + if newMongo.OplogStats != nil { + 
returnVal.OplogStats = newMongo.OplogStats + } + + if newMongo.DbStats != nil { + newDbStats := *newMongo.DbStats + for _, db := range newDbStats.Dbs { + dbStatsData := db.DbStatsData + // mongos doesn't have the db key, so setting the db name + if dbStatsData.Db == "" { + dbStatsData.Db = db.Name + } + dbStatLine := &DbStatLine{ + Name: dbStatsData.Db, + Collections: dbStatsData.Collections, + Objects: dbStatsData.Objects, + AvgObjSize: dbStatsData.AvgObjSize, + DataSize: dbStatsData.DataSize, + StorageSize: dbStatsData.StorageSize, + NumExtents: dbStatsData.NumExtents, + Indexes: dbStatsData.Indexes, + IndexSize: dbStatsData.IndexSize, + Ok: dbStatsData.Ok, + } + returnVal.DbStatsLines = append(returnVal.DbStatsLines, *dbStatLine) } - dbStatLine := &DbStatLine{ - Name: dbStatsData.Db, - Collections: dbStatsData.Collections, - Objects: dbStatsData.Objects, - AvgObjSize: dbStatsData.AvgObjSize, - DataSize: dbStatsData.DataSize, - StorageSize: dbStatsData.StorageSize, - NumExtents: dbStatsData.NumExtents, - Indexes: dbStatsData.Indexes, - IndexSize: dbStatsData.IndexSize, - Ok: dbStatsData.Ok, - } - returnVal.DbStatsLines = append(returnVal.DbStatsLines, *dbStatLine) } newColStats := *newMongo.ColStats From 79c6179f31ffede8253db820964df00f12fbce06 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 27 Aug 2019 10:35:15 -0700 Subject: [PATCH 1147/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a6ca48e0b..7f5b0975a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -96,6 +96,7 @@ - [#6235](https://github.com/influxdata/telegraf/issues/6235): Fix finder inconsistencies in vsphere input. - [#6138](https://github.com/influxdata/telegraf/issues/6138): Fix parsing multiple metrics on the first line of tailed file. - [#2526](https://github.com/influxdata/telegraf/issues/2526): Send TERM to exec processes before sending KILL signal. +- [#5326](https://github.com/influxdata/telegraf/issues/5326): Query oplog only when connected to a replica set. ## v1.11.5 [unreleased] From ad49e311e4b3a497a9e993ff71d9b71108728770 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 27 Aug 2019 11:29:11 -0700 Subject: [PATCH 1148/1815] Set 1.11.5 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7f5b0975a..adb4852dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -98,7 +98,7 @@ - [#2526](https://github.com/influxdata/telegraf/issues/2526): Send TERM to exec processes before sending KILL signal. - [#5326](https://github.com/influxdata/telegraf/issues/5326): Query oplog only when connected to a replica set. -## v1.11.5 [unreleased] +## v1.11.5 [2019-08-27] - [#6250](https://github.com/influxdata/telegraf/pull/6250): Update go-sql-driver/mysql driver to 1.4.1 to address auth issues. - [#6279](https://github.com/influxdata/telegraf/issues/6279): Return error status from --test if input plugins produce an error. 
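The mongodb refactor in PATCH 1146 reduces `repl_oplog_window_sec` to a difference of two oplog timestamps, queried only when the server is a replica set member. The non-obvious part of `getOplogReplLag` is the bit shift: a MongoDB timestamp packs the Unix seconds into its upper 32 bits. A minimal sketch of that arithmetic, using hypothetical timestamp values one hour apart:

```go
package main

import (
	"fmt"
	"time"
)

// oplogWindow mirrors the computation in getOplogReplLag: shifting a
// MongoDB timestamp right by 32 bits recovers the Unix seconds, and the
// difference between the newest and oldest oplog entries is the window.
func oplogWindow(first, last uint64) int64 {
	firstTime := time.Unix(int64(first>>32), 0)
	lastTime := time.Unix(int64(last>>32), 0)
	return int64(lastTime.Sub(firstTime).Seconds())
}

func main() {
	// Illustrative values only: two timestamps 3600 seconds apart.
	first := uint64(1566900000) << 32
	last := uint64(1566903600) << 32
	fmt.Println(oplogWindow(first, last)) // 3600
}
```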
From 9f612fd52d63f471d3d8db59551cbdffcec1ef9b Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 27 Aug 2019 12:41:16 -0700
Subject: [PATCH 1149/1815] Update sample configuration

---
 etc/telegraf.conf | 253 +++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 216 insertions(+), 37 deletions(-)

diff --git a/etc/telegraf.conf b/etc/telegraf.conf
index 27d319c20..49edc842f 100644
--- a/etc/telegraf.conf
+++ b/etc/telegraf.conf
@@ -496,6 +496,21 @@
 #   overwrite_template = false


+# # Send metrics to command as input over stdin
+# [[outputs.exec]]
+# ## Command to ingest metrics via stdin.
+# command = ["tee", "-a", "/dev/null"]
+#
+# ## Timeout for command to complete.
+# # timeout = "5s"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"


 # # Send telegraf metrics to file(s)
 # [[outputs.file]]
 # ## Files to write to, "stdout" is a specially handled file.
@@ -555,9 +570,9 @@
 # # Configurable HTTP health check resource based on metrics
 # [[outputs.health]]
 # ## Address and port to listen on.
-# ##   ex: service_address = "tcp://localhost:8080"
+# ##   ex: service_address = "http://localhost:8080"
 # ##       service_address = "unix:///var/run/telegraf-health.sock"
-# # service_address = "tcp://:8080"
+# # service_address = "http://:8080"
 #
 # ## The maximum duration for reading the entire request.
 # # read_timeout = "5s"
@@ -950,6 +965,9 @@
 # ## NATS subject for producer messages
 # subject = "telegraf"
 #
+# ## Use Transport Layer Security
+# # secure = false
+#
 # ## Optional TLS Config
 # # tls_ca = "/etc/telegraf/ca.pem"
 # # tls_cert = "/etc/telegraf/cert.pem"
@@ -1456,6 +1474,20 @@
 # #   measurement = "*"
 # #   old = ":"
 # #   new = "_"
+#
+# ## Trims strings based on width
+# # [[processors.strings.left]]
+# #   field = "message"
+# #   width = 10


+# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.
+# [[processors.tag_limit]]
+# ## Maximum number of tags to preserve
+# limit = 10
+#
+# ## List of tags to preferentially preserve
+# keep = ["foo", "bar", "baz"]


 # # Print all metrics that pass through this filter.
@@ -1748,6 +1780,16 @@
 # #   insecure_skip_verify = false


+# # Monitor APC UPSes connected to apcupsd
+# [[inputs.apcupsd]]
+# # A list of running apcupsd server to connect to.
+# # If not provided will default to tcp://127.0.0.1:3551
+# servers = ["tcp://127.0.0.1:3551"]
+#
+# ## Timeout for dialing server.
+# timeout = "5s"


 # # Gather metrics from Apache Aurora schedulers
 # [[inputs.aurora]]
 # ## Schedulers are the base addresses of your Aurora Schedulers
@@ -2290,11 +2332,6 @@
 # #   insecure_skip_verify = false


-# # Example go-plugin for Telegraf
-# [[inputs.example]]
-# value = 42


 # # Read metrics from one or more commands that can output to stdout
 # [[inputs.exec]]
 # ## Commands array
@@ -2436,11 +2473,17 @@
 # # Gather repository information from GitHub hosted repositories.
 # [[inputs.github]]
 # ## List of repositories to monitor.
-# repositories = ["influxdata/telegraf"]
+# repositories = [
+# "influxdata/telegraf",
+# "influxdata/influxdb"
+# ]
 #
 # ## Github API access token. Unauthenticated requests are limited to 60 per hour.
 # # access_token = ""
 #
+# ## Github API enterprise url. Github Enterprise accounts must specify their base url.
+# # enterprise_base_url = ""
+#
 # ## Timeout for HTTP requests. 
# # http_timeout = "5s" @@ -2776,7 +2819,7 @@ # ## iptables can be restricted to only list command "iptables -nvL". # use_sudo = false # ## Setting 'use_lock' to true runs iptables with the "-w" option. -# ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl") +# ## Adjust your sudo settings appropriately if using this option ("iptables -w 5 -nvl") # use_lock = false # ## Define an alternate executable, such as "ip6tables". Default is "iptables". # # binary = "ip6tables" @@ -3025,8 +3068,8 @@ # # ## Optional Resources to exclude from gathering # ## Leave them with blank with try to gather everything available. -# ## Values can be - "daemonsets", deployments", "nodes", "persistentvolumes", -# ## "persistentvolumeclaims", "pods", "statefulsets" +# ## Values can be - "daemonsets", deployments", "endpoints", "ingress", "nodes", +# ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets" # # resource_exclude = [ "deployments", "nodes", "statefulsets" ] # # ## Optional Resources to include when gathering @@ -3074,6 +3117,39 @@ # # no configuration +# # Read metrics exposed by Logstash +# [[inputs.logstash]] +# ## The URL of the exposed Logstash API endpoint. +# url = "http://127.0.0.1:9600" +# +# ## Use Logstash 5 single pipeline API, set to true when monitoring +# ## Logstash 5. +# # single_pipeline = false +# +# ## Enable optional collection components. Can contain +# ## "pipelines", "process", and "jvm". +# # collect = ["pipelines", "process", "jvm"] +# +# ## Timeout for HTTP requests. +# # timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials. +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config. +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Use TLS but skip chain & host verification. +# # insecure_skip_verify = false +# +# ## Optional HTTP headers. +# # [inputs.logstash.headers] +# # "X-Special-Header" = "Special-Value" + + # # Read metrics from local Lustre service on OST, MDS # [[inputs.lustre2]] # ## An array of /proc globs to search for Lustre stats @@ -3102,6 +3178,26 @@ # # campaign_id = "" +# # Retrives information on a specific host in a MarkLogic Cluster +# [[inputs.marklogic]] +# ## Base URL of the MarkLogic HTTP Server. +# url = "http://localhost:8002" +# +# ## List of specific hostnames to retrieve information. At least (1) required. +# # hosts = ["hostname1", "hostname2"] +# +# ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges +# # username = "myuser" +# # password = "mypassword" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Read metrics from one or many mcrouter servers # [[inputs.mcrouter]] # ## An array of address to gather stats about. 
Specify an ip or hostname @@ -3133,10 +3229,12 @@ # "system", # "agents", # "frameworks", +# "framework_offers", # "tasks", # "messages", # "evqueue", # "registrar", +# "allocator", # ] # ## A list of Mesos slaves, default is [] # # slaves = [] @@ -3400,6 +3498,13 @@ # # # HTTP response timeout (default: 5s) # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false # # Read Nginx Plus Api documentation @@ -3412,6 +3517,13 @@ # # # HTTP response timeout (default: 5s) # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false # # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module) @@ -3451,6 +3563,13 @@ # # ## HTTP response timeout (default: 5s) # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false # # Read NSQ topic and channel statistics. @@ -3518,7 +3637,19 @@ # reverse_metric_names = true -# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver +# # Get standard NTP query metrics from OpenNTPD. +# [[inputs.openntpd]] +# ## Run ntpctl binary with sudo. +# # use_sudo = false +# +# ## Location of the ntpctl binary. +# # binary = "/usr/sbin/ntpctl" +# +# ## Maximum time the ntpctl binary is allowed to run. +# # timeout = "5ms" + + +# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver # [[inputs.opensmtpd]] # ## If running as a restricted user you can prepend sudo for additional access: # #use_sudo = false @@ -3662,14 +3793,14 @@ # # Read metrics from one or many PowerDNS Recursor servers # [[inputs.powerdns_recursor]] -# ## An array of sockets to gather stats about. -# ## Specify a path to unix socket. +# ## Path to the Recursor control socket. # unix_sockets = ["/var/run/pdns_recursor.controlsocket"] # -# ## Socket for Receive -# #socket_dir = "/var/run/" -# ## Socket permissions -# #socket_mode = "0666" +# ## Directory to create receive socket. This default is likely not writable, +# ## please reference the full plugin documentation for a recommended setup. +# # socket_dir = "/var/run/" +# ## Socket permissions for the receive socket. +# # socket_mode = "0666" # # Monitor process cpu and memory usage @@ -3866,7 +3997,8 @@ # ## "never" depending on your disks. # # nocheck = "standby" # -# ## Gather detailed metrics for each SMART Attribute. +# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed +# ## information from each drive into the 'smart_attribute' measurement. # # attributes = false # # ## Optionally specify devices to exclude from reporting. @@ -3877,6 +4009,9 @@ # ## done and all found will be included except for the # ## excluded in excludes. # # devices = [ "/dev/ada0 -d atacam" ] +# +# ## Timeout for the smartctl command to complete. 
+# # timeout = "30s" # # Retrieves SNMP values from remote agents @@ -4787,6 +4922,13 @@ # ## 0 means to use the default of 65536 bytes (64 kibibytes) # max_line_size = "64KiB" # +# +# ## Optional tag name used to store the database. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # database_tag = "" +# # ## Set one or more allowed client CA certificate file names to # ## enable mutually authenticated TLS connections # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] @@ -4863,6 +5005,13 @@ # ## 0 means to use the default of 65536 bytes (64 kibibytes) # max_line_size = "64KiB" # +# +# ## Optional tag name used to store the database. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # database_tag = "" +# # ## Set one or more allowed client CA certificate file names to # ## enable mutually authenticated TLS connections # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] @@ -5123,7 +5272,18 @@ # [[inputs.mqtt_consumer]] # ## MQTT broker URLs to be used. The format should be scheme://host:port, # ## schema can be tcp, ssl, or ws. -# servers = ["tcp://localhost:1883"] +# servers = ["tcp://127.0.0.1:1883"] +# +# ## Topics that will be subscribed to. +# topics = [ +# "telegraf/host01/cpu", +# "telegraf/+/mem", +# "sensors/#", +# ] +# +# ## The message topic will be stored in a tag specified by this value. If set +# ## to the empty string no topic tag will be created. +# # topic_tag = "topic" # # ## QoS policy for messages # ## 0 = at most once @@ -5132,10 +5292,10 @@ # ## # ## When using a QoS of 1 or 2, you should enable persistent_session to allow # ## resuming unacknowledged messages. -# qos = 0 +# # qos = 0 # # ## Connection timeout for initial connection in seconds -# connection_timeout = "30s" +# # connection_timeout = "30s" # # ## Maximum messages to read from the broker that have not been written by an # ## output. For best throughput set based on the number of metrics within @@ -5147,21 +5307,17 @@ # ## waiting until the next flush_interval. # # max_undelivered_messages = 1000 # -# ## Topics to subscribe to -# topics = [ -# "telegraf/host01/cpu", -# "telegraf/+/mem", -# "sensors/#", -# ] +# ## Persistent session disables clearing of the client session on connection. +# ## In order for this option to work you must also set client_id to identity +# ## the client. To receive messages that arrived while the client is offline, +# ## also set the qos option to 1 or 2 and don't forget to also set the QoS when +# ## publishing. +# # persistent_session = false # -# # if true, messages that can't be delivered while the subscriber is offline -# # will be delivered when it comes back (such as on service restart). -# # NOTE: if true, client_id MUST be set -# persistent_session = false -# # If empty, a random client ID will be generated. -# client_id = "" +# ## If unset, a random client ID will be generated. +# # client_id = "" # -# ## username and password to connect MQTT server. +# ## Username and password to connect MQTT server. 
# # username = "telegraf" # # password = "metricsmetricsmetricsmetrics" # @@ -5183,13 +5339,26 @@ # [[inputs.nats_consumer]] # ## urls of NATS servers # servers = ["nats://localhost:4222"] -# ## Use Transport Layer Security -# secure = false +# # ## subject(s) to consume # subjects = ["telegraf"] # ## name a queue group # queue_group = "telegraf_consumers" # +# ## Optional credentials +# # username = "" +# # password = "" +# +# ## Use Transport Layer Security +# # secure = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# # ## Sets the limits for pending msgs and bytes for each subscription # ## These shouldn't need to be adjusted except in very high throughput scenarios # # pending_message_limit = 65536 @@ -5703,6 +5872,8 @@ # "storageAdapter.write.average", # "sys.uptime.latest", # ] +# ## Collect IP addresses? Valid values are "ipv4" and "ipv6" +# # ip_addresses = ["ipv6", "ipv4" ] # # host_metric_exclude = [] ## Nothing excluded by default # # host_instances = true ## true by default # @@ -5756,6 +5927,14 @@ # ## preserve the full precision when averaging takes place. # # use_int_samples = true # +# ## Custom attributes from vCenter can be very useful for queries in order to slice the +# ## metrics along different dimension and for forming ad-hoc relationships. They are disabled +# ## by default, since they can add a considerable amount of tags to the resulting metrics. To +# ## enable, simply set custom_attribute_exlude to [] (empty set) and use custom_attribute_include +# ## to select the attributes you want to include. +# # custom_attribute_include = [] +# # custom_attribute_exclude = ["*"] +# # ## Optional SSL Config # # ssl_ca = "/path/to/cafile" # # ssl_cert = "/path/to/certfile" From 08b903a646c5af242a1f009a8ae57d247c43184b Mon Sep 17 00:00:00 2001 From: Mattias Jiderhamn Date: Tue, 27 Aug 2019 22:47:01 +0200 Subject: [PATCH 1150/1815] Use environment variables to locate Program Files on Windows (#6317) --- cmd/telegraf/telegraf.go | 6 +++++- internal/config/config.go | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 6eceb5cdc..a1303dc6c 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -403,12 +403,16 @@ func main() { } if runtime.GOOS == "windows" && windowsRunAsService() { + programFiles := os.Getenv("ProgramFiles") + if programFiles == "" { // Should never happen + programFiles = "C:\\Program Files" + } svcConfig := &service.Config{ Name: *fServiceName, DisplayName: *fServiceDisplayName, Description: "Collects data using a series of plugins and publishes it to" + "another series of plugins.", - Arguments: []string{"--config", "C:\\Program Files\\Telegraf\\telegraf.conf"}, + Arguments: []string{"--config", programFiles + "\\Telegraf\\telegraf.conf"}, } prg := &program{ diff --git a/internal/config/config.go b/internal/config/config.go index f2617e8b3..2357685a1 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -641,7 +641,11 @@ func getDefaultConfigPath() (string, error) { homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf") etcfile := "/etc/telegraf/telegraf.conf" if runtime.GOOS == "windows" { - etcfile = `C:\Program Files\Telegraf\telegraf.conf` + programFiles := os.Getenv("ProgramFiles") + if programFiles == "" { // Should never happen + programFiles = 
`C:\Program Files` + } + etcfile = programFiles + `\Telegraf\telegraf.conf` } for _, path := range []string{envfile, homefile, etcfile} { if _, err := os.Stat(path); err == nil { From cb578e579023b59f10c215b906f2f2250a999943 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 27 Aug 2019 13:48:49 -0700 Subject: [PATCH 1151/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index adb4852dc..e99c1481f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -97,6 +97,7 @@ - [#6138](https://github.com/influxdata/telegraf/issues/6138): Fix parsing multiple metrics on the first line of tailed file. - [#2526](https://github.com/influxdata/telegraf/issues/2526): Send TERM to exec processes before sending KILL signal. - [#5326](https://github.com/influxdata/telegraf/issues/5326): Query oplog only when connected to a replica set. +- [#6317](https://github.com/influxdata/telegraf/pull/6317): Use environment variables to locate Program Files on Windows. ## v1.11.5 [2019-08-27] From 82c729aba57923f99271bd830139602ee1e69081 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 27 Aug 2019 14:05:28 -0700 Subject: [PATCH 1152/1815] Set next version for builds --- scripts/build.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/build.py b/scripts/build.py index 14545b9eb..4bde92514 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -101,7 +101,7 @@ supported_packages = { "freebsd": [ "tar" ] } -next_version = '1.12.0' +next_version = '1.13.0' ################ #### Telegraf Functions From 8c6ddcd7fa45dbf84db883647af056ca67fbbd66 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 28 Aug 2019 12:23:49 -0700 Subject: [PATCH 1153/1815] Add note about unix domain sockets to syslog input documentation --- plugins/inputs/syslog/README.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/syslog/README.md b/plugins/inputs/syslog/README.md index 8183d2c90..dca51bd97 100644 --- a/plugins/inputs/syslog/README.md +++ b/plugins/inputs/syslog/README.md @@ -1,9 +1,10 @@ # Syslog Input Plugin The syslog plugin listens for syslog messages transmitted over -[UDP](https://tools.ietf.org/html/rfc5426) or -[TCP](https://tools.ietf.org/html/rfc6587) or -[TLS](https://tools.ietf.org/html/rfc5425), with or without the octet counting framing. +a Unix Domain socket, +[UDP](https://tools.ietf.org/html/rfc5426), +[TCP](https://tools.ietf.org/html/rfc6587), or +[TLS](https://tools.ietf.org/html/rfc5425); with or without the octet counting framing. Syslog messages should be formatted according to [RFC 5424](https://tools.ietf.org/html/rfc5424). @@ -12,10 +13,12 @@ Syslog messages should be formatted according to ```toml [[inputs.syslog]] - ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514 ## Protocol, address and port to host the syslog receiver. ## If no host is specified, then localhost is used. ## If no port is specified, 6514 is used (RFC5425#section-4.1). 
+  ## ex: server = "tcp://localhost:6514"
+  ##     server = "udp://:6514"
+  ##     server = "unix:///var/run/telegraf-syslog.sock"
   server = "tcp://:6514"
 
   ## TLS Config
From 7ec54d4be91e691e71ee23b78aeaf1653ebd1695 Mon Sep 17 00:00:00 2001
From: Vlasta Hajek
Date: Wed, 28 Aug 2019 23:34:44 +0200
Subject: [PATCH 1154/1815] Improve startup error reporting when run as Windows service (#4291)

---
 cmd/telegraf/telegraf.go    |  17 ++++-
 docs/CONFIGURATION.md       |   6 +-
 docs/WINDOWS_SERVICE.md     |   9 ++-
 internal/config/config.go   |   5 +-
 logger/event_logger.go      |  49 +++++++++++++++
 logger/event_logger_test.go | 100 ++++++++++++++++++++++++++++++
 logger/logger.go            | 118 ++++++++++++++++++++++++++++--------
 logger/logger_test.go       |  24 +++++++-
 8 files changed, 295 insertions(+), 33 deletions(-)
 create mode 100644 logger/event_logger.go
 create mode 100644 logger/event_logger_test.go

diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go
index a1303dc6c..4d4b62759 100644
--- a/cmd/telegraf/telegraf.go
+++ b/cmd/telegraf/telegraf.go
@@ -150,8 +150,6 @@ func runAgent(ctx context.Context,
 	inputFilters []string,
 	outputFilters []string,
 ) error {
-	// Setup default logging. This may need to change after reading the config
-	// file, but we can configure it to use our logger implementation now.
 	log.Printf("I! Starting Telegraf %s", version)
 
 	// If no other options are specified, load the config file and run.
@@ -195,6 +193,7 @@
 	logConfig := logger.LogConfig{
 		Debug:               ag.Config.Agent.Debug || *fDebug,
 		Quiet:               ag.Config.Agent.Quiet || *fQuiet,
+		LogTarget:           ag.Config.Agent.LogTarget,
 		Logfile:             ag.Config.Agent.Logfile,
 		RotationInterval:    ag.Config.Agent.LogfileRotationInterval,
 		RotationMaxSize:     ag.Config.Agent.LogfileRotationMaxSize,
@@ -429,18 +428,28 @@ func main() {
 	// may not have an interactive session, e.g. installing from Ansible.
 	if *fService != "" {
 		if *fConfig != "" {
-			(*svcConfig).Arguments = []string{"--config", *fConfig}
+			svcConfig.Arguments = []string{"--config", *fConfig}
 		}
 		if *fConfigDirectory != "" {
-			(*svcConfig).Arguments = append((*svcConfig).Arguments, "--config-directory", *fConfigDirectory)
+			svcConfig.Arguments = append(svcConfig.Arguments, "--config-directory", *fConfigDirectory)
 		}
+		//set servicename to service cmd line, to have a custom name after relaunch as a service
+		svcConfig.Arguments = append(svcConfig.Arguments, "--service-name", *fServiceName)
+
 		err := service.Control(s, *fService)
 		if err != nil {
 			log.Fatal("E! " + err.Error())
 		}
 		os.Exit(0)
 	} else {
+		winlogger, err := s.Logger(nil)
+		if err == nil {
+			//When in service mode, register eventlog target and set up default logging to eventlog
+			logger.RegisterEventLogger(winlogger)
+			logger.SetupLogging(logger.LogConfig{LogTarget: logger.LogTargetEventlog})
+		}
 		err = s.Run()
+
 		if err != nil {
 			log.Println("E! " + err.Error())
 		}
diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md
index 3440b0d30..071a92f1c 100644
--- a/docs/CONFIGURATION.md
+++ b/docs/CONFIGURATION.md
@@ -141,8 +141,12 @@ The agent table configures Telegraf and the defaults used across all plugins.
 - **quiet**:
   Log only error level messages.
 
+- **logtarget**:
+  Log target - `file`, `stderr` or `eventlog` (Windows only).
+  The empty string means to log to stderr.
+
 - **logfile**:
-  Log file name, the empty string means to log to stderr.
+  Log file name.
 
 - **logfile_rotation_interval**:
   The logfile will be rotated after the time interval specified.
When set to
diff --git a/docs/WINDOWS_SERVICE.md b/docs/WINDOWS_SERVICE.md
index 51ce6a7a6..5b630076c 100644
--- a/docs/WINDOWS_SERVICE.md
+++ b/docs/WINDOWS_SERVICE.md
@@ -56,8 +56,13 @@ You can install multiple telegraf instances with --service-name flag:
 > C:\"Program Files"\Telegraf\telegraf.exe --service uninstall --service-name telegraf-1
 ```
 
-Troubleshooting common error #1067
+## Troubleshooting
+
+When Telegraf runs as a Windows service, it logs messages to the Windows event log until the configuration file with the logging settings has been loaded.
+If the Telegraf service reports a failure on startup, check the event log for errors reported by the `telegraf` service: Event Viewer->Windows Logs->Application
+
+**Troubleshooting common error #1067**
 
 When installing as a service in Windows, always double-check that you specify the full path of the config file; otherwise the Windows service will fail to start
 
-    --config C:\"Program Files"\Telegraf\telegraf.conf
+    --config "C:\Program Files\Telegraf\telegraf.conf"
diff --git a/internal/config/config.go b/internal/config/config.go
index 2357685a1..43866a32e 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -146,7 +146,10 @@ type AgentConfig struct {
 	// Quiet is the option for running in quiet mode
 	Quiet bool `toml:"quiet"`
 
-	// Log file name, the empty string means to log to stderr.
+	// Log target - file, stderr, eventlog (Windows only). The empty string means to log to stderr.
+	LogTarget string `toml:"logtarget"`
+
+	// Log file name.
 	Logfile string `toml:"logfile"`
 
 	// The file will be rotated after the time interval specified. When set
diff --git a/logger/event_logger.go b/logger/event_logger.go
new file mode 100644
index 000000000..48b645dde
--- /dev/null
+++ b/logger/event_logger.go
@@ -0,0 +1,49 @@
+package logger
+
+import (
+	"io"
+	"strings"
+
+	"github.com/influxdata/wlog"
+	"github.com/kardianos/service"
+)
+
+const (
+	LogTargetEventlog = "eventlog"
+)
+
+type eventLogger struct {
+	logger service.Logger
+}
+
+func (t *eventLogger) Write(b []byte) (n int, err error) {
+	loc := prefixRegex.FindIndex(b)
+	n = len(b)
+	if loc == nil {
+		err = t.logger.Info(b)
+	} else if n > 2 { //skip empty log messages
+		line := strings.Trim(string(b[loc[1]:]), " \t\r\n")
+		switch rune(b[loc[0]]) {
+		case 'I':
+			err = t.logger.Info(line)
+		case 'W':
+			err = t.logger.Warning(line)
+		case 'E':
+			err = t.logger.Error(line)
+		}
+	}
+
+	return
+}
+
+type eventLoggerCreator struct {
+	serviceLogger service.Logger
+}
+
+func (e *eventLoggerCreator) CreateLogger(config LogConfig) (io.Writer, error) {
+	return wlog.NewWriter(&eventLogger{logger: e.serviceLogger}), nil
+}
+
+func RegisterEventLogger(serviceLogger service.Logger) {
+	registerLogger(LogTargetEventlog, &eventLoggerCreator{serviceLogger: serviceLogger})
+}
diff --git a/logger/event_logger_test.go b/logger/event_logger_test.go
new file mode 100644
index 000000000..f2d4eb420
--- /dev/null
+++ b/logger/event_logger_test.go
@@ -0,0 +1,100 @@
+//+build windows
+
+package logger
+
+import (
+	"bytes"
+	"encoding/xml"
+	"log"
+	"os/exec"
+	"testing"
+	"time"
+
+	"github.com/kardianos/service"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+type Levels int
+
+const (
+	Info Levels = iota + 1
+	Warning
+	Error
+)
+
+type Event struct {
+	Message string `xml:"EventData>Data"`
+	Level   Levels `xml:"System>EventID"`
+}
+
+func getEventLog(t *testing.T, since time.Time) []Event {
+	timeStr := since.UTC().Format(time.RFC3339)
+	cmd := exec.Command("wevtutil", "qe",
"Application", "/rd:true", "/q:Event[System[TimeCreated[@SystemTime >= '"+timeStr+"'] and Provider[@Name='Telegraf']]]") + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + require.NoError(t, err) + xmlStr := "" + out.String() + "" + var events struct { + Events []Event `xml:"Event"` + } + err = xml.Unmarshal([]byte(xmlStr), &events) + require.NoError(t, err) + return events.Events +} + +func TestEventLog(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + prepareLogger(t) + + config := LogConfig{ + LogTarget: LogTargetEventlog, + Logfile: "", + } + + SetupLogging(config) + now := time.Now() + log.Println("I! Info message") + log.Println("W! Warn message") + log.Println("E! Err message") + events := getEventLog(t, now) + assert.Len(t, events, 3) + assert.Contains(t, events, Event{Message: "Info message", Level: Info}) + assert.Contains(t, events, Event{Message: "Warn message", Level: Warning}) + assert.Contains(t, events, Event{Message: "Err message", Level: Error}) +} + +func TestRestrictedEventLog(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + prepareLogger(t) + + config := LogConfig{ + LogTarget: LogTargetEventlog, + Quiet: true, + } + + SetupLogging(config) + //separate previous log messages by small delay + time.Sleep(time.Second) + now := time.Now() + log.Println("I! Info message") + log.Println("W! Warning message") + log.Println("E! Error message") + events := getEventLog(t, now) + assert.Len(t, events, 1) + assert.Contains(t, events, Event{Message: "Error message", Level: Error}) +} + +func prepareLogger(t *testing.T) { + svc, err := service.New(nil, &service.Config{Name: "Telegraf"}) + require.NoError(t, err) + svcLogger, err := svc.SystemLogger(nil) + require.NoError(t, err) + require.NotNil(t, svcLogger) + registerLogger(LogTargetEventlog, &eventLoggerCreator{serviceLogger: svcLogger}) +} diff --git a/logger/logger.go b/logger/logger.go index a7b32b6e0..a52e709a8 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -2,6 +2,7 @@ package logger import ( "errors" + "io" "log" "os" @@ -15,13 +16,10 @@ import ( var prefixRegex = regexp.MustCompile("^[DIWE]!") -// newTelegrafWriter returns a logging-wrapped writer. -func newTelegrafWriter(w io.Writer) io.Writer { - return &telegrafLog{ - writer: wlog.NewWriter(w), - internalWriter: w, - } -} +const ( + LogTargetFile = "file" + LogTargetStderr = "stderr" +) // LogConfig contains the log configuration settings type LogConfig struct { @@ -29,6 +27,8 @@ type LogConfig struct { Debug bool //will set the log level to ERROR Quiet bool + //stderr, stdout, file or eventlog (Windows only) + LogTarget string // will direct the logging output to a file. Empty string is // interpreted as stderr. 
If there is an error opening the file the // logger will fallback to stderr @@ -41,6 +41,19 @@ type LogConfig struct { RotationMaxArchives int } +type LoggerCreator interface { + CreateLogger(config LogConfig) (io.Writer, error) +} + +var loggerRegistry map[string]LoggerCreator + +func registerLogger(name string, loggerCreator LoggerCreator) { + if loggerRegistry == nil { + loggerRegistry = make(map[string]LoggerCreator) + } + loggerRegistry[name] = loggerCreator +} + type telegrafLog struct { writer io.Writer internalWriter io.Writer @@ -57,11 +70,25 @@ func (t *telegrafLog) Write(b []byte) (n int, err error) { } func (t *telegrafLog) Close() error { - closer, isCloser := t.internalWriter.(io.Closer) - if !isCloser { - return errors.New("the underlying writer cannot be closed") + var stdErrWriter io.Writer + stdErrWriter = os.Stderr + // avoid closing stderr + if t.internalWriter != stdErrWriter { + closer, isCloser := t.internalWriter.(io.Closer) + if !isCloser { + return errors.New("the underlying writer cannot be closed") + } + return closer.Close() + } + return nil +} + +// newTelegrafWriter returns a logging-wrapped writer. +func newTelegrafWriter(w io.Writer) io.Writer { + return &telegrafLog{ + writer: wlog.NewWriter(w), + internalWriter: w, } - return closer.Close() } // SetupLogging configures the logging output. @@ -69,6 +96,39 @@ func SetupLogging(config LogConfig) { newLogWriter(config) } +type telegrafLogCreator struct { +} + +func (t *telegrafLogCreator) CreateLogger(config LogConfig) (io.Writer, error) { + var writer, defaultWriter io.Writer + defaultWriter = os.Stderr + + switch config.LogTarget { + case LogTargetFile: + if config.Logfile != "" { + var err error + if writer, err = rotate.NewFileWriter(config.Logfile, config.RotationInterval.Duration, config.RotationMaxSize.Size, config.RotationMaxArchives); err != nil { + log.Printf("E! Unable to open %s (%s), using stderr", config.Logfile, err) + writer = defaultWriter + } + } else { + log.Print("E! Empty logfile, using stderr") + writer = defaultWriter + } + case LogTargetStderr, "": + writer = defaultWriter + default: + log.Printf("E! Unsupported logtarget: %s, using stderr", config.LogTarget) + writer = defaultWriter + } + + return newTelegrafWriter(writer), nil +} + +// Keep track what is actually set as a log output, because log package doesn't provide a getter. +// It allows closing previous writer if re-set and have possibility to test what is actually set +var actualLogger io.Writer + func newLogWriter(config LogConfig) io.Writer { log.SetFlags(0) if config.Debug { @@ -77,19 +137,29 @@ func newLogWriter(config LogConfig) io.Writer { if config.Quiet { wlog.SetLevel(wlog.ERROR) } - - var writer io.Writer - if config.Logfile != "" { - var err error - if writer, err = rotate.NewFileWriter(config.Logfile, config.RotationInterval.Duration, config.RotationMaxSize.Size, config.RotationMaxArchives); err != nil { - log.Printf("E! 
Unable to open %s (%s), using stderr", config.Logfile, err) - writer = os.Stderr - } - } else { - writer = os.Stderr + if !config.Debug && !config.Quiet { + wlog.SetLevel(wlog.INFO) + } + var logWriter io.Writer + if logCreator, ok := loggerRegistry[config.LogTarget]; ok { + logWriter, _ = logCreator.CreateLogger(config) + } + if logWriter == nil { + logWriter, _ = (&telegrafLogCreator{}).CreateLogger(config) } - telegrafLog := newTelegrafWriter(writer) - log.SetOutput(telegrafLog) - return telegrafLog + if closer, isCloser := actualLogger.(io.Closer); isCloser { + closer.Close() + } + log.SetOutput(logWriter) + actualLogger = logWriter + + return logWriter +} + +func init() { + tlc := &telegrafLogCreator{} + registerLogger("", tlc) + registerLogger(LogTargetStderr, tlc) + registerLogger(LogTargetFile, tlc) } diff --git a/logger/logger_test.go b/logger/logger_test.go index 504e9a4bb..a5f53ca17 100644 --- a/logger/logger_test.go +++ b/logger/logger_test.go @@ -85,7 +85,7 @@ func TestWriteToTruncatedFile(t *testing.T) { assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! TEST\n")) - tmpf, err := os.OpenFile(tmpfile.Name(), os.O_TRUNC, 0644) + tmpf, err := os.OpenFile(tmpfile.Name(), os.O_RDWR|os.O_TRUNC, 0644) assert.NoError(t, err) assert.NoError(t, tmpf.Close()) @@ -100,6 +100,7 @@ func TestWriteToFileInRotation(t *testing.T) { tempDir, err := ioutil.TempDir("", "LogRotation") require.NoError(t, err) config := createBasicLogConfig(filepath.Join(tempDir, "test.log")) + config.LogTarget = LogTargetFile config.RotationMaxSize = internal.Size{Size: int64(30)} writer := newLogWriter(config) // Close the writer here, otherwise the temp folder cannot be deleted because the current log file is in use. @@ -113,6 +114,26 @@ func TestWriteToFileInRotation(t *testing.T) { assert.Equal(t, 2, len(files)) } +func TestLogTargetSettings(t *testing.T) { + config := LogConfig{ + LogTarget: "", + Quiet: true, + } + SetupLogging(config) + logger, isTelegrafLogger := actualLogger.(*telegrafLog) + assert.True(t, isTelegrafLogger) + assert.Equal(t, logger.internalWriter, os.Stderr) + + config = LogConfig{ + LogTarget: "stderr", + Quiet: true, + } + SetupLogging(config) + logger, isTelegrafLogger = actualLogger.(*telegrafLog) + assert.True(t, isTelegrafLogger) + assert.Equal(t, logger.internalWriter, os.Stderr) +} + func BenchmarkTelegrafLogWrite(b *testing.B) { var msg = []byte("test") var buf bytes.Buffer @@ -126,6 +147,7 @@ func BenchmarkTelegrafLogWrite(b *testing.B) { func createBasicLogConfig(filename string) LogConfig { return LogConfig{ Logfile: filename, + LogTarget: LogTargetFile, RotationMaxArchives: -1, } } From 558c8254786d625807cd2dc19db3db0604b9d673 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 28 Aug 2019 18:01:50 -0700 Subject: [PATCH 1155/1815] Link to specific gjson version we are using --- plugins/parsers/json/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/parsers/json/README.md b/plugins/parsers/json/README.md index 10bcf21bd..08aeef18e 100644 --- a/plugins/parsers/json/README.md +++ b/plugins/parsers/json/README.md @@ -22,7 +22,7 @@ ignored unless specified in the `tag_key` or `json_string_fields` options. ## parsed, if not specified the whole document will be parsed. ## ## GJSON query paths are described here: - ## https://github.com/tidwall/gjson#path-syntax + ## https://github.com/tidwall/gjson/tree/v1.3.0#path-syntax json_query = "" ## Tag keys is an array of keys that should be added as tags. 
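The logger refactor in PATCH 1154 above replaces hard-wired stderr/file handling with a small registry of log-target creators, which is what lets the Windows build plug an event-log target in at runtime. Below is a minimal, self-contained sketch of that pattern; the interface and function names mirror the patch, but `stderrCreator` and the `main` wiring are illustrative only (in Telegraf itself `registerLogger` is unexported inside the `logger` package):

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// LogConfig is a trimmed stand-in for logger.LogConfig.
type LogConfig struct {
	LogTarget string
	Logfile   string
}

// LoggerCreator mirrors the interface added by the patch: each log target
// knows how to build the io.Writer that log output is sent to.
type LoggerCreator interface {
	CreateLogger(config LogConfig) (io.Writer, error)
}

var loggerRegistry = map[string]LoggerCreator{}

func registerLogger(name string, creator LoggerCreator) {
	loggerRegistry[name] = creator
}

// stderrCreator stands in for the default telegrafLogCreator.
type stderrCreator struct{}

func (stderrCreator) CreateLogger(LogConfig) (io.Writer, error) {
	return os.Stderr, nil
}

// newLogWriter resolves the configured target through the registry and
// falls back to stderr for unknown targets, as the patched logger does.
func newLogWriter(config LogConfig) io.Writer {
	if creator, ok := loggerRegistry[config.LogTarget]; ok {
		if w, err := creator.CreateLogger(config); err == nil {
			return w
		}
	}
	return os.Stderr
}

func main() {
	registerLogger("", stderrCreator{})
	registerLogger("stderr", stderrCreator{})

	w := newLogWriter(LogConfig{LogTarget: "stderr"})
	fmt.Fprintln(w, "I! routed through the registry sketch")
}
```

The payoff of the registry is that platform-specific targets such as the event log can be registered from platform-specific files without the core logger having to import them.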
From 4f54b11973553723564c31c984fd319abb34a5c4 Mon Sep 17 00:00:00 2001 From: Mattias Jiderhamn Date: Fri, 30 Aug 2019 01:29:25 +0200 Subject: [PATCH 1156/1815] Add azure_storage_queue input plugin (#5323) --- Gopkg.lock | 17 +++ Gopkg.toml | 8 ++ docs/LICENSE_OF_DEPENDENCIES.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/azure_storage_queue/README.md | 35 +++++ .../azure_storage_queue.go | 134 ++++++++++++++++++ 6 files changed, 196 insertions(+) create mode 100644 plugins/inputs/azure_storage_queue/README.md create mode 100644 plugins/inputs/azure_storage_queue/azure_storage_queue.go diff --git a/Gopkg.lock b/Gopkg.lock index 248d55456..c38cbb506 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -47,6 +47,22 @@ revision = "2b93072101d466aa4120b3c23c2e1b08af01541c" version = "v0.6.0" +[[projects]] + digest = "1:bd444f85703c5aff1ba686cb52766fd38c3730d4e1dfb02327b2481bfe674997" + name = "github.com/Azure/azure-pipeline-go" + packages = ["pipeline"] + pruneopts = "" + revision = "b8e3409182fd52e74f7d7bdfbff5833591b3b655" + version = "v0.1.8" + +[[projects]] + digest = "1:6ef03ecdaf3e9a003c2ebd67bfa673bbe8df2c23c82217a4448da766e8ef6b30" + name = "github.com/Azure/azure-storage-queue-go" + packages = ["azqueue"] + pruneopts = "" + revision = "6ed74e755687d1a74f08d9aab5a9e3f2fbe7d162" + version = "0.2.0" + [[projects]] digest = "1:5923e22a060ab818a015593422f9e8a35b9d881d4cfcfed0669a82959b11c7ee" name = "github.com/Azure/go-autorest" @@ -1669,6 +1685,7 @@ "cloud.google.com/go/pubsub", "collectd.org/api", "collectd.org/network", + "github.com/Azure/azure-storage-queue-go/azqueue", "github.com/Azure/go-autorest/autorest", "github.com/Azure/go-autorest/autorest/azure/auth", "github.com/Microsoft/ApplicationInsights-Go/appinsights", diff --git a/Gopkg.toml b/Gopkg.toml index 2cc57dd71..6848b947f 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -236,6 +236,10 @@ name = "github.com/Azure/go-autorest" version = "10.12.0" +[[constraint]] + name = "github.com/Azure/azure-storage-queue-go" + version = "0.2.0" + [[constraint]] branch = "master" name = "golang.org/x/oauth2" @@ -257,6 +261,10 @@ name = "github.com/karrick/godirwalk" version = "1.7.5" +[[constraint]] + name = "github.com/Azure/azure-pipeline-go" + version = "0.1.8" + [[override]] name = "github.com/harlow/kinesis-consumer" branch = "master" diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 81ecaac81..bb1e90007 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -12,6 +12,7 @@ following works: - github.com/amir/raidman [The Unlicense](https://github.com/amir/raidman/blob/master/UNLICENSE) - github.com/apache/thrift [Apache License 2.0](https://github.com/apache/thrift/blob/master/LICENSE) - github.com/aws/aws-sdk-go [Apache License 2.0](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) +- github.com/Azure/azure-storage-queue-go [MIT License](https://github.com/Azure/azure-storage-queue-go/blob/master/LICENSE) - github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE) - github.com/beorn7/perks [MIT License](https://github.com/beorn7/perks/blob/master/LICENSE) - github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 7381487d5..f8ff6c879 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -7,6 +7,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/apache" _ 
"github.com/influxdata/telegraf/plugins/inputs/apcupsd" _ "github.com/influxdata/telegraf/plugins/inputs/aurora" + _ "github.com/influxdata/telegraf/plugins/inputs/azure_storage_queue" _ "github.com/influxdata/telegraf/plugins/inputs/bcache" _ "github.com/influxdata/telegraf/plugins/inputs/beanstalkd" _ "github.com/influxdata/telegraf/plugins/inputs/bind" diff --git a/plugins/inputs/azure_storage_queue/README.md b/plugins/inputs/azure_storage_queue/README.md new file mode 100644 index 000000000..7985c886e --- /dev/null +++ b/plugins/inputs/azure_storage_queue/README.md @@ -0,0 +1,35 @@ +# Telegraf Input Plugin: Azure Storage Queue + +This plugin gathers sizes of Azure Storage Queues. + +### Configuration: + +```toml +# Description +[[inputs.azure_storage_queue]] + ## Required Azure Storage Account name + account_name = "mystorageaccount" + + ## Required Azure Storage Account access key + account_key = "storageaccountaccesskey" + + ## Set to false to disable peeking age of oldest message (executes faster) + # peek_oldest_message_age = true +``` + +### Metrics +- azure_storage_queues + - tags: + - queue + - account + - fields: + - size (integer, count) + - oldest_message_age_ns (integer, nanoseconds) Age of message at the head of the queue. + Requires `peek_oldest_message_age` to be configured to `true`. + +### Example Output + +``` +azure_storage_queues,queue=myqueue,account=mystorageaccount oldest_message_age=799714900i,size=7i 1565970503000000000 +azure_storage_queues,queue=myemptyqueue,account=mystorageaccount size=0i 1565970502000000000 +``` \ No newline at end of file diff --git a/plugins/inputs/azure_storage_queue/azure_storage_queue.go b/plugins/inputs/azure_storage_queue/azure_storage_queue.go new file mode 100644 index 000000000..0fa7b0fd6 --- /dev/null +++ b/plugins/inputs/azure_storage_queue/azure_storage_queue.go @@ -0,0 +1,134 @@ +package activemq + +import ( + "context" + "errors" + "log" + "net/url" + "strings" + "time" + + "github.com/Azure/azure-storage-queue-go/azqueue" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type AzureStorageQueue struct { + StorageAccountName string `toml:"account_name"` + StorageAccountKey string `toml:"account_key"` + PeekOldestMessageAge bool `toml:"peek_oldest_message_age"` + + serviceURL *azqueue.ServiceURL +} + +var sampleConfig = ` + ## Required Azure Storage Account name + account_name = "mystorageaccount" + + ## Required Azure Storage Account access key + account_key = "storageaccountaccesskey" + + ## Set to false to disable peeking age of oldest message (executes faster) + # peek_oldest_message_age = true + ` + +func (a *AzureStorageQueue) Description() string { + return "Gather Azure Storage Queue metrics" +} + +func (a *AzureStorageQueue) SampleConfig() string { + return sampleConfig +} + +func (a *AzureStorageQueue) Init() error { + if a.StorageAccountName == "" { + return errors.New("account_name must be configured") + } + + if a.StorageAccountKey == "" { + return errors.New("account_key must be configured") + } + return nil +} + +func (a *AzureStorageQueue) GetServiceURL() (azqueue.ServiceURL, error) { + if a.serviceURL == nil { + _url, err := url.Parse("https://" + a.StorageAccountName + ".queue.core.windows.net") + if err != nil { + return azqueue.ServiceURL{}, err + } + + credential, err := azqueue.NewSharedKeyCredential(a.StorageAccountName, a.StorageAccountKey) + if err != nil { + return azqueue.ServiceURL{}, err + } + + pipeline := azqueue.NewPipeline(credential, 
azqueue.PipelineOptions{}) + + serviceURL := azqueue.NewServiceURL(*_url, pipeline) + a.serviceURL = &serviceURL + } + return *a.serviceURL, nil +} + +func (a *AzureStorageQueue) GatherQueueMetrics(acc telegraf.Accumulator, queueItem azqueue.QueueItem, properties *azqueue.QueueGetPropertiesResponse, peekedMessage *azqueue.PeekedMessage) { + fields := make(map[string]interface{}) + tags := make(map[string]string) + tags["queue"] = strings.TrimSpace(queueItem.Name) + tags["account"] = a.StorageAccountName + fields["size"] = properties.ApproximateMessagesCount() + if peekedMessage != nil { + fields["oldest_message_age_ns"] = time.Now().UnixNano() - peekedMessage.InsertionTime.UnixNano() + } + acc.AddFields("azure_storage_queues", fields, tags) +} + +func (a *AzureStorageQueue) Gather(acc telegraf.Accumulator) error { + serviceURL, err := a.GetServiceURL() + if err != nil { + return err + } + + ctx := context.TODO() + + for marker := (azqueue.Marker{}); marker.NotDone(); { + log.Printf("D! [inputs.azure_storage_queue] Listing queues of storage account '%s'", a.StorageAccountName) + queuesSegment, err := serviceURL.ListQueuesSegment(ctx, marker, + azqueue.ListQueuesSegmentOptions{ + Detail: azqueue.ListQueuesSegmentDetails{Metadata: false}, + }) + if err != nil { + return err + } + marker = queuesSegment.NextMarker + + for _, queueItem := range queuesSegment.QueueItems { + log.Printf("D! [inputs.azure_storage_queue] Processing queue '%s' of storage account '%s'", queueItem.Name, a.StorageAccountName) + queueURL := serviceURL.NewQueueURL(queueItem.Name) + properties, err := queueURL.GetProperties(ctx) + if err != nil { + log.Printf("E! [inputs.azure_storage_queue] Error getting properties for queue %s: %s", queueItem.Name, err.Error()) + continue + } + var peekedMessage *azqueue.PeekedMessage + if a.PeekOldestMessageAge { + messagesURL := queueURL.NewMessagesURL() + messagesResponse, err := messagesURL.Peek(ctx, 1) + if err != nil { + log.Printf("E! 
[inputs.azure_storage_queue] Error peeking queue %s: %s", queueItem.Name, err.Error()) + } else if messagesResponse.NumMessages() > 0 { + peekedMessage = messagesResponse.Message(0) + } + } + + a.GatherQueueMetrics(acc, queueItem, properties, peekedMessage) + } + } + return nil +} + +func init() { + inputs.Add("azure_storage_queue", func() telegraf.Input { + return &AzureStorageQueue{PeekOldestMessageAge: true} + }) +} From 80f38ae352a784164570c04a12624ac5d8552d24 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 29 Aug 2019 16:32:43 -0700 Subject: [PATCH 1157/1815] Update readme and changelog --- CHANGELOG.md | 6 ++++++ README.md | 3 ++- docs/LICENSE_OF_DEPENDENCIES.md | 1 + 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e99c1481f..d91b574b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +## v1.13 [unreleased] + +#### New Inputs + +- [azure_storage_queue](/plugins/inputs/azure_storage_queue/README.md) - Contributed by @mjiderhamn + ## v1.12 [unreleased] #### Release Notes diff --git a/README.md b/README.md index db4012f62..2616d920d 100644 --- a/README.md +++ b/README.md @@ -140,9 +140,10 @@ For documentation on the latest development code see the [documentation index][d * [aerospike](./plugins/inputs/aerospike) * [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq) * [apache](./plugins/inputs/apache) -* [aurora](./plugins/inputs/aurora) * [apcupsd](./plugins/inputs/apcupsd) +* [aurora](./plugins/inputs/aurora) * [aws cloudwatch](./plugins/inputs/cloudwatch) +* [azure_storage_queue](./plugins/inputs/azure_storage_queue) * [bcache](./plugins/inputs/bcache) * [beanstalkd](./plugins/inputs/beanstalkd) * [bind](./plugins/inputs/bind) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index bb1e90007..e0332196b 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -13,6 +13,7 @@ following works: - github.com/apache/thrift [Apache License 2.0](https://github.com/apache/thrift/blob/master/LICENSE) - github.com/aws/aws-sdk-go [Apache License 2.0](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) - github.com/Azure/azure-storage-queue-go [MIT License](https://github.com/Azure/azure-storage-queue-go/blob/master/LICENSE) +- github.com/Azure/azure-pipeline-go [MIT License](https://github.com/Azure/azure-pipeline-go/blob/master/LICENSE) - github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE) - github.com/beorn7/perks [MIT License](https://github.com/beorn7/perks/blob/master/LICENSE) - github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE) From 32de8bb45954b6f658430e52e53827795846735d Mon Sep 17 00:00:00 2001 From: paebersold <32950171+paebersold@users.noreply.github.com> Date: Fri, 30 Aug 2019 10:12:57 +1000 Subject: [PATCH 1158/1815] Add per node memory stats to rabbitmq input (#6326) --- plugins/inputs/rabbitmq/README.md | 20 +++++ plugins/inputs/rabbitmq/rabbitmq.go | 86 +++++++++++++++++--- plugins/inputs/rabbitmq/rabbitmq_test.go | 25 +++++- plugins/inputs/rabbitmq/testdata/memory.json | 24 ++++++ 4 files changed, 141 insertions(+), 14 deletions(-) create mode 100644 plugins/inputs/rabbitmq/testdata/memory.json diff --git a/plugins/inputs/rabbitmq/README.md b/plugins/inputs/rabbitmq/README.md index 5d500afd1..d52a760f2 100644 --- a/plugins/inputs/rabbitmq/README.md +++ b/plugins/inputs/rabbitmq/README.md @@ -107,6 +107,26 @@ For additional details reference 
the [RabbitMQ Management HTTP Stats][management - io_write_avg_time_rate (float, milliseconds per second) - io_write_bytes (int, bytes) - io_write_bytes_rate (float, bytes per second) + - mem_connection_readers (int, bytes) + - mem_connection_writers (int, bytes) + - mem_connection_channels (int, bytes) + - mem_connection_other (int, bytes) + - mem_queue_procs (int, bytes) + - mem_queue_slave_procs (int, bytes) + - mem_plugins (int, bytes) + - mem_other_proc (int, bytes) + - mem_metrics (int, bytes) + - mem_mgmt_db (int, bytes) + - mem_mnesia (int, bytes) + - mem_other_ets (int, bytes) + - mem_binary (int, bytes) + - mem_msg_index (int, bytes) + - mem_code (int, bytes) + - mem_atom (int, bytes) + - mem_other_system (int, bytes) + - mem_allocated_unused (int, bytes) + - mem_reserved_unallocated (int, bytes) + - mem_total (int, bytes) - rabbitmq_queue - consumer_utilisation (float, percent) diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 4e7e918da..168a340b0 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -182,6 +182,35 @@ type HealthCheck struct { Status string `json:"status"` } +// MemoryResponse ... +type MemoryResponse struct { + Memory *Memory `json:"memory"` +} + +// Memory details +type Memory struct { + ConnectionReaders int64 `json:"connection_readers"` + ConnectionWriters int64 `json:"connection_writers"` + ConnectionChannels int64 `json:"connection_channels"` + ConnectionOther int64 `json:"connection_other"` + QueueProcs int64 `json:"queue_procs"` + QueueSlaveProcs int64 `json:"queue_slave_procs"` + Plugins int64 `json:"plugins"` + OtherProc int64 `json:"other_proc"` + Metrics int64 `json:"metrics"` + MgmtDb int64 `json:"mgmt_db"` + Mnesia int64 `json:"mnesia"` + OtherEts int64 `json:"other_ets"` + Binary int64 `json:"binary"` + MsgIndex int64 `json:"msg_index"` + Code int64 `json:"code"` + Atom int64 `json:"atom"` + OtherSystem int64 `json:"other_system"` + AllocatedUnused int64 `json:"allocated_unused"` + ReservedUnallocated int64 `json:"reserved_unallocated"` + Total int64 `json:"total"` +} + // gatherFunc ... 
type gatherFunc func(r *RabbitMQ, acc telegraf.Accumulator) @@ -391,43 +420,52 @@ func gatherNodes(r *RabbitMQ, acc telegraf.Accumulator) { return } - type NodeHealthCheck struct { + type NodeCheck struct { NodeName string HealthCheck HealthCheck - Error error + Memory *Memory } - healthChecksChannel := make(chan NodeHealthCheck, numberNodes) + nodeChecksChannel := make(chan NodeCheck, numberNodes) for _, node := range nodes { - go func(nodeName string, healthChecksChannel chan NodeHealthCheck) { + go func(nodeName string, healthChecksChannel chan NodeCheck) { var healthCheck HealthCheck + var memoryresponse MemoryResponse err := r.requestJSON("/api/healthchecks/node/"+nodeName, &healthCheck) - nodeHealthCheck := NodeHealthCheck{ + nodeCheck := NodeCheck{ NodeName: nodeName, - Error: err, HealthCheck: healthCheck, } + if err != nil { + acc.AddError(err) + return + } - healthChecksChannel <- nodeHealthCheck - }(node.Name, healthChecksChannel) + err = r.requestJSON("/api/nodes/"+nodeName+"/memory", &memoryresponse) + nodeCheck.Memory = memoryresponse.Memory + if err != nil { + acc.AddError(err) + return + } + + nodeChecksChannel <- nodeCheck + }(node.Name, nodeChecksChannel) } now := time.Now() for i := 0; i < len(nodes); i++ { - nodeHealthCheck := <-healthChecksChannel + nodeCheck := <-nodeChecksChannel var healthCheckStatus int64 = 0 - if nodeHealthCheck.Error != nil { - acc.AddError(nodeHealthCheck.Error) - } else if nodeHealthCheck.HealthCheck.Status == "ok" { + if nodeCheck.HealthCheck.Status == "ok" { healthCheckStatus = 1 } - node := nodes[nodeHealthCheck.NodeName] + node := nodes[nodeCheck.NodeName] tags := map[string]string{"url": r.URL} tags["node"] = node.Name @@ -466,6 +504,28 @@ func gatherNodes(r *RabbitMQ, acc telegraf.Accumulator) { "running": boolToInt(node.Running), "health_check_status": healthCheckStatus, } + if nodeCheck.Memory != nil { + fields["mem_connection_readers"] = nodeCheck.Memory.ConnectionReaders + fields["mem_connection_writers"] = nodeCheck.Memory.ConnectionWriters + fields["mem_connection_channels"] = nodeCheck.Memory.ConnectionChannels + fields["mem_connection_other"] = nodeCheck.Memory.ConnectionOther + fields["mem_queue_procs"] = nodeCheck.Memory.QueueProcs + fields["mem_queue_slave_procs"] = nodeCheck.Memory.QueueSlaveProcs + fields["mem_plugins"] = nodeCheck.Memory.Plugins + fields["mem_other_proc"] = nodeCheck.Memory.OtherProc + fields["mem_metrics"] = nodeCheck.Memory.Metrics + fields["mem_mgmt_db"] = nodeCheck.Memory.MgmtDb + fields["mem_mnesia"] = nodeCheck.Memory.Mnesia + fields["mem_other_ets"] = nodeCheck.Memory.OtherEts + fields["mem_binary"] = nodeCheck.Memory.Binary + fields["mem_msg_index"] = nodeCheck.Memory.MsgIndex + fields["mem_code"] = nodeCheck.Memory.Code + fields["mem_atom"] = nodeCheck.Memory.Atom + fields["mem_other_system"] = nodeCheck.Memory.OtherSystem + fields["mem_allocated_unused"] = nodeCheck.Memory.AllocatedUnused + fields["mem_reserved_unallocated"] = nodeCheck.Memory.ReservedUnallocated + fields["mem_total"] = nodeCheck.Memory.Total + } acc.AddFields("rabbitmq_node", fields, tags, now) } } diff --git a/plugins/inputs/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go index 0f98f95ce..9d35718d9 100644 --- a/plugins/inputs/rabbitmq/rabbitmq_test.go +++ b/plugins/inputs/rabbitmq/rabbitmq_test.go @@ -6,10 +6,11 @@ import ( "net/http/httptest" "testing" + "io/ioutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "io/ioutil" ) func 
TestRabbitMQGeneratesMetrics(t *testing.T) { @@ -27,6 +28,8 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) { jsonFilePath = "testdata/exchanges.json" case "/api/healthchecks/node/rabbit@vagrant-ubuntu-trusty-64": jsonFilePath = "testdata/healthchecks.json" + case "/api/nodes/rabbit@vagrant-ubuntu-trusty-64/memory": + jsonFilePath = "testdata/memory.json" default: panic("Cannot handle request") } @@ -129,6 +132,26 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) { "io_write_avg_time_rate": 4.32, "io_write_bytes": 823, "io_write_bytes_rate": 32.8, + "mem_connection_readers": 1234, + "mem_connection_writers": 5678, + "mem_connection_channels": 1133, + "mem_connection_other": 2840, + "mem_queue_procs": 2840, + "mem_queue_slave_procs": 0, + "mem_plugins": 1755976, + "mem_other_proc": 23056584, + "mem_metrics": 196536, + "mem_mgmt_db": 491272, + "mem_mnesia": 115600, + "mem_other_ets": 2121872, + "mem_binary": 418848, + "mem_msg_index": 42848, + "mem_code": 25179322, + "mem_atom": 1041593, + "mem_other_system": 14741981, + "mem_allocated_unused": 38208528, + "mem_reserved_unallocated": 0, + "mem_total": 83025920, } compareMetrics(t, nodeMetrics, acc, "rabbitmq_node") diff --git a/plugins/inputs/rabbitmq/testdata/memory.json b/plugins/inputs/rabbitmq/testdata/memory.json new file mode 100644 index 000000000..da252eb61 --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/memory.json @@ -0,0 +1,24 @@ +{ + "memory": { + "connection_readers": 1234, + "connection_writers": 5678, + "connection_channels": 1133, + "connection_other": 2840, + "queue_procs": 2840, + "queue_slave_procs": 0, + "plugins": 1755976, + "other_proc": 23056584, + "metrics": 196536, + "mgmt_db": 491272, + "mnesia": 115600, + "other_ets": 2121872, + "binary": 418848, + "msg_index": 42848, + "code": 25179322, + "atom": 1041593, + "other_system": 14741981, + "allocated_unused": 38208528, + "reserved_unallocated": 0, + "total": 83025920 + } +} \ No newline at end of file From e8a119b07b7c2094fa8609a8747dc2376f98bc31 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 29 Aug 2019 17:14:21 -0700 Subject: [PATCH 1159/1815] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d91b574b3..bbca6cf19 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,10 @@ - [azure_storage_queue](/plugins/inputs/azure_storage_queue/README.md) - Contributed by @mjiderhamn +#### Features + +- [#6326](https://github.com/influxdata/telegraf/pull/5842): Add per node memory stats to rabbitmq input. 
+ ## v1.12 [unreleased] #### Release Notes From 9463b894f27a22a53df1d3761791f7af2dcb8c57 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 3 Sep 2019 10:58:37 -0700 Subject: [PATCH 1160/1815] Fix links in changelog --- CHANGELOG.md | 4 ++-- plugins/parsers/form_urlencoded/README.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bbca6cf19..78a5dda27 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,11 +25,11 @@ - [logstash](/plugins/inputs/logstash/README.md) - Contributed by @lkmcs @dmitryilyin @arkady-emelyanov - [marklogic](/plugins/inputs/marklogic/README.md) - Contributed by @influxdata - [openntpd](/plugins/inputs/openntpd/README.md) - Contributed by @aromeyer -- [uwsgi](/plugins/inputs/uswgi/README.md) - Contributed by @blaggacao +- [uwsgi](/plugins/inputs/uwsgi/README.md) - Contributed by @blaggacao #### New Parsers -- [form_urlencoded](/plugins/processors/form_urlencoded/README.md) - Contributed by @byonchev +- [form_urlencoded](/plugins/parsers/form_urlencoded/README.md) - Contributed by @byonchev #### New Processors diff --git a/plugins/parsers/form_urlencoded/README.md b/plugins/parsers/form_urlencoded/README.md index 0a07b7b99..e3700f44e 100644 --- a/plugins/parsers/form_urlencoded/README.md +++ b/plugins/parsers/form_urlencoded/README.md @@ -53,5 +53,5 @@ Output: mymetric,tag1=foo field1=0.42,field2=42 ``` -[query_string]: https://en.wikipedia.org/wiki/Query_string +[query string]: https://en.wikipedia.org/wiki/Query_string [http_listener_v2]: /plugins/inputs/http_listener_v2 From c8f8a42035ce40a987cd04c8b1c7ba7ae6322fec Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 3 Sep 2019 11:17:34 -0700 Subject: [PATCH 1161/1815] Set 1.12.0 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 78a5dda27..09c86989b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,7 @@ - [#6326](https://github.com/influxdata/telegraf/pull/5842): Add per node memory stats to rabbitmq input. 
-## v1.12 [unreleased] +## v1.12 [2019-09-03] #### Release Notes From 11d40a9f0afd048ec75173387d00027d4bb8723c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 4 Sep 2019 19:05:39 -0700 Subject: [PATCH 1162/1815] Switch to fork of sarama without zstd dependency (#6349) --- Gopkg.lock | 15 ++++----------- Gopkg.toml | 3 ++- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index c38cbb506..aa6068ebb 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -77,14 +77,6 @@ revision = "1f7cd6cfe0adea687ad44a512dfe76140f804318" version = "v10.12.0" -[[projects]] - digest = "1:82041ab48e5c76da656b723fdc13a2b9ec716cdc736f82adaac77f5c39d4fca8" - name = "github.com/DataDog/zstd" - packages = ["."] - pruneopts = "" - revision = "2347a397da4ee9c6b8226d4aff82c302d0e52773" - version = "v1.4.1" - [[projects]] branch = "master" digest = "1:298712a3ee36b59c3ca91f4183bd75d174d5eaa8b4aed5072831f126e2e752f6" @@ -105,12 +97,12 @@ version = "v0.4.9" [[projects]] - digest = "1:5dd52495eaf9fad11f4742f341166aa9eb68f70061fc1a9b546f9481b284b6d8" + digest = "1:322bf7f4bb312294fc551f6e2c82d02f2ab8f94920f4163b3deeb07a8141ac79" name = "github.com/Shopify/sarama" packages = ["."] pruneopts = "" - revision = "46c83074a05474240f9620fb7c70fb0d80ca401a" - version = "v1.23.1" + revision = "b12709e6ca29240128c89fe0b30b6a76be42b457" + source = "https://github.com/influxdata/sarama.git" [[projects]] digest = "1:f82b8ac36058904227087141017bb82f4b0fc58272990a4cdae3e2d6d222644e" @@ -1200,6 +1192,7 @@ name = "github.com/vmware/govmomi" packages = [ ".", + "find", "list", "nfc", "object", diff --git a/Gopkg.toml b/Gopkg.toml index 6848b947f..85eb7659a 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -148,7 +148,8 @@ [[constraint]] name = "github.com/Shopify/sarama" - version = "1.18.0" + revision = "b12709e6ca29240128c89fe0b30b6a76be42b457" + source = "https://github.com/influxdata/sarama.git" [[constraint]] name = "github.com/soniah/gosnmp" From be7c71a39bd07a97303daf52a4abb5c9529f3d27 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 4 Sep 2019 19:07:57 -0700 Subject: [PATCH 1163/1815] Update changelog --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 09c86989b..f79986901 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,12 @@ - [#6326](https://github.com/influxdata/telegraf/pull/5842): Add per node memory stats to rabbitmq input. +## v1.12.1 [unreleased] + +#### Bugfixes + +- [#6344](https://github.com/influxdata/telegraf/pull/6344): Fix depends on GLIBC_2.14 symbol version. + ## v1.12 [2019-09-03] #### Release Notes @@ -111,6 +117,8 @@ ## v1.11.5 [2019-08-27] +#### Bugfixes + - [#6250](https://github.com/influxdata/telegraf/pull/6250): Update go-sql-driver/mysql driver to 1.4.1 to address auth issues. - [#6279](https://github.com/influxdata/telegraf/issues/6279): Return error status from --test if input plugins produce an error. - [#6309](https://github.com/influxdata/telegraf/issues/6309): Fix with multiple instances only last configuration is used in smart input. 
From 76e7b57fcd1cd3cf61891020b94ec7ffa7407265 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 6 Sep 2019 12:35:33 -0700 Subject: [PATCH 1164/1815] Fix could not mark message delivered error in kafka_consumer (#6363) --- plugins/inputs/kafka_consumer/kafka_consumer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 2703bb52d..997988ca6 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -360,8 +360,8 @@ func (h *ConsumerGroupHandler) Handle(session sarama.ConsumerGroupSession, msg * } } - id := h.acc.AddTrackingMetricGroup(metrics) h.mu.Lock() + id := h.acc.AddTrackingMetricGroup(metrics) h.undelivered[id] = Message{session: session, message: msg} h.mu.Unlock() return nil From 7d2cffe0566a8500c819d9cf22db575024a2a882 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 6 Sep 2019 12:37:17 -0700 Subject: [PATCH 1165/1815] Convert check state to an integer in icinga2 input (#6333) --- plugins/inputs/icinga2/icinga2.go | 16 +-- plugins/inputs/icinga2/icinga2_test.go | 141 ++++++++++++++----------- 2 files changed, 91 insertions(+), 66 deletions(-) diff --git a/plugins/inputs/icinga2/icinga2.go b/plugins/inputs/icinga2/icinga2.go index 37590ab8b..82120da2c 100644 --- a/plugins/inputs/icinga2/icinga2.go +++ b/plugins/inputs/icinga2/icinga2.go @@ -38,10 +38,10 @@ type Object struct { } type Attribute struct { - CheckCommand string `json:"check_command"` - DisplayName string `json:"display_name"` - Name string `json:"name"` - State int `json:"state"` + CheckCommand string `json:"check_command"` + DisplayName string `json:"display_name"` + Name string `json:"name"` + State float64 `json:"state"` } var levels = []string{"ok", "warning", "critical", "unknown"} @@ -51,7 +51,7 @@ type ObjectType string var sampleConfig = ` ## Required Icinga2 server address (default: "https://localhost:5665") # server = "https://localhost:5665" - + ## Required Icinga2 object type ("services" or "hosts, default "services") # object_type = "services" @@ -88,12 +88,14 @@ func (i *Icinga2) GatherStatus(acc telegraf.Accumulator, checks []Object) { log.Fatal(err) } + state := int64(check.Attrs.State) + fields["name"] = check.Attrs.Name - fields["state_code"] = check.Attrs.State + fields["state_code"] = state tags["display_name"] = check.Attrs.DisplayName tags["check_command"] = check.Attrs.CheckCommand - tags["state"] = levels[check.Attrs.State] + tags["state"] = levels[state] tags["source"] = url.Hostname() tags["scheme"] = url.Scheme tags["port"] = url.Port() diff --git a/plugins/inputs/icinga2/icinga2_test.go b/plugins/inputs/icinga2/icinga2_test.go index ad9268347..e62a8d423 100644 --- a/plugins/inputs/icinga2/icinga2_test.go +++ b/plugins/inputs/icinga2/icinga2_test.go @@ -3,89 +3,112 @@ package icinga2 import ( "encoding/json" "testing" + "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" ) func TestGatherServicesStatus(t *testing.T) { - - s := `{"results":[ - { - "attrs": { - "check_command": "check-bgp-juniper-netconf", - "display_name": "eq-par.dc2.fr", - "name": "ef017af8-c684-4f3f-bb20-0dfe9fcd3dbe", - "state": 0 - }, - "joins": {}, - "meta": {}, - "name": "eq-par.dc2.fr!ef017af8-c684-4f3f-bb20-0dfe9fcd3dbe", - "type": "Service" - } - ]}` + s := `{ + "results": [ + { + "attrs": { + "check_command": "check-bgp-juniper-netconf", + "display_name": "eq-par.dc2.fr", + "name": 
"ef017af8-c684-4f3f-bb20-0dfe9fcd3dbe", + "state": 0 + }, + "joins": {}, + "meta": {}, + "name": "eq-par.dc2.fr!ef017af8-c684-4f3f-bb20-0dfe9fcd3dbe", + "type": "Service" + } + ] +} +` checks := Result{} json.Unmarshal([]byte(s), &checks) - fields := map[string]interface{}{ - "name": "ef017af8-c684-4f3f-bb20-0dfe9fcd3dbe", - "state_code": 0, - } - tags := map[string]string{ - "display_name": "eq-par.dc2.fr", - "check_command": "check-bgp-juniper-netconf", - "state": "ok", - "source": "localhost", - "port": "5665", - "scheme": "https", - } - - var acc testutil.Accumulator icinga2 := new(Icinga2) icinga2.ObjectType = "services" icinga2.Server = "https://localhost:5665" + + var acc testutil.Accumulator icinga2.GatherStatus(&acc, checks.Results) - acc.AssertContainsTaggedFields(t, "icinga2_services", fields, tags) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "icinga2_services", + map[string]string{ + "display_name": "eq-par.dc2.fr", + "check_command": "check-bgp-juniper-netconf", + "state": "ok", + "source": "localhost", + "port": "5665", + "scheme": "https", + }, + map[string]interface{}{ + "name": "ef017af8-c684-4f3f-bb20-0dfe9fcd3dbe", + "state_code": 0, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) } func TestGatherHostsStatus(t *testing.T) { - - s := `{"results":[ - { - "attrs": { - "name": "webserver", - "address": "192.168.1.1", - "check_command": "ping", - "display_name": "apache", - "state": 2 - }, - "joins": {}, - "meta": {}, - "name": "webserver", - "type": "Host" - } - ]}` + s := `{ + "results": [ + { + "attrs": { + "address": "192.168.1.1", + "check_command": "ping", + "display_name": "apache", + "name": "webserver", + "state": 2.0 + }, + "joins": {}, + "meta": {}, + "name": "webserver", + "type": "Host" + } + ] +} +` checks := Result{} json.Unmarshal([]byte(s), &checks) - fields := map[string]interface{}{ - "name": "webserver", - "state_code": 2, - } - tags := map[string]string{ - "display_name": "apache", - "check_command": "ping", - "state": "critical", - "source": "localhost", - "port": "5665", - "scheme": "https", - } var acc testutil.Accumulator icinga2 := new(Icinga2) icinga2.ObjectType = "hosts" icinga2.Server = "https://localhost:5665" + icinga2.GatherStatus(&acc, checks.Results) - acc.AssertContainsTaggedFields(t, "icinga2_hosts", fields, tags) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "icinga2_hosts", + map[string]string{ + "display_name": "apache", + "check_command": "ping", + "state": "critical", + "source": "localhost", + "port": "5665", + "scheme": "https", + }, + map[string]interface{}{ + "name": "webserver", + "state_code": 2, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) } From 7ac5dc541683ecf86fa5dbe8100a1bea847948fc Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 6 Sep 2019 12:38:20 -0700 Subject: [PATCH 1166/1815] Fix filecount for paths with trailing slash (#6332) --- plugins/inputs/filecount/filecount.go | 4 +-- plugins/inputs/filecount/filecount_test.go | 33 ++++++++++++++++++++++ 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/filecount/filecount.go b/plugins/inputs/filecount/filecount.go index 929ec66a7..965f41d2c 100644 --- a/plugins/inputs/filecount/filecount.go +++ b/plugins/inputs/filecount/filecount.go @@ -267,11 +267,11 @@ func (fc *FileCount) onlyDirectories(directories []string) []string { func (fc *FileCount) 
getDirs() []string { dirs := make([]string, len(fc.Directories)) for i, dir := range fc.Directories { - dirs[i] = dir + dirs[i] = filepath.Clean(dir) } if fc.Directory != "" { - dirs = append(dirs, fc.Directory) + dirs = append(dirs, filepath.Clean(fc.Directory)) } return dirs diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go index 99213104b..9cd7c747c 100644 --- a/plugins/inputs/filecount/filecount_test.go +++ b/plugins/inputs/filecount/filecount_test.go @@ -2,11 +2,13 @@ package filecount import ( "os" + "path/filepath" "runtime" "strings" "testing" "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -117,6 +119,37 @@ func TestMTimeFilter(t *testing.T) { fileCountEquals(t, fc, len(matches), 0) } +// Paths with a trailing slash will not exactly match paths produced during the +// walk as these paths are cleaned before being returned from godirwalk. #6329 +func TestDirectoryWithTrailingSlash(t *testing.T) { + plugin := &FileCount{ + Directories: []string{getTestdataDir() + string(filepath.Separator)}, + Name: "*", + Recursive: true, + Fs: getFakeFileSystem(getTestdataDir()), + } + + var acc testutil.Accumulator + err := plugin.Gather(&acc) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "filecount", + map[string]string{ + "directory": getTestdataDir(), + }, + map[string]interface{}{ + "count": 9, + "size_bytes": 5096, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} + func getNoFilterFileCount() FileCount { return FileCount{ Directories: []string{getTestdataDir()}, From 9e5bd8cc150617e965fb9f7a5d0fa8a4d7c9d261 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 6 Sep 2019 12:42:07 -0700 Subject: [PATCH 1167/1815] Update changelog --- CHANGELOG.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f79986901..bd8338fd6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,7 +12,10 @@ #### Bugfixes -- [#6344](https://github.com/influxdata/telegraf/pull/6344): Fix depends on GLIBC_2.14 symbol version. +- [#6344](https://github.com/influxdata/telegraf/issues/6344): Fix depends on GLIBC_2.14 symbol version. +- [#6329](https://github.com/influxdata/telegraf/issues/6329): Fix filecount for paths with trailing slash. +- [#6331](https://github.com/influxdata/telegraf/issues/6331): Convert check state to an integer in icinga2 input. +- [#6354](https://github.com/influxdata/telegraf/issues/6354): Fix could not mark message delivered error in kafka_consumer. 
## v1.12 [2019-09-03] From 23cddef30a737cdda6585a2781079b571e97a355 Mon Sep 17 00:00:00 2001 From: Mitchell Bradd Date: Tue, 10 Sep 2019 03:23:44 +1000 Subject: [PATCH 1168/1815] Fix broken exec output link in README (#6371) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2616d920d..f618daa4e 100644 --- a/README.md +++ b/README.md @@ -371,7 +371,7 @@ For documentation on the latest development code see the [documentation index][d * [datadog](./plugins/outputs/datadog) * [discard](./plugins/outputs/discard) * [elasticsearch](./plugins/outputs/elasticsearch) -* [exec](./plugins/output/exec) +* [exec](./plugins/outputs/exec) * [file](./plugins/outputs/file) * [graphite](./plugins/outputs/graphite) * [graylog](./plugins/outputs/graylog) From a4078da648a57847d628f6abc730997ceca55666 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 9 Sep 2019 15:55:46 -0700 Subject: [PATCH 1169/1815] Skip collection stats when disabled in mongodb input (#6364) --- plugins/inputs/mongodb/README.md | 2 ++ plugins/inputs/mongodb/mongodb.go | 5 +++- plugins/inputs/mongodb/mongodb_server.go | 10 +++++-- plugins/inputs/mongodb/mongostat.go | 35 ++++++++++++------------ 4 files changed, 31 insertions(+), 21 deletions(-) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index e1e4988e8..5772f4fc3 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -13,8 +13,10 @@ ## When true, collect per database stats # gather_perdb_stats = false + ## When true, collect per collection stats # gather_col_stats = false + ## List of db where collections stats are collected ## If empty, all db are concerned # col_stats_dbs = ["local"] diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 696d595e6..14fcce12e 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -42,8 +42,10 @@ var sampleConfig = ` ## When true, collect per database stats # gather_perdb_stats = false + ## When true, collect per collection stats # gather_col_stats = false + ## List of db where collections stats are collected ## If empty, all db are concerned # col_stats_dbs = ["local"] @@ -177,7 +179,8 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error { func init() { inputs.Add("mongodb", func() telegraf.Input { return &MongoDB{ - mongos: make(map[string]*Server), + ColStatsDbs: []string{"local"}, + mongos: make(map[string]*Server), } }) } diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index 404fa8143..e6e66a2a4 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -227,9 +227,13 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool, gather authLogLevel(err), err) } - collectionStats, err := s.gatherCollectionStats(colStatsDbs) - if err != nil { - return err + var collectionStats *ColStats + if gatherColStats { + stats, err := s.gatherCollectionStats(colStatsDbs) + if err != nil { + return err + } + collectionStats = stats } dbStats := &DbStats{} diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index d82100974..44c071a2f 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -961,24 +961,25 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } } - newColStats := *newMongo.ColStats - for _, col := range 
newColStats.Collections { - colStatsData := col.ColStatsData - // mongos doesn't have the db key, so setting the db name - if colStatsData.Collection == "" { - colStatsData.Collection = col.Name + if newMongo.ColStats != nil { + for _, col := range newMongo.ColStats.Collections { + colStatsData := col.ColStatsData + // mongos doesn't have the db key, so setting the db name + if colStatsData.Collection == "" { + colStatsData.Collection = col.Name + } + colStatLine := &ColStatLine{ + Name: colStatsData.Collection, + DbName: col.DbName, + Count: colStatsData.Count, + Size: colStatsData.Size, + AvgObjSize: colStatsData.AvgObjSize, + StorageSize: colStatsData.StorageSize, + TotalIndexSize: colStatsData.TotalIndexSize, + Ok: colStatsData.Ok, + } + returnVal.ColStatsLines = append(returnVal.ColStatsLines, *colStatLine) } - colStatLine := &ColStatLine{ - Name: colStatsData.Collection, - DbName: col.DbName, - Count: colStatsData.Count, - Size: colStatsData.Size, - AvgObjSize: colStatsData.AvgObjSize, - StorageSize: colStatsData.StorageSize, - TotalIndexSize: colStatsData.TotalIndexSize, - Ok: colStatsData.Ok, - } - returnVal.ColStatsLines = append(returnVal.ColStatsLines, *colStatLine) } // Set shard stats From f041ace3655191bfa51984f2b2b031fd4966013a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 9 Sep 2019 15:58:20 -0700 Subject: [PATCH 1170/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bd8338fd6..50a2887ac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ - [#6329](https://github.com/influxdata/telegraf/issues/6329): Fix filecount for paths with trailing slash. - [#6331](https://github.com/influxdata/telegraf/issues/6331): Convert check state to an integer in icinga2 input. - [#6354](https://github.com/influxdata/telegraf/issues/6354): Fix could not mark message delivered error in kafka_consumer. +- [#6362](https://github.com/influxdata/telegraf/issues/6362): Skip collection stats when disabled in mongodb input. ## v1.12 [2019-09-03] From 5dcd0daa4295d8fec49c0ad34487835af58c5d18 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 9 Sep 2019 17:50:46 -0700 Subject: [PATCH 1171/1815] Document the pros and cons of increasing the metric_buffer_limit --- docs/CONFIGURATION.md | 4 +++- internal/config/config.go | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 071a92f1c..36feac791 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -112,7 +112,9 @@ The agent table configures Telegraf and the defaults used across all plugins. This controls the size of writes that Telegraf sends to output plugins. - **metric_buffer_limit**: - Maximum number of unwritten metrics per output. + Maximum number of unwritten metrics per output. Increasing this value + allows for longer periods of output downtime without dropping metrics at the + cost of higher maximum memory usage. - **collection_jitter**: Collection jitter is used to jitter the collection by a random [interval][]. diff --git a/internal/config/config.go b/internal/config/config.go index 43866a32e..d7fe11427 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -257,7 +257,9 @@ var agentConfig = ` ## This controls the size of writes that Telegraf sends to output plugins. metric_batch_size = 1000 - ## Maximum number of unwritten metrics per output. + ## Maximum number of unwritten metrics per output. 
Increasing this value + ## allows for longer periods of output downtime without dropping metrics at the + ## cost of higher maximum memory usage. metric_buffer_limit = 10000 ## Collection jitter is used to jitter the collection by a random amount. From cd99ceea62b370d8c8f692386c11a094788315ac Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Tue, 10 Sep 2019 10:55:39 -0700 Subject: [PATCH 1172/1815] Fixing spelling in bug template (#6374) --- .github/ISSUE_TEMPLATE/Bug_report.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/Bug_report.md b/.github/ISSUE_TEMPLATE/Bug_report.md index 49cfdefe3..188df248e 100644 --- a/.github/ISSUE_TEMPLATE/Bug_report.md +++ b/.github/ISSUE_TEMPLATE/Bug_report.md @@ -5,7 +5,7 @@ about: Create a report to help us improve --- ### Relevant telegraf.conf: - + ```toml ``` From 15dd43344dcce709c5acb5faac801b8463e5bd81 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 10 Sep 2019 11:04:24 -0700 Subject: [PATCH 1173/1815] Fix error reading closed response body on redirect (#6372) --- plugins/inputs/http_response/http_response.go | 13 +-- .../http_response/http_response_test.go | 84 +++++++++++-------- 2 files changed, 53 insertions(+), 44 deletions(-) diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index acab62b94..b863190d7 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -141,7 +141,7 @@ func (h *HTTPResponse) createHttpClient() (*http.Client, error) { if h.FollowRedirects == false { client.CheckRedirect = func(req *http.Request, via []*http.Request) error { - return ErrRedirectAttempted + return http.ErrUseLastResponse } } return client, nil @@ -254,16 +254,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] // Any error not recognized by `set_error` is considered a "connection_failed" setResult("connection_failed", fields, tags) - - // If the error is a redirect we continue processing and log the HTTP code - urlError, isUrlError := err.(*url.Error) - if !h.FollowRedirects && isUrlError && urlError.Err == ErrRedirectAttempted { - err = nil - } else { - // If the error isn't a timeout or a redirect stop - // processing the request - return fields, tags, nil - } + return fields, tags, nil } if _, ok := fields["response_time"]; !ok { diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 44563973b..5ba586c59 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -10,9 +10,9 @@ import ( "testing" "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -36,6 +36,7 @@ func checkAbsentTags(t *testing.T, tags []string, acc *testutil.Accumulator) { // Receives a dictionary and with expected fields and their values. 
If a value is nil, it will only check // that the field exists, but not its contents func checkFields(t *testing.T, fields map[string]interface{}, acc *testutil.Accumulator) { + t.Helper() for key, field := range fields { switch v := field.(type) { case int: @@ -121,6 +122,7 @@ func setUpTestMux() http.Handler { } func checkOutput(t *testing.T, acc *testutil.Accumulator, presentFields map[string]interface{}, presentTags map[string]interface{}, absentFields []string, absentTags []string) { + t.Helper() if presentFields != nil { checkFields(t, presentFields, acc) } @@ -651,12 +653,11 @@ func TestTimeout(t *testing.T) { checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags) } -func TestPluginErrors(t *testing.T) { +func TestBadRegex(t *testing.T) { mux := setUpTestMux() ts := httptest.NewServer(mux) defer ts.Close() - // Bad regex test. Should return an error and return nothing h := &HTTPResponse{ Address: ts.URL + "/good", Body: "{ 'test': 'data'}", @@ -676,36 +677,6 @@ func TestPluginErrors(t *testing.T) { absentFields := []string{"http_response_code", "response_time", "content_length", "response_string_match", "result_type", "result_code"} absentTags := []string{"status_code", "result", "server", "method"} checkOutput(t, &acc, nil, nil, absentFields, absentTags) - - // Attempt to read empty body test - h = &HTTPResponse{ - Address: ts.URL + "/redirect", - Body: "", - Method: "GET", - ResponseStringMatch: ".*", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, - FollowRedirects: false, - } - - acc = testutil.Accumulator{} - err = h.Gather(&acc) - require.NoError(t, err) - - expectedFields := map[string]interface{}{ - "http_response_code": http.StatusMovedPermanently, - "response_string_match": 0, - "result_type": "body_read_error", - "result_code": 2, - "response_time": nil, - "content_length": nil, - } - expectedTags := map[string]interface{}{ - "server": nil, - "method": "GET", - "status_code": "301", - "result": "body_read_error", - } - checkOutput(t, &acc, expectedFields, expectedTags, nil, nil) } func TestNetworkErrors(t *testing.T) { @@ -827,3 +798,50 @@ func TestContentLength(t *testing.T) { absentFields = []string{"response_string_match"} checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) } + +func TestRedirect(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Location", "http://example.org") + w.WriteHeader(http.StatusMovedPermanently) + w.Write([]byte("test")) + }) + + plugin := &HTTPResponse{ + URLs: []string{ts.URL}, + ResponseStringMatch: "test", + } + + var acc testutil.Accumulator + err := plugin.Gather(&acc) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "http_response", + map[string]string{ + "server": ts.URL, + "method": "GET", + "result": "success", + "status_code": "301", + }, + map[string]interface{}{ + "result_code": 0, + "result_type": "success", + "http_response_code": 301, + "response_string_match": 1, + "content_length": 4, + }, + time.Unix(0, 0), + ), + } + + actual := acc.GetTelegrafMetrics() + for _, m := range actual { + m.RemoveField("response_time") + } + + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) +} From d7c85768a40c818a2a46e29407ed69a79f3b0418 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 10 Sep 2019 11:07:13 -0700 Subject: [PATCH 1174/1815] Update changelog --- CHANGELOG.md | 1 + 1 
file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 50a2887ac..0dbe2c059 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ - [#6331](https://github.com/influxdata/telegraf/issues/6331): Convert check state to an integer in icinga2 input. - [#6354](https://github.com/influxdata/telegraf/issues/6354): Fix could not mark message delivered error in kafka_consumer. - [#6362](https://github.com/influxdata/telegraf/issues/6362): Skip collection stats when disabled in mongodb input. +- [#6366](https://github.com/influxdata/telegraf/issues/6366): Fix error reading closed response body on redirect in http_response. ## v1.12 [2019-09-03] From 88b60a2e9bc6fe363e7dbe2f7e2c82baf0b4c766 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 10 Sep 2019 12:46:47 -0600 Subject: [PATCH 1175/1815] Return error rather than default a retry value (#6376) --- plugins/outputs/influxdb_v2/http.go | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index 47b736844..fbfdf6958 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -240,17 +240,28 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te return nil case http.StatusUnauthorized, http.StatusForbidden: return fmt.Errorf("failed to write metric: %s", desc) - case http.StatusTooManyRequests, http.StatusServiceUnavailable: + case http.StatusTooManyRequests: retryAfter := resp.Header.Get("Retry-After") retry, err := strconv.Atoi(retryAfter) if err != nil { - retry = 0 + return errors.New("rate limit exceeded") } if retry > defaultMaxWait { retry = defaultMaxWait } c.retryTime = time.Now().Add(time.Duration(retry) * time.Second) - return fmt.Errorf("Waiting %ds for server before sending metric again", retry) + return fmt.Errorf("waiting %ds for server before sending metric again", retry) + case http.StatusServiceUnavailable: + retryAfter := resp.Header.Get("Retry-After") + retry, err := strconv.Atoi(retryAfter) + if err != nil { + return errors.New("server responded: service unavailable") + } + if retry > defaultMaxWait { + retry = defaultMaxWait + } + c.retryTime = time.Now().Add(time.Duration(retry) * time.Second) + return fmt.Errorf("waiting %ds for server before sending metric again", retry) } // This is only until platform spec is fully implemented. As of the From 59c43b644fb4b86fe12283dd088e175117caa39b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 10 Sep 2019 11:51:34 -0700 Subject: [PATCH 1176/1815] Fix apcupsd documentation to reflect actual plugin (#6377) --- plugins/inputs/apcupsd/README.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/apcupsd/README.md b/plugins/inputs/apcupsd/README.md index 52edeafe6..be79ab4a8 100644 --- a/plugins/inputs/apcupsd/README.md +++ b/plugins/inputs/apcupsd/README.md @@ -1,4 +1,4 @@ -# apcupsd Input Plugin +# APCUPSD Input Plugin This plugin reads data from an apcupsd daemon over its NIS network protocol. @@ -23,10 +23,10 @@ apcupsd should be installed and it's daemon should be running. - apcupsd - tags: - serial + - status (string representing the set status_flags) - ups_name - - status - fields: - - online + - status_flags ([status-bits][]) - input_voltage - load_percent - battery_charge_percent @@ -38,8 +38,11 @@ apcupsd should be installed and it's daemon should be running. 
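(Editor's aside, not part of the patch: the status_flags field in the example output just below is a bitmask whose bit meanings are listed on the linked [status-bits] page. A minimal sketch of decoding it, assuming the bit positions given in the apcupsd manual; verify them there before relying on this.)

```go
package main

import "fmt"

// Assumed bit positions, per the apcupsd status-bits documentation;
// these are an editorial assumption, not defined by the patch.
const (
	statusCalibration = 0x01
	statusTrim        = 0x02
	statusBoost       = 0x04
	statusOnline      = 0x08 // consistent with status_flags=8i / status=ONLINE below
	statusOnBattery   = 0x10
	statusOverload    = 0x20
	statusLowBattery  = 0x40
	statusReplaceBatt = 0x80
)

func main() {
	flags := uint64(8) // status_flags value from the example output
	fmt.Println("online:", flags&statusOnline != 0)        // true
	fmt.Println("on battery:", flags&statusOnBattery != 0) // false
}
```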
 - time_on_battery_ns
+
 ### Example output

 ```
-apcupsd,serial=AS1231515,ups_name=name1,host=server1 time_on_battery=0,load_percent=9.7,time_left_minutes=98,output_voltage=230.4,internal_temp=32.4,battery_voltage=27.4,input_frequency=50.2,online=true,input_voltage=230.4,battery_charge_percent=100 1490035922000000000
+apcupsd,serial=AS1231515,status=ONLINE,ups_name=name1 time_on_battery=0,load_percent=9.7,time_left_minutes=98,output_voltage=230.4,internal_temp=32.4,battery_voltage=27.4,input_frequency=50.2,input_voltage=230.4,battery_charge_percent=100,status_flags=8i 1490035922000000000
 ```
+
+[status-bits]: http://www.apcupsd.org/manual/manual.html#status-bits

From f4cad12dcb589f19046f9f900d357166fd7b8b56 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 10 Sep 2019 11:55:42 -0700
Subject: [PATCH 1177/1815] Update changelog

---
 CHANGELOG.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0dbe2c059..ad9d84c36 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -18,6 +18,8 @@
 - [#6354](https://github.com/influxdata/telegraf/issues/6354): Fix could not mark message delivered error in kafka_consumer.
 - [#6362](https://github.com/influxdata/telegraf/issues/6362): Skip collection stats when disabled in mongodb input.
 - [#6366](https://github.com/influxdata/telegraf/issues/6366): Fix error reading closed response body on redirect in http_response.
+- [#6373](https://github.com/influxdata/telegraf/issues/6373): Fix apcupsd documentation to reflect plugin.
+- [#6375](https://github.com/influxdata/telegraf/issues/6375): Display retry log message only when retry after is received.

 ## v1.12 [2019-09-03]

From bae12dde1e2df495e647459e2d882a1f0604331b Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 10 Sep 2019 12:05:36 -0700
Subject: [PATCH 1178/1815] Set 1.12.1 release date

---
 CHANGELOG.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index ad9d84c36..15665dc86 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,7 +8,7 @@
 - [#6326](https://github.com/influxdata/telegraf/pull/5842): Add per node memory stats to rabbitmq input.

-## v1.12.1 [unreleased]
+## v1.12.1 [2019-09-10]

From d717e8ea03812e67f787dd874392f4cd0c6fd89c Mon Sep 17 00:00:00 2001
From: Boris Popovschi
Date: Thu, 12 Sep 2019 23:38:35 +0300
Subject: [PATCH 1179/1815] Add ability to read query from file to postgresql_extensible input (#6361)

---
 .../inputs/postgresql_extensible/README.md    |  5 ++-
 .../postgresql_extensible.go                  | 36 ++++++++++++++++++-
 .../postgresql_extensible_test.go             | 27 +++++++++++++-
 .../postgresql_extensible/testdata/test.sql   |  1 +
 4 files changed, 66 insertions(+), 3 deletions(-)
 create mode 100644 plugins/inputs/postgresql_extensible/testdata/test.sql

diff --git a/plugins/inputs/postgresql_extensible/README.md b/plugins/inputs/postgresql_extensible/README.md
index 29c5e36d8..4621e46b5 100644
--- a/plugins/inputs/postgresql_extensible/README.md
+++ b/plugins/inputs/postgresql_extensible/README.md
@@ -44,6 +44,9 @@ The example below has two queries specified, with the following parameters:
 # Be careful that if the withdbname is set to false you don't have to define
 # the where clause (aka with the dbname)
 #
+ # The script option can be used to specify the .sql file path.
+ # If the script and sqlquery options are specified at the same time, sqlquery will be used
+ #
 # the tagvalue field is used to define custom tags (separated by commas).
 # the query is expected to return columns which match the names of the
 # defined tags.
The values in these columns must be of a string-type, @@ -61,7 +64,7 @@ The example below has two queries are specified, with the following parameters: withdbname=false tagvalue="" [[inputs.postgresql_extensible.query]] - sqlquery="SELECT * FROM pg_stat_bgwriter" + script="your_sql-filepath.sql" version=901 withdbname=false tagvalue="" diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index c2bcb7b60..05e57583f 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -3,7 +3,9 @@ package postgresql_extensible import ( "bytes" "fmt" + "io/ioutil" "log" + "os" "strings" // register in driver. @@ -25,6 +27,7 @@ type Postgresql struct { type query []struct { Sqlquery string + Script string Version int Withdbname bool Tagvalue string @@ -75,7 +78,10 @@ var sampleConfig = ` ## field is used to define custom tags (separated by commas) ## The optional "measurement" value can be used to override the default ## output measurement name ("postgresql"). - # + ## + ## The script option can be used to specify the .sql file path. + ## If script and sqlquery options specified at same time, sqlquery will be used + ## ## Structure : ## [[inputs.postgresql_extensible.query]] ## sqlquery string @@ -96,6 +102,19 @@ var sampleConfig = ` tagvalue="postgresql.stats" ` +func (p *Postgresql) Init() error { + var err error + for i := range p.Query { + if p.Query[i].Sqlquery == "" { + p.Query[i].Sqlquery, err = ReadQueryFromFile(p.Query[i].Script) + if err != nil { + return err + } + } + } + return nil +} + func (p *Postgresql) SampleConfig() string { return sampleConfig } @@ -108,6 +127,20 @@ func (p *Postgresql) IgnoredColumns() map[string]bool { return ignoredColumns } +func ReadQueryFromFile(filePath string) (string, error) { + file, err := os.Open(filePath) + if err != nil { + return "", err + } + defer file.Close() + + query, err := ioutil.ReadAll(file) + if err != nil { + return "", err + } + return string(query), err +} + func (p *Postgresql) Gather(acc telegraf.Accumulator) error { var ( err error @@ -131,6 +164,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { for i := range p.Query { sql_query = p.Query[i].Sqlquery tag_value = p.Query[i].Tagvalue + if p.Query[i].Measurement != "" { meas_name = p.Query[i].Measurement } else { diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go index 1ed62a1cd..7fbc34302 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go @@ -25,7 +25,7 @@ func queryRunner(t *testing.T, q query) *testutil.Accumulator { } var acc testutil.Accumulator p.Start(&acc) - + p.Init() require.NoError(t, acc.GatherError(p.Gather)) return &acc } @@ -201,6 +201,31 @@ func TestPostgresqlFieldOutput(t *testing.T) { } } +func TestPostgresqlSqlScript(t *testing.T) { + q := query{{ + Script: "testdata/test.sql", + Version: 901, + Withdbname: false, + Tagvalue: "", + }} + p := &Postgresql{ + Service: postgresql.Service{ + Address: fmt.Sprintf( + "host=%s user=postgres sslmode=disable", + testutil.GetLocalHost(), + ), + IsPgBouncer: false, + }, + Databases: []string{"postgres"}, + Query: q, + } + var acc testutil.Accumulator + p.Start(&acc) + p.Init() + + require.NoError(t, acc.GatherError(p.Gather)) +} + func 
TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") diff --git a/plugins/inputs/postgresql_extensible/testdata/test.sql b/plugins/inputs/postgresql_extensible/testdata/test.sql new file mode 100644 index 000000000..49ec02b25 --- /dev/null +++ b/plugins/inputs/postgresql_extensible/testdata/test.sql @@ -0,0 +1 @@ +select * from pg_stat_database \ No newline at end of file From a3df39c91e86cdac97a9fb6f1e1a7c803cd3be16 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 12 Sep 2019 13:39:25 -0700 Subject: [PATCH 1180/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 15665dc86..cd93e476d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ #### Features - [#6326](https://github.com/influxdata/telegraf/pull/5842): Add per node memory stats to rabbitmq input. +- [#6361](https://github.com/influxdata/telegraf/pull/6361): Add ability to read query from file to postgresql_extensible input. ## v1.12.1 [2019-09-10] From 57f58fdbcce39c9b57c711b321e630ebe61819aa Mon Sep 17 00:00:00 2001 From: Adam Flott Date: Thu, 12 Sep 2019 17:52:03 -0400 Subject: [PATCH 1181/1815] Add replication metrics to the redis input(#5921) --- plugins/inputs/redis/README.md | 10 ++++++ plugins/inputs/redis/redis.go | 53 ++++++++++++++++++++++++++++++ plugins/inputs/redis/redis_test.go | 36 ++++++++++++++++++-- 3 files changed, 97 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/redis/README.md b/plugins/inputs/redis/README.md index 38d80b591..207a44750 100644 --- a/plugins/inputs/redis/README.md +++ b/plugins/inputs/redis/README.md @@ -126,6 +126,16 @@ Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) a - usec(int, mircoseconds) - usec_per_call(float, microseconds) +- redis_replication + - tags: + - replication_role + - replica_ip + - replica_port + - state (either "online", "wait_bgsave", or "send_bulk") + + - fields: + - lag(int, number) + - offset(int, number) ### Tags: diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index e89aabb40..715b553c9 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -6,6 +6,7 @@ import ( "io" "log" "net/url" + "regexp" "strconv" "strings" "sync" @@ -48,6 +49,8 @@ func (r *RedisClient) BaseTags() map[string]string { return tags } +var replicationSlaveMetricPrefix = regexp.MustCompile(`^slave\d+`) + var sampleConfig = ` ## specify servers via a url matching: ## [protocol://][:password]@address[:port] @@ -253,6 +256,12 @@ func gatherInfoOutput( gatherCommandstateLine(name, kline, acc, tags) continue } + if section == "Replication" && replicationSlaveMetricPrefix.MatchString(name) { + kline := strings.TrimSpace(parts[1]) + gatherReplicationLine(name, kline, acc, tags) + continue + } + metric = name } @@ -374,6 +383,50 @@ func gatherCommandstateLine( acc.AddFields("redis_cmdstat", fields, tags) } +// Parse the special Replication line +// Example: +// slave0:ip=127.0.0.1,port=7379,state=online,offset=4556468,lag=0 +// This line will only be visible when a node has a replica attached. 
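+// For the example line above, the expectations in redis_test.go show the
+// outcome: tags replica_id="0", replica_ip="127.0.0.1", replica_port="7379",
+// state="online"; fields offset=4556468, lag=0 (editor's note).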
+func gatherReplicationLine( + name string, + line string, + acc telegraf.Accumulator, + global_tags map[string]string, +) { + fields := make(map[string]interface{}) + tags := make(map[string]string) + for k, v := range global_tags { + tags[k] = v + } + + tags["replica_id"] = strings.TrimLeft(name, "slave") + tags["replication_role"] = "slave" + + parts := strings.Split(line, ",") + for _, part := range parts { + kv := strings.Split(part, "=") + if len(kv) != 2 { + continue + } + + switch kv[0] { + case "ip": + tags["replica_ip"] = kv[1] + case "port": + tags["replica_port"] = kv[1] + case "state": + tags[kv[0]] = kv[1] + default: + ival, err := strconv.ParseInt(kv[1], 10, 64) + if err == nil { + fields[kv[0]] = ival + } + } + } + + acc.AddFields("redis_replication", fields, tags) +} + func init() { inputs.Add("redis", func() telegraf.Input { return &Redis{} diff --git a/plugins/inputs/redis/redis_test.go b/plugins/inputs/redis/redis_test.go index b1c3ca3d3..e684225af 100644 --- a/plugins/inputs/redis/redis_test.go +++ b/plugins/inputs/redis/redis_test.go @@ -82,7 +82,7 @@ func TestRedis_ParseMetrics(t *testing.T) { "pubsub_channels": int64(0), "pubsub_patterns": int64(0), "latest_fork_usec": int64(0), - "connected_slaves": int64(0), + "connected_slaves": int64(2), "master_repl_offset": int64(0), "repl_backlog_active": int64(0), "repl_backlog_size": int64(1048576), @@ -134,6 +134,36 @@ func TestRedis_ParseMetrics(t *testing.T) { "usec_per_call": float64(990.0), } acc.AssertContainsTaggedFields(t, "redis_cmdstat", cmdstatCommandFields, cmdstatCommandTags) + + replicationTags := map[string]string{ + "host": "redis.net", + "replication_role": "slave", + "replica_id": "0", + "replica_ip": "127.0.0.1", + "replica_port": "7379", + "state": "online", + } + replicationFields := map[string]interface{}{ + "lag": int64(0), + "offset": int64(4556468), + } + + acc.AssertContainsTaggedFields(t, "redis_replication", replicationFields, replicationTags) + + replicationTags = map[string]string{ + "host": "redis.net", + "replication_role": "slave", + "replica_id": "1", + "replica_ip": "127.0.0.1", + "replica_port": "8379", + "state": "send_bulk", + } + replicationFields = map[string]interface{}{ + "lag": int64(1), + "offset": int64(0), + } + + acc.AssertContainsTaggedFields(t, "redis_replication", replicationFields, replicationTags) } const testOutput = `# Server @@ -209,7 +239,9 @@ latest_fork_usec:0 # Replication role:master -connected_slaves:0 +connected_slaves:2 +slave0:ip=127.0.0.1,port=7379,state=online,offset=4556468,lag=0 +slave1:ip=127.0.0.1,port=8379,state=send_bulk,offset=0,lag=1 master_replid:8c4d7b768b26826825ceb20ff4a2c7c54616350b master_replid2:0000000000000000000000000000000000000000 master_repl_offset:0 From acb6cd67be38b971c71373fe184ad0843bd8a016 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 12 Sep 2019 14:54:17 -0700 Subject: [PATCH 1182/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cd93e476d..43a560b9c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ - [#6326](https://github.com/influxdata/telegraf/pull/5842): Add per node memory stats to rabbitmq input. - [#6361](https://github.com/influxdata/telegraf/pull/6361): Add ability to read query from file to postgresql_extensible input. +- [#5921](https://github.com/influxdata/telegraf/pull/5921): Add replication metrics to the redis input. 
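(Editor's aside: the key=value splitting that gatherReplicationLine applies to each replica line can be exercised on its own. A minimal, self-contained sketch of the same approach, not part of the patch:)

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// One replica entry from the INFO "Replication" section.
	line := "ip=127.0.0.1,port=7379,state=online,offset=4556468,lag=0"

	tags := map[string]string{}
	fields := map[string]int64{}

	// Mirror the plugin: split on commas, then on "="; keep ip/port/state
	// as tags and any parseable integer as a field.
	for _, part := range strings.Split(line, ",") {
		kv := strings.SplitN(part, "=", 2)
		if len(kv) != 2 {
			continue
		}
		switch kv[0] {
		case "ip", "port", "state":
			tags[kv[0]] = kv[1]
		default:
			if v, err := strconv.ParseInt(kv[1], 10, 64); err == nil {
				fields[kv[0]] = v
			}
		}
	}

	fmt.Println(tags)   // map[ip:127.0.0.1 port:7379 state:online]
	fmt.Println(fields) // map[lag:0 offset:4556468]
}
```

Using SplitN with a limit of 2, rather than the plugin's plain Split, keeps values that themselves contain "=" intact; both behave the same for the fields shown here.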
## v1.12.1 [2019-09-10] From df288a0bb40dbbd96e6b02e00b59d7cf4d3bab9f Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 16 Sep 2019 17:30:34 -0600 Subject: [PATCH 1183/1815] Keep boolean when listed in json_string_fields (#6400) --- plugins/parsers/json/parser.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index 7606b7629..e2a2115b3 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -169,13 +169,11 @@ func (p *Parser) switchFieldToTag(tags map[string]string, fields map[string]inte //remove any additional string/bool values from fields for fk := range fields { switch fields[fk].(type) { - case string: + case string, bool: if p.stringFields != nil && p.stringFields.Match(fk) { continue } delete(fields, fk) - case bool: - delete(fields, fk) } } return tags, fields From 7167a23c527eb63e22cc0370e6e4c919ac8dc734 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 16 Sep 2019 17:31:37 -0600 Subject: [PATCH 1184/1815] Update apcupsd library (#6401) --- Gopkg.lock | 4 ++-- Gopkg.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index aa6068ebb..f954dd84a 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -822,11 +822,11 @@ version = "v1.0.1" [[projects]] - digest = "1:d4a3035a03b4612c714b993891c071706a64890e55ef64bcc42bc2b461cb2756" + digest = "1:d905825446d3547ebf8f58a4ff30c30439b39781b54d756f5ff3bf19765a3fdb" name = "github.com/mdlayher/apcupsd" packages = ["."] pruneopts = "" - revision = "2fe55d9e1d0704d3c6f03f69a1fd9ebe2aef9df1" + revision = "eb3dd99a75fe58389e357b732691320dcf706b5f" [[projects]] digest = "1:4c8d8358c45ba11ab7bb15df749d4df8664ff1582daead28bae58cf8cbe49890" diff --git a/Gopkg.toml b/Gopkg.toml index 85eb7659a..6ee73b7fb 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -223,7 +223,7 @@ [[constraint]] name = "github.com/mdlayher/apcupsd" - revision = "2fe55d9e1d0704d3c6f03f69a1fd9ebe2aef9df1" + revision = "eb3dd99a75fe58389e357b732691320dcf706b5f" [[constraint]] branch = "master" From 6dc61be6eb3b6ce982e919125578ca21b0914137 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 16 Sep 2019 16:32:14 -0700 Subject: [PATCH 1185/1815] Fix detection of layout timestamps (#6390) --- internal/internal.go | 148 ++++++++++++++++++++++----------- internal/internal_test.go | 140 +++++++++++++++++++++++++------ plugins/parsers/csv/parser.go | 2 +- plugins/parsers/json/parser.go | 2 +- 4 files changed, 215 insertions(+), 77 deletions(-) diff --git a/internal/internal.go b/internal/internal.go index 13c851a8d..af36460e3 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -13,7 +13,6 @@ import ( "math/big" "os" "os/exec" - "regexp" "runtime" "strconv" "strings" @@ -302,62 +301,115 @@ func CompressWithGzip(data io.Reader) (io.Reader, error) { return pipeReader, err } -// ParseTimestamp with no location provided parses a timestamp value as UTC -func ParseTimestamp(timestamp interface{}, format string) (time.Time, error) { - return ParseTimestampWithLocation(timestamp, format, "UTC") +// ParseTimestamp parses a Time according to the standard Telegraf options. 
+// These are generally displayed in the toml similar to:
+// json_time_key= "timestamp"
+// json_time_format = "2006-01-02T15:04:05Z07:00"
+// json_timezone = "America/Los_Angeles"
+//
+// The format can be one of "unix", "unix_ms", "unix_us", "unix_ns", or a Go
+// time layout suitable for time.Parse.
+//
+// When using the "unix" format, an optional fractional component is allowed.
+// Specific unix time precisions cannot have a fractional component.
+//
+// Unix times may be an int64, float64, or string. When using a Go format
+// string the timestamp must be a string.
+//
+// The location is a location string suitable for time.LoadLocation. Unix
+// times do not use the location string, a unix time is always returned in the
+// UTC location.
+func ParseTimestamp(format string, timestamp interface{}, location string) (time.Time, error) {
+	switch format {
+	case "unix", "unix_ms", "unix_us", "unix_ns":
+		return parseUnix(format, timestamp)
+	default:
+		if location == "" {
+			location = "UTC"
+		}
+		return parseTime(format, timestamp, location)
+	}
+}

-// ParseTimestamp parses a timestamp value as a unix epoch of various precision.
-//
-// format = "unix": epoch is assumed to be in seconds and can come as number or string. Can have a decimal part.
-// format = "unix_ms": epoch is assumed to be in milliseconds and can come as number or string. Cannot have a decimal part.
-// format = "unix_us": epoch is assumed to be in microseconds and can come as number or string. Cannot have a decimal part.
-// format = "unix_ns": epoch is assumed to be in nanoseconds and can come as number or string. Cannot have a decimal part.
-func ParseTimestampWithLocation(timestamp interface{}, format string, location string) (time.Time, error) {
-	timeInt, timeFractional := int64(0), int64(0)
+func parseUnix(format string, timestamp interface{}) (time.Time, error) {
+	integer, fractional, err := parseComponents(timestamp)
+	if err != nil {
+		return time.Unix(0, 0), err
+	}

+	switch strings.ToLower(format) {
+	case "unix":
+		return time.Unix(integer, fractional).UTC(), nil
+	case "unix_ms":
+		return time.Unix(0, integer*1e6).UTC(), nil
+	case "unix_us":
+		return time.Unix(0, integer*1e3).UTC(), nil
+	case "unix_ns":
+		return time.Unix(0, integer).UTC(), nil
+	default:
+		return time.Unix(0, 0), errors.New("unsupported type")
+	}
+}
+
+// Returns the integers before and after an optional decimal point. Both '.'
+// and ',' are supported for the decimal point. The timestamp can be an int64,
+// float64, or string.
+// ex: "42.5" -> (42, 5, nil) +func parseComponents(timestamp interface{}) (int64, int64, error) { switch ts := timestamp.(type) { case string: - var err error - splitted := regexp.MustCompile("[.,]").Split(ts, 2) - timeInt, err = strconv.ParseInt(splitted[0], 10, 64) + parts := strings.SplitN(ts, ".", 2) + if len(parts) == 2 { + return parseUnixTimeComponents(parts[0], parts[1]) + } + + parts = strings.SplitN(ts, ",", 2) + if len(parts) == 2 { + return parseUnixTimeComponents(parts[0], parts[1]) + } + + integer, err := strconv.ParseInt(ts, 10, 64) if err != nil { - loc, err := time.LoadLocation(location) - if err != nil { - return time.Time{}, fmt.Errorf("location: %s could not be loaded as a location", location) - } - return time.ParseInLocation(format, ts, loc) - } - - if len(splitted) == 2 { - if len(splitted[1]) > 9 { - splitted[1] = splitted[1][:9] //truncates decimal part to nanoseconds precision - } - nanosecStr := splitted[1] + strings.Repeat("0", 9-len(splitted[1])) //adds 0's to the right to obtain a valid number of nanoseconds - - timeFractional, err = strconv.ParseInt(nanosecStr, 10, 64) - if err != nil { - return time.Time{}, err - } + return 0, 0, err } + return integer, 0, nil case int64: - timeInt = ts + return ts, 0, nil case float64: - intPart, frac := math.Modf(ts) - timeInt, timeFractional = int64(intPart), int64(frac*1e9) + integer, fractional := math.Modf(ts) + return int64(integer), int64(fractional * 1e9), nil default: - return time.Time{}, fmt.Errorf("time: %v could not be converted to string nor float64", timestamp) - } - - if strings.EqualFold(format, "unix") { - return time.Unix(timeInt, timeFractional).UTC(), nil - } else if strings.EqualFold(format, "unix_ms") { - return time.Unix(timeInt/1000, (timeInt%1000)*1e6).UTC(), nil - } else if strings.EqualFold(format, "unix_us") { - return time.Unix(0, timeInt*1e3).UTC(), nil - } else if strings.EqualFold(format, "unix_ns") { - return time.Unix(0, timeInt).UTC(), nil - } else { - return time.Time{}, errors.New("Invalid unix format") + return 0, 0, errors.New("unsupported type") + } +} + +func parseUnixTimeComponents(first, second string) (int64, int64, error) { + integer, err := strconv.ParseInt(first, 10, 64) + if err != nil { + return 0, 0, err + } + + // Convert to nanoseconds, dropping any greater precision. + buf := []byte("000000000") + copy(buf, second) + + fractional, err := strconv.ParseInt(string(buf), 10, 64) + if err != nil { + return 0, 0, err + } + return integer, fractional, nil +} + +// ParseTime parses a string timestamp according to the format string. 
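+// Editor's note, mirroring the test cases below: with layout
+// "2006-01-02 15:04:05", timestamp "2019-02-20 21:50:34" and location "UTC"
+// this returns 2019-02-20T21:50:34Z; a non-string timestamp yields an error.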
+func parseTime(format string, timestamp interface{}, location string) (time.Time, error) { + switch ts := timestamp.(type) { + case string: + loc, err := time.LoadLocation(location) + if err != nil { + return time.Unix(0, 0), err + } + return time.ParseInLocation(format, ts, loc) + default: + return time.Unix(0, 0), errors.New("unsupported type") } } diff --git a/internal/internal_test.go b/internal/internal_test.go index 5e9b9a97c..f4627ee74 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -331,32 +331,118 @@ func TestAlignTime(t *testing.T) { } func TestParseTimestamp(t *testing.T) { - time, err := ParseTimestamp("2019-02-20 21:50:34.029665", "2006-01-02 15:04:05.000000") - assert.Nil(t, err) - assert.EqualValues(t, int64(1550699434029665000), time.UnixNano()) + rfc3339 := func(value string) time.Time { + tm, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + panic(err) + } + return tm + } - time, err = ParseTimestamp("2019-02-20 21:50:34.029665-04:00", "2006-01-02 15:04:05.000000-07:00") - assert.Nil(t, err) - assert.EqualValues(t, int64(1550713834029665000), time.UnixNano()) - - time, err = ParseTimestamp("2019-02-20 21:50:34.029665", "2006-01-02 15:04:05.000000-06:00") - assert.NotNil(t, err) -} - -func TestParseTimestampWithLocation(t *testing.T) { - time, err := ParseTimestampWithLocation("2019-02-20 21:50:34.029665", "2006-01-02 15:04:05.000000", "UTC") - assert.Nil(t, err) - assert.EqualValues(t, int64(1550699434029665000), time.UnixNano()) - - time, err = ParseTimestampWithLocation("2019-02-20 21:50:34.029665", "2006-01-02 15:04:05.000000", "America/New_York") - assert.Nil(t, err) - assert.EqualValues(t, int64(1550717434029665000), time.UnixNano()) - - //Provided location is ignored if an offset is successfully parsed - time, err = ParseTimestampWithLocation("2019-02-20 21:50:34.029665-07:00", "2006-01-02 15:04:05.000000-07:00", "America/New_York") - assert.Nil(t, err) - assert.EqualValues(t, int64(1550724634029665000), time.UnixNano()) - - time, err = ParseTimestampWithLocation("2019-02-20 21:50:34.029665", "2006-01-02 15:04:05.000000", "InvalidTimeZone") - assert.NotNil(t, err) + tests := []struct { + name string + format string + timestamp interface{} + location string + expected time.Time + err bool + }{ + { + name: "parse layout string in utc", + format: "2006-01-02 15:04:05", + timestamp: "2019-02-20 21:50:34", + location: "UTC", + expected: rfc3339("2019-02-20T21:50:34Z"), + }, + { + name: "parse layout string with invalid timezone", + format: "2006-01-02 15:04:05", + timestamp: "2019-02-20 21:50:34", + location: "InvalidTimeZone", + err: true, + }, + { + name: "layout regression 6386", + format: "02.01.2006 15:04:05", + timestamp: "09.07.2019 00:11:00", + expected: rfc3339("2019-07-09T00:11:00Z"), + }, + { + name: "default location is utc", + format: "2006-01-02 15:04:05", + timestamp: "2019-02-20 21:50:34", + expected: rfc3339("2019-02-20T21:50:34Z"), + }, + { + name: "unix seconds without fractional", + format: "unix", + timestamp: "1568338208", + expected: rfc3339("2019-09-13T01:30:08Z"), + }, + { + name: "unix seconds with fractional", + format: "unix", + timestamp: "1568338208.500", + expected: rfc3339("2019-09-13T01:30:08.500Z"), + }, + { + name: "unix seconds with fractional and comma decimal point", + format: "unix", + timestamp: "1568338208,500", + expected: rfc3339("2019-09-13T01:30:08.500Z"), + }, + { + name: "unix seconds extra precision", + format: "unix", + timestamp: "1568338208.00000050042", + expected: 
rfc3339("2019-09-13T01:30:08.000000500Z"), + }, + { + name: "unix seconds integer", + format: "unix", + timestamp: int64(1568338208), + expected: rfc3339("2019-09-13T01:30:08Z"), + }, + { + name: "unix seconds float", + format: "unix", + timestamp: float64(1568338208.500), + expected: rfc3339("2019-09-13T01:30:08.500Z"), + }, + { + name: "unix milliseconds", + format: "unix_ms", + timestamp: "1568338208500", + expected: rfc3339("2019-09-13T01:30:08.500Z"), + }, + { + name: "unix milliseconds with fractional is ignored", + format: "unix_ms", + timestamp: "1568338208500.42", + expected: rfc3339("2019-09-13T01:30:08.500Z"), + }, + { + name: "unix microseconds", + format: "unix_us", + timestamp: "1568338208000500", + expected: rfc3339("2019-09-13T01:30:08.000500Z"), + }, + { + name: "unix nanoseconds", + format: "unix_ns", + timestamp: "1568338208000000500", + expected: rfc3339("2019-09-13T01:30:08.000000500Z"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tm, err := ParseTimestamp(tt.format, tt.timestamp, tt.location) + if tt.err { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.expected, tm) + } + }) + } } diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go index 8fa1dfab1..861844488 100644 --- a/plugins/parsers/csv/parser.go +++ b/plugins/parsers/csv/parser.go @@ -235,7 +235,7 @@ func parseTimestamp(timeFunc func() time.Time, recordFields map[string]interface case "": return time.Time{}, fmt.Errorf("timestamp format must be specified") default: - metricTime, err := internal.ParseTimestamp(recordFields[timestampColumn], timestampFormat) + metricTime, err := internal.ParseTimestamp(timestampFormat, recordFields[timestampColumn], "UTC") if err != nil { return time.Time{}, err } diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index e2a2115b3..fb64997fe 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -120,7 +120,7 @@ func (p *Parser) parseObject(data map[string]interface{}) ([]telegraf.Metric, er return nil, err } - nTime, err = internal.ParseTimestampWithLocation(f.Fields[p.timeKey], p.timeFormat, p.timezone) + nTime, err = internal.ParseTimestamp(p.timeFormat, f.Fields[p.timeKey], p.timezone) if err != nil { return nil, err } From 8ed633bae580f461e157525442f6f3e7b377734b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 16 Sep 2019 16:39:20 -0700 Subject: [PATCH 1186/1815] Update changelog --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 43a560b9c..41d1f6d63 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,14 @@ - [#6361](https://github.com/influxdata/telegraf/pull/6361): Add ability to read query from file to postgresql_extensible input. - [#5921](https://github.com/influxdata/telegraf/pull/5921): Add replication metrics to the redis input. +## v1.12.2 [unreleased] + +#### Bugfixes + +- [#6386](https://github.com/influxdata/telegraf/issues/6386): Fix detection of layout timestamps in csv and json parser. +- [#6394](https://github.com/influxdata/telegraf/issues/6394): Fix parsing of BATTDATE in apcupsd input. +- [#6398](https://github.com/influxdata/telegraf/issues/6398): Keep boolean values listed in json_string_fields. 
+ ## v1.12.1 [2019-09-10] #### Bugfixes From 97328cf17756afd6531fb08e512ba2fd4e8c5dfe Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 16 Sep 2019 16:45:55 -0700 Subject: [PATCH 1187/1815] Use gopsutil 2.19.8 --- Gopkg.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index f954dd84a..1260d471e 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1072,7 +1072,7 @@ version = "v1.2.0" [[projects]] - digest = "1:2226ffdae873216a5bc8a0bab7a51ac670b27a4aed852007d77600f809aa04e3" + digest = "1:55dcddb2ba6ab25098ee6b96f176f39305f1fde7ea3d138e7e10bb64a5bf45be" name = "github.com/shirou/gopsutil" packages = [ "cpu", @@ -1085,8 +1085,8 @@ "process", ] pruneopts = "" - revision = "d80c43f9c984a48783daf22f4bd9278006ae483a" - version = "v2.19.7" + revision = "e4ec7b275ada47ca32799106c2dba142d96aaf93" + version = "v2.19.8" [[projects]] branch = "master" From b5510eb12833036daec511c86fbf8e2c9f11d14f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 16 Sep 2019 16:46:34 -0700 Subject: [PATCH 1188/1815] Remove transitive dependency azure-pipeline-go from Gopkg.toml --- Gopkg.toml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/Gopkg.toml b/Gopkg.toml index 6ee73b7fb..2d545e224 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -262,10 +262,6 @@ name = "github.com/karrick/godirwalk" version = "1.7.5" -[[constraint]] - name = "github.com/Azure/azure-pipeline-go" - version = "0.1.8" - [[override]] name = "github.com/harlow/kinesis-consumer" branch = "master" From ee9d0fc4932c86b210f201ac78e949435c25df68 Mon Sep 17 00:00:00 2001 From: Steven Barth Date: Tue, 17 Sep 2019 01:57:25 +0200 Subject: [PATCH 1189/1815] Support NX-OS telemetry extensions in cisco_telemetry_mdt (#6177) --- Gopkg.lock | 2 + plugins/inputs/cisco_telemetry_mdt/README.md | 3 + .../cisco_telemetry_mdt.go | 305 ++++++++++++++---- .../cisco_telemetry_mdt_test.go | 244 +++++++++++++- 4 files changed, 466 insertions(+), 88 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 1260d471e..22520af3a 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1486,6 +1486,7 @@ "credentials", "credentials/oauth", "encoding", + "encoding/gzip", "encoding/proto", "grpclog", "internal", @@ -1813,6 +1814,7 @@ "google.golang.org/grpc", "google.golang.org/grpc/codes", "google.golang.org/grpc/credentials", + "google.golang.org/grpc/encoding/gzip", "google.golang.org/grpc/metadata", "google.golang.org/grpc/peer", "google.golang.org/grpc/status", diff --git a/plugins/inputs/cisco_telemetry_mdt/README.md b/plugins/inputs/cisco_telemetry_mdt/README.md index 2848d0493..3545c6120 100644 --- a/plugins/inputs/cisco_telemetry_mdt/README.md +++ b/plugins/inputs/cisco_telemetry_mdt/README.md @@ -29,6 +29,9 @@ The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and l ## transport only. 
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags + # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"] + ## Define aliases to map telemetry encoding paths to simple measurement names [inputs.cisco_telemetry_mdt.aliases] ifstats = "ietf-interfaces:interfaces-state/interface/statistics" diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go index 74480cb8a..ddca8247d 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -7,10 +7,14 @@ import ( "io" "log" "net" + "path" + "strconv" "strings" "sync" "time" + "github.com/influxdata/telegraf/metric" + dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" "github.com/golang/protobuf/proto" @@ -19,6 +23,9 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + + // Register GRPC gzip decoder to support compressed telemetry + _ "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/peer" ) @@ -34,6 +41,7 @@ type CiscoTelemetryMDT struct { ServiceAddress string `toml:"service_address"` MaxMsgSize int `toml:"max_msg_size"` Aliases map[string]string `toml:"aliases"` + EmbeddedTags []string `toml:"embedded_tags"` // GRPC TLS settings internaltls.ServerConfig @@ -43,9 +51,12 @@ type CiscoTelemetryMDT struct { listener net.Listener // Internal state - aliases map[string]string - acc telegraf.Accumulator - wg sync.WaitGroup + aliases map[string]string + warned map[string]struct{} + extraTags map[string]map[string]struct{} + mutex sync.Mutex + acc telegraf.Accumulator + wg sync.WaitGroup } // Start the Cisco MDT service @@ -58,11 +69,22 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { } // Invert aliases list + c.warned = make(map[string]struct{}) c.aliases = make(map[string]string, len(c.Aliases)) for alias, path := range c.Aliases { c.aliases[path] = alias } + // Fill extra tags + c.extraTags = make(map[string]map[string]struct{}) + for _, tag := range c.EmbeddedTags { + dir := path.Dir(tag) + if _, hasKey := c.extraTags[dir]; !hasKey { + c.extraTags[dir] = make(map[string]struct{}) + } + c.extraTags[dir][path.Base(tag)] = struct{}{} + } + switch c.Transport { case "tcp": // TCP dialout server accept routine @@ -76,6 +98,7 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { var opts []grpc.ServerOption tlsConfig, err := c.ServerConfig.TLSConfig() if err != nil { + c.listener.Close() return err } else if tlsConfig != nil { opts = append(opts, grpc.Creds(credentials.NewTLS(tlsConfig))) @@ -198,6 +221,8 @@ func (c *CiscoTelemetryMDT) MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutS log.Printf("D! 
[inputs.cisco_telemetry_mdt]: Accepted Cisco MDT GRPC dialout connection from %s", peer.Addr) } + var chunkBuffer bytes.Buffer + for { packet, err := stream.Recv() if err != nil { @@ -212,7 +237,18 @@ func (c *CiscoTelemetryMDT) MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutS break } - c.handleTelemetry(packet.Data) + // Reassemble chunked telemetry data received from NX-OS + if packet.TotalSize == 0 { + c.handleTelemetry(packet.Data) + } else if int(packet.TotalSize) <= c.MaxMsgSize { + chunkBuffer.Write(packet.Data) + if chunkBuffer.Len() >= int(packet.TotalSize) { + c.handleTelemetry(chunkBuffer.Bytes()) + chunkBuffer.Reset() + } + } else { + c.acc.AddError(fmt.Errorf("dropped too large packet: %dB > %dB", packet.TotalSize, c.MaxMsgSize)) + } } if peerOK { @@ -224,115 +260,239 @@ func (c *CiscoTelemetryMDT) MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutS // Handle telemetry packet from any transport, decode and add as measurement func (c *CiscoTelemetryMDT) handleTelemetry(data []byte) { - var namebuf bytes.Buffer - telemetry := &telemetry.Telemetry{} - err := proto.Unmarshal(data, telemetry) + msg := &telemetry.Telemetry{} + err := proto.Unmarshal(data, msg) if err != nil { c.acc.AddError(fmt.Errorf("Cisco MDT failed to decode: %v", err)) return } - for _, gpbkv := range telemetry.DataGpbkv { - var fields map[string]interface{} - + grouper := metric.NewSeriesGrouper() + for _, gpbkv := range msg.DataGpbkv { // Produce metadata tags var tags map[string]string // Top-level field may have measurement timestamp, if not use message timestamp measured := gpbkv.Timestamp if measured == 0 { - measured = telemetry.MsgTimestamp + measured = msg.MsgTimestamp } timestamp := time.Unix(int64(measured/1000), int64(measured%1000)*1000000) - // Populate tags and fields from toplevel GPBKV fields "keys" and "content" + // Find toplevel GPBKV fields "keys" and "content" + var keys, content *telemetry.TelemetryField = nil, nil for _, field := range gpbkv.Fields { - switch field.Name { - case "keys": - tags = make(map[string]string, len(field.Fields)+2) - tags["source"] = telemetry.GetNodeIdStr() - tags["subscription"] = telemetry.GetSubscriptionIdStr() - for _, subfield := range field.Fields { - c.parseGPBKVField(subfield, &namebuf, telemetry.EncodingPath, timestamp, tags, nil) - } - case "content": - fields = make(map[string]interface{}, len(field.Fields)) - for _, subfield := range field.Fields { - c.parseGPBKVField(subfield, &namebuf, telemetry.EncodingPath, timestamp, tags, fields) - } - default: - log.Printf("I! [inputs.cisco_telemetry_mdt]: Unexpected top-level MDT field: %s", field.Name) + if field.Name == "keys" { + keys = field + } else if field.Name == "content" { + content = field } } - // Find best alias for encoding path and emit measurement - if len(fields) > 0 && len(tags) > 0 && len(telemetry.EncodingPath) > 0 { - name := telemetry.EncodingPath - if alias, ok := c.aliases[name]; ok { - tags["path"] = name - name = alias - } else { - log.Printf("D! [inputs.cisco_telemetry_mdt]: No measurement alias for encoding path: %s", name) - } - c.acc.AddFields(name, fields, tags, timestamp) - } else { - c.acc.AddError(fmt.Errorf("empty encoding path or measurement")) + if keys == nil || content == nil { + log.Printf("I! 
[inputs.cisco_telemetry_mdt]: Message from %s missing keys or content", msg.GetNodeIdStr()) + continue } + + // Parse keys + tags = make(map[string]string, len(keys.Fields)+3) + tags["source"] = msg.GetNodeIdStr() + tags["subscription"] = msg.GetSubscriptionIdStr() + tags["path"] = msg.GetEncodingPath() + + for _, subfield := range keys.Fields { + c.parseKeyField(tags, subfield, "") + } + + // Parse values + for _, subfield := range content.Fields { + c.parseContentField(grouper, subfield, "", msg.EncodingPath, tags, timestamp) + } + } + + for _, metric := range grouper.Metrics() { + c.acc.AddMetric(metric) } } -// Recursively parse GPBKV field structure into fields or tags -func (c *CiscoTelemetryMDT) parseGPBKVField(field *telemetry.TelemetryField, namebuf *bytes.Buffer, - path string, timestamp time.Time, tags map[string]string, fields map[string]interface{}) { - - namelen := namebuf.Len() - if namelen > 0 { - namebuf.WriteRune('/') - } - namebuf.WriteString(strings.Replace(field.Name, "-", "_", -1)) - - // Decode Telemetry field value if set - var value interface{} +func decodeValue(field *telemetry.TelemetryField) interface{} { switch val := field.ValueByType.(type) { case *telemetry.TelemetryField_BytesValue: - value = val.BytesValue + return val.BytesValue case *telemetry.TelemetryField_StringValue: - value = val.StringValue + if len(val.StringValue) > 0 { + return val.StringValue + } case *telemetry.TelemetryField_BoolValue: - value = val.BoolValue + return val.BoolValue case *telemetry.TelemetryField_Uint32Value: - value = val.Uint32Value + return val.Uint32Value case *telemetry.TelemetryField_Uint64Value: - value = val.Uint64Value + return val.Uint64Value case *telemetry.TelemetryField_Sint32Value: - value = val.Sint32Value + return val.Sint32Value case *telemetry.TelemetryField_Sint64Value: - value = val.Sint64Value + return val.Sint64Value case *telemetry.TelemetryField_DoubleValue: - value = val.DoubleValue + return val.DoubleValue case *telemetry.TelemetryField_FloatValue: - value = val.FloatValue + return val.FloatValue + } + return nil +} + +func decodeTag(field *telemetry.TelemetryField) string { + switch val := field.ValueByType.(type) { + case *telemetry.TelemetryField_BytesValue: + return string(val.BytesValue) + case *telemetry.TelemetryField_StringValue: + return val.StringValue + case *telemetry.TelemetryField_BoolValue: + if val.BoolValue { + return "true" + } + return "false" + case *telemetry.TelemetryField_Uint32Value: + return strconv.FormatUint(uint64(val.Uint32Value), 10) + case *telemetry.TelemetryField_Uint64Value: + return strconv.FormatUint(val.Uint64Value, 10) + case *telemetry.TelemetryField_Sint32Value: + return strconv.FormatInt(int64(val.Sint32Value), 10) + case *telemetry.TelemetryField_Sint64Value: + return strconv.FormatInt(val.Sint64Value, 10) + case *telemetry.TelemetryField_DoubleValue: + return strconv.FormatFloat(val.DoubleValue, 'f', -1, 64) + case *telemetry.TelemetryField_FloatValue: + return strconv.FormatFloat(float64(val.FloatValue), 'f', -1, 32) + default: + return "" + } +} + +// Recursively parse tag fields +func (c *CiscoTelemetryMDT) parseKeyField(tags map[string]string, field *telemetry.TelemetryField, prefix string) { + localname := strings.Replace(field.Name, "-", "_", -1) + name := localname + if len(localname) == 0 { + name = prefix + } else if len(prefix) > 0 { + name = prefix + "/" + localname } - if value != nil { - // Distinguish between tags (keys) and fields (data) to write to - if fields != nil { - 
fields[namebuf.String()] = value + if tag := decodeTag(field); len(name) > 0 && len(tag) > 0 { + if _, exists := tags[localname]; !exists { // Use short keys whenever possible + tags[localname] = tag } else { - if _, exists := tags[field.Name]; !exists { // Use short keys whenever possible - tags[field.Name] = fmt.Sprint(value) - } else { - tags[namebuf.String()] = fmt.Sprint(value) - } + tags[name] = tag } } for _, subfield := range field.Fields { - c.parseGPBKVField(subfield, namebuf, path, timestamp, tags, fields) + c.parseKeyField(tags, subfield, name) + } +} + +func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, prefix string, + path string, tags map[string]string, timestamp time.Time) { + name := strings.Replace(field.Name, "-", "_", -1) + if len(name) == 0 { + name = prefix + } else if len(prefix) > 0 { + name = prefix + "/" + name } - namebuf.Truncate(namelen) + extraTags := c.extraTags[path+"/"+name] + + if value := decodeValue(field); value != nil { + // Do alias lookup, to shorten measurement names + measurement := path + if alias, ok := c.aliases[path]; ok { + measurement = alias + } else { + c.mutex.Lock() + if _, haveWarned := c.warned[path]; !haveWarned { + log.Printf("D! [inputs.cisco_telemetry_mdt]: No measurement alias for encoding path: %s", path) + c.warned[path] = struct{}{} + } + c.mutex.Unlock() + } + + grouper.Add(measurement, tags, timestamp, name, value) + return + } + + if len(extraTags) > 0 { + for _, subfield := range field.Fields { + if _, isExtraTag := extraTags[subfield.Name]; isExtraTag { + tags[name+"/"+subfield.Name] = decodeTag(subfield) + } + } + } + + var nxAttributes, nxChildren, nxRows *telemetry.TelemetryField + isNXOS := !strings.ContainsRune(path, ':') // IOS-XR and IOS-XE have a colon in their encoding path, NX-OS does not + for _, subfield := range field.Fields { + if isNXOS && subfield.Name == "attributes" && len(subfield.Fields) > 0 { + nxAttributes = subfield.Fields[0] + } else if isNXOS && subfield.Name == "children" && len(subfield.Fields) > 0 { + nxChildren = subfield + } else if isNXOS && strings.HasPrefix(subfield.Name, "ROW_") { + nxRows = subfield + } else if _, isExtraTag := extraTags[subfield.Name]; !isExtraTag { // Regular telemetry decoding + c.parseContentField(grouper, subfield, name, path, tags, timestamp) + } + } + + if nxAttributes == nil && nxRows == nil { + return + } else if nxRows != nil { + // NXAPI structure: https://developer.cisco.com/docs/cisco-nexus-9000-series-nx-api-cli-reference-release-9-2x/ + for _, row := range nxRows.Fields { + for i, subfield := range row.Fields { + if i == 0 { // First subfield contains the index, promote it from value to tag + tags[prefix] = decodeTag(subfield) + } else { + c.parseContentField(grouper, subfield, "", path, tags, timestamp) + } + } + delete(tags, prefix) + } + return + } + + // DME structure: https://developer.cisco.com/site/nxapi-dme-model-reference-api/ + rn := "" + dn := false + + for _, subfield := range nxAttributes.Fields { + if subfield.Name == "rn" { + rn = decodeTag(subfield) + } else if subfield.Name == "dn" { + dn = true + } + } + + if len(rn) > 0 { + tags[prefix] = rn + } else if !dn { // Check for distinguished name being present + c.acc.AddError(fmt.Errorf("NX-OS decoding failed: missing dn field")) + return + } + + for _, subfield := range nxAttributes.Fields { + if subfield.Name != "rn" { + c.parseContentField(grouper, subfield, "", path, tags, timestamp) + } + } + + if nxChildren != nil { + // This is 
a nested structure, children will inherit relative name keys of parent + for _, subfield := range nxChildren.Fields { + c.parseContentField(grouper, subfield, prefix, path, tags, timestamp) + } + } + delete(tags, prefix) } // Stop listener and cleanup @@ -363,6 +523,9 @@ const sampleConfig = ` ## transport only. # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags + # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"] + ## Define aliases to map telemetry encoding paths to simple measurement names [inputs.cisco_telemetry_mdt.aliases] ifstats = "ietf-interfaces:interfaces-state/interface/statistics" diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go index d2c686c69..3736a8531 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go @@ -17,23 +17,6 @@ import ( "google.golang.org/grpc" ) -func TestHandleTelemetryEmpty(t *testing.T) { - c := &CiscoTelemetryMDT{Transport: "dummy"} - acc := &testutil.Accumulator{} - c.Start(acc) - - telemetry := &telemetry.Telemetry{ - DataGpbkv: []*telemetry.TelemetryField{ - {}, - }, - } - data, _ := proto.Marshal(telemetry) - - c.handleTelemetry(data) - assert.Contains(t, acc.Errors, errors.New("empty encoding path or measurement")) - assert.Empty(t, acc.Metrics) -} - func TestHandleTelemetryTwoSimple(t *testing.T) { c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"alias": "type:model/some/path"}} acc := &testutil.Accumulator{} @@ -174,6 +157,233 @@ func TestHandleTelemetrySingleNested(t *testing.T) { acc.AssertContainsTaggedFields(t, "nested", fields, tags) } +func TestHandleEmbeddedTags(t *testing.T) { + c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"extra": "type:model/extra"}, EmbeddedTags: []string{"type:model/extra/list/name"}} + acc := &testutil.Accumulator{} + c.Start(acc) + + telemetry := &telemetry.Telemetry{ + MsgTimestamp: 1543236572000, + EncodingPath: "type:model/extra", + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "keys", + Fields: []*telemetry.TelemetryField{ + { + Name: "foo", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + }, + }, + }, + { + Name: "content", + Fields: []*telemetry.TelemetryField{ + { + Name: "list", + Fields: []*telemetry.TelemetryField{ + { + Name: "name", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "entry1"}, + }, + { + Name: "test", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, + }, + }, + }, + { + Name: "list", + Fields: []*telemetry.TelemetryField{ + { + Name: "name", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "entry2"}, + }, + { + Name: "test", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + }, + }, + }, + }, + }, + }, + }, + }, + } + data, _ := proto.Marshal(telemetry) + + c.handleTelemetry(data) + assert.Empty(t, acc.Errors) + + tags1 := map[string]string{"path": "type:model/extra", "foo": "bar", "source": "hostname", "subscription": "subscription", "list/name": 
"entry1"} + fields1 := map[string]interface{}{"list/test": "foo"} + tags2 := map[string]string{"path": "type:model/extra", "foo": "bar", "source": "hostname", "subscription": "subscription", "list/name": "entry2"} + fields2 := map[string]interface{}{"list/test": "bar"} + acc.AssertContainsTaggedFields(t, "extra", fields1, tags1) + acc.AssertContainsTaggedFields(t, "extra", fields2, tags2) +} + +func TestHandleNXAPI(t *testing.T) { + c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"nxapi": "show nxapi"}} + acc := &testutil.Accumulator{} + c.Start(acc) + + telemetry := &telemetry.Telemetry{ + MsgTimestamp: 1543236572000, + EncodingPath: "show nxapi", + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "keys", + Fields: []*telemetry.TelemetryField{ + { + Name: "foo", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + }, + }, + }, + { + Name: "content", + Fields: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "TABLE_nxapi", + Fields: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "ROW_nxapi", + Fields: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "index", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i1"}, + }, + { + Name: "value", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, + }, + }, + }, + { + Fields: []*telemetry.TelemetryField{ + { + Name: "index", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i2"}, + }, + { + Name: "value", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + data, _ := proto.Marshal(telemetry) + + c.handleTelemetry(data) + assert.Empty(t, acc.Errors) + + tags1 := map[string]string{"path": "show nxapi", "foo": "bar", "TABLE_nxapi": "i1", "source": "hostname", "subscription": "subscription"} + fields1 := map[string]interface{}{"value": "foo"} + tags2 := map[string]string{"path": "show nxapi", "foo": "bar", "TABLE_nxapi": "i2", "source": "hostname", "subscription": "subscription"} + fields2 := map[string]interface{}{"value": "bar"} + acc.AssertContainsTaggedFields(t, "nxapi", fields1, tags1) + acc.AssertContainsTaggedFields(t, "nxapi", fields2, tags2) +} + +func TestHandleNXDME(t *testing.T) { + c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"dme": "sys/dme"}} + acc := &testutil.Accumulator{} + c.Start(acc) + + telemetry := &telemetry.Telemetry{ + MsgTimestamp: 1543236572000, + EncodingPath: "sys/dme", + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "keys", + Fields: []*telemetry.TelemetryField{ + { + Name: "foo", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + }, + }, + }, + { + Name: "content", + Fields: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "fooEntity", + Fields: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "attributes", + Fields: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { 
+ Name: "rn", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "some-rn"}, + }, + { + Name: "value", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + data, _ := proto.Marshal(telemetry) + + c.handleTelemetry(data) + assert.Empty(t, acc.Errors) + + tags1 := map[string]string{"path": "sys/dme", "foo": "bar", "fooEntity": "some-rn", "source": "hostname", "subscription": "subscription"} + fields1 := map[string]interface{}{"value": "foo"} + acc.AssertContainsTaggedFields(t, "dme", fields1, tags1) +} + func TestTCPDialoutOverflow(t *testing.T) { c := &CiscoTelemetryMDT{Transport: "tcp", ServiceAddress: "127.0.0.1:57000"} acc := &testutil.Accumulator{} From fe616ed473b101f1f3e893e3950e83d898551a52 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 16 Sep 2019 16:58:09 -0700 Subject: [PATCH 1190/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 41d1f6d63..d449f7dea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ - [#6326](https://github.com/influxdata/telegraf/pull/5842): Add per node memory stats to rabbitmq input. - [#6361](https://github.com/influxdata/telegraf/pull/6361): Add ability to read query from file to postgresql_extensible input. - [#5921](https://github.com/influxdata/telegraf/pull/5921): Add replication metrics to the redis input. +- [#6177](https://github.com/influxdata/telegraf/pull/6177): Support NX-OS telemetry extensions in cisco_telemetry_mdt. ## v1.12.2 [unreleased] From a2c28f2762559dc6e0f3b86636eba1a79c1ebcba Mon Sep 17 00:00:00 2001 From: David McKay Date: Tue, 17 Sep 2019 20:52:34 +0100 Subject: [PATCH 1191/1815] Recommend installing TICK Stack with Helm Stable Charts (#6404) --- plugins/inputs/kube_inventory/README.md | 1 - plugins/inputs/kubernetes/README.md | 12 ++++++++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md index a884e24bd..d24ca95bd 100644 --- a/plugins/inputs/kube_inventory/README.md +++ b/plugins/inputs/kube_inventory/README.md @@ -280,4 +280,3 @@ kubernetes_statefulset,namespace=default,statefulset_name=etcd replicas_updated= [series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality [influx-docs]: https://docs.influxdata.com/influxdb/latest/ [k8s-telegraf]: https://www.influxdata.com/blog/monitoring-kubernetes-architecture/ -[tick-charts]: https://github.com/influxdata/tick-charts diff --git a/plugins/inputs/kubernetes/README.md b/plugins/inputs/kubernetes/README.md index 33cca8590..d53d94e97 100644 --- a/plugins/inputs/kubernetes/README.md +++ b/plugins/inputs/kubernetes/README.md @@ -48,7 +48,12 @@ avoid cardinality issues: ### DaemonSet For recommendations on running Telegraf as a DaemonSet see [Monitoring Kubernetes -Architecture][k8s-telegraf] or view the [Helm charts][tick-charts]. 
+Architecture][k8s-telegraf] or view the Helm charts:
+
+- [Telegraf][]
+- [InfluxDB][]
+- [Chronograf][]
+- [Kapacitor][]
 
 ### Metrics
 
@@ -136,4 +141,7 @@ kubernetes_system_container
 [series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality
 [influx-docs]: https://docs.influxdata.com/influxdb/latest/
 [k8s-telegraf]: https://www.influxdata.com/blog/monitoring-kubernetes-architecture/
-[tick-charts]: https://github.com/influxdata/tick-charts
+[Telegraf]: https://github.com/helm/charts/tree/master/stable/telegraf
+[InfluxDB]: https://github.com/helm/charts/tree/master/stable/influxdb
+[Chronograf]: https://github.com/helm/charts/tree/master/stable/chronograf
+[Kapacitor]: https://github.com/helm/charts/tree/master/stable/kapacitor

From e553341879ebd5e7db3fb35f04a9f216647253bc Mon Sep 17 00:00:00 2001
From: GeorgeJahad
Date: Tue, 17 Sep 2019 15:48:08 -0700
Subject: [PATCH 1192/1815] Update sqlserver docs for TLS connections (#6409)

---
 plugins/inputs/sqlserver/README.md    | 3 ++-
 plugins/inputs/sqlserver/sqlserver.go | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md
index f04e0b6de..2d41c5dcc 100644
--- a/plugins/inputs/sqlserver/README.md
+++ b/plugins/inputs/sqlserver/README.md
@@ -40,7 +40,8 @@ GO
   ## By default, the host is localhost, listening on default port, TCP 1433.
   ##   for Windows, the user is the currently running AD user (SSO).
   ##   See https://github.com/denisenkom/go-mssqldb for detailed connection
-  ##   parameters.
+  ##   parameters. In particular, TLS connections can be created like so:
+  ##   "encrypt=true;certificate=;hostNameInCertificate="
   # servers = [
   #  "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;",
   # ]
diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go
index e5de5202b..51e729a31 100644
--- a/plugins/inputs/sqlserver/sqlserver.go
+++ b/plugins/inputs/sqlserver/sqlserver.go
@@ -41,7 +41,8 @@ var sampleConfig = `
 ## By default, the host is localhost, listening on default port, TCP 1433.
 ##   for Windows, the user is the currently running AD user (SSO).
 ##   See https://github.com/denisenkom/go-mssqldb for detailed connection
-##   parameters.
+##   parameters. In particular, TLS connections can be created like so:
+##   "encrypt=true;certificate=;hostNameInCertificate="
 # servers = [
 #  "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;",
 # ]

From 8d96dd71c7739c1e6c2f2c86509df08f23752841 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Thu, 19 Sep 2019 20:03:10 -0700
Subject: [PATCH 1193/1815] Allow graphite parser to create Inf and NaN values
 (#6420)

---
 plugins/parsers/graphite/errors.go      | 14 ---------
 plugins/parsers/graphite/parser.go      |  7 +----
 plugins/parsers/graphite/parser_test.go | 42 ++++++++++++++++++++-----
 testutil/metric.go                      |  8 +++--
 4 files changed, 41 insertions(+), 30 deletions(-)
 delete mode 100644 plugins/parsers/graphite/errors.go

diff --git a/plugins/parsers/graphite/errors.go b/plugins/parsers/graphite/errors.go
deleted file mode 100644
index 2cd2f5583..000000000
--- a/plugins/parsers/graphite/errors.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package graphite
-
-import "fmt"
-
-// An UnsupposedValueError is returned when a parsed value is not
-// supposed.
-type UnsupposedValueError struct {
-	Field string
-	Value float64
-}
-
-func (err *UnsupposedValueError) Error() string {
-	return fmt.Sprintf(`field "%s" value: "%v" is unsupported`, err.Field, err.Value)
-}
diff --git a/plugins/parsers/graphite/parser.go b/plugins/parsers/graphite/parser.go
index 75c0475e3..f50217711 100644
--- a/plugins/parsers/graphite/parser.go
+++ b/plugins/parsers/graphite/parser.go
@@ -9,9 +9,8 @@ import (
 	"strings"
 	"time"
 
-	"github.com/influxdata/telegraf/internal/templating"
-
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal/templating"
 	"github.com/influxdata/telegraf/metric"
 )
 
@@ -121,10 +120,6 @@ func (p *GraphiteParser) ParseLine(line string) (telegraf.Metric, error) {
 			return nil, fmt.Errorf(`field "%s" value: %s`, fields[0], err)
 		}
 
-		if math.IsNaN(v) || math.IsInf(v, 0) {
-			return nil, &UnsupposedValueError{Field: fields[0], Value: v}
-		}
-
 		fieldValues := map[string]interface{}{}
 		if field != "" {
 			fieldValues[field] = v
diff --git a/plugins/parsers/graphite/parser_test.go b/plugins/parsers/graphite/parser_test.go
index d84551add..9254574b6 100644
--- a/plugins/parsers/graphite/parser_test.go
+++ b/plugins/parsers/graphite/parser_test.go
@@ -1,14 +1,14 @@
 package graphite
 
 import (
-	"reflect"
+	"math"
 	"strconv"
 	"testing"
 	"time"
 
 	"github.com/influxdata/telegraf/internal/templating"
 	"github.com/influxdata/telegraf/metric"
-
+	"github.com/influxdata/telegraf/testutil"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -355,14 +355,40 @@ func TestParse(t *testing.T) {
 
 func TestParseNaN(t *testing.T) {
 	p, err := NewGraphiteParser("", []string{"measurement*"}, nil)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
-	_, err = p.ParseLine("servers.localhost.cpu_load NaN 1435077219")
-	assert.Error(t, err)
+	m, err := p.ParseLine("servers.localhost.cpu_load NaN 1435077219")
+	require.NoError(t, err)
 
-	if _, ok := err.(*UnsupposedValueError); !ok {
-		t.Fatalf("expected *ErrUnsupportedValue, got %v", reflect.TypeOf(err))
-	}
+	expected := testutil.MustMetric(
+		"servers.localhost.cpu_load",
+		map[string]string{},
+		map[string]interface{}{
+			"value": math.NaN(),
+		},
+		time.Unix(1435077219, 0),
+	)
+
+	testutil.RequireMetricEqual(t, expected, m)
+}
+
+func TestParseInf(t *testing.T) {
+	p, err := NewGraphiteParser("", []string{"measurement*"}, nil)
+	require.NoError(t, err)
+
+	m, err := p.ParseLine("servers.localhost.cpu_load +Inf 1435077219")
+	require.NoError(t, err)
+
+	expected := testutil.MustMetric(
+		"servers.localhost.cpu_load",
+		map[string]string{},
+		map[string]interface{}{
+			"value": math.Inf(1),
+		},
+		time.Unix(1435077219, 0),
+	)
+
+	testutil.RequireMetricEqual(t, expected, m)
 }
 
 func TestFilterMatchDefault(t *testing.T) {
diff --git a/testutil/metric.go b/testutil/metric.go
index 25e23fa20..da3ace0f2 100644
--- a/testutil/metric.go
+++ b/testutil/metric.go
@@ -129,7 +129,7 @@ func IgnoreTime() cmp.Option {
 }
 
 // MetricEqual returns true if the metrics are equal.
-func MetricEqual(expected, actual telegraf.Metric) bool {
+func MetricEqual(expected, actual telegraf.Metric, opts ...cmp.Option) bool {
 	var lhs, rhs *metricDiff
 	if expected != nil {
 		lhs = newMetricDiff(expected)
@@ -138,7 +138,8 @@ func MetricEqual(expected, actual telegraf.Metric) bool {
 		rhs = newMetricDiff(actual)
 	}
 
-	return cmp.Equal(lhs, rhs)
+	opts = append(opts, cmpopts.EquateNaNs())
+	return cmp.Equal(lhs, rhs, opts...)
 }
 
 // RequireMetricEqual halts the test with an error if the metrics are not
@@ -154,6 +155,7 @@ func RequireMetricEqual(t *testing.T, expected, actual telegraf.Metric, opts ...
 		rhs = newMetricDiff(actual)
 	}
 
+	opts = append(opts, cmpopts.EquateNaNs())
 	if diff := cmp.Diff(lhs, rhs, opts...); diff != "" {
 		t.Fatalf("telegraf.Metric\n--- expected\n+++ actual\n%s", diff)
 	}
@@ -172,6 +174,8 @@ func RequireMetricsEqual(t *testing.T, expected, actual []telegraf.Metric, opts
 	for _, m := range actual {
 		rhs = append(rhs, newMetricDiff(m))
 	}
+
+	opts = append(opts, cmpopts.EquateNaNs())
 	if diff := cmp.Diff(lhs, rhs, opts...); diff != "" {
 		t.Fatalf("[]telegraf.Metric\n--- expected\n+++ actual\n%s", diff)
 	}
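A note on why the patch above is sufficient: the parser hands value conversion to Go's strconv.ParseFloat, which accepts the NaN and ±Inf spellings on its own, so deleting the guard is all that is needed on the parsing side. The testutil change then appends cmpopts.EquateNaNs so that expected and actual NaN field values compare equal (NaN != NaN under the default comparison). A minimal standalone sketch of the underlying standard-library behavior (not part of the patch):

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

func main() {
	// strconv.ParseFloat understands the special-value spellings itself,
	// so with the parser's guard removed, NaN and Inf flow through as
	// ordinary float64 field values.
	for _, s := range []string{"NaN", "+Inf", "-Inf"} {
		v, err := strconv.ParseFloat(s, 64)
		fmt.Printf("%-4s -> %v err=%v IsNaN=%v IsInf=%v\n",
			s, v, err, math.IsNaN(v), math.IsInf(v, 0))
	}
}
```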
- - steal (since Linux 2.6.11) - (8) Stolen time, which is the time spent in other operating systems - when running in a virtualized environment - - guest (since Linux 2.6.24) - (9) Time spent running a virtual CPU for guest operating systems - under the control of the Linux kernel. - - guest_nice (since Linux 2.6.33) - (10) Time spent running a niced guest (virtual CPU for guest operating systems under the control of the Linux kernel). +cpu,cpu=cpu0,host=loaner time_active=202224.15999999992,time_guest=30250.35,time_guest_nice=0,time_idle=1527035.04,time_iowait=1352,time_irq=0,time_nice=169.28,time_softirq=6281.4,time_steal=0,time_system=40097.14,time_user=154324.34 1568760922000000000 +cpu,cpu=cpu0,host=loaner usage_active=31.249999981810106,usage_guest=2.083333333080696,usage_guest_nice=0,usage_idle=68.7500000181899,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=4.166666666161392,usage_user=25.000000002273737 1568760922000000000 +cpu,cpu=cpu1,host=loaner time_active=201890.02000000002,time_guest=30508.41,time_guest_nice=0,time_idle=264641.18,time_iowait=210.44,time_irq=0,time_nice=181.75,time_softirq=4537.88,time_steal=0,time_system=39480.7,time_user=157479.25 1568760922000000000 +cpu,cpu=cpu1,host=loaner usage_active=12.500000010610771,usage_guest=2.0833333328280585,usage_guest_nice=0,usage_idle=87.49999998938922,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=2.0833333332070145,usage_steal=0,usage_system=4.166666665656117,usage_user=4.166666666414029 1568760922000000000 +cpu,cpu=cpu2,host=loaner time_active=201382.78999999998,time_guest=30325.8,time_guest_nice=0,time_idle=264686.63,time_iowait=202.77,time_irq=0,time_nice=162.81,time_softirq=3378.34,time_steal=0,time_system=39270.59,time_user=158368.28 1568760922000000000 +cpu,cpu=cpu2,host=loaner usage_active=15.999999993480742,usage_guest=1.9999999999126885,usage_guest_nice=0,usage_idle=84.00000000651926,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=2.0000000002764864,usage_steal=0,usage_system=3.999999999825377,usage_user=7.999999998923158 1568760922000000000 +cpu,cpu=cpu3,host=loaner time_active=198953.51000000007,time_guest=30344.43,time_guest_nice=0,time_idle=265504.09,time_iowait=187.64,time_irq=0,time_nice=197.47,time_softirq=2301.47,time_steal=0,time_system=39313.73,time_user=156953.2 1568760922000000000 +cpu,cpu=cpu3,host=loaner usage_active=10.41666667424579,usage_guest=0,usage_guest_nice=0,usage_idle=89.58333332575421,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=4.166666666666667,usage_user=6.249999998484175 1568760922000000000 +cpu,cpu=cpu-total,host=loaner time_active=804450.5299999998,time_guest=121429,time_guest_nice=0,time_idle=2321866.96,time_iowait=1952.86,time_irq=0,time_nice=711.32,time_softirq=16499.1,time_steal=0,time_system=158162.17,time_user=627125.08 1568760922000000000 +cpu,cpu=cpu-total,host=loaner usage_active=17.616580305880305,usage_guest=1.036269430422946,usage_guest_nice=0,usage_idle=82.3834196941197,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=1.0362694300459534,usage_steal=0,usage_system=4.145077721691784,usage_user=11.398963731636465 1568760922000000000 ``` - -# Measurements: -### CPU Time measurements: - -Meta: -- units: CPU Time -- tags: `cpu= or ` - -Measurement names: -- cpu_time_user -- cpu_time_system -- cpu_time_idle -- cpu_time_active (must be explicitly enabled by setting `report_active = true`) -- cpu_time_nice -- cpu_time_iowait -- cpu_time_irq -- cpu_time_softirq -- cpu_time_steal -- 
cpu_time_guest
-- cpu_time_guest_nice
-
-### CPU Usage Percent Measurements:
-
-Meta:
-- units: percent (out of 100)
-- tags: `cpu= or `
-
-Measurement names:
-- cpu_usage_user
-- cpu_usage_system
-- cpu_usage_idle
-- cpu_usage_active (must be explicitly enabled by setting `report_active = true`)
-- cpu_usage_nice
-- cpu_usage_iowait
-- cpu_usage_irq
-- cpu_usage_softirq
-- cpu_usage_steal
-- cpu_usage_guest
-- cpu_usage_guest_nice

From f669ef445277016cafe94f0f4ceb362006b36c1a Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Thu, 19 Sep 2019 20:11:20 -0700
Subject: [PATCH 1195/1815] Update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d449f7dea..dba57206a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,7 @@
 - [#6361](https://github.com/influxdata/telegraf/pull/6361): Add ability to read query from file to postgresql_extensible input.
 - [#5921](https://github.com/influxdata/telegraf/pull/5921): Add replication metrics to the redis input.
 - [#6177](https://github.com/influxdata/telegraf/pull/6177): Support NX-OS telemetry extensions in cisco_telemetry_mdt.
+- [#6415](https://github.com/influxdata/telegraf/pull/6415): Allow graphite parser to create Inf and NaN values.
 
 ## v1.12.2 [unreleased]
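The Suricata plugin added below revolves around one transform: flattening Suricata's nested EVE stats JSON into flat, delimiter-joined Telegraf field names. A minimal standalone sketch of that idea (a simplification of the patch's flexFlatten helper, with its type checking and error handling trimmed):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// flatten collapses nested JSON objects into single-level field names
// joined by delim, keeping only numeric leaves.
func flatten(out map[string]float64, key string, v interface{}, delim string) {
	switch t := v.(type) {
	case map[string]interface{}:
		for k, sub := range t {
			name := k
			if key != "" {
				name = key + delim + k
			}
			flatten(out, name, sub, delim)
		}
	case float64:
		out[key] = t
	}
}

func main() {
	var stats map[string]interface{}
	json.Unmarshal([]byte(`{"tcp":{"memuse":3276800,"sessions":1}}`), &stats)

	fields := map[string]float64{}
	flatten(fields, "", stats, "_")
	fmt.Println(fields) // map[tcp_memuse:3.2768e+06 tcp_sessions:1]
}
```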
subitem "alert" of "detect" + # becomes "detect_alert" when delimiter is "_". + delimiter = "_" +``` + +### Measurements & Fields: + +Fields in the 'suricata' measurement follow the JSON format used by Suricata's +stats output. +See http://suricata.readthedocs.io/en/latest/performance/statistics.html for +more information. + +All fields are numeric. +- suricata + - app_layer_flow_dcerpc_udp + - app_layer_flow_dns_tcp + - app_layer_flow_dns_udp + - app_layer_flow_enip_udp + - app_layer_flow_failed_tcp + - app_layer_flow_failed_udp + - app_layer_flow_http + - app_layer_flow_ssh + - app_layer_flow_tls + - app_layer_tx_dns_tcp + - app_layer_tx_dns_udp + - app_layer_tx_enip_udp + - app_layer_tx_http + - app_layer_tx_smtp + - capture_kernel_drops + - capture_kernel_packets + - decoder_avg_pkt_size + - decoder_bytes + - decoder_ethernet + - decoder_gre + - decoder_icmpv4 + - decoder_icmpv4_ipv4_unknown_ver + - decoder_icmpv6 + - decoder_invalid + - decoder_ipv4 + - decoder_ipv6 + - decoder_max_pkt_size + - decoder_pkts + - decoder_tcp + - decoder_tcp_hlen_too_small + - decoder_tcp_invalid_optlen + - decoder_teredo + - decoder_udp + - decoder_vlan + - detect_alert + - dns_memcap_global + - dns_memuse + - flow_memuse + - flow_mgr_closed_pruned + - flow_mgr_est_pruned + - flow_mgr_flows_checked + - flow_mgr_flows_notimeout + - flow_mgr_flows_removed + - flow_mgr_flows_timeout + - flow_mgr_flows_timeout_inuse + - flow_mgr_new_pruned + - flow_mgr_rows_checked + - flow_mgr_rows_empty + - flow_mgr_rows_maxlen + - flow_mgr_rows_skipped + - flow_spare + - flow_tcp_reuse + - http_memuse + - tcp_memuse + - tcp_pseudo + - tcp_reassembly_gap + - tcp_reassembly_memuse + - tcp_rst + - tcp_sessions + - tcp_syn + - tcp_synack + - ... + +### Tags: + +The `suricata` measurement has the following tags: + +- thread: `Global` for global statistics (if enabled), thread IDs (e.g. `W#03-enp0s31f6`) for thread-specific statistics + +## Suricata configuration + +Suricata needs to deliver the 'stats' event type to a given unix socket for +this plugin to pick up. 
This can be done, for example, by creating an additional +output in the Suricata configuration file: + +```yaml +- eve-log: + enabled: yes + filetype: unix_stream + filename: /tmp/suricata-stats.sock + types: + - stats: + threads: yes +``` + +## Example Output: + +```text +suricata,host=myhost,thread=FM#01 flow_mgr_rows_empty=0,flow_mgr_rows_checked=65536,flow_mgr_closed_pruned=0,flow_emerg_mode_over=0,flow_mgr_flows_timeout_inuse=0,flow_mgr_rows_skipped=65535,flow_mgr_bypassed_pruned=0,flow_mgr_flows_removed=0,flow_mgr_est_pruned=0,flow_mgr_flows_notimeout=1,flow_mgr_flows_checked=1,flow_mgr_rows_busy=0,flow_spare=10000,flow_mgr_rows_maxlen=1,flow_mgr_new_pruned=0,flow_emerg_mode_entered=0,flow_tcp_reuse=0,flow_mgr_flows_timeout=0 1568368562545197545 +suricata,host=myhost,thread=W#04-wlp4s0 decoder_ltnull_pkt_too_small=0,decoder_ipraw_invalid_ip_version=0,defrag_ipv4_reassembled=0,tcp_no_flow=0,app_layer_flow_tls=1,decoder_udp=25,defrag_ipv6_fragments=0,defrag_ipv4_fragments=0,decoder_tcp=59,decoder_vlan=0,decoder_pkts=84,decoder_vlan_qinq=0,decoder_avg_pkt_size=574,flow_memcap=0,defrag_max_frag_hits=0,tcp_ssn_memcap_drop=0,capture_kernel_packets=84,app_layer_flow_dcerpc_udp=0,app_layer_tx_dns_tcp=0,tcp_rst=0,decoder_icmpv4=0,app_layer_tx_tls=0,decoder_ipv4=84,decoder_erspan=0,decoder_ltnull_unsupported_type=0,decoder_invalid=0,app_layer_flow_ssh=0,capture_kernel_drops=0,app_layer_flow_ftp=0,app_layer_tx_http=0,tcp_pseudo_failed=0,defrag_ipv6_reassembled=0,defrag_ipv6_timeouts=0,tcp_pseudo=0,tcp_sessions=1,decoder_ethernet=84,decoder_raw=0,decoder_sctp=0,app_layer_flow_dns_udp=1,decoder_gre=0,app_layer_flow_http=0,app_layer_flow_imap=0,tcp_segment_memcap_drop=0,detect_alert=0,app_layer_flow_failed_tcp=0,decoder_teredo=0,decoder_mpls=0,decoder_ppp=0,decoder_max_pkt_size=1422,decoder_ipv6=0,tcp_reassembly_gap=0,app_layer_flow_dcerpc_tcp=0,decoder_ipv4_in_ipv6=0,tcp_stream_depth_reached=0,app_layer_flow_dns_tcp=0,app_layer_flow_smtp=0,tcp_syn=1,decoder_sll=0,tcp_invalid_checksum=0,app_layer_tx_dns_udp=1,decoder_bytes=48258,defrag_ipv4_timeouts=0,app_layer_flow_msn=0,decoder_pppoe=0,decoder_null=0,app_layer_flow_failed_udp=3,app_layer_tx_smtp=0,decoder_icmpv6=0,decoder_ipv6_in_ipv6=0,tcp_synack=1,app_layer_flow_smb=0,decoder_dce_pkt_too_small=0 1568368562545174807 +suricata,host=myhost,thread=W#01-wlp4s0 
tcp_synack=0,app_layer_flow_imap=0,decoder_ipv4_in_ipv6=0,decoder_max_pkt_size=684,decoder_gre=0,defrag_ipv4_timeouts=0,tcp_invalid_checksum=0,decoder_ipv4=53,flow_memcap=0,app_layer_tx_http=0,app_layer_tx_smtp=0,decoder_null=0,tcp_no_flow=0,app_layer_tx_tls=0,app_layer_flow_ssh=0,app_layer_flow_smtp=0,decoder_pppoe=0,decoder_teredo=0,decoder_ipraw_invalid_ip_version=0,decoder_ltnull_pkt_too_small=0,tcp_rst=0,decoder_ppp=0,decoder_ipv6=29,app_layer_flow_dns_udp=3,decoder_vlan=0,app_layer_flow_dcerpc_tcp=0,tcp_syn=0,defrag_ipv4_fragments=0,defrag_ipv6_timeouts=0,decoder_raw=0,defrag_ipv6_reassembled=0,tcp_reassembly_gap=0,tcp_sessions=0,decoder_udp=44,tcp_segment_memcap_drop=0,app_layer_tx_dns_udp=3,app_layer_flow_tls=0,decoder_tcp=37,defrag_ipv4_reassembled=0,app_layer_flow_failed_udp=6,app_layer_flow_ftp=0,decoder_icmpv6=1,tcp_stream_depth_reached=0,capture_kernel_drops=0,decoder_sll=0,decoder_bytes=15883,decoder_ethernet=91,tcp_pseudo=0,app_layer_flow_http=0,decoder_sctp=0,decoder_pkts=91,decoder_avg_pkt_size=174,decoder_erspan=0,app_layer_flow_msn=0,app_layer_flow_smb=0,capture_kernel_packets=91,decoder_icmpv4=0,decoder_ipv6_in_ipv6=0,tcp_ssn_memcap_drop=0,decoder_vlan_qinq=0,decoder_ltnull_unsupported_type=0,decoder_invalid=0,defrag_max_frag_hits=0,tcp_pseudo_failed=0,detect_alert=0,app_layer_tx_dns_tcp=0,app_layer_flow_failed_tcp=0,app_layer_flow_dcerpc_udp=0,app_layer_flow_dns_tcp=0,defrag_ipv6_fragments=0,decoder_mpls=0,decoder_dce_pkt_too_small=0 1568368562545148438 +suricata,host=myhost flow_memuse=7094464,tcp_memuse=3276800,tcp_reassembly_memuse=12332832,dns_memuse=0,dns_memcap_state=0,dns_memcap_global=0,http_memuse=0,http_memcap=0 1568368562545144569 +suricata,host=myhost,thread=W#07-wlp4s0 app_layer_tx_http=0,app_layer_tx_dns_tcp=0,decoder_vlan=0,decoder_pppoe=0,decoder_sll=0,decoder_tcp=0,flow_memcap=0,app_layer_flow_msn=0,tcp_no_flow=0,tcp_rst=0,tcp_segment_memcap_drop=0,tcp_sessions=0,detect_alert=0,defrag_ipv6_reassembled=0,decoder_ipraw_invalid_ip_version=0,decoder_erspan=0,decoder_icmpv4=0,app_layer_tx_dns_udp=2,decoder_ltnull_pkt_too_small=0,decoder_bytes=1998,decoder_ipv6=1,defrag_ipv4_fragments=0,defrag_ipv6_fragments=0,app_layer_tx_smtp=0,decoder_ltnull_unsupported_type=0,decoder_max_pkt_size=342,app_layer_flow_ftp=0,decoder_ipv6_in_ipv6=0,defrag_ipv4_reassembled=0,defrag_ipv6_timeouts=0,app_layer_flow_dns_tcp=0,decoder_avg_pkt_size=181,defrag_ipv4_timeouts=0,tcp_stream_depth_reached=0,decoder_mpls=0,app_layer_flow_dns_udp=2,tcp_ssn_memcap_drop=0,app_layer_flow_dcerpc_tcp=0,app_layer_flow_failed_udp=2,app_layer_flow_smb=0,app_layer_flow_failed_tcp=0,decoder_invalid=0,decoder_null=0,decoder_gre=0,decoder_ethernet=11,app_layer_flow_ssh=0,defrag_max_frag_hits=0,capture_kernel_drops=0,tcp_pseudo_failed=0,app_layer_flow_smtp=0,decoder_udp=10,decoder_sctp=0,decoder_teredo=0,decoder_icmpv6=1,tcp_pseudo=0,tcp_synack=0,app_layer_tx_tls=0,app_layer_flow_imap=0,capture_kernel_packets=11,decoder_pkts=11,decoder_raw=0,decoder_ppp=0,tcp_syn=0,tcp_invalid_checksum=0,app_layer_flow_tls=0,decoder_ipv4_in_ipv6=0,app_layer_flow_http=0,decoder_dce_pkt_too_small=0,decoder_ipv4=10,decoder_vlan_qinq=0,tcp_reassembly_gap=0,app_layer_flow_dcerpc_udp=0 1568368562545110847 +suricata,host=myhost,thread=W#06-wlp4s0 
app_layer_tx_smtp=0,decoder_ipv6_in_ipv6=0,decoder_dce_pkt_too_small=0,tcp_segment_memcap_drop=0,tcp_sessions=1,decoder_ppp=0,tcp_pseudo_failed=0,app_layer_tx_dns_tcp=0,decoder_invalid=0,defrag_ipv4_timeouts=0,app_layer_flow_smb=0,app_layer_flow_ssh=0,decoder_bytes=19407,decoder_null=0,app_layer_flow_tls=1,decoder_avg_pkt_size=473,decoder_pkts=41,decoder_pppoe=0,decoder_tcp=32,defrag_ipv4_reassembled=0,tcp_reassembly_gap=0,decoder_raw=0,flow_memcap=0,defrag_ipv6_timeouts=0,app_layer_flow_smtp=0,app_layer_tx_http=0,decoder_sll=0,decoder_udp=8,decoder_ltnull_pkt_too_small=0,decoder_ltnull_unsupported_type=0,decoder_ipv4_in_ipv6=0,decoder_vlan=0,decoder_max_pkt_size=1422,tcp_no_flow=0,app_layer_flow_failed_tcp=0,app_layer_flow_dns_tcp=0,app_layer_flow_ftp=0,decoder_icmpv4=0,defrag_max_frag_hits=0,tcp_rst=0,app_layer_flow_msn=0,app_layer_flow_failed_udp=2,app_layer_flow_dns_udp=0,app_layer_flow_dcerpc_udp=0,decoder_ipv4=39,decoder_ethernet=41,defrag_ipv6_reassembled=0,tcp_ssn_memcap_drop=0,app_layer_tx_tls=0,decoder_gre=0,decoder_vlan_qinq=0,tcp_pseudo=0,app_layer_flow_imap=0,app_layer_flow_dcerpc_tcp=0,defrag_ipv4_fragments=0,defrag_ipv6_fragments=0,tcp_synack=1,app_layer_flow_http=0,app_layer_tx_dns_udp=0,capture_kernel_packets=41,decoder_ipv6=2,tcp_invalid_checksum=0,tcp_stream_depth_reached=0,decoder_ipraw_invalid_ip_version=0,decoder_icmpv6=1,tcp_syn=1,detect_alert=0,capture_kernel_drops=0,decoder_teredo=0,decoder_erspan=0,decoder_sctp=0,decoder_mpls=0 1568368562545084670 +suricata,host=myhost,thread=W#02-wlp4s0 decoder_tcp=53,tcp_rst=3,tcp_reassembly_gap=0,defrag_ipv6_timeouts=0,tcp_ssn_memcap_drop=0,app_layer_flow_dcerpc_tcp=0,decoder_max_pkt_size=1422,decoder_ipv6_in_ipv6=0,tcp_no_flow=0,app_layer_flow_ftp=0,app_layer_flow_ssh=0,decoder_pkts=82,decoder_sctp=0,tcp_invalid_checksum=0,app_layer_flow_dns_tcp=0,decoder_ipraw_invalid_ip_version=0,decoder_bytes=26441,decoder_erspan=0,tcp_pseudo_failed=0,tcp_syn=1,app_layer_tx_http=0,app_layer_tx_smtp=0,decoder_teredo=0,decoder_ipv4=80,defrag_ipv4_fragments=0,tcp_stream_depth_reached=0,app_layer_flow_smb=0,capture_kernel_packets=82,decoder_null=0,decoder_ltnull_pkt_too_small=0,decoder_ppp=0,decoder_icmpv6=1,app_layer_flow_dns_udp=2,app_layer_flow_http=0,app_layer_tx_dns_udp=3,decoder_mpls=0,decoder_sll=0,defrag_ipv4_reassembled=0,tcp_segment_memcap_drop=0,app_layer_flow_imap=0,decoder_ltnull_unsupported_type=0,decoder_icmpv4=0,decoder_raw=0,defrag_ipv4_timeouts=0,app_layer_flow_failed_udp=8,decoder_gre=0,capture_kernel_drops=0,defrag_ipv6_reassembled=0,tcp_pseudo=0,app_layer_flow_tls=1,decoder_avg_pkt_size=322,decoder_dce_pkt_too_small=0,decoder_ethernet=82,defrag_ipv6_fragments=0,tcp_sessions=1,tcp_synack=1,app_layer_tx_dns_tcp=0,decoder_vlan=0,flow_memcap=0,decoder_vlan_qinq=0,decoder_udp=28,decoder_invalid=0,detect_alert=0,app_layer_flow_failed_tcp=0,app_layer_tx_tls=0,decoder_pppoe=0,decoder_ipv6=2,decoder_ipv4_in_ipv6=0,defrag_max_frag_hits=0,app_layer_flow_dcerpc_udp=0,app_layer_flow_smtp=0,app_layer_flow_msn=0 1568368562545061864 +suricata,host=myhost,thread=W#08-wlp4s0 
decoder_dce_pkt_too_small=0,app_layer_tx_dns_tcp=0,decoder_pkts=58,decoder_ppp=0,decoder_raw=0,decoder_ipv4_in_ipv6=0,decoder_max_pkt_size=1392,tcp_invalid_checksum=0,tcp_syn=0,decoder_ipv4=51,decoder_ipv6_in_ipv6=0,decoder_tcp=0,decoder_ltnull_pkt_too_small=0,flow_memcap=0,decoder_udp=58,tcp_ssn_memcap_drop=0,tcp_pseudo=0,app_layer_flow_dcerpc_udp=0,app_layer_flow_dns_udp=5,app_layer_tx_http=0,capture_kernel_drops=0,decoder_vlan=0,tcp_segment_memcap_drop=0,app_layer_flow_ftp=0,app_layer_flow_imap=0,app_layer_flow_http=0,app_layer_flow_tls=0,decoder_icmpv4=0,decoder_sctp=0,defrag_ipv4_timeouts=0,tcp_reassembly_gap=0,detect_alert=0,decoder_ethernet=58,tcp_pseudo_failed=0,decoder_teredo=0,defrag_ipv4_reassembled=0,tcp_sessions=0,app_layer_flow_msn=0,decoder_ipraw_invalid_ip_version=0,tcp_no_flow=0,app_layer_flow_dns_tcp=0,decoder_null=0,defrag_ipv4_fragments=0,app_layer_flow_dcerpc_tcp=0,app_layer_flow_failed_udp=8,app_layer_tx_tls=0,decoder_bytes=15800,decoder_ipv6=7,tcp_stream_depth_reached=0,decoder_invalid=0,decoder_ltnull_unsupported_type=0,app_layer_tx_dns_udp=6,decoder_pppoe=0,decoder_avg_pkt_size=272,decoder_erspan=0,defrag_ipv6_timeouts=0,app_layer_flow_failed_tcp=0,decoder_gre=0,decoder_sll=0,defrag_max_frag_hits=0,app_layer_flow_ssh=0,capture_kernel_packets=58,decoder_mpls=0,decoder_vlan_qinq=0,tcp_rst=0,app_layer_flow_smb=0,app_layer_tx_smtp=0,decoder_icmpv6=0,defrag_ipv6_fragments=0,defrag_ipv6_reassembled=0,tcp_synack=0,app_layer_flow_smtp=0 1568368562545035575 +suricata,host=myhost,thread=W#05-wlp4s0 tcp_reassembly_gap=0,capture_kernel_drops=0,decoder_ltnull_unsupported_type=0,tcp_sessions=0,tcp_stream_depth_reached=0,tcp_pseudo_failed=0,app_layer_flow_failed_tcp=0,app_layer_tx_dns_tcp=0,decoder_null=0,decoder_dce_pkt_too_small=0,decoder_udp=7,tcp_rst=3,app_layer_flow_dns_tcp=0,decoder_invalid=0,defrag_ipv4_reassembled=0,tcp_synack=0,app_layer_flow_ftp=0,decoder_bytes=3117,decoder_pppoe=0,app_layer_flow_dcerpc_tcp=0,app_layer_flow_smb=0,decoder_ipv6_in_ipv6=0,decoder_ipraw_invalid_ip_version=0,app_layer_flow_imap=0,app_layer_tx_dns_udp=2,decoder_ppp=0,decoder_ipv4=21,decoder_tcp=14,flow_memcap=0,tcp_syn=0,tcp_invalid_checksum=0,decoder_teredo=0,decoder_ltnull_pkt_too_small=0,defrag_max_frag_hits=0,app_layer_tx_tls=0,decoder_pkts=24,decoder_sll=0,defrag_ipv6_fragments=0,app_layer_flow_dcerpc_udp=0,app_layer_flow_smtp=0,decoder_icmpv6=3,defrag_ipv6_timeouts=0,decoder_ipv6=3,decoder_raw=0,defrag_ipv6_reassembled=0,tcp_no_flow=0,detect_alert=0,app_layer_flow_tls=0,decoder_ethernet=24,decoder_vlan=0,decoder_icmpv4=0,decoder_ipv4_in_ipv6=0,app_layer_flow_failed_udp=1,decoder_mpls=0,decoder_max_pkt_size=653,decoder_sctp=0,defrag_ipv4_timeouts=0,tcp_ssn_memcap_drop=0,app_layer_flow_dns_udp=1,app_layer_tx_smtp=0,capture_kernel_packets=24,decoder_vlan_qinq=0,decoder_gre=0,app_layer_flow_ssh=0,app_layer_flow_msn=0,defrag_ipv4_fragments=0,app_layer_flow_http=0,tcp_segment_memcap_drop=0,tcp_pseudo=0,app_layer_tx_http=0,decoder_erspan=0,decoder_avg_pkt_size=129 1568368562545009684 +suricata,host=myhost,thread=W#03-wlp4s0 
app_layer_flow_failed_tcp=0,decoder_teredo=0,decoder_ipv6_in_ipv6=0,tcp_pseudo_failed=0,tcp_stream_depth_reached=0,tcp_syn=0,decoder_gre=0,tcp_segment_memcap_drop=0,tcp_ssn_memcap_drop=0,app_layer_tx_smtp=0,decoder_raw=0,decoder_ltnull_pkt_too_small=0,tcp_sessions=0,tcp_reassembly_gap=0,app_layer_flow_ssh=0,app_layer_flow_imap=0,decoder_ipv4=463,decoder_ethernet=463,capture_kernel_packets=463,decoder_pppoe=0,defrag_ipv4_reassembled=0,app_layer_flow_tls=0,app_layer_flow_dcerpc_udp=0,app_layer_flow_dns_udp=0,decoder_vlan=0,decoder_ipraw_invalid_ip_version=0,decoder_mpls=0,tcp_no_flow=0,decoder_avg_pkt_size=445,decoder_udp=432,flow_memcap=0,app_layer_tx_dns_udp=0,app_layer_flow_msn=0,app_layer_flow_http=0,app_layer_flow_dcerpc_tcp=0,decoder_ipv6=0,decoder_ipv4_in_ipv6=0,defrag_ipv4_timeouts=0,defrag_ipv4_fragments=0,defrag_ipv6_timeouts=0,decoder_sctp=0,defrag_ipv6_fragments=0,app_layer_flow_dns_tcp=0,app_layer_tx_tls=0,defrag_max_frag_hits=0,decoder_bytes=206345,decoder_vlan_qinq=0,decoder_invalid=0,decoder_ppp=0,tcp_rst=0,detect_alert=0,capture_kernel_drops=0,app_layer_flow_failed_udp=4,decoder_null=0,decoder_icmpv4=0,decoder_icmpv6=0,decoder_ltnull_unsupported_type=0,defrag_ipv6_reassembled=0,tcp_invalid_checksum=0,tcp_synack=0,decoder_tcp=31,tcp_pseudo=0,app_layer_flow_smb=0,app_layer_flow_smtp=0,decoder_max_pkt_size=1463,decoder_dce_pkt_too_small=0,app_layer_tx_http=0,decoder_pkts=463,decoder_sll=0,app_layer_flow_ftp=0,app_layer_tx_dns_tcp=0,decoder_erspan=0 1568368562544966078 +``` diff --git a/plugins/inputs/suricata/suricata.go b/plugins/inputs/suricata/suricata.go new file mode 100644 index 000000000..17c0b5715 --- /dev/null +++ b/plugins/inputs/suricata/suricata.go @@ -0,0 +1,229 @@ +package suricata + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "io" + "net" + "strings" + "sync" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const ( + // InBufSize is the input buffer size for JSON received via socket. + // Set to 10MB, as depending on the number of threads the output might be + // large. + InBufSize = 10 * 1024 * 1024 +) + +// Suricata is a Telegraf input plugin for Suricata runtime statistics. +type Suricata struct { + Source string `toml:"source"` + Delimiter string `toml:"delimiter"` + + inputListener *net.UnixListener + cancel context.CancelFunc + + Log telegraf.Logger `toml:"-"` + + wg sync.WaitGroup +} + +// Description returns the plugin description. +func (s *Suricata) Description() string { + return "Suricata stats plugin" +} + +const sampleConfig = ` + ## Data sink for Suricata stats log + # This is expected to be a filename of a + # unix socket to be created for listening. + source = "/var/run/suricata-stats.sock" + + # Delimiter for flattening field keys, e.g. subitem "alert" of "detect" + # becomes "detect_alert" when delimiter is "_". + delimiter = "_" +` + +// SampleConfig returns a sample TOML section to illustrate configuration +// options. +func (s *Suricata) SampleConfig() string { + return sampleConfig +} + +// Start initiates background collection of JSON data from the socket +// provided to Suricata. 
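+// It creates a unix listener at Source (unlinked again on close) and keeps
+// accepting connections in the background until Stop is called.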
+func (s *Suricata) Start(acc telegraf.Accumulator) error {
+	var err error
+	s.inputListener, err = net.ListenUnix("unix", &net.UnixAddr{
+		Name: s.Source,
+		Net:  "unix",
+	})
+	if err != nil {
+		return err
+	}
+	ctx, cancel := context.WithCancel(context.Background())
+	s.cancel = cancel
+	s.inputListener.SetUnlinkOnClose(true)
+	s.wg.Add(1)
+	go func() {
+		defer s.wg.Done()
+		s.handleServerConnection(ctx, acc)
+	}()
+	return nil
+}
+
+// Stop causes the plugin to cease collecting JSON data from the socket provided
+// to Suricata.
+func (s *Suricata) Stop() {
+	s.inputListener.Close()
+	if s.cancel != nil {
+		s.cancel()
+	}
+	s.wg.Wait()
+}
+
+func (s *Suricata) readInput(ctx context.Context, acc telegraf.Accumulator, conn net.Conn) error {
+	reader := bufio.NewReaderSize(conn, InBufSize)
+	for {
+		select {
+		case <-ctx.Done():
+			return nil
+		default:
+			line, rerr := reader.ReadBytes('\n')
+			if rerr != nil {
+				return rerr
+			} else if len(line) > 0 {
+				s.parse(acc, line)
+			}
+		}
+	}
+}
+
+func (s *Suricata) handleServerConnection(ctx context.Context, acc telegraf.Accumulator) {
+	var err error
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		default:
+			var conn net.Conn
+			conn, err = s.inputListener.Accept()
+			if err != nil {
+				if !strings.HasSuffix(err.Error(), ": use of closed network connection") {
+					acc.AddError(err)
+				}
+				continue
+			}
+			err = s.readInput(ctx, acc, conn)
+			// we want to handle EOF as an opportunity to wait for a new
+			// connection -- this could, for example, happen when Suricata is
+			// restarted while Telegraf is running.
+			if err != io.EOF {
+				acc.AddError(err)
+				return
+			}
+		}
+	}
+}
+
+func flexFlatten(outmap map[string]interface{}, field string, v interface{}, delimiter string) error {
+	switch t := v.(type) {
+	case map[string]interface{}:
+		for k, v := range t {
+			var err error
+			if field == "" {
+				err = flexFlatten(outmap, k, v, delimiter)
+			} else {
+				err = flexFlatten(outmap, fmt.Sprintf("%s%s%s", field, delimiter, k), v, delimiter)
+			}
+			if err != nil {
+				return err
+			}
+		}
+	case float64:
+		outmap[field] = t
+	default:
+		return fmt.Errorf("Unsupported type %T encountered", t)
+	}
+	return nil
+}
+
+func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) {
+	// initial parsing
+	var result map[string]interface{}
+	err := json.Unmarshal(sjson, &result)
+	if err != nil {
+		acc.AddError(err)
+		return
+	}
+
+	// check for presence of relevant stats
+	if _, ok := result["stats"]; !ok {
+		s.Log.Debug("Input does not contain necessary 'stats' sub-object")
+		return
+	}
+
+	if _, ok := result["stats"].(map[string]interface{}); !ok {
+		s.Log.Debug("The 'stats' sub-object does not have required structure")
+		return
+	}
+
+	fields := make(map[string]map[string]interface{})
+	totalmap := make(map[string]interface{})
+	for k, v := range result["stats"].(map[string]interface{}) {
+		if k == "threads" {
+			if v, ok := v.(map[string]interface{}); ok {
+				for k, t := range v {
+					outmap := make(map[string]interface{})
+					if threadStruct, ok := t.(map[string]interface{}); ok {
+						err = flexFlatten(outmap, "", threadStruct, s.Delimiter)
+						if err != nil {
+							s.Log.Debug(err)
+							// we skip this thread as something did not parse correctly
+							continue
+						}
+						fields[k] = outmap
+					}
+				}
+			} else {
+				s.Log.Debug("The 'threads' sub-object does not have required structure")
+			}
+		} else {
+			err = flexFlatten(totalmap, k, v, s.Delimiter)
+			if err != nil {
+				s.Log.Debug(err.Error())
+				// we skip this subitem as something did not parse correctly
+			}
+		}
+	}
+
fields["total"] = totalmap + + for k := range fields { + if k == "Global" { + acc.AddFields("suricata", fields[k], nil) + } else { + acc.AddFields("suricata", fields[k], map[string]string{"thread": k}) + } + } +} + +// Gather measures and submits one full set of telemetry to Telegraf. +// Not used here, submission is completely input-driven. +func (s *Suricata) Gather(acc telegraf.Accumulator) error { + return nil +} + +func init() { + inputs.Add("suricata", func() telegraf.Input { + return &Suricata{ + Source: "/var/run/suricata-stats.sock", + Delimiter: "_", + } + }) +} diff --git a/plugins/inputs/suricata/suricata_test.go b/plugins/inputs/suricata/suricata_test.go new file mode 100644 index 000000000..093efd347 --- /dev/null +++ b/plugins/inputs/suricata/suricata_test.go @@ -0,0 +1,472 @@ +package suricata + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "math/rand" + "net" + "os" + "path/filepath" + "regexp" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +var ex2 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"capture":{"kernel_packets":905344474,"kernel_drops":78355440,"kernel_packets_delta":2376742,"kernel_drops_delta":82049}}}` +var ex3 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "foo": { "capture":{"kernel_packets":905344474,"kernel_drops":78355440}}}}}` +var ex4 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "W1#en..bar1": { "capture":{"kernel_packets":905344474,"kernel_drops":78355440}}}}}` +var brokenType1 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "W1#en..bar1": { "capture":{"kernel_packets":905344474,"kernel_drops": true}}}}}` +var brokenType2 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "W1#en..bar1": { "capture":{"kernel_packets":905344474,"kernel_drops": ["foo"]}}}}}` +var brokenType3 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "W1#en..bar1": { "capture":{"kernel_packets":905344474,"kernel_drops":"none this time"}}}}}` +var brokenType4 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "W1#en..bar1": { "capture":{"kernel_packets":905344474,"kernel_drops":null}}}}}` +var brokenType5 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"foo": null}}` +var brokenStruct1 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": ["foo"]}}` +var brokenStruct2 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats"}` +var brokenStruct3 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats": "foobar"}` +var brokenStruct4 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats": null}` +var singleDotRegexp = regexp.MustCompilePOSIX(`[^.]\.[^.]`) + +func TestSuricataLarge(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Delimiter: ".", + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + acc := testutil.Accumulator{} + acc.SetDebug(true) + assert.NoError(t, s.Start(&acc)) + + data, err := ioutil.ReadFile("testdata/test1.json") + if err != nil { + 
t.Fatal(err) + } + + c, err := net.Dial("unix", tmpfn) + if err != nil { + t.Fatal(err) + } + c.Write([]byte(data)) + c.Write([]byte("\n")) + c.Close() + + acc.Wait(1) + + s.Stop() +} + +func TestSuricata(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Delimiter: ".", + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + acc := testutil.Accumulator{} + acc.SetDebug(true) + assert.NoError(t, s.Start(&acc)) + + c, err := net.Dial("unix", tmpfn) + if err != nil { + t.Fatalf("failed: %s", err.Error()) + } + c.Write([]byte(ex2)) + c.Write([]byte("\n")) + c.Close() + + acc.Wait(1) + + s.Stop() + s = Suricata{ + Source: tmpfn, + Delimiter: ".", + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + + acc.AssertContainsTaggedFields(t, "suricata", + map[string]interface{}{ + "capture.kernel_packets": float64(905344474), + "capture.kernel_drops": float64(78355440), + "capture.kernel_packets_delta": float64(2376742), + "capture.kernel_drops_delta": float64(82049), + }, + map[string]string{"thread": "total"}) + + acc = testutil.Accumulator{} + acc.SetDebug(true) + assert.NoError(t, s.Start(&acc)) + + c, err = net.Dial("unix", tmpfn) + if err != nil { + log.Println(err) + } + c.Write([]byte("")) + c.Write([]byte("\n")) + c.Write([]byte("foobard}\n")) + c.Write([]byte(ex3)) + c.Write([]byte("\n")) + c.Close() + acc.Wait(1) + + s.Stop() + + acc.AssertContainsTaggedFields(t, "suricata", + map[string]interface{}{ + "capture.kernel_packets": float64(905344474), + "capture.kernel_drops": float64(78355440), + }, + map[string]string{"thread": "foo"}) +} + +func TestSuricataInvalid(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + acc := testutil.Accumulator{} + acc.SetDebug(true) + + assert.NoError(t, s.Start(&acc)) + + c, err := net.Dial("unix", tmpfn) + if err != nil { + log.Println(err) + } + c.Write([]byte("sfjiowef")) + c.Write([]byte("\n")) + c.Close() + + acc.WaitError(1) + s.Stop() +} + +func splitAtSingleDot(in string) []string { + res := singleDotRegexp.FindAllStringIndex(in, -1) + if res == nil { + return []string{in} + } + ret := make([]string, 0) + startpos := 0 + for _, v := range res { + ret = append(ret, in[startpos:v[0]+1]) + startpos = v[1] - 1 + } + return append(ret, in[startpos:]) +} + +func TestSuricataSplitDots(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + out := splitAtSingleDot("foo") + if len(out) != 1 { + t.Fatalf("splitting 'foo' should yield one result") + } + if out[0] != "foo" { + t.Fatalf("splitting 'foo' should yield one result, 'foo'") + } + + s := Suricata{ + Source: tmpfn, + Delimiter: ".", + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + acc := testutil.Accumulator{} + acc.SetDebug(true) + + assert.NoError(t, s.Start(&acc)) + + c, err := net.Dial("unix", tmpfn) + if err != nil { + log.Println(err) + } + c.Write([]byte(ex4)) + c.Write([]byte("\n")) + c.Close() + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "suricata", + map[string]interface{}{ + "capture.kernel_packets": float64(905344474), + 
"capture.kernel_drops": float64(78355440), + }, + map[string]string{"thread": "W1#en..bar1"}) + + s.Stop() +} + +func TestSuricataInvalidPath(t *testing.T) { + tmpfn := fmt.Sprintf("/t%d/X", rand.Int63()) + s := Suricata{ + Source: tmpfn, + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + + acc := testutil.Accumulator{} + acc.SetDebug(true) + + assert.Error(t, s.Start(&acc)) +} + +func TestSuricataTooLongLine(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + acc := testutil.Accumulator{} + acc.SetDebug(true) + + assert.NoError(t, s.Start(&acc)) + + c, err := net.Dial("unix", tmpfn) + if err != nil { + log.Println(err) + } + c.Write([]byte(strings.Repeat("X", 20000000))) + c.Write([]byte("\n")) + c.Close() + + acc.WaitError(1) + + s.Stop() +} + +func TestSuricataEmptyJSON(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + acc := testutil.Accumulator{} + acc.SetDebug(true) + + assert.NoError(t, s.Start(&acc)) + + c, err := net.Dial("unix", tmpfn) + if err != nil { + log.Println(err) + } + c.Write([]byte("\n")) + c.Close() + + acc.WaitError(1) + + s.Stop() +} + +func TestSuricataInvalidInputs(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + defer func() { + log.SetOutput(os.Stderr) + }() + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + for input, errmsg := range map[string]string{ + brokenType1: `Unsupported type bool encountered`, + brokenType2: `Unsupported type []interface {} encountered`, + brokenType3: `Unsupported type string encountered`, + brokenType4: `Unsupported type encountered`, + brokenType5: `Unsupported type encountered`, + brokenStruct1: `The 'threads' sub-object does not have required structure`, + brokenStruct2: `Input does not contain necessary 'stats' sub-object`, + brokenStruct3: `The 'stats' sub-object does not have required structure`, + brokenStruct4: `The 'stats' sub-object does not have required structure`, + } { + var logBuf buffer + logBuf.Reset() + log.SetOutput(&logBuf) + + acc := testutil.Accumulator{} + acc.SetDebug(true) + + s := Suricata{ + Source: tmpfn, + Delimiter: ".", + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + assert.NoError(t, s.Start(&acc)) + + c, err := net.Dial("unix", tmpfn) + if err != nil { + t.Fatal(err) + } + c.Write([]byte(input)) + c.Write([]byte("\n")) + c.Close() + + for { + if bytes.Count(logBuf.Bytes(), []byte{'\n'}) > 0 { + break + } + time.Sleep(50 * time.Millisecond) + } + + assert.Contains(t, logBuf.String(), errmsg) + s.Stop() + } +} + +func TestSuricataDisconnectSocket(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + acc := testutil.Accumulator{} + acc.SetDebug(true) + + assert.NoError(t, s.Start(&acc)) + + c, err := net.Dial("unix", tmpfn) + if err != nil { + log.Println(err) + } + c.Write([]byte(ex2)) + c.Write([]byte("\n")) + 
c.Close() + + c, err = net.Dial("unix", tmpfn) + if err != nil { + log.Println(err) + } + c.Write([]byte(ex3)) + c.Write([]byte("\n")) + c.Close() + + acc.Wait(2) + + s.Stop() +} + +func TestSuricataPluginDesc(t *testing.T) { + v, ok := inputs.Inputs["suricata"] + if !ok { + t.Fatal("suricata plugin not registered") + } + desc := v().Description() + if desc != "Suricata stats plugin" { + t.Fatal("invalid description ", desc) + } +} + +func TestSuricataStartStop(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + acc := testutil.Accumulator{} + acc.SetDebug(true) + assert.NoError(t, s.Start(&acc)) + s.Stop() +} + +func TestSuricataGather(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + acc := testutil.Accumulator{} + acc.SetDebug(true) + assert.NoError(t, s.Gather(&acc)) +} + +func TestSuricataSampleConfig(t *testing.T) { + v, ok := inputs.Inputs["suricata"] + if !ok { + t.Fatal("suricata plugin not registered") + } + if v().SampleConfig() != sampleConfig { + t.Fatal("wrong sampleconfig") + } +} diff --git a/plugins/inputs/suricata/suricata_testutil.go b/plugins/inputs/suricata/suricata_testutil.go new file mode 100644 index 000000000..55aa2bb9b --- /dev/null +++ b/plugins/inputs/suricata/suricata_testutil.go @@ -0,0 +1,38 @@ +package suricata + +import ( + "bytes" + "sync" +) + +// A thread-safe Buffer wrapper to enable concurrent access to log output. 
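+// The tests point the standard logger at it via log.SetOutput and poll its
+// contents from the test goroutine, hence the mutex around every method.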
+type buffer struct { + b bytes.Buffer + m sync.Mutex +} + +func (b *buffer) Read(p []byte) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + return b.b.Read(p) +} +func (b *buffer) Write(p []byte) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + return b.b.Write(p) +} +func (b *buffer) String() string { + b.m.Lock() + defer b.m.Unlock() + return b.b.String() +} +func (b *buffer) Reset() { + b.m.Lock() + defer b.m.Unlock() + b.b.Reset() +} +func (b *buffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.b.Bytes() +} diff --git a/plugins/inputs/suricata/testdata/test1.json b/plugins/inputs/suricata/testdata/test1.json new file mode 100644 index 000000000..31208c4d1 --- /dev/null +++ b/plugins/inputs/suricata/testdata/test1.json @@ -0,0 +1 @@ +{ "timestamp": "2019-08-08T16:26:33.000244+0200", "event_type": "stats", "stats": { "uptime": 15, "capture": { "kernel_packets": 135, "kernel_packets_delta": 74, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 141, "pkts_delta": 63, "bytes": 26018, "bytes_delta": 13415, "invalid": 0, "invalid_delta": 0, "ipv4": 132, "ipv4_delta": 58, "ipv6": 4, "ipv6_delta": 2, "ethernet": 141, "ethernet_delta": 63, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 79, "tcp_delta": 35, "udp": 53, "udp_delta": 23, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 4, "icmpv6_delta": 2, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 184, "avg_pkt_size_delta": 23, "max_pkt_size": 1422, "max_pkt_size_delta": 0, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0, "spare": 10000, "spare_delta": 0, "emerg_mode_entered": 0, "emerg_mode_entered_delta": 0, "emerg_mode_over": 0, "emerg_mode_over_delta": 0, "tcp_reuse": 0, "tcp_reuse_delta": 0, "memuse": 7083520, "memuse_delta": 4608 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 1, "sessions_delta": 1, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 1, "syn_delta": 1, "synack": 1, "synack_delta": 1, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0, "memuse": 3276800, "memuse_delta": 0, "reassembly_memuse": 12332832, "reassembly_memuse_delta": 0 }, "detect": { "alert": 2, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 1, "tls_delta": 1, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, 
"smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 5, "dns_udp_delta": 2, "failed_udp": 12, "failed_udp_delta": 6 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 12, "dns_udp_delta": 2 } }, "flow_mgr": { "closed_pruned": 0, "closed_pruned_delta": 0, "new_pruned": 0, "new_pruned_delta": 0, "est_pruned": 0, "est_pruned_delta": 0, "bypassed_pruned": 0, "bypassed_pruned_delta": 0, "flows_checked": 1, "flows_checked_delta": 1, "flows_notimeout": 1, "flows_notimeout_delta": 1, "flows_timeout": 0, "flows_timeout_delta": 0, "flows_timeout_inuse": 0, "flows_timeout_inuse_delta": 0, "flows_removed": 0, "flows_removed_delta": 0, "rows_checked": 65536, "rows_checked_delta": 0, "rows_skipped": 65535, "rows_skipped_delta": -1, "rows_empty": 0, "rows_empty_delta": 0, "rows_busy": 0, "rows_busy_delta": 0, "rows_maxlen": 1, "rows_maxlen_delta": 1 }, "dns": { "memuse": 1402, "memuse_delta": 595, "memcap_state": 0, "memcap_state_delta": 0, "memcap_global": 0, "memcap_global_delta": 0 }, "http": { "memuse": 0, "memuse_delta": 0, "memcap": 0, "memcap_delta": 0 }, "threads": { "W#01-wlp4s0": { "capture": { "kernel_packets": 25, "kernel_packets_delta": 22, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 25, "pkts_delta": 22, "bytes": 7026, "bytes_delta": 6828, "invalid": 0, "invalid_delta": 0, "ipv4": 19, "ipv4_delta": 19, "ipv6": 1, "ipv6_delta": 0, "ethernet": 25, "ethernet_delta": 22, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 17, "tcp_delta": 17, "udp": 2, "udp_delta": 2, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 1, "icmpv6_delta": 0, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 281, "avg_pkt_size_delta": 215, "max_pkt_size": 1422, "max_pkt_size_delta": 1336, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 1, "sessions_delta": 1, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 1, "syn_delta": 1, "synack": 1, "synack_delta": 1, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 0, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 1, 
"tls_delta": 1, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0, "failed_udp": 1, "failed_udp_delta": 1 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0 } } }, "W#02-wlp4s0": { "capture": { "kernel_packets": 32, "kernel_packets_delta": 21, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 32, "pkts_delta": 19, "bytes": 5378, "bytes_delta": 3085, "invalid": 0, "invalid_delta": 0, "ipv4": 32, "ipv4_delta": 19, "ipv6": 0, "ipv6_delta": 0, "ethernet": 32, "ethernet_delta": 19, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 25, "tcp_delta": 12, "udp": 7, "udp_delta": 7, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 0, "icmpv6_delta": 0, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 168, "avg_pkt_size_delta": -8, "max_pkt_size": 626, "max_pkt_size_delta": 0, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 0, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0, "failed_udp": 2, "failed_udp_delta": 2 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0 } } }, "W#03-wlp4s0": { "capture": { "kernel_packets": 44, "kernel_packets_delta": 9, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 45, "pkts_delta": 9, "bytes": 9392, "bytes_delta": 1718, "invalid": 0, "invalid_delta": 0, "ipv4": 45, "ipv4_delta": 9, 
"ipv6": 0, "ipv6_delta": 0, "ethernet": 45, "ethernet_delta": 9, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 33, "tcp_delta": 2, "udp": 12, "udp_delta": 7, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 0, "icmpv6_delta": 0, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 208, "avg_pkt_size_delta": -5, "max_pkt_size": 1422, "max_pkt_size_delta": 0, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 1, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0, "failed_udp": 5, "failed_udp_delta": 2 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0 } } }, "W#04-wlp4s0": { "capture": { "kernel_packets": 4, "kernel_packets_delta": 0, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 10, "pkts_delta": 0, "bytes": 740, "bytes_delta": 0, "invalid": 0, "invalid_delta": 0, "ipv4": 10, "ipv4_delta": 0, "ipv6": 0, "ipv6_delta": 0, "ethernet": 10, "ethernet_delta": 0, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 0, "tcp_delta": 0, "udp": 10, "udp_delta": 0, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 0, "icmpv6_delta": 0, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 74, "avg_pkt_size_delta": 0, "max_pkt_size": 86, "max_pkt_size_delta": 0, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { 
"pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 1, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 1, "dns_udp_delta": 0, "failed_udp": 1, "failed_udp_delta": 0 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 4, "dns_udp_delta": 0 } } }, "W#05-wlp4s0": { "capture": { "kernel_packets": 14, "kernel_packets_delta": 11, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 14, "pkts_delta": 4, "bytes": 1723, "bytes_delta": 797, "invalid": 0, "invalid_delta": 0, "ipv4": 13, "ipv4_delta": 3, "ipv6": 1, "ipv6_delta": 1, "ethernet": 14, "ethernet_delta": 4, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 2, "tcp_delta": 2, "udp": 11, "udp_delta": 1, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 1, "icmpv6_delta": 1, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 123, "avg_pkt_size_delta": 31, "max_pkt_size": 478, "max_pkt_size_delta": 299, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, 
"syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 0, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 1, "dns_udp_delta": 0, "failed_udp": 1, "failed_udp_delta": 0 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 4, "dns_udp_delta": 0 } } }, "W#06-wlp4s0": { "capture": { "kernel_packets": 11, "kernel_packets_delta": 8, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 11, "pkts_delta": 6, "bytes": 1254, "bytes_delta": 696, "invalid": 0, "invalid_delta": 0, "ipv4": 10, "ipv4_delta": 6, "ipv6": 1, "ipv6_delta": 0, "ethernet": 11, "ethernet_delta": 6, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 2, "tcp_delta": 2, "udp": 8, "udp_delta": 4, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 1, "icmpv6_delta": 0, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 114, "avg_pkt_size_delta": 3, "max_pkt_size": 215, "max_pkt_size_delta": 62, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 0, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 2, "dns_udp_delta": 1, "failed_udp": 1, "failed_udp_delta": 1 }, "tx": { "http": 0, "http_delta": 0, 
"smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 3, "dns_udp_delta": 1 } } }, "W#07-wlp4s0": { "capture": { "kernel_packets": 1, "kernel_packets_delta": 0, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 1, "pkts_delta": 0, "bytes": 214, "bytes_delta": 0, "invalid": 0, "invalid_delta": 0, "ipv4": 1, "ipv4_delta": 0, "ipv6": 0, "ipv6_delta": 0, "ethernet": 1, "ethernet_delta": 0, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 0, "tcp_delta": 0, "udp": 1, "udp_delta": 0, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 0, "icmpv6_delta": 0, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 214, "avg_pkt_size_delta": 0, "max_pkt_size": 214, "max_pkt_size_delta": 0, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 0, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0, "failed_udp": 1, "failed_udp_delta": 0 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0 } } }, "W#08-wlp4s0": { "capture": { "kernel_packets": 4, "kernel_packets_delta": 3, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 3, "pkts_delta": 3, "bytes": 291, "bytes_delta": 291, "invalid": 0, "invalid_delta": 0, "ipv4": 2, "ipv4_delta": 2, "ipv6": 1, "ipv6_delta": 1, "ethernet": 3, "ethernet_delta": 3, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 0, "tcp_delta": 0, "udp": 2, "udp_delta": 2, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 1, "icmpv6_delta": 1, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 
0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 97, "avg_pkt_size_delta": 97, "max_pkt_size": 134, "max_pkt_size_delta": 134, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 0, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 1, "dns_udp_delta": 1, "failed_udp": 0, "failed_udp_delta": 0 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 1, "dns_udp_delta": 1 } } }, "FM#01": { "flow_mgr": { "closed_pruned": 0, "closed_pruned_delta": 0, "new_pruned": 0, "new_pruned_delta": 0, "est_pruned": 0, "est_pruned_delta": 0, "bypassed_pruned": 0, "bypassed_pruned_delta": 0, "flows_checked": 1, "flows_checked_delta": 1, "flows_notimeout": 1, "flows_notimeout_delta": 1, "flows_timeout": 0, "flows_timeout_delta": 0, "flows_timeout_inuse": 0, "flows_timeout_inuse_delta": 0, "flows_removed": 0, "flows_removed_delta": 0, "rows_checked": 65536, "rows_checked_delta": 0, "rows_skipped": 65535, "rows_skipped_delta": -1, "rows_empty": 0, "rows_empty_delta": 0, "rows_busy": 0, "rows_busy_delta": 0, "rows_maxlen": 1, "rows_maxlen_delta": 1 }, "flow": { "spare": 10000, "spare_delta": 0, "emerg_mode_entered": 0, "emerg_mode_entered_delta": 0, "emerg_mode_over": 0, "emerg_mode_over_delta": 0, "tcp_reuse": 0, "tcp_reuse_delta": 0 } }, "Global": { "tcp": { "memuse": 3276800, "memuse_delta": 0, "reassembly_memuse": 12332832, "reassembly_memuse_delta": 0 }, "dns": { "memuse": 1402, "memuse_delta": 595, "memcap_state": 0, "memcap_state_delta": 0, "memcap_global": 0, "memcap_global_delta": 0 }, "http": { "memuse": 0, "memuse_delta": 0, "memcap": 0, "memcap_delta": 0 }, "flow": { "memuse": 7083520, "memuse_delta": 4608 } } } }} \ No newline at end of file From 84840d848c106122a987a61b3b73e7cb703c4ca3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 20 Sep 2019 15:38:21 -0700 Subject: [PATCH 1197/1815] Update changelog and link to 
readme to suricata --- CHANGELOG.md | 1 + README.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index dba57206a..079c2d393 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ #### New Inputs - [azure_storage_queue](/plugins/inputs/azure_storage_queue/README.md) - Contributed by @mjiderhamn +- [suricata](/plugins/inputs/suricata/README.md) - Contributed by @satta #### Features diff --git a/README.md b/README.md index f618daa4e..5f34ebf2a 100644 --- a/README.md +++ b/README.md @@ -276,6 +276,7 @@ For documentation on the latest development code see the [documentation index][d * [sql server](./plugins/inputs/sqlserver) (microsoft) * [stackdriver](./plugins/inputs/stackdriver) * [statsd](./plugins/inputs/statsd) +* [suricata](./plugins/inputs/suricata) * [swap](./plugins/inputs/swap) * [syslog](./plugins/inputs/syslog) * [sysstat](./plugins/inputs/sysstat) From cd1bb2bc2306d43780ae627e452b5e5230570682 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 20 Sep 2019 15:44:55 -0700 Subject: [PATCH 1198/1815] Update suricata readme style --- plugins/inputs/suricata/README.md | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/plugins/inputs/suricata/README.md b/plugins/inputs/suricata/README.md index 5d00f4be4..5b4f16c00 100644 --- a/plugins/inputs/suricata/README.md +++ b/plugins/inputs/suricata/README.md @@ -1,11 +1,11 @@ -# Suricata plugin for Telegraf +# Suricata Input Plugin This plugin reports internal performance counters of the Suricata IDS/IPS engine, such as captured traffic volume, memory usage, uptime, flow counters, and much more. It provides a socket for the Suricata log output to write JSON stats output to, and processes the incoming data to fit Telegraf's format. -### Configuration: +### Configuration ```toml [[input.suricata]] @@ -19,7 +19,7 @@ stats output to, and processes the incoming data to fit Telegraf's format. delimiter = "_" ``` -### Measurements & Fields: +### Metrics Fields in the 'suricata' measurement follow the JSON format used by Suricata's stats output. @@ -28,6 +28,9 @@ more information. All fields are numeric. - suricata + - tags: + - thread: `Global` for global statistics (if enabled), thread IDs (e.g. `W#03-enp0s31f6`) for thread-specific statistics + - fields: - app_layer_flow_dcerpc_udp - app_layer_flow_dns_tcp - app_layer_flow_dns_udp @@ -91,13 +94,8 @@ All fields are numeric. - tcp_synack - ... -### Tags: -The `suricata` measurement has the following tags: - -- thread: `Global` for global statistics (if enabled), thread IDs (e.g. `W#03-enp0s31f6`) for thread-specific statistics - -## Suricata configuration +#### Suricata configuration Suricata needs to deliver the 'stats' event type to a given unix socket for this plugin to pick up. 
This can be done, for example, by creating an additional @@ -113,7 +111,7 @@ output in the Suricata configuration file: threads: yes ``` -## Example Output: +### Example Output ```text suricata,host=myhost,thread=FM#01 flow_mgr_rows_empty=0,flow_mgr_rows_checked=65536,flow_mgr_closed_pruned=0,flow_emerg_mode_over=0,flow_mgr_flows_timeout_inuse=0,flow_mgr_rows_skipped=65535,flow_mgr_bypassed_pruned=0,flow_mgr_flows_removed=0,flow_mgr_est_pruned=0,flow_mgr_flows_notimeout=1,flow_mgr_flows_checked=1,flow_mgr_rows_busy=0,flow_spare=10000,flow_mgr_rows_maxlen=1,flow_mgr_new_pruned=0,flow_emerg_mode_entered=0,flow_tcp_reuse=0,flow_mgr_flows_timeout=0 1568368562545197545 From 46b89b379a36d6e94e28439ce20432bcfc57feb1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 20 Sep 2019 16:49:14 -0700 Subject: [PATCH 1199/1815] Update ping input readme (#6412) --- plugins/inputs/ping/README.md | 169 ++++++++++++++++++++++------------ plugins/inputs/ping/ping.go | 57 +++++++----- 2 files changed, 143 insertions(+), 83 deletions(-) diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 8f1e3cf6f..4376c7a19 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -2,92 +2,142 @@ Sends a ping message by executing the system ping command and reports the results. +This plugin has two main methods of operation: `exec` and `native`. The +recommended method is `native`, which has greater system compatibility and +performance. However, for backwards compatibility the `exec` method is the +default. + +When using `method = "exec"`, the system's ping utility is executed to send the +ping packets. + Most ping command implementations are supported, one notable exception being -that there is currently no support for GNU Inetutils ping. You may instead -use the iputils-ping implementation: +that there is currently no support for GNU Inetutils ping. You may instead use +the iputils-ping implementation: ``` apt-get install iputils-ping ``` -When using `method = "native"` a ping is sent and the results are reported in pure go, eliminating the need to execute the system `ping` command. Not using the system binary allows the use of this plugin on non-english systems. - -There is currently no support for TTL on windows with `"native"`; track progress at https://github.com/golang/go/issues/7175 and https://github.com/golang/go/issues/7174 +When using `method = "native"` a ping is sent and the results are reported in +native Go by the Telegraf process, eliminating the need to execute the system +`ping` command. ### Configuration: ```toml [[inputs.ping]] - ## List of urls to ping + ## Hosts to send ping packets to. urls = ["example.org"] - ## Number of pings to send per collection (ping -c ) - # count = 1 - - ## Interval, in s, at which to ping. 0 == default (ping -i ) - ## Not available in Windows. - # ping_interval = 1.0 - - ## Per-ping timeout, in s. 0 == no timeout (ping -W ) - # timeout = 1.0 - - ## Total-ping deadline, in s. 0 == no deadline (ping -w ) - # deadline = 10 - - ## Interface or source address to send ping from (ping -I ) - ## on Darwin and Freebsd only source address possible: (ping -S ) - # interface = "" - - ## How to ping. "native" doesn't have external dependencies, while "exec" depends on 'ping'. + ## Method used for sending pings, can be either "exec" or "native". When set + ## to "exec" the system's ping command will be executed. When set to "native" + ## the plugin will send pings directly.
+ ## + ## While the default is "exec" for backwards compatibility, new deployments + ## are encouraged to use the "native" method for improved compatibility and + ## performance. # method = "exec" - ## Specify the ping executable binary, default is "ping" + ## Number of ping packets to send per interval. Corresponds to the "-c" + ## option of the ping command. + # count = 1 + + ## Time to wait between sending ping packets in seconds. Operates like the + ## "-i" option of the ping command. + # ping_interval = 1.0 + + ## If set, the time to wait for a ping response in seconds. Operates like + ## the "-W" option of the ping command. + # timeout = 1.0 + + ## If set, the total ping deadline, in seconds. Operates like the -w option + ## of the ping command. + # deadline = 10 + + ## Interface or source address to send ping from. Operates like the -I or -S + ## option of the ping command. + # interface = "" + + ## Specify the ping executable binary. # binary = "ping" - ## Arguments for ping command. When arguments is not empty, system binary will be used and - ## other options (ping_interval, timeout, etc) will be ignored + ## Arguments for ping command. When arguments is not empty, the command from + ## the binary option will be used and other options (ping_interval, timeout, + ## etc) will be ignored. # arguments = ["-c", "3"] - ## Use only ipv6 addresses when resolving hostnames. + ## Use only IPv6 addresses when resolving a hostname. # ipv6 = false ``` #### File Limit -Since this plugin runs the ping command, it may need to open several files per -host. With a large host list you may receive a `too many open files` error. +Since this plugin runs the ping command, it may need to open multiple files per +host. The number of files used is reduced with the `native` method, but many +files may still be used. With a large host list you may receive a `too many open +files` error. -To increase this limit on platforms using systemd it must be done in the -service file. +To increase this limit on platforms using systemd the recommended method is to +use the "drop-in directory", usually located at +`/etc/systemd/system/telegraf.service.d`. - -Find the service unit file: -``` -$ systemctl show telegraf.service -p FragmentPath -FragmentPath=/lib/systemd/system/telegraf.service +You can create or edit a drop-in file in the correct location using: +```sh +$ systemctl edit telegraf ``` -Set the file number limit: -``` +Increase the number of open files: +```ini [Service] -LimitNOFILE=4096 +LimitNOFILE=8192 ``` -#### Permission Caveat (non Windows) - -It is preferred that this plugin listen on privileged ICMP sockets. To do so, telegraf can either be run as the root user or the root user can add the capability to access raw sockets to telegraf by running the following commant: - -``` -setcap cap_net_raw=eip /path/to/telegraf +Restart Telegraf: +```sh +$ systemctl restart telegraf ``` -Another option (doesn't work as well or in all circumstances) is to listen on unprivileged raw sockets (non-Windows only). The system group of the user running telegraf must be allowed to create ICMP Echo sockets. [See man pages icmp(7) for `ping_group_range`](http://man7.org/linux/man-pages/man7/icmp.7.html). On Linux hosts, run the following to give a group the proper permissions: +#### Linux Permissions +When using `method = "native"`, Telegraf will attempt to use privileged raw +ICMP sockets. On most systems, doing so requires `CAP_NET_RAW` capabilities.
+ +With systemd: +```sh +$ systemctl edit telegraf ``` +```ini +[Service] +CapabilityBoundingSet=CAP_NET_RAW +AmbientCapabilities=CAP_NET_RAW +``` +```sh +$ systemctl restart telegraf ``` +Without systemd: +```sh +$ setcap cap_net_raw=eip /usr/bin/telegraf ``` -### Metrics: +Reference [`man 7 capabilities`][man 7 capabilities] for more information about +setting capabilities. + +[man 7 capabilities]: http://man7.org/linux/man-pages/man7/capabilities.7.html + +When Telegraf cannot listen on a privileged ICMP socket it will attempt to use +ICMP echo sockets. If you wish to use this method you must ensure Telegraf's +group, usually `telegraf`, is allowed to use ICMP echo sockets: + +```sh +$ sysctl -w net.ipv4.ping_group_range="GROUP_ID_LOW GROUP_ID_HIGH" +``` + +Reference [`man 7 icmp`][man 7 icmp] for more information about ICMP echo +sockets and the `ping_group_range` setting. + +[man 7 icmp]: http://man7.org/linux/man-pages/man7/icmp.7.html + +### Metrics - ping - tags: @@ -102,24 +152,23 @@ sudo sysctl -w net.ipv4.ping_group_range="GROUP_ID_LOW GROUP_ID_HIGH" - maximum_response_ms (integer) - standard_deviation_ms (integer, Available on Windows only with native ping) - errors (float, Windows only) - - reply_received (integer, Windows only*) - - percent_reply_loss (float, Windows only*) + - reply_received (integer, Windows with method = "exec" only) + - percent_reply_loss (float, Windows with method = "exec" only) - result_code (int, success = 0, no such host = 1, ping error = 2) ##### reply_received vs packets_received -On Windows systems, "Destination net unreachable" reply will increment `packets_received` but not `reply_received`* +On Windows systems with `method = "exec"`, the "Destination net unreachable" reply will increment `packets_received` but not `reply_received`. -### Example Output: +##### ttl -**Windows:** -``` -ping,url=example.org result_code=0i,average_response_ms=7i,maximum_response_ms=9i,minimum_response_ms=7i,packets_received=4i,packets_transmitted=4i,percent_packet_loss=0,percent_reply_loss=0,reply_received=4i 1469879119000000000 -``` +There is currently no support for TTL on Windows with `"native"`; track +progress at https://github.com/golang/go/issues/7175 and +https://github.com/golang/go/issues/7174 + + +### Example Output -**Linux:** ``` ping,url=example.org average_response_ms=23.066,ttl=63,maximum_response_ms=24.64,minimum_response_ms=22.451,packets_received=5i,packets_transmitted=5i,percent_packet_loss=0,result_code=0i,standard_deviation_ms=0.809 1535747258000000000 ``` - -*not when `method = "native"` is used diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 469859a34..ac0e9ebdf 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -11,7 +11,6 @@ import ( "time" "github.com/glinton/ping" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" @@ -68,35 +67,47 @@ func (*Ping) Description() string { } const sampleConfig = ` - ## List of urls to ping + ## Hosts to send ping packets to. urls = ["example.org"] - ## Number of pings to send per collection (ping -c ) - # count = 1 - - ## Interval, in s, at which to ping. 0 == default (ping -i ) - # ping_interval = 1.0 - - ## Per-ping timeout, in s. 0 == no timeout (ping -W ) - # timeout = 1.0 - - ## Total-ping deadline, in s. 
0 == no deadline (ping -w ) - # deadline = 10 - - ## Interface or source address to send ping from (ping -I[-S] ) - # interface = "" - - ## How to ping. "native" doesn't have external dependencies, while "exec" depends on 'ping'. + ## Method used for sending pings, can be either "exec" or "native". When set + ## to "exec" the system's ping command will be executed. When set to "native" + ## the plugin will send pings directly. + ## + ## While the default is "exec" for backwards compatibility, new deployments + ## are encouraged to use the "native" method for improved compatibility and + ## performance. # method = "exec" - ## Specify the ping executable binary, default is "ping" - # binary = "ping" + ## Number of ping packets to send per interval. Corresponds to the "-c" + ## option of the ping command. + # count = 1 - ## Arguments for ping command. When arguments is not empty, system binary will be used and - ## other options (ping_interval, timeout, etc) will be ignored. + ## Time to wait between sending ping packets in seconds. Operates like the + ## "-i" option of the ping command. + # ping_interval = 1.0 + + ## If set, the time to wait for a ping response in seconds. Operates like + ## the "-W" option of the ping command. + # timeout = 1.0 + + ## If set, the total ping deadline, in seconds. Operates like the -w option + ## of the ping command. + # deadline = 10 + + ## Interface or source address to send ping from. Operates like the -I or -S + ## option of the ping command. + # interface = "" + + ## Specify the ping executable binary. + # binary = "ping" + + ## Arguments for ping command. When arguments is not empty, the command from + ## the binary option will be used and other options (ping_interval, timeout, + ## etc) will be ignored. # arguments = ["-c", "3"] - ## Use only ipv6 addresses when resolving hostnames. + ## Use only IPv6 addresses when resolving a hostname. # ipv6 = false ` From 776e92ffabe4af70e2c1543774e374490d0ff9e0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 20 Sep 2019 16:50:19 -0700 Subject: [PATCH 1200/1815] Require goplugin build flag to enable go plugin support (#6393) --- cmd/telegraf/telegraf.go | 36 ++---------------------------- internal/goplugin/noplugin.go | 9 ++++++++ internal/goplugin/plugin.go | 42 +++++++++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+), 34 deletions(-) create mode 100644 internal/goplugin/noplugin.go create mode 100644 internal/goplugin/plugin.go diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 4d4b62759..f865cee51 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -10,9 +10,6 @@ import ( _ "net/http/pprof" // Comment this line to disable pprof endpoint. "os" "os/signal" - "path" - "path/filepath" - "plugin" "runtime" "strings" "syscall" @@ -21,6 +18,7 @@ import ( "github.com/influxdata/telegraf/agent" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/config" + "github.com/influxdata/telegraf/internal/goplugin" "github.com/influxdata/telegraf/logger" _ "github.com/influxdata/telegraf/plugins/aggregators/all" "github.com/influxdata/telegraf/plugins/inputs" @@ -116,36 +114,6 @@ func reloadLoop( } } -// loadExternalPlugins loads external plugins from shared libraries (.so, .dll, etc.) -// in the specified directory. -func loadExternalPlugins(rootDir string) error { - return filepath.Walk(rootDir, func(pth string, info os.FileInfo, err error) error { - // Stop if there was an error.
- if err != nil { - return err - } - - // Ignore directories. - if info.IsDir() { - return nil - } - - // Ignore files that aren't shared libraries. - ext := strings.ToLower(path.Ext(pth)) - if ext != ".so" && ext != ".dll" { - return nil - } - - // Load plugin. - _, err = plugin.Open(pth) - if err != nil { - return fmt.Errorf("error loading %s: %s", pth, err) - } - - return nil - }) -} - func runAgent(ctx context.Context, inputFilters []string, outputFilters []string, @@ -317,7 +285,7 @@ func main() { // Load external plugins, if requested. if *fPlugins != "" { log.Printf("I! Loading external plugins from: %s", *fPlugins) - if err := loadExternalPlugins(*fPlugins); err != nil { + if err := goplugin.LoadExternalPlugins(*fPlugins); err != nil { log.Fatal("E! " + err.Error()) } } diff --git a/internal/goplugin/noplugin.go b/internal/goplugin/noplugin.go new file mode 100644 index 000000000..23d8634c4 --- /dev/null +++ b/internal/goplugin/noplugin.go @@ -0,0 +1,9 @@ +// +build !goplugin + +package goplugin + +import "errors" + +func LoadExternalPlugins(rootDir string) error { + return errors.New("go plugin support is not enabled") +} diff --git a/internal/goplugin/plugin.go b/internal/goplugin/plugin.go new file mode 100644 index 000000000..7e58ec32e --- /dev/null +++ b/internal/goplugin/plugin.go @@ -0,0 +1,42 @@ +// +build goplugin + +package goplugin + +import ( + "fmt" + "os" + "path" + "path/filepath" + "plugin" + "strings" +) + +// LoadExternalPlugins loads external plugins from shared libraries (.so, .dll, etc.) +// in the specified directory. +func LoadExternalPlugins(rootDir string) error { + return filepath.Walk(rootDir, func(pth string, info os.FileInfo, err error) error { + // Stop if there was an error. + if err != nil { + return err + } + + // Ignore directories. + if info.IsDir() { + return nil + } + + // Ignore files that aren't shared libraries. + ext := strings.ToLower(path.Ext(pth)) + if ext != ".so" && ext != ".dll" { + return nil + } + + // Load plugin. + _, err = plugin.Open(pth) + if err != nil { + return fmt.Errorf("error loading %s: %s", pth, err) + } + + return nil + }) +} From 2de217fb4c234db762b6dda653b3f446c0c7b15d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 20 Sep 2019 16:52:00 -0700 Subject: [PATCH 1201/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 079c2d393..d7966a597 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ - [#6386](https://github.com/influxdata/telegraf/issues/6386): Fix detection of layout timestamps in csv and json parser. - [#6394](https://github.com/influxdata/telegraf/issues/6394): Fix parsing of BATTDATE in apcupsd input. - [#6398](https://github.com/influxdata/telegraf/issues/6398): Keep boolean values listed in json_string_fields. +- [#6393](https://github.com/influxdata/telegraf/issues/6393): Disable Go plugin support in official builds.
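(Note: with this change, Go plugin support is compiled in only when Telegraf is built with the `goplugin` tag shown above, presumably via something like `go build -tags goplugin ./cmd/telegraf`; a binary built without the tag returns the "go plugin support is not enabled" error instead.)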
## v1.12.1 [2019-09-10] From 24d5a93e63e70533df4924dc46104065c15a58da Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 23 Sep 2019 10:02:19 -0700 Subject: [PATCH 1202/1815] Add example URL for cloud2 to influxdb_v2 output --- plugins/outputs/influxdb_v2/README.md | 1 + plugins/outputs/influxdb_v2/influxdb.go | 1 + 2 files changed, 2 insertions(+) diff --git a/plugins/outputs/influxdb_v2/README.md b/plugins/outputs/influxdb_v2/README.md index 226c3ab62..49c080f33 100644 --- a/plugins/outputs/influxdb_v2/README.md +++ b/plugins/outputs/influxdb_v2/README.md @@ -11,6 +11,7 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v2.x] HTTP service. ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. + ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] urls = ["http://127.0.0.1:9999"] ## Token for authentication. diff --git a/plugins/outputs/influxdb_v2/influxdb.go b/plugins/outputs/influxdb_v2/influxdb.go index 0f40a96e3..972773f79 100644 --- a/plugins/outputs/influxdb_v2/influxdb.go +++ b/plugins/outputs/influxdb_v2/influxdb.go @@ -27,6 +27,7 @@ var sampleConfig = ` ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. + ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] urls = ["http://127.0.0.1:9999"] ## Token for authentication. From f080b58834874d49b45dd5f8716d42f7388892db Mon Sep 17 00:00:00 2001 From: Kai Groner Date: Mon, 23 Sep 2019 13:26:17 -0400 Subject: [PATCH 1203/1815] Use prefix base detection for ints in grok parser (#6434) --- plugins/parsers/grok/parser.go | 2 +- plugins/parsers/grok/parser_test.go | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/plugins/parsers/grok/parser.go b/plugins/parsers/grok/parser.go index cecb69f94..ce9c0af59 100644 --- a/plugins/parsers/grok/parser.go +++ b/plugins/parsers/grok/parser.go @@ -248,7 +248,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { case MEASUREMENT: p.Measurement = v case INT: - iv, err := strconv.ParseInt(v, 10, 64) + iv, err := strconv.ParseInt(v, 0, 64) if err != nil { log.Printf("E! 
Error parsing %s to int: %s", v, err) } else { diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go index 2b8815264..e0b9575cb 100644 --- a/plugins/parsers/grok/parser_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -649,6 +649,31 @@ func TestParseErrors_WrongTimeLayout(t *testing.T) { testutil.MustMetric("grok", map[string]string{}, map[string]interface{}{}, time.Unix(0, 0))) } +func TestParseInteger_Base16(t *testing.T) { + p := &Parser{ + Patterns: []string{"%{TEST_LOG_C}"}, + CustomPatterns: ` + DURATION %{NUMBER}[nuµm]?s + BASE10OR16NUM (?:%{BASE10NUM}|%{BASE16NUM}) + TEST_LOG_C %{NUMBER:myfloat} %{BASE10OR16NUM:response_code:int} %{IPORHOST:clientip} %{DURATION:rt} + `, + } + assert.NoError(t, p.Compile()) + + metricA, err := p.ParseLine(`1.25 0xc8 192.168.1.1 5.432µs`) + require.NotNil(t, metricA) + assert.NoError(t, err) + assert.Equal(t, + map[string]interface{}{ + "clientip": "192.168.1.1", + "response_code": int64(200), + "myfloat": "1.25", + "rt": "5.432µs", + }, + metricA.Fields()) + assert.Equal(t, map[string]string{}, metricA.Tags()) +} + func TestTsModder(t *testing.T) { tsm := &tsModder{} From e42d2e39c63bd4c9efd51143a6d14c9b394de689 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 23 Sep 2019 10:28:21 -0700 Subject: [PATCH 1204/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d7966a597..3a6e7fffb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ - [#5921](https://github.com/influxdata/telegraf/pull/5921): Add replication metrics to the redis input. - [#6177](https://github.com/influxdata/telegraf/pull/6177): Support NX-OS telemetry extensions in cisco_telemetry_mdt. - [#6415](https://github.com/influxdata/telegraf/pull/6415): Allow graphite parser to create Inf and NaN values. +- [#6434](https://github.com/influxdata/telegraf/pull/6434): Use prefix base detection for ints in grok parser. 
## v1.12.2 [unreleased] From 817c9a69a94060aece50889c27e7c821a36a6530 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 23 Sep 2019 16:39:50 -0600 Subject: [PATCH 1205/1815] Document and add support to input plugins for logging alias (#6357) --- docs/CONFIGURATION.md | 1 + internal/models/running_aggregator.go | 2 +- plugins/aggregators/basicstats/README.md | 1 + plugins/aggregators/basicstats/basicstats.go | 110 +++--- .../aggregators/basicstats/basicstats_test.go | 36 ++ plugins/inputs/amqp_consumer/amqp_consumer.go | 29 +- .../azure_storage_queue.go | 12 +- plugins/inputs/ceph/ceph.go | 6 +- .../cisco_telemetry_gnmi.go | 9 +- .../cisco_telemetry_gnmi_test.go | 18 +- .../cisco_telemetry_mdt.go | 17 +- .../cisco_telemetry_mdt_test.go | 12 +- plugins/inputs/cloud_pubsub/pubsub.go | 16 +- plugins/inputs/cloud_pubsub/pubsub_test.go | 8 +- .../inputs/cloud_pubsub_push/pubsub_push.go | 10 +- .../cloud_pubsub_push/pubsub_push_test.go | 2 + plugins/inputs/diskio/diskio.go | 11 +- plugins/inputs/diskio/diskio_test.go | 1 + plugins/inputs/docker/docker.go | 7 +- plugins/inputs/docker/docker_test.go | 12 +- plugins/inputs/dovecot/README.md | 2 + plugins/inputs/dovecot/dovecot.go | 7 +- plugins/inputs/exec/exec.go | 2 +- plugins/inputs/filecount/filecount.go | 4 +- plugins/inputs/filecount/filecount_test.go | 1 + plugins/inputs/filestat/README.md | 1 + plugins/inputs/filestat/filestat.go | 10 +- plugins/inputs/filestat/filestat_test.go | 6 + .../http_listener_v2/http_listener_v2.go | 10 +- .../http_listener_v2/http_listener_v2_test.go | 4 + plugins/inputs/http_response/http_response.go | 9 +- .../http_response/http_response_test.go | 19 ++ plugins/inputs/icinga2/README.md | 4 +- plugins/inputs/icinga2/icinga2.go | 40 ++- plugins/inputs/icinga2/icinga2_test.go | 2 + .../inputs/influxdb_listener/http_listener.go | 17 +- .../influxdb_listener/http_listener_test.go | 6 + plugins/inputs/ipvs/ipvs.go | 4 +- plugins/inputs/jenkins/jenkins.go | 5 +- plugins/inputs/jenkins/jenkins_test.go | 6 + .../openconfig_telemetry.go | 30 +- .../openconfig_telemetry_test.go | 1 + .../inputs/kafka_consumer_legacy/README.md | 4 + .../kafka_consumer_legacy.go | 11 +- .../kafka_consumer_legacy_integration_test.go | 1 + .../kafka_consumer_legacy_test.go | 1 + .../kinesis_consumer/kinesis_consumer.go | 7 +- plugins/inputs/kube_inventory/kube_state.go | 8 +- plugins/inputs/logparser/logparser.go | 20 +- plugins/inputs/logparser/logparser_test.go | 5 + plugins/inputs/mailchimp/chimp_api.go | 4 +- plugins/inputs/mesos/README.md | 4 + plugins/inputs/mesos/mesos.go | 24 +- plugins/inputs/mesos/mesos_test.go | 4 + plugins/inputs/mongodb/mongodb.go | 17 +- plugins/inputs/mongodb/mongodb_server.go | 45 +-- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 17 +- .../mqtt_consumer/mqtt_consumer_test.go | 8 + plugins/inputs/nats_consumer/README.md | 2 + plugins/inputs/nats_consumer/nats_consumer.go | 16 +- plugins/inputs/nsq_consumer/README.md | 2 + plugins/inputs/nsq_consumer/nsq_consumer.go | 13 +- .../inputs/nsq_consumer/nsq_consumer_test.go | 1 + .../postgresql_extensible.go | 14 +- .../postgresql_extensible_test.go | 7 +- plugins/inputs/powerdns/powerdns.go | 4 +- .../powerdns_recursor/powerdns_recursor.go | 4 +- plugins/inputs/processes/processes.go | 11 +- plugins/inputs/processes/processes_test.go | 6 + plugins/inputs/prometheus/kubernetes.go | 10 +- plugins/inputs/prometheus/kubernetes_test.go | 9 +- plugins/inputs/prometheus/prometheus.go | 7 +- plugins/inputs/prometheus/prometheus_test.go 
| 3 + plugins/inputs/redis/redis.go | 7 +- plugins/inputs/redis/redis_test.go | 1 + plugins/inputs/smart/smart.go | 6 +- plugins/inputs/smart/smart_test.go | 2 + plugins/inputs/snmp/snmp.go | 2 +- plugins/inputs/snmp_legacy/snmp_legacy.go | 13 +- .../inputs/socket_listener/socket_listener.go | 25 +- .../socket_listener/socket_listener_test.go | 6 + plugins/inputs/stackdriver/stackdriver.go | 13 +- .../inputs/stackdriver/stackdriver_test.go | 2 + plugins/inputs/statsd/statsd.go | 72 ++-- plugins/inputs/statsd/statsd_test.go | 8 +- plugins/inputs/sysstat/README.md | 15 +- plugins/inputs/sysstat/sysstat.go | 20 +- plugins/inputs/sysstat/sysstat_test.go | 1 + plugins/inputs/system/system.go | 15 +- plugins/inputs/tail/tail.go | 28 +- plugins/inputs/tail/tail_test.go | 16 +- plugins/inputs/tcp_listener/tcp_listener.go | 24 +- .../inputs/tcp_listener/tcp_listener_test.go | 7 + plugins/inputs/udp_listener/udp_listener.go | 24 +- .../inputs/udp_listener/udp_listener_test.go | 2 + plugins/inputs/vsphere/client.go | 25 +- plugins/inputs/vsphere/endpoint.go | 71 ++-- plugins/inputs/vsphere/finder.go | 3 +- plugins/inputs/vsphere/tscache.go | 2 +- plugins/inputs/vsphere/vsphere.go | 9 +- plugins/inputs/vsphere/vsphere_test.go | 1 + .../win_perf_counters/win_perf_counters.go | 9 +- .../win_perf_counters_test.go | 316 +++++++++++------- plugins/inputs/win_services/win_services.go | 7 +- .../win_services_integration_test.go | 6 +- .../inputs/win_services/win_services_test.go | 41 ++- plugins/inputs/zipkin/zipkin.go | 5 +- plugins/inputs/zipkin/zipkin_test.go | 1 + plugins/outputs/influxdb/http.go | 8 +- plugins/outputs/influxdb/influxdb.go | 6 +- plugins/outputs/influxdb/udp.go | 2 +- 111 files changed, 961 insertions(+), 659 deletions(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 36feac791..5b3eb5887 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -188,6 +188,7 @@ driven operation. Parameters that can be used with any input plugin: +- **alias**: Name an instance of a plugin. - **interval**: How often to gather this metric. Normal plugins use a single global interval, but if one particular input should be run less or more often, you can configure that here. diff --git a/internal/models/running_aggregator.go b/internal/models/running_aggregator.go index ee46e5b13..91a10debb 100644 --- a/internal/models/running_aggregator.go +++ b/internal/models/running_aggregator.go @@ -144,7 +144,7 @@ func (r *RunningAggregator) Add(m telegraf.Metric) bool { defer r.Unlock() if m.Time().Before(r.periodStart.Add(-r.Config.Grace)) || m.Time().After(r.periodEnd.Add(r.Config.Delay)) { - r.log.Debugf("metric is outside aggregation window; discarding. %s: m: %s e: %s g: %s", + r.log.Debugf("Metric is outside aggregation window; discarding. %s: m: %s e: %s g: %s", m.Time(), r.periodStart, r.periodEnd, r.Config.Grace) r.MetricsDropped.Incr(1) return r.Config.DropOriginal diff --git a/plugins/aggregators/basicstats/README.md b/plugins/aggregators/basicstats/README.md index e6ae3b31a..8fef0c6f4 100644 --- a/plugins/aggregators/basicstats/README.md +++ b/plugins/aggregators/basicstats/README.md @@ -10,6 +10,7 @@ emitting the aggregate every `period` seconds. [[aggregators.basicstats]] ## The period on which to flush & clear the aggregator. period = "30s" + ## If true, the original metric will be dropped by the ## aggregator and will not get sent to the output plugins. 
drop_original = false diff --git a/plugins/aggregators/basicstats/basicstats.go b/plugins/aggregators/basicstats/basicstats.go index 28d1f2741..4e62ee311 100644 --- a/plugins/aggregators/basicstats/basicstats.go +++ b/plugins/aggregators/basicstats/basicstats.go @@ -1,7 +1,6 @@ package basicstats import ( - "log" "math" "github.com/influxdata/telegraf" @@ -10,6 +9,7 @@ import ( type BasicStats struct { Stats []string `toml:"stats"` + Log telegraf.Logger cache map[uint64]aggregate statsConfig *configuredStats @@ -28,9 +28,9 @@ type configuredStats struct { } func NewBasicStats() *BasicStats { - mm := &BasicStats{} - mm.Reset() - return mm + return &BasicStats{ + cache: make(map[uint64]aggregate), + } } type aggregate struct { @@ -53,6 +53,7 @@ type basicstats struct { var sampleConfig = ` ## The period on which to flush & clear the aggregator. period = "30s" + ## If true, the original metric will be dropped by the ## aggregator and will not get sent to the output plugins. drop_original = false @@ -61,17 +62,17 @@ var sampleConfig = ` # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"] ` -func (m *BasicStats) SampleConfig() string { +func (*BasicStats) SampleConfig() string { return sampleConfig } -func (m *BasicStats) Description() string { +func (*BasicStats) Description() string { return "Keep the aggregate basicstats of each metric passing through." } -func (m *BasicStats) Add(in telegraf.Metric) { +func (b *BasicStats) Add(in telegraf.Metric) { id := in.HashID() - if _, ok := m.cache[id]; !ok { + if _, ok := b.cache[id]; !ok { // hit an uncached metric, create caches for first time: a := aggregate{ name: in.Name(), @@ -92,13 +93,13 @@ func (m *BasicStats) Add(in telegraf.Metric) { } } } - m.cache[id] = a + b.cache[id] = a } else { for _, field := range in.FieldList() { if fv, ok := convert(field.Value); ok { - if _, ok := m.cache[id].fields[field.Key]; !ok { + if _, ok := b.cache[id].fields[field.Key]; !ok { // hit an uncached field of a cached metric - m.cache[id].fields[field.Key] = basicstats{ + b.cache[id].fields[field.Key] = basicstats{ count: 1, min: fv, max: fv, @@ -111,7 +112,7 @@ func (m *BasicStats) Add(in telegraf.Metric) { continue } - tmp := m.cache[id].fields[field.Key] + tmp := b.cache[id].fields[field.Key] //https://en.m.wikipedia.org/wiki/Algorithms_for_calculating_variance //variable initialization x := fv @@ -138,32 +139,30 @@ func (m *BasicStats) Add(in telegraf.Metric) { //diff compute tmp.diff = fv - tmp.LAST //store final data - m.cache[id].fields[field.Key] = tmp + b.cache[id].fields[field.Key] = tmp } } } } -func (m *BasicStats) Push(acc telegraf.Accumulator) { - config := getConfiguredStats(m) - - for _, aggregate := range m.cache { +func (b *BasicStats) Push(acc telegraf.Accumulator) { + for _, aggregate := range b.cache { fields := map[string]interface{}{} for k, v := range aggregate.fields { - if config.count { + if b.statsConfig.count { fields[k+"_count"] = v.count } - if config.min { + if b.statsConfig.min { fields[k+"_min"] = v.min } - if config.max { + if b.statsConfig.max { fields[k+"_max"] = v.max } - if config.mean { + if b.statsConfig.mean { fields[k+"_mean"] = v.mean } - if config.sum { + if b.statsConfig.sum { fields[k+"_sum"] = v.sum } @@ -171,16 +170,16 @@ func (m *BasicStats) Push(acc telegraf.Accumulator) { if v.count > 1 { variance := v.M2 / (v.count - 1) - if config.variance { + if b.statsConfig.variance { fields[k+"_s2"] = variance } - if config.stdev { + if b.statsConfig.stdev { fields[k+"_stdev"] = math.Sqrt(variance) } - if 
config.diff { + if b.statsConfig.diff { fields[k+"_diff"] = v.diff } - if config.non_negative_diff && v.diff >= 0 { + if b.statsConfig.non_negative_diff && v.diff >= 0 { fields[k+"_non_negative_diff"] = v.diff } @@ -194,14 +193,12 @@ func (m *BasicStats) Push(acc telegraf.Accumulator) { } } -func parseStats(names []string) *configuredStats { - +// member function for logging. +func (b *BasicStats) parseStats() *configuredStats { parsed := &configuredStats{} - for _, name := range names { - + for _, name := range b.Stats { switch name { - case "count": parsed.count = true case "min": @@ -222,45 +219,32 @@ func parseStats(names []string) *configuredStats { parsed.non_negative_diff = true default: - log.Printf("W! Unrecognized basic stat '%s', ignoring", name) + b.Log.Warnf("Unrecognized basic stat %q, ignoring", name) } } return parsed } -func defaultStats() *configuredStats { - - defaults := &configuredStats{} - - defaults.count = true - defaults.min = true - defaults.max = true - defaults.mean = true - defaults.variance = true - defaults.stdev = true - defaults.sum = false - defaults.non_negative_diff = false - - return defaults -} - -func getConfiguredStats(m *BasicStats) *configuredStats { - - if m.statsConfig == nil { - - if m.Stats == nil { - m.statsConfig = defaultStats() - } else { - m.statsConfig = parseStats(m.Stats) +func (b *BasicStats) getConfiguredStats() { + if b.Stats == nil { + b.statsConfig = &configuredStats{ + count: true, + min: true, + max: true, + mean: true, + variance: true, + stdev: true, + sum: false, + non_negative_diff: false, } + } else { + b.statsConfig = b.parseStats() } - - return m.statsConfig } -func (m *BasicStats) Reset() { - m.cache = make(map[uint64]aggregate) +func (b *BasicStats) Reset() { + b.cache = make(map[uint64]aggregate) } func convert(in interface{}) (float64, bool) { @@ -276,6 +260,12 @@ func convert(in interface{}) (float64, bool) { } } +func (b *BasicStats) Init() error { + b.getConfiguredStats() + + return nil +} + func init() { aggregators.Add("basicstats", func() telegraf.Aggregator { return NewBasicStats() diff --git a/plugins/aggregators/basicstats/basicstats_test.go b/plugins/aggregators/basicstats/basicstats_test.go index 9965c5caa..c5a093840 100644 --- a/plugins/aggregators/basicstats/basicstats_test.go +++ b/plugins/aggregators/basicstats/basicstats_test.go @@ -39,6 +39,8 @@ var m2, _ = metric.New("m1", func BenchmarkApply(b *testing.B) { minmax := NewBasicStats() + minmax.Log = testutil.Logger{} + minmax.getConfiguredStats() for n := 0; n < b.N; n++ { minmax.Add(m1) @@ -50,6 +52,8 @@ func BenchmarkApply(b *testing.B) { func TestBasicStatsWithPeriod(t *testing.T) { acc := testutil.Accumulator{} minmax := NewBasicStats() + minmax.Log = testutil.Logger{} + minmax.getConfiguredStats() minmax.Add(m1) minmax.Add(m2) @@ -106,6 +110,8 @@ func TestBasicStatsWithPeriod(t *testing.T) { func TestBasicStatsDifferentPeriods(t *testing.T) { acc := testutil.Accumulator{} minmax := NewBasicStats() + minmax.Log = testutil.Logger{} + minmax.getConfiguredStats() minmax.Add(m1) minmax.Push(&acc) @@ -181,6 +187,8 @@ func TestBasicStatsWithOnlyCount(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"count"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -208,6 +216,8 @@ func TestBasicStatsWithOnlyMin(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"min"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() 
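Note the lifecycle change in the basicstats refactor above: stat parsing no longer happens lazily inside `Push()` via `getConfiguredStats(m)`, so `b.statsConfig` must be populated before the first `Push()`. The agent does this by calling the new `Init()` method; the tests call `getConfiguredStats()` directly, as seen throughout the hunks that follow. A minimal sketch of the contract, written as if it lived in basicstats_test.go next to those tests (`m1` and `testutil` are the package's existing test fixtures):

```go
// Init must run before Push, since Push now reads b.statsConfig directly.
func TestInitBeforePush(t *testing.T) {
	acc := testutil.Accumulator{}

	agg := NewBasicStats()
	agg.Log = testutil.Logger{}
	agg.Stats = []string{"min", "max"}

	// Init replaces the lazy getConfiguredStats(m) call Push used to make;
	// skipping it would leave b.statsConfig nil when Push runs.
	if err := agg.Init(); err != nil {
		t.Fatal(err)
	}

	agg.Add(m1)
	agg.Push(&acc)
}
```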
aggregator.Add(m1) aggregator.Add(m2) @@ -235,6 +245,8 @@ func TestBasicStatsWithOnlyMax(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"max"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -262,6 +274,8 @@ func TestBasicStatsWithOnlyMean(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"mean"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -289,6 +303,8 @@ func TestBasicStatsWithOnlySum(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"sum"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -347,6 +363,8 @@ func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"sum"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(sum1) aggregator.Add(sum2) @@ -368,6 +386,8 @@ func TestBasicStatsWithOnlyVariance(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"s2"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -393,6 +413,8 @@ func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"stdev"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -418,6 +440,8 @@ func TestBasicStatsWithMinAndMax(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"min", "max"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -452,6 +476,8 @@ func TestBasicStatsWithDiff(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"diff"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -477,6 +503,8 @@ func TestBasicStatsWithNonNegativeDiff(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"non_negative_diff"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -500,7 +528,9 @@ func TestBasicStatsWithNonNegativeDiff(t *testing.T) { func TestBasicStatsWithAllStats(t *testing.T) { acc := testutil.Accumulator{} minmax := NewBasicStats() + minmax.Log = testutil.Logger{} minmax.Stats = []string{"count", "min", "max", "mean", "stdev", "s2", "sum"} + minmax.getConfiguredStats() minmax.Add(m1) minmax.Add(m2) @@ -564,6 +594,8 @@ func TestBasicStatsWithNoStats(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -579,6 +611,8 @@ func TestBasicStatsWithUnknownStat(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"crazy"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -596,6 +630,8 @@ func TestBasicStatsWithUnknownStat(t *testing.T) { func TestBasicStatsWithDefaultStats(t *testing.T) { aggregator := NewBasicStats() + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go index 6cf6004f5..cee425f60 100644 --- 
a/plugins/inputs/amqp_consumer/amqp_consumer.go +++ b/plugins/inputs/amqp_consumer/amqp_consumer.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "log" "math/rand" "strings" "sync" @@ -55,6 +54,7 @@ type AMQPConsumer struct { tls.ClientConfig ContentEncoding string `toml:"content_encoding"` + Log telegraf.Logger deliveries map[telegraf.TrackingID]amqp.Delivery @@ -241,11 +241,11 @@ func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error { break } - log.Printf("I! [inputs.amqp_consumer] connection closed: %s; trying to reconnect", err) + a.Log.Infof("Connection closed: %s; trying to reconnect", err) for { msgs, err := a.connect(amqpConf) if err != nil { - log.Printf("E! AMQP connection failed: %s", err) + a.Log.Errorf("AMQP connection failed: %s", err) time.Sleep(10 * time.Second) continue } @@ -272,14 +272,14 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err p := rand.Perm(len(brokers)) for _, n := range p { broker := brokers[n] - log.Printf("D! [inputs.amqp_consumer] connecting to %q", broker) + a.Log.Debugf("Connecting to %q", broker) conn, err := amqp.DialConfig(broker, *amqpConf) if err == nil { a.conn = conn - log.Printf("D! [inputs.amqp_consumer] connected to %q", broker) + a.Log.Debugf("Connected to %q", broker) break } - log.Printf("D! [inputs.amqp_consumer] error connecting to %q", broker) + a.Log.Debugf("Error connecting to %q", broker) } if a.conn == nil { @@ -288,7 +288,7 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err ch, err := a.conn.Channel() if err != nil { - return nil, fmt.Errorf("Failed to open a channel: %s", err) + return nil, fmt.Errorf("Failed to open a channel: %s", err.Error()) } if a.Exchange != "" { @@ -395,7 +395,7 @@ func declareExchange( ) } if err != nil { - return fmt.Errorf("error declaring exchange: %v", err) + return fmt.Errorf("Error declaring exchange: %v", err) } return nil } @@ -437,7 +437,7 @@ func declareQueue( ) } if err != nil { - return nil, fmt.Errorf("error declaring queue: %v", err) + return nil, fmt.Errorf("Error declaring queue: %v", err) } return &queue, nil } @@ -486,8 +486,7 @@ func (a *AMQPConsumer) onMessage(acc telegraf.TrackingAccumulator, d amqp.Delive // this message. rejErr := d.Ack(false) if rejErr != nil { - log.Printf("E! [inputs.amqp_consumer] Unable to reject message: %d: %v", - d.DeliveryTag, rejErr) + a.Log.Errorf("Unable to reject message: %d: %v", d.DeliveryTag, rejErr) a.conn.Close() } } @@ -519,15 +518,13 @@ func (a *AMQPConsumer) onDelivery(track telegraf.DeliveryInfo) bool { if track.Delivered() { err := delivery.Ack(false) if err != nil { - log.Printf("E! [inputs.amqp_consumer] Unable to ack written delivery: %d: %v", - delivery.DeliveryTag, err) + a.Log.Errorf("Unable to ack written delivery: %d: %v", delivery.DeliveryTag, err) a.conn.Close() } } else { err := delivery.Reject(false) if err != nil { - log.Printf("E! [inputs.amqp_consumer] Unable to reject failed delivery: %d: %v", - delivery.DeliveryTag, err) + a.Log.Errorf("Unable to reject failed delivery: %d: %v", delivery.DeliveryTag, err) a.conn.Close() } } @@ -541,7 +538,7 @@ func (a *AMQPConsumer) Stop() { a.wg.Wait() err := a.conn.Close() if err != nil && err != amqp.ErrClosed { - log.Printf("E! 
[inputs.amqp_consumer] Error closing AMQP connection: %s", err) + a.Log.Errorf("Error closing AMQP connection: %s", err) return } } diff --git a/plugins/inputs/azure_storage_queue/azure_storage_queue.go b/plugins/inputs/azure_storage_queue/azure_storage_queue.go index 0fa7b0fd6..6d132a5ef 100644 --- a/plugins/inputs/azure_storage_queue/azure_storage_queue.go +++ b/plugins/inputs/azure_storage_queue/azure_storage_queue.go @@ -1,9 +1,8 @@ -package activemq +package azure_storage_queue import ( "context" "errors" - "log" "net/url" "strings" "time" @@ -17,6 +16,7 @@ type AzureStorageQueue struct { StorageAccountName string `toml:"account_name"` StorageAccountKey string `toml:"account_key"` PeekOldestMessageAge bool `toml:"peek_oldest_message_age"` + Log telegraf.Logger serviceURL *azqueue.ServiceURL } @@ -92,7 +92,7 @@ func (a *AzureStorageQueue) Gather(acc telegraf.Accumulator) error { ctx := context.TODO() for marker := (azqueue.Marker{}); marker.NotDone(); { - log.Printf("D! [inputs.azure_storage_queue] Listing queues of storage account '%s'", a.StorageAccountName) + a.Log.Debugf("Listing queues of storage account '%s'", a.StorageAccountName) queuesSegment, err := serviceURL.ListQueuesSegment(ctx, marker, azqueue.ListQueuesSegmentOptions{ Detail: azqueue.ListQueuesSegmentDetails{Metadata: false}, @@ -103,11 +103,11 @@ func (a *AzureStorageQueue) Gather(acc telegraf.Accumulator) error { marker = queuesSegment.NextMarker for _, queueItem := range queuesSegment.QueueItems { - log.Printf("D! [inputs.azure_storage_queue] Processing queue '%s' of storage account '%s'", queueItem.Name, a.StorageAccountName) + a.Log.Debugf("Processing queue '%s' of storage account '%s'", queueItem.Name, a.StorageAccountName) queueURL := serviceURL.NewQueueURL(queueItem.Name) properties, err := queueURL.GetProperties(ctx) if err != nil { - log.Printf("E! [inputs.azure_storage_queue] Error getting properties for queue %s: %s", queueItem.Name, err.Error()) + a.Log.Errorf("Error getting properties for queue %s: %s", queueItem.Name, err.Error()) continue } var peekedMessage *azqueue.PeekedMessage @@ -115,7 +115,7 @@ func (a *AzureStorageQueue) Gather(acc telegraf.Accumulator) error { messagesURL := queueURL.NewMessagesURL() messagesResponse, err := messagesURL.Peek(ctx, 1) if err != nil { - log.Printf("E! [inputs.azure_storage_queue] Error peeking queue %s: %s", queueItem.Name, err.Error()) + a.Log.Errorf("Error peeking queue %s: %s", queueItem.Name, err.Error()) } else if messagesResponse.NumMessages() > 0 { peekedMessage = messagesResponse.Message(0) } diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index e28f977d2..9a2fc47a3 100644 --- a/plugins/inputs/ceph/ceph.go +++ b/plugins/inputs/ceph/ceph.go @@ -101,12 +101,12 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error { for _, s := range sockets { dump, err := perfDump(c.CephBinary, s) if err != nil { - acc.AddError(fmt.Errorf("E! error reading from socket '%s': %v", s.socket, err)) + acc.AddError(fmt.Errorf("error reading from socket '%s': %v", s.socket, err)) continue } data, err := parseDump(dump) if err != nil { - acc.AddError(fmt.Errorf("E! error parsing dump from socket '%s': %v", s.socket, err)) + acc.AddError(fmt.Errorf("error parsing dump from socket '%s': %v", s.socket, err)) continue } for tag, metrics := range data { @@ -287,7 +287,7 @@ func flatten(data interface{}) []*metric { } } default: - log.Printf("I! Ignoring unexpected type '%T' for value %v", val, val) + log.Printf("I! 
[inputs.ceph] ignoring unexpected type '%T' for value %v", val, val) } return metrics diff --git a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go index 9ab920bed..75a073bb6 100644 --- a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go +++ b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go @@ -7,7 +7,6 @@ import ( "encoding/json" "fmt" "io" - "log" "net" "strings" "sync" @@ -54,6 +53,8 @@ type CiscoTelemetryGNMI struct { acc telegraf.Accumulator cancel context.CancelFunc wg sync.WaitGroup + + Log telegraf.Logger } // Subscription for a GNMI client @@ -211,8 +212,8 @@ func (c *CiscoTelemetryGNMI) subscribeGNMI(ctx context.Context, address string, return fmt.Errorf("failed to send subscription request: %v", err) } - log.Printf("D! [inputs.cisco_telemetry_gnmi]: Connection to GNMI device %s established", address) - defer log.Printf("D! [inputs.cisco_telemetry_gnmi]: Connection to GNMI device %s closed", address) + c.Log.Debugf("Connection to GNMI device %s established", address) + defer c.Log.Debugf("Connection to GNMI device %s closed", address) for ctx.Err() == nil { var reply *gnmi.SubscribeResponse if reply, err = subscribeClient.Recv(); err != nil { @@ -267,7 +268,7 @@ func (c *CiscoTelemetryGNMI) handleSubscribeResponse(address string, reply *gnmi if alias, ok := c.aliases[aliasPath]; ok { name = alias } else { - log.Printf("D! [inputs.cisco_telemetry_gnmi]: No measurement alias for GNMI path: %s", name) + c.Log.Debugf("No measurement alias for GNMI path: %s", name) } } diff --git a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go index 32ad714fd..7a62bcd14 100644 --- a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go +++ b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go @@ -104,8 +104,10 @@ func TestGNMIError(t *testing.T) { acc := &testutil.Accumulator{} gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 0, server: server, acc: acc}) - c := &CiscoTelemetryGNMI{Addresses: []string{listener.Addr().String()}, - Username: "theuser", Password: "thepassword", Encoding: "proto", + c := &CiscoTelemetryGNMI{ + Log: testutil.Logger{}, + Addresses: []string{listener.Addr().String()}, + Username: "theuser", Password: "thepassword", Encoding: "proto", Redial: internal.Duration{Duration: 1 * time.Second}} require.NoError(t, c.Start(acc)) @@ -174,8 +176,10 @@ func TestGNMIMultiple(t *testing.T) { acc := &testutil.Accumulator{} gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 1, server: server, acc: acc}) - c := &CiscoTelemetryGNMI{Addresses: []string{listener.Addr().String()}, - Username: "theuser", Password: "thepassword", Encoding: "proto", + c := &CiscoTelemetryGNMI{ + Log: testutil.Logger{}, + Addresses: []string{listener.Addr().String()}, + Username: "theuser", Password: "thepassword", Encoding: "proto", Redial: internal.Duration{Duration: 1 * time.Second}, Subscriptions: []Subscription{{Name: "alias", Origin: "type", Path: "/model", SubscriptionMode: "sample"}}, } @@ -215,8 +219,10 @@ func TestGNMIMultipleRedial(t *testing.T) { acc := &testutil.Accumulator{} gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 2, server: server, acc: acc}) - c := &CiscoTelemetryGNMI{Addresses: []string{listener.Addr().String()}, - Username: "theuser", Password: "thepassword", Encoding: "proto", + c := &CiscoTelemetryGNMI{ + Log: testutil.Logger{}, + Addresses: 
[]string{listener.Addr().String()}, + Username: "theuser", Password: "thepassword", Encoding: "proto", Redial: internal.Duration{Duration: 10 * time.Millisecond}, Subscriptions: []Subscription{{Name: "alias", Origin: "type", Path: "/model", SubscriptionMode: "sample"}}, } diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go index ddca8247d..37ccff926 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -5,7 +5,6 @@ import ( "encoding/binary" "fmt" "io" - "log" "net" "path" "strconv" @@ -43,6 +42,8 @@ type CiscoTelemetryMDT struct { Aliases map[string]string `toml:"aliases"` EmbeddedTags []string `toml:"embedded_tags"` + Log telegraf.Logger + // GRPC TLS settings internaltls.ServerConfig @@ -146,11 +147,11 @@ func (c *CiscoTelemetryMDT) acceptTCPClients() { // Individual client connection routine c.wg.Add(1) go func() { - log.Printf("D! [inputs.cisco_telemetry_mdt]: Accepted Cisco MDT TCP dialout connection from %s", conn.RemoteAddr()) + c.Log.Debugf("Accepted Cisco MDT TCP dialout connection from %s", conn.RemoteAddr()) if err := c.handleTCPClient(conn); err != nil { c.acc.AddError(err) } - log.Printf("D! [inputs.cisco_telemetry_mdt]: Closed Cisco MDT TCP dialout connection from %s", conn.RemoteAddr()) + c.Log.Debugf("Closed Cisco MDT TCP dialout connection from %s", conn.RemoteAddr()) mutex.Lock() delete(clients, conn) @@ -165,7 +166,7 @@ func (c *CiscoTelemetryMDT) acceptTCPClients() { mutex.Lock() for client := range clients { if err := client.Close(); err != nil { - log.Printf("E! [inputs.cisco_telemetry_mdt]: Failed to close TCP dialout client: %v", err) + c.Log.Errorf("Failed to close TCP dialout client: %v", err) } } mutex.Unlock() @@ -218,7 +219,7 @@ func (c *CiscoTelemetryMDT) handleTCPClient(conn net.Conn) error { func (c *CiscoTelemetryMDT) MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutServer) error { peer, peerOK := peer.FromContext(stream.Context()) if peerOK { - log.Printf("D! [inputs.cisco_telemetry_mdt]: Accepted Cisco MDT GRPC dialout connection from %s", peer.Addr) + c.Log.Debugf("Accepted Cisco MDT GRPC dialout connection from %s", peer.Addr) } var chunkBuffer bytes.Buffer @@ -252,7 +253,7 @@ func (c *CiscoTelemetryMDT) MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutS } if peerOK { - log.Printf("D! [inputs.cisco_telemetry_mdt]: Closed Cisco MDT GRPC dialout connection from %s", peer.Addr) + c.Log.Debugf("Closed Cisco MDT GRPC dialout connection from %s", peer.Addr) } return nil @@ -291,7 +292,7 @@ func (c *CiscoTelemetryMDT) handleTelemetry(data []byte) { } if keys == nil || content == nil { - log.Printf("I! [inputs.cisco_telemetry_mdt]: Message from %s missing keys or content", msg.GetNodeIdStr()) + c.Log.Infof("Message from %s missing keys or content", msg.GetNodeIdStr()) continue } @@ -412,7 +413,7 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie } else { c.mutex.Lock() if _, haveWarned := c.warned[path]; !haveWarned { - log.Printf("D! 
[inputs.cisco_telemetry_mdt]: No measurement alias for encoding path: %s", path) + c.Log.Debugf("No measurement alias for encoding path: %s", path) c.warned[path] = struct{}{} } c.mutex.Unlock() diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go index 3736a8531..5261bd399 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go @@ -18,7 +18,7 @@ import ( ) func TestHandleTelemetryTwoSimple(t *testing.T) { - c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"alias": "type:model/some/path"}} + c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "dummy", Aliases: map[string]string{"alias": "type:model/some/path"}} acc := &testutil.Accumulator{} c.Start(acc) @@ -93,7 +93,7 @@ func TestHandleTelemetryTwoSimple(t *testing.T) { } func TestHandleTelemetrySingleNested(t *testing.T) { - c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"nested": "type:model/nested/path"}} + c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "dummy", Aliases: map[string]string{"nested": "type:model/nested/path"}} acc := &testutil.Accumulator{} c.Start(acc) @@ -385,7 +385,7 @@ func TestHandleNXDME(t *testing.T) { } func TestTCPDialoutOverflow(t *testing.T) { - c := &CiscoTelemetryMDT{Transport: "tcp", ServiceAddress: "127.0.0.1:57000"} + c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "tcp", ServiceAddress: "127.0.0.1:57000"} acc := &testutil.Accumulator{} assert.Nil(t, c.Start(acc)) @@ -441,7 +441,7 @@ func mockTelemetryMessage() *telemetry.Telemetry { } func TestTCPDialoutMultiple(t *testing.T) { - c := &CiscoTelemetryMDT{Transport: "tcp", ServiceAddress: "127.0.0.1:57000", Aliases: map[string]string{ + c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "tcp", ServiceAddress: "127.0.0.1:57000", Aliases: map[string]string{ "some": "type:model/some/path", "parallel": "type:model/parallel/path", "other": "type:model/other/path"}} acc := &testutil.Accumulator{} assert.Nil(t, c.Start(acc)) @@ -500,7 +500,7 @@ func TestTCPDialoutMultiple(t *testing.T) { } func TestGRPCDialoutError(t *testing.T) { - c := &CiscoTelemetryMDT{Transport: "grpc", ServiceAddress: "127.0.0.1:57001"} + c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "grpc", ServiceAddress: "127.0.0.1:57001"} acc := &testutil.Accumulator{} assert.Nil(t, c.Start(acc)) @@ -519,7 +519,7 @@ func TestGRPCDialoutError(t *testing.T) { } func TestGRPCDialoutMultiple(t *testing.T) { - c := &CiscoTelemetryMDT{Transport: "grpc", ServiceAddress: "127.0.0.1:57001", Aliases: map[string]string{ + c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "grpc", ServiceAddress: "127.0.0.1:57001", Aliases: map[string]string{ "some": "type:model/some/path", "parallel": "type:model/parallel/path", "other": "type:model/other/path"}} acc := &testutil.Accumulator{} assert.Nil(t, c.Start(acc)) diff --git a/plugins/inputs/cloud_pubsub/pubsub.go b/plugins/inputs/cloud_pubsub/pubsub.go index 845711e7d..b418274f3 100644 --- a/plugins/inputs/cloud_pubsub/pubsub.go +++ b/plugins/inputs/cloud_pubsub/pubsub.go @@ -5,16 +5,16 @@ import ( "fmt" "sync" - "cloud.google.com/go/pubsub" "encoding/base64" + "time" + + "cloud.google.com/go/pubsub" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" 
"golang.org/x/oauth2/google" "google.golang.org/api/option" - "log" - "time" ) type empty struct{} @@ -43,6 +43,8 @@ type PubSub struct { Base64Data bool `toml:"base64_data"` + Log telegraf.Logger + sub subscription stubSub func() subscription @@ -134,14 +136,14 @@ func (ps *PubSub) receiveWithRetry(parentCtx context.Context) { err := ps.startReceiver(parentCtx) for err != nil && parentCtx.Err() == nil { - log.Printf("E! [inputs.cloud_pubsub] Receiver for subscription %s exited with error: %v", ps.sub.ID(), err) + ps.Log.Errorf("Receiver for subscription %s exited with error: %v", ps.sub.ID(), err) delay := defaultRetryDelaySeconds if ps.RetryReceiveDelaySeconds > 0 { delay = ps.RetryReceiveDelaySeconds } - log.Printf("I! [inputs.cloud_pubsub] Waiting %d seconds before attempting to restart receiver...", delay) + ps.Log.Infof("Waiting %d seconds before attempting to restart receiver...", delay) time.Sleep(time.Duration(delay) * time.Second) err = ps.startReceiver(parentCtx) @@ -149,7 +151,7 @@ func (ps *PubSub) receiveWithRetry(parentCtx context.Context) { } func (ps *PubSub) startReceiver(parentCtx context.Context) error { - log.Printf("I! [inputs.cloud_pubsub] Starting receiver for subscription %s...", ps.sub.ID()) + ps.Log.Infof("Starting receiver for subscription %s...", ps.sub.ID()) cctx, ccancel := context.WithCancel(parentCtx) err := ps.sub.Receive(cctx, func(ctx context.Context, msg message) { if err := ps.onMessage(ctx, msg); err != nil { @@ -159,7 +161,7 @@ func (ps *PubSub) startReceiver(parentCtx context.Context) error { if err != nil { ps.acc.AddError(fmt.Errorf("receiver for subscription %s exited: %v", ps.sub.ID(), err)) } else { - log.Printf("I! [inputs.cloud_pubsub] subscription pull ended (no error, most likely stopped)") + ps.Log.Info("Subscription pull ended (no error, most likely stopped)") } ccancel() return err diff --git a/plugins/inputs/cloud_pubsub/pubsub_test.go b/plugins/inputs/cloud_pubsub/pubsub_test.go index 6233546aa..2045cf4cc 100644 --- a/plugins/inputs/cloud_pubsub/pubsub_test.go +++ b/plugins/inputs/cloud_pubsub/pubsub_test.go @@ -3,10 +3,11 @@ package cloud_pubsub import ( "encoding/base64" "errors" + "testing" + "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" - "testing" ) const ( @@ -26,6 +27,7 @@ func TestRunParse(t *testing.T) { sub.receiver = testMessagesReceive(sub) ps := &PubSub{ + Log: testutil.Logger{}, parser: testParser, stubSub: func() subscription { return sub }, Project: "projectIDontMatterForTests", @@ -69,6 +71,7 @@ func TestRunBase64(t *testing.T) { sub.receiver = testMessagesReceive(sub) ps := &PubSub{ + Log: testutil.Logger{}, parser: testParser, stubSub: func() subscription { return sub }, Project: "projectIDontMatterForTests", @@ -112,6 +115,7 @@ func TestRunInvalidMessages(t *testing.T) { sub.receiver = testMessagesReceive(sub) ps := &PubSub{ + Log: testutil.Logger{}, parser: testParser, stubSub: func() subscription { return sub }, Project: "projectIDontMatterForTests", @@ -158,6 +162,7 @@ func TestRunOverlongMessages(t *testing.T) { sub.receiver = testMessagesReceive(sub) ps := &PubSub{ + Log: testutil.Logger{}, parser: testParser, stubSub: func() subscription { return sub }, Project: "projectIDontMatterForTests", @@ -205,6 +210,7 @@ func TestRunErrorInSubscriber(t *testing.T) { sub.receiver = testMessagesError(sub, errors.New("a fake error")) ps := &PubSub{ + Log: testutil.Logger{}, parser: testParser, stubSub: func() subscription { return sub }, 
Project: "projectIDontMatterForTests", diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push.go b/plugins/inputs/cloud_pubsub_push/pubsub_push.go index 8b83a440d..d1c521349 100644 --- a/plugins/inputs/cloud_pubsub_push/pubsub_push.go +++ b/plugins/inputs/cloud_pubsub_push/pubsub_push.go @@ -6,7 +6,6 @@ import ( "encoding/base64" "encoding/json" "io/ioutil" - "log" "net" "net/http" "sync" @@ -33,6 +32,7 @@ type PubSubPush struct { WriteTimeout internal.Duration MaxBodySize internal.Size AddMeta bool + Log telegraf.Logger MaxUndeliveredMessages int `toml:"max_undelivered_messages"` @@ -227,21 +227,21 @@ func (p *PubSubPush) serveWrite(res http.ResponseWriter, req *http.Request) { var payload Payload if err = json.Unmarshal(bytes, &payload); err != nil { - log.Printf("E! [inputs.cloud_pubsub_push] Error decoding payload %s", err.Error()) + p.Log.Errorf("Error decoding payload %s", err.Error()) res.WriteHeader(http.StatusBadRequest) return } sDec, err := base64.StdEncoding.DecodeString(payload.Msg.Data) if err != nil { - log.Printf("E! [inputs.cloud_pubsub_push] Base64-Decode Failed %s", err.Error()) + p.Log.Errorf("Base64-decode failed %s", err.Error()) res.WriteHeader(http.StatusBadRequest) return } metrics, err := p.Parse(sDec) if err != nil { - log.Println("D! [inputs.cloud_pubsub_push] " + err.Error()) + p.Log.Debug(err.Error()) res.WriteHeader(http.StatusBadRequest) return } @@ -295,7 +295,7 @@ func (p *PubSubPush) receiveDelivered() { ch <- true } else { ch <- false - log.Println("D! [inputs.cloud_pubsub_push] Metric group failed to process") + p.Log.Debug("Metric group failed to process") } } } diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go index 45a304e60..a0d71da94 100644 --- a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go +++ b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go @@ -18,6 +18,7 @@ import ( "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/models" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" ) func TestServeHTTP(t *testing.T) { @@ -118,6 +119,7 @@ func TestServeHTTP(t *testing.T) { rr := httptest.NewRecorder() pubPush := &PubSubPush{ + Log: testutil.Logger{}, Path: "/", MaxBodySize: internal.Size{ Size: test.maxsize, diff --git a/plugins/inputs/diskio/diskio.go b/plugins/inputs/diskio/diskio.go index 053765b4e..875ec9582 100644 --- a/plugins/inputs/diskio/diskio.go +++ b/plugins/inputs/diskio/diskio.go @@ -2,7 +2,6 @@ package diskio import ( "fmt" - "log" "regexp" "strings" @@ -24,6 +23,8 @@ type DiskIO struct { NameTemplates []string SkipSerialNumber bool + Log telegraf.Logger + infoCache map[string]diskInfoCache deviceFilter filter.Filter initialized bool @@ -75,7 +76,7 @@ func (s *DiskIO) init() error { if hasMeta(device) { filter, err := filter.Compile(s.Devices) if err != nil { - return fmt.Errorf("error compiling device pattern: %v", err) + return fmt.Errorf("error compiling device pattern: %s", err.Error()) } s.deviceFilter = filter } @@ -99,7 +100,7 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error { diskio, err := s.ps.DiskIO(devices) if err != nil { - return fmt.Errorf("error getting disk io info: %s", err) + return fmt.Errorf("error getting disk io info: %s", err.Error()) } for _, io := range diskio { @@ -166,7 +167,7 @@ func (s *DiskIO) diskName(devName string) (string, []string) { } if err != nil { - log.Printf("W! 
Error gathering disk info: %s", err) + s.Log.Warnf("Error gathering disk info: %s", err) return devName, devLinks } @@ -199,7 +200,7 @@ func (s *DiskIO) diskTags(devName string) map[string]string { di, err := s.diskInfo(devName) if err != nil { - log.Printf("W! Error gathering disk info: %s", err) + s.Log.Warnf("Error gathering disk info: %s", err) return nil } diff --git a/plugins/inputs/diskio/diskio_test.go b/plugins/inputs/diskio/diskio_test.go index 41c4b53e2..b013e30ba 100644 --- a/plugins/inputs/diskio/diskio_test.go +++ b/plugins/inputs/diskio/diskio_test.go @@ -103,6 +103,7 @@ func TestDiskIO(t *testing.T) { var acc testutil.Accumulator diskio := &DiskIO{ + Log: testutil.Logger{}, ps: &mps, Devices: tt.devices, } diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 3c92ca278..a3dc78bd4 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "io" - "log" "net/http" "regexp" "strconv" @@ -45,6 +44,8 @@ type Docker struct { ContainerStateInclude []string `toml:"container_state_include"` ContainerStateExclude []string `toml:"container_state_exclude"` + Log telegraf.Logger + tlsint.ClientConfig newEnvClient func() (Client, error) @@ -107,8 +108,10 @@ var sampleConfig = ` ## Whether to report for each container per-device blkio (8:0, 8:1...) and ## network (eth0, eth1, ...) stats or not perdevice = true + ## Whether to report for each container total blkio and network stats or not total = false + ## Which environment variables should we use as a tag ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] @@ -274,7 +277,7 @@ func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error { fields["tasks_running"] = running[service.ID] fields["tasks_desired"] = tasksNoShutdown[service.ID] } else { - log.Printf("E! 
Unknow Replicas Mode") + d.Log.Error("Unknown replica mode") } // Add metrics acc.AddFields("docker_swarm", diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index 77228b00c..4add3340d 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -252,6 +252,7 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) { var acc testutil.Accumulator d := Docker{ + Log: testutil.Logger{}, newClient: func(string, *tls.Config) (Client, error) { return &MockClient{ InfoF: func(ctx context.Context) (types.Info, error) { @@ -390,6 +391,7 @@ func TestContainerLabels(t *testing.T) { } d := Docker{ + Log: testutil.Logger{}, newClient: newClientFunc, LabelInclude: tt.include, LabelExclude: tt.exclude, @@ -511,6 +513,7 @@ func TestContainerNames(t *testing.T) { } d := Docker{ + Log: testutil.Logger{}, newClient: newClientFunc, ContainerInclude: tt.include, ContainerExclude: tt.exclude, @@ -625,7 +628,10 @@ func TestContainerStatus(t *testing.T) { return &client, nil } - d = Docker{newClient: newClientFunc} + d = Docker{ + Log: testutil.Logger{}, + newClient: newClientFunc, + } ) // mock time @@ -675,6 +681,7 @@ func TestContainerStatus(t *testing.T) { func TestDockerGatherInfo(t *testing.T) { var acc testutil.Accumulator d := Docker{ + Log: testutil.Logger{}, newClient: newClient, TagEnvironment: []string{"ENVVAR1", "ENVVAR2", "ENVVAR3", "ENVVAR5", "ENVVAR6", "ENVVAR7", "ENVVAR8", "ENVVAR9"}, @@ -824,6 +831,7 @@ func TestDockerGatherInfo(t *testing.T) { func TestDockerGatherSwarmInfo(t *testing.T) { var acc testutil.Accumulator d := Docker{ + Log: testutil.Logger{}, newClient: newClient, } @@ -931,6 +939,7 @@ func TestContainerStateFilter(t *testing.T) { } d := Docker{ + Log: testutil.Logger{}, newClient: newClientFunc, ContainerStateInclude: tt.include, ContainerStateExclude: tt.exclude, @@ -992,6 +1001,7 @@ func TestContainerName(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { d := Docker{ + Log: testutil.Logger{}, newClient: tt.clientFunc, } var acc testutil.Accumulator diff --git a/plugins/inputs/dovecot/README.md b/plugins/inputs/dovecot/README.md index c853832b6..d28ae3dd9 100644 --- a/plugins/inputs/dovecot/README.md +++ b/plugins/inputs/dovecot/README.md @@ -17,8 +17,10 @@ the [upgrading steps][upgrading]. ## ## If no servers are specified, then localhost is used as the host. servers = ["localhost:24242"] + ## Type is one of "user", "domain", "ip", or "global" type = "global" + ## Wildcard matches like "*.com". An empty string "" is same as "*" ## If type = "ip" filters should be filters = [""] diff --git a/plugins/inputs/dovecot/dovecot.go b/plugins/inputs/dovecot/dovecot.go index a621252e5..66282c434 100644 --- a/plugins/inputs/dovecot/dovecot.go +++ b/plugins/inputs/dovecot/dovecot.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - // "log" "net" "strconv" "strings" @@ -32,8 +31,10 @@ var sampleConfig = ` ## ## If no servers are specified, then localhost is used as the host. servers = ["localhost:24242"] + ## Type is one of "user", "domain", "ip", or "global" type = "global" + ## Wildcard matches like "*.com". 
An empty string "" is same as "*" ## If type = "ip" filters should be filters = [""] @@ -82,12 +83,12 @@ func (d *Dovecot) Gather(acc telegraf.Accumulator) error { func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype string, filter string) error { _, _, err := net.SplitHostPort(addr) if err != nil { - return fmt.Errorf("Error: %s on url %s\n", err, addr) + return fmt.Errorf("%q on url %s", err.Error(), addr) } c, err := net.DialTimeout("tcp", addr, defaultTimeout) if err != nil { - return fmt.Errorf("Unable to connect to dovecot server '%s': %s", addr, err) + return fmt.Errorf("unable to connect to dovecot server '%s': %s", addr, err) } defer c.Close() diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index 2d3643ad0..3176b5a6a 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -161,7 +161,7 @@ func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync if isNagios { metrics, err = nagios.TryAddState(runErr, metrics) if err != nil { - e.log.Errorf("failed to add nagios state: %s", err) + e.log.Errorf("Failed to add nagios state: %s", err) } } diff --git a/plugins/inputs/filecount/filecount.go b/plugins/inputs/filecount/filecount.go index 965f41d2c..4d42da603 100644 --- a/plugins/inputs/filecount/filecount.go +++ b/plugins/inputs/filecount/filecount.go @@ -1,7 +1,6 @@ package filecount import ( - "log" "os" "path/filepath" "time" @@ -59,6 +58,7 @@ type FileCount struct { fileFilters []fileFilterFunc globPaths []globpath.GlobPath Fs fileSystem + Log telegraf.Logger } func (_ *FileCount) Description() string { @@ -210,7 +210,7 @@ func (fc *FileCount) count(acc telegraf.Accumulator, basedir string, glob globpa Unsorted: true, ErrorCallback: func(osPathname string, err error) godirwalk.ErrorAction { if os.IsPermission(errors.Cause(err)) { - log.Println("D! [inputs.filecount]", err) + fc.Log.Debug(err) return godirwalk.SkipNode } return godirwalk.Halt diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go index 9cd7c747c..dcd6d9d8e 100644 --- a/plugins/inputs/filecount/filecount_test.go +++ b/plugins/inputs/filecount/filecount_test.go @@ -152,6 +152,7 @@ func TestDirectoryWithTrailingSlash(t *testing.T) { func getNoFilterFileCount() FileCount { return FileCount{ + Log: testutil.Logger{}, Directories: []string{getTestdataDir()}, Name: "*", Recursive: true, diff --git a/plugins/inputs/filestat/README.md b/plugins/inputs/filestat/README.md index 3102c13b0..840cafb53 100644 --- a/plugins/inputs/filestat/README.md +++ b/plugins/inputs/filestat/README.md @@ -11,6 +11,7 @@ The filestat plugin gathers metrics about file existence, size, and other stats. ## These accept standard unix glob matching rules, but with the addition of ## ** as a "super asterisk". See https://github.com/gobwas/glob. files = ["/etc/telegraf/telegraf.conf", "/var/log/**.log"] + ## If true, read the entire file and calculate an md5 checksum. md5 = false ``` diff --git a/plugins/inputs/filestat/filestat.go b/plugins/inputs/filestat/filestat.go index 692e58c53..bf8ea6c16 100644 --- a/plugins/inputs/filestat/filestat.go +++ b/plugins/inputs/filestat/filestat.go @@ -4,7 +4,6 @@ import ( "crypto/md5" "fmt" "io" - "log" "os" "github.com/influxdata/telegraf" @@ -23,6 +22,7 @@ const sampleConfig = ` ## See https://github.com/gobwas/glob for more examples ## files = ["/var/log/**.log"] + ## If true, read the entire file and calculate an md5 checksum.
md5 = false ` @@ -31,6 +31,8 @@ type FileStat struct { Md5 bool Files []string + Log telegraf.Logger + // maps full file paths to globmatch obj globs map[string]*globpath.GlobPath } @@ -41,11 +43,11 @@ func NewFileStat() *FileStat { } } -func (_ *FileStat) Description() string { +func (*FileStat) Description() string { return "Read stats about given file(s)" } -func (_ *FileStat) SampleConfig() string { return sampleConfig } +func (*FileStat) SampleConfig() string { return sampleConfig } func (f *FileStat) Gather(acc telegraf.Accumulator) error { var err error @@ -86,7 +88,7 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error { } if fileInfo == nil { - log.Printf("E! Unable to get info for file [%s], possible permissions issue", + f.Log.Errorf("Unable to get info for file %q, possible permissions issue", fileName) } else { fields["size_bytes"] = fileInfo.Size() diff --git a/plugins/inputs/filestat/filestat_test.go b/plugins/inputs/filestat/filestat_test.go index 7fdf6cde8..a38d3b0aa 100644 --- a/plugins/inputs/filestat/filestat_test.go +++ b/plugins/inputs/filestat/filestat_test.go @@ -14,6 +14,7 @@ import ( func TestGatherNoMd5(t *testing.T) { dir := getTestdataDir() fs := NewFileStat() + fs.Log = testutil.Logger{} fs.Files = []string{ dir + "log1.log", dir + "log2.log", @@ -44,6 +45,7 @@ func TestGatherNoMd5(t *testing.T) { func TestGatherExplicitFiles(t *testing.T) { dir := getTestdataDir() fs := NewFileStat() + fs.Log = testutil.Logger{} fs.Md5 = true fs.Files = []string{ dir + "log1.log", @@ -77,6 +79,7 @@ func TestGatherExplicitFiles(t *testing.T) { func TestGatherGlob(t *testing.T) { dir := getTestdataDir() fs := NewFileStat() + fs.Log = testutil.Logger{} fs.Md5 = true fs.Files = []string{ dir + "*.log", @@ -103,6 +106,7 @@ func TestGatherGlob(t *testing.T) { func TestGatherSuperAsterisk(t *testing.T) { dir := getTestdataDir() fs := NewFileStat() + fs.Log = testutil.Logger{} fs.Md5 = true fs.Files = []string{ dir + "**", @@ -136,6 +140,7 @@ func TestGatherSuperAsterisk(t *testing.T) { func TestModificationTime(t *testing.T) { dir := getTestdataDir() fs := NewFileStat() + fs.Log = testutil.Logger{} fs.Files = []string{ dir + "log1.log", } @@ -153,6 +158,7 @@ func TestModificationTime(t *testing.T) { func TestNoModificationTime(t *testing.T) { fs := NewFileStat() + fs.Log = testutil.Logger{} fs.Files = []string{ "/non/existant/file", } diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index 5427b384d..21d35fab9 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -5,7 +5,6 @@ import ( "crypto/subtle" "crypto/tls" "io/ioutil" - "log" "net" "net/http" "net/url" @@ -48,6 +47,7 @@ type HTTPListenerV2 struct { tlsint.ServerConfig TimeFunc + Log telegraf.Logger wg sync.WaitGroup @@ -162,7 +162,7 @@ func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { server.Serve(h.listener) }() - log.Printf("I! [inputs.http_listener_v2] Listening on %s", listener.Addr().String()) + h.Log.Infof("Listening on %s", listener.Addr().String()) return nil } @@ -219,7 +219,7 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) metrics, err := h.Parse(bytes) if err != nil { - log.Printf("D! 
[inputs.http_listener_v2] Parse error: %v", err) + h.Log.Debugf("Parse error: %s", err.Error()) badRequest(res) return } @@ -239,7 +239,7 @@ func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) var err error body, err = gzip.NewReader(req.Body) if err != nil { - log.Println("D! " + err.Error()) + h.Log.Debug(err.Error()) badRequest(res) return nil, false } @@ -261,7 +261,7 @@ func (h *HTTPListenerV2) collectQuery(res http.ResponseWriter, req *http.Request query, err := url.QueryUnescape(rawQuery) if err != nil { - log.Printf("D! [inputs.http_listener_v2] Error parsing query: %v", err) + h.Log.Debugf("Error parsing query: %s", err.Error()) badRequest(res) return nil, false } diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go index c27b022b2..c9e96b92d 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2_test.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go @@ -46,6 +46,7 @@ func newTestHTTPListenerV2() *HTTPListenerV2 { parser, _ := parsers.NewInfluxParser() listener := &HTTPListenerV2{ + Log: testutil.Logger{}, ServiceAddress: "localhost:0", Path: "/write", Methods: []string{"POST"}, @@ -68,6 +69,7 @@ func newTestHTTPSListenerV2() *HTTPListenerV2 { parser, _ := parsers.NewInfluxParser() listener := &HTTPListenerV2{ + Log: testutil.Logger{}, ServiceAddress: "localhost:0", Path: "/write", Methods: []string{"POST"}, @@ -231,6 +233,7 @@ func TestWriteHTTPExactMaxBodySize(t *testing.T) { parser, _ := parsers.NewInfluxParser() listener := &HTTPListenerV2{ + Log: testutil.Logger{}, ServiceAddress: "localhost:0", Path: "/write", Methods: []string{"POST"}, @@ -253,6 +256,7 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) { parser, _ := parsers.NewInfluxParser() listener := &HTTPListenerV2{ + Log: testutil.Logger{}, ServiceAddress: "localhost:0", Path: "/write", Methods: []string{"POST"}, diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index b863190d7..24c22f72f 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -5,7 +5,6 @@ import ( "fmt" "io" "io/ioutil" - "log" "net" "net/http" "net/url" @@ -34,6 +33,8 @@ type HTTPResponse struct { Interface string tls.ClientConfig + Log telegraf.Logger + compiledStringMatch *regexp.Regexp client *http.Client } @@ -242,7 +243,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] // HTTP error codes do not generate errors in the net/http library if err != nil { // Log error - log.Printf("D! Network error while polling %s: %s", u, err.Error()) + h.Log.Debugf("Network error while polling %s: %s", u, err.Error()) // Get error details netErr := setError(err, fields, tags) @@ -271,7 +272,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] bodyBytes, err := ioutil.ReadAll(resp.Body) if err != nil { - log.Printf("D! Failed to read body of HTTP Response : %s", err) + h.Log.Debugf("Failed to read body of HTTP Response : %s", err.Error()) setResult("body_read_error", fields, tags) fields["content_length"] = len(bodyBytes) if h.ResponseStringMatch != "" { @@ -322,7 +323,7 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { if h.Address == "" { h.URLs = []string{"http://localhost"} } else { - log.Printf("W! 
[inputs.http_response] 'address' deprecated in telegraf 1.12, please use 'urls'") + h.Log.Warn("'address' deprecated in telegraf 1.12, please use 'urls'") h.URLs = []string{h.Address} } } diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 5ba586c59..530c81901 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -150,6 +150,7 @@ func TestHeaders(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL, Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 2}, @@ -185,6 +186,7 @@ func TestFields(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/good", Body: "{ 'test': 'data'}", Method: "GET", @@ -246,6 +248,7 @@ func TestInterface(t *testing.T) { require.NoError(t, err) h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/good", Body: "{ 'test': 'data'}", Method: "GET", @@ -284,6 +287,7 @@ func TestRedirects(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/redirect", Body: "{ 'test': 'data'}", Method: "GET", @@ -314,6 +318,7 @@ func TestRedirects(t *testing.T) { checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) h = &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/badredirect", Body: "{ 'test': 'data'}", Method: "GET", @@ -350,6 +355,7 @@ func TestMethod(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/mustbepostmethod", Body: "{ 'test': 'data'}", Method: "POST", @@ -380,6 +386,7 @@ func TestMethod(t *testing.T) { checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) h = &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/mustbepostmethod", Body: "{ 'test': 'data'}", Method: "GET", @@ -411,6 +418,7 @@ func TestMethod(t *testing.T) { //check that lowercase methods work correctly h = &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/mustbepostmethod", Body: "{ 'test': 'data'}", Method: "head", @@ -447,6 +455,7 @@ func TestBody(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/musthaveabody", Body: "{ 'test': 'data'}", Method: "GET", @@ -477,6 +486,7 @@ func TestBody(t *testing.T) { checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) h = &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/musthaveabody", Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, @@ -510,6 +520,7 @@ func TestStringMatch(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/good", Body: "{ 'test': 'data'}", Method: "GET", @@ -547,6 +558,7 @@ func TestStringMatchJson(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/jsonresponse", Body: "{ 'test': 'data'}", Method: "GET", @@ -584,6 +596,7 @@ func TestStringMatchFail(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/good", Body: "{ 'test': 'data'}", Method: "GET", @@ -626,6 +639,7 @@ func TestTimeout(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/twosecondnap", Body: "{ 'test': 'data'}", Method: "GET", @@ -659,6 +673,7 @@ func TestBadRegex(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/good", Body: "{ 
'test': 'data'}", Method: "GET", @@ -682,6 +697,7 @@ func TestBadRegex(t *testing.T) { func TestNetworkErrors(t *testing.T) { // DNS error h := &HTTPResponse{ + Log: testutil.Logger{}, Address: "https://nonexistent.nonexistent", // Any non-resolvable URL works here Body: "", Method: "GET", @@ -708,6 +724,7 @@ func TestNetworkErrors(t *testing.T) { // Connecton failed h = &HTTPResponse{ + Log: testutil.Logger{}, Address: "https:/nonexistent.nonexistent", // Any non-routable IP works here Body: "", Method: "GET", @@ -739,6 +756,7 @@ func TestContentLength(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", @@ -769,6 +787,7 @@ func TestContentLength(t *testing.T) { checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) h = &HTTPResponse{ + Log: testutil.Logger{}, URLs: []string{ts.URL + "/musthaveabody"}, Body: "{ 'test': 'data'}", Method: "GET", diff --git a/plugins/inputs/icinga2/README.md b/plugins/inputs/icinga2/README.md index 697c6c59c..14708cd41 100644 --- a/plugins/inputs/icinga2/README.md +++ b/plugins/inputs/icinga2/README.md @@ -11,10 +11,10 @@ services and hosts. You can read Icinga2's documentation for their remote API ```toml # Description [[inputs.icinga2]] - ## Required Icinga2 server address (default: "https://localhost:5665") + ## Required Icinga2 server address # server = "https://localhost:5665" - ## Required Icinga2 object type ("services" or "hosts, default "services") + ## Required Icinga2 object type ("services" or "hosts") # object_type = "services" ## Credentials for basic HTTP authentication diff --git a/plugins/inputs/icinga2/icinga2.go b/plugins/inputs/icinga2/icinga2.go index 82120da2c..67b9bcab9 100644 --- a/plugins/inputs/icinga2/icinga2.go +++ b/plugins/inputs/icinga2/icinga2.go @@ -3,7 +3,6 @@ package icinga2 import ( "encoding/json" "fmt" - "log" "net/http" "net/url" "time" @@ -22,6 +21,8 @@ type Icinga2 struct { ResponseTimeout internal.Duration tls.ClientConfig + Log telegraf.Logger + client *http.Client } @@ -49,10 +50,10 @@ var levels = []string{"ok", "warning", "critical", "unknown"} type ObjectType string var sampleConfig = ` - ## Required Icinga2 server address (default: "https://localhost:5665") + ## Required Icinga2 server address # server = "https://localhost:5665" - - ## Required Icinga2 object type ("services" or "hosts, default "services") + + ## Required Icinga2 object type ("services" or "hosts") # object_type = "services" ## Credentials for basic HTTP authentication @@ -80,25 +81,27 @@ func (i *Icinga2) SampleConfig() string { func (i *Icinga2) GatherStatus(acc telegraf.Accumulator, checks []Object) { for _, check := range checks { - fields := make(map[string]interface{}) - tags := make(map[string]string) - url, err := url.Parse(i.Server) if err != nil { - log.Fatal(err) + i.Log.Error(err.Error()) + continue } state := int64(check.Attrs.State) - fields["name"] = check.Attrs.Name - fields["state_code"] = state + fields := map[string]interface{}{ + "name": check.Attrs.Name, + "state_code": state, + } - tags["display_name"] = check.Attrs.DisplayName - tags["check_command"] = check.Attrs.CheckCommand - tags["state"] = levels[state] - tags["source"] = url.Hostname() - tags["scheme"] = url.Scheme - tags["port"] = url.Port() + tags := map[string]string{ + "display_name": check.Attrs.DisplayName, + "check_command": check.Attrs.CheckCommand, + "state": levels[state], + "source": url.Hostname(), + "scheme": url.Scheme, + "port": 
url.Port(), + } acc.AddFields(fmt.Sprintf("icinga2_%s", i.ObjectType), fields, tags) } @@ -165,8 +168,9 @@ func (i *Icinga2) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("icinga2", func() telegraf.Input { return &Icinga2{ - Server: "https://localhost:5665", - ObjectType: "services", + Server: "https://localhost:5665", + ObjectType: "services", + ResponseTimeout: internal.Duration{Duration: time.Second * 5}, } }) } diff --git a/plugins/inputs/icinga2/icinga2_test.go b/plugins/inputs/icinga2/icinga2_test.go index e62a8d423..a908af7d5 100644 --- a/plugins/inputs/icinga2/icinga2_test.go +++ b/plugins/inputs/icinga2/icinga2_test.go @@ -32,6 +32,7 @@ func TestGatherServicesStatus(t *testing.T) { json.Unmarshal([]byte(s), &checks) icinga2 := new(Icinga2) + icinga2.Log = testutil.Logger{} icinga2.ObjectType = "services" icinga2.Server = "https://localhost:5665" @@ -86,6 +87,7 @@ func TestGatherHostsStatus(t *testing.T) { var acc testutil.Accumulator icinga2 := new(Icinga2) + icinga2.Log = testutil.Logger{} icinga2.ObjectType = "hosts" icinga2.Server = "https://localhost:5665" diff --git a/plugins/inputs/influxdb_listener/http_listener.go b/plugins/inputs/influxdb_listener/http_listener.go index 5383fd2aa..aeb2b589f 100644 --- a/plugins/inputs/influxdb_listener/http_listener.go +++ b/plugins/inputs/influxdb_listener/http_listener.go @@ -8,7 +8,6 @@ import ( "encoding/json" "fmt" "io" - "log" "net" "net/http" "sync" @@ -75,6 +74,8 @@ type HTTPListener struct { BuffersCreated selfstat.Stat AuthFailures selfstat.Stat + Log telegraf.Logger + longLines selfstat.Stat } @@ -202,7 +203,7 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error { server.Serve(h.listener) }() - log.Printf("I! Started HTTP listener service on %s\n", h.ServiceAddress) + h.Log.Infof("Started HTTP listener service on %s", h.ServiceAddress) return nil } @@ -215,7 +216,7 @@ func (h *HTTPListener) Stop() { h.listener.Close() h.wg.Wait() - log.Println("I! Stopped HTTP listener service on ", h.ServiceAddress) + h.Log.Infof("Stopped HTTP listener service on %s", h.ServiceAddress) } func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) { @@ -274,7 +275,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { var err error body, err = gzip.NewReader(req.Body) if err != nil { - log.Println("D! " + err.Error()) + h.Log.Debug(err.Error()) badRequest(res, err.Error()) return } @@ -290,7 +291,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { for { n, err := io.ReadFull(body, buf[bufStart:]) if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF { - log.Println("D! " + err.Error()) + h.Log.Debug(err.Error()) // problem reading the request body badRequest(res, err.Error()) return @@ -326,7 +327,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { // finished reading the request body err = h.parse(buf[:n+bufStart], now, precision, db) if err != nil { - log.Println("D! "+err.Error(), bufStart+n) + h.Log.Debugf("%s: %d", err.Error(), bufStart+n) return400 = true } if return400 { @@ -348,7 +349,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { if i == -1 { h.longLines.Incr(1) // drop any line longer than the max buffer size - log.Printf("D! 
http_listener received a single line longer than the maximum of %d bytes", + h.Log.Debugf("Http_listener received a single line longer than the maximum of %d bytes", len(buf)) hangingBytes = true return400 = true @@ -356,7 +357,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { continue } if err := h.parse(buf[:i+1], now, precision, db); err != nil { - log.Println("D! " + err.Error()) + h.Log.Debug(err.Error()) return400 = true } // rotate the bit remaining after the last newline to the front of the buffer diff --git a/plugins/inputs/influxdb_listener/http_listener_test.go b/plugins/inputs/influxdb_listener/http_listener_test.go index 6d14e6539..771bb5faf 100644 --- a/plugins/inputs/influxdb_listener/http_listener_test.go +++ b/plugins/inputs/influxdb_listener/http_listener_test.go @@ -44,6 +44,7 @@ var ( func newTestHTTPListener() *HTTPListener { listener := &HTTPListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:0", TimeFunc: time.Now, } @@ -59,6 +60,7 @@ func newTestHTTPAuthListener() *HTTPListener { func newTestHTTPSListener() *HTTPListener { listener := &HTTPListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:0", ServerConfig: *pki.TLSServerConfig(), TimeFunc: time.Now, @@ -220,6 +222,7 @@ func TestWriteHTTPNoNewline(t *testing.T) { func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) { listener := &HTTPListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:0", MaxLineSize: internal.Size{Size: 128 * 1000}, TimeFunc: time.Now, @@ -238,6 +241,7 @@ func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) { func TestWriteHTTPVerySmallMaxBody(t *testing.T) { listener := &HTTPListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:0", MaxBodySize: internal.Size{Size: 4096}, TimeFunc: time.Now, @@ -255,6 +259,7 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) { func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) { listener := &HTTPListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:0", MaxLineSize: internal.Size{Size: 70}, TimeFunc: time.Now, @@ -282,6 +287,7 @@ func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) { func TestWriteHTTPLargeLinesSkipped(t *testing.T) { listener := &HTTPListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:0", MaxLineSize: internal.Size{Size: 100}, TimeFunc: time.Now, diff --git a/plugins/inputs/ipvs/ipvs.go b/plugins/inputs/ipvs/ipvs.go index 2d3ad0278..4f36b95ce 100644 --- a/plugins/inputs/ipvs/ipvs.go +++ b/plugins/inputs/ipvs/ipvs.go @@ -5,7 +5,6 @@ package ipvs import ( "errors" "fmt" - "log" "math/bits" "strconv" "syscall" @@ -18,6 +17,7 @@ import ( // IPVS holds the state for this input plugin type IPVS struct { handle *ipvs.Handle + Log telegraf.Logger } // Description returns a description string @@ -61,7 +61,7 @@ func (i *IPVS) Gather(acc telegraf.Accumulator) error { destinations, err := i.handle.GetDestinations(s) if err != nil { - log.Println("E! 
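For context on the influxdb_listener `serveWrite` hunks above: the listener reads the request body into one fixed-size buffer, parses only up to the last newline, and rotates the partial tail to the front of the buffer, which is why the debug path reports `bufStart+n` and why over-long lines are counted via `h.longLines`. Below is a standalone, simplified sketch of that buffering technique; it is not the plugin's exact code.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// readLines consumes r using one fixed-size buffer, emitting only complete
// newline-terminated chunks and carrying the partial tail into the next
// read, the same technique the serveWrite hunks above use.
func readLines(r io.Reader, bufSize int, emit func(line []byte)) error {
	buf := make([]byte, bufSize)
	bufStart := 0 // length of the partial line carried over from the last read
	for {
		n, err := io.ReadFull(r, buf[bufStart:])
		if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
			return err // genuine problem reading the body
		}
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			// finished: whatever is buffered is the final chunk
			if bufStart+n > 0 {
				emit(buf[:bufStart+n])
			}
			return nil
		}
		// buffer is full: split at the last newline and keep the remainder
		i := bytes.LastIndexByte(buf, '\n')
		if i == -1 {
			// a single line longer than the buffer; a real listener drops
			// it and logs, as the hunk above does via h.longLines
			return fmt.Errorf("line longer than %d bytes", bufSize)
		}
		emit(buf[:i+1])
		bufStart = copy(buf, buf[i+1:]) // rotate the tail to the front
	}
}

func main() {
	in := strings.NewReader("cpu value=1\ncpu value=2\ncpu value=3\n")
	readLines(in, 16, func(line []byte) { fmt.Printf("chunk: %q\n", line) })
}
```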
Failed to list destinations for a virtual server") + i.Log.Errorf("Failed to list destinations for a virtual server: %s", err.Error()) continue // move on to the next virtual server } diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go index cfa0a38e4..e13d5c25d 100644 --- a/plugins/inputs/jenkins/jenkins.go +++ b/plugins/inputs/jenkins/jenkins.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "log" "net/http" "strconv" "strings" @@ -29,6 +28,8 @@ type Jenkins struct { tls.ClientConfig client *client + Log telegraf.Logger + MaxConnections int `toml:"max_connections"` MaxBuildAge internal.Duration `toml:"max_build_age"` MaxSubJobDepth int `toml:"max_subjob_depth"` @@ -304,7 +305,7 @@ func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error { } if build.Building { - log.Printf("D! Ignore running build on %s, build %v", jr.name, number) + j.Log.Debugf("Ignore running build on %s, build %v", jr.name, number) return nil } diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go index 7724fc0e3..04aaffaad 100644 --- a/plugins/inputs/jenkins/jenkins_test.go +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -206,6 +206,7 @@ func TestGatherNodeData(t *testing.T) { ts := httptest.NewServer(test.input) defer ts.Close() j := &Jenkins{ + Log: testutil.Logger{}, URL: ts.URL, ResponseTimeout: internal.Duration{Duration: time.Microsecond}, NodeExclude: []string{"ignore-1", "ignore-2"}, @@ -258,6 +259,7 @@ func TestInitialize(t *testing.T) { { name: "bad jenkins config", input: &Jenkins{ + Log: testutil.Logger{}, URL: "http://a bad url", ResponseTimeout: internal.Duration{Duration: time.Microsecond}, }, @@ -266,6 +268,7 @@ func TestInitialize(t *testing.T) { { name: "has filter", input: &Jenkins{ + Log: testutil.Logger{}, URL: ts.URL, ResponseTimeout: internal.Duration{Duration: time.Microsecond}, JobExclude: []string{"job1", "job2"}, @@ -275,10 +278,12 @@ func TestInitialize(t *testing.T) { { name: "default config", input: &Jenkins{ + Log: testutil.Logger{}, URL: ts.URL, ResponseTimeout: internal.Duration{Duration: time.Microsecond}, }, output: &Jenkins{ + Log: testutil.Logger{}, MaxConnections: 5, MaxSubJobPerLayer: 10, }, @@ -570,6 +575,7 @@ func TestGatherJobs(t *testing.T) { ts := httptest.NewServer(test.input) defer ts.Close() j := &Jenkins{ + Log: testutil.Logger{}, URL: ts.URL, MaxBuildAge: internal.Duration{Duration: time.Hour}, ResponseTimeout: internal.Duration{Duration: time.Microsecond}, diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go index c30ef9bf4..39f9bb58a 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go @@ -2,7 +2,6 @@ package jti_openconfig_telemetry import ( "fmt" - "log" "net" "regexp" "strings" @@ -34,6 +33,8 @@ type OpenConfigTelemetry struct { EnableTLS bool `toml:"enable_tls"` internaltls.ClientConfig + Log telegraf.Logger + sensorsConfig []sensorConfig grpcClientConns []*grpc.ClientConn wg *sync.WaitGroup @@ -243,7 +244,7 @@ func (m *OpenConfigTelemetry) splitSensorConfig() int { } if len(spathSplit) == 0 { - log.Printf("E! No sensors are specified") + m.Log.Error("No sensors are specified") continue } @@ -257,7 +258,7 @@ func (m *OpenConfigTelemetry) splitSensorConfig() int { } if len(spathSplit) == 0 { - log.Printf("E! 
No valid sensors are specified") + m.Log.Error("No valid sensors are specified") continue } @@ -294,13 +295,13 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context, rpcStatus, _ := status.FromError(err) // If service is currently unavailable and may come back later, retry if rpcStatus.Code() != codes.Unavailable { - acc.AddError(fmt.Errorf("E! Could not subscribe to %s: %v", grpcServer, + acc.AddError(fmt.Errorf("could not subscribe to %s: %v", grpcServer, err)) return } else { // Retry with delay. If delay is not provided, use default if m.RetryDelay.Duration > 0 { - log.Printf("D! Retrying %s with timeout %v", grpcServer, + m.Log.Debugf("Retrying %s with timeout %v", grpcServer, m.RetryDelay.Duration) time.Sleep(m.RetryDelay.Duration) continue @@ -314,11 +315,11 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context, if err != nil { // If we encounter error in the stream, break so we can retry // the connection - acc.AddError(fmt.Errorf("E! Failed to read from %s: %v", err, grpcServer)) + acc.AddError(fmt.Errorf("failed to read from %s: %s", grpcServer, err)) break } - log.Printf("D! Received from %s: %v", grpcServer, r) + m.Log.Debugf("Received from %s: %v", grpcServer, r) // Create a point and add to batch tags := make(map[string]string) @@ -329,7 +330,7 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context, dgroups := m.extractData(r, grpcServer) // Print final data collection - log.Printf("D! Available collection for %s is: %v", grpcServer, dgroups) + m.Log.Debugf("Available collection for %s is: %v", grpcServer, dgroups) tnow := time.Now() // Iterate through data groups and add them @@ -349,10 +350,9 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context, } func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error { - // Build sensors config if m.splitSensorConfig() == 0 { - return fmt.Errorf("E! No valid sensor configuration available") + return fmt.Errorf("no valid sensor configuration available") } // Parse TLS config @@ -376,15 +376,15 @@ func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error { // Extract device address and port grpcServer, grpcPort, err := net.SplitHostPort(server) if err != nil { - log.Printf("E! Invalid server address: %v", err) + m.Log.Errorf("Invalid server address: %s", err.Error()) continue } grpcClientConn, err = grpc.Dial(server, opts...) if err != nil { - log.Printf("E! Failed to connect to %s: %v", server, err) + m.Log.Errorf("Failed to connect to %s: %s", server, err.Error()) } else { - log.Printf("D! Opened a new gRPC session to %s on port %s", grpcServer, grpcPort) + m.Log.Debugf("Opened a new gRPC session to %s on port %s", grpcServer, grpcPort) } // Add to the list of client connections @@ -396,13 +396,13 @@ func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error { &authentication.LoginRequest{UserName: m.Username, Password: m.Password, ClientId: m.ClientID}) if loginErr != nil { - log.Printf("E! Could not initiate login check for %s: %v", server, loginErr) + m.Log.Errorf("Could not initiate login check for %s: %v", server, loginErr) continue } // Check if the user is authenticated. Bail if auth error if !loginReply.Result { - log.Printf("E! 
Failed to authenticate the user for %s", server) + m.Log.Errorf("Failed to authenticate the user for %s", server) continue } } diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go index 8b0abd883..a3df62e1b 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go @@ -17,6 +17,7 @@ import ( ) var cfg = &OpenConfigTelemetry{ + Log: testutil.Logger{}, Servers: []string{"127.0.0.1:50051"}, SampleFrequency: internal.Duration{Duration: time.Second * 2}, } diff --git a/plugins/inputs/kafka_consumer_legacy/README.md b/plugins/inputs/kafka_consumer_legacy/README.md index 31976788b..8fc7ed295 100644 --- a/plugins/inputs/kafka_consumer_legacy/README.md +++ b/plugins/inputs/kafka_consumer_legacy/README.md @@ -13,12 +13,16 @@ from the same topic in parallel. [[inputs.kafka_consumer]] ## topic(s) to consume topics = ["telegraf"] + ## an array of Zookeeper connection strings zookeeper_peers = ["localhost:2181"] + ## Zookeeper Chroot zookeeper_chroot = "" + ## the name of the consumer group consumer_group = "telegraf_metrics_consumers" + ## Offset (must be either "oldest" or "newest") offset = "oldest" diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go index d9558d5bd..939fc8850 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go @@ -2,7 +2,6 @@ package kafka_consumer_legacy import ( "fmt" - "log" "strings" "sync" @@ -30,6 +29,8 @@ type Kafka struct { Offset string parser parsers.Parser + Log telegraf.Logger + sync.Mutex // channel for all incoming kafka messages @@ -49,12 +50,16 @@ type Kafka struct { var sampleConfig = ` ## topic(s) to consume topics = ["telegraf"] + ## an array of Zookeeper connection strings zookeeper_peers = ["localhost:2181"] + ## Zookeeper Chroot zookeeper_chroot = "" + ## the name of the consumer group consumer_group = "telegraf_metrics_consumers" + ## Offset (must be either "oldest" or "newest") offset = "oldest" @@ -96,7 +101,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { case "newest": config.Offsets.Initial = sarama.OffsetNewest default: - log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n", + k.Log.Infof("WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n", k.Offset) config.Offsets.Initial = sarama.OffsetOldest } @@ -121,7 +126,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { // Start the kafka message reader go k.receiver() - log.Printf("I! 
Started the kafka consumer service, peers: %v, topics: %v\n", + k.Log.Infof("Started the kafka consumer service, peers: %v, topics: %v", k.ZookeeperPeers, k.Topics) return nil } diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go index 60404cfac..31bea2210 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go @@ -37,6 +37,7 @@ func TestReadsMetricsFromKafka(t *testing.T) { // Start the Kafka Consumer k := &Kafka{ + Log: testutil.Logger{}, ConsumerGroup: "telegraf_test_consumers", Topics: []string{testTopic}, ZookeeperPeers: zkPeers, diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go index 38bc48290..8037f49a0 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go @@ -21,6 +21,7 @@ const ( func newTestKafka() (*Kafka, chan *sarama.ConsumerMessage) { in := make(chan *sarama.ConsumerMessage, 1000) k := Kafka{ + Log: testutil.Logger{}, ConsumerGroup: "test", Topics: []string{"telegraf"}, ZookeeperPeers: []string{"localhost:2181"}, diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index b9b98243b..aec806da1 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -3,7 +3,6 @@ package kinesis_consumer import ( "context" "fmt" - "log" "math/big" "strings" "sync" @@ -40,6 +39,8 @@ type ( DynamoDB *DynamoDB `toml:"checkpoint_dynamodb"` MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + Log telegraf.Logger + cons *consumer.Consumer parser parsers.Parser cancel context.CancelFunc @@ -220,7 +221,7 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { }) if err != nil { k.cancel() - log.Printf("E! [inputs.kinesis_consumer] Scan encounterred an error - %s", err.Error()) + k.Log.Errorf("Scan encountered an error: %s", err.Error()) k.cons = nil } }() @@ -285,7 +286,7 @@ func (k *KinesisConsumer) onDelivery(ctx context.Context) { k.lastSeqNum = strToBint(sequenceNum) k.checkpoint.Set(chk.streamName, chk.shardID, sequenceNum) } else { - log.Println("D! [inputs.kinesis_consumer] Metric group failed to process") + k.Log.Debug("Metric group failed to process") } } } diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_state.go index b69ad47ac..19de9b882 100644 --- a/plugins/inputs/kube_inventory/kube_state.go +++ b/plugins/inputs/kube_inventory/kube_state.go @@ -114,11 +114,11 @@ var availableCollectors = map[string]func(ctx context.Context, acc telegraf.Accu "endpoints": collectEndpoints, "ingress": collectIngress, "nodes": collectNodes, - "persistentvolumes": collectPersistentVolumes, - "persistentvolumeclaims": collectPersistentVolumeClaims, "pods": collectPods, "services": collectServices, "statefulsets": collectStatefulSets, + "persistentvolumes": collectPersistentVolumes, + "persistentvolumeclaims": collectPersistentVolumeClaims, } func (ki *KubernetesInventory) initClient() (*client, error) { @@ -144,12 +144,12 @@ func atoi(s string) int64 { func convertQuantity(s string, m float64) int64 { q, err := resource.ParseQuantity(s) if err != nil { - log.Printf("E! 
Failed to parse quantity - %v", err) + log.Printf("D! [inputs.kube_inventory] failed to parse quantity: %s", err.Error()) return 0 } f, err := strconv.ParseFloat(fmt.Sprint(q.AsDec()), 64) if err != nil { - log.Printf("E! Failed to parse float - %v", err) + log.Printf("D! [inputs.kube_inventory] failed to parse float: %s", err.Error()) return 0 } if m < 1 { diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index c132ba7a2..0ce3ede04 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -4,7 +4,6 @@ package logparser import ( "fmt" - "log" "strings" "sync" @@ -14,7 +13,6 @@ import ( "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" - // Parsers ) const ( @@ -48,6 +46,8 @@ type LogParserPlugin struct { FromBeginning bool WatchMethod string + Log telegraf.Logger + tailers map[string]*tail.Tail offsets map[string]int64 lines chan logEntry @@ -207,7 +207,7 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error { for _, filepath := range l.Files { g, err := globpath.Compile(filepath) if err != nil { - log.Printf("E! [inputs.logparser] Error Glob %s failed to compile, %s", filepath, err) + l.Log.Errorf("Glob %q failed to compile: %s", filepath, err) continue } files := g.Match() @@ -221,7 +221,7 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error { var seek *tail.SeekInfo if !fromBeginning { if offset, ok := l.offsets[file]; ok { - log.Printf("D! [inputs.tail] using offset %d for file: %v", offset, file) + l.Log.Debugf("Using offset %d for file: %v", offset, file) seek = &tail.SeekInfo{ Whence: 0, Offset: offset, @@ -248,7 +248,7 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error { continue } - log.Printf("D! [inputs.logparser] tail added for file: %v", file) + l.Log.Debugf("Tail added for file: %v", file) // create a goroutine for each "tailer" l.wg.Add(1) @@ -269,7 +269,7 @@ func (l *LogParserPlugin) receiver(tailer *tail.Tail) { for line = range tailer.Lines { if line.Err != nil { - log.Printf("E! [inputs.logparser] Error tailing file %s, Error: %s", + l.Log.Errorf("Error tailing file %s, Error: %s", tailer.Filename, line.Err) continue } @@ -315,7 +315,7 @@ func (l *LogParserPlugin) parser() { l.acc.AddFields(m.Name(), m.Fields(), tags, m.Time()) } } else { - log.Println("E! [inputs.logparser] Error parsing log line: " + err.Error()) + l.Log.Errorf("Error parsing log line: %s", err.Error()) } } @@ -332,7 +332,7 @@ func (l *LogParserPlugin) Stop() { offset, err := t.Tell() if err == nil { l.offsets[t.Filename] = offset - log.Printf("D! [inputs.logparser] recording offset %d for file: %v", offset, t.Filename) + l.Log.Debugf("Recording offset %d for file: %v", offset, t.Filename) } else { l.acc.AddError(fmt.Errorf("error recording offset for file %s", t.Filename)) } @@ -340,10 +340,10 @@ func (l *LogParserPlugin) Stop() { err := t.Stop() //message for a stopped tailer - log.Printf("D! [inputs.logparser] tail dropped for file: %v", t.Filename) + l.Log.Debugf("Tail dropped for file: %v", t.Filename) if err != nil { - log.Printf("E! 
[inputs.logparser] Error stopping tail on file %s", t.Filename) + l.Log.Errorf("Error stopping tail on file %s", t.Filename) } } close(l.done) diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go index 90ae39161..1ecbd39ff 100644 --- a/plugins/inputs/logparser/logparser_test.go +++ b/plugins/inputs/logparser/logparser_test.go @@ -14,6 +14,7 @@ import ( func TestStartNoParsers(t *testing.T) { logparser := &LogParserPlugin{ + Log: testutil.Logger{}, FromBeginning: true, Files: []string{"testdata/*.log"}, } @@ -26,6 +27,7 @@ func TestGrokParseLogFilesNonExistPattern(t *testing.T) { thisdir := getCurrentDir() logparser := &LogParserPlugin{ + Log: testutil.Logger{}, FromBeginning: true, Files: []string{thisdir + "testdata/*.log"}, GrokConfig: GrokConfig{ @@ -43,6 +45,7 @@ func TestGrokParseLogFiles(t *testing.T) { thisdir := getCurrentDir() logparser := &LogParserPlugin{ + Log: testutil.Logger{}, GrokConfig: GrokConfig{ MeasurementName: "logparser_grok", Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, @@ -89,6 +92,7 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { thisdir := getCurrentDir() logparser := &LogParserPlugin{ + Log: testutil.Logger{}, FromBeginning: true, Files: []string{emptydir + "/*.log"}, GrokConfig: GrokConfig{ @@ -128,6 +132,7 @@ func TestGrokParseLogFilesOneBad(t *testing.T) { thisdir := getCurrentDir() logparser := &LogParserPlugin{ + Log: testutil.Logger{}, FromBeginning: true, Files: []string{thisdir + "testdata/test_a.log"}, GrokConfig: GrokConfig{ diff --git a/plugins/inputs/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go index db0004ce2..6e4ec2d4f 100644 --- a/plugins/inputs/mailchimp/chimp_api.go +++ b/plugins/inputs/mailchimp/chimp_api.go @@ -134,7 +134,7 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { req.URL.RawQuery = params.String() req.Header.Set("User-Agent", "Telegraf-MailChimp-Plugin") if api.Debug { - log.Printf("D! Request URL: %s", req.URL.String()) + log.Printf("D! [inputs.mailchimp] request URL: %s", req.URL.String()) } resp, err := client.Do(req) @@ -148,7 +148,7 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { return nil, err } if api.Debug { - log.Printf("D! Response Body:%s", string(body)) + log.Printf("D! [inputs.mailchimp] response Body: %q", string(body)) } if err = chimpErrorCheck(body); err != nil { diff --git a/plugins/inputs/mesos/README.md b/plugins/inputs/mesos/README.md index b9a46eaa9..284588188 100644 --- a/plugins/inputs/mesos/README.md +++ b/plugins/inputs/mesos/README.md @@ -10,8 +10,10 @@ For more information, please check the [Mesos Observability Metrics](http://meso [[inputs.mesos]] ## Timeout, in ms. timeout = 100 + ## A list of Mesos masters. masters = ["http://localhost:5050"] + ## Master metrics groups to be collected, by default, all enabled. master_collections = [ "resources", @@ -26,8 +28,10 @@ For more information, please check the [Mesos Observability Metrics](http://meso "registrar", "allocator", ] + ## A list of Mesos slaves, default is [] # slaves = [] + ## Slave metrics groups to be collected, by default, all enabled. 
# slave_collections = [ # "resources", diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 3e0e25691..741dd73dc 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -32,9 +32,10 @@ type Mesos struct { MasterCols []string `toml:"master_collections"` Slaves []string SlaveCols []string `toml:"slave_collections"` - //SlaveTasks bool tls.ClientConfig + Log telegraf.Logger + initialized bool client *http.Client masterURLs []*url.URL @@ -49,8 +50,10 @@ var allMetrics = map[Role][]string{ var sampleConfig = ` ## Timeout, in ms. timeout = 100 + ## A list of Mesos masters. masters = ["http://localhost:5050"] + ## Master metrics groups to be collected, by default, all enabled. master_collections = [ "resources", @@ -65,8 +68,10 @@ var sampleConfig = ` "registrar", "allocator", ] + ## A list of Mesos slaves, default is [] # slaves = [] + ## Slave metrics groups to be collected, by default, all enabled. # slave_collections = [ # "resources", @@ -110,7 +115,7 @@ func parseURL(s string, role Role) (*url.URL, error) { } s = "http://" + host + ":" + port - log.Printf("W! [inputs.mesos] Using %q as connection URL; please update your configuration to use an URL", s) + log.Printf("W! [inputs.mesos] using %q as connection URL; please update your configuration to use an URL", s) } return url.Parse(s) @@ -126,7 +131,7 @@ func (m *Mesos) initialize() error { } if m.Timeout == 0 { - log.Println("I! [inputs.mesos] Missing timeout value, setting default value (100ms)") + m.Log.Info("Missing timeout value, setting default value (100ms)") m.Timeout = 100 } @@ -191,17 +196,6 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error { wg.Done() return }(slave) - - // if !m.SlaveTasks { - // continue - // } - - // wg.Add(1) - // go func(c string) { - // acc.AddError(m.gatherSlaveTaskMetrics(slave, acc)) - // wg.Done() - // return - // }(v) } wg.Wait() @@ -487,7 +481,7 @@ func getMetrics(role Role, group string) []string { ret, ok := m[group] if !ok { - log.Printf("I! [mesos] Unknown %s metrics group: %s\n", role, group) + log.Printf("I! 
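The mesos `parseURL` hunk above preserves a recurring backwards-compatibility shim (also visible in the mongodb, mqtt_consumer, and redis hunks in this series): bare `host:port` values from old configs are given a default scheme before `url.Parse`, with a warning asking the user to move to URL syntax. A simplified, self-contained sketch of that shim; `normalizeServer` is an illustrative name, and the real plugins differ in details such as splitting host and port first.

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// normalizeServer prepends a default scheme to bare host:port values so
// url.Parse does not misread the host as the scheme, warning the user to
// update the config (the plugins emit this via their injected logger).
func normalizeServer(s, defaultScheme string) (*url.URL, error) {
	if !strings.Contains(s, "://") {
		s = defaultScheme + "://" + s
		fmt.Printf("W! using %q as connection URL; please update your configuration to use a URL\n", s)
	}
	return url.Parse(s)
}

func main() {
	u, err := normalizeServer("localhost:5050", "http")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Scheme, u.Hostname(), u.Port()) // http localhost 5050
}
```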
[inputs.mesos] unknown %s metrics group: %q", role, group) return []string{} } diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index 066d5b971..e25f250c8 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -349,6 +349,7 @@ func TestMesosMaster(t *testing.T) { var acc testutil.Accumulator m := Mesos{ + Log: testutil.Logger{}, Masters: []string{masterTestServer.Listener.Addr().String()}, Timeout: 10, } @@ -364,6 +365,7 @@ func TestMesosMaster(t *testing.T) { func TestMasterFilter(t *testing.T) { m := Mesos{ + Log: testutil.Logger{}, MasterCols: []string{ "resources", "master", "registrar", "allocator", }, @@ -416,6 +418,7 @@ func TestMesosSlave(t *testing.T) { var acc testutil.Accumulator m := Mesos{ + Log: testutil.Logger{}, Masters: []string{}, Slaves: []string{slaveTestServer.Listener.Addr().String()}, // SlaveTasks: true, @@ -433,6 +436,7 @@ func TestMesosSlave(t *testing.T) { func TestSlaveFilter(t *testing.T) { m := Mesos{ + Log: testutil.Logger{}, SlaveCols: []string{ "resources", "agent", "tasks", }, diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 14fcce12e..b61bb671a 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -4,7 +4,6 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "log" "net" "net/url" "strings" @@ -25,6 +24,8 @@ type MongoDB struct { GatherColStats bool ColStatsDbs []string tlsint.ClientConfig + + Log telegraf.Logger } type Ssl struct { @@ -82,24 +83,24 @@ func (m *MongoDB) Gather(acc telegraf.Accumulator) error { // Preserve backwards compatibility for hostnames without a // scheme, broken in go 1.8. Remove in Telegraf 2.0 serv = "mongodb://" + serv - log.Printf("W! [inputs.mongodb] Using %q as connection URL; please update your configuration to use an URL", serv) + m.Log.Warnf("Using %q as connection URL; please update your configuration to use a URL", serv) m.Servers[i] = serv } u, err := url.Parse(serv) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse address %q: %s", serv, err)) + m.Log.Errorf("Unable to parse address %q: %s", serv, err.Error()) continue } if u.Host == "" { - acc.AddError(fmt.Errorf("Unable to parse address %q", serv)) + m.Log.Errorf("Unable to parse address %q", serv) continue } wg.Add(1) go func(srv *Server) { defer wg.Done() - acc.AddError(m.gatherServer(srv, acc)) + if err := m.gatherServer(srv, acc); err != nil { m.Log.Error(err.Error()) } }(m.getMongoServer(u)) } wg.Wait() @@ -110,6 +111,7 @@ func (m *MongoDB) Gather(acc telegraf.Accumulator) error { func (m *MongoDB) getMongoServer(url *url.URL) *Server { if _, ok := m.mongos[url.Host]; !ok { m.mongos[url.Host] = &Server{ + Log: m.Log, Url: url, } } @@ -126,8 +128,7 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error { } dialInfo, err := mgo.ParseURL(dialAddrs[0]) if err != nil { - return fmt.Errorf("Unable to parse URL (%s), %s\n", - dialAddrs[0], err.Error()) + return fmt.Errorf("unable to parse URL %q: %s", dialAddrs[0], err.Error()) } dialInfo.Direct = true dialInfo.Timeout = 5 * time.Second @@ -169,7 +170,7 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error { sess, err := mgo.DialWithInfo(dialInfo) if err != nil { - return fmt.Errorf("Unable to connect to MongoDB, %s\n", err.Error()) + return fmt.Errorf("unable to connect to MongoDB: %s", err.Error()) } server.Session = sess } diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index e6e66a2a4..d311a9058 100644 
--- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -1,7 +1,7 @@ package mongodb import ( - "log" + "fmt" "net/url" "strings" "time" @@ -15,6 +15,8 @@ type Server struct { Url *url.URL Session *mgo.Session lastResult *MongoStatus + + Log telegraf.Logger } func (s *Server) getDefaultTags() map[string]string { @@ -31,11 +33,11 @@ func IsAuthorization(err error) bool { return strings.Contains(err.Error(), "not authorized") } -func authLogLevel(err error) string { +func (s *Server) authLog(err error) { if IsAuthorization(err) { - return "D!" + s.Log.Debug(err.Error()) } else { - return "E!" + s.Log.Error(err.Error()) } } @@ -158,30 +160,30 @@ func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error) } results := &ColStats{} - for _, db_name := range names { - if stringInSlice(db_name, colStatsDbs) || len(colStatsDbs) == 0 { + for _, dbName := range names { + if stringInSlice(dbName, colStatsDbs) || len(colStatsDbs) == 0 { var colls []string - colls, err = s.Session.DB(db_name).CollectionNames() + colls, err = s.Session.DB(dbName).CollectionNames() if err != nil { - log.Printf("E! [inputs.mongodb] Error getting collection names: %v", err) + s.Log.Errorf("Error getting collection names: %s", err.Error()) continue } - for _, col_name := range colls { - col_stat_line := &ColStatsData{} - err = s.Session.DB(db_name).Run(bson.D{ + for _, colName := range colls { + colStatLine := &ColStatsData{} + err = s.Session.DB(dbName).Run(bson.D{ { Name: "collStats", - Value: col_name, + Value: colName, }, - }, col_stat_line) + }, colStatLine) if err != nil { - log.Printf("%s [inputs.mongodb] Error getting col stats from %q: %v", authLogLevel(err), col_name, err) + s.authLog(fmt.Errorf("error getting col stats from %q: %v", colName, err)) continue } collection := &Collection{ - Name: col_name, - DbName: db_name, - ColStatsData: col_stat_line, + Name: colName, + DbName: dbName, + ColStatsData: colStatLine, } results.Collections = append(results.Collections, *collection) } @@ -203,7 +205,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool, gather // member of a replica set. replSetStatus, err := s.gatherReplSetStatus() if err != nil { - log.Printf("D! [inputs.mongodb] Unable to gather replica set status: %v", err) + s.Log.Debugf("Unable to gather replica set status: %s", err.Error()) } // Gather the oplog if we are a member of a replica set. Non-replica set @@ -218,13 +220,12 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool, gather clusterStatus, err := s.gatherClusterStatus() if err != nil { - log.Printf("D! [inputs.mongodb] Unable to gather cluster status: %v", err) + s.Log.Debugf("Unable to gather cluster status: %s", err.Error()) } shardStats, err := s.gatherShardConnPoolStats() if err != nil { - log.Printf("%s [inputs.mongodb] Unable to gather shard connection pool stats: %v", - authLogLevel(err), err) + s.authLog(fmt.Errorf("unable to gather shard connection pool stats: %s", err.Error())) } var collectionStats *ColStats @@ -246,7 +247,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool, gather for _, name := range names { db, err := s.gatherDBStats(name) if err != nil { - log.Printf("D! 
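The new `authLog` helper above encodes a small policy worth calling out: MongoDB reports permission problems as errors containing "not authorized", which are expected on locked-down servers, so those are logged at debug level while everything else remains an error. A runnable sketch of the same classification, with `fmt` prints standing in for `s.Log.Debug` and `s.Log.Error`:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// isAuthorization mirrors the IsAuthorization helper in the hunk above.
func isAuthorization(err error) bool {
	return strings.Contains(err.Error(), "not authorized")
}

// authLog reproduces the pattern the patch introduces: demote expected
// authorization failures to debug, keep everything else at error level.
func authLog(err error) {
	if isAuthorization(err) {
		fmt.Println("D!", err)
	} else {
		fmt.Println("E!", err)
	}
}

func main() {
	authLog(errors.New(`error getting col stats from "system.profile": not authorized`))
	authLog(errors.New("connection reset by peer"))
}
```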
[inputs.mongodb] Error getting db stats from %q: %v", name, err) + s.Log.Debugf("Error getting db stats from %q: %s", name, err.Error()) } dbStats.Dbs = append(dbStats.Dbs, *db) } diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 7e3b43d44..5c59eda87 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "log" "strings" "time" @@ -61,6 +60,8 @@ type MQTTConsumer struct { ClientID string `toml:"client_id"` tls.ClientConfig + Log telegraf.Logger + clientFactory ClientFactory client Client opts *mqtt.ClientOptions @@ -212,7 +213,7 @@ func (m *MQTTConsumer) connect() error { return err } - log.Printf("I! [inputs.mqtt_consumer] Connected %v", m.Servers) + m.Log.Infof("Connected %v", m.Servers) m.state = Connected m.sem = make(semaphore, m.MaxUndeliveredMessages) m.messages = make(map[telegraf.TrackingID]bool) @@ -223,7 +224,7 @@ func (m *MQTTConsumer) connect() error { SessionPresent() bool } if t, ok := token.(sessionPresent); ok && t.SessionPresent() { - log.Printf("D! [inputs.mqtt_consumer] Session found %v", m.Servers) + m.Log.Debugf("Session found %v", m.Servers) return nil } @@ -244,7 +245,7 @@ func (m *MQTTConsumer) connect() error { func (m *MQTTConsumer) onConnectionLost(c mqtt.Client, err error) { m.acc.AddError(fmt.Errorf("connection lost: %v", err)) - log.Printf("D! [inputs.mqtt_consumer] Disconnected %v", m.Servers) + m.Log.Debugf("Disconnected %v", m.Servers) m.state = Disconnected return } @@ -292,9 +293,9 @@ func (m *MQTTConsumer) onMessage(acc telegraf.TrackingAccumulator, msg mqtt.Mess func (m *MQTTConsumer) Stop() { if m.state == Connected { - log.Printf("D! [inputs.mqtt_consumer] Disconnecting %v", m.Servers) + m.Log.Debugf("Disconnecting %v", m.Servers) m.client.Disconnect(200) - log.Printf("D! [inputs.mqtt_consumer] Disconnected %v", m.Servers) + m.Log.Debugf("Disconnected %v", m.Servers) m.state = Disconnected } m.cancel() @@ -303,7 +304,7 @@ func (m *MQTTConsumer) Stop() { func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error { if m.state == Disconnected { m.state = Connecting - log.Printf("D! [inputs.mqtt_consumer] Connecting %v", m.Servers) + m.Log.Debugf("Connecting %v", m.Servers) m.connect() } @@ -346,7 +347,7 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { for _, server := range m.Servers { // Preserve support for host:port style servers; deprecated in Telegraf 1.4.4 if !strings.Contains(server, "://") { - log.Printf("W! 
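The mqtt_consumer hunks above rely on a three-state connection flag: `Gather` lazily reconnects only after `onConnectionLost` has flipped the plugin back to Disconnected. A stripped-down sketch of that state machine; the `consumer` type and its methods are illustrative, and the real plugin additionally tracks sessions and subscriptions.

```go
package main

import "fmt"

// ConnectionState mirrors the tri-state tracking used by the hunks above.
type ConnectionState int

const (
	Disconnected ConnectionState = iota
	Connecting
	Connected
)

type consumer struct {
	state ConnectionState
}

func (c *consumer) connect() {
	// ... dial the broker; on success:
	c.state = Connected
	fmt.Println("D! connected")
}

// onConnectionLost is the broker callback that re-arms reconnection.
func (c *consumer) onConnectionLost(err error) {
	fmt.Println("D! disconnected:", err)
	c.state = Disconnected
}

// Gather runs every collection interval; reconnection is lazy.
func (c *consumer) Gather() {
	if c.state == Disconnected {
		c.state = Connecting
		c.connect()
	}
}

func main() {
	c := &consumer{state: Disconnected}
	c.Gather() // first interval establishes the connection
	c.onConnectionLost(fmt.Errorf("broker restarted"))
	c.Gather() // next interval reconnects
}
```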
[inputs.mqtt_consumer] Server %q should be updated to use `scheme://host:port` format", server) + m.Log.Warnf("Server %q should be updated to use `scheme://host:port` format", server) if tlsCfg == nil { server = "tcp://" + server } else { diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index cbc6ee986..4884fc050 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -102,6 +102,7 @@ func TestLifecycleSanity(t *testing.T) { }, } }) + plugin.Log = testutil.Logger{} plugin.Servers = []string{"tcp://127.0.0.1"} parser := &FakeParser{} @@ -124,10 +125,12 @@ func TestRandomClientID(t *testing.T) { var err error m1 := New(nil) + m1.Log = testutil.Logger{} err = m1.Init() require.NoError(t, err) m2 := New(nil) + m2.Log = testutil.Logger{} err = m2.Init() require.NoError(t, err) @@ -137,6 +140,7 @@ func TestRandomClientID(t *testing.T) { // PersistentSession requires ClientID func TestPersistentClientIDFail(t *testing.T) { plugin := New(nil) + plugin.Log = testutil.Logger{} plugin.PersistentSession = true err := plugin.Init() @@ -255,6 +259,7 @@ func TestTopicTag(t *testing.T) { plugin := New(func(o *mqtt.ClientOptions) Client { return client }) + plugin.Log = testutil.Logger{} plugin.Topics = []string{"telegraf"} plugin.TopicTag = tt.topicTag() @@ -295,6 +300,7 @@ func TestAddRouteCalledForEachTopic(t *testing.T) { plugin := New(func(o *mqtt.ClientOptions) Client { return client }) + plugin.Log = testutil.Logger{} plugin.Topics = []string{"a", "b"} err := plugin.Init() @@ -325,6 +331,7 @@ func TestSubscribeCalledIfNoSession(t *testing.T) { plugin := New(func(o *mqtt.ClientOptions) Client { return client }) + plugin.Log = testutil.Logger{} plugin.Topics = []string{"b"} err := plugin.Init() @@ -355,6 +362,7 @@ func TestSubscribeNotCalledIfSession(t *testing.T) { plugin := New(func(o *mqtt.ClientOptions) Client { return client }) + plugin.Log = testutil.Logger{} plugin.Topics = []string{"b"} err := plugin.Init() diff --git a/plugins/inputs/nats_consumer/README.md b/plugins/inputs/nats_consumer/README.md index 205578a17..7c1abab0b 100644 --- a/plugins/inputs/nats_consumer/README.md +++ b/plugins/inputs/nats_consumer/README.md @@ -12,8 +12,10 @@ instances of telegraf can read from a NATS cluster in parallel. [[inputs.nats_consumer]] ## urls of NATS servers servers = ["nats://localhost:4222"] + ## subject(s) to consume subjects = ["telegraf"] + ## name a queue group queue_group = "telegraf_consumers" diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index b82e3f3a6..eff726964 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -3,7 +3,6 @@ package natsconsumer import ( "context" "fmt" - "log" "sync" "github.com/influxdata/telegraf" @@ -40,6 +39,8 @@ type natsConsumer struct { Password string `toml:"password"` tls.ClientConfig + Log telegraf.Logger + // Client pending limits: PendingMessageLimit int `toml:"pending_message_limit"` PendingBytesLimit int `toml:"pending_bytes_limit"` @@ -68,6 +69,7 @@ var sampleConfig = ` ## subject(s) to consume subjects = ["telegraf"] + ## name a queue group queue_group = "telegraf_consumers" @@ -198,7 +200,7 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error { go n.receiver(ctx) }() - log.Printf("I! 
Started the NATS consumer service, nats: %v, subjects: %v, queue: %v\n", + n.Log.Infof("Started the NATS consumer service, nats: %v, subjects: %v, queue: %v", n.conn.ConnectedUrl(), n.Subjects, n.QueueGroup) return nil @@ -216,21 +218,21 @@ func (n *natsConsumer) receiver(ctx context.Context) { case <-n.acc.Delivered(): <-sem case err := <-n.errs: - n.acc.AddError(err) + n.Log.Error(err) case sem <- empty{}: select { case <-ctx.Done(): return case err := <-n.errs: <-sem - n.acc.AddError(err) + n.Log.Error(err) case <-n.acc.Delivered(): <-sem <-sem case msg := <-n.in: metrics, err := n.parser.Parse(msg.Data) if err != nil { - n.acc.AddError(fmt.Errorf("subject: %s, error: %s", msg.Subject, err.Error())) + n.Log.Errorf("Subject: %s, error: %s", msg.Subject, err.Error()) <-sem continue } @@ -244,8 +246,8 @@ func (n *natsConsumer) receiver(ctx context.Context) { func (n *natsConsumer) clean() { for _, sub := range n.subs { if err := sub.Unsubscribe(); err != nil { - n.acc.AddError(fmt.Errorf("Error unsubscribing from subject %s in queue %s: %s\n", - sub.Subject, sub.Queue, err.Error())) + n.Log.Errorf("Error unsubscribing from subject %s in queue %s: %s", + sub.Subject, sub.Queue, err.Error()) } } diff --git a/plugins/inputs/nsq_consumer/README.md b/plugins/inputs/nsq_consumer/README.md index 0dae26e8c..d1e7194bb 100644 --- a/plugins/inputs/nsq_consumer/README.md +++ b/plugins/inputs/nsq_consumer/README.md @@ -10,8 +10,10 @@ of the supported [input data formats][]. [[inputs.nsq_consumer]] ## Server option still works but is deprecated, we just prepend it to the nsqd array. # server = "localhost:4150" + ## An array representing the NSQD TCP HTTP Endpoints nsqd = ["localhost:4150"] + ## An array representing the NSQLookupd HTTP Endpoints nsqlookupd = ["localhost:4161"] topic = "telegraf" diff --git a/plugins/inputs/nsq_consumer/nsq_consumer.go b/plugins/inputs/nsq_consumer/nsq_consumer.go index de7572316..2c25cce7d 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer.go @@ -2,7 +2,6 @@ package nsq_consumer import ( "context" - "log" "sync" "github.com/influxdata/telegraf" @@ -18,10 +17,12 @@ const ( type empty struct{} type semaphore chan empty -type logger struct{} +type logger struct { + log telegraf.Logger +} func (l *logger) Output(calldepth int, s string) error { - log.Println("D! [inputs.nsq_consumer] " + s) + l.log.Debug(s) return nil } @@ -39,6 +40,8 @@ type NSQConsumer struct { parser parsers.Parser consumer *nsq.Consumer + Log telegraf.Logger + mu sync.Mutex messages map[telegraf.TrackingID]*nsq.Message wg sync.WaitGroup @@ -48,8 +51,10 @@ type NSQConsumer struct { var sampleConfig = ` ## Server option still works but is deprecated, we just prepend it to the nsqd array. 
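The small `logger` type in the nsq_consumer hunk above is an adapter: go-nsq's `Consumer.SetLogger` accepts anything with an `Output(calldepth int, s string) error` method, so the patch wraps the injected `telegraf.Logger` and forwards each line at debug level. A self-contained version of the same adapter pattern; the interface names are local stand-ins for the telegraf and go-nsq types.

```go
package main

import "fmt"

// telegrafLogger is the subset of telegraf.Logger the adapter needs.
type telegrafLogger interface {
	Debug(args ...interface{})
}

// nsqLogger is the shape go-nsq expects in Consumer.SetLogger.
type nsqLogger interface {
	Output(calldepth int, s string) error
}

// adapter forwards go-nsq's log lines to the plugin logger at debug level,
// exactly the shape of the logger type in the hunk above.
type adapter struct {
	log telegrafLogger
}

func (a *adapter) Output(calldepth int, s string) error {
	a.log.Debug(s)
	return nil
}

// stdoutLogger stands in for the injected telegraf.Logger.
type stdoutLogger struct{}

func (stdoutLogger) Debug(args ...interface{}) {
	fmt.Println(append([]interface{}{"D!"}, args...)...)
}

func main() {
	var l nsqLogger = &adapter{log: stdoutLogger{}}
	l.Output(2, "INF 1 [telegraf/consume] querying nsqlookupd http://localhost:4161")
}
```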
# server = "localhost:4150" + ## An array representing the NSQD TCP HTTP Endpoints nsqd = ["localhost:4150"] + ## An array representing the NSQLookupd HTTP Endpoints nsqlookupd = ["localhost:4161"] topic = "telegraf" @@ -98,7 +103,7 @@ func (n *NSQConsumer) Start(ac telegraf.Accumulator) error { n.cancel = cancel n.connect() - n.consumer.SetLogger(&logger{}, nsq.LogLevelInfo) + n.consumer.SetLogger(&logger{log: n.Log}, nsq.LogLevelInfo) n.consumer.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error { metrics, err := n.parser.Parse(message.Body) if err != nil { diff --git a/plugins/inputs/nsq_consumer/nsq_consumer_test.go b/plugins/inputs/nsq_consumer/nsq_consumer_test.go index 6558dfba2..1e8264d06 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer_test.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer_test.go @@ -36,6 +36,7 @@ func TestReadsMetricsFromNSQ(t *testing.T) { newMockNSQD(script, addr.String()) consumer := &NSQConsumer{ + Log: testutil.Logger{}, Server: "127.0.0.1:4155", Topic: "telegraf", Channel: "consume", diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 05e57583f..9a3457228 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -4,11 +4,9 @@ import ( "bytes" "fmt" "io/ioutil" - "log" "os" "strings" - // register in driver. _ "github.com/jackc/pgx/stdlib" "github.com/influxdata/telegraf" @@ -23,6 +21,8 @@ type Postgresql struct { AdditionalTags []string Query query Debug bool + + Log telegraf.Logger } type query []struct { @@ -186,7 +186,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { if p.Query[i].Version <= db_version { rows, err := p.DB.Query(sql_query) if err != nil { - acc.AddError(err) + p.Log.Error(err.Error()) continue } @@ -194,7 +194,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { // grab the column information from the result if columns, err = rows.Columns(); err != nil { - acc.AddError(err) + p.Log.Error(err.Error()) continue } @@ -209,7 +209,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { for rows.Next() { err = p.accRow(meas_name, rows, acc, columns) if err != nil { - acc.AddError(err) + p.Log.Error(err.Error()) break } } @@ -272,7 +272,7 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula fields := make(map[string]interface{}) COLUMN: for col, val := range columnMap { - log.Printf("D! 
postgresql_extensible: column: %s = %T: %v\n", col, *val, *val) + p.Log.Debugf("Column: %s = %T: %v\n", col, *val, *val) _, ignore := ignoredColumns[col] if ignore || *val == nil { continue @@ -290,7 +290,7 @@ COLUMN: case int64, int32, int: tags[col] = fmt.Sprintf("%d", v) default: - log.Println("failed to add additional tag", col) + p.Log.Debugf("Failed to add %q as additional tag", col) } continue COLUMN } diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go index 7fbc34302..757b468f2 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go @@ -13,6 +13,7 @@ import ( func queryRunner(t *testing.T, q query) *testutil.Accumulator { p := &Postgresql{ + Log: testutil.Logger{}, Service: postgresql.Service{ Address: fmt.Sprintf( "host=%s user=postgres sslmode=disable", @@ -232,6 +233,7 @@ func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { } p := &Postgresql{ + Log: testutil.Logger{}, Service: postgresql.Service{ Address: fmt.Sprintf( "host=%s user=postgres sslmode=disable", @@ -251,7 +253,10 @@ func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { } func TestAccRow(t *testing.T) { - p := Postgresql{} + p := Postgresql{ + Log: testutil.Logger{}, + } + var acc testutil.Accumulator columns := []string{"datname", "cat"} diff --git a/plugins/inputs/powerdns/powerdns.go b/plugins/inputs/powerdns/powerdns.go index e53373baf..3c661990c 100644 --- a/plugins/inputs/powerdns/powerdns.go +++ b/plugins/inputs/powerdns/powerdns.go @@ -110,8 +110,8 @@ func parseResponse(metrics string) map[string]interface{} { i, err := strconv.ParseInt(m[1], 10, 64) if err != nil { - log.Printf("E! powerdns: Error parsing integer for metric [%s]: %s", - metric, err) + log.Printf("E! [inputs.powerdns] error parsing integer for metric %q: %s", + metric, err.Error()) continue } values[m[0]] = i diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor.go b/plugins/inputs/powerdns_recursor/powerdns_recursor.go index 85c7cbcca..fe6ecb5fe 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor.go @@ -139,8 +139,8 @@ func parseResponse(metrics string) map[string]interface{} { i, err := strconv.ParseInt(m[1], 10, 64) if err != nil { - log.Printf("E! [inputs.powerdns_recursor] Error parsing integer for metric [%s] %v", - metric, err) + log.Printf("E! [inputs.powerdns_recursor] error parsing integer for metric %q: %s", + metric, err.Error()) continue } values[m[0]] = i diff --git a/plugins/inputs/processes/processes.go b/plugins/inputs/processes/processes.go index 379a9cb37..4421010d5 100644 --- a/plugins/inputs/processes/processes.go +++ b/plugins/inputs/processes/processes.go @@ -6,7 +6,6 @@ import ( "bytes" "fmt" "io/ioutil" - "log" "os" "os/exec" "path/filepath" @@ -23,6 +22,8 @@ type Processes struct { execPS func() ([]byte, error) readProcFile func(filename string) ([]byte, error) + Log telegraf.Logger + forcePS bool forceProc bool } @@ -124,8 +125,7 @@ func (p *Processes) gatherFromPS(fields map[string]interface{}) error { case '?': fields["unknown"] = fields["unknown"].(int64) + int64(1) default: - log.Printf("I! 
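The `accRow` hunk above (postgresql_extensible) converts arbitrary SQL column values into string tags, logging and skipping any type it cannot convert. A compact sketch of that conversion switch; `tagValue` is an illustrative helper with the same shape, not the plugin's function.

```go
package main

import "fmt"

// tagValue converts a database/sql column value into a string suitable for
// a metric tag; the bool return mirrors the plugin's "failed to add as
// additional tag" debug path.
func tagValue(val interface{}) (string, bool) {
	switch v := val.(type) {
	case string:
		return v, true
	case []byte:
		return string(v), true
	case int64, int32, int:
		return fmt.Sprintf("%d", v), true
	default:
		return "", false
	}
}

func main() {
	for _, v := range []interface{}{"postgres", []byte("mydb"), int64(5432), 3.14} {
		if s, ok := tagValue(v); ok {
			fmt.Printf("tag = %q\n", s)
		} else {
			fmt.Printf("D! failed to add %T value as additional tag\n", v)
		}
	}
}
```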
processes: Unknown state [ %s ] from ps", - string(status[0])) + p.Log.Infof("Unknown state %q from ps", string(status[0])) } fields["total"] = fields["total"].(int64) + int64(1) } @@ -184,14 +184,13 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error { } fields["parked"] = int64(1) default: - log.Printf("I! processes: Unknown state [ %s ] in file %s", - string(stats[0][0]), filename) + p.Log.Infof("Unknown state %q in file %q", string(stats[0][0]), filename) } fields["total"] = fields["total"].(int64) + int64(1) threads, err := strconv.Atoi(string(stats[17])) if err != nil { - log.Printf("I! processes: Error parsing thread count: %s", err) + p.Log.Infof("Error parsing thread count: %s", err.Error()) continue } fields["total_threads"] = fields["total_threads"].(int64) + int64(threads) diff --git a/plugins/inputs/processes/processes_test.go b/plugins/inputs/processes/processes_test.go index f9bad4b60..fa9ad62da 100644 --- a/plugins/inputs/processes/processes_test.go +++ b/plugins/inputs/processes/processes_test.go @@ -16,6 +16,7 @@ import ( func TestProcesses(t *testing.T) { processes := &Processes{ + Log: testutil.Logger{}, execPS: execPS, readProcFile: readProcFile, } @@ -35,6 +36,7 @@ func TestProcesses(t *testing.T) { func TestFromPS(t *testing.T) { processes := &Processes{ + Log: testutil.Logger{}, execPS: testExecPS, forcePS: true, } @@ -56,6 +58,7 @@ func TestFromPS(t *testing.T) { func TestFromPSError(t *testing.T) { processes := &Processes{ + Log: testutil.Logger{}, execPS: testExecPSError, forcePS: true, } @@ -71,6 +74,7 @@ func TestFromProcFiles(t *testing.T) { } tester := tester{} processes := &Processes{ + Log: testutil.Logger{}, readProcFile: tester.testProcFile, forceProc: true, } @@ -93,6 +97,7 @@ func TestFromProcFilesWithSpaceInCmd(t *testing.T) { } tester := tester{} processes := &Processes{ + Log: testutil.Logger{}, readProcFile: tester.testProcFile2, forceProc: true, } @@ -120,6 +125,7 @@ func TestParkedProcess(t *testing.T) { procstat := `88 (watchdog/13) P 2 0 0 0 -1 69238848 0 0 0 0 0 0 0 0 20 0 1 0 20 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 1 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ` plugin := &Processes{ + Log: testutil.Logger{}, readProcFile: func(string) ([]byte, error) { return []byte(procstat), nil }, diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index d92d90ead..617509384 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -68,7 +68,7 @@ func (p *Prometheus) start(ctx context.Context) error { case <-time.After(time.Second): err := p.watch(ctx, client) if err != nil { - log.Printf("E! [inputs.prometheus] unable to watch resources: %v", err) + p.Log.Errorf("Unable to watch resources: %s", err.Error()) } } } @@ -144,7 +144,7 @@ func registerPod(pod *corev1.Pod, p *Prometheus) { return } - log.Printf("D! [inputs.prometheus] will scrape metrics from %s", *targetURL) + log.Printf("D! [inputs.prometheus] will scrape metrics from %q", *targetURL) // add annotation as metrics tags tags := pod.GetMetadata().GetAnnotations() if tags == nil { @@ -158,7 +158,7 @@ func registerPod(pod *corev1.Pod, p *Prometheus) { } URL, err := url.Parse(*targetURL) if err != nil { - log.Printf("E! [inputs.prometheus] could not parse URL %s: %v", *targetURL, err) + log.Printf("E! 
[inputs.prometheus] could not parse URL %q: %s", *targetURL, err.Error()) return } podURL := p.AddressToURL(URL, URL.Hostname()) @@ -211,13 +211,13 @@ func unregisterPod(pod *corev1.Pod, p *Prometheus) { return } - log.Printf("D! [inputs.prometheus] registered a delete request for %s in namespace %s", + log.Printf("D! [inputs.prometheus] registered a delete request for %q in namespace %q", pod.GetMetadata().GetName(), pod.GetMetadata().GetNamespace()) p.lock.Lock() defer p.lock.Unlock() if _, ok := p.kubernetesPods[*url]; ok { delete(p.kubernetesPods, *url) - log.Printf("D! [inputs.prometheus] will stop scraping for %s", *url) + log.Printf("D! [inputs.prometheus] will stop scraping for %q", *url) } } diff --git a/plugins/inputs/prometheus/kubernetes_test.go b/plugins/inputs/prometheus/kubernetes_test.go index c1bbe0a1f..b926f7393 100644 --- a/plugins/inputs/prometheus/kubernetes_test.go +++ b/plugins/inputs/prometheus/kubernetes_test.go @@ -3,6 +3,7 @@ package prometheus import ( "testing" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" v1 "github.com/ericchiang/k8s/apis/core/v1" @@ -53,7 +54,7 @@ func TestScrapeURLAnnotationsCustomPathWithSep(t *testing.T) { } func TestAddPod(t *testing.T) { - prom := &Prometheus{} + prom := &Prometheus{Log: testutil.Logger{}} p := pod() p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} @@ -62,7 +63,7 @@ func TestAddPod(t *testing.T) { } func TestAddMultipleDuplicatePods(t *testing.T) { - prom := &Prometheus{} + prom := &Prometheus{Log: testutil.Logger{}} p := pod() p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} @@ -73,7 +74,7 @@ func TestAddMultipleDuplicatePods(t *testing.T) { } func TestAddMultiplePods(t *testing.T) { - prom := &Prometheus{} + prom := &Prometheus{Log: testutil.Logger{}} p := pod() p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} @@ -85,7 +86,7 @@ func TestAddMultiplePods(t *testing.T) { } func TestDeletePods(t *testing.T) { - prom := &Prometheus{} + prom := &Prometheus{Log: testutil.Logger{}} p := pod() p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 284114258..aeeec9265 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io/ioutil" - "log" "net" "net/http" "net/url" @@ -42,6 +41,8 @@ type Prometheus struct { tls.ClientConfig + Log telegraf.Logger + client *http.Client // Should we scrape Kubernetes services for prometheus annotations @@ -136,7 +137,7 @@ func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) { for _, u := range p.URLs { URL, err := url.Parse(u) if err != nil { - log.Printf("prometheus: Could not parse %s, skipping it. Error: %s", u, err.Error()) + p.Log.Errorf("Could not parse %q, skipping it. Error: %s", u, err.Error()) continue } allURLs[URL.String()] = URLAndAddress{URL: URL, OriginalURL: URL} @@ -157,7 +158,7 @@ func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) { resolvedAddresses, err := net.LookupHost(URL.Hostname()) if err != nil { - log.Printf("prometheus: Could not resolve %s, skipping it. Error: %s", URL.Host, err.Error()) + p.Log.Errorf("Could not resolve %q, skipping it. 
Error: %s", URL.Host, err.Error()) continue } for _, resolved := range resolvedAddresses { diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index ef3902fc9..f5a05b890 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -37,6 +37,7 @@ func TestPrometheusGeneratesMetrics(t *testing.T) { defer ts.Close() p := &Prometheus{ + Log: testutil.Logger{}, URLs: []string{ts.URL}, } @@ -60,6 +61,7 @@ func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) { defer ts.Close() p := &Prometheus{ + Log: testutil.Logger{}, KubernetesServices: []string{ts.URL}, } u, _ := url.Parse(ts.URL) @@ -89,6 +91,7 @@ func TestPrometheusGeneratesMetricsAlthoughFirstDNSFails(t *testing.T) { defer ts.Close() p := &Prometheus{ + Log: testutil.Logger{}, URLs: []string{ts.URL}, KubernetesServices: []string{"http://random.telegraf.local:88/metrics"}, } diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index 715b553c9..598c6c4f8 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -4,7 +4,6 @@ import ( "bufio" "fmt" "io" - "log" "net/url" "regexp" "strconv" @@ -23,6 +22,8 @@ type Redis struct { Password string tls.ClientConfig + Log telegraf.Logger + clients []Client initialized bool } @@ -101,13 +102,13 @@ func (r *Redis) init(acc telegraf.Accumulator) error { for i, serv := range r.Servers { if !strings.HasPrefix(serv, "tcp://") && !strings.HasPrefix(serv, "unix://") { - log.Printf("W! [inputs.redis]: server URL found without scheme; please update your configuration file") + r.Log.Warn("Server URL found without scheme; please update your configuration file") serv = "tcp://" + serv } u, err := url.Parse(serv) if err != nil { - return fmt.Errorf("Unable to parse to address %q: %v", serv, err) + return fmt.Errorf("unable to parse to address %q: %s", serv, err.Error()) } password := "" diff --git a/plugins/inputs/redis/redis_test.go b/plugins/inputs/redis/redis_test.go index e684225af..637b464f9 100644 --- a/plugins/inputs/redis/redis_test.go +++ b/plugins/inputs/redis/redis_test.go @@ -20,6 +20,7 @@ func TestRedisConnect(t *testing.T) { addr := fmt.Sprintf(testutil.GetLocalHost() + ":6379") r := &Redis{ + Log: testutil.Logger{}, Servers: []string{addr}, } diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go index b17f979d3..b69100596 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -3,7 +3,6 @@ package smart import ( "bufio" "fmt" - "log" "os/exec" "path" "regexp" @@ -120,6 +119,7 @@ type Smart struct { Devices []string UseSudo bool Timeout internal.Duration + Log telegraf.Logger } var sampleConfig = ` @@ -209,10 +209,10 @@ func (m *Smart) scan() ([]string, error) { for _, line := range strings.Split(string(out), "\n") { dev := strings.Split(line, " ") if len(dev) > 1 && !excludedDev(m.Excludes, strings.TrimSpace(dev[0])) { - log.Printf("D! [inputs.smart] adding device: %+#v", dev) + m.Log.Debugf("Adding device: %+#v", dev) devices = append(devices, strings.TrimSpace(dev[0])) } else { - log.Printf("D! 
[inputs.smart] skipping device: %+#v", dev) + m.Log.Debugf("Skipping device: %+#v", dev) } } return devices, nil diff --git a/plugins/inputs/smart/smart_test.go b/plugins/inputs/smart/smart_test.go index d66a31fea..b0085d3fc 100644 --- a/plugins/inputs/smart/smart_test.go +++ b/plugins/inputs/smart/smart_test.go @@ -15,6 +15,7 @@ import ( func TestGatherAttributes(t *testing.T) { s := NewSmart() + s.Log = testutil.Logger{} s.Path = "smartctl" s.Attributes = true @@ -330,6 +331,7 @@ func TestGatherAttributes(t *testing.T) { func TestGatherNoAttributes(t *testing.T) { s := NewSmart() + s.Log = testutil.Logger{} s.Path = "smartctl" s.Attributes = false diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 24250c22a..18eed4e47 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -90,7 +90,7 @@ func execCmd(arg0 string, args ...string) ([]byte, error) { for _, arg := range args { quoted = append(quoted, fmt.Sprintf("%q", arg)) } - log.Printf("D! [inputs.snmp] Executing %q %s", arg0, strings.Join(quoted, " ")) + log.Printf("D! [inputs.snmp] executing %q %s", arg0, strings.Join(quoted, " ")) } out, err := execCommand(arg0, args...).Output() diff --git a/plugins/inputs/snmp_legacy/snmp_legacy.go b/plugins/inputs/snmp_legacy/snmp_legacy.go index 57f9f4fe2..8df9cff06 100644 --- a/plugins/inputs/snmp_legacy/snmp_legacy.go +++ b/plugins/inputs/snmp_legacy/snmp_legacy.go @@ -1,7 +1,6 @@ package snmp_legacy import ( - "fmt" "io/ioutil" "log" "net" @@ -24,6 +23,8 @@ type Snmp struct { Subtable []Subtable SnmptranslateFile string + Log telegraf.Logger + nameToOid map[string]string initNode Node subTableMap map[string]Subtable @@ -297,7 +298,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { data, err := ioutil.ReadFile(s.SnmptranslateFile) if err != nil { - log.Printf("E! Reading SNMPtranslate file error: %s", err) + s.Log.Errorf("Reading SNMPtranslate file error: %s", err.Error()) return err } else { for _, line := range strings.Split(string(data), "\n") { @@ -395,16 +396,16 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { // only if len(s.OidInstanceMapping) == 0 if len(host.OidInstanceMapping) >= 0 { if err := host.SNMPMap(acc, s.nameToOid, s.subTableMap); err != nil { - acc.AddError(fmt.Errorf("E! SNMP Mapping error for host '%s': %s", host.Address, err)) + s.Log.Errorf("Mapping error for host %q: %s", host.Address, err.Error()) continue } } // Launch Get requests if err := host.SNMPGet(acc, s.initNode); err != nil { - acc.AddError(fmt.Errorf("E! SNMP Error for host '%s': %s", host.Address, err)) + s.Log.Errorf("Error for host %q: %s", host.Address, err.Error()) } if err := host.SNMPBulk(acc, s.initNode); err != nil { - acc.AddError(fmt.Errorf("E! SNMP Error for host '%s': %s", host.Address, err)) + s.Log.Errorf("Error for host %q: %s", host.Address, err.Error()) } } return nil @@ -801,7 +802,7 @@ func (h *Host) HandleResponse( acc.AddFields(field_name, fields, tags) case gosnmp.NoSuchObject, gosnmp.NoSuchInstance: // Oid not found - log.Printf("E! [snmp input] Oid not found: %s", oid_key) + log.Printf("E! 
[inputs.snmp_legacy] oid %q not found", oid_key) default: // delete other data } diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go index a127a0738..b5b4d0405 100644 --- a/plugins/inputs/socket_listener/socket_listener.go +++ b/plugins/inputs/socket_listener/socket_listener.go @@ -5,7 +5,6 @@ import ( "crypto/tls" "fmt" "io" - "log" "net" "os" "strconv" @@ -43,7 +42,7 @@ func (ssl *streamSocketListener) listen() { c, err := ssl.Accept() if err != nil { if !strings.HasSuffix(err.Error(), ": use of closed network connection") { - ssl.AddError(err) + ssl.Log.Error(err.Error()) } break } @@ -52,7 +51,7 @@ func (ssl *streamSocketListener) listen() { if srb, ok := c.(setReadBufferer); ok { srb.SetReadBuffer(int(ssl.ReadBufferSize.Size)) } else { - log.Printf("W! Unable to set read buffer on a %s socket", ssl.sockType) + ssl.Log.Warnf("Unable to set read buffer on a %s socket", ssl.sockType) } } @@ -66,7 +65,7 @@ func (ssl *streamSocketListener) listen() { ssl.connectionsMtx.Unlock() if err := ssl.setKeepAlive(c); err != nil { - ssl.AddError(fmt.Errorf("unable to configure keep alive (%s): %s", ssl.ServiceAddress, err)) + ssl.Log.Errorf("Unable to configure keep alive %q: %s", ssl.ServiceAddress, err.Error()) } wg.Add(1) @@ -122,7 +121,7 @@ func (ssl *streamSocketListener) read(c net.Conn) { } metrics, err := ssl.Parse(scnr.Bytes()) if err != nil { - ssl.AddError(fmt.Errorf("unable to parse incoming line: %s", err)) + ssl.Log.Errorf("Unable to parse incoming line: %s", err.Error()) // TODO rate limit continue } @@ -133,9 +132,9 @@ func (ssl *streamSocketListener) read(c net.Conn) { if err := scnr.Err(); err != nil { if netErr, ok := err.(net.Error); ok && netErr.Timeout() { - log.Printf("D! Timeout in plugin [input.socket_listener]: %s", err) + ssl.Log.Debugf("Timeout in plugin: %s", err.Error()) } else if netErr != nil && !strings.HasSuffix(err.Error(), ": use of closed network connection") { - ssl.AddError(err) + ssl.Log.Error(err.Error()) } } } @@ -151,14 +150,14 @@ func (psl *packetSocketListener) listen() { n, _, err := psl.ReadFrom(buf) if err != nil { if !strings.HasSuffix(err.Error(), ": use of closed network connection") { - psl.AddError(err) + psl.Log.Error(err.Error()) } break } metrics, err := psl.Parse(buf[:n]) if err != nil { - psl.AddError(fmt.Errorf("unable to parse incoming packet: %s", err)) + psl.Log.Errorf("Unable to parse incoming packet: %s", err.Error()) // TODO rate limit continue } @@ -179,6 +178,8 @@ type SocketListener struct { wg sync.WaitGroup + Log telegraf.Logger + parsers.Parser telegraf.Accumulator io.Closer @@ -292,7 +293,7 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { return err } - log.Printf("I! [inputs.socket_listener] Listening on %s://%s", protocol, l.Addr()) + sl.Log.Infof("Listening on %s://%s", protocol, l.Addr()) // Set permissions on socket if (spl[0] == "unix" || spl[0] == "unixpacket") && sl.SocketMode != "" { @@ -339,11 +340,11 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { if srb, ok := pc.(setReadBufferer); ok { srb.SetReadBuffer(int(sl.ReadBufferSize.Size)) } else { - log.Printf("W! Unable to set read buffer on a %s socket", protocol) + sl.Log.Warnf("Unable to set read buffer on a %s socket", protocol) } } - log.Printf("I! 
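// The socket_listener hunks downgrade per-line parse failures from accumulator errors to
// logged errors, so one noisy client cannot flood acc.AddError. A self-contained sketch
// of that read loop; parse stands in for the plugin's configured parser:
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// parse is a stand-in for the configured line parser.
func parse(line []byte) error {
	if strings.Contains(string(line), "malformed") {
		return fmt.Errorf("unexpected token")
	}
	return nil
}

func main() {
	input := "cpu value=1\nmalformed line\ncpu value=2\n"
	scnr := bufio.NewScanner(strings.NewReader(input))
	for scnr.Scan() {
		if err := parse(scnr.Bytes()); err != nil {
			// Log and keep reading instead of aborting the connection.
			fmt.Printf("E! unable to parse incoming line: %s\n", err.Error())
			continue
		}
	}
}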
[inputs.socket_listener] Listening on %s://%s", protocol, pc.LocalAddr()) + sl.Log.Infof("Listening on %s://%s", protocol, pc.LocalAddr()) psl := &packetSocketListener{ PacketConn: pc, diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index b4415e092..481a0c1a5 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -48,6 +48,7 @@ func TestSocketListener_tcp_tls(t *testing.T) { defer testEmptyLog(t)() sl := newSocketListener() + sl.Log = testutil.Logger{} sl.ServiceAddress = "tcp://127.0.0.1:0" sl.ServerConfig = *pki.TLSServerConfig() @@ -72,6 +73,7 @@ func TestSocketListener_unix_tls(t *testing.T) { sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix_tls.sock") sl := newSocketListener() + sl.Log = testutil.Logger{} sl.ServiceAddress = "unix://" + sock sl.ServerConfig = *pki.TLSServerConfig() @@ -94,6 +96,7 @@ func TestSocketListener_tcp(t *testing.T) { defer testEmptyLog(t)() sl := newSocketListener() + sl.Log = testutil.Logger{} sl.ServiceAddress = "tcp://127.0.0.1:0" sl.ReadBufferSize = internal.Size{Size: 1024} @@ -112,6 +115,7 @@ func TestSocketListener_udp(t *testing.T) { defer testEmptyLog(t)() sl := newSocketListener() + sl.Log = testutil.Logger{} sl.ServiceAddress = "udp://127.0.0.1:0" sl.ReadBufferSize = internal.Size{Size: 1024} @@ -136,6 +140,7 @@ func TestSocketListener_unix(t *testing.T) { os.Create(sock) sl := newSocketListener() + sl.Log = testutil.Logger{} sl.ServiceAddress = "unix://" + sock sl.ReadBufferSize = internal.Size{Size: 1024} @@ -160,6 +165,7 @@ func TestSocketListener_unixgram(t *testing.T) { os.Create(sock) sl := newSocketListener() + sl.Log = testutil.Logger{} sl.ServiceAddress = "unixgram://" + sock sl.ReadBufferSize = internal.Size{Size: 1024} diff --git a/plugins/inputs/stackdriver/stackdriver.go b/plugins/inputs/stackdriver/stackdriver.go index 4f4e35695..f8b4294b7 100644 --- a/plugins/inputs/stackdriver/stackdriver.go +++ b/plugins/inputs/stackdriver/stackdriver.go @@ -3,7 +3,6 @@ package stackdriver import ( "context" "fmt" - "log" "math" "strconv" "strings" @@ -128,6 +127,8 @@ type ( DistributionAggregationAligners []string `toml:"distribution_aggregation_aligners"` Filter *ListTimeSeriesFilter `toml:"filter"` + Log telegraf.Logger + client metricClient timeSeriesConfCache *timeSeriesConfCache prevEnd time.Time @@ -167,6 +168,7 @@ type ( // stackdriverMetricClient is a metric client for stackdriver stackdriverMetricClient struct { + log telegraf.Logger conn *monitoring.MetricClient listMetricDescriptorsCalls selfstat.Stat @@ -206,7 +208,7 @@ func (c *stackdriverMetricClient) ListMetricDescriptors( mdChan := make(chan *metricpb.MetricDescriptor, 1000) go func() { - log.Printf("D! [inputs.stackdriver] ListMetricDescriptors: %s", req.Filter) + c.log.Debugf("List metric descriptor request filter: %s", req.Filter) defer close(mdChan) // Iterate over metric descriptors and send them to buffered channel @@ -216,7 +218,7 @@ func (c *stackdriverMetricClient) ListMetricDescriptors( mdDesc, mdErr := mdResp.Next() if mdErr != nil { if mdErr != iterator.Done { - log.Printf("E! [inputs.stackdriver] Received error response: %s: %v", req, mdErr) + c.log.Errorf("Failed iterating metric descriptor responses: %q: %v", req.String(), mdErr) } break } @@ -235,7 +237,7 @@ func (c *stackdriverMetricClient) ListTimeSeries( tsChan := make(chan *monitoringpb.TimeSeries, 1000) go func() { - log.Printf("D! 
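// Both List* methods above stream results through a buffered channel that a goroutine
// fills until the iterator is exhausted. A self-contained sketch of that
// producer/consumer shape; errDone stands in for iterator.Done:
package main

import (
	"errors"
	"fmt"
)

var errDone = errors.New("done")

// makeIter simulates a paging iterator; next returns errDone when exhausted.
func makeIter(items []string) func() (string, error) {
	i := 0
	return func() (string, error) {
		if i >= len(items) {
			return "", errDone
		}
		i++
		return items[i-1], nil
	}
}

func main() {
	ch := make(chan string, 1000) // buffered so the producer rarely blocks
	next := makeIter([]string{"metric/a", "metric/b"})
	go func() {
		defer close(ch) // closing lets the consumer's range terminate
		for {
			v, err := next()
			if err != nil {
				if err != errDone {
					fmt.Printf("E! failed iterating responses: %v\n", err)
				}
				break
			}
			ch <- v
		}
	}()
	for v := range ch {
		fmt.Println("received", v)
	}
}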
[inputs.stackdriver] ListTimeSeries: %s", req.Filter) + c.log.Debugf("List time series request filter: %s", req.Filter) defer close(tsChan) // Iterate over timeseries and send them to buffered channel @@ -245,7 +247,7 @@ func (c *stackdriverMetricClient) ListTimeSeries( tsDesc, tsErr := tsResp.Next() if tsErr != nil { if tsErr != iterator.Done { - log.Printf("E! [inputs.stackdriver] Received error response: %s: %v", req, tsErr) + c.log.Errorf("Failed iterating time series responses: %q: %v", req.String(), tsErr) } break } @@ -458,6 +460,7 @@ func (s *Stackdriver) initializeStackdriverClient(ctx context.Context) error { "stackdriver", "list_timeseries_calls", tags) s.client = &stackdriverMetricClient{ + log: s.Log, conn: client, listMetricDescriptorsCalls: listMetricDescriptorsCalls, listTimeSeriesCalls: listTimeSeriesCalls, diff --git a/plugins/inputs/stackdriver/stackdriver_test.go b/plugins/inputs/stackdriver/stackdriver_test.go index 99e5deabd..348cd497b 100644 --- a/plugins/inputs/stackdriver/stackdriver_test.go +++ b/plugins/inputs/stackdriver/stackdriver_test.go @@ -640,6 +640,7 @@ func TestGather(t *testing.T) { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator s := &Stackdriver{ + Log: testutil.Logger{}, Project: "test", RateLimit: 10, GatherRawDistributionBuckets: true, @@ -775,6 +776,7 @@ func TestGatherAlign(t *testing.T) { } s := &Stackdriver{ + Log: testutil.Logger{}, Project: "test", RateLimit: 10, GatherRawDistributionBuckets: false, diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 107a6e388..a0d3c9ee7 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -5,7 +5,6 @@ import ( "bytes" "errors" "fmt" - "log" "net" "sort" "strconv" @@ -34,13 +33,6 @@ const ( MaxTCPConnections = 250 ) -var dropwarn = "E! [inputs.statsd] Error: statsd message queue full. " + - "We have dropped %d messages so far. " + - "You may want to increase allowed_pending_messages in the config\n" - -var malformedwarn = "E! [inputs.statsd] Statsd over TCP has received %d malformed packets" + - " thus far." - // Statsd allows the importing of statsd and dogstatsd data. type Statsd struct { // Protocol used on listener - udp or tcp @@ -133,6 +125,8 @@ type Statsd struct { PacketsRecv selfstat.Stat BytesRecv selfstat.Stat + Log telegraf.Logger + // A pool of byte slices to handle parsing bufPool sync.Pool } @@ -312,7 +306,7 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error { func (s *Statsd) Start(ac telegraf.Accumulator) error { if s.ParseDataDogTags { s.DataDogExtensions = true - log.Printf("W! [inputs.statsd] The parse_data_dog_tags option is deprecated, use datadog_extensions instead.") + s.Log.Warn("'parse_data_dog_tags' config option is deprecated, please use 'datadog_extensions' instead") } s.acc = ac @@ -350,8 +344,7 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error { } if s.ConvertNames { - log.Printf("W! [inputs.statsd] statsd: convert_names config option is deprecated," + - " please use metric_separator instead") + s.Log.Warn("'convert_names' config option is deprecated, please use 'metric_separator' instead") } if s.MetricSeparator == "" { @@ -369,7 +362,7 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error { return err } - log.Println("I! 
[inputs.statsd] Statsd UDP listener listening on: ", conn.LocalAddr().String()) + s.Log.Infof("UDP listening on %q", conn.LocalAddr().String()) s.UDPlistener = conn s.wg.Add(1) @@ -387,7 +380,7 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error { return err } - log.Println("I! [inputs.statsd] TCP Statsd listening on: ", listener.Addr().String()) + s.Log.Infof("TCP listening on %q", listener.Addr().String()) s.TCPlistener = listener s.wg.Add(1) @@ -403,7 +396,7 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error { defer s.wg.Done() s.parser() }() - log.Printf("I! [inputs.statsd] Started the statsd service on %s\n", s.ServiceAddress) + s.Log.Infof("Started the statsd service on %q", s.ServiceAddress) return nil } @@ -463,7 +456,7 @@ func (s *Statsd) udpListen(conn *net.UDPConn) error { n, addr, err := conn.ReadFromUDP(buf) if err != nil { if !strings.Contains(err.Error(), "closed network") { - log.Printf("E! [inputs.statsd] Error READ: %s\n", err.Error()) + s.Log.Errorf("Error reading: %s", err.Error()) continue } return err @@ -479,7 +472,9 @@ func (s *Statsd) udpListen(conn *net.UDPConn) error { default: s.drops++ if s.drops == 1 || s.AllowedPendingMessages == 0 || s.drops%s.AllowedPendingMessages == 0 { - log.Printf(dropwarn, s.drops) + s.Log.Errorf("Statsd message queue full. "+ "We have dropped %d messages so far. "+ "You may want to increase allowed_pending_messages in the config", s.drops) } } } @@ -540,8 +535,8 @@ func (s *Statsd) parseStatsdLine(line string) error { // Validate splitting the line on ":" bits := strings.Split(line, ":") if len(bits) < 2 { - log.Printf("E! [inputs.statsd] Error: splitting ':', Unable to parse metric: %s\n", line) - return errors.New("Error Parsing statsd line") + s.Log.Errorf("Splitting ':', unable to parse metric: %s", line) + return errors.New("error parsing statsd line") } // Extract bucket name from individual metric bits @@ -556,22 +551,22 @@ func (s *Statsd) parseStatsdLine(line string) error { // Validate splitting the bit on "|" pipesplit := strings.Split(bit, "|") if len(pipesplit) < 2 { - log.Printf("E! [inputs.statsd] Error: splitting '|', Unable to parse metric: %s\n", line) - return errors.New("Error Parsing statsd line") + s.Log.Errorf("Splitting '|', unable to parse metric: %s", line) + return errors.New("error parsing statsd line") } else if len(pipesplit) > 2 { sr := pipesplit[2] - errmsg := "E! [inputs.statsd] parsing sample rate, %s, it must be in format like: " + - "@0.1, @0.5, etc. Ignoring sample rate for line: %s\n" + if strings.Contains(sr, "@") && len(sr) > 1 { samplerate, err := strconv.ParseFloat(sr[1:], 64) if err != nil { - log.Printf(errmsg, err.Error(), line) + s.Log.Errorf("Parsing sample rate: %s", err.Error()) } else { // sample rate successfully parsed m.samplerate = samplerate } } else { - log.Printf(errmsg, "", line) + s.Log.Debugf("Sample rate must be in format like: "+ "@0.1, @0.5, etc. Ignoring sample rate for line: %s", line) } } @@ -580,15 +575,15 @@ func (s *Statsd) parseStatsdLine(line string) error { case "g", "c", "s", "ms", "h": m.mtype = pipesplit[1] default: - log.Printf("E! [inputs.statsd] Error: Statsd Metric type %s unsupported", pipesplit[1]) - return errors.New("Error Parsing statsd line") + s.Log.Errorf("Metric type %q unsupported", pipesplit[1]) + return errors.New("error parsing statsd line") } // Parse the value if strings.HasPrefix(pipesplit[0], "-") || strings.HasPrefix(pipesplit[0], "+") { if m.mtype != "g" && m.mtype != "c" { - log.Printf("E! 
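// The sample-rate branch above accepts an optional third "|"-separated field of the
// form "@0.1". A runnable sketch of just that parsing step; parseSampleRate is an
// illustrative helper, not the plugin's:
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSampleRate extracts the rate from a statsd field like "@0.5".
func parseSampleRate(sr string) (float64, error) {
	if !strings.Contains(sr, "@") || len(sr) <= 1 {
		return 0, fmt.Errorf("sample rate must be in format like @0.1, @0.5: %q", sr)
	}
	return strconv.ParseFloat(sr[1:], 64)
}

func main() {
	for _, field := range []string{"@0.5", "0.5", "@"} {
		rate, err := parseSampleRate(field)
		fmt.Println(field, rate, err)
	}
}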
[inputs.statsd] Error: +- values are only supported for gauges & counters: %s\n", line) - return errors.New("Error Parsing statsd line") + s.Log.Errorf("+- values are only supported for gauges & counters, unable to parse metric: %s", line) + return errors.New("error parsing statsd line") } m.additive = true } @@ -597,8 +592,8 @@ func (s *Statsd) parseStatsdLine(line string) error { case "g", "ms", "h": v, err := strconv.ParseFloat(pipesplit[0], 64) if err != nil { - log.Printf("E! [inputs.statsd] Error: parsing value to float64: %s\n", line) - return errors.New("Error Parsing statsd line") + s.Log.Errorf("Parsing value to float64, unable to parse metric: %s", line) + return errors.New("error parsing statsd line") } m.floatvalue = v case "c": @@ -607,8 +602,8 @@ func (s *Statsd) parseStatsdLine(line string) error { if err != nil { v2, err2 := strconv.ParseFloat(pipesplit[0], 64) if err2 != nil { - log.Printf("E! [inputs.statsd] Error: parsing value to int64: %s\n", line) - return errors.New("Error Parsing statsd line") + s.Log.Errorf("Parsing value to int64, unable to parse metric: %s", line) + return errors.New("error parsing statsd line") } v = int64(v2) } @@ -852,7 +847,9 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) { default: s.drops++ if s.drops == 1 || s.drops%s.AllowedPendingMessages == 0 { - log.Printf(dropwarn, s.drops) + s.Log.Errorf("Statsd message queue full. "+ + "We have dropped %d messages so far. "+ + "You may want to increase allowed_pending_messages in the config", s.drops) } } } @@ -862,9 +859,8 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) { // refuser refuses a TCP connection func (s *Statsd) refuser(conn *net.TCPConn) { conn.Close() - log.Printf("I! [inputs.statsd] Refused TCP Connection from %s", conn.RemoteAddr()) - log.Printf("I! [inputs.statsd] WARNING: Maximum TCP Connections reached, you may want to" + - " adjust max_tcp_connections") + s.Log.Infof("Refused TCP Connection from %s", conn.RemoteAddr()) + s.Log.Warn("Maximum TCP Connections reached, you may want to adjust max_tcp_connections") } // forget a TCP connection @@ -883,7 +879,7 @@ func (s *Statsd) remember(id string, conn *net.TCPConn) { func (s *Statsd) Stop() { s.Lock() - log.Println("I! [inputs.statsd] Stopping the statsd service") + s.Log.Infof("Stopping the statsd service") close(s.done) if s.isUDP() { s.UDPlistener.Close() @@ -909,7 +905,7 @@ func (s *Statsd) Stop() { s.Lock() close(s.in) - log.Println("I! 
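// The queue-full warning above is deliberately rate limited: it fires on the first drop
// and then once every AllowedPendingMessages drops. A small sketch of that guard:
package main

import "fmt"

type listener struct {
	drops                  int
	allowedPendingMessages int
}

func (l *listener) recordDrop() {
	l.drops++
	if l.drops == 1 || l.drops%l.allowedPendingMessages == 0 {
		fmt.Printf("E! message queue full, dropped %d messages so far\n", l.drops)
	}
}

func main() {
	l := &listener{allowedPendingMessages: 3}
	for i := 0; i < 7; i++ {
		l.recordDrop() // logs at drops 1, 3, and 6
	}
}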
Stopped Statsd listener service on ", s.ServiceAddress) + s.Log.Infof("Stopped listener service on %q", s.ServiceAddress) s.Unlock() } diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index e629f164f..ae025feec 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -18,7 +18,7 @@ const ( ) func NewTestStatsd() *Statsd { - s := Statsd{} + s := Statsd{Log: testutil.Logger{}} // Make data structures s.done = make(chan struct{}) @@ -36,6 +36,7 @@ // Test that MaxTCPConections is respected func TestConcurrentConns(t *testing.T) { listener := Statsd{ + Log: testutil.Logger{}, Protocol: "tcp", ServiceAddress: "localhost:8125", AllowedPendingMessages: 10000, @@ -66,6 +67,7 @@ // Test that MaxTCPConections is respected when max==1 func TestConcurrentConns1(t *testing.T) { listener := Statsd{ + Log: testutil.Logger{}, Protocol: "tcp", ServiceAddress: "localhost:8125", AllowedPendingMessages: 10000, @@ -94,6 +96,7 @@ // Test that MaxTCPConections is respected func TestCloseConcurrentConns(t *testing.T) { listener := Statsd{ + Log: testutil.Logger{}, Protocol: "tcp", ServiceAddress: "localhost:8125", AllowedPendingMessages: 10000, @@ -115,6 +118,7 @@ // benchmark how long it takes to accept & process 100,000 metrics: func BenchmarkUDP(b *testing.B) { listener := Statsd{ + Log: testutil.Logger{}, Protocol: "udp", ServiceAddress: "localhost:8125", AllowedPendingMessages: 250000, @@ -145,6 +149,7 @@ // benchmark how long it takes to accept & process 100,000 metrics: func BenchmarkTCP(b *testing.B) { listener := Statsd{ + Log: testutil.Logger{}, Protocol: "tcp", ServiceAddress: "localhost:8125", AllowedPendingMessages: 250000, @@ -1625,6 +1630,7 @@ func testValidateGauge( func TestTCP(t *testing.T) { statsd := Statsd{ + Log: testutil.Logger{}, Protocol: "tcp", ServiceAddress: "localhost:0", AllowedPendingMessages: 10000, diff --git a/plugins/inputs/sysstat/README.md b/plugins/inputs/sysstat/README.md index d8e0e95d8..9775c1a30 100644 --- a/plugins/inputs/sysstat/README.md +++ b/plugins/inputs/sysstat/README.md @@ -16,18 +16,15 @@ the created binary data file with the `sadf` utility. ## On Debian and Arch Linux the default path is /usr/lib/sa/sadc whereas ## on RHEL and CentOS the default path is /usr/lib64/sa/sadc sadc_path = "/usr/lib/sa/sadc" # required - # - # + ## Path to the sadf command, if it is not in PATH # sadf_path = "/usr/bin/sadf" - # - # + ## Activities is a list of activities, that are passed as argument to the ## sadc collector utility (e.g: DISK, SNMP etc...) ## The more activities that are added, the more data is collected. # activities = ["DISK"] - # - # + ## Group metrics to measurements. ## ## If group is false each metric will be prefixed with a description @@ -35,8 +32,7 @@ the created binary data file with the `sadf` utility. ## ## If Group is true, corresponding metrics are grouped to a single measurement. # group = true - # - # + ## Options for the sadf command. The values on the left represent the sadf options and ## the values on the right their description (which are used for grouping and prefixing metrics). ## @@ -58,8 +54,7 @@ the created binary data file with the `sadf` utility. 
-w = "task" # -H = "hugepages" # only available for newer linux distributions # "-I ALL" = "interrupts" # requires INT activity - # - # + ## Device tags can be used to add additional tags for devices. For example the configuration below ## adds a tag vg with value rootvg for all metrics with sda devices. # [[inputs.sysstat.device_tags.sda]] diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go index f1778fd6a..9f530024b 100644 --- a/plugins/inputs/sysstat/sysstat.go +++ b/plugins/inputs/sysstat/sysstat.go @@ -7,7 +7,6 @@ import ( "encoding/csv" "fmt" "io" - "log" "os" "os/exec" "path" @@ -67,6 +66,8 @@ type Sysstat struct { DeviceTags map[string][]map[string]string `toml:"device_tags"` tmpFile string interval int + + Log telegraf.Logger } func (*Sysstat) Description() string { @@ -81,18 +82,15 @@ var sampleConfig = ` ## Arch: /usr/lib/sa/sadc ## RHEL/CentOS: /usr/lib64/sa/sadc sadc_path = "/usr/lib/sa/sadc" # required - # - # + ## Path to the sadf command, if it is not in PATH # sadf_path = "/usr/bin/sadf" - # - # + ## Activities is a list of activities, that are passed as argument to the ## sadc collector utility (e.g: DISK, SNMP etc...) ## The more activities that are added, the more data is collected. # activities = ["DISK"] - # - # + ## Group metrics to measurements. ## ## If group is false each metric will be prefixed with a description @@ -100,8 +98,7 @@ var sampleConfig = ` ## ## If Group is true, corresponding metrics are grouped to a single measurement. # group = true - # - # + ## Options for the sadf command. The values on the left represent the sadf ## options and the values on the right their description (which are used for ## grouping and prefixing metrics). @@ -125,8 +122,7 @@ var sampleConfig = ` -w = "task" # -H = "hugepages" # only available for newer linux distributions # "-I ALL" = "interrupts" # requires INT activity - # - # + ## Device tags can be used to add additional tags for devices. ## For example the configuration below adds a tag vg with value rootvg for ## all metrics with sda devices. @@ -196,7 +192,7 @@ func (s *Sysstat) collect() error { out, err := internal.CombinedOutputTimeout(cmd, time.Second*time.Duration(collectInterval+parseInterval)) if err != nil { if err := os.Remove(s.tmpFile); err != nil { - log.Printf("E! 
failed to remove tmp file after %s command: %s", strings.Join(cmd.Args, " "), err) + s.Log.Errorf("Failed to remove tmp file after %q command: %s", strings.Join(cmd.Args, " "), err.Error()) } return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) } diff --git a/plugins/inputs/sysstat/sysstat_test.go b/plugins/inputs/sysstat/sysstat_test.go index 1674f2747..4aecfaacc 100644 --- a/plugins/inputs/sysstat/sysstat_test.go +++ b/plugins/inputs/sysstat/sysstat_test.go @@ -13,6 +13,7 @@ import ( ) var s = Sysstat{ + Log: testutil.Logger{}, interval: 10, Sadc: "/usr/lib/sa/sadc", Sadf: "/usr/bin/sadf", diff --git a/plugins/inputs/system/system.go b/plugins/inputs/system/system.go index 82e6b6db0..32747cca2 100644 --- a/plugins/inputs/system/system.go +++ b/plugins/inputs/system/system.go @@ -4,7 +4,6 @@ import ( "bufio" "bytes" "fmt" - "log" "os" "strings" "time" @@ -16,20 +15,22 @@ import ( "github.com/shirou/gopsutil/load" ) -type SystemStats struct{} +type SystemStats struct { + Log telegraf.Logger +} -func (_ *SystemStats) Description() string { +func (*SystemStats) Description() string { return "Read metrics about system load & uptime" } -func (_ *SystemStats) SampleConfig() string { +func (*SystemStats) SampleConfig() string { return ` ## Uncomment to remove deprecated metrics. # fielddrop = ["uptime_format"] ` } -func (_ *SystemStats) Gather(acc telegraf.Accumulator) error { +func (s *SystemStats) Gather(acc telegraf.Accumulator) error { loadavg, err := load.Avg() if err != nil && !strings.Contains(err.Error(), "not implemented") { return err @@ -51,9 +52,9 @@ func (_ *SystemStats) Gather(acc telegraf.Accumulator) error { if err == nil { fields["n_users"] = len(users) } else if os.IsNotExist(err) { - log.Printf("D! [inputs.system] Error reading users: %v", err) + s.Log.Debugf("Reading users: %s", err.Error()) } else if os.IsPermission(err) { - log.Printf("D! [inputs.system] %v", err) + s.Log.Debug(err.Error()) } now := time.Now() diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index da5b81a60..0b2e2628b 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -3,8 +3,6 @@ package tail import ( - "fmt" - "log" "strings" "sync" @@ -31,6 +29,8 @@ type Tail struct { Pipe bool WatchMethod string + Log telegraf.Logger + tailers map[string]*tail.Tail offsets map[string]int64 parserFunc parsers.ParserFunc @@ -124,7 +124,7 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error { for _, filepath := range t.Files { g, err := globpath.Compile(filepath) if err != nil { - t.acc.AddError(fmt.Errorf("glob %s failed to compile, %s", filepath, err)) + t.Log.Errorf("Glob %q failed to compile: %s", filepath, err.Error()) } for _, file := range g.Match() { if _, ok := t.tailers[file]; ok { @@ -135,7 +135,7 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error { var seek *tail.SeekInfo if !t.Pipe && !fromBeginning { if offset, ok := t.offsets[file]; ok { - log.Printf("D! [inputs.tail] using offset %d for file: %v", offset, file) + t.Log.Debugf("Using offset %d for %q", offset, file) seek = &tail.SeekInfo{ Whence: 0, Offset: offset, @@ -163,11 +163,11 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error { continue } - log.Printf("D! 
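// tail resumes files from remembered positions: Stop() records each tailer's offset and
// tailNewFiles() seeks back to it. A sketch of that bookkeeping with plain os.File
// standing in for the tail library:
package main

import (
	"fmt"
	"io"
	"os"
)

// offsets maps file path to the byte position to resume from.
var offsets = map[string]int64{}

func openAtOffset(path string) (*os.File, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	if off, ok := offsets[path]; ok {
		fmt.Printf("D! using offset %d for %q\n", off, path)
		if _, err := f.Seek(off, io.SeekStart); err != nil {
			f.Close()
			return nil, err
		}
	}
	return f, nil
}

func rememberOffset(path string, f *os.File) {
	if off, err := f.Seek(0, io.SeekCurrent); err == nil {
		fmt.Printf("D! recording offset %d for %q\n", off, path)
		offsets[path] = off
	}
}

func main() {
	f, _ := os.CreateTemp("", "tail_example")
	defer os.Remove(f.Name())
	f.WriteString("line one\n")
	if r, err := openAtOffset(f.Name()); err == nil {
		rememberOffset(f.Name(), r)
		r.Close()
	}
}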
[inputs.tail] tail added for file: %v", file) + t.Log.Debugf("Tail added for %q", file) parser, err := t.parserFunc() if err != nil { - t.acc.AddError(fmt.Errorf("error creating parser: %v", err)) + t.Log.Errorf("Creating parser: %s", err.Error()) } // create a goroutine for each "tailer" @@ -213,7 +213,7 @@ func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) { var firstLine = true for line := range tailer.Lines { if line.Err != nil { - t.acc.AddError(fmt.Errorf("error tailing file %s, Error: %s", tailer.Filename, line.Err)) + t.Log.Errorf("Tailing %q: %s", tailer.Filename, line.Err.Error()) continue } // Fix up files with Windows line endings. @@ -221,8 +221,8 @@ func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) { metrics, err := parseLine(parser, text, firstLine) if err != nil { - t.acc.AddError(fmt.Errorf("malformed log line in %s: [%s], Error: %s", - tailer.Filename, line.Text, err)) + t.Log.Errorf("Malformed log line in %q: [%q]: %s", + tailer.Filename, line.Text, err.Error()) continue } firstLine = false @@ -233,10 +233,10 @@ func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) { } } - log.Printf("D! [inputs.tail] tail removed for file: %v", tailer.Filename) + t.Log.Debugf("Tail removed for %q", tailer.Filename) if err := tailer.Err(); err != nil { - t.acc.AddError(fmt.Errorf("error tailing file %s, Error: %s", tailer.Filename, err)) + t.Log.Errorf("Tailing %q: %s", tailer.Filename, err.Error()) } } @@ -249,14 +249,14 @@ func (t *Tail) Stop() { // store offset for resume offset, err := tailer.Tell() if err == nil { - log.Printf("D! [inputs.tail] recording offset %d for file: %v", offset, tailer.Filename) + t.Log.Debugf("Recording offset %d for %q", offset, tailer.Filename) } else { - t.acc.AddError(fmt.Errorf("error recording offset for file %s", tailer.Filename)) + t.Log.Errorf("Recording offset for %q: %s", tailer.Filename, err.Error()) } } err := tailer.Stop() if err != nil { - t.acc.AddError(fmt.Errorf("error stopping tail on file %s", tailer.Filename)) + t.Log.Errorf("Stopping tail on %q: %s", tailer.Filename, err.Error()) } } diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index 41db76cac..4b96e092f 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -1,7 +1,9 @@ package tail import ( + "bytes" "io/ioutil" + "log" "os" "runtime" "testing" @@ -28,6 +30,7 @@ func TestTailFromBeginning(t *testing.T) { require.NoError(t, err) tt := NewTail() + tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} tt.SetParserFunc(parsers.NewInfluxParser) @@ -61,6 +64,7 @@ func TestTailFromEnd(t *testing.T) { require.NoError(t, err) tt := NewTail() + tt.Log = testutil.Logger{} tt.Files = []string{tmpfile.Name()} tt.SetParserFunc(parsers.NewInfluxParser) defer tt.Stop() @@ -97,6 +101,7 @@ func TestTailBadLine(t *testing.T) { defer os.Remove(tmpfile.Name()) tt := NewTail() + tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} tt.SetParserFunc(parsers.NewInfluxParser) @@ -105,13 +110,17 @@ func TestTailBadLine(t *testing.T) { acc := testutil.Accumulator{} require.NoError(t, tt.Start(&acc)) + + buf := &bytes.Buffer{} + log.SetOutput(buf) + require.NoError(t, acc.GatherError(tt.Gather)) _, err = tmpfile.WriteString("cpu mytag= foo usage_idle= 100\n") require.NoError(t, err) - acc.WaitError(1) - assert.Contains(t, acc.Errors[0].Error(), "malformed log line") + time.Sleep(500 * time.Millisecond) + assert.Contains(t, buf.String(), 
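// With parse errors now routed through the logger instead of the accumulator,
// TestTailBadLine asserts on captured log output. A minimal sketch of that capture
// technique with the standard log package:
package main

import (
	"bytes"
	"fmt"
	"log"
	"strings"
)

func main() {
	buf := &bytes.Buffer{}
	log.SetOutput(buf) // redirect the global logger for the duration of the test
	log.Printf("Malformed log line in %q", "/tmp/example")
	if strings.Contains(buf.String(), "Malformed log line") {
		fmt.Println("assertion would pass")
	}
}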
"Malformed log line") } func TestTailDosLineendings(t *testing.T) { @@ -122,6 +131,7 @@ func TestTailDosLineendings(t *testing.T) { require.NoError(t, err) tt := NewTail() + tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} tt.SetParserFunc(parsers.NewInfluxParser) @@ -160,6 +170,7 @@ cpu,42 require.NoError(t, err) plugin := NewTail() + plugin.Log = testutil.Logger{} plugin.FromBeginning = true plugin.Files = []string{tmpfile.Name()} plugin.SetParserFunc(func() (parsers.Parser, error) { @@ -217,6 +228,7 @@ func TestMultipleMetricsOnFirstLine(t *testing.T) { require.NoError(t, err) plugin := NewTail() + plugin.Log = testutil.Logger{} plugin.FromBeginning = true plugin.Files = []string{tmpfile.Name()} plugin.SetParserFunc(func() (parsers.Parser, error) { diff --git a/plugins/inputs/tcp_listener/tcp_listener.go b/plugins/inputs/tcp_listener/tcp_listener.go index 544f36bd6..41b8e4637 100644 --- a/plugins/inputs/tcp_listener/tcp_listener.go +++ b/plugins/inputs/tcp_listener/tcp_listener.go @@ -48,13 +48,15 @@ type TcpListener struct { TotalConnections selfstat.Stat PacketsRecv selfstat.Stat BytesRecv selfstat.Stat + + Log telegraf.Logger } -var dropwarn = "E! Error: tcp_listener message queue full. " + +var dropwarn = "tcp_listener message queue full. " + "We have dropped %d messages so far. " + - "You may want to increase allowed_pending_messages in the config\n" + "You may want to increase allowed_pending_messages in the config" -var malformedwarn = "E! tcp_listener has received %d malformed packets" + +var malformedwarn = "tcp_listener has received %d malformed packets" + " thus far." const sampleConfig = ` @@ -114,16 +116,15 @@ func (t *TcpListener) Start(acc telegraf.Accumulator) error { address, _ := net.ResolveTCPAddr("tcp", t.ServiceAddress) t.listener, err = net.ListenTCP("tcp", address) if err != nil { - log.Fatalf("ERROR: ListenUDP - %s", err) + t.Log.Errorf("Failed to listen: %s", err.Error()) return err } - log.Println("I! TCP server listening on: ", t.listener.Addr().String()) t.wg.Add(2) go t.tcpListen() go t.tcpParser() - log.Printf("I! Started TCP listener service on %s\n", t.ServiceAddress) + t.Log.Infof("Started TCP listener service on %q", t.ServiceAddress) return nil } @@ -150,7 +151,7 @@ func (t *TcpListener) Stop() { t.wg.Wait() close(t.in) - log.Println("I! Stopped TCP listener service on ", t.ServiceAddress) + t.Log.Infof("Stopped TCP listener service on %q", t.ServiceAddress) } // tcpListen listens for incoming TCP connections. @@ -191,9 +192,8 @@ func (t *TcpListener) refuser(conn *net.TCPConn) { " reached, closing.\nYou may want to increase max_tcp_connections in"+ " the Telegraf tcp listener configuration.\n", t.MaxTCPConnections) conn.Close() - log.Printf("I! Refused TCP Connection from %s", conn.RemoteAddr()) - log.Printf("I! 
WARNING: Maximum TCP Connections reached, you may want to" + - " adjust max_tcp_connections") + t.Log.Infof("Refused TCP Connection from %s", conn.RemoteAddr()) + t.Log.Warn("Maximum TCP Connections reached, you may want to adjust max_tcp_connections") } // handler handles a single TCP Connection @@ -235,7 +235,7 @@ func (t *TcpListener) handler(conn *net.TCPConn, id string) { default: t.drops++ if t.drops == 1 || t.drops%t.AllowedPendingMessages == 0 { - log.Printf(dropwarn, t.drops) + t.Log.Errorf(dropwarn, t.drops) } } } @@ -268,7 +268,7 @@ func (t *TcpListener) tcpParser() error { } else { t.malformed++ if t.malformed == 1 || t.malformed%1000 == 0 { - log.Printf(malformedwarn, t.malformed) + t.Log.Errorf(malformedwarn, t.malformed) } } } diff --git a/plugins/inputs/tcp_listener/tcp_listener_test.go b/plugins/inputs/tcp_listener/tcp_listener_test.go index 6ff40ad87..7c04ecaba 100644 --- a/plugins/inputs/tcp_listener/tcp_listener_test.go +++ b/plugins/inputs/tcp_listener/tcp_listener_test.go @@ -33,6 +33,7 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257 func newTestTcpListener() (*TcpListener, chan []byte) { in := make(chan []byte, 1500) listener := &TcpListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:8194", AllowedPendingMessages: 10000, MaxTCPConnections: 250, @@ -45,6 +46,7 @@ func newTestTcpListener() (*TcpListener, chan []byte) { // benchmark how long it takes to accept & process 100,000 metrics: func BenchmarkTCP(b *testing.B) { listener := TcpListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:8198", AllowedPendingMessages: 100000, MaxTCPConnections: 250, @@ -76,6 +78,7 @@ func BenchmarkTCP(b *testing.B) { func TestHighTrafficTCP(t *testing.T) { listener := TcpListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:8199", AllowedPendingMessages: 100000, MaxTCPConnections: 250, @@ -103,6 +106,7 @@ func TestHighTrafficTCP(t *testing.T) { func TestConnectTCP(t *testing.T) { listener := TcpListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:8194", AllowedPendingMessages: 10000, MaxTCPConnections: 250, @@ -140,6 +144,7 @@ func TestConnectTCP(t *testing.T) { // Test that MaxTCPConections is respected func TestConcurrentConns(t *testing.T) { listener := TcpListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:8195", AllowedPendingMessages: 10000, MaxTCPConnections: 2, @@ -175,6 +180,7 @@ func TestConcurrentConns(t *testing.T) { // Test that MaxTCPConections is respected when max==1 func TestConcurrentConns1(t *testing.T) { listener := TcpListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:8196", AllowedPendingMessages: 10000, MaxTCPConnections: 1, @@ -208,6 +214,7 @@ func TestConcurrentConns1(t *testing.T) { // Test that MaxTCPConections is respected func TestCloseConcurrentConns(t *testing.T) { listener := TcpListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:8195", AllowedPendingMessages: 10000, MaxTCPConnections: 2, diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go index d0a728b3c..7fa59fdb1 100644 --- a/plugins/inputs/udp_listener/udp_listener.go +++ b/plugins/inputs/udp_listener/udp_listener.go @@ -53,17 +53,19 @@ type UdpListener struct { PacketsRecv selfstat.Stat BytesRecv selfstat.Stat + + Log telegraf.Logger } // UDP_MAX_PACKET_SIZE is packet limit, see // https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure const UDP_MAX_PACKET_SIZE int = 64 * 1024 -var dropwarn = "E! Error: udp_listener message queue full. 
" + +var dropwarn = "udp_listener message queue full. " + "We have dropped %d messages so far. " + - "You may want to increase allowed_pending_messages in the config\n" + "You may want to increase allowed_pending_messages in the config" -var malformedwarn = "E! udp_listener has received %d malformed packets" + +var malformedwarn = "udp_listener has received %d malformed packets" + " thus far." const sampleConfig = ` @@ -113,7 +115,7 @@ func (u *UdpListener) Start(acc telegraf.Accumulator) error { u.wg.Add(1) go u.udpParser() - log.Printf("I! Started UDP listener service on %s (ReadBuffer: %d)\n", u.ServiceAddress, u.UDPBufferSize) + u.Log.Infof("Started service on %q (ReadBuffer: %d)", u.ServiceAddress, u.UDPBufferSize) return nil } @@ -124,7 +126,7 @@ func (u *UdpListener) Stop() { u.wg.Wait() u.listener.Close() close(u.in) - log.Println("I! Stopped UDP listener service on ", u.ServiceAddress) + u.Log.Infof("Stopped service on %q", u.ServiceAddress) } func (u *UdpListener) udpListen() error { @@ -134,15 +136,15 @@ func (u *UdpListener) udpListen() error { u.listener, err = net.ListenUDP("udp", address) if err != nil { - return fmt.Errorf("E! Error: ListenUDP - %s", err) + return err } - log.Println("I! UDP server listening on: ", u.listener.LocalAddr().String()) + u.Log.Infof("Server listening on %q", u.listener.LocalAddr().String()) if u.UDPBufferSize > 0 { err = u.listener.SetReadBuffer(u.UDPBufferSize) // if we want to move away from OS default if err != nil { - return fmt.Errorf("E! Failed to set UDP read buffer to %d: %s", u.UDPBufferSize, err) + return fmt.Errorf("failed to set UDP read buffer to %d: %s", u.UDPBufferSize, err) } } @@ -166,7 +168,7 @@ func (u *UdpListener) udpListenLoop() { if err != nil { if err, ok := err.(net.Error); ok && err.Timeout() { } else { - log.Printf("E! 
Error: %s\n", err.Error()) + u.Log.Error(err.Error()) } continue } @@ -180,7 +182,7 @@ func (u *UdpListener) udpListenLoop() { default: u.drops++ if u.drops == 1 || u.drops%u.AllowedPendingMessages == 0 { - log.Printf(dropwarn, u.drops) + u.Log.Errorf(dropwarn, u.drops) } } } @@ -208,7 +210,7 @@ func (u *UdpListener) udpParser() error { } else { u.malformed++ if u.malformed == 1 || u.malformed%1000 == 0 { - log.Printf(malformedwarn, u.malformed) + u.Log.Errorf(malformedwarn, u.malformed) } } } diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go index 345db62a4..b241235e4 100644 --- a/plugins/inputs/udp_listener/udp_listener_test.go +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -31,6 +31,7 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257 func newTestUdpListener() (*UdpListener, chan []byte) { in := make(chan []byte, 1500) listener := &UdpListener{ + Log: testutil.Logger{}, ServiceAddress: ":8125", AllowedPendingMessages: 10000, in: in, @@ -78,6 +79,7 @@ func newTestUdpListener() (*UdpListener, chan []byte) { func TestConnectUDP(t *testing.T) { listener := UdpListener{ + Log: testutil.Logger{}, ServiceAddress: ":8127", AllowedPendingMessages: 10000, } diff --git a/plugins/inputs/vsphere/client.go b/plugins/inputs/vsphere/client.go index b514813ab..176f48323 100644 --- a/plugins/inputs/vsphere/client.go +++ b/plugins/inputs/vsphere/client.go @@ -4,13 +4,13 @@ import ( "context" "crypto/tls" "fmt" - "log" "net/url" "strconv" "strings" "sync" "time" + "github.com/influxdata/telegraf" "github.com/vmware/govmomi" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/performance" @@ -45,6 +45,7 @@ type Client struct { Valid bool Timeout time.Duration closeGate sync.Once + log telegraf.Logger } // NewClientFactory creates a new ClientFactory and prepares it for use. @@ -76,7 +77,7 @@ func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) { ctx1, cancel1 := context.WithTimeout(ctx, cf.parent.Timeout.Duration) defer cancel1() if _, err := methods.GetCurrentTime(ctx1, cf.client.Client); err != nil { - log.Printf("I! [inputs.vsphere]: Client session seems to have time out. Reauthenticating!") + cf.parent.Log.Info("Client session seems to have time out. Reauthenticating!") ctx2, cancel2 := context.WithTimeout(ctx, cf.parent.Timeout.Duration) defer cancel2() if err := cf.client.Client.SessionManager.Login(ctx2, url.UserPassword(cf.parent.Username, cf.parent.Password)); err != nil { @@ -88,7 +89,7 @@ func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) { cf.client = nil continue } - return nil, fmt.Errorf("Renewing authentication failed: %v", err) + return nil, fmt.Errorf("renewing authentication failed: %s", err.Error()) } } @@ -113,7 +114,7 @@ func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) { u.User = url.UserPassword(vs.Username, vs.Password) } - log.Printf("D! [inputs.vsphere]: Creating client: %s", u.Host) + vs.Log.Debugf("Creating client: %s", u.Host) soapClient := soap.NewClient(u, tlsCfg.InsecureSkipVerify) // Add certificate if we have it. Use it to log us in. @@ -170,6 +171,7 @@ func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) { p := performance.NewManager(c.Client) client := &Client{ + log: vs.Log, Client: c, Views: m, Root: v, @@ -184,9 +186,9 @@ func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) { if err != nil { return nil, err } - log.Printf("D! 
[inputs.vsphere] vCenter says max_query_metrics should be %d", n) + vs.Log.Debugf("vCenter says max_query_metrics should be %d", n) if n < vs.MaxQueryMetrics { - log.Printf("W! [inputs.vsphere] Configured max_query_metrics is %d, but server limits it to %d. Reducing.", vs.MaxQueryMetrics, n) + vs.Log.Warnf("Configured max_query_metrics is %d, but server limits it to %d. Reducing.", vs.MaxQueryMetrics, n) vs.MaxQueryMetrics = n } return client, nil @@ -202,7 +204,6 @@ func (cf *ClientFactory) Close() { } func (c *Client) close() { - // Use a Once to prevent us from panics stemming from trying // to close it multiple times. c.closeGate.Do(func() { @@ -210,7 +211,7 @@ func (c *Client) close() { defer cancel() if c.Client != nil { if err := c.Client.Logout(ctx); err != nil { - log.Printf("E! [inputs.vsphere]: Error during logout: %s", err) + c.log.Errorf("Logout: %s", err.Error()) } } }) @@ -239,7 +240,7 @@ func (c *Client) GetMaxQueryMetrics(ctx context.Context) (int, error) { if s, ok := res[0].GetOptionValue().Value.(string); ok { v, err := strconv.Atoi(s) if err == nil { - log.Printf("D! [inputs.vsphere] vCenter maxQueryMetrics is defined: %d", v) + c.log.Debugf("vCenter maxQueryMetrics is defined: %d", v) if v == -1 { // Whatever the server says, we never ask for more metrics than this. return absoluteMaxMetrics, nil @@ -250,17 +251,17 @@ func (c *Client) GetMaxQueryMetrics(ctx context.Context) (int, error) { // Fall through version-based inference if value isn't usable } } else { - log.Println("D! [inputs.vsphere] Option query for maxQueryMetrics failed. Using default") + c.log.Debug("Option query for maxQueryMetrics failed. Using default") } // No usable maxQueryMetrics setting. Infer based on version ver := c.Client.Client.ServiceContent.About.Version parts := strings.Split(ver, ".") if len(parts) < 2 { - log.Printf("W! [inputs.vsphere] vCenter returned an invalid version string: %s. Using default query size=64", ver) + c.log.Warnf("vCenter returned an invalid version string: %s. Using default query size=64", ver) return 64, nil } - log.Printf("D! [inputs.vsphere] vCenter version is: %s", ver) + c.log.Debugf("vCenter version is: %s", ver) major, err := strconv.Atoi(parts[0]) if err != nil { return 0, err diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index c361754ab..63fe3eb03 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -250,10 +250,10 @@ func (e *Endpoint) startDiscovery(ctx context.Context) { case <-e.discoveryTicker.C: err := e.discover(ctx) if err != nil && err != context.Canceled { - log.Printf("E! [inputs.vsphere]: Error in discovery for %s: %v", e.URL.Host, err) + e.Parent.Log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error()) } case <-ctx.Done(): - log.Printf("D! [inputs.vsphere]: Exiting discovery goroutine for %s", e.URL.Host) + e.Parent.Log.Debugf("Exiting discovery goroutine for %s", e.URL.Host) e.discoveryTicker.Stop() return } @@ -264,7 +264,7 @@ func (e *Endpoint) startDiscovery(ctx context.Context) { func (e *Endpoint) initalDiscovery(ctx context.Context) { err := e.discover(ctx) if err != nil && err != context.Canceled { - log.Printf("E! [inputs.vsphere]: Error in discovery for %s: %v", e.URL.Host, err) + e.Parent.Log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error()) } e.startDiscovery(ctx) } @@ -279,7 +279,7 @@ func (e *Endpoint) init(ctx context.Context) error { if e.customAttrEnabled { fields, err := client.GetCustomFields(ctx) if err != nil { - log.Println("W! 
[inputs.vsphere] Could not load custom field metadata") + e.Parent.Log.Warn("Could not load custom field metadata") } else { e.customFields = fields } @@ -291,7 +291,7 @@ func (e *Endpoint) init(ctx context.Context) error { // goroutine without waiting for it. This will probably cause us to report an empty // dataset on the first collection, but it solves the issue of the first collection timing out. if e.Parent.ForceDiscoverOnInit { - log.Printf("D! [inputs.vsphere]: Running initial discovery and waiting for it to finish") + e.Parent.Log.Debug("Running initial discovery and waiting for it to finish") e.initalDiscovery(ctx) } else { // Otherwise, just run it in the background. We'll probably have an incomplete first metric @@ -354,7 +354,7 @@ func (e *Endpoint) getDatacenterName(ctx context.Context, client *Client, cache defer cancel1() err := o.Properties(ctx1, here, []string{"parent", "name"}, &result) if err != nil { - log.Printf("W! [inputs.vsphere]: Error while resolving parent. Assuming no parent exists. Error: %s", err) + e.Parent.Log.Warnf("Error while resolving parent. Assuming no parent exists. Error: %s", err.Error()) break } if result.Reference().Type == "Datacenter" { @@ -363,7 +363,7 @@ func (e *Endpoint) getDatacenterName(ctx context.Context, client *Client, cache break } if result.Parent == nil { - log.Printf("D! [inputs.vsphere]: No parent found for %s (ascending from %s)", here.Reference(), r.Reference()) + e.Parent.Log.Debugf("No parent found for %s (ascending from %s)", here.Reference(), r.Reference()) break } here = result.Parent.Reference() @@ -393,7 +393,7 @@ func (e *Endpoint) discover(ctx context.Context) error { return err } - log.Printf("D! [inputs.vsphere]: Discover new objects for %s", e.URL.Host) + e.Parent.Log.Debugf("Discover new objects for %s", e.URL.Host) dcNameCache := make(map[string]string) numRes := int64(0) @@ -401,7 +401,7 @@ func (e *Endpoint) discover(ctx context.Context) error { // Populate resource objects, and endpoint instance info. newObjects := make(map[string]objectMap) for k, res := range e.resourceKinds { - log.Printf("D! [inputs.vsphere] Discovering resources for %s", res.name) + e.Parent.Log.Debugf("Discovering resources for %s", res.name) // Need to do this for all resource types even if they are not enabled if res.enabled || k != "vm" { rf := ResourceFilter{ @@ -457,7 +457,7 @@ func (e *Endpoint) discover(ctx context.Context) error { if e.customAttrEnabled { fields, err = client.GetCustomFields(ctx) if err != nil { - log.Println("W! [inputs.vsphere] Could not load custom field metadata") + e.Parent.Log.Warn("Could not load custom field metadata") fields = nil } } @@ -481,10 +481,10 @@ func (e *Endpoint) discover(ctx context.Context) error { } func (e *Endpoint) simpleMetadataSelect(ctx context.Context, client *Client, res *resourceKind) { - log.Printf("D! [inputs.vsphere] Using fast metric metadata selection for %s", res.name) + e.Parent.Log.Debugf("Using fast metric metadata selection for %s", res.name) m, err := client.CounterInfoByName(ctx) if err != nil { - log.Printf("E! [inputs.vsphere]: Error while getting metric metadata. Discovery will be incomplete. Error: %s", err) + e.Parent.Log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error()) return } res.metrics = make(performance.MetricList, 0, len(res.include)) @@ -500,7 +500,7 @@ func (e *Endpoint) simpleMetadataSelect(ctx context.Context, client *Client, res } res.metrics = append(res.metrics, cnt) } else { - log.Printf("W! 
[inputs.vsphere] Metric name %s is unknown. Will not be collected", s) + e.Parent.Log.Warnf("Metric name %s is unknown. Will not be collected", s) } } } @@ -533,7 +533,7 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, te.Run(ctx, func() { metrics, err := e.getMetadata(ctx, obj, res.sampling) if err != nil { - log.Printf("E! [inputs.vsphere]: Error while getting metric metadata. Discovery will be incomplete. Error: %s", err) + e.Parent.Log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error()) } mMap := make(map[string]types.PerfMetricId) for _, m := range metrics { @@ -546,7 +546,7 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, mMap[strconv.Itoa(int(m.CounterId))+"|"+m.Instance] = m } } - log.Printf("D! [inputs.vsphere] Found %d metrics for %s", len(mMap), obj.name) + e.Parent.Log.Debugf("Found %d metrics for %s", len(mMap), obj.name) instInfoMux.Lock() defer instInfoMux.Unlock() if len(mMap) > len(res.metrics) { @@ -605,7 +605,7 @@ func getClusters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (obje defer cancel3() err = o.Properties(ctx3, *r.Parent, []string{"parent"}, &folder) if err != nil { - log.Printf("W! [inputs.vsphere] Error while getting folder parent: %e", err) + e.Parent.Log.Warnf("Error while getting folder parent: %s", err.Error()) p = nil } else { pp := folder.Parent.Reference() @@ -702,7 +702,7 @@ func getVMs(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap } key, ok := e.customFields[val.Key] if !ok { - log.Printf("W! [inputs.vsphere] Metadata for custom field %d not found. Skipping", val.Key) + e.Parent.Log.Warnf("Metadata for custom field %d not found. Skipping", val.Key) continue } if e.customAttrFilter.Match(key) { @@ -847,7 +847,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim // Make sure endtime is always after start time. We may occasionally see samples from the future // returned from vCenter. This is presumably due to time drift between vCenter and EXSi nodes. if pq.StartTime.After(*pq.EndTime) { - log.Printf("D! [inputs.vsphere] Future sample. Res: %s, StartTime: %s, EndTime: %s, Now: %s", pq.Entity, *pq.StartTime, *pq.EndTime, now) + e.Parent.Log.Debugf("Future sample. Res: %s, StartTime: %s, EndTime: %s, Now: %s", pq.Entity, *pq.StartTime, *pq.EndTime, now) end := start.Add(time.Second) pq.EndTime = &end } @@ -861,7 +861,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim // 2) We are at the last resource and have no more data to process. // 3) The query contains more than 100,000 individual metrics if mr > 0 || nRes >= e.Parent.MaxQueryObjects || len(pqs) > 100000 { - log.Printf("D! [inputs.vsphere]: Queueing query: %d objects, %d metrics (%d remaining) of type %s for %s. Processed objects: %d. Total objects %d", + e.Parent.Log.Debugf("Queueing query: %d objects, %d metrics (%d remaining) of type %s for %s. Processed objects: %d. Total objects %d", len(pqs), metrics, mr, res.name, e.URL.Host, total+1, len(res.objects)) // Don't send work items if the context has been cancelled. @@ -882,7 +882,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim // Handle final partially filled chunk if len(pqs) > 0 { // Run collection job - log.Printf("D! [inputs.vsphere]: Queuing query: %d objects, %d metrics (0 remaining) of type %s for %s. 
Total objects %d (final chunk)", + e.Parent.Log.Debugf("Queuing query: %d objects, %d metrics (0 remaining) of type %s for %s. Total objects %d (final chunk)", len(pqs), metrics, res.name, e.URL.Host, len(res.objects)) submitChunkJob(ctx, te, job, pqs) } @@ -914,18 +914,18 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc if estInterval < s { estInterval = s } - log.Printf("D! [inputs.vsphere] Raw interval %s, padded: %s, estimated: %s", rawInterval, paddedInterval, estInterval) + e.Parent.Log.Debugf("Raw interval %s, padded: %s, estimated: %s", rawInterval, paddedInterval, estInterval) } - log.Printf("D! [inputs.vsphere] Interval estimated to %s", estInterval) + e.Parent.Log.Debugf("Interval estimated to %s", estInterval) res.lastColl = localNow latest := res.latestSample if !latest.IsZero() { elapsed := now.Sub(latest).Seconds() + 5.0 // Allow 5 second jitter. - log.Printf("D! [inputs.vsphere]: Latest: %s, elapsed: %f, resource: %s", latest, elapsed, resourceType) + e.Parent.Log.Debugf("Latest: %s, elapsed: %f, resource: %s", latest, elapsed, resourceType) if !res.realTime && elapsed < float64(res.sampling) { // No new data would be available. We're outta here! - log.Printf("D! [inputs.vsphere]: Sampling period for %s of %d has not elapsed on %s", + e.Parent.Log.Debugf("Sampling period for %s of %d has not elapsed on %s", resourceType, res.sampling, e.URL.Host) return nil } @@ -936,7 +936,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc internalTags := map[string]string{"resourcetype": resourceType} sw := NewStopwatchWithTags("gather_duration", e.URL.Host, internalTags) - log.Printf("D! [inputs.vsphere]: Collecting metrics for %d objects of type %s for %s", + e.Parent.Log.Debugf("Collecting metrics for %d objects of type %s for %s", len(res.objects), resourceType, e.URL.Host) count := int64(0) @@ -948,9 +948,9 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc e.chunkify(ctx, res, now, latest, acc, func(chunk []types.PerfQuerySpec) { n, localLatest, err := e.collectChunk(ctx, chunk, res, acc, now, estInterval) - log.Printf("D! [inputs.vsphere] CollectChunk for %s returned %d metrics", resourceType, n) + e.Parent.Log.Debugf("CollectChunk for %s returned %d metrics", resourceType, n) if err != nil { - acc.AddError(errors.New("While collecting " + res.name + ": " + err.Error())) + acc.AddError(errors.New("while collecting " + res.name + ": " + err.Error())) } atomic.AddInt64(&count, int64(n)) tsMux.Lock() @@ -960,7 +960,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc } }) - log.Printf("D! [inputs.vsphere] Latest sample for %s set to %s", resourceType, latestSample) + e.Parent.Log.Debugf("Latest sample for %s set to %s", resourceType, latestSample) if !latestSample.IsZero() { res.latestSample = latestSample } @@ -1004,12 +1004,11 @@ func alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Dur lastBucket = roundedTs } } - //log.Printf("D! [inputs.vsphere] Aligned samples: %d collapsed into %d", len(info), len(rInfo)) return rInfo, rValues } func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, res *resourceKind, acc telegraf.Accumulator, now time.Time, interval time.Duration) (int, time.Time, error) { - log.Printf("D! 
[inputs.vsphere] Query for %s has %d QuerySpecs", res.name, len(pqs)) + e.Parent.Log.Debugf("Query for %s has %d QuerySpecs", res.name, len(pqs)) latestSample := time.Time{} count := 0 resourceType := res.name @@ -1030,14 +1029,14 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, return count, latestSample, err } - log.Printf("D! [inputs.vsphere] Query for %s returned metrics for %d objects", resourceType, len(ems)) + e.Parent.Log.Debugf("Query for %s returned metrics for %d objects", resourceType, len(ems)) // Iterate through results for _, em := range ems { moid := em.Entity.Reference().Value instInfo, found := res.objects[moid] if !found { - log.Printf("E! [inputs.vsphere]: MOID %s not found in cache. Skipping! (This should not happen!)", moid) + e.Parent.Log.Errorf("MOID %s not found in cache. Skipping! (This should not happen!)", moid) continue } buckets := make(map[string]metricEntry) @@ -1052,7 +1051,7 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, // Populate tags objectRef, ok := res.objects[moid] if !ok { - log.Printf("E! [inputs.vsphere]: MOID %s not found in cache. Skipping", moid) + e.Parent.Log.Errorf("MOID %s not found in cache. Skipping", moid) continue } e.populateTags(&objectRef, resourceType, res, t, &v) @@ -1064,7 +1063,7 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, // According to the docs, SampleInfo and Value should have the same length, but we've seen corrupted // data coming back with missing values. Take care of that gracefully! if idx >= len(alignedValues) { - log.Printf("D! [inputs.vsphere] len(SampleInfo)>len(Value) %d > %d", len(alignedInfo), len(alignedValues)) + e.Parent.Log.Debugf("Len(SampleInfo)>len(Value) %d > %d", len(alignedInfo), len(alignedValues)) break } ts := sample.Timestamp @@ -1085,7 +1084,7 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, // Percentage values must be scaled down by 100. info, ok := metricInfo[name] if !ok { - log.Printf("E! [inputs.vsphere]: Could not determine unit for %s. Skipping", name) + e.Parent.Log.Errorf("Could not determine unit for %s. Skipping", name) } v := alignedValues[idx] if info.UnitInfo.GetElementDescription().Key == "percent" { @@ -1103,7 +1102,7 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, e.hwMarks.Put(moid, ts) } if nValues == 0 { - log.Printf("D! [inputs.vsphere]: Missing value for: %s, %s", name, objectRef.name) + e.Parent.Log.Debugf("Missing value for: %s, %s", name, objectRef.name) continue } } diff --git a/plugins/inputs/vsphere/finder.go b/plugins/inputs/vsphere/finder.go index 24427b205..14f317df4 100644 --- a/plugins/inputs/vsphere/finder.go +++ b/plugins/inputs/vsphere/finder.go @@ -2,7 +2,6 @@ package vsphere import ( "context" - "log" "reflect" "strings" @@ -54,7 +53,7 @@ func (f *Finder) Find(ctx context.Context, resType, path string, dst interface{} return err } objectContentToTypedArray(objs, dst) - log.Printf("D! [inputs.vsphere] Find(%s, %s) returned %d objects", resType, path, len(objs)) + f.client.log.Debugf("Find(%s, %s) returned %d objects", resType, path, len(objs)) return nil } diff --git a/plugins/inputs/vsphere/tscache.go b/plugins/inputs/vsphere/tscache.go index 4f73c4fe8..6e7d00c8b 100644 --- a/plugins/inputs/vsphere/tscache.go +++ b/plugins/inputs/vsphere/tscache.go @@ -34,7 +34,7 @@ func (t *TSCache) Purge() { n++ } } - log.Printf("D! [inputs.vsphere] Purged timestamp cache. 
%d deleted with %d remaining", n, len(t.table)) + log.Printf("D! [inputs.vsphere] purged timestamp cache. %d deleted with %d remaining", n, len(t.table)) } // IsNew returns true if the supplied timestamp for the supplied key is more recent than the diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go index 2f9f08cc6..176d55010 100644 --- a/plugins/inputs/vsphere/vsphere.go +++ b/plugins/inputs/vsphere/vsphere.go @@ -2,7 +2,6 @@ package vsphere import ( "context" - "log" "sync" "time" @@ -58,6 +57,8 @@ type VSphere struct { // Mix in the TLS/SSL goodness from core tls.ClientConfig + + Log telegraf.Logger } var sampleConfig = ` @@ -243,7 +244,7 @@ func (v *VSphere) Description() string { // Start is called from telegraf core when a plugin is started and allows it to // perform initialization tasks. func (v *VSphere) Start(acc telegraf.Accumulator) error { - log.Println("D! [inputs.vsphere]: Starting plugin") + v.Log.Info("Starting plugin") ctx, cancel := context.WithCancel(context.Background()) v.cancel = cancel @@ -266,7 +267,7 @@ func (v *VSphere) Start(acc telegraf.Accumulator) error { // Stop is called from telegraf core when a plugin is stopped and allows it to // perform shutdown tasks. func (v *VSphere) Stop() { - log.Println("D! [inputs.vsphere]: Stopping plugin") + v.Log.Info("Stopping plugin") v.cancel() // Wait for all endpoints to finish. No need to wait for @@ -275,7 +276,7 @@ func (v *VSphere) Stop() { // wait for any discovery to complete by trying to grab the // "busy" mutex. for _, ep := range v.endpoints { - log.Printf("D! [inputs.vsphere]: Waiting for endpoint %s to finish", ep.URL.Host) + v.Log.Debugf("Waiting for endpoint %q to finish", ep.URL.Host) func() { ep.busy.Lock() // Wait until discovery is finished defer ep.busy.Unlock() diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go index 28c2c7934..aa56d44a1 100644 --- a/plugins/inputs/vsphere/vsphere_test.go +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -42,6 +42,7 @@ var configHeader = ` func defaultVSphere() *VSphere { return &VSphere{ + Log: testutil.Logger{}, ClusterMetricInclude: []string{ "cpu.usage.*", "cpu.usagemhz.*", diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index f858ba6e7..bd130a3fd 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -5,7 +5,6 @@ package win_perf_counters import ( "errors" "fmt" - "log" "strings" "time" @@ -147,6 +146,8 @@ type Win_PerfCounters struct { CountersRefreshInterval internal.Duration UseWildcardsExpansion bool + Log telegraf.Logger + lastRefreshed time.Time counters []*counter query PerformanceQuery @@ -289,7 +290,7 @@ func (m *Win_PerfCounters) AddItem(counterPath string, objectName string, instan m.counters = append(m.counters, newItem) if m.PrintValid { - log.Printf("Valid: %s\n", counterPath) + m.Log.Infof("Valid: %s", counterPath) } } } else { @@ -297,7 +298,7 @@ func (m *Win_PerfCounters) AddItem(counterPath string, objectName string, instan includeTotal, counterHandle} m.counters = append(m.counters, newItem) if m.PrintValid { - log.Printf("Valid: %s\n", counterPath) + m.Log.Infof("Valid: %s", counterPath) } } @@ -323,7 +324,7 @@ func (m *Win_PerfCounters) ParseConfig() error { if err != nil { if PerfObject.FailOnMissing || PerfObject.WarnOnMissing { - log.Printf("Invalid counterPath: '%s'. 
Error: %s\n", counterPath, err.Error()) + m.Log.Errorf("Invalid counterPath: '%s'. Error: %s\n", counterPath, err.Error()) } if PerfObject.FailOnMissing { return err diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_test.go index 5052fb7a2..13eebdc95 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_test.go @@ -247,13 +247,17 @@ func TestCounterPathParsing(t *testing.T) { func TestAddItemSimple(t *testing.T) { var err error cps1 := []string{"\\O(I)\\C"} - m := Win_PerfCounters{PrintValid: false, Object: nil, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.1}, []uint32{0}), - expandPaths: map[string][]string{ - cps1[0]: cps1, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: nil, + query: &FakePerformanceQuery{ + counters: createCounterMap(cps1, []float64{1.1}, []uint32{0}), + expandPaths: map[string][]string{ + cps1[0]: cps1, + }, + vistaAndNewer: true, + }} err = m.query.Open() require.NoError(t, err) err = m.AddItem(cps1[0], "O", "I", "c", "test", false) @@ -265,13 +269,18 @@ func TestAddItemSimple(t *testing.T) { func TestAddItemInvalidCountPath(t *testing.T) { var err error cps1 := []string{"\\O\\C"} - m := Win_PerfCounters{PrintValid: false, Object: nil, UseWildcardsExpansion: true, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.1}, []uint32{0}), - expandPaths: map[string][]string{ - cps1[0]: {"\\O/C"}, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: nil, + UseWildcardsExpansion: true, + query: &FakePerformanceQuery{ + counters: createCounterMap(cps1, []float64{1.1}, []uint32{0}), + expandPaths: map[string][]string{ + cps1[0]: {"\\O/C"}, + }, + vistaAndNewer: true, + }} err = m.query.Open() require.NoError(t, err) err = m.AddItem("\\O\\C", "O", "------", "C", "test", false) @@ -284,16 +293,20 @@ func TestParseConfigBasic(t *testing.T) { var err error perfObjects := createPerfObject("m", "O", []string{"I1", "I2"}, []string{"C1", "C2"}, false, false) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} - m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3, 1.4}, []uint32{0, 0, 0, 0}), - expandPaths: map[string][]string{ - cps1[0]: {cps1[0]}, - cps1[1]: {cps1[1]}, - cps1[2]: {cps1[2]}, - cps1[3]: {cps1[3]}, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3, 1.4}, []uint32{0, 0, 0, 0}), + expandPaths: map[string][]string{ + cps1[0]: {cps1[0]}, + cps1[1]: {cps1[1]}, + cps1[2]: {cps1[2]}, + cps1[3]: {cps1[3]}, + }, + vistaAndNewer: true, + }} err = m.query.Open() require.NoError(t, err) err = m.ParseConfig() @@ -318,14 +331,19 @@ func TestParseConfigNoInstance(t *testing.T) { var err error perfObjects := createPerfObject("m", "O", []string{"------"}, []string{"C1", "C2"}, false, false) cps1 := []string{"\\O\\C1", "\\O\\C2"} - m := Win_PerfCounters{PrintValid: false, Object: perfObjects, UseWildcardsExpansion: false, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.1, 1.2}, []uint32{0, 0}), - expandPaths: map[string][]string{ - cps1[0]: 
{cps1[0]}, - cps1[1]: {cps1[1]}, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + UseWildcardsExpansion: false, + query: &FakePerformanceQuery{ + counters: createCounterMap(cps1, []float64{1.1, 1.2}, []uint32{0, 0}), + expandPaths: map[string][]string{ + cps1[0]: {cps1[0]}, + cps1[1]: {cps1[1]}, + }, + vistaAndNewer: true, + }} err = m.query.Open() require.NoError(t, err) err = m.ParseConfig() @@ -350,15 +368,19 @@ func TestParseConfigInvalidCounterError(t *testing.T) { var err error perfObjects := createPerfObject("m", "O", []string{"I1", "I2"}, []string{"C1", "C2"}, true, false) cps1 := []string{"\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} - m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3}, []uint32{0, 0, 0}), - expandPaths: map[string][]string{ - cps1[0]: {cps1[0]}, - cps1[1]: {cps1[1]}, - cps1[2]: {cps1[2]}, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3}, []uint32{0, 0, 0}), + expandPaths: map[string][]string{ + cps1[0]: {cps1[0]}, + cps1[1]: {cps1[1]}, + cps1[2]: {cps1[2]}, + }, + vistaAndNewer: true, + }} err = m.query.Open() require.NoError(t, err) err = m.ParseConfig() @@ -381,15 +403,19 @@ func TestParseConfigInvalidCounterNoError(t *testing.T) { var err error perfObjects := createPerfObject("m", "O", []string{"I1", "I2"}, []string{"C1", "C2"}, false, false) cps1 := []string{"\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} - m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3}, []uint32{0, 0, 0}), - expandPaths: map[string][]string{ - cps1[0]: {cps1[0]}, - cps1[1]: {cps1[1]}, - cps1[2]: {cps1[2]}, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3}, []uint32{0, 0, 0}), + expandPaths: map[string][]string{ + cps1[0]: {cps1[0]}, + cps1[1]: {cps1[1]}, + cps1[2]: {cps1[2]}, + }, + vistaAndNewer: true, + }} err = m.query.Open() require.NoError(t, err) err = m.ParseConfig() @@ -413,13 +439,18 @@ func TestParseConfigTotalExpansion(t *testing.T) { var err error perfObjects := createPerfObject("m", "O", []string{"*"}, []string{"*"}, true, true) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(_Total)\\C1", "\\O(_Total)\\C2"} - m := Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: true, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}), - expandPaths: map[string][]string{ - "\\O(*)\\*": cps1, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + UseWildcardsExpansion: true, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}), + expandPaths: map[string][]string{ + "\\O(*)\\*": cps1, + }, + vistaAndNewer: true, + }} err = m.query.Open() require.NoError(t, err) err = m.ParseConfig() @@ -430,13 +461,18 @@ func TestParseConfigTotalExpansion(t *testing.T) { perfObjects[0].IncludeTotal = false - 
m = Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: true, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}), - expandPaths: map[string][]string{ - "\\O(*)\\*": cps1, - }, - vistaAndNewer: true, - }} + m = Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + UseWildcardsExpansion: true, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}), + expandPaths: map[string][]string{ + "\\O(*)\\*": cps1, + }, + vistaAndNewer: true, + }} err = m.query.Open() require.NoError(t, err) err = m.ParseConfig() @@ -450,13 +486,18 @@ func TestParseConfigExpand(t *testing.T) { var err error perfObjects := createPerfObject("m", "O", []string{"*"}, []string{"*"}, false, false) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} - m := Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: true, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}), - expandPaths: map[string][]string{ - "\\O(*)\\*": cps1, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + UseWildcardsExpansion: true, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}), + expandPaths: map[string][]string{ + "\\O(*)\\*": cps1, + }, + vistaAndNewer: true, + }} err = m.query.Open() require.NoError(t, err) err = m.ParseConfig() @@ -474,13 +515,17 @@ func TestSimpleGather(t *testing.T) { measurement := "test" perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false) cp1 := "\\O(I)\\C" - m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap([]string{cp1}, []float64{1.2}, []uint32{0}), - expandPaths: map[string][]string{ - cp1: {cp1}, - }, - vistaAndNewer: false, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap([]string{cp1}, []float64{1.2}, []uint32{0}), + expandPaths: map[string][]string{ + cp1: {cp1}, + }, + vistaAndNewer: false, + }} var acc1 testutil.Accumulator err = m.Gather(&acc1) require.NoError(t, err) @@ -513,13 +558,17 @@ func TestSimpleGatherNoData(t *testing.T) { measurement := "test" perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false) cp1 := "\\O(I)\\C" - m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap([]string{cp1}, []float64{1.2}, []uint32{PDH_NO_DATA}), - expandPaths: map[string][]string{ - cp1: {cp1}, - }, - vistaAndNewer: false, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap([]string{cp1}, []float64{1.2}, []uint32{PDH_NO_DATA}), + expandPaths: map[string][]string{ + cp1: {cp1}, + }, + vistaAndNewer: false, + }} var acc1 testutil.Accumulator err = m.Gather(&acc1) // this "PDH_NO_DATA" error should not be returned to caller, but checked, and handled @@ -555,13 +604,18 @@ func TestSimpleGatherWithTimestamp(t *testing.T) { measurement := 
"test" perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false) cp1 := "\\O(I)\\C" - m := Win_PerfCounters{PrintValid: false, UsePerfCounterTime: true, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap([]string{cp1}, []float64{1.2}, []uint32{0}), - expandPaths: map[string][]string{ - cp1: {cp1}, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + UsePerfCounterTime: true, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap([]string{cp1}, []float64{1.2}, []uint32{0}), + expandPaths: map[string][]string{ + cp1: {cp1}, + }, + vistaAndNewer: true, + }} var acc1 testutil.Accumulator err = m.Gather(&acc1) require.NoError(t, err) @@ -586,13 +640,17 @@ func TestGatherError(t *testing.T) { measurement := "test" perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false) cp1 := "\\O(I)\\C" - m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap([]string{cp1}, []float64{-2}, []uint32{PDH_PLA_VALIDATION_WARNING}), - expandPaths: map[string][]string{ - cp1: {cp1}, - }, - vistaAndNewer: false, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap([]string{cp1}, []float64{-2}, []uint32{PDH_PLA_VALIDATION_WARNING}), + expandPaths: map[string][]string{ + cp1: {cp1}, + }, + vistaAndNewer: false, + }} var acc1 testutil.Accumulator err = m.Gather(&acc1) require.Error(t, err) @@ -617,15 +675,19 @@ func TestGatherInvalidDataIgnore(t *testing.T) { measurement := "test" perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C1", "C2", "C3"}, false, false) cps1 := []string{"\\O(I)\\C1", "\\O(I)\\C2", "\\O(I)\\C3"} - m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.2, 1, 0}, []uint32{0, PDH_INVALID_DATA, 0}), - expandPaths: map[string][]string{ - cps1[0]: {cps1[0]}, - cps1[1]: {cps1[1]}, - cps1[2]: {cps1[2]}, - }, - vistaAndNewer: false, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap(cps1, []float64{1.2, 1, 0}, []uint32{0, PDH_INVALID_DATA, 0}), + expandPaths: map[string][]string{ + cps1[0]: {cps1[0]}, + cps1[1]: {cps1[1]}, + cps1[2]: {cps1[2]}, + }, + vistaAndNewer: false, + }} var acc1 testutil.Accumulator err = m.Gather(&acc1) require.NoError(t, err) @@ -666,7 +728,14 @@ func TestGatherRefreshingWithExpansion(t *testing.T) { }, vistaAndNewer: true, } - m := Win_PerfCounters{PrintValid: false, Object: perfObjects, UseWildcardsExpansion: true, query: fpm, CountersRefreshInterval: internal.Duration{Duration: time.Second * 10}} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + UseWildcardsExpansion: true, + query: fpm, + CountersRefreshInterval: internal.Duration{Duration: time.Second * 10}, + } var acc1 testutil.Accumulator err = m.Gather(&acc1) assert.Len(t, m.counters, 4) @@ -752,7 +821,13 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { }, vistaAndNewer: true, } - m := Win_PerfCounters{PrintValid: false, Object: perfObjects, UseWildcardsExpansion: false, query: fpm, CountersRefreshInterval: internal.Duration{Duration: time.Second * 10}} + m := Win_PerfCounters{ + Log: 
testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + UseWildcardsExpansion: false, + query: fpm, + CountersRefreshInterval: internal.Duration{Duration: time.Second * 10}} var acc1 testutil.Accumulator err = m.Gather(&acc1) assert.Len(t, m.counters, 2) @@ -862,14 +937,19 @@ func TestGatherTotalNoExpansion(t *testing.T) { measurement := "m" perfObjects := createPerfObject(measurement, "O", []string{"*"}, []string{"C1", "C2"}, true, true) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(_Total)\\C1", "\\O(_Total)\\C2"} - m := Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps1...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4}, []uint32{0, 0, 0, 0, 0, 0}), - expandPaths: map[string][]string{ - "\\O(*)\\C1": {cps1[0], cps1[2]}, - "\\O(*)\\C2": {cps1[1], cps1[3]}, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + UseWildcardsExpansion: false, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps1...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4}, []uint32{0, 0, 0, 0, 0, 0}), + expandPaths: map[string][]string{ + "\\O(*)\\C1": {cps1[0], cps1[2]}, + "\\O(*)\\C2": {cps1[1], cps1[3]}, + }, + vistaAndNewer: true, + }} var acc1 testutil.Accumulator err = m.Gather(&acc1) require.NoError(t, err) diff --git a/plugins/inputs/win_services/win_services.go b/plugins/inputs/win_services/win_services.go index 1befc4a60..6ac1bde68 100644 --- a/plugins/inputs/win_services/win_services.go +++ b/plugins/inputs/win_services/win_services.go @@ -4,7 +4,6 @@ package win_services import ( "fmt" - "log" "os" "github.com/influxdata/telegraf" @@ -90,6 +89,8 @@ var description = "Input plugin to report Windows services info." //WinServices is an implementation if telegraf.Input interface, providing info about Windows Services type WinServices struct { + Log telegraf.Logger + ServiceNames []string `toml:"service_names"` mgrProvider ManagerProvider } @@ -125,9 +126,9 @@ func (m *WinServices) Gather(acc telegraf.Accumulator) error { service, err := collectServiceInfo(scmgr, srvName) if err != nil { if IsPermission(err) { - log.Printf("D! 
Error in plugin [inputs.win_services]: %v", err) + m.Log.Debug(err.Error()) } else { - acc.AddError(err) + m.Log.Error(err.Error()) } continue } diff --git a/plugins/inputs/win_services/win_services_integration_test.go b/plugins/inputs/win_services/win_services_integration_test.go index a39df49c7..0c375c3dd 100644 --- a/plugins/inputs/win_services/win_services_integration_test.go +++ b/plugins/inputs/win_services/win_services_integration_test.go @@ -47,7 +47,11 @@ func TestGatherErrors(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } - ws := &WinServices{InvalidServices, &MgProvider{}} + ws := &WinServices{ + Log: testutil.Logger{}, + ServiceNames: InvalidServices, + mgrProvider: &MgProvider{}, + } require.Len(t, ws.ServiceNames, 3, "Different number of services") var acc testutil.Accumulator require.NoError(t, ws.Gather(&acc)) diff --git a/plugins/inputs/win_services/win_services_test.go b/plugins/inputs/win_services/win_services_test.go index 37dc3f08c..e33ab2ddc 100644 --- a/plugins/inputs/win_services/win_services_test.go +++ b/plugins/inputs/win_services/win_services_test.go @@ -3,8 +3,10 @@ package win_services import ( + "bytes" "errors" "fmt" + "log" "testing" "github.com/influxdata/telegraf/testutil" @@ -128,47 +130,51 @@ var testErrors = []testData{ func TestBasicInfo(t *testing.T) { - winServices := &WinServices{nil, &FakeMgProvider{testErrors[0]}} + winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[0]}} assert.NotEmpty(t, winServices.SampleConfig()) assert.NotEmpty(t, winServices.Description()) } func TestMgrErrors(t *testing.T) { //mgr.connect error - winServices := &WinServices{nil, &FakeMgProvider{testErrors[0]}} + winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[0]}} var acc1 testutil.Accumulator err := winServices.Gather(&acc1) require.Error(t, err) assert.Contains(t, err.Error(), testErrors[0].mgrConnectError.Error()) ////mgr.listServices error - winServices = &WinServices{nil, &FakeMgProvider{testErrors[1]}} + winServices = &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[1]}} var acc2 testutil.Accumulator err = winServices.Gather(&acc2) require.Error(t, err) assert.Contains(t, err.Error(), testErrors[1].mgrListServicesError.Error()) ////mgr.listServices error 2 - winServices = &WinServices{[]string{"Fake service 1"}, &FakeMgProvider{testErrors[3]}} + winServices = &WinServices{testutil.Logger{}, []string{"Fake service 1"}, &FakeMgProvider{testErrors[3]}} var acc3 testutil.Accumulator - err = winServices.Gather(&acc3) - require.NoError(t, err) - assert.Len(t, acc3.Errors, 1) + buf := &bytes.Buffer{} + log.SetOutput(buf) + require.NoError(t, winServices.Gather(&acc3)) + + require.Contains(t, buf.String(), testErrors[2].services[0].serviceOpenError.Error()) } func TestServiceErrors(t *testing.T) { - winServices := &WinServices{nil, &FakeMgProvider{testErrors[2]}} + winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[2]}} var acc1 testutil.Accumulator - require.NoError(t, winServices.Gather(&acc1)) - assert.Len(t, acc1.Errors, 3) - //open service error - assert.Contains(t, acc1.Errors[0].Error(), testErrors[2].services[0].serviceOpenError.Error()) - //query service error - assert.Contains(t, acc1.Errors[1].Error(), testErrors[2].services[1].serviceQueryError.Error()) - //config service error - assert.Contains(t, acc1.Errors[2].Error(), testErrors[2].services[2].serviceConfigError.Error()) + buf := &bytes.Buffer{} + log.SetOutput(buf) + 
require.NoError(t, winServices.Gather(&acc1)) + + //open service error + require.Contains(t, buf.String(), testErrors[2].services[0].serviceOpenError.Error()) + //query service error + require.Contains(t, buf.String(), testErrors[2].services[1].serviceQueryError.Error()) + //config service error + require.Contains(t, buf.String(), testErrors[2].services[2].serviceConfigError.Error()) } var testSimpleData = []testData{ @@ -179,7 +185,7 @@ var testSimpleData = []testData{ } func TestGather2(t *testing.T) { - winServices := &WinServices{nil, &FakeMgProvider{testSimpleData[0]}} + winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testSimpleData[0]}} var acc1 testutil.Accumulator require.NoError(t, winServices.Gather(&acc1)) assert.Len(t, acc1.Errors, 0, "There should be no errors after gather") @@ -193,5 +199,4 @@ func TestGather2(t *testing.T) { tags["display_name"] = s.displayName acc1.AssertContainsTaggedFields(t, "win_services", fields, tags) } - } diff --git a/plugins/inputs/zipkin/zipkin.go b/plugins/inputs/zipkin/zipkin.go index 18a63dccd..4224fea3d 100644 --- a/plugins/inputs/zipkin/zipkin.go +++ b/plugins/inputs/zipkin/zipkin.go @@ -3,7 +3,6 @@ package zipkin import ( "context" "fmt" - "log" "net" "net/http" "strconv" @@ -60,6 +59,8 @@ type Zipkin struct { Port int Path string + Log telegraf.Logger + address string handler Handler server *http.Server @@ -105,7 +106,7 @@ func (z *Zipkin) Start(acc telegraf.Accumulator) error { } z.address = ln.Addr().String() - log.Printf("I! Started the zipkin listener on %s", z.address) + z.Log.Infof("Started the zipkin listener on %s", z.address) go func() { wg.Add(1) diff --git a/plugins/inputs/zipkin/zipkin_test.go b/plugins/inputs/zipkin/zipkin_test.go index 2ac269db1..c022b6055 100644 --- a/plugins/inputs/zipkin/zipkin_test.go +++ b/plugins/inputs/zipkin/zipkin_test.go @@ -562,6 +562,7 @@ func TestZipkinPlugin(t *testing.T) { DefaultNetwork = "tcp4" z := &Zipkin{ + Log: testutil.Logger{}, Path: "/api/v1/spans", Port: 0, } diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 9497cadcc..7d26ddeb5 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -265,7 +265,7 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error if !c.config.SkipDatabaseCreation && !c.createdDatabases[db] { err := c.CreateDatabase(ctx, db) if err != nil { - c.log.Warnf("when writing to [%s]: database %q creation failed: %v", + c.log.Warnf("When writing to [%s]: database %q creation failed: %v", c.config.URL, db, err) } } @@ -331,7 +331,7 @@ func (c *httpClient) writeBatch(ctx context.Context, db string, metrics []telegr // discarded for being older than the retention policy. Usually this not // a cause for concern and we don't want to retry. if strings.Contains(desc, errStringPointsBeyondRP) { - c.log.Warnf("when writing to [%s]: received error %v", + c.log.Warnf("When writing to [%s]: received error %v", c.URL(), desc) return nil } @@ -340,7 +340,7 @@ func (c *httpClient) writeBatch(ctx context.Context, db string, metrics []telegr // correctable at this point and so the point is dropped instead of // retrying. 
if strings.Contains(desc, errStringPartialWrite) { - c.log.Errorf("when writing to [%s]: received error %v; discarding points", + c.log.Errorf("When writing to [%s]: received error %v; discarding points", c.URL(), desc) return nil } @@ -348,7 +348,7 @@ func (c *httpClient) writeBatch(ctx context.Context, db string, metrics []telegr // This error indicates a bug in either Telegraf line protocol // serialization, retries would not be successful. if strings.Contains(desc, errStringUnableToParse) { - c.log.Errorf("when writing to [%s]: received error %v; discarding points", + c.log.Errorf("When writing to [%s]: received error %v; discarding points", c.URL(), desc) return nil } diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 6af6dc173..01a09208a 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -221,13 +221,13 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { if !i.SkipDatabaseCreation { err := client.CreateDatabase(ctx, apiError.Database) if err != nil { - i.Log.Errorf("when writing to [%s]: database %q not found and failed to recreate", + i.Log.Errorf("When writing to [%s]: database %q not found and failed to recreate", client.URL(), apiError.Database) } } } - i.Log.Errorf("when writing to [%s]: %v", client.URL(), err) + i.Log.Errorf("When writing to [%s]: %v", client.URL(), err) } return errors.New("could not write any address") @@ -283,7 +283,7 @@ func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL) if !i.SkipDatabaseCreation { err = c.CreateDatabase(ctx, c.Database()) if err != nil { - i.Log.Warnf("when writing to [%s]: database %q creation failed: %v", + i.Log.Warnf("When writing to [%s]: database %q creation failed: %v", c.URL(), i.Database, err) } } diff --git a/plugins/outputs/influxdb/udp.go b/plugins/outputs/influxdb/udp.go index a50516c97..0add3c6c3 100644 --- a/plugins/outputs/influxdb/udp.go +++ b/plugins/outputs/influxdb/udp.go @@ -95,7 +95,7 @@ func (c *udpClient) Write(ctx context.Context, metrics []telegraf.Metric) error if err != nil { // Since we are serializing multiple metrics, don't fail the // entire batch just because of one unserializable metric. 
- c.log.Errorf("when writing to [%s] could not serialize metric: %v", + c.log.Errorf("When writing to [%s] could not serialize metric: %v", c.URL(), err) continue } From 00d9b842340066dc78038437762bf5628a5d2c27 Mon Sep 17 00:00:00 2001 From: Steven Barth Date: Tue, 24 Sep 2019 20:05:56 +0200 Subject: [PATCH 1206/1815] Fix path handling issues in cisco_telemetry_gnmi (#6403) - Avoid crashing when a field has no value or one of deprecated type - Emit measurement names correctly for replies with empty origin - Skip paths with empty names instead of adding a '/' --- .../cisco_telemetry_gnmi.go | 29 ++++++++++++++----- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go index 75a073bb6..cb946eebf 100644 --- a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go +++ b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go @@ -8,6 +8,7 @@ import ( "fmt" "io" "net" + "path" "strings" "sync" "time" @@ -102,21 +103,27 @@ func (c *CiscoTelemetryGNMI) Start(acc telegraf.Accumulator) error { // Invert explicit alias list and prefill subscription names c.aliases = make(map[string]string, len(c.Subscriptions)+len(c.Aliases)) for _, subscription := range c.Subscriptions { + var gnmiLongPath, gnmiShortPath *gnmi.Path + // Build the subscription path without keys - gnmiPath, err := parsePath(subscription.Origin, subscription.Path, "") - if err != nil { + if gnmiLongPath, err = parsePath(subscription.Origin, subscription.Path, ""); err != nil { + return err + } + if gnmiShortPath, err = parsePath("", subscription.Path, ""); err != nil { return err } - path, _ := c.handlePath(gnmiPath, nil, "") + longPath, _ := c.handlePath(gnmiLongPath, nil, "") + shortPath, _ := c.handlePath(gnmiShortPath, nil, "") name := subscription.Name // If the user didn't provide a measurement name, use last path element if len(name) == 0 { - name = path[strings.LastIndexByte(path, '/')+1:] + name = path.Base(shortPath) } if len(name) > 0 { - c.aliases[path] = name + c.aliases[longPath] = name + c.aliases[shortPath] = name } } for alias, path := range c.Aliases { @@ -296,6 +303,12 @@ func (c *CiscoTelemetryGNMI) handleTelemetryField(update *gnmi.Update, tags map[ var value interface{} var jsondata []byte + // Make sure a value is actually set + if update.Val == nil || update.Val.Value == nil { + log.Printf("I! [inputs.cisco_telemetry_gnmi]: Discarded empty or legacy type value with path: %s", path) + return aliasPath, nil + } + switch val := update.Val.Value.(type) { case *gnmi.TypedValue_AsciiVal: value = val.AsciiVal @@ -347,8 +360,10 @@ func (c *CiscoTelemetryGNMI) handlePath(path *gnmi.Path, tags map[string]string, // Parse generic keys from prefix for _, elem := range path.Elem { - builder.WriteRune('/') - builder.WriteString(elem.Name) + if len(elem.Name) > 0 { + builder.WriteRune('/') + builder.WriteString(elem.Name) + } name := builder.String() if _, exists := c.aliases[name]; exists { From 54b83361e822e437420820f6affcbceaf659cfc6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 24 Sep 2019 11:09:25 -0700 Subject: [PATCH 1207/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a6e7fffb..dd5158939 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ - [#6394](https://github.com/influxdata/telegraf/issues/6394): Fix parsing of BATTDATE in apcupsd input. 
- [#6398](https://github.com/influxdata/telegraf/issues/6398): Keep boolean values listed in json_string_fields. - [#6393](https://github.com/influxdata/telegraf/issues/6393): Disable Go plugin support in official builds. +- [#6391](https://github.com/influxdata/telegraf/issues/6391): Fix path handling issues in cisco_telemetry_gnmi. ## v1.12.1 [2019-09-10] From 3cf5b86aee18bbbf132e32b8ed613772a020ec11 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 24 Sep 2019 11:17:43 -0700 Subject: [PATCH 1208/1815] Use new log style in cisco_telemetry_gnmi --- plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go index cb946eebf..38297b976 100644 --- a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go +++ b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go @@ -305,7 +305,7 @@ func (c *CiscoTelemetryGNMI) handleTelemetryField(update *gnmi.Update, tags map[ // Make sure a value is actually set if update.Val == nil || update.Val.Value == nil { - log.Printf("I! [inputs.cisco_telemetry_gnmi]: Discarded empty or legacy type value with path: %s", path) + c.Log.Infof("Discarded empty or legacy type value with path: %q", path) return aliasPath, nil } From aef93fd1c6bf73b9d1f08fc04508eb0a678062ce Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 24 Sep 2019 13:54:28 -0700 Subject: [PATCH 1209/1815] Set release date for 1.12.2 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dd5158939..f3a07b6f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,7 @@ - [#6415](https://github.com/influxdata/telegraf/pull/6415): Allow graphite parser to create Inf and NaN values. - [#6434](https://github.com/influxdata/telegraf/pull/6434): Use prefix base detection for ints in grok parser. -## v1.12.2 [unreleased] +## v1.12.2 [2019-09-24] #### Bugfixes From 62c6e30a78476f4bfd86733a98bc4c1c6bbe06c0 Mon Sep 17 00:00:00 2001 From: Randy Coburn Date: Fri, 27 Sep 2019 01:14:54 +0200 Subject: [PATCH 1210/1815] Use batch serialization format in exec output (#6446) --- plugins/outputs/exec/exec.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/plugins/outputs/exec/exec.go b/plugins/outputs/exec/exec.go index 583646bb5..474c96791 100644 --- a/plugins/outputs/exec/exec.go +++ b/plugins/outputs/exec/exec.go @@ -67,13 +67,11 @@ func (e *Exec) SampleConfig() string { // Write writes the metrics to the configured command. func (e *Exec) Write(metrics []telegraf.Metric) error { var buffer bytes.Buffer - for _, metric := range metrics { - value, err := e.serializer.Serialize(metric) - if err != nil { - return err - } - buffer.Write(value) + serializedMetrics, err := e.serializer.SerializeBatch(metrics) + if err != nil { + return err } + buffer.Write(serializedMetrics) if buffer.Len() <= 0 { return nil From 86539515b88e76ff87baeacc7bac05aa7740c319 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 26 Sep 2019 16:17:25 -0700 Subject: [PATCH 1211/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f3a07b6f5..7bbfce4bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,12 @@ - [#6415](https://github.com/influxdata/telegraf/pull/6415): Allow graphite parser to create Inf and NaN values. 
 - [#6434](https://github.com/influxdata/telegraf/pull/6434): Use prefix base detection for ints in grok parser.
 
+## v1.12.3 [unreleased]
+
+#### Bugfixes
+
+- [#6445](https://github.com/influxdata/telegraf/issues/6445): Use batch serialization format in exec output.
+
 ## v1.12.2 [2019-09-24]
 
 #### Bugfixes
 
From fc6fb330678af46a778b73b9f018fda8872ce280 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Thu, 26 Sep 2019 17:09:44 -0700
Subject: [PATCH 1212/1815] Add merge aggregator (#6410)

---
 plugins/aggregators/all/all.go          |   1 +
 plugins/aggregators/merge/README.md     |  23 +++
 plugins/aggregators/merge/merge.go      |  62 ++++++++
 plugins/aggregators/merge/merge_test.go | 186 ++++++++++++++++++++++++
 4 files changed, 272 insertions(+)
 create mode 100644 plugins/aggregators/merge/README.md
 create mode 100644 plugins/aggregators/merge/merge.go
 create mode 100644 plugins/aggregators/merge/merge_test.go

diff --git a/plugins/aggregators/all/all.go b/plugins/aggregators/all/all.go
index ec04c0aaf..eabfaa4bf 100644
--- a/plugins/aggregators/all/all.go
+++ b/plugins/aggregators/all/all.go
@@ -4,6 +4,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/aggregators/basicstats"
 	_ "github.com/influxdata/telegraf/plugins/aggregators/final"
 	_ "github.com/influxdata/telegraf/plugins/aggregators/histogram"
+	_ "github.com/influxdata/telegraf/plugins/aggregators/merge"
 	_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
 	_ "github.com/influxdata/telegraf/plugins/aggregators/valuecounter"
 )
diff --git a/plugins/aggregators/merge/README.md b/plugins/aggregators/merge/README.md
new file mode 100644
index 000000000..58fa47bbd
--- /dev/null
+++ b/plugins/aggregators/merge/README.md
@@ -0,0 +1,23 @@
+# Merge Aggregator
+
+Merge metrics together into a single metric with multiple fields, which is
+the most memory and network transfer efficient form.
+
+Use this plugin when fields are split over multiple metrics, with the same
+measurement, tag set and timestamp. By merging them into a single metric they
+can be handled more efficiently by the output.
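Under the hood, metrics that share a series key (measurement name, tag set and
timestamp) are folded into one multi-field metric; `merge.go` below does this
with telegraf's `metric.SeriesGrouper`. A minimal sketch of that grouping
idea, with illustrative measurement, tag and field names mirroring the README
example rather than the patch itself:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	grouper := metric.NewSeriesGrouper()
	tags := map[string]string{"host": "localhost"}
	ts := time.Unix(0, 1567562620000000000)

	// Two single-field metrics sharing measurement, tag set and
	// timestamp land in the same group...
	grouper.Add("cpu", tags, ts, "usage_time", 42)
	grouper.Add("cpu", tags, ts, "idle_time", 42)

	// ...and come back out as one multi-field metric, equivalent to:
	//   cpu,host=localhost idle_time=42,usage_time=42 1567562620000000000
	for _, m := range grouper.Metrics() {
		fmt.Println(m.Name(), m.Tags(), m.Fields())
	}
}
```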
+ +### Configuration + +```toml +[[aggregators.merge]] + # no configuration +``` + +### Example + +```diff +- cpu,host=localhost usage_time=42 1567562620000000000 +- cpu,host=localhost idle_time=42 1567562620000000000 ++ cpu,host=localhost idle_time=42,usage_time=42 1567562620000000000 +``` diff --git a/plugins/aggregators/merge/merge.go b/plugins/aggregators/merge/merge.go new file mode 100644 index 000000000..6a1e82911 --- /dev/null +++ b/plugins/aggregators/merge/merge.go @@ -0,0 +1,62 @@ +package seriesgrouper + +import ( + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/aggregators" +) + +const ( + description = "Merge metrics into multifield metrics by series key" + sampleConfig = "" +) + +type Merge struct { + grouper *metric.SeriesGrouper + log telegraf.Logger +} + +func (a *Merge) Init() error { + a.grouper = metric.NewSeriesGrouper() + return nil +} + +func (a *Merge) Description() string { + return description +} + +func (a *Merge) SampleConfig() string { + return sampleConfig +} + +func (a *Merge) Add(m telegraf.Metric) { + tags := m.Tags() + for _, field := range m.FieldList() { + err := a.grouper.Add(m.Name(), tags, m.Time(), field.Key, field.Value) + if err != nil { + a.log.Errorf("Error adding metric: %v", err) + } + } +} + +func (a *Merge) Push(acc telegraf.Accumulator) { + // Always use nanosecond precision to avoid rounding metrics that were + // produced at a precision higher than the agent default. + acc.SetPrecision(time.Nanosecond) + + for _, m := range a.grouper.Metrics() { + acc.AddMetric(m) + } +} + +func (a *Merge) Reset() { + a.grouper = metric.NewSeriesGrouper() +} + +func init() { + aggregators.Add("merge", func() telegraf.Aggregator { + return &Merge{} + }) +} diff --git a/plugins/aggregators/merge/merge_test.go b/plugins/aggregators/merge/merge_test.go new file mode 100644 index 000000000..2f2703c8f --- /dev/null +++ b/plugins/aggregators/merge/merge_test.go @@ -0,0 +1,186 @@ +package seriesgrouper + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestSimple(t *testing.T) { + plugin := &Merge{} + + err := plugin.Init() + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + var acc testutil.Accumulator + plugin.Push(&acc) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + "time_guest": 42, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + +func TestNanosecondPrecision(t *testing.T) { + plugin := &Merge{} + + err := plugin.Init() + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 1), + ), + ) + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 1), + ), + ) + require.NoError(t, err) + + var 
acc testutil.Accumulator + acc.SetPrecision(time.Second) + plugin.Push(&acc) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + "time_guest": 42, + }, + time.Unix(0, 1), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + +func TestReset(t *testing.T) { + plugin := &Merge{} + + err := plugin.Init() + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + var acc testutil.Accumulator + plugin.Push(&acc) + + plugin.Reset() + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + plugin.Push(&acc) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} From 31d8d2baa7387b1adb24ac965b37c4b01b109644 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 26 Sep 2019 17:10:50 -0700 Subject: [PATCH 1213/1815] Update changelog --- CHANGELOG.md | 4 ++++ README.md | 1 + 2 files changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7bbfce4bd..efe551717 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,10 @@ - [azure_storage_queue](/plugins/inputs/azure_storage_queue/README.md) - Contributed by @mjiderhamn - [suricata](/plugins/inputs/suricata/README.md) - Contributed by @satta +#### New Aggregators + +- [merge](/plugins/aggregators/merge/README.md) - Contributed by @influxdata + #### Features - [#6326](https://github.com/influxdata/telegraf/pull/5842): Add per node memory stats to rabbitmq input. 
diff --git a/README.md b/README.md index 5f34ebf2a..6601379bd 100644 --- a/README.md +++ b/README.md @@ -354,6 +354,7 @@ For documentation on the latest development code see the [documentation index][d * [basicstats](./plugins/aggregators/basicstats) * [final](./plugins/aggregators/final) * [histogram](./plugins/aggregators/histogram) +* [merge](./plugins/aggregators/merge) * [minmax](./plugins/aggregators/minmax) * [valuecounter](./plugins/aggregators/valuecounter) From 01e948488198e9e33cb27f18b7366f4dc6432f9e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 27 Sep 2019 16:44:54 -0700 Subject: [PATCH 1214/1815] Use Go 1.12.10 for builds (#6455) --- .circleci/config.yml | 2 +- Makefile | 4 ++-- scripts/ci-1.12.docker | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 2c8713b19..3f02a3386 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -12,7 +12,7 @@ defaults: - image: 'quay.io/influxdb/telegraf-ci:1.11.13' go-1_12: &go-1_12 docker: - - image: 'quay.io/influxdb/telegraf-ci:1.12.9' + - image: 'quay.io/influxdb/telegraf-ci:1.12.10' version: 2 jobs: diff --git a/Makefile b/Makefile index 3c0fb3952..abb107838 100644 --- a/Makefile +++ b/Makefile @@ -131,8 +131,8 @@ plugin-%: .PHONY: ci-1.12 ci-1.12: - docker build -t quay.io/influxdb/telegraf-ci:1.12.9 - < scripts/ci-1.12.docker - docker push quay.io/influxdb/telegraf-ci:1.12.9 + docker build -t quay.io/influxdb/telegraf-ci:1.12.10 - < scripts/ci-1.12.docker + docker push quay.io/influxdb/telegraf-ci:1.12.10 .PHONY: ci-1.11 ci-1.11: diff --git a/scripts/ci-1.12.docker b/scripts/ci-1.12.docker index f5b093413..0572f4641 100644 --- a/scripts/ci-1.12.docker +++ b/scripts/ci-1.12.docker @@ -1,4 +1,4 @@ -FROM golang:1.12.9 +FROM golang:1.12.10 RUN chmod -R 755 "$GOPATH" From 518c09a9f6cce3c7831d343f2da78b085f5cc336 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 27 Sep 2019 16:47:27 -0700 Subject: [PATCH 1215/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index efe551717..7b12bf83f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ #### Bugfixes - [#6445](https://github.com/influxdata/telegraf/issues/6445): Use batch serialization format in exec output. +- [#6455](https://github.com/influxdata/telegraf/issues/6455): Build official packages with Go 1.12.10. 
 ## v1.12.2 [2019-09-24]
 
From fc1b1e8d2090bcf9aca647cf1621fbfb6acc630b Mon Sep 17 00:00:00 2001
From: Gregory Brzeski
Date: Mon, 30 Sep 2019 19:41:25 +0200
Subject: [PATCH 1216/1815] Use case insensitive serial number match in smart input (#6464)

---
 plugins/inputs/smart/smart.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go
index b69100596..35b44217f 100644
--- a/plugins/inputs/smart/smart.go
+++ b/plugins/inputs/smart/smart.go
@@ -23,7 +23,7 @@ var (
 	// Model Number: TS128GMTE850
 	modelInfo = regexp.MustCompile("^(Device Model|Product|Model Number):\\s+(.*)$")
 	// Serial Number: S0X5NZBC422720
-	serialInfo = regexp.MustCompile("^Serial Number:\\s+(.*)$")
+	serialInfo = regexp.MustCompile("(?i)^Serial Number:\\s+(.*)$")
 	// LU WWN Device Id: 5 002538 655584d30
 	wwnInfo = regexp.MustCompile("^LU WWN Device Id:\\s+(.*)$")
 	// User Capacity:    251,000,193,024 bytes [251 GB]

From 9867fe327955b9d610937823f2539fb7a3f6291b Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 30 Sep 2019 10:42:47 -0700
Subject: [PATCH 1217/1815] Add test case for smart serial number

---
 plugins/inputs/smart/smart.go      |  4 ---
 plugins/inputs/smart/smart_test.go | 54 +++++++++++++++++++++++++++---
 2 files changed, 50 insertions(+), 8 deletions(-)

diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go
index 35b44217f..c80e86859 100644
--- a/plugins/inputs/smart/smart.go
+++ b/plugins/inputs/smart/smart.go
@@ -119,7 +119,6 @@ type Smart struct {
 	Devices    []string
 	UseSudo    bool
 	Timeout    internal.Duration
-	Log        telegraf.Logger
 }
 
 var sampleConfig = `
@@ -209,10 +208,7 @@ func (m *Smart) scan() ([]string, error) {
 	for _, line := range strings.Split(string(out), "\n") {
 		dev := strings.Split(line, " ")
 		if len(dev) > 1 && !excludedDev(m.Excludes, strings.TrimSpace(dev[0])) {
-			m.Log.Debugf("Adding device: %+#v", dev)
 			devices = append(devices, strings.TrimSpace(dev[0]))
-		} else {
-			m.Log.Debugf("Skipping device: %+#v", dev)
 		}
 	}
 	return devices, nil
diff --git a/plugins/inputs/smart/smart_test.go b/plugins/inputs/smart/smart_test.go
index b0085d3fc..615ea9ba6 100644
--- a/plugins/inputs/smart/smart_test.go
+++ b/plugins/inputs/smart/smart_test.go
@@ -15,7 +15,6 @@ import (
 
 func TestGatherAttributes(t *testing.T) {
 	s := NewSmart()
-	s.Log = testutil.Logger{}
 	s.Path = "smartctl"
 	s.Attributes = true
 
@@ -331,7 +330,6 @@ func TestGatherAttributes(t *testing.T) {
 
 func TestGatherNoAttributes(t *testing.T) {
 	s := NewSmart()
-	s.Log = testutil.Logger{}
 	s.Path = "smartctl"
 	s.Attributes = false
 
@@ -440,8 +438,56 @@ func TestGatherHtSAS(t *testing.T) {
 	wg.Add(1)
 
 	gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg)
-	assert.Equal(t, 5, acc.NFields(), "Wrong number of fields gathered")
-	assert.Equal(t, uint64(3), acc.NMetrics(), "Wrong number of metrics gathered")
+
+	expected := []telegraf.Metric{
+		testutil.MustMetric(
+			"smart_attribute",
+			map[string]string{
+				"device":    ".",
+				"serial_no": "PDWAR9GE",
+				"enabled":   "Enabled",
+				"id":        "194",
+				"model":     "HUC103030CSS600",
+				"name":      "Temperature_Celsius",
+			},
+			map[string]interface{}{
+				"raw_value": 36,
+			},
+			time.Unix(0, 0),
+		),
+		testutil.MustMetric(
+			"smart_attribute",
+			map[string]string{
+				"device":    ".",
+				"serial_no": "PDWAR9GE",
+				"enabled":   "Enabled",
+				"id":        "4",
+				"model":     "HUC103030CSS600",
+				"name":      "Start_Stop_Count",
+			},
+			map[string]interface{}{
+				"raw_value": 47,
+			},
+			time.Unix(0, 0),
+		),
+		testutil.MustMetric(
+			"smart_device",
+			map[string]string{
+				"device":    ".",
+				"serial_no": "PDWAR9GE",
+				"enabled":   "Enabled",
+				"model":     "HUC103030CSS600",
+			},
+			map[string]interface{}{
+				"exit_status": 0,
+				"health_ok":   true,
+				"temp_c":      36,
+			},
+			time.Unix(0, 0),
+		),
+	}
+
+	testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime())
 }
 
 func TestGatherSSD(t *testing.T) {

From 07faceadd51d221016fe2dc96d29dbe80522bdb8 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 30 Sep 2019 10:44:12 -0700
Subject: [PATCH 1218/1815] Update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7b12bf83f..c89b16ff7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,7 @@
 
 - [#6445](https://github.com/influxdata/telegraf/issues/6445): Use batch serialization format in exec output.
 - [#6455](https://github.com/influxdata/telegraf/issues/6455): Build official packages with Go 1.12.10.
+- [#6464](https://github.com/influxdata/telegraf/pull/6464): Use case insensitive serial number match in smart input.
 
 ## v1.12.2 [2019-09-24]

From 68d11e01ab98f67693ef1b8d29237162cf49f032 Mon Sep 17 00:00:00 2001
From: Mark Wilkinson - m82labs
Date: Mon, 30 Sep 2019 19:50:33 -0400
Subject: [PATCH 1219/1815] Add more performance counter metrics to sqlserver input (#6465)

---
 plugins/inputs/sqlserver/sqlserver.go | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go
index 51e729a31..38a1b16e0 100644
--- a/plugins/inputs/sqlserver/sqlserver.go
+++ b/plugins/inputs/sqlserver/sqlserver.go
@@ -592,11 +592,16 @@ WHERE (
 			'Background Writer pages/sec',
 			'Percent Log Used',
 			'Log Send Queue KB',
-			'Redo Queue KB'
+			'Redo Queue KB',
+			'Mirrored Write Transactions/sec',
+			'Group Commit Time',
+			'Group Commits/sec'
 		)
 	) OR (
 		object_name LIKE '%User Settable%'
 		OR object_name LIKE '%SQL Errors%'
+	) OR (
+		object_name LIKE 'SQLServer:Batch Resp Statistics%'
 	) OR (
 		instance_name IN ('_Total')
 		AND counter_name IN (

From d75b5e5e10af9c960e5029d4cfefd9b342fe50ab Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 30 Sep 2019 16:51:31 -0700
Subject: [PATCH 1220/1815] Update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c89b16ff7..b5df30429 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,6 +17,7 @@
 - [#6177](https://github.com/influxdata/telegraf/pull/6177): Support NX-OS telemetry extensions in cisco_telemetry_mdt.
 - [#6415](https://github.com/influxdata/telegraf/pull/6415): Allow graphite parser to create Inf and NaN values.
 - [#6434](https://github.com/influxdata/telegraf/pull/6434): Use prefix base detection for ints in grok parser.
## v1.12.3 [unreleased] From 367dee791a107a43a7d99a8c95fb236c5d7849e8 Mon Sep 17 00:00:00 2001 From: David McKay Date: Mon, 30 Sep 2019 16:55:47 -0700 Subject: [PATCH 1221/1815] Add auth header only when env var is set (#6469) --- internal/config/config.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index d7fe11427..f01888499 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -841,13 +841,14 @@ func loadConfig(config string) ([]byte, error) { } func fetchConfig(u *url.URL) ([]byte, error) { - v := os.Getenv("INFLUX_TOKEN") - req, err := http.NewRequest("GET", u.String(), nil) if err != nil { return nil, err } - req.Header.Add("Authorization", "Token "+v) + + if v, exists := os.LookupEnv("INFLUX_TOKEN"); exists { + req.Header.Add("Authorization", "Token "+v) + } req.Header.Add("Accept", "application/toml") resp, err := http.DefaultClient.Do(req) if err != nil { From e41d90080a4eb2e14fc8403924a9b68ecbaf0ad6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 30 Sep 2019 16:56:58 -0700 Subject: [PATCH 1222/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b5df30429..334949d4d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,7 @@ - [#6445](https://github.com/influxdata/telegraf/issues/6445): Use batch serialization format in exec output. - [#6455](https://github.com/influxdata/telegraf/issues/6455): Build official packages with Go 1.12.10. - [#6464](https://github.com/influxdata/telegraf/pull/6464): Use case insensitive serial numer match in smart input. +- [#6469](https://github.com/influxdata/telegraf/pull/6469): Add auth header only when env var is set. ## v1.12.2 [2019-09-24] From 8eb8643a3a06974fa2a60af87802ea9455f6f18c Mon Sep 17 00:00:00 2001 From: George Date: Fri, 4 Oct 2019 19:30:43 +0100 Subject: [PATCH 1223/1815] Add CLA check GitHub action (#6479) --- .github/workflows/main.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 .github/workflows/main.yml diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 000000000..d638476cc --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,16 @@ +on: + pull_request: + types: [opened] + +jobs: + cla-checker: + runs-on: ubuntu-latest + name: "Check CLA" + steps: + - name: "Lookup PR Author" + uses: influxdata/clasee@v1 + with: + spreadsheet: "1jnRZYSw83oa6hcEBb1lxK6nNvXrWnOzPT8Bz9iR4Q8s" + range: "Form Responses!E:E" + env: + CLASEE_SECRET: ${{ secrets.CLASEE_SECRET }} From c26aeb871df5fb23d872bb93b95b353cc009cee7 Mon Sep 17 00:00:00 2001 From: GeorgeJahad Date: Fri, 4 Oct 2019 12:18:34 -0700 Subject: [PATCH 1224/1815] Remove package level vars from sqlserver and mysql input plugins (#6468) --- plugins/inputs/mysql/mysql.go | 25 +++++---- plugins/inputs/mysql/mysql_test.go | 48 +++++++++++++++++ plugins/inputs/sqlserver/sqlserver.go | 29 +++++----- plugins/inputs/sqlserver/sqlserver_test.go | 61 +++++++++++++++++++++- 4 files changed, 133 insertions(+), 30 deletions(-) diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 0516e22b7..4b6bae1ad 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -39,9 +39,12 @@ type Mysql struct { IntervalSlow string `toml:"interval_slow"` MetricVersion int `toml:"metric_version"` tls.ClientConfig + lastT time.Time + initDone bool + scanIntervalSlow uint32 } -var sampleConfig = ` +const 
sampleConfig = ` ## specify servers via a url matching: ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]] ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name @@ -123,7 +126,7 @@ var sampleConfig = ` # insecure_skip_verify = false ` -var defaultTimeout = time.Second * time.Duration(5) +const defaultTimeout = time.Second * time.Duration(5) func (m *Mysql) SampleConfig() string { return sampleConfig @@ -133,21 +136,16 @@ func (m *Mysql) Description() string { return "Read metrics from one or many mysql servers" } -var ( - localhost = "" - lastT time.Time - initDone = false - scanIntervalSlow uint32 -) +const localhost = "" func (m *Mysql) InitMysql() { if len(m.IntervalSlow) > 0 { interval, err := time.ParseDuration(m.IntervalSlow) if err == nil && interval.Seconds() >= 1.0 { - scanIntervalSlow = uint32(interval.Seconds()) + m.scanIntervalSlow = uint32(interval.Seconds()) } } - initDone = true + m.initDone = true } func (m *Mysql) Gather(acc telegraf.Accumulator) error { @@ -156,7 +154,7 @@ func (m *Mysql) Gather(acc telegraf.Accumulator) error { return m.gatherServer(localhost, acc) } // Initialise additional query intervals - if !initDone { + if !m.initDone { m.InitMysql() } @@ -184,6 +182,7 @@ func (m *Mysql) Gather(acc telegraf.Accumulator) error { return nil } +// These are const but can't be declared as such because golang doesn't allow const maps var ( // status counter generalThreadStates = map[string]uint32{ @@ -426,12 +425,12 @@ func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error { // Global Variables may be gathered less often if len(m.IntervalSlow) > 0 { - if uint32(time.Since(lastT).Seconds()) >= scanIntervalSlow { + if uint32(time.Since(m.lastT).Seconds()) >= m.scanIntervalSlow { err = m.gatherGlobalVariables(db, serv, acc) if err != nil { return err } - lastT = time.Now() + m.lastT = time.Now() } } diff --git a/plugins/inputs/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go index b4983ba0e..be9c338bf 100644 --- a/plugins/inputs/mysql/mysql_test.go +++ b/plugins/inputs/mysql/mysql_test.go @@ -26,6 +26,54 @@ func TestMysqlDefaultsToLocal(t *testing.T) { assert.True(t, acc.HasMeasurement("mysql")) } +func TestMysqlMultipleInstances(t *testing.T) { + // Invoke Gather() from two separate configurations and + // confirm they don't interfere with each other + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + testServer := "root@tcp(127.0.0.1:3306)/?tls=false" + m := &Mysql{ + Servers: []string{testServer}, + IntervalSlow: "30s", + } + + var acc, acc2 testutil.Accumulator + err := m.Gather(&acc) + require.NoError(t, err) + assert.True(t, acc.HasMeasurement("mysql")) + // acc should have global variables + assert.True(t, acc.HasMeasurement("mysql_variables")) + + m2 := &Mysql{ + Servers: []string{testServer}, + } + err = m2.Gather(&acc2) + require.NoError(t, err) + assert.True(t, acc2.HasMeasurement("mysql")) + // acc2 should not have global variables + assert.False(t, acc2.HasMeasurement("mysql_variables")) +} + +func TestMysqlMultipleInits(t *testing.T) { + m := &Mysql{ + IntervalSlow: "30s", + } + m2 := &Mysql{} + + m.InitMysql() + assert.True(t, m.initDone) + assert.False(t, m2.initDone) + assert.Equal(t, m.scanIntervalSlow, uint32(30)) + assert.Equal(t, m2.scanIntervalSlow, uint32(0)) + + m2.InitMysql() + assert.True(t, m.initDone) + assert.True(t, m2.initDone) + assert.Equal(t, m.scanIntervalSlow, uint32(30)) + assert.Equal(t, m2.scanIntervalSlow, uint32(0)) +} + func 
TestMysqlGetDSNTag(t *testing.T) { tests := []struct { input string diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 38a1b16e0..2aaccd871 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -12,10 +12,12 @@ import ( // SQLServer struct type SQLServer struct { - Servers []string `toml:"servers"` - QueryVersion int `toml:"query_version"` - AzureDB bool `toml:"azuredb"` - ExcludeQuery []string `toml:"exclude_query"` + Servers []string `toml:"servers"` + QueryVersion int `toml:"query_version"` + AzureDB bool `toml:"azuredb"` + ExcludeQuery []string `toml:"exclude_query"` + queries MapQuery + isInitialized bool } // Query struct @@ -28,14 +30,9 @@ type Query struct { // MapQuery type type MapQuery map[string]Query -var queries MapQuery +const defaultServer = "Server=.;app name=telegraf;log=1;" -// Initialized flag -var isInitialized = false - -var defaultServer = "Server=.;app name=telegraf;log=1;" - -var sampleConfig = ` +const sampleConfig = ` ## Specify instances to monitor with a list of connection strings. ## All connection parameters are optional. ## By default, the host is localhost, listening on default port, TCP 1433. @@ -89,8 +86,8 @@ type scanner interface { } func initQueries(s *SQLServer) { - queries = make(MapQuery) - + s.queries = make(MapQuery) + queries := s.queries // If this is an AzureDB instance, grab some extra metrics if s.AzureDB { queries["AzureDBResourceStats"] = Query{Script: sqlAzureDBResourceStats, ResultByRow: false} @@ -124,12 +121,12 @@ func initQueries(s *SQLServer) { } // Set a flag so we know that queries have already been initialized - isInitialized = true + s.isInitialized = true } // Gather collect data from SQL Server func (s *SQLServer) Gather(acc telegraf.Accumulator) error { - if !isInitialized { + if !s.isInitialized { initQueries(s) } @@ -140,7 +137,7 @@ func (s *SQLServer) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup for _, serv := range s.Servers { - for _, query := range queries { + for _, query := range s.queries { wg.Add(1) go func(serv string, query Query) { defer wg.Done() diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go index 063af7595..b493fb13c 100644 --- a/plugins/inputs/sqlserver/sqlserver_test.go +++ b/plugins/inputs/sqlserver/sqlserver_test.go @@ -1,6 +1,7 @@ package sqlserver import ( + "github.com/stretchr/testify/assert" "strconv" "strings" "testing" @@ -14,7 +15,7 @@ func TestSqlServer_ParseMetrics(t *testing.T) { var acc testutil.Accumulator - queries = make(MapQuery) + queries := make(MapQuery) queries["PerformanceCounters"] = Query{Script: mockPerformanceCounters, ResultByRow: true} queries["WaitStatsCategorized"] = Query{Script: mockWaitStatsCategorized, ResultByRow: false} queries["CPUHistory"] = Query{Script: mockCPUHistory, ResultByRow: false} @@ -81,6 +82,64 @@ func TestSqlServer_ParseMetrics(t *testing.T) { } } +func TestSqlServer_MultipleInstance(t *testing.T) { + // Invoke Gather() from two separate configurations and + // confirm they don't interfere with each other + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + testServer := "Server=127.0.0.1;Port=1433;User Id=SA;Password=ABCabc01;app name=telegraf;log=1" + s := &SQLServer{ + Servers: []string{testServer}, + ExcludeQuery: []string{"MemoryClerk"}, + } + s2 := &SQLServer{ + Servers: []string{testServer}, + ExcludeQuery: []string{"DatabaseSize"}, + } + + var acc, acc2 
testutil.Accumulator + err := s.Gather(&acc) + require.NoError(t, err) + assert.Equal(t, s.isInitialized, true) + assert.Equal(t, s2.isInitialized, false) + + err = s2.Gather(&acc2) + require.NoError(t, err) + assert.Equal(t, s.isInitialized, true) + assert.Equal(t, s2.isInitialized, true) + + // acc includes size metrics, and excludes memory metrics + assert.False(t, acc.HasMeasurement("Memory breakdown (%)")) + assert.True(t, acc.HasMeasurement("Log size (bytes)")) + + // acc2 includes memory metrics, and excludes size metrics + assert.True(t, acc2.HasMeasurement("Memory breakdown (%)")) + assert.False(t, acc2.HasMeasurement("Log size (bytes)")) +} + +func TestSqlServer_MultipleInit(t *testing.T) { + + s := &SQLServer{} + s2 := &SQLServer{ + ExcludeQuery: []string{"DatabaseSize"}, + } + + initQueries(s) + _, ok := s.queries["DatabaseSize"] + // acc includes size metrics + assert.True(t, ok) + assert.Equal(t, s.isInitialized, true) + assert.Equal(t, s2.isInitialized, false) + + initQueries(s2) + _, ok = s2.queries["DatabaseSize"] + // acc2 excludes size metrics + assert.False(t, ok) + assert.Equal(t, s.isInitialized, true) + assert.Equal(t, s2.isInitialized, true) +} + const mockPerformanceMetrics = `measurement;servername;type;Point In Time Recovery;Available physical memory (bytes);Average pending disk IO;Average runnable tasks;Average tasks;Buffer pool rate (bytes/sec);Connection memory per connection (bytes);Memory grant pending;Page File Usage (%);Page lookup per batch request;Page split per batch request;Readahead per page read;Signal wait (%);Sql compilation per batch request;Sql recompilation per batch request;Total target memory ratio Performance metrics;WIN8-DEV;Performance metrics;0;6353158144;0;0;7;2773;415061;0;25;229371;130;10;18;188;52;14` From ddd79762acee1d0aeefbdccf2cc5e896d6d2106a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 4 Oct 2019 17:08:58 -0700 Subject: [PATCH 1225/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 334949d4d..b2f8a720f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ - [#6455](https://github.com/influxdata/telegraf/issues/6455): Build official packages with Go 1.12.10. - [#6464](https://github.com/influxdata/telegraf/pull/6464): Use case insensitive serial numer match in smart input. - [#6469](https://github.com/influxdata/telegraf/pull/6469): Add auth header only when env var is set. +- [#6468](https://github.com/influxdata/telegraf/pull/6468): Fix running multiple mysql and sqlserver plugin instances. 
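[Editor's note] The failure mode PATCH 1224 removes is easiest to see with a toy program. This is an editorial sketch, not Telegraf code: with a package-level flag, the first instance's initialization is visible to every other instance, which is exactly the cross-talk the patch eliminates by moving `initDone` and `scanIntervalSlow` onto the struct (and what the new `TestMysqlMultipleInits` asserts).

```go
package main

import "fmt"

// Before the patch: one flag shared by the whole package.
var sharedInitDone bool

type pluginBefore struct{}

func (p *pluginBefore) Init() { sharedInitDone = true }

// After the patch: each instance carries its own flag.
type pluginAfter struct{ initDone bool }

func (p *pluginAfter) Init() { p.initDone = true }

func main() {
	a, b := &pluginBefore{}, &pluginBefore{}
	a.Init()
	_ = b // b was never initialized, yet the shared flag says otherwise
	fmt.Println("shared flag:", sharedInitDone) // true

	c, d := &pluginAfter{}, &pluginAfter{}
	c.Init()
	fmt.Println("c:", c.initDone, "d:", d.initDone) // c: true d: false
}
```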
## v1.12.2 [2019-09-24] From b9a4ef7484b12a4750dd8191fa23bc1962019593 Mon Sep 17 00:00:00 2001 From: Rajiv Kushwaha Date: Sat, 5 Oct 2019 06:08:48 +0530 Subject: [PATCH 1226/1815] Add millisecond unix time support to grok parser (#6476) --- plugins/inputs/logparser/README.md | 1 + plugins/inputs/logparser/logparser_test.go | 36 ++++++++++++++++++- .../inputs/logparser/testdata/test-patterns | 4 +++ plugins/inputs/logparser/testdata/test_c.log | 1 + plugins/parsers/grok/README.md | 1 + plugins/parsers/grok/parser.go | 20 ++++++++--- plugins/parsers/grok/parser_test.go | 22 ++++++++++++ 7 files changed, 79 insertions(+), 6 deletions(-) create mode 100644 plugins/inputs/logparser/testdata/test_c.log diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index efd50952f..22250ff45 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -105,6 +105,7 @@ Patterns that convert all captures to tags will result in points that can't be w - ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00") - ts-httpd ("02/Jan/2006:15:04:05 -0700") - ts-epoch (seconds since unix epoch, may contain decimal) + - ts-epochmilli (milliseconds since unix epoch, may contain decimal) - ts-epochnano (nanoseconds since unix epoch) - ts-syslog ("Jan 02 15:04:05", parsed time is set to the current year) - ts-"CUSTOM" diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go index 1ecbd39ff..8342e38ee 100644 --- a/plugins/inputs/logparser/logparser_test.go +++ b/plugins/inputs/logparser/logparser_test.go @@ -48,7 +48,7 @@ func TestGrokParseLogFiles(t *testing.T) { Log: testutil.Logger{}, GrokConfig: GrokConfig{ MeasurementName: "logparser_grok", - Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, + Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}", "%{TEST_LOG_C}"}, CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, }, FromBeginning: true, @@ -162,6 +162,40 @@ func TestGrokParseLogFilesOneBad(t *testing.T) { }) } +func TestGrokParseLogFiles_TimestampInEpochMilli(t *testing.T) { + thisdir := getCurrentDir() + + logparser := &LogParserPlugin{ + Log: testutil.Logger{}, + GrokConfig: GrokConfig{ + MeasurementName: "logparser_grok", + Patterns: []string{"%{TEST_LOG_C}"}, + CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + }, + FromBeginning: true, + Files: []string{thisdir + "testdata/test_c.log"}, + } + + acc := testutil.Accumulator{} + acc.SetDebug(true) + assert.NoError(t, logparser.Start(&acc)) + acc.Wait(1) + + logparser.Stop() + + acc.AssertContainsTaggedFields(t, "logparser_grok", + map[string]interface{}{ + "clientip": "192.168.1.1", + "myfloat": float64(1.25), + "response_time": int64(5432), + "myint": int64(101), + }, + map[string]string{ + "response_code": "200", + "path": thisdir + "testdata/test_c.log", + }) +} + func getCurrentDir() string { _, filename, _, _ := runtime.Caller(1) return strings.Replace(filename, "logparser_test.go", "", 1) diff --git a/plugins/inputs/logparser/testdata/test-patterns b/plugins/inputs/logparser/testdata/test-patterns index ba995fbd1..45970a9c8 100644 --- a/plugins/inputs/logparser/testdata/test-patterns +++ b/plugins/inputs/logparser/testdata/test-patterns @@ -12,3 +12,7 @@ TEST_LOG_B \[%{TEST_TIMESTAMP:timestamp:ts-"02/01/2006--15:04:05"}\] %{NUMBER:my TEST_TIMESTAMP %{MONTHDAY}/%{MONTHNUM}/%{YEAR}--%{TIME} TEST_LOG_BAD \[%{TEST_TIMESTAMP:timestamp:ts-"02/01/2006--15:04:05"}\] %{NUMBER:myfloat:float} %{WORD:mystring:int} 
%{WORD:dropme:drop} %{WORD:nomodifier} + +# Test C log line: +# 1568723594631 1.25 200 192.168.1.1 5.432µs 101 +TEST_LOG_C %{POSINT:timestamp:ts-epochmilli} %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME} %{NUMBER:myint:int} diff --git a/plugins/inputs/logparser/testdata/test_c.log b/plugins/inputs/logparser/testdata/test_c.log new file mode 100644 index 000000000..f814c0c30 --- /dev/null +++ b/plugins/inputs/logparser/testdata/test_c.log @@ -0,0 +1 @@ +1568723594631 1.25 200 192.168.1.1 5.432µs 101 diff --git a/plugins/parsers/grok/README.md b/plugins/parsers/grok/README.md index 6263eecc9..14c128f16 100644 --- a/plugins/parsers/grok/README.md +++ b/plugins/parsers/grok/README.md @@ -50,6 +50,7 @@ You must capture at least one field per line. - ts-httpd ("02/Jan/2006:15:04:05 -0700") - ts-epoch (seconds since unix epoch, may contain decimal) - ts-epochnano (nanoseconds since unix epoch) + - ts-epochmilli (milliseconds since unix epoch) - ts-syslog ("Jan 02 15:04:05", parsed time is set to the current year) - ts-"CUSTOM" diff --git a/plugins/parsers/grok/parser.go b/plugins/parsers/grok/parser.go index ce9c0af59..60eff1afe 100644 --- a/plugins/parsers/grok/parser.go +++ b/plugins/parsers/grok/parser.go @@ -28,12 +28,13 @@ var timeLayouts = map[string]string{ "ts-rfc3339": "2006-01-02T15:04:05Z07:00", "ts-rfc3339nano": "2006-01-02T15:04:05.999999999Z07:00", "ts-httpd": "02/Jan/2006:15:04:05 -0700", - // These three are not exactly "layouts", but they are special cases that + // These four are not exactly "layouts", but they are special cases that // will get handled in the ParseLine function. - "ts-epoch": "EPOCH", - "ts-epochnano": "EPOCH_NANO", - "ts-syslog": "SYSLOG_TIMESTAMP", - "ts": "GENERIC_TIMESTAMP", // try parsing all known timestamp layouts. + "ts-epoch": "EPOCH", + "ts-epochnano": "EPOCH_NANO", + "ts-epochmilli": "EPOCH_MILLI", + "ts-syslog": "SYSLOG_TIMESTAMP", + "ts": "GENERIC_TIMESTAMP", // try parsing all known timestamp layouts. } const ( @@ -45,6 +46,7 @@ const ( DURATION = "duration" DROP = "drop" EPOCH = "EPOCH" + EPOCH_MILLI = "EPOCH_MILLI" EPOCH_NANO = "EPOCH_NANO" SYSLOG_TIMESTAMP = "SYSLOG_TIMESTAMP" GENERIC_TIMESTAMP = "GENERIC_TIMESTAMP" @@ -297,6 +299,14 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { ts = ts.Add(time.Duration(nanosec) * time.Nanosecond) } timestamp = ts + case EPOCH_MILLI: + ms, err := strconv.ParseInt(v, 10, 64) + if err != nil { + log.Printf("E! 
Error parsing %s to int: %s", v, err) + } else { + timestamp = time.Unix(0, ms*int64(time.Millisecond)) + fmt.Println(timestamp) + } case EPOCH_NANO: iv, err := strconv.ParseInt(v, 10, 64) if err != nil { diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go index e0b9575cb..ec5e47388 100644 --- a/plugins/parsers/grok/parser_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -277,6 +277,28 @@ func TestParsePatternsWithoutCustom(t *testing.T) { assert.Equal(t, time.Unix(0, 1466004605359052000), metricA.Time()) } +func TestParseEpochMilli(t *testing.T) { + p := &Parser{ + Patterns: []string{"%{MYAPP}"}, + CustomPatterns: ` + MYAPP %{POSINT:ts:ts-epochmilli} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float} + `, + } + assert.NoError(t, p.Compile()) + + metricA, err := p.ParseLine(`1568540909963 response_time=20821 mymetric=10890.645`) + require.NotNil(t, metricA) + assert.NoError(t, err) + assert.Equal(t, + map[string]interface{}{ + "response_time": int64(20821), + "metric": float64(10890.645), + }, + metricA.Fields()) + assert.Equal(t, map[string]string{}, metricA.Tags()) + assert.Equal(t, time.Unix(0, 1568540909963000000), metricA.Time()) +} + func TestParseEpochNano(t *testing.T) { p := &Parser{ Patterns: []string{"%{MYAPP}"}, From d71c8ed3b984394990e19a21c55495f5a6c308a0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 4 Oct 2019 17:39:55 -0700 Subject: [PATCH 1227/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b2f8a720f..f4af75e6f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ - [#6415](https://github.com/influxdata/telegraf/pull/6415): Allow graphite parser to create Inf and NaN values. - [#6434](https://github.com/influxdata/telegraf/pull/6434): Use prefix base detection for ints in grok parser. - [#6465](https://github.com/influxdata/telegraf/pull/6465): Add more performance counter metrics to sqlserver input. +- [#6476](https://github.com/influxdata/telegraf/pull/6476): Add millisecond unix time support to grok parser. ## v1.12.3 [unreleased] From 47fd285b4afcc285c75ee70bd9b12ae1f04b117a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 7 Oct 2019 12:13:39 -0700 Subject: [PATCH 1228/1815] Fix database routing on retry with exclude_database_tag (#6486) --- plugins/outputs/influxdb/http.go | 3 ++ plugins/outputs/influxdb/http_test.go | 58 +++++++++++++++++++++ plugins/outputs/influxdb_v2/http.go | 3 ++ plugins/outputs/influxdb_v2/http_test.go | 64 ++++++++++++++++++++++++ 4 files changed, 128 insertions(+) diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 7d26ddeb5..b30a8206d 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -255,6 +255,9 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error } if c.config.ExcludeDatabaseTag { + // Avoid modifying the metric in case we need to retry the request. 
+ metric = metric.Copy() + metric.Accept() metric.RemoveTag(c.config.DatabaseTag) } diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index e4acb1641..a09b02d43 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -675,3 +675,61 @@ func TestHTTP_UnixSocket(t *testing.T) { }) } } + +func TestHTTP_WriteDatabaseTagWorksOnRetry(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + r.ParseForm() + require.Equal(t, r.Form["db"], []string{"foo"}) + + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + require.Contains(t, string(body), "cpu value=42") + + w.WriteHeader(http.StatusNoContent) + return + default: + w.WriteHeader(http.StatusNotFound) + return + } + }), + ) + defer ts.Close() + + addr := &url.URL{ + Scheme: "http", + Host: ts.Listener.Addr().String(), + } + + config := influxdb.HTTPConfig{ + URL: addr, + Database: "telegraf", + DatabaseTag: "database", + ExcludeDatabaseTag: true, + Log: testutil.Logger{}, + } + + client, err := influxdb.NewHTTPClient(config) + require.NoError(t, err) + + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "database": "foo", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + } + + ctx := context.Background() + err = client.Write(ctx, metrics) + require.NoError(t, err) + err = client.Write(ctx, metrics) + require.NoError(t, err) +} diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index fbfdf6958..b8706c9a5 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -189,6 +189,9 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error } if c.ExcludeBucketTag { + // Avoid modifying the metric in case we need to retry the request. 
+ metric = metric.Copy() + metric.Accept() metric.RemoveTag(c.BucketTag) } diff --git a/plugins/outputs/influxdb_v2/http_test.go b/plugins/outputs/influxdb_v2/http_test.go index 33ff9e24b..23c3ff05e 100644 --- a/plugins/outputs/influxdb_v2/http_test.go +++ b/plugins/outputs/influxdb_v2/http_test.go @@ -1,10 +1,17 @@ package influxdb_v2_test import ( + "context" + "io/ioutil" + "net/http" + "net/http/httptest" "net/url" "testing" + "time" + "github.com/influxdata/telegraf" influxdb "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -47,3 +54,60 @@ func TestNewHTTPClient(t *testing.T) { } } } + +func TestWriteBucketTagWorksOnRetry(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/api/v2/write": + r.ParseForm() + require.Equal(t, r.Form["bucket"], []string{"foo"}) + + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + require.Contains(t, string(body), "cpu value=42") + + w.WriteHeader(http.StatusNoContent) + return + default: + w.WriteHeader(http.StatusNotFound) + return + } + }), + ) + defer ts.Close() + + addr := &url.URL{ + Scheme: "http", + Host: ts.Listener.Addr().String(), + } + + config := &influxdb.HTTPConfig{ + URL: addr, + Bucket: "telegraf", + BucketTag: "bucket", + ExcludeBucketTag: true, + } + + client, err := influxdb.NewHTTPClient(config) + require.NoError(t, err) + + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "bucket": "foo", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + } + + ctx := context.Background() + err = client.Write(ctx, metrics) + require.NoError(t, err) + err = client.Write(ctx, metrics) + require.NoError(t, err) +} From d7988915e9af68411c3ee0946ceb2cc58501e635 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 7 Oct 2019 12:17:33 -0700 Subject: [PATCH 1229/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f4af75e6f..e1cf75150 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,7 @@ - [#6464](https://github.com/influxdata/telegraf/pull/6464): Use case insensitive serial numer match in smart input. - [#6469](https://github.com/influxdata/telegraf/pull/6469): Add auth header only when env var is set. - [#6468](https://github.com/influxdata/telegraf/pull/6468): Fix running multiple mysql and sqlserver plugin instances. +- [#6471](https://github.com/influxdata/telegraf/issues/6471): Fix database routing on retry with exclude_database_tag. 
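[Editor's note] Both hunks in PATCH 1228 apply the same fix, so one editorial sketch covers them. It uses a plain map in place of Telegraf's metric type: if the output strips the routing tag from the shared metric, a retried write can no longer resolve its destination, while copying first preserves it. (The real hunks also call `Accept()` on the copy, which this sketch does not model.)

```go
package main

import "fmt"

type metric struct{ tags map[string]string }

// write routes by the "database" tag and then strips it, as the
// exclude_database_tag option does.
func write(m *metric, copyFirst bool) string {
	if copyFirst {
		tags := make(map[string]string, len(m.tags))
		for k, v := range m.tags {
			tags[k] = v
		}
		m = &metric{tags: tags} // work on a copy, as metric.Copy() does
	}
	db := m.tags["database"]
	delete(m.tags, "database")
	return db
}

func main() {
	m := &metric{tags: map[string]string{"database": "foo"}}
	fmt.Println(write(m, false), write(m, false)) // foo, then empty: tag lost on retry
	n := &metric{tags: map[string]string{"database": "foo"}}
	fmt.Println(write(n, true), write(n, true)) // foo foo: the copy keeps routing intact
}
```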
## v1.12.2 [2019-09-24] From 74c7dd3ce70552855ef9000263746e7fd3b430df Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 7 Oct 2019 12:18:36 -0700 Subject: [PATCH 1230/1815] Fix logger initialization in exec input (#6492) --- internal/config/config_test.go | 5 +++++ plugins/inputs/exec/exec.go | 7 +++---- plugins/inputs/exec/exec_test.go | 6 +++--- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/internal/config/config_test.go b/internal/config/config_test.go index f05419eef..7559bf9fe 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -158,6 +158,11 @@ func TestConfig_LoadDirectory(t *testing.T) { MeasurementSuffix: "_myothercollector", } eConfig.Tags = make(map[string]string) + + exec := c.Inputs[1].Input.(*exec.Exec) + require.NotNil(t, exec.Log) + exec.Log = nil + assert.Equal(t, ex, c.Inputs[1].Input, "Merged Testdata did not produce a correct exec struct.") assert.Equal(t, eConfig, c.Inputs[1].Config, diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index 3176b5a6a..cb4420b0f 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -10,13 +10,12 @@ import ( "sync" "time" - "github.com/kballard/go-shellquote" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers/nagios" + "github.com/kballard/go-shellquote" ) const sampleConfig = ` @@ -50,7 +49,7 @@ type Exec struct { parser parsers.Parser runner Runner - log telegraf.Logger + Log telegraf.Logger `toml:"-"` } func NewExec() *Exec { @@ -161,7 +160,7 @@ func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync if isNagios { metrics, err = nagios.TryAddState(runErr, metrics) if err != nil { - e.log.Errorf("Failed to add nagios state: %s", err) + e.Log.Errorf("Failed to add nagios state: %s", err) } } diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index 0523a181d..d0fcc71f6 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -96,7 +96,7 @@ func TestExec(t *testing.T) { MetricName: "exec", }) e := &Exec{ - log: testutil.Logger{}, + Log: testutil.Logger{}, runner: newRunnerMock([]byte(validJson), nil, nil), Commands: []string{"testcommand arg1"}, parser: parser, @@ -126,7 +126,7 @@ func TestExecMalformed(t *testing.T) { MetricName: "exec", }) e := &Exec{ - log: testutil.Logger{}, + Log: testutil.Logger{}, runner: newRunnerMock([]byte(malformedJson), nil, nil), Commands: []string{"badcommand arg1"}, parser: parser, @@ -143,7 +143,7 @@ func TestCommandError(t *testing.T) { MetricName: "exec", }) e := &Exec{ - log: testutil.Logger{}, + Log: testutil.Logger{}, runner: newRunnerMock(nil, nil, fmt.Errorf("exit status code 1")), Commands: []string{"badcommand"}, parser: parser, From 6c4cce1705628041aaf9325db76e799c98c97485 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 7 Oct 2019 12:20:36 -0700 Subject: [PATCH 1231/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e1cf75150..36e0c1637 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ - [#6469](https://github.com/influxdata/telegraf/pull/6469): Add auth header only when env var is set. - [#6468](https://github.com/influxdata/telegraf/pull/6468): Fix running multiple mysql and sqlserver plugin instances. 
- [#6471](https://github.com/influxdata/telegraf/issues/6471): Fix database routing on retry with exclude_database_tag. +- [#6488](https://github.com/influxdata/telegraf/issues/6488): Fix logging panic in exec input with nagios data format. ## v1.12.2 [2019-09-24] From e7cf8319b04843dda0e1be3e0d20906fed79f15f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 7 Oct 2019 12:31:43 -0700 Subject: [PATCH 1232/1815] Set 1.12.3 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 36e0c1637..2e21bf227 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,7 +20,7 @@ - [#6465](https://github.com/influxdata/telegraf/pull/6465): Add more performance counter metrics to sqlserver input. - [#6476](https://github.com/influxdata/telegraf/pull/6476): Add millisecond unix time support to grok parser. -## v1.12.3 [unreleased] +## v1.12.3 [2019-10-07] #### Bugfixes From a5294fde32ece53726cec33a68a7f0300f0199a5 Mon Sep 17 00:00:00 2001 From: Randy Coburn Date: Tue, 8 Oct 2019 02:27:32 +0200 Subject: [PATCH 1233/1815] Add container id as optional source tag to docker and docker_log input (#6473) --- plugins/inputs/docker/README.md | 14 +++++++ plugins/inputs/docker/docker.go | 16 ++++++++ plugins/inputs/docker/docker_test.go | 40 +++++++++++++++++++- plugins/inputs/docker_log/README.md | 15 ++++++++ plugins/inputs/docker_log/docker_log.go | 15 ++++++++ plugins/inputs/docker_log/docker_log_test.go | 9 +++-- 6 files changed, 104 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index 1816107ea..6ec95b64f 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -26,6 +26,9 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/) ## Deprecated (1.4.0), use container_name_include container_names = [] + ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars + source_tag = false + ## Containers to include and exclude. Collect all if empty. Globs accepted. container_name_include = [] container_name_exclude = [] @@ -93,6 +96,17 @@ volumes: - /var/run/docker.sock:/var/run/docker.sock ``` +#### source tag + +Selecting the containers measurements can be tricky if you have many containers with the same name. +To alleviate this issue you can set the below value to `true` + +```toml +source_tag = true +``` + +This will cause all measurements to have the `source` tag be set to the first 12 characters of the container id. The first 12 characters is the common hostname for containers that have no explicit hostname set, as defined by docker. + #### Kubernetes Labels Kubernetes may add many labels to your containers, if they are not needed you diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index a3dc78bd4..02442baf0 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -44,6 +44,8 @@ type Docker struct { ContainerStateInclude []string `toml:"container_state_include"` ContainerStateExclude []string `toml:"container_state_exclude"` + IncludeSourceTag bool `toml:"source_tag"` + Log telegraf.Logger tlsint.ClientConfig @@ -90,6 +92,9 @@ var sampleConfig = ` ## Only collect metrics for these containers, collect all if empty container_names = [] + ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars + source_tag = false + ## Containers to include and exclude. Globs accepted. 
## Note that an empty array for both will include all containers container_name_include = [] @@ -412,6 +417,13 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { return nil } +func hostnameFromID(id string) string { + if len(id) > 12 { + return id[0:12] + } + return id +} + func (d *Docker) gatherContainer( container types.Container, acc telegraf.Accumulator, @@ -443,6 +455,10 @@ func (d *Docker) gatherContainer( "container_version": imageVersion, } + if d.IncludeSourceTag { + tags["source"] = hostnameFromID(container.ID) + } + ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) defer cancel() diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index 4add3340d..148228af4 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -629,8 +629,9 @@ func TestContainerStatus(t *testing.T) { return &client, nil } d = Docker{ - Log: testutil.Logger{}, - newClient: newClientFunc, + Log: testutil.Logger{}, + newClient: newClientFunc, + IncludeSourceTag: true, } ) @@ -673,6 +674,7 @@ func TestContainerStatus(t *testing.T) { "label2": "test_value_2", "server_version": "17.09.0-ce", "container_status": tt.expect.Status, + "source": "e2173b9478a6", }) }) } @@ -1017,3 +1019,37 @@ func TestContainerName(t *testing.T) { }) } } + +func TestHostnameFromID(t *testing.T) { + tests := []struct { + name string + id string + expect string + }{ + { + name: "Real ID", + id: "565e3a55f5843cfdd4aa5659a1a75e4e78d47f73c3c483f782fe4a26fc8caa07", + expect: "565e3a55f584", + }, + { + name: "Short ID", + id: "shortid123", + expect: "shortid123", + }, + { + name: "No ID", + id: "", + expect: "shortid123", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + output := hostnameFromID(test.id) + if test.expect != output { + t.Logf("Container ID for hostname is wrong. Want: %s, Got: %s", output, test.expect) + } + }) + } + +} diff --git a/plugins/inputs/docker_log/README.md b/plugins/inputs/docker_log/README.md index 02f44e14c..d2f0dc614 100644 --- a/plugins/inputs/docker_log/README.md +++ b/plugins/inputs/docker_log/README.md @@ -43,6 +43,9 @@ The docker plugin uses the [Official Docker Client][] to gather logs from the # docker_label_include = [] # docker_label_exclude = [] + ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars + source_tag = false + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -58,6 +61,17 @@ When using the `"ENV"` endpoint, the connection is configured using the [env]: https://godoc.org/github.com/moby/moby/client#NewEnvClient +### source tag + +Selecting the containers can be tricky if you have many containers with the same name. +To alleviate this issue you can set the below value to `true` + +```toml +source_tag = true +``` + +This will cause all data points to have the `source` tag be set to the first 12 characters of the container id. The first 12 characters is the common hostname for containers that have no explicit hostname set, as defined by docker. 
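[Editor's note] To make the paragraph above concrete, here is a standalone editorial sketch of the opt-in tagging both plugins now share: `hostnameFromID` mirrors the truncation helper added in the patch, and the long container ID is an illustrative value whose first 12 characters match the `source` expectation in the plugin's tests.

```go
package main

import "fmt"

// hostnameFromID mirrors the helper added in this patch: Docker's default
// container hostname is the first 12 characters of the container ID.
func hostnameFromID(id string) string {
	if len(id) > 12 {
		return id[0:12]
	}
	return id
}

type gatherer struct{ IncludeSourceTag bool } // the new source_tag option

func (g *gatherer) tags(containerID, name string) map[string]string {
	t := map[string]string{"container_name": name}
	if g.IncludeSourceTag {
		t["source"] = hostnameFromID(containerID)
	}
	return t
}

func main() {
	id := "e2173b9478a6ae55f237b4d46a5cad1f4f02967b2d2b9a2b4a4e4a9d4f1c2d3e"
	on := &gatherer{IncludeSourceTag: true}
	off := &gatherer{}
	fmt.Println(on.tags(id, "telegraf"))  // map[container_name:telegraf source:e2173b9478a6]
	fmt.Println(off.tags(id, "telegraf")) // map[container_name:telegraf]
}
```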
+ ### Metrics - docker_log @@ -66,6 +80,7 @@ When using the `"ENV"` endpoint, the connection is configured using the - container_version - container_name - stream (stdout, stderr, or tty) + - source - fields: - container_id - message diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go index 6a675219f..81268f5f5 100644 --- a/plugins/inputs/docker_log/docker_log.go +++ b/plugins/inputs/docker_log/docker_log.go @@ -49,6 +49,9 @@ var sampleConfig = ` # docker_label_include = [] # docker_label_exclude = [] + ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars + source_tag = false + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -82,6 +85,7 @@ type DockerLogs struct { ContainerExclude []string `toml:"container_name_exclude"` ContainerStateInclude []string `toml:"container_state_include"` ContainerStateExclude []string `toml:"container_state_exclude"` + IncludeSourceTag bool `toml:"source_tag"` tlsint.ClientConfig @@ -258,6 +262,10 @@ func (d *DockerLogs) tailContainerLogs( "container_version": imageVersion, } + if d.IncludeSourceTag { + tags["source"] = hostnameFromID(container.ID) + } + // Add matching container labels as tags for k, label := range container.Labels { if d.labelFilter.Match(k) { @@ -435,3 +443,10 @@ func init() { } }) } + +func hostnameFromID(id string) string { + if len(id) > 12 { + return id[0:12] + } + return id +} diff --git a/plugins/inputs/docker_log/docker_log_test.go b/plugins/inputs/docker_log/docker_log_test.go index ce61f6135..11cf0befd 100644 --- a/plugins/inputs/docker_log/docker_log_test.go +++ b/plugins/inputs/docker_log/docker_log_test.go @@ -98,6 +98,7 @@ func Test(t *testing.T) { "container_image": "influxdata/telegraf", "container_version": "1.11.0", "stream": "tty", + "source": "deadbeef", }, map[string]interface{}{ "container_id": "deadbeef", @@ -141,6 +142,7 @@ func Test(t *testing.T) { "container_image": "influxdata/telegraf", "container_version": "1.11.0", "stream": "stdout", + "source": "deadbeef", }, map[string]interface{}{ "container_id": "deadbeef", @@ -155,9 +157,10 @@ func Test(t *testing.T) { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator plugin := &DockerLogs{ - Timeout: internal.Duration{Duration: time.Second * 5}, - newClient: func(string, *tls.Config) (Client, error) { return tt.client, nil }, - containerList: make(map[string]context.CancelFunc), + Timeout: internal.Duration{Duration: time.Second * 5}, + newClient: func(string, *tls.Config) (Client, error) { return tt.client, nil }, + containerList: make(map[string]context.CancelFunc), + IncludeSourceTag: true, } err := plugin.Init() From c67674eff35834206419296f88d6546e2710c064 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 7 Oct 2019 17:29:55 -0700 Subject: [PATCH 1234/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2e21bf227..43ac797df 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ - [#6434](https://github.com/influxdata/telegraf/pull/6434): Use prefix base detection for ints in grok parser. - [#6465](https://github.com/influxdata/telegraf/pull/6465): Add more performance counter metrics to sqlserver input. - [#6476](https://github.com/influxdata/telegraf/pull/6476): Add millisecond unix time support to grok parser. +- [#6473](https://github.com/influxdata/telegraf/pull/6473): Add container id as optional source tag to docker and docker_log input. 
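[Editor's note] One more editorial sketch, this time of the seam the docker_log test above relies on: the plugin stores a `newClient` factory, so tests can return a mock instead of dialing a real Docker engine. Type and function names here are illustrative, not the plugin's real API.

```go
package main

import "fmt"

type Client interface{ Info() string }

type engineClient struct{ host string }

func (c *engineClient) Info() string { return "engine at " + c.host }

type mockClient struct{}

func (*mockClient) Info() string { return "mock engine" }

// plugin holds a factory rather than a concrete client, so construction is
// injectable: production wires a real dialer, tests wire a mock.
type plugin struct {
	newClient func(host string) (Client, error)
}

func (p *plugin) connect(host string) (Client, error) { return p.newClient(host) }

func main() {
	prod := &plugin{newClient: func(h string) (Client, error) { return &engineClient{host: h}, nil }}
	test := &plugin{newClient: func(string) (Client, error) { return &mockClient{}, nil }}

	c, _ := prod.connect("unix:///var/run/docker.sock")
	m, _ := test.connect("ignored")
	fmt.Println(c.Info())
	fmt.Println(m.Info())
}
```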
## v1.12.3 [2019-10-07] From da17d6569dac6e17380ab78e33cc9d502d43cea3 Mon Sep 17 00:00:00 2001 From: Richard Wise Date: Tue, 8 Oct 2019 09:08:35 +0800 Subject: [PATCH 1235/1815] Clarify behaviour of enum processor without default or defined mapping (#6301) --- plugins/processors/enum/README.md | 10 ++++++++-- plugins/processors/enum/enum_test.go | 11 +++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/plugins/processors/enum/README.md b/plugins/processors/enum/README.md index 29821e83d..0f2a6135d 100644 --- a/plugins/processors/enum/README.md +++ b/plugins/processors/enum/README.md @@ -25,8 +25,8 @@ source tag or field is overwritten. dest = "status_code" ## Default value to be used for all values not contained in the mapping - ## table. When unset, the unmodified value for the field will be used if no - ## match is found. + ## table. When unset and no match is found, the original field will remain + ## unmodified and the destination tag or field will not be created. # default = 0 ## Table of mappings @@ -42,3 +42,9 @@ source tag or field is overwritten. - xyzzy status="green" 1502489900000000000 + xyzzy status="green",status_code=1i 1502489900000000000 ``` + +With unknown value and no default set: +```diff +- xyzzy status="black" 1502489900000000000 ++ xyzzy status="black" 1502489900000000000 +``` diff --git a/plugins/processors/enum/enum_test.go b/plugins/processors/enum/enum_test.go index 06204523d..5f89510ca 100644 --- a/plugins/processors/enum/enum_test.go +++ b/plugins/processors/enum/enum_test.go @@ -123,3 +123,14 @@ func TestWritesToDestination(t *testing.T) { assertFieldValue(t, "test", "string_value", fields) assertFieldValue(t, 1, "string_code", fields) } + +func TestDoNotWriteToDestinationWithoutDefaultOrDefinedMapping(t *testing.T) { + field := "string_code" + mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Dest: field, ValueMappings: map[string]interface{}{"other": int64(1)}}}} + + fields := calculateProcessedValues(mapper, createTestMetric()) + + assertFieldValue(t, "test", "string_value", fields) + _, present := fields[field] + assert.False(t, present, "value of field '"+field+"' was present") +} From 5bd5cdc6d7a55bed959cf2f0fd879df755bddee8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 7 Oct 2019 21:08:28 -0700 Subject: [PATCH 1236/1815] Build official packages with Go 1.13.1 (#6462) --- .circleci/config.yml | 98 ++++++++------- Makefile | 15 +-- README.md | 4 +- appveyor.yml | 4 +- .../application_insights_test.go | 2 +- .../prometheus_client_tls_test.go | 114 ------------------ scripts/ci-1.11.docker | 28 ----- scripts/{ci-1.10.docker => ci-1.13.docker} | 2 +- 8 files changed, 58 insertions(+), 209 deletions(-) delete mode 100644 plugins/outputs/prometheus_client/prometheus_client_tls_test.go delete mode 100644 scripts/ci-1.11.docker rename scripts/{ci-1.10.docker => ci-1.13.docker} (96%) diff --git a/.circleci/config.yml b/.circleci/config.yml index 3f02a3386..50a8080ec 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -4,20 +4,17 @@ defaults: working_directory: '/go/src/github.com/influxdata/telegraf' environment: GOFLAGS: -p=8 - go-1_10: &go-1_10 - docker: - - image: 'quay.io/influxdb/telegraf-ci:1.10.8' - go-1_11: &go-1_11 - docker: - - image: 'quay.io/influxdb/telegraf-ci:1.11.13' go-1_12: &go-1_12 docker: - image: 'quay.io/influxdb/telegraf-ci:1.12.10' + go-1_13: &go-1_13 + docker: + - image: 'quay.io/influxdb/telegraf-ci:1.13.1' version: 2 jobs: deps: - <<: [ *defaults, *go-1_12 ] + <<: [ *defaults, 
*go-1_13 ] steps: - checkout - restore_cache: @@ -34,28 +31,13 @@ jobs: paths: - '*' - test-go-1.10: - <<: [ *defaults, *go-1_10 ] - steps: - - attach_workspace: - at: '/go/src' - # disabled due to gofmt differences (1.10 vs 1.11). - #- run: 'make check' - - run: 'make test' - test-go-1.11: - <<: [ *defaults, *go-1_11 ] - steps: - - attach_workspace: - at: '/go/src' - - run: 'make check' - - run: 'make test' test-go-1.12: <<: [ *defaults, *go-1_12 ] steps: - attach_workspace: at: '/go/src' - - run: 'GOARCH=386 make check' - - run: 'GOARCH=386 make test' + - run: 'make check' + - run: 'make test' test-go-1.12-386: <<: [ *defaults, *go-1_12 ] steps: @@ -63,9 +45,23 @@ jobs: at: '/go/src' - run: 'GOARCH=386 make check' - run: 'GOARCH=386 make test' + test-go-1.13: + <<: [ *defaults, *go-1_13 ] + steps: + - attach_workspace: + at: '/go/src' + - run: 'make check' + - run: 'make test' + test-go-1.13-386: + <<: [ *defaults, *go-1_13 ] + steps: + - attach_workspace: + at: '/go/src' + - run: 'GOARCH=386 make check' + - run: 'GOARCH=386 make test' package: - <<: [ *defaults, *go-1_12 ] + <<: [ *defaults, *go-1_13 ] steps: - attach_workspace: at: '/go/src' @@ -74,7 +70,7 @@ jobs: path: './build' destination: 'build' release: - <<: [ *defaults, *go-1_12 ] + <<: [ *defaults, *go-1_13 ] steps: - attach_workspace: at: '/go/src' @@ -83,7 +79,7 @@ jobs: path: './build' destination: 'build' nightly: - <<: [ *defaults, *go-1_12 ] + <<: [ *defaults, *go-1_13 ] steps: - attach_workspace: at: '/go/src' @@ -100,18 +96,6 @@ workflows: filters: tags: only: /.*/ - - 'test-go-1.10': - requires: - - 'deps' - filters: - tags: - only: /.*/ - - 'test-go-1.11': - requires: - - 'deps' - filters: - tags: - only: /.*/ - 'test-go-1.12': requires: - 'deps' @@ -124,18 +108,30 @@ workflows: filters: tags: only: /.*/ + - 'test-go-1.13': + requires: + - 'deps' + filters: + tags: + only: /.*/ + - 'test-go-1.13-386': + requires: + - 'deps' + filters: + tags: + only: /.*/ - 'package': requires: - - 'test-go-1.10' - - 'test-go-1.11' - 'test-go-1.12' - 'test-go-1.12-386' + - 'test-go-1.13' + - 'test-go-1.13-386' - 'release': requires: - - 'test-go-1.10' - - 'test-go-1.11' - 'test-go-1.12' - 'test-go-1.12-386' + - 'test-go-1.13' + - 'test-go-1.13-386' filters: tags: only: /.*/ @@ -144,24 +140,24 @@ workflows: nightly: jobs: - 'deps' - - 'test-go-1.10': - requires: - - 'deps' - - 'test-go-1.11': - requires: - - 'deps' - 'test-go-1.12': requires: - 'deps' - 'test-go-1.12-386': requires: - 'deps' + - 'test-go-1.13': + requires: + - 'deps' + - 'test-go-1.13-386': + requires: + - 'deps' - 'nightly': requires: - - 'test-go-1.10' - - 'test-go-1.11' - 'test-go-1.12' - 'test-go-1.12-386' + - 'test-go-1.13' + - 'test-go-1.13-386' triggers: - schedule: cron: "0 7 * * *" diff --git a/Makefile b/Makefile index abb107838..0846e73b6 100644 --- a/Makefile +++ b/Makefile @@ -129,17 +129,12 @@ plugin-%: @echo "Starting dev environment for $${$(@)} input plugin..." 
@docker-compose -f plugins/inputs/$${$(@)}/dev/docker-compose.yml up +.PHONY: ci-1.13 +ci-1.13: + docker build -t quay.io/influxdb/telegraf-ci:1.13.1 - < scripts/ci-1.13.docker + docker push quay.io/influxdb/telegraf-ci:1.13.1 + .PHONY: ci-1.12 ci-1.12: docker build -t quay.io/influxdb/telegraf-ci:1.12.10 - < scripts/ci-1.12.docker docker push quay.io/influxdb/telegraf-ci:1.12.10 - -.PHONY: ci-1.11 -ci-1.11: - docker build -t quay.io/influxdb/telegraf-ci:1.11.13 - < scripts/ci-1.11.docker - docker push quay.io/influxdb/telegraf-ci:1.11.13 - -.PHONY: ci-1.10 -ci-1.10: - docker build -t quay.io/influxdb/telegraf-ci:1.10.8 - < scripts/ci-1.10.docker - docker push quay.io/influxdb/telegraf-ci:1.10.8 diff --git a/README.md b/README.md index 6601379bd..01e0cc070 100644 --- a/README.md +++ b/README.md @@ -40,9 +40,9 @@ Ansible role: https://github.com/rossmcdonald/telegraf ### From Source: -Telegraf requires golang version 1.10 or newer, the Makefile requires GNU make. +Telegraf requires golang version 1.12 or newer, the Makefile requires GNU make. -1. [Install Go](https://golang.org/doc/install) >=1.10 (1.12 recommended) +1. [Install Go](https://golang.org/doc/install) >=1.12 (1.13 recommended) 2. [Install dep](https://golang.github.io/dep/docs/installation.html) ==v0.5.0 3. Download Telegraf source: ``` diff --git a/appveyor.yml b/appveyor.yml index c2349dd32..8197172ba 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -13,11 +13,11 @@ platform: x64 install: - IF NOT EXIST "C:\Cache" mkdir C:\Cache - - IF NOT EXIST "C:\Cache\go1.12.9.msi" curl -o "C:\Cache\go1.12.9.msi" https://storage.googleapis.com/golang/go1.12.9.windows-amd64.msi + - IF NOT EXIST "C:\Cache\go1.13.1.msi" curl -o "C:\Cache\go1.13.1.msi" https://storage.googleapis.com/golang/go1.13.1.windows-amd64.msi - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip - IF EXIST "C:\Go" rmdir /S /Q C:\Go - - msiexec.exe /i "C:\Cache\go1.12.9.msi" /quiet + - msiexec.exe /i "C:\Cache\go1.13.1.msi" /quiet - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y - go get -d github.com/golang/dep diff --git a/plugins/outputs/application_insights/application_insights_test.go b/plugins/outputs/application_insights/application_insights_test.go index 561e6c9f9..7255ad068 100644 --- a/plugins/outputs/application_insights/application_insights_test.go +++ b/plugins/outputs/application_insights/application_insights_test.go @@ -184,7 +184,7 @@ func TestSimpleMetricCreated(t *testing.T) { {"neither value nor count", map[string]interface{}{"v1": "alpha", "v2": 45.8}, "", []string{"v2"}}, {"value is of wrong type", map[string]interface{}{"value": "alpha", "count": 15}, "", []string{"count"}}, {"count is of wrong type", map[string]interface{}{"value": 23.77, "count": 7.5}, "", []string{"count", "value"}}, - {"count is out of range", map[string]interface{}{"value": -98.45E4, "count": math.MaxUint64 - uint64(20)}, "", []string{"value", "count"}}, + {"count is out of range", map[string]interface{}{"value": -98.45e4, "count": math.MaxUint64 - uint64(20)}, "", []string{"value", "count"}}, {"several additional fields", map[string]interface{}{"alpha": 10, "bravo": "bravo", "charlie": 30, "delta": 40.7}, "", []string{"alpha", "charlie", "delta"}}, } diff --git 
a/plugins/outputs/prometheus_client/prometheus_client_tls_test.go b/plugins/outputs/prometheus_client/prometheus_client_tls_test.go deleted file mode 100644 index bcbb4e70e..000000000 --- a/plugins/outputs/prometheus_client/prometheus_client_tls_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package prometheus_client_test - -import ( - "crypto/tls" - "fmt" - "net/http" - "testing" - - inttls "github.com/influxdata/telegraf/internal/tls" - "github.com/influxdata/telegraf/plugins/outputs/prometheus_client" - "github.com/influxdata/telegraf/testutil" - "github.com/influxdata/toml" - "github.com/stretchr/testify/require" -) - -var pki = testutil.NewPKI("../../../testutil/pki") - -var configWithTLS = fmt.Sprintf(` - listen = "127.0.0.1:0" - tls_allowed_cacerts = ["%s"] - tls_cert = "%s" - tls_key = "%s" - tls_cipher_suites = ["%s"] - tls_min_version = "%s" -`, pki.TLSServerConfig().TLSAllowedCACerts[0], pki.TLSServerConfig().TLSCert, pki.TLSServerConfig().TLSKey, pki.CipherSuite(), pki.TLSMaxVersion()) - -var configWithoutTLS = ` - listen = "127.0.0.1:0" -` - -type PrometheusClientTestContext struct { - Output *prometheus_client.PrometheusClient - Accumulator *testutil.Accumulator - Client *http.Client -} - -func TestWorksWithoutTLS(t *testing.T) { - tc := buildTestContext(t, []byte(configWithoutTLS)) - err := tc.Output.Connect() - require.NoError(t, err) - defer tc.Output.Close() - - response, err := tc.Client.Get(tc.Output.URL()) - require.NoError(t, err) - - require.NoError(t, err) - require.Equal(t, response.StatusCode, http.StatusOK) -} - -func TestWorksWithTLS(t *testing.T) { - tc := buildTestContext(t, []byte(configWithTLS)) - err := tc.Output.Connect() - require.NoError(t, err) - defer tc.Output.Close() - - serverCiphers, err := inttls.ParseCiphers(tc.Output.ServerConfig.TLSCipherSuites) - require.NoError(t, err) - require.Equal(t, 1, len(serverCiphers)) - - tlsVersion, err := inttls.ParseTLSVersion(tc.Output.ServerConfig.TLSMinVersion) - require.NoError(t, err) - - response, err := tc.Client.Get(tc.Output.URL()) - require.NoError(t, err) - - require.NoError(t, err) - require.Equal(t, response.StatusCode, http.StatusOK) - - require.Equal(t, response.TLS.CipherSuite, serverCiphers[0]) - require.Equal(t, response.TLS.Version, tlsVersion) - - tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - - client := &http.Client{Transport: tr} - response, err = client.Get(tc.Output.URL()) - - require.Error(t, err) -} - -func buildTestContext(t *testing.T, config []byte) *PrometheusClientTestContext { - output := prometheus_client.NewClient() - err := toml.Unmarshal(config, output) - require.NoError(t, err) - - var ( - httpClient *http.Client - ) - - if len(output.TLSAllowedCACerts) != 0 { - httpClient = buildClientWithTLS(t, output) - } else { - httpClient = buildClientWithoutTLS() - } - - return &PrometheusClientTestContext{ - Output: output, - Accumulator: &testutil.Accumulator{}, - Client: httpClient, - } -} - -func buildClientWithoutTLS() *http.Client { - return &http.Client{} -} - -func buildClientWithTLS(t *testing.T, output *prometheus_client.PrometheusClient) *http.Client { - tlsConfig, err := pki.TLSClientConfig().TLSConfig() - require.NoError(t, err) - - transport := &http.Transport{TLSClientConfig: tlsConfig} - return &http.Client{Transport: transport} -} diff --git a/scripts/ci-1.11.docker b/scripts/ci-1.11.docker deleted file mode 100644 index 93f2d64b6..000000000 --- a/scripts/ci-1.11.docker +++ /dev/null @@ -1,28 +0,0 @@ -FROM golang:1.11.13 - -RUN chmod -R 
755 "$GOPATH" - -RUN DEBIAN_FRONTEND=noninteractive \ - apt update && apt install -y --no-install-recommends \ - autoconf \ - git \ - libtool \ - locales \ - make \ - python-boto \ - rpm \ - ruby \ - ruby-dev \ - zip && \ - rm -rf /var/lib/apt/lists/* - -RUN ln -sf /usr/share/zoneinfo/Etc/UTC /etc/localtime -RUN locale-gen C.UTF-8 || true -ENV LANG=C.UTF-8 - -RUN gem install fpm - -RUN go get -d github.com/golang/dep && \ - cd src/github.com/golang/dep && \ - git checkout -q v0.5.0 && \ - go install -ldflags="-X main.version=v0.5.0" ./cmd/dep diff --git a/scripts/ci-1.10.docker b/scripts/ci-1.13.docker similarity index 96% rename from scripts/ci-1.10.docker rename to scripts/ci-1.13.docker index 54c30f382..d859850dc 100644 --- a/scripts/ci-1.10.docker +++ b/scripts/ci-1.13.docker @@ -1,4 +1,4 @@ -FROM golang:1.10.8 +FROM golang:1.13.1 RUN chmod -R 755 "$GOPATH" From b5182f4f9aabee52171c84a26db48f0c96188e31 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 7 Oct 2019 22:06:43 -0700 Subject: [PATCH 1237/1815] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 43ac797df..2edcf6024 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## v1.13 [unreleased] +#### Release Notes + +- Official packages are built with Go 1.13.1. + #### New Inputs - [azure_storage_queue](/plugins/inputs/azure_storage_queue/README.md) - Contributed by @mjiderhamn From b9909011104e4c0ceadb0f45ea83b7d09d7f404c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 14 Oct 2019 15:08:22 -0700 Subject: [PATCH 1238/1815] Revert "Add CLA check GitHub action (#6479)" This reverts commit 8eb8643a3a06974fa2a60af87802ea9455f6f18c. --- .github/workflows/main.yml | 16 ---------------- 1 file changed, 16 deletions(-) delete mode 100644 .github/workflows/main.yml diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml deleted file mode 100644 index d638476cc..000000000 --- a/.github/workflows/main.yml +++ /dev/null @@ -1,16 +0,0 @@ -on: - pull_request: - types: [opened] - -jobs: - cla-checker: - runs-on: ubuntu-latest - name: "Check CLA" - steps: - - name: "Lookup PR Author" - uses: influxdata/clasee@v1 - with: - spreadsheet: "1jnRZYSw83oa6hcEBb1lxK6nNvXrWnOzPT8Bz9iR4Q8s" - range: "Form Responses!E:E" - env: - CLASEE_SECRET: ${{ secrets.CLASEE_SECRET }} From c8f4215ac5f9b4d17d36545cea0ac6f2abfb2002 Mon Sep 17 00:00:00 2001 From: pierwill <19642016+pierwill@users.noreply.github.com> Date: Mon, 14 Oct 2019 15:29:41 -0700 Subject: [PATCH 1239/1815] Document data types should be specified in column order (#6526) --- plugins/parsers/csv/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md index ec1ffa1ca..1881248ee 100644 --- a/plugins/parsers/csv/README.md +++ b/plugins/parsers/csv/README.md @@ -29,6 +29,7 @@ values. ## For assigning explicit data types to columns. ## Supported types: "int", "float", "bool", "string". + ## Specify types in order by column (e.g. `["string", "int", "float"]`) ## If this is not specified, type conversion will be done on the types above. 
csv_column_types = [] From 00962783f8d4b3c19425f3c26765c53a625a9438 Mon Sep 17 00:00:00 2001 From: reimda Date: Fri, 18 Oct 2019 13:48:23 -0600 Subject: [PATCH 1240/1815] Add lang parameter to OpenWeathermap input plugin (#6504) --- plugins/inputs/openweathermap/README.md | 26 ++- .../inputs/openweathermap/openweathermap.go | 194 +++++++++------- .../openweathermap/openweathermap_test.go | 211 +++++++++++------- 3 files changed, 269 insertions(+), 162 deletions(-) diff --git a/plugins/inputs/openweathermap/README.md b/plugins/inputs/openweathermap/README.md index d79699049..b2a9d1c0a 100644 --- a/plugins/inputs/openweathermap/README.md +++ b/plugins/inputs/openweathermap/README.md @@ -4,9 +4,11 @@ Collect current weather and forecast data from OpenWeatherMap. To use this plugin you will need an [api key][] (app_id). -City identifiers can be found in the [city list][]. Alternately you can -[search][] by name; the `city_id` can be found as the last digits of the URL: -https://openweathermap.org/city/2643743 +City identifiers can be found in the [city list][]. Alternately you +can [search][] by name; the `city_id` can be found as the last digits +of the URL: https://openweathermap.org/city/2643743. Language +identifiers can be found in the [lang list][]. Documentation for +condition ID, icon, and main is at [weather conditions][]. ### Configuration @@ -18,6 +20,12 @@ https://openweathermap.org/city/2643743 ## City ID's to collect weather data from. city_id = ["5391959"] + ## Language of the description field. Can be one of "ar", "bg", + ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu", + ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru", + ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw" + # lang = "en" + ## APIs to fetch; can contain "weather" or "forecast". 
fetch = ["weather", "forecast"] @@ -42,6 +50,8 @@ https://openweathermap.org/city/2643743 - tags: - city_id - forecast + - condition_id + - condition_main - fields: - cloudiness (int, percent) - humidity (int, percent) @@ -53,16 +63,20 @@ https://openweathermap.org/city/2643743 - visibility (int, meters, not available on forecast data) - wind_degrees (float, wind direction in degrees) - wind_speed (float, wind speed in meters/sec or miles/sec) + - condition_description (string, localized long description) + - condition_icon ### Example Output ``` -> weather,city=San\ Francisco,city_id=5391959,country=US,forecast=* cloudiness=40i,humidity=72i,pressure=1013,rain=0,sunrise=1559220629000000000i,sunset=1559273058000000000i,temperature=13.31,visibility=16093i,wind_degrees=280,wind_speed=4.6 1559268695000000000 -> weather,city=San\ Francisco,city_id=5391959,country=US,forecast=3h cloudiness=0i,humidity=86i,pressure=1012.03,rain=0,temperature=10.69,wind_degrees=222.855,wind_speed=2.76 1559271600000000000 -> weather,city=San\ Francisco,city_id=5391959,country=US,forecast=6h cloudiness=11i,humidity=93i,pressure=1012.79,rain=0,temperature=9.34,wind_degrees=212.685,wind_speed=1.85 1559282400000000000 +> weather,city=San\ Francisco,city_id=5391959,condition_id=800,condition_main=Clear,country=US,forecast=* cloudiness=1i,condition_description="clear sky",condition_icon="01d",humidity=35i,pressure=1012,rain=0,sunrise=1570630329000000000i,sunset=1570671689000000000i,temperature=21.52,visibility=16093i,wind_degrees=280,wind_speed=5.7 1570659256000000000 +> weather,city=San\ Francisco,city_id=5391959,condition_id=800,condition_main=Clear,country=US,forecast=3h cloudiness=0i,condition_description="clear sky",condition_icon="01n",humidity=41i,pressure=1010,rain=0,temperature=22.34,wind_degrees=249.393,wind_speed=2.085 1570665600000000000 +> weather,city=San\ Francisco,city_id=5391959,condition_id=800,condition_main=Clear,country=US,forecast=6h cloudiness=0i,condition_description="clear sky",condition_icon="01n",humidity=50i,pressure=1012,rain=0,temperature=17.09,wind_degrees=310.754,wind_speed=3.009 1570676400000000000 ``` [api key]: https://openweathermap.org/appid [city list]: http://bulk.openweathermap.org/sample/city.list.json.gz [search]: https://openweathermap.org/find +[lang list]: https://openweathermap.org/current#multi +[weather conditions]: https://openweathermap.org/weather-conditions diff --git a/plugins/inputs/openweathermap/openweathermap.go b/plugins/inputs/openweathermap/openweathermap.go index c15ee3832..079973ddd 100644 --- a/plugins/inputs/openweathermap/openweathermap.go +++ b/plugins/inputs/openweathermap/openweathermap.go @@ -23,20 +23,23 @@ const ( // The limit of locations is 20. owmRequestSeveralCityId int = 20 - defaultBaseURL = "https://api.openweathermap.org/" + defaultBaseUrl = "https://api.openweathermap.org/" defaultResponseTimeout time.Duration = time.Second * 5 defaultUnits string = "metric" + defaultLang string = "en" ) type OpenWeatherMap struct { AppId string `toml:"app_id"` CityId []string `toml:"city_id"` + Lang string `toml:"lang"` Fetch []string `toml:"fetch"` BaseUrl string `toml:"base_url"` ResponseTimeout internal.Duration `toml:"response_timeout"` Units string `toml:"units"` - client *http.Client + client *http.Client + baseUrl *url.URL } var sampleConfig = ` @@ -46,6 +49,12 @@ var sampleConfig = ` ## City ID's to collect weather data from. city_id = ["5391959"] + ## Language of the description field. 
Can be one of "ar", "bg", + ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu", + ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru", + ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw" + # lang = "en" + ## APIs to fetch; can contain "weather" or "forecast". fetch = ["weather", "forecast"] @@ -76,41 +85,10 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup var strs []string - base, err := url.Parse(n.BaseUrl) - if err != nil { - return err - } - - // Create an HTTP client that is re-used for each - // collection interval - if n.client == nil { - client, err := n.createHttpClient() - if err != nil { - return err - } - n.client = client - } - - units := n.Units - switch n.Units { - case "imperial", "standard": - break - default: - units = defaultUnits - } - for _, fetch := range n.Fetch { if fetch == "forecast" { - var u *url.URL - for _, city := range n.CityId { - u, err = url.Parse(fmt.Sprintf("/data/2.5/forecast?id=%s&APPID=%s&units=%s", city, n.AppId, units)) - if err != nil { - acc.AddError(fmt.Errorf("unable to parse address '%s': %s", u, err)) - continue - } - - addr := base.ResolveReference(u).String() + addr := n.formatURL("/data/2.5/forecast", city) wg.Add(1) go func() { defer wg.Done() @@ -126,7 +104,6 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { } else if fetch == "weather" { j := 0 for j < len(n.CityId) { - var u *url.URL strs = make([]string, 0) for i := 0; j < len(n.CityId) && i < owmRequestSeveralCityId; i++ { strs = append(strs, n.CityId[j]) @@ -134,13 +111,7 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { } cities := strings.Join(strs, ",") - u, err = url.Parse(fmt.Sprintf("/data/2.5/group?id=%s&APPID=%s&units=%s", cities, n.AppId, units)) - if err != nil { - acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) - continue - } - - addr := base.ResolveReference(u).String() + addr := n.formatURL("/data/2.5/group", cities) wg.Add(1) go func() { defer wg.Done() @@ -226,6 +197,12 @@ type WeatherEntry struct { Lon float64 `json:"lon"` } `json:"coord"` Visibility int64 `json:"visibility"` + Weather []struct { + ID int64 `json:"id"` + Main string `json:"main"` + Description string `json:"description"` + Icon string `json:"icon"` + } `json:"weather"` } type Status struct { @@ -253,27 +230,34 @@ func gatherWeatherUrl(r io.Reader) (*Status, error) { func gatherWeather(acc telegraf.Accumulator, status *Status) { for _, e := range status.List { tm := time.Unix(e.Dt, 0) - acc.AddFields( - "weather", - map[string]interface{}{ - "cloudiness": e.Clouds.All, - "humidity": e.Main.Humidity, - "pressure": e.Main.Pressure, - "rain": e.Rain.Rain3, - "sunrise": time.Unix(e.Sys.Sunrise, 0).UnixNano(), - "sunset": time.Unix(e.Sys.Sunset, 0).UnixNano(), - "temperature": e.Main.Temp, - "visibility": e.Visibility, - "wind_degrees": e.Wind.Deg, - "wind_speed": e.Wind.Speed, - }, - map[string]string{ - "city": e.Name, - "city_id": strconv.FormatInt(e.Id, 10), - "country": e.Sys.Country, - "forecast": "*", - }, - tm) + + fields := map[string]interface{}{ + "cloudiness": e.Clouds.All, + "humidity": e.Main.Humidity, + "pressure": e.Main.Pressure, + "rain": e.Rain.Rain3, + "sunrise": time.Unix(e.Sys.Sunrise, 0).UnixNano(), + "sunset": time.Unix(e.Sys.Sunset, 0).UnixNano(), + "temperature": e.Main.Temp, + "visibility": e.Visibility, + "wind_degrees": e.Wind.Deg, + "wind_speed": e.Wind.Speed, + } + tags := map[string]string{ + "city": e.Name, + "city_id": 
strconv.FormatInt(e.Id, 10), + "country": e.Sys.Country, + "forecast": "*", + } + + if len(e.Weather) > 0 { + fields["condition_description"] = e.Weather[0].Description + fields["condition_icon"] = e.Weather[0].Icon + tags["condition_id"] = strconv.FormatInt(e.Weather[0].ID, 10) + tags["condition_main"] = e.Weather[0].Main + } + + acc.AddFields("weather", fields, tags, tm) } } @@ -286,20 +270,23 @@ func gatherForecast(acc telegraf.Accumulator, status *Status) { } for i, e := range status.List { tm := time.Unix(e.Dt, 0) + fields := map[string]interface{}{ + "cloudiness": e.Clouds.All, + "humidity": e.Main.Humidity, + "pressure": e.Main.Pressure, + "rain": e.Rain.Rain3, + "temperature": e.Main.Temp, + "wind_degrees": e.Wind.Deg, + "wind_speed": e.Wind.Speed, + } + if len(e.Weather) > 0 { + fields["condition_description"] = e.Weather[0].Description + fields["condition_icon"] = e.Weather[0].Icon + tags["condition_id"] = strconv.FormatInt(e.Weather[0].ID, 10) + tags["condition_main"] = e.Weather[0].Main + } tags["forecast"] = fmt.Sprintf("%dh", (i+1)*3) - acc.AddFields( - "weather", - map[string]interface{}{ - "cloudiness": e.Clouds.All, - "humidity": e.Main.Humidity, - "pressure": e.Main.Pressure, - "rain": e.Rain.Rain3, - "temperature": e.Main.Temp, - "wind_degrees": e.Wind.Deg, - "wind_speed": e.Wind.Speed, - }, - tags, - tm) + acc.AddFields("weather", fields, tags, tm) } } @@ -310,8 +297,59 @@ func init() { } return &OpenWeatherMap{ ResponseTimeout: tmout, - Units: defaultUnits, - BaseUrl: defaultBaseURL, + BaseUrl: defaultBaseUrl, } }) } + +func (n *OpenWeatherMap) Init() error { + var err error + n.baseUrl, err = url.Parse(n.BaseUrl) + if err != nil { + return err + } + + // Create an HTTP client that is re-used for each + // collection interval + n.client, err = n.createHttpClient() + if err != nil { + return err + } + + switch n.Units { + case "imperial", "standard", "metric": + case "": + n.Units = defaultUnits + default: + return fmt.Errorf("unknown units: %s", n.Units) + } + + switch n.Lang { + case "ar", "bg", "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", + "hr", "hu", "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", + "pt", "ro", "ru", "se", "sk", "sl", "es", "tr", "ua", "vi", + "zh_cn", "zh_tw": + case "": + n.Lang = defaultLang + default: + return fmt.Errorf("unknown language: %s", n.Lang) + } + + return nil +} + +func (n *OpenWeatherMap) formatURL(path string, city string) string { + v := url.Values{ + "id": []string{city}, + "APPID": []string{n.AppId}, + "lang": []string{n.Lang}, + "units": []string{n.Units}, + } + + relative := &url.URL{ + Path: path, + RawQuery: v.Encode(), + } + + return n.baseUrl.ResolveReference(relative).String() +} diff --git a/plugins/inputs/openweathermap/openweathermap_test.go b/plugins/inputs/openweathermap/openweathermap_test.go index d59766dd6..20a00e5db 100644 --- a/plugins/inputs/openweathermap/openweathermap_test.go +++ b/plugins/inputs/openweathermap/openweathermap_test.go @@ -283,6 +283,7 @@ func TestForecastGeneratesMetrics(t *testing.T) { Fetch: []string{"weather", "forecast"}, Units: "metric", } + n.Init() var acc testutil.Accumulator @@ -293,38 +294,46 @@ func TestForecastGeneratesMetrics(t *testing.T) { testutil.MustMetric( "weather", map[string]string{ - "city_id": "2988507", - "forecast": "3h", - "city": "Paris", - "country": "FR", + "city_id": "2988507", + "forecast": "3h", + "city": "Paris", + "country": "FR", + "condition_id": "500", + "condition_main": "Rain", }, map[string]interface{}{ - "cloudiness": int64(88), - 
"humidity": int64(91), - "pressure": 1018.65, - "temperature": 6.71, - "rain": 0.035, - "wind_degrees": 228.501, - "wind_speed": 3.76, + "cloudiness": int64(88), + "humidity": int64(91), + "pressure": 1018.65, + "temperature": 6.71, + "rain": 0.035, + "wind_degrees": 228.501, + "wind_speed": 3.76, + "condition_description": "light rain", + "condition_icon": "10n", }, time.Unix(1543622400, 0), ), testutil.MustMetric( "weather", map[string]string{ - "city_id": "2988507", - "forecast": "6h", - "city": "Paris", - "country": "FR", + "city_id": "2988507", + "forecast": "6h", + "city": "Paris", + "country": "FR", + "condition_id": "500", + "condition_main": "Rain", }, map[string]interface{}{ - "cloudiness": int64(92), - "humidity": int64(98), - "pressure": 1032.18, - "temperature": 6.38, - "rain": 0.049999999999997, - "wind_degrees": 335.005, - "wind_speed": 2.66, + "cloudiness": int64(92), + "humidity": int64(98), + "pressure": 1032.18, + "temperature": 6.38, + "rain": 0.049999999999997, + "wind_degrees": 335.005, + "wind_speed": 2.66, + "condition_description": "light rain", + "condition_icon": "10n", }, time.Unix(1544043600, 0), ), @@ -358,6 +367,7 @@ func TestWeatherGeneratesMetrics(t *testing.T) { Fetch: []string{"weather"}, Units: "metric", } + n.Init() var acc testutil.Accumulator @@ -368,22 +378,26 @@ func TestWeatherGeneratesMetrics(t *testing.T) { testutil.MustMetric( "weather", map[string]string{ - "city_id": "2988507", - "forecast": "*", - "city": "Paris", - "country": "FR", + "city_id": "2988507", + "forecast": "*", + "city": "Paris", + "country": "FR", + "condition_id": "300", + "condition_main": "Drizzle", }, map[string]interface{}{ - "cloudiness": int64(0), - "humidity": int64(87), - "pressure": 1007.0, - "temperature": 9.25, - "rain": 0.0, - "sunrise": int64(1544167818000000000), - "sunset": int64(1544198047000000000), - "wind_degrees": 290.0, - "wind_speed": 8.7, - "visibility": 10000, + "cloudiness": int64(0), + "humidity": int64(87), + "pressure": 1007.0, + "temperature": 9.25, + "rain": 0.0, + "sunrise": int64(1544167818000000000), + "sunset": int64(1544198047000000000), + "wind_degrees": 290.0, + "wind_speed": 8.7, + "visibility": 10000, + "condition_description": "light intensity drizzle", + "condition_icon": "09d", }, time.Unix(1544194800, 0), ), @@ -414,6 +428,7 @@ func TestBatchWeatherGeneratesMetrics(t *testing.T) { Fetch: []string{"weather"}, Units: "metric", } + n.Init() var acc testutil.Accumulator @@ -424,66 +439,78 @@ func TestBatchWeatherGeneratesMetrics(t *testing.T) { testutil.MustMetric( "weather", map[string]string{ - "city_id": "524901", - "forecast": "*", - "city": "Moscow", - "country": "RU", + "city_id": "524901", + "forecast": "*", + "city": "Moscow", + "country": "RU", + "condition_id": "802", + "condition_main": "Clouds", }, map[string]interface{}{ - "cloudiness": 40, - "humidity": int64(46), - "pressure": 1014.0, - "temperature": 9.57, - "wind_degrees": 60.0, - "wind_speed": 5.0, - "rain": 0.0, - "sunrise": int64(1556416455000000000), - "sunset": int64(1556470779000000000), - "visibility": 10000, + "cloudiness": 40, + "humidity": int64(46), + "pressure": 1014.0, + "temperature": 9.57, + "wind_degrees": 60.0, + "wind_speed": 5.0, + "rain": 0.0, + "sunrise": int64(1556416455000000000), + "sunset": int64(1556470779000000000), + "visibility": 10000, + "condition_description": "scattered clouds", + "condition_icon": "03d", }, time.Unix(1556444155, 0), ), testutil.MustMetric( "weather", map[string]string{ - "city_id": "703448", - "forecast": "*", - "city": 
"Kiev", - "country": "UA", + "city_id": "703448", + "forecast": "*", + "city": "Kiev", + "country": "UA", + "condition_id": "520", + "condition_main": "Rain", }, map[string]interface{}{ - "cloudiness": 0, - "humidity": int64(63), - "pressure": 1009.0, - "temperature": 19.29, - "wind_degrees": 0.0, - "wind_speed": 1.0, - "rain": 0.0, - "sunrise": int64(1556419155000000000), - "sunset": int64(1556471486000000000), - "visibility": 10000, + "cloudiness": 0, + "humidity": int64(63), + "pressure": 1009.0, + "temperature": 19.29, + "wind_degrees": 0.0, + "wind_speed": 1.0, + "rain": 0.0, + "sunrise": int64(1556419155000000000), + "sunset": int64(1556471486000000000), + "visibility": 10000, + "condition_description": "light intensity shower rain", + "condition_icon": "09d", }, time.Unix(1556444155, 0), ), testutil.MustMetric( "weather", map[string]string{ - "city_id": "2643743", - "forecast": "*", - "city": "London", - "country": "GB", + "city_id": "2643743", + "forecast": "*", + "city": "London", + "country": "GB", + "condition_id": "803", + "condition_main": "Clouds", }, map[string]interface{}{ - "cloudiness": 75, - "humidity": int64(66), - "pressure": 1019.0, - "temperature": 10.62, - "wind_degrees": 290.0, - "wind_speed": 6.2, - "rain": 0.072, - "sunrise": int64(1556426319000000000), - "sunset": int64(1556479032000000000), - "visibility": 10000, + "cloudiness": 75, + "humidity": int64(66), + "pressure": 1019.0, + "temperature": 10.62, + "wind_degrees": 290.0, + "wind_speed": 6.2, + "rain": 0.072, + "sunrise": int64(1556426319000000000), + "sunset": int64(1556479032000000000), + "visibility": 10000, + "condition_description": "broken clouds", + "condition_icon": "04d", }, time.Unix(1556444155, 0), ), @@ -492,3 +519,31 @@ func TestBatchWeatherGeneratesMetrics(t *testing.T) { expected, acc.GetTelegrafMetrics(), testutil.SortMetrics()) } + +func TestFormatURL(t *testing.T) { + n := &OpenWeatherMap{ + AppId: "appid", + Units: "units", + Lang: "lang", + BaseUrl: "http://foo.com", + } + n.Init() + + require.Equal(t, + "http://foo.com/data/2.5/forecast?APPID=appid&id=12345&lang=lang&units=units", + n.formatURL("/data/2.5/forecast", "12345")) +} + +func TestDefaultUnits(t *testing.T) { + n := &OpenWeatherMap{} + n.Init() + + require.Equal(t, "metric", n.Units) +} + +func TestDefaultLang(t *testing.T) { + n := &OpenWeatherMap{} + n.Init() + + require.Equal(t, "en", n.Lang) +} From 59adbe8b3975acf3deb504ef3ced36b267f6dba7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 18 Oct 2019 12:52:53 -0700 Subject: [PATCH 1241/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2edcf6024..87953a439 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ - [#6465](https://github.com/influxdata/telegraf/pull/6465): Add more performance counter metrics to sqlserver input. - [#6476](https://github.com/influxdata/telegraf/pull/6476): Add millisecond unix time support to grok parser. - [#6473](https://github.com/influxdata/telegraf/pull/6473): Add container id as optional source tag to docker and docker_log input. +- [#6504](https://github.com/influxdata/telegraf/pull/6504): Add lang parameter to OpenWeathermap input plugin. 
## v1.12.3 [2019-10-07]

From 89c4c1d0247439cc844b073d33272b5aaca32268 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adri=C3=A1n=20L=C3=B3pez?=
Date: Mon, 21 Oct 2019 20:59:32 +0200
Subject: [PATCH 1242/1815] Add clone processor (#6529)

---
 plugins/processors/clone/README.md     | 38 ++++++++++++
 plugins/processors/clone/clone.go      | 60 +++++++++++++++++++
 plugins/processors/clone/clone_test.go | 83 ++++++++++++++++++++++++++
 3 files changed, 181 insertions(+)
 create mode 100644 plugins/processors/clone/README.md
 create mode 100644 plugins/processors/clone/clone.go
 create mode 100644 plugins/processors/clone/clone_test.go

diff --git a/plugins/processors/clone/README.md b/plugins/processors/clone/README.md
new file mode 100644
index 000000000..7ae33d36b
--- /dev/null
+++ b/plugins/processors/clone/README.md
@@ -0,0 +1,38 @@
+# Clone Processor Plugin
+
+The clone processor plugin creates a copy of each metric passing through it,
+preserving the original metric untouched and allowing modifications in the
+copied one.
+
+The modifications allowed are the ones supported by input plugins and aggregators:
+
+* name_override
+* name_prefix
+* name_suffix
+* tags
+
+Select the metrics to modify using the standard
+[measurement filtering](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#measurement-filtering)
+options.
+
+Values of *name_override*, *name_prefix*, *name_suffix* and already present
+*tags* with conflicting keys will be overwritten. Absent *tags* will be
+created.
+
+A typical use-case is gathering metrics once and cloning them to simulate
+having several hosts (modifying the ``host`` tag).
+
+### Configuration:
+
+```toml
+# Apply metric modifications using override semantics.
+[[processors.clone]]
+  ## All modifications on inputs and aggregators can be overridden:
+  # name_override = "new_name"
+  # name_prefix = "new_name_prefix"
+  # name_suffix = "new_name_suffix"
+
+  ## Tags to be added (all values must be strings)
+  # [processors.clone.tags]
+  # additional_tag = "tag_value"
+```
diff --git a/plugins/processors/clone/clone.go b/plugins/processors/clone/clone.go
new file mode 100644
index 000000000..ad03fd3e4
--- /dev/null
+++ b/plugins/processors/clone/clone.go
@@ -0,0 +1,60 @@
+package clone
+
+import (
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/processors"
+)
+
+var sampleConfig = `
+  ## All modifications on inputs and aggregators can be overridden:
+  # name_override = "new_name"
+  # name_prefix = "new_name_prefix"
+  # name_suffix = "new_name_suffix"
+
+  ## Tags to be added (all values must be strings)
+  # [processors.clone.tags]
+  # additional_tag = "tag_value"
+`
+
+type Clone struct {
+	NameOverride string
+	NamePrefix   string
+	NameSuffix   string
+	Tags         map[string]string
+}
+
+func (c *Clone) SampleConfig() string {
+	return sampleConfig
+}
+
+func (c *Clone) Description() string {
+	return "Clone metrics and apply modifications."
+}
+
+func (c *Clone) Apply(in ...telegraf.Metric) []telegraf.Metric {
+	cloned := []telegraf.Metric{}
+
+	for _, metric := range in {
+		cloned = append(cloned, metric.Copy())
+
+		if len(c.NameOverride) > 0 {
+			metric.SetName(c.NameOverride)
+		}
+		if len(c.NamePrefix) > 0 {
+			metric.AddPrefix(c.NamePrefix)
+		}
+		if len(c.NameSuffix) > 0 {
+			metric.AddSuffix(c.NameSuffix)
+		}
+		for key, value := range c.Tags {
+			metric.AddTag(key, value)
+		}
+	}
+	return append(in, cloned...)
+} + +func init() { + processors.Add("clone", func() telegraf.Processor { + return &Clone{} + }) +} diff --git a/plugins/processors/clone/clone_test.go b/plugins/processors/clone/clone_test.go new file mode 100644 index 000000000..f1b8dc5b2 --- /dev/null +++ b/plugins/processors/clone/clone_test.go @@ -0,0 +1,83 @@ +package clone + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/assert" +) + +func createTestMetric() telegraf.Metric { + metric, _ := metric.New("m1", + map[string]string{"metric_tag": "from_metric"}, + map[string]interface{}{"value": int64(1)}, + time.Now(), + ) + return metric +} + +func calculateProcessedTags(processor Clone, metric telegraf.Metric) map[string]string { + processed := processor.Apply(metric) + return processed[0].Tags() +} + +func TestRetainsTags(t *testing.T) { + processor := Clone{} + + tags := calculateProcessedTags(processor, createTestMetric()) + + value, present := tags["metric_tag"] + assert.True(t, present, "Tag of metric was not present") + assert.Equal(t, "from_metric", value, "Value of Tag was changed") +} + +func TestAddTags(t *testing.T) { + processor := Clone{Tags: map[string]string{"added_tag": "from_config", "another_tag": ""}} + + tags := calculateProcessedTags(processor, createTestMetric()) + + value, present := tags["added_tag"] + assert.True(t, present, "Additional Tag of metric was not present") + assert.Equal(t, "from_config", value, "Value of Tag was changed") + assert.Equal(t, 3, len(tags), "Should have one previous and two added tags.") +} + +func TestOverwritesPresentTagValues(t *testing.T) { + processor := Clone{Tags: map[string]string{"metric_tag": "from_config"}} + + tags := calculateProcessedTags(processor, createTestMetric()) + + value, present := tags["metric_tag"] + assert.True(t, present, "Tag of metric was not present") + assert.Equal(t, 1, len(tags), "Should only have one tag.") + assert.Equal(t, "from_config", value, "Value of Tag was not changed") +} + +func TestOverridesName(t *testing.T) { + processor := Clone{NameOverride: "overridden"} + + processed := processor.Apply(createTestMetric()) + + assert.Equal(t, "overridden", processed[0].Name(), "Name was not overridden") + assert.Equal(t, "m1", processed[1].Name(), "Original metric was modified") +} + +func TestNamePrefix(t *testing.T) { + processor := Clone{NamePrefix: "Pre-"} + + processed := processor.Apply(createTestMetric()) + + assert.Equal(t, "Pre-m1", processed[0].Name(), "Prefix was not applied") + assert.Equal(t, "m1", processed[1].Name(), "Original metric was modified") +} + +func TestNameSuffix(t *testing.T) { + processor := Clone{NameSuffix: "-suff"} + + processed := processor.Apply(createTestMetric()) + + assert.Equal(t, "m1-suff", processed[0].Name(), "Suffix was not applied") + assert.Equal(t, "m1", processed[1].Name(), "Original metric was modified") +} From d64f1d2a51f792e3f9137744669a8f8fa569d68b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 21 Oct 2019 12:03:30 -0700 Subject: [PATCH 1243/1815] Update changelog --- CHANGELOG.md | 4 ++++ README.md | 1 + 2 files changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 87953a439..e1830e1ea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,10 @@ - [azure_storage_queue](/plugins/inputs/azure_storage_queue/README.md) - Contributed by @mjiderhamn - [suricata](/plugins/inputs/suricata/README.md) - Contributed by @satta +#### New Processors + +- 
[clone](/plugins/processors/clone/README.md) - Contributed by @adrianlzt + #### New Aggregators - [merge](/plugins/aggregators/merge/README.md) - Contributed by @influxdata diff --git a/README.md b/README.md index 01e0cc070..eb35705f2 100644 --- a/README.md +++ b/README.md @@ -335,6 +335,7 @@ For documentation on the latest development code see the [documentation index][d ## Processor Plugins +* [clone](./plugins/processors/clone) * [converter](./plugins/processors/converter) * [date](./plugins/processors/date) * [enum](./plugins/processors/enum) From f22947ee421617db0b027c299cecd5e491f38713 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sebastian=20Th=C3=B6rn?= Date: Mon, 21 Oct 2019 21:06:19 +0200 Subject: [PATCH 1244/1815] Fix link to zookeeper documentation (#6551) --- plugins/inputs/zookeeper/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/zookeeper/README.md b/plugins/inputs/zookeeper/README.md index d54caae44..c452e8663 100644 --- a/plugins/inputs/zookeeper/README.md +++ b/plugins/inputs/zookeeper/README.md @@ -1,7 +1,7 @@ ## Zookeeper Input Plugin The zookeeper plugin collects variables outputted from the 'mntr' command -[Zookeeper Admin](https://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html). +[Zookeeper Admin](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html). ### Configuration From a1bcc0f87bfc54a09ab5e33afc36fea52551fc89 Mon Sep 17 00:00:00 2001 From: David McKay Date: Mon, 21 Oct 2019 20:10:56 +0100 Subject: [PATCH 1245/1815] Log file not found errors at debug level in tail input (#6540) --- plugins/inputs/tail/tail.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index 0b2e2628b..db4d56424 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -159,7 +159,7 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error { Logger: tail.DiscardingLogger, }) if err != nil { - t.acc.AddError(err) + t.Log.Debugf("Failed to open file (%s): %v", file, err) continue } From e5baf7de891aa4b4df1d597fb8328719241294db Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 21 Oct 2019 12:11:48 -0700 Subject: [PATCH 1246/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e1830e1ea..a4cdac1c9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,7 @@ - [#6476](https://github.com/influxdata/telegraf/pull/6476): Add millisecond unix time support to grok parser. - [#6473](https://github.com/influxdata/telegraf/pull/6473): Add container id as optional source tag to docker and docker_log input. - [#6504](https://github.com/influxdata/telegraf/pull/6504): Add lang parameter to OpenWeathermap input plugin. +- [#6540](https://github.com/influxdata/telegraf/pull/6540): Log file open errors at debug level in tail input. ## v1.12.3 [2019-10-07] From 8ec79513b6d8d205e4698a11badceb69e2155d35 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 21 Oct 2019 15:18:55 -0600 Subject: [PATCH 1247/1815] Add timeout option to cloudwatch input (#6553) --- plugins/inputs/cloudwatch/README.md | 3 ++ plugins/inputs/cloudwatch/cloudwatch.go | 59 ++++++++++++++++--------- 2 files changed, 42 insertions(+), 20 deletions(-) diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md index 369eadbc1..3cd098f47 100644 --- a/plugins/inputs/cloudwatch/README.md +++ b/plugins/inputs/cloudwatch/README.md @@ -70,6 +70,9 @@ API endpoint. 
In the following order the plugin will attempt to authenticate. ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html # ratelimit = 25 + ## Timeout for http requests made by the cloudwatch client. + # timeout = "5s" + ## Namespace-wide statistic filters. These allow fewer queries to be made to ## cloudwatch. # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ] diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index 7aad67f5b..5af281cfc 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -3,6 +3,8 @@ package cloudwatch import ( "errors" "fmt" + "net" + "net/http" "strconv" "strings" "sync" @@ -23,16 +25,17 @@ import ( type ( // CloudWatch contains the configuration and cache for the cloudwatch plugin. CloudWatch struct { - Region string `toml:"region"` - AccessKey string `toml:"access_key"` - SecretKey string `toml:"secret_key"` - RoleARN string `toml:"role_arn"` - Profile string `toml:"profile"` - CredentialPath string `toml:"shared_credential_file"` - Token string `toml:"token"` - EndpointURL string `toml:"endpoint_url"` - StatisticExclude []string `toml:"statistic_exclude"` - StatisticInclude []string `toml:"statistic_include"` + Region string `toml:"region"` + AccessKey string `toml:"access_key"` + SecretKey string `toml:"secret_key"` + RoleARN string `toml:"role_arn"` + Profile string `toml:"profile"` + CredentialPath string `toml:"shared_credential_file"` + Token string `toml:"token"` + EndpointURL string `toml:"endpoint_url"` + StatisticExclude []string `toml:"statistic_exclude"` + StatisticInclude []string `toml:"statistic_include"` + Timeout internal.Duration `toml:"timeout"` Period internal.Duration `toml:"period"` Delay internal.Duration `toml:"delay"` @@ -133,6 +136,9 @@ func (c *CloudWatch) SampleConfig() string { ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html # ratelimit = 25 + ## Timeout for http requests made by the cloudwatch client. + # timeout = "5s" + ## Namespace-wide statistic filters. These allow fewer queries to be made to ## cloudwatch. # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ] @@ -183,10 +189,7 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { return err } - err = c.updateWindow(time.Now()) - if err != nil { - return err - } + c.updateWindow(time.Now()) // Get all of the possible queries so we can send groups of 100. 
queries, err := c.getDataQueries(filteredMetrics) @@ -235,7 +238,7 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { return c.aggregateMetrics(acc, results) } -func (c *CloudWatch) initializeCloudWatch() error { +func (c *CloudWatch) initializeCloudWatch() { credentialConfig := &internalaws.CredentialConfig{ Region: c.Region, AccessKey: c.AccessKey, @@ -248,10 +251,27 @@ func (c *CloudWatch) initializeCloudWatch() error { } configProvider := credentialConfig.Credentials() - cfg := &aws.Config{} + cfg := &aws.Config{ + HTTPClient: &http.Client{ + // use values from DefaultTransport + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + }, + Timeout: c.Timeout.Duration, + }, + } + loglevel := aws.LogOff c.client = cloudwatch.New(configProvider, cfg.WithLogLevel(loglevel)) - return nil } type filteredMetric struct { @@ -370,7 +390,7 @@ func (c *CloudWatch) fetchNamespaceMetrics() ([]*cloudwatch.Metric, error) { return metrics, nil } -func (c *CloudWatch) updateWindow(relativeTo time.Time) error { +func (c *CloudWatch) updateWindow(relativeTo time.Time) { windowEnd := relativeTo.Add(-c.Delay.Duration) if c.windowEnd.IsZero() { @@ -382,8 +402,6 @@ func (c *CloudWatch) updateWindow(relativeTo time.Time) error { } c.windowEnd = windowEnd - - return nil } // getDataQueries gets all of the possible queries so we can maximize the request payload. @@ -535,6 +553,7 @@ func init() { return &CloudWatch{ CacheTTL: internal.Duration{Duration: time.Hour}, RateLimit: 25, + Timeout: internal.Duration{Duration: time.Second * 5}, } }) } From 3802c8b8cbf091a63bbf8b3e0710975666fff083 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 21 Oct 2019 14:19:51 -0700 Subject: [PATCH 1248/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a4cdac1c9..263880e68 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ - [#6473](https://github.com/influxdata/telegraf/pull/6473): Add container id as optional source tag to docker and docker_log input. - [#6504](https://github.com/influxdata/telegraf/pull/6504): Add lang parameter to OpenWeathermap input plugin. - [#6540](https://github.com/influxdata/telegraf/pull/6540): Log file open errors at debug level in tail input. +- [#6553](https://github.com/influxdata/telegraf/pull/6553): Add timeout option to cloudwatch input. ## v1.12.3 [2019-10-07] From a01d273c45ac64c478c2e460ad03d23b8fcba932 Mon Sep 17 00:00:00 2001 From: Dheeraj Dwivedi Date: Tue, 22 Oct 2019 02:53:36 +0530 Subject: [PATCH 1249/1815] Support custom success codes in http input (#6549) --- etc/telegraf.conf | 3 +++ plugins/inputs/http/README.md | 3 +++ plugins/inputs/http/http.go | 25 +++++++++++++++++++++---- plugins/inputs/http/http_test.go | 24 ++++++++++++++++++++++++ 4 files changed, 51 insertions(+), 4 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 49edc842f..bab1fb456 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -2604,6 +2604,9 @@ # ## Amount of time allowed to complete the HTTP request # # timeout = "5s" # +# ## List of success status codes +# # success_status_codes = [200] +# # ## Data format to consume. 
# ## Each data format has its own unique set of configuration options, read # ## more about them here: diff --git a/plugins/inputs/http/README.md b/plugins/inputs/http/README.md index 240fd90c9..9cd136bd0 100644 --- a/plugins/inputs/http/README.md +++ b/plugins/inputs/http/README.md @@ -40,6 +40,9 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The ## Amount of time allowed to complete the HTTP request # timeout = "5s" + ## List of success status codes + # success_status_codes = [200] + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index 34db9d287..dc155f254 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -29,6 +29,8 @@ type HTTP struct { Password string `toml:"password"` tls.ClientConfig + SuccessStatusCodes []int `toml:"success_status_codes"` + Timeout internal.Duration `toml:"timeout"` client *http.Client @@ -71,6 +73,9 @@ var sampleConfig = ` ## Amount of time allowed to complete the HTTP request # timeout = "5s" + ## List of success status codes + # success_status_codes = [200] + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: @@ -101,6 +106,11 @@ func (h *HTTP) Init() error { }, Timeout: h.Timeout.Duration, } + + // Set default as [200] + if len(h.SuccessStatusCodes) == 0 { + h.SuccessStatusCodes = []int{200} + } return nil } @@ -171,12 +181,19 @@ func (h *HTTP) gatherURL( } defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("Received status code %d (%s), expected %d (%s)", + responseHasSuccessCode := false + for _, statusCode := range h.SuccessStatusCodes { + if resp.StatusCode == statusCode { + responseHasSuccessCode = true + break + } + } + + if !responseHasSuccessCode { + return fmt.Errorf("received status code %d (%s), expected any value out of %v", resp.StatusCode, http.StatusText(resp.StatusCode), - http.StatusOK, - http.StatusText(http.StatusOK)) + h.SuccessStatusCodes) } b, err := ioutil.ReadAll(resp.Body) diff --git a/plugins/inputs/http/http_test.go b/plugins/inputs/http/http_test.go index 21eff6265..993eda732 100644 --- a/plugins/inputs/http/http_test.go +++ b/plugins/inputs/http/http_test.go @@ -106,6 +106,30 @@ func TestInvalidStatusCode(t *testing.T) { require.Error(t, acc.GatherError(plugin.Gather)) } +func TestSuccessStatusCodes(t *testing.T) { + fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusAccepted) + })) + defer fakeServer.Close() + + url := fakeServer.URL + "/endpoint" + plugin := &plugin.HTTP{ + URLs: []string{url}, + SuccessStatusCodes: []int{200, 202}, + } + + metricName := "metricName" + p, _ := parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: metricName, + }) + plugin.SetParser(p) + + var acc testutil.Accumulator + plugin.Init() + require.NoError(t, acc.GatherError(plugin.Gather)) +} + func TestMethod(t *testing.T) { fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method == "POST" { From a5ec0b1d16299b3b6fd60c3b914d28996fec0483 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 21 Oct 2019 14:24:33 -0700 Subject: [PATCH 1250/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 263880e68..78af7485a 100644 --- 
a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ - [#6504](https://github.com/influxdata/telegraf/pull/6504): Add lang parameter to OpenWeathermap input plugin. - [#6540](https://github.com/influxdata/telegraf/pull/6540): Log file open errors at debug level in tail input. - [#6553](https://github.com/influxdata/telegraf/pull/6553): Add timeout option to cloudwatch input. +- [#6549](https://github.com/influxdata/telegraf/pull/6549): Support custom success codes in http input. ## v1.12.3 [2019-10-07] From 592ca4ebde07141a4c6bf14a312f6cd045e1ea68 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 21 Oct 2019 14:46:19 -0700 Subject: [PATCH 1251/1815] Update GitHub bug issue template (#6554) --- .github/ISSUE_TEMPLATE/Bug_report.md | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/Bug_report.md b/.github/ISSUE_TEMPLATE/Bug_report.md index 188df248e..ee9a35d4f 100644 --- a/.github/ISSUE_TEMPLATE/Bug_report.md +++ b/.github/ISSUE_TEMPLATE/Bug_report.md @@ -3,26 +3,39 @@ name: Bug report about: Create a report to help us improve --- + ### Relevant telegraf.conf: - + ```toml ``` ### System info: - + ### Steps to reproduce: + + 1. ... 2. ... ### Expected behavior: + + ### Actual behavior: + + ### Additional info: - + From acdfa1be0705c81ffe1c6933acf46ac4c3e3b576 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 21 Oct 2019 21:22:53 -0700 Subject: [PATCH 1252/1815] Show default settings in mysql sample config (#6484) --- plugins/inputs/mysql/README.md | 98 ++++++++++++++++++++-------------- plugins/inputs/mysql/mysql.go | 85 ++++++++++++++++------------- 2 files changed, 104 insertions(+), 79 deletions(-) diff --git a/plugins/inputs/mysql/README.md b/plugins/inputs/mysql/README.md index 564d75e61..af00da03d 100644 --- a/plugins/inputs/mysql/README.md +++ b/plugins/inputs/mysql/README.md @@ -21,10 +21,9 @@ This plugin gathers the statistic data from MySQL server ### Configuration ```toml -# Read metrics from one or many mysql servers [[inputs.mysql]] ## specify servers via a url matching: - ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] + ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]] ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name ## e.g. ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"] @@ -32,60 +31,77 @@ This plugin gathers the statistic data from MySQL server # ## If no servers are specified, then localhost is used as the host. servers = ["tcp(127.0.0.1:3306)/"] - ## the limits for metrics form perf_events_statements - perf_events_statements_digest_text_limit = 120 - perf_events_statements_limit = 250 - perf_events_statements_time_limit = 86400 - # - ## if the list is empty, then metrics are gathered from all database tables - table_schema_databases = [] - # + + ## Selects the metric output format. + ## + ## This option exists to maintain backwards compatibility, if you have + ## existing metrics do not set or change this value until you are ready to + ## migrate to the new format. + ## + ## If you do not have existing metrics from this plugin set to the latest + ## version. 
+ ##
+ ## Telegraf >=1.6: metric_version = 2
+ ## <1.6: metric_version = 1 (or unset)
+ metric_version = 2
+
+ ## if the list is empty, then metrics are gathered from all database tables
+ # table_schema_databases = []
+
  ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list
- gather_table_schema = false
- #
+ # gather_table_schema = false
+
  ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
- gather_process_list = true
- #
+ # gather_process_list = false
+
+ ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
+ # gather_user_statistics = false
+
  ## gather auto_increment columns and max values from information schema
- gather_info_schema_auto_inc = true
- #
+ # gather_info_schema_auto_inc = false
+
  ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
- gather_innodb_metrics = true
- #
+ # gather_innodb_metrics = false
+
  ## gather metrics from SHOW SLAVE STATUS command output
- gather_slave_status = true
- #
+ # gather_slave_status = false
+
  ## gather metrics from SHOW BINARY LOGS command output
- gather_binary_logs = false
- #
+ # gather_binary_logs = false
+
  ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
- gather_table_io_waits = false
- #
+ # gather_table_io_waits = false
+
  ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
- gather_table_lock_waits = false
- #
+ # gather_table_lock_waits = false
+
  ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
- gather_index_io_waits = false
- #
+ # gather_index_io_waits = false
+
  ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
- gather_event_waits = false
- #
+ # gather_event_waits = false
+
  ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
- gather_file_events_stats = false
- #
+ # gather_file_events_stats = false
+
  ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
- gather_perf_events_statements = false
- #
+ # gather_perf_events_statements = false
+
+ ## the limits for metrics from perf_events_statements
+ # perf_events_statements_digest_text_limit = 120
+ # perf_events_statements_limit = 250
+ # perf_events_statements_time_limit = 86400
+
  ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
- interval_slow = "30m"
+ ## example: interval_slow = "30m"
+ # interval_slow = ""

  ## Optional TLS Config (will be used if tls=custom parameter specified in server uri)
- tls_ca = "/etc/telegraf/ca.pem"
- tls_cert = "/etc/telegraf/cert.pem"
- tls_key = "/etc/telegraf/key.pem"
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
```

#### Metric Version
diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go
index 4b6bae1ad..965170301 100644
--- a/plugins/inputs/mysql/mysql.go
+++ b/plugins/inputs/mysql/mysql.go
@@ -9,12 +9,11 @@ import (
 	"sync"
 	"time"

+	"github.com/go-sql-driver/mysql"
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/internal/tls"
 	"github.com/influxdata/telegraf/plugins/inputs"
 	"github.com/influxdata/telegraf/plugins/inputs/mysql/v1"
-
-	"github.com/go-sql-driver/mysql"
 )

 type Mysql struct {
@@ -68,55 +67,56 @@ const sampleConfig = `
 ## <1.6: metric_version = 1 (or unset)
 metric_version = 2

- ## the limits for metrics form perf_events_statements
- 
perf_events_statements_digest_text_limit = 120
- perf_events_statements_limit = 250
- perf_events_statements_time_limit = 86400
- #
 ## if the list is empty, then metrics are gathered from all databasee tables
- table_schema_databases = []
- #
+ # table_schema_databases = []
+
 ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list
- gather_table_schema = false
- #
+ # gather_table_schema = false
+
 ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
- gather_process_list = true
- #
+ # gather_process_list = false
+
 ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
- gather_user_statistics = true
- #
+ # gather_user_statistics = false
+
 ## gather auto_increment columns and max values from information schema
- gather_info_schema_auto_inc = true
- #
+ # gather_info_schema_auto_inc = false
+
 ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
- gather_innodb_metrics = true
- #
+ # gather_innodb_metrics = false
+
 ## gather metrics from SHOW SLAVE STATUS command output
- gather_slave_status = true
- #
+ # gather_slave_status = false
+
 ## gather metrics from SHOW BINARY LOGS command output
- gather_binary_logs = false
- #
+ # gather_binary_logs = false
+
 ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
- gather_table_io_waits = false
- #
+ # gather_table_io_waits = false
+
 ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
- gather_table_lock_waits = false
- #
+ # gather_table_lock_waits = false
+
 ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
- gather_index_io_waits = false
- #
+ # gather_index_io_waits = false
+
 ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
- gather_event_waits = false
- #
+ # gather_event_waits = false
+
 ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
- gather_file_events_stats = false
- #
+ # gather_file_events_stats = false
+
 ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
- gather_perf_events_statements = false
- #
+ # gather_perf_events_statements = false
+
+ ## the limits for metrics from perf_events_statements
+ # perf_events_statements_digest_text_limit = 120
+ # perf_events_statements_limit = 250
+ # perf_events_statements_time_limit = 86400
+
 ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
- interval_slow = "30m"
+ ## example: interval_slow = "30m"
+ # interval_slow = ""

 ## Optional TLS Config (will be used if tls=custom parameter specified in server uri)
 # tls_ca = "/etc/telegraf/ca.pem"
@@ -126,7 +126,12 @@ const sampleConfig = `
 # insecure_skip_verify = false
`

-const defaultTimeout = time.Second * time.Duration(5)
+const (
+	defaultTimeout                             = 5 * time.Second
+	defaultPerfEventsStatementsDigestTextLimit = 120
+	defaultPerfEventsStatementsLimit           = 250
+	defaultPerfEventsStatementsTimeLimit       = 86400
+)

 func (m *Mysql) SampleConfig() string {
 	return sampleConfig
@@ -1734,6 +1739,10 @@ func getDSNTag(dsn string) string {

 func init() {
 	inputs.Add("mysql", func() telegraf.Input {
-		return &Mysql{}
+		return &Mysql{
+			PerfEventsStatementsDigestTextLimit: defaultPerfEventsStatementsDigestTextLimit,
+			PerfEventsStatementsLimit:           defaultPerfEventsStatementsLimit,
+			PerfEventsStatementsTimeLimit:       defaultPerfEventsStatementsTimeLimit,
+		}
 	})
 }

From 82ba2cd52acfdbb9db468b3d3b4778f2f19facf7 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 21 Oct 2019 21:24:52 -0700
Subject: [PATCH 1253/1815] Update changelog

---
 CHANGELOG.md | 4 ++++
 1 file changed, 4 insertions(+)
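A side note on the default-seeding pattern this commit applies: a minimal sketch, assuming a trimmed-down stand-in struct rather than the plugin's real `Mysql` type, of how the factory function now fills the commented-out perf_events limits with usable defaults instead of leaving them at zero when the user omits them.

```go
package main

import "fmt"

// Stand-in for the plugin struct; field and function names here are
// illustrative, not the plugin's real identifiers.
type mysqlInput struct {
	DigestTextLimit int
	StatementsLimit int
	TimeLimit       int
}

// The factory seeds defaults so commented-out config keeps working values.
func newMysqlInput() *mysqlInput {
	return &mysqlInput{
		DigestTextLimit: 120,   // defaultPerfEventsStatementsDigestTextLimit
		StatementsLimit: 250,   // defaultPerfEventsStatementsLimit
		TimeLimit:       86400, // defaultPerfEventsStatementsTimeLimit
	}
}

func main() {
	// prints: {DigestTextLimit:120 StatementsLimit:250 TimeLimit:86400}
	fmt.Printf("%+v\n", *newMysqlInput())
}
```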
diff --git a/CHANGELOG.md b/CHANGELOG.md index 78af7485a..e9235d553 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,10 @@ - [#6553](https://github.com/influxdata/telegraf/pull/6553): Add timeout option to cloudwatch input. - [#6549](https://github.com/influxdata/telegraf/pull/6549): Support custom success codes in http input. +#### Bugfixes + +- [#6484](https://github.com/influxdata/telegraf/issues/6484): Show correct default settings in mysql sample config. + ## v1.12.3 [2019-10-07] #### Bugfixes From 17c4e0b06f183b053477604c5a6f8682c6738773 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 21 Oct 2019 21:27:05 -0700 Subject: [PATCH 1254/1815] Improve ipvs input error strings and logging (#6530) --- Gopkg.lock | 1 + plugins/common/logrus/hook.go | 35 +++++++++++++++++++++++++++++++++++ plugins/inputs/ipvs/ipvs.go | 13 ++++++++----- 3 files changed, 44 insertions(+), 5 deletions(-) create mode 100644 plugins/common/logrus/hook.go diff --git a/Gopkg.lock b/Gopkg.lock index 22520af3a..d1cde9e56 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1774,6 +1774,7 @@ "github.com/shirou/gopsutil/mem", "github.com/shirou/gopsutil/net", "github.com/shirou/gopsutil/process", + "github.com/sirupsen/logrus", "github.com/soniah/gosnmp", "github.com/streadway/amqp", "github.com/stretchr/testify/assert", diff --git a/plugins/common/logrus/hook.go b/plugins/common/logrus/hook.go new file mode 100644 index 000000000..a7f99023b --- /dev/null +++ b/plugins/common/logrus/hook.go @@ -0,0 +1,35 @@ +package logrus + +import ( + "io/ioutil" + "log" + "strings" + "sync" + + "github.com/sirupsen/logrus" +) + +var once sync.Once + +type LogHook struct { +} + +// Install a logging hook into the logrus standard logger, diverting all logs +// through the Telegraf logger at debug level. This is useful for libraries +// that directly log to the logrus system without providing an override method. +func InstallHook() { + once.Do(func() { + logrus.SetOutput(ioutil.Discard) + logrus.AddHook(&LogHook{}) + }) +} + +func (h *LogHook) Fire(entry *logrus.Entry) error { + msg := strings.ReplaceAll(entry.Message, "\n", " ") + log.Print("D! 
[logrus] ", msg) + return nil +} + +func (h *LogHook) Levels() []logrus.Level { + return logrus.AllLevels +} diff --git a/plugins/inputs/ipvs/ipvs.go b/plugins/inputs/ipvs/ipvs.go index 4f36b95ce..5e3ae0d56 100644 --- a/plugins/inputs/ipvs/ipvs.go +++ b/plugins/inputs/ipvs/ipvs.go @@ -3,7 +3,6 @@ package ipvs import ( - "errors" "fmt" "math/bits" "strconv" @@ -11,6 +10,7 @@ import ( "github.com/docker/libnetwork/ipvs" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/common/logrus" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -35,7 +35,7 @@ func (i *IPVS) Gather(acc telegraf.Accumulator) error { if i.handle == nil { h, err := ipvs.New("") // TODO: make the namespace configurable if err != nil { - return errors.New("Unable to open IPVS handle") + return fmt.Errorf("unable to open IPVS handle: %v", err) } i.handle = h } @@ -44,7 +44,7 @@ func (i *IPVS) Gather(acc telegraf.Accumulator) error { if err != nil { i.handle.Close() i.handle = nil // trigger a reopen on next call to gather - return errors.New("Failed to list IPVS services") + return fmt.Errorf("failed to list IPVS services: %v", err) } for _, s := range services { fields := map[string]interface{}{ @@ -61,7 +61,7 @@ func (i *IPVS) Gather(acc telegraf.Accumulator) error { destinations, err := i.handle.GetDestinations(s) if err != nil { - i.Log.Errorf("Failed to list destinations for a virtual server: %s", err.Error()) + i.Log.Errorf("Failed to list destinations for a virtual server: %v", err) continue // move on to the next virtual server } @@ -148,5 +148,8 @@ func addressFamilyToString(af uint16) string { } func init() { - inputs.Add("ipvs", func() telegraf.Input { return &IPVS{} }) + inputs.Add("ipvs", func() telegraf.Input { + logrus.InstallHook() + return &IPVS{} + }) } From 82a8057dc1458917d2a54155285b0c63aa2decdb Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 21 Oct 2019 21:30:18 -0700 Subject: [PATCH 1255/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e9235d553..2ce0eab52 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ - [#6540](https://github.com/influxdata/telegraf/pull/6540): Log file open errors at debug level in tail input. - [#6553](https://github.com/influxdata/telegraf/pull/6553): Add timeout option to cloudwatch input. - [#6549](https://github.com/influxdata/telegraf/pull/6549): Support custom success codes in http input. +- [#6530](https://github.com/influxdata/telegraf/pull/6530): Improve ipvs input error strings and logging. 
#### Bugfixes From d2f3215890fd0d00fbe7a27624b6bc6af7318f03 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 22 Oct 2019 13:18:02 -0700 Subject: [PATCH 1256/1815] Build with Go 1.13.3 and 1.12.12 (#6565) --- .circleci/config.yml | 4 ++-- Makefile | 8 ++++---- appveyor.yml | 4 ++-- scripts/ci-1.12.docker | 2 +- scripts/ci-1.13.docker | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 50a8080ec..a32bd77a4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,10 +6,10 @@ defaults: GOFLAGS: -p=8 go-1_12: &go-1_12 docker: - - image: 'quay.io/influxdb/telegraf-ci:1.12.10' + - image: 'quay.io/influxdb/telegraf-ci:1.12.12' go-1_13: &go-1_13 docker: - - image: 'quay.io/influxdb/telegraf-ci:1.13.1' + - image: 'quay.io/influxdb/telegraf-ci:1.13.3' version: 2 jobs: diff --git a/Makefile b/Makefile index 0846e73b6..aeae48e4c 100644 --- a/Makefile +++ b/Makefile @@ -131,10 +131,10 @@ plugin-%: .PHONY: ci-1.13 ci-1.13: - docker build -t quay.io/influxdb/telegraf-ci:1.13.1 - < scripts/ci-1.13.docker - docker push quay.io/influxdb/telegraf-ci:1.13.1 + docker build -t quay.io/influxdb/telegraf-ci:1.13.3 - < scripts/ci-1.13.docker + docker push quay.io/influxdb/telegraf-ci:1.13.3 .PHONY: ci-1.12 ci-1.12: - docker build -t quay.io/influxdb/telegraf-ci:1.12.10 - < scripts/ci-1.12.docker - docker push quay.io/influxdb/telegraf-ci:1.12.10 + docker build -t quay.io/influxdb/telegraf-ci:1.12.12 - < scripts/ci-1.12.docker + docker push quay.io/influxdb/telegraf-ci:1.12.12 diff --git a/appveyor.yml b/appveyor.yml index 8197172ba..fba80d46f 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -13,11 +13,11 @@ platform: x64 install: - IF NOT EXIST "C:\Cache" mkdir C:\Cache - - IF NOT EXIST "C:\Cache\go1.13.1.msi" curl -o "C:\Cache\go1.13.1.msi" https://storage.googleapis.com/golang/go1.13.1.windows-amd64.msi + - IF NOT EXIST "C:\Cache\go1.13.3.msi" curl -o "C:\Cache\go1.13.3.msi" https://storage.googleapis.com/golang/go1.13.3.windows-amd64.msi - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip - IF EXIST "C:\Go" rmdir /S /Q C:\Go - - msiexec.exe /i "C:\Cache\go1.13.1.msi" /quiet + - msiexec.exe /i "C:\Cache\go1.13.3.msi" /quiet - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y - go get -d github.com/golang/dep diff --git a/scripts/ci-1.12.docker b/scripts/ci-1.12.docker index 0572f4641..f60f49a43 100644 --- a/scripts/ci-1.12.docker +++ b/scripts/ci-1.12.docker @@ -1,4 +1,4 @@ -FROM golang:1.12.10 +FROM golang:1.12.12 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/ci-1.13.docker b/scripts/ci-1.13.docker index d859850dc..c3c9792d2 100644 --- a/scripts/ci-1.13.docker +++ b/scripts/ci-1.13.docker @@ -1,4 +1,4 @@ -FROM golang:1.13.1 +FROM golang:1.13.3 RUN chmod -R 755 "$GOPATH" From d8c8458e1ecd74b6902fb61e96002bd822538d2a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 22 Oct 2019 13:18:27 -0700 Subject: [PATCH 1257/1815] Update changelog --- CHANGELOG.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2ce0eab52..40134cb01 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ #### Release Notes -- Official packages are built with Go 1.13.1. +- Official packages built with Go 1.13.3. 
#### New Inputs @@ -38,6 +38,12 @@ - [#6484](https://github.com/influxdata/telegraf/issues/6484): Show correct default settings in mysql sample config. +## v1.12.4 [unreleased] + +#### Release Notes + +- Official packages built with Go 1.12.12. + ## v1.12.3 [2019-10-07] #### Bugfixes From b3f20f69f55e4034cd0bf1dd094ea4a46040ced0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 22 Oct 2019 13:32:03 -0700 Subject: [PATCH 1258/1815] Default logtarget to file for backwards compatibility (#6555) --- docs/CONFIGURATION.md | 9 ++++++--- internal/config/config.go | 16 +++++++++++++--- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 5b3eb5887..75aa1503b 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -144,11 +144,14 @@ The agent table configures Telegraf and the defaults used across all plugins. Log only error level messages. - **logtarget**: - Log target - `file`, `stderr` or `eventlog` (Windows only). - The empty string means to log to stderr. + Log target controls the destination for logs and can be one of "file", + "stderr" or, on Windows, "eventlog". When set to "file", the output file is + determined by the "logfile" setting. - **logfile**: - Log file name. + Name of the file to be logged to when using the "file" logtarget. If set to + the empty string then logs are written to stderr. + - **logfile_rotation_interval**: The logfile will be rotated after the time interval specified. When set to diff --git a/internal/config/config.go b/internal/config/config.go index f01888499..0d54dc566 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -75,6 +75,7 @@ func NewConfig() *Config { Interval: internal.Duration{Duration: 10 * time.Second}, RoundInterval: true, FlushInterval: internal.Duration{Duration: 10 * time.Second}, + LogTarget: "file", LogfileRotationMaxArchives: 5, }, @@ -146,10 +147,13 @@ type AgentConfig struct { // Quiet is the option for running in quiet mode Quiet bool `toml:"quiet"` - // Log target - file, stderr, eventlog (Windows only). The empty string means to log to stderr. + // Log target controls the destination for logs and can be one of "file", + // "stderr" or, on Windows, "eventlog". When set to "file", the output file + // is determined by the "logfile" setting. LogTarget string `toml:"logtarget"` - // Log file name . + // Name of the file to be logged to when using the "file" logtarget. If set to + // the empty string then logs are written to stderr. Logfile string `toml:"logfile"` // The file will be rotated after the time interval specified. When set @@ -290,7 +294,13 @@ var agentConfig = ` ## Log only error level messages. # quiet = false - ## Log file name, the empty string means to log to stderr. + ## Log target controls the destination for logs and can be one of "file", + ## "stderr" or, on Windows, "eventlog". When set to "file", the output file + ## is determined by the "logfile" setting. + # logtarget = "file" + + ## Name of the file to be logged to when using the "file" logtarget. If set to + ## the empty string then logs are written to stderr. # logfile = "" ## The logfile will be rotated after the time interval specified. 
When set From b46bb222c4e3de1765f13ec172434578de63da46 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 22 Oct 2019 13:32:58 -0700 Subject: [PATCH 1259/1815] Update godirwalk version (#6557) Version 1.10 and later support Solaris --- Gopkg.lock | 6 +++--- Gopkg.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index d1cde9e56..3a064661f 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -758,12 +758,12 @@ revision = "615a14ed75099c9eaac6949e22ac2341bf9d3197" [[projects]] - digest = "1:a12b6f20a7e5eb7412d2e5cd15e1262a021f735fa958d664d9e7ba2160eefd0a" + digest = "1:3e160bec100719bb664ce5192b42e82e66b290397da4c0845aed5ce3cfce60cb" name = "github.com/karrick/godirwalk" packages = ["."] pruneopts = "" - revision = "2de2192f9e35ce981c152a873ed943b93b79ced4" - version = "v1.7.5" + revision = "532e518bccc921708e14b29e16503b1bf5c898cc" + version = "v1.12.0" [[projects]] branch = "master" diff --git a/Gopkg.toml b/Gopkg.toml index 2d545e224..3069cbf40 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -260,7 +260,7 @@ [[constraint]] name = "github.com/karrick/godirwalk" - version = "1.7.5" + version = "1.10" [[override]] name = "github.com/harlow/kinesis-consumer" From c1521b5f6897b772739be605798d1538727c0e14 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 22 Oct 2019 17:46:57 -0600 Subject: [PATCH 1260/1815] Ensure metrics generated are correct in ping plugin using "native" (#6563) --- Gopkg.lock | 6 ++-- plugins/inputs/ping/ping.go | 56 ++++++++++++++++++++++++++++++++----- 2 files changed, 52 insertions(+), 10 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 3a064661f..aef85a69b 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -438,12 +438,12 @@ revision = "25d852aebe32c875e9c044af3eef9c7dc6bc777f" [[projects]] - digest = "1:c6f371f2b02c751a83be83139a12a5467e55393feda16d4f8dfa95adfc4efede" + digest = "1:7a9dc29b3fbc9a6440d98fcff422a2ce1a613975697ea560e3610084234f91ec" name = "github.com/glinton/ping" packages = ["."] pruneopts = "" - revision = "1983bc2fd5de3ea00aa5457bbc8774300e889db9" - version = "v0.1.1" + revision = "d3c0ecf4df108179eccdff2176f4ff569c3aab37" + version = "v0.1.3" [[projects]] digest = "1:df89444601379b2e1ee82bf8e6b72af9901cbeed4b469fa380a519c89c339310" diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index ac0e9ebdf..581d429f7 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -3,10 +3,12 @@ package ping import ( "context" "errors" + "log" "math" "net" "os/exec" "runtime" + "strings" "sync" "time" @@ -204,7 +206,11 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { host, err := net.ResolveIPAddr(network, destination) if err != nil { - acc.AddFields("ping", map[string]interface{}{"result_code": 1}, map[string]string{"url": destination}) + acc.AddFields( + "ping", + map[string]interface{}{"result_code": 1}, + map[string]string{"url": destination}, + ) acc.AddError(err) return } @@ -243,8 +249,29 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { wg := &sync.WaitGroup{} c := ping.Client{} - var i int - for i = 0; i < p.Count; i++ { + var doErr error + var packetsSent int + + type sentReq struct { + err error + sent bool + } + sents := make(chan sentReq) + + r.Add(1) + go func() { + for sent := range sents { + if sent.err != nil { + doErr = sent.err + } + if sent.sent { + packetsSent++ + } + } + r.Done() + }() + + for i := 0; i < p.Count; i++ { select { case <-ctx.Done(): goto finish @@ 
-260,9 +287,12 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { Src: net.ParseIP(p.listenAddr), Seq: seq, }) + + sent := sentReq{err: err, sent: true} if err != nil { - acc.AddFields("ping", map[string]interface{}{"result_code": 2}, map[string]string{"url": destination}) - acc.AddError(err) + if strings.Contains(err.Error(), "not permitted") { + sent.sent = false + } return } @@ -274,13 +304,19 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { finish: wg.Wait() close(resps) + close(sents) r.Wait() - tags, fields := onFin(i, rsps, destination) + + if doErr != nil && strings.Contains(doErr.Error(), "not permitted") { + log.Printf("D! [inputs.ping] %s", doErr.Error()) + } + + tags, fields := onFin(packetsSent, rsps, doErr, destination) acc.AddFields("ping", fields, tags) } -func onFin(packetsSent int, resps []*ping.Response, destination string) (map[string]string, map[string]interface{}) { +func onFin(packetsSent int, resps []*ping.Response, err error, destination string) (map[string]string, map[string]interface{}) { packetsRcvd := len(resps) tags := map[string]string{"url": destination} @@ -291,10 +327,16 @@ func onFin(packetsSent int, resps []*ping.Response, destination string) (map[str } if packetsSent == 0 { + if err != nil { + fields["result_code"] = 2 + } return tags, fields } if packetsRcvd == 0 { + if err != nil { + fields["result_code"] = 1 + } fields["percent_packet_loss"] = float64(100) return tags, fields } From 5a6fe149f684f77b53fdd6f17739450afd7cad5b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 22 Oct 2019 16:50:04 -0700 Subject: [PATCH 1261/1815] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 40134cb01..fb5d8563a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,10 @@ - Official packages built with Go 1.12.12. +#### Bugfixes + +- [#6521](https://github.com/influxdata/telegraf/issues/6521): Fix metric generation with ping input native method. 
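The code path touched by this fix is only taken when the ping input is configured with `method = "native"`. A minimal configuration sketch (host and count values illustrative; option names as documented for the ping input):

```toml
[[inputs.ping]]
  ## Hosts to send ping packets to.
  urls = ["example.org"]

  ## Send pings with the cross-platform native method instead of
  ## shelling out to the system ping binary.
  method = "native"

  ## Number of pings to send per collection interval.
  count = 5
```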
+ ## v1.12.3 [2019-10-07] #### Bugfixes From 2397c53d7db18443874079d5aecdac9e7038d06d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 23 Oct 2019 12:40:31 -0700 Subject: [PATCH 1262/1815] Exclude alias tag if unset from plugin internal stats (#6571) --- internal/models/running_aggregator.go | 16 +++-- internal/models/running_input.go | 15 +++-- internal/models/running_output.go | 15 +++-- internal/models/running_processor.go | 11 ++-- selfstat/selfstat.go | 90 +++++++++++++++++++-------- selfstat/selfstat_test.go | 46 ++++++-------- selfstat/stat.go | 7 --- selfstat/timingStat.go | 7 --- 8 files changed, 118 insertions(+), 89 deletions(-) diff --git a/internal/models/running_aggregator.go b/internal/models/running_aggregator.go index 91a10debb..b8957e30a 100644 --- a/internal/models/running_aggregator.go +++ b/internal/models/running_aggregator.go @@ -24,10 +24,14 @@ type RunningAggregator struct { } func NewRunningAggregator(aggregator telegraf.Aggregator, config *AggregatorConfig) *RunningAggregator { + tags := map[string]string{"aggregator": config.Name} + if config.Alias != "" { + tags["alias"] = config.Alias + } + logger := &Logger{ Name: logName("aggregators", config.Name, config.Alias), - Errs: selfstat.Register("aggregate", "errors", - map[string]string{"input": config.Name, "alias": config.Alias}), + Errs: selfstat.Register("aggregate", "errors", tags), } setLogIfExist(aggregator, logger) @@ -38,22 +42,22 @@ func NewRunningAggregator(aggregator telegraf.Aggregator, config *AggregatorConf MetricsPushed: selfstat.Register( "aggregate", "metrics_pushed", - map[string]string{"aggregator": config.Name, "alias": config.Alias}, + tags, ), MetricsFiltered: selfstat.Register( "aggregate", "metrics_filtered", - map[string]string{"aggregator": config.Name, "alias": config.Alias}, + tags, ), MetricsDropped: selfstat.Register( "aggregate", "metrics_dropped", - map[string]string{"aggregator": config.Name, "alias": config.Alias}, + tags, ), PushTime: selfstat.Register( "aggregate", "push_time_ns", - map[string]string{"aggregator": config.Name, "alias": config.Alias}, + tags, ), log: logger, } diff --git a/internal/models/running_input.go b/internal/models/running_input.go index 85f0afb81..c09fb1409 100644 --- a/internal/models/running_input.go +++ b/internal/models/running_input.go @@ -21,12 +21,15 @@ type RunningInput struct { } func NewRunningInput(input telegraf.Input, config *InputConfig) *RunningInput { - logger := &Logger{ - Name: logName("inputs", config.Name, config.Alias), - Errs: selfstat.Register("gather", "errors", - map[string]string{"input": config.Name, "alias": config.Alias}), + tags := map[string]string{"input": config.Name} + if config.Alias != "" { + tags["alias"] = config.Alias } + logger := &Logger{ + Name: logName("inputs", config.Name, config.Alias), + Errs: selfstat.Register("gather", "errors", tags), + } setLogIfExist(input, logger) return &RunningInput{ @@ -35,12 +38,12 @@ func NewRunningInput(input telegraf.Input, config *InputConfig) *RunningInput { MetricsGathered: selfstat.Register( "gather", "metrics_gathered", - map[string]string{"input": config.Name, "alias": config.Alias}, + tags, ), GatherTime: selfstat.RegisterTiming( "gather", "gather_time_ns", - map[string]string{"input": config.Name, "alias": config.Alias}, + tags, ), log: logger, } diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 282c2d23b..32e9d5ceb 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -57,12 +57,15 @@ 
func NewRunningOutput( batchSize int, bufferLimit int, ) *RunningOutput { - logger := &Logger{ - Name: logName("outputs", config.Name, config.Alias), - Errs: selfstat.Register("write", "errors", - map[string]string{"output": config.Name, "alias": config.Alias}), + tags := map[string]string{"output": config.Name} + if config.Alias != "" { + tags["alias"] = config.Alias } + logger := &Logger{ + Name: logName("outputs", config.Name, config.Alias), + Errs: selfstat.Register("write", "errors", tags), + } setLogIfExist(output, logger) if config.MetricBufferLimit > 0 { @@ -88,12 +91,12 @@ func NewRunningOutput( MetricsFiltered: selfstat.Register( "write", "metrics_filtered", - map[string]string{"output": config.Name, "alias": config.Alias}, + tags, ), WriteTime: selfstat.RegisterTiming( "write", "write_time_ns", - map[string]string{"output": config.Name, "alias": config.Alias}, + tags, ), log: logger, } diff --git a/internal/models/running_processor.go b/internal/models/running_processor.go index 5a12716e5..22a7d0198 100644 --- a/internal/models/running_processor.go +++ b/internal/models/running_processor.go @@ -29,12 +29,15 @@ type ProcessorConfig struct { } func NewRunningProcessor(processor telegraf.Processor, config *ProcessorConfig) *RunningProcessor { - logger := &Logger{ - Name: logName("processors", config.Name, config.Alias), - Errs: selfstat.Register("process", "errors", - map[string]string{"input": config.Name, "alias": config.Alias}), + tags := map[string]string{"processor": config.Name} + if config.Alias != "" { + tags["alias"] = config.Alias } + logger := &Logger{ + Name: logName("processors", config.Name, config.Alias), + Errs: selfstat.Register("process", "errors", tags), + } setLogIfExist(processor, logger) return &RunningProcessor{ diff --git a/selfstat/selfstat.go b/selfstat/selfstat.go index 98ecbb4d4..821db1c94 100644 --- a/selfstat/selfstat.go +++ b/selfstat/selfstat.go @@ -32,9 +32,6 @@ type Stat interface { // Tags is a tag map. Each time this is called a new map is allocated. Tags() map[string]string - // Key is the unique measurement+tags key of the stat. - Key() uint64 - // Incr increments a regular stat by 'v'. // in the case of a timing stat, increment adds the timing to the cache. Incr(v int64) @@ -56,11 +53,7 @@ type Stat interface { // The returned Stat can be incremented by the consumer of Register(), and it's // value will be returned as a telegraf metric when Metrics() is called. func Register(measurement, field string, tags map[string]string) Stat { - return registry.register(&stat{ - measurement: "internal_" + measurement, - field: field, - tags: tags, - }) + return registry.register("internal_"+measurement, field, tags) } // RegisterTiming registers the given measurement, field, and tags in the selfstat @@ -80,11 +73,7 @@ func Register(measurement, field string, tags map[string]string) Stat { // The returned Stat can be incremented by the consumer of Register(), and it's // value will be returned as a telegraf metric when Metrics() is called. func RegisterTiming(measurement, field string, tags map[string]string) Stat { - return registry.register(&timingStat{ - measurement: "internal_" + measurement, - field: field, - tags: tags, - }) + return registry.registerTiming("internal_"+measurement, field, tags) } // Metrics returns all registered stats as telegraf metrics. 
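A minimal sketch of the caller-side contract this refactor preserves (plugin context hypothetical; `selfstat.Register` and `Stat.Incr` as defined in this file): repeated registration of an identical measurement, field, and tag set returns the existing stat, and the registry now stores its own copy of the tag map:

```go
// Sketch only: how a plugin consumes the selfstat API shown above.
package example

import "github.com/influxdata/telegraf/selfstat"

func trackGathered() {
	tags := map[string]string{"input": "mem"}

	// Returns the already-registered Stat on repeated calls with an
	// identical measurement, field, and tag set.
	gathered := selfstat.Register("gather", "metrics_gathered", tags)

	// After this patch the registry copied 'tags', so mutating the
	// caller's map no longer alters the registered stat's tags.
	tags["alias"] = "mem1"

	gathered.Incr(1)
}
```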
@@ -125,22 +114,71 @@ type rgstry struct { mu sync.Mutex } -func (r *rgstry) register(s Stat) Stat { +func (r *rgstry) register(measurement, field string, tags map[string]string) Stat { r.mu.Lock() defer r.mu.Unlock() - if stats, ok := r.stats[s.Key()]; ok { - // measurement exists - if stat, ok := stats[s.FieldName()]; ok { - // field already exists, so don't create a new one - return stat - } - r.stats[s.Key()][s.FieldName()] = s - return s - } else { - // creating a new unique metric - r.stats[s.Key()] = map[string]Stat{s.FieldName(): s} - return s + + key := key(measurement, tags) + if stat, ok := registry.get(key, field); ok { + return stat } + + t := make(map[string]string, len(tags)) + for k, v := range tags { + t[k] = v + } + + s := &stat{ + measurement: measurement, + field: field, + tags: t, + } + registry.set(key, s) + return s +} + +func (r *rgstry) registerTiming(measurement, field string, tags map[string]string) Stat { + r.mu.Lock() + defer r.mu.Unlock() + + key := key(measurement, tags) + if stat, ok := registry.get(key, field); ok { + return stat + } + + t := make(map[string]string, len(tags)) + for k, v := range tags { + t[k] = v + } + + s := &timingStat{ + measurement: measurement, + field: field, + tags: t, + } + registry.set(key, s) + return s +} + +func (r *rgstry) get(key uint64, field string) (Stat, bool) { + if _, ok := r.stats[key]; !ok { + return nil, false + } + + if stat, ok := r.stats[key][field]; ok { + return stat, true + } + + return nil, false +} + +func (r *rgstry) set(key uint64, s Stat) { + if _, ok := r.stats[key]; !ok { + r.stats[key] = make(map[string]Stat) + } + + r.stats[key][s.FieldName()] = s + return } func key(measurement string, tags map[string]string) uint64 { diff --git a/selfstat/selfstat_test.go b/selfstat/selfstat_test.go index 2de2bd381..10ce32728 100644 --- a/selfstat/selfstat_test.go +++ b/selfstat/selfstat_test.go @@ -5,8 +5,8 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var ( @@ -109,32 +109,17 @@ func TestRegisterTimingAndIncrAndSet(t *testing.T) { } func TestStatKeyConsistency(t *testing.T) { - s := &stat{ - measurement: "internal_stat", - field: "myfield", - tags: map[string]string{ - "foo": "bar", - "bar": "baz", - "whose": "first", - }, - } - k := s.Key() - for i := 0; i < 5000; i++ { - // assert that the Key() func doesn't change anything. - assert.Equal(t, k, s.Key()) - - // assert that two identical measurements always produce the same key. 
- tmp := &stat{ - measurement: "internal_stat", - field: "myfield", - tags: map[string]string{ - "foo": "bar", - "bar": "baz", - "whose": "first", - }, - } - assert.Equal(t, k, tmp.Key()) - } + lhs := key("internal_stats", map[string]string{ + "foo": "bar", + "bar": "baz", + "whose": "first", + }) + rhs := key("internal_stats", map[string]string{ + "foo": "bar", + "bar": "baz", + "whose": "first", + }) + require.Equal(t, lhs, rhs) } func TestRegisterMetricsAndVerify(t *testing.T) { @@ -219,3 +204,10 @@ func TestRegisterMetricsAndVerify(t *testing.T) { }, ) } + +func TestRegisterCopy(t *testing.T) { + tags := map[string]string{"input": "mem", "alias": "mem1"} + stat := Register("gather", "metrics_gathered", tags) + tags["new"] = "value" + require.NotEqual(t, tags, stat.Tags()) +} diff --git a/selfstat/stat.go b/selfstat/stat.go index d7ec60a2b..e1905baf5 100644 --- a/selfstat/stat.go +++ b/selfstat/stat.go @@ -41,10 +41,3 @@ func (s *stat) Tags() map[string]string { } return m } - -func (s *stat) Key() uint64 { - if s.key == 0 { - s.key = key(s.measurement, s.tags) - } - return s.key -} diff --git a/selfstat/timingStat.go b/selfstat/timingStat.go index ef0ee05aa..13f8400bc 100644 --- a/selfstat/timingStat.go +++ b/selfstat/timingStat.go @@ -57,10 +57,3 @@ func (s *timingStat) Tags() map[string]string { } return m } - -func (s *timingStat) Key() uint64 { - if s.key == 0 { - s.key = key(s.measurement, s.tags) - } - return s.key -} From 44ab9b44f83e337fe5583d2be1fb135419388e88 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 23 Oct 2019 12:42:42 -0700 Subject: [PATCH 1263/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fb5d8563a..42b413fc0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,7 @@ #### Bugfixes - [#6521](https://github.com/influxdata/telegraf/issues/6521): Fix metric generation with ping input native method. +- [#6541](https://github.com/influxdata/telegraf/issues/6541): Exclude alias tag if unset from plugin internal stats. ## v1.12.3 [2019-10-07] From 504ccc25a7a0af9f059e6684ca64a38408481d6a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 23 Oct 2019 12:50:54 -0700 Subject: [PATCH 1264/1815] Fix powerdns_recursor socket_mode option (#6572) --- plugins/inputs/powerdns/README.md | 10 +++++++ .../powerdns_recursor/powerdns_recursor.go | 29 +++++++++++++------ .../powerdns_recursor_test.go | 3 ++ 3 files changed, 33 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/powerdns/README.md b/plugins/inputs/powerdns/README.md index 4b1732782..2e245eeff 100644 --- a/plugins/inputs/powerdns/README.md +++ b/plugins/inputs/powerdns/README.md @@ -14,6 +14,16 @@ The powerdns plugin gathers metrics about PowerDNS using unix socket. unix_sockets = ["/var/run/pdns.controlsocket"] ``` +#### Permissions + +Telegraf will need read access to the powerdns control socket. 
+ +On many systems this can be accomplished by adding the `telegraf` user to the +`pdns` group: +``` +usermod telegraf -a -G pdns +``` + ### Measurements & Fields: - powerdns diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor.go b/plugins/inputs/powerdns_recursor/powerdns_recursor.go index fe6ecb5fe..d040d8355 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor.go @@ -18,10 +18,11 @@ import ( ) type PowerdnsRecursor struct { - UnixSockets []string + UnixSockets []string `toml:"unix_sockets"` + SocketDir string `toml:"socket_dir"` + SocketMode string `toml:"socket_mode"` - SocketDir string `toml:"socket_dir"` - SocketMode uint32 `toml:"socket_mode"` + mode uint32 } var defaultTimeout = 5 * time.Second @@ -45,6 +46,18 @@ func (p *PowerdnsRecursor) Description() string { return "Read metrics from one or many PowerDNS Recursor servers" } +func (p *PowerdnsRecursor) Init() error { + if p.SocketMode != "" { + mode, err := strconv.ParseUint(p.SocketMode, 8, 32) + if err != nil { + return fmt.Errorf("could not parse socket_mode: %v", err) + } + + p.mode = uint32(mode) + } + return nil +} + func (p *PowerdnsRecursor) Gather(acc telegraf.Accumulator) error { if len(p.UnixSockets) == 0 { return p.gatherServer("/var/run/pdns_recursor.controlsocket", acc) @@ -79,11 +92,7 @@ func (p *PowerdnsRecursor) gatherServer(address string, acc telegraf.Accumulator if err != nil { return err } - perm := uint32(0666) - if p.SocketMode > 0 { - perm = p.SocketMode - } - if err := os.Chmod(recvSocket, os.FileMode(perm)); err != nil { + if err := os.Chmod(recvSocket, os.FileMode(p.mode)); err != nil { return err } defer conn.Close() @@ -151,6 +160,8 @@ func parseResponse(metrics string) map[string]interface{} { func init() { inputs.Add("powerdns_recursor", func() telegraf.Input { - return &PowerdnsRecursor{} + return &PowerdnsRecursor{ + mode: uint32(0666), + } }) } diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go index 629fe81c8..0ca4daf69 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go @@ -139,7 +139,10 @@ func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { p := &PowerdnsRecursor{ UnixSockets: []string{controlSocket}, SocketDir: "/tmp", + SocketMode: "0666", } + err = p.Init() + require.NoError(t, err) var acc testutil.Accumulator From e3839697b15e6b1231268ffd7d82d4a4d0d04cd2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 23 Oct 2019 12:52:23 -0700 Subject: [PATCH 1265/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 42b413fc0..70662a2c4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ - [#6521](https://github.com/influxdata/telegraf/issues/6521): Fix metric generation with ping input native method. - [#6541](https://github.com/influxdata/telegraf/issues/6541): Exclude alias tag if unset from plugin internal stats. +- [#6564](https://github.com/influxdata/telegraf/issues/6564): Fix socket_mode option in powerdns_recursor input. 
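With this fix, `socket_mode` is read as an octal string and applied to the receive socket. A configuration sketch using the values from the plugin's tests (paths illustrative):

```toml
[[inputs.powerdns_recursor]]
  ## Paths to the recursor control socket(s).
  unix_sockets = ["/var/run/pdns_recursor.controlsocket"]

  ## Directory in which Telegraf creates its receive socket.
  socket_dir = "/tmp"

  ## Permissions applied to the receive socket, as an octal string.
  socket_mode = "0666"
```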
## v1.12.3 [2019-10-07]

#### Bugfixes

From 41d6a1a78784d016db845ec8441a7832f27774a0 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 23 Oct 2019 12:56:50 -0700
Subject: [PATCH 1266/1815] Set 1.12.4 release date in changelog

---
 CHANGELOG.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 70662a2c4..4e8fc6f43 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,7 +38,7 @@
 - [#6484](https://github.com/influxdata/telegraf/issues/6484): Show correct default settings in mysql sample config.
 
-## v1.12.4 [unreleased]
+## v1.12.4 [2019-10-23]
 
 #### Release Notes
 
From a9a0d4048a2960fffe0d8fc22abada7ed89b5f8d Mon Sep 17 00:00:00 2001
From: David McKay
Date: Wed, 23 Oct 2019 22:06:39 +0100
Subject: [PATCH 1267/1815] Add strict mode to JSON parser (#6536)

---
 plugins/parsers/json/README.md      |  4 ++++
 plugins/parsers/json/parser.go      |  8 ++++++-
 plugins/parsers/json/parser_test.go | 36 +++++++++++++++++++++++++++++
 plugins/parsers/registry.go         |  4 ++++
 4 files changed, 51 insertions(+), 1 deletion(-)

diff --git a/plugins/parsers/json/README.md b/plugins/parsers/json/README.md
index 08aeef18e..45f4a98c6 100644
--- a/plugins/parsers/json/README.md
+++ b/plugins/parsers/json/README.md
@@ -18,6 +18,10 @@ ignored unless specified in the `tag_key` or `json_string_fields` options.
   ##   https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
   data_format = "json"
 
+  ## When json_strict is true and a JSON array is being parsed, all objects within the
+  ## array must be valid
+  json_strict = false
+
   ## Query is a GJSON path that specifies a specific chunk of JSON to be
   ## parsed, if not specified the whole document will be parsed.
   ##
diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go
index fb64997fe..ae8c15c0d 100644
--- a/plugins/parsers/json/parser.go
+++ b/plugins/parsers/json/parser.go
@@ -32,6 +32,7 @@ type Config struct {
 	TimeFormat  string
 	Timezone    string
 	DefaultTags map[string]string
+	Strict      bool
 }
 
 type Parser struct {
@@ -44,6 +45,7 @@ type Parser struct {
 	timeFormat  string
 	timezone    string
 	defaultTags map[string]string
+	strict      bool
 }
 
 func New(config *Config) (*Parser, error) {
@@ -62,6 +64,7 @@ func New(config *Config) (*Parser, error) {
 		timeFormat:  config.TimeFormat,
 		timezone:    config.Timezone,
 		defaultTags: config.DefaultTags,
+		strict:      config.Strict,
 	}, nil
 }
 
@@ -73,7 +76,10 @@ func (p *Parser) parseArray(data []interface{}) ([]telegraf.Metric, error) {
 		case map[string]interface{}:
 			metrics, err := p.parseObject(v)
 			if err != nil {
-				return nil, err
+				if p.strict {
+					return nil, err
+				}
+				continue
 			}
 			results = append(results, metrics...)
 		default:
diff --git a/plugins/parsers/json/parser_test.go b/plugins/parsers/json/parser_test.go
index 44ae73af5..0b9493b40 100644
--- a/plugins/parsers/json/parser_test.go
+++ b/plugins/parsers/json/parser_test.go
@@ -17,6 +17,7 @@ const (
 	validJSONArrayMultiple = "[{\"a\": 5, \"b\": {\"c\": 6}}, {\"a\": 7, \"b\": {\"c\": 8}}]"
 	invalidJSON            = "I don't think this is JSON"
 	invalidJSON2           = "{\"a\": 5, \"b\": \"c\": 6}}"
+	mixedValidityJSON      = "[{\"a\": 5, \"time\": \"2006-01-02T15:04:05\"}, {\"a\": 2}]"
 )
 
 const validJSONTags = `
@@ -152,6 +153,41 @@ func TestParseInvalidJSON(t *testing.T) {
 	require.Error(t, err)
 }
 
+func TestParseJSONImplicitStrictness(t *testing.T) {
+	parserImplicitNoStrict, err := New(&Config{
+		MetricName: "json_test",
+		TimeKey:    "time",
+	})
+	require.NoError(t, err)
+
+	_, err = parserImplicitNoStrict.Parse([]byte(mixedValidityJSON))
+	require.NoError(t, err)
+}
+
+func TestParseJSONExplicitStrictnessFalse(t *testing.T) {
+	parserNoStrict, err := New(&Config{
+		MetricName: "json_test",
+		TimeKey:    "time",
+		Strict:     false,
+	})
+	require.NoError(t, err)
+
+	_, err = parserNoStrict.Parse([]byte(mixedValidityJSON))
+	require.NoError(t, err)
+}
+
+func TestParseJSONExplicitStrictnessTrue(t *testing.T) {
+	parserStrict, err := New(&Config{
+		MetricName: "json_test",
+		TimeKey:    "time",
+		Strict:     true,
+	})
+	require.NoError(t, err)
+
+	_, err = parserStrict.Parse([]byte(mixedValidityJSON))
+	require.Error(t, err)
+}
+
 func TestParseWithTagKeys(t *testing.T) {
 	// Test that strings not matching tag keys are ignored
 	parser, err := New(&Config{
diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go
index 9e4ea2b1f..1c3af2763 100644
--- a/plugins/parsers/registry.go
+++ b/plugins/parsers/registry.go
@@ -89,6 +89,9 @@ type Config struct {
 	// default timezone
 	JSONTimezone string `toml:"json_timezone"`
 
+	// Whether to continue if a JSON object can't be coerced
+	JSONStrict bool `toml:"json_strict"`
+
 	// Authentication file for collectd
 	CollectdAuthFile string `toml:"collectd_auth_file"`
 	// One of none (default), sign, or encrypt
@@ -164,6 +167,7 @@ func NewParser(config *Config) (Parser, error) {
 				TimeFormat:  config.JSONTimeFormat,
 				Timezone:    config.JSONTimezone,
 				DefaultTags: config.DefaultTags,
+				Strict:      config.JSONStrict,
 			},
 		)
 	case "value":
From 988e03664124062dfe7834c8d3e33702315e40de Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 23 Oct 2019 14:09:28 -0700
Subject: [PATCH 1268/1815] Update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4e8fc6f43..0c4e26ba7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -33,6 +33,7 @@
 - [#6553](https://github.com/influxdata/telegraf/pull/6553): Add timeout option to cloudwatch input.
 - [#6549](https://github.com/influxdata/telegraf/pull/6549): Support custom success codes in http input.
 - [#6530](https://github.com/influxdata/telegraf/pull/6530): Improve ipvs input error strings and logging.
+- [#6532](https://github.com/influxdata/telegraf/pull/6532): Add strict mode to JSON parser that can be disabled to ignore invalid items.
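As a usage sketch for the new option (input plugin choice illustrative), `json_strict = false` makes the parser skip the malformed second object in an array like `mixedValidityJSON` above rather than failing the whole parse:

```toml
[[inputs.file]]
  files = ["/tmp/metrics.json"]
  data_format = "json"
  json_time_key = "time"
  json_time_format = "2006-01-02T15:04:05"

  ## When true, any invalid object in a JSON array aborts parsing;
  ## when false, invalid objects are skipped.
  json_strict = false
```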
#### Bugfixes

From 47a708ec99045912d7443cab27ae9b3ecc0c19f0 Mon Sep 17 00:00:00 2001
From: David McKay
Date: Wed, 23 Oct 2019 23:35:37 +0100
Subject: [PATCH 1269/1815] Remove usage of deprecated v1beta API endpoints (#6543)

---
 Gopkg.lock                                    |  6 +-
 plugins/inputs/kube_inventory/README.md       | 99 ++++++++++---------
 plugins/inputs/kube_inventory/client.go       | 17 ++--
 plugins/inputs/kube_inventory/daemonset.go    |  4 +-
 .../inputs/kube_inventory/daemonset_test.go   | 12 +--
 plugins/inputs/kube_inventory/deployment.go   |  5 +-
 .../inputs/kube_inventory/deployment_test.go  | 18 ++--
 plugins/inputs/kube_inventory/ingress_test.go |  2 +-
 plugins/inputs/kube_inventory/statefulset.go  |  4 +-
 .../inputs/kube_inventory/statefulset_test.go | 14 +--
 plugins/inputs/kubernetes/README.md           | 22 +++--
 11 files changed, 107 insertions(+), 96 deletions(-)

diff --git a/Gopkg.lock b/Gopkg.lock
index aef85a69b..410b9b284 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -413,8 +413,7 @@
   packages = [
     ".",
     "apis/apiextensions/v1beta1",
-    "apis/apps/v1beta1",
-    "apis/apps/v1beta2",
+    "apis/apps/v1",
     "apis/core/v1",
     "apis/extensions/v1beta1",
     "apis/meta/v1",
@@ -1712,8 +1711,7 @@
     "github.com/docker/libnetwork/ipvs",
     "github.com/eclipse/paho.mqtt.golang",
     "github.com/ericchiang/k8s",
-    "github.com/ericchiang/k8s/apis/apps/v1beta1",
-    "github.com/ericchiang/k8s/apis/apps/v1beta2",
+    "github.com/ericchiang/k8s/apis/apps/v1",
    "github.com/ericchiang/k8s/apis/core/v1",
     "github.com/ericchiang/k8s/apis/extensions/v1beta1",
     "github.com/ericchiang/k8s/apis/meta/v1",
diff --git a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md
index d24ca95bd..063d03072 100644
--- a/plugins/inputs/kube_inventory/README.md
+++ b/plugins/inputs/kube_inventory/README.md
@@ -1,17 +1,25 @@
 # Kube_Inventory Plugin
+
 This plugin generates metrics derived from the state of the following Kubernetes resources:
-  - daemonsets
-  - deployments
-  - nodes
-  - persistentvolumes
-  - persistentvolumeclaims
-  - pods (containers)
-  - statefulsets
+
+- daemonsets
+- deployments
+- nodes
+- persistentvolumes
+- persistentvolumeclaims
+- pods (containers)
+- statefulsets
+
+Kubernetes is a fast moving project, with a new minor release every 3 months. As
+such, we will aim to maintain support only for versions that are supported by
+the major cloud providers; this is roughly 4 releases / 2 years.
+
+**This plugin supports Kubernetes 1.11 and later.**
 
 #### Series Cardinality Warning
 
 This plugin may produce a high number of series which, when not controlled
-for, will cause high load on your database.  Use the following techniques to
+for, will cause high load on your database. Use the following techniques to
 avoid cardinality issues:
 
 - Use [metric filtering][] options to exclude unneeded measurements and tags.
@@ -61,6 +69,7 @@ avoid cardinality issues:
 #### Kubernetes Permissions
 
 If using [RBAC authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/), you will need to create a cluster role to list "persistentvolumes" and "nodes". You will then need to make an [aggregated ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles) that will eventually be bound to a user or group.
+ ```yaml --- kind: ClusterRole @@ -70,9 +79,9 @@ metadata: labels: rbac.authorization.k8s.io/aggregate-view-telegraf: "true" rules: -- apiGroups: [""] - resources: ["persistentvolumes","nodes"] - verbs: ["get","list"] + - apiGroups: [""] + resources: ["persistentvolumes", "nodes"] + verbs: ["get", "list"] --- kind: ClusterRole @@ -81,14 +90,15 @@ metadata: name: influx:telegraf aggregationRule: clusterRoleSelectors: - - matchLabels: - rbac.authorization.k8s.io/aggregate-view-telegraf: "true" - - matchLabels: - rbac.authorization.k8s.io/aggregate-to-view: "true" + - matchLabels: + rbac.authorization.k8s.io/aggregate-view-telegraf: "true" + - matchLabels: + rbac.authorization.k8s.io/aggregate-to-view: "true" rules: [] # Rules are automatically filled in by the controller manager. ``` Bind the newly created aggregated ClusterRole with the following config file, updating the subjects as needed. + ```yaml --- apiVersion: rbac.authorization.k8s.io/v1 @@ -100,15 +110,14 @@ roleRef: kind: ClusterRole name: influx:telegraf subjects: -- kind: ServiceAccount - name: telegraf - namespace: default + - kind: ServiceAccount + name: telegraf + namespace: default ``` - ### Metrics: -+ kubernetes_daemonset +- kubernetes_daemonset - tags: - daemonset_name - namespace @@ -122,7 +131,7 @@ subjects: - number_unavailable - updated_number_scheduled -- kubernetes_deployment +* kubernetes_deployment - tags: - deployment_name - namespace @@ -131,7 +140,7 @@ subjects: - replicas_unavailable - created -+ kubernetes_endpoints +- kubernetes_endpoints - tags: - endpoint_name - namespace @@ -139,14 +148,14 @@ subjects: - node_name - port_name - port_protocol - - kind (*varies) + - kind (\*varies) - fields: - created - generation - ready - port -- kubernetes_ingress +* kubernetes_ingress - tags: - ingress_name - namespace @@ -161,7 +170,7 @@ subjects: - backend_service_port - tls -+ kubernetes_node +- kubernetes_node - tags: - node_name - fields: @@ -172,7 +181,7 @@ subjects: - allocatable_memory_bytes - allocatable_pods -- kubernetes_persistentvolume +* kubernetes_persistentvolume - tags: - pv_name - phase @@ -180,7 +189,7 @@ subjects: - fields: - phase_type (int, [see below](#pv-phase_type)) -+ kubernetes_persistentvolumeclaim +- kubernetes_persistentvolumeclaim - tags: - pvc_name - namespace @@ -189,7 +198,7 @@ subjects: - fields: - phase_type (int, [see below](#pvc-phase_type)) -- kubernetes_pod_container +* kubernetes_pod_container - tags: - container_name - namespace @@ -204,7 +213,7 @@ subjects: - resource_limits_cpu_units - resource_limits_memory_bytes -+ kubernetes_service +- kubernetes_service - tags: - service_name - namespace @@ -218,7 +227,7 @@ subjects: - port - target_port -- kubernetes_statefulset +* kubernetes_statefulset - tags: - statefulset_name - namespace @@ -236,26 +245,25 @@ subjects: The persistentvolume "phase" is saved in the `phase` tag with a correlated numeric field called `phase_type` corresponding with that tag value. -|Tag value |Corresponding field value| ------------|-------------------------| -|bound | 0 | -|failed | 1 | -|pending | 2 | -|released | 3 | -|available | 4 | -|unknown | 5 | +| Tag value | Corresponding field value | +| --------- | ------------------------- | +| bound | 0 | +| failed | 1 | +| pending | 2 | +| released | 3 | +| available | 4 | +| unknown | 5 | #### pvc `phase_type` The persistentvolumeclaim "phase" is saved in the `phase` tag with a correlated numeric field called `phase_type` corresponding with that tag value. 
-|Tag value |Corresponding field value| ------------|-------------------------| -|bound | 0 | -|lost | 1 | -|pending | 2 | -|unknown | 3 | - +| Tag value | Corresponding field value | +| --------- | ------------------------- | +| bound | 0 | +| lost | 1 | +| pending | 2 | +| unknown | 3 | ### Example Output: @@ -271,7 +279,6 @@ kubernetes_pod_container,container_name=telegraf,namespace=default,node_name=ip- kubernetes_statefulset,namespace=default,statefulset_name=etcd replicas_updated=3i,spec_replicas=3i,observed_generation=1i,created=1544101669000000000i,generation=1i,replicas=3i,replicas_current=3i,replicas_ready=3i 1547597616000000000 ``` - [metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering [retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ [max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000 diff --git a/plugins/inputs/kube_inventory/client.go b/plugins/inputs/kube_inventory/client.go index 5bb2baf5c..d16428c40 100644 --- a/plugins/inputs/kube_inventory/client.go +++ b/plugins/inputs/kube_inventory/client.go @@ -5,9 +5,8 @@ import ( "time" "github.com/ericchiang/k8s" - "github.com/ericchiang/k8s/apis/apps/v1beta1" - "github.com/ericchiang/k8s/apis/apps/v1beta2" - "github.com/ericchiang/k8s/apis/core/v1" + v1APPS "github.com/ericchiang/k8s/apis/apps/v1" + v1 "github.com/ericchiang/k8s/apis/core/v1" v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1" "github.com/influxdata/telegraf/internal/tls" @@ -48,15 +47,15 @@ func newClient(baseURL, namespace, bearerToken string, timeout time.Duration, tl }, nil } -func (c *client) getDaemonSets(ctx context.Context) (*v1beta2.DaemonSetList, error) { - list := new(v1beta2.DaemonSetList) +func (c *client) getDaemonSets(ctx context.Context) (*v1APPS.DaemonSetList, error) { + list := new(v1APPS.DaemonSetList) ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() return list, c.List(ctx, c.namespace, list) } -func (c *client) getDeployments(ctx context.Context) (*v1beta1.DeploymentList, error) { - list := &v1beta1.DeploymentList{} +func (c *client) getDeployments(ctx context.Context) (*v1APPS.DeploymentList, error) { + list := &v1APPS.DeploymentList{} ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() return list, c.List(ctx, c.namespace, list) @@ -111,8 +110,8 @@ func (c *client) getServices(ctx context.Context) (*v1.ServiceList, error) { return list, c.List(ctx, c.namespace, list) } -func (c *client) getStatefulSets(ctx context.Context) (*v1beta1.StatefulSetList, error) { - list := new(v1beta1.StatefulSetList) +func (c *client) getStatefulSets(ctx context.Context) (*v1APPS.StatefulSetList, error) { + list := new(v1APPS.StatefulSetList) ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() return list, c.List(ctx, c.namespace, list) diff --git a/plugins/inputs/kube_inventory/daemonset.go b/plugins/inputs/kube_inventory/daemonset.go index 92c7bc195..15df586d6 100644 --- a/plugins/inputs/kube_inventory/daemonset.go +++ b/plugins/inputs/kube_inventory/daemonset.go @@ -4,7 +4,7 @@ import ( "context" "time" - "github.com/ericchiang/k8s/apis/apps/v1beta2" + "github.com/ericchiang/k8s/apis/apps/v1" "github.com/influxdata/telegraf" ) @@ -23,7 +23,7 @@ func collectDaemonSets(ctx context.Context, acc telegraf.Accumulator, ki *Kubern } } -func (ki *KubernetesInventory) gatherDaemonSet(d v1beta2.DaemonSet, acc telegraf.Accumulator) error 
{ +func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accumulator) error { fields := map[string]interface{}{ "generation": d.Metadata.GetGeneration(), "current_number_scheduled": d.Status.GetCurrentNumberScheduled(), diff --git a/plugins/inputs/kube_inventory/daemonset_test.go b/plugins/inputs/kube_inventory/daemonset_test.go index 3f11df1ca..bf4e934d3 100644 --- a/plugins/inputs/kube_inventory/daemonset_test.go +++ b/plugins/inputs/kube_inventory/daemonset_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/apis/apps/v1beta2" + "github.com/ericchiang/k8s/apis/apps/v1" metav1 "github.com/ericchiang/k8s/apis/meta/v1" "github.com/influxdata/telegraf/testutil" @@ -24,7 +24,7 @@ func TestDaemonSet(t *testing.T) { name: "no daemon set", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/daemonsets/": &v1beta2.DaemonSetList{}, + "/daemonsets/": &v1.DaemonSetList{}, }, }, hasError: false, @@ -33,10 +33,10 @@ func TestDaemonSet(t *testing.T) { name: "collect daemonsets", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/daemonsets/": &v1beta2.DaemonSetList{ - Items: []*v1beta2.DaemonSet{ + "/daemonsets/": &v1.DaemonSetList{ + Items: []*v1.DaemonSet{ { - Status: &v1beta2.DaemonSetStatus{ + Status: &v1.DaemonSetStatus{ CurrentNumberScheduled: toInt32Ptr(3), DesiredNumberScheduled: toInt32Ptr(5), NumberAvailable: toInt32Ptr(2), @@ -90,7 +90,7 @@ func TestDaemonSet(t *testing.T) { client: cli, } acc := new(testutil.Accumulator) - for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1beta2.DaemonSetList)).Items { + for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items { err := ks.gatherDaemonSet(*dset, acc) if err != nil { t.Errorf("Failed to gather daemonset - %s", err.Error()) diff --git a/plugins/inputs/kube_inventory/deployment.go b/plugins/inputs/kube_inventory/deployment.go index 2d72e8d03..5a0eb0b19 100644 --- a/plugins/inputs/kube_inventory/deployment.go +++ b/plugins/inputs/kube_inventory/deployment.go @@ -4,8 +4,7 @@ import ( "context" "time" - "github.com/ericchiang/k8s/apis/apps/v1beta1" - + v1 "github.com/ericchiang/k8s/apis/apps/v1" "github.com/influxdata/telegraf" ) @@ -23,7 +22,7 @@ func collectDeployments(ctx context.Context, acc telegraf.Accumulator, ki *Kuber } } -func (ki *KubernetesInventory) gatherDeployment(d v1beta1.Deployment, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherDeployment(d v1.Deployment, acc telegraf.Accumulator) error { fields := map[string]interface{}{ "replicas_available": d.Status.GetAvailableReplicas(), "replicas_unavailable": d.Status.GetUnavailableReplicas(), diff --git a/plugins/inputs/kube_inventory/deployment_test.go b/plugins/inputs/kube_inventory/deployment_test.go index 0429b84fa..21b7bfd02 100644 --- a/plugins/inputs/kube_inventory/deployment_test.go +++ b/plugins/inputs/kube_inventory/deployment_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/apis/apps/v1beta1" + "github.com/ericchiang/k8s/apis/apps/v1" metav1 "github.com/ericchiang/k8s/apis/meta/v1" "github.com/ericchiang/k8s/util/intstr" "github.com/influxdata/telegraf/testutil" @@ -37,7 +37,7 @@ func TestDeployment(t *testing.T) { name: "no deployments", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/deployments/": &v1beta1.DeploymentList{}, + "/deployments/": &v1.DeploymentList{}, }, }, hasError: false, @@ -46,19 +46,19 @@ func TestDeployment(t *testing.T) { name: "collect deployments", handler: 
&mockHandler{ responseMap: map[string]interface{}{ - "/deployments/": &v1beta1.DeploymentList{ - Items: []*v1beta1.Deployment{ + "/deployments/": &v1.DeploymentList{ + Items: []*v1.Deployment{ { - Status: &v1beta1.DeploymentStatus{ + Status: &v1.DeploymentStatus{ Replicas: toInt32Ptr(3), AvailableReplicas: toInt32Ptr(1), UnavailableReplicas: toInt32Ptr(4), UpdatedReplicas: toInt32Ptr(2), ObservedGeneration: toInt64Ptr(9121), }, - Spec: &v1beta1.DeploymentSpec{ - Strategy: &v1beta1.DeploymentStrategy{ - RollingUpdate: &v1beta1.RollingUpdateDeployment{ + Spec: &v1.DeploymentSpec{ + Strategy: &v1.DeploymentStrategy{ + RollingUpdate: &v1.RollingUpdateDeployment{ MaxUnavailable: &intstr.IntOrString{ IntVal: toInt32Ptr(30), }, @@ -98,7 +98,7 @@ func TestDeployment(t *testing.T) { client: cli, } acc := new(testutil.Accumulator) - for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1beta1.DeploymentList)).Items { + for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items { err := ks.gatherDeployment(*deployment, acc) if err != nil { t.Errorf("Failed to gather deployment - %s", err.Error()) diff --git a/plugins/inputs/kube_inventory/ingress_test.go b/plugins/inputs/kube_inventory/ingress_test.go index e3b44512c..2d111801a 100644 --- a/plugins/inputs/kube_inventory/ingress_test.go +++ b/plugins/inputs/kube_inventory/ingress_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/apis/core/v1" + v1 "github.com/ericchiang/k8s/apis/core/v1" v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1" metav1 "github.com/ericchiang/k8s/apis/meta/v1" "github.com/influxdata/telegraf/testutil" diff --git a/plugins/inputs/kube_inventory/statefulset.go b/plugins/inputs/kube_inventory/statefulset.go index 407aaac2f..c95e566c2 100644 --- a/plugins/inputs/kube_inventory/statefulset.go +++ b/plugins/inputs/kube_inventory/statefulset.go @@ -4,7 +4,7 @@ import ( "context" "time" - "github.com/ericchiang/k8s/apis/apps/v1beta1" + "github.com/ericchiang/k8s/apis/apps/v1" "github.com/influxdata/telegraf" ) @@ -23,7 +23,7 @@ func collectStatefulSets(ctx context.Context, acc telegraf.Accumulator, ki *Kube } } -func (ki *KubernetesInventory) gatherStatefulSet(s v1beta1.StatefulSet, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherStatefulSet(s v1.StatefulSet, acc telegraf.Accumulator) error { status := s.Status fields := map[string]interface{}{ "created": time.Unix(s.Metadata.CreationTimestamp.GetSeconds(), int64(s.Metadata.CreationTimestamp.GetNanos())).UnixNano(), diff --git a/plugins/inputs/kube_inventory/statefulset_test.go b/plugins/inputs/kube_inventory/statefulset_test.go index 6e94ad150..1a971b7b6 100644 --- a/plugins/inputs/kube_inventory/statefulset_test.go +++ b/plugins/inputs/kube_inventory/statefulset_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/apis/apps/v1beta1" + "github.com/ericchiang/k8s/apis/apps/v1" metav1 "github.com/ericchiang/k8s/apis/meta/v1" "github.com/influxdata/telegraf/testutil" @@ -24,7 +24,7 @@ func TestStatefulSet(t *testing.T) { name: "no statefulsets", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/statefulsets/": &v1beta1.StatefulSetList{}, + "/statefulsets/": &v1.StatefulSetList{}, }, }, hasError: false, @@ -33,17 +33,17 @@ func TestStatefulSet(t *testing.T) { name: "collect statefulsets", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/statefulsets/": &v1beta1.StatefulSetList{ - Items: []*v1beta1.StatefulSet{ + 
"/statefulsets/": &v1.StatefulSetList{ + Items: []*v1.StatefulSet{ { - Status: &v1beta1.StatefulSetStatus{ + Status: &v1.StatefulSetStatus{ Replicas: toInt32Ptr(2), CurrentReplicas: toInt32Ptr(4), ReadyReplicas: toInt32Ptr(1), UpdatedReplicas: toInt32Ptr(3), ObservedGeneration: toInt64Ptr(119), }, - Spec: &v1beta1.StatefulSetSpec{ + Spec: &v1.StatefulSetSpec{ Replicas: toInt32Ptr(3), }, Metadata: &metav1.ObjectMeta{ @@ -90,7 +90,7 @@ func TestStatefulSet(t *testing.T) { client: cli, } acc := new(testutil.Accumulator) - for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1beta1.StatefulSetList)).Items { + for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items { err := ks.gatherStatefulSet(*ss, acc) if err != nil { t.Errorf("Failed to gather ss - %s", err.Error()) diff --git a/plugins/inputs/kubernetes/README.md b/plugins/inputs/kubernetes/README.md index d53d94e97..9aa5f17c1 100644 --- a/plugins/inputs/kubernetes/README.md +++ b/plugins/inputs/kubernetes/README.md @@ -3,15 +3,23 @@ This input plugin talks to the kubelet api using the `/stats/summary` endpoint to gather metrics about the running pods and containers for a single host. It is assumed that this plugin is running as part of a `daemonset` within a kubernetes installation. This means that telegraf is running on every node within the cluster. Therefore, you should configure this plugin to talk to its locally running kubelet. To find the ip address of the host you are running on you can issue a command like the following: + ``` $ curl -s $API_URL/api/v1/namespaces/$POD_NAMESPACE/pods/$HOSTNAME --header "Authorization: Bearer $TOKEN" --insecure | jq -r '.status.hostIP' ``` + In this case we used the downward API to pass in the `$POD_NAMESPACE` and `$HOSTNAME` is the hostname of the pod which is set by the kubernetes API. +Kubernetes is a fast moving project, with a new minor release every 3 months. As +such, we will aim to maintain support only for versions that are supported by +the major cloud providers; this is roughly 4 release / 2 years. + +**This plugin supports Kubernetes 1.11 and later.** + #### Series Cardinality Warning This plugin may produce a high number of series which, when not controlled -for, will cause high load on your database. Use the following techniques to +for, will cause high load on your database. Use the following techniques to avoid cardinality issues: - Use [metric filtering][] options to exclude unneeded measurements and tags. 
@@ -80,7 +88,7 @@ Architecture][k8s-telegraf] or view the Helm charts: - runtime_image_fs_capacity_bytes - runtime_image_fs_used_bytes -+ kubernetes_pod_container +* kubernetes_pod_container - tags: - container_name - namespace @@ -112,7 +120,7 @@ Architecture][k8s-telegraf] or view the Helm charts: - capacity_bytes - used_bytes -+ kubernetes_pod_network +* kubernetes_pod_network - tags: - namespace - node_name @@ -141,7 +149,7 @@ kubernetes_system_container [series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality [influx-docs]: https://docs.influxdata.com/influxdb/latest/ [k8s-telegraf]: https://www.influxdata.com/blog/monitoring-kubernetes-architecture/ -[Telegraf]: https://github.com/helm/charts/tree/master/stable/telegraf -[InfluxDB]: https://github.com/helm/charts/tree/master/stable/influxdb -[Chronograf]: https://github.com/helm/charts/tree/master/stable/chronograf -[Kapacitor]: https://github.com/helm/charts/tree/master/stable/kapacitor +[telegraf]: https://github.com/helm/charts/tree/master/stable/telegraf +[influxdb]: https://github.com/helm/charts/tree/master/stable/influxdb +[chronograf]: https://github.com/helm/charts/tree/master/stable/chronograf +[kapacitor]: https://github.com/helm/charts/tree/master/stable/kapacitor From 1761e25c96824ae6b8a39ff5a8f30213d83b217b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 23 Oct 2019 15:37:29 -0700 Subject: [PATCH 1270/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0c4e26ba7..8f0f60167 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,7 @@ - [#6549](https://github.com/influxdata/telegraf/pull/6549): Support custom success codes in http input. - [#6530](https://github.com/influxdata/telegraf/pull/6530): Improve ipvs input error strings and logging. - [#6532](https://github.com/influxdata/telegraf/pull/6532): Add strict mode to JSON parser that can be disable to ignore invalid items. +- [#6543](https://github.com/influxdata/telegraf/pull/6543): Add support for Kubernetes 1.16 and remove deprecated API usage. #### Bugfixes From 8b3a8d1113d5be01ce4df7ab16692f1d6457ab74 Mon Sep 17 00:00:00 2001 From: Jacques Heunis Date: Thu, 24 Oct 2019 02:08:19 +0200 Subject: [PATCH 1271/1815] Add gathering of RabbitMQ federation link metrics (#6283) --- plugins/inputs/rabbitmq/README.md | 25 ++++ plugins/inputs/rabbitmq/rabbitmq.go | 133 +++++++++++++++++- plugins/inputs/rabbitmq/rabbitmq_test.go | 14 ++ .../rabbitmq/testdata/federation-links.json | 63 +++++++++ 4 files changed, 228 insertions(+), 7 deletions(-) create mode 100644 plugins/inputs/rabbitmq/testdata/federation-links.json diff --git a/plugins/inputs/rabbitmq/README.md b/plugins/inputs/rabbitmq/README.md index d52a760f2..7ce4229f7 100644 --- a/plugins/inputs/rabbitmq/README.md +++ b/plugins/inputs/rabbitmq/README.md @@ -48,6 +48,13 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management ## specified, metrics for all exchanges are gathered. # exchanges = ["telegraf"] + ## A list of federation upstreams to gather as the rabbitmq_federation measurement. + ## If not specified, metrics for all federation upstreams are gathered. + ## Federation link metrics will only be gathered for queues and exchanges + ## whose non-federation metrics will be collected (e.g a queue excluded + ## by the 'queue_name_exclude' option will also be excluded from federation). + # federation_upstreams = ["dataCentre2"] + ## Queues to include and exclude. 
Globs accepted. ## Note that an empty array for both will include all queues # queue_name_include = [] @@ -158,6 +165,16 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management - messages_publish_out (int, count) - messages_publish_out_rate (int, messages per second) +- rabbitmq_federation + - acks_uncommitted (int, count) + - consumers (int, count) + - messages_unacknowledged (int, count) + - messages_uncommitted (int, count) + - messages_unconfirmed (int, count) + - messages_confirm (int, count) + - messages_publish (int, count) + - messages_return_unroutable (int, count) + ### Tags: - All measurements have the following tags: @@ -187,6 +204,14 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management - durable - auto_delete +- rabbitmq_federation + - url + - vhost + - type + - upstream + - local_entity + - upstream_entity + ### Sample Queries: Message rates for the entire node can be calculated from total message counts. For instance, to get the rate of messages published per minute, use this query: diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 168a340b0..acbba6e2a 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -47,14 +47,17 @@ type RabbitMQ struct { Queues []string Exchanges []string - QueueInclude []string `toml:"queue_name_include"` - QueueExclude []string `toml:"queue_name_exclude"` + QueueInclude []string `toml:"queue_name_include"` + QueueExclude []string `toml:"queue_name_exclude"` + FederationUpstreamInclude []string `toml:"federation_upstream_include"` + FederationUpstreamExclude []string `toml:"federation_upstream_exclude"` Client *http.Client filterCreated bool excludeEveryQueue bool queueFilter filter.Filter + upstreamFilter filter.Filter } // OverviewResponse ... @@ -178,6 +181,38 @@ type Exchange struct { AutoDelete bool `json:"auto_delete"` } +// FederationLinkChannelMessageStats ... +type FederationLinkChannelMessageStats struct { + Confirm int64 `json:"confirm"` + ConfirmDetails Details `json:"confirm_details"` + Publish int64 `json:"publish"` + PublishDetails Details `json:"publish_details"` + ReturnUnroutable int64 `json:"return_unroutable"` + ReturnUnroutableDetails Details `json:"return_unroutable_details"` +} + +// FederationLinkChannel ... +type FederationLinkChannel struct { + AcksUncommitted int64 `json:"acks_uncommitted"` + ConsumerCount int64 `json:"consumer_count"` + MessagesUnacknowledged int64 `json:"messages_unacknowledged"` + MessagesUncommitted int64 `json:"messages_uncommitted"` + MessagesUnconfirmed int64 `json:"messages_unconfirmed"` + MessageStats FederationLinkChannelMessageStats `json:"message_stats"` +} + +// FederationLink ... +type FederationLink struct { + Type string `json:"type"` + Queue string `json:"queue"` + UpstreamQueue string `json:"upstream_queue"` + Exchange string `json:"exchange"` + UpstreamExchange string `json:"upstream_exchange"` + Vhost string `json:"vhost"` + Upstream string `json:"upstream"` + LocalChannel FederationLinkChannel `json:"local_channel"` +} + type HealthCheck struct { Status string `json:"status"` } @@ -214,7 +249,7 @@ type Memory struct { // gatherFunc ... 
type gatherFunc func(r *RabbitMQ, acc telegraf.Accumulator) -var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues, gatherExchanges} +var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues, gatherExchanges, gatherFederationLinks} var sampleConfig = ` ## Management Plugin url. (default: http://localhost:15672) @@ -258,6 +293,15 @@ var sampleConfig = ` ## Note that an empty array for both will include all queues queue_name_include = [] queue_name_exclude = [] + + ## Federation upstreams include and exclude when gathering the rabbitmq_federation measurement. + ## If neither are specified, metrics for all federation upstreams are gathered. + ## Federation link metrics will only be gathered for queues and exchanges + ## whose non-federation metrics will be collected (e.g a queue excluded + ## by the 'queue_name_exclude' option will also be excluded from federation). + ## Globs accepted. + # federation_upstream_include = ["dataCentre-*"] + # federation_upstream_exclude = [] ` func boolToInt(b bool) int64 { @@ -294,12 +338,16 @@ func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error { } } - // Create queue filter if not already created + // Create gather filters if not already created if !r.filterCreated { err := r.createQueueFilter() if err != nil { return err } + err = r.createUpstreamFilter() + if err != nil { + return err + } r.filterCreated = true } @@ -598,7 +646,7 @@ func gatherExchanges(r *RabbitMQ, acc telegraf.Accumulator) { } for _, exchange := range exchanges { - if !r.shouldGatherExchange(exchange) { + if !r.shouldGatherExchange(exchange.Name) { continue } tags := map[string]string{ @@ -624,6 +672,52 @@ func gatherExchanges(r *RabbitMQ, acc telegraf.Accumulator) { } } +func gatherFederationLinks(r *RabbitMQ, acc telegraf.Accumulator) { + // Gather information about federation links + federationLinks := make([]FederationLink, 0) + err := r.requestJSON("/api/federation-links", &federationLinks) + if err != nil { + acc.AddError(err) + return + } + + for _, link := range federationLinks { + if !r.shouldGatherFederationLink(link) { + continue + } + + tags := map[string]string{ + "url": r.URL, + "type": link.Type, + "vhost": link.Vhost, + "upstream": link.Upstream, + } + + if link.Type == "exchange" { + tags["exchange"] = link.Exchange + tags["upstream_exchange"] = link.UpstreamExchange + } else { + tags["queue"] = link.Queue + tags["upstream_queue"] = link.UpstreamQueue + } + + acc.AddFields( + "rabbitmq_federation", + map[string]interface{}{ + "acks_uncommitted": link.LocalChannel.AcksUncommitted, + "consumers": link.LocalChannel.ConsumerCount, + "messages_unacknowledged": link.LocalChannel.MessagesUnacknowledged, + "messages_uncommitted": link.LocalChannel.MessagesUncommitted, + "messages_unconfirmed": link.LocalChannel.MessagesUnconfirmed, + "messages_confirm": link.LocalChannel.MessageStats.Confirm, + "messages_publish": link.LocalChannel.MessageStats.Publish, + "messages_return_unroutable": link.LocalChannel.MessageStats.ReturnUnroutable, + }, + tags, + ) + } +} + func (r *RabbitMQ) shouldGatherNode(node Node) bool { if len(r.Nodes) == 0 { return true @@ -659,13 +753,23 @@ func (r *RabbitMQ) createQueueFilter() error { return nil } -func (r *RabbitMQ) shouldGatherExchange(exchange Exchange) bool { +func (r *RabbitMQ) createUpstreamFilter() error { + upstreamFilter, err := filter.NewIncludeExcludeFilter(r.FederationUpstreamInclude, r.FederationUpstreamExclude) + if err != nil { + return err + } + r.upstreamFilter = upstreamFilter + + 
return nil +} + +func (r *RabbitMQ) shouldGatherExchange(exchangeName string) bool { if len(r.Exchanges) == 0 { return true } for _, name := range r.Exchanges { - if name == exchange.Name { + if name == exchangeName { return true } } @@ -673,6 +777,21 @@ func (r *RabbitMQ) shouldGatherExchange(exchange Exchange) bool { return false } +func (r *RabbitMQ) shouldGatherFederationLink(link FederationLink) bool { + if !r.upstreamFilter.Match(link.Upstream) { + return false + } + + switch link.Type { + case "exchange": + return r.shouldGatherExchange(link.Exchange) + case "queue": + return r.queueFilter.Match(link.Queue) + default: + return false + } +} + func init() { inputs.Add("rabbitmq", func() telegraf.Input { return &RabbitMQ{ diff --git a/plugins/inputs/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go index 9d35718d9..0991dd0c0 100644 --- a/plugins/inputs/rabbitmq/rabbitmq_test.go +++ b/plugins/inputs/rabbitmq/rabbitmq_test.go @@ -28,6 +28,8 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) { jsonFilePath = "testdata/exchanges.json" case "/api/healthchecks/node/rabbit@vagrant-ubuntu-trusty-64": jsonFilePath = "testdata/healthchecks.json" + case "/api/federation-links": + jsonFilePath = "testdata/federation-links.json" case "/api/nodes/rabbit@vagrant-ubuntu-trusty-64/memory": jsonFilePath = "testdata/memory.json" default: @@ -162,6 +164,18 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) { "messages_publish_out_rate": 5.1, } compareMetrics(t, exchangeMetrics, acc, "rabbitmq_exchange") + + federationLinkMetrics := map[string]interface{}{ + "acks_uncommitted": 1, + "consumers": 2, + "messages_unacknowledged": 3, + "messages_uncommitted": 4, + "messages_unconfirmed": 5, + "messages_confirm": 67, + "messages_publish": 890, + "messages_return_unroutable": 1, + } + compareMetrics(t, federationLinkMetrics, acc, "rabbitmq_federation") } func compareMetrics(t *testing.T, expectedMetrics map[string]interface{}, diff --git a/plugins/inputs/rabbitmq/testdata/federation-links.json b/plugins/inputs/rabbitmq/testdata/federation-links.json new file mode 100644 index 000000000..4cf514870 --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/federation-links.json @@ -0,0 +1,63 @@ +[ + { + "node": "rabbit@rmqlocal", + "queue": "exampleLocalQueue", + "upstream_queue": "exampleUpstreamQueue", + "type": "queue", + "vhost": "/", + "upstream": "ExampleFederationUpstream", + "id": "8ba5218f", + "status": "running", + "local_connection": "", + "uri": "amqp://appsv03", + "timestamp": "2019-08-19 15:34:15", + "local_channel": { + "acks_uncommitted": 1, + "confirm": true, + "connection_details": { + "name": "", + "peer_host": "undefined", + "peer_port": "undefined" + }, + "consumer_count": 2, + "garbage_collection": { + "fullsweep_after": 65535, + "max_heap_size": 0, + "min_bin_vheap_size": 46422, + "min_heap_size": 233, + "minor_gcs": 203 + }, + "global_prefetch_count": 0, + "message_stats": { + "confirm": 67, + "confirm_details": { + "rate": 2 + }, + "publish": 890, + "publish_details": { + "rate": 2 + }, + "return_unroutable": 1, + "return_unroutable_details": { + "rate": 0.1 + } + }, + "messages_unacknowledged": 3, + "messages_uncommitted": 4, + "messages_unconfirmed": 5, + "name": "", + "node": "rabbit@rmqlocal", + "number": 1, + "prefetch_count": 0, + "reductions": 1926653, + "reductions_details": { + "rate": 1068 + }, + "state": "running", + "transactional": false, + "user": "none", + "user_who_performed_action": "none", + "vhost": "sorandomsorandom" + } + } +] From 
3770923ce32a9174cd12c97cafa02577dbe35d7d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 23 Oct 2019 17:26:31 -0700 Subject: [PATCH 1272/1815] Update rabbitmq input readme --- plugins/inputs/rabbitmq/README.md | 305 ++++++++++++++-------------- plugins/inputs/rabbitmq/rabbitmq.go | 16 +- 2 files changed, 159 insertions(+), 162 deletions(-) diff --git a/plugins/inputs/rabbitmq/README.md b/plugins/inputs/rabbitmq/README.md index 7ce4229f7..0e119b25e 100644 --- a/plugins/inputs/rabbitmq/README.md +++ b/plugins/inputs/rabbitmq/README.md @@ -7,7 +7,7 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management [management]: https://www.rabbitmq.com/management.html [management-reference]: https://raw.githack.com/rabbitmq/rabbitmq-management/rabbitmq_v3_6_9/priv/www/api/index.html -### Configuration: +### Configuration ```toml [[inputs.rabbitmq]] @@ -48,180 +48,177 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management ## specified, metrics for all exchanges are gathered. # exchanges = ["telegraf"] - ## A list of federation upstreams to gather as the rabbitmq_federation measurement. - ## If not specified, metrics for all federation upstreams are gathered. - ## Federation link metrics will only be gathered for queues and exchanges - ## whose non-federation metrics will be collected (e.g a queue excluded - ## by the 'queue_name_exclude' option will also be excluded from federation). - # federation_upstreams = ["dataCentre2"] - ## Queues to include and exclude. Globs accepted. ## Note that an empty array for both will include all queues # queue_name_include = [] # queue_name_exclude = [] + + ## Federation upstreams to include and exclude, specified as an array of glob + ## pattern strings. Federation links can also be limited by the queue and + ## exchange filters. 
+ # federation_upstream_include = [] + # federation_upstream_exclude = [] ``` -### Measurements & Fields: +### Metrics - rabbitmq_overview - - channels (int, channels) - - connections (int, connections) - - consumers (int, consumers) - - exchanges (int, exchanges) - - messages (int, messages) - - messages_acked (int, messages) - - messages_delivered (int, messages) - - messages_delivered_get (int, messages) - - messages_published (int, messages) - - messages_ready (int, messages) - - messages_unacked (int, messages) - - queues (int, queues) - - clustering_listeners (int, cluster nodes) - - amqp_listeners (int, amqp nodes up) - - return_unroutable (int, number of unroutable messages) - - return_unroutable_rate (float, number of unroutable messages per second) + - tags: + - url + - name + - fields: + - channels (int, channels) + - connections (int, connections) + - consumers (int, consumers) + - exchanges (int, exchanges) + - messages (int, messages) + - messages_acked (int, messages) + - messages_delivered (int, messages) + - messages_delivered_get (int, messages) + - messages_published (int, messages) + - messages_ready (int, messages) + - messages_unacked (int, messages) + - queues (int, queues) + - clustering_listeners (int, cluster nodes) + - amqp_listeners (int, amqp nodes up) + - return_unroutable (int, number of unroutable messages) + - return_unroutable_rate (float, number of unroutable messages per second) -- rabbitmq_node - - disk_free (int, bytes) - - disk_free_limit (int, bytes) - - disk_free_alarm (int, disk alarm) - - fd_total (int, file descriptors) - - fd_used (int, file descriptors) - - mem_limit (int, bytes) - - mem_used (int, bytes) - - mem_alarm (int, memory a) - - proc_total (int, erlang processes) - - proc_used (int, erlang processes) - - run_queue (int, erlang processes) - - sockets_total (int, sockets) - - sockets_used (int, sockets) - - running (int, node up) - - uptime (int, milliseconds) - - health_check_status (int, 1 or 0) - - mnesia_disk_tx_count (int, number of disk transaction) - - mnesia_ram_tx_count (int, number of ram transaction) - - mnesia_disk_tx_count_rate (float, number of disk transaction per second) - - mnesia_ram_tx_count_rate (float, number of ram transaction per second) - - gc_num (int, number of garbage collection) - - gc_bytes_reclaimed (int, bytes) - - gc_num_rate (float, number of garbage collection per second) - - gc_bytes_reclaimed_rate (float, bytes per second) - - io_read_avg_time (float, number of read operations) - - io_read_avg_time_rate (int, number of read operations per second) - - io_read_bytes (int, bytes) - - io_read_bytes_rate (float, bytes per second) - - io_write_avg_time (int, milliseconds) - - io_write_avg_time_rate (float, milliseconds per second) - - io_write_bytes (int, bytes) - - io_write_bytes_rate (float, bytes per second) - - mem_connection_readers (int, bytes) - - mem_connection_writers (int, bytes) - - mem_connection_channels (int, bytes) - - mem_connection_other (int, bytes) - - mem_queue_procs (int, bytes) - - mem_queue_slave_procs (int, bytes) - - mem_plugins (int, bytes) - - mem_other_proc (int, bytes) - - mem_metrics (int, bytes) - - mem_mgmt_db (int, bytes) - - mem_mnesia (int, bytes) - - mem_other_ets (int, bytes) - - mem_binary (int, bytes) - - mem_msg_index (int, bytes) - - mem_code (int, bytes) - - mem_atom (int, bytes) - - mem_other_system (int, bytes) - - mem_allocated_unused (int, bytes) - - mem_reserved_unallocated (int, bytes) - - mem_total (int, bytes) ++ rabbitmq_node + - tags: + - url + - node + - 
url + - fields: + - disk_free (int, bytes) + - disk_free_limit (int, bytes) + - disk_free_alarm (int, disk alarm) + - fd_total (int, file descriptors) + - fd_used (int, file descriptors) + - mem_limit (int, bytes) + - mem_used (int, bytes) + - mem_alarm (int, memory alarm) + - proc_total (int, erlang processes) + - proc_used (int, erlang processes) + - run_queue (int, erlang processes) + - sockets_total (int, sockets) + - sockets_used (int, sockets) + - running (int, node up) + - uptime (int, milliseconds) + - health_check_status (int, 1 or 0) + - mnesia_disk_tx_count (int, number of disk transactions) + - mnesia_ram_tx_count (int, number of ram transactions) + - mnesia_disk_tx_count_rate (float, number of disk transactions per second) + - mnesia_ram_tx_count_rate (float, number of ram transactions per second) + - gc_num (int, number of garbage collections) + - gc_bytes_reclaimed (int, bytes) + - gc_num_rate (float, number of garbage collections per second) + - gc_bytes_reclaimed_rate (float, bytes per second) + - io_read_avg_time (float, milliseconds) + - io_read_avg_time_rate (float, milliseconds per second) + - io_read_bytes (int, bytes) + - io_read_bytes_rate (float, bytes per second) + - io_write_avg_time (int, milliseconds) + - io_write_avg_time_rate (float, milliseconds per second) + - io_write_bytes (int, bytes) + - io_write_bytes_rate (float, bytes per second) + - mem_connection_readers (int, bytes) + - mem_connection_writers (int, bytes) + - mem_connection_channels (int, bytes) + - mem_connection_other (int, bytes) + - mem_queue_procs (int, bytes) + - mem_queue_slave_procs (int, bytes) + - mem_plugins (int, bytes) + - mem_other_proc (int, bytes) + - mem_metrics (int, bytes) + - mem_mgmt_db (int, bytes) + - mem_mnesia (int, bytes) + - mem_other_ets (int, bytes) + - mem_binary (int, bytes) + - mem_msg_index (int, bytes) + - mem_code (int, bytes) + - mem_atom (int, bytes) + - mem_other_system (int, bytes) + - mem_allocated_unused (int, bytes) + - mem_reserved_unallocated (int, bytes) + - mem_total (int, bytes) - rabbitmq_queue - - consumer_utilisation (float, percent) - - consumers (int, int) - - idle_since (string, time - e.g., "2006-01-02 15:04:05") - - memory (int, bytes) - - message_bytes (int, bytes) - - message_bytes_persist (int, bytes) - - message_bytes_ram (int, bytes) - - message_bytes_ready (int, bytes) - - message_bytes_unacked (int, bytes) - - messages (int, count) - - messages_ack (int, count) - - messages_ack_rate (float, messages per second) - - messages_deliver (int, count) - - messages_deliver_rate (float, messages per second) - - messages_deliver_get (int, count) - - messages_deliver_get_rate (float, messages per second) - - messages_publish (int, count) - - messages_publish_rate (float, messages per second) - - messages_ready (int, count) - - messages_redeliver (int, count) - - messages_redeliver_rate (float, messages per second) - - messages_unack (integer, count) + - tags: + - url + - queue + - vhost + - node + - durable + - auto_delete + - fields: + - consumer_utilisation (float, percent) + - consumers (int, count) + - idle_since (string, time - e.g., "2006-01-02 15:04:05") + - memory (int, bytes) + - message_bytes (int, bytes) + - message_bytes_persist (int, bytes) + - message_bytes_ram (int, bytes) + - message_bytes_ready (int, bytes) + - message_bytes_unacked (int, bytes) + - messages (int, count) + - messages_ack (int, count) + - messages_ack_rate (float, messages per second) + - messages_deliver (int, count) + - messages_deliver_rate (float, 
messages per second) + - messages_deliver_get (int, count) + - messages_deliver_get_rate (float, messages per second) + - messages_publish (int, count) + - messages_publish_rate (float, messages per second) + - messages_ready (int, count) + - messages_redeliver (int, count) + - messages_redeliver_rate (float, messages per second) + - messages_unack (integer, count) -- rabbitmq_exchange - - messages_publish_in (int, count) - - messages_publish_in_rate (int, messages per second) - - messages_publish_out (int, count) - - messages_publish_out_rate (int, messages per second) ++ rabbitmq_exchange + - tags: + - url + - exchange + - type + - vhost + - internal + - durable + - auto_delete + - fields: + - messages_publish_in (int, count) + - messages_publish_in_rate (int, messages per second) + - messages_publish_out (int, count) + - messages_publish_out_rate (int, messages per second) - rabbitmq_federation - - acks_uncommitted (int, count) - - consumers (int, count) - - messages_unacknowledged (int, count) - - messages_uncommitted (int, count) - - messages_unconfirmed (int, count) - - messages_confirm (int, count) - - messages_publish (int, count) - - messages_return_unroutable (int, count) + - tags: + - url + - vhost + - type + - upstream + - exchange + - upstream_exchange + - queue + - upstream_queue + - fields: + - acks_uncommitted (int, count) + - consumers (int, count) + - messages_unacknowledged (int, count) + - messages_uncommitted (int, count) + - messages_unconfirmed (int, count) + - messages_confirm (int, count) + - messages_publish (int, count) + - messages_return_unroutable (int, count) -### Tags: - -- All measurements have the following tags: - - url - -- rabbitmq_overview - - name - -- rabbitmq_node - - node - - url - -- rabbitmq_queue - - url - - queue - - vhost - - node - - durable - - auto_delete - -- rabbitmq_exchange - - url - - exchange - - type - - vhost - - internal - - durable - - auto_delete - -- rabbitmq_federation - - url - - vhost - - type - - upstream - - local_entity - - upstream_entity - -### Sample Queries: +### Sample Queries Message rates for the entire node can be calculated from total message counts. 
For instance, to get the rate of messages published per minute, use this query: ``` -SELECT NON_NEGATIVE_DERIVATIVE(LAST("messages_published"), 1m) AS messages_published_rate -FROM rabbitmq_overview WHERE time > now() - 10m GROUP BY time(1m) +SELECT NON_NEGATIVE_DERIVATIVE(LAST("messages_published"), 1m) AS messages_published_rate FROM rabbitmq_overview WHERE time > now() - 10m GROUP BY time(1m) ``` -### Example Output: +### Example Output ``` rabbitmq_queue,url=http://amqp.example.org:15672,queue=telegraf,vhost=influxdb,node=rabbit@amqp.example.org,durable=true,auto_delete=false,host=amqp.example.org messages_deliver_get=0i,messages_publish=329i,messages_publish_rate=0.2,messages_redeliver_rate=0,message_bytes_ready=0i,message_bytes_unacked=0i,messages_deliver=329i,messages_unack=0i,consumers=1i,idle_since="",messages=0i,messages_deliver_rate=0.2,messages_deliver_get_rate=0.2,messages_redeliver=0i,memory=43032i,message_bytes_ram=0i,messages_ack=329i,messages_ready=0i,messages_ack_rate=0.2,consumer_utilisation=1,message_bytes=0i,message_bytes_persist=0i 1493684035000000000 diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index acbba6e2a..199b24922 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -34,25 +34,25 @@ const DefaultClientTimeout = 4 // RabbitMQ defines the configuration necessary for gathering metrics, // see the sample config for further details type RabbitMQ struct { - URL string - Name string - Username string - Password string + URL string `toml:"url"` + Name string `toml:"name"` + Username string `toml:"username"` + Password string `toml:"password"` tls.ClientConfig ResponseHeaderTimeout internal.Duration `toml:"header_timeout"` ClientTimeout internal.Duration `toml:"client_timeout"` - Nodes []string - Queues []string - Exchanges []string + Nodes []string `toml:"nodes"` + Queues []string `toml:"queues"` + Exchanges []string `toml:"exchanges"` QueueInclude []string `toml:"queue_name_include"` QueueExclude []string `toml:"queue_name_exclude"` FederationUpstreamInclude []string `toml:"federation_upstream_include"` FederationUpstreamExclude []string `toml:"federation_upstream_exclude"` - Client *http.Client + Client *http.Client `toml:"-"` filterCreated bool excludeEveryQueue bool From 8461631703679d170a619118fba1954b32bb783a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 23 Oct 2019 17:27:28 -0700 Subject: [PATCH 1273/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8f0f60167..9b92fd688 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ - [#6530](https://github.com/influxdata/telegraf/pull/6530): Improve ipvs input error strings and logging. - [#6532](https://github.com/influxdata/telegraf/pull/6532): Add strict mode to JSON parser that can be disable to ignore invalid items. - [#6543](https://github.com/influxdata/telegraf/pull/6543): Add support for Kubernetes 1.16 and remove deprecated API usage. +- [#6283](https://github.com/influxdata/telegraf/pull/6283): Add gathering of RabbitMQ federation link metrics. 
#### Bugfixes From 2754a484f980386176a3fbaccfb5d38dd9b8c4a7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 24 Oct 2019 11:13:52 -0700 Subject: [PATCH 1274/1815] Document memory_usage field in procstat input --- plugins/inputs/procstat/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index 8ce834a70..6163b8284 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -106,6 +106,7 @@ implemented as a WMI query. The pattern allows fuzzy matching using only - memory_rss (int) - memory_stack (int) - memory_swap (int) + - memory_usage (float) - memory_vms (int) - minor_faults (int) - nice_priority (int) From 9efc37606f7eae0d72b801ba0be68d4561eee915 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Thu, 24 Oct 2019 17:45:59 -0600 Subject: [PATCH 1275/1815] Add descriptions for kapacitor input metrics (#6505) --- plugins/inputs/kapacitor/README.md | 288 +++++++++++++++++++++++------ 1 file changed, 236 insertions(+), 52 deletions(-) diff --git a/plugins/inputs/kapacitor/README.md b/plugins/inputs/kapacitor/README.md index 2ff4eab88..8a6f3477f 100644 --- a/plugins/inputs/kapacitor/README.md +++ b/plugins/inputs/kapacitor/README.md @@ -25,63 +25,247 @@ The Kapacitor plugin will collect metrics from the given Kapacitor instances. ### Measurements & Fields -- kapacitor - - num_enabled_tasks, integer - - num_subscriptions, integer - - num_tasks, integer -- kapacitor_edges - - collected, integer - - emitted, integer -- kapacitor_ingress - - points_received, integer -- kapacitor_memstats - - alloc_bytes, integer - - buck_hash_sys_bytes, integer - - frees, integer - - gcc_pu_fraction, float - - gc_sys_bytes, integer - - heap_alloc_bytes, integer - - heap_idle_bytes, integer - - heap_inuse_bytes, integer - - heap_objects, integer - - heap_released_bytes, integer - - heap_sys_bytes, integer - - last_gc_ns, integer - - lookups, integer - - mallocs, integer - - mcache_in_use_bytes, integer - - mcache_sys_bytes, integer - - mspan_in_use_bytes, integer - - mspan_sys_bytes, integer - - next_gc_ns, integer - - num_gc, integer - - other_sys_bytes, integer - - pause_total_ns, integer - - stack_in_use_bytes, integer - - stack_sys_bytes, integer - - sys_bytes, integer - - total_alloc_bytes, integer -- kapacitor_nodes - - alerts_triggered, integer - - avg_exec_time_ns, integer - - batches_queried, integer - - crits_triggered, integer - - eval_errors, integer - - fields_defaulted, integer - - infos_triggered, integer - - oks_triggered, integer - - points_queried, integer - - points_written, integer - - query_errors, integer - - tags_defaulted, integer - - warns_triggered, integer - - write_errors, integer +- [kapacitor](#kapacitor) + - [num_enabled_tasks](#num_enabled_tasks) _(integer)_ + - [num_subscriptions](#num_subscriptions) _(integer)_ + - [num_tasks](#num_tasks) _(integer)_ +- [kapacitor_edges](#kapacitor_edges) + - [collected](#collected) _(integer)_ + - [emitted](#emitted) _(integer)_ +- [kapacitor_ingress](#kapacitor_ingress) + - [points_received](#points_received) _(integer)_ +- [kapacitor_load](#kapacitor_load) + - [errors](#errors) _(integer)_ +- [kapacitor_memstats](#kapacitor_memstats) + - [alloc_bytes](#alloc_bytes) _(integer)_ + - [buck_hash_sys_bytes](#buck_hash_sys_bytes) _(integer)_ + - [frees](#frees) _(integer)_ + - [gc_sys_bytes](#gc_sys_bytes) _(integer)_ + - [gcc_pu_fraction](#gcc_pu_fraction) _(float)_ + - [heap_alloc_bytes](#heap_alloc_bytes) _(integer)_ + - 
[heap_idle_bytes](#heap_idle_bytes) _(integer)_ + - [heap_in_use_bytes](#heap_in_use_bytes) _(integer)_ + - [heap_objects](#heap_objects) _(integer)_ + - [heap_released_bytes](#heap_released_bytes) _(integer)_ + - [heap_sys_bytes](#heap_sys_bytes) _(integer)_ + - [last_gc_ns](#last_gc_ns) _(integer)_ + - [lookups](#lookups) _(integer)_ + - [mallocs](#mallocs) _(integer)_ + - [mcache_in_use_bytes](#mcache_in_use_bytes) _(integer)_ + - [mcache_sys_bytes](#mcache_sys_bytes) _(integer)_ + - [mspan_in_use_bytes](#mspan_in_use_bytes) _(integer)_ + - [mspan_sys_bytes](#mspan_sys_bytes) _(integer)_ + - [next_gc_ns](#next_gc_ns) _(integer)_ + - [num_gc](#num_gc) _(integer)_ + - [other_sys_bytes](#other_sys_bytes) _(integer)_ + - [pause_total_ns](#pause_total_ns) _(integer)_ + - [stack_in_use_bytes](#stack_in_use_bytes) _(integer)_ + - [stack_sys_bytes](#stack_sys_bytes) _(integer)_ + - [sys_bytes](#sys_bytes) _(integer)_ + - [total_alloc_bytes](#total_alloc_bytes) _(integer)_ +- [kapacitor_nodes](#kapacitor_nodes) + - [alerts_inhibited](#alerts_inhibited) _(integer)_ + - [alerts_triggered](#alerts_triggered) _(integer)_ + - [avg_exec_time_ns](#avg_exec_time_ns) _(integer)_ + - [crits_triggered](#crits_triggered) _(integer)_ + - [errors](#errors) _(integer)_ + - [infos_triggered](#infos_triggered) _(integer)_ + - [oks_triggered](#oks_triggered) _(integer)_ + - [points_written](#points_written) _(integer)_ + - [warns_triggered](#warns_triggered) _(integer)_ + - [write_errors](#write_errors) _(integer)_ +- [kapacitor_topics](#kapacitor_topics) + - [collected](#collected) _(integer)_ + + +--- + +### kapacitor +The `kapacitor` measurement stores fields with information related to +[Kapacitor tasks](https://docs.influxdata.com/kapacitor/latest/introduction/getting-started/#kapacitor-tasks) +and [subscriptions](https://docs.influxdata.com/kapacitor/latest/administration/subscription-management/). + +#### num_enabled_tasks +The number of enabled Kapacitor tasks. + +#### num_subscriptions +The number of Kapacitor/InfluxDB subscriptions. + +#### num_tasks +The total number of Kapacitor tasks. + +--- + +### kapacitor_edges +The `kapacitor_edges` measurement stores fields with information related to +[edges](https://docs.influxdata.com/kapacitor/latest/tick/introduction/#pipelines) +in Kapacitor TICKscripts. + +#### collected +The number of messages collected by TICKscript edges. + +#### emitted +The number of messages emitted by TICKscript edges. + +--- + +### kapacitor_ingress +The `kapacitor_ingress` measurement stores fields with information related to data +coming into Kapacitor. + +#### points_received +The number of points received by Kapacitor. + +--- + +### kapacitor_load +The `kapacitor_load` measurement stores fields with information related to the +[Kapacitor Load Directory service](https://docs.influxdata.com/kapacitor/latest/guides/load_directory/). + +#### errors +The number of errors reported from the load directory service. + +--- + +### kapacitor_memstats +The `kapacitor_memstats` measurement stores fields related to Kapacitor memory usage. + +#### alloc_bytes +The number of bytes of memory allocated by Kapacitor that are still in use. + +#### buck_hash_sys_bytes +The number of bytes of memory used by the profiling bucket hash table. + +#### frees +The number of heap objects freed. + +#### gc_sys_bytes +The number of bytes of memory used for garbage collection system metadata. 
+ +#### gcc_pu_fraction +The fraction of Kapacitor's available CPU time used by garbage collection since +Kapacitor started. + +#### heap_alloc_bytes +The number of bytes of reachable and unreachable heap objects garbage collection has +not freed. + +#### heap_idle_bytes +The number of heap bytes waiting to be used. + +#### heap_in_use_bytes +The number of heap bytes in use. + +#### heap_objects +The number of allocated objects. + +#### heap_released_bytes +The number of heap bytes released to the operating system. + +#### heap_sys_bytes +The number of heap bytes obtained from `system`. + +#### last_gc_ns +The nanosecond epoch time of the last garbage collection. + +#### lookups +The total number of pointer lookups. + +#### mallocs +The total number of mallocs. + +#### mcache_in_use_bytes +The number of bytes in use by mcache structures. + +#### mcache_sys_bytes +The number of bytes used for mcache structures obtained from `system`. + +#### mspan_in_use_bytes +The number of bytes in use by mspan structures. + +#### mspan_sys_bytes +The number of bytes used for mspan structures obtained from `system`. + +#### next_gc_ns +The nanosecond epoch time of the next garbage collection. + +#### num_gc +The number of completed garbage collection cycles. + +#### other_sys_bytes +The number of bytes used for other system allocations. + +#### pause_total_ns +The total number of nanoseconds spent in garbage collection "stop-the-world" +pauses since Kapacitor started. + +#### stack_in_use_bytes +The number of bytes in use by the stack allocator. + +#### stack_sys_bytes +The number of bytes obtained from `system` for the stack allocator. + +#### sys_bytes +The number of bytes of memory obtained from `system`. + +#### total_alloc_bytes +The total number of bytes allocated, even if freed. + +--- + +### kapacitor_nodes +The `kapacitor_nodes` measurement stores fields related to events that occur in +[TICKscript nodes](https://docs.influxdata.com/kapacitor/latest/nodes/). + +#### alerts_inhibited +The total number of alerts inhibited by TICKscripts. + +#### alerts_triggered +The total number of alerts triggered by TICKscripts. + +#### avg_exec_time_ns +The average execution time of TICKscripts in nanoseconds. + +#### crits_triggered +The number of critical (`crit`) alerts triggered by TICKscripts. + +#### errors +The number of errors caused by TICKscripts. + +#### infos_triggered +The number of info (`info`) alerts triggered by TICKscripts. + +#### oks_triggered +The number of ok (`ok`) alerts triggered by TICKscripts. + +#### points_written +The number of points written to InfluxDB or back to Kapacitor. + +#### warns_triggered +The number of warning (`warn`) alerts triggered by TICKscripts. + +#### working_cardinality +The total number of unique series processed. + +#### write_errors +The number of errors that occurred when writing to InfluxDB or other write endpoints. + +--- + +### kapacitor_topics +The `kapacitor_topics` measurement stores fields related to +[Kapacitor topics](https://docs.influxdata.com/kapacitor/latest/working/using_alert_topics/). + +#### collected +The number of events collected by Kapacitor topics. + +--- *Note:* The Kapacitor variables `host`, `cluster_id`, and `server_id` are currently not recorded due to the potential high cardinality of these values. 
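The cumulative counters above (for example `pause_total_ns`) are most useful when turned into rates. As an illustrative sketch only, assuming these metrics are stored in InfluxDB, GC pause time per minute could be derived in the same style as the rabbitmq sample query earlier in this series:

```
SELECT NON_NEGATIVE_DERIVATIVE(LAST("pause_total_ns"), 1m) AS gc_pause_ns_per_min
FROM kapacitor_memstats WHERE time > now() - 10m GROUP BY time(1m)
```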
-### Example Output: +## Example Output ``` $ telegraf --config /etc/telegraf.conf --input-filter kapacitor --test From f0a578492abfae3e08bdc808ee93928ad6a5cd8c Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 5 Nov 2019 11:34:18 -0700 Subject: [PATCH 1276/1815] Fix incorrect results in ping plugin (#6581) --- plugins/inputs/ping/ping.go | 2 ++ plugins/inputs/ping/ping_test.go | 1 + 2 files changed, 3 insertions(+) diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 581d429f7..de3c5fe8f 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -293,10 +293,12 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { if strings.Contains(err.Error(), "not permitted") { sent.sent = false } + sents <- sent return } resps <- resp + sents <- sent }(i + 1) } } diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 56303b1b2..8a1a0a9e1 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -355,4 +355,5 @@ func TestPingGatherNative(t *testing.T) { assert.NoError(t, acc.GatherError(p.Gather)) assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_transmitted", 5)) + assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_received", 5)) } From 042fa53db8415ae2a9b4d5ed011a1b996e6baf9c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 5 Nov 2019 10:37:00 -0800 Subject: [PATCH 1277/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9b92fd688..ca36b78eb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,12 @@ - [#6484](https://github.com/influxdata/telegraf/issues/6484): Show correct default settings in mysql sample config. +## v1.12.5 [unreleased] + +#### Bugfixes + +- [#6576](https://github.com/influxdata/telegraf/issues/6576): Fix incorrect results in ping input plugin. + ## v1.12.4 [2019-10-23] #### Release Notes From ba579819a0007c6ca02efbf01700eeba8f1c798f Mon Sep 17 00:00:00 2001 From: Giovanni Luisotto Date: Tue, 5 Nov 2019 20:56:48 +0100 Subject: [PATCH 1278/1815] Add missing character replacement to sql_instance tag (#6610) --- plugins/inputs/sqlserver/sqlserver.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 2aaccd871..1a5939f94 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -1305,7 +1305,7 @@ const sqlAzureDBResourceGovernance string = ` IF SERVERPROPERTY('EngineEdition') = 5 -- Is this Azure SQL DB? SELECT 'sqlserver_db_resource_governance' AS [measurement], - @@servername AS [sql_instance], + REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], DB_NAME() as [database_name], slo_name, dtu_limit, @@ -1344,7 +1344,7 @@ BEGIN IF SERVERPROPERTY('EngineEdition') = 8 -- Is this Azure SQL Managed Instance? 
SELECT 'sqlserver_instance_resource_governance' AS [measurement], - @@SERVERNAME AS [sql_instance], + REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], instance_cap_cpu, instance_max_log_rate, instance_max_worker_threads, @@ -1367,7 +1367,7 @@ SELECT blocking_session_id into #blockingSessions FROM sys.dm_exec_requests WHE create index ix_blockingSessions_1 on #blockingSessions (blocking_session_id) SELECT 'sqlserver_requests' AS [measurement], - @@servername AS [sql_instance], + REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], DB_NAME() as [database_name], r.session_id , r.request_id From dd258e678234f9a16144e0389c0456ac2e21d465 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 5 Nov 2019 11:58:10 -0800 Subject: [PATCH 1279/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ca36b78eb..4b26b6ae4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,7 @@ #### Bugfixes - [#6576](https://github.com/influxdata/telegraf/issues/6576): Fix incorrect results in ping input plugin. +- [#6610](https://github.com/influxdata/telegraf/pull/6610): Add missing character replacement to sql_instance tag. ## v1.12.4 [2019-10-23] From 6881c64431b7481844d4b35a22e3c6f48694787e Mon Sep 17 00:00:00 2001 From: The Dale Date: Wed, 6 Nov 2019 07:50:07 +0800 Subject: [PATCH 1280/1815] Add punctuation to CONFIGURATION.md (#6600) --- docs/CONFIGURATION.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 75aa1503b..55ab3498e 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -124,12 +124,12 @@ The agent table configures Telegraf and the defaults used across all plugins. - **flush_interval**: Default flushing [interval][] for all outputs. Maximum flush_interval will be - flush_interval + flush_jitter + flush_interval + flush_jitter. - **flush_jitter**: Jitter the flush [interval][] by a random amount. This is primarily to avoid large write spikes for users running a large number of telegraf instances. - ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s. - **precision**: Collected metrics are rounded to the precision specified as an [interval][]. From 284c7fc404221fe234ccd061c4947b2a4db31240 Mon Sep 17 00:00:00 2001 From: David McKay Date: Wed, 6 Nov 2019 21:37:48 +0000 Subject: [PATCH 1281/1815] Add bearer token defaults for Kubernetes plugins (#6356) --- plugins/inputs/kube_inventory/README.md | 3 ++ plugins/inputs/kube_inventory/kube_state.go | 48 +++++++++++++-------- plugins/inputs/kubernetes/README.md | 2 + plugins/inputs/kubernetes/kubernetes.go | 32 +++++++++----- 4 files changed, 57 insertions(+), 28 deletions(-) diff --git a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md index 063d03072..f017b18c6 100644 --- a/plugins/inputs/kube_inventory/README.md +++ b/plugins/inputs/kube_inventory/README.md @@ -41,6 +41,8 @@ avoid cardinality issues: # namespace = "default" ## Use bearer token for authorization. 
('bearer_token' takes priority) + ## If both of these are empty, we'll use the default serviceaccount: + ## at: /run/secrets/kubernetes.io/serviceaccount/token # bearer_token = "/path/to/bearer/token" ## OR # bearer_token_string = "abc_123" @@ -265,6 +267,7 @@ The persistentvolumeclaim "phase" is saved in the `phase` tag with a correlated | pending | 2 | | unknown | 3 | + ### Example Output: ``` diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_state.go index 19de9b882..5aa51b6c5 100644 --- a/plugins/inputs/kube_inventory/kube_state.go +++ b/plugins/inputs/kube_inventory/kube_state.go @@ -19,6 +19,10 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +const ( + defaultServiceAccountPath = "/run/secrets/kubernetes.io/serviceaccount/token" +) + // KubernetesInventory represents the config object for the plugin. type KubernetesInventory struct { URL string `toml:"url"` @@ -42,6 +46,8 @@ var sampleConfig = ` # namespace = "default" ## Use bearer token for authorization. ('bearer_token' takes priority) + ## If both of these are empty, we'll use the default serviceaccount: + ## at: /run/secrets/kubernetes.io/serviceaccount/token # bearer_token = "/path/to/bearer/token" ## OR # bearer_token_string = "abc_123" @@ -77,14 +83,32 @@ func (ki *KubernetesInventory) Description() string { return "Read metrics from the Kubernetes api" } -// Gather collects kubernetes metrics from a given URL. -func (ki *KubernetesInventory) Gather(acc telegraf.Accumulator) (err error) { - if ki.client == nil { - if ki.client, err = ki.initClient(); err != nil { - return err - } +func (ki *KubernetesInventory) Init() error { + // If neither are provided, use the default service account. + if ki.BearerToken == "" && ki.BearerTokenString == "" { + ki.BearerToken = defaultServiceAccountPath } + if ki.BearerToken != "" { + token, err := ioutil.ReadFile(ki.BearerToken) + if err != nil { + return err + } + ki.BearerTokenString = strings.TrimSpace(string(token)) + } + + var err error + ki.client, err = newClient(ki.URL, ki.Namespace, ki.BearerTokenString, ki.ResponseTimeout.Duration, ki.ClientConfig) + + if err != nil { + return err + } + + return nil +} + +// Gather collects kubernetes metrics from a given URL. +func (ki *KubernetesInventory) Gather(acc telegraf.Accumulator) (err error) { resourceFilter, err := filter.NewIncludeExcludeFilter(ki.ResourceInclude, ki.ResourceExclude) if err != nil { return err @@ -121,18 +145,6 @@ var availableCollectors = map[string]func(ctx context.Context, acc telegraf.Accu "persistentvolumeclaims": collectPersistentVolumeClaims, } -func (ki *KubernetesInventory) initClient() (*client, error) { - if ki.BearerToken != "" { - token, err := ioutil.ReadFile(ki.BearerToken) - if err != nil { - return nil, err - } - ki.BearerTokenString = strings.TrimSpace(string(token)) - } - - return newClient(ki.URL, ki.Namespace, ki.BearerTokenString, ki.ResponseTimeout.Duration, ki.ClientConfig) -} - func atoi(s string) int64 { i, err := strconv.ParseInt(s, 10, 64) if err != nil { diff --git a/plugins/inputs/kubernetes/README.md b/plugins/inputs/kubernetes/README.md index 9aa5f17c1..a094b7b29 100644 --- a/plugins/inputs/kubernetes/README.md +++ b/plugins/inputs/kubernetes/README.md @@ -38,6 +38,8 @@ avoid cardinality issues: url = "http://127.0.0.1:10255" ## Use bearer token for authorization. 
('bearer_token' takes priority) + ## If both of these are empty, we'll use the default serviceaccount: + ## at: /run/secrets/kubernetes.io/serviceaccount/token # bearer_token = "/path/to/bearer/token" ## OR # bearer_token_string = "abc_123" diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go index 4e6e17ef1..45093a57b 100644 --- a/plugins/inputs/kubernetes/kubernetes.go +++ b/plugins/inputs/kubernetes/kubernetes.go @@ -36,6 +36,8 @@ var sampleConfig = ` url = "http://127.0.0.1:10255" ## Use bearer token for authorization. ('bearer_token' takes priority) + ## If both of these are empty, we'll use the default serviceaccount: + ## at: /run/secrets/kubernetes.io/serviceaccount/token # bearer_token = "/path/to/bearer/token" ## OR # bearer_token_string = "abc_123" @@ -52,7 +54,8 @@ var sampleConfig = ` ` const ( - summaryEndpoint = `%s/stats/summary` + summaryEndpoint = `%s/stats/summary` + defaultServiceAccountPath = "/run/secrets/kubernetes.io/serviceaccount/token" ) func init() { @@ -71,6 +74,23 @@ func (k *Kubernetes) Description() string { return "Read metrics from the kubernetes kubelet api" } +func (k *Kubernetes) Init() error { + // If neither are provided, use the default service account. + if k.BearerToken == "" && k.BearerTokenString == "" { + k.BearerToken = defaultServiceAccountPath + } + + if k.BearerToken != "" { + token, err := ioutil.ReadFile(k.BearerToken) + if err != nil { + return err + } + k.BearerTokenString = strings.TrimSpace(string(token)) + } + + return nil +} + //Gather collects kubernetes metrics from a given URL func (k *Kubernetes) Gather(acc telegraf.Accumulator) error { acc.AddError(k.gatherSummary(k.URL, acc)) @@ -108,15 +128,7 @@ func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) err } } - if k.BearerToken != "" { - token, err := ioutil.ReadFile(k.BearerToken) - if err != nil { - return err - } - req.Header.Set("Authorization", "Bearer "+strings.TrimSpace(string(token))) - } else if k.BearerTokenString != "" { - req.Header.Set("Authorization", "Bearer "+k.BearerTokenString) - } + req.Header.Set("Authorization", "Bearer "+k.BearerTokenString) req.Header.Add("Accept", "application/json") resp, err = k.RoundTripper.RoundTrip(req) From 803e1a48aa98b255de923c4fd4bd6605540782ec Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 6 Nov 2019 13:39:09 -0800 Subject: [PATCH 1282/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b26b6ae4..d4c386372 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ - [#6532](https://github.com/influxdata/telegraf/pull/6532): Add strict mode to JSON parser that can be disable to ignore invalid items. - [#6543](https://github.com/influxdata/telegraf/pull/6543): Add support for Kubernetes 1.16 and remove deprecated API usage. - [#6283](https://github.com/influxdata/telegraf/pull/6283): Add gathering of RabbitMQ federation link metrics. +- [#6356](https://github.com/influxdata/telegraf/pull/6356): Add bearer token defaults for Kubernetes plugins. 
#### Bugfixes From e4170339b19aad03188474c0d6b99be43ceed11d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 6 Nov 2019 16:05:11 -0800 Subject: [PATCH 1283/1815] Document alias option on output, processor, aggregator plugins --- docs/CONFIGURATION.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 55ab3498e..d5e5ad072 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -257,6 +257,7 @@ databases, network services, and messaging systems. Parameters that can be used with any output plugin: +- **alias**: Name an instance of a plugin. - **flush_interval**: The maximum time between flushes. Use this setting to override the agent `flush_interval` on a per plugin basis. - **metric_batch_size**: The maximum number of metrics to send at once. Use @@ -294,6 +295,7 @@ input plugins and before any aggregator plugins. Parameters that can be used with any processor plugin: +- **alias**: Name an instance of a plugin. - **order**: The order in which the processor(s) are executed. If this is not specified then processor execution order will be random. @@ -328,6 +330,7 @@ processors have been applied. Parameters that can be used with any aggregator plugin: +- **alias**: Name an instance of a plugin. - **period**: The period on which to flush & clear each aggregator. All metrics that are sent with timestamps outside of this period will be ignored by the aggregator. From 8d52f5a4b2ae36ad72a334ab1d2fb43ee2bdeaa8 Mon Sep 17 00:00:00 2001 From: Nick Neisen Date: Wed, 6 Nov 2019 18:32:56 -0700 Subject: [PATCH 1284/1815] Change no metric error message to debug level (#6630) --- agent/accumulator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent/accumulator.go b/agent/accumulator.go index 21146e3e2..6824249f6 100644 --- a/agent/accumulator.go +++ b/agent/accumulator.go @@ -111,7 +111,7 @@ func (ac *accumulator) AddError(err error) { return } NErrors.Incr(1) - log.Printf("E! [%s] Error in plugin: %v", ac.maker.LogName(), err) + log.Printf("D! [%s] Error in plugin: %v", ac.maker.LogName(), err) } func (ac *accumulator) SetPrecision(precision time.Duration) { From b4a712969ec58f077a1f18fa5cc66734c186bb24 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 6 Nov 2019 17:33:53 -0800 Subject: [PATCH 1285/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d4c386372..0e29c4763 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ - [#6576](https://github.com/influxdata/telegraf/issues/6576): Fix incorrect results in ping input plugin. - [#6610](https://github.com/influxdata/telegraf/pull/6610): Add missing character replacement to sql_instance tag. +- [#6337](https://github.com/influxdata/telegraf/issues/6337): Change no metric error message to debug level in cloudwatch input. 
## v1.12.4 [2019-10-23] From 378c570c06f678b7004520115df836c36f6fea35 Mon Sep 17 00:00:00 2001 From: Felix Maurer Date: Thu, 7 Nov 2019 20:56:51 +0200 Subject: [PATCH 1286/1815] Add support for SNMP over TCP (#5870) --- Gopkg.lock | 14 ++++++++++--- Gopkg.toml | 2 +- plugins/inputs/snmp/README.md | 2 +- plugins/inputs/snmp/snmp.go | 4 ++++ plugins/inputs/snmp/snmp_test.go | 34 ++++++++++++++++++++++++++++++++ 5 files changed, 51 insertions(+), 5 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 410b9b284..78842d940 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -513,6 +513,14 @@ revision = "636bf0302bc95575d69441b25a2603156ffdddf1" version = "v1.1.1" +[[projects]] + digest = "1:530233672f656641b365f8efb38ed9fba80e420baff2ce87633813ab3755ed6d" + name = "github.com/golang/mock" + packages = ["gomock"] + pruneopts = "" + revision = "51421b967af1f557f93a59e0057aaf15ca02e29c" + version = "v1.2.0" + [[projects]] digest = "1:f958a1c137db276e52f0b50efee41a1a389dcdded59a69711f3e872757dab34b" name = "github.com/golang/protobuf" @@ -1104,12 +1112,12 @@ version = "v1.0.5" [[projects]] - branch = "master" - digest = "1:4b0cabe65ca903a7b2a3e6272c5304eb788ce196d35ecb901c6563e5e7582443" + digest = "1:a1cb5e999ad98b9838147e11ed1bdb000e750ee8872e2e21c74d9464cc9110c0" name = "github.com/soniah/gosnmp" packages = ["."] pruneopts = "" - revision = "96b86229e9b3ffb4b954144cdc7f98fe3ee1003f" + revision = "40eae407a1f8cbbe3f3f14c57bde0b16db1cfe85" + version = "v1.22.0" [[projects]] branch = "master" diff --git a/Gopkg.toml b/Gopkg.toml index 3069cbf40..1bbeb962f 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -153,7 +153,7 @@ [[constraint]] name = "github.com/soniah/gosnmp" - branch = "master" + version = "1.22.0" [[constraint]] name = "github.com/StackExchange/wmi" diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index dab28e9b0..a15e5ddb6 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -98,7 +98,7 @@ Resulting output: ### Config parameters * `agents`: Default: `[]` -List of SNMP agents to connect to in the form of `IP[:PORT]`. If `:PORT` is unspecified, it defaults to `161`. +List of SNMP agents to connect to in the form of `[tcp://]IP[:PORT]`. If `:PORT` is unspecified, it defaults to `161`. When using the optional prefix `tcp://`, SNMP over TCP will be used. Otherwise UDP is used as the transport protocol. * `version`: Default: `2` SNMP protocol version to use. 
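As an illustrative sketch of the agent address format documented above (the addresses here are hypothetical), a configuration mixing the default UDP transport with the new `tcp://` prefix might look like this:

```toml
[[inputs.snmp]]
  ## Hypothetical agents: the first uses UDP on the default port 161,
  ## the second uses SNMP over TCP via the optional tcp:// prefix.
  agents = ["192.168.1.10", "tcp://192.168.1.11:161"]
  version = 2
```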
diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 18eed4e47..32968730e 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -623,6 +623,10 @@ func (s *Snmp) getConnection(idx int) (snmpConnection, error) { gs := gosnmpWrapper{&gosnmp.GoSNMP{}} s.connectionCache[idx] = gs + if strings.HasPrefix(agent, "tcp://") { + agent = strings.TrimPrefix(agent, "tcp://") + gs.Transport = "tcp" + } host, portStr, err := net.SplitHostPort(agent) if err != nil { if err, ok := err.(*net.AddrError); !ok || err.Err != "missing port in address" { diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index db1a49605..efa426845 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -272,12 +272,46 @@ func TestGetSNMPConnection_v2(t *testing.T) { assert.EqualValues(t, 567, gs.Port) assert.Equal(t, gosnmp.Version2c, gs.Version) assert.Equal(t, "foo", gs.Community) + assert.Equal(t, "udp", gs.Transport) gsc, err = s.getConnection(1) require.NoError(t, err) gs = gsc.(gosnmpWrapper) assert.Equal(t, "1.2.3.4", gs.Target) assert.EqualValues(t, 161, gs.Port) + assert.Equal(t, "udp", gs.Transport) +} + +func TestGetSNMPConnectionTCP(t *testing.T) { + var wg sync.WaitGroup + wg.Add(1) + go stubTCPServer(&wg) + wg.Wait() + + s := &Snmp{ + Agents: []string{"tcp://127.0.0.1:56789"}, + } + err := s.init() + require.NoError(t, err) + + wg.Add(1) + gsc, err := s.getConnection(0) + require.NoError(t, err) + gs := gsc.(gosnmpWrapper) + assert.Equal(t, "127.0.0.1", gs.Target) + assert.EqualValues(t, 56789, gs.Port) + assert.Equal(t, "tcp", gs.Transport) + wg.Wait() +} + +func stubTCPServer(wg *sync.WaitGroup) { + defer wg.Done() + tcpAddr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:56789") + tcpServer, _ := net.ListenTCP("tcp", tcpAddr) + defer tcpServer.Close() + wg.Done() + conn, _ := tcpServer.AcceptTCP() + defer conn.Close() } func TestGetSNMPConnection_v3(t *testing.T) { From b98f4dc97bd1eca9ef443fc6115ba58c3095b902 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 7 Nov 2019 10:57:32 -0800 Subject: [PATCH 1287/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0e29c4763..7e796c006 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,7 @@ - [#6543](https://github.com/influxdata/telegraf/pull/6543): Add support for Kubernetes 1.16 and remove deprecated API usage. - [#6283](https://github.com/influxdata/telegraf/pull/6283): Add gathering of RabbitMQ federation link metrics. - [#6356](https://github.com/influxdata/telegraf/pull/6356): Add bearer token defaults for Kubernetes plugins. +- [#5870](https://github.com/influxdata/telegraf/pull/5870): Add support for SNMP over TCP. 
#### Bugfixes From 80e93af25bd365564684f983c87666f7050bb4ff Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 7 Nov 2019 11:00:31 -0800 Subject: [PATCH 1288/1815] Add gomock to dependency licenses --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index e0332196b..b3eb7ae20 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -42,6 +42,7 @@ following works: - github.com/go-sql-driver/mysql [Mozilla Public License 2.0](https://github.com/go-sql-driver/mysql/blob/master/LICENSE) - github.com/gobwas/glob [MIT License](https://github.com/gobwas/glob/blob/master/LICENSE) - github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE) +- github.com/golang/mock [Apache License 2.0](https://github.com/golang/mock/blob/master/LICENSE) - github.com/golang/protobuf [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/protobuf/blob/master/LICENSE) - github.com/golang/snappy [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/snappy/blob/master/LICENSE) - github.com/google/go-cmp [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-cmp/blob/master/LICENSE) From 23c43fc310c6d2150a22e153a0ae2862ebfa953a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 7 Nov 2019 11:07:45 -0800 Subject: [PATCH 1289/1815] Use gopkg.in/ldap.v3 3.1.0 (#6567) --- Gopkg.lock | 10 +++++----- Gopkg.toml | 8 ++++---- plugins/inputs/openldap/openldap.go | 3 +-- plugins/inputs/openldap/openldap_test.go | 3 +-- 4 files changed, 11 insertions(+), 13 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 78842d940..35f25649f 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1629,12 +1629,12 @@ version = "v1.1.0" [[projects]] - digest = "1:367baf06b7dbd0ef0bbdd785f6a79f929c96b0c18e9d3b29c0eed1ac3f5db133" - name = "gopkg.in/ldap.v2" + digest = "1:cff622452aa789a1b2212d401f6b618ca1751a02229d26e002eb645ec22818f2" + name = "gopkg.in/ldap.v3" packages = ["."] pruneopts = "" - revision = "bb7a9ca6e4fbc2129e3db588a34bc970ffe811a9" - version = "v2.5.1" + revision = "caa044a2bfa324b735baee1722e8e2e372f76864" + version = "v3.1.0" [[projects]] branch = "v2" @@ -1826,7 +1826,7 @@ "google.golang.org/grpc/peer", "google.golang.org/grpc/status", "gopkg.in/gorethink/gorethink.v3", - "gopkg.in/ldap.v2", + "gopkg.in/ldap.v3", "gopkg.in/mgo.v2", "gopkg.in/mgo.v2/bson", "gopkg.in/olivere/elastic.v5", diff --git a/Gopkg.toml b/Gopkg.toml index 1bbeb962f..cc3498ab3 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -201,10 +201,6 @@ name = "gopkg.in/gorethink/gorethink.v3" version = "3.0.5" -[[constraint]] - name = "gopkg.in/ldap.v2" - version = "2.5.1" - [[constraint]] name = "gopkg.in/mgo.v2" branch = "v2" @@ -301,3 +297,7 @@ [[constraint]] branch = "master" name = "github.com/cisco-ie/nx-telemetry-proto" + +[[constraint]] + name = "gopkg.in/ldap.v3" + version = "3.1.0" diff --git a/plugins/inputs/openldap/openldap.go b/plugins/inputs/openldap/openldap.go index 9e69c8a21..a92a37371 100644 --- a/plugins/inputs/openldap/openldap.go +++ b/plugins/inputs/openldap/openldap.go @@ -5,11 +5,10 @@ import ( "strconv" "strings" - "gopkg.in/ldap.v2" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" + "gopkg.in/ldap.v3" ) type Openldap struct { diff --git a/plugins/inputs/openldap/openldap_test.go b/plugins/inputs/openldap/openldap_test.go index 
10835896f..76d9cc3a9 100644 --- a/plugins/inputs/openldap/openldap_test.go +++ b/plugins/inputs/openldap/openldap_test.go @@ -4,11 +4,10 @@ import ( "strconv" "testing" - "gopkg.in/ldap.v2" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "gopkg.in/ldap.v3" ) func TestOpenldapMockResult(t *testing.T) { From 74a8ebda9ea0a939f220b4c91ba439f0792e9eaf Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 7 Nov 2019 15:12:53 -0800 Subject: [PATCH 1290/1815] Use github.com/miekg/dns 1.0.10 (#6632) --- Gopkg.lock | 6 +++--- Gopkg.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 35f25649f..12585863c 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -836,12 +836,12 @@ revision = "eb3dd99a75fe58389e357b732691320dcf706b5f" [[projects]] - digest = "1:4c8d8358c45ba11ab7bb15df749d4df8664ff1582daead28bae58cf8cbe49890" + digest = "1:1eef80a63549d929a5d922dc3d9ad0d489ed490f52b90887ad577b65a16d071c" name = "github.com/miekg/dns" packages = ["."] pruneopts = "" - revision = "5a2b9fab83ff0f8bfc99684bd5f43a37abe560f1" - version = "v1.0.8" + revision = "f4db2ca6edc3af0ee51bf332099cc480bcf3ef9d" + version = "v1.0.10" [[projects]] branch = "master" diff --git a/Gopkg.toml b/Gopkg.toml index cc3498ab3..8bf962452 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -104,7 +104,7 @@ [[constraint]] name = "github.com/miekg/dns" - version = "1.0.8" + version = "1.0.10" [[constraint]] name = "github.com/multiplay/go-ts3" From 0e6500669a5281ea3793311fc1331daa97ef6ad3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 7 Nov 2019 16:29:45 -0800 Subject: [PATCH 1291/1815] Add missing ServerProperties query to sqlserver input docs (#6625) --- plugins/inputs/sqlserver/README.md | 5 +++-- plugins/inputs/sqlserver/sqlserver.go | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index 2d41c5dcc..b586ecd27 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -52,7 +52,7 @@ GO query_version = 2 ## If you are using AzureDB, setting this to true will gather resource utilization metrics - # azuredb = true + # azuredb = true ## If you would like to exclude some of the metrics queries, list them here ## Possible choices: @@ -67,8 +67,9 @@ GO ## - VolumeSpace ## - Schedulers ## - AzureDBResourceStats - ## - AzureDBResourceGovernance + ## - AzureDBResourceGovernance ## - SqlRequests + ## - ServerProperties exclude_query = [ 'Schedulers' , 'SqlRequests'] ``` diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 1a5939f94..c2c852749 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -68,6 +68,7 @@ const sampleConfig = ` ## - AzureDBResourceStats ## - AzureDBResourceGovernance ## - SqlRequests + ## - ServerProperties exclude_query = [ 'Schedulers' ] ` From e33766cc0d933dad390f0c7913ebc5a90f58ea53 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 7 Nov 2019 16:31:17 -0800 Subject: [PATCH 1292/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7e796c006..5e9947be2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -50,6 +50,7 @@ - [#6576](https://github.com/influxdata/telegraf/issues/6576): Fix incorrect results in ping input plugin. 
- [#6610](https://github.com/influxdata/telegraf/pull/6610): Add missing character replacement to sql_instance tag. - [#6337](https://github.com/influxdata/telegraf/issues/6337): Change no metric error message to debug level in cloudwatch input. +- [#6602](https://github.com/influxdata/telegraf/issues/6602): Add missing ServerProperties query to sqlserver input docs. ## v1.12.4 [2019-10-23] From 6014a26467f7fb320e132174709c6f8be7e8a4f9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 7 Nov 2019 17:23:42 -0800 Subject: [PATCH 1293/1815] Use latest golang.org/x/crypto (#6635) --- Gopkg.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 12585863c..d547004a4 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1290,7 +1290,7 @@ [[projects]] branch = "master" - digest = "1:0773b5c3be42874166670a20aa177872edb450cd9fc70b1df97303d977702a50" + digest = "1:d709f6b44dffe11337b3730ebf5ae6bb1bc9273a1c204266921205158a5a523f" name = "golang.org/x/crypto" packages = [ "bcrypt", @@ -1304,7 +1304,7 @@ "ssh/terminal", ] pruneopts = "" - revision = "a2144134853fc9a27a7b1e3eb4f19f1a76df13c9" + revision = "87dc89f01550277dc22b74ffcf4cd89fa2f40f4c" source = "https://github.com/golang/crypto.git" [[projects]] From 6cbaf890d9cdd28dc26fca063e8f70194dd65afb Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 7 Nov 2019 17:39:19 -0800 Subject: [PATCH 1294/1815] Use github.com/gofrs/uuid 2.1.0 (#6636) --- Gopkg.lock | 16 ++++++++++++---- Gopkg.toml | 4 ++-- docs/LICENSE_OF_DEPENDENCIES.md | 2 +- plugins/outputs/kafka/kafka.go | 22 +++++++++++++++------- plugins/outputs/kafka/kafka_test.go | 3 ++- plugins/outputs/kinesis/kinesis.go | 12 +++++++++--- plugins/outputs/kinesis/kinesis_test.go | 2 +- 7 files changed, 42 insertions(+), 19 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index d547004a4..aa35cb845 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -505,6 +505,14 @@ revision = "5ccd90ef52e1e632236f7326478d4faa74f99438" version = "v0.2.3" +[[projects]] + digest = "1:181fe10dcb708edd7c68c5781928b6657612771f81dd1773287386b6982c94e2" + name = "github.com/gofrs/uuid" + packages = ["."] + pruneopts = "" + revision = "3a54a6416087bae7aa0ac32dd79fe1bf87bc99e4" + version = "v2.1.0" + [[projects]] digest = "1:6e73003ecd35f4487a5e88270d3ca0a81bc80dc88053ac7e4dcfec5fba30d918" name = "github.com/gogo/protobuf" @@ -715,7 +723,7 @@ revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec" [[projects]] - digest = "1:a7998e19ebb78fdd341cdaf3825fded9030ae27af9c70d298c05d88744e16a0b" + digest = "1:e248df365cb87001738e8c9368a6a27c504328047b196d89687c1ca918279a82" name = "github.com/jackc/pgx" packages = [ ".", @@ -727,8 +735,8 @@ "stdlib", ] pruneopts = "" - revision = "8faa4453fc7051d1076053f8854077753ab912f2" - version = "v3.4.0" + revision = "c73e7d75061bb42b0282945710f344cfe1113d10" + version = "v3.6.0" [[projects]] digest = "1:d45477e90c25c8c6d7d4237281167aa56079382fc042db4b44a8328071649bfa" @@ -1731,6 +1739,7 @@ "github.com/go-redis/redis", "github.com/go-sql-driver/mysql", "github.com/gobwas/glob", + "github.com/gofrs/uuid", "github.com/golang/protobuf/proto", "github.com/golang/protobuf/ptypes/duration", "github.com/golang/protobuf/ptypes/empty", @@ -1772,7 +1781,6 @@ "github.com/prometheus/client_golang/prometheus/promhttp", "github.com/prometheus/client_model/go", "github.com/prometheus/common/expfmt", - "github.com/satori/go.uuid", "github.com/shirou/gopsutil/cpu", "github.com/shirou/gopsutil/disk", "github.com/shirou/gopsutil/host", diff --git a/Gopkg.toml b/Gopkg.toml 
index 8bf962452..048f09403 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -139,8 +139,8 @@ branch = "master" [[constraint]] - name = "github.com/satori/go.uuid" - version = "1.2.0" + name = "github.com/gofrs/uuid" + version = "2.0.0" [[constraint]] name = "github.com/shirou/gopsutil" diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index b3eb7ae20..5582bf9ee 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -41,6 +41,7 @@ following works: - github.com/go-redis/redis [BSD 2-Clause "Simplified" License](https://github.com/go-redis/redis/blob/master/LICENSE) - github.com/go-sql-driver/mysql [Mozilla Public License 2.0](https://github.com/go-sql-driver/mysql/blob/master/LICENSE) - github.com/gobwas/glob [MIT License](https://github.com/gobwas/glob/blob/master/LICENSE) +- github.com/gofrs/uuid [MIT License](https://github.com/gofrs/uuid/blob/master/LICENSE) - github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE) - github.com/golang/mock [Apache License 2.0](https://github.com/golang/mock/blob/master/LICENSE) - github.com/golang/protobuf [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/protobuf/blob/master/LICENSE) @@ -99,7 +100,6 @@ following works: - github.com/prometheus/procfs [Apache License 2.0](https://github.com/prometheus/procfs/blob/master/LICENSE) - github.com/rcrowley/go-metrics [MIT License](https://github.com/rcrowley/go-metrics/blob/master/LICENSE) - github.com/samuel/go-zookeeper [BSD 3-Clause Clear License](https://github.com/samuel/go-zookeeper/blob/master/LICENSE) -- github.com/satori/go.uuid [MIT License](https://github.com/satori/go.uuid/blob/master/LICENSE) - github.com/shirou/gopsutil [BSD 3-Clause Clear License](https://github.com/shirou/gopsutil/blob/master/LICENSE) - github.com/shirou/w32 [BSD 3-Clause Clear License](https://github.com/shirou/w32/blob/master/LICENSE) - github.com/Shopify/sarama [MIT License](https://github.com/Shopify/sarama/blob/master/LICENSE) diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 7ba457c59..0c967819f 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -7,11 +7,11 @@ import ( "strings" "github.com/Shopify/sarama" + "github.com/gofrs/uuid" "github.com/influxdata/telegraf" tlsint "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" - uuid "github.com/satori/go.uuid" ) var ValidTopicSuffixMethods = []string{ @@ -292,20 +292,23 @@ func (k *Kafka) Description() string { return "Configuration for the Kafka server to send metrics to" } -func (k *Kafka) routingKey(metric telegraf.Metric) string { +func (k *Kafka) routingKey(metric telegraf.Metric) (string, error) { if k.RoutingTag != "" { key, ok := metric.GetTag(k.RoutingTag) if ok { - return key + return key, nil } } if k.RoutingKey == "random" { - u := uuid.NewV4() - return u.String() + u, err := uuid.NewV4() + if err != nil { + return "", err + } + return u.String(), nil } - return k.RoutingKey + return k.RoutingKey, nil } func (k *Kafka) Write(metrics []telegraf.Metric) error { @@ -321,7 +324,12 @@ func (k *Kafka) Write(metrics []telegraf.Metric) error { Topic: k.GetTopicName(metric), Value: sarama.ByteEncoder(buf), } - key := k.routingKey(metric) + + key, err := k.routingKey(metric) + if err != nil { + return fmt.Errorf("could not generate routing key: %v", err) + } + if key != "" { m.Key = 
sarama.StringEncoder(key) } diff --git a/plugins/outputs/kafka/kafka_test.go b/plugins/outputs/kafka/kafka_test.go index ba900e32c..bac51c28d 100644 --- a/plugins/outputs/kafka/kafka_test.go +++ b/plugins/outputs/kafka/kafka_test.go @@ -150,7 +150,8 @@ func TestRoutingKey(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - key := tt.kafka.routingKey(tt.metric) + key, err := tt.kafka.routingKey(tt.metric) + require.NoError(t, err) tt.check(t, key) }) } diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index 1b7b747e9..d2d482ff3 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -6,11 +6,11 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/gofrs/uuid" "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/internal/config/aws" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" - "github.com/satori/go.uuid" ) type ( @@ -183,7 +183,10 @@ func (k *KinesisOutput) getPartitionKey(metric telegraf.Metric) string { case "static": return k.Partition.Key case "random": - u := uuid.NewV4() + u, err := uuid.NewV4() + if err != nil { + return k.Partition.Default + } return u.String() case "measurement": return metric.Name() @@ -200,7 +203,10 @@ func (k *KinesisOutput) getPartitionKey(metric telegraf.Metric) string { } } if k.RandomPartitionKey { - u := uuid.NewV4() + u, err := uuid.NewV4() + if err != nil { + return k.Partition.Default + } return u.String() } return k.PartitionKey diff --git a/plugins/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go index 627a459db..9d4f6729b 100644 --- a/plugins/outputs/kinesis/kinesis_test.go +++ b/plugins/outputs/kinesis/kinesis_test.go @@ -3,8 +3,8 @@ package kinesis import ( "testing" + "github.com/gofrs/uuid" "github.com/influxdata/telegraf/testutil" - uuid "github.com/satori/go.uuid" "github.com/stretchr/testify/assert" ) From d9ddd95b3caa9e0268e2be29bc409468dd03ef31 Mon Sep 17 00:00:00 2001 From: Phil Preston Date: Fri, 8 Nov 2019 19:55:37 +0000 Subject: [PATCH 1295/1815] Add ethtool input plugin (#5865) --- Gopkg.lock | 8 + Gopkg.toml | 4 + plugins/inputs/all/all.go | 1 + plugins/inputs/ethtool/README.md | 33 ++ plugins/inputs/ethtool/ethtool.go | 46 +++ plugins/inputs/ethtool/ethtool_linux.go | 136 ++++++++ plugins/inputs/ethtool/ethtool_nonlinux.go | 21 ++ plugins/inputs/ethtool/ethtool_test.go | 379 +++++++++++++++++++++ 8 files changed, 628 insertions(+) create mode 100644 plugins/inputs/ethtool/README.md create mode 100644 plugins/inputs/ethtool/ethtool.go create mode 100644 plugins/inputs/ethtool/ethtool_linux.go create mode 100644 plugins/inputs/ethtool/ethtool_nonlinux.go create mode 100644 plugins/inputs/ethtool/ethtool_test.go diff --git a/Gopkg.lock b/Gopkg.lock index aa35cb845..f9bba80b3 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1070,6 +1070,13 @@ pruneopts = "" revision = "e2704e165165ec55d062f5919b4b29494e9fa790" +[[projects]] + digest = "1:a18bd4e530f3f36fe91a5d1fd57d492f25287546e613f892d21c2b76b848517d" + name = "github.com/safchain/ethtool" + packages = ["."] + pruneopts = "" + revision = "42ed695e3de80b9d695f280295fd7994639f209d" + [[projects]] branch = "master" digest = "1:7fc2f428767a2521abc63f1a663d981f61610524275d6c0ea645defadd4e916f" @@ -1781,6 +1788,7 @@ "github.com/prometheus/client_golang/prometheus/promhttp", "github.com/prometheus/client_model/go", 
"github.com/prometheus/common/expfmt", + "github.com/safchain/ethtool", "github.com/shirou/gopsutil/cpu", "github.com/shirou/gopsutil/disk", "github.com/shirou/gopsutil/host", diff --git a/Gopkg.toml b/Gopkg.toml index 048f09403..f5eeaabcc 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -301,3 +301,7 @@ [[constraint]] name = "gopkg.in/ldap.v3" version = "3.1.0" + +[[constraint]] + name = "github.com/safchain/ethtool" + revision = "42ed695e3de80b9d695f280295fd7994639f209d" diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 693426642..a25ea3cd9 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -38,6 +38,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/dovecot" _ "github.com/influxdata/telegraf/plugins/inputs/ecs" _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch" + _ "github.com/influxdata/telegraf/plugins/inputs/ethtool" _ "github.com/influxdata/telegraf/plugins/inputs/exec" _ "github.com/influxdata/telegraf/plugins/inputs/fail2ban" _ "github.com/influxdata/telegraf/plugins/inputs/fibaro" diff --git a/plugins/inputs/ethtool/README.md b/plugins/inputs/ethtool/README.md new file mode 100644 index 000000000..3f397cdfb --- /dev/null +++ b/plugins/inputs/ethtool/README.md @@ -0,0 +1,33 @@ +# Ethtool Input Plugin + +The ethtool input plugin pulls ethernet device stats. Fields pulled will depend on the network device and driver + +### Configuration: + +```toml +# Returns ethtool statistics for given interfaces +[[inputs.ethtool]] + ## List of interfaces to pull metrics for + # interface_include = ["eth0"] + + ## List of interfaces to ignore when pulling metrics. + # interface_exclude = ["eth1"] +``` + +Interfaces can be included or ignored using + +- `interface_include` +- `interface_exclude` + +Note that loopback interfaces will be automatically ignored + +### Metrics: + +Metrics are dependant on the network device and driver + +### Example Output: + +``` +ethtool,driver=igb,host=test01,interface=mgmt0 
tx_queue_1_packets=280782i,rx_queue_5_csum_err=0i,tx_queue_4_restart=0i,tx_multicast=7i,tx_queue_1_bytes=39674885i,rx_queue_2_alloc_failed=0i,tx_queue_5_packets=173970i,tx_single_coll_ok=0i,rx_queue_1_drops=0i,tx_queue_2_restart=0i,tx_aborted_errors=0i,rx_queue_6_csum_err=0i,tx_queue_5_restart=0i,tx_queue_4_bytes=64810835i,tx_abort_late_coll=0i,tx_queue_4_packets=109102i,os2bmc_tx_by_bmc=0i,tx_bytes=427527435i,tx_queue_7_packets=66665i,dropped_smbus=0i,rx_queue_0_csum_err=0i,tx_flow_control_xoff=0i,rx_packets=25926536i,rx_queue_7_csum_err=0i,rx_queue_3_bytes=84326060i,rx_multicast=83771i,rx_queue_4_alloc_failed=0i,rx_queue_3_drops=0i,rx_queue_3_csum_err=0i,rx_errors=0i,tx_errors=0i,tx_queue_6_packets=183236i,rx_broadcast=24378893i,rx_queue_7_packets=88680i,tx_dropped=0i,rx_frame_errors=0i,tx_queue_3_packets=161045i,tx_packets=1257017i,rx_queue_1_csum_err=0i,tx_window_errors=0i,tx_dma_out_of_sync=0i,rx_length_errors=0i,rx_queue_5_drops=0i,tx_timeout_count=0i,rx_queue_4_csum_err=0i,rx_flow_control_xon=0i,tx_heartbeat_errors=0i,tx_flow_control_xon=0i,collisions=0i,tx_queue_0_bytes=29465801i,rx_queue_6_drops=0i,rx_queue_0_alloc_failed=0i,tx_queue_1_restart=0i,rx_queue_0_drops=0i,tx_broadcast=9i,tx_carrier_errors=0i,tx_queue_7_bytes=13777515i,tx_queue_7_restart=0i,rx_queue_5_bytes=50732006i,rx_queue_7_bytes=35744457i,tx_deferred_ok=0i,tx_multi_coll_ok=0i,rx_crc_errors=0i,rx_fifo_errors=0i,rx_queue_6_alloc_failed=0i,tx_queue_2_packets=175206i,tx_queue_0_packets=107011i,rx_queue_4_bytes=201364548i,rx_queue_6_packets=372573i,os2bmc_rx_by_host=0i,multicast=83771i,rx_queue_4_drops=0i,rx_queue_5_packets=130535i,rx_queue_6_bytes=139488035i,tx_fifo_errors=0i,tx_queue_5_bytes=84899130i,rx_queue_0_packets=24529563i,rx_queue_3_alloc_failed=0i,rx_queue_7_drops=0i,tx_queue_6_bytes=96288614i,tx_queue_2_bytes=22132949i,tx_tcp_seg_failed=0i,rx_queue_1_bytes=246703840i,rx_queue_0_bytes=1506870738i,tx_queue_0_restart=0i,rx_queue_2_bytes=111344804i,tx_tcp_seg_good=0i,tx_queue_3_restart=0i,rx_no_buffer_count=0i,rx_smbus=0i,rx_queue_1_packets=273865i,rx_over_errors=0i,os2bmc_tx_by_host=0i,rx_queue_1_alloc_failed=0i,rx_queue_7_alloc_failed=0i,rx_short_length_errors=0i,tx_hwtstamp_timeouts=0i,tx_queue_6_restart=0i,rx_queue_2_packets=207136i,tx_queue_3_bytes=70391970i,rx_queue_3_packets=112007i,rx_queue_4_packets=212177i,tx_smbus=0i,rx_long_byte_count=2480280632i,rx_queue_2_csum_err=0i,rx_missed_errors=0i,rx_bytes=2480280632i,rx_queue_5_alloc_failed=0i,rx_queue_2_drops=0i,os2bmc_rx_by_bmc=0i,rx_align_errors=0i,rx_long_length_errors=0i,rx_hwtstamp_cleared=0i,rx_flow_control_xoff=0i 1564658080000000000 +ethtool,driver=igb,host=test02,interface=mgmt0 
rx_queue_2_bytes=111344804i,tx_queue_3_bytes=70439858i,multicast=83771i,rx_broadcast=24378975i,tx_queue_0_packets=107011i,rx_queue_6_alloc_failed=0i,rx_queue_6_drops=0i,rx_hwtstamp_cleared=0i,tx_window_errors=0i,tx_tcp_seg_good=0i,rx_queue_1_drops=0i,tx_queue_1_restart=0i,rx_queue_7_csum_err=0i,rx_no_buffer_count=0i,tx_queue_1_bytes=39675245i,tx_queue_5_bytes=84899130i,tx_broadcast=9i,rx_queue_1_csum_err=0i,tx_flow_control_xoff=0i,rx_queue_6_csum_err=0i,tx_timeout_count=0i,os2bmc_tx_by_bmc=0i,rx_queue_6_packets=372577i,rx_queue_0_alloc_failed=0i,tx_flow_control_xon=0i,rx_queue_2_drops=0i,tx_queue_2_packets=175206i,rx_queue_3_csum_err=0i,tx_abort_late_coll=0i,tx_queue_5_restart=0i,tx_dropped=0i,rx_queue_2_alloc_failed=0i,tx_multi_coll_ok=0i,rx_queue_1_packets=273865i,rx_flow_control_xon=0i,tx_single_coll_ok=0i,rx_length_errors=0i,rx_queue_7_bytes=35744457i,rx_queue_4_alloc_failed=0i,rx_queue_6_bytes=139488395i,rx_queue_2_csum_err=0i,rx_long_byte_count=2480288216i,rx_queue_1_alloc_failed=0i,tx_queue_0_restart=0i,rx_queue_0_csum_err=0i,tx_queue_2_bytes=22132949i,rx_queue_5_drops=0i,tx_dma_out_of_sync=0i,rx_queue_3_drops=0i,rx_queue_4_packets=212177i,tx_queue_6_restart=0i,rx_packets=25926650i,rx_queue_7_packets=88680i,rx_frame_errors=0i,rx_queue_3_bytes=84326060i,rx_short_length_errors=0i,tx_queue_7_bytes=13777515i,rx_queue_3_alloc_failed=0i,tx_queue_6_packets=183236i,rx_queue_0_drops=0i,rx_multicast=83771i,rx_queue_2_packets=207136i,rx_queue_5_csum_err=0i,rx_queue_5_packets=130535i,rx_queue_7_alloc_failed=0i,tx_smbus=0i,tx_queue_3_packets=161081i,rx_queue_7_drops=0i,tx_queue_2_restart=0i,tx_multicast=7i,tx_fifo_errors=0i,tx_queue_3_restart=0i,rx_long_length_errors=0i,tx_queue_6_bytes=96288614i,tx_queue_1_packets=280786i,tx_tcp_seg_failed=0i,rx_align_errors=0i,tx_errors=0i,rx_crc_errors=0i,rx_queue_0_packets=24529673i,rx_flow_control_xoff=0i,tx_queue_0_bytes=29465801i,rx_over_errors=0i,rx_queue_4_drops=0i,os2bmc_rx_by_bmc=0i,rx_smbus=0i,dropped_smbus=0i,tx_hwtstamp_timeouts=0i,rx_errors=0i,tx_queue_4_packets=109102i,tx_carrier_errors=0i,tx_queue_4_bytes=64810835i,tx_queue_4_restart=0i,rx_queue_4_csum_err=0i,tx_queue_7_packets=66665i,tx_aborted_errors=0i,rx_missed_errors=0i,tx_bytes=427575843i,collisions=0i,rx_queue_1_bytes=246703840i,rx_queue_5_bytes=50732006i,rx_bytes=2480288216i,os2bmc_rx_by_host=0i,rx_queue_5_alloc_failed=0i,rx_queue_3_packets=112007i,tx_deferred_ok=0i,os2bmc_tx_by_host=0i,tx_heartbeat_errors=0i,rx_queue_0_bytes=1506877506i,tx_queue_7_restart=0i,tx_packets=1257057i,rx_queue_4_bytes=201364548i,rx_fifo_errors=0i,tx_queue_5_packets=173970i 1564658090000000000 +``` diff --git a/plugins/inputs/ethtool/ethtool.go b/plugins/inputs/ethtool/ethtool.go new file mode 100644 index 000000000..e8f6bfed4 --- /dev/null +++ b/plugins/inputs/ethtool/ethtool.go @@ -0,0 +1,46 @@ +package ethtool + +import ( + "net" +) + +type Command interface { + Init() error + DriverName(intf string) (string, error) + Interfaces() ([]net.Interface, error) + Stats(intf string) (map[string]uint64, error) +} + +type Ethtool struct { + // This is the list of interface names to include + InterfaceInclude []string `toml:"interface_include"` + + // This is the list of interface names to ignore + InterfaceExclude []string `toml:"interface_exclude"` + + // the ethtool command + command Command +} + +const ( + pluginName = "ethtool" + tagInterface = "interface" + tagDriverName = "driver" + + sampleConfig = ` + ## List of interfaces to pull metrics for + # interface_include = ["eth0"] + + ## List of interfaces to 
ignore when pulling metrics.
+	# interface_exclude = ["eth1"]
+`
+)
+
+func (e *Ethtool) SampleConfig() string {
+	return sampleConfig
+}
+
+// Description returns a one-sentence description on the Input
+func (e *Ethtool) Description() string {
+	return "Returns ethtool statistics for given interfaces"
+}
diff --git a/plugins/inputs/ethtool/ethtool_linux.go b/plugins/inputs/ethtool/ethtool_linux.go
new file mode 100644
index 000000000..b8c9312cb
--- /dev/null
+++ b/plugins/inputs/ethtool/ethtool_linux.go
@@ -0,0 +1,136 @@
+// +build linux
+
+package ethtool
+
+import (
+	"net"
+	"sync"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/filter"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"github.com/pkg/errors"
+	"github.com/safchain/ethtool"
+)
+
+type CommandEthtool struct {
+	ethtool *ethtool.Ethtool
+}
+
+func (e *Ethtool) Gather(acc telegraf.Accumulator) error {
+
+	// Get the list of interfaces
+	interfaces, err := e.command.Interfaces()
+	if err != nil {
+		acc.AddError(err)
+		return nil
+	}
+
+	interfaceFilter, err := filter.NewIncludeExcludeFilter(e.InterfaceInclude, e.InterfaceExclude)
+	if err != nil {
+		return err
+	}
+
+	// parallelize the ethtool call in event of many interfaces
+	var wg sync.WaitGroup
+
+	for _, iface := range interfaces {
+
+		// Check this isn't a loopback and that it's matched by the filter
+		if (iface.Flags&net.FlagLoopback == 0) && interfaceFilter.Match(iface.Name) {
+			wg.Add(1)
+
+			go func(i net.Interface) {
+				e.gatherEthtoolStats(i, acc)
+				wg.Done()
+			}(iface)
+		}
+	}
+
+	// Waiting for all the interfaces
+	wg.Wait()
+	return nil
+}
+
+// Initialise the Command Tool
+func (e *Ethtool) Init() error {
+	return e.command.Init()
+}
+
+// Gather the stats for the interface.
+func (e *Ethtool) gatherEthtoolStats(iface net.Interface, acc telegraf.Accumulator) {
+
+	tags := make(map[string]string)
+	tags[tagInterface] = iface.Name
+
+	driverName, err := e.command.DriverName(iface.Name)
+	if err != nil {
+		driverErr := errors.Wrapf(err, "%s driver", iface.Name)
+		acc.AddError(driverErr)
+		return
+	}
+
+	tags[tagDriverName] = driverName
+
+	fields := make(map[string]interface{})
+	stats, err := e.command.Stats(iface.Name)
+	if err != nil {
+		statsErr := errors.Wrapf(err, "%s stats", iface.Name)
+		acc.AddError(statsErr)
+		return
+	}
+
+	for k, v := range stats {
+		fields[k] = v
+	}
+
+	acc.AddFields(pluginName, fields, tags)
+}
+
+func NewCommandEthtool() *CommandEthtool {
+	return &CommandEthtool{}
+}
+
+func (c *CommandEthtool) Init() error {
+
+	if c.ethtool != nil {
+		return nil
+	}
+
+	e, err := ethtool.NewEthtool()
+	if err == nil {
+		c.ethtool = e
+	}
+
+	return err
+}
+
+func (c *CommandEthtool) DriverName(intf string) (string, error) {
+	return c.ethtool.DriverName(intf)
+}
+
+func (c *CommandEthtool) Stats(intf string) (map[string]uint64, error) {
+	return c.ethtool.Stats(intf)
+}
+
+func (c *CommandEthtool) Interfaces() ([]net.Interface, error) {
+
+	// Get the list of interfaces
+	interfaces, err := net.Interfaces()
+	if err != nil {
+		return nil, err
+	}
+
+	return interfaces, nil
+}
+
+func init() {
+
+	inputs.Add(pluginName, func() telegraf.Input {
+		return &Ethtool{
+			InterfaceInclude: []string{},
+			InterfaceExclude: []string{},
+			command:          NewCommandEthtool(),
+		}
+	})
+}
diff --git a/plugins/inputs/ethtool/ethtool_nonlinux.go b/plugins/inputs/ethtool/ethtool_nonlinux.go
new file mode 100644
index 000000000..62a0de3c1
--- /dev/null
+++ b/plugins/inputs/ethtool/ethtool_nonlinux.go
@@ -0,0 +1,21 @@
+// +build !linux
+
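+// ethtool statistics are read through Linux-specific ioctls (via
+// github.com/safchain/ethtool), so on every other platform this file
+// registers the plugin with a no-op Gather and a startup warning instead.
+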
+package ethtool + +import ( + "log" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +func (e *Ethtool) Gather(acc telegraf.Accumulator) error { + return nil +} + +func init() { + inputs.Add(pluginName, func() telegraf.Input { + log.Print("W! [inputs.ethtool] Current platform is not supported") + return &Ethtool{} + }) +} diff --git a/plugins/inputs/ethtool/ethtool_test.go b/plugins/inputs/ethtool/ethtool_test.go new file mode 100644 index 000000000..c151c9cae --- /dev/null +++ b/plugins/inputs/ethtool/ethtool_test.go @@ -0,0 +1,379 @@ +package ethtool + +import ( + "net" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +var command *Ethtool +var interfaceMap map[string]*InterfaceMock + +type InterfaceMock struct { + Name string + DriverName string + Stat map[string]uint64 + LoopBack bool +} + +type CommandEthtoolMock struct { + InterfaceMap map[string]*InterfaceMock +} + +func (c *CommandEthtoolMock) Init() error { + // Not required for test mock + return nil +} + +func (c *CommandEthtoolMock) DriverName(intf string) (driverName string, err error) { + i := c.InterfaceMap[intf] + if i != nil { + driverName = i.DriverName + return + } + return driverName, errors.New("interface not found") +} + +func (c *CommandEthtoolMock) Interfaces() ([]net.Interface, error) { + interfaceNames := make([]net.Interface, 0) + for k, v := range c.InterfaceMap { + + // Whether to set the flag to loopback + flag := net.FlagUp + if v.LoopBack { + flag = net.FlagLoopback + } + + // Create a dummy interface + iface := net.Interface{ + Index: 0, + MTU: 1500, + Name: k, + HardwareAddr: nil, + Flags: flag, + } + interfaceNames = append(interfaceNames, iface) + } + return interfaceNames, nil +} + +func (c *CommandEthtoolMock) Stats(intf string) (stat map[string]uint64, err error) { + i := c.InterfaceMap[intf] + if i != nil { + stat = i.Stat + return + } + return stat, errors.New("interface not found") +} + +func setup() { + + interfaceMap = make(map[string]*InterfaceMock) + + eth1Stat := map[string]uint64{ + "port_rx_1024_to_15xx": 25167245, + "port_rx_128_to_255": 1573526387, + "port_rx_15xx_to_jumbo": 137819058, + "port_rx_256_to_511": 772038107, + "port_rx_512_to_1023": 78294457, + "port_rx_64": 8798065, + "port_rx_65_to_127": 450348015, + "port_rx_bad": 0, + "port_rx_bad_bytes": 0, + "port_rx_bad_gtjumbo": 0, + "port_rx_broadcast": 6428250, + "port_rx_bytes": 893460472634, + "port_rx_control": 0, + "port_rx_dp_di_dropped_packets": 2772680304, + "port_rx_dp_hlb_fetch": 0, + "port_rx_dp_hlb_wait": 0, + "port_rx_dp_q_disabled_packets": 0, + "port_rx_dp_streaming_packets": 0, + "port_rx_good": 3045991334, + "port_rx_good_bytes": 893460472927, + "port_rx_gtjumbo": 0, + "port_rx_lt64": 0, + "port_rx_multicast": 1639566045, + "port_rx_nodesc_drops": 0, + "port_rx_overflow": 0, + "port_rx_packets": 3045991334, + "port_rx_pause": 0, + "port_rx_pm_discard_bb_overflow": 0, + "port_rx_pm_discard_mapping": 0, + "port_rx_pm_discard_qbb": 0, + "port_rx_pm_discard_vfifo_full": 0, + "port_rx_pm_trunc_bb_overflow": 0, + "port_rx_pm_trunc_qbb": 0, + "port_rx_pm_trunc_vfifo_full": 0, + "port_rx_unicast": 1399997040, + "port_tx_1024_to_15xx": 236, + "port_tx_128_to_255": 275090219, + "port_tx_15xx_to_jumbo": 926, + "port_tx_256_to_511": 48567221, + "port_tx_512_to_1023": 5142016, + "port_tx_64": 113903973, + "port_tx_65_to_127": 161935699, + "port_tx_broadcast": 8, + "port_tx_bytes": 94357131016, + 
"port_tx_control": 0, + "port_tx_lt64": 0, + "port_tx_multicast": 325891647, + "port_tx_packets": 604640290, + "port_tx_pause": 0, + "port_tx_unicast": 278748635, + "ptp_bad_syncs": 1, + "ptp_fast_syncs": 1, + "ptp_filter_matches": 0, + "ptp_good_syncs": 136151, + "ptp_invalid_sync_windows": 0, + "ptp_no_time_syncs": 1, + "ptp_non_filter_matches": 0, + "ptp_oversize_sync_windows": 53, + "ptp_rx_no_timestamp": 0, + "ptp_rx_timestamp_packets": 0, + "ptp_sync_timeouts": 1, + "ptp_timestamp_packets": 0, + "ptp_tx_timestamp_packets": 0, + "ptp_undersize_sync_windows": 3, + "rx-0.rx_packets": 55659234, + "rx-1.rx_packets": 87880538, + "rx-2.rx_packets": 26746234, + "rx-3.rx_packets": 103026471, + "rx-4.rx_packets": 0, + "rx_eth_crc_err": 0, + "rx_frm_trunc": 0, + "rx_inner_ip_hdr_chksum_err": 0, + "rx_inner_tcp_udp_chksum_err": 0, + "rx_ip_hdr_chksum_err": 0, + "rx_mcast_mismatch": 0, + "rx_merge_events": 0, + "rx_merge_packets": 0, + "rx_nodesc_trunc": 0, + "rx_noskb_drops": 0, + "rx_outer_ip_hdr_chksum_err": 0, + "rx_outer_tcp_udp_chksum_err": 0, + "rx_reset": 0, + "rx_tcp_udp_chksum_err": 0, + "rx_tobe_disc": 0, + "tx-0.tx_packets": 85843565, + "tx-1.tx_packets": 108642725, + "tx-2.tx_packets": 202596078, + "tx-3.tx_packets": 207561010, + "tx-4.tx_packets": 0, + "tx_cb_packets": 4, + "tx_merge_events": 11025, + "tx_pio_packets": 531928114, + "tx_pushes": 604643378, + "tx_tso_bursts": 0, + "tx_tso_fallbacks": 0, + "tx_tso_long_headers": 0, + } + eth1 := &InterfaceMock{"eth1", "driver1", eth1Stat, false} + interfaceMap[eth1.Name] = eth1 + + eth2Stat := map[string]uint64{ + "port_rx_1024_to_15xx": 11529312, + "port_rx_128_to_255": 1868952037, + "port_rx_15xx_to_jumbo": 130339387, + "port_rx_256_to_511": 843846270, + "port_rx_512_to_1023": 173194372, + "port_rx_64": 9190374, + "port_rx_65_to_127": 507806115, + "port_rx_bad": 0, + "port_rx_bad_bytes": 0, + "port_rx_bad_gtjumbo": 0, + "port_rx_broadcast": 6648019, + "port_rx_bytes": 1007358162202, + "port_rx_control": 0, + "port_rx_dp_di_dropped_packets": 3164124639, + "port_rx_dp_hlb_fetch": 0, + "port_rx_dp_hlb_wait": 0, + "port_rx_dp_q_disabled_packets": 0, + "port_rx_dp_streaming_packets": 0, + "port_rx_good": 3544857867, + "port_rx_good_bytes": 1007358162202, + "port_rx_gtjumbo": 0, + "port_rx_lt64": 0, + "port_rx_multicast": 2231999743, + "port_rx_nodesc_drops": 0, + "port_rx_overflow": 0, + "port_rx_packets": 3544857867, + "port_rx_pause": 0, + "port_rx_pm_discard_bb_overflow": 0, + "port_rx_pm_discard_mapping": 0, + "port_rx_pm_discard_qbb": 0, + "port_rx_pm_discard_vfifo_full": 0, + "port_rx_pm_trunc_bb_overflow": 0, + "port_rx_pm_trunc_qbb": 0, + "port_rx_pm_trunc_vfifo_full": 0, + "port_rx_unicast": 1306210105, + "port_tx_1024_to_15xx": 379, + "port_tx_128_to_255": 202767251, + "port_tx_15xx_to_jumbo": 558, + "port_tx_256_to_511": 31454719, + "port_tx_512_to_1023": 6865731, + "port_tx_64": 17268276, + "port_tx_65_to_127": 272816313, + "port_tx_broadcast": 6, + "port_tx_bytes": 78071946593, + "port_tx_control": 0, + "port_tx_lt64": 0, + "port_tx_multicast": 239510586, + "port_tx_packets": 531173227, + "port_tx_pause": 0, + "port_tx_unicast": 291662635, + "ptp_bad_syncs": 0, + "ptp_fast_syncs": 0, + "ptp_filter_matches": 0, + "ptp_good_syncs": 0, + "ptp_invalid_sync_windows": 0, + "ptp_no_time_syncs": 0, + "ptp_non_filter_matches": 0, + "ptp_oversize_sync_windows": 0, + "ptp_rx_no_timestamp": 0, + "ptp_rx_timestamp_packets": 0, + "ptp_sync_timeouts": 0, + "ptp_timestamp_packets": 0, + "ptp_tx_timestamp_packets": 0, + 
"ptp_undersize_sync_windows": 0, + "rx-0.rx_packets": 84587075, + "rx-1.rx_packets": 74029305, + "rx-2.rx_packets": 134586471, + "rx-3.rx_packets": 87531322, + "rx-4.rx_packets": 0, + "rx_eth_crc_err": 0, + "rx_frm_trunc": 0, + "rx_inner_ip_hdr_chksum_err": 0, + "rx_inner_tcp_udp_chksum_err": 0, + "rx_ip_hdr_chksum_err": 0, + "rx_mcast_mismatch": 0, + "rx_merge_events": 0, + "rx_merge_packets": 0, + "rx_nodesc_trunc": 0, + "rx_noskb_drops": 0, + "rx_outer_ip_hdr_chksum_err": 0, + "rx_outer_tcp_udp_chksum_err": 0, + "rx_reset": 0, + "rx_tcp_udp_chksum_err": 0, + "rx_tobe_disc": 0, + "tx-0.tx_packets": 232521451, + "tx-1.tx_packets": 97876137, + "tx-2.tx_packets": 106822111, + "tx-3.tx_packets": 93955050, + "tx-4.tx_packets": 0, + "tx_cb_packets": 1, + "tx_merge_events": 8402, + "tx_pio_packets": 481040054, + "tx_pushes": 531174491, + "tx_tso_bursts": 128, + "tx_tso_fallbacks": 0, + "tx_tso_long_headers": 0, + } + eth2 := &InterfaceMock{"eth2", "driver1", eth2Stat, false} + interfaceMap[eth2.Name] = eth2 + + // dummy loopback including dummy stat to ensure that the ignore feature is working + lo0Stat := map[string]uint64{ + "dummy": 0, + } + lo0 := &InterfaceMock{"lo0", "", lo0Stat, true} + interfaceMap[lo0.Name] = lo0 + + c := &CommandEthtoolMock{interfaceMap} + command = &Ethtool{ + InterfaceInclude: []string{}, + InterfaceExclude: []string{}, + command: c, + } +} + +func toStringMapInterface(in map[string]uint64) map[string]interface{} { + var m = map[string]interface{}{} + for k, v := range in { + m[k] = v + } + return m +} + +func TestGather(t *testing.T) { + + setup() + var acc testutil.Accumulator + + err := command.Gather(&acc) + assert.NoError(t, err) + assert.Len(t, acc.Metrics, 2) + + expectedFieldsEth1 := toStringMapInterface(interfaceMap["eth1"].Stat) + expectedTagsEth1 := map[string]string{ + "interface": "eth1", + "driver": "driver1", + } + acc.AssertContainsTaggedFields(t, pluginName, expectedFieldsEth1, expectedTagsEth1) + expectedFieldsEth2 := toStringMapInterface(interfaceMap["eth2"].Stat) + expectedTagsEth2 := map[string]string{ + "interface": "eth2", + "driver": "driver1", + } + acc.AssertContainsTaggedFields(t, pluginName, expectedFieldsEth2, expectedTagsEth2) +} + +func TestGatherIncludeInterfaces(t *testing.T) { + + setup() + var acc testutil.Accumulator + + command.InterfaceInclude = append(command.InterfaceInclude, "eth1") + + err := command.Gather(&acc) + assert.NoError(t, err) + assert.Len(t, acc.Metrics, 1) + + // Should contain eth1 + expectedFieldsEth1 := toStringMapInterface(interfaceMap["eth1"].Stat) + expectedTagsEth1 := map[string]string{ + "interface": "eth1", + "driver": "driver1", + } + acc.AssertContainsTaggedFields(t, pluginName, expectedFieldsEth1, expectedTagsEth1) + + // Should not contain eth2 + expectedFieldsEth2 := toStringMapInterface(interfaceMap["eth2"].Stat) + expectedTagsEth2 := map[string]string{ + "interface": "eth2", + "driver": "driver1", + } + acc.AssertDoesNotContainsTaggedFields(t, pluginName, expectedFieldsEth2, expectedTagsEth2) +} + +func TestGatherIgnoreInterfaces(t *testing.T) { + + setup() + var acc testutil.Accumulator + + command.InterfaceExclude = append(command.InterfaceExclude, "eth1") + + err := command.Gather(&acc) + assert.NoError(t, err) + assert.Len(t, acc.Metrics, 1) + + // Should not contain eth1 + expectedFieldsEth1 := toStringMapInterface(interfaceMap["eth1"].Stat) + expectedTagsEth1 := map[string]string{ + "interface": "eth1", + "driver": "driver1", + } + acc.AssertDoesNotContainsTaggedFields(t, pluginName, 
expectedFieldsEth1, expectedTagsEth1) + + // Should contain eth2 + expectedFieldsEth2 := toStringMapInterface(interfaceMap["eth2"].Stat) + expectedTagsEth2 := map[string]string{ + "interface": "eth2", + "driver": "driver1", + } + acc.AssertContainsTaggedFields(t, pluginName, expectedFieldsEth2, expectedTagsEth2) + +} From 6fd1453942ed4eb3e314da6bf6fd980107a5c90b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 8 Nov 2019 12:02:17 -0800 Subject: [PATCH 1296/1815] Update changelog --- CHANGELOG.md | 1 + README.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5e9947be2..731880c36 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ #### New Inputs - [azure_storage_queue](/plugins/inputs/azure_storage_queue/README.md) - Contributed by @mjiderhamn +- [ethtool](/plugins/inputs/ethtool/README.md) - Contributed by @philippreston - [suricata](/plugins/inputs/suricata/README.md) - Contributed by @satta #### New Processors diff --git a/README.md b/README.md index eb35705f2..da300a71f 100644 --- a/README.md +++ b/README.md @@ -173,6 +173,7 @@ For documentation on the latest development code see the [documentation index][d * [dovecot](./plugins/inputs/dovecot) * [ecs](./plugins/inputs/ecs) (Amazon Elastic Container Service, Fargate) * [elasticsearch](./plugins/inputs/elasticsearch) +* [ethtool](./plugins/inputs/ethtool) * [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios) * [fail2ban](./plugins/inputs/fail2ban) * [fibaro](./plugins/inputs/fibaro) From 4d08f2f4042a9b0497cd1d84badd7c705b323458 Mon Sep 17 00:00:00 2001 From: Lyle Hanson Date: Fri, 8 Nov 2019 14:10:16 -0600 Subject: [PATCH 1297/1815] Use 1h or 3h rain values as appropriate (#6593) --- plugins/inputs/openweathermap/README.md | 2 +- .../inputs/openweathermap/openweathermap.go | 12 +- .../openweathermap/openweathermap_test.go | 287 +++++++++++++++++- 3 files changed, 295 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/openweathermap/README.md b/plugins/inputs/openweathermap/README.md index b2a9d1c0a..85803f76a 100644 --- a/plugins/inputs/openweathermap/README.md +++ b/plugins/inputs/openweathermap/README.md @@ -56,7 +56,7 @@ condition ID, icon, and main is at [weather conditions][]. 
- cloudiness (int, percent) - humidity (int, percent) - pressure (float, atmospheric pressure hPa) - - rain (float, rain volume for the last 3 hours in mm) + - rain (float, rain volume for the last 1-3 hours (depending on API response) in mm) - sunrise (int, nanoseconds since unix epoch) - sunset (int, nanoseconds since unix epoch) - temperature (float, degrees) diff --git a/plugins/inputs/openweathermap/openweathermap.go b/plugins/inputs/openweathermap/openweathermap.go index 079973ddd..94055a6f8 100644 --- a/plugins/inputs/openweathermap/openweathermap.go +++ b/plugins/inputs/openweathermap/openweathermap.go @@ -179,6 +179,7 @@ type WeatherEntry struct { Temp float64 `json:"temp"` } `json:"main"` Rain struct { + Rain1 float64 `json:"1h"` Rain3 float64 `json:"3h"` } `json:"rain"` Sys struct { @@ -227,6 +228,13 @@ func gatherWeatherUrl(r io.Reader) (*Status, error) { return status, nil } +func gatherRain(e WeatherEntry) float64 { + if e.Rain.Rain1 > 0 { + return e.Rain.Rain1 + } + return e.Rain.Rain3 +} + func gatherWeather(acc telegraf.Accumulator, status *Status) { for _, e := range status.List { tm := time.Unix(e.Dt, 0) @@ -235,7 +243,7 @@ func gatherWeather(acc telegraf.Accumulator, status *Status) { "cloudiness": e.Clouds.All, "humidity": e.Main.Humidity, "pressure": e.Main.Pressure, - "rain": e.Rain.Rain3, + "rain": gatherRain(e), "sunrise": time.Unix(e.Sys.Sunrise, 0).UnixNano(), "sunset": time.Unix(e.Sys.Sunset, 0).UnixNano(), "temperature": e.Main.Temp, @@ -274,7 +282,7 @@ func gatherForecast(acc telegraf.Accumulator, status *Status) { "cloudiness": e.Clouds.All, "humidity": e.Main.Humidity, "pressure": e.Main.Pressure, - "rain": e.Rain.Rain3, + "rain": gatherRain(e), "temperature": e.Main.Temp, "wind_degrees": e.Wind.Deg, "wind_speed": e.Wind.Speed, diff --git a/plugins/inputs/openweathermap/openweathermap_test.go b/plugins/inputs/openweathermap/openweathermap_test.go index 20a00e5db..9bee1d2e9 100644 --- a/plugins/inputs/openweathermap/openweathermap_test.go +++ b/plugins/inputs/openweathermap/openweathermap_test.go @@ -106,9 +106,9 @@ const groupWeatherResponse = ` { "cnt": 1, "list": [{ - "clouds": { - "all": 0 - }, + "clouds": { + "all": 0 + }, "coord": { "lat": 48.85, "lon": 2.35 @@ -146,6 +146,145 @@ const groupWeatherResponse = ` } ` +const rainWeatherResponse = ` +{ + "cnt": 2, + "list": [{ + "dt": 1544194800, + "id": 111, + "main": { + "humidity": 87, + "pressure": 1007, + "temp": 9.25 + }, + "name": "Paris", + "sys": { + "country": "FR", + "id": 6550, + "message": 0.002, + "sunrise": 1544167818, + "sunset": 1544198047, + "type": 1 + }, + "visibility": 10000, + "weather": [ + { + "description": "light intensity drizzle", + "icon": "09d", + "id": 300, + "main": "Drizzle" + } + ], + "rain": { + "1h": 1.000 + }, + "wind": { + "deg": 290, + "speed": 8.7 + } + }, + { + "dt": 1544194800, + "id": 222, + "main": { + "humidity": 87, + "pressure": 1007, + "temp": 9.25 + }, + "name": "Paris", + "sys": { + "country": "FR", + "id": 6550, + "message": 0.002, + "sunrise": 1544167818, + "sunset": 1544198047, + "type": 1 + }, + "visibility": 10000, + "weather": [ + { + "description": "light intensity drizzle", + "icon": "09d", + "id": 300, + "main": "Drizzle" + } + ], + "rain": { + "3h": 3.000 + }, + "wind": { + "deg": 290, + "speed": 8.7 + } + }, + { + "dt": 1544194800, + "id": 333, + "main": { + "humidity": 87, + "pressure": 1007, + "temp": 9.25 + }, + "name": "Paris", + "sys": { + "country": "FR", + "id": 6550, + "message": 0.002, + "sunrise": 1544167818, + "sunset": 1544198047, + 
"type": 1 + }, + "visibility": 10000, + "weather": [ + { + "description": "light intensity drizzle", + "icon": "09d", + "id": 300, + "main": "Drizzle" + } + ], + "rain": { + "1h": 1.300, + "3h": 999 + }, + "wind": { + "deg": 290, + "speed": 8.7 + } + }, + { + "dt": 1544194800, + "id": 444, + "main": { + "humidity": 87, + "pressure": 1007, + "temp": 9.25 + }, + "name": "Paris", + "sys": { + "country": "FR", + "id": 6550, + "message": 0.002, + "sunrise": 1544167818, + "sunset": 1544198047, + "type": 1 + }, + "visibility": 10000, + "weather": [ + { + "description": "light intensity drizzle", + "icon": "09d", + "id": 300, + "main": "Drizzle" + } + ], + "wind": { + "deg": 290, + "speed": 8.7 + } + }] +} +` const batchWeatherResponse = ` { "cnt": 3, @@ -405,6 +544,148 @@ func TestWeatherGeneratesMetrics(t *testing.T) { testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) } +// Ensure that results containing "1h", "3h", both, or no rain values are parsed correctly +func TestRainMetrics(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var rsp string + if r.URL.Path == "/data/2.5/group" { + rsp = rainWeatherResponse + w.Header()["Content-Type"] = []string{"application/json"} + } else { + panic("Cannot handle request") + } + + fmt.Fprintln(w, rsp) + })) + defer ts.Close() + + n := &OpenWeatherMap{ + BaseUrl: ts.URL, + AppId: "noappid", + CityId: []string{"111", "222", "333", "444"}, + Fetch: []string{"weather"}, + Units: "metric", + } + n.Init() + + var acc testutil.Accumulator + + err := n.Gather(&acc) + require.NoError(t, err) + + expected := []telegraf.Metric{ + // City with 1h rain value + testutil.MustMetric( + "weather", + map[string]string{ + "city_id": "111", + "forecast": "*", + "city": "Paris", + "country": "FR", + "condition_id": "300", + "condition_main": "Drizzle", + }, + map[string]interface{}{ + "cloudiness": int64(0), + "humidity": int64(87), + "pressure": 1007.0, + "temperature": 9.25, + "rain": 1.0, + "sunrise": int64(1544167818000000000), + "sunset": int64(1544198047000000000), + "wind_degrees": 290.0, + "wind_speed": 8.7, + "visibility": 10000, + "condition_description": "light intensity drizzle", + "condition_icon": "09d", + }, + time.Unix(1544194800, 0), + ), + // City with 3h rain value + testutil.MustMetric( + "weather", + map[string]string{ + "city_id": "222", + "forecast": "*", + "city": "Paris", + "country": "FR", + "condition_id": "300", + "condition_main": "Drizzle", + }, + map[string]interface{}{ + "cloudiness": int64(0), + "humidity": int64(87), + "pressure": 1007.0, + "temperature": 9.25, + "rain": 3.0, + "sunrise": int64(1544167818000000000), + "sunset": int64(1544198047000000000), + "wind_degrees": 290.0, + "wind_speed": 8.7, + "visibility": 10000, + "condition_description": "light intensity drizzle", + "condition_icon": "09d", + }, + time.Unix(1544194800, 0), + ), + // City with both 1h and 3h rain values, prefer the 1h value + testutil.MustMetric( + "weather", + map[string]string{ + "city_id": "333", + "forecast": "*", + "city": "Paris", + "country": "FR", + "condition_id": "300", + "condition_main": "Drizzle", + }, + map[string]interface{}{ + "cloudiness": int64(0), + "humidity": int64(87), + "pressure": 1007.0, + "temperature": 9.25, + "rain": 1.3, + "sunrise": int64(1544167818000000000), + "sunset": int64(1544198047000000000), + "wind_degrees": 290.0, + "wind_speed": 8.7, + "visibility": 10000, + "condition_description": "light intensity drizzle", + "condition_icon": "09d", + }, + 
time.Unix(1544194800, 0), + ), + // City with no rain values + testutil.MustMetric( + "weather", + map[string]string{ + "city_id": "444", + "forecast": "*", + "city": "Paris", + "country": "FR", + "condition_id": "300", + "condition_main": "Drizzle", + }, + map[string]interface{}{ + "cloudiness": int64(0), + "humidity": int64(87), + "pressure": 1007.0, + "temperature": 9.25, + "rain": 0.0, + "sunrise": int64(1544167818000000000), + "sunset": int64(1544198047000000000), + "wind_degrees": 290.0, + "wind_speed": 8.7, + "visibility": 10000, + "condition_description": "light intensity drizzle", + "condition_icon": "09d", + }, + time.Unix(1544194800, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + func TestBatchWeatherGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string From 58df6f6c94298283717dcada3e1313d704226ad4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 8 Nov 2019 12:12:00 -0800 Subject: [PATCH 1298/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 731880c36..804900018 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,6 +43,7 @@ #### Bugfixes - [#6484](https://github.com/influxdata/telegraf/issues/6484): Show correct default settings in mysql sample config. +- [#6583](https://github.com/influxdata/telegraf/issues/6583): Use 1h or 3h rain values as appropriate in openweathermap input. ## v1.12.5 [unreleased] From 8cba5941bee24e0e5b1c5a8d2430fb307125ffd8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 11 Nov 2019 17:03:03 -0800 Subject: [PATCH 1299/1815] Skip logging when logfile is unset (#6648) --- logger/logger.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/logger/logger.go b/logger/logger.go index a52e709a8..a276d2e80 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -2,7 +2,6 @@ package logger import ( "errors" - "io" "log" "os" @@ -112,7 +111,6 @@ func (t *telegrafLogCreator) CreateLogger(config LogConfig) (io.Writer, error) { writer = defaultWriter } } else { - log.Print("E! Empty logfile, using stderr") writer = defaultWriter } case LogTargetStderr, "": From e2fde882c7e7079ce933d93086c859e6edf2a218 Mon Sep 17 00:00:00 2001 From: pertu Date: Tue, 12 Nov 2019 22:45:09 +0300 Subject: [PATCH 1300/1815] Fix mongodb total_created field naming (#6643) --- plugins/inputs/mongodb/mongostat.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 44c071a2f..d75ff9fb0 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -252,7 +252,7 @@ type FlushStats struct { type ConnectionStats struct { Current int64 `bson:"current"` Available int64 `bson:"available"` - TotalCreated int64 `bson:"total_created"` + TotalCreated int64 `bson:"totalCreated"` } // DurTiming stores information related to journaling. From eb93dab70b4e2b31bc61ef0f0c5cf0181ec153e4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 12 Nov 2019 11:47:04 -0800 Subject: [PATCH 1301/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 804900018..44b62a329 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -53,6 +53,7 @@ - [#6610](https://github.com/influxdata/telegraf/pull/6610): Add missing character replacement to sql_instance tag. 
- [#6337](https://github.com/influxdata/telegraf/issues/6337): Change no metric error message to debug level in cloudwatch input. - [#6602](https://github.com/influxdata/telegraf/issues/6602): Add missing ServerProperties query to sqlserver input docs. +- [#6643](https://github.com/influxdata/telegraf/pull/6643): Fix mongodb connections_total_created field loading. ## v1.12.4 [2019-10-23] From d858d82a8554e1090acbaf58f6156fe56ae450d6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 12 Nov 2019 11:55:53 -0800 Subject: [PATCH 1302/1815] Fix known mysql type conversion issues (#6647) --- plugins/inputs/mysql/mysql.go | 28 ++++++- plugins/inputs/mysql/v2/convert.go | 103 ++++++++++++++++++++++++ plugins/inputs/mysql/v2/convert_test.go | 86 ++++++++++++++++++++ 3 files changed, 215 insertions(+), 2 deletions(-) create mode 100644 plugins/inputs/mysql/v2/convert.go create mode 100644 plugins/inputs/mysql/v2/convert_test.go diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 965170301..3ca955beb 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -14,6 +14,7 @@ import ( "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/mysql/v1" + "github.com/influxdata/telegraf/plugins/inputs/mysql/v2" ) type Mysql struct { @@ -37,6 +38,8 @@ type Mysql struct { GatherPerfEventsStatements bool `toml:"gather_perf_events_statements"` IntervalSlow string `toml:"interval_slow"` MetricVersion int `toml:"metric_version"` + + Log telegraf.Logger `toml:"-"` tls.ClientConfig lastT time.Time initDone bool @@ -554,14 +557,20 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu return err } key = strings.ToLower(key) + // parse mysql version and put into field and tag if strings.Contains(key, "version") { fields[key] = string(val) tags[key] = string(val) } - if value, ok := m.parseValue(val); ok { + + value, err := m.parseGlobalVariables(key, val) + if err != nil { + m.Log.Debugf("Error parsing global variable %q: %v", key, err) + } else { fields[key] = value } + // Send 20 fields at a time if len(fields) >= 20 { acc.AddFields("mysql_variables", fields, tags) @@ -575,6 +584,18 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu return nil } +func (m *Mysql) parseGlobalVariables(key string, value sql.RawBytes) (interface{}, error) { + if m.MetricVersion < 2 { + v, ok := v1.ParseValue(value) + if ok { + return v, nil + } + return v, fmt.Errorf("could not parse value: %q", string(value)) + } else { + return v2.ConvertGlobalVariables(key, value) + } +} + // gatherSlaveStatuses can be used to get replication analytics // When the server is slave, then it returns only one row. 
// If the multi-source replication is set, then everything works differently @@ -748,7 +769,10 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum } } else { key = strings.ToLower(key) - if value, ok := m.parseValue(val); ok { + value, err := v2.ConvertGlobalStatus(key, val) + if err != nil { + m.Log.Debugf("Error parsing global status: %v", err) + } else { fields[key] = value } } diff --git a/plugins/inputs/mysql/v2/convert.go b/plugins/inputs/mysql/v2/convert.go new file mode 100644 index 000000000..a3ac3e976 --- /dev/null +++ b/plugins/inputs/mysql/v2/convert.go @@ -0,0 +1,103 @@ +package v2 + +import ( + "bytes" + "database/sql" + "fmt" + "strconv" +) + +type ConversionFunc func(value sql.RawBytes) (interface{}, error) + +func ParseInt(value sql.RawBytes) (interface{}, error) { + v, err := strconv.ParseInt(string(value), 10, 64) + + // Ignore ErrRange. When this error is set the returned value is "the + // maximum magnitude integer of the appropriate bitSize and sign." + if err, ok := err.(*strconv.NumError); ok && err.Err == strconv.ErrRange { + return v, nil + } + + return v, err +} + +func ParseBoolAsInteger(value sql.RawBytes) (interface{}, error) { + if bytes.EqualFold(value, []byte("YES")) || bytes.EqualFold(value, []byte("ON")) { + return int64(1), nil + } + + return int64(0), nil +} + +func ParseGTIDMode(value sql.RawBytes) (interface{}, error) { + // https://dev.mysql.com/doc/refman/8.0/en/replication-mode-change-online-concepts.html + v := string(value) + switch v { + case "OFF": + return int64(0), nil + case "ON": + return int64(1), nil + case "OFF_PERMISSIVE": + return int64(0), nil + case "ON_PERMISSIVE": + return int64(1), nil + default: + return nil, fmt.Errorf("unrecognized gtid_mode: %q", v) + } +} + +func ParseValue(value sql.RawBytes) (interface{}, error) { + if bytes.EqualFold(value, []byte("YES")) || bytes.Compare(value, []byte("ON")) == 0 { + return 1, nil + } + + if bytes.EqualFold(value, []byte("NO")) || bytes.Compare(value, []byte("OFF")) == 0 { + return 0, nil + } + + if val, err := strconv.ParseInt(string(value), 10, 64); err == nil { + return val, nil + } + if val, err := strconv.ParseFloat(string(value), 64); err == nil { + return val, nil + } + + if len(string(value)) > 0 { + return string(value), nil + } + + return nil, fmt.Errorf("unconvertible value: %q", string(value)) +} + +var GlobalStatusConversions = map[string]ConversionFunc{ + "ssl_ctx_verify_depth": ParseInt, + "ssl_verify_depth": ParseInt, +} + +var GlobalVariableConversions = map[string]ConversionFunc{ + "gtid_mode": ParseGTIDMode, +} + +func ConvertGlobalStatus(key string, value sql.RawBytes) (interface{}, error) { + if bytes.Equal(value, []byte("")) { + return nil, nil + } + + if conv, ok := GlobalStatusConversions[key]; ok { + return conv(value) + } + + return ParseValue(value) +} + +func ConvertGlobalVariables(key string, value sql.RawBytes) (interface{}, error) { + if bytes.Equal(value, []byte("")) { + return nil, nil + } + + if conv, ok := GlobalVariableConversions[key]; ok { + return conv(value) + } + + return ParseValue(value) +} diff --git a/plugins/inputs/mysql/v2/convert_test.go b/plugins/inputs/mysql/v2/convert_test.go new file mode 100644 index 000000000..47189c18d --- /dev/null +++ b/plugins/inputs/mysql/v2/convert_test.go @@ -0,0 +1,86 @@ +package v2 + +import ( + "database/sql" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestConvertGlobalStatus(t *testing.T) { + tests := []struct { + name string + key string + value 
sql.RawBytes + expected interface{} + expectedErr error + }{ + { + name: "default", + key: "ssl_ctx_verify_depth", + value: []byte("0"), + expected: int64(0), + expectedErr: nil, + }, + { + name: "overflow int64", + key: "ssl_ctx_verify_depth", + value: []byte("18446744073709551615"), + expected: int64(9223372036854775807), + expectedErr: nil, + }, + { + name: "defined variable but unset", + key: "ssl_ctx_verify_depth", + value: []byte(""), + expected: nil, + expectedErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual, err := ConvertGlobalStatus(tt.key, tt.value) + require.Equal(t, tt.expectedErr, err) + require.Equal(t, tt.expected, actual) + }) + } +} + +func TestCovertGlobalVariables(t *testing.T) { + tests := []struct { + name string + key string + value sql.RawBytes + expected interface{} + expectedErr error + }{ + { + name: "boolean type mysql<=5.6", + key: "gtid_mode", + value: []byte("ON"), + expected: int64(1), + expectedErr: nil, + }, + { + name: "enum type mysql>=5.7", + key: "gtid_mode", + value: []byte("ON_PERMISSIVE"), + expected: int64(1), + expectedErr: nil, + }, + { + name: "defined variable but unset", + key: "ssl_ctx_verify_depth", + value: []byte(""), + expected: nil, + expectedErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual, err := ConvertGlobalVariables(tt.key, tt.value) + require.Equal(t, tt.expectedErr, err) + require.Equal(t, tt.expected, actual) + }) + } +} From ce3ae58ad9f4a94d0136e5e755fea9e16a112c83 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 12 Nov 2019 11:58:13 -0800 Subject: [PATCH 1303/1815] Fix uptime_ns calculation when container has been restarted (#6649) --- plugins/inputs/docker/docker.go | 11 +- plugins/inputs/docker/docker_test.go | 206 ++++++++++++++++++--------- 2 files changed, 143 insertions(+), 74 deletions(-) diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 02442baf0..915d3a3e3 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -546,17 +546,22 @@ func (d *Docker) gatherContainerInspect( started, err := time.Parse(time.RFC3339, info.State.StartedAt) if err == nil && !started.IsZero() { statefields["started_at"] = started.UnixNano() - statefields["uptime_ns"] = finished.Sub(started).Nanoseconds() + + uptime := finished.Sub(started) + if finished.Before(started) { + uptime = now().Sub(started) + } + statefields["uptime_ns"] = uptime.Nanoseconds() } - acc.AddFields("docker_container_status", statefields, tags, time.Now()) + acc.AddFields("docker_container_status", statefields, tags, now()) if info.State.Health != nil { healthfields := map[string]interface{}{ "health_status": info.State.Health.Status, "failing_streak": info.ContainerJSONBase.State.Health.FailingStreak, } - acc.AddFields("docker_container_health", healthfields, tags, time.Now()) + acc.AddFields("docker_container_health", healthfields, tags, now()) } } diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index 148228af4..a331479d1 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -11,6 +11,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -541,25 +542,22 @@ func TestContainerNames(t *testing.T) { } } -func TestContainerStatus(t *testing.T) { - type expectation struct { - // 
tags - Status string - // fields - ContainerID string - OOMKilled bool - Pid int - ExitCode int - StartedAt time.Time - FinishedAt time.Time - UptimeNs int64 +func FilterMetrics(metrics []telegraf.Metric, f func(telegraf.Metric) bool) []telegraf.Metric { + results := []telegraf.Metric{} + for _, m := range metrics { + if f(m) { + results = append(results, m) + } } + return results +} +func TestContainerStatus(t *testing.T) { var tests = []struct { - name string - now func() time.Time - inspect types.ContainerJSON - expect expectation + name string + now func() time.Time + inspect types.ContainerJSON + expected []telegraf.Metric }{ { name: "finished_at is zero value", @@ -567,49 +565,141 @@ func TestContainerStatus(t *testing.T) { return time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC) }, inspect: containerInspect(), - expect: expectation{ - ContainerID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb", - Status: "running", - OOMKilled: false, - Pid: 1234, - ExitCode: 0, - StartedAt: time.Date(2018, 6, 14, 5, 48, 53, 266176036, time.UTC), - UptimeNs: int64(3 * time.Minute), + expected: []telegraf.Metric{ + testutil.MustMetric( + "docker_container_status", + map[string]string{ + "container_name": "etcd", + "container_image": "quay.io/coreos/etcd", + "container_version": "v2.2.2", + "engine_host": "absol", + "label1": "test_value_1", + "label2": "test_value_2", + "server_version": "17.09.0-ce", + "container_status": "running", + "source": "e2173b9478a6", + }, + map[string]interface{}{ + "oomkilled": false, + "pid": 1234, + "exitcode": 0, + "container_id": "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb", + "started_at": time.Date(2018, 6, 14, 5, 48, 53, 266176036, time.UTC).UnixNano(), + "uptime_ns": int64(3 * time.Minute), + }, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), }, }, { name: "finished_at is non-zero value", + now: func() time.Time { + return time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC) + }, inspect: func() types.ContainerJSON { i := containerInspect() i.ContainerJSONBase.State.FinishedAt = "2018-06-14T05:53:53.266176036Z" return i }(), - expect: expectation{ - ContainerID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb", - Status: "running", - OOMKilled: false, - Pid: 1234, - ExitCode: 0, - StartedAt: time.Date(2018, 6, 14, 5, 48, 53, 266176036, time.UTC), - FinishedAt: time.Date(2018, 6, 14, 5, 53, 53, 266176036, time.UTC), - UptimeNs: int64(5 * time.Minute), + expected: []telegraf.Metric{ + testutil.MustMetric( + "docker_container_status", + map[string]string{ + "container_name": "etcd", + "container_image": "quay.io/coreos/etcd", + "container_version": "v2.2.2", + "engine_host": "absol", + "label1": "test_value_1", + "label2": "test_value_2", + "server_version": "17.09.0-ce", + "container_status": "running", + "source": "e2173b9478a6", + }, + map[string]interface{}{ + "oomkilled": false, + "pid": 1234, + "exitcode": 0, + "container_id": "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb", + "started_at": time.Date(2018, 6, 14, 5, 48, 53, 266176036, time.UTC).UnixNano(), + "finished_at": time.Date(2018, 6, 14, 5, 53, 53, 266176036, time.UTC).UnixNano(), + "uptime_ns": int64(5 * time.Minute), + }, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), }, }, { name: "started_at is zero value", + now: func() time.Time { + return time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC) + }, inspect: func() types.ContainerJSON { i := containerInspect() 
i.ContainerJSONBase.State.StartedAt = "" i.ContainerJSONBase.State.FinishedAt = "2018-06-14T05:53:53.266176036Z" return i }(), - expect: expectation{ - ContainerID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb", - Status: "running", - OOMKilled: false, - Pid: 1234, - ExitCode: 0, - FinishedAt: time.Date(2018, 6, 14, 5, 53, 53, 266176036, time.UTC), + expected: []telegraf.Metric{ + testutil.MustMetric( + "docker_container_status", + map[string]string{ + "container_name": "etcd", + "container_image": "quay.io/coreos/etcd", + "container_version": "v2.2.2", + "engine_host": "absol", + "label1": "test_value_1", + "label2": "test_value_2", + "server_version": "17.09.0-ce", + "container_status": "running", + "source": "e2173b9478a6", + }, + map[string]interface{}{ + "oomkilled": false, + "pid": 1234, + "exitcode": 0, + "container_id": "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb", + "finished_at": time.Date(2018, 6, 14, 5, 53, 53, 266176036, time.UTC).UnixNano(), + }, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + }, + { + name: "container has been restarted", + now: func() time.Time { + return time.Date(2019, 1, 1, 0, 0, 3, 0, time.UTC) + }, + inspect: func() types.ContainerJSON { + i := containerInspect() + i.ContainerJSONBase.State.StartedAt = "2019-01-01T00:00:02Z" + i.ContainerJSONBase.State.FinishedAt = "2019-01-01T00:00:01Z" + return i + }(), + expected: []telegraf.Metric{ + testutil.MustMetric( + "docker_container_status", + map[string]string{ + "container_name": "etcd", + "container_image": "quay.io/coreos/etcd", + "container_version": "v2.2.2", + "engine_host": "absol", + "label1": "test_value_1", + "label2": "test_value_2", + "server_version": "17.09.0-ce", + "container_status": "running", + "source": "e2173b9478a6", + }, + map[string]interface{}{ + "oomkilled": false, + "pid": 1234, + "exitcode": 0, + "container_id": "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb", + "started_at": time.Date(2019, 1, 1, 0, 0, 2, 0, time.UTC).UnixNano(), + "finished_at": time.Date(2019, 1, 1, 0, 0, 1, 0, time.UTC).UnixNano(), + "uptime_ns": int64(1 * time.Second), + }, + time.Date(2019, 1, 1, 0, 0, 3, 0, time.UTC), + ), }, }, } @@ -643,39 +733,13 @@ func TestContainerStatus(t *testing.T) { now = time.Now }() - err := acc.GatherError(d.Gather) + err := d.Gather(&acc) require.NoError(t, err) - fields := map[string]interface{}{ - "oomkilled": tt.expect.OOMKilled, - "pid": tt.expect.Pid, - "exitcode": tt.expect.ExitCode, - "container_id": tt.expect.ContainerID, - } - - if started := tt.expect.StartedAt; !started.IsZero() { - fields["started_at"] = started.UnixNano() - fields["uptime_ns"] = tt.expect.UptimeNs - } - - if finished := tt.expect.FinishedAt; !finished.IsZero() { - fields["finished_at"] = finished.UnixNano() - } - - acc.AssertContainsTaggedFields(t, - "docker_container_status", - fields, - map[string]string{ - "container_name": "etcd", - "container_image": "quay.io/coreos/etcd", - "container_version": "v2.2.2", - "engine_host": "absol", - "label1": "test_value_1", - "label2": "test_value_2", - "server_version": "17.09.0-ce", - "container_status": tt.expect.Status, - "source": "e2173b9478a6", - }) + actual := FilterMetrics(acc.GetTelegrafMetrics(), func(m telegraf.Metric) bool { + return m.Name() == "docker_container_status" + }) + testutil.RequireMetricsEqual(t, tt.expected, actual) }) } } From bcf1bcf318c814127a799e5649af490cb5ef972c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 12 Nov 2019 11:59:11 
-0800 Subject: [PATCH 1304/1815] Fix metric creation when node is offline in jenkins input (#6627) --- plugins/inputs/jenkins/README.md | 1 + plugins/inputs/jenkins/jenkins.go | 75 ++++---- plugins/inputs/jenkins/jenkins_test.go | 231 ++++++++++++++----------- 3 files changed, 175 insertions(+), 132 deletions(-) diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md index 79f55e6aa..80d6de0be 100644 --- a/plugins/inputs/jenkins/README.md +++ b/plugins/inputs/jenkins/README.md @@ -67,6 +67,7 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API - swap_available - swap_total - response_time + - num_executors - jenkins_job - tags: diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go index e13d5c25d..c80463589 100644 --- a/plugins/inputs/jenkins/jenkins.go +++ b/plugins/inputs/jenkins/jenkins.go @@ -2,7 +2,6 @@ package jenkins import ( "context" - "errors" "fmt" "net/http" "strconv" @@ -180,27 +179,36 @@ func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error { return nil } - tags["arch"] = n.MonitorData.HudsonNodeMonitorsArchitectureMonitor + monitorData := n.MonitorData + + if monitorData.HudsonNodeMonitorsArchitectureMonitor != "" { + tags["arch"] = monitorData.HudsonNodeMonitorsArchitectureMonitor + } tags["status"] = "online" if n.Offline { tags["status"] = "offline" } - monitorData := n.MonitorData - if monitorData.HudsonNodeMonitorsArchitectureMonitor == "" { - return errors.New("empty monitor data, please check your permission") - } - tags["disk_path"] = monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Path - tags["temp_path"] = monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Path - fields := map[string]interface{}{ - "response_time": monitorData.HudsonNodeMonitorsResponseTimeMonitor.Average, - "disk_available": monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Size, - "temp_available": monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Size, - "swap_available": monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapAvailable, - "memory_available": monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryAvailable, - "swap_total": monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapTotal, - "memory_total": monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryTotal, + fields := make(map[string]interface{}) + fields["num_executors"] = n.NumExecutors + + if monitorData.HudsonNodeMonitorsResponseTimeMonitor != nil { + fields["response_time"] = monitorData.HudsonNodeMonitorsResponseTimeMonitor.Average + } + if monitorData.HudsonNodeMonitorsDiskSpaceMonitor != nil { + tags["disk_path"] = monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Path + fields["disk_available"] = monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Size + } + if monitorData.HudsonNodeMonitorsTemporarySpaceMonitor != nil { + tags["temp_path"] = monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Path + fields["temp_available"] = monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Size + } + if monitorData.HudsonNodeMonitorsSwapSpaceMonitor != nil { + fields["swap_available"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapAvailable + fields["memory_available"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryAvailable + fields["swap_total"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapTotal + fields["memory_total"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryTotal } acc.AddFields(measurementNode, fields, tags) @@ -327,24 +335,18 @@ type nodeResponse struct { } type node struct { - DisplayName string 
`json:"displayName"` - Offline bool `json:"offline"` - MonitorData monitorData `json:"monitorData"` + DisplayName string `json:"displayName"` + Offline bool `json:"offline"` + NumExecutors int `json:"numExecutors"` + MonitorData monitorData `json:"monitorData"` } type monitorData struct { - HudsonNodeMonitorsArchitectureMonitor string `json:"hudson.node_monitors.ArchitectureMonitor"` - HudsonNodeMonitorsDiskSpaceMonitor nodeSpaceMonitor `json:"hudson.node_monitors.DiskSpaceMonitor"` - HudsonNodeMonitorsResponseTimeMonitor struct { - Average int64 `json:"average"` - } `json:"hudson.node_monitors.ResponseTimeMonitor"` - HudsonNodeMonitorsSwapSpaceMonitor struct { - SwapAvailable float64 `json:"availableSwapSpace"` - SwapTotal float64 `json:"totalSwapSpace"` - MemoryAvailable float64 `json:"availablePhysicalMemory"` - MemoryTotal float64 `json:"totalPhysicalMemory"` - } `json:"hudson.node_monitors.SwapSpaceMonitor"` - HudsonNodeMonitorsTemporarySpaceMonitor nodeSpaceMonitor `json:"hudson.node_monitors.TemporarySpaceMonitor"` + HudsonNodeMonitorsArchitectureMonitor string `json:"hudson.node_monitors.ArchitectureMonitor"` + HudsonNodeMonitorsDiskSpaceMonitor *nodeSpaceMonitor `json:"hudson.node_monitors.DiskSpaceMonitor"` + HudsonNodeMonitorsResponseTimeMonitor *responseTimeMonitor `json:"hudson.node_monitors.ResponseTimeMonitor"` + HudsonNodeMonitorsSwapSpaceMonitor *swapSpaceMonitor `json:"hudson.node_monitors.SwapSpaceMonitor"` + HudsonNodeMonitorsTemporarySpaceMonitor *nodeSpaceMonitor `json:"hudson.node_monitors.TemporarySpaceMonitor"` } type nodeSpaceMonitor struct { @@ -352,6 +354,17 @@ type nodeSpaceMonitor struct { Size float64 `json:"size"` } +type responseTimeMonitor struct { + Average int64 `json:"average"` +} + +type swapSpaceMonitor struct { + SwapAvailable float64 `json:"availableSwapSpace"` + SwapTotal float64 `json:"totalSwapSpace"` + MemoryAvailable float64 `json:"availablePhysicalMemory"` + MemoryTotal float64 `json:"totalPhysicalMemory"` +} + type jobResponse struct { LastBuild jobBuild `json:"lastBuild"` Jobs []innerJob `json:"jobs"` diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go index 04aaffaad..b8c713de0 100644 --- a/plugins/inputs/jenkins/jenkins_test.go +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -107,7 +107,7 @@ func TestGatherNodeData(t *testing.T) { wantErr: true, }, { - name: "bad empty monitor data", + name: "empty monitor data", input: mockHandler{ responseMap: map[string]interface{}{ "/api/json": struct{}{}, @@ -119,7 +119,9 @@ func TestGatherNodeData(t *testing.T) { }, }, }, - wantErr: true, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{}, + }, }, { name: "filtered nodes", @@ -135,7 +137,6 @@ func TestGatherNodeData(t *testing.T) { }, }, }, - { name: "normal data collection", input: mockHandler{ @@ -147,25 +148,18 @@ func TestGatherNodeData(t *testing.T) { DisplayName: "master", MonitorData: monitorData{ HudsonNodeMonitorsArchitectureMonitor: "linux", - HudsonNodeMonitorsResponseTimeMonitor: struct { - Average int64 `json:"average"` - }{ + HudsonNodeMonitorsResponseTimeMonitor: &responseTimeMonitor{ Average: 10032, }, - HudsonNodeMonitorsDiskSpaceMonitor: nodeSpaceMonitor{ + HudsonNodeMonitorsDiskSpaceMonitor: &nodeSpaceMonitor{ Path: "/path/1", Size: 123, }, - HudsonNodeMonitorsTemporarySpaceMonitor: nodeSpaceMonitor{ + HudsonNodeMonitorsTemporarySpaceMonitor: &nodeSpaceMonitor{ Path: "/path/2", Size: 245, }, - HudsonNodeMonitorsSwapSpaceMonitor: struct { - SwapAvailable float64 
`json:"availableSwapSpace"` - SwapTotal float64 `json:"totalSwapSpace"` - MemoryAvailable float64 `json:"availablePhysicalMemory"` - MemoryTotal float64 `json:"totalPhysicalMemory"` - }{ + HudsonNodeMonitorsSwapSpaceMonitor: &swapSpaceMonitor{ SwapAvailable: 212, SwapTotal: 500, MemoryAvailable: 101, @@ -201,42 +195,75 @@ func TestGatherNodeData(t *testing.T) { }, }, }, + { + name: "slave is offline", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": struct{}{}, + "/computer/api/json": nodeResponse{ + Computers: []node{ + { + DisplayName: "slave", + MonitorData: monitorData{}, + NumExecutors: 1, + Offline: true, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "node_name": "slave", + "status": "offline", + }, + Fields: map[string]interface{}{ + "num_executors": 1, + }, + }, + }, + }, + }, } for _, test := range tests { - ts := httptest.NewServer(test.input) - defer ts.Close() - j := &Jenkins{ - Log: testutil.Logger{}, - URL: ts.URL, - ResponseTimeout: internal.Duration{Duration: time.Microsecond}, - NodeExclude: []string{"ignore-1", "ignore-2"}, - } - te := j.initialize(&http.Client{Transport: &http.Transport{}}) - acc := new(testutil.Accumulator) - j.gatherNodesData(acc) - if err := acc.FirstError(); err != nil { - te = err - } + t.Run(test.name, func(t *testing.T) { + ts := httptest.NewServer(test.input) + defer ts.Close() + j := &Jenkins{ + Log: testutil.Logger{}, + URL: ts.URL, + ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + NodeExclude: []string{"ignore-1", "ignore-2"}, + } + te := j.initialize(&http.Client{Transport: &http.Transport{}}) + acc := new(testutil.Accumulator) + j.gatherNodesData(acc) + if err := acc.FirstError(); err != nil { + te = err + } - if !test.wantErr && te != nil { - t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error()) - } else if test.wantErr && te == nil { - t.Fatalf("%s: expected err, got nil", test.name) - } - if test.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", test.name) - } else if test.output != nil && len(test.output.Metrics) > 0 { - for k, m := range test.output.Metrics[0].Tags { - if acc.Metrics[0].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[0].Tags[k]) + if !test.wantErr && te != nil { + t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error()) + } else if test.wantErr && te == nil { + t.Fatalf("%s: expected err, got nil", test.name) + } + if test.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", test.name) + } else if test.output != nil && len(test.output.Metrics) > 0 { + for k, m := range test.output.Metrics[0].Tags { + if acc.Metrics[0].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[0].Tags[k]) + } + } + for k, m := range test.output.Metrics[0].Fields { + if acc.Metrics[0].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[0].Fields[k], acc.Metrics[0].Fields[k]) + } } } - for k, m := range test.output.Metrics[0].Fields { - if acc.Metrics[0].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[0].Fields[k], acc.Metrics[0].Fields[k]) - } - } - } + }) } } @@ -290,21 +317,22 @@ func TestInitialize(t *testing.T) { }, } for _, test := range tests { - te := test.input.initialize(mockClient) - 
if !test.wantErr && te != nil { - t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error()) - } else if test.wantErr && te == nil { - t.Fatalf("%s: expected err, got nil", test.name) - } - if test.output != nil { - if test.input.client == nil { - t.Fatalf("%s: failed %s, jenkins instance shouldn't be nil", test.name, te.Error()) + t.Run(test.name, func(t *testing.T) { + te := test.input.initialize(mockClient) + if !test.wantErr && te != nil { + t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error()) + } else if test.wantErr && te == nil { + t.Fatalf("%s: expected err, got nil", test.name) } - if test.input.MaxConnections != test.output.MaxConnections { - t.Fatalf("%s: different MaxConnections Expected %d, got %d\n", test.name, test.output.MaxConnections, test.input.MaxConnections) + if test.output != nil { + if test.input.client == nil { + t.Fatalf("%s: failed %s, jenkins instance shouldn't be nil", test.name, te.Error()) + } + if test.input.MaxConnections != test.output.MaxConnections { + t.Fatalf("%s: different MaxConnections Expected %d, got %d\n", test.name, test.output.MaxConnections, test.input.MaxConnections) + } } - } - + }) } } @@ -572,50 +600,51 @@ func TestGatherJobs(t *testing.T) { }, } for _, test := range tests { - ts := httptest.NewServer(test.input) - defer ts.Close() - j := &Jenkins{ - Log: testutil.Logger{}, - URL: ts.URL, - MaxBuildAge: internal.Duration{Duration: time.Hour}, - ResponseTimeout: internal.Duration{Duration: time.Microsecond}, - JobExclude: []string{ - "ignore-1", - "apps/ignore-all/*", - "apps/k8s-cloud/PR-ignore2", - }, - } - te := j.initialize(&http.Client{Transport: &http.Transport{}}) - acc := new(testutil.Accumulator) - acc.SetDebug(true) - j.gatherJobs(acc) - if err := acc.FirstError(); err != nil { - te = err - } - if !test.wantErr && te != nil { - t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error()) - } else if test.wantErr && te == nil { - t.Fatalf("%s: expected err, got nil", test.name) - } - - if test.output != nil && len(test.output.Metrics) > 0 { - // sort metrics - sort.Slice(acc.Metrics, func(i, j int) bool { - return strings.Compare(acc.Metrics[i].Tags["name"], acc.Metrics[j].Tags["name"]) < 0 - }) - for i := range test.output.Metrics { - for k, m := range test.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range test.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[0].Fields[k]) - } - } + t.Run(test.name, func(t *testing.T) { + ts := httptest.NewServer(test.input) + defer ts.Close() + j := &Jenkins{ + Log: testutil.Logger{}, + URL: ts.URL, + MaxBuildAge: internal.Duration{Duration: time.Hour}, + ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + JobExclude: []string{ + "ignore-1", + "apps/ignore-all/*", + "apps/k8s-cloud/PR-ignore2", + }, + } + te := j.initialize(&http.Client{Transport: &http.Transport{}}) + acc := new(testutil.Accumulator) + j.gatherJobs(acc) + if err := acc.FirstError(); err != nil { + te = err + } + if !test.wantErr && te != nil { + t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error()) + } else if test.wantErr && te == nil { + t.Fatalf("%s: expected err, got nil", test.name) } - } + if test.output != nil && len(test.output.Metrics) > 0 { + // sort metrics + 
sort.Slice(acc.Metrics, func(i, j int) bool { + return strings.Compare(acc.Metrics[i].Tags["name"], acc.Metrics[j].Tags["name"]) < 0 + }) + for i := range test.output.Metrics { + for k, m := range test.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range test.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[0].Fields[k]) + } + } + } + + } + }) } } From f41fbef182130abce636dc63a588adef90e20ca0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 12 Nov 2019 12:08:54 -0800 Subject: [PATCH 1305/1815] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 44b62a329..f637c654e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,6 +54,10 @@ - [#6337](https://github.com/influxdata/telegraf/issues/6337): Change no metric error message to debug level in cloudwatch input. - [#6602](https://github.com/influxdata/telegraf/issues/6602): Add missing ServerProperties query to sqlserver input docs. - [#6643](https://github.com/influxdata/telegraf/pull/6643): Fix mongodb connections_total_created field loading. +- [#6627](https://github.com/influxdata/telegraf/issues/6578): Fix metric creation when node is offline in jenkins input. +- [#6649](https://github.com/influxdata/telegraf/issues/6615): Fix docker uptime_ns calculation when container has been restarted. +- [#6647](https://github.com/influxdata/telegraf/issues/6646): Fix mysql field type conflict in conversion of gtid_mode to an integer. +- [#5529](https://github.com/influxdata/telegraf/issues/5529): Fix mysql field type conflict with ssl_verify_depth and ssl_ctx_verify_depth. ## v1.12.4 [2019-10-23] From 7717375bc9fdf8ac7ce2af3f9d5b94726e327a81 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 12 Nov 2019 12:28:41 -0800 Subject: [PATCH 1306/1815] Set 1.12.5 release date in changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f637c654e..41102db42 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,7 +45,7 @@ - [#6484](https://github.com/influxdata/telegraf/issues/6484): Show correct default settings in mysql sample config. - [#6583](https://github.com/influxdata/telegraf/issues/6583): Use 1h or 3h rain values as appropriate in openweathermap input. 
-## v1.12.5 [unreleased] +## v1.12.5 [2019-11-12] #### Bugfixes From 898c20c01e8d491ffcdd61d9f1932f56852d1c7c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 12 Nov 2019 13:44:57 -0800 Subject: [PATCH 1307/1815] Don't log if no error in mongodb input --- plugins/inputs/mongodb/mongodb.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index b61bb671a..967ccbe5f 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -100,7 +100,10 @@ func (m *MongoDB) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(srv *Server) { defer wg.Done() - m.Log.Error(m.gatherServer(srv, acc)) + err := m.gatherServer(srv, acc) + if err != nil { + m.Log.Errorf("Error in plugin: %v", err) + } }(m.getMongoServer(u)) } From 55b78a5f668eeeabafc532148c720084f86ae7f9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 12 Nov 2019 14:11:42 -0800 Subject: [PATCH 1308/1815] Fix spelling mistake in mqtt_consumer docs --- plugins/inputs/mqtt_consumer/README.md | 2 +- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md index 9e60679f6..ddb5a073b 100644 --- a/plugins/inputs/mqtt_consumer/README.md +++ b/plugins/inputs/mqtt_consumer/README.md @@ -45,7 +45,7 @@ and creates metrics using one of the supported [input data formats][]. # max_undelivered_messages = 1000 ## Persistent session disables clearing of the client session on connection. - ## In order for this option to work you must also set client_id to identity + ## In order for this option to work you must also set client_id to identify ## the client. To receive messages that arrived while the client is offline, ## also set the qos option to 1 or 2 and don't forget to also set the QoS when ## publishing. diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 5c59eda87..2a1631750 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -114,7 +114,7 @@ var sampleConfig = ` # max_undelivered_messages = 1000 ## Persistent session disables clearing of the client session on connection. - ## In order for this option to work you must also set client_id to identity + ## In order for this option to work you must also set client_id to identify ## the client. To receive messages that arrived while the client is offline, ## also set the qos option to 1 or 2 and don't forget to also set the QoS when ## publishing. 
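To illustrate the persistent session behavior described by the corrected comment above, a minimal `mqtt_consumer` configuration might look like the following sketch; the server address, topic, and client id are hypothetical, and as the docs note, `client_id` must be set and `qos` raised to 1 or 2 for offline delivery to work:

```toml
[[inputs.mqtt_consumer]]
  servers = ["tcp://127.0.0.1:1883"]
  topics = ["telegraf/host01/cpu"]  # hypothetical topic

  ## Keep the session across reconnects so messages published while the
  ## client is offline are delivered once it returns.
  persistent_session = true
  client_id = "telegraf-host01"  # required when persistent_session is enabled

  ## QoS 1 or 2 is required for offline message delivery.
  qos = 1

  data_format = "influx"
```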
From 2cf5116d14472600e315d65b49ac62fc79c20390 Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Tue, 12 Nov 2019 17:12:15 -0700 Subject: [PATCH 1309/1815] Update nvidia-smi input to use xml (#6639) --- plugins/inputs/nvidia_smi/nvidia_smi.go | 255 +++++++++++-------- plugins/inputs/nvidia_smi/nvidia_smi_test.go | 122 ++++++--- 2 files changed, 240 insertions(+), 137 deletions(-) diff --git a/plugins/inputs/nvidia_smi/nvidia_smi.go b/plugins/inputs/nvidia_smi/nvidia_smi.go index e2ec19959..b21e390c6 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi.go @@ -1,7 +1,7 @@ package nvidia_smi import ( - "bufio" + "encoding/xml" "fmt" "os" "os/exec" @@ -14,41 +14,12 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -var ( - measurement = "nvidia_smi" - metrics = "fan.speed,memory.total,memory.used,memory.free,pstate,temperature.gpu,name,uuid,compute_mode,utilization.gpu,utilization.memory,index,power.draw,pcie.link.gen.current,pcie.link.width.current,encoder.stats.sessionCount,encoder.stats.averageFps,encoder.stats.averageLatency,clocks.current.graphics,clocks.current.sm,clocks.current.memory,clocks.current.video" - metricNames = [][]string{ - {"fan_speed", "integer"}, - {"memory_total", "integer"}, - {"memory_used", "integer"}, - {"memory_free", "integer"}, - {"pstate", "tag"}, - {"temperature_gpu", "integer"}, - {"name", "tag"}, - {"uuid", "tag"}, - {"compute_mode", "tag"}, - {"utilization_gpu", "integer"}, - {"utilization_memory", "integer"}, - {"index", "tag"}, - {"power_draw", "float"}, - {"pcie_link_gen_current", "integer"}, - {"pcie_link_width_current", "integer"}, - {"encoder_stats_session_count", "integer"}, - {"encoder_stats_average_fps", "integer"}, - {"encoder_stats_average_latency", "integer"}, - {"clocks_current_graphics", "integer"}, - {"clocks_current_sm", "integer"}, - {"clocks_current_memory", "integer"}, - {"clocks_current_video", "integer"}, - } -) +const measurement = "nvidia_smi" // NvidiaSMI holds the methods for this plugin type NvidiaSMI struct { BinPath string Timeout internal.Duration - - metrics string } // Description returns the description of the NvidiaSMI plugin @@ -69,7 +40,6 @@ func (smi *NvidiaSMI) SampleConfig() string { // Gather implements the telegraf interface func (smi *NvidiaSMI) Gather(acc telegraf.Accumulator) error { - if _, err := os.Stat(smi.BinPath); os.IsNotExist(err) { return fmt.Errorf("nvidia-smi binary not at path %s, cannot gather GPU data", smi.BinPath) } @@ -92,93 +62,178 @@ func init() { return &NvidiaSMI{ BinPath: "/usr/bin/nvidia-smi", Timeout: internal.Duration{Duration: 5 * time.Second}, - metrics: metrics, } }) } -func (smi *NvidiaSMI) pollSMI() (string, error) { +func (smi *NvidiaSMI) pollSMI() ([]byte, error) { // Construct and execute metrics query - opts := []string{"--format=noheader,nounits,csv", fmt.Sprintf("--query-gpu=%s", smi.metrics)} - ret, err := internal.CombinedOutputTimeout(exec.Command(smi.BinPath, opts...), smi.Timeout.Duration) + ret, err := internal.CombinedOutputTimeout(exec.Command(smi.BinPath, "-q", "-x"), smi.Timeout.Duration) if err != nil { - return "", err + return nil, err } - return string(ret), nil + return ret, nil } -func gatherNvidiaSMI(ret string, acc telegraf.Accumulator) error { - // First split the lines up and handle each one - scanner := bufio.NewScanner(strings.NewReader(ret)) - for scanner.Scan() { - tags, fields, err := parseLine(scanner.Text()) - if err != nil { - return err - } - 
acc.AddFields(measurement, fields, tags) +func gatherNvidiaSMI(ret []byte, acc telegraf.Accumulator) error { + smi := &SMI{} + err := xml.Unmarshal(ret, smi) + if err != nil { + return err } - if err := scanner.Err(); err != nil { - return fmt.Errorf("Error scanning text %s", ret) + metrics := smi.genTagsFields() + + for _, metric := range metrics { + acc.AddFields(measurement, metric.fields, metric.tags) } return nil } -func parseLine(line string) (map[string]string, map[string]interface{}, error) { - tags := make(map[string]string, 0) - fields := make(map[string]interface{}, 0) +type metric struct { + tags map[string]string + fields map[string]interface{} +} - // Next split up the comma delimited metrics - met := strings.Split(line, ",") - - // Make sure there are as many metrics in the line as there were queried. - if len(met) == len(metricNames) { - for i, m := range metricNames { - col := strings.TrimSpace(met[i]) - - // Handle the tags - if m[1] == "tag" { - tags[m[0]] = col - continue - } - - // In some cases we may not be able to get data. - // One such case is when the memory is overclocked. - // nvidia-smi reads the max supported memory clock from the stock value. - // If the current memory clock is greater than the max detected memory clock then we receive [Unknown Error] as a value. - - // For example, the stock max memory clock speed on a 2080 Ti is 7000 MHz which nvidia-smi detects. - // The user has overclocked their memory using an offset of +1000 so under load the memory clock reaches 8000 MHz. - // Now when nvidia-smi tries to read the current memory clock it fails and spits back [Unknown Error] as the value. - // This value will break the parsing logic below unless it is accounted for here. - if strings.Contains(col, "[Not Supported]") || strings.Contains(col, "[Unknown Error]") { - continue - } - - // Parse the integers - if m[1] == "integer" { - out, err := strconv.ParseInt(col, 10, 64) - if err != nil { - return tags, fields, err - } - fields[m[0]] = out - } - - // Parse the floats - if m[1] == "float" { - out, err := strconv.ParseFloat(col, 64) - if err != nil { - return tags, fields, err - } - fields[m[0]] = out - } +func (s *SMI) genTagsFields() []metric { + metrics := []metric{} + for i, gpu := range s.GPU { + tags := map[string]string{ + "index": strconv.Itoa(i), } + fields := map[string]interface{}{} - // Return the tags and fields - return tags, fields, nil + setTagIfUsed(tags, "pstate", gpu.PState) + setTagIfUsed(tags, "name", gpu.ProdName) + setTagIfUsed(tags, "uuid", gpu.UUID) + setTagIfUsed(tags, "compute_mode", gpu.ComputeMode) + + setIfUsed("int", fields, "fan_speed", gpu.FanSpeed) + setIfUsed("int", fields, "memory_total", gpu.Memory.Total) + setIfUsed("int", fields, "memory_used", gpu.Memory.Used) + setIfUsed("int", fields, "memory_free", gpu.Memory.Free) + setIfUsed("int", fields, "temperature_gpu", gpu.Temp.GPUTemp) + setIfUsed("int", fields, "utilization_gpu", gpu.Utilization.GPU) + setIfUsed("int", fields, "utilization_memory", gpu.Utilization.Memory) + setIfUsed("int", fields, "pcie_link_gen_current", gpu.PCI.LinkInfo.PCIEGen.CurrentLinkGen) + setIfUsed("int", fields, "pcie_link_width_current", gpu.PCI.LinkInfo.LinkWidth.CurrentLinkWidth) + setIfUsed("int", fields, "encoder_stats_session_count", gpu.Encoder.SessionCount) + setIfUsed("int", fields, "encoder_stats_average_fps", gpu.Encoder.AverageFPS) + setIfUsed("int", fields, "encoder_stats_average_latency", gpu.Encoder.AverageLatency) + setIfUsed("int", fields, "clocks_current_graphics", 
gpu.Clocks.Graphics) + setIfUsed("int", fields, "clocks_current_sm", gpu.Clocks.SM) + setIfUsed("int", fields, "clocks_current_memory", gpu.Clocks.Memory) + setIfUsed("int", fields, "clocks_current_video", gpu.Clocks.Video) + + setIfUsed("float", fields, "power_draw", gpu.Power.PowerDraw) + metrics = append(metrics, metric{tags, fields}) + } + return metrics +} + +func setTagIfUsed(m map[string]string, k, v string) { + if v != "" { + m[k] = v + } +} + +func setIfUsed(t string, m map[string]interface{}, k, v string) { + vals := strings.Fields(v) + if len(vals) < 1 { + return } - // If the line is empty return an emptyline error - return tags, fields, fmt.Errorf("Different number of metrics returned (%d) than expeced (%d)", len(met), len(metricNames)) + val := vals[0] + if k == "pcie_link_width_current" { + val = strings.TrimSuffix(vals[0], "x") + } + + switch t { + case "float": + if val != "" { + f, err := strconv.ParseFloat(val, 64) + if err == nil { + m[k] = f + } + } + case "int": + if val != "" { + i, err := strconv.Atoi(val) + if err == nil { + m[k] = i + } + } + } +} + +// SMI defines the structure for the output of _nvidia-smi -q -x_. +type SMI struct { + GPU GPU `xml:"gpu"` +} + +// GPU defines the structure of the GPU portion of the smi output. +type GPU []struct { + FanSpeed string `xml:"fan_speed"` // int + Memory MemoryStats `xml:"fb_memory_usage"` + PState string `xml:"performance_state"` + Temp TempStats `xml:"temperature"` + ProdName string `xml:"product_name"` + UUID string `xml:"uuid"` + ComputeMode string `xml:"compute_mode"` + Utilization UtilizationStats `xml:"utilization"` + Power PowerReadings `xml:"power_readings"` + PCI PCI `xml:"pci"` + Encoder EncoderStats `xml:"encoder_stats"` + Clocks ClockStats `xml:"clocks"` +} + +// MemoryStats defines the structure of the memory portions in the smi output. +type MemoryStats struct { + Total string `xml:"total"` // int + Used string `xml:"used"` // int + Free string `xml:"free"` // int +} + +// TempStats defines the structure of the temperature portion of the smi output. +type TempStats struct { + GPUTemp string `xml:"gpu_temp"` // int +} + +// UtilizationStats defines the structure of the utilization portion of the smi output. +type UtilizationStats struct { + GPU string `xml:"gpu_util"` // int + Memory string `xml:"memory_util"` // int +} + +// PowerReadings defines the structure of the power_readings portion of the smi output. +type PowerReadings struct { + PowerDraw string `xml:"power_draw"` // float +} + +// PCI defines the structure of the pci portion of the smi output. +type PCI struct { + LinkInfo struct { + PCIEGen struct { + CurrentLinkGen string `xml:"current_link_gen"` // int + } `xml:"pcie_gen"` + LinkWidth struct { + CurrentLinkWidth string `xml:"current_link_width"` // int + } `xml:"link_widths"` + } `xml:"pci_gpu_link_info"` +} + +// EncoderStats defines the structure of the encoder_stats portion of the smi output. +type EncoderStats struct { + SessionCount string `xml:"session_count"` // int + AverageFPS string `xml:"average_fps"` // int + AverageLatency string `xml:"average_latency"` // int +} + +// ClockStats defines the structure of the clocks portion of the smi output. 
+type ClockStats struct { + Graphics string `xml:"graphics_clock"` // int + SM string `xml:"sm_clock"` // int + Memory string `xml:"mem_clock"` // int + Video string `xml:"video_clock"` // int } diff --git a/plugins/inputs/nvidia_smi/nvidia_smi_test.go b/plugins/inputs/nvidia_smi/nvidia_smi_test.go index a16447d69..7d0ec4666 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi_test.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi_test.go @@ -1,51 +1,99 @@ package nvidia_smi import ( + "fmt" "testing" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) -func TestParseLineStandard(t *testing.T) { - line := "41, 11264, 1074, 10190, P8, 32, GeForce RTX 2080 Ti, GPU-c97b7f88-c06d-650f-5339-f8dd0c1315c0, Default, 1, 4, 0, 24.33, 1, 16, 0, 0, 0, 300, 300, 405, 540\n" - tags, fields, err := parseLine(line) - if err != nil { - t.Fail() - } - if tags["name"] != "GeForce RTX 2080 Ti" { - t.Fail() - } - if temp, ok := fields["temperature_gpu"].(int); ok && temp != 32 { - t.Fail() - } -} +var payload = []byte(` + + + + GeForce GTX 1070 Ti + GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665 + + + + 1 + + + 16x + + + + 100 % + P8 + + 4096 MiB + 42 MiB + 4054 MiB + + Default + + 0 % + 0 % + + + 0 + 0 + 0 + + + 39 C + + + N/A + + + 135 MHz + 135 MHz + 405 MHz + 405 MHz + + +`) -func TestParseLineEmptyLine(t *testing.T) { - line := "\n" - _, _, err := parseLine(line) - if err == nil { - t.Fail() +func TestGatherSMI(t *testing.T) { + var expectedMetric = struct { + tags map[string]string + fields map[string]interface{} + }{ + tags: map[string]string{ + "name": "GeForce GTX 1070 Ti", + "compute_mode": "Default", + "index": "0", + "pstate": "P8", + "uuid": "GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665", + }, + fields: map[string]interface{}{ + "fan_speed": 100, + "memory_free": 4054, + "memory_used": 42, + "memory_total": 4096, + "temperature_gpu": 39, + "utilization_gpu": 0, + "utilization_memory": 0, + "pcie_link_gen_current": 1, + "pcie_link_width_current": 16, + "encoder_stats_session_count": 0, + "encoder_stats_average_fps": 0, + "encoder_stats_average_latency": 0, + "clocks_current_graphics": 135, + "clocks_current_sm": 135, + "clocks_current_memory": 405, + "clocks_current_video": 405, + }, } -} -func TestParseLineBad(t *testing.T) { - line := "the quick brown fox jumped over the lazy dog" - _, _, err := parseLine(line) - if err == nil { - t.Fail() - } -} + acc := &testutil.Accumulator{} -func TestParseLineNotSupported(t *testing.T) { - line := "[Not Supported], 11264, 1074, 10190, P8, 32, GeForce RTX 2080 Ti, GPU-c97b7f88-c06d-650f-5339-f8dd0c1315c0, Default, 1, 4, 0, 24.33, 1, 16, 0, 0, 0, 300, 300, 405, 540\n" - _, fields, err := parseLine(line) - require.NoError(t, err) - require.Equal(t, nil, fields["fan_speed"]) -} + gatherNvidiaSMI(payload, acc) + fmt.Println() -func TestParseLineUnknownError(t *testing.T) { - line := "[Unknown Error], 11264, 1074, 10190, P8, 32, GeForce RTX 2080 Ti, GPU-c97b7f88-c06d-650f-5339-f8dd0c1315c0, Default, 1, 4, 0, 24.33, 1, 16, 0, 0, 0, 300, 300, 405, 540\n" - _, fields, err := parseLine(line) - require.NoError(t, err) - require.Equal(t, nil, fields["fan_speed"]) + require.Equal(t, 1, len(acc.Metrics)) + require.Equal(t, expectedMetric.fields, acc.Metrics[0].Fields) + require.Equal(t, expectedMetric.tags, acc.Metrics[0].Tags) } From b71a387ca270f18d6021af01be483e90556600fd Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 12 Nov 2019 16:13:30 -0800 Subject: [PATCH 1310/1815] Add additional nvidia-smi examples as testcases --- 
plugins/inputs/nvidia_smi/README.md | 13 +- plugins/inputs/nvidia_smi/nvidia_smi_test.go | 202 +++++++++++-------- 2 files changed, 131 insertions(+), 84 deletions(-) diff --git a/plugins/inputs/nvidia_smi/README.md b/plugins/inputs/nvidia_smi/README.md index 8afa74538..2173c904e 100644 --- a/plugins/inputs/nvidia_smi/README.md +++ b/plugins/inputs/nvidia_smi/README.md @@ -55,11 +55,20 @@ SELECT mean("temperature_gpu") FROM "nvidia_smi" WHERE time > now() - 5m GROUP B ### Troubleshooting -As the `telegraf` user run the following command. Adjust the path to `nvidia-smi` if customized. +Check the full output by running the `nvidia-smi` binary manually. + +Linux: ``` -/usr/bin/nvidia-smi --format=noheader,nounits,csv --query-gpu=fan.speed,memory.total,memory.used,memory.free,pstate,temperature.gpu,name,uuid,compute_mode,utilization.gpu,utilization.memory,index,power.draw,pcie.link.gen.current,pcie.link.width.current,encoder.stats.sessionCount,encoder.stats.averageFps,encoder.stats.averageLatency,clocks.current.graphics,clocks.current.sm,clocks.current.memory,clocks.current.video +sudo -u telegraf -- /usr/bin/nvidia-smi -q -x ``` +Windows: +``` +"C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe" -q -x +``` + +Please include the output of this command if opening a GitHub issue. + ### Example Output ``` nvidia_smi,compute_mode=Default,host=8218cf,index=0,name=GeForce\ GTX\ 1070,pstate=P2,uuid=GPU-823bc202-6279-6f2c-d729-868a30f14d96 fan_speed=100i,memory_free=7563i,memory_total=8112i,memory_used=549i,temperature_gpu=53i,utilization_gpu=100i,utilization_memory=90i 1523991122000000000 diff --git a/plugins/inputs/nvidia_smi/nvidia_smi_test.go b/plugins/inputs/nvidia_smi/nvidia_smi_test.go index 7d0ec4666..6fd37b570 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi_test.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi_test.go @@ -1,99 +1,137 @@ package nvidia_smi import ( - "fmt" + "io/ioutil" + "path/filepath" "testing" + "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) -var payload = []byte(` - - - - GeForce GTX 1070 Ti - GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665 - - - - 1 - - - 16x - - - - 100 % - P8 - - 4096 MiB - 42 MiB - 4054 MiB - - Default - - 0 % - 0 % - - - 0 - 0 - 0 - - - 39 C - - - N/A - - - 135 MHz - 135 MHz - 405 MHz - 405 MHz - - -`) - -func TestGatherSMI(t *testing.T) { - var expectedMetric = struct { - tags map[string]string - fields map[string]interface{} +func TestGatherValidXML(t *testing.T) { + tests := []struct { + name string + filename string + expected []telegraf.Metric }{ - tags: map[string]string{ - "name": "GeForce GTX 1070 Ti", - "compute_mode": "Default", - "index": "0", - "pstate": "P8", - "uuid": "GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665", + { + name: "GeForce GTX 1070 Ti", + filename: "gtx-1070-ti.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "nvidia_smi", + map[string]string{ + "name": "GeForce GTX 1070 Ti", + "compute_mode": "Default", + "index": "0", + "pstate": "P8", + "uuid": "GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665", + }, + map[string]interface{}{ + "clocks_current_graphics": 135, + "clocks_current_memory": 405, + "clocks_current_sm": 135, + "clocks_current_video": 405, + "encoder_stats_average_fps": 0, + "encoder_stats_average_latency": 0, + "encoder_stats_session_count": 0, + "fan_speed": 100, + "memory_free": 4054, + "memory_total": 4096, + "memory_used": 42, + "pcie_link_gen_current": 1, + "pcie_link_width_current": 16, + "temperature_gpu": 39, + "utilization_gpu": 
0, + "utilization_memory": 0, + }, + time.Unix(0, 0)), + }, }, - fields: map[string]interface{}{ - "fan_speed": 100, - "memory_free": 4054, - "memory_used": 42, - "memory_total": 4096, - "temperature_gpu": 39, - "utilization_gpu": 0, - "utilization_memory": 0, - "pcie_link_gen_current": 1, - "pcie_link_width_current": 16, - "encoder_stats_session_count": 0, - "encoder_stats_average_fps": 0, - "encoder_stats_average_latency": 0, - "clocks_current_graphics": 135, - "clocks_current_sm": 135, - "clocks_current_memory": 405, - "clocks_current_video": 405, + { + name: "GeForce GTX 1660 Ti", + filename: "gtx-1660-ti.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "nvidia_smi", + map[string]string{ + "compute_mode": "Default", + "index": "0", + "name": "Graphics Device", + "pstate": "P8", + "uuid": "GPU-304a277d-3545-63b8-3a36-dfde3c992989", + }, + map[string]interface{}{ + "clocks_current_graphics": 300, + "clocks_current_memory": 405, + "clocks_current_sm": 300, + "clocks_current_video": 540, + "encoder_stats_average_fps": 0, + "encoder_stats_average_latency": 0, + "encoder_stats_session_count": 0, + "fan_speed": 0, + "memory_free": 5912, + "memory_total": 5912, + "memory_used": 0, + "pcie_link_gen_current": 1, + "pcie_link_width_current": 16, + "power_draw": 8.93, + "temperature_gpu": 40, + "utilization_gpu": 0, + "utilization_memory": 1, + }, + time.Unix(0, 0)), + }, + }, + { + name: "Quadro P400", + filename: "quadro-p400.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "nvidia_smi", + map[string]string{ + "compute_mode": "Default", + "index": "0", + "name": "Quadro P400", + "pstate": "P8", + "uuid": "GPU-8f750be4-dfbc-23b9-b33f-da729a536494", + }, + map[string]interface{}{ + "clocks_current_graphics": 139, + "clocks_current_memory": 405, + "clocks_current_sm": 139, + "clocks_current_video": 544, + "encoder_stats_average_fps": 0, + "encoder_stats_average_latency": 0, + "encoder_stats_session_count": 0, + "fan_speed": 34, + "memory_free": 1998, + "memory_total": 1998, + "memory_used": 0, + "pcie_link_gen_current": 1, + "pcie_link_width_current": 16, + "temperature_gpu": 33, + "utilization_gpu": 0, + "utilization_memory": 3, + }, + time.Unix(0, 0)), + }, }, } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator - acc := &testutil.Accumulator{} + octets, err := ioutil.ReadFile(filepath.Join("testdata", tt.filename)) + require.NoError(t, err) - gatherNvidiaSMI(payload, acc) - fmt.Println() + err = gatherNvidiaSMI(octets, &acc) + require.NoError(t, err) - require.Equal(t, 1, len(acc.Metrics)) - require.Equal(t, expectedMetric.fields, acc.Metrics[0].Fields) - require.Equal(t, expectedMetric.tags, acc.Metrics[0].Tags) + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + }) + } } From a686645bf3f0eebf5410f0c3ae55760564214d00 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 12 Nov 2019 16:40:39 -0800 Subject: [PATCH 1311/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 41102db42..a2a6a227d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ - [#6484](https://github.com/influxdata/telegraf/issues/6484): Show correct default settings in mysql sample config. - [#6583](https://github.com/influxdata/telegraf/issues/6583): Use 1h or 3h rain values as appropriate in openweathermap input. 
+- [#6573](https://github.com/influxdata/telegraf/issues/6573): Fix not a valid field error in Windows with nvidia input. ## v1.12.5 [2019-11-12] From 2a8735d1c6234f76c27564799c3e40235cc22d9d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 12 Nov 2019 16:41:46 -0800 Subject: [PATCH 1312/1815] Add missing testdata directory --- .../nvidia_smi/testdata/gtx-1070-ti.xml | 47 ++ .../nvidia_smi/testdata/gtx-1660-ti.xml | 189 ++++++++ .../nvidia_smi/testdata/quadro-p400.xml | 447 ++++++++++++++++++ 3 files changed, 683 insertions(+) create mode 100644 plugins/inputs/nvidia_smi/testdata/gtx-1070-ti.xml create mode 100644 plugins/inputs/nvidia_smi/testdata/gtx-1660-ti.xml create mode 100644 plugins/inputs/nvidia_smi/testdata/quadro-p400.xml diff --git a/plugins/inputs/nvidia_smi/testdata/gtx-1070-ti.xml b/plugins/inputs/nvidia_smi/testdata/gtx-1070-ti.xml new file mode 100644 index 000000000..3e3e3ec87 --- /dev/null +++ b/plugins/inputs/nvidia_smi/testdata/gtx-1070-ti.xml @@ -0,0 +1,47 @@ + + + + + GeForce GTX 1070 Ti + GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665 + + + + 1 + + + 16x + + + + 100 % + P8 + + 4096 MiB + 42 MiB + 4054 MiB + + Default + + 0 % + 0 % + + + 0 + 0 + 0 + + + 39 C + + + N/A + + + 135 MHz + 135 MHz + 405 MHz + 405 MHz + + + diff --git a/plugins/inputs/nvidia_smi/testdata/gtx-1660-ti.xml b/plugins/inputs/nvidia_smi/testdata/gtx-1660-ti.xml new file mode 100644 index 000000000..1a6c7d089 --- /dev/null +++ b/plugins/inputs/nvidia_smi/testdata/gtx-1660-ti.xml @@ -0,0 +1,189 @@ + + + Fri Mar 29 19:19:44 2019 + 418.43 + 10.1 + 1 + + Graphics Device + GeForce + Disabled + Disabled + Disabled + Disabled + 4000 + + N/A + N/A + + N/A + GPU-304a277d-3545-63b8-3a36-dfde3c992989 + 0 + 90.16.25.00.4C + No + 0x4300 + N/A + + G001.0000.02.04 + 1.1 + N/A + N/A + + + N/A + N/A + + + None + + + N/A + + + 43 + 00 + 0000 + 218410DE + 00000000:43:00.0 + 3FC81458 + + + 3 + 1 + + + 16x + 16x + + + + N/A + N/A + + 0 + 0 + 0 KB/s + 0 KB/s + + 0 % + P8 + + Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + + + 5912 MiB + 0 MiB + 5912 MiB + + + 256 MiB + 2 MiB + 254 MiB + + Default + + 0 % + 1 % + 0 % + 0 % + + + 0 + 0 + 0 + + + 0 + 0 + 0 + + + N/A + N/A + + + + N/A + N/A + N/A + N/A + + + N/A + N/A + N/A + N/A + + + + + N/A + N/A + + + N/A + N/A + + N/A + + + 40 C + 96 C + 93 C + 91 C + N/A + N/A + + + P8 + Supported + 8.93 W + 130.00 W + 130.00 W + 130.00 W + 70.00 W + 130.00 W + + + 300 MHz + 300 MHz + 405 MHz + 540 MHz + + + N/A + N/A + + + N/A + N/A + + + 2145 MHz + 2145 MHz + 4001 MHz + 1950 MHz + + + N/A + + + N/A + N/A + + N/A + + + + + + + diff --git a/plugins/inputs/nvidia_smi/testdata/quadro-p400.xml b/plugins/inputs/nvidia_smi/testdata/quadro-p400.xml new file mode 100644 index 000000000..ca9e2191e --- /dev/null +++ b/plugins/inputs/nvidia_smi/testdata/quadro-p400.xml @@ -0,0 +1,447 @@ + + + Mon Mar 11 17:03:27 2019 + 418.43 + 10.1 + 1 + + Quadro P400 + Quadro + Disabled + Disabled + Disabled + Disabled + 4000 + + N/A + N/A + + 0424418054852 + GPU-8f750be4-dfbc-23b9-b33f-da729a536494 + 0 + 86.07.3B.00.4A + No + 0x4300 + 900-5G212-1701-000 + + G212.0500.00.01 + 1.1 + N/A + N/A + + + N/A + N/A + + + None + + + N/A + + + 43 + 00 + 0000 + 1CB310DE + 00000000:43:00.0 + 11BE10DE + + + 3 + 1 + + + 16x + 16x + + + + N/A + N/A + + 0 + 0 + 0 KB/s + 0 KB/s + + 34 % + P8 + + Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + + + 1998 MiB + 0 MiB + 1998 MiB + + + 256 MiB + 2 MiB 
+ 254 MiB + + Default + + 0 % + 3 % + 0 % + 0 % + + + 0 + 0 + 0 + + + 0 + 0 + 0 + + + N/A + N/A + + + + + N/A + N/A + N/A + N/A + N/A + N/A + N/A + N/A + + + N/A + N/A + N/A + N/A + N/A + N/A + N/A + N/A + + + + + N/A + N/A + N/A + N/A + N/A + N/A + N/A + N/A + + + N/A + N/A + N/A + N/A + N/A + N/A + N/A + N/A + + + + + + N/A + N/A + + + N/A + N/A + + N/A + + + 33 C + 103 C + 100 C + N/A + N/A + N/A + + + P8 + N/A + N/A + N/A + N/A + N/A + N/A + N/A + + + 139 MHz + 139 MHz + 405 MHz + 544 MHz + + + 1227 MHz + 2005 MHz + + + 1227 MHz + 2005 MHz + + + 1252 MHz + 1252 MHz + 2005 MHz + 1126 MHz + + + 1252 MHz + + + N/A + N/A + + + + 2005 MHz + 1252 MHz + 1240 MHz + 1227 MHz + 1215 MHz + 1202 MHz + 1189 MHz + 1177 MHz + 1164 MHz + 1151 MHz + 1139 MHz + 1126 MHz + 1113 MHz + 1101 MHz + 1088 MHz + 1075 MHz + 1063 MHz + 1050 MHz + 1037 MHz + 1025 MHz + 1012 MHz + 999 MHz + 987 MHz + 974 MHz + 961 MHz + 949 MHz + 936 MHz + 923 MHz + 911 MHz + 898 MHz + 885 MHz + 873 MHz + 860 MHz + 847 MHz + 835 MHz + 822 MHz + 810 MHz + 797 MHz + 784 MHz + 772 MHz + 759 MHz + 746 MHz + 734 MHz + 721 MHz + 708 MHz + 696 MHz + 683 MHz + 670 MHz + 658 MHz + 645 MHz + 632 MHz + 620 MHz + 607 MHz + 594 MHz + 582 MHz + 569 MHz + 556 MHz + 544 MHz + 531 MHz + 518 MHz + 506 MHz + 493 MHz + 480 MHz + 468 MHz + 455 MHz + 442 MHz + 430 MHz + 417 MHz + 405 MHz + 392 MHz + 379 MHz + 367 MHz + 354 MHz + 341 MHz + 329 MHz + 316 MHz + 303 MHz + 291 MHz + 278 MHz + 265 MHz + 253 MHz + 240 MHz + 227 MHz + 215 MHz + 202 MHz + 189 MHz + 177 MHz + 164 MHz + 151 MHz + 139 MHz + + + 810 MHz + 1252 MHz + 1240 MHz + 1227 MHz + 1215 MHz + 1202 MHz + 1189 MHz + 1177 MHz + 1164 MHz + 1151 MHz + 1139 MHz + 1126 MHz + 1113 MHz + 1101 MHz + 1088 MHz + 1075 MHz + 1063 MHz + 1050 MHz + 1037 MHz + 1025 MHz + 1012 MHz + 999 MHz + 987 MHz + 974 MHz + 961 MHz + 949 MHz + 936 MHz + 923 MHz + 911 MHz + 898 MHz + 885 MHz + 873 MHz + 860 MHz + 847 MHz + 835 MHz + 822 MHz + 810 MHz + 797 MHz + 784 MHz + 772 MHz + 759 MHz + 746 MHz + 734 MHz + 721 MHz + 708 MHz + 696 MHz + 683 MHz + 670 MHz + 658 MHz + 645 MHz + 632 MHz + 620 MHz + 607 MHz + 594 MHz + 582 MHz + 569 MHz + 556 MHz + 544 MHz + 531 MHz + 518 MHz + 506 MHz + 493 MHz + 480 MHz + 468 MHz + 455 MHz + 442 MHz + 430 MHz + 417 MHz + 405 MHz + 392 MHz + 379 MHz + 367 MHz + 354 MHz + 341 MHz + 329 MHz + 316 MHz + 303 MHz + 291 MHz + 278 MHz + 265 MHz + 253 MHz + 240 MHz + 227 MHz + 215 MHz + 202 MHz + 189 MHz + 177 MHz + 164 MHz + 151 MHz + 139 MHz + + + 405 MHz + 607 MHz + 594 MHz + 582 MHz + 569 MHz + 556 MHz + 544 MHz + 531 MHz + 518 MHz + 506 MHz + 493 MHz + 480 MHz + 468 MHz + 455 MHz + 442 MHz + 430 MHz + 417 MHz + 405 MHz + 392 MHz + 379 MHz + 367 MHz + 354 MHz + 341 MHz + 329 MHz + 316 MHz + 303 MHz + 291 MHz + 278 MHz + 265 MHz + 253 MHz + 240 MHz + 227 MHz + 215 MHz + 202 MHz + 189 MHz + 177 MHz + 164 MHz + 151 MHz + 139 MHz + + + + + + + + + From 2156a6242e5063fa5dfa4c90b5663a7c066966be Mon Sep 17 00:00:00 2001 From: dbutler-starry Date: Tue, 12 Nov 2019 19:43:39 -0500 Subject: [PATCH 1313/1815] Add support for per output flush jitter (#6603) --- agent/agent.go | 6 ++++++ docs/CONFIGURATION.md | 12 +++++++++--- internal/config/config.go | 14 ++++++++++++++ internal/models/running_output.go | 1 + 4 files changed, 30 insertions(+), 3 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index e2ef79b84..aa8d07e67 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -503,6 +503,12 @@ func (a *Agent) runOutputs( interval = output.Config.FlushInterval } + jitter := jitter + // Overwrite agent 
flush_jitter if this plugin has its own. + if output.Config.FlushJitter != nil { + jitter = *output.Config.FlushJitter + } + wg.Add(1) go func(output *models.RunningOutput) { defer wg.Done() diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index d5e5ad072..428ffeab4 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -127,9 +127,11 @@ The agent table configures Telegraf and the defaults used across all plugins. flush_interval + flush_jitter. - **flush_jitter**: - Jitter the flush [interval][] by a random amount. This is primarily to avoid - large write spikes for users running a large number of telegraf instances. - ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s. + Default flush jitter for all outputs. This jitters the flush [interval][] + by a random amount. This is primarily to avoid large write spikes for users + running a large number of telegraf instances. ie, a jitter of 5s and interval + 10s means flushes will happen every 10-15s. + - **precision**: Collected metrics are rounded to the precision specified as an [interval][]. @@ -260,6 +262,8 @@ Parameters that can be used with any output plugin: - **alias**: Name an instance of a plugin. - **flush_interval**: The maximum time between flushes. Use this setting to override the agent `flush_interval` on a per plugin basis. +- **flush_jitter**: The amount of time to jitter the flush interval. Use this + setting to override the agent `flush_jitter` on a per plugin basis. - **metric_batch_size**: The maximum number of metrics to send at once. Use this setting to override the agent `metric_batch_size` on a per plugin basis. - **metric_buffer_limit**: The maximum number of unsent metrics to buffer. @@ -275,6 +279,7 @@ Override flush parameters for a single output: ```toml [agent] flush_interval = "10s" + flush_jitter = "5s" metric_batch_size = 1000 [[outputs.influxdb]] @@ -284,6 +289,7 @@ Override flush parameters for a single output: [[outputs.file]] files = [ "stdout" ] flush_interval = "1s" + flush_jitter = "1s" metric_batch_size = 10 ``` diff --git a/internal/config/config.go b/internal/config/config.go index 0d54dc566..d45e52c66 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -2026,6 +2026,19 @@ func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { } } + if node, ok := tbl.Fields["flush_jitter"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + dur, err := time.ParseDuration(str.Value) + if err != nil { + return nil, err + } + oc.FlushJitter = new(time.Duration) + *oc.FlushJitter = dur + } + } + } + if node, ok := tbl.Fields["metric_buffer_limit"]; ok { if kv, ok := node.(*ast.KeyValue); ok { if integer, ok := kv.Value.(*ast.Integer); ok { @@ -2059,6 +2072,7 @@ func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { } delete(tbl.Fields, "flush_interval") + delete(tbl.Fields, "flush_jitter") delete(tbl.Fields, "metric_buffer_limit") delete(tbl.Fields, "metric_batch_size") delete(tbl.Fields, "alias") diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 32e9d5ceb..752cf34ef 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -24,6 +24,7 @@ type OutputConfig struct { Filter Filter FlushInterval time.Duration + FlushJitter *time.Duration MetricBufferLimit int MetricBatchSize int } From 9a2b3bc91757b884b3e4d26efd7edd5f1cf31c9f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 12 Nov 2019 16:45:03 
-0800 Subject: [PATCH 1314/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a2a6a227d..94f88e5d1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ - [#6283](https://github.com/influxdata/telegraf/pull/6283): Add gathering of RabbitMQ federation link metrics. - [#6356](https://github.com/influxdata/telegraf/pull/6356): Add bearer token defaults for Kubernetes plugins. - [#5870](https://github.com/influxdata/telegraf/pull/5870): Add support for SNMP over TCP. +- [#6603](https://github.com/influxdata/telegraf/pull/6603): Add support for per output flush jitter. #### Bugfixes From fa2f0fff4e131f4210dba714a5e8f281a6d1a6fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=99=88=E6=96=B9=E8=88=9F?= Date: Thu, 14 Nov 2019 04:56:01 +0800 Subject: [PATCH 1315/1815] Fix influxdb output serialization on connection closed (#6621) --- internal/internal.go | 20 ++++++++++++++- internal/internal_test.go | 34 +++++++++++++++++++++++++ plugins/inputs/http/http.go | 9 ++++--- plugins/outputs/http/http.go | 4 ++- plugins/outputs/influxdb/http.go | 31 +++++++++++++++++----- plugins/outputs/influxdb/influxdb.go | 21 ++++++++------- plugins/outputs/influxdb_v2/http.go | 31 +++++++++++++++++----- plugins/outputs/influxdb_v2/influxdb.go | 19 ++++++++------ 8 files changed, 132 insertions(+), 37 deletions(-) diff --git a/internal/internal.go b/internal/internal.go index af36460e3..12e4b3af2 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -16,6 +16,7 @@ import ( "runtime" "strconv" "strings" + "sync" "syscall" "time" "unicode" @@ -50,6 +51,11 @@ type Number struct { Value float64 } +type ReadWaitCloser struct { + pipeReader *io.PipeReader + wg sync.WaitGroup +} + // SetVersion sets the telegraf agent version func SetVersion(v string) error { if version != "" { @@ -281,14 +287,25 @@ func ExitStatus(err error) (int, bool) { return 0, false } +func (r *ReadWaitCloser) Close() error { + err := r.pipeReader.Close() + r.wg.Wait() // wait for the gzip goroutine to finish + return err +} + // CompressWithGzip takes an io.Reader as input and pipes // it through a gzip.Writer returning an io.Reader containing // the gzipped data. // An error is returned if passing data to the gzip.Writer fails -func CompressWithGzip(data io.Reader) (io.Reader, error) { +func CompressWithGzip(data io.Reader) (io.ReadCloser, error) { pipeReader, pipeWriter := io.Pipe() gzipWriter := gzip.NewWriter(pipeWriter) + rc := &ReadWaitCloser{ + pipeReader: pipeReader, + } + + rc.wg.Add(1) var err error go func() { _, err = io.Copy(gzipWriter, data) @@ -296,6 +313,7 @@ func CompressWithGzip(data io.Reader) (io.Reader, error) { // subsequent reads from the read half of the pipe will // return no bytes and the error err, or EOF if err is nil. 
pipeWriter.CloseWithError(err) + rc.wg.Done() }() return pipeReader, err diff --git a/internal/internal_test.go b/internal/internal_test.go index f4627ee74..bb186f5fc 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -3,6 +3,8 @@ package internal import ( "bytes" "compress/gzip" + "crypto/rand" + "io" "io/ioutil" "log" "os/exec" @@ -232,6 +234,38 @@ func TestCompressWithGzip(t *testing.T) { assert.Equal(t, testData, string(output)) } +type mockReader struct { + readN uint64 // record the number of calls to Read +} + +func (r *mockReader) Read(p []byte) (n int, err error) { + r.readN++ + return rand.Read(p) +} + +func TestCompressWithGzipEarlyClose(t *testing.T) { + mr := &mockReader{} + + rc, err := CompressWithGzip(mr) + assert.NoError(t, err) + + n, err := io.CopyN(ioutil.Discard, rc, 10000) + assert.NoError(t, err) + assert.Equal(t, int64(10000), n) + + r1 := mr.readN + err = rc.Close() + assert.NoError(t, err) + + n, err = io.CopyN(ioutil.Discard, rc, 10000) + assert.Error(t, io.EOF, err) + assert.Equal(t, int64(0), n) + + r2 := mr.readN + // no more read to the source after closing + assert.Equal(t, r1, r2) +} + func TestVersionAlreadySet(t *testing.T) { err := SetVersion("foo") assert.Nil(t, err) diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index dc155f254..13c9cd170 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -153,6 +153,7 @@ func (h *HTTP) gatherURL( if err != nil { return err } + defer body.Close() request, err := http.NewRequest(h.Method, url, body) if err != nil { @@ -216,16 +217,16 @@ func (h *HTTP) gatherURL( return nil } -func makeRequestBodyReader(contentEncoding, body string) (io.Reader, error) { - var err error +func makeRequestBodyReader(contentEncoding, body string) (io.ReadCloser, error) { var reader io.Reader = strings.NewReader(body) if contentEncoding == "gzip" { - reader, err = internal.CompressWithGzip(reader) + rc, err := internal.CompressWithGzip(reader) if err != nil { return nil, err } + return rc, nil } - return reader, nil + return ioutil.NopCloser(reader), nil } func init() { diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 1967b6ef9..746cba346 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -176,10 +176,12 @@ func (h *HTTP) write(reqBody []byte) error { var err error if h.ContentEncoding == "gzip" { - reqBodyBuffer, err = internal.CompressWithGzip(reqBodyBuffer) + rc, err := internal.CompressWithGzip(reqBodyBuffer) if err != nil { return err } + defer rc.Close() + reqBodyBuffer = rc } req, err := http.NewRequest(h.Method, h.URL, reqBodyBuffer) diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index b30a8206d..d449c9456 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" "net" "net/http" "net/url" @@ -288,7 +289,12 @@ func (c *httpClient) writeBatch(ctx context.Context, db string, metrics []telegr return err } - reader := influx.NewReader(metrics, c.config.Serializer) + reader, err := c.requestBodyReader(metrics) + if err != nil { + return err + } + defer reader.Close() + req, err := c.makeWriteRequest(url, reader) if err != nil { return err @@ -386,12 +392,6 @@ func (c *httpClient) makeQueryRequest(query string) (*http.Request, error) { func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request, error) { var err error - if c.config.ContentEncoding == "gzip" { - 
body, err = internal.CompressWithGzip(body) - if err != nil { - return nil, err - } - } req, err := http.NewRequest("POST", url, body) if err != nil { @@ -408,6 +408,23 @@ func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request return req, nil } +// requestBodyReader wraps the io.Reader from influx.NewReader in an io.ReadCloser, which is useful to quickly close the write +// side of the connection in case of error +func (c *httpClient) requestBodyReader(metrics []telegraf.Metric) (io.ReadCloser, error) { + reader := influx.NewReader(metrics, c.config.Serializer) + + if c.config.ContentEncoding == "gzip" { + rc, err := internal.CompressWithGzip(reader) + if err != nil { + return nil, err + } + + return rc, nil + } + + return ioutil.NopCloser(reader), nil +} + func (c *httpClient) addHeaders(req *http.Request) { if c.config.Username != "" || c.config.Password != "" { req.SetBasicAuth(c.config.Username, c.config.Password) diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 01a09208a..50161e832 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -57,8 +57,7 @@ type InfluxDB struct { CreateHTTPClientF func(config *HTTPConfig) (Client, error) CreateUDPClientF func(config *UDPConfig) (Client, error) - serializer *influx.Serializer - Log telegraf.Logger + Log telegraf.Logger } var sampleConfig = ` @@ -145,11 +144,6 @@ func (i *InfluxDB) Connect() error { urls = append(urls, defaultURL) } - i.serializer = influx.NewSerializer() - if i.InfluxUintSupport { - i.serializer.SetFieldTypeSupport(influx.UintSupport) - } - for _, u := range urls { parts, err := url.Parse(u) if err != nil { @@ -237,7 +231,7 @@ func (i *InfluxDB) udpClient(url *url.URL) (Client, error) { config := &UDPConfig{ URL: url, MaxPayloadSize: int(i.UDPPayload.Size), - Serializer: i.serializer, + Serializer: i.newSerializer(), Log: i.Log, } @@ -271,7 +265,7 @@ func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL) SkipDatabaseCreation: i.SkipDatabaseCreation, RetentionPolicy: i.RetentionPolicy, Consistency: i.WriteConsistency, - Serializer: i.serializer, + Serializer: i.newSerializer(), Log: i.Log, } @@ -291,6 +285,15 @@ func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL) return c, nil } +func (i *InfluxDB) newSerializer() *influx.Serializer { + serializer := influx.NewSerializer() + if i.InfluxUintSupport { + serializer.SetFieldTypeSupport(influx.UintSupport) + } + + return serializer +} + func init() { outputs.Add("influxdb", func() telegraf.Output { return &InfluxDB{ diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index b8706c9a5..b94df889b 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "io" + "io/ioutil" "log" "net" "net/http" @@ -214,7 +215,12 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te return err } - reader := influx.NewReader(metrics, c.serializer) + reader, err := c.requestBodyReader(metrics) + if err != nil { + return err + } + defer reader.Close() + req, err := c.makeWriteRequest(url, reader) if err != nil { return err @@ -282,12 +288,6 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request, error) { var err error - if c.ContentEncoding == "gzip" { - body, err = internal.CompressWithGzip(body) - if err 
!= nil { - return nil, err - } - } req, err := http.NewRequest("POST", url, body) if err != nil { @@ -304,6 +304,23 @@ func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request return req, nil } +// requestBodyReader warp io.Reader from influx.NewReader to io.ReadCloser, which is usefully to fast close the write +// side of the connection in case of error +func (c *httpClient) requestBodyReader(metrics []telegraf.Metric) (io.ReadCloser, error) { + reader := influx.NewReader(metrics, c.serializer) + + if c.ContentEncoding == "gzip" { + rc, err := internal.CompressWithGzip(reader) + if err != nil { + return nil, err + } + + return rc, nil + } + + return ioutil.NopCloser(reader), nil +} + func (c *httpClient) addHeaders(req *http.Request) { for header, value := range c.Headers { req.Header.Set(header, value) diff --git a/plugins/outputs/influxdb_v2/influxdb.go b/plugins/outputs/influxdb_v2/influxdb.go index 972773f79..4e2314691 100644 --- a/plugins/outputs/influxdb_v2/influxdb.go +++ b/plugins/outputs/influxdb_v2/influxdb.go @@ -96,8 +96,7 @@ type InfluxDB struct { UintSupport bool `toml:"influx_uint_support"` tls.ClientConfig - clients []Client - serializer *influx.Serializer + clients []Client } func (i *InfluxDB) Connect() error { @@ -107,11 +106,6 @@ func (i *InfluxDB) Connect() error { i.URLs = append(i.URLs, defaultURL) } - i.serializer = influx.NewSerializer() - if i.UintSupport { - i.serializer.SetFieldTypeSupport(influx.UintSupport) - } - for _, u := range i.URLs { parts, err := url.Parse(u) if err != nil { @@ -196,7 +190,7 @@ func (i *InfluxDB) getHTTPClient(ctx context.Context, url *url.URL, proxy *url.U UserAgent: i.UserAgent, ContentEncoding: i.ContentEncoding, TLSConfig: tlsConfig, - Serializer: i.serializer, + Serializer: i.newSerializer(), } c, err := NewHTTPClient(config) @@ -207,6 +201,15 @@ func (i *InfluxDB) getHTTPClient(ctx context.Context, url *url.URL, proxy *url.U return c, nil } +func (i *InfluxDB) newSerializer() *influx.Serializer { + serializer := influx.NewSerializer() + if i.UintSupport { + serializer.SetFieldTypeSupport(influx.UintSupport) + } + + return serializer +} + func init() { outputs.Add("influxdb_v2", func() telegraf.Output { return &InfluxDB{ From 4f4063ba01e03e9aa1fba547a9b682c1099d411b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 13 Nov 2019 12:58:51 -0800 Subject: [PATCH 1316/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 94f88e5d1..c795e9870 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,7 @@ - [#6484](https://github.com/influxdata/telegraf/issues/6484): Show correct default settings in mysql sample config. - [#6583](https://github.com/influxdata/telegraf/issues/6583): Use 1h or 3h rain values as appropriate in openweathermap input. - [#6573](https://github.com/influxdata/telegraf/issues/6573): Fix not a valid field error in Windows with nvidia input. +- [#6614](https://github.com/influxdata/telegraf/issues/6614): Fix influxdb output serialization on connection closed. 
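
For background on the serialization fix above, the key property of `requestBodyReader` is that the request body is closable, so a failed request can abandon the stream instead of serializing the remainder of the batch. A minimal, self-contained sketch of a pipe-based gzip wrapper with the same property (an illustration only, not the actual `internal.CompressWithGzip` implementation, whose source is not shown here):

```go
package main

import (
	"compress/gzip"
	"io"
	"io/ioutil"
	"strings"
)

// compressReader returns an io.ReadCloser yielding the gzip-compressed
// contents of data. Compression runs in a goroutine feeding a pipe, so the
// consumer can Close the reader early (e.g. on a request error) and the
// writer side unblocks through the pipe's error propagation.
func compressReader(data io.Reader) io.ReadCloser {
	pr, pw := io.Pipe()
	gw := gzip.NewWriter(pw)

	go func() {
		_, err := io.Copy(gw, data)
		gw.Close()
		pw.CloseWithError(err) // a nil err closes the pipe normally
	}()

	return pr
}

func main() {
	rc := compressReader(strings.NewReader("cpu usage_idle=90 1529875740000000000\n"))
	defer rc.Close() // closing early is safe; the goroutine is unblocked
	ioutil.ReadAll(rc)
}
```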
## v1.12.5 [2019-11-12] From 20bb673a0e7edb8c3725120a95a230e0a429346d Mon Sep 17 00:00:00 2001 From: Nick Neisen Date: Wed, 13 Nov 2019 14:00:41 -0700 Subject: [PATCH 1317/1815] Add a nameable file tag to file input plugin (#6650) --- plugins/inputs/file/README.md | 5 +++++ plugins/inputs/file/file.go | 13 +++++++++++-- plugins/inputs/file/file_test.go | 28 ++++++++++++++++++++++++++++ 3 files changed, 44 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/file/README.md b/plugins/inputs/file/README.md index 4358b67ad..24139973b 100644 --- a/plugins/inputs/file/README.md +++ b/plugins/inputs/file/README.md @@ -7,6 +7,7 @@ Files will always be read in their entirety, if you wish to tail/follow a file use the [tail input plugin](/plugins/inputs/tail) instead. ### Configuration: + ```toml [[inputs.file]] ## Files to parse each interval. @@ -22,4 +23,8 @@ use the [tail input plugin](/plugins/inputs/tail) instead. ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + + ## Name a tag containing the name of the file the data was parsed from. Leave empty + ## to disable. + # file_tag = "" ``` diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go index b93a7ba99..c601d4875 100644 --- a/plugins/inputs/file/file.go +++ b/plugins/inputs/file/file.go @@ -3,6 +3,7 @@ package file import ( "fmt" "io/ioutil" + "path/filepath" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/globpath" @@ -11,8 +12,9 @@ import ( ) type File struct { - Files []string `toml:"files"` - parser parsers.Parser + Files []string `toml:"files"` + FileTag string `toml:"file_tag"` + parser parsers.Parser filenames []string } @@ -31,6 +33,10 @@ const sampleConfig = ` ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + + ## Name a tag containing the name of the file the data was parsed from. Leave empty + ## to disable. 
+ # file_tag = "" ` // SampleConfig returns the default configuration of the Input @@ -54,6 +60,9 @@ func (f *File) Gather(acc telegraf.Accumulator) error { } for _, m := range metrics { + if f.FileTag != "" { + m.AddTag(f.FileTag, filepath.Base(k)) + } acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) } } diff --git a/plugins/inputs/file/file_test.go b/plugins/inputs/file/file_test.go index 43322c2e8..19341fc08 100644 --- a/plugins/inputs/file/file_test.go +++ b/plugins/inputs/file/file_test.go @@ -21,6 +21,34 @@ func TestRefreshFilePaths(t *testing.T) { require.NoError(t, err) assert.Equal(t, 2, len(r.filenames)) } + +func TestFileTag(t *testing.T) { + acc := testutil.Accumulator{} + wd, err := os.Getwd() + require.NoError(t, err) + r := File{ + Files: []string{filepath.Join(wd, "dev/testfiles/json_a.log")}, + FileTag: "filename", + } + + parserConfig := parsers.Config{ + DataFormat: "json", + } + nParser, err := parsers.NewParser(&parserConfig) + assert.NoError(t, err) + r.parser = nParser + + err = r.Gather(&acc) + require.NoError(t, err) + + for _, m := range acc.Metrics { + for key, value := range m.Tags { + assert.Equal(t, r.FileTag, key) + assert.Equal(t, filepath.Base(r.Files[0]), value) + } + } +} + func TestJSONParserCompile(t *testing.T) { var acc testutil.Accumulator wd, _ := os.Getwd() From 9211ec633edfa8a9152ede7573b7eda742788cb1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 13 Nov 2019 13:01:25 -0800 Subject: [PATCH 1318/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c795e9870..ae7baf4b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ - [#6356](https://github.com/influxdata/telegraf/pull/6356): Add bearer token defaults for Kubernetes plugins. - [#5870](https://github.com/influxdata/telegraf/pull/5870): Add support for SNMP over TCP. - [#6603](https://github.com/influxdata/telegraf/pull/6603): Add support for per output flush jitter. +- [#6650](https://github.com/influxdata/telegraf/pull/6650): Add a nameable file tag to file input plugin. 
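
As a quick illustration of the entry above, a minimal configuration using the new tag (the file path here is hypothetical) might look like:

```toml
[[inputs.file]]
  ## Hypothetical path; any pattern accepted by the plugin works.
  files = ["/var/log/app/metrics.out"]
  data_format = "influx"

  ## Every metric parsed from metrics.out is tagged filename=metrics.out.
  file_tag = "filename"
```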
#### Bugfixes From 48c271640c3a7f9fd65aec11907081a386d27f7a Mon Sep 17 00:00:00 2001 From: dbaltor <39310315+dbaltor@users.noreply.github.com> Date: Wed, 13 Nov 2019 21:37:05 +0000 Subject: [PATCH 1319/1815] Upgrade Azure/go-autorest to 13.0.0 (#6656) --- Gopkg.lock | 21 ++++++++++++--------- Gopkg.toml | 2 +- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index f9bba80b3..2671dd975 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -64,18 +64,21 @@ version = "0.2.0" [[projects]] - digest = "1:5923e22a060ab818a015593422f9e8a35b9d881d4cfcfed0669a82959b11c7ee" + digest = "1:e4a02906493a47ee87ef61aeea130ce6624da07349a6dc62494a4e72b550ca8e" name = "github.com/Azure/go-autorest" packages = [ "autorest", "autorest/adal", "autorest/azure", "autorest/azure/auth", + "autorest/azure/cli", "autorest/date", + "logger", + "tracing", ] pruneopts = "" - revision = "1f7cd6cfe0adea687ad44a512dfe76140f804318" - version = "v10.12.0" + revision = "3492b2aff5036c67228ab3c7dba3577c871db200" + version = "v13.3.0" [[projects]] branch = "master" @@ -301,12 +304,12 @@ version = "v3.2.0" [[projects]] - branch = "master" - digest = "1:654ac9799e7a8a586d8690bb2229a4f3408bbfe2c5494bf4dfe043053eeb5496" + digest = "1:459dfcae44c32c1a6831fb99c75b40e7139aa800a04f55f6e47fedb33ee4407d" name = "github.com/dimchansky/utfbom" packages = ["."] pruneopts = "" - revision = "6c6132ff69f0f6c088739067407b5d32c52e1d0f" + revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c" + version = "v1.1.0" [[projects]] digest = "1:522eff2a1f014a64fb403db60fc0110653e4dc5b59779894d208e697b0708ddc" @@ -852,12 +855,12 @@ version = "v1.0.10" [[projects]] - branch = "master" - digest = "1:99651e95333755cbe5c9768c1b80031300acca64a80870b40309202b32585a5a" + digest = "1:6dbb0eb72090871f2e58d1e37973fe3cb8c0f45f49459398d3fc740cb30e13bd" name = "github.com/mitchellh/go-homedir" packages = ["."] pruneopts = "" - revision = "3864e76763d94a6df2f9960b16a20a33da9f9a66" + revision = "af06845cf3004701891bf4fdb884bfe4920b3727" + version = "v1.1.0" [[projects]] branch = "master" diff --git a/Gopkg.toml b/Gopkg.toml index f5eeaabcc..e75e0d843 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -231,7 +231,7 @@ [[constraint]] name = "github.com/Azure/go-autorest" - version = "10.12.0" + version = "^13.0.0" [[constraint]] name = "github.com/Azure/azure-storage-queue-go" From 0c918b099b583803dce02421d8eeb5275407acab Mon Sep 17 00:00:00 2001 From: Nick Neisen Date: Wed, 13 Nov 2019 14:38:33 -0700 Subject: [PATCH 1320/1815] Add source and port tags to jenkins plugin (#6641) --- plugins/inputs/jenkins/README.md | 4 +++- plugins/inputs/jenkins/jenkins.go | 10 +++++++++- plugins/inputs/jenkins/jenkins_test.go | 1 + 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md index 80d6de0be..55dd4bb6b 100644 --- a/plugins/inputs/jenkins/README.md +++ b/plugins/inputs/jenkins/README.md @@ -8,7 +8,7 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API ```toml [[inputs.jenkins]] - ## The Jenkins URL + ## The Jenkins URL in the format "schema://host:port" url = "http://my-jenkins-instance:8080" # username = "admin" # password = "admin" @@ -59,6 +59,7 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API - temp_path - node_name - status ("online", "offline") + - source - fields: - disk_available - temp_available @@ -74,6 +75,7 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API - name - parents 
- result + - source - fields: - duration - result_code (0 = SUCCESS, 1 = FAILURE, 2 = NOT_BUILD, 3 = UNSTABLE, 4 = ABORTED) diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go index c80463589..528d99c77 100644 --- a/plugins/inputs/jenkins/jenkins.go +++ b/plugins/inputs/jenkins/jenkins.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "net/url" "strconv" "strings" "sync" @@ -43,7 +44,7 @@ type Jenkins struct { } const sampleConfig = ` - ## The Jenkins URL + ## The Jenkins URL in the format "schema://host:port" url = "http://my-jenkins-instance:8080" # username = "admin" # password = "admin" @@ -190,6 +191,13 @@ func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error { tags["status"] = "offline" } + u, err := url.Parse(j.URL) + if err != nil { + return err + } + tags["source"] = u.Hostname() + tags["port"] = u.Port() + fields := make(map[string]interface{}) fields["num_executors"] = n.NumExecutors diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go index b8c713de0..dcbb5a46d 100644 --- a/plugins/inputs/jenkins/jenkins_test.go +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -181,6 +181,7 @@ func TestGatherNodeData(t *testing.T) { "status": "online", "disk_path": "/path/1", "temp_path": "/path/2", + "source": "127.0.0.1", }, Fields: map[string]interface{}{ "response_time": int64(10032), From 122ec0fa396371a9d5adc25ab0d0d6121a569e99 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 13 Nov 2019 14:45:56 -0800 Subject: [PATCH 1321/1815] Build the nats input on freebsd when cgo is enabled (#6658) --- plugins/inputs/nats/nats.go | 6 ++---- plugins/inputs/nats/nats_freebsd.go | 2 +- plugins/inputs/nats/nats_test.go | 2 +- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/nats/nats.go b/plugins/inputs/nats/nats.go index ba1cc803c..83e262ec8 100644 --- a/plugins/inputs/nats/nats.go +++ b/plugins/inputs/nats/nats.go @@ -1,20 +1,18 @@ -// +build !freebsd +// +build !freebsd freebsd,cgo package nats import ( + "encoding/json" "io/ioutil" "net/http" "net/url" "path" "time" - "encoding/json" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" - gnatsd "github.com/nats-io/gnatsd/server" ) diff --git a/plugins/inputs/nats/nats_freebsd.go b/plugins/inputs/nats/nats_freebsd.go index c23a6eec5..08d08ba76 100644 --- a/plugins/inputs/nats/nats_freebsd.go +++ b/plugins/inputs/nats/nats_freebsd.go @@ -1,3 +1,3 @@ -// +build freebsd +// +build freebsd,!cgo package nats diff --git a/plugins/inputs/nats/nats_test.go b/plugins/inputs/nats/nats_test.go index ef387f7e4..ece22288f 100644 --- a/plugins/inputs/nats/nats_test.go +++ b/plugins/inputs/nats/nats_test.go @@ -1,4 +1,4 @@ -// +build !freebsd +// +build !freebsd freebsd,cgo package nats From 7a90ddd1b83ded7359138b3d43cc4b31ca189bcf Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 15 Nov 2019 18:52:55 -0800 Subject: [PATCH 1322/1815] Log no metrics found at debug level in cloudwatch input (#6665) --- agent/accumulator.go | 2 +- plugins/inputs/cloudwatch/cloudwatch.go | 11 ++++++++--- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/agent/accumulator.go b/agent/accumulator.go index 6824249f6..21146e3e2 100644 --- a/agent/accumulator.go +++ b/agent/accumulator.go @@ -111,7 +111,7 @@ func (ac *accumulator) AddError(err error) { return } NErrors.Incr(1) - log.Printf("D! [%s] Error in plugin: %v", ac.maker.LogName(), err) + log.Printf("E! 
[%s] Error in plugin: %v", ac.maker.LogName(), err) } func (ac *accumulator) SetPrecision(precision time.Duration) { diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index 5af281cfc..be4ae3700 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -1,7 +1,6 @@ package cloudwatch import ( - "errors" "fmt" "net" "net/http" @@ -12,7 +11,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatch" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" @@ -44,6 +42,8 @@ type ( CacheTTL internal.Duration `toml:"cache_ttl"` RateLimit int `toml:"ratelimit"` + Log telegraf.Logger `toml:"-"` + client cloudwatchClient statFilter filter.Filter metricCache *metricCache @@ -197,6 +197,10 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { return err } + if len(queries) == 0 { + return nil + } + // Limit concurrency or we can easily exhaust user connection limit. // See cloudwatch API request limits: // http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html @@ -481,7 +485,8 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) ([]*cloudw } if len(dataQueries) == 0 { - return nil, errors.New("no metrics found to collect") + c.Log.Debug("no metrics found to collect") + return nil, nil } if c.metricCache == nil { From a9ec5fc2094a1ab5cba9bf1e9fe077c58fe35671 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 15 Nov 2019 18:54:59 -0800 Subject: [PATCH 1323/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ae7baf4b4..651ede059 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,12 @@ - [#6573](https://github.com/influxdata/telegraf/issues/6573): Fix not a valid field error in Windows with nvidia input. - [#6614](https://github.com/influxdata/telegraf/issues/6614): Fix influxdb output serialization on connection closed. +## v1.12.6 [unreleased] + +#### Bugfixes + +- [#6666](https://github.com/influxdata/telegraf/issues/6666): Fix many plugin errors are logged at debug logging level. 
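
The entry above hinges on telegraf's level-prefix logging convention, visible in the accumulator diff earlier: the first token of the message selects the level. A small sketch of that convention (the helper is hypothetical; only the `E!`/`D!` prefixes come from the code above):

```go
package main

import "log"

// logAt prefixes the message with a level marker: "E!" lines surface at the
// default log level, while "D!" lines only appear when debug logging is on.
func logAt(level, plugin, msg string) {
	log.Printf("%s! [%s] %s", level, plugin, msg)
}

func main() {
	logAt("E", "inputs.cloudwatch", "Error in plugin: request failed")
	logAt("D", "inputs.cloudwatch", "no metrics found to collect")
}
```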
+ ## v1.12.5 [2019-11-12] #### Bugfixes From 1700cfb1c741d303cce4fd9f9aa52c52f6a91ac8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 18 Nov 2019 10:24:22 -0800 Subject: [PATCH 1324/1815] Use nanosecond precision in docker_log input (#6663) --- plugins/inputs/docker_log/docker_log.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go index 81268f5f5..7cb2d94be 100644 --- a/plugins/inputs/docker_log/docker_log.go +++ b/plugins/inputs/docker_log/docker_log.go @@ -203,6 +203,7 @@ func (d *DockerLogs) matchedContainerName(names []string) string { func (d *DockerLogs) Gather(acc telegraf.Accumulator) error { ctx := context.Background() + acc.SetPrecision(time.Nanosecond) ctx, cancel := context.WithTimeout(ctx, d.Timeout.Duration) defer cancel() From e8d6d445f464113cb1b7df47e8c9f509dc188e73 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 18 Nov 2019 10:25:15 -0800 Subject: [PATCH 1325/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 651ede059..409d16405 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,6 +54,7 @@ #### Bugfixes - [#6666](https://github.com/influxdata/telegraf/issues/6666): Fix many plugin errors are logged at debug logging level. +- [#6652](https://github.com/influxdata/telegraf/issues/6652): Use nanosecond precision in docker_log input. ## v1.12.5 [2019-11-12] From 9f05163c5321fe82378cb36bb742cf8d2a0811d3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 18 Nov 2019 10:27:31 -0800 Subject: [PATCH 1326/1815] Fix interface option with method = native in ping input (#6667) --- plugins/inputs/ping/ping.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index de3c5fe8f..195c9d2d7 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -118,7 +118,7 @@ func (*Ping) SampleConfig() string { } func (p *Ping) Gather(acc telegraf.Accumulator) error { - if p.Interface != "" && p.listenAddr != "" { + if p.Interface != "" && p.listenAddr == "" { p.listenAddr = getAddr(p.Interface) } From 169ba2ecc4a1f57a72222652a295d398471f98f0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 18 Nov 2019 10:28:27 -0800 Subject: [PATCH 1327/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 409d16405..25b31f159 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -55,6 +55,7 @@ - [#6666](https://github.com/influxdata/telegraf/issues/6666): Fix many plugin errors are logged at debug logging level. - [#6652](https://github.com/influxdata/telegraf/issues/6652): Use nanosecond precision in docker_log input. +- [#6642](https://github.com/influxdata/telegraf/issues/6642): Fix interface option with method = native in ping input. 
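
To make the ping fix above concrete: the old guard `p.listenAddr != ""` only resolved the interface address when one was already set, so a configured `interface` was silently ignored with `method = "native"`. A stripped-down (hypothetical) reproduction of the corrected lazy resolution:

```go
package main

import "fmt"

type pinger struct {
	Interface  string // user-configured interface name
	listenAddr string // resolved lazily on first use
}

// resolve mirrors the corrected condition: look up the source address only
// when an interface is configured and no address has been resolved yet.
func (p *pinger) resolve(lookup func(iface string) string) {
	if p.Interface != "" && p.listenAddr == "" {
		p.listenAddr = lookup(p.Interface)
	}
}

func main() {
	p := &pinger{Interface: "eth0"}
	p.resolve(func(string) string { return "192.0.2.10" }) // stand-in for getAddr
	fmt.Println(p.listenAddr) // "192.0.2.10"
}
```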
## v1.12.5 [2019-11-12] From bc8769ba2436ebed95e20cc1427064558d8d3f6b Mon Sep 17 00:00:00 2001 From: Lance O'Connor Date: Mon, 18 Nov 2019 12:38:34 -0800 Subject: [PATCH 1328/1815] Add Splunk MultiMetric support (#6640) --- internal/config/config.go | 13 ++ plugins/serializers/registry.go | 9 +- plugins/serializers/splunkmetric/README.md | 35 +++- .../serializers/splunkmetric/splunkmetric.go | 177 +++++++++++++----- .../splunkmetric/splunkmetric_test.go | 80 ++++++-- 5 files changed, 249 insertions(+), 65 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index d45e52c66..3ef4cee58 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1952,6 +1952,18 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error } } + if node, ok := tbl.Fields["splunkmetric_multimetric"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if b, ok := kv.Value.(*ast.Boolean); ok { + var err error + c.SplunkmetricMultiMetric, err = b.Boolean() + if err != nil { + return nil, err + } + } + } + } + if node, ok := tbl.Fields["wavefront_source_override"]; ok { if kv, ok := node.(*ast.KeyValue); ok { if ary, ok := kv.Value.(*ast.Array); ok { @@ -1985,6 +1997,7 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error delete(tbl.Fields, "template") delete(tbl.Fields, "json_timestamp_units") delete(tbl.Fields, "splunkmetric_hec_routing") + delete(tbl.Fields, "splunkmetric_multimetric") delete(tbl.Fields, "wavefront_source_override") delete(tbl.Fields, "wavefront_use_strict") return serializers.NewSerializer(c) diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index cfdb784cc..aae590f78 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -73,6 +73,9 @@ type Config struct { // Include HEC routing fields for splunkmetric output HecRouting bool + // Enable Splunk MultiMetric output (Splunk 8.0+) + SplunkmetricMultiMetric bool + // Point tags to use as the source name for Wavefront (if none found, host will be used). WavefrontSourceOverride []string @@ -93,7 +96,7 @@ func NewSerializer(config *Config) (Serializer, error) { case "json": serializer, err = NewJsonSerializer(config.TimestampUnits) case "splunkmetric": - serializer, err = NewSplunkmetricSerializer(config.HecRouting) + serializer, err = NewSplunkmetricSerializer(config.HecRouting, config.SplunkmetricMultiMetric) case "nowmetric": serializer, err = NewNowSerializer() case "carbon2": @@ -118,8 +121,8 @@ func NewCarbon2Serializer() (Serializer, error) { return carbon2.NewSerializer() } -func NewSplunkmetricSerializer(splunkmetric_hec_routing bool) (Serializer, error) { - return splunkmetric.NewSerializer(splunkmetric_hec_routing) +func NewSplunkmetricSerializer(splunkmetric_hec_routing bool, splunkmetric_multimetric bool) (Serializer, error) { + return splunkmetric.NewSerializer(splunkmetric_hec_routing, splunkmetric_multimetric) } func NewNowSerializer() (Serializer, error) { diff --git a/plugins/serializers/splunkmetric/README.md b/plugins/serializers/splunkmetric/README.md index 552b90ea4..47ad8e1bf 100644 --- a/plugins/serializers/splunkmetric/README.md +++ b/plugins/serializers/splunkmetric/README.md @@ -27,6 +27,36 @@ In the above snippet, the following keys are dimensions: * dc * user +## Using Multimetric output + +Starting with Splunk Enterprise and Splunk Cloud 8.0, you can now send multiple metric values in one payload. 
This means, for example, that
+you can send all of your CPU stats in one JSON struct; an example event looks like:
+
+```javascript
+{
+    "time": 1572469920,
+    "event": "metric",
+    "host": "mono.local",
+    "fields": {
+        "_config_hecRouting": false,
+        "_config_multiMetric": true,
+        "class": "osx",
+        "cpu": "cpu0",
+        "metric_name:telegraf.cpu.usage_guest": 0,
+        "metric_name:telegraf.cpu.usage_guest_nice": 0,
+        "metric_name:telegraf.cpu.usage_idle": 65.1,
+        "metric_name:telegraf.cpu.usage_iowait": 0,
+        "metric_name:telegraf.cpu.usage_irq": 0,
+        "metric_name:telegraf.cpu.usage_nice": 0,
+        "metric_name:telegraf.cpu.usage_softirq": 0,
+        "metric_name:telegraf.cpu.usage_steal": 0,
+        "metric_name:telegraf.cpu.usage_system": 10.2,
+        "metric_name:telegraf.cpu.usage_user": 24.7
+    }
+}
+```
+To enable this mode, set the new `splunkmetric_multimetric` option in the output module you plan on using.
+
 ## Using with the HTTP output
 
 To send this data to a Splunk HEC, you can use the HTTP output, there are some custom headers that you need to add
@@ -61,6 +91,7 @@ to manage the HEC authorization, here's a sample config for an HTTP output:
     data_format = "splunkmetric"
     ## Provides time, index, source overrides for the HEC
     splunkmetric_hec_routing = true
+    # splunkmetric_multimetric = true
 
     ## Additional HTTP headers
     [outputs.http.headers]
@@ -118,7 +149,6 @@ disabled = false
 INDEXED_EXTRACTIONS = json
 KV_MODE = none
 TIMESTAMP_FIELDS = time
-TIME_FORMAT = %s.%3N
 ```
 
 An example configuration of a file based output is:
@@ -134,5 +164,6 @@ An example configuration of a file based output is:
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
   data_format = "splunkmetric"
-  hec_routing = false
+  splunkmetric_hec_routing = false
+  splunkmetric_multimetric = true
 ```
diff --git a/plugins/serializers/splunkmetric/splunkmetric.go b/plugins/serializers/splunkmetric/splunkmetric.go
index cdcf6cc59..77c724aa8 100644
--- a/plugins/serializers/splunkmetric/splunkmetric.go
+++ b/plugins/serializers/splunkmetric/splunkmetric.go
@@ -9,12 +9,33 @@ import (
 )
 
 type serializer struct {
-	HecRouting bool
+	HecRouting              bool
+	SplunkmetricMultiMetric bool
 }
 
+type CommonTags struct {
+	Time   float64
+	Host   string
+	Index  string
+	Source string
+	Fields map[string]interface{}
+}
+
+type HECTimeSeries struct {
+	Time   float64                `json:"time"`
+	Event  string                 `json:"event"`
+	Host   string                 `json:"host,omitempty"`
+	Index  string                 `json:"index,omitempty"`
+	Source string                 `json:"source,omitempty"`
+	Fields map[string]interface{} `json:"fields"`
+}
+
+// NewSerializer sets up our new serializer
+func NewSerializer(splunkmetric_hec_routing bool, splunkmetric_multimetric bool) (*serializer, error) {
+	/* Define output params */
 	s := &serializer{
-		HecRouting: splunkmetric_hec_routing,
+		HecRouting:              splunkmetric_hec_routing,
+		SplunkmetricMultiMetric: splunkmetric_multimetric,
 	}
 	return s, nil
 }
@@ -45,26 +66,61 @@ func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
 	return serialized, nil
 }
 
-func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte, err error) {
+func (s *serializer) createMulti(metric telegraf.Metric, dataGroup HECTimeSeries, commonTags CommonTags) (metricGroup []byte, err error) {
+	/* When splunkmetric_multimetric is true, we can write out multiple name=value pairs as part of the same
+	** event payload. This only works when the time, host, and dimensions are the same for every name=value pair
+	** in the timeseries data.
+	**
+	** The format for multimetric data is 'metric_name:nameOfMetric = valueOfMetric'
+	*/
+	var metricJSON []byte
+
+	// Set the event data from the commonTags above.
+	dataGroup.Event = "metric"
+	dataGroup.Time = commonTags.Time
+	dataGroup.Host = commonTags.Host
+	dataGroup.Index = commonTags.Index
+	dataGroup.Source = commonTags.Source
+	dataGroup.Fields = commonTags.Fields
+
+	// Stuff the metric data into the structure.
+	for _, field := range metric.FieldList() {
+		value, valid := verifyValue(field.Value)
+
+		if !valid {
+			log.Printf("D! Can not parse value: %v for key: %v", field.Value, field.Key)
+			continue
+		}
+
+		dataGroup.Fields["metric_name:"+metric.Name()+"."+field.Key] = value
+	}
+
+	// Manage the rest of the event details based upon HEC routing rules
+	switch s.HecRouting {
+	case true:
+		// Output the data as a fields array and host,index,time,source overrides for the HEC.
+		metricJSON, err = json.Marshal(dataGroup)
+	default:
+		// Just output the data and the time, useful for file based outputs
+		dataGroup.Fields["time"] = dataGroup.Time
+		metricJSON, err = json.Marshal(dataGroup.Fields)
+	}
+	if err != nil {
+		return nil, err
+	}
+	// Let the JSON fall through to the return below
+	metricGroup = metricJSON
+
+	return metricGroup, nil
+}
+
+func (s *serializer) createSingle(metric telegraf.Metric, dataGroup HECTimeSeries, commonTags CommonTags) (metricGroup []byte, err error) {
+	/* The default mode is to generate one JSON entity per metric (required for pre-8.0 Splunks)
+	**
+	** The format for a single metric is 'nameOfMetric = valueOfMetric'
+	*/
+
+	var metricJSON []byte
 
 	for _, field := range metric.FieldList() {
 
 		value, valid := verifyValue(field.Value)
 
 		if !valid {
 			log.Printf("D! Can not parse value: %v for key: %v", field.Value, field.Key)
 			continue
 		}
 
-		obj := map[string]interface{}{}
-		obj["metric_name"] = metric.Name() + "." + field.Key
-		obj["_value"] = value
-		dataGroup.Event = "metric"
-		// Convert ns to float seconds since epoch.
-		dataGroup.Time = float64(metric.Time().UnixNano()) / float64(1000000000)
-		dataGroup.Fields = obj
-		// Break tags out into key(n)=value(t) pairs
-		for n, t := range metric.Tags() {
-			if n == "host" {
-				dataGroup.Host = t
-			} else if n == "index" {
-				dataGroup.Index = t
-			} else if n == "source" {
-				dataGroup.Source = t
-			} else {
-				dataGroup.Fields[n] = t
-			}
-		}
+		dataGroup.Time = commonTags.Time
+
+		// Apply the common tags from above to every record.
+		dataGroup.Host = commonTags.Host
+		dataGroup.Index = commonTags.Index
+		dataGroup.Source = commonTags.Source
+		dataGroup.Fields = commonTags.Fields
+
+		dataGroup.Fields["metric_name"] = metric.Name() + "." + field.Key
+		dataGroup.Fields["_value"] = value
 
 		switch s.HecRouting {
 		case true:
 			// Output the data as a fields array and host,index,time,source overrides for the HEC.
-			metricJson, err = json.Marshal(dataGroup)
+			metricJSON, err = json.Marshal(dataGroup)
 		default:
 			// Just output the data and the time, useful for file based outuputs
 			dataGroup.Fields["time"] = dataGroup.Time
-			metricJson, err = json.Marshal(dataGroup.Fields)
+			metricJSON, err = json.Marshal(dataGroup.Fields)
 		}
 
-		metricGroup = append(metricGroup, metricJson...)
+		metricGroup = append(metricGroup, metricJSON...)
 
 		if err != nil {
 			return nil, err
@@ -117,6 +164,52 @@ func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte, e
 	return metricGroup, nil
 }
 
+func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte, err error) {
+
+	/* Splunk supports one metric json object, and does _not_ support an array of JSON objects.
+	** Splunk has the following required names for the metric store:
+	** metric_name: The name of the metric
+	** _value: The value for the metric
+	** time: The timestamp for the metric
+	** All other index fields become dimensions.
+	*/
+
+	dataGroup := HECTimeSeries{}
+
+	// The tags are common to all events in this timeseries
+	commonTags := CommonTags{}
+
+	commonObj := map[string]interface{}{}
+
+	commonObj["config:hecRouting"] = s.HecRouting
+	commonObj["config:multiMetric"] = s.SplunkmetricMultiMetric
+
+	commonTags.Fields = commonObj
+
+	// Break tags out into key(n)=value(t) pairs
+	for n, t := range metric.Tags() {
+		if n == "host" {
+			commonTags.Host = t
+		} else if n == "index" {
+			commonTags.Index = t
+		} else if n == "source" {
+			commonTags.Source = t
+		} else {
+			commonTags.Fields[n] = t
+		}
+	}
+	commonTags.Time = float64(metric.Time().UnixNano()) / float64(1000000000)
+	switch s.SplunkmetricMultiMetric {
+	case true:
+		metricGroup, _ = s.createMulti(metric, dataGroup, commonTags)
+	default:
+		metricGroup, _ = s.createSingle(metric, dataGroup, commonTags)
+	}
+
+	// Return the metric group regardless of whether it's multimetric or single metric.
+ return metricGroup, nil +} + func verifyValue(v interface{}) (value interface{}, valid bool) { switch v.(type) { case string: diff --git a/plugins/serializers/splunkmetric/splunkmetric_test.go b/plugins/serializers/splunkmetric/splunkmetric_test.go index 04f6e6538..70037e28a 100644 --- a/plugins/serializers/splunkmetric/splunkmetric_test.go +++ b/plugins/serializers/splunkmetric/splunkmetric_test.go @@ -29,11 +29,11 @@ func TestSerializeMetricFloat(t *testing.T) { m, err := metric.New("cpu", tags, fields, now) assert.NoError(t, err) - s, _ := NewSerializer(false) + s, _ := NewSerializer(false, false) var buf []byte buf, err = s.Serialize(m) assert.NoError(t, err) - expS := `{"_value":91.5,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":1529875740.819}` + expS := `{"_value":91.5,"config:hecRouting":false,"config:multiMetric":false,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":1529875740.819}` assert.Equal(t, string(expS), string(buf)) } @@ -49,11 +49,11 @@ func TestSerializeMetricFloatHec(t *testing.T) { m, err := metric.New("cpu", tags, fields, now) assert.NoError(t, err) - s, _ := NewSerializer(true) + s, _ := NewSerializer(true, false) var buf []byte buf, err = s.Serialize(m) assert.NoError(t, err) - expS := `{"time":1529875740.819,"event":"metric","fields":{"_value":91.5,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` + expS := `{"time":1529875740.819,"event":"metric","fields":{"_value":91.5,"config:hecRouting":true,"config:multiMetric":false,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` assert.Equal(t, string(expS), string(buf)) } @@ -68,12 +68,12 @@ func TestSerializeMetricInt(t *testing.T) { m, err := metric.New("cpu", tags, fields, now) assert.NoError(t, err) - s, _ := NewSerializer(false) + s, _ := NewSerializer(false, false) var buf []byte buf, err = s.Serialize(m) assert.NoError(t, err) - expS := `{"_value":90,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` + expS := `{"_value":90,"config:hecRouting":false,"config:multiMetric":false,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` assert.Equal(t, string(expS), string(buf)) } @@ -88,12 +88,12 @@ func TestSerializeMetricIntHec(t *testing.T) { m, err := metric.New("cpu", tags, fields, now) assert.NoError(t, err) - s, _ := NewSerializer(true) + s, _ := NewSerializer(true, false) var buf []byte buf, err = s.Serialize(m) assert.NoError(t, err) - expS := `{"time":0,"event":"metric","fields":{"_value":90,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` + expS := `{"time":0,"event":"metric","fields":{"_value":90,"config:hecRouting":true,"config:multiMetric":false,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` assert.Equal(t, string(expS), string(buf)) } @@ -108,12 +108,12 @@ func TestSerializeMetricBool(t *testing.T) { m, err := metric.New("docker", tags, fields, now) assert.NoError(t, err) - s, _ := NewSerializer(false) + s, _ := NewSerializer(false, false) var buf []byte buf, err = s.Serialize(m) assert.NoError(t, err) - expS := `{"_value":1,"container-name":"telegraf-test","metric_name":"docker.oomkiller","time":0}` + expS := `{"_value":1,"config:hecRouting":false,"config:multiMetric":false,"container-name":"telegraf-test","metric_name":"docker.oomkiller","time":0}` assert.Equal(t, string(expS), string(buf)) } @@ -128,12 +128,12 @@ func TestSerializeMetricBoolHec(t *testing.T) { m, err := metric.New("docker", tags, fields, now) assert.NoError(t, err) - s, _ := NewSerializer(true) + s, _ := NewSerializer(true, false) var buf []byte buf, err = s.Serialize(m) assert.NoError(t, err) - expS := 
`{"time":0,"event":"metric","fields":{"_value":0,"container-name":"telegraf-test","metric_name":"docker.oomkiller"}}` + expS := `{"time":0,"event":"metric","fields":{"_value":0,"config:hecRouting":true,"config:multiMetric":false,"container-name":"telegraf-test","metric_name":"docker.oomkiller"}}` assert.Equal(t, string(expS), string(buf)) } @@ -149,12 +149,12 @@ func TestSerializeMetricString(t *testing.T) { m, err := metric.New("cpu", tags, fields, now) assert.NoError(t, err) - s, _ := NewSerializer(false) + s, _ := NewSerializer(false, false) var buf []byte buf, err = s.Serialize(m) assert.NoError(t, err) - expS := `{"_value":5,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` + expS := `{"_value":5,"config:hecRouting":false,"config:multiMetric":false,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` assert.Equal(t, string(expS), string(buf)) assert.NoError(t, err) } @@ -182,11 +182,33 @@ func TestSerializeBatch(t *testing.T) { ) metrics := []telegraf.Metric{m, n} - s, _ := NewSerializer(false) + s, _ := NewSerializer(false, false) buf, err := s.SerializeBatch(metrics) assert.NoError(t, err) - expS := `{"_value":42,"metric_name":"cpu.value","time":0}` + `{"_value":92,"metric_name":"cpu.value","time":0}` + expS := `{"_value":42,"config:hecRouting":false,"config:multiMetric":false,"metric_name":"cpu.value","time":0}{"_value":92,"config:hecRouting":false,"config:multiMetric":false,"metric_name":"cpu.value","time":0}` + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMulti(t *testing.T) { + m := MustMetric( + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "user": 42.0, + "system": 8.0, + }, + time.Unix(0, 0), + ), + ) + + metrics := []telegraf.Metric{m} + s, _ := NewSerializer(false, true) + buf, err := s.SerializeBatch(metrics) + assert.NoError(t, err) + + expS := `{"config:hecRouting":false,"config:multiMetric":true,"metric_name:cpu.system":8,"metric_name:cpu.user":42,"time":0}` assert.Equal(t, string(expS), string(buf)) } @@ -213,10 +235,32 @@ func TestSerializeBatchHec(t *testing.T) { ) metrics := []telegraf.Metric{m, n} - s, _ := NewSerializer(true) + s, _ := NewSerializer(true, false) buf, err := s.SerializeBatch(metrics) assert.NoError(t, err) - expS := `{"time":0,"event":"metric","fields":{"_value":42,"metric_name":"cpu.value"}}` + `{"time":0,"event":"metric","fields":{"_value":92,"metric_name":"cpu.value"}}` + expS := `{"time":0,"event":"metric","fields":{"_value":42,"config:hecRouting":true,"config:multiMetric":false,"metric_name":"cpu.value"}}{"time":0,"event":"metric","fields":{"_value":92,"config:hecRouting":true,"config:multiMetric":false,"metric_name":"cpu.value"}}` + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMultiHec(t *testing.T) { + m := MustMetric( + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "usage": 42.0, + "system": 8.0, + }, + time.Unix(0, 0), + ), + ) + + metrics := []telegraf.Metric{m} + s, _ := NewSerializer(true, true) + buf, err := s.SerializeBatch(metrics) + assert.NoError(t, err) + + expS := `{"time":0,"event":"metric","fields":{"config:hecRouting":true,"config:multiMetric":true,"metric_name:cpu.system":8,"metric_name:cpu.usage":42}}` assert.Equal(t, string(expS), string(buf)) } From 7ff6ec19630d4c0e9da61ba3e6a048797b37bf99 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 18 Nov 2019 12:39:43 -0800 Subject: [PATCH 1329/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 
25b31f159..d0d4137a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,7 @@ - [#5870](https://github.com/influxdata/telegraf/pull/5870): Add support for SNMP over TCP. - [#6603](https://github.com/influxdata/telegraf/pull/6603): Add support for per output flush jitter. - [#6650](https://github.com/influxdata/telegraf/pull/6650): Add a nameable file tag to file input plugin. +- [#6640](https://github.com/influxdata/telegraf/pull/6640): Add Splunk MultiMetric support. #### Bugfixes From a66b6729e90b870dfa032e004eb83ce5de19bcde Mon Sep 17 00:00:00 2001 From: alan7yg Date: Wed, 20 Nov 2019 02:52:48 +0800 Subject: [PATCH 1330/1815] Fix panic in mongodb input if ShardStats is nil (#6680) --- plugins/inputs/mongodb/mongostat.go | 30 +++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index d75ff9fb0..8021775ea 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -983,21 +983,23 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } // Set shard stats - newShardStats := *newMongo.ShardStats - returnVal.TotalInUse = newShardStats.TotalInUse - returnVal.TotalAvailable = newShardStats.TotalAvailable - returnVal.TotalCreated = newShardStats.TotalCreated - returnVal.TotalRefreshing = newShardStats.TotalRefreshing - returnVal.ShardHostStatsLines = map[string]ShardHostStatLine{} - for host, stats := range newShardStats.Hosts { - shardStatLine := &ShardHostStatLine{ - InUse: stats.InUse, - Available: stats.Available, - Created: stats.Created, - Refreshing: stats.Refreshing, - } + if newMongo.ShardStats != nil { + newShardStats := *newMongo.ShardStats + returnVal.TotalInUse = newShardStats.TotalInUse + returnVal.TotalAvailable = newShardStats.TotalAvailable + returnVal.TotalCreated = newShardStats.TotalCreated + returnVal.TotalRefreshing = newShardStats.TotalRefreshing + returnVal.ShardHostStatsLines = map[string]ShardHostStatLine{} + for host, stats := range newShardStats.Hosts { + shardStatLine := &ShardHostStatLine{ + InUse: stats.InUse, + Available: stats.Available, + Created: stats.Created, + Refreshing: stats.Refreshing, + } - returnVal.ShardHostStatsLines[host] = *shardStatLine + returnVal.ShardHostStatsLines[host] = *shardStatLine + } } return returnVal From 16decd5f50c7c57291ebecc2d17efcddb6301d2a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 19 Nov 2019 12:28:07 -0800 Subject: [PATCH 1331/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d0d4137a9..b1947a289 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,6 +57,7 @@ - [#6666](https://github.com/influxdata/telegraf/issues/6666): Fix many plugin errors are logged at debug logging level. - [#6652](https://github.com/influxdata/telegraf/issues/6652): Use nanosecond precision in docker_log input. - [#6642](https://github.com/influxdata/telegraf/issues/6642): Fix interface option with method = native in ping input. +- [#6680](https://github.com/influxdata/telegraf/pull/6680): Fix panic in mongodb input if shard connection pool stats are unreadable. 
(#6680) ## v1.12.5 [2019-11-12] From 70ff63060a6197580e358d568c39d4745d23e162 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 19 Nov 2019 12:32:03 -0800 Subject: [PATCH 1332/1815] Set 1.12.6 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b1947a289..fe3ae4c5c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -50,7 +50,7 @@ - [#6573](https://github.com/influxdata/telegraf/issues/6573): Fix not a valid field error in Windows with nvidia input. - [#6614](https://github.com/influxdata/telegraf/issues/6614): Fix influxdb output serialization on connection closed. -## v1.12.6 [unreleased] +## v1.12.6 [2019-11-19] #### Bugfixes From 8e0eb5a7db305d2e8adb37383e9cd735d81227e4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 20 Nov 2019 12:20:48 -0800 Subject: [PATCH 1333/1815] Add support for sending HTTP Basic Auth in influxdb input (#6668) --- plugins/inputs/influxdb/README.md | 4 ++ plugins/inputs/influxdb/influxdb.go | 70 ++++++++++++++++++++++-- plugins/inputs/influxdb/influxdb_test.go | 26 +++++++++ 3 files changed, 95 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/influxdb/README.md b/plugins/inputs/influxdb/README.md index 2bab123f8..711503245 100644 --- a/plugins/inputs/influxdb/README.md +++ b/plugins/inputs/influxdb/README.md @@ -20,6 +20,10 @@ InfluxDB-formatted endpoints. See below for more information. "http://localhost:8086/debug/vars" ] + ## Username and password to send using HTTP Basic Authentication. + # username = "" + # password = "" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" diff --git a/plugins/inputs/influxdb/influxdb.go b/plugins/inputs/influxdb/influxdb.go index 0bb3ead5e..96389a013 100644 --- a/plugins/inputs/influxdb/influxdb.go +++ b/plugins/inputs/influxdb/influxdb.go @@ -1,9 +1,10 @@ package influxdb import ( + "bytes" "encoding/json" "errors" - "fmt" + "io" "net/http" "sync" "time" @@ -14,9 +15,28 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +const ( + maxErrorResponseBodyLength = 1024 +) + +type APIError struct { + StatusCode int + Reason string + Description string `json:"error"` +} + +func (e *APIError) Error() string { + if e.Description != "" { + return e.Reason + ": " + e.Description + } + return e.Reason +} + type InfluxDB struct { - URLs []string `toml:"urls"` - Timeout internal.Duration + URLs []string `toml:"urls"` + Username string `toml:"username"` + Password string `toml:"password"` + Timeout internal.Duration `toml:"timeout"` tls.ClientConfig client *http.Client @@ -38,6 +58,10 @@ func (*InfluxDB) SampleConfig() string { "http://localhost:8086/debug/vars" ] + ## Username and password to send using HTTP Basic Authentication. 
+ # username = "" + # password = "" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -75,7 +99,7 @@ func (i *InfluxDB) Gather(acc telegraf.Accumulator) error { go func(url string) { defer wg.Done() if err := i.gatherURL(acc, url); err != nil { - acc.AddError(fmt.Errorf("[url=%s]: %s", url, err)) + acc.AddError(err) } }(u) } @@ -135,12 +159,27 @@ func (i *InfluxDB) gatherURL( shardCounter := 0 now := time.Now() - resp, err := i.client.Get(url) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return err + } + + if i.Username != "" || i.Password != "" { + req.SetBasicAuth(i.Username, i.Password) + } + + req.Header.Set("User-Agent", "Telegraf/"+internal.Version()) + + resp, err := i.client.Do(req) if err != nil { return err } defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return readResponseError(resp) + } + // It would be nice to be able to decode into a map[string]point, but // we'll get a decoder error like: // `json: cannot unmarshal array into Go value of type influxdb.point` @@ -255,6 +294,27 @@ func (i *InfluxDB) gatherURL( return nil } +func readResponseError(resp *http.Response) error { + apiError := &APIError{ + StatusCode: resp.StatusCode, + Reason: resp.Status, + } + + var buf bytes.Buffer + r := io.LimitReader(resp.Body, maxErrorResponseBodyLength) + _, err := buf.ReadFrom(r) + if err != nil { + return apiError + } + + err = json.Unmarshal(buf.Bytes(), apiError) + if err != nil { + return apiError + } + + return apiError +} + func init() { inputs.Add("influxdb", func() telegraf.Input { return &InfluxDB{ diff --git a/plugins/inputs/influxdb/influxdb_test.go b/plugins/inputs/influxdb/influxdb_test.go index f24ecc24c..9225c45b0 100644 --- a/plugins/inputs/influxdb/influxdb_test.go +++ b/plugins/inputs/influxdb/influxdb_test.go @@ -1,6 +1,7 @@ package influxdb_test import ( + "fmt" "net/http" "net/http/httptest" "testing" @@ -178,6 +179,31 @@ func TestErrorHandling404(t *testing.T) { require.Error(t, acc.GatherError(plugin.Gather)) } +func TestErrorResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + w.Write([]byte(`{"error": "unable to parse authentication credentials"}`)) + })) + defer ts.Close() + + plugin := &influxdb.InfluxDB{ + URLs: []string{ts.URL}, + } + + var acc testutil.Accumulator + err := plugin.Gather(&acc) + require.NoError(t, err) + + expected := []error{ + &influxdb.APIError{ + StatusCode: http.StatusUnauthorized, + Reason: fmt.Sprintf("%d %s", http.StatusUnauthorized, http.StatusText(http.StatusUnauthorized)), + Description: "unable to parse authentication credentials", + }, + } + require.Equal(t, expected, acc.Errors) +} + const basicJSON = ` { "_1": { From c12a4030429b00f234c3828b4d2c89f7caf24566 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Wed, 20 Nov 2019 13:33:11 -0700 Subject: [PATCH 1334/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fe3ae4c5c..42c62f038 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,6 +42,7 @@ - [#6603](https://github.com/influxdata/telegraf/pull/6603): Add support for per output flush jitter. - [#6650](https://github.com/influxdata/telegraf/pull/6650): Add a nameable file tag to file input plugin. - [#6640](https://github.com/influxdata/telegraf/pull/6640): Add Splunk MultiMetric support. 
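
As a quick sanity check for the basic-auth support added above (the credentials here are hypothetical; the request shape mirrors what the input builds internally):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// The input issues a GET against /debug/vars and, when a username or
	// password is configured, attaches them via SetBasicAuth.
	req, err := http.NewRequest("GET", "http://localhost:8086/debug/vars", nil)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("telegraf", "s3cret") // hypothetical credentials

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect "200 OK" when credentials are accepted
}
```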
+- [#6668](https://github.com/influxdata/telegraf/pull/6668): Add support for sending HTTP Basic Auth in influxdb input.
 
 #### Bugfixes
 
From 12ecdaba5b5b57b10ccf92df03ec889e2a1e3ca3 Mon Sep 17 00:00:00 2001
From: Vishwanath
Date: Wed, 20 Nov 2019 20:53:57 -0800
Subject: [PATCH 1335/1815] Add prometheus metric_version = 2 and url tag
 configurable (#5767)

---
 plugins/inputs/prometheus/README.md          | 18 +++
 plugins/inputs/prometheus/parser.go          | 158 +++++++++++++++++++
 plugins/inputs/prometheus/prometheus.go      | 21 ++-
 plugins/inputs/prometheus/prometheus_test.go | 67 +++++++-
 testutil/accumulator.go                      | 12 ++
 5 files changed, 272 insertions(+), 4 deletions(-)

diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md
index edc8a27d6..4163e068e 100644
--- a/plugins/inputs/prometheus/README.md
+++ b/plugins/inputs/prometheus/README.md
@@ -11,6 +11,9 @@ in Prometheus format.
   ## An array of urls to scrape metrics from.
   urls = ["http://localhost:9100/metrics"]
 
+  ## Metric version (optional, default=1, supported values are 1 and 2)
+  # metric_version = 2
+
   ## An array of Kubernetes services to scrape metrics from.
   # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
 
@@ -140,3 +143,18 @@ cpu_usage_user,cpu=cpu1,url=http://example.org:9273/metrics gauge=5.829145728641
 cpu_usage_user,cpu=cpu2,url=http://example.org:9273/metrics gauge=2.119071644805144 1505776751000000000
 cpu_usage_user,cpu=cpu3,url=http://example.org:9273/metrics gauge=1.5228426395944945 1505776751000000000
 ```
+
+**Output (when metric_version = 2)**
+```
+prometheus,quantile=1,url=http://example.org:9273/metrics go_gc_duration_seconds=0.005574303 1556075100000000000
+prometheus,quantile=0.75,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0001046 1556075100000000000
+prometheus,quantile=0.5,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0000719 1556075100000000000
+prometheus,quantile=0.25,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0000579 1556075100000000000
+prometheus,quantile=0,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0000349 1556075100000000000
+prometheus,url=http://example.org:9273/metrics go_gc_duration_seconds_count=324,go_gc_duration_seconds_sum=0.091340353 1556075100000000000
+prometheus,url=http://example.org:9273/metrics go_goroutines=15 1556075100000000000
+prometheus,cpu=cpu0,url=http://example.org:9273/metrics cpu_usage_user=1.513622603430151 1505776751000000000
+prometheus,cpu=cpu1,url=http://example.org:9273/metrics cpu_usage_user=5.829145728641773 1505776751000000000
+prometheus,cpu=cpu2,url=http://example.org:9273/metrics cpu_usage_user=2.119071644805144 1505776751000000000
+prometheus,cpu=cpu3,url=http://example.org:9273/metrics cpu_usage_user=1.5228426395944945 1505776751000000000
+```
diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go
index 6584fbc05..9e79249ec 100644
--- a/plugins/inputs/prometheus/parser.go
+++ b/plugins/inputs/prometheus/parser.go
@@ -21,6 +21,145 @@ import (
 	"github.com/prometheus/common/expfmt"
 )
 
+// ParseV2 returns a slice of Metrics from a text representation of
+// metrics
+func ParseV2(buf []byte, header http.Header) ([]telegraf.Metric, error) {
+	var metrics []telegraf.Metric
+	var parser expfmt.TextParser
+	// parse even if the buffer begins with a newline
+	buf = bytes.TrimPrefix(buf, []byte("\n"))
+	// Read raw data
+	buffer := bytes.NewBuffer(buf)
+	reader := bufio.NewReader(buffer)
+
+	mediatype, params, err := 
mime.ParseMediaType(header.Get("Content-Type")) + // Prepare output + metricFamilies := make(map[string]*dto.MetricFamily) + + if err == nil && mediatype == "application/vnd.google.protobuf" && + params["encoding"] == "delimited" && + params["proto"] == "io.prometheus.client.MetricFamily" { + for { + mf := &dto.MetricFamily{} + if _, ierr := pbutil.ReadDelimited(reader, mf); ierr != nil { + if ierr == io.EOF { + break + } + return nil, fmt.Errorf("reading metric family protocol buffer failed: %s", ierr) + } + metricFamilies[mf.GetName()] = mf + } + } else { + metricFamilies, err = parser.TextToMetricFamilies(reader) + if err != nil { + return nil, fmt.Errorf("reading text format failed: %s", err) + } + } + + // read metrics + for metricName, mf := range metricFamilies { + for _, m := range mf.Metric { + // reading tags + tags := makeLabels(m) + + if mf.GetType() == dto.MetricType_SUMMARY { + // summary metric + telegrafMetrics := makeQuantilesV2(m, tags, metricName, mf.GetType()) + metrics = append(metrics, telegrafMetrics...) + } else if mf.GetType() == dto.MetricType_HISTOGRAM { + // histogram metric + telegrafMetrics := makeBucketsV2(m, tags, metricName, mf.GetType()) + metrics = append(metrics, telegrafMetrics...) + } else { + // standard metric + // reading fields + fields := make(map[string]interface{}) + fields = getNameAndValueV2(m, metricName) + // converting to telegraf metric + if len(fields) > 0 { + var t time.Time + if m.TimestampMs != nil && *m.TimestampMs > 0 { + t = time.Unix(0, *m.TimestampMs*1000000) + } else { + t = time.Now() + } + metric, err := metric.New("prometheus", tags, fields, t, valueType(mf.GetType())) + if err == nil { + metrics = append(metrics, metric) + } + } + } + } + } + + return metrics, err +} + +// Get Quantiles for summary metric & Buckets for histogram +func makeQuantilesV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType) []telegraf.Metric { + var metrics []telegraf.Metric + fields := make(map[string]interface{}) + var t time.Time + if m.TimestampMs != nil && *m.TimestampMs > 0 { + t = time.Unix(0, *m.TimestampMs*1000000) + } else { + t = time.Now() + } + fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount()) + fields[metricName+"_sum"] = float64(m.GetSummary().GetSampleSum()) + met, err := metric.New("prometheus", tags, fields, t, valueType(metricType)) + if err == nil { + metrics = append(metrics, met) + } + + for _, q := range m.GetSummary().Quantile { + newTags := tags + fields = make(map[string]interface{}) + if !math.IsNaN(q.GetValue()) { + newTags["quantile"] = fmt.Sprint(q.GetQuantile()) + fields[metricName] = float64(q.GetValue()) + + quantileMetric, err := metric.New("prometheus", newTags, fields, t, valueType(metricType)) + if err == nil { + metrics = append(metrics, quantileMetric) + } + } + } + return metrics +} + +// Get Buckets from histogram metric +func makeBucketsV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType) []telegraf.Metric { + var metrics []telegraf.Metric + fields := make(map[string]interface{}) + var t time.Time + if m.TimestampMs != nil && *m.TimestampMs > 0 { + t = time.Unix(0, *m.TimestampMs*1000000) + } else { + t = time.Now() + } + fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount()) + fields[metricName+"_sum"] = float64(m.GetHistogram().GetSampleSum()) + + met, err := metric.New("prometheus", tags, fields, t, valueType(metricType)) + if err == nil { + metrics = append(metrics, met) + } + + for _, b := range 
m.GetHistogram().Bucket { + newTags := tags + fields = make(map[string]interface{}) + newTags["le"] = fmt.Sprint(b.GetUpperBound()) + fields[metricName+"_bucket"] = float64(b.GetCumulativeCount()) + + histogramMetric, err := metric.New("prometheus", newTags, fields, t, valueType(metricType)) + if err == nil { + metrics = append(metrics, histogramMetric) + } + } + return metrics +} + // Parse returns a slice of Metrics from a text representation of a // metrics func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { @@ -159,3 +298,22 @@ func getNameAndValue(m *dto.Metric) map[string]interface{} { } return fields } + +// Get name and value from metric +func getNameAndValueV2(m *dto.Metric, metricName string) map[string]interface{} { + fields := make(map[string]interface{}) + if m.Gauge != nil { + if !math.IsNaN(m.GetGauge().GetValue()) { + fields[metricName] = float64(m.GetGauge().GetValue()) + } + } else if m.Counter != nil { + if !math.IsNaN(m.GetCounter().GetValue()) { + fields[metricName] = float64(m.GetCounter().GetValue()) + } + } else if m.Untyped != nil { + if !math.IsNaN(m.GetUntyped().GetValue()) { + fields[metricName] = float64(m.GetUntyped().GetValue()) + } + } + return fields +} diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index aeeec9265..c59d92021 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -39,6 +39,10 @@ type Prometheus struct { ResponseTimeout internal.Duration `toml:"response_timeout"` + MetricVersion int `toml:"metric_version"` + + URLTag string `toml:"url_tag"` + tls.ClientConfig Log telegraf.Logger @@ -58,6 +62,12 @@ var sampleConfig = ` ## An array of urls to scrape metrics from. urls = ["http://localhost:9100/metrics"] + ## Metric version (optional, default=1, supported values are 1 and 2) + # metric_version = 2 + + ## Url tag name (tag containing scrapped url. optional, default is "url") + # url_tag = "scrapeUrl" + ## An array of Kubernetes services to scrape metrics from. 
# kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] @@ -224,6 +234,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error var req *http.Request var err error var uClient *http.Client + var metrics []telegraf.Metric if u.URL.Scheme == "unix" { path := u.URL.Query().Get("path") if path == "" { @@ -285,7 +296,12 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error return fmt.Errorf("error reading body: %s", err) } - metrics, err := Parse(body, resp.Header) + if p.MetricVersion == 2 { + metrics, err = ParseV2(body, resp.Header) + } else { + metrics, err = Parse(body, resp.Header) + } + if err != nil { return fmt.Errorf("error reading metrics for %s: %s", u.URL, err) @@ -295,7 +311,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error tags := metric.Tags() // strip user and password from URL u.OriginalURL.User = nil - tags["url"] = u.OriginalURL.String() + tags[p.URLTag] = u.OriginalURL.String() if u.Address != "" { tags["address"] = u.Address } @@ -342,6 +358,7 @@ func init() { return &Prometheus{ ResponseTimeout: internal.Duration{Duration: time.Second * 3}, kubernetesPods: map[string]URLAndAddress{}, + URLTag: "url", } }) } diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index f5a05b890..78629d3d7 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -29,6 +29,21 @@ go_goroutines 15 # TYPE test_metric untyped test_metric{label="value"} 1.0 1490802350000 ` +const sampleSummaryTextFormat = `# HELP go_gc_duration_seconds A summary of the GC invocation durations. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 0.00010425500000000001 +go_gc_duration_seconds{quantile="0.25"} 0.000139108 +go_gc_duration_seconds{quantile="0.5"} 0.00015749400000000002 +go_gc_duration_seconds{quantile="0.75"} 0.000331463 +go_gc_duration_seconds{quantile="1"} 0.000667154 +go_gc_duration_seconds_sum 0.0018183950000000002 +go_gc_duration_seconds_count 7 +` +const sampleGaugeTextFormat = ` +# HELP go_goroutines Number of goroutines that currently exist. 
+# TYPE go_goroutines gauge +go_goroutines 15 1490802350000 +` func TestPrometheusGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -37,8 +52,9 @@ func TestPrometheusGeneratesMetrics(t *testing.T) { defer ts.Close() p := &Prometheus{ - Log: testutil.Logger{}, - URLs: []string{ts.URL}, + Log: testutil.Logger{}, + URLs: []string{ts.URL}, + URLTag: "url", } var acc testutil.Accumulator @@ -63,6 +79,7 @@ func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) { p := &Prometheus{ Log: testutil.Logger{}, KubernetesServices: []string{ts.URL}, + URLTag: "url", } u, _ := url.Parse(ts.URL) tsAddress := u.Hostname() @@ -106,3 +123,49 @@ func TestPrometheusGeneratesMetricsAlthoughFirstDNSFails(t *testing.T) { assert.True(t, acc.HasFloatField("test_metric", "value")) assert.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) } + +func TestPrometheusGeneratesSummaryMetricsV2(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, sampleSummaryTextFormat) + })) + defer ts.Close() + + p := &Prometheus{ + URLs: []string{ts.URL}, + URLTag: "url", + MetricVersion: 2, + } + + var acc testutil.Accumulator + + err := acc.GatherError(p.Gather) + require.NoError(t, err) + + assert.True(t, acc.TagSetValue("prometheus", "quantile") == "0") + assert.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_sum")) + assert.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_count")) + assert.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") + +} + +func TestPrometheusGeneratesGaugeMetricsV2(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, sampleGaugeTextFormat) + })) + defer ts.Close() + + p := &Prometheus{ + URLs: []string{ts.URL}, + URLTag: "url", + MetricVersion: 2, + } + + var acc testutil.Accumulator + + err := acc.GatherError(p.Gather) + require.NoError(t, err) + + assert.True(t, acc.HasFloatField("prometheus", "go_goroutines")) + assert.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") + assert.True(t, acc.HasTimestamp("prometheus", time.Unix(1490802350, 0))) +} diff --git a/testutil/accumulator.go b/testutil/accumulator.go index e33959a83..9e4e82e27 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -258,6 +258,18 @@ func (a *Accumulator) HasTag(measurement string, key string) bool { return false } +func (a *Accumulator) TagSetValue(measurement string, key string) string { + for _, p := range a.Metrics { + if p.Measurement == measurement { + v, ok := p.Tags[key] + if ok { + return v + } + } + } + return "" +} + func (a *Accumulator) TagValue(measurement string, key string) string { for _, p := range a.Metrics { if p.Measurement == measurement { From f800d91dc847d77272d6193c4d57bebbe41f7e06 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 21 Nov 2019 00:09:27 -0800 Subject: [PATCH 1336/1815] Update changelog --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 42c62f038..82998d8c5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,6 +43,8 @@ - [#6650](https://github.com/influxdata/telegraf/pull/6650): Add a nameable file tag to file input plugin. - [#6640](https://github.com/influxdata/telegraf/pull/6640): Add Splunk MultiMetric support. 
- [#6668](https://github.com/influxdata/telegraf/pull/6668): Add support for sending HTTP Basic Auth in influxdb input +- [#5767](https://github.com/influxdata/telegraf/pull/5767): Add ability to configure the url tag in the prometheus input. +- [#5767](https://github.com/influxdata/telegraf/pull/5767): Add prometheus metric_version=2 mapping to internal metrics/line protocol. #### Bugfixes @@ -58,7 +60,7 @@ - [#6666](https://github.com/influxdata/telegraf/issues/6666): Fix many plugin errors are logged at debug logging level. - [#6652](https://github.com/influxdata/telegraf/issues/6652): Use nanosecond precision in docker_log input. - [#6642](https://github.com/influxdata/telegraf/issues/6642): Fix interface option with method = native in ping input. -- [#6680](https://github.com/influxdata/telegraf/pull/6680): Fix panic in mongodb input if shard connection pool stats are unreadable. (#6680) +- [#6680](https://github.com/influxdata/telegraf/pull/6680): Fix panic in mongodb input if shard connection pool stats are unreadable. ## v1.12.5 [2019-11-12] From acfdc5576ccfd69bce6b00372ac8b1c305dd58bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n=20L=C3=B3pez?= Date: Thu, 21 Nov 2019 19:36:48 +0100 Subject: [PATCH 1337/1815] Add clone processor to all.go (#6697) --- plugins/processors/all/all.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go index 47ff83f54..e0f69d787 100644 --- a/plugins/processors/all/all.go +++ b/plugins/processors/all/all.go @@ -1,6 +1,7 @@ package all import ( + _ "github.com/influxdata/telegraf/plugins/processors/clone" _ "github.com/influxdata/telegraf/plugins/processors/converter" _ "github.com/influxdata/telegraf/plugins/processors/date" _ "github.com/influxdata/telegraf/plugins/processors/enum" From 23b6deee2245f83c86d2cfb0234d274707c3aaf9 Mon Sep 17 00:00:00 2001 From: Remi Frenay <47144573+rfrenayworldstream@users.noreply.github.com> Date: Thu, 21 Nov 2019 20:26:59 +0100 Subject: [PATCH 1338/1815] Add synproxy input plugin (#5683) --- plugins/inputs/all/all.go | 1 + plugins/inputs/synproxy/README.md | 49 ++++++ plugins/inputs/synproxy/synproxy.go | 121 +++++++++++++ plugins/inputs/synproxy/synproxy_notlinux.go | 31 ++++ plugins/inputs/synproxy/synproxy_test.go | 169 +++++++++++++++++++ 5 files changed, 371 insertions(+) create mode 100644 plugins/inputs/synproxy/README.md create mode 100644 plugins/inputs/synproxy/synproxy.go create mode 100644 plugins/inputs/synproxy/synproxy_notlinux.go create mode 100644 plugins/inputs/synproxy/synproxy_test.go diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index a25ea3cd9..326629a7e 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -143,6 +143,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/statsd" _ "github.com/influxdata/telegraf/plugins/inputs/suricata" _ "github.com/influxdata/telegraf/plugins/inputs/swap" + _ "github.com/influxdata/telegraf/plugins/inputs/synproxy" _ "github.com/influxdata/telegraf/plugins/inputs/syslog" _ "github.com/influxdata/telegraf/plugins/inputs/sysstat" _ "github.com/influxdata/telegraf/plugins/inputs/system" diff --git a/plugins/inputs/synproxy/README.md b/plugins/inputs/synproxy/README.md new file mode 100644 index 000000000..4e275886f --- /dev/null +++ b/plugins/inputs/synproxy/README.md @@ -0,0 +1,49 @@ +# Synproxy Input Plugin + +The synproxy plugin gathers the synproxy counters. Synproxy is a Linux netfilter module used for SYN attack mitigation. 
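The kernel exposes these counters as rows of hexadecimal fields in `/proc/net/stat/synproxy`, one row per CPU, and the plugin sums the rows. As an editorial aside, a minimal standalone sketch of that read path (it hard-codes the `syn_received` column for brevity; the full implementation below derives the column order from the header row):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
)

func main() {
	raw, err := ioutil.ReadFile("/proc/net/stat/synproxy")
	if err != nil {
		panic(err)
	}
	lines := strings.Split(strings.TrimSpace(string(raw)), "\n")
	var synReceived uint32
	for _, line := range lines[1:] { // skip the header row
		cols := strings.Fields(line)
		if len(cols) < 2 {
			continue
		}
		// Values are hexadecimal, one row per CPU; sum them.
		v, err := strconv.ParseUint(cols[1], 16, 32)
		if err != nil {
			panic(err)
		}
		synReceived += uint32(v)
	}
	fmt.Println("syn_received:", synReceived)
}
```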
+The use of synproxy is documented in `man iptables-extensions` under the SYNPROXY section. + + +### Configuration + +The synproxy plugin does not need any configuration + +```toml +[[inputs.synproxy]] + # no configuration +``` + +### Metrics + +The following synproxy counters are gathered + +- synproxy + - fields: + - cookie_invalid (uint32, packets, counter) - Invalid cookies + - cookie_retrans (uint32, packets, counter) - Cookies retransmitted + - cookie_valid (uint32, packets, counter) - Valid cookies + - entries (uint32, packets, counter) - Entries + - syn_received (uint32, packets, counter) - SYN received + - conn_reopened (uint32, packets, counter) - Connections reopened + +### Sample Queries + +Get the number of packets per 5 minutes for the measurement in the last hour from InfluxDB: +``` +SELECT difference(last("cookie_invalid")) AS "cookie_invalid", difference(last("cookie_retrans")) AS "cookie_retrans", difference(last("cookie_valid")) AS "cookie_valid", difference(last("entries")) AS "entries", difference(last("syn_received")) AS "syn_received", difference(last("conn_reopened")) AS "conn_reopened" FROM synproxy WHERE time > NOW() - 1h GROUP BY time(5m) FILL(null); +``` + +### Troubleshooting + +Execute the following CLI command in Linux to test the synproxy counters: +``` +cat /proc/net/stat/synproxy +``` + +### Example Output + +This section shows example output in Line Protocol format. + +``` +synproxy,host=Filter-GW01,rack=filter-node1 conn_reopened=0i,cookie_invalid=235i,cookie_retrans=0i,cookie_valid=8814i,entries=0i,syn_received=8742i 1549550634000000000 +``` diff --git a/plugins/inputs/synproxy/synproxy.go b/plugins/inputs/synproxy/synproxy.go new file mode 100644 index 000000000..510f5584d --- /dev/null +++ b/plugins/inputs/synproxy/synproxy.go @@ -0,0 +1,121 @@ +// +build linux + +package synproxy + +import ( + "bufio" + "fmt" + "os" + "path" + "strconv" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type Synproxy struct { + // Synproxy stats filename (proc filesystem) + statFile string +} + +func (k *Synproxy) Description() string { + return "Get synproxy counter statistics from procfs" +} + +func (k *Synproxy) SampleConfig() string { + return "" +} + +func (k *Synproxy) Gather(acc telegraf.Accumulator) error { + data, err := k.getSynproxyStat() + if err != nil { + return err + } + + acc.AddCounter("synproxy", data, map[string]string{}) + return nil +} + +func inSlice(haystack []string, needle string) bool { + for _, val := range haystack { + if needle == val { + return true + } + } + return false +} + +func (k *Synproxy) getSynproxyStat() (map[string]interface{}, error) { + var hname []string + counters := []string{"entries", "syn_received", "cookie_invalid", "cookie_valid", "cookie_retrans", "conn_reopened"} + fields := make(map[string]interface{}) + + // Open synproxy file in proc filesystem + file, err := os.Open(k.statFile) + if err != nil { + return nil, err + } + defer file.Close() + + // Initialise expected fields + for _, val := range counters { + fields[val] = uint32(0) + } + + scanner := bufio.NewScanner(file) + // Read header row + if scanner.Scan() { + line := scanner.Text() + // Parse fields separated by whitespace + dataFields := strings.Fields(line) + for _, val := range dataFields { + if !inSlice(counters, val) { + val = "" + } + hname = append(hname, val) + } + } + if len(hname) == 0 { + return nil, fmt.Errorf("invalid data") + } + // Read data rows + for scanner.Scan() { + line := 
scanner.Text() + // Parse fields separated by whitespace + dataFields := strings.Fields(line) + // If the number of data fields does not match the number of header fields + if len(dataFields) != len(hname) { + return nil, fmt.Errorf("invalid number of columns in data, expected %d found %d", len(hname), + len(dataFields)) + } + for i, val := range dataFields { + // Convert from hexstring to uint32 + x, err := strconv.ParseUint(val, 16, 32) + // If field is not a valid hexstring + if err != nil { + return nil, fmt.Errorf("invalid value '%s' found", val) + } + if hname[i] != "" { + fields[hname[i]] = fields[hname[i]].(uint32) + uint32(x) + } + } + } + return fields, nil +} + +func getHostProc() string { + procPath := "/proc" + if os.Getenv("HOST_PROC") != "" { + procPath = os.Getenv("HOST_PROC") + } + return procPath +} + +func init() { + inputs.Add("synproxy", func() telegraf.Input { + return &Synproxy{ + statFile: path.Join(getHostProc(), "/net/stat/synproxy"), + } + }) +} diff --git a/plugins/inputs/synproxy/synproxy_notlinux.go b/plugins/inputs/synproxy/synproxy_notlinux.go new file mode 100644 index 000000000..e77f06903 --- /dev/null +++ b/plugins/inputs/synproxy/synproxy_notlinux.go @@ -0,0 +1,31 @@ +// +build !linux + +package synproxy + +import ( + "log" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type Synproxy struct{} + +func (k *Synproxy) Gather(acc telegraf.Accumulator) error { + return nil +} + +func (k *Synproxy) Description() string { + return "" +} + +func (k *Synproxy) SampleConfig() string { + return "" +} + +func init() { + inputs.Add("synproxy", func() telegraf.Input { + log.Print("W! [inputs.synproxy] Current platform is not supported") + return &Synproxy{} + }) +} diff --git a/plugins/inputs/synproxy/synproxy_test.go b/plugins/inputs/synproxy/synproxy_test.go new file mode 100644 index 000000000..83d752ff1 --- /dev/null +++ b/plugins/inputs/synproxy/synproxy_test.go @@ -0,0 +1,169 @@ +// +build linux + +package synproxy + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/assert" +) + +func TestSynproxyFileNormal(t *testing.T) { + testSynproxyFileData(t, synproxyFileNormal, synproxyResultNormal) +} + +func TestSynproxyFileOverflow(t *testing.T) { + testSynproxyFileData(t, synproxyFileOverflow, synproxyResultOverflow) +} + +func TestSynproxyFileExtended(t *testing.T) { + testSynproxyFileData(t, synproxyFileExtended, synproxyResultNormal) +} + +func TestSynproxyFileAltered(t *testing.T) { + testSynproxyFileData(t, synproxyFileAltered, synproxyResultNormal) +} + +func TestSynproxyFileHeaderMismatch(t *testing.T) { + tmpfile := makeFakeSynproxyFile([]byte(synproxyFileHeaderMismatch)) + defer os.Remove(tmpfile) + + k := Synproxy{ + statFile: tmpfile, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + assert.Error(t, err) + assert.Contains(t, err.Error(), "invalid number of columns in data") +} + +func TestSynproxyFileInvalidHex(t *testing.T) { + tmpfile := makeFakeSynproxyFile([]byte(synproxyFileInvalidHex)) + defer os.Remove(tmpfile) + + k := Synproxy{ + statFile: tmpfile, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + assert.Error(t, err) + assert.Contains(t, err.Error(), "invalid value") +} + +func TestNoSynproxyFile(t *testing.T) { + tmpfile := makeFakeSynproxyFile([]byte(synproxyFileNormal)) + // Remove file to generate "no such file" error + os.Remove(tmpfile) + + k := Synproxy{ + statFile: tmpfile, + } + + acc := 
testutil.Accumulator{} + err := k.Gather(&acc) + assert.Error(t, err) +} + +// Valid Synproxy file +const synproxyFileNormal = `entries syn_received cookie_invalid cookie_valid cookie_retrans conn_reopened +00000000 00007a88 00002af7 00007995 00000000 00000000 +00000000 0000892c 000015e3 00008852 00000000 00000000 +00000000 00007a80 00002ccc 0000796a 00000000 00000000 +00000000 000079f7 00002bf5 0000790a 00000000 00000000 +00000000 00007a08 00002c9a 00007901 00000000 00000000 +00000000 00007cfc 00002b36 000078fd 00000000 00000000 +00000000 000079c2 00002c2b 000078d6 00000000 00000000 +00000000 0000798a 00002ba8 000078a0 00000000 00000000` + +const synproxyFileOverflow = `entries syn_received cookie_invalid cookie_valid cookie_retrans conn_reopened +00000000 80000001 e0000000 80000001 00000000 00000000 +00000000 80000003 f0000009 80000003 00000000 00000000` + +const synproxyFileHeaderMismatch = `entries syn_received cookie_invalid cookie_valid cookie_retrans +00000000 00000002 00000000 00000002 00000000 00000000 +00000000 00000004 00000015 00000004 00000000 00000000 +00000000 00000003 00000000 00000003 00000000 00000000 +00000000 00000002 00000000 00000002 00000000 00000000 +00000000 00000003 00000009 00000003 00000000 00000000 +00000000 00000003 00000009 00000003 00000000 00000000 +00000000 00000001 00000000 00000001 00000000 00000000 +00000000 00000003 00000009 00000003 00000000 00000000` + +const synproxyFileInvalidHex = `entries syn_received cookie_invalid cookie_valid cookie_retrans conn_reopened +entries 00000002 00000000 00000002 00000000 00000000 +00000000 00000003 00000009 00000003 00000000 00000000` + +const synproxyFileExtended = `entries syn_received cookie_invalid cookie_valid cookie_retrans conn_reopened new_counter +00000000 00007a88 00002af7 00007995 00000000 00000000 00000000 +00000000 0000892c 000015e3 00008852 00000000 00000000 00000000 +00000000 00007a80 00002ccc 0000796a 00000000 00000000 00000000 +00000000 000079f7 00002bf5 0000790a 00000000 00000000 00000000 +00000000 00007a08 00002c9a 00007901 00000000 00000000 00000000 +00000000 00007cfc 00002b36 000078fd 00000000 00000000 00000000 +00000000 000079c2 00002c2b 000078d6 00000000 00000000 00000000 +00000000 0000798a 00002ba8 000078a0 00000000 00000000 00000000` + +const synproxyFileAltered = `entries cookie_invalid cookie_valid syn_received conn_reopened +00000000 00002af7 00007995 00007a88 00000000 +00000000 000015e3 00008852 0000892c 00000000 +00000000 00002ccc 0000796a 00007a80 00000000 +00000000 00002bf5 0000790a 000079f7 00000000 +00000000 00002c9a 00007901 00007a08 00000000 +00000000 00002b36 000078fd 00007cfc 00000000 +00000000 00002c2b 000078d6 000079c2 00000000 +00000000 00002ba8 000078a0 0000798a 00000000` + +var synproxyResultNormal = map[string]interface{}{ + "entries": uint32(0x00000000), + "syn_received": uint32(0x0003e27b), + "cookie_invalid": uint32(0x0001493e), + "cookie_valid": uint32(0x0003d7cf), + "cookie_retrans": uint32(0x00000000), + "conn_reopened": uint32(0x00000000), +} + +var synproxyResultOverflow = map[string]interface{}{ + "entries": uint32(0x00000000), + "syn_received": uint32(0x00000004), + "cookie_invalid": uint32(0xd0000009), + "cookie_valid": uint32(0x00000004), + "cookie_retrans": uint32(0x00000000), + "conn_reopened": uint32(0x00000000), +} + +func testSynproxyFileData(t *testing.T, fileData string, telegrafData map[string]interface{}) { + tmpfile := makeFakeSynproxyFile([]byte(fileData)) + defer os.Remove(tmpfile) + + k := Synproxy{ + statFile: tmpfile, + } + + acc := 
testutil.Accumulator{} + err := k.Gather(&acc) + assert.NoError(t, err) + + acc.AssertContainsFields(t, "synproxy", telegrafData) +} + +func makeFakeSynproxyFile(content []byte) string { + tmpfile, err := ioutil.TempFile("", "synproxy_test") + if err != nil { + panic(err) + } + + if _, err := tmpfile.Write(content); err != nil { + panic(err) + } + if err := tmpfile.Close(); err != nil { + panic(err) + } + + return tmpfile.Name() +} From a000ad35535fddca09219644c14f46de8a48ddb2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 21 Nov 2019 11:33:47 -0800 Subject: [PATCH 1339/1815] Update changelog and readme --- CHANGELOG.md | 1 + README.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 82998d8c5..76481d2e1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ - [azure_storage_queue](/plugins/inputs/azure_storage_queue/README.md) - Contributed by @mjiderhamn - [ethtool](/plugins/inputs/ethtool/README.md) - Contributed by @philippreston - [suricata](/plugins/inputs/suricata/README.md) - Contributed by @satta +- [synproxy](/plugins/inputs/synproxy/README.md) - Contributed by @rfrenayworldstream #### New Processors diff --git a/README.md b/README.md index da300a71f..a207ecd32 100644 --- a/README.md +++ b/README.md @@ -279,6 +279,7 @@ For documentation on the latest development code see the [documentation index][d * [statsd](./plugins/inputs/statsd) * [suricata](./plugins/inputs/suricata) * [swap](./plugins/inputs/swap) +* [synproxy](./plugins/inputs/synproxy) * [syslog](./plugins/inputs/syslog) * [sysstat](./plugins/inputs/sysstat) * [system](./plugins/inputs/system) From 32d1e71a7ecd8f1e92c97450909f68e1428b76aa Mon Sep 17 00:00:00 2001 From: Nick Neisen Date: Thu, 21 Nov 2019 19:11:17 -0700 Subject: [PATCH 1340/1815] Add decoding and tests to socket_listener (#6660) --- plugins/inputs/socket_listener/README.md | 5 ++ .../inputs/socket_listener/socket_listener.go | 28 ++++++++- .../socket_listener/socket_listener_test.go | 63 +++++++++++++++++-- 3 files changed, 89 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/socket_listener/README.md b/plugins/inputs/socket_listener/README.md index 1740d8bcf..ec1aa0bef 100644 --- a/plugins/inputs/socket_listener/README.md +++ b/plugins/inputs/socket_listener/README.md @@ -66,6 +66,10 @@ This is a sample configuration for the plugin. ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" + + ## Content encoding for message payloads, can be set to "gzip" or + ## "identity" to apply no encoding. + # content_encoding = "identity" ``` ## A Note on UDP OS Buffer Sizes @@ -84,6 +88,7 @@ at least 8MB before trying to run large amounts of UDP traffic to your instance. 8MB is just a recommendation, and can be adjusted higher. 
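On the plugin side, the `read_buffer_size` option requests a larger socket receive buffer, which Go exposes as `SetReadBuffer`. A minimal sketch of that mechanism (the address is illustrative; on Linux the effective size is capped by the sysctls shown below):

```go
package main

import "net"

func main() {
	addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:8094")
	if err != nil {
		panic(err)
	}
	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Ask the kernel for an 8 MB receive buffer; the request is
	// silently capped by net.core.rmem_max unless that limit is raised.
	if err := conn.SetReadBuffer(8 * 1024 * 1024); err != nil {
		panic(err)
	}
}
```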
### Linux + Check the current UDP/IP receive buffer limit & default by typing the following commands: diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go index b5b4d0405..b1e933851 100644 --- a/plugins/inputs/socket_listener/socket_listener.go +++ b/plugins/inputs/socket_listener/socket_listener.go @@ -119,7 +119,14 @@ func (ssl *streamSocketListener) read(c net.Conn) { if !scnr.Scan() { break } - metrics, err := ssl.Parse(scnr.Bytes()) + + body, err := ssl.decoder.Decode(scnr.Bytes()) + if err != nil { + ssl.Log.Errorf("Unable to decode incoming line: %s", err.Error()) + continue + } + + metrics, err := ssl.Parse(body) if err != nil { ssl.Log.Errorf("Unable to parse incoming line: %s", err.Error()) // TODO rate limit @@ -155,7 +162,12 @@ func (psl *packetSocketListener) listen() { break } - metrics, err := psl.Parse(buf[:n]) + body, err := psl.decoder.Decode(buf[:n]) + if err != nil { + psl.Log.Errorf("Unable to decode incoming packet: %s", err.Error()) + } + + metrics, err := psl.Parse(body) if err != nil { psl.Log.Errorf("Unable to parse incoming packet: %s", err.Error()) // TODO rate limit @@ -174,6 +186,7 @@ type SocketListener struct { ReadTimeout *internal.Duration `toml:"read_timeout"` KeepAlivePeriod *internal.Duration `toml:"keep_alive_period"` SocketMode string `toml:"socket_mode"` + ContentEncoding string `toml:"content_encoding"` tlsint.ServerConfig wg sync.WaitGroup @@ -183,6 +196,7 @@ type SocketListener struct { parsers.Parser telegraf.Accumulator io.Closer + decoder internal.ContentDecoder } func (sl *SocketListener) Description() string { @@ -244,6 +258,10 @@ func (sl *SocketListener) SampleConfig() string { ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" + + ## Content encoding for message payloads, can be set to "gzip" or + ## "identity" to apply no encoding. + # content_encoding = "identity" ` } @@ -265,6 +283,12 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { protocol := spl[0] addr := spl[1] + var err error + sl.decoder, err = internal.NewContentDecoder(sl.ContentEncoding) + if err != nil { + return err + } + if protocol == "unix" || protocol == "unixpacket" || protocol == "unixgram" { // no good way of testing for "file does not exist". 
// Instead just ignore error and blow up when we try to listen, which will diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index 481a0c1a5..c6adf4cde 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -180,12 +180,65 @@ func TestSocketListener_unixgram(t *testing.T) { testSocketListener(t, sl, client) } +func TestSocketListenerDecode_tcp(t *testing.T) { + defer testEmptyLog(t)() + + sl := newSocketListener() + sl.Log = testutil.Logger{} + sl.ServiceAddress = "tcp://127.0.0.1:0" + sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ContentEncoding = "gzip" + + acc := &testutil.Accumulator{} + err := sl.Start(acc) + require.NoError(t, err) + defer sl.Stop() + + client, err := net.Dial("tcp", sl.Closer.(net.Listener).Addr().String()) + require.NoError(t, err) + + testSocketListener(t, sl, client) +} + +func TestSocketListenerDecode_udp(t *testing.T) { + defer testEmptyLog(t)() + + sl := newSocketListener() + sl.Log = testutil.Logger{} + sl.ServiceAddress = "udp://127.0.0.1:0" + sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ContentEncoding = "gzip" + + acc := &testutil.Accumulator{} + err := sl.Start(acc) + require.NoError(t, err) + defer sl.Stop() + + client, err := net.Dial("udp", sl.Closer.(net.PacketConn).LocalAddr().String()) + require.NoError(t, err) + + testSocketListener(t, sl, client) +} + func testSocketListener(t *testing.T, sl *SocketListener, client net.Conn) { - mstr12 := "test,foo=bar v=1i 123456789\ntest,foo=baz v=2i 123456790\n" - mstr3 := "test,foo=zab v=3i 123456791" - client.Write([]byte(mstr12)) - client.Write([]byte(mstr3)) - if _, ok := client.(net.Conn); ok { + mstr12 := []byte("test,foo=bar v=1i 123456789\ntest,foo=baz v=2i 123456790\n") + mstr3 := []byte("test,foo=zab v=3i 123456791") + + if sl.ContentEncoding == "gzip" { + encoder, err := internal.NewContentEncoder(sl.ContentEncoding) + require.NoError(t, err) + mstr12, err = encoder.Encode(mstr12) + require.NoError(t, err) + + encoder, err = internal.NewContentEncoder(sl.ContentEncoding) + require.NoError(t, err) + mstr3, err = encoder.Encode(mstr3) + require.NoError(t, err) + } + + client.Write(mstr12) + client.Write(mstr3) + if client.LocalAddr().Network() != "udp" { // stream connection. needs trailing newline to terminate mstr3 client.Write([]byte{'\n'}) } From a193f527f0831076fc126e9de1cf19f129befa2e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 21 Nov 2019 18:13:44 -0800 Subject: [PATCH 1341/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 76481d2e1..15bf0e448 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,7 @@ - [#6668](https://github.com/influxdata/telegraf/pull/6668): Add support for sending HTTP Basic Auth in influxdb input - [#5767](https://github.com/influxdata/telegraf/pull/5767): Add ability to configure the url tag in the prometheus input. - [#5767](https://github.com/influxdata/telegraf/pull/5767): Add prometheus metric_version=2 mapping to internal metrics/line protocol. +- [#6660](https://github.com/influxdata/telegraf/pull/6660): Add content_encoding compression support to socket_listener. 
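As a usage note for the `content_encoding` option added above, a client only needs to gzip the payload before writing it; a minimal sketch sending one metric over UDP to a listener configured with `content_encoding = "gzip"` (address and metric line are illustrative, mirroring the unit test above):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"net"
)

func main() {
	conn, err := net.Dial("udp", "127.0.0.1:8094")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Compress one newline-terminated line-protocol metric and send
	// the whole gzip frame as a single datagram.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("test,foo=bar v=1i 123456789\n"))
	zw.Close() // flush the gzip stream before sending

	if _, err := conn.Write(buf.Bytes()); err != nil {
		panic(err)
	}
}
```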
#### Bugfixes From c7af10b159e761b9876a4af6fe023fa8f67bae96 Mon Sep 17 00:00:00 2001 From: Marc Ruiz Date: Fri, 22 Nov 2019 03:37:33 +0100 Subject: [PATCH 1342/1815] Add high resolution metrics support to CloudWatch output (#6689) --- plugins/outputs/cloudwatch/README.md | 5 +- plugins/outputs/cloudwatch/cloudwatch.go | 60 ++++++++++++------- plugins/outputs/cloudwatch/cloudwatch_test.go | 29 +++++++-- 3 files changed, 64 insertions(+), 30 deletions(-) diff --git a/plugins/outputs/cloudwatch/README.md b/plugins/outputs/cloudwatch/README.md index 31619263f..d585255c8 100644 --- a/plugins/outputs/cloudwatch/README.md +++ b/plugins/outputs/cloudwatch/README.md @@ -45,4 +45,7 @@ also save AWS API cost. If you enable this flag, this plugin would parse the required [CloudWatch statistic fields](https://docs.aws.amazon.com/sdk-for-go/api/service/cloudwatch/#StatisticSet) (count, min, max, and sum) and send them to CloudWatch. You could use `basicstats` aggregator to calculate those fields. If not all statistic fields are available, -all fields would still be sent as raw metrics. \ No newline at end of file +all fields would still be sent as raw metrics. + +## high_resolution_metrics +Enable high resolution metrics (1 second precision) instead of standard ones (60 seconds precision) \ No newline at end of file diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index aaefa89ec..625a9c265 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -25,8 +25,9 @@ type CloudWatch struct { Token string `toml:"token"` EndpointURL string `toml:"endpoint_url"` - Namespace string `toml:"namespace"` // CloudWatch Metrics Namespace - svc *cloudwatch.CloudWatch + Namespace string `toml:"namespace"` // CloudWatch Metrics Namespace + HighResolutionMetrics bool `toml:"high_resolution_metrics"` + svc *cloudwatch.CloudWatch WriteStatistics bool `toml:"write_statistics"` } @@ -47,11 +48,12 @@ type cloudwatchField interface { } type statisticField struct { - metricName string - fieldName string - tags map[string]string - values map[statisticType]float64 - timestamp time.Time + metricName string + fieldName string + tags map[string]string + values map[statisticType]float64 + timestamp time.Time + storageResolution int64 } func (f *statisticField) addValue(sType statisticType, value float64) { @@ -81,6 +83,7 @@ func (f *statisticField) buildDatum() []*cloudwatch.MetricDatum { Sum: aws.Float64(sum), SampleCount: aws.Float64(count), }, + StorageResolution: aws.Int64(f.storageResolution), } datums = append(datums, datum) @@ -126,11 +129,12 @@ func (f *statisticField) hasAllFields() bool { } type valueField struct { - metricName string - fieldName string - tags map[string]string - value float64 - timestamp time.Time + metricName string + fieldName string + tags map[string]string + value float64 + timestamp time.Time + storageResolution int64 } func (f *valueField) addValue(sType statisticType, value float64) { @@ -143,10 +147,11 @@ func (f *valueField) buildDatum() []*cloudwatch.MetricDatum { return []*cloudwatch.MetricDatum{ { - MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")), - Value: aws.Float64(f.value), - Dimensions: BuildDimensions(f.tags), - Timestamp: aws.Time(f.timestamp), + MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")), + Value: aws.Float64(f.value), + Dimensions: BuildDimensions(f.tags), + Timestamp: aws.Time(f.timestamp), + StorageResolution: 
aws.Int64(f.storageResolution), }, } } @@ -186,6 +191,9 @@ var sampleConfig = ` ## You could use basicstats aggregator to calculate those fields. If not all statistic ## fields are available, all fields would still be sent as raw metrics. # write_statistics = false + + ## Enable high resolution metrics of 1 second (standard resolution metrics are 60 seconds) + ## high_resolution_metrics = false ` func (c *CloudWatch) SampleConfig() string { @@ -220,7 +228,7 @@ func (c *CloudWatch) Write(metrics []telegraf.Metric) error { var datums []*cloudwatch.MetricDatum for _, m := range metrics { - d := BuildMetricDatum(c.WriteStatistics, m) + d := BuildMetricDatum(c.WriteStatistics, c.HighResolutionMetrics, m) datums = append(datums, d...) } @@ -278,10 +286,14 @@ func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch // Make a MetricDatum from telegraf.Metric. It would check if all required fields of // cloudwatch.StatisticSet are available. If so, it would build MetricDatum from statistic values. // Otherwise, fields would still been built independently. -func BuildMetricDatum(buildStatistic bool, point telegraf.Metric) []*cloudwatch.MetricDatum { +func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point telegraf.Metric) []*cloudwatch.MetricDatum { fields := make(map[string]cloudwatchField) tags := point.Tags() + storageResolution := int64(60) + if highResolutionMetrics { + storageResolution = 1 + } for k, v := range point.Fields() { @@ -297,11 +309,12 @@ func BuildMetricDatum(buildStatistic bool, point telegraf.Metric) []*cloudwatch. // If statistic metric is not enabled or non-statistic type, just take current field as a value field. if !buildStatistic || sType == statisticTypeNone { fields[k] = &valueField{ - metricName: point.Name(), - fieldName: k, - tags: tags, - timestamp: point.Time(), - value: val, + metricName: point.Name(), + fieldName: k, + tags: tags, + timestamp: point.Time(), + value: val, + storageResolution: storageResolution, } continue } @@ -317,6 +330,7 @@ func BuildMetricDatum(buildStatistic bool, point telegraf.Metric) []*cloudwatch. 
values: map[statisticType]float64{ sType: val, }, + storageResolution: storageResolution, } } else { // Add new statistic value to this field diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go index acadca842..b2466e4d0 100644 --- a/plugins/outputs/cloudwatch/cloudwatch_test.go +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -75,11 +75,11 @@ func TestBuildMetricDatums(t *testing.T) { testutil.TestMetric(float64(1.174272e+108)), // largest should be 1.174271e+108 } for _, point := range validMetrics { - datums := BuildMetricDatum(false, point) + datums := BuildMetricDatum(false, false, point) assert.Equal(1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", point)) } for _, point := range invalidMetrics { - datums := BuildMetricDatum(false, point) + datums := BuildMetricDatum(false, false, point) assert.Equal(0, len(datums), fmt.Sprintf("Valid point should not create a Datum {value: %v}", point)) } @@ -89,7 +89,7 @@ func TestBuildMetricDatums(t *testing.T) { map[string]interface{}{"value_max": float64(10), "value_min": float64(0), "value_sum": float64(100), "value_count": float64(20)}, time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), ) - datums := BuildMetricDatum(true, statisticMetric) + datums := BuildMetricDatum(true, false, statisticMetric) assert.Equal(1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", statisticMetric)) multiFieldsMetric, _ := metric.New( @@ -98,7 +98,7 @@ func TestBuildMetricDatums(t *testing.T) { map[string]interface{}{"valueA": float64(10), "valueB": float64(0), "valueC": float64(100), "valueD": float64(20)}, time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), ) - datums = BuildMetricDatum(true, multiFieldsMetric) + datums = BuildMetricDatum(true, false, multiFieldsMetric) assert.Equal(4, len(datums), fmt.Sprintf("Each field should create a Datum {value: %v}", multiFieldsMetric)) multiStatisticMetric, _ := metric.New( @@ -112,10 +112,27 @@ func TestBuildMetricDatums(t *testing.T) { }, time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), ) - datums = BuildMetricDatum(true, multiStatisticMetric) + datums = BuildMetricDatum(true, false, multiStatisticMetric) assert.Equal(7, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", multiStatisticMetric)) } +func TestMetricDatumResolution(t *testing.T) { + const expectedStandardResolutionValue = int64(60) + const expectedHighResolutionValue = int64(1) + + assert := assert.New(t) + + metric := testutil.TestMetric(1) + + standardResolutionDatum := BuildMetricDatum(false, false, metric) + actualStandardResolutionValue := *standardResolutionDatum[0].StorageResolution + assert.Equal(expectedStandardResolutionValue, actualStandardResolutionValue) + + highResolutionDatum := BuildMetricDatum(false, true, metric) + actualHighResolutionValue := *highResolutionDatum[0].StorageResolution + assert.Equal(expectedHighResolutionValue, actualHighResolutionValue) +} + func TestBuildMetricDatums_SkipEmptyTags(t *testing.T) { input := testutil.MustMetric( "cpu", @@ -129,7 +146,7 @@ func TestBuildMetricDatums_SkipEmptyTags(t *testing.T) { time.Unix(0, 0), ) - datums := BuildMetricDatum(true, input) + datums := BuildMetricDatum(true, false, input) require.Len(t, datums[0].Dimensions, 1) } From c3e3236babef4952840887ca2d67f4741a2549a0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 21 Nov 2019 18:39:22 -0800 Subject: [PATCH 1343/1815] Update changelog --- CHANGELOG.md | 1 + 1 file 
changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 15bf0e448..5be0e232e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,7 @@ - [#5767](https://github.com/influxdata/telegraf/pull/5767): Add ability to configure the url tag in the prometheus input. - [#5767](https://github.com/influxdata/telegraf/pull/5767): Add prometheus metric_version=2 mapping to internal metrics/line protocol. - [#6660](https://github.com/influxdata/telegraf/pull/6660): Add content_encoding compression support to socket_listener. +- [#6689](https://github.com/influxdata/telegraf/pull/6689): Add high resolution metrics support to CloudWatch output. #### Bugfixes From 4e8aa8ad1b65693348dce097235465a7935f2227 Mon Sep 17 00:00:00 2001 From: Marc Ruiz Date: Fri, 22 Nov 2019 19:32:39 +0100 Subject: [PATCH 1344/1815] Fix README.md and improve example config description (#6707) --- plugins/outputs/cloudwatch/README.md | 2 +- plugins/outputs/cloudwatch/cloudwatch.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/outputs/cloudwatch/README.md b/plugins/outputs/cloudwatch/README.md index d585255c8..418fe86ff 100644 --- a/plugins/outputs/cloudwatch/README.md +++ b/plugins/outputs/cloudwatch/README.md @@ -47,5 +47,5 @@ also save AWS API cost. If enable this flag, this plugin would parse the require aggregator to calculate those fields. If not all statistic fields are available, all fields would still be sent as raw metrics. -## high_resolution_metrics +### high_resolution_metrics Enable high resolution metrics (1 second precision) instead of standard ones (60 seconds precision) \ No newline at end of file diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index 625a9c265..1ae8bd4f8 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -192,8 +192,8 @@ var sampleConfig = ` ## fields are available, all fields would still be sent as raw metrics. 
# write_statistics = false - ## Enable high resolution metrics of 1 second (standard resolution metrics are 60 seconds) - ## high_resolution_metrics = false + ## Enable high resolution metrics of 1 second (if not enabled, standard resolution metrics have 60 seconds precision) + # high_resolution_metrics = false ` func (c *CloudWatch) SampleConfig() string { From cec1bdce905c03258577ecc95521a6125813206c Mon Sep 17 00:00:00 2001 From: reimda Date: Mon, 25 Nov 2019 12:56:21 -0700 Subject: [PATCH 1345/1815] Add snmp_trap input plugin (#6629) --- Gopkg.lock | 6 +- plugins/inputs/all/all.go | 1 + plugins/inputs/snmp/snmp.go | 28 ++- plugins/inputs/snmp/snmp_test.go | 4 +- plugins/inputs/snmp_trap/README.md | 43 ++++ plugins/inputs/snmp_trap/snmp_trap.go | 266 +++++++++++++++++++++ plugins/inputs/snmp_trap/snmp_trap_test.go | 222 +++++++++++++++++ 7 files changed, 562 insertions(+), 8 deletions(-) create mode 100644 plugins/inputs/snmp_trap/README.md create mode 100644 plugins/inputs/snmp_trap/snmp_trap.go create mode 100644 plugins/inputs/snmp_trap/snmp_trap_test.go diff --git a/Gopkg.lock b/Gopkg.lock index 2671dd975..fa0c2f4c7 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -525,12 +525,12 @@ version = "v1.1.1" [[projects]] - digest = "1:530233672f656641b365f8efb38ed9fba80e420baff2ce87633813ab3755ed6d" + digest = "1:68c64bb61d55dcd17c82ca0b871ddddb5ae18b30cfe26f6bfd4b6df6287dc2e0" name = "github.com/golang/mock" packages = ["gomock"] pruneopts = "" - revision = "51421b967af1f557f93a59e0057aaf15ca02e29c" - version = "v1.2.0" + revision = "9fa652df1129bef0e734c9cf9bf6dbae9ef3b9fa" + version = "1.3.1" [[projects]] digest = "1:f958a1c137db276e52f0b50efee41a1a389dcdded59a69711f3e872757dab34b" diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 326629a7e..ca0aa4a32 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -136,6 +136,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/smart" _ "github.com/influxdata/telegraf/plugins/inputs/snmp" _ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy" + _ "github.com/influxdata/telegraf/plugins/inputs/snmp_trap" _ "github.com/influxdata/telegraf/plugins/inputs/socket_listener" _ "github.com/influxdata/telegraf/plugins/inputs/solr" _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver" diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 32968730e..fe9645772 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -277,7 +277,7 @@ func (f *Field) init() error { return nil } - _, oidNum, oidText, conversion, err := snmpTranslate(f.Oid) + _, oidNum, oidText, conversion, err := SnmpTranslate(f.Oid) if err != nil { return Errorf(err, "translating") } @@ -882,7 +882,7 @@ func snmpTable(oid string) (mibName string, oidNum string, oidText string, field } func snmpTableCall(oid string) (mibName string, oidNum string, oidText string, fields []Field, err error) { - mibName, oidNum, oidText, _, err = snmpTranslate(oid) + mibName, oidNum, oidText, _, err = SnmpTranslate(oid) if err != nil { return "", "", "", nil, Errorf(err, "translating") } @@ -952,7 +952,7 @@ var snmpTranslateCachesLock sync.Mutex var snmpTranslateCaches map[string]snmpTranslateCache // snmpTranslate resolves the given OID. 
-func snmpTranslate(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { +func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { snmpTranslateCachesLock.Lock() if snmpTranslateCaches == nil { snmpTranslateCaches = map[string]snmpTranslateCache{} @@ -978,6 +978,28 @@ func snmpTranslate(oid string) (mibName string, oidNum string, oidText string, c return stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err } +func SnmpTranslateForce(oid string, mibName string, oidNum string, oidText string, conversion string) { + snmpTranslateCachesLock.Lock() + defer snmpTranslateCachesLock.Unlock() + if snmpTranslateCaches == nil { + snmpTranslateCaches = map[string]snmpTranslateCache{} + } + + var stc snmpTranslateCache + stc.mibName = mibName + stc.oidNum = oidNum + stc.oidText = oidText + stc.conversion = conversion + stc.err = nil + snmpTranslateCaches[oid] = stc +} + +func SnmpTranslateClear() { + snmpTranslateCachesLock.Lock() + defer snmpTranslateCachesLock.Unlock() + snmpTranslateCaches = map[string]snmpTranslateCache{} +} + func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { var out []byte if strings.ContainsAny(oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") { diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index efa426845..9a4335e4e 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -742,7 +742,7 @@ func TestFieldConvert(t *testing.T) { func TestSnmpTranslateCache_miss(t *testing.T) { snmpTranslateCaches = nil oid := "IF-MIB::ifPhysAddress.1" - mibName, oidNum, oidText, conversion, err := snmpTranslate(oid) + mibName, oidNum, oidText, conversion, err := SnmpTranslate(oid) assert.Len(t, snmpTranslateCaches, 1) stc := snmpTranslateCaches[oid] require.NotNil(t, stc) @@ -763,7 +763,7 @@ func TestSnmpTranslateCache_hit(t *testing.T) { err: fmt.Errorf("e"), }, } - mibName, oidNum, oidText, conversion, err := snmpTranslate("foo") + mibName, oidNum, oidText, conversion, err := SnmpTranslate("foo") assert.Equal(t, "a", mibName) assert.Equal(t, "b", oidNum) assert.Equal(t, "c", oidText) diff --git a/plugins/inputs/snmp_trap/README.md b/plugins/inputs/snmp_trap/README.md new file mode 100644 index 000000000..ec3c7ba4c --- /dev/null +++ b/plugins/inputs/snmp_trap/README.md @@ -0,0 +1,43 @@ +# SNMP Trap Input Plugin + +The SNMP Trap plugin is a service input plugin that receives SNMP +notifications (traps and inform requests). + +Notifications are received on plain UDP. The port to listen on is +configurable. + +OIDs can be resolved to strings using system MIB files. This is done +in the same way as the SNMP input plugin. See the section "MIB Lookups" in +the SNMP [README.md](../snmp/README.md) for details. + +### Configuration +```toml +# Snmp trap listener
[[inputs.snmp_trap]] + ## Transport, local address, and port to listen on. Transport must + ## be "udp://". Omit local address to listen on all interfaces. 
+ ## example: "udp://127.0.0.1:1234" + # service_address = "udp://:162" + ## Timeout running snmptranslate command + # timeout = "5s" +``` + +### Metrics + +- snmp_trap + - tags: + - source (string, IP address of trap source) + - name (string, value from SNMPv2-MIB::snmpTrapOID.0 PDU) + - mib (string, MIB from SNMPv2-MIB::snmpTrapOID.0 PDU) + - oid (string, OID string from SNMPv2-MIB::snmpTrapOID.0 PDU) + - version (string, "1" or "2c" or "3") + - fields: + - Fields are mapped from variables in the trap. Field names are + the trap variable names after MIB lookup. Field values are trap + variable values. + +### Example Output +``` +snmp_trap,mib=SNMPv2-MIB,name=coldStart,oid=.1.3.6.1.6.3.1.1.5.1,source=192.168.122.102,version=2c snmpTrapEnterprise.0="linux",sysUpTimeInstance=1i 1574109187723429814 +snmp_trap,mib=NET-SNMP-AGENT-MIB,name=nsNotifyShutdown,oid=.1.3.6.1.4.1.8072.4.0.2,source=192.168.122.102,version=2c sysUpTimeInstance=5803i,snmpTrapEnterprise.0="netSnmpNotificationPrefix" 1574109186555115459 +``` diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go new file mode 100644 index 000000000..4b9ce4a56 --- /dev/null +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -0,0 +1,266 @@ +package snmp_trap + +import ( + "bufio" + "bytes" + "fmt" + "net" + "os/exec" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" + + "github.com/soniah/gosnmp" +) + +var defaultTimeout = internal.Duration{Duration: time.Second * 5} + +type handler func(*gosnmp.SnmpPacket, *net.UDPAddr) +type execer func(internal.Duration, string, ...string) ([]byte, error) + +type mibEntry struct { + mibName string + oidText string +} + +type SnmpTrap struct { + ServiceAddress string `toml:"service_address"` + Timeout internal.Duration `toml:"timeout"` + + acc telegraf.Accumulator + listener *gosnmp.TrapListener + timeFunc func() time.Time + errCh chan error + + makeHandlerWrapper func(handler) handler + + Log telegraf.Logger `toml:"-"` + + cacheLock sync.Mutex + cache map[string]mibEntry + + execCmd execer +} + +var sampleConfig = ` + ## Transport, local address, and port to listen on. Transport must + ## be "udp://". Omit local address to listen on all interfaces. + ## example: "udp://127.0.0.1:1234" + # service_address = "udp://:162" + ## Timeout running snmptranslate command + # timeout = "5s" +` + +func (s *SnmpTrap) SampleConfig() string { + return sampleConfig +} + +func (s *SnmpTrap) Description() string { + return "Receive SNMP traps" +} + +func (s *SnmpTrap) Gather(_ telegraf.Accumulator) error { + return nil +} + +func init() { + inputs.Add("snmp_trap", func() telegraf.Input { + return &SnmpTrap{ + timeFunc: time.Now, + ServiceAddress: "udp://:162", + Timeout: defaultTimeout, + } + }) +} + +func realExecCmd(Timeout internal.Duration, arg0 string, args ...string) ([]byte, error) { + cmd := exec.Command(arg0, args...) 
+ var out bytes.Buffer + cmd.Stdout = &out + err := internal.RunTimeout(cmd, Timeout.Duration) + if err != nil { + return nil, err + } + return out.Bytes(), nil +} + +func (s *SnmpTrap) Init() error { + s.cache = map[string]mibEntry{} + s.execCmd = realExecCmd + return nil +} + +func (s *SnmpTrap) Start(acc telegraf.Accumulator) error { + s.acc = acc + s.listener = gosnmp.NewTrapListener() + s.listener.OnNewTrap = makeTrapHandler(s) + s.listener.Params = gosnmp.Default + + // wrap the handler, used in unit tests + if nil != s.makeHandlerWrapper { + s.listener.OnNewTrap = s.makeHandlerWrapper(s.listener.OnNewTrap) + } + + split := strings.SplitN(s.ServiceAddress, "://", 2) + if len(split) != 2 { + return fmt.Errorf("invalid service address: %s", s.ServiceAddress) + } + + protocol := split[0] + addr := split[1] + + // gosnmp.TrapListener currently supports udp only. For forward + // compatibility, require udp in the service address + if protocol != "udp" { + return fmt.Errorf("unknown protocol '%s' in '%s'", protocol, s.ServiceAddress) + } + + // If (*TrapListener).Listen immediately returns an error we need + // to return it from this function. Use a channel to get it here + // from the goroutine. Buffer one in case Listen returns after + // Listening but before our Close is called. + s.errCh = make(chan error, 1) + go func() { + s.errCh <- s.listener.Listen(addr) + }() + + select { + case <-s.listener.Listening(): + s.Log.Infof("Listening on %s", s.ServiceAddress) + case err := <-s.errCh: + return err + } + + return nil +} + +func (s *SnmpTrap) Stop() { + s.listener.Close() + err := <-s.errCh + if nil != err { + s.Log.Errorf("Error stopping trap listener %v", err) + } +} + +func makeTrapHandler(s *SnmpTrap) handler { + return func(packet *gosnmp.SnmpPacket, addr *net.UDPAddr) { + tm := s.timeFunc() + fields := map[string]interface{}{} + tags := map[string]string{} + + tags["version"] = packet.Version.String() + tags["source"] = addr.IP.String() + + for _, v := range packet.Variables { + // Use system mibs to resolve oids. Don't fall back to + // numeric oid because it's not useful enough to the end + // user and can be difficult to translate or remove from + // the database later. + + var value interface{} + + // todo: format the pdu value based on its snmp type and + // the mib's textual convention. The snmp input plugin + // only handles textual convention for ip and mac + // addresses + + switch v.Type { + case gosnmp.ObjectIdentifier: + val, ok := v.Value.(string) + if !ok { + s.Log.Errorf("Error getting value OID") + return + } + + var e mibEntry + var err error + e, err = s.lookup(val) + if nil != err { + s.Log.Errorf("Error resolving value OID: %v", err) + return + } + + value = e.oidText + + // 1.3.6.1.6.3.1.1.4.1.0 is SNMPv2-MIB::snmpTrapOID.0. + // If v.Name is this oid, set a tag of the trap name. + if v.Name == ".1.3.6.1.6.3.1.1.4.1.0" { + tags["oid"] = val + tags["name"] = e.oidText + tags["mib"] = e.mibName + continue + } + default: + value = v.Value + } + + e, err := s.lookup(v.Name) + if nil != err { + s.Log.Errorf("Error resolving OID: %v", err) + return + } + + name := e.oidText + + fields[name] = value + } + + s.acc.AddFields("snmp_trap", fields, tags, tm) + } +} + +func (s *SnmpTrap) lookup(oid string) (e mibEntry, err error) { + s.cacheLock.Lock() + defer s.cacheLock.Unlock() + var ok bool + if e, ok = s.cache[oid]; !ok { + // cache miss. 
exec snmptranslate + e, err = s.snmptranslate(oid) + if err == nil { + s.cache[oid] = e + } + return e, err + } + return e, nil +} + +func (s *SnmpTrap) clear() { + s.cacheLock.Lock() + defer s.cacheLock.Unlock() + s.cache = map[string]mibEntry{} +} + +func (s *SnmpTrap) load(oid string, e mibEntry) { + s.cacheLock.Lock() + defer s.cacheLock.Unlock() + s.cache[oid] = e +} + +func (s *SnmpTrap) snmptranslate(oid string) (e mibEntry, err error) { + var out []byte + out, err = s.execCmd(s.Timeout, "snmptranslate", "-Td", "-Ob", "-m", "all", oid) + + if err != nil { + return e, err + } + + scanner := bufio.NewScanner(bytes.NewBuffer(out)) + ok := scanner.Scan() + if err = scanner.Err(); !ok && err != nil { + return e, err + } + + e.oidText = scanner.Text() + + i := strings.Index(e.oidText, "::") + if i == -1 { + return e, fmt.Errorf("not found") + } + e.mibName = e.oidText[:i] + e.oidText = e.oidText[i+2:] + return e, nil +} diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go new file mode 100644 index 000000000..ed31786d8 --- /dev/null +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -0,0 +1,222 @@ +package snmp_trap + +import ( + "fmt" + "net" + "strconv" + "testing" + "time" + + "github.com/soniah/gosnmp" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/require" +) + +func TestLoad(t *testing.T) { + s := &SnmpTrap{} + require.Nil(t, s.Init()) + + defer s.clear() + s.load( + ".1.3.6.1.6.3.1.1.5.1", + mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + ) + + e, err := s.lookup(".1.3.6.1.6.3.1.1.5.1") + require.NoError(t, err) + require.Equal(t, "SNMPv2-MIB", e.mibName) + require.Equal(t, "coldStart", e.oidText) +} + +func sendTrap(t *testing.T, port uint16) (sentTimestamp uint32) { + s := &gosnmp.GoSNMP{ + Port: port, + Community: "public", + Version: gosnmp.Version2c, + Timeout: time.Duration(2) * time.Second, + Retries: 3, + MaxOids: gosnmp.MaxOids, + Target: "127.0.0.1", + } + + err := s.Connect() + if err != nil { + t.Errorf("Connect() err: %v", err) + } + defer s.Conn.Close() + + // If the first pdu isn't type TimeTicks, gosnmp.SendTrap() will + // prepend one with time.Now(). The time value is part of the + // plugin output so we need to keep track of it and verify it + // later. + now := uint32(time.Now().Unix()) + timePdu := gosnmp.SnmpPDU{ + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + } + + pdu := gosnmp.SnmpPDU{ + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + } + + trap := gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + timePdu, + pdu, + }, + } + + _, err = s.SendTrap(trap) + if err != nil { + t.Errorf("SendTrap() err: %v", err) + } + + return now +} + +func TestReceiveTrap(t *testing.T) { + // We would prefer to specify port 0 and let the network stack + // choose an unused port for us but TrapListener doesn't have a + // way to return the autoselected port. Instead, we'll use an + // unusual port and hope it's unused. 
+ const port = 12399 + var fakeTime = time.Now() + + // hook into the trap handler so the test knows when the trap has + // been received + received := make(chan int) + wrap := func(f handler) handler { + return func(p *gosnmp.SnmpPacket, a *net.UDPAddr) { + f(p, a) + received <- 0 + } + } + + // set up the service input plugin + s := &SnmpTrap{ + ServiceAddress: "udp://:" + strconv.Itoa(port), + makeHandlerWrapper: wrap, + timeFunc: func() time.Time { + return fakeTime + }, + Log: testutil.Logger{}, + } + require.Nil(t, s.Init()) + var acc testutil.Accumulator + require.Nil(t, s.Start(&acc)) + defer s.Stop() + + // Preload the cache with the oids we'll use in this test so + // snmptranslate and mibs don't need to be installed. + defer s.clear() + s.load(".1.3.6.1.6.3.1.1.4.1.0", + mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }) + s.load(".1.3.6.1.6.3.1.1.5.1", + mibEntry{ + "SNMPv2-MIB", + "coldStart", + }) + s.load(".1.3.6.1.2.1.1.3.0", + mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }) + + // send the trap + sentTimestamp := sendTrap(t, port) + + // wait for trap to be received + select { + case <-received: + case <-time.After(2 * time.Second): + t.Fatal("timed out waiting for trap to be received") + } + + // verify plugin output + expected := []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "2c", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": sentTimestamp, + }, + fakeTime, + ), + } + + testutil.RequireMetricsEqual(t, + expected, acc.GetTelegrafMetrics(), + testutil.SortMetrics()) + +} + +func fakeExecCmd(_ internal.Duration, _ string, _ ...string) ([]byte, error) { + return nil, fmt.Errorf("intentional failure") +} + +func TestMissingOid(t *testing.T) { + // should fail even if snmptranslate is installed + const port = 12399 + var fakeTime = time.Now() + + received := make(chan int) + wrap := func(f handler) handler { + return func(p *gosnmp.SnmpPacket, a *net.UDPAddr) { + f(p, a) + received <- 0 + } + } + + s := &SnmpTrap{ + ServiceAddress: "udp://:" + strconv.Itoa(port), + makeHandlerWrapper: wrap, + timeFunc: func() time.Time { + return fakeTime + }, + Log: testutil.Logger{}, + } + require.Nil(t, s.Init()) + var acc testutil.Accumulator + require.Nil(t, s.Start(&acc)) + defer s.Stop() + + // make sure the cache is empty + s.clear() + + // don't call the real snmptranslate + s.execCmd = fakeExecCmd + + _ = sendTrap(t, port) + + select { + case <-received: + case <-time.After(2 * time.Second): + t.Fatal("timed out waiting for trap to be received") + } + + // oid lookup should fail so we shouldn't get a metric + expected := []telegraf.Metric{} + + testutil.RequireMetricsEqual(t, + expected, acc.GetTelegrafMetrics(), + testutil.SortMetrics()) +} From 6d94798fd66b102e58414beebce303d5b9414547 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 25 Nov 2019 11:58:19 -0800 Subject: [PATCH 1346/1815] Update changelog and readme --- CHANGELOG.md | 1 + README.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5be0e232e..294763c38 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ - [azure_storage_queue](/plugins/inputs/azure_storage_queue/README.md) - Contributed by @mjiderhamn - [ethtool](/plugins/inputs/ethtool/README.md) - Contributed by @philippreston +- [snmp_trap](/plugins/inputs/snmp_trap/README.md) - Contributed by @influxdata - 
[suricata](/plugins/inputs/suricata/README.md) - Contributed by @satta - [synproxy](/plugins/inputs/synproxy/README.md) - Contributed by @rfrenayworldstream diff --git a/README.md b/README.md index a207ecd32..8fa869ca0 100644 --- a/README.md +++ b/README.md @@ -272,6 +272,7 @@ For documentation on the latest development code see the [documentation index][d * [smart](./plugins/inputs/smart) * [snmp_legacy](./plugins/inputs/snmp_legacy) * [snmp](./plugins/inputs/snmp) +* [snmp_trap](./plugins/inputs/snmp_trap) * [socket_listener](./plugins/inputs/socket_listener) * [solr](./plugins/inputs/solr) * [sql server](./plugins/inputs/sqlserver) (microsoft) From cbe7d33bd4d8b243975354668c1393609d62112a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 25 Nov 2019 15:31:22 -0800 Subject: [PATCH 1347/1815] Add SReclaimable and SUnreclaim to mem input (#6716) --- Gopkg.toml | 2 +- plugins/inputs/mem/README.md | 4 +++- plugins/inputs/mem/memory.go | 2 ++ plugins/inputs/mem/memory_test.go | 4 ++++ 4 files changed, 10 insertions(+), 2 deletions(-) diff --git a/Gopkg.toml b/Gopkg.toml index e75e0d843..c6e510641 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -144,7 +144,7 @@ [[constraint]] name = "github.com/shirou/gopsutil" - version = "2.18.12" + version = "2.19.7" [[constraint]] name = "github.com/Shopify/sarama" diff --git a/plugins/inputs/mem/README.md b/plugins/inputs/mem/README.md index 842546825..87280d8d2 100644 --- a/plugins/inputs/mem/README.md +++ b/plugins/inputs/mem/README.md @@ -43,6 +43,8 @@ Available fields are dependent on platform. - mapped (integer) - page_tables (integer) - shared (integer) + - sreclaimable (integer) + - sunreclaim (integer) - swap_cached (integer) - swap_free (integer) - swap_total (integer) @@ -54,5 +56,5 @@ Available fields are dependent on platform. 
### Example Output: ``` -mem active=11347566592i,available=18705133568i,available_percent=89.4288960571006,buffered=1976709120i,cached=13975572480i,commit_limit=14753067008i,committed_as=2872422400i,dirty=87461888i,free=1352400896i,high_free=0i,high_total=0i,huge_page_size=2097152i,huge_pages_free=0i,huge_pages_total=0i,inactive=6201593856i,low_free=0i,low_total=0i,mapped=310427648i,page_tables=14397440i,shared=200781824i,slab=1937526784i,swap_cached=0i,swap_free=4294963200i,swap_total=4294963200i,total=20916207616i,used=3611525120i,used_percent=17.26663449848977,vmalloc_chunk=0i,vmalloc_total=35184372087808i,vmalloc_used=0i,wired=0i,write_back=0i,write_back_tmp=0i 1536704085000000000 +mem active=9299595264i,available=16818249728i,available_percent=80.41654254645131,buffered=2383761408i,cached=13316689920i,commit_limit=14751920128i,committed_as=11781156864i,dirty=122880i,free=1877688320i,high_free=0i,high_total=0i,huge_page_size=2097152i,huge_pages_free=0i,huge_pages_total=0i,inactive=7549939712i,low_free=0i,low_total=0i,mapped=416763904i,page_tables=19787776i,shared=670679040i,slab=2081071104i,sreclaimable=1923395584i,sunreclaim=157675520i,swap_cached=1302528i,swap_free=4286128128i,swap_total=4294963200i,total=20913917952i,used=3335778304i,used_percent=15.95004011996231,vmalloc_chunk=0i,vmalloc_total=35184372087808i,vmalloc_used=0i,wired=0i,write_back=0i,write_back_tmp=0i 1574712869000000000 ``` diff --git a/plugins/inputs/mem/memory.go b/plugins/inputs/mem/memory.go index a7d887cbe..daae390b8 100644 --- a/plugins/inputs/mem/memory.go +++ b/plugins/inputs/mem/memory.go @@ -50,6 +50,8 @@ func (s *MemStats) Gather(acc telegraf.Accumulator) error { "mapped": vm.Mapped, "page_tables": vm.PageTables, "shared": vm.Shared, + "sreclaimable": vm.SReclaimable, + "sunreclaim": vm.SUnreclaim, "swap_cached": vm.SwapCached, "swap_free": vm.SwapFree, "swap_total": vm.SwapTotal, diff --git a/plugins/inputs/mem/memory_test.go b/plugins/inputs/mem/memory_test.go index 06f2f6ea9..653010fa8 100644 --- a/plugins/inputs/mem/memory_test.go +++ b/plugins/inputs/mem/memory_test.go @@ -40,6 +40,8 @@ func TestMemStats(t *testing.T) { Mapped: 42236, PageTables: 1236, Shared: 0, + SReclaimable: 1923022848, + SUnreclaim: 157728768, SwapCached: 0, SwapFree: 524280, SwapTotal: 524280, @@ -81,6 +83,8 @@ func TestMemStats(t *testing.T) { "mapped": uint64(42236), "page_tables": uint64(1236), "shared": uint64(0), + "sreclaimable": uint64(1923022848), + "sunreclaim": uint64(157728768), "swap_cached": uint64(0), "swap_free": uint64(524280), "swap_total": uint64(524280), From c16b760a264906dce8b4a7c1d8e1ee6fd41c756c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 25 Nov 2019 15:36:12 -0800 Subject: [PATCH 1348/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 294763c38..fac354ab0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,7 @@ - [#5767](https://github.com/influxdata/telegraf/pull/5767): Add prometheus metric_version=2 mapping to internal metrics/line protocol. - [#6660](https://github.com/influxdata/telegraf/pull/6660): Add content_encoding compression support to socket_listener. - [#6689](https://github.com/influxdata/telegraf/pull/6689): Add high resolution metrics support to CloudWatch output. +- [#6716](https://github.com/influxdata/telegraf/pull/6716): Add SReclaimable and SUnreclaim to mem input. 
#### Bugfixes From c53d53826da8ba118cc672a59a46e7c6dc7a411a Mon Sep 17 00:00:00 2001 From: Jonathan Negrin Date: Tue, 26 Nov 2019 00:38:57 +0100 Subject: [PATCH 1349/1815] Allow multiple certificates per file in x509_cert input (#6695) --- plugins/inputs/x509_cert/x509_cert.go | 27 ++++++++++++++-------- plugins/inputs/x509_cert/x509_cert_test.go | 8 +++++++ 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index 825fd5eeb..cd136ae4b 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -2,6 +2,7 @@ package x509_cert import ( + "bytes" "crypto/tls" "crypto/x509" "crypto/x509/pkix" @@ -96,18 +97,24 @@ func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certifica if err != nil { return nil, err } + var certs []*x509.Certificate + for { + block, rest := pem.Decode(bytes.TrimSpace(content)) + if block == nil { + return nil, fmt.Errorf("failed to parse certificate PEM") + } - block, _ := pem.Decode(content) - if block == nil { - return nil, fmt.Errorf("failed to parse certificate PEM") + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certs = append(certs, cert) + if rest == nil || len(rest) == 0 { + break + } + content = rest } - - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, err - } - - return []*x509.Certificate{cert}, nil + return certs, nil default: return nil, fmt.Errorf("unsuported scheme '%s' in location %s", u.Scheme, u.String()) } diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index 188b510d2..21c110bbf 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -141,6 +141,14 @@ func TestGatherLocal(t *testing.T) { {name: "not a certificate", mode: 0640, content: "test", error: true}, {name: "wrong certificate", mode: 0640, content: wrongCert, error: true}, {name: "correct certificate", mode: 0640, content: pki.ReadServerCert()}, + {name: "correct certificate and extra trailing space", mode: 0640, content: pki.ReadServerCert() + " "}, + {name: "correct certificate and extra leading space", mode: 0640, content: " " + pki.ReadServerCert()}, + {name: "correct multiple certificates", mode: 0640, content: pki.ReadServerCert() + pki.ReadCACert()}, + {name: "correct certificate and wrong certificate", mode: 0640, content: pki.ReadServerCert() + "\n" + wrongCert, error: true}, + {name: "correct certificate and not a certificate", mode: 0640, content: pki.ReadServerCert() + "\ntest", error: true}, + {name: "correct multiple certificates and extra trailing space", mode: 0640, content: pki.ReadServerCert() + pki.ReadServerCert() + " "}, + {name: "correct multiple certificates and extra leading space", mode: 0640, content: " " + pki.ReadServerCert() + pki.ReadServerCert()}, + {name: "correct multiple certificates and extra middle space", mode: 0640, content: pki.ReadServerCert() + " " + pki.ReadServerCert()}, } for _, test := range tests { From b8d3f896cee29a40645b0723bc9772c6941cc7d3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 25 Nov 2019 15:39:36 -0800 Subject: [PATCH 1350/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fac354ab0..f5ce66409 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -50,6 +50,7 @@ - [#6660](https://github.com/influxdata/telegraf/pull/6660): Add 
content_encoding compression support to socket_listener. - [#6689](https://github.com/influxdata/telegraf/pull/6689): Add high resolution metrics support to CloudWatch output. - [#6716](https://github.com/influxdata/telegraf/pull/6716): Add SReclaimable and SUnreclaim to mem input. +- [#6695](https://github.com/influxdata/telegraf/pull/6695): Allow multiple certificates per file in x509_cert input. #### Bugfixes From e061376846662390e62816874aca537ef9e884ce Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Tue, 26 Nov 2019 08:54:39 -0800 Subject: [PATCH 1351/1815] docs(readme): Update to Amazon ECS Update ECS to amazon ECS --- plugins/inputs/ecs/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/ecs/README.md b/plugins/inputs/ecs/README.md index f391a6b9c..f23eb8bab 100644 --- a/plugins/inputs/ecs/README.md +++ b/plugins/inputs/ecs/README.md @@ -1,6 +1,6 @@ -# ECS Input Plugin +# Amazon ECS Input Plugin -ECS, Fargate compatible, input plugin which uses the [ECS v2 metadata and +Amazon ECS, Fargate compatible, input plugin which uses the [Amazon ECS v2 metadata and stats API][task-metadata-endpoint-v2] endpoints to gather stats on running containers in a Task. From 6eb21978e6622aacc8e2c47c86b1f05a973654ca Mon Sep 17 00:00:00 2001 From: Yong Wen Chua Date: Wed, 27 Nov 2019 02:04:55 +0800 Subject: [PATCH 1352/1815] Add additional tags for x509 Input Plugin (#6686) --- plugins/inputs/x509_cert/README.md | 6 +++ plugins/inputs/x509_cert/x509_cert.go | 44 +++++++++++------ plugins/inputs/x509_cert/x509_cert_test.go | 56 ++++++++++++++++++++++ 3 files changed, 91 insertions(+), 15 deletions(-) diff --git a/plugins/inputs/x509_cert/README.md b/plugins/inputs/x509_cert/README.md index 450dd3d10..b302d4992 100644 --- a/plugins/inputs/x509_cert/README.md +++ b/plugins/inputs/x509_cert/README.md @@ -33,6 +33,12 @@ file or network connection. 
- province - locality - verification + - serial_number + - signature_algorithm + - public_key_algorithm + - issuer_common_name + - issuer_serial_number + - san - fields: - verification_code (int) - verification_error (string) diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index cd136ae4b..ad47db663 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -5,7 +5,6 @@ import ( "bytes" "crypto/tls" "crypto/x509" - "crypto/x509/pkix" "encoding/pem" "fmt" "io/ioutil" @@ -136,28 +135,43 @@ func getFields(cert *x509.Certificate, now time.Time) map[string]interface{} { return fields } -func getTags(subject pkix.Name, location string) map[string]string { +func getTags(cert *x509.Certificate, location string) map[string]string { tags := map[string]string{ - "source": location, - "common_name": subject.CommonName, + "source": location, + "common_name": cert.Subject.CommonName, + "serial_number": cert.SerialNumber.Text(16), + "signature_algorithm": cert.SignatureAlgorithm.String(), + "public_key_algorithm": cert.PublicKeyAlgorithm.String(), } - if len(subject.Organization) > 0 { - tags["organization"] = subject.Organization[0] + if len(cert.Subject.Organization) > 0 { + tags["organization"] = cert.Subject.Organization[0] } - if len(subject.OrganizationalUnit) > 0 { - tags["organizational_unit"] = subject.OrganizationalUnit[0] + if len(cert.Subject.OrganizationalUnit) > 0 { + tags["organizational_unit"] = cert.Subject.OrganizationalUnit[0] } - if len(subject.Country) > 0 { - tags["country"] = subject.Country[0] + if len(cert.Subject.Country) > 0 { + tags["country"] = cert.Subject.Country[0] } - if len(subject.Province) > 0 { - tags["province"] = subject.Province[0] + if len(cert.Subject.Province) > 0 { + tags["province"] = cert.Subject.Province[0] } - if len(subject.Locality) > 0 { - tags["locality"] = subject.Locality[0] + if len(cert.Subject.Locality) > 0 { + tags["locality"] = cert.Subject.Locality[0] } + tags["issuer_common_name"] = cert.Issuer.CommonName + tags["issuer_serial_number"] = cert.Issuer.SerialNumber + + san := append(cert.DNSNames, cert.EmailAddresses...) + for _, ip := range cert.IPAddresses { + san = append(san, ip.String()) + } + for _, uri := range cert.URIs { + san = append(san, uri.String()) + } + tags["san"] = strings.Join(san, ",") + return tags } @@ -179,7 +193,7 @@ func (c *X509Cert) Gather(acc telegraf.Accumulator) error { for i, cert := range certs { fields := getFields(cert, now) - tags := getTags(cert.Subject, location) + tags := getTags(cert, location) // The first certificate is the leaf/end-entity certificate which needs DNS // name validation against the URL hostname. 
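The `san` tag introduced in the `getTags` change above flattens every subject alternative name on the certificate (DNS names, email addresses, IP addresses, and URIs) into a single comma-separated tag value. A minimal standalone sketch of that aggregation, assuming only the Go standard library and made-up certificate data (the `sanTag` helper below is illustrative, not part of the plugin):

```go
package main

import (
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"net"
	"net/url"
	"strings"
)

// sanTag mirrors the aggregation in getTags: all subject alternative
// names are collected into one slice and joined with commas.
func sanTag(cert *x509.Certificate) string {
	san := append(cert.DNSNames, cert.EmailAddresses...)
	for _, ip := range cert.IPAddresses {
		san = append(san, ip.String())
	}
	for _, uri := range cert.URIs {
		san = append(san, uri.String())
	}
	return strings.Join(san, ",")
}

func main() {
	uri, _ := url.Parse("spiffe://example.org/service")
	cert := &x509.Certificate{
		Subject:     pkix.Name{CommonName: "server.localdomain"},
		DNSNames:    []string{"localhost"},
		IPAddresses: []net.IP{net.ParseIP("127.0.0.1")},
		URIs:        []*url.URL{uri},
	}
	// Prints: localhost,127.0.0.1,spiffe://example.org/service
	fmt.Println(sanTag(cert))
}
```

Consumers can split the tag value on commas to recover the individual names; IP and URI entries are stringified before joining, exactly as in the patch.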
diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index 21c110bbf..48559ca6a 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -5,6 +5,7 @@ import ( "encoding/base64" "fmt" "io/ioutil" + "math/big" "os" "testing" "time" @@ -195,6 +196,61 @@ func TestGatherLocal(t *testing.T) { } } +func TestTags(t *testing.T) { + cert := fmt.Sprintf("%s\n%s", pki.ReadServerCert(), pki.ReadCACert()) + + f, err := ioutil.TempFile("", "x509_cert") + if err != nil { + t.Fatal(err) + } + + _, err = f.Write([]byte(cert)) + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } + + defer os.Remove(f.Name()) + + sc := X509Cert{ + Sources: []string{f.Name()}, + } + sc.Init() + + acc := testutil.Accumulator{} + err = sc.Gather(&acc) + require.NoError(t, err) + + assert.True(t, acc.HasMeasurement("x509_cert")) + + assert.True(t, acc.HasTag("x509_cert", "common_name")) + assert.Equal(t, "server.localdomain", acc.TagValue("x509_cert", "common_name")) + + assert.True(t, acc.HasTag("x509_cert", "signature_algorithm")) + assert.Equal(t, "SHA256-RSA", acc.TagValue("x509_cert", "signature_algorithm")) + + assert.True(t, acc.HasTag("x509_cert", "public_key_algorithm")) + assert.Equal(t, "RSA", acc.TagValue("x509_cert", "public_key_algorithm")) + + assert.True(t, acc.HasTag("x509_cert", "issuer_common_name")) + assert.Equal(t, "Telegraf Test CA", acc.TagValue("x509_cert", "issuer_common_name")) + + assert.True(t, acc.HasTag("x509_cert", "san")) + assert.Equal(t, "localhost,127.0.0.1", acc.TagValue("x509_cert", "san")) + + assert.True(t, acc.HasTag("x509_cert", "serial_number")) + serialNumber := new(big.Int) + _, validSerialNumber := serialNumber.SetString(acc.TagValue("x509_cert", "serial_number"), 16) + if !validSerialNumber { + t.Errorf("Expected a valid Hex serial number but got %s", acc.TagValue("x509_cert", "serial_number")) + } + assert.Equal(t, big.NewInt(1), serialNumber) +} + func TestGatherChain(t *testing.T) { cert := fmt.Sprintf("%s\n%s", pki.ReadServerCert(), pki.ReadCACert()) From a1424d78baad381afcb4599400039e066c125776 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 Nov 2019 10:06:32 -0800 Subject: [PATCH 1353/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f5ce66409..d891d621d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,6 +51,7 @@ - [#6689](https://github.com/influxdata/telegraf/pull/6689): Add high resolution metrics support to CloudWatch output. - [#6716](https://github.com/influxdata/telegraf/pull/6716): Add SReclaimable and SUnreclaim to mem input. - [#6695](https://github.com/influxdata/telegraf/pull/6695): Allow multiple certificates per file in x509_cert input. +- [#6686](https://github.com/influxdata/telegraf/pull/6686): Add additional tags to the x509 input. 
#### Bugfixes From 8f71bbaa48fb447892031e81be79e2422b603d16 Mon Sep 17 00:00:00 2001 From: Eric Kincl Date: Tue, 26 Nov 2019 15:36:53 -0800 Subject: [PATCH 1354/1815] Add link to VMWare vCenter Converter API Reference (#6719) --- plugins/inputs/vsphere/METRICS.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/vsphere/METRICS.md b/plugins/inputs/vsphere/METRICS.md index 0b9e0482f..d1a34bb26 100644 --- a/plugins/inputs/vsphere/METRICS.md +++ b/plugins/inputs/vsphere/METRICS.md @@ -4,6 +4,8 @@ and the set of available metrics may vary depending hardware, as well as what pl are installed. Therefore, providing a definitive list of available metrics is difficult. The metrics listed below are the most commonly available as of vSphere 6.5. +For a complete list of metrics available from vSphere and the units they measure in, please reference the [VMWare vCenter Converter API Reference](https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.PerformanceManager.html). + To list the exact set in your environment, please use the govc tool available [here](https://github.com/vmware/govmomi/tree/master/govc) To obtain the set of metrics for e.g. a VM, you may use the following command: @@ -284,4 +286,4 @@ disk.capacity.latest disk.capacity.contention.average disk.capacity.provisioned.average disk.capacity.usage.average -``` \ No newline at end of file +``` From 80c5edd48e179fe59e25b375e2410120300fa5fd Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 Nov 2019 15:46:31 -0800 Subject: [PATCH 1355/1815] Add prometheus serializer and use it in prometheus output (#6703) --- Gopkg.lock | 1 + docs/DATA_FORMATS_OUTPUT.md | 7 +- internal/http.go | 47 + plugins/inputs/file/file.go | 4 +- plugins/inputs/prometheus/README.md | 10 +- plugins/inputs/prometheus/prometheus.go | 23 +- plugins/outputs/file/README.md | 5 + plugins/outputs/file/file.go | 30 +- plugins/outputs/prometheus_client/README.md | 13 +- .../prometheus_client/prometheus_client.go | 591 +++--------- .../prometheus_client_test.go | 909 +++++------------- .../outputs/prometheus_client/v1/collector.go | 391 ++++++++ .../outputs/prometheus_client/v2/collector.go | 87 ++ plugins/serializers/prometheus/README.md | 68 ++ plugins/serializers/prometheus/collection.go | 464 +++++++++ .../serializers/prometheus/collection_test.go | 116 +++ plugins/serializers/prometheus/convert.go | 175 ++++ plugins/serializers/prometheus/prometheus.go | 69 ++ .../serializers/prometheus/prometheus_test.go | 589 ++++++++++++ plugins/serializers/registry.go | 61 +- 20 files changed, 2516 insertions(+), 1144 deletions(-) create mode 100644 plugins/outputs/prometheus_client/v1/collector.go create mode 100644 plugins/outputs/prometheus_client/v2/collector.go create mode 100644 plugins/serializers/prometheus/README.md create mode 100644 plugins/serializers/prometheus/collection.go create mode 100644 plugins/serializers/prometheus/collection_test.go create mode 100644 plugins/serializers/prometheus/convert.go create mode 100644 plugins/serializers/prometheus/prometheus.go create mode 100644 plugins/serializers/prometheus/prometheus_test.go diff --git a/Gopkg.lock b/Gopkg.lock index fa0c2f4c7..3eb640780 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1750,6 +1750,7 @@ "github.com/go-sql-driver/mysql", "github.com/gobwas/glob", "github.com/gofrs/uuid", + "github.com/gogo/protobuf/proto", "github.com/golang/protobuf/proto", "github.com/golang/protobuf/ptypes/duration", "github.com/golang/protobuf/ptypes/empty", diff --git 
a/docs/DATA_FORMATS_OUTPUT.md b/docs/DATA_FORMATS_OUTPUT.md index f3ac028b9..a8650b250 100644 --- a/docs/DATA_FORMATS_OUTPUT.md +++ b/docs/DATA_FORMATS_OUTPUT.md @@ -5,10 +5,11 @@ standard data formats that may be selected from when configuring many output plugins. 1. [InfluxDB Line Protocol](/plugins/serializers/influx) -1. [JSON](/plugins/serializers/json) -1. [Graphite](/plugins/serializers/graphite) -1. [SplunkMetric](/plugins/serializers/splunkmetric) 1. [Carbon2](/plugins/serializers/carbon2) +1. [Graphite](/plugins/serializers/graphite) +1. [JSON](/plugins/serializers/json) +1. [Prometheus](/plugins/serializers/prometheus) +1. [SplunkMetric](/plugins/serializers/splunkmetric) 1. [Wavefront](/plugins/serializers/wavefront) You will be able to identify the plugins with support by the presence of a diff --git a/internal/http.go b/internal/http.go index 230fdf2b7..7ffd9bf2b 100644 --- a/internal/http.go +++ b/internal/http.go @@ -2,6 +2,7 @@ package internal import ( "crypto/subtle" + "net" "net/http" ) @@ -43,3 +44,49 @@ func (h *basicAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) h.next.ServeHTTP(rw, req) } + +// IPRangeHandler returns a http handler that requires the remote address to be +// in the specified network. +func IPRangeHandler(network []*net.IPNet, onError ErrorFunc) func(h http.Handler) http.Handler { + return func(h http.Handler) http.Handler { + return &ipRangeHandler{ + network: network, + onError: onError, + next: h, + } + } +} + +type ipRangeHandler struct { + network []*net.IPNet + onError ErrorFunc + next http.Handler +} + +func (h *ipRangeHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + if len(h.network) == 0 { + h.next.ServeHTTP(rw, req) + return + } + + remoteIPString, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + h.onError(rw, http.StatusForbidden) + return + } + + remoteIP := net.ParseIP(remoteIPString) + if remoteIP == nil { + h.onError(rw, http.StatusForbidden) + return + } + + for _, net := range h.network { + if net.Contains(remoteIP) { + h.next.ServeHTTP(rw, req) + return + } + } + + h.onError(rw, http.StatusForbidden) +} diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go index c601d4875..860595283 100644 --- a/plugins/inputs/file/file.go +++ b/plugins/inputs/file/file.go @@ -33,8 +33,8 @@ const sampleConfig = ` ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" - - ## Name a tag containing the name of the file the data was parsed from. Leave empty + + ## Name a tag containing the name of the file the data was parsed from. Leave empty ## to disable. # file_tag = "" ` diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index 4163e068e..7b2e054a2 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -11,8 +11,14 @@ in Prometheus format. ## An array of urls to scrape metrics from. urls = ["http://localhost:9100/metrics"] - ## Metric version (optional, default=1, supported values are 1 and 2) - # metric_version = 2 + ## Metric version controls the mapping from Prometheus metrics into + ## Telegraf metrics. When using the prometheus_client output, use the same + ## value in both plugins to ensure metrics are round-tripped without + ## modification. + ## + ## example: metric_version = 1; deprecated in 1.13 + ## metric_version = 2; recommended version + # metric_version = 1 ## An array of Kubernetes services to scrape metrics from. 
# kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
 diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go
index c59d92021..340736c98 100644
--- a/plugins/inputs/prometheus/prometheus.go
+++ b/plugins/inputs/prometheus/prometheus.go
@@ -62,8 +62,14 @@ var sampleConfig = `
 ## An array of urls to scrape metrics from.
 urls = ["http://localhost:9100/metrics"]
- ## Metric version (optional, default=1, supported values are 1 and 2)
- # metric_version = 2
+ ## Metric version controls the mapping from Prometheus metrics into
+ ## Telegraf metrics. When using the prometheus_client output, use the same
+ ## value in both plugins to ensure metrics are round-tripped without
+ ## modification.
+ ##
+ ## example: metric_version = 1; deprecated in 1.13
+ ## metric_version = 2; recommended version
+ # metric_version = 1
 ## Url tag name (tag containing scrapped url. optional, default is "url")
 # url_tag = "scrapeUrl"
@@ -95,7 +101,7 @@ var sampleConfig = `
 # username = ""
 # password = ""
- ## Specify timeout duration for slower prometheus clients (default is 3s)
+ ## Specify timeout duration for slower prometheus clients (default is 3s)
 # response_timeout = "3s"
 ## Optional TLS Config
@@ -114,6 +120,13 @@ func (p *Prometheus) Description() string {
 return "Read metrics from one or many prometheus clients"
}
+func (p *Prometheus) Init() error {
+ if p.MetricVersion != 2 {
+ p.Log.Warnf("Use of deprecated configuration: 'metric_version = 1'; please update to 'metric_version = 2'")
+ }
+ return nil
+}
+
 var ErrProtocolError = errors.New("prometheus protocol error")
 func (p *Prometheus) AddressToURL(u *url.URL, address string) *url.URL {
@@ -311,7 +324,9 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error
 tags := metric.Tags()
 // strip user and password from URL
 u.OriginalURL.User = nil
- tags[p.URLTag] = u.OriginalURL.String()
+ if p.URLTag != "" {
+ tags[p.URLTag] = u.OriginalURL.String()
+ }
 if u.Address != "" {
 tags["address"] = u.Address
 }
diff --git a/plugins/outputs/file/README.md b/plugins/outputs/file/README.md
index e34f80807..350633c56 100644
--- a/plugins/outputs/file/README.md
+++ b/plugins/outputs/file/README.md
@@ -9,6 +9,11 @@ This plugin writes telegraf metrics to files
 ## Files to write to, "stdout" is a specially handled file.
 files = ["stdout", "/tmp/metrics.out"]
+ ## Use batch serialization format instead of line based delimiting. The
+ ## batch format allows for the production of non line based output formats and
+ ## may more efficiently encode and write metrics.
+ # use_batch_format = false
+
 ## The file will be rotated after the time interval specified. When set
 ## to 0 no time based rotation is performed.
 # rotation_interval = "0h"
diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go
index 11793bc4f..12d70d8f3 100644
--- a/plugins/outputs/file/file.go
+++ b/plugins/outputs/file/file.go
@@ -3,7 +3,6 @@ package file
 import (
 "fmt"
 "io"
- "log"
 "os"
 "github.com/influxdata/telegraf"
@@ -18,6 +17,8 @@ type File struct {
 RotationInterval internal.Duration `toml:"rotation_interval"`
 RotationMaxSize internal.Size `toml:"rotation_max_size"`
 RotationMaxArchives int `toml:"rotation_max_archives"`
+ UseBatchFormat bool `toml:"use_batch_format"`
+ Log telegraf.Logger `toml:"-"`
 writer io.Writer
 closers []io.Closer
@@ -28,6 +29,11 @@ var sampleConfig = `
 ## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
+ ## Use batch serialization format instead of line based delimiting. The
+ ## batch format allows for the production of non line based output formats and
+ ## may more efficiently encode metric groups.
+ # use_batch_format = false
+
 ## The file will be rotated after the time interval specified. When set
 ## to 0 no time based rotation is performed.
 # rotation_interval = "0d"
@@ -98,15 +104,27 @@ func (f *File) Description() string {
 func (f *File) Write(metrics []telegraf.Metric) error {
 var writeErr error = nil
- for _, metric := range metrics {
- b, err := f.serializer.Serialize(metric)
+ if f.UseBatchFormat {
+ octets, err := f.serializer.SerializeBatch(metrics)
 if err != nil {
- log.Printf("D! [outputs.file] Could not serialize metric: %v", err)
+ f.Log.Errorf("Could not serialize metric: %v", err)
 }
- _, err = f.writer.Write(b)
+ _, err = f.writer.Write(octets)
 if err != nil {
- writeErr = fmt.Errorf("E! [outputs.file] failed to write message: %v", err)
+ f.Log.Errorf("Error writing to file: %v", err)
+ }
+ } else {
+ for _, metric := range metrics {
+ b, err := f.serializer.Serialize(metric)
+ if err != nil {
+ f.Log.Debugf("Could not serialize metric: %v", err)
+ }
+
+ _, err = f.writer.Write(b)
+ if err != nil {
+ writeErr = fmt.Errorf("E! [outputs.file] failed to write message: %v", err)
+ }
 }
 }
diff --git a/plugins/outputs/prometheus_client/README.md b/plugins/outputs/prometheus_client/README.md
index 49030bb3c..7d4fe09b1 100644
--- a/plugins/outputs/prometheus_client/README.md
+++ b/plugins/outputs/prometheus_client/README.md
@@ -1,6 +1,7 @@
-# Prometheus Client Service Output Plugin
+# Prometheus Output Plugin
-This plugin starts a [Prometheus](https://prometheus.io/) Client, it exposes all metrics on `/metrics` (default) to be polled by a Prometheus server.
+This plugin starts a [Prometheus](https://prometheus.io/) client; it exposes
+all metrics on `/metrics` (default) to be polled by a Prometheus server.
 ## Configuration
@@ -10,6 +11,14 @@ This plugin starts a [Prometheus](https://prometheus.io/) Client, it exposes all
 ## Address to listen on.
 listen = ":9273"
+ ## Metric version controls the mapping from Telegraf metrics into
+ ## Prometheus format. When using the prometheus input, use the same value in
+ ## both plugins to ensure metrics are round-tripped without modification.
+ ##
+ ## example: metric_version = 1; deprecated in 1.13
+ ## metric_version = 2; recommended version
+ # metric_version = 1
+
 ## Use HTTP Basic Authentication.
# basic_username = "Foo" # basic_password = "Bar" diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 32dcdbb89..afdf7e107 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -1,18 +1,12 @@ -package prometheus_client +package prometheus import ( "context" - "crypto/subtle" "crypto/tls" "fmt" - "log" "net" "net/http" "net/url" - "regexp" - "sort" - "strconv" - "strings" "sync" "time" @@ -20,73 +14,30 @@ import ( "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v1" + "github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" ) var ( - invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_:]`) - validNameCharRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*`) + defaultListen = ":9273" + defaultPath = "/metrics" + defaultExpirationInterval = internal.Duration{Duration: 60 * time.Second} ) -// SampleID uniquely identifies a Sample -type SampleID string - -// Sample represents the current value of a series. -type Sample struct { - // Labels are the Prometheus labels. - Labels map[string]string - // Value is the value in the Prometheus output. Only one of these will populated. - Value float64 - HistogramValue map[float64]uint64 - SummaryValue map[float64]float64 - // Histograms and Summaries need a count and a sum - Count uint64 - Sum float64 - // Metric timestamp - Timestamp time.Time - // Expiration is the deadline that this Sample is valid until. - Expiration time.Time -} - -// MetricFamily contains the data required to build valid prometheus Metrics. -type MetricFamily struct { - // Samples are the Sample belonging to this MetricFamily. - Samples map[SampleID]*Sample - // Need the telegraf ValueType because there isn't a Prometheus ValueType - // representing Histogram or Summary - TelegrafValueType telegraf.ValueType - // LabelSet is the label counts for all Samples. - LabelSet map[string]int -} - -type PrometheusClient struct { - Listen string - BasicUsername string `toml:"basic_username"` - BasicPassword string `toml:"basic_password"` - IPRange []string `toml:"ip_range"` - ExpirationInterval internal.Duration `toml:"expiration_interval"` - Path string `toml:"path"` - CollectorsExclude []string `toml:"collectors_exclude"` - StringAsLabel bool `toml:"string_as_label"` - ExportTimestamp bool `toml:"export_timestamp"` - - tlsint.ServerConfig - - server *http.Server - url string - - sync.Mutex - // fam is the non-expired MetricFamily by Prometheus metric name. - fam map[string]*MetricFamily - // now returns the current time. - now func() time.Time -} - var sampleConfig = ` ## Address to listen on listen = ":9273" + ## Metric version controls the mapping from Telegraf metrics into + ## Prometheus format. When using the prometheus input, use the same value in + ## both plugins to ensure metrics are round-tripped without modification. + ## + ## example: metric_version = 1; deprecated in 1.13 + ## metric_version = 2; recommended version + # metric_version = 1 + ## Use HTTP Basic Authentication. 
# basic_username = "Foo" # basic_password = "Bar" @@ -121,46 +72,42 @@ var sampleConfig = ` # export_timestamp = false ` -func (p *PrometheusClient) auth(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if p.BasicUsername != "" && p.BasicPassword != "" { - w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`) - - username, password, ok := r.BasicAuth() - if !ok || - subtle.ConstantTimeCompare([]byte(username), []byte(p.BasicUsername)) != 1 || - subtle.ConstantTimeCompare([]byte(password), []byte(p.BasicPassword)) != 1 { - http.Error(w, "Not authorized", 401) - return - } - } - - if len(p.IPRange) > 0 { - matched := false - remoteIPs, _, _ := net.SplitHostPort(r.RemoteAddr) - remoteIP := net.ParseIP(remoteIPs) - for _, iprange := range p.IPRange { - _, ipNet, err := net.ParseCIDR(iprange) - if err != nil { - http.Error(w, "Config Error in ip_range setting", 500) - return - } - if ipNet.Contains(remoteIP) { - matched = true - break - } - } - if !matched { - http.Error(w, "Not authorized", 401) - return - } - } - - h.ServeHTTP(w, r) - }) +type Collector interface { + Describe(ch chan<- *prometheus.Desc) + Collect(ch chan<- prometheus.Metric) + Add(metrics []telegraf.Metric) error } -func (p *PrometheusClient) Connect() error { +type PrometheusClient struct { + Listen string `toml:"listen"` + MetricVersion int `toml:"metric_version"` + BasicUsername string `toml:"basic_username"` + BasicPassword string `toml:"basic_password"` + IPRange []string `toml:"ip_range"` + ExpirationInterval internal.Duration `toml:"expiration_interval"` + Path string `toml:"path"` + CollectorsExclude []string `toml:"collectors_exclude"` + StringAsLabel bool `toml:"string_as_label"` + ExportTimestamp bool `toml:"export_timestamp"` + tlsint.ServerConfig + + Log telegraf.Logger `toml:"-"` + + server *http.Server + url *url.URL + collector Collector + wg sync.WaitGroup +} + +func (p *PrometheusClient) Description() string { + return "Configuration for the Prometheus client to spawn" +} + +func (p *PrometheusClient) SampleConfig() string { + return sampleConfig +} + +func (p *PrometheusClient) Init() error { defaultCollectors := map[string]bool{ "gocollector": true, "process": true, @@ -181,421 +128,137 @@ func (p *PrometheusClient) Connect() error { } } - err := registry.Register(p) - if err != nil { - return err + switch p.MetricVersion { + default: + fallthrough + case 1: + p.Log.Warnf("Use of deprecated configuration: metric_version = 1; please update to metric_version = 2") + p.collector = v1.NewCollector(p.ExpirationInterval.Duration, p.StringAsLabel, p.Log) + err := registry.Register(p.collector) + if err != nil { + return err + } + case 2: + p.collector = v2.NewCollector(p.ExpirationInterval.Duration, p.StringAsLabel) + err := registry.Register(p.collector) + if err != nil { + return err + } } - if p.Listen == "" { - p.Listen = "localhost:9273" + ipRange := make([]*net.IPNet, 0, len(p.IPRange)) + for _, cidr := range p.IPRange { + _, ipNet, err := net.ParseCIDR(cidr) + if err != nil { + return fmt.Errorf("error parsing ip_range: %v", err) + } + + ipRange = append(ipRange, ipNet) } - if p.Path == "" { - p.Path = "/metrics" - } + authHandler := internal.AuthHandler(p.BasicUsername, p.BasicPassword, onAuthError) + rangeHandler := internal.IPRangeHandler(ipRange, onError) + promHandler := promhttp.HandlerFor(registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}) mux := http.NewServeMux() - mux.Handle(p.Path, p.auth(promhttp.HandlerFor( 
- registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}))) + if p.Path == "" { + p.Path = "/" + } + mux.Handle(p.Path, authHandler(rangeHandler(promHandler))) tlsConfig, err := p.TLSConfig() if err != nil { return err } + p.server = &http.Server{ Addr: p.Listen, Handler: mux, TLSConfig: tlsConfig, } - var listener net.Listener - if tlsConfig != nil { - listener, err = tls.Listen("tcp", p.Listen, tlsConfig) + return nil +} + +func (p *PrometheusClient) listen() (net.Listener, error) { + if p.server.TLSConfig != nil { + return tls.Listen("tcp", p.Listen, p.server.TLSConfig) } else { - listener, err = net.Listen("tcp", p.Listen) + return net.Listen("tcp", p.Listen) } +} + +func (p *PrometheusClient) Connect() error { + listener, err := p.listen() if err != nil { return err } - p.url = createURL(tlsConfig, listener, p.Path) + scheme := "http" + if p.server.TLSConfig != nil { + scheme = "https" + } + p.url = &url.URL{ + Scheme: scheme, + Host: listener.Addr().String(), + Path: p.Path, + } + + p.Log.Infof("Listening on %s", p.URL()) + + p.wg.Add(1) go func() { + defer p.wg.Done() err := p.server.Serve(listener) if err != nil && err != http.ErrServerClosed { - log.Printf("E! Error creating prometheus metric endpoint, err: %s\n", - err.Error()) + p.Log.Errorf("Server error: %v", err) } }() return nil } +func onAuthError(rw http.ResponseWriter, code int) { + rw.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`) + http.Error(rw, http.StatusText(code), code) +} + +func onError(rw http.ResponseWriter, code int) { + http.Error(rw, http.StatusText(code), code) +} + // Address returns the address the plugin is listening on. If not listening // an empty string is returned. func (p *PrometheusClient) URL() string { - return p.url -} - -func createURL(tlsConfig *tls.Config, listener net.Listener, path string) string { - u := url.URL{ - Scheme: "http", - Host: listener.Addr().String(), - Path: path, + if p.url != nil { + return p.url.String() } - - if tlsConfig != nil { - u.Scheme = "https" - } - return u.String() + return "" } func (p *PrometheusClient) Close() error { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() + err := p.server.Shutdown(ctx) - prometheus.Unregister(p) - p.url = "" + p.wg.Wait() + p.url = nil + prometheus.Unregister(p.collector) return err } -func (p *PrometheusClient) SampleConfig() string { - return sampleConfig -} - -func (p *PrometheusClient) Description() string { - return "Configuration for the Prometheus client to spawn" -} - -// Implements prometheus.Collector -func (p *PrometheusClient) Describe(ch chan<- *prometheus.Desc) { - prometheus.NewGauge(prometheus.GaugeOpts{Name: "Dummy", Help: "Dummy"}).Describe(ch) -} - -// Expire removes Samples that have expired. 
-func (p *PrometheusClient) Expire() { - now := p.now() - for name, family := range p.fam { - for key, sample := range family.Samples { - if p.ExpirationInterval.Duration != 0 && now.After(sample.Expiration) { - for k := range sample.Labels { - family.LabelSet[k]-- - } - delete(family.Samples, key) - - if len(family.Samples) == 0 { - delete(p.fam, name) - } - } - } - } -} - -// Collect implements prometheus.Collector -func (p *PrometheusClient) Collect(ch chan<- prometheus.Metric) { - p.Lock() - defer p.Unlock() - - p.Expire() - - for name, family := range p.fam { - // Get list of all labels on MetricFamily - var labelNames []string - for k, v := range family.LabelSet { - if v > 0 { - labelNames = append(labelNames, k) - } - } - desc := prometheus.NewDesc(name, "Telegraf collected metric", labelNames, nil) - - for _, sample := range family.Samples { - // Get labels for this sample; unset labels will be set to the - // empty string - var labels []string - for _, label := range labelNames { - v := sample.Labels[label] - labels = append(labels, v) - } - - var metric prometheus.Metric - var err error - switch family.TelegrafValueType { - case telegraf.Summary: - metric, err = prometheus.NewConstSummary(desc, sample.Count, sample.Sum, sample.SummaryValue, labels...) - case telegraf.Histogram: - metric, err = prometheus.NewConstHistogram(desc, sample.Count, sample.Sum, sample.HistogramValue, labels...) - default: - metric, err = prometheus.NewConstMetric(desc, getPromValueType(family.TelegrafValueType), sample.Value, labels...) - } - if err != nil { - log.Printf("E! Error creating prometheus metric, "+ - "key: %s, labels: %v,\nerr: %s\n", - name, labels, err.Error()) - continue - } - - if p.ExportTimestamp { - metric = prometheus.NewMetricWithTimestamp(sample.Timestamp, metric) - } - ch <- metric - } - } -} - -func sanitize(value string) string { - return invalidNameCharRE.ReplaceAllString(value, "_") -} - -func isValidTagName(tag string) bool { - return validNameCharRE.MatchString(tag) -} - -func getPromValueType(tt telegraf.ValueType) prometheus.ValueType { - switch tt { - case telegraf.Counter: - return prometheus.CounterValue - case telegraf.Gauge: - return prometheus.GaugeValue - default: - return prometheus.UntypedValue - } -} - -// CreateSampleID creates a SampleID based on the tags of a telegraf.Metric. -func CreateSampleID(tags map[string]string) SampleID { - pairs := make([]string, 0, len(tags)) - for k, v := range tags { - pairs = append(pairs, fmt.Sprintf("%s=%s", k, v)) - } - sort.Strings(pairs) - return SampleID(strings.Join(pairs, ",")) -} - -func addSample(fam *MetricFamily, sample *Sample, sampleID SampleID) { - - for k := range sample.Labels { - fam.LabelSet[k]++ - } - - fam.Samples[sampleID] = sample -} - -func (p *PrometheusClient) addMetricFamily(point telegraf.Metric, sample *Sample, mname string, sampleID SampleID) { - var fam *MetricFamily - var ok bool - if fam, ok = p.fam[mname]; !ok { - fam = &MetricFamily{ - Samples: make(map[SampleID]*Sample), - TelegrafValueType: point.Type(), - LabelSet: make(map[string]int), - } - p.fam[mname] = fam - } - - addSample(fam, sample, sampleID) -} - -// Sorted returns a copy of the metrics in time ascending order. A copy is -// made to avoid modifying the input metric slice since doing so is not -// allowed. 
-func sorted(metrics []telegraf.Metric) []telegraf.Metric { - batch := make([]telegraf.Metric, 0, len(metrics)) - for i := len(metrics) - 1; i >= 0; i-- { - batch = append(batch, metrics[i]) - } - sort.Slice(batch, func(i, j int) bool { - return batch[i].Time().Before(batch[j].Time()) - }) - return batch -} - func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { - p.Lock() - defer p.Unlock() - - now := p.now() - - for _, point := range sorted(metrics) { - tags := point.Tags() - sampleID := CreateSampleID(tags) - - labels := make(map[string]string) - for k, v := range tags { - tName := sanitize(k) - if !isValidTagName(tName) { - continue - } - labels[tName] = v - } - - // Prometheus doesn't have a string value type, so convert string - // fields to labels if enabled. - if p.StringAsLabel { - for fn, fv := range point.Fields() { - switch fv := fv.(type) { - case string: - tName := sanitize(fn) - if !isValidTagName(tName) { - continue - } - labels[tName] = fv - } - } - } - - switch point.Type() { - case telegraf.Summary: - var mname string - var sum float64 - var count uint64 - summaryvalue := make(map[float64]float64) - for fn, fv := range point.Fields() { - var value float64 - switch fv := fv.(type) { - case int64: - value = float64(fv) - case uint64: - value = float64(fv) - case float64: - value = fv - default: - continue - } - - switch fn { - case "sum": - sum = value - case "count": - count = uint64(value) - default: - limit, err := strconv.ParseFloat(fn, 64) - if err == nil { - summaryvalue[limit] = value - } - } - } - sample := &Sample{ - Labels: labels, - SummaryValue: summaryvalue, - Count: count, - Sum: sum, - Timestamp: point.Time(), - Expiration: now.Add(p.ExpirationInterval.Duration), - } - mname = sanitize(point.Name()) - - if !isValidTagName(mname) { - continue - } - - p.addMetricFamily(point, sample, mname, sampleID) - - case telegraf.Histogram: - var mname string - var sum float64 - var count uint64 - histogramvalue := make(map[float64]uint64) - for fn, fv := range point.Fields() { - var value float64 - switch fv := fv.(type) { - case int64: - value = float64(fv) - case uint64: - value = float64(fv) - case float64: - value = fv - default: - continue - } - - switch fn { - case "sum": - sum = value - case "count": - count = uint64(value) - default: - limit, err := strconv.ParseFloat(fn, 64) - if err == nil { - histogramvalue[limit] = uint64(value) - } - } - } - sample := &Sample{ - Labels: labels, - HistogramValue: histogramvalue, - Count: count, - Sum: sum, - Timestamp: point.Time(), - Expiration: now.Add(p.ExpirationInterval.Duration), - } - mname = sanitize(point.Name()) - - if !isValidTagName(mname) { - continue - } - - p.addMetricFamily(point, sample, mname, sampleID) - - default: - for fn, fv := range point.Fields() { - // Ignore string and bool fields. - var value float64 - switch fv := fv.(type) { - case int64: - value = float64(fv) - case uint64: - value = float64(fv) - case float64: - value = fv - default: - continue - } - - sample := &Sample{ - Labels: labels, - Value: value, - Timestamp: point.Time(), - Expiration: now.Add(p.ExpirationInterval.Duration), - } - - // Special handling of value field; supports passthrough from - // the prometheus input. 
- var mname string - switch point.Type() { - case telegraf.Counter: - if fn == "counter" { - mname = sanitize(point.Name()) - } - case telegraf.Gauge: - if fn == "gauge" { - mname = sanitize(point.Name()) - } - } - if mname == "" { - if fn == "value" { - mname = sanitize(point.Name()) - } else { - mname = sanitize(fmt.Sprintf("%s_%s", point.Name(), fn)) - } - } - if !isValidTagName(mname) { - continue - } - p.addMetricFamily(point, sample, mname, sampleID) - - } - } - } - return nil + return p.collector.Add(metrics) } func init() { outputs.Add("prometheus_client", func() telegraf.Output { return &PrometheusClient{ - ExpirationInterval: internal.Duration{Duration: time.Second * 60}, + Listen: defaultListen, + Path: defaultPath, + ExpirationInterval: defaultExpirationInterval, StringAsLabel: true, - fam: make(map[string]*MetricFamily), - now: time.Now, } }) } diff --git a/plugins/outputs/prometheus_client/prometheus_client_test.go b/plugins/outputs/prometheus_client/prometheus_client_test.go index 211e24030..6af8da8da 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_test.go @@ -1,693 +1,304 @@ -package prometheus_client +package prometheus import ( + "io/ioutil" + "net/http" + "strings" "testing" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/metric" - prometheus_input "github.com/influxdata/telegraf/plugins/inputs/prometheus" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) -func setUnixTime(client *PrometheusClient, sec int64) { - client.now = func() time.Time { - return time.Unix(sec, 0) - } -} - -// NewClient initializes a PrometheusClient. -func NewClient() *PrometheusClient { - return &PrometheusClient{ - ExpirationInterval: internal.Duration{Duration: time.Second * 60}, - StringAsLabel: true, - fam: make(map[string]*MetricFamily), - now: time.Now, - } -} - -func TestWrite_Basic(t *testing.T) { - now := time.Now() - pt1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 0.0}, - now) - var metrics = []telegraf.Metric{ - pt1, - } - - client := NewClient() - err = client.Write(metrics) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, telegraf.Untyped, fam.TelegrafValueType) - require.Equal(t, map[string]int{}, fam.LabelSet) - - sample, ok := fam.Samples[CreateSampleID(pt1.Tags())] - require.True(t, ok) - - require.Equal(t, 0.0, sample.Value) - require.True(t, now.Before(sample.Expiration)) -} - -func TestWrite_IntField(t *testing.T) { - client := NewClient() - - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 42}, - time.Now()) - err = client.Write([]telegraf.Metric{p1}) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - for _, v := range fam.Samples { - require.Equal(t, 42.0, v.Value) - } - -} - -func TestWrite_FieldNotValue(t *testing.T) { - client := NewClient() - - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"howdy": 0.0}, - time.Now()) - err = client.Write([]telegraf.Metric{p1}) - require.NoError(t, err) - - fam, ok := client.fam["foo_howdy"] - require.True(t, ok) - for _, v := range fam.Samples { - require.Equal(t, 0.0, v.Value) - } -} - -func TestWrite_SkipNonNumberField(t *testing.T) { - client := NewClient() - - p1, err := metric.New( - "foo", - make(map[string]string), - 
map[string]interface{}{"value": "howdy"}, - time.Now()) - err = client.Write([]telegraf.Metric{p1}) - require.NoError(t, err) - - _, ok := client.fam["foo"] - require.False(t, ok) -} - -func TestWrite_Counters(t *testing.T) { - type args struct { - measurement string - tags map[string]string - fields map[string]interface{} - valueType telegraf.ValueType - } - var tests = []struct { - name string - args args - err error - metricName string - valueType telegraf.ValueType +func TestMetricVersion1(t *testing.T) { + tests := []struct { + name string + output *PrometheusClient + metrics []telegraf.Metric + expected []byte }{ { - name: "field named value is not added to metric name", - args: args{ - measurement: "foo", - fields: map[string]interface{}{"value": 42}, - valueType: telegraf.Counter, + name: "simple", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 1, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + Log: testutil.Logger{}, }, - metricName: "foo", - valueType: telegraf.Counter, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 +`), }, { - name: "field named counter is not added to metric name", - args: args{ - measurement: "foo", - fields: map[string]interface{}{"counter": 42}, - valueType: telegraf.Counter, + name: "prometheus untyped", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 1, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + Log: testutil.Logger{}, }, - metricName: "foo", - valueType: telegraf.Counter, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu_time_idle", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 +`), }, { - name: "field with any other name is added to metric name", - args: args{ - measurement: "foo", - fields: map[string]interface{}{"other": 42}, - valueType: telegraf.Counter, + name: "prometheus counter", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 1, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + Log: testutil.Logger{}, }, - metricName: "foo_other", - valueType: telegraf.Counter, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu_time_idle", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "counter": 42.0, + }, + time.Unix(0, 0), + telegraf.Counter, + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle counter +cpu_time_idle{host="example.org"} 42 +`), }, { - name: "uint64 fields are output", - args: args{ - measurement: "foo", - fields: map[string]interface{}{"value": uint64(42)}, - valueType: telegraf.Counter, + name: "prometheus gauge", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 1, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + Log: testutil.Logger{}, }, - metricName: "foo", - valueType: telegraf.Counter, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu_time_idle", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "gauge": 42.0, + }, + time.Unix(0, 
0), + telegraf.Gauge, + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle gauge +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "prometheus histogram", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 1, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + Log: testutil.Logger{}, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "http_request_duration_seconds", + map[string]string{}, + map[string]interface{}{ + "sum": 53423, + "0.05": 24054, + "0.1": 33444, + "0.2": 100392, + "0.5": 129389, + "1": 133988, + "+Inf": 144320, + "count": 144320, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + }, + expected: []byte(` +# HELP http_request_duration_seconds Telegraf collected metric +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 24054 +http_request_duration_seconds_bucket{le="0.1"} 33444 +http_request_duration_seconds_bucket{le="0.2"} 100392 +http_request_duration_seconds_bucket{le="0.5"} 129389 +http_request_duration_seconds_bucket{le="1"} 133988 +http_request_duration_seconds_bucket{le="+Inf"} 144320 +http_request_duration_seconds_sum 53423 +http_request_duration_seconds_count 144320 +`), + }, + { + name: "prometheus summary", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 1, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + Log: testutil.Logger{}, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "rpc_duration_seconds", + map[string]string{}, + map[string]interface{}{ + "0.01": 3102, + "0.05": 3272, + "0.5": 4773, + "0.9": 9001, + "0.99": 76656, + "count": 2693, + "sum": 17560473, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + }, + expected: []byte(` +# HELP rpc_duration_seconds Telegraf collected metric +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 3102 +rpc_duration_seconds{quantile="0.05"} 3272 +rpc_duration_seconds{quantile="0.5"} 4773 +rpc_duration_seconds{quantile="0.9"} 9001 +rpc_duration_seconds{quantile="0.99"} 76656 +rpc_duration_seconds_sum 1.7560473e+07 +rpc_duration_seconds_count 2693 +`), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m, err := metric.New( - tt.args.measurement, - tt.args.tags, - tt.args.fields, - time.Now(), - tt.args.valueType, - ) - client := NewClient() - err = client.Write([]telegraf.Metric{m}) - require.Equal(t, tt.err, err) + err := tt.output.Init() + require.NoError(t, err) - fam, ok := client.fam[tt.metricName] - require.True(t, ok) - require.Equal(t, tt.valueType, fam.TelegrafValueType) + err = tt.output.Connect() + require.NoError(t, err) + + defer func() { + err := tt.output.Close() + require.NoError(t, err) + }() + + err = tt.output.Write(tt.metrics) + require.NoError(t, err) + + resp, err := http.Get(tt.output.URL()) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + require.Equal(t, + strings.TrimSpace(string(tt.expected)), + strings.TrimSpace(string(body))) }) } } -func TestWrite_Sanitize(t *testing.T) { - client := NewClient() - - p1, err := metric.New( - "foo.bar:colon", - map[string]string{"tag-with-dash": "localhost.local"}, - map[string]interface{}{"field-with-dash-and:colon": 42}, - time.Now(), - telegraf.Counter) - err = client.Write([]telegraf.Metric{p1}) - require.NoError(t, err) - - fam, ok := client.fam["foo_bar:colon_field_with_dash_and:colon"] 
- require.True(t, ok) - require.Equal(t, map[string]int{"tag_with_dash": 1}, fam.LabelSet) - - sample1, ok := fam.Samples[CreateSampleID(p1.Tags())] - require.True(t, ok) - - require.Equal(t, map[string]string{ - "tag_with_dash": "localhost.local"}, sample1.Labels) -} - -func TestWrite_Gauge(t *testing.T) { - type args struct { - measurement string - tags map[string]string - fields map[string]interface{} - valueType telegraf.ValueType - } - var tests = []struct { - name string - args args - err error - metricName string - valueType telegraf.ValueType +func TestMetricVersion2(t *testing.T) { + tests := []struct { + name string + output *PrometheusClient + metrics []telegraf.Metric + expected []byte }{ { - name: "field named value is not added to metric name", - args: args{ - measurement: "foo", - fields: map[string]interface{}{"value": 42}, - valueType: telegraf.Gauge, + name: "simple", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 2, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + Log: testutil.Logger{}, }, - metricName: "foo", - valueType: telegraf.Gauge, - }, - { - name: "field named gauge is not added to metric name", - args: args{ - measurement: "foo", - fields: map[string]interface{}{"gauge": 42}, - valueType: telegraf.Gauge, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), }, - metricName: "foo", - valueType: telegraf.Gauge, - }, - { - name: "field with any other name is added to metric name", - args: args{ - measurement: "foo", - fields: map[string]interface{}{"other": 42}, - valueType: telegraf.Gauge, - }, - metricName: "foo_other", - valueType: telegraf.Gauge, - }, - { - name: "uint64 fields are output", - args: args{ - measurement: "foo", - fields: map[string]interface{}{"value": uint64(42)}, - valueType: telegraf.Counter, - }, - metricName: "foo", - valueType: telegraf.Counter, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 +`), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m, err := metric.New( - tt.args.measurement, - tt.args.tags, - tt.args.fields, - time.Now(), - tt.args.valueType, - ) - client := NewClient() - err = client.Write([]telegraf.Metric{m}) - require.Equal(t, tt.err, err) + err := tt.output.Init() + require.NoError(t, err) - fam, ok := client.fam[tt.metricName] - require.True(t, ok) - require.Equal(t, tt.valueType, fam.TelegrafValueType) + err = tt.output.Connect() + require.NoError(t, err) + defer func() { + err := tt.output.Close() + require.NoError(t, err) + }() + + err = tt.output.Write(tt.metrics) + require.NoError(t, err) + + resp, err := http.Get(tt.output.URL()) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + require.Equal(t, + strings.TrimSpace(string(tt.expected)), + strings.TrimSpace(string(body))) }) } } - -func TestWrite_Summary(t *testing.T) { - client := NewClient() - - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"sum": 84, "count": 42, "0": 2, "0.5": 3, "1": 4}, - time.Now(), - telegraf.Summary) - - err = client.Write([]telegraf.Metric{p1}) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, 1, len(fam.Samples)) - - sample1, ok := 
fam.Samples[CreateSampleID(p1.Tags())] - require.True(t, ok) - - require.Equal(t, 84.0, sample1.Sum) - require.Equal(t, uint64(42), sample1.Count) - require.Equal(t, 3, len(sample1.SummaryValue)) -} - -func TestWrite_Histogram(t *testing.T) { - client := NewClient() - - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"sum": 84, "count": 42, "0": 2, "0.5": 3, "1": 4}, - time.Now(), - telegraf.Histogram) - - err = client.Write([]telegraf.Metric{p1}) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, 1, len(fam.Samples)) - - sample1, ok := fam.Samples[CreateSampleID(p1.Tags())] - require.True(t, ok) - - require.Equal(t, 84.0, sample1.Sum) - require.Equal(t, uint64(42), sample1.Count) - require.Equal(t, 3, len(sample1.HistogramValue)) -} - -func TestWrite_MixedValueType(t *testing.T) { - now := time.Now() - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 1.0}, - now, - telegraf.Counter) - p2, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 2.0}, - now, - telegraf.Gauge) - var metrics = []telegraf.Metric{p1, p2} - - client := NewClient() - err = client.Write(metrics) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, 1, len(fam.Samples)) -} - -func TestWrite_MixedValueTypeUpgrade(t *testing.T) { - now := time.Now() - p1, err := metric.New( - "foo", - map[string]string{"a": "x"}, - map[string]interface{}{"value": 1.0}, - now, - telegraf.Untyped) - p2, err := metric.New( - "foo", - map[string]string{"a": "y"}, - map[string]interface{}{"value": 2.0}, - now, - telegraf.Gauge) - var metrics = []telegraf.Metric{p1, p2} - - client := NewClient() - err = client.Write(metrics) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, 2, len(fam.Samples)) -} - -func TestWrite_MixedValueTypeDowngrade(t *testing.T) { - now := time.Now() - p1, err := metric.New( - "foo", - map[string]string{"a": "x"}, - map[string]interface{}{"value": 1.0}, - now, - telegraf.Gauge) - p2, err := metric.New( - "foo", - map[string]string{"a": "y"}, - map[string]interface{}{"value": 2.0}, - now, - telegraf.Untyped) - var metrics = []telegraf.Metric{p1, p2} - - client := NewClient() - err = client.Write(metrics) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, 2, len(fam.Samples)) -} - -func TestWrite_Tags(t *testing.T) { - now := time.Now() - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 1.0}, - now) - p2, err := metric.New( - "foo", - map[string]string{"host": "localhost"}, - map[string]interface{}{"value": 2.0}, - now) - var metrics = []telegraf.Metric{p1, p2} - - client := NewClient() - err = client.Write(metrics) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, telegraf.Untyped, fam.TelegrafValueType) - - require.Equal(t, map[string]int{"host": 1}, fam.LabelSet) - - sample1, ok := fam.Samples[CreateSampleID(p1.Tags())] - require.True(t, ok) - - require.Equal(t, 1.0, sample1.Value) - require.True(t, now.Before(sample1.Expiration)) - - sample2, ok := fam.Samples[CreateSampleID(p2.Tags())] - require.True(t, ok) - - require.Equal(t, 2.0, sample2.Value) - require.True(t, now.Before(sample2.Expiration)) -} - -func TestWrite_StringFields(t *testing.T) { - now := time.Now() - p1, err := metric.New( - "foo", - 
make(map[string]string), - map[string]interface{}{"value": 1.0, "status": "good"}, - now, - telegraf.Counter) - p2, err := metric.New( - "bar", - make(map[string]string), - map[string]interface{}{"status": "needs numeric field"}, - now, - telegraf.Gauge) - var metrics = []telegraf.Metric{p1, p2} - - client := NewClient() - err = client.Write(metrics) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, 1, fam.LabelSet["status"]) - - fam, ok = client.fam["bar"] - require.False(t, ok) -} - -func TestDoNotWrite_StringFields(t *testing.T) { - now := time.Now() - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 1.0, "status": "good"}, - now, - telegraf.Counter) - p2, err := metric.New( - "bar", - make(map[string]string), - map[string]interface{}{"status": "needs numeric field"}, - now, - telegraf.Gauge) - var metrics = []telegraf.Metric{p1, p2} - - client := &PrometheusClient{ - ExpirationInterval: internal.Duration{Duration: time.Second * 60}, - StringAsLabel: false, - fam: make(map[string]*MetricFamily), - now: time.Now, - } - - err = client.Write(metrics) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, 0, fam.LabelSet["status"]) - - fam, ok = client.fam["bar"] - require.False(t, ok) -} - -func TestExpire(t *testing.T) { - client := NewClient() - - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 1.0}, - time.Now()) - setUnixTime(client, 0) - err = client.Write([]telegraf.Metric{p1}) - require.NoError(t, err) - - p2, err := metric.New( - "bar", - make(map[string]string), - map[string]interface{}{"value": 2.0}, - time.Now()) - setUnixTime(client, 1) - err = client.Write([]telegraf.Metric{p2}) - - setUnixTime(client, 61) - require.Equal(t, 2, len(client.fam)) - client.Expire() - require.Equal(t, 1, len(client.fam)) -} - -func TestExpire_TagsNoDecrement(t *testing.T) { - client := NewClient() - - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 1.0}, - time.Now()) - setUnixTime(client, 0) - err = client.Write([]telegraf.Metric{p1}) - require.NoError(t, err) - - p2, err := metric.New( - "foo", - map[string]string{"host": "localhost"}, - map[string]interface{}{"value": 2.0}, - time.Now()) - setUnixTime(client, 1) - err = client.Write([]telegraf.Metric{p2}) - - setUnixTime(client, 61) - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, 2, len(fam.Samples)) - client.Expire() - require.Equal(t, 1, len(fam.Samples)) - - require.Equal(t, map[string]int{"host": 1}, fam.LabelSet) -} - -func TestExpire_TagsWithDecrement(t *testing.T) { - client := NewClient() - - p1, err := metric.New( - "foo", - map[string]string{"host": "localhost"}, - map[string]interface{}{"value": 1.0}, - time.Now()) - setUnixTime(client, 0) - err = client.Write([]telegraf.Metric{p1}) - require.NoError(t, err) - - p2, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 2.0}, - time.Now()) - setUnixTime(client, 1) - err = client.Write([]telegraf.Metric{p2}) - - setUnixTime(client, 61) - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, 2, len(fam.Samples)) - client.Expire() - require.Equal(t, 1, len(fam.Samples)) - - require.Equal(t, map[string]int{"host": 0}, fam.LabelSet) -} - -var pTesting *PrometheusClient - -func TestPrometheusWritePointEmptyTag(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short 
mode") - } - - pClient, p, err := setupPrometheus() - require.NoError(t, err) - defer pClient.Close() - - now := time.Now() - tags := make(map[string]string) - pt1, _ := metric.New( - "test_point_1", - tags, - map[string]interface{}{"value": 0.0}, - now) - pt2, _ := metric.New( - "test_point_2", - tags, - map[string]interface{}{"value": 1.0}, - now) - var metrics = []telegraf.Metric{ - pt1, - pt2, - } - require.NoError(t, pClient.Write(metrics)) - - expected := []struct { - name string - value float64 - tags map[string]string - }{ - {"test_point_1", 0.0, tags}, - {"test_point_2", 1.0, tags}, - } - - var acc testutil.Accumulator - - require.NoError(t, p.Gather(&acc)) - for _, e := range expected { - acc.AssertContainsFields(t, e.name, - map[string]interface{}{"value": e.value}) - } - - tags = make(map[string]string) - tags["testtag"] = "testvalue" - pt3, _ := metric.New( - "test_point_3", - tags, - map[string]interface{}{"value": 0.0}, - now) - pt4, _ := metric.New( - "test_point_4", - tags, - map[string]interface{}{"value": 1.0}, - now) - metrics = []telegraf.Metric{ - pt3, - pt4, - } - require.NoError(t, pClient.Write(metrics)) - - expected2 := []struct { - name string - value float64 - }{ - {"test_point_3", 0.0}, - {"test_point_4", 1.0}, - } - - require.NoError(t, p.Gather(&acc)) - for _, e := range expected2 { - acc.AssertContainsFields(t, e.name, - map[string]interface{}{"value": e.value}) - } -} - -func setupPrometheus() (*PrometheusClient, *prometheus_input.Prometheus, error) { - if pTesting == nil { - pTesting = NewClient() - pTesting.Listen = "localhost:9127" - pTesting.Path = "/metrics" - err := pTesting.Connect() - if err != nil { - return nil, nil, err - } - } else { - pTesting.fam = make(map[string]*MetricFamily) - } - - time.Sleep(time.Millisecond * 200) - - p := &prometheus_input.Prometheus{ - URLs: []string{"http://localhost:9127/metrics"}, - } - - return pTesting, p, nil -} diff --git a/plugins/outputs/prometheus_client/v1/collector.go b/plugins/outputs/prometheus_client/v1/collector.go new file mode 100644 index 000000000..72b09be08 --- /dev/null +++ b/plugins/outputs/prometheus_client/v1/collector.go @@ -0,0 +1,391 @@ +package v1 + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_:]`) + validNameCharRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*`) +) + +// SampleID uniquely identifies a Sample +type SampleID string + +// Sample represents the current value of a series. +type Sample struct { + // Labels are the Prometheus labels. + Labels map[string]string + // Value is the value in the Prometheus output. Only one of these will populated. + Value float64 + HistogramValue map[float64]uint64 + SummaryValue map[float64]float64 + // Histograms and Summaries need a count and a sum + Count uint64 + Sum float64 + // Metric timestamp + Timestamp time.Time + // Expiration is the deadline that this Sample is valid until. + Expiration time.Time +} + +// MetricFamily contains the data required to build valid prometheus Metrics. +type MetricFamily struct { + // Samples are the Sample belonging to this MetricFamily. + Samples map[SampleID]*Sample + // Need the telegraf ValueType because there isn't a Prometheus ValueType + // representing Histogram or Summary + TelegrafValueType telegraf.ValueType + // LabelSet is the label counts for all Samples. 
+ LabelSet map[string]int +} + +type Collector struct { + ExpirationInterval time.Duration + StringAsLabel bool + ExportTimestamp bool + Log telegraf.Logger + + sync.Mutex + fam map[string]*MetricFamily +} + +func NewCollector(expire time.Duration, stringsAsLabel bool, logger telegraf.Logger) *Collector { + return &Collector{ + ExpirationInterval: expire, + StringAsLabel: stringsAsLabel, + Log: logger, + fam: make(map[string]*MetricFamily), + } +} + +func (c *Collector) Describe(ch chan<- *prometheus.Desc) { + prometheus.NewGauge(prometheus.GaugeOpts{Name: "Dummy", Help: "Dummy"}).Describe(ch) +} + +func (c *Collector) Collect(ch chan<- prometheus.Metric) { + c.Lock() + defer c.Unlock() + + c.Expire(time.Now(), c.ExpirationInterval) + + for name, family := range c.fam { + // Get list of all labels on MetricFamily + var labelNames []string + for k, v := range family.LabelSet { + if v > 0 { + labelNames = append(labelNames, k) + } + } + desc := prometheus.NewDesc(name, "Telegraf collected metric", labelNames, nil) + + for _, sample := range family.Samples { + // Get labels for this sample; unset labels will be set to the + // empty string + var labels []string + for _, label := range labelNames { + v := sample.Labels[label] + labels = append(labels, v) + } + + var metric prometheus.Metric + var err error + switch family.TelegrafValueType { + case telegraf.Summary: + metric, err = prometheus.NewConstSummary(desc, sample.Count, sample.Sum, sample.SummaryValue, labels...) + case telegraf.Histogram: + metric, err = prometheus.NewConstHistogram(desc, sample.Count, sample.Sum, sample.HistogramValue, labels...) + default: + metric, err = prometheus.NewConstMetric(desc, getPromValueType(family.TelegrafValueType), sample.Value, labels...) + } + if err != nil { + c.Log.Errorf("Error creating prometheus metric: "+ + "key: %s, labels: %v, err: %v", + name, labels, err) + continue + } + + if c.ExportTimestamp { + metric = prometheus.NewMetricWithTimestamp(sample.Timestamp, metric) + } + ch <- metric + } + } +} + +func sanitize(value string) string { + return invalidNameCharRE.ReplaceAllString(value, "_") +} + +func isValidTagName(tag string) bool { + return validNameCharRE.MatchString(tag) +} + +func getPromValueType(tt telegraf.ValueType) prometheus.ValueType { + switch tt { + case telegraf.Counter: + return prometheus.CounterValue + case telegraf.Gauge: + return prometheus.GaugeValue + default: + return prometheus.UntypedValue + } +} + +// CreateSampleID creates a SampleID based on the tags of a telegraf.Metric. +func CreateSampleID(tags map[string]string) SampleID { + pairs := make([]string, 0, len(tags)) + for k, v := range tags { + pairs = append(pairs, fmt.Sprintf("%s=%s", k, v)) + } + sort.Strings(pairs) + return SampleID(strings.Join(pairs, ",")) +} + +func addSample(fam *MetricFamily, sample *Sample, sampleID SampleID) { + + for k := range sample.Labels { + fam.LabelSet[k]++ + } + + fam.Samples[sampleID] = sample +} + +func (c *Collector) addMetricFamily(point telegraf.Metric, sample *Sample, mname string, sampleID SampleID) { + var fam *MetricFamily + var ok bool + if fam, ok = c.fam[mname]; !ok { + fam = &MetricFamily{ + Samples: make(map[SampleID]*Sample), + TelegrafValueType: point.Type(), + LabelSet: make(map[string]int), + } + c.fam[mname] = fam + } + + addSample(fam, sample, sampleID) +} + +// Sorted returns a copy of the metrics in time ascending order. A copy is +// made to avoid modifying the input metric slice since doing so is not +// allowed. 
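+// For example, a batch received as [t2, t0, t1] is returned as [t0, t1, t2],
+// so when duplicate series appear in one batch the newest value is written
+// last and wins.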
+func sorted(metrics []telegraf.Metric) []telegraf.Metric { + batch := make([]telegraf.Metric, 0, len(metrics)) + for i := len(metrics) - 1; i >= 0; i-- { + batch = append(batch, metrics[i]) + } + sort.Slice(batch, func(i, j int) bool { + return batch[i].Time().Before(batch[j].Time()) + }) + return batch +} + +func (c *Collector) Add(metrics []telegraf.Metric) error { + c.Lock() + defer c.Unlock() + + now := time.Now() + + for _, point := range sorted(metrics) { + tags := point.Tags() + sampleID := CreateSampleID(tags) + + labels := make(map[string]string) + for k, v := range tags { + tName := sanitize(k) + if !isValidTagName(tName) { + continue + } + labels[tName] = v + } + + // Prometheus doesn't have a string value type, so convert string + // fields to labels if enabled. + if c.StringAsLabel { + for fn, fv := range point.Fields() { + switch fv := fv.(type) { + case string: + tName := sanitize(fn) + if !isValidTagName(tName) { + continue + } + labels[tName] = fv + } + } + } + + switch point.Type() { + case telegraf.Summary: + var mname string + var sum float64 + var count uint64 + summaryvalue := make(map[float64]float64) + for fn, fv := range point.Fields() { + var value float64 + switch fv := fv.(type) { + case int64: + value = float64(fv) + case uint64: + value = float64(fv) + case float64: + value = fv + default: + continue + } + + switch fn { + case "sum": + sum = value + case "count": + count = uint64(value) + default: + limit, err := strconv.ParseFloat(fn, 64) + if err == nil { + summaryvalue[limit] = value + } + } + } + sample := &Sample{ + Labels: labels, + SummaryValue: summaryvalue, + Count: count, + Sum: sum, + Timestamp: point.Time(), + Expiration: now.Add(c.ExpirationInterval), + } + mname = sanitize(point.Name()) + + if !isValidTagName(mname) { + continue + } + + c.addMetricFamily(point, sample, mname, sampleID) + + case telegraf.Histogram: + var mname string + var sum float64 + var count uint64 + histogramvalue := make(map[float64]uint64) + for fn, fv := range point.Fields() { + var value float64 + switch fv := fv.(type) { + case int64: + value = float64(fv) + case uint64: + value = float64(fv) + case float64: + value = fv + default: + continue + } + + switch fn { + case "sum": + sum = value + case "count": + count = uint64(value) + default: + limit, err := strconv.ParseFloat(fn, 64) + if err == nil { + histogramvalue[limit] = uint64(value) + } + } + } + sample := &Sample{ + Labels: labels, + HistogramValue: histogramvalue, + Count: count, + Sum: sum, + Timestamp: point.Time(), + Expiration: now.Add(c.ExpirationInterval), + } + mname = sanitize(point.Name()) + + if !isValidTagName(mname) { + continue + } + + c.addMetricFamily(point, sample, mname, sampleID) + + default: + for fn, fv := range point.Fields() { + // Ignore string and bool fields. + var value float64 + switch fv := fv.(type) { + case int64: + value = float64(fv) + case uint64: + value = float64(fv) + case float64: + value = fv + default: + continue + } + + sample := &Sample{ + Labels: labels, + Value: value, + Timestamp: point.Time(), + Expiration: now.Add(c.ExpirationInterval), + } + + // Special handling of value field; supports passthrough from + // the prometheus input. 
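+			// For example, a Counter point "foo" keeps the metric name "foo"
+			// for a field named "counter" or "value", while any other field
+			// "bar" becomes "foo_bar".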
+ var mname string + switch point.Type() { + case telegraf.Counter: + if fn == "counter" { + mname = sanitize(point.Name()) + } + case telegraf.Gauge: + if fn == "gauge" { + mname = sanitize(point.Name()) + } + } + if mname == "" { + if fn == "value" { + mname = sanitize(point.Name()) + } else { + mname = sanitize(fmt.Sprintf("%s_%s", point.Name(), fn)) + } + } + if !isValidTagName(mname) { + continue + } + c.addMetricFamily(point, sample, mname, sampleID) + + } + } + } + return nil +} + +func (c *Collector) Expire(now time.Time, age time.Duration) { + if age == 0 { + return + } + + for name, family := range c.fam { + for key, sample := range family.Samples { + if age != 0 && now.After(sample.Expiration) { + for k := range sample.Labels { + family.LabelSet[k]-- + } + delete(family.Samples, key) + + if len(family.Samples) == 0 { + delete(c.fam, name) + } + } + } + } +} diff --git a/plugins/outputs/prometheus_client/v2/collector.go b/plugins/outputs/prometheus_client/v2/collector.go new file mode 100644 index 000000000..45e1cb7a7 --- /dev/null +++ b/plugins/outputs/prometheus_client/v2/collector.go @@ -0,0 +1,87 @@ +package v2 + +import ( + "sync" + "time" + + "github.com/influxdata/telegraf" + serializer "github.com/influxdata/telegraf/plugins/serializers/prometheus" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +type Metric struct { + family *dto.MetricFamily + metric *dto.Metric +} + +func (m *Metric) Desc() *prometheus.Desc { + labelNames := make([]string, 0, len(m.metric.Label)) + for _, label := range m.metric.Label { + labelNames = append(labelNames, *label.Name) + } + + desc := prometheus.NewDesc(*m.family.Name, *m.family.Help, labelNames, nil) + + return desc +} + +func (m *Metric) Write(out *dto.Metric) error { + out.Label = m.metric.Label + out.Counter = m.metric.Counter + out.Untyped = m.metric.Untyped + out.Gauge = m.metric.Gauge + out.Histogram = m.metric.Histogram + out.Summary = m.metric.Summary + out.TimestampMs = m.metric.TimestampMs + return nil +} + +type Collector struct { + sync.Mutex + expireDuration time.Duration + coll *serializer.Collection +} + +func NewCollector(expire time.Duration, stringsAsLabel bool) *Collector { + config := serializer.FormatConfig{} + if stringsAsLabel { + config.StringHandling = serializer.StringAsLabel + } + return &Collector{ + expireDuration: expire, + coll: serializer.NewCollection(config), + } +} + +func (c *Collector) Describe(ch chan<- *prometheus.Desc) { + // Sending no descriptor at all marks the Collector as "unchecked", + // i.e. no checks will be performed at registration time, and the + // Collector may yield any Metric it sees fit in its Collect method. + return +} + +func (c *Collector) Collect(ch chan<- prometheus.Metric) { + c.Lock() + defer c.Unlock() + + for _, family := range c.coll.GetProto() { + for _, metric := range family.Metric { + ch <- &Metric{family: family, metric: metric} + } + } +} + +func (c *Collector) Add(metrics []telegraf.Metric) error { + c.Lock() + defer c.Unlock() + + for _, metric := range metrics { + c.coll.Add(metric) + } + + if c.expireDuration != 0 { + c.coll.Expire(time.Now(), c.expireDuration) + } + return nil +} diff --git a/plugins/serializers/prometheus/README.md b/plugins/serializers/prometheus/README.md new file mode 100644 index 000000000..9a0cdfea2 --- /dev/null +++ b/plugins/serializers/prometheus/README.md @@ -0,0 +1,68 @@ +# Prometheus + +The `prometheus` data format converts metrics into the Prometheus text +exposition format. 
When used with the `prometheus` input, the input should
+use the `metric_version = 2` option in order to properly round trip metrics.
+
+**Warning**: When generating histogram and summary types, output may
+not be correct if the metric spans multiple batches.  This issue can be
+somewhat, but not fully, mitigated by using outputs that support writing in
+"batch format".  When using histogram and summary types, it is recommended to
+use only the `prometheus_client` output.
+
+## Configuration
+
+```toml
+[[outputs.file]]
+  files = ["stdout"]
+  use_batch_format = true
+
+  ## Include the metric timestamp on each sample.
+  prometheus_export_timestamp = false
+
+  ## Sort prometheus metric families and metric samples.  Useful for
+  ## debugging.
+  prometheus_sort_metrics = false
+
+  ## Output string fields as metric labels; when false string fields are
+  ## discarded.
+  prometheus_string_as_label = false
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "prometheus"
+```
+
+### Example
+
+**Example Input**
+```
+cpu,cpu=cpu0 time_guest=8022.6,time_system=26145.98,time_user=92512.89 1574317740000000000
+cpu,cpu=cpu1 time_guest=8097.88,time_system=25223.35,time_user=96519.58 1574317740000000000
+cpu,cpu=cpu2 time_guest=7386.28,time_system=24870.37,time_user=95631.59 1574317740000000000
+cpu,cpu=cpu3 time_guest=7434.19,time_system=24843.71,time_user=93753.88 1574317740000000000
+```
+
+**Example Output**
+```
+# HELP cpu_time_guest Telegraf collected metric
+# TYPE cpu_time_guest counter
+cpu_time_guest{cpu="cpu0"} 9582.54
+cpu_time_guest{cpu="cpu1"} 9660.88
+cpu_time_guest{cpu="cpu2"} 8946.45
+cpu_time_guest{cpu="cpu3"} 9002.31
+# HELP cpu_time_system Telegraf collected metric
+# TYPE cpu_time_system counter
+cpu_time_system{cpu="cpu0"} 28675.47
+cpu_time_system{cpu="cpu1"} 27779.34
+cpu_time_system{cpu="cpu2"} 27406.18
+cpu_time_system{cpu="cpu3"} 27404.97
+# HELP cpu_time_user Telegraf collected metric
+# TYPE cpu_time_user counter
+cpu_time_user{cpu="cpu0"} 99551.84
+cpu_time_user{cpu="cpu1"} 103468.52
+cpu_time_user{cpu="cpu2"} 102591.45
+cpu_time_user{cpu="cpu3"} 100717.05
+```
diff --git a/plugins/serializers/prometheus/collection.go b/plugins/serializers/prometheus/collection.go
new file mode 100644
index 000000000..d16208622
--- /dev/null
+++ b/plugins/serializers/prometheus/collection.go
@@ -0,0 +1,464 @@
+package prometheus
+
+import (
+	"hash/fnv"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/gogo/protobuf/proto"
+	"github.com/influxdata/telegraf"
+	dto "github.com/prometheus/client_model/go"
+)
+
+const helpString = "Telegraf collected metric"
+
+type MetricFamily struct {
+	Name string
+	Type telegraf.ValueType
+}
+
+type Metric struct {
+	Labels    []LabelPair
+	Time      time.Time
+	Scaler    *Scaler
+	Histogram *Histogram
+	Summary   *Summary
+}
+
+type LabelPair struct {
+	Name  string
+	Value string
+}
+
+type Scaler struct {
+	Value float64
+}
+
+type Bucket struct {
+	Bound float64
+	Count uint64
+}
+
+type Quantile struct {
+	Quantile float64
+	Value    float64
+}
+
+type Histogram struct {
+	Buckets []Bucket
+	Count   uint64
+	Sum     float64
+}
+
+type Summary struct {
+	Quantiles []Quantile
+	Count     uint64
+	Sum       float64
+}
+
+type MetricKey uint64
+
+func MakeMetricKey(labels []LabelPair) MetricKey {
+	h := fnv.New64a()
+	for _, label := range labels {
+		h.Write([]byte(label.Name))
+		h.Write([]byte("\x00"))
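+		// The NUL separator keeps keys unambiguous; without it the label
+		// pairs ("ab", "c") and ("a", "bc") would hash to the same key.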
+ h.Write([]byte(label.Value)) + h.Write([]byte("\x00")) + } + return MetricKey(h.Sum64()) +} + +type Entry struct { + Family MetricFamily + Metrics map[MetricKey]*Metric +} + +type Collection struct { + config FormatConfig + Entries map[MetricFamily]Entry +} + +func NewCollection(config FormatConfig) *Collection { + cache := &Collection{ + config: config, + Entries: make(map[MetricFamily]Entry), + } + return cache +} + +func hasLabel(name string, labels []LabelPair) bool { + for _, label := range labels { + if name == label.Name { + return true + } + } + return false +} + +func (c *Collection) createLabels(metric telegraf.Metric) []LabelPair { + labels := make([]LabelPair, 0, len(metric.TagList())) + for _, tag := range metric.TagList() { + // Ignore special tags for histogram and summary types. + switch metric.Type() { + case telegraf.Histogram: + if tag.Key == "le" { + continue + } + case telegraf.Summary: + if tag.Key == "quantile" { + continue + } + } + + name, ok := SanitizeName(tag.Key) + if !ok { + continue + } + + labels = append(labels, LabelPair{Name: name, Value: tag.Value}) + } + + if c.config.StringHandling != StringAsLabel { + return labels + } + + addedFieldLabel := false + for _, field := range metric.FieldList() { + value, ok := field.Value.(string) + if !ok { + continue + } + + name, ok := SanitizeName(field.Key) + if !ok { + continue + } + + // If there is a tag with the same name as the string field, discard + // the field and use the tag instead. + if hasLabel(name, labels) { + continue + } + + labels = append(labels, LabelPair{Name: name, Value: value}) + addedFieldLabel = true + + } + + if addedFieldLabel { + sort.Slice(labels, func(i, j int) bool { + return labels[i].Name < labels[j].Name + }) + } + + return labels +} + +func (c *Collection) Add(metric telegraf.Metric) { + labels := c.createLabels(metric) + for _, field := range metric.FieldList() { + metricName := MetricName(metric.Name(), field.Key, metric.Type()) + metricName, ok := SanitizeName(metricName) + if !ok { + continue + } + + family := MetricFamily{ + Name: metricName, + Type: metric.Type(), + } + + entry, ok := c.Entries[family] + if !ok { + entry = Entry{ + Family: family, + Metrics: make(map[MetricKey]*Metric), + } + c.Entries[family] = entry + + } + + metricKey := MakeMetricKey(labels) + + m, ok := entry.Metrics[metricKey] + if ok { + // A batch of metrics can contain multiple values for a single + // Prometheus sample. If this metric is older than the existing + // sample then we can skip over it. 
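+			// For example, two "cpu" points with identical tags in one batch
+			// resolve to the value with the newest timestamp.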
+			if metric.Time().Before(m.Time) {
+				continue
+			}
+		}
+
+		switch metric.Type() {
+		case telegraf.Counter:
+			fallthrough
+		case telegraf.Gauge:
+			fallthrough
+		case telegraf.Untyped:
+			value, ok := SampleValue(field.Value)
+			if !ok {
+				continue
+			}
+
+			m = &Metric{
+				Labels: labels,
+				Time:   metric.Time(),
+				Scaler: &Scaler{Value: value},
+			}
+
+			// A sample for this key may already exist from an earlier point
+			// in the batch; overwrite it, since older duplicates were already
+			// skipped above.
+			entry.Metrics[metricKey] = m
+		case telegraf.Histogram:
+			if m == nil {
+				m = &Metric{
+					Labels:    labels,
+					Time:      metric.Time(),
+					Histogram: &Histogram{},
+				}
+			}
+			switch {
+			case strings.HasSuffix(field.Key, "_bucket"):
+				le, ok := metric.GetTag("le")
+				if !ok {
+					continue
+				}
+				bound, err := strconv.ParseFloat(le, 64)
+				if err != nil {
+					continue
+				}
+
+				count, ok := SampleCount(field.Value)
+				if !ok {
+					continue
+				}
+
+				m.Histogram.Buckets = append(m.Histogram.Buckets, Bucket{
+					Bound: bound,
+					Count: count,
+				})
+			case strings.HasSuffix(field.Key, "_sum"):
+				sum, ok := SampleSum(field.Value)
+				if !ok {
+					continue
+				}
+
+				m.Histogram.Sum = sum
+			case strings.HasSuffix(field.Key, "_count"):
+				count, ok := SampleCount(field.Value)
+				if !ok {
+					continue
+				}
+
+				m.Histogram.Count = count
+			default:
+				continue
+			}
+
+			entry.Metrics[metricKey] = m
+		case telegraf.Summary:
+			if m == nil {
+				m = &Metric{
+					Labels:  labels,
+					Time:    metric.Time(),
+					Summary: &Summary{},
+				}
+			}
+			switch {
+			case strings.HasSuffix(field.Key, "_sum"):
+				sum, ok := SampleSum(field.Value)
+				if !ok {
+					continue
+				}
+
+				m.Summary.Sum = sum
+			case strings.HasSuffix(field.Key, "_count"):
+				count, ok := SampleCount(field.Value)
+				if !ok {
+					continue
+				}
+
+				m.Summary.Count = count
+			default:
+				quantileTag, ok := metric.GetTag("quantile")
+				if !ok {
+					continue
+				}
+				quantile, err := strconv.ParseFloat(quantileTag, 64)
+				if err != nil {
+					continue
+				}
+
+				value, ok := SampleValue(field.Value)
+				if !ok {
+					continue
+				}
+
+				m.Summary.Quantiles = append(m.Summary.Quantiles, Quantile{
+					Quantile: quantile,
+					Value:    value,
+				})
+			}
+
+			entry.Metrics[metricKey] = m
+		}
+	}
+}
+
+func (c *Collection) Expire(now time.Time, age time.Duration) {
+	expireTime := now.Add(-age)
+	for _, entry := range c.Entries {
+		for key, metric := range entry.Metrics {
+			if metric.Time.Before(expireTime) {
+				delete(entry.Metrics, key)
+				if len(entry.Metrics) == 0 {
+					delete(c.Entries, entry.Family)
+				}
+			}
+		}
+	}
+}
+
+func (c *Collection) GetEntries(order MetricSortOrder) []Entry {
+	entries := make([]Entry, 0, len(c.Entries))
+	for _, entry := range c.Entries {
+		entries = append(entries, entry)
+	}
+
+	switch order {
+	case SortMetrics:
+		sort.Slice(entries, func(i, j int) bool {
+			lhs := entries[i].Family
+			rhs := entries[j].Family
+			if lhs.Name != rhs.Name {
+				return lhs.Name < rhs.Name
+			}
+
+			return lhs.Type < rhs.Type
+		})
+	}
+	return entries
+}
+
+func (c *Collection) GetMetrics(entry Entry, order MetricSortOrder) []*Metric {
+	metrics := make([]*Metric, 0, len(entry.Metrics))
+	for _, metric := range entry.Metrics {
+		metrics = append(metrics, metric)
+	}
+
+	switch order {
+	case SortMetrics:
+		sort.Slice(metrics, func(i, j int) bool {
+			lhs := metrics[i].Labels
+			rhs := metrics[j].Labels
+			if len(lhs) != len(rhs) {
+				return len(lhs) < len(rhs)
+			}
+
+			for index := range lhs {
+				l := lhs[index]
+				r := rhs[index]
+
+				if l.Name != r.Name {
+					return l.Name < r.Name
+				}
+
+				if l.Value != r.Value {
+					return l.Value < r.Value
+				}
+			}
+
+			return false
+		})
+	}
+
+	return metrics
+}
+
+func (c *Collection) GetProto() []*dto.MetricFamily {
+	result := make([]*dto.MetricFamily, 0,
len(c.Entries)) + + for _, entry := range c.GetEntries(c.config.MetricSortOrder) { + mf := &dto.MetricFamily{ + Name: proto.String(entry.Family.Name), + Help: proto.String(helpString), + Type: MetricType(entry.Family.Type), + } + + for _, metric := range c.GetMetrics(entry, c.config.MetricSortOrder) { + l := make([]*dto.LabelPair, 0, len(metric.Labels)) + for _, label := range metric.Labels { + l = append(l, &dto.LabelPair{ + Name: proto.String(label.Name), + Value: proto.String(label.Value), + }) + } + + m := &dto.Metric{ + Label: l, + } + + if c.config.TimestampExport == ExportTimestamp { + m.TimestampMs = proto.Int64(metric.Time.UnixNano() / int64(time.Millisecond)) + } + + switch entry.Family.Type { + case telegraf.Gauge: + m.Gauge = &dto.Gauge{Value: proto.Float64(metric.Scaler.Value)} + case telegraf.Counter: + m.Counter = &dto.Counter{Value: proto.Float64(metric.Scaler.Value)} + case telegraf.Untyped: + m.Untyped = &dto.Untyped{Value: proto.Float64(metric.Scaler.Value)} + case telegraf.Histogram: + buckets := make([]*dto.Bucket, 0, len(metric.Histogram.Buckets)) + for _, bucket := range metric.Histogram.Buckets { + buckets = append(buckets, &dto.Bucket{ + UpperBound: proto.Float64(bucket.Bound), + CumulativeCount: proto.Uint64(bucket.Count), + }) + } + + if len(buckets) == 0 { + continue + } + + m.Histogram = &dto.Histogram{ + Bucket: buckets, + SampleCount: proto.Uint64(metric.Histogram.Count), + SampleSum: proto.Float64(metric.Histogram.Sum), + } + case telegraf.Summary: + quantiles := make([]*dto.Quantile, 0, len(metric.Summary.Quantiles)) + for _, quantile := range metric.Summary.Quantiles { + quantiles = append(quantiles, &dto.Quantile{ + Quantile: proto.Float64(quantile.Quantile), + Value: proto.Float64(quantile.Value), + }) + } + + if len(quantiles) == 0 { + continue + } + + m.Summary = &dto.Summary{ + Quantile: quantiles, + SampleCount: proto.Uint64(metric.Summary.Count), + SampleSum: proto.Float64(metric.Summary.Sum), + } + default: + panic("unknown telegraf.ValueType") + } + + mf.Metric = append(mf.Metric, m) + } + + if len(mf.Metric) != 0 { + result = append(result, mf) + } + } + + return result +} diff --git a/plugins/serializers/prometheus/collection_test.go b/plugins/serializers/prometheus/collection_test.go new file mode 100644 index 000000000..589c306b5 --- /dev/null +++ b/plugins/serializers/prometheus/collection_test.go @@ -0,0 +1,116 @@ +package prometheus + +import ( + "testing" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + dto "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/require" +) + +func TestCollectionExpire(t *testing.T) { + tests := []struct { + name string + now time.Time + age time.Duration + metrics []telegraf.Metric + expected []*dto.MetricFamily + }{ + { + name: "not expired", + now: time.Unix(1, 0), + age: 10 * time.Second, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("cpu_time_idle"), + Help: proto.String(helpString), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Untyped: &dto.Untyped{Value: proto.Float64(42.0)}, + }, + }, + }, + }, + }, + { + name: "expired single metric in metric family", + now: time.Unix(20, 0), + age: 10 * time.Second, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + 
map[string]string{},
+					map[string]interface{}{
+						"time_idle": 42.0,
+					},
+					time.Unix(0, 0),
+				),
+			},
+			expected: []*dto.MetricFamily{},
+		},
+		{
+			name: "expired one metric in metric family",
+			now:  time.Unix(20, 0),
+			age:  10 * time.Second,
+			metrics: []telegraf.Metric{
+				testutil.MustMetric(
+					"cpu",
+					map[string]string{},
+					map[string]interface{}{
+						"time_idle": 42.0,
+					},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric(
+					"cpu",
+					map[string]string{},
+					map[string]interface{}{
+						"time_guest": 42.0,
+					},
+					time.Unix(15, 0),
+				),
+			},
+			expected: []*dto.MetricFamily{
+				{
+					Name: proto.String("cpu_time_guest"),
+					Help: proto.String(helpString),
+					Type: dto.MetricType_UNTYPED.Enum(),
+					Metric: []*dto.Metric{
+						{
+							Label:   []*dto.LabelPair{},
+							Untyped: &dto.Untyped{Value: proto.Float64(42.0)},
+						},
+					},
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			c := NewCollection(FormatConfig{})
+			for _, metric := range tt.metrics {
+				c.Add(metric)
+			}
+			c.Expire(tt.now, tt.age)
+
+			actual := c.GetProto()
+
+			require.Equal(t, tt.expected, actual)
+		})
+	}
+}
diff --git a/plugins/serializers/prometheus/convert.go b/plugins/serializers/prometheus/convert.go
new file mode 100644
index 000000000..2ef23be63
--- /dev/null
+++ b/plugins/serializers/prometheus/convert.go
@@ -0,0 +1,175 @@
+package prometheus
+
+import (
+	"strings"
+	"unicode"
+
+	"github.com/influxdata/telegraf"
+	dto "github.com/prometheus/client_model/go"
+)
+
+var FirstTable = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{0x0041, 0x005A, 1}, // A-Z
+		{0x005F, 0x005F, 1}, // _
+		{0x0061, 0x007A, 1}, // a-z
+	},
+	LatinOffset: 3,
+}
+
+var RestTable = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{0x0030, 0x0039, 1}, // 0-9
+		{0x0041, 0x005A, 1}, // A-Z
+		{0x005F, 0x005F, 1}, // _
+		{0x0061, 0x007A, 1}, // a-z
+	},
+	LatinOffset: 4,
+}
+
+func isValid(name string) bool {
+	if name == "" {
+		return false
+	}
+
+	for i, r := range name {
+		switch {
+		case i == 0:
+			if !unicode.In(r, FirstTable) {
+				return false
+			}
+		default:
+			if !unicode.In(r, RestTable) {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+// SanitizeName checks whether the name is a valid Prometheus metric or label
+// name.  If not, it attempts to replace invalid runes with underscores to
+// create a valid name.  Returns the sanitized name and true if the result is
+// valid to use.
+func SanitizeName(name string) (string, bool) {
+	if isValid(name) {
+		return name, true
+	}
+
+	var b strings.Builder
+
+	for i, r := range name {
+		switch {
+		case i == 0:
+			if unicode.In(r, FirstTable) {
+				b.WriteRune(r)
+			}
+		default:
+			if unicode.In(r, RestTable) {
+				b.WriteRune(r)
+			} else {
+				b.WriteString("_")
+			}
+		}
+	}
+
+	name = strings.Trim(b.String(), "_")
+	if name == "" {
+		return "", false
+	}
+
+	return name, true
+}
+
+// MetricName returns the Prometheus metric name.
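+// Histogram and summary field keys have their "_bucket", "_sum", and
+// "_count" suffixes stripped first, so ("http", "request_seconds_sum",
+// telegraf.Summary) yields "http_request_seconds"; the measurement name
+// "prometheus" acts as a passthrough and the field key is returned unchanged.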
+func MetricName(measurement, fieldKey string, valueType telegraf.ValueType) string { + switch valueType { + case telegraf.Histogram, telegraf.Summary: + switch { + case strings.HasSuffix(fieldKey, "_bucket"): + fieldKey = strings.TrimSuffix(fieldKey, "_bucket") + case strings.HasSuffix(fieldKey, "_sum"): + fieldKey = strings.TrimSuffix(fieldKey, "_sum") + case strings.HasSuffix(fieldKey, "_count"): + fieldKey = strings.TrimSuffix(fieldKey, "_count") + } + } + + if measurement == "prometheus" { + return fieldKey + } + return measurement + "_" + fieldKey +} + +func MetricType(valueType telegraf.ValueType) *dto.MetricType { + switch valueType { + case telegraf.Counter: + return dto.MetricType_COUNTER.Enum() + case telegraf.Gauge: + return dto.MetricType_GAUGE.Enum() + case telegraf.Summary: + return dto.MetricType_SUMMARY.Enum() + case telegraf.Untyped: + return dto.MetricType_UNTYPED.Enum() + case telegraf.Histogram: + return dto.MetricType_HISTOGRAM.Enum() + default: + panic("unknown telegraf.ValueType") + } +} + +// SampleValue converts a field value into a value suitable for a simple sample value. +func SampleValue(value interface{}) (float64, bool) { + switch v := value.(type) { + case float64: + return v, true + case int64: + return float64(v), true + case uint64: + return float64(v), true + case bool: + if v { + return 1.0, true + } + return 0.0, true + default: + return 0, false + } +} + +// SampleCount converts a field value into a count suitable for a metric family +// of the Histogram or Summary type. +func SampleCount(value interface{}) (uint64, bool) { + switch v := value.(type) { + case float64: + if v < 0 { + return 0, false + } + return uint64(v), true + case int64: + if v < 0 { + return 0, false + } + return uint64(v), true + case uint64: + return v, true + default: + return 0, false + } +} + +// SampleSum converts a field value into a sum suitable for a metric family +// of the Histogram or Summary type. +func SampleSum(value interface{}) (float64, bool) { + switch v := value.(type) { + case float64: + return v, true + case int64: + return float64(v), true + case uint64: + return float64(v), true + default: + return 0, false + } +} diff --git a/plugins/serializers/prometheus/prometheus.go b/plugins/serializers/prometheus/prometheus.go new file mode 100644 index 000000000..11c305aa4 --- /dev/null +++ b/plugins/serializers/prometheus/prometheus.go @@ -0,0 +1,69 @@ +package prometheus + +import ( + "bytes" + + "github.com/influxdata/telegraf" + "github.com/prometheus/common/expfmt" +) + +// TimestampExport controls if the output contains timestamps. +type TimestampExport int + +const ( + NoExportTimestamp TimestampExport = iota + ExportTimestamp +) + +// MetricSortOrder controls if the output is sorted. +type MetricSortOrder int + +const ( + NoSortMetrics MetricSortOrder = iota + SortMetrics +) + +// StringHandling defines how to process string fields. 
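+// DiscardStrings drops string fields entirely, while StringAsLabel converts
+// each string field into a label on the emitted samples.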
+type StringHandling int + +const ( + DiscardStrings StringHandling = iota + StringAsLabel +) + +type FormatConfig struct { + TimestampExport TimestampExport + MetricSortOrder MetricSortOrder + StringHandling StringHandling +} + +type Serializer struct { + config FormatConfig +} + +func NewSerializer(config FormatConfig) (*Serializer, error) { + s := &Serializer{config: config} + return s, nil +} + +func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) { + return s.SerializeBatch([]telegraf.Metric{metric}) +} + +func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { + coll := NewCollection(s.config) + for _, metric := range metrics { + coll.Add(metric) + } + + var buf bytes.Buffer + for _, mf := range coll.GetProto() { + enc := expfmt.NewEncoder(&buf, expfmt.FmtText) + err := enc.Encode(mf) + if err != nil { + return nil, err + } + } + + return buf.Bytes(), nil +} diff --git a/plugins/serializers/prometheus/prometheus_test.go b/plugins/serializers/prometheus/prometheus_test.go new file mode 100644 index 000000000..6195fbead --- /dev/null +++ b/plugins/serializers/prometheus/prometheus_test.go @@ -0,0 +1,589 @@ +package prometheus + +import ( + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestSerialize(t *testing.T) { + tests := []struct { + name string + config FormatConfig + metric telegraf.Metric + expected []byte + }{ + { + name: "simple", + metric: testutil.MustMetric( + "cpu", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "prometheus input untyped", + metric: testutil.MustMetric( + "prometheus", + map[string]string{ + "code": "400", + "method": "post", + }, + map[string]interface{}{ + "http_requests_total": 3.0, + }, + time.Unix(0, 0), + telegraf.Untyped, + ), + expected: []byte(` +# HELP http_requests_total Telegraf collected metric +# TYPE http_requests_total untyped +http_requests_total{code="400",method="post"} 3 +`), + }, + { + name: "prometheus input counter", + metric: testutil.MustMetric( + "prometheus", + map[string]string{ + "code": "400", + "method": "post", + }, + map[string]interface{}{ + "http_requests_total": 3.0, + }, + time.Unix(0, 0), + telegraf.Counter, + ), + expected: []byte(` +# HELP http_requests_total Telegraf collected metric +# TYPE http_requests_total counter +http_requests_total{code="400",method="post"} 3 +`), + }, + { + name: "prometheus input gauge", + metric: testutil.MustMetric( + "prometheus", + map[string]string{ + "code": "400", + "method": "post", + }, + map[string]interface{}{ + "http_requests_total": 3.0, + }, + time.Unix(0, 0), + telegraf.Gauge, + ), + expected: []byte(` +# HELP http_requests_total Telegraf collected metric +# TYPE http_requests_total gauge +http_requests_total{code="400",method="post"} 3 +`), + }, + { + name: "prometheus input histogram no buckets", + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 53423, + "http_request_duration_seconds_count": 144320, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + expected: []byte(` +`), + }, + { + name: "prometheus input histogram only bucket", + metric: testutil.MustMetric( + "prometheus", + 
map[string]string{ + "le": "0.5", + }, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 129389.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + expected: []byte(` +# HELP http_request_duration_seconds Telegraf collected metric +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.5"} 129389 +http_request_duration_seconds_bucket{le="+Inf"} 0 +http_request_duration_seconds_sum 0 +http_request_duration_seconds_count 0 +`), + }, + { + name: "simple with timestamp", + config: FormatConfig{ + TimestampExport: ExportTimestamp, + }, + metric: testutil.MustMetric( + "cpu", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(1574279268, 0), + ), + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 1574279268000 +`), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s, err := NewSerializer(FormatConfig{ + MetricSortOrder: SortMetrics, + TimestampExport: tt.config.TimestampExport, + StringHandling: tt.config.StringHandling, + }) + require.NoError(t, err) + actual, err := s.Serialize(tt.metric) + require.NoError(t, err) + + require.Equal(t, strings.TrimSpace(string(tt.expected)), + strings.TrimSpace(string(actual))) + }) + } +} + +func TestSerializeBatch(t *testing.T) { + tests := []struct { + name string + config FormatConfig + metrics []telegraf.Metric + expected []byte + }{ + { + name: "simple", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "one.example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "two.example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="one.example.org"} 42 +cpu_time_idle{host="two.example.org"} 42 +`), + }, + { + name: "multiple metric families", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "one.example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + "time_guest": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_guest Telegraf collected metric +# TYPE cpu_time_guest untyped +cpu_time_guest{host="one.example.org"} 42 +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="one.example.org"} 42 +`), + }, + { + name: "histogram", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 53423, + "http_request_duration_seconds_count": 144320, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 24054.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.1"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 33444.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.2"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 100392.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + 
"prometheus", + map[string]string{"le": "0.5"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 129389.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "1.0"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 133988.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 144320.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + }, + expected: []byte(` +# HELP http_request_duration_seconds Telegraf collected metric +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 24054 +http_request_duration_seconds_bucket{le="0.1"} 33444 +http_request_duration_seconds_bucket{le="0.2"} 100392 +http_request_duration_seconds_bucket{le="0.5"} 129389 +http_request_duration_seconds_bucket{le="1"} 133988 +http_request_duration_seconds_bucket{le="+Inf"} 144320 +http_request_duration_seconds_sum 53423 +http_request_duration_seconds_count 144320 +`), + }, + { + name: "", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.7560473e+07, + "rpc_duration_seconds_count": 2693, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 3102.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.05"}, + map[string]interface{}{ + "rpc_duration_seconds": 3272.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.5"}, + map[string]interface{}{ + "rpc_duration_seconds": 4773.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.9"}, + map[string]interface{}{ + "rpc_duration_seconds": 9001.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.99"}, + map[string]interface{}{ + "rpc_duration_seconds": 76656.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + }, + expected: []byte(` +# HELP rpc_duration_seconds Telegraf collected metric +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 3102 +rpc_duration_seconds{quantile="0.05"} 3272 +rpc_duration_seconds{quantile="0.5"} 4773 +rpc_duration_seconds{quantile="0.9"} 9001 +rpc_duration_seconds{quantile="0.99"} 76656 +rpc_duration_seconds_sum 1.7560473e+07 +rpc_duration_seconds_count 2693 +`), + }, + { + name: "newer sample", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 43.0, + }, + time.Unix(1, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle 43 +`), + }, + { + name: "invalid label", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host-name": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped 
+cpu_time_idle{host_name="example.org"} 42 +`), + }, + { + name: "discard strings", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + "cpu": "cpu0", + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle 42 +`), + }, + { + name: "string as label", + config: FormatConfig{ + StringHandling: StringAsLabel, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + "cpu": "cpu0", + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{cpu="cpu0"} 42 +`), + }, + { + name: "string as label duplicate tag", + config: FormatConfig{ + StringHandling: StringAsLabel, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42.0, + "cpu": "cpu1", + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{cpu="cpu0"} 42 +`), + }, + { + name: "multiple fields grouping", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 8106.04, + "time_system": 26271.4, + "time_user": 92904.33, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu1", + }, + map[string]interface{}{ + "time_guest": 8181.63, + "time_system": 25351.49, + "time_user": 96912.57, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu2", + }, + map[string]interface{}{ + "time_guest": 7470.04, + "time_system": 24998.43, + "time_user": 96034.08, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu3", + }, + map[string]interface{}{ + "time_guest": 7517.95, + "time_system": 24970.82, + "time_user": 94148, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_guest Telegraf collected metric +# TYPE cpu_time_guest untyped +cpu_time_guest{cpu="cpu0"} 8106.04 +cpu_time_guest{cpu="cpu1"} 8181.63 +cpu_time_guest{cpu="cpu2"} 7470.04 +cpu_time_guest{cpu="cpu3"} 7517.95 +# HELP cpu_time_system Telegraf collected metric +# TYPE cpu_time_system untyped +cpu_time_system{cpu="cpu0"} 26271.4 +cpu_time_system{cpu="cpu1"} 25351.49 +cpu_time_system{cpu="cpu2"} 24998.43 +cpu_time_system{cpu="cpu3"} 24970.82 +# HELP cpu_time_user Telegraf collected metric +# TYPE cpu_time_user untyped +cpu_time_user{cpu="cpu0"} 92904.33 +cpu_time_user{cpu="cpu1"} 96912.57 +cpu_time_user{cpu="cpu2"} 96034.08 +cpu_time_user{cpu="cpu3"} 94148 +`), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s, err := NewSerializer(FormatConfig{ + MetricSortOrder: SortMetrics, + TimestampExport: tt.config.TimestampExport, + StringHandling: tt.config.StringHandling, + }) + require.NoError(t, err) + actual, err := s.SerializeBatch(tt.metrics) + require.NoError(t, err) + + require.Equal(t, + strings.TrimSpace(string(tt.expected)), + strings.TrimSpace(string(actual))) + }) + } +} diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index aae590f78..dc9859e34 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -10,6 +10,7 @@ import ( 
"github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/plugins/serializers/json" "github.com/influxdata/telegraf/plugins/serializers/nowmetric" + "github.com/influxdata/telegraf/plugins/serializers/prometheus" "github.com/influxdata/telegraf/plugins/serializers/splunkmetric" "github.com/influxdata/telegraf/plugins/serializers/wavefront" ) @@ -45,43 +46,54 @@ type Serializer interface { // and can be used to instantiate _any_ of the serializers. type Config struct { // Dataformat can be one of the serializer types listed in NewSerializer. - DataFormat string + DataFormat string `toml:"data_format"` // Support tags in graphite protocol - GraphiteTagSupport bool + GraphiteTagSupport bool `toml:"graphite_tag_support"` // Maximum line length in bytes; influx format only - InfluxMaxLineBytes int + InfluxMaxLineBytes int `toml:"influx_max_line_bytes"` // Sort field keys, set to true only when debugging as it less performant // than unsorted fields; influx format only - InfluxSortFields bool + InfluxSortFields bool `toml:"influx_sort_fields"` // Support unsigned integer output; influx format only - InfluxUintSupport bool + InfluxUintSupport bool `toml:"influx_uint_support"` // Prefix to add to all measurements, only supports Graphite - Prefix string + Prefix string `toml:"prefix"` // Template for converting telegraf metrics into Graphite // only supports Graphite - Template string + Template string `toml:"template"` // Timestamp units to use for JSON formatted output - TimestampUnits time.Duration + TimestampUnits time.Duration `toml:"timestamp_units"` // Include HEC routing fields for splunkmetric output - HecRouting bool + HecRouting bool `toml:"hec_routing"` // Enable Splunk MultiMetric output (Splunk 8.0+) - SplunkmetricMultiMetric bool + SplunkmetricMultiMetric bool `toml:"splunkmetric_multi_metric"` // Point tags to use as the source name for Wavefront (if none found, host will be used). - WavefrontSourceOverride []string + WavefrontSourceOverride []string `toml:"wavefront_source_override"` // Use Strict rules to sanitize metric and tag names from invalid characters for Wavefront // When enabled forward slash (/) and comma (,) will be accepted - WavefrontUseStrict bool + WavefrontUseStrict bool `toml:"wavefront_use_strict"` + + // Include the metric timestamp on each sample. + PrometheusExportTimestamp bool `toml:"prometheus_export_timestamp"` + + // Sort prometheus metric families and metric samples. Useful for + // debugging. + PrometheusSortMetrics bool `toml:"prometheus_sort_metrics"` + + // Output string fields as metric labels; when false string fields are + // discarded. + PrometheusStringAsLabel bool `toml:"prometheus_string_as_label"` } // NewSerializer a Serializer interface based on the given config. 
@@ -103,12 +115,37 @@ func NewSerializer(config *Config) (Serializer, error) {
 		serializer, err = NewCarbon2Serializer()
 	case "wavefront":
 		serializer, err = NewWavefrontSerializer(config.Prefix, config.WavefrontUseStrict, config.WavefrontSourceOverride)
+	case "prometheus":
+		serializer, err = NewPrometheusSerializer(config)
 	default:
 		err = fmt.Errorf("Invalid data format: %s", config.DataFormat)
 	}
 	return serializer, err
 }
 
+func NewPrometheusSerializer(config *Config) (Serializer, error) {
+	exportTimestamp := prometheus.NoExportTimestamp
+	if config.PrometheusExportTimestamp {
+		exportTimestamp = prometheus.ExportTimestamp
+	}
+
+	sortMetrics := prometheus.NoSortMetrics
+	if config.PrometheusSortMetrics {
+		sortMetrics = prometheus.SortMetrics
+	}
+
+	stringAsLabels := prometheus.DiscardStrings
+	if config.PrometheusStringAsLabel {
+		stringAsLabels = prometheus.StringAsLabel
+	}
+
+	return prometheus.NewSerializer(prometheus.FormatConfig{
+		TimestampExport: exportTimestamp,
+		MetricSortOrder: sortMetrics,
+		StringHandling:  stringAsLabels,
+	})
+}
+
 func NewWavefrontSerializer(prefix string, useStrict bool, sourceOverride []string) (Serializer, error) {
 	return wavefront.NewSerializer(prefix, useStrict, sourceOverride)
 }

From 10db774db3f175b53ac584c16b184e836e0ed3c9 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 26 Nov 2019 17:31:36 -0800
Subject: [PATCH 1356/1815] Add prometheus round trip unit tests (#6720)

---
 plugins/inputs/filecount/filecount_test.go    |   1 +
 plugins/inputs/processes/processes_test.go    |   2 +-
 plugins/inputs/statsd/statsd_test.go          |   4 +
 plugins/inputs/syslog/commons_test.go         |  12 +-
 plugins/inputs/syslog/nontransparent_test.go  | 135 +++----
 plugins/inputs/syslog/octetcounting_test.go   | 343 ++++++++--------
 plugins/inputs/syslog/rfc5426_test.go         | 281 +++++++------
 plugins/inputs/zipkin/convert_test.go         |  11 +
 plugins/inputs/zipkin/zipkin_test.go          |  36 +-
 ...t_test.go => prometheus_client_v1_test.go} | 158 ++++++--
 .../prometheus_client_v2_test.go              | 376 ++++++++++++++++++
 testutil/accumulator.go                       |  30 +-
 testutil/metric.go                            |   2 +-
 13 files changed, 935 insertions(+), 456 deletions(-)
 rename plugins/outputs/prometheus_client/{prometheus_client_test.go => prometheus_client_v1_test.go} (65%)
 create mode 100644 plugins/outputs/prometheus_client/prometheus_client_v2_test.go

diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go
index dcd6d9d8e..3e0cadf37 100644
--- a/plugins/inputs/filecount/filecount_test.go
+++ b/plugins/inputs/filecount/filecount_test.go
@@ -144,6 +144,7 @@ func TestDirectoryWithTrailingSlash(t *testing.T) {
 				"size_bytes": 5096,
 			},
 			time.Unix(0, 0),
+			telegraf.Gauge,
 		),
 	}
 
diff --git a/plugins/inputs/processes/processes_test.go b/plugins/inputs/processes/processes_test.go
index fa9ad62da..23359a85d 100644
--- a/plugins/inputs/processes/processes_test.go
+++ b/plugins/inputs/processes/processes_test.go
@@ -153,7 +153,7 @@ func TestParkedProcess(t *testing.T) {
 			"zombies": 0,
 		},
 		time.Unix(0, 0),
-		telegraf.Untyped,
+		telegraf.Gauge,
 	),
 }
 actual := acc.GetTelegrafMetrics()
diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go
index ae025feec..1215eeb2d 100644
--- a/plugins/inputs/statsd/statsd_test.go
+++ b/plugins/inputs/statsd/statsd_test.go
@@ -875,6 +875,7 @@ func TestParse_DataDogTags(t *testing.T) {
 					"value": 1,
 				},
 				time.Now(),
+				telegraf.Counter,
 			),
 		},
 	},
@@ -892,6 +893,7 @@ func TestParse_DataDogTags(t *testing.T) {
 					"value": 10.1,
 				},
 				time.Now(),
+				telegraf.Gauge,
 			),
 		},
 	},
@@
-948,6 +950,7 @@ func TestParse_DataDogTags(t *testing.T) { "value": 42, }, time.Now(), + telegraf.Counter, ), }, }, @@ -1668,6 +1671,7 @@ func TestTCP(t *testing.T) { "value": 42, }, time.Now(), + telegraf.Counter, ), }, acc.GetTelegrafMetrics(), diff --git a/plugins/inputs/syslog/commons_test.go b/plugins/inputs/syslog/commons_test.go index 5d5562fc7..10f2ddf51 100644 --- a/plugins/inputs/syslog/commons_test.go +++ b/plugins/inputs/syslog/commons_test.go @@ -1,10 +1,12 @@ package syslog import ( + "time" + + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" framing "github.com/influxdata/telegraf/internal/syslog" "github.com/influxdata/telegraf/testutil" - "time" ) var ( @@ -14,16 +16,16 @@ var ( type testCasePacket struct { name string data []byte - wantBestEffort *testutil.Metric - wantStrict *testutil.Metric + wantBestEffort telegraf.Metric + wantStrict telegraf.Metric werr bool } type testCaseStream struct { name string data []byte - wantBestEffort []testutil.Metric - wantStrict []testutil.Metric + wantBestEffort []telegraf.Metric + wantStrict []telegraf.Metric werr int // how many errors we expect in the strict mode? } diff --git a/plugins/inputs/syslog/nontransparent_test.go b/plugins/inputs/syslog/nontransparent_test.go index 2bf6aa4ef..d0352c6ae 100644 --- a/plugins/inputs/syslog/nontransparent_test.go +++ b/plugins/inputs/syslog/nontransparent_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - "github.com/google/go-cmp/cmp" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" framing "github.com/influxdata/telegraf/internal/syslog" "github.com/influxdata/telegraf/testutil" @@ -21,10 +21,16 @@ func getTestCasesForNonTransparent() []testCaseStream { { name: "1st/avg/ok", data: []byte(`<29>1 2016-02-21T04:32:57+00:00 web1 someservice 2341 2 [origin][meta sequence="14125553" service="someservice"] "GET /v1/ok HTTP/1.1" 200 145 "-" "hacheck 0.9.0" 24306 127.0.0.1:40124 575`), - wantStrict: []testutil.Metric{ - { - Measurement: "syslog", - Fields: map[string]interface{}{ + wantStrict: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "notice", + "facility": "daemon", + "hostname": "web1", + "appname": "someservice", + }, + map[string]interface{}{ "version": uint16(1), "timestamp": time.Unix(1456029177, 0).UnixNano(), "procid": "2341", @@ -36,19 +42,19 @@ func getTestCasesForNonTransparent() []testCaseStream { "severity_code": 5, "facility_code": 3, }, - Tags: map[string]string{ - "severity": "notice", - "facility": "daemon", - "hostname": "web1", - "appname": "someservice", - }, - Time: defaultTime, - }, + defaultTime, + ), }, - wantBestEffort: []testutil.Metric{ - { - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "notice", + "facility": "daemon", + "hostname": "web1", + "appname": "someservice", + }, + map[string]interface{}{ "version": uint16(1), "timestamp": time.Unix(1456029177, 0).UnixNano(), "procid": "2341", @@ -60,75 +66,69 @@ func getTestCasesForNonTransparent() []testCaseStream { "severity_code": 5, "facility_code": 3, }, - Tags: map[string]string{ - "severity": "notice", - "facility": "daemon", - "hostname": "web1", - "appname": "someservice", - }, - Time: defaultTime, - }, + defaultTime, + ), }, werr: 1, }, { name: "1st/min/ok//2nd/min/ok", data: []byte("<1>2 - - - - - -\n<4>11 - - - - - -\n"), - wantStrict: []testutil.Metric{ - { - Measurement: "syslog", - Fields: 
map[string]interface{}{ + wantStrict: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(2), "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", + defaultTime, + ), + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "warning", "facility": "kern", }, - Time: defaultTime, - }, - { - Measurement: "syslog", - Fields: map[string]interface{}{ + map[string]interface{}{ "version": uint16(11), "severity_code": 4, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "warning", - "facility": "kern", - }, - Time: defaultTime.Add(time.Nanosecond), - }, + defaultTime.Add(time.Nanosecond), + ), }, - wantBestEffort: []testutil.Metric{ - { - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(2), "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", + defaultTime, + ), + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "warning", "facility": "kern", }, - Time: defaultTime, - }, - { - Measurement: "syslog", - Fields: map[string]interface{}{ + map[string]interface{}{ "version": uint16(11), "severity_code": 4, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "warning", - "facility": "kern", - }, - Time: defaultTime.Add(time.Nanosecond), - }, + defaultTime.Add(time.Nanosecond), + ), }, }, } @@ -186,13 +186,7 @@ func testStrictNonTransparent(t *testing.T, protocol string, address string, wan if len(acc.Errors) != tc.werr { t.Fatalf("Got unexpected errors. 
want error = %v, errors = %v\n", tc.werr, acc.Errors) } - var got []testutil.Metric - for _, metric := range acc.Metrics { - got = append(got, *metric) - } - if !cmp.Equal(tc.wantStrict, got) { - t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(tc.wantStrict, got)) - } + testutil.RequireMetricsEqual(t, tc.wantStrict, acc.GetTelegrafMetrics()) }) } } @@ -240,14 +234,7 @@ func testBestEffortNonTransparent(t *testing.T, protocol string, address string, acc.Wait(len(tc.wantBestEffort)) } - // Verify - var got []testutil.Metric - for _, metric := range acc.Metrics { - got = append(got, *metric) - } - if !cmp.Equal(tc.wantBestEffort, got) { - t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(tc.wantBestEffort, got)) - } + testutil.RequireMetricsEqual(t, tc.wantBestEffort, acc.GetTelegrafMetrics()) }) } } diff --git a/plugins/inputs/syslog/octetcounting_test.go b/plugins/inputs/syslog/octetcounting_test.go index 4f8f2d278..ea86b808d 100644 --- a/plugins/inputs/syslog/octetcounting_test.go +++ b/plugins/inputs/syslog/octetcounting_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/google/go-cmp/cmp" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" framing "github.com/influxdata/telegraf/internal/syslog" "github.com/influxdata/telegraf/testutil" @@ -22,10 +22,16 @@ func getTestCasesForOctetCounting() []testCaseStream { { name: "1st/avg/ok", data: []byte(`188 <29>1 2016-02-21T04:32:57+00:00 web1 someservice 2341 2 [origin][meta sequence="14125553" service="someservice"] "GET /v1/ok HTTP/1.1" 200 145 "-" "hacheck 0.9.0" 24306 127.0.0.1:40124 575`), - wantStrict: []testutil.Metric{ - { - Measurement: "syslog", - Fields: map[string]interface{}{ + wantStrict: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "notice", + "facility": "daemon", + "hostname": "web1", + "appname": "someservice", + }, + map[string]interface{}{ "version": uint16(1), "timestamp": time.Unix(1456029177, 0).UnixNano(), "procid": "2341", @@ -37,19 +43,19 @@ func getTestCasesForOctetCounting() []testCaseStream { "severity_code": 5, "facility_code": 3, }, - Tags: map[string]string{ - "severity": "notice", - "facility": "daemon", - "hostname": "web1", - "appname": "someservice", - }, - Time: defaultTime, - }, + defaultTime, + ), }, - wantBestEffort: []testutil.Metric{ - { - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "notice", + "facility": "daemon", + "hostname": "web1", + "appname": "someservice", + }, + map[string]interface{}{ "version": uint16(1), "timestamp": time.Unix(1456029177, 0).UnixNano(), "procid": "2341", @@ -61,221 +67,215 @@ func getTestCasesForOctetCounting() []testCaseStream { "severity_code": 5, "facility_code": 3, }, - Tags: map[string]string{ - "severity": "notice", - "facility": "daemon", - "hostname": "web1", - "appname": "someservice", - }, - Time: defaultTime, - }, + defaultTime, + ), }, }, { name: "1st/min/ok//2nd/min/ok", data: []byte("16 <1>2 - - - - - -17 <4>11 - - - - - -"), - wantStrict: []testutil.Metric{ - { - Measurement: "syslog", - Fields: map[string]interface{}{ + wantStrict: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(2), "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", + defaultTime, + ), + testutil.MustMetric( + "syslog", + map[string]string{ +
"severity": "warning", "facility": "kern", }, - Time: defaultTime, - }, - { - Measurement: "syslog", - Fields: map[string]interface{}{ + map[string]interface{}{ "version": uint16(11), "severity_code": 4, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "warning", - "facility": "kern", - }, - Time: defaultTime.Add(time.Nanosecond), - }, + defaultTime.Add(time.Nanosecond), + ), }, - wantBestEffort: []testutil.Metric{ - { - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(2), "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", + defaultTime, + ), + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "warning", "facility": "kern", }, - Time: defaultTime, - }, - { - Measurement: "syslog", - Fields: map[string]interface{}{ + map[string]interface{}{ "version": uint16(11), "severity_code": 4, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "warning", - "facility": "kern", - }, - Time: defaultTime.Add(time.Nanosecond), - }, + defaultTime.Add(time.Nanosecond), + ), }, }, { name: "1st/utf8/ok", data: []byte("23 <1>1 - - - - - - hellø"), - wantStrict: []testutil.Metric{ - { - Measurement: "syslog", - Fields: map[string]interface{}{ + wantStrict: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(1), "message": "hellø", "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, - wantBestEffort: []testutil.Metric{ - { - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(1), "message": "hellø", "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, }, { name: "1st/nl/ok", // newline data: []byte("28 <1>3 - - - - - - hello\nworld"), - wantStrict: []testutil.Metric{ - { - Measurement: "syslog", - Fields: map[string]interface{}{ + wantStrict: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(3), "message": "hello\nworld", "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, - wantBestEffort: []testutil.Metric{ - { - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(3), "message": "hello\nworld", "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, }, { name: "1st/uf/ko", // underflow (msglen less than provided octets) data: []byte("16 <1>2"), wantStrict: nil, - wantBestEffort: []testutil.Metric{ - { - Measurement: "syslog", - Fields: map[string]interface{}{ + 
wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(2), "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, werr: 1, }, { name: "1st/min/ok", data: []byte("16 <1>1 - - - - - -"), - wantStrict: []testutil.Metric{ - { - Measurement: "syslog", - Fields: map[string]interface{}{ + wantStrict: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(1), "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, - wantBestEffort: []testutil.Metric{ - { - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(1), "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, }, { name: "1st/uf/mf", // The first "underflow" message breaks also the second one data: []byte("16 <1>217 <11>1 - - - - - -"), wantStrict: nil, - wantBestEffort: []testutil.Metric{ - { - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(217), "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, werr: 1, }, @@ -287,10 +287,16 @@ func getTestCasesForOctetCounting() []testCaseStream { { name: "1st/max/ok", data: []byte(fmt.Sprintf("8192 <%d>%d %s %s %s %s %s - %s", maxP, maxV, maxTS, maxH, maxA, maxPID, maxMID, message7681)), - wantStrict: []testutil.Metric{ - { - Measurement: "syslog", - Fields: map[string]interface{}{ + wantStrict: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "debug", + "facility": "local7", + "hostname": maxH, + "appname": maxA, + }, + map[string]interface{}{ "version": maxV, "timestamp": time.Unix(1514764799, 999999000).UnixNano(), "message": message7681, @@ -299,19 +305,19 @@ func getTestCasesForOctetCounting() []testCaseStream { "facility_code": 23, "severity_code": 7, }, - Tags: map[string]string{ - "severity": "debug", - "facility": "local7", - "hostname": maxH, - "appname": maxA, - }, - Time: defaultTime, - }, + defaultTime, + ), }, - wantBestEffort: []testutil.Metric{ - { - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "debug", + "facility": "local7", + "hostname": maxH, + "appname": maxA, + }, + map[string]interface{}{ "version": maxV, "timestamp": time.Unix(1514764799, 999999000).UnixNano(), "message": message7681, @@ -320,14 +326,8 @@ func getTestCasesForOctetCounting() []testCaseStream { "facility_code": 23, "severity_code": 7, }, - Tags: map[string]string{ - "severity": "debug", - "facility": "local7", - "hostname": maxH, - "appname": maxA, - }, - Time: defaultTime, - }, + 
defaultTime, + ), }, }, } @@ -386,13 +386,7 @@ func testStrictOctetCounting(t *testing.T, protocol string, address string, want if len(acc.Errors) != tc.werr { t.Fatalf("Got unexpected errors. want error = %v, errors = %v\n", tc.werr, acc.Errors) } - var got []testutil.Metric - for _, metric := range acc.Metrics { - got = append(got, *metric) - } - if !cmp.Equal(tc.wantStrict, got) { - t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(tc.wantStrict, got)) - } + testutil.RequireMetricsEqual(t, tc.wantStrict, acc.GetTelegrafMetrics()) }) } } @@ -440,14 +434,7 @@ func testBestEffortOctetCounting(t *testing.T, protocol string, address string, acc.Wait(len(tc.wantBestEffort)) } - // Verify - var got []testutil.Metric - for _, metric := range acc.Metrics { - got = append(got, *metric) - } - if !cmp.Equal(tc.wantBestEffort, got) { - t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(tc.wantBestEffort, got)) - } + testutil.RequireMetricsEqual(t, tc.wantBestEffort, acc.GetTelegrafMetrics()) }) } } diff --git a/plugins/inputs/syslog/rfc5426_test.go b/plugins/inputs/syslog/rfc5426_test.go index ba856b0ac..00efb9479 100644 --- a/plugins/inputs/syslog/rfc5426_test.go +++ b/plugins/inputs/syslog/rfc5426_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/google/go-cmp/cmp" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -25,73 +25,79 @@ func getTestCasesForRFC5426() []testCasePacket { { name: "complete", data: []byte("<1>1 - - - - - - A"), - wantBestEffort: &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(1), "message": "A", "facility_code": 0, "severity_code": 1, }, - Tags: map[string]string{ + defaultTime, + ), + wantStrict: testutil.MustMetric( + "syslog", + map[string]string{ "severity": "alert", "facility": "kern", }, - Time: defaultTime, - }, - wantStrict: &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + map[string]interface{}{ "version": uint16(1), "message": "A", "facility_code": 0, "severity_code": 1, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, { name: "one/per/packet", data: []byte("<1>3 - - - - - - A<1>4 - - - - - - B"), - wantBestEffort: &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(3), "message": "A<1>4 - - - - - - B", "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ + defaultTime, + ), + wantStrict: testutil.MustMetric( + "syslog", + map[string]string{ "severity": "alert", "facility": "kern", }, - Time: defaultTime, - }, - wantStrict: &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + map[string]interface{}{ "version": uint16(3), "message": "A<1>4 - - - - - - B", "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, { name: "average", data: []byte(`<29>1 2016-02-21T04:32:57+00:00 web1 someservice 2341 2 [origin][meta sequence="14125553" service="someservice"] "GET /v1/ok HTTP/1.1" 200 145 "-" "hacheck 0.9.0" 24306 127.0.0.1:40124 575`), - wantBestEffort: 
&testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "notice", + "facility": "daemon", + "hostname": "web1", + "appname": "someservice", + }, + map[string]interface{}{ "version": uint16(1), "timestamp": time.Unix(1456029177, 0).UnixNano(), "procid": "2341", @@ -103,17 +109,17 @@ func getTestCasesForRFC5426() []testCasePacket { "severity_code": 5, "facility_code": 3, }, - Tags: map[string]string{ + defaultTime, + ), + wantStrict: testutil.MustMetric( + "syslog", + map[string]string{ "severity": "notice", "facility": "daemon", "hostname": "web1", "appname": "someservice", }, - Time: defaultTime, - }, - wantStrict: &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + map[string]interface{}{ "version": uint16(1), "timestamp": time.Unix(1456029177, 0).UnixNano(), "procid": "2341", @@ -125,21 +131,21 @@ func getTestCasesForRFC5426() []testCasePacket { "severity_code": 5, "facility_code": 3, }, - Tags: map[string]string{ - "severity": "notice", - "facility": "daemon", - "hostname": "web1", - "appname": "someservice", - }, - Time: defaultTime, - }, + defaultTime, + ), }, { name: "max", data: []byte(fmt.Sprintf("<%d>%d %s %s %s %s %s - %s", maxP, maxV, maxTS, maxH, maxA, maxPID, maxMID, message7681)), - wantBestEffort: &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "debug", + "facility": "local7", + "hostname": maxH, + "appname": maxA, + }, + map[string]interface{}{ "version": maxV, "timestamp": time.Unix(1514764799, 999999000).UnixNano(), "message": message7681, @@ -148,17 +154,17 @@ func getTestCasesForRFC5426() []testCasePacket { "severity_code": 7, "facility_code": 23, }, - Tags: map[string]string{ + defaultTime, + ), + wantStrict: testutil.MustMetric( + "syslog", + map[string]string{ "severity": "debug", "facility": "local7", "hostname": maxH, "appname": maxA, }, - Time: defaultTime, - }, - wantStrict: &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + map[string]interface{}{ "version": maxV, "timestamp": time.Unix(1514764799, 999999000).UnixNano(), "message": message7681, @@ -167,64 +173,58 @@ func getTestCasesForRFC5426() []testCasePacket { "severity_code": 7, "facility_code": 23, }, - Tags: map[string]string{ - "severity": "debug", - "facility": "local7", - "hostname": maxH, - "appname": maxA, - }, - Time: defaultTime, - }, + defaultTime, + ), }, { name: "minimal/incomplete", data: []byte("<1>2"), - wantBestEffort: &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(2), "facility_code": 0, "severity_code": 1, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), werr: true, }, { name: "trim message", data: []byte("<1>1 - - - - - - \tA\n"), - wantBestEffort: &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(1), "message": "\tA", "facility_code": 0, "severity_code": 1, }, - Tags: map[string]string{ + defaultTime, + ), + wantStrict: testutil.MustMetric( + "syslog", + 
map[string]string{ "severity": "alert", "facility": "kern", }, - Time: defaultTime, - }, - wantStrict: &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + map[string]interface{}{ "version": uint16(1), "message": "\tA", "facility_code": 0, "severity_code": 1, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, } @@ -269,19 +269,17 @@ func testRFC5426(t *testing.T, protocol string, address string, bestEffort bool) } // Compare - var got *testutil.Metric - var want *testutil.Metric + var got telegraf.Metric + var want telegraf.Metric if len(acc.Metrics) > 0 { - got = acc.Metrics[0] + got = acc.GetTelegrafMetrics()[0] } if bestEffort { want = tc.wantBestEffort } else { want = tc.wantStrict } - if !cmp.Equal(want, got) { - t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(want, got)) - } + testutil.RequireMetricEqual(t, want, got) }) } } @@ -346,23 +344,22 @@ func TestTimeIncrement_udp(t *testing.T) { // Wait acc.Wait(1) - want := &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ - "version": uint16(1), - "facility_code": 0, - "severity_code": 1, - }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: getNow(), - } - - if !cmp.Equal(want, acc.Metrics[0]) { - t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(want, acc.Metrics[0])) + want := []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ + "version": uint16(1), + "facility_code": 0, + "severity_code": 1, + }, + getNow(), + ), } + testutil.RequireMetricsEqual(t, want, acc.GetTelegrafMetrics()) // New one with different time atomic.StoreInt64(&i, atomic.LoadInt64(&i)+1) @@ -377,23 +374,22 @@ func TestTimeIncrement_udp(t *testing.T) { // Wait acc.Wait(1) - want = &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ - "version": uint16(1), - "facility_code": 0, - "severity_code": 1, - }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: getNow(), - } - - if !cmp.Equal(want, acc.Metrics[0]) { - t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(want, acc.Metrics[0])) + want = []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ + "version": uint16(1), + "facility_code": 0, + "severity_code": 1, + }, + getNow(), + ), } + testutil.RequireMetricsEqual(t, want, acc.GetTelegrafMetrics()) // New one with same time as previous one @@ -407,21 +403,20 @@ func TestTimeIncrement_udp(t *testing.T) { // Wait acc.Wait(1) - want = &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ - "version": uint16(1), - "facility_code": 0, - "severity_code": 1, - }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: getNow().Add(time.Nanosecond), - } - - if !cmp.Equal(want, acc.Metrics[0]) { - t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(want, acc.Metrics[0])) + want = []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ + "version": uint16(1), + "facility_code": 0, + "severity_code": 1, + }, + getNow().Add(time.Nanosecond), + ), } + testutil.RequireMetricsEqual(t, want, acc.GetTelegrafMetrics()) } diff --git a/plugins/inputs/zipkin/convert_test.go b/plugins/inputs/zipkin/convert_test.go index 92c1ba3ff..23a951594 100644 
--- a/plugins/inputs/zipkin/convert_test.go +++ b/plugins/inputs/zipkin/convert_test.go @@ -121,6 +121,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(53106) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851331000).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -138,6 +139,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(53106) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851331000).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -152,6 +154,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(50410) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360904552000).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -169,6 +172,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(50410) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360904552000).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -183,6 +187,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -199,6 +204,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -215,6 +221,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -231,6 +238,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -248,6 +256,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, }, wantErr: false, @@ -296,6 +305,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(1) * time.Nanosecond).Nanoseconds(), }, Time: time.Unix(1, 0).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -312,6 +322,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(1) * time.Nanosecond).Nanoseconds(), }, Time: time.Unix(1, 0).UTC(), + Type: telegraf.Untyped, }, }, }, diff --git a/plugins/inputs/zipkin/zipkin_test.go b/plugins/inputs/zipkin/zipkin_test.go index c022b6055..77bef853b 100644 --- a/plugins/inputs/zipkin/zipkin_test.go +++ b/plugins/inputs/zipkin/zipkin_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" ) @@ -40,6 +41,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(53106) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851331000).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -57,6 +59,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(53106) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851331000).UTC(), + Type: 
telegraf.Untyped, }, { Measurement: "zipkin", @@ -71,6 +74,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(50410) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360904552000).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -88,6 +92,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(50410) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360904552000).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -102,6 +107,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -118,6 +124,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -134,6 +141,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -150,6 +158,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -167,6 +176,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, }, wantErr: false, @@ -189,6 +199,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(1) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -205,6 +216,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(1) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -221,6 +233,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(1) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, }, }, @@ -240,7 +253,9 @@ func TestZipkinPlugin(t *testing.T) { }, Fields: map[string]interface{}{ "duration_ns": int64(3000000), - }, Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(), + }, + Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -257,6 +272,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(3000000), }, Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -273,6 +289,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(3000000), }, Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -290,6 +307,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(3000000), }, Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -307,6 +325,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(3000000), }, Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(), + Type: 
telegraf.Untyped, }, { Measurement: "zipkin", @@ -324,6 +343,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(3000000), }, Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -338,6 +358,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(10000000), }, Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -354,6 +375,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(10000000), }, Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -370,6 +392,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(10000000), }, Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -387,6 +410,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(10000000), }, Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -404,6 +428,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(10000000), }, Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -421,6 +446,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(10000000), }, Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -438,6 +464,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(10000000), }, Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -455,6 +482,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(10000000), }, Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -469,6 +497,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(23393000), }, Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -485,6 +514,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(23393000), }, Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -501,6 +531,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(23393000), }, Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -518,6 +549,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(23393000), }, Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -535,6 +567,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(23393000), }, Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -552,6 +585,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(23393000), }, Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, }, }, diff --git a/plugins/outputs/prometheus_client/prometheus_client_test.go b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go similarity index 65% rename from plugins/outputs/prometheus_client/prometheus_client_test.go rename to plugins/outputs/prometheus_client/prometheus_client_v1_test.go 
index 6af8da8da..6a9770fdc 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go @@ -1,18 +1,22 @@ package prometheus import ( + "fmt" "io/ioutil" "net/http" + "net/http/httptest" "strings" "testing" "time" "github.com/influxdata/telegraf" + inputs "github.com/influxdata/telegraf/plugins/inputs/prometheus" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) func TestMetricVersion1(t *testing.T) { + Logger := testutil.Logger{Name: "outputs.prometheus_client"} tests := []struct { name string output *PrometheusClient @@ -26,7 +30,7 @@ func TestMetricVersion1(t *testing.T) { MetricVersion: 1, CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", - Log: testutil.Logger{}, + Log: Logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -53,7 +57,7 @@ cpu_time_idle{host="example.org"} 42 MetricVersion: 1, CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", - Log: testutil.Logger{}, + Log: Logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -80,7 +84,7 @@ cpu_time_idle{host="example.org"} 42 MetricVersion: 1, CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", - Log: testutil.Logger{}, + Log: Logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -108,7 +112,7 @@ cpu_time_idle{host="example.org"} 42 MetricVersion: 1, CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", - Log: testutil.Logger{}, + Log: Logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -136,7 +140,7 @@ cpu_time_idle{host="example.org"} 42 MetricVersion: 1, CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", - Log: testutil.Logger{}, + Log: Logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -176,7 +180,7 @@ http_request_duration_seconds_count 144320 MetricVersion: 1, CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", - Log: testutil.Logger{}, + Log: Logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -238,67 +242,133 @@ rpc_duration_seconds_count 2693 } } -func TestMetricVersion2(t *testing.T) { +func TestRoundTripMetricVersion1(t *testing.T) { + Logger := testutil.Logger{Name: "outputs.prometheus_client"} tests := []struct { - name string - output *PrometheusClient - metrics []telegraf.Metric - expected []byte + name string + data []byte }{ { - name: "simple", - output: &PrometheusClient{ - Listen: ":0", - MetricVersion: 2, - CollectorsExclude: []string{"gocollector", "process"}, - Path: "/metrics", - Log: testutil.Logger{}, - }, - metrics: []telegraf.Metric{ - testutil.MustMetric( - "cpu", - map[string]string{ - "host": "example.org", - }, - map[string]interface{}{ - "time_idle": 42.0, - }, - time.Unix(0, 0), - ), - }, - expected: []byte(` + name: "untyped", + data: []byte(` # HELP cpu_time_idle Telegraf collected metric # TYPE cpu_time_idle untyped cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "counter", + data: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle counter +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "gauge", + data: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle gauge +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "multi", + data: []byte(` +# HELP cpu_time_guest Telegraf collected metric +# TYPE cpu_time_guest gauge +cpu_time_guest{host="one.example.org"} 42 +cpu_time_guest{host="two.example.org"} 42 +# HELP 
cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle gauge +cpu_time_idle{host="one.example.org"} 42 +cpu_time_idle{host="two.example.org"} 42 +`), + }, + { + name: "histogram", + data: []byte(` +# HELP http_request_duration_seconds Telegraf collected metric +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 24054 +http_request_duration_seconds_bucket{le="0.1"} 33444 +http_request_duration_seconds_bucket{le="0.2"} 100392 +http_request_duration_seconds_bucket{le="0.5"} 129389 +http_request_duration_seconds_bucket{le="1"} 133988 +http_request_duration_seconds_bucket{le="+Inf"} 144320 +http_request_duration_seconds_sum 53423 +http_request_duration_seconds_count 144320 +`), + }, + { + name: "summary", + data: []byte(` +# HELP rpc_duration_seconds Telegraf collected metric +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 3102 +rpc_duration_seconds{quantile="0.05"} 3272 +rpc_duration_seconds{quantile="0.5"} 4773 +rpc_duration_seconds{quantile="0.9"} 9001 +rpc_duration_seconds{quantile="0.99"} 76656 +rpc_duration_seconds_sum 1.7560473e+07 +rpc_duration_seconds_count 2693 `), }, } + + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + url := fmt.Sprintf("http://%s", ts.Listener.Addr()) + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := tt.output.Init() - require.NoError(t, err) + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write(tt.data) + }) - err = tt.output.Connect() + input := &inputs.Prometheus{ + URLs: []string{url}, + URLTag: "", + MetricVersion: 1, + } + var acc testutil.Accumulator + err := input.Start(&acc) require.NoError(t, err) + err = input.Gather(&acc) + require.NoError(t, err) + input.Stop() + metrics := acc.GetTelegrafMetrics() + + output := &PrometheusClient{ + Listen: "127.0.0.1:0", + Path: defaultPath, + MetricVersion: 1, + Log: Logger, + CollectorsExclude: []string{"gocollector", "process"}, + } + err = output.Init() + require.NoError(t, err) + err = output.Connect() + require.NoError(t, err) defer func() { - err := tt.output.Close() + err = output.Close() require.NoError(t, err) }() - - err = tt.output.Write(tt.metrics) + err = output.Write(metrics) require.NoError(t, err) - resp, err := http.Get(tt.output.URL()) + resp, err := http.Get(output.URL()) require.NoError(t, err) - require.Equal(t, http.StatusOK, resp.StatusCode) - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + + actual, err := ioutil.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, - strings.TrimSpace(string(tt.expected)), - strings.TrimSpace(string(body))) + strings.TrimSpace(string(tt.data)), + strings.TrimSpace(string(actual))) }) } } diff --git a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go new file mode 100644 index 000000000..755bd5dc4 --- /dev/null +++ b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go @@ -0,0 +1,376 @@ +package prometheus + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf" + inputs "github.com/influxdata/telegraf/plugins/inputs/prometheus" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestMetricVersion2(t *testing.T) { + Logger := testutil.Logger{Name: "outputs.prometheus_client"} + tests := []struct { + name string + output 
*PrometheusClient + metrics []telegraf.Metric + expected []byte + }{ + { + name: "untyped telegraf metric", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 2, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "strings as labels", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 2, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + StringAsLabel: true, + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + "host": "example.org", + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "when strings as labels is false string fields are discarded", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 2, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + StringAsLabel: false, + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + "host": "example.org", + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle 42 +`), + }, + { + name: "untype prometheus metric", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 2, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "cpu_time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "telegraf histogram", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 2, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu1", + }, + map[string]interface{}{ + "usage_idle_sum": 2000.0, + "usage_idle_count": 20.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu1", + "le": "0.0", + }, + map[string]interface{}{ + "usage_idle_bucket": 0.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu1", + "le": "50.0", + }, + map[string]interface{}{ + "usage_idle_bucket": 7.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu1", + "le": "100.0", + }, + map[string]interface{}{ + "usage_idle_bucket": 20.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu1", + "le": "+Inf", + }, + map[string]interface{}{ + "usage_idle_bucket": 20.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + }, + expected: []byte(` +# HELP 
cpu_usage_idle Telegraf collected metric +# TYPE cpu_usage_idle histogram +cpu_usage_idle_bucket{cpu="cpu1",le="0"} 0 +cpu_usage_idle_bucket{cpu="cpu1",le="50"} 7 +cpu_usage_idle_bucket{cpu="cpu1",le="100"} 20 +cpu_usage_idle_bucket{cpu="cpu1",le="+Inf"} 20 +cpu_usage_idle_sum{cpu="cpu1"} 2000 +cpu_usage_idle_count{cpu="cpu1"} 20 +`), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.output.Init() + require.NoError(t, err) + + err = tt.output.Connect() + require.NoError(t, err) + + defer func() { + err := tt.output.Close() + require.NoError(t, err) + }() + + err = tt.output.Write(tt.metrics) + require.NoError(t, err) + + resp, err := http.Get(tt.output.URL()) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + require.Equal(t, + strings.TrimSpace(string(tt.expected)), + strings.TrimSpace(string(body))) + }) + } +} + +func TestRoundTripMetricVersion2(t *testing.T) { + Logger := testutil.Logger{Name: "outputs.prometheus_client"} + tests := []struct { + name string + data []byte + }{ + { + name: "untyped", + data: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "counter", + data: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle counter +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "gauge", + data: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle gauge +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "multi", + data: []byte(` +# HELP cpu_time_guest Telegraf collected metric +# TYPE cpu_time_guest gauge +cpu_time_guest{host="one.example.org"} 42 +cpu_time_guest{host="two.example.org"} 42 +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle gauge +cpu_time_idle{host="one.example.org"} 42 +cpu_time_idle{host="two.example.org"} 42 +`), + }, + { + name: "histogram", + data: []byte(` +# HELP http_request_duration_seconds Telegraf collected metric +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 24054 +http_request_duration_seconds_bucket{le="0.1"} 33444 +http_request_duration_seconds_bucket{le="0.2"} 100392 +http_request_duration_seconds_bucket{le="0.5"} 129389 +http_request_duration_seconds_bucket{le="1"} 133988 +http_request_duration_seconds_bucket{le="+Inf"} 144320 +http_request_duration_seconds_sum 53423 +http_request_duration_seconds_count 144320 +`), + }, + { + name: "summary", + data: []byte(` +# HELP rpc_duration_seconds Telegraf collected metric +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 3102 +rpc_duration_seconds{quantile="0.05"} 3272 +rpc_duration_seconds{quantile="0.5"} 4773 +rpc_duration_seconds{quantile="0.9"} 9001 +rpc_duration_seconds{quantile="0.99"} 76656 +rpc_duration_seconds_sum 1.7560473e+07 +rpc_duration_seconds_count 2693 +`), + }, + } + + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + url := fmt.Sprintf("http://%s", ts.Listener.Addr()) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write(tt.data) + }) + + input := &inputs.Prometheus{ + URLs: []string{url}, + URLTag: "", + MetricVersion: 2, + } + var acc testutil.Accumulator + err := input.Start(&acc) + require.NoError(t, err) + 
err = input.Gather(&acc) + require.NoError(t, err) + input.Stop() + + metrics := acc.GetTelegrafMetrics() + + output := &PrometheusClient{ + Listen: "127.0.0.1:0", + Path: defaultPath, + MetricVersion: 2, + Log: Logger, + CollectorsExclude: []string{"gocollector", "process"}, + } + err = output.Init() + require.NoError(t, err) + err = output.Connect() + require.NoError(t, err) + defer func() { + err = output.Close() + require.NoError(t, err) + }() + err = output.Write(metrics) + require.NoError(t, err) + + resp, err := http.Get(output.URL()) + require.NoError(t, err) + + actual, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + require.Equal(t, + strings.TrimSpace(string(tt.data)), + strings.TrimSpace(string(actual))) + }) + } +} diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 9e4e82e27..64c3d19fe 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -28,6 +28,7 @@ type Metric struct { Tags map[string]string Fields map[string]interface{} Time time.Time + Type telegraf.ValueType } func (p *Metric) String() string { @@ -75,11 +76,11 @@ func (a *Accumulator) ClearMetrics() { a.Metrics = make([]*Metric, 0) } -// AddFields adds a measurement point with a specified timestamp. -func (a *Accumulator) AddFields( +func (a *Accumulator) addFields( measurement string, - fields map[string]interface{}, tags map[string]string, + fields map[string]interface{}, + tp telegraf.ValueType, timestamp ...time.Time, ) { a.Lock() @@ -132,18 +133,29 @@ func (a *Accumulator) AddFields( Fields: fieldsCopy, Tags: tagsCopy, Time: t, + Type: tp, } a.Metrics = append(a.Metrics, p) } +// AddFields adds a measurement point with a specified timestamp. +func (a *Accumulator) AddFields( + measurement string, + fields map[string]interface{}, + tags map[string]string, + timestamp ...time.Time, +) { + a.addFields(measurement, tags, fields, telegraf.Untyped, timestamp...) +} + func (a *Accumulator) AddCounter( measurement string, fields map[string]interface{}, tags map[string]string, timestamp ...time.Time, ) { - a.AddFields(measurement, fields, tags, timestamp...) + a.addFields(measurement, tags, fields, telegraf.Counter, timestamp...) } func (a *Accumulator) AddGauge( @@ -152,12 +164,12 @@ func (a *Accumulator) AddGauge( tags map[string]string, timestamp ...time.Time, ) { - a.AddFields(measurement, fields, tags, timestamp...) + a.addFields(measurement, tags, fields, telegraf.Gauge, timestamp...) } func (a *Accumulator) AddMetrics(metrics []telegraf.Metric) { for _, m := range metrics { - a.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) + a.addFields(m.Name(), m.Tags(), m.Fields(), m.Type(), m.Time()) } } @@ -167,7 +179,7 @@ func (a *Accumulator) AddSummary( tags map[string]string, timestamp ...time.Time, ) { - a.AddFields(measurement, fields, tags, timestamp...) + a.addFields(measurement, tags, fields, telegraf.Summary, timestamp...) } func (a *Accumulator) AddHistogram( @@ -176,11 +188,11 @@ func (a *Accumulator) AddHistogram( tags map[string]string, timestamp ...time.Time, ) { - a.AddFields(measurement, fields, tags, timestamp...) + a.addFields(measurement, tags, fields, telegraf.Histogram, timestamp...) 
} func (a *Accumulator) AddMetric(m telegraf.Metric) { - a.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) + a.addFields(m.Name(), m.Tags(), m.Fields(), m.Type(), m.Time()) } func (a *Accumulator) WithTracking(maxTracked int) telegraf.TrackingAccumulator { diff --git a/testutil/metric.go b/testutil/metric.go index da3ace0f2..36ba63af9 100644 --- a/testutil/metric.go +++ b/testutil/metric.go @@ -197,7 +197,7 @@ func MustMetric( } func FromTestMetric(met *Metric) telegraf.Metric { - m, err := metric.New(met.Measurement, met.Tags, met.Fields, met.Time) + m, err := metric.New(met.Measurement, met.Tags, met.Fields, met.Time, met.Type) if err != nil { panic("MustMetric") } From c58f0debb1df2ef58509cbc5d82765669d889999 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 Nov 2019 17:32:37 -0800 Subject: [PATCH 1357/1815] Update changelog --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d891d621d..2f317f5c6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,11 @@ #### Release Notes - Official packages built with Go 1.13.3. +- The `prometheus` input and `prometheus_client` output have a new mapping to + and from Telegraf metrics, which can be enabled by setting `metric_version = 2`. + The original mapping is deprecated. When both plugins have the same setting, + passthrough metrics will be unchanged. Refer to the `prometheus` input for + details about the mapping. #### New Inputs @@ -47,11 +52,13 @@ - [#6680](https://github.com/influxdata/telegraf/pull/6668): Add support for sending HTTP Basic Auth in influxdb input - [#5767](https://github.com/influxdata/telegraf/pull/5767): Add ability to configure the url tag in the prometheus input. - [#5767](https://github.com/influxdata/telegraf/pull/5767): Add prometheus metric_version=2 mapping to internal metrics/line protocol. +- [#6703](https://github.com/influxdata/telegraf/pull/6703): Add prometheus metric_version=2 support to prometheus_client output. - [#6660](https://github.com/influxdata/telegraf/pull/6660): Add content_encoding compression support to socket_listener. - [#6689](https://github.com/influxdata/telegraf/pull/6689): Add high resolution metrics support to CloudWatch output. - [#6716](https://github.com/influxdata/telegraf/pull/6716): Add SReclaimable and SUnreclaim to mem input. - [#6695](https://github.com/influxdata/telegraf/pull/6695): Allow multiple certificates per file in x509_cert input. - [#6686](https://github.com/influxdata/telegraf/pull/6686): Add additional tags to the x509 input. +- [#6703](https://github.com/influxdata/telegraf/pull/6703): Add batch data format support to file output. 
#### Bugfixes

From e04bb1e07f28618134644b7a5bb6d8cee93ed2d3 Mon Sep 17 00:00:00 2001
From: Enno Lohmeier
Date: Wed, 27 Nov 2019 19:54:29 +0100
Subject: [PATCH 1358/1815] Support partition assignment strategy configuration in kafka_consumer (#6688)

---
 Gopkg.lock | 20 ++++++++++++++++---
 Gopkg.toml | 3 +--
 etc/telegraf.conf | 3 +++
 plugins/inputs/kafka_consumer/README.md | 3 +++
 .../inputs/kafka_consumer/kafka_consumer.go | 15 ++++++++++++++
 5 files changed, 39 insertions(+), 5 deletions(-)

diff --git a/Gopkg.lock b/Gopkg.lock
index 3eb640780..ae730556e 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -100,12 +100,12 @@
   version = "v0.4.9"

 [[projects]]
-  digest = "1:322bf7f4bb312294fc551f6e2c82d02f2ab8f94920f4163b3deeb07a8141ac79"
+  digest = "1:33f56caa9ab45fedc63d3d1d3e342d9f9d00726071f22c67d06b0cd26d49a55e"
   name = "github.com/Shopify/sarama"
   packages = ["."]
   pruneopts = ""
-  revision = "b12709e6ca29240128c89fe0b30b6a76be42b457"
-  source = "https://github.com/influxdata/sarama.git"
+  revision = ""
+  version = "v1.24.1"

 [[projects]]
   digest = "1:f82b8ac36058904227087141017bb82f4b0fc58272990a4cdae3e2d6d222644e"
@@ -791,6 +791,20 @@
   pruneopts = ""
   revision = "95032a82bc518f77982ea72343cc1ade730072f0"

+[[projects]]
+  digest = "1:4ceab6231efd01210f2b8b6ab360d480d49c0f44df63841ca0465920a387495d"
+  name = "github.com/klauspost/compress"
+  packages = [
+    "fse",
+    "huff0",
+    "snappy",
+    "zstd",
+    "zstd/internal/xxhash",
+  ]
+  pruneopts = ""
+  revision = "4e96aec082898e4dad17d8aca1a7e2d01362ff6c"
+  version = "v1.9.2"
+
 [[projects]]
   branch = "master"
   digest = "1:1ed9eeebdf24aadfbca57eb50e6455bd1d2474525e0f0d4454de8c8e9bc7ee9a"
diff --git a/Gopkg.toml b/Gopkg.toml
index c6e510641..7ecfae425 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -148,8 +148,7 @@

 [[constraint]]
   name = "github.com/Shopify/sarama"
-  revision = "b12709e6ca29240128c89fe0b30b6a76be42b457"
-  source = "https://github.com/influxdata/sarama.git"
+  version = "1.24.0"

 [[constraint]]
   name = "github.com/soniah/gosnmp"
diff --git a/etc/telegraf.conf b/etc/telegraf.conf
index bab1fb456..5f728579b 100644
--- a/etc/telegraf.conf
+++ b/etc/telegraf.conf
@@ -5116,6 +5116,9 @@
 #   ## Initial offset position; one of "oldest" or "newest".
 #   # offset = "oldest"
 #
+#   ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky".
+#   # balance_strategy = "range"
+#
 #   ## Maximum length of a message to consume, in bytes (default 0/unlimited);
 #   ## larger messages are dropped
 #   max_message_len = 1000000
diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md
index efd3ffad6..b0f2a4798 100644
--- a/plugins/inputs/kafka_consumer/README.md
+++ b/plugins/inputs/kafka_consumer/README.md
@@ -44,6 +44,9 @@ and use the old zookeeper connection method.
   ## Initial offset position; one of "oldest" or "newest".
   # offset = "oldest"

+  ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky".
+  # balance_strategy = "range"
+
   ## Maximum length of a message to consume, in bytes (default 0/unlimited);
   ## larger messages are dropped
   max_message_len = 1000000
diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go
index 997988ca6..39f6f0e2b 100644
--- a/plugins/inputs/kafka_consumer/kafka_consumer.go
+++ b/plugins/inputs/kafka_consumer/kafka_consumer.go
@@ -49,6 +49,9 @@ const sampleConfig = `
   ## Initial offset position; one of "oldest" or "newest".
   # offset = "oldest"

+  ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky".
+  # balance_strategy = "range"
+
   ## Maximum length of a message to consume, in bytes (default 0/unlimited);
   ## larger messages are dropped
   max_message_len = 1000000
@@ -86,6 +89,7 @@ type KafkaConsumer struct {
 	MaxMessageLen          int    `toml:"max_message_len"`
 	MaxUndeliveredMessages int    `toml:"max_undelivered_messages"`
 	Offset                 string `toml:"offset"`
+	BalanceStrategy        string `toml:"balance_strategy"`
 	Topics                 []string `toml:"topics"`
 	TopicTag               string `toml:"topic_tag"`
 	Version                string `toml:"version"`
@@ -185,6 +189,17 @@ func (k *KafkaConsumer) Init() error {
 		return fmt.Errorf("invalid offset %q", k.Offset)
 	}

+	switch strings.ToLower(k.BalanceStrategy) {
+	case "range", "":
+		config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange
+	case "roundrobin":
+		config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin
+	case "sticky":
+		config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky
+	default:
+		return fmt.Errorf("invalid balance strategy %q", k.BalanceStrategy)
+	}
+
 	if k.ConsumerCreator == nil {
 		k.ConsumerCreator = &SaramaCreator{}
 	}

From 10dd367faa554299717f2f202f2b23e96aa7bf7e Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 27 Nov 2019 10:58:46 -0800
Subject: [PATCH 1359/1815] Update changelog and dependency licences

---
 CHANGELOG.md | 1 +
 docs/LICENSE_OF_DEPENDENCIES.md | 1 +
 2 files changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2f317f5c6..c5b7750c8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -59,6 +59,7 @@
 - [#6695](https://github.com/influxdata/telegraf/pull/6695): Allow multiple certificates per file in x509_cert input.
 - [#6686](https://github.com/influxdata/telegraf/pull/6686): Add additional tags to the x509 input.
 - [#6703](https://github.com/influxdata/telegraf/pull/6703): Add batch data format support to file output.
+- [#6688](https://github.com/influxdata/telegraf/pull/6688): Support partition assignment strategy configuration in kafka_consumer.
#### Bugfixes diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 5582bf9ee..0b0b95ab1 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -68,6 +68,7 @@ following works: - github.com/kardianos/osext [BSD 3-Clause "New" or "Revised" License](https://github.com/kardianos/osext/blob/master/LICENSE) - github.com/kardianos/service [zlib License](https://github.com/kardianos/service/blob/master/LICENSE) - github.com/kballard/go-shellquote [MIT License](https://github.com/kballard/go-shellquote/blob/master/LICENSE) +- github.com/klauspost/compress [BSD 3-Clause Clear License](https://github.com/klauspost/compress/blob/master/LICENSE) - github.com/kr/logfmt [MIT License](https://github.com/kr/logfmt/blob/master/Readme) - github.com/kubernetes/apimachinery [Apache License 2.0](https://github.com/kubernetes/apimachinery/blob/master/LICENSE) - github.com/leodido/ragel-machinery [MIT License](https://github.com/leodido/ragel-machinery/blob/develop/LICENSE) From 3595cb8b7204ed4c9a5f0625b9e39d72968ec465 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Wed, 27 Nov 2019 18:02:40 -0800 Subject: [PATCH 1360/1815] Update Telegraf README.md (#6718) --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 8fa869ca0..a2383ba95 100644 --- a/README.md +++ b/README.md @@ -142,7 +142,7 @@ For documentation on the latest development code see the [documentation index][d * [apache](./plugins/inputs/apache) * [apcupsd](./plugins/inputs/apcupsd) * [aurora](./plugins/inputs/aurora) -* [aws cloudwatch](./plugins/inputs/cloudwatch) +* [aws cloudwatch](./plugins/inputs/cloudwatch) (Amazon Cloudwatch) * [azure_storage_queue](./plugins/inputs/azure_storage_queue) * [bcache](./plugins/inputs/bcache) * [beanstalkd](./plugins/inputs/beanstalkd) @@ -171,7 +171,7 @@ For documentation on the latest development code see the [documentation index][d * [docker](./plugins/inputs/docker) * [docker_log](./plugins/inputs/docker_log) * [dovecot](./plugins/inputs/dovecot) -* [ecs](./plugins/inputs/ecs) (Amazon Elastic Container Service, Fargate) +* [aws ecs](./plugins/inputs/ecs) (Amazon Elastic Container Service, Fargate) * [elasticsearch](./plugins/inputs/elasticsearch) * [ethtool](./plugins/inputs/ethtool) * [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios) @@ -206,7 +206,7 @@ For documentation on the latest development code see the [documentation index][d * [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry) * [kafka_consumer](./plugins/inputs/kafka_consumer) * [kapacitor](./plugins/inputs/kapacitor) -* [kinesis](./plugins/inputs/kinesis_consumer) +* [aws kinesis](./plugins/inputs/kinesis_consumer) (Amazon Kinesis) * [kernel](./plugins/inputs/kernel) * [kernel_vmstat](./plugins/inputs/kernel_vmstat) * [kibana](./plugins/inputs/kibana) From 03a69106896c74b5ec3921619ada8699453a0127 Mon Sep 17 00:00:00 2001 From: Chris Goller Date: Mon, 2 Dec 2019 12:49:04 -0600 Subject: [PATCH 1361/1815] perf(inputs/influxdb_listener): benchmark serving writes (#6673) * perf(inputs/influxdb_listener): benchmark serving writes * chore(inputs/influxdb_listener): remove stray comment --- .../influxdb_listener_test.go | 114 ++++++++++++++++++ testutil/accumulator.go | 19 +++ 2 files changed, 133 insertions(+) create mode 100644 plugins/inputs/influxdb_listener/influxdb_listener_test.go diff --git 
a/plugins/inputs/influxdb_listener/influxdb_listener_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_test.go
new file mode 100644
index 000000000..5badc1213
--- /dev/null
+++ b/plugins/inputs/influxdb_listener/influxdb_listener_test.go
@@ -0,0 +1,114 @@
+package http_listener
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/plugins/parsers/influx"
+	"github.com/influxdata/telegraf/selfstat"
+	"github.com/influxdata/telegraf/testutil"
+)
+
+// newListener is the minimal HTTPListener construction to serve writes.
+func newListener() *HTTPListener {
+	listener := &HTTPListener{
+		TimeFunc:  time.Now,
+		acc:       &testutil.NopAccumulator{},
+		BytesRecv: selfstat.Register("http_listener", "bytes_received", map[string]string{}),
+		handler:   influx.NewMetricHandler(),
+		pool:      NewPool(200, DEFAULT_MAX_LINE_SIZE),
+		MaxLineSize: internal.Size{
+			Size: DEFAULT_MAX_LINE_SIZE,
+		},
+		MaxBodySize: internal.Size{
+			Size: DEFAULT_MAX_BODY_SIZE,
+		},
+	}
+	listener.parser = influx.NewParser(listener.handler)
+	return listener
+}
+
+func BenchmarkHTTPListener_serveWrite(b *testing.B) {
+	res := httptest.NewRecorder()
+	addr := "http://localhost/write?db=mydb"
+
+	benchmarks := []struct {
+		name  string
+		lines string
+	}{
+		{
+			name:  "single line, tag, and field",
+			lines: lines(1, 1, 1),
+		},
+		{
+			name:  "single line, 10 tags and fields",
+			lines: lines(1, 10, 10),
+		},
+		{
+			name:  "single line, 100 tags and fields",
+			lines: lines(1, 100, 100),
+		},
+		{
+			name:  "1k lines, single tag and field",
+			lines: lines(1000, 1, 1),
+		},
+		{
+			name:  "1k lines, 10 tags and fields",
+			lines: lines(1000, 10, 10),
+		},
+		{
+			name:  "10k lines, 10 tags and fields",
+			lines: lines(10000, 10, 10),
+		},
+		{
+			name:  "100k lines, 10 tags and fields",
+			lines: lines(100000, 10, 10),
+		},
+	}
+
+	for _, bm := range benchmarks {
+		b.Run(bm.name, func(b *testing.B) {
+			listener := newListener()
+
+			b.ResetTimer()
+			for n := 0; n < b.N; n++ {
+				req, err := http.NewRequest("POST", addr, strings.NewReader(bm.lines))
+				if err != nil {
+					b.Error(err)
+				}
+				listener.serveWrite(res, req)
+				if res.Code != http.StatusNoContent {
+					b.Errorf("unexpected status %d", res.Code)
+				}
+			}
+		})
+	}
+}
+
+func lines(lines, numTags, numFields int) string {
+	lp := make([]string, lines)
+	for i := 0; i < lines; i++ {
+		tags := make([]string, numTags)
+		for j := 0; j < numTags; j++ {
+			tags[j] = fmt.Sprintf("t%d=v%d", j, j)
+		}
+
+		fields := make([]string, numFields)
+		for k := 0; k < numFields; k++ {
+			fields[k] = fmt.Sprintf("f%d=%d", k, k)
+		}
+
+		lp[i] = fmt.Sprintf("m%d,%s %s",
+			i,
+			strings.Join(tags, ","),
+			strings.Join(fields, ","),
+		)
+	}
+
+	return strings.Join(lp, "\n")
+}
diff --git a/testutil/accumulator.go b/testutil/accumulator.go
index 64c3d19fe..65592b5a0 100644
--- a/testutil/accumulator.go
+++ b/testutil/accumulator.go
@@ -715,3 +715,22 @@ func (a *Accumulator) BoolField(measurement string, field string) (bool, bool) {

 	return false, false
 }
+
+// NopAccumulator is used for benchmarking to isolate the plugin from the internal
+// telegraf accumulator machinery.
+type NopAccumulator struct{} + +func (n *NopAccumulator) AddFields(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) { +} +func (n *NopAccumulator) AddGauge(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) { +} +func (n *NopAccumulator) AddCounter(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) { +} +func (n *NopAccumulator) AddSummary(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) { +} +func (n *NopAccumulator) AddHistogram(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) { +} +func (n *NopAccumulator) AddMetric(telegraf.Metric) {} +func (n *NopAccumulator) SetPrecision(precision time.Duration) {} +func (n *NopAccumulator) AddError(err error) {} +func (n *NopAccumulator) WithTracking(maxTracked int) telegraf.TrackingAccumulator { return nil } From 906027c39b54c75c2bea5a7657cb745dd8a5d09b Mon Sep 17 00:00:00 2001 From: Kevin Lin Date: Mon, 2 Dec 2019 11:06:36 -0800 Subject: [PATCH 1362/1815] Support resolution of symlinks in filecount input (#6735) --- plugins/inputs/filecount/README.md | 3 ++ plugins/inputs/filecount/filecount.go | 46 ++++++++++++---------- plugins/inputs/filecount/filecount_test.go | 14 ++++++- 3 files changed, 42 insertions(+), 21 deletions(-) diff --git a/plugins/inputs/filecount/README.md b/plugins/inputs/filecount/README.md index 49e28caa6..81fc75908 100644 --- a/plugins/inputs/filecount/README.md +++ b/plugins/inputs/filecount/README.md @@ -27,6 +27,9 @@ Reports the number and total size of files in specified directories. ## Only count regular files. Defaults to true. regular_only = true + ## Follow all symlinks while walking the directory tree. Defaults to false. + follow_symlinks = false + ## Only count files that are at least this size. If size is ## a negative number, only count files that are smaller than the ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... diff --git a/plugins/inputs/filecount/filecount.go b/plugins/inputs/filecount/filecount.go index 4d42da603..30815541c 100644 --- a/plugins/inputs/filecount/filecount.go +++ b/plugins/inputs/filecount/filecount.go @@ -35,6 +35,9 @@ const sampleConfig = ` ## Only count regular files. Defaults to true. regular_only = true + ## Follow all symlinks while walking the directory tree. Defaults to false. + follow_symlinks = false + ## Only count files that are at least this size. If size is ## a negative number, only count files that are smaller than the ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... 
@@ -48,17 +51,18 @@ const sampleConfig = ` ` type FileCount struct { - Directory string // deprecated in 1.9 - Directories []string - Name string - Recursive bool - RegularOnly bool - Size internal.Size - MTime internal.Duration `toml:"mtime"` - fileFilters []fileFilterFunc - globPaths []globpath.GlobPath - Fs fileSystem - Log telegraf.Logger + Directory string // deprecated in 1.9 + Directories []string + Name string + Recursive bool + RegularOnly bool + FollowSymlinks bool + Size internal.Size + MTime internal.Duration `toml:"mtime"` + fileFilters []fileFilterFunc + globPaths []globpath.GlobPath + Fs fileSystem + Log telegraf.Logger } func (_ *FileCount) Description() string { @@ -208,6 +212,7 @@ func (fc *FileCount) count(acc telegraf.Accumulator, basedir string, glob globpa Callback: walkFn, PostChildrenCallback: postChildrenFn, Unsorted: true, + FollowSymbolicLinks: fc.FollowSymlinks, ErrorCallback: func(osPathname string, err error) godirwalk.ErrorAction { if os.IsPermission(errors.Cause(err)) { fc.Log.Debug(err) @@ -292,15 +297,16 @@ func (fc *FileCount) initGlobPaths(acc telegraf.Accumulator) { func NewFileCount() *FileCount { return &FileCount{ - Directory: "", - Directories: []string{}, - Name: "*", - Recursive: true, - RegularOnly: true, - Size: internal.Size{Size: 0}, - MTime: internal.Duration{Duration: 0}, - fileFilters: nil, - Fs: osFS{}, + Directory: "", + Directories: []string{}, + Name: "*", + Recursive: true, + RegularOnly: true, + FollowSymlinks: false, + Size: internal.Size{Size: 0}, + MTime: internal.Duration{Duration: 0}, + fileFilters: nil, + Fs: osFS{}, } } diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go index 3e0cadf37..96d8f0c3b 100644 --- a/plugins/inputs/filecount/filecount_test.go +++ b/plugins/inputs/filecount/filecount_test.go @@ -102,7 +102,6 @@ func TestSizeFilter(t *testing.T) { } func TestMTimeFilter(t *testing.T) { - mtime := time.Date(2011, time.December, 14, 18, 25, 5, 0, time.UTC) fileAge := time.Since(mtime) - (60 * time.Second) @@ -119,6 +118,19 @@ func TestMTimeFilter(t *testing.T) { fileCountEquals(t, fc, len(matches), 0) } +// The library dependency karrick/godirwalk completely abstracts out the +// behavior of the FollowSymlinks plugin input option. However, it should at +// least behave identically when enabled on a filesystem with no symlinks. +func TestFollowSymlinks(t *testing.T) { + fc := getNoFilterFileCount() + fc.FollowSymlinks = true + matches := []string{"foo", "bar", "baz", "qux", + "subdir/", "subdir/quux", "subdir/quuz", + "subdir/nested2", "subdir/nested2/qux"} + + fileCountEquals(t, fc, len(matches), 5096) +} + // Paths with a trailing slash will not exactly match paths produced during the // walk as these paths are cleaned before being returned from godirwalk. 
#6329 func TestDirectoryWithTrailingSlash(t *testing.T) { From 6175d17969e762d6652aba4bdc6979bffde1b5b8 Mon Sep 17 00:00:00 2001 From: pertu Date: Mon, 2 Dec 2019 19:16:00 +0000 Subject: [PATCH 1363/1815] Add uptime_ns field to mongodb input (#6669) --- plugins/inputs/mongodb/README.md | 3 ++- plugins/inputs/mongodb/mongodb_data.go | 1 + plugins/inputs/mongodb/mongodb_data_test.go | 2 ++ plugins/inputs/mongodb/mongostat.go | 4 ++++ 4 files changed, 9 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index 5772f4fc3..1db121178 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -74,7 +74,7 @@ by running Telegraf with the `--debug` argument. - flushes (integer) - flushes_total_time_ns (integer) - getmores (integer) - - inserts (integer + - inserts (integer) - jumbo_chunks (integer) - member_status (string) - net_in_bytes_count (integer) @@ -102,6 +102,7 @@ by running Telegraf with the `--debug` argument. - ttl_deletes (integer) - ttl_passes (integer) - updates (integer) + - uptime_ns (integer) - vsize_megabytes (integer) - wtcache_app_threads_page_read_count (integer) - wtcache_app_threads_page_read_time (integer) diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index 6f999cbd7..6aba4bb9d 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -38,6 +38,7 @@ func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData { } var DefaultStats = map[string]string{ + "uptime_ns": "UptimeNanos", "inserts": "InsertCnt", "inserts_per_sec": "Insert", "queries": "QueryCnt", diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index 527e7ab93..c565db910 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -16,6 +16,7 @@ func TestAddNonReplStats(t *testing.T) { &StatLine{ StorageEngine: "", Time: time.Now(), + UptimeNanos: 0, Insert: 0, Query: 0, Update: 0, @@ -235,6 +236,7 @@ func TestStateTag(t *testing.T) { "resident_megabytes": int64(0), "updates": int64(0), "updates_per_sec": int64(0), + "uptime_ns": int64(0), "vsize_megabytes": int64(0), "ttl_deletes": int64(0), "ttl_deletes_per_sec": int64(0), diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 8021775ea..1658fc071 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -477,6 +477,8 @@ type StatLine struct { IsMongos bool Host string + UptimeNanos int64 + // The time at which this StatLine was generated. 
Time time.Time @@ -659,6 +661,8 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec Faults: -1, } + returnVal.UptimeNanos = 1000 * 1000 * newStat.UptimeMillis + // set connection info returnVal.CurrentC = newStat.Connections.Current returnVal.AvailableC = newStat.Connections.Available From fd2e9889ac753d16da60cd44608a7c4dcee325dd Mon Sep 17 00:00:00 2001 From: Ross Lodge Date: Mon, 2 Dec 2019 11:19:14 -0800 Subject: [PATCH 1364/1815] Add node type tag to mongodb input (#6731) --- plugins/inputs/mongodb/README.md | 3 ++- plugins/inputs/mongodb/mongodb_data.go | 1 + plugins/inputs/mongodb/mongodb_data_test.go | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index 1db121178..f154f333b 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -55,6 +55,7 @@ by running Telegraf with the `--debug` argument. - mongodb - tags: - hostname + - node_type - fields: - active_reads (integer) - active_writes (integer) @@ -184,7 +185,7 @@ by running Telegraf with the `--debug` argument. ### Example Output: ``` -mongodb,hostname=127.0.0.1:27017 active_reads=0i,active_writes=0i,commands=1335i,commands_per_sec=7i,connections_available=814i,connections_current=5i,connections_total_created=0i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=1i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=1i,deletes=0i,deletes_per_sec=0i,document_deleted=0i,document_inserted=0i,document_returned=13i,document_updated=0i,flushes=5i,flushes_per_sec=0i,getmores=269i,getmores_per_sec=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,member_status="PRI",net_in_bytes=986i,net_in_bytes_count=358006i,net_out_bytes=23906i,net_out_bytes_count=661507i,open_connections=5i,percent_cache_dirty=0,percent_cache_used=0,queries=18i,queries_per_sec=3i,queued_reads=0i,queued_writes=0i,repl_commands=0i,repl_commands_per_sec=0i,repl_deletes=0i,repl_deletes_per_sec=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=0i,repl_inserts_per_sec=0i,repl_lag=0i,repl_oplog_window_sec=24355215i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=0i,repl_updates_per_sec=0i,resident_megabytes=62i,state="PRIMARY",total_available=0i,total_created=0i,total_in_use=0i,total_refreshing=0i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=23i,ttl_passes_per_sec=0i,updates=0i,updates_per_sec=0i,vsize_megabytes=713i,wtcache_app_threads_page_read_count=13i,wtcache_app_threads_page_read_time=74i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=55271i,wtcache_bytes_written_from=125402i,wtcache_current_bytes=117050i,wtcache_max_bytes_configured=1073741824i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=0i,wtcache_worker_thread_evictingpages=0i 1547159491000000000 +mongodb,hostname=127.0.0.1:27017,node_type=PRI 
active_reads=0i,active_writes=0i,commands=1335i,commands_per_sec=7i,connections_available=814i,connections_current=5i,connections_total_created=0i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=1i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=1i,deletes=0i,deletes_per_sec=0i,document_deleted=0i,document_inserted=0i,document_returned=13i,document_updated=0i,flushes=5i,flushes_per_sec=0i,getmores=269i,getmores_per_sec=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,member_status="PRI",net_in_bytes=986i,net_in_bytes_count=358006i,net_out_bytes=23906i,net_out_bytes_count=661507i,open_connections=5i,percent_cache_dirty=0,percent_cache_used=0,queries=18i,queries_per_sec=3i,queued_reads=0i,queued_writes=0i,repl_commands=0i,repl_commands_per_sec=0i,repl_deletes=0i,repl_deletes_per_sec=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=0i,repl_inserts_per_sec=0i,repl_lag=0i,repl_oplog_window_sec=24355215i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=0i,repl_updates_per_sec=0i,resident_megabytes=62i,state="PRIMARY",total_available=0i,total_created=0i,total_in_use=0i,total_refreshing=0i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=23i,ttl_passes_per_sec=0i,updates=0i,updates_per_sec=0i,vsize_megabytes=713i,wtcache_app_threads_page_read_count=13i,wtcache_app_threads_page_read_time=74i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=55271i,wtcache_bytes_written_from=125402i,wtcache_current_bytes=117050i,wtcache_max_bytes_configured=1073741824i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=0i,wtcache_worker_thread_evictingpages=0i 1547159491000000000 mongodb_db_stats,db_name=admin,hostname=127.0.0.1:27017 avg_obj_size=241,collections=2i,data_size=723i,index_size=49152i,indexes=3i,num_extents=0i,objects=3i,ok=1i,storage_size=53248i,type="db_stat" 1547159491000000000 mongodb_db_stats,db_name=local,hostname=127.0.0.1:27017 avg_obj_size=813.9705882352941,collections=6i,data_size=55350i,index_size=102400i,indexes=5i,num_extents=0i,objects=68i,ok=1i,storage_size=204800i,type="db_stat" 1547159491000000000 mongodb_col_stats,collection=foo,db_name=local,hostname=127.0.0.1:27017 size=375005928i,avg_obj_size=5494,type="col_stat",storage_size=249307136i,total_index_size=2138112i,ok=1i,count=68251i 1547159491000000000 diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index 6aba4bb9d..0c3695c61 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -229,6 +229,7 @@ func (d *MongodbData) AddDefaultStats() { d.addStat(statLine, DefaultStats) if d.StatLine.NodeType != "" { d.addStat(statLine, DefaultReplStats) + d.Tags["node_type"] = d.StatLine.NodeType } if d.StatLine.OplogStats != nil { diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index c565db910..2717dffd1 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -190,6 +190,7 @@ func TestStateTag(t *testing.T) { ) stateTags := make(map[string]string) + stateTags["node_type"] = "PRI" var acc testutil.Accumulator From 6839e5573cabb355beedea74eab4864b4e1aeef1 Mon Sep 17 00:00:00 2001 From: Benjamin Schweizer <234864+benschweizer@users.noreply.github.com> Date: Tue, 3 Dec 2019 01:05:50 +0100 Subject: [PATCH 1365/1815] Add new "systemd_units" input plugin (#4532) --- CHANGELOG.md 
| 1 +
 plugins/inputs/all/all.go | 1 +
 plugins/inputs/systemd_units/README.md | 140 +++++++++++
 .../systemd_units/systemd_units_linux.go | 221 ++++++++++++++++++
 .../systemd_units/systemd_units_linux_test.go | 100 ++++++++
 .../systemd_units/systemd_units_notlinux.go | 3 +
 6 files changed, 466 insertions(+)
 create mode 100644 plugins/inputs/systemd_units/README.md
 create mode 100644 plugins/inputs/systemd_units/systemd_units_linux.go
 create mode 100644 plugins/inputs/systemd_units/systemd_units_linux_test.go
 create mode 100644 plugins/inputs/systemd_units/systemd_units_notlinux.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c5b7750c8..00cb89e4f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,7 @@
 - [snmp_trap](/plugins/inputs/snmp_trap/README.md) - Contributed by @influxdata
 - [suricata](/plugins/inputs/suricata/README.md) - Contributed by @satta
 - [synproxy](/plugins/inputs/synproxy/README.md) - Contributed by @rfrenayworldstream
+- [systemd_units](/plugins/inputs/systemd_units/README.md) - Contributed by @benschweizer

 #### New Processors

diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index ca0aa4a32..3ce9823f6 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -148,6 +148,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/syslog"
 	_ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
 	_ "github.com/influxdata/telegraf/plugins/inputs/system"
+	_ "github.com/influxdata/telegraf/plugins/inputs/systemd_units"
 	_ "github.com/influxdata/telegraf/plugins/inputs/tail"
 	_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
 	_ "github.com/influxdata/telegraf/plugins/inputs/teamspeak"
diff --git a/plugins/inputs/systemd_units/README.md b/plugins/inputs/systemd_units/README.md
new file mode 100644
index 000000000..c9d4a85da
--- /dev/null
+++ b/plugins/inputs/systemd_units/README.md
@@ -0,0 +1,140 @@
+# Systemd Units Plugin
+
+The systemd_units plugin gathers systemd unit status on Linux. It relies on
+`systemctl list-units --all --type=service` to collect data on service status.
+
+The results are tagged with the unit name and provide enumerated fields for
+the load, active and sub states, indicating overall unit health.
+
+This plugin is related to the [win_services module](../win_services/), which
+fulfills the same purpose on Windows.
+
+In addition to services, this plugin can gather other unit types as well;
+see `systemctl list-units --all --type help` for possible options.
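+
+For reference, the plugin parses `systemctl` output shaped like the lines
+below (unit names and descriptions are illustrative); the first four
+whitespace-separated fields become the `name`, `load`, `active` and `sub` tags:
+
+```
+ssh.service        loaded active running OpenBSD Secure Shell server
+networking.service loaded failed failed  Raise network interfaces
+```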
+
+### Configuration
+```
+[[inputs.systemd_units]]
+  ## Set timeout for systemctl execution
+  # timeout = "1s"
+  #
+  ## Filter for a specific unit type, default is "service", other possible
+  ## values are "socket", "target", "device", "mount", "automount", "swap",
+  ## "timer", "path", "slice" and "scope":
+  # unittype = "service"
+```
+
+### Metrics
+- systemd_units:
+  - tags:
+    - name (string, unit name)
+    - load (string, load state)
+    - active (string, active state)
+    - sub (string, sub state)
+  - fields:
+    - load_code (int, see below)
+    - active_code (int, see below)
+    - sub_code (int, see below)
+
+#### Load
+
+enumeration of [unit_load_state_table](https://github.com/systemd/systemd/blob/c87700a1335f489be31cd3549927da68b5638819/src/basic/unit-def.c#L87)
+
+| Value | Meaning | Description |
+| ----- | ------- | ----------- |
+| 0 | loaded | unit is ~ |
+| 1 | stub | unit is ~ |
+| 2 | not-found | unit is ~ |
+| 3 | bad-setting | unit is ~ |
+| 4 | error | unit is ~ |
+| 5 | merged | unit is ~ |
+| 6 | masked | unit is ~ |
+
+#### Active
+
+enumeration of [unit_active_state_table](https://github.com/systemd/systemd/blob/c87700a1335f489be31cd3549927da68b5638819/src/basic/unit-def.c#L99)
+
+| Value | Meaning | Description |
+| ----- | ------- | ----------- |
+| 0 | active | unit is ~ |
+| 1 | reloading | unit is ~ |
+| 2 | inactive | unit is ~ |
+| 3 | failed | unit is ~ |
+| 4 | activating | unit is ~ |
+| 5 | deactivating | unit is ~ |
+
+#### Sub
+
+enumeration of sub states; see the various [unittype_state_tables](https://github.com/systemd/systemd/blob/c87700a1335f489be31cd3549927da68b5638819/src/basic/unit-def.c#L163).
+Duplicates were removed, and the tables are hex aligned to keep some space for future
+values
+
+| Value | Meaning | Description |
+| ----- | ------- | ----------- |
+| | | service_state_table start at 0x0000 |
+| 0x0000 | running | unit is ~ |
+| 0x0001 | dead | unit is ~ |
+| 0x0002 | start-pre | unit is ~ |
+| 0x0003 | start | unit is ~ |
+| 0x0004 | exited | unit is ~ |
+| 0x0005 | reload | unit is ~ |
+| 0x0006 | stop | unit is ~ |
+| 0x0007 | stop-watchdog | unit is ~ |
+| 0x0008 | stop-sigterm | unit is ~ |
+| 0x0009 | stop-sigkill | unit is ~ |
+| 0x000a | stop-post | unit is ~ |
+| 0x000b | final-sigterm | unit is ~ |
+| 0x000c | failed | unit is ~ |
+| 0x000d | auto-restart | unit is ~ |
+| | | automount_state_table start at 0x0010 |
+| 0x0010 | waiting | unit is ~ |
+| | | device_state_table start at 0x0020 |
+| 0x0020 | tentative | unit is ~ |
+| 0x0021 | plugged | unit is ~ |
+| | | mount_state_table start at 0x0030 |
+| 0x0030 | mounting | unit is ~ |
+| 0x0031 | mounting-done | unit is ~ |
+| 0x0032 | mounted | unit is ~ |
+| 0x0033 | remounting | unit is ~ |
+| 0x0034 | unmounting | unit is ~ |
+| 0x0035 | remounting-sigterm | unit is ~ |
+| 0x0036 | remounting-sigkill | unit is ~ |
+| 0x0037 | unmounting-sigterm | unit is ~ |
+| 0x0038 | unmounting-sigkill | unit is ~ |
+| | | path_state_table start at 0x0040 |
+| | | scope_state_table start at 0x0050 |
+| 0x0050 | abandoned | unit is ~ |
+| | | slice_state_table start at 0x0060 |
+| 0x0060 | active | unit is ~ |
+| | | socket_state_table start at 0x0070 |
+| 0x0070 | start-chown | unit is ~ |
+| 0x0071 | start-post | unit is ~ |
+| 0x0072 | listening | unit is ~ |
+| 0x0073 | stop-pre | unit is ~ |
+| 0x0074 | stop-pre-sigterm | unit is ~ |
+| 0x0075 | stop-pre-sigkill | unit is ~ |
+| 0x0076 | final-sigkill | unit is ~ |
+| | | swap_state_table start at 0x0080 |
+| 0x0080 | activating | unit is ~ |
+| 0x0081 | activating-done | unit is ~ |
+| 0x0082 | deactivating | unit is ~ |
+| 0x0083 | deactivating-sigterm | unit is ~ |
+| 0x0084 | deactivating-sigkill | unit is ~ |
+| | | target_state_table start at 0x0090 |
+| | | timer_state_table start at 0x00a0 |
+| 0x00a0 | elapsed | unit is ~ |
+| | | |
+
+### Example Output
+
+Linux Systemd Units:
+```
+$ telegraf --test --config /tmp/telegraf.conf
+> systemd_units,host=host1.example.com,name=dbus.service,load=loaded,active=active,sub=running load_code=0i,active_code=0i,sub_code=0i 1533730725000000000
+> systemd_units,host=host1.example.com,name=networking.service,load=loaded,active=failed,sub=failed load_code=0i,active_code=3i,sub_code=12i 1533730725000000000
+> systemd_units,host=host1.example.com,name=ssh.service,load=loaded,active=active,sub=running load_code=0i,active_code=0i,sub_code=0i 1533730725000000000
+...
+```
+
+### Possible Improvements
+- add blacklist to filter names
diff --git a/plugins/inputs/systemd_units/systemd_units_linux.go b/plugins/inputs/systemd_units/systemd_units_linux.go
new file mode 100644
index 000000000..64caf03d0
--- /dev/null
+++ b/plugins/inputs/systemd_units/systemd_units_linux.go
@@ -0,0 +1,221 @@
+package systemd_units
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"os/exec"
+	"strings"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// SystemdUnits is a telegraf plugin to gather systemd unit status
+type SystemdUnits struct {
+	Timeout   internal.Duration
+	UnitType  string `toml:"unittype"`
+	systemctl systemctl
+}
+
+type systemctl func(Timeout internal.Duration, UnitType string) (*bytes.Buffer, error)
+
+const measurement = "systemd_units"
+
+// Below are mappings of systemd state tables as defined in
+// https://github.com/systemd/systemd/blob/c87700a1335f489be31cd3549927da68b5638819/src/basic/unit-def.c
+// Duplicate strings are removed from this list.
+var load_map = map[string]int{ + "loaded": 0, + "stub": 1, + "not-found": 2, + "bad-setting": 3, + "error": 4, + "merged": 5, + "masked": 6, +} + +var active_map = map[string]int{ + "active": 0, + "reloading": 1, + "inactive": 2, + "failed": 3, + "activating": 4, + "deactivating": 5, +} + +var sub_map = map[string]int{ + // service_state_table, offset 0x0000 + "running": 0x0000, + "dead": 0x0001, + "start-pre": 0x0002, + "start": 0x0003, + "exited": 0x0004, + "reload": 0x0005, + "stop": 0x0006, + "stop-watchdog": 0x0007, + "stop-sigterm": 0x0008, + "stop-sigkill": 0x0009, + "stop-post": 0x000a, + "final-sigterm": 0x000b, + "failed": 0x000c, + "auto-restart": 0x000d, + + // automount_state_table, offset 0x0010 + "waiting": 0x0010, + + // device_state_table, offset 0x0020 + "tentative": 0x0020, + "plugged": 0x0021, + + // mount_state_table, offset 0x0030 + "mounting": 0x0030, + "mounting-done": 0x0031, + "mounted": 0x0032, + "remounting": 0x0033, + "unmounting": 0x0034, + "remounting-sigterm": 0x0035, + "remounting-sigkill": 0x0036, + "unmounting-sigterm": 0x0037, + "unmounting-sigkill": 0x0038, + + // path_state_table, offset 0x0040 + + // scope_state_table, offset 0x0050 + "abandoned": 0x0050, + + // slice_state_table, offset 0x0060 + "active": 0x0060, + + // socket_state_table, offset 0x0070 + "start-chown": 0x0070, + "start-post": 0x0071, + "listening": 0x0072, + "stop-pre": 0x0073, + "stop-pre-sigterm": 0x0074, + "stop-pre-sigkill": 0x0075, + "final-sigkill": 0x0076, + + // swap_state_table, offset 0x0080 + "activating": 0x0080, + "activating-done": 0x0081, + "deactivating": 0x0082, + "deactivating-sigterm": 0x0083, + "deactivating-sigkill": 0x0084, + + // target_state_table, offset 0x0090 + + // timer_state_table, offset 0x00a0 + "elapsed": 0x00a0, +} + +var ( + defaultTimeout = internal.Duration{Duration: time.Second} + defaultUnitType = "service" +) + +// Description returns a short description of the plugin +func (s *SystemdUnits) Description() string { + return "Gather systemd units state" +} + +// SampleConfig returns sample configuration options. 
+func (s *SystemdUnits) SampleConfig() string {
+	return `
+  ## Set timeout for systemctl execution
+  # timeout = "1s"
+  #
+  ## Filter for a specific unit type, default is "service", other possible
+  ## values are "socket", "target", "device", "mount", "automount", "swap",
+  ## "timer", "path", "slice" and "scope":
+  # unittype = "service"
+`
+}
+
+// Gather parses systemctl outputs and adds counters to the Accumulator
+func (s *SystemdUnits) Gather(acc telegraf.Accumulator) error {
+	out, err := s.systemctl(s.Timeout, s.UnitType)
+	if err != nil {
+		return err
+	}
+
+	scanner := bufio.NewScanner(out)
+	for scanner.Scan() {
+		line := scanner.Text()
+
+		data := strings.Fields(line)
+		if len(data) < 4 {
+			acc.AddError(fmt.Errorf("Error parsing line (expected at least 4 fields): %s", line))
+			continue
+		}
+		name := data[0]
+		load := data[1]
+		active := data[2]
+		sub := data[3]
+		tags := map[string]string{
+			"name":   name,
+			"load":   load,
+			"active": active,
+			"sub":    sub,
+		}
+
+		var (
+			load_code   int
+			active_code int
+			sub_code    int
+			ok          bool
+		)
+		if load_code, ok = load_map[load]; !ok {
+			acc.AddError(fmt.Errorf("Error parsing field 'load', value not in map: %s", load))
+			continue
+		}
+		if active_code, ok = active_map[active]; !ok {
+			acc.AddError(fmt.Errorf("Error parsing field 'active', value not in map: %s", active))
+			continue
+		}
+		if sub_code, ok = sub_map[sub]; !ok {
+			acc.AddError(fmt.Errorf("Error parsing field 'sub', value not in map: %s", sub))
+			continue
+		}
+		fields := map[string]interface{}{
+			"load_code":   load_code,
+			"active_code": active_code,
+			"sub_code":    sub_code,
+		}
+
+		acc.AddFields(measurement, fields, tags)
+	}
+
+	return nil
+}
+
+func setSystemctl(Timeout internal.Duration, UnitType string) (*bytes.Buffer, error) {
+	// Is systemctl available?
+ systemctlPath, err := exec.LookPath("systemctl") + if err != nil { + return nil, err + } + + cmd := exec.Command(systemctlPath, "list-units", "--all", fmt.Sprintf("--type=%s", UnitType), "--no-legend") + + var out bytes.Buffer + cmd.Stdout = &out + err = internal.RunTimeout(cmd, Timeout.Duration) + if err != nil { + return &out, fmt.Errorf("error running systemctl list-units --all --type=%s --no-legend: %s", UnitType, err) + } + + return &out, nil +} + +func init() { + inputs.Add("systemd_units", func() telegraf.Input { + return &SystemdUnits{ + systemctl: setSystemctl, + Timeout: defaultTimeout, + UnitType: defaultUnitType, + } + }) +} diff --git a/plugins/inputs/systemd_units/systemd_units_linux_test.go b/plugins/inputs/systemd_units/systemd_units_linux_test.go new file mode 100644 index 000000000..f45922bb9 --- /dev/null +++ b/plugins/inputs/systemd_units/systemd_units_linux_test.go @@ -0,0 +1,100 @@ +package systemd_units + +import ( + "bytes" + "fmt" + "reflect" + "testing" + + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" +) + +func TestSystemdUnits(t *testing.T) { + tests := []struct { + name string + line string + tags map[string]string + fields map[string]interface{} + status int + err error + }{ + { + name: "example loaded active running", + line: "example.service loaded active running example service description", + tags: map[string]string{"name": "example.service", "load": "loaded", "active": "active", "sub": "running"}, + fields: map[string]interface{}{ + "load_code": 0, + "active_code": 0, + "sub_code": 0, + }, + }, + { + name: "example loaded active exited", + line: "example.service loaded active exited example service description", + tags: map[string]string{"name": "example.service", "load": "loaded", "active": "active", "sub": "exited"}, + fields: map[string]interface{}{ + "load_code": 0, + "active_code": 0, + "sub_code": 4, + }, + }, + { + name: "example loaded failed failed", + line: "example.service loaded failed failed example service description", + tags: map[string]string{"name": "example.service", "load": "loaded", "active": "failed", "sub": "failed"}, + fields: map[string]interface{}{ + "load_code": 0, + "active_code": 3, + "sub_code": 12, + }, + }, + { + name: "example not-found inactive dead", + line: "example.service not-found inactive dead example service description", + tags: map[string]string{"name": "example.service", "load": "not-found", "active": "inactive", "sub": "dead"}, + fields: map[string]interface{}{ + "load_code": 2, + "active_code": 2, + "sub_code": 1, + }, + }, + { + name: "example unknown unknown unknown", + line: "example.service unknown unknown unknown example service description", + err: fmt.Errorf("Error parsing field 'load', value not in map: %s", "unknown"), + }, + { + name: "example too few fields", + line: "example.service loaded fai", + err: fmt.Errorf("Error parsing line (expected at least 4 fields): %s", "example.service loaded fai"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + systemd_units := &SystemdUnits{ + systemctl: func(Timeout internal.Duration, UnitType string) (*bytes.Buffer, error) { + return bytes.NewBufferString(tt.line), nil + }, + } + acc := new(testutil.Accumulator) + err := acc.GatherError(systemd_units.Gather) + if !reflect.DeepEqual(tt.err, err) { + t.Errorf("%s: expected error '%#v' got '%#v'", tt.name, tt.err, err) + } + if len(acc.Metrics) > 0 { + m := acc.Metrics[0] + if !reflect.DeepEqual(m.Measurement, measurement) { + t.Errorf("%s: 
expected measurement '%#v' got '%#v'\n", tt.name, measurement, m.Measurement)
+			}
+			if !reflect.DeepEqual(m.Tags, tt.tags) {
+				t.Errorf("%s: expected tags\n%#v got\n%#v\n", tt.name, tt.tags, m.Tags)
+			}
+			if !reflect.DeepEqual(m.Fields, tt.fields) {
+				t.Errorf("%s: expected fields\n%#v got\n%#v\n", tt.name, tt.fields, m.Fields)
+			}
+		})
+	}
+}
diff --git a/plugins/inputs/systemd_units/systemd_units_notlinux.go b/plugins/inputs/systemd_units/systemd_units_notlinux.go
new file mode 100644
index 000000000..f53cea3de
--- /dev/null
+++ b/plugins/inputs/systemd_units/systemd_units_notlinux.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package systemd_units

From 88ab29ed6368fbbf891640e166b71aa480b6dd11 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 2 Dec 2019 16:18:56 -0800
Subject: [PATCH 1366/1815] Update changelog

---
 CHANGELOG.md | 3 +++
 README.md | 1 +
 plugins/inputs/systemd_units/README.md | 15 +++++----------
 3 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c5b7750c8..3522c1b3f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -61,6 +61,9 @@
 - [#6686](https://github.com/influxdata/telegraf/pull/6686): Add additional tags to the x509 input.
 - [#6703](https://github.com/influxdata/telegraf/pull/6703): Add batch data format support to file output.
 - [#6688](https://github.com/influxdata/telegraf/pull/6688): Support partition assignment strategy configuration in kafka_consumer.
+- [#6731](https://github.com/influxdata/telegraf/pull/6731): Add node type tag to mongodb input.
+- [#6669](https://github.com/influxdata/telegraf/pull/6669): Add uptime_ns field to mongodb input.
+- [#6735](https://github.com/influxdata/telegraf/pull/6735): Support resolution of symlinks in filecount input.

 #### Bugfixes

diff --git a/README.md b/README.md
index a2383ba95..73f4268bb 100644
--- a/README.md
+++ b/README.md
@@ -283,6 +283,7 @@ For documentation on the latest development code see the [documentation index][d
 * [synproxy](./plugins/inputs/synproxy)
 * [syslog](./plugins/inputs/syslog)
 * [sysstat](./plugins/inputs/sysstat)
+* [systemd_units](./plugins/inputs/systemd_units)
 * [system](./plugins/inputs/system)
 * [tail](./plugins/inputs/tail)
 * [temp](./plugins/inputs/temp)
diff --git a/plugins/inputs/systemd_units/README.md b/plugins/inputs/systemd_units/README.md
index c9d4a85da..f6b8796f9 100644
--- a/plugins/inputs/systemd_units/README.md
+++ b/plugins/inputs/systemd_units/README.md
@@ -6,14 +6,14 @@ The systemd_units plugin gathers systemd unit status on Linux. It relies on
 `systemctl list-units --all --type=service` to collect data on service status.

 The results are tagged with the unit name and provide enumerated fields for
 the load, active and sub states, indicating overall unit health.

-This plugin is related to the [win_services module](../win_services/), which
+This plugin is related to the [win_services module](/plugins/inputs/win_services/), which
 fulfills the same purpose on Windows.

 In addition to services, this plugin can gather other unit types as well;
 see `systemctl list-units --all --type help` for possible options.
### Configuration -``` +```toml [[inputs.systemd_units]] ## Set timeout for systemctl execution # timeout = "1s" @@ -127,14 +127,9 @@ values ### Example Output -Linux Systemd Units: ``` -$ telegraf --test --config /tmp/telegraf.conf -> systemd_units,host=host1.example.com,name=dbus.service,load=loaded,active=active,sub=running load_code=0i,active_code=0i,sub_code=0i 1533730725000000000 -> systemd_units,host=host1.example.com,name=networking.service,load=loaded,active=failed,sub=failed load_code=0i,active_code=3i,sub_code=12i 1533730725000000000 -> systemd_units,host=host1.example.com,name=ssh.service,load=loaded,active=active,sub=running load_code=0i,active_code=0i,sub_code=0i 1533730725000000000 +systemd_units,host=host1.example.com,name=dbus.service,load=loaded,active=active,sub=running load_code=0i,active_code=0i,sub_code=0i 1533730725000000000 +systemd_units,host=host1.example.com,name=networking.service,load=loaded,active=failed,sub=failed load_code=0i,active_code=3i,sub_code=12i 1533730725000000000 +systemd_units,host=host1.example.com,name=ssh.service,load=loaded,active=active,sub=running load_code=0i,active_code=0i,sub_code=0i 1533730725000000000 ... ``` - -### Possible Improvements -- add blacklist to filter names From 1b25a9c9106742c233b4baeb9be1984db0fe2436 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 2 Dec 2019 20:03:25 -0800 Subject: [PATCH 1367/1815] Remove debug print statement --- plugins/parsers/grok/parser.go | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/parsers/grok/parser.go b/plugins/parsers/grok/parser.go index 60eff1afe..810190b9d 100644 --- a/plugins/parsers/grok/parser.go +++ b/plugins/parsers/grok/parser.go @@ -305,7 +305,6 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { log.Printf("E! 
Error parsing %s to int: %s", v, err) } else { timestamp = time.Unix(0, ms*int64(time.Millisecond)) - fmt.Println(timestamp) } case EPOCH_NANO: iv, err := strconv.ParseInt(v, 10, 64) From cf78f4e11efa05b64628dd41052a84fd45e1b7b0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 3 Dec 2019 11:26:51 -0800 Subject: [PATCH 1368/1815] Log mongodb oplog auth errors at debug level (#6742) --- plugins/inputs/mongodb/mongodb_server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index d311a9058..be3916b5e 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -214,7 +214,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool, gather if replSetStatus != nil { oplogStats, err = s.gatherOplogStats() if err != nil { - return err + s.authLog(fmt.Errorf("Unable to get oplog stats: %v", err)) } } From 63b047c91ac2d2d260d13456da53a19e71823b28 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 3 Dec 2019 11:27:33 -0800 Subject: [PATCH 1369/1815] Fix ping skips remaining hosts after dns lookup error (#6743) --- plugins/inputs/ping/ping.go | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 195c9d2d7..17767bac3 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -122,27 +122,25 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error { p.listenAddr = getAddr(p.Interface) } - for _, ip := range p.Urls { - _, err := net.LookupHost(ip) + for _, host := range p.Urls { + _, err := net.LookupHost(host) if err != nil { - acc.AddFields("ping", map[string]interface{}{"result_code": 1}, map[string]string{"ip": ip}) + acc.AddFields("ping", map[string]interface{}{"result_code": 1}, map[string]string{"url": host}) acc.AddError(err) - return nil + continue } - if p.Method == "native" { - p.wg.Add(1) - go func(ip string) { - defer p.wg.Done() - p.pingToURLNative(ip, acc) - }(ip) - } else { - p.wg.Add(1) - go func(ip string) { - defer p.wg.Done() - p.pingToURL(ip, acc) - }(ip) - } + p.wg.Add(1) + go func(host string) { + defer p.wg.Done() + + switch p.Method { + case "native": + p.pingToURLNative(host, acc) + default: + p.pingToURL(host, acc) + } + }(host) } p.wg.Wait() From 4feef67c21c361e32d7b888c0e91b0fd462867dd Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 3 Dec 2019 11:31:15 -0800 Subject: [PATCH 1370/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3522c1b3f..37ddd1e7b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -71,6 +71,8 @@ - [#6583](https://github.com/influxdata/telegraf/issues/6583): Use 1h or 3h rain values as appropriate in openweathermap input. - [#6573](https://github.com/influxdata/telegraf/issues/6573): Fix not a valid field error in Windows with nvidia input. - [#6614](https://github.com/influxdata/telegraf/issues/6614): Fix influxdb output serialization on connection closed. +- [#6690](https://github.com/influxdata/telegraf/issues/6690): Fix ping skips remaining hosts after dns lookup error. +- [#6684](https://github.com/influxdata/telegraf/issues/6684): Log mongodb oplog auth errors at debug level. 
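The ping fix recorded above ([#6690]) comes down to not returning from Gather on the first failed lookup; with the old code, one unresolvable host silently skipped every remaining host. A standalone sketch of the corrected loop shape (host names are examples, and plain printing stands in for the accumulator):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	hosts := []string{"bad.invalid", "localhost"} // example targets
	for _, host := range hosts {
		if _, err := net.LookupHost(host); err != nil {
			// Record the error and continue; the previous code returned
			// here, so hosts after the failing one were never pinged.
			fmt.Printf("lookup %s failed: %v\n", host, err)
			continue
		}
		fmt.Printf("would ping %s\n", host)
	}
}
```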
## v1.12.6 [2019-11-19] From 03de92b962099d5649619ffbffa3f005e2c2e961 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 3 Dec 2019 11:46:29 -0800 Subject: [PATCH 1371/1815] Remove trailing underscore trimming from json flattener (#6744) --- plugins/parsers/json/parser.go | 17 ++- plugins/parsers/json/parser_test.go | 210 +++++++++++++++------------- 2 files changed, 124 insertions(+), 103 deletions(-) diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index ae8c15c0d..bba179e1b 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -7,7 +7,6 @@ import ( "fmt" "log" "strconv" - "strings" "time" "github.com/influxdata/telegraf" @@ -260,19 +259,27 @@ func (f *JSONFlattener) FullFlattenJSON( if f.Fields == nil { f.Fields = make(map[string]interface{}) } - fieldname = strings.Trim(fieldname, "_") + switch t := v.(type) { case map[string]interface{}: for k, v := range t { - err := f.FullFlattenJSON(fieldname+"_"+k+"_", v, convertString, convertBool) + fieldkey := k + if fieldname != "" { + fieldkey = fieldname + "_" + fieldkey + } + + err := f.FullFlattenJSON(fieldkey, v, convertString, convertBool) if err != nil { return err } } case []interface{}: for i, v := range t { - k := strconv.Itoa(i) - err := f.FullFlattenJSON(fieldname+"_"+k+"_", v, convertString, convertBool) + fieldkey := strconv.Itoa(i) + if fieldname != "" { + fieldkey = fieldname + "_" + fieldkey + } + err := f.FullFlattenJSON(fieldkey, v, convertString, convertBool) if err != nil { return nil } diff --git a/plugins/parsers/json/parser_test.go b/plugins/parsers/json/parser_test.go index 0b9493b40..4571de63a 100644 --- a/plugins/parsers/json/parser_test.go +++ b/plugins/parsers/json/parser_test.go @@ -815,104 +815,6 @@ func TestNameKey(t *testing.T) { require.Equal(t, "this is my name", metrics[0].Name()) } -func TestTimeKeyDelete(t *testing.T) { - data := `{ - "timestamp": 1541183052, - "value": 42 - }` - - parser, err := New(&Config{ - MetricName: "json", - TimeKey: "timestamp", - TimeFormat: "unix", - }) - require.NoError(t, err) - - metrics, err := parser.Parse([]byte(data)) - require.NoError(t, err) - expected := []telegraf.Metric{ - testutil.MustMetric("json", - map[string]string{}, - map[string]interface{}{"value": 42.0}, - time.Unix(1541183052, 0)), - } - - testutil.RequireMetricsEqual(t, expected, metrics) -} - -func TestStringFieldGlob(t *testing.T) { - data := ` -{ - "color": "red", - "status": "error", - "time": "1541183052" -} -` - - parser, err := New(&Config{ - MetricName: "json", - StringFields: []string{"*"}, - TimeKey: "time", - TimeFormat: "unix", - }) - require.NoError(t, err) - - actual, err := parser.Parse([]byte(data)) - require.NoError(t, err) - - expected := []telegraf.Metric{ - testutil.MustMetric( - "json", - map[string]string{}, - map[string]interface{}{ - "color": "red", - "status": "error", - }, - time.Unix(1541183052, 0), - ), - } - - testutil.RequireMetricsEqual(t, expected, actual) -} - -func TestParseEmptyArray(t *testing.T) { - data := `[]` - - parser, err := New(&Config{}) - require.NoError(t, err) - - actual, err := parser.Parse([]byte(data)) - require.NoError(t, err) - - expected := []telegraf.Metric{} - testutil.RequireMetricsEqual(t, expected, actual) -} - -func TestParseSimpleArray(t *testing.T) { - data := `[{"answer": 42}]` - - parser, err := New(&Config{ - MetricName: "json", - }) - require.NoError(t, err) - - actual, err := parser.Parse([]byte(data)) - require.NoError(t, err) - - expected := []telegraf.Metric{ - 
testutil.MustMetric( - "json", - map[string]string{}, - map[string]interface{}{ - "answer": 42.0, - }, - time.Unix(0, 0), - ), - } - - testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) -} - func TestParseArrayWithWrongType(t *testing.T) { data := `[{"answer": 42}, 123]` @@ -922,3 +824,115 @@ func TestParseArrayWithWrongType(t *testing.T) { _, err = parser.Parse([]byte(data)) require.Error(t, err) } + +func TestParse(t *testing.T) { + tests := []struct { + name string + config *Config + input []byte + expected []telegraf.Metric + }{ + { + name: "tag keys with underscore issue 6705", + config: &Config{ + MetricName: "json", + TagKeys: []string{"metric___name__"}, + }, + input: []byte(`{"metric": {"__name__": "howdy", "time_idle": 42}}`), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json", + map[string]string{ + "metric___name__": "howdy", + }, + map[string]interface{}{ + "metric_time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "parse empty array", + config: &Config{}, + input: []byte(`[]`), + expected: []telegraf.Metric{}, + }, + { + name: "parse simple array", + config: &Config{ + MetricName: "json", + }, + input: []byte(`[{"answer": 42}]`), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json", + map[string]string{}, + map[string]interface{}{ + "answer": 42.0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "string field glob", + config: &Config{ + MetricName: "json", + StringFields: []string{"*"}, + }, + input: []byte(` +{ + "color": "red", + "status": "error" +} +`), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json", + map[string]string{}, + map[string]interface{}{ + "color": "red", + "status": "error", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "time key is deleted from fields", + config: &Config{ + MetricName: "json", + TimeKey: "timestamp", + TimeFormat: "unix", + }, + input: []byte(` +{ + "value": 42, + "timestamp": 1541183052 +} +`), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(1541183052, 0), + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser, err := New(tt.config) + require.NoError(t, err) + + actual, err := parser.Parse(tt.input) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime()) + }) + } +} From 9814817f90ea7f117a7da06236bfc5aba29ea1b4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 3 Dec 2019 11:47:02 -0800 Subject: [PATCH 1372/1815] Update to gopsutil v2.19.11 (#6741) --- Gopkg.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index ae730556e..00116a7b5 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1111,7 +1111,7 @@ version = "v1.2.0" [[projects]] - digest = "1:55dcddb2ba6ab25098ee6b96f176f39305f1fde7ea3d138e7e10bb64a5bf45be" + digest = "1:9024df427b3c8a80a0c4b34e535e5e1ae922c7174e3242b6c7f30ffb3b9f715e" name = "github.com/shirou/gopsutil" packages = [ "cpu", @@ -1124,8 +1124,8 @@ "process", ] pruneopts = "" - revision = "e4ec7b275ada47ca32799106c2dba142d96aaf93" - version = "v2.19.8" + revision = "fc7e5e7af6052e36e83e5539148015ed2c09d8f9" + version = "v2.19.11" [[projects]] branch = "master" From add8332990f4cbd4aa241a9367307572a2e7485d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 3 Dec 2019 11:47:31 -0800 Subject: [PATCH 1373/1815] Accept any media type in the prometheus input (#6745) --- 
plugins/inputs/prometheus/prometheus.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 340736c98..1f0862760 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -17,7 +17,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3` +const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,*/*;q=0.1` type Prometheus struct { // An array of urls to scrape metrics from. From cdb00d6fe79605b5b918e84bda1f4efb41dadf26 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 3 Dec 2019 11:48:02 -0800 Subject: [PATCH 1374/1815] Add base64decode operation to string processor (#6740) --- plugins/processors/strings/README.md | 5 + plugins/processors/strings/strings.go | 39 ++++++-- plugins/processors/strings/strings_test.go | 108 +++++++++++++++++++++ 3 files changed, 143 insertions(+), 9 deletions(-) diff --git a/plugins/processors/strings/README.md b/plugins/processors/strings/README.md index 367732c6f..d00bf03db 100644 --- a/plugins/processors/strings/README.md +++ b/plugins/processors/strings/README.md @@ -12,6 +12,7 @@ Implemented functions are: - trim_suffix - replace - left +- base64decode Please note that in this implementation these are processed in the order that they appear above. @@ -68,6 +69,10 @@ If you'd like to apply multiple processings to the same `tag_key` or `field_key` # [[processors.strings.left]] # field = "message" # width = 10 + + ## Decode a base64 encoded utf-8 string + # [[processors.strings.base64decode]] + # field = "message" ``` #### Trim, TrimLeft, TrimRight diff --git a/plugins/processors/strings/strings.go b/plugins/processors/strings/strings.go index e185bdd3b..4a8a6e7ff 100644 --- a/plugins/processors/strings/strings.go +++ b/plugins/processors/strings/strings.go @@ -1,23 +1,26 @@ package strings import ( + "encoding/base64" "strings" "unicode" + "unicode/utf8" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/processors" ) type Strings struct { - Lowercase []converter `toml:"lowercase"` - Uppercase []converter `toml:"uppercase"` - Trim []converter `toml:"trim"` - TrimLeft []converter `toml:"trim_left"` - TrimRight []converter `toml:"trim_right"` - TrimPrefix []converter `toml:"trim_prefix"` - TrimSuffix []converter `toml:"trim_suffix"` - Replace []converter `toml:"replace"` - Left []converter `toml:"left"` + Lowercase []converter `toml:"lowercase"` + Uppercase []converter `toml:"uppercase"` + Trim []converter `toml:"trim"` + TrimLeft []converter `toml:"trim_left"` + TrimRight []converter `toml:"trim_right"` + TrimPrefix []converter `toml:"trim_prefix"` + TrimSuffix []converter `toml:"trim_suffix"` + Replace []converter `toml:"replace"` + Left []converter `toml:"left"` + Base64Decode []converter `toml:"base64decode"` converters []converter init bool @@ -86,6 +89,10 @@ const sampleConfig = ` # [[processors.strings.left]] # field = "message" # width = 10 + + ## Decode a base64 encoded utf-8 string + # [[processors.strings.base64decode]] + # field = "message" ` func (s *Strings) SampleConfig() string { @@ -288,6 +295,20 @@ func (s *Strings) initOnce() { } s.converters = append(s.converters, c) } + for _, c := range s.Base64Decode { + c := c + c.fn = 
func(s string) string { + data, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return s + } + if utf8.Valid(data) { + return string(data) + } + return s + } + s.converters = append(s.converters, c) + } s.init = true } diff --git a/plugins/processors/strings/strings_test.go b/plugins/processors/strings/strings_test.go index 95d16c05e..ae35acecf 100644 --- a/plugins/processors/strings/strings_test.go +++ b/plugins/processors/strings/strings_test.go @@ -6,6 +6,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -892,3 +893,110 @@ func TestMeasurementCharDeletion(t *testing.T) { assert.Equal(t, "foofoofoo", results[1].Name(), "Should have refused to delete the whole string") assert.Equal(t, "barbarbar", results[2].Name(), "Should not have changed the input") } + +func TestBase64Decode(t *testing.T) { + tests := []struct { + name string + plugin *Strings + metric []telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "base64decode success", + plugin: &Strings{ + Base64Decode: []converter{ + { + Field: "message", + }, + }, + }, + metric: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "aG93ZHk=", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "howdy", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "base64decode not valid base64 returns original string", + plugin: &Strings{ + Base64Decode: []converter{ + { + Field: "message", + }, + }, + }, + metric: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "_not_base64_", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "_not_base64_", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "base64decode not valid utf-8 returns original string", + plugin: &Strings{ + Base64Decode: []converter{ + { + Field: "message", + }, + }, + }, + metric: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "//5oAG8AdwBkAHkA", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "//5oAG8AdwBkAHkA", + }, + time.Unix(0, 0), + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := tt.plugin.Apply(tt.metric...) 
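+
+			// The base64decode converter returns the original string when the
+			// input is not valid base64, or when the decoded bytes are not
+			// valid UTF-8; the three table cases above cover each outcome.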
+ testutil.RequireMetricsEqual(t, tt.expected, actual) + }) + } +} From 317c823bfc873fa0c74caca394ca7c6d86c3c708 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 3 Dec 2019 11:48:53 -0800 Subject: [PATCH 1375/1815] Set message timestamp to the metric time in kafka output (#6746) --- plugins/outputs/kafka/kafka.go | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 0c967819f..85eb32a3f 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -50,6 +50,8 @@ type ( // SASL Password SASLPassword string `toml:"sasl_password"` + Log telegraf.Logger `toml:"-"` + tlsConfig tls.Config producer sarama.SyncProducer @@ -316,13 +318,14 @@ func (k *Kafka) Write(metrics []telegraf.Metric) error { for _, metric := range metrics { buf, err := k.serializer.Serialize(metric) if err != nil { - log.Printf("D! [outputs.kafka] Could not serialize metric: %v", err) + k.Log.Debugf("Could not serialize metric: %v", err) continue } m := &sarama.ProducerMessage{ - Topic: k.GetTopicName(metric), - Value: sarama.ByteEncoder(buf), + Topic: k.GetTopicName(metric), + Value: sarama.ByteEncoder(buf), + Timestamp: metric.Time(), } key, err := k.routingKey(metric) @@ -342,7 +345,11 @@ func (k *Kafka) Write(metrics []telegraf.Metric) error { if errs, ok := err.(sarama.ProducerErrors); ok { for _, prodErr := range errs { if prodErr.Err == sarama.ErrMessageSizeTooLarge { - log.Printf("E! Error writing to output [kafka]: Message too large, consider increasing `max_message_bytes`; dropping batch") + k.Log.Error("Message too large, consider increasing `max_message_bytes`; dropping batch") + return nil + } + if prodErr.Err == sarama.ErrInvalidTimestamp { + k.Log.Error("The timestamp of the message is out of acceptable range, consider increasing broker `message.timestamp.difference.max.ms`; dropping batch") return nil } return prodErr From 5d502bb60565cd8dc9b4e32b6e27f74c49710c01 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 3 Dec 2019 12:41:59 -0800 Subject: [PATCH 1376/1815] Update sample config --- etc/telegraf.conf | 341 +++++++++++++++++++++++++++++--------- etc/telegraf_windows.conf | 34 +++- 2 files changed, 293 insertions(+), 82 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 5f728579b..c807c01c7 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -35,7 +35,9 @@ ## This controls the size of writes that Telegraf sends to output plugins. metric_batch_size = 1000 - ## Maximum number of unwritten metrics per output. + ## Maximum number of unwritten metrics per output. Increasing this value + ## allows for longer periods of output downtime without dropping metrics at the + ## cost of higher maximum memory usage. metric_buffer_limit = 10000 ## Collection jitter is used to jitter the collection by a random amount. @@ -66,7 +68,13 @@ ## Log only error level messages. # quiet = false - ## Log file name, the empty string means to log to stderr. + ## Log target controls the destination for logs and can be one of "file", + ## "stderr" or, on Windows, "eventlog". When set to "file", the output file + ## is determined by the "logfile" setting. + # logtarget = "file" + + ## Name of the file to be logged to when using the "file" logtarget. If set to + ## the empty string then logs are written to stderr. # logfile = "" ## The logfile will be rotated after the time interval specified. 
When set @@ -412,6 +420,9 @@ # ## You could use basicstats aggregator to calculate those fields. If not all statistic # ## fields are available, all fields would still be sent as raw metrics. # # write_statistics = false +# +# ## Enable high resolution metrics of 1 second (if not enabled, standard resolution are of 60 seconds precision) +# # high_resolution_metrics = false # # Configuration for CrateDB to send metrics to. @@ -516,6 +527,11 @@ # ## Files to write to, "stdout" is a specially handled file. # files = ["stdout", "/tmp/metrics.out"] # +# ## Use batch serialization format instead of line based delimiting. The +# ## batch format allows for the production of non line based output formats and +# ## may more effiently encode metric groups. +# # use_batch_format = false +# # ## The file will be rotated after the time interval specified. When set # ## to 0 no time based rotation is performed. # # rotation_interval = "0d" @@ -657,6 +673,7 @@ # ## # ## Multiple URLs can be specified for a single cluster, only ONE of the # ## urls will be written to each interval. +# ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] # urls = ["http://127.0.0.1:9999"] # # ## Token for authentication. @@ -1029,6 +1046,14 @@ # ## Address to listen on # listen = ":9273" # +# ## Metric version controls the mapping from Telegraf metrics into +# ## Prometheus format. When using the prometheus input, use the same value in +# ## both plugins to ensure metrics are round-tripped without modification. +# ## +# ## example: metric_version = 1; deprecated in 1.13 +# ## metric_version = 2; recommended version +# # metric_version = 1 +# # ## Use HTTP Basic Authentication. # # basic_username = "Foo" # # basic_password = "Bar" @@ -1292,6 +1317,18 @@ ############################################################################### +# # Clone metrics and apply modifications. +# [[processors.clone]] +# ## All modifications on inputs and aggregators can be overridden: +# # name_override = "new_name" +# # name_prefix = "new_name_prefix" +# # name_suffix = "new_name_suffix" +# +# ## Tags to be added (all values must be strings) +# # [processors.clone.tags] +# # additional_tag = "tag_value" + + # # Convert values to another metric value type # [[processors.converter]] # ## Tags to convert @@ -1557,6 +1594,7 @@ # [[aggregators.basicstats]] # ## The period on which to flush & clear the aggregator. # period = "30s" +# # ## If true, the original metric will be dropped by the # ## aggregator and will not get sent to the output plugins. # drop_original = false @@ -1607,6 +1645,11 @@ # # fields = ["io_time", "read_time", "write_time"] +# # Merge metrics into multifield metrics by series key +# [[aggregators.merge]] +# # no configuration + + # # Keep the aggregate min/max of each metric passing through. 
# [[aggregators.minmax]] # ## General Aggregator Arguments: @@ -1816,6 +1859,18 @@ # # insecure_skip_verify = false +# # Gather Azure Storage Queue metrics +# [[inputs.azure_storage_queue]] +# ## Required Azure Storage Account name +# account_name = "mystorageaccount" +# +# ## Required Azure Storage Account access key +# account_key = "storageaccountaccesskey" +# +# ## Set to false to disable peeking age of oldest message (executes faster) +# # peek_oldest_message_age = true + + # # Read metrics of bcache from stats_total and dirty_data # [[inputs.bcache]] # ## Bcache sets path @@ -2013,6 +2068,9 @@ # ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html # # ratelimit = 25 # +# ## Timeout for http requests made by the cloudwatch client. +# # timeout = "5s" +# # ## Namespace-wide statistic filters. These allow fewer queries to be made to # ## cloudwatch. # # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ] @@ -2202,6 +2260,9 @@ # ## Only collect metrics for these containers, collect all if empty # container_names = [] # +# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars +# source_tag = false +# # ## Containers to include and exclude. Globs accepted. # ## Note that an empty array for both will include all containers # container_name_include = [] @@ -2220,8 +2281,10 @@ # ## Whether to report for each container per-device blkio (8:0, 8:1...) and # ## network (eth0, eth1, ...) stats or not # perdevice = true +# # ## Whether to report for each container total blkio and network stats or not # total = false +# # ## Which environment variables should we use as a tag # ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] # @@ -2246,8 +2309,10 @@ # ## # ## If no servers are specified, then localhost is used as the host. # servers = ["localhost:24242"] +# # ## Type is one of "user", "domain", "ip", or "global" # type = "global" +# # ## Wildcard matches like "*.com". An empty string "" is same as "*" # ## If type = "ip" filters should be # filters = [""] @@ -2332,6 +2397,15 @@ # # insecure_skip_verify = false +# # Returns ethtool statistics for given interfaces +# [[inputs.ethtool]] +# ## List of interfaces to pull metrics for +# # interface_include = ["eth0"] +# +# ## List of interfaces to ignore when pulling metrics. +# # interface_exclude = ["eth1"] + + # # Read metrics from one or more commands that can output to stdout # [[inputs.exec]] # ## Commands array @@ -2389,6 +2463,10 @@ # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" +# +# ## Name a tag containing the name of the file the data was parsed from. Leave empty +# ## to disable. +# # file_tag = "" # # Count files in a directory @@ -2414,6 +2492,9 @@ # ## Only count regular files. Defaults to true. # regular_only = true # +# ## Follow all symlinks while walking the directory tree. Defaults to false. +# follow_symlinks = false +# # ## Only count files that are at least this size. If size is # ## a negative number, only count files that are smaller than the # ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... @@ -2438,6 +2519,7 @@ # ## See https://github.com/gobwas/glob for more examples # ## # files = ["/var/log/**.log"] +# # ## If true, read the entire file and calculate an md5 checksum. 
# md5 = false @@ -2710,10 +2792,10 @@ # # Gather Icinga2 status # [[inputs.icinga2]] -# ## Required Icinga2 server address (default: "https://localhost:5665") +# ## Required Icinga2 server address # # server = "https://localhost:5665" # -# ## Required Icinga2 object type ("services" or "hosts, default "services") +# ## Required Icinga2 object type ("services" or "hosts") # # object_type = "services" # # ## Credentials for basic HTTP authentication @@ -2743,6 +2825,10 @@ # "http://localhost:8086/debug/vars" # ] # +# ## Username and password to send using HTTP Basic Authentication. +# # username = "" +# # password = "" +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -2841,7 +2927,7 @@ # # Read jobs and cluster metrics from Jenkins instances # [[inputs.jenkins]] -# ## The Jenkins URL +# ## The Jenkins URL in the format "schema://host:port" # url = "http://my-jenkins-instance:8080" # # username = "admin" # # password = "admin" @@ -3062,6 +3148,8 @@ # # namespace = "default" # # ## Use bearer token for authorization. ('bearer_token' takes priority) +# ## If both of these are empty, we'll use the default serviceaccount: +# ## at: /run/secrets/kubernetes.io/serviceaccount/token # # bearer_token = "/path/to/bearer/token" # ## OR # # bearer_token_string = "abc_123" @@ -3093,6 +3181,8 @@ # url = "http://127.0.0.1:10255" # # ## Use bearer token for authorization. ('bearer_token' takes priority) +# ## If both of these are empty, we'll use the default serviceaccount: +# ## at: /run/secrets/kubernetes.io/serviceaccount/token # # bearer_token = "/path/to/bearer/token" # ## OR # # bearer_token_string = "abc_123" @@ -3223,8 +3313,10 @@ # [[inputs.mesos]] # ## Timeout, in ms. # timeout = 100 +# # ## A list of Mesos masters. # masters = ["http://localhost:5050"] +# # ## Master metrics groups to be collected, by default, all enabled. # master_collections = [ # "resources", @@ -3239,8 +3331,10 @@ # "registrar", # "allocator", # ] +# # ## A list of Mesos slaves, default is [] # # slaves = [] +# # ## Slave metrics groups to be collected, by default, all enabled. 
# # slave_collections = [ # # "resources", @@ -3285,8 +3379,10 @@ # # ## When true, collect per database stats # # gather_perdb_stats = false +# # ## When true, collect per collection stats # # gather_col_stats = false +# # ## List of db where collections stats are collected # ## If empty, all db are concerned # # col_stats_dbs = ["local"] @@ -3349,55 +3445,56 @@ # ## <1.6: metric_version = 1 (or unset) # metric_version = 2 # -# ## the limits for metrics form perf_events_statements -# perf_events_statements_digest_text_limit = 120 -# perf_events_statements_limit = 250 -# perf_events_statements_time_limit = 86400 -# # # ## if the list is empty, then metrics are gathered from all databasee tables -# table_schema_databases = [] -# # +# # table_schema_databases = [] +# # ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list -# gather_table_schema = false -# # +# # gather_table_schema = false +# # ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST -# gather_process_list = true -# # +# # gather_process_list = false +# # ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS -# gather_user_statistics = true -# # +# # gather_user_statistics = false +# # ## gather auto_increment columns and max values from information schema -# gather_info_schema_auto_inc = true -# # +# # gather_info_schema_auto_inc = false +# # ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS -# gather_innodb_metrics = true -# # +# # gather_innodb_metrics = false +# # ## gather metrics from SHOW SLAVE STATUS command output -# gather_slave_status = true -# # +# # gather_slave_status = false +# # ## gather metrics from SHOW BINARY LOGS command output -# gather_binary_logs = false -# # +# # gather_binary_logs = false +# # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE -# gather_table_io_waits = false -# # +# # gather_table_io_waits = false +# # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS -# gather_table_lock_waits = false -# # +# # gather_table_lock_waits = false +# # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE -# gather_index_io_waits = false -# # +# # gather_index_io_waits = false +# # ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS -# gather_event_waits = false -# # +# # gather_event_waits = false +# # ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME -# gather_file_events_stats = false -# # +# # gather_file_events_stats = false +# # ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST -# gather_perf_events_statements = false -# # +# # gather_perf_events_statements = false +# +# ## the limits for metrics form perf_events_statements +# # perf_events_statements_digest_text_limit = 120 +# # perf_events_statements_limit = 250 +# # perf_events_statements_time_limit = 86400 +# # ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) -# interval_slow = "30m" +# ## example: interval_slow = "30m" +# # interval_slow = "" # # ## Optional TLS Config (will be used if tls=custom parameter specified in server uri) # # tls_ca = "/etc/telegraf/ca.pem" @@ -3672,6 +3769,12 @@ # ## City ID's to collect weather data from. # city_id = ["5391959"] # +# ## Language of the description field. 
Can be one of "ar", "bg", +# ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu", +# ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru", +# ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw" +# # lang = "en" +# # ## APIs to fetch; can contain "weather" or "forecast". # fetch = ["weather", "forecast"] # @@ -3748,35 +3851,47 @@ # # Ping given url(s) and return statistics # [[inputs.ping]] -# ## List of urls to ping +# ## Hosts to send ping packets to. # urls = ["example.org"] # -# ## Number of pings to send per collection (ping -c ) -# # count = 1 -# -# ## Interval, in s, at which to ping. 0 == default (ping -i ) -# # ping_interval = 1.0 -# -# ## Per-ping timeout, in s. 0 == no timeout (ping -W ) -# # timeout = 1.0 -# -# ## Total-ping deadline, in s. 0 == no deadline (ping -w ) -# # deadline = 10 -# -# ## Interface or source address to send ping from (ping -I[-S] ) -# # interface = "" -# -# ## How to ping. "native" doesn't have external dependencies, while "exec" depends on 'ping'. +# ## Method used for sending pings, can be either "exec" or "native". When set +# ## to "exec" the systems ping command will be executed. When set to "native" +# ## the plugin will send pings directly. +# ## +# ## While the default is "exec" for backwards compatibility, new deployments +# ## are encouraged to use the "native" method for improved compatibility and +# ## performance. # # method = "exec" # -# ## Specify the ping executable binary, default is "ping" -# # binary = "ping" +# ## Number of ping packets to send per interval. Corresponds to the "-c" +# ## option of the ping command. +# # count = 1 # -# ## Arguments for ping command. When arguments is not empty, system binary will be used and -# ## other options (ping_interval, timeout, etc) will be ignored. +# ## Time to wait between sending ping packets in seconds. Operates like the +# ## "-i" option of the ping command. +# # ping_interval = 1.0 +# +# ## If set, the time to wait for a ping response in seconds. Operates like +# ## the "-W" option of the ping command. +# # timeout = 1.0 +# +# ## If set, the total ping deadline, in seconds. Operates like the -w option +# ## of the ping command. +# # deadline = 10 +# +# ## Interface or source address to send ping from. Operates like the -I or -S +# ## option of the ping command. +# # interface = "" +# +# ## Specify the ping executable binary. +# # binary = "ping" +# +# ## Arguments for ping command. When arguments is not empty, the command from +# ## the binary option will be used and other options (ping_interval, timeout, +# ## etc) will be ignored. # # arguments = ["-c", "3"] # -# ## Use only ipv6 addresses when resolving hostnames. +# ## Use only IPv6 addresses when resolving a hostname. # # ipv6 = false @@ -3895,6 +4010,15 @@ # ## Note that an empty array for both will include all queues # queue_name_include = [] # queue_name_exclude = [] +# +# ## Federation upstreams include and exclude when gathering the rabbitmq_federation measurement. +# ## If neither are specified, metrics for all federation upstreams are gathered. +# ## Federation link metrics will only be gathered for queues and exchanges +# ## whose non-federation metrics will be collected (e.g a queue excluded +# ## by the 'queue_name_exclude' option will also be excluded from federation). +# ## Globs accepted. 
+# # federation_upstream_include = ["dataCentre-*"] +# # federation_upstream_exclude = [] # # Read raindrops stats (raindrops - real-time stats for preforking Rack servers) @@ -4200,7 +4324,8 @@ # ## By default, the host is localhost, listening on default port, TCP 1433. # ## for Windows, the user is the currently running AD user (SSO). # ## See https://github.com/denisenkom/go-mssqldb for detailed connection -# ## parameters. +# ## parameters, in particular, tls connections can be created like so: +# ## "encrypt=true;certificate=;hostNameInCertificate=" # # servers = [ # # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", # # ] @@ -4229,6 +4354,7 @@ # ## - AzureDBResourceStats # ## - AzureDBResourceGovernance # ## - SqlRequests +# ## - ServerProperties # exclude_query = [ 'Schedulers' ] @@ -4312,6 +4438,11 @@ # # value = 'one_of("sda", "sdb")' +# # Get synproxy counter statistics from procfs +# [[inputs.synproxy]] +# # no configuration + + # # Sysstat metrics collector # [[inputs.sysstat]] # ## Path to the sadc command. @@ -4321,18 +4452,15 @@ # ## Arch: /usr/lib/sa/sadc # ## RHEL/CentOS: /usr/lib64/sa/sadc # sadc_path = "/usr/lib/sa/sadc" # required -# # -# # +# # ## Path to the sadf command, if it is not in PATH # # sadf_path = "/usr/bin/sadf" -# # -# # +# # ## Activities is a list of activities, that are passed as argument to the # ## sadc collector utility (e.g: DISK, SNMP etc...) # ## The more activities that are added, the more data is collected. # # activities = ["DISK"] -# # -# # +# # ## Group metrics to measurements. # ## # ## If group is false each metric will be prefixed with a description @@ -4340,8 +4468,7 @@ # ## # ## If Group is true, corresponding metrics are grouped to a single measurement. # # group = true -# # -# # +# # ## Options for the sadf command. The values on the left represent the sadf # ## options and the values on the right their description (which are used for # ## grouping and prefixing metrics). @@ -4365,8 +4492,7 @@ # -w = "task" # # -H = "hugepages" # only available for newer linux distributions # # "-I ALL" = "interrupts" # requires INT activity -# # -# # +# # ## Device tags can be used to add additional tags for devices. # ## For example the configuration below adds a tag vg with value rootvg for # ## all metrics with sda devices. @@ -4374,6 +4500,17 @@ # # vg = "rootvg" +# # Gather systemd units state +# [[inputs.systemd_units]] +# ## Set timeout for systemctl execution +# # timeout = "1s" +# # +# ## Filter for a specific unit type, default is "service", other possible +# ## values are "socket", "target", "device", "mount", "automount", "swap", +# ## "timer", "path", "slice" and "scope ": +# # unittype = "service" + + # # Reads metrics from a Teamspeak 3 Server via ServerQuery # [[inputs.teamspeak]] # ## Server address for Teamspeak 3 ServerQuery @@ -4739,6 +4876,9 @@ # ## transport only. 
# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] # +# ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags +# # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"] +# # ## Define aliases to map telemetry encoding paths to simple measurement names # [inputs.cisco_telemetry_mdt.aliases] # ifstats = "ietf-interfaces:interfaces-state/interface/statistics" @@ -4899,6 +5039,9 @@ # # docker_label_include = [] # # docker_label_exclude = [] # +# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars +# source_tag = false +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -5144,12 +5287,16 @@ # [[inputs.kafka_consumer_legacy]] # ## topic(s) to consume # topics = ["telegraf"] +# # ## an array of Zookeeper connection strings # zookeeper_peers = ["localhost:2181"] +# # ## Zookeeper Chroot # zookeeper_chroot = "" +# # ## the name of the consumer group # consumer_group = "telegraf_metrics_consumers" +# # ## Offset (must be either "oldest" or "newest") # offset = "oldest" # @@ -5314,7 +5461,7 @@ # # max_undelivered_messages = 1000 # # ## Persistent session disables clearing of the client session on connection. -# ## In order for this option to work you must also set client_id to identity +# ## In order for this option to work you must also set client_id to identify # ## the client. To receive messages that arrived while the client is offline, # ## also set the qos option to 1 or 2 and don't forget to also set the QoS when # ## publishing. @@ -5348,6 +5495,7 @@ # # ## subject(s) to consume # subjects = ["telegraf"] +# # ## name a queue group # queue_group = "telegraf_consumers" # @@ -5391,8 +5539,10 @@ # [[inputs.nsq_consumer]] # ## Server option still works but is deprecated, we just prepend it to the nsqd array. # # server = "localhost:4150" +# # ## An array representing the NSQD TCP HTTP Endpoints # nsqd = ["localhost:4150"] +# # ## An array representing the NSQLookupd HTTP Endpoints # nsqlookupd = ["localhost:4161"] # topic = "telegraf" @@ -5507,7 +5657,10 @@ # ## field is used to define custom tags (separated by commas) # ## The optional "measurement" value can be used to override the default # ## output measurement name ("postgresql"). -# # +# ## +# ## The script option can be used to specify the .sql file path. +# ## If script and sqlquery options specified at same time, sqlquery will be used +# ## # ## Structure : # ## [[inputs.postgresql_extensible.query]] # ## sqlquery string @@ -5533,6 +5686,18 @@ # ## An array of urls to scrape metrics from. # urls = ["http://localhost:9100/metrics"] # +# ## Metric version controls the mapping from Prometheus metrics into +# ## Telegraf metrics. When using the prometheus_client output, use the same +# ## value in both plugins to ensure metrics are round-tripped without +# ## modification. +# ## +# ## example: metric_version = 1; deprecated in 1.13 +# ## metric_version = 2; recommended version +# # metric_version = 1 +# +# ## Url tag name (tag containing scrapped url. optional, default is "url") +# # url_tag = "scrapeUrl" +# # ## An array of Kubernetes services to scrape metrics from. 
# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] # @@ -5560,7 +5725,7 @@ # # username = "" # # password = "" # -# ## Specify timeout duration for slower prometheus clients (default is 3s) +# ## Specify timeout duration for slower prometheus clients (default is 3s) # # response_timeout = "3s" # # ## Optional TLS Config @@ -5571,6 +5736,16 @@ # # insecure_skip_verify = false +# # Receive SNMP traps +# [[inputs.snmp_trap]] +# ## Transport, local address, and port to listen on. Transport must +# ## be "udp://". Omit local address to listen on all interfaces. +# ## example: "udp://127.0.0.1:1234" +# # service_address = udp://:162 +# ## Timeout running snmptranslate command +# # timeout = "5s" + + # # Generic socket listener capable of handling multiple socket types. # [[inputs.socket_listener]] # ## URL to listen on @@ -5626,6 +5801,10 @@ # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # # data_format = "influx" +# +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. +# # content_encoding = "identity" # # Statsd UDP/TCP Server @@ -5688,6 +5867,18 @@ # percentile_limit = 1000 +# # Suricata stats plugin +# [[inputs.suricata]] +# ## Data sink for Suricata stats log +# # This is expected to be a filename of a +# # unix socket to be created for listening. +# source = "/var/run/suricata-stats.sock" +# +# # Delimiter for flattening field keys, e.g. subitem "alert" of "detect" +# # becomes "detect_alert" when delimiter is "_". +# delimiter = "_" + + # # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587 # [[inputs.syslog]] # ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514 diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index 0d72e79e8..c3586cafd 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -9,9 +9,9 @@ # Use 'telegraf -config telegraf.conf -test' to see what metrics a config # file would generate. # -# Environment variables can be used anywhere in this config file, simply prepend -# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), -# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) +# Environment variables can be used anywhere in this config file, simply surround +# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"), +# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) # Global tags can be specified here in key="value" format. @@ -35,7 +35,9 @@ ## This controls the size of writes that Telegraf sends to output plugins. metric_batch_size = 1000 - ## Maximum number of unwritten metrics per output. + ## Maximum number of unwritten metrics per output. Increasing this value + ## allows for longer periods of output downtime without dropping metrics at the + ## cost of higher maximum memory usage. metric_buffer_limit = 10000 ## Collection jitter is used to jitter the collection by a random amount. @@ -66,7 +68,13 @@ ## Log only error level messages. # quiet = false - ## Log file name, the empty string means to log to stderr. + ## Log target controls the destination for logs and can be one of "file", + ## "stderr" or, on Windows, "eventlog". When set to "file", the output file + ## is determined by the "logfile" setting. + # logtarget = "file" + + ## Name of the file to be logged to when using the "file" logtarget. 
If set to + ## the empty string then logs are written to stderr. # logfile = "" ## The logfile will be rotated after the time interval specified. When set @@ -89,9 +97,10 @@ ############################################################################### -# OUTPUTS # +# OUTPUT PLUGINS # ############################################################################### + # Configuration for sending metrics to InfluxDB [[outputs.influxdb]] ## The full HTTP or UDP URL for your InfluxDB instance. @@ -103,8 +112,16 @@ # urls = ["http://127.0.0.1:8086"] ## The target database for metrics; will be created as needed. + ## For UDP url endpoint database needs to be configured on server side. # database = "telegraf" + ## The value of this tag will be used to determine the database. If this + ## tag is not set the 'database' option is used as the default. + # database_tag = "" + + ## If true, the database tag will not be added to the metric. + # exclude_database_tag = false + ## If true, no CREATE DATABASE queries will be sent. Set to true when using ## Telegraf with a user without permissions to create databases or when the ## database already exists. @@ -161,6 +178,7 @@ # ## # ## Multiple URLs can be specified for a single cluster, only ONE of the # ## urls will be written to each interval. +# ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] # urls = ["http://127.0.0.1:9999"] # # ## Token for authentication. @@ -206,10 +224,12 @@ # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false + ############################################################################### -# INPUTS # +# INPUT PLUGINS # ############################################################################### + # Windows Performance Counters plugin. # These are the recommended method of monitoring system metrics on windows, # as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI, From 09f9b703543c75f61a92fc15d642778e38a4d3ee Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 3 Dec 2019 12:52:32 -0800 Subject: [PATCH 1377/1815] Update changelog --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 37ddd1e7b..d82e3cb4b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -64,6 +64,8 @@ - [#6731](https://github.com/influxdata/telegraf/pull/6731): Add node type tag to mongodb input. - [#6669](https://github.com/influxdata/telegraf/pull/6669): Add uptime_ns field to mongodb input. - [#6735](https://github.com/influxdata/telegraf/pull/6735): Support resolution of symlinks in filecount input. +- [#6746](https://github.com/influxdata/telegraf/pull/6746): Set message timestamp to the metric time in kafka output. +- [#6740](https://github.com/influxdata/telegraf/pull/6740): Add base64decode operation to string processor. #### Bugfixes @@ -73,6 +75,9 @@ - [#6614](https://github.com/influxdata/telegraf/issues/6614): Fix influxdb output serialization on connection closed. - [#6690](https://github.com/influxdata/telegraf/issues/6690): Fix ping skips remaining hosts after dns lookup error. - [#6684](https://github.com/influxdata/telegraf/issues/6684): Log mongodb oplog auth errors at debug level. +- [#6705](https://github.com/influxdata/telegraf/issues/6705): Remove trailing underscore trimming from json flattener. +- [#6421](https://github.com/influxdata/telegraf/issues/6421): Revert change causing cpu usage to be capped at 100 percent. 
+- [#6523](https://github.com/influxdata/telegraf/issues/6523): Accept any media type in the prometheus input. ## v1.12.6 [2019-11-19] From 1f7b68a2b293a838673460424f1faf67e10bc9d5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 3 Dec 2019 13:34:00 -0800 Subject: [PATCH 1378/1815] Allow colons in the prometheus metric name (#6751) --- plugins/serializers/prometheus/collection.go | 6 +- plugins/serializers/prometheus/convert.go | 90 +++++++++++++------ .../serializers/prometheus/prometheus_test.go | 56 ++++++++++++ 3 files changed, 124 insertions(+), 28 deletions(-) diff --git a/plugins/serializers/prometheus/collection.go b/plugins/serializers/prometheus/collection.go index d16208622..8ca06520b 100644 --- a/plugins/serializers/prometheus/collection.go +++ b/plugins/serializers/prometheus/collection.go @@ -113,7 +113,7 @@ func (c *Collection) createLabels(metric telegraf.Metric) []LabelPair { } } - name, ok := SanitizeName(tag.Key) + name, ok := SanitizeLabelName(tag.Key) if !ok { continue } @@ -132,7 +132,7 @@ func (c *Collection) createLabels(metric telegraf.Metric) []LabelPair { continue } - name, ok := SanitizeName(field.Key) + name, ok := SanitizeLabelName(field.Key) if !ok { continue } @@ -161,7 +161,7 @@ func (c *Collection) Add(metric telegraf.Metric) { labels := c.createLabels(metric) for _, field := range metric.FieldList() { metricName := MetricName(metric.Name(), field.Key, metric.Type()) - metricName, ok := SanitizeName(metricName) + metricName, ok := SanitizeMetricName(metricName) if !ok { continue } diff --git a/plugins/serializers/prometheus/convert.go b/plugins/serializers/prometheus/convert.go index 2ef23be63..131ac31b8 100644 --- a/plugins/serializers/prometheus/convert.go +++ b/plugins/serializers/prometheus/convert.go @@ -8,26 +8,53 @@ import ( dto "github.com/prometheus/client_model/go" ) -var FirstTable = &unicode.RangeTable{ - R16: []unicode.Range16{ - {0x0041, 0x005A, 1}, // A-Z - {0x005F, 0x005F, 1}, // _ - {0x0061, 0x007A, 1}, // a-z - }, - LatinOffset: 3, +type Table struct { + First *unicode.RangeTable + Rest *unicode.RangeTable } -var RestTable = &unicode.RangeTable{ - R16: []unicode.Range16{ - {0x0030, 0x0039, 1}, // 0-9 - {0x0041, 0x005A, 1}, // A-Z - {0x005F, 0x005F, 1}, // _ - {0x0061, 0x007A, 1}, // a-z +var MetricNameTable = Table{ + First: &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x003A, 0x003A, 1}, // : + {0x0041, 0x005A, 1}, // A-Z + {0x005F, 0x005F, 1}, // _ + {0x0061, 0x007A, 1}, // a-z + }, + LatinOffset: 4, + }, + Rest: &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x0030, 0x003A, 1}, // 0-: + {0x0041, 0x005A, 1}, // A-Z + {0x005F, 0x005F, 1}, // _ + {0x0061, 0x007A, 1}, // a-z + }, + LatinOffset: 4, }, - LatinOffset: 4, } -func isValid(name string) bool { +var LabelNameTable = Table{ + First: &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x0041, 0x005A, 1}, // A-Z + {0x005F, 0x005F, 1}, // _ + {0x0061, 0x007A, 1}, // a-z + }, + LatinOffset: 3, + }, + Rest: &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x0030, 0x0039, 1}, // 0-9 + {0x0041, 0x005A, 1}, // A-Z + {0x005F, 0x005F, 1}, // _ + {0x0061, 0x007A, 1}, // a-z + }, + LatinOffset: 4, + }, +} + +func isValid(name string, table Table) bool { if name == "" { return false } @@ -35,11 +62,11 @@ func isValid(name string) bool { for i, r := range name { switch { case i == 0: - if !unicode.In(r, FirstTable) { + if !unicode.In(r, table.First) { return false } default: - if !unicode.In(r, RestTable) { + if !unicode.In(r, table.Rest) { return false } } @@ -48,12 +75,11 @@ func 
isValid(name string) bool { return true } -// SanitizeName check if the name is a valid Prometheus metric name and label -// name. If not, it attempts to replaces invalid runes with an underscore to -// create a valid name. Returns the metric name and true if the name is valid -// to use. -func SanitizeName(name string) (string, bool) { - if isValid(name) { +// Sanitize checks if the name is valid according to the table. If not, it +// attempts to replaces invalid runes with an underscore to create a valid +// name. +func sanitize(name string, table Table) (string, bool) { + if isValid(name, table) { return name, true } @@ -62,11 +88,11 @@ func SanitizeName(name string) (string, bool) { for i, r := range name { switch { case i == 0: - if unicode.In(r, FirstTable) { + if unicode.In(r, table.First) { b.WriteRune(r) } default: - if unicode.In(r, RestTable) { + if unicode.In(r, table.Rest) { b.WriteRune(r) } else { b.WriteString("_") @@ -82,6 +108,20 @@ func SanitizeName(name string) (string, bool) { return name, true } +// SanitizeMetricName checks if the name is a valid Prometheus metric name. If +// not, it attempts to replaces invalid runes with an underscore to create a +// valid name. +func SanitizeMetricName(name string) (string, bool) { + return sanitize(name, MetricNameTable) +} + +// SanitizeLabelName checks if the name is a valid Prometheus label name. If +// not, it attempts to replaces invalid runes with an underscore to create a +// valid name. +func SanitizeLabelName(name string) (string, bool) { + return sanitize(name, LabelNameTable) +} + // MetricName returns the Prometheus metric name. func MetricName(measurement, fieldKey string, valueType telegraf.ValueType) string { switch valueType { diff --git a/plugins/serializers/prometheus/prometheus_test.go b/plugins/serializers/prometheus/prometheus_test.go index 6195fbead..632ca148e 100644 --- a/plugins/serializers/prometheus/prometheus_test.go +++ b/plugins/serializers/prometheus/prometheus_test.go @@ -409,6 +409,42 @@ rpc_duration_seconds_count 2693 # HELP cpu_time_idle Telegraf collected metric # TYPE cpu_time_idle untyped cpu_time_idle 43 +`), + }, + { + name: "colons are not replaced in metric name from measurement", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu::xyzzy", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu::xyzzy_time_idle Telegraf collected metric +# TYPE cpu::xyzzy_time_idle untyped +cpu::xyzzy_time_idle 42 +`), + }, + { + name: "colons are not replaced in metric name from field", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time:idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time:idle Telegraf collected metric +# TYPE cpu_time:idle untyped +cpu_time:idle 42 `), }, { @@ -429,6 +465,26 @@ cpu_time_idle 43 # HELP cpu_time_idle Telegraf collected metric # TYPE cpu_time_idle untyped cpu_time_idle{host_name="example.org"} 42 +`), + }, + { + name: "colons are replaced in label name", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host:name": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host_name="example.org"} 42 `), }, { From cc9a8cd1c6968bcfc6a9390a79b9966cbd292568 Mon Sep 17 00:00:00 2001 From: Daniel Nelson 
Date: Tue, 3 Dec 2019 14:57:40 -0800 Subject: [PATCH 1379/1815] Remove non-existent field from net_response readme --- plugins/inputs/net_response/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/inputs/net_response/README.md b/plugins/inputs/net_response/README.md index dcfb341d5..2c492408b 100644 --- a/plugins/inputs/net_response/README.md +++ b/plugins/inputs/net_response/README.md @@ -43,7 +43,6 @@ verify text in the response. - result - fields: - response_time (float, seconds) - - success (int) # success 0, failure 1 - result_code (int, success = 0, timeout = 1, connection_failed = 2, read_failed = 3, string_mismatch = 4) - result_type (string) **DEPRECATED in 1.7; use result tag** - string_found (boolean) **DEPRECATED in 1.4; use result tag** From a58db9e44776fdbff008db79b51b92a1831dd7dc Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 3 Dec 2019 14:59:50 -0800 Subject: [PATCH 1380/1815] Increment package versions --- scripts/build.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/build.py b/scripts/build.py index 4bde92514..6d404ea9d 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -101,7 +101,7 @@ supported_packages = { "freebsd": [ "tar" ] } -next_version = '1.13.0' +next_version = '1.14.0' ################ #### Telegraf Functions From 48f9f22f33b35139433e5ebd2008244c6c929616 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 3 Dec 2019 16:43:05 -0800 Subject: [PATCH 1381/1815] Document that json tag_keys are not saved as fields. --- plugins/parsers/json/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/parsers/json/README.md b/plugins/parsers/json/README.md index 45f4a98c6..b4975bcd3 100644 --- a/plugins/parsers/json/README.md +++ b/plugins/parsers/json/README.md @@ -29,7 +29,8 @@ ignored unless specified in the `tag_key` or `json_string_fields` options. ## https://github.com/tidwall/gjson/tree/v1.3.0#path-syntax json_query = "" - ## Tag keys is an array of keys that should be added as tags. + ## Tag keys is an array of keys that should be added as tags. Matching keys + ## are no longer saved as fields. 
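+  ## For example, with tag_keys = ["my_tag_1"], the document
+  ## {"my_tag_1": "a", "time_idle": 42} yields the tag my_tag_1="a" and
+  ## only the time_idle field.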
tag_keys = [ "my_tag_1", "my_tag_2" From eeb46906866f8a10b89040ae62a88e7e98dff366 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 4 Dec 2019 13:41:33 -0800 Subject: [PATCH 1382/1815] Fix prometheus histogram and summary merging (#6756) --- plugins/serializers/prometheus/collection.go | 25 +- .../serializers/prometheus/collection_test.go | 231 ++++++++++++++++++ 2 files changed, 253 insertions(+), 3 deletions(-) diff --git a/plugins/serializers/prometheus/collection.go b/plugins/serializers/prometheus/collection.go index 8ca06520b..5c385caad 100644 --- a/plugins/serializers/prometheus/collection.go +++ b/plugins/serializers/prometheus/collection.go @@ -52,12 +52,32 @@ type Histogram struct { Sum float64 } +func (h *Histogram) merge(b Bucket) { + for i := range h.Buckets { + if h.Buckets[i].Bound == b.Bound { + h.Buckets[i].Count = b.Count + return + } + } + h.Buckets = append(h.Buckets, b) +} + type Summary struct { Quantiles []Quantile Count uint64 Sum float64 } +func (s *Summary) merge(q Quantile) { + for i := range s.Quantiles { + if s.Quantiles[i].Quantile == q.Quantile { + s.Quantiles[i].Value = q.Value + return + } + } + s.Quantiles = append(s.Quantiles, q) +} + type MetricKey uint64 func MakeMetricKey(labels []LabelPair) MetricKey { @@ -210,7 +230,6 @@ func (c *Collection) Add(metric telegraf.Metric) { Scaler: &Scaler{Value: value}, } - // what if already here entry.Metrics[metricKey] = m case telegraf.Histogram: if m == nil { @@ -236,7 +255,7 @@ func (c *Collection) Add(metric telegraf.Metric) { continue } - m.Histogram.Buckets = append(m.Histogram.Buckets, Bucket{ + m.Histogram.merge(Bucket{ Bound: bound, Count: count, }) @@ -297,7 +316,7 @@ func (c *Collection) Add(metric telegraf.Metric) { continue } - m.Summary.Quantiles = append(m.Summary.Quantiles, Quantile{ + m.Summary.merge(Quantile{ Quantile: quantile, Value: value, }) diff --git a/plugins/serializers/prometheus/collection_test.go b/plugins/serializers/prometheus/collection_test.go index 589c306b5..70f26dac7 100644 --- a/plugins/serializers/prometheus/collection_test.go +++ b/plugins/serializers/prometheus/collection_test.go @@ -1,6 +1,7 @@ package prometheus import ( + "math" "testing" "time" @@ -47,6 +48,78 @@ func TestCollectionExpire(t *testing.T) { }, }, }, + { + name: "update metric expiration", + now: time.Unix(20, 0), + age: 10 * time.Second, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 43.0, + }, + time.Unix(12, 0), + ), + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("cpu_time_idle"), + Help: proto.String(helpString), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Untyped: &dto.Untyped{Value: proto.Float64(43.0)}, + }, + }, + }, + }, + }, + { + name: "update metric expiration descending order", + now: time.Unix(20, 0), + age: 10 * time.Second, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(12, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 43.0, + }, + time.Unix(0, 0), + ), + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("cpu_time_idle"), + Help: proto.String(helpString), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + { + Label: 
[]*dto.LabelPair{}, + Untyped: &dto.Untyped{Value: proto.Float64(42.0)}, + }, + }, + }, + }, + }, { name: "expired single metric in metric family", now: time.Unix(20, 0), @@ -99,6 +172,164 @@ func TestCollectionExpire(t *testing.T) { }, }, }, + { + name: "histogram bucket updates", + now: time.Unix(0, 0), + age: 10 * time.Second, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 10.0, + "http_request_duration_seconds_count": 2, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + // Next interval + testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 20.0, + "http_request_duration_seconds_count": 4, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 2.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 2.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("http_request_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(4), + SampleSum: proto.Float64(20.0), + Bucket: []*dto.Bucket{ + { + UpperBound: proto.Float64(0.05), + CumulativeCount: proto.Uint64(2), + }, + { + UpperBound: proto.Float64(math.Inf(1)), + CumulativeCount: proto.Uint64(2), + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "summary quantile updates", + now: time.Unix(0, 0), + age: 10 * time.Second, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.0, + "rpc_duration_seconds_count": 1, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 1.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + // Updated Summary + testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 2.0, + "rpc_duration_seconds_count": 2, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 2.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("rpc_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(2), + SampleSum: proto.Float64(2.0), + Quantile: []*dto.Quantile{ + { + Quantile: proto.Float64(0.01), + Value: proto.Float64(2), + }, + }, + }, + }, + }, + }, + }, + }, } for 
_, tt := range tests { t.Run(tt.name, func(t *testing.T) { From d0db03b8f3098ee8ba06c9c665800e40cd898fa9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 4 Dec 2019 14:32:54 -0800 Subject: [PATCH 1383/1815] Add troubleshooting command for Windows to temp input --- plugins/inputs/temp/README.md | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/temp/README.md b/plugins/inputs/temp/README.md index 873a73285..8398d25ca 100644 --- a/plugins/inputs/temp/README.md +++ b/plugins/inputs/temp/README.md @@ -5,13 +5,14 @@ meant to be multi platform and uses platform specific collection methods. Currently supports Linux and Windows. -### Configuration: +### Configuration -``` +```toml [[inputs.temp]] + # no configuration ``` -### Metrics: +### Metrics - temp - tags: @@ -19,7 +20,16 @@ Currently supports Linux and Windows. - fields: - temp (float, celsius) -### Example Output: + +### Troubleshooting + +On **Windows**, the plugin uses a WMI call that can be replicated with the +following command: +``` +wmic /namespace:\\root\wmi PATH MSAcpi_ThermalZoneTemperature +``` + +### Example Output ``` temp,sensor=coretemp_physicalid0_crit temp=100 1531298763000000000 From 1f5be2bac7b895867411406b2eeac892b9ff9258 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 4 Dec 2019 17:16:00 -0800 Subject: [PATCH 1384/1815] Add minimum system requirements to readme --- README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/README.md b/README.md index 73f4268bb..b34451df2 100644 --- a/README.md +++ b/README.md @@ -29,6 +29,16 @@ There are many ways to contribute: - Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/) - [Contribute plugins](CONTRIBUTING.md) +## Minimum Requirements + +Telegraf shares the same [minimum requirements][] as Go: +- Linux kernel version 2.6.23 or later +- Windows 7 or later +- FreeBSD 11.2 or later +- MacOS 10.11 El Capitan or later + +[minimum requirements]: https://github.com/golang/go/wiki/MinimumRequirements#minimum-requirements + ## Installation: You can download the binaries directly from the [downloads](https://www.influxdata.com/downloads) page From e6c57e7df4f40d5569e900648d73d6c4e27959db Mon Sep 17 00:00:00 2001 From: Ross Lodge Date: Thu, 5 Dec 2019 16:38:51 -0800 Subject: [PATCH 1385/1815] Add page_faults for mongodb wired tiger (#6732) --- plugins/inputs/mongodb/mongodb_data.go | 1 + plugins/inputs/mongodb/mongodb_data_test.go | 3 +++ 2 files changed, 4 insertions(+) diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index 0c3695c61..279dbb138 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -248,6 +248,7 @@ func (d *MongodbData) AddDefaultStats() { d.add(key, floatVal) } d.addStat(statLine, WiredTigerExtStats) + d.add("page_faults", d.StatLine.FaultsCnt) } } diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index 2717dffd1..bbc882a26 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -100,6 +100,7 @@ func TestAddWiredTigerStats(t *testing.T) { PagesQueuedForEviction: 0, ServerEvictingPages: 0, WorkerThreadEvictingPages: 0, + FaultsCnt: 204, }, tags, ) @@ -116,6 +117,8 @@ func TestAddWiredTigerStats(t *testing.T) { for key := range WiredTigerExtStats { assert.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) } + + 
assert.True(t, acc.HasInt64Field("mongodb", "page_faults")) } func TestAddShardStats(t *testing.T) { From b54af02a9a5949193f192cd772fae4a8b397b840 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 5 Dec 2019 17:18:09 -0800 Subject: [PATCH 1386/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d82e3cb4b..4ac5458a5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +## v1.14 [unreleased] + +#### Features + +- [#6730](https://github.com/influxdata/telegraf/pull/6730): Add page_faults for mongodb wired tiger. + ## v1.13 [unreleased] #### Release Notes From 3e46768578c45b3eaebf1488ca60001b05f5a74b Mon Sep 17 00:00:00 2001 From: likerj Date: Sat, 7 Dec 2019 08:43:39 +0800 Subject: [PATCH 1387/1815] Update supported environment variables (#6747) --- docs/FAQ.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/docs/FAQ.md b/docs/FAQ.md index 1d1c490aa..f4d81ec7c 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -6,14 +6,17 @@ You will need to setup several volume mounts as well as some environment variables: ``` docker run --name telegraf - -v /:/hostfs:ro - -v /etc:/hostfs/etc:ro + -v /:/hostfs:ro + -v /etc:/hostfs/etc:ro -v /proc:/hostfs/proc:ro -v /sys:/hostfs/sys:ro - -v /var/run/utmp:/var/run/utmp:ro + -v /var:/hostfs/var:ro + -v /run:/hostfs/run:ro -e HOST_ETC=/hostfs/etc -e HOST_PROC=/hostfs/proc -e HOST_SYS=/hostfs/sys + -e HOST_VAR=/hostfs/var + -e HOST_RUN=/hostfs/run -e HOST_MOUNT_PREFIX=/hostfs telegraf ``` From 613d0dbd162c154c8d20394b9913757e71c701d3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 6 Dec 2019 17:10:59 -0800 Subject: [PATCH 1388/1815] Build packages with Go 1.13.5 (#6767) --- .circleci/config.yml | 4 ++-- CHANGELOG.md | 2 +- Makefile | 8 ++++---- appveyor.yml | 4 ++-- scripts/ci-1.12.docker | 2 +- scripts/ci-1.13.docker | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index a32bd77a4..e070c2957 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,10 +6,10 @@ defaults: GOFLAGS: -p=8 go-1_12: &go-1_12 docker: - - image: 'quay.io/influxdb/telegraf-ci:1.12.12' + - image: 'quay.io/influxdb/telegraf-ci:1.12.14' go-1_13: &go-1_13 docker: - - image: 'quay.io/influxdb/telegraf-ci:1.13.3' + - image: 'quay.io/influxdb/telegraf-ci:1.13.5' version: 2 jobs: diff --git a/CHANGELOG.md b/CHANGELOG.md index 4ac5458a5..e52acf6d3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,7 @@ #### Release Notes -- Official packages built with Go 1.13.3. +- Official packages built with Go 1.13.5. - The `prometheus` input and `prometheus_client` output have a new mapping to and from Telegraf metrics, which can be enabled by setting `metric_version = 2`. The original mapping is deprecated. 
When both plugins have the same setting, diff --git a/Makefile b/Makefile index aeae48e4c..9202cc1f4 100644 --- a/Makefile +++ b/Makefile @@ -131,10 +131,10 @@ plugin-%: .PHONY: ci-1.13 ci-1.13: - docker build -t quay.io/influxdb/telegraf-ci:1.13.3 - < scripts/ci-1.13.docker - docker push quay.io/influxdb/telegraf-ci:1.13.3 + docker build -t quay.io/influxdb/telegraf-ci:1.13.5 - < scripts/ci-1.13.docker + docker push quay.io/influxdb/telegraf-ci:1.13.5 .PHONY: ci-1.12 ci-1.12: - docker build -t quay.io/influxdb/telegraf-ci:1.12.12 - < scripts/ci-1.12.docker - docker push quay.io/influxdb/telegraf-ci:1.12.12 + docker build -t quay.io/influxdb/telegraf-ci:1.12.14 - < scripts/ci-1.12.docker + docker push quay.io/influxdb/telegraf-ci:1.12.14 diff --git a/appveyor.yml b/appveyor.yml index fba80d46f..66d17b0f4 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -13,11 +13,11 @@ platform: x64 install: - IF NOT EXIST "C:\Cache" mkdir C:\Cache - - IF NOT EXIST "C:\Cache\go1.13.3.msi" curl -o "C:\Cache\go1.13.3.msi" https://storage.googleapis.com/golang/go1.13.3.windows-amd64.msi + - IF NOT EXIST "C:\Cache\go1.13.5.msi" curl -o "C:\Cache\go1.13.5.msi" https://storage.googleapis.com/golang/go1.13.5.windows-amd64.msi - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip - IF EXIST "C:\Go" rmdir /S /Q C:\Go - - msiexec.exe /i "C:\Cache\go1.13.3.msi" /quiet + - msiexec.exe /i "C:\Cache\go1.13.5.msi" /quiet - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y - go get -d github.com/golang/dep diff --git a/scripts/ci-1.12.docker b/scripts/ci-1.12.docker index f60f49a43..e68618dbc 100644 --- a/scripts/ci-1.12.docker +++ b/scripts/ci-1.12.docker @@ -1,4 +1,4 @@ -FROM golang:1.12.12 +FROM golang:1.12.14 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/ci-1.13.docker b/scripts/ci-1.13.docker index c3c9792d2..ad71addb9 100644 --- a/scripts/ci-1.13.docker +++ b/scripts/ci-1.13.docker @@ -1,4 +1,4 @@ -FROM golang:1.13.3 +FROM golang:1.13.5 RUN chmod -R 755 "$GOPATH" From f0b0295e3c1000335fe446935ab874b202bd3698 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 6 Dec 2019 17:22:11 -0800 Subject: [PATCH 1389/1815] Override github.com/satori/go.uuid revision for transitive deps (#6768) While there has been a workaround in place for some time, this change is being made to reduce confusion around if Telegraf is affected by https://github.com/satori/go.uuid/issues/73 --- Gopkg.lock | 7 +++---- Gopkg.toml | 6 +++++- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 00116a7b5..3fabcfb77 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -81,7 +81,6 @@ version = "v13.3.0" [[projects]] - branch = "master" digest = "1:298712a3ee36b59c3ca91f4183bd75d174d5eaa8b4aed5072831f126e2e752f6" name = "github.com/Microsoft/ApplicationInsights-Go" packages = [ @@ -90,6 +89,7 @@ ] pruneopts = "" revision = "d2df5d440eda5372f24fcac03839a64d6cb5f7e5" + version = "v0.4.2" [[projects]] digest = "1:45ec6eb579713a01991ad07f538fed3b576ee55f5ce9f248320152a9270d9258" @@ -1103,12 +1103,11 @@ revision = "c4fab1ac1bec58281ad0667dc3f0907a9476ac47" [[projects]] - digest = "1:7f569d906bdd20d906b606415b7d794f798f91a62fcfb6a4daa6d50690fb7a3f" + digest = "1:47081c00d00c1dfc9a530c2556e78be391a5c24db1043efe6d406af882a169a1" name = "github.com/satori/go.uuid" 
packages = ["."] pruneopts = "" - revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3" - version = "v1.2.0" + revision = "b2ce2384e17bbe0c6d34077efa39dbab3e09123b" [[projects]] digest = "1:9024df427b3c8a80a0c4b34e535e5e1ae922c7174e3242b6c7f30ffb3b9f715e" diff --git a/Gopkg.toml b/Gopkg.toml index 7ecfae425..5b0a2dba4 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -100,7 +100,7 @@ [[constraint]] name = "github.com/Microsoft/ApplicationInsights-Go" - branch = "master" + version = "0.4.2" [[constraint]] name = "github.com/miekg/dns" @@ -304,3 +304,7 @@ [[constraint]] name = "github.com/safchain/ethtool" revision = "42ed695e3de80b9d695f280295fd7994639f209d" + +[[override]] + name = "github.com/satori/go.uuid" + revision = "b2ce2384e17bbe0c6d34077efa39dbab3e09123b" From faca80fd000d0118e2ec38825a4bd3be00ec9a62 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 9 Dec 2019 12:15:27 -0800 Subject: [PATCH 1390/1815] Fix unix socket dial arguments in uwsgi input (#6769) --- plugins/inputs/uwsgi/uwsgi.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/uwsgi/uwsgi.go b/plugins/inputs/uwsgi/uwsgi.go index 15a9bbe22..a20f3b2bf 100644 --- a/plugins/inputs/uwsgi/uwsgi.go +++ b/plugins/inputs/uwsgi/uwsgi.go @@ -91,13 +91,13 @@ func (u *Uwsgi) gatherServer(acc telegraf.Accumulator, url *url.URL) error { } s.source = url.Host case "unix": - r, err = net.DialTimeout(url.Scheme, url.Host, u.Timeout.Duration) + r, err = net.DialTimeout(url.Scheme, url.Path, u.Timeout.Duration) if err != nil { return err } s.source, err = os.Hostname() if err != nil { - s.source = url.Host + s.source = "" } case "http": resp, err := u.client.Get(url.String()) From 5034af7af2d475a2952364c7a17cb4cc5f541f4e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 9 Dec 2019 12:24:11 -0800 Subject: [PATCH 1391/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e52acf6d3..00f7b0dd7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -84,6 +84,7 @@ - [#6705](https://github.com/influxdata/telegraf/issues/6705): Remove trailing underscore trimming from json flattener. - [#6421](https://github.com/influxdata/telegraf/issues/6421): Revert change causing cpu usage to be capped at 100 percent. - [#6523](https://github.com/influxdata/telegraf/issues/6523): Accept any media type in the prometheus input. +- [#6769](https://github.com/influxdata/telegraf/issues/6769): Fix unix socket dial arguments in uwsgi input. 
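The uwsgi fix above hinges on how Go's `net/url` splits a unix socket URL:
the socket path ends up in `url.Path`, while `url.Host` is empty. A minimal
sketch of that behavior (the socket path is an assumed example, not a real
uwsgi deployment):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Dialing url.Host for a "unix" URL would pass an empty address,
	// which is why the patch switches the dial argument to url.Path.
	u, err := url.Parse("unix:///run/uwsgi/stats.sock")
	if err != nil {
		panic(err)
	}
	fmt.Printf("scheme=%q host=%q path=%q\n", u.Scheme, u.Host, u.Path)
	// scheme="unix" host="" path="/run/uwsgi/stats.sock"
}
```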
## v1.12.6 [2019-11-19] From eb00f41905999b74557545708b6cdcd7189ccf43 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 10 Dec 2019 12:58:59 -0800 Subject: [PATCH 1392/1815] Use actual database name in db creation failed log (#6780) --- plugins/outputs/influxdb/influxdb.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 50161e832..be462ba03 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -278,7 +278,7 @@ func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL) err = c.CreateDatabase(ctx, c.Database()) if err != nil { i.Log.Warnf("When writing to [%s]: database %q creation failed: %v", - c.URL(), i.Database, err) + c.URL(), c.Database(), err) } } From aabc7e7d4f04171cd39fdd945461ae53728d7402 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 10 Dec 2019 12:59:16 -0800 Subject: [PATCH 1393/1815] Replace colon chars in prometheus output labels with metric_version=1 (#6781) --- .../prometheus_client_v1_test.go | 28 +++++++++++++++++++ .../outputs/prometheus_client/v1/collector.go | 13 +++++---- .../serializers/prometheus/prometheus_test.go | 22 +++++++++++++++ 3 files changed, 57 insertions(+), 6 deletions(-) diff --git a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go index 6a9770fdc..adf18c9f0 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go @@ -103,6 +103,34 @@ cpu_time_idle{host="example.org"} 42 # HELP cpu_time_idle Telegraf collected metric # TYPE cpu_time_idle counter cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "replace characters when using string as label", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 1, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + StringAsLabel: true, + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu_time_idle", + map[string]string{}, + map[string]interface{}{ + "host:name": "example.org", + "counter": 42.0, + }, + time.Unix(0, 0), + telegraf.Counter, + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle counter +cpu_time_idle{host_name="example.org"} 42 `), }, { diff --git a/plugins/outputs/prometheus_client/v1/collector.go b/plugins/outputs/prometheus_client/v1/collector.go index 72b09be08..7932bbc59 100644 --- a/plugins/outputs/prometheus_client/v1/collector.go +++ b/plugins/outputs/prometheus_client/v1/collector.go @@ -10,6 +10,7 @@ import ( "time" "github.com/influxdata/telegraf" + serializer "github.com/influxdata/telegraf/plugins/serializers/prometheus" "github.com/prometheus/client_golang/prometheus" ) @@ -201,11 +202,11 @@ func (c *Collector) Add(metrics []telegraf.Metric) error { labels := make(map[string]string) for k, v := range tags { - tName := sanitize(k) - if !isValidTagName(tName) { + name, ok := serializer.SanitizeLabelName(k) + if !ok { continue } - labels[tName] = v + labels[name] = v } // Prometheus doesn't have a string value type, so convert string @@ -214,11 +215,11 @@ func (c *Collector) Add(metrics []telegraf.Metric) error { for fn, fv := range point.Fields() { switch fv := fv.(type) { case string: - tName := sanitize(fn) - if !isValidTagName(tName) { + name, ok := serializer.SanitizeLabelName(fn) + if !ok { continue } - labels[tName] = fv + 
labels[name] = fv } } } diff --git a/plugins/serializers/prometheus/prometheus_test.go b/plugins/serializers/prometheus/prometheus_test.go index 632ca148e..ff082f7b2 100644 --- a/plugins/serializers/prometheus/prometheus_test.go +++ b/plugins/serializers/prometheus/prometheus_test.go @@ -550,6 +550,28 @@ cpu_time_idle{cpu="cpu0"} 42 # HELP cpu_time_idle Telegraf collected metric # TYPE cpu_time_idle untyped cpu_time_idle{cpu="cpu0"} 42 +`), + }, + { + name: "replace characters when using string as label", + config: FormatConfig{ + StringHandling: StringAsLabel, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "host:name": "example.org", + "time_idle": 42.0, + }, + time.Unix(1574279268, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host_name="example.org"} 42 `), }, { From 7cfde0cf4d59c3e44a9fe6baf5933b7b2dacda63 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 10 Dec 2019 13:01:53 -0800 Subject: [PATCH 1394/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 00f7b0dd7..d2750fa03 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -85,6 +85,7 @@ - [#6421](https://github.com/influxdata/telegraf/issues/6421): Revert change causing cpu usage to be capped at 100 percent. - [#6523](https://github.com/influxdata/telegraf/issues/6523): Accept any media type in the prometheus input. - [#6769](https://github.com/influxdata/telegraf/issues/6769): Fix unix socket dial arguments in uwsgi input. +- [#6757](https://github.com/influxdata/telegraf/issues/6757): Replace colon chars in prometheus output labels with metric_version=1. ## v1.12.6 [2019-11-19] From 05cefe61bd6bd183369dc36c5cc649083c6570cb Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 10 Dec 2019 13:13:03 -0800 Subject: [PATCH 1395/1815] Document --service-display-name flag for Windows service --- docs/WINDOWS_SERVICE.md | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/docs/WINDOWS_SERVICE.md b/docs/WINDOWS_SERVICE.md index 5b630076c..b0b6ee5ad 100644 --- a/docs/WINDOWS_SERVICE.md +++ b/docs/WINDOWS_SERVICE.md @@ -48,18 +48,21 @@ Telegraf can manage its own service through the --service flag: ## Install multiple services -You can install multiple telegraf instances with --service-name flag: +Running multiple instances of Telegraf is seldom needed, as you can run +multiple instances of each plugin and route metric flow using the metric +filtering options. However, if you do need to run multiple telegraf instances +on a single system, you can install the service with the `--service-name` and +`--service-display-name` flags to give the services unique names: ``` - > C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-1 - > C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-2 - > C:\"Program Files"\Telegraf\telegraf.exe --service uninstall --service-name telegraf-1 +> C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-1 --service-display-name "Telegraf 1" +> C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-2 --service-display-name "Telegraf 2" ``` ## Troubleshooting When Telegraf runs as a Windows service, Telegraf logs messages to Windows events log before configuration file with logging settings is loaded. 
-Check event log for an error reported by `telegraf` service in case of Telegraf service reports failure on its start: Event Viewer->Windows Logs->Application
+If the Telegraf service fails to start, check the event log for an error reported by the `telegraf` service: Event Viewer->Windows Logs->Application
 
 **Troubleshooting common error #1067**
 
From 61fbc68279cbff44a08e920b66ed75c8a9bde683 Mon Sep 17 00:00:00 2001
From: reimda 
Date: Wed, 11 Dec 2019 11:23:51 -0700
Subject: [PATCH 1396/1815] Add documentation about listening on port < 1024 (#6785)

---
 plugins/inputs/snmp_trap/README.md    | 31 ++++++++++++++++++++++++++-
 plugins/inputs/snmp_trap/snmp_trap.go |  6 +++++-
 2 files changed, 35 insertions(+), 2 deletions(-)

diff --git a/plugins/inputs/snmp_trap/README.md b/plugins/inputs/snmp_trap/README.md
index ec3c7ba4c..8c1a2c132 100644
--- a/plugins/inputs/snmp_trap/README.md
+++ b/plugins/inputs/snmp_trap/README.md
@@ -17,7 +17,11 @@ the SNMP [README.md](../snmp/README.md) for details.
   ## Transport, local address, and port to listen on. Transport must
   ## be "udp://". Omit local address to listen on all interfaces.
   ## example: "udp://127.0.0.1:1234"
-  # service_address = udp://:162
+  ##
+  ## Special permissions may be required to listen on a port less than
+  ## 1024. See README.md for details
+  ##
+  # service_address = "udp://:162"
   ## Timeout running snmptranslate command
   # timeout = "5s"
 ```
@@ -41,3 +45,28 @@ the SNMP [README.md](../snmp/README.md) for details.
 snmp_trap,mib=SNMPv2-MIB,name=coldStart,oid=.1.3.6.1.6.3.1.1.5.1,source=192.168.122.102,version=2c snmpTrapEnterprise.0="linux",sysUpTimeInstance=1i 1574109187723429814
 snmp_trap,mib=NET-SNMP-AGENT-MIB,name=nsNotifyShutdown,oid=.1.3.6.1.4.1.8072.4.0.2,source=192.168.122.102,version=2c sysUpTimeInstance=5803i,snmpTrapEnterprise.0="netSnmpNotificationPrefix" 1574109186555115459
 ```
+
+### Using a Privileged Port
+
+On many operating systems, listening on a privileged port (a port
+number less than 1024) requires extra permission. Since the default
+SNMP trap port 162 is in this category, using telegraf to receive SNMP
+traps may need extra permission.
+
+Instructions for listening on a privileged port vary by operating
+system. It is not recommended to run telegraf as superuser in order to
+use a privileged port. Instead follow the principle of least privilege
+and use a more specific operating system mechanism to allow telegraf to
+use the port. You may also be able to have telegraf use an
+unprivileged port and then configure a firewall port forward rule from
+the privileged port.
+
+To use a privileged port on Linux, you can use setcap to enable the
+CAP_NET_BIND_SERVICE capability on the telegraf binary:
+
+```
+setcap cap_net_bind_service=+ep /usr/bin/telegraf
+```
+
+On Mac OS, listening on privileged ports is unrestricted on versions
+10.14 and later.
diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go
index 4b9ce4a56..7163a853e 100644
--- a/plugins/inputs/snmp_trap/snmp_trap.go
+++ b/plugins/inputs/snmp_trap/snmp_trap.go
@@ -50,7 +50,11 @@ var sampleConfig = `
   ## Transport, local address, and port to listen on. Transport must
   ## be "udp://". Omit local address to listen on all interfaces.
   ## example: "udp://127.0.0.1:1234"
-  # service_address = udp://:162
+  ##
+  ## Special permissions may be required to listen on a port less than
+  ## 1024.
See README.md for details + ## + # service_address = "udp://:162" ## Timeout running snmptranslate command # timeout = "5s" ` From 98585a1853c8f75bc5a9b6c018e6d8565a2ae055 Mon Sep 17 00:00:00 2001 From: Daniel Speichert Date: Wed, 11 Dec 2019 14:25:35 -0500 Subject: [PATCH 1397/1815] Set TrimLeadingSpace when TrimSpace is on in csv parser (#6773) --- plugins/parsers/csv/parser.go | 1 + plugins/parsers/csv/parser_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go index 861844488..b59ea9799 100644 --- a/plugins/parsers/csv/parser.go +++ b/plugins/parsers/csv/parser.go @@ -45,6 +45,7 @@ func (p *Parser) compile(r *bytes.Reader) (*csv.Reader, error) { if p.Comment != "" { csvReader.Comment = []rune(p.Comment)[0] } + csvReader.TrimLeadingSpace = p.TrimSpace return csvReader, nil } diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go index 6a10c0834..1b6fb8f3b 100644 --- a/plugins/parsers/csv/parser_test.go +++ b/plugins/parsers/csv/parser_test.go @@ -243,6 +243,30 @@ func TestTrimSpace(t *testing.T) { require.Equal(t, expectedFields, metrics[0].Fields()) } +func TestTrimSpaceDelimetedBySpace(t *testing.T) { + p := Parser{ + Delimiter: " ", + HeaderRowCount: 1, + TrimSpace: true, + TimeFunc: DefaultTime, + } + testCSV := ` first second third fourth +abcdefgh 0 2 false + abcdef 3.3 4 true + f 0 2 false` + + expectedFields := map[string]interface{}{ + "first": "abcdef", + "second": 3.3, + "third": int64(4), + "fourth": true, + } + + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, expectedFields, metrics[1].Fields()) +} + func TestSkipRows(t *testing.T) { p := Parser{ HeaderRowCount: 1, From 4def7cc5e17d1347c4cdce22a24b5c3e67c602ca Mon Sep 17 00:00:00 2001 From: Ben Hymans <6125803+benhymans@users.noreply.github.com> Date: Wed, 11 Dec 2019 13:42:54 -0600 Subject: [PATCH 1398/1815] Add option to control collecting global variables to mysql input (#6790) --- plugins/inputs/mysql/README.md | 3 +++ plugins/inputs/mysql/mysql.go | 22 +++++++++++++++------- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/mysql/README.md b/plugins/inputs/mysql/README.md index af00da03d..3e07229da 100644 --- a/plugins/inputs/mysql/README.md +++ b/plugins/inputs/mysql/README.md @@ -69,6 +69,9 @@ This plugin gathers the statistic data from MySQL server ## gather metrics from SHOW BINARY LOGS command output # gather_binary_logs = false + ## gather metrics from SHOW GLOBAL VARIABLES command output + # gather_global_variables = true + ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE # gather_table_io_waits = false diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 3ca955beb..a2dc56505 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -36,6 +36,7 @@ type Mysql struct { GatherTableSchema bool `toml:"gather_table_schema"` GatherFileEventsStats bool `toml:"gather_file_events_stats"` GatherPerfEventsStatements bool `toml:"gather_perf_events_statements"` + GatherGlobalVars bool `toml:"gather_global_variables"` IntervalSlow string `toml:"interval_slow"` MetricVersion int `toml:"metric_version"` @@ -94,6 +95,9 @@ const sampleConfig = ` ## gather metrics from SHOW BINARY LOGS command output # gather_binary_logs = false + ## gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES + # gather_global_variables = true + ## gather metrics from 
PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE # gather_table_io_waits = false @@ -134,6 +138,7 @@ const ( defaultPerfEventsStatementsDigestTextLimit = 120 defaultPerfEventsStatementsLimit = 250 defaultPerfEventsStatementsTimeLimit = 86400 + defaultGatherGlobalVars = true ) func (m *Mysql) SampleConfig() string { @@ -431,14 +436,16 @@ func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error { return err } - // Global Variables may be gathered less often - if len(m.IntervalSlow) > 0 { - if uint32(time.Since(m.lastT).Seconds()) >= m.scanIntervalSlow { - err = m.gatherGlobalVariables(db, serv, acc) - if err != nil { - return err + if m.GatherGlobalVars { + // Global Variables may be gathered less often + if len(m.IntervalSlow) > 0 { + if uint32(time.Since(m.lastT).Seconds()) >= m.scanIntervalSlow { + err = m.gatherGlobalVariables(db, serv, acc) + if err != nil { + return err + } + m.lastT = time.Now() } - m.lastT = time.Now() } } @@ -1767,6 +1774,7 @@ func init() { PerfEventsStatementsDigestTextLimit: defaultPerfEventsStatementsDigestTextLimit, PerfEventsStatementsLimit: defaultPerfEventsStatementsLimit, PerfEventsStatementsTimeLimit: defaultPerfEventsStatementsTimeLimit, + GatherGlobalVars: defaultGatherGlobalVars, } }) } From 7cc3507f222b63c6313ca4f94f445b23c1727dbc Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 11 Dec 2019 11:27:00 -0800 Subject: [PATCH 1399/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d2750fa03..eef3e4f93 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -72,6 +72,7 @@ - [#6735](https://github.com/influxdata/telegraf/pull/6735): Support resolution of symlinks in filecount input. - [#6746](https://github.com/influxdata/telegraf/pull/6746): Set message timestamp to the metric time in kafka output. - [#6740](https://github.com/influxdata/telegraf/pull/6740): Add base64decode operation to string processor. +- [#6790](https://github.com/influxdata/telegraf/pull/6790): Add option to control collecting global variables to mysql input. #### Bugfixes @@ -86,6 +87,7 @@ - [#6523](https://github.com/influxdata/telegraf/issues/6523): Accept any media type in the prometheus input. - [#6769](https://github.com/influxdata/telegraf/issues/6769): Fix unix socket dial arguments in uwsgi input. - [#6757](https://github.com/influxdata/telegraf/issues/6757): Replace colon chars in prometheus output labels with metric_version=1. +- [#6773](https://github.com/influxdata/telegraf/issues/6773): Set TrimLeadingSpace when TrimSpace is on in csv parser. 
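The csv parser fix above simply forwards the plugin's `trim_space` setting to
the standard library, which then strips leading whitespace from each field. A
minimal sketch of the `encoding/csv` behavior involved (sample data invented
for illustration):

```go
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

func main() {
	r := csv.NewReader(strings.NewReader("first, second,  third\n"))
	// Mirrors what the parser now does when trim_space = true; without
	// it the padded fields come back as " second" and "  third".
	r.TrimLeadingSpace = true
	record, err := r.Read()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", record) // ["first" "second" "third"]
}
```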
## v1.12.6 [2019-11-19] From cae701c54bb805d2e715ae97c97f5752923f51aa Mon Sep 17 00:00:00 2001 From: reimda Date: Wed, 11 Dec 2019 15:29:18 -0700 Subject: [PATCH 1400/1815] Interpret SNMP v1 traps as described in RFC 2576 3.1 (#6793) --- plugins/inputs/snmp_trap/snmp_trap.go | 38 ++++++- plugins/inputs/snmp_trap/snmp_trap_test.go | 112 +++++++++++++++++++++ 2 files changed, 147 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go index 7163a853e..03f6a3a29 100644 --- a/plugins/inputs/snmp_trap/snmp_trap.go +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -6,6 +6,7 @@ import ( "fmt" "net" "os/exec" + "strconv" "strings" "sync" "time" @@ -150,6 +151,12 @@ func (s *SnmpTrap) Stop() { } } +func setTrapOid(tags map[string]string, oid string, e mibEntry) { + tags["oid"] = oid + tags["name"] = e.oidText + tags["mib"] = e.mibName +} + func makeTrapHandler(s *SnmpTrap) handler { return func(packet *gosnmp.SnmpPacket, addr *net.UDPAddr) { tm := s.timeFunc() @@ -159,6 +166,33 @@ func makeTrapHandler(s *SnmpTrap) handler { tags["version"] = packet.Version.String() tags["source"] = addr.IP.String() + if packet.Version == gosnmp.Version1 { + // Follow the procedure described in RFC 2576 3.1 to + // translate a v1 trap to v2. + var trapOid string + + if packet.GenericTrap > 0 && packet.GenericTrap < 6 { + trapOid = "1.3.6.1.6.3.1.1.5." + strconv.Itoa(packet.GenericTrap+1) + } else if packet.GenericTrap == 6 { + trapOid = packet.Enterprise + ".0." + strconv.Itoa(packet.SpecificTrap) + } + + if trapOid != "" { + e, err := s.lookup(trapOid) + if err != nil { + s.Log.Errorf("Error resolving V1 OID: %v", err) + return + } + setTrapOid(tags, trapOid, e) + } + + if packet.AgentAddress != "" { + tags["agent_address"] = packet.AgentAddress + } + + fields["sysUpTimeInstance"] = packet.Timestamp + } + for _, v := range packet.Variables { // Use system mibs to resolve oids. Don't fall back to // numeric oid because it's not useful enough to the end @@ -193,9 +227,7 @@ func makeTrapHandler(s *SnmpTrap) handler { // 1.3.6.1.6.3.1.1.4.1.0 is SNMPv2-MIB::snmpTrapOID.0. // If v.Name is this oid, set a tag of the trap name. 
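 			// For example, with the coldStart trap exercised by this
 			// patch's tests, the snmpTrapOID value .1.3.6.1.6.3.1.1.5.1
 			// resolves to name=coldStart and mib=SNMPv2-MIB; a v1
 			// generic coldStart reaches this point with the same OID
 			// via the RFC 2576 translation above.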
if v.Name == ".1.3.6.1.6.3.1.1.4.1.0" { - tags["oid"] = val - tags["name"] = e.oidText - tags["mib"] = e.mibName + setTrapOid(tags, val, e) continue } default: diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go index ed31786d8..68121b0c8 100644 --- a/plugins/inputs/snmp_trap/snmp_trap_test.go +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -220,3 +220,115 @@ func TestMissingOid(t *testing.T) { expected, acc.GetTelegrafMetrics(), testutil.SortMetrics()) } + +func sendV1Trap(t *testing.T, port uint16) (sentTimestamp uint) { + s := &gosnmp.GoSNMP{ + Port: port, + Community: "public", + Version: gosnmp.Version1, + Timeout: time.Duration(2) * time.Second, + Retries: 3, + MaxOids: gosnmp.MaxOids, + Target: "127.0.0.1", + } + + err := s.Connect() + if err != nil { + t.Fatalf("Connect() err: %v", err) + } + defer s.Conn.Close() + + now := uint(time.Now().Unix()) + + pdu := gosnmp.SnmpPDU{ + Name: ".1.2.3.4.5", + Type: gosnmp.OctetString, + Value: "payload", + } + + trap := gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{pdu}, + Enterprise: ".1.2.3", + AgentAddress: "10.20.30.40", + GenericTrap: 6, // enterpriseSpecific + SpecificTrap: 55, + Timestamp: now, + } + + _, err = s.SendTrap(trap) + if err != nil { + t.Fatalf("SendTrap() err: %v", err) + } + + return now +} + +func TestReceiveV1Trap(t *testing.T) { + const port = 12399 + var fakeTime = time.Now() + + received := make(chan int) + wrap := func(f handler) handler { + return func(p *gosnmp.SnmpPacket, a *net.UDPAddr) { + f(p, a) + received <- 0 + } + } + + s := &SnmpTrap{ + ServiceAddress: "udp://:" + strconv.Itoa(port), + makeHandlerWrapper: wrap, + timeFunc: func() time.Time { + return fakeTime + }, + Log: testutil.Logger{}, + } + require.Nil(t, s.Init()) + var acc testutil.Accumulator + require.Nil(t, s.Start(&acc)) + defer s.Stop() + + defer s.clear() + s.load(".1.2.3.4.5", + mibEntry{ + "valueMIB", + "valueOID", + }) + s.load(".1.2.3.0.55", + mibEntry{ + "enterpriseMIB", + "enterpriseOID", + }) + + sentTimestamp := sendV1Trap(t, port) + + select { + case <-received: + case <-time.After(2 * time.Second): + t.Fatal("timed out waiting for trap to be received") + } + + expected := []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.2.3.0.55", + "name": "enterpriseOID", + "mib": "enterpriseMIB", + "version": "1", + "source": "127.0.0.1", + "agent_address": "10.20.30.40", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": sentTimestamp, + "valueOID": "payload", + }, + fakeTime, + ), + } + + testutil.RequireMetricsEqual(t, + expected, acc.GetTelegrafMetrics(), + testutil.SortMetrics()) + +} From a7a639f6a3fb25d66e971306ce8abe6af33984ac Mon Sep 17 00:00:00 2001 From: reimda Date: Thu, 12 Dec 2019 11:54:44 -0700 Subject: [PATCH 1401/1815] Fix off by one bug in snmp trap v1 generic trap field (#6797) --- plugins/inputs/snmp_trap/snmp_trap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go index 03f6a3a29..a80276264 100644 --- a/plugins/inputs/snmp_trap/snmp_trap.go +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -171,7 +171,7 @@ func makeTrapHandler(s *SnmpTrap) handler { // translate a v1 trap to v2. var trapOid string - if packet.GenericTrap > 0 && packet.GenericTrap < 6 { + if packet.GenericTrap >= 0 && packet.GenericTrap < 6 { trapOid = "1.3.6.1.6.3.1.1.5." 
+ strconv.Itoa(packet.GenericTrap+1) } else if packet.GenericTrap == 6 { trapOid = packet.Enterprise + ".0." + strconv.Itoa(packet.SpecificTrap) From d6f2857c2b849fd982f602765e5a28bbd758b29f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 12 Dec 2019 11:05:31 -0800 Subject: [PATCH 1402/1815] Update sample config --- etc/telegraf.conf | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index c807c01c7..dbafd2f83 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -1516,6 +1516,10 @@ # # [[processors.strings.left]] # # field = "message" # # width = 10 +# +# ## Decode a base64 encoded utf-8 string +# # [[processors.strings.base64decode]] +# # field = "message" # # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit. @@ -3469,6 +3473,9 @@ # ## gather metrics from SHOW BINARY LOGS command output # # gather_binary_logs = false # +# ## gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES +# # gather_global_variables = true +# # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE # # gather_table_io_waits = false # @@ -5741,7 +5748,11 @@ # ## Transport, local address, and port to listen on. Transport must # ## be "udp://". Omit local address to listen on all interfaces. # ## example: "udp://127.0.0.1:1234" -# # service_address = udp://:162 +# ## +# ## Special permissions may be required to listen on a port less than +# ## 1024. See README.md for details +# ## +# # service_address = "udp://:162" # ## Timeout running snmptranslate command # # timeout = "5s" From dde70118c081caa394876e747ae6d7d9f9c2ece6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 12 Dec 2019 11:09:40 -0800 Subject: [PATCH 1403/1815] Set 1.13.0 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index eef3e4f93..0e801d73c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,7 @@ - [#6730](https://github.com/influxdata/telegraf/pull/6730): Add page_faults for mongodb wired tiger. -## v1.13 [unreleased] +## v1.13 [2019-12-12] #### Release Notes From 1aee98f06473e1912062ffe0d18aa3fba5332c71 Mon Sep 17 00:00:00 2001 From: Ram Gopinathan Date: Thu, 12 Dec 2019 12:38:00 -0800 Subject: [PATCH 1404/1815] Add missing basic auth credentials to haproxy readme (#6796) --- plugins/inputs/haproxy/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/inputs/haproxy/README.md b/plugins/inputs/haproxy/README.md index 35b59524d..86fbb986b 100644 --- a/plugins/inputs/haproxy/README.md +++ b/plugins/inputs/haproxy/README.md @@ -15,6 +15,10 @@ or [HTTP statistics page](https://cbonte.github.io/haproxy-dconv/1.9/management. 
## Make sure you specify the complete path to the stats endpoint ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats + ## Credentials for basic HTTP authentication + # username = "admin" + # password = "admin" + ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats servers = ["http://myhaproxy.com:1936/haproxy?stats"] From 94fc769e0b88932e04a6d5cafa7770d63ac3f6a4 Mon Sep 17 00:00:00 2001 From: chuckbarkertech Date: Thu, 12 Dec 2019 15:16:41 -0600 Subject: [PATCH 1405/1815] Fix ServerProperty query stops working on Azure after failover (#6794) --- plugins/inputs/sqlserver/sqlserver.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index c2c852749..03b0cfcfd 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -432,8 +432,9 @@ IF SERVERPROPERTY('EngineEdition') = 5 -- Azure SQL DB NULL AS available_storage_mb, -- Can we find out storage? NULL as uptime FROM sys.databases d - JOIN sys.database_service_objectives slo - ON d.database_id = slo.database_id + -- sys.databases.database_id may not match current DB_ID on Azure SQL DB + CROSS JOIN sys.database_service_objectives slo + WHERE d.name = DB_NAME() AND slo.database_id = DB_ID() ELSE BEGIN From debb5e4fa66a1db3024640f1010356a6d5f21a16 Mon Sep 17 00:00:00 2001 From: gescheit Date: Fri, 13 Dec 2019 00:56:28 +0300 Subject: [PATCH 1406/1815] Add use_sudo option to ipmi_sensor input (#6798) --- plugins/inputs/ipmi_sensor/README.md | 20 ++++++++++++++++++++ plugins/inputs/ipmi_sensor/ipmi.go | 14 +++++++++++++- 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/ipmi_sensor/README.md b/plugins/inputs/ipmi_sensor/README.md index fb2e8f26e..6c93bd15e 100644 --- a/plugins/inputs/ipmi_sensor/README.md +++ b/plugins/inputs/ipmi_sensor/README.md @@ -27,6 +27,11 @@ ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr ## optionally specify the path to the ipmitool executable # path = "/usr/bin/ipmitool" ## + ## Setting 'use_sudo' to true will make use of sudo to run ipmitool. + ## Sudo must be configured to allow the telegraf user to run ipmitool + ## without a password. + # use_sudo = false + ## ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR # privilege = "ADMINISTRATOR" ## @@ -86,6 +91,21 @@ ipmi device node. When using udev you can create the device node giving ``` KERNEL=="ipmi*", MODE="660", GROUP="telegraf" ``` +Alternatively, it is possible to use sudo. You will need the following in your telegraf config: +```toml +[[inputs.ipmi_sensor]] + use_sudo = true +``` + +You will also need to update your sudoers file: + +```bash +$ visudo +# Add the following line: +Cmnd_Alias IPMITOOL = /usr/bin/ipmitool * +telegraf ALL=(root) NOPASSWD: IPMITOOL +Defaults!IPMITOOL !logfile, !syslog, !pam_session +``` ### Example Output diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go index 2ec51525b..9ac842b89 100644 --- a/plugins/inputs/ipmi_sensor/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi.go @@ -32,12 +32,18 @@ type Ipmi struct { Servers []string Timeout internal.Duration MetricVersion int + UseSudo bool } var sampleConfig = ` ## optionally specify the path to the ipmitool executable # path = "/usr/bin/ipmitool" ## + ## Setting 'use_sudo' to true will make use of sudo to run ipmitool. + ## Sudo must be configured to allow the telegraf user to run ipmitool + ## without a password. 
+ # use_sudo = false + ## ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR # privilege = "ADMINISTRATOR" ## @@ -112,7 +118,13 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { if m.MetricVersion == 2 { opts = append(opts, "elist") } - cmd := execCommand(m.Path, opts...) + name := m.Path + if m.UseSudo { + // -n - avoid prompting the user for input of any kind + opts = append([]string{"-n", name}, opts...) + name = "sudo" + } + cmd := execCommand(name, opts...) out, err := internal.CombinedOutputTimeout(cmd, m.Timeout.Duration) timestamp := time.Now() if err != nil { From 4fbba13622d0fa8ea6f61911c4a6d478e487037d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 12 Dec 2019 13:57:16 -0800 Subject: [PATCH 1407/1815] Update changelog --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0e801d73c..fb9469e2d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,13 @@ #### Features - [#6730](https://github.com/influxdata/telegraf/pull/6730): Add page_faults for mongodb wired tiger. +- [#6798](https://github.com/influxdata/telegraf/pull/6798): Add use_sudo option to ipmi_sensor input. + +## v1.13.1 [unreleased] + +#### Bugfixes + +- [#6788](https://github.com/influxdata/telegraf/issues/6788): Fix ServerProperty query stops working on Azure after failover. ## v1.13 [2019-12-12] From f79ba10ab3e52f46b1ed0d327408781d1a029761 Mon Sep 17 00:00:00 2001 From: LinaLinn <10799908+linalinn@users.noreply.github.com> Date: Thu, 12 Dec 2019 23:14:37 +0100 Subject: [PATCH 1408/1815] Add ability to collect pod labels to Kubernetes input (#6764) --- plugins/inputs/kubernetes/README.md | 7 +- plugins/inputs/kubernetes/kubernetes.go | 123 +++++++++++++------ plugins/inputs/kubernetes/kubernetes_pods.go | 17 +++ plugins/inputs/kubernetes/kubernetes_test.go | 55 ++++++++- 4 files changed, 161 insertions(+), 41 deletions(-) create mode 100644 plugins/inputs/kubernetes/kubernetes_pods.go diff --git a/plugins/inputs/kubernetes/README.md b/plugins/inputs/kubernetes/README.md index a094b7b29..2a286e962 100644 --- a/plugins/inputs/kubernetes/README.md +++ b/plugins/inputs/kubernetes/README.md @@ -1,6 +1,6 @@ # Kubernetes Input Plugin -This input plugin talks to the kubelet api using the `/stats/summary` endpoint to gather metrics about the running pods and containers for a single host. It is assumed that this plugin is running as part of a `daemonset` within a kubernetes installation. This means that telegraf is running on every node within the cluster. Therefore, you should configure this plugin to talk to its locally running kubelet. +This input plugin talks to the kubelet api using the `/stats/summary` and `/pods` endpoint to gather metrics about the running pods and containers for a single host. It is assumed that this plugin is running as part of a `daemonset` within a kubernetes installation. This means that telegraf is running on every node within the cluster. Therefore, you should configure this plugin to talk to its locally running kubelet. 
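As a rough illustration of the requests this plugin makes, the kubelet's
`/pods` endpoint can be fetched directly. This is only a sketch: the address
and token below are assumed placeholders, not defaults shipped with the
plugin:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 5 * time.Second}

	// Assumed example address for a local kubelet; substitute your node.
	req, err := http.NewRequest("GET", "http://127.0.0.1:10255/pods", nil)
	if err != nil {
		panic(err)
	}
	// The plugin authenticates the same way with its configured token
	// ("abc_123" here is just the placeholder from the sample config).
	req.Header.Set("Authorization", "Bearer abc_123")
	req.Header.Add("Accept", "application/json")

	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status, len(body), "bytes of pod metadata")
}
```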
To find the ip address of the host you are running on you can issue a command like the following: @@ -44,6 +44,11 @@ avoid cardinality issues: ## OR # bearer_token_string = "abc_123" + # Labels to include and exclude + # An empty array for include and exclude will include all labels + # label_include = [] + # label_exclude = ["*"] + ## Set response_timeout (default 5 seconds) # response_timeout = "5s" diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go index 45093a57b..2342d5f4d 100644 --- a/plugins/inputs/kubernetes/kubernetes.go +++ b/plugins/inputs/kubernetes/kubernetes.go @@ -3,6 +3,7 @@ package kubernetes import ( "encoding/json" "fmt" + "github.com/influxdata/telegraf/filter" "io/ioutil" "net/http" "net/url" @@ -23,6 +24,11 @@ type Kubernetes struct { BearerToken string `toml:"bearer_token"` BearerTokenString string `toml:"bearer_token_string"` + LabelInclude []string `toml:"label_include"` + LabelExclude []string `toml:"label_exclude"` + + labelFilter filter.Filter + // HTTP Timeout specified as a string - 3s, 1m, 1h ResponseTimeout internal.Duration @@ -42,6 +48,11 @@ var sampleConfig = ` ## OR # bearer_token_string = "abc_123" + # Labels to include and exclude + # An empty array for include and exclude will include all labels + # label_include = [] + # label_exclude = ["*"] + ## Set response_timeout (default 5 seconds) # response_timeout = "5s" @@ -60,7 +71,10 @@ const ( func init() { inputs.Add("kubernetes", func() telegraf.Input { - return &Kubernetes{} + return &Kubernetes{ + LabelInclude: []string{}, + LabelExclude: []string{"*"}, + } }) } @@ -75,6 +89,7 @@ func (k *Kubernetes) Description() string { } func (k *Kubernetes) Init() error { + // If neither are provided, use the default service account. 
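+	// A service account token is mounted into every pod by Kubernetes, so
+	// an in-cluster telegraf daemonset needs no extra credential setup.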
if k.BearerToken == "" && k.BearerTokenString == "" { k.BearerToken = defaultServiceAccountPath @@ -88,6 +103,12 @@ func (k *Kubernetes) Init() error { k.BearerTokenString = strings.TrimSpace(string(token)) } + labelFilter, err := filter.NewIncludeExcludeFilter(k.LabelInclude, k.LabelExclude) + if err != nil { + return err + } + k.labelFilter = labelFilter + return nil } @@ -107,48 +128,19 @@ func buildURL(endpoint string, base string) (*url.URL, error) { } func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) error { - url := fmt.Sprintf("%s/stats/summary", baseURL) - var req, err = http.NewRequest("GET", url, nil) - var resp *http.Response - - tlsCfg, err := k.ClientConfig.TLSConfig() + summaryMetrics := &SummaryMetrics{} + err := k.LoadJson(fmt.Sprintf("%s/stats/summary", baseURL), summaryMetrics) if err != nil { return err } - if k.RoundTripper == nil { - // Set default values - if k.ResponseTimeout.Duration < time.Second { - k.ResponseTimeout.Duration = time.Second * 5 - } - k.RoundTripper = &http.Transport{ - TLSHandshakeTimeout: 5 * time.Second, - TLSClientConfig: tlsCfg, - ResponseHeaderTimeout: k.ResponseTimeout.Duration, - } - } - - req.Header.Set("Authorization", "Bearer "+k.BearerTokenString) - req.Header.Add("Accept", "application/json") - - resp, err = k.RoundTripper.RoundTrip(req) + podInfos, err := k.gatherPodInfo(baseURL) if err != nil { - return fmt.Errorf("error making HTTP request to %s: %s", url, err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("%s returned HTTP status %s", url, resp.Status) - } - - summaryMetrics := &SummaryMetrics{} - err = json.NewDecoder(resp.Body).Decode(summaryMetrics) - if err != nil { - return fmt.Errorf(`Error parsing response: %s`, err) + return err } buildSystemContainerMetrics(summaryMetrics, acc) buildNodeMetrics(summaryMetrics, acc) - buildPodMetrics(summaryMetrics, acc) + buildPodMetrics(baseURL, summaryMetrics, podInfos, k.labelFilter, acc) return nil } @@ -200,7 +192,56 @@ func buildNodeMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) acc.AddFields("kubernetes_node", fields, tags) } -func buildPodMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) { +func (k *Kubernetes) gatherPodInfo(baseURL string) ([]Metadata, error) { + var podApi Pods + err := k.LoadJson(fmt.Sprintf("%s/pods", baseURL), &podApi) + if err != nil { + return nil, err + } + var podInfos []Metadata + for _, podMetadata := range podApi.Items { + podInfos = append(podInfos, podMetadata.Metadata) + } + return podInfos, nil +} + +func (k *Kubernetes) LoadJson(url string, v interface{}) error { + var req, err = http.NewRequest("GET", url, nil) + var resp *http.Response + tlsCfg, err := k.ClientConfig.TLSConfig() + if err != nil { + return err + } + if k.RoundTripper == nil { + if k.ResponseTimeout.Duration < time.Second { + k.ResponseTimeout.Duration = time.Second * 5 + } + k.RoundTripper = &http.Transport{ + TLSHandshakeTimeout: 5 * time.Second, + TLSClientConfig: tlsCfg, + ResponseHeaderTimeout: k.ResponseTimeout.Duration, + } + } + req.Header.Set("Authorization", "Bearer "+k.BearerTokenString) + req.Header.Add("Accept", "application/json") + resp, err = k.RoundTripper.RoundTrip(req) + if err != nil { + return fmt.Errorf("error making HTTP request to %s: %s", url, err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status %s", url, resp.Status) + } + + err = json.NewDecoder(resp.Body).Decode(v) + if err != nil { 
+ return fmt.Errorf(`Error parsing response: %s`, err) + } + + return nil +} + +func buildPodMetrics(baseURL string, summaryMetrics *SummaryMetrics, podInfo []Metadata, labelFilter filter.Filter, acc telegraf.Accumulator) { for _, pod := range summaryMetrics.Pods { for _, container := range pod.Containers { tags := map[string]string{ @@ -209,6 +250,16 @@ func buildPodMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) { "container_name": container.Name, "pod_name": pod.PodRef.Name, } + for _, info := range podInfo { + if info.Name == pod.PodRef.Name && info.Namespace == pod.PodRef.Namespace { + for k, v := range info.Labels { + if labelFilter.Match(k) { + tags[k] = v + } + } + } + } + fields := make(map[string]interface{}) fields["cpu_usage_nanocores"] = container.CPU.UsageNanoCores fields["cpu_usage_core_nanoseconds"] = container.CPU.UsageCoreNanoSeconds diff --git a/plugins/inputs/kubernetes/kubernetes_pods.go b/plugins/inputs/kubernetes/kubernetes_pods.go new file mode 100644 index 000000000..672608e54 --- /dev/null +++ b/plugins/inputs/kubernetes/kubernetes_pods.go @@ -0,0 +1,17 @@ +package kubernetes + +type Pods struct { + Kind string `json:"kind"` + ApiVersion string `json:"apiVersion"` + Items []Item `json:"items"` +} + +type Item struct { + Metadata Metadata `json:"metadata"` +} + +type Metadata struct { + Name string `json:"name"` + Namespace string `json:"namespace"` + Labels map[string]string `json:"labels"` +} diff --git a/plugins/inputs/kubernetes/kubernetes_test.go b/plugins/inputs/kubernetes/kubernetes_test.go index 081bca03a..faf40be3e 100644 --- a/plugins/inputs/kubernetes/kubernetes_test.go +++ b/plugins/inputs/kubernetes/kubernetes_test.go @@ -2,6 +2,7 @@ package kubernetes import ( "fmt" + "github.com/influxdata/telegraf/filter" "net/http" "net/http/httptest" "testing" @@ -12,13 +13,23 @@ import ( func TestKubernetesStats(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, response) + if r.RequestURI == "/stats/summary" { + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, responseStatsSummery) + } + if r.RequestURI == "/pods" { + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, responsePods) + } + })) defer ts.Close() + labelFilter, _ := filter.NewIncludeExcludeFilter([]string{"app", "superkey"}, nil) + k := &Kubernetes{ - URL: ts.URL, + URL: ts.URL, + labelFilter: labelFilter, } var acc testutil.Accumulator @@ -89,6 +100,8 @@ func TestKubernetesStats(t *testing.T) { "container_name": "foocontainer", "namespace": "foons", "pod_name": "foopod", + "app": "foo", + "superkey": "foobar", } acc.AssertContainsTaggedFields(t, "kubernetes_pod_container", fields, tags) @@ -112,6 +125,8 @@ func TestKubernetesStats(t *testing.T) { "container_name": "stopped-container", "namespace": "foons", "pod_name": "stopped-pod", + "app": "foo-stop", + "superkey": "superfoo", } acc.AssertContainsTaggedFields(t, "kubernetes_pod_container", fields, tags) @@ -143,7 +158,39 @@ func TestKubernetesStats(t *testing.T) { } -var response = ` +var responsePods = ` +{ + "kind": "PodList", + "apiVersion": "v1", + "metadata": {}, + "items": [ + { + "metadata": { + "name": "foopod", + "namespace": "foons", + "labels": { + "superkey": "foobar", + "app": "foo", + "exclude": "exclude0" + } + } + }, + { + "metadata": { + "name": "stopped-pod", + "namespace": "foons", + "labels": { + "superkey": "superfoo", + "app": "foo-stop", + "exclude": "exclude1" + } + } + } + ] +} +` + +var responseStatsSummery = 
` { "node": { "nodeName": "node1", From e6a87cd52e5f731ac26b2824c88faab0b3f55d34 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 12 Dec 2019 15:01:50 -0800 Subject: [PATCH 1409/1815] Update kubernetes sample config and readme --- plugins/inputs/kubernetes/README.md | 10 +++++++--- plugins/inputs/kubernetes/kubernetes.go | 6 +++--- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/kubernetes/README.md b/plugins/inputs/kubernetes/README.md index 2a286e962..2d38f23d9 100644 --- a/plugins/inputs/kubernetes/README.md +++ b/plugins/inputs/kubernetes/README.md @@ -1,6 +1,10 @@ # Kubernetes Input Plugin -This input plugin talks to the kubelet api using the `/stats/summary` and `/pods` endpoint to gather metrics about the running pods and containers for a single host. It is assumed that this plugin is running as part of a `daemonset` within a kubernetes installation. This means that telegraf is running on every node within the cluster. Therefore, you should configure this plugin to talk to its locally running kubelet. +The Kubernetes plugin talks to the Kubelet API and gathers metrics about the +running pods and containers for a single host. It is assumed that this plugin +is running as part of a `daemonset` within a kubernetes installation. This +means that telegraf is running on every node within the cluster. Therefore, you +should configure this plugin to talk to its locally running kubelet. To find the ip address of the host you are running on you can issue a command like the following: @@ -44,8 +48,8 @@ avoid cardinality issues: ## OR # bearer_token_string = "abc_123" - # Labels to include and exclude - # An empty array for include and exclude will include all labels + ## Pod labels to be added as tags. An empty array for both include and + ## exclude will include all labels. # label_include = [] # label_exclude = ["*"] diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go index 2342d5f4d..412db1dc3 100644 --- a/plugins/inputs/kubernetes/kubernetes.go +++ b/plugins/inputs/kubernetes/kubernetes.go @@ -3,7 +3,6 @@ package kubernetes import ( "encoding/json" "fmt" - "github.com/influxdata/telegraf/filter" "io/ioutil" "net/http" "net/url" @@ -11,6 +10,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -48,8 +48,8 @@ var sampleConfig = ` ## OR # bearer_token_string = "abc_123" - # Labels to include and exclude - # An empty array for include and exclude will include all labels + ## Pod labels to be added as tags. An empty array for both include and + ## exclude will include all labels. # label_include = [] # label_exclude = ["*"] From cb915a5c5a8dc744f41e63c787e2da28e6113fe1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 12 Dec 2019 15:03:08 -0800 Subject: [PATCH 1410/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fb9469e2d..a28a0d0e6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ - [#6730](https://github.com/influxdata/telegraf/pull/6730): Add page_faults for mongodb wired tiger. - [#6798](https://github.com/influxdata/telegraf/pull/6798): Add use_sudo option to ipmi_sensor input. +- [#6764](https://github.com/influxdata/telegraf/pull/6764): Add ability to collect pod labels to kubernetes input. 
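The label filtering added in #6764 reuses telegraf's include/exclude filter.
A minimal sketch mirroring the configuration the patch's test uses (include
"app" and "superkey", no exclude patterns):

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	// Same arguments the kubernetes test passes to the label filter.
	f, err := filter.NewIncludeExcludeFilter([]string{"app", "superkey"}, nil)
	if err != nil {
		panic(err)
	}

	// Labels named in the include list become tags on the pod metrics...
	fmt.Println(f.Match("app"))      // true
	fmt.Println(f.Match("superkey")) // true
	// ...while anything else, like the test's "exclude" label, is dropped.
	fmt.Println(f.Match("exclude")) // false
}
```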
## v1.13.1 [unreleased] From 2beb79969ab82cda604bdc8a8f0e872bf458cbec Mon Sep 17 00:00:00 2001 From: maurorappa Date: Tue, 17 Dec 2019 20:44:17 +0000 Subject: [PATCH 1411/1815] Sort alphabetically the output of the plugin listing commands (#6810) --- cmd/telegraf/telegraf.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index f865cee51..7b013cc6c 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -11,6 +11,7 @@ import ( "os" "os/signal" "runtime" + "sort" "strings" "syscall" "time" @@ -327,14 +328,24 @@ func main() { // switch for flags which just do something and exit immediately switch { case *fOutputList: - fmt.Println("Available Output Plugins:") + fmt.Println("Available Output Plugins: ") + names := make([]string, 0, len(outputs.Outputs)) for k := range outputs.Outputs { + names = append(names, k) + } + sort.Strings(names) + for _, k := range names { fmt.Printf(" %s\n", k) } return case *fInputList: fmt.Println("Available Input Plugins:") + names := make([]string, 0, len(inputs.Inputs)) for k := range inputs.Inputs { + names = append(names, k) + } + sort.Strings(names) + for _, k := range names { fmt.Printf(" %s\n", k) } return From 99279e6461a6f9035f1426c0528f6be4682b4a2e Mon Sep 17 00:00:00 2001 From: reimda Date: Tue, 17 Dec 2019 15:12:42 -0700 Subject: [PATCH 1412/1815] Convert snmp_trap_test.go to table test (#6803) Add a test for v1 generic trap. Add missing leading period in v1 generic trap oid. --- plugins/inputs/snmp_trap/snmp_trap.go | 2 +- plugins/inputs/snmp_trap/snmp_trap_test.go | 529 +++++++++++---------- 2 files changed, 268 insertions(+), 263 deletions(-) diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go index a80276264..80fc28f7c 100644 --- a/plugins/inputs/snmp_trap/snmp_trap.go +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -172,7 +172,7 @@ func makeTrapHandler(s *SnmpTrap) handler { var trapOid string if packet.GenericTrap >= 0 && packet.GenericTrap < 6 { - trapOid = "1.3.6.1.6.3.1.1.5." + strconv.Itoa(packet.GenericTrap+1) + trapOid = ".1.3.6.1.6.3.1.1.5." + strconv.Itoa(packet.GenericTrap+1) } else if packet.GenericTrap == 6 { trapOid = packet.Enterprise + ".0." + strconv.Itoa(packet.SpecificTrap) } diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go index 68121b0c8..34dd6cde0 100644 --- a/plugins/inputs/snmp_trap/snmp_trap_test.go +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -4,6 +4,7 @@ import ( "fmt" "net" "strconv" + "strings" "testing" "time" @@ -35,11 +36,15 @@ func TestLoad(t *testing.T) { require.Equal(t, "coldStart", e.oidText) } -func sendTrap(t *testing.T, port uint16) (sentTimestamp uint32) { +func fakeExecCmd(_ internal.Duration, x string, y ...string) ([]byte, error) { + return nil, fmt.Errorf("mock " + x + " " + strings.Join(y, " ")) +} + +func sendTrap(t *testing.T, port uint16, now uint32, trap gosnmp.SnmpTrap, version gosnmp.SnmpVersion) { s := &gosnmp.GoSNMP{ Port: port, Community: "public", - Version: gosnmp.Version2c, + Version: version, Timeout: time.Duration(2) * time.Second, Retries: 3, MaxOids: gosnmp.MaxOids, @@ -52,283 +57,283 @@ func sendTrap(t *testing.T, port uint16) (sentTimestamp uint32) { } defer s.Conn.Close() - // If the first pdu isn't type TimeTicks, gosnmp.SendTrap() will - // prepend one with time.Now(). The time value is part of the - // plugin output so we need to keep track of it and verify it - // later. 
- now := uint32(time.Now().Unix()) - timePdu := gosnmp.SnmpPDU{ - Name: ".1.3.6.1.2.1.1.3.0", - Type: gosnmp.TimeTicks, - Value: now, - } - - pdu := gosnmp.SnmpPDU{ - Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 - Type: gosnmp.ObjectIdentifier, - Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart - } - - trap := gosnmp.SnmpTrap{ - Variables: []gosnmp.SnmpPDU{ - timePdu, - pdu, - }, - } - _, err = s.SendTrap(trap) if err != nil { t.Errorf("SendTrap() err: %v", err) } - - return now } func TestReceiveTrap(t *testing.T) { - // We would prefer to specify port 0 and let the network stack - // choose an unused port for us but TrapListener doesn't have a - // way to return the autoselected port. Instead, we'll use an - // unusual port and hope it's unused. - const port = 12399 - var fakeTime = time.Now() + var now uint32 + now = 123123123 - // hook into the trap handler so the test knows when the trap has - // been received - received := make(chan int) - wrap := func(f handler) handler { - return func(p *gosnmp.SnmpPacket, a *net.UDPAddr) { - f(p, a) - received <- 0 - } + var fakeTime time.Time + fakeTime = time.Unix(456456456, 456) + + type entry struct { + oid string + e mibEntry } - // set up the service input plugin - s := &SnmpTrap{ - ServiceAddress: "udp://:" + strconv.Itoa(port), - makeHandlerWrapper: wrap, - timeFunc: func() time.Time { - return fakeTime + // If the first pdu isn't type TimeTicks, gosnmp.SendTrap() will + // prepend one with time.Now() + var tests = []struct { + name string + + // send + version gosnmp.SnmpVersion + trap gosnmp.SnmpTrap // include pdus + + // recieve + entries []entry + metrics []telegraf.Metric + }{ + //ordinary v2c coldStart trap + { + name: "v2c coldStart", + version: gosnmp.Version2c, + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "2c", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, }, - Log: testutil.Logger{}, - } - require.Nil(t, s.Init()) - var acc testutil.Accumulator - require.Nil(t, s.Start(&acc)) - defer s.Stop() - - // Preload the cache with the oids we'll use in this test so - // snmptranslate and mibs don't need to be installed. 
- defer s.clear() - s.load(".1.3.6.1.6.3.1.1.4.1.0", - mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", - }) - s.load(".1.3.6.1.6.3.1.1.5.1", - mibEntry{ - "SNMPv2-MIB", - "coldStart", - }) - s.load(".1.3.6.1.2.1.1.3.0", - mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", - }) - - // send the trap - sentTimestamp := sendTrap(t, port) - - // wait for trap to be received - select { - case <-received: - case <-time.After(2 * time.Second): - t.Fatal("timed out waiting for trap to be received") - } - - // verify plugin output - expected := []telegraf.Metric{ - testutil.MustMetric( - "snmp_trap", // name - map[string]string{ // tags - "oid": ".1.3.6.1.6.3.1.1.5.1", - "name": "coldStart", - "mib": "SNMPv2-MIB", - "version": "2c", - "source": "127.0.0.1", + //Check that we're not running snmptranslate to look up oids + //when we shouldn't be. This sends and receives a valid trap + //but metric production should fail because the oids aren't in + //the cache and oid lookup is intentionally mocked to fail. + { + name: "missing oid", + version: gosnmp.Version2c, + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, }, - map[string]interface{}{ // fields - "sysUpTimeInstance": sentTimestamp, - }, - fakeTime, - ), - } - - testutil.RequireMetricsEqual(t, - expected, acc.GetTelegrafMetrics(), - testutil.SortMetrics()) - -} - -func fakeExecCmd(_ internal.Duration, _ string, _ ...string) ([]byte, error) { - return nil, fmt.Errorf("intentional failure") -} - -func TestMissingOid(t *testing.T) { - // should fail even if snmptranslate is installed - const port = 12399 - var fakeTime = time.Now() - - received := make(chan int) - wrap := func(f handler) handler { - return func(p *gosnmp.SnmpPacket, a *net.UDPAddr) { - f(p, a) - received <- 0 - } - } - - s := &SnmpTrap{ - ServiceAddress: "udp://:" + strconv.Itoa(port), - makeHandlerWrapper: wrap, - timeFunc: func() time.Time { - return fakeTime + entries: []entry{}, //nothing in cache + metrics: []telegraf.Metric{}, }, - Log: testutil.Logger{}, - } - require.Nil(t, s.Init()) - var acc testutil.Accumulator - require.Nil(t, s.Start(&acc)) - defer s.Stop() - - // make sure the cache is empty - s.clear() - - // don't call the real snmptranslate - s.execCmd = fakeExecCmd - - _ = sendTrap(t, port) - - select { - case <-received: - case <-time.After(2 * time.Second): - t.Fatal("timed out waiting for trap to be received") - } - - // oid lookup should fail so we shouldn't get a metric - expected := []telegraf.Metric{} - - testutil.RequireMetricsEqual(t, - expected, acc.GetTelegrafMetrics(), - testutil.SortMetrics()) -} - -func sendV1Trap(t *testing.T, port uint16) (sentTimestamp uint) { - s := &gosnmp.GoSNMP{ - Port: port, - Community: "public", - Version: gosnmp.Version1, - Timeout: time.Duration(2) * time.Second, - Retries: 3, - MaxOids: gosnmp.MaxOids, - Target: "127.0.0.1", - } - - err := s.Connect() - if err != nil { - t.Fatalf("Connect() err: %v", err) - } - defer s.Conn.Close() - - now := uint(time.Now().Unix()) - - pdu := gosnmp.SnmpPDU{ - Name: ".1.2.3.4.5", - Type: gosnmp.OctetString, - Value: "payload", - } - - trap := gosnmp.SnmpTrap{ - Variables: []gosnmp.SnmpPDU{pdu}, - Enterprise: ".1.2.3", - AgentAddress: "10.20.30.40", - GenericTrap: 6, // enterpriseSpecific - SpecificTrap: 55, - Timestamp: now, - } - - _, err = 
s.SendTrap(trap) - if err != nil { - t.Fatalf("SendTrap() err: %v", err) - } - - return now -} - -func TestReceiveV1Trap(t *testing.T) { - const port = 12399 - var fakeTime = time.Now() - - received := make(chan int) - wrap := func(f handler) handler { - return func(p *gosnmp.SnmpPacket, a *net.UDPAddr) { - f(p, a) - received <- 0 - } - } - - s := &SnmpTrap{ - ServiceAddress: "udp://:" + strconv.Itoa(port), - makeHandlerWrapper: wrap, - timeFunc: func() time.Time { - return fakeTime + //v1 enterprise specific trap + { + name: "v1 trap enterprise", + version: gosnmp.Version1, + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.2.3.4.5", + Type: gosnmp.OctetString, + Value: "payload", + }, + }, + Enterprise: ".1.2.3", + AgentAddress: "10.20.30.40", + GenericTrap: 6, // enterpriseSpecific + SpecificTrap: 55, + Timestamp: uint(now), + }, + entries: []entry{ + { + ".1.2.3.4.5", + mibEntry{ + "valueMIB", + "valueOID", + }, + }, + { + ".1.2.3.0.55", + mibEntry{ + "enterpriseMIB", + "enterpriseOID", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.2.3.0.55", + "name": "enterpriseOID", + "mib": "enterpriseMIB", + "version": "1", + "source": "127.0.0.1", + "agent_address": "10.20.30.40", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": uint(now), + "valueOID": "payload", + }, + fakeTime, + ), + }, }, - Log: testutil.Logger{}, - } - require.Nil(t, s.Init()) - var acc testutil.Accumulator - require.Nil(t, s.Start(&acc)) - defer s.Stop() - - defer s.clear() - s.load(".1.2.3.4.5", - mibEntry{ - "valueMIB", - "valueOID", - }) - s.load(".1.2.3.0.55", - mibEntry{ - "enterpriseMIB", - "enterpriseOID", - }) - - sentTimestamp := sendV1Trap(t, port) - - select { - case <-received: - case <-time.After(2 * time.Second): - t.Fatal("timed out waiting for trap to be received") - } - - expected := []telegraf.Metric{ - testutil.MustMetric( - "snmp_trap", // name - map[string]string{ // tags - "oid": ".1.2.3.0.55", - "name": "enterpriseOID", - "mib": "enterpriseMIB", - "version": "1", - "source": "127.0.0.1", - "agent_address": "10.20.30.40", + //v1 generic trap + { + name: "v1 trap generic", + version: gosnmp.Version1, + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.2.3.4.5", + Type: gosnmp.OctetString, + Value: "payload", + }, + }, + Enterprise: ".1.2.3", + AgentAddress: "10.20.30.40", + GenericTrap: 0, //coldStart + SpecificTrap: 0, + Timestamp: uint(now), }, - map[string]interface{}{ // fields - "sysUpTimeInstance": sentTimestamp, - "valueOID": "payload", + entries: []entry{ + { + ".1.2.3.4.5", + mibEntry{ + "valueMIB", + "valueOID", + }, + }, + { + ".1.3.6.1.6.3.1.1.5.1", + mibEntry{ + "coldStartMIB", + "coldStartOID", + }, + }, }, - fakeTime, - ), + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStartOID", + "mib": "coldStartMIB", + "version": "1", + "source": "127.0.0.1", + "agent_address": "10.20.30.40", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": uint(now), + "valueOID": "payload", + }, + fakeTime, + ), + }, + }, } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // We would prefer to specify port 0 and let the network + // stack choose an unused port for us but TrapListener + // doesn't have a way to return the autoselected port. + // Instead, we'll use an unusual port and hope it's + // unused. 
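			// A hedged alternative if this fixed port ever collides: bind
			// port 0 first, read back the kernel-chosen port, and close the
			// socket before Start() (still slightly racy, since the port is
			// only free until something reuses it, but usually fine in tests):
			//
			//	conn, err := net.ListenUDP("udp", &net.UDPAddr{Port: 0})
			//	if err != nil {
			//		t.Fatal(err)
			//	}
			//	port := uint16(conn.LocalAddr().(*net.UDPAddr).Port)
			//	conn.Close()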
+ const port = 12399 - testutil.RequireMetricsEqual(t, - expected, acc.GetTelegrafMetrics(), - testutil.SortMetrics()) + // Hook into the trap handler so the test knows when the + // trap has been received + received := make(chan int) + wrap := func(f handler) handler { + return func(p *gosnmp.SnmpPacket, a *net.UDPAddr) { + f(p, a) + received <- 0 + } + } + + // Set up the service input plugin + s := &SnmpTrap{ + ServiceAddress: "udp://:" + strconv.Itoa(port), + makeHandlerWrapper: wrap, + timeFunc: func() time.Time { + return fakeTime + }, + Log: testutil.Logger{}, + } + require.Nil(t, s.Init()) + var acc testutil.Accumulator + require.Nil(t, s.Start(&acc)) + defer s.Stop() + + // Preload the cache with the oids we'll use in this test + // so snmptranslate and mibs don't need to be installed. + for _, entry := range tt.entries { + s.load(entry.oid, entry.e) + } + + // Don't look up oid with snmptranslate. + s.execCmd = fakeExecCmd + + // Send the trap + sendTrap(t, port, now, tt.trap, tt.version) + + // Wait for trap to be received + select { + case <-received: + case <-time.After(2 * time.Second): + t.Fatal("timed out waiting for trap to be received") + } + + // Verify plugin output + testutil.RequireMetricsEqual(t, + tt.metrics, acc.GetTelegrafMetrics(), + testutil.SortMetrics()) + }) + } } From 644f2ad84767221cb0087d4fb4d65a32a6958c06 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Tue, 17 Dec 2019 15:22:48 -0700 Subject: [PATCH 1413/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a28a0d0e6..830874f73 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ #### Bugfixes - [#6788](https://github.com/influxdata/telegraf/issues/6788): Fix ServerProperty query stops working on Azure after failover. +- [#6803](https://github.com/influxdata/telegraf/pull/6803): Add leading period to OID in SNMP v1 generic traps ## v1.13 [2019-12-12] From 697963e8cca3db45ec5e681372d3b535359358f6 Mon Sep 17 00:00:00 2001 From: Enno Lohmeier Date: Wed, 18 Dec 2019 01:50:00 +0100 Subject: [PATCH 1414/1815] Expose unbound-control config file option (#6770) --- etc/telegraf.conf | 3 +++ plugins/inputs/unbound/README.md | 3 +++ plugins/inputs/unbound/unbound.go | 15 ++++++++++++--- plugins/inputs/unbound/unbound_test.go | 8 ++++---- 4 files changed, 22 insertions(+), 7 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index dbafd2f83..517600475 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -4597,6 +4597,9 @@ # ## The default location of the unbound-control binary can be overridden with: # # binary = "/usr/sbin/unbound-control" # +# ## The default location of the unbound config file can be overridden with: +# # config_file = "/etc/unbound/unbound.conf" +# # ## The default timeout of 1s can be overriden with: # # timeout = "1s" # diff --git a/plugins/inputs/unbound/README.md b/plugins/inputs/unbound/README.md index 36c9aa47d..d7d5c8ba9 100644 --- a/plugins/inputs/unbound/README.md +++ b/plugins/inputs/unbound/README.md @@ -18,6 +18,9 @@ a validating, recursive, and caching DNS resolver. 
## The default location of the unbound-control binary can be overridden with: # binary = "/usr/sbin/unbound-control" + ## The default location of the unbound config file can be overridden with: + # config_file = "/etc/unbound/unbound.conf" + ## The default timeout of 1s can be overriden with: # timeout = "1s" diff --git a/plugins/inputs/unbound/unbound.go b/plugins/inputs/unbound/unbound.go index 02067c739..c8247d0cf 100644 --- a/plugins/inputs/unbound/unbound.go +++ b/plugins/inputs/unbound/unbound.go @@ -17,7 +17,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -type runner func(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ThreadAsTag bool) (*bytes.Buffer, error) +type runner func(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ThreadAsTag bool, ConfigFile string) (*bytes.Buffer, error) // Unbound is used to store configuration values type Unbound struct { @@ -26,6 +26,7 @@ type Unbound struct { UseSudo bool Server string ThreadAsTag bool + ConfigFile string filter filter.Filter run runner @@ -45,6 +46,9 @@ var sampleConfig = ` ## The default location of the unbound-control binary can be overridden with: # binary = "/usr/sbin/unbound-control" + ## The default location of the unbound config file can be overridden with: + # config_file = "/etc/unbound/unbound.conf" + ## The default timeout of 1s can be overriden with: # timeout = "1s" @@ -67,7 +71,7 @@ func (s *Unbound) SampleConfig() string { } // Shell out to unbound_stat and return the output -func unboundRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ThreadAsTag bool) (*bytes.Buffer, error) { +func unboundRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ThreadAsTag bool, ConfigFile string) (*bytes.Buffer, error) { cmdArgs := []string{"stats_noreset"} if Server != "" { @@ -96,6 +100,10 @@ func unboundRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Serv cmdArgs = append([]string{"-s", server}, cmdArgs...) } + if ConfigFile != "" { + cmdArgs = append([]string{"-c", ConfigFile}, cmdArgs...) + } + cmd := exec.Command(cmdName, cmdArgs...) 
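	// At this point the assembled invocation, with every option set, looks
	// like (server value illustrative only):
	//
	//	unbound-control -c /etc/unbound/unbound.conf -s 127.0.0.1@8953 stats_noreset
	//
	// "-s" is prepended first and "-c" after it, so "-c" ends up at the
	// front; the UseSudo handling just below wraps the whole command in sudo.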
if UseSudo { @@ -125,7 +133,7 @@ func (s *Unbound) Gather(acc telegraf.Accumulator) error { return err } - out, err := s.run(s.Binary, s.Timeout, s.UseSudo, s.Server, s.ThreadAsTag) + out, err := s.run(s.Binary, s.Timeout, s.UseSudo, s.Server, s.ThreadAsTag, s.ConfigFile) if err != nil { return fmt.Errorf("error gathering metrics: %s", err) } @@ -207,6 +215,7 @@ func init() { UseSudo: false, Server: "", ThreadAsTag: false, + ConfigFile: "", } }) } diff --git a/plugins/inputs/unbound/unbound_test.go b/plugins/inputs/unbound/unbound_test.go index b1d6206c3..cc4b99dae 100644 --- a/plugins/inputs/unbound/unbound_test.go +++ b/plugins/inputs/unbound/unbound_test.go @@ -12,8 +12,8 @@ import ( var TestTimeout = internal.Duration{Duration: time.Second} -func UnboundControl(output string, Timeout internal.Duration, useSudo bool, Server string, ThreadAsTag bool) func(string, internal.Duration, bool, string, bool) (*bytes.Buffer, error) { - return func(string, internal.Duration, bool, string, bool) (*bytes.Buffer, error) { +func UnboundControl(output string, Timeout internal.Duration, useSudo bool, Server string, ThreadAsTag bool, ConfigFile string) func(string, internal.Duration, bool, string, bool, string) (*bytes.Buffer, error) { + return func(string, internal.Duration, bool, string, bool, string) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } } @@ -21,7 +21,7 @@ func UnboundControl(output string, Timeout internal.Duration, useSudo bool, Serv func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Unbound{ - run: UnboundControl(fullOutput, TestTimeout, true, "", false), + run: UnboundControl(fullOutput, TestTimeout, true, "", false, ""), } err := v.Gather(acc) @@ -38,7 +38,7 @@ func TestParseFullOutput(t *testing.T) { func TestParseFullOutputThreadAsTag(t *testing.T) { acc := &testutil.Accumulator{} v := &Unbound{ - run: UnboundControl(fullOutput, TestTimeout, true, "", true), + run: UnboundControl(fullOutput, TestTimeout, true, "", true, ""), ThreadAsTag: true, } err := v.Gather(acc) From 17a84ab4b4ac93ea59f2e7d4eec29dbc1c463273 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 17 Dec 2019 16:51:11 -0800 Subject: [PATCH 1415/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 830874f73..4c001fc4d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ - [#6730](https://github.com/influxdata/telegraf/pull/6730): Add page_faults for mongodb wired tiger. - [#6798](https://github.com/influxdata/telegraf/pull/6798): Add use_sudo option to ipmi_sensor input. - [#6764](https://github.com/influxdata/telegraf/pull/6764): Add ability to collect pod labels to kubernetes input. +- [#6770](https://github.com/influxdata/telegraf/pull/6770): Expose unbound-control config file option. 
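The updated mock in `unbound_test.go` above still discards its arguments, so the new `ConfigFile` plumbing is exercised but never asserted. A minimal sketch of a test that would pin it down, reusing the existing `Unbound` and `runner` shapes (variable names hypothetical, and assuming the file's existing testify import):

```go
var gotConfigFile string
v := &Unbound{
	ConfigFile: "/etc/unbound/unbound.conf",
	run: func(_ string, _ internal.Duration, _ bool, _ string, _ bool, configFile string) (*bytes.Buffer, error) {
		gotConfigFile = configFile // capture what Gather passed through
		return bytes.NewBuffer([]byte(fullOutput)), nil
	},
}
acc := &testutil.Accumulator{}
assert.NoError(t, v.Gather(acc))
assert.Equal(t, "/etc/unbound/unbound.conf", gotConfigFile)
```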
## v1.13.1 [unreleased] From b380c65c15361a97c7bbfa0269bed353a9a3c67a Mon Sep 17 00:00:00 2001 From: Bugagazavr Date: Wed, 18 Dec 2019 02:58:06 +0200 Subject: [PATCH 1416/1815] Add support for new nginx plus api endpoints (#6508) --- plugins/inputs/nginx_plus_api/README.md | 113 +++++++---- .../inputs/nginx_plus_api/nginx_plus_api.go | 11 +- .../nginx_plus_api/nginx_plus_api_metrics.go | 96 ++++++++++ .../nginx_plus_api_metrics_test.go | 179 ++++++++++++++++++ .../nginx_plus_api/nginx_plus_api_types.go | 26 +++ 5 files changed, 384 insertions(+), 41 deletions(-) diff --git a/plugins/inputs/nginx_plus_api/README.md b/plugins/inputs/nginx_plus_api/README.md index e90645e43..4ec63b2e8 100644 --- a/plugins/inputs/nginx_plus_api/README.md +++ b/plugins/inputs/nginx_plus_api/README.md @@ -29,6 +29,24 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use | nginx_plus_stream_upstream_peer | nginx_plus_api_stream_upstream_peers | | nginx.stream.zone | nginx_plus_api_stream_server_zones | +### Measurements by API version + +| Measurement | API version (api_version) | +|--------------------------------------|---------------------------| +| nginx_plus_api_processes | >= 3 | +| nginx_plus_api_connections | >= 3 | +| nginx_plus_api_ssl | >= 3 | +| nginx_plus_api_http_requests | >= 3 | +| nginx_plus_api_http_server_zones | >= 3 | +| nginx_plus_api_http_upstreams | >= 3 | +| nginx_plus_api_http_upstream_peers | >= 3 | +| nginx_plus_api_http_caches | >= 3 | +| nginx_plus_api_stream_upstreams | >= 3 | +| nginx_plus_api_stream_upstream_peers | >= 3 | +| nginx_plus_api_stream_server_zones | >= 3 | +| nginx_plus_api_http_location_zones | >= 5 | +| nginx_plus_api_resolver_zones | >= 5 | + ### Measurements & Fields: - nginx_plus_api_processes @@ -129,7 +147,29 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use - connections - received - sent - +- nginx_plus_api_location_zones + - requests + - responses_1xx + - responses_2xx + - responses_3xx + - responses_4xx + - responses_5xx + - responses_total + - received + - sent + - discarded +- nginx_plus_api_resolver_zones + - name + - srv + - addr + - noerror + - formerr + - servfail + - nxdomain + - notimp + - refused + - timedout + - unknown ### Tags: @@ -142,7 +182,7 @@ Nginx Plus is a commercial version of the open source web server Nginx. 
The use - source - port -- nginx_plus_api_http_server_zones, nginx_plus_api_upstream_server_zones +- nginx_plus_api_http_server_zones, nginx_plus_api_upstream_server_zones, nginx_plus_api_http_location_zones, nginx_plus_api_resolver_zones - source - port - zone @@ -174,41 +214,40 @@ When run with: It produces: ``` -> nginx_plus_api_processes,host=localhost,port=80,source=localhost respawned=0i 1539163505000000000 -> nginx_plus_api_connections,host=localhost,port=80,source=localhost accepted=120890747i,active=6i,dropped=0i,idle=67i 1539163505000000000 -> nginx_plus_api_ssl,host=localhost,port=80,source=localhost handshakes=2983938i,handshakes_failed=54350i,session_reuses=2485267i 1539163506000000000 -> nginx_plus_api_http_requests,host=localhost,port=80,source=localhost current=12i,total=175270198i 1539163506000000000 -> nginx_plus_api_http_server_zones,host=localhost,port=80,source=localhost,zone=hg.nginx.org discarded=45i,processing=0i,received=35723884i,requests=134102i,responses_1xx=0i,responses_2xx=96890i,responses_3xx=6892i,responses_4xx=30270i,responses_5xx=5i,responses_total=134057i,sent=3681826618i 1539163506000000000 -> nginx_plus_api_http_server_zones,host=localhost,port=80,source=localhost,zone=trac.nginx.org discarded=4034i,processing=9i,received=282399663i,requests=336129i,responses_1xx=0i,responses_2xx=101264i,responses_3xx=25454i,responses_4xx=68961i,responses_5xx=136407i,responses_total=332086i,sent=2346677493i 1539163506000000000 -> nginx_plus_api_http_server_zones,host=localhost,port=80,source=localhost,zone=lxr.nginx.org discarded=4i,processing=1i,received=7223569i,requests=29661i,responses_1xx=0i,responses_2xx=28584i,responses_3xx=73i,responses_4xx=390i,responses_5xx=609i,responses_total=29656i,sent=5811238975i 1539163506000000000 -> nginx_plus_api_http_upstreams,host=localhost,port=80,source=localhost,upstream=trac-backend keepalive=0i,zombies=0i 1539163506000000000 -> nginx_plus_api_http_upstream_peers,host=localhost,id=0,port=80,source=localhost,upstream=trac-backend,upstream_address=10.0.0.1:8080 active=0i,backup=false,downtime=53870i,fails=5i,header_time=421i,healthchecks_checks=17275i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=1885213684i,requests=88476i,response_time=423i,responses_1xx=0i,responses_2xx=50997i,responses_3xx=205i,responses_4xx=34344i,responses_5xx=2076i,responses_total=87622i,sent=189938404i,state="up",unavail=5i,weight=1i 1539163506000000000 -> nginx_plus_api_http_upstream_peers,host=localhost,id=1,port=80,source=localhost,upstream=trac-backend,upstream_address=10.0.0.1:8081 active=0i,backup=true,downtime=173957231i,fails=0i,healthchecks_checks=17394i,healthchecks_fails=17394i,healthchecks_last_passed=false,healthchecks_unhealthy=1i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="unhealthy",unavail=0i,weight=1i 1539163506000000000 -> nginx_plus_api_http_upstreams,host=localhost,port=80,source=localhost,upstream=hg-backend keepalive=0i,zombies=0i 1539163506000000000 -> nginx_plus_api_http_upstream_peers,host=localhost,id=0,port=80,source=localhost,upstream=hg-backend,upstream_address=10.0.0.1:8088 
active=0i,backup=false,downtime=0i,fails=0i,header_time=22i,healthchecks_checks=17319i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=3724240605i,requests=89563i,response_time=44i,responses_1xx=0i,responses_2xx=81996i,responses_3xx=6886i,responses_4xx=639i,responses_5xx=5i,responses_total=89526i,sent=31597952i,state="up",unavail=0i,weight=5i 1539163506000000000 -> nginx_plus_api_http_upstream_peers,host=localhost,id=1,port=80,source=localhost,upstream=hg-backend,upstream_address=10.0.0.1:8089 active=0i,backup=true,downtime=173957231i,fails=0i,healthchecks_checks=17394i,healthchecks_fails=17394i,healthchecks_last_passed=false,healthchecks_unhealthy=1i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="unhealthy",unavail=0i,weight=1i 1539163506000000000 -> nginx_plus_api_http_upstreams,host=localhost,port=80,source=localhost,upstream=lxr-backend keepalive=0i,zombies=0i 1539163506000000000 -> nginx_plus_api_http_upstream_peers,host=localhost,id=0,port=80,source=localhost,upstream=lxr-backend,upstream_address=unix:/tmp/cgi.sock active=0i,backup=false,downtime=0i,fails=609i,header_time=111i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=6220215064i,requests=28278i,response_time=172i,responses_1xx=0i,responses_2xx=27665i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=27665i,sent=21337016i,state="up",unavail=0i,weight=1i 1539163506000000000 -> nginx_plus_api_http_upstream_peers,host=localhost,id=1,port=80,source=localhost,upstream=lxr-backend,upstream_address=unix:/tmp/cgib.sock active=0i,backup=true,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,max_conns=42i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="up",unavail=0i,weight=1i 1539163506000000000 -> nginx_plus_api_http_upstreams,host=localhost,port=80,source=localhost,upstream=demo-backend keepalive=0i,zombies=0i 1539163506000000000 -> nginx_plus_api_http_upstream_peers,host=localhost,id=0,port=80,source=localhost,upstream=demo-backend,upstream_address=10.0.0.2:15431 active=0i,backup=false,downtime=0i,fails=0i,healthchecks_checks=173640i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="up",unavail=0i,weight=1i 1539163506000000000 -> nginx_plus_api_http_caches,cache=http_cache,host=localhost,port=80,source=localhost bypass_bytes=0i,bypass_bytes_written=0i,bypass_responses=0i,bypass_responses_written=0i,cold=false,expired_bytes=133671410i,expired_bytes_written=129210272i,expired_responses=15721i,expired_responses_written=15213i,hit_bytes=2459840828i,hit_responses=231195i,max_size=536870912i,miss_bytes=18742246i,miss_bytes_written=85199i,miss_responses=2816i,miss_responses_written=69i,revalidated_bytes=0i,revalidated_responses=0i,size=774144i,stale_bytes=0i,stale_responses=0i,updating_bytes=0i,updating_responses=0i 1539163506000000000 -> nginx_plus_api_stream_server_zones,host=localhost,port=80,source=localhost,zone=postgresql_loadbalancer connections=173639i,processing=0i,received=17884817i,sent=33685966i 1539163506000000000 -> nginx_plus_api_stream_server_zones,host=localhost,port=80,source=localhost,zone=dns_loadbalancer 
connections=97255i,processing=0i,received=2699082i,sent=16566552i 1539163506000000000 -> nginx_plus_api_stream_upstreams,host=localhost,port=80,source=localhost,upstream=postgresql_backends zombies=0i 1539163507000000000 -> nginx_plus_api_stream_upstream_peers,host=localhost,id=0,port=80,source=localhost,upstream=postgresql_backends,upstream_address=10.0.0.2:15432 active=0i,backup=false,connect_time=4i,connections=57880i,downtime=0i,fails=0i,first_byte_time=10i,healthchecks_checks=34781i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=11228720i,response_time=10i,sent=5961640i,state="up",unavail=0i,weight=1i 1539163507000000000 -> nginx_plus_api_stream_upstream_peers,host=localhost,id=1,port=80,source=localhost,upstream=postgresql_backends,upstream_address=10.0.0.2:15433 active=0i,backup=false,connect_time=3i,connections=57880i,downtime=0i,fails=0i,first_byte_time=9i,healthchecks_checks=34781i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=11228720i,response_time=10i,sent=5961640i,state="up",unavail=0i,weight=1i 1539163507000000000 -> nginx_plus_api_stream_upstream_peers,host=localhost,id=2,port=80,source=localhost,upstream=postgresql_backends,upstream_address=10.0.0.2:15434 active=0i,backup=false,connect_time=2i,connections=57879i,downtime=0i,fails=0i,first_byte_time=9i,healthchecks_checks=34781i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=11228526i,response_time=9i,sent=5961537i,state="up",unavail=0i,weight=1i 1539163507000000000 -> nginx_plus_api_stream_upstream_peers,host=localhost,id=3,port=80,source=localhost,upstream=postgresql_backends,upstream_address=10.0.0.2:15435 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1539163507000000000 -> nginx_plus_api_stream_upstreams,host=localhost,port=80,source=localhost,upstream=dns_udp_backends zombies=0i 1539163507000000000 -> nginx_plus_api_stream_upstream_peers,host=localhost,id=0,port=80,source=localhost,upstream=dns_udp_backends,upstream_address=10.0.0.5:53 active=0i,backup=false,connect_time=0i,connections=64837i,downtime=0i,fails=0i,first_byte_time=17i,healthchecks_checks=34761i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=10996616i,response_time=17i,sent=1791693i,state="up",unavail=0i,weight=2i 1539163507000000000 -> nginx_plus_api_stream_upstream_peers,host=localhost,id=1,port=80,source=localhost,upstream=dns_udp_backends,upstream_address=10.0.0.2:53 active=0i,backup=false,connect_time=0i,connections=32418i,downtime=0i,fails=0i,first_byte_time=17i,healthchecks_checks=34761i,healthchecks_fails=0i,healthchecks_last_passed=true,healthchecks_unhealthy=0i,received=5569936i,response_time=17i,sent=907389i,state="up",unavail=0i,weight=1i 1539163507000000000 -> nginx_plus_api_stream_upstream_peers,host=localhost,id=2,port=80,source=localhost,upstream=dns_udp_backends,upstream_address=10.0.0.7:53 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1539163507000000000 -> nginx_plus_api_stream_upstreams,host=localhost,port=80,source=localhost,upstream=unused_tcp_backends zombies=0i 1539163507000000000 -> 
nginx_plus_api_stream_upstream_peers,host=localhost,id=1,port=80,source=localhost,upstream=unused_tcp_backends,upstream_address=95.211.80.227:80 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1539163507000000000 -> nginx_plus_api_stream_upstream_peers,host=localhost,id=2,port=80,source=localhost,upstream=unused_tcp_backends,upstream_address=206.251.255.63:80 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1539163507000000000 -> nginx_plus_api_stream_upstream_peers,host=localhost,id=3,port=80,source=localhost,upstream=unused_tcp_backends,upstream_address=[2001:1af8:4060:a004:21::e3]:80 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1539163507000000000 -> nginx_plus_api_stream_upstream_peers,host=localhost,id=4,port=80,source=localhost,upstream=unused_tcp_backends,upstream_address=[2606:7100:1:69::3f]:80 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1539163507000000000 +> nginx_plus_api_processes,port=80,source=demo.nginx.com respawned=0i 1570696321000000000 +> nginx_plus_api_connections,port=80,source=demo.nginx.com accepted=68998606i,active=7i,dropped=0i,idle=57i 1570696322000000000 +> nginx_plus_api_ssl,port=80,source=demo.nginx.com handshakes=9398978i,handshakes_failed=289353i,session_reuses=1004389i 1570696322000000000 +> nginx_plus_api_http_requests,port=80,source=demo.nginx.com current=51i,total=264649353i 1570696322000000000 +> nginx_plus_api_http_server_zones,port=80,source=demo.nginx.com,zone=hg.nginx.org discarded=5i,processing=0i,received=24123604i,requests=60138i,responses_1xx=0i,responses_2xx=59353i,responses_3xx=531i,responses_4xx=249i,responses_5xx=0i,responses_total=60133i,sent=830165221i 1570696322000000000 +> nginx_plus_api_http_server_zones,port=80,source=demo.nginx.com,zone=trac.nginx.org discarded=250i,processing=0i,received=2184618i,requests=12404i,responses_1xx=0i,responses_2xx=8579i,responses_3xx=2513i,responses_4xx=583i,responses_5xx=479i,responses_total=12154i,sent=139384159i 1570696322000000000 +> nginx_plus_api_http_server_zones,port=80,source=demo.nginx.com,zone=lxr.nginx.org discarded=1i,processing=0i,received=1011701i,requests=4523i,responses_1xx=0i,responses_2xx=4332i,responses_3xx=28i,responses_4xx=39i,responses_5xx=123i,responses_total=4522i,sent=72631354i 1570696322000000000 +> nginx_plus_api_http_upstreams,port=80,source=demo.nginx.com,upstream=trac-backend keepalive=0i,zombies=0i 1570696322000000000 +> nginx_plus_api_http_upstream_peers,id=0,port=80,source=demo.nginx.com,upstream=trac-backend,upstream_address=10.0.0.1:8080 active=0i,backup=false,downtime=0i,fails=0i,header_time=235i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=88581178i,requests=3180i,response_time=235i,responses_1xx=0i,responses_2xx=3168i,responses_3xx=5i,responses_4xx=6i,responses_5xx=0i,responses_total=3179i,sent=1321720i,state="up",unavail=0i,weight=1i 1570696322000000000 +> nginx_plus_api_http_upstream_peers,id=1,port=80,source=demo.nginx.com,upstream=trac-backend,upstream_address=10.0.0.1:8081 
active=0i,backup=true,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696322000000000 +> nginx_plus_api_http_upstreams,port=80,source=demo.nginx.com,upstream=hg-backend keepalive=0i,zombies=0i 1570696322000000000 +> nginx_plus_api_http_upstream_peers,id=0,port=80,source=demo.nginx.com,upstream=hg-backend,upstream_address=10.0.0.1:8088 active=0i,backup=false,downtime=0i,fails=0i,header_time=22i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=909402572i,requests=18514i,response_time=88i,responses_1xx=0i,responses_2xx=17799i,responses_3xx=531i,responses_4xx=179i,responses_5xx=0i,responses_total=18509i,sent=10608107i,state="up",unavail=0i,weight=5i 1570696322000000000 +> nginx_plus_api_http_upstream_peers,id=1,port=80,source=demo.nginx.com,upstream=hg-backend,upstream_address=10.0.0.1:8089 active=0i,backup=true,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696322000000000 +> nginx_plus_api_http_upstreams,port=80,source=demo.nginx.com,upstream=lxr-backend keepalive=0i,zombies=0i 1570696322000000000 +> nginx_plus_api_http_upstream_peers,id=0,port=80,source=demo.nginx.com,upstream=lxr-backend,upstream_address=unix:/tmp/cgi.sock active=0i,backup=false,downtime=0i,fails=123i,header_time=91i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=71782888i,requests=4354i,response_time=91i,responses_1xx=0i,responses_2xx=4230i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=4230i,sent=3088656i,state="up",unavail=0i,weight=1i 1570696322000000000 +> nginx_plus_api_http_upstream_peers,id=1,port=80,source=demo.nginx.com,upstream=lxr-backend,upstream_address=unix:/tmp/cgib.sock active=0i,backup=true,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,max_conns=42i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696322000000000 +> nginx_plus_api_http_upstreams,port=80,source=demo.nginx.com,upstream=demo-backend keepalive=0i,zombies=0i 1570696322000000000 +> nginx_plus_api_http_upstream_peers,id=0,port=80,source=demo.nginx.com,upstream=demo-backend,upstream_address=10.0.0.2:15431 active=0i,backup=false,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696322000000000 +> nginx_plus_api_http_caches,cache=http_cache,port=80,source=demo.nginx.com 
bypass_bytes=0i,bypass_bytes_written=0i,bypass_responses=0i,bypass_responses_written=0i,cold=false,expired_bytes=381518640i,expired_bytes_written=363449785i,expired_responses=42114i,expired_responses_written=39954i,hit_bytes=6321885979i,hit_responses=596730i,max_size=536870912i,miss_bytes=48512185i,miss_bytes_written=155600i,miss_responses=6052i,miss_responses_written=136i,revalidated_bytes=0i,revalidated_responses=0i,size=765952i,stale_bytes=0i,stale_responses=0i,updating_bytes=0i,updating_responses=0i 1570696323000000000 +> nginx_plus_api_stream_server_zones,port=80,source=demo.nginx.com,zone=postgresql_loadbalancer connections=0i,processing=0i,received=0i,sent=0i 1570696323000000000 +> nginx_plus_api_stream_server_zones,port=80,source=demo.nginx.com,zone=dns_loadbalancer connections=0i,processing=0i,received=0i,sent=0i 1570696323000000000 +> nginx_plus_api_stream_upstreams,port=80,source=demo.nginx.com,upstream=postgresql_backends zombies=0i 1570696323000000000 +> nginx_plus_api_stream_upstream_peers,id=0,port=80,source=demo.nginx.com,upstream=postgresql_backends,upstream_address=10.0.0.2:15432 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696323000000000 +> nginx_plus_api_stream_upstream_peers,id=1,port=80,source=demo.nginx.com,upstream=postgresql_backends,upstream_address=10.0.0.2:15433 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696323000000000 +> nginx_plus_api_stream_upstream_peers,id=2,port=80,source=demo.nginx.com,upstream=postgresql_backends,upstream_address=10.0.0.2:15434 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696323000000000 +> nginx_plus_api_stream_upstream_peers,id=3,port=80,source=demo.nginx.com,upstream=postgresql_backends,upstream_address=10.0.0.2:15435 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1570696323000000000 +> nginx_plus_api_stream_upstreams,port=80,source=demo.nginx.com,upstream=dns_udp_backends zombies=0i 1570696323000000000 +> nginx_plus_api_stream_upstream_peers,id=0,port=80,source=demo.nginx.com,upstream=dns_udp_backends,upstream_address=10.0.0.5:53 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="up",unavail=0i,weight=2i 1570696323000000000 +> nginx_plus_api_stream_upstream_peers,id=1,port=80,source=demo.nginx.com,upstream=dns_udp_backends,upstream_address=10.0.0.2:53 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696323000000000 +> nginx_plus_api_stream_upstream_peers,id=2,port=80,source=demo.nginx.com,upstream=dns_udp_backends,upstream_address=10.0.0.7:53 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1570696323000000000 +> nginx_plus_api_stream_upstreams,port=80,source=demo.nginx.com,upstream=unused_tcp_backends 
zombies=0i 1570696323000000000 +> nginx_plus_api_http_location_zones,port=80,source=demo.nginx.com,zone=swagger discarded=0i,received=1622i,requests=8i,responses_1xx=0i,responses_2xx=7i,responses_3xx=0i,responses_4xx=1i,responses_5xx=0i,responses_total=8i,sent=638333i 1570696323000000000 +> nginx_plus_api_http_location_zones,port=80,source=demo.nginx.com,zone=api-calls discarded=64i,received=337530181i,requests=1726513i,responses_1xx=0i,responses_2xx=1726428i,responses_3xx=0i,responses_4xx=21i,responses_5xx=0i,responses_total=1726449i,sent=1902577668i 1570696323000000000 +> nginx_plus_api_resolver_zones,port=80,source=demo.nginx.com,zone=resolver1 addr=0i,formerr=0i,name=0i,noerror=0i,notimp=0i,nxdomain=0i,refused=0i,servfail=0i,srv=0i,timedout=0i,unknown=0i 1570696324000000000 ``` ### Reference material diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api.go b/plugins/inputs/nginx_plus_api/nginx_plus_api.go index 3487dd512..addb813e3 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api.go @@ -31,10 +31,13 @@ const ( connectionsPath = "connections" sslPath = "ssl" - httpRequestsPath = "http/requests" - httpServerZonesPath = "http/server_zones" - httpUpstreamsPath = "http/upstreams" - httpCachesPath = "http/caches" + httpRequestsPath = "http/requests" + httpServerZonesPath = "http/server_zones" + httpLocationZonesPath = "http/location_zones" + httpUpstreamsPath = "http/upstreams" + httpCachesPath = "http/caches" + + resolverZonesPath = "resolvers" streamServerZonesPath = "stream/server_zones" streamUpstreamsPath = "stream/upstreams" diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go index 1936591c9..6aaaff2d3 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go @@ -29,6 +29,11 @@ func (n *NginxPlusApi) gatherMetrics(addr *url.URL, acc telegraf.Accumulator) { addError(acc, n.gatherHttpCachesMetrics(addr, acc)) addError(acc, n.gatherStreamServerZonesMetrics(addr, acc)) addError(acc, n.gatherStreamUpstreamsMetrics(addr, acc)) + + if n.ApiVersion >= 5 { + addError(acc, n.gatherHttpLocationZonesMetrics(addr, acc)) + addError(acc, n.gatherResolverZonesMetrics(addr, acc)) + } } func addError(acc telegraf.Accumulator, err error) { @@ -221,6 +226,53 @@ func (n *NginxPlusApi) gatherHttpServerZonesMetrics(addr *url.URL, acc telegraf. 
return nil } +// Added in 5 API version +func (n *NginxPlusApi) gatherHttpLocationZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, httpLocationZonesPath) + if err != nil { + return err + } + + var httpLocationZones HttpLocationZones + + if err := json.Unmarshal(body, &httpLocationZones); err != nil { + return err + } + + tags := getTags(addr) + + for zoneName, zone := range httpLocationZones { + zoneTags := map[string]string{} + for k, v := range tags { + zoneTags[k] = v + } + zoneTags["zone"] = zoneName + acc.AddFields( + "nginx_plus_api_http_location_zones", + func() map[string]interface{} { + result := map[string]interface{}{ + "requests": zone.Requests, + "responses_1xx": zone.Responses.Responses1xx, + "responses_2xx": zone.Responses.Responses2xx, + "responses_3xx": zone.Responses.Responses3xx, + "responses_4xx": zone.Responses.Responses4xx, + "responses_5xx": zone.Responses.Responses5xx, + "responses_total": zone.Responses.Total, + "received": zone.Received, + "sent": zone.Sent, + } + if zone.Discarded != nil { + result["discarded"] = *zone.Discarded + } + return result + }(), + zoneTags, + ) + } + + return nil +} + func (n *NginxPlusApi) gatherHttpUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error { body, err := n.gatherUrl(addr, httpUpstreamsPath) if err != nil { @@ -394,6 +446,50 @@ func (n *NginxPlusApi) gatherStreamServerZonesMetrics(addr *url.URL, acc telegra return nil } +// Added in 5 API version +func (n *NginxPlusApi) gatherResolverZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, resolverZonesPath) + if err != nil { + return err + } + + var resolverZones ResolverZones + + if err := json.Unmarshal(body, &resolverZones); err != nil { + return err + } + + tags := getTags(addr) + + for zoneName, resolver := range resolverZones { + zoneTags := map[string]string{} + for k, v := range tags { + zoneTags[k] = v + } + zoneTags["zone"] = zoneName + acc.AddFields( + "nginx_plus_api_resolver_zones", + map[string]interface{}{ + "name": resolver.Requests.Name, + "srv": resolver.Requests.Srv, + "addr": resolver.Requests.Addr, + + "noerror": resolver.Responses.Noerror, + "formerr": resolver.Responses.Formerr, + "servfail": resolver.Responses.Servfail, + "nxdomain": resolver.Responses.Nxdomain, + "notimp": resolver.Responses.Notimp, + "refused": resolver.Responses.Refused, + "timedout": resolver.Responses.Timedout, + "unknown": resolver.Responses.Unknown, + }, + zoneTags, + ) + } + + return nil +} + func (n *NginxPlusApi) gatherStreamUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error { body, err := n.gatherUrl(addr, streamUpstreamsPath) if err != nil { diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go index da1806aac..584816fe7 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go @@ -35,6 +35,45 @@ const sslPayload = ` } ` +const resolverZonesPayload = ` +{ + "resolver_zone1": { + "requests": { + "name": 25460, + "srv": 130, + "addr": 2580 + }, + "responses": { + "noerror": 26499, + "formerr": 0, + "servfail": 3, + "nxdomain": 0, + "notimp": 0, + "refused": 0, + "timedout": 243, + "unknown": 478 + } + }, + "resolver_zone2": { + "requests": { + "name": 325460, + "srv": 1130, + "addr": 12580 + }, + "responses": { + "noerror": 226499, + "formerr": 0, + "servfail": 283, + "nxdomain": 0, + "notimp": 0, + 
"refused": 0, + "timedout": 743, + "unknown": 1478 + } + } +} +` + const httpRequestsPayload = ` { "total": 10624511, @@ -77,6 +116,39 @@ const httpServerZonesPayload = ` } ` +const httpLocationZonesPayload = ` +{ + "site1": { + "requests": 736395, + "responses": { + "1xx": 0, + "2xx": 727290, + "3xx": 4614, + "4xx": 934, + "5xx": 1535, + "total": 734373 + }, + "discarded": 2020, + "received": 180157219, + "sent": 20183175459 + }, + "site2": { + "requests": 185307, + "responses": { + "1xx": 0, + "2xx": 112674, + "3xx": 45383, + "4xx": 2504, + "5xx": 4419, + "total": 164980 + }, + "discarded": 20326, + "received": 51575327, + "sent": 2983241510 + } +} +` + const httpUpstreamsPayload = ` { "trac-backend": { @@ -591,6 +663,58 @@ func TestGatherHttpServerZonesMetrics(t *testing.T) { }) } +func TestGatherHttpLocationZonesMetrics(t *testing.T) { + ts, n := prepareEndpoint(t, httpLocationZonesPath, defaultApiVersion, httpLocationZonesPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(t, ts) + + require.NoError(t, n.gatherHttpLocationZonesMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_location_zones", + map[string]interface{}{ + "discarded": int64(2020), + "received": int64(180157219), + "requests": int64(736395), + "responses_1xx": int64(0), + "responses_2xx": int64(727290), + "responses_3xx": int64(4614), + "responses_4xx": int64(934), + "responses_5xx": int64(1535), + "responses_total": int64(734373), + "sent": int64(20183175459), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "site1", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_location_zones", + map[string]interface{}{ + "discarded": int64(20326), + "received": int64(51575327), + "requests": int64(185307), + "responses_1xx": int64(0), + "responses_2xx": int64(112674), + "responses_3xx": int64(45383), + "responses_4xx": int64(2504), + "responses_5xx": int64(4419), + "responses_total": int64(164980), + "sent": int64(2983241510), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "site2", + }) +} + func TestHatherHttpUpstreamsMetrics(t *testing.T) { ts, n := prepareEndpoint(t, httpUpstreamsPath, defaultApiVersion, httpUpstreamsPayload) defer ts.Close() @@ -841,6 +965,60 @@ func TestGatherHttpCachesMetrics(t *testing.T) { }) } +func TestGatherResolverZonesMetrics(t *testing.T) { + ts, n := prepareEndpoint(t, resolverZonesPath, defaultApiVersion, resolverZonesPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(t, ts) + + require.NoError(t, n.gatherResolverZonesMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_resolver_zones", + map[string]interface{}{ + "name": int64(25460), + "srv": int64(130), + "addr": int64(2580), + "noerror": int64(26499), + "formerr": int64(0), + "servfail": int64(3), + "nxdomain": int64(0), + "notimp": int64(0), + "refused": int64(0), + "timedout": int64(243), + "unknown": int64(478), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "resolver_zone1", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_resolver_zones", + map[string]interface{}{ + "name": int64(325460), + "srv": int64(1130), + "addr": int64(12580), + "noerror": int64(226499), + "formerr": int64(0), + "servfail": int64(283), + "nxdomain": int64(0), + "notimp": int64(0), + "refused": int64(0), + "timedout": int64(743), + "unknown": int64(1478), + }, + map[string]string{ + "source": host, + "port": port, + 
"zone": "resolver_zone2", + }) +} + func TestGatherStreamUpstreams(t *testing.T) { ts, n := prepareEndpoint(t, streamUpstreamsPath, defaultApiVersion, streamUpstreamsPayload) defer ts.Close() @@ -1023,6 +1201,7 @@ func TestGatherStreamServerZonesMetrics(t *testing.T) { "zone": "dns", }) } + func TestUnavailableEndpoints(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotFound) diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go index b8240f844..868bc04e4 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go @@ -17,6 +17,24 @@ type Ssl struct { // added in version 6 SessionReuses int64 `json:"session_reuses"` } +type ResolverZones map[string]struct { + Requests struct { + Name int64 `json:"name"` + Srv int64 `json:"srv"` + Addr int64 `json:"addr"` + } `json:"requests"` + Responses struct { + Noerror int64 `json:"noerror"` + Formerr int64 `json:"formerr"` + Servfail int64 `json:"servfail"` + Nxdomain int64 `json:"nxdomain"` + Notimp int64 `json:"notimp"` + Refused int64 `json:"refused"` + Timedout int64 `json:"timedout"` + Unknown int64 `json:"unknown"` + } `json:"responses"` +} + type HttpRequests struct { Total int64 `json:"total"` Current int64 `json:"current"` @@ -40,6 +58,14 @@ type HttpServerZones map[string]struct { Sent int64 `json:"sent"` } +type HttpLocationZones map[string]struct { + Requests int64 `json:"requests"` + Responses ResponseStats `json:"responses"` + Discarded *int64 `json:"discarded"` // added in version 6 + Received int64 `json:"received"` + Sent int64 `json:"sent"` +} + type HealthCheckStats struct { Checks int64 `json:"checks"` Fails int64 `json:"fails"` From 2f943e97b77b9e7599103169ba540a94204ea381 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 17 Dec 2019 16:58:57 -0800 Subject: [PATCH 1417/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4c001fc4d..f88318661 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ - [#6798](https://github.com/influxdata/telegraf/pull/6798): Add use_sudo option to ipmi_sensor input. - [#6764](https://github.com/influxdata/telegraf/pull/6764): Add ability to collect pod labels to kubernetes input. - [#6770](https://github.com/influxdata/telegraf/pull/6770): Expose unbound-control config file option. +- [#6508](https://github.com/influxdata/telegraf/pull/6508): Add support for new nginx plus api endpoints. 
## v1.13.1 [unreleased] From 40311dcd7a04b017709969895e56bb5cd02acab9 Mon Sep 17 00:00:00 2001 From: Brad Vernon Date: Thu, 26 Dec 2019 10:15:25 -0800 Subject: [PATCH 1418/1815] Fix missing config fields in prometheus serializer (#6823) --- internal/config/config.go | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/internal/config/config.go b/internal/config/config.go index 3ef4cee58..586acce71 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1988,6 +1988,42 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error } } + if node, ok := tbl.Fields["prometheus_export_timestamp"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if b, ok := kv.Value.(*ast.Boolean); ok { + var err error + c.PrometheusExportTimestamp, err = b.Boolean() + if err != nil { + return nil, err + } + } + } + } + + if node, ok := tbl.Fields["prometheus_sort_metrics"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if b, ok := kv.Value.(*ast.Boolean); ok { + var err error + c.PrometheusSortMetrics, err = b.Boolean() + if err != nil { + return nil, err + } + } + } + } + + if node, ok := tbl.Fields["prometheus_string_as_label"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if b, ok := kv.Value.(*ast.Boolean); ok { + var err error + c.PrometheusStringAsLabel, err = b.Boolean() + if err != nil { + return nil, err + } + } + } + } + delete(tbl.Fields, "influx_max_line_bytes") delete(tbl.Fields, "influx_sort_fields") delete(tbl.Fields, "influx_uint_support") @@ -2000,6 +2036,9 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error delete(tbl.Fields, "splunkmetric_multimetric") delete(tbl.Fields, "wavefront_source_override") delete(tbl.Fields, "wavefront_use_strict") + delete(tbl.Fields, "prometheus_export_timestamp") + delete(tbl.Fields, "prometheus_sort_metrics") + delete(tbl.Fields, "prometheus_string_as_label") return serializers.NewSerializer(c) } From ef7fd9d0305f7102ef928cb32895d820f56335da Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 26 Dec 2019 10:17:52 -0800 Subject: [PATCH 1419/1815] Update changelog --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f88318661..8f8b55b9a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,8 @@ #### Bugfixes - [#6788](https://github.com/influxdata/telegraf/issues/6788): Fix ServerProperty query stops working on Azure after failover. -- [#6803](https://github.com/influxdata/telegraf/pull/6803): Add leading period to OID in SNMP v1 generic traps +- [#6803](https://github.com/influxdata/telegraf/pull/6803): Add leading period to OID in SNMP v1 generic traps. +- [#6823](https://github.com/influxdata/telegraf/pull/6823): Fix missing config fields in prometheus serializer. 
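The three new `prometheus_*` blocks in `buildSerializer` repeat the same `ast.KeyValue`/`ast.Boolean` type-assertion dance. A hedged refactoring sketch only, not part of the patch, using the `toml/ast` types the function already works with (the helper name is hypothetical):

```go
// readBool copies a boolean option out of the parsed TOML table into dst,
// leaving dst untouched when the key is absent or has an unexpected type.
func readBool(tbl *ast.Table, key string, dst *bool) error {
	node, ok := tbl.Fields[key]
	if !ok {
		return nil
	}
	kv, ok := node.(*ast.KeyValue)
	if !ok {
		return nil
	}
	b, ok := kv.Value.(*ast.Boolean)
	if !ok {
		return nil
	}
	v, err := b.Boolean()
	if err != nil {
		return err
	}
	*dst = v
	return nil
}
```

Each option then reduces to a single call such as `readBool(tbl, "prometheus_sort_metrics", &c.PrometheusSortMetrics)` followed by the usual `delete(tbl.Fields, ...)`.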
## v1.13 [2019-12-12] From 8b73625492d676107bf5b81cc232ad8f31c2eb1b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 27 Dec 2019 15:48:45 -0800 Subject: [PATCH 1420/1815] Fix panic on connection loss with undelivered messages (#6806) --- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 2a1631750..5f54f4bb4 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -187,6 +187,7 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { m.state = Disconnected m.acc = acc.WithTracking(m.MaxUndeliveredMessages) + m.sem = make(semaphore, m.MaxUndeliveredMessages) m.ctx, m.cancel = context.WithCancel(context.Background()) m.client = m.clientFactory(m.opts) @@ -215,7 +216,6 @@ func (m *MQTTConsumer) connect() error { m.Log.Infof("Connected %v", m.Servers) m.state = Connected - m.sem = make(semaphore, m.MaxUndeliveredMessages) m.messages = make(map[telegraf.TrackingID]bool) // Presistent sessions should skip subscription if a session is present, as @@ -254,12 +254,12 @@ func (m *MQTTConsumer) recvMessage(c mqtt.Client, msg mqtt.Message) { for { select { case track := <-m.acc.Delivered(): + <-m.sem _, ok := m.messages[track.ID()] if !ok { // Added by a previous connection continue } - <-m.sem // No ack, MQTT does not support durable handling delete(m.messages, track.ID()) case m.sem <- empty{}: From f035d44fe102adbdcce69a794384ee63db3dd3db Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 27 Dec 2019 15:51:09 -0800 Subject: [PATCH 1421/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8f8b55b9a..3e6cc2feb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ - [#6788](https://github.com/influxdata/telegraf/issues/6788): Fix ServerProperty query stops working on Azure after failover. - [#6803](https://github.com/influxdata/telegraf/pull/6803): Add leading period to OID in SNMP v1 generic traps. - [#6823](https://github.com/influxdata/telegraf/pull/6823): Fix missing config fields in prometheus serializer. +- [#6694](https://github.com/influxdata/telegraf/pull/6694): Fix panic on connection loss with undelivered messages in mqtt_consumer. ## v1.13 [2019-12-12] From c325c94a9666c00efa6caec7b3f50ef096d58afa Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 30 Dec 2019 11:33:32 -0800 Subject: [PATCH 1422/1815] Rewrite documentation for snmp input (#6802) --- plugins/inputs/snmp/CONFIG-EXAMPLES.md | 65 ----- plugins/inputs/snmp/DEBUGGING.md | 53 ---- plugins/inputs/snmp/README.md | 319 ++++++++++++++----------- plugins/inputs/snmp/snmp.go | 120 ++++------ plugins/inputs/snmp/snmp_test.go | 40 +--- plugins/inputs/snmp_trap/README.md | 62 +++-- 6 files changed, 278 insertions(+), 381 deletions(-) delete mode 100644 plugins/inputs/snmp/CONFIG-EXAMPLES.md delete mode 100644 plugins/inputs/snmp/DEBUGGING.md diff --git a/plugins/inputs/snmp/CONFIG-EXAMPLES.md b/plugins/inputs/snmp/CONFIG-EXAMPLES.md deleted file mode 100644 index a0a52eeb3..000000000 --- a/plugins/inputs/snmp/CONFIG-EXAMPLES.md +++ /dev/null @@ -1,65 +0,0 @@ -Here are a few configuration examples for different use cases. - -### Switch/router interface metrics - -This setup will collect data on all interfaces from three different tables, `IF-MIB::ifTable`, `IF-MIB::ifXTable` and `EtherLike-MIB::dot3StatsTable`. 
It will also add the name from `IF-MIB::ifDescr` and use that as a tag. Depending on your needs and preferences you can easily use `IF-MIB::ifName` or `IF-MIB::ifAlias` instead or in addition. The values of these are typically: - - IF-MIB::ifName = Gi0/0/0 - IF-MIB::ifDescr = GigabitEthernet0/0/0 - IF-MIB::ifAlias = ### LAN ### - -This configuration also collects the hostname from the device (`RFC1213-MIB::sysName.0`) and adds as a tag. So each metric will both have the configured host/IP as `agent_host` as well as the device self-reported hostname as `hostname` and the name of the host that has collected these metrics as `host`. - -Here is the configuration that you add to your `telegraf.conf`: - -``` -[[inputs.snmp]] - agents = [ "host.example.com" ] - version = 2 - community = "public" - - [[inputs.snmp.field]] - name = "hostname" - oid = "RFC1213-MIB::sysName.0" - is_tag = true - - [[inputs.snmp.field]] - name = "uptime" - oid = "DISMAN-EXPRESSION-MIB::sysUpTimeInstance" - - # IF-MIB::ifTable contains counters on input and output traffic as well as errors and discards. - [[inputs.snmp.table]] - name = "interface" - inherit_tags = [ "hostname" ] - oid = "IF-MIB::ifTable" - - # Interface tag - used to identify interface in metrics database - [[inputs.snmp.table.field]] - name = "ifDescr" - oid = "IF-MIB::ifDescr" - is_tag = true - - # IF-MIB::ifXTable contains newer High Capacity (HC) counters that do not overflow as fast for a few of the ifTable counters - [[inputs.snmp.table]] - name = "interface" - inherit_tags = [ "hostname" ] - oid = "IF-MIB::ifXTable" - - # Interface tag - used to identify interface in metrics database - [[inputs.snmp.table.field]] - name = "ifDescr" - oid = "IF-MIB::ifDescr" - is_tag = true - - # EtherLike-MIB::dot3StatsTable contains detailed ethernet-level information about what kind of errors have been logged on an interface (such as FCS error, frame too long, etc) - [[inputs.snmp.table]] - name = "interface" - inherit_tags = [ "hostname" ] - oid = "EtherLike-MIB::dot3StatsTable" - - # Interface tag - used to identify interface in metrics database - [[inputs.snmp.table.field]] - name = "ifDescr" - oid = "IF-MIB::ifDescr" - is_tag = true -``` diff --git a/plugins/inputs/snmp/DEBUGGING.md b/plugins/inputs/snmp/DEBUGGING.md deleted file mode 100644 index f357c58b5..000000000 --- a/plugins/inputs/snmp/DEBUGGING.md +++ /dev/null @@ -1,53 +0,0 @@ -# Debugging & Testing SNMP Issues - -### Install net-snmp on your system: - -Mac: - -``` -brew install net-snmp -``` - -### Run an SNMP simulator docker image to get a full MIB on port 161: - -``` -docker run -d -p 161:161/udp xeemetric/snmp-simulator -``` - -### snmpget: - -snmpget corresponds to the inputs.snmp.field configuration. - -```bash -$ # get an snmp field with fully-qualified MIB name. -$ snmpget -v2c -c public localhost:161 system.sysUpTime.0 -DISMAN-EVENT-MIB::sysUpTimeInstance = Timeticks: (1643) 0:00:16.43 - -$ # get an snmp field, outputting the numeric OID. 
-$ snmpget -On -v2c -c public localhost:161 system.sysUpTime.0 -.1.3.6.1.2.1.1.3.0 = Timeticks: (1638) 0:00:16.38 -``` - -### snmptranslate: - -snmptranslate can be used to translate an OID to a MIB name: - -```bash -$ snmptranslate .1.3.6.1.2.1.1.3.0 -DISMAN-EVENT-MIB::sysUpTimeInstance -``` - -And to convert a partial MIB name to a fully qualified one: - -```bash -$ snmptranslate -IR sysUpTime.0 -DISMAN-EVENT-MIB::sysUpTimeInstance -``` - -And to convert a MIB name to an OID: - -```bash -$ snmptranslate -On -IR system.sysUpTime.0 -.1.3.6.1.2.1.1.3.0 -``` - diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index a15e5ddb6..68760968a 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -1,180 +1,221 @@ -# SNMP Plugin +# SNMP Input Plugin -The SNMP input plugin gathers metrics from SNMP agents. +The `snmp` input plugin uses polling to gather metrics from SNMP agents. +Support for gathering individual OIDs as well as complete SNMP tables is +included. -## Configuration: +### Prerequisites -See additional SNMP plugin configuration examples [here](./CONFIG-EXAMPLES.md). +This plugin uses the `snmptable` and `snmptranslate` programs from the +[net-snmp][] project. These tools will need to be installed into the `PATH` in +order to be located. Other utilities from the net-snmp project may be useful +for troubleshooting, but are not directly used by the plugin. -### Example: +These programs will load available MIBs on the system. Typically the default +directory for MIBs is `/usr/share/snmp/mibs`, but if your MIBs are in a +different location you may need to make the paths known to net-snmp. The +location of these files can be configured in the `snmp.conf` or via the +`MIBDIRS` environment variable. See [`man 1 snmpcmd`][man snmpcmd] for more +information. -SNMP data: -``` -.1.0.0.0.1.1.0 octet_str "foo" -.1.0.0.0.1.1.1 octet_str "bar" -.1.0.0.0.1.102 octet_str "bad" -.1.0.0.0.1.2.0 integer 1 -.1.0.0.0.1.2.1 integer 2 -.1.0.0.0.1.3.0 octet_str "0.123" -.1.0.0.0.1.3.1 octet_str "0.456" -.1.0.0.0.1.3.2 octet_str "9.999" -.1.0.0.1.1 octet_str "baz" -.1.0.0.1.2 uinteger 54321 -.1.0.0.1.3 uinteger 234 -``` - -Telegraf config: +### Configuration ```toml [[inputs.snmp]] - agents = [ "127.0.0.1:161" ] - version = 2 - community = "public" + ## Agent addresses to retrieve values from. + ## example: agents = ["udp://127.0.0.1:161"] + ## agents = ["tcp://127.0.0.1:161"] + agents = ["udp://127.0.0.1:161"] - name = "system" - [[inputs.snmp.field]] - name = "hostname" - oid = ".1.0.0.1.1" - is_tag = true + ## Timeout for each request. + # timeout = "5s" + + ## SNMP version; can be 1, 2, or 3. + # version = 2 + + ## SNMP community string. + # community = "public" + + ## Number of retries to attempt. + # retries = 3 + + ## The GETBULK max-repetitions parameter. + # max_repetitions = 10 + + ## SNMPv3 authentication and encryption options. + ## + ## Security Name. + # sec_name = "myuser" + ## Authentication protocol; one of "MD5", "SHA", or "". + # auth_protocol = "MD5" + ## Authentication password. + # auth_password = "pass" + ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". + # sec_level = "authNoPriv" + ## Context Name. + # context_name = "" + ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". + # priv_protocol = "" + ## Privacy password used for encrypted messages. + # priv_password = "" + + ## Add fields and tables defining the variables you wish to collect. 
This + ## example collects the system uptime and interface variables. Reference the + ## full plugin documentation for configuration details. [[inputs.snmp.field]] + oid = "RFC1213-MIB::sysUpTime.0" name = "uptime" - oid = ".1.0.0.1.2" - [[inputs.snmp.field]] - name = "loadavg" - oid = ".1.0.0.1.3" - conversion = "float(2)" - - [[inputs.snmp.table]] - name = "remote_servers" - inherit_tags = [ "hostname" ] - [[inputs.snmp.table.field]] - name = "server" - oid = ".1.0.0.0.1.1" - is_tag = true - [[inputs.snmp.table.field]] - name = "connections" - oid = ".1.0.0.0.1.2" - [[inputs.snmp.table.field]] - name = "latency" - oid = ".1.0.0.0.1.3" - conversion = "float" -``` - -Resulting output: -``` -* Plugin: snmp, Collection 1 -> system,agent_host=127.0.0.1,host=mylocalhost,hostname=baz loadavg=2.34,uptime=54321i 1468953135000000000 -> remote_servers,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=foo connections=1i,latency=0.123 1468953135000000000 -> remote_servers,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=bar connections=2i,latency=0.456 1468953135000000000 -``` - -#### Configuration via MIB: - -This example uses the SNMP data above, but is configured via the MIB. -The example MIB file can be found in the `testdata` directory. See the [MIB lookups](#mib-lookups) section for more information. - -Telegraf config: -```toml -[[inputs.snmp]] - agents = [ "127.0.0.1:161" ] - version = 2 - community = "public" [[inputs.snmp.field]] - oid = "TEST::hostname" + oid = "RFC1213-MIB::sysName.0" + name = "source" is_tag = true [[inputs.snmp.table]] - oid = "TEST::testTable" - inherit_tags = [ "hostname" ] + oid = "IF-MIB::ifTable" + name = "interface" + inherit_tags = ["source"] + + [[inputs.snmp.table.field]] + oid = "IF-MIB::ifDescr" + name = "ifDescr" + is_tag = true ``` -Resulting output: -``` -* Plugin: snmp, Collection 1 -> testTable,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=foo connections=1i,latency="0.123" 1468953135000000000 -> testTable,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=bar connections=2i,latency="0.456" 1468953135000000000 +#### Configure SNMP Requests + +This plugin provides two methods for configuring the SNMP requests: `fields` +and `tables`. Use the `field` option to gather single ad-hoc variables. +To collect SNMP tables, use the `table` option. + +##### Field + +Use a `field` to collect a variable by OID. Requests specified with this +option operate similarly to the `snmpget` utility. + +```toml +[[inputs.snmp]] + # ... snip ... + + [[inputs.snmp.field]] + ## Object identifier of the variable as a numeric or textual OID. + oid = "RFC1213-MIB::sysName.0" + + ## Name of the field or tag to create. If not specified, it defaults to + ## the value of 'oid'. If 'oid' is numeric, an attempt to translate the + ## numeric OID into a textual OID will be made. + # name = "" + + ## If true, the variable will be added as a tag; otherwise a field will be + ## created. + # is_tag = false + + ## Apply one of the following conversions to the variable value: + ## float(X) Convert the input value into a float and divide by the + ## Xth power of 10. Effectively just moves the decimal left + ## X places. For example a value of `123` with `float(2)` + ## will result in `1.23`. + ## float: Convert the value into a float with no adjustment. Same + ## as `float(0)`. + ## int: Convert the value into an integer. + ## hwaddr: Convert the value to a MAC address. + ## ipaddr: Convert the value to an IP address.
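+ ## (Editor's illustration, not from the upstream docs: a hypothetical sensor OID reporting tenths of a degree could be scaled to whole degrees with 'conversion = "float(1)"', e.g. a raw value of 215 becomes 21.5.)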
+ # conversion = "" ``` -### Config parameters +##### Table -* `agents`: Default: `[]` -List of SNMP agents to connect to in the form of `[tcp://]IP[:PORT]`. If `:PORT` is unspecified, it defaults to `161`. When using the optional prefix `tcp://`, SNMP over TCP will be used. Otherwise UDP is used as the transport protocol. +Use a `table` to configure the collection of a SNMP table. SNMP requests +formed with this option operate similarly way to the `snmptable` command. -* `version`: Default: `2` -SNMP protocol version to use. +Control the handling of specific table columns using a nested `field`. These +nested fields are specified similarly to a top-level `field`. -* `community`: Default: `"public"` -SNMP community to use. +All columns of the SNMP table will be collected, it is not required to add a +nested field for each column, only those which you wish to modify. To exclude +columns use [metric filtering][]. -* `max_repetitions`: Default: `50` -Maximum number of iterations for repeating variables. +One [metric][] is created for each row of the SNMP table. -* `sec_name`: -Security name for authenticated SNMPv3 requests. +```toml +[[inputs.snmp]] + # ... snip ... -* `auth_protocol`: Values: `"MD5"`,`"SHA"`,`""`. Default: `""` -Authentication protocol for authenticated SNMPv3 requests. + [[inputs.snmp.table]] + ## Object identifier of the SNMP table as a numeric or textual OID. + oid = "IF-MIB::ifTable" -* `auth_password`: -Authentication password for authenticated SNMPv3 requests. + ## Name of the field or tag to create. If not specified, it defaults to + ## the value of 'oid'. If 'oid' is numeric an attempt to translate the + ## numeric OID into a textual OID will be made. + # name = "" -* `sec_level`: Values: `"noAuthNoPriv"`,`"authNoPriv"`,`"authPriv"`. Default: `"noAuthNoPriv"` -Security level used for SNMPv3 messages. + ## Which tags to inherit from the top-level config and to use in the output + ## of this table's measurement. + ## example: inherit_tags = ["source"] + # inherit_tags = [] -* `context_name`: -Context name used for SNMPv3 requests. + ## Add an 'index' tag with the table row number. Use this if the table has + ## no indexes or if you are excluding them. This option is normally not + ## required as any index columns are automatically added as tags. + # index_as_tag = false -* `priv_protocol`: Values: `"DES"`,`"AES"`,`""`. Default: `""` -Privacy protocol used for encrypted SNMPv3 messages. + [[inputs.snmp.table.field]] + ## OID to get. May be a numeric or textual module-qualified OID. + oid = "IF-MIB::ifDescr" -* `priv_password`: -Privacy password used for encrypted SNMPv3 messages. + ## Name of the field or tag to create. If not specified, it defaults to + ## the value of 'oid'. If 'oid' is numeric an attempt to translate the + ## numeric OID into a textual OID will be made. + # name = "" + ## Output this field as a tag. + # is_tag = false -* `name`: -Output measurement name. + ## The OID sub-identifier to strip off so that the index can be matched + ## against other fields in the table. + # oid_index_suffix = "" -#### Field parameters: -* `oid`: -OID to get. May be a numeric or textual OID. + ## Specifies the length of the index after the supplied table OID (in OID + ## path segments). Truncates the index after this point to remove non-fixed + ## value or length index suffixes. + # oid_index_length = 0 +``` -* `oid_index_suffix`: -The OID sub-identifier to strip off so that the index can be matched against other fields in the table. 
+### Troubleshooting -* `oid_index_length`: -Specifies the length of the index after the supplied table OID (in OID path segments). Truncates the index after this point to remove non-fixed value or length index suffixes. +Check that a numeric field can be translated to a textual field: +``` +$ snmptranslate .1.3.6.1.2.1.1.3.0 +DISMAN-EVENT-MIB::sysUpTimeInstance +``` -* `name`: -Output field/tag name. -If not specified, it defaults to the value of `oid`. If `oid` is numeric, an attempt to translate the numeric OID into a texual OID will be made. +Request a top-level field: +``` +$ snmpget -v2c -c public 127.0.0.1 sysUpTime.0 +``` -* `is_tag`: -Output this field as a tag. +Request a table: +``` +$ snmptable -v2c -c public 127.0.0.1 ifTable +``` -* `conversion`: Values: `"float(X)"`,`"float"`,`"int"`,`""`. Default: `""` -Converts the value according to the given specification. +To collect a packet capture, run this command in the background while running +Telegraf or one of the above commands. Adjust the interface, host and port as +needed: +``` +$ sudo tcpdump -s 0 -i eth0 -w telegraf-snmp.pcap host 127.0.0.1 and port 161 +``` - - `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Efficively just moves the decimal left X places. For example a value of `123` with `float(2)` will result in `1.23`. - - `float`: Converts the value into a float with no adjustment. Same as `float(0)`. - - `int`: Convertes the value into an integer. - - `hwaddr`: Converts the value to a MAC address. - - `ipaddr`: Converts the value to an IP address. +### Example Output -#### Table parameters: -* `oid`: -Automatically populates the table's fields using data from the MIB. +``` +snmp,agent_host=127.0.0.1,source=loaner uptime=11331974i 1575509815000000000 +interface,agent_host=127.0.0.1,ifDescr=wlan0,ifIndex=3,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=0i,ifInOctets=3436617431i,ifInUcastPkts=2717778i,ifInUnknownProtos=0i,ifLastChange=0i,ifMtu=1500i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=581368041i,ifOutQLen=0i,ifOutUcastPkts=1354338i,ifPhysAddress="c8:5b:76:c9:e6:8c",ifSpecific=".0.0",ifSpeed=0i,ifType=6i 1575509815000000000 +interface,agent_host=127.0.0.1,ifDescr=eth0,ifIndex=2,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=21i,ifInOctets=3852386380i,ifInUcastPkts=3634004i,ifInUnknownProtos=0i,ifLastChange=9088763i,ifMtu=1500i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=434865441i,ifOutQLen=0i,ifOutUcastPkts=2110394i,ifPhysAddress="c8:5b:76:c9:e6:8c",ifSpecific=".0.0",ifSpeed=1000000000i,ifType=6i 1575509815000000000 +interface,agent_host=127.0.0.1,ifDescr=lo,ifIndex=1,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=0i,ifInOctets=51555569i,ifInUcastPkts=339097i,ifInUnknownProtos=0i,ifLastChange=0i,ifMtu=65536i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=51555569i,ifOutQLen=0i,ifOutUcastPkts=339097i,ifSpecific=".0.0",ifSpeed=10000000i,ifType=24i 1575509815000000000 +``` -* `name`: -Output measurement name. -If not specified, it defaults to the value of `oid`. If `oid` is numeric, an attempt to translate the numeric OID into a texual OID will be made. - -* `inherit_tags`: -Which tags to inherit from the top-level config and to use in the output of this table's measurement. - -* `index_as_tag`: -Adds each row's index within the table as a tag. 
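To make the `conversion` rules described above concrete, here is a small hypothetical Go sketch of the documented semantics. It is not the plugin's actual converter, only an illustration of the `float(X)`, `float` and `int` cases:

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

// fieldConvert is a hypothetical re-implementation of the documented
// 'conversion' rules: "float(X)" divides the value by 10^X, "float" is the
// same as float(0), and "int" parses an integer. Anything else is returned
// unchanged.
func fieldConvert(conversion, raw string) (interface{}, error) {
	var exp int
	if n, _ := fmt.Sscanf(conversion, "float(%d)", &exp); n == 1 || conversion == "float" {
		v, err := strconv.ParseFloat(raw, 64)
		if err != nil {
			return nil, err
		}
		return v / math.Pow10(exp), nil
	}
	if conversion == "int" {
		return strconv.ParseInt(raw, 10, 64)
	}
	return raw, nil // no conversion configured
}

func main() {
	v, _ := fieldConvert("float(2)", "123")
	fmt.Println(v) // 1.23, matching the example in the docs above
}
```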
- -### MIB lookups -If the plugin is configured such that it needs to perform lookups from the MIB, it will use the net-snmp utilities `snmptranslate` and `snmptable`. - -When performing the lookups, the plugin will load all available MIBs. If your MIB files are in a custom path, you may add the path using the `MIBDIRS` environment variable. See [`man 1 snmpcmd`](http://net-snmp.sourceforge.net/docs/man/snmpcmd.html#lbAK) for more information on the variable. +[net-snmp]: http://www.net-snmp.org/ +[man snmpcmd]: http://net-snmp.sourceforge.net/docs/man/snmpcmd.html#lbAK +[metric filtering]: /docs/CONFIGURATION.md#metric-filtering +[metric]: /docs/METRICS.md diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index fe9645772..75c9b7836 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -22,61 +22,46 @@ import ( const description = `Retrieves SNMP values from remote agents` const sampleConfig = ` - agents = [ "127.0.0.1:161" ] - ## Timeout for each SNMP query. - timeout = "5s" - ## Number of retries to attempt within timeout. - retries = 3 - ## SNMP version, values can be 1, 2, or 3 - version = 2 + ## Agent addresses to retrieve values from. + ## example: agents = ["udp://127.0.0.1:161"] + ## agents = ["tcp://127.0.0.1:161"] + agents = ["udp://127.0.0.1:161"] + + ## Timeout for each request. + # timeout = "5s" + + ## SNMP version; can be 1, 2, or 3. + # version = 2 ## SNMP community string. - community = "public" + # community = "public" - ## The GETBULK max-repetitions parameter - max_repetitions = 10 + ## Number of retries to attempt. + # retries = 3 - ## SNMPv3 auth parameters - #sec_name = "myuser" - #auth_protocol = "md5" # Values: "MD5", "SHA", "" - #auth_password = "pass" - #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv" - #context_name = "" - #priv_protocol = "" # Values: "DES", "AES", "" - #priv_password = "" + ## The GETBULK max-repetitions parameter. + # max_repetitions = 10 - ## measurement name - name = "system" - [[inputs.snmp.field]] - name = "hostname" - oid = ".1.0.0.1.1" - [[inputs.snmp.field]] - name = "uptime" - oid = ".1.0.0.1.2" - [[inputs.snmp.field]] - name = "load" - oid = ".1.0.0.1.3" - [[inputs.snmp.field]] - oid = "HOST-RESOURCES-MIB::hrMemorySize" + ## SNMPv3 authentication and encryption options. + ## + ## Security Name. + # sec_name = "myuser" + ## Authentication protocol; one of "MD5", "SHA", or "". + # auth_protocol = "MD5" + ## Authentication password. + # auth_password = "pass" + ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". + # sec_level = "authNoPriv" + ## Context Name. + # context_name = "" + ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". + # priv_protocol = "" + ## Privacy password used for encrypted messages. + # priv_password = "" - [[inputs.snmp.table]] - ## measurement name - name = "remote_servers" - inherit_tags = [ "hostname" ] - [[inputs.snmp.table.field]] - name = "server" - oid = ".1.0.0.0.1.0" - is_tag = true - [[inputs.snmp.table.field]] - name = "connections" - oid = ".1.0.0.0.1.1" - [[inputs.snmp.table.field]] - name = "latency" - oid = ".1.0.0.0.1.2" - - [[inputs.snmp.table]] - ## auto populate table's fields using the MIB - oid = "HOST-RESOURCES-MIB::hrNetworkTable" + ## Add fields and tables defining the variables you wish to collect. This + ## example collects the system uptime and interface variables. Reference the + ## full plugin documentation for configuration details. 
` // execCommand is so tests can mock out exec.Command usage. @@ -108,41 +93,42 @@ func execCmd(arg0 string, args ...string) ([]byte, error) { // Snmp holds the configuration for the plugin. type Snmp struct { - // The SNMP agent to query. Format is ADDR[:PORT] (e.g. 1.2.3.4:161). - Agents []string + // The SNMP agent to query. Format is [SCHEME://]ADDR[:PORT] (e.g. + // udp://1.2.3.4:161). If the scheme is not specified then "udp" is used. + Agents []string `toml:"agents"` // Timeout to wait for a response. - Timeout internal.Duration - Retries int + Timeout internal.Duration `toml:"timeout"` + Retries int `toml:"retries"` // Values: 1, 2, 3 - Version uint8 + Version uint8 `toml:"version"` // Parameters for Version 1 & 2 - Community string + Community string `toml:"community"` // Parameters for Version 2 & 3 - MaxRepetitions uint8 + MaxRepetitions uint8 `toml:"max_repetitions"` // Parameters for Version 3 - ContextName string + ContextName string `toml:"context_name"` // Values: "noAuthNoPriv", "authNoPriv", "authPriv" - SecLevel string - SecName string + SecLevel string `toml:"sec_level"` + SecName string `toml:"sec_name"` // Values: "MD5", "SHA", "". Default: "" - AuthProtocol string - AuthPassword string + AuthProtocol string `toml:"auth_protocol"` + AuthPassword string `toml:"auth_password"` // Values: "DES", "AES", "". Default: "" - PrivProtocol string - PrivPassword string - EngineID string - EngineBoots uint32 - EngineTime uint32 + PrivProtocol string `toml:"priv_protocol"` + PrivPassword string `toml:"priv_password"` + EngineID string `toml:"-"` + EngineBoots uint32 `toml:"-"` + EngineTime uint32 `toml:"-"` Tables []Table `toml:"table"` // Name & Fields are the elements of a Table. // Telegraf chokes if we try to embed a Table. So instead we have to embed the // fields of a Table, and construct a Table during runtime. 
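// (Editor's note: the explicit toml struct tags added in this hunk pin the config key names to their documented spelling, while the fields tagged toml:"-" -- EngineID, EngineBoots, EngineTime -- are excluded from TOML parsing altogether and can only be set from code, e.g. by the tests.)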
- Name string + Name string // deprecated in 1.14; use name_override Fields []Field `toml:"field"` connectionCache []snmpConnection diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 9a4335e4e..3e174e224 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -10,6 +10,7 @@ import ( "time" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" "github.com/soniah/gosnmp" @@ -82,45 +83,20 @@ var tsc = &testSNMPConnection{ } func TestSampleConfig(t *testing.T) { - conf := struct { - Inputs struct { - Snmp []*Snmp - } - }{} - err := toml.Unmarshal([]byte("[[inputs.snmp]]\n"+(*Snmp)(nil).SampleConfig()), &conf) - assert.NoError(t, err) + conf := inputs.Inputs["snmp"]() + err := toml.Unmarshal([]byte(conf.SampleConfig()), conf) + require.NoError(t, err) - s := Snmp{ - Agents: []string{"127.0.0.1:161"}, + expected := &Snmp{ + Agents: []string{"udp://127.0.0.1:161"}, Timeout: internal.Duration{Duration: 5 * time.Second}, Version: 2, Community: "public", MaxRepetitions: 10, Retries: 3, - - Name: "system", - Fields: []Field{ - {Name: "hostname", Oid: ".1.0.0.1.1"}, - {Name: "uptime", Oid: ".1.0.0.1.2"}, - {Name: "load", Oid: ".1.0.0.1.3"}, - {Oid: "HOST-RESOURCES-MIB::hrMemorySize"}, - }, - Tables: []Table{ - { - Name: "remote_servers", - InheritTags: []string{"hostname"}, - Fields: []Field{ - {Name: "server", Oid: ".1.0.0.0.1.0", IsTag: true}, - {Name: "connections", Oid: ".1.0.0.0.1.1"}, - {Name: "latency", Oid: ".1.0.0.0.1.2"}, - }, - }, - { - Oid: "HOST-RESOURCES-MIB::hrNetworkTable", - }, - }, + Name: "snmp", } - assert.Equal(t, &s, conf.Inputs.Snmp[0]) + require.Equal(t, expected, conf) } func TestFieldInit(t *testing.T) { diff --git a/plugins/inputs/snmp_trap/README.md b/plugins/inputs/snmp_trap/README.md index 8c1a2c132..ceb370d8f 100644 --- a/plugins/inputs/snmp_trap/README.md +++ b/plugins/inputs/snmp_trap/README.md @@ -6,13 +6,22 @@ notifications (traps and inform requests). Notifications are received on plain UDP. The port to listen is configurable. -OIDs can be resolved to strings using system MIB files. This is done -in same way as the SNMP input plugin. See the section "MIB Lookups" in -the SNMP [README.md](../snmp/README.md) for details. +### Prerequisites + +This plugin uses the `snmptranslate` programs from the +[net-snmp][] project. These tools will need to be installed into the `PATH` in +order to be located. Other utilities from the net-snmp project may be useful +for troubleshooting, but are not directly used by the plugin. + +These programs will load available MIBs on the system. Typically the default +directory for MIBs is `/usr/share/snmp/mibs`, but if your MIBs are in a +different location you may need to make the paths known to net-snmp. The +location of these files can be configured in the `snmp.conf` or via the +`MIBDIRS` environment variable. See [`man 1 snmpcmd`][man snmpcmd] for more +information. ### Configuration ```toml -# Snmp trap listener [[inputs.snmp_trap]] ## Transport, local address, and port to listen on. Transport must ## be "udp://". Omit local address to listen on all interfaces. @@ -26,27 +35,7 @@ the SNMP [README.md](../snmp/README.md) for details. 
# timeout = "5s" ``` -### Metrics - -- snmp_trap - - tags: - - source (string, IP address of trap source) - - name (string, value from SNMPv2-MIB::snmpTrapOID.0 PDU) - - mib (string, MIB from SNMPv2-MIB::snmpTrapOID.0 PDU) - - oid (string, OID string from SNMPv2-MIB::snmpTrapOID.0 PDU) - - version (string, "1" or "2c" or "3") - - fields: - - Fields are mapped from variables in the trap. Field names are - the trap variable names after MIB lookup. Field values are trap - variable values. - -### Example Output -``` -snmp_trap,mib=SNMPv2-MIB,name=coldStart,oid=.1.3.6.1.6.3.1.1.5.1,source=192.168.122.102,version=2c snmpTrapEnterprise.0="linux",sysUpTimeInstance=1i 1574109187723429814 -snmp_trap,mib=NET-SNMP-AGENT-MIB,name=nsNotifyShutdown,oid=.1.3.6.1.4.1.8072.4.0.2,source=192.168.122.102,version=2c sysUpTimeInstance=5803i,snmpTrapEnterprise.0="netSnmpNotificationPrefix" 1574109186555115459 -``` - -### Using a Privileged Port +#### Using a Privileged Port On many operating systems, listening on a privileged port (a port number less than 1024) requires extra permission. Since the default @@ -70,3 +59,26 @@ setcap cap_net_bind_service=+ep /usr/bin/telegraf On Mac OS, listening on privileged ports is unrestricted on versions 10.14 and later. + +### Metrics + +- snmp_trap + - tags: + - source (string, IP address of trap source) + - name (string, value from SNMPv2-MIB::snmpTrapOID.0 PDU) + - mib (string, MIB from SNMPv2-MIB::snmpTrapOID.0 PDU) + - oid (string, OID string from SNMPv2-MIB::snmpTrapOID.0 PDU) + - version (string, "1" or "2c" or "3") + - fields: + - Fields are mapped from variables in the trap. Field names are + the trap variable names after MIB lookup. Field values are trap + variable values. + +### Example Output +``` +snmp_trap,mib=SNMPv2-MIB,name=coldStart,oid=.1.3.6.1.6.3.1.1.5.1,source=192.168.122.102,version=2c snmpTrapEnterprise.0="linux",sysUpTimeInstance=1i 1574109187723429814 +snmp_trap,mib=NET-SNMP-AGENT-MIB,name=nsNotifyShutdown,oid=.1.3.6.1.4.1.8072.4.0.2,source=192.168.122.102,version=2c sysUpTimeInstance=5803i,snmpTrapEnterprise.0="netSnmpNotificationPrefix" 1574109186555115459 +``` + +[net-snmp]: http://www.net-snmp.org/ +[man snmpcmd]: http://net-snmp.sourceforge.net/docs/man/snmpcmd.html#lbAK From 3d8940e69b57b6e4dc0d2c6eb6ad7729638b08d1 Mon Sep 17 00:00:00 2001 From: Giovanni Luisotto Date: Mon, 30 Dec 2019 20:45:46 +0100 Subject: [PATCH 1423/1815] Encode query hash fields as hex strings in sqlserver input (#6818) --- plugins/inputs/sqlserver/sqlserver.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 03b0cfcfd..32a1ede7d 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -1408,8 +1408,8 @@ SELECT , qt.objectid , QUOTENAME(OBJECT_SCHEMA_NAME(qt.objectid,qt.dbid)) + '.' 
+ QUOTENAME(OBJECT_NAME(qt.objectid,qt.dbid)) as stmt_object_name , DB_NAME(qt.dbid) stmt_db_name - , r.query_hash - , r.query_plan_hash + ,CONVERT(varchar(20),[query_hash],1) as [query_hash] + ,CONVERT(varchar(20),[query_plan_hash],1) as [query_plan_hash] FROM sys.dm_exec_requests r LEFT OUTER JOIN sys.dm_exec_sessions s ON (s.session_id = r.session_id) OUTER APPLY sys.dm_exec_sql_text(sql_handle) AS qt From dd67a1c7643c0cd4b8dd4d2969d034771c1be856 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 30 Dec 2019 11:46:39 -0800 Subject: [PATCH 1424/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3e6cc2feb..77f61935d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ - [#6803](https://github.com/influxdata/telegraf/pull/6803): Add leading period to OID in SNMP v1 generic traps. - [#6823](https://github.com/influxdata/telegraf/pull/6823): Fix missing config fields in prometheus serializer. - [#6694](https://github.com/influxdata/telegraf/pull/6694): Fix panic on connection loss with undelivered messages in mqtt_consumer. +- [#6679](https://github.com/influxdata/telegraf/pull/6679): Encode query hash fields as hex strings in sqlserver input. ## v1.13 [2019-12-12] From 01e31e9d53f40708defed75985ac9a0981c3272f Mon Sep 17 00:00:00 2001 From: Jonathan Hurter Date: Mon, 30 Dec 2019 22:52:03 +0100 Subject: [PATCH 1425/1815] Invalidate diskio cache if the metadata mtime has changed (#6835) --- plugins/inputs/diskio/diskio_linux.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/diskio/diskio_linux.go b/plugins/inputs/diskio/diskio_linux.go index c727f485b..f2499ca17 100644 --- a/plugins/inputs/diskio/diskio_linux.go +++ b/plugins/inputs/diskio/diskio_linux.go @@ -11,6 +11,7 @@ import ( ) type diskInfoCache struct { + modifiedAt int64 // Unix Nano timestamp of the last modification of the device. This value is used to invalidate the cache udevDataPath string values map[string]string } @@ -31,7 +32,8 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { s.infoCache = map[string]diskInfoCache{} } ic, ok := s.infoCache[devName] - if ok { + + if ok && stat.Mtim.Nano() == ic.modifiedAt { return ic.values, nil } @@ -42,6 +44,7 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { di := map[string]string{} s.infoCache[devName] = diskInfoCache{ + modifiedAt: stat.Mtim.Nano(), udevDataPath: udevDataPath, values: di, } From a5ef34f6e241b4154e6d542900e165bd0877a22e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 30 Dec 2019 13:54:00 -0800 Subject: [PATCH 1426/1815] Update changelog --- CHANGELOG.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 77f61935d..5dd3ac94e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,8 +15,9 @@ - [#6788](https://github.com/influxdata/telegraf/issues/6788): Fix ServerProperty query stops working on Azure after failover. - [#6803](https://github.com/influxdata/telegraf/pull/6803): Add leading period to OID in SNMP v1 generic traps. - [#6823](https://github.com/influxdata/telegraf/pull/6823): Fix missing config fields in prometheus serializer. -- [#6694](https://github.com/influxdata/telegraf/pull/6694): Fix panic on connection loss with undelivered messages in mqtt_consumer. -- [#6679](https://github.com/influxdata/telegraf/pull/6679): Encode query hash fields as hex strings in sqlserver input. 
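The diskio change in PATCH 1425 above is a standard mtime-keyed cache invalidation. A hypothetical, minimal Go sketch of the idea follows; the names are illustrative, not the plugin's API:

```go
package main

import "fmt"

type entry struct {
	modifiedAt int64 // mtime (ns) of the device metadata when cached
	values     map[string]string
}

// cacheLookup mirrors the fixed diskio logic: a hit requires both that the
// entry exists and that the modification time is unchanged.
func cacheLookup(cache map[string]entry, dev string, mtimeNano int64) (map[string]string, bool) {
	e, ok := cache[dev]
	if ok && e.modifiedAt == mtimeNano {
		return e.values, true
	}
	return nil, false
}

func main() {
	cache := map[string]entry{
		"sda": {modifiedAt: 100, values: map[string]string{"ID_SERIAL": "abc"}},
	}
	if _, ok := cacheLookup(cache, "sda", 200); !ok {
		fmt.Println("mtime changed: cache invalidated, re-read udev data")
	}
}
```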
+- [#6694](https://github.com/influxdata/telegraf/issues/6694): Fix panic on connection loss with undelivered messages in mqtt_consumer. +- [#6679](https://github.com/influxdata/telegraf/issues/6679): Encode query hash fields as hex strings in sqlserver input. +- [#6345](https://github.com/influxdata/telegraf/issues/6345): Invalidate diskio cache if the metadata mtime has changed. ## v1.13 [2019-12-12] From 25e1636775cc2be3dfd61ecf4891af23403cebf0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 2 Jan 2020 16:14:32 -0800 Subject: [PATCH 1427/1815] Fix race condition in logparser tests (#6825) --- plugins/inputs/logparser/logparser_test.go | 72 +++++++++++++++------- 1 file changed, 49 insertions(+), 23 deletions(-) diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go index 8342e38ee..142f78d46 100644 --- a/plugins/inputs/logparser/logparser_test.go +++ b/plugins/inputs/logparser/logparser_test.go @@ -6,10 +6,12 @@ import ( "runtime" "strings" "testing" + "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestStartNoParsers(t *testing.T) { @@ -56,32 +58,56 @@ func TestGrokParseLogFiles(t *testing.T) { } acc := testutil.Accumulator{} - assert.NoError(t, logparser.Start(&acc)) - acc.Wait(2) + require.NoError(t, logparser.Start(&acc)) + acc.Wait(3) logparser.Stop() - acc.AssertContainsTaggedFields(t, "logparser_grok", - map[string]interface{}{ - "clientip": "192.168.1.1", - "myfloat": float64(1.25), - "response_time": int64(5432), - "myint": int64(101), - }, - map[string]string{ - "response_code": "200", - "path": thisdir + "testdata/test_a.log", - }) + expected := []telegraf.Metric{ + testutil.MustMetric( + "logparser_grok", + map[string]string{ + "response_code": "200", + "path": thisdir + "testdata/test_a.log", + }, + map[string]interface{}{ + "clientip": "192.168.1.1", + "myfloat": float64(1.25), + "response_time": int64(5432), + "myint": int64(101), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "logparser_grok", + map[string]string{ + "path": thisdir + "testdata/test_b.log", + }, + map[string]interface{}{ + "myfloat": 1.25, + "mystring": "mystring", + "nomodifier": "nomodifier", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "logparser_grok", + map[string]string{ + "path": thisdir + "testdata/test_c.log", + "response_code": "200", + }, + map[string]interface{}{ + "clientip": "192.168.1.1", + "myfloat": 1.25, + "myint": 101, + "response_time": 5432, + }, + time.Unix(0, 0), + ), + } - acc.AssertContainsTaggedFields(t, "logparser_grok", - map[string]interface{}{ - "myfloat": 1.25, - "mystring": "mystring", - "nomodifier": "nomodifier", - }, - map[string]string{ - "path": thisdir + "testdata/test_b.log", - }) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), + testutil.IgnoreTime(), testutil.SortMetrics()) } func TestGrokParseLogFilesAppearLater(t *testing.T) { From 8831651799974038c272c10e20fbafe541ac3345 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 2 Jan 2020 16:15:48 -0800 Subject: [PATCH 1428/1815] Show platform not supported warning only on plugin creation (#6801) --- plugins/inputs/ethtool/ethtool.go | 4 + ...thtool_nonlinux.go => ethtool_notlinux.go} | 8 +- plugins/inputs/synproxy/synproxy.go | 85 +----------------- plugins/inputs/synproxy/synproxy_linux.go | 90 +++++++++++++++++++ plugins/inputs/synproxy/synproxy_notlinux.go | 16 +--- 
plugins/inputs/wireless/wireless.go | 3 +- ...eless_nonlinux.go => wireless_notlinux.go} | 8 +- 7 files changed, 112 insertions(+), 102 deletions(-) rename plugins/inputs/ethtool/{ethtool_nonlinux.go => ethtool_notlinux.go} (75%) create mode 100644 plugins/inputs/synproxy/synproxy_linux.go rename plugins/inputs/wireless/{wireless_nonlinux.go => wireless_notlinux.go} (75%) diff --git a/plugins/inputs/ethtool/ethtool.go b/plugins/inputs/ethtool/ethtool.go index e8f6bfed4..3f8f8e156 100644 --- a/plugins/inputs/ethtool/ethtool.go +++ b/plugins/inputs/ethtool/ethtool.go @@ -2,6 +2,8 @@ package ethtool import ( "net" + + "github.com/influxdata/telegraf" ) type Command interface { @@ -18,6 +20,8 @@ type Ethtool struct { // This is the list of interface names to ignore InterfaceExclude []string `toml:"interface_exclude"` + Log telegraf.Logger `toml:"-"` + // the ethtool command command Command } diff --git a/plugins/inputs/ethtool/ethtool_nonlinux.go b/plugins/inputs/ethtool/ethtool_notlinux.go similarity index 75% rename from plugins/inputs/ethtool/ethtool_nonlinux.go rename to plugins/inputs/ethtool/ethtool_notlinux.go index 62a0de3c1..b022e0a46 100644 --- a/plugins/inputs/ethtool/ethtool_nonlinux.go +++ b/plugins/inputs/ethtool/ethtool_notlinux.go @@ -3,19 +3,21 @@ package ethtool import ( - "log" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) +func (e *Ethtool) Init() error { + e.Log.Warn("Current platform is not supported") + return nil +} + func (e *Ethtool) Gather(acc telegraf.Accumulator) error { return nil } func init() { inputs.Add(pluginName, func() telegraf.Input { - log.Print("W! [inputs.ethtool] Current platform is not supported") return &Ethtool{} }) } diff --git a/plugins/inputs/synproxy/synproxy.go b/plugins/inputs/synproxy/synproxy.go index 510f5584d..6a5b2b323 100644 --- a/plugins/inputs/synproxy/synproxy.go +++ b/plugins/inputs/synproxy/synproxy.go @@ -1,20 +1,16 @@ -// +build linux - package synproxy import ( - "bufio" - "fmt" "os" "path" - "strconv" - "strings" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) type Synproxy struct { + Log telegraf.Logger `toml:"-"` + // Synproxy stats filename (proc filesystem) statFile string } @@ -27,83 +23,6 @@ func (k *Synproxy) SampleConfig() string { return "" } -func (k *Synproxy) Gather(acc telegraf.Accumulator) error { - data, err := k.getSynproxyStat() - if err != nil { - return err - } - - acc.AddCounter("synproxy", data, map[string]string{}) - return nil -} - -func inSlice(haystack []string, needle string) bool { - for _, val := range haystack { - if needle == val { - return true - } - } - return false -} - -func (k *Synproxy) getSynproxyStat() (map[string]interface{}, error) { - var hname []string - counters := []string{"entries", "syn_received", "cookie_invalid", "cookie_valid", "cookie_retrans", "conn_reopened"} - fields := make(map[string]interface{}) - - // Open synproxy file in proc filesystem - file, err := os.Open(k.statFile) - if err != nil { - return nil, err - } - defer file.Close() - - // Initialise expected fields - for _, val := range counters { - fields[val] = uint32(0) - } - - scanner := bufio.NewScanner(file) - // Read header row - if scanner.Scan() { - line := scanner.Text() - // Parse fields separated by whitespace - dataFields := strings.Fields(line) - for _, val := range dataFields { - if !inSlice(counters, val) { - val = "" - } - hname = append(hname, val) - } - } - if len(hname) == 0 { - return nil, fmt.Errorf("invalid data") - } - // 
Read data rows - for scanner.Scan() { - line := scanner.Text() - // Parse fields separated by whitespace - dataFields := strings.Fields(line) - // If number of data fields do not match number of header fields - if len(dataFields) != len(hname) { - return nil, fmt.Errorf("invalid number of columns in data, expected %d found %d", len(hname), - len(dataFields)) - } - for i, val := range dataFields { - // Convert from hexstring to int32 - x, err := strconv.ParseUint(val, 16, 32) - // If field is not a valid hexstring - if err != nil { - return nil, fmt.Errorf("invalid value '%s' found", val) - } - if hname[i] != "" { - fields[hname[i]] = fields[hname[i]].(uint32) + uint32(x) - } - } - } - return fields, nil -} - func getHostProc() string { procPath := "/proc" if os.Getenv("HOST_PROC") != "" { diff --git a/plugins/inputs/synproxy/synproxy_linux.go b/plugins/inputs/synproxy/synproxy_linux.go new file mode 100644 index 000000000..bcc972938 --- /dev/null +++ b/plugins/inputs/synproxy/synproxy_linux.go @@ -0,0 +1,90 @@ +// +build linux + +package synproxy + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" + + "github.com/influxdata/telegraf" +) + +func (k *Synproxy) Gather(acc telegraf.Accumulator) error { + data, err := k.getSynproxyStat() + if err != nil { + return err + } + + acc.AddCounter("synproxy", data, map[string]string{}) + return nil +} + +func inSlice(haystack []string, needle string) bool { + for _, val := range haystack { + if needle == val { + return true + } + } + return false +} + +func (k *Synproxy) getSynproxyStat() (map[string]interface{}, error) { + var hname []string + counters := []string{"entries", "syn_received", "cookie_invalid", "cookie_valid", "cookie_retrans", "conn_reopened"} + fields := make(map[string]interface{}) + + // Open synproxy file in proc filesystem + file, err := os.Open(k.statFile) + if err != nil { + return nil, err + } + defer file.Close() + + // Initialise expected fields + for _, val := range counters { + fields[val] = uint32(0) + } + + scanner := bufio.NewScanner(file) + // Read header row + if scanner.Scan() { + line := scanner.Text() + // Parse fields separated by whitespace + dataFields := strings.Fields(line) + for _, val := range dataFields { + if !inSlice(counters, val) { + val = "" + } + hname = append(hname, val) + } + } + if len(hname) == 0 { + return nil, fmt.Errorf("invalid data") + } + // Read data rows + for scanner.Scan() { + line := scanner.Text() + // Parse fields separated by whitespace + dataFields := strings.Fields(line) + // If number of data fields do not match number of header fields + if len(dataFields) != len(hname) { + return nil, fmt.Errorf("invalid number of columns in data, expected %d found %d", len(hname), + len(dataFields)) + } + for i, val := range dataFields { + // Convert from hexstring to int32 + x, err := strconv.ParseUint(val, 16, 32) + // If field is not a valid hexstring + if err != nil { + return nil, fmt.Errorf("invalid value '%s' found", val) + } + if hname[i] != "" { + fields[hname[i]] = fields[hname[i]].(uint32) + uint32(x) + } + } + } + return fields, nil +} diff --git a/plugins/inputs/synproxy/synproxy_notlinux.go b/plugins/inputs/synproxy/synproxy_notlinux.go index e77f06903..71a223644 100644 --- a/plugins/inputs/synproxy/synproxy_notlinux.go +++ b/plugins/inputs/synproxy/synproxy_notlinux.go @@ -3,29 +3,21 @@ package synproxy import ( - "log" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) -type Synproxy struct{} +func (k *Synproxy) Init() error { + 
k.Log.Warn("Current platform is not supported") + return nil +} func (k *Synproxy) Gather(acc telegraf.Accumulator) error { return nil } -func (k *Synproxy) Description() string { - return "" -} - -func (k *Synproxy) SampleConfig() string { - return "" -} - func init() { inputs.Add("synproxy", func() telegraf.Input { - log.Print("W! [inputs.synproxy] Current platform is not supported") return &Synproxy{} }) } diff --git a/plugins/inputs/wireless/wireless.go b/plugins/inputs/wireless/wireless.go index eb488ef59..911d7fb09 100644 --- a/plugins/inputs/wireless/wireless.go +++ b/plugins/inputs/wireless/wireless.go @@ -7,7 +7,8 @@ import ( // Wireless is used to store configuration values. type Wireless struct { - HostProc string `toml:"host_proc"` + HostProc string `toml:"host_proc"` + Log telegraf.Logger `toml:"-"` } var sampleConfig = ` diff --git a/plugins/inputs/wireless/wireless_nonlinux.go b/plugins/inputs/wireless/wireless_notlinux.go similarity index 75% rename from plugins/inputs/wireless/wireless_nonlinux.go rename to plugins/inputs/wireless/wireless_notlinux.go index 0fbe5eb06..4769acc97 100644 --- a/plugins/inputs/wireless/wireless_nonlinux.go +++ b/plugins/inputs/wireless/wireless_notlinux.go @@ -3,19 +3,21 @@ package wireless import ( - "log" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) +func (w *Wireless) Init() error { + w.Log.Warn("Current platform is not supported") + return nil +} + func (w *Wireless) Gather(acc telegraf.Accumulator) error { return nil } func init() { inputs.Add("wireless", func() telegraf.Input { - log.Print("W! [inputs.wireless] Current platform is not supported") return &Wireless{} }) } From 318d8134cf8bbc005efea924838486b931fdb6fa Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 2 Jan 2020 16:17:16 -0800 Subject: [PATCH 1429/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5dd3ac94e..a810f13ba 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ - [#6694](https://github.com/influxdata/telegraf/issues/6694): Fix panic on connection loss with undelivered messages in mqtt_consumer. - [#6679](https://github.com/influxdata/telegraf/issues/6679): Encode query hash fields as hex strings in sqlserver input. - [#6345](https://github.com/influxdata/telegraf/issues/6345): Invalidate diskio cache if the metadata mtime has changed. +- [#6800](https://github.com/influxdata/telegraf/issues/6800): Show platform not supported warning only on plugin creation. 
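The platform-warning change in PATCH 1428 above follows the usual Go build-tag split: the real collector lives in a `_linux.go` file, while a stub in a `_notlinux.go` file satisfies the same interface and warns once from `Init()` rather than logging inside the plugin factory. A hypothetical minimal stub (illustrative plugin name, not one of the actual files) might look like:

```go
// +build !linux

// Package example is a stand-alone illustration of the pattern used by the
// synproxy, ethtool and wireless changes above.
package example

import "github.com/influxdata/telegraf"

type Plugin struct {
	Log telegraf.Logger `toml:"-"`
}

// Init runs once when the configured plugin is created, so the warning is
// printed a single time and only for plugins that are actually enabled.
func (p *Plugin) Init() error {
	p.Log.Warn("Current platform is not supported")
	return nil
}

// Gather is a no-op on unsupported platforms.
func (p *Plugin) Gather(acc telegraf.Accumulator) error {
	return nil
}
```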
## v1.13 [2019-12-12] From 1edb73916fb4cf254db535dc3bc2bc992156649e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 2 Jan 2020 16:26:48 -0800 Subject: [PATCH 1430/1815] Improve suricata input unit test debugging (#6815) --- plugins/inputs/suricata/suricata_test.go | 350 ++++++----------------- 1 file changed, 84 insertions(+), 266 deletions(-) diff --git a/plugins/inputs/suricata/suricata_test.go b/plugins/inputs/suricata/suricata_test.go index 093efd347..02f298b97 100644 --- a/plugins/inputs/suricata/suricata_test.go +++ b/plugins/inputs/suricata/suricata_test.go @@ -1,7 +1,6 @@ package suricata import ( - "bytes" "fmt" "io/ioutil" "log" @@ -9,35 +8,21 @@ import ( "net" "os" "path/filepath" - "regexp" "strings" "testing" "time" - "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var ex2 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"capture":{"kernel_packets":905344474,"kernel_drops":78355440,"kernel_packets_delta":2376742,"kernel_drops_delta":82049}}}` -var ex3 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "foo": { "capture":{"kernel_packets":905344474,"kernel_drops":78355440}}}}}` -var ex4 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "W1#en..bar1": { "capture":{"kernel_packets":905344474,"kernel_drops":78355440}}}}}` -var brokenType1 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "W1#en..bar1": { "capture":{"kernel_packets":905344474,"kernel_drops": true}}}}}` -var brokenType2 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "W1#en..bar1": { "capture":{"kernel_packets":905344474,"kernel_drops": ["foo"]}}}}}` -var brokenType3 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "W1#en..bar1": { "capture":{"kernel_packets":905344474,"kernel_drops":"none this time"}}}}}` -var brokenType4 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "W1#en..bar1": { "capture":{"kernel_packets":905344474,"kernel_drops":null}}}}}` -var brokenType5 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"foo": null}}` -var brokenStruct1 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": ["foo"]}}` -var brokenStruct2 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats"}` -var brokenStruct3 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats": "foobar"}` -var brokenStruct4 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats": null}` -var singleDotRegexp = regexp.MustCompilePOSIX(`[^.]\.[^.]`) +var ex3 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "W#05-wlp4s0": { "capture":{"kernel_packets":905344474,"kernel_drops":78355440}}}}}` func TestSuricataLarge(t *testing.T) { dir, err := ioutil.TempDir("", "test") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -49,32 +34,24 @@ func TestSuricataLarge(t *testing.T) { }, } acc := testutil.Accumulator{} - acc.SetDebug(true) - assert.NoError(t, s.Start(&acc)) + require.NoError(t, s.Start(&acc)) + defer s.Stop() data, err := 
ioutil.ReadFile("testdata/test1.json") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) c, err := net.Dial("unix", tmpfn) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) c.Write([]byte(data)) c.Write([]byte("\n")) c.Close() acc.Wait(1) - - s.Stop() } func TestSuricata(t *testing.T) { dir, err := ioutil.TempDir("", "test") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -86,20 +63,17 @@ func TestSuricata(t *testing.T) { }, } acc := testutil.Accumulator{} - acc.SetDebug(true) - assert.NoError(t, s.Start(&acc)) + require.NoError(t, s.Start(&acc)) + defer s.Stop() c, err := net.Dial("unix", tmpfn) - if err != nil { - t.Fatalf("failed: %s", err.Error()) - } + require.NoError(t, err) c.Write([]byte(ex2)) c.Write([]byte("\n")) c.Close() acc.Wait(1) - s.Stop() s = Suricata{ Source: tmpfn, Delimiter: ".", @@ -108,23 +82,45 @@ func TestSuricata(t *testing.T) { }, } - acc.AssertContainsTaggedFields(t, "suricata", - map[string]interface{}{ - "capture.kernel_packets": float64(905344474), - "capture.kernel_drops": float64(78355440), - "capture.kernel_packets_delta": float64(2376742), - "capture.kernel_drops_delta": float64(82049), - }, - map[string]string{"thread": "total"}) - - acc = testutil.Accumulator{} - acc.SetDebug(true) - assert.NoError(t, s.Start(&acc)) - - c, err = net.Dial("unix", tmpfn) - if err != nil { - log.Println(err) + expected := []telegraf.Metric{ + testutil.MustMetric( + "suricata", + map[string]string{ + "thread": "total", + }, + map[string]interface{}{ + "capture.kernel_packets": float64(905344474), + "capture.kernel_drops": float64(78355440), + "capture.kernel_packets_delta": float64(2376742), + "capture.kernel_drops_delta": float64(82049), + }, + time.Unix(0, 0), + ), } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} + +func TestThreadStats(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + require.NoError(t, err) + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Delimiter: ".", + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + + acc := testutil.Accumulator{} + require.NoError(t, s.Start(&acc)) + defer s.Stop() + + c, err := net.Dial("unix", tmpfn) + require.NoError(t, err) c.Write([]byte("")) c.Write([]byte("\n")) c.Write([]byte("foobard}\n")) @@ -133,21 +129,26 @@ func TestSuricata(t *testing.T) { c.Close() acc.Wait(1) - s.Stop() + expected := []telegraf.Metric{ + testutil.MustMetric( + "suricata", + map[string]string{ + "thread": "W#05-wlp4s0", + }, + map[string]interface{}{ + "capture.kernel_packets": float64(905344474), + "capture.kernel_drops": float64(78355440), + }, + time.Unix(0, 0), + ), + } - acc.AssertContainsTaggedFields(t, "suricata", - map[string]interface{}{ - "capture.kernel_packets": float64(905344474), - "capture.kernel_drops": float64(78355440), - }, - map[string]string{"thread": "foo"}) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) } func TestSuricataInvalid(t *testing.T) { dir, err := ioutil.TempDir("", "test") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -160,79 +161,16 @@ func TestSuricataInvalid(t *testing.T) { acc := testutil.Accumulator{} acc.SetDebug(true) - assert.NoError(t, s.Start(&acc)) + require.NoError(t, 
s.Start(&acc)) + defer s.Stop() c, err := net.Dial("unix", tmpfn) - if err != nil { - log.Println(err) - } + require.NoError(t, err) c.Write([]byte("sfjiowef")) c.Write([]byte("\n")) c.Close() acc.WaitError(1) - s.Stop() -} - -func splitAtSingleDot(in string) []string { - res := singleDotRegexp.FindAllStringIndex(in, -1) - if res == nil { - return []string{in} - } - ret := make([]string, 0) - startpos := 0 - for _, v := range res { - ret = append(ret, in[startpos:v[0]+1]) - startpos = v[1] - 1 - } - return append(ret, in[startpos:]) -} - -func TestSuricataSplitDots(t *testing.T) { - dir, err := ioutil.TempDir("", "test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) - - out := splitAtSingleDot("foo") - if len(out) != 1 { - t.Fatalf("splitting 'foo' should yield one result") - } - if out[0] != "foo" { - t.Fatalf("splitting 'foo' should yield one result, 'foo'") - } - - s := Suricata{ - Source: tmpfn, - Delimiter: ".", - Log: testutil.Logger{ - Name: "inputs.suricata", - }, - } - acc := testutil.Accumulator{} - acc.SetDebug(true) - - assert.NoError(t, s.Start(&acc)) - - c, err := net.Dial("unix", tmpfn) - if err != nil { - log.Println(err) - } - c.Write([]byte(ex4)) - c.Write([]byte("\n")) - c.Close() - - acc.Wait(1) - acc.AssertContainsTaggedFields(t, "suricata", - map[string]interface{}{ - "capture.kernel_packets": float64(905344474), - "capture.kernel_drops": float64(78355440), - }, - map[string]string{"thread": "W1#en..bar1"}) - - s.Stop() } func TestSuricataInvalidPath(t *testing.T) { @@ -245,16 +183,12 @@ func TestSuricataInvalidPath(t *testing.T) { } acc := testutil.Accumulator{} - acc.SetDebug(true) - - assert.Error(t, s.Start(&acc)) + require.Error(t, s.Start(&acc)) } func TestSuricataTooLongLine(t *testing.T) { dir, err := ioutil.TempDir("", "test") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -265,28 +199,23 @@ func TestSuricataTooLongLine(t *testing.T) { }, } acc := testutil.Accumulator{} - acc.SetDebug(true) - assert.NoError(t, s.Start(&acc)) + require.NoError(t, s.Start(&acc)) + defer s.Stop() c, err := net.Dial("unix", tmpfn) - if err != nil { - log.Println(err) - } + require.NoError(t, err) c.Write([]byte(strings.Repeat("X", 20000000))) c.Write([]byte("\n")) c.Close() acc.WaitError(1) - s.Stop() } func TestSuricataEmptyJSON(t *testing.T) { dir, err := ioutil.TempDir("", "test") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -297,85 +226,23 @@ func TestSuricataEmptyJSON(t *testing.T) { }, } acc := testutil.Accumulator{} - acc.SetDebug(true) - - assert.NoError(t, s.Start(&acc)) + require.NoError(t, s.Start(&acc)) + defer s.Stop() c, err := net.Dial("unix", tmpfn) if err != nil { log.Println(err) + } c.Write([]byte("\n")) c.Close() acc.WaitError(1) - - s.Stop() -} - -func TestSuricataInvalidInputs(t *testing.T) { - dir, err := ioutil.TempDir("", "test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - defer func() { - log.SetOutput(os.Stderr) - }() - tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) - - for input, errmsg := range map[string]string{ - brokenType1: `Unsupported type bool encountered`, - brokenType2: `Unsupported type []interface {} encountered`, - brokenType3: `Unsupported type string encountered`, - brokenType4: `Unsupported type encountered`, - 
brokenType5: `Unsupported type encountered`, - brokenStruct1: `The 'threads' sub-object does not have required structure`, - brokenStruct2: `Input does not contain necessary 'stats' sub-object`, - brokenStruct3: `The 'stats' sub-object does not have required structure`, - brokenStruct4: `The 'stats' sub-object does not have required structure`, - } { - var logBuf buffer - logBuf.Reset() - log.SetOutput(&logBuf) - - acc := testutil.Accumulator{} - acc.SetDebug(true) - - s := Suricata{ - Source: tmpfn, - Delimiter: ".", - Log: testutil.Logger{ - Name: "inputs.suricata", - }, - } - assert.NoError(t, s.Start(&acc)) - - c, err := net.Dial("unix", tmpfn) - if err != nil { - t.Fatal(err) - } - c.Write([]byte(input)) - c.Write([]byte("\n")) - c.Close() - - for { - if bytes.Count(logBuf.Bytes(), []byte{'\n'}) > 0 { - break - } - time.Sleep(50 * time.Millisecond) - } - - assert.Contains(t, logBuf.String(), errmsg) - s.Stop() - } } func TestSuricataDisconnectSocket(t *testing.T) { dir, err := ioutil.TempDir("", "test") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -386,47 +253,28 @@ func TestSuricataDisconnectSocket(t *testing.T) { }, } acc := testutil.Accumulator{} - acc.SetDebug(true) - assert.NoError(t, s.Start(&acc)) + require.NoError(t, s.Start(&acc)) + defer s.Stop() c, err := net.Dial("unix", tmpfn) - if err != nil { - log.Println(err) - } + require.NoError(t, err) c.Write([]byte(ex2)) c.Write([]byte("\n")) c.Close() c, err = net.Dial("unix", tmpfn) - if err != nil { - log.Println(err) - } + require.NoError(t, err) c.Write([]byte(ex3)) c.Write([]byte("\n")) c.Close() acc.Wait(2) - - s.Stop() -} - -func TestSuricataPluginDesc(t *testing.T) { - v, ok := inputs.Inputs["suricata"] - if !ok { - t.Fatal("suricata plugin not registered") - } - desc := v().Description() - if desc != "Suricata stats plugin" { - t.Fatal("invalid description ", desc) - } } func TestSuricataStartStop(t *testing.T) { dir, err := ioutil.TempDir("", "test") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -437,36 +285,6 @@ func TestSuricataStartStop(t *testing.T) { }, } acc := testutil.Accumulator{} - acc.SetDebug(true) - assert.NoError(t, s.Start(&acc)) + require.NoError(t, s.Start(&acc)) s.Stop() } - -func TestSuricataGather(t *testing.T) { - dir, err := ioutil.TempDir("", "test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) - - s := Suricata{ - Source: tmpfn, - Log: testutil.Logger{ - Name: "inputs.suricata", - }, - } - acc := testutil.Accumulator{} - acc.SetDebug(true) - assert.NoError(t, s.Gather(&acc)) -} - -func TestSuricataSampleConfig(t *testing.T) { - v, ok := inputs.Inputs["suricata"] - if !ok { - t.Fatal("suricata plugin not registered") - } - if v().SampleConfig() != sampleConfig { - t.Fatal("wrong sampleconfig") - } -} From 2486006495491c3034a22246d610a9d6ffb8d3e2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 2 Jan 2020 16:27:26 -0800 Subject: [PATCH 1431/1815] Add kafka SASL version control to kafka_consumer (#6350) --- plugins/common/kafka/sasl.go | 25 +++++++++ plugins/inputs/kafka_consumer/README.md | 6 ++- .../inputs/kafka_consumer/kafka_consumer.go | 30 ++++++++++- .../kafka_consumer/kafka_consumer_test.go | 51 +++++++++++++++++++ plugins/inputs/zookeeper/README.md | 2 +- plugins/outputs/kafka/README.md | 3 ++ 
plugins/outputs/kafka/kafka.go | 27 ++++++++-- 7 files changed, 137 insertions(+), 7 deletions(-) create mode 100644 plugins/common/kafka/sasl.go diff --git a/plugins/common/kafka/sasl.go b/plugins/common/kafka/sasl.go new file mode 100644 index 000000000..cd3358b38 --- /dev/null +++ b/plugins/common/kafka/sasl.go @@ -0,0 +1,25 @@ +package kafka + +import ( + "errors" + + "github.com/Shopify/sarama" +) + +func SASLVersion(kafkaVersion sarama.KafkaVersion, saslVersion *int) (int16, error) { + if saslVersion == nil { + if kafkaVersion.IsAtLeast(sarama.V1_0_0_0) { + return sarama.SASLHandshakeV1, nil + } + return sarama.SASLHandshakeV0, nil + } + + switch *saslVersion { + case 0: + return sarama.SASLHandshakeV0, nil + case 1: + return sarama.SASLHandshakeV1, nil + default: + return 0, errors.New("invalid SASL version") + } +} diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index b0f2a4798..dec39cc32 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -34,10 +34,14 @@ and use the old zookeeper connection method. ## Use TLS but skip chain & host verification # insecure_skip_verify = false - ## Optional SASL Config + ## SASL authentication credentials. These settings should typically be used + ## with TLS encryption enabled using the "enable_tls" option. # sasl_username = "kafka" # sasl_password = "secret" + ## SASL protocol version. When connecting to Azure EventHub set to 0. + # sasl_version = 1 + ## Name of the consumer group. # consumer_group = "telegraf_metrics_consumers" diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 39f6f0e2b..5cd6a9771 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -10,6 +10,7 @@ import ( "github.com/Shopify/sarama" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/common/kafka" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" ) @@ -33,16 +34,21 @@ const sampleConfig = ` # version = "" ## Optional TLS Config + # enable_tls = true # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false - ## Optional SASL Config + ## SASL authentication credentials. These settings should typically be used + ## with TLS encryption enabled using the "enable_tls" option. # sasl_username = "kafka" # sasl_password = "secret" + ## SASL protocol version. When connecting to Azure EventHub set to 0. + # sasl_version = 1 + ## Name of the consumer group. 
# consumer_group = "telegraf_metrics_consumers" @@ -95,9 +101,13 @@ type KafkaConsumer struct { Version string `toml:"version"` SASLPassword string `toml:"sasl_password"` SASLUsername string `toml:"sasl_username"` + SASLVersion *int `toml:"sasl_version"` + EnableTLS *bool `toml:"enable_tls"` tls.ClientConfig + Log telegraf.Logger `toml:"-"` + ConsumerCreator ConsumerGroupCreator `toml:"-"` consumer ConsumerGroup config *sarama.Config @@ -158,6 +168,10 @@ func (k *KafkaConsumer) Init() error { config.Version = version } + if k.EnableTLS != nil && *k.EnableTLS { + config.Net.TLS.Enable = true + } + tlsConfig, err := k.ClientConfig.TLSConfig() if err != nil { return err @@ -165,13 +179,25 @@ func (k *KafkaConsumer) Init() error { if tlsConfig != nil { config.Net.TLS.Config = tlsConfig - config.Net.TLS.Enable = true + + // To maintain backwards compatibility, if the enable_tls option is not + // set TLS is enabled if a non-default TLS config is used. + if k.EnableTLS == nil { + k.Log.Warnf("Use of deprecated configuration: enable_tls should be set when using TLS") + config.Net.TLS.Enable = true + } } if k.SASLUsername != "" && k.SASLPassword != "" { config.Net.SASL.User = k.SASLUsername config.Net.SASL.Password = k.SASLPassword config.Net.SASL.Enable = true + + version, err := kafka.SASLVersion(config.Version, k.SASLVersion) + if err != nil { + return err + } + config.Net.SASL.Version = version } if k.ClientID != "" { diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index 3aa0efa50..0c8063578 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -7,6 +7,7 @@ import ( "github.com/Shopify/sarama" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/parsers/value" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -68,6 +69,7 @@ func TestInit(t *testing.T) { name: "parses valid version string", plugin: &KafkaConsumer{ Version: "1.0.0", + Log: testutil.Logger{}, }, check: func(t *testing.T, plugin *KafkaConsumer) { require.Equal(t, plugin.config.Version, sarama.V1_0_0_0) @@ -77,6 +79,7 @@ func TestInit(t *testing.T) { name: "invalid version string", plugin: &KafkaConsumer{ Version: "100", + Log: testutil.Logger{}, }, initError: true, }, @@ -84,6 +87,7 @@ func TestInit(t *testing.T) { name: "custom client_id", plugin: &KafkaConsumer{ ClientID: "custom", + Log: testutil.Logger{}, }, check: func(t *testing.T, plugin *KafkaConsumer) { require.Equal(t, plugin.config.ClientID, "custom") @@ -93,6 +97,7 @@ func TestInit(t *testing.T) { name: "custom offset", plugin: &KafkaConsumer{ Offset: "newest", + Log: testutil.Logger{}, }, check: func(t *testing.T, plugin *KafkaConsumer) { require.Equal(t, plugin.config.Consumer.Offsets.Initial, sarama.OffsetNewest) @@ -102,9 +107,54 @@ func TestInit(t *testing.T) { name: "invalid offset", plugin: &KafkaConsumer{ Offset: "middle", + Log: testutil.Logger{}, }, initError: true, }, + { + name: "default tls without tls config", + plugin: &KafkaConsumer{ + Log: testutil.Logger{}, + }, + check: func(t *testing.T, plugin *KafkaConsumer) { + require.False(t, plugin.config.Net.TLS.Enable) + }, + }, + { + name: "default tls with a tls config", + plugin: &KafkaConsumer{ + ClientConfig: tls.ClientConfig{ + InsecureSkipVerify: true, + }, + Log: testutil.Logger{}, + }, + check: func(t *testing.T, plugin *KafkaConsumer) { + require.True(t, 
plugin.config.Net.TLS.Enable) + }, + }, + { + name: "disable tls", + plugin: &KafkaConsumer{ + EnableTLS: func() *bool { v := false; return &v }(), + ClientConfig: tls.ClientConfig{ + InsecureSkipVerify: true, + }, + Log: testutil.Logger{}, + }, + check: func(t *testing.T, plugin *KafkaConsumer) { + require.False(t, plugin.config.Net.TLS.Enable) + }, + }, + { + name: "enable tls", + plugin: &KafkaConsumer{ + EnableTLS: func() *bool { v := true; return &v }(), + Log: testutil.Logger{}, + }, + check: func(t *testing.T, plugin *KafkaConsumer) { + require.True(t, plugin.config.Net.TLS.Enable) + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -125,6 +175,7 @@ func TestStartStop(t *testing.T) { cg := &FakeConsumerGroup{errors: make(chan error)} plugin := &KafkaConsumer{ ConsumerCreator: &FakeCreator{ConsumerGroup: cg}, + Log: testutil.Logger{}, } err := plugin.Init() require.NoError(t, err) diff --git a/plugins/inputs/zookeeper/README.md b/plugins/inputs/zookeeper/README.md index c452e8663..23009c519 100644 --- a/plugins/inputs/zookeeper/README.md +++ b/plugins/inputs/zookeeper/README.md @@ -19,7 +19,7 @@ The zookeeper plugin collects variables outputted from the 'mntr' command # timeout = "5s" ## Optional TLS Config - # enable_ssl = true + # enable_tls = true # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md index 25b173a02..7b9fc0e30 100644 --- a/plugins/outputs/kafka/README.md +++ b/plugins/outputs/kafka/README.md @@ -96,6 +96,9 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm # sasl_username = "kafka" # sasl_password = "secret" + ## SASL protocol version. When connecting to Azure EventHub set to 0. + # sasl_version = 1 + ## Data format to output. ## Each data format has its own unique set of configuration options, read ## more about them here: diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 85eb32a3f..b4e71ef57 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -10,6 +10,7 @@ import ( "github.com/gofrs/uuid" "github.com/influxdata/telegraf" tlsint "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/common/kafka" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" ) @@ -43,12 +44,12 @@ type ( // TLS certificate authority CA string + EnableTLS *bool `toml:"enable_tls"` tlsint.ClientConfig - // SASL Username SASLUsername string `toml:"sasl_username"` - // SASL Password SASLPassword string `toml:"sasl_password"` + SASLVersion *int `toml:"sasl_version"` Log telegraf.Logger `toml:"-"` @@ -170,6 +171,7 @@ var sampleConfig = ` # max_message_bytes = 1000000 ## Optional TLS Config + # enable_tls = true # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" @@ -180,6 +182,9 @@ var sampleConfig = ` # sasl_username = "kafka" # sasl_password = "secret" + ## SASL protocol version. When connecting to Azure EventHub set to 0. + # sasl_version = 1 + ## Data format to output. 
## Each data format has its own unique set of configuration options, read ## more about them here: @@ -258,6 +263,10 @@ func (k *Kafka) Connect() error { k.TLSKey = k.Key } + if k.EnableTLS != nil && *k.EnableTLS { + config.Net.TLS.Enable = true + } + tlsConfig, err := k.ClientConfig.TLSConfig() if err != nil { return err @@ -265,13 +274,25 @@ func (k *Kafka) Connect() error { if tlsConfig != nil { config.Net.TLS.Config = tlsConfig - config.Net.TLS.Enable = true + + // To maintain backwards compatibility, if the enable_tls option is not + // set TLS is enabled if a non-default TLS config is used. + if k.EnableTLS == nil { + k.Log.Warnf("Use of deprecated configuration: enable_tls should be set when using TLS") + config.Net.TLS.Enable = true + } } if k.SASLUsername != "" && k.SASLPassword != "" { config.Net.SASL.User = k.SASLUsername config.Net.SASL.Password = k.SASLPassword config.Net.SASL.Enable = true + + version, err := kafka.SASLVersion(config.Version, k.SASLVersion) + if err != nil { + return err + } + config.Net.SASL.Version = version } producer, err := sarama.NewSyncProducer(k.Brokers, config) From eb272f450ae20ce87f45bd230d83de07765de248 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 2 Jan 2020 16:39:45 -0800 Subject: [PATCH 1432/1815] Update changelog --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a810f13ba..7ed412d24 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,8 @@ - [#6798](https://github.com/influxdata/telegraf/pull/6798): Add use_sudo option to ipmi_sensor input. - [#6764](https://github.com/influxdata/telegraf/pull/6764): Add ability to collect pod labels to kubernetes input. - [#6770](https://github.com/influxdata/telegraf/pull/6770): Expose unbound-control config file option. -- [#6508](https://github.com/influxdata/telegraf/pull/6508): Add support for new nginx plus api endpoints. +- [#6508](https://github.com/influxdata/telegraf/pull/6508): Add support for new nginx plus api endpoints. +- [#6342](https://github.com/influxdata/telegraf/pull/6342): Add kafka SASL version control to support Azure Event Hub. 
## v1.13.1 [unreleased] From 8e04221fb790fb6602ba21db2e372e34021afb70 Mon Sep 17 00:00:00 2001 From: Xiaoyu Lee <895605504@qq.com> Date: Sat, 4 Jan 2020 02:58:15 +0800 Subject: [PATCH 1433/1815] Fix link to uwsgi input (#6851) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b34451df2..4150ec17a 100644 --- a/README.md +++ b/README.md @@ -304,7 +304,7 @@ For documentation on the latest development code see the [documentation index][d * [twemproxy](./plugins/inputs/twemproxy) * [udp_listener](./plugins/inputs/socket_listener) * [unbound](./plugins/inputs/unbound) -* [uswgi](./plugins/inputs/uswgi) +* [uwsgi](./plugins/inputs/uwsgi) * [varnish](./plugins/inputs/varnish) * [vsphere](./plugins/inputs/vsphere) VMware vSphere * [webhooks](./plugins/inputs/webhooks) From 7e498ede6d3e0f4526b6486830b54f1a0430be90 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 3 Jan 2020 11:37:30 -0800 Subject: [PATCH 1434/1815] Update github.com/kardianos/service to 1.0.0 (#6849) --- Gopkg.lock | 14 +++----------- Gopkg.toml | 2 +- 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 3fabcfb77..749f37d4c 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -760,20 +760,12 @@ revision = "c2b33e84" [[projects]] - branch = "master" - digest = "1:2c5ad58492804c40bdaf5d92039b0cde8b5becd2b7feeb37d7d1cc36a8aa8dbe" - name = "github.com/kardianos/osext" - packages = ["."] - pruneopts = "" - revision = "ae77be60afb1dcacde03767a8c37337fad28ac14" - -[[projects]] - branch = "master" - digest = "1:fed90fa725d3b1bac0a760de64426834dfef4546474cf182f2ec94285afa74a8" + digest = "1:b498ceccf0d2efa0af877b1dda20d3742ef9ff7475123e8e922016f0b737069b" name = "github.com/kardianos/service" packages = ["."] pruneopts = "" - revision = "615a14ed75099c9eaac6949e22ac2341bf9d3197" + revision = "56787a3ea05e9b262708192e7ce3b500aba73561" + version = "v1.0.0" [[projects]] digest = "1:3e160bec100719bb664ce5192b42e82e66b290397da4c0845aed5ce3cfce60cb" diff --git a/Gopkg.toml b/Gopkg.toml index 5b0a2dba4..5604fd362 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -88,7 +88,7 @@ [[constraint]] name = "github.com/kardianos/service" - branch = "master" + version = "1.0.0" [[constraint]] name = "github.com/kballard/go-shellquote" From 0cf94cfe54b7150eda58877d96cf6145f09ab6c1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 3 Jan 2020 11:38:20 -0800 Subject: [PATCH 1435/1815] Report rabbitmq_node measurement and return on gather error (#6819) --- plugins/inputs/rabbitmq/rabbitmq.go | 184 ++++++++++++---------------- 1 file changed, 81 insertions(+), 103 deletions(-) diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 199b24922..d27c522bf 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -448,134 +448,112 @@ func gatherOverview(r *RabbitMQ, acc telegraf.Accumulator) { } func gatherNodes(r *RabbitMQ, acc telegraf.Accumulator) { - allNodes := make([]Node, 0) - // Gather information about nodes + allNodes := make([]*Node, 0) + err := r.requestJSON("/api/nodes", &allNodes) if err != nil { acc.AddError(err) return } - nodes := make(map[string]Node) + nodes := allNodes[:0] for _, node := range allNodes { if r.shouldGatherNode(node) { - nodes[node.Name] = node + nodes = append(nodes, node) } } - numberNodes := len(nodes) - if numberNodes == 0 { - return - } - - type NodeCheck struct { - NodeName string - HealthCheck HealthCheck - Memory *Memory - } - - nodeChecksChannel := make(chan 
NodeCheck, numberNodes) - + var wg sync.WaitGroup for _, node := range nodes { - go func(nodeName string, healthChecksChannel chan NodeCheck) { - var healthCheck HealthCheck - var memoryresponse MemoryResponse + wg.Add(1) + go func(node *Node) { + defer wg.Done() - err := r.requestJSON("/api/healthchecks/node/"+nodeName, &healthCheck) - nodeCheck := NodeCheck{ - NodeName: nodeName, - HealthCheck: healthCheck, + tags := map[string]string{"url": r.URL} + tags["node"] = node.Name + + fields := map[string]interface{}{ + "disk_free": node.DiskFree, + "disk_free_limit": node.DiskFreeLimit, + "disk_free_alarm": boolToInt(node.DiskFreeAlarm), + "fd_total": node.FdTotal, + "fd_used": node.FdUsed, + "mem_limit": node.MemLimit, + "mem_used": node.MemUsed, + "mem_alarm": boolToInt(node.MemAlarm), + "proc_total": node.ProcTotal, + "proc_used": node.ProcUsed, + "run_queue": node.RunQueue, + "sockets_total": node.SocketsTotal, + "sockets_used": node.SocketsUsed, + "uptime": node.Uptime, + "mnesia_disk_tx_count": node.MnesiaDiskTxCount, + "mnesia_disk_tx_count_rate": node.MnesiaDiskTxCountDetails.Rate, + "mnesia_ram_tx_count": node.MnesiaRamTxCount, + "mnesia_ram_tx_count_rate": node.MnesiaRamTxCountDetails.Rate, + "gc_num": node.GcNum, + "gc_num_rate": node.GcNumDetails.Rate, + "gc_bytes_reclaimed": node.GcBytesReclaimed, + "gc_bytes_reclaimed_rate": node.GcBytesReclaimedDetails.Rate, + "io_read_avg_time": node.IoReadAvgTime, + "io_read_avg_time_rate": node.IoReadAvgTimeDetails.Rate, + "io_read_bytes": node.IoReadBytes, + "io_read_bytes_rate": node.IoReadBytesDetails.Rate, + "io_write_avg_time": node.IoWriteAvgTime, + "io_write_avg_time_rate": node.IoWriteAvgTimeDetails.Rate, + "io_write_bytes": node.IoWriteBytes, + "io_write_bytes_rate": node.IoWriteBytesDetails.Rate, + "running": boolToInt(node.Running), } + + var health HealthCheck + err := r.requestJSON("/api/healthchecks/node/"+node.Name, &health) if err != nil { acc.AddError(err) return } - err = r.requestJSON("/api/nodes/"+nodeName+"/memory", &memoryresponse) - nodeCheck.Memory = memoryresponse.Memory + if health.Status == "ok" { + fields["health_check_status"] = int64(1) + } else { + fields["health_check_status"] = int64(0) + } + + var memory MemoryResponse + err = r.requestJSON("/api/nodes/"+node.Name+"/memory", &memory) if err != nil { acc.AddError(err) return } - nodeChecksChannel <- nodeCheck - }(node.Name, nodeChecksChannel) + if memory.Memory != nil { + fields["mem_connection_readers"] = memory.Memory.ConnectionReaders + fields["mem_connection_writers"] = memory.Memory.ConnectionWriters + fields["mem_connection_channels"] = memory.Memory.ConnectionChannels + fields["mem_connection_other"] = memory.Memory.ConnectionOther + fields["mem_queue_procs"] = memory.Memory.QueueProcs + fields["mem_queue_slave_procs"] = memory.Memory.QueueSlaveProcs + fields["mem_plugins"] = memory.Memory.Plugins + fields["mem_other_proc"] = memory.Memory.OtherProc + fields["mem_metrics"] = memory.Memory.Metrics + fields["mem_mgmt_db"] = memory.Memory.MgmtDb + fields["mem_mnesia"] = memory.Memory.Mnesia + fields["mem_other_ets"] = memory.Memory.OtherEts + fields["mem_binary"] = memory.Memory.Binary + fields["mem_msg_index"] = memory.Memory.MsgIndex + fields["mem_code"] = memory.Memory.Code + fields["mem_atom"] = memory.Memory.Atom + fields["mem_other_system"] = memory.Memory.OtherSystem + fields["mem_allocated_unused"] = memory.Memory.AllocatedUnused + fields["mem_reserved_unallocated"] = memory.Memory.ReservedUnallocated + fields["mem_total"] = memory.Memory.Total + 
} + + acc.AddFields("rabbitmq_node", fields, tags) + }(node) } - now := time.Now() - - for i := 0; i < len(nodes); i++ { - nodeCheck := <-nodeChecksChannel - - var healthCheckStatus int64 = 0 - - if nodeCheck.HealthCheck.Status == "ok" { - healthCheckStatus = 1 - } - - node := nodes[nodeCheck.NodeName] - - tags := map[string]string{"url": r.URL} - tags["node"] = node.Name - - fields := map[string]interface{}{ - "disk_free": node.DiskFree, - "disk_free_limit": node.DiskFreeLimit, - "disk_free_alarm": boolToInt(node.DiskFreeAlarm), - "fd_total": node.FdTotal, - "fd_used": node.FdUsed, - "mem_limit": node.MemLimit, - "mem_used": node.MemUsed, - "mem_alarm": boolToInt(node.MemAlarm), - "proc_total": node.ProcTotal, - "proc_used": node.ProcUsed, - "run_queue": node.RunQueue, - "sockets_total": node.SocketsTotal, - "sockets_used": node.SocketsUsed, - "uptime": node.Uptime, - "mnesia_disk_tx_count": node.MnesiaDiskTxCount, - "mnesia_disk_tx_count_rate": node.MnesiaDiskTxCountDetails.Rate, - "mnesia_ram_tx_count": node.MnesiaRamTxCount, - "mnesia_ram_tx_count_rate": node.MnesiaRamTxCountDetails.Rate, - "gc_num": node.GcNum, - "gc_num_rate": node.GcNumDetails.Rate, - "gc_bytes_reclaimed": node.GcBytesReclaimed, - "gc_bytes_reclaimed_rate": node.GcBytesReclaimedDetails.Rate, - "io_read_avg_time": node.IoReadAvgTime, - "io_read_avg_time_rate": node.IoReadAvgTimeDetails.Rate, - "io_read_bytes": node.IoReadBytes, - "io_read_bytes_rate": node.IoReadBytesDetails.Rate, - "io_write_avg_time": node.IoWriteAvgTime, - "io_write_avg_time_rate": node.IoWriteAvgTimeDetails.Rate, - "io_write_bytes": node.IoWriteBytes, - "io_write_bytes_rate": node.IoWriteBytesDetails.Rate, - "running": boolToInt(node.Running), - "health_check_status": healthCheckStatus, - } - if nodeCheck.Memory != nil { - fields["mem_connection_readers"] = nodeCheck.Memory.ConnectionReaders - fields["mem_connection_writers"] = nodeCheck.Memory.ConnectionWriters - fields["mem_connection_channels"] = nodeCheck.Memory.ConnectionChannels - fields["mem_connection_other"] = nodeCheck.Memory.ConnectionOther - fields["mem_queue_procs"] = nodeCheck.Memory.QueueProcs - fields["mem_queue_slave_procs"] = nodeCheck.Memory.QueueSlaveProcs - fields["mem_plugins"] = nodeCheck.Memory.Plugins - fields["mem_other_proc"] = nodeCheck.Memory.OtherProc - fields["mem_metrics"] = nodeCheck.Memory.Metrics - fields["mem_mgmt_db"] = nodeCheck.Memory.MgmtDb - fields["mem_mnesia"] = nodeCheck.Memory.Mnesia - fields["mem_other_ets"] = nodeCheck.Memory.OtherEts - fields["mem_binary"] = nodeCheck.Memory.Binary - fields["mem_msg_index"] = nodeCheck.Memory.MsgIndex - fields["mem_code"] = nodeCheck.Memory.Code - fields["mem_atom"] = nodeCheck.Memory.Atom - fields["mem_other_system"] = nodeCheck.Memory.OtherSystem - fields["mem_allocated_unused"] = nodeCheck.Memory.AllocatedUnused - fields["mem_reserved_unallocated"] = nodeCheck.Memory.ReservedUnallocated - fields["mem_total"] = nodeCheck.Memory.Total - } - acc.AddFields("rabbitmq_node", fields, tags, now) - } + wg.Wait() } func gatherQueues(r *RabbitMQ, acc telegraf.Accumulator) { @@ -718,7 +696,7 @@ func gatherFederationLinks(r *RabbitMQ, acc telegraf.Accumulator) { } } -func (r *RabbitMQ) shouldGatherNode(node Node) bool { +func (r *RabbitMQ) shouldGatherNode(node *Node) bool { if len(r.Nodes) == 0 { return true } From b0398c9a8f275abe7740671788cb0705925146e6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 3 Jan 2020 11:43:19 -0800 Subject: [PATCH 1436/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 
insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ed412d24..cf2325c36 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,8 @@ - [#6679](https://github.com/influxdata/telegraf/issues/6679): Encode query hash fields as hex strings in sqlserver input. - [#6345](https://github.com/influxdata/telegraf/issues/6345): Invalidate diskio cache if the metadata mtime has changed. - [#6800](https://github.com/influxdata/telegraf/issues/6800): Show platform not supported warning only on plugin creation. +- [#6814](https://github.com/influxdata/telegraf/issues/6814): Fix rabbitmq cannot complete gather after request error. +- [#6846](https://github.com/influxdata/telegraf/issues/6846): Fix /sbin/init --version executed on Telegraf startup. ## v1.13 [2019-12-12] From 5b92477603c8cd7ce5178f4b8b1d5a5a0c6e7330 Mon Sep 17 00:00:00 2001 From: Thomas Mohaupt Date: Tue, 7 Jan 2020 00:54:50 +0100 Subject: [PATCH 1437/1815] Fix error in pivot processor docs (#6856) --- plugins/processors/pivot/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/processors/pivot/README.md b/plugins/processors/pivot/README.md index 7d2fa91b4..b3eb06fd3 100644 --- a/plugins/processors/pivot/README.md +++ b/plugins/processors/pivot/README.md @@ -24,7 +24,7 @@ To perform the reverse operation use the [unpivot] processor. - cpu,cpu=cpu0,name=time_idle value=42i - cpu,cpu=cpu0,name=time_user value=43i + cpu,cpu=cpu0 time_idle=42i -+ cpu,cpu=cpu0 time_user=42i ++ cpu,cpu=cpu0 time_user=43i ``` [unpivot]: /plugins/processors/unpivot/README.md From d62ff1d25cbaedeb9464aa00644396e827de4909 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Wed, 8 Jan 2020 10:46:01 -0800 Subject: [PATCH 1438/1815] Update merge aggregator config file in README.md (#6805) --- plugins/aggregators/merge/README.md | 5 ++++- plugins/aggregators/merge/merge.go | 6 +++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/plugins/aggregators/merge/README.md b/plugins/aggregators/merge/README.md index 58fa47bbd..6f959a1e8 100644 --- a/plugins/aggregators/merge/README.md +++ b/plugins/aggregators/merge/README.md @@ -11,7 +11,10 @@ be handled more efficiently by the output. ```toml [[aggregators.merge]] - # no configuration + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = true + ``` ### Example diff --git a/plugins/aggregators/merge/merge.go b/plugins/aggregators/merge/merge.go index 6a1e82911..8d36681f2 100644 --- a/plugins/aggregators/merge/merge.go +++ b/plugins/aggregators/merge/merge.go @@ -10,7 +10,11 @@ import ( const ( description = "Merge metrics into multifield metrics by series key" - sampleConfig = "" + sampleConfig = ` + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. 
+ drop_original = true + ` ) type Merge struct { From 5f52b9538d8c0d937a139aba81e6e98d83ea91f3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 8 Jan 2020 10:48:06 -0800 Subject: [PATCH 1439/1815] Fix indention in merge sample config --- plugins/aggregators/merge/README.md | 1 - plugins/aggregators/merge/merge.go | 8 ++++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/plugins/aggregators/merge/README.md b/plugins/aggregators/merge/README.md index 6f959a1e8..89f7f0983 100644 --- a/plugins/aggregators/merge/README.md +++ b/plugins/aggregators/merge/README.md @@ -14,7 +14,6 @@ be handled more efficiently by the output. ## If true, the original metric will be dropped by the ## aggregator and will not get sent to the output plugins. drop_original = true - ``` ### Example diff --git a/plugins/aggregators/merge/merge.go b/plugins/aggregators/merge/merge.go index 8d36681f2..083c8fd3e 100644 --- a/plugins/aggregators/merge/merge.go +++ b/plugins/aggregators/merge/merge.go @@ -11,10 +11,10 @@ import ( const ( description = "Merge metrics into multifield metrics by series key" sampleConfig = ` - ## If true, the original metric will be dropped by the - ## aggregator and will not get sent to the output plugins. - drop_original = true - ` + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = true +` ) type Merge struct { From 69d9c105723349e242ca7acca65ba789dd69b41d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 8 Jan 2020 10:50:40 -0800 Subject: [PATCH 1440/1815] Update example telegraf.conf --- etc/telegraf.conf | 108 ++++++++++++++++++++++++---------------------- 1 file changed, 57 insertions(+), 51 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 517600475..28edd5192 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -822,6 +822,7 @@ # # max_message_bytes = 1000000 # # ## Optional TLS Config +# # enable_tls = true # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" @@ -832,6 +833,9 @@ # # sasl_username = "kafka" # # sasl_password = "secret" # +# ## SASL protocol version. When connecting to Azure EventHub set to 0. +# # sasl_version = 1 +# # ## Data format to output. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -1651,7 +1655,9 @@ # # Merge metrics into multifield metrics by series key # [[aggregators.merge]] -# # no configuration +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = true # # Keep the aggregate min/max of each metric passing through. @@ -2870,6 +2876,11 @@ # ## optionally specify the path to the ipmitool executable # # path = "/usr/bin/ipmitool" # ## +# ## Setting 'use_sudo' to true will make use of sudo to run ipmitool. +# ## Sudo must be configured to allow the telegraf user to run ipmitool +# ## without a password. +# # use_sudo = false +# ## # ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR # # privilege = "ADMINISTRATOR" # ## @@ -3191,6 +3202,11 @@ # ## OR # # bearer_token_string = "abc_123" # +# ## Pod labels to be added as tags. An empty array for both include and +# ## exclude will include all labels. 
+# # label_include = [] +# # label_exclude = ["*"] +# # ## Set response_timeout (default 5 seconds) # # response_timeout = "5s" # @@ -4150,61 +4166,46 @@ # # Retrieves SNMP values from remote agents # [[inputs.snmp]] -# agents = [ "127.0.0.1:161" ] -# ## Timeout for each SNMP query. -# timeout = "5s" -# ## Number of retries to attempt within timeout. -# retries = 3 -# ## SNMP version, values can be 1, 2, or 3 -# version = 2 +# ## Agent addresses to retrieve values from. +# ## example: agents = ["udp://127.0.0.1:161"] +# ## agents = ["tcp://127.0.0.1:161"] +# agents = ["udp://127.0.0.1:161"] +# +# ## Timeout for each request. +# # timeout = "5s" +# +# ## SNMP version; can be 1, 2, or 3. +# # version = 2 # # ## SNMP community string. -# community = "public" +# # community = "public" # -# ## The GETBULK max-repetitions parameter -# max_repetitions = 10 +# ## Number of retries to attempt. +# # retries = 3 # -# ## SNMPv3 auth parameters -# #sec_name = "myuser" -# #auth_protocol = "md5" # Values: "MD5", "SHA", "" -# #auth_password = "pass" -# #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv" -# #context_name = "" -# #priv_protocol = "" # Values: "DES", "AES", "" -# #priv_password = "" +# ## The GETBULK max-repetitions parameter. +# # max_repetitions = 10 # -# ## measurement name -# name = "system" -# [[inputs.snmp.field]] -# name = "hostname" -# oid = ".1.0.0.1.1" -# [[inputs.snmp.field]] -# name = "uptime" -# oid = ".1.0.0.1.2" -# [[inputs.snmp.field]] -# name = "load" -# oid = ".1.0.0.1.3" -# [[inputs.snmp.field]] -# oid = "HOST-RESOURCES-MIB::hrMemorySize" +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA", or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". +# # sec_level = "authNoPriv" +# ## Context Name. +# # context_name = "" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" # -# [[inputs.snmp.table]] -# ## measurement name -# name = "remote_servers" -# inherit_tags = [ "hostname" ] -# [[inputs.snmp.table.field]] -# name = "server" -# oid = ".1.0.0.0.1.0" -# is_tag = true -# [[inputs.snmp.table.field]] -# name = "connections" -# oid = ".1.0.0.0.1.1" -# [[inputs.snmp.table.field]] -# name = "latency" -# oid = ".1.0.0.0.1.2" -# -# [[inputs.snmp.table]] -# ## auto populate table's fields using the MIB -# oid = "HOST-RESOURCES-MIB::hrNetworkTable" +# ## Add fields and tables defining the variables you wish to collect. This +# ## example collects the system uptime and interface variables. Reference the +# ## full plugin documentation for configuration details. # # DEPRECATED! PLEASE USE inputs.snmp INSTEAD. @@ -5253,16 +5254,21 @@ # # version = "" # # ## Optional TLS Config +# # enable_tls = true # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # -# ## Optional SASL Config +# ## SASL authentication credentials. These settings should typically be used +# ## with TLS encryption enabled using the "enable_tls" option. # # sasl_username = "kafka" # # sasl_password = "secret" # +# ## SASL protocol version. When connecting to Azure EventHub set to 0. 
+# # sasl_version = 1 +# # ## Name of the consumer group. # # consumer_group = "telegraf_metrics_consumers" # From 73488eb61cf71d41e45df6f7b91cfc6d4e95a067 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 8 Jan 2020 10:52:36 -0800 Subject: [PATCH 1441/1815] Use last path element as field key if path fully specified (#6848) --- .../cisco_telemetry_gnmi.go | 21 +- .../cisco_telemetry_gnmi_test.go | 462 +++++++++++++----- 2 files changed, 353 insertions(+), 130 deletions(-) diff --git a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go index 38297b976..c8c50e368 100644 --- a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go +++ b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go @@ -280,11 +280,26 @@ func (c *CiscoTelemetryGNMI) handleSubscribeResponse(address string, reply *gnmi } // Group metrics - for key, val := range fields { - if len(aliasPath) > 0 { + for k, v := range fields { + key := k + if len(aliasPath) < len(key) { + // This may not be an exact prefix, due to naming style + // conversion on the key. key = key[len(aliasPath)+1:] + } else { + // Otherwise use the last path element as the field key. + key = path.Base(key) + + // If there are no elements skip the item; this would be an + // invalid message. + key = strings.TrimLeft(key, "/.") + if key == "" { + c.Log.Errorf("invalid empty path: %q", k) + continue + } } - grouper.Add(name, tags, timestamp, key, val) + + grouper.Add(name, tags, timestamp, key, v) } lastAliasPath = aliasPath diff --git a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go index 7a62bcd14..1b12886b9 100644 --- a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go +++ b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go @@ -5,9 +5,11 @@ import ( "errors" "fmt" "net" + "sync" "testing" "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/openconfig/gnmi/proto/gnmi" @@ -37,89 +39,124 @@ func TestParsePath(t *testing.T) { assert.Equal(t, errors.New("Invalid GNMI path: /foo[[/"), err) } -type mockGNMIServer struct { - t *testing.T - acc *testutil.Accumulator - server *grpc.Server - scenario int +type MockServer struct { + SubscribeF func(gnmi.GNMI_SubscribeServer) error + GRPCServer *grpc.Server } -func (m *mockGNMIServer) Capabilities(context.Context, *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) { +func (s *MockServer) Capabilities(context.Context, *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) { return nil, nil } -func (m *mockGNMIServer) Get(context.Context, *gnmi.GetRequest) (*gnmi.GetResponse, error) { +func (s *MockServer) Get(context.Context, *gnmi.GetRequest) (*gnmi.GetResponse, error) { return nil, nil } -func (m *mockGNMIServer) Set(context.Context, *gnmi.SetRequest) (*gnmi.SetResponse, error) { +func (s *MockServer) Set(context.Context, *gnmi.SetRequest) (*gnmi.SetResponse, error) { return nil, nil } -func (m *mockGNMIServer) Subscribe(server gnmi.GNMI_SubscribeServer) error { - metadata, ok := metadata.FromIncomingContext(server.Context()) - require.Equal(m.t, ok, true) - require.Equal(m.t, metadata.Get("username"), []string{"theuser"}) - require.Equal(m.t, metadata.Get("password"), []string{"thepassword"}) - - // Must read request before sending a response; even though we don't check - // the request itself currently. 
- _, err := server.Recv() - if err != nil { - panic(err) - } - - switch m.scenario { - case 0: - return fmt.Errorf("testerror") - case 1: - notification := mockGNMINotification() - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}) - notification.Prefix.Elem[0].Key["foo"] = "bar2" - notification.Update[0].Path.Elem[1].Key["name"] = "str2" - notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_JsonVal{JsonVal: []byte{'"', '1', '2', '3', '"'}}} - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) - return nil - case 2: - notification := mockGNMINotification() - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) - return nil - case 3: - notification := mockGNMINotification() - notification.Prefix.Elem[0].Key["foo"] = "bar2" - notification.Update[0].Path.Elem[1].Key["name"] = "str2" - notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_BoolVal{BoolVal: false}} - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) - return nil - default: - return fmt.Errorf("test not implemented ;)") - } +func (s *MockServer) Subscribe(server gnmi.GNMI_SubscribeServer) error { + return s.SubscribeF(server) } -func TestGNMIError(t *testing.T) { +func TestWaitError(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - server := grpc.NewServer() - acc := &testutil.Accumulator{} - gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 0, server: server, acc: acc}) - c := &CiscoTelemetryGNMI{ + grpcServer := grpc.NewServer() + gnmiServer := &MockServer{ + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + return fmt.Errorf("testerror") + }, + GRPCServer: grpcServer, + } + gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + + plugin := &CiscoTelemetryGNMI{ Log: testutil.Logger{}, Addresses: []string{listener.Addr().String()}, - Username: "theuser", Password: "thepassword", Encoding: "proto", - Redial: internal.Duration{Duration: 1 * time.Second}} + Encoding: "proto", + Redial: internal.Duration{Duration: 1 * time.Second}, + } - require.NoError(t, c.Start(acc)) + var acc testutil.Accumulator + err = plugin.Start(&acc) + require.NoError(t, err) + + var wg sync.WaitGroup + wg.Add(1) go func() { - err := server.Serve(listener) + defer wg.Done() + err := grpcServer.Serve(listener) require.NoError(t, err) }() - acc.WaitError(1) - c.Stop() - server.Stop() - require.Contains(t, acc.Errors, errors.New("aborted GNMI subscription: rpc error: code = Unknown desc = testerror")) + acc.WaitError(1) + plugin.Stop() + grpcServer.Stop() + wg.Wait() + + require.Contains(t, acc.Errors, + errors.New("aborted GNMI subscription: rpc error: code = Unknown desc = testerror")) +} + +func TestUsernamePassword(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + grpcServer := grpc.NewServer() + gnmiServer := &MockServer{ + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + metadata, ok := metadata.FromIncomingContext(server.Context()) + if !ok { + return errors.New("failed to get metadata") + } + + username := metadata.Get("username") + if len(username) != 1 || username[0] != "theusername" { + return errors.New("wrong username") + } + + password := metadata.Get("password") + if len(password) != 1 || 
password[0] != "thepassword" { + return errors.New("wrong password") + } + + return errors.New("success") + }, + GRPCServer: grpcServer, + } + gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + + plugin := &CiscoTelemetryGNMI{ + Log: testutil.Logger{}, + Addresses: []string{listener.Addr().String()}, + Username: "theusername", + Password: "thepassword", + Encoding: "proto", + Redial: internal.Duration{Duration: 1 * time.Second}, + } + + var acc testutil.Accumulator + err = plugin.Start(&acc) + require.NoError(t, err) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + err := grpcServer.Serve(listener) + require.NoError(t, err) + }() + + acc.WaitError(1) + plugin.Stop() + grpcServer.Stop() + wg.Wait() + + require.Contains(t, acc.Errors, + errors.New("aborted GNMI subscription: rpc error: code = Unknown desc = success")) } func mockGNMINotification() *gnmi.Notification { @@ -169,97 +206,268 @@ func mockGNMINotification() *gnmi.Notification { } } -func TestGNMIMultiple(t *testing.T) { - listener, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - server := grpc.NewServer() - acc := &testutil.Accumulator{} - gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 1, server: server, acc: acc}) - - c := &CiscoTelemetryGNMI{ - Log: testutil.Logger{}, - Addresses: []string{listener.Addr().String()}, - Username: "theuser", Password: "thepassword", Encoding: "proto", - Redial: internal.Duration{Duration: 1 * time.Second}, - Subscriptions: []Subscription{{Name: "alias", Origin: "type", Path: "/model", SubscriptionMode: "sample"}}, +func TestNotification(t *testing.T) { + tests := []struct { + name string + plugin *CiscoTelemetryGNMI + server *MockServer + expected []telegraf.Metric + }{ + { + name: "multiple metrics", + plugin: &CiscoTelemetryGNMI{ + Log: testutil.Logger{}, + Encoding: "proto", + Redial: internal.Duration{Duration: 1 * time.Second}, + Subscriptions: []Subscription{ + { + Name: "alias", + Origin: "type", + Path: "/model", + SubscriptionMode: "sample", + }, + }, + }, + server: &MockServer{ + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + notification := mockGNMINotification() + server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) + server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}) + notification.Prefix.Elem[0].Key["foo"] = "bar2" + notification.Update[0].Path.Elem[1].Key["name"] = "str2" + notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_JsonVal{JsonVal: []byte{'"', '1', '2', '3', '"'}}} + server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) + return nil + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "alias", + map[string]string{ + "path": "type:/model", + "source": "127.0.0.1", + "foo": "bar", + "name": "str", + "uint64": "1234", + }, + map[string]interface{}{ + "some/path": int64(5678), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "alias", + map[string]string{ + "path": "type:/model", + "source": "127.0.0.1", + "foo": "bar", + }, + map[string]interface{}{ + "other/path": "foobar", + "other/this": "that", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "alias", + map[string]string{ + "path": "type:/model", + "foo": "bar2", + "source": "127.0.0.1", + "name": "str2", + "uint64": "1234", + }, + map[string]interface{}{ + "some/path": "123", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "alias", + map[string]string{ + 
"path": "type:/model", + "source": "127.0.0.1", + "foo": "bar2", + }, + map[string]interface{}{ + "other/path": "foobar", + "other/this": "that", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "full path field key", + plugin: &CiscoTelemetryGNMI{ + Log: testutil.Logger{}, + Encoding: "proto", + Redial: internal.Duration{Duration: 1 * time.Second}, + Subscriptions: []Subscription{ + { + Name: "PHY_COUNTERS", + Origin: "type", + Path: "/state/port[port-id=*]/ethernet/oper-speed", + SubscriptionMode: "sample", + }, + }, + }, + server: &MockServer{ + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + response := &gnmi.SubscribeResponse{ + Response: &gnmi.SubscribeResponse_Update{ + Update: &gnmi.Notification{ + Timestamp: 1543236572000000000, + Prefix: &gnmi.Path{ + Origin: "type", + Elem: []*gnmi.PathElem{ + { + Name: "state", + }, + { + Name: "port", + Key: map[string]string{"port-id": "1"}, + }, + { + Name: "ethernet", + }, + { + Name: "oper-speed", + }, + }, + Target: "subscription", + }, + Update: []*gnmi.Update{ + { + Path: &gnmi.Path{}, + Val: &gnmi.TypedValue{ + Value: &gnmi.TypedValue_IntVal{IntVal: 42}, + }, + }, + }, + }, + }, + } + server.Send(response) + return nil + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "PHY_COUNTERS", + map[string]string{ + "path": "type:/state/port/ethernet/oper-speed", + "source": "127.0.0.1", + "port_id": "1", + }, + map[string]interface{}{ + "oper_speed": 42, + }, + time.Unix(0, 0), + ), + }, + }, } - require.NoError(t, c.Start(acc)) - go func() { - err := server.Serve(listener) - require.NoError(t, err) - }() - acc.Wait(4) - c.Stop() - server.Stop() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) - require.Empty(t, acc.Errors) + tt.plugin.Addresses = []string{listener.Addr().String()} - tags := map[string]string{"path": "type:/model", "source": "127.0.0.1", "foo": "bar", "name": "str", "uint64": "1234"} - fields := map[string]interface{}{"some/path": int64(5678)} - acc.AssertContainsTaggedFields(t, "alias", fields, tags) + grpcServer := grpc.NewServer() + tt.server.GRPCServer = grpcServer + gnmi.RegisterGNMIServer(grpcServer, tt.server) - tags = map[string]string{"path": "type:/model", "source": "127.0.0.1", "foo": "bar"} - fields = map[string]interface{}{"other/path": "foobar", "other/this": "that"} - acc.AssertContainsTaggedFields(t, "alias", fields, tags) + var acc testutil.Accumulator + err = tt.plugin.Start(&acc) + require.NoError(t, err) - tags = map[string]string{"path": "type:/model", "foo": "bar2", "source": "127.0.0.1", "name": "str2", "uint64": "1234"} - fields = map[string]interface{}{"some/path": "123"} - acc.AssertContainsTaggedFields(t, "alias", fields, tags) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + err := grpcServer.Serve(listener) + require.NoError(t, err) + }() - tags = map[string]string{"path": "type:/model", "source": "127.0.0.1", "foo": "bar2"} - fields = map[string]interface{}{"other/path": "foobar", "other/this": "that"} - acc.AssertContainsTaggedFields(t, "alias", fields, tags) + acc.Wait(len(tt.expected)) + tt.plugin.Stop() + grpcServer.Stop() + wg.Wait() + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), + testutil.IgnoreTime()) + }) + } } -func TestGNMIMultipleRedial(t *testing.T) { +func TestRedial(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - server := grpc.NewServer() - acc := 
&testutil.Accumulator{} - gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 2, server: server, acc: acc}) - c := &CiscoTelemetryGNMI{ + plugin := &CiscoTelemetryGNMI{ Log: testutil.Logger{}, Addresses: []string{listener.Addr().String()}, - Username: "theuser", Password: "thepassword", Encoding: "proto", - Redial: internal.Duration{Duration: 10 * time.Millisecond}, - Subscriptions: []Subscription{{Name: "alias", Origin: "type", Path: "/model", SubscriptionMode: "sample"}}, + Encoding: "proto", + Redial: internal.Duration{Duration: 10 * time.Millisecond}, } - require.NoError(t, c.Start(acc)) + grpcServer := grpc.NewServer() + gnmiServer := &MockServer{ + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + notification := mockGNMINotification() + server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) + return nil + }, + GRPCServer: grpcServer, + } + gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + + var wg sync.WaitGroup + wg.Add(1) go func() { - err := server.Serve(listener) + defer wg.Done() + err := grpcServer.Serve(listener) require.NoError(t, err) }() + + var acc testutil.Accumulator + err = plugin.Start(&acc) + require.NoError(t, err) + acc.Wait(2) - server.Stop() + grpcServer.Stop() + wg.Wait() - listener, _ = net.Listen("tcp", listener.Addr().String()) - server = grpc.NewServer() - gnmi.RegisterGNMIServer(server, &mockGNMIServer{t: t, scenario: 3, server: server, acc: acc}) + // Restart GNMI server at the same address + listener, err = net.Listen("tcp", listener.Addr().String()) + require.NoError(t, err) + grpcServer = grpc.NewServer() + gnmiServer = &MockServer{ + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + notification := mockGNMINotification() + notification.Prefix.Elem[0].Key["foo"] = "bar2" + notification.Update[0].Path.Elem[1].Key["name"] = "str2" + notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_BoolVal{BoolVal: false}} + server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) + return nil + }, + GRPCServer: grpcServer, + } + gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + + wg.Add(1) go func() { - err := server.Serve(listener) + defer wg.Done() + err := grpcServer.Serve(listener) require.NoError(t, err) }() + acc.Wait(4) - c.Stop() - server.Stop() - - tags := map[string]string{"path": "type:/model", "source": "127.0.0.1", "foo": "bar", "name": "str", "uint64": "1234"} - fields := map[string]interface{}{"some/path": int64(5678)} - acc.AssertContainsTaggedFields(t, "alias", fields, tags) - - tags = map[string]string{"path": "type:/model", "source": "127.0.0.1", "foo": "bar"} - fields = map[string]interface{}{"other/path": "foobar", "other/this": "that"} - acc.AssertContainsTaggedFields(t, "alias", fields, tags) - - tags = map[string]string{"path": "type:/model", "foo": "bar2", "source": "127.0.0.1", "name": "str2", "uint64": "1234"} - fields = map[string]interface{}{"some/path": false} - acc.AssertContainsTaggedFields(t, "alias", fields, tags) - - tags = map[string]string{"path": "type:/model", "source": "127.0.0.1", "foo": "bar2"} - fields = map[string]interface{}{"other/path": "foobar", "other/this": "that"} - acc.AssertContainsTaggedFields(t, "alias", fields, tags) + plugin.Stop() + grpcServer.Stop() + wg.Wait() } From 1498f8addf595aa22ee0d96d8b36376bf8c53664 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 8 Jan 2020 10:52:58 -0800 Subject: [PATCH 1442/1815] Add cardinality tips to FAQ and sqlserver input (#6852) 
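The filtering technique referenced in the FAQ entry below is applied per plugin. A
minimal sketch, assuming a hypothetical workload where a `container_id` tag drives
the cardinality growth; the plugin, measurement, and tag names here are
illustrative placeholders and not part of this change:

```toml
[[inputs.docker]]
  ## Drop the hypothetical high-cardinality tag; "tagexclude" is a standard
  ## Telegraf per-plugin modifier.
  tagexclude = ["container_id"]

  ## Keep only the measurements that are actually dashboarded or alerted on.
  namepass = ["docker", "docker_container_cpu", "docker_container_mem"]
```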
---
 docs/FAQ.md                        | 31 ++++++++++++++++++++++++++++--
 plugins/inputs/sqlserver/README.md | 11 ++++++++++-
 2 files changed, 39 insertions(+), 3 deletions(-)

diff --git a/docs/FAQ.md b/docs/FAQ.md
index f4d81ec7c..8819ee657 100644
--- a/docs/FAQ.md
+++ b/docs/FAQ.md
@@ -6,8 +6,8 @@ You will need to set up several volume mounts as well as some environment
 variables:
 ```
 docker run --name telegraf
-    -v /:/hostfs:ro 
-    -v /etc:/hostfs/etc:ro 
+    -v /:/hostfs:ro
+    -v /etc:/hostfs/etc:ro
     -v /proc:/hostfs/proc:ro
     -v /sys:/hostfs/sys:ro
     -v /var:/hostfs/var:ro
@@ -43,6 +43,33 @@ If running as a service add the environment variable to `/etc/default/telegraf`:
 GODEBUG=netdns=cgo
 ```
 
+### Q: How can I manage series cardinality?
+
+High [series cardinality][], when not properly managed, can cause high load on
+your database. Telegraf attempts to avoid creating series with high
+cardinality, but some monitoring workloads such as tracking containers are
+inherently high cardinality. These workloads can still be monitored, but care
+must be taken to manage cardinality growth.
+
+You can use the following techniques to avoid cardinality issues:
+
+- Use [metric filtering][] options to exclude unneeded measurements and tags.
+- Write to a database with an appropriate [retention policy][].
+- Limit series cardinality in your database using the
+  [max-series-per-database][] and [max-values-per-tag][] settings.
+- Consider using the [Time Series Index][tsi].
+- Monitor your databases using the [show cardinality][] commands.
+- Consult the [InfluxDB documentation][influx docs] for the most up-to-date techniques.
+
+[series cardinality]: https://docs.influxdata.com/influxdb/v1.7/concepts/glossary/#series-cardinality
+[metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering
+[retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/
+[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000
+[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000
+[tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/
+[show cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality
+[influx docs]: https://docs.influxdata.com/influxdb/latest/
+
 ### Q: When will the next version be released?
 
 The latest release date estimate can be viewed on the
diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md
index b586ecd27..1b71165fb 100644
--- a/plugins/inputs/sqlserver/README.md
+++ b/plugins/inputs/sqlserver/README.md
@@ -109,7 +109,14 @@ The new (version 2) metrics provide:
 - *Server properties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL, relevant properties such as Tier, #Vcores, Memory etc.
 - *Wait stats*: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store.
 - *Schedulers* - This captures sys.dm_os_schedulers.
-- *SqlRequests* - This captures a snapshot of dm_exec_requests and dm_exec_sessions that gives you running requests as well as wait types and blocking sessions +- *SqlRequests* - This captures a snapshot of dm_exec_requests and + dm_exec_sessions that gives you running requests as well as wait types and + blocking sessions. + + In order to allow tracking on a per statement basis this query produces a + unique tag for each query. Depending on the database workload, this may + result in a high cardinality series. Reference the FAQ for tips on + [managing series cardinality][cardinality]. - *Azure Managed Instances* - Stats from `sys.server_resource_stats`: - cpu_count @@ -165,3 +172,5 @@ The following metrics can be used directly, with no delta calculations: Version 2 queries have the following tags: - `sql_instance`: Physical host and instance name (hostname:instance) - database_name: For Azure SQLDB, database_name denotes the name of the Azure SQL Database as server name is a logical construct. + +[cardinality]: /docs/FAQ.md#user-content-q-how-can-i-manage-series-cardinality From f571f2392a1399ed668b1302d52072645c014ca3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 8 Jan 2020 10:58:02 -0800 Subject: [PATCH 1443/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cf2325c36..140e9b8d8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ - [#6800](https://github.com/influxdata/telegraf/issues/6800): Show platform not supported warning only on plugin creation. - [#6814](https://github.com/influxdata/telegraf/issues/6814): Fix rabbitmq cannot complete gather after request error. - [#6846](https://github.com/influxdata/telegraf/issues/6846): Fix /sbin/init --version executed on Telegraf startup. +- [#6847](https://github.com/influxdata/telegraf/issues/6847): Use last path element as field key if path fully specified. ## v1.13 [2019-12-12] From bc576134bb0a94cfe4a035901420f1247066a9c6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 8 Jan 2020 11:04:47 -0800 Subject: [PATCH 1444/1815] Set release date for 1.13.1 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 140e9b8d8..306684938 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,7 @@ - [#6508](https://github.com/influxdata/telegraf/pull/6508): Add support for new nginx plus api endpoints. - [#6342](https://github.com/influxdata/telegraf/pull/6342): Add kafka SASL version control to support Azure Event Hub. -## v1.13.1 [unreleased] +## v1.13.1 [2020-01-08] #### Bugfixes From c1456a718e9a7d4ddfb9dfa509e76d1a1ecbb7ee Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 8 Jan 2020 12:58:14 -0800 Subject: [PATCH 1445/1815] Update changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 306684938..1c858b313 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,7 +22,7 @@ - [#6800](https://github.com/influxdata/telegraf/issues/6800): Show platform not supported warning only on plugin creation. - [#6814](https://github.com/influxdata/telegraf/issues/6814): Fix rabbitmq cannot complete gather after request error. - [#6846](https://github.com/influxdata/telegraf/issues/6846): Fix /sbin/init --version executed on Telegraf startup. -- [#6847](https://github.com/influxdata/telegraf/issues/6847): Use last path element as field key if path fully specified. 
+- [#6847](https://github.com/influxdata/telegraf/issues/6847): Use last path element as field key if path fully specified in cisco_telemetry_gnmi input. ## v1.13 [2019-12-12] From 1df88dd22bf372327f24341b4043bb383e33529c Mon Sep 17 00:00:00 2001 From: denzilribeiro Date: Wed, 8 Jan 2020 19:55:37 -0600 Subject: [PATCH 1446/1815] Add RBPEX stats collection, DBName for perfmon, proper type for resource stats (#6869) --- plugins/inputs/sqlserver/sqlserver.go | 112 +++++++++++++++++--------- 1 file changed, 72 insertions(+), 40 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 32a1ede7d..511bb5b49 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -350,25 +350,30 @@ EXEC(@SQL) // Conditional check based on Azure SQL DB OR On-prem SQL Server // EngineEdition=5 is Azure SQL DB -const sqlDatabaseIOV2 = `SET DEADLOCK_PRIORITY -10; +const sqlDatabaseIOV2 = ` +SET DEADLOCK_PRIORITY -10; IF SERVERPROPERTY('EngineEdition') = 5 BEGIN SELECT 'sqlserver_database_io' As [measurement], REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], -DB_NAME([vfs].[database_id]) [database_name], +DB_NAME([vfs].[database_id]) AS [database_name], vfs.io_stall_read_ms AS read_latency_ms, vfs.num_of_reads AS reads, vfs.num_of_bytes_read AS read_bytes, vfs.io_stall_write_ms AS write_latency_ms, vfs.num_of_writes AS writes, vfs.num_of_bytes_written AS write_bytes, -b.name as logical_filename, -b.physical_name as physical_filename, -CASE WHEN vfs.file_id = 2 THEN 'LOG' ELSE 'DATA' END AS file_type +vfs.io_stall_queued_read_ms as rg_read_stall_ms, +vfs.io_stall_queued_write_ms as rg_write_stall_ms, +ISNULL(b.name ,'RBPEX') as logical_filename, +ISNULL(b.physical_name, 'RBPEX') as physical_filename, +CASE WHEN vfs.file_id = 2 THEN 'LOG'ELSE 'DATA' END AS file_type +,ISNULL(size,0)/128 AS current_size_mb +,ISNULL(FILEPROPERTY(b.name,'SpaceUsed')/128,0) as space_used_mb FROM [sys].[dm_io_virtual_file_stats](NULL,NULL) AS vfs -inner join sys.database_files b on b.file_id = vfs.file_id +LEFT OUTER join sys.database_files b on b.file_id = vfs.file_id END ELSE BEGIN @@ -382,12 +387,17 @@ vfs.num_of_bytes_read AS read_bytes, vfs.io_stall_write_ms AS write_latency_ms, vfs.num_of_writes AS writes, vfs.num_of_bytes_written AS write_bytes, -b.name as logical_filename, -b.physical_name as physical_filename, +vfs.io_stall_queued_read_ms as rg_read_stall_ms, +vfs.io_stall_queued_write_ms as rg_write_stall_ms, +ISNULL(b.name ,'RBPEX') as logical_filename, +ISNULL(b.physical_name, 'RBPEX') as physical_filename, CASE WHEN vfs.file_id = 2 THEN 'LOG' ELSE 'DATA' END AS file_type +,ISNULL(size,0)/128 AS current_size_mb +-- can't easily get space used without switching context to each DB for MI/On-prem making query expensive +, -1 as space_used_mb FROM [sys].[dm_io_virtual_file_stats](NULL,NULL) AS vfs -inner join sys.master_files b on b.database_id = vfs.database_id and b.file_id = vfs.file_id +LEFT OUTER join sys.master_files b on b.database_id = vfs.database_id and b.file_id = vfs.file_id END ` @@ -509,10 +519,32 @@ INSERT INTO @PCounters SELECT DISTINCT RTrim(spi.object_name) object_name, RTrim(spi.counter_name) counter_name, - RTrim(spi.instance_name) instance_name, + CASE WHEN ( + RTRIM(spi.object_name) LIKE '%:Databases' + OR RTRIM(spi.object_name) LIKE '%:Database Replica' + OR RTRIM(spi.object_name) LIKE '%:Catalog Metadata' + OR RTRIM(spi.object_name) LIKE '%:Query Store' + OR RTRIM(spi.object_name) LIKE '%:Columnstore' + OR 
RTRIM(spi.object_name) LIKE '%:Advanced Analytics') + AND SERVERPROPERTY ('EngineEdition') IN (5,8) + AND TRY_CONVERT(uniqueidentifier, spi.instance_name) IS NOT NULL -- for cloud only + THEN d.name + WHEN RTRIM(object_name) LIKE '%:Availability Replica' + AND SERVERPROPERTY ('EngineEdition') IN (5,8) + AND TRY_CONVERT(uniqueidentifier, spi.instance_name) IS NOT NULL -- for cloud only + THEN d.name + RTRIM(SUBSTRING(spi.instance_name, 37, LEN(spi.instance_name))) + ELSE spi.instance_name + END AS instance_name, CAST(spi.cntr_value AS BIGINT) AS cntr_value, spi.cntr_type FROM sys.dm_os_performance_counters AS spi +LEFT JOIN sys.databases AS d +ON LEFT(spi.instance_name, 36) -- some instance_name values have an additional identifier appended after the GUID + = CASE WHEN -- in SQL DB standalone, physical_database_name for master is the GUID of the user database + d.name = 'master' AND TRY_CONVERT(uniqueidentifier, d.physical_database_name) IS NOT NULL + THEN d.name + ELSE d.physical_database_name + END WHERE ( counter_name IN ( 'SQL Compilations/sec', @@ -526,12 +558,12 @@ WHERE ( 'Full Scans/sec', 'Index Searches/sec', 'Page Splits/sec', - 'Page Lookups/sec', - 'Page Reads/sec', - 'Page Writes/sec', - 'Readahead Pages/sec', - 'Lazy Writes/sec', - 'Checkpoint Pages/sec', + 'Page lookups/sec', + 'Page reads/sec', + 'Page writes/sec', + 'Readahead pages/sec', + 'Lazy writes/sec', + 'Checkpoint pages/sec', 'Page life expectancy', 'Log File(s) Size (KB)', 'Log File(s) Used Size (KB)', @@ -594,7 +626,7 @@ WHERE ( 'Redo Queue KB', 'Mirrored Write Transactions/sec', 'Group Commit Time', - 'Group Commits/sec' + 'Group Commits/Sec' ) ) OR ( object_name LIKE '%User Settable%' @@ -658,8 +690,7 @@ FROM @PCounters AS pc AND pc.object_name = pc1.object_name AND pc.instance_name = pc1.instance_name AND pc1.counter_name LIKE '%base' -WHERE pc.counter_name NOT LIKE '% base' -OPTION(RECOMPILE); +WHERE pc.counter_name NOT LIKE '% base'; ` // Conditional check based on Azure SQL DB v/s the rest aka (Azure SQL Managed instance OR On-prem SQL Server) @@ -1280,27 +1311,28 @@ ELSE const sqlAzureDBResourceStats string = `SET DEADLOCK_PRIORITY -10; IF SERVERPROPERTY('EngineEdition') = 5 -- Is this Azure SQL DB? 
BEGIN - SELECT TOP(1) - 'sqlserver_azurestats' AS [measurement], - REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], - DB_NAME() as [database_name], - avg_cpu_percent, - avg_data_io_percent, - avg_log_write_percent, - avg_memory_usage_percent, - xtp_storage_percent, - max_worker_percent, - max_session_percent, - dtu_limit, - avg_login_rate_percent, - end_time, - avg_instance_memory_percent, - avg_instance_cpu_percent - FROM - sys.dm_db_resource_stats WITH (NOLOCK) - ORDER BY - end_time DESC -END` + SELECT TOP(1) + 'sqlserver_azure_db_resource_stats' AS [measurement], + REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], + DB_NAME() as [database_name], + cast(avg_cpu_percent as float) as avg_cpu_percent, + cast(avg_data_io_percent as float) as avg_data_io_percent, + cast(avg_log_write_percent as float) as avg_log_write_percent, + cast(avg_memory_usage_percent as float) as avg_memory_usage_percent, + cast(xtp_storage_percent as float) as xtp_storage_percent, + cast(max_worker_percent as float) as max_worker_percent, + cast(max_session_percent as float) as max_session_percent, + dtu_limit, + cast(avg_login_rate_percent as float) as avg_login_rate_percent , + end_time, + cast(avg_instance_memory_percent as float) as avg_instance_memory_percent , + cast(avg_instance_cpu_percent as float) as avg_instance_cpu_percent + FROM + sys.dm_db_resource_stats WITH (NOLOCK) + ORDER BY + end_time DESC +END +` //Only executed if AzureDB Flag is set const sqlAzureDBResourceGovernance string = ` From 695678c4a5eb5ceb7eafacf1f2fd4518203619e2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 8 Jan 2020 18:10:44 -0800 Subject: [PATCH 1447/1815] Update changelog --- CHANGELOG.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1c858b313..5c5216e7f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ ## v1.14 [unreleased] +#### Release Notes + +- In the `sqlserver` input, the `sqlserver_azurestats` measurement has been + renamed to `sqlserver_azure_db_resource_stats` due to an issue where numeric + metrics were previously being reported incorrectly as strings. + #### Features - [#6730](https://github.com/influxdata/telegraf/pull/6730): Add page_faults for mongodb wired tiger. @@ -8,6 +14,14 @@ - [#6770](https://github.com/influxdata/telegraf/pull/6770): Expose unbound-control config file option. - [#6508](https://github.com/influxdata/telegraf/pull/6508): Add support for new nginx plus api endpoints. - [#6342](https://github.com/influxdata/telegraf/pull/6342): Add kafka SASL version control to support Azure Event Hub. +- [#6869](https://github.com/influxdata/telegraf/pull/6869): Add RBPEX IO statistics to DatabaseIO query in sqlserver input. +- [#6869](https://github.com/influxdata/telegraf/pull/6869): Add space on disk for each file to DatabaseIO query in the sqlserver input. +- [#6869](https://github.com/influxdata/telegraf/pull/6869): Calculate DB Name instead of GUID in physical_db_name in the sqlserver input. + +#### Bugfixes + +- [#6397](https://github.com/influxdata/telegraf/issues/6397): Fix conversion to floats in AzureDBResourceStats query in the sqlserver input. +- [#6867](https://github.com/influxdata/telegraf/issues/6867): Fix case sensitive collation in sqlserver input. 
## v1.13.1 [2020-01-08] From 6f9f0b7f01e2e9ff6b73e266ec14e39c9c95ba78 Mon Sep 17 00:00:00 2001 From: Ross Lodge Date: Wed, 8 Jan 2020 18:18:12 -0800 Subject: [PATCH 1448/1815] Add latency stats to mongo input (#6733) --- plugins/inputs/mongodb/README.md | 7 + plugins/inputs/mongodb/mongodb_data.go | 13 ++ plugins/inputs/mongodb/mongodb_data_test.go | 23 +++ plugins/inputs/mongodb/mongostat.go | 37 ++++ plugins/inputs/mongodb/mongostat_test.go | 205 ++++++++++++++++++++ 5 files changed, 285 insertions(+) create mode 100644 plugins/inputs/mongodb/mongostat_test.go diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index f154f333b..5449e3b4b 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -77,6 +77,12 @@ by running Telegraf with the `--debug` argument. - getmores (integer) - inserts (integer) - jumbo_chunks (integer) + - latency_commands_count (integer) + - latency_commands (integer) + - latency_reads_count (integer) + - latency_reads (integer) + - latency_writes_count (integer) + - latency_writes (integer) - member_status (string) - net_in_bytes_count (integer) - net_out_bytes_count (integer) @@ -185,6 +191,7 @@ by running Telegraf with the `--debug` argument. ### Example Output: ``` +mongodb,hostname=127.0.0.1:27017 active_reads=0i,active_writes=0i,commands=1335i,commands_per_sec=7i,connections_available=814i,connections_current=5i,connections_total_created=0i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=1i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=1i,deletes=0i,deletes_per_sec=0i,document_deleted=0i,document_inserted=0i,document_returned=13i,document_updated=0i,flushes=5i,flushes_per_sec=0i,getmores=269i,getmores_per_sec=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands_count=0i,latency_commands=0i,latency_reads_count=0i,latency_reads=0i,latency_writes_count=0i,latency_writes=0i,member_status="PRI",net_in_bytes=986i,net_in_bytes_count=358006i,net_out_bytes=23906i,net_out_bytes_count=661507i,open_connections=5i,percent_cache_dirty=0,percent_cache_used=0,queries=18i,queries_per_sec=3i,queued_reads=0i,queued_writes=0i,repl_commands=0i,repl_commands_per_sec=0i,repl_deletes=0i,repl_deletes_per_sec=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=0i,repl_inserts_per_sec=0i,repl_lag=0i,repl_oplog_window_sec=24355215i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=0i,repl_updates_per_sec=0i,resident_megabytes=62i,state="PRIMARY",total_available=0i,total_created=0i,total_in_use=0i,total_refreshing=0i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=23i,ttl_passes_per_sec=0i,updates=0i,updates_per_sec=0i,vsize_megabytes=713i,wtcache_app_threads_page_read_count=13i,wtcache_app_threads_page_read_time=74i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=55271i,wtcache_bytes_written_from=125402i,wtcache_current_bytes=117050i,wtcache_max_bytes_configured=1073741824i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=0i,wtcache_worker_thread_evictingpages=0i 1547159491000000000 mongodb,hostname=127.0.0.1:27017,node_type=PRI 
active_reads=0i,active_writes=0i,commands=1335i,commands_per_sec=7i,connections_available=814i,connections_current=5i,connections_total_created=0i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=1i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=1i,deletes=0i,deletes_per_sec=0i,document_deleted=0i,document_inserted=0i,document_returned=13i,document_updated=0i,flushes=5i,flushes_per_sec=0i,getmores=269i,getmores_per_sec=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,member_status="PRI",net_in_bytes=986i,net_in_bytes_count=358006i,net_out_bytes=23906i,net_out_bytes_count=661507i,open_connections=5i,percent_cache_dirty=0,percent_cache_used=0,queries=18i,queries_per_sec=3i,queued_reads=0i,queued_writes=0i,repl_commands=0i,repl_commands_per_sec=0i,repl_deletes=0i,repl_deletes_per_sec=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=0i,repl_inserts_per_sec=0i,repl_lag=0i,repl_oplog_window_sec=24355215i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=0i,repl_updates_per_sec=0i,resident_megabytes=62i,state="PRIMARY",total_available=0i,total_created=0i,total_in_use=0i,total_refreshing=0i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=23i,ttl_passes_per_sec=0i,updates=0i,updates_per_sec=0i,vsize_megabytes=713i,wtcache_app_threads_page_read_count=13i,wtcache_app_threads_page_read_time=74i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=55271i,wtcache_bytes_written_from=125402i,wtcache_current_bytes=117050i,wtcache_max_bytes_configured=1073741824i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=0i,wtcache_worker_thread_evictingpages=0i 1547159491000000000 mongodb_db_stats,db_name=admin,hostname=127.0.0.1:27017 avg_obj_size=241,collections=2i,data_size=723i,index_size=49152i,indexes=3i,num_extents=0i,objects=3i,ok=1i,storage_size=53248i,type="db_stat" 1547159491000000000 mongodb_db_stats,db_name=local,hostname=127.0.0.1:27017 avg_obj_size=813.9705882352941,collections=6i,data_size=55350i,index_size=102400i,indexes=5i,num_extents=0i,objects=68i,ok=1i,storage_size=204800i,type="db_stat" 1547159491000000000 diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index 279dbb138..09bacdae1 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -86,6 +86,15 @@ var DefaultStats = map[string]string{ "connections_total_created": "TotalCreatedC", } +var DefaultLatencyStats = map[string]string{ + "latency_writes_count": "WriteOpsCnt", + "latency_writes": "WriteLatency", + "latency_reads_count": "ReadOpsCnt", + "latency_reads": "ReadLatency", + "latency_commands_count": "CommandOpsCnt", + "latency_commands": "CommandLatency", +} + var DefaultReplStats = map[string]string{ "repl_inserts": "InsertRCnt", "repl_inserts_per_sec": "InsertR", @@ -232,6 +241,10 @@ func (d *MongodbData) AddDefaultStats() { d.Tags["node_type"] = d.StatLine.NodeType } + if d.StatLine.ReadLatency > 0 { + d.addStat(statLine, DefaultLatencyStats) + } + if d.StatLine.OplogStats != nil { d.add("repl_oplog_window_sec", d.StatLine.OplogStats.TimeDiff) } diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index bbc882a26..711b3eef2 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -142,6 +142,29 @@ func TestAddShardStats(t *testing.T) { } } +func TestAddLatencyStats(t 
*testing.T) { + d := NewMongodbData( + &StatLine{ + CommandOpsCnt: 73, + CommandLatency: 364, + ReadOpsCnt: 113, + ReadLatency: 201, + WriteOpsCnt: 7, + WriteLatency: 55, + }, + tags, + ) + + var acc testutil.Accumulator + + d.AddDefaultStats() + d.flush(&acc) + + for key := range DefaultLatencyStats { + assert.True(t, acc.HasInt64Field("mongodb", key)) + } +} + func TestAddShardHostStats(t *testing.T) { expectedHosts := []string{"hostA", "hostB"} hostStatLines := map[string]ShardHostStatLine{} diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 1658fc071..985627c87 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -58,6 +58,7 @@ type ServerStatus struct { Network *NetworkStats `bson:"network"` Opcounters *OpcountStats `bson:"opcounters"` OpcountersRepl *OpcountStats `bson:"opcountersRepl"` + OpLatencies *OpLatenciesStats `bson:"opLatencies"` RecordStats *DBRecordStats `bson:"recordStats"` Mem *MemStats `bson:"mem"` Repl *ReplStatus `bson:"repl"` @@ -314,6 +315,19 @@ type OpcountStats struct { Command int64 `bson:"command"` } +// OpLatenciesStats stores information related to operation latencies for the database as a whole +type OpLatenciesStats struct { + Reads *LatencyStats `bson:"reads"` + Writes *LatencyStats `bson:"writes"` + Commands *LatencyStats `bson:"commands"` +} + +// LatencyStats lists total latency in microseconds and count of operations, enabling you to obtain an average +type LatencyStats struct { + Latency int64 `bson:"latency"` + Ops int64 `bson:"ops"` +} + // MetricsStats stores information related to metrics type MetricsStats struct { TTL *TTLStats `bson:"ttl"` @@ -493,6 +507,14 @@ type StatLine struct { GetMore, GetMoreCnt int64 Command, CommandCnt int64 + // OpLatency fields + WriteOpsCnt int64 + WriteLatency int64 + ReadOpsCnt int64 + ReadLatency int64 + CommandOpsCnt int64 + CommandLatency int64 + // TTL fields Passes, PassesCnt int64 DeletedDocuments, DeletedDocumentsCnt int64 @@ -684,6 +706,21 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.Command, returnVal.CommandCnt = diff(newStat.Opcounters.Command, oldStat.Opcounters.Command, sampleSecs) } + if newStat.OpLatencies != nil { + if newStat.OpLatencies.Reads != nil { + returnVal.ReadOpsCnt = newStat.OpLatencies.Reads.Ops + returnVal.ReadLatency = newStat.OpLatencies.Reads.Latency + } + if newStat.OpLatencies.Writes != nil { + returnVal.WriteOpsCnt = newStat.OpLatencies.Writes.Ops + returnVal.WriteLatency = newStat.OpLatencies.Writes.Latency + } + if newStat.OpLatencies.Commands != nil { + returnVal.CommandOpsCnt = newStat.OpLatencies.Commands.Ops + returnVal.CommandLatency = newStat.OpLatencies.Commands.Latency + } + } + if newStat.Metrics != nil && oldStat.Metrics != nil { if newStat.Metrics.TTL != nil && oldStat.Metrics.TTL != nil { returnVal.Passes, returnVal.PassesCnt = diff(newStat.Metrics.TTL.Passes, oldStat.Metrics.TTL.Passes, sampleSecs) diff --git a/plugins/inputs/mongodb/mongostat_test.go b/plugins/inputs/mongodb/mongostat_test.go new file mode 100644 index 000000000..5506602a9 --- /dev/null +++ b/plugins/inputs/mongodb/mongostat_test.go @@ -0,0 +1,205 @@ +package mongodb + +import ( + "testing" + //"time" + + //"github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +func TestLatencyStats(t *testing.T) { + + sl := NewStatLine( + MongoStatus{ + ServerStatus: &ServerStatus{ + Connections: &ConnectionStats{}, + Mem: &MemStats{ + Bits: 0, + 
Resident: 0, + Virtual: 0, + Supported: false, + Mapped: 0, + MappedWithJournal: 0, + }, + }, + }, + MongoStatus{ + ServerStatus: &ServerStatus{ + Connections: &ConnectionStats{}, + Mem: &MemStats{ + Bits: 0, + Resident: 0, + Virtual: 0, + Supported: false, + Mapped: 0, + MappedWithJournal: 0, + }, + OpLatencies: &OpLatenciesStats{ + Reads: &LatencyStats{ + Ops: 0, + Latency: 0, + }, + Writes: &LatencyStats{ + Ops: 0, + Latency: 0, + }, + Commands: &LatencyStats{ + Ops: 0, + Latency: 0, + }, + }, + }, + }, + "foo", + true, + 60, + ) + + assert.Equal(t, sl.CommandLatency, int64(0)) + assert.Equal(t, sl.ReadLatency, int64(0)) + assert.Equal(t, sl.WriteLatency, int64(0)) + assert.Equal(t, sl.CommandOpsCnt, int64(0)) + assert.Equal(t, sl.ReadOpsCnt, int64(0)) + assert.Equal(t, sl.WriteOpsCnt, int64(0)) +} + +func TestLatencyStatsDiffZero(t *testing.T) { + + sl := NewStatLine( + MongoStatus{ + ServerStatus: &ServerStatus{ + Connections: &ConnectionStats{}, + Mem: &MemStats{ + Bits: 0, + Resident: 0, + Virtual: 0, + Supported: false, + Mapped: 0, + MappedWithJournal: 0, + }, + OpLatencies: &OpLatenciesStats{ + Reads: &LatencyStats{ + Ops: 0, + Latency: 0, + }, + Writes: &LatencyStats{ + Ops: 0, + Latency: 0, + }, + Commands: &LatencyStats{ + Ops: 0, + Latency: 0, + }, + }, + }, + }, + MongoStatus{ + ServerStatus: &ServerStatus{ + Connections: &ConnectionStats{}, + Mem: &MemStats{ + Bits: 0, + Resident: 0, + Virtual: 0, + Supported: false, + Mapped: 0, + MappedWithJournal: 0, + }, + OpLatencies: &OpLatenciesStats{ + Reads: &LatencyStats{ + Ops: 0, + Latency: 0, + }, + Writes: &LatencyStats{ + Ops: 0, + Latency: 0, + }, + Commands: &LatencyStats{ + Ops: 0, + Latency: 0, + }, + }, + }, + }, + "foo", + true, + 60, + ) + + assert.Equal(t, sl.CommandLatency, int64(0)) + assert.Equal(t, sl.ReadLatency, int64(0)) + assert.Equal(t, sl.WriteLatency, int64(0)) + assert.Equal(t, sl.CommandOpsCnt, int64(0)) + assert.Equal(t, sl.ReadOpsCnt, int64(0)) + assert.Equal(t, sl.WriteOpsCnt, int64(0)) +} + +func TestLatencyStatsDiff(t *testing.T) { + + sl := NewStatLine( + MongoStatus{ + ServerStatus: &ServerStatus{ + Connections: &ConnectionStats{}, + Mem: &MemStats{ + Bits: 0, + Resident: 0, + Virtual: 0, + Supported: false, + Mapped: 0, + MappedWithJournal: 0, + }, + OpLatencies: &OpLatenciesStats{ + Reads: &LatencyStats{ + Ops: 4189041956, + Latency: 2255922322753, + }, + Writes: &LatencyStats{ + Ops: 1691019457, + Latency: 494478256915, + }, + Commands: &LatencyStats{ + Ops: 1019150402, + Latency: 59177710371, + }, + }, + }, + }, + MongoStatus{ + ServerStatus: &ServerStatus{ + Connections: &ConnectionStats{}, + Mem: &MemStats{ + Bits: 0, + Resident: 0, + Virtual: 0, + Supported: false, + Mapped: 0, + MappedWithJournal: 0, + }, + OpLatencies: &OpLatenciesStats{ + Reads: &LatencyStats{ + Ops: 4189049884, + Latency: 2255946760057, + }, + Writes: &LatencyStats{ + Ops: 1691021287, + Latency: 494479456987, + }, + Commands: &LatencyStats{ + Ops: 1019152861, + Latency: 59177981552, + }, + }, + }, + }, + "foo", + true, + 60, + ) + + assert.Equal(t, sl.CommandLatency, int64(59177981552)) + assert.Equal(t, sl.ReadLatency, int64(2255946760057)) + assert.Equal(t, sl.WriteLatency, int64(494479456987)) + assert.Equal(t, sl.CommandOpsCnt, int64(1019152861)) + assert.Equal(t, sl.ReadOpsCnt, int64(4189049884)) + assert.Equal(t, sl.WriteOpsCnt, int64(1691021287)) +} From 07b75c57fe3aa3794707ae4b8ac3067d8e09d5c9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 8 Jan 2020 18:19:16 -0800 Subject: [PATCH 1449/1815] Update 
changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5c5216e7f..64cdb7852 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ - [#6869](https://github.com/influxdata/telegraf/pull/6869): Add RBPEX IO statistics to DatabaseIO query in sqlserver input. - [#6869](https://github.com/influxdata/telegraf/pull/6869): Add space on disk for each file to DatabaseIO query in the sqlserver input. - [#6869](https://github.com/influxdata/telegraf/pull/6869): Calculate DB Name instead of GUID in physical_db_name in the sqlserver input. +- [#6733](https://github.com/influxdata/telegraf/pull/6733): Add latency stats to mongo input. #### Bugfixes From ce02bebf3037be3c6043652fa61e5dc5adcc135a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20H=C3=89BERT?= Date: Thu, 9 Jan 2020 20:29:16 +0100 Subject: [PATCH 1450/1815] Add output plugin for Warp10 (#1923) --- README.md | 1 + plugins/outputs/all/all.go | 1 + plugins/outputs/warp10/README.md | 30 +++ plugins/outputs/warp10/warp10.go | 282 ++++++++++++++++++++++++++ plugins/outputs/warp10/warp10_test.go | 108 ++++++++++ 5 files changed, 422 insertions(+) create mode 100644 plugins/outputs/warp10/README.md create mode 100644 plugins/outputs/warp10/warp10.go create mode 100644 plugins/outputs/warp10/warp10_test.go diff --git a/README.md b/README.md index 4150ec17a..3276f33bf 100644 --- a/README.md +++ b/README.md @@ -409,4 +409,5 @@ For documentation on the latest development code see the [documentation index][d * [syslog](./plugins/outputs/syslog) * [tcp](./plugins/outputs/socket_writer) * [udp](./plugins/outputs/socket_writer) +* [warp10](./plugins/outputs/warp10) * [wavefront](./plugins/outputs/wavefront) diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index e40230993..35e0393de 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -33,5 +33,6 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/socket_writer" _ "github.com/influxdata/telegraf/plugins/outputs/stackdriver" _ "github.com/influxdata/telegraf/plugins/outputs/syslog" + _ "github.com/influxdata/telegraf/plugins/outputs/warp10" _ "github.com/influxdata/telegraf/plugins/outputs/wavefront" ) diff --git a/plugins/outputs/warp10/README.md b/plugins/outputs/warp10/README.md new file mode 100644 index 000000000..df56e6816 --- /dev/null +++ b/plugins/outputs/warp10/README.md @@ -0,0 +1,30 @@ +# README # + +Telegraph plugin to push metrics on Warp10 + +### Telegraph output for Warp10 ### + +Execute a post http on Warp10 at every flush time configured in telegraph in order to push the metrics collected + +### Config ### + +Add following instruction in the config file (Output part) + +``` +[[outputs.warp10]] +warpUrl = "http://localhost:4242" +token = "token" +prefix = "telegraf." +timeout = "15s" +``` + +To get more details on Warp 10 errors occuring when pushing data with Telegraf, you can optionaly set: + +``` +printErrorBody = true ## To print the full body of the HTTP Post instead of the request status +maxStringErrorSize = 700  ## To update the maximal string size of the Warp 10 error body. By default it's set to 512. +``` + +### Values format + +The Warp 10 output support natively number, float and boolean values. String are send as URL encoded values as well as all Influx objects. 
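For orientation, a minimal sketch (mine, not part of the patch; the measurement, field, and tag values are invented) of the Geo Time Series input line that the new plugin's `GenWarp10Payload` assembles for every metric field in the warp10.go diff that follows — timestamp in microseconds, class name built from the prefix, measurement name, and field key, with a `source=telegraf` tag always appended by `buildTags`:

```go
package main

import "fmt"

func main() {
	// Mirrors the plugin's fmt.Sprintf("%d// %s{%s} %s\n", ...) line format.
	tsMicros := int64(1257894000000000)               // flush time in microseconds since epoch
	class := "telegraf." + "cpu" + "." + "usage_idle" // prefix + measurement + "." + field key
	tags := "host=myhost,source=telegraf"             // tags sorted, source=telegraf appended
	value := "95.000000"
	fmt.Printf("%d// %s{%s} %s\n", tsMicros, class, tags, value)
	// Output: 1257894000000000// telegraf.cpu.usage_idle{host=myhost,source=telegraf} 95.000000
}
```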
\ No newline at end of file diff --git a/plugins/outputs/warp10/warp10.go b/plugins/outputs/warp10/warp10.go new file mode 100644 index 000000000..deaefc6fc --- /dev/null +++ b/plugins/outputs/warp10/warp10.go @@ -0,0 +1,282 @@ +package warp10 + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "net/http" + "sort" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/outputs" +) + +const ( + defaultClientTimeout = 15 * time.Second +) + +// Warp10 output plugin +type Warp10 struct { + Prefix string + WarpURL string + Token string + Timeout internal.Duration `toml:"timeout"` + PrintErrorBody bool + MaxStringErrorSize int + client *http.Client + tls.ClientConfig +} + +var sampleConfig = ` + # prefix for metrics class Name + prefix = "telegraf." + ## POST HTTP(or HTTPS) ## + # Url name of the Warp 10 server + warp_url = "http://localhost:8080" + # Token to access your app on warp 10 + token = "Token" + # Warp 10 query timeout, by default 15s + timeout = "15s" + ## Optional Print Warp 10 error body + # print_error_body = false + ## Optional Max string error Size + # max_string_error_size = 511 + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" +` + +// MetricLine Warp 10 metrics +type MetricLine struct { + Metric string + Timestamp int64 + Value string + Tags string +} + +func (w *Warp10) createClient() (*http.Client, error) { + tlsCfg, err := w.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + if w.Timeout.Duration == 0 { + w.Timeout.Duration = defaultClientTimeout + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + Proxy: http.ProxyFromEnvironment, + }, + Timeout: w.Timeout.Duration, + } + + return client, nil +} + +// Connect to warp10 +func (w *Warp10) Connect() error { + client, err := w.createClient() + if err != nil { + return err + } + + w.client = client + return nil +} + +// GenWarp10Payload compute Warp 10 metrics payload +func (w *Warp10) GenWarp10Payload(metrics []telegraf.Metric, now time.Time) string { + collectString := make([]string, 0) + for _, mm := range metrics { + + for _, field := range mm.FieldList() { + + metric := &MetricLine{ + Metric: fmt.Sprintf("%s%s", w.Prefix, mm.Name()+"."+field.Key), + Timestamp: now.UnixNano() / 1000, + } + + metricValue, err := buildValue(field.Value) + if err != nil { + log.Printf("E! 
[outputs.warp10] Could not encode value: %v", err) + continue + } + metric.Value = metricValue + + tagsSlice := buildTags(mm.TagList()) + metric.Tags = strings.Join(tagsSlice, ",") + + messageLine := fmt.Sprintf("%d// %s{%s} %s\n", metric.Timestamp, metric.Metric, metric.Tags, metric.Value) + + collectString = append(collectString, messageLine) + } + } + return fmt.Sprint(strings.Join(collectString, "")) +} + +// Write metrics to Warp10 +func (w *Warp10) Write(metrics []telegraf.Metric) error { + + var now = time.Now() + payload := w.GenWarp10Payload(metrics, now) + + if payload == "" { + return nil + } + + req, err := http.NewRequest("POST", w.WarpURL+"/api/v0/update", bytes.NewBufferString(payload)) + req.Header.Set("X-Warp10-Token", w.Token) + req.Header.Set("Content-Type", "text/plain") + + resp, err := w.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + if w.PrintErrorBody { + body, _ := ioutil.ReadAll(resp.Body) + return fmt.Errorf(w.WarpURL + ": " + w.HandleError(string(body), w.MaxStringErrorSize)) + } + + if len(resp.Status) < w.MaxStringErrorSize { + return fmt.Errorf(w.WarpURL + ": " + resp.Status) + } + + return fmt.Errorf(w.WarpURL + ": " + resp.Status[0:w.MaxStringErrorSize]) + } + + return nil +} + +func buildTags(tags []*telegraf.Tag) []string { + + tagsString := make([]string, len(tags)+1) + indexSource := 0 + for index, tag := range tags { + tagsString[index] = fmt.Sprintf("%s=%s", tag.Key, tag.Value) + indexSource = index + } + indexSource++ + tagsString[indexSource] = fmt.Sprintf("source=telegraf") + sort.Strings(tagsString) + return tagsString +} + +func buildValue(v interface{}) (string, error) { + var retv string + switch p := v.(type) { + case int64: + retv = intToString(int64(p)) + case string: + retv = fmt.Sprintf("'%s'", strings.Replace(p, "'", "\\'", -1)) + case bool: + retv = boolToString(bool(p)) + case uint64: + retv = uIntToString(uint64(p)) + case float64: + retv = floatToString(float64(p)) + default: + retv = "'" + strings.Replace(fmt.Sprintf("%s", p), "'", "\\'", -1) + "'" + } + return retv, nil +} + +func intToString(inputNum int64) string { + return strconv.FormatInt(inputNum, 10) +} + +func boolToString(inputBool bool) string { + return strconv.FormatBool(inputBool) +} + +func uIntToString(inputNum uint64) string { + return strconv.FormatUint(inputNum, 10) +} + +func floatToString(inputNum float64) string { + return strconv.FormatFloat(inputNum, 'f', 6, 64) +} + +// SampleConfig get config +func (w *Warp10) SampleConfig() string { + return sampleConfig +} + +// Description get description +func (w *Warp10) Description() string { + return "Configuration for Warp server to send metrics to" +} + +// Close close +func (w *Warp10) Close() error { + return nil +} + +// Init Warp10 struct +func (w *Warp10) Init() error { + if w.MaxStringErrorSize <= 0 { + w.MaxStringErrorSize = 511 + } + return nil +} + +func init() { + outputs.Add("warp10", func() telegraf.Output { + return &Warp10{} + }) +} + +// HandleError read http error body and return a corresponding error +func (w *Warp10) HandleError(body string, maxStringSize int) string { + if body == "" { + return "Empty return" + } + + if strings.Contains(body, "Invalid token") { + return "Invalid token" + } + + if strings.Contains(body, "Write token missing") { + return "Write token missing" + } + + if strings.Contains(body, "Token Expired") { + return "Token Expired" + } + + if strings.Contains(body, "Token revoked") { + return "Token 
revoked" + } + + if strings.Contains(body, "exceed your Monthly Active Data Streams limit") || strings.Contains(body, "exceed the Monthly Active Data Streams limit") { + return "Exceeded Monthly Active Data Streams limit" + } + + if strings.Contains(body, "Daily Data Points limit being already exceeded") { + return "Exceeded Daily Data Points limit" + } + + if strings.Contains(body, "Application suspended or closed") { + return "Application suspended or closed" + } + + if strings.Contains(body, "broken pipe") { + return "broken pipe" + } + + if len(body) < maxStringSize { + return body + } + return body[0:maxStringSize] +} diff --git a/plugins/outputs/warp10/warp10_test.go b/plugins/outputs/warp10/warp10_test.go new file mode 100644 index 000000000..e222b7d93 --- /dev/null +++ b/plugins/outputs/warp10/warp10_test.go @@ -0,0 +1,108 @@ +package warp10 + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" +) + +type ErrorTest struct { + Message string + Expected string +} + +func TestWriteWarp10(t *testing.T) { + w := Warp10{ + Prefix: "unit.test", + WarpURL: "http://localhost:8090", + Token: "WRITE", + } + + var now = time.Now() + payload := w.GenWarp10Payload(testutil.MockMetrics(), now) + require.Exactly(t, fmt.Sprintf("%d// unit.testtest1.value{source=telegraf,tag1=value1} 1.000000\n", now.UnixNano()/1000), payload) +} + +func TestHandleWarp10Error(t *testing.T) { + w := Warp10{ + Prefix: "unit.test", + WarpURL: "http://localhost:8090", + Token: "WRITE", + } + tests := [...]*ErrorTest{ + { + Message: ` + + + + Error 500 io.warp10.script.WarpScriptException: Invalid token. + +

+<body><h2>HTTP ERROR 500</h2>
+<p>Problem accessing /api/v0/update. Reason:
+<pre>    io.warp10.script.WarpScriptException: Invalid token.</pre></p>
+</body>
+</html>
+`,
+			Expected: fmt.Sprintf("Invalid token"),
+		},
+		{
+			Message: `<html>
+<head>
+<meta http-equiv="Content-Type" content="text/html;charset=utf-8"/>
+<title>Error 500 io.warp10.script.WarpScriptException: Token Expired.</title>
+</head>

+<body><h2>HTTP ERROR 500</h2>
+<p>Problem accessing /api/v0/update. Reason:
+<pre>    io.warp10.script.WarpScriptException: Token Expired.</pre></p>
+</body>
+</html>
+`,
+			Expected: fmt.Sprintf("Token Expired"),
+		},
+		{
+			Message: `<html>
+<head>
+<meta http-equiv="Content-Type" content="text/html;charset=utf-8"/>
+<title>Error 500 io.warp10.script.WarpScriptException: Token revoked.</title>
+</head>

+<body><h2>HTTP ERROR 500</h2>
+<p>Problem accessing /api/v0/update. Reason:
+<pre>    io.warp10.script.WarpScriptException: Token revoked.</pre></p>
+</body>
+</html>
+`,
+			Expected: fmt.Sprintf("Token revoked"),
+		},
+		{
+			Message: `<html>
+<head>
+<meta http-equiv="Content-Type" content="text/html;charset=utf-8"/>
+<title>Error 500 io.warp10.script.WarpScriptException: Write token missing.</title>
+</head>

+<body><h2>HTTP ERROR 500</h2>
+<p>Problem accessing /api/v0/update. Reason:
+<pre>    io.warp10.script.WarpScriptException: Write token missing.</pre></p>
+ + + `, + Expected: "Write token missing", + }, + { + Message: `Error 503: server unavailable`, + Expected: "Error 503: server unavailable", + }, + } + + for _, handledError := range tests { + payload := w.HandleError(handledError.Message, 511) + require.Exactly(t, handledError.Expected, payload) + } + +} From 7faf05023dbbebc15b9628e25020add36df202be Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 9 Jan 2020 13:57:14 -0800 Subject: [PATCH 1451/1815] Update Warp10 docs and uint64 and timestamp handling (#6885) --- CHANGELOG.md | 4 ++ plugins/outputs/warp10/README.md | 62 ++++++++++++++++++--------- plugins/outputs/warp10/warp10.go | 57 +++++++++++++----------- plugins/outputs/warp10/warp10_test.go | 9 ++-- 4 files changed, 81 insertions(+), 51 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 64cdb7852..9efc05a34 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,10 @@ renamed to `sqlserver_azure_db_resource_stats` due to an issue where numeric metrics were previously being reported incorrectly as strings. +#### New Outputs + +- [warp10](/plugins/outputs/warp10/README.md) - Contributed by @aurrelhebert + #### Features - [#6730](https://github.com/influxdata/telegraf/pull/6730): Add page_faults for mongodb wired tiger. diff --git a/plugins/outputs/warp10/README.md b/plugins/outputs/warp10/README.md index df56e6816..07e6cd25b 100644 --- a/plugins/outputs/warp10/README.md +++ b/plugins/outputs/warp10/README.md @@ -1,30 +1,50 @@ -# README # +# Warp10 Output Plugin -Telegraph plugin to push metrics on Warp10 +The `warp10` output plugin writes metrics to [Warp 10][]. -### Telegraph output for Warp10 ### +### Configuration -Execute a post http on Warp10 at every flush time configured in telegraph in order to push the metrics collected - -### Config ### - -Add following instruction in the config file (Output part) - -``` +```toml [[outputs.warp10]] -warpUrl = "http://localhost:4242" -token = "token" -prefix = "telegraf." -timeout = "15s" + # Prefix to add to the measurement. + prefix = "telegraf." + + # URL of the Warp 10 server + warp_url = "http://localhost:8080" + + # Write token to access your app on warp 10 + token = "Token" + + # Warp 10 query timeout + # timeout = "15s" + + ## Print Warp 10 error body + # print_error_body = false + + ## Max string error size + # max_string_error_size = 511 + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ``` -To get more details on Warp 10 errors occuring when pushing data with Telegraf, you can optionaly set: +### Output Format -``` -printErrorBody = true ## To print the full body of the HTTP Post instead of the request status -maxStringErrorSize = 700  ## To update the maximal string size of the Warp 10 error body. By default it's set to 512. -``` +Metrics are converted and sent using the [Geo Time Series][] (GTS) input format. -### Values format +The class name of the reading is produced by combining the value of the +`prefix` option, the measurement name, and the field key. A dot (`.`) +character is used as the joining character. -The Warp 10 output support natively number, float and boolean values. String are send as URL encoded values as well as all Influx objects. \ No newline at end of file +The GTS form provides support for the Telegraf integer, float, boolean, and +string types directly. 
Unsigned integer fields will be capped to the largest +64-bit integer (2^63-1) in case of overflow. + +Timestamps are sent in microsecond precision. + +[Warp 10]: https://www.warp10.io +[Geo Time Series]: https://www.warp10.io/content/03_Documentation/03_Interacting_with_Warp_10/03_Ingesting_data/02_GTS_input_format diff --git a/plugins/outputs/warp10/warp10.go b/plugins/outputs/warp10/warp10.go index deaefc6fc..eead153e0 100644 --- a/plugins/outputs/warp10/warp10.go +++ b/plugins/outputs/warp10/warp10.go @@ -5,6 +5,7 @@ import ( "fmt" "io/ioutil" "log" + "math" "net/http" "sort" "strconv" @@ -23,34 +24,41 @@ const ( // Warp10 output plugin type Warp10 struct { - Prefix string - WarpURL string - Token string + Prefix string `toml:"prefix"` + WarpURL string `toml:"warp_url"` + Token string `toml:"token"` Timeout internal.Duration `toml:"timeout"` - PrintErrorBody bool - MaxStringErrorSize int + PrintErrorBody bool `toml:"print_error_body"` + MaxStringErrorSize int `toml:"max_string_error_size"` client *http.Client tls.ClientConfig } var sampleConfig = ` - # prefix for metrics class Name + # Prefix to add to the measurement. prefix = "telegraf." - ## POST HTTP(or HTTPS) ## - # Url name of the Warp 10 server + + # URL of the Warp 10 server warp_url = "http://localhost:8080" - # Token to access your app on warp 10 + + # Write token to access your app on warp 10 token = "Token" - # Warp 10 query timeout, by default 15s - timeout = "15s" - ## Optional Print Warp 10 error body + + # Warp 10 query timeout + # timeout = "15s" + + ## Print Warp 10 error body # print_error_body = false - ## Optional Max string error Size + + ## Max string error size # max_string_error_size = 511 + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ` // MetricLine Warp 10 metrics @@ -94,7 +102,7 @@ func (w *Warp10) Connect() error { } // GenWarp10Payload compute Warp 10 metrics payload -func (w *Warp10) GenWarp10Payload(metrics []telegraf.Metric, now time.Time) string { +func (w *Warp10) GenWarp10Payload(metrics []telegraf.Metric) string { collectString := make([]string, 0) for _, mm := range metrics { @@ -102,7 +110,7 @@ func (w *Warp10) GenWarp10Payload(metrics []telegraf.Metric, now time.Time) stri metric := &MetricLine{ Metric: fmt.Sprintf("%s%s", w.Prefix, mm.Name()+"."+field.Key), - Timestamp: now.UnixNano() / 1000, + Timestamp: mm.Time().UnixNano() / 1000, } metricValue, err := buildValue(field.Value) @@ -125,10 +133,7 @@ func (w *Warp10) GenWarp10Payload(metrics []telegraf.Metric, now time.Time) stri // Write metrics to Warp10 func (w *Warp10) Write(metrics []telegraf.Metric) error { - - var now = time.Now() - payload := w.GenWarp10Payload(metrics, now) - + payload := w.GenWarp10Payload(metrics) if payload == "" { return nil } @@ -177,17 +182,21 @@ func buildValue(v interface{}) (string, error) { var retv string switch p := v.(type) { case int64: - retv = intToString(int64(p)) + retv = intToString(p) case string: retv = fmt.Sprintf("'%s'", strings.Replace(p, "'", "\\'", -1)) case bool: - retv = boolToString(bool(p)) + retv = boolToString(p) case uint64: - retv = uIntToString(uint64(p)) + if p <= uint64(math.MaxInt64) { + retv = strconv.FormatInt(int64(p), 10) + } else { + retv = strconv.FormatInt(math.MaxInt64, 10) + } case float64: retv = floatToString(float64(p)) default: - retv = "'" + strings.Replace(fmt.Sprintf("%s", p), "'", "\\'", -1) + "'" + 
return "", fmt.Errorf("unsupported type: %T", v) } return retv, nil } @@ -215,7 +224,7 @@ func (w *Warp10) SampleConfig() string { // Description get description func (w *Warp10) Description() string { - return "Configuration for Warp server to send metrics to" + return "Write metrics to Warp 10" } // Close close diff --git a/plugins/outputs/warp10/warp10_test.go b/plugins/outputs/warp10/warp10_test.go index e222b7d93..5b543b34c 100644 --- a/plugins/outputs/warp10/warp10_test.go +++ b/plugins/outputs/warp10/warp10_test.go @@ -3,11 +3,9 @@ package warp10 import ( "fmt" "testing" - "time" - - "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) type ErrorTest struct { @@ -22,9 +20,8 @@ func TestWriteWarp10(t *testing.T) { Token: "WRITE", } - var now = time.Now() - payload := w.GenWarp10Payload(testutil.MockMetrics(), now) - require.Exactly(t, fmt.Sprintf("%d// unit.testtest1.value{source=telegraf,tag1=value1} 1.000000\n", now.UnixNano()/1000), payload) + payload := w.GenWarp10Payload(testutil.MockMetrics()) + require.Exactly(t, "1257894000000000// unit.testtest1.value{source=telegraf,tag1=value1} 1.000000\n", payload) } func TestHandleWarp10Error(t *testing.T) { From 1b1d78b8dd3c95154ef06eef249968f6ccdb9813 Mon Sep 17 00:00:00 2001 From: vikkyomkar Date: Fri, 10 Jan 2020 07:22:26 +0530 Subject: [PATCH 1452/1815] Add source and port tags to jenkins_job metrics (#6844) --- plugins/inputs/jenkins/README.md | 25 +++++++++++--------- plugins/inputs/jenkins/jenkins.go | 32 ++++++++++++++++++-------- plugins/inputs/jenkins/jenkins_test.go | 1 + 3 files changed, 38 insertions(+), 20 deletions(-) diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md index 55dd4bb6b..2bbfd157e 100644 --- a/plugins/inputs/jenkins/README.md +++ b/plugins/inputs/jenkins/README.md @@ -60,14 +60,15 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API - node_name - status ("online", "offline") - source + - port - fields: - - disk_available - - temp_available - - memory_available - - memory_total - - swap_available - - swap_total - - response_time + - disk_available (Bytes) + - temp_available (Bytes) + - memory_available (Bytes) + - memory_total (Bytes) + - swap_available (Bytes) + - swap_total (Bytes) + - response_time (ms) - num_executors - jenkins_job @@ -76,8 +77,9 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API - parents - result - source + - port - fields: - - duration + - duration (ms) - result_code (0 = SUCCESS, 1 = FAILURE, 2 = NOT_BUILD, 3 = UNSTABLE, 4 = ABORTED) ### Sample Queries: @@ -94,7 +96,8 @@ SELECT mean("duration") AS "mean_duration" FROM "jenkins_job" WHERE time > now() ``` $ ./telegraf --config telegraf.conf --input-filter jenkins --test -jenkins_node,arch=Linux\ (amd64),disk_path=/var/jenkins_home,temp_path=/tmp,host=myhost,node_name=master swap_total=4294963200,memory_available=586711040,memory_total=6089498624,status=online,response_time=1000i,disk_available=152392036352,temp_available=152392036352,swap_available=3503263744 1516031535000000000 -jenkins_job,host=myhost,name=JOB1,parents=apps/br1,result=SUCCESS duration=2831i,result_code=0i 1516026630000000000 -jenkins_job,host=myhost,name=JOB2,parents=apps/br2,result=SUCCESS duration=2285i,result_code=0i 1516027230000000000 +jenkins_node,arch=Linux\ (amd64),disk_path=/var/jenkins_home,temp_path=/tmp,host=myhost,node_name=master,source=my-jenkins-instance,port=8080 
swap_total=4294963200,memory_available=586711040,memory_total=6089498624,status=online,response_time=1000i,disk_available=152392036352,temp_available=152392036352,swap_available=3503263744,num_executors=2i 1516031535000000000 +jenkins_job,host=myhost,name=JOB1,parents=apps/br1,result=SUCCESS,source=my-jenkins-instance,port=8080 duration=2831i,result_code=0i 1516026630000000000 +jenkins_job,host=myhost,name=JOB2,parents=apps/br2,result=SUCCESS,source=my-jenkins-instance,port=8080 duration=2285i,result_code=0i 1516027230000000000 ``` + diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go index 528d99c77..7a2b19d95 100644 --- a/plugins/inputs/jenkins/jenkins.go +++ b/plugins/inputs/jenkins/jenkins.go @@ -22,6 +22,8 @@ type Jenkins struct { URL string Username string Password string + Source string + Port string // HTTP Timeout specified as a string - 3s, 1m, 1h ResponseTimeout internal.Duration @@ -138,6 +140,22 @@ func (j *Jenkins) newHTTPClient() (*http.Client, error) { func (j *Jenkins) initialize(client *http.Client) error { var err error + // init jenkins tags + u, err := url.Parse(j.URL) + if err != nil { + return err + } + if u.Port() == "" { + if u.Scheme == "http" { + j.Port = "80" + } else if u.Scheme == "https" { + j.Port = "443" + } + } else { + j.Port = u.Port() + } + j.Source = u.Hostname() + // init job filter j.jobFilter, err = filter.Compile(j.JobExclude) if err != nil { @@ -191,12 +209,8 @@ func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error { tags["status"] = "offline" } - u, err := url.Parse(j.URL) - if err != nil { - return err - } - tags["source"] = u.Hostname() - tags["port"] = u.Port() + tags["source"] = j.Source + tags["port"] = j.Port fields := make(map[string]interface{}) fields["num_executors"] = n.NumExecutors @@ -334,7 +348,7 @@ func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error { return nil } - gatherJobBuild(jr, build, acc) + j.gatherJobBuild(jr, build, acc) return nil } @@ -432,8 +446,8 @@ func (jr jobRequest) parentsString() string { return strings.Join(jr.parents, "/") } -func gatherJobBuild(jr jobRequest, b *buildResponse, acc telegraf.Accumulator) { - tags := map[string]string{"name": jr.name, "parents": jr.parentsString(), "result": b.Result} +func (j *Jenkins) gatherJobBuild(jr jobRequest, b *buildResponse, acc telegraf.Accumulator) { + tags := map[string]string{"name": jr.name, "parents": jr.parentsString(), "result": b.Result, "source": j.Source, "port": j.Port} fields := make(map[string]interface{}) fields["duration"] = b.Duration fields["result_code"] = mapResultCode(b.Result) diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go index dcbb5a46d..6233bb83f 100644 --- a/plugins/inputs/jenkins/jenkins_test.go +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -1,3 +1,4 @@ +// Test Suite package jenkins import ( From 949ac7471fc0d82deebeb49b0fe3a35f7655d200 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 9 Jan 2020 17:53:18 -0800 Subject: [PATCH 1453/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9efc05a34..fafb21f5e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ - [#6869](https://github.com/influxdata/telegraf/pull/6869): Add space on disk for each file to DatabaseIO query in the sqlserver input. - [#6869](https://github.com/influxdata/telegraf/pull/6869): Calculate DB Name instead of GUID in physical_db_name in the sqlserver input. 
- [#6733](https://github.com/influxdata/telegraf/pull/6733): Add latency stats to mongo input. +- [#6844](https://github.com/influxdata/telegraf/pull/6844): Add source and port tags to jenkins_job metrics. #### Bugfixes From 0cee84fa6aadb085805142d949bf8b56a94e34bc Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 10 Jan 2020 12:43:28 -0800 Subject: [PATCH 1454/1815] Warn without error when processes input is started on Windows (#6891) --- plugins/inputs/processes/README.md | 17 +- plugins/inputs/processes/processes.go | 234 ----------------- .../inputs/processes/processes_notwindows.go | 235 ++++++++++++++++++ plugins/inputs/processes/processes_windows.go | 24 ++ 4 files changed, 266 insertions(+), 244 deletions(-) create mode 100644 plugins/inputs/processes/processes_notwindows.go diff --git a/plugins/inputs/processes/README.md b/plugins/inputs/processes/README.md index 4113f0d3a..756326d75 100644 --- a/plugins/inputs/processes/README.md +++ b/plugins/inputs/processes/README.md @@ -6,7 +6,9 @@ them by status (zombie, sleeping, running, etc.) On linux this plugin requires access to procfs (/proc), on other OSes it requires access to execute `ps`. -### Configuration: +**Supported Platforms**: Linux, FreeBSD, Darwin + +### Configuration ```toml # Get the number of processes and group them by status @@ -19,9 +21,10 @@ Using the environment variable `HOST_PROC` the plugin will retrieve process info `docker run -v /proc:/rootfs/proc:ro -e HOST_PROC=/rootfs/proc` -### Measurements & Fields: +### Metrics - processes + - fields: - blocked (aka disk sleep or uninterruptible sleep) - running - sleeping @@ -53,14 +56,8 @@ Linux FreeBSD Darwin meaning W W none paging (linux kernel < 2.6 only), wait (freebsd) ``` -### Tags: - -None - -### Example Output: +### Example Output ``` -$ telegraf --config ~/ws/telegraf.conf --input-filter processes --test -* Plugin: processes, Collection 1 -> processes blocked=8i,running=1i,sleeping=265i,stopped=0i,total=274i,zombie=0i,dead=0i,paging=0i,total_threads=687i 1457478636980905042 +processes blocked=8i,running=1i,sleeping=265i,stopped=0i,total=274i,zombie=0i,dead=0i,paging=0i,total_threads=687i 1457478636980905042 ``` diff --git a/plugins/inputs/processes/processes.go b/plugins/inputs/processes/processes.go index 4421010d5..9ee583dba 100644 --- a/plugins/inputs/processes/processes.go +++ b/plugins/inputs/processes/processes.go @@ -1,241 +1,7 @@ -// +build !windows - package processes -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "syscall" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/inputs/linux_sysctl_fs" -) - -type Processes struct { - execPS func() ([]byte, error) - readProcFile func(filename string) ([]byte, error) - - Log telegraf.Logger - - forcePS bool - forceProc bool -} - func (p *Processes) Description() string { return "Get the number of processes and group them by status" } func (p *Processes) SampleConfig() string { return "" } - -func (p *Processes) Gather(acc telegraf.Accumulator) error { - // Get an empty map of metric fields - fields := getEmptyFields() - - // Decide if we will use 'ps' to get stats (use procfs otherwise) - usePS := true - if runtime.GOOS == "linux" { - usePS = false - } - if p.forcePS { - usePS = true - } else if p.forceProc { - usePS = false - } - - // Gather stats from 'ps' or procfs - if usePS { - if err := p.gatherFromPS(fields); err != nil { - return err - } - } else { - if err 
:= p.gatherFromProc(fields); err != nil { - return err - } - } - - acc.AddGauge("processes", fields, nil) - return nil -} - -// Gets empty fields of metrics based on the OS -func getEmptyFields() map[string]interface{} { - fields := map[string]interface{}{ - "blocked": int64(0), - "zombies": int64(0), - "stopped": int64(0), - "running": int64(0), - "sleeping": int64(0), - "total": int64(0), - "unknown": int64(0), - } - switch runtime.GOOS { - case "freebsd": - fields["idle"] = int64(0) - fields["wait"] = int64(0) - case "darwin": - fields["idle"] = int64(0) - case "openbsd": - fields["idle"] = int64(0) - case "linux": - fields["dead"] = int64(0) - fields["paging"] = int64(0) - fields["total_threads"] = int64(0) - fields["idle"] = int64(0) - } - return fields -} - -// exec `ps` to get all process states -func (p *Processes) gatherFromPS(fields map[string]interface{}) error { - out, err := p.execPS() - if err != nil { - return err - } - - for i, status := range bytes.Fields(out) { - if i == 0 && string(status) == "STAT" { - // This is a header, skip it - continue - } - switch status[0] { - case 'W': - fields["wait"] = fields["wait"].(int64) + int64(1) - case 'U', 'D', 'L': - // Also known as uninterruptible sleep or disk sleep - fields["blocked"] = fields["blocked"].(int64) + int64(1) - case 'Z': - fields["zombies"] = fields["zombies"].(int64) + int64(1) - case 'X': - fields["dead"] = fields["dead"].(int64) + int64(1) - case 'T': - fields["stopped"] = fields["stopped"].(int64) + int64(1) - case 'R': - fields["running"] = fields["running"].(int64) + int64(1) - case 'S': - fields["sleeping"] = fields["sleeping"].(int64) + int64(1) - case 'I': - fields["idle"] = fields["idle"].(int64) + int64(1) - case '?': - fields["unknown"] = fields["unknown"].(int64) + int64(1) - default: - p.Log.Infof("Unknown state %q from ps", string(status[0])) - } - fields["total"] = fields["total"].(int64) + int64(1) - } - return nil -} - -// get process states from /proc/(pid)/stat files -func (p *Processes) gatherFromProc(fields map[string]interface{}) error { - filenames, err := filepath.Glob(linux_sysctl_fs.GetHostProc() + "/[0-9]*/stat") - - if err != nil { - return err - } - - for _, filename := range filenames { - _, err := os.Stat(filename) - data, err := p.readProcFile(filename) - if err != nil { - return err - } - if data == nil { - continue - } - - // Parse out data after () - i := bytes.LastIndex(data, []byte(")")) - if i == -1 { - continue - } - data = data[i+2:] - - stats := bytes.Fields(data) - if len(stats) < 3 { - return fmt.Errorf("Something is terribly wrong with %s", filename) - } - switch stats[0][0] { - case 'R': - fields["running"] = fields["running"].(int64) + int64(1) - case 'S': - fields["sleeping"] = fields["sleeping"].(int64) + int64(1) - case 'D': - fields["blocked"] = fields["blocked"].(int64) + int64(1) - case 'Z': - fields["zombies"] = fields["zombies"].(int64) + int64(1) - case 'X': - fields["dead"] = fields["dead"].(int64) + int64(1) - case 'T', 't': - fields["stopped"] = fields["stopped"].(int64) + int64(1) - case 'W': - fields["paging"] = fields["paging"].(int64) + int64(1) - case 'I': - fields["idle"] = fields["idle"].(int64) + int64(1) - case 'P': - if _, ok := fields["parked"]; ok { - fields["parked"] = fields["parked"].(int64) + int64(1) - } - fields["parked"] = int64(1) - default: - p.Log.Infof("Unknown state %q in file %q", string(stats[0][0]), filename) - } - fields["total"] = fields["total"].(int64) + int64(1) - - threads, err := strconv.Atoi(string(stats[17])) - if err != 
nil { - p.Log.Infof("Error parsing thread count: %s", err.Error()) - continue - } - fields["total_threads"] = fields["total_threads"].(int64) + int64(threads) - } - return nil -} - -func readProcFile(filename string) ([]byte, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - - // Reading from /proc/ fails with ESRCH if the process has - // been terminated between open() and read(). - if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ESRCH { - return nil, nil - } - - return nil, err - } - - return data, nil -} - -func execPS() ([]byte, error) { - bin, err := exec.LookPath("ps") - if err != nil { - return nil, err - } - - out, err := exec.Command(bin, "axo", "state").Output() - if err != nil { - return nil, err - } - - return out, err -} - -func init() { - inputs.Add("processes", func() telegraf.Input { - return &Processes{ - execPS: execPS, - readProcFile: readProcFile, - } - }) -} diff --git a/plugins/inputs/processes/processes_notwindows.go b/plugins/inputs/processes/processes_notwindows.go new file mode 100644 index 000000000..445e7fb9f --- /dev/null +++ b/plugins/inputs/processes/processes_notwindows.go @@ -0,0 +1,235 @@ +// +build !windows + +package processes + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "syscall" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/linux_sysctl_fs" +) + +type Processes struct { + execPS func() ([]byte, error) + readProcFile func(filename string) ([]byte, error) + + Log telegraf.Logger + + forcePS bool + forceProc bool +} + +func (p *Processes) Gather(acc telegraf.Accumulator) error { + // Get an empty map of metric fields + fields := getEmptyFields() + + // Decide if we will use 'ps' to get stats (use procfs otherwise) + usePS := true + if runtime.GOOS == "linux" { + usePS = false + } + if p.forcePS { + usePS = true + } else if p.forceProc { + usePS = false + } + + // Gather stats from 'ps' or procfs + if usePS { + if err := p.gatherFromPS(fields); err != nil { + return err + } + } else { + if err := p.gatherFromProc(fields); err != nil { + return err + } + } + + acc.AddGauge("processes", fields, nil) + return nil +} + +// Gets empty fields of metrics based on the OS +func getEmptyFields() map[string]interface{} { + fields := map[string]interface{}{ + "blocked": int64(0), + "zombies": int64(0), + "stopped": int64(0), + "running": int64(0), + "sleeping": int64(0), + "total": int64(0), + "unknown": int64(0), + } + switch runtime.GOOS { + case "freebsd": + fields["idle"] = int64(0) + fields["wait"] = int64(0) + case "darwin": + fields["idle"] = int64(0) + case "openbsd": + fields["idle"] = int64(0) + case "linux": + fields["dead"] = int64(0) + fields["paging"] = int64(0) + fields["total_threads"] = int64(0) + fields["idle"] = int64(0) + } + return fields +} + +// exec `ps` to get all process states +func (p *Processes) gatherFromPS(fields map[string]interface{}) error { + out, err := p.execPS() + if err != nil { + return err + } + + for i, status := range bytes.Fields(out) { + if i == 0 && string(status) == "STAT" { + // This is a header, skip it + continue + } + switch status[0] { + case 'W': + fields["wait"] = fields["wait"].(int64) + int64(1) + case 'U', 'D', 'L': + // Also known as uninterruptible sleep or disk sleep + fields["blocked"] = fields["blocked"].(int64) + int64(1) + case 'Z': + fields["zombies"] = 
fields["zombies"].(int64) + int64(1) + case 'X': + fields["dead"] = fields["dead"].(int64) + int64(1) + case 'T': + fields["stopped"] = fields["stopped"].(int64) + int64(1) + case 'R': + fields["running"] = fields["running"].(int64) + int64(1) + case 'S': + fields["sleeping"] = fields["sleeping"].(int64) + int64(1) + case 'I': + fields["idle"] = fields["idle"].(int64) + int64(1) + case '?': + fields["unknown"] = fields["unknown"].(int64) + int64(1) + default: + p.Log.Infof("Unknown state %q from ps", string(status[0])) + } + fields["total"] = fields["total"].(int64) + int64(1) + } + return nil +} + +// get process states from /proc/(pid)/stat files +func (p *Processes) gatherFromProc(fields map[string]interface{}) error { + filenames, err := filepath.Glob(linux_sysctl_fs.GetHostProc() + "/[0-9]*/stat") + + if err != nil { + return err + } + + for _, filename := range filenames { + _, err := os.Stat(filename) + data, err := p.readProcFile(filename) + if err != nil { + return err + } + if data == nil { + continue + } + + // Parse out data after () + i := bytes.LastIndex(data, []byte(")")) + if i == -1 { + continue + } + data = data[i+2:] + + stats := bytes.Fields(data) + if len(stats) < 3 { + return fmt.Errorf("Something is terribly wrong with %s", filename) + } + switch stats[0][0] { + case 'R': + fields["running"] = fields["running"].(int64) + int64(1) + case 'S': + fields["sleeping"] = fields["sleeping"].(int64) + int64(1) + case 'D': + fields["blocked"] = fields["blocked"].(int64) + int64(1) + case 'Z': + fields["zombies"] = fields["zombies"].(int64) + int64(1) + case 'X': + fields["dead"] = fields["dead"].(int64) + int64(1) + case 'T', 't': + fields["stopped"] = fields["stopped"].(int64) + int64(1) + case 'W': + fields["paging"] = fields["paging"].(int64) + int64(1) + case 'I': + fields["idle"] = fields["idle"].(int64) + int64(1) + case 'P': + if _, ok := fields["parked"]; ok { + fields["parked"] = fields["parked"].(int64) + int64(1) + } + fields["parked"] = int64(1) + default: + p.Log.Infof("Unknown state %q in file %q", string(stats[0][0]), filename) + } + fields["total"] = fields["total"].(int64) + int64(1) + + threads, err := strconv.Atoi(string(stats[17])) + if err != nil { + p.Log.Infof("Error parsing thread count: %s", err.Error()) + continue + } + fields["total_threads"] = fields["total_threads"].(int64) + int64(threads) + } + return nil +} + +func readProcFile(filename string) ([]byte, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + + // Reading from /proc/ fails with ESRCH if the process has + // been terminated between open() and read(). 
+ if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ESRCH { + return nil, nil + } + + return nil, err + } + + return data, nil +} + +func execPS() ([]byte, error) { + bin, err := exec.LookPath("ps") + if err != nil { + return nil, err + } + + out, err := exec.Command(bin, "axo", "state").Output() + if err != nil { + return nil, err + } + + return out, err +} + +func init() { + inputs.Add("processes", func() telegraf.Input { + return &Processes{ + execPS: execPS, + readProcFile: readProcFile, + } + }) +} diff --git a/plugins/inputs/processes/processes_windows.go b/plugins/inputs/processes/processes_windows.go index 32c73f918..567373c7c 100644 --- a/plugins/inputs/processes/processes_windows.go +++ b/plugins/inputs/processes/processes_windows.go @@ -1,3 +1,27 @@ // +build windows package processes + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type Processes struct { + Log telegraf.Logger +} + +func (e *Processes) Init() error { + e.Log.Warn("Current platform is not supported") + return nil +} + +func (e *Processes) Gather(acc telegraf.Accumulator) error { + return nil +} + +func init() { + inputs.Add("processes", func() telegraf.Input { + return &Processes{} + }) +} From a93eda95e1ef006050ccc6579a7f33045e1c9c23 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 10 Jan 2020 12:44:44 -0800 Subject: [PATCH 1455/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fafb21f5e..ed999fc5e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,12 @@ - [#6397](https://github.com/influxdata/telegraf/issues/6397): Fix conversion to floats in AzureDBResourceStats query in the sqlserver input. - [#6867](https://github.com/influxdata/telegraf/issues/6867): Fix case sensitive collation in sqlserver input. +## v1.13.2 [unreleased] + +#### Bugfixes + +- [#2652](https://github.com/influxdata/telegraf/issues/2652): Warn without error when processes input is started on Windows. + ## v1.13.1 [2020-01-08] #### Bugfixes From 875bd7743b2efde3936179bb6c3ce0aeff5350fa Mon Sep 17 00:00:00 2001 From: Benjamin Schweizer <234864+benschweizer@users.noreply.github.com> Date: Mon, 13 Jan 2020 19:49:14 +0100 Subject: [PATCH 1456/1815] Only parse certificate blocks in x509_cert input (#6893) --- CHANGELOG.md | 1 + plugins/inputs/x509_cert/x509_cert.go | 10 ++++++---- plugins/inputs/x509_cert/x509_cert_test.go | 1 + 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ed999fc5e..c2644c086 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ ## v1.13.2 [unreleased] #### Bugfixes +- [#6890](https://github.com/influxdata/telegraf/issues/6890): Fix local certificate parsing in x509_certs input. - [#2652](https://github.com/influxdata/telegraf/issues/2652): Warn without error when processes input is started on Windows. 
diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index ad47db663..21e64fcbb 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -103,11 +103,13 @@ func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certifica return nil, fmt.Errorf("failed to parse certificate PEM") } - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, err + if block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certs = append(certs, cert) } - certs = append(certs, cert) if rest == nil || len(rest) == 0 { break } diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index 48559ca6a..fa90a90eb 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -145,6 +145,7 @@ func TestGatherLocal(t *testing.T) { {name: "correct certificate and extra trailing space", mode: 0640, content: pki.ReadServerCert() + " "}, {name: "correct certificate and extra leading space", mode: 0640, content: " " + pki.ReadServerCert()}, {name: "correct multiple certificates", mode: 0640, content: pki.ReadServerCert() + pki.ReadCACert()}, + {name: "correct multiple certificates and key", mode: 0640, content: pki.ReadServerCert() + pki.ReadCACert() + pki.ReadServerKey()}, {name: "correct certificate and wrong certificate", mode: 0640, content: pki.ReadServerCert() + "\n" + wrongCert, error: true}, {name: "correct certificate and not a certificate", mode: 0640, content: pki.ReadServerCert() + "\ntest", error: true}, {name: "correct multiple certificates and extra trailing space", mode: 0640, content: pki.ReadServerCert() + pki.ReadServerCert() + " "}, From b9080593269249440fa6cc4bab8d97b3d0b88586 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 13 Jan 2020 10:50:53 -0800 Subject: [PATCH 1457/1815] Update changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c2644c086..9a7e581d5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,9 +32,9 @@ ## v1.13.2 [unreleased] #### Bugfixes -- [#6890](https://github.com/influxdata/telegraf/issues/6890): Fix local certificate parsing in x509_certs input. - [#2652](https://github.com/influxdata/telegraf/issues/2652): Warn without error when processes input is started on Windows. +- [#6890](https://github.com/influxdata/telegraf/issues/6890): Only parse certificate blocks in x509_cert input. 
## v1.13.1 [2020-01-08] From 78a7978ea8fafb6576826802b1ea14697446e939 Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Mon, 13 Jan 2020 18:15:55 -0500 Subject: [PATCH 1458/1815] Add custom attributes for all resource types in vsphere input (#6884) --- plugins/inputs/vsphere/endpoint.go | 90 +++++++++++++++++++----------- plugins/inputs/vsphere/finder.go | 10 ++-- 2 files changed, 62 insertions(+), 38 deletions(-) diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index 63fe3eb03..98b308dfc 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -84,7 +84,7 @@ type metricEntry struct { fields map[string]interface{} } -type objectMap map[string]objectRef +type objectMap map[string]*objectRef type objectRef struct { name string @@ -100,7 +100,7 @@ type objectRef struct { func (e *Endpoint) getParent(obj *objectRef, res *resourceKind) (*objectRef, bool) { if pKind, ok := e.resourceKinds[res.parent]; ok { if p, ok := pKind.objects[obj.parentRef.Value]; ok { - return &p, true + return p, true } } return nil, false @@ -322,7 +322,7 @@ func (e *Endpoint) getMetricNameMap(ctx context.Context) (map[int32]string, erro return names, nil } -func (e *Endpoint) getMetadata(ctx context.Context, obj objectRef, sampling int32) (performance.MetricList, error) { +func (e *Endpoint) getMetadata(ctx context.Context, obj *objectRef, sampling int32) (performance.MetricList, error) { client, err := e.clientFactory.GetClient(ctx) if err != nil { return nil, err @@ -508,7 +508,7 @@ func (e *Endpoint) simpleMetadataSelect(ctx context.Context, client *Client, res func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, objects objectMap, metricNames map[int32]string) { // We're only going to get metadata from maxMetadataSamples resources. If we have // more resources than that, we pick maxMetadataSamples samples at random. 
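+	// Sampling keeps the number of metadata queries bounded on large inventories.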
- sampledObjects := make([]objectRef, len(objects)) + sampledObjects := make([]*objectRef, len(objects)) i := 0 for _, obj := range objects { sampledObjects[i] = obj @@ -529,7 +529,7 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, instInfoMux := sync.Mutex{} te := NewThrottledExecutor(e.Parent.DiscoverConcurrency) for _, obj := range sampledObjects { - func(obj objectRef) { + func(obj *objectRef) { te.Run(ctx, func() { metrics, err := e.getMetadata(ctx, obj, res.sampling) if err != nil { @@ -573,8 +573,13 @@ func getDatacenters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (o } m := make(objectMap, len(resources)) for _, r := range resources { - m[r.ExtensibleManagedObject.Reference().Value] = objectRef{ - name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Parent, dcname: r.Name} + m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{ + name: r.Name, + ref: r.ExtensibleManagedObject.Reference(), + parentRef: r.Parent, + dcname: r.Name, + customValues: e.loadCustomAttributes(&r.ManagedEntity), + } } return m, nil } @@ -613,8 +618,12 @@ func getClusters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (obje cache[r.Parent.Value] = p } } - m[r.ExtensibleManagedObject.Reference().Value] = objectRef{ - name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: p} + m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{ + name: r.Name, + ref: r.ExtensibleManagedObject.Reference(), + parentRef: p, + customValues: e.loadCustomAttributes(&r.ManagedEntity), + } } return m, nil } @@ -627,8 +636,12 @@ func getHosts(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectM } m := make(objectMap) for _, r := range resources { - m[r.ExtensibleManagedObject.Reference().Value] = objectRef{ - name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Parent} + m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{ + name: r.Name, + ref: r.ExtensibleManagedObject.Reference(), + parentRef: r.Parent, + customValues: e.loadCustomAttributes(&r.ManagedEntity), + } } return m, nil } @@ -693,30 +706,13 @@ func getVMs(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap guest = cleanGuestID(r.Config.GuestId) uuid = r.Config.Uuid } - cvs := make(map[string]string) - if e.customAttrEnabled { - for _, cv := range r.Summary.CustomValue { - val := cv.(*types.CustomFieldStringValue) - if val.Value == "" { - continue - } - key, ok := e.customFields[val.Key] - if !ok { - e.Parent.Log.Warnf("Metadata for custom field %d not found. 
Skipping", val.Key) - continue - } - if e.customAttrFilter.Match(key) { - cvs[key] = val.Value - } - } - } - m[r.ExtensibleManagedObject.Reference().Value] = objectRef{ + m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{ name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Runtime.Host, guest: guest, altID: uuid, - customValues: cvs, + customValues: e.loadCustomAttributes(&r.ManagedEntity), lookup: lookup, } } @@ -740,12 +736,40 @@ func getDatastores(ctx context.Context, e *Endpoint, filter *ResourceFilter) (ob url = info.Url } } - m[r.ExtensibleManagedObject.Reference().Value] = objectRef{ - name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Parent, altID: url} + m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{ + name: r.Name, + ref: r.ExtensibleManagedObject.Reference(), + parentRef: r.Parent, + altID: url, + customValues: e.loadCustomAttributes(&r.ManagedEntity), + } } return m, nil } +func (e *Endpoint) loadCustomAttributes(entity *mo.ManagedEntity) map[string]string { + if !e.customAttrEnabled { + return map[string]string{} + } + cvs := make(map[string]string) + for _, v := range entity.CustomValue { + cv, ok := v.(*types.CustomFieldStringValue) + if !ok { + e.Parent.Log.Warnf("Metadata for custom field %d not of string type. Skipping", cv.Key) + continue + } + key, ok := e.customFields[cv.Key] + if !ok { + e.Parent.Log.Warnf("Metadata for custom field %d not found. Skipping", cv.Key) + continue + } + if e.customAttrFilter.Match(key) { + cvs[key] = cv.Value + } + } + return cvs +} + // Close shuts down an Endpoint and releases any resources associated with it. func (e *Endpoint) Close() { e.clientFactory.Close() @@ -1054,7 +1078,7 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, e.Parent.Log.Errorf("MOID %s not found in cache. Skipping", moid) continue } - e.populateTags(&objectRef, resourceType, res, t, &v) + e.populateTags(objectRef, resourceType, res, t, &v) nValues := 0 alignedInfo, alignedValues := alignSamples(em.SampleInfo, v.Value, interval) diff --git a/plugins/inputs/vsphere/finder.go b/plugins/inputs/vsphere/finder.go index 14f317df4..33ce90f5b 100644 --- a/plugins/inputs/vsphere/finder.go +++ b/plugins/inputs/vsphere/finder.go @@ -233,12 +233,12 @@ func init() { } addFields = map[string][]string{ - "HostSystem": {"parent"}, + "HostSystem": {"parent", "summary.customValue", "customValue"}, "VirtualMachine": {"runtime.host", "config.guestId", "config.uuid", "runtime.powerState", - "summary.customValue", "guest.net", "guest.hostName"}, - "Datastore": {"parent", "info"}, - "ClusterComputeResource": {"parent"}, - "Datacenter": {"parent"}, + "summary.customValue", "guest.net", "guest.hostName", "customValue"}, + "Datastore": {"parent", "info", "customValue"}, + "ClusterComputeResource": {"parent", "customValue"}, + "Datacenter": {"parent", "customValue"}, } containers = map[string]interface{}{ From 8f14187e9a6a608dc09157468c2c1ea48f4b23b8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 13 Jan 2020 15:17:08 -0800 Subject: [PATCH 1459/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9a7e581d5..e7ec20ca7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ - [#2652](https://github.com/influxdata/telegraf/issues/2652): Warn without error when processes input is started on Windows. - [#6890](https://github.com/influxdata/telegraf/issues/6890): Only parse certificate blocks in x509_cert input. 
+- [#6883](https://github.com/influxdata/telegraf/issues/6883): Add custom attributes for all resource types in vsphere input. ## v1.13.1 [2020-01-08] From 5f2ed4ce4f213aafa5d62d61308836130ec493a3 Mon Sep 17 00:00:00 2001 From: Dennis Hoppe Date: Tue, 14 Jan 2020 21:30:03 +0100 Subject: [PATCH 1460/1815] Remove tabs to fix indentation (#6896) --- scripts/init.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/init.sh b/scripts/init.sh index fc71536f9..d01e16a7c 100755 --- a/scripts/init.sh +++ b/scripts/init.sh @@ -122,11 +122,11 @@ case $1 in # Checked the PID file exists and check the actual status of process if [ -e "$pidfile" ]; then if pidofproc -p $pidfile $daemon > /dev/null; then - log_failure_msg "$name process is running" - else - log_failure_msg "$name pidfile has no corresponding process; ensure $name is stopped and remove $pidfile" - fi - exit 0 + log_failure_msg "$name process is running" + else + log_failure_msg "$name pidfile has no corresponding process; ensure $name is stopped and remove $pidfile" + fi + exit 0 fi # Bump the file limits, before launching the daemon. These will carry over to From e8c4efb57232f5ff08e0fa43f6fafe5e695afe16 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 14 Jan 2020 15:16:27 -0800 Subject: [PATCH 1461/1815] Add date offset and timezone options to date processor (#6886) --- CHANGELOG.md | 3 ++ plugins/processors/date/README.md | 19 +++++++++++ plugins/processors/date/date.go | 35 +++++++++++++++++--- plugins/processors/date/date_test.go | 48 ++++++++++++++++++++++++++++ 4 files changed, 101 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e7ec20ca7..7cee28b02 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,9 @@ renamed to `sqlserver_azure_db_resource_stats` due to an issue where numeric metrics were previously being reported incorrectly as strings. +- The `date` processor now uses the UTC timezone when creating its tag. In + previous versions the local time was used. + #### New Outputs - [warp10](/plugins/outputs/warp10/README.md) - Contributed by @aurrelhebert diff --git a/plugins/processors/date/README.md b/plugins/processors/date/README.md index 1a68119e1..b04964b4a 100644 --- a/plugins/processors/date/README.md +++ b/plugins/processors/date/README.md @@ -19,6 +19,23 @@ A few example usecases include: ## Date format string, must be a representation of the Go "reference time" ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". date_format = "Jan" + + ## Offset duration added to the date string when writing the new tag. + # date_offset = "0s" + + ## Timezone to use when generating the date. This can be set to one of + ## "Local", "UTC", or to a location name in the IANA Time Zone database. + ## example: timezone = "America/Los_Angeles" + # timezone = "UTC" +``` + +#### timezone + +On Windows, only the `Local` and `UTC` zones are available by default. 
To use +other timezones, set the `ZONEINFO` environment variable to the location of +[`zoneinfo.zip`][zoneinfo]: +``` +set ZONEINFO=C:\zoneinfo.zip ``` ### Example @@ -27,3 +44,5 @@ A few example usecases include: - throughput lower=10i,upper=1000i,mean=500i 1560540094000000000 + throughput,month=Jun lower=10i,upper=1000i,mean=500i 1560540094000000000 ``` + +[zoneinfo]: https://github.com/golang/go/raw/50bd1c4d4eb4fac8ddeb5f063c099daccfb71b26/lib/time/zoneinfo.zip diff --git a/plugins/processors/date/date.go b/plugins/processors/date/date.go index 479106ef2..c8007323f 100644 --- a/plugins/processors/date/date.go +++ b/plugins/processors/date/date.go @@ -1,7 +1,10 @@ package date import ( + "time" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/processors" ) @@ -12,11 +15,25 @@ const sampleConfig = ` ## Date format string, must be a representation of the Go "reference time" ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". date_format = "Jan" + + ## Offset duration added to the date string when writing the new tag. + # date_offset = "0s" + + ## Timezone to use when creating the tag. This can be set to one of + ## "UTC", "Local", or to a location name in the IANA Time Zone database. + ## example: timezone = "America/Los_Angeles" + # timezone = "UTC" ` +const defaultTimezone = "UTC" + type Date struct { - TagKey string `toml:"tag_key"` - DateFormat string `toml:"date_format"` + TagKey string `toml:"tag_key"` + DateFormat string `toml:"date_format"` + DateOffset internal.Duration `toml:"date_offset"` + Timezone string `toml:"timezone"` + + location *time.Location } func (d *Date) SampleConfig() string { @@ -27,9 +44,17 @@ func (d *Date) Description() string { return "Dates measurements, tags, and fields that pass through this filter." } +func (d *Date) Init() error { + var err error + // LoadLocation returns UTC if timezone is the empty string. 
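+	// Any other unrecognized timezone name is surfaced as an error from Init.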
+ d.location, err = time.LoadLocation(d.Timezone) + return err +} + func (d *Date) Apply(in ...telegraf.Metric) []telegraf.Metric { for _, point := range in { - point.AddTag(d.TagKey, point.Time().Format(d.DateFormat)) + tm := point.Time().In(d.location).Add(d.DateOffset.Duration) + point.AddTag(d.TagKey, tm.Format(d.DateFormat)) } return in @@ -37,6 +62,8 @@ func (d *Date) Apply(in ...telegraf.Metric) []telegraf.Metric { func init() { processors.Add("date", func() telegraf.Processor { - return &Date{} + return &Date{ + Timezone: defaultTimezone, + } }) } diff --git a/plugins/processors/date/date_test.go b/plugins/processors/date/date_test.go index 98d88b351..d97cc2a9c 100644 --- a/plugins/processors/date/date_test.go +++ b/plugins/processors/date/date_test.go @@ -5,8 +5,11 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func MustMetric(name string, tags map[string]string, fields map[string]interface{}, metricTime time.Time) telegraf.Metric { @@ -25,6 +28,8 @@ func TestMonthTag(t *testing.T) { TagKey: "month", DateFormat: "Jan", } + err := dateFormatMonth.Init() + require.NoError(t, err) currentTime := time.Now() month := currentTime.Format("Jan") @@ -43,6 +48,10 @@ func TestYearTag(t *testing.T) { TagKey: "year", DateFormat: "2006", } + + err := dateFormatYear.Init() + require.NoError(t, err) + currentTime := time.Now() year := currentTime.Format("2006") @@ -61,7 +70,46 @@ func TestOldDateTag(t *testing.T) { DateFormat: "2006", } + err := dateFormatYear.Init() + require.NoError(t, err) + m7 := MustMetric("foo", nil, nil, time.Date(1993, 05, 27, 0, 0, 0, 0, time.UTC)) customDateApply := dateFormatYear.Apply(m7) assert.Equal(t, map[string]string{"year": "1993"}, customDateApply[0].Tags(), "should add tag 'year'") } + +func TestDateOffset(t *testing.T) { + plugin := &Date{ + TagKey: "hour", + DateFormat: "15", + DateOffset: internal.Duration{Duration: 2 * time.Hour}, + } + + err := plugin.Init() + require.NoError(t, err) + + metric := testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(1578603600, 0), + ) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "hour": "23", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(1578603600, 0), + ), + } + + actual := plugin.Apply(metric) + testutil.RequireMetricsEqual(t, expected, actual) +} From d9113c5fdb2b55ebee861d6e32f71cd865369c57 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 14 Jan 2020 15:17:11 -0800 Subject: [PATCH 1462/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7cee28b02..64b18e752 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,7 @@ - [#6869](https://github.com/influxdata/telegraf/pull/6869): Calculate DB Name instead of GUID in physical_db_name in the sqlserver input. - [#6733](https://github.com/influxdata/telegraf/pull/6733): Add latency stats to mongo input. - [#6844](https://github.com/influxdata/telegraf/pull/6844): Add source and port tags to jenkins_job metrics. +- [#6886](https://github.com/influxdata/telegraf/pull/6886): Add date offset and timezone options to date processor. 
#### Bugfixes From 0693748c35ba33dee3a7154c355de5477a16ac14 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 14 Jan 2020 16:24:14 -0800 Subject: [PATCH 1463/1815] Show default port in consul sample config --- plugins/inputs/consul/README.md | 2 +- plugins/inputs/consul/consul.go | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/consul/README.md b/plugins/inputs/consul/README.md index 2b2368388..72bdeb231 100644 --- a/plugins/inputs/consul/README.md +++ b/plugins/inputs/consul/README.md @@ -12,7 +12,7 @@ report those stats already using StatsD protocol if needed. # Gather health check statuses from services registered in Consul [[inputs.consul]] ## Consul server address - # address = "localhost" + # address = "localhost:8500" ## URI scheme for the Consul server, one of "http", "https" # scheme = "http" diff --git a/plugins/inputs/consul/consul.go b/plugins/inputs/consul/consul.go index 4b5ee4b1c..964eb9394 100644 --- a/plugins/inputs/consul/consul.go +++ b/plugins/inputs/consul/consul.go @@ -5,7 +5,6 @@ import ( "strings" "github.com/hashicorp/consul/api" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -28,7 +27,7 @@ type Consul struct { var sampleConfig = ` ## Consul server address - # address = "localhost" + # address = "localhost:8500" ## URI scheme for the Consul server, one of "http", "https" # scheme = "http" From 8d647c4ebf1f62efc96fa62ee1774981c4a50585 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 14 Jan 2020 16:26:44 -0800 Subject: [PATCH 1464/1815] Fix URL agent address form with udp in snmp input (#6899) --- plugins/inputs/snmp/snmp.go | 31 +++++++++++++++++++++---------- plugins/inputs/snmp/snmp_test.go | 9 ++++++++- 2 files changed, 29 insertions(+), 11 deletions(-) diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 75c9b7836..2fc56ff97 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -7,6 +7,7 @@ import ( "log" "math" "net" + "net/url" "os/exec" "strconv" "strings" @@ -609,20 +610,30 @@ func (s *Snmp) getConnection(idx int) (snmpConnection, error) { gs := gosnmpWrapper{&gosnmp.GoSNMP{}} s.connectionCache[idx] = gs - if strings.HasPrefix(agent, "tcp://") { - agent = strings.TrimPrefix(agent, "tcp://") - gs.Transport = "tcp" + if !strings.Contains(agent, "://") { + agent = "udp://" + agent } - host, portStr, err := net.SplitHostPort(agent) + + u, err := url.Parse(agent) if err != nil { - if err, ok := err.(*net.AddrError); !ok || err.Err != "missing port in address" { - return nil, Errorf(err, "parsing host") - } - host = agent + return nil, err + } + + switch u.Scheme { + case "tcp": + gs.Transport = "tcp" + case "", "udp": + gs.Transport = "udp" + default: + return nil, fmt.Errorf("unsupported scheme: %v", u.Scheme) + } + + gs.Target = u.Hostname() + + portStr := u.Port() + if portStr == "" { portStr = "161" } - gs.Target = host - port, err := strconv.ParseUint(portStr, 10, 16) if err != nil { return nil, Errorf(err, "parsing port") diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 3e174e224..25382bd7d 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -232,7 +232,7 @@ func TestSnmpInit_noTranslate(t *testing.T) { func TestGetSNMPConnection_v2(t *testing.T) { s := &Snmp{ - Agents: []string{"1.2.3.4:567", "1.2.3.4"}, + Agents: []string{"1.2.3.4:567", "1.2.3.4", "udp://127.0.0.1"}, Timeout: internal.Duration{Duration: 
3 * time.Second}, Retries: 4, Version: 2, @@ -256,6 +256,13 @@ func TestGetSNMPConnection_v2(t *testing.T) { assert.Equal(t, "1.2.3.4", gs.Target) assert.EqualValues(t, 161, gs.Port) assert.Equal(t, "udp", gs.Transport) + + gsc, err = s.getConnection(2) + require.NoError(t, err) + gs = gsc.(gosnmpWrapper) + assert.Equal(t, "127.0.0.1", gs.Target) + assert.EqualValues(t, 161, gs.Port) + assert.Equal(t, "udp", gs.Transport) } func TestGetSNMPConnectionTCP(t *testing.T) { From fdf871f679c5a7221054c3870d45c54fd6f0e261 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 14 Jan 2020 16:27:57 -0800 Subject: [PATCH 1465/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 64b18e752..08794c744 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ - [#2652](https://github.com/influxdata/telegraf/issues/2652): Warn without error when processes input is started on Windows. - [#6890](https://github.com/influxdata/telegraf/issues/6890): Only parse certificate blocks in x509_cert input. - [#6883](https://github.com/influxdata/telegraf/issues/6883): Add custom attributes for all resource types in vsphere input. +- [#6899](https://github.com/influxdata/telegraf/pull/6899): Fix URL agent address form with udp in snmp input. ## v1.13.1 [2020-01-08] From fc5701262947ee613cd9565b2971a8571a8e52d5 Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Tue, 14 Jan 2020 19:30:43 -0500 Subject: [PATCH 1466/1815] Deprecated force_discovery_on_init flag in vsphere input (#6861) --- plugins/inputs/vsphere/endpoint.go | 16 ++-------------- plugins/inputs/vsphere/vsphere.go | 12 ++++++------ 2 files changed, 8 insertions(+), 20 deletions(-) diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index 98b308dfc..fa1950193 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -286,20 +286,8 @@ func (e *Endpoint) init(ctx context.Context) error { } if e.Parent.ObjectDiscoveryInterval.Duration > 0 { - - // Run an initial discovery. If force_discovery_on_init isn't set, we kick it off as a - // goroutine without waiting for it. This will probably cause us to report an empty - // dataset on the first collection, but it solves the issue of the first collection timing out. - if e.Parent.ForceDiscoverOnInit { - e.Parent.Log.Debug("Running initial discovery and waiting for it to finish") - e.initalDiscovery(ctx) - } else { - // Otherwise, just run it in the background. We'll probably have an incomplete first metric - // collection this way. 
- go func() { - e.initalDiscovery(ctx) - }() - } + e.Parent.Log.Debug("Running initial discovery") + e.initalDiscovery(ctx) } e.initialized = true return nil diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go index 176d55010..e4f572153 100644 --- a/plugins/inputs/vsphere/vsphere.go +++ b/plugins/inputs/vsphere/vsphere.go @@ -195,11 +195,6 @@ var sampleConfig = ` # collect_concurrency = 1 # discover_concurrency = 1 - ## whether or not to force discovery of new objects on initial gather call before collecting metrics - ## when true for large environments this may cause errors for time elapsed while collecting metrics - ## when false (default) the first collection cycle may result in no or limited metrics while objects are discovered - # force_discover_on_init = false - ## the interval before (re)discovering objects subject to metrics collection (default: 300s) # object_discovery_interval = "300s" @@ -248,6 +243,11 @@ func (v *VSphere) Start(acc telegraf.Accumulator) error { ctx, cancel := context.WithCancel(context.Background()) v.cancel = cancel + // Check for deprecated settings + if !v.ForceDiscoverOnInit { + v.Log.Warn("The 'force_discover_on_init' configuration parameter has been deprecated. Setting it to 'false' has no effect") + } + // Create endpoints, one for each vCenter we're monitoring v.endpoints = make([]*Endpoint, len(v.Vcenters)) for i, rawURL := range v.Vcenters { @@ -344,7 +344,7 @@ func init() { MaxQueryMetrics: 256, CollectConcurrency: 1, DiscoverConcurrency: 1, - ForceDiscoverOnInit: false, + ForceDiscoverOnInit: true, ObjectDiscoveryInterval: internal.Duration{Duration: time.Second * 300}, Timeout: internal.Duration{Duration: time.Second * 60}, } From 1b4aad2ccdc226cb13bca474a1790eae7c9dd6c8 Mon Sep 17 00:00:00 2001 From: ryan-peck <57376496+ryan-peck@users.noreply.github.com> Date: Tue, 14 Jan 2020 17:05:28 -0800 Subject: [PATCH 1467/1815] Change logic to allow recording of device fields when attributes is false (#6638) --- plugins/inputs/smart/smart.go | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go index c80e86859..6c83e9890 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -319,6 +319,7 @@ func gatherDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo, co attr := attribute.FindStringSubmatch(line) if len(attr) > 1 { + // attribute has been found, add it only if collectAttributes is true if collectAttributes { tags["id"] = attr[1] tags["name"] = attr[2] @@ -351,23 +352,25 @@ func gatherDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo, co } } } else { - if collectAttributes { - if matches := sasNvmeAttr.FindStringSubmatch(line); len(matches) > 2 { - if attr, ok := sasNvmeAttributes[matches[1]]; ok { - tags["name"] = attr.Name - if attr.ID != "" { - tags["id"] = attr.ID - } + // what was found is not a vendor attribute + if matches := sasNvmeAttr.FindStringSubmatch(line); len(matches) > 2 { + if attr, ok := sasNvmeAttributes[matches[1]]; ok { + tags["name"] = attr.Name + if attr.ID != "" { + tags["id"] = attr.ID + } - parse := parseCommaSeperatedInt - if attr.Parse != nil { - parse = attr.Parse - } - - if err := parse(fields, deviceFields, matches[2]); err != nil { - continue - } + parse := parseCommaSeperatedInt + if attr.Parse != nil { + parse = attr.Parse + } + if err := parse(fields, deviceFields, matches[2]); err != nil { + continue + } + // if the field is 
classified as an attribute, only add it + // if collectAttributes is true + if collectAttributes { acc.AddFields("smart_attribute", fields, tags) } } From 6d96f359b46942d1060933ee5c12057b7b1eed8f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 14 Jan 2020 17:06:34 -0800 Subject: [PATCH 1468/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 08794c744..98da1fcf7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,7 @@ - [#6890](https://github.com/influxdata/telegraf/issues/6890): Only parse certificate blocks in x509_cert input. - [#6883](https://github.com/influxdata/telegraf/issues/6883): Add custom attributes for all resource types in vsphere input. - [#6899](https://github.com/influxdata/telegraf/pull/6899): Fix URL agent address form with udp in snmp input. +- [#6619](https://github.com/influxdata/telegraf/issues/6619): Change logic to allow recording of device fields when attributes is false. ## v1.13.1 [2020-01-08] From 68925ed1efedf33c7231f434852d202611cb1308 Mon Sep 17 00:00:00 2001 From: like-inspur Date: Wed, 15 Jan 2020 10:35:48 +0800 Subject: [PATCH 1469/1815] Change description for config dns_lookup (#6902) --- plugins/inputs/ntpq/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/ntpq/README.md b/plugins/inputs/ntpq/README.md index f6ee8e2af..e691200dd 100644 --- a/plugins/inputs/ntpq/README.md +++ b/plugins/inputs/ntpq/README.md @@ -29,7 +29,7 @@ server (RMS of difference of multiple time samples, milliseconds); ```toml # Get standard NTP query metrics, requires ntpq executable [[inputs.ntpq]] - ## If false, set the -n ntpq flag. Can reduce metric gather times. + ## If false, add -n for ntpq command. Can reduce metric gather times. dns_lookup = true ``` From f6b302621eb06afd456085ed1bd368d46482e059 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 15 Jan 2020 15:26:50 -0800 Subject: [PATCH 1470/1815] Do not add invalid timestamps to kafka messages (#6908) --- plugins/outputs/kafka/kafka.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index b4e71ef57..18a8925a5 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -5,6 +5,7 @@ import ( "fmt" "log" "strings" + "time" "github.com/Shopify/sarama" "github.com/gofrs/uuid" @@ -21,6 +22,8 @@ var ValidTopicSuffixMethods = []string{ "tags", } +var zeroTime = time.Unix(0, 0) + type ( Kafka struct { Brokers []string @@ -344,9 +347,13 @@ func (k *Kafka) Write(metrics []telegraf.Metric) error { } m := &sarama.ProducerMessage{ - Topic: k.GetTopicName(metric), - Value: sarama.ByteEncoder(buf), - Timestamp: metric.Time(), + Topic: k.GetTopicName(metric), + Value: sarama.ByteEncoder(buf), + } + + // Negative timestamps are not allowed by the Kafka protocol. + if !metric.Time().Before(zeroTime) { + m.Timestamp = metric.Time() } key, err := k.routingKey(metric) From ca476066137c1d6965a8d353be8df562bdd98d1b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 15 Jan 2020 15:28:10 -0800 Subject: [PATCH 1471/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 98da1fcf7..70360c845 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,6 +42,7 @@ - [#6883](https://github.com/influxdata/telegraf/issues/6883): Add custom attributes for all resource types in vsphere input. 
- [#6899](https://github.com/influxdata/telegraf/pull/6899): Fix URL agent address form with udp in snmp input.
 - [#6619](https://github.com/influxdata/telegraf/issues/6619): Change logic to allow recording of device fields when attributes is false.
+- [#6903](https://github.com/influxdata/telegraf/issues/6903): Do not add invalid timestamps to kafka messages.
 
 ## v1.13.1 [2020-01-08]
 
From d7b3f1f4ea565ff2d5a80725c622ea542f04432c Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 15 Jan 2020 19:29:50 -0800
Subject: [PATCH 1472/1815] Document workaround for truncated powershell output (#6910)

---
 plugins/inputs/exec/README.md | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md
index f4e917242..a8544e1d1 100644
--- a/plugins/inputs/exec/README.md
+++ b/plugins/inputs/exec/README.md
@@ -50,8 +50,16 @@ It can be paired with the following configuration and will be run at the `interv
 
 ### Common Issues:
 
-#### Q: My script works when I run it by hand, but not when Telegraf is running as a service.
+#### My script works when I run it by hand, but not when Telegraf is running as a service.
 
 This may be related to the Telegraf service running as a different user. The
 official packages run Telegraf as the `telegraf` user and group on Linux
 systems.
+
+#### When running a PowerShell script on Windows, the output appears to be truncated.
+
+You may need to set a variable in your script to increase the number of columns
+available for output:
+```
+$host.UI.RawUI.BufferSize = new-object System.Management.Automation.Host.Size(1024,50)
+```

From c7b7336da396514ac84d67c5ffed4dd39f369511 Mon Sep 17 00:00:00 2001
From: Pontus Rydin
Date: Thu, 16 Jan 2020 15:14:00 -0500
Subject: [PATCH 1473/1815] Exclude resources by inventory path in vsphere input (#6859)

---
 plugins/inputs/vsphere/README.md       |   5 +
 plugins/inputs/vsphere/endpoint.go     | 225 ++++++++++++++-----------
 plugins/inputs/vsphere/finder.go       |  41 ++++-
 plugins/inputs/vsphere/vsphere.go      |   5 +
 plugins/inputs/vsphere/vsphere_test.go |  51 +++++-
 5 files changed, 217 insertions(+), 110 deletions(-)

diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md
index 4009c8cde..f69bd2862 100644
--- a/plugins/inputs/vsphere/README.md
+++ b/plugins/inputs/vsphere/README.md
@@ -31,6 +31,7 @@ vm_metric_exclude = [ "*" ]
   ## VMs
   ## Typical VM metrics (if omitted or empty, all metrics are collected)
   # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected)
+  # vm_exclude = [] # Inventory paths to exclude
   vm_metric_include = [
     "cpu.demand.average",
     "cpu.idle.summation",
@@ -73,6 +74,7 @@ vm_metric_exclude = [ "*" ]
   ## Hosts
   ## Typical host metrics (if omitted or empty, all metrics are collected)
   # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected)
+  # host_exclude = [] # Inventory paths to exclude
   host_metric_include = [
     "cpu.coreUtilization.average",
     "cpu.costop.summation",
@@ -130,18 +132,21 @@ vm_metric_exclude = [ "*" ]
 
   ## Clusters
   # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
+  # cluster_exclude = [] # Inventory paths to exclude
   # cluster_metric_include = [] ## if omitted or empty, all metrics are collected
   # cluster_metric_exclude = [] ## Nothing excluded by default
   # cluster_instances = false ## false by default
 
   ## Datastores
   # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by 
default all are collected)
+  # datastore_exclude = [] # Inventory paths to exclude
   # datastore_metric_include = [] ## if omitted or empty, all metrics are collected
   # datastore_metric_exclude = [] ## Nothing excluded by default
   # datastore_instances = false ## false by default
 
   ## Datacenters
   # datacenter_include = [ "/*/host/**"] # Inventory path to datacenters to collect (by default all are collected)
+  # datacenter_exclude = [] # Inventory paths to exclude
   datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
   datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
   # datacenter_instances = false ## false by default
diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go
index fa1950193..c049e495f 100644
--- a/plugins/inputs/vsphere/endpoint.go
+++ b/plugins/inputs/vsphere/endpoint.go
@@ -32,8 +32,6 @@ var isIPv6 = regexp.MustCompile("^(?:[A-Fa-f0-9]{0,4}:){1,7}[A-Fa-f0-9]{1,4}$")
 
 const metricLookback = 3 // Number of time periods to look back at for non-realtime metrics
 
-const rtMetricLookback = 3 // Number of time periods to look back at for realtime metrics
-
 const maxSampleConst = 10 // Absolute maximum number of samples regardless of period
 
 const maxMetadataSamples = 100 // Number of resources to sample for metric metadata
@@ -67,6 +65,7 @@ type resourceKind struct {
 	objects          objectMap
 	filters          filter.Filter
 	paths            []string
+	excludePaths     []string
 	collectInstances bool
 	getObjects       func(context.Context, *Endpoint, *ResourceFilter) (objectMap, error)
 	include          []string
@@ -132,6 +131,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint,
 			objects:          make(objectMap),
 			filters:          newFilterOrPanic(parent.DatacenterMetricInclude, parent.DatacenterMetricExclude),
 			paths:            parent.DatacenterInclude,
+			excludePaths:     parent.DatacenterExclude,
 			simple:           isSimple(parent.DatacenterMetricInclude, parent.DatacenterMetricExclude),
 			include:          parent.DatacenterMetricInclude,
 			collectInstances: parent.DatacenterInstances,
@@ -149,6 +149,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint,
 			objects:          make(objectMap),
 			filters:          newFilterOrPanic(parent.ClusterMetricInclude, parent.ClusterMetricExclude),
 			paths:            parent.ClusterInclude,
+			excludePaths:     parent.ClusterExclude,
 			simple:           isSimple(parent.ClusterMetricInclude, parent.ClusterMetricExclude),
 			include:          parent.ClusterMetricInclude,
 			collectInstances: parent.ClusterInstances,
@@ -166,6 +167,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint,
 			objects:          make(objectMap),
 			filters:          newFilterOrPanic(parent.HostMetricInclude, parent.HostMetricExclude),
 			paths:            parent.HostInclude,
+			excludePaths:     parent.HostExclude,
 			simple:           isSimple(parent.HostMetricInclude, parent.HostMetricExclude),
 			include:          parent.HostMetricInclude,
 			collectInstances: parent.HostInstances,
@@ -183,6 +185,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint,
 			objects:          make(objectMap),
 			filters:          newFilterOrPanic(parent.VMMetricInclude, parent.VMMetricExclude),
 			paths:            parent.VMInclude,
+			excludePaths:     parent.VMExclude,
 			simple:           isSimple(parent.VMMetricInclude, parent.VMMetricExclude),
 			include:          parent.VMMetricInclude,
 			collectInstances: parent.VMInstances,
@@ -199,6 +202,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint,
 			objects:          make(objectMap),
 			filters:          newFilterOrPanic(parent.DatastoreMetricInclude, parent.DatastoreMetricExclude),
 			paths:            parent.DatastoreInclude,
+			excludePaths:     
parent.DatastoreExclude, simple: isSimple(parent.DatastoreMetricInclude, parent.DatastoreMetricExclude), include: parent.DatastoreMetricInclude, collectInstances: parent.DatastoreInstances, @@ -329,32 +333,36 @@ func (e *Endpoint) getDatacenterName(ctx context.Context, client *Client, cache path := make([]string, 0) returnVal := "" here := r - for { - if name, ok := cache[here.Reference().String()]; ok { - // Populate cache for the entire chain of objects leading here. - returnVal = name - break - } - path = append(path, here.Reference().String()) - o := object.NewCommon(client.Client.Client, r) - var result mo.ManagedEntity - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) - defer cancel1() - err := o.Properties(ctx1, here, []string{"parent", "name"}, &result) - if err != nil { - e.Parent.Log.Warnf("Error while resolving parent. Assuming no parent exists. Error: %s", err.Error()) - break - } - if result.Reference().Type == "Datacenter" { - // Populate cache for the entire chain of objects leading here. - returnVal = result.Name - break - } - if result.Parent == nil { - e.Parent.Log.Debugf("No parent found for %s (ascending from %s)", here.Reference(), r.Reference()) - break - } - here = result.Parent.Reference() + done := false + for !done { + done = func() bool { + if name, ok := cache[here.Reference().String()]; ok { + // Populate cache for the entire chain of objects leading here. + returnVal = name + return true + } + path = append(path, here.Reference().String()) + o := object.NewCommon(client.Client.Client, r) + var result mo.ManagedEntity + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + err := o.Properties(ctx1, here, []string{"parent", "name"}, &result) + if err != nil { + e.Parent.Log.Warnf("Error while resolving parent. Assuming no parent exists. Error: %s", err.Error()) + return true + } + if result.Reference().Type == "Datacenter" { + // Populate cache for the entire chain of objects leading here. + returnVal = result.Name + return true + } + if result.Parent == nil { + e.Parent.Log.Debugf("No parent found for %s (ascending from %s)", here.Reference(), r.Reference()) + return true + } + here = result.Parent.Reference() + return false + }() } for _, s := range path { cache[s] = returnVal @@ -389,43 +397,51 @@ func (e *Endpoint) discover(ctx context.Context) error { // Populate resource objects, and endpoint instance info. 
newObjects := make(map[string]objectMap) for k, res := range e.resourceKinds { - e.Parent.Log.Debugf("Discovering resources for %s", res.name) - // Need to do this for all resource types even if they are not enabled - if res.enabled || k != "vm" { - rf := ResourceFilter{ - finder: &Finder{client}, - resType: res.vcName, - paths: res.paths} + err := func() error { + e.Parent.Log.Debugf("Discovering resources for %s", res.name) + // Need to do this for all resource types even if they are not enabled + if res.enabled || k != "vm" { + rf := ResourceFilter{ + finder: &Finder{client}, + resType: res.vcName, + paths: res.paths, + excludePaths: res.excludePaths, + } - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) - defer cancel1() - objects, err := res.getObjects(ctx1, e, &rf) - if err != nil { - return err - } + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + objects, err := res.getObjects(ctx1, e, &rf) + if err != nil { + return err + } - // Fill in datacenter names where available (no need to do it for Datacenters) - if res.name != "Datacenter" { - for k, obj := range objects { - if obj.parentRef != nil { - obj.dcname = e.getDatacenterName(ctx, client, dcNameCache, *obj.parentRef) - objects[k] = obj + // Fill in datacenter names where available (no need to do it for Datacenters) + if res.name != "Datacenter" { + for k, obj := range objects { + if obj.parentRef != nil { + obj.dcname = e.getDatacenterName(ctx, client, dcNameCache, *obj.parentRef) + objects[k] = obj + } } } - } - // No need to collect metric metadata if resource type is not enabled - if res.enabled { - if res.simple { - e.simpleMetadataSelect(ctx, client, res) - } else { - e.complexMetadataSelect(ctx, res, objects, metricNames) + // No need to collect metric metadata if resource type is not enabled + if res.enabled { + if res.simple { + e.simpleMetadataSelect(ctx, client, res) + } else { + e.complexMetadataSelect(ctx, res, objects, metricNames) + } } - } - newObjects[k] = objects + newObjects[k] = objects - SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": res.name}, int64(len(objects))) - numRes += int64(len(objects)) + SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": res.name}, int64(len(objects))) + numRes += int64(len(objects)) + } + return nil + }() + if err != nil { + return err } } @@ -433,8 +449,8 @@ func (e *Endpoint) discover(ctx context.Context) error { dss := newObjects["datastore"] l2d := make(map[string]string) for _, ds := range dss { - url := ds.altID - m := isolateLUN.FindStringSubmatch(url) + lunId := ds.altID + m := isolateLUN.FindStringSubmatch(lunId) if m != nil { l2d[m[1]] = ds.name } @@ -583,39 +599,47 @@ func getClusters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (obje cache := make(map[string]*types.ManagedObjectReference) m := make(objectMap, len(resources)) for _, r := range resources { - // We're not interested in the immediate parent (a folder), but the data center. - p, ok := cache[r.Parent.Value] - if !ok { - ctx2, cancel2 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) - defer cancel2() - client, err := e.clientFactory.GetClient(ctx2) - if err != nil { - return nil, err + // Wrap in a function to make defer work correctly. + err := func() error { + // We're not interested in the immediate parent (a folder), but the data center. 
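+			// Folder lookups go through the vCenter API, so the result is
+			// cached per parent reference to avoid one call per cluster.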
+ p, ok := cache[r.Parent.Value] + if !ok { + ctx2, cancel2 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel2() + client, err := e.clientFactory.GetClient(ctx2) + if err != nil { + return err + } + o := object.NewFolder(client.Client.Client, *r.Parent) + var folder mo.Folder + ctx3, cancel3 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel3() + err = o.Properties(ctx3, *r.Parent, []string{"parent"}, &folder) + if err != nil { + e.Parent.Log.Warnf("Error while getting folder parent: %s", err.Error()) + p = nil + } else { + pp := folder.Parent.Reference() + p = &pp + cache[r.Parent.Value] = p + } } - o := object.NewFolder(client.Client.Client, *r.Parent) - var folder mo.Folder - ctx3, cancel3 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) - defer cancel3() - err = o.Properties(ctx3, *r.Parent, []string{"parent"}, &folder) - if err != nil { - e.Parent.Log.Warnf("Error while getting folder parent: %s", err.Error()) - p = nil - } else { - pp := folder.Parent.Reference() - p = &pp - cache[r.Parent.Value] = p + m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{ + name: r.Name, + ref: r.ExtensibleManagedObject.Reference(), + parentRef: p, + customValues: e.loadCustomAttributes(&r.ManagedEntity), } - } - m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{ - name: r.Name, - ref: r.ExtensibleManagedObject.Reference(), - parentRef: p, - customValues: e.loadCustomAttributes(&r.ManagedEntity), + return nil + }() + if err != nil { + return nil, err } } return m, nil } +//noinspection GoUnusedParameter func getHosts(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { var resources []mo.HostSystem err := filter.FindAll(ctx, &resources) @@ -717,18 +741,18 @@ func getDatastores(ctx context.Context, e *Endpoint, filter *ResourceFilter) (ob } m := make(objectMap) for _, r := range resources { - url := "" + lunId := "" if r.Info != nil { info := r.Info.GetDatastoreInfo() if info != nil { - url = info.Url + lunId = info.Url } } m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{ name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Parent, - altID: url, + altID: lunId, customValues: e.loadCustomAttributes(&r.ManagedEntity), } } @@ -814,7 +838,7 @@ func submitChunkJob(ctx context.Context, te *ThrottledExecutor, job func([]types }) } -func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Time, latest time.Time, acc telegraf.Accumulator, job func([]types.PerfQuerySpec)) { +func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Time, latest time.Time, job func([]types.PerfQuerySpec)) { te := NewThrottledExecutor(e.Parent.CollectConcurrency) maxMetrics := e.Parent.MaxQueryMetrics if maxMetrics < 1 { @@ -831,7 +855,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim metrics := 0 total := 0 nRes := 0 - for _, object := range res.objects { + for _, resource := range res.objects { mr := len(res.metrics) for mr > 0 { mc := mr @@ -841,14 +865,14 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim } fm := len(res.metrics) - mr pq := types.PerfQuerySpec{ - Entity: object.ref, + Entity: resource.ref, MaxSample: maxSampleConst, MetricId: res.metrics[fm : fm+mc], IntervalId: res.sampling, Format: "normal", } - start, ok := e.hwMarks.Get(object.ref.Value) + start, ok := e.hwMarks.Get(resource.ref.Value) if !ok { // Look back 3 sampling periods by default start = latest.Add(time.Duration(-res.sampling) 
* time.Second * (metricLookback - 1)) @@ -917,7 +941,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc // Estimate the interval at which we're invoked. Use local time (not server time) // since this is about how we got invoked locally. localNow := time.Now() - estInterval := time.Duration(time.Minute) + estInterval := time.Minute if !res.lastColl.IsZero() { s := time.Duration(res.sampling) * time.Second rawInterval := localNow.Sub(res.lastColl) @@ -957,13 +981,14 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc latestSample := time.Time{} // Divide workload into chunks and process them concurrently - e.chunkify(ctx, res, now, latest, acc, + e.chunkify(ctx, res, now, latest, func(chunk []types.PerfQuerySpec) { - n, localLatest, err := e.collectChunk(ctx, chunk, res, acc, now, estInterval) - e.Parent.Log.Debugf("CollectChunk for %s returned %d metrics", resourceType, n) + n, localLatest, err := e.collectChunk(ctx, chunk, res, acc, estInterval) if err != nil { acc.AddError(errors.New("while collecting " + res.name + ": " + err.Error())) + return } + e.Parent.Log.Debugf("CollectChunk for %s returned %d metrics", resourceType, n) atomic.AddInt64(&count, int64(n)) tsMux.Lock() defer tsMux.Unlock() @@ -1004,7 +1029,7 @@ func alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Dur if roundedTs == lastBucket { bi++ p := len(rValues) - 1 - rValues[p] = ((bi-1)/bi)*float64(rValues[p]) + v/bi + rValues[p] = ((bi-1)/bi)*rValues[p] + v/bi } else { rValues = append(rValues, v) roundedInfo := types.PerfSampleInfo{ @@ -1019,7 +1044,7 @@ func alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Dur return rInfo, rValues } -func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, res *resourceKind, acc telegraf.Accumulator, now time.Time, interval time.Duration) (int, time.Time, error) { +func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, res *resourceKind, acc telegraf.Accumulator, interval time.Duration) (int, time.Time, error) { e.Parent.Log.Debugf("Query for %s has %d QuerySpecs", res.name, len(pqs)) latestSample := time.Time{} count := 0 @@ -1100,7 +1125,7 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, } v := alignedValues[idx] if info.UnitInfo.GetElementDescription().Key == "percent" { - bucket.fields[fn] = float64(v) / 100.0 + bucket.fields[fn] = v / 100.0 } else { if e.Parent.UseIntSamples { bucket.fields[fn] = int64(round(v)) diff --git a/plugins/inputs/vsphere/finder.go b/plugins/inputs/vsphere/finder.go index 33ce90f5b..e49bf80f3 100644 --- a/plugins/inputs/vsphere/finder.go +++ b/plugins/inputs/vsphere/finder.go @@ -25,34 +25,54 @@ type Finder struct { // ResourceFilter is a convenience class holding a finder and a set of paths. It is useful when you need a // self contained object capable of returning a certain set of resources. type ResourceFilter struct { - finder *Finder - resType string - paths []string + finder *Finder + resType string + paths []string + excludePaths []string } // FindAll returns the union of resources found given the supplied resource type and paths. 
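+// Objects found under any of the exclude paths are removed from that union.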
-func (f *Finder) FindAll(ctx context.Context, resType string, paths []string, dst interface{}) error { +func (f *Finder) FindAll(ctx context.Context, resType string, paths, excludePaths []string, dst interface{}) error { + objs := make(map[string]types.ObjectContent) for _, p := range paths { - if err := f.Find(ctx, resType, p, dst); err != nil { + if err := f.find(ctx, resType, p, objs); err != nil { return err } } - return nil + if len(excludePaths) > 0 { + excludes := make(map[string]types.ObjectContent) + for _, p := range excludePaths { + if err := f.find(ctx, resType, p, excludes); err != nil { + return err + } + } + for k := range excludes { + delete(objs, k) + } + } + return objectContentToTypedArray(objs, dst) } // Find returns the resources matching the specified path. func (f *Finder) Find(ctx context.Context, resType, path string, dst interface{}) error { + objs := make(map[string]types.ObjectContent) + err := f.find(ctx, resType, path, objs) + if err != nil { + return err + } + return objectContentToTypedArray(objs, dst) +} + +func (f *Finder) find(ctx context.Context, resType, path string, objs map[string]types.ObjectContent) error { p := strings.Split(path, "/") flt := make([]property.Filter, len(p)-1) for i := 1; i < len(p); i++ { flt[i-1] = property.Filter{"name": p[i]} } - objs := make(map[string]types.ObjectContent) err := f.descend(ctx, f.client.Client.ServiceContent.RootFolder, resType, flt, 0, objs) if err != nil { return err } - objectContentToTypedArray(objs, dst) f.client.log.Debugf("Find(%s, %s) returned %d objects", resType, path, len(objs)) return nil } @@ -94,6 +114,9 @@ func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference, // Special case: The last token is a recursive wildcard, so we can grab everything // recursively in a single call. v2, err := m.CreateContainerView(ctx, root, []string{resType}, true) + if err != nil { + return err + } defer v2.Destroy(ctx) err = v2.Retrieve(ctx, []string{resType}, fields, &content) if err != nil { @@ -204,7 +227,7 @@ func objectContentToTypedArray(objs map[string]types.ObjectContent, dst interfac // FindAll finds all resources matching the paths that were specified upon creation of // the ResourceFilter. 
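+// Exclude paths held by the filter are applied by the underlying Finder.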
func (r *ResourceFilter) FindAll(ctx context.Context, dst interface{}) error { - return r.finder.FindAll(ctx, r.resType, r.paths, dst) + return r.finder.FindAll(ctx, r.resType, r.paths, r.excludePaths, dst) } func matchName(f property.Filter, props []types.DynamicProperty) bool { diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go index e4f572153..bc4042980 100644 --- a/plugins/inputs/vsphere/vsphere.go +++ b/plugins/inputs/vsphere/vsphere.go @@ -22,22 +22,27 @@ type VSphere struct { DatacenterMetricInclude []string DatacenterMetricExclude []string DatacenterInclude []string + DatacenterExclude []string ClusterInstances bool ClusterMetricInclude []string ClusterMetricExclude []string ClusterInclude []string + ClusterExclude []string HostInstances bool HostMetricInclude []string HostMetricExclude []string HostInclude []string + HostExclude []string VMInstances bool `toml:"vm_instances"` VMMetricInclude []string `toml:"vm_metric_include"` VMMetricExclude []string `toml:"vm_metric_exclude"` VMInclude []string `toml:"vm_include"` + VMExclude []string `toml:"vm_exclude"` DatastoreInstances bool DatastoreMetricInclude []string DatastoreMetricExclude []string DatastoreInclude []string + DatastoreExclude []string Separator string CustomAttributeInclude []string CustomAttributeExclude []string diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go index aa56d44a1..b66fa45eb 100644 --- a/plugins/inputs/vsphere/vsphere_test.go +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -377,10 +377,59 @@ func TestFinder(t *testing.T) { testLookupVM(ctx, t, &f, "/*/host/**/*DC*/*/*DC*", 4, "") vm = []mo.VirtualMachine{} - err = f.FindAll(ctx, "VirtualMachine", []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, &vm) + err = f.FindAll(ctx, "VirtualMachine", []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, []string{}, &vm) require.NoError(t, err) require.Equal(t, 4, len(vm)) + rf := ResourceFilter{ + finder: &f, + paths: []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, + excludePaths: []string{"/DC0/vm/DC0_H0_VM0"}, + resType: "VirtualMachine", + } + vm = []mo.VirtualMachine{} + require.NoError(t, rf.FindAll(ctx, &vm)) + require.Equal(t, 3, len(vm)) + + rf = ResourceFilter{ + finder: &f, + paths: []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, + excludePaths: []string{"/**"}, + resType: "VirtualMachine", + } + vm = []mo.VirtualMachine{} + require.NoError(t, rf.FindAll(ctx, &vm)) + require.Equal(t, 0, len(vm)) + + rf = ResourceFilter{ + finder: &f, + paths: []string{"/**"}, + excludePaths: []string{"/**"}, + resType: "VirtualMachine", + } + vm = []mo.VirtualMachine{} + require.NoError(t, rf.FindAll(ctx, &vm)) + require.Equal(t, 0, len(vm)) + + rf = ResourceFilter{ + finder: &f, + paths: []string{"/**"}, + excludePaths: []string{"/this won't match anything"}, + resType: "VirtualMachine", + } + vm = []mo.VirtualMachine{} + require.NoError(t, rf.FindAll(ctx, &vm)) + require.Equal(t, 8, len(vm)) + + rf = ResourceFilter{ + finder: &f, + paths: []string{"/**"}, + excludePaths: []string{"/**/*VM0"}, + resType: "VirtualMachine", + } + vm = []mo.VirtualMachine{} + require.NoError(t, rf.FindAll(ctx, &vm)) + require.Equal(t, 4, len(vm)) } func TestFolders(t *testing.T) { From 93f149f12628fe1038d4df44fc987bf44bcbc6f7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 16 Jan 2020 12:17:06 -0800 Subject: [PATCH 1474/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 70360c845..e42f2a1b9 
100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ - [#6733](https://github.com/influxdata/telegraf/pull/6733): Add latency stats to mongo input. - [#6844](https://github.com/influxdata/telegraf/pull/6844): Add source and port tags to jenkins_job metrics. - [#6886](https://github.com/influxdata/telegraf/pull/6886): Add date offset and timezone options to date processor. +- [#6859](https://github.com/influxdata/telegraf/pull/6859): Exclude resources by inventory path in vsphere input. #### Bugfixes From 182104f95e325886edf78ec8a35b878dc1f8fddc Mon Sep 17 00:00:00 2001 From: Will Furnell Date: Thu, 16 Jan 2020 20:51:33 +0000 Subject: [PATCH 1475/1815] Add a new input plugin for InfiniBand card/port statistics (#6631) --- Gopkg.lock | 14 +- Gopkg.toml | 4 + plugins/inputs/all/all.go | 1 + plugins/inputs/infiniband/README.md | 29 ++++ plugins/inputs/infiniband/infiniband.go | 22 +++ plugins/inputs/infiniband/infiniband_linux.go | 59 ++++++++ .../inputs/infiniband/infiniband_notlinux.go | 23 +++ plugins/inputs/infiniband/infiniband_test.go | 134 ++++++++++++++++++ 8 files changed, 285 insertions(+), 1 deletion(-) create mode 100644 plugins/inputs/infiniband/README.md create mode 100644 plugins/inputs/infiniband/infiniband.go create mode 100644 plugins/inputs/infiniband/infiniband_linux.go create mode 100644 plugins/inputs/infiniband/infiniband_notlinux.go create mode 100644 plugins/inputs/infiniband/infiniband_test.go diff --git a/Gopkg.lock b/Gopkg.lock index 749f37d4c..477aff14a 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -80,6 +80,14 @@ revision = "3492b2aff5036c67228ab3c7dba3577c871db200" version = "v13.3.0" +[[projects]] + branch = "master" + digest = "1:005d83d9daaea4e3fc7b2eedf28f68ebf87df7d331a874e5d7d14f643467e7d9" + name = "github.com/Mellanox/rdmamap" + packages = ["."] + pruneopts = "" + revision = "7c3c4763a6ee6a4d624fe133135dc3a7c483111c" + [[projects]] digest = "1:298712a3ee36b59c3ca91f4183bd75d174d5eaa8b4aed5072831f126e2e752f6" name = "github.com/Microsoft/ApplicationInsights-Go" @@ -1197,7 +1205,10 @@ [[projects]] digest = "1:026b6ceaabbacaa147e94a63579efc3d3c73e00c73b67fa5c43ab46191ed04eb" name = "github.com/vishvananda/netlink" - packages = ["nl"] + packages = [ + ".", + "nl", + ] pruneopts = "" revision = "b2de5d10e38ecce8607e6b438b6d174f389a004e" @@ -1712,6 +1723,7 @@ "github.com/Azure/azure-storage-queue-go/azqueue", "github.com/Azure/go-autorest/autorest", "github.com/Azure/go-autorest/autorest/azure/auth", + "github.com/Mellanox/rdmamap", "github.com/Microsoft/ApplicationInsights-Go/appinsights", "github.com/Shopify/sarama", "github.com/StackExchange/wmi", diff --git a/Gopkg.toml b/Gopkg.toml index 5604fd362..b4304c61c 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -297,6 +297,10 @@ branch = "master" name = "github.com/cisco-ie/nx-telemetry-proto" +[[constraint]] + branch = "master" + name = "github.com/Mellanox/rdmamap" + [[constraint]] name = "gopkg.in/ldap.v3" version = "3.1.0" diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 3ce9823f6..5860ac6c6 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -56,6 +56,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/http_response" _ "github.com/influxdata/telegraf/plugins/inputs/httpjson" _ "github.com/influxdata/telegraf/plugins/inputs/icinga2" + _ "github.com/influxdata/telegraf/plugins/inputs/infiniband" _ "github.com/influxdata/telegraf/plugins/inputs/influxdb" _ "github.com/influxdata/telegraf/plugins/inputs/influxdb_listener" _ 
"github.com/influxdata/telegraf/plugins/inputs/internal" diff --git a/plugins/inputs/infiniband/README.md b/plugins/inputs/infiniband/README.md new file mode 100644 index 000000000..6f2e85a96 --- /dev/null +++ b/plugins/inputs/infiniband/README.md @@ -0,0 +1,29 @@ +# InfiniBand Input Plugin + +This plugin gathers statistics for all InfiniBand devices and ports on the system. These are the counters that can be found in /sys/class/infiniband//port//counters/ + +### Configuration + +This section contains the default TOML to configure the plugin. You can +generate it using `telegraf --usage infiniband`. + +```toml +[[inputs.infiniband]] +``` + +There are no configuration options for this plugin. + +### Metrics + +You can find more information about the counters that are gathered here: +https://community.mellanox.com/s/article/understanding-mlx5-linux-counters-and-status-parameters + +There is a simple mapping from counter -> counter value. All counter values are 64 bit integers. A seperate measurement is made for each port. +Each measurement is tagged with the device and port that it relates to. These are strings. + + +### Example Output + +``` +infiniband,device=mlx5_0,port=1,VL15_dropped=0i,excessive_buffer_overrun_errors=0i,link_downed=0i,link_error_recovery=0i,local_link_integrity_errors=0i,multicast_rcv_packets=0i,multicast_xmit_packets=0i,port_rcv_constraint_errors=0i,port_rcv_data=237159415345822i,port_rcv_errors=0i,port_rcv_packets=801977655075i,port_rcv_remote_physical_errors=0i,port_rcv_switch_relay_errors=0i,port_xmit_constraint_errors=0i,port_xmit_data=238334949937759i,port_xmit_discards=0i,port_xmit_packets=803162651391i,port_xmit_wait=4294967295i,symbol_error=0i,unicast_rcv_packets=801977655075i,unicast_xmit_packets=803162651391i 1573125558000000000 +``` diff --git a/plugins/inputs/infiniband/infiniband.go b/plugins/inputs/infiniband/infiniband.go new file mode 100644 index 000000000..65e1d6c71 --- /dev/null +++ b/plugins/inputs/infiniband/infiniband.go @@ -0,0 +1,22 @@ +package infiniband + +import ( + "github.com/influxdata/telegraf" +) + +// Stores the configuration values for the infiniband plugin - as there are no +// config values, this is intentionally empty +type Infiniband struct { + Log telegraf.Logger `toml:"-"` +} + +// Sample configuration for plugin +var InfinibandConfig = `` + +func (_ *Infiniband) SampleConfig() string { + return InfinibandConfig +} + +func (_ *Infiniband) Description() string { + return "Gets counters from all InfiniBand cards and ports installed" +} diff --git a/plugins/inputs/infiniband/infiniband_linux.go b/plugins/inputs/infiniband/infiniband_linux.go new file mode 100644 index 000000000..48cd8a428 --- /dev/null +++ b/plugins/inputs/infiniband/infiniband_linux.go @@ -0,0 +1,59 @@ +// +build linux + +package infiniband + +import ( + "fmt" + "github.com/Mellanox/rdmamap" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "strconv" +) + +// Gather statistics from our infiniband cards +func (_ *Infiniband) Gather(acc telegraf.Accumulator) error { + + rdmaDevices := rdmamap.GetRdmaDeviceList() + + if len(rdmaDevices) == 0 { + return fmt.Errorf("no InfiniBand devices found in /sys/class/infiniband/") + } + + for _, dev := range rdmaDevices { + devicePorts := rdmamap.GetPorts(dev) + for _, port := range devicePorts { + portInt, err := strconv.Atoi(port) + if err != nil { + return err + } + + stats, err := rdmamap.GetRdmaSysfsStats(dev, portInt) + if err != nil { + return err + } + + addStats(dev, port, stats, acc) 
+ } + } + + return nil +} + +// Add the statistics to the accumulator +func addStats(dev string, port string, stats []rdmamap.RdmaStatEntry, acc telegraf.Accumulator) { + + // Allow users to filter by card and port + tags := map[string]string{"device": dev, "port": port} + fields := make(map[string]interface{}) + + for _, entry := range stats { + fields[entry.Name] = entry.Value + } + + acc.AddFields("infiniband", fields, tags) +} + +// Initialise plugin +func init() { + inputs.Add("infiniband", func() telegraf.Input { return &Infiniband{} }) +} diff --git a/plugins/inputs/infiniband/infiniband_notlinux.go b/plugins/inputs/infiniband/infiniband_notlinux.go new file mode 100644 index 000000000..5b19672d9 --- /dev/null +++ b/plugins/inputs/infiniband/infiniband_notlinux.go @@ -0,0 +1,23 @@ +// +build !linux + +package infiniband + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +func (i *Infiniband) Init() error { + i.Log.Warn("Current platform is not supported") + return nil +} + +func (_ *Infiniband) Gather(acc telegraf.Accumulator) error { + return nil +} + +func init() { + inputs.Add("infiniband", func() telegraf.Input { + return &Infiniband{} + }) +} diff --git a/plugins/inputs/infiniband/infiniband_test.go b/plugins/inputs/infiniband/infiniband_test.go new file mode 100644 index 000000000..6c4bb2458 --- /dev/null +++ b/plugins/inputs/infiniband/infiniband_test.go @@ -0,0 +1,134 @@ +// +build linux + +package infiniband + +import ( + "github.com/Mellanox/rdmamap" + "github.com/influxdata/telegraf/testutil" + "testing" +) + +func TestInfiniband(t *testing.T) { + fields := map[string]interface{}{ + "excessive_buffer_overrun_errors": uint64(0), + "link_downed": uint64(0), + "link_error_recovery": uint64(0), + "local_link_integrity_errors": uint64(0), + "multicast_rcv_packets": uint64(0), + "multicast_xmit_packets": uint64(0), + "port_rcv_constraint_errors": uint64(0), + "port_rcv_data": uint64(237159415345822), + "port_rcv_errors": uint64(0), + "port_rcv_packets": uint64(801977655075), + "port_rcv_remote_physical_errors": uint64(0), + "port_rcv_switch_relay_errors": uint64(0), + "port_xmit_constraint_errors": uint64(0), + "port_xmit_data": uint64(238334949937759), + "port_xmit_discards": uint64(0), + "port_xmit_packets": uint64(803162651391), + "port_xmit_wait": uint64(4294967295), + "symbol_error": uint64(0), + "unicast_rcv_packets": uint64(801977655075), + "unicast_xmit_packets": uint64(803162651391), + "VL15_dropped": uint64(0), + } + + tags := map[string]string{ + "device": "m1x5_0", + "port": "1", + } + + sample_rdmastats_entries := []rdmamap.RdmaStatEntry{ + { + Name: "excessive_buffer_overrun_errors", + Value: uint64(0), + }, + { + Name: "link_downed", + Value: uint64(0), + }, + { + Name: "link_error_recovery", + Value: uint64(0), + }, + { + Name: "local_link_integrity_errors", + Value: uint64(0), + }, + { + Name: "multicast_rcv_packets", + Value: uint64(0), + }, + { + Name: "multicast_xmit_packets", + Value: uint64(0), + }, + { + Name: "port_rcv_constraint_errors", + Value: uint64(0), + }, + { + Name: "port_rcv_data", + Value: uint64(237159415345822), + }, + { + Name: "port_rcv_errors", + Value: uint64(0), + }, + { + Name: "port_rcv_packets", + Value: uint64(801977655075), + }, + { + Name: "port_rcv_remote_physical_errors", + Value: uint64(0), + }, + { + Name: "port_rcv_switch_relay_errors", + Value: uint64(0), + }, + { + Name: "port_xmit_constraint_errors", + Value: uint64(0), + }, + { + Name: "port_xmit_data", + Value: 
uint64(238334949937759), + }, + { + Name: "port_xmit_discards", + Value: uint64(0), + }, + { + Name: "port_xmit_packets", + Value: uint64(803162651391), + }, + { + Name: "port_xmit_wait", + Value: uint64(4294967295), + }, + { + Name: "symbol_error", + Value: uint64(0), + }, + { + Name: "unicast_rcv_packets", + Value: uint64(801977655075), + }, + { + Name: "unicast_xmit_packets", + Value: uint64(803162651391), + }, + { + Name: "VL15_dropped", + Value: uint64(0), + }, + } + + var acc testutil.Accumulator + + addStats("m1x5_0", "1", sample_rdmastats_entries, &acc) + + acc.AssertContainsTaggedFields(t, "infiniband", fields, tags) + +} From 17c165391bc88f070feb938555d3f2c7226f83aa Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 16 Jan 2020 13:56:23 -0800 Subject: [PATCH 1476/1815] Update documentation for infiniband plugin --- CHANGELOG.md | 4 +++ README.md | 1 + docs/LICENSE_OF_DEPENDENCIES.md | 1 + plugins/inputs/infiniband/README.md | 51 ++++++++++++++++++++++------- 4 files changed, 46 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e42f2a1b9..64f233db4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,10 @@ - The `date` processor now uses the UTC timezone when creating its tag. In previous versions the local time was used. +#### New Inputs + +- [infiniband](/plugins/inputs/infiniband/README.md) - Contributed by @willfurnell + #### New Outputs - [warp10](/plugins/outputs/warp10/README.md) - Contributed by @aurrelhebert diff --git a/README.md b/README.md index 3276f33bf..a2a48b20f 100644 --- a/README.md +++ b/README.md @@ -202,6 +202,7 @@ For documentation on the latest development code see the [documentation index][d * [http](./plugins/inputs/http) (generic HTTP plugin, supports using input data formats) * [http_response](./plugins/inputs/http_response) * [icinga2](./plugins/inputs/icinga2) +* [infiniband](./plugins/inputs/infiniband) * [influxdb](./plugins/inputs/influxdb) * [influxdb_listener](./plugins/inputs/influxdb_listener) * [internal](./plugins/inputs/internal) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 0b0b95ab1..71636a0b8 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -75,6 +75,7 @@ following works: - github.com/mailru/easyjson [MIT License](https://github.com/mailru/easyjson/blob/master/LICENSE) - github.com/matttproud/golang_protobuf_extensions [Apache License 2.0](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE) - github.com/mdlayher/apcupsd [MIT License](https://github.com/mdlayher/apcupsd/blob/master/LICENSE.md) +- github.com/Mellanox/rdmamap [Apache License 2.0](https://github.com/Mellanox/rdmamap/blob/master/LICENSE) - github.com/Microsoft/ApplicationInsights-Go [MIT License](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE) - github.com/Microsoft/go-winio [MIT License](https://github.com/Microsoft/go-winio/blob/master/LICENSE) - github.com/miekg/dns [BSD 3-Clause Clear License](https://github.com/miekg/dns/blob/master/LICENSE) diff --git a/plugins/inputs/infiniband/README.md b/plugins/inputs/infiniband/README.md index 6f2e85a96..bc5b03543 100644 --- a/plugins/inputs/infiniband/README.md +++ b/plugins/inputs/infiniband/README.md @@ -1,29 +1,58 @@ # InfiniBand Input Plugin -This plugin gathers statistics for all InfiniBand devices and ports on the system. 
These are the counters that can be found in /sys/class/infiniband/<device>/ports/<port>/counters/ +This plugin gathers statistics for all InfiniBand devices and ports on the +system. These are the counters that can be found in +`/sys/class/infiniband/<device>/ports/<port>/counters/` + +**Supported Platforms**: Linux ### Configuration -This section contains the default TOML to configure the plugin. You can -generate it using `telegraf --usage infiniband`. - ```toml [[inputs.infiniband]] + # no configuration ``` -There are no configuration options for this plugin. - ### Metrics -You can find more information about the counters that are gathered here: -https://community.mellanox.com/s/article/understanding-mlx5-linux-counters-and-status-parameters +Actual metrics depend on the InfiniBand devices; the plugin uses a simple +mapping from counter -> counter value. + +[Information about the counters][counters] collected is provided by Mellanox. + +[counters]: https://community.mellanox.com/s/article/understanding-mlx5-linux-counters-and-status-parameters + +- infiniband + - tags: + - device + - port + - fields: + - excessive_buffer_overrun_errors (integer) + - link_downed (integer) + - link_error_recovery (integer) + - local_link_integrity_errors (integer) + - multicast_rcv_packets (integer) + - multicast_xmit_packets (integer) + - port_rcv_constraint_errors (integer) + - port_rcv_data (integer) + - port_rcv_errors (integer) + - port_rcv_packets (integer) + - port_rcv_remote_physical_errors (integer) + - port_rcv_switch_relay_errors (integer) + - port_xmit_constraint_errors (integer) + - port_xmit_data (integer) + - port_xmit_discards (integer) + - port_xmit_packets (integer) + - port_xmit_wait (integer) + - symbol_error (integer) + - unicast_rcv_packets (integer) + - unicast_xmit_packets (integer) + - VL15_dropped (integer) -There is a simple mapping from counter -> counter value. All counter values are 64-bit integers. A separate measurement is made for each port. -Each measurement is tagged with the device and port that it relates to. These are strings.
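For context, each counter the plugin reports is a plain-text file under the port's sysfs directory. A rough sketch of reading them directly, approximating what the rdmamap library does for the plugin (the helper below is illustrative, not the plugin's code):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strconv"
	"strings"
)

// readCounters parses every file under
// /sys/class/infiniband/<device>/ports/<port>/counters/ as a uint64.
func readCounters(device, port string) (map[string]uint64, error) {
	dir := filepath.Join("/sys/class/infiniband", device, "ports", port, "counters")
	files, err := ioutil.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	counters := make(map[string]uint64)
	for _, f := range files {
		raw, err := ioutil.ReadFile(filepath.Join(dir, f.Name()))
		if err != nil {
			continue // skip unreadable counters rather than failing the scrape
		}
		v, err := strconv.ParseUint(strings.TrimSpace(string(raw)), 10, 64)
		if err != nil {
			continue // skip entries that are not plain integers
		}
		counters[f.Name()] = v
	}
	return counters, nil
}

func main() {
	c, err := readCounters("mlx5_0", "1")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(c["port_rcv_data"])
}
```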
### Example Output ``` -infiniband,device=mlx5_0,port=1,VL15_dropped=0i,excessive_buffer_overrun_errors=0i,link_downed=0i,link_error_recovery=0i,local_link_integrity_errors=0i,multicast_rcv_packets=0i,multicast_xmit_packets=0i,port_rcv_constraint_errors=0i,port_rcv_data=237159415345822i,port_rcv_errors=0i,port_rcv_packets=801977655075i,port_rcv_remote_physical_errors=0i,port_rcv_switch_relay_errors=0i,port_xmit_constraint_errors=0i,port_xmit_data=238334949937759i,port_xmit_discards=0i,port_xmit_packets=803162651391i,port_xmit_wait=4294967295i,symbol_error=0i,unicast_rcv_packets=801977655075i,unicast_xmit_packets=803162651391i 1573125558000000000 +infiniband,device=mlx5_0,port=1 VL15_dropped=0i,excessive_buffer_overrun_errors=0i,link_downed=0i,link_error_recovery=0i,local_link_integrity_errors=0i,multicast_rcv_packets=0i,multicast_xmit_packets=0i,port_rcv_constraint_errors=0i,port_rcv_data=237159415345822i,port_rcv_errors=0i,port_rcv_packets=801977655075i,port_rcv_remote_physical_errors=0i,port_rcv_switch_relay_errors=0i,port_xmit_constraint_errors=0i,port_xmit_data=238334949937759i,port_xmit_discards=0i,port_xmit_packets=803162651391i,port_xmit_wait=4294967295i,symbol_error=0i,unicast_rcv_packets=801977655075i,unicast_xmit_packets=803162651391i 1573125558000000000 ``` From 5f1f4b9e8d2fcce14a842a83ad0a018f9342103b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 16 Jan 2020 14:38:06 -0800 Subject: [PATCH 1477/1815] Use Go modules for dependency management (#6912) --- .circleci/config.yml | 32 +- Makefile | 12 +- README.md | 12 +- appveyor.yml | 27 +- go.mod | 137 ++++++ go.sum | 579 ++++++++++++++++++++++++ plugins/inputs/syslog/syslog.go | 8 +- plugins/outputs/syslog/syslog.go | 4 +- plugins/outputs/syslog/syslog_mapper.go | 2 +- scripts/build.py | 4 +- 10 files changed, 771 insertions(+), 46 deletions(-) create mode 100644 go.mod create mode 100644 go.sum diff --git a/.circleci/config.yml b/.circleci/config.yml index e070c2957..874a28bb4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -7,6 +7,8 @@ defaults: go-1_12: &go-1_12 docker: - image: 'quay.io/influxdb/telegraf-ci:1.12.14' + environment: + GO111MODULE: 'on' go-1_13: &go-1_13 docker: - image: 'quay.io/influxdb/telegraf-ci:1.13.5' @@ -18,16 +20,16 @@ jobs: steps: - checkout - restore_cache: - key: vendor-{{ checksum "Gopkg.lock" }} + key: go-mod-v1-{{ checksum "go.sum" }} - run: 'make deps' - - run: 'dep check' + - run: 'make tidy' - save_cache: - name: 'vendored deps' - key: vendor-{{ checksum "Gopkg.lock" }} + name: 'go module cache' + key: go-mod-v1-{{ checksum "go.sum" }} paths: - - './vendor' + - '/go/pkg/mod' - persist_to_workspace: - root: '/go/src' + root: '/go' paths: - '*' @@ -35,28 +37,32 @@ jobs: <<: [ *defaults, *go-1_12 ] steps: - attach_workspace: - at: '/go/src' + at: '/go' + - run: 'make' - run: 'make check' - run: 'make test' test-go-1.12-386: <<: [ *defaults, *go-1_12 ] steps: - attach_workspace: - at: '/go/src' + at: '/go' + - run: 'GOARCH=386 make' - run: 'GOARCH=386 make check' - run: 'GOARCH=386 make test' test-go-1.13: <<: [ *defaults, *go-1_13 ] steps: - attach_workspace: - at: '/go/src' + at: '/go' + - run: 'make' - run: 'make check' - run: 'make test' test-go-1.13-386: <<: [ *defaults, *go-1_13 ] steps: - attach_workspace: - at: '/go/src' + at: '/go' + - run: 'GOARCH=386 make' - run: 'GOARCH=386 make check' - run: 'GOARCH=386 make test' @@ -64,7 +70,7 @@ jobs: <<: [ *defaults, *go-1_13 ] steps: - attach_workspace: - at: '/go/src' + at: '/go' - run: 'make package' - store_artifacts: 
path: './build' @@ -73,7 +79,7 @@ jobs: <<: [ *defaults, *go-1_13 ] steps: - attach_workspace: - at: '/go/src' + at: '/go' - run: 'make package-release' - store_artifacts: path: './build' @@ -82,7 +88,7 @@ jobs: <<: [ *defaults, *go-1_13 ] steps: - attach_workspace: - at: '/go/src' + at: '/go' - run: 'make package-nightly' - store_artifacts: path: './build' diff --git a/Makefile b/Makefile index 9202cc1f4..27aefdeb7 100644 --- a/Makefile +++ b/Makefile @@ -32,7 +32,7 @@ all: .PHONY: deps deps: - dep ensure -vendor-only + go mod download .PHONY: telegraf telegraf: @@ -83,8 +83,18 @@ vet: exit 1; \ fi +.PHONY: tidy +tidy: + go mod verify + go mod tidy + @if ! git diff --quiet go.mod go.sum; then \ + echo "please run go mod tidy and check in changes"; \ + exit 1; \ + fi + .PHONY: check check: fmtcheck vet + @$(MAKE) --no-print-directory tidy .PHONY: test-all test-all: fmtcheck vet diff --git a/README.md b/README.md index a2a48b20f..81990320d 100644 --- a/README.md +++ b/README.md @@ -50,17 +50,17 @@ Ansible role: https://github.com/rossmcdonald/telegraf ### From Source: -Telegraf requires golang version 1.12 or newer, the Makefile requires GNU make. +Telegraf requires Go version 1.12 or newer, the Makefile requires GNU make. 1. [Install Go](https://golang.org/doc/install) >=1.12 (1.13 recommended) -2. [Install dep](https://golang.github.io/dep/docs/installation.html) ==v0.5.0 -3. Download Telegraf source: +2. Clone the Telegraf repository: ``` - go get -d github.com/influxdata/telegraf + cd ~/src + git clone https://github.com/influxdata/telegraf.git ``` -4. Run make from the source directory +3. Run `make` from the source directory ``` - cd "$HOME/go/src/github.com/influxdata/telegraf" + cd ~/src/telegraf make ``` diff --git a/appveyor.yml b/appveyor.yml index 66d17b0f4..559647e35 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,40 +1,33 @@ version: "{build}" cache: - - C:\Cache - - C:\gopath\pkg\dep\sources -> Gopkg.lock + - C:\gopath\pkg\mod -> go.sum + - C:\ProgramData\chocolatey\bin -> appveyor.yml + - C:\ProgramData\chocolatey\lib -> appveyor.yml clone_folder: C:\gopath\src\github.com\influxdata\telegraf environment: + GOVERSION: 1.13.5 GOPATH: C:\gopath platform: x64 install: - - IF NOT EXIST "C:\Cache" mkdir C:\Cache - - IF NOT EXIST "C:\Cache\go1.13.5.msi" curl -o "C:\Cache\go1.13.5.msi" https://storage.googleapis.com/golang/go1.13.5.windows-amd64.msi - - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip - - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip - - IF EXIST "C:\Go" rmdir /S /Q C:\Go - - msiexec.exe /i "C:\Cache\go1.13.5.msi" /quiet - - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y - - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y - - go get -d github.com/golang/dep - - cd "%GOPATH%\src\github.com\golang\dep" - - git checkout -q v0.5.0 - - go install -ldflags="-X main.version=v0.5.0" ./cmd/dep + - choco install golang --version "%GOVERSION%" + - choco install make - cd "%GOPATH%\src\github.com\influxdata\telegraf" - git config --system core.longpaths true - go version - go env build_script: - - cmd: C:\GnuWin32\bin\make + - make deps + - make telegraf test_script: - - cmd: C:\GnuWin32\bin\make check - - cmd: C:\GnuWin32\bin\make test-windows + - make check + - make test-windows artifacts: - path: telegraf.exe diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..36819f522 --- /dev/null 
+++ b/go.mod @@ -0,0 +1,137 @@ +module github.com/influxdata/telegraf + +go 1.12 + +require ( + cloud.google.com/go v0.37.4 + code.cloudfoundry.org/clock v1.0.0 // indirect + collectd.org v0.3.0 + github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 + github.com/Azure/go-autorest/autorest v0.9.3 + github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 + github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee + github.com/Microsoft/ApplicationInsights-Go v0.4.2 + github.com/Microsoft/go-winio v0.4.9 // indirect + github.com/Shopify/sarama v1.24.1 + github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 + github.com/aerospike/aerospike-client-go v1.27.0 + github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf + github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 + github.com/apache/thrift v0.12.0 + github.com/armon/go-metrics v0.3.0 // indirect + github.com/aws/aws-sdk-go v1.19.41 + github.com/bitly/go-hostpool v0.1.0 // indirect + github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect + github.com/caio/go-tdigest v2.3.0+incompatible // indirect + github.com/cenkalti/backoff v2.0.0+incompatible // indirect + github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 + github.com/cockroachdb/apd v1.1.0 // indirect + github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 + github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 // indirect + github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a // indirect + github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 + github.com/dgrijalva/jwt-go v3.2.0+incompatible + github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible // indirect + github.com/docker/docker v1.4.2-0.20180327123150-ed7b6428c133 + github.com/docker/go-connections v0.3.0 // indirect + github.com/docker/go-units v0.3.3 // indirect + github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 + github.com/eclipse/paho.mqtt.golang v1.2.0 + github.com/ericchiang/k8s v1.2.0 + github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 + github.com/glinton/ping v0.1.3 + github.com/go-logfmt/logfmt v0.4.0 + github.com/go-ole/go-ole v1.2.1 // indirect + github.com/go-redis/redis v6.12.0+incompatible + github.com/go-sql-driver/mysql v1.4.1 + github.com/gobwas/glob v0.2.3 + github.com/gofrs/uuid v2.1.0+incompatible + github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d + github.com/golang/mock v1.3.1-0.20190508161146-9fa652df1129 // indirect + github.com/golang/protobuf v1.3.2 + github.com/google/go-cmp v0.3.0 + github.com/google/go-github v17.0.0+incompatible + github.com/google/go-querystring v1.0.0 // indirect + github.com/gorilla/mux v1.6.2 + github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect + github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect + github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 + github.com/hashicorp/consul v1.2.1 + github.com/hashicorp/go-msgpack v0.5.5 // indirect + github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 // indirect + github.com/hashicorp/memberlist v0.1.5 // indirect + github.com/hashicorp/serf v0.8.1 // indirect + github.com/influxdata/go-syslog/v2 v2.0.1 + github.com/influxdata/tail v1.0.1-0.20180327235535-c43482518d41 + github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 + github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 + github.com/jackc/fake 
v0.0.0-20150926172116-812a484cc733 // indirect + github.com/jackc/pgx v3.6.0+incompatible + github.com/jcmturner/gofork v1.0.0 // indirect + github.com/kardianos/service v1.0.0 + github.com/karrick/godirwalk v1.12.0 + github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 + github.com/klauspost/compress v1.9.2 // indirect + github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 // indirect + github.com/lib/pq v1.3.0 // indirect + github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 + github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe + github.com/miekg/dns v1.0.14 + github.com/mitchellh/go-testing-interface v1.0.0 // indirect + github.com/mitchellh/mapstructure v0.0.0-20180715050151-f15292f7a699 // indirect + github.com/multiplay/go-ts3 v1.0.0 + github.com/naoina/go-stringutil v0.1.0 // indirect + github.com/nats-io/gnatsd v1.2.0 + github.com/nats-io/go-nats v1.5.0 + github.com/nats-io/nuid v1.0.0 // indirect + github.com/nsqio/go-nsq v1.0.7 + github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 + github.com/opencontainers/go-digest v1.0.0-rc1 // indirect + github.com/opencontainers/image-spec v1.0.1 // indirect + github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect + github.com/opentracing/opentracing-go v1.0.2 // indirect + github.com/openzipkin/zipkin-go-opentracing v0.3.4 + github.com/pkg/errors v0.8.1 + github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 + github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f + github.com/prometheus/common v0.2.0 + github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8 + github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect + github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect + github.com/shirou/gopsutil v2.19.11+incompatible + github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 // indirect + github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect + github.com/sirupsen/logrus v1.2.0 + github.com/soniah/gosnmp v1.22.0 + github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 + github.com/stretchr/testify v1.4.0 + github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 // indirect + github.com/tidwall/gjson v1.3.0 + github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect + github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect + github.com/vjeantet/grok v1.0.0 + github.com/vmware/govmomi v0.19.0 + github.com/wavefronthq/wavefront-sdk-go v0.9.2 + github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf + github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a // indirect + github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect + golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 + golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 + golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 + gonum.org/v1/gonum v0.6.2 // indirect + google.golang.org/api v0.3.1 + google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 + google.golang.org/grpc v1.19.0 + gopkg.in/fatih/pool.v2 v2.0.0 // indirect + gopkg.in/gorethink/gorethink.v3 v3.0.5 + gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect + gopkg.in/ldap.v3 v3.1.0 + gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce + 
gopkg.in/olivere/elastic.v5 v5.0.70 + gopkg.in/yaml.v2 v2.2.4 + gotest.tools v2.2.0+incompatible // indirect + k8s.io/apimachinery v0.17.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..ed221dee3 --- /dev/null +++ b/go.sum @@ -0,0 +1,579 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU= +cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= +code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= +code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= +collectd.org v0.3.0 h1:iNBHGw1VvPJxH2B6RiFWFZ+vsjo1lCdRszBeOuwGi00= +collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +github.com/Azure/azure-pipeline-go v0.1.8 h1:KmVRa8oFMaargVesEuuEoiLCQ4zCCwQ8QX/xg++KS20= +github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= +github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 h1:7MiZ6Th+YTmwUdrKmFg5OMsGYz7IdQwjqL0RPxkhhOQ= +github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.3 h1:OZEIaBbMdUE/Js+BQKlpO81XlISgipr6yDJ+PSwsgi4= +github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.8.1 h1:pZdL8o72rK+avFWl+p9nE8RWi1JInZrWJYlnpfXJwHk= +github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= 
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee h1:atI/FFjXh6hIVlPE1Jup9m8N4B9q/OSbMUe2EBahs+w= +github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM= +github.com/Microsoft/ApplicationInsights-Go v0.4.2 h1:HIZoGXMiKNwAtMAgCSSX35j9mP+DjGF9ezfBvxMDLLg= +github.com/Microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:CukZ/G66zxXtI+h/VcVn3eVVDGDHfXM2zVILF7bMmsg= +github.com/Microsoft/go-winio v0.4.9 h1:3RbgqgGVqmcpbOiwrjbVtDHLlJBGF6aE+yHmNtBNsFQ= +github.com/Microsoft/go-winio v0.4.9/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/sarama v1.24.1 h1:svn9vfN3R1Hz21WR2Gj0VW9ehaDGkiOS+VqlIcZOkMI= +github.com/Shopify/sarama v1.24.1/go.mod h1:fGP8eQ6PugKEI0iUETYYtnP6d1pH/bdDMTel1X5ajsU= +github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/aerospike/aerospike-client-go v1.27.0 h1:VC6/Wqqm3Qlp4/utM7Zts3cv4A2HPn8rVFp/XZKTWgE= +github.com/aerospike/aerospike-client-go v1.27.0/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= +github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= +github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU= +github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= +github.com/aws/aws-sdk-go v1.19.41 h1:veutzvQP/lOmYmtX26S9mTFJLO6sp7/UsxFcCjglu4A= +github.com/aws/aws-sdk-go v1.19.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 
h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= +github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/caio/go-tdigest v2.3.0+incompatible h1:zP6nR0nTSUzlSqqr7F/LhslPlSZX/fZeGmgmwj2cxxY= +github.com/caio/go-tdigest v2.3.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= +github.com/cenkalti/backoff v2.0.0+incompatible h1:5IIPUHhlnUZbcHQsQou5k1Tn58nJkeJL9U+ig5CHJbY= +github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 h1:57RI0wFkG/smvVTcz7F43+R0k+Hvci3jAVQF9lyMoOo= +github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 h1:Dbz60fpCq04vRxVVVJLbQuL0G7pRt0Gyo2BkozFc4SQ= +github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U= +github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 h1:F8nmbiuX+gCz9xvWMi6Ak8HQntB4ATFXP46gaxifbp4= +github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c= +github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a h1:Y5XsLCEhtEI8qbD9RP3Qlv5FXdTDHxZM9UPUnMRgBp8= +github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 h1:YcpmyvADGYw5LqMnHqSkyIELsHCGF6PkrmM31V8rF7o= +github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible 
h1:357nGVUC8gSpeSc2Axup8HfrfTLLUfWfCsCUhiQSKIg= +github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20180327123150-ed7b6428c133 h1:Kus8nU6ctI/u/l86ljUJl6GpUtmO7gtD/krn4u5dr0M= +github.com/docker/docker v1.4.2-0.20180327123150-ed7b6428c133/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o= +github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 h1:KgEcrKF0NWi9GT/OvDp9ioXZIrHRbP8S5o+sot9gznQ= +github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0= +github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/ericchiang/k8s v1.2.0 h1:vxrMwEzY43oxu8aZyD/7b1s8tsBM+xoUoxjWECWFbPI= +github.com/ericchiang/k8s v1.2.0/go.mod h1:/OmBgSq2cd9IANnsGHGlEz27nwMZV2YxlpXuQtU3Bz4= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.4.1 h1:Wv2VwvNn73pAdFIVUQRXYDFp31lXKbqblIXo/Q5GPSg= +github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= +github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= +github.com/glinton/ping v0.1.3 h1:8/9mj+hCgfba0X25E0Xs7cy+Zg9jGQVyulMVlUBrDDA= +github.com/glinton/ping 
v0.1.3/go.mod h1:uY+1eqFUyotrQxF1wYFNtMeHp/swbYRsoGzfcPZ8x3o= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= +github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-redis/redis v6.12.0+incompatible h1:s+64XI+z/RXqGHz2fQSgRJOEwqqSXeX3dliF7iVkMbE= +github.com/go-redis/redis v6.12.0+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gofrs/uuid v2.1.0+incompatible h1:8oEj3gioPmmDAOLQUZdnW+h4FZu9aSE/SQIas1E9pzA= +github.com/gofrs/uuid v2.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1-0.20190508161146-9fa652df1129 h1:tT8iWCYw4uOem71yYA3htfH+LNopJvcqZQshm56G5L4= +github.com/golang/mock v1.3.1-0.20190508161146-9fa652df1129/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf 
v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 h1:U0KvGD9CJIl1nbgu9yLsfWxMT6WqL8fG0IBB7RvOZZQ= +github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ= +github.com/hashicorp/consul v1.2.1 h1:66MuuTfV4aOXTQM7cjAIKUWFOITSk4XZlMhE09ymVbg= +github.com/hashicorp/consul v1.2.1/go.mod 
h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:VBj0QYQ0u2MCJzBfeYXGexnAl17GsH1yidnoxCqqD9E= +github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg= +github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/memberlist v0.1.5 h1:AYBsgJOW9gab/toO5tEB8lWetVgDKZycqkebJ8xxpqM= +github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.1 h1:mYs6SMzu72+90OcPa5wr3nfznA4Dw9UyR791ZFNOIf4= +github.com/hashicorp/serf v0.8.1/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/influxdata/go-syslog/v2 v2.0.1 h1:l44S4l4Q8MhGQcoOxJpbo+QQYxJqp0vdgIVHh4+DO0s= +github.com/influxdata/go-syslog/v2 v2.0.1/go.mod h1:hjvie1UTaD5E1fTnDmxaCw8RRDrT4Ve+XHr5O2dKSCo= +github.com/influxdata/tail v1.0.1-0.20180327235535-c43482518d41 h1:HxQo1NpNXQDpvEBzthbQLmePvTLFTa5GzSFUjL03aEs= +github.com/influxdata/tail v1.0.1-0.20180327235535-c43482518d41/go.mod h1:xTFF2SILpIYc5N+Srb0d5qpx7d+f733nBrbasb13DtQ= +github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 h1:vvyMtD5LTJc1W9sQKjDkAWdcg0478CszSdzlHtiAXCY= +github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP/bTpQItGZNNUMISDMDAnTXu9UqJ4yT3ocz8= +github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 h1:W2IgzRCb0L9VzMujq/QuTaZUKcH8096jWwP519mHN6Q= +github.com/influxdata/wlog 
v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI= +github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc= +github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= +github.com/jackc/pgx v3.6.0+incompatible h1:bJeo4JdVbDAW8KB2m8XkFeo8CPipREoG37BwEoKGz+Q= +github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/kardianos/service v1.0.0 h1:HgQS3mFfOlyntWX8Oke98JcJLqt1DBcHR4kxShpYef0= +github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo= +github.com/karrick/godirwalk v1.12.0 h1:nkS4xxsjiZMvVlazd0mFyiwD4BR9f3m6LXGhM2TUx3Y= +github.com/karrick/godirwalk v1.12.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY= +github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee 
h1:MB75LRhfeLER2RF7neSVpYuX/lL8aPi3yPtv5vdOJmk= +github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee/go.mod h1:Pe/YBTPc3vqoMkbuIWPH8CF9ehINdvNyS0dP3J6HC0s= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+OsPH9rF2u428CIrGL/jLmPsoOQQ4= +github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U= +github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg= +github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= +github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= +github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 h1:8/+Y8SKf0xCZ8cCTfnrMdY7HNzlEjPAt3bPjalNb6CA= +github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe h1:yMrL+YorbzaBpj/h3BbLMP+qeslPZYMbzcpHFBNy1Yk= +github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe/go.mod h1:y3mw3VG+t0m20OMqpG8RQqw8cDXvShVb+L8Z8FEnebw= +github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/mapstructure v0.0.0-20180715050151-f15292f7a699 h1:KXZJFdun9knAVAR8tg/aHJEr5DgtcbqyvzacK+CDCaI= +github.com/mitchellh/mapstructure v0.0.0-20180715050151-f15292f7a699/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/multiplay/go-ts3 v1.0.0 h1:loxtEFqvYtpoGh1jOqEt6aDzctYuQsi3vb3dMpvWiWw= +github.com/multiplay/go-ts3 v1.0.0/go.mod h1:14S6cS3fLNT3xOytrA/DkRyAFNuQLMLEqOYAsf87IbQ= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod 
h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= +github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= +github.com/nats-io/gnatsd v1.2.0 h1:WKLzmB8LyP4CiVJuAoZMxdYBurENVX4piS358tjcBhw= +github.com/nats-io/gnatsd v1.2.0/go.mod h1:nqco77VO78hLCJpIcVfygDP2rPGfsEHkGTUk94uh5DQ= +github.com/nats-io/go-nats v1.5.0 h1:OrEQSvQQrP+A+9EBBxY86Z4Es6uaUdObZ5UhWHn9b08= +github.com/nats-io/go-nats v1.5.0/go.mod h1:+t7RHT5ApZebkrQdnn6AhQJmhJJiKAvJUio1PiiCtj0= +github.com/nats-io/nuid v1.0.0 h1:44QGdhbiANq8ZCbUkdn6W5bqtg+mHuDE4wOUuxxndFs= +github.com/nats-io/nuid v1.0.0/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nsqio/go-nsq v1.0.7 h1:O0pIZJYTf+x7cZBA0UMY8WxFG79lYTURmWzAAh48ljY= +github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 h1:lXQqyLroROhwR2Yq/kXbLzVecgmVeZh2TFLg6OxCd+w= +github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= +github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go-opentracing v0.3.4 h1:x/pBv/5VJNWkcHF1G9xqhug8Iw7X1y1zOMzDmyuvP2g= +github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 
h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw= +github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8 h1:2c1EFnZHIPCW8qKWgHMH/fX2PkSabFc5mrVzfUNdg5U= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec h1:6ncX5ko6B9LntYM0YBRXkiSaZMmLYeZ/NWcmeB43mMY= +github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM= +github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod 
h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shirou/gopsutil v2.19.11+incompatible h1:lJHR0foqAjI4exXqWsU3DbH7bX1xvdhGdnXTIARA9W4= +github.com/shirou/gopsutil v2.19.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 h1:udFKJ0aHUL60LboW/A+DfgoHVedieIzIXE8uylPue0U= +github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= +github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/soniah/gosnmp v1.22.0 h1:jVJi8+OGvR+JHIaZKMmnyNP0akJd2vEgNatybwhZvxg= +github.com/soniah/gosnmp v1.22.0/go.mod h1:DuEpAS0az51+DyVBQwITDsoq4++e3LTNckp2GoasF2I= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 h1:l6epF6yBwuejBfhGkM5m8VSNM/QAm7ApGyH35ehA7eQ= +github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8/go.mod h1:1WNBiOZtZQLpVAyu0iTduoJL9hEsMloAK5XWrtW0xdY= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 h1:mujcChM89zOHwgZBBNr5WZ77mBXP1yR+gLThGCYZgAg= +github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= +github.com/tidwall/gjson v1.3.0 h1:kfpsw1W3trbg4Xm6doUtqSl9+LhLB6qJ9PkltVAQZYs= +github.com/tidwall/gjson v1.3.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= +github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= +github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e h1:f1yevOHP+Suqk0rVc13fIkzcLULJbyQcXDba2klljD0= +github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod 
h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vjeantet/grok v1.0.0 h1:uxMqatJP6MOFXsj6C1tZBnqqAThQEeqnizUZ48gSJQQ= +github.com/vjeantet/grok v1.0.0/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo= +github.com/vmware/govmomi v0.19.0 h1:CR6tEByWCPOnRoRyhLzuHaU+6o2ybF3qufNRWS/MGrY= +github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/wavefronthq/wavefront-sdk-go v0.9.2 h1:/LvWgZYNjHFUg+ZUX+qv+7e+M8sEMi0lM15zPp681Gk= +github.com/wavefronthq/wavefront-sdk-go v0.9.2/go.mod h1:hQI6y8M9OtTCtc0xdwh+dCER4osxXdEAeCpacjpDZEU= +github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf h1:TOV5PC6fIWwFOFra9xJfRXZcL2pLhMI8oNuDugNxg9Q= +github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf/go.mod h1:nxx7XRXbR9ykhnC8lXqQyJS0rfvJGxKyKw/sT1YOttg= +github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a h1:ILoU84rj4AQ3q6cjQvtb9jBjx4xzR/Riq/zYhmDQiOk= +github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a/go.mod h1:vQQATAGxVK20DC1rRubTJbZDDhhpA4QfU02pMdPxGO4= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk= +github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= +go.opencensus.io v0.20.1 h1:pMEjRZ1M4ebWGikflH7nQpV6+Zr88KBMA2XJD3sbijw= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2 h1:y102fOLFqhV41b+4GPiJoa0k/x+pJcEi2/HB1Y5T6fU= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint 
v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.6.2 h1:4r+yNT0+8SWcOkXP+63H2zQbN+USnC73cjGUxnDF94Q= +gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg= +gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU= +gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= +gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= +gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= +gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= +gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/gokrb5.v7 v7.3.0 h1:0709Jtq/6QXEuWRfAm260XqlpcwL1vxtO1tUE2qK8Z4= +gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= +gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= +gopkg.in/ldap.v3 v3.1.0 h1:DIDWEjI7vQWREh0S8X5/NFPCZ3MCVd55LmXKPW4XLGE= +gopkg.in/ldap.v3 v3.1.0/go.mod h1:dQjCc0R0kfyFjIlWNMH1DORwUASZyDxo2Ry1B51dXaQ= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/olivere/elastic.v5 v5.0.70 h1:DqFG2Odzs74JCz6SssgJjd6qpGnsOAzNc7+l5EnvsnE= +gopkg.in/olivere/elastic.v5 v5.0.70/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i3o+6qtQRk= +gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/apimachinery v0.17.1 h1:zUjS3szTxoUjTDYNvdFkYt2uMEXLcthcbp+7uZvWhYM= +k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go index 43d02de5e..92d134092 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -12,10 +12,10 @@ import ( "time" "unicode" - "github.com/influxdata/go-syslog" - "github.com/influxdata/go-syslog/nontransparent" - "github.com/influxdata/go-syslog/octetcounting" - "github.com/influxdata/go-syslog/rfc5424" + "github.com/influxdata/go-syslog/v2" + "github.com/influxdata/go-syslog/v2/nontransparent" + "github.com/influxdata/go-syslog/v2/octetcounting" + "github.com/influxdata/go-syslog/v2/rfc5424" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" framing "github.com/influxdata/telegraf/internal/syslog" diff --git a/plugins/outputs/syslog/syslog.go b/plugins/outputs/syslog/syslog.go index 013db94a1..582e8e920 100644 --- a/plugins/outputs/syslog/syslog.go +++ b/plugins/outputs/syslog/syslog.go @@ -8,8 +8,8 @@ import ( "strconv" "strings" - "github.com/influxdata/go-syslog/nontransparent" - "github.com/influxdata/go-syslog/rfc5424" + "github.com/influxdata/go-syslog/v2/nontransparent" + "github.com/influxdata/go-syslog/v2/rfc5424" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" framing "github.com/influxdata/telegraf/internal/syslog" diff --git a/plugins/outputs/syslog/syslog_mapper.go b/plugins/outputs/syslog/syslog_mapper.go index ba6b0d660..4e4848205 100644 --- a/plugins/outputs/syslog/syslog_mapper.go +++ b/plugins/outputs/syslog/syslog_mapper.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/influxdata/go-syslog/rfc5424" + 
"github.com/influxdata/go-syslog/v2/rfc5424" "github.com/influxdata/telegraf" ) diff --git a/scripts/build.py b/scripts/build.py index 6d404ea9d..2c2d0be76 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -161,8 +161,8 @@ def go_get(branch, update=False, no_uncommitted=False): if local_changes() and no_uncommitted: logging.error("There are uncommitted changes in the current directory.") return False - logging.info("Retrieving dependencies with `dep`...") - run("dep ensure -v -vendor-only") + logging.info("Retrieving dependencies...") + run("go mod download") return True def run_tests(race, parallel, timeout, no_vet): From c6519c7793566dea49f6e5612146b664ab172b38 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 21 Jan 2020 10:10:02 -0800 Subject: [PATCH 1478/1815] Hook up json_strict option with default of true (#6909) --- internal/config/config.go | 17 ++++++++++++++++- internal/config/config_test.go | 1 + plugins/parsers/json/README.md | 2 +- 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index 586acce71..6e05ce45b 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1411,7 +1411,9 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { } func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { - c := &parsers.Config{} + c := &parsers.Config{ + JSONStrict: true, + } if node, ok := tbl.Fields["data_format"]; ok { if kv, ok := node.(*ast.KeyValue); ok { @@ -1512,6 +1514,18 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { } } + if node, ok := tbl.Fields["json_strict"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if b, ok := kv.Value.(*ast.Boolean); ok { + var err error + c.JSONStrict, err = b.Boolean() + if err != nil { + return nil, err + } + } + } + } + if node, ok := tbl.Fields["data_type"]; ok { if kv, ok := node.(*ast.KeyValue); ok { if str, ok := kv.Value.(*ast.String); ok { @@ -1808,6 +1822,7 @@ func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { delete(tbl.Fields, "json_time_format") delete(tbl.Fields, "json_time_key") delete(tbl.Fields, "json_timezone") + delete(tbl.Fields, "json_strict") delete(tbl.Fields, "data_type") delete(tbl.Fields, "collectd_auth_file") delete(tbl.Fields, "collectd_security_level") diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 7559bf9fe..9d42177cd 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -149,6 +149,7 @@ func TestConfig_LoadDirectory(t *testing.T) { p, err := parsers.NewParser(&parsers.Config{ MetricName: "exec", DataFormat: "json", + JSONStrict: true, }) assert.NoError(t, err) ex.SetParser(p) diff --git a/plugins/parsers/json/README.md b/plugins/parsers/json/README.md index b4975bcd3..b318f32e0 100644 --- a/plugins/parsers/json/README.md +++ b/plugins/parsers/json/README.md @@ -20,7 +20,7 @@ ignored unless specified in the `tag_key` or `json_string_fields` options. ## When strict is true and a JSON array is being parsed, all objects within the ## array must be valid - strict = false + json_strict = true ## Query is a GJSON path that specifies a specific chunk of JSON to be ## parsed, if not specified the whole document will be parsed. 
From ebebfd95735ff90c31efa8ccd107ac4f3b70bca2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 21 Jan 2020 10:12:59 -0800 Subject: [PATCH 1479/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 64f233db4..97c32e3e8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ - [#6899](https://github.com/influxdata/telegraf/pull/6899): Fix URL agent address form with udp in snmp input. - [#6619](https://github.com/influxdata/telegraf/issues/6619): Change logic to allow recording of device fields when attributes is false. - [#6903](https://github.com/influxdata/telegraf/issues/6903): Do not add invalid timestamps to kafka messages. +- [#6906](https://github.com/influxdata/telegraf/issues/6906): Fix json_strict option and set default of true. ## v1.13.1 [2020-01-08] From c6f8b273c0a77705c6d54a232c9efae330ee8ff7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 21 Jan 2020 10:49:01 -0800 Subject: [PATCH 1480/1815] Allow a user defined field to be used as the graylog short_message (#6700) --- plugins/outputs/graylog/README.md | 14 +++++++---- plugins/outputs/graylog/graylog.go | 40 +++++++++++++++++------------- 2 files changed, 32 insertions(+), 22 deletions(-) diff --git a/plugins/outputs/graylog/README.md b/plugins/outputs/graylog/README.md index 39863b541..4945ce46f 100644 --- a/plugins/outputs/graylog/README.md +++ b/plugins/outputs/graylog/README.md @@ -1,14 +1,18 @@ # Graylog Output Plugin -This plugin writes to a Graylog instance using the "gelf" format. +This plugin writes to a Graylog instance using the "[GELF][]" format. -It requires a `servers` name. +[GELF]: https://docs.graylog.org/en/3.1/pages/gelf.html#gelf-payload-specification ### Configuration: ```toml -# Send telegraf metrics to graylog(s) [[outputs.graylog]] - ## UDP endpoint for your graylog instance(s). - servers = ["127.0.0.1:12201", "192.168.1.1:12201"] + ## UDP endpoint for your graylog instances. + servers = ["127.0.0.1:12201"] + + ## The field to use as the GELF short_message, if unset the static string + ## "telegraf" will be used. + ## example: short_message_field = "message" + # short_message_field = "" ``` diff --git a/plugins/outputs/graylog/graylog.go b/plugins/outputs/graylog/graylog.go index 4b2c1693a..34f2ec6d9 100644 --- a/plugins/outputs/graylog/graylog.go +++ b/plugins/outputs/graylog/graylog.go @@ -150,13 +150,19 @@ func (g *Gelf) send(b []byte) (n int, err error) { } type Graylog struct { - Servers []string - writer io.Writer + Servers []string `toml:"servers"` + ShortMessageField string `toml:"short_message_field"` + writer io.Writer } var sampleConfig = ` ## UDP endpoint for your graylog instance. - servers = ["127.0.0.1:12201", "192.168.1.1:12201"] + servers = ["127.0.0.1:12201"] + + ## The field to use as the GELF short_message, if unset the static string + ## "telegraf" will be used. 
+ ## example: short_message_field = "message" + # short_message_field = "" ` func (g *Graylog) Connect() error { @@ -184,16 +190,12 @@ func (g *Graylog) SampleConfig() string { } func (g *Graylog) Description() string { - return "Send telegraf metrics to graylog(s)" + return "Send telegraf metrics to graylog" } func (g *Graylog) Write(metrics []telegraf.Metric) error { - if len(metrics) == 0 { - return nil - } - for _, metric := range metrics { - values, err := serialize(metric) + values, err := g.serialize(metric) if err != nil { return err } @@ -201,14 +203,14 @@ func (g *Graylog) Write(metrics []telegraf.Metric) error { for _, value := range values { _, err := g.writer.Write([]byte(value)) if err != nil { - return fmt.Errorf("FAILED to write message: %s, %s", value, err) + return fmt.Errorf("error writing message: %q, %v", value, err) } } } return nil } -func serialize(metric telegraf.Metric) ([]string, error) { +func (g *Graylog) serialize(metric telegraf.Metric) ([]string, error) { out := []string{} m := make(map[string]interface{}) @@ -217,7 +219,7 @@ func serialize(metric telegraf.Metric) ([]string, error) { m["short_message"] = "telegraf" m["name"] = metric.Name() - if host, ok := metric.Tags()["host"]; ok { + if host, ok := metric.GetTag("host"); ok { m["host"] = host } else { host, err := os.Hostname() @@ -227,14 +229,18 @@ func serialize(metric telegraf.Metric) ([]string, error) { m["host"] = host } - for key, value := range metric.Tags() { - if key != "host" { - m["_"+key] = value + for _, tag := range metric.TagList() { + if tag.Key != "host" { + m["_"+tag.Key] = tag.Value } } - for key, value := range metric.Fields() { - m["_"+key] = value + for _, field := range metric.FieldList() { + if field.Key == g.ShortMessageField { + m["short_message"] = field.Value + } else { + m["_"+field.Key] = field.Value + } } serialized, err := ejson.Marshal(m) From 4c79a68544d9dfab3f2c1e8c10594012454a82f9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 21 Jan 2020 10:49:51 -0800 Subject: [PATCH 1481/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 97c32e3e8..641dbacfa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ - [#6844](https://github.com/influxdata/telegraf/pull/6844): Add source and port tags to jenkins_job metrics. - [#6886](https://github.com/influxdata/telegraf/pull/6886): Add date offset and timezone options to date processor. - [#6859](https://github.com/influxdata/telegraf/pull/6859): Exclude resources by inventory path in vsphere input. +- [#6700](https://github.com/influxdata/telegraf/pull/6700): Allow a user defined field to be used as the graylog short_message. #### Bugfixes From 6a982da8b0a5944d05b77b49d5c8412cde2374bd Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 21 Jan 2020 11:00:37 -0800 Subject: [PATCH 1482/1815] Set release date for 1.13.2 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 641dbacfa..10f889fab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,7 +39,7 @@ - [#6397](https://github.com/influxdata/telegraf/issues/6397): Fix conversion to floats in AzureDBResourceStats query in the sqlserver input. - [#6867](https://github.com/influxdata/telegraf/issues/6867): Fix case sensitive collation in sqlserver input. 
-## v1.13.2 [unreleased] +## v1.13.2 [2020-01-21] #### Bugfixes From 916f39d285eec41983431e461c6c2a96d3a4786f Mon Sep 17 00:00:00 2001 From: James Beckett <308470+hackery@users.noreply.github.com> Date: Wed, 22 Jan 2020 01:06:58 +0000 Subject: [PATCH 1483/1815] Add server_name override for x509_cert plugin (#6917) --- plugins/inputs/x509_cert/README.md | 2 ++ plugins/inputs/x509_cert/x509_cert.go | 22 +++++++++++++++++----- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/x509_cert/README.md b/plugins/inputs/x509_cert/README.md index b302d4992..b8dfb8814 100644 --- a/plugins/inputs/x509_cert/README.md +++ b/plugins/inputs/x509_cert/README.md @@ -19,6 +19,8 @@ file or network connection. # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" + ## Pass a different name into the TLS request (Server Name Indication) + # server_name = "myhost.example.org" ``` diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index 21e64fcbb..4a6702b9c 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -30,14 +30,17 @@ const sampleConfig = ` # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" + ## Pass a different name into the TLS request (Server Name Indication) + # server_name = "myhost.example.org" ` const description = "Reads metrics from a SSL certificate" // X509Cert holds the configuration of the plugin. type X509Cert struct { - Sources []string `toml:"sources"` - Timeout internal.Duration `toml:"timeout"` - tlsCfg *tls.Config + Sources []string `toml:"sources"` + Timeout internal.Duration `toml:"timeout"` + ServerName string `toml:"server_name"` + tlsCfg *tls.Config _tls.ClientConfig } @@ -78,7 +81,12 @@ func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certifica } defer ipConn.Close() - c.tlsCfg.ServerName = u.Hostname() + if c.ServerName == "" { + c.tlsCfg.ServerName = u.Hostname() + } else { + c.tlsCfg.ServerName = c.ServerName + } + c.tlsCfg.InsecureSkipVerify = true conn := tls.Client(ipConn, c.tlsCfg) defer conn.Close() @@ -203,7 +211,11 @@ func (c *X509Cert) Gather(acc telegraf.Accumulator) error { Intermediates: x509.NewCertPool(), } if i == 0 { - opts.DNSName = u.Hostname() + if c.ServerName == "" { + opts.DNSName = u.Hostname() + } else { + opts.DNSName = c.ServerName + } for j, cert := range certs { if j != 0 { opts.Intermediates.AddCert(cert) From 9747fa7f0703356b6d72410bcc5f6199e9526b58 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 21 Jan 2020 17:11:50 -0800 Subject: [PATCH 1484/1815] Update changelog --- CHANGELOG.md | 1 + plugins/inputs/x509_cert/README.md | 6 ++++-- plugins/inputs/x509_cert/x509_cert.go | 6 ++++-- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 10f889fab..b312f4030 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,7 @@ - [#6886](https://github.com/influxdata/telegraf/pull/6886): Add date offset and timezone options to date processor. - [#6859](https://github.com/influxdata/telegraf/pull/6859): Exclude resources by inventory path in vsphere input. - [#6700](https://github.com/influxdata/telegraf/pull/6700): Allow a user defined field to be used as the graylog short_message. +- [#6917](https://github.com/influxdata/telegraf/pull/6917): Add server_name override for x509_cert plugin. 
#### Bugfixes diff --git a/plugins/inputs/x509_cert/README.md b/plugins/inputs/x509_cert/README.md index b8dfb8814..074bbc58c 100644 --- a/plugins/inputs/x509_cert/README.md +++ b/plugins/inputs/x509_cert/README.md @@ -15,12 +15,14 @@ file or network connection. ## Timeout for SSL connection # timeout = "5s" + ## Pass a different name into the TLS request (Server Name Indication) + ## example: server_name = "myhost.example.org" + # server_name = "myhost.example.org" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" - ## Pass a different name into the TLS request (Server Name Indication) - # server_name = "myhost.example.org" ``` diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index 4a6702b9c..49f5fc88e 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -26,12 +26,14 @@ const sampleConfig = ` ## Timeout for SSL connection # timeout = "5s" + ## Pass a different name into the TLS request (Server Name Indication) + ## example: server_name = "myhost.example.org" + # server_name = "" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" - ## Pass a different name into the TLS request (Server Name Indication) - # server_name = "myhost.example.org" ` const description = "Reads metrics from a SSL certificate" From 9fd400c9ac88ace6e60557c43dc42ff6b9d49700 Mon Sep 17 00:00:00 2001 From: SirishaGopigiri <52744121+SirishaGopigiri@users.noreply.github.com> Date: Thu, 23 Jan 2020 04:15:18 +0530 Subject: [PATCH 1485/1815] Add input plugin for monit (#6850) --- plugins/inputs/all/all.go | 1 + plugins/inputs/monit/README.md | 148 ++++ plugins/inputs/monit/monit.go | 434 +++++++++++ plugins/inputs/monit/monit_test.go | 719 ++++++++++++++++++ .../monit/testdata/response_invalidxml_1.xml | 51 ++ .../monit/testdata/response_invalidxml_2.xml | 52 ++ .../monit/testdata/response_invalidxml_3.xml | 52 ++ .../monit/testdata/response_servicetype_0.xml | 51 ++ .../monit/testdata/response_servicetype_1.xml | 41 + .../monit/testdata/response_servicetype_2.xml | 42 + .../monit/testdata/response_servicetype_3.xml | 52 ++ .../monit/testdata/response_servicetype_4.xml | 45 ++ .../monit/testdata/response_servicetype_5.xml | 57 ++ .../monit/testdata/response_servicetype_6.xml | 41 + .../monit/testdata/response_servicetype_7.xml | 42 + .../monit/testdata/response_servicetype_8.xml | 70 ++ .../response_servicetype_8_failure.xml | 70 ++ ...esponse_servicetype_8_initializingmode.xml | 70 ++ .../response_servicetype_8_passivemode.xml | 70 ++ .../response_servicetype_8_pendingaction.xml | 70 ++ 20 files changed, 2178 insertions(+) create mode 100644 plugins/inputs/monit/README.md create mode 100644 plugins/inputs/monit/monit.go create mode 100644 plugins/inputs/monit/monit_test.go create mode 100644 plugins/inputs/monit/testdata/response_invalidxml_1.xml create mode 100644 plugins/inputs/monit/testdata/response_invalidxml_2.xml create mode 100644 plugins/inputs/monit/testdata/response_invalidxml_3.xml create mode 100644 plugins/inputs/monit/testdata/response_servicetype_0.xml create mode 100644 plugins/inputs/monit/testdata/response_servicetype_1.xml create mode 100644 plugins/inputs/monit/testdata/response_servicetype_2.xml create mode 100644 plugins/inputs/monit/testdata/response_servicetype_3.xml create mode 100644 plugins/inputs/monit/testdata/response_servicetype_4.xml create 
mode 100644 plugins/inputs/monit/testdata/response_servicetype_5.xml
 create mode 100644 plugins/inputs/monit/testdata/response_servicetype_6.xml
 create mode 100644 plugins/inputs/monit/testdata/response_servicetype_7.xml
 create mode 100644 plugins/inputs/monit/testdata/response_servicetype_8.xml
 create mode 100644 plugins/inputs/monit/testdata/response_servicetype_8_failure.xml
 create mode 100644 plugins/inputs/monit/testdata/response_servicetype_8_initializingmode.xml
 create mode 100644 plugins/inputs/monit/testdata/response_servicetype_8_passivemode.xml
 create mode 100644 plugins/inputs/monit/testdata/response_servicetype_8_pendingaction.xml

diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 5860ac6c6..6a42b9451 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -91,6 +91,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/mesos"
 	_ "github.com/influxdata/telegraf/plugins/inputs/minecraft"
 	_ "github.com/influxdata/telegraf/plugins/inputs/mongodb"
+	_ "github.com/influxdata/telegraf/plugins/inputs/monit"
 	_ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer"
 	_ "github.com/influxdata/telegraf/plugins/inputs/multifile"
 	_ "github.com/influxdata/telegraf/plugins/inputs/mysql"
diff --git a/plugins/inputs/monit/README.md b/plugins/inputs/monit/README.md
new file mode 100644
index 000000000..613e1eac3
--- /dev/null
+++ b/plugins/inputs/monit/README.md
@@ -0,0 +1,148 @@
+# Monit Plugin
+
+The monit plugin gathers metrics and status information about local processes,
+remote hosts, files, file systems, directories, and network interfaces managed
+and watched over by Monit.
+
+To install the Monit agent on a host, refer to https://mmonit.com/wiki/Monit/Installation
+
+The minimum Monit version tested with this plugin is 5.16.
+
+### Configuration:
+
+```toml
+# Read metrics and status information about processes managed by Monit
+[[inputs.monit]]
+  address = "http://127.0.0.1:2812"
+  username = "test"
+  password = "test"
+```
+
+### Tags:
+All measurements have the following tags:
+- address
+- version
+- service
+- platform_name
+- status
+- pending_action
+- monitoring_status
+- monitoring_mode
+
+### Measurements & Fields:
+
+Fields common to all Monit service types:
+- status_code
+- pending_action_code
+- monitoring_status_code
+- monitoring_mode_code
+
+Fields for Monit service type filesystem:
+- Measurement:
+  - monit_filesystem
+- Fields:
+  - mode
+  - block_percent
+  - block_usage
+  - block_total
+  - inode_percent
+  - inode_usage
+  - inode_total
+
+Fields for Monit service type directory:
+- Measurement:
+  - monit_directory
+- Fields:
+  - permissions
+
+Fields for Monit service type file:
+- Measurement:
+  - monit_file
+- Fields:
+  - size
+  - permissions
+
+Fields for Monit service type process:
+- Measurement:
+  - monit_process
+- Fields:
+  - cpu_percent
+  - cpu_percent_total
+  - mem_kb
+  - mem_kb_total
+  - mem_percent
+  - mem_percent_total
+  - pid
+  - parent_pid
+  - threads
+  - children
+
+Fields for Monit service type remote host:
+- Measurement:
+  - monit_remote_host
+- Fields:
+  - remote_hostname
+  - port_number
+  - request
+  - protocol
+  - type
+
+Fields for Monit service type system:
+- Measurement:
+  - monit_system
+- Fields:
+  - cpu_system
+  - cpu_user
+  - cpu_wait
+  - cpu_load_avg_1m
+  - cpu_load_avg_5m
+  - cpu_load_avg_15m
+  - mem_kb
+  - mem_percent
+  - swap_kb
+  - swap_percent
+
+Fields for Monit service type fifo:
+- Measurement:
+  - monit_fifo
+- Fields:
+  - permissions
+
+Fields for
Monit service type program: +- Measurement: + - monit_program +- Fields: + - last_started_time + - program_status + +Fields for Monit service type network: +- Measurement: + - monit_network +- Fields: + - link_state + - link_mode + - link_speed + - download_packets_now + - download_packets_total + - download_bytes_now + - download_bytes_total + - download_errors_now + - download_errors_total + - upload_packets_now + - upload_packets_total + - upload_bytes_now + - upload_bytes_total + - upload_errors_now + - upload_errors_total + +### Example Output: +``` +$ ./telegraf -config telegraf.conf -input-filter monit -test +monit_system,address=http://localhost:2812,host=verizon-onap,hostname=verizon-onap,monitoring_mode=Monitoring\ mode:\ \ active,monitoring_status=Monitoring\ status:\ \ Monitored,platform_name=Linux,service=verizon-onap,status=Running,version=5.16 status_code=0i,cpu_system=1.9,cpu_user=4.7,cpu_wait=1.5,cpu_load_avg_1m=1.24,cpu_load_avg_5m=1.68,mem_percent=67.1,monitoring_status_code=1i,monitoring_mode_code=0i,cpu_load_avg_15m=1.64,mem_kb=10961012i,swap_kb=2322688,swap_percent=13.9 1578636430000000000 +monit_remote_host,address=http://localhost:2812,host=verizon-onap,hostname=verizon-onap,monitoring_mode=Monitoring\ mode:\ \ passive,monitoring_status=Monitoring\ status:\ \ Monitored,platform_name=Linux,service=testing,status=Failure,version=5.16 status_code=32i,monitoring_status_code=1i,monitoring_mode_code=1i,remote_hostname="192.168.10.49",port_number=2220i,request="",protocol="DEFAULT",type="TCP" 1578636430000000000 +monit_fifo,address=http://localhost:2812,host=verizon-onap,hostname=verizon-onap,monitoring_mode=Monitoring\ mode:\ \ active,monitoring_status=Monitoring\ status:\ \ Monitored,platform_name=Linux,service=test2,status=Running,version=5.16 status_code=0i,monitoring_status_code=1i,monitoring_mode_code=0i,permissions=664i 1578636430000000000 +monit_network,address=http://localhost:2812,host=verizon-onap,hostname=verizon-onap,monitoring_mode=Monitoring\ mode:\ \ active,monitoring_status=Monitoring\ status:\ \ Monitored,platform_name=Linux,service=test1,status=Failure,version=5.16 monitoring_status_code=1i,monitoring_mode_code=0i,download_packets_total=0i,upload_bytes_now=0i,download_errors_total=0i,status_code=8388608i,link_speed=-1i,link_mode="Unknown Mode",download_bytes_now=0i,download_bytes_total=0i,download_errors_now=0i,upload_packets_total=0i,upload_bytes_total=0i,upload_errors_now=0i,upload_errors_total=0i,link_state=0i,download_packets_now=0i,upload_packets_now=0i 1578636430000000000 +monit_directory,address=http://localhost:2812,host=verizon-onap,hostname=verizon-onap,monitoring_mode=Monitoring\ mode:\ \ passive,monitoring_status=Monitoring\ status:\ \ Monitored,platform_name=Linux,service=test,status=Running,version=5.16 status_code=0i,monitoring_status_code=1i,monitoring_mode_code=1i,permissions=755i 1578636430000000000 +``` diff --git a/plugins/inputs/monit/monit.go b/plugins/inputs/monit/monit.go new file mode 100644 index 000000000..b7477d784 --- /dev/null +++ b/plugins/inputs/monit/monit.go @@ -0,0 +1,434 @@ +package monit + +import ( + "encoding/xml" + "fmt" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" + "golang.org/x/net/html/charset" + "net/http" +) + +const ( + fileSystem string = "0" + directory = "1" + file = "2" + process = "3" + remoteHost = "4" + system = "5" + fifo = "6" + program = "7" + network = "8" +) + +var 
pendingActions = []string{"ignore", "alert", "restart", "stop", "exec", "unmonitor", "start", "monitor"} + +type Status struct { + Server Server `xml:"server"` + Platform Platform `xml:"platform"` + Services []Service `xml:"service"` +} + +type Server struct { + ID string `xml:"id"` + Version string `xml:"version"` + Uptime int64 `xml:"uptime"` + Poll int `xml:"poll"` + LocalHostname string `xml:"localhostname"` + StartDelay int `xml:"startdelay"` + ControlFile string `xml:"controlfile"` +} + +type Platform struct { + Name string `xml:"name"` + Release string `xml:"release"` + Version string `xml:"version"` + Machine string `xml:"machine"` + CPU int `xml:"cpu"` + Memory int `xml:"memory"` + Swap int `xml:"swap"` +} + +type Service struct { + Type string `xml:"type,attr"` + Name string `xml:"name"` + Status int `xml:"status"` + MonitoringStatus int `xml:"monitor"` + MonitorMode int `xml:"monitormode"` + PendingAction int `xml:"pendingaction"` + Memory Memory `xml:"memory"` + CPU CPU `xml:"cpu"` + System System `xml:"system"` + Size int64 `xml:"size"` + Mode int `xml:"mode"` + Program Program `xml:"program"` + Block Block `xml:"block"` + Inode Inode `xml:"inode"` + Pid int64 `xml:"pid"` + ParentPid int64 `xml:"ppid"` + Threads int `xml:"threads"` + Children int `xml:"children"` + Port Port `xml:"port"` + Link Link `xml:"link"` +} + +type Link struct { + State int `xml:"state"` + Speed int64 `xml:"speed"` + Duplex int `xml:"duplex"` + Download Download `xml:"download"` + Upload Upload `xml:"upload"` +} + +type Download struct { + Packets struct { + Now int64 `xml:"now"` + Total int64 `xml:"total"` + } `xml:"packets"` + Bytes struct { + Now int64 `xml:"now"` + Total int64 `xml:"total"` + } `xml:"bytes"` + Errors struct { + Now int64 `xml:"now"` + Total int64 `xml:"total"` + } `xml:"errors"` +} + +type Upload struct { + Packets struct { + Now int64 `xml:"now"` + Total int64 `xml:"total"` + } `xml:"packets"` + Bytes struct { + Now int64 `xml:"now"` + Total int64 `xml:"total"` + } `xml:"bytes"` + Errors struct { + Now int64 `xml:"now"` + Total int64 `xml:"total"` + } `xml:"errors"` +} + +type Port struct { + Hostname string `xml:"hostname"` + PortNumber int64 `xml:"portnumber"` + Request string `xml:"request"` + Protocol string `xml:"protocol"` + Type string `xml:"type"` +} + +type Block struct { + Percent float64 `xml:"percent"` + Usage float64 `xml:"usage"` + Total float64 `xml:"total"` +} + +type Inode struct { + Percent float64 `xml:"percent"` + Usage float64 `xml:"usage"` + Total float64 `xml:"total"` +} + +type Program struct { + Started int64 `xml:"started"` + Status int `xml:"status"` +} + +type Memory struct { + Percent float64 `xml:"percent"` + PercentTotal float64 `xml:"percenttotal"` + Kilobyte int64 `xml:"kilobyte"` + KilobyteTotal int64 `xml:"kilobytetotal"` +} + +type CPU struct { + Percent float64 `xml:"percent"` + PercentTotal float64 `xml:"percenttotal"` +} + +type System struct { + Load struct { + Avg01 float64 `xml:"avg01"` + Avg05 float64 `xml:"avg05"` + Avg15 float64 `xml:"avg15"` + } `xml:"load"` + CPU struct { + User float64 `xml:"user"` + System float64 `xml:"system"` + Wait float64 `xml:"wait"` + } `xml:"cpu"` + Memory struct { + Percent float64 `xml:"percent"` + Kilobyte int64 `xml:"kilobyte"` + } `xml:"memory"` + Swap struct { + Percent float64 `xml:"percent"` + Kilobyte float64 `xml:"kilobyte"` + } `xml:"swap"` +} + +type Monit struct { + Address string `toml:"address"` + Username string `toml:"username"` + Password string `toml:"password"` + client HTTPClient + 
tls.ClientConfig + Timeout internal.Duration `toml:"timeout"` +} + +type HTTPClient interface { + MakeRequest(req *http.Request) (*http.Response, error) + + SetHTTPClient(client *http.Client) + HTTPClient() *http.Client +} + +type Messagebody struct { + Metrics []string `json:"metrics"` +} + +type RealHTTPClient struct { + client *http.Client +} + +func (c *RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) { + return c.client.Do(req) +} + +func (c *RealHTTPClient) SetHTTPClient(client *http.Client) { + c.client = client +} + +func (c *RealHTTPClient) HTTPClient() *http.Client { + return c.client +} + +func (m *Monit) Description() string { + return "Read metrics and status information about processes managed by Monit" +} + +var sampleConfig = ` + ## Monit + address = "http://127.0.0.1:2812" + + ## Username and Password for Monit + username = "" + password = "" + + ## Amount of time allowed to complete the HTTP request + # timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +` + +func (m *Monit) SampleConfig() string { + return sampleConfig +} + +func (m *Monit) Init() error { + tlsCfg, err := m.ClientConfig.TLSConfig() + if err != nil { + return err + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + Proxy: http.ProxyFromEnvironment, + }, + Timeout: m.Timeout.Duration, + } + m.client.SetHTTPClient(client) + return nil +} + +func (m *Monit) Gather(acc telegraf.Accumulator) error { + + req, err := http.NewRequest("GET", fmt.Sprintf("%s/_status?format=xml", m.Address), nil) + if err != nil { + return err + } + if len(m.Username) > 0 || len(m.Password) > 0 { + req.SetBasicAuth(m.Username, m.Password) + } + + resp, err := m.client.MakeRequest(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode == 200 { + + var status Status + decoder := xml.NewDecoder(resp.Body) + decoder.CharsetReader = charset.NewReaderLabel + if err := decoder.Decode(&status); err != nil { + return fmt.Errorf("error parsing input: %v", err) + } + + tags := map[string]string{ + "version": status.Server.Version, + "source": status.Server.LocalHostname, + "platform_name": status.Platform.Name, + } + + for _, service := range status.Services { + fields := make(map[string]interface{}) + tags["status"] = serviceStatus(service) + fields["status_code"] = service.Status + tags["pending_action"] = pendingAction(service) + fields["pending_action_code"] = service.PendingAction + tags["monitoring_status"] = monitoringStatus(service) + fields["monitoring_status_code"] = service.MonitoringStatus + tags["monitoring_mode"] = monitoringMode(service) + fields["monitoring_mode_code"] = service.MonitorMode + tags["service"] = service.Name + if service.Type == fileSystem { + fields["mode"] = service.Mode + fields["block_percent"] = service.Block.Percent + fields["block_usage"] = service.Block.Usage + fields["block_total"] = service.Block.Total + fields["inode_percent"] = service.Inode.Percent + fields["inode_usage"] = service.Inode.Usage + fields["inode_total"] = service.Inode.Total + acc.AddFields("monit_filesystem", fields, tags) + } else if service.Type == directory { + fields["mode"] = service.Mode + acc.AddFields("monit_directory", fields, tags) + } else if service.Type == file { + fields["size"] = service.Size + fields["mode"] = service.Mode + 
acc.AddFields("monit_file", fields, tags) + } else if service.Type == process { + fields["cpu_percent"] = service.CPU.Percent + fields["cpu_percent_total"] = service.CPU.PercentTotal + fields["mem_kb"] = service.Memory.Kilobyte + fields["mem_kb_total"] = service.Memory.KilobyteTotal + fields["mem_percent"] = service.Memory.Percent + fields["mem_percent_total"] = service.Memory.PercentTotal + fields["pid"] = service.Pid + fields["parent_pid"] = service.ParentPid + fields["threads"] = service.Threads + fields["children"] = service.Children + acc.AddFields("monit_process", fields, tags) + } else if service.Type == remoteHost { + fields["remote_hostname"] = service.Port.Hostname + fields["port_number"] = service.Port.PortNumber + fields["request"] = service.Port.Request + fields["protocol"] = service.Port.Protocol + fields["type"] = service.Port.Type + acc.AddFields("monit_remote_host", fields, tags) + } else if service.Type == system { + fields["cpu_system"] = service.System.CPU.System + fields["cpu_user"] = service.System.CPU.User + fields["cpu_wait"] = service.System.CPU.Wait + fields["cpu_load_avg_1m"] = service.System.Load.Avg01 + fields["cpu_load_avg_5m"] = service.System.Load.Avg05 + fields["cpu_load_avg_15m"] = service.System.Load.Avg15 + fields["mem_kb"] = service.System.Memory.Kilobyte + fields["mem_percent"] = service.System.Memory.Percent + fields["swap_kb"] = service.System.Swap.Kilobyte + fields["swap_percent"] = service.System.Swap.Percent + acc.AddFields("monit_system", fields, tags) + } else if service.Type == fifo { + fields["mode"] = service.Mode + acc.AddFields("monit_fifo", fields, tags) + } else if service.Type == program { + fields["program_started"] = service.Program.Started * 10000000 + fields["program_status"] = service.Program.Status + acc.AddFields("monit_program", fields, tags) + } else if service.Type == network { + fields["link_state"] = service.Link.State + fields["link_speed"] = service.Link.Speed + fields["link_mode"] = linkMode(service) + fields["download_packets_now"] = service.Link.Download.Packets.Now + fields["download_packets_total"] = service.Link.Download.Packets.Total + fields["download_bytes_now"] = service.Link.Download.Bytes.Now + fields["download_bytes_total"] = service.Link.Download.Bytes.Total + fields["download_errors_now"] = service.Link.Download.Errors.Now + fields["download_errors_total"] = service.Link.Download.Errors.Total + fields["upload_packets_now"] = service.Link.Upload.Packets.Now + fields["upload_packets_total"] = service.Link.Upload.Packets.Total + fields["upload_bytes_now"] = service.Link.Upload.Bytes.Now + fields["upload_bytes_total"] = service.Link.Upload.Bytes.Total + fields["upload_errors_now"] = service.Link.Upload.Errors.Now + fields["upload_errors_total"] = service.Link.Upload.Errors.Total + acc.AddFields("monit_network", fields, tags) + } + } + } else { + return fmt.Errorf("received status code %d (%s), expected 200", + resp.StatusCode, + http.StatusText(resp.StatusCode)) + + } + return nil +} + +func linkMode(s Service) string { + if s.Link.Duplex == 1 { + return "duplex" + } else if s.Link.Duplex == 0 { + return "simplex" + } else { + return "unknown" + } +} + +func serviceStatus(s Service) string { + if s.Status == 0 { + return "running" + } else { + return "failure" + } +} + +func pendingAction(s Service) string { + if s.PendingAction > 0 { + if s.PendingAction >= len(pendingActions) { + return "unknown" + } + return pendingActions[s.PendingAction-1] + } else { + return "none" + } +} + +func monitoringMode(s Service) 
string { + switch s.MonitorMode { + case 0: + return "active" + case 1: + return "passive" + } + return "unknown" +} + +func monitoringStatus(s Service) string { + switch s.MonitoringStatus { + case 1: + return "monitored" + case 2: + return "initializing" + case 4: + return "waiting" + } + return "not_monitored" +} + +func init() { + inputs.Add("monit", func() telegraf.Input { + return &Monit{ + client: &RealHTTPClient{}, + } + }) +} diff --git a/plugins/inputs/monit/monit_test.go b/plugins/inputs/monit/monit_test.go new file mode 100644 index 000000000..b0d0698b4 --- /dev/null +++ b/plugins/inputs/monit/monit_test.go @@ -0,0 +1,719 @@ +package monit + +import ( + "errors" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type MockHTTPClient struct { + networkError string +} + +func (c *MockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) { + return nil, errors.New(c.networkError) +} + +func (c *MockHTTPClient) SetHTTPClient(client *http.Client) { +} + +func (c *MockHTTPClient) HTTPClient() *http.Client { + return nil +} + +func TestServiceType(t *testing.T) { + tests := []struct { + name string + filename string + expected []telegraf.Metric + }{ + { + name: "check filesystem service type", + filename: "testdata/response_servicetype_0.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_filesystem", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "mode": 555, + "block_percent": 29.5, + "block_usage": 4424.0, + "block_total": 14990.0, + "inode_percent": 0.8, + "inode_usage": 59674.0, + "inode_total": 7680000.0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check directory service type", + filename: "testdata/response_servicetype_1.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_directory", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "mode": 755, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check file service type", + filename: "testdata/response_servicetype_2.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_file", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "mode": 644, + "size": 1565, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check process service type", + filename: "testdata/response_servicetype_3.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_process", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + 
"platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "cpu_percent": 0.0, + "cpu_percent_total": 0.0, + "mem_kb": 22892, + "mem_kb_total": 22892, + "mem_percent": 0.1, + "mem_percent_total": 0.1, + "pid": 5959, + "parent_pid": 1, + "threads": 31, + "children": 0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check remote host service type", + filename: "testdata/response_servicetype_4.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_remote_host", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "remote_hostname": "192.168.1.10", + "port_number": 2812, + "request": "", + "protocol": "DEFAULT", + "type": "TCP", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check system service type", + filename: "testdata/response_servicetype_5.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_system", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "cpu_system": 0.1, + "cpu_user": 0.0, + "cpu_wait": 0.0, + "cpu_load_avg_1m": 0.00, + "cpu_load_avg_5m": 0.00, + "cpu_load_avg_15m": 0.00, + "mem_kb": 259668, + "mem_percent": 1.5, + "swap_kb": 0.0, + "swap_percent": 0.0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check fifo service type", + filename: "testdata/response_servicetype_6.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_fifo", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "mode": 664, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check program service type", + filename: "testdata/response_servicetype_7.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_program", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "program_status": 0, + "program_started": int64(15728504980000000), + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check network service type", + filename: "testdata/response_servicetype_8.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_network", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + 
"platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "link_speed": 1000000000, + "link_mode": "duplex", + "link_state": 1, + "download_packets_now": 0, + "download_packets_total": 15243, + "download_bytes_now": 0, + "download_bytes_total": 5506778, + "download_errors_now": 0, + "download_errors_total": 0, + "upload_packets_now": 0, + "upload_packets_total": 8822, + "upload_bytes_now": 0, + "upload_bytes_total": 1287240, + "upload_errors_now": 0, + "upload_errors_total": 0, + }, + time.Unix(0, 0), + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/_status": + http.ServeFile(w, r, tt.filename) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer ts.Close() + + plugin := &Monit{ + Address: ts.URL, + client: &RealHTTPClient{}, + } + + plugin.Init() + + var acc testutil.Accumulator + err := plugin.Gather(&acc) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), + testutil.IgnoreTime()) + }) + } +} + +func TestMonitFailure(t *testing.T) { + tests := []struct { + name string + filename string + expected []telegraf.Metric + }{ + { + name: "check monit failure status", + filename: "testdata/response_servicetype_8_failure.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_network", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "failure", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 8388608, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "link_speed": -1, + "link_mode": "unknown", + "link_state": 0, + "download_packets_now": 0, + "download_packets_total": 0, + "download_bytes_now": 0, + "download_bytes_total": 0, + "download_errors_now": 0, + "download_errors_total": 0, + "upload_packets_now": 0, + "upload_packets_total": 0, + "upload_bytes_now": 0, + "upload_bytes_total": 0, + "upload_errors_now": 0, + "upload_errors_total": 0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check passive mode", + filename: "testdata/response_servicetype_8_passivemode.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_network", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "passive", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 1, + "pending_action_code": 0, + "link_speed": 1000000000, + "link_mode": "duplex", + "link_state": 1, + "download_packets_now": 0, + "download_packets_total": 15243, + "download_bytes_now": 0, + "download_bytes_total": 5506778, + "download_errors_now": 0, + "download_errors_total": 0, + "upload_packets_now": 0, + "upload_packets_total": 8822, + "upload_bytes_now": 0, + "upload_bytes_total": 1287240, + "upload_errors_now": 0, + "upload_errors_total": 0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check initializing status", 
+ filename: "testdata/response_servicetype_8_initializingmode.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_network", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "initializing", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 2, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "link_speed": 1000000000, + "link_mode": "duplex", + "link_state": 1, + "download_packets_now": 0, + "download_packets_total": 15243, + "download_bytes_now": 0, + "download_bytes_total": 5506778, + "download_errors_now": 0, + "download_errors_total": 0, + "upload_packets_now": 0, + "upload_packets_total": 8822, + "upload_bytes_now": 0, + "upload_bytes_total": 1287240, + "upload_errors_now": 0, + "upload_errors_total": 0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check pending action", + filename: "testdata/response_servicetype_8_pendingaction.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_network", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "exec", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 5, + "link_speed": 1000000000, + "link_mode": "duplex", + "link_state": 1, + "download_packets_now": 0, + "download_packets_total": 15243, + "download_bytes_now": 0, + "download_bytes_total": 5506778, + "download_errors_now": 0, + "download_errors_total": 0, + "upload_packets_now": 0, + "upload_packets_total": 8822, + "upload_bytes_now": 0, + "upload_bytes_total": 1287240, + "upload_errors_now": 0, + "upload_errors_total": 0, + }, + time.Unix(0, 0), + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/_status": + http.ServeFile(w, r, tt.filename) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer ts.Close() + + plugin := &Monit{ + Address: ts.URL, + client: &RealHTTPClient{}, + } + + plugin.Init() + + var acc testutil.Accumulator + err := plugin.Gather(&acc) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), + testutil.IgnoreTime()) + }) + } +} + +func checkAuth(r *http.Request, username, password string) bool { + user, pass, ok := r.BasicAuth() + if !ok { + return false + } + return user == username && pass == password +} + +func TestAllowHosts(t *testing.T) { + + networkError := "Get http://127.0.0.1:2812/_status?format=xml: " + + "read tcp 192.168.10.2:55610->127.0.0.1:2812: " + + "read: connection reset by peer" + r := &Monit{ + Address: "http://127.0.0.1:2812", + Username: "test", + Password: "test", + client: &MockHTTPClient{networkError}, + } + + var acc testutil.Accumulator + + r.Init() + + err := r.Gather(&acc) + + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "read: connection reset by peer") + } +} + +func TestConnection(t *testing.T) { + + r := &Monit{ + Address: "http://127.0.0.1:2812", + Username: "test", + Password: "test", + client: &RealHTTPClient{}, + } + + var acc testutil.Accumulator + + r.Init() + + err := r.Gather(&acc) + + if 
assert.Error(t, err) { + assert.Contains(t, err.Error(), "connect: connection refused") + } +} + +func TestInvalidUsernameorPassword(t *testing.T) { + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + + if !checkAuth(r, "testing", "testing") { + http.Error(w, "Unauthorized.", 401) + return + } + + switch r.URL.Path { + case "/_status": + http.ServeFile(w, r, "testdata/response_servicetype_0.xml") + default: + panic("Cannot handle request") + } + })) + + defer ts.Close() + + r := &Monit{ + Address: ts.URL, + Username: "test", + Password: "test", + client: &RealHTTPClient{}, + } + + var acc testutil.Accumulator + + r.Init() + + err := r.Gather(&acc) + + assert.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") +} + +func TestNoUsernameorPasswordConfiguration(t *testing.T) { + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + + if !checkAuth(r, "testing", "testing") { + http.Error(w, "Unauthorized.", 401) + return + } + + switch r.URL.Path { + case "/_status": + http.ServeFile(w, r, "testdata/response_servicetype_0.xml") + default: + panic("Cannot handle request") + } + })) + + defer ts.Close() + + r := &Monit{ + Address: ts.URL, + client: &RealHTTPClient{}, + } + + var acc testutil.Accumulator + + r.Init() + + err := r.Gather(&acc) + + assert.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") +} + +func TestInvalidXMLAndInvalidTypes(t *testing.T) { + + tests := []struct { + name string + filename string + }{ + { + name: "check filesystem service type", + filename: "testdata/response_invalidxml_1.xml", + }, + { + name: "check filesystem service type", + filename: "testdata/response_invalidxml_2.xml", + }, + { + name: "check filesystem service type", + filename: "testdata/response_invalidxml_3.xml", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/_status": + http.ServeFile(w, r, tt.filename) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer ts.Close() + + plugin := &Monit{ + Address: ts.URL, + client: &RealHTTPClient{}, + } + + plugin.Init() + + var acc testutil.Accumulator + err := plugin.Gather(&acc) + + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "error parsing input:") + } + }) + } +} diff --git a/plugins/inputs/monit/testdata/response_invalidxml_1.xml b/plugins/inputs/monit/testdata/response_invalidxml_1.xml new file mode 100644 index 000000000..8f1dcbaa0 --- /dev/null +++ b/plugins/inputs/monit/testdata/response_invalidxml_1.xml @@ -0,0 +1,51 @@ + + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572850498 + 709694 + 0 + 0 + 1 + 0 + 0 + 555 + 0 + 0 + 4096 + + 29.5 + 4424.0 + 14990.0 + + 0.8 + 59674 + 7680000 + + +
diff --git a/plugins/inputs/monit/testdata/response_invalidxml_2.xml b/plugins/inputs/monit/testdata/response_invalidxml_2.xml new file mode 100644 index 000000000..aab7bc87c --- /dev/null +++ b/plugins/inputs/monit/testdata/response_invalidxml_2.xml @@ -0,0 +1,52 @@ + + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572850498 + 709694 + 0.0 + 0 + 1 + 0 + 0 + 555 + 0 + 0 + 4096 + + 29.5 + 4424.0 + 14990.0 + + + 0.8 + 59674 + 7680000 + + +
diff --git a/plugins/inputs/monit/testdata/response_invalidxml_3.xml b/plugins/inputs/monit/testdata/response_invalidxml_3.xml new file mode 100644 index 000000000..9fd7ed31d --- /dev/null +++ b/plugins/inputs/monit/testdata/response_invalidxml_3.xml @@ -0,0 +1,52 @@ + + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572850498 + 709694 + 0 + 0 + 1 + 0 + 0 + 555 + 0 + 0 + 4096 + + 29.5 + 4424.0 + 14990.0 + + + 0.8 + 59674 + 7680000 + + +
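Note: the three invalidxml fixtures above exist purely to exercise the parse-error path in Gather. A minimal standalone sketch of that path, using the same `encoding/xml` plus `golang.org/x/net/html/charset` decoder setup as monit.go (the struct and input below are illustrative, not taken from the fixtures):

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"

	"golang.org/x/net/html/charset"
)

func main() {
	// Mirrors the decoder setup in monit.go: charset-aware XML decoding
	// whose failure is wrapped in an "error parsing input" message, which
	// is exactly the prefix TestInvalidXMLAndInvalidTypes asserts on.
	var status struct {
		Version string `xml:"server>version"`
	}
	// A mismatched closing tag, in the same spirit as the fixtures.
	body := "<monit><server><version>5.17.1</server></monit>"
	decoder := xml.NewDecoder(strings.NewReader(body))
	decoder.CharsetReader = charset.NewReaderLabel
	if err := decoder.Decode(&status); err != nil {
		fmt.Printf("error parsing input: %v\n", err)
	}
}
```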
diff --git a/plugins/inputs/monit/testdata/response_servicetype_0.xml b/plugins/inputs/monit/testdata/response_servicetype_0.xml new file mode 100644 index 000000000..beaeb2003 --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_0.xml @@ -0,0 +1,51 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572850498 + 709694 + 0 + 0 + 1 + 0 + 0 + 555 + 0 + 0 + 4096 + + 29.5 + 4424.0 + 14990.0 + + + 0.8 + 59674 + 7680000 + + +
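The percentages in this filesystem fixture are consistent with its raw usage and total counts, which is a handy sanity check when adding new testdata; a quick sketch of the arithmetic, with values copied from response_servicetype_0.xml:

```go
package main

import "fmt"

func main() {
	// Values from response_servicetype_0.xml; the percentages Monit
	// reports are simply usage/total, rounded to one decimal place.
	blockUsage, blockTotal := 4424.0, 14990.0
	inodeUsage, inodeTotal := 59674.0, 7680000.0
	fmt.Printf("block: %.1f%%\n", blockUsage/blockTotal*100) // 29.5, as in the fixture
	fmt.Printf("inode: %.1f%%\n", inodeUsage/inodeTotal*100) // 0.8, as in the fixture
}
```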
diff --git a/plugins/inputs/monit/testdata/response_servicetype_1.xml b/plugins/inputs/monit/testdata/response_servicetype_1.xml new file mode 100644 index 000000000..86f02f142 --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_1.xml @@ -0,0 +1,41 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572850342 + 546082 + 0 + 0 + 1 + 0 + 0 + 755 + 0 + 0 + 1572272434 + +
diff --git a/plugins/inputs/monit/testdata/response_servicetype_2.xml b/plugins/inputs/monit/testdata/response_servicetype_2.xml new file mode 100644 index 000000000..709368007 --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_2.xml @@ -0,0 +1,42 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1476628305 + 302669 + 0 + 0 + 1 + 0 + 0 + 644 + 1000 + 1000 + 1476518441 + 1565 + +
diff --git a/plugins/inputs/monit/testdata/response_servicetype_3.xml b/plugins/inputs/monit/testdata/response_servicetype_3.xml new file mode 100644 index 000000000..14a603dc3 --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_3.xml @@ -0,0 +1,52 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1476628305 + 302552 + 0 + 0 + 1 + 0 + 0 + 5959 + 1 + 109870 + 0 + 31 + + 0.1 + 0.1 + 22892 + 22892 + + + 0.0 + 0.0 + + +
diff --git a/plugins/inputs/monit/testdata/response_servicetype_4.xml b/plugins/inputs/monit/testdata/response_servicetype_4.xml new file mode 100644 index 000000000..d7064e2f7 --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_4.xml @@ -0,0 +1,45 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572862451 + 947671 + 0 + 0 + 1 + 0 + 0 + + 192.168.1.10 + 2812 + + DEFAULT + TCP + 0.000145 + + +
diff --git a/plugins/inputs/monit/testdata/response_servicetype_5.xml b/plugins/inputs/monit/testdata/response_servicetype_5.xml new file mode 100644 index 000000000..d0ee2cfca --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_5.xml @@ -0,0 +1,57 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1476628305 + 302682 + 0 + 0 + 1 + 0 + 0 + + + 0.00 + 0.00 + 0.00 + + + 0.0 + 0.1 + 0.0 + + + 1.5 + 259668 + + + 0.0 + 0 + + + +
diff --git a/plugins/inputs/monit/testdata/response_servicetype_6.xml b/plugins/inputs/monit/testdata/response_servicetype_6.xml new file mode 100644 index 000000000..5acabe2da --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_6.xml @@ -0,0 +1,41 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572862451 + 947495 + 0 + 0 + 1 + 0 + 0 + 664 + 1000 + 1000 + 1572271731 + +
\ No newline at end of file diff --git a/plugins/inputs/monit/testdata/response_servicetype_7.xml b/plugins/inputs/monit/testdata/response_servicetype_7.xml new file mode 100644 index 000000000..fbda56c5c --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_7.xml @@ -0,0 +1,42 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572850498 + 710675 + 0 + 0 + 1 + 0 + 0 + + 1572850498 + 0 + Stats health check successful. + + +
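The seemingly arbitrary `program_started` value asserted in monit_test.go follows directly from this fixture: Gather multiplies the epoch-seconds `started` value by the constant 10000000. A one-line check of that arithmetic:

```go
package main

import "fmt"

func main() {
	// monit.go computes program_started as Started * 10000000; with the
	// fixture's started value of 1572850498 this yields exactly the
	// int64 asserted in the program service type test.
	const started int64 = 1572850498
	fmt.Println(started * 10000000) // 15728504980000000
}
```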
diff --git a/plugins/inputs/monit/testdata/response_servicetype_8.xml b/plugins/inputs/monit/testdata/response_servicetype_8.xml new file mode 100644 index 000000000..12623a9d4 --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_8.xml @@ -0,0 +1,70 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572869770 + 807562 + 0 + 0 + 1 + 0 + 0 + + 1 + 1000000000 + 1 + + + 0 + 15243 + + + 0 + 5506778 + + + 0 + 0 + + + + + 0 + 8822 + + + 0 + 1287240 + + + 0 + 0 + + + + +
diff --git a/plugins/inputs/monit/testdata/response_servicetype_8_failure.xml b/plugins/inputs/monit/testdata/response_servicetype_8_failure.xml new file mode 100644 index 000000000..d68419d59 --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_8_failure.xml @@ -0,0 +1,70 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572869770 + 807562 + 8388608 + 0 + 1 + 0 + 0 + + 0 + -1 + -1 + + + 0 + 0 + + + 0 + 0 + + + 0 + 0 + + + + + 0 + 0 + + + 0 + 0 + + + 0 + 0 + + + + +
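Between response_servicetype_8.xml (duplex 1) and this failure fixture (duplex -1), only the link values change; the resulting `link_mode` field comes from monit.go's linkMode mapping, restated here on its own:

```go
package main

import "fmt"

// linkModeFor restates linkMode from monit.go: 1 means duplex, 0 means
// simplex, and any other value (the failure fixture uses -1) falls
// through to "unknown".
func linkModeFor(duplex int) string {
	switch duplex {
	case 1:
		return "duplex"
	case 0:
		return "simplex"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(linkModeFor(1))  // response_servicetype_8.xml
	fmt.Println(linkModeFor(-1)) // response_servicetype_8_failure.xml
}
```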
diff --git a/plugins/inputs/monit/testdata/response_servicetype_8_initializingmode.xml b/plugins/inputs/monit/testdata/response_servicetype_8_initializingmode.xml new file mode 100644 index 000000000..357f66f3b --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_8_initializingmode.xml @@ -0,0 +1,70 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572869770 + 807562 + 0 + 0 + 2 + 0 + 0 + + 1 + 1000000000 + 1 + + + 0 + 15243 + + + 0 + 5506778 + + + 0 + 0 + + + + + 0 + 8822 + + + 0 + 1287240 + + + 0 + 0 + + + + +
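This fixture's monitor code of 2 is what produces the `initializing` monitoring_status tag in the corresponding test case; the mapping from monit.go's monitoringStatus, restated for reference:

```go
package main

import "fmt"

// statusFor restates monitoringStatus from monit.go; any unlisted code
// (including 0) maps to "not_monitored".
func statusFor(code int) string {
	switch code {
	case 1:
		return "monitored"
	case 2:
		return "initializing"
	case 4:
		return "waiting"
	}
	return "not_monitored"
}

func main() {
	fmt.Println(statusFor(2)) // "initializing", as asserted for this fixture
}
```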
diff --git a/plugins/inputs/monit/testdata/response_servicetype_8_passivemode.xml b/plugins/inputs/monit/testdata/response_servicetype_8_passivemode.xml new file mode 100644 index 000000000..a4d9595ae --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_8_passivemode.xml @@ -0,0 +1,70 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572869770 + 807562 + 0 + 0 + 1 + 1 + 0 + + 1 + 1000000000 + 1 + + + 0 + 15243 + + + 0 + 5506778 + + + 0 + 0 + + + + + 0 + 8822 + + + 0 + 1287240 + + + 0 + 0 + + + + +
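Likewise, the passive-mode fixture sets the monitor mode code to 1, which monit.go's monitoringMode maps to the `passive` tag:

```go
package main

import "fmt"

// modeFor restates monitoringMode from monit.go: 0 is active, 1 is
// passive, anything else is unknown.
func modeFor(code int) string {
	switch code {
	case 0:
		return "active"
	case 1:
		return "passive"
	}
	return "unknown"
}

func main() {
	fmt.Println(modeFor(1)) // "passive", matching the passive-mode test case
}
```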
diff --git a/plugins/inputs/monit/testdata/response_servicetype_8_pendingaction.xml b/plugins/inputs/monit/testdata/response_servicetype_8_pendingaction.xml new file mode 100644 index 000000000..df19a6428 --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_8_pendingaction.xml @@ -0,0 +1,70 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572869770 + 807562 + 0 + 0 + 1 + 0 + 5 + + 1 + 1000000000 + 1 + + + 0 + 15243 + + + 0 + 5506778 + + + 0 + 0 + + + + + 0 + 8822 + + + 0 + 1287240 + + + 0 + 0 + + + + +
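The final fixture sets a pending action code of 5; because the lookup table in monit.go is 1-based (0 means no pending action), that selects "exec", as the test expects:

```go
package main

import "fmt"

// The same table as monit.go's pendingActions; codes are 1-based, with
// 0 meaning no pending action.
var pendingActions = []string{
	"ignore", "alert", "restart", "stop",
	"exec", "unmonitor", "start", "monitor",
}

func main() {
	code := 5 // from response_servicetype_8_pendingaction.xml
	fmt.Println(pendingActions[code-1]) // "exec", as asserted in monit_test.go
}
```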
From ec35f0777002c20cf9d30e7ee7262673d45a3a70 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 22 Jan 2020 15:27:54 -0800 Subject: [PATCH 1486/1815] Update monit docs and update changelog/readme --- CHANGELOG.md | 1 + README.md | 1 + plugins/inputs/EXAMPLE_README.md | 2 +- plugins/inputs/monit/README.md | 339 +++++++++++++++++++------------ plugins/inputs/monit/monit.go | 9 +- 5 files changed, 221 insertions(+), 131 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b312f4030..f6b38133e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ #### New Inputs - [infiniband](/plugins/inputs/infiniband/README.md) - Contributed by @willfurnell +- [monit](/plugins/inputs/monit/README.md) - Contributed by @SirishaGopigiri #### New Outputs diff --git a/README.md b/README.md index 81990320d..b97b8719b 100644 --- a/README.md +++ b/README.md @@ -236,6 +236,7 @@ For documentation on the latest development code see the [documentation index][d * [mesos](./plugins/inputs/mesos) * [minecraft](./plugins/inputs/minecraft) * [mongodb](./plugins/inputs/mongodb) +* [monit](./plugins/inputs/monit) * [mqtt_consumer](./plugins/inputs/mqtt_consumer) * [multifile](./plugins/inputs/multifile) * [mysql](./plugins/inputs/mysql) diff --git a/plugins/inputs/EXAMPLE_README.md b/plugins/inputs/EXAMPLE_README.md index b60d48c91..8ac55876b 100644 --- a/plugins/inputs/EXAMPLE_README.md +++ b/plugins/inputs/EXAMPLE_README.md @@ -36,7 +36,7 @@ mapped to the output. - field1 (type, unit) - field2 (float, percent) -- measurement2 ++ measurement2 - tags: - tag3 - fields: diff --git a/plugins/inputs/monit/README.md b/plugins/inputs/monit/README.md index 613e1eac3..9abd657d5 100644 --- a/plugins/inputs/monit/README.md +++ b/plugins/inputs/monit/README.md @@ -1,148 +1,235 @@ -# Monit Plugin +# Monit Input Plugin -The monit plugin gathers metrics and status information about local processes, -remote hosts, file, file systems, directories and network interfaces managed and watched over by Monit. +The `monit` plugin gathers metrics and status information about local processes, +remote hosts, file, file systems, directories and network interfaces managed +and watched over by [Monit][monit]. -To install Monit agent on the host please refer to the link https://mmonit.com/wiki/Monit/Installation +The use this plugin you should first enable the [HTTPD TCP port][httpd] in +Monit. -Minimum Version of Monit tested with is 5.16 +Minimum Version of Monit tested with is 5.16. 
-### Configuration:
+[monit]: https://mmonit.com/
+[httpd]: https://mmonit.com/monit/documentation/monit.html#TCP-PORT
+
+### Configuration
 
 ```toml
-# Read metrics and status information about processes managed by Monit
- [[inputs.monit]]
- #SampleConfig
- address = "http://127.0.0.1:2812"
- basic_auth_username = "test"
- basic_auth_password = "test"
+[[inputs.monit]]
+  ## Monit HTTPD address
+  address = "http://127.0.0.1:2812"
+
+  ## Username and Password for Monit
+  # username = ""
+  # password = ""
+
+  ## Amount of time allowed to complete the HTTP request
+  # timeout = "5s"
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
 ```
 
-### Tags:
-All measurements have the following tags:
-- address
-- version
-- service
-- paltform_name
-- status
-- monitoring_status
-- monitoring_mode
+### Metrics
 
-### Measurements & Fields:
+- monit_filesystem
+  - tags:
+    - address
+    - version
+    - service
+    - platform_name
+    - status
+    - monitoring_status
+    - monitoring_mode
+  - fields:
+    - status_code
+    - monitoring_status_code
+    - monitoring_mode_code
+    - mode
+    - block_percent
+    - block_usage
+    - block_total
+    - inode_percent
+    - inode_usage
+    - inode_total
 
-
++ monit_directory
+  - tags:
+    - address
+    - version
+    - service
+    - platform_name
+    - status
+    - monitoring_status
+    - monitoring_mode
+  - fields:
+    - status_code
+    - monitoring_status_code
+    - monitoring_mode_code
+    - permissions
 
-### Fields:
-Fields for all Monit service types:
-- status_code
-- monitoring_status_code
-- monitoring_mode_code
+- monit_file
+  - tags:
+    - address
+    - version
+    - service
+    - platform_name
+    - status
+    - monitoring_status
+    - monitoring_mode
+  - fields:
+    - status_code
+    - monitoring_status_code
+    - monitoring_mode_code
+    - size
+    - permissions
 
-### Measurement & Fields:
-Fields for Monit service type Filesystem:
-- Measurement:
-  - monit_filesystem
-- Fields:
-  - mode
-  - block_percent
-  - block_usage
-  - block_total
-  - inode_percent
-  - inode_usage
-  - inode_total
++ monit_process
+  - tags:
+    - address
+    - version
+    - service
+    - platform_name
+    - status
+    - monitoring_status
+    - monitoring_mode
+  - fields:
+    - status_code
+    - monitoring_status_code
+    - monitoring_mode_code
+    - cpu_percent
+    - cpu_percent_total
+    - mem_kb
+    - mem_kb_total
+    - mem_percent
+    - mem_percent_total
+    - pid
+    - parent_pid
+    - threads
+    - children
 
-Fields for Monit service type directory:
-- Measurement:
-  - monit_directory
-- Fields:
-  - permissions
+- monit_remote_host
+  - tags:
+    - address
+    - version
+    - service
+    - platform_name
+    - status
+    - monitoring_status
+    - monitoring_mode
+  - fields:
+    - status_code
+    - monitoring_status_code
+    - monitoring_mode_code
+    - hostname
+    - port_number
+    - request
+    - protocol
+    - type
 
-Fields for Monit service type file:
-- Measurement:
-  - monit_file
-- Fields:
-  - size
-  - permissions
++ monit_system
+  - tags:
+    - address
+    - version
+    - service
+    - platform_name
+    - status
+    - monitoring_status
+    - monitoring_mode
+  - fields:
+    - status_code
+    - monitoring_status_code
+    - monitoring_mode_code
+    - cpu_system
+    - cpu_user
+    - cpu_wait
+    - cpu_load_avg_1m
+    - cpu_load_avg_5m
+    - cpu_load_avg_15m
+    - mem_kb
+    - mem_percent
+    - swap_kb
+    - swap_percent
 
-Fields for Monit service type process:
-- Measurement:
-  - monit_process
-- Fields:
-  - cpu_percent
-  - cpu_percent_total
-  - mem_kb
-  - mem_kb_total
-  - mem_percent
-  - mem_percent_total
-  - pid
-  - parent_pid
-  - threads
-  - children
+- monit_fifo
+  - tags:
+    - address
+    - version
+    - service
+    - platform_name
+    - status
+    - monitoring_status
+    - monitoring_mode
+  - fields:
+    - status_code
+    - monitoring_status_code
+    - monitoring_mode_code
+    - permissions
 
-Fields for Monit service type remote host:
-- Measurement:
-  - monit_remote_host
-- Fields:
-  - hostname
-  - port_number
-  - request
-  - protocol
-  - type
++ monit_program
+  - tags:
+    - address
+    - version
+    - service
+    - platform_name
+    - status
+    - monitoring_status
+    - monitoring_mode
+  - fields:
+    - status_code
+    - monitoring_status_code
+    - monitoring_mode_code
+    - program_started
+    - program_status
 
-Fields for Monit service type system:
-- Measurement:
-  - monit_system
-- Fields:
-  - cpu_system
-  - cpu_user
-  - cpu_wait
-  - cpu_load_avg_1m
-  - cpu_load_avg_5m
-  - cpu_load_avg_15m
-  - mem_kb
-  - mem_percent
-  - swap_kb
-  - swap_percent
+- monit_network
+  - tags:
+    - address
+    - version
+    - service
+    - platform_name
+    - status
+    - monitoring_status
+    - monitoring_mode
+  - fields:
+    - status_code
+    - monitoring_status_code
+    - monitoring_mode_code
+    - link_state
+    - link_speed
+    - link_mode
+    - download_packets_now
+    - download_packets_total
+    - download_bytes_now
+    - download_bytes_total
+    - download_errors_now
+    - download_errors_total
+    - upload_packets_now
+    - upload_packets_total
+    - upload_bytes_now
+    - upload_bytes_total
+    - upload_errors_now
+    - upload_errors_total
 
-Fields for Monit service type fifo:
-- Measurement:
-  - monit_fifo
-- Fields:
-  - permissions
-
-Fields for Monit service type program:
-- Measurement:
-  - monit_program
-- Fields:
-  - last_started_time
-  - program_status
-
-Fields for Monit service type network:
-- Measurement:
-  - monit_network
-- Fields:
-  - link_state
-  - link_mode
-  - link_speed
-  - download_packets_now
-  - download_packets_total
-  - download_bytes_now
-  - download_bytes_total
-  - download_errors_now
-  - download_errors_total
-  - upload_packets_now
-  - upload_packets_total
-  - upload_bytes_now
-  - upload_bytes_total
-  - upload_errors_now
-  - upload_errors_total
-
-### Example Output:
+### Example Output
 
 ```
-$ ./telegraf -config telegraf.conf -input-filter monit -test
-monit_system,address=http://localhost:2812,host=verizon-onap,hostname=verizon-onap,monitoring_mode=Monitoring\ mode:\ \ active,monitoring_status=Monitoring\ status:\ \ Monitored,platform_name=Linux,service=verizon-onap,status=Running,version=5.16 status_code=0i,cpu_system=1.9,cpu_user=4.7,cpu_wait=1.5,cpu_load_avg_1m=1.24,cpu_load_avg_5m=1.68,mem_percent=67.1,monitoring_status_code=1i,monitoring_mode_code=0i,cpu_load_avg_15m=1.64,mem_kb=10961012i,swap_kb=2322688,swap_percent=13.9
1578636430000000000 -monit_network,address=http://localhost:2812,host=verizon-onap,hostname=verizon-onap,monitoring_mode=Monitoring\ mode:\ \ active,monitoring_status=Monitoring\ status:\ \ Monitored,platform_name=Linux,service=test1,status=Failure,version=5.16 monitoring_status_code=1i,monitoring_mode_code=0i,download_packets_total=0i,upload_bytes_now=0i,download_errors_total=0i,status_code=8388608i,link_speed=-1i,link_mode="Unknown Mode",download_bytes_now=0i,download_bytes_total=0i,download_errors_now=0i,upload_packets_total=0i,upload_bytes_total=0i,upload_errors_now=0i,upload_errors_total=0i,link_state=0i,download_packets_now=0i,upload_packets_now=0i 1578636430000000000 -monit_directory,address=http://localhost:2812,host=verizon-onap,hostname=verizon-onap,monitoring_mode=Monitoring\ mode:\ \ passive,monitoring_status=Monitoring\ status:\ \ Monitored,platform_name=Linux,service=test,status=Running,version=5.16 status_code=0i,monitoring_status_code=1i,monitoring_mode_code=1i,permissions=755i 1578636430000000000 +monit_file,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=rsyslog_pid,source=xyzzy.local,status=running,version=5.20.0 mode=644i,monitoring_mode_code=0i,monitoring_status_code=1i,pending_action_code=0i,size=3i,status_code=0i 1579735047000000000 +monit_process,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=rsyslog,source=xyzzy.local,status=running,version=5.20.0 children=0i,cpu_percent=0,cpu_percent_total=0,mem_kb=3148i,mem_kb_total=3148i,mem_percent=0.2,mem_percent_total=0.2,monitoring_mode_code=0i,monitoring_status_code=1i,parent_pid=1i,pending_action_code=0i,pid=318i,status_code=0i,threads=4i 1579735047000000000 +monit_program,monitoring_mode=active,monitoring_status=initializing,pending_action=none,platform_name=Linux,service=echo,source=xyzzy.local,status=running,version=5.20.0 monitoring_mode_code=0i,monitoring_status_code=2i,pending_action_code=0i,program_started=0i,program_status=0i,status_code=0i 1579735047000000000 +monit_system,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=debian-stretch-monit.virt,source=xyzzy.local,status=running,version=5.20.0 cpu_load_avg_15m=0,cpu_load_avg_1m=0,cpu_load_avg_5m=0,cpu_system=0,cpu_user=0,cpu_wait=0,mem_kb=42852i,mem_percent=2.1,monitoring_mode_code=0i,monitoring_status_code=1i,pending_action_code=0i,status_code=0i,swap_kb=0,swap_percent=0 1579735047000000000 ``` diff --git a/plugins/inputs/monit/monit.go b/plugins/inputs/monit/monit.go index b7477d784..dddb801d3 100644 --- a/plugins/inputs/monit/monit.go +++ b/plugins/inputs/monit/monit.go @@ -3,12 +3,13 @@ package monit import ( "encoding/xml" "fmt" + "net/http" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" "golang.org/x/net/html/charset" - "net/http" ) const ( @@ -211,12 +212,12 @@ func (m *Monit) Description() string { } var sampleConfig = ` - ## Monit + ## Monit HTTPD address address = "http://127.0.0.1:2812" ## Username and Password for Monit - username = "" - password = "" + # username = "" + # password = "" ## Amount of time allowed to complete the HTTP request # timeout = "5s" From 9243ae9f51508c877b9f34350f83d584234fe65c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 22 Jan 2020 15:28:41 -0800 Subject: [PATCH 1487/1815] Add udp internal metrics for the statsd input (#6921) --- 
plugins/inputs/statsd/statsd.go | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index a0d3c9ee7..32b12a7e9 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -122,8 +122,12 @@ type Statsd struct { MaxConnections selfstat.Stat CurrentConnections selfstat.Stat TotalConnections selfstat.Stat - PacketsRecv selfstat.Stat - BytesRecv selfstat.Stat + TCPPacketsRecv selfstat.Stat + TCPBytesRecv selfstat.Stat + UDPPacketsRecv selfstat.Stat + UDPPacketsDrop selfstat.Stat + UDPBytesRecv selfstat.Stat + ParseTimeNS selfstat.Stat Log telegraf.Logger @@ -327,8 +331,12 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error { s.MaxConnections.Set(int64(s.MaxTCPConnections)) s.CurrentConnections = selfstat.Register("statsd", "tcp_current_connections", tags) s.TotalConnections = selfstat.Register("statsd", "tcp_total_connections", tags) - s.PacketsRecv = selfstat.Register("statsd", "tcp_packets_received", tags) - s.BytesRecv = selfstat.Register("statsd", "tcp_bytes_received", tags) + s.TCPPacketsRecv = selfstat.Register("statsd", "tcp_packets_received", tags) + s.TCPBytesRecv = selfstat.Register("statsd", "tcp_bytes_received", tags) + s.UDPPacketsRecv = selfstat.Register("statsd", "udp_packets_received", tags) + s.UDPPacketsDrop = selfstat.Register("statsd", "udp_packets_dropped", tags) + s.UDPBytesRecv = selfstat.Register("statsd", "udp_bytes_received", tags) + s.ParseTimeNS = selfstat.Register("statsd", "parse_time_ns", tags) s.in = make(chan input, s.AllowedPendingMessages) s.done = make(chan struct{}) @@ -461,6 +469,8 @@ func (s *Statsd) udpListen(conn *net.UDPConn) error { } return err } + s.UDPPacketsRecv.Incr(1) + s.UDPBytesRecv.Incr(int64(n)) b := s.bufPool.Get().(*bytes.Buffer) b.Reset() b.Write(buf[:n]) @@ -470,6 +480,7 @@ func (s *Statsd) udpListen(conn *net.UDPConn) error { Time: time.Now(), Addr: addr.IP.String()}: default: + s.UDPPacketsDrop.Incr(1) s.drops++ if s.drops == 1 || s.AllowedPendingMessages == 0 || s.drops%s.AllowedPendingMessages == 0 { s.Log.Errorf("Statsd message queue full. "+ @@ -490,6 +501,7 @@ func (s *Statsd) parser() error { case <-s.done: return nil case in := <-s.in: + start := time.Now() lines := strings.Split(in.Buffer.String(), "\n") s.bufPool.Put(in.Buffer) for _, line := range lines { @@ -502,6 +514,8 @@ func (s *Statsd) parser() error { s.parseStatsdLine(line) } } + elapsed := time.Since(start) + s.ParseTimeNS.Set(elapsed.Nanoseconds()) } } } @@ -834,8 +848,8 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) { if n == 0 { continue } - s.BytesRecv.Incr(int64(n)) - s.PacketsRecv.Incr(1) + s.TCPBytesRecv.Incr(int64(n)) + s.TCPPacketsRecv.Incr(1) b := s.bufPool.Get().(*bytes.Buffer) b.Reset() From fb1b29cc20b4fdf40ab98a18374e11253e9d9105 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 22 Jan 2020 15:29:21 -0800 Subject: [PATCH 1488/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f6b38133e..9bb08dc16 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ - [#6859](https://github.com/influxdata/telegraf/pull/6859): Exclude resources by inventory path in vsphere input. - [#6700](https://github.com/influxdata/telegraf/pull/6700): Allow a user defined field to be used as the graylog short_message. - [#6917](https://github.com/influxdata/telegraf/pull/6917): Add server_name override for x509_cert plugin. 
+- [#6921](https://github.com/influxdata/telegraf/pull/6921): Add udp internal metrics for the statsd input. #### Bugfixes From bc3429ed48a59de5c6cb031603ca0b0c2f2fee70 Mon Sep 17 00:00:00 2001 From: vikkyomkar Date: Fri, 24 Jan 2020 01:38:16 +0530 Subject: [PATCH 1489/1815] Add compatibility for Kibana 6.4 and later (#6923) --- plugins/inputs/kibana/README.md | 11 +- plugins/inputs/kibana/kibana.go | 44 +++- plugins/inputs/kibana/kibana_test.go | 40 ++- .../{testdata_test.go => testdata_test6_3.go} | 5 +- plugins/inputs/kibana/testdata_test6_5.go | 227 ++++++++++++++++++ 5 files changed, 306 insertions(+), 21 deletions(-) rename plugins/inputs/kibana/{testdata_test.go => testdata_test6_3.go} (96%) create mode 100644 plugins/inputs/kibana/testdata_test6_5.go diff --git a/plugins/inputs/kibana/README.md b/plugins/inputs/kibana/README.md index 7d885aed1..f24e3d33a 100644 --- a/plugins/inputs/kibana/README.md +++ b/plugins/inputs/kibana/README.md @@ -42,6 +42,7 @@ with following rules: - kibana - status_code: integer (1, 2, 3, 0) + - heap_total_bytes: integer - heap_max_bytes: integer - heap_used_bytes: integer - uptime_ms: integer @@ -52,12 +53,12 @@ with following rules: ### Tags -- status (Kibana health: green, yellow, red) - name (Kibana reported name) -- uuid (Kibana reported UUID) -- version (Kibana version) - source (Kibana server hostname or IP) +- status (Kibana health: green, yellow, red) +- version (Kibana version) ### Example Output - -kibana,host=myhost,name=my-kibana,source=localhost:5601,version=6.3.2 concurrent_connections=0i,heap_max_bytes=136478720i,heap_used_bytes=119231088i,response_time_avg_ms=0i,response_time_max_ms=0i,status="green",status_code=1i,uptime_ms=2187428019i 1534864502000000000 +``` +kibana,host=myhost,name=my-kibana,source=localhost:5601,status=green,version=6.5.4 concurrent_connections=8i,heap_max_bytes=447778816i,heap_total_bytes=447778816i,heap_used_bytes=380603352i,requests_per_sec=1,response_time_avg_ms=57.6,response_time_max_ms=220i,status_code=1i,uptime_ms=6717489805i 1534864502000000000 +``` diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go index 0e21ad800..64353013c 100644 --- a/plugins/inputs/kibana/kibana.go +++ b/plugins/inputs/kibana/kibana.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "net/http" + "strconv" "strings" "sync" "time" @@ -54,7 +55,9 @@ type responseTimes struct { } type process struct { - Mem mem `json:"mem"` + Mem mem `json:"mem"` + Memory memory `json:"memory"` + UptimeInMillis int64 `json:"uptime_in_millis"` } type requests struct { @@ -66,6 +69,15 @@ type mem struct { HeapUsedInBytes int64 `json:"heap_used_in_bytes"` } +type memory struct { + Heap heap `json:"heap"` +} + +type heap struct { + TotalInBytes int64 `json:"total_in_bytes"` + UsedInBytes int64 `json:"used_in_bytes"` +} + const sampleConfig = ` ## specify a list of one or more Kibana servers servers = ["http://localhost:5601"] @@ -187,15 +199,37 @@ func (k *Kibana) gatherKibanaStatus(baseUrl string, acc telegraf.Accumulator) er tags["status"] = kibanaStatus.Status.Overall.State fields["status_code"] = mapHealthStatusToCode(kibanaStatus.Status.Overall.State) - - fields["uptime_ms"] = kibanaStatus.Metrics.UptimeInMillis fields["concurrent_connections"] = kibanaStatus.Metrics.ConcurrentConnections - fields["heap_max_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes - fields["heap_used_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapUsedInBytes fields["response_time_avg_ms"] = kibanaStatus.Metrics.ResponseTimes.AvgInMillis 
fields["response_time_max_ms"] = kibanaStatus.Metrics.ResponseTimes.MaxInMillis fields["requests_per_sec"] = float64(kibanaStatus.Metrics.Requests.Total) / float64(kibanaStatus.Metrics.CollectionIntervalInMilles) * 1000 + versionArray := strings.Split(kibanaStatus.Version.Number, ".") + arrayElement := 1 + + if len(versionArray) > 1 { + arrayElement = 2 + } + versionNumber, err := strconv.ParseFloat(strings.Join(versionArray[:arrayElement], "."), 64) + if err != nil { + return err + } + + // Same value will be assigned to both the metrics [heap_max_bytes and heap_total_bytes ] + // Which keeps the code backward compatible + if versionNumber >= 6.4 { + fields["uptime_ms"] = kibanaStatus.Metrics.Process.UptimeInMillis + fields["heap_max_bytes"] = kibanaStatus.Metrics.Process.Memory.Heap.TotalInBytes + fields["heap_total_bytes"] = kibanaStatus.Metrics.Process.Memory.Heap.TotalInBytes + fields["heap_used_bytes"] = kibanaStatus.Metrics.Process.Memory.Heap.UsedInBytes + } else { + fields["uptime_ms"] = kibanaStatus.Metrics.UptimeInMillis + fields["heap_max_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes + fields["heap_total_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes + fields["heap_used_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapUsedInBytes + + } + acc.AddFields("kibana", fields, tags) return nil diff --git a/plugins/inputs/kibana/kibana_test.go b/plugins/inputs/kibana/kibana_test.go index ad5e32d29..537f6b560 100644 --- a/plugins/inputs/kibana/kibana_test.go +++ b/plugins/inputs/kibana/kibana_test.go @@ -9,7 +9,7 @@ import ( "github.com/influxdata/telegraf/testutil" ) -func defaultTags() map[string]string { +func defaultTags6_3() map[string]string { return map[string]string{ "name": "my-kibana", "source": "example.com:5601", @@ -18,6 +18,15 @@ func defaultTags() map[string]string { } } +func defaultTags6_5() map[string]string { + return map[string]string{ + "name": "my-kibana", + "source": "example.com:5601", + "version": "6.5.4", + "status": "green", + } +} + type transportMock struct { statusCode int body string @@ -41,22 +50,35 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { return res, nil } -func checkKibanaStatusResult(t *testing.T, acc *testutil.Accumulator) { - tags := defaultTags() - acc.AssertContainsTaggedFields(t, "kibana", kibanaStatusExpected, tags) +func checkKibanaStatusResult(version string, t *testing.T, acc *testutil.Accumulator) { + if version == "6.3.2" { + tags := defaultTags6_3() + acc.AssertContainsTaggedFields(t, "kibana", kibanaStatusExpected6_3, tags) + } else { + tags := defaultTags6_5() + acc.AssertContainsTaggedFields(t, "kibana", kibanaStatusExpected6_5, tags) + } } func TestGather(t *testing.T) { ks := newKibanahWithClient() ks.Servers = []string{"http://example.com:5601"} - ks.client.Transport = newTransportMock(http.StatusOK, kibanaStatusResponse) - - var acc testutil.Accumulator - if err := acc.GatherError(ks.Gather); err != nil { + // Unit test for Kibana version < 6.5 + ks.client.Transport = newTransportMock(http.StatusOK, kibanaStatusResponse6_3) + var acc1 testutil.Accumulator + if err := acc1.GatherError(ks.Gather); err != nil { t.Fatal(err) } + checkKibanaStatusResult(defaultTags6_3()["version"], t, &acc1) + + //Unit test for Kibana version >= 6.5 + ks.client.Transport = newTransportMock(http.StatusOK, kibanaStatusResponse6_5) + var acc2 testutil.Accumulator + if err := acc2.GatherError(ks.Gather); err != nil { + t.Fatal(err) + } + checkKibanaStatusResult(defaultTags6_5()["version"], t, &acc2) - 
checkKibanaStatusResult(t, &acc) } func newKibanahWithClient() *Kibana { diff --git a/plugins/inputs/kibana/testdata_test.go b/plugins/inputs/kibana/testdata_test6_3.go similarity index 96% rename from plugins/inputs/kibana/testdata_test.go rename to plugins/inputs/kibana/testdata_test6_3.go index ec393bb19..bda529273 100644 --- a/plugins/inputs/kibana/testdata_test.go +++ b/plugins/inputs/kibana/testdata_test6_3.go @@ -1,6 +1,6 @@ package kibana -const kibanaStatusResponse = ` +const kibanaStatusResponse6_3 = ` { "name": "my-kibana", "uuid": "00000000-0000-0000-0000-000000000000", @@ -187,8 +187,9 @@ const kibanaStatusResponse = ` } ` -var kibanaStatusExpected = map[string]interface{}{ +var kibanaStatusExpected6_3 = map[string]interface{}{ "status_code": 1, + "heap_total_bytes": int64(149954560), "heap_max_bytes": int64(149954560), "heap_used_bytes": int64(126274392), "uptime_ms": int64(2173595336), diff --git a/plugins/inputs/kibana/testdata_test6_5.go b/plugins/inputs/kibana/testdata_test6_5.go new file mode 100644 index 000000000..f47878b11 --- /dev/null +++ b/plugins/inputs/kibana/testdata_test6_5.go @@ -0,0 +1,227 @@ +package kibana + +const kibanaStatusResponse6_5 = ` +{ + "name": "my-kibana", + "uuid": "00000000-0000-0000-0000-000000000000", + "version": { + "number": "6.5.4", + "build_hash": "53d0c6758ac3fb38a3a1df198c1d4c87765e63f7", + "build_number": 17307, + "build_snapshot": false + }, + "status": { + "overall": { + "state": "green", + "title": "Green", + "nickname": "Looking good", + "icon": "success", + "since": "2018-07-27T07:37:42.567Z" + }, + "statuses": [{ + "id": "plugin:kibana@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.567Z" + }, + { + "id": "plugin:elasticsearch@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:04.920Z" + }, + { + "id": "plugin:xpack_main@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.393Z" + }, + { + "id": "plugin:searchprofiler@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.395Z" + }, + { + "id": "plugin:tilemap@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.396Z" + }, + { + "id": "plugin:watcher@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.397Z" + }, + { + "id": "plugin:license_management@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.668Z" + }, + { + "id": "plugin:index_management@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.399Z" + }, + { + "id": "plugin:timelion@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.912Z" + }, + { + "id": "plugin:logtrail@0.1.29", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.919Z" + }, + { + "id": "plugin:monitoring@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.922Z" + }, + { + "id": "plugin:grokdebugger@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.400Z" + }, + { + "id": "plugin:dashboard_mode@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.928Z" + }, + { + "id": "plugin:logstash@6.5.4", + "state": "green", 
+ "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.401Z" + }, + { + "id": "plugin:apm@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.950Z" + }, + { + "id": "plugin:console@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.958Z" + }, + { + "id": "plugin:console_extensions@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.961Z" + }, + { + "id": "plugin:metrics@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.965Z" + }, + { + "id": "plugin:reporting@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.402Z" + }] + }, + "metrics": { + "last_updated": "2020-01-15T09:40:17.733Z", + "collection_interval_in_millis": 5000, + "process": { + "memory": { + "heap": { + "total_in_bytes": 149954560, + "used_in_bytes": 126274392, + "size_limit": 1501560832 + }, + "resident_set_size_in_bytes": 286650368 + }, + "event_loop_delay": 0.5314235687255859, + "pid": 6, + "uptime_in_millis": 2173595336 + }, + "os": { + "load": { + "1m": 2.66015625, + "5m": 2.8173828125, + "15m": 2.51025390625 + }, + "memory": { + "total_in_bytes": 404355756032, + "free_in_bytes": 294494244864, + "used_in_bytes": 109861511168 + }, + "uptime_in_millis": 8220745000, + "cgroup": { + "cpuacct": { + "control_group": "/", + "usage_nanos": 1086527218898 + }, + "cpu": { + "control_group": "/", + "cfs_period_micros": 100000, + "cfs_quota_micros": -1, + "stat": { + "number_of_elapsed_periods": 0, + "number_of_times_throttled": 0, + "time_throttled_nanos": 0 + } + } + } + }, + "response_times": { + "avg_in_millis": 12.5, + "max_in_millis": 123 + }, + "requests": { + "total": 2, + "disconnects": 0, + "status_codes": { + "200": 1, + "304": 1 + } + }, + "concurrent_connections": 10 + } +} +` + +var kibanaStatusExpected6_5 = map[string]interface{}{ + "status_code": 1, + "heap_total_bytes": int64(149954560), + "heap_max_bytes": int64(149954560), + "heap_used_bytes": int64(126274392), + "uptime_ms": int64(2173595336), + "response_time_avg_ms": float64(12.5), + "response_time_max_ms": int64(123), + "concurrent_connections": int64(10), + "requests_per_sec": float64(0.4), +} From bbe2d12e7e140e33422533afb565a45eab100d8a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 23 Jan 2020 15:33:13 -0800 Subject: [PATCH 1490/1815] Update kibana docs --- plugins/inputs/EXAMPLE_README.md | 6 ++- plugins/inputs/kibana/README.md | 57 ++++++++++++---------------- plugins/inputs/kibana/kibana.go | 2 +- plugins/inputs/kibana/kibana_test.go | 5 +-- 4 files changed, 32 insertions(+), 38 deletions(-) diff --git a/plugins/inputs/EXAMPLE_README.md b/plugins/inputs/EXAMPLE_README.md index 8ac55876b..b15b9caf2 100644 --- a/plugins/inputs/EXAMPLE_README.md +++ b/plugins/inputs/EXAMPLE_README.md @@ -1,6 +1,6 @@ # Example Input Plugin -The example plugin gathers metrics about example things. This description +The `example` plugin gathers metrics about example things. This description explains at a high level what the plugin does and provides links to where additional information can be found. @@ -41,6 +41,10 @@ mapped to the output. 
- tag3 - fields: - field3 (integer, bytes) + - field4 (integer, green=1 yellow=2 red=3) + - field5 (string) + - field6 (float) + - field7 (boolean) ### Sample Queries diff --git a/plugins/inputs/kibana/README.md b/plugins/inputs/kibana/README.md index f24e3d33a..73bf4a298 100644 --- a/plugins/inputs/kibana/README.md +++ b/plugins/inputs/kibana/README.md @@ -1,15 +1,17 @@ -# Kibana input plugin +# Kibana Input Plugin -The [kibana](https://www.elastic.co/) plugin queries Kibana status API to -obtain the health status of Kibana and some useful metrics. +The `kibana` plugin queries the [Kibana][] API to obtain the service status. -This plugin has been tested and works on Kibana 6.x versions. +- Telegraf minimum version: 1.8 +- Kibana minimum tested version: 6.0 + +[Kibana]: https://www.elastic.co/ ### Configuration ```toml [[inputs.kibana]] - ## specify a list of one or more Kibana servers + ## Specify a list of one or more Kibana servers servers = ["http://localhost:5601"] ## Timeout for HTTP requests @@ -27,38 +29,27 @@ This plugin has been tested and works on Kibana 6.x versions. # insecure_skip_verify = false ``` -### Status mappings - -When reporting health (green/yellow/red), additional field `status_code` -is reported. Field contains mapping from status:string to status_code:int -with following rules: - -- `green` - 1 -- `yellow` - 2 -- `red` - 3 -- `unknown` - 0 - -### Measurements & Fields +### Metrics - kibana - - status_code: integer (1, 2, 3, 0) - - heap_total_bytes: integer - - heap_max_bytes: integer - - heap_used_bytes: integer - - uptime_ms: integer - - response_time_avg_ms: float - - response_time_max_ms: integer - - concurrent_connections: integer - - requests_per_sec: float - -### Tags - -- name (Kibana reported name) -- source (Kibana server hostname or IP) -- status (Kibana health: green, yellow, red) -- version (Kibana version) + - tags: + - name (Kibana reported name) + - source (Kibana server hostname or IP) + - status (Kibana health: green, yellow, red) + - version (Kibana version) + - fields: + - status_code (integer, green=1 yellow=2 red=3 unknown=0) + - heap_total_bytes (integer) + - heap_max_bytes (integer; deprecated in 1.13.3: use `heap_total_bytes` field) + - heap_used_bytes (integer) + - uptime_ms (integer) + - response_time_avg_ms (float) + - response_time_max_ms (integer) + - concurrent_connections (integer) + - requests_per_sec (float) ### Example Output + ``` kibana,host=myhost,name=my-kibana,source=localhost:5601,status=green,version=6.5.4 concurrent_connections=8i,heap_max_bytes=447778816i,heap_total_bytes=447778816i,heap_used_bytes=380603352i,requests_per_sec=1,response_time_avg_ms=57.6,response_time_max_ms=220i,status_code=1i,uptime_ms=6717489805i 1534864502000000000 ``` diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go index 64353013c..4b7e3c5c5 100644 --- a/plugins/inputs/kibana/kibana.go +++ b/plugins/inputs/kibana/kibana.go @@ -79,7 +79,7 @@ type heap struct { } const sampleConfig = ` - ## specify a list of one or more Kibana servers + ## Specify a list of one or more Kibana servers servers = ["http://localhost:5601"] ## Timeout for HTTP requests diff --git a/plugins/inputs/kibana/kibana_test.go b/plugins/inputs/kibana/kibana_test.go index 537f6b560..3dfed9edf 100644 --- a/plugins/inputs/kibana/kibana_test.go +++ b/plugins/inputs/kibana/kibana_test.go @@ -63,7 +63,7 @@ func checkKibanaStatusResult(version string, t *testing.T, acc *testutil.Accumul func TestGather(t *testing.T) { ks := newKibanahWithClient() ks.Servers = 
[]string{"http://example.com:5601"} - // Unit test for Kibana version < 6.5 + // Unit test for Kibana version < 6.4 ks.client.Transport = newTransportMock(http.StatusOK, kibanaStatusResponse6_3) var acc1 testutil.Accumulator if err := acc1.GatherError(ks.Gather); err != nil { @@ -71,14 +71,13 @@ func TestGather(t *testing.T) { } checkKibanaStatusResult(defaultTags6_3()["version"], t, &acc1) - //Unit test for Kibana version >= 6.5 + //Unit test for Kibana version >= 6.4 ks.client.Transport = newTransportMock(http.StatusOK, kibanaStatusResponse6_5) var acc2 testutil.Accumulator if err := acc2.GatherError(ks.Gather); err != nil { t.Fatal(err) } checkKibanaStatusResult(defaultTags6_5()["version"], t, &acc2) - } func newKibanahWithClient() *Kibana { From 5d9cecaa02c1fb330f3719dc01f778349520b9ba Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 23 Jan 2020 15:36:25 -0800 Subject: [PATCH 1491/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9bb08dc16..dd10a2e02 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -53,6 +53,7 @@ - [#6619](https://github.com/influxdata/telegraf/issues/6619): Change logic to allow recording of device fields when attributes is false. - [#6903](https://github.com/influxdata/telegraf/issues/6903): Do not add invalid timestamps to kafka messages. - [#6906](https://github.com/influxdata/telegraf/issues/6906): Fix json_strict option and set default of true. +- [#5744](https://github.com/influxdata/telegraf/issues/5744): Fix kibana input with Kibana versions greater than 6.4. ## v1.13.1 [2020-01-08] From cb50fadc207ae232140d267a9566561dea7a7207 Mon Sep 17 00:00:00 2001 From: AnastasiyaRagozina Date: Fri, 24 Jan 2020 02:46:23 +0300 Subject: [PATCH 1492/1815] Add replica set tag to mongodb input (#6914) --- plugins/inputs/mongodb/README.md | 1 + plugins/inputs/mongodb/mongodb_data.go | 4 ++++ plugins/inputs/mongodb/mongodb_data_test.go | 2 ++ 3 files changed, 7 insertions(+) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index 5449e3b4b..6f9fa3995 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -56,6 +56,7 @@ by running Telegraf with the `--debug` argument. 
- tags: - hostname - node_type + - rs_name - fields: - active_reads (integer) - active_writes (integer) diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index 09bacdae1..fd085665f 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -245,6 +245,10 @@ func (d *MongodbData) AddDefaultStats() { d.addStat(statLine, DefaultLatencyStats) } + if d.StatLine.ReplSetName != "" { + d.Tags["rs_name"] = d.StatLine.ReplSetName + } + if d.StatLine.OplogStats != nil { d.add("repl_oplog_window_sec", d.StatLine.OplogStats.TimeDiff) } diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index 711b3eef2..e643d1820 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -211,12 +211,14 @@ func TestStateTag(t *testing.T) { Query: 0, NodeType: "PRI", NodeState: "PRIMARY", + ReplSetName: "rs1", }, tags, ) stateTags := make(map[string]string) stateTags["node_type"] = "PRI" + stateTags["rs_name"] = "rs1" var acc testutil.Accumulator From 4929ad1e6e943f010775d7c6da59456acbc0caf9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 23 Jan 2020 15:47:33 -0800 Subject: [PATCH 1493/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index dd10a2e02..306b83002 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ - [#6700](https://github.com/influxdata/telegraf/pull/6700): Allow a user defined field to be used as the graylog short_message. - [#6917](https://github.com/influxdata/telegraf/pull/6917): Add server_name override for x509_cert plugin. - [#6921](https://github.com/influxdata/telegraf/pull/6921): Add udp internal metrics for the statsd input. +- [#6914](https://github.com/influxdata/telegraf/pull/6914): Add replica set tag to mongodb input. #### Bugfixes From c6b8947c154da75d82984f01ae8323d2f315c188 Mon Sep 17 00:00:00 2001 From: victornet Date: Fri, 24 Jan 2020 19:57:47 +0100 Subject: [PATCH 1494/1815] Add counters for merged reads and writes to diskio input. --- plugins/inputs/diskio/README.md | 20 +++++++++++++------- plugins/inputs/diskio/diskio.go | 2 ++ plugins/inputs/diskio/diskio_test.go | 22 +++++++++++++--------- 3 files changed, 28 insertions(+), 16 deletions(-) diff --git a/plugins/inputs/diskio/README.md b/plugins/inputs/diskio/README.md index 07bc71456..ce2acda55 100644 --- a/plugins/inputs/diskio/README.md +++ b/plugins/inputs/diskio/README.md @@ -64,6 +64,8 @@ docker run --privileged -v /:/hostfs:ro -v /run/udev:/run/udev:ro -e HOST_PROC=/ - io_time (integer, counter, milliseconds) - weighted_io_time (integer, counter, milliseconds) - iops_in_progress (integer, gauge) + - merged_reads (integer, counter) + - merged_writes (integer, counter) On linux these values correspond to the values in [`/proc/diskstats`](https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats) @@ -105,6 +107,13 @@ This value counts the number of I/O requests that have been issued to the device driver but have not yet completed. It does not include I/O requests that are in the queue but not yet issued to the device driver. +#### `merged_reads` & `merged_writes`: + +Reads and writes which are adjacent to each other may be merged for +efficiency. Thus two 4K reads may become one 8K read before it is +ultimately handed to the disk, and so it will be counted (and queued) +as only one I/O. These fields let you know how often this was done.
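These counters surface through gopsutil: the plugin reads `MergedReadCount` and `MergedWriteCount` from `disk.IOCountersStat` (the same fields the patch's test data populates) and maps them to `merged_reads`/`merged_writes`. A minimal standalone sketch of that read path, assuming only the vendored `github.com/shirou/gopsutil/disk` package, not code from this patch:

```
package main

import (
	"fmt"
	"log"

	"github.com/shirou/gopsutil/disk"
)

func main() {
	// On Linux, IOCounters is backed by /proc/diskstats and returns
	// per-device statistics keyed by device name.
	counters, err := disk.IOCounters()
	if err != nil {
		log.Fatal(err)
	}
	for name, io := range counters {
		// MergedReadCount/MergedWriteCount count requests the kernel
		// coalesced with an adjacent request before issuing them.
		fmt.Printf("%s merged_reads=%d merged_writes=%d\n",
			name, io.MergedReadCount, io.MergedWriteCount)
	}
}
```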
+ ### Sample Queries: #### Calculate percent IO utilization per disk and host: @@ -121,11 +130,8 @@ SELECT non_negative_derivative(last("weighted_io_time",1ms)) from "diskio" WHERE ### Example Output: ``` -diskio,name=sda weighted_io_time=8411917i,read_time=7446444i,write_time=971489i,io_time=866197i,write_bytes=5397686272i,iops_in_progress=0i,reads=2970519i,writes=361139i,read_bytes=119528903168i 1502467254359000000 -diskio,name=sda1 reads=2149i,read_bytes=10753536i,write_bytes=20697088i,write_time=346i,weighted_io_time=505i,writes=2110i,read_time=161i,io_time=208i,iops_in_progress=0i 1502467254359000000 -diskio,name=sda2 reads=2968279i,writes=359029i,write_bytes=5376989184i,iops_in_progress=0i,weighted_io_time=8411250i,read_bytes=119517334528i,read_time=7446249i,write_time=971143i,io_time=866010i 1502467254359000000 -diskio,name=sdb writes=99391856i,write_time=466700894i,io_time=630259874i,weighted_io_time=4245949844i,reads=2750773828i,read_bytes=80667939499008i,write_bytes=6329347096576i,read_time=3783042534i,iops_in_progress=2i 1502467254359000000 -diskio,name=centos/root read_time=7472461i,write_time=950014i,iops_in_progress=0i,weighted_io_time=8424447i,writes=298543i,read_bytes=119510105088i,io_time=837421i,reads=2971769i,write_bytes=5192795648i 1502467254359000000 -diskio,name=centos/var_log reads=1065i,writes=69711i,read_time=1083i,write_time=35376i,read_bytes=6828032i,write_bytes=184193536i,io_time=29699i,iops_in_progress=0i,weighted_io_time=36460i 1502467254359000000 -diskio,name=postgresql/pgsql write_time=478267417i,io_time=631098730i,iops_in_progress=2i,weighted_io_time=4263637564i,reads=2750777151i,writes=110044361i,read_bytes=80667939288064i,write_bytes=6329347096576i,read_time=3784499336i 1502467254359000000 +diskio,name=sda1 merged_reads=0i,reads=2353i,writes=10i,write_bytes=2117632i,write_time=49i,io_time=1271i,weighted_io_time=1350i,read_bytes=31350272i,read_time=1303i,iops_in_progress=0i,merged_writes=0i 1578326400000000000 +diskio,name=centos/var_log reads=1063077i,writes=591025i,read_bytes=139325491712i,write_bytes=144233131520i,read_time=650221i,write_time=24368817i,io_time=852490i,weighted_io_time=25037394i,iops_in_progress=1i,merged_reads=0i,merged_writes=0i 1578326400000000000 +diskio,name=sda write_time=49i,io_time=1317i,weighted_io_time=1404i,reads=2495i,read_time=1357i,write_bytes=2117632i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,writes=10i,read_bytes=38956544i 1578326400000000000 + ``` diff --git a/plugins/inputs/diskio/diskio.go b/plugins/inputs/diskio/diskio.go index 875ec9582..9c1e20ebd 100644 --- a/plugins/inputs/diskio/diskio.go +++ b/plugins/inputs/diskio/diskio.go @@ -148,6 +148,8 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error { "io_time": io.IoTime, "weighted_io_time": io.WeightedIO, "iops_in_progress": io.IopsInProgress, + "merged_reads": io.MergedReadCount, + "merged_writes": io.MergedWriteCount, } acc.AddCounter("diskio", fields, tags) } diff --git a/plugins/inputs/diskio/diskio_test.go b/plugins/inputs/diskio/diskio_test.go index b013e30ba..3ad203de0 100644 --- a/plugins/inputs/diskio/diskio_test.go +++ b/plugins/inputs/diskio/diskio_test.go @@ -31,15 +31,17 @@ func TestDiskIO(t *testing.T) { result: Result{ stats: map[string]disk.IOCountersStat{ "sda": { - ReadCount: 888, - WriteCount: 5341, - ReadBytes: 100000, - WriteBytes: 200000, - ReadTime: 7123, - WriteTime: 9087, - Name: "sda", - IoTime: 123552, - SerialNumber: "ab-123-ad", + ReadCount: 888, + WriteCount: 5341, + ReadBytes: 100000, + WriteBytes: 200000, + ReadTime: 
7123, + WriteTime: 9087, + MergedReadCount: 11, + MergedWriteCount: 12, + Name: "sda", + IoTime: 123552, + SerialNumber: "ab-123-ad", }, }, err: nil, @@ -61,6 +63,8 @@ func TestDiskIO(t *testing.T) { "io_time": uint64(123552), "weighted_io_time": uint64(0), "iops_in_progress": uint64(0), + "merged_reads": uint64(11), + "merged_writes": uint64(12), }, }, }, From 9d214ae291ba4e7e7df66c91fb50460a01a9e4e5 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Fri, 24 Jan 2020 12:01:13 -0700 Subject: [PATCH 1495/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 306b83002..6b1bb3b66 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,7 @@ - [#6917](https://github.com/influxdata/telegraf/pull/6917): Add server_name override for x509_cert plugin. - [#6921](https://github.com/influxdata/telegraf/pull/6921): Add udp internal metrics for the statsd input. - [#6914](https://github.com/influxdata/telegraf/pull/6914): Add replica set tag to mongodb input. +- [#6935](https://github.com/influxdata/telegraf/pull/6935): Add counters for merged reads and writes to diskio input. #### Bugfixes From 99da6f48833a4e17f3a792d4c431d2e0c3d9fb19 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 27 Jan 2020 23:18:23 -0800 Subject: [PATCH 1496/1815] Remove dep dependency information (#6937) --- Gopkg.lock | 1873 ---------------------------------------------------- Gopkg.toml | 314 --------- 2 files changed, 2187 deletions(-) delete mode 100644 Gopkg.lock delete mode 100644 Gopkg.toml diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index 477aff14a..000000000 --- a/Gopkg.lock +++ /dev/null @@ -1,1873 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - digest = "1:972f38a9c879a4920d1e3a3d3438104b6c06163bfa3e6f4064adb00468d40587" - name = "cloud.google.com/go" - packages = [ - "civil", - "compute/metadata", - "iam", - "internal/optional", - "internal/version", - "monitoring/apiv3", - "pubsub", - "pubsub/apiv1", - "pubsub/internal/distribution", - ] - pruneopts = "" - revision = "c728a003b238b26cef9ab6753a5dc424b331c3ad" - version = "v0.27.0" - -[[projects]] - branch = "master" - digest = "1:fc0802104acded1f48e4860a9f2db85b82b4a754fca9eae750ff4e8b8cdf2116" - name = "code.cloudfoundry.org/clock" - packages = ["."] - pruneopts = "" - revision = "02e53af36e6c978af692887ed449b74026d76fec" - -[[projects]] - digest = "1:ca3acef20fd660d4df327accbf3ca2df9a12213d914f3113305dcd56579324b9" - name = "collectd.org" - packages = [ - "api", - "cdtime", - "network", - ] - pruneopts = "" - revision = "2ce144541b8903101fb8f1483cc0497a68798122" - version = "v0.3.0" - -[[projects]] - digest = "1:5f61d4466cef935862c262f6bc00e24beb5b39b551e906f3cfb180dfac096d57" - name = "contrib.go.opencensus.io/exporter/stackdriver" - packages = ["propagation"] - pruneopts = "" - revision = "2b93072101d466aa4120b3c23c2e1b08af01541c" - version = "v0.6.0" - -[[projects]] - digest = "1:bd444f85703c5aff1ba686cb52766fd38c3730d4e1dfb02327b2481bfe674997" - name = "github.com/Azure/azure-pipeline-go" - packages = ["pipeline"] - pruneopts = "" - revision = "b8e3409182fd52e74f7d7bdfbff5833591b3b655" - version = "v0.1.8" - -[[projects]] - digest = "1:6ef03ecdaf3e9a003c2ebd67bfa673bbe8df2c23c82217a4448da766e8ef6b30" - name = "github.com/Azure/azure-storage-queue-go" - packages = ["azqueue"] - pruneopts = "" - revision = "6ed74e755687d1a74f08d9aab5a9e3f2fbe7d162" - version = "0.2.0" - -[[projects]] - digest = "1:e4a02906493a47ee87ef61aeea130ce6624da07349a6dc62494a4e72b550ca8e" - name = "github.com/Azure/go-autorest" - packages = [ - "autorest", - "autorest/adal", - "autorest/azure", - "autorest/azure/auth", - "autorest/azure/cli", - "autorest/date", - "logger", - "tracing", - ] - pruneopts = "" - revision = "3492b2aff5036c67228ab3c7dba3577c871db200" - version = "v13.3.0" - -[[projects]] - branch = "master" - digest = "1:005d83d9daaea4e3fc7b2eedf28f68ebf87df7d331a874e5d7d14f643467e7d9" - name = "github.com/Mellanox/rdmamap" - packages = ["."] - pruneopts = "" - revision = "7c3c4763a6ee6a4d624fe133135dc3a7c483111c" - -[[projects]] - digest = "1:298712a3ee36b59c3ca91f4183bd75d174d5eaa8b4aed5072831f126e2e752f6" - name = "github.com/Microsoft/ApplicationInsights-Go" - packages = [ - "appinsights", - "appinsights/contracts", - ] - pruneopts = "" - revision = "d2df5d440eda5372f24fcac03839a64d6cb5f7e5" - version = "v0.4.2" - -[[projects]] - digest = "1:45ec6eb579713a01991ad07f538fed3b576ee55f5ce9f248320152a9270d9258" - name = "github.com/Microsoft/go-winio" - packages = ["."] - pruneopts = "" - revision = "a6d595ae73cf27a1b8fc32930668708f45ce1c85" - version = "v0.4.9" - -[[projects]] - digest = "1:33f56caa9ab45fedc63d3d1d3e342d9f9d00726071f22c67d06b0cd26d49a55e" - name = "github.com/Shopify/sarama" - packages = ["."] - pruneopts = "" - revision = "" - version = "v1.24.1" - -[[projects]] - digest = "1:f82b8ac36058904227087141017bb82f4b0fc58272990a4cdae3e2d6d222644e" - name = "github.com/StackExchange/wmi" - packages = ["."] - pruneopts = "" - revision = "5d049714c4a64225c3c79a7cf7d02f7fb5b96338" - version = "1.0.0" - -[[projects]] - digest = "1:f296e8b29c60c94efed3b8cfae08d793cb95149cdd7343e6a9834b4ac7136475" - name = "github.com/aerospike/aerospike-client-go" - packages = [ - 
".", - "internal/lua", - "internal/lua/resources", - "logger", - "pkg/bcrypt", - "pkg/ripemd160", - "types", - "types/atomic", - "types/particle_type", - "types/rand", - "utils/buffer", - ] - pruneopts = "" - revision = "1dc8cf203d24cd454e71ce40ab4cd0bf3112df90" - version = "v1.27.0" - -[[projects]] - branch = "master" - digest = "1:8483994d21404c8a1d489f6be756e25bfccd3b45d65821f25695577791a08e68" - name = "github.com/alecthomas/units" - packages = ["."] - pruneopts = "" - revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a" - -[[projects]] - branch = "master" - digest = "1:7f21a8f175ee7f91c659f919c61032e11889fba5dc25c0cec555087cbb87435a" - name = "github.com/amir/raidman" - packages = [ - ".", - "proto", - ] - pruneopts = "" - revision = "1ccc43bfb9c93cb401a4025e49c64ba71e5e668b" - -[[projects]] - branch = "master" - digest = "1:0828d8c0f95689f832cf348fe23827feb7640cd698d612ef59e2f9d041f54c68" - name = "github.com/apache/thrift" - packages = ["lib/go/thrift"] - pruneopts = "" - revision = "f2867c24984aa53edec54a138c03db934221bdea" - -[[projects]] - digest = "1:f8bf2fcf62410b565b9caacb6a7a858302c22968f5738549c09a17dbe6ae306a" - name = "github.com/aws/aws-sdk-go" - packages = [ - "aws", - "aws/awserr", - "aws/awsutil", - "aws/client", - "aws/client/metadata", - "aws/corehandlers", - "aws/credentials", - "aws/credentials/ec2rolecreds", - "aws/credentials/endpointcreds", - "aws/credentials/processcreds", - "aws/credentials/stscreds", - "aws/crr", - "aws/csm", - "aws/defaults", - "aws/ec2metadata", - "aws/endpoints", - "aws/request", - "aws/session", - "aws/signer/v4", - "internal/ini", - "internal/sdkio", - "internal/sdkrand", - "internal/sdkuri", - "internal/shareddefaults", - "private/protocol", - "private/protocol/eventstream", - "private/protocol/eventstream/eventstreamapi", - "private/protocol/json/jsonutil", - "private/protocol/jsonrpc", - "private/protocol/query", - "private/protocol/query/queryutil", - "private/protocol/rest", - "private/protocol/xml/xmlutil", - "service/cloudwatch", - "service/dynamodb", - "service/dynamodb/dynamodbattribute", - "service/dynamodb/dynamodbiface", - "service/kinesis", - "service/kinesis/kinesisiface", - "service/sts", - ] - pruneopts = "" - revision = "5312c8dac9067d339c4e68d7e0dd5507b2f01849" - version = "v1.19.41" - -[[projects]] - branch = "master" - digest = "1:c0bec5f9b98d0bc872ff5e834fac186b807b656683bd29cb82fb207a1513fabb" - name = "github.com/beorn7/perks" - packages = ["quantile"] - pruneopts = "" - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - digest = "1:e5691038f8e87e7da05280095d968e50c17d624e25cca095d4e4cd947a805563" - name = "github.com/caio/go-tdigest" - packages = ["."] - pruneopts = "" - revision = "f3c8d94f65d3096ac96eda54ffcd10c0fe1477f1" - version = "v2.3.0" - -[[projects]] - digest = "1:f619cb9b07aebe5416262cdd8b86082e8d5bdc5264cb3b615ff858df0b645f97" - name = "github.com/cenkalti/backoff" - packages = ["."] - pruneopts = "" - revision = "2ea60e5f094469f9e65adb9cd103795b73ae743e" - version = "v2.0.0" - -[[projects]] - branch = "master" - digest = "1:ed5e77e0626ed76b7e7a2554bc4586aae768612381c5f62738f16a2dfa48763b" - name = "github.com/cisco-ie/nx-telemetry-proto" - packages = [ - "mdt_dialout", - "telemetry_bis", - ] - pruneopts = "" - revision = "82441e232cf6af9be0f808bf0c6421ee8519880e" - -[[projects]] - branch = "master" - digest = "1:298e42868718da06fc0899ae8fdb99c48a14477045234c9274d81caa79af6a8f" - name = "github.com/couchbase/go-couchbase" - packages = ["."] - pruneopts = "" - revision = 
"16db1f1fe037412f12738fa4d8448c549c4edd77" - -[[projects]] - branch = "master" - digest = "1:c734658274a6be88870a36742fdea96a3fce4fc99a7b90946c9e84335ceae71a" - name = "github.com/couchbase/gomemcached" - packages = [ - ".", - "client", - ] - pruneopts = "" - revision = "0da75df145308b9a4e6704d762ca9d9b77752efc" - -[[projects]] - branch = "master" - digest = "1:c1195c02bc8fbf5307cfb95bc79eddaa1351ee3587cc4a7bbe6932e2fb966ff2" - name = "github.com/couchbase/goutils" - packages = [ - "logging", - "scramsha", - ] - pruneopts = "" - revision = "e865a1461c8ac0032bd37e2d4dab3289faea3873" - -[[projects]] - digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "" - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" - -[[projects]] - branch = "master" - digest = "1:44330613a423ea575a90180ee9bf6f49de87df42725488764da71e18865c1469" - name = "github.com/denisenkom/go-mssqldb" - packages = [ - ".", - "internal/cp", - ] - pruneopts = "" - revision = "2be1aa521ff4499e74b7861a2779ba1e96e3e2c5" - -[[projects]] - digest = "1:6098222470fe0172157ce9bbef5d2200df4edde17ee649c5d6e48330e4afa4c6" - name = "github.com/dgrijalva/jwt-go" - packages = ["."] - pruneopts = "" - revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" - version = "v3.2.0" - -[[projects]] - digest = "1:459dfcae44c32c1a6831fb99c75b40e7139aa800a04f55f6e47fedb33ee4407d" - name = "github.com/dimchansky/utfbom" - packages = ["."] - pruneopts = "" - revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c" - version = "v1.1.0" - -[[projects]] - digest = "1:522eff2a1f014a64fb403db60fc0110653e4dc5b59779894d208e697b0708ddc" - name = "github.com/docker/distribution" - packages = [ - "digestset", - "reference", - ] - pruneopts = "" - revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" - -[[projects]] - digest = "1:d149605f1b00713fdc48150122892d77d49d30c825f690dd92f497aeb6cf18f5" - name = "github.com/docker/docker" - packages = [ - "api", - "api/types", - "api/types/blkiodev", - "api/types/container", - "api/types/events", - "api/types/filters", - "api/types/image", - "api/types/mount", - "api/types/network", - "api/types/registry", - "api/types/strslice", - "api/types/swarm", - "api/types/swarm/runtime", - "api/types/time", - "api/types/versions", - "api/types/volume", - "client", - "pkg/stdcopy", - ] - pruneopts = "" - revision = "ed7b6428c133e7c59404251a09b7d6b02fa83cc2" - -[[projects]] - digest = "1:a5ecc2e70260a87aa263811281465a5effcfae8a54bac319cee87c4625f04d63" - name = "github.com/docker/go-connections" - packages = [ - "nat", - "sockets", - "tlsconfig", - ] - pruneopts = "" - revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d" - version = "v0.3.0" - -[[projects]] - digest = "1:582d54fcb7233da8dde1dfd2210a5b9675d0685f84246a8d317b07d680c18b1b" - name = "github.com/docker/go-units" - packages = ["."] - pruneopts = "" - revision = "47565b4f722fb6ceae66b95f853feed578a4a51c" - version = "v0.3.3" - -[[projects]] - branch = "master" - digest = "1:809792497a26f3936462cc5787a0d644b4d3cbfd59587e4f8845a9396ca2eb8a" - name = "github.com/docker/libnetwork" - packages = ["ipvs"] - pruneopts = "" - revision = "d7b61745d16675c9f548b19f06fda80d422a74f0" - -[[projects]] - digest = "1:6d6672f85a84411509885eaa32f597577873de00e30729b9bb0eb1e1faa49c12" - name = "github.com/eapache/go-resiliency" - packages = ["breaker"] - pruneopts = "" - revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce" - version = "v1.1.0" - -[[projects]] - branch = "master" 
- digest = "1:7b12ea8b50040c6c2378ec5b5a1ab722730b2bfb46e8724ded57f2c3905431fa" - name = "github.com/eapache/go-xerial-snappy" - packages = ["."] - pruneopts = "" - revision = "040cc1a32f578808623071247fdbd5cc43f37f5f" - -[[projects]] - digest = "1:d8d46d21073d0f65daf1740ebf4629c65e04bf92e14ce93c2201e8624843c3d3" - name = "github.com/eapache/queue" - packages = ["."] - pruneopts = "" - revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98" - version = "v1.1.0" - -[[projects]] - digest = "1:392ebbe504a822b15b41dd09cecc5baa98e9e0942502950dc14ba1f23c149e32" - name = "github.com/eclipse/paho.mqtt.golang" - packages = [ - ".", - "packets", - ] - pruneopts = "" - revision = "adca289fdcf8c883800aafa545bc263452290bae" - version = "v1.2.0" - -[[projects]] - digest = "1:99a0607f79d36202b64b674c0464781549917cfc4bfb88037aaa98b31e124a18" - name = "github.com/ericchiang/k8s" - packages = [ - ".", - "apis/apiextensions/v1beta1", - "apis/apps/v1", - "apis/core/v1", - "apis/extensions/v1beta1", - "apis/meta/v1", - "apis/policy/v1beta1", - "apis/resource", - "runtime", - "runtime/schema", - "util/intstr", - "watch/versioned", - ] - pruneopts = "" - revision = "d1bbc0cffaf9849ddcae7b9efffae33e2dd52e9a" - version = "v1.2.0" - -[[projects]] - branch = "master" - digest = "1:ec95c1c49fbec27ab5383b9c47fae5c2fe1d97ac5b41d36d78e17588a44e9f3f" - name = "github.com/ghodss/yaml" - packages = ["."] - pruneopts = "" - revision = "25d852aebe32c875e9c044af3eef9c7dc6bc777f" - -[[projects]] - digest = "1:7a9dc29b3fbc9a6440d98fcff422a2ce1a613975697ea560e3610084234f91ec" - name = "github.com/glinton/ping" - packages = ["."] - pruneopts = "" - revision = "d3c0ecf4df108179eccdff2176f4ff569c3aab37" - version = "v0.1.3" - -[[projects]] - digest = "1:df89444601379b2e1ee82bf8e6b72af9901cbeed4b469fa380a519c89c339310" - name = "github.com/go-logfmt/logfmt" - packages = ["."] - pruneopts = "" - revision = "07c9b44f60d7ffdfb7d8efe1ad539965737836dc" - version = "v0.4.0" - -[[projects]] - digest = "1:96c4a6ff4206086347bfe28e96e092642882128f45ecb8dc8f15f3e6f6703af0" - name = "github.com/go-ole/go-ole" - packages = [ - ".", - "oleutil", - ] - pruneopts = "" - revision = "a41e3c4b706f6ae8dfbff342b06e40fa4d2d0506" - version = "v1.2.1" - -[[projects]] - digest = "1:3dfd659219b6f63dc0677a62b8d4e8f10b5cf53900aef40858db10a19407e41d" - name = "github.com/go-redis/redis" - packages = [ - ".", - "internal", - "internal/consistenthash", - "internal/hashtag", - "internal/pool", - "internal/proto", - "internal/singleflight", - "internal/util", - ] - pruneopts = "" - revision = "83fb42932f6145ce52df09860384a4653d2d332a" - version = "v6.12.0" - -[[projects]] - digest = "1:e692d16fdfbddb94e9e4886aaf6c08bdbae5cb4ac80651445de9181b371c6e46" - name = "github.com/go-sql-driver/mysql" - packages = ["."] - pruneopts = "" - revision = "72cd26f257d44c1114970e19afddcd812016007e" - version = "v1.4.1" - -[[projects]] - digest = "1:9ab1b1c637d7c8f49e39d8538a650d7eb2137b076790cff69d160823b505964c" - name = "github.com/gobwas/glob" - packages = [ - ".", - "compiler", - "match", - "syntax", - "syntax/ast", - "syntax/lexer", - "util/runes", - "util/strings", - ] - pruneopts = "" - revision = "5ccd90ef52e1e632236f7326478d4faa74f99438" - version = "v0.2.3" - -[[projects]] - digest = "1:181fe10dcb708edd7c68c5781928b6657612771f81dd1773287386b6982c94e2" - name = "github.com/gofrs/uuid" - packages = ["."] - pruneopts = "" - revision = "3a54a6416087bae7aa0ac32dd79fe1bf87bc99e4" - version = "v2.1.0" - -[[projects]] - digest = 
"1:6e73003ecd35f4487a5e88270d3ca0a81bc80dc88053ac7e4dcfec5fba30d918" - name = "github.com/gogo/protobuf" - packages = ["proto"] - pruneopts = "" - revision = "636bf0302bc95575d69441b25a2603156ffdddf1" - version = "v1.1.1" - -[[projects]] - digest = "1:68c64bb61d55dcd17c82ca0b871ddddb5ae18b30cfe26f6bfd4b6df6287dc2e0" - name = "github.com/golang/mock" - packages = ["gomock"] - pruneopts = "" - revision = "9fa652df1129bef0e734c9cf9bf6dbae9ef3b9fa" - version = "1.3.1" - -[[projects]] - digest = "1:f958a1c137db276e52f0b50efee41a1a389dcdded59a69711f3e872757dab34b" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "protoc-gen-go/descriptor", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/empty", - "ptypes/struct", - "ptypes/timestamp", - "ptypes/wrappers", - ] - pruneopts = "" - revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" - version = "v1.1.0" - -[[projects]] - branch = "master" - digest = "1:2a5888946cdbc8aa360fd43301f9fc7869d663f60d5eedae7d4e6e5e4f06f2bf" - name = "github.com/golang/snappy" - packages = ["."] - pruneopts = "" - revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" - -[[projects]] - digest = "1:f9f45f75f332e03fc7e9fe9188ea4e1ce4d14779ef34fa1b023da67518e36327" - name = "github.com/google/go-cmp" - packages = [ - "cmp", - "cmp/cmpopts", - "cmp/internal/diff", - "cmp/internal/function", - "cmp/internal/value", - ] - pruneopts = "" - revision = "3af367b6b30c263d47e8895973edcca9a49cf029" - version = "v0.2.0" - -[[projects]] - digest = "1:e38ad2825940d58bd8425be40bcd4211099d0c1988c158c35828197413b3cf85" - name = "github.com/google/go-github" - packages = ["github"] - pruneopts = "" - revision = "7462feb2032c2da9e3b85e9b04e6853a6e9e14ca" - version = "v24.0.1" - -[[projects]] - digest = "1:cea4aa2038169ee558bf507d5ea02c94ca85bcca28a4c7bb99fd59b31e43a686" - name = "github.com/google/go-querystring" - packages = ["query"] - pruneopts = "" - revision = "44c6ddd0a2342c386950e880b658017258da92fc" - version = "v1.0.0" - -[[projects]] - digest = "1:c1d7e883c50a26ea34019320d8ae40fad86c9e5d56e63a1ba2cb618cef43e986" - name = "github.com/google/uuid" - packages = ["."] - pruneopts = "" - revision = "064e2069ce9c359c118179501254f67d7d37ba24" - version = "0.2" - -[[projects]] - digest = "1:e097a364f4e8d8d91b9b9eeafb992d3796a41fde3eb548c1a87eb9d9f60725cf" - name = "github.com/googleapis/gax-go" - packages = ["."] - pruneopts = "" - revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f" - version = "v2.0.0" - -[[projects]] - digest = "1:dbbeb8ddb0be949954c8157ee8439c2adfd8dc1c9510eb44a6e58cb68c3dce28" - name = "github.com/gorilla/context" - packages = ["."] - pruneopts = "" - revision = "08b5f424b9271eedf6f9f0ce86cb9396ed337a42" - version = "v1.1.1" - -[[projects]] - digest = "1:c2c8666b4836c81a1d247bdf21c6a6fc1ab586538ab56f74437c2e0df5c375e1" - name = "github.com/gorilla/mux" - packages = ["."] - pruneopts = "" - revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf" - version = "v1.6.2" - -[[projects]] - branch = "master" - digest = "1:60b7bc5e043a11213472ae05252527287d20e0a6ccc18f6ae67fad88e41004de" - name = "github.com/hailocab/go-hostpool" - packages = ["."] - pruneopts = "" - revision = "e80d13ce29ede4452c43dea11e79b9bc8a15b478" - -[[projects]] - branch = "master" - digest = "1:c191ec4c50122cdfeedba867d25bbe2ed63ed6dd2130729220c6c0d654361ea4" - name = "github.com/harlow/kinesis-consumer" - packages = [ - ".", - "checkpoint/ddb", - ] - pruneopts = "" - revision = "2f58b136fee036f5de256b81a8461cc724fdf9df" - -[[projects]] - digest = 
"1:e7224669901bab4094e6d6697c136557b7177db6ceb01b7fc8b20d08f4b5aacd" - name = "github.com/hashicorp/consul" - packages = ["api"] - pruneopts = "" - revision = "39f93f011e591c842acc8053a7f5972aa6e592fd" - version = "v1.2.1" - -[[projects]] - branch = "master" - digest = "1:f5d25fd7bdda08e39e01193ef94a1ebf7547b1b931bcdec785d08050598f306c" - name = "github.com/hashicorp/go-cleanhttp" - packages = ["."] - pruneopts = "" - revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d" - -[[projects]] - branch = "master" - digest = "1:ff65bf6fc4d1116f94ac305342725c21b55c16819c2606adc8f527755716937f" - name = "github.com/hashicorp/go-rootcerts" - packages = ["."] - pruneopts = "" - revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00" - -[[projects]] - digest = "1:0038a7f43b51c8b2a8cd03b5372e73f8eadfe156484c2ae8185ae836f8ebc2cd" - name = "github.com/hashicorp/go-uuid" - packages = ["."] - pruneopts = "" - revision = "4f571afc59f3043a65f8fe6bf46d887b10a01d43" - version = "v1.0.1" - -[[projects]] - digest = "1:f72168ea995f398bab88e84bd1ff58a983466ba162fb8d50d47420666cd57fad" - name = "github.com/hashicorp/serf" - packages = ["coordinate"] - pruneopts = "" - revision = "d6574a5bb1226678d7010325fb6c985db20ee458" - version = "v0.8.1" - -[[projects]] - digest = "1:824c4cd143ee15735f1c75d9072aad46e51dd27a4ef8bf6ce723a138265b7fb3" - name = "github.com/influxdata/go-syslog" - packages = [ - ".", - "nontransparent", - "octetcounting", - "rfc5424", - ] - pruneopts = "" - revision = "0cd00a9f0a5e5607d5ef9a294c260f77a74e3b5a" - version = "v2.0.0" - -[[projects]] - branch = "master" - digest = "1:bc3eb5ddfd59781ea1183f2b3d1eb105a1495d421f09b2ccd360c7fced0b612d" - name = "github.com/influxdata/tail" - packages = [ - ".", - "ratelimiter", - "util", - "watch", - "winfile", - ] - pruneopts = "" - revision = "c43482518d410361b6c383d7aebce33d0471d7bc" - -[[projects]] - branch = "telegraf" - digest = "1:65e98c3d449a34fe4644b503148d3a7244ceabe13f8bf71c2cfecfc2bdce05e9" - name = "github.com/influxdata/toml" - packages = [ - ".", - "ast", - ] - pruneopts = "" - revision = "270119a8ce653b297f12189c9099ef1409979f2b" - -[[projects]] - branch = "master" - digest = "1:a0c157916be0b4de1d4565b1f094b8d746109f94968140dff40a42780fa6ccef" - name = "github.com/influxdata/wlog" - packages = ["."] - pruneopts = "" - revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec" - -[[projects]] - digest = "1:e248df365cb87001738e8c9368a6a27c504328047b196d89687c1ca918279a82" - name = "github.com/jackc/pgx" - packages = [ - ".", - "chunkreader", - "internal/sanitize", - "pgio", - "pgproto3", - "pgtype", - "stdlib", - ] - pruneopts = "" - revision = "c73e7d75061bb42b0282945710f344cfe1113d10" - version = "v3.6.0" - -[[projects]] - digest = "1:d45477e90c25c8c6d7d4237281167aa56079382fc042db4b44a8328071649bfa" - name = "github.com/jcmturner/gofork" - packages = [ - "encoding/asn1", - "x/crypto/pbkdf2", - ] - pruneopts = "" - revision = "dc7c13fece037a4a36e2b3c69db4991498d30692" - version = "v1.0.0" - -[[projects]] - digest = "1:13fe471d0ed891e8544eddfeeb0471fd3c9f2015609a1c000aefdedf52a19d40" - name = "github.com/jmespath/go-jmespath" - packages = ["."] - pruneopts = "" - revision = "c2b33e84" - -[[projects]] - digest = "1:b498ceccf0d2efa0af877b1dda20d3742ef9ff7475123e8e922016f0b737069b" - name = "github.com/kardianos/service" - packages = ["."] - pruneopts = "" - revision = "56787a3ea05e9b262708192e7ce3b500aba73561" - version = "v1.0.0" - -[[projects]] - digest = "1:3e160bec100719bb664ce5192b42e82e66b290397da4c0845aed5ce3cfce60cb" - name = 
"github.com/karrick/godirwalk" - packages = ["."] - pruneopts = "" - revision = "532e518bccc921708e14b29e16503b1bf5c898cc" - version = "v1.12.0" - -[[projects]] - branch = "master" - digest = "1:63e7368fcf6b54804076eaec26fd9cf0c4466166b272393db4b93102e1e962df" - name = "github.com/kballard/go-shellquote" - packages = ["."] - pruneopts = "" - revision = "95032a82bc518f77982ea72343cc1ade730072f0" - -[[projects]] - digest = "1:4ceab6231efd01210f2b8b6ab360d480d49c0f44df63841ca0465920a387495d" - name = "github.com/klauspost/compress" - packages = [ - "fse", - "huff0", - "snappy", - "zstd", - "zstd/internal/xxhash", - ] - pruneopts = "" - revision = "4e96aec082898e4dad17d8aca1a7e2d01362ff6c" - version = "v1.9.2" - -[[projects]] - branch = "master" - digest = "1:1ed9eeebdf24aadfbca57eb50e6455bd1d2474525e0f0d4454de8c8e9bc7ee9a" - name = "github.com/kr/logfmt" - packages = ["."] - pruneopts = "" - revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" - -[[projects]] - branch = "master" - digest = "1:e7737c09200582508f4f67227c39e7c4667cc6067a6d2b2e679654e43e8a8cb4" - name = "github.com/kubernetes/apimachinery" - packages = ["pkg/api/resource"] - pruneopts = "" - revision = "d41becfba9ee9bf8e55cec1dd3934cd7cfc04b99" - -[[projects]] - branch = "develop" - digest = "1:3e66a61a57bbbe832c338edb3a623be0deb3dec650c2f3515149658898287e37" - name = "github.com/leodido/ragel-machinery" - packages = [ - ".", - "parser", - ] - pruneopts = "" - revision = "299bdde78165d4ca4bc7d064d8d6a4f39ac6de8c" - -[[projects]] - branch = "master" - digest = "1:7e9956922e349af0190afa0b6621befcd201072679d8e51a9047ff149f2afe93" - name = "github.com/mailru/easyjson" - packages = [ - ".", - "buffer", - "jlexer", - "jwriter", - ] - pruneopts = "" - revision = "efc7eb8984d6655c26b5c9d2e65c024e5767c37c" - -[[projects]] - digest = "1:63722a4b1e1717be7b98fc686e0b30d5e7f734b9e93d7dee86293b6deab7ea28" - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - pruneopts = "" - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - digest = "1:d905825446d3547ebf8f58a4ff30c30439b39781b54d756f5ff3bf19765a3fdb" - name = "github.com/mdlayher/apcupsd" - packages = ["."] - pruneopts = "" - revision = "eb3dd99a75fe58389e357b732691320dcf706b5f" - -[[projects]] - digest = "1:1eef80a63549d929a5d922dc3d9ad0d489ed490f52b90887ad577b65a16d071c" - name = "github.com/miekg/dns" - packages = ["."] - pruneopts = "" - revision = "f4db2ca6edc3af0ee51bf332099cc480bcf3ef9d" - version = "v1.0.10" - -[[projects]] - digest = "1:6dbb0eb72090871f2e58d1e37973fe3cb8c0f45f49459398d3fc740cb30e13bd" - name = "github.com/mitchellh/go-homedir" - packages = ["."] - pruneopts = "" - revision = "af06845cf3004701891bf4fdb884bfe4920b3727" - version = "v1.1.0" - -[[projects]] - branch = "master" - digest = "1:f43ed2c836208c14f45158fd01577c985688a4d11cf9fd475a939819fef3b321" - name = "github.com/mitchellh/mapstructure" - packages = ["."] - pruneopts = "" - revision = "f15292f7a699fcc1a38a80977f80a046874ba8ac" - -[[projects]] - digest = "1:ee2e62b00a9ccc2dba1525f93396e35c847f90f87939df6f361b86315ea5f69a" - name = "github.com/multiplay/go-ts3" - packages = ["."] - pruneopts = "" - revision = "d0d44555495c8776880a17e439399e715a4ef319" - version = "v1.0.0" - -[[projects]] - digest = "1:ccd0def9f0b82b61c5e54fcbfccf528eabb13b489d008e46dc16b808c2e1f765" - name = "github.com/naoina/go-stringutil" - packages = ["."] - pruneopts = "" - revision = "6b638e95a32d0c1131db0e7fe83775cbea4a0d0b" - version = "v0.1.0" - -[[projects]] - 
digest = "1:e5ec850ce66beb0014fc40d8e64b7482172eee71d86d734d66def5e9eac16797" - name = "github.com/nats-io/gnatsd" - packages = [ - "conf", - "logger", - "server", - "server/pse", - "util", - ] - pruneopts = "" - revision = "6608e9ac3be979dcb0614b772cc86a87b71acaa3" - version = "v1.2.0" - -[[projects]] - digest = "1:665af347df4c5d1ae4c3eacd0754f5337a301f6a3f2444c9993b996605c8c02b" - name = "github.com/nats-io/go-nats" - packages = [ - ".", - "encoders/builtin", - "util", - ] - pruneopts = "" - revision = "062418ea1c2181f52dc0f954f6204370519a868b" - version = "v1.5.0" - -[[projects]] - digest = "1:be61e8224b84064109eaba8157cbb4bbe6ca12443e182b6624fdfa1c0dcf53d9" - name = "github.com/nats-io/nuid" - packages = ["."] - pruneopts = "" - revision = "289cccf02c178dc782430d534e3c1f5b72af807f" - version = "v1.0.0" - -[[projects]] - digest = "1:7a69f6a3a33929f8b66aa39c93868ad1698f06417fe627ae067559beb94504bd" - name = "github.com/nsqio/go-nsq" - packages = ["."] - pruneopts = "" - revision = "eee57a3ac4174c55924125bb15eeeda8cffb6e6f" - version = "v1.0.7" - -[[projects]] - branch = "master" - digest = "1:06ee57a6252cc9c3a1650be9888e8df796d86947ec75bff7e2c4ac5689baa086" - name = "github.com/openconfig/gnmi" - packages = [ - "proto/gnmi", - "proto/gnmi_ext", - ] - pruneopts = "" - revision = "33a1865c302903e7a2e06f35960e6bc31e84b9f6" - -[[projects]] - digest = "1:5d9b668b0b4581a978f07e7d2e3314af18eb27b3fb5d19b70185b7c575723d11" - name = "github.com/opencontainers/go-digest" - packages = ["."] - pruneopts = "" - revision = "279bed98673dd5bef374d3b6e4b09e2af76183bf" - version = "v1.0.0-rc1" - -[[projects]] - digest = "1:f26c8670b11e29a49c8e45f7ec7f2d5bac62e8fd4e3c0ae1662baa4a697f984a" - name = "github.com/opencontainers/image-spec" - packages = [ - "specs-go", - "specs-go/v1", - ] - pruneopts = "" - revision = "d60099175f88c47cd379c4738d158884749ed235" - version = "v1.0.1" - -[[projects]] - branch = "master" - digest = "1:2da0e5077ed40453dc281b9a2428d84cf6ad14063aed189f6296ca5dd25cf13d" - name = "github.com/opentracing-contrib/go-observer" - packages = ["."] - pruneopts = "" - revision = "a52f2342449246d5bcc273e65cbdcfa5f7d6c63c" - -[[projects]] - digest = "1:78fb99d6011c2ae6c72f3293a83951311147b12b06a5ffa43abf750c4fab6ac5" - name = "github.com/opentracing/opentracing-go" - packages = [ - ".", - "ext", - "log", - ] - pruneopts = "" - revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38" - version = "v1.0.2" - -[[projects]] - digest = "1:fea0e67285d900e5a0a7ec19ff4b4c82865a28dddbee8454c5360ad908f7069c" - name = "github.com/openzipkin/zipkin-go-opentracing" - packages = [ - ".", - "flag", - "thrift/gen-go/scribe", - "thrift/gen-go/zipkincore", - "types", - "wire", - ] - pruneopts = "" - revision = "26cf9707480e6b90e5eff22cf0bbf05319154232" - version = "v0.3.4" - -[[projects]] - digest = "1:29e34e58f26655c4d73135cdfc0517ea2ff1483eff34e5d5ef4b6fddbb81e31b" - name = "github.com/pierrec/lz4" - packages = [ - ".", - "internal/xxh32", - ] - pruneopts = "" - revision = "1958fd8fff7f115e79725b1288e0b878b3e06b00" - version = "v2.0.3" - -[[projects]] - digest = "1:7365acd48986e205ccb8652cc746f09c8b7876030d53710ea6ef7d0bd0dcd7ca" - name = "github.com/pkg/errors" - packages = ["."] - pruneopts = "" - revision = "645ef00459ed84a119197bfb8d8205042c6df63d" - version = "v0.8.0" - -[[projects]] - digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411" - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - pruneopts = "" - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = 
"v1.0.0" - -[[projects]] - digest = "1:6f218995d6a74636cfcab45ce03005371e682b4b9bee0e5eb0ccfd83ef85364f" - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/internal", - "prometheus/promhttp", - ] - pruneopts = "" - revision = "505eaef017263e299324067d40ca2c48f6a2cf50" - version = "v0.9.2" - -[[projects]] - branch = "master" - digest = "1:185cf55b1f44a1bf243558901c3f06efa5c64ba62cfdcbb1bf7bbe8c3fb68561" - name = "github.com/prometheus/client_model" - packages = ["go"] - pruneopts = "" - revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" - -[[projects]] - branch = "master" - digest = "1:bfbc121ef802d245ef67421cff206615357d9202337a3d492b8f668906b485a8" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model", - ] - pruneopts = "" - revision = "7600349dcfe1abd18d72d3a1770870d9800a7801" - -[[projects]] - branch = "master" - digest = "1:b694a6bdecdace488f507cff872b30f6f490fdaf988abd74d87ea56406b23b6e" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/util", - "nfs", - "xfs", - ] - pruneopts = "" - revision = "ae68e2d4c00fed4943b5f6698d504a5fe083da8a" - -[[projects]] - branch = "master" - digest = "1:15bcdc717654ef21128e8af3a63eec39a6d08a830e297f93d65163f87c8eb523" - name = "github.com/rcrowley/go-metrics" - packages = ["."] - pruneopts = "" - revision = "e2704e165165ec55d062f5919b4b29494e9fa790" - -[[projects]] - digest = "1:a18bd4e530f3f36fe91a5d1fd57d492f25287546e613f892d21c2b76b848517d" - name = "github.com/safchain/ethtool" - packages = ["."] - pruneopts = "" - revision = "42ed695e3de80b9d695f280295fd7994639f209d" - -[[projects]] - branch = "master" - digest = "1:7fc2f428767a2521abc63f1a663d981f61610524275d6c0ea645defadd4e916f" - name = "github.com/samuel/go-zookeeper" - packages = ["zk"] - pruneopts = "" - revision = "c4fab1ac1bec58281ad0667dc3f0907a9476ac47" - -[[projects]] - digest = "1:47081c00d00c1dfc9a530c2556e78be391a5c24db1043efe6d406af882a169a1" - name = "github.com/satori/go.uuid" - packages = ["."] - pruneopts = "" - revision = "b2ce2384e17bbe0c6d34077efa39dbab3e09123b" - -[[projects]] - digest = "1:9024df427b3c8a80a0c4b34e535e5e1ae922c7174e3242b6c7f30ffb3b9f715e" - name = "github.com/shirou/gopsutil" - packages = [ - "cpu", - "disk", - "host", - "internal/common", - "load", - "mem", - "net", - "process", - ] - pruneopts = "" - revision = "fc7e5e7af6052e36e83e5539148015ed2c09d8f9" - version = "v2.19.11" - -[[projects]] - branch = "master" - digest = "1:99c6a6dab47067c9b898e8c8b13d130c6ab4ffbcc4b7cc6236c2cd0b1e344f5b" - name = "github.com/shirou/w32" - packages = ["."] - pruneopts = "" - revision = "bb4de0191aa41b5507caa14b0650cdbddcd9280b" - -[[projects]] - digest = "1:8cf46b6c18a91068d446e26b67512cf16f1540b45d90b28b9533706a127f0ca6" - name = "github.com/sirupsen/logrus" - packages = ["."] - pruneopts = "" - revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" - version = "v1.0.5" - -[[projects]] - digest = "1:a1cb5e999ad98b9838147e11ed1bdb000e750ee8872e2e21c74d9464cc9110c0" - name = "github.com/soniah/gosnmp" - packages = ["."] - pruneopts = "" - revision = "40eae407a1f8cbbe3f3f14c57bde0b16db1cfe85" - version = "v1.22.0" - -[[projects]] - branch = "master" - digest = "1:4e8f1cae8e6d83af9000d82566efb8823907dae77ba4f1d76ff28fdd197c3c90" - name = "github.com/streadway/amqp" - packages = ["."] - pruneopts = "" - revision = "e5adc2ada8b8efff032bf61173a233d143e9318e" - -[[projects]] - digest = 
"1:711eebe744c0151a9d09af2315f0bb729b2ec7637ef4c410fa90a18ef74b65b6" - name = "github.com/stretchr/objx" - packages = ["."] - pruneopts = "" - revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c" - version = "v0.1.1" - -[[projects]] - digest = "1:c587772fb8ad29ad4db67575dad25ba17a51f072ff18a22b4f0257a4d9c24f75" - name = "github.com/stretchr/testify" - packages = [ - "assert", - "mock", - "require", - ] - pruneopts = "" - revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" - version = "v1.2.2" - -[[projects]] - digest = "1:d2e45c5ed1c65576448b7adca867fc826f0c4710299d560819f1fa376189b70f" - name = "github.com/tidwall/gjson" - packages = ["."] - pruneopts = "" - revision = "d7c940e59395fdcaff4584cb442b2e7808f6711e" - version = "v1.3.0" - -[[projects]] - branch = "master" - digest = "1:4db4f92bb9cb04cfc4fccb36aba2598b02a988008c4cc0692b241214ad8ac96e" - name = "github.com/tidwall/match" - packages = ["."] - pruneopts = "" - revision = "1731857f09b1f38450e2c12409748407822dc6be" - -[[projects]] - digest = "1:1d7cab09854959fe179fe2f209400626f3dda9ec8e8b719c661d7b2add7b54b5" - name = "github.com/tidwall/pretty" - packages = ["."] - pruneopts = "" - revision = "1166b9ac2b65e46a43d8618d30d1554f4652d49b" - version = "v1.0.0" - -[[projects]] - digest = "1:026b6ceaabbacaa147e94a63579efc3d3c73e00c73b67fa5c43ab46191ed04eb" - name = "github.com/vishvananda/netlink" - packages = [ - ".", - "nl", - ] - pruneopts = "" - revision = "b2de5d10e38ecce8607e6b438b6d174f389a004e" - -[[projects]] - branch = "master" - digest = "1:c09fddfdd491edaa4383396503e57023a26e5a824283a78c2310613a1252c649" - name = "github.com/vishvananda/netns" - packages = ["."] - pruneopts = "" - revision = "13995c7128ccc8e51e9a6bd2b551020a27180abd" - -[[projects]] - digest = "1:343f20460c11a0d0529fe532553bfef9446918d1a1fda6d8661eb27d5b1a68b8" - name = "github.com/vjeantet/grok" - packages = ["."] - pruneopts = "" - revision = "ce01e59abcf6fbc9833b7deb5e4b8ee1769bcc53" - version = "v1.0.0" - -[[projects]] - digest = "1:6af52ce6dae9a912aa3113f247a63cd82599760ddc328a6721c3ef0426d31ca2" - name = "github.com/vmware/govmomi" - packages = [ - ".", - "find", - "list", - "nfc", - "object", - "performance", - "property", - "session", - "simulator", - "simulator/esx", - "simulator/vpx", - "task", - "view", - "vim25", - "vim25/debug", - "vim25/methods", - "vim25/mo", - "vim25/progress", - "vim25/soap", - "vim25/types", - "vim25/xml", - ] - pruneopts = "" - revision = "3617f28d167d448f93f282a867870f109516d2a5" - version = "v0.19.0" - -[[projects]] - digest = "1:4cb7eb45ed9a5129bc77c726328c130abcbaae566c1fe4d82693fae86c8c621d" - name = "github.com/wavefronthq/wavefront-sdk-go" - packages = [ - "histogram", - "internal", - "senders", - ] - pruneopts = "" - revision = "fa87530cd02a8ad08bd179e1c39fb319a0cc0dae" - version = "v0.9.2" - -[[projects]] - branch = "master" - digest = "1:98ed05e9796df287b90c1d96854e3913c8e349dbc546412d3cabb472ecf4b417" - name = "github.com/wvanbergen/kafka" - packages = ["consumergroup"] - pruneopts = "" - revision = "e2edea948ddfee841ea9a263b32ccca15f7d6c2f" - -[[projects]] - branch = "master" - digest = "1:12aff3cc417907bf9f683a6bf1dc78ffb08e41bc69f829491e593ea9b951a3cf" - name = "github.com/wvanbergen/kazoo-go" - packages = ["."] - pruneopts = "" - revision = "f72d8611297a7cf105da904c04198ad701a60101" - -[[projects]] - branch = "master" - digest = "1:c5918689b7e187382cc1066bf0260de54ba9d1b323105f46ed2551d2fb4a17c7" - name = "github.com/yuin/gopher-lua" - packages = [ - ".", - "ast", - "parse", - "pm", - ] - pruneopts = "" - 
revision = "46796da1b0b4794e1e341883a399f12cc7574b55" - -[[projects]] - digest = "1:8c8ec859c77fccd10a347b7219b597c4c21c448949e8bdf3fc3e6f4c78f952b4" - name = "go.opencensus.io" - packages = [ - ".", - "internal", - "internal/tagencoding", - "plugin/ocgrpc", - "plugin/ochttp", - "plugin/ochttp/propagation/b3", - "stats", - "stats/internal", - "stats/view", - "tag", - "trace", - "trace/internal", - "trace/propagation", - "trace/tracestate", - ] - pruneopts = "" - revision = "79993219becaa7e29e3b60cb67f5b8e82dee11d6" - version = "v0.17.0" - -[[projects]] - branch = "master" - digest = "1:d709f6b44dffe11337b3730ebf5ae6bb1bc9273a1c204266921205158a5a523f" - name = "golang.org/x/crypto" - packages = [ - "bcrypt", - "blowfish", - "ed25519", - "ed25519/internal/edwards25519", - "md4", - "pbkdf2", - "pkcs12", - "pkcs12/internal/rc2", - "ssh/terminal", - ] - pruneopts = "" - revision = "87dc89f01550277dc22b74ffcf4cd89fa2f40f4c" - source = "https://github.com/golang/crypto.git" - -[[projects]] - branch = "master" - digest = "1:00ff990baae4665bb0a8174af5ff78228574227ed96c89671247a56852a50e21" - name = "golang.org/x/net" - packages = [ - "bpf", - "context", - "context/ctxhttp", - "html", - "html/atom", - "html/charset", - "http/httpguts", - "http2", - "http2/hpack", - "icmp", - "idna", - "internal/iana", - "internal/socket", - "internal/socks", - "internal/timeseries", - "ipv4", - "ipv6", - "proxy", - "trace", - "websocket", - ] - pruneopts = "" - revision = "a680a1efc54dd51c040b3b5ce4939ea3cf2ea0d1" - source = "https://github.com/golang/net.git" - -[[projects]] - branch = "master" - digest = "1:b697592485cb412be4188c08ca0beed9aab87f36b86418e21acc4a3998f63734" - name = "golang.org/x/oauth2" - packages = [ - ".", - "clientcredentials", - "google", - "internal", - "jws", - "jwt", - ] - pruneopts = "" - revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9" - source = "https://github.com/golang/oauth2.git" - -[[projects]] - branch = "master" - digest = "1:88ecca26e54f601a8733c9a31d9f0883b915216a177673f0467f6b864fd0d90f" - name = "golang.org/x/sync" - packages = [ - "errgroup", - "semaphore", - ] - pruneopts = "" - revision = "42b317875d0fa942474b76e1b46a6060d720ae6e" - source = "https://github.com/golang/sync.git" - -[[projects]] - branch = "master" - digest = "1:0b5c2207c72f2d13995040f176feb6e3f453d6b01af2b9d57df76b05ded2e926" - name = "golang.org/x/sys" - packages = [ - "unix", - "windows", - "windows/registry", - "windows/svc", - "windows/svc/debug", - "windows/svc/eventlog", - "windows/svc/mgr", - ] - pruneopts = "" - revision = "51ab0e2deafac1f46c46ad59cf0921be2f180c3d" - source = "https://github.com/golang/sys.git" - -[[projects]] - digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4" - name = "golang.org/x/text" - packages = [ - "cases", - "collate", - "collate/build", - "encoding", - "encoding/charmap", - "encoding/htmlindex", - "encoding/internal", - "encoding/internal/identifier", - "encoding/japanese", - "encoding/korean", - "encoding/simplifiedchinese", - "encoding/traditionalchinese", - "encoding/unicode", - "internal", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "internal/utf8internal", - "language", - "runes", - "secure/bidirule", - "secure/precis", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - "width", - ] - pruneopts = "" - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - source = "https://github.com/golang/text.git" - version = "v0.3.0" - -[[projects]] - 
branch = "master" - digest = "1:2d878ecef4b17dbdd067b8fb98eb64f768f0802b1176b91b9e3c01b457efd01f" - name = "google.golang.org/api" - packages = [ - "googleapi/transport", - "internal", - "iterator", - "option", - "support/bundler", - "transport", - "transport/grpc", - "transport/http", - ] - pruneopts = "" - revision = "19ff8768a5c0b8e46ea281065664787eefc24121" - -[[projects]] - digest = "1:c1771ca6060335f9768dff6558108bc5ef6c58506821ad43377ee23ff059e472" - name = "google.golang.org/appengine" - packages = [ - ".", - "cloudsql", - "internal", - "internal/app_identity", - "internal/base", - "internal/datastore", - "internal/log", - "internal/modules", - "internal/remote_api", - "internal/socket", - "internal/urlfetch", - "socket", - "urlfetch", - ] - pruneopts = "" - revision = "b1f26356af11148e710935ed1ac8a7f5702c7612" - version = "v1.1.0" - -[[projects]] - branch = "master" - digest = "1:b1443b4e3cc990c84d27fcdece9d3302158c67dba870e33a6937a2c0076388c2" - name = "google.golang.org/genproto" - packages = [ - "googleapis/api/annotations", - "googleapis/api/distribution", - "googleapis/api/label", - "googleapis/api/metric", - "googleapis/api/monitoredres", - "googleapis/iam/v1", - "googleapis/monitoring/v3", - "googleapis/pubsub/v1", - "googleapis/rpc/status", - "protobuf/field_mask", - ] - pruneopts = "" - revision = "fedd2861243fd1a8152376292b921b394c7bef7e" - -[[projects]] - digest = "1:5f31b45ee9da7a87f140bef3ed0a7ca34ea2a6d38eb888123b8e28170e8aa4f2" - name = "google.golang.org/grpc" - packages = [ - ".", - "balancer", - "balancer/base", - "balancer/roundrobin", - "codes", - "connectivity", - "credentials", - "credentials/oauth", - "encoding", - "encoding/gzip", - "encoding/proto", - "grpclog", - "internal", - "internal/backoff", - "internal/channelz", - "internal/grpcrand", - "keepalive", - "metadata", - "naming", - "peer", - "resolver", - "resolver/dns", - "resolver/passthrough", - "stats", - "status", - "tap", - "transport", - ] - pruneopts = "" - revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" - version = "v1.13.0" - -[[projects]] - digest = "1:3cad99e0d1f94b8c162787c12e59d0a0b9df1ef75590eb145cdd625479091efe" - name = "gopkg.in/asn1-ber.v1" - packages = ["."] - pruneopts = "" - revision = "379148ca0225df7a432012b8df0355c2a2063ac0" - version = "v1.2" - -[[projects]] - digest = "1:581450ae66d7970d91ef9132459fa583e937c6e502f1b96e4ee7783a56fa0b44" - name = "gopkg.in/fatih/pool.v2" - packages = ["."] - pruneopts = "" - revision = "010e0b745d12eaf8426c95f9c3924d81dd0b668f" - version = "v2.0.0" - -[[projects]] - digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd" - name = "gopkg.in/fsnotify.v1" - packages = ["."] - pruneopts = "" - revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" - source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz" - version = "v1.4.7" - -[[projects]] - digest = "1:960720207d3d0992995f4576e1366fd9e9b1483473b07fb7243144f75f5b1546" - name = "gopkg.in/gorethink/gorethink.v3" - packages = [ - ".", - "encoding", - "ql2", - "types", - ] - pruneopts = "" - revision = "7f5bdfd858bb064d80559b2a32b86669c5de5d3b" - version = "v3.0.5" - -[[projects]] - digest = "1:75fb3fcfc73a8c723efde7777b40e8e8ff9babf30d8c56160d01beffea8a95a6" - name = "gopkg.in/inf.v0" - packages = ["."] - pruneopts = "" - revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" - version = "v0.9.1" - -[[projects]] - digest = "1:4777ba481cc12866b89aafb0a67529e7ac48b9aea06a25f3737b2cf5a3ffda12" - name = "gopkg.in/jcmturner/aescts.v1" - packages = ["."] - pruneopts = 
"" - revision = "f6abebb3171c4c1b1fea279cb7c7325020a26290" - version = "v1.0.1" - -[[projects]] - digest = "1:84c5b1392ef65ad1bb64da4b4d0beb2f204eefc769d6d96082347bb7057cb7b1" - name = "gopkg.in/jcmturner/dnsutils.v1" - packages = ["."] - pruneopts = "" - revision = "13eeb8d49ffb74d7a75784c35e4d900607a3943c" - version = "v1.0.1" - -[[projects]] - digest = "1:502ab576ba8c47c4de77fe3f2b2386adc1a1447bb5afae2ac7bf0edd2b6f7c52" - name = "gopkg.in/jcmturner/gokrb5.v7" - packages = [ - "asn1tools", - "client", - "config", - "credentials", - "crypto", - "crypto/common", - "crypto/etype", - "crypto/rfc3961", - "crypto/rfc3962", - "crypto/rfc4757", - "crypto/rfc8009", - "gssapi", - "iana", - "iana/addrtype", - "iana/adtype", - "iana/asnAppTag", - "iana/chksumtype", - "iana/errorcode", - "iana/etypeID", - "iana/flags", - "iana/keyusage", - "iana/msgtype", - "iana/nametype", - "iana/patype", - "kadmin", - "keytab", - "krberror", - "messages", - "pac", - "types", - ] - pruneopts = "" - revision = "363118e62befa8a14ff01031c025026077fe5d6d" - version = "v7.3.0" - -[[projects]] - digest = "1:f9956ccc103c6208cd50c71ee5191b6fdcc635972c12624ef949c9b20b2bb9d1" - name = "gopkg.in/jcmturner/rpc.v1" - packages = [ - "mstypes", - "ndr", - ] - pruneopts = "" - revision = "99a8ce2fbf8b8087b6ed12a37c61b10f04070043" - version = "v1.1.0" - -[[projects]] - digest = "1:cff622452aa789a1b2212d401f6b618ca1751a02229d26e002eb645ec22818f2" - name = "gopkg.in/ldap.v3" - packages = ["."] - pruneopts = "" - revision = "caa044a2bfa324b735baee1722e8e2e372f76864" - version = "v3.1.0" - -[[projects]] - branch = "v2" - digest = "1:f54ba71a035aac92ced3e902d2bff3734a15d1891daff73ec0f90ef236750139" - name = "gopkg.in/mgo.v2" - packages = [ - ".", - "bson", - "internal/json", - "internal/sasl", - "internal/scram", - ] - pruneopts = "" - revision = "9856a29383ce1c59f308dd1cf0363a79b5bef6b5" - -[[projects]] - digest = "1:b49c4d3115800eace659c9a6a5c384a922f5b210178b24a01abb10731f404ea2" - name = "gopkg.in/olivere/elastic.v5" - packages = [ - ".", - "config", - "uritemplates", - ] - pruneopts = "" - revision = "52741dc2ce53629cbe1e673869040d886cba2cd5" - version = "v5.0.70" - -[[projects]] - branch = "v1" - digest = "1:a96d16bd088460f2e0685d46c39bcf1208ba46e0a977be2df49864ec7da447dd" - name = "gopkg.in/tomb.v1" - packages = ["."] - pruneopts = "" - revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8" - -[[projects]] - digest = "1:f0620375dd1f6251d9973b5f2596228cc8042e887cd7f827e4220bc1ce8c30e2" - name = "gopkg.in/yaml.v2" - packages = ["."] - pruneopts = "" - revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" - version = "v2.2.1" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "cloud.google.com/go/monitoring/apiv3", - "cloud.google.com/go/pubsub", - "collectd.org/api", - "collectd.org/network", - "github.com/Azure/azure-storage-queue-go/azqueue", - "github.com/Azure/go-autorest/autorest", - "github.com/Azure/go-autorest/autorest/azure/auth", - "github.com/Mellanox/rdmamap", - "github.com/Microsoft/ApplicationInsights-Go/appinsights", - "github.com/Shopify/sarama", - "github.com/StackExchange/wmi", - "github.com/aerospike/aerospike-client-go", - "github.com/alecthomas/units", - "github.com/amir/raidman", - "github.com/apache/thrift/lib/go/thrift", - "github.com/aws/aws-sdk-go/aws", - "github.com/aws/aws-sdk-go/aws/client", - "github.com/aws/aws-sdk-go/aws/credentials", - "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "github.com/aws/aws-sdk-go/aws/session", - 
"github.com/aws/aws-sdk-go/service/cloudwatch", - "github.com/aws/aws-sdk-go/service/dynamodb", - "github.com/aws/aws-sdk-go/service/kinesis", - "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout", - "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis", - "github.com/couchbase/go-couchbase", - "github.com/denisenkom/go-mssqldb", - "github.com/dgrijalva/jwt-go", - "github.com/docker/docker/api/types", - "github.com/docker/docker/api/types/container", - "github.com/docker/docker/api/types/filters", - "github.com/docker/docker/api/types/registry", - "github.com/docker/docker/api/types/swarm", - "github.com/docker/docker/client", - "github.com/docker/docker/pkg/stdcopy", - "github.com/docker/libnetwork/ipvs", - "github.com/eclipse/paho.mqtt.golang", - "github.com/ericchiang/k8s", - "github.com/ericchiang/k8s/apis/apps/v1", - "github.com/ericchiang/k8s/apis/core/v1", - "github.com/ericchiang/k8s/apis/extensions/v1beta1", - "github.com/ericchiang/k8s/apis/meta/v1", - "github.com/ericchiang/k8s/apis/resource", - "github.com/ericchiang/k8s/util/intstr", - "github.com/ghodss/yaml", - "github.com/glinton/ping", - "github.com/go-logfmt/logfmt", - "github.com/go-redis/redis", - "github.com/go-sql-driver/mysql", - "github.com/gobwas/glob", - "github.com/gofrs/uuid", - "github.com/gogo/protobuf/proto", - "github.com/golang/protobuf/proto", - "github.com/golang/protobuf/ptypes/duration", - "github.com/golang/protobuf/ptypes/empty", - "github.com/golang/protobuf/ptypes/timestamp", - "github.com/google/go-cmp/cmp", - "github.com/google/go-cmp/cmp/cmpopts", - "github.com/google/go-github/github", - "github.com/gorilla/mux", - "github.com/harlow/kinesis-consumer", - "github.com/harlow/kinesis-consumer/checkpoint/ddb", - "github.com/hashicorp/consul/api", - "github.com/influxdata/go-syslog", - "github.com/influxdata/go-syslog/nontransparent", - "github.com/influxdata/go-syslog/octetcounting", - "github.com/influxdata/go-syslog/rfc5424", - "github.com/influxdata/tail", - "github.com/influxdata/toml", - "github.com/influxdata/toml/ast", - "github.com/influxdata/wlog", - "github.com/jackc/pgx", - "github.com/jackc/pgx/pgtype", - "github.com/jackc/pgx/stdlib", - "github.com/kardianos/service", - "github.com/karrick/godirwalk", - "github.com/kballard/go-shellquote", - "github.com/kubernetes/apimachinery/pkg/api/resource", - "github.com/matttproud/golang_protobuf_extensions/pbutil", - "github.com/mdlayher/apcupsd", - "github.com/miekg/dns", - "github.com/multiplay/go-ts3", - "github.com/nats-io/gnatsd/server", - "github.com/nats-io/go-nats", - "github.com/nsqio/go-nsq", - "github.com/openconfig/gnmi/proto/gnmi", - "github.com/openzipkin/zipkin-go-opentracing", - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore", - "github.com/pkg/errors", - "github.com/prometheus/client_golang/prometheus", - "github.com/prometheus/client_golang/prometheus/promhttp", - "github.com/prometheus/client_model/go", - "github.com/prometheus/common/expfmt", - "github.com/safchain/ethtool", - "github.com/shirou/gopsutil/cpu", - "github.com/shirou/gopsutil/disk", - "github.com/shirou/gopsutil/host", - "github.com/shirou/gopsutil/load", - "github.com/shirou/gopsutil/mem", - "github.com/shirou/gopsutil/net", - "github.com/shirou/gopsutil/process", - "github.com/sirupsen/logrus", - "github.com/soniah/gosnmp", - "github.com/streadway/amqp", - "github.com/stretchr/testify/assert", - "github.com/stretchr/testify/mock", - "github.com/stretchr/testify/require", - "github.com/tidwall/gjson", - "github.com/vjeantet/grok", - 
"github.com/vmware/govmomi", - "github.com/vmware/govmomi/object", - "github.com/vmware/govmomi/performance", - "github.com/vmware/govmomi/property", - "github.com/vmware/govmomi/session", - "github.com/vmware/govmomi/simulator", - "github.com/vmware/govmomi/view", - "github.com/vmware/govmomi/vim25", - "github.com/vmware/govmomi/vim25/methods", - "github.com/vmware/govmomi/vim25/mo", - "github.com/vmware/govmomi/vim25/soap", - "github.com/vmware/govmomi/vim25/types", - "github.com/wavefronthq/wavefront-sdk-go/senders", - "github.com/wvanbergen/kafka/consumergroup", - "golang.org/x/net/context", - "golang.org/x/net/html/charset", - "golang.org/x/oauth2", - "golang.org/x/oauth2/clientcredentials", - "golang.org/x/oauth2/google", - "golang.org/x/sys/unix", - "golang.org/x/sys/windows", - "golang.org/x/sys/windows/svc", - "golang.org/x/sys/windows/svc/mgr", - "google.golang.org/api/iterator", - "google.golang.org/api/option", - "google.golang.org/api/support/bundler", - "google.golang.org/genproto/googleapis/api/distribution", - "google.golang.org/genproto/googleapis/api/metric", - "google.golang.org/genproto/googleapis/api/monitoredres", - "google.golang.org/genproto/googleapis/monitoring/v3", - "google.golang.org/grpc", - "google.golang.org/grpc/codes", - "google.golang.org/grpc/credentials", - "google.golang.org/grpc/encoding/gzip", - "google.golang.org/grpc/metadata", - "google.golang.org/grpc/peer", - "google.golang.org/grpc/status", - "gopkg.in/gorethink/gorethink.v3", - "gopkg.in/ldap.v3", - "gopkg.in/mgo.v2", - "gopkg.in/mgo.v2/bson", - "gopkg.in/olivere/elastic.v5", - "gopkg.in/yaml.v2", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index b4304c61c..000000000 --- a/Gopkg.toml +++ /dev/null @@ -1,314 +0,0 @@ -[[constraint]] - name = "collectd.org" - version = "0.3.0" - -[[constraint]] - name = "github.com/aerospike/aerospike-client-go" - version = "<=1.27.0" - -[[constraint]] - name = "github.com/amir/raidman" - branch = "master" - -[[constraint]] - name = "github.com/apache/thrift" - branch = "master" - -[[constraint]] - name = "github.com/aws/aws-sdk-go" - version = "1.19.41" - -[[constraint]] - name = "github.com/couchbase/go-couchbase" - branch = "master" - -[[constraint]] - name = "github.com/dgrijalva/jwt-go" - version = "3.2.0" - -[[constraint]] - name = "github.com/docker/docker" - revision = "ed7b6428c133e7c59404251a09b7d6b02fa83cc2" # v18.05.0-ce - -[[override]] - name = "github.com/docker/distribution" - revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" # v18.05.0-ce - -[[constraint]] - name = "github.com/eclipse/paho.mqtt.golang" - version = "1" - -[[constraint]] - name = "github.com/go-sql-driver/mysql" - version = "1.4.0" - -[[constraint]] - name = "github.com/gobwas/glob" - version = "0.2.3" - -[[constraint]] - name = "github.com/golang/protobuf" - version = "1.1.0" - -[[constraint]] - name = "github.com/google/go-cmp" - version = "0.2.0" - -[[constraint]] - name = "github.com/gorilla/mux" - version = "1.6.2" - -[[constraint]] - name = "github.com/go-redis/redis" - version = "6.12.0" - -[[constraint]] - name = "github.com/hashicorp/consul" - version = "1.1.0" - -[[constraint]] - name = "github.com/influxdata/go-syslog" - version = "2.0.0" - -[[constraint]] - name = "github.com/influxdata/tail" - branch = "master" - -[[constraint]] - name = "github.com/influxdata/toml" - branch = "telegraf" - -[[constraint]] - name = "github.com/influxdata/wlog" - branch = "master" - -[[constraint]] - name = 
"github.com/jackc/pgx" - version = "3.4.0" - -[[constraint]] - name = "github.com/kardianos/service" - version = "1.0.0" - -[[constraint]] - name = "github.com/kballard/go-shellquote" - branch = "master" - -[[constraint]] - name = "github.com/matttproud/golang_protobuf_extensions" - version = "1.0.1" - -[[constraint]] - name = "github.com/Microsoft/ApplicationInsights-Go" - version = "0.4.2" - -[[constraint]] - name = "github.com/miekg/dns" - version = "1.0.10" - -[[constraint]] - name = "github.com/multiplay/go-ts3" - version = "1.0.0" - -[[constraint]] - name = "github.com/nats-io/gnatsd" - version = "1.1.0" - -[[constraint]] - name = "github.com/nats-io/go-nats" - version = "1.5.0" - -[[constraint]] - name = "github.com/nsqio/go-nsq" - version = "1.0.7" - -[[constraint]] - name = "github.com/openzipkin/zipkin-go-opentracing" - version = "0.3.4" - -[[constraint]] - name = "github.com/prometheus/client_golang" - version = "0.9.2" - -[[constraint]] - name = "github.com/prometheus/client_model" - branch = "master" - -[[constraint]] - name = "github.com/prometheus/common" - branch = "master" - -[[constraint]] - name = "github.com/gofrs/uuid" - version = "2.0.0" - -[[constraint]] - name = "github.com/shirou/gopsutil" - version = "2.19.7" - -[[constraint]] - name = "github.com/Shopify/sarama" - version = "1.24.0" - -[[constraint]] - name = "github.com/soniah/gosnmp" - version = "1.22.0" - -[[constraint]] - name = "github.com/StackExchange/wmi" - version = "1.0.0" - -[[constraint]] - name = "github.com/streadway/amqp" - branch = "master" - -[[constraint]] - name = "github.com/stretchr/testify" - version = "1.2.2" - -[[constraint]] - name = "github.com/tidwall/gjson" - version = "1.1.1" - -[[constraint]] - name = "github.com/vjeantet/grok" - version = "1.0.0" - -[[constraint]] - name = "github.com/wvanbergen/kafka" - branch = "master" - -[[constraint]] - name = "github.com/denisenkom/go-mssqldb" - branch = "master" - -[[constraint]] - name = "golang.org/x/net" - branch = "master" - source = "https://github.com/golang/net.git" - -[[constraint]] - name = "golang.org/x/sys" - branch = "master" - source = "https://github.com/golang/sys.git" - -[[constraint]] - name = "google.golang.org/grpc" - version = "1.12.2" - -[[constraint]] - name = "gopkg.in/gorethink/gorethink.v3" - version = "3.0.5" - -[[constraint]] - name = "gopkg.in/mgo.v2" - branch = "v2" - -[[constraint]] - name = "gopkg.in/olivere/elastic.v5" - version = "^5.0.69" - -[[constraint]] - name = "gopkg.in/yaml.v2" - version = "^2.2.1" - -[[override]] - source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz" - name = "gopkg.in/fsnotify.v1" - -[[constraint]] - name = "github.com/mdlayher/apcupsd" - revision = "eb3dd99a75fe58389e357b732691320dcf706b5f" - -[[constraint]] - branch = "master" - name = "google.golang.org/genproto" - -[[constraint]] - name = "github.com/vmware/govmomi" - version = "0.19.0" - -[[constraint]] - name = "github.com/Azure/go-autorest" - version = "^13.0.0" - -[[constraint]] - name = "github.com/Azure/azure-storage-queue-go" - version = "0.2.0" - -[[constraint]] - branch = "master" - name = "golang.org/x/oauth2" - source = "https://github.com/golang/oauth2.git" - -[[constraint]] - branch = "master" - name = "github.com/docker/libnetwork" - -[[override]] - name = "github.com/vishvananda/netlink" - revision = "b2de5d10e38ecce8607e6b438b6d174f389a004e" - -[[constraint]] - name = "github.com/wavefronthq/wavefront-sdk-go" - version = "^0.9.1" - -[[constraint]] - name = "github.com/karrick/godirwalk" - version = 
"1.10" - -[[override]] - name = "github.com/harlow/kinesis-consumer" - branch = "master" - -[[constraint]] - branch = "master" - name = "github.com/kubernetes/apimachinery" - -[[constraint]] - name = "github.com/go-logfmt/logfmt" - version = "0.4.0" - -[[constraint]] - branch = "master" - name = "github.com/ghodss/yaml" - -[[override]] - name = "golang.org/x/crypto" - source = "https://github.com/golang/crypto.git" - -[[override]] - name = "golang.org/x/sync" - source = "https://github.com/golang/sync.git" - -[[override]] - name = "golang.org/x/text" - source = "https://github.com/golang/text.git" - -[[constraint]] - name = "github.com/google/go-github" - version = "24.0.1" - -[[constraint]] - branch = "master" - name = "github.com/openconfig/gnmi" - -[[constraint]] - branch = "master" - name = "github.com/cisco-ie/nx-telemetry-proto" - -[[constraint]] - branch = "master" - name = "github.com/Mellanox/rdmamap" - -[[constraint]] - name = "gopkg.in/ldap.v3" - version = "3.1.0" - -[[constraint]] - name = "github.com/safchain/ethtool" - revision = "42ed695e3de80b9d695f280295fd7994639f209d" - -[[override]] - name = "github.com/satori/go.uuid" - revision = "b2ce2384e17bbe0c6d34077efa39dbab3e09123b" From 9389099820a163b49da899f6e8dcf2fd20933300 Mon Sep 17 00:00:00 2001 From: Antonio Garcia Date: Wed, 29 Jan 2020 04:18:58 -0600 Subject: [PATCH 1497/1815] Add modbus input plugin (#6154) --- go.mod | 3 + go.sum | 6 + plugins/inputs/all/all.go | 1 + plugins/inputs/modbus/.README.md.swp | Bin 0 -> 12288 bytes plugins/inputs/modbus/README.md | 84 ++++ plugins/inputs/modbus/modbus.go | 589 +++++++++++++++++++++++++++ plugins/inputs/modbus/modbus_test.go | 376 +++++++++++++++++ 7 files changed, 1059 insertions(+) create mode 100644 plugins/inputs/modbus/.README.md.swp create mode 100644 plugins/inputs/modbus/README.md create mode 100644 plugins/inputs/modbus/modbus.go create mode 100644 plugins/inputs/modbus/modbus_test.go diff --git a/go.mod b/go.mod index 36819f522..fe4c52d0f 100644 --- a/go.mod +++ b/go.mod @@ -44,6 +44,8 @@ require ( github.com/go-ole/go-ole v1.2.1 // indirect github.com/go-redis/redis v6.12.0+incompatible github.com/go-sql-driver/mysql v1.4.1 + github.com/goburrow/modbus v0.1.0 + github.com/goburrow/serial v0.1.0 // indirect github.com/gobwas/glob v0.2.3 github.com/gofrs/uuid v2.1.0+incompatible github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d @@ -108,6 +110,7 @@ require ( github.com/soniah/gosnmp v1.22.0 github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 github.com/stretchr/testify v1.4.0 + github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 // indirect github.com/tidwall/gjson v1.3.0 github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect diff --git a/go.sum b/go.sum index ed221dee3..c04272c07 100644 --- a/go.sum +++ b/go.sum @@ -150,6 +150,10 @@ github.com/go-redis/redis v6.12.0+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8w github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/goburrow/modbus v0.1.0 h1:DejRZY73nEM6+bt5JSP6IsFolJ9dVcqxsYbpLbeW/ro= +github.com/goburrow/modbus v0.1.0/go.mod h1:Kx552D5rLIS8E7TyUwQ/UdHEqvX5T8tyiGBTlzMcZBg= +github.com/goburrow/serial v0.1.0 h1:v2T1SQa/dlUqQiYIT8+Cu7YolfqAi3K96UmhwYyuSrA= +github.com/goburrow/serial 
v0.1.0/go.mod h1:sAiqG0nRVswsm1C97xsttiYCzSLBmUZ/VSlVLZJ8haA=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gofrs/uuid v2.1.0+incompatible h1:8oEj3gioPmmDAOLQUZdnW+h4FZu9aSE/SQIas1E9pzA=
@@ -406,6 +410,8 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOsk3ij21QjjEgAcVSeo9nkp0dI//cD2o=
+github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw=
github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 h1:mujcChM89zOHwgZBBNr5WZ77mBXP1yR+gLThGCYZgAg=
github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0=
github.com/tidwall/gjson v1.3.0 h1:kfpsw1W3trbg4Xm6doUtqSl9+LhLB6qJ9PkltVAQZYs=
diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 6a42b9451..dec04b397 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -90,6 +90,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
 	_ "github.com/influxdata/telegraf/plugins/inputs/mesos"
 	_ "github.com/influxdata/telegraf/plugins/inputs/minecraft"
+	_ "github.com/influxdata/telegraf/plugins/inputs/modbus"
 	_ "github.com/influxdata/telegraf/plugins/inputs/mongodb"
 	_ "github.com/influxdata/telegraf/plugins/inputs/monit"
 	_ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer"
diff --git a/plugins/inputs/modbus/.README.md.swp b/plugins/inputs/modbus/.README.md.swp
new file mode 100644
index 0000000000000000000000000000000000000000..6a1629b1a3978092196c48639609dfc763bac751
GIT binary patch
[12288-byte base85 literal (a stray vim swap file committed with this patch) omitted]
diff --git a/plugins/inputs/modbus/README.md b/plugins/inputs/modbus/README.md
new file mode 100644
index 000000000..1e042deba
--- /dev/null
+++ b/plugins/inputs/modbus/README.md
@@ -0,0 +1,84 @@
+# Telegraf Input Plugin: Modbus
+
+The Modbus plugin collects Discrete Inputs, Coils, Input Registers and Holding Registers via Modbus TCP or Modbus RTU/ASCII.
+
+### Configuration:
+
+```toml
+ ## Connection Configuration
+ ##
+ ## The plugin supports connections to PLCs via MODBUS/TCP or
+ ## via serial line communication in binary (RTU) or readable (ASCII) encoding
+ ##
+ ## Device name
+ name = "Device"
+
+ ## Slave ID - addresses a MODBUS device on the bus
+ ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved]
+ slave_id = 1
+
+ ## Timeout for each request
+ timeout = "1s"
+
+ # TCP - connect via Modbus/TCP
+ controller = "tcp://localhost:502"
+
+ # Serial (RS485; RS232)
+ #controller = "file:///dev/ttyUSB0"
+ #baud_rate = 9600
+ #data_bits = 8
+ #parity = "N"
+ #stop_bits = 1
+ #transmission_mode = "RTU"
+
+
+ ## Measurements
+ ##
+
+ ## Digital Variables, Discrete Inputs and Coils
+ ## name - the variable name
+ ## address - variable address
+
+ discrete_inputs = [
+   { name = "Start", address = [0]},
+   { name = "Stop", address = [1]},
+   { name = "Reset", address = [2]},
+   { name = "EmergencyStop", address = [3]},
+ ]
+ coils = [
+   { name = "Motor1-Run", address = [0]},
+   { name = "Motor1-Jog", address = [1]},
+   { name = "Motor1-Stop", address = [2]},
+ ]
+
+ ## Analog Variables, Input Registers and Holding Registers
+ ## name - the variable name
+ ## byte_order - the ordering of bytes
+ ##  |---AB, ABCD - Big Endian
+ ##  |---BA, DCBA - Little Endian
+ ##  |---BADC - Mid-Big Endian
+ ##  |---CDAB - Mid-Little Endian
+ ## data_type - UINT16, INT16, INT32, UINT32, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation)
+ ## scale - the final numeric variable representation
+ ## address - variable address
+
+ holding_registers = [
+   { name = "PowerFactor", byte_order = "AB", data_type = "FLOAT32", scale=0.01, address = [8]},
+   { name = "Voltage", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [0]},
+   { name = "Energy", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [5,6]},
+   { name = "Current", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [1,2]},
+   { name = "Frequency", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [7]},
+   { name = "Power", byte_order = "ABCD", data_type = "FLOAT32", scale=0.1, address = [3,4]},
+ ]
+ input_registers = [
+   { name = "TankLevel", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]},
+   { name = "TankPH", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]},
+   { name = "Pump1-Speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]},
+ ]
+```
+### Example Output:
+
+```
+$ ./telegraf -config telegraf.conf -input-filter modbus -test
+modbus.InputRegisters,host=orangepizero Current=0,Energy=0,Frequency=60,Power=0,PowerFactor=0,Voltage=123.9000015258789 1554079521000000000
+```
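A note on how `byte_order` and `scale` interact in the configuration above: the plugin first reassembles the raw register bytes according to `byte_order`, and only then multiplies by `scale`. A minimal standard-library sketch (a standalone illustration, not code from this patch) of how the `Voltage` field decodes; the input bytes come from the `register0_ab_float32` case in modbus_test.go further down:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	raw := []byte{0x08, 0x98}            // two bytes of one holding register
	word := binary.BigEndian.Uint16(raw) // byte_order = "AB" -> big endian -> 2200
	value := float32(word) * 0.1         // data_type = "FLOAT32", scale = 0.1
	fmt.Println(value)                   // 220, matching the expected test value
}
```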
"motor1_stop", address = [2]}, + ] + + ## Analog Variables, Input Registers and Holding Registers + ## name - the variable name + ## byte_order - the ordering of bytes + ## |---AB, ABCD - Big Endian + ## |---BA, DCBA - Little Endian + ## |---BADC - Mid-Big Endian + ## |---CDAB - Mid-Little Endian + ## data_type - UINT16, INT16, INT32, UINT32, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation) + ## scale - the final numeric variable representation + ## address - variable address + + holding_registers = [ + { name = "power_factor", byte_order = "AB", data_type = "FLOAT32", scale=0.01, address = [8]}, + { name = "voltage", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [0]}, + { name = "energy", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [5,6]}, + { name = "current", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [1,2]}, + { name = "frequency", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [7]}, + { name = "power", byte_order = "ABCD", data_type = "FLOAT32", scale=0.1, address = [3,4]}, + ] + input_registers = [ + { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, + { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, + { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, + ] +` + +// SampleConfig returns a basic configuration for the plugin +func (m *Modbus) SampleConfig() string { + return sampleConfig +} + +// Description returns a short description of what the plugin does +func (m *Modbus) Description() string { + return description +} + +func (m *Modbus) Init() error { + //check device name + if m.Name == "" { + return fmt.Errorf("device name is empty") + } + + err := connect(m) + if err != nil { + m.isConnected = false + return err + } + + err = m.InitRegister(m.DiscreteInputs, cDiscreteInputs) + if err != nil { + return err + } + + err = m.InitRegister(m.Coils, cCoils) + if err != nil { + return err + } + + err = m.InitRegister(m.HoldingRegisters, cHoldingRegisters) + if err != nil { + return err + } + + err = m.InitRegister(m.InputRegisters, cInputRegisters) + if err != nil { + return err + } + + return nil +} + +func (m *Modbus) InitRegister(fields []fieldContainer, name string) error { + if len(fields) == 0 { + return nil + } + + err := validateFieldContainers(fields, name) + if err != nil { + return err + } + + addrs := []uint16{} + for _, field := range fields { + for _, a := range field.Address { + addrs = append(addrs, a) + } + } + + addrs = removeDuplicates(addrs) + sort.Slice(addrs, func(i, j int) bool { return addrs[i] < addrs[j] }) + + ii := 0 + var registersRange []registerRange + + // Get range of consecutive integers + // [1, 2, 3, 5, 6, 10, 11, 12, 14] + // (1, 3) , (5, 2) , (10, 3), (14 , 1) + for range addrs { + if ii < len(addrs) { + start := addrs[ii] + end := start + + for ii < len(addrs)-1 && addrs[ii+1]-addrs[ii] == 1 { + end = addrs[ii+1] + ii++ + } + ii++ + registersRange = append(registersRange, registerRange{start, end - start + 1}) + } + } + + var fn func(uint16, uint16) ([]byte, error) + + if name == cDiscreteInputs { + fn = m.client.ReadDiscreteInputs + } else if name == cCoils { + fn = m.client.ReadCoils + } else if name == cInputRegisters { + fn = m.client.ReadInputRegisters + } else if name == cHoldingRegisters { + fn = m.client.ReadHoldingRegisters + } else { + return fmt.Errorf("not Valid function") + } + + m.registers = append(m.registers, register{name, 
+
+// Connect to a MODBUS Slave device via Modbus/[TCP|RTU|ASCII]
+func connect(m *Modbus) error {
+	u, err := url.Parse(m.Controller)
+	if err != nil {
+		return err
+	}
+
+	switch u.Scheme {
+	case "tcp":
+		var host, port string
+		host, port, err = net.SplitHostPort(u.Host)
+		if err != nil {
+			return err
+		}
+		m.tcpHandler = mb.NewTCPClientHandler(host + ":" + port)
+		m.tcpHandler.Timeout = m.Timeout.Duration
+		m.tcpHandler.SlaveId = byte(m.SlaveID)
+		m.client = mb.NewClient(m.tcpHandler)
+		err := m.tcpHandler.Connect()
+		if err != nil {
+			return err
+		}
+		m.isConnected = true
+		return nil
+	case "file":
+		if m.TransmissionMode == "RTU" {
+			m.rtuHandler = mb.NewRTUClientHandler(u.Path)
+			m.rtuHandler.Timeout = m.Timeout.Duration
+			m.rtuHandler.SlaveId = byte(m.SlaveID)
+			m.rtuHandler.BaudRate = m.BaudRate
+			m.rtuHandler.DataBits = m.DataBits
+			m.rtuHandler.Parity = m.Parity
+			m.rtuHandler.StopBits = m.StopBits
+			m.client = mb.NewClient(m.rtuHandler)
+			err := m.rtuHandler.Connect()
+			if err != nil {
+				return err
+			}
+			m.isConnected = true
+			return nil
+		} else if m.TransmissionMode == "ASCII" {
+			m.asciiHandler = mb.NewASCIIClientHandler(u.Path)
+			m.asciiHandler.Timeout = m.Timeout.Duration
+			m.asciiHandler.SlaveId = byte(m.SlaveID)
+			m.asciiHandler.BaudRate = m.BaudRate
+			m.asciiHandler.DataBits = m.DataBits
+			m.asciiHandler.Parity = m.Parity
+			m.asciiHandler.StopBits = m.StopBits
+			m.client = mb.NewClient(m.asciiHandler)
+			err := m.asciiHandler.Connect()
+			if err != nil {
+				return err
+			}
+			m.isConnected = true
+			return nil
+		} else {
+			return fmt.Errorf("invalid transmission mode '%s' for scheme '%s'", m.TransmissionMode, u.Scheme)
+		}
+	default:
+		return fmt.Errorf("invalid controller")
+	}
+}
+
+func validateFieldContainers(t []fieldContainer, n string) error {
+	nameEncountered := map[string]bool{}
+	for _, item := range t {
+		// check for an empty name
+		if item.Name == "" {
+			return fmt.Errorf("empty name in '%s'", n)
+		}
+
+		// check for duplicate names
+		if nameEncountered[item.Name] {
+			return fmt.Errorf("name '%s' is duplicated in '%s'", item.Name, n)
+		}
+		nameEncountered[item.Name] = true
+
+		if n == cInputRegisters || n == cHoldingRegisters {
+			// check byte order
+			switch item.ByteOrder {
+			case "AB", "BA", "ABCD", "CDAB", "BADC", "DCBA":
+				break
+			default:
+				return fmt.Errorf("invalid byte order '%s' in '%s' - '%s'", item.ByteOrder, n, item.Name)
+			}
+
+			// check data type
+			switch item.DataType {
+			case "UINT16", "INT16", "UINT32", "INT32", "FLOAT32-IEEE", "FLOAT32":
+				break
+			default:
+				return fmt.Errorf("invalid data type '%s' in '%s' - '%s'", item.DataType, n, item.Name)
+			}
+
+			// check scale
+			if item.Scale == 0.0 {
+				return fmt.Errorf("invalid scale '%f' in '%s' - '%s'", item.Scale, n, item.Name)
+			}
+		}
+
+		// check address
+		if len(item.Address) == 0 || len(item.Address) > 2 {
+			return fmt.Errorf("invalid address '%v' length '%v' in '%s' - '%s'", item.Address, len(item.Address), n, item.Name)
+		} else if n == cInputRegisters || n == cHoldingRegisters {
+			if (len(item.Address) == 1 && len(item.ByteOrder) != 2) || (len(item.Address) == 2 && len(item.ByteOrder) != 4) {
+				return fmt.Errorf("invalid byte order '%s' and address '%v' in '%s' - '%s'", item.ByteOrder, item.Address, n, item.Name)
+			}
+
+			// check for duplicate addresses
+			if len(item.Address) > len(removeDuplicates(item.Address)) {
+				return fmt.Errorf("duplicate address '%v' in '%s' - '%s'", item.Address, n, item.Name)
+			}
+		} else if len(item.Address) > 1 {
+			// bit-type registers (discrete inputs and coils) take a single address
+			return fmt.Errorf("invalid address '%v' length '%v' in '%s' - '%s'", item.Address, len(item.Address), n, item.Name)
+		}
+	}
+	return nil
+}
+
+func removeDuplicates(elements []uint16) []uint16 {
+	encountered := map[uint16]bool{}
+	result := []uint16{}
+
+	for v := range elements {
+		if !encountered[elements[v]] {
+			encountered[elements[v]] = true
+			result = append(result, elements[v])
+		}
+	}
+
+	return result
+}
+
+func (m *Modbus) getFields() error {
+	for _, register := range m.registers {
+		rawValues := make(map[uint16][]byte)
+		bitRawValues := make(map[uint16]uint16)
+		for _, rr := range register.RegistersRange {
+			address := rr.address
+			readValues, err := register.ReadValue(uint16(rr.address), uint16(rr.length))
+			if err != nil {
+				return err
+			}
+
+			// Raw bit values (discrete inputs and coils): unpack each byte into bits
+			if register.Type == cDiscreteInputs || register.Type == cCoils {
+				for _, readValue := range readValues {
+					for bitPosition := 0; bitPosition < 8; bitPosition++ {
+						bitRawValues[address] = getBitValue(readValue, bitPosition)
+						address = address + 1
+						if address+1 > rr.length {
+							break
+						}
+					}
+				}
+			}
+
+			// Raw word values (input and holding registers): split into 2-byte chunks
+			if register.Type == cInputRegisters || register.Type == cHoldingRegisters {
+				batchSize := 2
+				for batchSize < len(readValues) {
+					rawValues[address] = readValues[0:batchSize:batchSize]
+					address = address + 1
+					readValues = readValues[batchSize:]
+				}
+
+				rawValues[address] = readValues[0:batchSize:batchSize]
+			}
+		}
+
+		if register.Type == cDiscreteInputs || register.Type == cCoils {
+			for i := 0; i < len(register.Fields); i++ {
+				register.Fields[i].value = bitRawValues[register.Fields[i].Address[0]]
+			}
+		}
+
+		if register.Type == cInputRegisters || register.Type == cHoldingRegisters {
+			for i := 0; i < len(register.Fields); i++ {
+				var values_t []byte
+
+				for j := 0; j < len(register.Fields[i].Address); j++ {
+					tempArray := rawValues[register.Fields[i].Address[j]]
+					for x := 0; x < len(tempArray); x++ {
+						values_t = append(values_t, tempArray[x])
+					}
+				}
+
+				register.Fields[i].value = convertDataType(register.Fields[i], values_t)
+			}
+		}
+	}
+
+	return nil
+}
+
+func getBitValue(n byte, pos int) uint16 {
+	return uint16(n >> uint(pos) & 0x01)
+}
+
+func convertDataType(t fieldContainer, bytes []byte) interface{} {
+	switch t.DataType {
+	case "UINT16":
+		e16 := convertEndianness16(t.ByteOrder, bytes)
+		f16 := format16(t.DataType, e16).(uint16)
+		return scaleUint16(t.Scale, f16)
+	case "INT16":
+		e16 := convertEndianness16(t.ByteOrder, bytes)
+		f16 := format16(t.DataType, e16).(int16)
+		return scaleInt16(t.Scale, f16)
+	case "UINT32":
+		e32 := convertEndianness32(t.ByteOrder, bytes)
+		f32 := format32(t.DataType, e32).(uint32)
+		return scaleUint32(t.Scale, f32)
+	case "INT32":
+		e32 := convertEndianness32(t.ByteOrder, bytes)
+		return format32(t.DataType, e32)
+	case "FLOAT32-IEEE":
+		e32 := convertEndianness32(t.ByteOrder, bytes)
+		return format32(t.DataType, e32)
+	case "FLOAT32":
+		if len(bytes) == 2 {
+			e16 := convertEndianness16(t.ByteOrder, bytes)
+			f16 := format16(t.DataType, e16).(uint16)
+			return scale16toFloat32(t.Scale, f16)
+		} else {
+			e32 := convertEndianness32(t.ByteOrder, bytes)
+			return scale32toFloat32(t.Scale, e32)
+		}
+	default:
+		return 0
+	}
+}
+
+func convertEndianness16(o string, b []byte) uint16 {
+	switch o {
+	case "AB":
+		return binary.BigEndian.Uint16(b)
+	case "BA":
+		return binary.LittleEndian.Uint16(b)
+	default:
+		return 0
+	}
+}
+
+func convertEndianness32(o string, b []byte) uint32 {
+	switch o {
+	case "ABCD":
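+		// Worked example of the four orderings for input bytes {0xAA, 0xBB, 0xCC, 0xDD},
+		// matching the uint32 test vectors in modbus_test.go below:
+		//   ABCD (big endian)        -> 0xAABBCCDD = 2864434397
+		//   DCBA (little endian)     -> 0xDDCCBBAA = 3721182122
+		//   BADC (mid-big endian)    -> 0xBBAADDCC = 3148537292
+		//   CDAB (mid-little endian) -> 0xCCDDAABB = 3437079227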
return binary.BigEndian.Uint32(b) + case "DCBA": + return binary.LittleEndian.Uint32(b) + case "BADC": + return uint32(binary.LittleEndian.Uint16(b[0:]))<<16 | uint32(binary.LittleEndian.Uint16(b[2:])) + case "CDAB": + return uint32(binary.BigEndian.Uint16(b[2:]))<<16 | uint32(binary.BigEndian.Uint16(b[0:])) + default: + return 0 + } +} + +func format16(f string, r uint16) interface{} { + switch f { + case "UINT16": + return r + case "INT16": + return int16(r) + default: + return r + } +} + +func format32(f string, r uint32) interface{} { + switch f { + case "UINT32": + return r + case "INT32": + return int32(r) + case "FLOAT32-IEEE": + return math.Float32frombits(r) + default: + return r + } +} + +func scale16toFloat32(s float32, v uint16) float32 { + return float32(v) * s +} + +func scale32toFloat32(s float32, v uint32) float32 { + return float32(v) * s +} + +func scaleInt16(s float32, v int16) int16 { + return int16(float32(v) * s) +} + +func scaleUint16(s float32, v uint16) uint16 { + return uint16(float32(v) * s) +} + +func scaleUint32(s float32, v uint32) uint32 { + return uint32(float64(v) * float64(s)) +} + +// Gather implements the telegraf plugin interface method for data accumulation +func (m *Modbus) Gather(acc telegraf.Accumulator) error { + if !m.isConnected { + err := connect(m) + if err != nil { + m.isConnected = false + return err + } + } + + err := m.getFields() + if err != nil { + m.isConnected = false + return err + } + + for _, reg := range m.registers { + fields := make(map[string]interface{}) + tags := map[string]string{ + "name": m.Name, + "type": reg.Type, + } + + for _, field := range reg.Fields { + fields[field.Name] = field.value + } + + acc.AddFields("modbus", fields, tags) + } + + return nil +} + +// Add this plugin to telegraf +func init() { + inputs.Add("modbus", func() telegraf.Input { return &Modbus{} }) +} diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go new file mode 100644 index 000000000..3d54c68c5 --- /dev/null +++ b/plugins/inputs/modbus/modbus_test.go @@ -0,0 +1,376 @@ +package modbus + +import ( + "testing" + + m "github.com/goburrow/modbus" + "github.com/stretchr/testify/assert" + "github.com/tbrandon/mbserver" + + "github.com/influxdata/telegraf/testutil" +) + +func TestCoils(t *testing.T) { + var coilTests = []struct { + name string + address uint16 + quantity uint16 + write []byte + read uint16 + }{ + { + name: "coil0_turn_off", + address: 0, + quantity: 1, + write: []byte{0x00}, + read: 0, + }, + { + name: "coil0_turn_on", + address: 0, + quantity: 1, + write: []byte{0x01}, + read: 1, + }, + { + name: "coil1_turn_on", + address: 1, + quantity: 1, + write: []byte{0x01}, + read: 1, + }, + { + name: "coil2_turn_on", + address: 2, + quantity: 1, + write: []byte{0x01}, + read: 1, + }, + { + name: "coil3_turn_on", + address: 3, + quantity: 1, + write: []byte{0x01}, + read: 1, + }, + { + name: "coil1_turn_off", + address: 1, + quantity: 1, + write: []byte{0x00}, + read: 0, + }, + { + name: "coil2_turn_off", + address: 2, + quantity: 1, + write: []byte{0x00}, + read: 0, + }, + { + name: "coil3_turn_off", + address: 3, + quantity: 1, + write: []byte{0x00}, + read: 0, + }, + } + + serv := mbserver.NewServer() + err := serv.ListenTCP("localhost:1502") + defer serv.Close() + assert.NoError(t, err) + + handler := m.NewTCPClientHandler("localhost:1502") + err = handler.Connect() + assert.NoError(t, err) + defer handler.Close() + client := m.NewClient(handler) + + for _, ct := range coilTests { + t.Run(ct.name, func(t 
*testing.T) { + _, err = client.WriteMultipleCoils(ct.address, ct.quantity, ct.write) + assert.NoError(t, err) + + modbus := Modbus{ + Name: "TestCoils", + Controller: "tcp://localhost:1502", + SlaveID: 1, + Coils: []fieldContainer{ + { + Name: ct.name, + Address: []uint16{ct.address}, + }, + }, + } + + err = modbus.Init() + assert.NoError(t, err) + var acc testutil.Accumulator + err = modbus.Gather(&acc) + assert.NoError(t, err) + assert.NotEmpty(t, modbus.registers) + + for _, coil := range modbus.registers { + assert.Equal(t, ct.read, coil.Fields[0].value) + } + }) + } +} + +func TestHoldingRegisters(t *testing.T) { + var holdingRegisterTests = []struct { + name string + address []uint16 + quantity uint16 + byteOrder string + dataType string + scale float32 + write []byte + read interface{} + }{ + { + name: "register0_ab_float32", + address: []uint16{0}, + quantity: 1, + byteOrder: "AB", + dataType: "FLOAT32", + scale: 0.1, + write: []byte{0x08, 0x98}, + read: float32(220), + }, + { + name: "register0_register1_ab_float32", + address: []uint16{0, 1}, + quantity: 2, + byteOrder: "ABCD", + dataType: "FLOAT32", + scale: 0.001, + write: []byte{0x00, 0x00, 0x03, 0xE8}, + read: float32(1), + }, + { + name: "register1_register2_abcd_float32", + address: []uint16{1, 2}, + quantity: 2, + byteOrder: "ABCD", + dataType: "FLOAT32", + scale: 0.1, + write: []byte{0x00, 0x00, 0x08, 0x98}, + read: float32(220), + }, + { + name: "register3_register4_abcd_float32", + address: []uint16{3, 4}, + quantity: 2, + byteOrder: "ABCD", + dataType: "FLOAT32", + scale: 0.1, + write: []byte{0x00, 0x00, 0x08, 0x98}, + read: float32(220), + }, + { + name: "register7_ab_float32", + address: []uint16{7}, + quantity: 1, + byteOrder: "AB", + dataType: "FLOAT32", + scale: 0.1, + write: []byte{0x01, 0xF4}, + read: float32(50), + }, + { + name: "register10_ab_uint16", + address: []uint16{10}, + quantity: 1, + byteOrder: "AB", + dataType: "UINT16", + scale: 1, + write: []byte{0xAB, 0xCD}, + read: uint16(43981), + }, + { + name: "register10_ab_uint16-scale_.1", + address: []uint16{10}, + quantity: 1, + byteOrder: "AB", + dataType: "UINT16", + scale: .1, + write: []byte{0xAB, 0xCD}, + read: uint16(4398), + }, + { + name: "register10_ab_uint16_scale_10", + address: []uint16{10}, + quantity: 1, + byteOrder: "AB", + dataType: "UINT16", + scale: 10, + write: []byte{0x00, 0x2A}, + read: uint16(420), + }, + { + name: "register20_ba_uint16", + address: []uint16{20}, + quantity: 1, + byteOrder: "BA", + dataType: "UINT16", + scale: 1, + write: []byte{0xAB, 0xCD}, + read: uint16(52651), + }, + { + name: "register30_ab_int16", + address: []uint16{20}, + quantity: 1, + byteOrder: "AB", + dataType: "INT16", + scale: 1, + write: []byte{0xAB, 0xCD}, + read: int16(-21555), + }, + { + name: "register40_ba_int16", + address: []uint16{40}, + quantity: 1, + byteOrder: "BA", + dataType: "INT16", + scale: 1, + write: []byte{0xAB, 0xCD}, + read: int16(-12885), + }, + { + name: "register50_register51_abcd_int32", + address: []uint16{50, 51}, + quantity: 2, + byteOrder: "ABCD", + dataType: "INT32", + scale: 1, + write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, + read: int32(-1430532899), + }, + { + name: "register60_register61_dcba_int32", + address: []uint16{60, 61}, + quantity: 2, + byteOrder: "DCBA", + dataType: "INT32", + scale: 1, + write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, + read: int32(-573785174), + }, + { + name: "register70_register71_badc_int32", + address: []uint16{70, 71}, + quantity: 2, + byteOrder: "BADC", + dataType: "INT32", + scale: 1, + write: 
[]byte{0xAA, 0xBB, 0xCC, 0xDD}, + read: int32(-1146430004), + }, + { + name: "register80_register81_cdab_int32", + address: []uint16{80, 81}, + quantity: 2, + byteOrder: "CDAB", + dataType: "INT32", + scale: 1, + write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, + read: int32(-857888069), + }, + { + name: "register90_register91_abcd_uint32", + address: []uint16{90, 91}, + quantity: 2, + byteOrder: "ABCD", + dataType: "UINT32", + scale: 1, + write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, + read: uint32(2864434397), + }, + { + name: "register100_register101_dcba_uint32", + address: []uint16{100, 101}, + quantity: 2, + byteOrder: "DCBA", + dataType: "UINT32", + scale: 1, + write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, + read: uint32(3721182122), + }, + { + name: "register110_register111_badc_uint32", + address: []uint16{110, 111}, + quantity: 2, + byteOrder: "BADC", + dataType: "UINT32", + scale: 1, + write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, + read: uint32(3148537292), + }, + { + name: "register120_register121_cdab_uint32", + address: []uint16{120, 121}, + quantity: 2, + byteOrder: "CDAB", + dataType: "UINT32", + scale: 1, + write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, + read: uint32(3437079227), + }, + { + name: "register130_register131_abcd_float32_ieee", + address: []uint16{130, 131}, + quantity: 2, + byteOrder: "ABCD", + dataType: "FLOAT32-IEEE", + scale: 1, + write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, + read: float32(-3.3360025e-13), + }, + } + + serv := mbserver.NewServer() + err := serv.ListenTCP("localhost:1502") + defer serv.Close() + assert.NoError(t, err) + + handler := m.NewTCPClientHandler("localhost:1502") + err = handler.Connect() + assert.NoError(t, err) + defer handler.Close() + client := m.NewClient(handler) + + for _, hrt := range holdingRegisterTests { + t.Run(hrt.name, func(t *testing.T) { + _, err = client.WriteMultipleRegisters(hrt.address[0], hrt.quantity, hrt.write) + assert.NoError(t, err) + + modbus := Modbus{ + Name: "TestHoldingRegisters", + Controller: "tcp://localhost:1502", + SlaveID: 1, + HoldingRegisters: []fieldContainer{ + { + Name: hrt.name, + ByteOrder: hrt.byteOrder, + DataType: hrt.dataType, + Scale: hrt.scale, + Address: hrt.address, + }, + }, + } + + err = modbus.Init() + assert.NoError(t, err) + var acc testutil.Accumulator + modbus.Gather(&acc) + assert.NotEmpty(t, modbus.registers) + + for _, coil := range modbus.registers { + assert.Equal(t, hrt.read, coil.Fields[0].value) + } + }) + } +} From ede9bc520baacc4f06a2848ae0ef46686b46f8af Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 29 Jan 2020 02:50:11 -0800 Subject: [PATCH 1498/1815] Update readme and changelog --- CHANGELOG.md | 1 + README.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b1bb3b66..76d1ccd89 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ #### New Inputs - [infiniband](/plugins/inputs/infiniband/README.md) - Contributed by @willfurnell +- [modbus](/plugins/inputs/modbus/README.md) - Contributed by @garciaolais - [monit](/plugins/inputs/monit/README.md) - Contributed by @SirishaGopigiri #### New Outputs diff --git a/README.md b/README.md index b97b8719b..806df0139 100644 --- a/README.md +++ b/README.md @@ -235,6 +235,7 @@ For documentation on the latest development code see the [documentation index][d * [mem](./plugins/inputs/mem) * [mesos](./plugins/inputs/mesos) * [minecraft](./plugins/inputs/minecraft) +* [modbus](./plugins/inputs/modbus) * [mongodb](./plugins/inputs/mongodb) * [monit](./plugins/inputs/monit) * 
[mqtt_consumer](./plugins/inputs/mqtt_consumer) From 982395259768ef547251cedc28e3e8e578181b69 Mon Sep 17 00:00:00 2001 From: Asgaut Eng Date: Fri, 31 Jan 2020 21:03:54 +0100 Subject: [PATCH 1499/1815] Fix duplicate TrackingIDs returned (#6960) There is a small chance the newTrackingID() function in tracking.go will return the same id to multiple simultaneous callers. The function must return the value returned by atomic.AddUint64() to be safe. --- metric/tracking.go | 3 +-- metric/tracking_test.go | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 2 deletions(-) diff --git a/metric/tracking.go b/metric/tracking.go index 3d8843240..e370d9f2a 100644 --- a/metric/tracking.go +++ b/metric/tracking.go @@ -34,8 +34,7 @@ var ( ) func newTrackingID() telegraf.TrackingID { - atomic.AddUint64(&lastID, 1) - return telegraf.TrackingID(lastID) + return telegraf.TrackingID(atomic.AddUint64(&lastID, 1)) } func debugFinalizer(d *trackingData) { diff --git a/metric/tracking_test.go b/metric/tracking_test.go index f950cfcd1..0ca1ca4da 100644 --- a/metric/tracking_test.go +++ b/metric/tracking_test.go @@ -1,6 +1,7 @@ package metric import ( + "sync" "testing" "time" @@ -30,6 +31,43 @@ func (d *deliveries) onDelivery(info telegraf.DeliveryInfo) { d.Info[info.ID()] = info } +func TestNewTrackingID(t *testing.T) { + var wg sync.WaitGroup + var a [100000]telegraf.TrackingID + var b [100000]telegraf.TrackingID + + wg.Add(2) + go func() { + for i := 0; i < len(a); i++ { + a[i] = newTrackingID() + } + wg.Done() + }() + go func() { + for i := 0; i < len(b); i++ { + b[i] = newTrackingID() + } + wg.Done() + }() + wg.Wait() + + // Find any duplicate TrackingIDs in arrays a and b. Arrays must be sorted in increasing order. + for i, j := 0, 0; i < len(a) && j < len(b); { + if a[i] == b[j] { + t.Errorf("Duplicate TrackingID: a[%d]==%d and b[%d]==%d.", i, a[i], j, b[j]) + break + } + if a[i] > b[j] { + j++ + continue + } + if a[i] < b[j] { + i++ + continue + } + } +} + func TestTracking(t *testing.T) { tests := []struct { name string From 6cac2fb388e8fe2c67ef189ea58030c34babad17 Mon Sep 17 00:00:00 2001 From: SirishaGopigiri <52744121+SirishaGopigiri@users.noreply.github.com> Date: Sat, 1 Feb 2020 03:39:09 +0530 Subject: [PATCH 1500/1815] Improve monit http client transport mock (#6955) --- plugins/inputs/monit/monit.go | 34 ++++-------------------------- plugins/inputs/monit/monit_test.go | 29 ++++++------------------- 2 files changed, 11 insertions(+), 52 deletions(-) diff --git a/plugins/inputs/monit/monit.go b/plugins/inputs/monit/monit.go index dddb801d3..be17762a1 100644 --- a/plugins/inputs/monit/monit.go +++ b/plugins/inputs/monit/monit.go @@ -175,38 +175,15 @@ type Monit struct { Address string `toml:"address"` Username string `toml:"username"` Password string `toml:"password"` - client HTTPClient + client http.Client tls.ClientConfig Timeout internal.Duration `toml:"timeout"` } -type HTTPClient interface { - MakeRequest(req *http.Request) (*http.Response, error) - - SetHTTPClient(client *http.Client) - HTTPClient() *http.Client -} - type Messagebody struct { Metrics []string `json:"metrics"` } -type RealHTTPClient struct { - client *http.Client -} - -func (c *RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) { - return c.client.Do(req) -} - -func (c *RealHTTPClient) SetHTTPClient(client *http.Client) { - c.client = client -} - -func (c *RealHTTPClient) HTTPClient() *http.Client { - return c.client -} - func (m *Monit) Description() string { return "Read metrics and 
status information about processes managed by Monit" } @@ -240,14 +217,13 @@ func (m *Monit) Init() error { return err } - client := &http.Client{ + m.client = http.Client{ Transport: &http.Transport{ TLSClientConfig: tlsCfg, Proxy: http.ProxyFromEnvironment, }, Timeout: m.Timeout.Duration, } - m.client.SetHTTPClient(client) return nil } @@ -261,7 +237,7 @@ func (m *Monit) Gather(acc telegraf.Accumulator) error { req.SetBasicAuth(m.Username, m.Password) } - resp, err := m.client.MakeRequest(req) + resp, err := m.client.Do(req) if err != nil { return err } @@ -428,8 +404,6 @@ func monitoringStatus(s Service) string { func init() { inputs.Add("monit", func() telegraf.Input { - return &Monit{ - client: &RealHTTPClient{}, - } + return &Monit{} }) } diff --git a/plugins/inputs/monit/monit_test.go b/plugins/inputs/monit/monit_test.go index b0d0698b4..1f7e671f4 100644 --- a/plugins/inputs/monit/monit_test.go +++ b/plugins/inputs/monit/monit_test.go @@ -13,19 +13,14 @@ import ( "github.com/stretchr/testify/require" ) -type MockHTTPClient struct { - networkError string +type transportMock struct { } -func (c *MockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) { - return nil, errors.New(c.networkError) -} - -func (c *MockHTTPClient) SetHTTPClient(client *http.Client) { -} - -func (c *MockHTTPClient) HTTPClient() *http.Client { - return nil +func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { + errorString := "Get http://127.0.0.1:2812/_status?format=xml: " + + "read tcp 192.168.10.2:55610->127.0.0.1:2812: " + + "read: connection reset by peer" + return nil, errors.New(errorString) } func TestServiceType(t *testing.T) { @@ -336,7 +331,6 @@ func TestServiceType(t *testing.T) { plugin := &Monit{ Address: ts.URL, - client: &RealHTTPClient{}, } plugin.Init() @@ -536,7 +530,6 @@ func TestMonitFailure(t *testing.T) { plugin := &Monit{ Address: ts.URL, - client: &RealHTTPClient{}, } plugin.Init() @@ -561,19 +554,15 @@ func checkAuth(r *http.Request, username, password string) bool { func TestAllowHosts(t *testing.T) { - networkError := "Get http://127.0.0.1:2812/_status?format=xml: " + - "read tcp 192.168.10.2:55610->127.0.0.1:2812: " + - "read: connection reset by peer" r := &Monit{ Address: "http://127.0.0.1:2812", Username: "test", Password: "test", - client: &MockHTTPClient{networkError}, } var acc testutil.Accumulator - r.Init() + r.client.Transport = &transportMock{} err := r.Gather(&acc) @@ -588,7 +577,6 @@ func TestConnection(t *testing.T) { Address: "http://127.0.0.1:2812", Username: "test", Password: "test", - client: &RealHTTPClient{}, } var acc testutil.Accumulator @@ -625,7 +613,6 @@ func TestInvalidUsernameorPassword(t *testing.T) { Address: ts.URL, Username: "test", Password: "test", - client: &RealHTTPClient{}, } var acc testutil.Accumulator @@ -658,7 +645,6 @@ func TestNoUsernameorPasswordConfiguration(t *testing.T) { r := &Monit{ Address: ts.URL, - client: &RealHTTPClient{}, } var acc testutil.Accumulator @@ -703,7 +689,6 @@ func TestInvalidXMLAndInvalidTypes(t *testing.T) { plugin := &Monit{ Address: ts.URL, - client: &RealHTTPClient{}, } plugin.Init() From 38bc81e746ec144b3fffab760eba06d0730aa11f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 31 Jan 2020 14:14:44 -0800 Subject: [PATCH 1501/1815] Fix atomic usage in tests (#6964) --- internal/models/running_aggregator_test.go | 5 ++-- plugins/inputs/vsphere/vsphere_test.go | 33 ---------------------- testutil/accumulator.go | 4 +-- 3 files changed, 4 insertions(+), 38 deletions(-) diff 
--git a/internal/models/running_aggregator_test.go b/internal/models/running_aggregator_test.go index 83b9dea0a..a85885965 100644 --- a/internal/models/running_aggregator_test.go +++ b/internal/models/running_aggregator_test.go @@ -1,7 +1,6 @@ package models import ( - "sync/atomic" "testing" "time" @@ -246,7 +245,7 @@ type TestAggregator struct { func (t *TestAggregator) Description() string { return "" } func (t *TestAggregator) SampleConfig() string { return "" } func (t *TestAggregator) Reset() { - atomic.StoreInt64(&t.sum, 0) + t.sum = 0 } func (t *TestAggregator) Push(acc telegraf.Accumulator) { @@ -259,7 +258,7 @@ func (t *TestAggregator) Push(acc telegraf.Accumulator) { func (t *TestAggregator) Add(in telegraf.Metric) { for _, v := range in.Fields() { if vi, ok := v.(int64); ok { - atomic.AddInt64(&t.sum, vi) + t.sum += vi } } } diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go index b66fa45eb..dce21fa78 100644 --- a/plugins/inputs/vsphere/vsphere_test.go +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -5,9 +5,6 @@ import ( "crypto/tls" "fmt" "regexp" - "sort" - "sync" - "sync/atomic" "testing" "time" "unsafe" @@ -230,36 +227,6 @@ func TestParseConfig(t *testing.T) { require.NotNil(t, tab) } -func TestThrottledExecutor(t *testing.T) { - max := int64(0) - ngr := int64(0) - n := 10000 - var mux sync.Mutex - results := make([]int, 0, n) - te := NewThrottledExecutor(5) - for i := 0; i < n; i++ { - func(i int) { - te.Run(context.Background(), func() { - atomic.AddInt64(&ngr, 1) - mux.Lock() - defer mux.Unlock() - results = append(results, i*2) - if ngr > max { - max = ngr - } - time.Sleep(100 * time.Microsecond) - atomic.AddInt64(&ngr, -1) - }) - }(i) - } - te.Wait() - sort.Ints(results) - for i := 0; i < n; i++ { - require.Equal(t, results[i], i*2, "Some jobs didn't run") - } - require.Equal(t, int64(5), max, "Wrong number of goroutines spawned") -} - func TestMaxQuery(t *testing.T) { // Don't run test on 32-bit machines due to bug in simulator. // https://github.com/vmware/govmomi/issues/1330 diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 65592b5a0..5716d3518 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -18,8 +18,8 @@ var ( ) func newTrackingID() telegraf.TrackingID { - atomic.AddUint64(&lastID, 1) - return telegraf.TrackingID(lastID) + id := atomic.AddUint64(&lastID, 1) + return telegraf.TrackingID(id) } // Metric defines a single point measurement From adb156c185f893ac1abae7e61e882dd3b8262b25 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 3 Feb 2020 18:13:00 -0800 Subject: [PATCH 1502/1815] Remove quantile example from aggregators docs Avoid confusion since we don't yet have an aggregator for quantiles. --- docs/AGGREGATORS_AND_PROCESSORS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/AGGREGATORS_AND_PROCESSORS.md b/docs/AGGREGATORS_AND_PROCESSORS.md index 9cbc39381..4b5c9f1a6 100644 --- a/docs/AGGREGATORS_AND_PROCESSORS.md +++ b/docs/AGGREGATORS_AND_PROCESSORS.md @@ -52,7 +52,7 @@ all metrics or adding a tag to all metrics that pass through. ### Aggregator Aggregator plugins, on the other hand, are a bit more complicated. Aggregators are typically for emitting new _aggregate_ metrics, such as a running mean, -minimum, maximum, quantiles, or standard deviation. For this reason, all _aggregator_ +minimum, maximum, or standard deviation. For this reason, all _aggregator_ plugins are configured with a `period`. 
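For example, a minimal sketch of such a configuration (shown here with the stock `minmax` aggregator; the values are illustrative):

```toml
[[aggregators.minmax]]
  ## Length of the aggregation window.
  period = "30s"
  ## Whether the original, unaggregated metrics are also passed downstream.
  drop_original = false
```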
The `period` is the size of the window of metrics that each _aggregate_ represents. In other words, the emitted _aggregate_ metric will be the aggregated value of the past `period` seconds. From 40ef7fe4d12a925258e2f1045a0b67d80653b380 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 3 Feb 2020 18:24:43 -0800 Subject: [PATCH 1503/1815] Update changelog --- CHANGELOG.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 76d1ccd89..5e1612db7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,13 @@ - [#6397](https://github.com/influxdata/telegraf/issues/6397): Fix conversion to floats in AzureDBResourceStats query in the sqlserver input. - [#6867](https://github.com/influxdata/telegraf/issues/6867): Fix case sensitive collation in sqlserver input. +## v1.13.3 [unreleased] + +#### Bugfixes + +- [#5744](https://github.com/influxdata/telegraf/issues/5744): Fix kibana input with Kibana versions greater than 6.4. +- [#6960](https://github.com/influxdata/telegraf/issues/6960): Fix duplicate TrackingIDs can be returned in queue consumer plugins. + ## v1.13.2 [2020-01-21] #### Bugfixes @@ -56,7 +63,6 @@ - [#6619](https://github.com/influxdata/telegraf/issues/6619): Change logic to allow recording of device fields when attributes is false. - [#6903](https://github.com/influxdata/telegraf/issues/6903): Do not add invalid timestamps to kafka messages. - [#6906](https://github.com/influxdata/telegraf/issues/6906): Fix json_strict option and set default of true. -- [#5744](https://github.com/influxdata/telegraf/issues/5744): Fix kibana input with Kibana versions greater than 6.4. ## v1.13.1 [2020-01-08] From c9a38875012ea96f4c9586736de5077ca570a28c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 3 Feb 2020 18:52:08 -0800 Subject: [PATCH 1504/1815] Update to github.com/safchain/ethtool@latest (#6975) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index fe4c52d0f..2157160c6 100644 --- a/go.mod +++ b/go.mod @@ -100,7 +100,7 @@ require ( github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f github.com/prometheus/common v0.2.0 - github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8 + github.com/safchain/ethtool v0.0.0-20200128171343-ef7e7c9c2763 github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect github.com/shirou/gopsutil v2.19.11+incompatible diff --git a/go.sum b/go.sum index c04272c07..e3cc6969a 100644 --- a/go.sum +++ b/go.sum @@ -379,8 +379,8 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8 h1:2c1EFnZHIPCW8qKWgHMH/fX2PkSabFc5mrVzfUNdg5U= -github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/safchain/ethtool v0.0.0-20200128171343-ef7e7c9c2763 h1:rzR7qhaYDEzb9ba9+hNyRWCYuMFGPmzFDoo1QPM9KC0= +github.com/safchain/ethtool 
v0.0.0-20200128171343-ef7e7c9c2763/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec h1:6ncX5ko6B9LntYM0YBRXkiSaZMmLYeZ/NWcmeB43mMY= github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM=
From bc09c21f6ea836161dbc7a7eb25b082ae9bb5852 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 3 Feb 2020 18:53:59 -0800 Subject: [PATCH 1505/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md index 5e1612db7..2d7d32702 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,6 +51,7 @@ - [#5744](https://github.com/influxdata/telegraf/issues/5744): Fix kibana input with Kibana versions greater than 6.4. - [#6960](https://github.com/influxdata/telegraf/issues/6960): Fix duplicate TrackingIDs can be returned in queue consumer plugins. +- [#6913](https://github.com/influxdata/telegraf/issues/6913): Support up to 4096 stats in the ethtool input. ## v1.13.2 [2020-01-21] #### Bugfixes
From bbb8858c1dba7142369e995506b12ddc96d59d3d Mon Sep 17 00:00:00 2001 From: Antonio Garcia Date: Tue, 4 Feb 2020 12:36:02 -0600 Subject: [PATCH 1506/1815] Delete .README.md.swp (#6980) --- plugins/inputs/modbus/.README.md.swp | Bin 12288 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 plugins/inputs/modbus/.README.md.swp
diff --git a/plugins/inputs/modbus/.README.md.swp b/plugins/inputs/modbus/.README.md.swp deleted file mode 100644 index 6a1629b1a3978092196c48639609dfc763bac751..0000000000000000000000000000000000000000 GIT binary patch [base85-encoded binary payload of the deleted 12288-byte Vim swap file omitted]
From 8792a5fd5ebe7f8e48d9f8cc1ce4a95a67a12314 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 4 Feb 2020 13:54:11 -0800 Subject: [PATCH 1507/1815] Exclude processors and aggregators from minimal config example --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md index 806df0139..7df2067cc 100644 --- a/README.md +++ b/README.md @@ -114,7 +114,7 @@ telegraf config > telegraf.conf #### Generate config with only cpu input & influxdb output plugins defined: ``` -telegraf --input-filter cpu --output-filter influxdb config +telegraf --section-filter agent:inputs:outputs --input-filter cpu --output-filter influxdb config ``` #### Run a single telegraf collection, outputing metrics to stdout:
From e8d9add2d15a743d28cc48ccff976b02c76026ee Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 4 Feb 2020 15:12:23 -0800 Subject: [PATCH 1508/1815] Expire metrics on query in addition to on add (#6981) Ensures that expired metrics are removed even when no new data is sent to the output. --- plugins/outputs/prometheus_client/v2/collector.go | 9 +++++++++ 1 file changed, 9 insertions(+)
diff --git a/plugins/outputs/prometheus_client/v2/collector.go b/plugins/outputs/prometheus_client/v2/collector.go index 45e1cb7a7..9ffc6516a 100644 --- a/plugins/outputs/prometheus_client/v2/collector.go +++ b/plugins/outputs/prometheus_client/v2/collector.go @@ -65,6 +65,12 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) { c.Lock() defer c.Unlock() + // Expire metrics, doing this on Collect ensure metrics are removed even if no + // new metrics are added to the output. + if c.expireDuration != 0 { + c.coll.Expire(time.Now(), c.expireDuration) + } + for _, family := range c.coll.GetProto() { for _, metric := range family.Metric { ch <- &Metric{family: family, metric: metric} @@ -80,8 +86,11 @@ func (c *Collector) Add(metrics []telegraf.Metric) error { c.coll.Add(metric) } + // Expire metrics, doing this on Add ensure metrics are removed even if no + // one is querying the data.
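+	// Together with the expiry in Collect above, this covers both cases: stale
+	// series are dropped whether the registry is only being written to or only
+	// being scraped.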
if c.expireDuration != 0 { c.coll.Expire(time.Now(), c.expireDuration) } + return nil }
From 4991fcb28134993360c4f59f841c21f6a6d3d627 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Tue, 4 Feb 2020 16:24:02 -0700 Subject: [PATCH 1509/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md index 2d7d32702..a00d5bc8d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,6 +52,7 @@ - [#5744](https://github.com/influxdata/telegraf/issues/5744): Fix kibana input with Kibana versions greater than 6.4. - [#6960](https://github.com/influxdata/telegraf/issues/6960): Fix duplicate TrackingIDs can be returned in queue consumer plugins. - [#6913](https://github.com/influxdata/telegraf/issues/6913): Support up to 4096 stats in the ethtool input. +- [#6973](https://github.com/influxdata/telegraf/issues/6973): Expire metrics on query in addition to on add. ## v1.13.2 [2020-01-21]
From e51d9a35754576bdcb7d8eeb556e8c15a6675474 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Tue, 4 Feb 2020 16:36:17 -0700 Subject: [PATCH 1510/1815] Set 1.13.3 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md index a00d5bc8d..d7b9a1b77 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,7 +45,7 @@ - [#6397](https://github.com/influxdata/telegraf/issues/6397): Fix conversion to floats in AzureDBResourceStats query in the sqlserver input. - [#6867](https://github.com/influxdata/telegraf/issues/6867): Fix case sensitive collation in sqlserver input. -## v1.13.3 [unreleased] +## v1.13.3 [2020-02-04] #### Bugfixes
From 0cc71dbd510319400934a82825d4a2a50c2697e0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 4 Feb 2020 16:40:00 -0800 Subject: [PATCH 1511/1815] Document the behavior of an unset routing key (#6983) --- plugins/outputs/kafka/README.md | 18 +++++++++++++----- plugins/outputs/kafka/kafka.go | 18 +++++++++++++----- 2 files changed, 26 insertions(+), 10 deletions(-)
diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md index 7b9fc0e30..7ced4c5c6 100644 --- a/plugins/outputs/kafka/README.md +++ b/plugins/outputs/kafka/README.md @@ -46,13 +46,21 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm # keys = ["foo", "bar"] # separator = "_" - ## Telegraf tag to use as a routing key - ## ie, if this tag exists, its value will be used as the routing key + ## The routing tag specifies a tag key on the metric whose value is used as + ## the message key. The message key is used to determine which partition to + ## send the message to. This tag is preferred over the routing_key option. routing_tag = "host" - ## Static routing key. Used when no routing_tag is set or as a fallback - ## when the tag specified in routing tag is not found. If set to "random", - ## a random value will be generated for each message. + ## The routing key is set as the message key and used to determine which + ## partition to send the message to. This value is only used when no + ## routing_tag is set or as a fallback when the tag specified in routing tag + ## is not found. + ## + ## If set to "random", a random value will be generated for each message. + ## + ## When unset, no message key is added and each message is routed to a random + ## partition.
+ ## ## ex: routing_key = "random" ## routing_key = "telegraf" # routing_key = ""
diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 18a8925a5..a877b334b 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -130,13 +130,21 @@ var sampleConfig = ` # keys = ["foo", "bar"] # separator = "_" - ## Telegraf tag to use as a routing key - ## ie, if this tag exists, its value will be used as the routing key + ## The routing tag specifies a tag key on the metric whose value is used as + ## the message key. The message key is used to determine which partition to + ## send the message to. This tag is preferred over the routing_key option. routing_tag = "host" - ## Static routing key. Used when no routing_tag is set or as a fallback - ## when the tag specified in routing tag is not found. If set to "random", - ## a random value will be generated for each message. + ## The routing key is set as the message key and used to determine which + ## partition to send the message to. This value is only used when no + ## routing_tag is set or as a fallback when the tag specified in routing tag + ## is not found. + ## + ## If set to "random", a random value will be generated for each message. + ## + ## When unset, no message key is added and each message is routed to a random + ## partition. + ## ## ex: routing_key = "random" ## routing_key = "telegraf" # routing_key = ""
From ae22db4b8117384ba1aef50f81ef863fe3b09f0e Mon Sep 17 00:00:00 2001 From: Manthan Sharma Date: Wed, 5 Feb 2020 07:02:41 +0530 Subject: [PATCH 1512/1815] Add support for titlecase transformation to strings processor (#6982) --- plugins/processors/strings/README.md | 5 +++ plugins/processors/strings/strings.go | 9 +++++ plugins/processors/strings/strings_test.go | 47 ++++++++++++++++++++++ 3 files changed, 61 insertions(+)
diff --git a/plugins/processors/strings/README.md b/plugins/processors/strings/README.md index d00bf03db..a7aa0e2a5 100644 --- a/plugins/processors/strings/README.md +++ b/plugins/processors/strings/README.md @@ -5,6 +5,7 @@ The `strings` plugin maps certain go string functions onto measurement, tag, and Implemented functions are: - lowercase - uppercase +- titlecase - trim - trim_left - trim_right @@ -35,6 +36,10 @@ If you'd like to apply multiple processings to the same `tag_key` or `field_key` # [[processors.strings.uppercase]] # tag = "method" + ## Convert a field value to titlecase + # [[processors.strings.titlecase]] + # field = "status" + ## Trim leading and trailing whitespace using the default cutset # [[processors.strings.trim]] # field = "message"
diff --git a/plugins/processors/strings/strings.go b/plugins/processors/strings/strings.go index 4a8a6e7ff..1ac6c6101 100644 --- a/plugins/processors/strings/strings.go +++ b/plugins/processors/strings/strings.go @@ -13,6 +13,7 @@ import ( type Strings struct { Lowercase []converter `toml:"lowercase"` Uppercase []converter `toml:"uppercase"` + Titlecase []converter `toml:"titlecase"` Trim []converter `toml:"trim"` TrimLeft []converter `toml:"trim_left"` TrimRight []converter `toml:"trim_right"` @@ -55,6 +56,10 @@ const sampleConfig = ` # field = "uri_stem" # dest = "uri_stem_normalised" + ## Convert a field value to titlecase + # [[processors.strings.titlecase]] + # field = "status" + ## Trim leading and trailing whitespace using the default cutset # [[processors.strings.trim]] # field = "message" @@ -235,6 +240,10 @@ func (s *Strings) initOnce() { c.fn = strings.ToUpper s.converters = append(s.converters, c) }
+ for _, c := range s.Titlecase { + c.fn = strings.Title + s.converters = append(s.converters, c) + } for _, c := range s.Trim { c := c if c.Cutset != "" { diff --git a/plugins/processors/strings/strings_test.go b/plugins/processors/strings/strings_test.go index ae35acecf..2c1be510e 100644 --- a/plugins/processors/strings/strings_test.go +++ b/plugins/processors/strings/strings_test.go @@ -78,6 +78,21 @@ func TestFieldConversions(t *testing.T) { require.Equal(t, "/MIXED/CASE/PATH/?FROM=-1D&TO=NOW", fv) }, }, + { + name: "Should change existing field to titlecase", + plugin: &Strings{ + Titlecase: []converter{ + { + Field: "request", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "/Mixed/CASE/PaTH/?From=-1D&To=Now", fv) + }, + }, { name: "Should add new lowercase field", plugin: &Strings{ @@ -331,6 +346,7 @@ func TestFieldKeyConversions(t *testing.T) { // Tag/field key multiple executions occur in the following order: (initOnce) // Lowercase // Uppercase + // Titlecase // Trim // TrimLeft // TrimRight @@ -595,6 +611,30 @@ func TestTagConversions(t *testing.T) { require.Equal(t, "MIXEDCASE_HOSTNAME", tv) }, }, + { + name: "Should add new titlecase tag", + plugin: &Strings{ + Titlecase: []converter{ + { + Tag: "s-computername", + Dest: "s-computername_titlecase", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + tv, ok := actual.GetTag("verb") + require.True(t, ok) + require.Equal(t, "GET", tv) + + tv, ok = actual.GetTag("s-computername") + require.True(t, ok) + require.Equal(t, "MIXEDCASE_hostname", tv) + + tv, ok = actual.GetTag("s-computername_titlecase") + require.True(t, ok) + require.Equal(t, "MIXEDCASE_hostname", tv) + }, + }, } for _, tt := range tests { @@ -736,6 +776,11 @@ func TestMultipleConversions(t *testing.T) { Tag: "verb", }, }, + Titlecase: []converter{ + { + Field: "status", + }, + }, Replace: []converter{ { Tag: "foo", @@ -763,6 +808,7 @@ func TestMultipleConversions(t *testing.T) { "cs-host": "AAAbbb", "ignore_number": int64(200), "ignore_bool": true, + "status": "green", }, time.Now(), ) @@ -775,6 +821,7 @@ func TestMultipleConversions(t *testing.T) { "ignore_bool": true, "cs-host": "AAAbbb", "cs-host_lowercase": "aaabbb", + "status": "Green", } expectedTags := map[string]string{ "verb": "GET", From 62ffd7172fa4e929309cbbb6300a049fd43e55eb Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 4 Feb 2020 17:33:57 -0800 Subject: [PATCH 1513/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d7b9a1b77..e996aa8bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ - [#6921](https://github.com/influxdata/telegraf/pull/6921): Add udp internal metrics for the statsd input. - [#6914](https://github.com/influxdata/telegraf/pull/6914): Add replica set tag to mongodb input. - [#6935](https://github.com/influxdata/telegraf/pull/6935): Add counters for merged reads and writes to diskio input. +- [#6982](https://github.com/influxdata/telegraf/pull/6982): Add support for titlecase transformation to strings processor. 
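+		// Note: strings.Title does not treat '_' as a word separator, so the
+		// titlecased value below is expected to match the original tag value.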
#### Bugfixes From 15d016692287aaf6fb5a95e27406ad9610a3a21c Mon Sep 17 00:00:00 2001 From: RobMalvern Date: Thu, 6 Feb 2020 20:40:03 +0000 Subject: [PATCH 1514/1815] Add template processor (#6494) --- plugins/processors/template/README.md | 26 ++++++ plugins/processors/template/template.go | 63 +++++++++++++++ .../processors/template/template_metric.go | 28 +++++++ plugins/processors/template/template_test.go | 80 +++++++++++++++++++ 4 files changed, 197 insertions(+) create mode 100644 plugins/processors/template/README.md create mode 100644 plugins/processors/template/template.go create mode 100644 plugins/processors/template/template_metric.go create mode 100644 plugins/processors/template/template_test.go diff --git a/plugins/processors/template/README.md b/plugins/processors/template/README.md new file mode 100644 index 000000000..bd336f045 --- /dev/null +++ b/plugins/processors/template/README.md @@ -0,0 +1,26 @@ +# Template Processor + +The `template` processor applies a go template to tag, field, measurement and time values to create a new tag. + +Golang [Template Documentation] + +### Configuration + +```toml + # Concatenate two tags to create a new tag + [[processors.template]] + ## Tag to create + tag = "topic" + ## Template to create tag + # Note: Single quotes (') are used, so the double quotes (") don't need escaping (\") + template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}' +``` + +### Example + +```diff +- cpu,level=debug,hostname=localhost value=42i ++ cpu,level=debug,hostname=localhost,topic=localhost.debug value=42i +``` + +[Template Documentation]:https://golang.org/pkg/text/template/ \ No newline at end of file diff --git a/plugins/processors/template/template.go b/plugins/processors/template/template.go new file mode 100644 index 000000000..20da631a8 --- /dev/null +++ b/plugins/processors/template/template.go @@ -0,0 +1,63 @@ +package template + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" + "strings" + "text/template" +) + +type TemplateProcessor struct { + Tag string `toml:"tag"` + Template string `toml:"template"` + tmpl *template.Template +} + +const sampleConfig = ` + ## Concatenate two tags to create a new tag + # [[processors.template]] + # ## Tag to create + # tag = "topic" + # ## Template to create tag + # Note: Single quotes (') are used, so the double quotes (") don't need escaping (\") + # template = '{{.Tag "hostname"}}.{{ .Tag "level" }}' +` + +func (r *TemplateProcessor) SampleConfig() string { + return sampleConfig +} + +func (r *TemplateProcessor) Description() string { + return "Uses a Go template to create a new tag" +} + +func (r *TemplateProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric { + // for each metric in "in" array + for _, metric := range in { + var b strings.Builder + newM := TemplateMetric{metric} + + // supply TemplateMetric and Template from configuration to Template.Execute + err := r.tmpl.Execute(&b, &newM) + if err != nil { + panic(err) + } + + metric.AddTag(r.Tag, b.String()) + } + return in +} + +func (r *TemplateProcessor) Init() error { + // create template + t, err := template.New("configured_template").Parse(r.Template) + + r.tmpl = t + return err +} + +func init() { + processors.Add("printer", func() telegraf.Processor { + return &TemplateProcessor{} + }) +} diff --git a/plugins/processors/template/template_metric.go b/plugins/processors/template/template_metric.go new file mode 100644 index 000000000..47d86ec57 --- /dev/null +++ 
b/plugins/processors/template/template_metric.go @@ -0,0 +1,28 @@ +package template + +import ( + "github.com/influxdata/telegraf" + "time" +) + +type TemplateMetric struct { + metric telegraf.Metric +} + +func (m *TemplateMetric) Name() string { + return m.metric.Name() +} + +func (m *TemplateMetric) Tag(key string) string { + tagString, _ := m.metric.GetTag(key) + return tagString +} + +func (m *TemplateMetric) Field(key string) interface{} { + field, _ := m.metric.GetField(key) + return field +} + +func (m *TemplateMetric) Time() time.Time { + return m.metric.Time() +}
diff --git a/plugins/processors/template/template_test.go b/plugins/processors/template/template_test.go new file mode 100644 index 000000000..b8c195cda --- /dev/null +++ b/plugins/processors/template/template_test.go @@ -0,0 +1,80 @@ +package template + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +func TestTagTemplateConcatenate(t *testing.T) { + now := time.Now() + + // Create Template processor + tmp := TemplateProcessor{Tag: "topic", Template: `{{.Tag "hostname"}}.{{ .Tag "level" }}`} + // manually init + err := tmp.Init() + + if err != nil { + panic(err) + } + + // create metric for testing + input := []telegraf.Metric{testutil.MustMetric("Tags", map[string]string{"hostname": "localhost", "level": "debug"}, nil, now)} + + // act + actual := tmp.Apply(input[0]) + + // assert + expected := []telegraf.Metric{testutil.MustMetric("Tags", map[string]string{"hostname": "localhost", "level": "debug", "topic": "localhost.debug"}, nil, now)} + testutil.RequireMetricsEqual(t, expected, actual) +} + +func TestMetricMissingTagsIsNotLost(t *testing.T) { + now := time.Now() + + // Create Template processor + tmp := TemplateProcessor{Tag: "topic", Template: `{{.Tag "hostname"}}.{{ .Tag "level" }}`} + // manually init + err := tmp.Init() + + if err != nil { + panic(err) + } + + // create metrics for testing + m1 := testutil.MustMetric("Works", map[string]string{"hostname": "localhost", "level": "debug"}, nil, now) + m2 := testutil.MustMetric("Fails", map[string]string{"hostname": "localhost"}, nil, now) + + // act + actual := tmp.Apply(m1, m2) + + // assert + // make sure no metrics are lost when a template process fails + assert.Equal(t, 2, len(actual), "Number of metrics input should equal number of metrics output") +} + +func TestTagAndFieldConcatenate(t *testing.T) { + now := time.Now() + + // Create Template processor + tmp := TemplateProcessor{Tag: "LocalTemp", Template: `{{.Tag "location"}} is {{ .Field "temperature" }}`} + // manually init + err := tmp.Init() + + if err != nil { + panic(err) + } + + // create metric for testing + m1 := testutil.MustMetric("weather", map[string]string{"location": "us-midwest"}, map[string]interface{}{"temperature": "too warm"}, now) + + // act + actual := tmp.Apply(m1) + + // assert + expected := []telegraf.Metric{testutil.MustMetric("weather", map[string]string{"location": "us-midwest", "LocalTemp": "us-midwest is too warm"}, map[string]interface{}{"temperature": "too warm"}, now)} + testutil.RequireMetricsEqual(t, expected, actual) +}
From 5f2f2ff33d5c6bc810f8b66dcbee515cc03a3759 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 6 Feb 2020 13:34:36 -0800 Subject: [PATCH 1515/1815] Update readme and changelog for template processor --- CHANGELOG.md | 4 ++ README.md | 29 ++++++------ plugins/processors/all/all.go | 1 + plugins/processors/template/README.md | 59 
+++++++++++++++++++------ plugins/processors/template/template.go | 29 ++++++------ 5 files changed, 82 insertions(+), 40 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e996aa8bf..350a7530a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,10 @@ - [modbus](/plugins/inputs/modbus/README.md) - Contributed by @garciaolais - [monit](/plugins/inputs/monit/README.md) - Contributed by @SirishaGopigiri +#### New Processors + +- [template](/plugins/processors/template/README.md) - Contributed by @RobMalvern + #### New Outputs - [warp10](/plugins/outputs/warp10/README.md) - Contributed by @aurrelhebert diff --git a/README.md b/README.md index 7df2067cc..0df0f003c 100644 --- a/README.md +++ b/README.md @@ -352,20 +352,21 @@ For documentation on the latest development code see the [documentation index][d ## Processor Plugins -* [clone](./plugins/processors/clone) -* [converter](./plugins/processors/converter) -* [date](./plugins/processors/date) -* [enum](./plugins/processors/enum) -* [override](./plugins/processors/override) -* [parser](./plugins/processors/parser) -* [pivot](./plugins/processors/pivot) -* [printer](./plugins/processors/printer) -* [regex](./plugins/processors/regex) -* [rename](./plugins/processors/rename) -* [strings](./plugins/processors/strings) -* [tag_limit](./plugins/processors/tag_limit) -* [topk](./plugins/processors/topk) -* [unpivot](./plugins/processors/unpivot) +* [clone](/plugins/processors/clone) +* [converter](/plugins/processors/converter) +* [date](/plugins/processors/date) +* [enum](/plugins/processors/enum) +* [override](/plugins/processors/override) +* [parser](/plugins/processors/parser) +* [pivot](/plugins/processors/pivot) +* [printer](/plugins/processors/printer) +* [regex](/plugins/processors/regex) +* [rename](/plugins/processors/rename) +* [strings](/plugins/processors/strings) +* [tag_limit](/plugins/processors/tag_limit) +* [template](/plugins/processors/template) +* [topk](/plugins/processors/topk) +* [unpivot](/plugins/processors/unpivot) ## Aggregator Plugins diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go index e0f69d787..ba72ee10e 100644 --- a/plugins/processors/all/all.go +++ b/plugins/processors/all/all.go @@ -13,6 +13,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/processors/rename" _ "github.com/influxdata/telegraf/plugins/processors/strings" _ "github.com/influxdata/telegraf/plugins/processors/tag_limit" + _ "github.com/influxdata/telegraf/plugins/processors/template" _ "github.com/influxdata/telegraf/plugins/processors/topk" _ "github.com/influxdata/telegraf/plugins/processors/unpivot" ) diff --git a/plugins/processors/template/README.md b/plugins/processors/template/README.md index bd336f045..f08a96c6b 100644 --- a/plugins/processors/template/README.md +++ b/plugins/processors/template/README.md @@ -1,26 +1,59 @@ # Template Processor -The `template` processor applies a go template to tag, field, measurement and time values to create a new tag. +The `template` processor applies a Go template to metrics to generate a new +tag. The primary use case of this plugin is to create a tag that can be used +for dynamic routing to multiple output plugins or using an output specific +routing option. -Golang [Template Documentation] +The template has access to each metric's measurement name, tags, fields, and +timestamp using the [interface in `/template_metric.go`](template_metric.go). + +Read the full [Go Template Documentation][]. 
### Configuration ```toml - # Concatenate two tags to create a new tag - [[processors.template]] - ## Tag to create - tag = "topic" - ## Template to create tag - # Note: Single quotes (') are used, so the double quotes (") don't need escaping (\") - template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}' +[[processors.template]] + ## Tag to set with the output of the template. + tag = "topic" + + ## Go template used to create the tag value. In order to ease TOML + ## escaping requirements, you may wish to use single quotes around the + ## template string. + template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}' ``` ### Example -```diff -- cpu,level=debug,hostname=localhost value=42i -+ cpu,level=debug,hostname=localhost,topic=localhost.debug value=42i +Combine multiple tags to create a single tag: +```toml +[[processors.template]] + tag = "topic" + template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}' ``` -[Template Documentation]:https://golang.org/pkg/text/template/ \ No newline at end of file +```diff +- cpu,level=debug,hostname=localhost time_idle=42 ++ cpu,level=debug,hostname=localhost,topic=localhost.debug time_idle=42 +``` + +Add measurement name as a tag: +```toml +[[processors.template]] + tag = "measurement" + template = '{{ .Name }}' +``` + +```diff +- cpu,hostname=localhost time_idle=42 ++ cpu,hostname=localhost,measurement=cpu time_idle=42 +``` + +Add the year as a tag, similar to the date processor: +```toml +[[processors.template]] + tag = "year" + template = '{{.Time.UTC.Year}}' +``` + +[Go Template Documentation]: https://golang.org/pkg/text/template/
diff --git a/plugins/processors/template/template.go b/plugins/processors/template/template.go index 20da631a8..f4470a07c 100644 --- a/plugins/processors/template/template.go +++ b/plugins/processors/template/template.go @@ -1,26 +1,28 @@ package template import ( - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/processors" "strings" "text/template" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" ) type TemplateProcessor struct { - Tag string `toml:"tag"` - Template string `toml:"template"` + Tag string `toml:"tag"` + Template string `toml:"template"` + Log telegraf.Logger `toml:"-"` tmpl *template.Template } const sampleConfig = ` - ## Concatenate two tags to create a new tag - # [[processors.template]] - # ## Tag to create - # tag = "topic" - # ## Template to create tag - # Note: Single quotes (') are used, so the double quotes (") don't need escaping (\") - # template = '{{.Tag "hostname"}}.{{ .Tag "level" }}' + ## Tag to set with the output of the template. + tag = "topic" + + ## Go template used to create the tag value. In order to ease TOML + ## escaping requirements, you may wish to use single quotes around the + ## template string.
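+  ##
+  ## The template may also reference the metric's name, fields, and timestamp
+  ## via .Name, .Field, and .Time (see template_metric.go).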
+ template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}' ` func (r *TemplateProcessor) SampleConfig() string { @@ -40,7 +42,8 @@ func (r *TemplateProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric { // supply TemplateMetric and Template from configuration to Template.Execute err := r.tmpl.Execute(&b, &newM) if err != nil { - panic(err) + r.Log.Errorf("failed to execute template: %v", err) + continue } metric.AddTag(r.Tag, b.String()) @@ -57,7 +60,7 @@ func (r *TemplateProcessor) Init() error { } func init() { - processors.Add("printer", func() telegraf.Processor { + processors.Add("template", func() telegraf.Processor { return &TemplateProcessor{} }) } From d46f94112cccdf69480c7124ccd2b980c6915035 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 6 Feb 2020 14:18:42 -0800 Subject: [PATCH 1516/1815] Use require in cisco mdt tests to avoid follow on errors (#6984) --- .../cisco_telemetry_mdt.go | 11 +-- .../cisco_telemetry_mdt_test.go | 84 ++++++++++++------- 2 files changed, 58 insertions(+), 37 deletions(-) diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go index 37ccff926..28866ce67 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -12,18 +12,15 @@ import ( "sync" "time" - "github.com/influxdata/telegraf/metric" - dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" "github.com/golang/protobuf/proto" "github.com/influxdata/telegraf" internaltls "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/inputs" "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - - // Register GRPC gzip decoder to support compressed telemetry + "google.golang.org/grpc/credentials" // Register GRPC gzip decoder to support compressed telemetry _ "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/peer" ) @@ -496,6 +493,10 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie delete(tags, prefix) } +func (c *CiscoTelemetryMDT) Address() net.Addr { + return c.listener.Addr() +} + // Stop listener and cleanup func (c *CiscoTelemetryMDT) Stop() { if c.grpcServer != nil { diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go index 5261bd399..ea200bc74 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go @@ -7,20 +7,20 @@ import ( "net" "testing" - "github.com/golang/protobuf/proto" - dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" + "github.com/golang/protobuf/proto" "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "google.golang.org/grpc" ) func TestHandleTelemetryTwoSimple(t *testing.T) { c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "dummy", Aliases: map[string]string{"alias": "type:model/some/path"}} acc := &testutil.Accumulator{} - c.Start(acc) + err := c.Start(acc) + // error is expected since we are passing in dummy transport + require.Error(t, err) telemetry := &telemetry.Telemetry{ MsgTimestamp: 1543236572000, @@ -81,7 +81,7 @@ func TestHandleTelemetryTwoSimple(t *testing.T) { data, _ := 
proto.Marshal(telemetry) c.handleTelemetry(data) - assert.Empty(t, acc.Errors) + require.Empty(t, acc.Errors) tags := map[string]string{"path": "type:model/some/path", "name": "str", "uint64": "1234", "source": "hostname", "subscription": "subscription"} fields := map[string]interface{}{"bool": true} @@ -95,7 +95,9 @@ func TestHandleTelemetryTwoSimple(t *testing.T) { func TestHandleTelemetrySingleNested(t *testing.T) { c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "dummy", Aliases: map[string]string{"nested": "type:model/nested/path"}} acc := &testutil.Accumulator{} - c.Start(acc) + err := c.Start(acc) + // error is expected since we are passing in dummy transport + require.Error(t, err) telemetry := &telemetry.Telemetry{ MsgTimestamp: 1543236572000, @@ -150,7 +152,7 @@ func TestHandleTelemetrySingleNested(t *testing.T) { data, _ := proto.Marshal(telemetry) c.handleTelemetry(data) - assert.Empty(t, acc.Errors) + require.Empty(t, acc.Errors) tags := map[string]string{"path": "type:model/nested/path", "level": "3", "source": "hostname", "subscription": "subscription"} fields := map[string]interface{}{"nested/value/foo": "bar"} @@ -160,7 +162,9 @@ func TestHandleTelemetrySingleNested(t *testing.T) { func TestHandleEmbeddedTags(t *testing.T) { c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"extra": "type:model/extra"}, EmbeddedTags: []string{"type:model/extra/list/name"}} acc := &testutil.Accumulator{} - c.Start(acc) + err := c.Start(acc) + // error is expected since we are passing in dummy transport + require.Error(t, err) telemetry := &telemetry.Telemetry{ MsgTimestamp: 1543236572000, @@ -217,7 +221,7 @@ func TestHandleEmbeddedTags(t *testing.T) { data, _ := proto.Marshal(telemetry) c.handleTelemetry(data) - assert.Empty(t, acc.Errors) + require.Empty(t, acc.Errors) tags1 := map[string]string{"path": "type:model/extra", "foo": "bar", "source": "hostname", "subscription": "subscription", "list/name": "entry1"} fields1 := map[string]interface{}{"list/test": "foo"} @@ -230,7 +234,9 @@ func TestHandleEmbeddedTags(t *testing.T) { func TestHandleNXAPI(t *testing.T) { c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"nxapi": "show nxapi"}} acc := &testutil.Accumulator{} - c.Start(acc) + err := c.Start(acc) + // error is expected since we are passing in dummy transport + require.Error(t, err) telemetry := &telemetry.Telemetry{ MsgTimestamp: 1543236572000, @@ -303,7 +309,7 @@ func TestHandleNXAPI(t *testing.T) { data, _ := proto.Marshal(telemetry) c.handleTelemetry(data) - assert.Empty(t, acc.Errors) + require.Empty(t, acc.Errors) tags1 := map[string]string{"path": "show nxapi", "foo": "bar", "TABLE_nxapi": "i1", "source": "hostname", "subscription": "subscription"} fields1 := map[string]interface{}{"value": "foo"} @@ -316,7 +322,9 @@ func TestHandleNXAPI(t *testing.T) { func TestHandleNXDME(t *testing.T) { c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"dme": "sys/dme"}} acc := &testutil.Accumulator{} - c.Start(acc) + err := c.Start(acc) + // error is expected since we are passing in dummy transport + require.Error(t, err) telemetry := &telemetry.Telemetry{ MsgTimestamp: 1543236572000, @@ -377,7 +385,7 @@ func TestHandleNXDME(t *testing.T) { data, _ := proto.Marshal(telemetry) c.handleTelemetry(data) - assert.Empty(t, acc.Errors) + require.Empty(t, acc.Errors) tags1 := map[string]string{"path": "sys/dme", "foo": "bar", "fooEntity": "some-rn", "source": "hostname", "subscription": "subscription"} fields1 := 
map[string]interface{}{"value": "foo"} @@ -385,9 +393,10 @@ func TestHandleNXDME(t *testing.T) { } func TestTCPDialoutOverflow(t *testing.T) { - c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "tcp", ServiceAddress: "127.0.0.1:57000"} + c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "tcp", ServiceAddress: "127.0.0.1:0"} acc := &testutil.Accumulator{} - assert.Nil(t, c.Start(acc)) + err := c.Start(acc) + require.NoError(t, err) hdr := struct { MsgType uint16 @@ -397,14 +406,16 @@ func TestTCPDialoutOverflow(t *testing.T) { MsgLen uint32 }{MsgLen: uint32(1000000000)} - conn, _ := net.Dial("tcp", "127.0.0.1:57000") + addr := c.Address() + conn, err := net.Dial(addr.Network(), addr.String()) + require.NoError(t, err) binary.Write(conn, binary.BigEndian, hdr) conn.Read([]byte{0}) conn.Close() c.Stop() - assert.Contains(t, acc.Errors, errors.New("dialout packet too long: 1000000000")) + require.Contains(t, acc.Errors, errors.New("dialout packet too long: 1000000000")) } func mockTelemetryMessage() *telemetry.Telemetry { @@ -441,10 +452,11 @@ func mockTelemetryMessage() *telemetry.Telemetry { } func TestTCPDialoutMultiple(t *testing.T) { - c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "tcp", ServiceAddress: "127.0.0.1:57000", Aliases: map[string]string{ + c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "tcp", ServiceAddress: "127.0.0.1:0", Aliases: map[string]string{ "some": "type:model/some/path", "parallel": "type:model/parallel/path", "other": "type:model/other/path"}} acc := &testutil.Accumulator{} - assert.Nil(t, c.Start(acc)) + err := c.Start(acc) + require.NoError(t, err) telemetry := mockTelemetryMessage() @@ -456,14 +468,18 @@ func TestTCPDialoutMultiple(t *testing.T) { MsgLen uint32 }{} - conn, _ := net.Dial("tcp", "127.0.0.1:57000") + addr := c.Address() + conn, err := net.Dial(addr.Network(), addr.String()) + require.NoError(t, err) data, _ := proto.Marshal(telemetry) hdr.MsgLen = uint32(len(data)) binary.Write(conn, binary.BigEndian, hdr) conn.Write(data) - conn2, _ := net.Dial("tcp", "127.0.0.1:57000") + conn2, err := net.Dial(addr.Network(), addr.String()) + require.NoError(t, err) + telemetry.EncodingPath = "type:model/parallel/path" data, _ = proto.Marshal(telemetry) hdr.MsgLen = uint32(len(data)) @@ -484,7 +500,7 @@ func TestTCPDialoutMultiple(t *testing.T) { conn.Close() // We use the invalid dialout flags to let the server close the connection - assert.Equal(t, acc.Errors, []error{errors.New("invalid dialout flags: 257"), errors.New("invalid dialout flags: 257")}) + require.Equal(t, acc.Errors, []error{errors.New("invalid dialout flags: 257"), errors.New("invalid dialout flags: 257")}) tags := map[string]string{"path": "type:model/some/path", "name": "str", "source": "hostname", "subscription": "subscription"} fields := map[string]interface{}{"value": int64(-1)} @@ -500,11 +516,13 @@ func TestTCPDialoutMultiple(t *testing.T) { } func TestGRPCDialoutError(t *testing.T) { - c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "grpc", ServiceAddress: "127.0.0.1:57001"} + c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "grpc", ServiceAddress: "127.0.0.1:0"} acc := &testutil.Accumulator{} - assert.Nil(t, c.Start(acc)) + err := c.Start(acc) + require.NoError(t, err) - conn, _ := grpc.Dial("127.0.0.1:57001", grpc.WithInsecure()) + addr := c.Address() + conn, _ := grpc.Dial(addr.String(), grpc.WithInsecure()) client := dialout.NewGRPCMdtDialoutClient(conn) stream, _ := client.MdtDialout(context.Background()) @@ -515,17 
+533,19 @@ func TestGRPCDialoutError(t *testing.T) { stream.Recv() c.Stop() - assert.Equal(t, acc.Errors, []error{errors.New("GRPC dialout error: foobar")}) + require.Equal(t, acc.Errors, []error{errors.New("GRPC dialout error: foobar")}) } func TestGRPCDialoutMultiple(t *testing.T) { - c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "grpc", ServiceAddress: "127.0.0.1:57001", Aliases: map[string]string{ + c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "grpc", ServiceAddress: "127.0.0.1:0", Aliases: map[string]string{ "some": "type:model/some/path", "parallel": "type:model/parallel/path", "other": "type:model/other/path"}} acc := &testutil.Accumulator{} - assert.Nil(t, c.Start(acc)) + err := c.Start(acc) + require.NoError(t, err) telemetry := mockTelemetryMessage() - conn, _ := grpc.Dial("127.0.0.1:57001", grpc.WithInsecure(), grpc.WithBlock()) + addr := c.Address() + conn, _ := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock()) client := dialout.NewGRPCMdtDialoutClient(conn) stream, _ := client.MdtDialout(context.TODO()) @@ -533,7 +553,7 @@ func TestGRPCDialoutMultiple(t *testing.T) { args := &dialout.MdtDialoutArgs{Data: data, ReqId: 456} stream.Send(args) - conn2, _ := grpc.Dial("127.0.0.1:57001", grpc.WithInsecure(), grpc.WithBlock()) + conn2, _ := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock()) client2 := dialout.NewGRPCMdtDialoutClient(conn2) stream2, _ := client2.MdtDialout(context.TODO()) @@ -555,7 +575,7 @@ func TestGRPCDialoutMultiple(t *testing.T) { c.Stop() conn.Close() - assert.Equal(t, acc.Errors, []error{errors.New("GRPC dialout error: testclose"), errors.New("GRPC dialout error: testclose")}) + require.Equal(t, acc.Errors, []error{errors.New("GRPC dialout error: testclose"), errors.New("GRPC dialout error: testclose")}) tags := map[string]string{"path": "type:model/some/path", "name": "str", "source": "hostname", "subscription": "subscription"} fields := map[string]interface{}{"value": int64(-1)} From 0710cc74880919af6ae470d968fdc8ab0d84cd5a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 10 Feb 2020 14:18:30 -0800 Subject: [PATCH 1517/1815] Parse NaN values from summary types in prometheus input (#6997) --- plugins/inputs/prometheus/parser.go | 14 ++--- plugins/inputs/prometheus/prometheus_test.go | 65 ++++++++++++++++++++ 2 files changed, 71 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go index 9e79249ec..8f7061df8 100644 --- a/plugins/inputs/prometheus/parser.go +++ b/plugins/inputs/prometheus/parser.go @@ -15,7 +15,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/matttproud/golang_protobuf_extensions/pbutil" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" @@ -115,14 +114,13 @@ func makeQuantilesV2(m *dto.Metric, tags map[string]string, metricName string, m for _, q := range m.GetSummary().Quantile { newTags := tags fields = make(map[string]interface{}) - if !math.IsNaN(q.GetValue()) { - newTags["quantile"] = fmt.Sprint(q.GetQuantile()) - fields[metricName] = float64(q.GetValue()) - quantileMetric, err := metric.New("prometheus", newTags, fields, t, valueType(metricType)) - if err == nil { - metrics = append(metrics, quantileMetric) - } + newTags["quantile"] = fmt.Sprint(q.GetQuantile()) + fields[metricName] = float64(q.GetValue()) + + quantileMetric, err := metric.New("prometheus", newTags, fields, t, valueType(metricType)) + if err == nil { + metrics = 
append(metrics, quantileMetric) } } return metrics diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index 78629d3d7..d33cba273 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -2,12 +2,14 @@ package prometheus import ( "fmt" + "math" "net/http" "net/http/httptest" "net/url" "testing" "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -148,6 +150,69 @@ func TestPrometheusGeneratesSummaryMetricsV2(t *testing.T) { } +func TestSummaryMayContainNaN(t *testing.T) { + const data = `# HELP go_gc_duration_seconds A summary of the GC invocation durations. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} NaN +go_gc_duration_seconds{quantile="1"} NaN +go_gc_duration_seconds_sum 42.0 +go_gc_duration_seconds_count 42 +` + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, data) + })) + defer ts.Close() + + p := &Prometheus{ + URLs: []string{ts.URL}, + URLTag: "", + MetricVersion: 2, + } + + var acc testutil.Accumulator + + err := p.Gather(&acc) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{ + "quantile": "0", + }, + map[string]interface{}{ + "go_gc_duration_seconds": math.NaN(), + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "quantile": "1", + }, + map[string]interface{}{ + "go_gc_duration_seconds": math.NaN(), + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "go_gc_duration_seconds_sum": 42.0, + "go_gc_duration_seconds_count": 42.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), + testutil.IgnoreTime(), testutil.SortMetrics()) +} + func TestPrometheusGeneratesGaugeMetricsV2(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, sampleGaugeTextFormat) From d3b89ec51ff322983655cd195a907897edbfc4c0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 10 Feb 2020 14:20:03 -0800 Subject: [PATCH 1518/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 350a7530a..e45b4e9ad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -50,6 +50,12 @@ - [#6397](https://github.com/influxdata/telegraf/issues/6397): Fix conversion to floats in AzureDBResourceStats query in the sqlserver input. - [#6867](https://github.com/influxdata/telegraf/issues/6867): Fix case sensitive collation in sqlserver input. +## v1.13.4 [unreleased] + +#### Bugfixes + +- [#6997](https://github.com/influxdata/telegraf/issues/6997): Parse NaN values from summary types in prometheus input. 
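An aside on the behavior change above: with the `math.IsNaN` guard removed, summary quantiles now flow through as genuine NaN floats, and NaN never compares equal to itself, so downstream code must test for it explicitly rather than with `==`. A minimal standalone sketch of that check (the field names and values are illustrative, not taken from the patch):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// Quantile values as a scrape of the summary above might yield them.
	fields := map[string]float64{
		`go_gc_duration_seconds{quantile="0"}`: math.NaN(),
		`go_gc_duration_seconds{quantile="1"}`: 0.042,
	}

	for name, value := range fields {
		// value == math.NaN() is always false; IsNaN is the only safe test.
		if math.IsNaN(value) {
			fmt.Printf("%s: no observations yet\n", name)
			continue
		}
		fmt.Printf("%s = %g\n", name, value)
	}
}
```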
+ ## v1.13.3 [2020-02-04] #### Bugfixes From 5b8c71e61a0499edc00137bb6ed36998af957d40 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 10 Feb 2020 14:22:07 -0800 Subject: [PATCH 1519/1815] Search for chronyc only when chrony input plugin is enabled (#7005) --- plugins/inputs/chrony/chrony.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/chrony/chrony.go b/plugins/inputs/chrony/chrony.go index 6173357cf..3fe18e89c 100644 --- a/plugins/inputs/chrony/chrony.go +++ b/plugins/inputs/chrony/chrony.go @@ -33,11 +33,16 @@ func (*Chrony) SampleConfig() string { ` } -func (c *Chrony) Gather(acc telegraf.Accumulator) error { - if len(c.path) == 0 { +func (c *Chrony) Init() error { + var err error + c.path, err = exec.LookPath("chronyc") + if err != nil { return errors.New("chronyc not found: verify that chrony is installed and that chronyc is in your PATH") } + return nil +} +func (c *Chrony) Gather(acc telegraf.Accumulator) error { flags := []string{} if !c.DNSLookup { flags = append(flags, "-n") @@ -120,12 +125,7 @@ func processChronycOutput(out string) (map[string]interface{}, map[string]string } func init() { - c := Chrony{} - path, _ := exec.LookPath("chronyc") - if len(path) > 0 { - c.path = path - } inputs.Add("chrony", func() telegraf.Input { - return &c + return &Chrony{} }) } From c1eb0c8501ff619e718250b99a8863ceadda68e4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 10 Feb 2020 14:23:29 -0800 Subject: [PATCH 1520/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e45b4e9ad..da1b72bf6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,7 @@ - [#6397](https://github.com/influxdata/telegraf/issues/6397): Fix conversion to floats in AzureDBResourceStats query in the sqlserver input. - [#6867](https://github.com/influxdata/telegraf/issues/6867): Fix case sensitive collation in sqlserver input. +- [#7005](https://github.com/influxdata/telegraf/pull/7005): Search for chronyc only when chrony input plugin is enabled. ## v1.13.4 [unreleased] From c681eb3524127e3c61422e91d96f4d70fd6981aa Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 10 Feb 2020 14:24:23 -0800 Subject: [PATCH 1521/1815] Fix issue number in changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index da1b72bf6..64534e609 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -55,7 +55,7 @@ #### Bugfixes -- [#6997](https://github.com/influxdata/telegraf/issues/6997): Parse NaN values from summary types in prometheus input. +- [#6988](https://github.com/influxdata/telegraf/issues/6988): Parse NaN values from summary types in prometheus input. 
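The chrony commit above illustrates a pattern worth keeping in mind for any telegraf-style plugin: resolve external binaries in the plugin's `Init` method, which runs only when the plugin is actually enabled, rather than in the package `init` function, which runs in every binary that links the plugin. A hedged sketch of the pattern in isolation (the `Plugin` type is an illustrative stand-in; `exec.LookPath` is the only real dependency):

```go
package main

import (
	"errors"
	"fmt"
	"os/exec"
)

// Plugin is an illustrative stand-in for a telegraf input.
type Plugin struct {
	path string // resolved lazily in Init, never at package init
}

// Init resolves the external binary once, when the plugin is enabled, so a
// missing chronyc only matters to users who configured the plugin.
func (p *Plugin) Init() error {
	path, err := exec.LookPath("chronyc")
	if err != nil {
		return errors.New("chronyc not found: verify that chrony is installed and that chronyc is in your PATH")
	}
	p.path = path
	return nil
}

func main() {
	p := &Plugin{}
	if err := p.Init(); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("using", p.path)
}
```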
## v1.13.3 [2020-02-04] From 9d97ed22e63c7f6e77839c44d24459aba5ffb494 Mon Sep 17 00:00:00 2001 From: Antonio Garcia Date: Mon, 10 Feb 2020 18:54:33 -0600 Subject: [PATCH 1522/1815] Fix float conversion and startup connection issue in modbus input (#7002) --- plugins/inputs/modbus/modbus.go | 87 +++++++++++++++++----------- plugins/inputs/modbus/modbus_test.go | 12 ++-- 2 files changed, 59 insertions(+), 40 deletions(-) diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index d845ef8fe..d2e913039 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -40,7 +40,6 @@ type Modbus struct { type register struct { Type string RegistersRange []registerRange - ReadValue func(uint16, uint16) ([]byte, error) Fields []fieldContainer } @@ -48,7 +47,7 @@ type fieldContainer struct { Name string `toml:"name"` ByteOrder string `toml:"byte_order"` DataType string `toml:"data_type"` - Scale float32 `toml:"scale"` + Scale float64 `toml:"scale"` Address []uint16 `toml:"address"` value interface{} } @@ -155,13 +154,7 @@ func (m *Modbus) Init() error { return fmt.Errorf("device name is empty") } - err := connect(m) - if err != nil { - m.isConnected = false - return err - } - - err = m.InitRegister(m.DiscreteInputs, cDiscreteInputs) + err := m.InitRegister(m.DiscreteInputs, cDiscreteInputs) if err != nil { return err } @@ -224,21 +217,7 @@ func (m *Modbus) InitRegister(fields []fieldContainer, name string) error { } } - var fn func(uint16, uint16) ([]byte, error) - - if name == cDiscreteInputs { - fn = m.client.ReadDiscreteInputs - } else if name == cCoils { - fn = m.client.ReadCoils - } else if name == cInputRegisters { - fn = m.client.ReadInputRegisters - } else if name == cHoldingRegisters { - fn = m.client.ReadHoldingRegisters - } else { - return fmt.Errorf("not Valid function") - } - - m.registers = append(m.registers, register{name, registersRange, fn, fields}) + m.registers = append(m.registers, register{name, registersRange, fields}) return nil } @@ -306,6 +285,31 @@ func connect(m *Modbus) error { } } +func disconnect(m *Modbus) error { + u, err := url.Parse(m.Controller) + if err != nil { + return err + } + + switch u.Scheme { + case "tcp": + m.tcpHandler.Close() + return nil + case "file": + if m.TransmissionMode == "RTU" { + m.rtuHandler.Close() + return nil + } else if m.TransmissionMode == "ASCII" { + m.asciiHandler.Close() + return nil + } else { + return fmt.Errorf("invalid protocol '%s' - '%s' ", u.Scheme, m.TransmissionMode) + } + default: + return fmt.Errorf("invalid controller") + } +} + func validateFieldContainers(t []fieldContainer, n string) error { nameEncountered := map[string]bool{} for _, item := range t { @@ -379,13 +383,27 @@ func removeDuplicates(elements []uint16) []uint16 { return result } +func readRegisterValues(m *Modbus, rt string, rr registerRange) ([]byte, error) { + if rt == cDiscreteInputs { + return m.client.ReadDiscreteInputs(uint16(rr.address), uint16(rr.length)) + } else if rt == cCoils { + return m.client.ReadCoils(uint16(rr.address), uint16(rr.length)) + } else if rt == cInputRegisters { + return m.client.ReadInputRegisters(uint16(rr.address), uint16(rr.length)) + } else if rt == cHoldingRegisters { + return m.client.ReadHoldingRegisters(uint16(rr.address), uint16(rr.length)) + } else { + return []byte{}, fmt.Errorf("not Valid function") + } +} + func (m *Modbus) getFields() error { for _, register := range m.registers { rawValues := make(map[uint16][]byte) bitRawValues := make(map[uint16]uint16) for _, rr 
:= range register.RegistersRange { address := rr.address - readValues, err := register.ReadValue(uint16(rr.address), uint16(rr.length)) + readValues, err := readRegisterValues(m, register.Type, rr) if err != nil { return err } @@ -530,23 +548,23 @@ func format32(f string, r uint32) interface{} { } } -func scale16toFloat32(s float32, v uint16) float32 { - return float32(v) * s +func scale16toFloat32(s float64, v uint16) float64 { + return float64(v) * s } -func scale32toFloat32(s float32, v uint32) float32 { - return float32(v) * s +func scale32toFloat32(s float64, v uint32) float64 { + return float64(float64(v) * float64(s)) } -func scaleInt16(s float32, v int16) int16 { - return int16(float32(v) * s) +func scaleInt16(s float64, v int16) int16 { + return int16(float64(v) * s) } -func scaleUint16(s float32, v uint16) uint16 { - return uint16(float32(v) * s) +func scaleUint16(s float64, v uint16) uint16 { + return uint16(float64(v) * s) } -func scaleUint32(s float32, v uint32) uint32 { +func scaleUint32(s float64, v uint32) uint32 { return uint32(float64(v) * float64(s)) } @@ -562,6 +580,7 @@ func (m *Modbus) Gather(acc telegraf.Accumulator) error { err := m.getFields() if err != nil { + disconnect(m) m.isConnected = false return err } diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go index 3d54c68c5..3317067a8 100644 --- a/plugins/inputs/modbus/modbus_test.go +++ b/plugins/inputs/modbus/modbus_test.go @@ -125,7 +125,7 @@ func TestHoldingRegisters(t *testing.T) { quantity uint16 byteOrder string dataType string - scale float32 + scale float64 write []byte read interface{} }{ @@ -137,7 +137,7 @@ func TestHoldingRegisters(t *testing.T) { dataType: "FLOAT32", scale: 0.1, write: []byte{0x08, 0x98}, - read: float32(220), + read: float64(220), }, { name: "register0_register1_ab_float32", @@ -147,7 +147,7 @@ func TestHoldingRegisters(t *testing.T) { dataType: "FLOAT32", scale: 0.001, write: []byte{0x00, 0x00, 0x03, 0xE8}, - read: float32(1), + read: float64(1), }, { name: "register1_register2_abcd_float32", @@ -157,7 +157,7 @@ func TestHoldingRegisters(t *testing.T) { dataType: "FLOAT32", scale: 0.1, write: []byte{0x00, 0x00, 0x08, 0x98}, - read: float32(220), + read: float64(220), }, { name: "register3_register4_abcd_float32", @@ -167,7 +167,7 @@ func TestHoldingRegisters(t *testing.T) { dataType: "FLOAT32", scale: 0.1, write: []byte{0x00, 0x00, 0x08, 0x98}, - read: float32(220), + read: float64(220), }, { name: "register7_ab_float32", @@ -177,7 +177,7 @@ func TestHoldingRegisters(t *testing.T) { dataType: "FLOAT32", scale: 0.1, write: []byte{0x01, 0xF4}, - read: float32(50), + read: float64(50), }, { name: "register10_ab_uint16", From e122bc75036b9c6062b2727651d3b767f6220bbb Mon Sep 17 00:00:00 2001 From: Alvaro Olmedo Rodriguez Date: Wed, 12 Feb 2020 18:43:44 +0100 Subject: [PATCH 1523/1815] Fix units in redis documentation (#7016) --- plugins/inputs/redis/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/redis/README.md b/plugins/inputs/redis/README.md index 207a44750..aa10c2887 100644 --- a/plugins/inputs/redis/README.md +++ b/plugins/inputs/redis/README.md @@ -80,8 +80,8 @@ Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) a - instantaneous_ops_per_sec(int, number) - total_net_input_bytes(int, bytes) - total_net_output_bytes(int, bytes) - - instantaneous_input_kbps(float, bytes) - - instantaneous_output_kbps(float, bytes) + - instantaneous_input_kbps(float, KB/sec) + - 
instantaneous_output_kbps(float, KB/sec) - rejected_connections(int, number) - sync_full(int, number) - sync_partial_ok(int, number) From e082c8d800942cd749a755afa8101641d873e339 Mon Sep 17 00:00:00 2001 From: Quanah Gibson-Mount Date: Wed, 12 Feb 2020 17:33:48 -0800 Subject: [PATCH 1524/1815] Add support for MDB database information to openldap input (#6993) --- plugins/inputs/openldap/openldap.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/plugins/inputs/openldap/openldap.go b/plugins/inputs/openldap/openldap.go index a92a37371..2bfbc3fac 100644 --- a/plugins/inputs/openldap/openldap.go +++ b/plugins/inputs/openldap/openldap.go @@ -56,6 +56,12 @@ var attrTranslate = map[string]string{ "monitoredInfo": "", "monitorOpInitiated": "_initiated", "monitorOpCompleted": "_completed", + "olmMDBPagesMax": "_mdb_pages_max", + "olmMDBPagesUsed": "_mdb_pages_used", + "olmMDBPagesFree": "_mdb_pages_free", + "olmMDBReadersMax": "_mdb_readers_max", + "olmMDBReadersUsed": "_mdb_readers_used", + "olmMDBEntries": "_mdb_entries", } func (o *Openldap) SampleConfig() string { From 68678755de0ad7c016eb3e3de7b964f69d8ea565 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 12 Feb 2020 17:37:22 -0800 Subject: [PATCH 1525/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 64534e609..44fecf06e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ - [#6914](https://github.com/influxdata/telegraf/pull/6914): Add replica set tag to mongodb input. - [#6935](https://github.com/influxdata/telegraf/pull/6935): Add counters for merged reads and writes to diskio input. - [#6982](https://github.com/influxdata/telegraf/pull/6982): Add support for titlecase transformation to strings processor. +- [#6993](https://github.com/influxdata/telegraf/pull/6993): Add support for MDB database information to openldap input. #### Bugfixes From e3bc546a4601e9c40ceb8b7e8f021fe0c63659b0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 12 Feb 2020 17:34:15 -0800 Subject: [PATCH 1526/1815] Add release note regarding Windows Vista support --- CHANGELOG.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 44fecf06e..c59b44706 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -98,7 +98,10 @@ #### Release Notes -- Official packages built with Go 1.13.5. +- Official packages built with Go 1.13.5. This affects the minimum supported + version on several platforms, most notably requiring Windows 7 (2008 R2) or + later. For details, check the release notes for Go + [ports](https://golang.org/doc/go1.13#ports). - The `prometheus` input and `prometheus_client` output have a new mapping to and from Telegraf metrics, which can be enabled by setting `metric_version = 2`. The original mapping is deprecated. 
When both plugins have the same setting, From 6c839a33a42437596b329914979466b608f9bfde Mon Sep 17 00:00:00 2001 From: Mark Fletcher Date: Wed, 12 Feb 2020 20:53:11 -0800 Subject: [PATCH 1527/1815] Fix pgbouncer input when used with newer pgbouncer versions (#6820) --- plugins/inputs/pgbouncer/pgbouncer.go | 9 ++++++++- plugins/inputs/postgresql/service.go | 7 +++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/pgbouncer/pgbouncer.go b/plugins/inputs/pgbouncer/pgbouncer.go index edff10509..db010e5c1 100644 --- a/plugins/inputs/pgbouncer/pgbouncer.go +++ b/plugins/inputs/pgbouncer/pgbouncer.go @@ -2,6 +2,7 @@ package pgbouncer import ( "bytes" + "strconv" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" @@ -70,12 +71,18 @@ func (p *PgBouncer) Gather(acc telegraf.Accumulator) error { for col, val := range columnMap { _, ignore := ignoredColumns[col] if !ignore { - fields[col] = *val + // these values are returned by pgbouncer as strings, which we need to convert. + fields[col], _ = strconv.ParseUint((*val).(string), 10, 64) } } acc.AddFields("pgbouncer", fields, tags) } + err = rows.Err() + if err != nil { + return err + } + query = `SHOW POOLS` poolRows, err := p.DB.Query(query) diff --git a/plugins/inputs/postgresql/service.go b/plugins/inputs/postgresql/service.go index 9d3ab3963..96a9a6317 100644 --- a/plugins/inputs/postgresql/service.go +++ b/plugins/inputs/postgresql/service.go @@ -122,6 +122,13 @@ func (p *Service) Start(telegraf.Accumulator) (err error) { Name: "int8OID", OID: pgtype.Int8OID, }) + // Newer versions of pgbouncer need this defined. See the discussion here: + // https://github.com/jackc/pgx/issues/649 + info.RegisterDataType(pgtype.DataType{ + Value: &pgtype.OIDValue{}, + Name: "numericOID", + OID: pgtype.NumericOID, + }) return info, nil }, From cb2c26a3324964d432bf83f535eca7b4d7879db3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 12 Feb 2020 21:15:29 -0800 Subject: [PATCH 1528/1815] Fix support with pgbouncer <1.9 --- plugins/inputs/pgbouncer/pgbouncer.go | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/pgbouncer/pgbouncer.go b/plugins/inputs/pgbouncer/pgbouncer.go index db010e5c1..cbc38c869 100644 --- a/plugins/inputs/pgbouncer/pgbouncer.go +++ b/plugins/inputs/pgbouncer/pgbouncer.go @@ -70,9 +70,22 @@ func (p *PgBouncer) Gather(acc telegraf.Accumulator) error { fields := make(map[string]interface{}) for col, val := range columnMap { _, ignore := ignoredColumns[col] - if !ignore { - // these values are returned by pgbouncer as strings, which we need to convert. 
- fields[col], _ = strconv.ParseUint((*val).(string), 10, 64) + if ignore { + continue + } + + switch v := (*val).(type) { + case int64: + // Integer fields are returned in pgbouncer 1.5 through 1.9 + fields[col] = v + case string: + // Integer fields are returned in pgbouncer 1.12 + integer, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + + fields[col] = integer } } acc.AddFields("pgbouncer", fields, tags) From cb192c85b2a07089be219bfabe1a924b888dea50 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 12 Feb 2020 21:15:59 -0800 Subject: [PATCH 1529/1815] Update pgbouncer readme --- plugins/inputs/pgbouncer/README.md | 71 +++++++++++++++++++++++++++--- 1 file changed, 64 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/pgbouncer/README.md b/plugins/inputs/pgbouncer/README.md index dd8224e3e..58e3352c3 100644 --- a/plugins/inputs/pgbouncer/README.md +++ b/plugins/inputs/pgbouncer/README.md @@ -1,10 +1,29 @@ -# PgBouncer plugin +# PgBouncer Input Plugin -This PgBouncer plugin provides metrics for your PgBouncer load balancer. +The `pgbouncer` plugin provides metrics for your PgBouncer load balancer. -More information about the meaning of these metrics can be found in the [PgBouncer Documentation](https://pgbouncer.github.io/usage.html) +More information about the meaning of these metrics can be found in the +[PgBouncer Documentation](https://pgbouncer.github.io/usage.html). + +- PgBouncer minimum tested version: 1.5 + +### Configuration example + +```toml +[[inputs.pgbouncer]] + ## specify address via a url matching: + ## postgres://[pqgotest[:password]]@localhost[/dbname]\ + ## ?sslmode=[disable|verify-ca|verify-full] + ## or a simple string: + ## host=localhost user=pqotest password=... sslmode=... dbname=app_production + ## + ## All connection parameters are optional. + ## + address = "host=localhost user=pgbouncer sslmode=disable" +``` + +#### `address` -## Configuration Specify address via a postgresql connection string: `host=/run/postgresql port=6432 user=telegraf database=pgbouncer` @@ -18,8 +37,46 @@ All connection parameters are optional. Without the dbname parameter, the driver will default to a database with the same name as the user. This dbname is just for instantiating a connection with the server and doesn't restrict the databases we are trying to grab metrics for. 
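Stepping back from the diff for a moment: the type switch introduced two commits up is the general recipe for a wire value whose Go type varies by server version. A self-contained sketch of the same conversion, with made-up sample values:

```go
package main

import (
	"fmt"
	"strconv"
)

// toInt64 accepts a SHOW STATS value that pgbouncer returns either as a
// native integer (1.5 through 1.9) or as a decimal string (1.12 and later).
func toInt64(val interface{}) (int64, error) {
	switch v := val.(type) {
	case int64:
		return v, nil
	case string:
		return strconv.ParseInt(v, 10, 64)
	default:
		return 0, fmt.Errorf("unexpected type %T", val)
	}
}

func main() {
	// Illustrative values, as an old and a new pgbouncer might return them.
	for _, raw := range []interface{}{int64(42), "42"} {
		n, err := toInt64(raw)
		if err != nil {
			fmt.Println(err)
			continue
		}
		fmt.Println(n)
	}
}
```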
-### Configuration example +### Metrics + +- pgbouncer + - tags: + - db + - server + - fields: + - avg_query_count + - avg_query_time + - avg_wait_time + - avg_xact_count + - avg_xact_time + - total_query_count + - total_query_time + - total_received + - total_sent + - total_wait_time + - total_xact_count + - total_xact_time + ++ pgbouncer_pools + - tags: + - db + - pool_mode + - server + - user + - fields: + - cl_active + - cl_waiting + - maxwait + - maxwait_us + - sv_active + - sv_idle + - sv_login + - sv_tested + - sv_used + +### Example Output + ``` -[[inputs.pgbouncer]] - address = "postgres://telegraf@localhost/pgbouncer" +pgbouncer,db=pgbouncer,server=host\=debian-buster-postgres\ user\=dbn\ port\=6432\ dbname\=pgbouncer\ avg_query_count=0i,avg_query_time=0i,avg_wait_time=0i,avg_xact_count=0i,avg_xact_time=0i,total_query_count=26i,total_query_time=0i,total_received=0i,total_sent=0i,total_wait_time=0i,total_xact_count=26i,total_xact_time=0i 1581569936000000000 +pgbouncer_pools,db=pgbouncer,pool_mode=statement,server=host\=debian-buster-postgres\ user\=dbn\ port\=6432\ dbname\=pgbouncer\ ,user=pgbouncer cl_active=1i,cl_waiting=0i,maxwait=0i,maxwait_us=0i,sv_active=0i,sv_idle=0i,sv_login=0i,sv_tested=0i,sv_used=0i 1581569936000000000 ``` From 07a7b70f97aa81b1e61f77b2195e1d87ffdc8b0f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 12 Feb 2020 21:17:27 -0800 Subject: [PATCH 1530/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c59b44706..0573913b6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,6 +57,7 @@ #### Bugfixes - [#6988](https://github.com/influxdata/telegraf/issues/6988): Parse NaN values from summary types in prometheus input. +- [#6820](https://github.com/influxdata/telegraf/issues/6820): Fix pgbouncer input when used with newer pgbouncer versions. 
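The other half of that pgbouncer fix, registering a decoder for the numeric OID, can also be exercised on its own. A sketch against the jackc/pgx v3 `pgtype` package that telegraf vendors at this point in history; whether `OIDValue` is the right decoder for a given server is precisely what the pgx issue linked in the hunk discusses, so treat the choice as an assumption:

```go
package main

import (
	"fmt"

	"github.com/jackc/pgx/pgtype"
)

func main() {
	info := pgtype.NewConnInfo()

	// Mirrors the service.go hunk above: without this registration, pgx has
	// no decoder for columns that pgbouncer's admin console reports as numeric.
	info.RegisterDataType(pgtype.DataType{
		Value: &pgtype.OIDValue{},
		Name:  "numericOID",
		OID:   pgtype.NumericOID,
	})

	fmt.Println("registered decoder for OID", pgtype.NumericOID)
}
```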
## v1.13.3 [2020-02-04] From e056097cb41b2a98ec1a1d1a1008fb0a36f05b1b Mon Sep 17 00:00:00 2001 From: vikkyomkar Date: Sat, 15 Feb 2020 03:34:54 +0530 Subject: [PATCH 1531/1815] Add new fields for Jenkins Total and Busy executors (#6957) --- plugins/inputs/jenkins/README.md | 1 + plugins/inputs/jenkins/jenkins.go | 18 +++++-- plugins/inputs/jenkins/jenkins_test.go | 68 +++++++++++++++++++++++--- 3 files changed, 76 insertions(+), 11 deletions(-) diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md index 2bbfd157e..911d2a9b5 100644 --- a/plugins/inputs/jenkins/README.md +++ b/plugins/inputs/jenkins/README.md @@ -96,6 +96,7 @@ SELECT mean("duration") AS "mean_duration" FROM "jenkins_job" WHERE time > now() ``` $ ./telegraf --config telegraf.conf --input-filter jenkins --test +jenkins,host=myhost,port=80,source=my-jenkins-instance busy_executors=4i,total_executors=8i 1580418261000000000 jenkins_node,arch=Linux\ (amd64),disk_path=/var/jenkins_home,temp_path=/tmp,host=myhost,node_name=master,source=my-jenkins-instance,port=8080 swap_total=4294963200,memory_available=586711040,memory_total=6089498624,status=online,response_time=1000i,disk_available=152392036352,temp_available=152392036352,swap_available=3503263744,num_executors=2i 1516031535000000000 jenkins_job,host=myhost,name=JOB1,parents=apps/br1,result=SUCCESS,source=my-jenkins-instance,port=8080 duration=2831i,result_code=0i 1516026630000000000 jenkins_job,host=myhost,name=JOB2,parents=apps/br2,result=SUCCESS,source=my-jenkins-instance,port=8080 duration=2285i,result_code=0i 1516027230000000000 diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go index 7a2b19d95..d6d326922 100644 --- a/plugins/inputs/jenkins/jenkins.go +++ b/plugins/inputs/jenkins/jenkins.go @@ -90,8 +90,9 @@ const sampleConfig = ` // measurement const ( - measurementNode = "jenkins_node" - measurementJob = "jenkins_job" + measurementJenkins = "jenkins" + measurementNode = "jenkins_node" + measurementJob = "jenkins_job" ) // SampleConfig implements telegraf.Input interface @@ -244,6 +245,15 @@ func (j *Jenkins) gatherNodesData(acc telegraf.Accumulator) { acc.AddError(err) return } + + // get total and busy executors + tags := map[string]string{"source": j.Source, "port": j.Port} + fields := make(map[string]interface{}) + fields["busy_executors"] = nodeResp.BusyExecutors + fields["total_executors"] = nodeResp.TotalExecutors + + acc.AddFields(measurementJenkins, fields, tags) + // get node data for _, node := range nodeResp.Computers { err = j.gatherNodeData(node, acc) @@ -353,7 +363,9 @@ func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error { } type nodeResponse struct { - Computers []node `json:"computer"` + Computers []node `json:"computer"` + BusyExecutors int `json:"busyExecutors"` + TotalExecutors int `json:"totalExecutors"` } type node struct { diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go index 6233bb83f..6c281390e 100644 --- a/plugins/inputs/jenkins/jenkins_test.go +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -106,6 +106,19 @@ func TestGatherNodeData(t *testing.T) { }, }, wantErr: true, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "source": "127.0.0.1", + }, + Fields: map[string]interface{}{ + "busy_executors": 0, + "total_executors": 0, + }, + }, + }, + }, }, { name: "empty monitor data", @@ -130,6 +143,8 @@ func TestGatherNodeData(t *testing.T) { responseMap: map[string]interface{}{ 
"/api/json": struct{}{}, "/computer/api/json": nodeResponse{ + BusyExecutors: 4, + TotalExecutors: 8, Computers: []node{ {DisplayName: "ignore-1"}, {DisplayName: "ignore-2"}, @@ -137,6 +152,19 @@ func TestGatherNodeData(t *testing.T) { }, }, }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "source": "127.0.0.1", + }, + Fields: map[string]interface{}{ + "busy_executors": 4, + "total_executors": 8, + }, + }, + }, + }, }, { name: "normal data collection", @@ -144,6 +172,8 @@ func TestGatherNodeData(t *testing.T) { responseMap: map[string]interface{}{ "/api/json": struct{}{}, "/computer/api/json": nodeResponse{ + BusyExecutors: 4, + TotalExecutors: 8, Computers: []node{ { DisplayName: "master", @@ -175,6 +205,15 @@ func TestGatherNodeData(t *testing.T) { }, output: &testutil.Accumulator{ Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "source": "127.0.0.1", + }, + Fields: map[string]interface{}{ + "busy_executors": 4, + "total_executors": 8, + }, + }, { Tags: map[string]string{ "node_name": "master", @@ -203,6 +242,8 @@ func TestGatherNodeData(t *testing.T) { responseMap: map[string]interface{}{ "/api/json": struct{}{}, "/computer/api/json": nodeResponse{ + BusyExecutors: 4, + TotalExecutors: 8, Computers: []node{ { DisplayName: "slave", @@ -216,6 +257,15 @@ func TestGatherNodeData(t *testing.T) { }, output: &testutil.Accumulator{ Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "source": "127.0.0.1", + }, + Fields: map[string]interface{}{ + "busy_executors": 4, + "total_executors": 8, + }, + }, { Tags: map[string]string{ "node_name": "slave", @@ -252,16 +302,18 @@ func TestGatherNodeData(t *testing.T) { t.Fatalf("%s: expected err, got nil", test.name) } if test.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", test.name) + t.Fatalf("%s: collected extra data %s", test.name, acc.Metrics) } else if test.output != nil && len(test.output.Metrics) > 0 { - for k, m := range test.output.Metrics[0].Tags { - if acc.Metrics[0].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[0].Tags[k]) + for i := 0; i < len(test.output.Metrics); i++ { + for k, m := range test.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[0].Tags[k]) + } } - } - for k, m := range test.output.Metrics[0].Fields { - if acc.Metrics[0].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[0].Fields[k], acc.Metrics[0].Fields[k]) + for k, m := range test.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[0].Fields[k], acc.Metrics[0].Fields[k]) + } } } } From 784266a301b8dc382c21b70d574287c2a6b3cbc1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 14 Feb 2020 15:36:58 -0800 Subject: [PATCH 1532/1815] Update jenkins readme --- plugins/inputs/jenkins/README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md index 911d2a9b5..3615006c4 100644 --- a/plugins/inputs/jenkins/README.md +++ b/plugins/inputs/jenkins/README.md @@ -53,6 +53,14 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API ### Metrics: - jenkins_node + - tags: + - source + - port + - fields: + - busy_executors + - 
total_executors + ++ jenkins_node - tags: - arch - disk_path From 88a89634d80e7198b0d2cbdd2f6d7c8172b612a2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 14 Feb 2020 15:37:46 -0800 Subject: [PATCH 1533/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0573913b6..2d26f8383 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,7 @@ - [#6935](https://github.com/influxdata/telegraf/pull/6935): Add counters for merged reads and writes to diskio input. - [#6982](https://github.com/influxdata/telegraf/pull/6982): Add support for titlecase transformation to strings processor. - [#6993](https://github.com/influxdata/telegraf/pull/6993): Add support for MDB database information to openldap input. +- [#6957](https://github.com/influxdata/telegraf/pull/6957): Add new fields for Jenkins total and busy executors. #### Bugfixes From 397a04aa3251994dcd34b20fb670cf5277a505f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Atakan=20=C3=96zceviz?= Date: Tue, 18 Feb 2020 23:30:56 +0100 Subject: [PATCH 1534/1815] Fix typo in http_response readme (#7036) --- plugins/inputs/http_response/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md index 8f1427534..2307461ca 100644 --- a/plugins/inputs/http_response/README.md +++ b/plugins/inputs/http_response/README.md @@ -77,7 +77,7 @@ This tag is used to expose network and plugin errors. HTTP errors are considered --------------------------|-------------------------|-----------| |success | 0 |The HTTP request completed, even if the HTTP code represents an error| |response_string_mismatch | 1 |The option `response_string_match` was used, and the body of the response didn't match the regex. HTTP errors with content in their body (like 4xx, 5xx) will trigger this error| -|body_read_error | 2 |The option `response_string_match` was used, but the plugin wans't able to read the body of the response. Responses with empty bodies (like 3xx, HEAD, etc) will trigger this error| +|body_read_error | 2 |The option `response_string_match` was used, but the plugin wasn't able to read the body of the response. 
Responses with empty bodies (like 3xx, HEAD, etc) will trigger this error| |connection_failed | 3 |Catch all for any network error not specifically handled by the plugin| |timeout | 4 |The plugin timed out while awaiting the HTTP connection to complete| |dns_error | 5 |There was a DNS error while attempting to connect to the host| From 8b3bd2585db17d6eb30248cd58a9bf784bf1e1aa Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 18 Feb 2020 19:13:54 -0500 Subject: [PATCH 1535/1815] Fix issues with failing tests on darwin (#7042) --- docker-compose.yml | 9 +++++---- plugins/inputs/ethtool/ethtool_test.go | 2 ++ .../postgresql_extensible/postgresql_extensible_test.go | 1 + .../inputs/powerdns_recursor/powerdns_recursor_test.go | 4 ++++ plugins/inputs/processes/processes_test.go | 3 +++ plugins/inputs/syslog/rfc5426_test.go | 5 ----- 6 files changed, 15 insertions(+), 9 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index bce3f4922..eb96fc2bf 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -42,14 +42,16 @@ services: ports: - "11211:11211" pgbouncer: - image: mbed/pgbouncer + image: mbentley/ubuntu-pgbouncer environment: - PG_ENV_POSTGRESQL_USER: pgbouncer - PG_ENV_POSTGRESQL_PASS: pgbouncer + - PG_ENV_POSTGRESQL_USER=pgbouncer + - PG_ENV_POSTGRESQL_PASS=pgbouncer ports: - "6432:6432" postgres: image: postgres:alpine + environment: + - POSTGRES_HOST_AUTH_METHOD=trust ports: - "5432:5432" rabbitmq: @@ -96,6 +98,5 @@ services: - crate - -Cnetwork.host=0.0.0.0 - -Ctransport.host=localhost - - -Clicense.enterprise=false environment: - CRATE_HEAP_SIZE=128m diff --git a/plugins/inputs/ethtool/ethtool_test.go b/plugins/inputs/ethtool/ethtool_test.go index c151c9cae..d281644a5 100644 --- a/plugins/inputs/ethtool/ethtool_test.go +++ b/plugins/inputs/ethtool/ethtool_test.go @@ -1,3 +1,5 @@ +// +build linux + package ethtool import ( diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go index 757b468f2..bca009f16 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go @@ -210,6 +210,7 @@ func TestPostgresqlSqlScript(t *testing.T) { Tagvalue: "", }} p := &Postgresql{ + Log: testutil.Logger{}, Service: postgresql.Service{ Address: fmt.Sprintf( "host=%s user=postgres sslmode=disable", diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go index 0ca4daf69..d0f5690cc 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go @@ -3,6 +3,7 @@ package powerdns_recursor import ( "net" "os" + "runtime" "sync" "testing" "time" @@ -98,6 +99,9 @@ var intOverflowMetrics = "all-outqueries\t18446744073709550195\nanswers-slow\t36 "x-ourtime2-4\t302\nx-ourtime4-8\t194\nx-ourtime8-16\t24\n" func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("Skipping test on darwin") + } // We create a fake server to return test data controlSocket := "/tmp/pdns5724354148158589552.controlsocket" addr, err := net.ResolveUnixAddr("unixgram", controlSocket) diff --git a/plugins/inputs/processes/processes_test.go b/plugins/inputs/processes/processes_test.go index 23359a85d..268cef913 100644 --- a/plugins/inputs/processes/processes_test.go +++ b/plugins/inputs/processes/processes_test.go @@ -122,6 +122,9 @@ func 
TestFromProcFilesWithSpaceInCmd(t *testing.T) { // However, we have had reports of this process state on Ubuntu // Bionic w/ Linux 4.15 (#6270) func TestParkedProcess(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip("Parked process test only relevant on linux") + } procstat := `88 (watchdog/13) P 2 0 0 0 -1 69238848 0 0 0 0 0 0 0 0 20 0 1 0 20 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 1 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ` plugin := &Processes{ diff --git a/plugins/inputs/syslog/rfc5426_test.go b/plugins/inputs/syslog/rfc5426_test.go index 00efb9479..5e65c1d39 100644 --- a/plugins/inputs/syslog/rfc5426_test.go +++ b/plugins/inputs/syslog/rfc5426_test.go @@ -17,11 +17,6 @@ import ( func getTestCasesForRFC5426() []testCasePacket { testCases := []testCasePacket{ - { - name: "empty", - data: []byte(""), - werr: true, - }, { name: "complete", data: []byte("<1>1 - - - - - - A"), From 0a1373765e9a85f116c9aad95b072c75eb1e1f6d Mon Sep 17 00:00:00 2001 From: Steven Barth Date: Wed, 19 Feb 2020 02:31:39 +0100 Subject: [PATCH 1536/1815] Fix dash to underscore replacement when handling embedded tags in Cisco MDT (#7035) Currently configuring embedded_tags for cisco_telemetry_mdt input requires an unusual mix of - and _, i.e. one needs to specify e.g. Cisco-IOS-XR-wdsysmon-fd-oper:system-monitoring/cpu-utilization/process_cpu/process-id for it to work correctly. Additionally, tags created might still contain dashes against convention. This fix creates correctly formatted tags with underscores instead of dashes and unifies the configuration parameter to expect either dashes or underscores, so old configurations are still valid. --- plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go index 28866ce67..2ae051d5b 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -76,7 +76,7 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { // Fill extra tags c.extraTags = make(map[string]map[string]struct{}) for _, tag := range c.EmbeddedTags { - dir := path.Dir(tag) + dir := strings.Replace(path.Dir(tag), "-", "_", -1) if _, hasKey := c.extraTags[dir]; !hasKey { c.extraTags[dir] = make(map[string]struct{}) } @@ -400,7 +400,7 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie name = prefix + "/" + name } - extraTags := c.extraTags[path+"/"+name] + extraTags := c.extraTags[strings.Replace(path, "-", "_", -1)+"/"+name] if value := decodeValue(field); value != nil { // Do alias lookup, to shorten measurement names @@ -423,7 +423,7 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie if len(extraTags) > 0 { for _, subfield := range field.Fields { if _, isExtraTag := extraTags[subfield.Name]; isExtraTag { - tags[name+"/"+subfield.Name] = decodeTag(subfield) + tags[name+"/"+strings.Replace(subfield.Name, "-", "_", -1)] = decodeTag(subfield) } } } From 3f7198fca5a063610c293ca26128c243a17d7ba1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 18 Feb 2020 17:32:18 -0800 Subject: [PATCH 1537/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2d26f8383..8c9859167 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,7 @@ - 
[#6982](https://github.com/influxdata/telegraf/pull/6982): Add support for titlecase transformation to strings processor. - [#6993](https://github.com/influxdata/telegraf/pull/6993): Add support for MDB database information to openldap input. - [#6957](https://github.com/influxdata/telegraf/pull/6957): Add new fields for Jenkins total and busy executors. +- [#7035](https://github.com/influxdata/telegraf/pull/7035): Fix dash to underscore replacement when handling embedded tags in Cisco MDT. #### Bugfixes From 740e30a96d5e0050d7a1880d853f80f3a1dd470e Mon Sep 17 00:00:00 2001 From: MikaelUrankar <49529234+MikaelUrankar@users.noreply.github.com> Date: Wed, 19 Feb 2020 20:54:19 +0100 Subject: [PATCH 1538/1815] Update gopsutil to add support for freebsd/arm64 (#7031) --- go.mod | 7 +++---- go.sum | 10 ++++++---- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 2157160c6..b9495c27a 100644 --- a/go.mod +++ b/go.mod @@ -103,8 +103,7 @@ require ( github.com/safchain/ethtool v0.0.0-20200128171343-ef7e7c9c2763 github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect - github.com/shirou/gopsutil v2.19.11+incompatible - github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 // indirect + github.com/shirou/gopsutil v2.20.1+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect github.com/sirupsen/logrus v1.2.0 github.com/soniah/gosnmp v1.22.0 @@ -121,9 +120,9 @@ require ( github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a // indirect github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect - golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 + golang.org/x/net v0.0.0-20200202094626-16171245cfb2 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 - golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 + golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 gonum.org/v1/gonum v0.6.2 // indirect google.golang.org/api v0.3.1 google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 diff --git a/go.sum b/go.sum index e3cc6969a..a807fe68e 100644 --- a/go.sum +++ b/go.sum @@ -387,10 +387,8 @@ github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybL github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shirou/gopsutil v2.19.11+incompatible h1:lJHR0foqAjI4exXqWsU3DbH7bX1xvdhGdnXTIARA9W4= -github.com/shirou/gopsutil v2.19.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 h1:udFKJ0aHUL60LboW/A+DfgoHVedieIzIXE8uylPue0U= -github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/shirou/gopsutil v2.20.1+incompatible h1:oIq9Cq4i84Hk8uQAUOG3eNdI/29hBawGrD5YRl6JRDY= +github.com/shirou/gopsutil v2.20.1+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= 
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= @@ -473,6 +471,8 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8 golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -495,6 +495,8 @@ golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= From 59a506ae4adaae956bb02d7ff0788a1844637c36 Mon Sep 17 00:00:00 2001 From: prashanthjbabu Date: Thu, 20 Feb 2020 01:27:07 +0530 Subject: [PATCH 1539/1815] Add process created_at time to procstat input (#7039) --- plugins/inputs/procstat/README.md | 5 +++-- plugins/inputs/procstat/process.go | 1 + plugins/inputs/procstat/procstat.go | 5 +++++ plugins/inputs/procstat/procstat_test.go | 4 ++++ 4 files changed, 13 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index 6163b8284..9ecc3d367 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -87,6 +87,7 @@ implemented as a WMI query. The pattern allows fuzzy matching using only - fields: - child_major_faults (int) - child_minor_faults (int) + - created_at (int) [epoch in nanoseconds] - cpu_time (int) - cpu_time_guest (float) - cpu_time_guest_nice (float) @@ -164,6 +165,6 @@ implemented as a WMI query. 
The pattern allows fuzzy matching using only ### Example Output: ``` -procstat,pidfile=/var/run/lxc/dnsmasq.pid,process_name=dnsmasq rlimit_file_locks_soft=2147483647i,rlimit_signals_pending_hard=1758i,voluntary_context_switches=478i,read_bytes=307200i,cpu_time_user=0.01,cpu_time_guest=0,memory_swap=0i,memory_locked=0i,rlimit_num_fds_hard=4096i,rlimit_nice_priority_hard=0i,num_fds=11i,involuntary_context_switches=20i,read_count=23i,memory_rss=1388544i,rlimit_memory_rss_soft=2147483647i,rlimit_memory_rss_hard=2147483647i,nice_priority=20i,rlimit_cpu_time_hard=2147483647i,cpu_time=0i,write_bytes=0i,cpu_time_idle=0,cpu_time_nice=0,memory_data=229376i,memory_stack=135168i,rlimit_cpu_time_soft=2147483647i,rlimit_memory_data_hard=2147483647i,rlimit_memory_locked_hard=65536i,rlimit_signals_pending_soft=1758i,write_count=11i,cpu_time_iowait=0,cpu_time_steal=0,rlimit_memory_stack_soft=8388608i,cpu_time_system=0.02,cpu_time_guest_nice=0,rlimit_memory_locked_soft=65536i,rlimit_memory_vms_soft=2147483647i,rlimit_file_locks_hard=2147483647i,rlimit_realtime_priority_hard=0i,pid=828i,num_threads=1i,cpu_time_soft_irq=0,rlimit_memory_vms_hard=2147483647i,rlimit_realtime_priority_soft=0i,memory_vms=15884288i,rlimit_memory_stack_hard=2147483647i,cpu_time_irq=0,rlimit_memory_data_soft=2147483647i,rlimit_num_fds_soft=1024i,signals_pending=0i,rlimit_nice_priority_soft=0i,realtime_priority=0i -procstat,exe=influxd,process_name=influxd rlimit_num_fds_hard=16384i,rlimit_signals_pending_hard=1758i,realtime_priority=0i,rlimit_memory_vms_hard=2147483647i,rlimit_signals_pending_soft=1758i,rlimit_memory_stack_hard=2147483647i,rlimit_realtime_priority_hard=0i,cpu_time=0i,pid=500i,voluntary_context_switches=975i,cpu_time_idle=0,memory_rss=3072000i,memory_locked=0i,rlimit_nice_priority_soft=0i,signals_pending=0i,nice_priority=20i,read_bytes=823296i,cpu_time_soft_irq=0,rlimit_memory_data_hard=2147483647i,rlimit_memory_locked_soft=65536i,write_count=8i,cpu_time_irq=0,memory_vms=33501184i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,rlimit_memory_vms_soft=2147483647i,rlimit_nice_priority_hard=0i,num_fds=29i,memory_data=229376i,rlimit_cpu_time_soft=2147483647i,rlimit_file_locks_soft=2147483647i,num_threads=1i,write_bytes=0i,cpu_time_steal=0,rlimit_memory_rss_hard=2147483647i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_usage=0,rlimit_memory_locked_hard=65536i,rlimit_file_locks_hard=2147483647i,involuntary_context_switches=38i,read_count=16851i,memory_swap=0i,rlimit_memory_data_soft=2147483647i,cpu_time_user=0.11,rlimit_cpu_time_hard=2147483647i,rlimit_num_fds_soft=16384i,rlimit_realtime_priority_soft=0i,cpu_time_system=0.27,cpu_time_nice=0,memory_stack=135168i,rlimit_memory_rss_soft=2147483647i +procstat_lookup,host=prash-laptop,pattern=influxd,pid_finder=pgrep,result=success pid_count=1i,running=1i,result_code=0i 1582089700000000000 +procstat,host=prash-laptop,pattern=influxd,process_name=influxd,user=root involuntary_context_switches=151496i,child_minor_faults=1061i,child_major_faults=8i,cpu_time_user=2564.81,cpu_time_idle=0,cpu_time_irq=0,cpu_time_guest=0,pid=32025i,major_faults=8609i,created_at=1580107536000000000i,voluntary_context_switches=1058996i,cpu_time_system=616.98,cpu_time_steal=0,cpu_time_guest_nice=0,memory_swap=0i,memory_locked=0i,memory_usage=1.7797634601593018,num_threads=18i,cpu_time_nice=0,cpu_time_iowait=0,cpu_time_soft_irq=0,memory_rss=148643840i,memory_vms=1435688960i,memory_data=0i,memory_stack=0i,minor_faults=1856550i 1582089700000000000 ``` diff --git a/plugins/inputs/procstat/process.go 
b/plugins/inputs/procstat/process.go index 7d3a9431d..042929f08 100644 --- a/plugins/inputs/procstat/process.go +++ b/plugins/inputs/procstat/process.go @@ -25,6 +25,7 @@ type Process interface { Times() (*cpu.TimesStat, error) RlimitUsage(bool) ([]process.RlimitStat, error) Username() (string, error) + CreateTime() (int64, error) } type PIDFinder interface { diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 995aa5bdd..8e56e4bf7 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -216,6 +216,11 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) { fields[prefix+"write_bytes"] = io.WriteBytes } + createdAt, err := proc.CreateTime() //Returns epoch in ms + if err == nil { + fields[prefix+"created_at"] = createdAt * 1000000 //Convert ms to ns + } + cpu_time, err := proc.Times() if err == nil { fields[prefix+"cpu_time_user"] = cpu_time.User diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index 22c8abb89..e1ee8ab92 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -152,6 +152,10 @@ func (p *testProc) MemoryPercent() (float32, error) { return 0, nil } +func (p *testProc) CreateTime() (int64, error) { + return 0, nil +} + func (p *testProc) Times() (*cpu.TimesStat, error) { return &cpu.TimesStat{}, nil } From 3058308d38a2c18cba9b85fe55cbb0048d4e52b9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 19 Feb 2020 12:37:08 -0800 Subject: [PATCH 1540/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8c9859167..fed2f7a0e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,7 @@ - [#6993](https://github.com/influxdata/telegraf/pull/6993): Add support for MDB database information to openldap input. - [#6957](https://github.com/influxdata/telegraf/pull/6957): Add new fields for Jenkins total and busy executors. - [#7035](https://github.com/influxdata/telegraf/pull/7035): Fix dash to underscore replacement when handling embedded tags in Cisco MDT. +- [#7039](https://github.com/influxdata/telegraf/pull/7039): Add process created_at time to procstat input. 
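The unit conversion in the hunk above deserves a spelled-out check: gopsutil's `CreateTime` reports epoch milliseconds, telegraf fields carry epoch nanoseconds, hence the factor of one million. A standard-library-only sketch, reusing the created_at value from the README example output:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// What gopsutil's Process.CreateTime() returns: epoch milliseconds.
	createdMs := int64(1580107536000)

	// Telegraf timestamps are epoch nanoseconds; ms -> ns is a factor of 1e6.
	createdNs := createdMs * int64(time.Millisecond/time.Nanosecond)

	fmt.Println(createdNs)                     // 1580107536000000000
	fmt.Println(time.Unix(0, createdNs).UTC()) // human-readable sanity check
}
```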
#### Bugfixes From 5023df08d8546d408fcdd7bd4842f5aabba1ee4c Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 19 Feb 2020 21:46:08 -0500 Subject: [PATCH 1541/1815] resolves issues where failed api calls lead to obscure errors (#7051) --- internal/models/running_output.go | 1 + plugins/inputs/ecs/client.go | 20 ++++++++- plugins/inputs/ecs/client_test.go | 41 ++++++++++++++++--- plugins/inputs/fibaro/fibaro.go | 3 +- plugins/inputs/haproxy/haproxy.go | 1 + plugins/inputs/kibana/kibana.go | 8 ++++ plugins/inputs/logstash/logstash.go | 7 ++++ plugins/inputs/mailchimp/chimp_api.go | 7 ++++ .../nginx_upstream_check.go | 17 ++++++-- .../inputs/nsq_consumer/nsq_consumer_test.go | 2 - plugins/inputs/salesforce/salesforce.go | 6 +++ 11 files changed, 97 insertions(+), 16 deletions(-) diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 752cf34ef..c48bccd3c 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -210,6 +210,7 @@ func (ro *RunningOutput) WriteBatch() error { return nil } +// Close closes the output func (r *RunningOutput) Close() { err := r.Output.Close() if err != nil { diff --git a/plugins/inputs/ecs/client.go b/plugins/inputs/ecs/client.go index d1d92f097..93074ad79 100644 --- a/plugins/inputs/ecs/client.go +++ b/plugins/inputs/ecs/client.go @@ -1,6 +1,9 @@ package ecs import ( + "fmt" + "io" + "io/ioutil" "net/http" "net/url" "time" @@ -50,10 +53,16 @@ func (c *EcsClient) Task() (*Task, error) { req, _ := http.NewRequest("GET", c.taskURL, nil) resp, err := c.client.Do(req) - if err != nil { return nil, err } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. + body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.taskURL, resp.Status, body) + } task, err := unmarshalTask(resp.Body) if err != nil { @@ -71,11 +80,18 @@ func (c *EcsClient) ContainerStats() (map[string]types.StatsJSON, error) { req, _ := http.NewRequest("GET", c.statsURL, nil) resp, err := c.client.Do(req) - if err != nil { return map[string]types.StatsJSON{}, err } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. 
+ body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.statsURL, resp.Status, body) + } + statsMap, err := unmarshalStats(resp.Body) if err != nil { return map[string]types.StatsJSON{}, err diff --git a/plugins/inputs/ecs/client_test.go b/plugins/inputs/ecs/client_test.go index d6fbd1165..6532e5d51 100644 --- a/plugins/inputs/ecs/client_test.go +++ b/plugins/inputs/ecs/client_test.go @@ -107,7 +107,8 @@ func TestEcsClient_Task(t *testing.T) { client: mockDo{ do: func(req *http.Request) (*http.Response, error) { return &http.Response{ - Body: ioutil.NopCloser(rc), + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(rc), }, nil }, }, @@ -123,11 +124,24 @@ func TestEcsClient_Task(t *testing.T) { wantErr: true, }, { - name: "malformed resp", + name: "malformed 500 resp", client: mockDo{ do: func(req *http.Request) (*http.Response, error) { return &http.Response{ - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + StatusCode: http.StatusInternalServerError, + Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + }, nil + }, + }, + wantErr: true, + }, + { + name: "malformed 200 resp", + client: mockDo{ + do: func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, @@ -164,7 +178,8 @@ func TestEcsClient_ContainerStats(t *testing.T) { client: mockDo{ do: func(req *http.Request) (*http.Response, error) { return &http.Response{ - Body: ioutil.NopCloser(rc), + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(rc), }, nil }, }, @@ -181,17 +196,31 @@ func TestEcsClient_ContainerStats(t *testing.T) { wantErr: true, }, { - name: "malformed resp", + name: "malformed 200 resp", client: mockDo{ do: func(req *http.Request) (*http.Response, error) { return &http.Response{ - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, want: map[string]types.StatsJSON{}, wantErr: true, }, + { + name: "malformed 500 resp", + client: mockDo{ + do: func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusInternalServerError, + Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + }, nil + }, + }, + want: nil, + wantErr: true, + }, } for _, tt := range tests { diff --git a/plugins/inputs/fibaro/fibaro.go b/plugins/inputs/fibaro/fibaro.go index 187b74a50..492feaf03 100644 --- a/plugins/inputs/fibaro/fibaro.go +++ b/plugins/inputs/fibaro/fibaro.go @@ -97,6 +97,7 @@ func (f *Fibaro) getJSON(path string, dataStruct interface{}) error { if err != nil { return err } + defer resp.Body.Close() if resp.StatusCode != http.StatusOK { err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)", @@ -108,8 +109,6 @@ func (f *Fibaro) getJSON(path string, dataStruct interface{}) error { return err } - defer resp.Body.Close() - dec := json.NewDecoder(resp.Body) err = dec.Decode(&dataStruct) if err != nil { diff --git a/plugins/inputs/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go index 9c22acad9..7179540d7 100644 --- a/plugins/inputs/haproxy/haproxy.go +++ b/plugins/inputs/haproxy/haproxy.go @@ -181,6 +181,7 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { if err != nil { return fmt.Errorf("Unable to connect to haproxy server '%s': %s", addr, err) } + defer res.Body.Close() if res.StatusCode != 200 { return fmt.Errorf("Unable to 
get valid stat result from '%s', http response code : %d", addr, res.StatusCode) diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go index 4b7e3c5c5..858922451 100644 --- a/plugins/inputs/kibana/kibana.go +++ b/plugins/inputs/kibana/kibana.go @@ -3,6 +3,8 @@ package kibana import ( "encoding/json" "fmt" + "io" + "io/ioutil" "net/http" "strconv" "strings" @@ -250,6 +252,12 @@ func (k *Kibana) gatherJsonData(url string, v interface{}) (host string, err err defer response.Body.Close() + if response.StatusCode != http.StatusOK { + // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. + body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) + return request.Host, fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) + } + if err = json.NewDecoder(response.Body).Decode(v); err != nil { return request.Host, err } diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index b97600700..1abcfa3a3 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -3,6 +3,8 @@ package logstash import ( "encoding/json" "fmt" + "io" + "io/ioutil" "net/http" "net/url" "strings" @@ -197,6 +199,11 @@ func (logstash *Logstash) gatherJsonData(url string, value interface{}) error { } defer response.Body.Close() + if response.StatusCode != http.StatusOK { + // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. + body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) + return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) + } err = json.NewDecoder(response.Body).Decode(value) if err != nil { diff --git a/plugins/inputs/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go index 6e4ec2d4f..066ffb4e7 100644 --- a/plugins/inputs/mailchimp/chimp_api.go +++ b/plugins/inputs/mailchimp/chimp_api.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "fmt" + "io" "io/ioutil" "log" "net/http" @@ -143,6 +144,12 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { } defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. 
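The fibaro and haproxy hunks above make a subtler fix than the new status checks: `defer resp.Body.Close()` moves ahead of the status check so the body is also closed on the error path. A minimal before/after sketch of why the ordering matters (function names are illustrative):

```go
package example

import (
	"fmt"
	"net/http"
)

// Before: an early return on a bad status skips the defer entirely,
// leaking the response body and its underlying connection.
func getLeaky(c *http.Client, url string) error {
	resp, err := c.Get(url)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %s", resp.Status) // body never closed
	}
	defer resp.Body.Close()
	return nil
}

// After: the defer is registered as soon as the response exists, so
// every return path below it closes the body.
func getFixed(c *http.Client, url string) error {
	resp, err := c.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %s", resp.Status)
	}
	return nil
}
```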
+ body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + return nil, fmt.Errorf("%s returned HTTP status %s: %q", api.url.String(), resp.Status, body) + } + body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go index 1293f946e..8e662849f 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go @@ -2,14 +2,18 @@ package nginx_upstream_check import ( "encoding/json" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/internal/tls" - "github.com/influxdata/telegraf/plugins/inputs" + "fmt" + "io" + "io/ioutil" "net/http" "net/url" "strconv" "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" ) const sampleConfig = ` @@ -148,6 +152,11 @@ func (check *NginxUpstreamCheck) gatherJsonData(url string, value interface{}) e } defer response.Body.Close() + if response.StatusCode != http.StatusOK { + // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. + body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) + return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) + } err = json.NewDecoder(response.Body).Decode(value) if err != nil { diff --git a/plugins/inputs/nsq_consumer/nsq_consumer_test.go b/plugins/inputs/nsq_consumer/nsq_consumer_test.go index 1e8264d06..e07b125cc 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer_test.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer_test.go @@ -51,8 +51,6 @@ func TestReadsMetricsFromNSQ(t *testing.T) { assert.Equal(t, 0, len(acc.Metrics), "There should not be any points") if err := consumer.Start(&acc); err != nil { t.Fatal(err.Error()) - } else { - defer consumer.Stop() } waitForPoint(&acc, t) diff --git a/plugins/inputs/salesforce/salesforce.go b/plugins/inputs/salesforce/salesforce.go index 096550db5..ad40ec566 100644 --- a/plugins/inputs/salesforce/salesforce.go +++ b/plugins/inputs/salesforce/salesforce.go @@ -5,6 +5,7 @@ import ( "encoding/xml" "errors" "fmt" + "io" "io/ioutil" "net/http" "net/url" @@ -200,6 +201,11 @@ func (s *Salesforce) login() error { return err } defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. 
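The ecs client tests earlier in this patch pin both the status code and the body for each case. The mock they build is just a function field satisfying the client's one-method `Do` interface; reduced to its essentials (the type shape mirrors client_test.go, the canned response is illustrative):

```go
package example

import (
	"bytes"
	"io/ioutil"
	"net/http"
)

// mockDo satisfies the client's Do interface with a swappable
// function, so each table-test case can return a canned response.
type mockDo struct {
	do func(req *http.Request) (*http.Response, error)
}

func (m mockDo) Do(req *http.Request) (*http.Response, error) {
	return m.do(req)
}

// A canned 500 like the "malformed 500 resp" cases use: the body is
// garbage on purpose, since the status check must fail before any
// unmarshalling is attempted.
var internalError = mockDo{
	do: func(req *http.Request) (*http.Response, error) {
		return &http.Response{
			StatusCode: http.StatusInternalServerError,
			Body:       ioutil.NopCloser(bytes.NewReader([]byte("foo"))),
		}, nil
	},
}
```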
+ body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + return fmt.Errorf("%s returned HTTP status %s: %q", loginEndpoint, resp.Status, body) + } respBody, err := ioutil.ReadAll(resp.Body) if err != nil { From cd2a77a95b0fbe23f5bf1d3be9c525fc0ba74cbc Mon Sep 17 00:00:00 2001 From: James Buchan Date: Thu, 20 Feb 2020 11:31:41 -0700 Subject: [PATCH 1542/1815] Remove duplicate loop in net_response plugin (#7055) --- plugins/inputs/net_response/net_response.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go index e411aa647..3f75a6058 100644 --- a/plugins/inputs/net_response/net_response.go +++ b/plugins/inputs/net_response/net_response.go @@ -223,9 +223,6 @@ func (n *NetResponse) Gather(acc telegraf.Accumulator) error { } else { return errors.New("Bad protocol") } - for key, value := range returnTags { - tags[key] = value - } // Merge the tags for k, v := range returnTags { tags[k] = v From 2e48a3b45a4c02f1baade4a5971a3d19f076db93 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 20 Feb 2020 10:45:59 -0800 Subject: [PATCH 1543/1815] Update github.com/safchain/ethtool (#7052) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b9495c27a..bea4cff74 100644 --- a/go.mod +++ b/go.mod @@ -100,7 +100,7 @@ require ( github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f github.com/prometheus/common v0.2.0 - github.com/safchain/ethtool v0.0.0-20200128171343-ef7e7c9c2763 + github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect github.com/shirou/gopsutil v2.20.1+incompatible diff --git a/go.sum b/go.sum index a807fe68e..edb53ea89 100644 --- a/go.sum +++ b/go.sum @@ -379,8 +379,8 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/safchain/ethtool v0.0.0-20200128171343-ef7e7c9c2763 h1:rzR7qhaYDEzb9ba9+hNyRWCYuMFGPmzFDoo1QPM9KC0= -github.com/safchain/ethtool v0.0.0-20200128171343-ef7e7c9c2763/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk= +github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec h1:6ncX5ko6B9LntYM0YBRXkiSaZMmLYeZ/NWcmeB43mMY= github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM= From cf0667264af3847e010ea3086f9480b0a5c93822 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 20 Feb 2020 10:47:03 -0800 Subject: [PATCH 1544/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fed2f7a0e..4a213f8a9 
100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -61,6 +61,7 @@ - [#6988](https://github.com/influxdata/telegraf/issues/6988): Parse NaN values from summary types in prometheus input. - [#6820](https://github.com/influxdata/telegraf/issues/6820): Fix pgbouncer input when used with newer pgbouncer versions. +- [#6913](https://github.com/influxdata/telegraf/issues/6913): Support up to 8192 stats in the ethtool input. ## v1.13.3 [2020-02-04] From 82a358b9108abcd0ca0972dcd06c64b3f13af13f Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Thu, 20 Feb 2020 17:23:13 -0500 Subject: [PATCH 1545/1815] testing circle ci for mac (#7054) --- .circleci/config.yml | 53 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 51 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 874a28bb4..6513c812e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -12,7 +12,14 @@ defaults: go-1_13: &go-1_13 docker: - image: 'quay.io/influxdb/telegraf-ci:1.13.5' - + mac: &mac + macos: + xcode: 11.3.1 + working_directory: '~/go/src/github.com/influxdata/telegraf' + environment: + HOMEBREW_NO_AUTO_UPDATE: 1 + GOFLAGS: -p=8 + version: 2 jobs: deps: @@ -32,6 +39,30 @@ jobs: root: '/go' paths: - '*' + macdeps: + <<: [ *mac ] + steps: + - checkout + - restore_cache: + key: mac-go-mod-v1-{{ checksum "go.sum" }} + - run: 'brew install go@1.13' + - run: 'make deps' + - run: 'make tidy' + - save_cache: + name: 'go module cache' + key: mac-go-mod-v1-{{ checksum "go.sum" }} + paths: + - '~/go/pkg/mod' + - '/usr/local/Cellar/go' + - '/usr/local/bin/go' + - '/usr/local/bin/gofmt' + - persist_to_workspace: + root: '/' + paths: + - 'usr/local/bin/go' + - 'usr/local/Cellar/go' + - 'usr/local/bin/gofmt' + - 'Users/distiller/go' test-go-1.12: <<: [ *defaults, *go-1_12 ] @@ -65,7 +96,15 @@ jobs: - run: 'GOARCH=386 make' - run: 'GOARCH=386 make check' - run: 'GOARCH=386 make test' - + test-go-1.13-darwin: + <<: [ *mac ] + steps: + - attach_workspace: + at: '/' + - run: 'make' + - run: 'make check' + - run: 'make test' + package: <<: [ *defaults, *go-1_13 ] steps: @@ -98,6 +137,10 @@ workflows: version: 2 check: jobs: + - 'macdeps': + filters: + tags: + only: /.*/ - 'deps': filters: tags: @@ -126,6 +169,12 @@ workflows: filters: tags: only: /.*/ + - 'test-go-1.13-darwin': + requires: + - 'macdeps' + filters: + tags: # only runs on tags if you specify this filter + only: /.*/ - 'package': requires: - 'test-go-1.12' From 79ff743064c5d61ecad86661f0bfc8073e628a6c Mon Sep 17 00:00:00 2001 From: "R.I.Pienaar" Date: Thu, 20 Feb 2020 22:30:04 +0000 Subject: [PATCH 1546/1815] Add support for credentials file to nats_consumer and nats output (#7022) --- docs/LICENSE_OF_DEPENDENCIES.md | 6 +- go.mod | 5 +- go.sum | 20 +++++-- plugins/inputs/nats/nats.go | 2 +- plugins/inputs/nats_consumer/README.md | 3 + plugins/inputs/nats_consumer/nats_consumer.go | 56 ++++++++++--------- plugins/outputs/nats/README.md | 4 ++ plugins/outputs/nats/nats.go | 41 +++++++------- 8 files changed, 78 insertions(+), 59 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 71636a0b8..f22e3c7e9 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -83,8 +83,10 @@ following works: - github.com/mitchellh/mapstructure [MIT License](https://github.com/mitchellh/mapstructure/blob/master/LICENSE) - github.com/multiplay/go-ts3 [BSD 2-Clause "Simplified" License](https://github.com/multiplay/go-ts3/blob/master/LICENSE) - github.com/naoina/go-stringutil 
[MIT License](https://github.com/naoina/go-stringutil/blob/master/LICENSE) -- github.com/nats-io/gnatsd [Apache License 2.0](https://github.com/nats-io/gnatsd/blob/master/LICENSE) -- github.com/nats-io/go-nats [Apache License 2.0](https://github.com/nats-io/go-nats/blob/master/LICENSE) +- github.com/nats-io/nats-server [Apache License 2.0](https://github.com/nats-io/nats-server/blob/master/LICENSE) +- github.com/nats-io/nats.go [Apache License 2.0](https://github.com/nats-io/nats.go/blob/master/LICENSE) +- github.com/nats-io/jwt [Apache License 2.0](https://github.com/nats-io/jwt/blob/master/LICENSE) +- github.com/nats-io/nkeys [Apache License 2.0](https://github.com/nats-io/nkeys/blob/master/LICENSE) - github.com/nats-io/nuid [Apache License 2.0](https://github.com/nats-io/nuid/blob/master/LICENSE) - github.com/nsqio/go-nsq [MIT License](https://github.com/nsqio/go-nsq/blob/master/LICENSE) - github.com/openconfig/gnmi [Apache License 2.0](https://github.com/openconfig/gnmi/blob/master/LICENSE) diff --git a/go.mod b/go.mod index bea4cff74..84b24db20 100644 --- a/go.mod +++ b/go.mod @@ -86,9 +86,8 @@ require ( github.com/mitchellh/mapstructure v0.0.0-20180715050151-f15292f7a699 // indirect github.com/multiplay/go-ts3 v1.0.0 github.com/naoina/go-stringutil v0.1.0 // indirect - github.com/nats-io/gnatsd v1.2.0 - github.com/nats-io/go-nats v1.5.0 - github.com/nats-io/nuid v1.0.0 // indirect + github.com/nats-io/nats-server/v2 v2.1.4 + github.com/nats-io/nats.go v1.9.1 github.com/nsqio/go-nsq v1.0.7 github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 github.com/opencontainers/go-digest v1.0.0-rc1 // indirect diff --git a/go.sum b/go.sum index edb53ea89..f940ab12a 100644 --- a/go.sum +++ b/go.sum @@ -318,12 +318,18 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= -github.com/nats-io/gnatsd v1.2.0 h1:WKLzmB8LyP4CiVJuAoZMxdYBurENVX4piS358tjcBhw= -github.com/nats-io/gnatsd v1.2.0/go.mod h1:nqco77VO78hLCJpIcVfygDP2rPGfsEHkGTUk94uh5DQ= -github.com/nats-io/go-nats v1.5.0 h1:OrEQSvQQrP+A+9EBBxY86Z4Es6uaUdObZ5UhWHn9b08= -github.com/nats-io/go-nats v1.5.0/go.mod h1:+t7RHT5ApZebkrQdnn6AhQJmhJJiKAvJUio1PiiCtj0= -github.com/nats-io/nuid v1.0.0 h1:44QGdhbiANq8ZCbUkdn6W5bqtg+mHuDE4wOUuxxndFs= -github.com/nats-io/nuid v1.0.0/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2 h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.4 h1:BILRnsJ2Yb/fefiFbBWADpViGF69uh4sxe8poVDQ06g= +github.com/nats-io/nats-server/v2 v2.1.4/go.mod h1:Jw1Z28soD/QasIA2uWjXyM9El1jly3YwyFOuR8tH1rg= +github.com/nats-io/nats.go v1.9.1 h1:ik3HbLhZ0YABLto7iX80pZLPw/6dx3T+++MZJwLnMrQ= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3 h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1 
h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nsqio/go-nsq v1.0.7 h1:O0pIZJYTf+x7cZBA0UMY8WxFG79lYTURmWzAAh48ljY= github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -444,6 +450,7 @@ golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -493,6 +500,7 @@ golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= diff --git a/plugins/inputs/nats/nats.go b/plugins/inputs/nats/nats.go index 83e262ec8..1afb0046d 100644 --- a/plugins/inputs/nats/nats.go +++ b/plugins/inputs/nats/nats.go @@ -13,7 +13,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" - gnatsd "github.com/nats-io/gnatsd/server" + gnatsd "github.com/nats-io/nats-server/v2/server" ) type Nats struct { diff --git a/plugins/inputs/nats_consumer/README.md b/plugins/inputs/nats_consumer/README.md index 7c1abab0b..ae40d9185 100644 --- a/plugins/inputs/nats_consumer/README.md +++ b/plugins/inputs/nats_consumer/README.md @@ -23,6 +23,9 @@ instances of telegraf can read from a NATS cluster in parallel. 
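Both the consumer and the output below migrate from mutating `nats.DefaultOptions` to the functional options that nats.go v1.9 favors. A reduced sketch of the resulting connect path (the server list and credentials path are placeholders):

```go
package example

import (
	"strings"

	"github.com/nats-io/nats.go"
)

func connect(servers []string, username, password, credentials string) (*nats.Conn, error) {
	opts := []nats.Option{
		nats.MaxReconnects(-1), // keep retrying forever, as before
	}
	if username != "" && password != "" {
		opts = append(opts, nats.UserInfo(username, password))
	}
	if credentials != "" {
		// NATS 2.0 / NGS style credentials file
		opts = append(opts, nats.UserCredentials(credentials))
	}
	return nats.Connect(strings.Join(servers, ","), opts...)
}
```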
# username = "" # password = "" + ## Optional NATS 2.0 and NATS NGS compatible user credentials + # credentials = "/etc/telegraf/nats.creds" + ## Use Transport Layer Security # secure = false diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index eff726964..6ac19b0a8 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -3,13 +3,14 @@ package natsconsumer import ( "context" "fmt" + "strings" "sync" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" - nats "github.com/nats-io/go-nats" + "github.com/nats-io/nats.go" ) var ( @@ -31,12 +32,14 @@ func (e natsError) Error() string { } type natsConsumer struct { - QueueGroup string `toml:"queue_group"` - Subjects []string `toml:"subjects"` - Servers []string `toml:"servers"` - Secure bool `toml:"secure"` - Username string `toml:"username"` - Password string `toml:"password"` + QueueGroup string `toml:"queue_group"` + Subjects []string `toml:"subjects"` + Servers []string `toml:"servers"` + Secure bool `toml:"secure"` + Username string `toml:"username"` + Password string `toml:"password"` + Credentials string `toml:"credentials"` + tls.ClientConfig Log telegraf.Logger @@ -77,6 +80,9 @@ var sampleConfig = ` # username = "" # password = "" + ## Optional NATS 2.0 and NATS NGS compatible user credentials + # credentials = "/etc/telegraf/nats.creds" + ## Use Transport Layer Security # secure = false @@ -135,19 +141,18 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error { var connectErr error - // set default NATS connection options - opts := nats.DefaultOptions - - // override max reconnection tries - opts.MaxReconnect = -1 - - // override servers if any were specified - opts.Servers = n.Servers + options := []nats.Option{ + nats.MaxReconnects(-1), + nats.ErrorHandler(n.natsErrHandler), + } // override authentication, if any was specified - if n.Username != "" { - opts.User = n.Username - opts.Password = n.Password + if n.Username != "" && n.Password != "" { + options = append(options, nats.UserInfo(n.Username, n.Password)) + } + + if n.Credentials != "" { + options = append(options, nats.UserCredentials(n.Credentials)) } if n.Secure { @@ -156,19 +161,17 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error { return err } - opts.Secure = true - opts.TLSConfig = tlsConfig + options = append(options, nats.Secure(tlsConfig)) } if n.conn == nil || n.conn.IsClosed() { - n.conn, connectErr = opts.Connect() + n.conn, connectErr = nats.Connect(strings.Join(n.Servers, ","), options...) 
if connectErr != nil { return connectErr } // Setup message and error channels n.errs = make(chan error) - n.conn.SetErrorHandler(n.natsErrHandler) n.in = make(chan *nats.Msg, 1000) for _, subj := range n.Subjects { @@ -178,14 +181,13 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error { if err != nil { return err } - // ensure that the subscription has been processed by the server - if err = n.conn.Flush(); err != nil { - return err - } + // set the subscription pending limits - if err = sub.SetPendingLimits(n.PendingMessageLimit, n.PendingBytesLimit); err != nil { + err = sub.SetPendingLimits(n.PendingMessageLimit, n.PendingBytesLimit) + if err != nil { return err } + n.subs = append(n.subs, sub) } } diff --git a/plugins/outputs/nats/README.md b/plugins/outputs/nats/README.md index f6dc04f53..c5539900b 100644 --- a/plugins/outputs/nats/README.md +++ b/plugins/outputs/nats/README.md @@ -9,6 +9,10 @@ This plugin writes to a (list of) specified NATS instance(s). ## Optional credentials # username = "" # password = "" + + ## Optional NATS 2.0 and NATS NGS compatible user credentials + # credentials = "/etc/telegraf/nats.creds" + ## NATS subject for producer messages subject = "telegraf" diff --git a/plugins/outputs/nats/nats.go b/plugins/outputs/nats/nats.go index e4817d6c9..620ac8b44 100644 --- a/plugins/outputs/nats/nats.go +++ b/plugins/outputs/nats/nats.go @@ -3,32 +3,40 @@ package nats import ( "fmt" "log" + "strings" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" - nats_client "github.com/nats-io/go-nats" + "github.com/nats-io/nats.go" ) type NATS struct { - Servers []string `toml:"servers"` - Secure bool `toml:"secure"` - Username string `toml:"username"` - Password string `toml:"password"` - Subject string `toml:"subject"` + Servers []string `toml:"servers"` + Secure bool `toml:"secure"` + Username string `toml:"username"` + Password string `toml:"password"` + Credentials string `toml:"credentials"` + Subject string `toml:"subject"` + tls.ClientConfig - conn *nats_client.Conn + conn *nats.Conn serializer serializers.Serializer } var sampleConfig = ` ## URLs of NATS servers servers = ["nats://localhost:4222"] + ## Optional credentials # username = "" # password = "" + + ## Optional NATS 2.0 and NATS NGS compatible user credentials + # credentials = "/etc/telegraf/nats.creds" + ## NATS subject for producer messages subject = "telegraf" @@ -56,19 +64,13 @@ func (n *NATS) SetSerializer(serializer serializers.Serializer) { func (n *NATS) Connect() error { var err error - // set default NATS connection options - opts := nats_client.DefaultOptions - - // override max reconnection tries - opts.MaxReconnect = -1 - - // override servers, if any were specified - opts.Servers = n.Servers + opts := []nats.Option{ + nats.MaxReconnects(-1), + } // override authentication, if any was specified if n.Username != "" { - opts.User = n.Username - opts.Password = n.Password + opts = append(opts, nats.UserInfo(n.Username, n.Password)) } if n.Secure { @@ -77,12 +79,11 @@ func (n *NATS) Connect() error { return err } - opts.Secure = true - opts.TLSConfig = tlsConfig + opts = append(opts, nats.Secure(tlsConfig)) } // try and connect - n.conn, err = opts.Connect() + n.conn, err = nats.Connect(strings.Join(n.Servers, ","), opts...) 
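One behavioral detail in the consumer rewrite above: the explicit `conn.Flush()` after subscribing is dropped, and back-pressure now rests on per-subscription pending limits alone. Roughly, with placeholder subject, queue group, and limits (the real plugin takes these from its config):

```go
package example

import "github.com/nats-io/nats.go"

func subscribe(nc *nats.Conn, in chan *nats.Msg) (*nats.Subscription, error) {
	sub, err := nc.ChanQueueSubscribe("telegraf", "workers", in)
	if err != nil {
		return nil, err
	}
	// Cap how many messages/bytes may queue up unconsumed before the
	// client starts dropping, instead of growing without bound.
	if err := sub.SetPendingLimits(1000, 100*1024*1024); err != nil {
		return nil, err
	}
	return sub, nil
}
```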
return err } From 3a9062394a2e1383975040c8645a9cab5a3f2d46 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 20 Feb 2020 14:31:25 -0800 Subject: [PATCH 1547/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a213f8a9..20de55dbe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ - [#6957](https://github.com/influxdata/telegraf/pull/6957): Add new fields for Jenkins total and busy executors. - [#7035](https://github.com/influxdata/telegraf/pull/7035): Fix dash to underscore replacement when handling embedded tags in Cisco MDT. - [#7039](https://github.com/influxdata/telegraf/pull/7039): Add process created_at time to procstat input. +- [#7022](https://github.com/influxdata/telegraf/pull/7022): Add support for credentials file to nats_consumer and nats output. #### Bugfixes From 413ca898be0d225bf3e6db634ef9aabd2d066396 Mon Sep 17 00:00:00 2001 From: Giovanni Luisotto Date: Fri, 21 Feb 2020 20:11:50 +0100 Subject: [PATCH 1548/1815] Fix perf counters collection on named instances in sqlserver input (#7060) --- plugins/inputs/sqlserver/sqlserver.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 511bb5b49..d09a9743d 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -632,7 +632,7 @@ WHERE ( object_name LIKE '%User Settable%' OR object_name LIKE '%SQL Errors%' ) OR ( - object_name LIKE 'SQLServer:Batch Resp Statistics%' + object_name LIKE '%Batch Resp Statistics%' ) OR ( instance_name IN ('_Total') AND counter_name IN ( @@ -2302,7 +2302,7 @@ SELECT DISTINCT RTrim(spi.object_name) object_name , spi.cntr_value , spi.cntr_type FROM sys.dm_os_performance_counters spi -WHERE spi.object_name NOT LIKE 'SQLServer:Backup Device%' +WHERE spi.object_name NOT LIKE '%Backup Device%' AND NOT EXISTS (SELECT 1 FROM sys.databases WHERE Name = spi.instance_name); WAITFOR DELAY '00:00:01'; @@ -2324,7 +2324,7 @@ SELECT DISTINCT RTrim(spi.object_name) object_name , spi.cntr_value , spi.cntr_type FROM sys.dm_os_performance_counters spi -WHERE spi.object_name NOT LIKE 'SQLServer:Backup Device%' +WHERE spi.object_name NOT LIKE '%Backup Device%' AND NOT EXISTS (SELECT 1 FROM sys.databases WHERE Name = spi.instance_name); SELECT @@ -2360,7 +2360,7 @@ INNER JOIN #PCounters pc On cc.object_name = pc.object_name And cc.cntr_type = pc.cntr_type LEFT JOIN #CCounters cbc On cc.object_name = cbc.object_name And (Case When cc.counter_name Like '%(ms)' Then Replace(cc.counter_name, ' (ms)',' Base') - When cc.object_name = 'SQLServer:FileTable' Then Replace(cc.counter_name, 'Avg ','') + ' base' + When cc.object_name like '%FileTable' Then Replace(cc.counter_name, 'Avg ','') + ' base' When cc.counter_name = 'Worktables From Cache Ratio' Then 'Worktables From Cache Base' When cc.counter_name = 'Avg. Length of Batched Writes' Then 'Avg. 
Length of Batched Writes BS' Else cc.counter_name + ' base' @@ -2371,7 +2371,7 @@ LEFT JOIN #CCounters cbc On cc.object_name = cbc.object_name LEFT JOIN #PCounters pbc On pc.object_name = pbc.object_name And pc.instance_name = pbc.instance_name And (Case When pc.counter_name Like '%(ms)' Then Replace(pc.counter_name, ' (ms)',' Base') - When pc.object_name = 'SQLServer:FileTable' Then Replace(pc.counter_name, 'Avg ','') + ' base' + When pc.object_name like '%FileTable' Then Replace(pc.counter_name, 'Avg ','') + ' base' When pc.counter_name = 'Worktables From Cache Ratio' Then 'Worktables From Cache Base' When pc.counter_name = 'Avg. Length of Batched Writes' Then 'Avg. Length of Batched Writes BS' Else pc.counter_name + ' base' From e9e4f2c3545709654a7d6985ed88195ffb3f802a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 21 Feb 2020 11:13:03 -0800 Subject: [PATCH 1549/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 20de55dbe..7d1105475 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -63,6 +63,7 @@ - [#6988](https://github.com/influxdata/telegraf/issues/6988): Parse NaN values from summary types in prometheus input. - [#6820](https://github.com/influxdata/telegraf/issues/6820): Fix pgbouncer input when used with newer pgbouncer versions. - [#6913](https://github.com/influxdata/telegraf/issues/6913): Support up to 8192 stats in the ethtool input. +- [#7060](https://github.com/influxdata/telegraf/issues/7060): Fix perf counters collection on named instances in sqlserver input. ## v1.13.3 [2020-02-04] From b5e0577d6b05cef1766e550821f0efbad614dea4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 24 Feb 2020 15:53:16 -0800 Subject: [PATCH 1550/1815] Use add time for prometheus expiration calculation (#7056) --- .../outputs/prometheus_client/v2/collector.go | 2 +- plugins/serializers/prometheus/collection.go | 20 +- .../serializers/prometheus/collection_test.go | 420 +++++++++++------- plugins/serializers/prometheus/prometheus.go | 3 +- 4 files changed, 266 insertions(+), 179 deletions(-) diff --git a/plugins/outputs/prometheus_client/v2/collector.go b/plugins/outputs/prometheus_client/v2/collector.go index 9ffc6516a..4f8efd839 100644 --- a/plugins/outputs/prometheus_client/v2/collector.go +++ b/plugins/outputs/prometheus_client/v2/collector.go @@ -83,7 +83,7 @@ func (c *Collector) Add(metrics []telegraf.Metric) error { defer c.Unlock() for _, metric := range metrics { - c.coll.Add(metric) + c.coll.Add(metric, time.Now()) } // Expire metrics, doing this on Add ensure metrics are removed even if no diff --git a/plugins/serializers/prometheus/collection.go b/plugins/serializers/prometheus/collection.go index 5c385caad..10e85de07 100644 --- a/plugins/serializers/prometheus/collection.go +++ b/plugins/serializers/prometheus/collection.go @@ -14,6 +14,8 @@ import ( const helpString = "Telegraf collected metric" +type TimeFunc func() time.Time + type MetricFamily struct { Name string Type telegraf.ValueType @@ -22,6 +24,7 @@ type MetricFamily struct { type Metric struct { Labels []LabelPair Time time.Time + AddTime time.Time Scaler *Scaler Histogram *Histogram Summary *Summary @@ -97,14 +100,14 @@ type Entry struct { } type Collection struct { - config FormatConfig Entries map[MetricFamily]Entry + config FormatConfig } func NewCollection(config FormatConfig) *Collection { cache := &Collection{ - config: config, Entries: make(map[MetricFamily]Entry), + config: config, } return cache } @@ -177,7 +180,7 @@ func (c 
*Collection) createLabels(metric telegraf.Metric) []LabelPair { return labels } -func (c *Collection) Add(metric telegraf.Metric) { +func (c *Collection) Add(metric telegraf.Metric, now time.Time) { labels := c.createLabels(metric) for _, field := range metric.FieldList() { metricName := MetricName(metric.Name(), field.Key, metric.Type()) @@ -225,9 +228,10 @@ func (c *Collection) Add(metric telegraf.Metric) { } m = &Metric{ - Labels: labels, - Time: metric.Time(), - Scaler: &Scaler{Value: value}, + Labels: labels, + Time: metric.Time(), + AddTime: now, + Scaler: &Scaler{Value: value}, } entry.Metrics[metricKey] = m @@ -236,6 +240,7 @@ func (c *Collection) Add(metric telegraf.Metric) { m = &Metric{ Labels: labels, Time: metric.Time(), + AddTime: now, Histogram: &Histogram{}, } } @@ -283,6 +288,7 @@ func (c *Collection) Add(metric telegraf.Metric) { m = &Metric{ Labels: labels, Time: metric.Time(), + AddTime: now, Summary: &Summary{}, } } @@ -331,7 +337,7 @@ func (c *Collection) Expire(now time.Time, age time.Duration) { expireTime := now.Add(-age) for _, entry := range c.Entries { for key, metric := range entry.Metrics { - if metric.Time.Before(expireTime) { + if metric.AddTime.Before(expireTime) { delete(entry.Metrics, key) if len(entry.Metrics) == 0 { delete(c.Entries, entry.Family) diff --git a/plugins/serializers/prometheus/collection_test.go b/plugins/serializers/prometheus/collection_test.go index 70f26dac7..d2c5f5d09 100644 --- a/plugins/serializers/prometheus/collection_test.go +++ b/plugins/serializers/prometheus/collection_test.go @@ -12,27 +12,35 @@ import ( "github.com/stretchr/testify/require" ) +type Input struct { + metric telegraf.Metric + addtime time.Time +} + func TestCollectionExpire(t *testing.T) { tests := []struct { name string now time.Time age time.Duration - metrics []telegraf.Metric + input []Input expected []*dto.MetricFamily }{ { name: "not expired", now: time.Unix(1, 0), age: 10 * time.Second, - metrics: []telegraf.Metric{ - testutil.MustMetric( - "cpu", - map[string]string{}, - map[string]interface{}{ - "time_idle": 42.0, - }, - time.Unix(0, 0), - ), + input: []Input{ + { + metric: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + addtime: time.Unix(0, 0), + }, }, expected: []*dto.MetricFamily{ { @@ -52,23 +60,29 @@ func TestCollectionExpire(t *testing.T) { name: "update metric expiration", now: time.Unix(20, 0), age: 10 * time.Second, - metrics: []telegraf.Metric{ - testutil.MustMetric( - "cpu", - map[string]string{}, - map[string]interface{}{ - "time_idle": 42.0, - }, - time.Unix(0, 0), - ), - testutil.MustMetric( - "cpu", - map[string]string{}, - map[string]interface{}{ - "time_idle": 43.0, - }, - time.Unix(12, 0), - ), + input: []Input{ + { + metric: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + addtime: time.Unix(0, 0), + }, + { + metric: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 43.0, + }, + time.Unix(12, 0), + ), + addtime: time.Unix(12, 0), + }, }, expected: []*dto.MetricFamily{ { @@ -88,23 +102,28 @@ func TestCollectionExpire(t *testing.T) { name: "update metric expiration descending order", now: time.Unix(20, 0), age: 10 * time.Second, - metrics: []telegraf.Metric{ - testutil.MustMetric( - "cpu", - map[string]string{}, - map[string]interface{}{ - "time_idle": 42.0, - }, - time.Unix(12, 0), - ), - testutil.MustMetric( - "cpu", - 
map[string]string{}, - map[string]interface{}{ - "time_idle": 43.0, - }, - time.Unix(0, 0), - ), + input: []Input{ + { + metric: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(12, 0), + ), + addtime: time.Unix(12, 0), + }, { + metric: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 43.0, + }, + time.Unix(0, 0), + ), + addtime: time.Unix(0, 0), + }, }, expected: []*dto.MetricFamily{ { @@ -124,15 +143,18 @@ func TestCollectionExpire(t *testing.T) { name: "expired single metric in metric family", now: time.Unix(20, 0), age: 10 * time.Second, - metrics: []telegraf.Metric{ - testutil.MustMetric( - "cpu", - map[string]string{}, - map[string]interface{}{ - "time_idle": 42.0, - }, - time.Unix(0, 0), - ), + input: []Input{ + { + metric: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + addtime: time.Unix(0, 0), + }, }, expected: []*dto.MetricFamily{}, }, @@ -140,23 +162,28 @@ func TestCollectionExpire(t *testing.T) { name: "expired one metric in metric family", now: time.Unix(20, 0), age: 10 * time.Second, - metrics: []telegraf.Metric{ - testutil.MustMetric( - "cpu", - map[string]string{}, - map[string]interface{}{ - "time_idle": 42.0, - }, - time.Unix(0, 0), - ), - testutil.MustMetric( - "cpu", - map[string]string{}, - map[string]interface{}{ - "time_guest": 42.0, - }, - time.Unix(15, 0), - ), + input: []Input{ + { + metric: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_guest": 42.0, + }, + time.Unix(15, 0), + ), + addtime: time.Unix(15, 0), + }, }, expected: []*dto.MetricFamily{ { @@ -176,64 +203,77 @@ func TestCollectionExpire(t *testing.T) { name: "histogram bucket updates", now: time.Unix(0, 0), age: 10 * time.Second, - metrics: []telegraf.Metric{ - testutil.MustMetric( - "prometheus", - map[string]string{}, - map[string]interface{}{ - "http_request_duration_seconds_sum": 10.0, - "http_request_duration_seconds_count": 2, - }, - time.Unix(0, 0), - telegraf.Histogram, - ), - testutil.MustMetric( - "prometheus", - map[string]string{"le": "0.05"}, - map[string]interface{}{ - "http_request_duration_seconds_bucket": 1.0, - }, - time.Unix(0, 0), - telegraf.Histogram, - ), - testutil.MustMetric( - "prometheus", - map[string]string{"le": "+Inf"}, - map[string]interface{}{ - "http_request_duration_seconds_bucket": 1.0, - }, - time.Unix(0, 0), - telegraf.Histogram, - ), - // Next interval - testutil.MustMetric( - "prometheus", - map[string]string{}, - map[string]interface{}{ - "http_request_duration_seconds_sum": 20.0, - "http_request_duration_seconds_count": 4, - }, - time.Unix(0, 0), - telegraf.Histogram, - ), - testutil.MustMetric( - "prometheus", - map[string]string{"le": "0.05"}, - map[string]interface{}{ - "http_request_duration_seconds_bucket": 2.0, - }, - time.Unix(0, 0), - telegraf.Histogram, - ), - testutil.MustMetric( - "prometheus", - map[string]string{"le": "+Inf"}, - map[string]interface{}{ - "http_request_duration_seconds_bucket": 2.0, - }, - time.Unix(0, 0), - telegraf.Histogram, - ), + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 10.0, + "http_request_duration_seconds_count": 
2, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + // Next interval + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 20.0, + "http_request_duration_seconds_count": 4, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 2.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 2.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, }, expected: []*dto.MetricFamily{ { @@ -266,46 +306,55 @@ func TestCollectionExpire(t *testing.T) { name: "summary quantile updates", now: time.Unix(0, 0), age: 10 * time.Second, - metrics: []telegraf.Metric{ - testutil.MustMetric( - "prometheus", - map[string]string{}, - map[string]interface{}{ - "rpc_duration_seconds_sum": 1.0, - "rpc_duration_seconds_count": 1, - }, - time.Unix(0, 0), - telegraf.Summary, - ), - testutil.MustMetric( - "prometheus", - map[string]string{"quantile": "0.01"}, - map[string]interface{}{ - "rpc_duration_seconds": 1.0, - }, - time.Unix(0, 0), - telegraf.Summary, - ), - // Updated Summary - testutil.MustMetric( - "prometheus", - map[string]string{}, - map[string]interface{}{ - "rpc_duration_seconds_sum": 2.0, - "rpc_duration_seconds_count": 2, - }, - time.Unix(0, 0), - telegraf.Summary, - ), - testutil.MustMetric( - "prometheus", - map[string]string{"quantile": "0.01"}, - map[string]interface{}{ - "rpc_duration_seconds": 2.0, - }, - time.Unix(0, 0), - telegraf.Summary, - ), + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.0, + "rpc_duration_seconds_count": 1, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 1.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, { + // Updated Summary + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 2.0, + "rpc_duration_seconds_count": 2, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 2.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, }, expected: []*dto.MetricFamily{ { @@ -330,12 +379,43 @@ func TestCollectionExpire(t *testing.T) { }, }, }, + { + name: "expire based on add time", + now: time.Unix(20, 0), + age: 10 * time.Second, 
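The `addtime` field threaded through these cases is the point of the patch: expiry is now computed from when a metric entered the collection, not from the metric's own (possibly old) timestamp, so a stale-timestamped metric still survives if it was added recently. Reduced from the collection.go hunk above:

```go
// Inside Expire: a metric is dropped when it was *added* longer than
// `age` ago, regardless of how old its own Time field is.
expireTime := now.Add(-age)
for key, metric := range entry.Metrics {
	if metric.AddTime.Before(expireTime) {
		delete(entry.Metrics, key)
	}
}
```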
+ input: []Input{ + { + metric: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + addtime: time.Unix(15, 0), + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("cpu_time_idle"), + Help: proto.String(helpString), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Untyped: &dto.Untyped{Value: proto.Float64(42.0)}, + }, + }, + }, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := NewCollection(FormatConfig{}) - for _, metric := range tt.metrics { - c.Add(metric) + for _, item := range tt.input { + c.Add(item.metric, item.addtime) } c.Expire(tt.now, tt.age) diff --git a/plugins/serializers/prometheus/prometheus.go b/plugins/serializers/prometheus/prometheus.go index 11c305aa4..9e5df5882 100644 --- a/plugins/serializers/prometheus/prometheus.go +++ b/plugins/serializers/prometheus/prometheus.go @@ -2,6 +2,7 @@ package prometheus import ( "bytes" + "time" "github.com/influxdata/telegraf" "github.com/prometheus/common/expfmt" @@ -53,7 +54,7 @@ func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) { func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { coll := NewCollection(s.config) for _, metric := range metrics { - coll.Add(metric) + coll.Add(metric, time.Now()) } var buf bytes.Buffer From eb29f46721b4cd35508f8b379c1ff80ec155f3f2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 24 Feb 2020 16:01:27 -0800 Subject: [PATCH 1551/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7d1105475..539eb3622 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -64,6 +64,7 @@ - [#6820](https://github.com/influxdata/telegraf/issues/6820): Fix pgbouncer input when used with newer pgbouncer versions. - [#6913](https://github.com/influxdata/telegraf/issues/6913): Support up to 8192 stats in the ethtool input. - [#7060](https://github.com/influxdata/telegraf/issues/7060): Fix perf counters collection on named instances in sqlserver input. +- [#6926](https://github.com/influxdata/telegraf/issues/6926): Use add time for prometheus expiration calculation. ## v1.13.3 [2020-02-04] From b127254cca22c4edaabd1b2b53a63732698de7e7 Mon Sep 17 00:00:00 2001 From: donvipre Date: Tue, 25 Feb 2020 01:32:09 +0100 Subject: [PATCH 1552/1815] Add additional tags and fields to apcupsd (#7065) - new tags: - model - new metrics: - battery_date - nominal_input_voltage - nominal_battery_voltage - nominal_power - firmware --- plugins/inputs/apcupsd/README.md | 6 ++++++ plugins/inputs/apcupsd/apcupsd.go | 26 +++++++++++++--------- plugins/inputs/apcupsd/apcupsd_test.go | 30 +++++++++++++++++--------- 3 files changed, 42 insertions(+), 20 deletions(-) diff --git a/plugins/inputs/apcupsd/README.md b/plugins/inputs/apcupsd/README.md index be79ab4a8..97526d7ec 100644 --- a/plugins/inputs/apcupsd/README.md +++ b/plugins/inputs/apcupsd/README.md @@ -25,6 +25,7 @@ apcupsd should be installed and it's daemon should be running. - serial - status (string representing the set status_flags) - ups_name + - model - fields: - status_flags ([status-bits][]) - input_voltage @@ -36,6 +37,11 @@ apcupsd should be installed and it's daemon should be running. 
- battery_voltage - input_frequency - time_on_battery_ns + - battery_date + - nominal_input_voltage + - nominal_battery_voltage + - nominal_power + - firmware diff --git a/plugins/inputs/apcupsd/apcupsd.go b/plugins/inputs/apcupsd/apcupsd.go index 9a73c454a..a862bbfc8 100644 --- a/plugins/inputs/apcupsd/apcupsd.go +++ b/plugins/inputs/apcupsd/apcupsd.go @@ -63,6 +63,7 @@ func (h *ApcUpsd) Gather(acc telegraf.Accumulator) error { "serial": status.SerialNumber, "ups_name": status.UPSName, "status": status.Status, + "model": status.Model, } flags, err := strconv.ParseUint(strings.Fields(status.StatusFlags)[0], 0, 64) @@ -71,16 +72,21 @@ func (h *ApcUpsd) Gather(acc telegraf.Accumulator) error { } fields := map[string]interface{}{ - "status_flags": flags, - "input_voltage": status.LineVoltage, - "load_percent": status.LoadPercent, - "battery_charge_percent": status.BatteryChargePercent, - "time_left_ns": status.TimeLeft.Nanoseconds(), - "output_voltage": status.OutputVoltage, - "internal_temp": status.InternalTemp, - "battery_voltage": status.BatteryVoltage, - "input_frequency": status.LineFrequency, - "time_on_battery_ns": status.TimeOnBattery.Nanoseconds(), + "status_flags": flags, + "input_voltage": status.LineVoltage, + "load_percent": status.LoadPercent, + "battery_charge_percent": status.BatteryChargePercent, + "time_left_ns": status.TimeLeft.Nanoseconds(), + "output_voltage": status.OutputVoltage, + "internal_temp": status.InternalTemp, + "battery_voltage": status.BatteryVoltage, + "input_frequency": status.LineFrequency, + "time_on_battery_ns": status.TimeOnBattery.Nanoseconds(), + "nominal_input_voltage": status.NominalInputVoltage, + "nominal_battery_voltage": status.NominalBatteryVoltage, + "nominal_power": status.NominalPower, + "firmware": status.Firmware, + "battery_date": status.BatteryDate, } acc.AddFields("apcupsd", fields, tags) diff --git a/plugins/inputs/apcupsd/apcupsd_test.go b/plugins/inputs/apcupsd/apcupsd_test.go index 2418faf85..dfad765b3 100644 --- a/plugins/inputs/apcupsd/apcupsd_test.go +++ b/plugins/inputs/apcupsd/apcupsd_test.go @@ -125,18 +125,24 @@ func TestApcupsdGather(t *testing.T) { "serial": "ABC123", "status": "ONLINE", "ups_name": "BERTHA", + "model": "Model 12345", }, fields: map[string]interface{}{ - "status_flags": uint64(8), - "battery_charge_percent": float64(0), - "battery_voltage": float64(0), - "input_frequency": float64(0), - "input_voltage": float64(0), - "internal_temp": float64(0), - "load_percent": float64(13), - "output_voltage": float64(0), - "time_left_ns": int64(2790000000000), - "time_on_battery_ns": int64(0), + "status_flags": uint64(8), + "battery_charge_percent": float64(0), + "battery_voltage": float64(0), + "input_frequency": float64(0), + "input_voltage": float64(0), + "internal_temp": float64(0), + "load_percent": float64(13), + "output_voltage": float64(0), + "time_left_ns": int64(2790000000000), + "time_on_battery_ns": int64(0), + "nominal_input_voltage": float64(230), + "nominal_battery_voltage": float64(12), + "nominal_power": int(865), + "firmware": string("857.L3 .I USB FW:L3"), + "battery_date": time.Date(2016, time.September, 06, 0, 0, 0, 0, time.UTC), }, out: genOutput, }, @@ -190,6 +196,7 @@ func genOutput() [][]byte { "STATUS : ONLINE", "STATFLAG : 0x08 Status Flag", "UPSNAME : BERTHA", + "MODEL : Model 12345", "DATE : 2016-09-06 22:13:28 -0400", "HOSTNAME : example", "LOADPCT : 13.0 Percent Load Capacity", @@ -198,7 +205,10 @@ func genOutput() [][]byte { "TONBATT : 0 seconds", "NUMXFERS : 0", "SELFTEST : NO", + 
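The fixture being extended here feeds raw apcupsd status lines through the real parser. For sanity-checking a value like NOMPOWER by hand, the parsing amounts to something like the following (illustrative only; the plugin itself relies on the apcupsd client library rather than this helper):

```go
package example

import (
	"fmt"
	"strconv"
	"strings"
)

// parseIntField extracts the leading integer from an apcupsd status
// line such as "NOMPOWER : 865 Watts".
func parseIntField(line string) (int, error) {
	parts := strings.SplitN(line, ":", 2)
	if len(parts) != 2 {
		return 0, fmt.Errorf("malformed status line: %q", line)
	}
	fields := strings.Fields(parts[1])
	if len(fields) == 0 {
		return 0, fmt.Errorf("no value in status line: %q", line)
	}
	return strconv.Atoi(fields[0])
}
```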
"NOMINV : 230 Volts", + "NOMBATTV : 12.0 Volts", "NOMPOWER : 865 Watts", + "FIRMWARE : 857.L3 .I USB FW:L3", } var out [][]byte From fc2486f24c264069f0b075232fc48a9cb2abfd76 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 24 Feb 2020 16:32:58 -0800 Subject: [PATCH 1553/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 539eb3622..7086a928b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,7 @@ - [#7035](https://github.com/influxdata/telegraf/pull/7035): Fix dash to underscore replacement when handling embedded tags in Cisco MDT. - [#7039](https://github.com/influxdata/telegraf/pull/7039): Add process created_at time to procstat input. - [#7022](https://github.com/influxdata/telegraf/pull/7022): Add support for credentials file to nats_consumer and nats output. +- [#7065](https://github.com/influxdata/telegraf/pull/7065): Add additional tags and fields to apcupsd. #### Bugfixes From 2e32f894b644485e03b6a578593b129f14510793 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 25 Feb 2020 13:40:29 -0500 Subject: [PATCH 1554/1815] Fix inconsistency with input error counting (#7077) --- agent/accumulator.go | 10 +--- agent/accumulator_test.go | 6 +- agent/agent.go | 6 +- internal/models/log.go | 25 ++++++-- internal/models/log_test.go | 60 +++---------------- internal/models/running_aggregator.go | 13 ++-- internal/models/running_input.go | 19 ++++-- internal/models/running_input_test.go | 31 ++++++++++ internal/models/running_output.go | 13 ++-- internal/models/running_processor.go | 13 ++-- .../cloud_pubsub_push/pubsub_push_test.go | 4 ++ 11 files changed, 115 insertions(+), 85 deletions(-) diff --git a/agent/accumulator.go b/agent/accumulator.go index 21146e3e2..65000fd98 100644 --- a/agent/accumulator.go +++ b/agent/accumulator.go @@ -1,21 +1,16 @@ package agent import ( - "log" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/influxdata/telegraf/selfstat" -) - -var ( - NErrors = selfstat.Register("agent", "gather_errors", map[string]string{}) ) type MetricMaker interface { LogName() string MakeMetric(metric telegraf.Metric) telegraf.Metric + Log() telegraf.Logger } type accumulator struct { @@ -110,8 +105,7 @@ func (ac *accumulator) AddError(err error) { if err == nil { return } - NErrors.Incr(1) - log.Printf("E! 
[%s] Error in plugin: %v", ac.maker.LogName(), err) + ac.maker.Log().Errorf("Error in plugin: %v", err) } func (ac *accumulator) SetPrecision(precision time.Duration) { diff --git a/agent/accumulator_test.go b/agent/accumulator_test.go index c84948ba9..496d131f4 100644 --- a/agent/accumulator_test.go +++ b/agent/accumulator_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/models" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -59,7 +60,6 @@ func TestAccAddError(t *testing.T) { a.AddError(fmt.Errorf("baz")) errs := bytes.Split(errBuf.Bytes(), []byte{'\n'}) - assert.EqualValues(t, int64(3), NErrors.Get()) require.Len(t, errs, 4) // 4 because of trailing newline assert.Contains(t, string(errs[0]), "TestPlugin") assert.Contains(t, string(errs[0]), "foo") @@ -154,3 +154,7 @@ func (tm *TestMetricMaker) LogName() string { func (tm *TestMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric { return metric } + +func (tm *TestMetricMaker) Log() telegraf.Logger { + return models.NewLogger("TestPlugin", "test", "") +} diff --git a/agent/agent.go b/agent/agent.go index aa8d07e67..66fc140ae 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -196,6 +196,7 @@ func (a *Agent) Test(ctx context.Context, waitDuration time.Duration) error { } } + hasErrors := false for _, input := range a.Config.Inputs { select { case <-ctx.Done(): @@ -215,15 +216,18 @@ func (a *Agent) Test(ctx context.Context, waitDuration time.Duration) error { nulAcc.SetPrecision(a.Precision()) if err := input.Input.Gather(nulAcc); err != nil { acc.AddError(err) + hasErrors = true } time.Sleep(500 * time.Millisecond) if err := input.Input.Gather(acc); err != nil { acc.AddError(err) + hasErrors = true } default: if err := input.Input.Gather(acc); err != nil { acc.AddError(err) + hasErrors = true } } } @@ -235,7 +239,7 @@ func (a *Agent) Test(ctx context.Context, waitDuration time.Duration) error { a.stopServiceInputs() } - if NErrors.Get() > 0 { + if hasErrors { return fmt.Errorf("One or more input plugins had an error") } return nil diff --git a/internal/models/log.go b/internal/models/log.go index a99eb3212..a89b17763 100644 --- a/internal/models/log.go +++ b/internal/models/log.go @@ -5,24 +5,39 @@ import ( "reflect" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/selfstat" ) // Logger defines a logging structure for plugins. type Logger struct { - Errs selfstat.Stat - Name string // Name is the plugin name, will be printed in the `[]`. + OnErrs []func() + Name string // Name is the plugin name, will be printed in the `[]`. +} + +// NewLogger creates a new logger instance +func NewLogger(pluginType, name, alias string) *Logger { + return &Logger{ + Name: logName(pluginType, name, alias), + } +} + +// OnErr defines a callback that triggers only when errors are about to be written to the log +func (l *Logger) OnErr(f func()) { + l.OnErrs = append(l.OnErrs, f) } // Errorf logs an error message, patterned after log.Printf. func (l *Logger) Errorf(format string, args ...interface{}) { - l.Errs.Incr(1) + for _, f := range l.OnErrs { + f() + } log.Printf("E! ["+l.Name+"] "+format, args...) } // Error logs an error message, patterned after log.Print. func (l *Logger) Error(args ...interface{}) { - l.Errs.Incr(1) + for _, f := range l.OnErrs { + f() + } log.Print(append([]interface{}{"E! [" + l.Name + "] "}, args...)...) 
} diff --git a/internal/models/log_test.go b/internal/models/log_test.go index d4bb6ca09..2b5ec39c6 100644 --- a/internal/models/log_test.go +++ b/internal/models/log_test.go @@ -8,63 +8,17 @@ import ( ) func TestErrorCounting(t *testing.T) { - iLog := Logger{Name: "inputs.test", Errs: selfstat.Register( + reg := selfstat.Register( "gather", "errors", map[string]string{"input": "test"}, - )} + ) + iLog := Logger{Name: "inputs.test"} + iLog.OnErr(func() { + reg.Incr(1) + }) iLog.Error("something went wrong") iLog.Errorf("something went wrong") - aLog := Logger{Name: "aggregators.test", Errs: selfstat.Register( - "aggregate", - "errors", - map[string]string{"aggregator": "test"}, - )} - aLog.Name = "aggregators.test" - aLog.Error("another thing happened") - - oLog := Logger{Name: "outputs.test", Errs: selfstat.Register( - "write", - "errors", - map[string]string{"output": "test"}, - )} - oLog.Error("another thing happened") - - pLog := Logger{Name: "processors.test", Errs: selfstat.Register( - "process", - "errors", - map[string]string{"processor": "test"}, - )} - pLog.Error("another thing happened") - - require.Equal(t, int64(2), iLog.Errs.Get()) - require.Equal(t, int64(1), aLog.Errs.Get()) - require.Equal(t, int64(1), oLog.Errs.Get()) - require.Equal(t, int64(1), pLog.Errs.Get()) -} - -func TestLogging(t *testing.T) { - log := Logger{Name: "inputs.test", Errs: selfstat.Register( - "gather", - "errors", - map[string]string{"input": "test"}, - )} - - log.Errs.Set(0) - - log.Debugf("something happened") - log.Debug("something happened") - - log.Warnf("something happened") - log.Warn("something happened") - require.Equal(t, int64(0), log.Errs.Get()) - - log.Infof("something happened") - log.Info("something happened") - require.Equal(t, int64(0), log.Errs.Get()) - - log.Errorf("something happened") - log.Error("something happened") - require.Equal(t, int64(2), log.Errs.Get()) + require.Equal(t, int64(2), reg.Get()) } diff --git a/internal/models/running_aggregator.go b/internal/models/running_aggregator.go index b8957e30a..d0ad944b1 100644 --- a/internal/models/running_aggregator.go +++ b/internal/models/running_aggregator.go @@ -29,10 +29,11 @@ func NewRunningAggregator(aggregator telegraf.Aggregator, config *AggregatorConf tags["alias"] = config.Alias } - logger := &Logger{ - Name: logName("aggregators", config.Name, config.Alias), - Errs: selfstat.Register("aggregate", "errors", tags), - } + aggErrorsRegister := selfstat.Register("aggregate", "errors", tags) + logger := NewLogger("aggregators", config.Name, config.Alias) + logger.OnErr(func() { + aggErrorsRegister.Incr(1) + }) setLogIfExist(aggregator, logger) @@ -176,3 +177,7 @@ func (r *RunningAggregator) push(acc telegraf.Accumulator) { elapsed := time.Since(start) r.PushTime.Incr(elapsed.Nanoseconds()) } + +func (r *RunningAggregator) Log() telegraf.Logger { + return r.log +} diff --git a/internal/models/running_input.go b/internal/models/running_input.go index c09fb1409..bb1033fdd 100644 --- a/internal/models/running_input.go +++ b/internal/models/running_input.go @@ -7,7 +7,10 @@ import ( "github.com/influxdata/telegraf/selfstat" ) -var GlobalMetricsGathered = selfstat.Register("agent", "metrics_gathered", map[string]string{}) +var ( + GlobalMetricsGathered = selfstat.Register("agent", "metrics_gathered", map[string]string{}) + GlobalGatherErrors = selfstat.Register("agent", "gather_errors", map[string]string{}) +) type RunningInput struct { Input telegraf.Input @@ -26,10 +29,12 @@ func NewRunningInput(input telegraf.Input, config 
*InputConfig) *RunningInput { tags["alias"] = config.Alias } - logger := &Logger{ - Name: logName("inputs", config.Name, config.Alias), - Errs: selfstat.Register("gather", "errors", tags), - } + inputErrorsRegister := selfstat.Register("gather", "errors", tags) + logger := NewLogger("inputs", config.Name, config.Alias) + logger.OnErr(func() { + inputErrorsRegister.Incr(1) + GlobalGatherErrors.Incr(1) + }) setLogIfExist(input, logger) return &RunningInput{ @@ -116,3 +121,7 @@ func (r *RunningInput) Gather(acc telegraf.Accumulator) error { func (r *RunningInput) SetDefaultTags(tags map[string]string) { r.defaultTags = tags } + +func (r *RunningInput) Log() telegraf.Logger { + return r.log +} diff --git a/internal/models/running_input_test.go b/internal/models/running_input_test.go index 5978a0061..ff3747116 100644 --- a/internal/models/running_input_test.go +++ b/internal/models/running_input_test.go @@ -4,6 +4,8 @@ import ( "testing" "time" + "github.com/influxdata/telegraf/selfstat" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" @@ -256,6 +258,35 @@ func TestMakeMetricNameSuffix(t *testing.T) { require.Equal(t, expected, m) } +func TestMetricErrorCounters(t *testing.T) { + ri := NewRunningInput(&testInput{}, &InputConfig{ + Name: "TestMetricErrorCounters", + }) + + getGatherErrors := func() int64 { + for _, r := range selfstat.Metrics() { + tag, hasTag := r.GetTag("input") + if r.Name() == "internal_gather" && hasTag && tag == "TestMetricErrorCounters" { + errCount, ok := r.GetField("errors") + if !ok { + t.Fatal("Expected error field") + } + return errCount.(int64) + } + } + return 0 + } + + before := getGatherErrors() + + ri.Log().Error("Oh no") + + after := getGatherErrors() + + require.Greater(t, after, before) + require.GreaterOrEqual(t, int64(1), GlobalGatherErrors.Get()) +} + type testInput struct{} func (t *testInput) Description() string { return "" } diff --git a/internal/models/running_output.go b/internal/models/running_output.go index c48bccd3c..13f2a94d6 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -63,10 +63,11 @@ func NewRunningOutput( tags["alias"] = config.Alias } - logger := &Logger{ - Name: logName("outputs", config.Name, config.Alias), - Errs: selfstat.Register("write", "errors", tags), - } + writeErrorsRegister := selfstat.Register("write", "errors", tags) + logger := NewLogger("outputs", config.Name, config.Alias) + logger.OnErr(func() { + writeErrorsRegister.Incr(1) + }) setLogIfExist(output, logger) if config.MetricBufferLimit > 0 { @@ -240,3 +241,7 @@ func (r *RunningOutput) LogBufferStatus() { nBuffer := r.buffer.Len() r.log.Debugf("Buffer fullness: %d / %d metrics", nBuffer, r.MetricBufferLimit) } + +func (r *RunningOutput) Log() telegraf.Logger { + return r.log +} diff --git a/internal/models/running_processor.go b/internal/models/running_processor.go index 22a7d0198..a7871b3e8 100644 --- a/internal/models/running_processor.go +++ b/internal/models/running_processor.go @@ -34,10 +34,11 @@ func NewRunningProcessor(processor telegraf.Processor, config *ProcessorConfig) tags["alias"] = config.Alias } - logger := &Logger{ - Name: logName("processors", config.Name, config.Alias), - Errs: selfstat.Register("process", "errors", tags), - } + processErrorsRegister := selfstat.Register("process", "errors", tags) + logger := NewLogger("processors", config.Name, config.Alias) + logger.OnErr(func() { + processErrorsRegister.Incr(1) + }) setLogIfExist(processor, 
logger) return &RunningProcessor{ @@ -97,3 +98,7 @@ func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric { return ret } + +func (r *RunningProcessor) Log() telegraf.Logger { + return r.log +} diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go index a0d71da94..308a8181d 100644 --- a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go +++ b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go @@ -193,6 +193,10 @@ func (tm *testMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric { return metric } +func (tm *testMetricMaker) Log() telegraf.Logger { + return models.NewLogger("test", "test", "") +} + type testOutput struct { // if true, mock a write failure failWrite bool From f263b7ce59eabbbd3ef392de814806ee8376a734 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 25 Feb 2020 10:43:02 -0800 Subject: [PATCH 1555/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7086a928b..200a88762 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -66,6 +66,7 @@ - [#6913](https://github.com/influxdata/telegraf/issues/6913): Support up to 8192 stats in the ethtool input. - [#7060](https://github.com/influxdata/telegraf/issues/7060): Fix perf counters collection on named instances in sqlserver input. - [#6926](https://github.com/influxdata/telegraf/issues/6926): Use add time for prometheus expiration calculation. +- [#7057](https://github.com/influxdata/telegraf/issues/7057): Fix inconsistency with input error counting in internal input. ## v1.13.3 [2020-02-04] From 8acf276afe4c50ba4e7fd161b094fff0aee2b81a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 25 Feb 2020 12:05:44 -0800 Subject: [PATCH 1556/1815] Use Go 1.13.8 to build official packages (#7079) --- .circleci/config.yml | 8 ++++---- Makefile | 8 ++++---- appveyor.yml | 2 +- scripts/ci-1.12.docker | 2 +- scripts/ci-1.13.docker | 2 +- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 6513c812e..9e8897041 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,12 +6,12 @@ defaults: GOFLAGS: -p=8 go-1_12: &go-1_12 docker: - - image: 'quay.io/influxdb/telegraf-ci:1.12.14' + - image: 'quay.io/influxdb/telegraf-ci:1.12.17' environment: GO111MODULE: 'on' go-1_13: &go-1_13 docker: - - image: 'quay.io/influxdb/telegraf-ci:1.13.5' + - image: 'quay.io/influxdb/telegraf-ci:1.13.8' mac: &mac macos: xcode: 11.3.1 @@ -19,7 +19,7 @@ defaults: environment: HOMEBREW_NO_AUTO_UPDATE: 1 GOFLAGS: -p=8 - + version: 2 jobs: deps: @@ -104,7 +104,7 @@ jobs: - run: 'make' - run: 'make check' - run: 'make test' - + package: <<: [ *defaults, *go-1_13 ] steps: diff --git a/Makefile b/Makefile index 27aefdeb7..46e1cf12a 100644 --- a/Makefile +++ b/Makefile @@ -141,10 +141,10 @@ plugin-%: .PHONY: ci-1.13 ci-1.13: - docker build -t quay.io/influxdb/telegraf-ci:1.13.5 - < scripts/ci-1.13.docker - docker push quay.io/influxdb/telegraf-ci:1.13.5 + docker build -t quay.io/influxdb/telegraf-ci:1.13.8 - < scripts/ci-1.13.docker + docker push quay.io/influxdb/telegraf-ci:1.13.8 .PHONY: ci-1.12 ci-1.12: - docker build -t quay.io/influxdb/telegraf-ci:1.12.14 - < scripts/ci-1.12.docker - docker push quay.io/influxdb/telegraf-ci:1.12.14 + docker build -t quay.io/influxdb/telegraf-ci:1.12.17 - < scripts/ci-1.12.docker + docker push quay.io/influxdb/telegraf-ci:1.12.17 diff --git a/appveyor.yml b/appveyor.yml index 559647e35..bff7dc0cb 
100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -8,7 +8,7 @@ cache: clone_folder: C:\gopath\src\github.com\influxdata\telegraf environment: - GOVERSION: 1.13.5 + GOVERSION: 1.13.8 GOPATH: C:\gopath platform: x64 diff --git a/scripts/ci-1.12.docker b/scripts/ci-1.12.docker index e68618dbc..f3f59349a 100644 --- a/scripts/ci-1.12.docker +++ b/scripts/ci-1.12.docker @@ -1,4 +1,4 @@ -FROM golang:1.12.14 +FROM golang:1.12.17 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/ci-1.13.docker b/scripts/ci-1.13.docker index ad71addb9..9ee601ee1 100644 --- a/scripts/ci-1.13.docker +++ b/scripts/ci-1.13.docker @@ -1,4 +1,4 @@ -FROM golang:1.13.5 +FROM golang:1.13.8 RUN chmod -R 755 "$GOPATH" From 9f8a73426dd8707bdb42b1185114c9ce9e74c432 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 25 Feb 2020 12:10:49 -0800 Subject: [PATCH 1557/1815] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 200a88762..cc204d6b9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,10 @@ ## v1.13.4 [unreleased] +#### Release Notes + +- Official packages now built with Go 1.13.8. + #### Bugfixes - [#6988](https://github.com/influxdata/telegraf/issues/6988): Parse NaN values from summary types in prometheus input. From 8c99dc7b5ef9ac612ef0b7a0a90bf67b47e36905 Mon Sep 17 00:00:00 2001 From: Anthony Arnaud Date: Tue, 25 Feb 2020 15:19:28 -0500 Subject: [PATCH 1558/1815] Use the same timestamp per call if no time is provided (#7063) --- plugins/inputs/prometheus/parser.go | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go index 8f7061df8..6427c3f8c 100644 --- a/plugins/inputs/prometheus/parser.go +++ b/plugins/inputs/prometheus/parser.go @@ -55,6 +55,8 @@ func ParseV2(buf []byte, header http.Header) ([]telegraf.Metric, error) { } } + // make sure all metrics have a consistent timestamp so that metrics don't straddle two different seconds + now := time.Now() // read metrics for metricName, mf := range metricFamilies { for _, m := range mf.Metric { @@ -63,11 +65,11 @@ func ParseV2(buf []byte, header http.Header) ([]telegraf.Metric, error) { if mf.GetType() == dto.MetricType_SUMMARY { // summary metric - telegrafMetrics := makeQuantilesV2(m, tags, metricName, mf.GetType()) + telegrafMetrics := makeQuantilesV2(m, tags, metricName, mf.GetType(), now) metrics = append(metrics, telegrafMetrics...) } else if mf.GetType() == dto.MetricType_HISTOGRAM { // histogram metric - telegrafMetrics := makeBucketsV2(m, tags, metricName, mf.GetType()) + telegrafMetrics := makeBucketsV2(m, tags, metricName, mf.GetType(), now) metrics = append(metrics, telegrafMetrics...) 
} else { // standard metric @@ -80,7 +82,7 @@ func ParseV2(buf []byte, header http.Header) ([]telegraf.Metric, error) { if m.TimestampMs != nil && *m.TimestampMs > 0 { t = time.Unix(0, *m.TimestampMs*1000000) } else { - t = time.Now() + t = now } metric, err := metric.New("prometheus", tags, fields, t, valueType(mf.GetType())) if err == nil { @@ -95,14 +97,14 @@ func ParseV2(buf []byte, header http.Header) ([]telegraf.Metric, error) { } // Get Quantiles for summary metric & Buckets for histogram -func makeQuantilesV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType) []telegraf.Metric { +func makeQuantilesV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { var metrics []telegraf.Metric fields := make(map[string]interface{}) var t time.Time if m.TimestampMs != nil && *m.TimestampMs > 0 { t = time.Unix(0, *m.TimestampMs*1000000) } else { - t = time.Now() + t = now } fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount()) fields[metricName+"_sum"] = float64(m.GetSummary().GetSampleSum()) @@ -127,14 +129,14 @@ func makeQuantilesV2(m *dto.Metric, tags map[string]string, metricName string, m } // Get Buckets from histogram metric -func makeBucketsV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType) []telegraf.Metric { +func makeBucketsV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { var metrics []telegraf.Metric fields := make(map[string]interface{}) var t time.Time if m.TimestampMs != nil && *m.TimestampMs > 0 { t = time.Unix(0, *m.TimestampMs*1000000) } else { - t = time.Now() + t = now } fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount()) fields[metricName+"_sum"] = float64(m.GetHistogram().GetSampleSum()) @@ -193,6 +195,8 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { } } + // make sure all metrics have a consistent timestamp so that metrics don't straddle two different seconds + now := time.Now() // read metrics for metricName, mf := range metricFamilies { for _, m := range mf.Metric { @@ -221,7 +225,7 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { if m.TimestampMs != nil && *m.TimestampMs > 0 { t = time.Unix(0, *m.TimestampMs*1000000) } else { - t = time.Now() + t = now } metric, err := metric.New(metricName, tags, fields, t, valueType(mf.GetType())) if err == nil { From 608891c405d81b24427c835d7972214472dfa853 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 25 Feb 2020 12:20:55 -0800 Subject: [PATCH 1559/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cc204d6b9..b00e9ea01 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -71,6 +71,7 @@ - [#7060](https://github.com/influxdata/telegraf/issues/7060): Fix perf counters collection on named instances in sqlserver input. - [#6926](https://github.com/influxdata/telegraf/issues/6926): Use add time for prometheus expiration calculation. - [#7057](https://github.com/influxdata/telegraf/issues/7057): Fix inconsistency with input error counting in internal input. +- [#7063](https://github.com/influxdata/telegraf/pull/7063): Use the same timestamp per call if no time is provided in prometheus input. 
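The essence of that timestamp fix is capturing `time.Now()` once per parse call and reusing it for every sample that carries no timestamp of its own. A minimal sketch of the pattern, with an invented `sample` type standing in for the decoded Prometheus metric:

```go
package main

import (
	"fmt"
	"time"
)

type sample struct {
	name        string
	timestampMs *int64 // nil when the exposition format carried no timestamp
}

// assignTimes resolves each sample's time, falling back to a single `now`
// captured once per call so untimestamped samples cannot straddle two seconds.
func assignTimes(samples []sample) []time.Time {
	now := time.Now()
	times := make([]time.Time, 0, len(samples))
	for _, s := range samples {
		if s.timestampMs != nil && *s.timestampMs > 0 {
			times = append(times, time.Unix(0, *s.timestampMs*int64(time.Millisecond)))
		} else {
			times = append(times, now)
		}
	}
	return times
}

func main() {
	ms := int64(1582656000000)
	ts := assignTimes([]sample{{name: "a"}, {name: "b", timestampMs: &ms}, {name: "c"}})
	fmt.Println(ts[0].Equal(ts[2])) // true: both fell back to the same `now`
}
```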
## v1.13.3 [2020-02-04] From 9d601d6d78f13e83a1ce1717ebd86bc362124064 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 25 Feb 2020 12:51:56 -0800 Subject: [PATCH 1560/1815] Set 1.13.4 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b00e9ea01..e9bd9a957 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,7 +57,7 @@ - [#6867](https://github.com/influxdata/telegraf/issues/6867): Fix case sensitive collation in sqlserver input. - [#7005](https://github.com/influxdata/telegraf/pull/7005): Search for chronyc only when chrony input plugin is enabled. -## v1.13.4 [unreleased] +## v1.13.4 [2020-02-25] #### Release Notes From de16279d7230bc81d2a3c3f32be65e53df1d3a34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joao=20Gilberto=20Magalh=C3=A3es?= Date: Tue, 25 Feb 2020 18:36:06 -0600 Subject: [PATCH 1561/1815] Add RabbitMQ slave_nodes and synchronized_slave_nodes metrics (#7084) --- plugins/inputs/rabbitmq/README.md | 4 ++- plugins/inputs/rabbitmq/rabbitmq.go | 34 +++++++++++--------- plugins/inputs/rabbitmq/rabbitmq_test.go | 2 ++ plugins/inputs/rabbitmq/testdata/queues.json | 8 ++++- 4 files changed, 31 insertions(+), 17 deletions(-) diff --git a/plugins/inputs/rabbitmq/README.md b/plugins/inputs/rabbitmq/README.md index 0e119b25e..1bdd553de 100644 --- a/plugins/inputs/rabbitmq/README.md +++ b/plugins/inputs/rabbitmq/README.md @@ -173,7 +173,9 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management - messages_ready (int, count) - messages_redeliver (int, count) - messages_redeliver_rate (float, messages per second) - - messages_unack (integer, count) + - messages_unack (int, count) + - slave_nodes (int, count) + - synchronised_slave_nodes (int, count) + rabbitmq_exchange - tags: diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index d27c522bf..68652ca36 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -121,17 +121,19 @@ type QueueTotals struct { // Queue ... type Queue struct { - QueueTotals // just to not repeat the same code - MessageStats `json:"message_stats"` - Memory int64 - Consumers int64 - ConsumerUtilisation float64 `json:"consumer_utilisation"` - Name string - Node string - Vhost string - Durable bool - AutoDelete bool `json:"auto_delete"` - IdleSince string `json:"idle_since"` + QueueTotals // just to not repeat the same code + MessageStats `json:"message_stats"` + Memory int64 + Consumers int64 + ConsumerUtilisation float64 `json:"consumer_utilisation"` + Name string + Node string + Vhost string + Durable bool + AutoDelete bool `json:"auto_delete"` + IdleSince string `json:"idle_since"` + SlaveNodes []string `json:"slave_nodes"` + SynchronisedSlaveNodes []string `json:"synchronised_slave_nodes"` } // Node ... 
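The management API returns mirror information as arrays of node names, and the plugin reports only their lengths; a queue without mirrors (field absent from the JSON) decodes to a nil slice, so `len` naturally yields zero. A minimal decoding sketch (the `queue` type here is trimmed to just the two new fields):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// queue models only the two new fields: the management API returns the node
// names as JSON arrays, and the plugin reports their lengths as fields.
type queue struct {
	SlaveNodes             []string `json:"slave_nodes"`
	SynchronisedSlaveNodes []string `json:"synchronised_slave_nodes"`
}

func main() {
	payload := []byte(`{
		"slave_nodes": ["rabbit@ip-10-1-2-118", "rabbit@ip-10-1-2-119"],
		"synchronised_slave_nodes": ["rabbit@ip-10-1-2-118"]
	}`)

	var q queue
	if err := json.Unmarshal(payload, &q); err != nil {
		panic(err)
	}

	// These two counts become the new rabbitmq_queue fields.
	fmt.Println("slave_nodes =", len(q.SlaveNodes))                          // 2
	fmt.Println("synchronised_slave_nodes =", len(q.SynchronisedSlaveNodes)) // 1
}
```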
@@ -585,10 +587,12 @@ func gatherQueues(r *RabbitMQ, acc telegraf.Accumulator) { "rabbitmq_queue", map[string]interface{}{ // common information - "consumers": queue.Consumers, - "consumer_utilisation": queue.ConsumerUtilisation, - "idle_since": queue.IdleSince, - "memory": queue.Memory, + "consumers": queue.Consumers, + "consumer_utilisation": queue.ConsumerUtilisation, + "idle_since": queue.IdleSince, + "slave_nodes": len(queue.SlaveNodes), + "synchronised_slave_nodes": len(queue.SynchronisedSlaveNodes), + "memory": queue.Memory, // messages information "message_bytes": queue.MessageBytes, "message_bytes_ready": queue.MessageBytesReady, diff --git a/plugins/inputs/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go index 0991dd0c0..c207706c9 100644 --- a/plugins/inputs/rabbitmq/rabbitmq_test.go +++ b/plugins/inputs/rabbitmq/rabbitmq_test.go @@ -98,6 +98,8 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) { "messages_redeliver": 33, "messages_redeliver_rate": 2.5, "idle_since": "2015-11-01 8:22:14", + "slave_nodes": 1, + "synchronised_slave_nodes": 1, } compareMetrics(t, queuesMetrics, acc, "rabbitmq_queue") diff --git a/plugins/inputs/rabbitmq/testdata/queues.json b/plugins/inputs/rabbitmq/testdata/queues.json index 356e1a466..294f78872 100644 --- a/plugins/inputs/rabbitmq/testdata/queues.json +++ b/plugins/inputs/rabbitmq/testdata/queues.json @@ -109,6 +109,12 @@ "exclusive_consumer_tag": null, "effective_policy_definition": [], "operator_policy": null, - "policy": null + "policy": null, + "slave_nodes":[ + "rabbit@ip-10-1-2-118" + ], + "synchronised_slave_nodes":[ + "rabbit@ip-10-1-2-118" + ] } ] \ No newline at end of file From 1301cbce0a72aa4de619183cc12765b4b308cc22 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 25 Feb 2020 16:37:29 -0800 Subject: [PATCH 1562/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e9bd9a957..40bf18e9b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -50,6 +50,7 @@ - [#7039](https://github.com/influxdata/telegraf/pull/7039): Add process created_at time to procstat input. - [#7022](https://github.com/influxdata/telegraf/pull/7022): Add support for credentials file to nats_consumer and nats output. - [#7065](https://github.com/influxdata/telegraf/pull/7065): Add additional tags and fields to apcupsd. +- [#7084](https://github.com/influxdata/telegraf/pull/7084): Add RabbitMQ slave_nodes and synchronized_slave_nodes metrics. 
#### Bugfixes From f91261d7fe634a7a917dd12945c3c4e1c325495b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 25 Feb 2020 18:58:23 -0800 Subject: [PATCH 1563/1815] Add basic benchmark for influx serializer reader (#7078) --- plugins/serializers/influx/reader_test.go | 89 +++++++++++++++++++++++ 1 file changed, 89 insertions(+) diff --git a/plugins/serializers/influx/reader_test.go b/plugins/serializers/influx/reader_test.go index 642b71b1c..7aaf3fccf 100644 --- a/plugins/serializers/influx/reader_test.go +++ b/plugins/serializers/influx/reader_test.go @@ -189,3 +189,92 @@ func TestZeroLengthBufferNoError(t *testing.T) { require.NoError(t, err) require.Equal(t, 0, n) } + +func BenchmarkReader(b *testing.B) { + m := MustMetric( + metric.New( + "procstat", + map[string]string{ + "exe": "bash", + "process_name": "bash", + }, + map[string]interface{}{ + "cpu_time": 0, + "cpu_time_guest": float64(0), + "cpu_time_guest_nice": float64(0), + "cpu_time_idle": float64(0), + "cpu_time_iowait": float64(0), + "cpu_time_irq": float64(0), + "cpu_time_nice": float64(0), + "cpu_time_soft_irq": float64(0), + "cpu_time_steal": float64(0), + "cpu_time_system": float64(0), + "cpu_time_user": float64(0.02), + "cpu_usage": float64(0), + "involuntary_context_switches": 2, + "memory_data": 1576960, + "memory_locked": 0, + "memory_rss": 5103616, + "memory_stack": 139264, + "memory_swap": 0, + "memory_vms": 21659648, + "nice_priority": 20, + "num_fds": 4, + "num_threads": 1, + "pid": 29417, + "read_bytes": 0, + "read_count": 259, + "realtime_priority": 0, + "rlimit_cpu_time_hard": 2147483647, + "rlimit_cpu_time_soft": 2147483647, + "rlimit_file_locks_hard": 2147483647, + "rlimit_file_locks_soft": 2147483647, + "rlimit_memory_data_hard": 2147483647, + "rlimit_memory_data_soft": 2147483647, + "rlimit_memory_locked_hard": 65536, + "rlimit_memory_locked_soft": 65536, + "rlimit_memory_rss_hard": 2147483647, + "rlimit_memory_rss_soft": 2147483647, + "rlimit_memory_stack_hard": 2147483647, + "rlimit_memory_stack_soft": 8388608, + "rlimit_memory_vms_hard": 2147483647, + "rlimit_memory_vms_soft": 2147483647, + "rlimit_nice_priority_hard": 0, + "rlimit_nice_priority_soft": 0, + "rlimit_num_fds_hard": 4096, + "rlimit_num_fds_soft": 1024, + "rlimit_realtime_priority_hard": 0, + "rlimit_realtime_priority_soft": 0, + "rlimit_signals_pending_hard": 78994, + "rlimit_signals_pending_soft": 78994, + "signals_pending": 0, + "voluntary_context_switches": 42, + "write_bytes": 106496, + "write_count": 35, + }, + time.Unix(0, 1517620624000000000), + ), + ) + + metrics := make([]telegraf.Metric, 1000, 1000) + for i := range metrics { + metrics[i] = m + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + readbuf := make([]byte, 4096, 4096) + serializer := NewSerializer() + reader := NewReader(metrics, serializer) + for { + _, err := reader.Read(readbuf) + if err == io.EOF { + break + } + + if err != nil { + panic(err.Error()) + } + } + } +} From 0103691eb6dd2a6d7a4a260c113015fad4f4a7c6 Mon Sep 17 00:00:00 2001 From: Rick van de Loo Date: Wed, 26 Feb 2020 18:33:24 +0100 Subject: [PATCH 1564/1815] Fix typo in exec input readme (#7086) --- etc/telegraf.conf | 2 +- plugins/outputs/exec/README.md | 2 +- plugins/outputs/exec/exec.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 28edd5192..79add5bd2 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -509,7 +509,7 @@ # # Send metrics to command as input over stdin # [[outputs.exec]] -# ## Command to injest metrics via 
stdin. +# ## Command to ingest metrics via stdin. # command = ["tee", "-a", "/dev/null"] # # ## Timeout for command to complete. diff --git a/plugins/outputs/exec/README.md b/plugins/outputs/exec/README.md index 2b0c2d3f1..d82676a25 100644 --- a/plugins/outputs/exec/README.md +++ b/plugins/outputs/exec/README.md @@ -12,7 +12,7 @@ On non-zero exit stderr will be logged at error level. ```toml [[outputs.exec]] - ## Command to injest metrics via stdin. + ## Command to ingest metrics via stdin. command = ["tee", "-a", "/dev/null"] ## Timeout for command to complete. diff --git a/plugins/outputs/exec/exec.go b/plugins/outputs/exec/exec.go index 474c96791..5995d4bca 100644 --- a/plugins/outputs/exec/exec.go +++ b/plugins/outputs/exec/exec.go @@ -26,7 +26,7 @@ type Exec struct { } var sampleConfig = ` - ## Command to injest metrics via stdin. + ## Command to ingest metrics via stdin. command = ["tee", "-a", "/dev/null"] ## Timeout for command to complete. From a20e6953d27397f61a3ea4e385d5e934044b17dc Mon Sep 17 00:00:00 2001 From: Jan Graichen Date: Fri, 28 Feb 2020 19:46:03 +0100 Subject: [PATCH 1565/1815] Add an exec daemon plugin (#4424) --- README.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/execd/README.md | 111 +++++++++++++ plugins/inputs/execd/examples/count.go | 24 +++ plugins/inputs/execd/examples/count.rb | 13 ++ plugins/inputs/execd/examples/count.sh | 12 ++ plugins/inputs/execd/execd.go | 209 +++++++++++++++++++++++++ plugins/inputs/execd/execd_unix.go | 33 ++++ plugins/inputs/execd/execd_win.go | 26 +++ 9 files changed, 430 insertions(+) create mode 100644 plugins/inputs/execd/README.md create mode 100644 plugins/inputs/execd/examples/count.go create mode 100755 plugins/inputs/execd/examples/count.rb create mode 100755 plugins/inputs/execd/examples/count.sh create mode 100644 plugins/inputs/execd/execd.go create mode 100644 plugins/inputs/execd/execd_unix.go create mode 100644 plugins/inputs/execd/execd_win.go diff --git a/README.md b/README.md index 0df0f003c..49a4a456e 100644 --- a/README.md +++ b/README.md @@ -185,6 +185,7 @@ For documentation on the latest development code see the [documentation index][d * [elasticsearch](./plugins/inputs/elasticsearch) * [ethtool](./plugins/inputs/ethtool) * [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios) +* [execd](./plugins/inputs/execd) * [fail2ban](./plugins/inputs/fail2ban) * [fibaro](./plugins/inputs/fibaro) * [file](./plugins/inputs/file) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index dec04b397..274d7fd41 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -40,6 +40,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch" _ "github.com/influxdata/telegraf/plugins/inputs/ethtool" _ "github.com/influxdata/telegraf/plugins/inputs/exec" + _ "github.com/influxdata/telegraf/plugins/inputs/execd" _ "github.com/influxdata/telegraf/plugins/inputs/fail2ban" _ "github.com/influxdata/telegraf/plugins/inputs/fibaro" _ "github.com/influxdata/telegraf/plugins/inputs/file" diff --git a/plugins/inputs/execd/README.md b/plugins/inputs/execd/README.md new file mode 100644 index 000000000..1205fdd56 --- /dev/null +++ b/plugins/inputs/execd/README.md @@ -0,0 +1,111 @@ +# Execd Input Plugin + +The `execd` plugin runs an external program as a daemon. 
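For instance, a bare-bones daemon driven by STDIN signaling could look like the sketch below (illustrative only; the bash, Go, and Ruby programs shipped under `examples/` and shown later in this README are the canonical ones):

```go
package main

// A minimal execd-compatible daemon using STDIN signaling: each newline
// received on stdin triggers one metric in influx line protocol on stdout.
import (
	"bufio"
	"fmt"
	"os"
)

func main() {
	counter := 0
	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() { // telegraf writes "\n" once per collection interval
		fmt.Printf("counter_stdin count=%d\n", counter)
		counter++
	}
}
```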
The programs must output metrics in any one of the accepted [Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md) on its standard output. + +The `signal` can be configured to send a signal the running daemon on each collection interval. + +Program output on standard error is mirrored to the telegraf log. + +### Configuration: + +```toml + ## Program to run as daemon + command = ["telegraf-smartctl", "-d", "/dev/sda"] + + ## Define how the process is signaled on each collection interval. + + ## Valid values are: + ## "none" : Do not signal anything. + ## The process must output metrics by itself. + ## "STDIN" : Send a newline on STDIN. + ## "SIGHUP" : Send a HUP signal. Not available on Windows. + ## "SIGUSR1" : Send a USR1 signal. Not available on Windows. + ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. + signal = "none" + + ## Delay before the process is restarted after an unexpected termination + restart_delay = "10s" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +``` + +### Example + +##### Daemon written in bash using STDIN signaling + +```bash +#!/bin/bash + +counter=0 + +while IFS= read -r LINE; do + echo "counter_bash count=${counter}" + let counter=counter+1 +done +``` + +```toml +[[inputs.execd]] + command = ["plugins/inputs/execd/examples/count.sh"] + signal = "STDIN" +``` + +##### Go daemon using SIGHUP + +```go +package main + +import ( + "fmt" + "os" + "os/signal" + "syscall" +) + +func main() { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGHUP) + + counter := 0 + + for { + <-c + + fmt.Printf("counter_go count=%d\n", counter) + counter++ + } +} + +``` + +```toml +[[inputs.execd]] + command = ["plugins/inputs/execd/examples/count.go.exe"] + signal = "SIGHUP" +``` + +##### Ruby daemon running standalone + +```ruby +#!/usr/bin/env ruby + +counter = 0 + +loop do + puts "counter_ruby count=#{counter}" + STDOUT.flush + + counter += 1 + sleep 1 +end +``` + +```toml +[[inputs.execd]] + command = ["plugins/inputs/execd/examples/count.rb"] + signal = "none" +``` diff --git a/plugins/inputs/execd/examples/count.go b/plugins/inputs/execd/examples/count.go new file mode 100644 index 000000000..d5e4a12e1 --- /dev/null +++ b/plugins/inputs/execd/examples/count.go @@ -0,0 +1,24 @@ +package main + +// Example using HUP signaling + +import ( + "fmt" + "os" + "os/signal" + "syscall" +) + +func main() { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGHUP) + + counter := 0 + + for { + <-c + + fmt.Printf("counter_go count=%d\n", counter) + counter++ + } +} diff --git a/plugins/inputs/execd/examples/count.rb b/plugins/inputs/execd/examples/count.rb new file mode 100755 index 000000000..220848d64 --- /dev/null +++ b/plugins/inputs/execd/examples/count.rb @@ -0,0 +1,13 @@ +#!/usr/bin/env ruby + +## Example in Ruby not using any signaling + +counter = 0 + +loop do + puts "counter_ruby count=#{counter}" + STDOUT.flush + counter += 1 + + sleep 1 +end diff --git a/plugins/inputs/execd/examples/count.sh b/plugins/inputs/execd/examples/count.sh new file mode 100755 index 000000000..aa6932a80 --- /dev/null +++ b/plugins/inputs/execd/examples/count.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +## Example in bash using STDIN signaling + +counter=0 + +while read; do + echo "counter_bash count=${counter}" + let counter=counter+1 +done + +(>&2 
echo "terminate") diff --git a/plugins/inputs/execd/execd.go b/plugins/inputs/execd/execd.go new file mode 100644 index 000000000..0d1fc7cc5 --- /dev/null +++ b/plugins/inputs/execd/execd.go @@ -0,0 +1,209 @@ +package execd + +import ( + "bufio" + "context" + "fmt" + "io" + "log" + "os/exec" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" +) + +const sampleConfig = ` + ## Program to run as daemon + command = ["telegraf-smartctl", "-d", "/dev/sda"] + + ## Define how the process is signaled on each collection interval. + + ## Valid values are: + ## "none" : Do not signal anything. + ## The process must output metrics by itself. + ## "STDIN" : Send a newline on STDIN. + ## "SIGHUP" : Send a HUP signal. Not available on Windows. + signal = "none" + + ## Delay before the process is restarted after an unexpected termination + restart_delay = "10s" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +` + +type Execd struct { + Command []string + Signal string + RestartDelay internal.Duration + + acc telegraf.Accumulator + cmd *exec.Cmd + parser parsers.Parser + stdin io.WriteCloser + cancel context.CancelFunc + wg sync.WaitGroup +} + +func (e *Execd) SampleConfig() string { + return sampleConfig +} + +func (e *Execd) Description() string { + return "Run executable as long-running input plugin" +} + +func (e *Execd) SetParser(parser parsers.Parser) { + e.parser = parser +} + +func (e *Execd) Start(acc telegraf.Accumulator) error { + e.acc = acc + + if len(e.Command) == 0 { + return fmt.Errorf("E! [inputs.execd] FATAL no command specified") + } + + e.wg.Add(1) + + var ctx context.Context + ctx, e.cancel = context.WithCancel(context.Background()) + + go func() { + e.cmdLoop(ctx) + e.wg.Done() + }() + + return nil +} + +func (e *Execd) Stop() { + e.cancel() + e.wg.Wait() +} + +func (e *Execd) cmdLoop(ctx context.Context) { + for { + // Use a buffered channel to ensure goroutine below can exit + // if `ctx.Done` is selected and nothing reads on `done` anymore + done := make(chan error, 1) + go func() { + done <- e.cmdRun() + }() + + select { + case <-ctx.Done(): + // Immediately exit process but with a graceful shutdown + // period before killing + internal.WaitTimeout(e.cmd, 0) + return + case err := <-done: + log.Printf("E! [inputs.execd] Process %s terminated: %s", e.Command, err) + } + + log.Printf("E! [inputs.execd] Restarting in %s...", e.RestartDelay.Duration) + + select { + case <-ctx.Done(): + return + case <-time.After(e.RestartDelay.Duration): + // Continue the loop and restart the process + } + } +} + +func (e *Execd) cmdRun() error { + var wg sync.WaitGroup + + if len(e.Command) > 1 { + e.cmd = exec.Command(e.Command[0], e.Command[1:]...) + } else { + e.cmd = exec.Command(e.Command[0]) + } + + stdin, err := e.cmd.StdinPipe() + if err != nil { + return fmt.Errorf("E! [inputs.execd] Error opening stdin pipe: %s", err) + } + + e.stdin = stdin + + stdout, err := e.cmd.StdoutPipe() + if err != nil { + return fmt.Errorf("E! [inputs.execd] Error opening stdout pipe: %s", err) + } + + stderr, err := e.cmd.StderrPipe() + if err != nil { + return fmt.Errorf("E! [inputs.execd] Error opening stderr pipe: %s", err) + } + + log.Printf("D! 
[inputs.execd] Starting process: %s", e.Command) + + err = e.cmd.Start() + if err != nil { + return fmt.Errorf("E! [inputs.execd] Error starting process: %s", err) + } + + wg.Add(2) + + go func() { + e.cmdReadOut(stdout) + wg.Done() + }() + + go func() { + e.cmdReadErr(stderr) + wg.Done() + }() + + wg.Wait() + return e.cmd.Wait() +} + +func (e *Execd) cmdReadOut(out io.Reader) { + scanner := bufio.NewScanner(out) + + for scanner.Scan() { + metrics, err := e.parser.Parse(scanner.Bytes()) + if err != nil { + e.acc.AddError(fmt.Errorf("E! [inputs.execd] Parse error: %s", err)) + } + + for _, metric := range metrics { + e.acc.AddMetric(metric) + } + } + + if err := scanner.Err(); err != nil { + e.acc.AddError(fmt.Errorf("E! [inputs.execd] Error reading stdout: %s", err)) + } +} + +func (e *Execd) cmdReadErr(out io.Reader) { + scanner := bufio.NewScanner(out) + + for scanner.Scan() { + log.Printf("E! [inputs.execd] stderr: %q", scanner.Text()) + } + + if err := scanner.Err(); err != nil { + e.acc.AddError(fmt.Errorf("E! [inputs.execd] Error reading stderr: %s", err)) + } +} + +func init() { + inputs.Add("execd", func() telegraf.Input { + return &Execd{ + Signal: "none", + RestartDelay: internal.Duration{Duration: 10 * time.Second}, + } + }) +} diff --git a/plugins/inputs/execd/execd_unix.go b/plugins/inputs/execd/execd_unix.go new file mode 100644 index 000000000..a092cfc80 --- /dev/null +++ b/plugins/inputs/execd/execd_unix.go @@ -0,0 +1,33 @@ +// +build !windows + +package execd + +import ( + "fmt" + "io" + "syscall" + + "github.com/influxdata/telegraf" +) + +func (e *Execd) Gather(acc telegraf.Accumulator) error { + if e.cmd == nil || e.cmd.Process == nil { + return nil + } + + switch e.Signal { + case "SIGHUP": + e.cmd.Process.Signal(syscall.SIGHUP) + case "SIGUSR1": + e.cmd.Process.Signal(syscall.SIGUSR1) + case "SIGUSR2": + e.cmd.Process.Signal(syscall.SIGUSR2) + case "STDIN": + io.WriteString(e.stdin, "\n") + case "none": + default: + return fmt.Errorf("invalid signal: %s", e.Signal) + } + + return nil +} diff --git a/plugins/inputs/execd/execd_win.go b/plugins/inputs/execd/execd_win.go new file mode 100644 index 000000000..85ced4a6a --- /dev/null +++ b/plugins/inputs/execd/execd_win.go @@ -0,0 +1,26 @@ +// +build windows + +package execd + +import ( + "fmt" + "io" + + "github.com/influxdata/telegraf" +) + +func (e *Execd) Gather(acc telegraf.Accumulator) error { + if e.cmd == nil || e.cmd.Process == nil { + return nil + } + + switch e.Signal { + case "STDIN": + io.WriteString(e.stdin, "\n") + case "none": + default: + return fmt.Errorf("invalid signal: %s", e.Signal) + } + + return nil +} From a34180459af91c97b1b0e885a69dec0e69e659db Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 28 Feb 2020 13:58:56 -0500 Subject: [PATCH 1566/1815] Close stdin on exit in execd input --- CHANGELOG.md | 1 + plugins/inputs/execd/README.md | 5 +++-- plugins/inputs/execd/execd.go | 10 ++++++---- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 40bf18e9b..5b03ca072 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ - [infiniband](/plugins/inputs/infiniband/README.md) - Contributed by @willfurnell - [modbus](/plugins/inputs/modbus/README.md) - Contributed by @garciaolais - [monit](/plugins/inputs/monit/README.md) - Contributed by @SirishaGopigiri +- [execd](/plugins/inputs/execd/README.md) - Contributed by @jgraichen #### New Processors diff --git a/plugins/inputs/execd/README.md b/plugins/inputs/execd/README.md index 1205fdd56..022311924 
100644 --- a/plugins/inputs/execd/README.md +++ b/plugins/inputs/execd/README.md @@ -1,6 +1,7 @@ # Execd Input Plugin -The `execd` plugin runs an external program as a daemon. The programs must output metrics in any one of the accepted [Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md) on its standard output. +The `execd` plugin runs an external program as a daemon. The programs must output metrics in any one of the accepted +[Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md) on its standard output. The `signal` can be configured to send a signal the running daemon on each collection interval. @@ -9,11 +10,11 @@ Program output on standard error is mirrored to the telegraf log. ### Configuration: ```toml +[[inputs.execd]] ## Program to run as daemon command = ["telegraf-smartctl", "-d", "/dev/sda"] ## Define how the process is signaled on each collection interval. - ## Valid values are: ## "none" : Do not signal anything. ## The process must output metrics by itself. diff --git a/plugins/inputs/execd/execd.go b/plugins/inputs/execd/execd.go index 0d1fc7cc5..e852d045e 100644 --- a/plugins/inputs/execd/execd.go +++ b/plugins/inputs/execd/execd.go @@ -21,12 +21,13 @@ const sampleConfig = ` command = ["telegraf-smartctl", "-d", "/dev/sda"] ## Define how the process is signaled on each collection interval. - ## Valid values are: ## "none" : Do not signal anything. ## The process must output metrics by itself. - ## "STDIN" : Send a newline on STDIN. - ## "SIGHUP" : Send a HUP signal. Not available on Windows. + ## "STDIN" : Send a newline on STDIN. + ## "SIGHUP" : Send a HUP signal. Not available on Windows. + ## "SIGUSR1" : Send a USR1 signal. Not available on Windows. + ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. signal = "none" ## Delay before the process is restarted after an unexpected termination @@ -100,9 +101,10 @@ func (e *Execd) cmdLoop(ctx context.Context) { select { case <-ctx.Done(): + e.stdin.Close() // Immediately exit process but with a graceful shutdown // period before killing - internal.WaitTimeout(e.cmd, 0) + internal.WaitTimeout(e.cmd, 200*time.Millisecond) return case err := <-done: log.Printf("E! [inputs.execd] Process %s terminated: %s", e.Command, err) From 88216eb4d28ee72e35913f7e4a528dd2de8657fd Mon Sep 17 00:00:00 2001 From: Andre Nathan Date: Fri, 28 Feb 2020 19:47:04 -0300 Subject: [PATCH 1567/1815] Allow globs in FPM unix socket paths (#7089) --- plugins/inputs/phpfpm/README.md | 2 + plugins/inputs/phpfpm/phpfpm.go | 84 ++++++++++++++++++++++++---- plugins/inputs/phpfpm/phpfpm_test.go | 67 +++++++++++++++++++++- 3 files changed, 141 insertions(+), 12 deletions(-) diff --git a/plugins/inputs/phpfpm/README.md b/plugins/inputs/phpfpm/README.md index e2f4e0c2f..b31f4b7e4 100644 --- a/plugins/inputs/phpfpm/README.md +++ b/plugins/inputs/phpfpm/README.md @@ -19,6 +19,8 @@ Get phpfpm stats using either HTTP status page or fpm socket. 
## "/var/run/php5-fpm.sock" ## or using a custom fpm status path: ## "/var/run/php5-fpm.sock:fpm-custom-status-path" + ## glob patterns are also supported: + ## "/var/run/php*.sock" ## ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: ## "fcgi://10.0.0.12:9000/status" diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index 2d2806261..75a6aeb17 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -14,6 +14,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -95,7 +96,12 @@ func (g *phpfpm) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup - for _, serv := range g.Urls { + urls, err := expandUrls(g.Urls) + if err != nil { + return err + } + + for _, serv := range urls { wg.Add(1) go func(serv string) { defer wg.Done() @@ -153,18 +159,10 @@ func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { statusPath = "status" } } else { - socketAddr := strings.Split(addr, ":") - if len(socketAddr) >= 2 { - socketPath = socketAddr[0] - statusPath = socketAddr[1] - } else { - socketPath = socketAddr[0] + socketPath, statusPath = unixSocketPaths(addr) + if statusPath == "" { statusPath = "status" } - - if _, err := os.Stat(socketPath); os.IsNotExist(err) { - return fmt.Errorf("Socket doesn't exist '%s': %s", socketPath, err) - } fcgi, err = newFcgiClient("unix", socketPath) } @@ -277,6 +275,70 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) (poolStat, return stats, nil } +func expandUrls(urls []string) ([]string, error) { + addrs := make([]string, 0, len(urls)) + for _, url := range urls { + if isNetworkURL(url) { + addrs = append(addrs, url) + continue + } + paths, err := globUnixSocket(url) + if err != nil { + return nil, err + } + addrs = append(addrs, paths...) 
+ } + return addrs, nil +} + +func globUnixSocket(url string) ([]string, error) { + pattern, status := unixSocketPaths(url) + glob, err := globpath.Compile(pattern) + if err != nil { + return nil, fmt.Errorf("could not compile glob %q: %v", pattern, err) + } + paths := glob.Match() + if len(paths) == 0 { + if _, err := os.Stat(paths[0]); err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("Socket doesn't exist '%s': %s", pattern, err) + } + return nil, err + } + return nil, nil + } + + addrs := make([]string, 0, len(paths)) + + for _, path := range paths { + if status != "" { + status = fmt.Sprintf(":%s", status) + } + addrs = append(addrs, fmt.Sprintf("%s%s", path, status)) + } + + return addrs, nil +} + +func unixSocketPaths(addr string) (string, string) { + var socketPath, statusPath string + + socketAddr := strings.Split(addr, ":") + if len(socketAddr) >= 2 { + socketPath = socketAddr[0] + statusPath = socketAddr[1] + } else { + socketPath = socketAddr[0] + statusPath = "" + } + + return socketPath, statusPath +} + +func isNetworkURL(addr string) bool { + return strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") || strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") +} + func init() { inputs.Add("phpfpm", func() telegraf.Input { return &phpfpm{} diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index f449b4649..64e5fbfea 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -148,6 +148,71 @@ func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) { acc.AssertContainsTaggedFields(t, "phpfpm", fields, tags) } +func TestPhpFpmGeneratesMetrics_From_Multiple_Sockets_With_Glob(t *testing.T) { + // Create a socket in /tmp because we always have write permission and if the + // removing of socket fail when system restart /tmp is clear so + // we don't have junk files around + var randomNumber int64 + binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + socket1 := fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber) + tcp1, err := net.Listen("unix", socket1) + if err != nil { + t.Fatal("Cannot initialize server on port ") + } + defer tcp1.Close() + + binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + socket2 := fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber) + tcp2, err := net.Listen("unix", socket2) + if err != nil { + t.Fatal("Cannot initialize server on port ") + } + defer tcp2.Close() + + s := statServer{} + go fcgi.Serve(tcp1, s) + go fcgi.Serve(tcp2, s) + + r := &phpfpm{ + Urls: []string{"/tmp/test-fpm[\\-0-9]*.sock"}, + } + + var acc1, acc2 testutil.Accumulator + + err = acc1.GatherError(r.Gather) + require.NoError(t, err) + + err = acc2.GatherError(r.Gather) + require.NoError(t, err) + + tags1 := map[string]string{ + "pool": "www", + "url": socket1, + } + + tags2 := map[string]string{ + "pool": "www", + "url": socket2, + } + + fields := map[string]interface{}{ + "start_since": int64(1991), + "accepted_conn": int64(3), + "listen_queue": int64(1), + "max_listen_queue": int64(0), + "listen_queue_len": int64(0), + "idle_processes": int64(1), + "active_processes": int64(1), + "total_processes": int64(2), + "max_active_processes": int64(1), + "max_children_reached": int64(2), + "slow_requests": int64(1), + } + + acc1.AssertContainsTaggedFields(t, "phpfpm", fields, tags1) + acc2.AssertContainsTaggedFields(t, "phpfpm", fields, tags2) +} + func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { // Create a socket in 
/tmp because we always have write permission. If the // removing of socket fail we won't have junk files around. Cuz when system @@ -227,7 +292,7 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testi err := acc.GatherError(r.Gather) require.Error(t, err) - assert.Equal(t, `Socket doesn't exist '/tmp/invalid.sock': stat /tmp/invalid.sock: no such file or directory`, err.Error()) + assert.Equal(t, `dial unix /tmp/invalid.sock: connect: no such file or directory`, err.Error()) } From a6dc099be42e3c7e3194173e3f4598489bea8a38 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 28 Feb 2020 14:48:27 -0800 Subject: [PATCH 1568/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5b03ca072..24094dec2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,6 +52,7 @@ - [#7022](https://github.com/influxdata/telegraf/pull/7022): Add support for credentials file to nats_consumer and nats output. - [#7065](https://github.com/influxdata/telegraf/pull/7065): Add additional tags and fields to apcupsd. - [#7084](https://github.com/influxdata/telegraf/pull/7084): Add RabbitMQ slave_nodes and synchronized_slave_nodes metrics. +- [#7089](https://github.com/influxdata/telegraf/pull/7089): Allow globs in FPM unix socket paths. #### Bugfixes From 32d80d2a0852a3aef8f568125a0315aed348d826 Mon Sep 17 00:00:00 2001 From: Felix Edelmann Date: Mon, 2 Mar 2020 19:59:19 +0100 Subject: [PATCH 1569/1815] Add non-cumulative histogram (#7071) --- plugins/aggregators/histogram/README.md | 71 ++++-- plugins/aggregators/histogram/histogram.go | 58 +++-- .../aggregators/histogram/histogram_test.go | 214 ++++++++++++------ 3 files changed, 228 insertions(+), 115 deletions(-) diff --git a/plugins/aggregators/histogram/README.md b/plugins/aggregators/histogram/README.md index f9dafd789..f0b6c15b1 100644 --- a/plugins/aggregators/histogram/README.md +++ b/plugins/aggregators/histogram/README.md @@ -3,8 +3,9 @@ The histogram aggregator plugin creates histograms containing the counts of field values within a range. -Values added to a bucket are also added to the larger buckets in the -distribution. This creates a [cumulative histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg). +If `cumulative` is set to true, values added to a bucket are also added to the +larger buckets in the distribution. This creates a [cumulative histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg). +Otherwise, values are added to only one bucket, which creates an [ordinary histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg) Like other Telegraf aggregators, the metric is emitted every `period` seconds. By default bucket counts are not reset between periods and will be non-strictly @@ -16,7 +17,7 @@ increasing while Telegraf is running. This behavior can be changed by setting th Each metric is passed to the aggregator and this aggregator searches histogram buckets for those fields, which have been specified in the config. If buckets are found, the aggregator will increment +1 to the appropriate -bucket otherwise it will be added to the `+Inf` bucket. Every `period` +bucket. Otherwise, it will be added to the `+Inf` bucket. Every `period` seconds this data will be forwarded to the outputs. 
The algorithm of hit counting to buckets was implemented on the base @@ -39,16 +40,20 @@ of the algorithm which is implemented in the Prometheus ## of accumulating the results. reset = false + ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added. + ## Defaults to true. + cumulative = true + ## Example config that aggregates all fields of the metric. # [[aggregators.histogram.config]] - # ## The set of buckets. + # ## Right borders of buckets (with +Inf implicitly added). # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] # ## The name of metric. # measurement_name = "cpu" ## Example config that aggregates only specific fields of the metric. # [[aggregators.histogram.config]] - # ## The set of buckets. + # ## Right borders of buckets (with +Inf implicitly added). # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] # ## The name of metric. # measurement_name = "diskio" @@ -64,8 +69,9 @@ option. Optionally, if `fields` is set only the fields listed will be aggregated. If `fields` is not set all fields are aggregated. The `buckets` option contains a list of floats which specify the bucket -boundaries. Each float value defines the inclusive upper bound of the bucket. +boundaries. Each float value defines the inclusive upper (right) bound of the bucket. The `+Inf` bucket is added automatically and does not need to be defined. +(For left boundaries, these specified bucket borders and `-Inf` will be used). ### Measurements & Fields: @@ -77,26 +83,43 @@ The postfix `bucket` will be added to each field key. ### Tags: -All measurements are given the tag `le`. This tag has the border value of -bucket. It means that the metric value is less than or equal to the value of -this tag. For example, let assume that we have the metric value 10 and the -following buckets: [5, 10, 30, 70, 100]. Then the tag `le` will have the value -10, because the metrics value is passed into bucket with right border value -`10`. +* `cumulative = true` (default): + * `le`: Right bucket border. It means that the metric value is less than or + equal to the value of this tag. If a metric value is sorted into a bucket, + it is also sorted into all larger buckets. As a result, the value of + `_bucket` is rising with rising `le` value. When `le` is `+Inf`, + the bucket value is the count of all metrics, because all metric values are + less than or equal to positive infinity. +* `cumulative = false`: + * `gt`: Left bucket border. It means that the metric value is greater than + (and not equal to) the value of this tag. + * `le`: Right bucket border. It means that the metric value is less than or + equal to the value of this tag. + * As both `gt` and `le` are present, each metric is sorted in only exactly + one bucket. 
+ ### Example Output: +Let assume we have the buckets [0, 10, 50, 100] and the following field values +for `usage_idle`: [50, 7, 99, 12] + +With `cumulative = true`: + ``` -cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=0i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=20.0 usage_idle_bucket=1i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=30.0 usage_idle_bucket=2i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=40.0 usage_idle_bucket=2i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=50.0 usage_idle_bucket=2i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=60.0 usage_idle_bucket=2i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=70.0 usage_idle_bucket=2i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=80.0 usage_idle_bucket=2i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=90.0 usage_idle_bucket=2i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=100.0 usage_idle_bucket=2i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=+Inf usage_idle_bucket=2i 1486998330000000000 +cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000 # none +cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=1i 1486998330000000000 # 7 +cpu,cpu=cpu1,host=localhost,le=50.0 usage_idle_bucket=2i 1486998330000000000 # 7, 12 +cpu,cpu=cpu1,host=localhost,le=100.0 usage_idle_bucket=4i 1486998330000000000 # 7, 12, 50, 99 +cpu,cpu=cpu1,host=localhost,le=+Inf usage_idle_bucket=4i 1486998330000000000 # 7, 12, 50, 99 +``` + +With `cumulative = false`: + +``` +cpu,cpu=cpu1,host=localhost,gt=-Inf,le=0.0 usage_idle_bucket=0i 1486998330000000000 # none +cpu,cpu=cpu1,host=localhost,gt=0.0,le=10.0 usage_idle_bucket=1i 1486998330000000000 # 7 +cpu,cpu=cpu1,host=localhost,gt=10.0,le=50.0 usage_idle_bucket=1i 1486998330000000000 # 12 +cpu,cpu=cpu1,host=localhost,gt=50.0,le=100.0 usage_idle_bucket=2i 1486998330000000000 # 50, 99 +cpu,cpu=cpu1,host=localhost,gt=100.0,le=+Inf usage_idle_bucket=0i 1486998330000000000 # none ``` diff --git a/plugins/aggregators/histogram/histogram.go b/plugins/aggregators/histogram/histogram.go index a565d8902..dab524d62 100644 --- a/plugins/aggregators/histogram/histogram.go +++ b/plugins/aggregators/histogram/histogram.go @@ -8,16 +8,23 @@ import ( "github.com/influxdata/telegraf/plugins/aggregators" ) -// bucketTag is the tag, which contains right bucket border -const bucketTag = "le" +// bucketRightTag is the tag, which contains right bucket border +const bucketRightTag = "le" -// bucketInf is the right bucket border for infinite values -const bucketInf = "+Inf" +// bucketPosInf is the right bucket border for infinite values +const bucketPosInf = "+Inf" + +// bucketLeftTag is the tag, which contains left bucket border (exclusive) +const bucketLeftTag = "gt" + +// bucketNegInf is the left bucket border for infinite values +const bucketNegInf = "-Inf" // HistogramAggregator is aggregator with histogram configs and particular histograms for defined metrics type HistogramAggregator struct { Configs []config `toml:"config"` ResetBuckets bool `toml:"reset"` + Cumulative bool `toml:"cumulative"` buckets bucketsByMetrics cache map[uint64]metricHistogramCollection @@ -57,8 +64,10 @@ type groupedByCountFields struct { } // NewHistogramAggregator creates new histogram aggregator -func NewHistogramAggregator() telegraf.Aggregator { - h := &HistogramAggregator{} +func NewHistogramAggregator() *HistogramAggregator { + h := &HistogramAggregator{ + Cumulative: true, + } 
h.buckets = make(bucketsByMetrics) h.resetCache() @@ -77,16 +86,20 @@ var sampleConfig = ` ## of accumulating the results. reset = false + ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added. + ## Defaults to true. + cumulative = true + ## Example config that aggregates all fields of the metric. # [[aggregators.histogram.config]] - # ## The set of buckets. + # ## Right borders of buckets (with +Inf implicitly added). # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] # ## The name of metric. # measurement_name = "cpu" ## Example config that aggregates only specific fields of the metric. # [[aggregators.histogram.config]] - # ## The set of buckets. + # ## Right borders of buckets (with +Inf implicitly added). # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] # ## The name of metric. # measurement_name = "diskio" @@ -167,18 +180,27 @@ func (h *HistogramAggregator) groupFieldsByBuckets( tags map[string]string, counts []int64, ) { - count := int64(0) - for index, bucket := range h.getBuckets(name, field) { - count += counts[index] + sum := int64(0) + buckets := h.getBuckets(name, field) // note that len(buckets) + 1 == len(counts) - tags[bucketTag] = strconv.FormatFloat(bucket, 'f', -1, 64) - h.groupField(metricsWithGroupedFields, name, field, count, copyTags(tags)) + for index, count := range counts { + if !h.Cumulative { + sum = 0 // reset sum -> don't store cumulative counts + + tags[bucketLeftTag] = bucketNegInf + if index > 0 { + tags[bucketLeftTag] = strconv.FormatFloat(buckets[index-1], 'f', -1, 64) + } + } + + tags[bucketRightTag] = bucketPosInf + if index < len(buckets) { + tags[bucketRightTag] = strconv.FormatFloat(buckets[index], 'f', -1, 64) + } + + sum += count + h.groupField(metricsWithGroupedFields, name, field, sum, copyTags(tags)) } - - count += counts[len(counts)-1] - tags[bucketTag] = bucketInf - - h.groupField(metricsWithGroupedFields, name, field, count, tags) } // groupField groups field by count value diff --git a/plugins/aggregators/histogram/histogram_test.go b/plugins/aggregators/histogram/histogram_test.go index 694235831..dfb3f5d12 100644 --- a/plugins/aggregators/histogram/histogram_test.go +++ b/plugins/aggregators/histogram/histogram_test.go @@ -11,11 +11,15 @@ import ( "github.com/stretchr/testify/assert" ) +type fields map[string]interface{} +type tags map[string]string + // NewTestHistogram creates new test histogram aggregation with specified config -func NewTestHistogram(cfg []config, reset bool) telegraf.Aggregator { - htm := &HistogramAggregator{Configs: cfg, ResetBuckets: reset} - htm.buckets = make(bucketsByMetrics) - htm.resetCache() +func NewTestHistogram(cfg []config, reset bool, cumulative bool) telegraf.Aggregator { + htm := NewHistogramAggregator() + htm.Configs = cfg + htm.ResetBuckets = reset + htm.Cumulative = cumulative return htm } @@ -23,8 +27,8 @@ func NewTestHistogram(cfg []config, reset bool) telegraf.Aggregator { // firstMetric1 is the first test metric var firstMetric1, _ = metric.New( "first_metric_name", - map[string]string{"tag_name": "tag_value"}, - map[string]interface{}{ + tags{}, + fields{ "a": float64(15.3), "b": float64(40), }, @@ -34,8 +38,8 @@ var firstMetric1, _ = metric.New( // firstMetric1 is the first test metric with other value var firstMetric2, _ = metric.New( "first_metric_name", - map[string]string{"tag_name": "tag_value"}, - map[string]interface{}{ + tags{}, + fields{ "a": float64(15.9), "c": float64(40), }, @@ -45,8 +49,8 @@ var firstMetric2, 
_ = metric.New(
// secondMetric is the second metric
var secondMetric, _ = metric.New(
"second_metric_name",
- map[string]string{"tag_name": "tag_value"},
- map[string]interface{}{
+ tags{},
+ fields{
"a": float64(105),
"ignoreme": "string",
"andme": true,
@@ -65,11 +69,11 @@ func BenchmarkApply(b *testing.B) {
}
}

-// TestHistogramWithPeriodAndOneField tests metrics for one period and for one field
-func TestHistogramWithPeriodAndOneField(t *testing.T) {
+// TestHistogram tests metrics for one period and for one field
+func TestHistogram(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
- histogram := NewTestHistogram(cfg, false)
+ histogram := NewTestHistogram(cfg, false, true)

acc := &testutil.Accumulator{}

@@ -81,19 +85,43 @@ func TestHistogramWithPeriodAndOneField(t *testing.T) {
if len(acc.Metrics) != 6 {
assert.Fail(t, "Incorrect number of metrics")
}
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "0")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "10")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "20")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "30")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "40")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, bucketInf)
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "0"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "10"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "20"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "30"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "40"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: bucketPosInf})
}

-// TestHistogramWithPeriodAndOneField tests metrics for one period and for one field
+// TestHistogramNonCumulative tests non-cumulative metrics for one period and for one field
+func TestHistogramNonCumulative(t *testing.T) {
+ var cfg []config
+ cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
+ histogram := NewTestHistogram(cfg, false, false)
+
+ acc := &testutil.Accumulator{}
+
+ histogram.Add(firstMetric1)
+ histogram.Reset()
+ histogram.Add(firstMetric2)
+ histogram.Push(acc)
+
+ if len(acc.Metrics) != 6 {
+ assert.Fail(t, "Incorrect number of metrics")
+ }
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: bucketNegInf, bucketRightTag: "0"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: "0", bucketRightTag: "10"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketLeftTag: "10", bucketRightTag: "20"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: "20", bucketRightTag: "30"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: "30", bucketRightTag: "40"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: "40", bucketRightTag: bucketPosInf})
+}
+
+// TestHistogramWithReset tests metrics for one period and for one field, with a reset between adding the metrics
func TestHistogramWithReset(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
- histogram := NewTestHistogram(cfg, true)
+ histogram := NewTestHistogram(cfg, true, true)

acc := &testutil.Accumulator{}

@@ -105,20 +133,20 @@ func TestHistogramWithReset(t *testing.T) {
if len(acc.Metrics) != 6 {
assert.Fail(t, "Incorrect number of metrics")
}
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "0")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "10")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1)}, "20")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1)}, "30")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1)}, "40")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1)}, bucketInf)
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "0"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "10"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1)}, tags{bucketRightTag: "20"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1)}, tags{bucketRightTag: "30"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1)}, tags{bucketRightTag: "40"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1)}, tags{bucketRightTag: bucketPosInf})
}

-// TestHistogramWithPeriodAndAllFields tests two metrics for one period and for all fields
-func TestHistogramWithPeriodAndAllFields(t *testing.T) {
+// TestHistogramWithAllFields tests two metrics for one period and for all fields
+func TestHistogramWithAllFields(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}})
cfg = append(cfg, config{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}})
- histogram := NewTestHistogram(cfg, false)
+ histogram := NewTestHistogram(cfg, false, true)

acc := &testutil.Accumulator{}

@@ -131,50 +159,83 @@ func TestHistogramWithPeriodAndAllFields(t *testing.T) {
assert.Fail(t, "Incorrect number of metrics")
}

- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "0")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, "15.5")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "20")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "30")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, "40")
- assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, bucketInf)
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "0"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "15.5"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "20"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "30"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, tags{bucketRightTag: "40"})
+ assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, tags{bucketRightTag: bucketPosInf})

- assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "0")
- assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "4")
- assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "10")
- assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "23")
- assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "30")
- assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, bucketInf)
+ assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "0"})
+ assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "4"})
+ assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "10"})
+ assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "23"})
+ assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "30"})
+ assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: bucketPosInf})
}

-// TestHistogramDifferentPeriodsAndAllFields tests two metrics getting added with a push/reset in between (simulates
+// TestHistogramWithAllFieldsNonCumulative tests two metrics for one period and for all fields in non-cumulative mode
+func
TestHistogramWithAllFieldsNonCumulative(t *testing.T) { + var cfg []config + cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}}) + cfg = append(cfg, config{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}}) + histogram := NewTestHistogram(cfg, false, false) + + acc := &testutil.Accumulator{} + + histogram.Add(firstMetric1) + histogram.Add(firstMetric2) + histogram.Add(secondMetric) + histogram.Push(acc) + + if len(acc.Metrics) != 12 { + assert.Fail(t, "Incorrect number of metrics") + } + + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: bucketNegInf, bucketRightTag: "0"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: "0", bucketRightTag: "15.5"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: "15.5", bucketRightTag: "20"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: "20", bucketRightTag: "30"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(1), "c_bucket": int64(1)}, tags{bucketLeftTag: "30", bucketRightTag: "40"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: "40", bucketRightTag: bucketPosInf}) + + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketLeftTag: bucketNegInf, bucketRightTag: "0"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketLeftTag: "0", bucketRightTag: "4"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketLeftTag: "4", bucketRightTag: "10"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketLeftTag: "10", bucketRightTag: "23"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketLeftTag: "23", bucketRightTag: "30"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketLeftTag: "30", bucketRightTag: bucketPosInf}) +} + +// TestHistogramWithTwoPeriodsAndAllFields tests two metrics getting added with a push/reset in between (simulates // getting added in different periods) for all fields -func TestHistogramDifferentPeriodsAndAllFields(t *testing.T) { +func TestHistogramWithTwoPeriodsAndAllFields(t *testing.T) { var cfg []config cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) - histogram := NewTestHistogram(cfg, false) + histogram := NewTestHistogram(cfg, false, true) acc := &testutil.Accumulator{} histogram.Add(firstMetric1) histogram.Push(acc) - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": 
int64(0)}, "0") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0)}, "10") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0)}, "20") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0)}, "30") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(1)}, "40") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(1)}, bucketInf) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0)}, tags{bucketRightTag: "0"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0)}, tags{bucketRightTag: "10"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0)}, tags{bucketRightTag: "20"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0)}, tags{bucketRightTag: "30"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(1)}, tags{bucketRightTag: "40"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(1)}, tags{bucketRightTag: bucketPosInf}) acc.ClearMetrics() histogram.Add(firstMetric2) histogram.Push(acc) - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "0") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "10") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "20") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "30") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, "40") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, bucketInf) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "0"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "10"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "20"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "30"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, tags{bucketRightTag: "40"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, tags{bucketRightTag: bucketPosInf}) } // TestWrongBucketsOrder tests the calling panic with incorrect order of buckets @@ -191,35 
+252,42 @@ func TestWrongBucketsOrder(t *testing.T) { var cfg []config cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 90.0, 20.0, 30.0, 40.0}}) - histogram := NewTestHistogram(cfg, false) + histogram := NewTestHistogram(cfg, false, true) histogram.Add(firstMetric2) } // assertContainsTaggedField is help functions to test histogram data -func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricName string, fields map[string]interface{}, le string) { +func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricName string, fields map[string]interface{}, tags map[string]string) { acc.Lock() defer acc.Unlock() for _, checkedMetric := range acc.Metrics { - // check metric name + // filter by metric name if checkedMetric.Measurement != metricName { continue } - // check "le" tag - if checkedMetric.Tags[bucketTag] != le { - continue - } - - // check fields - isFieldsIdentical := true - for field := range fields { - if _, ok := checkedMetric.Fields[field]; !ok { - isFieldsIdentical = false + // filter by tags + isTagsIdentical := true + for tag := range tags { + if val, ok := checkedMetric.Tags[tag]; !ok || val != tags[tag] { + isTagsIdentical = false break } } - if !isFieldsIdentical { + if !isTagsIdentical { + continue + } + + // filter by field keys + isFieldKeysIdentical := true + for field := range fields { + if _, ok := checkedMetric.Fields[field]; !ok { + isFieldKeysIdentical = false + break + } + } + if !isFieldKeysIdentical { continue } @@ -228,8 +296,8 @@ func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricNa return } - assert.Fail(t, fmt.Sprintf("incorrect fields %v of metric %s", fields, metricName)) + assert.Fail(t, fmt.Sprintf("incorrect fields %v of metric %s", checkedMetric.Fields, metricName)) } - assert.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, map[string]string{"le": le}, fields)) + assert.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, tags, fields)) } From 69554cd92ecc1c4788a5ea8a9a1143809eec2ccf Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 2 Mar 2020 12:04:32 -0800 Subject: [PATCH 1570/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 24094dec2..aa8990767 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -53,6 +53,7 @@ - [#7065](https://github.com/influxdata/telegraf/pull/7065): Add additional tags and fields to apcupsd. - [#7084](https://github.com/influxdata/telegraf/pull/7084): Add RabbitMQ slave_nodes and synchronized_slave_nodes metrics. - [#7089](https://github.com/influxdata/telegraf/pull/7089): Allow globs in FPM unix socket paths. +- [#7071](https://github.com/influxdata/telegraf/pull/7071): Add non-cumulative histogram to histogram aggregator. 
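The cumulative/non-cumulative split in groupFieldsByBuckets above reduces to the following sketch. This is an illustration only, not code from the patch; it assumes, as the patch's own comment notes, that counts holds one entry per bucket interval (so len(counts) == len(buckets) + 1), and that the bucketLeftTag/bucketRightTag constants ("gt"/"le") are defined outside the hunks shown.

    package main

    import "fmt"

    // emittedValues mirrors the emission logic of groupFieldsByBuckets:
    // cumulative mode reports a running sum per right border, while
    // non-cumulative mode reports each interval's own count.
    func emittedValues(counts []int64, cumulative bool) []int64 {
        out := make([]int64, len(counts))
        sum := int64(0)
        for i, c := range counts {
            if !cumulative {
                sum = 0 // report only this interval's observations
            }
            sum += c
            out[i] = sum
        }
        return out
    }

    func main() {
        counts := []int64{0, 0, 2, 0, 0, 0} // a=15.3 and a=15.9 with buckets [0 10 20 30 40]
        fmt.Println(emittedValues(counts, true))  // [0 0 2 2 2 2]
        fmt.Println(emittedValues(counts, false)) // [0 0 2 0 0 0]
    }

These two outputs are exactly the expectations asserted in TestHistogram and TestHistogramNonCumulative above.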
#### Bugfixes From dd1ace73b0cedc1b0bd68f515d23eadca0dfc51b Mon Sep 17 00:00:00 2001 From: mg03 Date: Mon, 2 Mar 2020 18:51:31 -0800 Subject: [PATCH 1571/1815] Add label and field selectors to prometheus input k8s discovery (#6969) --- go.mod | 2 +- go.sum | 1 + plugins/inputs/prometheus/README.md | 5 ++ plugins/inputs/prometheus/kubernetes.go | 20 +++++++- plugins/inputs/prometheus/kubernetes_test.go | 49 +++++++++++++++++++- plugins/inputs/prometheus/prometheus.go | 28 +++++++++++ 6 files changed, 102 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 84b24db20..93d92db27 100644 --- a/go.mod +++ b/go.mod @@ -134,5 +134,5 @@ require ( gopkg.in/olivere/elastic.v5 v5.0.70 gopkg.in/yaml.v2 v2.2.4 gotest.tools v2.2.0+incompatible // indirect - k8s.io/apimachinery v0.17.1 // indirect + k8s.io/apimachinery v0.17.1 ) diff --git a/go.sum b/go.sum index f940ab12a..fc3ef5dcc 100644 --- a/go.sum +++ b/go.sum @@ -588,6 +588,7 @@ k8s.io/apimachinery v0.17.1 h1:zUjS3szTxoUjTDYNvdFkYt2uMEXLcthcbp+7uZvWhYM= k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index 7b2e054a2..b4e587452 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -36,6 +36,11 @@ in Prometheus format. ## Restricts Kubernetes monitoring to a single namespace ## ex: monitor_kubernetes_pods_namespace = "default" # monitor_kubernetes_pods_namespace = "" + # label selector to target pods which have the label + # kubernetes_label_selector = "env=dev,app=nginx" + # field selector to target pods + # eg. To scrape pods on a specific node + # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" ## Use bearer token for authorization. ('bearer_token' takes priority) # bearer_token = "/path/to/bearer/token" diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index 617509384..16f69cbd1 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -82,8 +82,11 @@ func (p *Prometheus) start(ctx context.Context) error { // pod, causing errors in the logs. This is only true if the pod going offline is not // directed to do so by K8s. func (p *Prometheus) watch(ctx context.Context, client *k8s.Client) error { + + selectors := podSelector(p) + pod := &corev1.Pod{} - watcher, err := client.Watch(ctx, p.PodNamespace, &corev1.Pod{}) + watcher, err := client.Watch(ctx, p.PodNamespace, &corev1.Pod{}, selectors...) 
if err != nil {
return err
}
@@ -135,6 +138,20 @@ func podReady(statuss []*corev1.ContainerStatus) bool {
return true
}

+func podSelector(p *Prometheus) []k8s.Option {
+ options := []k8s.Option{}
+
+ if len(p.KubernetesLabelSelector) > 0 {
+ options = append(options, k8s.QueryParam("labelSelector", p.KubernetesLabelSelector))
+ }
+
+ if len(p.KubernetesFieldSelector) > 0 {
+ options = append(options, k8s.QueryParam("fieldSelector", p.KubernetesFieldSelector))
+ }
+
+ return options
+}
+
func registerPod(pod *corev1.Pod, p *Prometheus) {
if p.kubernetesPods == nil {
p.kubernetesPods = map[string]URLAndAddress{}
diff --git a/plugins/inputs/prometheus/kubernetes_test.go b/plugins/inputs/prometheus/kubernetes_test.go
index b926f7393..8568ac946 100644
--- a/plugins/inputs/prometheus/kubernetes_test.go
+++ b/plugins/inputs/prometheus/kubernetes_test.go
@@ -1,6 +1,7 @@
package prometheus

import (
+ "github.com/ericchiang/k8s"
"testing"

"github.com/influxdata/telegraf/testutil"
@@ -95,8 +96,53 @@ func TestDeletePods(t *testing.T) {
assert.Equal(t, 0, len(prom.kubernetesPods))
}

+func TestPodSelector(t *testing.T) {
+ cases := []struct {
+ expected []k8s.Option
+ labelselector string
+ fieldselector string
+ }{
+ {
+ expected: []k8s.Option{
+ k8s.QueryParam("labelSelector", "key1=val1,key2=val2,key3"),
+ k8s.QueryParam("fieldSelector", "spec.nodeName=ip-1-2-3-4.acme.com"),
+ },
+ labelselector: "key1=val1,key2=val2,key3",
+ fieldselector: "spec.nodeName=ip-1-2-3-4.acme.com",
+ },
+ {
+ expected: []k8s.Option{
+ k8s.QueryParam("labelSelector", "key1"),
+ k8s.QueryParam("fieldSelector", "spec.nodeName=ip-1-2-3-4.acme.com"),
+ },
+ labelselector: "key1",
+ fieldselector: "spec.nodeName=ip-1-2-3-4.acme.com",
+ },
+ {
+ expected: []k8s.Option{
+ k8s.QueryParam("labelSelector", "key1"),
+ k8s.QueryParam("fieldSelector", "somefield"),
+ },
+ labelselector: "key1",
+ fieldselector: "somefield",
+ },
+ }
+
+ for _, c := range cases {
+ prom := &Prometheus{
+ Log: testutil.Logger{},
+ KubernetesLabelSelector: c.labelselector,
+ KubernetesFieldSelector: c.fieldselector,
+ }
+
+ output := podSelector(prom)
+
+ assert.Equal(t, len(c.expected), len(output))
+ }
+}
+
func pod() *v1.Pod {
- p := &v1.Pod{Metadata: &metav1.ObjectMeta{}, Status: &v1.PodStatus{}}
+ p := &v1.Pod{Metadata: &metav1.ObjectMeta{}, Status: &v1.PodStatus{}, Spec: &v1.PodSpec{}}
p.Status.PodIP = str("127.0.0.1")
p.Metadata.Name = str("myPod")
p.Metadata.Namespace = str("default")
diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go
index 1f0862760..35c2d3d2c 100644
--- a/plugins/inputs/prometheus/prometheus.go
+++ b/plugins/inputs/prometheus/prometheus.go
@@ -15,6 +15,8 @@ import (
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
+ "k8s.io/apimachinery/pkg/fields"
+ "k8s.io/apimachinery/pkg/labels"
)

const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,*/*;q=0.1`
@@ -29,6 +31,12 @@ type Prometheus struct {
// Location of kubernetes config file
KubeConfig string

+ // Label Selector/s for Kubernetes
+ KubernetesLabelSelector string `toml:"kubernetes_label_selector"`
+
+ // Field Selector/s for Kubernetes
+ KubernetesFieldSelector string `toml:"kubernetes_field_selector"`
+
// Bearer Token authorization file path
BearerToken string `toml:"bearer_token"`
BearerTokenString string `toml:"bearer_token_string"`
@@ -90,6
+98,11 @@ var sampleConfig = ` ## Restricts Kubernetes monitoring to a single namespace ## ex: monitor_kubernetes_pods_namespace = "default" # monitor_kubernetes_pods_namespace = "" + # label selector to target pods which have the label + # kubernetes_label_selector = "env=dev,app=nginx" + # field selector to target pods + # eg. To scrape pods on a specific node + # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" ## Use bearer token for authorization. ('bearer_token' takes priority) # bearer_token = "/path/to/bearer/token" @@ -124,6 +137,21 @@ func (p *Prometheus) Init() error { if p.MetricVersion != 2 { p.Log.Warnf("Use of deprecated configuration: 'metric_version = 1'; please update to 'metric_version = 2'") } + + if len(p.KubernetesLabelSelector) > 0 { + _, err := labels.Parse(p.KubernetesLabelSelector) + if err != nil { + return fmt.Errorf("label selector validation failed %q: %v", p.KubernetesLabelSelector, err) + } + } + + if len(p.KubernetesFieldSelector) > 0 { + _, err := fields.ParseSelector(p.KubernetesFieldSelector) + if err != nil { + return fmt.Errorf("field selector validation failed %s: %v", p.KubernetesFieldSelector, err) + } + } + return nil } From f04d84994d6789b4161ed1f9791e9d96320af3b4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 2 Mar 2020 18:52:22 -0800 Subject: [PATCH 1572/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index aa8990767..fd54b4c82 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,6 +54,7 @@ - [#7084](https://github.com/influxdata/telegraf/pull/7084): Add RabbitMQ slave_nodes and synchronized_slave_nodes metrics. - [#7089](https://github.com/influxdata/telegraf/pull/7089): Allow globs in FPM unix socket paths. - [#7071](https://github.com/influxdata/telegraf/pull/7071): Add non-cumulative histogram to histogram aggregator. +- [#6969](https://github.com/influxdata/telegraf/pull/6969): Add label and field selectors to prometheus input k8s discovery. #### Bugfixes From ab8438dcc62130ebff6daac49ebf3237d97348e2 Mon Sep 17 00:00:00 2001 From: josephpeacock <51184065+josephpeacock@users.noreply.github.com> Date: Tue, 3 Mar 2020 14:47:33 -0800 Subject: [PATCH 1573/1815] Add threaded parsing in statsd input plugin (#6922) --- plugins/inputs/statsd/statsd.go | 16 +++-- plugins/inputs/statsd/statsd_test.go | 89 ++++++++++++++++++++++++---- 2 files changed, 88 insertions(+), 17 deletions(-) diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 32b12a7e9..9c5780d00 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -31,6 +31,8 @@ const ( defaultSeparator = "_" defaultAllowPendingMessage = 10000 MaxTCPConnections = 250 + + parserGoRoutines = 5 ) // Statsd allows the importing of statsd and dogstatsd data. 
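The hunk below replaces the single parser goroutine with parserGoRoutines workers that all drain the plugin's shared input channel. A minimal, self-contained sketch of that fan-out pattern (illustrative names only, not the plugin's code):

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        in := make(chan string, 100) // stands in for the plugin's buffered line channel
        var wg sync.WaitGroup

        const workers = 5 // mirrors parserGoRoutines above
        for i := 0; i < workers; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                for line := range in { // each worker drains the shared channel
                    fmt.Println("parsed:", line)
                }
            }()
        }

        in <- "test.tcp.msg:100|c"
        close(in) // ends the range loops so wg.Wait returns
        wg.Wait()
    }

Each line is received by exactly one worker, so nothing is parsed twice, but the order in which lines are processed is no longer guaranteed across workers.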
@@ -398,12 +400,14 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error {
}()
}

- // Start the line parser
- s.wg.Add(1)
- go func() {
- defer s.wg.Done()
- s.parser()
- }()
+ for i := 1; i <= parserGoRoutines; i++ {
+ // Start a line parser worker
+ s.wg.Add(1)
+ go func() {
+ defer s.wg.Done()
+ s.parser()
+ }()
+ }
s.Log.Infof("Started the statsd service on %q", s.ServiceAddress)
return nil
}
diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go
index 1215eeb2d..f3daa117b 100644
--- a/plugins/inputs/statsd/statsd_test.go
+++ b/plugins/inputs/statsd/statsd_test.go
@@ -2,19 +2,21 @@ package statsd

import (
"fmt"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"net"
+ "sync"
"testing"
"time"

- "github.com/influxdata/telegraf"
- "github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/testutil"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
)

const (
- testMsg = "test.tcp.msg:100|c"
+ testMsg         = "test.tcp.msg:100|c"
+ producerThreads = 10
)

func NewTestStatsd() *Statsd {
@@ -137,15 +139,30 @@ func BenchmarkUDP(b *testing.B) {
if err != nil {
panic(err)
}
- for i := 0; i < 250000; i++ {
- fmt.Fprintf(conn, testMsg)
+
+ var wg sync.WaitGroup
+ for i := 1; i <= producerThreads; i++ {
+ wg.Add(1)
+ go sendRequests(conn, &wg)
}
+ wg.Wait()
+
// wait for 250,000 metrics to get added to accumulator
- time.Sleep(time.Millisecond)
+ for len(listener.in) > 0 {
+ fmt.Printf("Left in buffer: %v \n", len(listener.in))
+ time.Sleep(time.Millisecond)
+ }
listener.Stop()
}
}

+func sendRequests(conn net.Conn, wg *sync.WaitGroup) {
+ defer wg.Done()
+ for i := 0; i < 25000; i++ {
+ fmt.Fprintf(conn, testMsg)
+ }
+}
+
// benchmark how long it takes to accept & process 100,000 metrics:
func BenchmarkTCP(b *testing.B) {
listener := Statsd{
@@ -169,11 +186,16 @@ func BenchmarkTCP(b *testing.B) {
if err != nil {
panic(err)
}
- for i := 0; i < 250000; i++ {
- fmt.Fprintf(conn, testMsg)
+ var wg sync.WaitGroup
+ for i := 1; i <= producerThreads; i++ {
+ wg.Add(1)
+ go sendRequests(conn, &wg)
}
+ wg.Wait()
// wait for 250,000 metrics to get added to accumulator
- time.Sleep(time.Millisecond)
+ for len(listener.in) > 0 {
+ time.Sleep(time.Millisecond)
+ }
listener.Stop()
}
}
@@ -1678,3 +1700,49 @@ func TestTCP(t *testing.T) {
testutil.IgnoreTime(),
)
}
+
+func TestUdp(t *testing.T) {
+ statsd := Statsd{
+ Log:                    testutil.Logger{},
+ Protocol:               "udp",
+ ServiceAddress:         "localhost:8125",
+ AllowedPendingMessages: 250000,
+ }
+ var acc testutil.Accumulator
+ require.NoError(t, statsd.Start(&acc))
+ defer statsd.Stop()
+
+ conn, err := net.Dial("udp", "127.0.0.1:8125")
+ require.NoError(t, err)
+ _, err = conn.Write([]byte("cpu.time_idle:42|c\n"))
+ require.NoError(t, err)
+ err = conn.Close()
+ require.NoError(t, err)
+
+ for {
+ err = statsd.Gather(&acc)
+ require.NoError(t, err)
+
+ if len(acc.Metrics) > 0 {
+ break
+ }
+ }
+
+ testutil.RequireMetricsEqual(t,
+ []telegraf.Metric{
+ testutil.MustMetric(
+ "cpu_time_idle",
+ map[string]string{
+ "metric_type": "counter",
+ },
+ map[string]interface{}{
+ "value": 42,
+ },
+ time.Now(),
+ telegraf.Counter,
+ ),
+ },
+ acc.GetTelegrafMetrics(),
+ testutil.IgnoreTime(),
+ )
+}

From a0276385b1d7bf18f7cb711d4f904d8f1cc71272 Mon Sep 17 00:00:00 2001
From: reimda
Date: Wed, 4 Mar 2020 11:13:44 -0700
Subject: [PATCH 1574/1815] Refactor InfluxDB listener (#6974)

Use streaming parser in InfluxDB listener
---
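This revision drops the fixed-size buffer pool and the hand-rolled newline splitting (both shown deleted below) in favor of parsing the request body as a stream; the patch wires the body into the influx parser's streaming interface under plugins/parsers/influx. As a rough sketch of the general shape, under the simplifying assumption of a plain line-by-line split (the real parser understands full line protocol and lines of unlimited length):

    package main

    import (
        "bufio"
        "fmt"
        "net/http"
    )

    func serveWrite(res http.ResponseWriter, req *http.Request) {
        scanner := bufio.NewScanner(req.Body)
        // Grow the line buffer on demand instead of pre-allocating the worst case.
        scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024)
        for scanner.Scan() {
            // Each line would be handed to the line-protocol parser here.
            fmt.Printf("line: %s\n", scanner.Text())
        }
        if err := scanner.Err(); err != nil {
            http.Error(res, err.Error(), http.StatusBadRequest)
            return
        }
        res.WriteHeader(http.StatusNoContent)
    }

    func main() {
        http.HandleFunc("/write", serveWrite)
        http.ListenAndServe(":8186", nil)
    }

Memory use is then bounded by the longest single line rather than the whole request body, which is why max_line_size is deprecated in the README change below while max_body_size remains meaningful.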
internal/http.go | 17 +- metric/builder.go | 55 - metric/metric.go | 14 +- plugins/inputs/influxdb_listener/README.md | 8 +- .../inputs/influxdb_listener/bufferpool.go | 43 - .../inputs/influxdb_listener/http_listener.go | 464 - .../influxdb_listener/http_listener_test.go | 484 - .../influxdb_listener/influxdb_listener.go | 406 + .../influxdb_listener_benchmark_test.go | 108 + .../influxdb_listener_test.go | 656 +- plugins/outputs/health/health.go | 5 +- .../prometheus_client/prometheus_client.go | 6 +- plugins/parsers/csv/parser.go | 4 +- plugins/parsers/dropwizard/parser.go | 6 +- plugins/parsers/dropwizard/parser_test.go | 10 +- plugins/parsers/influx/handler.go | 62 +- plugins/parsers/influx/machine.go | 34919 ++++++++-------- plugins/parsers/influx/machine.go.rl | 160 +- plugins/parsers/influx/machine_test.go | 508 +- plugins/parsers/influx/parser.go | 104 +- plugins/parsers/influx/parser_test.go | 162 +- 21 files changed, 19898 insertions(+), 18303 deletions(-) delete mode 100644 metric/builder.go delete mode 100644 plugins/inputs/influxdb_listener/bufferpool.go delete mode 100644 plugins/inputs/influxdb_listener/http_listener.go delete mode 100644 plugins/inputs/influxdb_listener/http_listener_test.go create mode 100644 plugins/inputs/influxdb_listener/influxdb_listener.go create mode 100644 plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go diff --git a/internal/http.go b/internal/http.go index 7ffd9bf2b..a44506719 100644 --- a/internal/http.go +++ b/internal/http.go @@ -6,27 +6,27 @@ import ( "net/http" ) -// ErrorFunc is a callback for writing an error response. -type ErrorFunc func(rw http.ResponseWriter, code int) +type BasicAuthErrorFunc func(rw http.ResponseWriter) // AuthHandler returns a http handler that requires HTTP basic auth // credentials to match the given username and password. -func AuthHandler(username, password string, onError ErrorFunc) func(h http.Handler) http.Handler { +func AuthHandler(username, password, realm string, onError BasicAuthErrorFunc) func(h http.Handler) http.Handler { return func(h http.Handler) http.Handler { return &basicAuthHandler{ username: username, password: password, + realm: realm, onError: onError, next: h, } } - } type basicAuthHandler struct { username string password string - onError ErrorFunc + realm string + onError BasicAuthErrorFunc next http.Handler } @@ -37,7 +37,9 @@ func (h *basicAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.username)) != 1 || subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.password)) != 1 { - h.onError(rw, http.StatusUnauthorized) + rw.Header().Set("WWW-Authenticate", "Basic realm=\""+h.realm+"\"") + h.onError(rw) + http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized) return } } @@ -45,6 +47,9 @@ func (h *basicAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) h.next.ServeHTTP(rw, req) } +// ErrorFunc is a callback for writing an error response. +type ErrorFunc func(rw http.ResponseWriter, code int) + // IPRangeHandler returns a http handler that requires the remote address to be // in the specified network. 
func IPRangeHandler(network []*net.IPNet, onError ErrorFunc) func(h http.Handler) http.Handler { diff --git a/metric/builder.go b/metric/builder.go deleted file mode 100644 index 9a331b9a4..000000000 --- a/metric/builder.go +++ /dev/null @@ -1,55 +0,0 @@ -package metric - -import ( - "time" - - "github.com/influxdata/telegraf" -) - -type TimeFunc func() time.Time - -type Builder struct { - TimeFunc - TimePrecision time.Duration - - *metric -} - -func NewBuilder() *Builder { - b := &Builder{ - TimeFunc: time.Now, - TimePrecision: 1 * time.Nanosecond, - } - b.Reset() - return b -} - -func (b *Builder) SetName(name string) { - b.name = name -} - -func (b *Builder) AddTag(key string, value string) { - b.metric.AddTag(key, value) -} - -func (b *Builder) AddField(key string, value interface{}) { - b.metric.AddField(key, value) -} - -func (b *Builder) SetTime(tm time.Time) { - b.tm = tm -} - -func (b *Builder) Reset() { - b.metric = &metric{ - tp: telegraf.Untyped, - } -} - -func (b *Builder) Metric() (telegraf.Metric, error) { - if b.tm.IsZero() { - b.tm = b.TimeFunc().Truncate(b.TimePrecision) - } - - return b.metric, nil -} diff --git a/metric/metric.go b/metric/metric.go index 4f1418b35..517645a83 100644 --- a/metric/metric.go +++ b/metric/metric.go @@ -50,13 +50,15 @@ func New( sort.Slice(m.tags, func(i, j int) bool { return m.tags[i].Key < m.tags[j].Key }) } - m.fields = make([]*telegraf.Field, 0, len(fields)) - for k, v := range fields { - v := convertField(v) - if v == nil { - continue + if len(fields) > 0 { + m.fields = make([]*telegraf.Field, 0, len(fields)) + for k, v := range fields { + v := convertField(v) + if v == nil { + continue + } + m.AddField(k, v) } - m.AddField(k, v) } return m, nil diff --git a/plugins/inputs/influxdb_listener/README.md b/plugins/inputs/influxdb_listener/README.md index 5efa6baf1..b93573bf4 100644 --- a/plugins/inputs/influxdb_listener/README.md +++ b/plugins/inputs/influxdb_listener/README.md @@ -30,13 +30,13 @@ submits data to InfluxDB determines the destination database. ## maximum duration before timing out write of the response write_timeout = "10s" - ## Maximum allowed http request body size in bytes. - ## 0 means to use the default of 536,870,912 bytes (500 mebibytes) + ## Maximum allowed HTTP request body size in bytes. + ## 0 means to use the default of 32MiB. max_body_size = 0 ## Maximum line size allowed to be sent in bytes. - ## 0 means to use the default of 65536 bytes (64 kibibytes) - max_line_size = 0 + ## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored + # max_line_size = 0 ## Set one or more allowed client CA certificate file names to ## enable mutually authenticated TLS connections diff --git a/plugins/inputs/influxdb_listener/bufferpool.go b/plugins/inputs/influxdb_listener/bufferpool.go deleted file mode 100644 index 00a93652d..000000000 --- a/plugins/inputs/influxdb_listener/bufferpool.go +++ /dev/null @@ -1,43 +0,0 @@ -package http_listener - -import ( - "sync/atomic" -) - -type pool struct { - buffers chan []byte - size int - - created int64 -} - -// NewPool returns a new pool object. 
-// n is the number of buffers -// bufSize is the size (in bytes) of each buffer -func NewPool(n, bufSize int) *pool { - return &pool{ - buffers: make(chan []byte, n), - size: bufSize, - } -} - -func (p *pool) get() []byte { - select { - case b := <-p.buffers: - return b - default: - atomic.AddInt64(&p.created, 1) - return make([]byte, p.size) - } -} - -func (p *pool) put(b []byte) { - select { - case p.buffers <- b: - default: - } -} - -func (p *pool) ncreated() int64 { - return atomic.LoadInt64(&p.created) -} diff --git a/plugins/inputs/influxdb_listener/http_listener.go b/plugins/inputs/influxdb_listener/http_listener.go deleted file mode 100644 index aeb2b589f..000000000 --- a/plugins/inputs/influxdb_listener/http_listener.go +++ /dev/null @@ -1,464 +0,0 @@ -package http_listener - -import ( - "bytes" - "compress/gzip" - "crypto/subtle" - "crypto/tls" - "encoding/json" - "fmt" - "io" - "net" - "net/http" - "sync" - "time" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - tlsint "github.com/influxdata/telegraf/internal/tls" - "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/parsers/influx" - "github.com/influxdata/telegraf/selfstat" -) - -const ( - // DEFAULT_MAX_BODY_SIZE is the default maximum request body size, in bytes. - // if the request body is over this size, we will return an HTTP 413 error. - // 500 MB - DEFAULT_MAX_BODY_SIZE = 500 * 1024 * 1024 - - // MAX_LINE_SIZE is the maximum size, in bytes, that can be allocated for - // a single InfluxDB point. - // 64 KB - DEFAULT_MAX_LINE_SIZE = 64 * 1024 -) - -type TimeFunc func() time.Time - -type HTTPListener struct { - ServiceAddress string `toml:"service_address"` - // Port gets pulled out of ServiceAddress - Port int - tlsint.ServerConfig - - ReadTimeout internal.Duration `toml:"read_timeout"` - WriteTimeout internal.Duration `toml:"write_timeout"` - MaxBodySize internal.Size `toml:"max_body_size"` - MaxLineSize internal.Size `toml:"max_line_size"` - BasicUsername string `toml:"basic_username"` - BasicPassword string `toml:"basic_password"` - DatabaseTag string `toml:"database_tag"` - - TimeFunc - - mu sync.Mutex - wg sync.WaitGroup - - listener net.Listener - - handler *influx.MetricHandler - parser *influx.Parser - acc telegraf.Accumulator - pool *pool - - BytesRecv selfstat.Stat - RequestsServed selfstat.Stat - WritesServed selfstat.Stat - QueriesServed selfstat.Stat - PingsServed selfstat.Stat - RequestsRecv selfstat.Stat - WritesRecv selfstat.Stat - QueriesRecv selfstat.Stat - PingsRecv selfstat.Stat - NotFoundsServed selfstat.Stat - BuffersCreated selfstat.Stat - AuthFailures selfstat.Stat - - Log telegraf.Logger - - longLines selfstat.Stat -} - -const sampleConfig = ` - ## Address and port to host HTTP listener on - service_address = ":8186" - - ## maximum duration before timing out read of the request - read_timeout = "10s" - ## maximum duration before timing out write of the response - write_timeout = "10s" - - ## Maximum allowed http request body size in bytes. - ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) - max_body_size = "500MiB" - - ## Maximum line size allowed to be sent in bytes. - ## 0 means to use the default of 65536 bytes (64 kibibytes) - max_line_size = "64KiB" - - - ## Optional tag name used to store the database. - ## If the write has a database in the query string then it will be kept in this tag name. - ## This tag can be used in downstream outputs. 
- ## The default value of nothing means it will be off and the database will not be recorded. - # database_tag = "" - - ## Set one or more allowed client CA certificate file names to - ## enable mutually authenticated TLS connections - tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - - ## Add service certificate and key - tls_cert = "/etc/telegraf/cert.pem" - tls_key = "/etc/telegraf/key.pem" - - ## Optional username and password to accept for HTTP basic authentication. - ## You probably want to make sure you have TLS configured above for this. - # basic_username = "foobar" - # basic_password = "barfoo" -` - -func (h *HTTPListener) SampleConfig() string { - return sampleConfig -} - -func (h *HTTPListener) Description() string { - return "Influx HTTP write listener" -} - -func (h *HTTPListener) Gather(_ telegraf.Accumulator) error { - h.BuffersCreated.Set(h.pool.ncreated()) - return nil -} - -// Start starts the http listener service. -func (h *HTTPListener) Start(acc telegraf.Accumulator) error { - h.mu.Lock() - defer h.mu.Unlock() - - tags := map[string]string{ - "address": h.ServiceAddress, - } - h.BytesRecv = selfstat.Register("http_listener", "bytes_received", tags) - h.RequestsServed = selfstat.Register("http_listener", "requests_served", tags) - h.WritesServed = selfstat.Register("http_listener", "writes_served", tags) - h.QueriesServed = selfstat.Register("http_listener", "queries_served", tags) - h.PingsServed = selfstat.Register("http_listener", "pings_served", tags) - h.RequestsRecv = selfstat.Register("http_listener", "requests_received", tags) - h.WritesRecv = selfstat.Register("http_listener", "writes_received", tags) - h.QueriesRecv = selfstat.Register("http_listener", "queries_received", tags) - h.PingsRecv = selfstat.Register("http_listener", "pings_received", tags) - h.NotFoundsServed = selfstat.Register("http_listener", "not_founds_served", tags) - h.BuffersCreated = selfstat.Register("http_listener", "buffers_created", tags) - h.AuthFailures = selfstat.Register("http_listener", "auth_failures", tags) - h.longLines = selfstat.Register("http_listener", "long_lines", tags) - - if h.MaxBodySize.Size == 0 { - h.MaxBodySize.Size = DEFAULT_MAX_BODY_SIZE - } - if h.MaxLineSize.Size == 0 { - h.MaxLineSize.Size = DEFAULT_MAX_LINE_SIZE - } - - if h.ReadTimeout.Duration < time.Second { - h.ReadTimeout.Duration = time.Second * 10 - } - if h.WriteTimeout.Duration < time.Second { - h.WriteTimeout.Duration = time.Second * 10 - } - - h.acc = acc - h.pool = NewPool(200, int(h.MaxLineSize.Size)) - - tlsConf, err := h.ServerConfig.TLSConfig() - if err != nil { - return err - } - - server := &http.Server{ - Addr: h.ServiceAddress, - Handler: h, - ReadTimeout: h.ReadTimeout.Duration, - WriteTimeout: h.WriteTimeout.Duration, - TLSConfig: tlsConf, - } - - var listener net.Listener - if tlsConf != nil { - listener, err = tls.Listen("tcp", h.ServiceAddress, tlsConf) - } else { - listener, err = net.Listen("tcp", h.ServiceAddress) - } - if err != nil { - return err - } - h.listener = listener - h.Port = listener.Addr().(*net.TCPAddr).Port - - h.handler = influx.NewMetricHandler() - h.parser = influx.NewParser(h.handler) - - h.wg.Add(1) - go func() { - defer h.wg.Done() - server.Serve(h.listener) - }() - - h.Log.Infof("Started HTTP listener service on %s", h.ServiceAddress) - - return nil -} - -// Stop cleans up all resources -func (h *HTTPListener) Stop() { - h.mu.Lock() - defer h.mu.Unlock() - - h.listener.Close() - h.wg.Wait() - - h.Log.Infof("Stopped HTTP listener service on %s", 
h.ServiceAddress) -} - -func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) { - h.RequestsRecv.Incr(1) - defer h.RequestsServed.Incr(1) - switch req.URL.Path { - case "/write": - h.WritesRecv.Incr(1) - defer h.WritesServed.Incr(1) - h.AuthenticateIfSet(h.serveWrite, res, req) - case "/query": - h.QueriesRecv.Incr(1) - defer h.QueriesServed.Incr(1) - // Deliver a dummy response to the query endpoint, as some InfluxDB - // clients test endpoint availability with a query - h.AuthenticateIfSet(func(res http.ResponseWriter, req *http.Request) { - res.Header().Set("Content-Type", "application/json") - res.Header().Set("X-Influxdb-Version", "1.0") - res.WriteHeader(http.StatusOK) - res.Write([]byte("{\"results\":[]}")) - }, res, req) - case "/ping": - h.PingsRecv.Incr(1) - defer h.PingsServed.Incr(1) - verbose := req.URL.Query().Get("verbose") - - // respond to ping requests - if verbose != "" && verbose != "0" && verbose != "false" { - res.WriteHeader(http.StatusOK) - b, _ := json.Marshal(map[string]string{"version": "1.0"}) // based on header set above - res.Write(b) - } else { - res.WriteHeader(http.StatusNoContent) - } - default: - defer h.NotFoundsServed.Incr(1) - // Don't know how to respond to calls to other endpoints - h.AuthenticateIfSet(http.NotFound, res, req) - } -} - -func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { - // Check that the content length is not too large for us to handle. - if req.ContentLength > h.MaxBodySize.Size { - tooLarge(res) - return - } - now := h.TimeFunc() - - precision := req.URL.Query().Get("precision") - db := req.URL.Query().Get("db") - - // Handle gzip request bodies - body := req.Body - if req.Header.Get("Content-Encoding") == "gzip" { - var err error - body, err = gzip.NewReader(req.Body) - if err != nil { - h.Log.Debug(err.Error()) - badRequest(res, err.Error()) - return - } - defer body.Close() - } - body = http.MaxBytesReader(res, body, h.MaxBodySize.Size) - - var return400 bool - var hangingBytes bool - buf := h.pool.get() - defer h.pool.put(buf) - bufStart := 0 - for { - n, err := io.ReadFull(body, buf[bufStart:]) - if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF { - h.Log.Debug(err.Error()) - // problem reading the request body - badRequest(res, err.Error()) - return - } - h.BytesRecv.Incr(int64(n)) - - if err == io.EOF { - if return400 { - badRequest(res, "") - } else { - res.WriteHeader(http.StatusNoContent) - } - return - } - - if hangingBytes { - i := bytes.IndexByte(buf, '\n') - if i == -1 { - // still didn't find a newline, keep scanning - continue - } - // rotate the bit remaining after the first newline to the front of the buffer - i++ // start copying after the newline - bufStart = len(buf) - i - if bufStart > 0 { - copy(buf, buf[i:]) - } - hangingBytes = false - continue - } - - if err == io.ErrUnexpectedEOF { - // finished reading the request body - err = h.parse(buf[:n+bufStart], now, precision, db) - if err != nil { - h.Log.Debugf("%s: %s", err.Error(), bufStart+n) - return400 = true - } - if return400 { - if err != nil { - badRequest(res, err.Error()) - } else { - badRequest(res, "") - } - } else { - res.WriteHeader(http.StatusNoContent) - } - return - } - - // if we got down here it means that we filled our buffer, and there - // are still bytes remaining to be read. So we will parse up until the - // final newline, then push the rest of the bytes into the next buffer. 
-		i := bytes.LastIndexByte(buf, '\n')
-		if i == -1 {
-			h.longLines.Incr(1)
-			// drop any line longer than the max buffer size
-			h.Log.Debugf("Http_listener received a single line longer than the maximum of %d bytes",
-				len(buf))
-			hangingBytes = true
-			return400 = true
-			bufStart = 0
-			continue
-		}
-		if err := h.parse(buf[:i+1], now, precision, db); err != nil {
-			h.Log.Debug(err.Error())
-			return400 = true
-		}
-		// rotate the bit remaining after the last newline to the front of the buffer
-		i++ // start copying after the newline
-		bufStart = len(buf) - i
-		if bufStart > 0 {
-			copy(buf, buf[i:])
-		}
-	}
-}
-
-func (h *HTTPListener) parse(b []byte, t time.Time, precision, db string) error {
-	h.mu.Lock()
-	defer h.mu.Unlock()
-
-	h.handler.SetTimePrecision(getPrecisionMultiplier(precision))
-	h.handler.SetTimeFunc(func() time.Time { return t })
-	metrics, err := h.parser.Parse(b)
-	if err != nil {
-		return fmt.Errorf("unable to parse: %s", err.Error())
-	}
-
-	for _, m := range metrics {
-		// Do we need to keep the database name in the query string.
-		// If a tag has been supplied to put the db in and we actually got a db query,
-		// then we write it in. This overwrites the database tag if one was sent.
-		// This makes it behave like the influx endpoint.
-		if h.DatabaseTag != "" && db != "" {
-			m.AddTag(h.DatabaseTag, db)
-		}
-		h.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
-	}
-
-	return nil
-}
-
-func tooLarge(res http.ResponseWriter) {
-	res.Header().Set("Content-Type", "application/json")
-	res.Header().Set("X-Influxdb-Version", "1.0")
-	res.Header().Set("X-Influxdb-Error", "http: request body too large")
-	res.WriteHeader(http.StatusRequestEntityTooLarge)
-	res.Write([]byte(`{"error":"http: request body too large"}`))
-}
-
-func badRequest(res http.ResponseWriter, errString string) {
-	res.Header().Set("Content-Type", "application/json")
-	res.Header().Set("X-Influxdb-Version", "1.0")
-	if errString == "" {
-		errString = "http: bad request"
-	}
-	res.Header().Set("X-Influxdb-Error", errString)
-	res.WriteHeader(http.StatusBadRequest)
-	res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString)))
-}
-
-func (h *HTTPListener) AuthenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) {
-	if h.BasicUsername != "" && h.BasicPassword != "" {
-		reqUsername, reqPassword, ok := req.BasicAuth()
-		if !ok ||
-			subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.BasicUsername)) != 1 ||
-			subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.BasicPassword)) != 1 {
-
-			h.AuthFailures.Incr(1)
-			http.Error(res, "Unauthorized.", http.StatusUnauthorized)
-			return
-		}
-		handler(res, req)
-	} else {
-		handler(res, req)
-	}
-}
-
-func getPrecisionMultiplier(precision string) time.Duration {
-	d := time.Nanosecond
-	switch precision {
-	case "u":
-		d = time.Microsecond
-	case "ms":
-		d = time.Millisecond
-	case "s":
-		d = time.Second
-	case "m":
-		d = time.Minute
-	case "h":
-		d = time.Hour
-	}
-	return d
-}
-
-func init() {
-	// http_listener deprecated in 1.9
-	inputs.Add("http_listener", func() telegraf.Input {
-		return &HTTPListener{
-			ServiceAddress: ":8186",
-			TimeFunc:       time.Now,
-		}
-	})
-	inputs.Add("influxdb_listener", func() telegraf.Input {
-		return &HTTPListener{
-			ServiceAddress: ":8186",
-			TimeFunc:       time.Now,
-		}
-	})
-}
diff --git a/plugins/inputs/influxdb_listener/http_listener_test.go b/plugins/inputs/influxdb_listener/http_listener_test.go
deleted file mode 100644
index 771bb5faf..000000000
--- a/plugins/inputs/influxdb_listener/http_listener_test.go
+++ /dev/null
@@ -1,484 +0,0 @@
-package http_listener
-
-import (
-	"bytes"
-	"crypto/tls"
-	"crypto/x509"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"runtime"
-	"strconv"
-	"sync"
-	"testing"
-	"time"
-
-	"github.com/influxdata/telegraf/internal"
-	"github.com/influxdata/telegraf/testutil"
-
-	"github.com/stretchr/testify/require"
-)
-
-const (
-	testMsg = "cpu_load_short,host=server01 value=12.0 1422568543702900257\n"
-
-	testMsgNoNewline = "cpu_load_short,host=server01 value=12.0 1422568543702900257"
-
-	testMsgs = `cpu_load_short,host=server02 value=12.0 1422568543702900257
-cpu_load_short,host=server03 value=12.0 1422568543702900257
-cpu_load_short,host=server04 value=12.0 1422568543702900257
-cpu_load_short,host=server05 value=12.0 1422568543702900257
-cpu_load_short,host=server06 value=12.0 1422568543702900257
-`
-	badMsg = "blahblahblah: 42\n"
-
-	emptyMsg = ""
-
-	basicUsername = "test-username-please-ignore"
-	basicPassword = "super-secure-password!"
-)
-
-var (
-	pki = testutil.NewPKI("../../../testutil/pki")
-)
-
-func newTestHTTPListener() *HTTPListener {
-	listener := &HTTPListener{
-		Log:            testutil.Logger{},
-		ServiceAddress: "localhost:0",
-		TimeFunc:       time.Now,
-	}
-	return listener
-}
-
-func newTestHTTPAuthListener() *HTTPListener {
-	listener := newTestHTTPListener()
-	listener.BasicUsername = basicUsername
-	listener.BasicPassword = basicPassword
-	return listener
-}
-
-func newTestHTTPSListener() *HTTPListener {
-	listener := &HTTPListener{
-		Log:            testutil.Logger{},
-		ServiceAddress: "localhost:0",
-		ServerConfig:   *pki.TLSServerConfig(),
-		TimeFunc:       time.Now,
-	}
-
-	return listener
-}
-
-func getHTTPSClient() *http.Client {
-	tlsConfig, err := pki.TLSClientConfig().TLSConfig()
-	if err != nil {
-		panic(err)
-	}
-	return &http.Client{
-		Transport: &http.Transport{
-			TLSClientConfig: tlsConfig,
-		},
-	}
-}
-
-func createURL(listener *HTTPListener, scheme string, path string, rawquery string) string {
-	u := url.URL{
-		Scheme:   scheme,
-		Host:     "localhost:" + strconv.Itoa(listener.Port),
-		Path:     path,
-		RawQuery: rawquery,
-	}
-	return u.String()
-}
-
-func TestWriteHTTPSNoClientAuth(t *testing.T) {
-	listener := newTestHTTPSListener()
-	listener.TLSAllowedCACerts = nil
-
-	acc := &testutil.Accumulator{}
-	require.NoError(t, listener.Start(acc))
-	defer listener.Stop()
-
-	cas := x509.NewCertPool()
-	cas.AppendCertsFromPEM([]byte(pki.ReadServerCert()))
-	noClientAuthClient := &http.Client{
-		Transport: &http.Transport{
-			TLSClientConfig: &tls.Config{
-				RootCAs: cas,
-			},
-		},
-	}
-
-	// post single message to listener
-	resp, err := noClientAuthClient.Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
-	require.NoError(t, err)
-	resp.Body.Close()
-	require.EqualValues(t, 204, resp.StatusCode)
-}
-
-func TestWriteHTTPSWithClientAuth(t *testing.T) {
-	listener := newTestHTTPSListener()
-
-	acc := &testutil.Accumulator{}
-	require.NoError(t, listener.Start(acc))
-	defer listener.Stop()
-
-	// post single message to listener
-	resp, err := getHTTPSClient().Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
-	require.NoError(t, err)
-	resp.Body.Close()
-	require.EqualValues(t, 204, resp.StatusCode)
-}
-
-func TestWriteHTTPBasicAuth(t *testing.T) {
-	listener := newTestHTTPAuthListener()
-
-	acc := &testutil.Accumulator{}
-	require.NoError(t, listener.Start(acc))
-	defer listener.Stop()
-
-	client := &http.Client{}
-
-	req, err := http.NewRequest("POST", createURL(listener, "http", "/write", "db=mydb"), bytes.NewBuffer([]byte(testMsg)))
-	require.NoError(t, err)
-	req.SetBasicAuth(basicUsername, basicPassword)
-	resp, err := client.Do(req)
-	require.NoError(t, err)
-	resp.Body.Close()
-	require.EqualValues(t, http.StatusNoContent, resp.StatusCode)
-}
-
-func TestWriteHTTPKeepDatabase(t *testing.T) {
-	testMsgWithDB := "cpu_load_short,host=server01,database=wrongdb value=12.0 1422568543702900257\n"
-
-	listener := newTestHTTPListener()
-	listener.DatabaseTag = "database"
-
-	acc := &testutil.Accumulator{}
-	require.NoError(t, listener.Start(acc))
-	defer listener.Stop()
-
-	// post single message to listener
-	resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
-	require.NoError(t, err)
-	resp.Body.Close()
-	require.EqualValues(t, 204, resp.StatusCode)
-
-	acc.Wait(1)
-	acc.AssertContainsTaggedFields(t, "cpu_load_short",
-		map[string]interface{}{"value": float64(12)},
-		map[string]string{"host": "server01", "database": "mydb"},
-	)
-
-	// post single message to listener with a database tag in it already. It should be clobbered.
-	resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgWithDB)))
-	require.NoError(t, err)
-	resp.Body.Close()
-	require.EqualValues(t, 204, resp.StatusCode)
-
-	acc.Wait(1)
-	acc.AssertContainsTaggedFields(t, "cpu_load_short",
-		map[string]interface{}{"value": float64(12)},
-		map[string]string{"host": "server01", "database": "mydb"},
-	)
-
-	// post multiple message to listener
-	resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
-	require.NoError(t, err)
-	resp.Body.Close()
-	require.EqualValues(t, 204, resp.StatusCode)
-
-	acc.Wait(2)
-	hostTags := []string{"server02", "server03",
-		"server04", "server05", "server06"}
-	for _, hostTag := range hostTags {
-		acc.AssertContainsTaggedFields(t, "cpu_load_short",
-			map[string]interface{}{"value": float64(12)},
-			map[string]string{"host": hostTag, "database": "mydb"},
-		)
-	}
-}
-
-// http listener should add a newline at the end of the buffer if it's not there
-func TestWriteHTTPNoNewline(t *testing.T) {
-	listener := newTestHTTPListener()
-
-	acc := &testutil.Accumulator{}
-	require.NoError(t, listener.Start(acc))
-	defer listener.Stop()
-
-	// post single message to listener
-	resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline)))
-	require.NoError(t, err)
-	resp.Body.Close()
-	require.EqualValues(t, 204, resp.StatusCode)
-
-	acc.Wait(1)
-	acc.AssertContainsTaggedFields(t, "cpu_load_short",
-		map[string]interface{}{"value": float64(12)},
-		map[string]string{"host": "server01"},
-	)
-}
-
-func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) {
-	listener := &HTTPListener{
-		Log:            testutil.Logger{},
-		ServiceAddress: "localhost:0",
-		MaxLineSize:    internal.Size{Size: 128 * 1000},
-		TimeFunc:       time.Now,
-	}
-
-	acc := &testutil.Accumulator{}
-	require.NoError(t, listener.Start(acc))
-	defer listener.Stop()
-
-	// Post a gigantic metric to the listener and verify that it writes OK this time:
-	resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric)))
-	require.NoError(t, err)
-	resp.Body.Close()
-	require.EqualValues(t, 204, resp.StatusCode)
-}
-
-func TestWriteHTTPVerySmallMaxBody(t *testing.T) {
-	listener := &HTTPListener{
-		Log:            testutil.Logger{},
-		ServiceAddress: "localhost:0",
-		MaxBodySize:    internal.Size{Size: 4096},
-		TimeFunc:       time.Now,
-	}
-
-	acc := &testutil.Accumulator{}
-	require.NoError(t, listener.Start(acc))
-	defer listener.Stop()
-
-	resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric)))
-	require.NoError(t, err)
-	resp.Body.Close()
-	require.EqualValues(t, 413, resp.StatusCode)
-}
-
-func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) {
-	listener := &HTTPListener{
-		Log:            testutil.Logger{},
-		ServiceAddress: "localhost:0",
-		MaxLineSize:    internal.Size{Size: 70},
-		TimeFunc:       time.Now,
-	}
-
-	acc := &testutil.Accumulator{}
-	require.NoError(t, listener.Start(acc))
-	defer listener.Stop()
-
-	resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(testMsgs)))
-	require.NoError(t, err)
-	resp.Body.Close()
-	require.EqualValues(t, 204, resp.StatusCode)
-
-	hostTags := []string{"server02", "server03",
-		"server04", "server05", "server06"}
-	acc.Wait(len(hostTags))
-	for _, hostTag := range hostTags {
-		acc.AssertContainsTaggedFields(t, "cpu_load_short",
-			map[string]interface{}{"value": float64(12)},
-			map[string]string{"host": hostTag},
-		)
-	}
-}
-
-func TestWriteHTTPLargeLinesSkipped(t *testing.T) {
-	listener := &HTTPListener{
-		Log:            testutil.Logger{},
-		ServiceAddress: "localhost:0",
-		MaxLineSize:    internal.Size{Size: 100},
-		TimeFunc:       time.Now,
-	}
-
-	acc := &testutil.Accumulator{}
-	require.NoError(t, listener.Start(acc))
-	defer listener.Stop()
-
-	resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs)))
-	require.NoError(t, err)
-	resp.Body.Close()
-	require.EqualValues(t, 400, resp.StatusCode)
-
-	hostTags := []string{"server02", "server03",
-		"server04", "server05", "server06"}
-	acc.Wait(len(hostTags))
-	for _, hostTag := range hostTags {
-		acc.AssertContainsTaggedFields(t, "cpu_load_short",
-			map[string]interface{}{"value": float64(12)},
-			map[string]string{"host": hostTag},
-		)
-	}
-}
-
-// test that writing gzipped data works
-func TestWriteHTTPGzippedData(t *testing.T) {
-	listener := newTestHTTPListener()
-
-	acc := &testutil.Accumulator{}
-	require.NoError(t, listener.Start(acc))
-	defer listener.Stop()
-
-	data, err := ioutil.ReadFile("./testdata/testmsgs.gz")
-	require.NoError(t, err)
-
-	req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data))
-	require.NoError(t, err)
-	req.Header.Set("Content-Encoding", "gzip")
-
-	client := &http.Client{}
-	resp, err := client.Do(req)
-	require.NoError(t, err)
-	require.EqualValues(t, 204, resp.StatusCode)
-
-	hostTags := []string{"server02", "server03",
-		"server04", "server05", "server06"}
-	acc.Wait(len(hostTags))
-	for _, hostTag := range hostTags {
-		acc.AssertContainsTaggedFields(t, "cpu_load_short",
-			map[string]interface{}{"value": float64(12)},
-			map[string]string{"host": hostTag},
-		)
-	}
-}
-
-// writes 25,000 metrics to the listener with 10 different writers
-func TestWriteHTTPHighTraffic(t *testing.T) {
-	if runtime.GOOS == "darwin" {
-		t.Skip("Skipping due to hang on darwin")
-	}
-	listener := newTestHTTPListener()
-
-	acc := &testutil.Accumulator{}
-	require.NoError(t, listener.Start(acc))
-	defer listener.Stop()
-
-	// post many messages to listener
-	var wg sync.WaitGroup
-	for i := 0; i < 10; i++ {
-		wg.Add(1)
-		go func(innerwg *sync.WaitGroup) {
-			defer innerwg.Done()
-			for i := 0; i < 500; i++ {
-				resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
-				require.NoError(t, err)
-				resp.Body.Close()
-				require.EqualValues(t, 204, resp.StatusCode)
-			}
-		}(&wg)
-	}
-
-	wg.Wait()
-	listener.Gather(acc)
-
-	acc.Wait(25000)
-	require.Equal(t, int64(25000), int64(acc.NMetrics()))
-}
-
-func TestReceive404ForInvalidEndpoint(t *testing.T) {
-	listener := newTestHTTPListener()
-
-	acc := &testutil.Accumulator{}
-	require.NoError(t, listener.Start(acc))
-	defer listener.Stop()
-
-	// post single message to listener
-	resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg)))
-	require.NoError(t, err)
-	resp.Body.Close()
-	require.EqualValues(t, 404, resp.StatusCode)
-}
-
-func TestWriteHTTPInvalid(t *testing.T) {
-	listener := newTestHTTPListener()
-
-	acc := &testutil.Accumulator{}
-	require.NoError(t, listener.Start(acc))
-	defer listener.Stop()
-
-	// post single message to listener
-	resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg)))
-	require.NoError(t, err)
-	resp.Body.Close()
-	require.EqualValues(t, 400, resp.StatusCode)
-}
-
-func TestWriteHTTPEmpty(t *testing.T) {
-	listener := newTestHTTPListener()
-
-	acc := &testutil.Accumulator{}
-	require.NoError(t, listener.Start(acc))
-	defer listener.Stop()
-
-	// post single message to listener
-	resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg)))
-	require.NoError(t, err)
-	resp.Body.Close()
-	require.EqualValues(t, 204, resp.StatusCode)
-}
-
-func TestQueryAndPingHTTP(t *testing.T) {
-	listener := newTestHTTPListener()
-
-	acc := &testutil.Accumulator{}
-	require.NoError(t, listener.Start(acc))
-	defer listener.Stop()
-
-	// post query to listener
-	resp, err := http.Post(
-		createURL(listener, "http", "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil)
-	require.NoError(t, err)
-	require.EqualValues(t, 200, resp.StatusCode)
-
-	// post ping to listener
-	resp, err = http.Post(createURL(listener, "http", "/ping", ""), "", nil)
-	require.NoError(t, err)
-	resp.Body.Close()
-	require.EqualValues(t, 204, resp.StatusCode)
-}
-
-func TestWriteWithPrecision(t *testing.T) {
-	listener := newTestHTTPListener()
-
-	acc := &testutil.Accumulator{}
-	require.NoError(t, listener.Start(acc))
-	defer listener.Stop()
-
-	msg := "xyzzy value=42 1422568543\n"
-	resp, err := http.Post(
-		createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg)))
-	require.NoError(t, err)
-	resp.Body.Close()
-	require.EqualValues(t, 204, resp.StatusCode)
-
-	acc.Wait(1)
-	require.Equal(t, 1, len(acc.Metrics))
-	require.Equal(t, time.Unix(0, 1422568543000000000), acc.Metrics[0].Time)
-}
-
-func TestWriteWithPrecisionNoTimestamp(t *testing.T) {
-	listener := newTestHTTPListener()
-	listener.TimeFunc = func() time.Time {
-		return time.Unix(42, 123456789)
-	}
-
-	acc := &testutil.Accumulator{}
-	require.NoError(t, listener.Start(acc))
-	defer listener.Stop()
-
-	msg := "xyzzy value=42\n"
-	resp, err := http.Post(
-		createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg)))
-	require.NoError(t, err)
-	resp.Body.Close()
-	require.EqualValues(t, 204, resp.StatusCode)
-
-	acc.Wait(1)
-	require.Equal(t, 1, len(acc.Metrics))
-	require.Equal(t, time.Unix(42, 0), acc.Metrics[0].Time)
-}
-
-const hugeMetric = `super_long_metric,foo=bar 
clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patter
ns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07
,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=
0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_
backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,u
sed_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl
_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_
processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connec
ted_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes
_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_chi
ldren=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitra
te=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048
576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33
792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fr
agmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,tota
l_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evi
cted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=
0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_
user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hit
s=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i -` diff --git a/plugins/inputs/influxdb_listener/influxdb_listener.go b/plugins/inputs/influxdb_listener/influxdb_listener.go new file mode 100644 index 000000000..60033e050 --- /dev/null +++ b/plugins/inputs/influxdb_listener/influxdb_listener.go @@ -0,0 +1,406 @@ +package influxdb_listener + +import ( + "compress/gzip" + "context" + "crypto/tls" + "encoding/json" + 
"fmt" + "net" + "net/http" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + tlsint "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers/influx" + "github.com/influxdata/telegraf/selfstat" +) + +const ( + // defaultMaxBodySize is the default maximum request body size, in bytes. + // if the request body is over this size, we will return an HTTP 413 error. + defaultMaxBodySize = 32 * 1024 * 1024 +) + +type InfluxDBListener struct { + ServiceAddress string `toml:"service_address"` + port int + tlsint.ServerConfig + + ReadTimeout internal.Duration `toml:"read_timeout"` + WriteTimeout internal.Duration `toml:"write_timeout"` + MaxBodySize internal.Size `toml:"max_body_size"` + MaxLineSize internal.Size `toml:"max_line_size"` // deprecated in 1.14; ignored + BasicUsername string `toml:"basic_username"` + BasicPassword string `toml:"basic_password"` + DatabaseTag string `toml:"database_tag"` + + timeFunc influx.TimeFunc + + listener net.Listener + server http.Server + + acc telegraf.Accumulator + + bytesRecv selfstat.Stat + requestsServed selfstat.Stat + writesServed selfstat.Stat + queriesServed selfstat.Stat + pingsServed selfstat.Stat + requestsRecv selfstat.Stat + notFoundsServed selfstat.Stat + buffersCreated selfstat.Stat + authFailures selfstat.Stat + + Log telegraf.Logger `toml:"-"` + + mux http.ServeMux +} + +const sampleConfig = ` + ## Address and port to host InfluxDB listener on + service_address = ":8186" + + ## maximum duration before timing out read of the request + read_timeout = "10s" + ## maximum duration before timing out write of the response + write_timeout = "10s" + + ## Maximum allowed HTTP request body size in bytes. + ## 0 means to use the default of 32MiB. + max_body_size = "32MiB" + + ## Optional tag name used to store the database. + ## If the write has a database in the query string then it will be kept in this tag name. + ## This tag can be used in downstream outputs. + ## The default value of nothing means it will be off and the database will not be recorded. + # database_tag = "" + + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Add service certificate and key + tls_cert = "/etc/telegraf/cert.pem" + tls_key = "/etc/telegraf/key.pem" + + ## Optional username and password to accept for HTTP basic authentication. + ## You probably want to make sure you have TLS configured above for this. 
+ # basic_username = "foobar" + # basic_password = "barfoo" +` + +func (h *InfluxDBListener) SampleConfig() string { + return sampleConfig +} + +func (h *InfluxDBListener) Description() string { + return "Accept metrics over InfluxDB 1.x HTTP API" +} + +func (h *InfluxDBListener) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (h *InfluxDBListener) routes() { + authHandler := internal.AuthHandler(h.BasicUsername, h.BasicPassword, "influxdb", + func(_ http.ResponseWriter) { + h.authFailures.Incr(1) + }, + ) + + h.mux.Handle("/write", authHandler(h.handleWrite())) + h.mux.Handle("/query", authHandler(h.handleQuery())) + h.mux.Handle("/ping", h.handlePing()) + h.mux.Handle("/", authHandler(h.handleDefault())) +} + +func (h *InfluxDBListener) Init() error { + tags := map[string]string{ + "address": h.ServiceAddress, + } + h.bytesRecv = selfstat.Register("influxdb_listener", "bytes_received", tags) + h.requestsServed = selfstat.Register("influxdb_listener", "requests_served", tags) + h.writesServed = selfstat.Register("influxdb_listener", "writes_served", tags) + h.queriesServed = selfstat.Register("influxdb_listener", "queries_served", tags) + h.pingsServed = selfstat.Register("influxdb_listener", "pings_served", tags) + h.requestsRecv = selfstat.Register("influxdb_listener", "requests_received", tags) + h.notFoundsServed = selfstat.Register("influxdb_listener", "not_founds_served", tags) + h.buffersCreated = selfstat.Register("influxdb_listener", "buffers_created", tags) + h.authFailures = selfstat.Register("influxdb_listener", "auth_failures", tags) + h.routes() + + if h.MaxBodySize.Size == 0 { + h.MaxBodySize.Size = defaultMaxBodySize + } + + if h.MaxLineSize.Size != 0 { + h.Log.Warnf("Use of deprecated configuration: 'max_line_size'; parser now handles lines of unlimited length and option is ignored") + } + + if h.ReadTimeout.Duration < time.Second { + h.ReadTimeout.Duration = time.Second * 10 + } + if h.WriteTimeout.Duration < time.Second { + h.WriteTimeout.Duration = time.Second * 10 + } + + return nil +} + +// Start starts the InfluxDB listener service. 
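+// It binds the configured service address, wrapping the socket in TLS when
+// server certificates are configured, and then serves HTTP from a background
+// goroutine until Stop is called.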
+func (h *InfluxDBListener) Start(acc telegraf.Accumulator) error { + h.acc = acc + + tlsConf, err := h.ServerConfig.TLSConfig() + if err != nil { + return err + } + + h.server = http.Server{ + Addr: h.ServiceAddress, + Handler: h, + ReadTimeout: h.ReadTimeout.Duration, + WriteTimeout: h.WriteTimeout.Duration, + TLSConfig: tlsConf, + } + + var listener net.Listener + if tlsConf != nil { + listener, err = tls.Listen("tcp", h.ServiceAddress, tlsConf) + if err != nil { + return err + } + } else { + listener, err = net.Listen("tcp", h.ServiceAddress) + if err != nil { + return err + } + } + h.listener = listener + h.port = listener.Addr().(*net.TCPAddr).Port + + go func() { + err := h.server.Serve(h.listener) + if err != http.ErrServerClosed { + h.Log.Errorf("Error serving HTTP on %s: %v", h.ServiceAddress, err) + } + }() + + h.Log.Infof("Started HTTP listener service on %s", h.ServiceAddress) + + return nil +} + +// Stop cleans up all resources +func (h *InfluxDBListener) Stop() { + err := h.server.Shutdown(context.Background()) + if err != nil { + h.Log.Errorf("Error shutting down HTTP server: %v", err) + } +} + +func (h *InfluxDBListener) ServeHTTP(res http.ResponseWriter, req *http.Request) { + h.requestsRecv.Incr(1) + h.mux.ServeHTTP(res, req) + h.requestsServed.Incr(1) +} + +func (h *InfluxDBListener) handleQuery() http.HandlerFunc { + return func(res http.ResponseWriter, req *http.Request) { + defer h.queriesServed.Incr(1) + // Deliver a dummy response to the query endpoint, as some InfluxDB + // clients test endpoint availability with a query + res.Header().Set("Content-Type", "application/json") + res.Header().Set("X-Influxdb-Version", "1.0") + res.WriteHeader(http.StatusOK) + res.Write([]byte("{\"results\":[]}")) + } +} + +func (h *InfluxDBListener) handlePing() http.HandlerFunc { + return func(res http.ResponseWriter, req *http.Request) { + defer h.pingsServed.Incr(1) + verbose := req.URL.Query().Get("verbose") + + // respond to ping requests + if verbose != "" && verbose != "0" && verbose != "false" { + res.WriteHeader(http.StatusOK) + b, _ := json.Marshal(map[string]string{"version": "1.0"}) // version matches the X-Influxdb-Version header set by the other endpoints + res.Write(b) + } else { + res.WriteHeader(http.StatusNoContent) + } + } +} + +func (h *InfluxDBListener) handleDefault() http.HandlerFunc { + return func(res http.ResponseWriter, req *http.Request) { + defer h.notFoundsServed.Incr(1) + http.NotFound(res, req) + } +} + +func (h *InfluxDBListener) handleWrite() http.HandlerFunc { + return func(res http.ResponseWriter, req *http.Request) { + defer h.writesServed.Incr(1) + // Check that the content length is not too large for us to handle.
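+ // Note: req.ContentLength is -1 when the length is unknown (for example,
+ // with chunked transfer encoding); a -1 value passes this check, and the
+ // http.MaxBytesReader wrapper below still enforces the same limit.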
+ if req.ContentLength > h.MaxBodySize.Size { + tooLarge(res) + return + } + + db := req.URL.Query().Get("db") + + body := req.Body + body = http.MaxBytesReader(res, body, h.MaxBodySize.Size) + // Handle gzip request bodies + if req.Header.Get("Content-Encoding") == "gzip" { + var err error + body, err = gzip.NewReader(body) + if err != nil { + h.Log.Debugf("Error decompressing request body: %v", err.Error()) + badRequest(res, err.Error()) + return + } + defer body.Close() + } + + parser := influx.NewStreamParser(body) + parser.SetTimeFunc(h.timeFunc) + + precisionStr := req.URL.Query().Get("precision") + if precisionStr != "" { + precision := getPrecisionMultiplier(precisionStr) + parser.SetTimePrecision(precision) + } + + var m telegraf.Metric + var err error + var parseErrorCount int + lastPos := 0 + var firstParseErrorStr string + for { + select { + case <-req.Context().Done(): + // Shutting down before parsing is finished. + res.WriteHeader(http.StatusServiceUnavailable) + return + default: + } + + m, err = parser.Next() + pos := parser.Position() + h.bytesRecv.Incr(int64(pos - lastPos)) + lastPos = pos + + // Continue parsing metrics even if some are malformed + if parseErr, ok := err.(*influx.ParseError); ok { + parseErrorCount++ + errStr := parseErr.Error() + if firstParseErrorStr == "" { + firstParseErrorStr = errStr + } + continue + } else if err != nil { + // Either we're exiting cleanly (err == + // influx.EOF) or there's an unexpected error + break + } + + if h.DatabaseTag != "" && db != "" { + m.AddTag(h.DatabaseTag, db) + } + + h.acc.AddMetric(m) + + } + if err != influx.EOF { + h.Log.Debugf("Error parsing the request body: %v", err.Error()) + badRequest(res, err.Error()) + return + } + if parseErrorCount > 0 { + var partialErrorString string + switch parseErrorCount { + case 1: + partialErrorString = firstParseErrorStr + case 2: + partialErrorString = fmt.Sprintf("%s (and 1 other parse error)", firstParseErrorStr) + default: + partialErrorString = fmt.Sprintf("%s (and %d other parse errors)", firstParseErrorStr, parseErrorCount-1) + } + partialWrite(res, partialErrorString) + return + } + + // HTTP request succeeded + res.WriteHeader(http.StatusNoContent) + } +} + +func tooLarge(res http.ResponseWriter) { + res.Header().Set("Content-Type", "application/json") + res.Header().Set("X-Influxdb-Version", "1.0") + res.Header().Set("X-Influxdb-Error", "http: request body too large") + res.WriteHeader(http.StatusRequestEntityTooLarge) + res.Write([]byte(`{"error":"http: request body too large"}`)) +} + +func badRequest(res http.ResponseWriter, errString string) { + res.Header().Set("Content-Type", "application/json") + res.Header().Set("X-Influxdb-Version", "1.0") + if errString == "" { + errString = "http: bad request" + } + res.Header().Set("X-Influxdb-Error", errString) + res.WriteHeader(http.StatusBadRequest) + res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString))) +} + +func partialWrite(res http.ResponseWriter, errString string) { + res.Header().Set("Content-Type", "application/json") + res.Header().Set("X-Influxdb-Version", "1.0") + res.Header().Set("X-Influxdb-Error", errString) + res.WriteHeader(http.StatusBadRequest) + res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString))) +} + +func getPrecisionMultiplier(precision string) time.Duration { + // InfluxDB silently defaults to nanoseconds if precision isn't + // one of the following: + var d time.Duration + switch precision { + case "u": + d = time.Microsecond + case "ms": + d =
time.Millisecond + case "s": + d = time.Second + case "m": + d = time.Minute + case "h": + d = time.Hour + default: + d = time.Nanosecond + } + return d +} + +func init() { + // http_listener deprecated in 1.9 + inputs.Add("http_listener", func() telegraf.Input { + return &InfluxDBListener{ + ServiceAddress: ":8186", + timeFunc: time.Now, + } + }) + inputs.Add("influxdb_listener", func() telegraf.Input { + return &InfluxDBListener{ + ServiceAddress: ":8186", + timeFunc: time.Now, + } + }) +} diff --git a/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go new file mode 100644 index 000000000..d3dc55219 --- /dev/null +++ b/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go @@ -0,0 +1,108 @@ +package influxdb_listener + +import ( + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/selfstat" + "github.com/influxdata/telegraf/testutil" +) + +// newListener is the minimal InfluxDBListener construction to serve writes. +func newListener() *InfluxDBListener { + listener := &InfluxDBListener{ + timeFunc: time.Now, + acc: &testutil.NopAccumulator{}, + bytesRecv: selfstat.Register("influxdb_listener", "bytes_received", map[string]string{}), + writesServed: selfstat.Register("influxdb_listener", "writes_served", map[string]string{}), + MaxBodySize: internal.Size{ + Size: defaultMaxBodySize, + }, + } + return listener +} + +func BenchmarkInfluxDBListener_serveWrite(b *testing.B) { + res := httptest.NewRecorder() + addr := "http://localhost/write?db=mydb" + + benchmarks := []struct { + name string + lines string + }{ + { + name: "single line, tag, and field", + lines: lines(1, 1, 1), + }, + { + name: "single line, 10 tags and fields", + lines: lines(1, 10, 10), + }, + { + name: "single line, 100 tags and fields", + lines: lines(1, 100, 100), + }, + { + name: "1k lines, single tag and field", + lines: lines(1000, 1, 1), + }, + { + name: "1k lines, 10 tags and fields", + lines: lines(1000, 10, 10), + }, + { + name: "10k lines, 10 tags and fields", + lines: lines(10000, 10, 10), + }, + { + name: "100k lines, 10 tags and fields", + lines: lines(100000, 10, 10), + }, + } + + for _, bm := range benchmarks { + b.Run(bm.name, func(b *testing.B) { + listener := newListener() + + b.ResetTimer() + for n := 0; n < b.N; n++ { + req, err := http.NewRequest("POST", addr, strings.NewReader(bm.lines)) + if err != nil { + b.Error(err) + } + listener.handleWrite()(res, req) + if res.Code != http.StatusNoContent { + b.Errorf("unexpected status %d", res.Code) + } + } + }) + } +} + +func lines(lines, numTags, numFields int) string { + lp := make([]string, lines) + for i := 0; i < lines; i++ { + tags := make([]string, numTags) + for j := 0; j < numTags; j++ { + tags[j] = fmt.Sprintf("t%d=v%d", j, j) + } + + fields := make([]string, numFields) + for k := 0; k < numFields; k++ { + fields[k] = fmt.Sprintf("f%d=%d", k, k) + } + + lp[i] = fmt.Sprintf("m%d,%s %s", + i, + strings.Join(tags, ","), + strings.Join(fields, ","), + ) + } + + return strings.Join(lp, "\n") +} diff --git a/plugins/inputs/influxdb_listener/influxdb_listener_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_test.go index 5badc1213..b8ea2014d 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener_test.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener_test.go @@ -1,114 +1,592 @@ -package http_listener +package 
influxdb_listener import ( - "fmt" + "bytes" + "crypto/tls" + "crypto/x509" + "io/ioutil" "net/http" - "net/http/httptest" - "strings" + "net/url" + "runtime" + "strconv" + "sync" "testing" "time" "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/plugins/parsers/influx" - "github.com/influxdata/telegraf/selfstat" "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/require" ) -// newListener is the minimal HTTPListener construction to serve writes. -func newListener() *HTTPListener { - listener := &HTTPListener{ - TimeFunc: time.Now, - acc: &testutil.NopAccumulator{}, - BytesRecv: selfstat.Register("http_listener", "bytes_received", map[string]string{}), - handler: influx.NewMetricHandler(), - pool: NewPool(200, DEFAULT_MAX_LINE_SIZE), - MaxLineSize: internal.Size{ - Size: DEFAULT_MAX_LINE_SIZE, - }, - MaxBodySize: internal.Size{ - Size: DEFAULT_MAX_BODY_SIZE, - }, +const ( + testMsg = "cpu_load_short,host=server01 value=12.0 1422568543702900257\n" + + testMsgNoNewline = "cpu_load_short,host=server01 value=12.0 1422568543702900257" + + testMsgs = `cpu_load_short,host=server02 value=12.0 1422568543702900257 +cpu_load_short,host=server03 value=12.0 1422568543702900257 +cpu_load_short,host=server04 value=12.0 1422568543702900257 +cpu_load_short,host=server05 value=12.0 1422568543702900257 +cpu_load_short,host=server06 value=12.0 1422568543702900257 +` + testPartial = `cpu,host=a value1=1 +cpu,host=b value1=1,value2=+Inf,value3=3 +cpu,host=c value1=1` + + badMsg = "blahblahblah: 42\n" + + emptyMsg = "" + + basicUsername = "test-username-please-ignore" + basicPassword = "super-secure-password!" +) + +var ( + pki = testutil.NewPKI("../../../testutil/pki") +) + +func newTestListener() *InfluxDBListener { + listener := &InfluxDBListener{ + Log: testutil.Logger{}, + ServiceAddress: "localhost:0", + timeFunc: time.Now, } - listener.parser = influx.NewParser(listener.handler) return listener } -func BenchmarkHTTPListener_serveWrite(b *testing.B) { - res := httptest.NewRecorder() - addr := "http://localhost/write?db=mydb" +func newTestAuthListener() *InfluxDBListener { + listener := newTestListener() + listener.BasicUsername = basicUsername + listener.BasicPassword = basicPassword + return listener +} - benchmarks := []struct { - name string - lines string - }{ - { - name: "single line, tag, and field", - lines: lines(1, 1, 1), +func newTestSecureListener() *InfluxDBListener { + listener := &InfluxDBListener{ + Log: testutil.Logger{}, + ServiceAddress: "localhost:0", + ServerConfig: *pki.TLSServerConfig(), + timeFunc: time.Now, + } + + return listener +} + +func getSecureClient() *http.Client { + tlsConfig, err := pki.TLSClientConfig().TLSConfig() + if err != nil { + panic(err) + } + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, }, - { - name: "single line, 10 tags and fields", - lines: lines(1, 10, 10), - }, - { - name: "single line, 100 tags and fields", - lines: lines(1, 100, 100), - }, - { - name: "1k lines, single tag and field", - lines: lines(1000, 1, 1), - }, - { - name: "1k lines, 10 tags and fields", - lines: lines(1000, 10, 10), - }, - { - name: "10k lines, 10 tags and fields", - lines: lines(10000, 10, 10), - }, - { - name: "100k lines, 10 tags and fields", - lines: lines(100000, 10, 10), + } +} + +func createURL(listener *InfluxDBListener, scheme string, path string, rawquery string) string { + u := url.URL{ + Scheme: scheme, + Host: "localhost:" + strconv.Itoa(listener.port), + Path: path, + RawQuery: 
rawquery, + } + return u.String() +} + +func TestWriteSecureNoClientAuth(t *testing.T) { + listener := newTestSecureListener() + listener.TLSAllowedCACerts = nil + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + cas := x509.NewCertPool() + cas.AppendCertsFromPEM([]byte(pki.ReadServerCert())) + noClientAuthClient := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: cas, + }, }, } - for _, bm := range benchmarks { - b.Run(bm.name, func(b *testing.B) { - listener := newListener() + // post single message to listener + resp, err := noClientAuthClient.Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) +} - b.ResetTimer() - for n := 0; n < b.N; n++ { - req, err := http.NewRequest("POST", addr, strings.NewReader(bm.lines)) - if err != nil { - b.Error(err) - } - listener.serveWrite(res, req) - if res.Code != http.StatusNoContent { - b.Errorf("unexpected status %d", res.Code) - } +func TestWriteSecureWithClientAuth(t *testing.T) { + listener := newTestSecureListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := getSecureClient().Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) +} + +func TestWriteBasicAuth(t *testing.T) { + listener := newTestAuthListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + client := &http.Client{} + + req, err := http.NewRequest("POST", createURL(listener, "http", "/write", "db=mydb"), bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + req.SetBasicAuth(basicUsername, basicPassword) + resp, err := client.Do(req) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, http.StatusNoContent, resp.StatusCode) +} + +func TestWriteKeepDatabase(t *testing.T) { + testMsgWithDB := "cpu_load_short,host=server01,database=wrongdb value=12.0 1422568543702900257\n" + + listener := newTestListener() + listener.DatabaseTag = "database" + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "database": "mydb"}, + ) + + // post single message to listener with a database tag in it already. It should be clobbered. 
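+ // (handleWrite re-adds the database tag from the db query parameter, overwriting the value supplied in the body, as asserted below)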
+ resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgWithDB))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "database": "mydb"}, + ) + + // post multiple messages to listener + resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(2) + hostTags := []string{"server02", "server03", + "server04", "server05", "server06"} + for _, hostTag := range hostTags { + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": hostTag, "database": "mydb"}, + ) + } +} + +// the listener should accept a message without a trailing newline +func TestWriteNoNewline(t *testing.T) { + listener := newTestListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01"}, + ) +} + +func TestPartialWrite(t *testing.T) { + listener := newTestListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testPartial))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 400, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu", + map[string]interface{}{"value1": float64(1)}, + map[string]string{"host": "a"}, + ) + acc.AssertContainsTaggedFields(t, "cpu", + map[string]interface{}{"value1": float64(1)}, + map[string]string{"host": "c"}, + ) +} + +func TestWriteMaxLineSizeIncrease(t *testing.T) { + listener := &InfluxDBListener{ + Log: testutil.Logger{}, + ServiceAddress: "localhost:0", + timeFunc: time.Now, + } + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // Post a gigantic metric to the listener and verify that it is accepted (line length is no longer limited): + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) +} + +func TestWriteVerySmallMaxBody(t *testing.T) { + listener := &InfluxDBListener{ + Log: testutil.Logger{}, + ServiceAddress: "localhost:0", + MaxBodySize: internal.Size{Size: 4096}, + timeFunc: time.Now, + } + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 413,
resp.StatusCode) +} + +func TestWriteLargeLine(t *testing.T) { + listener := &InfluxDBListener{ + Log: testutil.Logger{}, + ServiceAddress: "localhost:0", + timeFunc: func() time.Time { + return time.Unix(123456789, 0) + }, + } + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs))) + require.NoError(t, err) + resp.Body.Close() + // TODO: with the new parser, long lines aren't a problem. Do we need to skip them? + //require.EqualValues(t, 400, resp.StatusCode) + + expected := testutil.MustMetric( + "super_long_metric", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "clients": 42, + "connected_slaves": 43, + "evicted_keys": 44, + "expired_keys": 45, + "instantaneous_ops_per_sec": 46, + "keyspace_hitrate": 47.0, + "keyspace_hits": 48, + "keyspace_misses": 49, + "latest_fork_usec": 50, + "master_repl_offset": 51, + "mem_fragmentation_ratio": 52.58, + "pubsub_channels": 53, + "pubsub_patterns": 54, + "rdb_changes_since_last_save": 55, + "repl_backlog_active": 56, + "repl_backlog_histlen": 57, + "repl_backlog_size": 58, + "sync_full": 59, + "sync_partial_err": 60, + "sync_partial_ok": 61, + "total_commands_processed": 62, + "total_connections_received": 63, + "uptime": 64, + "used_cpu_sys": 65.07, + "used_cpu_sys_children": 66.0, + "used_cpu_user": 67.1, + "used_cpu_user_children": 68.0, + "used_memory": 692048, + "used_memory_lua": 70792, + "used_memory_peak": 711128, + "used_memory_rss": 7298144, + }, + time.Unix(123456789, 0), + ) + + m, ok := acc.Get("super_long_metric") + require.True(t, ok) + testutil.RequireMetricEqual(t, expected, testutil.FromTestMetric(m)) + + hostTags := []string{"server02", "server03", + "server04", "server05", "server06"} + acc.Wait(len(hostTags)) + for _, hostTag := range hostTags { + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": hostTag}, + ) + } +} + +// test that writing gzipped data works +func TestWriteGzippedData(t *testing.T) { + listener := newTestListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + data, err := ioutil.ReadFile("./testdata/testmsgs.gz") + require.NoError(t, err) + + req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data)) + require.NoError(t, err) + req.Header.Set("Content-Encoding", "gzip") + + client := &http.Client{} + resp, err := client.Do(req) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + hostTags := []string{"server02", "server03", + "server04", "server05", "server06"} + acc.Wait(len(hostTags)) + for _, hostTag := range hostTags { + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": hostTag}, + ) + } +} + +// writes 25,000 metrics to the listener with 10 different writers +func TestWriteHighTraffic(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("Skipping due to hang on darwin") + } + listener := newTestListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post many messages to listener + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func(innerwg
*sync.WaitGroup) { + defer innerwg.Done() + for i := 0; i < 500; i++ { + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + } + }(&wg) + } + + wg.Wait() + listener.Gather(acc) + + acc.Wait(25000) + require.Equal(t, int64(25000), int64(acc.NMetrics())) +} + +func TestReceive404ForInvalidEndpoint(t *testing.T) { + listener := newTestListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 404, resp.StatusCode) +} + +func TestWriteInvalid(t *testing.T) { + listener := newTestListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 400, resp.StatusCode) +} + +func TestWriteEmpty(t *testing.T) { + listener := newTestListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) +} + +func TestQueryAndPing(t *testing.T) { + listener := newTestListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post query to listener + resp, err := http.Post( + createURL(listener, "http", "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 200, resp.StatusCode) + + // post ping to listener + resp, err = http.Post(createURL(listener, "http", "/ping", ""), "", nil) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) +} + +func TestWriteWithPrecision(t *testing.T) { + listener := newTestListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + msg := "xyzzy value=42 1422568543\n" + resp, err := http.Post( + createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + require.Equal(t, 1, len(acc.Metrics)) + // When timestamp is provided, the precision parameter is + // overloaded to specify the timestamp's unit + require.Equal(t, time.Unix(0, 1422568543000000000), acc.Metrics[0].Time) +} + +func TestWriteWithPrecisionNoTimestamp(t *testing.T) { + listener := newTestListener() + listener.timeFunc = func() time.Time { + return time.Unix(42, 123456789) + } + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + msg := "xyzzy value=42\n" + resp, err := http.Post( + createURL(listener, "http", "/write",
"precision=s"), "", bytes.NewBuffer([]byte(msg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + require.Equal(t, 1, len(acc.Metrics)) + // When timestamp is omitted, the precision parameter actually + // specifies the precision. The timestamp is set to the greatest + // integer unit less than the provided timestamp (floor). + require.Equal(t, time.Unix(42, 0), acc.Metrics[0].Time) +} + +func TestWriteParseErrors(t *testing.T) { + var tests = []struct { + name string + input string + expected string + }{ + { + name: "one parse error", + input: "foo value=1.0\nfoo value=2asdf2.0\nfoo value=3.0\nfoo value=4.0", + expected: `metric parse error: expected field at 2:12: "foo value=2"`, + }, + { + name: "two parse errors", + input: "foo value=1asdf2.0\nfoo value=2.0\nfoo value=3asdf2.0\nfoo value=4.0", + expected: `metric parse error: expected field at 1:12: "foo value=1" (and 1 other parse error)`, + }, + { + name: "three or more parse errors", + input: "foo value=1asdf2.0\nfoo value=2.0\nfoo value=3asdf2.0\nfoo value=4asdf2.0", + expected: `metric parse error: expected field at 1:12: "foo value=1" (and 2 other parse errors)`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + listener := newTestListener() + + acc := &testutil.NopAccumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(tt.input))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 400, resp.StatusCode) + require.Equal(t, tt.expected, resp.Header["X-Influxdb-Error"][0]) }) } } -func lines(lines, numTags, numFields int) string { - lp := make([]string, lines) - for i := 0; i < lines; i++ { - tags := make([]string, numTags) - for j := 0; j < numTags; j++ { - tags[j] = fmt.Sprintf("t%d=v%d", j, j) - } - - fields := make([]string, numFields) - for k := 0; k < numFields; k++ { - fields[k] = fmt.Sprintf("f%d=%d", k, k) - } - - lp[i] = fmt.Sprintf("m%d,%s %s", - i, - strings.Join(tags, ","), - strings.Join(fields, ","), - ) - } - - return strings.Join(lp, "\n") -} +const hugeMetric = `super_long_metric,foo=bar 
clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,used_cpu_
user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hit
s=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=42i,connected_slaves=43i,evicted_keys=44i,expired_keys=45i,instantaneous_ops_per_sec=46i,keyspace_hitrate=47,keyspace_hits=48i,keyspace_misses=49i,latest_fork_usec=50i,master_repl_offset=51i,mem_fragmentation_ratio=52.58,pubsub_channels=53i,pubsub_patterns=54i,rdb_changes_since_last_save=55i,repl_backlog_active=56i,repl_backlog_histlen=57i,repl_backlog_size=58i,sync_full=59i,sync_partial_err=60i,sync_partial_ok=61i,total_commands_processed=62i,total_connections_received=63i,uptime=64i,used_cpu_sys=65.07,used_cpu_sys_children=66,used_cpu_user=67.1,used_cpu_user_children=68,used_memory=692048i,used_memory_lua=70792i,used_memory_peak=711128i,used_memory_rss=7298144i +` diff --git a/plugins/outputs/health/health.go b/plugins/outputs/health/health.go index a6db63183..b85329342 100644 --- a/plugins/outputs/health/health.go +++ b/plugins/outputs/health/health.go @@ -136,7 +136,7 @@ func (h *Health) Init() error { // Connect starts the HTTP server. 
func (h *Health) Connect() error { - authHandler := internal.AuthHandler(h.BasicUsername, h.BasicPassword, onAuthError) + authHandler := internal.AuthHandler(h.BasicUsername, h.BasicPassword, "health", onAuthError) h.server = &http.Server{ Addr: h.ServiceAddress, @@ -168,8 +168,7 @@ func (h *Health) Connect() error { return nil } -func onAuthError(rw http.ResponseWriter, code int) { - http.Error(rw, http.StatusText(code), code) +func onAuthError(_ http.ResponseWriter) { } func (h *Health) listen() (net.Listener, error) { diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index afdf7e107..c28ec54ec 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -156,7 +156,7 @@ func (p *PrometheusClient) Init() error { ipRange = append(ipRange, ipNet) } - authHandler := internal.AuthHandler(p.BasicUsername, p.BasicPassword, onAuthError) + authHandler := internal.AuthHandler(p.BasicUsername, p.BasicPassword, "prometheus", onAuthError) rangeHandler := internal.IPRangeHandler(ipRange, onError) promHandler := promhttp.HandlerFor(registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}) @@ -219,9 +219,7 @@ func (p *PrometheusClient) Connect() error { return nil } -func onAuthError(rw http.ResponseWriter, code int) { - rw.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`) - http.Error(rw, http.StatusText(code), code) +func onAuthError(_ http.ResponseWriter) { } func onError(rw http.ResponseWriter, code int) { diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go index b59ea9799..848a51699 100644 --- a/plugins/parsers/csv/parser.go +++ b/plugins/parsers/csv/parser.go @@ -13,6 +13,8 @@ import ( "github.com/influxdata/telegraf/metric" ) +type TimeFunc func() time.Time + type Parser struct { MetricName string HeaderRowCount int @@ -31,7 +33,7 @@ type Parser struct { TimeFunc func() time.Time } -func (p *Parser) SetTimeFunc(fn metric.TimeFunc) { +func (p *Parser) SetTimeFunc(fn TimeFunc) { p.TimeFunc = fn } diff --git a/plugins/parsers/dropwizard/parser.go b/plugins/parsers/dropwizard/parser.go index 95ce3bffd..d8dcc9204 100644 --- a/plugins/parsers/dropwizard/parser.go +++ b/plugins/parsers/dropwizard/parser.go @@ -17,6 +17,8 @@ import ( var fieldEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"") var keyEscaper = strings.NewReplacer(" ", "\\ ", ",", "\\,", "=", "\\=") +type TimeFunc func() time.Time + // Parser parses json inputs containing dropwizard metrics, // either top-level or embedded inside a json field. // This parser is using gjson for retrieving paths within the json file. 
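The parser hunks here replace the shared metric.TimeFunc with a package-local `type TimeFunc func() time.Time` in each parser, so parsers no longer reach into the metric package just for a clock type. A minimal sketch of the injectable clock in use, pinning timestamps for a deterministic test; the csv.Parser fields and SetTimeFunc come from the csv hunk above, while the measurement name and input data are hypothetical:

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/plugins/parsers/csv"
)

func main() {
	// A fixed clock: rows without a timestamp column all get the Unix
	// epoch, which keeps test expectations deterministic.
	fixedClock := func() time.Time { return time.Unix(0, 0) }

	p := &csv.Parser{
		MetricName:     "example", // hypothetical measurement name
		HeaderRowCount: 1,         // first row is the header
	}
	p.SetTimeFunc(fixedClock) // fixedClock satisfies csv.TimeFunc

	metrics, err := p.Parse([]byte("value\n42\n"))
	if err != nil {
		panic(err)
	}
	for _, m := range metrics {
		fmt.Println(m.Name(), m.Fields(), m.Time())
	}
}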
@@ -48,7 +50,7 @@ type parser struct { separator string templateEngine *templating.Engine - timeFunc metric.TimeFunc + timeFunc TimeFunc // seriesParser parses line protocol measurement + tags seriesParser *influx.Parser @@ -267,6 +269,6 @@ func (p *parser) readDWMetrics(metricType string, dwms interface{}, metrics []te return metrics } -func (p *parser) SetTimeFunc(f metric.TimeFunc) { +func (p *parser) SetTimeFunc(f TimeFunc) { p.timeFunc = f } diff --git a/plugins/parsers/dropwizard/parser_test.go b/plugins/parsers/dropwizard/parser_test.go index df33562db..df75c7f25 100644 --- a/plugins/parsers/dropwizard/parser_test.go +++ b/plugins/parsers/dropwizard/parser_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/require" ) -var TimeFunc = func() time.Time { +var testTimeFunc = func() time.Time { return time.Unix(0, 0) } @@ -528,7 +528,7 @@ func TestDropWizard(t *testing.T) { map[string]interface{}{ "value": 42.0, }, - TimeFunc(), + testTimeFunc(), ), ), }, @@ -547,7 +547,7 @@ func TestDropWizard(t *testing.T) { map[string]interface{}{ "value": 42.0, }, - TimeFunc(), + testTimeFunc(), ), ), }, @@ -573,7 +573,7 @@ func TestDropWizard(t *testing.T) { map[string]interface{}{ "value": 42.0, }, - TimeFunc(), + testTimeFunc(), ), ), }, @@ -584,7 +584,7 @@ func TestDropWizard(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := NewParser() - parser.SetTimeFunc(TimeFunc) + parser.SetTimeFunc(testTimeFunc) metrics, err := parser.Parse(tt.input) tt.errFunc(t, err) diff --git a/plugins/parsers/influx/handler.go b/plugins/parsers/influx/handler.go index 928671cc9..2f088d19d 100644 --- a/plugins/parsers/influx/handler.go +++ b/plugins/parsers/influx/handler.go @@ -10,43 +10,53 @@ import ( "github.com/influxdata/telegraf/metric" ) +// MetricHandler implements the Handler interface and produces telegraf.Metric. type MetricHandler struct { - builder *metric.Builder - err error - precision time.Duration + err error + timePrecision time.Duration + timeFunc TimeFunc + metric telegraf.Metric } func NewMetricHandler() *MetricHandler { return &MetricHandler{ - builder: metric.NewBuilder(), - precision: time.Nanosecond, + timePrecision: time.Nanosecond, + timeFunc: time.Now, } } -func (h *MetricHandler) SetTimeFunc(f metric.TimeFunc) { - h.builder.TimeFunc = f +func (h *MetricHandler) SetTimePrecision(p time.Duration) { + h.timePrecision = p + // When the timestamp is omitted from the metric, the timestamp + // comes from the server clock, truncated to the nearest unit of + // measurement provided in precision. + // + // When a timestamp is provided in the metric, precision is + // overloaded to hold the unit of measurement of the timestamp.
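// An illustrative aside (not part of this patch): with timePrecision set
// to time.Millisecond, the two cases behave as follows.
//
//   "cpu value=1"                -> SetTime(timeFunc().Truncate(time.Millisecond))
//   "cpu value=1 1560000000000"  -> SetTime(time.Unix(0, 1560000000000*int64(time.Millisecond)))
//
// In the second case the bare integer is read as a count of milliseconds
// rather than nanoseconds, which is the "overloaded" behavior noted above.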
} -func (h *MetricHandler) SetTimePrecision(precision time.Duration) { - h.builder.TimePrecision = precision - h.precision = precision +func (h *MetricHandler) SetTimeFunc(f TimeFunc) { + h.timeFunc = f } func (h *MetricHandler) Metric() (telegraf.Metric, error) { - m, err := h.builder.Metric() - h.builder.Reset() - return m, err + if h.metric.Time().IsZero() { + h.metric.SetTime(h.timeFunc().Truncate(h.timePrecision)) + } + return h.metric, nil } func (h *MetricHandler) SetMeasurement(name []byte) error { - h.builder.SetName(nameUnescape(name)) - return nil + var err error + h.metric, err = metric.New(nameUnescape(name), + nil, nil, time.Time{}) + return err } func (h *MetricHandler) AddTag(key []byte, value []byte) error { tk := unescape(key) tv := unescape(value) - h.builder.AddTag(tk, tv) + h.metric.AddTag(tk, tv) return nil } @@ -59,7 +69,7 @@ func (h *MetricHandler) AddInt(key []byte, value []byte) error { } return err } - h.builder.AddField(fk, fv) + h.metric.AddField(fk, fv) return nil } @@ -72,7 +82,7 @@ func (h *MetricHandler) AddUint(key []byte, value []byte) error { } return err } - h.builder.AddField(fk, fv) + h.metric.AddField(fk, fv) return nil } @@ -85,14 +95,14 @@ func (h *MetricHandler) AddFloat(key []byte, value []byte) error { } return err } - h.builder.AddField(fk, fv) + h.metric.AddField(fk, fv) return nil } func (h *MetricHandler) AddString(key []byte, value []byte) error { fk := unescape(key) fv := stringFieldUnescape(value) - h.builder.AddField(fk, fv) + h.metric.AddField(fk, fv) return nil } @@ -102,7 +112,7 @@ func (h *MetricHandler) AddBool(key []byte, value []byte) error { if err != nil { return errors.New("unparseable bool") } - h.builder.AddField(fk, fv) + h.metric.AddField(fk, fv) return nil } @@ -114,11 +124,9 @@ func (h *MetricHandler) SetTimestamp(tm []byte) error { } return err } - ns := v * int64(h.precision) - h.builder.SetTime(time.Unix(0, ns)) + + //time precision is overloaded to mean time unit here + ns := v * int64(h.timePrecision) + h.metric.SetTime(time.Unix(0, ns)) return nil } - -func (h *MetricHandler) Reset() { - h.builder.Reset() -} diff --git a/plugins/parsers/influx/machine.go b/plugins/parsers/influx/machine.go index b185eeabe..2a738f669 100644 --- a/plugins/parsers/influx/machine.go +++ b/plugins/parsers/influx/machine.go @@ -4,6 +4,7 @@ package influx import ( "errors" + "io" ) var ( @@ -16,22 +17,22 @@ var ( ) -//line plugins/parsers/influx/machine.go.rl:304 +//line plugins/parsers/influx/machine.go.rl:310 -//line plugins/parsers/influx/machine.go:24 -const LineProtocol_start int = 259 -const LineProtocol_first_final int = 259 +//line plugins/parsers/influx/machine.go:25 +const LineProtocol_start int = 270 +const LineProtocol_first_final int = 270 const LineProtocol_error int = 0 -const LineProtocol_en_main int = 259 -const LineProtocol_en_discard_line int = 247 -const LineProtocol_en_align int = 715 -const LineProtocol_en_series int = 250 +const LineProtocol_en_main int = 270 +const LineProtocol_en_discard_line int = 258 +const LineProtocol_en_align int = 740 +const LineProtocol_en_series int = 261 -//line plugins/parsers/influx/machine.go.rl:307 +//line plugins/parsers/influx/machine.go.rl:313 type Handler interface { SetMeasurement(name []byte) error @@ -45,14 +46,17 @@ type Handler interface { } type machine struct { - data []byte - cs int - p, pe, eof int - pb int - lineno int - sol int - handler Handler - initState int + data []byte + cs int + p, pe, eof int + pb int + lineno int + sol int + handler Handler + initState int + key 
[]byte + beginMetric bool + finishMetric bool } func NewMachine(handler Handler) *machine { @@ -62,24 +66,24 @@ func NewMachine(handler Handler) *machine { } -//line plugins/parsers/influx/machine.go.rl:337 +//line plugins/parsers/influx/machine.go.rl:346 -//line plugins/parsers/influx/machine.go.rl:338 +//line plugins/parsers/influx/machine.go.rl:347 -//line plugins/parsers/influx/machine.go.rl:339 +//line plugins/parsers/influx/machine.go.rl:348 -//line plugins/parsers/influx/machine.go.rl:340 +//line plugins/parsers/influx/machine.go.rl:349 -//line plugins/parsers/influx/machine.go.rl:341 +//line plugins/parsers/influx/machine.go.rl:350 -//line plugins/parsers/influx/machine.go.rl:342 +//line plugins/parsers/influx/machine.go.rl:351 -//line plugins/parsers/influx/machine.go:78 +//line plugins/parsers/influx/machine.go:82 { ( m.cs) = LineProtocol_start } -//line plugins/parsers/influx/machine.go.rl:343 +//line plugins/parsers/influx/machine.go.rl:352 return m } @@ -91,22 +95,22 @@ func NewSeriesMachine(handler Handler) *machine { } -//line plugins/parsers/influx/machine.go.rl:354 +//line plugins/parsers/influx/machine.go.rl:363 -//line plugins/parsers/influx/machine.go.rl:355 +//line plugins/parsers/influx/machine.go.rl:364 -//line plugins/parsers/influx/machine.go.rl:356 +//line plugins/parsers/influx/machine.go.rl:365 -//line plugins/parsers/influx/machine.go.rl:357 +//line plugins/parsers/influx/machine.go.rl:366 -//line plugins/parsers/influx/machine.go.rl:358 +//line plugins/parsers/influx/machine.go.rl:367 -//line plugins/parsers/influx/machine.go:105 +//line plugins/parsers/influx/machine.go:109 { ( m.cs) = LineProtocol_start } -//line plugins/parsers/influx/machine.go.rl:359 +//line plugins/parsers/influx/machine.go.rl:368 return m } @@ -119,14 +123,17 @@ func (m *machine) SetData(data []byte) { m.sol = 0 m.pe = len(data) m.eof = len(data) + m.key = nil + m.beginMetric = false + m.finishMetric = false -//line plugins/parsers/influx/machine.go:125 +//line plugins/parsers/influx/machine.go:132 { ( m.cs) = LineProtocol_start } -//line plugins/parsers/influx/machine.go.rl:373 +//line plugins/parsers/influx/machine.go.rl:385 m.cs = m.initState } @@ -139,12 +146,17 @@ func (m *machine) Next() error { return EOF } - var err error - var key []byte - foundMetric := false + m.key = nil + m.beginMetric = false + m.finishMetric = false + return m.exec() +} + +func (m *machine) exec() error { + var err error -//line plugins/parsers/influx/machine.go:148 +//line plugins/parsers/influx/machine.go:160 { if ( m.p) == ( m.pe) { goto _test_eof @@ -153,8 +165,8 @@ func (m *machine) Next() error { _again: switch ( m.cs) { - case 259: - goto st259 + case 270: + goto st270 case 1: goto st1 case 2: @@ -171,14 +183,14 @@ _again: goto st6 case 7: goto st7 + case 271: + goto st271 + case 272: + goto st272 + case 273: + goto st273 case 8: goto st8 - case 260: - goto st260 - case 261: - goto st261 - case 262: - goto st262 case 9: goto st9 case 10: @@ -227,46 +239,22 @@ _again: goto st31 case 32: goto st32 - case 33: - goto st33 - case 263: - goto st263 - case 264: - goto st264 - case 34: - goto st34 - case 35: - goto st35 - case 265: - goto st265 - case 266: - goto st266 - case 267: - goto st267 - case 36: - goto st36 - case 268: - goto st268 - case 269: - goto st269 - case 270: - goto st270 - case 271: - goto st271 - case 272: - goto st272 - case 273: - goto st273 case 274: goto st274 case 275: goto st275 + case 33: + goto st33 + case 34: + goto st34 case 276: goto st276 case 277: goto st277 case 278: goto 
st278 + case 35: + goto st35 case 279: goto st279 case 280: @@ -281,26 +269,12 @@ _again: goto st284 case 285: goto st285 - case 37: - goto st37 - case 38: - goto st38 case 286: goto st286 case 287: goto st287 case 288: goto st288 - case 39: - goto st39 - case 40: - goto st40 - case 41: - goto st41 - case 42: - goto st42 - case 43: - goto st43 case 289: goto st289 case 290: @@ -309,8 +283,6 @@ _again: goto st291 case 292: goto st292 - case 44: - goto st44 case 293: goto st293 case 294: @@ -319,12 +291,26 @@ _again: goto st295 case 296: goto st296 + case 36: + goto st36 + case 37: + goto st37 case 297: goto st297 case 298: goto st298 case 299: goto st299 + case 38: + goto st38 + case 39: + goto st39 + case 40: + goto st40 + case 41: + goto st41 + case 42: + goto st42 case 300: goto st300 case 301: @@ -333,6 +319,8 @@ _again: goto st302 case 303: goto st303 + case 43: + goto st43 case 304: goto st304 case 305: @@ -355,6 +343,30 @@ _again: goto st313 case 314: goto st314 + case 315: + goto st315 + case 316: + goto st316 + case 317: + goto st317 + case 318: + goto st318 + case 319: + goto st319 + case 320: + goto st320 + case 321: + goto st321 + case 322: + goto st322 + case 323: + goto st323 + case 324: + goto st324 + case 325: + goto st325 + case 44: + goto st44 case 45: goto st45 case 46: @@ -373,14 +385,14 @@ _again: goto st52 case 53: goto st53 + case 326: + goto st326 + case 327: + goto st327 + case 328: + goto st328 case 54: goto st54 - case 315: - goto st315 - case 316: - goto st316 - case 317: - goto st317 case 55: goto st55 case 56: @@ -391,36 +403,12 @@ _again: goto st58 case 59: goto st59 - case 60: - goto st60 - case 318: - goto st318 - case 319: - goto st319 - case 61: - goto st61 - case 320: - goto st320 - case 321: - goto st321 - case 322: - goto st322 - case 323: - goto st323 - case 324: - goto st324 - case 325: - goto st325 - case 326: - goto st326 - case 327: - goto st327 - case 328: - goto st328 case 329: goto st329 case 330: goto st330 + case 60: + goto st60 case 331: goto st331 case 332: @@ -439,16 +427,12 @@ _again: goto st338 case 339: goto st339 - case 62: - goto st62 case 340: goto st340 case 341: goto st341 case 342: goto st342 - case 63: - goto st63 case 343: goto st343 case 344: @@ -465,12 +449,16 @@ _again: goto st349 case 350: goto st350 + case 61: + goto st61 case 351: goto st351 case 352: goto st352 case 353: goto st353 + case 62: + goto st62 case 354: goto st354 case 355: @@ -489,48 +477,20 @@ _again: goto st361 case 362: goto st362 - case 64: - goto st64 - case 65: - goto st65 - case 66: - goto st66 - case 67: - goto st67 - case 68: - goto st68 case 363: goto st363 - case 69: - goto st69 - case 70: - goto st70 - case 71: - goto st71 - case 72: - goto st72 - case 73: - goto st73 case 364: goto st364 case 365: goto st365 case 366: goto st366 - case 74: - goto st74 - case 75: - goto st75 case 367: goto st367 case 368: goto st368 - case 76: - goto st76 case 369: goto st369 - case 77: - goto st77 case 370: goto st370 case 371: @@ -539,20 +499,48 @@ _again: goto st372 case 373: goto st373 + case 63: + goto st63 + case 64: + goto st64 + case 65: + goto st65 + case 66: + goto st66 + case 67: + goto st67 case 374: goto st374 + case 68: + goto st68 + case 69: + goto st69 + case 70: + goto st70 + case 71: + goto st71 + case 72: + goto st72 case 375: goto st375 case 376: goto st376 case 377: goto st377 + case 73: + goto st73 + case 74: + goto st74 case 378: goto st378 case 379: goto st379 + case 75: + goto st75 case 380: goto st380 + case 76: + goto st76 case 381: goto 
st381 case 382: @@ -571,6 +559,30 @@ _again: goto st388 case 389: goto st389 + case 390: + goto st390 + case 391: + goto st391 + case 392: + goto st392 + case 393: + goto st393 + case 394: + goto st394 + case 395: + goto st395 + case 396: + goto st396 + case 397: + goto st397 + case 398: + goto st398 + case 399: + goto st399 + case 400: + goto st400 + case 77: + goto st77 case 78: goto st78 case 79: @@ -597,52 +609,6 @@ _again: goto st89 case 90: goto st90 - case 91: - goto st91 - case 390: - goto st390 - case 391: - goto st391 - case 392: - goto st392 - case 393: - goto st393 - case 92: - goto st92 - case 93: - goto st93 - case 94: - goto st94 - case 95: - goto st95 - case 394: - goto st394 - case 395: - goto st395 - case 96: - goto st96 - case 97: - goto st97 - case 396: - goto st396 - case 98: - goto st98 - case 99: - goto st99 - case 397: - goto st397 - case 398: - goto st398 - case 100: - goto st100 - case 399: - goto st399 - case 400: - goto st400 - case 101: - goto st101 - case 102: - goto st102 case 401: goto st401 case 402: @@ -651,20 +617,42 @@ _again: goto st403 case 404: goto st404 + case 91: + goto st91 + case 92: + goto st92 + case 93: + goto st93 + case 94: + goto st94 case 405: goto st405 case 406: goto st406 + case 95: + goto st95 + case 96: + goto st96 case 407: goto st407 + case 97: + goto st97 + case 98: + goto st98 case 408: goto st408 case 409: goto st409 + case 99: + goto st99 case 410: goto st410 case 411: goto st411 + case 100: + goto st100 + case 101: + goto st101 case 412: goto st412 case 413: @@ -679,26 +667,18 @@ _again: goto st417 case 418: goto st418 - case 103: - goto st103 case 419: goto st419 case 420: goto st420 case 421: goto st421 - case 104: - goto st104 - case 105: - goto st105 case 422: goto st422 case 423: goto st423 case 424: goto st424 - case 106: - goto st106 case 425: goto st425 case 426: @@ -709,18 +689,26 @@ _again: goto st428 case 429: goto st429 + case 102: + goto st102 case 430: goto st430 case 431: goto st431 case 432: goto st432 + case 103: + goto st103 + case 104: + goto st104 case 433: goto st433 case 434: goto st434 case 435: goto st435 + case 105: + goto st105 case 436: goto st436 case 437: @@ -739,8 +727,6 @@ _again: goto st443 case 444: goto st444 - case 107: - goto st107 case 445: goto st445 case 446: @@ -763,6 +749,8 @@ _again: goto st454 case 455: goto st455 + case 106: + goto st106 case 456: goto st456 case 457: @@ -785,26 +773,12 @@ _again: goto st465 case 466: goto st466 - case 108: - goto st108 - case 109: - goto st109 - case 110: - goto st110 - case 111: - goto st111 - case 112: - goto st112 case 467: goto st467 - case 113: - goto st113 case 468: goto st468 case 469: goto st469 - case 114: - goto st114 case 470: goto st470 case 471: @@ -821,48 +795,36 @@ _again: goto st476 case 477: goto st477 + case 107: + goto st107 + case 108: + goto st108 + case 109: + goto st109 + case 110: + goto st110 + case 111: + goto st111 case 478: goto st478 - case 115: - goto st115 - case 116: - goto st116 - case 117: - goto st117 + case 112: + goto st112 case 479: goto st479 - case 118: - goto st118 - case 119: - goto st119 - case 120: - goto st120 case 480: goto st480 - case 121: - goto st121 - case 122: - goto st122 + case 113: + goto st113 case 481: goto st481 case 482: goto st482 - case 123: - goto st123 - case 124: - goto st124 - case 125: - goto st125 - case 126: - goto st126 case 483: goto st483 case 484: goto st484 case 485: goto st485 - case 127: - goto st127 case 486: goto st486 case 487: @@ -871,20 +833,46 @@ _again: goto st488 case 
489: goto st489 + case 114: + goto st114 + case 115: + goto st115 + case 116: + goto st116 case 490: goto st490 + case 117: + goto st117 + case 118: + goto st118 + case 119: + goto st119 case 491: goto st491 + case 120: + goto st120 + case 121: + goto st121 case 492: goto st492 case 493: goto st493 + case 122: + goto st122 + case 123: + goto st123 + case 124: + goto st124 + case 125: + goto st125 case 494: goto st494 case 495: goto st495 case 496: goto st496 + case 126: + goto st126 case 497: goto st497 case 498: @@ -903,10 +891,6 @@ _again: goto st504 case 505: goto st505 - case 128: - goto st128 - case 129: - goto st129 case 506: goto st506 case 507: @@ -925,46 +909,24 @@ _again: goto st513 case 514: goto st514 - case 130: - goto st130 - case 131: - goto st131 - case 132: - goto st132 case 515: goto st515 - case 133: - goto st133 - case 134: - goto st134 - case 135: - goto st135 case 516: goto st516 - case 136: - goto st136 - case 137: - goto st137 + case 127: + goto st127 + case 128: + goto st128 case 517: goto st517 case 518: goto st518 - case 138: - goto st138 - case 139: - goto st139 - case 140: - goto st140 case 519: goto st519 case 520: goto st520 - case 141: - goto st141 case 521: goto st521 - case 142: - goto st142 case 522: goto st522 case 523: @@ -973,36 +935,46 @@ _again: goto st524 case 525: goto st525 + case 129: + goto st129 + case 130: + goto st130 + case 131: + goto st131 case 526: goto st526 + case 132: + goto st132 + case 133: + goto st133 + case 134: + goto st134 case 527: goto st527 + case 135: + goto st135 + case 136: + goto st136 case 528: goto st528 case 529: goto st529 - case 143: - goto st143 - case 144: - goto st144 - case 145: - goto st145 + case 137: + goto st137 + case 138: + goto st138 + case 139: + goto st139 case 530: goto st530 - case 146: - goto st146 - case 147: - goto st147 - case 148: - goto st148 case 531: goto st531 - case 149: - goto st149 - case 150: - goto st150 + case 140: + goto st140 case 532: goto st532 + case 141: + goto st141 case 533: goto st533 case 534: @@ -1019,10 +991,26 @@ _again: goto st539 case 540: goto st540 + case 142: + goto st142 + case 143: + goto st143 + case 144: + goto st144 case 541: goto st541 + case 145: + goto st145 + case 146: + goto st146 + case 147: + goto st147 case 542: goto st542 + case 148: + goto st148 + case 149: + goto st149 case 543: goto st543 case 544: @@ -1041,24 +1029,16 @@ _again: goto st550 case 551: goto st551 - case 151: - goto st151 - case 152: - goto st152 case 552: goto st552 case 553: goto st553 case 554: goto st554 - case 153: - goto st153 case 555: goto st555 case 556: goto st556 - case 154: - goto st154 case 557: goto st557 case 558: @@ -1071,16 +1051,24 @@ _again: goto st561 case 562: goto st562 + case 150: + goto st150 + case 151: + goto st151 case 563: goto st563 case 564: goto st564 case 565: goto st565 + case 152: + goto st152 case 566: goto st566 case 567: goto st567 + case 153: + goto st153 case 568: goto st568 case 569: @@ -1095,14 +1083,8 @@ _again: goto st573 case 574: goto st574 - case 155: - goto st155 - case 156: - goto st156 case 575: goto st575 - case 157: - goto st157 case 576: goto st576 case 577: @@ -1119,42 +1101,20 @@ _again: goto st582 case 583: goto st583 - case 158: - goto st158 - case 159: - goto st159 - case 160: - goto st160 case 584: goto st584 - case 161: - goto st161 - case 162: - goto st162 - case 163: - goto st163 case 585: goto st585 - case 164: - goto st164 - case 165: - goto st165 + case 154: + goto st154 + case 155: + goto st155 case 586: goto st586 + case 
156: + goto st156 case 587: goto st587 - case 166: - goto st166 - case 167: - goto st167 - case 168: - goto st168 - case 169: - goto st169 - case 170: - goto st170 - case 171: - goto st171 case 588: goto st588 case 589: @@ -1169,14 +1129,42 @@ _again: goto st593 case 594: goto st594 + case 157: + goto st157 + case 158: + goto st158 + case 159: + goto st159 case 595: goto st595 + case 160: + goto st160 + case 161: + goto st161 + case 162: + goto st162 case 596: goto st596 + case 163: + goto st163 + case 164: + goto st164 case 597: goto st597 case 598: goto st598 + case 165: + goto st165 + case 166: + goto st166 + case 167: + goto st167 + case 168: + goto st168 + case 169: + goto st169 + case 170: + goto st170 case 599: goto st599 case 600: @@ -1193,26 +1181,16 @@ _again: goto st605 case 606: goto st606 - case 172: - goto st172 - case 173: - goto st173 - case 174: - goto st174 case 607: goto st607 case 608: goto st608 case 609: goto st609 - case 175: - goto st175 case 610: goto st610 case 611: goto st611 - case 176: - goto st176 case 612: goto st612 case 613: @@ -1223,48 +1201,30 @@ _again: goto st615 case 616: goto st616 - case 177: - goto st177 - case 178: - goto st178 - case 179: - goto st179 case 617: goto st617 - case 180: - goto st180 - case 181: - goto st181 - case 182: - goto st182 + case 171: + goto st171 + case 172: + goto st172 + case 173: + goto st173 case 618: goto st618 - case 183: - goto st183 - case 184: - goto st184 case 619: goto st619 case 620: goto st620 - case 185: - goto st185 + case 174: + goto st174 case 621: goto st621 case 622: goto st622 - case 186: - goto st186 - case 187: - goto st187 - case 188: - goto st188 + case 175: + goto st175 case 623: goto st623 - case 189: - goto st189 - case 190: - goto st190 case 624: goto st624 case 625: @@ -1273,52 +1233,74 @@ _again: goto st626 case 627: goto st627 + case 176: + goto st176 + case 177: + goto st177 + case 178: + goto st178 case 628: goto st628 + case 179: + goto st179 + case 180: + goto st180 + case 181: + goto st181 case 629: goto st629 + case 182: + goto st182 + case 183: + goto st183 case 630: goto st630 case 631: goto st631 + case 184: + goto st184 + case 632: + goto st632 + case 633: + goto st633 + case 634: + goto st634 + case 185: + goto st185 + case 186: + goto st186 + case 187: + goto st187 + case 635: + goto st635 + case 188: + goto st188 + case 189: + goto st189 + case 190: + goto st190 + case 636: + goto st636 case 191: goto st191 case 192: goto st192 - case 193: - goto st193 - case 632: - goto st632 - case 194: - goto st194 - case 195: - goto st195 - case 196: - goto st196 - case 633: - goto st633 - case 197: - goto st197 - case 198: - goto st198 - case 634: - goto st634 - case 635: - goto st635 - case 199: - goto st199 - case 200: - goto st200 - case 201: - goto st201 - case 636: - goto st636 case 637: goto st637 case 638: goto st638 + case 193: + goto st193 + case 194: + goto st194 + case 195: + goto st195 case 639: goto st639 + case 196: + goto st196 + case 197: + goto st197 case 640: goto st640 case 641: @@ -1335,36 +1317,44 @@ _again: goto st646 case 647: goto st647 + case 198: + goto st198 + case 199: + goto st199 + case 200: + goto st200 case 648: goto st648 + case 201: + goto st201 + case 202: + goto st202 + case 203: + goto st203 case 649: goto st649 + case 204: + goto st204 + case 205: + goto st205 case 650: goto st650 case 651: goto st651 + case 206: + goto st206 + case 207: + goto st207 + case 208: + goto st208 case 652: goto st652 case 653: goto st653 case 654: goto st654 - case 202: - goto 
st202 - case 203: - goto st203 - case 204: - goto st204 - case 205: - goto st205 - case 206: - goto st206 case 655: goto st655 - case 207: - goto st207 - case 208: - goto st208 case 656: goto st656 case 657: @@ -1383,48 +1373,34 @@ _again: goto st663 case 664: goto st664 + case 665: + goto st665 + case 666: + goto st666 + case 667: + goto st667 + case 668: + goto st668 + case 669: + goto st669 + case 670: + goto st670 case 209: goto st209 case 210: goto st210 case 211: goto st211 - case 665: - goto st665 case 212: goto st212 case 213: goto st213 - case 214: - goto st214 - case 666: - goto st666 - case 215: - goto st215 - case 216: - goto st216 - case 667: - goto st667 - case 668: - goto st668 - case 217: - goto st217 - case 218: - goto st218 - case 219: - goto st219 - case 220: - goto st220 - case 669: - goto st669 - case 221: - goto st221 - case 222: - goto st222 - case 670: - goto st670 case 671: goto st671 + case 214: + goto st214 + case 215: + goto st215 case 672: goto st672 case 673: @@ -1437,44 +1413,48 @@ _again: goto st676 case 677: goto st677 - case 223: - goto st223 - case 224: - goto st224 - case 225: - goto st225 case 678: goto st678 - case 226: - goto st226 - case 227: - goto st227 - case 228: - goto st228 case 679: goto st679 - case 229: - goto st229 - case 230: - goto st230 case 680: goto st680 + case 216: + goto st216 + case 217: + goto st217 + case 218: + goto st218 case 681: goto st681 - case 231: - goto st231 - case 232: - goto st232 - case 233: - goto st233 + case 219: + goto st219 + case 220: + goto st220 + case 221: + goto st221 case 682: goto st682 + case 222: + goto st222 + case 223: + goto st223 case 683: goto st683 case 684: goto st684 + case 224: + goto st224 + case 225: + goto st225 + case 226: + goto st226 case 685: goto st685 + case 227: + goto st227 + case 228: + goto st228 case 686: goto st686 case 687: @@ -1491,8 +1471,18 @@ _again: goto st692 case 693: goto st693 + case 229: + goto st229 + case 230: + goto st230 + case 231: + goto st231 case 694: goto st694 + case 232: + goto st232 + case 233: + goto st233 case 695: goto st695 case 696: @@ -1505,96 +1495,166 @@ _again: goto st699 case 700: goto st700 + case 701: + goto st701 + case 702: + goto st702 case 234: goto st234 case 235: goto st235 - case 701: - goto st701 case 236: goto st236 - case 237: - goto st237 - case 702: - goto st702 case 703: goto st703 + case 237: + goto st237 + case 238: + goto st238 + case 239: + goto st239 case 704: goto st704 + case 240: + goto st240 + case 241: + goto st241 case 705: goto st705 case 706: goto st706 + case 242: + goto st242 + case 243: + goto st243 + case 244: + goto st244 case 707: goto st707 case 708: goto st708 case 709: goto st709 - case 238: - goto st238 - case 239: - goto st239 - case 240: - goto st240 case 710: goto st710 - case 241: - goto st241 - case 242: - goto st242 - case 243: - goto st243 case 711: goto st711 - case 244: - goto st244 - case 245: - goto st245 case 712: goto st712 case 713: goto st713 - case 246: - goto st246 - case 247: - goto st247 case 714: goto st714 - case 250: - goto st250 + case 715: + goto st715 + case 716: + goto st716 case 717: goto st717 case 718: goto st718 + case 719: + goto st719 + case 720: + goto st720 + case 721: + goto st721 + case 722: + goto st722 + case 723: + goto st723 + case 724: + goto st724 + case 725: + goto st725 + case 245: + goto st245 + case 246: + goto st246 + case 726: + goto st726 + case 247: + goto st247 + case 248: + goto st248 + case 727: + goto st727 + case 728: + goto st728 + case 729: + goto st729 
+ case 730: + goto st730 + case 731: + goto st731 + case 732: + goto st732 + case 733: + goto st733 + case 734: + goto st734 + case 249: + goto st249 + case 250: + goto st250 case 251: goto st251 + case 735: + goto st735 case 252: goto st252 case 253: goto st253 case 254: goto st254 - case 719: - goto st719 + case 736: + goto st736 case 255: goto st255 - case 720: - goto st720 case 256: goto st256 + case 737: + goto st737 + case 738: + goto st738 case 257: goto st257 case 258: goto st258 - case 715: - goto st715 - case 716: - goto st716 - case 248: - goto st248 - case 249: - goto st249 + case 739: + goto st739 + case 261: + goto st261 + case 741: + goto st741 + case 742: + goto st742 + case 262: + goto st262 + case 263: + goto st263 + case 264: + goto st264 + case 265: + goto st265 + case 743: + goto st743 + case 266: + goto st266 + case 744: + goto st744 + case 267: + goto st267 + case 268: + goto st268 + case 269: + goto st269 + case 740: + goto st740 + case 259: + goto st259 + case 260: + goto st260 } if ( m.p)++; ( m.p) == ( m.pe) { @@ -1602,8 +1662,8 @@ _again: } _resume: switch ( m.cs) { - case 259: - goto st_case_259 + case 270: + goto st_case_270 case 1: goto st_case_1 case 2: @@ -1620,14 +1680,14 @@ _resume: goto st_case_6 case 7: goto st_case_7 + case 271: + goto st_case_271 + case 272: + goto st_case_272 + case 273: + goto st_case_273 case 8: goto st_case_8 - case 260: - goto st_case_260 - case 261: - goto st_case_261 - case 262: - goto st_case_262 case 9: goto st_case_9 case 10: @@ -1676,46 +1736,22 @@ _resume: goto st_case_31 case 32: goto st_case_32 - case 33: - goto st_case_33 - case 263: - goto st_case_263 - case 264: - goto st_case_264 - case 34: - goto st_case_34 - case 35: - goto st_case_35 - case 265: - goto st_case_265 - case 266: - goto st_case_266 - case 267: - goto st_case_267 - case 36: - goto st_case_36 - case 268: - goto st_case_268 - case 269: - goto st_case_269 - case 270: - goto st_case_270 - case 271: - goto st_case_271 - case 272: - goto st_case_272 - case 273: - goto st_case_273 case 274: goto st_case_274 case 275: goto st_case_275 + case 33: + goto st_case_33 + case 34: + goto st_case_34 case 276: goto st_case_276 case 277: goto st_case_277 case 278: goto st_case_278 + case 35: + goto st_case_35 case 279: goto st_case_279 case 280: @@ -1730,26 +1766,12 @@ _resume: goto st_case_284 case 285: goto st_case_285 - case 37: - goto st_case_37 - case 38: - goto st_case_38 case 286: goto st_case_286 case 287: goto st_case_287 case 288: goto st_case_288 - case 39: - goto st_case_39 - case 40: - goto st_case_40 - case 41: - goto st_case_41 - case 42: - goto st_case_42 - case 43: - goto st_case_43 case 289: goto st_case_289 case 290: @@ -1758,8 +1780,6 @@ _resume: goto st_case_291 case 292: goto st_case_292 - case 44: - goto st_case_44 case 293: goto st_case_293 case 294: @@ -1768,12 +1788,26 @@ _resume: goto st_case_295 case 296: goto st_case_296 + case 36: + goto st_case_36 + case 37: + goto st_case_37 case 297: goto st_case_297 case 298: goto st_case_298 case 299: goto st_case_299 + case 38: + goto st_case_38 + case 39: + goto st_case_39 + case 40: + goto st_case_40 + case 41: + goto st_case_41 + case 42: + goto st_case_42 case 300: goto st_case_300 case 301: @@ -1782,6 +1816,8 @@ _resume: goto st_case_302 case 303: goto st_case_303 + case 43: + goto st_case_43 case 304: goto st_case_304 case 305: @@ -1804,6 +1840,30 @@ _resume: goto st_case_313 case 314: goto st_case_314 + case 315: + goto st_case_315 + case 316: + goto st_case_316 + case 317: + goto st_case_317 + 
case 318: + goto st_case_318 + case 319: + goto st_case_319 + case 320: + goto st_case_320 + case 321: + goto st_case_321 + case 322: + goto st_case_322 + case 323: + goto st_case_323 + case 324: + goto st_case_324 + case 325: + goto st_case_325 + case 44: + goto st_case_44 case 45: goto st_case_45 case 46: @@ -1822,14 +1882,14 @@ _resume: goto st_case_52 case 53: goto st_case_53 + case 326: + goto st_case_326 + case 327: + goto st_case_327 + case 328: + goto st_case_328 case 54: goto st_case_54 - case 315: - goto st_case_315 - case 316: - goto st_case_316 - case 317: - goto st_case_317 case 55: goto st_case_55 case 56: @@ -1840,36 +1900,12 @@ _resume: goto st_case_58 case 59: goto st_case_59 - case 60: - goto st_case_60 - case 318: - goto st_case_318 - case 319: - goto st_case_319 - case 61: - goto st_case_61 - case 320: - goto st_case_320 - case 321: - goto st_case_321 - case 322: - goto st_case_322 - case 323: - goto st_case_323 - case 324: - goto st_case_324 - case 325: - goto st_case_325 - case 326: - goto st_case_326 - case 327: - goto st_case_327 - case 328: - goto st_case_328 case 329: goto st_case_329 case 330: goto st_case_330 + case 60: + goto st_case_60 case 331: goto st_case_331 case 332: @@ -1888,16 +1924,12 @@ _resume: goto st_case_338 case 339: goto st_case_339 - case 62: - goto st_case_62 case 340: goto st_case_340 case 341: goto st_case_341 case 342: goto st_case_342 - case 63: - goto st_case_63 case 343: goto st_case_343 case 344: @@ -1914,12 +1946,16 @@ _resume: goto st_case_349 case 350: goto st_case_350 + case 61: + goto st_case_61 case 351: goto st_case_351 case 352: goto st_case_352 case 353: goto st_case_353 + case 62: + goto st_case_62 case 354: goto st_case_354 case 355: @@ -1938,48 +1974,20 @@ _resume: goto st_case_361 case 362: goto st_case_362 - case 64: - goto st_case_64 - case 65: - goto st_case_65 - case 66: - goto st_case_66 - case 67: - goto st_case_67 - case 68: - goto st_case_68 case 363: goto st_case_363 - case 69: - goto st_case_69 - case 70: - goto st_case_70 - case 71: - goto st_case_71 - case 72: - goto st_case_72 - case 73: - goto st_case_73 case 364: goto st_case_364 case 365: goto st_case_365 case 366: goto st_case_366 - case 74: - goto st_case_74 - case 75: - goto st_case_75 case 367: goto st_case_367 case 368: goto st_case_368 - case 76: - goto st_case_76 case 369: goto st_case_369 - case 77: - goto st_case_77 case 370: goto st_case_370 case 371: @@ -1988,20 +1996,48 @@ _resume: goto st_case_372 case 373: goto st_case_373 + case 63: + goto st_case_63 + case 64: + goto st_case_64 + case 65: + goto st_case_65 + case 66: + goto st_case_66 + case 67: + goto st_case_67 case 374: goto st_case_374 + case 68: + goto st_case_68 + case 69: + goto st_case_69 + case 70: + goto st_case_70 + case 71: + goto st_case_71 + case 72: + goto st_case_72 case 375: goto st_case_375 case 376: goto st_case_376 case 377: goto st_case_377 + case 73: + goto st_case_73 + case 74: + goto st_case_74 case 378: goto st_case_378 case 379: goto st_case_379 + case 75: + goto st_case_75 case 380: goto st_case_380 + case 76: + goto st_case_76 case 381: goto st_case_381 case 382: @@ -2020,6 +2056,30 @@ _resume: goto st_case_388 case 389: goto st_case_389 + case 390: + goto st_case_390 + case 391: + goto st_case_391 + case 392: + goto st_case_392 + case 393: + goto st_case_393 + case 394: + goto st_case_394 + case 395: + goto st_case_395 + case 396: + goto st_case_396 + case 397: + goto st_case_397 + case 398: + goto st_case_398 + case 399: + goto st_case_399 + case 400: + goto 
st_case_400 + case 77: + goto st_case_77 case 78: goto st_case_78 case 79: @@ -2046,52 +2106,6 @@ _resume: goto st_case_89 case 90: goto st_case_90 - case 91: - goto st_case_91 - case 390: - goto st_case_390 - case 391: - goto st_case_391 - case 392: - goto st_case_392 - case 393: - goto st_case_393 - case 92: - goto st_case_92 - case 93: - goto st_case_93 - case 94: - goto st_case_94 - case 95: - goto st_case_95 - case 394: - goto st_case_394 - case 395: - goto st_case_395 - case 96: - goto st_case_96 - case 97: - goto st_case_97 - case 396: - goto st_case_396 - case 98: - goto st_case_98 - case 99: - goto st_case_99 - case 397: - goto st_case_397 - case 398: - goto st_case_398 - case 100: - goto st_case_100 - case 399: - goto st_case_399 - case 400: - goto st_case_400 - case 101: - goto st_case_101 - case 102: - goto st_case_102 case 401: goto st_case_401 case 402: @@ -2100,20 +2114,42 @@ _resume: goto st_case_403 case 404: goto st_case_404 + case 91: + goto st_case_91 + case 92: + goto st_case_92 + case 93: + goto st_case_93 + case 94: + goto st_case_94 case 405: goto st_case_405 case 406: goto st_case_406 + case 95: + goto st_case_95 + case 96: + goto st_case_96 case 407: goto st_case_407 + case 97: + goto st_case_97 + case 98: + goto st_case_98 case 408: goto st_case_408 case 409: goto st_case_409 + case 99: + goto st_case_99 case 410: goto st_case_410 case 411: goto st_case_411 + case 100: + goto st_case_100 + case 101: + goto st_case_101 case 412: goto st_case_412 case 413: @@ -2128,26 +2164,18 @@ _resume: goto st_case_417 case 418: goto st_case_418 - case 103: - goto st_case_103 case 419: goto st_case_419 case 420: goto st_case_420 case 421: goto st_case_421 - case 104: - goto st_case_104 - case 105: - goto st_case_105 case 422: goto st_case_422 case 423: goto st_case_423 case 424: goto st_case_424 - case 106: - goto st_case_106 case 425: goto st_case_425 case 426: @@ -2158,18 +2186,26 @@ _resume: goto st_case_428 case 429: goto st_case_429 + case 102: + goto st_case_102 case 430: goto st_case_430 case 431: goto st_case_431 case 432: goto st_case_432 + case 103: + goto st_case_103 + case 104: + goto st_case_104 case 433: goto st_case_433 case 434: goto st_case_434 case 435: goto st_case_435 + case 105: + goto st_case_105 case 436: goto st_case_436 case 437: @@ -2188,8 +2224,6 @@ _resume: goto st_case_443 case 444: goto st_case_444 - case 107: - goto st_case_107 case 445: goto st_case_445 case 446: @@ -2212,6 +2246,8 @@ _resume: goto st_case_454 case 455: goto st_case_455 + case 106: + goto st_case_106 case 456: goto st_case_456 case 457: @@ -2234,26 +2270,12 @@ _resume: goto st_case_465 case 466: goto st_case_466 - case 108: - goto st_case_108 - case 109: - goto st_case_109 - case 110: - goto st_case_110 - case 111: - goto st_case_111 - case 112: - goto st_case_112 case 467: goto st_case_467 - case 113: - goto st_case_113 case 468: goto st_case_468 case 469: goto st_case_469 - case 114: - goto st_case_114 case 470: goto st_case_470 case 471: @@ -2270,48 +2292,36 @@ _resume: goto st_case_476 case 477: goto st_case_477 + case 107: + goto st_case_107 + case 108: + goto st_case_108 + case 109: + goto st_case_109 + case 110: + goto st_case_110 + case 111: + goto st_case_111 case 478: goto st_case_478 - case 115: - goto st_case_115 - case 116: - goto st_case_116 - case 117: - goto st_case_117 + case 112: + goto st_case_112 case 479: goto st_case_479 - case 118: - goto st_case_118 - case 119: - goto st_case_119 - case 120: - goto st_case_120 case 480: goto st_case_480 - case 121: - goto 
st_case_121 - case 122: - goto st_case_122 + case 113: + goto st_case_113 case 481: goto st_case_481 case 482: goto st_case_482 - case 123: - goto st_case_123 - case 124: - goto st_case_124 - case 125: - goto st_case_125 - case 126: - goto st_case_126 case 483: goto st_case_483 case 484: goto st_case_484 case 485: goto st_case_485 - case 127: - goto st_case_127 case 486: goto st_case_486 case 487: @@ -2320,20 +2330,46 @@ _resume: goto st_case_488 case 489: goto st_case_489 + case 114: + goto st_case_114 + case 115: + goto st_case_115 + case 116: + goto st_case_116 case 490: goto st_case_490 + case 117: + goto st_case_117 + case 118: + goto st_case_118 + case 119: + goto st_case_119 case 491: goto st_case_491 + case 120: + goto st_case_120 + case 121: + goto st_case_121 case 492: goto st_case_492 case 493: goto st_case_493 + case 122: + goto st_case_122 + case 123: + goto st_case_123 + case 124: + goto st_case_124 + case 125: + goto st_case_125 case 494: goto st_case_494 case 495: goto st_case_495 case 496: goto st_case_496 + case 126: + goto st_case_126 case 497: goto st_case_497 case 498: @@ -2352,10 +2388,6 @@ _resume: goto st_case_504 case 505: goto st_case_505 - case 128: - goto st_case_128 - case 129: - goto st_case_129 case 506: goto st_case_506 case 507: @@ -2374,46 +2406,24 @@ _resume: goto st_case_513 case 514: goto st_case_514 - case 130: - goto st_case_130 - case 131: - goto st_case_131 - case 132: - goto st_case_132 case 515: goto st_case_515 - case 133: - goto st_case_133 - case 134: - goto st_case_134 - case 135: - goto st_case_135 case 516: goto st_case_516 - case 136: - goto st_case_136 - case 137: - goto st_case_137 + case 127: + goto st_case_127 + case 128: + goto st_case_128 case 517: goto st_case_517 case 518: goto st_case_518 - case 138: - goto st_case_138 - case 139: - goto st_case_139 - case 140: - goto st_case_140 case 519: goto st_case_519 case 520: goto st_case_520 - case 141: - goto st_case_141 case 521: goto st_case_521 - case 142: - goto st_case_142 case 522: goto st_case_522 case 523: @@ -2422,36 +2432,46 @@ _resume: goto st_case_524 case 525: goto st_case_525 + case 129: + goto st_case_129 + case 130: + goto st_case_130 + case 131: + goto st_case_131 case 526: goto st_case_526 + case 132: + goto st_case_132 + case 133: + goto st_case_133 + case 134: + goto st_case_134 case 527: goto st_case_527 + case 135: + goto st_case_135 + case 136: + goto st_case_136 case 528: goto st_case_528 case 529: goto st_case_529 - case 143: - goto st_case_143 - case 144: - goto st_case_144 - case 145: - goto st_case_145 + case 137: + goto st_case_137 + case 138: + goto st_case_138 + case 139: + goto st_case_139 case 530: goto st_case_530 - case 146: - goto st_case_146 - case 147: - goto st_case_147 - case 148: - goto st_case_148 case 531: goto st_case_531 - case 149: - goto st_case_149 - case 150: - goto st_case_150 + case 140: + goto st_case_140 case 532: goto st_case_532 + case 141: + goto st_case_141 case 533: goto st_case_533 case 534: @@ -2468,10 +2488,26 @@ _resume: goto st_case_539 case 540: goto st_case_540 + case 142: + goto st_case_142 + case 143: + goto st_case_143 + case 144: + goto st_case_144 case 541: goto st_case_541 + case 145: + goto st_case_145 + case 146: + goto st_case_146 + case 147: + goto st_case_147 case 542: goto st_case_542 + case 148: + goto st_case_148 + case 149: + goto st_case_149 case 543: goto st_case_543 case 544: @@ -2490,24 +2526,16 @@ _resume: goto st_case_550 case 551: goto st_case_551 - case 151: - goto st_case_151 - case 152: - goto 
st_case_152 case 552: goto st_case_552 case 553: goto st_case_553 case 554: goto st_case_554 - case 153: - goto st_case_153 case 555: goto st_case_555 case 556: goto st_case_556 - case 154: - goto st_case_154 case 557: goto st_case_557 case 558: @@ -2520,16 +2548,24 @@ _resume: goto st_case_561 case 562: goto st_case_562 + case 150: + goto st_case_150 + case 151: + goto st_case_151 case 563: goto st_case_563 case 564: goto st_case_564 case 565: goto st_case_565 + case 152: + goto st_case_152 case 566: goto st_case_566 case 567: goto st_case_567 + case 153: + goto st_case_153 case 568: goto st_case_568 case 569: @@ -2544,14 +2580,8 @@ _resume: goto st_case_573 case 574: goto st_case_574 - case 155: - goto st_case_155 - case 156: - goto st_case_156 case 575: goto st_case_575 - case 157: - goto st_case_157 case 576: goto st_case_576 case 577: @@ -2568,42 +2598,20 @@ _resume: goto st_case_582 case 583: goto st_case_583 - case 158: - goto st_case_158 - case 159: - goto st_case_159 - case 160: - goto st_case_160 case 584: goto st_case_584 - case 161: - goto st_case_161 - case 162: - goto st_case_162 - case 163: - goto st_case_163 case 585: goto st_case_585 - case 164: - goto st_case_164 - case 165: - goto st_case_165 + case 154: + goto st_case_154 + case 155: + goto st_case_155 case 586: goto st_case_586 + case 156: + goto st_case_156 case 587: goto st_case_587 - case 166: - goto st_case_166 - case 167: - goto st_case_167 - case 168: - goto st_case_168 - case 169: - goto st_case_169 - case 170: - goto st_case_170 - case 171: - goto st_case_171 case 588: goto st_case_588 case 589: @@ -2618,14 +2626,42 @@ _resume: goto st_case_593 case 594: goto st_case_594 + case 157: + goto st_case_157 + case 158: + goto st_case_158 + case 159: + goto st_case_159 case 595: goto st_case_595 + case 160: + goto st_case_160 + case 161: + goto st_case_161 + case 162: + goto st_case_162 case 596: goto st_case_596 + case 163: + goto st_case_163 + case 164: + goto st_case_164 case 597: goto st_case_597 case 598: goto st_case_598 + case 165: + goto st_case_165 + case 166: + goto st_case_166 + case 167: + goto st_case_167 + case 168: + goto st_case_168 + case 169: + goto st_case_169 + case 170: + goto st_case_170 case 599: goto st_case_599 case 600: @@ -2642,26 +2678,16 @@ _resume: goto st_case_605 case 606: goto st_case_606 - case 172: - goto st_case_172 - case 173: - goto st_case_173 - case 174: - goto st_case_174 case 607: goto st_case_607 case 608: goto st_case_608 case 609: goto st_case_609 - case 175: - goto st_case_175 case 610: goto st_case_610 case 611: goto st_case_611 - case 176: - goto st_case_176 case 612: goto st_case_612 case 613: @@ -2672,48 +2698,30 @@ _resume: goto st_case_615 case 616: goto st_case_616 - case 177: - goto st_case_177 - case 178: - goto st_case_178 - case 179: - goto st_case_179 case 617: goto st_case_617 - case 180: - goto st_case_180 - case 181: - goto st_case_181 - case 182: - goto st_case_182 + case 171: + goto st_case_171 + case 172: + goto st_case_172 + case 173: + goto st_case_173 case 618: goto st_case_618 - case 183: - goto st_case_183 - case 184: - goto st_case_184 case 619: goto st_case_619 case 620: goto st_case_620 - case 185: - goto st_case_185 + case 174: + goto st_case_174 case 621: goto st_case_621 case 622: goto st_case_622 - case 186: - goto st_case_186 - case 187: - goto st_case_187 - case 188: - goto st_case_188 + case 175: + goto st_case_175 case 623: goto st_case_623 - case 189: - goto st_case_189 - case 190: - goto st_case_190 case 624: goto st_case_624 case 625: 
@@ -2722,52 +2730,74 @@ _resume: goto st_case_626 case 627: goto st_case_627 + case 176: + goto st_case_176 + case 177: + goto st_case_177 + case 178: + goto st_case_178 case 628: goto st_case_628 + case 179: + goto st_case_179 + case 180: + goto st_case_180 + case 181: + goto st_case_181 case 629: goto st_case_629 + case 182: + goto st_case_182 + case 183: + goto st_case_183 case 630: goto st_case_630 case 631: goto st_case_631 + case 184: + goto st_case_184 + case 632: + goto st_case_632 + case 633: + goto st_case_633 + case 634: + goto st_case_634 + case 185: + goto st_case_185 + case 186: + goto st_case_186 + case 187: + goto st_case_187 + case 635: + goto st_case_635 + case 188: + goto st_case_188 + case 189: + goto st_case_189 + case 190: + goto st_case_190 + case 636: + goto st_case_636 case 191: goto st_case_191 case 192: goto st_case_192 - case 193: - goto st_case_193 - case 632: - goto st_case_632 - case 194: - goto st_case_194 - case 195: - goto st_case_195 - case 196: - goto st_case_196 - case 633: - goto st_case_633 - case 197: - goto st_case_197 - case 198: - goto st_case_198 - case 634: - goto st_case_634 - case 635: - goto st_case_635 - case 199: - goto st_case_199 - case 200: - goto st_case_200 - case 201: - goto st_case_201 - case 636: - goto st_case_636 case 637: goto st_case_637 case 638: goto st_case_638 + case 193: + goto st_case_193 + case 194: + goto st_case_194 + case 195: + goto st_case_195 case 639: goto st_case_639 + case 196: + goto st_case_196 + case 197: + goto st_case_197 case 640: goto st_case_640 case 641: @@ -2784,36 +2814,44 @@ _resume: goto st_case_646 case 647: goto st_case_647 + case 198: + goto st_case_198 + case 199: + goto st_case_199 + case 200: + goto st_case_200 case 648: goto st_case_648 + case 201: + goto st_case_201 + case 202: + goto st_case_202 + case 203: + goto st_case_203 case 649: goto st_case_649 + case 204: + goto st_case_204 + case 205: + goto st_case_205 case 650: goto st_case_650 case 651: goto st_case_651 + case 206: + goto st_case_206 + case 207: + goto st_case_207 + case 208: + goto st_case_208 case 652: goto st_case_652 case 653: goto st_case_653 case 654: goto st_case_654 - case 202: - goto st_case_202 - case 203: - goto st_case_203 - case 204: - goto st_case_204 - case 205: - goto st_case_205 - case 206: - goto st_case_206 case 655: goto st_case_655 - case 207: - goto st_case_207 - case 208: - goto st_case_208 case 656: goto st_case_656 case 657: @@ -2832,48 +2870,34 @@ _resume: goto st_case_663 case 664: goto st_case_664 + case 665: + goto st_case_665 + case 666: + goto st_case_666 + case 667: + goto st_case_667 + case 668: + goto st_case_668 + case 669: + goto st_case_669 + case 670: + goto st_case_670 case 209: goto st_case_209 case 210: goto st_case_210 case 211: goto st_case_211 - case 665: - goto st_case_665 case 212: goto st_case_212 case 213: goto st_case_213 - case 214: - goto st_case_214 - case 666: - goto st_case_666 - case 215: - goto st_case_215 - case 216: - goto st_case_216 - case 667: - goto st_case_667 - case 668: - goto st_case_668 - case 217: - goto st_case_217 - case 218: - goto st_case_218 - case 219: - goto st_case_219 - case 220: - goto st_case_220 - case 669: - goto st_case_669 - case 221: - goto st_case_221 - case 222: - goto st_case_222 - case 670: - goto st_case_670 case 671: goto st_case_671 + case 214: + goto st_case_214 + case 215: + goto st_case_215 case 672: goto st_case_672 case 673: @@ -2886,44 +2910,48 @@ _resume: goto st_case_676 case 677: goto st_case_677 - case 223: - goto st_case_223 - 
case 224: - goto st_case_224 - case 225: - goto st_case_225 case 678: goto st_case_678 - case 226: - goto st_case_226 - case 227: - goto st_case_227 - case 228: - goto st_case_228 case 679: goto st_case_679 - case 229: - goto st_case_229 - case 230: - goto st_case_230 case 680: goto st_case_680 + case 216: + goto st_case_216 + case 217: + goto st_case_217 + case 218: + goto st_case_218 case 681: goto st_case_681 - case 231: - goto st_case_231 - case 232: - goto st_case_232 - case 233: - goto st_case_233 + case 219: + goto st_case_219 + case 220: + goto st_case_220 + case 221: + goto st_case_221 case 682: goto st_case_682 + case 222: + goto st_case_222 + case 223: + goto st_case_223 case 683: goto st_case_683 case 684: goto st_case_684 + case 224: + goto st_case_224 + case 225: + goto st_case_225 + case 226: + goto st_case_226 case 685: goto st_case_685 + case 227: + goto st_case_227 + case 228: + goto st_case_228 case 686: goto st_case_686 case 687: @@ -2940,8 +2968,18 @@ _resume: goto st_case_692 case 693: goto st_case_693 + case 229: + goto st_case_229 + case 230: + goto st_case_230 + case 231: + goto st_case_231 case 694: goto st_case_694 + case 232: + goto st_case_232 + case 233: + goto st_case_233 case 695: goto st_case_695 case 696: @@ -2954,135 +2992,205 @@ _resume: goto st_case_699 case 700: goto st_case_700 + case 701: + goto st_case_701 + case 702: + goto st_case_702 case 234: goto st_case_234 case 235: goto st_case_235 - case 701: - goto st_case_701 case 236: goto st_case_236 - case 237: - goto st_case_237 - case 702: - goto st_case_702 case 703: goto st_case_703 + case 237: + goto st_case_237 + case 238: + goto st_case_238 + case 239: + goto st_case_239 case 704: goto st_case_704 + case 240: + goto st_case_240 + case 241: + goto st_case_241 case 705: goto st_case_705 case 706: goto st_case_706 + case 242: + goto st_case_242 + case 243: + goto st_case_243 + case 244: + goto st_case_244 case 707: goto st_case_707 case 708: goto st_case_708 case 709: goto st_case_709 - case 238: - goto st_case_238 - case 239: - goto st_case_239 - case 240: - goto st_case_240 case 710: goto st_case_710 - case 241: - goto st_case_241 - case 242: - goto st_case_242 - case 243: - goto st_case_243 case 711: goto st_case_711 - case 244: - goto st_case_244 - case 245: - goto st_case_245 case 712: goto st_case_712 case 713: goto st_case_713 - case 246: - goto st_case_246 - case 247: - goto st_case_247 case 714: goto st_case_714 - case 250: - goto st_case_250 + case 715: + goto st_case_715 + case 716: + goto st_case_716 case 717: goto st_case_717 case 718: goto st_case_718 + case 719: + goto st_case_719 + case 720: + goto st_case_720 + case 721: + goto st_case_721 + case 722: + goto st_case_722 + case 723: + goto st_case_723 + case 724: + goto st_case_724 + case 725: + goto st_case_725 + case 245: + goto st_case_245 + case 246: + goto st_case_246 + case 726: + goto st_case_726 + case 247: + goto st_case_247 + case 248: + goto st_case_248 + case 727: + goto st_case_727 + case 728: + goto st_case_728 + case 729: + goto st_case_729 + case 730: + goto st_case_730 + case 731: + goto st_case_731 + case 732: + goto st_case_732 + case 733: + goto st_case_733 + case 734: + goto st_case_734 + case 249: + goto st_case_249 + case 250: + goto st_case_250 case 251: goto st_case_251 + case 735: + goto st_case_735 case 252: goto st_case_252 case 253: goto st_case_253 case 254: goto st_case_254 - case 719: - goto st_case_719 + case 736: + goto st_case_736 case 255: goto st_case_255 - case 720: - goto st_case_720 case 256: 
goto st_case_256 + case 737: + goto st_case_737 + case 738: + goto st_case_738 case 257: goto st_case_257 case 258: goto st_case_258 - case 715: - goto st_case_715 - case 716: - goto st_case_716 - case 248: - goto st_case_248 - case 249: - goto st_case_249 + case 739: + goto st_case_739 + case 261: + goto st_case_261 + case 741: + goto st_case_741 + case 742: + goto st_case_742 + case 262: + goto st_case_262 + case 263: + goto st_case_263 + case 264: + goto st_case_264 + case 265: + goto st_case_265 + case 743: + goto st_case_743 + case 266: + goto st_case_266 + case 744: + goto st_case_744 + case 267: + goto st_case_267 + case 268: + goto st_case_268 + case 269: + goto st_case_269 + case 740: + goto st_case_740 + case 259: + goto st_case_259 + case 260: + goto st_case_260 } goto st_out - st259: + st270: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof259 + goto _test_eof270 } - st_case_259: + st_case_270: switch ( m.data)[( m.p)] { case 10: goto tr35 case 11: - goto tr440 + goto tr459 case 13: goto tr35 case 32: - goto tr439 + goto tr458 case 35: goto tr35 case 44: goto tr35 case 92: - goto tr441 + goto tr460 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr439 + goto tr458 } - goto tr438 + goto tr457 tr33: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto st1 -tr438: -//line plugins/parsers/influx/machine.go.rl:73 +tr457: +//line plugins/parsers/influx/machine.go.rl:74 - foundMetric = true + m.beginMetric = true -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p @@ -3092,7 +3200,7 @@ tr438: goto _test_eof1 } st_case_1: -//line plugins/parsers/influx/machine.go:3096 +//line plugins/parsers/influx/machine.go:3204 switch ( m.data)[( m.p)] { case 10: goto tr2 @@ -3105,7 +3213,7 @@ tr438: case 44: goto tr4 case 92: - goto st96 + goto st95 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 @@ -3113,26 +3221,26 @@ tr438: goto st1 tr1: ( m.cs) = 2 -//line plugins/parsers/influx/machine.go.rl:77 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr60: ( m.cs) = 2 -//line plugins/parsers/influx/machine.go.rl:90 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } @@ -3142,7 +3250,7 @@ tr60: goto _test_eof2 } st_case_2: -//line plugins/parsers/influx/machine.go:3146 +//line plugins/parsers/influx/machine.go:3254 switch ( m.data)[( m.p)] { case 10: goto tr8 @@ -3164,7 +3272,7 @@ tr60: } goto tr6 tr6: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p @@ -3174,7 +3282,7 @@ tr6: goto _test_eof3 } st_case_3: -//line plugins/parsers/influx/machine.go:3178 +//line plugins/parsers/influx/machine.go:3286 switch ( m.data)[( m.p)] { case 32: goto tr8 @@ -3183,7 +3291,7 @@ tr6: case 61: goto tr12 case 92: - goto st36 + goto st35 } switch { case ( m.data)[( m.p)] > 10: @@ -3196,214 +3304,214 @@ tr6: goto st3 tr2: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:37 +//line plugins/parsers/influx/machine.go.rl:38 err = ErrTagParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } goto _again tr8: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:30 +//line 
plugins/parsers/influx/machine.go.rl:31 err = ErrFieldParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } goto _again tr35: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:23 +//line plugins/parsers/influx/machine.go.rl:24 err = ErrNameParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } goto _again tr39: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:23 +//line plugins/parsers/influx/machine.go.rl:24 err = ErrNameParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } -//line plugins/parsers/influx/machine.go.rl:37 +//line plugins/parsers/influx/machine.go.rl:38 err = ErrTagParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } goto _again tr43: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:23 +//line plugins/parsers/influx/machine.go.rl:24 err = ErrNameParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } -//line plugins/parsers/influx/machine.go.rl:30 +//line plugins/parsers/influx/machine.go.rl:31 err = ErrFieldParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } goto _again tr47: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:37 +//line plugins/parsers/influx/machine.go.rl:38 err = ErrTagParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } -//line plugins/parsers/influx/machine.go.rl:30 +//line plugins/parsers/influx/machine.go.rl:31 err = ErrFieldParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } goto _again tr105: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:30 +//line plugins/parsers/influx/machine.go.rl:31 err = ErrFieldParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } -//line plugins/parsers/influx/machine.go.rl:44 +//line plugins/parsers/influx/machine.go.rl:45 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } goto _again tr132: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:37 +//line plugins/parsers/influx/machine.go.rl:38 err = ErrTagParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } -//line plugins/parsers/influx/machine.go.rl:30 +//line plugins/parsers/influx/machine.go.rl:31 err = ErrFieldParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } -//line plugins/parsers/influx/machine.go.rl:44 +//line plugins/parsers/influx/machine.go.rl:45 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } goto _again tr198: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:37 +//line plugins/parsers/influx/machine.go.rl:38 err = ErrTagParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } -//line plugins/parsers/influx/machine.go.rl:44 +//line plugins/parsers/influx/machine.go.rl:45 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } goto _again -tr404: +tr423: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:23 +//line plugins/parsers/influx/machine.go.rl:24 err = ErrNameParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } -//line plugins/parsers/influx/machine.go.rl:37 +//line plugins/parsers/influx/machine.go.rl:38 err = ErrTagParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } -//line plugins/parsers/influx/machine.go.rl:30 +//line plugins/parsers/influx/machine.go.rl:31 err = ErrFieldParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } goto _again -tr407: +tr426: ( m.cs) = 0 -//line 
plugins/parsers/influx/machine.go.rl:44 +//line plugins/parsers/influx/machine.go.rl:45 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } goto _again -tr1023: -//line plugins/parsers/influx/machine.go.rl:64 +tr1055: +//line plugins/parsers/influx/machine.go.rl:65 ( m.p)-- - {goto st259 } + {goto st270 } goto st0 -//line plugins/parsers/influx/machine.go:3399 +//line plugins/parsers/influx/machine.go:3507 st_case_0: st0: ( m.cs) = 0 goto _out tr12: -//line plugins/parsers/influx/machine.go.rl:99 +//line plugins/parsers/influx/machine.go.rl:100 - key = m.text() + m.key = m.text() goto st4 st4: @@ -3411,7 +3519,7 @@ tr12: goto _test_eof4 } st_case_4: -//line plugins/parsers/influx/machine.go:3415 +//line plugins/parsers/influx/machine.go:3523 switch ( m.data)[( m.p)] { case 34: goto st5 @@ -3453,65 +3561,332 @@ tr12: } goto tr23 tr23: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p + goto st6 +tr24: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto st6 +tr29: +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + goto st6 st6: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof6 } st_case_6: -//line plugins/parsers/influx/machine.go:3467 +//line plugins/parsers/influx/machine.go:3595 switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 34: goto tr31 case 92: - goto st76 + goto st75 } goto st6 -tr24: -//line plugins/parsers/influx/machine.go.rl:19 +tr25: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto st7 st7: -//line plugins/parsers/influx/machine.go.rl:157 + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof7 + } + st_case_7: +//line plugins/parsers/influx/machine.go:3620 + if ( m.data)[( m.p)] == 10 { + goto tr29 + } + goto tr8 +tr26: + ( m.cs) = 271 +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:140 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr31: + ( m.cs) = 271 +//line plugins/parsers/influx/machine.go.rl:140 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st271: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof271 + } + st_case_271: +//line plugins/parsers/influx/machine.go:3660 + switch ( m.data)[( m.p)] { + case 10: + goto tr103 + case 13: + goto st33 + case 32: + goto st272 + case 44: + goto st36 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st272 + } + goto tr105 +tr535: + ( m.cs) = 272 +//line plugins/parsers/influx/machine.go.rl:122 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr932: + ( m.cs) = 272 +//line plugins/parsers/influx/machine.go.rl:104 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr935: + ( m.cs) = 272 +//line plugins/parsers/influx/machine.go.rl:113 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr939: + ( 
m.cs) = 272 +//line plugins/parsers/influx/machine.go.rl:131 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st272: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof272 + } + st_case_272: +//line plugins/parsers/influx/machine.go:3732 + switch ( m.data)[( m.p)] { + case 10: + goto tr103 + case 13: + goto st33 + case 32: + goto st272 + case 45: + goto tr464 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr465 + } + case ( m.data)[( m.p)] >= 9: + goto st272 + } + goto tr426 +tr103: +//line plugins/parsers/influx/machine.go.rl:158 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line + goto st273 +tr470: + ( m.cs) = 273 +//line plugins/parsers/influx/machine.go.rl:149 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr734: + ( m.cs) = 273 +//line plugins/parsers/influx/machine.go.rl:122 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr952: + ( m.cs) = 273 +//line plugins/parsers/influx/machine.go.rl:104 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr957: + ( m.cs) = 273 +//line plugins/parsers/influx/machine.go.rl:113 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr962: + ( m.cs) = 273 +//line plugins/parsers/influx/machine.go.rl:131 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again + st273: +//line plugins/parsers/influx/machine.go.rl:164 + + m.finishMetric = true + ( m.cs) = 740; + {( m.p)++; goto _out } + if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof7 + goto _test_eof273 } - st_case_7: -//line plugins/parsers/influx/machine.go:3498 + st_case_273: +//line plugins/parsers/influx/machine.go:3866 switch ( m.data)[( m.p)] { case 10: - goto st7 - case 12: - goto tr8 + goto tr35 + case 11: + goto tr36 case 13: + goto tr35 + case 32: goto st8 - case 34: - goto tr31 + case 35: + goto tr35 + case 44: + goto tr35 case 92: - goto st76 + goto tr37 } - goto st6 -tr25: -//line plugins/parsers/influx/machine.go.rl:19 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st8 + } + goto tr33 +tr458: +//line plugins/parsers/influx/machine.go.rl:74 - m.pb = m.p + m.beginMetric = true goto st8 st8: @@ -3519,220 +3894,7 @@ tr25: goto _test_eof8 } st_case_8: -//line plugins/parsers/influx/machine.go:3523 - if ( m.data)[( m.p)] == 10 { - goto st7 - } - goto tr8 -tr26: - ( m.cs) = 260 
-//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddString(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr31: - ( m.cs) = 260 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddString(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st260: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof260 - } - st_case_260: -//line plugins/parsers/influx/machine.go:3563 - switch ( m.data)[( m.p)] { - case 10: - goto st262 - case 13: - goto st34 - case 32: - goto st261 - case 44: - goto st37 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st261 - } - goto tr105 -tr516: - ( m.cs) = 261 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddFloat(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr909: - ( m.cs) = 261 -//line plugins/parsers/influx/machine.go.rl:103 - - err = m.handler.AddInt(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr912: - ( m.cs) = 261 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddUint(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr916: - ( m.cs) = 261 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddBool(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st261: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof261 - } - st_case_261: -//line plugins/parsers/influx/machine.go:3635 - switch ( m.data)[( m.p)] { - case 10: - goto st262 - case 13: - goto st34 - case 32: - goto st261 - case 45: - goto tr445 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr446 - } - case ( m.data)[( m.p)] >= 9: - goto st261 - } - goto tr407 -tr451: - ( m.cs) = 262 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr715: - ( m.cs) = 262 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddFloat(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr925: - ( m.cs) = 262 -//line plugins/parsers/influx/machine.go.rl:103 - - err = m.handler.AddInt(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr930: - ( m.cs) = 262 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddUint(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr935: - ( m.cs) = 262 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddBool(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st262: -//line plugins/parsers/influx/machine.go.rl:157 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - -//line plugins/parsers/influx/machine.go.rl:163 - - ( m.cs) = 715; - {( m.p)++; goto _out } - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof262 - } - st_case_262: -//line plugins/parsers/influx/machine.go:3736 +//line plugins/parsers/influx/machine.go:3898 switch ( m.data)[( 
m.p)] { case 10: goto tr35 @@ -3741,7 +3903,7 @@ tr935: case 13: goto tr35 case 32: - goto st9 + goto st8 case 35: goto tr35 case 44: @@ -3750,13 +3912,23 @@ tr935: goto tr37 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st9 + goto st8 } goto tr33 -tr439: -//line plugins/parsers/influx/machine.go.rl:73 +tr36: +//line plugins/parsers/influx/machine.go.rl:20 - foundMetric = true + m.pb = m.p + + goto st9 +tr459: +//line plugins/parsers/influx/machine.go.rl:74 + + m.beginMetric = true + +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p goto st9 st9: @@ -3764,49 +3936,7 @@ tr439: goto _test_eof9 } st_case_9: -//line plugins/parsers/influx/machine.go:3768 - switch ( m.data)[( m.p)] { - case 10: - goto tr35 - case 11: - goto tr36 - case 13: - goto tr35 - case 32: - goto st9 - case 35: - goto tr35 - case 44: - goto tr35 - case 92: - goto tr37 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st9 - } - goto tr33 -tr36: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st10 -tr440: -//line plugins/parsers/influx/machine.go.rl:73 - - foundMetric = true - -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st10 - st10: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof10 - } - st_case_10: -//line plugins/parsers/influx/machine.go:3810 +//line plugins/parsers/influx/machine.go:3940 switch ( m.data)[( m.p)] { case 10: goto tr39 @@ -3828,24 +3958,24 @@ tr440: } goto tr33 tr38: - ( m.cs) = 11 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 10 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st11: + st10: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof11 + goto _test_eof10 } - st_case_11: -//line plugins/parsers/influx/machine.go:3849 + st_case_10: +//line plugins/parsers/influx/machine.go:3979 switch ( m.data)[( m.p)] { case 10: goto tr43 @@ -3854,7 +3984,7 @@ tr38: case 13: goto tr43 case 32: - goto st11 + goto st10 case 35: goto tr6 case 44: @@ -3865,21 +3995,21 @@ tr38: goto tr45 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st11 + goto st10 } goto tr41 tr41: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st12 - st12: + goto st11 + st11: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof12 + goto _test_eof11 } - st_case_12: -//line plugins/parsers/influx/machine.go:3883 + st_case_11: +//line plugins/parsers/influx/machine.go:4013 switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -3894,48 +4024,48 @@ tr41: case 61: goto tr49 case 92: - goto st29 + goto st28 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 } - goto st12 + goto st11 tr48: - ( m.cs) = 13 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 12 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr51: - ( m.cs) = 13 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 12 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again - st13: + st12: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof13 + goto _test_eof12 } - st_case_13: 
-//line plugins/parsers/influx/machine.go:3939 + st_case_12: +//line plugins/parsers/influx/machine.go:4069 switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -3957,37 +4087,37 @@ tr51: } goto tr41 tr4: - ( m.cs) = 14 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 13 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr62: - ( m.cs) = 14 -//line plugins/parsers/influx/machine.go.rl:90 + ( m.cs) = 13 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st14: + st13: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof14 + goto _test_eof13 } - st_case_14: -//line plugins/parsers/influx/machine.go:3991 + st_case_13: +//line plugins/parsers/influx/machine.go:4121 switch ( m.data)[( m.p)] { case 32: goto tr2 @@ -4008,17 +4138,17 @@ tr62: } goto tr52 tr52: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st15 - st15: + goto st14 + st14: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof15 + goto _test_eof14 } - st_case_15: -//line plugins/parsers/influx/machine.go:4022 + st_case_14: +//line plugins/parsers/influx/machine.go:4152 switch ( m.data)[( m.p)] { case 32: goto tr2 @@ -4027,7 +4157,7 @@ tr52: case 61: goto tr55 case 92: - goto st25 + goto st24 } switch { case ( m.data)[( m.p)] > 10: @@ -4037,19 +4167,19 @@ tr52: case ( m.data)[( m.p)] >= 9: goto tr2 } - goto st15 + goto st14 tr55: -//line plugins/parsers/influx/machine.go.rl:86 +//line plugins/parsers/influx/machine.go.rl:87 - key = m.text() + m.key = m.text() - goto st16 - st16: + goto st15 + st15: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof16 + goto _test_eof15 } - st_case_16: -//line plugins/parsers/influx/machine.go:4053 + st_case_15: +//line plugins/parsers/influx/machine.go:4183 switch ( m.data)[( m.p)] { case 32: goto tr2 @@ -4070,17 +4200,17 @@ tr55: } goto tr57 tr57: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st17 - st17: + goto st16 + st16: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof17 + goto _test_eof16 } - st_case_17: -//line plugins/parsers/influx/machine.go:4084 + st_case_16: +//line plugins/parsers/influx/machine.go:4214 switch ( m.data)[( m.p)] { case 10: goto tr2 @@ -4095,31 +4225,31 @@ tr57: case 61: goto tr2 case 92: - goto st23 + goto st22 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr60 } - goto st17 + goto st16 tr61: - ( m.cs) = 18 -//line plugins/parsers/influx/machine.go.rl:90 + ( m.cs) = 17 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st18: + st17: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof18 + goto _test_eof17 } - st_case_18: -//line plugins/parsers/influx/machine.go:4123 + st_case_17: +//line plugins/parsers/influx/machine.go:4253 switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -4141,17 +4271,17 @@ tr61: } goto tr64 tr64: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st19 - st19: + goto st18 + st18: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof19 + goto _test_eof18 } - st_case_19: 
-//line plugins/parsers/influx/machine.go:4155 + st_case_18: +//line plugins/parsers/influx/machine.go:4285 switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -4166,48 +4296,48 @@ tr64: case 61: goto tr12 case 92: - goto st21 + goto st20 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr60 } - goto st19 + goto st18 tr68: - ( m.cs) = 20 -//line plugins/parsers/influx/machine.go.rl:90 + ( m.cs) = 19 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr65: - ( m.cs) = 20 -//line plugins/parsers/influx/machine.go.rl:90 + ( m.cs) = 19 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again - st20: + st19: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof20 + goto _test_eof19 } - st_case_20: -//line plugins/parsers/influx/machine.go:4211 + st_case_19: +//line plugins/parsers/influx/machine.go:4341 switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -4229,19 +4359,19 @@ tr65: } goto tr64 tr66: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st21 - st21: + goto st20 + st20: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof21 + goto _test_eof20 } - st_case_21: -//line plugins/parsers/influx/machine.go:4243 + st_case_20: +//line plugins/parsers/influx/machine.go:4373 if ( m.data)[( m.p)] == 92 { - goto st22 + goto st21 } switch { case ( m.data)[( m.p)] > 10: @@ -4251,16 +4381,16 @@ tr66: case ( m.data)[( m.p)] >= 9: goto tr47 } - goto st19 - st22: -//line plugins/parsers/influx/machine.go.rl:234 + goto st18 + st21: +//line plugins/parsers/influx/machine.go.rl:240 ( m.p)-- if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof22 + goto _test_eof21 } - st_case_22: -//line plugins/parsers/influx/machine.go:4264 + st_case_21: +//line plugins/parsers/influx/machine.go:4394 switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -4275,26 +4405,26 @@ tr66: case 61: goto tr12 case 92: - goto st21 + goto st20 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr60 } - goto st19 + goto st18 tr58: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st23 - st23: + goto st22 + st22: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof23 + goto _test_eof22 } - st_case_23: -//line plugins/parsers/influx/machine.go:4296 + st_case_22: +//line plugins/parsers/influx/machine.go:4426 if ( m.data)[( m.p)] == 92 { - goto st24 + goto st23 } switch { case ( m.data)[( m.p)] > 10: @@ -4304,16 +4434,16 @@ tr58: case ( m.data)[( m.p)] >= 9: goto tr2 } - goto st17 - st24: -//line plugins/parsers/influx/machine.go.rl:234 + goto st16 + st23: +//line plugins/parsers/influx/machine.go.rl:240 ( m.p)-- if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof24 + goto _test_eof23 } - st_case_24: -//line plugins/parsers/influx/machine.go:4317 + st_case_23: +//line plugins/parsers/influx/machine.go:4447 switch ( m.data)[( m.p)] { case 10: goto tr2 @@ -4328,53 +4458,25 @@ tr58: case 61: goto tr2 case 92: - goto st23 + goto st22 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr60 } - goto st17 + goto st16 tr53: -//line 
plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st25 - st25: + goto st24 + st24: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof25 + goto _test_eof24 } - st_case_25: -//line plugins/parsers/influx/machine.go:4349 + st_case_24: +//line plugins/parsers/influx/machine.go:4479 if ( m.data)[( m.p)] == 92 { - goto st26 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 - } - goto st15 - st26: -//line plugins/parsers/influx/machine.go.rl:234 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof26 - } - st_case_26: -//line plugins/parsers/influx/machine.go:4370 - switch ( m.data)[( m.p)] { - case 32: - goto tr2 - case 44: - goto tr2 - case 61: - goto tr55 - case 92: goto st25 } switch { @@ -4385,29 +4487,57 @@ tr53: case ( m.data)[( m.p)] >= 9: goto tr2 } - goto st15 + goto st14 + st25: +//line plugins/parsers/influx/machine.go.rl:240 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof25 + } + st_case_25: +//line plugins/parsers/influx/machine.go:4500 + switch ( m.data)[( m.p)] { + case 32: + goto tr2 + case 44: + goto tr2 + case 61: + goto tr55 + case 92: + goto st24 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 + } + goto st14 tr49: -//line plugins/parsers/influx/machine.go.rl:99 +//line plugins/parsers/influx/machine.go.rl:100 - key = m.text() + m.key = m.text() - goto st27 -tr406: -//line plugins/parsers/influx/machine.go.rl:19 + goto st26 +tr425: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:99 +//line plugins/parsers/influx/machine.go.rl:100 - key = m.text() + m.key = m.text() - goto st27 - st27: + goto st26 + st26: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof27 + goto _test_eof26 } - st_case_27: -//line plugins/parsers/influx/machine.go:4411 + st_case_26: +//line plugins/parsers/influx/machine.go:4541 switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -4418,7 +4548,7 @@ tr406: case 32: goto tr1 case 34: - goto st30 + goto st29 case 44: goto tr4 case 45: @@ -4432,7 +4562,7 @@ tr406: case 84: goto tr79 case 92: - goto st96 + goto st95 case 102: goto tr80 case 116: @@ -4448,24 +4578,24 @@ tr406: } goto st1 tr3: - ( m.cs) = 28 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 27 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st28: + st27: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof28 + goto _test_eof27 } - st_case_28: -//line plugins/parsers/influx/machine.go:4469 + st_case_27: +//line plugins/parsers/influx/machine.go:4599 switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -4487,17 +4617,17 @@ tr3: } goto tr41 tr45: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st29 - st29: + goto st28 + st28: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof29 + goto _test_eof28 } - st_case_29: -//line plugins/parsers/influx/machine.go:4501 + st_case_28: +//line plugins/parsers/influx/machine.go:4631 switch { case ( m.data)[( m.p)] > 10: if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { @@ -4506,12 +4636,12 @@ tr45: case ( m.data)[( m.p)] >= 9: goto tr8 } - goto st12 - st30: + goto st11 + st29: if ( m.p)++; ( m.p) == ( 
m.pe) { - goto _test_eof30 + goto _test_eof29 } - st_case_30: + st_case_29: switch ( m.data)[( m.p)] { case 9: goto tr83 @@ -4534,28 +4664,28 @@ tr45: } goto tr82 tr82: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st31 - st31: + goto st30 + st30: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof31 + goto _test_eof30 } - st_case_31: -//line plugins/parsers/influx/machine.go:4548 + st_case_30: +//line plugins/parsers/influx/machine.go:4678 switch ( m.data)[( m.p)] { case 9: goto tr89 case 10: - goto st7 + goto tr29 case 11: goto tr90 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr89 case 34: @@ -4563,71 +4693,71 @@ tr82: case 44: goto tr92 case 92: - goto st142 + goto st141 } - goto st31 + goto st30 tr89: - ( m.cs) = 32 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 31 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr83: - ( m.cs) = 32 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 31 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again tr231: - ( m.cs) = 32 -//line plugins/parsers/influx/machine.go.rl:90 + ( m.cs) = 31 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st32: + st31: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof32 + goto _test_eof31 } - st_case_32: -//line plugins/parsers/influx/machine.go:4618 + st_case_31: +//line plugins/parsers/influx/machine.go:4748 switch ( m.data)[( m.p)] { case 9: - goto st32 + goto st31 case 10: - goto st7 + goto tr29 case 11: goto tr96 case 12: goto st2 case 13: - goto st8 + goto st7 case 32: - goto st32 + goto st31 case 34: goto tr97 case 44: @@ -4639,26 +4769,26 @@ tr231: } goto tr94 tr94: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st33 - st33: + goto st32 + st32: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof33 + goto _test_eof32 } - st_case_33: -//line plugins/parsers/influx/machine.go:4653 + st_case_32: +//line plugins/parsers/influx/machine.go:4783 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 32: goto st6 case 34: @@ -4668,560 +4798,80 @@ tr94: case 61: goto tr101 case 92: - goto st77 + goto st76 } - goto st33 + goto st32 tr97: - ( m.cs) = 263 -//line plugins/parsers/influx/machine.go.rl:19 + ( m.cs) = 274 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:139 +//line plugins/parsers/influx/machine.go.rl:140 - err = m.handler.AddString(key, m.text()) + err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr100: - ( m.cs) = 263 -//line plugins/parsers/influx/machine.go.rl:139 + ( m.cs) = 274 +//line plugins/parsers/influx/machine.go.rl:140 - err = m.handler.AddString(key, m.text()) + err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( 
m.p)++; goto _out } } goto _again -tr377: - ( m.cs) = 263 -//line plugins/parsers/influx/machine.go.rl:139 +tr386: + ( m.cs) = 274 +//line plugins/parsers/influx/machine.go.rl:140 - err = m.handler.AddString(key, m.text()) + err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again - st263: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof263 - } - st_case_263: -//line plugins/parsers/influx/machine.go:4727 - switch ( m.data)[( m.p)] { - case 10: - goto st262 - case 11: - goto st264 - case 13: - goto st34 - case 32: - goto st261 - case 44: - goto st37 - case 61: - goto tr12 - case 92: - goto st36 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st261 - } - goto st3 - st264: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof264 - } - st_case_264: - switch ( m.data)[( m.p)] { - case 10: - goto st262 - case 11: - goto st264 - case 13: - goto st34 - case 32: - goto st261 - case 44: - goto tr105 - case 45: - goto tr448 - case 61: - goto tr12 - case 92: - goto st36 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr449 - } - case ( m.data)[( m.p)] >= 9: - goto st261 - } - goto st3 -tr453: - ( m.cs) = 34 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr717: - ( m.cs) = 34 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddFloat(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr927: - ( m.cs) = 34 -//line plugins/parsers/influx/machine.go.rl:103 - - err = m.handler.AddInt(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr932: - ( m.cs) = 34 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddUint(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr937: - ( m.cs) = 34 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddBool(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st34: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof34 - } - st_case_34: -//line plugins/parsers/influx/machine.go:4850 - if ( m.data)[( m.p)] == 10 { - goto st262 - } - goto st0 -tr448: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st35 - st35: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof35 - } - st_case_35: -//line plugins/parsers/influx/machine.go:4866 - switch ( m.data)[( m.p)] { - case 32: - goto tr105 - case 44: - goto tr105 - case 61: - goto tr12 - case 92: - goto st36 - } - switch { - case ( m.data)[( m.p)] < 12: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr105 - } - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st265 - } - default: - goto tr105 - } - goto st3 -tr449: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st265 - st265: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof265 - } - st_case_265: -//line plugins/parsers/influx/machine.go:4901 - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr452 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr105 
- case 61: - goto tr12 - case 92: - goto st36 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st268 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st3 -tr450: - ( m.cs) = 266 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st266: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof266 - } - st_case_266: -//line plugins/parsers/influx/machine.go:4945 - switch ( m.data)[( m.p)] { - case 10: - goto st262 - case 13: - goto st34 - case 32: - goto st266 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st266 - } - goto st0 -tr452: - ( m.cs) = 267 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st267: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof267 - } - st_case_267: -//line plugins/parsers/influx/machine.go:4976 - switch ( m.data)[( m.p)] { - case 10: - goto st262 - case 11: - goto st267 - case 13: - goto st34 - case 32: - goto st266 - case 44: - goto tr8 - case 61: - goto tr12 - case 92: - goto st36 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st266 - } - goto st3 -tr10: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st36 - st36: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof36 - } - st_case_36: -//line plugins/parsers/influx/machine.go:5008 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 - } - goto st3 - st268: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof268 - } - st_case_268: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr452 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr105 - case 61: - goto tr12 - case 92: - goto st36 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st269 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st3 - st269: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof269 - } - st_case_269: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr452 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr105 - case 61: - goto tr12 - case 92: - goto st36 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st270 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st3 - st270: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof270 - } - st_case_270: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr452 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr105 - case 61: - goto tr12 - case 92: - goto st36 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st271 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st3 - st271: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof271 - } - st_case_271: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr452 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr105 - case 61: - goto tr12 - case 92: - goto st36 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st272 - } - case ( m.data)[( m.p)] 
>= 9: - goto tr450 - } - goto st3 - st272: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof272 - } - st_case_272: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr452 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr105 - case 61: - goto tr12 - case 92: - goto st36 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st273 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st3 - st273: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof273 - } - st_case_273: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr452 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr105 - case 61: - goto tr12 - case 92: - goto st36 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st274 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st3 st274: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof274 } st_case_274: +//line plugins/parsers/influx/machine.go:4857 switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr103 case 11: - goto tr452 + goto st275 case 13: - goto tr453 + goto st33 case 32: - goto tr450 + goto st272 case 44: - goto tr105 + goto st36 case 61: goto tr12 case 92: - goto st36 + goto st35 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st275 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st272 } goto st3 st275: @@ -5231,109 +4881,168 @@ tr10: st_case_275: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr103 case 11: - goto tr452 + goto st275 case 13: - goto tr453 + goto st33 case 32: - goto tr450 + goto st272 + case 44: + goto tr105 + case 45: + goto tr467 + case 61: + goto tr12 + case 92: + goto st35 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr468 + } + case ( m.data)[( m.p)] >= 9: + goto st272 + } + goto st3 +tr472: + ( m.cs) = 33 +//line plugins/parsers/influx/machine.go.rl:149 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr736: + ( m.cs) = 33 +//line plugins/parsers/influx/machine.go.rl:122 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr954: + ( m.cs) = 33 +//line plugins/parsers/influx/machine.go.rl:104 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr959: + ( m.cs) = 33 +//line plugins/parsers/influx/machine.go.rl:113 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr964: + ( m.cs) = 33 +//line plugins/parsers/influx/machine.go.rl:131 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st33: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof33 + } + st_case_33: +//line plugins/parsers/influx/machine.go:4980 + if ( m.data)[( m.p)] == 10 { + goto tr103 + } + goto st0 +tr467: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st34 + st34: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof34 + } + st_case_34: +//line plugins/parsers/influx/machine.go:4996 + switch ( m.data)[( m.p)] { + case 32: + goto 
tr105 case 44: goto tr105 case 61: goto tr12 case 92: - goto st36 + goto st35 } switch { - case ( m.data)[( m.p)] > 12: + case ( m.data)[( m.p)] < 12: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { + goto tr105 + } + case ( m.data)[( m.p)] > 13: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st276 } - case ( m.data)[( m.p)] >= 9: - goto tr450 + default: + goto tr105 } goto st3 +tr468: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st276 st276: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof276 } st_case_276: +//line plugins/parsers/influx/machine.go:5031 switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr452 + goto tr471 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr105 case 61: goto tr12 case 92: - goto st36 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st277 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st3 - st277: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof277 - } - st_case_277: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr452 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr105 - case 61: - goto tr12 - case 92: - goto st36 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st278 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st3 - st278: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof278 - } - st_case_278: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr452 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr105 - case 61: - goto tr12 - case 92: - goto st36 + goto st35 } switch { case ( m.data)[( m.p)] > 12: @@ -5341,7 +5050,98 @@ tr10: goto st279 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 + } + goto st3 +tr469: + ( m.cs) = 277 +//line plugins/parsers/influx/machine.go.rl:149 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st277: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof277 + } + st_case_277: +//line plugins/parsers/influx/machine.go:5075 + switch ( m.data)[( m.p)] { + case 10: + goto tr103 + case 13: + goto st33 + case 32: + goto st277 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st277 + } + goto st0 +tr471: + ( m.cs) = 278 +//line plugins/parsers/influx/machine.go.rl:149 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st278: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof278 + } + st_case_278: +//line plugins/parsers/influx/machine.go:5106 + switch ( m.data)[( m.p)] { + case 10: + goto tr103 + case 11: + goto st278 + case 13: + goto st33 + case 32: + goto st277 + case 44: + goto tr8 + case 61: + goto tr12 + case 92: + goto st35 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st277 + } + goto st3 +tr10: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st35 + st35: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof35 + } + st_case_35: +//line plugins/parsers/influx/machine.go:5138 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 + } + case ( m.data)[( m.p)] >= 9: + goto tr8 } goto st3 st279: @@ -5351,19 +5151,19 @@ tr10: st_case_279: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto 
tr470 case 11: - goto tr452 + goto tr471 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr105 case 61: goto tr12 case 92: - goto st36 + goto st35 } switch { case ( m.data)[( m.p)] > 12: @@ -5371,7 +5171,7 @@ tr10: goto st280 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 } goto st3 st280: @@ -5381,19 +5181,19 @@ tr10: st_case_280: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr452 + goto tr471 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr105 case 61: goto tr12 case 92: - goto st36 + goto st35 } switch { case ( m.data)[( m.p)] > 12: @@ -5401,7 +5201,7 @@ tr10: goto st281 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 } goto st3 st281: @@ -5411,19 +5211,19 @@ tr10: st_case_281: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr452 + goto tr471 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr105 case 61: goto tr12 case 92: - goto st36 + goto st35 } switch { case ( m.data)[( m.p)] > 12: @@ -5431,7 +5231,7 @@ tr10: goto st282 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 } goto st3 st282: @@ -5441,19 +5241,19 @@ tr10: st_case_282: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr452 + goto tr471 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr105 case 61: goto tr12 case 92: - goto st36 + goto st35 } switch { case ( m.data)[( m.p)] > 12: @@ -5461,7 +5261,7 @@ tr10: goto st283 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 } goto st3 st283: @@ -5471,19 +5271,19 @@ tr10: st_case_283: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr452 + goto tr471 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr105 case 61: goto tr12 case 92: - goto st36 + goto st35 } switch { case ( m.data)[( m.p)] > 12: @@ -5491,7 +5291,7 @@ tr10: goto st284 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 } goto st3 st284: @@ -5501,19 +5301,19 @@ tr10: st_case_284: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr452 + goto tr471 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr105 case 61: goto tr12 case 92: - goto st36 + goto st35 } switch { case ( m.data)[( m.p)] > 12: @@ -5521,7 +5321,7 @@ tr10: goto st285 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 } goto st3 st285: @@ -5531,82 +5331,412 @@ tr10: st_case_285: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr452 + goto tr471 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr105 case 61: goto tr12 case 92: - goto st36 + goto st35 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr450 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st286 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 } goto st3 -tr907: - ( m.cs) = 37 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddFloat(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr1014: - ( m.cs) = 37 -//line plugins/parsers/influx/machine.go.rl:103 - - err = m.handler.AddInt(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr1016: - ( m.cs) = 37 -//line plugins/parsers/influx/machine.go.rl:112 - - err = 
m.handler.AddUint(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr1018: - ( m.cs) = 37 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddBool(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st37: + st286: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof37 + goto _test_eof286 } - st_case_37: -//line plugins/parsers/influx/machine.go:5610 + st_case_286: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr471 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st35 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st287 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st3 + st287: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof287 + } + st_case_287: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr471 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st35 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st288 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st3 + st288: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof288 + } + st_case_288: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr471 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st35 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st289 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st3 + st289: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof289 + } + st_case_289: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr471 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st35 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st290 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st3 + st290: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof290 + } + st_case_290: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr471 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st35 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st291 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st3 + st291: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof291 + } + st_case_291: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr471 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st35 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st292 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st3 + st292: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof292 + } + st_case_292: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr471 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st35 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + 
goto st293 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st3 + st293: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof293 + } + st_case_293: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr471 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st35 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st294 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st3 + st294: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof294 + } + st_case_294: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr471 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st35 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st295 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st3 + st295: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof295 + } + st_case_295: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr471 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st35 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st296 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st3 + st296: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof296 + } + st_case_296: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr471 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr105 + case 61: + goto tr12 + case 92: + goto st35 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr469 + } + goto st3 +tr930: + ( m.cs) = 36 +//line plugins/parsers/influx/machine.go.rl:122 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr1046: + ( m.cs) = 36 +//line plugins/parsers/influx/machine.go.rl:104 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr1048: + ( m.cs) = 36 +//line plugins/parsers/influx/machine.go.rl:113 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr1050: + ( m.cs) = 36 +//line plugins/parsers/influx/machine.go.rl:131 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st36: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof36 + } + st_case_36: +//line plugins/parsers/influx/machine.go:5740 switch ( m.data)[( m.p)] { case 32: goto tr8 @@ -5627,24 +5757,24 @@ tr1018: } goto tr6 tr101: -//line plugins/parsers/influx/machine.go.rl:99 +//line plugins/parsers/influx/machine.go.rl:100 - key = m.text() + m.key = m.text() - goto st38 - st38: + goto st37 + st37: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof38 + goto _test_eof37 } - st_case_38: -//line plugins/parsers/influx/machine.go:5641 + st_case_37: +//line plugins/parsers/influx/machine.go:5771 switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 34: goto tr107 case 45: @@ -5658,7 +5788,7 @@ tr101: case 84: goto tr113 case 92: - goto st76 + goto st75 case 102: goto tr114 case 116: @@ -5669,234 +5799,330 @@ tr101: } goto st6 tr107: - ( 
m.cs) = 286 -//line plugins/parsers/influx/machine.go.rl:139 + ( m.cs) = 297 +//line plugins/parsers/influx/machine.go.rl:140 - err = m.handler.AddString(key, m.text()) + err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st286: + st297: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof286 + goto _test_eof297 } - st_case_286: -//line plugins/parsers/influx/machine.go:5690 + st_case_297: +//line plugins/parsers/influx/machine.go:5820 switch ( m.data)[( m.p)] { case 10: - goto tr475 + goto tr494 case 12: - goto st261 + goto st272 case 13: - goto tr476 + goto tr495 case 32: - goto tr474 + goto tr493 case 34: goto tr26 case 44: - goto tr477 + goto tr496 case 92: goto tr27 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr474 + goto tr493 } goto tr23 -tr474: -//line plugins/parsers/influx/machine.go.rl:19 +tr493: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st287 -tr961: - ( m.cs) = 287 -//line plugins/parsers/influx/machine.go.rl:121 + goto st298 +tr988: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:122 - err = m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr966: - ( m.cs) = 287 -//line plugins/parsers/influx/machine.go.rl:103 +tr993: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:104 - err = m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr969: - ( m.cs) = 287 -//line plugins/parsers/influx/machine.go.rl:112 +tr996: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:113 - err = m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr972: - ( m.cs) = 287 -//line plugins/parsers/influx/machine.go.rl:130 +tr999: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:131 - err = m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st287: + st298: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof287 + goto _test_eof298 } - st_case_287: -//line plugins/parsers/influx/machine.go:5774 + st_case_298: +//line plugins/parsers/influx/machine.go:5904 switch ( m.data)[( m.p)] { case 10: - goto st288 + goto tr221 case 12: - goto st261 + goto st272 case 13: - goto st74 + goto st73 case 32: - goto st287 + goto st298 case 34: goto tr31 case 45: - goto tr480 + goto tr499 case 92: - goto st76 + goto st75 } switch { case ( m.data)[( m.p)] > 11: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr481 + goto tr500 } case ( m.data)[( m.p)] >= 9: - goto st287 + goto st298 } goto st6 -tr475: -//line plugins/parsers/influx/machine.go.rl:19 +tr494: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st288 -tr584: - ( m.cs) = 288 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr620: - ( m.cs) = 288 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddFloat(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr778: - ( m.cs) = 288 
-//line plugins/parsers/influx/machine.go.rl:103 - - err = m.handler.AddInt(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr784: - ( m.cs) = 288 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddUint(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr790: - ( m.cs) = 288 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddBool(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st288: -//line plugins/parsers/influx/machine.go.rl:157 +//line plugins/parsers/influx/machine.go.rl:158 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line -//line plugins/parsers/influx/machine.go.rl:163 + goto st299 +tr221: +//line plugins/parsers/influx/machine.go.rl:158 - ( m.cs) = 715; + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto st299 +tr639: + ( m.cs) = 299 +//line plugins/parsers/influx/machine.go.rl:122 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr603: + ( m.cs) = 299 +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + +//line plugins/parsers/influx/machine.go.rl:149 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr823: + ( m.cs) = 299 +//line plugins/parsers/influx/machine.go.rl:104 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr829: + ( m.cs) = 299 +//line plugins/parsers/influx/machine.go.rl:113 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr810: + ( m.cs) = 299 +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + +//line plugins/parsers/influx/machine.go.rl:131 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr765: + ( m.cs) = 299 +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + +//line plugins/parsers/influx/machine.go.rl:122 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr798: + ( m.cs) = 299 +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + +//line plugins/parsers/influx/machine.go.rl:104 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr804: + ( m.cs) = 299 +//line 
plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + +//line plugins/parsers/influx/machine.go.rl:113 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st299: +//line plugins/parsers/influx/machine.go.rl:164 + + m.finishMetric = true + ( m.cs) = 740; {( m.p)++; goto _out } if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof288 + goto _test_eof299 } - st_case_288: -//line plugins/parsers/influx/machine.go:5887 + st_case_299: +//line plugins/parsers/influx/machine.go:6113 switch ( m.data)[( m.p)] { case 9: - goto st39 + goto st38 case 10: - goto st7 + goto tr29 case 11: goto tr117 case 12: - goto st9 - case 13: goto st8 + case 13: + goto st7 case 32: - goto st39 + goto st38 case 34: goto tr118 case 35: @@ -5907,24 +6133,24 @@ tr790: goto tr87 } goto tr82 - st39: + st38: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof39 + goto _test_eof38 } - st_case_39: + st_case_38: switch ( m.data)[( m.p)] { case 9: - goto st39 + goto st38 case 10: - goto st7 + goto tr29 case 11: goto tr117 case 12: - goto st9 - case 13: goto st8 + case 13: + goto st7 case 32: - goto st39 + goto st38 case 34: goto tr118 case 35: @@ -5936,34 +6162,34 @@ tr790: } goto tr82 tr117: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st40 - st40: + goto st39 + st39: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof40 + goto _test_eof39 } - st_case_40: -//line plugins/parsers/influx/machine.go:5950 + st_case_39: +//line plugins/parsers/influx/machine.go:6176 switch ( m.data)[( m.p)] { case 9: goto tr119 case 10: - goto st7 + goto tr29 case 11: goto tr120 case 12: goto tr38 case 13: - goto st8 + goto st7 case 32: goto tr119 case 34: goto tr85 case 35: - goto st31 + goto st30 case 44: goto tr92 case 92: @@ -5971,37 +6197,37 @@ tr117: } goto tr82 tr119: - ( m.cs) = 41 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 40 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st41: + st40: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof41 + goto _test_eof40 } - st_case_41: -//line plugins/parsers/influx/machine.go:5992 + st_case_40: +//line plugins/parsers/influx/machine.go:6218 switch ( m.data)[( m.p)] { case 9: - goto st41 + goto st40 case 10: - goto st7 + goto tr29 case 11: goto tr123 case 12: - goto st11 + goto st10 case 13: - goto st8 + goto st7 case 32: - goto st41 + goto st40 case 34: goto tr124 case 35: @@ -6015,28 +6241,28 @@ tr119: } goto tr121 tr121: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st42 - st42: + goto st41 + st41: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof42 + goto _test_eof41 } - st_case_42: -//line plugins/parsers/influx/machine.go:6029 + st_case_41: +//line plugins/parsers/influx/machine.go:6255 switch ( m.data)[( m.p)] { case 9: goto tr89 case 10: - goto st7 + goto tr29 case 11: goto tr127 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr89 case 34: @@ -6046,56 +6272,56 @@ tr121: case 61: goto tr129 case 92: - goto st94 + goto st93 } - goto st42 + goto st41 tr127: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 42 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( 
m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr131: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 42 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again - st43: + st42: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof43 + goto _test_eof42 } - st_case_43: -//line plugins/parsers/influx/machine.go:6088 + st_case_42: +//line plugins/parsers/influx/machine.go:6314 switch ( m.data)[( m.p)] { case 9: goto tr89 case 10: - goto st7 + goto tr29 case 11: goto tr131 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr89 case 34: @@ -6109,290 +6335,290 @@ tr131: } goto tr121 tr124: - ( m.cs) = 289 -//line plugins/parsers/influx/machine.go.rl:19 + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:139 +//line plugins/parsers/influx/machine.go.rl:140 - err = m.handler.AddString(key, m.text()) + err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr128: - ( m.cs) = 289 -//line plugins/parsers/influx/machine.go.rl:139 + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:140 - err = m.handler.AddString(key, m.text()) + err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st289: + st300: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof289 + goto _test_eof300 } - st_case_289: -//line plugins/parsers/influx/machine.go:6147 + st_case_300: +//line plugins/parsers/influx/machine.go:6373 switch ( m.data)[( m.p)] { case 10: - goto st262 + goto tr103 case 11: - goto tr483 + goto tr502 case 13: - goto st34 + goto st33 case 32: - goto tr482 + goto tr501 case 44: - goto tr484 + goto tr503 case 61: goto tr49 case 92: - goto st29 + goto st28 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr482 + goto tr501 } - goto st12 -tr482: - ( m.cs) = 290 -//line plugins/parsers/influx/machine.go.rl:77 + goto st11 +tr501: + ( m.cs) = 301 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr547: - ( m.cs) = 290 -//line plugins/parsers/influx/machine.go.rl:90 +tr566: + ( m.cs) = 301 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr622: - ( m.cs) = 290 -//line plugins/parsers/influx/machine.go.rl:77 +tr641: + ( m.cs) = 301 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:121 +//line plugins/parsers/influx/machine.go.rl:122 - err = m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr712: - ( m.cs) = 290 -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - 
-//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddFloat(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr724: - ( m.cs) = 290 -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:103 - - err = m.handler.AddInt(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr731: - ( m.cs) = 290 -//line plugins/parsers/influx/machine.go.rl:90 + ( m.cs) = 301 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:112 +//line plugins/parsers/influx/machine.go.rl:122 - err = m.handler.AddUint(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr738: - ( m.cs) = 290 -//line plugins/parsers/influx/machine.go.rl:90 +tr743: + ( m.cs) = 301 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:130 +//line plugins/parsers/influx/machine.go.rl:104 - err = m.handler.AddBool(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr804: - ( m.cs) = 290 -//line plugins/parsers/influx/machine.go.rl:77 +tr750: + ( m.cs) = 301 +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:113 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr757: + ( m.cs) = 301 +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:131 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr825: + ( m.cs) = 301 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:103 +//line plugins/parsers/influx/machine.go.rl:104 - err = m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr809: - ( m.cs) = 290 -//line plugins/parsers/influx/machine.go.rl:77 +tr831: + ( m.cs) = 301 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:112 +//line plugins/parsers/influx/machine.go.rl:113 - err = m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 
258; {( m.p)++; goto _out } } goto _again -tr814: - ( m.cs) = 290 -//line plugins/parsers/influx/machine.go.rl:77 +tr836: + ( m.cs) = 301 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:130 +//line plugins/parsers/influx/machine.go.rl:131 - err = m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st290: + st301: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof290 + goto _test_eof301 } - st_case_290: -//line plugins/parsers/influx/machine.go:6383 + st_case_301: +//line plugins/parsers/influx/machine.go:6609 switch ( m.data)[( m.p)] { case 10: - goto st262 + goto tr103 case 11: - goto tr486 + goto tr505 case 13: - goto st34 + goto st33 case 32: - goto st290 + goto st301 case 44: goto tr105 case 45: - goto tr448 + goto tr467 case 61: goto tr105 case 92: @@ -6401,37 +6627,37 @@ tr814: switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr449 + goto tr468 } case ( m.data)[( m.p)] >= 9: - goto st290 + goto st301 } goto tr6 -tr486: -//line plugins/parsers/influx/machine.go.rl:19 +tr505: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st291 - st291: + goto st302 + st302: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof291 + goto _test_eof302 } - st_case_291: -//line plugins/parsers/influx/machine.go:6422 + st_case_302: +//line plugins/parsers/influx/machine.go:6648 switch ( m.data)[( m.p)] { case 10: - goto st262 + goto tr103 case 11: - goto tr486 + goto tr505 case 13: - goto st34 + goto st33 case 32: - goto st290 + goto st301 case 44: goto tr105 case 45: - goto tr448 + goto tr467 case 61: goto tr12 case 92: @@ -6440,61 +6666,61 @@ tr486: switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr449 + goto tr468 } case ( m.data)[( m.p)] >= 9: - goto st290 + goto st301 } goto tr6 -tr483: - ( m.cs) = 292 -//line plugins/parsers/influx/machine.go.rl:77 +tr502: + ( m.cs) = 303 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr487: - ( m.cs) = 292 -//line plugins/parsers/influx/machine.go.rl:77 +tr506: + ( m.cs) = 303 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again - st292: + st303: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof292 + goto _test_eof303 } - st_case_292: -//line plugins/parsers/influx/machine.go:6485 + st_case_303: +//line plugins/parsers/influx/machine.go:6711 switch ( m.data)[( m.p)] { case 10: - goto st262 + goto tr103 case 11: - goto tr487 + goto tr506 case 13: - goto st34 + goto st33 case 32: - goto tr482 + goto tr501 case 44: goto tr4 case 45: - goto tr488 + goto tr507 case 61: goto tr49 case 92: @@ -6503,24 +6729,24 @@ tr487: switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr489 + goto tr508 } case ( m.data)[( m.p)] >= 9: - goto tr482 + goto tr501 } goto tr41 -tr488: -//line plugins/parsers/influx/machine.go.rl:19 +tr507: +//line 
plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st44 - st44: + goto st43 + st43: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof44 + goto _test_eof43 } - st_case_44: -//line plugins/parsers/influx/machine.go:6524 + st_case_43: +//line plugins/parsers/influx/machine.go:6750 switch ( m.data)[( m.p)] { case 10: goto tr132 @@ -6535,141 +6761,141 @@ tr488: case 61: goto tr49 case 92: - goto st29 + goto st28 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st293 + goto st304 } case ( m.data)[( m.p)] >= 9: goto tr1 } - goto st12 -tr489: -//line plugins/parsers/influx/machine.go.rl:19 + goto st11 +tr508: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st293 - st293: + goto st304 + st304: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof293 + goto _test_eof304 } - st_case_293: -//line plugins/parsers/influx/machine.go:6561 + st_case_304: +//line plugins/parsers/influx/machine.go:6787 switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr491 + goto tr510 case 13: - goto tr453 + goto tr472 case 32: - goto tr490 + goto tr509 case 44: goto tr4 case 61: goto tr49 case 92: - goto st29 + goto st28 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st297 + goto st308 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr509 } - goto st12 -tr495: - ( m.cs) = 294 -//line plugins/parsers/influx/machine.go.rl:77 + goto st11 +tr514: + ( m.cs) = 305 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr556: - ( m.cs) = 294 -//line plugins/parsers/influx/machine.go.rl:90 +tr575: + ( m.cs) = 305 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr490: - ( m.cs) = 294 -//line plugins/parsers/influx/machine.go.rl:77 +tr509: + ( m.cs) = 305 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:148 +//line plugins/parsers/influx/machine.go.rl:149 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr553: - ( m.cs) = 294 -//line plugins/parsers/influx/machine.go.rl:90 +tr572: + ( m.cs) = 305 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:148 +//line plugins/parsers/influx/machine.go.rl:149 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st294: + st305: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof294 + goto _test_eof305 } - st_case_294: -//line plugins/parsers/influx/machine.go:6664 + st_case_305: +//line plugins/parsers/influx/machine.go:6890 switch ( m.data)[( m.p)] { case 10: - goto st262 + goto tr103 case 11: - goto tr494 + goto tr513 case 13: - goto st34 + goto st33 case 32: - goto st294 + goto st305 case 44: goto tr8 case 61: @@ -6678,30 +6904,30 @@ tr553: goto tr10 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( 
m.p)] <= 12 { - goto st294 + goto st305 } goto tr6 -tr494: -//line plugins/parsers/influx/machine.go.rl:19 +tr513: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st295 - st295: + goto st306 + st306: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof295 + goto _test_eof306 } - st_case_295: -//line plugins/parsers/influx/machine.go:6696 + st_case_306: +//line plugins/parsers/influx/machine.go:6922 switch ( m.data)[( m.p)] { case 10: - goto st262 + goto tr103 case 11: - goto tr494 + goto tr513 case 13: - goto st34 + goto st33 case 32: - goto st294 + goto st305 case 44: goto tr8 case 61: @@ -6710,64 +6936,64 @@ tr494: goto tr10 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st294 + goto st305 } goto tr6 -tr496: - ( m.cs) = 296 -//line plugins/parsers/influx/machine.go.rl:77 +tr515: + ( m.cs) = 307 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again -tr491: - ( m.cs) = 296 -//line plugins/parsers/influx/machine.go.rl:77 +tr510: + ( m.cs) = 307 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:148 +//line plugins/parsers/influx/machine.go.rl:149 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st296: + st307: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof296 + goto _test_eof307 } - st_case_296: -//line plugins/parsers/influx/machine.go:6762 + st_case_307: +//line plugins/parsers/influx/machine.go:6988 switch ( m.data)[( m.p)] { case 10: - goto st262 + goto tr103 case 11: - goto tr496 + goto tr515 case 13: - goto st34 + goto st33 case 32: - goto tr495 + goto tr514 case 44: goto tr4 case 61: @@ -6776,339 +7002,9 @@ tr491: goto tr45 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr495 + goto tr514 } goto tr41 - st297: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof297 - } - st_case_297: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr491 - case 13: - goto tr453 - case 32: - goto tr490 - case 44: - goto tr4 - case 61: - goto tr49 - case 92: - goto st29 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st298 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st12 - st298: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof298 - } - st_case_298: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr491 - case 13: - goto tr453 - case 32: - goto tr490 - case 44: - goto tr4 - case 61: - goto tr49 - case 92: - goto st29 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st299 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st12 - st299: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof299 - } - st_case_299: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr491 - case 13: - goto tr453 - case 32: - goto tr490 - case 44: - goto tr4 - case 61: - goto tr49 - case 92: - goto st29 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st300 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st12 - 
st300: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof300 - } - st_case_300: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr491 - case 13: - goto tr453 - case 32: - goto tr490 - case 44: - goto tr4 - case 61: - goto tr49 - case 92: - goto st29 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st301 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st12 - st301: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof301 - } - st_case_301: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr491 - case 13: - goto tr453 - case 32: - goto tr490 - case 44: - goto tr4 - case 61: - goto tr49 - case 92: - goto st29 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st302 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st12 - st302: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof302 - } - st_case_302: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr491 - case 13: - goto tr453 - case 32: - goto tr490 - case 44: - goto tr4 - case 61: - goto tr49 - case 92: - goto st29 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st303 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st12 - st303: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof303 - } - st_case_303: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr491 - case 13: - goto tr453 - case 32: - goto tr490 - case 44: - goto tr4 - case 61: - goto tr49 - case 92: - goto st29 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st304 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st12 - st304: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof304 - } - st_case_304: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr491 - case 13: - goto tr453 - case 32: - goto tr490 - case 44: - goto tr4 - case 61: - goto tr49 - case 92: - goto st29 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st305 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st12 - st305: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof305 - } - st_case_305: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr491 - case 13: - goto tr453 - case 32: - goto tr490 - case 44: - goto tr4 - case 61: - goto tr49 - case 92: - goto st29 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st306 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st12 - st306: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof306 - } - st_case_306: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr491 - case 13: - goto tr453 - case 32: - goto tr490 - case 44: - goto tr4 - case 61: - goto tr49 - case 92: - goto st29 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st307 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st12 - st307: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof307 - } - st_case_307: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr491 - case 13: - goto tr453 - case 32: - goto tr490 - case 44: - goto tr4 - case 61: - goto tr49 - case 92: - goto st29 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( 
m.data)[( m.p)] <= 57 { - goto st308 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st12 st308: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof308 @@ -7116,19 +7012,19 @@ tr491: st_case_308: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr491 + goto tr510 case 13: - goto tr453 + goto tr472 case 32: - goto tr490 + goto tr509 case 44: goto tr4 case 61: goto tr49 case 92: - goto st29 + goto st28 } switch { case ( m.data)[( m.p)] > 12: @@ -7136,9 +7032,9 @@ tr491: goto st309 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr509 } - goto st12 + goto st11 st309: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof309 @@ -7146,19 +7042,19 @@ tr491: st_case_309: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr491 + goto tr510 case 13: - goto tr453 + goto tr472 case 32: - goto tr490 + goto tr509 case 44: goto tr4 case 61: goto tr49 case 92: - goto st29 + goto st28 } switch { case ( m.data)[( m.p)] > 12: @@ -7166,9 +7062,9 @@ tr491: goto st310 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr509 } - goto st12 + goto st11 st310: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof310 @@ -7176,19 +7072,19 @@ tr491: st_case_310: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr491 + goto tr510 case 13: - goto tr453 + goto tr472 case 32: - goto tr490 + goto tr509 case 44: goto tr4 case 61: goto tr49 case 92: - goto st29 + goto st28 } switch { case ( m.data)[( m.p)] > 12: @@ -7196,9 +7092,9 @@ tr491: goto st311 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr509 } - goto st12 + goto st11 st311: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof311 @@ -7206,19 +7102,19 @@ tr491: st_case_311: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr491 + goto tr510 case 13: - goto tr453 + goto tr472 case 32: - goto tr490 + goto tr509 case 44: goto tr4 case 61: goto tr49 case 92: - goto st29 + goto st28 } switch { case ( m.data)[( m.p)] > 12: @@ -7226,9 +7122,9 @@ tr491: goto st312 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr509 } - goto st12 + goto st11 st312: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof312 @@ -7236,19 +7132,19 @@ tr491: st_case_312: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr491 + goto tr510 case 13: - goto tr453 + goto tr472 case 32: - goto tr490 + goto tr509 case 44: goto tr4 case 61: goto tr49 case 92: - goto st29 + goto st28 } switch { case ( m.data)[( m.p)] > 12: @@ -7256,9 +7152,9 @@ tr491: goto st313 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr509 } - goto st12 + goto st11 st313: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof313 @@ -7266,19 +7162,19 @@ tr491: st_case_313: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr491 + goto tr510 case 13: - goto tr453 + goto tr472 case 32: - goto tr490 + goto tr509 case 44: goto tr4 case 61: goto tr49 case 92: - goto st29 + goto st28 } switch { case ( m.data)[( m.p)] > 12: @@ -7286,9 +7182,9 @@ tr491: goto st314 } case ( m.data)[( m.p)] >= 9: - goto tr490 + goto tr509 } - goto st12 + goto st11 st314: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof314 @@ -7296,240 +7192,570 @@ tr491: st_case_314: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr491 + goto tr510 case 13: - goto tr453 + goto tr472 case 32: - goto tr490 + goto tr509 case 44: goto tr4 case 61: goto tr49 case 92: - goto st29 + goto st28 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 
57 { + goto st315 + } + case ( m.data)[( m.p)] >= 9: + goto tr509 + } + goto st11 + st315: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof315 + } + st_case_315: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr510 + case 13: + goto tr472 + case 32: + goto tr509 + case 44: + goto tr4 + case 61: + goto tr49 + case 92: + goto st28 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st316 + } + case ( m.data)[( m.p)] >= 9: + goto tr509 + } + goto st11 + st316: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof316 + } + st_case_316: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr510 + case 13: + goto tr472 + case 32: + goto tr509 + case 44: + goto tr4 + case 61: + goto tr49 + case 92: + goto st28 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st317 + } + case ( m.data)[( m.p)] >= 9: + goto tr509 + } + goto st11 + st317: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof317 + } + st_case_317: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr510 + case 13: + goto tr472 + case 32: + goto tr509 + case 44: + goto tr4 + case 61: + goto tr49 + case 92: + goto st28 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st318 + } + case ( m.data)[( m.p)] >= 9: + goto tr509 + } + goto st11 + st318: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof318 + } + st_case_318: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr510 + case 13: + goto tr472 + case 32: + goto tr509 + case 44: + goto tr4 + case 61: + goto tr49 + case 92: + goto st28 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st319 + } + case ( m.data)[( m.p)] >= 9: + goto tr509 + } + goto st11 + st319: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof319 + } + st_case_319: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr510 + case 13: + goto tr472 + case 32: + goto tr509 + case 44: + goto tr4 + case 61: + goto tr49 + case 92: + goto st28 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st320 + } + case ( m.data)[( m.p)] >= 9: + goto tr509 + } + goto st11 + st320: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof320 + } + st_case_320: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr510 + case 13: + goto tr472 + case 32: + goto tr509 + case 44: + goto tr4 + case 61: + goto tr49 + case 92: + goto st28 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st321 + } + case ( m.data)[( m.p)] >= 9: + goto tr509 + } + goto st11 + st321: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof321 + } + st_case_321: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr510 + case 13: + goto tr472 + case 32: + goto tr509 + case 44: + goto tr4 + case 61: + goto tr49 + case 92: + goto st28 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st322 + } + case ( m.data)[( m.p)] >= 9: + goto tr509 + } + goto st11 + st322: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof322 + } + st_case_322: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr510 + case 13: + goto tr472 + case 32: + goto tr509 + case 44: + goto tr4 + case 61: + goto tr49 + case 92: + goto 
st28 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st323 + } + case ( m.data)[( m.p)] >= 9: + goto tr509 + } + goto st11 + st323: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof323 + } + st_case_323: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr510 + case 13: + goto tr472 + case 32: + goto tr509 + case 44: + goto tr4 + case 61: + goto tr49 + case 92: + goto st28 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st324 + } + case ( m.data)[( m.p)] >= 9: + goto tr509 + } + goto st11 + st324: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof324 + } + st_case_324: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr510 + case 13: + goto tr472 + case 32: + goto tr509 + case 44: + goto tr4 + case 61: + goto tr49 + case 92: + goto st28 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st325 + } + case ( m.data)[( m.p)] >= 9: + goto tr509 + } + goto st11 + st325: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof325 + } + st_case_325: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr510 + case 13: + goto tr472 + case 32: + goto tr509 + case 44: + goto tr4 + case 61: + goto tr49 + case 92: + goto st28 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr490 + goto tr509 } - goto st12 -tr484: - ( m.cs) = 45 -//line plugins/parsers/influx/machine.go.rl:77 + goto st11 +tr503: + ( m.cs) = 44 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr549: - ( m.cs) = 45 -//line plugins/parsers/influx/machine.go.rl:90 +tr568: + ( m.cs) = 44 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr799: - ( m.cs) = 45 -//line plugins/parsers/influx/machine.go.rl:77 +tr819: + ( m.cs) = 44 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:121 +//line plugins/parsers/influx/machine.go.rl:122 - err = m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr718: - ( m.cs) = 45 -//line plugins/parsers/influx/machine.go.rl:90 +tr737: + ( m.cs) = 44 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:121 +//line plugins/parsers/influx/machine.go.rl:122 - err = m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr928: - ( m.cs) = 45 -//line plugins/parsers/influx/machine.go.rl:90 +tr955: + ( m.cs) = 44 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line 
plugins/parsers/influx/machine.go.rl:103 +//line plugins/parsers/influx/machine.go.rl:104 - err = m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr933: - ( m.cs) = 45 -//line plugins/parsers/influx/machine.go.rl:90 +tr960: + ( m.cs) = 44 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:112 +//line plugins/parsers/influx/machine.go.rl:113 - err = m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr938: - ( m.cs) = 45 -//line plugins/parsers/influx/machine.go.rl:90 +tr965: + ( m.cs) = 44 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:130 +//line plugins/parsers/influx/machine.go.rl:131 - err = m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr982: - ( m.cs) = 45 -//line plugins/parsers/influx/machine.go.rl:77 +tr1014: + ( m.cs) = 44 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:103 +//line plugins/parsers/influx/machine.go.rl:104 - err = m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr985: - ( m.cs) = 45 -//line plugins/parsers/influx/machine.go.rl:77 +tr1017: + ( m.cs) = 44 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:112 +//line plugins/parsers/influx/machine.go.rl:113 - err = m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr988: - ( m.cs) = 45 -//line plugins/parsers/influx/machine.go.rl:77 +tr1020: + ( m.cs) = 44 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:130 +//line plugins/parsers/influx/machine.go.rl:131 - err = m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st45: + st44: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof45 + goto _test_eof44 } - st_case_45: -//line plugins/parsers/influx/machine.go:7533 + st_case_44: +//line plugins/parsers/influx/machine.go:7759 switch ( m.data)[( m.p)] { case 32: goto tr47 @@ -7550,17 +7776,17 @@ tr988: } goto tr134 tr134: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st46 - st46: + goto st45 + st45: if ( m.p)++; ( m.p) == 
( m.pe) { - goto _test_eof46 + goto _test_eof45 } - st_case_46: -//line plugins/parsers/influx/machine.go:7564 + st_case_45: +//line plugins/parsers/influx/machine.go:7790 switch ( m.data)[( m.p)] { case 32: goto tr47 @@ -7569,7 +7795,7 @@ tr134: case 61: goto tr137 case 92: - goto st101 + goto st100 } switch { case ( m.data)[( m.p)] > 10: @@ -7579,23 +7805,23 @@ tr134: case ( m.data)[( m.p)] >= 9: goto tr47 } - goto st46 + goto st45 tr137: -//line plugins/parsers/influx/machine.go.rl:86 +//line plugins/parsers/influx/machine.go.rl:87 - key = m.text() + m.key = m.text() -//line plugins/parsers/influx/machine.go.rl:99 +//line plugins/parsers/influx/machine.go.rl:100 - key = m.text() + m.key = m.text() - goto st47 - st47: + goto st46 + st46: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof47 + goto _test_eof46 } - st_case_47: -//line plugins/parsers/influx/machine.go:7599 + st_case_46: +//line plugins/parsers/influx/machine.go:7825 switch ( m.data)[( m.p)] { case 32: goto tr47 @@ -7636,17 +7862,17 @@ tr137: } goto tr57 tr139: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st48 - st48: + goto st47 + st47: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof48 + goto _test_eof47 } - st_case_48: -//line plugins/parsers/influx/machine.go:7650 + st_case_47: +//line plugins/parsers/influx/machine.go:7876 switch ( m.data)[( m.p)] { case 9: goto tr149 @@ -7671,28 +7897,28 @@ tr139: } goto tr148 tr148: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st49 - st49: + goto st48 + st48: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof49 + goto _test_eof48 } - st_case_49: -//line plugins/parsers/influx/machine.go:7685 + st_case_48: +//line plugins/parsers/influx/machine.go:7911 switch ( m.data)[( m.p)] { case 9: goto tr155 case 10: - goto st7 + goto tr29 case 11: goto tr156 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr155 case 34: @@ -7702,71 +7928,71 @@ tr148: case 61: goto st6 case 92: - goto st64 + goto st63 } - goto st49 + goto st48 tr180: - ( m.cs) = 50 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 49 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr155: - ( m.cs) = 50 -//line plugins/parsers/influx/machine.go.rl:90 + ( m.cs) = 49 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr149: - ( m.cs) = 50 -//line plugins/parsers/influx/machine.go.rl:90 + ( m.cs) = 49 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again - st50: + st49: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof50 + goto _test_eof49 } - st_case_50: -//line plugins/parsers/influx/machine.go:7757 + st_case_49: +//line plugins/parsers/influx/machine.go:7983 switch ( m.data)[( m.p)] { case 9: - goto st50 + goto st49 case 10: - goto st7 + goto tr29 case 11: goto tr162 case 12: goto st2 case 13: - goto st8 + goto st7 case 32: - goto st50 + goto st49 case 34: goto tr97 case 44: @@ -7778,26 
+8004,26 @@ tr149: } goto tr160 tr160: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st51 - st51: + goto st50 + st50: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof51 + goto _test_eof50 } - st_case_51: -//line plugins/parsers/influx/machine.go:7792 + st_case_50: +//line plugins/parsers/influx/machine.go:8018 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 32: goto st6 case 34: @@ -7807,28 +8033,28 @@ tr160: case 61: goto tr165 case 92: - goto st106 + goto st105 } - goto st51 + goto st50 tr165: -//line plugins/parsers/influx/machine.go.rl:99 +//line plugins/parsers/influx/machine.go.rl:100 - key = m.text() + m.key = m.text() - goto st52 - st52: + goto st51 + st51: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof52 + goto _test_eof51 } - st_case_52: -//line plugins/parsers/influx/machine.go:7825 + st_case_51: +//line plugins/parsers/influx/machine.go:8051 switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 34: goto tr107 case 45: @@ -7842,7 +8068,7 @@ tr165: case 84: goto tr172 case 92: - goto st76 + goto st75 case 102: goto tr173 case 116: @@ -7853,7 +8079,39 @@ tr165: } goto st6 tr167: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st52 + st52: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof52 + } + st_case_52: +//line plugins/parsers/influx/machine.go:8093 + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr31 + case 46: + goto st53 + case 48: + goto st632 + case 92: + goto st75 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st633 + } + goto st6 +tr168: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p @@ -7863,279 +8121,305 @@ tr167: goto _test_eof53 } st_case_53: -//line plugins/parsers/influx/machine.go:7867 +//line plugins/parsers/influx/machine.go:8125 switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 - case 34: - goto tr31 - case 46: - goto st54 - case 48: - goto st621 - case 92: - goto st76 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st622 - } - goto st6 -tr168: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st54 - st54: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof54 - } - st_case_54: -//line plugins/parsers/influx/machine.go:7899 - switch ( m.data)[( m.p)] { - case 10: goto st7 - case 12: - goto tr8 - case 13: - goto st8 case 34: goto tr31 case 92: - goto st76 + goto st75 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st315 + goto st326 } goto st6 - st315: + st326: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof315 + goto _test_eof326 } - st_case_315: + st_case_326: switch ( m.data)[( m.p)] { case 10: - goto tr515 + goto tr534 case 12: - goto tr516 + goto tr535 case 13: - goto tr517 + goto tr536 case 32: - goto tr514 + goto tr533 case 34: goto tr31 case 44: - goto tr518 + goto tr537 case 69: - goto st175 + goto st174 case 92: - goto st76 + goto st75 case 101: - goto st175 + goto st174 } switch { case ( m.data)[( m.p)] > 11: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st315 + goto st326 } case ( m.data)[( m.p)] >= 9: - goto tr514 + goto tr533 } goto st6 -tr902: -//line plugins/parsers/influx/machine.go.rl:19 +tr925: +//line 
plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st316 -tr514: - ( m.cs) = 316 -//line plugins/parsers/influx/machine.go.rl:121 + goto st327 +tr533: + ( m.cs) = 327 +//line plugins/parsers/influx/machine.go.rl:122 - err = m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr908: - ( m.cs) = 316 -//line plugins/parsers/influx/machine.go.rl:103 +tr931: + ( m.cs) = 327 +//line plugins/parsers/influx/machine.go.rl:104 - err = m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr911: - ( m.cs) = 316 -//line plugins/parsers/influx/machine.go.rl:112 +tr934: + ( m.cs) = 327 +//line plugins/parsers/influx/machine.go.rl:113 - err = m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr915: - ( m.cs) = 316 -//line plugins/parsers/influx/machine.go.rl:130 +tr938: + ( m.cs) = 327 +//line plugins/parsers/influx/machine.go.rl:131 - err = m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st316: + st327: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof316 + goto _test_eof327 } - st_case_316: -//line plugins/parsers/influx/machine.go:8013 + st_case_327: +//line plugins/parsers/influx/machine.go:8239 switch ( m.data)[( m.p)] { case 10: - goto st317 + goto tr275 case 12: - goto st261 + goto st272 case 13: - goto st104 + goto st103 case 32: - goto st316 + goto st327 case 34: goto tr31 case 45: - goto tr522 + goto tr541 case 92: - goto st76 + goto st75 } switch { case ( m.data)[( m.p)] > 11: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr523 + goto tr542 } case ( m.data)[( m.p)] >= 9: - goto st316 + goto st327 } goto st6 -tr650: -//line plugins/parsers/influx/machine.go.rl:19 +tr669: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st317 -tr659: - ( m.cs) = 317 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr515: - ( m.cs) = 317 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddFloat(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr722: - ( m.cs) = 317 -//line plugins/parsers/influx/machine.go.rl:103 - - err = m.handler.AddInt(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr729: - ( m.cs) = 317 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddUint(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr736: - ( m.cs) = 317 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddBool(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st317: -//line plugins/parsers/influx/machine.go.rl:157 +//line plugins/parsers/influx/machine.go.rl:158 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line -//line plugins/parsers/influx/machine.go.rl:163 + goto st328 +tr275: +//line plugins/parsers/influx/machine.go.rl:158 - ( m.cs) = 715; + 
m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto st328 +tr534: + ( m.cs) = 328 +//line plugins/parsers/influx/machine.go.rl:122 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr678: + ( m.cs) = 328 +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + +//line plugins/parsers/influx/machine.go.rl:149 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr741: + ( m.cs) = 328 +//line plugins/parsers/influx/machine.go.rl:104 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr748: + ( m.cs) = 328 +//line plugins/parsers/influx/machine.go.rl:113 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr755: + ( m.cs) = 328 +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + +//line plugins/parsers/influx/machine.go.rl:131 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr900: + ( m.cs) = 328 +//line plugins/parsers/influx/machine.go.rl:131 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again + st328: +//line plugins/parsers/influx/machine.go.rl:164 + + m.finishMetric = true + ( m.cs) = 740; {( m.p)++; goto _out } if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof317 + goto _test_eof328 } - st_case_317: -//line plugins/parsers/influx/machine.go:8126 + st_case_328: +//line plugins/parsers/influx/machine.go:8410 switch ( m.data)[( m.p)] { case 9: - goto st166 + goto st165 case 10: - goto st7 + goto tr29 case 11: goto tr339 case 12: - goto st9 - case 13: goto st8 + case 13: + goto st7 case 32: - goto st166 + goto st165 case 34: goto tr118 case 35: @@ -8147,28 +8431,28 @@ tr736: } goto tr337 tr337: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st55 - st55: + goto st54 + st54: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof55 + goto _test_eof54 } - st_case_55: -//line plugins/parsers/influx/machine.go:8161 + st_case_54: +//line plugins/parsers/influx/machine.go:8445 switch ( m.data)[( m.p)] { case 9: goto tr180 case 10: - goto st7 + goto tr29 case 11: goto tr181 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr180 case 34: @@ -8176,39 +8460,39 @@ tr337: case 44: goto tr182 case 92: - goto st157 + goto st156 } - goto st55 + goto st54 tr181: - ( m.cs) = 56 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 55 +//line 
plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st56: + st55: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof56 + goto _test_eof55 } - st_case_56: -//line plugins/parsers/influx/machine.go:8201 + st_case_55: +//line plugins/parsers/influx/machine.go:8485 switch ( m.data)[( m.p)] { case 9: goto tr180 case 10: - goto st7 + goto tr29 case 11: goto tr185 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr180 case 34: @@ -8216,34 +8500,34 @@ tr181: case 44: goto tr182 case 61: - goto st55 + goto st54 case 92: goto tr186 } goto tr184 tr184: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st57 - st57: + goto st56 + st56: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof57 + goto _test_eof56 } - st_case_57: -//line plugins/parsers/influx/machine.go:8236 + st_case_56: +//line plugins/parsers/influx/machine.go:8520 switch ( m.data)[( m.p)] { case 9: goto tr180 case 10: - goto st7 + goto tr29 case 11: goto tr188 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr180 case 34: @@ -8253,56 +8537,56 @@ tr184: case 61: goto tr189 case 92: - goto st154 + goto st153 } - goto st57 + goto st56 tr188: - ( m.cs) = 58 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 57 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr185: - ( m.cs) = 58 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 57 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again - st58: + st57: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof58 + goto _test_eof57 } - st_case_58: -//line plugins/parsers/influx/machine.go:8295 + st_case_57: +//line plugins/parsers/influx/machine.go:8579 switch ( m.data)[( m.p)] { case 9: goto tr180 case 10: - goto st7 + goto tr29 case 11: goto tr185 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr180 case 34: @@ -8316,63 +8600,63 @@ tr185: } goto tr184 tr182: - ( m.cs) = 59 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 58 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr158: - ( m.cs) = 59 -//line plugins/parsers/influx/machine.go.rl:90 + ( m.cs) = 58 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr152: - ( m.cs) = 59 -//line plugins/parsers/influx/machine.go.rl:90 + ( m.cs) = 58 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again - st59: + st58: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof59 + goto _test_eof58 } - st_case_59: -//line plugins/parsers/influx/machine.go:8367 + 
st_case_58: +//line plugins/parsers/influx/machine.go:8651 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto st7 + goto tr29 case 12: goto tr47 case 13: - goto st8 + goto st7 case 32: goto st6 case 34: @@ -8386,26 +8670,26 @@ tr152: } goto tr191 tr191: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st60 - st60: + goto st59 + st59: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof60 + goto _test_eof59 } - st_case_60: -//line plugins/parsers/influx/machine.go:8400 + st_case_59: +//line plugins/parsers/influx/machine.go:8684 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto st7 + goto tr29 case 12: goto tr47 case 13: - goto st8 + goto st7 case 32: goto st6 case 34: @@ -8415,109 +8699,109 @@ tr191: case 61: goto tr196 case 92: - goto st71 + goto st70 } - goto st60 + goto st59 tr192: - ( m.cs) = 318 -//line plugins/parsers/influx/machine.go.rl:19 + ( m.cs) = 329 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:139 +//line plugins/parsers/influx/machine.go.rl:140 - err = m.handler.AddString(key, m.text()) + err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr195: - ( m.cs) = 318 -//line plugins/parsers/influx/machine.go.rl:139 + ( m.cs) = 329 +//line plugins/parsers/influx/machine.go.rl:140 - err = m.handler.AddString(key, m.text()) + err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st318: + st329: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof318 + goto _test_eof329 } - st_case_318: -//line plugins/parsers/influx/machine.go:8457 + st_case_329: +//line plugins/parsers/influx/machine.go:8741 switch ( m.data)[( m.p)] { case 10: - goto st262 + goto tr103 case 11: - goto st319 + goto st330 case 13: - goto st34 + goto st33 case 32: - goto st261 + goto st272 case 44: - goto st37 + goto st36 case 61: goto tr55 case 92: - goto st25 + goto st24 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st261 + goto st272 } - goto st15 - st319: + goto st14 + st330: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof319 + goto _test_eof330 } - st_case_319: + st_case_330: switch ( m.data)[( m.p)] { case 10: - goto st262 + goto tr103 case 11: - goto st319 + goto st330 case 13: - goto st34 + goto st33 case 32: - goto st261 + goto st272 case 44: goto tr198 case 45: - goto tr525 + goto tr544 case 61: goto tr55 case 92: - goto st25 + goto st24 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr526 + goto tr545 } case ( m.data)[( m.p)] >= 9: - goto st261 + goto st272 } - goto st15 -tr525: -//line plugins/parsers/influx/machine.go.rl:19 + goto st14 +tr544: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st61 - st61: + goto st60 + st60: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof61 + goto _test_eof60 } - st_case_61: -//line plugins/parsers/influx/machine.go:8521 + st_case_60: +//line plugins/parsers/influx/machine.go:8805 switch ( m.data)[( m.p)] { case 32: goto tr198 @@ -8526,7 +8810,7 @@ tr525: case 61: goto tr55 case 92: - goto st25 + goto st24 } switch { case ( m.data)[( m.p)] < 12: @@ -8535,408 +8819,39 @@ tr525: } case ( m.data)[( m.p)] > 13: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st320 + goto st331 } default: goto tr198 } - goto st15 -tr526: -//line 
plugins/parsers/influx/machine.go.rl:19 + goto st14 +tr545: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st320 - st320: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof320 - } - st_case_320: -//line plugins/parsers/influx/machine.go:8556 - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr527 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr198 - case 61: - goto tr55 - case 92: - goto st25 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st322 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st15 -tr527: - ( m.cs) = 321 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st321: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof321 - } - st_case_321: -//line plugins/parsers/influx/machine.go:8600 - switch ( m.data)[( m.p)] { - case 10: - goto st262 - case 11: - goto st321 - case 13: - goto st34 - case 32: - goto st266 - case 44: - goto tr2 - case 61: - goto tr55 - case 92: - goto st25 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st266 - } - goto st15 - st322: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof322 - } - st_case_322: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr527 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr198 - case 61: - goto tr55 - case 92: - goto st25 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st323 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st15 - st323: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof323 - } - st_case_323: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr527 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr198 - case 61: - goto tr55 - case 92: - goto st25 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st324 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st15 - st324: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof324 - } - st_case_324: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr527 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr198 - case 61: - goto tr55 - case 92: - goto st25 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st325 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st15 - st325: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof325 - } - st_case_325: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr527 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr198 - case 61: - goto tr55 - case 92: - goto st25 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st326 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st15 - st326: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof326 - } - st_case_326: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr527 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr198 - case 61: - goto tr55 - case 92: - goto st25 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st327 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st15 
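// The actions in this machine drive a Handler callback for every token they
// recognize: SetMeasurement for the series name, AddTag(m.key, m.text()) for
// each tag, AddFloat/AddInt/AddUint/AddBool/AddString for fields, and
// SetTimestamp for the trailing timestamp. Below is a minimal runnable sketch
// of that flow, assuming only the method shapes visible in the actions above
// (each takes the []byte token produced by m.text() and returns an error);
// the printHandler type and the sample line in main are illustrative only,
// not part of the generated machine.

package main

import "fmt"

// Handler receives each token as the state machine recognizes it; only the
// methods exercised by the sample line are sketched here.
type Handler interface {
	SetMeasurement(name []byte) error
	AddTag(key, value []byte) error
	AddFloat(key, value []byte) error
	SetTimestamp(ts []byte) error
}

// printHandler echoes every callback, standing in for the real metric builder.
type printHandler struct{}

func (printHandler) SetMeasurement(name []byte) error {
	fmt.Printf("measurement=%s\n", name)
	return nil
}

func (printHandler) AddTag(key, value []byte) error {
	fmt.Printf("tag %s=%s\n", key, value)
	return nil
}

func (printHandler) AddFloat(key, value []byte) error {
	fmt.Printf("field %s=%s\n", key, value)
	return nil
}

func (printHandler) SetTimestamp(ts []byte) error {
	fmt.Printf("timestamp=%s\n", ts)
	return nil
}

func main() {
	// For the line `cpu,host=a usage=0.5 1465839830100400200` the machine
	// fires the callbacks in this order; when a callback returns an error,
	// the actions above back up one byte with (m.p)--, record the error
	// state (m.cs = 258 in this revision), and exit via _out.
	var h Handler = printHandler{}
	h.SetMeasurement([]byte("cpu"))
	h.AddTag([]byte("host"), []byte("a"))
	h.AddFloat([]byte("usage"), []byte("0.5"))
	h.SetTimestamp([]byte("1465839830100400200"))
}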
- st327: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof327 - } - st_case_327: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr527 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr198 - case 61: - goto tr55 - case 92: - goto st25 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st328 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st15 - st328: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof328 - } - st_case_328: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr527 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr198 - case 61: - goto tr55 - case 92: - goto st25 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st329 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st15 - st329: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof329 - } - st_case_329: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr527 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr198 - case 61: - goto tr55 - case 92: - goto st25 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st330 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st15 - st330: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof330 - } - st_case_330: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr527 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr198 - case 61: - goto tr55 - case 92: - goto st25 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st331 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st15 + goto st331 st331: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof331 } st_case_331: +//line plugins/parsers/influx/machine.go:8840 switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr527 + goto tr546 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr198 case 61: goto tr55 case 92: - goto st25 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st332 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st15 - st332: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof332 - } - st_case_332: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr527 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr198 - case 61: - goto tr55 - case 92: - goto st25 + goto st24 } switch { case ( m.data)[( m.p)] > 12: @@ -8944,9 +8859,48 @@ tr527: goto st333 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 } - goto st15 + goto st14 +tr546: + ( m.cs) = 332 +//line plugins/parsers/influx/machine.go.rl:149 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st332: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof332 + } + st_case_332: +//line plugins/parsers/influx/machine.go:8884 + switch ( m.data)[( m.p)] { + case 10: + goto tr103 + case 11: + goto st332 + case 13: + goto st33 + case 32: + goto st277 + case 44: + goto tr2 + case 61: + goto tr55 + case 92: + goto st24 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st277 + } + goto st14 st333: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof333 @@ -8954,19 +8908,19 @@ tr527: st_case_333: 
switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr527 + goto tr546 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr198 case 61: goto tr55 case 92: - goto st25 + goto st24 } switch { case ( m.data)[( m.p)] > 12: @@ -8974,9 +8928,9 @@ tr527: goto st334 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 } - goto st15 + goto st14 st334: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof334 @@ -8984,19 +8938,19 @@ tr527: st_case_334: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr527 + goto tr546 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr198 case 61: goto tr55 case 92: - goto st25 + goto st24 } switch { case ( m.data)[( m.p)] > 12: @@ -9004,9 +8958,9 @@ tr527: goto st335 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 } - goto st15 + goto st14 st335: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof335 @@ -9014,19 +8968,19 @@ tr527: st_case_335: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr527 + goto tr546 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr198 case 61: goto tr55 case 92: - goto st25 + goto st24 } switch { case ( m.data)[( m.p)] > 12: @@ -9034,9 +8988,9 @@ tr527: goto st336 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 } - goto st15 + goto st14 st336: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof336 @@ -9044,19 +8998,19 @@ tr527: st_case_336: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr527 + goto tr546 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr198 case 61: goto tr55 case 92: - goto st25 + goto st24 } switch { case ( m.data)[( m.p)] > 12: @@ -9064,9 +9018,9 @@ tr527: goto st337 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 } - goto st15 + goto st14 st337: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof337 @@ -9074,19 +9028,19 @@ tr527: st_case_337: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr527 + goto tr546 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr198 case 61: goto tr55 case 92: - goto st25 + goto st24 } switch { case ( m.data)[( m.p)] > 12: @@ -9094,9 +9048,9 @@ tr527: goto st338 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 } - goto st15 + goto st14 st338: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof338 @@ -9104,19 +9058,19 @@ tr527: st_case_338: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr527 + goto tr546 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr198 case 61: goto tr55 case 92: - goto st25 + goto st24 } switch { case ( m.data)[( m.p)] > 12: @@ -9124,9 +9078,9 @@ tr527: goto st339 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 } - goto st15 + goto st14 st339: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof339 @@ -9134,45 +9088,375 @@ tr527: st_case_339: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr527 + goto tr546 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr198 case 61: goto tr55 case 92: - goto st25 + goto st24 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st340 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st14 + st340: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof340 + } + st_case_340: + switch ( m.data)[( m.p)] 
{ + case 10: + goto tr470 + case 11: + goto tr546 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr198 + case 61: + goto tr55 + case 92: + goto st24 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st341 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st14 + st341: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof341 + } + st_case_341: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr546 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr198 + case 61: + goto tr55 + case 92: + goto st24 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st342 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st14 + st342: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof342 + } + st_case_342: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr546 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr198 + case 61: + goto tr55 + case 92: + goto st24 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st343 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st14 + st343: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof343 + } + st_case_343: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr546 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr198 + case 61: + goto tr55 + case 92: + goto st24 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st344 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st14 + st344: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof344 + } + st_case_344: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr546 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr198 + case 61: + goto tr55 + case 92: + goto st24 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st345 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st14 + st345: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof345 + } + st_case_345: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr546 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr198 + case 61: + goto tr55 + case 92: + goto st24 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st346 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st14 + st346: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof346 + } + st_case_346: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr546 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr198 + case 61: + goto tr55 + case 92: + goto st24 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st347 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st14 + st347: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof347 + } + st_case_347: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr546 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr198 + case 61: + goto tr55 + case 92: + goto st24 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st348 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + 
goto st14 + st348: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof348 + } + st_case_348: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr546 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr198 + case 61: + goto tr55 + case 92: + goto st24 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st349 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st14 + st349: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof349 + } + st_case_349: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr546 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr198 + case 61: + goto tr55 + case 92: + goto st24 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st350 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st14 + st350: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof350 + } + st_case_350: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr546 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr198 + case 61: + goto tr55 + case 92: + goto st24 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr450 + goto tr469 } - goto st15 + goto st14 tr196: -//line plugins/parsers/influx/machine.go.rl:86 +//line plugins/parsers/influx/machine.go.rl:87 - key = m.text() + m.key = m.text() - goto st62 - st62: + goto st61 + st61: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof62 + goto _test_eof61 } - st_case_62: -//line plugins/parsers/influx/machine.go:9167 + st_case_61: +//line plugins/parsers/influx/machine.go:9451 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto st7 + goto tr29 case 12: goto tr47 case 13: - goto st8 + goto st7 case 32: goto st6 case 34: @@ -9186,185 +9470,185 @@ tr196: } goto tr148 tr151: - ( m.cs) = 340 -//line plugins/parsers/influx/machine.go.rl:19 + ( m.cs) = 351 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:139 +//line plugins/parsers/influx/machine.go.rl:140 - err = m.handler.AddString(key, m.text()) + err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr157: - ( m.cs) = 340 -//line plugins/parsers/influx/machine.go.rl:139 + ( m.cs) = 351 +//line plugins/parsers/influx/machine.go.rl:140 - err = m.handler.AddString(key, m.text()) + err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st340: + st351: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof340 + goto _test_eof351 } - st_case_340: -//line plugins/parsers/influx/machine.go:9224 + st_case_351: +//line plugins/parsers/influx/machine.go:9508 switch ( m.data)[( m.p)] { case 10: - goto st262 + goto tr103 case 11: - goto tr548 + goto tr567 case 13: - goto st34 + goto st33 case 32: - goto tr547 + goto tr566 case 44: - goto tr549 + goto tr568 case 61: goto tr132 case 92: - goto st23 + goto st22 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr547 + goto tr566 } - goto st17 -tr548: - ( m.cs) = 341 -//line plugins/parsers/influx/machine.go.rl:90 + goto st16 +tr567: + ( m.cs) = 352 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr716: - ( 
m.cs) = 341 -//line plugins/parsers/influx/machine.go.rl:90 +tr735: + ( m.cs) = 352 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:121 +//line plugins/parsers/influx/machine.go.rl:122 - err = m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr926: - ( m.cs) = 341 -//line plugins/parsers/influx/machine.go.rl:90 +tr953: + ( m.cs) = 352 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:103 +//line plugins/parsers/influx/machine.go.rl:104 - err = m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr931: - ( m.cs) = 341 -//line plugins/parsers/influx/machine.go.rl:90 +tr958: + ( m.cs) = 352 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:112 +//line plugins/parsers/influx/machine.go.rl:113 - err = m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr936: - ( m.cs) = 341 -//line plugins/parsers/influx/machine.go.rl:90 +tr963: + ( m.cs) = 352 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:130 +//line plugins/parsers/influx/machine.go.rl:131 - err = m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st341: + st352: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof341 + goto _test_eof352 } - st_case_341: -//line plugins/parsers/influx/machine.go:9355 + st_case_352: +//line plugins/parsers/influx/machine.go:9639 switch ( m.data)[( m.p)] { case 10: - goto st262 + goto tr103 case 11: - goto tr550 + goto tr569 case 13: - goto st34 + goto st33 case 32: - goto tr547 + goto tr566 case 44: goto tr62 case 45: - goto tr551 + goto tr570 case 61: goto tr132 case 92: @@ -9373,61 +9657,61 @@ tr936: switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr552 + goto tr571 } case ( m.data)[( m.p)] >= 9: - goto tr547 + goto tr566 } goto tr64 -tr575: - ( m.cs) = 342 -//line plugins/parsers/influx/machine.go.rl:90 +tr594: + ( m.cs) = 353 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr550: - ( m.cs) = 342 -//line plugins/parsers/influx/machine.go.rl:90 +tr569: + ( m.cs) = 353 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = 
m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again - st342: + st353: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof342 + goto _test_eof353 } - st_case_342: -//line plugins/parsers/influx/machine.go:9418 + st_case_353: +//line plugins/parsers/influx/machine.go:9702 switch ( m.data)[( m.p)] { case 10: - goto st262 + goto tr103 case 11: - goto tr550 + goto tr569 case 13: - goto st34 + goto st33 case 32: - goto tr547 + goto tr566 case 44: goto tr62 case 45: - goto tr551 + goto tr570 case 61: goto tr12 case 92: @@ -9436,24 +9720,24 @@ tr550: switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr552 + goto tr571 } case ( m.data)[( m.p)] >= 9: - goto tr547 + goto tr566 } goto tr64 -tr551: -//line plugins/parsers/influx/machine.go.rl:19 +tr570: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st63 - st63: + goto st62 + st62: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof63 + goto _test_eof62 } - st_case_63: -//line plugins/parsers/influx/machine.go:9457 + st_case_62: +//line plugins/parsers/influx/machine.go:9741 switch ( m.data)[( m.p)] { case 10: goto tr132 @@ -9468,109 +9752,109 @@ tr551: case 61: goto tr12 case 92: - goto st21 + goto st20 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st343 + goto st354 } case ( m.data)[( m.p)] >= 9: goto tr60 } - goto st19 -tr552: -//line plugins/parsers/influx/machine.go.rl:19 + goto st18 +tr571: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st343 - st343: + goto st354 + st354: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof343 + goto _test_eof354 } - st_case_343: -//line plugins/parsers/influx/machine.go:9494 + st_case_354: +//line plugins/parsers/influx/machine.go:9778 switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr554 + goto tr573 case 13: - goto tr453 + goto tr472 case 32: - goto tr553 + goto tr572 case 44: goto tr62 case 61: goto tr12 case 92: - goto st21 + goto st20 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st345 + goto st356 } case ( m.data)[( m.p)] >= 9: - goto tr553 + goto tr572 } - goto st19 -tr557: - ( m.cs) = 344 -//line plugins/parsers/influx/machine.go.rl:90 + goto st18 +tr576: + ( m.cs) = 355 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again -tr554: - ( m.cs) = 344 -//line plugins/parsers/influx/machine.go.rl:90 +tr573: + ( m.cs) = 355 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:148 +//line plugins/parsers/influx/machine.go.rl:149 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st344: + st355: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof344 + goto _test_eof355 } - st_case_344: -//line plugins/parsers/influx/machine.go:9565 + st_case_355: 
+//line plugins/parsers/influx/machine.go:9849 switch ( m.data)[( m.p)] { case 10: - goto st262 + goto tr103 case 11: - goto tr557 + goto tr576 case 13: - goto st34 + goto st33 case 32: - goto tr556 + goto tr575 case 44: goto tr62 case 61: @@ -9579,339 +9863,9 @@ tr554: goto tr66 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr556 + goto tr575 } goto tr64 - st345: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof345 - } - st_case_345: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr554 - case 13: - goto tr453 - case 32: - goto tr553 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st346 - } - case ( m.data)[( m.p)] >= 9: - goto tr553 - } - goto st19 - st346: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof346 - } - st_case_346: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr554 - case 13: - goto tr453 - case 32: - goto tr553 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st347 - } - case ( m.data)[( m.p)] >= 9: - goto tr553 - } - goto st19 - st347: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof347 - } - st_case_347: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr554 - case 13: - goto tr453 - case 32: - goto tr553 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st348 - } - case ( m.data)[( m.p)] >= 9: - goto tr553 - } - goto st19 - st348: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof348 - } - st_case_348: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr554 - case 13: - goto tr453 - case 32: - goto tr553 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st349 - } - case ( m.data)[( m.p)] >= 9: - goto tr553 - } - goto st19 - st349: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof349 - } - st_case_349: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr554 - case 13: - goto tr453 - case 32: - goto tr553 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st350 - } - case ( m.data)[( m.p)] >= 9: - goto tr553 - } - goto st19 - st350: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof350 - } - st_case_350: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr554 - case 13: - goto tr453 - case 32: - goto tr553 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st351 - } - case ( m.data)[( m.p)] >= 9: - goto tr553 - } - goto st19 - st351: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof351 - } - st_case_351: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr554 - case 13: - goto tr453 - case 32: - goto tr553 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st352 - } - case ( m.data)[( m.p)] >= 9: - 
goto tr553 - } - goto st19 - st352: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof352 - } - st_case_352: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr554 - case 13: - goto tr453 - case 32: - goto tr553 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st353 - } - case ( m.data)[( m.p)] >= 9: - goto tr553 - } - goto st19 - st353: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof353 - } - st_case_353: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr554 - case 13: - goto tr453 - case 32: - goto tr553 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st354 - } - case ( m.data)[( m.p)] >= 9: - goto tr553 - } - goto st19 - st354: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof354 - } - st_case_354: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr554 - case 13: - goto tr453 - case 32: - goto tr553 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st355 - } - case ( m.data)[( m.p)] >= 9: - goto tr553 - } - goto st19 - st355: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof355 - } - st_case_355: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr554 - case 13: - goto tr453 - case 32: - goto tr553 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st356 - } - case ( m.data)[( m.p)] >= 9: - goto tr553 - } - goto st19 st356: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof356 @@ -9919,19 +9873,19 @@ tr554: st_case_356: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr554 + goto tr573 case 13: - goto tr453 + goto tr472 case 32: - goto tr553 + goto tr572 case 44: goto tr62 case 61: goto tr12 case 92: - goto st21 + goto st20 } switch { case ( m.data)[( m.p)] > 12: @@ -9939,9 +9893,9 @@ tr554: goto st357 } case ( m.data)[( m.p)] >= 9: - goto tr553 + goto tr572 } - goto st19 + goto st18 st357: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof357 @@ -9949,19 +9903,19 @@ tr554: st_case_357: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr554 + goto tr573 case 13: - goto tr453 + goto tr472 case 32: - goto tr553 + goto tr572 case 44: goto tr62 case 61: goto tr12 case 92: - goto st21 + goto st20 } switch { case ( m.data)[( m.p)] > 12: @@ -9969,9 +9923,9 @@ tr554: goto st358 } case ( m.data)[( m.p)] >= 9: - goto tr553 + goto tr572 } - goto st19 + goto st18 st358: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof358 @@ -9979,19 +9933,19 @@ tr554: st_case_358: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr554 + goto tr573 case 13: - goto tr453 + goto tr472 case 32: - goto tr553 + goto tr572 case 44: goto tr62 case 61: goto tr12 case 92: - goto st21 + goto st20 } switch { case ( m.data)[( m.p)] > 12: @@ -9999,9 +9953,9 @@ tr554: goto st359 } case ( m.data)[( m.p)] >= 9: - goto tr553 + goto tr572 } - goto st19 + goto st18 st359: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof359 @@ -10009,19 +9963,19 @@ tr554: st_case_359: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr554 
+ goto tr573 case 13: - goto tr453 + goto tr472 case 32: - goto tr553 + goto tr572 case 44: goto tr62 case 61: goto tr12 case 92: - goto st21 + goto st20 } switch { case ( m.data)[( m.p)] > 12: @@ -10029,9 +9983,9 @@ tr554: goto st360 } case ( m.data)[( m.p)] >= 9: - goto tr553 + goto tr572 } - goto st19 + goto st18 st360: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof360 @@ -10039,19 +9993,19 @@ tr554: st_case_360: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr554 + goto tr573 case 13: - goto tr453 + goto tr472 case 32: - goto tr553 + goto tr572 case 44: goto tr62 case 61: goto tr12 case 92: - goto st21 + goto st20 } switch { case ( m.data)[( m.p)] > 12: @@ -10059,9 +10013,9 @@ tr554: goto st361 } case ( m.data)[( m.p)] >= 9: - goto tr553 + goto tr572 } - goto st19 + goto st18 st361: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof361 @@ -10069,19 +10023,19 @@ tr554: st_case_361: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr554 + goto tr573 case 13: - goto tr453 + goto tr472 case 32: - goto tr553 + goto tr572 case 44: goto tr62 case 61: goto tr12 case 92: - goto st21 + goto st20 } switch { case ( m.data)[( m.p)] > 12: @@ -10089,9 +10043,9 @@ tr554: goto st362 } case ( m.data)[( m.p)] >= 9: - goto tr553 + goto tr572 } - goto st19 + goto st18 st362: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof362 @@ -10099,41 +10053,371 @@ tr554: st_case_362: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr554 + goto tr573 case 13: - goto tr453 + goto tr472 case 32: - goto tr553 + goto tr572 case 44: goto tr62 case 61: goto tr12 case 92: - goto st21 + goto st20 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st363 + } + case ( m.data)[( m.p)] >= 9: + goto tr572 + } + goto st18 + st363: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof363 + } + st_case_363: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr573 + case 13: + goto tr472 + case 32: + goto tr572 + case 44: + goto tr62 + case 61: + goto tr12 + case 92: + goto st20 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st364 + } + case ( m.data)[( m.p)] >= 9: + goto tr572 + } + goto st18 + st364: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof364 + } + st_case_364: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr573 + case 13: + goto tr472 + case 32: + goto tr572 + case 44: + goto tr62 + case 61: + goto tr12 + case 92: + goto st20 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st365 + } + case ( m.data)[( m.p)] >= 9: + goto tr572 + } + goto st18 + st365: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof365 + } + st_case_365: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr573 + case 13: + goto tr472 + case 32: + goto tr572 + case 44: + goto tr62 + case 61: + goto tr12 + case 92: + goto st20 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st366 + } + case ( m.data)[( m.p)] >= 9: + goto tr572 + } + goto st18 + st366: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof366 + } + st_case_366: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr573 + case 13: + goto tr472 + case 32: + goto tr572 + case 44: + goto tr62 + case 61: + goto tr12 + case 92: + goto st20 + } + switch { + case ( m.data)[( m.p)] > 12: + 
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st367 + } + case ( m.data)[( m.p)] >= 9: + goto tr572 + } + goto st18 + st367: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof367 + } + st_case_367: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr573 + case 13: + goto tr472 + case 32: + goto tr572 + case 44: + goto tr62 + case 61: + goto tr12 + case 92: + goto st20 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st368 + } + case ( m.data)[( m.p)] >= 9: + goto tr572 + } + goto st18 + st368: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof368 + } + st_case_368: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr573 + case 13: + goto tr472 + case 32: + goto tr572 + case 44: + goto tr62 + case 61: + goto tr12 + case 92: + goto st20 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st369 + } + case ( m.data)[( m.p)] >= 9: + goto tr572 + } + goto st18 + st369: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof369 + } + st_case_369: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr573 + case 13: + goto tr472 + case 32: + goto tr572 + case 44: + goto tr62 + case 61: + goto tr12 + case 92: + goto st20 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st370 + } + case ( m.data)[( m.p)] >= 9: + goto tr572 + } + goto st18 + st370: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof370 + } + st_case_370: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr573 + case 13: + goto tr472 + case 32: + goto tr572 + case 44: + goto tr62 + case 61: + goto tr12 + case 92: + goto st20 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st371 + } + case ( m.data)[( m.p)] >= 9: + goto tr572 + } + goto st18 + st371: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof371 + } + st_case_371: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr573 + case 13: + goto tr472 + case 32: + goto tr572 + case 44: + goto tr62 + case 61: + goto tr12 + case 92: + goto st20 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st372 + } + case ( m.data)[( m.p)] >= 9: + goto tr572 + } + goto st18 + st372: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof372 + } + st_case_372: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr573 + case 13: + goto tr472 + case 32: + goto tr572 + case 44: + goto tr62 + case 61: + goto tr12 + case 92: + goto st20 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st373 + } + case ( m.data)[( m.p)] >= 9: + goto tr572 + } + goto st18 + st373: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof373 + } + st_case_373: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr573 + case 13: + goto tr472 + case 32: + goto tr572 + case 44: + goto tr62 + case 61: + goto tr12 + case 92: + goto st20 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr553 + goto tr572 } - goto st19 + goto st18 tr153: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st64 - st64: + goto st63 + st63: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof64 + goto _test_eof63 } - st_case_64: -//line 
plugins/parsers/influx/machine.go:10132 + st_case_63: +//line plugins/parsers/influx/machine.go:10416 switch ( m.data)[( m.p)] { case 34: - goto st49 + goto st48 case 92: - goto st65 + goto st64 } switch { case ( m.data)[( m.p)] > 10: @@ -10143,27 +10427,27 @@ tr153: case ( m.data)[( m.p)] >= 9: goto tr47 } - goto st17 - st65: -//line plugins/parsers/influx/machine.go.rl:234 + goto st16 + st64: +//line plugins/parsers/influx/machine.go.rl:240 ( m.p)-- if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof65 + goto _test_eof64 } - st_case_65: -//line plugins/parsers/influx/machine.go:10156 + st_case_64: +//line plugins/parsers/influx/machine.go:10440 switch ( m.data)[( m.p)] { case 9: goto tr155 case 10: - goto st7 + goto tr29 case 11: goto tr156 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr155 case 34: @@ -10173,56 +10457,56 @@ tr153: case 61: goto st6 case 92: - goto st64 + goto st63 } - goto st49 + goto st48 tr156: - ( m.cs) = 66 -//line plugins/parsers/influx/machine.go.rl:90 + ( m.cs) = 65 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr150: - ( m.cs) = 66 -//line plugins/parsers/influx/machine.go.rl:90 + ( m.cs) = 65 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again - st66: + st65: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof66 + goto _test_eof65 } - st_case_66: -//line plugins/parsers/influx/machine.go:10215 + st_case_65: +//line plugins/parsers/influx/machine.go:10499 switch ( m.data)[( m.p)] { case 9: goto tr155 case 10: - goto st7 + goto tr29 case 11: goto tr203 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr155 case 34: @@ -10236,28 +10520,28 @@ tr150: } goto tr202 tr202: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st67 - st67: + goto st66 + st66: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof67 + goto _test_eof66 } - st_case_67: -//line plugins/parsers/influx/machine.go:10250 + st_case_66: +//line plugins/parsers/influx/machine.go:10534 switch ( m.data)[( m.p)] { case 9: goto tr155 case 10: - goto st7 + goto tr29 case 11: goto tr207 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr155 case 34: @@ -10267,56 +10551,56 @@ tr202: case 61: goto tr165 case 92: - goto st69 + goto st68 } - goto st67 + goto st66 tr207: - ( m.cs) = 68 -//line plugins/parsers/influx/machine.go.rl:90 + ( m.cs) = 67 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr203: - ( m.cs) = 68 -//line plugins/parsers/influx/machine.go.rl:90 + ( m.cs) = 67 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again - st68: + st67: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof68 + goto 
_test_eof67 } - st_case_68: -//line plugins/parsers/influx/machine.go:10309 + st_case_67: +//line plugins/parsers/influx/machine.go:10593 switch ( m.data)[( m.p)] { case 9: goto tr155 case 10: - goto st7 + goto tr29 case 11: goto tr203 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr155 case 34: @@ -10330,78 +10614,78 @@ tr203: } goto tr202 tr204: - ( m.cs) = 363 -//line plugins/parsers/influx/machine.go.rl:19 + ( m.cs) = 374 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:139 +//line plugins/parsers/influx/machine.go.rl:140 - err = m.handler.AddString(key, m.text()) + err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr208: - ( m.cs) = 363 -//line plugins/parsers/influx/machine.go.rl:139 + ( m.cs) = 374 +//line plugins/parsers/influx/machine.go.rl:140 - err = m.handler.AddString(key, m.text()) + err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st363: + st374: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof363 + goto _test_eof374 } - st_case_363: -//line plugins/parsers/influx/machine.go:10368 + st_case_374: +//line plugins/parsers/influx/machine.go:10652 switch ( m.data)[( m.p)] { case 10: - goto st262 + goto tr103 case 11: - goto tr575 + goto tr594 case 13: - goto st34 + goto st33 case 32: - goto tr547 + goto tr566 case 44: - goto tr549 + goto tr568 case 61: goto tr12 case 92: - goto st21 + goto st20 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr547 + goto tr566 } - goto st19 + goto st18 tr205: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st69 - st69: + goto st68 + st68: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof69 + goto _test_eof68 } - st_case_69: -//line plugins/parsers/influx/machine.go:10400 + st_case_68: +//line plugins/parsers/influx/machine.go:10684 switch ( m.data)[( m.p)] { case 34: - goto st67 + goto st66 case 92: - goto st70 + goto st69 } switch { case ( m.data)[( m.p)] > 10: @@ -10411,27 +10695,27 @@ tr205: case ( m.data)[( m.p)] >= 9: goto tr47 } - goto st19 - st70: -//line plugins/parsers/influx/machine.go.rl:234 + goto st18 + st69: +//line plugins/parsers/influx/machine.go.rl:240 ( m.p)-- if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof70 + goto _test_eof69 } - st_case_70: -//line plugins/parsers/influx/machine.go:10424 + st_case_69: +//line plugins/parsers/influx/machine.go:10708 switch ( m.data)[( m.p)] { case 9: goto tr155 case 10: - goto st7 + goto tr29 case 11: goto tr207 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr155 case 34: @@ -10441,26 +10725,26 @@ tr205: case 61: goto tr165 case 92: - goto st69 + goto st68 } - goto st67 + goto st66 tr193: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st71 - st71: + goto st70 + st70: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof71 + goto _test_eof70 } - st_case_71: -//line plugins/parsers/influx/machine.go:10459 + st_case_70: +//line plugins/parsers/influx/machine.go:10743 switch ( m.data)[( m.p)] { case 34: - goto st60 + goto st59 case 92: - goto st72 + goto st71 } switch { case ( m.data)[( m.p)] > 10: @@ -10470,25 +10754,25 @@ tr193: case ( m.data)[( m.p)] >= 9: goto tr47 } - goto st15 - st72: -//line plugins/parsers/influx/machine.go.rl:234 + goto st14 + st71: +//line 
plugins/parsers/influx/machine.go.rl:240 ( m.p)-- if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof72 + goto _test_eof71 } - st_case_72: -//line plugins/parsers/influx/machine.go:10483 + st_case_71: +//line plugins/parsers/influx/machine.go:10767 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto st7 + goto tr29 case 12: goto tr47 case 13: - goto st8 + goto st7 case 32: goto st6 case 34: @@ -10498,42 +10782,42 @@ tr193: case 61: goto tr196 case 92: - goto st71 + goto st70 } - goto st60 + goto st59 tr189: -//line plugins/parsers/influx/machine.go.rl:99 +//line plugins/parsers/influx/machine.go.rl:100 - key = m.text() + m.key = m.text() - goto st73 + goto st72 tr346: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:99 +//line plugins/parsers/influx/machine.go.rl:100 - key = m.text() + m.key = m.text() - goto st73 - st73: + goto st72 + st72: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof73 + goto _test_eof72 } - st_case_73: -//line plugins/parsers/influx/machine.go:10526 + st_case_72: +//line plugins/parsers/influx/machine.go:10810 switch ( m.data)[( m.p)] { case 9: goto tr180 case 10: - goto st7 + goto tr29 case 11: goto tr181 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr180 case 34: @@ -10551,7 +10835,7 @@ tr346: case 84: goto tr218 case 92: - goto st157 + goto st156 case 102: goto tr219 case 116: @@ -10560,431 +10844,542 @@ tr346: if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto tr216 } - goto st55 + goto st54 tr212: - ( m.cs) = 364 -//line plugins/parsers/influx/machine.go.rl:139 + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:140 - err = m.handler.AddString(key, m.text()) + err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st364: + st375: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof364 + goto _test_eof375 } - st_case_364: -//line plugins/parsers/influx/machine.go:10583 + st_case_375: +//line plugins/parsers/influx/machine.go:10867 switch ( m.data)[( m.p)] { case 9: - goto tr576 + goto tr595 case 10: - goto tr475 + goto tr494 case 11: - goto tr577 + goto tr596 case 12: - goto tr482 + goto tr501 case 13: - goto tr476 + goto tr495 case 32: - goto tr576 + goto tr595 case 34: goto tr85 case 44: - goto tr578 + goto tr597 case 92: goto tr87 } goto tr82 -tr607: - ( m.cs) = 365 -//line plugins/parsers/influx/machine.go.rl:77 +tr626: + ( m.cs) = 376 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr576: - ( m.cs) = 365 -//line plugins/parsers/influx/machine.go.rl:77 +tr595: + ( m.cs) = 376 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again -tr749: - ( m.cs) = 365 -//line plugins/parsers/influx/machine.go.rl:90 +tr769: + ( m.cs) = 376 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr619: - ( m.cs) = 365 -//line plugins/parsers/influx/machine.go.rl:77 +tr638: + ( m.cs) = 376 +//line 
plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:121 +//line plugins/parsers/influx/machine.go.rl:122 - err = m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr745: - ( m.cs) = 365 -//line plugins/parsers/influx/machine.go.rl:90 +tr764: + ( m.cs) = 376 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:121 +//line plugins/parsers/influx/machine.go.rl:122 - err = m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr777: - ( m.cs) = 365 -//line plugins/parsers/influx/machine.go.rl:90 +tr797: + ( m.cs) = 376 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:103 +//line plugins/parsers/influx/machine.go.rl:104 - err = m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr783: - ( m.cs) = 365 -//line plugins/parsers/influx/machine.go.rl:90 +tr803: + ( m.cs) = 376 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:112 +//line plugins/parsers/influx/machine.go.rl:113 - err = m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr789: - ( m.cs) = 365 -//line plugins/parsers/influx/machine.go.rl:90 +tr809: + ( m.cs) = 376 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:130 +//line plugins/parsers/influx/machine.go.rl:131 - err = m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr802: - ( m.cs) = 365 -//line plugins/parsers/influx/machine.go.rl:77 +tr822: + ( m.cs) = 376 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:103 +//line plugins/parsers/influx/machine.go.rl:104 - err = m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr807: - ( m.cs) = 365 -//line plugins/parsers/influx/machine.go.rl:77 +tr828: + ( m.cs) = 376 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - 
( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:112 +//line plugins/parsers/influx/machine.go.rl:113 - err = m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr812: - ( m.cs) = 365 -//line plugins/parsers/influx/machine.go.rl:77 +tr834: + ( m.cs) = 376 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:130 +//line plugins/parsers/influx/machine.go.rl:131 - err = m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st365: + st376: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof365 + goto _test_eof376 } - st_case_365: -//line plugins/parsers/influx/machine.go:10837 + st_case_376: +//line plugins/parsers/influx/machine.go:11121 switch ( m.data)[( m.p)] { case 9: - goto st365 + goto st376 case 10: - goto st288 + goto tr221 case 11: - goto tr580 + goto tr599 case 12: - goto st290 + goto st301 case 13: - goto st74 + goto st73 case 32: - goto st365 + goto st376 case 34: goto tr97 case 44: goto st6 case 45: - goto tr581 + goto tr600 case 61: goto st6 case 92: goto tr98 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr582 + goto tr601 } goto tr94 -tr580: -//line plugins/parsers/influx/machine.go.rl:19 +tr599: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st366 - st366: + goto st377 + st377: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof366 + goto _test_eof377 } - st_case_366: -//line plugins/parsers/influx/machine.go:10877 + st_case_377: +//line plugins/parsers/influx/machine.go:11161 switch ( m.data)[( m.p)] { case 9: - goto st365 + goto st376 case 10: - goto st288 + goto tr221 case 11: - goto tr580 + goto tr599 case 12: - goto st290 + goto st301 case 13: - goto st74 + goto st73 case 32: - goto st365 + goto st376 case 34: goto tr97 case 44: goto st6 case 45: - goto tr581 + goto tr600 case 61: goto tr101 case 92: goto tr98 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr582 + goto tr601 } goto tr94 -tr476: -//line plugins/parsers/influx/machine.go.rl:19 +tr495: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st74 -tr586: - ( m.cs) = 74 -//line plugins/parsers/influx/machine.go.rl:148 + goto st73 +tr605: + ( m.cs) = 73 +//line plugins/parsers/influx/machine.go.rl:149 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr623: - ( m.cs) = 74 -//line plugins/parsers/influx/machine.go.rl:121 +tr642: + ( m.cs) = 73 +//line plugins/parsers/influx/machine.go.rl:122 - err = m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr780: - ( m.cs) = 74 -//line plugins/parsers/influx/machine.go.rl:103 +tr800: + ( m.cs) = 73 +//line plugins/parsers/influx/machine.go.rl:104 - err = m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr786: - ( m.cs) = 74 -//line plugins/parsers/influx/machine.go.rl:112 +tr806: + ( m.cs) = 73 +//line 
plugins/parsers/influx/machine.go.rl:113 - err = m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr792: - ( m.cs) = 74 -//line plugins/parsers/influx/machine.go.rl:130 +tr812: + ( m.cs) = 73 +//line plugins/parsers/influx/machine.go.rl:131 - err = m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again + st73: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof73 + } + st_case_73: +//line plugins/parsers/influx/machine.go:11266 + if ( m.data)[( m.p)] == 10 { + goto tr221 + } + goto tr8 +tr600: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st74 st74: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof74 } st_case_74: -//line plugins/parsers/influx/machine.go:10982 - if ( m.data)[( m.p)] == 10 { - goto st288 +//line plugins/parsers/influx/machine.go:11282 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr29 + case 12: + goto tr105 + case 13: + goto st7 + case 32: + goto st6 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr101 + case 92: + goto st76 } - goto tr8 -tr581: -//line plugins/parsers/influx/machine.go.rl:19 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st378 + } + goto st32 +tr601: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st378 + st378: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof378 + } + st_case_378: +//line plugins/parsers/influx/machine.go:11318 + switch ( m.data)[( m.p)] { + case 9: + goto tr602 + case 10: + goto tr603 + case 11: + goto tr604 + case 12: + goto tr469 + case 13: + goto tr605 + case 32: + goto tr602 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr101 + case 92: + goto st76 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st381 + } + goto st32 +tr602: + ( m.cs) = 379 +//line plugins/parsers/influx/machine.go.rl:149 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st379: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof379 + } + st_case_379: +//line plugins/parsers/influx/machine.go:11363 + switch ( m.data)[( m.p)] { + case 10: + goto tr221 + case 12: + goto st277 + case 13: + goto st73 + case 32: + goto st379 + case 34: + goto tr31 + case 92: + goto st75 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto st379 + } + goto st6 +tr27: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p @@ -10994,108 +11389,58 @@ tr581: goto _test_eof75 } st_case_75: -//line plugins/parsers/influx/machine.go:10998 +//line plugins/parsers/influx/machine.go:11393 switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto st7 - case 12: - goto tr105 - case 13: - goto st8 - case 32: - goto st6 case 34: - goto tr100 - case 44: goto st6 - case 61: - goto tr101 case 92: - goto st77 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st367 - } - goto st33 -tr582: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st367 - st367: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof367 - } - st_case_367: -//line plugins/parsers/influx/machine.go:11034 - switch ( m.data)[( m.p)] { - case 9: - goto tr583 - case 10: - goto tr584 - case 11: - goto tr585 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr100 - case 
44: goto st6 - case 61: - goto tr101 - case 92: - goto st77 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st370 - } - goto st33 -tr583: - ( m.cs) = 368 -//line plugins/parsers/influx/machine.go.rl:148 + goto tr8 +tr604: + ( m.cs) = 380 +//line plugins/parsers/influx/machine.go.rl:149 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st368: + st380: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof368 + goto _test_eof380 } - st_case_368: -//line plugins/parsers/influx/machine.go:11079 + st_case_380: +//line plugins/parsers/influx/machine.go:11419 switch ( m.data)[( m.p)] { + case 9: + goto st379 case 10: - goto st288 + goto tr221 + case 11: + goto st380 case 12: - goto st266 + goto st277 case 13: - goto st74 + goto st73 case 32: - goto st368 + goto st379 case 34: - goto tr31 + goto tr100 + case 44: + goto st6 + case 61: + goto tr101 case 92: goto st76 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto st368 - } - goto st6 -tr27: -//line plugins/parsers/influx/machine.go.rl:19 + goto st32 +tr98: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p @@ -11105,73 +11450,12 @@ tr27: goto _test_eof76 } st_case_76: -//line plugins/parsers/influx/machine.go:11109 +//line plugins/parsers/influx/machine.go:11454 switch ( m.data)[( m.p)] { case 34: - goto st6 + goto st32 case 92: - goto st6 - } - goto tr8 -tr585: - ( m.cs) = 369 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st369: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof369 - } - st_case_369: -//line plugins/parsers/influx/machine.go:11135 - switch ( m.data)[( m.p)] { - case 9: - goto st368 - case 10: - goto st288 - case 11: - goto st369 - case 12: - goto st266 - case 13: - goto st74 - case 32: - goto st368 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr101 - case 92: - goto st77 - } - goto st33 -tr98: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st77 - st77: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof77 - } - st_case_77: -//line plugins/parsers/influx/machine.go:11170 - switch ( m.data)[( m.p)] { - case 34: - goto st33 - case 92: - goto st33 + goto st32 } switch { case ( m.data)[( m.p)] > 10: @@ -11182,347 +11466,6 @@ tr98: goto tr8 } goto st3 - st370: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof370 - } - st_case_370: - switch ( m.data)[( m.p)] { - case 9: - goto tr583 - case 10: - goto tr584 - case 11: - goto tr585 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr101 - case 92: - goto st77 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st371 - } - goto st33 - st371: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof371 - } - st_case_371: - switch ( m.data)[( m.p)] { - case 9: - goto tr583 - case 10: - goto tr584 - case 11: - goto tr585 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr101 - case 92: - goto st77 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st372 - } - goto st33 - st372: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof372 - } - st_case_372: - switch ( m.data)[( m.p)] { - case 9: - goto tr583 - case 10: - goto tr584 - case 11: - goto tr585 - case 12: - goto tr450 - case 13: - 
goto tr586 - case 32: - goto tr583 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr101 - case 92: - goto st77 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st373 - } - goto st33 - st373: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof373 - } - st_case_373: - switch ( m.data)[( m.p)] { - case 9: - goto tr583 - case 10: - goto tr584 - case 11: - goto tr585 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr101 - case 92: - goto st77 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st374 - } - goto st33 - st374: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof374 - } - st_case_374: - switch ( m.data)[( m.p)] { - case 9: - goto tr583 - case 10: - goto tr584 - case 11: - goto tr585 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr101 - case 92: - goto st77 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st375 - } - goto st33 - st375: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof375 - } - st_case_375: - switch ( m.data)[( m.p)] { - case 9: - goto tr583 - case 10: - goto tr584 - case 11: - goto tr585 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr101 - case 92: - goto st77 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st376 - } - goto st33 - st376: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof376 - } - st_case_376: - switch ( m.data)[( m.p)] { - case 9: - goto tr583 - case 10: - goto tr584 - case 11: - goto tr585 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr101 - case 92: - goto st77 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st377 - } - goto st33 - st377: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof377 - } - st_case_377: - switch ( m.data)[( m.p)] { - case 9: - goto tr583 - case 10: - goto tr584 - case 11: - goto tr585 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr101 - case 92: - goto st77 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st378 - } - goto st33 - st378: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof378 - } - st_case_378: - switch ( m.data)[( m.p)] { - case 9: - goto tr583 - case 10: - goto tr584 - case 11: - goto tr585 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr101 - case 92: - goto st77 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st379 - } - goto st33 - st379: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof379 - } - st_case_379: - switch ( m.data)[( m.p)] { - case 9: - goto tr583 - case 10: - goto tr584 - case 11: - goto tr585 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr101 - case 92: - goto st77 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st380 - } - goto st33 - st380: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof380 - } - st_case_380: - switch ( m.data)[( m.p)] { - case 9: - goto tr583 - case 10: - goto tr584 - case 11: - goto tr585 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 
34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr101 - case 92: - goto st77 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st381 - } - goto st33 st381: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof381 @@ -11530,17 +11473,17 @@ tr98: st_case_381: switch ( m.data)[( m.p)] { case 9: - goto tr583 + goto tr602 case 10: - goto tr584 + goto tr603 case 11: - goto tr585 + goto tr604 case 12: - goto tr450 + goto tr469 case 13: - goto tr586 + goto tr605 case 32: - goto tr583 + goto tr602 case 34: goto tr100 case 44: @@ -11548,12 +11491,12 @@ tr98: case 61: goto tr101 case 92: - goto st77 + goto st76 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st382 } - goto st33 + goto st32 st382: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof382 @@ -11561,17 +11504,17 @@ tr98: st_case_382: switch ( m.data)[( m.p)] { case 9: - goto tr583 + goto tr602 case 10: - goto tr584 + goto tr603 case 11: - goto tr585 + goto tr604 case 12: - goto tr450 + goto tr469 case 13: - goto tr586 + goto tr605 case 32: - goto tr583 + goto tr602 case 34: goto tr100 case 44: @@ -11579,12 +11522,12 @@ tr98: case 61: goto tr101 case 92: - goto st77 + goto st76 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st383 } - goto st33 + goto st32 st383: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof383 @@ -11592,17 +11535,17 @@ tr98: st_case_383: switch ( m.data)[( m.p)] { case 9: - goto tr583 + goto tr602 case 10: - goto tr584 + goto tr603 case 11: - goto tr585 + goto tr604 case 12: - goto tr450 + goto tr469 case 13: - goto tr586 + goto tr605 case 32: - goto tr583 + goto tr602 case 34: goto tr100 case 44: @@ -11610,12 +11553,12 @@ tr98: case 61: goto tr101 case 92: - goto st77 + goto st76 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st384 } - goto st33 + goto st32 st384: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof384 @@ -11623,17 +11566,17 @@ tr98: st_case_384: switch ( m.data)[( m.p)] { case 9: - goto tr583 + goto tr602 case 10: - goto tr584 + goto tr603 case 11: - goto tr585 + goto tr604 case 12: - goto tr450 + goto tr469 case 13: - goto tr586 + goto tr605 case 32: - goto tr583 + goto tr602 case 34: goto tr100 case 44: @@ -11641,12 +11584,12 @@ tr98: case 61: goto tr101 case 92: - goto st77 + goto st76 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st385 } - goto st33 + goto st32 st385: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof385 @@ -11654,17 +11597,17 @@ tr98: st_case_385: switch ( m.data)[( m.p)] { case 9: - goto tr583 + goto tr602 case 10: - goto tr584 + goto tr603 case 11: - goto tr585 + goto tr604 case 12: - goto tr450 + goto tr469 case 13: - goto tr586 + goto tr605 case 32: - goto tr583 + goto tr602 case 34: goto tr100 case 44: @@ -11672,12 +11615,12 @@ tr98: case 61: goto tr101 case 92: - goto st77 + goto st76 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st386 } - goto st33 + goto st32 st386: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof386 @@ -11685,17 +11628,17 @@ tr98: st_case_386: switch ( m.data)[( m.p)] { case 9: - goto tr583 + goto tr602 case 10: - goto tr584 + goto tr603 case 11: - goto tr585 + goto tr604 case 12: - goto tr450 + goto tr469 case 13: - goto tr586 + goto tr605 case 32: - goto tr583 + goto tr602 case 34: goto tr100 case 44: @@ -11703,12 +11646,12 @@ tr98: case 61: goto tr101 case 92: - goto st77 + goto st76 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st387 } - goto st33 + goto st32 st387: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof387 @@ -11716,17 +11659,17 @@ tr98: 
st_case_387: switch ( m.data)[( m.p)] { case 9: - goto tr583 + goto tr602 case 10: - goto tr584 + goto tr603 case 11: - goto tr585 + goto tr604 case 12: - goto tr450 + goto tr469 case 13: - goto tr586 + goto tr605 case 32: - goto tr583 + goto tr602 case 34: goto tr100 case 44: @@ -11734,261 +11677,602 @@ tr98: case 61: goto tr101 case 92: - goto st77 + goto st76 } - goto st33 -tr577: - ( m.cs) = 388 -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto _again -tr621: - ( m.cs) = 388 -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddFloat(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr803: - ( m.cs) = 388 -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:103 - - err = m.handler.AddInt(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr808: - ( m.cs) = 388 -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddUint(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr813: - ( m.cs) = 388 -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddBool(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st388 + } + goto st32 st388: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof388 } st_case_388: -//line plugins/parsers/influx/machine.go:11855 switch ( m.data)[( m.p)] { case 9: - goto tr607 + goto tr602 case 10: - goto st288 + goto tr603 case 11: - goto tr608 + goto tr604 case 12: - goto tr482 + goto tr469 case 13: - goto st74 + goto tr605 case 32: - goto tr607 + goto tr602 case 34: - goto tr124 + goto tr100 case 44: - goto tr92 - case 45: - goto tr609 + goto st6 case 61: - goto st31 + goto tr101 case 92: - goto tr125 + goto st76 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr610 + goto st389 } - goto tr121 -tr608: - ( m.cs) = 389 -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto _again + goto st32 st389: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof389 } st_case_389: -//line plugins/parsers/influx/machine.go:11906 switch ( m.data)[( m.p)] { case 9: - goto tr607 + goto tr602 case 10: - goto st288 + goto tr603 case 11: - goto tr608 + goto tr604 case 12: - goto tr482 + goto tr469 case 13: - goto st74 + goto tr605 case 32: - goto tr607 + goto tr602 + case 34: + goto tr100 + case 
44: + goto st6 + case 61: + goto tr101 + case 92: + goto st76 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st390 + } + goto st32 + st390: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof390 + } + st_case_390: + switch ( m.data)[( m.p)] { + case 9: + goto tr602 + case 10: + goto tr603 + case 11: + goto tr604 + case 12: + goto tr469 + case 13: + goto tr605 + case 32: + goto tr602 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr101 + case 92: + goto st76 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st391 + } + goto st32 + st391: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof391 + } + st_case_391: + switch ( m.data)[( m.p)] { + case 9: + goto tr602 + case 10: + goto tr603 + case 11: + goto tr604 + case 12: + goto tr469 + case 13: + goto tr605 + case 32: + goto tr602 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr101 + case 92: + goto st76 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st392 + } + goto st32 + st392: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof392 + } + st_case_392: + switch ( m.data)[( m.p)] { + case 9: + goto tr602 + case 10: + goto tr603 + case 11: + goto tr604 + case 12: + goto tr469 + case 13: + goto tr605 + case 32: + goto tr602 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr101 + case 92: + goto st76 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st393 + } + goto st32 + st393: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof393 + } + st_case_393: + switch ( m.data)[( m.p)] { + case 9: + goto tr602 + case 10: + goto tr603 + case 11: + goto tr604 + case 12: + goto tr469 + case 13: + goto tr605 + case 32: + goto tr602 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr101 + case 92: + goto st76 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st394 + } + goto st32 + st394: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof394 + } + st_case_394: + switch ( m.data)[( m.p)] { + case 9: + goto tr602 + case 10: + goto tr603 + case 11: + goto tr604 + case 12: + goto tr469 + case 13: + goto tr605 + case 32: + goto tr602 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr101 + case 92: + goto st76 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st395 + } + goto st32 + st395: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof395 + } + st_case_395: + switch ( m.data)[( m.p)] { + case 9: + goto tr602 + case 10: + goto tr603 + case 11: + goto tr604 + case 12: + goto tr469 + case 13: + goto tr605 + case 32: + goto tr602 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr101 + case 92: + goto st76 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st396 + } + goto st32 + st396: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof396 + } + st_case_396: + switch ( m.data)[( m.p)] { + case 9: + goto tr602 + case 10: + goto tr603 + case 11: + goto tr604 + case 12: + goto tr469 + case 13: + goto tr605 + case 32: + goto tr602 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr101 + case 92: + goto st76 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st397 + } + goto st32 + st397: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof397 + } + st_case_397: + switch ( m.data)[( m.p)] { + case 9: + goto tr602 + case 10: + goto tr603 + case 11: + goto tr604 + case 12: + goto tr469 + case 13: + goto tr605 + case 32: + goto tr602 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr101 + 
case 92: + goto st76 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st398 + } + goto st32 + st398: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof398 + } + st_case_398: + switch ( m.data)[( m.p)] { + case 9: + goto tr602 + case 10: + goto tr603 + case 11: + goto tr604 + case 12: + goto tr469 + case 13: + goto tr605 + case 32: + goto tr602 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr101 + case 92: + goto st76 + } + goto st32 +tr596: + ( m.cs) = 399 +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto _again +tr640: + ( m.cs) = 399 +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:122 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr824: + ( m.cs) = 399 +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:104 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr830: + ( m.cs) = 399 +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:113 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr835: + ( m.cs) = 399 +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:131 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st399: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof399 + } + st_case_399: +//line plugins/parsers/influx/machine.go:12139 + switch ( m.data)[( m.p)] { + case 9: + goto tr626 + case 10: + goto tr221 + case 11: + goto tr627 + case 12: + goto tr501 + case 13: + goto st73 + case 32: + goto tr626 case 34: goto tr124 case 44: goto tr92 case 45: - goto tr609 + goto tr628 + case 61: + goto st30 + case 92: + goto tr125 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr629 + } + goto tr121 +tr627: + ( m.cs) = 400 +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto _again + st400: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof400 + } + st_case_400: +//line plugins/parsers/influx/machine.go:12190 + switch ( m.data)[( m.p)] { + case 9: + goto tr626 + case 10: + goto tr221 + case 11: + goto tr627 + case 12: + goto tr501 + case 13: + goto st73 + case 32: + goto tr626 + case 34: + goto tr124 + case 44: + goto tr92 + case 45: + goto tr628 case 61: goto tr129 case 92: goto tr125 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto 
tr610 + goto tr629 } goto tr121 tr92: - ( m.cs) = 78 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 77 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr86: - ( m.cs) = 78 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 77 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again tr233: - ( m.cs) = 78 -//line plugins/parsers/influx/machine.go.rl:90 + ( m.cs) = 77 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st78: + st77: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof78 + goto _test_eof77 } - st_case_78: -//line plugins/parsers/influx/machine.go:11983 + st_case_77: +//line plugins/parsers/influx/machine.go:12267 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto st7 + goto tr29 case 12: goto tr47 case 13: - goto st8 + goto st7 case 32: goto st6 case 34: @@ -12002,26 +12286,26 @@ tr233: } goto tr223 tr223: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st79 - st79: + goto st78 + st78: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof79 + goto _test_eof78 } - st_case_79: -//line plugins/parsers/influx/machine.go:12016 + st_case_78: +//line plugins/parsers/influx/machine.go:12300 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto st7 + goto tr29 case 12: goto tr47 case 13: - goto st8 + goto st7 case 32: goto st6 case 34: @@ -12031,30 +12315,30 @@ tr223: case 61: goto tr226 case 92: - goto st89 + goto st88 } - goto st79 + goto st78 tr226: -//line plugins/parsers/influx/machine.go.rl:86 +//line plugins/parsers/influx/machine.go.rl:87 - key = m.text() + m.key = m.text() - goto st80 - st80: + goto st79 + st79: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof80 + goto _test_eof79 } - st_case_80: -//line plugins/parsers/influx/machine.go:12049 + st_case_79: +//line plugins/parsers/influx/machine.go:12333 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto st7 + goto tr29 case 12: goto tr47 case 13: - goto st8 + goto st7 case 32: goto st6 case 34: @@ -12068,28 +12352,28 @@ tr226: } goto tr228 tr228: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st81 - st81: + goto st80 + st80: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof81 + goto _test_eof80 } - st_case_81: -//line plugins/parsers/influx/machine.go:12082 + st_case_80: +//line plugins/parsers/influx/machine.go:12366 switch ( m.data)[( m.p)] { case 9: goto tr231 case 10: - goto st7 + goto tr29 case 11: goto tr232 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr231 case 34: @@ -12099,39 +12383,39 @@ tr228: case 61: goto st6 case 92: - goto st87 + goto st86 } - goto st81 + goto st80 tr232: - ( m.cs) = 82 -//line plugins/parsers/influx/machine.go.rl:90 + ( m.cs) = 81 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st82: + 
st81: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof82 + goto _test_eof81 } - st_case_82: -//line plugins/parsers/influx/machine.go:12124 + st_case_81: +//line plugins/parsers/influx/machine.go:12408 switch ( m.data)[( m.p)] { case 9: goto tr231 case 10: - goto st7 + goto tr29 case 11: goto tr236 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr231 case 34: @@ -12145,28 +12429,28 @@ tr232: } goto tr235 tr235: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st83 - st83: + goto st82 + st82: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof83 + goto _test_eof82 } - st_case_83: -//line plugins/parsers/influx/machine.go:12159 + st_case_82: +//line plugins/parsers/influx/machine.go:12443 switch ( m.data)[( m.p)] { case 9: goto tr231 case 10: - goto st7 + goto tr29 case 11: goto tr239 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr231 case 34: @@ -12176,56 +12460,56 @@ tr235: case 61: goto tr101 case 92: - goto st85 + goto st84 } - goto st83 + goto st82 tr239: - ( m.cs) = 84 -//line plugins/parsers/influx/machine.go.rl:90 + ( m.cs) = 83 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr236: - ( m.cs) = 84 -//line plugins/parsers/influx/machine.go.rl:90 + ( m.cs) = 83 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again - st84: + st83: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof84 + goto _test_eof83 } - st_case_84: -//line plugins/parsers/influx/machine.go:12218 + st_case_83: +//line plugins/parsers/influx/machine.go:12502 switch ( m.data)[( m.p)] { case 9: goto tr231 case 10: - goto st7 + goto tr29 case 11: goto tr236 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr231 case 34: @@ -12239,22 +12523,22 @@ tr236: } goto tr235 tr237: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st85 - st85: + goto st84 + st84: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof85 + goto _test_eof84 } - st_case_85: -//line plugins/parsers/influx/machine.go:12253 + st_case_84: +//line plugins/parsers/influx/machine.go:12537 switch ( m.data)[( m.p)] { case 34: - goto st83 + goto st82 case 92: - goto st86 + goto st85 } switch { case ( m.data)[( m.p)] > 10: @@ -12264,27 +12548,27 @@ tr237: case ( m.data)[( m.p)] >= 9: goto tr47 } - goto st19 - st86: -//line plugins/parsers/influx/machine.go.rl:234 + goto st18 + st85: +//line plugins/parsers/influx/machine.go.rl:240 ( m.p)-- if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof86 + goto _test_eof85 } - st_case_86: -//line plugins/parsers/influx/machine.go:12277 + st_case_85: +//line plugins/parsers/influx/machine.go:12561 switch ( m.data)[( m.p)] { case 9: goto tr231 case 10: - goto st7 + goto tr29 case 11: goto tr239 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr231 case 34: @@ -12294,26 +12578,26 @@ tr237: case 61: goto tr101 case 92: - goto st85 + goto st84 } - goto st83 + goto st82 tr229: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st87 - 
st87: + goto st86 + st86: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof87 + goto _test_eof86 } - st_case_87: -//line plugins/parsers/influx/machine.go:12312 + st_case_86: +//line plugins/parsers/influx/machine.go:12596 switch ( m.data)[( m.p)] { case 34: - goto st81 + goto st80 case 92: - goto st88 + goto st87 } switch { case ( m.data)[( m.p)] > 10: @@ -12323,27 +12607,27 @@ tr229: case ( m.data)[( m.p)] >= 9: goto tr47 } - goto st17 - st88: -//line plugins/parsers/influx/machine.go.rl:234 + goto st16 + st87: +//line plugins/parsers/influx/machine.go.rl:240 ( m.p)-- if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof88 + goto _test_eof87 } - st_case_88: -//line plugins/parsers/influx/machine.go:12336 + st_case_87: +//line plugins/parsers/influx/machine.go:12620 switch ( m.data)[( m.p)] { case 9: goto tr231 case 10: - goto st7 + goto tr29 case 11: goto tr232 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr231 case 34: @@ -12353,26 +12637,26 @@ tr229: case 61: goto st6 case 92: - goto st87 + goto st86 } - goto st81 + goto st80 tr224: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st89 - st89: + goto st88 + st88: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof89 + goto _test_eof88 } - st_case_89: -//line plugins/parsers/influx/machine.go:12371 + st_case_88: +//line plugins/parsers/influx/machine.go:12655 switch ( m.data)[( m.p)] { case 34: - goto st79 + goto st78 case 92: - goto st90 + goto st89 } switch { case ( m.data)[( m.p)] > 10: @@ -12382,25 +12666,25 @@ tr224: case ( m.data)[( m.p)] >= 9: goto tr47 } - goto st15 - st90: -//line plugins/parsers/influx/machine.go.rl:234 + goto st14 + st89: +//line plugins/parsers/influx/machine.go.rl:240 ( m.p)-- if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof90 + goto _test_eof89 } - st_case_90: -//line plugins/parsers/influx/machine.go:12395 + st_case_89: +//line plugins/parsers/influx/machine.go:12679 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto st7 + goto tr29 case 12: goto tr47 case 13: - goto st8 + goto st7 case 32: goto st6 case 34: @@ -12410,32 +12694,32 @@ tr224: case 61: goto tr226 case 92: - goto st89 + goto st88 } - goto st79 -tr609: -//line plugins/parsers/influx/machine.go.rl:19 + goto st78 +tr628: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st91 - st91: + goto st90 + st90: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof91 + goto _test_eof90 } - st_case_91: -//line plugins/parsers/influx/machine.go:12428 + st_case_90: +//line plugins/parsers/influx/machine.go:12712 switch ( m.data)[( m.p)] { case 9: goto tr89 case 10: - goto st7 + goto tr29 case 11: goto tr127 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr89 case 34: @@ -12445,37 +12729,37 @@ tr609: case 61: goto tr129 case 92: - goto st94 + goto st93 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st390 + goto st401 } - goto st42 -tr610: -//line plugins/parsers/influx/machine.go.rl:19 + goto st41 +tr629: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st390 - st390: + goto st401 + st401: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof390 + goto _test_eof401 } - st_case_390: -//line plugins/parsers/influx/machine.go:12466 + st_case_401: +//line plugins/parsers/influx/machine.go:12750 switch ( m.data)[( m.p)] { case 9: - goto tr611 + goto tr630 case 10: - goto tr584 + goto tr603 case 11: - goto tr612 + goto tr631 case 12: - goto tr490 + goto tr509 case 13: - goto tr586 + goto tr605 case 32: - goto tr611 + 
goto tr630 case 34: goto tr128 case 44: @@ -12483,103 +12767,103 @@ tr610: case 61: goto tr129 case 92: - goto st94 + goto st93 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st534 + goto st545 } - goto st42 -tr616: - ( m.cs) = 391 -//line plugins/parsers/influx/machine.go.rl:77 + goto st41 +tr635: + ( m.cs) = 402 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr756: - ( m.cs) = 391 -//line plugins/parsers/influx/machine.go.rl:90 +tr776: + ( m.cs) = 402 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr611: - ( m.cs) = 391 -//line plugins/parsers/influx/machine.go.rl:77 +tr630: + ( m.cs) = 402 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:148 +//line plugins/parsers/influx/machine.go.rl:149 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr753: - ( m.cs) = 391 -//line plugins/parsers/influx/machine.go.rl:90 +tr773: + ( m.cs) = 402 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:148 +//line plugins/parsers/influx/machine.go.rl:149 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st391: + st402: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof391 + goto _test_eof402 } - st_case_391: -//line plugins/parsers/influx/machine.go:12570 + st_case_402: +//line plugins/parsers/influx/machine.go:12854 switch ( m.data)[( m.p)] { case 9: - goto st391 + goto st402 case 10: - goto st288 + goto tr221 case 11: - goto tr615 + goto tr634 case 12: - goto st294 + goto st305 case 13: - goto st74 + goto st73 case 32: - goto st391 + goto st402 case 34: goto tr97 case 44: @@ -12590,31 +12874,31 @@ tr753: goto tr98 } goto tr94 -tr615: -//line plugins/parsers/influx/machine.go.rl:19 +tr634: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st392 - st392: + goto st403 + st403: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof392 + goto _test_eof403 } - st_case_392: -//line plugins/parsers/influx/machine.go:12605 + st_case_403: +//line plugins/parsers/influx/machine.go:12889 switch ( m.data)[( m.p)] { case 9: - goto st391 + goto st402 case 10: - goto st288 + goto tr221 case 11: - goto tr615 + goto tr634 case 12: - goto st294 + goto st305 case 13: - goto st74 + goto st73 case 32: - goto st391 + goto st402 case 34: goto tr97 case 44: @@ -12625,65 +12909,65 @@ tr615: goto tr98 } goto tr94 -tr617: - ( m.cs) = 393 -//line plugins/parsers/influx/machine.go.rl:77 +tr636: + ( m.cs) = 404 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again -tr612: - ( m.cs) = 393 -//line 
plugins/parsers/influx/machine.go.rl:77 +tr631: + ( m.cs) = 404 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:148 +//line plugins/parsers/influx/machine.go.rl:149 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st393: + st404: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof393 + goto _test_eof404 } - st_case_393: -//line plugins/parsers/influx/machine.go:12674 + st_case_404: +//line plugins/parsers/influx/machine.go:12958 switch ( m.data)[( m.p)] { case 9: - goto tr616 + goto tr635 case 10: - goto st288 + goto tr221 case 11: - goto tr617 + goto tr636 case 12: - goto tr495 + goto tr514 case 13: - goto st74 + goto st73 case 32: - goto tr616 + goto tr635 case 34: goto tr124 case 44: @@ -12695,38 +12979,38 @@ tr612: } goto tr121 tr129: -//line plugins/parsers/influx/machine.go.rl:99 +//line plugins/parsers/influx/machine.go.rl:100 - key = m.text() + m.key = m.text() - goto st92 -tr374: -//line plugins/parsers/influx/machine.go.rl:19 + goto st91 +tr383: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:99 +//line plugins/parsers/influx/machine.go.rl:100 - key = m.text() + m.key = m.text() - goto st92 - st92: + goto st91 + st91: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof92 + goto _test_eof91 } - st_case_92: -//line plugins/parsers/influx/machine.go:12719 + st_case_91: +//line plugins/parsers/influx/machine.go:13003 switch ( m.data)[( m.p)] { case 9: goto tr89 case 10: - goto st7 + goto tr29 case 11: goto tr90 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr89 case 34: @@ -12744,7 +13028,7 @@ tr374: case 84: goto tr250 case 92: - goto st142 + goto st141 case 102: goto tr251 case 116: @@ -12753,54 +13037,54 @@ tr374: if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto tr248 } - goto st31 + goto st30 tr90: - ( m.cs) = 93 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 92 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr84: - ( m.cs) = 93 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 92 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again - st93: + st92: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof93 + goto _test_eof92 } - st_case_93: -//line plugins/parsers/influx/machine.go:12793 + st_case_92: +//line plugins/parsers/influx/machine.go:13077 switch ( m.data)[( m.p)] { case 9: goto tr89 case 10: - goto st7 + goto tr29 case 11: goto tr131 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr89 case 34: @@ -12808,28 +13092,28 @@ tr84: case 44: goto tr92 case 61: - goto st31 + goto st30 case 92: goto tr125 } goto tr121 tr125: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st94 - st94: + goto st93 + st93: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof94 + goto _test_eof93 } - st_case_94: -//line plugins/parsers/influx/machine.go:12828 + st_case_93: +//line 
plugins/parsers/influx/machine.go:13112 switch ( m.data)[( m.p)] { case 34: - goto st42 + goto st41 case 92: - goto st42 + goto st41 } switch { case ( m.data)[( m.p)] > 10: @@ -12839,9 +13123,268 @@ tr125: case ( m.data)[( m.p)] >= 9: goto tr8 } - goto st12 + goto st11 tr245: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st94 + st94: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof94 + } + st_case_94: +//line plugins/parsers/influx/machine.go:13139 + switch ( m.data)[( m.p)] { + case 9: + goto tr89 + case 10: + goto tr29 + case 11: + goto tr90 + case 12: + goto tr1 + case 13: + goto st7 + case 32: + goto tr89 + case 34: + goto tr91 + case 44: + goto tr92 + case 46: + goto st96 + case 48: + goto st533 + case 92: + goto st141 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st536 + } + goto st30 +tr85: + ( m.cs) = 405 +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:140 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr91: + ( m.cs) = 405 +//line plugins/parsers/influx/machine.go.rl:140 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr118: + ( m.cs) = 405 +//line plugins/parsers/influx/machine.go.rl:140 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto _again + st405: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof405 + } + st_case_405: +//line plugins/parsers/influx/machine.go:13220 + switch ( m.data)[( m.p)] { + case 10: + goto tr103 + case 11: + goto tr637 + case 13: + goto st33 + case 32: + goto tr501 + case 44: + goto tr503 + case 92: + goto st95 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr501 + } + goto st1 +tr637: + ( m.cs) = 406 +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr818: + ( m.cs) = 406 +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:122 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr1013: + ( m.cs) = 406 +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:104 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr1016: + ( m.cs) = 406 +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:113 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr1019: + ( m.cs) = 406 +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil 
{ + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:131 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st406: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof406 + } + st_case_406: +//line plugins/parsers/influx/machine.go:13349 + switch ( m.data)[( m.p)] { + case 10: + goto tr103 + case 11: + goto tr506 + case 13: + goto st33 + case 32: + goto tr501 + case 44: + goto tr4 + case 45: + goto tr507 + case 61: + goto st1 + case 92: + goto tr45 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr508 + } + case ( m.data)[( m.p)] >= 9: + goto tr501 + } + goto tr41 +tr37: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st95 +tr460: +//line plugins/parsers/influx/machine.go.rl:74 + + m.beginMetric = true + +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p @@ -12851,266 +13394,7 @@ tr245: goto _test_eof95 } st_case_95: -//line plugins/parsers/influx/machine.go:12855 - switch ( m.data)[( m.p)] { - case 9: - goto tr89 - case 10: - goto st7 - case 11: - goto tr90 - case 12: - goto tr1 - case 13: - goto st8 - case 32: - goto tr89 - case 34: - goto tr91 - case 44: - goto tr92 - case 46: - goto st97 - case 48: - goto st522 - case 92: - goto st142 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st525 - } - goto st31 -tr85: - ( m.cs) = 394 -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddString(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr91: - ( m.cs) = 394 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddString(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr118: - ( m.cs) = 394 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddString(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto _again - st394: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof394 - } - st_case_394: -//line plugins/parsers/influx/machine.go:12936 - switch ( m.data)[( m.p)] { - case 10: - goto st262 - case 11: - goto tr618 - case 13: - goto st34 - case 32: - goto tr482 - case 44: - goto tr484 - case 92: - goto st96 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr482 - } - goto st1 -tr618: - ( m.cs) = 395 -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr798: - ( m.cs) = 395 -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddFloat(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr981: - ( m.cs) = 395 -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:103 - - err = m.handler.AddInt(key, m.text()) - if err != nil { - ( m.p)-- - - ( 
m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr984: - ( m.cs) = 395 -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddUint(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr987: - ( m.cs) = 395 -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddBool(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st395: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof395 - } - st_case_395: -//line plugins/parsers/influx/machine.go:13065 - switch ( m.data)[( m.p)] { - case 10: - goto st262 - case 11: - goto tr487 - case 13: - goto st34 - case 32: - goto tr482 - case 44: - goto tr4 - case 45: - goto tr488 - case 61: - goto st1 - case 92: - goto tr45 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr489 - } - case ( m.data)[( m.p)] >= 9: - goto tr482 - } - goto tr41 -tr37: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st96 -tr441: -//line plugins/parsers/influx/machine.go.rl:73 - - foundMetric = true - -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st96 - st96: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof96 - } - st_case_96: -//line plugins/parsers/influx/machine.go:13114 +//line plugins/parsers/influx/machine.go:13398 switch { case ( m.data)[( m.p)] > 10: if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { @@ -13121,28 +13405,28 @@ tr441: } goto st1 tr246: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st97 - st97: + goto st96 + st96: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof97 + goto _test_eof96 } - st_case_97: -//line plugins/parsers/influx/machine.go:13135 + st_case_96: +//line plugins/parsers/influx/machine.go:13419 switch ( m.data)[( m.p)] { case 9: goto tr89 case 10: - goto st7 + goto tr29 case 11: goto tr90 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr89 case 34: @@ -13150,261 +13434,261 @@ tr246: case 44: goto tr92 case 92: - goto st142 + goto st141 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st396 + goto st407 } - goto st31 - st396: + goto st30 + st407: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof396 + goto _test_eof407 } - st_case_396: + st_case_407: switch ( m.data)[( m.p)] { case 9: - goto tr619 + goto tr638 case 10: - goto tr620 + goto tr639 case 11: - goto tr621 + goto tr640 case 12: - goto tr622 + goto tr641 case 13: - goto tr623 + goto tr642 case 32: - goto tr619 + goto tr638 case 34: goto tr91 case 44: - goto tr624 + goto tr643 case 69: - goto st140 + goto st139 case 92: - goto st142 + goto st141 case 101: - goto st140 + goto st139 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st396 + goto st407 } - goto st31 -tr578: - ( m.cs) = 98 -//line plugins/parsers/influx/machine.go.rl:77 + goto st30 +tr597: + ( m.cs) = 97 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line 
plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again -tr624: - ( m.cs) = 98 -//line plugins/parsers/influx/machine.go.rl:77 +tr643: + ( m.cs) = 97 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:121 +//line plugins/parsers/influx/machine.go.rl:122 - err = m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr747: - ( m.cs) = 98 -//line plugins/parsers/influx/machine.go.rl:90 +tr767: + ( m.cs) = 97 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:121 +//line plugins/parsers/influx/machine.go.rl:122 - err = m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr781: - ( m.cs) = 98 -//line plugins/parsers/influx/machine.go.rl:90 +tr801: + ( m.cs) = 97 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:103 +//line plugins/parsers/influx/machine.go.rl:104 - err = m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr787: - ( m.cs) = 98 -//line plugins/parsers/influx/machine.go.rl:90 +tr807: + ( m.cs) = 97 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:112 +//line plugins/parsers/influx/machine.go.rl:113 - err = m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr793: - ( m.cs) = 98 -//line plugins/parsers/influx/machine.go.rl:90 +tr813: + ( m.cs) = 97 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:130 +//line plugins/parsers/influx/machine.go.rl:131 - err = m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr805: - ( m.cs) = 98 -//line plugins/parsers/influx/machine.go.rl:77 +tr826: + ( m.cs) = 97 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:103 +//line plugins/parsers/influx/machine.go.rl:104 - err = m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr810: - 
( m.cs) = 98 -//line plugins/parsers/influx/machine.go.rl:77 +tr832: + ( m.cs) = 97 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:112 +//line plugins/parsers/influx/machine.go.rl:113 - err = m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr815: - ( m.cs) = 98 -//line plugins/parsers/influx/machine.go.rl:77 +tr837: + ( m.cs) = 97 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:130 +//line plugins/parsers/influx/machine.go.rl:131 - err = m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st98: + st97: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof98 + goto _test_eof97 } - st_case_98: -//line plugins/parsers/influx/machine.go:13399 + st_case_97: +//line plugins/parsers/influx/machine.go:13683 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto st7 + goto tr29 case 12: goto tr47 case 13: - goto st8 + goto st7 case 32: goto st6 case 34: @@ -13418,26 +13702,26 @@ tr815: } goto tr257 tr257: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st99 - st99: + goto st98 + st98: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof99 + goto _test_eof98 } - st_case_99: -//line plugins/parsers/influx/machine.go:13432 + st_case_98: +//line plugins/parsers/influx/machine.go:13716 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto st7 + goto tr29 case 12: goto tr47 case 13: - goto st8 + goto st7 case 32: goto st6 case 34: @@ -13447,99 +13731,210 @@ tr257: case 61: goto tr262 case 92: - goto st138 + goto st137 } - goto st99 + goto st98 tr258: - ( m.cs) = 397 -//line plugins/parsers/influx/machine.go.rl:19 + ( m.cs) = 408 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:139 +//line plugins/parsers/influx/machine.go.rl:140 - err = m.handler.AddString(key, m.text()) + err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again tr261: - ( m.cs) = 397 -//line plugins/parsers/influx/machine.go.rl:139 + ( m.cs) = 408 +//line plugins/parsers/influx/machine.go.rl:140 - err = m.handler.AddString(key, m.text()) + err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st397: + st408: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof397 + goto _test_eof408 } - st_case_397: -//line plugins/parsers/influx/machine.go:13489 + st_case_408: +//line plugins/parsers/influx/machine.go:13773 switch ( m.data)[( m.p)] { case 10: - goto st262 + goto tr103 case 11: - goto st398 + goto st409 case 13: - goto st34 + goto st33 case 32: - goto st261 + goto st272 case 44: - goto st37 + goto st36 case 61: goto tr137 case 92: - goto st101 + goto st100 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st261 + goto st272 } - goto st46 - st398: + goto st45 + st409: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof398 + goto _test_eof409 } - st_case_398: + 
st_case_409: switch ( m.data)[( m.p)] { case 10: - goto st262 + goto tr103 case 11: - goto st398 + goto st409 case 13: - goto st34 + goto st33 case 32: - goto st261 + goto st272 case 44: goto tr132 case 45: - goto tr627 + goto tr646 case 61: goto tr137 case 92: - goto st101 + goto st100 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr628 + goto tr647 } case ( m.data)[( m.p)] >= 9: - goto st261 + goto st272 } - goto st46 -tr627: -//line plugins/parsers/influx/machine.go.rl:19 + goto st45 +tr646: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st99 + st99: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof99 + } + st_case_99: +//line plugins/parsers/influx/machine.go:13837 + switch ( m.data)[( m.p)] { + case 32: + goto tr132 + case 44: + goto tr132 + case 61: + goto tr137 + case 92: + goto st100 + } + switch { + case ( m.data)[( m.p)] < 12: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { + goto tr132 + } + case ( m.data)[( m.p)] > 13: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st410 + } + default: + goto tr132 + } + goto st45 +tr647: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st410 + st410: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof410 + } + st_case_410: +//line plugins/parsers/influx/machine.go:13872 + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr648 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr132 + case 61: + goto tr137 + case 92: + goto st100 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st412 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st45 +tr648: + ( m.cs) = 411 +//line plugins/parsers/influx/machine.go.rl:149 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st411: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof411 + } + st_case_411: +//line plugins/parsers/influx/machine.go:13916 + switch ( m.data)[( m.p)] { + case 10: + goto tr103 + case 11: + goto st411 + case 13: + goto st33 + case 32: + goto st277 + case 44: + goto tr47 + case 61: + goto tr137 + case 92: + goto st100 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st277 + } + goto st45 +tr135: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p @@ -13549,120 +13944,37 @@ tr627: goto _test_eof100 } st_case_100: -//line plugins/parsers/influx/machine.go:13553 - switch ( m.data)[( m.p)] { - case 32: - goto tr132 - case 44: - goto tr132 - case 61: - goto tr137 - case 92: +//line plugins/parsers/influx/machine.go:13948 + if ( m.data)[( m.p)] == 92 { goto st101 } switch { - case ( m.data)[( m.p)] < 12: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr132 - } - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st399 - } - default: - goto tr132 - } - goto st46 -tr628: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st399 - st399: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof399 - } - st_case_399: -//line plugins/parsers/influx/machine.go:13588 - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr629 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr132 - case 61: - goto tr137 - case 92: - goto st101 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st401 + 
case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr47 } case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st46 -tr629: - ( m.cs) = 400 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st400: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof400 - } - st_case_400: -//line plugins/parsers/influx/machine.go:13632 - switch ( m.data)[( m.p)] { - case 10: - goto st262 - case 11: - goto st400 - case 13: - goto st34 - case 32: - goto st266 - case 44: goto tr47 - case 61: - goto tr137 - case 92: - goto st101 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st266 - } - goto st46 -tr135: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st101 + goto st45 st101: +//line plugins/parsers/influx/machine.go.rl:240 + ( m.p)-- + if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof101 } st_case_101: -//line plugins/parsers/influx/machine.go:13664 - if ( m.data)[( m.p)] == 92 { - goto st102 +//line plugins/parsers/influx/machine.go:13969 + switch ( m.data)[( m.p)] { + case 32: + goto tr47 + case 44: + goto tr47 + case 61: + goto tr137 + case 92: + goto st100 } switch { case ( m.data)[( m.p)] > 10: @@ -13672,365 +13984,7 @@ tr135: case ( m.data)[( m.p)] >= 9: goto tr47 } - goto st46 - st102: -//line plugins/parsers/influx/machine.go.rl:234 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof102 - } - st_case_102: -//line plugins/parsers/influx/machine.go:13685 - switch ( m.data)[( m.p)] { - case 32: - goto tr47 - case 44: - goto tr47 - case 61: - goto tr137 - case 92: - goto st101 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr47 - } - case ( m.data)[( m.p)] >= 9: - goto tr47 - } - goto st46 - st401: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof401 - } - st_case_401: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr629 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr132 - case 61: - goto tr137 - case 92: - goto st101 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st402 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st46 - st402: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof402 - } - st_case_402: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr629 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr132 - case 61: - goto tr137 - case 92: - goto st101 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st403 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st46 - st403: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof403 - } - st_case_403: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr629 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr132 - case 61: - goto tr137 - case 92: - goto st101 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st404 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st46 - st404: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof404 - } - st_case_404: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr629 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr132 - case 61: - goto tr137 - case 92: - goto 
st101 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st405 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st46 - st405: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof405 - } - st_case_405: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr629 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr132 - case 61: - goto tr137 - case 92: - goto st101 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st406 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st46 - st406: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof406 - } - st_case_406: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr629 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr132 - case 61: - goto tr137 - case 92: - goto st101 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st407 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st46 - st407: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof407 - } - st_case_407: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr629 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr132 - case 61: - goto tr137 - case 92: - goto st101 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st408 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st46 - st408: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof408 - } - st_case_408: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr629 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr132 - case 61: - goto tr137 - case 92: - goto st101 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st409 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st46 - st409: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof409 - } - st_case_409: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr629 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr132 - case 61: - goto tr137 - case 92: - goto st101 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st410 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st46 - st410: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof410 - } - st_case_410: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr629 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr132 - case 61: - goto tr137 - case 92: - goto st101 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st411 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st46 - st411: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof411 - } - st_case_411: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 11: - goto tr629 - case 13: - goto tr453 - case 32: - goto tr450 - case 44: - goto tr132 - case 61: - goto tr137 - case 92: - goto st101 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st412 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto st46 + goto st45 st412: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof412 @@ -14038,19 +13992,19 @@ tr135: st_case_412: switch ( m.data)[( m.p)] 
{ case 10: - goto tr451 + goto tr470 case 11: - goto tr629 + goto tr648 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr132 case 61: goto tr137 case 92: - goto st101 + goto st100 } switch { case ( m.data)[( m.p)] > 12: @@ -14058,9 +14012,9 @@ tr135: goto st413 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 } - goto st46 + goto st45 st413: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof413 @@ -14068,19 +14022,19 @@ tr135: st_case_413: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr629 + goto tr648 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr132 case 61: goto tr137 case 92: - goto st101 + goto st100 } switch { case ( m.data)[( m.p)] > 12: @@ -14088,9 +14042,9 @@ tr135: goto st414 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 } - goto st46 + goto st45 st414: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof414 @@ -14098,19 +14052,19 @@ tr135: st_case_414: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr629 + goto tr648 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr132 case 61: goto tr137 case 92: - goto st101 + goto st100 } switch { case ( m.data)[( m.p)] > 12: @@ -14118,9 +14072,9 @@ tr135: goto st415 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 } - goto st46 + goto st45 st415: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof415 @@ -14128,19 +14082,19 @@ tr135: st_case_415: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr629 + goto tr648 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr132 case 61: goto tr137 case 92: - goto st101 + goto st100 } switch { case ( m.data)[( m.p)] > 12: @@ -14148,9 +14102,9 @@ tr135: goto st416 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 } - goto st46 + goto st45 st416: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof416 @@ -14158,19 +14112,19 @@ tr135: st_case_416: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr629 + goto tr648 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr132 case 61: goto tr137 case 92: - goto st101 + goto st100 } switch { case ( m.data)[( m.p)] > 12: @@ -14178,9 +14132,9 @@ tr135: goto st417 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 } - goto st46 + goto st45 st417: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof417 @@ -14188,19 +14142,19 @@ tr135: st_case_417: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr629 + goto tr648 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr132 case 61: goto tr137 case 92: - goto st101 + goto st100 } switch { case ( m.data)[( m.p)] > 12: @@ -14208,9 +14162,9 @@ tr135: goto st418 } case ( m.data)[( m.p)] >= 9: - goto tr450 + goto tr469 } - goto st46 + goto st45 st418: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof418 @@ -14218,49 +14172,379 @@ tr135: st_case_418: switch ( m.data)[( m.p)] { case 10: - goto tr451 + goto tr470 case 11: - goto tr629 + goto tr648 case 13: - goto tr453 + goto tr472 case 32: - goto tr450 + goto tr469 case 44: goto tr132 case 61: goto tr137 case 92: - goto st101 + goto st100 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st419 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st45 + st419: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof419 + } + st_case_419: + 
switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr648 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr132 + case 61: + goto tr137 + case 92: + goto st100 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st420 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st45 + st420: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof420 + } + st_case_420: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr648 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr132 + case 61: + goto tr137 + case 92: + goto st100 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st421 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st45 + st421: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof421 + } + st_case_421: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr648 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr132 + case 61: + goto tr137 + case 92: + goto st100 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st422 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st45 + st422: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof422 + } + st_case_422: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr648 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr132 + case 61: + goto tr137 + case 92: + goto st100 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st423 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st45 + st423: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof423 + } + st_case_423: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr648 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr132 + case 61: + goto tr137 + case 92: + goto st100 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st424 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st45 + st424: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof424 + } + st_case_424: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr648 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr132 + case 61: + goto tr137 + case 92: + goto st100 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st425 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st45 + st425: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof425 + } + st_case_425: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr648 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr132 + case 61: + goto tr137 + case 92: + goto st100 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st426 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st45 + st426: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof426 + } + st_case_426: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr648 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr132 + case 61: + goto tr137 + case 92: + goto st100 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st427 + } + case ( 
m.data)[( m.p)] >= 9: + goto tr469 + } + goto st45 + st427: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof427 + } + st_case_427: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr648 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr132 + case 61: + goto tr137 + case 92: + goto st100 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st428 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st45 + st428: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof428 + } + st_case_428: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr648 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr132 + case 61: + goto tr137 + case 92: + goto st100 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st429 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto st45 + st429: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof429 + } + st_case_429: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 11: + goto tr648 + case 13: + goto tr472 + case 32: + goto tr469 + case 44: + goto tr132 + case 61: + goto tr137 + case 92: + goto st100 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr450 + goto tr469 } - goto st46 + goto st45 tr262: -//line plugins/parsers/influx/machine.go.rl:86 +//line plugins/parsers/influx/machine.go.rl:87 - key = m.text() + m.key = m.text() -//line plugins/parsers/influx/machine.go.rl:99 +//line plugins/parsers/influx/machine.go.rl:100 - key = m.text() + m.key = m.text() - goto st103 - st103: + goto st102 + st102: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof103 + goto _test_eof102 } - st_case_103: -//line plugins/parsers/influx/machine.go:14255 + st_case_102: +//line plugins/parsers/influx/machine.go:14539 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto st7 + goto tr29 case 12: goto tr47 case 13: - goto st8 + goto st7 case 32: goto st6 case 34: @@ -14291,435 +14575,588 @@ tr262: } goto tr228 tr266: - ( m.cs) = 419 -//line plugins/parsers/influx/machine.go.rl:19 + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:139 +//line plugins/parsers/influx/machine.go.rl:140 - err = m.handler.AddString(key, m.text()) + err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st419: + st430: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof419 + goto _test_eof430 } - st_case_419: -//line plugins/parsers/influx/machine.go:14316 + st_case_430: +//line plugins/parsers/influx/machine.go:14600 switch ( m.data)[( m.p)] { case 9: - goto tr649 + goto tr668 case 10: - goto tr650 + goto tr669 case 11: - goto tr651 + goto tr670 case 12: - goto tr547 + goto tr566 case 13: - goto tr652 + goto tr671 case 32: - goto tr649 + goto tr668 case 34: goto tr151 case 44: - goto tr653 + goto tr672 case 61: goto tr23 case 92: goto tr153 } goto tr148 -tr841: - ( m.cs) = 420 -//line plugins/parsers/influx/machine.go.rl:77 +tr863: + ( m.cs) = 431 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr682: - ( m.cs) = 420 -//line plugins/parsers/influx/machine.go.rl:90 +tr701: + ( m.cs) = 431 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = 
m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr649: - ( m.cs) = 420 -//line plugins/parsers/influx/machine.go.rl:90 +tr668: + ( m.cs) = 431 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again -tr837: - ( m.cs) = 420 -//line plugins/parsers/influx/machine.go.rl:77 +tr859: + ( m.cs) = 431 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:121 +//line plugins/parsers/influx/machine.go.rl:122 - err = m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr710: - ( m.cs) = 420 -//line plugins/parsers/influx/machine.go.rl:90 +tr729: + ( m.cs) = 431 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:121 +//line plugins/parsers/influx/machine.go.rl:122 - err = m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr721: - ( m.cs) = 420 -//line plugins/parsers/influx/machine.go.rl:90 +tr740: + ( m.cs) = 431 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:103 +//line plugins/parsers/influx/machine.go.rl:104 - err = m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr728: - ( m.cs) = 420 -//line plugins/parsers/influx/machine.go.rl:90 +tr747: + ( m.cs) = 431 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:112 +//line plugins/parsers/influx/machine.go.rl:113 - err = m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr735: - ( m.cs) = 420 -//line plugins/parsers/influx/machine.go.rl:90 +tr754: + ( m.cs) = 431 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:130 +//line plugins/parsers/influx/machine.go.rl:131 - err = m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr869: - ( m.cs) = 420 -//line plugins/parsers/influx/machine.go.rl:77 +tr891: + ( m.cs) = 
431 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:103 +//line plugins/parsers/influx/machine.go.rl:104 - err = m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr873: - ( m.cs) = 420 -//line plugins/parsers/influx/machine.go.rl:77 +tr895: + ( m.cs) = 431 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:112 +//line plugins/parsers/influx/machine.go.rl:113 - err = m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr877: - ( m.cs) = 420 -//line plugins/parsers/influx/machine.go.rl:77 +tr899: + ( m.cs) = 431 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:130 +//line plugins/parsers/influx/machine.go.rl:131 - err = m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st420: + st431: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof420 + goto _test_eof431 } - st_case_420: -//line plugins/parsers/influx/machine.go:14572 + st_case_431: +//line plugins/parsers/influx/machine.go:14856 switch ( m.data)[( m.p)] { case 9: - goto st420 + goto st431 case 10: - goto st317 + goto tr275 case 11: - goto tr655 + goto tr674 case 12: - goto st290 + goto st301 case 13: - goto st104 + goto st103 case 32: - goto st420 + goto st431 case 34: goto tr97 case 44: goto st6 case 45: - goto tr656 + goto tr675 case 61: goto st6 case 92: goto tr163 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr657 + goto tr676 } goto tr160 -tr655: -//line plugins/parsers/influx/machine.go.rl:19 +tr674: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st421 - st421: + goto st432 + st432: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof421 + goto _test_eof432 } - st_case_421: -//line plugins/parsers/influx/machine.go:14612 + st_case_432: +//line plugins/parsers/influx/machine.go:14896 switch ( m.data)[( m.p)] { case 9: - goto st420 + goto st431 case 10: - goto st317 + goto tr275 case 11: - goto tr655 + goto tr674 case 12: - goto st290 + goto st301 case 13: - goto st104 + goto st103 case 32: - goto st420 + goto st431 case 34: goto tr97 case 44: goto st6 case 45: - goto tr656 + goto tr675 case 61: goto tr165 case 92: goto tr163 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr657 + goto tr676 } goto tr160 -tr652: -//line plugins/parsers/influx/machine.go.rl:19 +tr671: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st104 -tr661: - ( m.cs) = 104 -//line plugins/parsers/influx/machine.go.rl:148 + goto st103 +tr680: + ( m.cs) = 103 +//line plugins/parsers/influx/machine.go.rl:149 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr517: - ( m.cs) = 104 -//line plugins/parsers/influx/machine.go.rl:121 +tr536: + ( m.cs) = 103 
+//line plugins/parsers/influx/machine.go.rl:122 - err = m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr725: - ( m.cs) = 104 -//line plugins/parsers/influx/machine.go.rl:103 +tr744: + ( m.cs) = 103 +//line plugins/parsers/influx/machine.go.rl:104 - err = m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr732: - ( m.cs) = 104 -//line plugins/parsers/influx/machine.go.rl:112 +tr751: + ( m.cs) = 103 +//line plugins/parsers/influx/machine.go.rl:113 - err = m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr739: - ( m.cs) = 104 -//line plugins/parsers/influx/machine.go.rl:130 +tr758: + ( m.cs) = 103 +//line plugins/parsers/influx/machine.go.rl:131 - err = m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again + st103: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof103 + } + st_case_103: +//line plugins/parsers/influx/machine.go:15001 + if ( m.data)[( m.p)] == 10 { + goto tr275 + } + goto tr8 +tr675: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st104 st104: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof104 } st_case_104: -//line plugins/parsers/influx/machine.go:14717 - if ( m.data)[( m.p)] == 10 { - goto st317 +//line plugins/parsers/influx/machine.go:15017 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr29 + case 12: + goto tr105 + case 13: + goto st7 + case 32: + goto st6 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st105 } - goto tr8 -tr656: -//line plugins/parsers/influx/machine.go.rl:19 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st433 + } + goto st50 +tr676: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st433 + st433: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof433 + } + st_case_433: +//line plugins/parsers/influx/machine.go:15053 + switch ( m.data)[( m.p)] { + case 9: + goto tr677 + case 10: + goto tr678 + case 11: + goto tr679 + case 12: + goto tr469 + case 13: + goto tr680 + case 32: + goto tr677 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st105 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st436 + } + goto st50 +tr677: + ( m.cs) = 434 +//line plugins/parsers/influx/machine.go.rl:149 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st434: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof434 + } + st_case_434: +//line plugins/parsers/influx/machine.go:15098 + switch ( m.data)[( m.p)] { + case 10: + goto tr275 + case 12: + goto st277 + case 13: + goto st103 + case 32: + goto st434 + case 34: + goto tr31 + case 92: + goto st75 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto st434 + } + goto st6 +tr679: + ( m.cs) = 435 +//line plugins/parsers/influx/machine.go.rl:149 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st435: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof435 + } + st_case_435: 
+//line plugins/parsers/influx/machine.go:15135 + switch ( m.data)[( m.p)] { + case 9: + goto st434 + case 10: + goto tr275 + case 11: + goto st435 + case 12: + goto st277 + case 13: + goto st103 + case 32: + goto st434 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr165 + case 92: + goto st105 + } + goto st50 +tr163: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p @@ -14729,165 +15166,12 @@ tr656: goto _test_eof105 } st_case_105: -//line plugins/parsers/influx/machine.go:14733 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto st7 - case 12: - goto tr105 - case 13: - goto st8 - case 32: - goto st6 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr165 - case 92: - goto st106 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st422 - } - goto st51 -tr657: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st422 - st422: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof422 - } - st_case_422: -//line plugins/parsers/influx/machine.go:14769 - switch ( m.data)[( m.p)] { - case 9: - goto tr658 - case 10: - goto tr659 - case 11: - goto tr660 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr165 - case 92: - goto st106 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st425 - } - goto st51 -tr658: - ( m.cs) = 423 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st423: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof423 - } - st_case_423: -//line plugins/parsers/influx/machine.go:14814 - switch ( m.data)[( m.p)] { - case 10: - goto st317 - case 12: - goto st266 - case 13: - goto st104 - case 32: - goto st423 - case 34: - goto tr31 - case 92: - goto st76 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto st423 - } - goto st6 -tr660: - ( m.cs) = 424 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st424: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof424 - } - st_case_424: -//line plugins/parsers/influx/machine.go:14851 - switch ( m.data)[( m.p)] { - case 9: - goto st423 - case 10: - goto st317 - case 11: - goto st424 - case 12: - goto st266 - case 13: - goto st104 - case 32: - goto st423 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr165 - case 92: - goto st106 - } - goto st51 -tr163: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st106 - st106: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof106 - } - st_case_106: -//line plugins/parsers/influx/machine.go:14886 +//line plugins/parsers/influx/machine.go:15170 switch ( m.data)[( m.p)] { case 34: - goto st51 + goto st50 case 92: - goto st51 + goto st50 } switch { case ( m.data)[( m.p)] > 10: @@ -14898,347 +15182,6 @@ tr163: goto tr8 } goto st3 - st425: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof425 - } - st_case_425: - switch ( m.data)[( m.p)] { - case 9: - goto tr658 - case 10: - goto tr659 - case 11: - goto tr660 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr165 - case 92: - goto st106 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st426 - } - goto st51 - st426: - if ( m.p)++; ( m.p) 
== ( m.pe) { - goto _test_eof426 - } - st_case_426: - switch ( m.data)[( m.p)] { - case 9: - goto tr658 - case 10: - goto tr659 - case 11: - goto tr660 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr165 - case 92: - goto st106 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st427 - } - goto st51 - st427: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof427 - } - st_case_427: - switch ( m.data)[( m.p)] { - case 9: - goto tr658 - case 10: - goto tr659 - case 11: - goto tr660 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr165 - case 92: - goto st106 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st428 - } - goto st51 - st428: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof428 - } - st_case_428: - switch ( m.data)[( m.p)] { - case 9: - goto tr658 - case 10: - goto tr659 - case 11: - goto tr660 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr165 - case 92: - goto st106 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st429 - } - goto st51 - st429: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof429 - } - st_case_429: - switch ( m.data)[( m.p)] { - case 9: - goto tr658 - case 10: - goto tr659 - case 11: - goto tr660 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr165 - case 92: - goto st106 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st430 - } - goto st51 - st430: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof430 - } - st_case_430: - switch ( m.data)[( m.p)] { - case 9: - goto tr658 - case 10: - goto tr659 - case 11: - goto tr660 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr165 - case 92: - goto st106 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st431 - } - goto st51 - st431: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof431 - } - st_case_431: - switch ( m.data)[( m.p)] { - case 9: - goto tr658 - case 10: - goto tr659 - case 11: - goto tr660 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr165 - case 92: - goto st106 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st432 - } - goto st51 - st432: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof432 - } - st_case_432: - switch ( m.data)[( m.p)] { - case 9: - goto tr658 - case 10: - goto tr659 - case 11: - goto tr660 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr165 - case 92: - goto st106 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st433 - } - goto st51 - st433: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof433 - } - st_case_433: - switch ( m.data)[( m.p)] { - case 9: - goto tr658 - case 10: - goto tr659 - case 11: - goto tr660 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr165 - case 92: - goto st106 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st434 - } - goto st51 - st434: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof434 - 
} - st_case_434: - switch ( m.data)[( m.p)] { - case 9: - goto tr658 - case 10: - goto tr659 - case 11: - goto tr660 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr165 - case 92: - goto st106 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st435 - } - goto st51 - st435: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof435 - } - st_case_435: - switch ( m.data)[( m.p)] { - case 9: - goto tr658 - case 10: - goto tr659 - case 11: - goto tr660 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr165 - case 92: - goto st106 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st436 - } - goto st51 st436: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof436 @@ -15246,17 +15189,17 @@ tr163: st_case_436: switch ( m.data)[( m.p)] { case 9: - goto tr658 + goto tr677 case 10: - goto tr659 + goto tr678 case 11: - goto tr660 + goto tr679 case 12: - goto tr450 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr658 + goto tr677 case 34: goto tr100 case 44: @@ -15264,12 +15207,12 @@ tr163: case 61: goto tr165 case 92: - goto st106 + goto st105 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st437 } - goto st51 + goto st50 st437: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof437 @@ -15277,17 +15220,17 @@ tr163: st_case_437: switch ( m.data)[( m.p)] { case 9: - goto tr658 + goto tr677 case 10: - goto tr659 + goto tr678 case 11: - goto tr660 + goto tr679 case 12: - goto tr450 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr658 + goto tr677 case 34: goto tr100 case 44: @@ -15295,12 +15238,12 @@ tr163: case 61: goto tr165 case 92: - goto st106 + goto st105 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st438 } - goto st51 + goto st50 st438: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof438 @@ -15308,17 +15251,17 @@ tr163: st_case_438: switch ( m.data)[( m.p)] { case 9: - goto tr658 + goto tr677 case 10: - goto tr659 + goto tr678 case 11: - goto tr660 + goto tr679 case 12: - goto tr450 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr658 + goto tr677 case 34: goto tr100 case 44: @@ -15326,12 +15269,12 @@ tr163: case 61: goto tr165 case 92: - goto st106 + goto st105 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st439 } - goto st51 + goto st50 st439: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof439 @@ -15339,17 +15282,17 @@ tr163: st_case_439: switch ( m.data)[( m.p)] { case 9: - goto tr658 + goto tr677 case 10: - goto tr659 + goto tr678 case 11: - goto tr660 + goto tr679 case 12: - goto tr450 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr658 + goto tr677 case 34: goto tr100 case 44: @@ -15357,12 +15300,12 @@ tr163: case 61: goto tr165 case 92: - goto st106 + goto st105 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st440 } - goto st51 + goto st50 st440: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof440 @@ -15370,17 +15313,17 @@ tr163: st_case_440: switch ( m.data)[( m.p)] { case 9: - goto tr658 + goto tr677 case 10: - goto tr659 + goto tr678 case 11: - goto tr660 + goto tr679 case 12: - goto tr450 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr658 + goto tr677 case 34: goto tr100 case 44: @@ -15388,12 +15331,12 @@ tr163: case 61: goto tr165 case 92: - goto st106 + goto st105 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st441 } - 
goto st51 + goto st50 st441: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof441 @@ -15401,17 +15344,17 @@ tr163: st_case_441: switch ( m.data)[( m.p)] { case 9: - goto tr658 + goto tr677 case 10: - goto tr659 + goto tr678 case 11: - goto tr660 + goto tr679 case 12: - goto tr450 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr658 + goto tr677 case 34: goto tr100 case 44: @@ -15419,12 +15362,12 @@ tr163: case 61: goto tr165 case 92: - goto st106 + goto st105 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st442 } - goto st51 + goto st50 st442: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof442 @@ -15432,17 +15375,17 @@ tr163: st_case_442: switch ( m.data)[( m.p)] { case 9: - goto tr658 + goto tr677 case 10: - goto tr659 + goto tr678 case 11: - goto tr660 + goto tr679 case 12: - goto tr450 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr658 + goto tr677 case 34: goto tr100 case 44: @@ -15450,484 +15393,198 @@ tr163: case 61: goto tr165 case 92: - goto st106 + goto st105 } - goto st51 -tr651: - ( m.cs) = 443 -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto _again -tr711: - ( m.cs) = 443 -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddFloat(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr723: - ( m.cs) = 443 -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:103 - - err = m.handler.AddInt(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr730: - ( m.cs) = 443 -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddUint(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr737: - ( m.cs) = 443 -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddBool(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st443 + } + goto st50 st443: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof443 } st_case_443: -//line plugins/parsers/influx/machine.go:15571 switch ( m.data)[( m.p)] { case 9: - goto tr682 + goto tr677 case 10: - goto st317 + goto tr678 case 11: - goto tr683 + goto tr679 case 12: - goto tr547 + goto tr469 case 13: - goto st104 + goto tr680 case 32: - goto tr682 + goto tr677 case 34: - goto tr204 + goto tr100 case 44: - goto tr158 - case 45: - goto tr684 - case 61: goto st6 + case 61: + goto tr165 case 92: - goto tr205 + goto st105 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr685 + goto st444 } - goto tr202 -tr683: - ( m.cs) = 
444 -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto _again + goto st50 st444: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof444 } st_case_444: -//line plugins/parsers/influx/machine.go:15622 switch ( m.data)[( m.p)] { case 9: - goto tr682 + goto tr677 case 10: - goto st317 + goto tr678 case 11: - goto tr683 + goto tr679 case 12: - goto tr547 + goto tr469 case 13: - goto st104 + goto tr680 case 32: - goto tr682 + goto tr677 case 34: - goto tr204 + goto tr100 case 44: - goto tr158 - case 45: - goto tr684 + goto st6 case 61: goto tr165 case 92: - goto tr205 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr685 - } - goto tr202 -tr684: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st107 - st107: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof107 - } - st_case_107: -//line plugins/parsers/influx/machine.go:15662 - switch ( m.data)[( m.p)] { - case 9: - goto tr155 - case 10: - goto st7 - case 11: - goto tr207 - case 12: - goto tr60 - case 13: - goto st8 - case 32: - goto tr155 - case 34: - goto tr208 - case 44: - goto tr158 - case 61: - goto tr165 - case 92: - goto st69 + goto st105 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st445 } - goto st67 -tr685: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st445 + goto st50 st445: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof445 } st_case_445: -//line plugins/parsers/influx/machine.go:15700 switch ( m.data)[( m.p)] { case 9: - goto tr686 + goto tr677 case 10: - goto tr659 + goto tr678 case 11: - goto tr687 + goto tr679 case 12: - goto tr553 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr686 + goto tr677 case 34: - goto tr208 + goto tr100 case 44: - goto tr158 + goto st6 case 61: goto tr165 case 92: - goto st69 + goto st105 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st449 + goto st446 } - goto st67 -tr848: - ( m.cs) = 446 -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr691: - ( m.cs) = 446 -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr845: - ( m.cs) = 446 -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr686: - ( m.cs) = 446 -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again + goto st50 st446: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof446 } st_case_446: -//line plugins/parsers/influx/machine.go:15804 switch ( m.data)[( m.p)] { case 9: - goto st446 + goto tr677 case 10: - goto st317 + goto tr678 case 11: - goto tr690 + goto tr679 case 12: - goto 
st294 + goto tr469 case 13: - goto st104 + goto tr680 case 32: - goto st446 + goto tr677 case 34: - goto tr97 + goto tr100 case 44: goto st6 case 61: - goto st6 + goto tr165 case 92: - goto tr163 + goto st105 } - goto tr160 -tr690: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st447 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st447 + } + goto st50 st447: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof447 } st_case_447: -//line plugins/parsers/influx/machine.go:15839 switch ( m.data)[( m.p)] { case 9: - goto st446 + goto tr677 case 10: - goto st317 + goto tr678 case 11: - goto tr690 + goto tr679 case 12: - goto st294 + goto tr469 case 13: - goto st104 + goto tr680 case 32: - goto st446 + goto tr677 case 34: - goto tr97 + goto tr100 case 44: goto st6 case 61: goto tr165 case 92: - goto tr163 + goto st105 } - goto tr160 -tr692: - ( m.cs) = 448 -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto _again -tr687: - ( m.cs) = 448 -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st448 + } + goto st50 st448: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof448 } st_case_448: -//line plugins/parsers/influx/machine.go:15908 switch ( m.data)[( m.p)] { case 9: - goto tr691 + goto tr677 case 10: - goto st317 + goto tr678 case 11: - goto tr692 + goto tr679 case 12: - goto tr556 + goto tr469 case 13: - goto st104 + goto tr680 case 32: - goto tr691 + goto tr677 case 34: - goto tr204 + goto tr100 case 44: - goto tr158 + goto st6 case 61: goto tr165 case 92: - goto tr205 + goto st105 } - goto tr202 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st449 + } + goto st50 st449: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof449 @@ -15935,30 +15592,30 @@ tr687: st_case_449: switch ( m.data)[( m.p)] { case 9: - goto tr686 + goto tr677 case 10: - goto tr659 + goto tr678 case 11: - goto tr687 + goto tr679 case 12: - goto tr553 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr686 + goto tr677 case 34: - goto tr208 + goto tr100 case 44: - goto tr158 + goto st6 case 61: goto tr165 case 92: - goto st69 + goto st105 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st450 } - goto st67 + goto st50 st450: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof450 @@ -15966,30 +15623,30 @@ tr687: st_case_450: switch ( m.data)[( m.p)] { case 9: - goto tr686 + goto tr677 case 10: - goto tr659 + goto tr678 case 11: - goto tr687 + goto tr679 case 12: - goto tr553 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr686 + goto tr677 case 34: - goto tr208 + goto tr100 case 44: - goto tr158 + goto st6 case 61: goto tr165 case 92: - goto st69 + goto st105 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st451 } - goto st67 + goto st50 st451: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof451 @@ -15997,30 +15654,30 @@ tr687: st_case_451: switch ( m.data)[( m.p)] { case 9: - goto tr686 + goto tr677 case 10: - goto tr659 + goto tr678 case 11: - goto tr687 + goto tr679 case 12: - 
goto tr553 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr686 + goto tr677 case 34: - goto tr208 + goto tr100 case 44: - goto tr158 + goto st6 case 61: goto tr165 case 92: - goto st69 + goto st105 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st452 } - goto st67 + goto st50 st452: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof452 @@ -16028,30 +15685,30 @@ tr687: st_case_452: switch ( m.data)[( m.p)] { case 9: - goto tr686 + goto tr677 case 10: - goto tr659 + goto tr678 case 11: - goto tr687 + goto tr679 case 12: - goto tr553 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr686 + goto tr677 case 34: - goto tr208 + goto tr100 case 44: - goto tr158 + goto st6 case 61: goto tr165 case 92: - goto st69 + goto st105 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st453 } - goto st67 + goto st50 st453: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof453 @@ -16059,79 +15716,246 @@ tr687: st_case_453: switch ( m.data)[( m.p)] { case 9: - goto tr686 + goto tr677 case 10: - goto tr659 + goto tr678 case 11: - goto tr687 + goto tr679 case 12: - goto tr553 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr686 + goto tr677 case 34: - goto tr208 + goto tr100 case 44: - goto tr158 + goto st6 case 61: goto tr165 case 92: - goto st69 + goto st105 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st454 - } - goto st67 + goto st50 +tr670: + ( m.cs) = 454 +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto _again +tr730: + ( m.cs) = 454 +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:122 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr742: + ( m.cs) = 454 +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:104 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr749: + ( m.cs) = 454 +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:113 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr756: + ( m.cs) = 454 +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:131 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again st454: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof454 } st_case_454: +//line plugins/parsers/influx/machine.go:15855 switch ( m.data)[( m.p)] { case 9: - goto tr686 + goto tr701 case 10: - goto tr659 + goto tr275 case 11: - goto tr687 + goto tr702 case 12: - goto tr553 + goto tr566 case 13: - goto tr661 + goto 
st103 case 32: - goto tr686 + goto tr701 case 34: - goto tr208 + goto tr204 case 44: goto tr158 + case 45: + goto tr703 case 61: - goto tr165 + goto st6 case 92: - goto st69 + goto tr205 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st455 + goto tr704 } - goto st67 + goto tr202 +tr702: + ( m.cs) = 455 +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto _again st455: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof455 } st_case_455: +//line plugins/parsers/influx/machine.go:15906 switch ( m.data)[( m.p)] { case 9: - goto tr686 + goto tr701 case 10: - goto tr659 + goto tr275 case 11: - goto tr687 + goto tr702 case 12: - goto tr553 + goto tr566 case 13: - goto tr661 + goto st103 case 32: - goto tr686 + goto tr701 + case 34: + goto tr204 + case 44: + goto tr158 + case 45: + goto tr703 + case 61: + goto tr165 + case 92: + goto tr205 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr704 + } + goto tr202 +tr703: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st106 + st106: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof106 + } + st_case_106: +//line plugins/parsers/influx/machine.go:15946 + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto tr29 + case 11: + goto tr207 + case 12: + goto tr60 + case 13: + goto st7 + case 32: + goto tr155 case 34: goto tr208 case 44: @@ -16139,30 +15963,37 @@ tr687: case 61: goto tr165 case 92: - goto st69 + goto st68 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st456 } - goto st67 + goto st66 +tr704: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st456 st456: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof456 } st_case_456: +//line plugins/parsers/influx/machine.go:15984 switch ( m.data)[( m.p)] { case 9: - goto tr686 + goto tr705 case 10: - goto tr659 + goto tr678 case 11: - goto tr687 + goto tr706 case 12: - goto tr553 + goto tr572 case 13: - goto tr661 + goto tr680 case 32: - goto tr686 + goto tr705 case 34: goto tr208 case 44: @@ -16170,105 +16001,217 @@ tr687: case 61: goto tr165 case 92: - goto st69 + goto st68 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st457 + goto st460 } - goto st67 + goto st66 +tr870: + ( m.cs) = 457 +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr710: + ( m.cs) = 457 +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr867: + ( m.cs) = 457 +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:149 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr705: + ( m.cs) = 457 +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:149 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; 
goto _out } + } + + goto _again st457: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof457 } st_case_457: +//line plugins/parsers/influx/machine.go:16088 switch ( m.data)[( m.p)] { case 9: - goto tr686 + goto st457 case 10: - goto tr659 + goto tr275 case 11: - goto tr687 + goto tr709 case 12: - goto tr553 + goto st305 case 13: - goto tr661 + goto st103 case 32: - goto tr686 + goto st457 case 34: - goto tr208 + goto tr97 case 44: - goto tr158 + goto st6 case 61: - goto tr165 + goto st6 case 92: - goto st69 + goto tr163 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st458 - } - goto st67 + goto tr160 +tr709: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st458 st458: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof458 } st_case_458: +//line plugins/parsers/influx/machine.go:16123 switch ( m.data)[( m.p)] { case 9: - goto tr686 + goto st457 case 10: - goto tr659 + goto tr275 case 11: - goto tr687 + goto tr709 case 12: - goto tr553 + goto st305 case 13: - goto tr661 + goto st103 case 32: - goto tr686 + goto st457 case 34: - goto tr208 + goto tr97 case 44: - goto tr158 + goto st6 case 61: goto tr165 case 92: - goto st69 + goto tr163 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st459 - } - goto st67 + goto tr160 +tr711: + ( m.cs) = 459 +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto _again +tr706: + ( m.cs) = 459 +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:149 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again st459: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof459 } st_case_459: +//line plugins/parsers/influx/machine.go:16192 switch ( m.data)[( m.p)] { case 9: - goto tr686 + goto tr710 case 10: - goto tr659 + goto tr275 case 11: - goto tr687 + goto tr711 case 12: - goto tr553 + goto tr575 case 13: - goto tr661 + goto st103 case 32: - goto tr686 + goto tr710 case 34: - goto tr208 + goto tr204 case 44: goto tr158 case 61: goto tr165 case 92: - goto st69 + goto tr205 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st460 - } - goto st67 + goto tr202 st460: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof460 @@ -16276,17 +16219,17 @@ tr687: st_case_460: switch ( m.data)[( m.p)] { case 9: - goto tr686 + goto tr705 case 10: - goto tr659 + goto tr678 case 11: - goto tr687 + goto tr706 case 12: - goto tr553 + goto tr572 case 13: - goto tr661 + goto tr680 case 32: - goto tr686 + goto tr705 case 34: goto tr208 case 44: @@ -16294,12 +16237,12 @@ tr687: case 61: goto tr165 case 92: - goto st69 + goto st68 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st461 } - goto st67 + goto st66 st461: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof461 @@ -16307,17 +16250,17 @@ tr687: st_case_461: switch ( m.data)[( m.p)] { case 9: - goto tr686 + goto tr705 case 10: - goto tr659 + goto tr678 case 11: - goto tr687 + goto tr706 case 12: - goto tr553 + goto tr572 case 13: - goto tr661 + goto tr680 case 32: - goto tr686 + goto tr705 case 34: goto tr208 case 44: @@ -16325,12 +16268,12 @@ tr687: case 61: goto tr165 case 92: - goto st69 + goto st68 } if 48 <= ( m.data)[( m.p)] && ( 
m.data)[( m.p)] <= 57 { goto st462 } - goto st67 + goto st66 st462: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof462 @@ -16338,17 +16281,17 @@ tr687: st_case_462: switch ( m.data)[( m.p)] { case 9: - goto tr686 + goto tr705 case 10: - goto tr659 + goto tr678 case 11: - goto tr687 + goto tr706 case 12: - goto tr553 + goto tr572 case 13: - goto tr661 + goto tr680 case 32: - goto tr686 + goto tr705 case 34: goto tr208 case 44: @@ -16356,12 +16299,12 @@ tr687: case 61: goto tr165 case 92: - goto st69 + goto st68 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st463 } - goto st67 + goto st66 st463: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof463 @@ -16369,17 +16312,17 @@ tr687: st_case_463: switch ( m.data)[( m.p)] { case 9: - goto tr686 + goto tr705 case 10: - goto tr659 + goto tr678 case 11: - goto tr687 + goto tr706 case 12: - goto tr553 + goto tr572 case 13: - goto tr661 + goto tr680 case 32: - goto tr686 + goto tr705 case 34: goto tr208 case 44: @@ -16387,12 +16330,12 @@ tr687: case 61: goto tr165 case 92: - goto st69 + goto st68 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st464 } - goto st67 + goto st66 st464: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof464 @@ -16400,17 +16343,17 @@ tr687: st_case_464: switch ( m.data)[( m.p)] { case 9: - goto tr686 + goto tr705 case 10: - goto tr659 + goto tr678 case 11: - goto tr687 + goto tr706 case 12: - goto tr553 + goto tr572 case 13: - goto tr661 + goto tr680 case 32: - goto tr686 + goto tr705 case 34: goto tr208 case 44: @@ -16418,12 +16361,12 @@ tr687: case 61: goto tr165 case 92: - goto st69 + goto st68 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st465 } - goto st67 + goto st66 st465: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof465 @@ -16431,17 +16374,17 @@ tr687: st_case_465: switch ( m.data)[( m.p)] { case 9: - goto tr686 + goto tr705 case 10: - goto tr659 + goto tr678 case 11: - goto tr687 + goto tr706 case 12: - goto tr553 + goto tr572 case 13: - goto tr661 + goto tr680 case 32: - goto tr686 + goto tr705 case 34: goto tr208 case 44: @@ -16449,12 +16392,12 @@ tr687: case 61: goto tr165 case 92: - goto st69 + goto st68 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st466 } - goto st67 + goto st66 st466: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof466 @@ -16462,17 +16405,17 @@ tr687: st_case_466: switch ( m.data)[( m.p)] { case 9: - goto tr686 + goto tr705 case 10: - goto tr659 + goto tr678 case 11: - goto tr687 + goto tr706 case 12: - goto tr553 + goto tr572 case 13: - goto tr661 + goto tr680 case 32: - goto tr686 + goto tr705 case 34: goto tr208 case 44: @@ -16480,225 +16423,566 @@ tr687: case 61: goto tr165 case 92: - goto st69 + goto st68 } - goto st67 -tr653: - ( m.cs) = 108 -//line plugins/parsers/influx/machine.go.rl:90 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st467 + } + goto st66 + st467: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof467 + } + st_case_467: + switch ( m.data)[( m.p)] { + case 9: + goto tr705 + case 10: + goto tr678 + case 11: + goto tr706 + case 12: + goto tr572 + case 13: + goto tr680 + case 32: + goto tr705 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st68 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st468 + } + goto st66 + st468: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof468 + } + st_case_468: + switch ( m.data)[( m.p)] { + case 9: + goto tr705 + case 10: + goto tr678 + case 11: + goto tr706 + case 12: + goto tr572 + case 13: + 
goto tr680 + case 32: + goto tr705 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st68 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st469 + } + goto st66 + st469: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof469 + } + st_case_469: + switch ( m.data)[( m.p)] { + case 9: + goto tr705 + case 10: + goto tr678 + case 11: + goto tr706 + case 12: + goto tr572 + case 13: + goto tr680 + case 32: + goto tr705 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st68 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st470 + } + goto st66 + st470: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof470 + } + st_case_470: + switch ( m.data)[( m.p)] { + case 9: + goto tr705 + case 10: + goto tr678 + case 11: + goto tr706 + case 12: + goto tr572 + case 13: + goto tr680 + case 32: + goto tr705 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st68 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st471 + } + goto st66 + st471: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof471 + } + st_case_471: + switch ( m.data)[( m.p)] { + case 9: + goto tr705 + case 10: + goto tr678 + case 11: + goto tr706 + case 12: + goto tr572 + case 13: + goto tr680 + case 32: + goto tr705 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st68 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st472 + } + goto st66 + st472: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof472 + } + st_case_472: + switch ( m.data)[( m.p)] { + case 9: + goto tr705 + case 10: + goto tr678 + case 11: + goto tr706 + case 12: + goto tr572 + case 13: + goto tr680 + case 32: + goto tr705 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st68 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st473 + } + goto st66 + st473: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof473 + } + st_case_473: + switch ( m.data)[( m.p)] { + case 9: + goto tr705 + case 10: + goto tr678 + case 11: + goto tr706 + case 12: + goto tr572 + case 13: + goto tr680 + case 32: + goto tr705 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st68 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st474 + } + goto st66 + st474: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof474 + } + st_case_474: + switch ( m.data)[( m.p)] { + case 9: + goto tr705 + case 10: + goto tr678 + case 11: + goto tr706 + case 12: + goto tr572 + case 13: + goto tr680 + case 32: + goto tr705 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st68 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st475 + } + goto st66 + st475: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof475 + } + st_case_475: + switch ( m.data)[( m.p)] { + case 9: + goto tr705 + case 10: + goto tr678 + case 11: + goto tr706 + case 12: + goto tr572 + case 13: + goto tr680 + case 32: + goto tr705 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st68 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st476 + } + goto st66 + st476: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof476 + } + st_case_476: + switch ( m.data)[( m.p)] { + case 9: + goto tr705 + case 10: + goto tr678 + case 11: + goto tr706 + case 12: + goto tr572 + case 13: + goto tr680 + case 32: + 
goto tr705 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st68 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st477 + } + goto st66 + st477: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof477 + } + st_case_477: + switch ( m.data)[( m.p)] { + case 9: + goto tr705 + case 10: + goto tr678 + case 11: + goto tr706 + case 12: + goto tr572 + case 13: + goto tr680 + case 32: + goto tr705 + case 34: + goto tr208 + case 44: + goto tr158 + case 61: + goto tr165 + case 92: + goto st68 + } + goto st66 +tr672: + ( m.cs) = 107 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again -tr839: - ( m.cs) = 108 -//line plugins/parsers/influx/machine.go.rl:77 +tr861: + ( m.cs) = 107 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:121 +//line plugins/parsers/influx/machine.go.rl:122 - err = m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr713: - ( m.cs) = 108 -//line plugins/parsers/influx/machine.go.rl:90 +tr732: + ( m.cs) = 107 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:121 +//line plugins/parsers/influx/machine.go.rl:122 - err = m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr726: - ( m.cs) = 108 -//line plugins/parsers/influx/machine.go.rl:90 +tr745: + ( m.cs) = 107 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:103 +//line plugins/parsers/influx/machine.go.rl:104 - err = m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr733: - ( m.cs) = 108 -//line plugins/parsers/influx/machine.go.rl:90 +tr752: + ( m.cs) = 107 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:112 +//line plugins/parsers/influx/machine.go.rl:113 - err = m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr740: - ( m.cs) = 108 -//line plugins/parsers/influx/machine.go.rl:90 +tr759: + ( m.cs) = 107 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line 
plugins/parsers/influx/machine.go.rl:130 +//line plugins/parsers/influx/machine.go.rl:131 - err = m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr871: - ( m.cs) = 108 -//line plugins/parsers/influx/machine.go.rl:77 +tr893: + ( m.cs) = 107 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:103 +//line plugins/parsers/influx/machine.go.rl:104 - err = m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr875: - ( m.cs) = 108 -//line plugins/parsers/influx/machine.go.rl:77 +tr897: + ( m.cs) = 107 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:112 +//line plugins/parsers/influx/machine.go.rl:113 - err = m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr879: - ( m.cs) = 108 -//line plugins/parsers/influx/machine.go.rl:77 +tr902: + ( m.cs) = 107 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:130 +//line plugins/parsers/influx/machine.go.rl:131 - err = m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st108: + st107: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof108 + goto _test_eof107 } - st_case_108: -//line plugins/parsers/influx/machine.go:16693 + st_case_107: +//line plugins/parsers/influx/machine.go:16977 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto st7 + goto tr29 case 12: goto tr47 case 13: - goto st8 + goto st7 case 32: goto st6 case 34: @@ -16712,26 +16996,26 @@ tr879: } goto tr278 tr278: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st109 - st109: + goto st108 + st108: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof109 + goto _test_eof108 } - st_case_109: -//line plugins/parsers/influx/machine.go:16726 + st_case_108: +//line plugins/parsers/influx/machine.go:17010 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto st7 + goto tr29 case 12: goto tr47 case 13: - goto st8 + goto st7 case 32: goto st6 case 34: @@ -16741,34 +17025,34 @@ tr278: case 61: goto tr281 case 92: - goto st123 + goto st122 } - goto st109 + goto st108 tr281: -//line plugins/parsers/influx/machine.go.rl:86 +//line plugins/parsers/influx/machine.go.rl:87 - key = m.text() + m.key = m.text() -//line plugins/parsers/influx/machine.go.rl:99 +//line plugins/parsers/influx/machine.go.rl:100 - key = m.text() + m.key = m.text() - goto st110 - st110: + goto st109 + st109: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof110 + goto _test_eof109 } - st_case_110: -//line plugins/parsers/influx/machine.go:16763 + st_case_109: +//line plugins/parsers/influx/machine.go:17047 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto st7 + goto tr29 case 
12: goto tr47 case 13: - goto st8 + goto st7 case 32: goto st6 case 34: @@ -16799,7 +17083,49 @@ tr281: } goto tr148 tr283: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st110 + st110: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof110 + } + st_case_110: +//line plugins/parsers/influx/machine.go:17097 + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto tr29 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st7 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 46: + goto st111 + case 48: + goto st482 + case 61: + goto st6 + case 92: + goto st63 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st485 + } + goto st48 +tr284: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p @@ -16809,110 +17135,178 @@ tr283: goto _test_eof111 } st_case_111: -//line plugins/parsers/influx/machine.go:16813 +//line plugins/parsers/influx/machine.go:17139 switch ( m.data)[( m.p)] { case 9: goto tr155 case 10: - goto st7 + goto tr29 case 11: goto tr156 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr155 case 34: goto tr157 case 44: goto tr158 - case 46: - goto st112 - case 48: - goto st471 case 61: goto st6 case 92: - goto st64 + goto st63 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st474 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st478 } - goto st49 -tr284: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st112 + goto st48 + st478: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof478 + } + st_case_478: + switch ( m.data)[( m.p)] { + case 9: + goto tr729 + case 10: + goto tr534 + case 11: + goto tr730 + case 12: + goto tr731 + case 13: + goto tr536 + case 32: + goto tr729 + case 34: + goto tr157 + case 44: + goto tr732 + case 61: + goto st6 + case 69: + goto st112 + case 92: + goto st63 + case 101: + goto st112 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st478 + } + goto st48 st112: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof112 } st_case_112: -//line plugins/parsers/influx/machine.go:16855 switch ( m.data)[( m.p)] { case 9: goto tr155 case 10: - goto st7 + goto tr29 case 11: goto tr156 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr155 case 34: - goto tr157 + goto tr295 case 44: goto tr158 case 61: goto st6 case 92: - goto st64 + goto st63 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st467 + switch { + case ( m.data)[( m.p)] > 45: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st481 + } + case ( m.data)[( m.p)] >= 43: + goto st113 } - goto st49 - st467: + goto st48 +tr295: + ( m.cs) = 479 +//line plugins/parsers/influx/machine.go.rl:140 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st479: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof467 + goto _test_eof479 } - st_case_467: + st_case_479: +//line plugins/parsers/influx/machine.go:17255 switch ( m.data)[( m.p)] { - case 9: - goto tr710 case 10: - goto tr515 + goto tr103 case 11: - goto tr711 - case 12: - goto tr712 + goto tr567 case 13: - goto tr517 + goto st33 case 32: - goto tr710 - case 34: - goto tr157 + goto tr566 case 44: - goto tr713 + goto tr568 case 61: - goto st6 - case 69: - goto st113 + goto tr132 case 92: - goto st64 - case 101: - goto st113 + goto st22 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - 
goto st467 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st480 + } + case ( m.data)[( m.p)] >= 9: + goto tr566 } - goto st49 + goto st16 + st480: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof480 + } + st_case_480: + switch ( m.data)[( m.p)] { + case 10: + goto tr734 + case 11: + goto tr735 + case 13: + goto tr736 + case 32: + goto tr731 + case 44: + goto tr737 + case 61: + goto tr132 + case 92: + goto st22 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st480 + } + case ( m.data)[( m.p)] >= 9: + goto tr731 + } + goto st16 st113: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof113 @@ -16922,123 +17316,13 @@ tr284: case 9: goto tr155 case 10: - goto st7 + goto tr29 case 11: goto tr156 case 12: goto tr60 case 13: - goto st8 - case 32: - goto tr155 - case 34: - goto tr295 - case 44: - goto tr158 - case 61: - goto st6 - case 92: - goto st64 - } - switch { - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st470 - } - case ( m.data)[( m.p)] >= 43: - goto st114 - } - goto st49 -tr295: - ( m.cs) = 468 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddString(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st468: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof468 - } - st_case_468: -//line plugins/parsers/influx/machine.go:16971 - switch ( m.data)[( m.p)] { - case 10: - goto st262 - case 11: - goto tr548 - case 13: - goto st34 - case 32: - goto tr547 - case 44: - goto tr549 - case 61: - goto tr132 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st469 - } - case ( m.data)[( m.p)] >= 9: - goto tr547 - } - goto st17 - st469: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof469 - } - st_case_469: - switch ( m.data)[( m.p)] { - case 10: - goto tr715 - case 11: - goto tr716 - case 13: - goto tr717 - case 32: - goto tr712 - case 44: - goto tr718 - case 61: - goto tr132 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st469 - } - case ( m.data)[( m.p)] >= 9: - goto tr712 - } - goto st17 - st114: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof114 - } - st_case_114: - switch ( m.data)[( m.p)] { - case 9: - goto tr155 - case 10: goto st7 - case 11: - goto tr156 - case 12: - goto tr60 - case 13: - goto st8 case 32: goto tr155 case 34: @@ -17048,867 +17332,82 @@ tr295: case 61: goto st6 case 92: - goto st64 + goto st63 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st470 + goto st481 } - goto st49 - st470: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof470 - } - st_case_470: - switch ( m.data)[( m.p)] { - case 9: - goto tr710 - case 10: - goto tr515 - case 11: - goto tr711 - case 12: - goto tr712 - case 13: - goto tr517 - case 32: - goto tr710 - case 34: - goto tr157 - case 44: - goto tr713 - case 61: - goto st6 - case 92: - goto st64 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st470 - } - goto st49 - st471: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof471 - } - st_case_471: - switch ( m.data)[( m.p)] { - case 9: - goto tr710 - case 10: - goto tr515 - case 11: - goto tr711 - case 12: - goto tr712 - case 13: - goto tr517 - case 32: - goto tr710 - case 34: - goto tr157 - case 44: - goto tr713 - case 46: - goto st467 - case 61: - 
goto st6 - case 69: - goto st113 - case 92: - goto st64 - case 101: - goto st113 - case 105: - goto st473 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st472 - } - goto st49 - st472: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof472 - } - st_case_472: - switch ( m.data)[( m.p)] { - case 9: - goto tr710 - case 10: - goto tr515 - case 11: - goto tr711 - case 12: - goto tr712 - case 13: - goto tr517 - case 32: - goto tr710 - case 34: - goto tr157 - case 44: - goto tr713 - case 46: - goto st467 - case 61: - goto st6 - case 69: - goto st113 - case 92: - goto st64 - case 101: - goto st113 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st472 - } - goto st49 - st473: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof473 - } - st_case_473: - switch ( m.data)[( m.p)] { - case 9: - goto tr721 - case 10: - goto tr722 - case 11: - goto tr723 - case 12: - goto tr724 - case 13: - goto tr725 - case 32: - goto tr721 - case 34: - goto tr157 - case 44: - goto tr726 - case 61: - goto st6 - case 92: - goto st64 - } - goto st49 - st474: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof474 - } - st_case_474: - switch ( m.data)[( m.p)] { - case 9: - goto tr710 - case 10: - goto tr515 - case 11: - goto tr711 - case 12: - goto tr712 - case 13: - goto tr517 - case 32: - goto tr710 - case 34: - goto tr157 - case 44: - goto tr713 - case 46: - goto st467 - case 61: - goto st6 - case 69: - goto st113 - case 92: - goto st64 - case 101: - goto st113 - case 105: - goto st473 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st474 - } - goto st49 -tr285: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st475 - st475: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof475 - } - st_case_475: -//line plugins/parsers/influx/machine.go:17243 - switch ( m.data)[( m.p)] { - case 9: - goto tr710 - case 10: - goto tr515 - case 11: - goto tr711 - case 12: - goto tr712 - case 13: - goto tr517 - case 32: - goto tr710 - case 34: - goto tr157 - case 44: - goto tr713 - case 46: - goto st467 - case 61: - goto st6 - case 69: - goto st113 - case 92: - goto st64 - case 101: - goto st113 - case 105: - goto st473 - case 117: - goto st476 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st472 - } - goto st49 - st476: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof476 - } - st_case_476: - switch ( m.data)[( m.p)] { - case 9: - goto tr728 - case 10: - goto tr729 - case 11: - goto tr730 - case 12: - goto tr731 - case 13: - goto tr732 - case 32: - goto tr728 - case 34: - goto tr157 - case 44: - goto tr733 - case 61: - goto st6 - case 92: - goto st64 - } - goto st49 -tr286: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st477 - st477: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof477 - } - st_case_477: -//line plugins/parsers/influx/machine.go:17319 - switch ( m.data)[( m.p)] { - case 9: - goto tr710 - case 10: - goto tr515 - case 11: - goto tr711 - case 12: - goto tr712 - case 13: - goto tr517 - case 32: - goto tr710 - case 34: - goto tr157 - case 44: - goto tr713 - case 46: - goto st467 - case 61: - goto st6 - case 69: - goto st113 - case 92: - goto st64 - case 101: - goto st113 - case 105: - goto st473 - case 117: - goto st476 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st477 - } - goto st49 -tr287: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st478 - st478: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof478 - } - st_case_478: -//line 
plugins/parsers/influx/machine.go:17367 - switch ( m.data)[( m.p)] { - case 9: - goto tr735 - case 10: - goto tr736 - case 11: - goto tr737 - case 12: - goto tr738 - case 13: - goto tr739 - case 32: - goto tr735 - case 34: - goto tr157 - case 44: - goto tr740 - case 61: - goto st6 - case 65: - goto st115 - case 92: - goto st64 - case 97: - goto st118 - } - goto st49 - st115: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof115 - } - st_case_115: - switch ( m.data)[( m.p)] { - case 9: - goto tr155 - case 10: - goto st7 - case 11: - goto tr156 - case 12: - goto tr60 - case 13: - goto st8 - case 32: - goto tr155 - case 34: - goto tr157 - case 44: - goto tr158 - case 61: - goto st6 - case 76: - goto st116 - case 92: - goto st64 - } - goto st49 - st116: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof116 - } - st_case_116: - switch ( m.data)[( m.p)] { - case 9: - goto tr155 - case 10: - goto st7 - case 11: - goto tr156 - case 12: - goto tr60 - case 13: - goto st8 - case 32: - goto tr155 - case 34: - goto tr157 - case 44: - goto tr158 - case 61: - goto st6 - case 83: - goto st117 - case 92: - goto st64 - } - goto st49 - st117: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof117 - } - st_case_117: - switch ( m.data)[( m.p)] { - case 9: - goto tr155 - case 10: - goto st7 - case 11: - goto tr156 - case 12: - goto tr60 - case 13: - goto st8 - case 32: - goto tr155 - case 34: - goto tr157 - case 44: - goto tr158 - case 61: - goto st6 - case 69: - goto st479 - case 92: - goto st64 - } - goto st49 - st479: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof479 - } - st_case_479: - switch ( m.data)[( m.p)] { - case 9: - goto tr735 - case 10: - goto tr736 - case 11: - goto tr737 - case 12: - goto tr738 - case 13: - goto tr739 - case 32: - goto tr735 - case 34: - goto tr157 - case 44: - goto tr740 - case 61: - goto st6 - case 92: - goto st64 - } - goto st49 - st118: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof118 - } - st_case_118: - switch ( m.data)[( m.p)] { - case 9: - goto tr155 - case 10: - goto st7 - case 11: - goto tr156 - case 12: - goto tr60 - case 13: - goto st8 - case 32: - goto tr155 - case 34: - goto tr157 - case 44: - goto tr158 - case 61: - goto st6 - case 92: - goto st64 - case 108: - goto st119 - } - goto st49 - st119: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof119 - } - st_case_119: - switch ( m.data)[( m.p)] { - case 9: - goto tr155 - case 10: - goto st7 - case 11: - goto tr156 - case 12: - goto tr60 - case 13: - goto st8 - case 32: - goto tr155 - case 34: - goto tr157 - case 44: - goto tr158 - case 61: - goto st6 - case 92: - goto st64 - case 115: - goto st120 - } - goto st49 - st120: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof120 - } - st_case_120: - switch ( m.data)[( m.p)] { - case 9: - goto tr155 - case 10: - goto st7 - case 11: - goto tr156 - case 12: - goto tr60 - case 13: - goto st8 - case 32: - goto tr155 - case 34: - goto tr157 - case 44: - goto tr158 - case 61: - goto st6 - case 92: - goto st64 - case 101: - goto st479 - } - goto st49 -tr288: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st480 - st480: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof480 - } - st_case_480: -//line plugins/parsers/influx/machine.go:17614 - switch ( m.data)[( m.p)] { - case 9: - goto tr735 - case 10: - goto tr736 - case 11: - goto tr737 - case 12: - goto tr738 - case 13: - goto tr739 - case 32: - goto tr735 - case 34: - goto tr157 - case 44: - goto tr740 - case 61: - goto st6 - case 82: - goto st121 - case 92: - goto st64 - case 114: - 
goto st122 - } - goto st49 - st121: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof121 - } - st_case_121: - switch ( m.data)[( m.p)] { - case 9: - goto tr155 - case 10: - goto st7 - case 11: - goto tr156 - case 12: - goto tr60 - case 13: - goto st8 - case 32: - goto tr155 - case 34: - goto tr157 - case 44: - goto tr158 - case 61: - goto st6 - case 85: - goto st117 - case 92: - goto st64 - } - goto st49 - st122: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof122 - } - st_case_122: - switch ( m.data)[( m.p)] { - case 9: - goto tr155 - case 10: - goto st7 - case 11: - goto tr156 - case 12: - goto tr60 - case 13: - goto st8 - case 32: - goto tr155 - case 34: - goto tr157 - case 44: - goto tr158 - case 61: - goto st6 - case 92: - goto st64 - case 117: - goto st120 - } - goto st49 -tr289: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st481 + goto st48 st481: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof481 } st_case_481: -//line plugins/parsers/influx/machine.go:17713 switch ( m.data)[( m.p)] { case 9: - goto tr735 + goto tr729 case 10: - goto tr736 + goto tr534 case 11: - goto tr737 + goto tr730 case 12: - goto tr738 + goto tr731 case 13: - goto tr739 + goto tr536 case 32: - goto tr735 + goto tr729 case 34: goto tr157 case 44: - goto tr740 + goto tr732 case 61: goto st6 case 92: - goto st64 - case 97: - goto st118 + goto st63 } - goto st49 -tr290: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st482 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st481 + } + goto st48 st482: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof482 } st_case_482: -//line plugins/parsers/influx/machine.go:17750 switch ( m.data)[( m.p)] { case 9: - goto tr735 + goto tr729 case 10: - goto tr736 + goto tr534 case 11: - goto tr737 + goto tr730 case 12: - goto tr738 + goto tr731 case 13: - goto tr739 + goto tr536 case 32: - goto tr735 + goto tr729 case 34: goto tr157 case 44: - goto tr740 - case 61: - goto st6 - case 92: - goto st64 - case 114: - goto st122 - } - goto st49 -tr279: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st123 - st123: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof123 - } - st_case_123: -//line plugins/parsers/influx/machine.go:17787 - switch ( m.data)[( m.p)] { - case 34: - goto st109 - case 92: - goto st124 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr47 - } - case ( m.data)[( m.p)] >= 9: - goto tr47 - } - goto st46 - st124: -//line plugins/parsers/influx/machine.go.rl:234 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof124 - } - st_case_124: -//line plugins/parsers/influx/machine.go:17811 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto st7 - case 12: - goto tr47 - case 13: - goto st8 - case 32: - goto st6 - case 34: - goto tr261 - case 44: - goto st6 - case 61: - goto tr281 - case 92: - goto st123 - } - goto st109 -tr267: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st125 - st125: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof125 - } - st_case_125: -//line plugins/parsers/influx/machine.go:17844 - switch ( m.data)[( m.p)] { - case 9: - goto tr231 - case 10: - goto st7 - case 11: - goto tr232 - case 12: - goto tr60 - case 13: - goto st8 - case 32: - goto tr231 - case 34: - goto tr157 - case 44: - goto tr233 + goto tr732 case 46: - goto st126 - case 48: - goto st507 + goto st478 case 61: goto st6 + case 69: + goto st112 case 92: - goto st87 - } - if 49 <= ( 
m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st510 - } - goto st81 -tr268: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st126 - st126: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof126 - } - st_case_126: -//line plugins/parsers/influx/machine.go:17886 - switch ( m.data)[( m.p)] { - case 9: - goto tr231 - case 10: - goto st7 - case 11: - goto tr232 - case 12: - goto tr60 - case 13: - goto st8 - case 32: - goto tr231 - case 34: - goto tr157 - case 44: - goto tr233 - case 61: - goto st6 - case 92: - goto st87 + goto st63 + case 101: + goto st112 + case 105: + goto st484 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st483 } - goto st81 + goto st48 st483: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof483 @@ -17916,251 +17415,105 @@ tr268: st_case_483: switch ( m.data)[( m.p)] { case 9: - goto tr745 + goto tr729 case 10: - goto tr620 + goto tr534 case 11: - goto tr746 + goto tr730 case 12: - goto tr712 + goto tr731 case 13: - goto tr623 + goto tr536 case 32: - goto tr745 + goto tr729 case 34: goto tr157 case 44: - goto tr747 + goto tr732 + case 46: + goto st478 case 61: goto st6 case 69: - goto st128 + goto st112 case 92: - goto st87 + goto st63 case 101: - goto st128 + goto st112 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st483 } - goto st81 -tr746: - ( m.cs) = 484 -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddFloat(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr779: - ( m.cs) = 484 -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:103 - - err = m.handler.AddInt(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr785: - ( m.cs) = 484 -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddUint(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr791: - ( m.cs) = 484 -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddBool(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again + goto st48 st484: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof484 } st_case_484: -//line plugins/parsers/influx/machine.go:18045 switch ( m.data)[( m.p)] { case 9: - goto tr749 + goto tr740 case 10: - goto st288 + goto tr741 case 11: - goto tr750 + goto tr742 case 12: - goto tr547 + goto tr743 case 13: - goto st74 + goto tr744 case 32: - goto tr749 + goto tr740 case 34: - goto tr204 + goto tr157 case 44: - goto tr233 - case 45: - goto tr751 + goto tr745 case 61: goto st6 case 92: - goto tr237 + goto st63 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr752 - } - goto tr235 -tr750: - ( m.cs) = 485 -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, 
m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto _again + goto st48 st485: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof485 } st_case_485: -//line plugins/parsers/influx/machine.go:18096 switch ( m.data)[( m.p)] { case 9: - goto tr749 + goto tr729 case 10: - goto st288 + goto tr534 case 11: - goto tr750 + goto tr730 case 12: - goto tr547 + goto tr731 case 13: - goto st74 + goto tr536 case 32: - goto tr749 + goto tr729 case 34: - goto tr204 + goto tr157 case 44: - goto tr233 - case 45: - goto tr751 + goto tr732 + case 46: + goto st478 case 61: - goto tr101 + goto st6 + case 69: + goto st112 case 92: - goto tr237 + goto st63 + case 101: + goto st112 + case 105: + goto st484 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr752 + goto st485 } - goto tr235 -tr751: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st127 - st127: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof127 - } - st_case_127: -//line plugins/parsers/influx/machine.go:18136 - switch ( m.data)[( m.p)] { - case 9: - goto tr231 - case 10: - goto st7 - case 11: - goto tr239 - case 12: - goto tr60 - case 13: - goto st8 - case 32: - goto tr231 - case 34: - goto tr208 - case 44: - goto tr233 - case 61: - goto tr101 - case 92: - goto st85 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st486 - } - goto st83 -tr752: -//line plugins/parsers/influx/machine.go.rl:19 + goto st48 +tr285: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p @@ -18170,20 +17523,913 @@ tr752: goto _test_eof486 } st_case_486: -//line plugins/parsers/influx/machine.go:18174 +//line plugins/parsers/influx/machine.go:17527 switch ( m.data)[( m.p)] { case 9: - goto tr753 + goto tr729 case 10: - goto tr584 + goto tr534 case 11: - goto tr754 + goto tr730 case 12: - goto tr553 + goto tr731 case 13: - goto tr586 + goto tr536 case 32: - goto tr753 + goto tr729 + case 34: + goto tr157 + case 44: + goto tr732 + case 46: + goto st478 + case 61: + goto st6 + case 69: + goto st112 + case 92: + goto st63 + case 101: + goto st112 + case 105: + goto st484 + case 117: + goto st487 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st483 + } + goto st48 + st487: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof487 + } + st_case_487: + switch ( m.data)[( m.p)] { + case 9: + goto tr747 + case 10: + goto tr748 + case 11: + goto tr749 + case 12: + goto tr750 + case 13: + goto tr751 + case 32: + goto tr747 + case 34: + goto tr157 + case 44: + goto tr752 + case 61: + goto st6 + case 92: + goto st63 + } + goto st48 +tr286: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st488 + st488: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof488 + } + st_case_488: +//line plugins/parsers/influx/machine.go:17603 + switch ( m.data)[( m.p)] { + case 9: + goto tr729 + case 10: + goto tr534 + case 11: + goto tr730 + case 12: + goto tr731 + case 13: + goto tr536 + case 32: + goto tr729 + case 34: + goto tr157 + case 44: + goto tr732 + case 46: + goto st478 + case 61: + goto st6 + case 69: + goto st112 + case 92: + goto st63 + case 101: + goto st112 + case 105: + goto st484 + case 117: + goto st487 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st488 + } + goto st48 +tr287: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st489 + st489: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof489 + } + st_case_489: +//line 
plugins/parsers/influx/machine.go:17651 + switch ( m.data)[( m.p)] { + case 9: + goto tr754 + case 10: + goto tr755 + case 11: + goto tr756 + case 12: + goto tr757 + case 13: + goto tr758 + case 32: + goto tr754 + case 34: + goto tr157 + case 44: + goto tr759 + case 61: + goto st6 + case 65: + goto st114 + case 92: + goto st63 + case 97: + goto st117 + } + goto st48 + st114: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof114 + } + st_case_114: + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto tr29 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st7 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 76: + goto st115 + case 92: + goto st63 + } + goto st48 + st115: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof115 + } + st_case_115: + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto tr29 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st7 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 83: + goto st116 + case 92: + goto st63 + } + goto st48 + st116: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof116 + } + st_case_116: + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto tr29 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st7 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 69: + goto st490 + case 92: + goto st63 + } + goto st48 + st490: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof490 + } + st_case_490: + switch ( m.data)[( m.p)] { + case 9: + goto tr754 + case 10: + goto tr755 + case 11: + goto tr756 + case 12: + goto tr757 + case 13: + goto tr758 + case 32: + goto tr754 + case 34: + goto tr157 + case 44: + goto tr759 + case 61: + goto st6 + case 92: + goto st63 + } + goto st48 + st117: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof117 + } + st_case_117: + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto tr29 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st7 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 92: + goto st63 + case 108: + goto st118 + } + goto st48 + st118: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof118 + } + st_case_118: + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto tr29 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st7 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 92: + goto st63 + case 115: + goto st119 + } + goto st48 + st119: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof119 + } + st_case_119: + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto tr29 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st7 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 92: + goto st63 + case 101: + goto st490 + } + goto st48 +tr288: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st491 + st491: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof491 + } + st_case_491: +//line plugins/parsers/influx/machine.go:17898 + switch ( m.data)[( m.p)] { + case 9: + goto tr754 + case 10: + goto tr755 + case 11: + goto tr756 + case 12: + goto tr757 + case 13: + goto tr758 + case 32: + goto tr754 + case 34: + goto tr157 + case 44: + goto tr759 + case 61: + goto st6 + case 82: + goto st120 + case 92: + goto st63 + case 
114: + goto st121 + } + goto st48 + st120: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof120 + } + st_case_120: + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto tr29 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st7 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 85: + goto st116 + case 92: + goto st63 + } + goto st48 + st121: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof121 + } + st_case_121: + switch ( m.data)[( m.p)] { + case 9: + goto tr155 + case 10: + goto tr29 + case 11: + goto tr156 + case 12: + goto tr60 + case 13: + goto st7 + case 32: + goto tr155 + case 34: + goto tr157 + case 44: + goto tr158 + case 61: + goto st6 + case 92: + goto st63 + case 117: + goto st119 + } + goto st48 +tr289: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st492 + st492: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof492 + } + st_case_492: +//line plugins/parsers/influx/machine.go:17997 + switch ( m.data)[( m.p)] { + case 9: + goto tr754 + case 10: + goto tr755 + case 11: + goto tr756 + case 12: + goto tr757 + case 13: + goto tr758 + case 32: + goto tr754 + case 34: + goto tr157 + case 44: + goto tr759 + case 61: + goto st6 + case 92: + goto st63 + case 97: + goto st117 + } + goto st48 +tr290: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st493 + st493: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof493 + } + st_case_493: +//line plugins/parsers/influx/machine.go:18034 + switch ( m.data)[( m.p)] { + case 9: + goto tr754 + case 10: + goto tr755 + case 11: + goto tr756 + case 12: + goto tr757 + case 13: + goto tr758 + case 32: + goto tr754 + case 34: + goto tr157 + case 44: + goto tr759 + case 61: + goto st6 + case 92: + goto st63 + case 114: + goto st121 + } + goto st48 +tr279: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st122 + st122: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof122 + } + st_case_122: +//line plugins/parsers/influx/machine.go:18071 + switch ( m.data)[( m.p)] { + case 34: + goto st108 + case 92: + goto st123 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr47 + } + case ( m.data)[( m.p)] >= 9: + goto tr47 + } + goto st45 + st123: +//line plugins/parsers/influx/machine.go.rl:240 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof123 + } + st_case_123: +//line plugins/parsers/influx/machine.go:18095 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr29 + case 12: + goto tr47 + case 13: + goto st7 + case 32: + goto st6 + case 34: + goto tr261 + case 44: + goto st6 + case 61: + goto tr281 + case 92: + goto st122 + } + goto st108 +tr267: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st124 + st124: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof124 + } + st_case_124: +//line plugins/parsers/influx/machine.go:18128 + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto tr29 + case 11: + goto tr232 + case 12: + goto tr60 + case 13: + goto st7 + case 32: + goto tr231 + case 34: + goto tr157 + case 44: + goto tr233 + case 46: + goto st125 + case 48: + goto st518 + case 61: + goto st6 + case 92: + goto st86 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st521 + } + goto st80 +tr268: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st125 + st125: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof125 + } + st_case_125: 
+//line plugins/parsers/influx/machine.go:18170 + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto tr29 + case 11: + goto tr232 + case 12: + goto tr60 + case 13: + goto st7 + case 32: + goto tr231 + case 34: + goto tr157 + case 44: + goto tr233 + case 61: + goto st6 + case 92: + goto st86 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st494 + } + goto st80 + st494: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof494 + } + st_case_494: + switch ( m.data)[( m.p)] { + case 9: + goto tr764 + case 10: + goto tr765 + case 11: + goto tr766 + case 12: + goto tr731 + case 13: + goto tr642 + case 32: + goto tr764 + case 34: + goto tr157 + case 44: + goto tr767 + case 61: + goto st6 + case 69: + goto st127 + case 92: + goto st86 + case 101: + goto st127 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st494 + } + goto st80 +tr766: + ( m.cs) = 495 +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:122 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr799: + ( m.cs) = 495 +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:104 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr805: + ( m.cs) = 495 +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:113 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr811: + ( m.cs) = 495 +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:131 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st495: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof495 + } + st_case_495: +//line plugins/parsers/influx/machine.go:18329 + switch ( m.data)[( m.p)] { + case 9: + goto tr769 + case 10: + goto tr221 + case 11: + goto tr770 + case 12: + goto tr566 + case 13: + goto st73 + case 32: + goto tr769 + case 34: + goto tr204 + case 44: + goto tr233 + case 45: + goto tr771 + case 61: + goto st6 + case 92: + goto tr237 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr772 + } + goto tr235 +tr770: + ( m.cs) = 496 +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto _again + st496: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof496 + } + st_case_496: +//line plugins/parsers/influx/machine.go:18380 + switch ( m.data)[( m.p)] { + case 9: + goto tr769 + case 10: + goto tr221 + case 11: + goto tr770 + case 12: + goto tr566 + case 13: + goto st73 + case 32: + goto tr769 + case 34: + goto tr204 + case 44: + goto tr233 + 
case 45: + goto tr771 + case 61: + goto tr101 + case 92: + goto tr237 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr772 + } + goto tr235 +tr771: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st126 + st126: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof126 + } + st_case_126: +//line plugins/parsers/influx/machine.go:18420 + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto tr29 + case 11: + goto tr239 + case 12: + goto tr60 + case 13: + goto st7 + case 32: + goto tr231 case 34: goto tr208 case 44: @@ -18191,71 +18437,109 @@ tr752: case 61: goto tr101 case 92: - goto st85 + goto st84 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st488 + goto st497 } - goto st83 -tr757: - ( m.cs) = 487 -//line plugins/parsers/influx/machine.go.rl:90 + goto st82 +tr772: +//line plugins/parsers/influx/machine.go.rl:20 - err = m.handler.AddTag(key, m.text()) + m.pb = m.p + + goto st497 + st497: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof497 + } + st_case_497: +//line plugins/parsers/influx/machine.go:18458 + switch ( m.data)[( m.p)] { + case 9: + goto tr773 + case 10: + goto tr603 + case 11: + goto tr774 + case 12: + goto tr572 + case 13: + goto tr605 + case 32: + goto tr773 + case 34: + goto tr208 + case 44: + goto tr233 + case 61: + goto tr101 + case 92: + goto st84 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st499 + } + goto st82 +tr777: + ( m.cs) = 498 +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again -tr754: - ( m.cs) = 487 -//line plugins/parsers/influx/machine.go.rl:90 +tr774: + ( m.cs) = 498 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:148 +//line plugins/parsers/influx/machine.go.rl:149 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st487: + st498: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof487 + goto _test_eof498 } - st_case_487: -//line plugins/parsers/influx/machine.go:18246 + st_case_498: +//line plugins/parsers/influx/machine.go:18530 switch ( m.data)[( m.p)] { case 9: - goto tr756 + goto tr776 case 10: - goto st288 + goto tr221 case 11: - goto tr757 + goto tr777 case 12: - goto tr556 + goto tr575 case 13: - goto st74 + goto st73 case 32: - goto tr756 + goto tr776 case 34: goto tr204 case 44: @@ -18266,347 +18550,6 @@ tr754: goto tr237 } goto tr235 - st488: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof488 - } - st_case_488: - switch ( m.data)[( m.p)] { - case 9: - goto tr753 - case 10: - goto tr584 - case 11: - goto tr754 - case 12: - goto tr553 - case 13: - goto tr586 - case 32: - goto tr753 - case 34: - goto tr208 - case 44: - goto tr233 - case 61: - goto tr101 - case 92: - goto st85 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st489 - } - goto st83 - st489: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof489 - } - st_case_489: - switch ( m.data)[( m.p)] { - case 9: - goto tr753 - case 10: - goto tr584 - case 11: - goto tr754 - case 12: - goto tr553 - case 13: - goto tr586 - case 32: - goto tr753 
- case 34: - goto tr208 - case 44: - goto tr233 - case 61: - goto tr101 - case 92: - goto st85 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st490 - } - goto st83 - st490: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof490 - } - st_case_490: - switch ( m.data)[( m.p)] { - case 9: - goto tr753 - case 10: - goto tr584 - case 11: - goto tr754 - case 12: - goto tr553 - case 13: - goto tr586 - case 32: - goto tr753 - case 34: - goto tr208 - case 44: - goto tr233 - case 61: - goto tr101 - case 92: - goto st85 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st491 - } - goto st83 - st491: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof491 - } - st_case_491: - switch ( m.data)[( m.p)] { - case 9: - goto tr753 - case 10: - goto tr584 - case 11: - goto tr754 - case 12: - goto tr553 - case 13: - goto tr586 - case 32: - goto tr753 - case 34: - goto tr208 - case 44: - goto tr233 - case 61: - goto tr101 - case 92: - goto st85 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st492 - } - goto st83 - st492: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof492 - } - st_case_492: - switch ( m.data)[( m.p)] { - case 9: - goto tr753 - case 10: - goto tr584 - case 11: - goto tr754 - case 12: - goto tr553 - case 13: - goto tr586 - case 32: - goto tr753 - case 34: - goto tr208 - case 44: - goto tr233 - case 61: - goto tr101 - case 92: - goto st85 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st493 - } - goto st83 - st493: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof493 - } - st_case_493: - switch ( m.data)[( m.p)] { - case 9: - goto tr753 - case 10: - goto tr584 - case 11: - goto tr754 - case 12: - goto tr553 - case 13: - goto tr586 - case 32: - goto tr753 - case 34: - goto tr208 - case 44: - goto tr233 - case 61: - goto tr101 - case 92: - goto st85 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st494 - } - goto st83 - st494: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof494 - } - st_case_494: - switch ( m.data)[( m.p)] { - case 9: - goto tr753 - case 10: - goto tr584 - case 11: - goto tr754 - case 12: - goto tr553 - case 13: - goto tr586 - case 32: - goto tr753 - case 34: - goto tr208 - case 44: - goto tr233 - case 61: - goto tr101 - case 92: - goto st85 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st495 - } - goto st83 - st495: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof495 - } - st_case_495: - switch ( m.data)[( m.p)] { - case 9: - goto tr753 - case 10: - goto tr584 - case 11: - goto tr754 - case 12: - goto tr553 - case 13: - goto tr586 - case 32: - goto tr753 - case 34: - goto tr208 - case 44: - goto tr233 - case 61: - goto tr101 - case 92: - goto st85 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st496 - } - goto st83 - st496: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof496 - } - st_case_496: - switch ( m.data)[( m.p)] { - case 9: - goto tr753 - case 10: - goto tr584 - case 11: - goto tr754 - case 12: - goto tr553 - case 13: - goto tr586 - case 32: - goto tr753 - case 34: - goto tr208 - case 44: - goto tr233 - case 61: - goto tr101 - case 92: - goto st85 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st497 - } - goto st83 - st497: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof497 - } - st_case_497: - switch ( m.data)[( m.p)] { - case 9: - goto tr753 - case 10: - goto tr584 - case 11: - goto tr754 - case 12: - goto tr553 - case 13: - goto tr586 - case 32: - goto tr753 - case 34: - goto tr208 - 
case 44: - goto tr233 - case 61: - goto tr101 - case 92: - goto st85 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st498 - } - goto st83 - st498: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof498 - } - st_case_498: - switch ( m.data)[( m.p)] { - case 9: - goto tr753 - case 10: - goto tr584 - case 11: - goto tr754 - case 12: - goto tr553 - case 13: - goto tr586 - case 32: - goto tr753 - case 34: - goto tr208 - case 44: - goto tr233 - case 61: - goto tr101 - case 92: - goto st85 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st499 - } - goto st83 st499: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof499 @@ -18614,17 +18557,17 @@ tr754: st_case_499: switch ( m.data)[( m.p)] { case 9: - goto tr753 + goto tr773 case 10: - goto tr584 + goto tr603 case 11: - goto tr754 + goto tr774 case 12: - goto tr553 + goto tr572 case 13: - goto tr586 + goto tr605 case 32: - goto tr753 + goto tr773 case 34: goto tr208 case 44: @@ -18632,12 +18575,12 @@ tr754: case 61: goto tr101 case 92: - goto st85 + goto st84 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st500 } - goto st83 + goto st82 st500: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof500 @@ -18645,17 +18588,17 @@ tr754: st_case_500: switch ( m.data)[( m.p)] { case 9: - goto tr753 + goto tr773 case 10: - goto tr584 + goto tr603 case 11: - goto tr754 + goto tr774 case 12: - goto tr553 + goto tr572 case 13: - goto tr586 + goto tr605 case 32: - goto tr753 + goto tr773 case 34: goto tr208 case 44: @@ -18663,12 +18606,12 @@ tr754: case 61: goto tr101 case 92: - goto st85 + goto st84 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st501 } - goto st83 + goto st82 st501: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof501 @@ -18676,17 +18619,17 @@ tr754: st_case_501: switch ( m.data)[( m.p)] { case 9: - goto tr753 + goto tr773 case 10: - goto tr584 + goto tr603 case 11: - goto tr754 + goto tr774 case 12: - goto tr553 + goto tr572 case 13: - goto tr586 + goto tr605 case 32: - goto tr753 + goto tr773 case 34: goto tr208 case 44: @@ -18694,12 +18637,12 @@ tr754: case 61: goto tr101 case 92: - goto st85 + goto st84 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st502 } - goto st83 + goto st82 st502: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof502 @@ -18707,17 +18650,17 @@ tr754: st_case_502: switch ( m.data)[( m.p)] { case 9: - goto tr753 + goto tr773 case 10: - goto tr584 + goto tr603 case 11: - goto tr754 + goto tr774 case 12: - goto tr553 + goto tr572 case 13: - goto tr586 + goto tr605 case 32: - goto tr753 + goto tr773 case 34: goto tr208 case 44: @@ -18725,12 +18668,12 @@ tr754: case 61: goto tr101 case 92: - goto st85 + goto st84 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st503 } - goto st83 + goto st82 st503: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof503 @@ -18738,17 +18681,17 @@ tr754: st_case_503: switch ( m.data)[( m.p)] { case 9: - goto tr753 + goto tr773 case 10: - goto tr584 + goto tr603 case 11: - goto tr754 + goto tr774 case 12: - goto tr553 + goto tr572 case 13: - goto tr586 + goto tr605 case 32: - goto tr753 + goto tr773 case 34: goto tr208 case 44: @@ -18756,12 +18699,12 @@ tr754: case 61: goto tr101 case 92: - goto st85 + goto st84 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st504 } - goto st83 + goto st82 st504: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof504 @@ -18769,17 +18712,17 @@ tr754: st_case_504: switch ( m.data)[( m.p)] { case 9: - goto tr753 + goto tr773 case 10: - goto tr584 + goto tr603 
case 11: - goto tr754 + goto tr774 case 12: - goto tr553 + goto tr572 case 13: - goto tr586 + goto tr605 case 32: - goto tr753 + goto tr773 case 34: goto tr208 case 44: @@ -18787,12 +18730,12 @@ tr754: case 61: goto tr101 case 92: - goto st85 + goto st84 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st505 } - goto st83 + goto st82 st505: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof505 @@ -18800,17 +18743,17 @@ tr754: st_case_505: switch ( m.data)[( m.p)] { case 9: - goto tr753 + goto tr773 case 10: - goto tr584 + goto tr603 case 11: - goto tr754 + goto tr774 case 12: - goto tr553 + goto tr572 case 13: - goto tr586 + goto tr605 case 32: - goto tr753 + goto tr773 case 34: goto tr208 case 44: @@ -18818,25 +18761,366 @@ tr754: case 61: goto tr101 case 92: - goto st85 + goto st84 } - goto st83 - st128: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st506 + } + goto st82 + st506: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof128 + goto _test_eof506 } - st_case_128: + st_case_506: + switch ( m.data)[( m.p)] { + case 9: + goto tr773 + case 10: + goto tr603 + case 11: + goto tr774 + case 12: + goto tr572 + case 13: + goto tr605 + case 32: + goto tr773 + case 34: + goto tr208 + case 44: + goto tr233 + case 61: + goto tr101 + case 92: + goto st84 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st507 + } + goto st82 + st507: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof507 + } + st_case_507: + switch ( m.data)[( m.p)] { + case 9: + goto tr773 + case 10: + goto tr603 + case 11: + goto tr774 + case 12: + goto tr572 + case 13: + goto tr605 + case 32: + goto tr773 + case 34: + goto tr208 + case 44: + goto tr233 + case 61: + goto tr101 + case 92: + goto st84 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st508 + } + goto st82 + st508: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof508 + } + st_case_508: + switch ( m.data)[( m.p)] { + case 9: + goto tr773 + case 10: + goto tr603 + case 11: + goto tr774 + case 12: + goto tr572 + case 13: + goto tr605 + case 32: + goto tr773 + case 34: + goto tr208 + case 44: + goto tr233 + case 61: + goto tr101 + case 92: + goto st84 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st509 + } + goto st82 + st509: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof509 + } + st_case_509: + switch ( m.data)[( m.p)] { + case 9: + goto tr773 + case 10: + goto tr603 + case 11: + goto tr774 + case 12: + goto tr572 + case 13: + goto tr605 + case 32: + goto tr773 + case 34: + goto tr208 + case 44: + goto tr233 + case 61: + goto tr101 + case 92: + goto st84 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st510 + } + goto st82 + st510: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof510 + } + st_case_510: + switch ( m.data)[( m.p)] { + case 9: + goto tr773 + case 10: + goto tr603 + case 11: + goto tr774 + case 12: + goto tr572 + case 13: + goto tr605 + case 32: + goto tr773 + case 34: + goto tr208 + case 44: + goto tr233 + case 61: + goto tr101 + case 92: + goto st84 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st511 + } + goto st82 + st511: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof511 + } + st_case_511: + switch ( m.data)[( m.p)] { + case 9: + goto tr773 + case 10: + goto tr603 + case 11: + goto tr774 + case 12: + goto tr572 + case 13: + goto tr605 + case 32: + goto tr773 + case 34: + goto tr208 + case 44: + goto tr233 + case 61: + goto tr101 + case 92: + goto st84 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( 
m.p)] <= 57 { + goto st512 + } + goto st82 + st512: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof512 + } + st_case_512: + switch ( m.data)[( m.p)] { + case 9: + goto tr773 + case 10: + goto tr603 + case 11: + goto tr774 + case 12: + goto tr572 + case 13: + goto tr605 + case 32: + goto tr773 + case 34: + goto tr208 + case 44: + goto tr233 + case 61: + goto tr101 + case 92: + goto st84 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st513 + } + goto st82 + st513: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof513 + } + st_case_513: + switch ( m.data)[( m.p)] { + case 9: + goto tr773 + case 10: + goto tr603 + case 11: + goto tr774 + case 12: + goto tr572 + case 13: + goto tr605 + case 32: + goto tr773 + case 34: + goto tr208 + case 44: + goto tr233 + case 61: + goto tr101 + case 92: + goto st84 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st514 + } + goto st82 + st514: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof514 + } + st_case_514: + switch ( m.data)[( m.p)] { + case 9: + goto tr773 + case 10: + goto tr603 + case 11: + goto tr774 + case 12: + goto tr572 + case 13: + goto tr605 + case 32: + goto tr773 + case 34: + goto tr208 + case 44: + goto tr233 + case 61: + goto tr101 + case 92: + goto st84 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st515 + } + goto st82 + st515: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof515 + } + st_case_515: + switch ( m.data)[( m.p)] { + case 9: + goto tr773 + case 10: + goto tr603 + case 11: + goto tr774 + case 12: + goto tr572 + case 13: + goto tr605 + case 32: + goto tr773 + case 34: + goto tr208 + case 44: + goto tr233 + case 61: + goto tr101 + case 92: + goto st84 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st516 + } + goto st82 + st516: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof516 + } + st_case_516: + switch ( m.data)[( m.p)] { + case 9: + goto tr773 + case 10: + goto tr603 + case 11: + goto tr774 + case 12: + goto tr572 + case 13: + goto tr605 + case 32: + goto tr773 + case 34: + goto tr208 + case 44: + goto tr233 + case 61: + goto tr101 + case 92: + goto st84 + } + goto st82 + st127: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof127 + } + st_case_127: switch ( m.data)[( m.p)] { case 9: goto tr231 case 10: - goto st7 + goto tr29 case 11: goto tr232 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr231 case 34: @@ -18846,17 +19130,385 @@ tr754: case 61: goto st6 case 92: - goto st87 + goto st86 } switch { case ( m.data)[( m.p)] > 45: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st506 + goto st517 } case ( m.data)[( m.p)] >= 43: - goto st129 + goto st128 } - goto st81 + goto st80 + st128: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof128 + } + st_case_128: + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto tr29 + case 11: + goto tr232 + case 12: + goto tr60 + case 13: + goto st7 + case 32: + goto tr231 + case 34: + goto tr157 + case 44: + goto tr233 + case 61: + goto st6 + case 92: + goto st86 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st517 + } + goto st80 + st517: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof517 + } + st_case_517: + switch ( m.data)[( m.p)] { + case 9: + goto tr764 + case 10: + goto tr765 + case 11: + goto tr766 + case 12: + goto tr731 + case 13: + goto tr642 + case 32: + goto tr764 + case 34: + goto tr157 + case 44: + goto tr767 + case 61: + goto st6 + case 92: + goto st86 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( 
m.p)] <= 57 { + goto st517 + } + goto st80 + st518: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof518 + } + st_case_518: + switch ( m.data)[( m.p)] { + case 9: + goto tr764 + case 10: + goto tr765 + case 11: + goto tr766 + case 12: + goto tr731 + case 13: + goto tr642 + case 32: + goto tr764 + case 34: + goto tr157 + case 44: + goto tr767 + case 46: + goto st494 + case 61: + goto st6 + case 69: + goto st127 + case 92: + goto st86 + case 101: + goto st127 + case 105: + goto st520 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st519 + } + goto st80 + st519: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof519 + } + st_case_519: + switch ( m.data)[( m.p)] { + case 9: + goto tr764 + case 10: + goto tr765 + case 11: + goto tr766 + case 12: + goto tr731 + case 13: + goto tr642 + case 32: + goto tr764 + case 34: + goto tr157 + case 44: + goto tr767 + case 46: + goto st494 + case 61: + goto st6 + case 69: + goto st127 + case 92: + goto st86 + case 101: + goto st127 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st519 + } + goto st80 + st520: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof520 + } + st_case_520: + switch ( m.data)[( m.p)] { + case 9: + goto tr797 + case 10: + goto tr798 + case 11: + goto tr799 + case 12: + goto tr743 + case 13: + goto tr800 + case 32: + goto tr797 + case 34: + goto tr157 + case 44: + goto tr801 + case 61: + goto st6 + case 92: + goto st86 + } + goto st80 + st521: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof521 + } + st_case_521: + switch ( m.data)[( m.p)] { + case 9: + goto tr764 + case 10: + goto tr765 + case 11: + goto tr766 + case 12: + goto tr731 + case 13: + goto tr642 + case 32: + goto tr764 + case 34: + goto tr157 + case 44: + goto tr767 + case 46: + goto st494 + case 61: + goto st6 + case 69: + goto st127 + case 92: + goto st86 + case 101: + goto st127 + case 105: + goto st520 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st521 + } + goto st80 +tr269: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st522 + st522: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof522 + } + st_case_522: +//line plugins/parsers/influx/machine.go:19361 + switch ( m.data)[( m.p)] { + case 9: + goto tr764 + case 10: + goto tr765 + case 11: + goto tr766 + case 12: + goto tr731 + case 13: + goto tr642 + case 32: + goto tr764 + case 34: + goto tr157 + case 44: + goto tr767 + case 46: + goto st494 + case 61: + goto st6 + case 69: + goto st127 + case 92: + goto st86 + case 101: + goto st127 + case 105: + goto st520 + case 117: + goto st523 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st519 + } + goto st80 + st523: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof523 + } + st_case_523: + switch ( m.data)[( m.p)] { + case 9: + goto tr803 + case 10: + goto tr804 + case 11: + goto tr805 + case 12: + goto tr750 + case 13: + goto tr806 + case 32: + goto tr803 + case 34: + goto tr157 + case 44: + goto tr807 + case 61: + goto st6 + case 92: + goto st86 + } + goto st80 +tr270: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st524 + st524: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof524 + } + st_case_524: +//line plugins/parsers/influx/machine.go:19437 + switch ( m.data)[( m.p)] { + case 9: + goto tr764 + case 10: + goto tr765 + case 11: + goto tr766 + case 12: + goto tr731 + case 13: + goto tr642 + case 32: + goto tr764 + case 34: + goto tr157 + case 44: + goto tr767 + case 46: + goto st494 + case 61: + goto st6 + case 69: + goto 
st127 + case 92: + goto st86 + case 101: + goto st127 + case 105: + goto st520 + case 117: + goto st523 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st524 + } + goto st80 +tr271: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st525 + st525: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof525 + } + st_case_525: +//line plugins/parsers/influx/machine.go:19485 + switch ( m.data)[( m.p)] { + case 9: + goto tr809 + case 10: + goto tr810 + case 11: + goto tr811 + case 12: + goto tr757 + case 13: + goto tr812 + case 32: + goto tr809 + case 34: + goto tr157 + case 44: + goto tr813 + case 61: + goto st6 + case 65: + goto st129 + case 92: + goto st86 + case 97: + goto st132 + } + goto st80 st129: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof129 @@ -18866,381 +19518,13 @@ tr754: case 9: goto tr231 case 10: - goto st7 + goto tr29 case 11: goto tr232 case 12: goto tr60 case 13: - goto st8 - case 32: - goto tr231 - case 34: - goto tr157 - case 44: - goto tr233 - case 61: - goto st6 - case 92: - goto st87 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st506 - } - goto st81 - st506: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof506 - } - st_case_506: - switch ( m.data)[( m.p)] { - case 9: - goto tr745 - case 10: - goto tr620 - case 11: - goto tr746 - case 12: - goto tr712 - case 13: - goto tr623 - case 32: - goto tr745 - case 34: - goto tr157 - case 44: - goto tr747 - case 61: - goto st6 - case 92: - goto st87 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st506 - } - goto st81 - st507: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof507 - } - st_case_507: - switch ( m.data)[( m.p)] { - case 9: - goto tr745 - case 10: - goto tr620 - case 11: - goto tr746 - case 12: - goto tr712 - case 13: - goto tr623 - case 32: - goto tr745 - case 34: - goto tr157 - case 44: - goto tr747 - case 46: - goto st483 - case 61: - goto st6 - case 69: - goto st128 - case 92: - goto st87 - case 101: - goto st128 - case 105: - goto st509 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st508 - } - goto st81 - st508: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof508 - } - st_case_508: - switch ( m.data)[( m.p)] { - case 9: - goto tr745 - case 10: - goto tr620 - case 11: - goto tr746 - case 12: - goto tr712 - case 13: - goto tr623 - case 32: - goto tr745 - case 34: - goto tr157 - case 44: - goto tr747 - case 46: - goto st483 - case 61: - goto st6 - case 69: - goto st128 - case 92: - goto st87 - case 101: - goto st128 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st508 - } - goto st81 - st509: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof509 - } - st_case_509: - switch ( m.data)[( m.p)] { - case 9: - goto tr777 - case 10: - goto tr778 - case 11: - goto tr779 - case 12: - goto tr724 - case 13: - goto tr780 - case 32: - goto tr777 - case 34: - goto tr157 - case 44: - goto tr781 - case 61: - goto st6 - case 92: - goto st87 - } - goto st81 - st510: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof510 - } - st_case_510: - switch ( m.data)[( m.p)] { - case 9: - goto tr745 - case 10: - goto tr620 - case 11: - goto tr746 - case 12: - goto tr712 - case 13: - goto tr623 - case 32: - goto tr745 - case 34: - goto tr157 - case 44: - goto tr747 - case 46: - goto st483 - case 61: - goto st6 - case 69: - goto st128 - case 92: - goto st87 - case 101: - goto st128 - case 105: - goto st509 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st510 - } - goto st81 -tr269: 
-//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st511 - st511: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof511 - } - st_case_511: -//line plugins/parsers/influx/machine.go:19077 - switch ( m.data)[( m.p)] { - case 9: - goto tr745 - case 10: - goto tr620 - case 11: - goto tr746 - case 12: - goto tr712 - case 13: - goto tr623 - case 32: - goto tr745 - case 34: - goto tr157 - case 44: - goto tr747 - case 46: - goto st483 - case 61: - goto st6 - case 69: - goto st128 - case 92: - goto st87 - case 101: - goto st128 - case 105: - goto st509 - case 117: - goto st512 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st508 - } - goto st81 - st512: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof512 - } - st_case_512: - switch ( m.data)[( m.p)] { - case 9: - goto tr783 - case 10: - goto tr784 - case 11: - goto tr785 - case 12: - goto tr731 - case 13: - goto tr786 - case 32: - goto tr783 - case 34: - goto tr157 - case 44: - goto tr787 - case 61: - goto st6 - case 92: - goto st87 - } - goto st81 -tr270: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st513 - st513: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof513 - } - st_case_513: -//line plugins/parsers/influx/machine.go:19153 - switch ( m.data)[( m.p)] { - case 9: - goto tr745 - case 10: - goto tr620 - case 11: - goto tr746 - case 12: - goto tr712 - case 13: - goto tr623 - case 32: - goto tr745 - case 34: - goto tr157 - case 44: - goto tr747 - case 46: - goto st483 - case 61: - goto st6 - case 69: - goto st128 - case 92: - goto st87 - case 101: - goto st128 - case 105: - goto st509 - case 117: - goto st512 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st513 - } - goto st81 -tr271: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st514 - st514: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof514 - } - st_case_514: -//line plugins/parsers/influx/machine.go:19201 - switch ( m.data)[( m.p)] { - case 9: - goto tr789 - case 10: - goto tr790 - case 11: - goto tr791 - case 12: - goto tr738 - case 13: - goto tr792 - case 32: - goto tr789 - case 34: - goto tr157 - case 44: - goto tr793 - case 61: - goto st6 - case 65: - goto st130 - case 92: - goto st87 - case 97: - goto st133 - } - goto st81 - st130: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof130 - } - st_case_130: - switch ( m.data)[( m.p)] { - case 9: - goto tr231 - case 10: goto st7 - case 11: - goto tr232 - case 12: - goto tr60 - case 13: - goto st8 case 32: goto tr231 case 34: @@ -19250,27 +19534,27 @@ tr271: case 61: goto st6 case 76: - goto st131 + goto st130 case 92: - goto st87 + goto st86 } - goto st81 - st131: + goto st80 + st130: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof131 + goto _test_eof130 } - st_case_131: + st_case_130: switch ( m.data)[( m.p)] { case 9: goto tr231 case 10: - goto st7 + goto tr29 case 11: goto tr232 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr231 case 34: @@ -19280,27 +19564,27 @@ tr271: case 61: goto st6 case 83: - goto st132 + goto st131 case 92: - goto st87 + goto st86 } - goto st81 - st132: + goto st80 + st131: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof132 + goto _test_eof131 } - st_case_132: + st_case_131: switch ( m.data)[( m.p)] { case 9: goto tr231 case 10: - goto st7 + goto tr29 case 11: goto tr232 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr231 case 34: @@ -19310,39 +19594,69 @@ tr271: case 61: goto st6 case 69: - goto st515 + goto st526 case 92: - goto st87 + goto 
st86 } - goto st81 - st515: + goto st80 + st526: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof515 + goto _test_eof526 } - st_case_515: + st_case_526: switch ( m.data)[( m.p)] { case 9: - goto tr789 + goto tr809 case 10: - goto tr790 + goto tr810 case 11: - goto tr791 + goto tr811 case 12: - goto tr738 + goto tr757 case 13: - goto tr792 + goto tr812 case 32: - goto tr789 + goto tr809 case 34: goto tr157 case 44: - goto tr793 + goto tr813 case 61: goto st6 case 92: - goto st87 + goto st86 } - goto st81 + goto st80 + st132: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof132 + } + st_case_132: + switch ( m.data)[( m.p)] { + case 9: + goto tr231 + case 10: + goto tr29 + case 11: + goto tr232 + case 12: + goto tr60 + case 13: + goto st7 + case 32: + goto tr231 + case 34: + goto tr157 + case 44: + goto tr233 + case 61: + goto st6 + case 92: + goto st86 + case 108: + goto st133 + } + goto st80 st133: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof133 @@ -19352,13 +19666,13 @@ tr271: case 9: goto tr231 case 10: - goto st7 + goto tr29 case 11: goto tr232 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr231 case 34: @@ -19368,11 +19682,11 @@ tr271: case 61: goto st6 case 92: - goto st87 - case 108: + goto st86 + case 115: goto st134 } - goto st81 + goto st80 st134: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof134 @@ -19382,13 +19696,13 @@ tr271: case 9: goto tr231 case 10: - goto st7 + goto tr29 case 11: goto tr232 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr231 case 34: @@ -19398,11 +19712,50 @@ tr271: case 61: goto st6 case 92: - goto st87 - case 115: - goto st135 + goto st86 + case 101: + goto st526 } - goto st81 + goto st80 +tr272: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st527 + st527: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof527 + } + st_case_527: +//line plugins/parsers/influx/machine.go:19732 + switch ( m.data)[( m.p)] { + case 9: + goto tr809 + case 10: + goto tr810 + case 11: + goto tr811 + case 12: + goto tr757 + case 13: + goto tr812 + case 32: + goto tr809 + case 34: + goto tr157 + case 44: + goto tr813 + case 61: + goto st6 + case 82: + goto st135 + case 92: + goto st86 + case 114: + goto st136 + } + goto st80 st135: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof135 @@ -19412,82 +19765,13 @@ tr271: case 9: goto tr231 case 10: - goto st7 + goto tr29 case 11: goto tr232 case 12: goto tr60 case 13: - goto st8 - case 32: - goto tr231 - case 34: - goto tr157 - case 44: - goto tr233 - case 61: - goto st6 - case 92: - goto st87 - case 101: - goto st515 - } - goto st81 -tr272: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st516 - st516: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof516 - } - st_case_516: -//line plugins/parsers/influx/machine.go:19448 - switch ( m.data)[( m.p)] { - case 9: - goto tr789 - case 10: - goto tr790 - case 11: - goto tr791 - case 12: - goto tr738 - case 13: - goto tr792 - case 32: - goto tr789 - case 34: - goto tr157 - case 44: - goto tr793 - case 61: - goto st6 - case 82: - goto st136 - case 92: - goto st87 - case 114: - goto st137 - } - goto st81 - st136: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof136 - } - st_case_136: - switch ( m.data)[( m.p)] { - case 9: - goto tr231 - case 10: goto st7 - case 11: - goto tr232 - case 12: - goto tr60 - case 13: - goto st8 case 32: goto tr231 case 34: @@ -19497,27 +19781,27 @@ tr272: case 61: goto st6 case 85: - goto st132 + goto st131 case 92: - goto st87 + goto st86 } - goto st81 - st137: + goto st80 
+ st136: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof137 + goto _test_eof136 } - st_case_137: + st_case_136: switch ( m.data)[( m.p)] { case 9: goto tr231 case 10: - goto st7 + goto tr29 case 11: goto tr232 case 12: goto tr60 case 13: - goto st8 + goto st7 case 32: goto tr231 case 34: @@ -19527,102 +19811,102 @@ tr272: case 61: goto st6 case 92: - goto st87 + goto st86 case 117: - goto st135 + goto st134 } - goto st81 + goto st80 tr273: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st517 - st517: + goto st528 + st528: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof517 + goto _test_eof528 } - st_case_517: -//line plugins/parsers/influx/machine.go:19547 + st_case_528: +//line plugins/parsers/influx/machine.go:19831 switch ( m.data)[( m.p)] { case 9: - goto tr789 + goto tr809 case 10: - goto tr790 + goto tr810 case 11: - goto tr791 + goto tr811 case 12: - goto tr738 + goto tr757 case 13: - goto tr792 + goto tr812 case 32: - goto tr789 + goto tr809 case 34: goto tr157 case 44: - goto tr793 + goto tr813 case 61: goto st6 case 92: - goto st87 + goto st86 case 97: - goto st133 + goto st132 } - goto st81 + goto st80 tr274: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st518 - st518: + goto st529 + st529: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof518 + goto _test_eof529 } - st_case_518: -//line plugins/parsers/influx/machine.go:19584 + st_case_529: +//line plugins/parsers/influx/machine.go:19868 switch ( m.data)[( m.p)] { case 9: - goto tr789 + goto tr809 case 10: - goto tr790 + goto tr810 case 11: - goto tr791 + goto tr811 case 12: - goto tr738 + goto tr757 case 13: - goto tr792 + goto tr812 case 32: - goto tr789 + goto tr809 case 34: goto tr157 case 44: - goto tr793 + goto tr813 case 61: goto st6 case 92: - goto st87 + goto st86 case 114: - goto st137 + goto st136 } - goto st81 + goto st80 tr259: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st138 - st138: + goto st137 + st137: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof138 + goto _test_eof137 } - st_case_138: -//line plugins/parsers/influx/machine.go:19621 + st_case_137: +//line plugins/parsers/influx/machine.go:19905 switch ( m.data)[( m.p)] { case 34: - goto st99 + goto st98 case 92: - goto st139 + goto st138 } switch { case ( m.data)[( m.p)] > 10: @@ -19632,25 +19916,25 @@ tr259: case ( m.data)[( m.p)] >= 9: goto tr47 } - goto st46 - st139: -//line plugins/parsers/influx/machine.go.rl:234 + goto st45 + st138: +//line plugins/parsers/influx/machine.go.rl:240 ( m.p)-- if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof139 + goto _test_eof138 } - st_case_139: -//line plugins/parsers/influx/machine.go:19645 + st_case_138: +//line plugins/parsers/influx/machine.go:19929 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto st7 + goto tr29 case 12: goto tr47 case 13: - goto st8 + goto st7 case 32: goto st6 case 34: @@ -19660,9 +19944,113 @@ tr259: case 61: goto tr262 case 92: - goto st138 + goto st137 } - goto st99 + goto st98 + st139: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof139 + } + st_case_139: + switch ( m.data)[( m.p)] { + case 9: + goto tr89 + case 10: + goto tr29 + case 11: + goto tr90 + case 12: + goto tr1 + case 13: + goto st7 + case 32: + goto tr89 + case 34: + goto tr317 + case 44: + goto tr92 + case 92: + goto st141 + } + switch { + case ( m.data)[( m.p)] > 45: + if 48 <= ( m.data)[( m.p)] && ( 
m.data)[( m.p)] <= 57 { + goto st532 + } + case ( m.data)[( m.p)] >= 43: + goto st140 + } + goto st30 +tr317: + ( m.cs) = 530 +//line plugins/parsers/influx/machine.go.rl:140 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st530: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof530 + } + st_case_530: +//line plugins/parsers/influx/machine.go:20003 + switch ( m.data)[( m.p)] { + case 10: + goto tr103 + case 11: + goto tr637 + case 13: + goto st33 + case 32: + goto tr501 + case 44: + goto tr503 + case 92: + goto st95 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st531 + } + case ( m.data)[( m.p)] >= 9: + goto tr501 + } + goto st1 + st531: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof531 + } + st_case_531: + switch ( m.data)[( m.p)] { + case 10: + goto tr734 + case 11: + goto tr818 + case 13: + goto tr736 + case 32: + goto tr641 + case 44: + goto tr819 + case 92: + goto st95 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st531 + } + case ( m.data)[( m.p)] >= 9: + goto tr641 + } + goto st1 st140: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof140 @@ -19672,176 +20060,72 @@ tr259: case 9: goto tr89 case 10: - goto st7 + goto tr29 case 11: goto tr90 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr89 case 34: - goto tr317 + goto tr91 case 44: goto tr92 case 92: - goto st142 - } - switch { - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st521 - } - case ( m.data)[( m.p)] >= 43: goto st141 } - goto st31 -tr317: - ( m.cs) = 519 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddString(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st519: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st532 + } + goto st30 + st532: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof519 + goto _test_eof532 } - st_case_519: -//line plugins/parsers/influx/machine.go:19719 + st_case_532: switch ( m.data)[( m.p)] { + case 9: + goto tr638 case 10: - goto st262 + goto tr639 case 11: - goto tr618 + goto tr640 + case 12: + goto tr641 case 13: - goto st34 + goto tr642 case 32: - goto tr482 + goto tr638 + case 34: + goto tr91 case 44: - goto tr484 + goto tr643 case 92: - goto st96 + goto st141 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st520 - } - case ( m.data)[( m.p)] >= 9: - goto tr482 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st532 } - goto st1 - st520: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof520 - } - st_case_520: - switch ( m.data)[( m.p)] { - case 10: - goto tr715 - case 11: - goto tr798 - case 13: - goto tr717 - case 32: - goto tr622 - case 44: - goto tr799 - case 92: - goto st96 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st520 - } - case ( m.data)[( m.p)] >= 9: - goto tr622 - } - goto st1 + goto st30 +tr87: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st141 st141: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof141 } st_case_141: - switch ( m.data)[( m.p)] { - case 9: - goto tr89 - case 10: - goto st7 - case 11: - goto tr90 - case 12: - goto tr1 - case 13: - goto st8 - case 32: - goto tr89 - case 34: - goto tr91 - case 44: - goto tr92 - 
case 92: - goto st142 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st521 - } - goto st31 - st521: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof521 - } - st_case_521: - switch ( m.data)[( m.p)] { - case 9: - goto tr619 - case 10: - goto tr620 - case 11: - goto tr621 - case 12: - goto tr622 - case 13: - goto tr623 - case 32: - goto tr619 - case 34: - goto tr91 - case 44: - goto tr624 - case 92: - goto st142 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st521 - } - goto st31 -tr87: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st142 - st142: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof142 - } - st_case_142: -//line plugins/parsers/influx/machine.go:19840 +//line plugins/parsers/influx/machine.go:20124 switch ( m.data)[( m.p)] { case 34: - goto st31 + goto st30 case 92: - goto st31 + goto st30 } switch { case ( m.data)[( m.p)] > 10: @@ -19852,296 +20136,324 @@ tr87: goto tr8 } goto st1 - st522: + st533: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof522 + goto _test_eof533 } - st_case_522: + st_case_533: switch ( m.data)[( m.p)] { case 9: - goto tr619 + goto tr638 case 10: - goto tr620 + goto tr639 case 11: - goto tr621 + goto tr640 case 12: - goto tr622 + goto tr641 case 13: - goto tr623 + goto tr642 case 32: - goto tr619 + goto tr638 case 34: goto tr91 case 44: - goto tr624 + goto tr643 case 46: - goto st396 + goto st407 case 69: - goto st140 + goto st139 case 92: - goto st142 + goto st141 case 101: - goto st140 + goto st139 case 105: - goto st524 + goto st535 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st523 + goto st534 } - goto st31 - st523: + goto st30 + st534: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof523 + goto _test_eof534 } - st_case_523: + st_case_534: switch ( m.data)[( m.p)] { case 9: - goto tr619 + goto tr638 case 10: - goto tr620 + goto tr639 case 11: - goto tr621 + goto tr640 case 12: - goto tr622 + goto tr641 case 13: - goto tr623 + goto tr642 case 32: - goto tr619 + goto tr638 case 34: goto tr91 case 44: - goto tr624 + goto tr643 case 46: - goto st396 + goto st407 case 69: - goto st140 + goto st139 case 92: - goto st142 + goto st141 case 101: - goto st140 + goto st139 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st523 + goto st534 } - goto st31 - st524: + goto st30 + st535: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof524 + goto _test_eof535 } - st_case_524: + st_case_535: switch ( m.data)[( m.p)] { case 9: - goto tr802 + goto tr822 case 10: - goto tr778 + goto tr823 case 11: - goto tr803 + goto tr824 case 12: - goto tr804 + goto tr825 case 13: - goto tr780 + goto tr800 case 32: - goto tr802 + goto tr822 case 34: goto tr91 case 44: - goto tr805 + goto tr826 case 92: - goto st142 + goto st141 } - goto st31 - st525: + goto st30 + st536: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof525 + goto _test_eof536 } - st_case_525: + st_case_536: switch ( m.data)[( m.p)] { case 9: - goto tr619 + goto tr638 case 10: - goto tr620 + goto tr639 case 11: - goto tr621 + goto tr640 case 12: - goto tr622 + goto tr641 case 13: - goto tr623 + goto tr642 case 32: - goto tr619 + goto tr638 case 34: goto tr91 case 44: - goto tr624 + goto tr643 case 46: - goto st396 + goto st407 case 69: - goto st140 + goto st139 case 92: - goto st142 + goto st141 case 101: - goto st140 + goto st139 case 105: - goto st524 + goto st535 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st525 + goto st536 } - goto st31 + goto st30 tr247: -//line 
plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st526 - st526: + goto st537 + st537: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof526 + goto _test_eof537 } - st_case_526: -//line plugins/parsers/influx/machine.go:20002 + st_case_537: +//line plugins/parsers/influx/machine.go:20286 switch ( m.data)[( m.p)] { case 9: - goto tr619 + goto tr638 case 10: - goto tr620 + goto tr639 case 11: - goto tr621 + goto tr640 case 12: - goto tr622 + goto tr641 case 13: - goto tr623 + goto tr642 case 32: - goto tr619 + goto tr638 case 34: goto tr91 case 44: - goto tr624 + goto tr643 case 46: - goto st396 + goto st407 case 69: - goto st140 + goto st139 case 92: - goto st142 + goto st141 case 101: - goto st140 + goto st139 case 105: - goto st524 + goto st535 case 117: - goto st527 + goto st538 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st523 + goto st534 } - goto st31 - st527: + goto st30 + st538: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof527 + goto _test_eof538 } - st_case_527: + st_case_538: switch ( m.data)[( m.p)] { case 9: - goto tr807 + goto tr828 case 10: - goto tr784 + goto tr829 case 11: - goto tr808 + goto tr830 case 12: - goto tr809 + goto tr831 case 13: - goto tr786 + goto tr806 case 32: - goto tr807 + goto tr828 case 34: goto tr91 case 44: - goto tr810 + goto tr832 case 92: - goto st142 + goto st141 } - goto st31 + goto st30 tr248: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st528 - st528: + goto st539 + st539: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof528 + goto _test_eof539 } - st_case_528: -//line plugins/parsers/influx/machine.go:20074 + st_case_539: +//line plugins/parsers/influx/machine.go:20358 switch ( m.data)[( m.p)] { case 9: - goto tr619 + goto tr638 case 10: - goto tr620 + goto tr639 case 11: - goto tr621 + goto tr640 case 12: - goto tr622 + goto tr641 case 13: - goto tr623 + goto tr642 case 32: - goto tr619 + goto tr638 case 34: goto tr91 case 44: - goto tr624 + goto tr643 case 46: - goto st396 + goto st407 case 69: - goto st140 + goto st139 case 92: - goto st142 + goto st141 case 101: - goto st140 + goto st139 case 105: - goto st524 + goto st535 case 117: - goto st527 + goto st538 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st528 + goto st539 } - goto st31 + goto st30 tr249: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st529 - st529: + goto st540 + st540: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof529 + goto _test_eof540 } - st_case_529: -//line plugins/parsers/influx/machine.go:20120 + st_case_540: +//line plugins/parsers/influx/machine.go:20404 switch ( m.data)[( m.p)] { case 9: - goto tr812 + goto tr834 case 10: - goto tr790 + goto tr810 case 11: - goto tr813 + goto tr835 case 12: - goto tr814 + goto tr836 case 13: - goto tr792 - case 32: goto tr812 + case 32: + goto tr834 case 34: goto tr91 case 44: - goto tr815 + goto tr837 case 65: + goto st142 + case 92: + goto st141 + case 97: + goto st145 + } + goto st30 + st142: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof142 + } + st_case_142: + switch ( m.data)[( m.p)] { + case 9: + goto tr89 + case 10: + goto tr29 + case 11: + goto tr90 + case 12: + goto tr1 + case 13: + goto st7 + case 32: + goto tr89 + case 34: + goto tr91 + case 44: + goto tr92 + case 76: goto st143 case 92: - goto st142 - case 97: - goto st146 + goto st141 } - goto st31 + goto st30 st143: if ( 
m.p)++; ( m.p) == ( m.pe) { goto _test_eof143 @@ -20151,25 +20463,25 @@ tr249: case 9: goto tr89 case 10: - goto st7 + goto tr29 case 11: goto tr90 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr89 case 34: goto tr91 case 44: goto tr92 - case 76: + case 83: goto st144 case 92: - goto st142 + goto st141 } - goto st31 + goto st30 st144: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof144 @@ -20179,25 +20491,51 @@ tr249: case 9: goto tr89 case 10: - goto st7 + goto tr29 case 11: goto tr90 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr89 case 34: goto tr91 case 44: goto tr92 - case 83: - goto st145 + case 69: + goto st541 case 92: - goto st142 + goto st141 } - goto st31 + goto st30 + st541: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof541 + } + st_case_541: + switch ( m.data)[( m.p)] { + case 9: + goto tr834 + case 10: + goto tr810 + case 11: + goto tr835 + case 12: + goto tr836 + case 13: + goto tr812 + case 32: + goto tr834 + case 34: + goto tr91 + case 44: + goto tr837 + case 92: + goto st141 + } + goto st30 st145: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof145 @@ -20207,51 +20545,25 @@ tr249: case 9: goto tr89 case 10: - goto st7 + goto tr29 case 11: goto tr90 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr89 case 34: goto tr91 case 44: goto tr92 - case 69: - goto st530 case 92: - goto st142 + goto st141 + case 108: + goto st146 } - goto st31 - st530: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof530 - } - st_case_530: - switch ( m.data)[( m.p)] { - case 9: - goto tr812 - case 10: - goto tr790 - case 11: - goto tr813 - case 12: - goto tr814 - case 13: - goto tr792 - case 32: - goto tr812 - case 34: - goto tr91 - case 44: - goto tr815 - case 92: - goto st142 - } - goto st31 + goto st30 st146: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof146 @@ -20261,13 +20573,13 @@ tr249: case 9: goto tr89 case 10: - goto st7 + goto tr29 case 11: goto tr90 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr89 case 34: @@ -20275,11 +20587,11 @@ tr249: case 44: goto tr92 case 92: - goto st142 - case 108: + goto st141 + case 115: goto st147 } - goto st31 + goto st30 st147: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof147 @@ -20289,13 +20601,13 @@ tr249: case 9: goto tr89 case 10: - goto st7 + goto tr29 case 11: goto tr90 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr89 case 34: @@ -20303,11 +20615,48 @@ tr249: case 44: goto tr92 case 92: - goto st142 - case 115: - goto st148 + goto st141 + case 101: + goto st541 } - goto st31 + goto st30 +tr250: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st542 + st542: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof542 + } + st_case_542: +//line plugins/parsers/influx/machine.go:20635 + switch ( m.data)[( m.p)] { + case 9: + goto tr834 + case 10: + goto tr810 + case 11: + goto tr835 + case 12: + goto tr836 + case 13: + goto tr812 + case 32: + goto tr834 + case 34: + goto tr91 + case 44: + goto tr837 + case 82: + goto st148 + case 92: + goto st141 + case 114: + goto st149 + } + goto st30 st148: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof148 @@ -20317,62 +20666,25 @@ tr249: case 9: goto tr89 case 10: - goto st7 + goto tr29 case 11: goto tr90 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr89 case 34: goto tr91 case 44: goto tr92 + case 85: + goto st144 case 92: - goto st142 - case 101: - goto st530 + goto st141 } - goto st31 -tr250: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st531 - st531: - 
if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof531 - } - st_case_531: -//line plugins/parsers/influx/machine.go:20351 - switch ( m.data)[( m.p)] { - case 9: - goto tr812 - case 10: - goto tr790 - case 11: - goto tr813 - case 12: - goto tr814 - case 13: - goto tr792 - case 32: - goto tr812 - case 34: - goto tr91 - case 44: - goto tr815 - case 82: - goto st149 - case 92: - goto st142 - case 114: - goto st150 - } - goto st31 + goto st30 st149: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof149 @@ -20382,41 +20694,13 @@ tr250: case 9: goto tr89 case 10: - goto st7 + goto tr29 case 11: goto tr90 case 12: goto tr1 case 13: - goto st8 - case 32: - goto tr89 - case 34: - goto tr91 - case 44: - goto tr92 - case 85: - goto st145 - case 92: - goto st142 - } - goto st31 - st150: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof150 - } - st_case_150: - switch ( m.data)[( m.p)] { - case 9: - goto tr89 - case 10: goto st7 - case 11: - goto tr90 - case 12: - goto tr1 - case 13: - goto st8 case 32: goto tr89 case 34: @@ -20424,422 +20708,81 @@ tr250: case 44: goto tr92 case 92: - goto st142 + goto st141 case 117: - goto st148 + goto st147 } - goto st31 + goto st30 tr251: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st532 - st532: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof532 - } - st_case_532: -//line plugins/parsers/influx/machine.go:20444 - switch ( m.data)[( m.p)] { - case 9: - goto tr812 - case 10: - goto tr790 - case 11: - goto tr813 - case 12: - goto tr814 - case 13: - goto tr792 - case 32: - goto tr812 - case 34: - goto tr91 - case 44: - goto tr815 - case 92: - goto st142 - case 97: - goto st146 - } - goto st31 -tr252: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st533 - st533: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof533 - } - st_case_533: -//line plugins/parsers/influx/machine.go:20479 - switch ( m.data)[( m.p)] { - case 9: - goto tr812 - case 10: - goto tr790 - case 11: - goto tr813 - case 12: - goto tr814 - case 13: - goto tr792 - case 32: - goto tr812 - case 34: - goto tr91 - case 44: - goto tr815 - case 92: - goto st142 - case 114: - goto st150 - } - goto st31 - st534: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof534 - } - st_case_534: - switch ( m.data)[( m.p)] { - case 9: - goto tr611 - case 10: - goto tr584 - case 11: - goto tr612 - case 12: - goto tr490 - case 13: - goto tr586 - case 32: - goto tr611 - case 34: - goto tr128 - case 44: - goto tr92 - case 61: - goto tr129 - case 92: - goto st94 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st535 - } - goto st42 - st535: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof535 - } - st_case_535: - switch ( m.data)[( m.p)] { - case 9: - goto tr611 - case 10: - goto tr584 - case 11: - goto tr612 - case 12: - goto tr490 - case 13: - goto tr586 - case 32: - goto tr611 - case 34: - goto tr128 - case 44: - goto tr92 - case 61: - goto tr129 - case 92: - goto st94 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st536 - } - goto st42 - st536: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof536 - } - st_case_536: - switch ( m.data)[( m.p)] { - case 9: - goto tr611 - case 10: - goto tr584 - case 11: - goto tr612 - case 12: - goto tr490 - case 13: - goto tr586 - case 32: - goto tr611 - case 34: - goto tr128 - case 44: - goto tr92 - case 61: - goto tr129 - case 92: - goto st94 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st537 - } - goto st42 - st537: - if ( 
m.p)++; ( m.p) == ( m.pe) { - goto _test_eof537 - } - st_case_537: - switch ( m.data)[( m.p)] { - case 9: - goto tr611 - case 10: - goto tr584 - case 11: - goto tr612 - case 12: - goto tr490 - case 13: - goto tr586 - case 32: - goto tr611 - case 34: - goto tr128 - case 44: - goto tr92 - case 61: - goto tr129 - case 92: - goto st94 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st538 - } - goto st42 - st538: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof538 - } - st_case_538: - switch ( m.data)[( m.p)] { - case 9: - goto tr611 - case 10: - goto tr584 - case 11: - goto tr612 - case 12: - goto tr490 - case 13: - goto tr586 - case 32: - goto tr611 - case 34: - goto tr128 - case 44: - goto tr92 - case 61: - goto tr129 - case 92: - goto st94 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st539 - } - goto st42 - st539: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof539 - } - st_case_539: - switch ( m.data)[( m.p)] { - case 9: - goto tr611 - case 10: - goto tr584 - case 11: - goto tr612 - case 12: - goto tr490 - case 13: - goto tr586 - case 32: - goto tr611 - case 34: - goto tr128 - case 44: - goto tr92 - case 61: - goto tr129 - case 92: - goto st94 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st540 - } - goto st42 - st540: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof540 - } - st_case_540: - switch ( m.data)[( m.p)] { - case 9: - goto tr611 - case 10: - goto tr584 - case 11: - goto tr612 - case 12: - goto tr490 - case 13: - goto tr586 - case 32: - goto tr611 - case 34: - goto tr128 - case 44: - goto tr92 - case 61: - goto tr129 - case 92: - goto st94 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st541 - } - goto st42 - st541: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof541 - } - st_case_541: - switch ( m.data)[( m.p)] { - case 9: - goto tr611 - case 10: - goto tr584 - case 11: - goto tr612 - case 12: - goto tr490 - case 13: - goto tr586 - case 32: - goto tr611 - case 34: - goto tr128 - case 44: - goto tr92 - case 61: - goto tr129 - case 92: - goto st94 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st542 - } - goto st42 - st542: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof542 - } - st_case_542: - switch ( m.data)[( m.p)] { - case 9: - goto tr611 - case 10: - goto tr584 - case 11: - goto tr612 - case 12: - goto tr490 - case 13: - goto tr586 - case 32: - goto tr611 - case 34: - goto tr128 - case 44: - goto tr92 - case 61: - goto tr129 - case 92: - goto st94 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st543 - } - goto st42 + goto st543 st543: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof543 } st_case_543: +//line plugins/parsers/influx/machine.go:20728 switch ( m.data)[( m.p)] { case 9: - goto tr611 + goto tr834 case 10: - goto tr584 + goto tr810 case 11: - goto tr612 + goto tr835 case 12: - goto tr490 + goto tr836 case 13: - goto tr586 + goto tr812 case 32: - goto tr611 + goto tr834 case 34: - goto tr128 + goto tr91 case 44: - goto tr92 - case 61: - goto tr129 + goto tr837 case 92: - goto st94 + goto st141 + case 97: + goto st145 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st544 - } - goto st42 + goto st30 +tr252: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st544 st544: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof544 } st_case_544: +//line plugins/parsers/influx/machine.go:20763 switch ( m.data)[( m.p)] { case 9: - goto tr611 + goto tr834 case 10: - goto tr584 + goto tr810 case 
11: - goto tr612 + goto tr835 case 12: - goto tr490 + goto tr836 case 13: - goto tr586 + goto tr812 case 32: - goto tr611 + goto tr834 case 34: - goto tr128 + goto tr91 case 44: - goto tr92 - case 61: - goto tr129 + goto tr837 case 92: - goto st94 + goto st141 + case 114: + goto st149 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st545 - } - goto st42 + goto st30 st545: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof545 @@ -20847,17 +20790,17 @@ tr252: st_case_545: switch ( m.data)[( m.p)] { case 9: - goto tr611 + goto tr630 case 10: - goto tr584 + goto tr603 case 11: - goto tr612 + goto tr631 case 12: - goto tr490 + goto tr509 case 13: - goto tr586 + goto tr605 case 32: - goto tr611 + goto tr630 case 34: goto tr128 case 44: @@ -20865,12 +20808,12 @@ tr252: case 61: goto tr129 case 92: - goto st94 + goto st93 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st546 } - goto st42 + goto st41 st546: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof546 @@ -20878,17 +20821,17 @@ tr252: st_case_546: switch ( m.data)[( m.p)] { case 9: - goto tr611 + goto tr630 case 10: - goto tr584 + goto tr603 case 11: - goto tr612 + goto tr631 case 12: - goto tr490 + goto tr509 case 13: - goto tr586 + goto tr605 case 32: - goto tr611 + goto tr630 case 34: goto tr128 case 44: @@ -20896,12 +20839,12 @@ tr252: case 61: goto tr129 case 92: - goto st94 + goto st93 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st547 } - goto st42 + goto st41 st547: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof547 @@ -20909,17 +20852,17 @@ tr252: st_case_547: switch ( m.data)[( m.p)] { case 9: - goto tr611 + goto tr630 case 10: - goto tr584 + goto tr603 case 11: - goto tr612 + goto tr631 case 12: - goto tr490 + goto tr509 case 13: - goto tr586 + goto tr605 case 32: - goto tr611 + goto tr630 case 34: goto tr128 case 44: @@ -20927,12 +20870,12 @@ tr252: case 61: goto tr129 case 92: - goto st94 + goto st93 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st548 } - goto st42 + goto st41 st548: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof548 @@ -20940,17 +20883,17 @@ tr252: st_case_548: switch ( m.data)[( m.p)] { case 9: - goto tr611 + goto tr630 case 10: - goto tr584 + goto tr603 case 11: - goto tr612 + goto tr631 case 12: - goto tr490 + goto tr509 case 13: - goto tr586 + goto tr605 case 32: - goto tr611 + goto tr630 case 34: goto tr128 case 44: @@ -20958,12 +20901,12 @@ tr252: case 61: goto tr129 case 92: - goto st94 + goto st93 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st549 } - goto st42 + goto st41 st549: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof549 @@ -20971,17 +20914,17 @@ tr252: st_case_549: switch ( m.data)[( m.p)] { case 9: - goto tr611 + goto tr630 case 10: - goto tr584 + goto tr603 case 11: - goto tr612 + goto tr631 case 12: - goto tr490 + goto tr509 case 13: - goto tr586 + goto tr605 case 32: - goto tr611 + goto tr630 case 34: goto tr128 case 44: @@ -20989,12 +20932,12 @@ tr252: case 61: goto tr129 case 92: - goto st94 + goto st93 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st550 } - goto st42 + goto st41 st550: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof550 @@ -21002,17 +20945,17 @@ tr252: st_case_550: switch ( m.data)[( m.p)] { case 9: - goto tr611 + goto tr630 case 10: - goto tr584 + goto tr603 case 11: - goto tr612 + goto tr631 case 12: - goto tr490 + goto tr509 case 13: - goto tr586 + goto tr605 case 32: - goto tr611 + goto tr630 case 34: goto tr128 case 44: @@ -21020,12 +20963,12 @@ tr252: case 61: goto 
tr129 case 92: - goto st94 + goto st93 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st551 } - goto st42 + goto st41 st551: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof551 @@ -21033,17 +20976,17 @@ tr252: st_case_551: switch ( m.data)[( m.p)] { case 9: - goto tr611 + goto tr630 case 10: - goto tr584 + goto tr603 case 11: - goto tr612 + goto tr631 case 12: - goto tr490 + goto tr509 case 13: - goto tr586 + goto tr605 case 32: - goto tr611 + goto tr630 case 34: goto tr128 case 44: @@ -21051,11 +20994,392 @@ tr252: case 61: goto tr129 case 92: - goto st94 + goto st93 } - goto st42 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st552 + } + goto st41 + st552: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof552 + } + st_case_552: + switch ( m.data)[( m.p)] { + case 9: + goto tr630 + case 10: + goto tr603 + case 11: + goto tr631 + case 12: + goto tr509 + case 13: + goto tr605 + case 32: + goto tr630 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st93 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st553 + } + goto st41 + st553: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof553 + } + st_case_553: + switch ( m.data)[( m.p)] { + case 9: + goto tr630 + case 10: + goto tr603 + case 11: + goto tr631 + case 12: + goto tr509 + case 13: + goto tr605 + case 32: + goto tr630 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st93 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st554 + } + goto st41 + st554: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof554 + } + st_case_554: + switch ( m.data)[( m.p)] { + case 9: + goto tr630 + case 10: + goto tr603 + case 11: + goto tr631 + case 12: + goto tr509 + case 13: + goto tr605 + case 32: + goto tr630 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st93 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st555 + } + goto st41 + st555: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof555 + } + st_case_555: + switch ( m.data)[( m.p)] { + case 9: + goto tr630 + case 10: + goto tr603 + case 11: + goto tr631 + case 12: + goto tr509 + case 13: + goto tr605 + case 32: + goto tr630 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st93 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st556 + } + goto st41 + st556: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof556 + } + st_case_556: + switch ( m.data)[( m.p)] { + case 9: + goto tr630 + case 10: + goto tr603 + case 11: + goto tr631 + case 12: + goto tr509 + case 13: + goto tr605 + case 32: + goto tr630 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st93 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st557 + } + goto st41 + st557: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof557 + } + st_case_557: + switch ( m.data)[( m.p)] { + case 9: + goto tr630 + case 10: + goto tr603 + case 11: + goto tr631 + case 12: + goto tr509 + case 13: + goto tr605 + case 32: + goto tr630 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st93 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st558 + } + goto st41 + st558: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof558 + } + st_case_558: + switch ( m.data)[( m.p)] { + case 9: + goto tr630 + case 10: + goto tr603 + case 11: + goto tr631 + case 12: + goto tr509 + case 13: + 
goto tr605 + case 32: + goto tr630 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st93 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st559 + } + goto st41 + st559: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof559 + } + st_case_559: + switch ( m.data)[( m.p)] { + case 9: + goto tr630 + case 10: + goto tr603 + case 11: + goto tr631 + case 12: + goto tr509 + case 13: + goto tr605 + case 32: + goto tr630 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st93 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st560 + } + goto st41 + st560: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof560 + } + st_case_560: + switch ( m.data)[( m.p)] { + case 9: + goto tr630 + case 10: + goto tr603 + case 11: + goto tr631 + case 12: + goto tr509 + case 13: + goto tr605 + case 32: + goto tr630 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st93 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st561 + } + goto st41 + st561: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof561 + } + st_case_561: + switch ( m.data)[( m.p)] { + case 9: + goto tr630 + case 10: + goto tr603 + case 11: + goto tr631 + case 12: + goto tr509 + case 13: + goto tr605 + case 32: + goto tr630 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st93 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st562 + } + goto st41 + st562: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof562 + } + st_case_562: + switch ( m.data)[( m.p)] { + case 9: + goto tr630 + case 10: + goto tr603 + case 11: + goto tr631 + case 12: + goto tr509 + case 13: + goto tr605 + case 32: + goto tr630 + case 34: + goto tr128 + case 44: + goto tr92 + case 61: + goto tr129 + case 92: + goto st93 + } + goto st41 tr213: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st150 + st150: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof150 + } + st_case_150: +//line plugins/parsers/influx/machine.go:21353 + switch ( m.data)[( m.p)] { + case 9: + goto tr180 + case 10: + goto tr29 + case 11: + goto tr181 + case 12: + goto tr1 + case 13: + goto st7 + case 32: + goto tr180 + case 34: + goto tr91 + case 44: + goto tr182 + case 46: + goto st151 + case 48: + goto st587 + case 92: + goto st156 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st590 + } + goto st54 +tr214: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p @@ -21065,37 +21389,243 @@ tr213: goto _test_eof151 } st_case_151: -//line plugins/parsers/influx/machine.go:21069 +//line plugins/parsers/influx/machine.go:21393 switch ( m.data)[( m.p)] { case 9: goto tr180 case 10: - goto st7 + goto tr29 case 11: goto tr181 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr180 case 34: goto tr91 case 44: goto tr182 - case 46: - goto st152 - case 48: - goto st576 case 92: - goto st157 + goto st156 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st579 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st563 } - goto st55 -tr214: -//line plugins/parsers/influx/machine.go.rl:19 + goto st54 + st563: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof563 + } + st_case_563: + switch ( m.data)[( m.p)] { + case 9: + goto tr859 + case 10: + goto tr534 + case 11: + goto tr860 + case 12: + goto tr641 + case 13: + goto tr536 + case 32: + goto tr859 + case 
34: + goto tr91 + case 44: + goto tr861 + case 69: + goto st154 + case 92: + goto st156 + case 101: + goto st154 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st563 + } + goto st54 +tr860: + ( m.cs) = 564 +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:122 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr892: + ( m.cs) = 564 +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:104 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr896: + ( m.cs) = 564 +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:113 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr901: + ( m.cs) = 564 +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:131 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st564: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof564 + } + st_case_564: +//line plugins/parsers/influx/machine.go:21548 + switch ( m.data)[( m.p)] { + case 9: + goto tr863 + case 10: + goto tr275 + case 11: + goto tr864 + case 12: + goto tr501 + case 13: + goto st103 + case 32: + goto tr863 + case 34: + goto tr124 + case 44: + goto tr182 + case 45: + goto tr865 + case 61: + goto st54 + case 92: + goto tr186 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr866 + } + goto tr184 +tr864: + ( m.cs) = 565 +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto _again + st565: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof565 + } + st_case_565: +//line plugins/parsers/influx/machine.go:21599 + switch ( m.data)[( m.p)] { + case 9: + goto tr863 + case 10: + goto tr275 + case 11: + goto tr864 + case 12: + goto tr501 + case 13: + goto st103 + case 32: + goto tr863 + case 34: + goto tr124 + case 44: + goto tr182 + case 45: + goto tr865 + case 61: + goto tr189 + case 92: + goto tr186 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr866 + } + goto tr184 +tr865: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p @@ -21105,264 +21635,18 @@ tr214: goto _test_eof152 } st_case_152: -//line plugins/parsers/influx/machine.go:21109 +//line plugins/parsers/influx/machine.go:21639 switch ( m.data)[( m.p)] { case 9: goto tr180 case 10: - goto st7 - case 11: - goto tr181 - case 12: - goto tr1 - case 13: - goto st8 - case 32: - goto tr180 - case 34: - goto tr91 - case 44: - goto tr182 - case 92: - goto st157 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] 
<= 57 { - goto st552 - } - goto st55 - st552: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof552 - } - st_case_552: - switch ( m.data)[( m.p)] { - case 9: - goto tr837 - case 10: - goto tr515 - case 11: - goto tr838 - case 12: - goto tr622 - case 13: - goto tr517 - case 32: - goto tr837 - case 34: - goto tr91 - case 44: - goto tr839 - case 69: - goto st155 - case 92: - goto st157 - case 101: - goto st155 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st552 - } - goto st55 -tr838: - ( m.cs) = 553 -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddFloat(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr870: - ( m.cs) = 553 -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:103 - - err = m.handler.AddInt(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr874: - ( m.cs) = 553 -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddUint(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr878: - ( m.cs) = 553 -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddBool(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st553: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof553 - } - st_case_553: -//line plugins/parsers/influx/machine.go:21264 - switch ( m.data)[( m.p)] { - case 9: - goto tr841 - case 10: - goto st317 - case 11: - goto tr842 - case 12: - goto tr482 - case 13: - goto st104 - case 32: - goto tr841 - case 34: - goto tr124 - case 44: - goto tr182 - case 45: - goto tr843 - case 61: - goto st55 - case 92: - goto tr186 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr844 - } - goto tr184 -tr842: - ( m.cs) = 554 -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto _again - st554: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof554 - } - st_case_554: -//line plugins/parsers/influx/machine.go:21315 - switch ( m.data)[( m.p)] { - case 9: - goto tr841 - case 10: - goto st317 - case 11: - goto tr842 - case 12: - goto tr482 - case 13: - goto st104 - case 32: - goto tr841 - case 34: - goto tr124 - case 44: - goto tr182 - case 45: - goto tr843 - case 61: - goto tr189 - case 92: - goto tr186 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr844 - } - goto tr184 -tr843: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st153 - st153: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof153 - } - st_case_153: -//line plugins/parsers/influx/machine.go:21355 - 
switch ( m.data)[( m.p)] { - case 9: - goto tr180 - case 10: - goto st7 + goto tr29 case 11: goto tr188 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr180 case 34: @@ -21372,37 +21656,37 @@ tr843: case 61: goto tr189 case 92: - goto st154 + goto st153 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st555 + goto st566 } - goto st57 -tr844: -//line plugins/parsers/influx/machine.go.rl:19 + goto st56 +tr866: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st555 - st555: + goto st566 + st566: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof555 + goto _test_eof566 } - st_case_555: -//line plugins/parsers/influx/machine.go:21393 + st_case_566: +//line plugins/parsers/influx/machine.go:21677 switch ( m.data)[( m.p)] { case 9: - goto tr845 + goto tr867 case 10: - goto tr659 + goto tr678 case 11: - goto tr846 + goto tr868 case 12: - goto tr490 + goto tr509 case 13: - goto tr661 + goto tr680 case 32: - goto tr845 + goto tr867 case 34: goto tr128 case 44: @@ -21410,71 +21694,71 @@ tr844: case 61: goto tr189 case 92: - goto st154 + goto st153 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st557 + goto st568 } - goto st57 -tr849: - ( m.cs) = 556 -//line plugins/parsers/influx/machine.go.rl:77 + goto st56 +tr871: + ( m.cs) = 567 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p goto _again -tr846: - ( m.cs) = 556 -//line plugins/parsers/influx/machine.go.rl:77 +tr868: + ( m.cs) = 567 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:148 +//line plugins/parsers/influx/machine.go.rl:149 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st556: + st567: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof556 + goto _test_eof567 } - st_case_556: -//line plugins/parsers/influx/machine.go:21465 + st_case_567: +//line plugins/parsers/influx/machine.go:21749 switch ( m.data)[( m.p)] { case 9: - goto tr848 + goto tr870 case 10: - goto st317 + goto tr275 case 11: - goto tr849 + goto tr871 case 12: - goto tr495 + goto tr514 case 13: - goto st104 + goto st103 case 32: - goto tr848 + goto tr870 case 34: goto tr124 case 44: @@ -21486,22 +21770,22 @@ tr846: } goto tr184 tr186: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st154 - st154: + goto st153 + st153: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof154 + goto _test_eof153 } - st_case_154: -//line plugins/parsers/influx/machine.go:21500 + st_case_153: +//line plugins/parsers/influx/machine.go:21784 switch ( m.data)[( m.p)] { case 34: - goto st57 + goto st56 case 92: - goto st57 + goto st56 } switch { case ( m.data)[( m.p)] > 10: @@ -21511,348 +21795,7 @@ tr186: case ( m.data)[( m.p)] >= 9: goto tr8 } - goto st12 - st557: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof557 - } - st_case_557: - switch ( m.data)[( m.p)] { - case 9: - goto tr845 - case 10: - goto tr659 - case 11: - goto tr846 - case 12: - goto tr490 - case 13: - goto tr661 - case 32: - goto tr845 - case 34: - goto tr128 - case 44: - goto tr182 - case 61: - goto tr189 - case 92: - goto 
st154 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st558 - } - goto st57 - st558: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof558 - } - st_case_558: - switch ( m.data)[( m.p)] { - case 9: - goto tr845 - case 10: - goto tr659 - case 11: - goto tr846 - case 12: - goto tr490 - case 13: - goto tr661 - case 32: - goto tr845 - case 34: - goto tr128 - case 44: - goto tr182 - case 61: - goto tr189 - case 92: - goto st154 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st559 - } - goto st57 - st559: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof559 - } - st_case_559: - switch ( m.data)[( m.p)] { - case 9: - goto tr845 - case 10: - goto tr659 - case 11: - goto tr846 - case 12: - goto tr490 - case 13: - goto tr661 - case 32: - goto tr845 - case 34: - goto tr128 - case 44: - goto tr182 - case 61: - goto tr189 - case 92: - goto st154 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st560 - } - goto st57 - st560: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof560 - } - st_case_560: - switch ( m.data)[( m.p)] { - case 9: - goto tr845 - case 10: - goto tr659 - case 11: - goto tr846 - case 12: - goto tr490 - case 13: - goto tr661 - case 32: - goto tr845 - case 34: - goto tr128 - case 44: - goto tr182 - case 61: - goto tr189 - case 92: - goto st154 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st561 - } - goto st57 - st561: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof561 - } - st_case_561: - switch ( m.data)[( m.p)] { - case 9: - goto tr845 - case 10: - goto tr659 - case 11: - goto tr846 - case 12: - goto tr490 - case 13: - goto tr661 - case 32: - goto tr845 - case 34: - goto tr128 - case 44: - goto tr182 - case 61: - goto tr189 - case 92: - goto st154 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st562 - } - goto st57 - st562: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof562 - } - st_case_562: - switch ( m.data)[( m.p)] { - case 9: - goto tr845 - case 10: - goto tr659 - case 11: - goto tr846 - case 12: - goto tr490 - case 13: - goto tr661 - case 32: - goto tr845 - case 34: - goto tr128 - case 44: - goto tr182 - case 61: - goto tr189 - case 92: - goto st154 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st563 - } - goto st57 - st563: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof563 - } - st_case_563: - switch ( m.data)[( m.p)] { - case 9: - goto tr845 - case 10: - goto tr659 - case 11: - goto tr846 - case 12: - goto tr490 - case 13: - goto tr661 - case 32: - goto tr845 - case 34: - goto tr128 - case 44: - goto tr182 - case 61: - goto tr189 - case 92: - goto st154 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st564 - } - goto st57 - st564: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof564 - } - st_case_564: - switch ( m.data)[( m.p)] { - case 9: - goto tr845 - case 10: - goto tr659 - case 11: - goto tr846 - case 12: - goto tr490 - case 13: - goto tr661 - case 32: - goto tr845 - case 34: - goto tr128 - case 44: - goto tr182 - case 61: - goto tr189 - case 92: - goto st154 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st565 - } - goto st57 - st565: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof565 - } - st_case_565: - switch ( m.data)[( m.p)] { - case 9: - goto tr845 - case 10: - goto tr659 - case 11: - goto tr846 - case 12: - goto tr490 - case 13: - goto tr661 - case 32: - goto tr845 - case 34: - goto tr128 - case 44: - goto tr182 - case 61: - goto tr189 - case 92: - goto st154 - } - if 48 
<= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st566 - } - goto st57 - st566: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof566 - } - st_case_566: - switch ( m.data)[( m.p)] { - case 9: - goto tr845 - case 10: - goto tr659 - case 11: - goto tr846 - case 12: - goto tr490 - case 13: - goto tr661 - case 32: - goto tr845 - case 34: - goto tr128 - case 44: - goto tr182 - case 61: - goto tr189 - case 92: - goto st154 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st567 - } - goto st57 - st567: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof567 - } - st_case_567: - switch ( m.data)[( m.p)] { - case 9: - goto tr845 - case 10: - goto tr659 - case 11: - goto tr846 - case 12: - goto tr490 - case 13: - goto tr661 - case 32: - goto tr845 - case 34: - goto tr128 - case 44: - goto tr182 - case 61: - goto tr189 - case 92: - goto st154 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st568 - } - goto st57 + goto st11 st568: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof568 @@ -21860,17 +21803,17 @@ tr186: st_case_568: switch ( m.data)[( m.p)] { case 9: - goto tr845 + goto tr867 case 10: - goto tr659 + goto tr678 case 11: - goto tr846 + goto tr868 case 12: - goto tr490 + goto tr509 case 13: - goto tr661 + goto tr680 case 32: - goto tr845 + goto tr867 case 34: goto tr128 case 44: @@ -21878,12 +21821,12 @@ tr186: case 61: goto tr189 case 92: - goto st154 + goto st153 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st569 } - goto st57 + goto st56 st569: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof569 @@ -21891,17 +21834,17 @@ tr186: st_case_569: switch ( m.data)[( m.p)] { case 9: - goto tr845 + goto tr867 case 10: - goto tr659 + goto tr678 case 11: - goto tr846 + goto tr868 case 12: - goto tr490 + goto tr509 case 13: - goto tr661 + goto tr680 case 32: - goto tr845 + goto tr867 case 34: goto tr128 case 44: @@ -21909,12 +21852,12 @@ tr186: case 61: goto tr189 case 92: - goto st154 + goto st153 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st570 } - goto st57 + goto st56 st570: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof570 @@ -21922,17 +21865,17 @@ tr186: st_case_570: switch ( m.data)[( m.p)] { case 9: - goto tr845 + goto tr867 case 10: - goto tr659 + goto tr678 case 11: - goto tr846 + goto tr868 case 12: - goto tr490 + goto tr509 case 13: - goto tr661 + goto tr680 case 32: - goto tr845 + goto tr867 case 34: goto tr128 case 44: @@ -21940,12 +21883,12 @@ tr186: case 61: goto tr189 case 92: - goto st154 + goto st153 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st571 } - goto st57 + goto st56 st571: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof571 @@ -21953,17 +21896,17 @@ tr186: st_case_571: switch ( m.data)[( m.p)] { case 9: - goto tr845 + goto tr867 case 10: - goto tr659 + goto tr678 case 11: - goto tr846 + goto tr868 case 12: - goto tr490 + goto tr509 case 13: - goto tr661 + goto tr680 case 32: - goto tr845 + goto tr867 case 34: goto tr128 case 44: @@ -21971,12 +21914,12 @@ tr186: case 61: goto tr189 case 92: - goto st154 + goto st153 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st572 } - goto st57 + goto st56 st572: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof572 @@ -21984,17 +21927,17 @@ tr186: st_case_572: switch ( m.data)[( m.p)] { case 9: - goto tr845 + goto tr867 case 10: - goto tr659 + goto tr678 case 11: - goto tr846 + goto tr868 case 12: - goto tr490 + goto tr509 case 13: - goto tr661 + goto tr680 case 32: - goto tr845 + goto tr867 case 34: goto tr128 case 44: @@ 
-22002,12 +21945,12 @@ tr186: case 61: goto tr189 case 92: - goto st154 + goto st153 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st573 } - goto st57 + goto st56 st573: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof573 @@ -22015,17 +21958,17 @@ tr186: st_case_573: switch ( m.data)[( m.p)] { case 9: - goto tr845 + goto tr867 case 10: - goto tr659 + goto tr678 case 11: - goto tr846 + goto tr868 case 12: - goto tr490 + goto tr509 case 13: - goto tr661 + goto tr680 case 32: - goto tr845 + goto tr867 case 34: goto tr128 case 44: @@ -22033,12 +21976,12 @@ tr186: case 61: goto tr189 case 92: - goto st154 + goto st153 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st574 } - goto st57 + goto st56 st574: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof574 @@ -22046,17 +21989,17 @@ tr186: st_case_574: switch ( m.data)[( m.p)] { case 9: - goto tr845 + goto tr867 case 10: - goto tr659 + goto tr678 case 11: - goto tr846 + goto tr868 case 12: - goto tr490 + goto tr509 case 13: - goto tr661 + goto tr680 case 32: - goto tr845 + goto tr867 case 34: goto tr128 case 44: @@ -22064,9 +22007,384 @@ tr186: case 61: goto tr189 case 92: - goto st154 + goto st153 } - goto st57 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st575 + } + goto st56 + st575: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof575 + } + st_case_575: + switch ( m.data)[( m.p)] { + case 9: + goto tr867 + case 10: + goto tr678 + case 11: + goto tr868 + case 12: + goto tr509 + case 13: + goto tr680 + case 32: + goto tr867 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st153 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st576 + } + goto st56 + st576: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof576 + } + st_case_576: + switch ( m.data)[( m.p)] { + case 9: + goto tr867 + case 10: + goto tr678 + case 11: + goto tr868 + case 12: + goto tr509 + case 13: + goto tr680 + case 32: + goto tr867 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st153 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st577 + } + goto st56 + st577: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof577 + } + st_case_577: + switch ( m.data)[( m.p)] { + case 9: + goto tr867 + case 10: + goto tr678 + case 11: + goto tr868 + case 12: + goto tr509 + case 13: + goto tr680 + case 32: + goto tr867 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st153 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st578 + } + goto st56 + st578: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof578 + } + st_case_578: + switch ( m.data)[( m.p)] { + case 9: + goto tr867 + case 10: + goto tr678 + case 11: + goto tr868 + case 12: + goto tr509 + case 13: + goto tr680 + case 32: + goto tr867 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st153 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st579 + } + goto st56 + st579: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof579 + } + st_case_579: + switch ( m.data)[( m.p)] { + case 9: + goto tr867 + case 10: + goto tr678 + case 11: + goto tr868 + case 12: + goto tr509 + case 13: + goto tr680 + case 32: + goto tr867 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st153 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st580 + } + goto st56 + st580: + if ( m.p)++; ( m.p) == ( m.pe) { + goto 
_test_eof580 + } + st_case_580: + switch ( m.data)[( m.p)] { + case 9: + goto tr867 + case 10: + goto tr678 + case 11: + goto tr868 + case 12: + goto tr509 + case 13: + goto tr680 + case 32: + goto tr867 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st153 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st581 + } + goto st56 + st581: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof581 + } + st_case_581: + switch ( m.data)[( m.p)] { + case 9: + goto tr867 + case 10: + goto tr678 + case 11: + goto tr868 + case 12: + goto tr509 + case 13: + goto tr680 + case 32: + goto tr867 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st153 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st582 + } + goto st56 + st582: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof582 + } + st_case_582: + switch ( m.data)[( m.p)] { + case 9: + goto tr867 + case 10: + goto tr678 + case 11: + goto tr868 + case 12: + goto tr509 + case 13: + goto tr680 + case 32: + goto tr867 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st153 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st583 + } + goto st56 + st583: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof583 + } + st_case_583: + switch ( m.data)[( m.p)] { + case 9: + goto tr867 + case 10: + goto tr678 + case 11: + goto tr868 + case 12: + goto tr509 + case 13: + goto tr680 + case 32: + goto tr867 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st153 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st584 + } + goto st56 + st584: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof584 + } + st_case_584: + switch ( m.data)[( m.p)] { + case 9: + goto tr867 + case 10: + goto tr678 + case 11: + goto tr868 + case 12: + goto tr509 + case 13: + goto tr680 + case 32: + goto tr867 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st153 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st585 + } + goto st56 + st585: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof585 + } + st_case_585: + switch ( m.data)[( m.p)] { + case 9: + goto tr867 + case 10: + goto tr678 + case 11: + goto tr868 + case 12: + goto tr509 + case 13: + goto tr680 + case 32: + goto tr867 + case 34: + goto tr128 + case 44: + goto tr182 + case 61: + goto tr189 + case 92: + goto st153 + } + goto st56 + st154: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof154 + } + st_case_154: + switch ( m.data)[( m.p)] { + case 9: + goto tr180 + case 10: + goto tr29 + case 11: + goto tr181 + case 12: + goto tr1 + case 13: + goto st7 + case 32: + goto tr180 + case 34: + goto tr317 + case 44: + goto tr182 + case 92: + goto st156 + } + switch { + case ( m.data)[( m.p)] > 45: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st586 + } + case ( m.data)[( m.p)] >= 43: + goto st155 + } + goto st54 st155: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof155 @@ -22076,106 +22394,72 @@ tr186: case 9: goto tr180 case 10: - goto st7 + goto tr29 case 11: goto tr181 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr180 case 34: - goto tr317 + goto tr91 case 44: goto tr182 case 92: - goto st157 - } - switch { - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st575 - } - case ( m.data)[( m.p)] >= 43: goto st156 } - goto st55 + if 48 <= ( m.data)[( m.p)] && ( 
m.data)[( m.p)] <= 57 { + goto st586 + } + goto st54 + st586: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof586 + } + st_case_586: + switch ( m.data)[( m.p)] { + case 9: + goto tr859 + case 10: + goto tr534 + case 11: + goto tr860 + case 12: + goto tr641 + case 13: + goto tr536 + case 32: + goto tr859 + case 34: + goto tr91 + case 44: + goto tr861 + case 92: + goto st156 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st586 + } + goto st54 +tr340: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st156 st156: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof156 } st_case_156: - switch ( m.data)[( m.p)] { - case 9: - goto tr180 - case 10: - goto st7 - case 11: - goto tr181 - case 12: - goto tr1 - case 13: - goto st8 - case 32: - goto tr180 - case 34: - goto tr91 - case 44: - goto tr182 - case 92: - goto st157 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st575 - } - goto st55 - st575: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof575 - } - st_case_575: - switch ( m.data)[( m.p)] { - case 9: - goto tr837 - case 10: - goto tr515 - case 11: - goto tr838 - case 12: - goto tr622 - case 13: - goto tr517 - case 32: - goto tr837 - case 34: - goto tr91 - case 44: - goto tr839 - case 92: - goto st157 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st575 - } - goto st55 -tr340: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st157 - st157: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof157 - } - st_case_157: -//line plugins/parsers/influx/machine.go:22174 +//line plugins/parsers/influx/machine.go:22458 switch ( m.data)[( m.p)] { case 34: - goto st55 + goto st54 case 92: - goto st55 + goto st54 } switch { case ( m.data)[( m.p)] > 10: @@ -22186,296 +22470,324 @@ tr340: goto tr8 } goto st1 - st576: + st587: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof576 + goto _test_eof587 } - st_case_576: + st_case_587: switch ( m.data)[( m.p)] { case 9: - goto tr837 + goto tr859 case 10: - goto tr515 + goto tr534 case 11: - goto tr838 + goto tr860 case 12: - goto tr622 + goto tr641 case 13: - goto tr517 + goto tr536 case 32: - goto tr837 + goto tr859 case 34: goto tr91 case 44: - goto tr839 + goto tr861 case 46: - goto st552 + goto st563 case 69: - goto st155 + goto st154 case 92: - goto st157 + goto st156 case 101: - goto st155 + goto st154 case 105: - goto st578 + goto st589 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st577 + goto st588 } - goto st55 - st577: + goto st54 + st588: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof577 + goto _test_eof588 } - st_case_577: + st_case_588: switch ( m.data)[( m.p)] { case 9: - goto tr837 + goto tr859 case 10: - goto tr515 + goto tr534 case 11: - goto tr838 + goto tr860 case 12: - goto tr622 + goto tr641 case 13: - goto tr517 + goto tr536 case 32: - goto tr837 + goto tr859 case 34: goto tr91 case 44: - goto tr839 + goto tr861 case 46: - goto st552 + goto st563 case 69: - goto st155 + goto st154 case 92: - goto st157 + goto st156 case 101: - goto st155 + goto st154 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st577 + goto st588 } - goto st55 - st578: + goto st54 + st589: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof578 + goto _test_eof589 } - st_case_578: + st_case_589: switch ( m.data)[( m.p)] { case 9: - goto tr869 + goto tr891 case 10: - goto tr722 + goto tr741 case 11: - goto tr870 + goto tr892 case 12: - goto tr804 + goto tr825 case 13: - goto tr725 + goto tr744 case 32: - goto tr869 + goto tr891 case 
34: goto tr91 case 44: - goto tr871 + goto tr893 case 92: - goto st157 + goto st156 } - goto st55 - st579: + goto st54 + st590: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof579 + goto _test_eof590 } - st_case_579: + st_case_590: switch ( m.data)[( m.p)] { case 9: - goto tr837 + goto tr859 case 10: - goto tr515 + goto tr534 case 11: - goto tr838 + goto tr860 case 12: - goto tr622 + goto tr641 case 13: - goto tr517 + goto tr536 case 32: - goto tr837 + goto tr859 case 34: goto tr91 case 44: - goto tr839 + goto tr861 case 46: - goto st552 + goto st563 case 69: - goto st155 + goto st154 case 92: - goto st157 + goto st156 case 101: - goto st155 + goto st154 case 105: - goto st578 + goto st589 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st579 + goto st590 } - goto st55 + goto st54 tr215: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st580 - st580: + goto st591 + st591: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof580 + goto _test_eof591 } - st_case_580: -//line plugins/parsers/influx/machine.go:22336 + st_case_591: +//line plugins/parsers/influx/machine.go:22620 switch ( m.data)[( m.p)] { case 9: - goto tr837 + goto tr859 case 10: - goto tr515 + goto tr534 case 11: - goto tr838 + goto tr860 case 12: - goto tr622 + goto tr641 case 13: - goto tr517 + goto tr536 case 32: - goto tr837 + goto tr859 case 34: goto tr91 case 44: - goto tr839 + goto tr861 case 46: - goto st552 + goto st563 case 69: - goto st155 + goto st154 case 92: - goto st157 + goto st156 case 101: - goto st155 + goto st154 case 105: - goto st578 + goto st589 case 117: - goto st581 + goto st592 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st577 + goto st588 } - goto st55 - st581: + goto st54 + st592: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof581 + goto _test_eof592 } - st_case_581: + st_case_592: switch ( m.data)[( m.p)] { case 9: - goto tr873 + goto tr895 case 10: - goto tr729 + goto tr748 case 11: - goto tr874 + goto tr896 case 12: - goto tr809 + goto tr831 case 13: - goto tr732 + goto tr751 case 32: - goto tr873 + goto tr895 case 34: goto tr91 case 44: - goto tr875 + goto tr897 case 92: - goto st157 + goto st156 } - goto st55 + goto st54 tr216: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st582 - st582: + goto st593 + st593: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof582 + goto _test_eof593 } - st_case_582: -//line plugins/parsers/influx/machine.go:22408 + st_case_593: +//line plugins/parsers/influx/machine.go:22692 switch ( m.data)[( m.p)] { case 9: - goto tr837 + goto tr859 case 10: - goto tr515 + goto tr534 case 11: - goto tr838 + goto tr860 case 12: - goto tr622 + goto tr641 case 13: - goto tr517 + goto tr536 case 32: - goto tr837 + goto tr859 case 34: goto tr91 case 44: - goto tr839 + goto tr861 case 46: - goto st552 + goto st563 case 69: - goto st155 + goto st154 case 92: - goto st157 + goto st156 case 101: - goto st155 + goto st154 case 105: - goto st578 + goto st589 case 117: - goto st581 + goto st592 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st582 + goto st593 } - goto st55 + goto st54 tr217: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st583 - st583: + goto st594 + st594: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof583 + goto _test_eof594 } - st_case_583: -//line plugins/parsers/influx/machine.go:22454 + st_case_594: +//line 
plugins/parsers/influx/machine.go:22738 switch ( m.data)[( m.p)] { case 9: - goto tr877 + goto tr899 case 10: - goto tr736 + goto tr900 case 11: - goto tr878 + goto tr901 case 12: - goto tr814 + goto tr836 case 13: - goto tr739 + goto tr758 case 32: - goto tr877 + goto tr899 case 34: goto tr91 case 44: - goto tr879 + goto tr902 case 65: + goto st157 + case 92: + goto st156 + case 97: + goto st160 + } + goto st54 + st157: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof157 + } + st_case_157: + switch ( m.data)[( m.p)] { + case 9: + goto tr180 + case 10: + goto tr29 + case 11: + goto tr181 + case 12: + goto tr1 + case 13: + goto st7 + case 32: + goto tr180 + case 34: + goto tr91 + case 44: + goto tr182 + case 76: goto st158 case 92: - goto st157 - case 97: - goto st161 + goto st156 } - goto st55 + goto st54 st158: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof158 @@ -22485,25 +22797,25 @@ tr217: case 9: goto tr180 case 10: - goto st7 + goto tr29 case 11: goto tr181 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr180 case 34: goto tr91 case 44: goto tr182 - case 76: + case 83: goto st159 case 92: - goto st157 + goto st156 } - goto st55 + goto st54 st159: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof159 @@ -22513,25 +22825,51 @@ tr217: case 9: goto tr180 case 10: - goto st7 + goto tr29 case 11: goto tr181 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr180 case 34: goto tr91 case 44: goto tr182 - case 83: - goto st160 + case 69: + goto st595 case 92: - goto st157 + goto st156 } - goto st55 + goto st54 + st595: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof595 + } + st_case_595: + switch ( m.data)[( m.p)] { + case 9: + goto tr899 + case 10: + goto tr900 + case 11: + goto tr901 + case 12: + goto tr836 + case 13: + goto tr758 + case 32: + goto tr899 + case 34: + goto tr91 + case 44: + goto tr902 + case 92: + goto st156 + } + goto st54 st160: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof160 @@ -22541,51 +22879,25 @@ tr217: case 9: goto tr180 case 10: - goto st7 + goto tr29 case 11: goto tr181 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr180 case 34: goto tr91 case 44: goto tr182 - case 69: - goto st584 case 92: - goto st157 + goto st156 + case 108: + goto st161 } - goto st55 - st584: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof584 - } - st_case_584: - switch ( m.data)[( m.p)] { - case 9: - goto tr877 - case 10: - goto tr736 - case 11: - goto tr878 - case 12: - goto tr814 - case 13: - goto tr739 - case 32: - goto tr877 - case 34: - goto tr91 - case 44: - goto tr879 - case 92: - goto st157 - } - goto st55 + goto st54 st161: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof161 @@ -22595,13 +22907,13 @@ tr217: case 9: goto tr180 case 10: - goto st7 + goto tr29 case 11: goto tr181 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr180 case 34: @@ -22609,11 +22921,11 @@ tr217: case 44: goto tr182 case 92: - goto st157 - case 108: + goto st156 + case 115: goto st162 } - goto st55 + goto st54 st162: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof162 @@ -22623,13 +22935,13 @@ tr217: case 9: goto tr180 case 10: - goto st7 + goto tr29 case 11: goto tr181 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr180 case 34: @@ -22637,11 +22949,48 @@ tr217: case 44: goto tr182 case 92: - goto st157 - case 115: - goto st163 + goto st156 + case 101: + goto st595 } - goto st55 + goto st54 +tr218: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st596 + st596: + if ( m.p)++; ( m.p) == ( m.pe) { + 
goto _test_eof596 + } + st_case_596: +//line plugins/parsers/influx/machine.go:22969 + switch ( m.data)[( m.p)] { + case 9: + goto tr899 + case 10: + goto tr900 + case 11: + goto tr901 + case 12: + goto tr836 + case 13: + goto tr758 + case 32: + goto tr899 + case 34: + goto tr91 + case 44: + goto tr902 + case 82: + goto st163 + case 92: + goto st156 + case 114: + goto st164 + } + goto st54 st163: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof163 @@ -22651,62 +23000,25 @@ tr217: case 9: goto tr180 case 10: - goto st7 + goto tr29 case 11: goto tr181 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr180 case 34: goto tr91 case 44: goto tr182 + case 85: + goto st159 case 92: - goto st157 - case 101: - goto st584 + goto st156 } - goto st55 -tr218: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st585 - st585: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof585 - } - st_case_585: -//line plugins/parsers/influx/machine.go:22685 - switch ( m.data)[( m.p)] { - case 9: - goto tr877 - case 10: - goto tr736 - case 11: - goto tr878 - case 12: - goto tr814 - case 13: - goto tr739 - case 32: - goto tr877 - case 34: - goto tr91 - case 44: - goto tr879 - case 82: - goto st164 - case 92: - goto st157 - case 114: - goto st165 - } - goto st55 + goto st54 st164: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof164 @@ -22716,25 +23028,95 @@ tr218: case 9: goto tr180 case 10: - goto st7 + goto tr29 case 11: goto tr181 case 12: goto tr1 case 13: - goto st8 + goto st7 case 32: goto tr180 case 34: goto tr91 case 44: goto tr182 - case 85: - goto st160 case 92: - goto st157 + goto st156 + case 117: + goto st162 } - goto st55 + goto st54 +tr219: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st597 + st597: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof597 + } + st_case_597: +//line plugins/parsers/influx/machine.go:23062 + switch ( m.data)[( m.p)] { + case 9: + goto tr899 + case 10: + goto tr900 + case 11: + goto tr901 + case 12: + goto tr836 + case 13: + goto tr758 + case 32: + goto tr899 + case 34: + goto tr91 + case 44: + goto tr902 + case 92: + goto st156 + case 97: + goto st160 + } + goto st54 +tr220: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st598 + st598: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof598 + } + st_case_598: +//line plugins/parsers/influx/machine.go:23097 + switch ( m.data)[( m.p)] { + case 9: + goto tr899 + case 10: + goto tr900 + case 11: + goto tr901 + case 12: + goto tr836 + case 13: + goto tr758 + case 32: + goto tr899 + case 34: + goto tr91 + case 44: + goto tr902 + case 92: + goto st156 + case 114: + goto st164 + } + goto st54 st165: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof165 @@ -22742,115 +23124,17 @@ tr218: st_case_165: switch ( m.data)[( m.p)] { case 9: - goto tr180 - case 10: - goto st7 - case 11: - goto tr181 - case 12: - goto tr1 - case 13: - goto st8 - case 32: - goto tr180 - case 34: - goto tr91 - case 44: - goto tr182 - case 92: - goto st157 - case 117: - goto st163 - } - goto st55 -tr219: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st586 - st586: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof586 - } - st_case_586: -//line plugins/parsers/influx/machine.go:22778 - switch ( m.data)[( m.p)] { - case 9: - goto tr877 - case 10: - goto tr736 - case 11: - goto tr878 - case 12: - goto tr814 - case 13: - goto tr739 - case 32: - goto tr877 - case 34: - goto tr91 - case 44: - goto tr879 - case 92: - goto st157 - case 97: - goto st161 - } - goto 
st55 -tr220: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st587 - st587: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof587 - } - st_case_587: -//line plugins/parsers/influx/machine.go:22813 - switch ( m.data)[( m.p)] { - case 9: - goto tr877 - case 10: - goto tr736 - case 11: - goto tr878 - case 12: - goto tr814 - case 13: - goto tr739 - case 32: - goto tr877 - case 34: - goto tr91 - case 44: - goto tr879 - case 92: - goto st157 - case 114: goto st165 - } - goto st55 - st166: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof166 - } - st_case_166: - switch ( m.data)[( m.p)] { - case 9: - goto st166 case 10: - goto st7 + goto tr29 case 11: goto tr339 case 12: - goto st9 - case 13: goto st8 + case 13: + goto st7 case 32: - goto st166 + goto st165 case 34: goto tr118 case 35: @@ -22862,34 +23146,34 @@ tr220: } goto tr337 tr339: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st167 - st167: + goto st166 + st166: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof167 + goto _test_eof166 } - st_case_167: -//line plugins/parsers/influx/machine.go:22876 + st_case_166: +//line plugins/parsers/influx/machine.go:23160 switch ( m.data)[( m.p)] { case 9: goto tr341 case 10: - goto st7 + goto tr29 case 11: goto tr342 case 12: goto tr38 case 13: - goto st8 + goto st7 case 32: goto tr341 case 34: goto tr85 case 35: - goto st55 + goto st54 case 44: goto tr182 case 92: @@ -22897,37 +23181,37 @@ tr339: } goto tr337 tr341: - ( m.cs) = 168 -//line plugins/parsers/influx/machine.go.rl:77 + ( m.cs) = 167 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st168: + st167: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof168 + goto _test_eof167 } - st_case_168: -//line plugins/parsers/influx/machine.go:22918 + st_case_167: +//line plugins/parsers/influx/machine.go:23202 switch ( m.data)[( m.p)] { case 9: - goto st168 + goto st167 case 10: - goto st7 + goto tr29 case 11: goto tr344 case 12: - goto st11 + goto st10 case 13: - goto st8 + goto st7 case 32: - goto st168 + goto st167 case 34: goto tr124 case 35: @@ -22941,45 +23225,45 @@ tr341: } goto tr184 tr344: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st169 + goto st168 tr345: - ( m.cs) = 169 -//line plugins/parsers/influx/machine.go.rl:19 + ( m.cs) = 168 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:77 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st169: + st168: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof169 + goto _test_eof168 } - st_case_169: -//line plugins/parsers/influx/machine.go:22972 + st_case_168: +//line plugins/parsers/influx/machine.go:23256 switch ( m.data)[( m.p)] { case 9: goto tr341 case 10: - goto st7 + goto tr29 case 11: goto tr345 case 12: goto tr38 case 13: - goto st8 + goto st7 case 32: goto tr341 case 34: @@ -22993,39 +23277,39 @@ tr345: } goto tr184 tr342: - ( m.cs) = 170 -//line plugins/parsers/influx/machine.go.rl:19 + ( m.cs) = 169 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:77 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { 
( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st170: + st169: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof170 + goto _test_eof169 } - st_case_170: -//line plugins/parsers/influx/machine.go:23018 + st_case_169: +//line plugins/parsers/influx/machine.go:23302 switch ( m.data)[( m.p)] { case 9: goto tr341 case 10: - goto st7 + goto tr29 case 11: goto tr345 case 12: goto tr38 case 13: - goto st8 + goto st7 case 32: goto tr341 case 34: @@ -23038,367 +23322,59 @@ tr342: goto tr186 } goto tr184 -tr522: -//line plugins/parsers/influx/machine.go.rl:19 +tr541: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st171 - st171: + goto st170 + st170: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof171 + goto _test_eof170 } - st_case_171: -//line plugins/parsers/influx/machine.go:23053 + st_case_170: +//line plugins/parsers/influx/machine.go:23337 switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr105 case 13: - goto st8 + goto st7 case 34: goto tr31 case 92: - goto st76 + goto st75 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st588 + goto st599 } goto st6 -tr523: -//line plugins/parsers/influx/machine.go.rl:19 +tr542: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st588 - st588: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof588 - } - st_case_588: -//line plugins/parsers/influx/machine.go:23081 - switch ( m.data)[( m.p)] { - case 10: - goto tr659 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st589 - } - case ( m.data)[( m.p)] >= 9: - goto tr658 - } - goto st6 - st589: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof589 - } - st_case_589: - switch ( m.data)[( m.p)] { - case 10: - goto tr659 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st590 - } - case ( m.data)[( m.p)] >= 9: - goto tr658 - } - goto st6 - st590: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof590 - } - st_case_590: - switch ( m.data)[( m.p)] { - case 10: - goto tr659 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st591 - } - case ( m.data)[( m.p)] >= 9: - goto tr658 - } - goto st6 - st591: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof591 - } - st_case_591: - switch ( m.data)[( m.p)] { - case 10: - goto tr659 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st592 - } - case ( m.data)[( m.p)] >= 9: - goto tr658 - } - goto st6 - st592: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof592 - } - st_case_592: - switch ( m.data)[( m.p)] { - case 10: - goto tr659 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st593 - } - case ( m.data)[( m.p)] >= 9: - goto tr658 - } - goto st6 - st593: - if ( m.p)++; ( m.p) == ( m.pe) 
{ - goto _test_eof593 - } - st_case_593: - switch ( m.data)[( m.p)] { - case 10: - goto tr659 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st594 - } - case ( m.data)[( m.p)] >= 9: - goto tr658 - } - goto st6 - st594: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof594 - } - st_case_594: - switch ( m.data)[( m.p)] { - case 10: - goto tr659 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st595 - } - case ( m.data)[( m.p)] >= 9: - goto tr658 - } - goto st6 - st595: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof595 - } - st_case_595: - switch ( m.data)[( m.p)] { - case 10: - goto tr659 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st596 - } - case ( m.data)[( m.p)] >= 9: - goto tr658 - } - goto st6 - st596: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof596 - } - st_case_596: - switch ( m.data)[( m.p)] { - case 10: - goto tr659 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st597 - } - case ( m.data)[( m.p)] >= 9: - goto tr658 - } - goto st6 - st597: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof597 - } - st_case_597: - switch ( m.data)[( m.p)] { - case 10: - goto tr659 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st598 - } - case ( m.data)[( m.p)] >= 9: - goto tr658 - } - goto st6 - st598: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof598 - } - st_case_598: - switch ( m.data)[( m.p)] { - case 10: - goto tr659 - case 12: - goto tr450 - case 13: - goto tr661 - case 32: - goto tr658 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st599 - } - case ( m.data)[( m.p)] >= 9: - goto tr658 - } - goto st6 + goto st599 st599: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof599 } st_case_599: +//line plugins/parsers/influx/machine.go:23365 switch ( m.data)[( m.p)] { case 10: - goto tr659 + goto tr678 case 12: - goto tr450 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr658 + goto tr677 case 34: goto tr31 case 92: - goto st76 + goto st75 } switch { case ( m.data)[( m.p)] > 11: @@ -23406,7 +23382,7 @@ tr523: goto st600 } case ( m.data)[( m.p)] >= 9: - goto tr658 + goto tr677 } goto st6 st600: @@ -23416,17 +23392,17 @@ tr523: st_case_600: switch ( m.data)[( m.p)] { case 10: - goto tr659 + goto tr678 case 12: - goto tr450 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr658 + goto tr677 case 34: goto tr31 case 92: - goto st76 + goto st75 } switch { case ( m.data)[( m.p)] > 11: @@ -23434,7 +23410,7 @@ tr523: goto st601 } case ( m.data)[( m.p)] >= 9: - goto tr658 + goto tr677 } goto st6 st601: @@ -23444,17 +23420,17 @@ tr523: st_case_601: switch ( 
m.data)[( m.p)] { case 10: - goto tr659 + goto tr678 case 12: - goto tr450 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr658 + goto tr677 case 34: goto tr31 case 92: - goto st76 + goto st75 } switch { case ( m.data)[( m.p)] > 11: @@ -23462,7 +23438,7 @@ tr523: goto st602 } case ( m.data)[( m.p)] >= 9: - goto tr658 + goto tr677 } goto st6 st602: @@ -23472,17 +23448,17 @@ tr523: st_case_602: switch ( m.data)[( m.p)] { case 10: - goto tr659 + goto tr678 case 12: - goto tr450 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr658 + goto tr677 case 34: goto tr31 case 92: - goto st76 + goto st75 } switch { case ( m.data)[( m.p)] > 11: @@ -23490,7 +23466,7 @@ tr523: goto st603 } case ( m.data)[( m.p)] >= 9: - goto tr658 + goto tr677 } goto st6 st603: @@ -23500,17 +23476,17 @@ tr523: st_case_603: switch ( m.data)[( m.p)] { case 10: - goto tr659 + goto tr678 case 12: - goto tr450 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr658 + goto tr677 case 34: goto tr31 case 92: - goto st76 + goto st75 } switch { case ( m.data)[( m.p)] > 11: @@ -23518,7 +23494,7 @@ tr523: goto st604 } case ( m.data)[( m.p)] >= 9: - goto tr658 + goto tr677 } goto st6 st604: @@ -23528,17 +23504,17 @@ tr523: st_case_604: switch ( m.data)[( m.p)] { case 10: - goto tr659 + goto tr678 case 12: - goto tr450 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr658 + goto tr677 case 34: goto tr31 case 92: - goto st76 + goto st75 } switch { case ( m.data)[( m.p)] > 11: @@ -23546,7 +23522,7 @@ tr523: goto st605 } case ( m.data)[( m.p)] >= 9: - goto tr658 + goto tr677 } goto st6 st605: @@ -23556,17 +23532,17 @@ tr523: st_case_605: switch ( m.data)[( m.p)] { case 10: - goto tr659 + goto tr678 case 12: - goto tr450 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr658 + goto tr677 case 34: goto tr31 case 92: - goto st76 + goto st75 } switch { case ( m.data)[( m.p)] > 11: @@ -23574,7 +23550,7 @@ tr523: goto st606 } case ( m.data)[( m.p)] >= 9: - goto tr658 + goto tr677 } goto st6 st606: @@ -23584,95 +23560,403 @@ tr523: st_case_606: switch ( m.data)[( m.p)] { case 10: - goto tr659 + goto tr678 case 12: - goto tr450 + goto tr469 case 13: - goto tr661 + goto tr680 case 32: - goto tr658 + goto tr677 case 34: goto tr31 case 92: - goto st76 + goto st75 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr658 + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st607 + } + case ( m.data)[( m.p)] >= 9: + goto tr677 } goto st6 -tr903: -//line plugins/parsers/influx/machine.go.rl:19 + st607: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof607 + } + st_case_607: + switch ( m.data)[( m.p)] { + case 10: + goto tr678 + case 12: + goto tr469 + case 13: + goto tr680 + case 32: + goto tr677 + case 34: + goto tr31 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st608 + } + case ( m.data)[( m.p)] >= 9: + goto tr677 + } + goto st6 + st608: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof608 + } + st_case_608: + switch ( m.data)[( m.p)] { + case 10: + goto tr678 + case 12: + goto tr469 + case 13: + goto tr680 + case 32: + goto tr677 + case 34: + goto tr31 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st609 + } + case ( m.data)[( m.p)] >= 9: + goto tr677 + } + goto st6 + st609: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof609 + 
} + st_case_609: + switch ( m.data)[( m.p)] { + case 10: + goto tr678 + case 12: + goto tr469 + case 13: + goto tr680 + case 32: + goto tr677 + case 34: + goto tr31 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st610 + } + case ( m.data)[( m.p)] >= 9: + goto tr677 + } + goto st6 + st610: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof610 + } + st_case_610: + switch ( m.data)[( m.p)] { + case 10: + goto tr678 + case 12: + goto tr469 + case 13: + goto tr680 + case 32: + goto tr677 + case 34: + goto tr31 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st611 + } + case ( m.data)[( m.p)] >= 9: + goto tr677 + } + goto st6 + st611: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof611 + } + st_case_611: + switch ( m.data)[( m.p)] { + case 10: + goto tr678 + case 12: + goto tr469 + case 13: + goto tr680 + case 32: + goto tr677 + case 34: + goto tr31 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st612 + } + case ( m.data)[( m.p)] >= 9: + goto tr677 + } + goto st6 + st612: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof612 + } + st_case_612: + switch ( m.data)[( m.p)] { + case 10: + goto tr678 + case 12: + goto tr469 + case 13: + goto tr680 + case 32: + goto tr677 + case 34: + goto tr31 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st613 + } + case ( m.data)[( m.p)] >= 9: + goto tr677 + } + goto st6 + st613: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof613 + } + st_case_613: + switch ( m.data)[( m.p)] { + case 10: + goto tr678 + case 12: + goto tr469 + case 13: + goto tr680 + case 32: + goto tr677 + case 34: + goto tr31 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st614 + } + case ( m.data)[( m.p)] >= 9: + goto tr677 + } + goto st6 + st614: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof614 + } + st_case_614: + switch ( m.data)[( m.p)] { + case 10: + goto tr678 + case 12: + goto tr469 + case 13: + goto tr680 + case 32: + goto tr677 + case 34: + goto tr31 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st615 + } + case ( m.data)[( m.p)] >= 9: + goto tr677 + } + goto st6 + st615: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof615 + } + st_case_615: + switch ( m.data)[( m.p)] { + case 10: + goto tr678 + case 12: + goto tr469 + case 13: + goto tr680 + case 32: + goto tr677 + case 34: + goto tr31 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st616 + } + case ( m.data)[( m.p)] >= 9: + goto tr677 + } + goto st6 + st616: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof616 + } + st_case_616: + switch ( m.data)[( m.p)] { + case 10: + goto tr678 + case 12: + goto tr469 + case 13: + goto tr680 + case 32: + goto tr677 + case 34: + goto tr31 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st617 + } + case ( m.data)[( m.p)] >= 9: + goto tr677 + } + goto st6 + st617: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof617 + } + st_case_617: + switch ( m.data)[( m.p)] { + case 10: + goto tr678 + case 12: + 
goto tr469 + case 13: + goto tr680 + case 32: + goto tr677 + case 34: + goto tr31 + case 92: + goto st75 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr677 + } + goto st6 +tr926: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st172 -tr518: - ( m.cs) = 172 -//line plugins/parsers/influx/machine.go.rl:121 + goto st171 +tr537: + ( m.cs) = 171 +//line plugins/parsers/influx/machine.go.rl:122 - err = m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr910: - ( m.cs) = 172 -//line plugins/parsers/influx/machine.go.rl:103 +tr933: + ( m.cs) = 171 +//line plugins/parsers/influx/machine.go.rl:104 - err = m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr913: - ( m.cs) = 172 -//line plugins/parsers/influx/machine.go.rl:112 +tr936: + ( m.cs) = 171 +//line plugins/parsers/influx/machine.go.rl:113 - err = m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr917: - ( m.cs) = 172 -//line plugins/parsers/influx/machine.go.rl:130 +tr940: + ( m.cs) = 171 +//line plugins/parsers/influx/machine.go.rl:131 - err = m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st172: + st171: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof172 + goto _test_eof171 } - st_case_172: -//line plugins/parsers/influx/machine.go:23667 + st_case_171: +//line plugins/parsers/influx/machine.go:23951 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 32: goto st6 case 34: @@ -23686,26 +23970,26 @@ tr917: } goto tr348 tr348: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st173 - st173: + goto st172 + st172: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof173 + goto _test_eof172 } - st_case_173: -//line plugins/parsers/influx/machine.go:23700 + st_case_172: +//line plugins/parsers/influx/machine.go:23984 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 32: goto st6 case 34: @@ -23715,28 +23999,28 @@ tr348: case 61: goto tr351 case 92: - goto st185 + goto st184 } - goto st173 + goto st172 tr351: -//line plugins/parsers/influx/machine.go.rl:99 +//line plugins/parsers/influx/machine.go.rl:100 - key = m.text() + m.key = m.text() - goto st174 - st174: + goto st173 + st173: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof174 + goto _test_eof173 } - st_case_174: -//line plugins/parsers/influx/machine.go:23733 + st_case_173: +//line plugins/parsers/influx/machine.go:24017 switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 34: goto tr353 case 45: @@ -23746,142 +24030,229 @@ tr351: case 48: goto tr169 case 70: - goto tr171 + goto tr354 case 84: - goto tr172 + goto tr355 case 92: - goto st76 + goto st75 case 102: - goto tr173 + goto tr356 case 116: - goto tr174 + goto tr357 } if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto tr170 } goto st6 tr353: - ( m.cs) = 607 -//line plugins/parsers/influx/machine.go.rl:139 + ( m.cs) = 618 
+//line plugins/parsers/influx/machine.go.rl:140 - err = m.handler.AddString(key, m.text()) + err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st607: + st618: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof607 + goto _test_eof618 } - st_case_607: -//line plugins/parsers/influx/machine.go:23782 + st_case_618: +//line plugins/parsers/influx/machine.go:24066 switch ( m.data)[( m.p)] { case 10: - goto tr650 + goto tr669 case 12: - goto st261 + goto st272 case 13: - goto tr652 + goto tr671 case 32: - goto tr902 + goto tr925 case 34: goto tr26 case 44: - goto tr903 + goto tr926 case 92: goto tr27 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr902 + goto tr925 } goto tr23 tr169: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st608 - st608: + goto st619 + st619: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof608 + goto _test_eof619 } - st_case_608: -//line plugins/parsers/influx/machine.go:23814 + st_case_619: +//line plugins/parsers/influx/machine.go:24098 switch ( m.data)[( m.p)] { case 10: - goto tr515 + goto tr534 case 12: - goto tr516 + goto tr535 case 13: - goto tr517 + goto tr536 case 32: - goto tr514 + goto tr533 case 34: goto tr31 case 44: - goto tr518 + goto tr537 case 46: - goto st315 + goto st326 case 69: - goto st175 + goto st174 case 92: - goto st76 + goto st75 case 101: - goto st175 + goto st174 case 105: - goto st613 + goto st624 case 117: - goto st614 + goto st625 } switch { case ( m.data)[( m.p)] > 11: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st609 + goto st620 } case ( m.data)[( m.p)] >= 9: - goto tr514 + goto tr533 } goto st6 - st609: + st620: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof609 + goto _test_eof620 } - st_case_609: + st_case_620: switch ( m.data)[( m.p)] { case 10: - goto tr515 + goto tr534 case 12: - goto tr516 + goto tr535 case 13: - goto tr517 + goto tr536 case 32: - goto tr514 + goto tr533 case 34: goto tr31 case 44: - goto tr518 + goto tr537 case 46: - goto st315 + goto st326 case 69: - goto st175 + goto st174 case 92: - goto st76 + goto st75 case 101: - goto st175 + goto st174 } switch { case ( m.data)[( m.p)] > 11: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st609 + goto st620 } case ( m.data)[( m.p)] >= 9: - goto tr514 + goto tr533 } goto st6 + st174: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof174 + } + st_case_174: + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr358 + case 43: + goto st175 + case 45: + goto st175 + case 92: + goto st75 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st623 + } + goto st6 +tr358: + ( m.cs) = 621 +//line plugins/parsers/influx/machine.go.rl:140 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st621: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof621 + } + st_case_621: +//line plugins/parsers/influx/machine.go:24213 + switch ( m.data)[( m.p)] { + case 10: + goto tr103 + case 13: + goto st33 + case 32: + goto st272 + case 44: + goto st36 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st622 + } + case ( m.data)[( m.p)] >= 9: + goto st272 + } + goto tr105 + st622: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof622 + } + st_case_622: + switch ( 
m.data)[( m.p)] { + case 10: + goto tr734 + case 13: + goto tr736 + case 32: + goto tr535 + case 44: + goto tr930 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st622 + } + case ( m.data)[( m.p)] >= 9: + goto tr535 + } + goto tr105 st175: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof175 @@ -23889,86 +24260,183 @@ tr169: st_case_175: switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 34: - goto tr354 - case 43: - goto st176 - case 45: - goto st176 + goto tr31 case 92: - goto st76 + goto st75 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st612 + goto st623 + } + goto st6 + st623: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof623 + } + st_case_623: + switch ( m.data)[( m.p)] { + case 10: + goto tr534 + case 12: + goto tr535 + case 13: + goto tr536 + case 32: + goto tr533 + case 34: + goto tr31 + case 44: + goto tr537 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st623 + } + case ( m.data)[( m.p)] >= 9: + goto tr533 + } + goto st6 + st624: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof624 + } + st_case_624: + switch ( m.data)[( m.p)] { + case 10: + goto tr741 + case 12: + goto tr932 + case 13: + goto tr744 + case 32: + goto tr931 + case 34: + goto tr31 + case 44: + goto tr933 + case 92: + goto st75 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr931 + } + goto st6 + st625: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof625 + } + st_case_625: + switch ( m.data)[( m.p)] { + case 10: + goto tr748 + case 12: + goto tr935 + case 13: + goto tr751 + case 32: + goto tr934 + case 34: + goto tr31 + case 44: + goto tr936 + case 92: + goto st75 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr934 + } + goto st6 +tr170: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st626 + st626: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof626 + } + st_case_626: +//line plugins/parsers/influx/machine.go:24369 + switch ( m.data)[( m.p)] { + case 10: + goto tr534 + case 12: + goto tr535 + case 13: + goto tr536 + case 32: + goto tr533 + case 34: + goto tr31 + case 44: + goto tr537 + case 46: + goto st326 + case 69: + goto st174 + case 92: + goto st75 + case 101: + goto st174 + case 105: + goto st624 + case 117: + goto st625 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st626 + } + case ( m.data)[( m.p)] >= 9: + goto tr533 } goto st6 tr354: - ( m.cs) = 610 -//line plugins/parsers/influx/machine.go.rl:139 +//line plugins/parsers/influx/machine.go.rl:20 - err = m.handler.AddString(key, m.text()) - if err != nil { - ( m.p)-- + m.pb = m.p - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st610: + goto st627 + st627: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof610 + goto _test_eof627 } - st_case_610: -//line plugins/parsers/influx/machine.go:23929 + st_case_627: +//line plugins/parsers/influx/machine.go:24416 switch ( m.data)[( m.p)] { case 10: - goto st262 + goto tr755 + case 12: + goto tr939 case 13: - goto st34 + goto tr758 case 32: - goto st261 + goto tr938 + case 34: + goto tr31 case 44: - goto st37 + goto tr940 + case 65: + goto st176 + case 92: + goto st75 + case 97: + goto st179 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st611 - } - case ( m.data)[( m.p)] >= 
9: - goto st261 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr938 } - goto tr105 - st611: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof611 - } - st_case_611: - switch ( m.data)[( m.p)] { - case 10: - goto tr715 - case 13: - goto tr717 - case 32: - goto tr516 - case 44: - goto tr907 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st611 - } - case ( m.data)[( m.p)] >= 9: - goto tr516 - } - goto tr105 + goto st6 st176: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof176 @@ -23976,181 +24444,17 @@ tr354: st_case_176: switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 34: goto tr31 - case 92: - goto st76 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st612 - } - goto st6 - st612: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof612 - } - st_case_612: - switch ( m.data)[( m.p)] { - case 10: - goto tr515 - case 12: - goto tr516 - case 13: - goto tr517 - case 32: - goto tr514 - case 34: - goto tr31 - case 44: - goto tr518 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st612 - } - case ( m.data)[( m.p)] >= 9: - goto tr514 - } - goto st6 - st613: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof613 - } - st_case_613: - switch ( m.data)[( m.p)] { - case 10: - goto tr722 - case 12: - goto tr909 - case 13: - goto tr725 - case 32: - goto tr908 - case 34: - goto tr31 - case 44: - goto tr910 - case 92: - goto st76 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr908 - } - goto st6 - st614: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof614 - } - st_case_614: - switch ( m.data)[( m.p)] { - case 10: - goto tr729 - case 12: - goto tr912 - case 13: - goto tr732 - case 32: - goto tr911 - case 34: - goto tr31 - case 44: - goto tr913 - case 92: - goto st76 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr911 - } - goto st6 -tr170: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st615 - st615: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof615 - } - st_case_615: -//line plugins/parsers/influx/machine.go:24085 - switch ( m.data)[( m.p)] { - case 10: - goto tr515 - case 12: - goto tr516 - case 13: - goto tr517 - case 32: - goto tr514 - case 34: - goto tr31 - case 44: - goto tr518 - case 46: - goto st315 - case 69: - goto st175 - case 92: - goto st76 - case 101: - goto st175 - case 105: - goto st613 - case 117: - goto st614 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st615 - } - case ( m.data)[( m.p)] >= 9: - goto tr514 - } - goto st6 -tr171: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st616 - st616: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof616 - } - st_case_616: -//line plugins/parsers/influx/machine.go:24132 - switch ( m.data)[( m.p)] { - case 10: - goto tr736 - case 12: - goto tr916 - case 13: - goto tr739 - case 32: - goto tr915 - case 34: - goto tr31 - case 44: - goto tr917 - case 65: + case 76: goto st177 case 92: - goto st76 - case 97: - goto st180 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr915 + goto st75 } goto st6 st177: @@ -24160,17 +24464,17 @@ tr171: st_case_177: switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 34: goto tr31 - case 76: + case 83: goto st178 case 92: - goto st76 + 
goto st75 } goto st6 st178: @@ -24180,17 +24484,42 @@ tr171: st_case_178: switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 34: goto tr31 - case 83: - goto st179 + case 69: + goto st628 case 92: - goto st76 + goto st75 + } + goto st6 + st628: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof628 + } + st_case_628: + switch ( m.data)[( m.p)] { + case 10: + goto tr755 + case 12: + goto tr939 + case 13: + goto tr758 + case 32: + goto tr938 + case 34: + goto tr31 + case 44: + goto tr940 + case 92: + goto st75 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr938 } goto st6 st179: @@ -24200,42 +24529,17 @@ tr171: st_case_179: switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 34: goto tr31 - case 69: - goto st617 case 92: - goto st76 - } - goto st6 - st617: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof617 - } - st_case_617: - switch ( m.data)[( m.p)] { - case 10: - goto tr736 - case 12: - goto tr916 - case 13: - goto tr739 - case 32: - goto tr915 - case 34: - goto tr31 - case 44: - goto tr917 - case 92: - goto st76 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr915 + goto st75 + case 108: + goto st180 } goto st6 st180: @@ -24245,16 +24549,16 @@ tr171: st_case_180: switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 34: goto tr31 case 92: - goto st76 - case 108: + goto st75 + case 115: goto st181 } goto st6 @@ -24265,17 +24569,53 @@ tr171: st_case_181: switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 34: goto tr31 case 92: - goto st76 - case 115: + goto st75 + case 101: + goto st628 + } + goto st6 +tr355: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st629 + st629: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof629 + } + st_case_629: +//line plugins/parsers/influx/machine.go:24597 + switch ( m.data)[( m.p)] { + case 10: + goto tr755 + case 12: + goto tr939 + case 13: + goto tr758 + case 32: + goto tr938 + case 34: + goto tr31 + case 44: + goto tr940 + case 82: goto st182 + case 92: + goto st75 + case 114: + goto st183 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr938 } goto st6 st182: @@ -24285,53 +24625,17 @@ tr171: st_case_182: switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 34: goto tr31 + case 85: + goto st178 case 92: - goto st76 - case 101: - goto st617 - } - goto st6 -tr172: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st618 - st618: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof618 - } - st_case_618: -//line plugins/parsers/influx/machine.go:24313 - switch ( m.data)[( m.p)] { - case 10: - goto tr736 - case 12: - goto tr916 - case 13: - goto tr739 - case 32: - goto tr915 - case 34: - goto tr31 - case 44: - goto tr917 - case 82: - goto st183 - case 92: - goto st76 - case 114: - goto st184 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr915 + goto st75 } goto st6 st183: @@ -24341,124 +24645,104 @@ tr172: st_case_183: switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 34: goto tr31 - case 85: - goto st179 case 92: - goto st76 + goto st75 + case 117: + goto st181 } goto st6 +tr356: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st630 + st630: 
+ if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof630 + } + st_case_630: +//line plugins/parsers/influx/machine.go:24673 + switch ( m.data)[( m.p)] { + case 10: + goto tr755 + case 12: + goto tr939 + case 13: + goto tr758 + case 32: + goto tr938 + case 34: + goto tr31 + case 44: + goto tr940 + case 92: + goto st75 + case 97: + goto st179 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr938 + } + goto st6 +tr357: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st631 + st631: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof631 + } + st_case_631: +//line plugins/parsers/influx/machine.go:24707 + switch ( m.data)[( m.p)] { + case 10: + goto tr755 + case 12: + goto tr939 + case 13: + goto tr758 + case 32: + goto tr938 + case 34: + goto tr31 + case 44: + goto tr940 + case 92: + goto st75 + case 114: + goto st183 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr938 + } + goto st6 +tr349: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st184 st184: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof184 } st_case_184: - switch ( m.data)[( m.p)] { - case 10: - goto st7 - case 12: - goto tr8 - case 13: - goto st8 - case 34: - goto tr31 - case 92: - goto st76 - case 117: - goto st182 - } - goto st6 -tr173: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st619 - st619: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof619 - } - st_case_619: -//line plugins/parsers/influx/machine.go:24389 - switch ( m.data)[( m.p)] { - case 10: - goto tr736 - case 12: - goto tr916 - case 13: - goto tr739 - case 32: - goto tr915 - case 34: - goto tr31 - case 44: - goto tr917 - case 92: - goto st76 - case 97: - goto st180 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr915 - } - goto st6 -tr174: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st620 - st620: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof620 - } - st_case_620: -//line plugins/parsers/influx/machine.go:24423 - switch ( m.data)[( m.p)] { - case 10: - goto tr736 - case 12: - goto tr916 - case 13: - goto tr739 - case 32: - goto tr915 - case 34: - goto tr31 - case 44: - goto tr917 - case 92: - goto st76 - case 114: - goto st184 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr915 - } - goto st6 -tr349: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st185 - st185: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof185 - } - st_case_185: -//line plugins/parsers/influx/machine.go:24457 +//line plugins/parsers/influx/machine.go:24741 switch ( m.data)[( m.p)] { case 34: - goto st173 + goto st172 case 92: - goto st173 + goto st172 } switch { case ( m.data)[( m.p)] > 10: @@ -24469,107 +24753,432 @@ tr349: goto tr8 } goto st3 - st621: + st632: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof621 + goto _test_eof632 } - st_case_621: + st_case_632: switch ( m.data)[( m.p)] { case 10: - goto tr515 + goto tr534 case 12: - goto tr516 + goto tr535 case 13: - goto tr517 + goto tr536 case 32: - goto tr514 + goto tr533 case 34: goto tr31 case 44: - goto tr518 + goto tr537 case 46: - goto st315 + goto st326 case 69: - goto st175 + goto st174 case 92: - goto st76 + goto st75 case 101: - goto st175 + goto st174 case 105: - goto st613 + goto st624 } switch { case ( m.data)[( m.p)] > 11: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st609 + goto st620 } case ( m.data)[( m.p)] >= 9: - goto tr514 + goto tr533 } goto st6 - st622: + st633: if ( m.p)++; ( m.p) == 
( m.pe) { - goto _test_eof622 + goto _test_eof633 } - st_case_622: + st_case_633: switch ( m.data)[( m.p)] { case 10: - goto tr515 + goto tr534 case 12: - goto tr516 + goto tr535 case 13: - goto tr517 + goto tr536 case 32: - goto tr514 + goto tr533 case 34: goto tr31 case 44: - goto tr518 + goto tr537 case 46: - goto st315 + goto st326 case 69: - goto st175 + goto st174 case 92: - goto st76 + goto st75 case 101: - goto st175 + goto st174 case 105: - goto st613 + goto st624 } switch { case ( m.data)[( m.p)] > 11: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st622 + goto st633 } case ( m.data)[( m.p)] >= 9: - goto tr514 + goto tr533 } goto st6 -tr162: -//line plugins/parsers/influx/machine.go.rl:19 +tr171: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st186 + goto st634 + st634: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof634 + } + st_case_634: +//line plugins/parsers/influx/machine.go:24844 + switch ( m.data)[( m.p)] { + case 10: + goto tr900 + case 12: + goto tr939 + case 13: + goto tr758 + case 32: + goto tr938 + case 34: + goto tr31 + case 44: + goto tr940 + case 65: + goto st185 + case 92: + goto st75 + case 97: + goto st188 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr938 + } + goto st6 + st185: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof185 + } + st_case_185: + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr31 + case 76: + goto st186 + case 92: + goto st75 + } + goto st6 st186: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof186 } st_case_186: -//line plugins/parsers/influx/machine.go:24560 + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr31 + case 83: + goto st187 + case 92: + goto st75 + } + goto st6 + st187: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof187 + } + st_case_187: + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr31 + case 69: + goto st635 + case 92: + goto st75 + } + goto st6 + st635: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof635 + } + st_case_635: + switch ( m.data)[( m.p)] { + case 10: + goto tr900 + case 12: + goto tr939 + case 13: + goto tr758 + case 32: + goto tr938 + case 34: + goto tr31 + case 44: + goto tr940 + case 92: + goto st75 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr938 + } + goto st6 + st188: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof188 + } + st_case_188: + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr31 + case 92: + goto st75 + case 108: + goto st189 + } + goto st6 + st189: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof189 + } + st_case_189: + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr31 + case 92: + goto st75 + case 115: + goto st190 + } + goto st6 + st190: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof190 + } + st_case_190: + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr31 + case 92: + goto st75 + case 101: + goto st635 + } + goto st6 +tr172: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st636 + st636: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof636 + } + st_case_636: +//line plugins/parsers/influx/machine.go:25025 + switch ( m.data)[( m.p)] { + case 10: + goto tr900 + case 12: + 
goto tr939 + case 13: + goto tr758 + case 32: + goto tr938 + case 34: + goto tr31 + case 44: + goto tr940 + case 82: + goto st191 + case 92: + goto st75 + case 114: + goto st192 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr938 + } + goto st6 + st191: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof191 + } + st_case_191: + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr31 + case 85: + goto st187 + case 92: + goto st75 + } + goto st6 + st192: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof192 + } + st_case_192: + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr31 + case 92: + goto st75 + case 117: + goto st190 + } + goto st6 +tr173: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st637 + st637: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof637 + } + st_case_637: +//line plugins/parsers/influx/machine.go:25101 + switch ( m.data)[( m.p)] { + case 10: + goto tr900 + case 12: + goto tr939 + case 13: + goto tr758 + case 32: + goto tr938 + case 34: + goto tr31 + case 44: + goto tr940 + case 92: + goto st75 + case 97: + goto st188 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr938 + } + goto st6 +tr174: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st638 + st638: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof638 + } + st_case_638: +//line plugins/parsers/influx/machine.go:25135 + switch ( m.data)[( m.p)] { + case 10: + goto tr900 + case 12: + goto tr939 + case 13: + goto tr758 + case 32: + goto tr938 + case 34: + goto tr31 + case 44: + goto tr940 + case 92: + goto st75 + case 114: + goto st192 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr938 + } + goto st6 +tr162: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st193 + st193: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof193 + } + st_case_193: +//line plugins/parsers/influx/machine.go:25169 switch ( m.data)[( m.p)] { case 9: - goto st50 + goto st49 case 10: - goto st7 + goto tr29 case 11: goto tr162 case 12: goto st2 case 13: - goto st8 + goto st7 case 32: - goto st50 + goto st49 case 34: goto tr97 case 44: @@ -24581,586 +25190,17 @@ tr162: } goto tr160 tr140: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st187 - st187: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof187 - } - st_case_187: -//line plugins/parsers/influx/machine.go:24595 - switch ( m.data)[( m.p)] { - case 10: - goto tr47 - case 11: - goto tr61 - case 13: - goto tr47 - case 32: - goto tr60 - case 44: - goto tr62 - case 46: - goto st188 - case 48: - goto st624 - case 61: - goto tr47 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st627 - } - case ( m.data)[( m.p)] >= 9: - goto tr60 - } - goto st17 -tr141: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st188 - st188: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof188 - } - st_case_188: -//line plugins/parsers/influx/machine.go:24636 - switch ( m.data)[( m.p)] { - case 10: - goto tr47 - case 11: - goto tr61 - case 13: - goto tr47 - case 32: - goto tr60 - case 44: - goto tr62 - case 61: - goto tr47 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st623 - } - case ( m.data)[( 
m.p)] >= 9: - goto tr60 - } - goto st17 - st623: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof623 - } - st_case_623: - switch ( m.data)[( m.p)] { - case 10: - goto tr715 - case 11: - goto tr716 - case 13: - goto tr717 - case 32: - goto tr712 - case 44: - goto tr718 - case 61: - goto tr132 - case 69: - goto st189 - case 92: - goto st23 - case 101: - goto st189 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st623 - } - case ( m.data)[( m.p)] >= 9: - goto tr712 - } - goto st17 - st189: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof189 - } - st_case_189: - switch ( m.data)[( m.p)] { - case 10: - goto tr47 - case 11: - goto tr61 - case 13: - goto tr47 - case 32: - goto tr60 - case 34: - goto st190 - case 44: - goto tr62 - case 61: - goto tr47 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] < 43: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st469 - } - default: - goto st190 - } - goto st17 - st190: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof190 - } - st_case_190: - switch ( m.data)[( m.p)] { - case 10: - goto tr47 - case 11: - goto tr61 - case 13: - goto tr47 - case 32: - goto tr60 - case 44: - goto tr62 - case 61: - goto tr47 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st469 - } - case ( m.data)[( m.p)] >= 9: - goto tr60 - } - goto st17 - st624: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof624 - } - st_case_624: - switch ( m.data)[( m.p)] { - case 10: - goto tr715 - case 11: - goto tr716 - case 13: - goto tr717 - case 32: - goto tr712 - case 44: - goto tr718 - case 46: - goto st623 - case 61: - goto tr132 - case 69: - goto st189 - case 92: - goto st23 - case 101: - goto st189 - case 105: - goto st626 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st625 - } - case ( m.data)[( m.p)] >= 9: - goto tr712 - } - goto st17 - st625: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof625 - } - st_case_625: - switch ( m.data)[( m.p)] { - case 10: - goto tr715 - case 11: - goto tr716 - case 13: - goto tr717 - case 32: - goto tr712 - case 44: - goto tr718 - case 46: - goto st623 - case 61: - goto tr132 - case 69: - goto st189 - case 92: - goto st23 - case 101: - goto st189 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st625 - } - case ( m.data)[( m.p)] >= 9: - goto tr712 - } - goto st17 - st626: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof626 - } - st_case_626: - switch ( m.data)[( m.p)] { - case 10: - goto tr925 - case 11: - goto tr926 - case 13: - goto tr927 - case 32: - goto tr724 - case 44: - goto tr928 - case 61: - goto tr132 - case 92: - goto st23 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr724 - } - goto st17 - st627: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof627 - } - st_case_627: - switch ( m.data)[( m.p)] { - case 10: - goto tr715 - case 11: - goto tr716 - case 13: - goto tr717 - case 32: - goto tr712 - case 44: - goto tr718 - case 46: - goto st623 - case 61: - goto tr132 - case 69: - goto st189 - case 92: - goto st23 - case 101: - goto st189 - case 105: - goto st626 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st627 - } - case ( m.data)[( m.p)] >= 9: - 
goto tr712 - } - goto st17 -tr142: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st628 - st628: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof628 - } - st_case_628: -//line plugins/parsers/influx/machine.go:24910 - switch ( m.data)[( m.p)] { - case 10: - goto tr715 - case 11: - goto tr716 - case 13: - goto tr717 - case 32: - goto tr712 - case 44: - goto tr718 - case 46: - goto st623 - case 61: - goto tr132 - case 69: - goto st189 - case 92: - goto st23 - case 101: - goto st189 - case 105: - goto st626 - case 117: - goto st629 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st625 - } - case ( m.data)[( m.p)] >= 9: - goto tr712 - } - goto st17 - st629: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof629 - } - st_case_629: - switch ( m.data)[( m.p)] { - case 10: - goto tr930 - case 11: - goto tr931 - case 13: - goto tr932 - case 32: - goto tr731 - case 44: - goto tr933 - case 61: - goto tr132 - case 92: - goto st23 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr731 - } - goto st17 -tr143: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st630 - st630: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof630 - } - st_case_630: -//line plugins/parsers/influx/machine.go:24982 - switch ( m.data)[( m.p)] { - case 10: - goto tr715 - case 11: - goto tr716 - case 13: - goto tr717 - case 32: - goto tr712 - case 44: - goto tr718 - case 46: - goto st623 - case 61: - goto tr132 - case 69: - goto st189 - case 92: - goto st23 - case 101: - goto st189 - case 105: - goto st626 - case 117: - goto st629 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st630 - } - case ( m.data)[( m.p)] >= 9: - goto tr712 - } - goto st17 -tr144: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st631 - st631: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof631 - } - st_case_631: -//line plugins/parsers/influx/machine.go:25029 - switch ( m.data)[( m.p)] { - case 10: - goto tr935 - case 11: - goto tr936 - case 13: - goto tr937 - case 32: - goto tr738 - case 44: - goto tr938 - case 61: - goto tr132 - case 65: - goto st191 - case 92: - goto st23 - case 97: - goto st194 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr738 - } - goto st17 - st191: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof191 - } - st_case_191: - switch ( m.data)[( m.p)] { - case 10: - goto tr47 - case 11: - goto tr61 - case 13: - goto tr47 - case 32: - goto tr60 - case 44: - goto tr62 - case 61: - goto tr47 - case 76: - goto st192 - case 92: - goto st23 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st17 - st192: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof192 - } - st_case_192: - switch ( m.data)[( m.p)] { - case 10: - goto tr47 - case 11: - goto tr61 - case 13: - goto tr47 - case 32: - goto tr60 - case 44: - goto tr62 - case 61: - goto tr47 - case 83: - goto st193 - case 92: - goto st23 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st17 - st193: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof193 - } - st_case_193: - switch ( m.data)[( m.p)] { - case 10: - goto tr47 - case 11: - goto tr61 - case 13: - goto tr47 - case 32: - goto tr60 - case 44: - goto tr62 - case 61: - goto tr47 - case 69: - goto st632 - case 92: - goto st23 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st17 - st632: - if ( 
m.p)++; ( m.p) == ( m.pe) { - goto _test_eof632 - } - st_case_632: - switch ( m.data)[( m.p)] { - case 10: - goto tr935 - case 11: - goto tr936 - case 13: - goto tr937 - case 32: - goto tr738 - case 44: - goto tr938 - case 61: - goto tr132 - case 92: - goto st23 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr738 - } - goto st17 + goto st194 st194: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof194 } st_case_194: +//line plugins/parsers/influx/machine.go:25204 switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -25172,22 +25212,36 @@ tr144: goto tr60 case 44: goto tr62 + case 46: + goto st195 + case 48: + goto st640 case 61: goto tr47 case 92: - goto st23 - case 108: - goto st195 + goto st22 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + switch { + case ( m.data)[( m.p)] > 12: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st643 + } + case ( m.data)[( m.p)] >= 9: goto tr60 } - goto st17 + goto st16 +tr141: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st195 st195: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof195 } st_case_195: +//line plugins/parsers/influx/machine.go:25245 switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -25202,14 +25256,51 @@ tr144: case 61: goto tr47 case 92: - goto st23 - case 115: - goto st196 + goto st22 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st639 + } + case ( m.data)[( m.p)] >= 9: goto tr60 } - goto st17 + goto st16 + st639: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof639 + } + st_case_639: + switch ( m.data)[( m.p)] { + case 10: + goto tr734 + case 11: + goto tr735 + case 13: + goto tr736 + case 32: + goto tr731 + case 44: + goto tr737 + case 61: + goto tr132 + case 69: + goto st196 + case 92: + goto st22 + case 101: + goto st196 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st639 + } + case ( m.data)[( m.p)] >= 9: + goto tr731 + } + goto st16 st196: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof196 @@ -25224,55 +25315,28 @@ tr144: goto tr47 case 32: goto tr60 + case 34: + goto st197 case 44: goto tr62 case 61: goto tr47 case 92: - goto st23 - case 101: - goto st632 + goto st22 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st17 -tr145: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st633 - st633: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof633 - } - st_case_633: -//line plugins/parsers/influx/machine.go:25252 - switch ( m.data)[( m.p)] { - case 10: - goto tr935 - case 11: - goto tr936 - case 13: - goto tr937 - case 32: - goto tr738 - case 44: - goto tr938 - case 61: - goto tr132 - case 82: + switch { + case ( m.data)[( m.p)] < 43: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr60 + } + case ( m.data)[( m.p)] > 45: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st480 + } + default: goto st197 - case 92: - goto st23 - case 114: - goto st198 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr738 - } - goto st17 + goto st16 st197: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof197 @@ -25291,15 +25355,310 @@ tr145: goto tr62 case 61: goto tr47 - case 85: - goto st193 case 92: - goto st23 + goto st22 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st480 + } + 
case ( m.data)[( m.p)] >= 9: goto tr60 } - goto st17 + goto st16 + st640: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof640 + } + st_case_640: + switch ( m.data)[( m.p)] { + case 10: + goto tr734 + case 11: + goto tr735 + case 13: + goto tr736 + case 32: + goto tr731 + case 44: + goto tr737 + case 46: + goto st639 + case 61: + goto tr132 + case 69: + goto st196 + case 92: + goto st22 + case 101: + goto st196 + case 105: + goto st642 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st641 + } + case ( m.data)[( m.p)] >= 9: + goto tr731 + } + goto st16 + st641: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof641 + } + st_case_641: + switch ( m.data)[( m.p)] { + case 10: + goto tr734 + case 11: + goto tr735 + case 13: + goto tr736 + case 32: + goto tr731 + case 44: + goto tr737 + case 46: + goto st639 + case 61: + goto tr132 + case 69: + goto st196 + case 92: + goto st22 + case 101: + goto st196 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st641 + } + case ( m.data)[( m.p)] >= 9: + goto tr731 + } + goto st16 + st642: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof642 + } + st_case_642: + switch ( m.data)[( m.p)] { + case 10: + goto tr952 + case 11: + goto tr953 + case 13: + goto tr954 + case 32: + goto tr743 + case 44: + goto tr955 + case 61: + goto tr132 + case 92: + goto st22 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr743 + } + goto st16 + st643: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof643 + } + st_case_643: + switch ( m.data)[( m.p)] { + case 10: + goto tr734 + case 11: + goto tr735 + case 13: + goto tr736 + case 32: + goto tr731 + case 44: + goto tr737 + case 46: + goto st639 + case 61: + goto tr132 + case 69: + goto st196 + case 92: + goto st22 + case 101: + goto st196 + case 105: + goto st642 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st643 + } + case ( m.data)[( m.p)] >= 9: + goto tr731 + } + goto st16 +tr142: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st644 + st644: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof644 + } + st_case_644: +//line plugins/parsers/influx/machine.go:25519 + switch ( m.data)[( m.p)] { + case 10: + goto tr734 + case 11: + goto tr735 + case 13: + goto tr736 + case 32: + goto tr731 + case 44: + goto tr737 + case 46: + goto st639 + case 61: + goto tr132 + case 69: + goto st196 + case 92: + goto st22 + case 101: + goto st196 + case 105: + goto st642 + case 117: + goto st645 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st641 + } + case ( m.data)[( m.p)] >= 9: + goto tr731 + } + goto st16 + st645: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof645 + } + st_case_645: + switch ( m.data)[( m.p)] { + case 10: + goto tr957 + case 11: + goto tr958 + case 13: + goto tr959 + case 32: + goto tr750 + case 44: + goto tr960 + case 61: + goto tr132 + case 92: + goto st22 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr750 + } + goto st16 +tr143: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st646 + st646: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof646 + } + st_case_646: +//line plugins/parsers/influx/machine.go:25591 + switch ( m.data)[( m.p)] { + case 10: + goto tr734 + case 11: + goto tr735 + case 13: + goto tr736 + case 32: + goto tr731 + case 44: + goto tr737 + case 46: + goto 
st639 + case 61: + goto tr132 + case 69: + goto st196 + case 92: + goto st22 + case 101: + goto st196 + case 105: + goto st642 + case 117: + goto st645 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st646 + } + case ( m.data)[( m.p)] >= 9: + goto tr731 + } + goto st16 +tr144: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st647 + st647: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof647 + } + st_case_647: +//line plugins/parsers/influx/machine.go:25638 + switch ( m.data)[( m.p)] { + case 10: + goto tr962 + case 11: + goto tr963 + case 13: + goto tr964 + case 32: + goto tr757 + case 44: + goto tr965 + case 61: + goto tr132 + case 65: + goto st198 + case 92: + goto st22 + case 97: + goto st201 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr757 + } + goto st16 st198: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof198 @@ -25318,123 +25677,373 @@ tr145: goto tr62 case 61: goto tr47 + case 76: + goto st199 case 92: - goto st23 - case 117: - goto st196 + goto st22 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr60 } - goto st17 -tr146: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st634 - st634: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof634 - } - st_case_634: -//line plugins/parsers/influx/machine.go:25342 - switch ( m.data)[( m.p)] { - case 10: - goto tr935 - case 11: - goto tr936 - case 13: - goto tr937 - case 32: - goto tr738 - case 44: - goto tr938 - case 61: - goto tr132 - case 92: - goto st23 - case 97: - goto st194 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr738 - } - goto st17 -tr147: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st635 - st635: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof635 - } - st_case_635: -//line plugins/parsers/influx/machine.go:25376 - switch ( m.data)[( m.p)] { - case 10: - goto tr935 - case 11: - goto tr936 - case 13: - goto tr937 - case 32: - goto tr738 - case 44: - goto tr938 - case 61: - goto tr132 - case 92: - goto st23 - case 114: - goto st198 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr738 - } - goto st17 -tr123: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st199 -tr373: - ( m.cs) = 199 -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again + goto st16 st199: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof199 } st_case_199: -//line plugins/parsers/influx/machine.go:25427 + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr61 + case 13: + goto tr47 + case 32: + goto tr60 + case 44: + goto tr62 + case 61: + goto tr47 + case 83: + goto st200 + case 92: + goto st22 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr60 + } + goto st16 + st200: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof200 + } + st_case_200: + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr61 + case 13: + goto tr47 + case 32: + goto tr60 + case 44: + goto tr62 + case 61: + goto tr47 + case 69: + goto st648 + case 92: + goto st22 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr60 + } + goto st16 + st648: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof648 + } + st_case_648: + switch ( m.data)[( m.p)] { + case 10: + goto tr962 + case 11: 
+ goto tr963 + case 13: + goto tr964 + case 32: + goto tr757 + case 44: + goto tr965 + case 61: + goto tr132 + case 92: + goto st22 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr757 + } + goto st16 + st201: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof201 + } + st_case_201: + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr61 + case 13: + goto tr47 + case 32: + goto tr60 + case 44: + goto tr62 + case 61: + goto tr47 + case 92: + goto st22 + case 108: + goto st202 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr60 + } + goto st16 + st202: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof202 + } + st_case_202: + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr61 + case 13: + goto tr47 + case 32: + goto tr60 + case 44: + goto tr62 + case 61: + goto tr47 + case 92: + goto st22 + case 115: + goto st203 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr60 + } + goto st16 + st203: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof203 + } + st_case_203: + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr61 + case 13: + goto tr47 + case 32: + goto tr60 + case 44: + goto tr62 + case 61: + goto tr47 + case 92: + goto st22 + case 101: + goto st648 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr60 + } + goto st16 +tr145: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st649 + st649: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof649 + } + st_case_649: +//line plugins/parsers/influx/machine.go:25861 + switch ( m.data)[( m.p)] { + case 10: + goto tr962 + case 11: + goto tr963 + case 13: + goto tr964 + case 32: + goto tr757 + case 44: + goto tr965 + case 61: + goto tr132 + case 82: + goto st204 + case 92: + goto st22 + case 114: + goto st205 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr757 + } + goto st16 + st204: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof204 + } + st_case_204: + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr61 + case 13: + goto tr47 + case 32: + goto tr60 + case 44: + goto tr62 + case 61: + goto tr47 + case 85: + goto st200 + case 92: + goto st22 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr60 + } + goto st16 + st205: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof205 + } + st_case_205: + switch ( m.data)[( m.p)] { + case 10: + goto tr47 + case 11: + goto tr61 + case 13: + goto tr47 + case 32: + goto tr60 + case 44: + goto tr62 + case 61: + goto tr47 + case 92: + goto st22 + case 117: + goto st203 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr60 + } + goto st16 +tr146: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st650 + st650: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof650 + } + st_case_650: +//line plugins/parsers/influx/machine.go:25951 + switch ( m.data)[( m.p)] { + case 10: + goto tr962 + case 11: + goto tr963 + case 13: + goto tr964 + case 32: + goto tr757 + case 44: + goto tr965 + case 61: + goto tr132 + case 92: + goto st22 + case 97: + goto st201 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr757 + } + goto st16 +tr147: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st651 + st651: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof651 + } + st_case_651: +//line plugins/parsers/influx/machine.go:25985 + switch ( m.data)[( m.p)] { + case 10: + goto tr962 + case 11: + goto tr963 + case 13: + goto tr964 + 
case 32: + goto tr757 + case 44: + goto tr965 + case 61: + goto tr132 + case 92: + goto st22 + case 114: + goto st205 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr757 + } + goto st16 +tr123: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st206 +tr382: + ( m.cs) = 206 +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st206: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof206 + } + st_case_206: +//line plugins/parsers/influx/machine.go:26036 switch ( m.data)[( m.p)] { case 9: goto tr119 case 10: - goto st7 + goto tr29 case 11: - goto tr373 + goto tr382 case 12: goto tr38 case 13: - goto st8 + goto st7 case 32: goto tr119 case 34: @@ -25442,45 +26051,45 @@ tr373: case 44: goto tr92 case 61: - goto tr374 + goto tr383 case 92: goto tr125 } goto tr121 tr120: - ( m.cs) = 200 -//line plugins/parsers/influx/machine.go.rl:19 + ( m.cs) = 207 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:77 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st200: + st207: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof200 + goto _test_eof207 } - st_case_200: -//line plugins/parsers/influx/machine.go:25473 + st_case_207: +//line plugins/parsers/influx/machine.go:26082 switch ( m.data)[( m.p)] { case 9: goto tr119 case 10: - goto st7 + goto tr29 case 11: - goto tr373 + goto tr382 case 12: goto tr38 case 13: - goto st8 + goto st7 case 32: goto tr119 case 34: @@ -25493,507 +26102,59 @@ tr120: goto tr125 } goto tr121 -tr480: -//line plugins/parsers/influx/machine.go.rl:19 +tr499: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st201 - st201: + goto st208 + st208: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof201 + goto _test_eof208 } - st_case_201: -//line plugins/parsers/influx/machine.go:25508 + st_case_208: +//line plugins/parsers/influx/machine.go:26117 switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr105 case 13: - goto st8 + goto st7 case 34: goto tr31 case 92: - goto st76 + goto st75 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st636 + goto st652 } goto st6 -tr481: -//line plugins/parsers/influx/machine.go.rl:19 +tr500: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st636 - st636: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof636 - } - st_case_636: -//line plugins/parsers/influx/machine.go:25536 - switch ( m.data)[( m.p)] { - case 10: - goto tr584 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st637 - } - case ( m.data)[( m.p)] >= 9: - goto tr583 - } - goto st6 - st637: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof637 - } - st_case_637: - switch ( m.data)[( m.p)] { - case 10: - goto tr584 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st638 - } - case ( m.data)[( m.p)] >= 9: - goto tr583 - } - goto st6 - st638: - if ( 
m.p)++; ( m.p) == ( m.pe) { - goto _test_eof638 - } - st_case_638: - switch ( m.data)[( m.p)] { - case 10: - goto tr584 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st639 - } - case ( m.data)[( m.p)] >= 9: - goto tr583 - } - goto st6 - st639: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof639 - } - st_case_639: - switch ( m.data)[( m.p)] { - case 10: - goto tr584 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st640 - } - case ( m.data)[( m.p)] >= 9: - goto tr583 - } - goto st6 - st640: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof640 - } - st_case_640: - switch ( m.data)[( m.p)] { - case 10: - goto tr584 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st641 - } - case ( m.data)[( m.p)] >= 9: - goto tr583 - } - goto st6 - st641: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof641 - } - st_case_641: - switch ( m.data)[( m.p)] { - case 10: - goto tr584 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st642 - } - case ( m.data)[( m.p)] >= 9: - goto tr583 - } - goto st6 - st642: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof642 - } - st_case_642: - switch ( m.data)[( m.p)] { - case 10: - goto tr584 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st643 - } - case ( m.data)[( m.p)] >= 9: - goto tr583 - } - goto st6 - st643: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof643 - } - st_case_643: - switch ( m.data)[( m.p)] { - case 10: - goto tr584 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st644 - } - case ( m.data)[( m.p)] >= 9: - goto tr583 - } - goto st6 - st644: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof644 - } - st_case_644: - switch ( m.data)[( m.p)] { - case 10: - goto tr584 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st645 - } - case ( m.data)[( m.p)] >= 9: - goto tr583 - } - goto st6 - st645: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof645 - } - st_case_645: - switch ( m.data)[( m.p)] { - case 10: - goto tr584 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st646 - } - case ( m.data)[( m.p)] >= 9: - goto tr583 - } - goto st6 - st646: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof646 - } - st_case_646: - switch ( 
m.data)[( m.p)] { - case 10: - goto tr584 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st647 - } - case ( m.data)[( m.p)] >= 9: - goto tr583 - } - goto st6 - st647: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof647 - } - st_case_647: - switch ( m.data)[( m.p)] { - case 10: - goto tr584 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st648 - } - case ( m.data)[( m.p)] >= 9: - goto tr583 - } - goto st6 - st648: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof648 - } - st_case_648: - switch ( m.data)[( m.p)] { - case 10: - goto tr584 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st649 - } - case ( m.data)[( m.p)] >= 9: - goto tr583 - } - goto st6 - st649: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof649 - } - st_case_649: - switch ( m.data)[( m.p)] { - case 10: - goto tr584 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st650 - } - case ( m.data)[( m.p)] >= 9: - goto tr583 - } - goto st6 - st650: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof650 - } - st_case_650: - switch ( m.data)[( m.p)] { - case 10: - goto tr584 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st651 - } - case ( m.data)[( m.p)] >= 9: - goto tr583 - } - goto st6 - st651: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof651 - } - st_case_651: - switch ( m.data)[( m.p)] { - case 10: - goto tr584 - case 12: - goto tr450 - case 13: - goto tr586 - case 32: - goto tr583 - case 34: - goto tr31 - case 92: - goto st76 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st652 - } - case ( m.data)[( m.p)] >= 9: - goto tr583 - } - goto st6 + goto st652 st652: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof652 } st_case_652: +//line plugins/parsers/influx/machine.go:26145 switch ( m.data)[( m.p)] { case 10: - goto tr584 + goto tr603 case 12: - goto tr450 + goto tr469 case 13: - goto tr586 + goto tr605 case 32: - goto tr583 + goto tr602 case 34: goto tr31 case 92: - goto st76 + goto st75 } switch { case ( m.data)[( m.p)] > 11: @@ -26001,7 +26162,7 @@ tr481: goto st653 } case ( m.data)[( m.p)] >= 9: - goto tr583 + goto tr602 } goto st6 st653: @@ -26011,17 +26172,17 @@ tr481: st_case_653: switch ( m.data)[( m.p)] { case 10: - goto tr584 + goto tr603 case 12: - goto tr450 + goto tr469 case 13: - goto tr586 + goto tr605 case 32: - goto tr583 + goto tr602 case 34: goto tr31 case 92: - goto st76 + goto st75 } switch { case ( m.data)[( m.p)] > 11: @@ -26029,7 +26190,7 @@ tr481: goto st654 } case ( m.data)[( m.p)] >= 9: - goto tr583 + goto tr602 } goto st6 st654: @@ -26039,240 +26200,25 @@ tr481: st_case_654: switch ( m.data)[( m.p)] { case 10: - goto tr584 + goto tr603 
case 12: - goto tr450 + goto tr469 case 13: - goto tr586 + goto tr605 case 32: - goto tr583 + goto tr602 case 34: goto tr31 case 92: - goto st76 + goto st75 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr583 - } - goto st6 -tr477: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st202 -tr962: - ( m.cs) = 202 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddFloat(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr967: - ( m.cs) = 202 -//line plugins/parsers/influx/machine.go.rl:103 - - err = m.handler.AddInt(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr970: - ( m.cs) = 202 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddUint(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr973: - ( m.cs) = 202 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddBool(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st202: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof202 - } - st_case_202: -//line plugins/parsers/influx/machine.go:26122 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto st7 - case 12: - goto tr8 - case 13: - goto st8 - case 32: - goto st6 - case 34: - goto tr377 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr378 - } - goto tr376 -tr376: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st203 - st203: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof203 - } - st_case_203: -//line plugins/parsers/influx/machine.go:26155 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto st7 - case 12: - goto tr8 - case 13: - goto st8 - case 32: - goto st6 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr380 - case 92: - goto st217 - } - goto st203 -tr380: -//line plugins/parsers/influx/machine.go.rl:99 - - key = m.text() - - goto st204 - st204: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof204 - } - st_case_204: -//line plugins/parsers/influx/machine.go:26188 - switch ( m.data)[( m.p)] { - case 10: - goto st7 - case 12: - goto tr8 - case 13: - goto st8 - case 34: - goto tr353 - case 45: - goto tr108 - case 46: - goto tr109 - case 48: - goto tr110 - case 70: - goto tr112 - case 84: - goto tr113 - case 92: - goto st76 - case 102: - goto tr114 - case 116: - goto tr115 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr111 - } - goto st6 -tr108: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st205 - st205: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof205 - } - st_case_205: -//line plugins/parsers/influx/machine.go:26230 - switch ( m.data)[( m.p)] { - case 10: - goto st7 - case 12: - goto tr8 - case 13: - goto st8 - case 34: - goto tr31 - case 46: - goto st206 - case 48: - goto st657 - case 92: - goto st76 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st660 - } - goto st6 -tr109: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st206 - st206: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof206 - } - st_case_206: -//line plugins/parsers/influx/machine.go:26262 - switch ( m.data)[( m.p)] { - case 10: - goto st7 - case 12: - goto tr8 - case 13: - goto st8 - case 34: - goto tr31 - case 92: - goto st76 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - 
goto st655 + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st655 + } + case ( m.data)[( m.p)] >= 9: + goto tr602 } goto st6 st655: @@ -26282,77 +26228,25 @@ tr109: st_case_655: switch ( m.data)[( m.p)] { case 10: - goto tr620 + goto tr603 case 12: - goto tr516 + goto tr469 case 13: - goto tr623 + goto tr605 case 32: - goto tr961 + goto tr602 case 34: goto tr31 - case 44: - goto tr962 - case 69: - goto st207 case 92: - goto st76 - case 101: - goto st207 + goto st75 } switch { case ( m.data)[( m.p)] > 11: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st655 + goto st656 } case ( m.data)[( m.p)] >= 9: - goto tr961 - } - goto st6 - st207: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof207 - } - st_case_207: - switch ( m.data)[( m.p)] { - case 10: - goto st7 - case 12: - goto tr8 - case 13: - goto st8 - case 34: - goto tr354 - case 43: - goto st208 - case 45: - goto st208 - case 92: - goto st76 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st656 - } - goto st6 - st208: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof208 - } - st_case_208: - switch ( m.data)[( m.p)] { - case 10: - goto st7 - case 12: - goto tr8 - case 13: - goto st8 - case 34: - goto tr31 - case 92: - goto st76 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st656 + goto tr602 } goto st6 st656: @@ -26362,27 +26256,25 @@ tr109: st_case_656: switch ( m.data)[( m.p)] { case 10: - goto tr620 + goto tr603 case 12: - goto tr516 + goto tr469 case 13: - goto tr623 + goto tr605 case 32: - goto tr961 + goto tr602 case 34: goto tr31 - case 44: - goto tr962 case 92: - goto st76 + goto st75 } switch { case ( m.data)[( m.p)] > 11: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st656 + goto st657 } case ( m.data)[( m.p)] >= 9: - goto tr961 + goto tr602 } goto st6 st657: @@ -26392,27 +26284,17 @@ tr109: st_case_657: switch ( m.data)[( m.p)] { case 10: - goto tr620 + goto tr603 case 12: - goto tr516 + goto tr469 case 13: - goto tr623 + goto tr605 case 32: - goto tr961 + goto tr602 case 34: goto tr31 - case 44: - goto tr962 - case 46: - goto st655 - case 69: - goto st207 case 92: - goto st76 - case 101: - goto st207 - case 105: - goto st659 + goto st75 } switch { case ( m.data)[( m.p)] > 11: @@ -26420,7 +26302,7 @@ tr109: goto st658 } case ( m.data)[( m.p)] >= 9: - goto tr961 + goto tr602 } goto st6 st658: @@ -26430,33 +26312,25 @@ tr109: st_case_658: switch ( m.data)[( m.p)] { case 10: - goto tr620 + goto tr603 case 12: - goto tr516 + goto tr469 case 13: - goto tr623 + goto tr605 case 32: - goto tr961 + goto tr602 case 34: goto tr31 - case 44: - goto tr962 - case 46: - goto st655 - case 69: - goto st207 case 92: - goto st76 - case 101: - goto st207 + goto st75 } switch { case ( m.data)[( m.p)] > 11: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st658 + goto st659 } case ( m.data)[( m.p)] >= 9: - goto tr961 + goto tr602 } goto st6 st659: @@ -26466,22 +26340,25 @@ tr109: st_case_659: switch ( m.data)[( m.p)] { case 10: - goto tr778 + goto tr603 case 12: - goto tr909 + goto tr469 case 13: - goto tr780 + goto tr605 case 32: - goto tr966 + goto tr602 case 34: goto tr31 - case 44: - goto tr967 case 92: - goto st76 + goto st75 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr966 + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st660 + } + case ( m.data)[( m.p)] >= 9: + goto tr602 } goto st6 st660: @@ -26491,82 +26368,53 
@@ tr109: st_case_660: switch ( m.data)[( m.p)] { case 10: - goto tr620 + goto tr603 case 12: - goto tr516 + goto tr469 case 13: - goto tr623 + goto tr605 case 32: - goto tr961 + goto tr602 case 34: goto tr31 - case 44: - goto tr962 - case 46: - goto st655 - case 69: - goto st207 case 92: - goto st76 - case 101: - goto st207 - case 105: - goto st659 + goto st75 } switch { case ( m.data)[( m.p)] > 11: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st660 + goto st661 } case ( m.data)[( m.p)] >= 9: - goto tr961 + goto tr602 } goto st6 -tr110: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st661 st661: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof661 } st_case_661: -//line plugins/parsers/influx/machine.go:26537 switch ( m.data)[( m.p)] { case 10: - goto tr620 + goto tr603 case 12: - goto tr516 + goto tr469 case 13: - goto tr623 + goto tr605 case 32: - goto tr961 + goto tr602 case 34: goto tr31 - case 44: - goto tr962 - case 46: - goto st655 - case 69: - goto st207 case 92: - goto st76 - case 101: - goto st207 - case 105: - goto st659 - case 117: - goto st662 + goto st75 } switch { case ( m.data)[( m.p)] > 11: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st658 + goto st662 } case ( m.data)[( m.p)] >= 9: - goto tr961 + goto tr602 } goto st6 st662: @@ -26576,61 +26424,17 @@ tr110: st_case_662: switch ( m.data)[( m.p)] { case 10: - goto tr784 + goto tr603 case 12: - goto tr912 + goto tr469 case 13: - goto tr786 + goto tr605 case 32: - goto tr969 + goto tr602 case 34: goto tr31 - case 44: - goto tr970 case 92: - goto st76 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr969 - } - goto st6 -tr111: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st663 - st663: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof663 - } - st_case_663: -//line plugins/parsers/influx/machine.go:26609 - switch ( m.data)[( m.p)] { - case 10: - goto tr620 - case 12: - goto tr516 - case 13: - goto tr623 - case 32: - goto tr961 - case 34: - goto tr31 - case 44: - goto tr962 - case 46: - goto st655 - case 69: - goto st207 - case 92: - goto st76 - case 101: - goto st207 - case 105: - goto st659 - case 117: - goto st662 + goto st75 } switch { case ( m.data)[( m.p)] > 11: @@ -26638,103 +26442,63 @@ tr111: goto st663 } case ( m.data)[( m.p)] >= 9: - goto tr961 + goto tr602 + } + goto st6 + st663: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof663 + } + st_case_663: + switch ( m.data)[( m.p)] { + case 10: + goto tr603 + case 12: + goto tr469 + case 13: + goto tr605 + case 32: + goto tr602 + case 34: + goto tr31 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st664 + } + case ( m.data)[( m.p)] >= 9: + goto tr602 } goto st6 -tr112: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st664 st664: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof664 } st_case_664: -//line plugins/parsers/influx/machine.go:26656 switch ( m.data)[( m.p)] { case 10: - goto tr790 + goto tr603 case 12: - goto tr916 + goto tr469 case 13: - goto tr792 + goto tr605 case 32: - goto tr972 + goto tr602 case 34: goto tr31 - case 44: - goto tr973 - case 65: - goto st209 case 92: - goto st76 - case 97: - goto st212 + goto st75 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr972 - } - goto st6 - st209: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof209 - } - st_case_209: - switch ( m.data)[( m.p)] { - case 10: - goto st7 - case 12: 
- goto tr8 - case 13: - goto st8 - case 34: - goto tr31 - case 76: - goto st210 - case 92: - goto st76 - } - goto st6 - st210: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof210 - } - st_case_210: - switch ( m.data)[( m.p)] { - case 10: - goto st7 - case 12: - goto tr8 - case 13: - goto st8 - case 34: - goto tr31 - case 83: - goto st211 - case 92: - goto st76 - } - goto st6 - st211: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof211 - } - st_case_211: - switch ( m.data)[( m.p)] { - case 10: - goto st7 - case 12: - goto tr8 - case 13: - goto st8 - case 34: - goto tr31 - case 69: - goto st665 - case 92: - goto st76 + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st665 + } + case ( m.data)[( m.p)] >= 9: + goto tr602 } goto st6 st665: @@ -26744,62 +26508,414 @@ tr112: st_case_665: switch ( m.data)[( m.p)] { case 10: - goto tr790 + goto tr603 case 12: - goto tr916 + goto tr469 case 13: - goto tr792 + goto tr605 case 32: - goto tr972 + goto tr602 case 34: goto tr31 - case 44: - goto tr973 case 92: - goto st76 + goto st75 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr972 + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st666 + } + case ( m.data)[( m.p)] >= 9: + goto tr602 } goto st6 + st666: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof666 + } + st_case_666: + switch ( m.data)[( m.p)] { + case 10: + goto tr603 + case 12: + goto tr469 + case 13: + goto tr605 + case 32: + goto tr602 + case 34: + goto tr31 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st667 + } + case ( m.data)[( m.p)] >= 9: + goto tr602 + } + goto st6 + st667: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof667 + } + st_case_667: + switch ( m.data)[( m.p)] { + case 10: + goto tr603 + case 12: + goto tr469 + case 13: + goto tr605 + case 32: + goto tr602 + case 34: + goto tr31 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st668 + } + case ( m.data)[( m.p)] >= 9: + goto tr602 + } + goto st6 + st668: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof668 + } + st_case_668: + switch ( m.data)[( m.p)] { + case 10: + goto tr603 + case 12: + goto tr469 + case 13: + goto tr605 + case 32: + goto tr602 + case 34: + goto tr31 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st669 + } + case ( m.data)[( m.p)] >= 9: + goto tr602 + } + goto st6 + st669: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof669 + } + st_case_669: + switch ( m.data)[( m.p)] { + case 10: + goto tr603 + case 12: + goto tr469 + case 13: + goto tr605 + case 32: + goto tr602 + case 34: + goto tr31 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st670 + } + case ( m.data)[( m.p)] >= 9: + goto tr602 + } + goto st6 + st670: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof670 + } + st_case_670: + switch ( m.data)[( m.p)] { + case 10: + goto tr603 + case 12: + goto tr469 + case 13: + goto tr605 + case 32: + goto tr602 + case 34: + goto tr31 + case 92: + goto st75 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr602 + } + goto st6 +tr496: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st209 +tr989: + ( m.cs) = 209 +//line 
plugins/parsers/influx/machine.go.rl:122 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr994: + ( m.cs) = 209 +//line plugins/parsers/influx/machine.go.rl:104 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr997: + ( m.cs) = 209 +//line plugins/parsers/influx/machine.go.rl:113 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again +tr1000: + ( m.cs) = 209 +//line plugins/parsers/influx/machine.go.rl:131 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + + goto _again + st209: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof209 + } + st_case_209: +//line plugins/parsers/influx/machine.go:26731 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 32: + goto st6 + case 34: + goto tr386 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr387 + } + goto tr385 +tr385: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st210 + st210: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof210 + } + st_case_210: +//line plugins/parsers/influx/machine.go:26764 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 32: + goto st6 + case 34: + goto tr100 + case 44: + goto st6 + case 61: + goto tr389 + case 92: + goto st224 + } + goto st210 +tr389: +//line plugins/parsers/influx/machine.go.rl:100 + + m.key = m.text() + + goto st211 + st211: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof211 + } + st_case_211: +//line plugins/parsers/influx/machine.go:26797 + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr353 + case 45: + goto tr391 + case 46: + goto tr392 + case 48: + goto tr393 + case 70: + goto tr112 + case 84: + goto tr113 + case 92: + goto st75 + case 102: + goto tr114 + case 116: + goto tr115 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr394 + } + goto st6 +tr391: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st212 st212: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof212 } st_case_212: +//line plugins/parsers/influx/machine.go:26839 switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 34: goto tr31 - case 92: - goto st76 - case 108: + case 46: goto st213 + case 48: + goto st673 + case 92: + goto st75 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st676 } goto st6 +tr392: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st213 st213: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof213 } st_case_213: +//line plugins/parsers/influx/machine.go:26871 switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 34: goto tr31 case 92: - goto st76 - case 115: + goto st75 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st671 + } + goto st6 + st671: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof671 + } + st_case_671: + switch ( m.data)[( m.p)] { + case 10: + goto tr765 + case 12: + goto tr535 + case 13: + goto tr642 + case 32: + goto tr988 + case 34: + goto tr31 + case 44: + goto tr989 + case 69: goto 
st214 + case 92: + goto st75 + case 101: + goto st214 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st671 + } + case ( m.data)[( m.p)] >= 9: + goto tr988 } goto st6 st214: @@ -26809,53 +26925,22 @@ tr112: st_case_214: switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 34: - goto tr31 - case 92: - goto st76 - case 101: - goto st665 - } - goto st6 -tr113: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st666 - st666: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof666 - } - st_case_666: -//line plugins/parsers/influx/machine.go:26837 - switch ( m.data)[( m.p)] { - case 10: - goto tr790 - case 12: - goto tr916 - case 13: - goto tr792 - case 32: - goto tr972 - case 34: - goto tr31 - case 44: - goto tr973 - case 82: + goto tr358 + case 43: + goto st215 + case 45: goto st215 case 92: - goto st76 - case 114: - goto st216 + goto st75 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr972 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st672 } goto st6 st215: @@ -26865,17 +26950,340 @@ tr113: st_case_215: switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 34: goto tr31 - case 85: - goto st211 case 92: - goto st76 + goto st75 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st672 + } + goto st6 + st672: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof672 + } + st_case_672: + switch ( m.data)[( m.p)] { + case 10: + goto tr765 + case 12: + goto tr535 + case 13: + goto tr642 + case 32: + goto tr988 + case 34: + goto tr31 + case 44: + goto tr989 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st672 + } + case ( m.data)[( m.p)] >= 9: + goto tr988 + } + goto st6 + st673: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof673 + } + st_case_673: + switch ( m.data)[( m.p)] { + case 10: + goto tr765 + case 12: + goto tr535 + case 13: + goto tr642 + case 32: + goto tr988 + case 34: + goto tr31 + case 44: + goto tr989 + case 46: + goto st671 + case 69: + goto st214 + case 92: + goto st75 + case 101: + goto st214 + case 105: + goto st675 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st674 + } + case ( m.data)[( m.p)] >= 9: + goto tr988 + } + goto st6 + st674: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof674 + } + st_case_674: + switch ( m.data)[( m.p)] { + case 10: + goto tr765 + case 12: + goto tr535 + case 13: + goto tr642 + case 32: + goto tr988 + case 34: + goto tr31 + case 44: + goto tr989 + case 46: + goto st671 + case 69: + goto st214 + case 92: + goto st75 + case 101: + goto st214 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st674 + } + case ( m.data)[( m.p)] >= 9: + goto tr988 + } + goto st6 + st675: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof675 + } + st_case_675: + switch ( m.data)[( m.p)] { + case 10: + goto tr798 + case 12: + goto tr932 + case 13: + goto tr800 + case 32: + goto tr993 + case 34: + goto tr31 + case 44: + goto tr994 + case 92: + goto st75 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr993 + } + goto st6 + st676: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof676 + } + st_case_676: + switch ( m.data)[( m.p)] { + case 10: + goto tr765 + case 12: + goto tr535 + case 
13: + goto tr642 + case 32: + goto tr988 + case 34: + goto tr31 + case 44: + goto tr989 + case 46: + goto st671 + case 69: + goto st214 + case 92: + goto st75 + case 101: + goto st214 + case 105: + goto st675 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st676 + } + case ( m.data)[( m.p)] >= 9: + goto tr988 + } + goto st6 +tr393: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st677 + st677: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof677 + } + st_case_677: +//line plugins/parsers/influx/machine.go:27146 + switch ( m.data)[( m.p)] { + case 10: + goto tr765 + case 12: + goto tr535 + case 13: + goto tr642 + case 32: + goto tr988 + case 34: + goto tr31 + case 44: + goto tr989 + case 46: + goto st671 + case 69: + goto st214 + case 92: + goto st75 + case 101: + goto st214 + case 105: + goto st675 + case 117: + goto st678 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st674 + } + case ( m.data)[( m.p)] >= 9: + goto tr988 + } + goto st6 + st678: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof678 + } + st_case_678: + switch ( m.data)[( m.p)] { + case 10: + goto tr804 + case 12: + goto tr935 + case 13: + goto tr806 + case 32: + goto tr996 + case 34: + goto tr31 + case 44: + goto tr997 + case 92: + goto st75 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr996 + } + goto st6 +tr394: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st679 + st679: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof679 + } + st_case_679: +//line plugins/parsers/influx/machine.go:27218 + switch ( m.data)[( m.p)] { + case 10: + goto tr765 + case 12: + goto tr535 + case 13: + goto tr642 + case 32: + goto tr988 + case 34: + goto tr31 + case 44: + goto tr989 + case 46: + goto st671 + case 69: + goto st214 + case 92: + goto st75 + case 101: + goto st214 + case 105: + goto st675 + case 117: + goto st678 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st679 + } + case ( m.data)[( m.p)] >= 9: + goto tr988 + } + goto st6 +tr112: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st680 + st680: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof680 + } + st_case_680: +//line plugins/parsers/influx/machine.go:27265 + switch ( m.data)[( m.p)] { + case 10: + goto tr810 + case 12: + goto tr939 + case 13: + goto tr812 + case 32: + goto tr999 + case 34: + goto tr31 + case 44: + goto tr1000 + case 65: + goto st216 + case 92: + goto st75 + case 97: + goto st219 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr999 } goto st6 st216: @@ -26885,104 +27293,305 @@ tr113: st_case_216: switch ( m.data)[( m.p)] { case 10: - goto st7 + goto tr29 case 12: goto tr8 case 13: - goto st8 + goto st7 case 34: goto tr31 + case 76: + goto st217 case 92: - goto st76 - case 117: - goto st214 + goto st75 } goto st6 -tr114: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st667 - st667: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof667 - } - st_case_667: -//line plugins/parsers/influx/machine.go:26913 - switch ( m.data)[( m.p)] { - case 10: - goto tr790 - case 12: - goto tr916 - case 13: - goto tr792 - case 32: - goto tr972 - case 34: - goto tr31 - case 44: - goto tr973 - case 92: - goto st76 - case 97: - goto st212 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr972 - } - goto st6 -tr115: -//line 
plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st668 - st668: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof668 - } - st_case_668: -//line plugins/parsers/influx/machine.go:26947 - switch ( m.data)[( m.p)] { - case 10: - goto tr790 - case 12: - goto tr916 - case 13: - goto tr792 - case 32: - goto tr972 - case 34: - goto tr31 - case 44: - goto tr973 - case 92: - goto st76 - case 114: - goto st216 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr972 - } - goto st6 -tr378: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st217 st217: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof217 } st_case_217: -//line plugins/parsers/influx/machine.go:26981 + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr31 + case 83: + goto st218 + case 92: + goto st75 + } + goto st6 + st218: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof218 + } + st_case_218: + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr31 + case 69: + goto st681 + case 92: + goto st75 + } + goto st6 + st681: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof681 + } + st_case_681: + switch ( m.data)[( m.p)] { + case 10: + goto tr810 + case 12: + goto tr939 + case 13: + goto tr812 + case 32: + goto tr999 + case 34: + goto tr31 + case 44: + goto tr1000 + case 92: + goto st75 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr999 + } + goto st6 + st219: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof219 + } + st_case_219: + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr31 + case 92: + goto st75 + case 108: + goto st220 + } + goto st6 + st220: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof220 + } + st_case_220: + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr31 + case 92: + goto st75 + case 115: + goto st221 + } + goto st6 + st221: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof221 + } + st_case_221: + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr31 + case 92: + goto st75 + case 101: + goto st681 + } + goto st6 +tr113: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st682 + st682: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof682 + } + st_case_682: +//line plugins/parsers/influx/machine.go:27446 + switch ( m.data)[( m.p)] { + case 10: + goto tr810 + case 12: + goto tr939 + case 13: + goto tr812 + case 32: + goto tr999 + case 34: + goto tr31 + case 44: + goto tr1000 + case 82: + goto st222 + case 92: + goto st75 + case 114: + goto st223 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr999 + } + goto st6 + st222: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof222 + } + st_case_222: + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr31 + case 85: + goto st218 + case 92: + goto st75 + } + goto st6 + st223: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof223 + } + st_case_223: + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr31 + case 92: + goto st75 + case 117: + goto st221 + } + goto st6 +tr114: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st683 + st683: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof683 
+ } + st_case_683: +//line plugins/parsers/influx/machine.go:27522 + switch ( m.data)[( m.p)] { + case 10: + goto tr810 + case 12: + goto tr939 + case 13: + goto tr812 + case 32: + goto tr999 + case 34: + goto tr31 + case 44: + goto tr1000 + case 92: + goto st75 + case 97: + goto st219 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr999 + } + goto st6 +tr115: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st684 + st684: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof684 + } + st_case_684: +//line plugins/parsers/influx/machine.go:27556 + switch ( m.data)[( m.p)] { + case 10: + goto tr810 + case 12: + goto tr939 + case 13: + goto tr812 + case 32: + goto tr999 + case 34: + goto tr31 + case 44: + goto tr1000 + case 92: + goto st75 + case 114: + goto st223 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr999 + } + goto st6 +tr387: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st224 + st224: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof224 + } + st_case_224: +//line plugins/parsers/influx/machine.go:27590 switch ( m.data)[( m.p)] { case 34: - goto st203 + goto st210 case 92: - goto st203 + goto st210 } switch { case ( m.data)[( m.p)] > 10: @@ -26993,31 +27602,457 @@ tr378: goto tr8 } goto st3 -tr96: -//line plugins/parsers/influx/machine.go.rl:19 +tr108: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st218 - st218: + goto st225 + st225: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof218 + goto _test_eof225 } - st_case_218: -//line plugins/parsers/influx/machine.go:27008 + st_case_225: +//line plugins/parsers/influx/machine.go:27617 + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr31 + case 46: + goto st226 + case 48: + goto st687 + case 92: + goto st75 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st690 + } + goto st6 +tr109: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st226 + st226: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof226 + } + st_case_226: +//line plugins/parsers/influx/machine.go:27649 + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr31 + case 92: + goto st75 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st685 + } + goto st6 + st685: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof685 + } + st_case_685: + switch ( m.data)[( m.p)] { + case 10: + goto tr639 + case 12: + goto tr535 + case 13: + goto tr642 + case 32: + goto tr988 + case 34: + goto tr31 + case 44: + goto tr989 + case 69: + goto st227 + case 92: + goto st75 + case 101: + goto st227 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st685 + } + case ( m.data)[( m.p)] >= 9: + goto tr988 + } + goto st6 + st227: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof227 + } + st_case_227: + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr358 + case 43: + goto st228 + case 45: + goto st228 + case 92: + goto st75 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st686 + } + goto st6 + st228: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof228 + } + st_case_228: + switch ( m.data)[( m.p)] { + case 10: + goto tr29 + case 12: + goto tr8 + case 13: + goto st7 + case 34: + goto tr31 + case 92: + goto st75 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( 
m.p)] <= 57 { + goto st686 + } + goto st6 + st686: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof686 + } + st_case_686: + switch ( m.data)[( m.p)] { + case 10: + goto tr639 + case 12: + goto tr535 + case 13: + goto tr642 + case 32: + goto tr988 + case 34: + goto tr31 + case 44: + goto tr989 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st686 + } + case ( m.data)[( m.p)] >= 9: + goto tr988 + } + goto st6 + st687: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof687 + } + st_case_687: + switch ( m.data)[( m.p)] { + case 10: + goto tr639 + case 12: + goto tr535 + case 13: + goto tr642 + case 32: + goto tr988 + case 34: + goto tr31 + case 44: + goto tr989 + case 46: + goto st685 + case 69: + goto st227 + case 92: + goto st75 + case 101: + goto st227 + case 105: + goto st689 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st688 + } + case ( m.data)[( m.p)] >= 9: + goto tr988 + } + goto st6 + st688: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof688 + } + st_case_688: + switch ( m.data)[( m.p)] { + case 10: + goto tr639 + case 12: + goto tr535 + case 13: + goto tr642 + case 32: + goto tr988 + case 34: + goto tr31 + case 44: + goto tr989 + case 46: + goto st685 + case 69: + goto st227 + case 92: + goto st75 + case 101: + goto st227 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st688 + } + case ( m.data)[( m.p)] >= 9: + goto tr988 + } + goto st6 + st689: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof689 + } + st_case_689: + switch ( m.data)[( m.p)] { + case 10: + goto tr823 + case 12: + goto tr932 + case 13: + goto tr800 + case 32: + goto tr993 + case 34: + goto tr31 + case 44: + goto tr994 + case 92: + goto st75 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { + goto tr993 + } + goto st6 + st690: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof690 + } + st_case_690: + switch ( m.data)[( m.p)] { + case 10: + goto tr639 + case 12: + goto tr535 + case 13: + goto tr642 + case 32: + goto tr988 + case 34: + goto tr31 + case 44: + goto tr989 + case 46: + goto st685 + case 69: + goto st227 + case 92: + goto st75 + case 101: + goto st227 + case 105: + goto st689 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st690 + } + case ( m.data)[( m.p)] >= 9: + goto tr988 + } + goto st6 +tr110: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st691 + st691: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof691 + } + st_case_691: +//line plugins/parsers/influx/machine.go:27924 + switch ( m.data)[( m.p)] { + case 10: + goto tr639 + case 12: + goto tr535 + case 13: + goto tr642 + case 32: + goto tr988 + case 34: + goto tr31 + case 44: + goto tr989 + case 46: + goto st685 + case 69: + goto st227 + case 92: + goto st75 + case 101: + goto st227 + case 105: + goto st689 + case 117: + goto st692 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st688 + } + case ( m.data)[( m.p)] >= 9: + goto tr988 + } + goto st6 + st692: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof692 + } + st_case_692: + switch ( m.data)[( m.p)] { + case 10: + goto tr829 + case 12: + goto tr935 + case 13: + goto tr806 + case 32: + goto tr996 + case 34: + goto tr31 + case 44: + goto tr997 + case 92: + goto st75 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( 
m.p)] <= 11 { + goto tr996 + } + goto st6 +tr111: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st693 + st693: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof693 + } + st_case_693: +//line plugins/parsers/influx/machine.go:27996 + switch ( m.data)[( m.p)] { + case 10: + goto tr639 + case 12: + goto tr535 + case 13: + goto tr642 + case 32: + goto tr988 + case 34: + goto tr31 + case 44: + goto tr989 + case 46: + goto st685 + case 69: + goto st227 + case 92: + goto st75 + case 101: + goto st227 + case 105: + goto st689 + case 117: + goto st692 + } + switch { + case ( m.data)[( m.p)] > 11: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st693 + } + case ( m.data)[( m.p)] >= 9: + goto tr988 + } + goto st6 +tr96: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st229 + st229: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof229 + } + st_case_229: +//line plugins/parsers/influx/machine.go:28043 switch ( m.data)[( m.p)] { case 9: - goto st32 + goto st31 case 10: - goto st7 + goto tr29 case 11: goto tr96 case 12: goto st2 case 13: - goto st8 + goto st7 case 32: - goto st32 + goto st31 case 34: goto tr97 case 44: @@ -27029,17 +28064,17 @@ tr96: } goto tr94 tr74: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st219 - st219: + goto st230 + st230: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof219 + goto _test_eof230 } - st_case_219: -//line plugins/parsers/influx/machine.go:27043 + st_case_230: +//line plugins/parsers/influx/machine.go:28078 switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -27052,33 +28087,33 @@ tr74: case 44: goto tr4 case 46: - goto st220 + goto st231 case 48: - goto st670 + goto st695 case 92: - goto st96 + goto st95 } switch { case ( m.data)[( m.p)] > 12: if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st673 + goto st698 } case ( m.data)[( m.p)] >= 9: goto tr1 } goto st1 tr75: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st220 - st220: + goto st231 + st231: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof220 + goto _test_eof231 } - st_case_220: -//line plugins/parsers/influx/machine.go:27082 + st_case_231: +//line plugins/parsers/influx/machine.go:28117 switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -27091,54 +28126,54 @@ tr75: case 44: goto tr4 case 92: - goto st96 + goto st95 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st669 + goto st694 } case ( m.data)[( m.p)] >= 9: goto tr1 } goto st1 - st669: + st694: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof669 + goto _test_eof694 } - st_case_669: + st_case_694: switch ( m.data)[( m.p)] { case 10: - goto tr715 + goto tr734 case 11: - goto tr798 + goto tr818 case 13: - goto tr717 + goto tr736 case 32: - goto tr622 + goto tr641 case 44: - goto tr799 + goto tr819 case 69: - goto st221 + goto st232 case 92: - goto st96 + goto st95 case 101: - goto st221 + goto st232 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st669 + goto st694 } case ( m.data)[( m.p)] >= 9: - goto tr622 + goto tr641 } goto st1 - st221: + st232: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof221 + goto _test_eof232 } - st_case_221: + st_case_232: switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -27149,11 +28184,11 @@ tr75: case 32: goto tr1 case 34: - goto st222 + goto st233 case 44: goto tr4 case 92: - goto st96 + goto st95 } 
switch { case ( m.data)[( m.p)] < 43: @@ -27162,17 +28197,17 @@ tr75: } case ( m.data)[( m.p)] > 45: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st520 + goto st531 } default: - goto st222 + goto st233 } goto st1 - st222: + st233: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof222 + goto _test_eof233 } - st_case_222: + st_case_233: switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -27185,298 +28220,298 @@ tr75: case 44: goto tr4 case 92: - goto st96 + goto st95 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st520 + goto st531 } case ( m.data)[( m.p)] >= 9: goto tr1 } goto st1 - st670: + st695: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof670 + goto _test_eof695 } - st_case_670: + st_case_695: switch ( m.data)[( m.p)] { case 10: - goto tr715 + goto tr734 case 11: - goto tr798 + goto tr818 case 13: - goto tr717 + goto tr736 case 32: - goto tr622 + goto tr641 case 44: - goto tr799 + goto tr819 case 46: - goto st669 + goto st694 case 69: - goto st221 + goto st232 case 92: - goto st96 + goto st95 case 101: - goto st221 + goto st232 case 105: - goto st672 + goto st697 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st671 + goto st696 } case ( m.data)[( m.p)] >= 9: - goto tr622 + goto tr641 } goto st1 - st671: + st696: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof671 + goto _test_eof696 } - st_case_671: + st_case_696: switch ( m.data)[( m.p)] { case 10: - goto tr715 + goto tr734 case 11: - goto tr798 + goto tr818 case 13: - goto tr717 + goto tr736 case 32: - goto tr622 + goto tr641 case 44: - goto tr799 + goto tr819 case 46: - goto st669 + goto st694 case 69: - goto st221 + goto st232 case 92: - goto st96 + goto st95 case 101: - goto st221 + goto st232 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st671 + goto st696 } case ( m.data)[( m.p)] >= 9: - goto tr622 + goto tr641 } goto st1 - st672: + st697: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof672 + goto _test_eof697 } - st_case_672: + st_case_697: switch ( m.data)[( m.p)] { case 10: - goto tr925 + goto tr952 case 11: - goto tr981 + goto tr1013 case 13: - goto tr927 + goto tr954 case 32: - goto tr804 + goto tr825 case 44: - goto tr982 + goto tr1014 case 92: - goto st96 + goto st95 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr804 + goto tr825 } goto st1 - st673: + st698: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof673 + goto _test_eof698 } - st_case_673: + st_case_698: switch ( m.data)[( m.p)] { case 10: - goto tr715 + goto tr734 case 11: - goto tr798 + goto tr818 case 13: - goto tr717 + goto tr736 case 32: - goto tr622 + goto tr641 case 44: - goto tr799 + goto tr819 case 46: - goto st669 + goto st694 case 69: - goto st221 + goto st232 case 92: - goto st96 + goto st95 case 101: - goto st221 + goto st232 case 105: - goto st672 + goto st697 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st673 + goto st698 } case ( m.data)[( m.p)] >= 9: - goto tr622 + goto tr641 } goto st1 tr76: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st674 - st674: + goto st699 + st699: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof674 + goto _test_eof699 } - st_case_674: -//line plugins/parsers/influx/machine.go:27340 + st_case_699: +//line plugins/parsers/influx/machine.go:28375 switch ( m.data)[( m.p)] { case 10: - goto tr715 + 
goto tr734 case 11: - goto tr798 + goto tr818 case 13: - goto tr717 + goto tr736 case 32: - goto tr622 + goto tr641 case 44: - goto tr799 + goto tr819 case 46: - goto st669 + goto st694 case 69: - goto st221 + goto st232 case 92: - goto st96 + goto st95 case 101: - goto st221 + goto st232 case 105: - goto st672 + goto st697 case 117: - goto st675 + goto st700 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st671 + goto st696 } case ( m.data)[( m.p)] >= 9: - goto tr622 + goto tr641 } goto st1 - st675: + st700: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof675 + goto _test_eof700 } - st_case_675: + st_case_700: switch ( m.data)[( m.p)] { case 10: - goto tr930 + goto tr957 case 11: - goto tr984 + goto tr1016 case 13: - goto tr932 + goto tr959 case 32: - goto tr809 + goto tr831 case 44: - goto tr985 + goto tr1017 case 92: - goto st96 + goto st95 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr809 + goto tr831 } goto st1 tr77: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st676 - st676: + goto st701 + st701: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof676 + goto _test_eof701 } - st_case_676: -//line plugins/parsers/influx/machine.go:27408 + st_case_701: +//line plugins/parsers/influx/machine.go:28443 switch ( m.data)[( m.p)] { case 10: - goto tr715 + goto tr734 case 11: - goto tr798 + goto tr818 case 13: - goto tr717 + goto tr736 case 32: - goto tr622 + goto tr641 case 44: - goto tr799 + goto tr819 case 46: - goto st669 + goto st694 case 69: - goto st221 + goto st232 case 92: - goto st96 + goto st95 case 101: - goto st221 + goto st232 case 105: - goto st672 + goto st697 case 117: - goto st675 + goto st700 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st676 + goto st701 } case ( m.data)[( m.p)] >= 9: - goto tr622 + goto tr641 } goto st1 tr78: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st677 - st677: + goto st702 + st702: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof677 + goto _test_eof702 } - st_case_677: -//line plugins/parsers/influx/machine.go:27453 + st_case_702: +//line plugins/parsers/influx/machine.go:28488 switch ( m.data)[( m.p)] { case 10: - goto tr935 + goto tr962 case 11: - goto tr987 + goto tr1019 case 13: - goto tr937 + goto tr964 case 32: - goto tr814 + goto tr836 case 44: - goto tr988 + goto tr1020 case 65: - goto st223 + goto st234 case 92: - goto st96 + goto st95 case 97: - goto st226 + goto st237 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr814 + goto tr836 } goto st1 - st223: + st234: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof223 + goto _test_eof234 } - st_case_223: + st_case_234: switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -27489,19 +28524,19 @@ tr78: case 44: goto tr4 case 76: - goto st224 + goto st235 case 92: - goto st96 + goto st95 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 } goto st1 - st224: + st235: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof224 + goto _test_eof235 } - st_case_224: + st_case_235: switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -27514,19 +28549,19 @@ tr78: case 44: goto tr4 case 83: - goto st225 + goto st236 case 92: - goto st96 + goto st95 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 } goto st1 - st225: + st236: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof225 + goto _test_eof236 } - 
st_case_225: + st_case_236: switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -27539,42 +28574,42 @@ tr78: case 44: goto tr4 case 69: - goto st678 + goto st703 case 92: - goto st96 + goto st95 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 } goto st1 - st678: + st703: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof678 + goto _test_eof703 } - st_case_678: + st_case_703: switch ( m.data)[( m.p)] { case 10: - goto tr935 + goto tr962 case 11: - goto tr987 + goto tr1019 case 13: - goto tr937 + goto tr964 case 32: - goto tr814 + goto tr836 case 44: - goto tr988 + goto tr1020 case 92: - goto st96 + goto st95 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr814 + goto tr836 } goto st1 - st226: + st237: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof226 + goto _test_eof237 } - st_case_226: + st_case_237: switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -27587,19 +28622,19 @@ tr78: case 44: goto tr4 case 92: - goto st96 + goto st95 case 108: - goto st227 + goto st238 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 } goto st1 - st227: + st238: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof227 + goto _test_eof238 } - st_case_227: + st_case_238: switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -27612,19 +28647,19 @@ tr78: case 44: goto tr4 case 92: - goto st96 + goto st95 case 115: - goto st228 + goto st239 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 } goto st1 - st228: + st239: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof228 + goto _test_eof239 } - st_case_228: + st_case_239: switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -27637,53 +28672,53 @@ tr78: case 44: goto tr4 case 92: - goto st96 + goto st95 case 101: - goto st678 + goto st703 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 } goto st1 tr79: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st679 - st679: + goto st704 + st704: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof679 + goto _test_eof704 } - st_case_679: -//line plugins/parsers/influx/machine.go:27660 + st_case_704: +//line plugins/parsers/influx/machine.go:28695 switch ( m.data)[( m.p)] { case 10: - goto tr935 + goto tr962 case 11: - goto tr987 + goto tr1019 case 13: - goto tr937 + goto tr964 case 32: - goto tr814 + goto tr836 case 44: - goto tr988 + goto tr1020 case 82: - goto st229 + goto st240 case 92: - goto st96 + goto st95 case 114: - goto st230 + goto st241 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr814 + goto tr836 } goto st1 - st229: + st240: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof229 + goto _test_eof240 } - st_case_229: + st_case_240: switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -27696,19 +28731,19 @@ tr79: case 44: goto tr4 case 85: - goto st225 + goto st236 case 92: - goto st96 + goto st95 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 } goto st1 - st230: + st241: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof230 + goto _test_eof241 } - st_case_230: + st_case_241: switch ( m.data)[( m.p)] { case 10: goto tr47 @@ -27721,120 +28756,120 @@ tr79: case 44: goto tr4 case 92: - goto st96 + goto st95 case 117: - goto st228 + goto st239 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 } goto st1 tr80: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st680 - st680: + goto st705 + st705: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof680 + goto _test_eof705 } - 
st_case_680: -//line plugins/parsers/influx/machine.go:27744 + st_case_705: +//line plugins/parsers/influx/machine.go:28779 switch ( m.data)[( m.p)] { case 10: - goto tr935 + goto tr962 case 11: - goto tr987 + goto tr1019 case 13: - goto tr937 + goto tr964 case 32: - goto tr814 + goto tr836 case 44: - goto tr988 + goto tr1020 case 92: - goto st96 + goto st95 case 97: - goto st226 + goto st237 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr814 + goto tr836 } goto st1 tr81: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st681 - st681: + goto st706 + st706: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof681 + goto _test_eof706 } - st_case_681: -//line plugins/parsers/influx/machine.go:27776 + st_case_706: +//line plugins/parsers/influx/machine.go:28811 switch ( m.data)[( m.p)] { case 10: - goto tr935 + goto tr962 case 11: - goto tr987 + goto tr1019 case 13: - goto tr937 + goto tr964 case 32: - goto tr814 + goto tr836 case 44: - goto tr988 + goto tr1020 case 92: - goto st96 + goto st95 case 114: - goto st230 + goto st241 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr814 + goto tr836 } goto st1 tr44: -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st231 -tr405: - ( m.cs) = 231 -//line plugins/parsers/influx/machine.go.rl:19 + goto st242 +tr424: + ( m.cs) = 242 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:77 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st231: + st242: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof231 + goto _test_eof242 } - st_case_231: -//line plugins/parsers/influx/machine.go:27825 + st_case_242: +//line plugins/parsers/influx/machine.go:28860 switch ( m.data)[( m.p)] { case 10: - goto tr404 + goto tr423 case 11: - goto tr405 + goto tr424 case 13: - goto tr404 + goto tr423 case 32: goto tr38 case 44: goto tr4 case 61: - goto tr406 + goto tr425 case 92: goto tr45 } @@ -27843,35 +28878,35 @@ tr405: } goto tr41 tr40: - ( m.cs) = 232 -//line plugins/parsers/influx/machine.go.rl:19 + ( m.cs) = 243 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:77 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st232: + st243: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof232 + goto _test_eof243 } - st_case_232: -//line plugins/parsers/influx/machine.go:27868 + st_case_243: +//line plugins/parsers/influx/machine.go:28903 switch ( m.data)[( m.p)] { case 10: - goto tr404 + goto tr423 case 11: - goto tr405 + goto tr424 case 13: - goto tr404 + goto tr423 case 32: goto tr38 case 44: @@ -27885,738 +28920,41 @@ tr40: goto tr38 } goto tr41 -tr445: -//line plugins/parsers/influx/machine.go.rl:19 +tr464: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st233 - st233: + goto st244 + st244: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof233 + goto _test_eof244 } - st_case_233: -//line plugins/parsers/influx/machine.go:27900 + st_case_244: +//line plugins/parsers/influx/machine.go:28935 if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st682 - } - goto tr407 -tr446: -//line plugins/parsers/influx/machine.go.rl:19 - 
- m.pb = m.p - - goto st682 - st682: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof682 - } - st_case_682: -//line plugins/parsers/influx/machine.go:27916 - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 13: - goto tr453 - case 32: - goto tr450 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st683 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto tr407 - st683: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof683 - } - st_case_683: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 13: - goto tr453 - case 32: - goto tr450 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st684 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto tr407 - st684: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof684 - } - st_case_684: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 13: - goto tr453 - case 32: - goto tr450 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st685 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto tr407 - st685: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof685 - } - st_case_685: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 13: - goto tr453 - case 32: - goto tr450 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st686 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto tr407 - st686: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof686 - } - st_case_686: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 13: - goto tr453 - case 32: - goto tr450 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st687 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto tr407 - st687: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof687 - } - st_case_687: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 13: - goto tr453 - case 32: - goto tr450 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st688 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto tr407 - st688: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof688 - } - st_case_688: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 13: - goto tr453 - case 32: - goto tr450 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st689 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto tr407 - st689: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof689 - } - st_case_689: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 13: - goto tr453 - case 32: - goto tr450 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st690 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto tr407 - st690: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof690 - } - st_case_690: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 13: - goto tr453 - case 32: - goto tr450 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st691 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto tr407 - st691: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof691 - } - st_case_691: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 13: - goto tr453 - case 32: - goto tr450 
- } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st692 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto tr407 - st692: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof692 - } - st_case_692: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 13: - goto tr453 - case 32: - goto tr450 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st693 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto tr407 - st693: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof693 - } - st_case_693: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 13: - goto tr453 - case 32: - goto tr450 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st694 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto tr407 - st694: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof694 - } - st_case_694: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 13: - goto tr453 - case 32: - goto tr450 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st695 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto tr407 - st695: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof695 - } - st_case_695: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 13: - goto tr453 - case 32: - goto tr450 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st696 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto tr407 - st696: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof696 - } - st_case_696: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 13: - goto tr453 - case 32: - goto tr450 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st697 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto tr407 - st697: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof697 - } - st_case_697: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 13: - goto tr453 - case 32: - goto tr450 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st698 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto tr407 - st698: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof698 - } - st_case_698: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 13: - goto tr453 - case 32: - goto tr450 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st699 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto tr407 - st699: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof699 - } - st_case_699: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 13: - goto tr453 - case 32: - goto tr450 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st700 - } - case ( m.data)[( m.p)] >= 9: - goto tr450 - } - goto tr407 - st700: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof700 - } - st_case_700: - switch ( m.data)[( m.p)] { - case 10: - goto tr451 - case 13: - goto tr453 - case 32: - goto tr450 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr450 - } - goto tr407 -tr15: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st234 - st234: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof234 - } - st_case_234: 
-//line plugins/parsers/influx/machine.go:28336 - switch ( m.data)[( m.p)] { - case 46: - goto st235 - case 48: - goto st702 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st705 - } - goto tr8 -tr16: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st235 - st235: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof235 - } - st_case_235: -//line plugins/parsers/influx/machine.go:28358 - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st701 - } - goto tr8 - st701: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof701 - } - st_case_701: - switch ( m.data)[( m.p)] { - case 10: - goto tr715 - case 13: - goto tr717 - case 32: - goto tr516 - case 44: - goto tr907 - case 69: - goto st236 - case 101: - goto st236 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st701 - } - case ( m.data)[( m.p)] >= 9: - goto tr516 - } - goto tr105 - st236: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof236 - } - st_case_236: - switch ( m.data)[( m.p)] { - case 34: - goto st237 - case 43: - goto st237 - case 45: - goto st237 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st611 - } - goto tr8 - st237: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof237 - } - st_case_237: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st611 - } - goto tr8 - st702: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof702 - } - st_case_702: - switch ( m.data)[( m.p)] { - case 10: - goto tr715 - case 13: - goto tr717 - case 32: - goto tr516 - case 44: - goto tr907 - case 46: - goto st701 - case 69: - goto st236 - case 101: - goto st236 - case 105: - goto st704 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st703 - } - case ( m.data)[( m.p)] >= 9: - goto tr516 - } - goto tr105 - st703: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof703 - } - st_case_703: - switch ( m.data)[( m.p)] { - case 10: - goto tr715 - case 13: - goto tr717 - case 32: - goto tr516 - case 44: - goto tr907 - case 46: - goto st701 - case 69: - goto st236 - case 101: - goto st236 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st703 - } - case ( m.data)[( m.p)] >= 9: - goto tr516 - } - goto tr105 - st704: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof704 - } - st_case_704: - switch ( m.data)[( m.p)] { - case 10: - goto tr925 - case 13: - goto tr927 - case 32: - goto tr909 - case 44: - goto tr1014 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr909 - } - goto tr105 - st705: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof705 - } - st_case_705: - switch ( m.data)[( m.p)] { - case 10: - goto tr715 - case 13: - goto tr717 - case 32: - goto tr516 - case 44: - goto tr907 - case 46: - goto st701 - case 69: - goto st236 - case 101: - goto st236 - case 105: - goto st704 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st705 - } - case ( m.data)[( m.p)] >= 9: - goto tr516 - } - goto tr105 -tr17: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st706 - st706: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof706 - } - st_case_706: -//line plugins/parsers/influx/machine.go:28541 - switch ( m.data)[( m.p)] { - case 10: - goto tr715 - case 13: - goto tr717 - case 32: - goto tr516 - case 44: - goto tr907 - case 46: - goto st701 - case 69: - goto st236 - case 101: - goto st236 - 
case 105: - goto st704 - case 117: goto st707 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st703 - } - case ( m.data)[( m.p)] >= 9: - goto tr516 - } - goto tr105 + goto tr426 +tr465: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st707 st707: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof707 } st_case_707: +//line plugins/parsers/influx/machine.go:28951 switch ( m.data)[( m.p)] { case 10: - goto tr930 + goto tr470 case 13: - goto tr932 + goto tr472 case 32: - goto tr912 - case 44: - goto tr1016 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr912 - } - goto tr105 -tr18: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st708 - st708: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof708 - } - st_case_708: -//line plugins/parsers/influx/machine.go:28601 - switch ( m.data)[( m.p)] { - case 10: - goto tr715 - case 13: - goto tr717 - case 32: - goto tr516 - case 44: - goto tr907 - case 46: - goto st701 - case 69: - goto st236 - case 101: - goto st236 - case 105: - goto st704 - case 117: - goto st707 + goto tr469 } switch { case ( m.data)[( m.p)] > 12: @@ -28624,66 +28962,53 @@ tr18: goto st708 } case ( m.data)[( m.p)] >= 9: - goto tr516 + goto tr469 } - goto tr105 -tr19: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st709 + goto tr426 + st708: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof708 + } + st_case_708: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 13: + goto tr472 + case 32: + goto tr469 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st709 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto tr426 st709: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof709 } st_case_709: -//line plugins/parsers/influx/machine.go:28642 switch ( m.data)[( m.p)] { case 10: - goto tr935 + goto tr470 case 13: - goto tr937 + goto tr472 case 32: - goto tr916 - case 44: - goto tr1018 - case 65: - goto st238 - case 97: - goto st241 + goto tr469 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr916 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st710 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 } - goto tr105 - st238: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof238 - } - st_case_238: - if ( m.data)[( m.p)] == 76 { - goto st239 - } - goto tr8 - st239: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof239 - } - st_case_239: - if ( m.data)[( m.p)] == 83 { - goto st240 - } - goto tr8 - st240: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof240 - } - st_case_240: - if ( m.data)[( m.p)] == 69 { - goto st710 - } - goto tr8 + goto tr426 st710: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof710 @@ -28691,151 +29016,370 @@ tr19: st_case_710: switch ( m.data)[( m.p)] { case 10: - goto tr935 + goto tr470 case 13: - goto tr937 + goto tr472 case 32: - goto tr916 - case 44: - goto tr1018 + goto tr469 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr916 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st711 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 } - goto tr105 - st241: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof241 - } - st_case_241: - if ( m.data)[( m.p)] == 108 { - goto st242 - } - goto tr8 - st242: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof242 - } - st_case_242: - if ( m.data)[( m.p)] == 
115 { - goto st243 - } - goto tr8 - st243: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof243 - } - st_case_243: - if ( m.data)[( m.p)] == 101 { - goto st710 - } - goto tr8 -tr20: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st711 + goto tr426 st711: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof711 } st_case_711: -//line plugins/parsers/influx/machine.go:28745 switch ( m.data)[( m.p)] { case 10: - goto tr935 + goto tr470 case 13: - goto tr937 + goto tr472 case 32: - goto tr916 - case 44: - goto tr1018 - case 82: - goto st244 - case 114: - goto st245 + goto tr469 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr916 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st712 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 } - goto tr105 - st244: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof244 - } - st_case_244: - if ( m.data)[( m.p)] == 85 { - goto st240 - } - goto tr8 - st245: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof245 - } - st_case_245: - if ( m.data)[( m.p)] == 117 { - goto st243 - } - goto tr8 -tr21: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st712 + goto tr426 st712: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof712 } st_case_712: -//line plugins/parsers/influx/machine.go:28793 switch ( m.data)[( m.p)] { case 10: - goto tr935 + goto tr470 case 13: - goto tr937 + goto tr472 case 32: - goto tr916 - case 44: - goto tr1018 - case 97: - goto st241 + goto tr469 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr916 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st713 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 } - goto tr105 -tr22: -//line plugins/parsers/influx/machine.go.rl:19 - - m.pb = m.p - - goto st713 + goto tr426 st713: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof713 } st_case_713: -//line plugins/parsers/influx/machine.go:28821 switch ( m.data)[( m.p)] { case 10: - goto tr935 + goto tr470 case 13: - goto tr937 + goto tr472 case 32: - goto tr916 - case 44: - goto tr1018 - case 114: - goto st245 + goto tr469 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st714 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto tr426 + st714: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof714 + } + st_case_714: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 13: + goto tr472 + case 32: + goto tr469 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st715 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto tr426 + st715: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof715 + } + st_case_715: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 13: + goto tr472 + case 32: + goto tr469 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st716 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto tr426 + st716: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof716 + } + st_case_716: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 13: + goto tr472 + case 32: + goto tr469 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st717 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto tr426 + st717: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof717 + } + st_case_717: + 
switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 13: + goto tr472 + case 32: + goto tr469 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st718 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto tr426 + st718: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof718 + } + st_case_718: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 13: + goto tr472 + case 32: + goto tr469 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st719 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto tr426 + st719: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof719 + } + st_case_719: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 13: + goto tr472 + case 32: + goto tr469 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st720 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto tr426 + st720: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof720 + } + st_case_720: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 13: + goto tr472 + case 32: + goto tr469 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st721 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto tr426 + st721: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof721 + } + st_case_721: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 13: + goto tr472 + case 32: + goto tr469 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st722 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto tr426 + st722: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof722 + } + st_case_722: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 13: + goto tr472 + case 32: + goto tr469 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st723 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto tr426 + st723: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof723 + } + st_case_723: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 13: + goto tr472 + case 32: + goto tr469 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st724 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto tr426 + st724: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof724 + } + st_case_724: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 13: + goto tr472 + case 32: + goto tr469 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st725 + } + case ( m.data)[( m.p)] >= 9: + goto tr469 + } + goto tr426 + st725: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof725 + } + st_case_725: + switch ( m.data)[( m.p)] { + case 10: + goto tr470 + case 13: + goto tr472 + case 32: + goto tr469 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr916 + goto tr469 } - goto tr105 -tr9: -//line plugins/parsers/influx/machine.go.rl:19 + goto tr426 +tr15: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st245 + st245: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof245 + } + st_case_245: +//line plugins/parsers/influx/machine.go:29371 + switch ( m.data)[( m.p)] { + case 46: + goto st246 + case 48: + goto st727 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto 
st730 + } + goto tr8 +tr16: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p @@ -28845,7 +29389,498 @@ tr9: goto _test_eof246 } st_case_246: -//line plugins/parsers/influx/machine.go:28849 +//line plugins/parsers/influx/machine.go:29393 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st726 + } + goto tr8 + st726: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof726 + } + st_case_726: + switch ( m.data)[( m.p)] { + case 10: + goto tr734 + case 13: + goto tr736 + case 32: + goto tr535 + case 44: + goto tr930 + case 69: + goto st247 + case 101: + goto st247 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st726 + } + case ( m.data)[( m.p)] >= 9: + goto tr535 + } + goto tr105 + st247: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof247 + } + st_case_247: + switch ( m.data)[( m.p)] { + case 34: + goto st248 + case 43: + goto st248 + case 45: + goto st248 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st622 + } + goto tr8 + st248: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof248 + } + st_case_248: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st622 + } + goto tr8 + st727: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof727 + } + st_case_727: + switch ( m.data)[( m.p)] { + case 10: + goto tr734 + case 13: + goto tr736 + case 32: + goto tr535 + case 44: + goto tr930 + case 46: + goto st726 + case 69: + goto st247 + case 101: + goto st247 + case 105: + goto st729 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st728 + } + case ( m.data)[( m.p)] >= 9: + goto tr535 + } + goto tr105 + st728: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof728 + } + st_case_728: + switch ( m.data)[( m.p)] { + case 10: + goto tr734 + case 13: + goto tr736 + case 32: + goto tr535 + case 44: + goto tr930 + case 46: + goto st726 + case 69: + goto st247 + case 101: + goto st247 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st728 + } + case ( m.data)[( m.p)] >= 9: + goto tr535 + } + goto tr105 + st729: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof729 + } + st_case_729: + switch ( m.data)[( m.p)] { + case 10: + goto tr952 + case 13: + goto tr954 + case 32: + goto tr932 + case 44: + goto tr1046 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr932 + } + goto tr105 + st730: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof730 + } + st_case_730: + switch ( m.data)[( m.p)] { + case 10: + goto tr734 + case 13: + goto tr736 + case 32: + goto tr535 + case 44: + goto tr930 + case 46: + goto st726 + case 69: + goto st247 + case 101: + goto st247 + case 105: + goto st729 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st730 + } + case ( m.data)[( m.p)] >= 9: + goto tr535 + } + goto tr105 +tr17: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st731 + st731: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof731 + } + st_case_731: +//line plugins/parsers/influx/machine.go:29576 + switch ( m.data)[( m.p)] { + case 10: + goto tr734 + case 13: + goto tr736 + case 32: + goto tr535 + case 44: + goto tr930 + case 46: + goto st726 + case 69: + goto st247 + case 101: + goto st247 + case 105: + goto st729 + case 117: + goto st732 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st728 + } + case ( 
m.data)[( m.p)] >= 9: + goto tr535 + } + goto tr105 + st732: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof732 + } + st_case_732: + switch ( m.data)[( m.p)] { + case 10: + goto tr957 + case 13: + goto tr959 + case 32: + goto tr935 + case 44: + goto tr1048 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr935 + } + goto tr105 +tr18: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st733 + st733: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof733 + } + st_case_733: +//line plugins/parsers/influx/machine.go:29636 + switch ( m.data)[( m.p)] { + case 10: + goto tr734 + case 13: + goto tr736 + case 32: + goto tr535 + case 44: + goto tr930 + case 46: + goto st726 + case 69: + goto st247 + case 101: + goto st247 + case 105: + goto st729 + case 117: + goto st732 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st733 + } + case ( m.data)[( m.p)] >= 9: + goto tr535 + } + goto tr105 +tr19: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st734 + st734: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof734 + } + st_case_734: +//line plugins/parsers/influx/machine.go:29677 + switch ( m.data)[( m.p)] { + case 10: + goto tr962 + case 13: + goto tr964 + case 32: + goto tr939 + case 44: + goto tr1050 + case 65: + goto st249 + case 97: + goto st252 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr939 + } + goto tr105 + st249: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof249 + } + st_case_249: + if ( m.data)[( m.p)] == 76 { + goto st250 + } + goto tr8 + st250: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof250 + } + st_case_250: + if ( m.data)[( m.p)] == 83 { + goto st251 + } + goto tr8 + st251: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof251 + } + st_case_251: + if ( m.data)[( m.p)] == 69 { + goto st735 + } + goto tr8 + st735: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof735 + } + st_case_735: + switch ( m.data)[( m.p)] { + case 10: + goto tr962 + case 13: + goto tr964 + case 32: + goto tr939 + case 44: + goto tr1050 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr939 + } + goto tr105 + st252: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof252 + } + st_case_252: + if ( m.data)[( m.p)] == 108 { + goto st253 + } + goto tr8 + st253: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof253 + } + st_case_253: + if ( m.data)[( m.p)] == 115 { + goto st254 + } + goto tr8 + st254: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof254 + } + st_case_254: + if ( m.data)[( m.p)] == 101 { + goto st735 + } + goto tr8 +tr20: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st736 + st736: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof736 + } + st_case_736: +//line plugins/parsers/influx/machine.go:29780 + switch ( m.data)[( m.p)] { + case 10: + goto tr962 + case 13: + goto tr964 + case 32: + goto tr939 + case 44: + goto tr1050 + case 82: + goto st255 + case 114: + goto st256 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr939 + } + goto tr105 + st255: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof255 + } + st_case_255: + if ( m.data)[( m.p)] == 85 { + goto st251 + } + goto tr8 + st256: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof256 + } + st_case_256: + if ( m.data)[( m.p)] == 117 { + goto st254 + } + goto tr8 +tr21: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st737 + st737: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof737 + } 
+ st_case_737: +//line plugins/parsers/influx/machine.go:29828 + switch ( m.data)[( m.p)] { + case 10: + goto tr962 + case 13: + goto tr964 + case 32: + goto tr939 + case 44: + goto tr1050 + case 97: + goto st252 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr939 + } + goto tr105 +tr22: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st738 + st738: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof738 + } + st_case_738: +//line plugins/parsers/influx/machine.go:29856 + switch ( m.data)[( m.p)] { + case 10: + goto tr962 + case 13: + goto tr964 + case 32: + goto tr939 + case 44: + goto tr1050 + case 114: + goto st256 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr939 + } + goto tr105 +tr9: +//line plugins/parsers/influx/machine.go.rl:20 + + m.pb = m.p + + goto st257 + st257: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof257 + } + st_case_257: +//line plugins/parsers/influx/machine.go:29884 switch ( m.data)[( m.p)] { case 10: goto tr8 @@ -28866,39 +29901,39 @@ tr9: goto st2 } goto tr6 - st247: + st258: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof247 + goto _test_eof258 } - st_case_247: + st_case_258: if ( m.data)[( m.p)] == 10 { - goto tr421 + goto tr440 } - goto st247 -tr421: -//line plugins/parsers/influx/machine.go.rl:69 - - {goto st715 } - - goto st714 - st714: -//line plugins/parsers/influx/machine.go.rl:157 + goto st258 +tr440: +//line plugins/parsers/influx/machine.go.rl:158 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line +//line plugins/parsers/influx/machine.go.rl:70 + + {goto st740 } + + goto st739 + st739: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof714 + goto _test_eof739 } - st_case_714: -//line plugins/parsers/influx/machine.go:28896 + st_case_739: +//line plugins/parsers/influx/machine.go:29931 goto st0 - st250: + st261: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof250 + goto _test_eof261 } - st_case_250: + st_case_261: switch ( m.data)[( m.p)] { case 32: goto tr35 @@ -28907,7 +29942,7 @@ tr421: case 44: goto tr35 case 92: - goto tr425 + goto tr444 } switch { case ( m.data)[( m.p)] > 10: @@ -28917,152 +29952,167 @@ tr421: case ( m.data)[( m.p)] >= 9: goto tr35 } - goto tr424 -tr424: -//line plugins/parsers/influx/machine.go.rl:73 + goto tr443 +tr443: +//line plugins/parsers/influx/machine.go.rl:74 - foundMetric = true + m.beginMetric = true -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st717 - st717: + goto st741 + st741: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof717 + goto _test_eof741 } - st_case_717: -//line plugins/parsers/influx/machine.go:28937 + st_case_741: +//line plugins/parsers/influx/machine.go:29972 switch ( m.data)[( m.p)] { case 9: goto tr2 case 10: - goto tr1026 + goto tr1058 case 12: goto tr2 case 13: - goto tr1027 + goto tr1059 case 32: goto tr2 case 44: - goto tr1028 + goto tr1060 case 92: - goto st258 + goto st269 } - goto st717 -tr1026: - ( m.cs) = 718 -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again -tr1030: - ( m.cs) = 718 -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; goto _out } - } - - goto _again - st718: -//line plugins/parsers/influx/machine.go.rl:157 + goto st741 +tr445: +//line 
plugins/parsers/influx/machine.go.rl:158 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line -//line plugins/parsers/influx/machine.go.rl:163 + goto st742 +tr1058: + ( m.cs) = 742 +//line plugins/parsers/influx/machine.go.rl:78 - ( m.cs) = 715; + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr1062: + ( m.cs) = 742 +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:158 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again + st742: +//line plugins/parsers/influx/machine.go.rl:164 + + m.finishMetric = true + ( m.cs) = 740; {( m.p)++; goto _out } if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof718 + goto _test_eof742 } - st_case_718: -//line plugins/parsers/influx/machine.go:28997 + st_case_742: +//line plugins/parsers/influx/machine.go:30047 goto st0 -tr1027: - ( m.cs) = 251 -//line plugins/parsers/influx/machine.go.rl:77 +tr1059: + ( m.cs) = 262 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr1031: - ( m.cs) = 251 -//line plugins/parsers/influx/machine.go.rl:90 +tr1063: + ( m.cs) = 262 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st251: + st262: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof251 + goto _test_eof262 } - st_case_251: -//line plugins/parsers/influx/machine.go:29030 + st_case_262: +//line plugins/parsers/influx/machine.go:30080 if ( m.data)[( m.p)] == 10 { - goto st718 + goto tr445 } goto st0 -tr1028: - ( m.cs) = 252 -//line plugins/parsers/influx/machine.go.rl:77 +tr1060: + ( m.cs) = 263 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again -tr1032: - ( m.cs) = 252 -//line plugins/parsers/influx/machine.go.rl:90 +tr1064: + ( m.cs) = 263 +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; goto _out } } goto _again - st252: + st263: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof252 + goto _test_eof263 } - st_case_252: -//line plugins/parsers/influx/machine.go:29066 + st_case_263: +//line plugins/parsers/influx/machine.go:30116 switch ( m.data)[( m.p)] { case 32: goto tr2 @@ -29071,7 +30121,7 @@ tr1032: case 61: goto tr2 case 92: - goto tr428 + goto tr447 } switch { case ( m.data)[( m.p)] > 10: @@ -29081,28 +30131,28 @@ tr1032: case ( m.data)[( m.p)] >= 9: goto tr2 } - goto tr427 -tr427: -//line plugins/parsers/influx/machine.go.rl:19 + goto tr446 +tr446: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st253 - st253: + goto st264 + st264: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof253 + goto _test_eof264 } - st_case_253: -//line plugins/parsers/influx/machine.go:29097 + st_case_264: +//line 
plugins/parsers/influx/machine.go:30147 switch ( m.data)[( m.p)] { case 32: goto tr2 case 44: goto tr2 case 61: - goto tr430 + goto tr449 case 92: - goto st256 + goto st267 } switch { case ( m.data)[( m.p)] > 10: @@ -29112,19 +30162,19 @@ tr427: case ( m.data)[( m.p)] >= 9: goto tr2 } - goto st253 -tr430: -//line plugins/parsers/influx/machine.go.rl:86 + goto st264 +tr449: +//line plugins/parsers/influx/machine.go.rl:87 - key = m.text() + m.key = m.text() - goto st254 - st254: + goto st265 + st265: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof254 + goto _test_eof265 } - st_case_254: -//line plugins/parsers/influx/machine.go:29128 + st_case_265: +//line plugins/parsers/influx/machine.go:30178 switch ( m.data)[( m.p)] { case 32: goto tr2 @@ -29133,7 +30183,7 @@ tr430: case 61: goto tr2 case 92: - goto tr433 + goto tr452 } switch { case ( m.data)[( m.p)] > 10: @@ -29143,52 +30193,52 @@ tr430: case ( m.data)[( m.p)] >= 9: goto tr2 } - goto tr432 -tr432: -//line plugins/parsers/influx/machine.go.rl:19 + goto tr451 +tr451: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st719 - st719: + goto st743 + st743: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof719 + goto _test_eof743 } - st_case_719: -//line plugins/parsers/influx/machine.go:29159 + st_case_743: +//line plugins/parsers/influx/machine.go:30209 switch ( m.data)[( m.p)] { case 9: goto tr2 case 10: - goto tr1030 + goto tr1062 case 12: goto tr2 case 13: - goto tr1031 + goto tr1063 case 32: goto tr2 case 44: - goto tr1032 + goto tr1064 case 61: goto tr2 case 92: - goto st255 + goto st266 } - goto st719 -tr433: -//line plugins/parsers/influx/machine.go.rl:19 + goto st743 +tr452: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st255 - st255: + goto st266 + st266: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof255 + goto _test_eof266 } - st_case_255: -//line plugins/parsers/influx/machine.go:29190 + st_case_266: +//line plugins/parsers/influx/machine.go:30240 if ( m.data)[( m.p)] == 92 { - goto st720 + goto st744 } switch { case ( m.data)[( m.p)] > 10: @@ -29198,49 +30248,49 @@ tr433: case ( m.data)[( m.p)] >= 9: goto tr2 } - goto st719 - st720: -//line plugins/parsers/influx/machine.go.rl:234 + goto st743 + st744: +//line plugins/parsers/influx/machine.go.rl:240 ( m.p)-- if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof720 + goto _test_eof744 } - st_case_720: -//line plugins/parsers/influx/machine.go:29211 + st_case_744: +//line plugins/parsers/influx/machine.go:30261 switch ( m.data)[( m.p)] { case 9: goto tr2 case 10: - goto tr1030 + goto tr1062 case 12: goto tr2 case 13: - goto tr1031 + goto tr1063 case 32: goto tr2 case 44: - goto tr1032 + goto tr1064 case 61: goto tr2 case 92: - goto st255 + goto st266 } - goto st719 -tr428: -//line plugins/parsers/influx/machine.go.rl:19 + goto st743 +tr447: +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st256 - st256: + goto st267 + st267: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof256 + goto _test_eof267 } - st_case_256: -//line plugins/parsers/influx/machine.go:29242 + st_case_267: +//line plugins/parsers/influx/machine.go:30292 if ( m.data)[( m.p)] == 92 { - goto st257 + goto st268 } switch { case ( m.data)[( m.p)] > 10: @@ -29250,25 +30300,25 @@ tr428: case ( m.data)[( m.p)] >= 9: goto tr2 } - goto st253 - st257: -//line plugins/parsers/influx/machine.go.rl:234 + goto st264 + st268: +//line plugins/parsers/influx/machine.go.rl:240 ( m.p)-- if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof257 + goto _test_eof268 } - st_case_257: 
-//line plugins/parsers/influx/machine.go:29263 + st_case_268: +//line plugins/parsers/influx/machine.go:30313 switch ( m.data)[( m.p)] { case 32: goto tr2 case 44: goto tr2 case 61: - goto tr430 + goto tr449 case 92: - goto st256 + goto st267 } switch { case ( m.data)[( m.p)] > 10: @@ -29278,23 +30328,23 @@ tr428: case ( m.data)[( m.p)] >= 9: goto tr2 } - goto st253 -tr425: -//line plugins/parsers/influx/machine.go.rl:73 + goto st264 +tr444: +//line plugins/parsers/influx/machine.go.rl:74 - foundMetric = true + m.beginMetric = true -//line plugins/parsers/influx/machine.go.rl:19 +//line plugins/parsers/influx/machine.go.rl:20 m.pb = m.p - goto st258 - st258: + goto st269 + st269: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof258 + goto _test_eof269 } - st_case_258: -//line plugins/parsers/influx/machine.go:29298 + st_case_269: +//line plugins/parsers/influx/machine.go:30348 switch { case ( m.data)[( m.p)] > 10: if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { @@ -29303,72 +30353,55 @@ tr425: case ( m.data)[( m.p)] >= 9: goto st0 } - goto st717 - st715: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof715 - } - st_case_715: - switch ( m.data)[( m.p)] { - case 10: - goto st716 - case 13: - goto st248 - case 32: - goto st715 - case 35: - goto st249 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st715 - } - goto tr1023 - st716: -//line plugins/parsers/influx/machine.go.rl:157 + goto st741 +tr441: +//line plugins/parsers/influx/machine.go.rl:158 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line + goto st740 + st740: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof716 + goto _test_eof740 } - st_case_716: -//line plugins/parsers/influx/machine.go:29338 + st_case_740: +//line plugins/parsers/influx/machine.go:30371 switch ( m.data)[( m.p)] { case 10: - goto st716 + goto tr441 case 13: - goto st248 + goto st259 case 32: - goto st715 + goto st740 case 35: - goto st249 + goto st260 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st715 + goto st740 } - goto tr1023 - st248: + goto tr1055 + st259: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof248 + goto _test_eof259 } - st_case_248: + st_case_259: if ( m.data)[( m.p)] == 10 { - goto st716 + goto tr441 } goto st0 - st249: + st260: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof249 + goto _test_eof260 } - st_case_249: + st_case_260: if ( m.data)[( m.p)] == 10 { - goto st716 + goto tr441 } - goto st249 + goto st260 st_out: - _test_eof259: ( m.cs) = 259; goto _test_eof + _test_eof270: ( m.cs) = 270; goto _test_eof _test_eof1: ( m.cs) = 1; goto _test_eof _test_eof2: ( m.cs) = 2; goto _test_eof _test_eof3: ( m.cs) = 3; goto _test_eof @@ -29376,10 +30409,10 @@ tr425: _test_eof5: ( m.cs) = 5; goto _test_eof _test_eof6: ( m.cs) = 6; goto _test_eof _test_eof7: ( m.cs) = 7; goto _test_eof + _test_eof271: ( m.cs) = 271; goto _test_eof + _test_eof272: ( m.cs) = 272; goto _test_eof + _test_eof273: ( m.cs) = 273; goto _test_eof _test_eof8: ( m.cs) = 8; goto _test_eof - _test_eof260: ( m.cs) = 260; goto _test_eof - _test_eof261: ( m.cs) = 261; goto _test_eof - _test_eof262: ( m.cs) = 262; goto _test_eof _test_eof9: ( m.cs) = 9; goto _test_eof _test_eof10: ( m.cs) = 10; goto _test_eof _test_eof11: ( m.cs) = 11; goto _test_eof @@ -29404,26 +30437,14 @@ tr425: _test_eof30: ( m.cs) = 30; goto _test_eof _test_eof31: ( m.cs) = 31; goto _test_eof _test_eof32: ( m.cs) = 32; goto _test_eof - _test_eof33: ( m.cs) = 33; goto _test_eof - _test_eof263: ( m.cs) = 263; goto _test_eof - 
_test_eof264: ( m.cs) = 264; goto _test_eof - _test_eof34: ( m.cs) = 34; goto _test_eof - _test_eof35: ( m.cs) = 35; goto _test_eof - _test_eof265: ( m.cs) = 265; goto _test_eof - _test_eof266: ( m.cs) = 266; goto _test_eof - _test_eof267: ( m.cs) = 267; goto _test_eof - _test_eof36: ( m.cs) = 36; goto _test_eof - _test_eof268: ( m.cs) = 268; goto _test_eof - _test_eof269: ( m.cs) = 269; goto _test_eof - _test_eof270: ( m.cs) = 270; goto _test_eof - _test_eof271: ( m.cs) = 271; goto _test_eof - _test_eof272: ( m.cs) = 272; goto _test_eof - _test_eof273: ( m.cs) = 273; goto _test_eof _test_eof274: ( m.cs) = 274; goto _test_eof _test_eof275: ( m.cs) = 275; goto _test_eof + _test_eof33: ( m.cs) = 33; goto _test_eof + _test_eof34: ( m.cs) = 34; goto _test_eof _test_eof276: ( m.cs) = 276; goto _test_eof _test_eof277: ( m.cs) = 277; goto _test_eof _test_eof278: ( m.cs) = 278; goto _test_eof + _test_eof35: ( m.cs) = 35; goto _test_eof _test_eof279: ( m.cs) = 279; goto _test_eof _test_eof280: ( m.cs) = 280; goto _test_eof _test_eof281: ( m.cs) = 281; goto _test_eof @@ -29431,32 +30452,32 @@ tr425: _test_eof283: ( m.cs) = 283; goto _test_eof _test_eof284: ( m.cs) = 284; goto _test_eof _test_eof285: ( m.cs) = 285; goto _test_eof - _test_eof37: ( m.cs) = 37; goto _test_eof - _test_eof38: ( m.cs) = 38; goto _test_eof _test_eof286: ( m.cs) = 286; goto _test_eof _test_eof287: ( m.cs) = 287; goto _test_eof _test_eof288: ( m.cs) = 288; goto _test_eof - _test_eof39: ( m.cs) = 39; goto _test_eof - _test_eof40: ( m.cs) = 40; goto _test_eof - _test_eof41: ( m.cs) = 41; goto _test_eof - _test_eof42: ( m.cs) = 42; goto _test_eof - _test_eof43: ( m.cs) = 43; goto _test_eof _test_eof289: ( m.cs) = 289; goto _test_eof _test_eof290: ( m.cs) = 290; goto _test_eof _test_eof291: ( m.cs) = 291; goto _test_eof _test_eof292: ( m.cs) = 292; goto _test_eof - _test_eof44: ( m.cs) = 44; goto _test_eof _test_eof293: ( m.cs) = 293; goto _test_eof _test_eof294: ( m.cs) = 294; goto _test_eof _test_eof295: ( m.cs) = 295; goto _test_eof _test_eof296: ( m.cs) = 296; goto _test_eof + _test_eof36: ( m.cs) = 36; goto _test_eof + _test_eof37: ( m.cs) = 37; goto _test_eof _test_eof297: ( m.cs) = 297; goto _test_eof _test_eof298: ( m.cs) = 298; goto _test_eof _test_eof299: ( m.cs) = 299; goto _test_eof + _test_eof38: ( m.cs) = 38; goto _test_eof + _test_eof39: ( m.cs) = 39; goto _test_eof + _test_eof40: ( m.cs) = 40; goto _test_eof + _test_eof41: ( m.cs) = 41; goto _test_eof + _test_eof42: ( m.cs) = 42; goto _test_eof _test_eof300: ( m.cs) = 300; goto _test_eof _test_eof301: ( m.cs) = 301; goto _test_eof _test_eof302: ( m.cs) = 302; goto _test_eof _test_eof303: ( m.cs) = 303; goto _test_eof + _test_eof43: ( m.cs) = 43; goto _test_eof _test_eof304: ( m.cs) = 304; goto _test_eof _test_eof305: ( m.cs) = 305; goto _test_eof _test_eof306: ( m.cs) = 306; goto _test_eof @@ -29468,6 +30489,18 @@ tr425: _test_eof312: ( m.cs) = 312; goto _test_eof _test_eof313: ( m.cs) = 313; goto _test_eof _test_eof314: ( m.cs) = 314; goto _test_eof + _test_eof315: ( m.cs) = 315; goto _test_eof + _test_eof316: ( m.cs) = 316; goto _test_eof + _test_eof317: ( m.cs) = 317; goto _test_eof + _test_eof318: ( m.cs) = 318; goto _test_eof + _test_eof319: ( m.cs) = 319; goto _test_eof + _test_eof320: ( m.cs) = 320; goto _test_eof + _test_eof321: ( m.cs) = 321; goto _test_eof + _test_eof322: ( m.cs) = 322; goto _test_eof + _test_eof323: ( m.cs) = 323; goto _test_eof + _test_eof324: ( m.cs) = 324; goto _test_eof + _test_eof325: ( m.cs) = 325; goto _test_eof + _test_eof44: ( 
m.cs) = 44; goto _test_eof _test_eof45: ( m.cs) = 45; goto _test_eof _test_eof46: ( m.cs) = 46; goto _test_eof _test_eof47: ( m.cs) = 47; goto _test_eof @@ -29477,30 +30510,18 @@ tr425: _test_eof51: ( m.cs) = 51; goto _test_eof _test_eof52: ( m.cs) = 52; goto _test_eof _test_eof53: ( m.cs) = 53; goto _test_eof + _test_eof326: ( m.cs) = 326; goto _test_eof + _test_eof327: ( m.cs) = 327; goto _test_eof + _test_eof328: ( m.cs) = 328; goto _test_eof _test_eof54: ( m.cs) = 54; goto _test_eof - _test_eof315: ( m.cs) = 315; goto _test_eof - _test_eof316: ( m.cs) = 316; goto _test_eof - _test_eof317: ( m.cs) = 317; goto _test_eof _test_eof55: ( m.cs) = 55; goto _test_eof _test_eof56: ( m.cs) = 56; goto _test_eof _test_eof57: ( m.cs) = 57; goto _test_eof _test_eof58: ( m.cs) = 58; goto _test_eof _test_eof59: ( m.cs) = 59; goto _test_eof - _test_eof60: ( m.cs) = 60; goto _test_eof - _test_eof318: ( m.cs) = 318; goto _test_eof - _test_eof319: ( m.cs) = 319; goto _test_eof - _test_eof61: ( m.cs) = 61; goto _test_eof - _test_eof320: ( m.cs) = 320; goto _test_eof - _test_eof321: ( m.cs) = 321; goto _test_eof - _test_eof322: ( m.cs) = 322; goto _test_eof - _test_eof323: ( m.cs) = 323; goto _test_eof - _test_eof324: ( m.cs) = 324; goto _test_eof - _test_eof325: ( m.cs) = 325; goto _test_eof - _test_eof326: ( m.cs) = 326; goto _test_eof - _test_eof327: ( m.cs) = 327; goto _test_eof - _test_eof328: ( m.cs) = 328; goto _test_eof _test_eof329: ( m.cs) = 329; goto _test_eof _test_eof330: ( m.cs) = 330; goto _test_eof + _test_eof60: ( m.cs) = 60; goto _test_eof _test_eof331: ( m.cs) = 331; goto _test_eof _test_eof332: ( m.cs) = 332; goto _test_eof _test_eof333: ( m.cs) = 333; goto _test_eof @@ -29510,11 +30531,9 @@ tr425: _test_eof337: ( m.cs) = 337; goto _test_eof _test_eof338: ( m.cs) = 338; goto _test_eof _test_eof339: ( m.cs) = 339; goto _test_eof - _test_eof62: ( m.cs) = 62; goto _test_eof _test_eof340: ( m.cs) = 340; goto _test_eof _test_eof341: ( m.cs) = 341; goto _test_eof _test_eof342: ( m.cs) = 342; goto _test_eof - _test_eof63: ( m.cs) = 63; goto _test_eof _test_eof343: ( m.cs) = 343; goto _test_eof _test_eof344: ( m.cs) = 344; goto _test_eof _test_eof345: ( m.cs) = 345; goto _test_eof @@ -29523,9 +30542,11 @@ tr425: _test_eof348: ( m.cs) = 348; goto _test_eof _test_eof349: ( m.cs) = 349; goto _test_eof _test_eof350: ( m.cs) = 350; goto _test_eof + _test_eof61: ( m.cs) = 61; goto _test_eof _test_eof351: ( m.cs) = 351; goto _test_eof _test_eof352: ( m.cs) = 352; goto _test_eof _test_eof353: ( m.cs) = 353; goto _test_eof + _test_eof62: ( m.cs) = 62; goto _test_eof _test_eof354: ( m.cs) = 354; goto _test_eof _test_eof355: ( m.cs) = 355; goto _test_eof _test_eof356: ( m.cs) = 356; goto _test_eof @@ -29535,38 +30556,38 @@ tr425: _test_eof360: ( m.cs) = 360; goto _test_eof _test_eof361: ( m.cs) = 361; goto _test_eof _test_eof362: ( m.cs) = 362; goto _test_eof - _test_eof64: ( m.cs) = 64; goto _test_eof - _test_eof65: ( m.cs) = 65; goto _test_eof - _test_eof66: ( m.cs) = 66; goto _test_eof - _test_eof67: ( m.cs) = 67; goto _test_eof - _test_eof68: ( m.cs) = 68; goto _test_eof _test_eof363: ( m.cs) = 363; goto _test_eof - _test_eof69: ( m.cs) = 69; goto _test_eof - _test_eof70: ( m.cs) = 70; goto _test_eof - _test_eof71: ( m.cs) = 71; goto _test_eof - _test_eof72: ( m.cs) = 72; goto _test_eof - _test_eof73: ( m.cs) = 73; goto _test_eof _test_eof364: ( m.cs) = 364; goto _test_eof _test_eof365: ( m.cs) = 365; goto _test_eof _test_eof366: ( m.cs) = 366; goto _test_eof - _test_eof74: ( m.cs) = 74; goto _test_eof 
- _test_eof75: ( m.cs) = 75; goto _test_eof _test_eof367: ( m.cs) = 367; goto _test_eof _test_eof368: ( m.cs) = 368; goto _test_eof - _test_eof76: ( m.cs) = 76; goto _test_eof _test_eof369: ( m.cs) = 369; goto _test_eof - _test_eof77: ( m.cs) = 77; goto _test_eof _test_eof370: ( m.cs) = 370; goto _test_eof _test_eof371: ( m.cs) = 371; goto _test_eof _test_eof372: ( m.cs) = 372; goto _test_eof _test_eof373: ( m.cs) = 373; goto _test_eof + _test_eof63: ( m.cs) = 63; goto _test_eof + _test_eof64: ( m.cs) = 64; goto _test_eof + _test_eof65: ( m.cs) = 65; goto _test_eof + _test_eof66: ( m.cs) = 66; goto _test_eof + _test_eof67: ( m.cs) = 67; goto _test_eof _test_eof374: ( m.cs) = 374; goto _test_eof + _test_eof68: ( m.cs) = 68; goto _test_eof + _test_eof69: ( m.cs) = 69; goto _test_eof + _test_eof70: ( m.cs) = 70; goto _test_eof + _test_eof71: ( m.cs) = 71; goto _test_eof + _test_eof72: ( m.cs) = 72; goto _test_eof _test_eof375: ( m.cs) = 375; goto _test_eof _test_eof376: ( m.cs) = 376; goto _test_eof _test_eof377: ( m.cs) = 377; goto _test_eof + _test_eof73: ( m.cs) = 73; goto _test_eof + _test_eof74: ( m.cs) = 74; goto _test_eof _test_eof378: ( m.cs) = 378; goto _test_eof _test_eof379: ( m.cs) = 379; goto _test_eof + _test_eof75: ( m.cs) = 75; goto _test_eof _test_eof380: ( m.cs) = 380; goto _test_eof + _test_eof76: ( m.cs) = 76; goto _test_eof _test_eof381: ( m.cs) = 381; goto _test_eof _test_eof382: ( m.cs) = 382; goto _test_eof _test_eof383: ( m.cs) = 383; goto _test_eof @@ -29576,6 +30597,18 @@ tr425: _test_eof387: ( m.cs) = 387; goto _test_eof _test_eof388: ( m.cs) = 388; goto _test_eof _test_eof389: ( m.cs) = 389; goto _test_eof + _test_eof390: ( m.cs) = 390; goto _test_eof + _test_eof391: ( m.cs) = 391; goto _test_eof + _test_eof392: ( m.cs) = 392; goto _test_eof + _test_eof393: ( m.cs) = 393; goto _test_eof + _test_eof394: ( m.cs) = 394; goto _test_eof + _test_eof395: ( m.cs) = 395; goto _test_eof + _test_eof396: ( m.cs) = 396; goto _test_eof + _test_eof397: ( m.cs) = 397; goto _test_eof + _test_eof398: ( m.cs) = 398; goto _test_eof + _test_eof399: ( m.cs) = 399; goto _test_eof + _test_eof400: ( m.cs) = 400; goto _test_eof + _test_eof77: ( m.cs) = 77; goto _test_eof _test_eof78: ( m.cs) = 78; goto _test_eof _test_eof79: ( m.cs) = 79; goto _test_eof _test_eof80: ( m.cs) = 80; goto _test_eof @@ -29589,40 +30622,28 @@ tr425: _test_eof88: ( m.cs) = 88; goto _test_eof _test_eof89: ( m.cs) = 89; goto _test_eof _test_eof90: ( m.cs) = 90; goto _test_eof - _test_eof91: ( m.cs) = 91; goto _test_eof - _test_eof390: ( m.cs) = 390; goto _test_eof - _test_eof391: ( m.cs) = 391; goto _test_eof - _test_eof392: ( m.cs) = 392; goto _test_eof - _test_eof393: ( m.cs) = 393; goto _test_eof - _test_eof92: ( m.cs) = 92; goto _test_eof - _test_eof93: ( m.cs) = 93; goto _test_eof - _test_eof94: ( m.cs) = 94; goto _test_eof - _test_eof95: ( m.cs) = 95; goto _test_eof - _test_eof394: ( m.cs) = 394; goto _test_eof - _test_eof395: ( m.cs) = 395; goto _test_eof - _test_eof96: ( m.cs) = 96; goto _test_eof - _test_eof97: ( m.cs) = 97; goto _test_eof - _test_eof396: ( m.cs) = 396; goto _test_eof - _test_eof98: ( m.cs) = 98; goto _test_eof - _test_eof99: ( m.cs) = 99; goto _test_eof - _test_eof397: ( m.cs) = 397; goto _test_eof - _test_eof398: ( m.cs) = 398; goto _test_eof - _test_eof100: ( m.cs) = 100; goto _test_eof - _test_eof399: ( m.cs) = 399; goto _test_eof - _test_eof400: ( m.cs) = 400; goto _test_eof - _test_eof101: ( m.cs) = 101; goto _test_eof - _test_eof102: ( m.cs) = 102; goto _test_eof _test_eof401: ( 
m.cs) = 401; goto _test_eof _test_eof402: ( m.cs) = 402; goto _test_eof _test_eof403: ( m.cs) = 403; goto _test_eof _test_eof404: ( m.cs) = 404; goto _test_eof + _test_eof91: ( m.cs) = 91; goto _test_eof + _test_eof92: ( m.cs) = 92; goto _test_eof + _test_eof93: ( m.cs) = 93; goto _test_eof + _test_eof94: ( m.cs) = 94; goto _test_eof _test_eof405: ( m.cs) = 405; goto _test_eof _test_eof406: ( m.cs) = 406; goto _test_eof + _test_eof95: ( m.cs) = 95; goto _test_eof + _test_eof96: ( m.cs) = 96; goto _test_eof _test_eof407: ( m.cs) = 407; goto _test_eof + _test_eof97: ( m.cs) = 97; goto _test_eof + _test_eof98: ( m.cs) = 98; goto _test_eof _test_eof408: ( m.cs) = 408; goto _test_eof _test_eof409: ( m.cs) = 409; goto _test_eof + _test_eof99: ( m.cs) = 99; goto _test_eof _test_eof410: ( m.cs) = 410; goto _test_eof _test_eof411: ( m.cs) = 411; goto _test_eof + _test_eof100: ( m.cs) = 100; goto _test_eof + _test_eof101: ( m.cs) = 101; goto _test_eof _test_eof412: ( m.cs) = 412; goto _test_eof _test_eof413: ( m.cs) = 413; goto _test_eof _test_eof414: ( m.cs) = 414; goto _test_eof @@ -29630,27 +30651,27 @@ tr425: _test_eof416: ( m.cs) = 416; goto _test_eof _test_eof417: ( m.cs) = 417; goto _test_eof _test_eof418: ( m.cs) = 418; goto _test_eof - _test_eof103: ( m.cs) = 103; goto _test_eof _test_eof419: ( m.cs) = 419; goto _test_eof _test_eof420: ( m.cs) = 420; goto _test_eof _test_eof421: ( m.cs) = 421; goto _test_eof - _test_eof104: ( m.cs) = 104; goto _test_eof - _test_eof105: ( m.cs) = 105; goto _test_eof _test_eof422: ( m.cs) = 422; goto _test_eof _test_eof423: ( m.cs) = 423; goto _test_eof _test_eof424: ( m.cs) = 424; goto _test_eof - _test_eof106: ( m.cs) = 106; goto _test_eof _test_eof425: ( m.cs) = 425; goto _test_eof _test_eof426: ( m.cs) = 426; goto _test_eof _test_eof427: ( m.cs) = 427; goto _test_eof _test_eof428: ( m.cs) = 428; goto _test_eof _test_eof429: ( m.cs) = 429; goto _test_eof + _test_eof102: ( m.cs) = 102; goto _test_eof _test_eof430: ( m.cs) = 430; goto _test_eof _test_eof431: ( m.cs) = 431; goto _test_eof _test_eof432: ( m.cs) = 432; goto _test_eof + _test_eof103: ( m.cs) = 103; goto _test_eof + _test_eof104: ( m.cs) = 104; goto _test_eof _test_eof433: ( m.cs) = 433; goto _test_eof _test_eof434: ( m.cs) = 434; goto _test_eof _test_eof435: ( m.cs) = 435; goto _test_eof + _test_eof105: ( m.cs) = 105; goto _test_eof _test_eof436: ( m.cs) = 436; goto _test_eof _test_eof437: ( m.cs) = 437; goto _test_eof _test_eof438: ( m.cs) = 438; goto _test_eof @@ -29660,7 +30681,6 @@ tr425: _test_eof442: ( m.cs) = 442; goto _test_eof _test_eof443: ( m.cs) = 443; goto _test_eof _test_eof444: ( m.cs) = 444; goto _test_eof - _test_eof107: ( m.cs) = 107; goto _test_eof _test_eof445: ( m.cs) = 445; goto _test_eof _test_eof446: ( m.cs) = 446; goto _test_eof _test_eof447: ( m.cs) = 447; goto _test_eof @@ -29672,6 +30692,7 @@ tr425: _test_eof453: ( m.cs) = 453; goto _test_eof _test_eof454: ( m.cs) = 454; goto _test_eof _test_eof455: ( m.cs) = 455; goto _test_eof + _test_eof106: ( m.cs) = 106; goto _test_eof _test_eof456: ( m.cs) = 456; goto _test_eof _test_eof457: ( m.cs) = 457; goto _test_eof _test_eof458: ( m.cs) = 458; goto _test_eof @@ -29683,16 +30704,9 @@ tr425: _test_eof464: ( m.cs) = 464; goto _test_eof _test_eof465: ( m.cs) = 465; goto _test_eof _test_eof466: ( m.cs) = 466; goto _test_eof - _test_eof108: ( m.cs) = 108; goto _test_eof - _test_eof109: ( m.cs) = 109; goto _test_eof - _test_eof110: ( m.cs) = 110; goto _test_eof - _test_eof111: ( m.cs) = 111; goto _test_eof - _test_eof112: ( m.cs) 
= 112; goto _test_eof _test_eof467: ( m.cs) = 467; goto _test_eof - _test_eof113: ( m.cs) = 113; goto _test_eof _test_eof468: ( m.cs) = 468; goto _test_eof _test_eof469: ( m.cs) = 469; goto _test_eof - _test_eof114: ( m.cs) = 114; goto _test_eof _test_eof470: ( m.cs) = 470; goto _test_eof _test_eof471: ( m.cs) = 471; goto _test_eof _test_eof472: ( m.cs) = 472; goto _test_eof @@ -29701,38 +30715,45 @@ tr425: _test_eof475: ( m.cs) = 475; goto _test_eof _test_eof476: ( m.cs) = 476; goto _test_eof _test_eof477: ( m.cs) = 477; goto _test_eof + _test_eof107: ( m.cs) = 107; goto _test_eof + _test_eof108: ( m.cs) = 108; goto _test_eof + _test_eof109: ( m.cs) = 109; goto _test_eof + _test_eof110: ( m.cs) = 110; goto _test_eof + _test_eof111: ( m.cs) = 111; goto _test_eof _test_eof478: ( m.cs) = 478; goto _test_eof - _test_eof115: ( m.cs) = 115; goto _test_eof - _test_eof116: ( m.cs) = 116; goto _test_eof - _test_eof117: ( m.cs) = 117; goto _test_eof + _test_eof112: ( m.cs) = 112; goto _test_eof _test_eof479: ( m.cs) = 479; goto _test_eof - _test_eof118: ( m.cs) = 118; goto _test_eof - _test_eof119: ( m.cs) = 119; goto _test_eof - _test_eof120: ( m.cs) = 120; goto _test_eof _test_eof480: ( m.cs) = 480; goto _test_eof - _test_eof121: ( m.cs) = 121; goto _test_eof - _test_eof122: ( m.cs) = 122; goto _test_eof + _test_eof113: ( m.cs) = 113; goto _test_eof _test_eof481: ( m.cs) = 481; goto _test_eof _test_eof482: ( m.cs) = 482; goto _test_eof - _test_eof123: ( m.cs) = 123; goto _test_eof - _test_eof124: ( m.cs) = 124; goto _test_eof - _test_eof125: ( m.cs) = 125; goto _test_eof - _test_eof126: ( m.cs) = 126; goto _test_eof _test_eof483: ( m.cs) = 483; goto _test_eof _test_eof484: ( m.cs) = 484; goto _test_eof _test_eof485: ( m.cs) = 485; goto _test_eof - _test_eof127: ( m.cs) = 127; goto _test_eof _test_eof486: ( m.cs) = 486; goto _test_eof _test_eof487: ( m.cs) = 487; goto _test_eof _test_eof488: ( m.cs) = 488; goto _test_eof _test_eof489: ( m.cs) = 489; goto _test_eof + _test_eof114: ( m.cs) = 114; goto _test_eof + _test_eof115: ( m.cs) = 115; goto _test_eof + _test_eof116: ( m.cs) = 116; goto _test_eof _test_eof490: ( m.cs) = 490; goto _test_eof + _test_eof117: ( m.cs) = 117; goto _test_eof + _test_eof118: ( m.cs) = 118; goto _test_eof + _test_eof119: ( m.cs) = 119; goto _test_eof _test_eof491: ( m.cs) = 491; goto _test_eof + _test_eof120: ( m.cs) = 120; goto _test_eof + _test_eof121: ( m.cs) = 121; goto _test_eof _test_eof492: ( m.cs) = 492; goto _test_eof _test_eof493: ( m.cs) = 493; goto _test_eof + _test_eof122: ( m.cs) = 122; goto _test_eof + _test_eof123: ( m.cs) = 123; goto _test_eof + _test_eof124: ( m.cs) = 124; goto _test_eof + _test_eof125: ( m.cs) = 125; goto _test_eof _test_eof494: ( m.cs) = 494; goto _test_eof _test_eof495: ( m.cs) = 495; goto _test_eof _test_eof496: ( m.cs) = 496; goto _test_eof + _test_eof126: ( m.cs) = 126; goto _test_eof _test_eof497: ( m.cs) = 497; goto _test_eof _test_eof498: ( m.cs) = 498; goto _test_eof _test_eof499: ( m.cs) = 499; goto _test_eof @@ -29742,8 +30763,6 @@ tr425: _test_eof503: ( m.cs) = 503; goto _test_eof _test_eof504: ( m.cs) = 504; goto _test_eof _test_eof505: ( m.cs) = 505; goto _test_eof - _test_eof128: ( m.cs) = 128; goto _test_eof - _test_eof129: ( m.cs) = 129; goto _test_eof _test_eof506: ( m.cs) = 506; goto _test_eof _test_eof507: ( m.cs) = 507; goto _test_eof _test_eof508: ( m.cs) = 508; goto _test_eof @@ -29753,45 +30772,39 @@ tr425: _test_eof512: ( m.cs) = 512; goto _test_eof _test_eof513: ( m.cs) = 513; goto _test_eof _test_eof514: ( 
m.cs) = 514; goto _test_eof - _test_eof130: ( m.cs) = 130; goto _test_eof - _test_eof131: ( m.cs) = 131; goto _test_eof - _test_eof132: ( m.cs) = 132; goto _test_eof _test_eof515: ( m.cs) = 515; goto _test_eof - _test_eof133: ( m.cs) = 133; goto _test_eof - _test_eof134: ( m.cs) = 134; goto _test_eof - _test_eof135: ( m.cs) = 135; goto _test_eof _test_eof516: ( m.cs) = 516; goto _test_eof - _test_eof136: ( m.cs) = 136; goto _test_eof - _test_eof137: ( m.cs) = 137; goto _test_eof + _test_eof127: ( m.cs) = 127; goto _test_eof + _test_eof128: ( m.cs) = 128; goto _test_eof _test_eof517: ( m.cs) = 517; goto _test_eof _test_eof518: ( m.cs) = 518; goto _test_eof - _test_eof138: ( m.cs) = 138; goto _test_eof - _test_eof139: ( m.cs) = 139; goto _test_eof - _test_eof140: ( m.cs) = 140; goto _test_eof _test_eof519: ( m.cs) = 519; goto _test_eof _test_eof520: ( m.cs) = 520; goto _test_eof - _test_eof141: ( m.cs) = 141; goto _test_eof _test_eof521: ( m.cs) = 521; goto _test_eof - _test_eof142: ( m.cs) = 142; goto _test_eof _test_eof522: ( m.cs) = 522; goto _test_eof _test_eof523: ( m.cs) = 523; goto _test_eof _test_eof524: ( m.cs) = 524; goto _test_eof _test_eof525: ( m.cs) = 525; goto _test_eof + _test_eof129: ( m.cs) = 129; goto _test_eof + _test_eof130: ( m.cs) = 130; goto _test_eof + _test_eof131: ( m.cs) = 131; goto _test_eof _test_eof526: ( m.cs) = 526; goto _test_eof + _test_eof132: ( m.cs) = 132; goto _test_eof + _test_eof133: ( m.cs) = 133; goto _test_eof + _test_eof134: ( m.cs) = 134; goto _test_eof _test_eof527: ( m.cs) = 527; goto _test_eof + _test_eof135: ( m.cs) = 135; goto _test_eof + _test_eof136: ( m.cs) = 136; goto _test_eof _test_eof528: ( m.cs) = 528; goto _test_eof _test_eof529: ( m.cs) = 529; goto _test_eof - _test_eof143: ( m.cs) = 143; goto _test_eof - _test_eof144: ( m.cs) = 144; goto _test_eof - _test_eof145: ( m.cs) = 145; goto _test_eof + _test_eof137: ( m.cs) = 137; goto _test_eof + _test_eof138: ( m.cs) = 138; goto _test_eof + _test_eof139: ( m.cs) = 139; goto _test_eof _test_eof530: ( m.cs) = 530; goto _test_eof - _test_eof146: ( m.cs) = 146; goto _test_eof - _test_eof147: ( m.cs) = 147; goto _test_eof - _test_eof148: ( m.cs) = 148; goto _test_eof _test_eof531: ( m.cs) = 531; goto _test_eof - _test_eof149: ( m.cs) = 149; goto _test_eof - _test_eof150: ( m.cs) = 150; goto _test_eof + _test_eof140: ( m.cs) = 140; goto _test_eof _test_eof532: ( m.cs) = 532; goto _test_eof + _test_eof141: ( m.cs) = 141; goto _test_eof _test_eof533: ( m.cs) = 533; goto _test_eof _test_eof534: ( m.cs) = 534; goto _test_eof _test_eof535: ( m.cs) = 535; goto _test_eof @@ -29800,8 +30813,16 @@ tr425: _test_eof538: ( m.cs) = 538; goto _test_eof _test_eof539: ( m.cs) = 539; goto _test_eof _test_eof540: ( m.cs) = 540; goto _test_eof + _test_eof142: ( m.cs) = 142; goto _test_eof + _test_eof143: ( m.cs) = 143; goto _test_eof + _test_eof144: ( m.cs) = 144; goto _test_eof _test_eof541: ( m.cs) = 541; goto _test_eof + _test_eof145: ( m.cs) = 145; goto _test_eof + _test_eof146: ( m.cs) = 146; goto _test_eof + _test_eof147: ( m.cs) = 147; goto _test_eof _test_eof542: ( m.cs) = 542; goto _test_eof + _test_eof148: ( m.cs) = 148; goto _test_eof + _test_eof149: ( m.cs) = 149; goto _test_eof _test_eof543: ( m.cs) = 543; goto _test_eof _test_eof544: ( m.cs) = 544; goto _test_eof _test_eof545: ( m.cs) = 545; goto _test_eof @@ -29811,26 +30832,26 @@ tr425: _test_eof549: ( m.cs) = 549; goto _test_eof _test_eof550: ( m.cs) = 550; goto _test_eof _test_eof551: ( m.cs) = 551; goto _test_eof - _test_eof151: ( m.cs) = 
151; goto _test_eof - _test_eof152: ( m.cs) = 152; goto _test_eof _test_eof552: ( m.cs) = 552; goto _test_eof _test_eof553: ( m.cs) = 553; goto _test_eof _test_eof554: ( m.cs) = 554; goto _test_eof - _test_eof153: ( m.cs) = 153; goto _test_eof _test_eof555: ( m.cs) = 555; goto _test_eof _test_eof556: ( m.cs) = 556; goto _test_eof - _test_eof154: ( m.cs) = 154; goto _test_eof _test_eof557: ( m.cs) = 557; goto _test_eof _test_eof558: ( m.cs) = 558; goto _test_eof _test_eof559: ( m.cs) = 559; goto _test_eof _test_eof560: ( m.cs) = 560; goto _test_eof _test_eof561: ( m.cs) = 561; goto _test_eof _test_eof562: ( m.cs) = 562; goto _test_eof + _test_eof150: ( m.cs) = 150; goto _test_eof + _test_eof151: ( m.cs) = 151; goto _test_eof _test_eof563: ( m.cs) = 563; goto _test_eof _test_eof564: ( m.cs) = 564; goto _test_eof _test_eof565: ( m.cs) = 565; goto _test_eof + _test_eof152: ( m.cs) = 152; goto _test_eof _test_eof566: ( m.cs) = 566; goto _test_eof _test_eof567: ( m.cs) = 567; goto _test_eof + _test_eof153: ( m.cs) = 153; goto _test_eof _test_eof568: ( m.cs) = 568; goto _test_eof _test_eof569: ( m.cs) = 569; goto _test_eof _test_eof570: ( m.cs) = 570; goto _test_eof @@ -29838,10 +30859,7 @@ tr425: _test_eof572: ( m.cs) = 572; goto _test_eof _test_eof573: ( m.cs) = 573; goto _test_eof _test_eof574: ( m.cs) = 574; goto _test_eof - _test_eof155: ( m.cs) = 155; goto _test_eof - _test_eof156: ( m.cs) = 156; goto _test_eof _test_eof575: ( m.cs) = 575; goto _test_eof - _test_eof157: ( m.cs) = 157; goto _test_eof _test_eof576: ( m.cs) = 576; goto _test_eof _test_eof577: ( m.cs) = 577; goto _test_eof _test_eof578: ( m.cs) = 578; goto _test_eof @@ -29850,24 +30868,13 @@ tr425: _test_eof581: ( m.cs) = 581; goto _test_eof _test_eof582: ( m.cs) = 582; goto _test_eof _test_eof583: ( m.cs) = 583; goto _test_eof - _test_eof158: ( m.cs) = 158; goto _test_eof - _test_eof159: ( m.cs) = 159; goto _test_eof - _test_eof160: ( m.cs) = 160; goto _test_eof _test_eof584: ( m.cs) = 584; goto _test_eof - _test_eof161: ( m.cs) = 161; goto _test_eof - _test_eof162: ( m.cs) = 162; goto _test_eof - _test_eof163: ( m.cs) = 163; goto _test_eof _test_eof585: ( m.cs) = 585; goto _test_eof - _test_eof164: ( m.cs) = 164; goto _test_eof - _test_eof165: ( m.cs) = 165; goto _test_eof + _test_eof154: ( m.cs) = 154; goto _test_eof + _test_eof155: ( m.cs) = 155; goto _test_eof _test_eof586: ( m.cs) = 586; goto _test_eof + _test_eof156: ( m.cs) = 156; goto _test_eof _test_eof587: ( m.cs) = 587; goto _test_eof - _test_eof166: ( m.cs) = 166; goto _test_eof - _test_eof167: ( m.cs) = 167; goto _test_eof - _test_eof168: ( m.cs) = 168; goto _test_eof - _test_eof169: ( m.cs) = 169; goto _test_eof - _test_eof170: ( m.cs) = 170; goto _test_eof - _test_eof171: ( m.cs) = 171; goto _test_eof _test_eof588: ( m.cs) = 588; goto _test_eof _test_eof589: ( m.cs) = 589; goto _test_eof _test_eof590: ( m.cs) = 590; goto _test_eof @@ -29875,10 +30882,24 @@ tr425: _test_eof592: ( m.cs) = 592; goto _test_eof _test_eof593: ( m.cs) = 593; goto _test_eof _test_eof594: ( m.cs) = 594; goto _test_eof + _test_eof157: ( m.cs) = 157; goto _test_eof + _test_eof158: ( m.cs) = 158; goto _test_eof + _test_eof159: ( m.cs) = 159; goto _test_eof _test_eof595: ( m.cs) = 595; goto _test_eof + _test_eof160: ( m.cs) = 160; goto _test_eof + _test_eof161: ( m.cs) = 161; goto _test_eof + _test_eof162: ( m.cs) = 162; goto _test_eof _test_eof596: ( m.cs) = 596; goto _test_eof + _test_eof163: ( m.cs) = 163; goto _test_eof + _test_eof164: ( m.cs) = 164; goto _test_eof _test_eof597: ( m.cs) 
= 597; goto _test_eof _test_eof598: ( m.cs) = 598; goto _test_eof + _test_eof165: ( m.cs) = 165; goto _test_eof + _test_eof166: ( m.cs) = 166; goto _test_eof + _test_eof167: ( m.cs) = 167; goto _test_eof + _test_eof168: ( m.cs) = 168; goto _test_eof + _test_eof169: ( m.cs) = 169; goto _test_eof + _test_eof170: ( m.cs) = 170; goto _test_eof _test_eof599: ( m.cs) = 599; goto _test_eof _test_eof600: ( m.cs) = 600; goto _test_eof _test_eof601: ( m.cs) = 601; goto _test_eof @@ -29887,69 +30908,66 @@ tr425: _test_eof604: ( m.cs) = 604; goto _test_eof _test_eof605: ( m.cs) = 605; goto _test_eof _test_eof606: ( m.cs) = 606; goto _test_eof - _test_eof172: ( m.cs) = 172; goto _test_eof - _test_eof173: ( m.cs) = 173; goto _test_eof - _test_eof174: ( m.cs) = 174; goto _test_eof _test_eof607: ( m.cs) = 607; goto _test_eof _test_eof608: ( m.cs) = 608; goto _test_eof _test_eof609: ( m.cs) = 609; goto _test_eof - _test_eof175: ( m.cs) = 175; goto _test_eof _test_eof610: ( m.cs) = 610; goto _test_eof _test_eof611: ( m.cs) = 611; goto _test_eof - _test_eof176: ( m.cs) = 176; goto _test_eof _test_eof612: ( m.cs) = 612; goto _test_eof _test_eof613: ( m.cs) = 613; goto _test_eof _test_eof614: ( m.cs) = 614; goto _test_eof _test_eof615: ( m.cs) = 615; goto _test_eof _test_eof616: ( m.cs) = 616; goto _test_eof - _test_eof177: ( m.cs) = 177; goto _test_eof - _test_eof178: ( m.cs) = 178; goto _test_eof - _test_eof179: ( m.cs) = 179; goto _test_eof _test_eof617: ( m.cs) = 617; goto _test_eof - _test_eof180: ( m.cs) = 180; goto _test_eof - _test_eof181: ( m.cs) = 181; goto _test_eof - _test_eof182: ( m.cs) = 182; goto _test_eof + _test_eof171: ( m.cs) = 171; goto _test_eof + _test_eof172: ( m.cs) = 172; goto _test_eof + _test_eof173: ( m.cs) = 173; goto _test_eof _test_eof618: ( m.cs) = 618; goto _test_eof - _test_eof183: ( m.cs) = 183; goto _test_eof - _test_eof184: ( m.cs) = 184; goto _test_eof _test_eof619: ( m.cs) = 619; goto _test_eof _test_eof620: ( m.cs) = 620; goto _test_eof - _test_eof185: ( m.cs) = 185; goto _test_eof + _test_eof174: ( m.cs) = 174; goto _test_eof _test_eof621: ( m.cs) = 621; goto _test_eof _test_eof622: ( m.cs) = 622; goto _test_eof - _test_eof186: ( m.cs) = 186; goto _test_eof - _test_eof187: ( m.cs) = 187; goto _test_eof - _test_eof188: ( m.cs) = 188; goto _test_eof + _test_eof175: ( m.cs) = 175; goto _test_eof _test_eof623: ( m.cs) = 623; goto _test_eof - _test_eof189: ( m.cs) = 189; goto _test_eof - _test_eof190: ( m.cs) = 190; goto _test_eof _test_eof624: ( m.cs) = 624; goto _test_eof _test_eof625: ( m.cs) = 625; goto _test_eof _test_eof626: ( m.cs) = 626; goto _test_eof _test_eof627: ( m.cs) = 627; goto _test_eof + _test_eof176: ( m.cs) = 176; goto _test_eof + _test_eof177: ( m.cs) = 177; goto _test_eof + _test_eof178: ( m.cs) = 178; goto _test_eof _test_eof628: ( m.cs) = 628; goto _test_eof + _test_eof179: ( m.cs) = 179; goto _test_eof + _test_eof180: ( m.cs) = 180; goto _test_eof + _test_eof181: ( m.cs) = 181; goto _test_eof _test_eof629: ( m.cs) = 629; goto _test_eof + _test_eof182: ( m.cs) = 182; goto _test_eof + _test_eof183: ( m.cs) = 183; goto _test_eof _test_eof630: ( m.cs) = 630; goto _test_eof _test_eof631: ( m.cs) = 631; goto _test_eof + _test_eof184: ( m.cs) = 184; goto _test_eof + _test_eof632: ( m.cs) = 632; goto _test_eof + _test_eof633: ( m.cs) = 633; goto _test_eof + _test_eof634: ( m.cs) = 634; goto _test_eof + _test_eof185: ( m.cs) = 185; goto _test_eof + _test_eof186: ( m.cs) = 186; goto _test_eof + _test_eof187: ( m.cs) = 187; goto _test_eof + _test_eof635: ( 
m.cs) = 635; goto _test_eof + _test_eof188: ( m.cs) = 188; goto _test_eof + _test_eof189: ( m.cs) = 189; goto _test_eof + _test_eof190: ( m.cs) = 190; goto _test_eof + _test_eof636: ( m.cs) = 636; goto _test_eof _test_eof191: ( m.cs) = 191; goto _test_eof _test_eof192: ( m.cs) = 192; goto _test_eof - _test_eof193: ( m.cs) = 193; goto _test_eof - _test_eof632: ( m.cs) = 632; goto _test_eof - _test_eof194: ( m.cs) = 194; goto _test_eof - _test_eof195: ( m.cs) = 195; goto _test_eof - _test_eof196: ( m.cs) = 196; goto _test_eof - _test_eof633: ( m.cs) = 633; goto _test_eof - _test_eof197: ( m.cs) = 197; goto _test_eof - _test_eof198: ( m.cs) = 198; goto _test_eof - _test_eof634: ( m.cs) = 634; goto _test_eof - _test_eof635: ( m.cs) = 635; goto _test_eof - _test_eof199: ( m.cs) = 199; goto _test_eof - _test_eof200: ( m.cs) = 200; goto _test_eof - _test_eof201: ( m.cs) = 201; goto _test_eof - _test_eof636: ( m.cs) = 636; goto _test_eof _test_eof637: ( m.cs) = 637; goto _test_eof _test_eof638: ( m.cs) = 638; goto _test_eof + _test_eof193: ( m.cs) = 193; goto _test_eof + _test_eof194: ( m.cs) = 194; goto _test_eof + _test_eof195: ( m.cs) = 195; goto _test_eof _test_eof639: ( m.cs) = 639; goto _test_eof + _test_eof196: ( m.cs) = 196; goto _test_eof + _test_eof197: ( m.cs) = 197; goto _test_eof _test_eof640: ( m.cs) = 640; goto _test_eof _test_eof641: ( m.cs) = 641; goto _test_eof _test_eof642: ( m.cs) = 642; goto _test_eof @@ -29958,21 +30976,25 @@ tr425: _test_eof645: ( m.cs) = 645; goto _test_eof _test_eof646: ( m.cs) = 646; goto _test_eof _test_eof647: ( m.cs) = 647; goto _test_eof + _test_eof198: ( m.cs) = 198; goto _test_eof + _test_eof199: ( m.cs) = 199; goto _test_eof + _test_eof200: ( m.cs) = 200; goto _test_eof _test_eof648: ( m.cs) = 648; goto _test_eof + _test_eof201: ( m.cs) = 201; goto _test_eof + _test_eof202: ( m.cs) = 202; goto _test_eof + _test_eof203: ( m.cs) = 203; goto _test_eof _test_eof649: ( m.cs) = 649; goto _test_eof + _test_eof204: ( m.cs) = 204; goto _test_eof + _test_eof205: ( m.cs) = 205; goto _test_eof _test_eof650: ( m.cs) = 650; goto _test_eof _test_eof651: ( m.cs) = 651; goto _test_eof + _test_eof206: ( m.cs) = 206; goto _test_eof + _test_eof207: ( m.cs) = 207; goto _test_eof + _test_eof208: ( m.cs) = 208; goto _test_eof _test_eof652: ( m.cs) = 652; goto _test_eof _test_eof653: ( m.cs) = 653; goto _test_eof _test_eof654: ( m.cs) = 654; goto _test_eof - _test_eof202: ( m.cs) = 202; goto _test_eof - _test_eof203: ( m.cs) = 203; goto _test_eof - _test_eof204: ( m.cs) = 204; goto _test_eof - _test_eof205: ( m.cs) = 205; goto _test_eof - _test_eof206: ( m.cs) = 206; goto _test_eof _test_eof655: ( m.cs) = 655; goto _test_eof - _test_eof207: ( m.cs) = 207; goto _test_eof - _test_eof208: ( m.cs) = 208; goto _test_eof _test_eof656: ( m.cs) = 656; goto _test_eof _test_eof657: ( m.cs) = 657; goto _test_eof _test_eof658: ( m.cs) = 658; goto _test_eof @@ -29982,52 +31004,47 @@ tr425: _test_eof662: ( m.cs) = 662; goto _test_eof _test_eof663: ( m.cs) = 663; goto _test_eof _test_eof664: ( m.cs) = 664; goto _test_eof + _test_eof665: ( m.cs) = 665; goto _test_eof + _test_eof666: ( m.cs) = 666; goto _test_eof + _test_eof667: ( m.cs) = 667; goto _test_eof + _test_eof668: ( m.cs) = 668; goto _test_eof + _test_eof669: ( m.cs) = 669; goto _test_eof + _test_eof670: ( m.cs) = 670; goto _test_eof _test_eof209: ( m.cs) = 209; goto _test_eof _test_eof210: ( m.cs) = 210; goto _test_eof _test_eof211: ( m.cs) = 211; goto _test_eof - _test_eof665: ( m.cs) = 665; goto _test_eof _test_eof212: ( 
m.cs) = 212; goto _test_eof _test_eof213: ( m.cs) = 213; goto _test_eof - _test_eof214: ( m.cs) = 214; goto _test_eof - _test_eof666: ( m.cs) = 666; goto _test_eof - _test_eof215: ( m.cs) = 215; goto _test_eof - _test_eof216: ( m.cs) = 216; goto _test_eof - _test_eof667: ( m.cs) = 667; goto _test_eof - _test_eof668: ( m.cs) = 668; goto _test_eof - _test_eof217: ( m.cs) = 217; goto _test_eof - _test_eof218: ( m.cs) = 218; goto _test_eof - _test_eof219: ( m.cs) = 219; goto _test_eof - _test_eof220: ( m.cs) = 220; goto _test_eof - _test_eof669: ( m.cs) = 669; goto _test_eof - _test_eof221: ( m.cs) = 221; goto _test_eof - _test_eof222: ( m.cs) = 222; goto _test_eof - _test_eof670: ( m.cs) = 670; goto _test_eof _test_eof671: ( m.cs) = 671; goto _test_eof + _test_eof214: ( m.cs) = 214; goto _test_eof + _test_eof215: ( m.cs) = 215; goto _test_eof _test_eof672: ( m.cs) = 672; goto _test_eof _test_eof673: ( m.cs) = 673; goto _test_eof _test_eof674: ( m.cs) = 674; goto _test_eof _test_eof675: ( m.cs) = 675; goto _test_eof _test_eof676: ( m.cs) = 676; goto _test_eof _test_eof677: ( m.cs) = 677; goto _test_eof - _test_eof223: ( m.cs) = 223; goto _test_eof - _test_eof224: ( m.cs) = 224; goto _test_eof - _test_eof225: ( m.cs) = 225; goto _test_eof _test_eof678: ( m.cs) = 678; goto _test_eof - _test_eof226: ( m.cs) = 226; goto _test_eof - _test_eof227: ( m.cs) = 227; goto _test_eof - _test_eof228: ( m.cs) = 228; goto _test_eof _test_eof679: ( m.cs) = 679; goto _test_eof - _test_eof229: ( m.cs) = 229; goto _test_eof - _test_eof230: ( m.cs) = 230; goto _test_eof _test_eof680: ( m.cs) = 680; goto _test_eof + _test_eof216: ( m.cs) = 216; goto _test_eof + _test_eof217: ( m.cs) = 217; goto _test_eof + _test_eof218: ( m.cs) = 218; goto _test_eof _test_eof681: ( m.cs) = 681; goto _test_eof - _test_eof231: ( m.cs) = 231; goto _test_eof - _test_eof232: ( m.cs) = 232; goto _test_eof - _test_eof233: ( m.cs) = 233; goto _test_eof + _test_eof219: ( m.cs) = 219; goto _test_eof + _test_eof220: ( m.cs) = 220; goto _test_eof + _test_eof221: ( m.cs) = 221; goto _test_eof _test_eof682: ( m.cs) = 682; goto _test_eof + _test_eof222: ( m.cs) = 222; goto _test_eof + _test_eof223: ( m.cs) = 223; goto _test_eof _test_eof683: ( m.cs) = 683; goto _test_eof _test_eof684: ( m.cs) = 684; goto _test_eof + _test_eof224: ( m.cs) = 224; goto _test_eof + _test_eof225: ( m.cs) = 225; goto _test_eof + _test_eof226: ( m.cs) = 226; goto _test_eof _test_eof685: ( m.cs) = 685; goto _test_eof + _test_eof227: ( m.cs) = 227; goto _test_eof + _test_eof228: ( m.cs) = 228; goto _test_eof _test_eof686: ( m.cs) = 686; goto _test_eof _test_eof687: ( m.cs) = 687; goto _test_eof _test_eof688: ( m.cs) = 688; goto _test_eof @@ -30036,715 +31053,854 @@ tr425: _test_eof691: ( m.cs) = 691; goto _test_eof _test_eof692: ( m.cs) = 692; goto _test_eof _test_eof693: ( m.cs) = 693; goto _test_eof + _test_eof229: ( m.cs) = 229; goto _test_eof + _test_eof230: ( m.cs) = 230; goto _test_eof + _test_eof231: ( m.cs) = 231; goto _test_eof _test_eof694: ( m.cs) = 694; goto _test_eof + _test_eof232: ( m.cs) = 232; goto _test_eof + _test_eof233: ( m.cs) = 233; goto _test_eof _test_eof695: ( m.cs) = 695; goto _test_eof _test_eof696: ( m.cs) = 696; goto _test_eof _test_eof697: ( m.cs) = 697; goto _test_eof _test_eof698: ( m.cs) = 698; goto _test_eof _test_eof699: ( m.cs) = 699; goto _test_eof _test_eof700: ( m.cs) = 700; goto _test_eof + _test_eof701: ( m.cs) = 701; goto _test_eof + _test_eof702: ( m.cs) = 702; goto _test_eof _test_eof234: ( m.cs) = 234; goto _test_eof 
_test_eof235: ( m.cs) = 235; goto _test_eof - _test_eof701: ( m.cs) = 701; goto _test_eof _test_eof236: ( m.cs) = 236; goto _test_eof - _test_eof237: ( m.cs) = 237; goto _test_eof - _test_eof702: ( m.cs) = 702; goto _test_eof _test_eof703: ( m.cs) = 703; goto _test_eof + _test_eof237: ( m.cs) = 237; goto _test_eof + _test_eof238: ( m.cs) = 238; goto _test_eof + _test_eof239: ( m.cs) = 239; goto _test_eof _test_eof704: ( m.cs) = 704; goto _test_eof + _test_eof240: ( m.cs) = 240; goto _test_eof + _test_eof241: ( m.cs) = 241; goto _test_eof _test_eof705: ( m.cs) = 705; goto _test_eof _test_eof706: ( m.cs) = 706; goto _test_eof + _test_eof242: ( m.cs) = 242; goto _test_eof + _test_eof243: ( m.cs) = 243; goto _test_eof + _test_eof244: ( m.cs) = 244; goto _test_eof _test_eof707: ( m.cs) = 707; goto _test_eof _test_eof708: ( m.cs) = 708; goto _test_eof _test_eof709: ( m.cs) = 709; goto _test_eof - _test_eof238: ( m.cs) = 238; goto _test_eof - _test_eof239: ( m.cs) = 239; goto _test_eof - _test_eof240: ( m.cs) = 240; goto _test_eof _test_eof710: ( m.cs) = 710; goto _test_eof - _test_eof241: ( m.cs) = 241; goto _test_eof - _test_eof242: ( m.cs) = 242; goto _test_eof - _test_eof243: ( m.cs) = 243; goto _test_eof _test_eof711: ( m.cs) = 711; goto _test_eof - _test_eof244: ( m.cs) = 244; goto _test_eof - _test_eof245: ( m.cs) = 245; goto _test_eof _test_eof712: ( m.cs) = 712; goto _test_eof _test_eof713: ( m.cs) = 713; goto _test_eof - _test_eof246: ( m.cs) = 246; goto _test_eof - _test_eof247: ( m.cs) = 247; goto _test_eof _test_eof714: ( m.cs) = 714; goto _test_eof - _test_eof250: ( m.cs) = 250; goto _test_eof + _test_eof715: ( m.cs) = 715; goto _test_eof + _test_eof716: ( m.cs) = 716; goto _test_eof _test_eof717: ( m.cs) = 717; goto _test_eof _test_eof718: ( m.cs) = 718; goto _test_eof + _test_eof719: ( m.cs) = 719; goto _test_eof + _test_eof720: ( m.cs) = 720; goto _test_eof + _test_eof721: ( m.cs) = 721; goto _test_eof + _test_eof722: ( m.cs) = 722; goto _test_eof + _test_eof723: ( m.cs) = 723; goto _test_eof + _test_eof724: ( m.cs) = 724; goto _test_eof + _test_eof725: ( m.cs) = 725; goto _test_eof + _test_eof245: ( m.cs) = 245; goto _test_eof + _test_eof246: ( m.cs) = 246; goto _test_eof + _test_eof726: ( m.cs) = 726; goto _test_eof + _test_eof247: ( m.cs) = 247; goto _test_eof + _test_eof248: ( m.cs) = 248; goto _test_eof + _test_eof727: ( m.cs) = 727; goto _test_eof + _test_eof728: ( m.cs) = 728; goto _test_eof + _test_eof729: ( m.cs) = 729; goto _test_eof + _test_eof730: ( m.cs) = 730; goto _test_eof + _test_eof731: ( m.cs) = 731; goto _test_eof + _test_eof732: ( m.cs) = 732; goto _test_eof + _test_eof733: ( m.cs) = 733; goto _test_eof + _test_eof734: ( m.cs) = 734; goto _test_eof + _test_eof249: ( m.cs) = 249; goto _test_eof + _test_eof250: ( m.cs) = 250; goto _test_eof _test_eof251: ( m.cs) = 251; goto _test_eof + _test_eof735: ( m.cs) = 735; goto _test_eof _test_eof252: ( m.cs) = 252; goto _test_eof _test_eof253: ( m.cs) = 253; goto _test_eof _test_eof254: ( m.cs) = 254; goto _test_eof - _test_eof719: ( m.cs) = 719; goto _test_eof + _test_eof736: ( m.cs) = 736; goto _test_eof _test_eof255: ( m.cs) = 255; goto _test_eof - _test_eof720: ( m.cs) = 720; goto _test_eof _test_eof256: ( m.cs) = 256; goto _test_eof + _test_eof737: ( m.cs) = 737; goto _test_eof + _test_eof738: ( m.cs) = 738; goto _test_eof _test_eof257: ( m.cs) = 257; goto _test_eof _test_eof258: ( m.cs) = 258; goto _test_eof - _test_eof715: ( m.cs) = 715; goto _test_eof - _test_eof716: ( m.cs) = 716; goto _test_eof - 
_test_eof248: ( m.cs) = 248; goto _test_eof - _test_eof249: ( m.cs) = 249; goto _test_eof + _test_eof739: ( m.cs) = 739; goto _test_eof + _test_eof261: ( m.cs) = 261; goto _test_eof + _test_eof741: ( m.cs) = 741; goto _test_eof + _test_eof742: ( m.cs) = 742; goto _test_eof + _test_eof262: ( m.cs) = 262; goto _test_eof + _test_eof263: ( m.cs) = 263; goto _test_eof + _test_eof264: ( m.cs) = 264; goto _test_eof + _test_eof265: ( m.cs) = 265; goto _test_eof + _test_eof743: ( m.cs) = 743; goto _test_eof + _test_eof266: ( m.cs) = 266; goto _test_eof + _test_eof744: ( m.cs) = 744; goto _test_eof + _test_eof267: ( m.cs) = 267; goto _test_eof + _test_eof268: ( m.cs) = 268; goto _test_eof + _test_eof269: ( m.cs) = 269; goto _test_eof + _test_eof740: ( m.cs) = 740; goto _test_eof + _test_eof259: ( m.cs) = 259; goto _test_eof + _test_eof260: ( m.cs) = 260; goto _test_eof _test_eof: {} if ( m.p) == ( m.eof) { switch ( m.cs) { - case 9, 250: -//line plugins/parsers/influx/machine.go.rl:23 + case 8, 261: +//line plugins/parsers/influx/machine.go.rl:24 err = ErrNameParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } - case 2, 3, 4, 5, 6, 7, 8, 29, 32, 33, 36, 37, 38, 50, 51, 52, 53, 54, 74, 76, 77, 94, 104, 106, 142, 154, 157, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246: -//line plugins/parsers/influx/machine.go.rl:30 + case 2, 3, 4, 5, 6, 7, 28, 31, 32, 35, 36, 37, 49, 50, 51, 52, 53, 73, 75, 76, 93, 103, 105, 141, 153, 156, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257: +//line plugins/parsers/influx/machine.go.rl:31 err = ErrFieldParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } - case 14, 15, 16, 23, 25, 26, 252, 253, 254, 255, 256, 257: -//line plugins/parsers/influx/machine.go.rl:37 + case 13, 14, 15, 22, 24, 25, 263, 264, 265, 266, 267, 268: +//line plugins/parsers/influx/machine.go.rl:38 err = ErrTagParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } - case 233: -//line plugins/parsers/influx/machine.go.rl:44 + case 244: +//line plugins/parsers/influx/machine.go.rl:45 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } - case 259: -//line plugins/parsers/influx/machine.go.rl:73 - - foundMetric = true - - case 289, 292, 296, 364, 388, 389, 393, 394, 395, 519, 553, 554, 556, 717: -//line plugins/parsers/influx/machine.go.rl:77 + case 741: +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } - case 340, 341, 342, 344, 363, 419, 443, 444, 448, 468, 484, 485, 487, 719, 720: -//line plugins/parsers/influx/machine.go.rl:90 + case 743, 744: +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } - case 613, 659, 704: -//line plugins/parsers/influx/machine.go.rl:103 + case 271, 272, 273, 274, 275, 277, 278, 297, 298, 299, 301, 302, 305, 306, 327, 
328, 329, 330, 332, 376, 377, 379, 380, 402, 403, 408, 409, 411, 431, 432, 434, 435, 457, 458, 618, 621: +//line plugins/parsers/influx/machine.go.rl:170 - err = m.handler.AddInt(key, m.text()) - if err != nil { - ( m.p)-- + m.finishMetric = true - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - } - - case 614, 662, 707: -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddUint(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - } - - case 315, 608, 609, 611, 612, 615, 621, 622, 655, 656, 657, 658, 660, 661, 663, 701, 702, 703, 705, 706, 708: -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddFloat(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - } - - case 616, 617, 618, 619, 620, 664, 665, 666, 667, 668, 709, 710, 711, 712, 713: -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddBool(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - } - - case 265, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 320, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 367, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 399, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 422, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604, 605, 606, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700: -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - } - - case 11, 39, 41, 166, 168: -//line plugins/parsers/influx/machine.go.rl:23 + case 10, 38, 40, 165, 167: +//line plugins/parsers/influx/machine.go.rl:24 err = ErrNameParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:30 +//line plugins/parsers/influx/machine.go.rl:31 err = ErrFieldParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } - case 35, 75, 105, 171, 201: -//line plugins/parsers/influx/machine.go.rl:30 + case 34, 74, 104, 170, 208: +//line plugins/parsers/influx/machine.go.rl:31 err = ErrFieldParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:44 +//line plugins/parsers/influx/machine.go.rl:45 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } - case 21, 45, 46, 47, 59, 60, 62, 64, 69, 71, 72, 78, 79, 80, 85, 87, 89, 90, 98, 99, 101, 102, 103, 108, 109, 110, 123, 124, 138, 139: -//line plugins/parsers/influx/machine.go.rl:37 + case 20, 44, 45, 46, 58, 59, 61, 63, 68, 70, 71, 77, 78, 79, 84, 86, 88, 89, 97, 98, 100, 101, 102, 107, 108, 109, 122, 123, 137, 138: +//line plugins/parsers/influx/machine.go.rl:38 err = ErrTagParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:30 +//line plugins/parsers/influx/machine.go.rl:31 err = ErrFieldParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; 
{( m.p)++; ( m.cs) = 0; goto _out } - case 61: -//line plugins/parsers/influx/machine.go.rl:37 + case 60: +//line plugins/parsers/influx/machine.go.rl:38 err = ErrTagParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:44 +//line plugins/parsers/influx/machine.go.rl:45 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } + case 270: +//line plugins/parsers/influx/machine.go.rl:74 + + m.beginMetric = true + +//line plugins/parsers/influx/machine.go.rl:170 + + m.finishMetric = true + case 1: -//line plugins/parsers/influx/machine.go.rl:77 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:37 +//line plugins/parsers/influx/machine.go.rl:38 err = ErrTagParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } - case 524, 578, 672: -//line plugins/parsers/influx/machine.go.rl:77 + case 300, 303, 307, 375, 399, 400, 404, 405, 406, 530, 564, 565, 567: +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:103 +//line plugins/parsers/influx/machine.go.rl:170 - err = m.handler.AddInt(key, m.text()) + m.finishMetric = true + + case 16, 23: +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } - case 527, 581, 675: -//line plugins/parsers/influx/machine.go.rl:77 +//line plugins/parsers/influx/machine.go.rl:38 - err = m.handler.SetMeasurement(m.text()) + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + + case 351, 352, 353, 355, 374, 430, 454, 455, 459, 479, 495, 496, 498: +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:112 +//line plugins/parsers/influx/machine.go.rl:170 - err = m.handler.AddUint(key, m.text()) + m.finishMetric = true + + case 624, 675, 689, 729: +//line plugins/parsers/influx/machine.go.rl:104 + + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } - case 396, 520, 521, 522, 523, 525, 526, 528, 552, 575, 576, 577, 579, 580, 582, 669, 670, 671, 673, 674, 676: -//line plugins/parsers/influx/machine.go.rl:77 +//line plugins/parsers/influx/machine.go.rl:170 - err = m.handler.SetMeasurement(m.text()) + m.finishMetric = true + + case 625, 678, 692, 732: +//line plugins/parsers/influx/machine.go.rl:113 + + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:121 +//line plugins/parsers/influx/machine.go.rl:170 - err = m.handler.AddFloat(key, m.text()) + m.finishMetric = true + + case 326, 619, 620, 622, 623, 626, 632, 633, 671, 672, 673, 674, 676, 677, 679, 685, 686, 687, 688, 690, 691, 693, 726, 727, 728, 730, 731, 733: +//line plugins/parsers/influx/machine.go.rl:122 + + err = m.handler.AddFloat(m.key, m.text()) if err != 
nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } - case 529, 530, 531, 532, 533, 583, 584, 585, 586, 587, 677, 678, 679, 680, 681: -//line plugins/parsers/influx/machine.go.rl:77 +//line plugins/parsers/influx/machine.go.rl:170 - err = m.handler.SetMeasurement(m.text()) + m.finishMetric = true + + case 627, 628, 629, 630, 631, 634, 635, 636, 637, 638, 680, 681, 682, 683, 684, 734, 735, 736, 737, 738: +//line plugins/parsers/influx/machine.go.rl:131 + + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:130 +//line plugins/parsers/influx/machine.go.rl:170 - err = m.handler.AddBool(key, m.text()) - if err != nil { - ( m.p)-- + m.finishMetric = true - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - } - - case 293, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 390, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 555, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574: -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:148 + case 276, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 331, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 378, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 410, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 433, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 599, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725: +//line plugins/parsers/influx/machine.go.rl:149 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } - case 17, 24: -//line plugins/parsers/influx/machine.go.rl:90 +//line plugins/parsers/influx/machine.go.rl:170 - err = m.handler.AddTag(key, m.text()) + m.finishMetric = true + + case 9: +//line plugins/parsers/influx/machine.go.rl:24 + + err = ErrNameParse + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:37 +//line plugins/parsers/influx/machine.go.rl:38 err = ErrTagParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } - case 473, 509, 626: -//line plugins/parsers/influx/machine.go.rl:90 + case 99: +//line plugins/parsers/influx/machine.go.rl:38 - err = m.handler.AddTag(key, m.text()) + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:31 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:45 + + err = ErrTimestampParse + 
( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + + case 11, 12, 26, 27, 29, 30, 41, 42, 54, 55, 56, 57, 72, 91, 92, 94, 96, 139, 140, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 154, 155, 157, 158, 159, 160, 161, 162, 163, 164, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241: +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:103 +//line plugins/parsers/influx/machine.go.rl:38 - err = m.handler.AddInt(key, m.text()) + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:31 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + + case 535, 589, 697: +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } - case 476, 512, 629: -//line plugins/parsers/influx/machine.go.rl:90 +//line plugins/parsers/influx/machine.go.rl:104 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:112 +//line plugins/parsers/influx/machine.go.rl:170 - err = m.handler.AddUint(key, m.text()) + m.finishMetric = true + + case 538, 592, 700: +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } - case 467, 469, 470, 471, 472, 474, 475, 477, 483, 506, 507, 508, 510, 511, 513, 623, 624, 625, 627, 628, 630: -//line plugins/parsers/influx/machine.go.rl:90 +//line plugins/parsers/influx/machine.go.rl:113 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:121 +//line plugins/parsers/influx/machine.go.rl:170 - err = m.handler.AddFloat(key, m.text()) + m.finishMetric = true + + case 407, 531, 532, 533, 534, 536, 537, 539, 563, 586, 587, 588, 590, 591, 593, 694, 695, 696, 698, 699, 701: +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } - case 478, 479, 480, 481, 482, 514, 515, 516, 517, 518, 631, 632, 633, 634, 635: -//line plugins/parsers/influx/machine.go.rl:90 +//line plugins/parsers/influx/machine.go.rl:122 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:130 +//line plugins/parsers/influx/machine.go.rl:170 - err = m.handler.AddBool(key, m.text()) + m.finishMetric = true + + case 540, 541, 542, 543, 544, 594, 595, 596, 597, 598, 702, 703, 704, 705, 706: +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } - case 343, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 445, 449, 450, 451, 452, 453, 454, 455, 
456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 486, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505: -//line plugins/parsers/influx/machine.go.rl:90 +//line plugins/parsers/influx/machine.go.rl:131 - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:148 +//line plugins/parsers/influx/machine.go.rl:170 + + m.finishMetric = true + + case 304, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 401, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 566, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585: +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:149 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } - case 10: -//line plugins/parsers/influx/machine.go.rl:23 +//line plugins/parsers/influx/machine.go.rl:170 + + m.finishMetric = true + + case 17, 18, 19, 21, 47, 48, 64, 65, 66, 67, 69, 80, 81, 82, 83, 85, 87, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 124, 125, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205: +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:38 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:31 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + + case 484, 520, 642: +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:104 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:170 + + m.finishMetric = true + + case 487, 523, 645: +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:113 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:170 + + m.finishMetric = true + + case 478, 480, 481, 482, 483, 485, 486, 488, 494, 517, 518, 519, 521, 522, 524, 639, 640, 641, 643, 644, 646: +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:122 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:170 + + 
m.finishMetric = true + + case 489, 490, 491, 492, 493, 525, 526, 527, 528, 529, 647, 648, 649, 650, 651: +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:131 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:170 + + m.finishMetric = true + + case 354, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 456, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 497, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516: +//line plugins/parsers/influx/machine.go.rl:91 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:149 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:170 + + m.finishMetric = true + + case 39, 166, 168, 169, 206, 207, 242, 243: +//line plugins/parsers/influx/machine.go.rl:24 err = ErrNameParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:77 +//line plugins/parsers/influx/machine.go.rl:78 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:37 +//line plugins/parsers/influx/machine.go.rl:38 err = ErrTagParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } - case 100: -//line plugins/parsers/influx/machine.go.rl:37 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:30 +//line plugins/parsers/influx/machine.go.rl:31 err = ErrFieldParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:44 + case 43, 90, 152: +//line plugins/parsers/influx/machine.go.rl:78 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:38 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:31 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 258; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:45 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } - case 12, 13, 27, 28, 30, 31, 42, 43, 55, 56, 57, 58, 73, 92, 93, 95, 97, 140, 141, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 155, 156, 158, 159, 160, 161, 162, 163, 164, 165, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230: -//line plugins/parsers/influx/machine.go.rl:77 + case 62, 106, 126: +//line plugins/parsers/influx/machine.go.rl:91 - err = m.handler.SetMeasurement(m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:37 +//line 
plugins/parsers/influx/machine.go.rl:38 err = ErrTagParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:30 +//line plugins/parsers/influx/machine.go.rl:31 err = ErrFieldParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } - case 18, 19, 20, 22, 48, 49, 65, 66, 67, 68, 70, 81, 82, 83, 84, 86, 88, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 125, 126, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198: -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:37 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:30 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 40, 167, 169, 170, 199, 200, 231, 232: -//line plugins/parsers/influx/machine.go.rl:23 - - err = ErrNameParse - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:37 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:30 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 44, 91, 153: -//line plugins/parsers/influx/machine.go.rl:77 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:37 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:30 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:44 +//line plugins/parsers/influx/machine.go.rl:45 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 247; + ( m.cs) = 258; {( m.p)++; ( m.cs) = 0; goto _out } - case 63, 107, 127: -//line plugins/parsers/influx/machine.go.rl:90 - - err = m.handler.AddTag(key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:37 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:30 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:44 - - err = ErrTimestampParse - ( m.p)-- - - ( m.cs) = 247; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go:30741 +//line plugins/parsers/influx/machine.go:31897 } } _out: {} } -//line plugins/parsers/influx/machine.go.rl:390 +//line plugins/parsers/influx/machine.go.rl:407 if err != nil { return err @@ -30763,7 +31919,7 @@ tr425: // // Otherwise we have successfully parsed a metric line, so if we are at // the EOF we will report it the next call. 
- if !foundMetric && m.p == m.pe && m.pe == m.eof { + if !m.beginMetric && m.p == m.pe && m.pe == m.eof { return EOF } @@ -30795,3 +31951,96 @@ func (m *machine) Column() int { func (m *machine) text() []byte { return m.data[m.pb:m.p] } + +type streamMachine struct { + machine *machine + reader io.Reader +} + +func NewStreamMachine(r io.Reader, handler Handler) *streamMachine { + m := &streamMachine{ + machine: NewMachine(handler), + reader: r, + } + + m.machine.SetData(make([]byte, 1024)) + m.machine.pe = 0 + m.machine.eof = -1 + return m +} + +func (m *streamMachine) Next() error { + // Check if we are already at EOF, this should only happen if called again + // after already returning EOF. + if m.machine.p == m.machine.pe && m.machine.pe == m.machine.eof { + return EOF + } + + copy(m.machine.data, m.machine.data[m.machine.p:]) + m.machine.pe = m.machine.pe - m.machine.p + m.machine.sol = m.machine.sol - m.machine.p + m.machine.pb = 0 + m.machine.p = 0 + m.machine.eof = -1 + + m.machine.key = nil + m.machine.beginMetric = false + m.machine.finishMetric = false + + for { + // Expand the buffer if it is full + if m.machine.pe == len(m.machine.data) { + expanded := make([]byte, 2 * len(m.machine.data)) + copy(expanded, m.machine.data) + m.machine.data = expanded + } + + n, err := m.reader.Read(m.machine.data[m.machine.pe:]) + if n == 0 && err == io.EOF { + m.machine.eof = m.machine.pe + } else if err != nil && err != io.EOF { + return err + } + + m.machine.pe += n + + err = m.machine.exec() + if err != nil { + return err + } + + // If we have successfully parsed a full metric line break out + if m.machine.finishMetric { + break + } + + } + + return nil +} + +// Position returns the current byte offset into the data. +func (m *streamMachine) Position() int { + return m.machine.Position() +} + +// LineOffset returns the byte offset of the current line. +func (m *streamMachine) LineOffset() int { + return m.machine.LineOffset() +} + +// LineNumber returns the current line number. Lines are counted based on the +// regular expression `\r?\n`. +func (m *streamMachine) LineNumber() int { + return m.machine.LineNumber() +} + +// Column returns the current column. +func (m *streamMachine) Column() int { + return m.machine.Column() +} + +// LineText returns the text of the current line that has been parsed so far. 
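+// Only the bytes up to the current parse position are returned, so a
+// line that is still being parsed yields a partial result.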
+func (m *streamMachine) LineText() string { + return string(m.machine.data[0:m.machine.p]) +} diff --git a/plugins/parsers/influx/machine.go.rl b/plugins/parsers/influx/machine.go.rl index 52b32b2b8..54cf67fba 100644 --- a/plugins/parsers/influx/machine.go.rl +++ b/plugins/parsers/influx/machine.go.rl @@ -2,6 +2,7 @@ package influx import ( "errors" + "io" ) var ( @@ -70,8 +71,8 @@ action goto_align { fgoto align; } -action found_metric { - foundMetric = true +action begin_metric { + m.beginMetric = true } action name { @@ -84,11 +85,11 @@ action name { } action tagkey { - key = m.text() + m.key = m.text() } action tagvalue { - err = m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { fhold; fnext discard_line; @@ -97,11 +98,11 @@ action tagvalue { } action fieldkey { - key = m.text() + m.key = m.text() } action integer { - err = m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { fhold; fnext discard_line; @@ -110,7 +111,7 @@ action integer { } action unsigned { - err = m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { fhold; fnext discard_line; @@ -119,7 +120,7 @@ action unsigned { } action float { - err = m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { fhold; fnext discard_line; @@ -128,7 +129,7 @@ action float { } action bool { - err = m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { fhold; fnext discard_line; @@ -137,7 +138,7 @@ action bool { } action string { - err = m.handler.AddString(key, m.text()) + err = m.handler.AddString(m.key, m.text()) if err != nil { fhold; fnext discard_line; @@ -161,15 +162,20 @@ action incr_newline { } action eol { + m.finishMetric = true fnext align; fbreak; } +action finish_metric { + m.finishMetric = true +} + ws = [\t\v\f ]; newline = - '\r'? '\n' %to(incr_newline); + '\r'? '\n' >incr_newline; non_zero_digit = [1-9]; @@ -273,7 +279,7 @@ line_without_term = main := (line_with_term* (line_with_term | line_without_term?) - ) >found_metric + ) >begin_metric %eof(finish_metric) ; # The discard_line machine discards the current line. Useful for recovering @@ -299,7 +305,7 @@ align := # Series is a machine for matching measurement+tagset series := (measurement >err(name_error) tagset eol_break?) - >found_metric + >begin_metric ; }%% @@ -317,14 +323,17 @@ type Handler interface { } type machine struct { - data []byte - cs int - p, pe, eof int - pb int - lineno int - sol int - handler Handler - initState int + data []byte + cs int + p, pe, eof int + pb int + lineno int + sol int + handler Handler + initState int + key []byte + beginMetric bool + finishMetric bool } func NewMachine(handler Handler) *machine { @@ -368,6 +377,9 @@ func (m *machine) SetData(data []byte) { m.sol = 0 m.pe = len(data) m.eof = len(data) + m.key = nil + m.beginMetric = false + m.finishMetric = false %% write init; m.cs = m.initState @@ -382,10 +394,15 @@ func (m *machine) Next() error { return EOF } - var err error - var key []byte - foundMetric := false + m.key = nil + m.beginMetric = false + m.finishMetric = false + return m.exec() +} + +func (m *machine) exec() error { + var err error %% write exec; if err != nil { @@ -405,7 +422,7 @@ func (m *machine) Next() error { // // Otherwise we have successfully parsed a metric line, so if we are at // the EOF we will report it the next call. 
- if !foundMetric && m.p == m.pe && m.pe == m.eof { + if !m.beginMetric && m.p == m.pe && m.pe == m.eof { return EOF } @@ -437,3 +454,96 @@ func (m *machine) Column() int { func (m *machine) text() []byte { return m.data[m.pb:m.p] } + +type streamMachine struct { + machine *machine + reader io.Reader +} + +func NewStreamMachine(r io.Reader, handler Handler) *streamMachine { + m := &streamMachine{ + machine: NewMachine(handler), + reader: r, + } + + m.machine.SetData(make([]byte, 1024)) + m.machine.pe = 0 + m.machine.eof = -1 + return m +} + +func (m *streamMachine) Next() error { + // Check if we are already at EOF, this should only happen if called again + // after already returning EOF. + if m.machine.p == m.machine.pe && m.machine.pe == m.machine.eof { + return EOF + } + + copy(m.machine.data, m.machine.data[m.machine.p:]) + m.machine.pe = m.machine.pe - m.machine.p + m.machine.sol = m.machine.sol - m.machine.p + m.machine.pb = 0 + m.machine.p = 0 + m.machine.eof = -1 + + m.machine.key = nil + m.machine.beginMetric = false + m.machine.finishMetric = false + + for { + // Expand the buffer if it is full + if m.machine.pe == len(m.machine.data) { + expanded := make([]byte, 2 * len(m.machine.data)) + copy(expanded, m.machine.data) + m.machine.data = expanded + } + + n, err := m.reader.Read(m.machine.data[m.machine.pe:]) + if n == 0 && err == io.EOF { + m.machine.eof = m.machine.pe + } else if err != nil && err != io.EOF { + return err + } + + m.machine.pe += n + + err = m.machine.exec() + if err != nil { + return err + } + + // If we have successfully parsed a full metric line break out + if m.machine.finishMetric { + break + } + + } + + return nil +} + +// Position returns the current byte offset into the data. +func (m *streamMachine) Position() int { + return m.machine.Position() +} + +// LineOffset returns the byte offset of the current line. +func (m *streamMachine) LineOffset() int { + return m.machine.LineOffset() +} + +// LineNumber returns the current line number. Lines are counted based on the +// regular expression `\r?\n`. +func (m *streamMachine) LineNumber() int { + return m.machine.LineNumber() +} + +// Column returns the current column. +func (m *streamMachine) Column() int { + return m.machine.Column() +} + +// LineText returns the text of the current line that has been parsed so far. 
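+// Only the bytes up to the current parse position are returned, so a
+// line that is still being parsed yields a partial result.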
+func (m *streamMachine) LineText() string { + return string(m.machine.data[0:m.machine.p]) +} diff --git a/plugins/parsers/influx/machine_test.go b/plugins/parsers/influx/machine_test.go index 725634ae8..4ba3b8d68 100644 --- a/plugins/parsers/influx/machine_test.go +++ b/plugins/parsers/influx/machine_test.go @@ -1,8 +1,10 @@ package influx_test import ( + "bytes" "errors" "fmt" + "io" "testing" "github.com/influxdata/telegraf/plugins/parsers/influx" @@ -14,41 +16,59 @@ type TestingHandler struct { } func (h *TestingHandler) SetMeasurement(name []byte) error { + n := make([]byte, len(name)) + copy(n, name) + mname := Result{ Name: Measurement, - Value: name, + Value: n, } h.results = append(h.results, mname) return nil } func (h *TestingHandler) AddTag(key []byte, value []byte) error { + k := make([]byte, len(key)) + copy(k, key) + v := make([]byte, len(value)) + copy(v, value) + tagkey := Result{ Name: TagKey, - Value: key, + Value: k, } tagvalue := Result{ Name: TagValue, - Value: value, + Value: v, } h.results = append(h.results, tagkey, tagvalue) return nil } func (h *TestingHandler) AddInt(key []byte, value []byte) error { + k := make([]byte, len(key)) + copy(k, key) + v := make([]byte, len(value)) + copy(v, value) + fieldkey := Result{ Name: FieldKey, - Value: key, + Value: k, } fieldvalue := Result{ Name: FieldInt, - Value: value, + Value: v, } h.results = append(h.results, fieldkey, fieldvalue) return nil } func (h *TestingHandler) AddUint(key []byte, value []byte) error { + k := make([]byte, len(key)) + copy(k, key) + v := make([]byte, len(value)) + copy(v, value) + fieldkey := Result{ Name: FieldKey, Value: key, @@ -62,48 +82,66 @@ func (h *TestingHandler) AddUint(key []byte, value []byte) error { } func (h *TestingHandler) AddFloat(key []byte, value []byte) error { + k := make([]byte, len(key)) + copy(k, key) + v := make([]byte, len(value)) + copy(v, value) + fieldkey := Result{ Name: FieldKey, - Value: key, + Value: k, } fieldvalue := Result{ Name: FieldFloat, - Value: value, + Value: v, } h.results = append(h.results, fieldkey, fieldvalue) return nil } func (h *TestingHandler) AddString(key []byte, value []byte) error { + k := make([]byte, len(key)) + copy(k, key) + v := make([]byte, len(value)) + copy(v, value) + fieldkey := Result{ Name: FieldKey, - Value: key, + Value: k, } fieldvalue := Result{ Name: FieldString, - Value: value, + Value: v, } h.results = append(h.results, fieldkey, fieldvalue) return nil } func (h *TestingHandler) AddBool(key []byte, value []byte) error { + k := make([]byte, len(key)) + copy(k, key) + v := make([]byte, len(value)) + copy(v, value) + fieldkey := Result{ Name: FieldKey, - Value: key, + Value: k, } fieldvalue := Result{ Name: FieldBool, - Value: value, + Value: v, } h.results = append(h.results, fieldkey, fieldvalue) return nil } func (h *TestingHandler) SetTimestamp(tm []byte) error { + t := make([]byte, len(tm)) + copy(t, tm) + timestamp := Result{ Name: Timestamp, - Value: tm, + Value: t, } h.results = append(h.results, timestamp) return nil @@ -1676,63 +1714,64 @@ func TestMachine(t *testing.T) { } } +var positionTests = []struct { + name string + input []byte + lineno int + column int +}{ + { + name: "empty string", + input: []byte(""), + lineno: 1, + column: 1, + }, + { + name: "minimal", + input: []byte("cpu value=42"), + lineno: 1, + column: 13, + }, + { + name: "one newline", + input: []byte("cpu value=42\ncpu value=42"), + lineno: 2, + column: 13, + }, + { + name: "several newlines", + input: []byte("cpu value=42\n\n\n"), + lineno: 
4, + column: 1, + }, + { + name: "error on second line", + input: []byte("cpu value=42\ncpu value=invalid"), + lineno: 2, + column: 11, + }, + { + name: "error after comment line", + input: []byte("cpu value=42\n# comment\ncpu value=invalid"), + lineno: 3, + column: 11, + }, + { + name: "dos line endings", + input: []byte("cpu value=42\r\ncpu value=invalid"), + lineno: 2, + column: 11, + }, + { + name: "mac line endings not supported", + input: []byte("cpu value=42\rcpu value=invalid"), + lineno: 1, + column: 14, + }, +} + func TestMachinePosition(t *testing.T) { - var tests = []struct { - name string - input []byte - lineno int - column int - }{ - { - name: "empty string", - input: []byte(""), - lineno: 1, - column: 1, - }, - { - name: "minimal", - input: []byte("cpu value=42"), - lineno: 1, - column: 13, - }, - { - name: "one newline", - input: []byte("cpu value=42\ncpu value=42"), - lineno: 2, - column: 13, - }, - { - name: "several newlines", - input: []byte("cpu value=42\n\n\n"), - lineno: 4, - column: 1, - }, - { - name: "error on second line", - input: []byte("cpu value=42\ncpu value=invalid"), - lineno: 2, - column: 11, - }, - { - name: "error after comment line", - input: []byte("cpu value=42\n# comment\ncpu value=invalid"), - lineno: 3, - column: 11, - }, - { - name: "dos line endings", - input: []byte("cpu value=42\r\ncpu value=invalid"), - lineno: 2, - column: 11, - }, - { - name: "mac line endings not supported", - input: []byte("cpu value=42\rcpu value=invalid"), - lineno: 1, - column: 14, - }, - } - for _, tt := range tests { + for _, tt := range positionTests { t.Run(tt.name, func(t *testing.T) { handler := &TestingHandler{} fsm := influx.NewMachine(handler) @@ -1932,135 +1971,136 @@ func (h *MockHandler) SetTimestamp(tm []byte) error { return h.SetTimestampF(tm) } +var errorRecoveryTests = []struct { + name string + input []byte + handler *MockHandler + results []Result +}{ + { + name: "integer", + input: []byte("cpu value=43i\ncpu value=42i"), + handler: &MockHandler{ + SetMeasurementF: func(name []byte) error { + return nil + }, + AddIntF: func(name, value []byte) error { + if string(value) != "42i" { + return errors.New("handler error") + } + return nil + }, + }, + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: Error, + err: errors.New("handler error"), + }, + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldInt, + Value: []byte("42i"), + }, + { + Name: Success, + }, + }, + }, + { + name: "integer with timestamp", + input: []byte("cpu value=43i 1516241192000000000\ncpu value=42i"), + handler: &MockHandler{ + SetMeasurementF: func(name []byte) error { + return nil + }, + AddIntF: func(name, value []byte) error { + if string(value) != "42i" { + return errors.New("handler error") + } + return nil + }, + }, + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: Error, + err: errors.New("handler error"), + }, + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldInt, + Value: []byte("42i"), + }, + { + Name: Success, + }, + }, + }, + { + name: "unsigned", + input: []byte("cpu value=43u\ncpu value=42u"), + handler: &MockHandler{ + SetMeasurementF: func(name []byte) error { + return nil + }, + AddUintF: func(name, value []byte) error { + if string(value) != "42u" { + return errors.New("handler error") + } + return nil + }, + }, + results: []Result{ + { + Name: 
Measurement, + Value: []byte("cpu"), + }, + { + Name: Error, + err: errors.New("handler error"), + }, + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldUint, + Value: []byte("42u"), + }, + { + Name: Success, + }, + }, + }, +} + func TestHandlerErrorRecovery(t *testing.T) { - var tests = []struct { - name string - input []byte - handler *MockHandler - results []Result - }{ - { - name: "integer", - input: []byte("cpu value=43i\ncpu value=42i"), - handler: &MockHandler{ - SetMeasurementF: func(name []byte) error { - return nil - }, - AddIntF: func(name, value []byte) error { - if string(value) != "42i" { - return errors.New("handler error") - } - return nil - }, - }, - results: []Result{ - { - Name: Measurement, - Value: []byte("cpu"), - }, - { - Name: Error, - err: errors.New("handler error"), - }, - { - Name: Measurement, - Value: []byte("cpu"), - }, - { - Name: FieldKey, - Value: []byte("value"), - }, - { - Name: FieldInt, - Value: []byte("42i"), - }, - { - Name: Success, - }, - }, - }, - { - name: "integer with timestamp", - input: []byte("cpu value=43i 1516241192000000000\ncpu value=42i"), - handler: &MockHandler{ - SetMeasurementF: func(name []byte) error { - return nil - }, - AddIntF: func(name, value []byte) error { - if string(value) != "42i" { - return errors.New("handler error") - } - return nil - }, - }, - results: []Result{ - { - Name: Measurement, - Value: []byte("cpu"), - }, - { - Name: Error, - err: errors.New("handler error"), - }, - { - Name: Measurement, - Value: []byte("cpu"), - }, - { - Name: FieldKey, - Value: []byte("value"), - }, - { - Name: FieldInt, - Value: []byte("42i"), - }, - { - Name: Success, - }, - }, - }, - { - name: "unsigned", - input: []byte("cpu value=43u\ncpu value=42u"), - handler: &MockHandler{ - SetMeasurementF: func(name []byte) error { - return nil - }, - AddUintF: func(name, value []byte) error { - if string(value) != "42u" { - return errors.New("handler error") - } - return nil - }, - }, - results: []Result{ - { - Name: Measurement, - Value: []byte("cpu"), - }, - { - Name: Error, - err: errors.New("handler error"), - }, - { - Name: Measurement, - Value: []byte("cpu"), - }, - { - Name: FieldKey, - Value: []byte("value"), - }, - { - Name: FieldUint, - Value: []byte("42u"), - }, - { - Name: Success, - }, - }, - }, - } - for _, tt := range tests { + for _, tt := range errorRecoveryTests { t.Run(tt.name, func(t *testing.T) { fsm := influx.NewMachine(tt.handler) fsm.SetData(tt.input) @@ -2078,3 +2118,79 @@ func TestHandlerErrorRecovery(t *testing.T) { }) } } + +func TestStreamMachine(t *testing.T) { + type testcase struct { + name string + input io.Reader + results []Result + err error + } + + var tc []testcase + for _, tt := range tests { + tc = append(tc, testcase{ + name: tt.name, + input: bytes.NewBuffer([]byte(tt.input)), + results: tt.results, + err: tt.err, + }) + } + + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + handler := &TestingHandler{} + fsm := influx.NewStreamMachine(tt.input, handler) + + // Parse only up to 20 metrics; to avoid any bugs where the parser + // isn't terminated. 
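The recovery tests above exercise the machine's discard-line behavior: when a handler returns an error, the machine consumes the remainder of the offending line before returning, so the next call to `Next` resumes at the following line instead of failing on the same input forever. A hedged sketch of a caller-side loop built on that contract (`parseAll` is an illustrative wrapper, not part of the package; `influx.Handler` is the package's exported handler interface):

```go
package lineproto

import (
	"log"

	"github.com/influxdata/telegraf/plugins/parsers/influx"
)

// parseAll drives the machine the way the recovery tests do: a non-EOF
// error from Next means the bad line has already been discarded, so the
// loop simply logs the position and keeps going.
func parseAll(input []byte, handler influx.Handler) {
	fsm := influx.NewMachine(handler)
	fsm.SetData(input)
	for {
		err := fsm.Next()
		if err == influx.EOF {
			return
		}
		if err != nil {
			log.Printf("parse error at %d:%d: %v", fsm.LineNumber(), fsm.Column(), err)
		}
	}
}
```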
+ for i := 0; i < 20; i++ { + err := fsm.Next() + if err != nil && err == influx.EOF { + break + } + handler.Result(err) + } + + results := handler.Results() + require.Equal(t, tt.results, results) + }) + } +} + +func TestStreamMachinePosition(t *testing.T) { + type testcase struct { + name string + input io.Reader + lineno int + column int + } + + var tc []testcase + for _, tt := range positionTests { + tc = append(tc, testcase{ + name: tt.name, + input: bytes.NewBuffer([]byte(tt.input)), + lineno: tt.lineno, + column: tt.column, + }) + } + + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + handler := &TestingHandler{} + fsm := influx.NewStreamMachine(tt.input, handler) + + // Parse until an error or eof + for i := 0; i < 20; i++ { + err := fsm.Next() + if err != nil { + break + } + } + + require.Equal(t, tt.lineno, fsm.LineNumber(), "lineno") + require.Equal(t, tt.column, fsm.Column(), "column") + }) + } +} diff --git a/plugins/parsers/influx/parser.go b/plugins/parsers/influx/parser.go index f1cd9a032..1723cde33 100644 --- a/plugins/parsers/influx/parser.go +++ b/plugins/parsers/influx/parser.go @@ -3,8 +3,10 @@ package influx import ( "errors" "fmt" + "io" "strings" "sync" + "time" "github.com/influxdata/telegraf" ) @@ -17,6 +19,9 @@ var ( ErrNoMetric = errors.New("no metric in line") ) +type TimeFunc func() time.Time + +// ParseError indicates a error in the parsing of the text. type ParseError struct { Offset int LineOffset int @@ -38,6 +43,8 @@ func (e *ParseError) Error() string { return fmt.Sprintf("metric parse error: %s at %d:%d: %q", e.msg, e.LineNumber, e.Column, buffer) } +// Parser is an InfluxDB Line Protocol parser that implements the +// parsers.Parser interface. type Parser struct { DefaultTags map[string]string @@ -62,6 +69,10 @@ func NewSeriesParser(handler *MetricHandler) *Parser { } } +func (h *Parser) SetTimeFunc(f TimeFunc) { + h.handler.SetTimeFunc(f) +} + func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { p.Lock() defer p.Unlock() @@ -75,7 +86,6 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { } if err != nil { - p.handler.Reset() return nil, &ParseError{ Offset: p.machine.Position(), LineOffset: p.machine.LineOffset(), @@ -88,7 +98,6 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { metric, err := p.handler.Metric() if err != nil { - p.handler.Reset() return nil, err } @@ -126,10 +135,93 @@ func (p *Parser) applyDefaultTags(metrics []telegraf.Metric) { } for _, m := range metrics { - for k, v := range p.DefaultTags { - if !m.HasTag(k) { - m.AddTag(k, v) - } + p.applyDefaultTagsSingle(m) + } +} + +func (p *Parser) applyDefaultTagsSingle(metric telegraf.Metric) { + for k, v := range p.DefaultTags { + if !metric.HasTag(k) { + metric.AddTag(k, v) } } } + +// StreamParser is an InfluxDB Line Protocol parser. It is not safe for +// concurrent use in multiple goroutines. +type StreamParser struct { + machine *streamMachine + handler *MetricHandler +} + +func NewStreamParser(r io.Reader) *StreamParser { + handler := NewMetricHandler() + return &StreamParser{ + machine: NewStreamMachine(r, handler), + handler: handler, + } +} + +// SetTimeFunc changes the function used to determine the time of metrics +// without a timestamp. The default TimeFunc is time.Now. Useful mostly for +// testing, or perhaps if you want all metrics to have the same timestamp. 
+func (h *StreamParser) SetTimeFunc(f TimeFunc) { + h.handler.SetTimeFunc(f) +} + +func (h *StreamParser) SetTimePrecision(u time.Duration) { + h.handler.SetTimePrecision(u) +} + +// Next parses the next item from the stream. You can repeat calls to this +// function until it returns EOF. +func (p *StreamParser) Next() (telegraf.Metric, error) { + err := p.machine.Next() + if err == EOF { + return nil, EOF + } + + if err != nil { + return nil, &ParseError{ + Offset: p.machine.Position(), + LineOffset: p.machine.LineOffset(), + LineNumber: p.machine.LineNumber(), + Column: p.machine.Column(), + msg: err.Error(), + buf: p.machine.LineText(), + } + } + + metric, err := p.handler.Metric() + if err != nil { + return nil, err + } + + return metric, nil +} + +// Position returns the current byte offset into the data. +func (p *StreamParser) Position() int { + return p.machine.Position() +} + +// LineOffset returns the byte offset of the current line. +func (p *StreamParser) LineOffset() int { + return p.machine.LineOffset() +} + +// LineNumber returns the current line number. Lines are counted based on the +// regular expression `\r?\n`. +func (p *StreamParser) LineNumber() int { + return p.machine.LineNumber() +} + +// Column returns the current column. +func (p *StreamParser) Column() int { + return p.machine.Column() +} + +// LineText returns the text of the current line that has been parsed so far. +func (p *StreamParser) LineText() string { + return p.machine.LineText() +} diff --git a/plugins/parsers/influx/parser_test.go b/plugins/parsers/influx/parser_test.go index 50ab1e10f..386b99724 100644 --- a/plugins/parsers/influx/parser_test.go +++ b/plugins/parsers/influx/parser_test.go @@ -1,6 +1,7 @@ package influx import ( + "bytes" "strconv" "strings" "testing" @@ -8,6 +9,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -23,12 +25,11 @@ var DefaultTime = func() time.Time { } var ptests = []struct { - name string - input []byte - timeFunc func() time.Time - precision time.Duration - metrics []telegraf.Metric - err error + name string + input []byte + timeFunc func() time.Time + metrics []telegraf.Metric + err error }{ { name: "minimal", @@ -495,7 +496,7 @@ var ptests = []struct { err: nil, }, { - name: "no timestamp full precision", + name: "no timestamp", input: []byte("cpu value=42"), timeFunc: func() time.Time { return time.Unix(42, 123456789) @@ -514,27 +515,6 @@ var ptests = []struct { }, err: nil, }, - { - name: "no timestamp partial precision", - input: []byte("cpu value=42"), - timeFunc: func() time.Time { - return time.Unix(42, 123456789) - }, - precision: 1 * time.Millisecond, - metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 123000000), - ), - ), - }, - err: nil, - }, { name: "multiple lines", input: []byte("cpu value=42\ncpu value=42"), @@ -651,14 +631,11 @@ func TestParser(t *testing.T) { for _, tt := range ptests { t.Run(tt.name, func(t *testing.T) { handler := NewMetricHandler() - handler.SetTimeFunc(DefaultTime) - if tt.timeFunc != nil { - handler.SetTimeFunc(tt.timeFunc) - } - if tt.precision > 0 { - handler.SetTimePrecision(tt.precision) - } parser := NewParser(handler) + parser.SetTimeFunc(DefaultTime) + if tt.timeFunc != nil { + parser.SetTimeFunc(tt.timeFunc) + } metrics, err := parser.Parse(tt.input) require.Equal(t, tt.err, err) @@ -688,14 +665,41 @@ 
func BenchmarkParser(b *testing.B) { } } +func TestStreamParser(t *testing.T) { + for _, tt := range ptests { + t.Run(tt.name, func(t *testing.T) { + r := bytes.NewBuffer(tt.input) + parser := NewStreamParser(r) + parser.SetTimeFunc(DefaultTime) + if tt.timeFunc != nil { + parser.SetTimeFunc(tt.timeFunc) + } + + var i int + for { + m, err := parser.Next() + if err != nil { + if err == EOF { + break + } + require.Equal(t, tt.err, err) + break + } + + testutil.RequireMetricEqual(t, tt.metrics[i], m) + i++ + } + }) + } +} + func TestSeriesParser(t *testing.T) { var tests = []struct { - name string - input []byte - timeFunc func() time.Time - precision time.Duration - metrics []telegraf.Metric - err error + name string + input []byte + timeFunc func() time.Time + metrics []telegraf.Metric + err error }{ { name: "empty", @@ -749,14 +753,10 @@ func TestSeriesParser(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { handler := NewMetricHandler() - handler.SetTimeFunc(DefaultTime) - if tt.timeFunc != nil { - handler.SetTimeFunc(tt.timeFunc) - } - if tt.precision > 0 { - handler.SetTimePrecision(tt.precision) - } parser := NewSeriesParser(handler) + if tt.timeFunc != nil { + parser.SetTimeFunc(tt.timeFunc) + } metrics, err := parser.Parse(tt.input) require.Equal(t, tt.err, err) @@ -791,6 +791,11 @@ func TestParserErrorString(t *testing.T) { input: []byte("cpu " + strings.Repeat("ab", maxErrorBufferSize) + "=invalid\ncpu value=42"), errString: "metric parse error: expected field at 1:2054: \"cpu " + strings.Repeat("ab", maxErrorBufferSize)[:maxErrorBufferSize-4] + "...\"", }, + { + name: "multiple line error", + input: []byte("cpu value=42\ncpu value=invalid\ncpu value=42\ncpu value=invalid"), + errString: `metric parse error: expected field at 2:11: "cpu value=invalid"`, + }, } for _, tt := range ptests { @@ -803,3 +808,64 @@ func TestParserErrorString(t *testing.T) { }) } } + +func TestStreamParserErrorString(t *testing.T) { + var ptests = []struct { + name string + input []byte + errs []string + }{ + { + name: "multiple line error", + input: []byte("cpu value=42\ncpu value=invalid\ncpu value=42"), + errs: []string{ + `metric parse error: expected field at 2:11: "cpu value="`, + }, + }, + { + name: "handler error", + input: []byte("cpu value=9223372036854775808i\ncpu value=42"), + errs: []string{ + `metric parse error: value out of range at 1:31: "cpu value=9223372036854775808i"`, + }, + }, + { + name: "buffer too long", + input: []byte("cpu " + strings.Repeat("ab", maxErrorBufferSize) + "=invalid\ncpu value=42"), + errs: []string{ + "metric parse error: expected field at 1:2054: \"cpu " + strings.Repeat("ab", maxErrorBufferSize)[:maxErrorBufferSize-4] + "...\"", + }, + }, + { + name: "multiple errors", + input: []byte("foo value=1asdf2.0\nfoo value=2.0\nfoo value=3asdf2.0\nfoo value=4.0"), + errs: []string{ + `metric parse error: expected field at 1:12: "foo value=1"`, + `metric parse error: expected field at 3:12: "foo value=3"`, + }, + }, + } + + for _, tt := range ptests { + t.Run(tt.name, func(t *testing.T) { + parser := NewStreamParser(bytes.NewBuffer(tt.input)) + + var errs []error + for i := 0; i < 20; i++ { + _, err := parser.Next() + if err == EOF { + break + } + + if err != nil { + errs = append(errs, err) + } + } + + require.Equal(t, len(tt.errs), len(errs)) + for i, err := range errs { + require.Equal(t, tt.errs[i], err.Error()) + } + }) + } +} From b6892378a0dce0571d0ca748e2a17eedfc24ba01 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Wed, 4 Mar 
2020 11:30:07 -0700 Subject: [PATCH 1575/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fd54b4c82..0540debfa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -61,6 +61,8 @@ - [#6397](https://github.com/influxdata/telegraf/issues/6397): Fix conversion to floats in AzureDBResourceStats query in the sqlserver input. - [#6867](https://github.com/influxdata/telegraf/issues/6867): Fix case sensitive collation in sqlserver input. - [#7005](https://github.com/influxdata/telegraf/pull/7005): Search for chronyc only when chrony input plugin is enabled. +- [#2280](https://github.com/influxdata/telegraf/issues/2280): Fix request to InfluxDB Listener failing with EOF. +- [#6124](https://github.com/influxdata/telegraf/issues/6124): Fix InfluxDB listener to continue parsing after error. ## v1.13.4 [2020-02-25] From e4caa347a28132f55e784068aee9af6680204ae6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 4 Mar 2020 14:20:46 -0800 Subject: [PATCH 1576/1815] Add ClickHouse input plugin (#6441) --- .gitignore | 2 +- plugins/inputs/all/all.go | 1 + plugins/inputs/clickhouse/README.md | 119 ++++++ plugins/inputs/clickhouse/clickhouse.go | 390 ++++++++++++++++++ .../inputs/clickhouse/clickhouse_go1.11.go | 6 + .../inputs/clickhouse/clickhouse_go1.12.go | 8 + plugins/inputs/clickhouse/clickhouse_test.go | 161 ++++++++ plugins/inputs/clickhouse/dev/dhparam.pem | 13 + .../inputs/clickhouse/dev/docker-compose.yml | 16 + plugins/inputs/clickhouse/dev/telegraf.conf | 12 + .../inputs/clickhouse/dev/telegraf_ssl.conf | 16 + .../inputs/clickhouse/dev/tls_settings.xml | 4 + 12 files changed, 747 insertions(+), 1 deletion(-) create mode 100644 plugins/inputs/clickhouse/README.md create mode 100644 plugins/inputs/clickhouse/clickhouse.go create mode 100644 plugins/inputs/clickhouse/clickhouse_go1.11.go create mode 100644 plugins/inputs/clickhouse/clickhouse_go1.12.go create mode 100644 plugins/inputs/clickhouse/clickhouse_test.go create mode 100644 plugins/inputs/clickhouse/dev/dhparam.pem create mode 100644 plugins/inputs/clickhouse/dev/docker-compose.yml create mode 100644 plugins/inputs/clickhouse/dev/telegraf.conf create mode 100644 plugins/inputs/clickhouse/dev/telegraf_ssl.conf create mode 100644 plugins/inputs/clickhouse/dev/tls_settings.xml diff --git a/.gitignore b/.gitignore index 4176a0413..0ae500592 100644 --- a/.gitignore +++ b/.gitignore @@ -2,4 +2,4 @@ /telegraf /telegraf.exe /telegraf.gz -/vendor +/vendor \ No newline at end of file diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 274d7fd41..2484df614 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -19,6 +19,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/chrony" _ "github.com/influxdata/telegraf/plugins/inputs/cisco_telemetry_gnmi" _ "github.com/influxdata/telegraf/plugins/inputs/cisco_telemetry_mdt" + _ "github.com/influxdata/telegraf/plugins/inputs/clickhouse" _ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub" _ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub_push" _ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch" diff --git a/plugins/inputs/clickhouse/README.md b/plugins/inputs/clickhouse/README.md new file mode 100644 index 000000000..8eb478fbc --- /dev/null +++ b/plugins/inputs/clickhouse/README.md @@ -0,0 +1,119 @@ +# Telegraf Input Plugin: ClickHouse + +This plugin gathers the statistic data from [ClickHouse](https://github.com/ClickHouse/ClickHouse) server. 
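The plugin works entirely over ClickHouse's HTTP interface: it appends ` FORMAT JSON` to each SQL query and authenticates with the `X-ClickHouse-User`/`X-ClickHouse-Key` headers, as the `execQuery` helper later in this patch shows. A hedged, standalone sketch of an equivalent request (server address and credentials are placeholders):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	u, err := url.Parse("http://127.0.0.1:8123") // placeholder server
	if err != nil {
		panic(err)
	}

	q := u.Query()
	// "FORMAT JSON" makes ClickHouse return {"data": [...]}, which the
	// plugin decodes into per-metric structs.
	q.Set("query", "SELECT event AS metric, CAST(value AS UInt64) AS value FROM system.events FORMAT JSON")
	u.RawQuery = q.Encode()

	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Add("X-ClickHouse-User", "default") // placeholder credentials

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```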
+ +### Configuration +```ini +# Read metrics from one or many ClickHouse servers +[[inputs.clickhouse]] + ## Username for authorization on ClickHouse server + ## example: user = "default" + user = "default" + + ## Password for authorization on ClickHouse server + ## example: password = "super_secret" + + ## HTTP(s) timeout while getting metrics values + ## The timeout includes connection time, any redirects, and reading the response body. + ## example: timeout = 1s + # timeout = 5s + + ## List of servers for metrics scraping + ## metrics scrape via HTTP(s) clickhouse interface + ## https://clickhouse.tech/docs/en/interfaces/http/ + ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"] + servers = ["http://127.0.0.1:8123"] + + ## If "auto_discovery"" is "true" plugin tries to connect to all servers available in the cluster + ## with using same "user:password" described in "user" and "password" parameters + ## and get this server hostname list from "system.clusters" table + ## see + ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters + ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers + ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/ + ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables + ## example: auto_discovery = false + # auto_discovery = true + + ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" + ## when this filter present then "WHERE cluster IN (...)" filter will apply + ## please use only full cluster names here, regexp and glob filters is not allowed + ## for "/etc/clickhouse-server/config.d/remote.xml" + ## + ## + ## + ## + ## clickhouse-ru-1.local9000 + ## clickhouse-ru-2.local9000 + ## + ## + ## clickhouse-eu-1.local9000 + ## clickhouse-eu-2.local9000 + ## + ## + ## + ## + ## + ## + ## example: cluster_include = ["my-own-cluster"] + # cluster_include = [] + + ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" + ## when this filter present then "WHERE cluster NOT IN (...)" filter will apply + ## example: cluster_exclude = ["my-internal-not-discovered-cluster"] + # cluster_exclude = [] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` + +### Metrics: +- clickhouse_events + - tags: + - hostname (ClickHouse server hostname) + - cluster (Name of the cluster [optional]) + - shard_num (Shard number in the cluster [optional]) + - fields: + - all rows from system.events, all metrics is COUNTER type, look https://clickhouse.tech/docs/en/operations/system_tables/#system_tables-events + +- clickhouse_metrics + - tags: + - hostname (ClickHouse server hostname) + - cluster (Name of the cluster [optional]) + - shard_num (Shard number in the cluster [optional]) + - fields: + - all rows from system.metrics, all metrics is GAUGE type, look https://clickhouse.tech/docs/en/operations/system_tables/#system_tables-metrics + +- clickhouse_asynchronous_metrics + - tags: + - hostname (ClickHouse server hostname) + - cluster (Name of the cluster [optional]) + - shard_num (Shard number in the cluster [optional]) + - fields: + - all rows from system.asynchronous_metrics, all metrics is GAUGE type, look 
https://clickhouse.tech/docs/en/operations/system_tables/#system_tables-asynchronous_metrics + +- clickhouse_tables + - tags: + - hostname (ClickHouse server hostname) + - table + - database + - cluster (Name of the cluster [optional]) + - shard_num (Shard number in the cluster [optional]) + - fields: + - bytes + - parts + - rows + +### Example Output + +``` +clickhouse_events,cluster=test_cluster_two_shards_localhost,host=kshvakov,hostname=localhost,shard_num=1 read_compressed_bytes=212i,arena_alloc_chunks=35i,function_execute=85i,merge_tree_data_writer_rows=3i,rw_lock_acquired_read_locks=421i,file_open=46i,io_buffer_alloc_bytes=86451985i,inserted_bytes=196i,regexp_created=3i,real_time_microseconds=116832i,query=23i,network_receive_elapsed_microseconds=268i,merge_tree_data_writer_compressed_bytes=1080i,arena_alloc_bytes=212992i,disk_write_elapsed_microseconds=556i,inserted_rows=3i,compressed_read_buffer_bytes=81i,read_buffer_from_file_descriptor_read_bytes=148i,write_buffer_from_file_descriptor_write=47i,merge_tree_data_writer_blocks=3i,soft_page_faults=896i,hard_page_faults=7i,select_query=21i,merge_tree_data_writer_uncompressed_bytes=196i,merge_tree_data_writer_blocks_already_sorted=3i,user_time_microseconds=40196i,compressed_read_buffer_blocks=5i,write_buffer_from_file_descriptor_write_bytes=3246i,io_buffer_allocs=296i,created_write_buffer_ordinary=12i,disk_read_elapsed_microseconds=59347044i,network_send_elapsed_microseconds=1538i,context_lock=1040i,insert_query=1i,system_time_microseconds=14582i,read_buffer_from_file_descriptor_read=3i 1569421000000000000 +clickhouse_asynchronous_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,hostname=localhost,shard_num=1 jemalloc.metadata_thp=0i,replicas_max_relative_delay=0i,jemalloc.mapped=1803177984i,jemalloc.allocated=1724839256i,jemalloc.background_thread.run_interval=0i,jemalloc.background_thread.num_threads=0i,uncompressed_cache_cells=0i,replicas_max_absolute_delay=0i,mark_cache_bytes=0i,compiled_expression_cache_count=0i,replicas_sum_queue_size=0i,number_of_tables=35i,replicas_max_merges_in_queue=0i,replicas_max_inserts_in_queue=0i,replicas_sum_merges_in_queue=0i,replicas_max_queue_size=0i,mark_cache_files=0i,jemalloc.background_thread.num_runs=0i,jemalloc.active=1726210048i,uptime=158i,jemalloc.retained=380481536i,replicas_sum_inserts_in_queue=0i,uncompressed_cache_bytes=0i,number_of_databases=2i,jemalloc.metadata=9207704i,max_part_count_for_partition=1i,jemalloc.resident=1742442496i 1569421000000000000 +clickhouse_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,hostname=localhost,shard_num=1 
replicated_send=0i,write=0i,ephemeral_node=0i,zoo_keeper_request=0i,distributed_files_to_insert=0i,replicated_fetch=0i,background_schedule_pool_task=0i,interserver_connection=0i,leader_replica=0i,delayed_inserts=0i,global_thread_active=41i,merge=0i,readonly_replica=0i,memory_tracking_in_background_schedule_pool=0i,memory_tracking_for_merges=0i,zoo_keeper_session=0i,context_lock_wait=0i,storage_buffer_bytes=0i,background_pool_task=0i,send_external_tables=0i,zoo_keeper_watch=0i,part_mutation=0i,disk_space_reserved_for_merge=0i,distributed_send=0i,version_integer=19014003i,local_thread=0i,replicated_checks=0i,memory_tracking=0i,memory_tracking_in_background_processing_pool=0i,leader_election=0i,revision=54425i,open_file_for_read=0i,open_file_for_write=0i,storage_buffer_rows=0i,rw_lock_waiting_readers=0i,rw_lock_waiting_writers=0i,rw_lock_active_writers=0i,local_thread_active=0i,query_preempted=0i,tcp_connection=1i,http_connection=1i,read=2i,query_thread=0i,dict_cache_requests=0i,rw_lock_active_readers=1i,global_thread=43i,query=1i 1569421000000000000 +clickhouse_tables,cluster=test_cluster_two_shards_localhost,database=system,host=kshvakov,hostname=localhost,shard_num=1,table=trace_log bytes=754i,parts=1i,rows=1i 1569421000000000000 +clickhouse_tables,cluster=test_cluster_two_shards_localhost,database=default,host=kshvakov,hostname=localhost,shard_num=1,table=example bytes=326i,parts=2i,rows=2i 1569421000000000000 +``` \ No newline at end of file diff --git a/plugins/inputs/clickhouse/clickhouse.go b/plugins/inputs/clickhouse/clickhouse.go new file mode 100644 index 000000000..c122af4df --- /dev/null +++ b/plugins/inputs/clickhouse/clickhouse.go @@ -0,0 +1,390 @@ +package clickhouse + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +var defaultTimeout = 5 * time.Second + +var sampleConfig = ` + ## Username for authorization on ClickHouse server + ## example: user = "default"" + user = "default" + + ## Password for authorization on ClickHouse server + ## example: password = "super_secret" + + ## HTTP(s) timeout while getting metrics values + ## The timeout includes connection time, any redirects, and reading the response body. 
+ ## example: timeout = 1s + # timeout = 5s + + ## List of servers for metrics scraping + ## metrics scrape via HTTP(s) clickhouse interface + ## https://clickhouse.tech/docs/en/interfaces/http/ + ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"] + servers = ["http://127.0.0.1:8123"] + + ## If "auto_discovery"" is "true" plugin tries to connect to all servers available in the cluster + ## with using same "user:password" described in "user" and "password" parameters + ## and get this server hostname list from "system.clusters" table + ## see + ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters + ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers + ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/ + ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables + ## example: auto_discovery = false + # auto_discovery = true + + ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" + ## when this filter present then "WHERE cluster IN (...)" filter will apply + ## please use only full cluster names here, regexp and glob filters is not allowed + ## for "/etc/clickhouse-server/config.d/remote.xml" + ## + ## + ## + ## + ## clickhouse-ru-1.local9000 + ## clickhouse-ru-2.local9000 + ## + ## + ## clickhouse-eu-1.local9000 + ## clickhouse-eu-2.local9000 + ## + ## + ## + ## + ## + ## + ## example: cluster_include = ["my-own-cluster"] + # cluster_include = [] + + ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" + ## when this filter present then "WHERE cluster NOT IN (...)" filter will apply + ## example: cluster_exclude = ["my-internal-not-discovered-cluster"] + # cluster_exclude = [] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +` + +type connect struct { + Cluster string `json:"cluster"` + ShardNum int `json:"shard_num"` + Hostname string `json:"host_name"` + url *url.URL +} + +func init() { + inputs.Add("clickhouse", func() telegraf.Input { + return &ClickHouse{ + AutoDiscovery: true, + ClientConfig: tls.ClientConfig{ + InsecureSkipVerify: false, + }, + Timeout: internal.Duration{Duration: defaultTimeout}, + } + }) +} + +// ClickHouse Telegraf Input Plugin +type ClickHouse struct { + User string `toml:"user"` + Password string `toml:"password"` + Servers []string `toml:"servers"` + AutoDiscovery bool `toml:"auto_discovery"` + ClusterInclude []string `toml:"cluster_include"` + ClusterExclude []string `toml:"cluster_exclude"` + Timeout internal.Duration `toml:"timeout"` + client http.Client + tls.ClientConfig +} + +// SampleConfig returns the sample config +func (*ClickHouse) SampleConfig() string { + return sampleConfig +} + +// Description return plugin description +func (*ClickHouse) Description() string { + return "Read metrics from one or many ClickHouse servers" +} + +// Start ClickHouse input service +func (ch *ClickHouse) Start(telegraf.Accumulator) error { + timeout := defaultTimeout + if ch.Timeout.Duration != 0 { + timeout = ch.Timeout.Duration + } + tlsCfg, err := ch.ClientConfig.TLSConfig() + if err != nil { + return err + } + + ch.client = http.Client{ + Timeout: timeout, + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + Proxy: http.ProxyFromEnvironment, + }, + } + 
return nil +} + +// Gather collect data from ClickHouse server +func (ch *ClickHouse) Gather(acc telegraf.Accumulator) (err error) { + var ( + connects []connect + exists = func(host string) bool { + for _, c := range connects { + if c.Hostname == host { + return true + } + } + return false + } + ) + + for _, server := range ch.Servers { + u, err := url.Parse(server) + if err != nil { + return err + } + switch { + case ch.AutoDiscovery: + var conns []connect + if err := ch.execQuery(u, "SELECT cluster, shard_num, host_name FROM system.clusters "+ch.clusterIncludeExcludeFilter(), &conns); err != nil { + acc.AddError(err) + continue + } + for _, c := range conns { + if !exists(c.Hostname) { + c.url = &url.URL{ + Scheme: u.Scheme, + Host: net.JoinHostPort(c.Hostname, u.Port()), + } + connects = append(connects, c) + } + } + default: + connects = append(connects, connect{ + url: u, + }) + } + } + + for _, conn := range connects { + if err := ch.tables(acc, &conn); err != nil { + acc.AddError(err) + } + for metric := range commonMetrics { + if err := ch.commonMetrics(acc, &conn, metric); err != nil { + acc.AddError(err) + } + } + } + return nil +} + +func (ch *ClickHouse) clusterIncludeExcludeFilter() string { + if len(ch.ClusterInclude) == 0 && len(ch.ClusterExclude) == 0 { + return "" + } + var ( + escape = func(in string) string { + return "'" + strings.NewReplacer(`\`, `\\`, `'`, `\'`).Replace(in) + "'" + } + makeFilter = func(expr string, args []string) string { + in := make([]string, 0, len(args)) + for _, v := range args { + in = append(in, escape(v)) + } + return fmt.Sprintf("cluster %s (%s)", expr, strings.Join(in, ", ")) + } + includeFilter, excludeFilter string + ) + + if len(ch.ClusterInclude) != 0 { + includeFilter = makeFilter("IN", ch.ClusterInclude) + } + if len(ch.ClusterExclude) != 0 { + excludeFilter = makeFilter("NOT IN", ch.ClusterExclude) + } + if includeFilter != "" && excludeFilter != "" { + return "WHERE " + includeFilter + " OR " + excludeFilter + } + if includeFilter == "" && excludeFilter != "" { + return "WHERE " + excludeFilter + } + if includeFilter != "" && excludeFilter == "" { + return "WHERE " + includeFilter + } + return "" +} + +func (ch *ClickHouse) commonMetrics(acc telegraf.Accumulator, conn *connect, metric string) error { + var result []struct { + Metric string `json:"metric"` + Value chUInt64 `json:"value"` + } + if err := ch.execQuery(conn.url, commonMetrics[metric], &result); err != nil { + return err + } + + tags := map[string]string{ + "source": conn.Hostname, + } + if len(conn.Cluster) != 0 { + tags["cluster"] = conn.Cluster + } + if conn.ShardNum != 0 { + tags["shard_num"] = strconv.Itoa(conn.ShardNum) + } + + fields := make(map[string]interface{}) + for _, r := range result { + fields[internal.SnakeCase(r.Metric)] = uint64(r.Value) + } + + acc.AddFields("clickhouse_"+metric, fields, tags) + + return nil +} + +func (ch *ClickHouse) tables(acc telegraf.Accumulator, conn *connect) error { + var parts []struct { + Database string `json:"database"` + Table string `json:"table"` + Bytes chUInt64 `json:"bytes"` + Parts chUInt64 `json:"parts"` + Rows chUInt64 `json:"rows"` + } + + if err := ch.execQuery(conn.url, systemParts, &parts); err != nil { + return err + } + tags := map[string]string{ + "source": conn.Hostname, + } + if len(conn.Cluster) != 0 { + tags["cluster"] = conn.Cluster + } + if conn.ShardNum != 0 { + tags["shard_num"] = strconv.Itoa(conn.ShardNum) + } + for _, part := range parts { + tags["table"] = part.Table + tags["database"] = 
part.Database + acc.AddFields("clickhouse_tables", + map[string]interface{}{ + "bytes": uint64(part.Bytes), + "parts": uint64(part.Parts), + "rows": uint64(part.Rows), + }, + tags, + ) + } + return nil +} + +type clickhouseError struct { + StatusCode int + body []byte +} + +func (e *clickhouseError) Error() string { + return fmt.Sprintf("received error code %d: %s", e.StatusCode, e.body) +} + +func (ch *ClickHouse) execQuery(url *url.URL, query string, i interface{}) error { + q := url.Query() + q.Set("query", query+" FORMAT JSON") + url.RawQuery = q.Encode() + req, _ := http.NewRequest("GET", url.String(), nil) + if ch.User != "" { + req.Header.Add("X-ClickHouse-User", ch.User) + } + if ch.Password != "" { + req.Header.Add("X-ClickHouse-Key", ch.Password) + } + resp, err := ch.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode >= 300 { + body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + return &clickhouseError{ + StatusCode: resp.StatusCode, + body: body, + } + } + var response struct { + Data json.RawMessage + } + if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { + return err + } + return json.Unmarshal(response.Data, i) +} + +// see https://clickhouse.yandex/docs/en/operations/settings/settings/#session_settings-output_format_json_quote_64bit_integers +type chUInt64 uint64 + +func (i *chUInt64) UnmarshalJSON(b []byte) error { + b = bytes.TrimPrefix(b, []byte(`"`)) + b = bytes.TrimSuffix(b, []byte(`"`)) + v, err := strconv.ParseUint(string(b), 10, 64) + if err != nil { + return err + } + *i = chUInt64(v) + return nil +} + +const ( + systemEventsSQL = "SELECT event AS metric, CAST(value AS UInt64) AS value FROM system.events" + systemMetricsSQL = "SELECT metric, CAST(value AS UInt64) AS value FROM system.metrics" + systemAsyncMetricsSQL = "SELECT metric, CAST(value AS UInt64) AS value FROM system.asynchronous_metrics" + systemParts = ` + SELECT + database, + table, + SUM(bytes) AS bytes, + COUNT(*) AS parts, + SUM(rows) AS rows + FROM system.parts + WHERE active = 1 + GROUP BY + database, table + ORDER BY + database, table + ` +) + +var commonMetrics = map[string]string{ + "events": systemEventsSQL, + "metrics": systemMetricsSQL, + "asynchronous_metrics": systemAsyncMetricsSQL, +} + +var _ telegraf.ServiceInput = &ClickHouse{} diff --git a/plugins/inputs/clickhouse/clickhouse_go1.11.go b/plugins/inputs/clickhouse/clickhouse_go1.11.go new file mode 100644 index 000000000..e043dd492 --- /dev/null +++ b/plugins/inputs/clickhouse/clickhouse_go1.11.go @@ -0,0 +1,6 @@ +// +build !go1.12 + +package clickhouse + +// Stop ClickHouse input service +func (ch *ClickHouse) Stop() {} diff --git a/plugins/inputs/clickhouse/clickhouse_go1.12.go b/plugins/inputs/clickhouse/clickhouse_go1.12.go new file mode 100644 index 000000000..86bb69e2b --- /dev/null +++ b/plugins/inputs/clickhouse/clickhouse_go1.12.go @@ -0,0 +1,8 @@ +// +build go1.12 + +package clickhouse + +// Stop ClickHouse input service +func (ch *ClickHouse) Stop() { + ch.client.CloseIdleConnections() +} diff --git a/plugins/inputs/clickhouse/clickhouse_test.go b/plugins/inputs/clickhouse/clickhouse_test.go new file mode 100644 index 000000000..382d2148a --- /dev/null +++ b/plugins/inputs/clickhouse/clickhouse_test.go @@ -0,0 +1,161 @@ +package clickhouse + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +func TestClusterIncludeExcludeFilter(t 
*testing.T) { + ch := ClickHouse{} + if assert.Equal(t, "", ch.clusterIncludeExcludeFilter()) { + ch.ClusterExclude = []string{"test_cluster"} + assert.Equal(t, "WHERE cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter()) + + ch.ClusterExclude = []string{"test_cluster"} + ch.ClusterInclude = []string{"cluster"} + assert.Equal(t, "WHERE cluster IN ('cluster') OR cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter()) + + ch.ClusterExclude = []string{} + ch.ClusterInclude = []string{"cluster1", "cluster2"} + assert.Equal(t, "WHERE cluster IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter()) + + ch.ClusterExclude = []string{"cluster1", "cluster2"} + ch.ClusterInclude = []string{} + assert.Equal(t, "WHERE cluster NOT IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter()) + } +} + +func TestChInt64(t *testing.T) { + assets := map[string]uint64{ + `"1"`: 1, + "1": 1, + "42": 42, + `"42"`: 42, + "18446743937525109187": 18446743937525109187, + } + for src, expected := range assets { + var v chUInt64 + if err := v.UnmarshalJSON([]byte(src)); assert.NoError(t, err) { + assert.Equal(t, expected, uint64(v)) + } + } +} + +func TestGather(t *testing.T) { + var ( + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + type result struct { + Data interface{} `json:"data"` + } + enc := json.NewEncoder(w) + switch query := r.URL.Query().Get("query"); { + case strings.Contains(query, "system.parts"): + enc.Encode(result{ + Data: []struct { + Database string `json:"database"` + Table string `json:"table"` + Bytes chUInt64 `json:"bytes"` + Parts chUInt64 `json:"parts"` + Rows chUInt64 `json:"rows"` + }{ + { + Database: "test_database", + Table: "test_table", + Bytes: 1, + Parts: 10, + Rows: 100, + }, + }, + }) + case strings.Contains(query, "system.events"): + enc.Encode(result{ + Data: []struct { + Metric string `json:"metric"` + Value chUInt64 `json:"value"` + }{ + { + Metric: "TestSystemEvent", + Value: 1000, + }, + { + Metric: "TestSystemEvent2", + Value: 2000, + }, + }, + }) + case strings.Contains(query, "system.metrics"): + enc.Encode(result{ + Data: []struct { + Metric string `json:"metric"` + Value chUInt64 `json:"value"` + }{ + { + Metric: "TestSystemMetric", + Value: 1000, + }, + { + Metric: "TestSystemMetric2", + Value: 2000, + }, + }, + }) + case strings.Contains(query, "system.asynchronous_metrics"): + enc.Encode(result{ + Data: []struct { + Metric string `json:"metric"` + Value chUInt64 `json:"value"` + }{ + { + Metric: "TestSystemAsynchronousMetric", + Value: 1000, + }, + { + Metric: "TestSystemAsynchronousMetric2", + Value: 2000, + }, + }, + }) + } + })) + ch = &ClickHouse{ + Servers: []string{ + ts.URL, + }, + } + acc = &testutil.Accumulator{} + ) + defer ts.Close() + ch.Gather(acc) + + acc.AssertContainsFields(t, "clickhouse_tables", + map[string]interface{}{ + "bytes": uint64(1), + "parts": uint64(10), + "rows": uint64(100), + }, + ) + acc.AssertContainsFields(t, "clickhouse_events", + map[string]interface{}{ + "test_system_event": uint64(1000), + "test_system_event2": uint64(2000), + }, + ) + acc.AssertContainsFields(t, "clickhouse_metrics", + map[string]interface{}{ + "test_system_metric": uint64(1000), + "test_system_metric2": uint64(2000), + }, + ) + acc.AssertContainsFields(t, "clickhouse_asynchronous_metrics", + map[string]interface{}{ + "test_system_asynchronous_metric": uint64(1000), + "test_system_asynchronous_metric2": uint64(2000), + }, + ) +} diff --git a/plugins/inputs/clickhouse/dev/dhparam.pem 
b/plugins/inputs/clickhouse/dev/dhparam.pem new file mode 100644 index 000000000..5ae6d7bbe --- /dev/null +++ b/plugins/inputs/clickhouse/dev/dhparam.pem @@ -0,0 +1,13 @@ +-----BEGIN DH PARAMETERS----- +MIICCAKCAgEAoo1x7wI5K57P1/AkHUmVWzKNfy46b/ni/QtClomTB78Ks1FP8dzs +CQBW/pfL8yidxTialNhMRCZO1J+uPjTvd8dG8SFZzVylkF41LBNrUD+MLyh/b6Nr +8uWf3tqYCtsiqsQsnq/oU7C29wn6UjhPPVbRRDPGyJUFOgp0ebPR0L2gOc5HhXSF +Tt0fuWnvgZJBKGvyodby3p2CSheu8K6ZteVc8ZgHuanhCQA30nVN+yNQzyozlB2H +B9jxTDPJy8+/4Mui3iiNyXg6FaiI9lWdH7xgKoZlHi8BWlLz5Se9JVNYg0dPrMTz +K0itQyyTKUlK73x+1uPm6q1AJwz08EZiCXNbk58/Sf+pdwDmAO2QSRrERC73vnvc +B1+4+Kf7RS7oYpAHknKm/MFnkCJLVIq1b6kikYcIgVCYe+Z1UytSmG1QfwdgL8QQ +TVYVHBg4w07+s3/IJ1ekvNhdxpkmmevYt7GjohWu8vKkip4se+reNdo+sqLsgFKf +1IuDMD36zn9FVukvs7e3BwZCTkdosGHvHGjA7zm2DwPPO16hCvJ4mE6ULLpp2NEw +EBYWm3Tv6M/xtrF5Afyh0gAh7eL767/qsarbx6jlqs+dnh3LptqsE3WerWK54+0B +3Hr5CVfgYbeXuW2HeFb+fS6CNUWmiAsq1XRiz5p16hpeMGYN/qyF1IsCAQI= +-----END DH PARAMETERS----- diff --git a/plugins/inputs/clickhouse/dev/docker-compose.yml b/plugins/inputs/clickhouse/dev/docker-compose.yml new file mode 100644 index 000000000..a8b22c34d --- /dev/null +++ b/plugins/inputs/clickhouse/dev/docker-compose.yml @@ -0,0 +1,16 @@ +version: '3' + +services: + clickhouse: + image: yandex/clickhouse-server:latest + volumes: + - ./dhparam.pem:/etc/clickhouse-server/dhparam.pem + - ./tls_settings.xml:/etc/clickhouse-server/config.d/00-tls_settings.xml + - ../../../../testutil/pki/serverkey.pem:/etc/clickhouse-server/server.key + - ../../../../testutil/pki/servercert.pem:/etc/clickhouse-server/server.crt + restart: always + ports: + - 8123:8123 + - 8443:8443 + - 9000:9000 + - 9009:9009 \ No newline at end of file diff --git a/plugins/inputs/clickhouse/dev/telegraf.conf b/plugins/inputs/clickhouse/dev/telegraf.conf new file mode 100644 index 000000000..883baf845 --- /dev/null +++ b/plugins/inputs/clickhouse/dev/telegraf.conf @@ -0,0 +1,12 @@ +### ClickHouse input plugin + +[[inputs.clickhouse]] + timeout = 2 + user = "default" + servers = ["http://127.0.0.1:8123"] + auto_discovery = true + cluster_include = [] + cluster_exclude = ["test_shard_localhost"] + +[[outputs.file]] + files = ["stdout"] diff --git a/plugins/inputs/clickhouse/dev/telegraf_ssl.conf b/plugins/inputs/clickhouse/dev/telegraf_ssl.conf new file mode 100644 index 000000000..21288d84f --- /dev/null +++ b/plugins/inputs/clickhouse/dev/telegraf_ssl.conf @@ -0,0 +1,16 @@ +### ClickHouse input plugin + +[[inputs.clickhouse]] + timeout = 2 + user = "default" + servers = ["https://127.0.0.1:8443"] + auto_discovery = true + cluster_include = [] + cluster_exclude = ["test_shard_localhost"] + insecure_skip_verify = false + tls_cert = "./testutil/pki/clientcert.pem" + tls_key = "./testutil/pki/clientkey.pem" + tls_ca = "./testutil/pki/cacert.pem" + +[[outputs.file]] + files = ["stdout"] diff --git a/plugins/inputs/clickhouse/dev/tls_settings.xml b/plugins/inputs/clickhouse/dev/tls_settings.xml new file mode 100644 index 000000000..cf6716b82 --- /dev/null +++ b/plugins/inputs/clickhouse/dev/tls_settings.xml @@ -0,0 +1,4 @@ + + 8443 + 9440 + \ No newline at end of file From bffd57aa5860ed49fcf33fc8093efa10eaa5f415 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 4 Mar 2020 14:42:50 -0800 Subject: [PATCH 1577/1815] Update readme and changelog --- .gitignore | 2 +- CHANGELOG.md | 3 +- README.md | 1 + plugins/inputs/clickhouse/README.md | 33 +++++++++++-------- plugins/inputs/clickhouse/clickhouse.go | 8 ++--- .../inputs/clickhouse/dev/docker-compose.yml | 2 +- 
.../inputs/clickhouse/dev/tls_settings.xml | 2 +- 7 files changed, 29 insertions(+), 22 deletions(-) diff --git a/.gitignore b/.gitignore index 0ae500592..4176a0413 100644 --- a/.gitignore +++ b/.gitignore @@ -2,4 +2,4 @@ /telegraf /telegraf.exe /telegraf.gz -/vendor \ No newline at end of file +/vendor diff --git a/CHANGELOG.md b/CHANGELOG.md index 0540debfa..b25ad4241 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,10 +11,11 @@ #### New Inputs +- [clickhouse](/plugins/inputs/clickhouse/README.md) - Contributed by @kshvakov +- [execd](/plugins/inputs/execd/README.md) - Contributed by @jgraichen - [infiniband](/plugins/inputs/infiniband/README.md) - Contributed by @willfurnell - [modbus](/plugins/inputs/modbus/README.md) - Contributed by @garciaolais - [monit](/plugins/inputs/monit/README.md) - Contributed by @SirishaGopigiri -- [execd](/plugins/inputs/execd/README.md) - Contributed by @jgraichen #### New Processors diff --git a/README.md b/README.md index 49a4a456e..d864c5385 100644 --- a/README.md +++ b/README.md @@ -165,6 +165,7 @@ For documentation on the latest development code see the [documentation index][d * [chrony](./plugins/inputs/chrony) * [cisco_telemetry_gnmi](./plugins/inputs/cisco_telemetry_gnmi) * [cisco_telemetry_mdt](./plugins/inputs/cisco_telemetry_mdt) +* [clickhouse](./plugins/inputs/clickhouse) * [cloud_pubsub](./plugins/inputs/cloud_pubsub) Google Cloud Pub/Sub * [cloud_pubsub_push](./plugins/inputs/cloud_pubsub_push) Google Cloud Pub/Sub push endpoint * [conntrack](./plugins/inputs/conntrack) diff --git a/plugins/inputs/clickhouse/README.md b/plugins/inputs/clickhouse/README.md index 8eb478fbc..fbd182287 100644 --- a/plugins/inputs/clickhouse/README.md +++ b/plugins/inputs/clickhouse/README.md @@ -1,22 +1,22 @@ -# Telegraf Input Plugin: ClickHouse +# ClickHouse Input Plugin -This plugin gathers the statistic data from [ClickHouse](https://github.com/ClickHouse/ClickHouse) server. +This plugin gathers the statistic data from [ClickHouse](https://github.com/ClickHouse/ClickHouse) server. ### Configuration -```ini +```toml # Read metrics from one or many ClickHouse servers [[inputs.clickhouse]] ## Username for authorization on ClickHouse server ## example: user = "default" user = "default" - + ## Password for authorization on ClickHouse server ## example: password = "super_secret" - ## HTTP(s) timeout while getting metrics values + ## HTTP(s) timeout while getting metrics values ## The timeout includes connection time, any redirects, and reading the response body. 
## example: timeout = 1s - # timeout = 5s + # timeout = 5s ## List of servers for metrics scraping ## metrics scrape via HTTP(s) clickhouse interface @@ -54,7 +54,7 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic ## ## ## - ## + ## ## example: cluster_include = ["my-own-cluster"] # cluster_include = [] @@ -71,22 +71,23 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic # insecure_skip_verify = false ``` -### Metrics: +### Metrics + - clickhouse_events - tags: - hostname (ClickHouse server hostname) - cluster (Name of the cluster [optional]) - shard_num (Shard number in the cluster [optional]) - fields: - - all rows from system.events, all metrics is COUNTER type, look https://clickhouse.tech/docs/en/operations/system_tables/#system_tables-events + - all rows from [system.events][] -- clickhouse_metrics ++ clickhouse_metrics - tags: - hostname (ClickHouse server hostname) - cluster (Name of the cluster [optional]) - shard_num (Shard number in the cluster [optional]) - fields: - - all rows from system.metrics, all metrics is GAUGE type, look https://clickhouse.tech/docs/en/operations/system_tables/#system_tables-metrics + - all rows from [system.metrics][] - clickhouse_asynchronous_metrics - tags: @@ -94,9 +95,9 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - cluster (Name of the cluster [optional]) - shard_num (Shard number in the cluster [optional]) - fields: - - all rows from system.asynchronous_metrics, all metrics is GAUGE type, look https://clickhouse.tech/docs/en/operations/system_tables/#system_tables-asynchronous_metrics + - all rows from [system.asynchronous_metrics][] -- clickhouse_tables ++ clickhouse_tables - tags: - hostname (ClickHouse server hostname) - table @@ -116,4 +117,8 @@ clickhouse_asynchronous_metrics,cluster=test_cluster_two_shards_localhost,host=k clickhouse_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,hostname=localhost,shard_num=1 replicated_send=0i,write=0i,ephemeral_node=0i,zoo_keeper_request=0i,distributed_files_to_insert=0i,replicated_fetch=0i,background_schedule_pool_task=0i,interserver_connection=0i,leader_replica=0i,delayed_inserts=0i,global_thread_active=41i,merge=0i,readonly_replica=0i,memory_tracking_in_background_schedule_pool=0i,memory_tracking_for_merges=0i,zoo_keeper_session=0i,context_lock_wait=0i,storage_buffer_bytes=0i,background_pool_task=0i,send_external_tables=0i,zoo_keeper_watch=0i,part_mutation=0i,disk_space_reserved_for_merge=0i,distributed_send=0i,version_integer=19014003i,local_thread=0i,replicated_checks=0i,memory_tracking=0i,memory_tracking_in_background_processing_pool=0i,leader_election=0i,revision=54425i,open_file_for_read=0i,open_file_for_write=0i,storage_buffer_rows=0i,rw_lock_waiting_readers=0i,rw_lock_waiting_writers=0i,rw_lock_active_writers=0i,local_thread_active=0i,query_preempted=0i,tcp_connection=1i,http_connection=1i,read=2i,query_thread=0i,dict_cache_requests=0i,rw_lock_active_readers=1i,global_thread=43i,query=1i 1569421000000000000 clickhouse_tables,cluster=test_cluster_two_shards_localhost,database=system,host=kshvakov,hostname=localhost,shard_num=1,table=trace_log bytes=754i,parts=1i,rows=1i 1569421000000000000 clickhouse_tables,cluster=test_cluster_two_shards_localhost,database=default,host=kshvakov,hostname=localhost,shard_num=1,table=example bytes=326i,parts=2i,rows=2i 1569421000000000000 -``` \ No newline at end of file +``` + +[system.events]: 
https://clickhouse.tech/docs/en/operations/system_tables/#system_tables-events +[system.metrics]: https://clickhouse.tech/docs/en/operations/system_tables/#system_tables-metrics +[system.asynchronous_metrics]: https://clickhouse.tech/docs/en/operations/system_tables/#system_tables-asynchronous_metrics diff --git a/plugins/inputs/clickhouse/clickhouse.go b/plugins/inputs/clickhouse/clickhouse.go index c122af4df..dcfc74ad6 100644 --- a/plugins/inputs/clickhouse/clickhouse.go +++ b/plugins/inputs/clickhouse/clickhouse.go @@ -25,14 +25,14 @@ var sampleConfig = ` ## Username for authorization on ClickHouse server ## example: user = "default"" user = "default" - + ## Password for authorization on ClickHouse server ## example: password = "super_secret" - ## HTTP(s) timeout while getting metrics values + ## HTTP(s) timeout while getting metrics values ## The timeout includes connection time, any redirects, and reading the response body. ## example: timeout = 1s - # timeout = 5s + # timeout = 5s ## List of servers for metrics scraping ## metrics scrape via HTTP(s) clickhouse interface @@ -70,7 +70,7 @@ var sampleConfig = ` ## ## ## - ## + ## ## example: cluster_include = ["my-own-cluster"] # cluster_include = [] diff --git a/plugins/inputs/clickhouse/dev/docker-compose.yml b/plugins/inputs/clickhouse/dev/docker-compose.yml index a8b22c34d..4dd4d1846 100644 --- a/plugins/inputs/clickhouse/dev/docker-compose.yml +++ b/plugins/inputs/clickhouse/dev/docker-compose.yml @@ -13,4 +13,4 @@ services: - 8123:8123 - 8443:8443 - 9000:9000 - - 9009:9009 \ No newline at end of file + - 9009:9009 diff --git a/plugins/inputs/clickhouse/dev/tls_settings.xml b/plugins/inputs/clickhouse/dev/tls_settings.xml index cf6716b82..6268b6a12 100644 --- a/plugins/inputs/clickhouse/dev/tls_settings.xml +++ b/plugins/inputs/clickhouse/dev/tls_settings.xml @@ -1,4 +1,4 @@ 8443 9440 - \ No newline at end of file + From 6edd57ea871350e9207095097e1390618fffa303 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 4 Mar 2020 14:44:37 -0800 Subject: [PATCH 1578/1815] Rename clickhouse user to username --- plugins/inputs/clickhouse/README.md | 2 +- plugins/inputs/clickhouse/clickhouse.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/clickhouse/README.md b/plugins/inputs/clickhouse/README.md index fbd182287..5561abf61 100644 --- a/plugins/inputs/clickhouse/README.md +++ b/plugins/inputs/clickhouse/README.md @@ -8,7 +8,7 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic [[inputs.clickhouse]] ## Username for authorization on ClickHouse server ## example: user = "default" - user = "default" + username = "default" ## Password for authorization on ClickHouse server ## example: password = "super_secret" diff --git a/plugins/inputs/clickhouse/clickhouse.go b/plugins/inputs/clickhouse/clickhouse.go index dcfc74ad6..cf28def66 100644 --- a/plugins/inputs/clickhouse/clickhouse.go +++ b/plugins/inputs/clickhouse/clickhouse.go @@ -24,7 +24,7 @@ var defaultTimeout = 5 * time.Second var sampleConfig = ` ## Username for authorization on ClickHouse server ## example: user = "default"" - user = "default" + username = "default" ## Password for authorization on ClickHouse server ## example: password = "super_secret" @@ -108,7 +108,7 @@ func init() { // ClickHouse Telegraf Input Plugin type ClickHouse struct { - User string `toml:"user"` + Username string `toml:"username"` Password string `toml:"password"` Servers []string `toml:"servers"` AutoDiscovery bool 
`toml:"auto_discovery"` @@ -320,8 +320,8 @@ func (ch *ClickHouse) execQuery(url *url.URL, query string, i interface{}) error q.Set("query", query+" FORMAT JSON") url.RawQuery = q.Encode() req, _ := http.NewRequest("GET", url.String(), nil) - if ch.User != "" { - req.Header.Add("X-ClickHouse-User", ch.User) + if ch.Username != "" { + req.Header.Add("X-ClickHouse-User", ch.Username) } if ch.Password != "" { req.Header.Add("X-ClickHouse-Key", ch.Password) From 3817aafdcb063c5f22f3a6de95e070df03ced769 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 4 Mar 2020 14:46:27 -0800 Subject: [PATCH 1579/1815] Update clickhouse docs with renamed source tag --- plugins/inputs/clickhouse/README.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/clickhouse/README.md b/plugins/inputs/clickhouse/README.md index 5561abf61..5c1d233e6 100644 --- a/plugins/inputs/clickhouse/README.md +++ b/plugins/inputs/clickhouse/README.md @@ -75,7 +75,7 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - clickhouse_events - tags: - - hostname (ClickHouse server hostname) + - source (ClickHouse server hostname) - cluster (Name of the cluster [optional]) - shard_num (Shard number in the cluster [optional]) - fields: @@ -83,7 +83,7 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic + clickhouse_metrics - tags: - - hostname (ClickHouse server hostname) + - source (ClickHouse server hostname) - cluster (Name of the cluster [optional]) - shard_num (Shard number in the cluster [optional]) - fields: @@ -91,7 +91,7 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - clickhouse_asynchronous_metrics - tags: - - hostname (ClickHouse server hostname) + - source (ClickHouse server hostname) - cluster (Name of the cluster [optional]) - shard_num (Shard number in the cluster [optional]) - fields: @@ -99,7 +99,7 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic + clickhouse_tables - tags: - - hostname (ClickHouse server hostname) + - source (ClickHouse server hostname) - table - database - cluster (Name of the cluster [optional]) @@ -112,11 +112,11 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic ### Example Output ``` -clickhouse_events,cluster=test_cluster_two_shards_localhost,host=kshvakov,hostname=localhost,shard_num=1 read_compressed_bytes=212i,arena_alloc_chunks=35i,function_execute=85i,merge_tree_data_writer_rows=3i,rw_lock_acquired_read_locks=421i,file_open=46i,io_buffer_alloc_bytes=86451985i,inserted_bytes=196i,regexp_created=3i,real_time_microseconds=116832i,query=23i,network_receive_elapsed_microseconds=268i,merge_tree_data_writer_compressed_bytes=1080i,arena_alloc_bytes=212992i,disk_write_elapsed_microseconds=556i,inserted_rows=3i,compressed_read_buffer_bytes=81i,read_buffer_from_file_descriptor_read_bytes=148i,write_buffer_from_file_descriptor_write=47i,merge_tree_data_writer_blocks=3i,soft_page_faults=896i,hard_page_faults=7i,select_query=21i,merge_tree_data_writer_uncompressed_bytes=196i,merge_tree_data_writer_blocks_already_sorted=3i,user_time_microseconds=40196i,compressed_read_buffer_blocks=5i,write_buffer_from_file_descriptor_write_bytes=3246i,io_buffer_allocs=296i,created_write_buffer_ordinary=12i,disk_read_elapsed_microseconds=59347044i,network_send_elapsed_microseconds=1538i,context_lock=1040i,insert_query=1i,system_time_microseconds=14582i,read_buffer_from_file_descriptor_read=3i 
1569421000000000000 -clickhouse_asynchronous_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,hostname=localhost,shard_num=1 jemalloc.metadata_thp=0i,replicas_max_relative_delay=0i,jemalloc.mapped=1803177984i,jemalloc.allocated=1724839256i,jemalloc.background_thread.run_interval=0i,jemalloc.background_thread.num_threads=0i,uncompressed_cache_cells=0i,replicas_max_absolute_delay=0i,mark_cache_bytes=0i,compiled_expression_cache_count=0i,replicas_sum_queue_size=0i,number_of_tables=35i,replicas_max_merges_in_queue=0i,replicas_max_inserts_in_queue=0i,replicas_sum_merges_in_queue=0i,replicas_max_queue_size=0i,mark_cache_files=0i,jemalloc.background_thread.num_runs=0i,jemalloc.active=1726210048i,uptime=158i,jemalloc.retained=380481536i,replicas_sum_inserts_in_queue=0i,uncompressed_cache_bytes=0i,number_of_databases=2i,jemalloc.metadata=9207704i,max_part_count_for_partition=1i,jemalloc.resident=1742442496i 1569421000000000000 -clickhouse_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,hostname=localhost,shard_num=1 replicated_send=0i,write=0i,ephemeral_node=0i,zoo_keeper_request=0i,distributed_files_to_insert=0i,replicated_fetch=0i,background_schedule_pool_task=0i,interserver_connection=0i,leader_replica=0i,delayed_inserts=0i,global_thread_active=41i,merge=0i,readonly_replica=0i,memory_tracking_in_background_schedule_pool=0i,memory_tracking_for_merges=0i,zoo_keeper_session=0i,context_lock_wait=0i,storage_buffer_bytes=0i,background_pool_task=0i,send_external_tables=0i,zoo_keeper_watch=0i,part_mutation=0i,disk_space_reserved_for_merge=0i,distributed_send=0i,version_integer=19014003i,local_thread=0i,replicated_checks=0i,memory_tracking=0i,memory_tracking_in_background_processing_pool=0i,leader_election=0i,revision=54425i,open_file_for_read=0i,open_file_for_write=0i,storage_buffer_rows=0i,rw_lock_waiting_readers=0i,rw_lock_waiting_writers=0i,rw_lock_active_writers=0i,local_thread_active=0i,query_preempted=0i,tcp_connection=1i,http_connection=1i,read=2i,query_thread=0i,dict_cache_requests=0i,rw_lock_active_readers=1i,global_thread=43i,query=1i 1569421000000000000 -clickhouse_tables,cluster=test_cluster_two_shards_localhost,database=system,host=kshvakov,hostname=localhost,shard_num=1,table=trace_log bytes=754i,parts=1i,rows=1i 1569421000000000000 -clickhouse_tables,cluster=test_cluster_two_shards_localhost,database=default,host=kshvakov,hostname=localhost,shard_num=1,table=example bytes=326i,parts=2i,rows=2i 1569421000000000000 +clickhouse_events,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 
read_compressed_bytes=212i,arena_alloc_chunks=35i,function_execute=85i,merge_tree_data_writer_rows=3i,rw_lock_acquired_read_locks=421i,file_open=46i,io_buffer_alloc_bytes=86451985i,inserted_bytes=196i,regexp_created=3i,real_time_microseconds=116832i,query=23i,network_receive_elapsed_microseconds=268i,merge_tree_data_writer_compressed_bytes=1080i,arena_alloc_bytes=212992i,disk_write_elapsed_microseconds=556i,inserted_rows=3i,compressed_read_buffer_bytes=81i,read_buffer_from_file_descriptor_read_bytes=148i,write_buffer_from_file_descriptor_write=47i,merge_tree_data_writer_blocks=3i,soft_page_faults=896i,hard_page_faults=7i,select_query=21i,merge_tree_data_writer_uncompressed_bytes=196i,merge_tree_data_writer_blocks_already_sorted=3i,user_time_microseconds=40196i,compressed_read_buffer_blocks=5i,write_buffer_from_file_descriptor_write_bytes=3246i,io_buffer_allocs=296i,created_write_buffer_ordinary=12i,disk_read_elapsed_microseconds=59347044i,network_send_elapsed_microseconds=1538i,context_lock=1040i,insert_query=1i,system_time_microseconds=14582i,read_buffer_from_file_descriptor_read=3i 1569421000000000000 +clickhouse_asynchronous_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 jemalloc.metadata_thp=0i,replicas_max_relative_delay=0i,jemalloc.mapped=1803177984i,jemalloc.allocated=1724839256i,jemalloc.background_thread.run_interval=0i,jemalloc.background_thread.num_threads=0i,uncompressed_cache_cells=0i,replicas_max_absolute_delay=0i,mark_cache_bytes=0i,compiled_expression_cache_count=0i,replicas_sum_queue_size=0i,number_of_tables=35i,replicas_max_merges_in_queue=0i,replicas_max_inserts_in_queue=0i,replicas_sum_merges_in_queue=0i,replicas_max_queue_size=0i,mark_cache_files=0i,jemalloc.background_thread.num_runs=0i,jemalloc.active=1726210048i,uptime=158i,jemalloc.retained=380481536i,replicas_sum_inserts_in_queue=0i,uncompressed_cache_bytes=0i,number_of_databases=2i,jemalloc.metadata=9207704i,max_part_count_for_partition=1i,jemalloc.resident=1742442496i 1569421000000000000 +clickhouse_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 replicated_send=0i,write=0i,ephemeral_node=0i,zoo_keeper_request=0i,distributed_files_to_insert=0i,replicated_fetch=0i,background_schedule_pool_task=0i,interserver_connection=0i,leader_replica=0i,delayed_inserts=0i,global_thread_active=41i,merge=0i,readonly_replica=0i,memory_tracking_in_background_schedule_pool=0i,memory_tracking_for_merges=0i,zoo_keeper_session=0i,context_lock_wait=0i,storage_buffer_bytes=0i,background_pool_task=0i,send_external_tables=0i,zoo_keeper_watch=0i,part_mutation=0i,disk_space_reserved_for_merge=0i,distributed_send=0i,version_integer=19014003i,local_thread=0i,replicated_checks=0i,memory_tracking=0i,memory_tracking_in_background_processing_pool=0i,leader_election=0i,revision=54425i,open_file_for_read=0i,open_file_for_write=0i,storage_buffer_rows=0i,rw_lock_waiting_readers=0i,rw_lock_waiting_writers=0i,rw_lock_active_writers=0i,local_thread_active=0i,query_preempted=0i,tcp_connection=1i,http_connection=1i,read=2i,query_thread=0i,dict_cache_requests=0i,rw_lock_active_readers=1i,global_thread=43i,query=1i 1569421000000000000 +clickhouse_tables,cluster=test_cluster_two_shards_localhost,database=system,host=kshvakov,source=localhost,shard_num=1,table=trace_log bytes=754i,parts=1i,rows=1i 1569421000000000000 +clickhouse_tables,cluster=test_cluster_two_shards_localhost,database=default,host=kshvakov,source=localhost,shard_num=1,table=example 
bytes=326i,parts=2i,rows=2i 1569421000000000000 ``` [system.events]: https://clickhouse.tech/docs/en/operations/system_tables/#system_tables-events From 561bb3df2e8563056e9cd954be83b96dfb88300e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 5 Mar 2020 10:16:49 -0800 Subject: [PATCH 1580/1815] Fix spelling error in the SNMP field example (#7118) --- plugins/inputs/snmp/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index 68760968a..4e9ce8e50 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -111,7 +111,7 @@ option operate similar to the `snmpget` utility. ## Apply one of the following conversions to the variable value: ## float(X) Convert the input value into a float and divides by the - ## Xth power of 10. Efficively just moves the decimal left + ## Xth power of 10. Effectively just moves the decimal left ## X places. For example a value of `123` with `float(2)` ## will result in `1.23`. ## float: Convert the value into a float with no adjustment. Same From 7b03a6f4abbf96cbb0c5fc7497071b34c19c9ee8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 5 Mar 2020 10:36:36 -0800 Subject: [PATCH 1581/1815] Update stackdriver plugins to reflect new product naming (#7098) --- README.md | 4 ++-- plugins/inputs/stackdriver/README.md | 5 +++-- plugins/outputs/stackdriver/README.md | 13 ++++++++++--- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index d864c5385..bee34acd4 100644 --- a/README.md +++ b/README.md @@ -291,7 +291,7 @@ For documentation on the latest development code see the [documentation index][d * [socket_listener](./plugins/inputs/socket_listener) * [solr](./plugins/inputs/solr) * [sql server](./plugins/inputs/sqlserver) (microsoft) -* [stackdriver](./plugins/inputs/stackdriver) +* [stackdriver](./plugins/inputs/stackdriver) (Google Cloud Monitoring) * [statsd](./plugins/inputs/statsd) * [suricata](./plugins/inputs/suricata) * [swap](./plugins/inputs/swap) @@ -411,7 +411,7 @@ For documentation on the latest development code see the [documentation index][d * [riemann](./plugins/outputs/riemann) * [riemann_legacy](./plugins/outputs/riemann_legacy) * [socket_writer](./plugins/outputs/socket_writer) -* [stackdriver](./plugins/outputs/stackdriver) +* [stackdriver](./plugins/outputs/stackdriver) (Google Cloud Monitoring) * [syslog](./plugins/outputs/syslog) * [tcp](./plugins/outputs/socket_writer) * [udp](./plugins/outputs/socket_writer) diff --git a/plugins/inputs/stackdriver/README.md b/plugins/inputs/stackdriver/README.md index f2ec1471b..6469b259b 100644 --- a/plugins/inputs/stackdriver/README.md +++ b/plugins/inputs/stackdriver/README.md @@ -1,6 +1,7 @@ -# Stackdriver Input Plugin +# Stackdriver Google Cloud Monitoring Input Plugin -Stackdriver gathers metrics from the [Stackdriver Monitoring API][stackdriver]. +Query data from Google Cloud Monitoring (formerly Stackdriver) using the +[Cloud Monitoring API v3][stackdriver]. This plugin accesses APIs which are [chargeable][pricing]; you might incur costs. 
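Putting the renamed input to use: a minimal configuration sketch follows. The option names (`project`, `metric_type_prefix_include`) are assumed from the plugin's sample configuration rather than shown in the hunk above, and the project ID is a placeholder.

```toml
[[inputs.stackdriver]]
  ## GCP project to pull Cloud Monitoring time series from
  ## (placeholder value).
  project = "my-gcp-project"

  ## Restrict the metric types collected; because the API is
  ## chargeable, narrowing this list helps keep costs down.
  metric_type_prefix_include = [
    "compute.googleapis.com/",
  ]
```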
diff --git a/plugins/outputs/stackdriver/README.md b/plugins/outputs/stackdriver/README.md index cdf0a1591..142d1efa0 100644 --- a/plugins/outputs/stackdriver/README.md +++ b/plugins/outputs/stackdriver/README.md @@ -1,7 +1,11 @@ -# Stackdriver Output Plugin +# Stackdriver Google Cloud Monitoring Output Plugin -This plugin writes to the [Google Cloud Stackdriver API](https://cloud.google.com/monitoring/api/v3/) -and requires [authentication](https://cloud.google.com/docs/authentication/getting-started) with Google Cloud using either a service account or user credentials. See the [Stackdriver documentation](https://cloud.google.com/stackdriver/pricing#stackdriver_monitoring_services) for details on pricing. +This plugin writes to the [Google Cloud Monitoring API][stackdriver] (formerly +Stackdriver) and requires [authentication][] with Google Cloud using either a +service account or user credentials + +This plugin accesses APIs which are [chargeable][pricing]; you might incur +costs. Requires `project` to specify where Stackdriver metrics will be delivered to. @@ -47,3 +51,6 @@ aggregated before then can be written. Consider using the [basicstats][] aggregator to do this. [basicstats]: /plugins/aggregators/basicstats/README.md +[stackdriver]: https://cloud.google.com/monitoring/api/v3/ +[authentication]: https://cloud.google.com/docs/authentication/getting-started +[pricing]: https://cloud.google.com/stackdriver/pricing#stackdriver_monitoring_services From 898487b2da3b82f2e62f78cbb6fc54f62d7c7a0f Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Thu, 5 Mar 2020 18:07:28 -0800 Subject: [PATCH 1582/1815] Fix typo in salesforce readme (#7120) --- plugins/inputs/salesforce/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/salesforce/README.md b/plugins/inputs/salesforce/README.md index 5ee0f6a3d..526f14a07 100644 --- a/plugins/inputs/salesforce/README.md +++ b/plugins/inputs/salesforce/README.md @@ -10,7 +10,7 @@ It fetches its data from the [limits endpoint](https://developer.salesforce.com/ [[inputs.salesforce]] username = "your_username" password = "your_password" - ## (Optional) security tokjen + ## (Optional) security token security_token = "your_security_token" ## (Optional) environment type (sandbox or production) ## default is: production From ca65d52c9a8c19b8547973e0b835911d23091bce Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 9 Mar 2020 14:08:38 -0700 Subject: [PATCH 1583/1815] Add support for converting tag or field to measurement in converter processor (#7049) --- plugins/processors/converter/README.md | 39 +- plugins/processors/converter/converter.go | 114 +++-- .../processors/converter/converter_test.go | 467 ++++++++++-------- 3 files changed, 342 insertions(+), 278 deletions(-) diff --git a/plugins/processors/converter/README.md b/plugins/processors/converter/README.md index d56985d84..d916c8764 100644 --- a/plugins/processors/converter/README.md +++ b/plugins/processors/converter/README.md @@ -9,7 +9,7 @@ Values that cannot be converted are dropped. uniquely identifiable. Fields with the same series key (measurement + tags) will overwrite one another. -### Configuration: +### Configuration ```toml # Convert values to another metric value type [[processors.converter]] @@ -19,6 +19,7 @@ will overwrite one another. ## select the keys to convert. The array may contain globs. ## = [...] [processors.converter.tags] + measurement = [] string = [] integer = [] unsigned = [] @@ -31,6 +32,7 @@ will overwrite one another. 
## select the keys to convert. The array may contain globs. ## = [...] [processors.converter.fields] + measurement = [] tag = [] string = [] integer = [] @@ -39,19 +41,40 @@ will overwrite one another. float = [] ``` -### Examples: +### Example +Convert `port` tag to a string field: ```toml [[processors.converter]] [processors.converter.tags] string = ["port"] - - [processors.converter.fields] - integer = ["scboard_*"] - tag = ["ParentServerConfigGeneration"] ``` ```diff -- apache,port=80,server=debian-stretch-apache BusyWorkers=1,BytesPerReq=0,BytesPerSec=0,CPUChildrenSystem=0,CPUChildrenUser=0,CPULoad=0.00995025,CPUSystem=0.01,CPUUser=0.01,ConnsAsyncClosing=0,ConnsAsyncKeepAlive=0,ConnsAsyncWriting=0,ConnsTotal=0,IdleWorkers=49,Load1=0.01,Load15=0,Load5=0,ParentServerConfigGeneration=3,ParentServerMPMGeneration=2,ReqPerSec=0.00497512,ServerUptimeSeconds=201,TotalAccesses=1,TotalkBytes=0,Uptime=201,scboard_closing=0,scboard_dnslookup=0,scboard_finishing=0,scboard_idle_cleanup=0,scboard_keepalive=0,scboard_logging=0,scboard_open=100,scboard_reading=0,scboard_sending=1,scboard_starting=0,scboard_waiting=49 1502489900000000000 -+ apache,server=debian-stretch-apache,ParentServerConfigGeneration=3 port="80",BusyWorkers=1,BytesPerReq=0,BytesPerSec=0,CPUChildrenSystem=0,CPUChildrenUser=0,CPULoad=0.00995025,CPUSystem=0.01,CPUUser=0.01,ConnsAsyncClosing=0,ConnsAsyncKeepAlive=0,ConnsAsyncWriting=0,ConnsTotal=0,IdleWorkers=49,Load1=0.01,Load15=0,Load5=0,ParentServerMPMGeneration=2,ReqPerSec=0.00497512,ServerUptimeSeconds=201,TotalAccesses=1,TotalkBytes=0,Uptime=201,scboard_closing=0i,scboard_dnslookup=0i,scboard_finishing=0i,scboard_idle_cleanup=0i,scboard_keepalive=0i,scboard_logging=0i,scboard_open=100i,scboard_reading=0i,scboard_sending=1i,scboard_starting=0i,scboard_waiting=49i 1502489900000000000 +- apache,port=80,server=debian-stretch-apache BusyWorkers=1,BytesPerReq=0 ++ apache,server=debian-stretch-apache port="80",BusyWorkers=1,BytesPerReq=0 +``` + +Convert all `scboard_*` fields to an integer: +```toml +[[processors.converter]] + [processors.converter.fields] + integer = ["scboard_*"] +``` + +```diff +- apache scboard_closing=0,scboard_dnslookup=0,scboard_finishing=0,scboard_idle_cleanup=0,scboard_keepalive=0,scboard_logging=0,scboard_open=100,scboard_reading=0,scboard_sending=1,scboard_starting=0,scboard_waiting=49 ++ apache scboard_closing=0i,scboard_dnslookup=0i,scboard_finishing=0i,scboard_idle_cleanup=0i,scboard_keepalive=0i,scboard_logging=0i,scboard_open=100i,scboard_reading=0i,scboard_sending=1i,scboard_starting=0i,scboard_waiting=49i +``` + +Rename the measurement from a tag value: +```toml +[[processors.converter]] + [processors.converter.tags] + measurement = ["topic"] +``` + +```diff +- mqtt_consumer,topic=sensor temp=42 ++ sensor temp=42 ``` diff --git a/plugins/processors/converter/converter.go b/plugins/processors/converter/converter.go index bf9b851fb..33f2e43c0 100644 --- a/plugins/processors/converter/converter.go +++ b/plugins/processors/converter/converter.go @@ -2,7 +2,6 @@ package converter import ( "fmt" - "log" "math" "strconv" @@ -18,6 +17,7 @@ var sampleConfig = ` ## select the keys to convert. The array may contain globs. ## = [...] [processors.converter.tags] + measurement = [] string = [] integer = [] unsigned = [] @@ -30,6 +30,7 @@ var sampleConfig = ` ## select the keys to convert. The array may contain globs. ## = [...] 
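+  ## example: measurement = ["topic"]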
[processors.converter.fields] + measurement = [] tag = [] string = [] integer = [] @@ -39,30 +40,32 @@ var sampleConfig = ` ` type Conversion struct { - Tag []string `toml:"tag"` - String []string `toml:"string"` - Integer []string `toml:"integer"` - Unsigned []string `toml:"unsigned"` - Boolean []string `toml:"boolean"` - Float []string `toml:"float"` + Measurement []string `toml:"measurement"` + Tag []string `toml:"tag"` + String []string `toml:"string"` + Integer []string `toml:"integer"` + Unsigned []string `toml:"unsigned"` + Boolean []string `toml:"boolean"` + Float []string `toml:"float"` } type Converter struct { - Tags *Conversion `toml:"tags"` - Fields *Conversion `toml:"fields"` + Tags *Conversion `toml:"tags"` + Fields *Conversion `toml:"fields"` + Log telegraf.Logger `toml:"-"` - initialized bool tagConversions *ConversionFilter fieldConversions *ConversionFilter } type ConversionFilter struct { - Tag filter.Filter - String filter.Filter - Integer filter.Filter - Unsigned filter.Filter - Boolean filter.Filter - Float filter.Filter + Measurement filter.Filter + Tag filter.Filter + String filter.Filter + Integer filter.Filter + Unsigned filter.Filter + Boolean filter.Filter + Float filter.Filter } func (p *Converter) SampleConfig() string { @@ -73,15 +76,11 @@ func (p *Converter) Description() string { return "Convert values to another metric value type" } -func (p *Converter) Apply(metrics ...telegraf.Metric) []telegraf.Metric { - if !p.initialized { - err := p.compile() - if err != nil { - logPrintf("initialization error: %v\n", err) - return metrics - } - } +func (p *Converter) Init() error { + return p.compile() +} +func (p *Converter) Apply(metrics ...telegraf.Metric) []telegraf.Metric { for _, metric := range metrics { p.convertTags(metric) p.convertFields(metric) @@ -106,7 +105,6 @@ func (p *Converter) compile() error { p.tagConversions = tf p.fieldConversions = ff - p.initialized = true return nil } @@ -117,6 +115,11 @@ func compileFilter(conv *Conversion) (*ConversionFilter, error) { var err error cf := &ConversionFilter{} + cf.Measurement, err = filter.Compile(conv.Measurement) + if err != nil { + return nil, err + } + cf.Tag, err = filter.Compile(conv.Tag) if err != nil { return nil, err @@ -150,13 +153,19 @@ func compileFilter(conv *Conversion) (*ConversionFilter, error) { return cf, nil } -// convertTags converts tags into fields +// convertTags converts tags into measurements or fields. 
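+// Conversions are checked in a fixed order: measurement first, then
+// string, integer, unsigned, boolean, and float; the first filter that
+// matches wins and the converted tag is removed from the metric.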
func (p *Converter) convertTags(metric telegraf.Metric) { if p.tagConversions == nil { return } for key, value := range metric.Tags() { + if p.tagConversions.Measurement != nil && p.tagConversions.Measurement.Match(key) { + metric.RemoveTag(key) + metric.SetName(value) + continue + } + if p.tagConversions.String != nil && p.tagConversions.String.Match(key) { metric.RemoveTag(key) metric.AddField(key, value) @@ -167,7 +176,7 @@ func (p *Converter) convertTags(metric telegraf.Metric) { v, ok := toInteger(value) if !ok { metric.RemoveTag(key) - logPrintf("error converting to integer [%T]: %v\n", value, value) + p.Log.Errorf("error converting to integer [%T]: %v", value, value) continue } @@ -179,7 +188,7 @@ func (p *Converter) convertTags(metric telegraf.Metric) { v, ok := toUnsigned(value) if !ok { metric.RemoveTag(key) - logPrintf("error converting to unsigned [%T]: %v\n", value, value) + p.Log.Errorf("error converting to unsigned [%T]: %v", value, value) continue } @@ -192,7 +201,7 @@ func (p *Converter) convertTags(metric telegraf.Metric) { v, ok := toBool(value) if !ok { metric.RemoveTag(key) - logPrintf("error converting to boolean [%T]: %v\n", value, value) + p.Log.Errorf("error converting to boolean [%T]: %v", value, value) continue } @@ -205,7 +214,7 @@ func (p *Converter) convertTags(metric telegraf.Metric) { v, ok := toFloat(value) if !ok { metric.RemoveTag(key) - logPrintf("error converting to float [%T]: %v\n", value, value) + p.Log.Errorf("error converting to float [%T]: %v", value, value) continue } @@ -216,18 +225,31 @@ func (p *Converter) convertTags(metric telegraf.Metric) { } } -// convertFields converts fields into tags or other field types +// convertFields converts fields into measurements, tags, or other field types. func (p *Converter) convertFields(metric telegraf.Metric) { if p.fieldConversions == nil { return } for key, value := range metric.Fields() { + if p.fieldConversions.Measurement != nil && p.fieldConversions.Measurement.Match(key) { + v, ok := toString(value) + if !ok { + metric.RemoveField(key) + p.Log.Errorf("error converting to measurement [%T]: %v", value, value) + continue + } + + metric.RemoveField(key) + metric.SetName(v) + continue + } + if p.fieldConversions.Tag != nil && p.fieldConversions.Tag.Match(key) { v, ok := toString(value) if !ok { metric.RemoveField(key) - logPrintf("error converting to tag [%T]: %v\n", value, value) + p.Log.Errorf("error converting to tag [%T]: %v", value, value) continue } @@ -240,7 +262,7 @@ func (p *Converter) convertFields(metric telegraf.Metric) { v, ok := toFloat(value) if !ok { metric.RemoveField(key) - logPrintf("error converting to float [%T]: %v\n", value, value) + p.Log.Errorf("error converting to float [%T]: %v", value, value) continue } @@ -253,7 +275,7 @@ func (p *Converter) convertFields(metric telegraf.Metric) { v, ok := toInteger(value) if !ok { metric.RemoveField(key) - logPrintf("error converting to integer [%T]: %v\n", value, value) + p.Log.Errorf("error converting to integer [%T]: %v", value, value) continue } @@ -266,7 +288,7 @@ func (p *Converter) convertFields(metric telegraf.Metric) { v, ok := toUnsigned(value) if !ok { metric.RemoveField(key) - logPrintf("error converting to unsigned [%T]: %v\n", value, value) + p.Log.Errorf("error converting to unsigned [%T]: %v", value, value) continue } @@ -279,7 +301,7 @@ func (p *Converter) convertFields(metric telegraf.Metric) { v, ok := toBool(value) if !ok { metric.RemoveField(key) - logPrintf("error converting to bool [%T]: %v\n", value, value) + 
p.Log.Errorf("error converting to bool [%T]: %v", value, value) continue } @@ -292,7 +314,7 @@ func (p *Converter) convertFields(metric telegraf.Metric) { v, ok := toString(value) if !ok { metric.RemoveField(key) - logPrintf("error converting to string [%T]: %v\n", value, value) + p.Log.Errorf("Error converting to string [%T]: %v", value, value) continue } @@ -336,7 +358,7 @@ func toInteger(v interface{}) (int64, bool) { } else if value > float64(math.MaxInt64) { return math.MaxInt64, true } else { - return int64(Round(value)), true + return int64(math.Round(value)), true } case bool: if value { @@ -375,7 +397,7 @@ func toUnsigned(v interface{}) (uint64, bool) { } else if value > float64(math.MaxUint64) { return math.MaxUint64, true } else { - return uint64(Round(value)), true + return uint64(math.Round(value)), true } case bool: if value { @@ -435,20 +457,6 @@ func toString(v interface{}) (string, bool) { return "", false } -// math.Round was not added until Go 1.10, can be removed when support for Go -// 1.9 is dropped. -func Round(x float64) float64 { - t := math.Trunc(x) - if math.Abs(x-t) >= 0.5 { - return t + math.Copysign(1, x) - } - return t -} - -func logPrintf(format string, v ...interface{}) { - log.Printf("D! [processors.converter] "+format, v...) -} - func init() { processors.Add("converter", func() telegraf.Processor { return &Converter{} diff --git a/plugins/processors/converter/converter_test.go b/plugins/processors/converter/converter_test.go index 1d60a40fb..1310e698a 100644 --- a/plugins/processors/converter/converter_test.go +++ b/plugins/processors/converter/converter_test.go @@ -6,48 +6,17 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) -func Metric(v telegraf.Metric, err error) telegraf.Metric { - if err != nil { - panic(err) - } - return v -} - func TestConverter(t *testing.T) { tests := []struct { name string converter *Converter input telegraf.Metric - expected telegraf.Metric + expected []telegraf.Metric }{ - { - name: "empty", - converter: &Converter{}, - input: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), - ), - expected: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), - ), - }, { name: "from tag", converter: &Converter{ @@ -60,23 +29,21 @@ func TestConverter(t *testing.T) { Tag: []string{"tag"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{ - "float": "42", - "int": "42", - "uint": "42", - "bool": "true", - "string": "howdy", - "tag": "tag", - }, - map[string]interface{}{}, - time.Unix(0, 0), - ), + input: testutil.MustMetric( + "cpu", + map[string]string{ + "float": "42", + "int": "42", + "uint": "42", + "bool": "true", + "string": "howdy", + "tag": "tag", + }, + map[string]interface{}{}, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{ "tag": "tag", @@ -90,7 +57,7 @@ func TestConverter(t *testing.T) { }, time.Unix(0, 0), ), - ), + }, }, { name: "from tag unconvertible", @@ -102,27 +69,25 @@ func TestConverter(t *testing.T) { Float: []string{"float"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{ - "float": "a", - "int": "b", - "uint": "c", - "bool": "maybe", - }, - map[string]interface{}{}, - time.Unix(0, 0), - ), + input: testutil.MustMetric( + 
"cpu", + map[string]string{ + "float": "a", + "int": "b", + "uint": "c", + "bool": "maybe", + }, + map[string]interface{}{}, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{}, map[string]interface{}{}, time.Unix(0, 0), ), - ), + }, }, { name: "from string field", @@ -136,29 +101,27 @@ func TestConverter(t *testing.T) { Tag: []string{"f"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "a": "howdy", - "b": "42", - "b1": "42.2", - "b2": "42.5", - "b3": "0x2A", - "c": "42", - "c1": "42.2", - "c2": "42.5", - "c3": "0x2A", - "d": "true", - "e": "42.0", - "f": "foo", - }, - time.Unix(0, 0), - ), + input: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "a": "howdy", + "b": "42", + "b1": "42.2", + "b2": "42.5", + "b3": "0x2A", + "c": "42", + "c1": "42.2", + "c2": "42.5", + "c3": "0x2A", + "d": "true", + "e": "42.0", + "f": "foo", + }, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{ "f": "foo", @@ -178,7 +141,7 @@ func TestConverter(t *testing.T) { }, time.Unix(0, 0), ), - ), + }, }, { name: "from string field unconvertible", @@ -190,27 +153,25 @@ func TestConverter(t *testing.T) { Float: []string{"d"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "a": "a", - "b": "b", - "c": "c", - "d": "d", - }, - time.Unix(0, 0), - ), + input: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "a": "a", + "b": "b", + "c": "c", + "d": "d", + }, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{}, map[string]interface{}{}, time.Unix(0, 0), ), - ), + }, }, { name: "from integer field", @@ -224,24 +185,22 @@ func TestConverter(t *testing.T) { Tag: []string{"f"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "a": int64(42), - "b": int64(42), - "c": int64(42), - "d": int64(42), - "e": int64(42), - "f": int64(42), - "negative_uint": int64(-42), - }, - time.Unix(0, 0), - ), + input: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "a": int64(42), + "b": int64(42), + "c": int64(42), + "d": int64(42), + "e": int64(42), + "f": int64(42), + "negative_uint": int64(-42), + }, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{ "f": "42", @@ -256,7 +215,7 @@ func TestConverter(t *testing.T) { }, time.Unix(0, 0), ), - ), + }, }, { name: "from unsigned field", @@ -270,24 +229,22 @@ func TestConverter(t *testing.T) { Tag: []string{"f"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "a": uint64(42), - "b": uint64(42), - "c": uint64(42), - "d": uint64(42), - "e": uint64(42), - "f": uint64(42), - "overflow_int": uint64(math.MaxUint64), - }, - time.Unix(0, 0), - ), + input: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "a": uint64(42), + "b": uint64(42), + "c": uint64(42), + "d": uint64(42), + "e": uint64(42), + "f": uint64(42), + "overflow_int": uint64(math.MaxUint64), + }, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{ "f": "42", @@ -302,7 +259,7 @@ func TestConverter(t 
*testing.T) { }, time.Unix(0, 0), ), - ), + }, }, { name: "out of range for unsigned", @@ -311,19 +268,17 @@ func TestConverter(t *testing.T) { Unsigned: []string{"a", "b"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "a": int64(-42), - "b": math.MaxFloat64, - }, - time.Unix(0, 0), - ), + input: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "a": int64(-42), + "b": math.MaxFloat64, + }, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{}, map[string]interface{}{ @@ -332,7 +287,7 @@ func TestConverter(t *testing.T) { }, time.Unix(0, 0), ), - ), + }, }, { name: "boolean field", @@ -346,29 +301,27 @@ func TestConverter(t *testing.T) { Tag: []string{"f", "ff"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "a": true, - "b": true, - "c": true, - "d": true, - "e": true, - "f": true, - "af": false, - "bf": false, - "cf": false, - "df": false, - "ef": false, - "ff": false, - }, - time.Unix(0, 0), - ), + input: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "a": true, + "b": true, + "c": true, + "d": true, + "e": true, + "f": true, + "af": false, + "bf": false, + "cf": false, + "df": false, + "ef": false, + "ff": false, + }, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{ "f": "true", @@ -388,7 +341,7 @@ func TestConverter(t *testing.T) { }, time.Unix(0, 0), ), - ), + }, }, { name: "from float field", @@ -402,28 +355,26 @@ func TestConverter(t *testing.T) { Tag: []string{"f"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "a": 42.0, - "b": 42.0, - "c": 42.0, - "d": 42.0, - "e": 42.0, - "f": 42.0, - "too_large_int": math.MaxFloat64, - "too_large_uint": math.MaxFloat64, - "too_small_int": -math.MaxFloat64, - "too_small_uint": -math.MaxFloat64, - "negative_uint": -42.0, - }, - time.Unix(0, 0), - ), + input: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "a": 42.0, + "b": 42.0, + "c": 42.0, + "d": 42.0, + "e": 42.0, + "f": 42.0, + "too_large_int": math.MaxFloat64, + "too_large_uint": math.MaxFloat64, + "too_small_int": -math.MaxFloat64, + "too_small_uint": -math.MaxFloat64, + "negative_uint": -42.0, + }, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{ "f": "42", @@ -442,7 +393,7 @@ func TestConverter(t *testing.T) { }, time.Unix(0, 0), ), - ), + }, }, { name: "globbing", @@ -451,20 +402,18 @@ func TestConverter(t *testing.T) { Integer: []string{"int_*"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "int_a": "1", - "int_b": "2", - "float_a": 1.0, - }, - time.Unix(0, 0), - ), + input: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "int_a": "1", + "int_b": "2", + "float_a": 1.0, + }, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{}, map[string]interface{}{ @@ -474,18 +423,102 @@ func TestConverter(t *testing.T) { }, time.Unix(0, 0), ), - ), + }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - metrics := tt.converter.Apply(tt.input) + tt.converter.Log = testutil.Logger{} - require.Equal(t, 1, len(metrics)) 
- require.Equal(t, tt.expected.Name(), metrics[0].Name()) - require.Equal(t, tt.expected.Tags(), metrics[0].Tags()) - require.Equal(t, tt.expected.Fields(), metrics[0].Fields()) - require.Equal(t, tt.expected.Time(), metrics[0].Time()) + err := tt.converter.Init() + require.NoError(t, err) + actual := tt.converter.Apply(tt.input) + + testutil.RequireMetricsEqual(t, tt.expected, actual) }) } } + +func TestMeasurement(t *testing.T) { + tests := []struct { + name string + converter *Converter + input telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "measurement from tag", + converter: &Converter{ + Tags: &Conversion{ + Measurement: []string{"filepath"}, + }, + }, + input: testutil.MustMetric( + "file", + map[string]string{ + "filepath": "/var/log/syslog", + }, + map[string]interface{}{ + "msg": "howdy", + }, + time.Unix(0, 0), + ), + expected: []telegraf.Metric{ + testutil.MustMetric( + "/var/log/syslog", + map[string]string{}, + map[string]interface{}{ + "msg": "howdy", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "measurement from field", + converter: &Converter{ + Fields: &Conversion{ + Measurement: []string{"topic"}, + }, + }, + input: testutil.MustMetric( + "file", + map[string]string{}, + map[string]interface{}{ + "v": 1, + "topic": "telegraf", + }, + time.Unix(0, 0), + ), + expected: []telegraf.Metric{ + testutil.MustMetric( + "telegraf", + map[string]string{}, + map[string]interface{}{ + "v": 1, + }, + time.Unix(0, 0), + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.converter.Log = testutil.Logger{} + err := tt.converter.Init() + require.NoError(t, err) + + actual := tt.converter.Apply(tt.input) + + testutil.RequireMetricsEqual(t, tt.expected, actual) + }) + } +} + +func TestEmptyConfigInitError(t *testing.T) { + converter := &Converter{ + Log: testutil.Logger{}, + } + err := converter.Init() + require.Error(t, err) +} From 1eca315e157a5816c310549d21dabc4bc09f81d1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 9 Mar 2020 14:14:00 -0700 Subject: [PATCH 1584/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b25ad4241..86db632ec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -56,6 +56,8 @@ - [#7089](https://github.com/influxdata/telegraf/pull/7089): Allow globs in FPM unix socket paths. - [#7071](https://github.com/influxdata/telegraf/pull/7071): Add non-cumulative histogram to histogram aggregator. - [#6969](https://github.com/influxdata/telegraf/pull/6969): Add label and field selectors to prometheus input k8s discovery. +- [#7049](https://github.com/influxdata/telegraf/pull/7049): Add support for converting tag or field to measurement in converter processor. 
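A minimal end-to-end sketch of the new converter lifecycle, mirroring the tests in this patch (set `Log`, call `Init`, then `Apply`); the metric values are taken from the README's measurement-rename example, and `panic` on error is for brevity only:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/plugins/processors/converter"
	"github.com/influxdata/telegraf/testutil"
)

func main() {
	// Rename each metric from the value of its "topic" tag.
	conv := &converter.Converter{
		Tags: &converter.Conversion{Measurement: []string{"topic"}},
		Log:  testutil.Logger{},
	}
	// Init compiles the glob filters up front; a Converter with neither
	// tag nor field conversions configured is now an error.
	if err := conv.Init(); err != nil {
		panic(err)
	}

	in := testutil.MustMetric(
		"mqtt_consumer",
		map[string]string{"topic": "sensor"},
		map[string]interface{}{"temp": 42},
		time.Unix(0, 0),
	)
	for _, m := range conv.Apply(in) {
		fmt.Println(m.Name()) // prints "sensor"
	}
}
```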
+ #### Bugfixes From 318a963a89e0eea8edc1b1274290cd52db735f61 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 9 Mar 2020 14:47:28 -0700 Subject: [PATCH 1585/1815] Check license of dependencies file for changes (#7108) --- .circleci/config.yml | 1 + Makefile | 4 +++ docs/LICENSE_OF_DEPENDENCIES.md | 49 +++++++++++++++++-------------- go.mod | 3 ++ go.sum | 4 +-- scripts/check-deps.sh | 51 +++++++++++++++++++++++++++++++++ 6 files changed, 88 insertions(+), 24 deletions(-) create mode 100755 scripts/check-deps.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 9e8897041..f9176c97a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -87,6 +87,7 @@ jobs: at: '/go' - run: 'make' - run: 'make check' + - run: 'make check-deps' - run: 'make test' test-go-1.13-386: <<: [ *defaults, *go-1_13 ] diff --git a/Makefile b/Makefile index 46e1cf12a..4ec767997 100644 --- a/Makefile +++ b/Makefile @@ -100,6 +100,10 @@ check: fmtcheck vet test-all: fmtcheck vet go test ./... +.PHONY: check-deps +check-deps: + ./scripts/check-deps.sh + .PHONY: package package: ./scripts/build.py --package --platform=all --arch=all diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index f22e3c7e9..e2ba9a812 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -6,16 +6,21 @@ following works: - cloud.google.com/go [Apache License 2.0](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/LICENSE) - code.cloudfoundry.org/clock [Apache License 2.0](https://github.com/cloudfoundry/clock/blob/master/LICENSE) - collectd.org [MIT License](https://git.octo.it/?p=collectd.git;a=blob;f=COPYING;hb=HEAD) -- contrib.go.opencensus.io/exporter/stackdriver [Apache License 2.0](https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/LICENSE) +- github.com/Azure/azure-pipeline-go [MIT License](https://github.com/Azure/azure-pipeline-go/blob/master/LICENSE) +- github.com/Azure/azure-storage-queue-go [MIT License](https://github.com/Azure/azure-storage-queue-go/blob/master/LICENSE) +- github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE) +- github.com/Mellanox/rdmamap [Apache License 2.0](https://github.com/Mellanox/rdmamap/blob/master/LICENSE) +- github.com/Microsoft/ApplicationInsights-Go [MIT License](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE) +- github.com/Microsoft/go-winio [MIT License](https://github.com/Microsoft/go-winio/blob/master/LICENSE) +- github.com/Shopify/sarama [MIT License](https://github.com/Shopify/sarama/blob/master/LICENSE) +- github.com/StackExchange/wmi [MIT License](https://github.com/StackExchange/wmi/blob/master/LICENSE) - github.com/aerospike/aerospike-client-go [Apache License 2.0](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE) - github.com/alecthomas/units [MIT License](https://github.com/alecthomas/units/blob/master/COPYING) - github.com/amir/raidman [The Unlicense](https://github.com/amir/raidman/blob/master/UNLICENSE) - github.com/apache/thrift [Apache License 2.0](https://github.com/apache/thrift/blob/master/LICENSE) - github.com/aws/aws-sdk-go [Apache License 2.0](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) -- github.com/Azure/azure-storage-queue-go [MIT License](https://github.com/Azure/azure-storage-queue-go/blob/master/LICENSE) -- github.com/Azure/azure-pipeline-go [MIT License](https://github.com/Azure/azure-pipeline-go/blob/master/LICENSE) -- 
github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE) - github.com/beorn7/perks [MIT License](https://github.com/beorn7/perks/blob/master/LICENSE) +- github.com/caio/go-tdigest [MIT License](https://github.com/caio/go-tdigest/blob/master/LICENSE) - github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE) - github.com/cisco-ie/nx-telemetry-proto [Apache License 2.0](https://github.com/cisco-ie/nx-telemetry-proto/blob/master/LICENSE) - github.com/couchbase/go-couchbase [MIT License](https://github.com/couchbase/go-couchbase/blob/master/LICENSE) @@ -35,65 +40,63 @@ following works: - github.com/eapache/queue [MIT License](https://github.com/eapache/queue/blob/master/LICENSE) - github.com/eclipse/paho.mqtt.golang [Eclipse Public License - v 1.0](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE) - github.com/ericchiang/k8s [Apache License 2.0](https://github.com/ericchiang/k8s/blob/master/LICENSE) -- github.com/go-ini/ini [Apache License 2.0](https://github.com/go-ini/ini/blob/master/LICENSE) +- github.com/ghodss/yaml [MIT License](https://github.com/ghodss/yaml/blob/master/LICENSE) +- github.com/glinton/ping [MIT License](https://github.com/glinton/ping/blob/master/LICENSE) - github.com/go-logfmt/logfmt [MIT License](https://github.com/go-logfmt/logfmt/blob/master/LICENSE) - github.com/go-ole/go-ole [MIT License](https://github.com/go-ole/go-ole/blob/master/LICENSE) - github.com/go-redis/redis [BSD 2-Clause "Simplified" License](https://github.com/go-redis/redis/blob/master/LICENSE) - github.com/go-sql-driver/mysql [Mozilla Public License 2.0](https://github.com/go-sql-driver/mysql/blob/master/LICENSE) +- github.com/goburrow/modbus [BSD 3-Clause "New" or "Revised" License](https://github.com/goburrow/modbus/blob/master/LICENSE) +- github.com/goburrow/serial [MIT License](https://github.com/goburrow/serial/LICENSE) - github.com/gobwas/glob [MIT License](https://github.com/gobwas/glob/blob/master/LICENSE) - github.com/gofrs/uuid [MIT License](https://github.com/gofrs/uuid/blob/master/LICENSE) - github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE) - github.com/golang/mock [Apache License 2.0](https://github.com/golang/mock/blob/master/LICENSE) - github.com/golang/protobuf [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/protobuf/blob/master/LICENSE) - github.com/golang/snappy [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/snappy/blob/master/LICENSE) -- github.com/google/go-cmp [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-cmp/blob/master/LICENSE) - github.com/google/go-github [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-github/blob/master/LICENSE) - github.com/google/go-querystring [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-querystring/blob/master/LICENSE) -- github.com/google/uuid [BSD 3-Clause "New" or "Revised" License](https://github.com/google/uuid/blob/master/LICENSE) - github.com/googleapis/gax-go [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/gax-go/blob/master/LICENSE) -- github.com/gorilla/context [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/context/blob/master/LICENSE) - github.com/gorilla/mux [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/mux/blob/master/LICENSE) - github.com/hailocab/go-hostpool [MIT 
License](https://github.com/hailocab/go-hostpool/blob/master/LICENSE) - github.com/harlow/kinesis-consumer [MIT License](https://github.com/harlow/kinesis-consumer/blob/master/MIT-LICENSE) - github.com/hashicorp/consul [Mozilla Public License 2.0](https://github.com/hashicorp/consul/blob/master/LICENSE) - github.com/hashicorp/go-cleanhttp [Mozilla Public License 2.0](https://github.com/hashicorp/go-cleanhttp/blob/master/LICENSE) - github.com/hashicorp/go-rootcerts [Mozilla Public License 2.0](https://github.com/hashicorp/go-rootcerts/blob/master/LICENSE) +- github.com/hashicorp/go-uuid [Mozilla Public License 2.0](https://github.com/hashicorp/go-uuid/LICENSE) +- github.com/hashicorp/golang-lru [Mozilla Public License 2.0](https://github.com/hashicorp/golang-lru/blob/master/LICENSE) - github.com/hashicorp/serf [Mozilla Public License 2.0](https://github.com/hashicorp/serf/blob/master/LICENSE) - github.com/influxdata/go-syslog [MIT License](https://github.com/influxdata/go-syslog/blob/develop/LICENSE) - github.com/influxdata/tail [MIT License](https://github.com/influxdata/tail/blob/master/LICENSE.txt) - github.com/influxdata/toml [MIT License](https://github.com/influxdata/toml/blob/master/LICENSE) - github.com/influxdata/wlog [MIT License](https://github.com/influxdata/wlog/blob/master/LICENSE) - github.com/jackc/pgx [MIT License](https://github.com/jackc/pgx/blob/master/LICENSE) +- github.com/jcmturner/gofork [BSD 3-Clause "New" or "Revised" License](https://github.com/jcmturner/gofork/blob/master/LICENSE) - github.com/jmespath/go-jmespath [Apache License 2.0](https://github.com/jmespath/go-jmespath/blob/master/LICENSE) -- github.com/kardianos/osext [BSD 3-Clause "New" or "Revised" License](https://github.com/kardianos/osext/blob/master/LICENSE) - github.com/kardianos/service [zlib License](https://github.com/kardianos/service/blob/master/LICENSE) +- github.com/karrick/godirwalk [BSD 2-Clause "Simplified" License](https://github.com/karrick/godirwalk/blob/master/LICENSE) - github.com/kballard/go-shellquote [MIT License](https://github.com/kballard/go-shellquote/blob/master/LICENSE) - github.com/klauspost/compress [BSD 3-Clause Clear License](https://github.com/klauspost/compress/blob/master/LICENSE) -- github.com/kr/logfmt [MIT License](https://github.com/kr/logfmt/blob/master/Readme) +- github.com/konsorten/go-windows-terminal-sequences [MIT License](https://github.com/konsorten/go-windows-terminal-sequences/blob/master/LICENSE) - github.com/kubernetes/apimachinery [Apache License 2.0](https://github.com/kubernetes/apimachinery/blob/master/LICENSE) - github.com/leodido/ragel-machinery [MIT License](https://github.com/leodido/ragel-machinery/blob/develop/LICENSE) - github.com/mailru/easyjson [MIT License](https://github.com/mailru/easyjson/blob/master/LICENSE) - github.com/matttproud/golang_protobuf_extensions [Apache License 2.0](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE) - github.com/mdlayher/apcupsd [MIT License](https://github.com/mdlayher/apcupsd/blob/master/LICENSE.md) -- github.com/Mellanox/rdmamap [Apache License 2.0](https://github.com/Mellanox/rdmamap/blob/master/LICENSE) -- github.com/Microsoft/ApplicationInsights-Go [MIT License](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE) -- github.com/Microsoft/go-winio [MIT License](https://github.com/Microsoft/go-winio/blob/master/LICENSE) - github.com/miekg/dns [BSD 3-Clause Clear License](https://github.com/miekg/dns/blob/master/LICENSE) - github.com/mitchellh/go-homedir 
[MIT License](https://github.com/mitchellh/go-homedir/blob/master/LICENSE) - github.com/mitchellh/mapstructure [MIT License](https://github.com/mitchellh/mapstructure/blob/master/LICENSE) - github.com/multiplay/go-ts3 [BSD 2-Clause "Simplified" License](https://github.com/multiplay/go-ts3/blob/master/LICENSE) - github.com/naoina/go-stringutil [MIT License](https://github.com/naoina/go-stringutil/blob/master/LICENSE) +- github.com/nats-io/jwt [Apache License 2.0](https://github.com/nats-io/jwt/blob/master/LICENSE) - github.com/nats-io/nats-server [Apache License 2.0](https://github.com/nats-io/nats-server/blob/master/LICENSE) - github.com/nats-io/nats.go [Apache License 2.0](https://github.com/nats-io/nats.go/blob/master/LICENSE) -- github.com/nats-io/jwt [Apache License 2.0](https://github.com/nats-io/jwt/blob/master/LICENSE) - github.com/nats-io/nkeys [Apache License 2.0](https://github.com/nats-io/nkeys/blob/master/LICENSE) - github.com/nats-io/nuid [Apache License 2.0](https://github.com/nats-io/nuid/blob/master/LICENSE) - github.com/nsqio/go-nsq [MIT License](https://github.com/nsqio/go-nsq/blob/master/LICENSE) - github.com/openconfig/gnmi [Apache License 2.0](https://github.com/openconfig/gnmi/blob/master/LICENSE) - github.com/opencontainers/go-digest [Apache License 2.0](https://github.com/opencontainers/go-digest/blob/master/LICENSE) - github.com/opencontainers/image-spec [Apache License 2.0](https://github.com/opencontainers/image-spec/blob/master/LICENSE) -- github.com/opentracing-contrib/go-observer [Apache License 2.0](https://github.com/opentracing-contrib/go-observer/blob/master/LICENSE) -- github.com/opentracing/opentracing-go [MIT License](https://github.com/opentracing/opentracing-go/blob/master/LICENSE) - github.com/openzipkin/zipkin-go-opentracing [MIT License](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE) - github.com/pierrec/lz4 [BSD 3-Clause "New" or "Revised" License](https://github.com/pierrec/lz4/blob/master/LICENSE) - github.com/pkg/errors [BSD 2-Clause "Simplified" License](https://github.com/pkg/errors/blob/master/LICENSE) @@ -103,23 +106,22 @@ following works: - github.com/prometheus/common [Apache License 2.0](https://github.com/prometheus/common/blob/master/LICENSE) - github.com/prometheus/procfs [Apache License 2.0](https://github.com/prometheus/procfs/blob/master/LICENSE) - github.com/rcrowley/go-metrics [MIT License](https://github.com/rcrowley/go-metrics/blob/master/LICENSE) +- github.com/safchain/ethtool [Apache License 2.0](https://github.com/safchain/ethtool/blob/master/LICENSE) - github.com/samuel/go-zookeeper [BSD 3-Clause Clear License](https://github.com/samuel/go-zookeeper/blob/master/LICENSE) - github.com/shirou/gopsutil [BSD 3-Clause Clear License](https://github.com/shirou/gopsutil/blob/master/LICENSE) -- github.com/shirou/w32 [BSD 3-Clause Clear License](https://github.com/shirou/w32/blob/master/LICENSE) -- github.com/Shopify/sarama [MIT License](https://github.com/Shopify/sarama/blob/master/LICENSE) - github.com/sirupsen/logrus [MIT License](https://github.com/sirupsen/logrus/blob/master/LICENSE) - github.com/soniah/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/soniah/gosnmp/blob/master/LICENSE) -- github.com/StackExchange/wmi [MIT License](https://github.com/StackExchange/wmi/blob/master/LICENSE) - github.com/streadway/amqp [BSD 2-Clause "Simplified" License](https://github.com/streadway/amqp/blob/master/LICENSE) - github.com/stretchr/objx [MIT 
License](https://github.com/stretchr/objx/blob/master/LICENSE) - github.com/stretchr/testify [custom -- permissive](https://github.com/stretchr/testify/blob/master/LICENSE) - github.com/tidwall/gjson [MIT License](https://github.com/tidwall/gjson/blob/master/LICENSE) - github.com/tidwall/match [MIT License](https://github.com/tidwall/match/blob/master/LICENSE) +- github.com/tidwall/pretty [MIT License](https://github.com/tidwall/pretty/blob/master/LICENSE) - github.com/vishvananda/netlink [Apache License 2.0](https://github.com/vishvananda/netlink/blob/master/LICENSE) - github.com/vishvananda/netns [Apache License 2.0](https://github.com/vishvananda/netns/blob/master/LICENSE) - github.com/vjeantet/grok [Apache License 2.0](https://github.com/vjeantet/grok/blob/master/LICENSE) - github.com/vmware/govmomi [Apache License 2.0](https://github.com/vmware/govmomi/blob/master/LICENSE.txt) -- github.com/wavefrontHQ/wavefront-sdk-go [Apache License 2.0](https://github.com/wavefrontHQ/wavefront-sdk-go/blob/master/LICENSE) +- github.com/wavefronthq/wavefront-sdk-go [Apache License 2.0](https://github.com/wavefrontHQ/wavefront-sdk-go/blob/master/LICENSE) - github.com/wvanbergen/kafka [MIT License](https://github.com/wvanbergen/kafka/blob/master/LICENSE) - github.com/wvanbergen/kazoo-go [MIT License](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE) - github.com/yuin/gopher-lua [MIT License](https://github.com/yuin/gopher-lua/blob/master/LICENSE) @@ -131,7 +133,6 @@ following works: - golang.org/x/sys [BSD 3-Clause Clear License](https://github.com/golang/sys/blob/master/LICENSE) - golang.org/x/text [BSD 3-Clause Clear License](https://github.com/golang/text/blob/master/LICENSE) - google.golang.org/api [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/google-api-go-client/blob/master/LICENSE) -- google.golang.org/appengine [Apache License 2.0](https://github.com/golang/appengine/blob/master/LICENSE) - google.golang.org/genproto [Apache License 2.0](https://github.com/google/go-genproto/blob/master/LICENSE) - google.golang.org/grpc [Apache License 2.0](https://github.com/grpc/grpc-go/blob/master/LICENSE) - gopkg.in/asn1-ber.v1 [MIT License](https://github.com/go-asn1-ber/asn1-ber/blob/v1.3/LICENSE) @@ -139,7 +140,11 @@ following works: - gopkg.in/fsnotify.v1 [BSD 3-Clause "New" or "Revised" License](https://github.com/fsnotify/fsnotify/blob/v1.4.7/LICENSE) - gopkg.in/gorethink/gorethink.v3 [Apache License 2.0](https://github.com/rethinkdb/rethinkdb-go/blob/v3.0.5/LICENSE) - gopkg.in/inf.v0 [BSD 3-Clause "New" or "Revised" License](https://github.com/go-inf/inf/blob/v0.9.1/LICENSE) -- gopkg.in/ldap.v2 [MIT License](https://github.com/go-ldap/ldap/blob/v2.5.1/LICENSE) +- gopkg.in/jcmturner/aescts.v1 [Apache License 2.0](https://github.com/jcmturner/aescts/blob/v1.0.1/LICENSE) +- gopkg.in/jcmturner/dnsutils.v1 [Apache License 2.0](https://github.com/jcmturner/dnsutils/blob/v1.0.1/LICENSE) +- gopkg.in/jcmturner/gokrb5.v7 [Apache License 2.0](https://github.com/jcmturner/gokrb5/tree/v7.5.0/LICENSE) +- gopkg.in/jcmturner/rpc.v1 [Apache License 2.0](https://github.com/jcmturner/rpc/blob/v1.1.0/LICENSE) +- gopkg.in/ldap.v3 [MIT License](https://github.com/go-ldap/ldap/blob/v3.1.7/LICENSE) - gopkg.in/mgo.v2 [BSD 2-Clause "Simplified" License](https://github.com/go-mgo/mgo/blob/v2/LICENSE) - gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE) - gopkg.in/tomb.v1 [BSD 3-Clause Clear 
License](https://github.com/go-tomb/tomb/blob/v1/LICENSE) diff --git a/go.mod b/go.mod index 93d92db27..18550da56 100644 --- a/go.mod +++ b/go.mod @@ -136,3 +136,6 @@ require ( gotest.tools v2.2.0+incompatible // indirect k8s.io/apimachinery v0.17.1 ) + +// replaced due to https://github.com/satori/go.uuid/issues/73 +replace github.com/satori/go.uuid => github.com/gofrs/uuid v3.2.0+incompatible diff --git a/go.sum b/go.sum index fc3ef5dcc..db4f27306 100644 --- a/go.sum +++ b/go.sum @@ -158,6 +158,8 @@ github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gofrs/uuid v2.1.0+incompatible h1:8oEj3gioPmmDAOLQUZdnW+h4FZu9aSE/SQIas1E9pzA= github.com/gofrs/uuid v2.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -389,8 +391,6 @@ github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec h1:6ncX5ko6B9LntYM0YBRXkiSaZMmLYeZ/NWcmeB43mMY= github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM= -github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shirou/gopsutil v2.20.1+incompatible h1:oIq9Cq4i84Hk8uQAUOG3eNdI/29hBawGrD5YRl6JRDY= diff --git a/scripts/check-deps.sh b/scripts/check-deps.sh new file mode 100755 index 000000000..dcdcec029 --- /dev/null +++ b/scripts/check-deps.sh @@ -0,0 +1,51 @@ +#!/bin/sh + +tmpdir="$(mktemp -d)" + +cleanup() { + rm -rf "$tmpdir" +} +trap cleanup EXIT + +targets="$(go tool dist list)" + +for target in ${targets}; do + # only check platforms we build for + case "${target}" in + linux/*) ;; + windows/*) ;; + freebsd/*) ;; + darwin/*) ;; + *) continue;; + esac + + GOOS=${target%%/*} GOARCH=${target##*/} \ + go list -deps -f '{{with .Module}}{{.Path}}{{end}}' ./cmd/telegraf/ >> "${tmpdir}/golist" +done + +for dep in $(LC_ALL=C sort -u "${tmpdir}/golist"); do + case "${dep}" in + # ignore ourselves + github.com/influxdata/telegraf) continue;; + + # dependency is replaced in go.mod + github.com/satori/go.uuid) continue;; + + # go-autorest has a single license for all sub modules + github.com/Azure/go-autorest/autorest) + dep=github.com/Azure/go-autorest;; + github.com/Azure/go-autorest/*) + continue;; + esac + + # Remove single and double digit version from path; these are generally not + # actual parts of the path and instead indicate a branch or tag. 
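+  # Only "/v" followed by one or two digits is stripped, i.e. a Go module
+  # major-version suffix.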
+ # example: github.com/influxdata/go-syslog/v2 -> github.com/influxdata/go-syslog + dep="${dep%%/v[0-9]}" + dep="${dep%%/v[0-9][0-9]}" + + echo "${dep}" >> "${tmpdir}/actual" +done + +grep '^-' docs/LICENSE_OF_DEPENDENCIES.md | grep -v github.com/DataDog/datadog-agent | cut -f 2 -d' ' > "${tmpdir}/expected" +diff -U0 "${tmpdir}/expected" "${tmpdir}/actual" From d35290ac7e7f38708012cc19abff6ba2272efaf8 Mon Sep 17 00:00:00 2001 From: Kevin Lin Date: Mon, 9 Mar 2020 15:34:43 -0700 Subject: [PATCH 1586/1815] plugins/inputs: New input for Wireguard server (#6367) --- README.md | 1 + etc/telegraf.conf | 7 ++ go.mod | 3 +- go.sum | 31 +++++ plugins/inputs/all/all.go | 1 + plugins/inputs/wireguard/README.md | 73 +++++++++++ plugins/inputs/wireguard/wireguard.go | 139 +++++++++++++++++++++ plugins/inputs/wireguard/wireguard_test.go | 84 +++++++++++++ 8 files changed, 338 insertions(+), 1 deletion(-) create mode 100644 plugins/inputs/wireguard/README.md create mode 100644 plugins/inputs/wireguard/wireguard.go create mode 100644 plugins/inputs/wireguard/wireguard_test.go diff --git a/README.md b/README.md index bee34acd4..2e84559c3 100644 --- a/README.md +++ b/README.md @@ -321,6 +321,7 @@ For documentation on the latest development code see the [documentation index][d * [rollbar](./plugins/inputs/webhooks/rollbar) * [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters) * [win_services](./plugins/inputs/win_services) +* [wireguard](./plugins/inputs/wireguard) * [wireless](./plugins/inputs/wireless) * [x509_cert](./plugins/inputs/x509_cert) * [zfs](./plugins/inputs/zfs) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 79add5bd2..dfa9a974e 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -4647,6 +4647,13 @@ # # timeout = "1s" +# # Collect Wireguard server interface and peer statistics +# [[inputs.wireguard]] +# ## Optional list of Wireguard device/interface names to query. +# ## If omitted, all Wireguard interfaces are queried. 
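+# ## Device names should match those reported by 'wg show'.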
+# # devices = ["wg0"] + + # # Monitor wifi signal strength and quality # [[inputs.wireless]] # ## Sets 'proc' directory path diff --git a/go.mod b/go.mod index 18550da56..de62dc620 100644 --- a/go.mod +++ b/go.mod @@ -51,7 +51,7 @@ require ( github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d github.com/golang/mock v1.3.1-0.20190508161146-9fa652df1129 // indirect github.com/golang/protobuf v1.3.2 - github.com/google/go-cmp v0.3.0 + github.com/google/go-cmp v0.4.0 github.com/google/go-github v17.0.0+incompatible github.com/google/go-querystring v1.0.0 // indirect github.com/gorilla/mux v1.6.2 @@ -122,6 +122,7 @@ require ( golang.org/x/net v0.0.0-20200202094626-16171245cfb2 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 + golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 gonum.org/v1/gonum v0.6.2 // indirect google.golang.org/api v0.3.1 google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 diff --git a/go.sum b/go.sum index db4f27306..b15ec7343 100644 --- a/go.sum +++ b/go.sum @@ -186,6 +186,9 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= @@ -258,6 +261,9 @@ github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4 h1:nwOc1YaOrYJ37sEBrtWZrdqzK22hiJs3GpDmP3sR2Yw= +github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= @@ -300,8 +306,16 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe h1:yMrL+YorbzaBpj/h3BbLMP+qeslPZYMbzcpHFBNy1Yk= github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe/go.mod h1:y3mw3VG+t0m20OMqpG8RQqw8cDXvShVb+L8Z8FEnebw= 
+github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0= +github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= +github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= +github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.1.0 h1:mpdLgm+brq10nI9zM1BpX1kpDbh3NLl3RSnVq6ZSkfg= +github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= +github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= @@ -451,8 +465,11 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 h1:+ELyKg6m8UBf0nPFSqD0mi7zUfwPyXo23HNjMnXPz7w= +golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -476,8 +493,11 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2 
h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -498,11 +518,16 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -522,6 +547,12 @@ golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.zx2c4.com/wireguard v0.0.20200121 h1:vcswa5Q6f+sylDfjqyrVNNrjsFUUbPsgAQTBCAg/Qf8= +golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4= +golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 h1:KTi97NIQGgSMaN0v/oxniJV0MEzfzmrDUOAWxombQVc= +golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:UdS9frhv65KTfwxME1xE8+rHYoFpbm36gOud1GhBe9c= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.6.2 h1:4r+yNT0+8SWcOkXP+63H2zQbN+USnC73cjGUxnDF94Q= gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 2484df614..ace0d0044 100644 --- a/plugins/inputs/all/all.go +++ 
b/plugins/inputs/all/all.go
@@ -170,6 +170,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/webhooks"
 	_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
 	_ "github.com/influxdata/telegraf/plugins/inputs/win_services"
+	_ "github.com/influxdata/telegraf/plugins/inputs/wireguard"
 	_ "github.com/influxdata/telegraf/plugins/inputs/wireless"
 	_ "github.com/influxdata/telegraf/plugins/inputs/x509_cert"
 	_ "github.com/influxdata/telegraf/plugins/inputs/zfs"
diff --git a/plugins/inputs/wireguard/README.md b/plugins/inputs/wireguard/README.md
new file mode 100644
index 000000000..57e16ba49
--- /dev/null
+++ b/plugins/inputs/wireguard/README.md
@@ -0,0 +1,73 @@
+# Wireguard Input Plugin
+
+The Wireguard input plugin collects statistics on the local Wireguard server
+using the [`wgctrl`](https://github.com/WireGuard/wgctrl-go) library. It
+reports gauge metrics for Wireguard interface device(s) and their peers.
+
+### Configuration
+
+```toml
+# Collect Wireguard server interface and peer statistics
+[[inputs.wireguard]]
+  ## Optional list of Wireguard device/interface names to query.
+  ## If omitted, all Wireguard interfaces are queried.
+  # devices = ["wg0"]
+```
+
+### Metrics
+
+- `wireguard_device`
+  - tags:
+    - `name` (interface device name, e.g. `wg0`)
+    - `type` (Wireguard tunnel type, e.g. `linux_kernel` or `userspace`)
+  - fields:
+    - `listen_port` (int, UDP port on which the interface is listening)
+    - `firewall_mark` (int, device's current firewall mark)
+    - `peers` (int, number of peers associated with the device)
+
+- `wireguard_peer`
+  - tags:
+    - `device` (associated interface device name, e.g. `wg0`)
+    - `public_key` (peer public key, e.g. `NZTRIrv/ClTcQoNAnChEot+WL7OH7uEGQmx8oAN9rWE=`)
+  - fields:
+    - `persistent_keepalive_interval_ns` (int, keepalive interval in nanoseconds; 0 if unset)
+    - `protocol_version` (int, Wireguard protocol version number)
+    - `allowed_ips` (int, number of allowed IPs for this peer)
+    - `last_handshake_time_ns` (int, Unix timestamp of the last handshake for this peer in nanoseconds)
+    - `rx_bytes` (int, number of bytes received from this peer)
+    - `tx_bytes` (int, number of bytes transmitted to this peer)
+
+### Troubleshooting
+
+#### Error: `operation not permitted`
+
+When the kernelspace implementation of Wireguard is in use (as opposed to its
+userspace implementations), Telegraf communicates with the module over netlink.
+This requires Telegraf to either run as root, or for the Telegraf binary to
+have the `CAP_NET_ADMIN` capability.
+
+To add this capability to the Telegraf binary (to allow this communication under
+the default user `telegraf`):
+
+```bash
+$ sudo setcap CAP_NET_ADMIN+epi $(which telegraf)
+```
+
+N.B.: This capability is a filesystem attribute on the binary itself. The
+attribute needs to be re-applied if the Telegraf binary is replaced (e.g.
+on installation of a new Telegraf version from the system package manager).
+
+#### Error: `error enumerating Wireguard devices`
+
+This usually happens when the device names specified in config are invalid.
+Ensure that `sudo wg show` succeeds, and that the device names in config match
+those printed by this command.
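+
+#### Verifying device visibility
+
+If it is unclear what the plugin can see, the same `wgctrl` calls it makes can
+be exercised directly. The following is a minimal standalone sketch (it is not
+part of the plugin) that enumerates devices and peers the way `Gather` does;
+it must run with the same privileges as Telegraf (root or `CAP_NET_ADMIN`):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"golang.zx2c4.com/wireguard/wgctrl"
+)
+
+func main() {
+	client, err := wgctrl.New()
+	if err != nil {
+		log.Fatalf("error opening wgctrl client: %v", err)
+	}
+	defer client.Close()
+
+	// Same call the plugin makes when no device names are configured.
+	devices, err := client.Devices()
+	if err != nil {
+		log.Fatalf("error enumerating Wireguard devices: %v", err)
+	}
+
+	for _, d := range devices {
+		fmt.Printf("device %s: listen_port=%d peers=%d\n", d.Name, d.ListenPort, len(d.Peers))
+		for _, p := range d.Peers {
+			fmt.Printf("  peer %s: rx=%d tx=%d last_handshake=%s\n",
+				p.PublicKey, p.ReceiveBytes, p.TransmitBytes, p.LastHandshakeTime)
+		}
+	}
+}
+```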
+ +### Example Output + +``` +wireguard_device,host=WGVPN,name=wg0,type=linux_kernel firewall_mark=51820i,listen_port=58216i 1582513589000000000 +wireguard_device,host=WGVPN,name=wg0,type=linux_kernel peers=1i 1582513589000000000 +wireguard_peer,device=wg0,host=WGVPN,public_key=NZTRIrv/ClTcQoNAnChEot+WL7OH7uEGQmx8oAN9rWE= allowed_ips=2i,persistent_keepalive_interval_ns=60000000000i,protocol_version=1i 1582513589000000000 +wireguard_peer,device=wg0,host=WGVPN,public_key=NZTRIrv/ClTcQoNAnChEot+WL7OH7uEGQmx8oAN9rWE= last_handshake_time_ns=1582513584530013376i,rx_bytes=6484i,tx_bytes=13540i 1582513589000000000 +``` diff --git a/plugins/inputs/wireguard/wireguard.go b/plugins/inputs/wireguard/wireguard.go new file mode 100644 index 000000000..ded332837 --- /dev/null +++ b/plugins/inputs/wireguard/wireguard.go @@ -0,0 +1,139 @@ +package wireguard + +import ( + "fmt" + "log" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "golang.zx2c4.com/wireguard/wgctrl" + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" +) + +const ( + measurementDevice = "wireguard_device" + measurementPeer = "wireguard_peer" +) + +var ( + deviceTypeNames = map[wgtypes.DeviceType]string{ + wgtypes.Unknown: "unknown", + wgtypes.LinuxKernel: "linux_kernel", + wgtypes.Userspace: "userspace", + } +) + +// Wireguard is an input that enumerates all Wireguard interfaces/devices on +// the host, and reports gauge metrics for the device itself and its peers. +type Wireguard struct { + Devices []string `toml:"devices"` + + client *wgctrl.Client +} + +func (wg *Wireguard) Description() string { + return "Collect Wireguard server interface and peer statistics" +} + +func (wg *Wireguard) SampleConfig() string { + return ` + ## Optional list of Wireguard device/interface names to query. + ## If omitted, all Wireguard interfaces are queried. + # devices = ["wg0"] +` +} + +func (wg *Wireguard) Init() error { + var err error + + wg.client, err = wgctrl.New() + + return err +} + +func (wg *Wireguard) Gather(acc telegraf.Accumulator) error { + devices, err := wg.enumerateDevices() + if err != nil { + return fmt.Errorf("error enumerating Wireguard devices: %v", err) + } + + for _, device := range devices { + wg.gatherDeviceMetrics(acc, device) + + for _, peer := range device.Peers { + wg.gatherDevicePeerMetrics(acc, device, peer) + } + } + + return nil +} + +func (wg *Wireguard) enumerateDevices() ([]*wgtypes.Device, error) { + var devices []*wgtypes.Device + + // If no device names are specified, defer to the library to enumerate + // all of them + if len(wg.Devices) == 0 { + return wg.client.Devices() + } + + // Otherwise, explicitly populate only device names specified in config + for _, name := range wg.Devices { + dev, err := wg.client.Device(name) + if err != nil { + log.Printf("W! 
[inputs.wireguard] No Wireguard device found with name %s", name) + continue + } + + devices = append(devices, dev) + } + + return devices, nil +} + +func (wg *Wireguard) gatherDeviceMetrics(acc telegraf.Accumulator, device *wgtypes.Device) { + fields := map[string]interface{}{ + "listen_port": device.ListenPort, + "firewall_mark": device.FirewallMark, + } + + gauges := map[string]interface{}{ + "peers": len(device.Peers), + } + + tags := map[string]string{ + "name": device.Name, + "type": deviceTypeNames[device.Type], + } + + acc.AddFields(measurementDevice, fields, tags) + acc.AddGauge(measurementDevice, gauges, tags) +} + +func (wg *Wireguard) gatherDevicePeerMetrics(acc telegraf.Accumulator, device *wgtypes.Device, peer wgtypes.Peer) { + fields := map[string]interface{}{ + "persistent_keepalive_interval_ns": peer.PersistentKeepaliveInterval.Nanoseconds(), + "protocol_version": peer.ProtocolVersion, + "allowed_ips": len(peer.AllowedIPs), + } + + gauges := map[string]interface{}{ + "last_handshake_time_ns": peer.LastHandshakeTime.UnixNano(), + "rx_bytes": peer.ReceiveBytes, + "tx_bytes": peer.TransmitBytes, + } + + tags := map[string]string{ + "device": device.Name, + "public_key": peer.PublicKey.String(), + } + + acc.AddFields(measurementPeer, fields, tags) + acc.AddGauge(measurementPeer, gauges, tags) +} + +func init() { + inputs.Add("wireguard", func() telegraf.Input { + return &Wireguard{} + }) +} diff --git a/plugins/inputs/wireguard/wireguard_test.go b/plugins/inputs/wireguard/wireguard_test.go new file mode 100644 index 000000000..0cfdba75d --- /dev/null +++ b/plugins/inputs/wireguard/wireguard_test.go @@ -0,0 +1,84 @@ +package wireguard + +import ( + "net" + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" +) + +func TestWireguard_gatherDeviceMetrics(t *testing.T) { + var acc testutil.Accumulator + + wg := &Wireguard{} + device := &wgtypes.Device{ + Name: "wg0", + Type: wgtypes.LinuxKernel, + ListenPort: 1, + FirewallMark: 2, + Peers: []wgtypes.Peer{{}, {}}, + } + + expectFields := map[string]interface{}{ + "listen_port": 1, + "firewall_mark": 2, + } + expectGauges := map[string]interface{}{ + "peers": 2, + } + expectTags := map[string]string{ + "name": "wg0", + "type": "linux_kernel", + } + + wg.gatherDeviceMetrics(&acc, device) + + assert.Equal(t, 3, acc.NFields()) + acc.AssertDoesNotContainMeasurement(t, measurementPeer) + acc.AssertContainsTaggedFields(t, measurementDevice, expectFields, expectTags) + acc.AssertContainsTaggedFields(t, measurementDevice, expectGauges, expectTags) +} + +func TestWireguard_gatherDevicePeerMetrics(t *testing.T) { + var acc testutil.Accumulator + pubkey, _ := wgtypes.ParseKey("NZTRIrv/ClTcQoNAnChEot+WL7OH7uEGQmx8oAN9rWE=") + + wg := &Wireguard{} + device := &wgtypes.Device{ + Name: "wg0", + } + peer := wgtypes.Peer{ + PublicKey: pubkey, + PersistentKeepaliveInterval: 1 * time.Minute, + LastHandshakeTime: time.Unix(100, 0), + ReceiveBytes: int64(40), + TransmitBytes: int64(60), + AllowedIPs: []net.IPNet{{}, {}}, + ProtocolVersion: 0, + } + + expectFields := map[string]interface{}{ + "persistent_keepalive_interval_ns": int64(60000000000), + "protocol_version": 0, + "allowed_ips": 2, + } + expectGauges := map[string]interface{}{ + "last_handshake_time_ns": int64(100000000000), + "rx_bytes": int64(40), + "tx_bytes": int64(60), + } + expectTags := map[string]string{ + "device": "wg0", + "public_key": pubkey.String(), + } + + 
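+	// gatherDevicePeerMetrics emits the peer fields and gauges as two separate
+	// metric groups on the same measurement, so 3 + 3 = 6 fields are expected.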
wg.gatherDevicePeerMetrics(&acc, device, peer) + + assert.Equal(t, 6, acc.NFields()) + acc.AssertDoesNotContainMeasurement(t, measurementDevice) + acc.AssertContainsTaggedFields(t, measurementPeer, expectFields, expectTags) + acc.AssertContainsTaggedFields(t, measurementPeer, expectGauges, expectTags) +} From 874804a2ad0f131100a87fd4f83e803a1264f273 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Mon, 9 Mar 2020 18:46:04 -0400 Subject: [PATCH 1587/1815] update changelog --- CHANGELOG.md | 1 + docs/LICENSE_OF_DEPENDENCIES.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 86db632ec..fc4847767 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ - [infiniband](/plugins/inputs/infiniband/README.md) - Contributed by @willfurnell - [modbus](/plugins/inputs/modbus/README.md) - Contributed by @garciaolais - [monit](/plugins/inputs/monit/README.md) - Contributed by @SirishaGopigiri +- [wireguard](/plugins/inputs/wireguard/README.md) - Contributed by @LINKIWI #### New Processors diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index e2ba9a812..8342cc041 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -125,6 +125,7 @@ following works: - github.com/wvanbergen/kafka [MIT License](https://github.com/wvanbergen/kafka/blob/master/LICENSE) - github.com/wvanbergen/kazoo-go [MIT License](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE) - github.com/yuin/gopher-lua [MIT License](https://github.com/yuin/gopher-lua/blob/master/LICENSE) +- golang.zx2c4.com/wireguard/wgctrl [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) - go.opencensus.io [Apache License 2.0](https://github.com/census-instrumentation/opencensus-go/blob/master/LICENSE) - golang.org/x/crypto [BSD 3-Clause Clear License](https://github.com/golang/crypto/blob/master/LICENSE) - golang.org/x/net [BSD 3-Clause Clear License](https://github.com/golang/net/blob/master/LICENSE) From 595c8b607532963d507c8b1bccf7f00b57ea93dc Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 10 Mar 2020 09:47:26 -0400 Subject: [PATCH 1588/1815] update list of dependencies --- docs/LICENSE_OF_DEPENDENCIES.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 8342cc041..7ed5e393c 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -83,6 +83,8 @@ following works: - github.com/mailru/easyjson [MIT License](https://github.com/mailru/easyjson/blob/master/LICENSE) - github.com/matttproud/golang_protobuf_extensions [Apache License 2.0](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE) - github.com/mdlayher/apcupsd [MIT License](https://github.com/mdlayher/apcupsd/blob/master/LICENSE.md) +- github.com/mdlayher/genetlink [MIT License](https://github.com/mdlayher/genetlink/blob/master/LICENSE.md) +- github.com/mdlayher/netlink [MIT License](https://github.com/mdlayher/netlink/blob/master/LICENSE.md) - github.com/miekg/dns [BSD 3-Clause Clear License](https://github.com/miekg/dns/blob/master/LICENSE) - github.com/mitchellh/go-homedir [MIT License](https://github.com/mitchellh/go-homedir/blob/master/LICENSE) - github.com/mitchellh/mapstructure [MIT License](https://github.com/mitchellh/mapstructure/blob/master/LICENSE) @@ -125,7 +127,7 @@ following works: - github.com/wvanbergen/kafka [MIT License](https://github.com/wvanbergen/kafka/blob/master/LICENSE) - 
github.com/wvanbergen/kazoo-go [MIT License](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE) - github.com/yuin/gopher-lua [MIT License](https://github.com/yuin/gopher-lua/blob/master/LICENSE) -- golang.zx2c4.com/wireguard/wgctrl [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) +- golang.zx2c4.com/wireguard [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) - go.opencensus.io [Apache License 2.0](https://github.com/census-instrumentation/opencensus-go/blob/master/LICENSE) - golang.org/x/crypto [BSD 3-Clause Clear License](https://github.com/golang/crypto/blob/master/LICENSE) - golang.org/x/net [BSD 3-Clause Clear License](https://github.com/golang/net/blob/master/LICENSE) From a76d10a44ee2ee9372a676b93ea71aad66f3976f Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 10 Mar 2020 09:59:37 -0400 Subject: [PATCH 1589/1815] sort lines --- docs/LICENSE_OF_DEPENDENCIES.md | 3 ++- go.sum | 5 ----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 7ed5e393c..f844ad1a1 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -127,7 +127,6 @@ following works: - github.com/wvanbergen/kafka [MIT License](https://github.com/wvanbergen/kafka/blob/master/LICENSE) - github.com/wvanbergen/kazoo-go [MIT License](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE) - github.com/yuin/gopher-lua [MIT License](https://github.com/yuin/gopher-lua/blob/master/LICENSE) -- golang.zx2c4.com/wireguard [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) - go.opencensus.io [Apache License 2.0](https://github.com/census-instrumentation/opencensus-go/blob/master/LICENSE) - golang.org/x/crypto [BSD 3-Clause Clear License](https://github.com/golang/crypto/blob/master/LICENSE) - golang.org/x/net [BSD 3-Clause Clear License](https://github.com/golang/net/blob/master/LICENSE) @@ -135,6 +134,8 @@ following works: - golang.org/x/sync [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/sync/blob/master/LICENSE) - golang.org/x/sys [BSD 3-Clause Clear License](https://github.com/golang/sys/blob/master/LICENSE) - golang.org/x/text [BSD 3-Clause Clear License](https://github.com/golang/text/blob/master/LICENSE) +- golang.zx2c4.com/wireguard [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) +- golang.zx2c4.com/wireguard/wgctrl [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) - google.golang.org/api [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/google-api-go-client/blob/master/LICENSE) - google.golang.org/genproto [Apache License 2.0](https://github.com/google/go-genproto/blob/master/LICENSE) - google.golang.org/grpc [Apache License 2.0](https://github.com/grpc/grpc-go/blob/master/LICENSE) diff --git a/go.sum b/go.sum index b15ec7343..30009b6c2 100644 --- a/go.sum +++ b/go.sum @@ -615,12 +615,7 @@ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81 honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/apimachinery v0.17.1 h1:zUjS3szTxoUjTDYNvdFkYt2uMEXLcthcbp+7uZvWhYM= -k8s.io/apimachinery v0.17.1/go.mod 
h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= From 830a922a41ab80ade0ad71f8deddd580c02ac890 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 10 Mar 2020 10:21:23 -0400 Subject: [PATCH 1590/1815] fix deps --- docs/LICENSE_OF_DEPENDENCIES.md | 2 ++ go.sum | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index f844ad1a1..2dae78856 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -153,6 +153,8 @@ following works: - gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE) - gopkg.in/tomb.v1 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v1/LICENSE) - gopkg.in/yaml.v2 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v2.2.2/LICENSE) +- k8s.io/apimachinery [Apache License 2.0](https://github.com/kubernetes/apimachinery/blob/master/LICENSE) +- k8s.io/klog [Apache License 2.0](https://github.com/kubernetes/klog/blob/master/LICENSE) ## telegraf used and modified code from these projects - github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE) diff --git a/go.sum b/go.sum index 30009b6c2..b15ec7343 100644 --- a/go.sum +++ b/go.sum @@ -615,7 +615,12 @@ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81 honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/apimachinery v0.17.1 h1:zUjS3szTxoUjTDYNvdFkYt2uMEXLcthcbp+7uZvWhYM= +k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= From 1ce56f8b594156386cf4e17cb0ac73da40c8af3e Mon Sep 17 00:00:00 2001 From: Harshit Bansal Date: Wed, 11 Mar 2020 00:32:50 +0530 Subject: [PATCH 1591/1815] Document port specification in postgres input plugins (#7132) --- etc/telegraf.conf | 18 +++++++++--------- plugins/inputs/pgbouncer/README.md | 6 +++--- plugins/inputs/postgresql/README.md | 6 +++--- 
plugins/inputs/postgresql_extensible/README.md | 8 ++++---- 4 files changed, 19 insertions(+), 19 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index dfa9a974e..a13162b52 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -3772,7 +3772,7 @@ # # timeout = "5ms" -# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver +# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver # [[inputs.opensmtpd]] # ## If running as a restricted user you can prepend sudo for additional access: # #use_sudo = false @@ -5592,10 +5592,10 @@ # # Read metrics from one or many pgbouncer servers # [[inputs.pgbouncer]] # ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## postgres://[pqgotest[:password]]@host:port[/dbname]\ # ## ?sslmode=[disable|verify-ca|verify-full] # ## or a simple string: -# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# ## host=localhost port=5432 user=pqotest password=... sslmode=... dbname=app_production # ## # ## All connection parameters are optional. # ## @@ -5605,10 +5605,10 @@ # # Read metrics from one or many postgresql servers # [[inputs.postgresql]] # ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## postgres://[pqgotest[:password]]@host:port[/dbname]\ # ## ?sslmode=[disable|verify-ca|verify-full] # ## or a simple string: -# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# ## host=localhost port=5432 user=pqotest password=... sslmode=... dbname=app_production # ## # ## All connection parameters are optional. # ## @@ -5617,7 +5617,7 @@ # ## connection with the server and doesn't restrict the databases we are trying # ## to grab metrics for. # ## -# address = "host=localhost user=postgres sslmode=disable" +# address = "host=localhost port=5432 user=postgres sslmode=disable" # ## A custom name for the database that will be used as the "server" tag in the # ## measurement output. If not specified, a default one generated from # ## the connection address is used. @@ -5640,10 +5640,10 @@ # # Read metrics from one or many postgresql servers # [[inputs.postgresql_extensible]] # ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## postgres://[pqgotest[:password]]@host:port[/dbname]\ # ## ?sslmode=[disable|verify-ca|verify-full] # ## or a simple string: -# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# ## host=localhost port=5432 user=pqotest password=... sslmode=... dbname=app_production # # # ## All connection parameters are optional. # # ## Without the dbname parameter, the driver will default to a database @@ -5651,7 +5651,7 @@ # ## connection with the server and doesn't restrict the databases we are trying # ## to grab metrics for. # # -# address = "host=localhost user=postgres sslmode=disable" +# address = "host=localhost port=5432 user=postgres sslmode=disable" # # ## connection configuration. # ## maxlifetime - specify the maximum lifetime of a connection. 
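As an aside, the key/value and URL address forms documented above are interchangeable, and the port can be carried in either. The sketch below uses `pq.ParseURL` from the `github.com/lib/pq` driver — an assumption made purely for illustration, not necessarily the exact driver these plugins use — to rewrite the URL form into the key/value form, which makes it easy to confirm that an explicit port survives the conversion:

```go
package main

import (
	"fmt"
	"log"

	"github.com/lib/pq"
)

func main() {
	// URL form with an explicit port, matching the documented pattern.
	url := "postgres://pqgotest:password@localhost:5432/app_production?sslmode=verify-full"

	// ParseURL rewrites the URL into the equivalent key/value form,
	// e.g. "dbname=app_production host=localhost ... port=5432 ...".
	kv, err := pq.ParseURL(url)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(kv)
}
```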
diff --git a/plugins/inputs/pgbouncer/README.md b/plugins/inputs/pgbouncer/README.md index 58e3352c3..987b6a382 100644 --- a/plugins/inputs/pgbouncer/README.md +++ b/plugins/inputs/pgbouncer/README.md @@ -12,10 +12,10 @@ More information about the meaning of these metrics can be found in the ```toml [[inputs.pgbouncer]] ## specify address via a url matching: - ## postgres://[pqgotest[:password]]@localhost[/dbname]\ + ## postgres://[pqgotest[:password]]@host:port[/dbname]\ ## ?sslmode=[disable|verify-ca|verify-full] ## or a simple string: - ## host=localhost user=pqotest password=... sslmode=... dbname=app_production + ## host=localhost port=5432 user=pqotest password=... sslmode=... dbname=app_production ## ## All connection parameters are optional. ## @@ -30,7 +30,7 @@ Specify address via a postgresql connection string: Or via an url matching: - `postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]` + `postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=[disable|verify-ca|verify-full]` All connection parameters are optional. diff --git a/plugins/inputs/postgresql/README.md b/plugins/inputs/postgresql/README.md index 21f9097aa..2ebc33ad6 100644 --- a/plugins/inputs/postgresql/README.md +++ b/plugins/inputs/postgresql/README.md @@ -33,11 +33,11 @@ More information about the meaning of these metrics can be found in the [Postgre ## Configuration Specify address via a postgresql connection string: - `host=/run/postgresql user=telegraf database=telegraf` - + `host=localhost port=5432 user=telegraf database=telegraf` + Or via an url matching: - `postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]` + `postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=[disable|verify-ca|verify-full]` All connection parameters are optional. Without the dbname parameter, the driver will default to a database with the same name as the user. This dbname is just for instantiating a connection with the server and doesn't restrict the databases we are trying to grab metrics for. diff --git a/plugins/inputs/postgresql_extensible/README.md b/plugins/inputs/postgresql_extensible/README.md index 4621e46b5..337b13d1b 100644 --- a/plugins/inputs/postgresql_extensible/README.md +++ b/plugins/inputs/postgresql_extensible/README.md @@ -14,11 +14,11 @@ The example below has two queries are specified, with the following parameters: ``` [[inputs.postgresql_extensible]] # specify address via a url matching: - # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=... + # postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=... # or a simple string: - # host=localhost user=pqotest password=... sslmode=... dbname=app_production + # host=localhost port=5432 user=pqotest password=... sslmode=... dbname=app_production # - # All connection parameters are optional. + # All connection parameters are optional. # Without the dbname parameter, the driver will default to a database # with the same name as the user. This dbname is just for instantiating a # connection with the server and doesn't restrict the databases we are trying @@ -45,7 +45,7 @@ The example below has two queries are specified, with the following parameters: # the where clause (aka with the dbname) # # The script option can be used to specify the .sql file path. 
- # If script and sqlquery options specified at same time, sqlquery will be used + # If script and sqlquery options specified at same time, sqlquery will be used # # the tagvalue field is used to define custom tags (separated by comas). # the query is expected to return columns which match the names of the From 134af0fe4b535e0b3ce8275da6048a4b731f1aae Mon Sep 17 00:00:00 2001 From: Harshit Bansal Date: Wed, 11 Mar 2020 00:37:42 +0530 Subject: [PATCH 1592/1815] Fix log rotation to use actual file size instead of bytes written (#7133) --- internal/rotate/file_writer.go | 5 +++++ internal/rotate/file_writer_test.go | 33 +++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/internal/rotate/file_writer.go b/internal/rotate/file_writer.go index 03f837f93..a167b7cb7 100644 --- a/internal/rotate/file_writer.go +++ b/internal/rotate/file_writer.go @@ -120,6 +120,11 @@ func (w *FileWriter) openCurrent() (err error) { // With time.now() as a reference we'd never rotate the file. if fileInfo, err := w.current.Stat(); err == nil { w.expireTime = fileInfo.ModTime().Add(w.interval) + w.bytesWritten = fileInfo.Size() + } + + if err = w.rotateIfNeeded(); err != nil { + return err } return nil } diff --git a/internal/rotate/file_writer_test.go b/internal/rotate/file_writer_test.go index 88ba94b9d..ca29b9a2f 100644 --- a/internal/rotate/file_writer_test.go +++ b/internal/rotate/file_writer_test.go @@ -43,6 +43,22 @@ func TestFileWriter_TimeRotation(t *testing.T) { assert.Equal(t, 2, len(files)) } +func TestFileWriter_ReopenTimeRotation(t *testing.T) { + tempDir, err := ioutil.TempDir("", "RotationTime") + require.NoError(t, err) + interval, _ := time.ParseDuration("1s") + filePath := filepath.Join(tempDir, "test.log") + err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644) + time.Sleep(1 * time.Second) + assert.NoError(t, err) + writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), interval, 0, -1) + require.NoError(t, err) + defer func() { writer.Close(); os.RemoveAll(tempDir) }() + + files, _ := ioutil.ReadDir(tempDir) + assert.Equal(t, 2, len(files)) +} + func TestFileWriter_SizeRotation(t *testing.T) { tempDir, err := ioutil.TempDir("", "RotationSize") require.NoError(t, err) @@ -59,6 +75,23 @@ func TestFileWriter_SizeRotation(t *testing.T) { assert.Equal(t, 2, len(files)) } +func TestFileWriter_ReopenSizeRotation(t *testing.T) { + tempDir, err := ioutil.TempDir("", "RotationSize") + require.NoError(t, err) + maxSize := int64(12) + filePath := filepath.Join(tempDir, "test.log") + err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644) + assert.NoError(t, err) + writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1) + require.NoError(t, err) + defer func() { writer.Close(); os.RemoveAll(tempDir) }() + + _, err = writer.Write([]byte("Hello World Again")) + require.NoError(t, err) + files, _ := ioutil.ReadDir(tempDir) + assert.Equal(t, 2, len(files)) +} + func TestFileWriter_DeleteArchives(t *testing.T) { tempDir, err := ioutil.TempDir("", "RotationDeleteArchives") require.NoError(t, err) From a9d1726f99833774e63178d6c502b09e7ab845a9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 10 Mar 2020 12:09:19 -0700 Subject: [PATCH 1593/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fc4847767..98c013f64 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,6 +67,7 @@ - [#7005](https://github.com/influxdata/telegraf/pull/7005): Search for chronyc only when chrony 
input plugin is enabled. - [#2280](https://github.com/influxdata/telegraf/issues/2280): Fix request to InfluxDB Listener failing with EOF. - [#6124](https://github.com/influxdata/telegraf/issues/6124): Fix InfluxDB listener to continue parsing after error. +- [#7133](https://github.com/influxdata/telegraf/issues/7133): Fix log rotation to use actual file size instead of bytes written. ## v1.13.4 [2020-02-25] From 1601a06915963fe4c6922c0d0183fec8cf37d965 Mon Sep 17 00:00:00 2001 From: Giovanni Luisotto Date: Tue, 10 Mar 2020 19:44:54 +0000 Subject: [PATCH 1594/1815] Fix several issues with DatabaseIO query in sqlserver input (#7103) --- plugins/inputs/sqlserver/sqlserver.go | 103 ++++++++++++++++---------- 1 file changed, 62 insertions(+), 41 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index d09a9743d..8b22be6ed 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -352,53 +352,74 @@ EXEC(@SQL) // EngineEdition=5 is Azure SQL DB const sqlDatabaseIOV2 = ` SET DEADLOCK_PRIORITY -10; +DECLARE @SqlStatement AS nvarchar(max); IF SERVERPROPERTY('EngineEdition') = 5 BEGIN -SELECT -'sqlserver_database_io' As [measurement], -REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], -DB_NAME([vfs].[database_id]) AS [database_name], -vfs.io_stall_read_ms AS read_latency_ms, -vfs.num_of_reads AS reads, -vfs.num_of_bytes_read AS read_bytes, -vfs.io_stall_write_ms AS write_latency_ms, -vfs.num_of_writes AS writes, -vfs.num_of_bytes_written AS write_bytes, -vfs.io_stall_queued_read_ms as rg_read_stall_ms, -vfs.io_stall_queued_write_ms as rg_write_stall_ms, -ISNULL(b.name ,'RBPEX') as logical_filename, -ISNULL(b.physical_name, 'RBPEX') as physical_filename, -CASE WHEN vfs.file_id = 2 THEN 'LOG'ELSE 'DATA' END AS file_type -,ISNULL(size,0)/128 AS current_size_mb -,ISNULL(FILEPROPERTY(b.name,'SpaceUsed')/128,0) as space_used_mb -FROM -[sys].[dm_io_virtual_file_stats](NULL,NULL) AS vfs -LEFT OUTER join sys.database_files b on b.file_id = vfs.file_id + SET @SqlStatement = ' + SELECT + ''sqlserver_database_io'' As [measurement] + ,REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance] + ,DB_NAME([vfs].[database_id]) AS [database_name] + ,vfs.io_stall_read_ms AS read_latency_ms + ,vfs.num_of_reads AS reads + ,vfs.num_of_bytes_read AS read_bytes + ,vfs.io_stall_write_ms AS write_latency_ms + ,vfs.num_of_writes AS writes + ,vfs.num_of_bytes_written AS write_bytes + ,vfs.io_stall_queued_read_ms as rg_read_stall_ms + ,ISNULL(b.name ,''RBPEX'') as logical_filename + ,ISNULL(b.physical_name, ''RBPEX'') as physical_filename + ,CASE WHEN vfs.file_id = 2 THEN ''LOG'' ELSE ''DATA'' END AS file_type + ,ISNULL(size,0)/128 AS current_size_mb + ,ISNULL(FILEPROPERTY(b.name,''SpaceUsed'')/128,0) as space_used_mb + ,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms] + ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms] + FROM [sys].[dm_io_virtual_file_stats](NULL,NULL) AS vfs + LEFT OUTER join sys.database_files b + ON b.file_id = vfs.file_id + ' + EXEC sp_executesql @SqlStatement + END ELSE BEGIN -SELECT -'sqlserver_database_io' As [measurement], -REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], -DB_NAME([vfs].[database_id]) [database_name], -vfs.io_stall_read_ms AS read_latency_ms, -vfs.num_of_reads AS reads, -vfs.num_of_bytes_read AS read_bytes, -vfs.io_stall_write_ms AS write_latency_ms, -vfs.num_of_writes AS writes, -vfs.num_of_bytes_written AS write_bytes, -vfs.io_stall_queued_read_ms as rg_read_stall_ms, 
-vfs.io_stall_queued_write_ms as rg_write_stall_ms, -ISNULL(b.name ,'RBPEX') as logical_filename, -ISNULL(b.physical_name, 'RBPEX') as physical_filename, -CASE WHEN vfs.file_id = 2 THEN 'LOG' ELSE 'DATA' END AS file_type -,ISNULL(size,0)/128 AS current_size_mb --- can't easily get space used without switching context to each DB for MI/On-prem making query expensive -, -1 as space_used_mb -FROM -[sys].[dm_io_virtual_file_stats](NULL,NULL) AS vfs -LEFT OUTER join sys.master_files b on b.database_id = vfs.database_id and b.file_id = vfs.file_id + + SET @SqlStatement = N' + SELECT + ''sqlserver_database_io'' AS [measurement] + ,REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance] + ,DB_NAME(vfs.[database_id]) AS [database_name] + ,COALESCE(mf.[physical_name],''RBPEX'') AS [physical_filename] --RPBEX = Resilient Buffer Pool Extension + ,COALESCE(mf.[name],''RBPEX'') AS [logical_filename] --RPBEX = Resilient Buffer Pool Extension + ,mf.[type_desc] AS [file_type] + ,IIF( RIGHT(vs.[volume_mount_point],1) = ''\'' /*Tag value cannot end with \ */ + ,LEFT(vs.[volume_mount_point],LEN(vs.[volume_mount_point])-1) + ,vs.[volume_mount_point] + ) AS [volume_mount_point] + ,vfs.[io_stall_read_ms] AS [read_latency_ms] + ,vfs.[num_of_reads] AS [reads] + ,vfs.[num_of_bytes_read] AS [read_bytes] + ,vfs.[io_stall_write_ms] AS [write_latency_ms] + ,vfs.[num_of_writes] AS [writes] + ,vfs.[num_of_bytes_written] AS [write_bytes] + ' + + + CASE + WHEN LEFT(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar) ,2) = '11' + /*SQL Server 2012 (ver 11.x) does not have [io_stall_queued_read_ms] and [io_stall_queued_write_ms]*/ + THEN '' + ELSE N',vfs.io_stall_queued_read_ms AS [rg_read_stall_ms] ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms]' + END + + + N'FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS vfs + INNER JOIN sys.master_files AS mf WITH (NOLOCK) + ON vfs.[database_id] = mf.[database_id] AND vfs.[file_id] = mf.[file_id] + CROSS APPLY sys.dm_os_volume_stats(vfs.[database_id], vfs.[file_id]) AS vs + ' + EXEC sp_executesql @SqlStatement + END + ` // Conditional check based on Azure SQL DB, Azure SQL Managed instance OR On-prem SQL Server From 41f39f5f5920810c39cd7f5a57552bd8490c24fd Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 10 Mar 2020 12:47:26 -0700 Subject: [PATCH 1595/1815] Update changelog --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 98c013f64..5b0296821 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,7 +58,7 @@ - [#7071](https://github.com/influxdata/telegraf/pull/7071): Add non-cumulative histogram to histogram aggregator. - [#6969](https://github.com/influxdata/telegraf/pull/6969): Add label and field selectors to prometheus input k8s discovery. - [#7049](https://github.com/influxdata/telegraf/pull/7049): Add support for converting tag or field to measurement in converter processor. - +- [#7103](https://github.com/influxdata/telegraf/pull/7103): Add volume_mount_point to DatabaseIO query in sqlserver input. #### Bugfixes @@ -68,6 +68,7 @@ - [#2280](https://github.com/influxdata/telegraf/issues/2280): Fix request to InfluxDB Listener failing with EOF. - [#6124](https://github.com/influxdata/telegraf/issues/6124): Fix InfluxDB listener to continue parsing after error. - [#7133](https://github.com/influxdata/telegraf/issues/7133): Fix log rotation to use actual file size instead of bytes written. +- [#7103](https://github.com/influxdata/telegraf/pull/7103): Fix several issues with DatabaseIO query in sqlserver input. 
## v1.13.4 [2020-02-25]

From b6de4da41f23c527fed16cdba1b1585a95f1c456 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 10 Mar 2020 13:38:26 -0700
Subject: [PATCH 1596/1815] Add topic tag options to kafka output (#7142)

---
 .../inputs/kafka_consumer/kafka_consumer.go |   4 +
 plugins/outputs/kafka/README.md             |   7 +
 plugins/outputs/kafka/kafka.go              |  57 +++++--
 plugins/outputs/kafka/kafka_test.go         | 146 +++++++++++++++++-
 4 files changed, 199 insertions(+), 15 deletions(-)

diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go
index 5cd6a9771..952f50d99 100644
--- a/plugins/inputs/kafka_consumer/kafka_consumer.go
+++ b/plugins/inputs/kafka_consumer/kafka_consumer.go
@@ -6,9 +6,11 @@ import (
 	"log"
 	"strings"
 	"sync"
+	"time"

 	"github.com/Shopify/sarama"
 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/internal/tls"
 	"github.com/influxdata/telegraf/plugins/common/kafka"
 	"github.com/influxdata/telegraf/plugins/inputs"
@@ -83,6 +85,7 @@ const (
 	defaultMaxUndeliveredMessages = 1000
 	defaultMaxMessageLen          = 1000000
 	defaultConsumerGroup          = "telegraf_metrics_consumers"
+	reconnectDelay                = 5 * time.Second
 )

 type empty struct{}
@@ -259,6 +262,7 @@ func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error {
 			err := k.consumer.Consume(ctx, k.Topics, handler)
 			if err != nil {
 				acc.AddError(err)
+				internal.SleepContext(ctx, reconnectDelay)
 			}
 		}
 		err = k.consumer.Close()
diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md
index 7ced4c5c6..d1cc9f0cb 100644
--- a/plugins/outputs/kafka/README.md
+++ b/plugins/outputs/kafka/README.md
@@ -10,6 +10,13 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm
   ## Kafka topic for producer messages
   topic = "telegraf"

+  ## The value of this tag will be used as the topic. If not set the 'topic'
+  ## option is used.
+  # topic_tag = ""
+
+  ## If true, the 'topic_tag' will be removed from the metric.
+  # exclude_topic_tag = false
+
   ## Optional Client id
   # client_id = "Telegraf"

diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go
index a877b334b..406febc28 100644
--- a/plugins/outputs/kafka/kafka.go
+++ b/plugins/outputs/kafka/kafka.go
@@ -26,16 +26,18 @@ var zeroTime = time.Unix(0, 0)

 type (
 	Kafka struct {
-		Brokers []string
-		Topic   string
+		Brokers         []string `toml:"brokers"`
+		Topic           string   `toml:"topic"`
+		TopicTag        string   `toml:"topic_tag"`
+		ExcludeTopicTag bool     `toml:"exclude_topic_tag"`
 		ClientID    string      `toml:"client_id"`
 		TopicSuffix TopicSuffix `toml:"topic_suffix"`
 		RoutingTag  string      `toml:"routing_tag"`
 		RoutingKey  string      `toml:"routing_key"`
-		CompressionCodec int
-		RequiredAcks     int
-		MaxRetry         int
-		MaxMessageBytes  int `toml:"max_message_bytes"`
+		CompressionCodec int `toml:"compression_codec"`
+		RequiredAcks     int `toml:"required_acks"`
+		MaxRetry         int `toml:"max_retry"`
+		MaxMessageBytes  int `toml:"max_message_bytes"`

 		Version string `toml:"version"`
@@ -57,7 +59,9 @@ type (
 		Log telegraf.Logger `toml:"-"`

 		tlsConfig tls.Config
-		producer  sarama.SyncProducer
+
+		producerFunc func(addrs []string, config *sarama.Config) (sarama.SyncProducer, error)
+		producer     sarama.SyncProducer

 		serializer serializers.Serializer
 	}
@@ -94,6 +98,13 @@ var sampleConfig = `
   ## Kafka topic for producer messages
   topic = "telegraf"

+  ## The value of this tag will be used as the topic. If not set the 'topic'
+  ## option is used.
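+  ## e.g. with topic_tag = "foo", a metric carrying tag foo=events is published to topic "events".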
+  # topic_tag = ""
+
+  ## If true, the 'topic_tag' will be removed from the metric.
+  # exclude_topic_tag = false
+
   ## Optional Client id
   # client_id = "Telegraf"
@@ -212,14 +223,29 @@ func ValidateTopicSuffixMethod(method string) error {
 	return fmt.Errorf("Unknown topic suffix method provided: %s", method)
 }

-func (k *Kafka) GetTopicName(metric telegraf.Metric) string {
+func (k *Kafka) GetTopicName(metric telegraf.Metric) (telegraf.Metric, string) {
+	topic := k.Topic
+	if k.TopicTag != "" {
+		if t, ok := metric.GetTag(k.TopicTag); ok {
+			topic = t
+
+			// If excluding the topic tag, a copy is required to avoid modifying
+			// the metric buffer.
+			if k.ExcludeTopicTag {
+				metric = metric.Copy()
+				metric.Accept()
+				metric.RemoveTag(k.TopicTag)
+			}
+		}
+	}
+
 	var topicName string
 	switch k.TopicSuffix.Method {
 	case "measurement":
-		topicName = k.Topic + k.TopicSuffix.Separator + metric.Name()
+		topicName = topic + k.TopicSuffix.Separator + metric.Name()
 	case "tags":
 		var topicNameComponents []string
-		topicNameComponents = append(topicNameComponents, k.Topic)
+		topicNameComponents = append(topicNameComponents, topic)
 		for _, tag := range k.TopicSuffix.Keys {
 			tagValue := metric.Tags()[tag]
 			if tagValue != "" {
@@ -228,9 +254,9 @@ func (k *Kafka) GetTopicName(metric telegraf.Metric) string {
 		}
 		topicName = strings.Join(topicNameComponents, k.TopicSuffix.Separator)
 	default:
-		topicName = k.Topic
+		topicName = topic
 	}
-	return topicName
+	return metric, topicName
 }

 func (k *Kafka) SetSerializer(serializer serializers.Serializer) {
@@ -306,7 +332,7 @@ func (k *Kafka) Connect() error {
 		config.Net.SASL.Version = version
 	}

-	producer, err := sarama.NewSyncProducer(k.Brokers, config)
+	producer, err := k.producerFunc(k.Brokers, config)
 	if err != nil {
 		return err
 	}
@@ -348,6 +374,8 @@ func (k *Kafka) routingKey(metric telegraf.Metric) (string, error) {
 func (k *Kafka) Write(metrics []telegraf.Metric) error {
 	msgs := make([]*sarama.ProducerMessage, 0, len(metrics))
 	for _, metric := range metrics {
+		metric, topic := k.GetTopicName(metric)
+
 		buf, err := k.serializer.Serialize(metric)
 		if err != nil {
 			k.Log.Debugf("Could not serialize metric: %v", err)
@@ -355,7 +383,7 @@ func (k *Kafka) Write(metrics []telegraf.Metric) error {
 		}

 		m := &sarama.ProducerMessage{
-			Topic: k.GetTopicName(metric),
+			Topic: topic,
 			Value: sarama.ByteEncoder(buf),
 		}
@@ -403,6 +431,7 @@ func init() {
 		return &Kafka{
 			MaxRetry:     3,
 			RequiredAcks: -1,
+			producerFunc: sarama.NewSyncProducer,
 		}
 	})
 }
diff --git a/plugins/outputs/kafka/kafka_test.go b/plugins/outputs/kafka/kafka_test.go
index bac51c28d..070eea3f9 100644
--- a/plugins/outputs/kafka/kafka_test.go
+++ b/plugins/outputs/kafka/kafka_test.go
@@ -4,6 +4,7 @@ import (
 	"testing"
 	"time"

+	"github.com/Shopify/sarama"
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/metric"
 	"github.com/influxdata/telegraf/plugins/serializers"
@@ -81,7 +82,7 @@ func TestTopicSuffixes(t *testing.T) {
 			TopicSuffix: topicSuffix,
 		}

-		topic := k.GetTopicName(metric)
+		_, topic := k.GetTopicName(metric)
 		require.Equal(t, expectedTopic, topic)
 	}
 }
@@ -156,3 +157,146 @@ func TestRoutingKey(t *testing.T) {
 		})
 	}
 }
+
+type MockProducer struct {
+	sent []*sarama.ProducerMessage
+}
+
+func (p *MockProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) {
+	p.sent = append(p.sent, msg)
+	return 0, 0, nil
+}
+
+func (p *MockProducer) SendMessages(msgs []*sarama.ProducerMessage) error {
+	p.sent = append(p.sent, msgs...)
+ return nil +} + +func (p *MockProducer) Close() error { + return nil +} + +func NewMockProducer(addrs []string, config *sarama.Config) (sarama.SyncProducer, error) { + return &MockProducer{}, nil +} + +func TestTopicTag(t *testing.T) { + tests := []struct { + name string + plugin *Kafka + input []telegraf.Metric + topic string + value string + }{ + { + name: "static topic", + plugin: &Kafka{ + Brokers: []string{"127.0.0.1"}, + Topic: "telegraf", + producerFunc: NewMockProducer, + }, + input: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + topic: "telegraf", + value: "cpu time_idle=42 0\n", + }, + { + name: "topic tag overrides static topic", + plugin: &Kafka{ + Brokers: []string{"127.0.0.1"}, + Topic: "telegraf", + TopicTag: "topic", + producerFunc: NewMockProducer, + }, + input: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "topic": "xyzzy", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + topic: "xyzzy", + value: "cpu,topic=xyzzy time_idle=42 0\n", + }, + { + name: "missing topic tag falls back to static topic", + plugin: &Kafka{ + Brokers: []string{"127.0.0.1"}, + Topic: "telegraf", + TopicTag: "topic", + producerFunc: NewMockProducer, + }, + input: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + topic: "telegraf", + value: "cpu time_idle=42 0\n", + }, + { + name: "exclude topic tag removes tag", + plugin: &Kafka{ + Brokers: []string{"127.0.0.1"}, + Topic: "telegraf", + TopicTag: "topic", + ExcludeTopicTag: true, + producerFunc: NewMockProducer, + }, + input: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "topic": "xyzzy", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + topic: "xyzzy", + value: "cpu time_idle=42 0\n", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s, err := serializers.NewInfluxSerializer() + require.NoError(t, err) + tt.plugin.SetSerializer(s) + + err = tt.plugin.Connect() + require.NoError(t, err) + + producer := &MockProducer{} + tt.plugin.producer = producer + + err = tt.plugin.Write(tt.input) + require.NoError(t, err) + + require.Equal(t, tt.topic, producer.sent[0].Topic) + + encoded, err := producer.sent[0].Value.Encode() + require.NoError(t, err) + require.Equal(t, tt.value, string(encoded)) + }) + } +} From 755ff25502277f20db5b3944367318abae1df15c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 10 Mar 2020 13:44:09 -0700 Subject: [PATCH 1597/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5b0296821..57107feb2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ - [#6969](https://github.com/influxdata/telegraf/pull/6969): Add label and field selectors to prometheus input k8s discovery. - [#7049](https://github.com/influxdata/telegraf/pull/7049): Add support for converting tag or field to measurement in converter processor. - [#7103](https://github.com/influxdata/telegraf/pull/7103): Add volume_mount_point to DatabaseIO query in sqlserver input. +- [#7142](https://github.com/influxdata/telegraf/pull/7142): Add topic tag options to kafka output. 
#### Bugfixes From c31ba94bb859eb451e02f674eeaf5bfac722f9eb Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 10 Mar 2020 16:46:36 -0400 Subject: [PATCH 1598/1815] modbus to support scaling int32 and float32-ieee (#7148) --- plugins/inputs/modbus/modbus.go | 25 ++++++++++++++++--------- plugins/inputs/modbus/modbus_test.go | 20 ++++++++++++++++++++ 2 files changed, 36 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index d2e913039..e96b2f117 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -468,27 +468,26 @@ func convertDataType(t fieldContainer, bytes []byte) interface{} { switch t.DataType { case "UINT16": e16 := convertEndianness16(t.ByteOrder, bytes) - f16 := format16(t.DataType, e16).(uint16) - return scaleUint16(t.Scale, f16) + return scaleUint16(t.Scale, e16) case "INT16": e16 := convertEndianness16(t.ByteOrder, bytes) - f16 := format16(t.DataType, e16).(int16) + f16 := int16(e16) return scaleInt16(t.Scale, f16) case "UINT32": e32 := convertEndianness32(t.ByteOrder, bytes) - f32 := format32(t.DataType, e32).(uint32) - return scaleUint32(t.Scale, f32) + return scaleUint32(t.Scale, e32) case "INT32": e32 := convertEndianness32(t.ByteOrder, bytes) - return format32(t.DataType, e32) + f32 := int32(e32) + return scaleInt32(t.Scale, f32) case "FLOAT32-IEEE": e32 := convertEndianness32(t.ByteOrder, bytes) - return format32(t.DataType, e32) + f32 := math.Float32frombits(e32) + return scaleFloat32(t.Scale, f32) case "FLOAT32": if len(bytes) == 2 { e16 := convertEndianness16(t.ByteOrder, bytes) - f16 := format16(t.DataType, e16).(uint16) - return scale16toFloat32(t.Scale, f16) + return scale16toFloat32(t.Scale, e16) } else { e32 := convertEndianness32(t.ByteOrder, bytes) return scale32toFloat32(t.Scale, e32) @@ -568,6 +567,14 @@ func scaleUint32(s float64, v uint32) uint32 { return uint32(float64(v) * float64(s)) } +func scaleInt32(s float64, v int32) int32 { + return int32(float64(v) * float64(s)) +} + +func scaleFloat32(s float64, v float32) float32 { + return float32(float64(v) * s) +} + // Gather implements the telegraf plugin interface method for data accumulation func (m *Modbus) Gather(acc telegraf.Accumulator) error { if !m.isConnected { diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go index 3317067a8..e346ece17 100644 --- a/plugins/inputs/modbus/modbus_test.go +++ b/plugins/inputs/modbus/modbus_test.go @@ -239,6 +239,16 @@ func TestHoldingRegisters(t *testing.T) { write: []byte{0xAB, 0xCD}, read: int16(-12885), }, + { + name: "register50_register51_abcd_int32_scaled", + address: []uint16{50, 51}, + quantity: 2, + byteOrder: "ABCD", + dataType: "INT32", + scale: 10, + write: []byte{0x00, 0x00, 0xAB, 0xCD}, + read: int32(439810), + }, { name: "register50_register51_abcd_int32", address: []uint16{50, 51}, @@ -329,6 +339,16 @@ func TestHoldingRegisters(t *testing.T) { write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, read: float32(-3.3360025e-13), }, + { + name: "register130_register131_abcd_float32_ieee_scaled", + address: []uint16{130, 131}, + quantity: 2, + byteOrder: "ABCD", + dataType: "FLOAT32-IEEE", + scale: 10, + write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, + read: float32(-3.3360025e-12), + }, } serv := mbserver.NewServer() From 3bc53558a418efb78ca27391cea07f2a86653bca Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 10 Mar 2020 16:48:30 -0400 Subject: [PATCH 1599/1815] fix concurrency panic in ntpq input. 
Closes #7101 (#7143) --- plugins/inputs/ntpq/ntpq.go | 79 ++++++++++++++----------- plugins/inputs/ntpq/ntpq_test.go | 99 +++++++++----------------------- 2 files changed, 71 insertions(+), 107 deletions(-) diff --git a/plugins/inputs/ntpq/ntpq.go b/plugins/inputs/ntpq/ntpq.go index d7b1b4f5b..80b5dcd0f 100644 --- a/plugins/inputs/ntpq/ntpq.go +++ b/plugins/inputs/ntpq/ntpq.go @@ -21,30 +21,11 @@ var tagHeaders map[string]string = map[string]string{ "t": "type", } -// Mapping of the ntpq tag key to the index in the command output -var tagI map[string]int = map[string]int{ - "remote": -1, - "refid": -1, - "stratum": -1, - "type": -1, -} - -// Mapping of float metrics to their index in the command output -var floatI map[string]int = map[string]int{ - "delay": -1, - "offset": -1, - "jitter": -1, -} - -// Mapping of int metrics to their index in the command output -var intI map[string]int = map[string]int{ - "when": -1, - "poll": -1, - "reach": -1, -} - type NTPQ struct { - runQ func() ([]byte, error) + runQ func() ([]byte, error) + tagI map[string]int + floatI map[string]int + intI map[string]int DNSLookup bool `toml:"dns_lookup"` } @@ -101,19 +82,19 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { for i, field := range fields { // Check if field is a tag: if tagKey, ok := tagHeaders[field]; ok { - tagI[tagKey] = i + n.tagI[tagKey] = i continue } // check if field is a float metric: - if _, ok := floatI[field]; ok { - floatI[field] = i + if _, ok := n.floatI[field]; ok { + n.floatI[field] = i continue } // check if field is an int metric: - if _, ok := intI[field]; ok { - intI[field] = i + if _, ok := n.intI[field]; ok { + n.intI[field] = i continue } } @@ -125,7 +106,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { mFields := make(map[string]interface{}) // Get tags from output - for key, index := range tagI { + for key, index := range n.tagI { if index == -1 { continue } @@ -133,7 +114,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { } // Get integer metrics from output - for key, index := range intI { + for key, index := range n.intI { if index == -1 || index >= len(fields) { continue } @@ -183,7 +164,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { } // get float metrics from output - for key, index := range floatI { + for key, index := range n.floatI { if index == -1 || index >= len(fields) { continue } @@ -223,10 +204,40 @@ func (n *NTPQ) runq() ([]byte, error) { return cmd.Output() } +func newNTPQ() *NTPQ { + // Mapping of the ntpq tag key to the index in the command output + tagI := map[string]int{ + "remote": -1, + "refid": -1, + "stratum": -1, + "type": -1, + } + + // Mapping of float metrics to their index in the command output + floatI := map[string]int{ + "delay": -1, + "offset": -1, + "jitter": -1, + } + + // Mapping of int metrics to their index in the command output + intI := map[string]int{ + "when": -1, + "poll": -1, + "reach": -1, + } + + n := &NTPQ{ + tagI: tagI, + floatI: floatI, + intI: intI, + } + n.runQ = n.runq + return n +} + func init() { inputs.Add("ntpq", func() telegraf.Input { - n := &NTPQ{} - n.runQ = n.runq - return n + return newNTPQ() }) } diff --git a/plugins/inputs/ntpq/ntpq_test.go b/plugins/inputs/ntpq/ntpq_test.go index 016a9e5bd..b0db77e45 100644 --- a/plugins/inputs/ntpq/ntpq_test.go +++ b/plugins/inputs/ntpq/ntpq_test.go @@ -16,9 +16,8 @@ func TestSingleNTPQ(t *testing.T) { ret: []byte(singleNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := 
testutil.Accumulator{} assert.NoError(t, acc.GatherError(n.Gather)) @@ -46,9 +45,8 @@ func TestBadIntNTPQ(t *testing.T) { ret: []byte(badIntParseNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.Error(t, acc.GatherError(n.Gather)) @@ -75,9 +73,8 @@ func TestBadFloatNTPQ(t *testing.T) { ret: []byte(badFloatParseNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.Error(t, acc.GatherError(n.Gather)) @@ -104,9 +101,8 @@ func TestDaysNTPQ(t *testing.T) { ret: []byte(whenDaysNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.NoError(t, acc.GatherError(n.Gather)) @@ -134,9 +130,8 @@ func TestHoursNTPQ(t *testing.T) { ret: []byte(whenHoursNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.NoError(t, acc.GatherError(n.Gather)) @@ -164,9 +159,8 @@ func TestMinutesNTPQ(t *testing.T) { ret: []byte(whenMinutesNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.NoError(t, acc.GatherError(n.Gather)) @@ -194,9 +188,8 @@ func TestBadWhenNTPQ(t *testing.T) { ret: []byte(whenBadNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.Error(t, acc.GatherError(n.Gather)) @@ -226,9 +219,8 @@ func TestParserNTPQ(t *testing.T) { err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.NoError(t, acc.GatherError(n.Gather)) @@ -289,9 +281,8 @@ func TestMultiNTPQ(t *testing.T) { ret: []byte(multiNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.NoError(t, acc.GatherError(n.Gather)) @@ -330,14 +321,12 @@ func TestMultiNTPQ(t *testing.T) { } func TestBadHeaderNTPQ(t *testing.T) { - resetVars() tt := tester{ ret: []byte(badHeaderNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.NoError(t, acc.GatherError(n.Gather)) @@ -360,14 +349,12 @@ func TestBadHeaderNTPQ(t *testing.T) { } func TestMissingDelayColumnNTPQ(t *testing.T) { - resetVars() tt := tester{ ret: []byte(missingDelayNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.NoError(t, acc.GatherError(n.Gather)) @@ -393,9 +380,8 @@ func TestFailedNTPQ(t *testing.T) { ret: []byte(singleNTPQ), err: fmt.Errorf("Test failure"), } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.Error(t, acc.GatherError(n.Gather)) @@ -445,9 +431,8 @@ func TestNoRefID(t *testing.T) { ret: []byte(noRefID), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{ TimeFunc: func() time.Time { return now }, @@ -466,38 +451,6 @@ func (t *tester) runqTest() ([]byte, error) { return t.ret, t.err } -func resetVars() { - // Mapping of ntpq header names to tag keys - tagHeaders = map[string]string{ - "remote": "remote", - "refid": "refid", - "st": "stratum", - "t": "type", - } - - // Mapping of the ntpq tag key to the index in the command output - tagI = 
map[string]int{ - "remote": -1, - "refid": -1, - "stratum": -1, - "type": -1, - } - - // Mapping of float metrics to their index in the command output - floatI = map[string]int{ - "delay": -1, - "offset": -1, - "jitter": -1, - } - - // Mapping of int metrics to their index in the command output - intI = map[string]int{ - "when": -1, - "poll": -1, - "reach": -1, - } -} - var singleNTPQ = ` remote refid st t when poll reach delay offset jitter ============================================================================== *uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462 From 64fecfa19e22d4156ac6cc9e33472db4ae497d83 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 10 Mar 2020 17:06:29 -0400 Subject: [PATCH 1600/1815] fix inputs.postgresql panic in posgres 12 (#7151) --- plugins/inputs/postgresql/postgresql.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index e136098f4..452c7fa2b 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -155,7 +155,12 @@ func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator, columns []str } if columnMap["datname"] != nil { // extract the database name from the column map - dbname.WriteString((*columnMap["datname"]).(string)) + if dbNameStr, ok := (*columnMap["datname"]).(string); ok { + dbname.WriteString(dbNameStr) + } else { + // PG 12 adds tracking of global objects to pg_stat_database + dbname.WriteString("postgres_global") + } } else { dbname.WriteString("postgres") } From c50b02e58dda6f24d178f5a18ffda01a72019d0c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 10 Mar 2020 15:19:32 -0700 Subject: [PATCH 1601/1815] Fix internal metrics for output split into multiple lines (#7119) --- internal/models/buffer.go | 15 ++++++--- internal/models/running_output_test.go | 46 ++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 5 deletions(-) diff --git a/internal/models/buffer.go b/internal/models/buffer.go index 7769ac1e9..18e9987ca 100644 --- a/internal/models/buffer.go +++ b/internal/models/buffer.go @@ -33,6 +33,11 @@ type Buffer struct { // NewBuffer returns a new empty Buffer with the given capacity. 
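+// Selfstat tags are built once up front: every buffer metric carries the
+// output name, and the alias tag is included only when a non-empty alias is
+// configured, so internal metrics are not emitted with an empty alias value.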
func NewBuffer(name string, alias string, capacity int) *Buffer { + tags := map[string]string{"output": name} + if alias != "" { + tags["alias"] = alias + } + b := &Buffer{ buf: make([]telegraf.Metric, capacity), first: 0, @@ -43,27 +48,27 @@ func NewBuffer(name string, alias string, capacity int) *Buffer { MetricsAdded: selfstat.Register( "write", "metrics_added", - map[string]string{"output": name, "alias": alias}, + tags, ), MetricsWritten: selfstat.Register( "write", "metrics_written", - map[string]string{"output": name, "alias": alias}, + tags, ), MetricsDropped: selfstat.Register( "write", "metrics_dropped", - map[string]string{"output": name, "alias": alias}, + tags, ), BufferSize: selfstat.Register( "write", "buffer_size", - map[string]string{"output": name, "alias": alias}, + tags, ), BufferLimit: selfstat.Register( "write", "buffer_limit", - map[string]string{"output": name, "alias": alias}, + tags, ), } b.BufferSize.Set(int64(0)) diff --git a/internal/models/running_output_test.go b/internal/models/running_output_test.go index fd38b0faa..5909ec158 100644 --- a/internal/models/running_output_test.go +++ b/internal/models/running_output_test.go @@ -4,8 +4,10 @@ import ( "fmt" "sync" "testing" + "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/selfstat" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -412,6 +414,50 @@ func TestRunningOutputWriteFailOrder3(t *testing.T) { assert.Equal(t, expected, m.Metrics()) } +func TestInternalMetrics(t *testing.T) { + _ = NewRunningOutput( + "test_internal", + &mockOutput{}, + &OutputConfig{ + Filter: Filter{}, + Name: "test_name", + Alias: "test_alias", + }, + 5, + 10) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "internal_write", + map[string]string{ + "output": "test_name", + "alias": "test_alias", + }, + map[string]interface{}{ + "buffer_limit": 10, + "buffer_size": 0, + "errors": 0, + "metrics_added": 0, + "metrics_dropped": 0, + "metrics_filtered": 0, + "metrics_written": 0, + "write_time_ns": 0, + }, + time.Unix(0, 0), + ), + } + + var actual []telegraf.Metric + for _, m := range selfstat.Metrics() { + output, _ := m.GetTag("output") + if m.Name() == "internal_write" && output == "test_name" { + actual = append(actual, m) + } + } + + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) +} + type mockOutput struct { sync.Mutex From c7146be2f26986a8f53be364a983ff2cfdd028d8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 10 Mar 2020 15:20:03 -0700 Subject: [PATCH 1602/1815] Add support for setting retention policy using tag (#7141) --- plugins/outputs/influxdb/README.md | 7 + plugins/outputs/influxdb/http.go | 123 +++++++++------- plugins/outputs/influxdb/http_test.go | 197 ++++++++++++++++++++++++++ plugins/outputs/influxdb/influxdb.go | 79 ++++++----- 4 files changed, 317 insertions(+), 89 deletions(-) diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md index 1d11443ac..f82a3b344 100644 --- a/plugins/outputs/influxdb/README.md +++ b/plugins/outputs/influxdb/README.md @@ -35,6 +35,13 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP ser ## the default retention policy. Only takes effect when using HTTP. # retention_policy = "" + ## The value of this tag will be used to determine the retention policy. If this + ## tag is not set the 'retention_policy' option is used as the default. 
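+  ## For example, with retention_policy_tag = "rp" a metric tagged rp=weekly
+  ## is written using the "weekly" retention policy (tag value illustrative).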
+ # retention_policy_tag = "" + + ## If true, the 'retention_policy_tag' will not be removed from the metric. + # exclude_retention_policy_tag = false + ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". ## Only takes effect when using HTTP. # write_consistency = "any" diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index d449c9456..b663d9198 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -83,21 +83,23 @@ func (r WriteResponse) Error() string { } type HTTPConfig struct { - URL *url.URL - UserAgent string - Timeout time.Duration - Username string - Password string - TLSConfig *tls.Config - Proxy *url.URL - Headers map[string]string - ContentEncoding string - Database string - DatabaseTag string - ExcludeDatabaseTag bool - RetentionPolicy string - Consistency string - SkipDatabaseCreation bool + URL *url.URL + UserAgent string + Timeout time.Duration + Username string + Password string + TLSConfig *tls.Config + Proxy *url.URL + Headers map[string]string + ContentEncoding string + Database string + DatabaseTag string + ExcludeDatabaseTag bool + RetentionPolicy string + RetentionPolicyTag string + ExcludeRetentionPolicyTag bool + Consistency string + SkipDatabaseCreation bool InfluxUintSupport bool `toml:"influx_uint_support"` Serializer *influx.Serializer @@ -236,55 +238,66 @@ func (c *httpClient) CreateDatabase(ctx context.Context, database string) error } } +type dbrp struct { + Database string + RetentionPolicy string +} + // Write sends the metrics to InfluxDB func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error { - batches := make(map[string][]telegraf.Metric) - if c.config.DatabaseTag == "" { - err := c.writeBatch(ctx, c.config.Database, metrics) + // If these options are not used, we can skip in plugin batching and send + // the full batch in a single request. + if c.config.DatabaseTag == "" && c.config.RetentionPolicyTag == "" { + return c.writeBatch(ctx, c.config.Database, c.config.RetentionPolicy, metrics) + } + + batches := make(map[dbrp][]telegraf.Metric) + for _, metric := range metrics { + db, ok := metric.GetTag(c.config.DatabaseTag) + if !ok { + db = c.config.Database + } + + rp, ok := metric.GetTag(c.config.RetentionPolicyTag) + if !ok { + rp = c.config.RetentionPolicy + } + + dbrp := dbrp{ + Database: db, + RetentionPolicy: rp, + } + + if c.config.ExcludeDatabaseTag || c.config.ExcludeRetentionPolicyTag { + // Avoid modifying the metric in case we need to retry the request. + metric = metric.Copy() + metric.Accept() + metric.RemoveTag(c.config.DatabaseTag) + metric.RemoveTag(c.config.RetentionPolicyTag) + } + + batches[dbrp] = append(batches[dbrp], metric) + } + + for dbrp, batch := range batches { + if !c.config.SkipDatabaseCreation && !c.createdDatabases[dbrp.Database] { + err := c.CreateDatabase(ctx, dbrp.Database) + if err != nil { + c.log.Warnf("When writing to [%s]: database %q creation failed: %v", + c.config.URL, dbrp.Database, err) + } + } + + err := c.writeBatch(ctx, dbrp.Database, dbrp.RetentionPolicy, batch) if err != nil { return err } - } else { - for _, metric := range metrics { - db, ok := metric.GetTag(c.config.DatabaseTag) - if !ok { - db = c.config.Database - } - - if _, ok := batches[db]; !ok { - batches[db] = make([]telegraf.Metric, 0) - } - - if c.config.ExcludeDatabaseTag { - // Avoid modifying the metric in case we need to retry the request. 
- metric = metric.Copy() - metric.Accept() - metric.RemoveTag(c.config.DatabaseTag) - } - - batches[db] = append(batches[db], metric) - } - - for db, batch := range batches { - if !c.config.SkipDatabaseCreation && !c.createdDatabases[db] { - err := c.CreateDatabase(ctx, db) - if err != nil { - c.log.Warnf("When writing to [%s]: database %q creation failed: %v", - c.config.URL, db, err) - } - } - - err := c.writeBatch(ctx, db, batch) - if err != nil { - return err - } - } } return nil } -func (c *httpClient) writeBatch(ctx context.Context, db string, metrics []telegraf.Metric) error { - url, err := makeWriteURL(c.config.URL, db, c.config.RetentionPolicy, c.config.Consistency) +func (c *httpClient) writeBatch(ctx context.Context, db, rp string, metrics []telegraf.Metric) error { + url, err := makeWriteURL(c.config.URL, db, rp, c.config.Consistency) if err != nil { return err } diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index a09b02d43..3f5ef0bc6 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -733,3 +733,200 @@ func TestHTTP_WriteDatabaseTagWorksOnRetry(t *testing.T) { err = client.Write(ctx, metrics) require.NoError(t, err) } + +func TestDBRPTags(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + tests := []struct { + name string + config influxdb.HTTPConfig + metrics []telegraf.Metric + handlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) + url string + }{ + { + name: "defaults", + config: influxdb.HTTPConfig{ + URL: u, + Database: "telegraf", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "database": "foo", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.FormValue("db"), "telegraf") + require.Equal(t, r.FormValue("rp"), "") + w.WriteHeader(http.StatusNoContent) + }, + }, + { + name: "static retention policy", + config: influxdb.HTTPConfig{ + URL: u, + Database: "telegraf", + RetentionPolicy: "foo", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.FormValue("db"), "telegraf") + require.Equal(t, r.FormValue("rp"), "foo") + w.WriteHeader(http.StatusNoContent) + }, + }, + { + name: "retention policy tag", + config: influxdb.HTTPConfig{ + URL: u, + SkipDatabaseCreation: true, + Database: "telegraf", + RetentionPolicyTag: "rp", + Log: testutil.Logger{}, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "rp": "foo", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.FormValue("db"), "telegraf") + require.Equal(t, r.FormValue("rp"), "foo") + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + require.Contains(t, string(body), "cpu,rp=foo value=42") + w.WriteHeader(http.StatusNoContent) + }, + }, + { + name: "retention policy tag fallback to static rp", + config: influxdb.HTTPConfig{ + URL: u, + SkipDatabaseCreation: true, + Database: "telegraf", + RetentionPolicy: "foo", + 
RetentionPolicyTag: "rp", + Log: testutil.Logger{}, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.FormValue("db"), "telegraf") + require.Equal(t, r.FormValue("rp"), "foo") + w.WriteHeader(http.StatusNoContent) + }, + }, + { + name: "retention policy tag fallback to unset rp", + config: influxdb.HTTPConfig{ + URL: u, + SkipDatabaseCreation: true, + Database: "telegraf", + RetentionPolicyTag: "rp", + Log: testutil.Logger{}, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.FormValue("db"), "telegraf") + require.Equal(t, r.FormValue("rp"), "") + w.WriteHeader(http.StatusNoContent) + }, + }, + { + name: "exclude retention policy tag", + config: influxdb.HTTPConfig{ + URL: u, + SkipDatabaseCreation: true, + Database: "telegraf", + RetentionPolicyTag: "rp", + ExcludeRetentionPolicyTag: true, + Log: testutil.Logger{}, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "rp": "foo", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.FormValue("db"), "telegraf") + require.Equal(t, r.FormValue("rp"), "foo") + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + require.Contains(t, string(body), "cpu value=42") + w.WriteHeader(http.StatusNoContent) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + tt.handlerFunc(t, w, r) + return + default: + w.WriteHeader(http.StatusNotFound) + return + } + }) + + client, err := influxdb.NewHTTPClient(tt.config) + require.NoError(t, err) + + ctx := context.Background() + err = client.Write(ctx, tt.metrics) + require.NoError(t, err) + }) + } +} diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index be462ba03..4306f55c6 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -31,23 +31,25 @@ type Client interface { // InfluxDB struct is the primary data structure for the plugin type InfluxDB struct { - URL string // url deprecated in 0.1.9; use urls - URLs []string `toml:"urls"` - Username string - Password string - Database string - DatabaseTag string `toml:"database_tag"` - ExcludeDatabaseTag bool `toml:"exclude_database_tag"` - UserAgent string - RetentionPolicy string - WriteConsistency string - Timeout internal.Duration - UDPPayload internal.Size `toml:"udp_payload"` - HTTPProxy string `toml:"http_proxy"` - HTTPHeaders map[string]string `toml:"http_headers"` - ContentEncoding string `toml:"content_encoding"` - SkipDatabaseCreation bool `toml:"skip_database_creation"` - InfluxUintSupport bool `toml:"influx_uint_support"` + URL string // url deprecated in 0.1.9; use urls + URLs []string `toml:"urls"` + Username string `toml:"username"` + Password string `toml:"password"` + Database string `toml:"database"` + DatabaseTag string `toml:"database_tag"` + ExcludeDatabaseTag bool `toml:"exclude_database_tag"` + RetentionPolicy string 
`toml:"retention_policy"` + RetentionPolicyTag string `toml:"retention_policy_tag"` + ExcludeRetentionPolicyTag bool `toml:"exclude_retention_policy_tag"` + UserAgent string `toml:"user_agent"` + WriteConsistency string `toml:"write_consistency"` + Timeout internal.Duration `toml:"timeout"` + UDPPayload internal.Size `toml:"udp_payload"` + HTTPProxy string `toml:"http_proxy"` + HTTPHeaders map[string]string `toml:"http_headers"` + ContentEncoding string `toml:"content_encoding"` + SkipDatabaseCreation bool `toml:"skip_database_creation"` + InfluxUintSupport bool `toml:"influx_uint_support"` tls.ClientConfig Precision string // precision deprecated in 1.0; value is ignored @@ -89,6 +91,13 @@ var sampleConfig = ` ## the default retention policy. Only takes effect when using HTTP. # retention_policy = "" + ## The value of this tag will be used to determine the retention policy. If this + ## tag is not set the 'retention_policy' option is used as the default. + # retention_policy_tag = "" + + ## If true, the 'retention_policy_tag' will not be removed from the metric. + # exclude_retention_policy_tag = false + ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". ## Only takes effect when using HTTP. # write_consistency = "any" @@ -250,23 +259,25 @@ func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL) } config := &HTTPConfig{ - URL: url, - Timeout: i.Timeout.Duration, - TLSConfig: tlsConfig, - UserAgent: i.UserAgent, - Username: i.Username, - Password: i.Password, - Proxy: proxy, - ContentEncoding: i.ContentEncoding, - Headers: i.HTTPHeaders, - Database: i.Database, - DatabaseTag: i.DatabaseTag, - ExcludeDatabaseTag: i.ExcludeDatabaseTag, - SkipDatabaseCreation: i.SkipDatabaseCreation, - RetentionPolicy: i.RetentionPolicy, - Consistency: i.WriteConsistency, - Serializer: i.newSerializer(), - Log: i.Log, + URL: url, + Timeout: i.Timeout.Duration, + TLSConfig: tlsConfig, + UserAgent: i.UserAgent, + Username: i.Username, + Password: i.Password, + Proxy: proxy, + ContentEncoding: i.ContentEncoding, + Headers: i.HTTPHeaders, + Database: i.Database, + DatabaseTag: i.DatabaseTag, + ExcludeDatabaseTag: i.ExcludeDatabaseTag, + SkipDatabaseCreation: i.SkipDatabaseCreation, + RetentionPolicy: i.RetentionPolicy, + RetentionPolicyTag: i.RetentionPolicyTag, + ExcludeRetentionPolicyTag: i.ExcludeRetentionPolicyTag, + Consistency: i.WriteConsistency, + Serializer: i.newSerializer(), + Log: i.Log, } c, err := i.CreateHTTPClientF(config) From eb063d0cc778527445895bbe0ca3a4adf751a861 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 10 Mar 2020 15:22:19 -0700 Subject: [PATCH 1603/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 57107feb2..6d0517720 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -60,6 +60,7 @@ - [#7049](https://github.com/influxdata/telegraf/pull/7049): Add support for converting tag or field to measurement in converter processor. - [#7103](https://github.com/influxdata/telegraf/pull/7103): Add volume_mount_point to DatabaseIO query in sqlserver input. - [#7142](https://github.com/influxdata/telegraf/pull/7142): Add topic tag options to kafka output. +- [#7141](https://github.com/influxdata/telegraf/pull/7141): Add support for setting InfluxDB retention policy using tag. #### Bugfixes @@ -70,6 +71,7 @@ - [#6124](https://github.com/influxdata/telegraf/issues/6124): Fix InfluxDB listener to continue parsing after error. 
- [#7133](https://github.com/influxdata/telegraf/issues/7133): Fix log rotation to use actual file size instead of bytes written. - [#7103](https://github.com/influxdata/telegraf/pull/7103): Fix several issues with DatabaseIO query in sqlserver input. +- [#7119](https://github.com/influxdata/telegraf/pull/7119): Fix internal metrics for output split into multiple lines. ## v1.13.4 [2020-02-25] From 0be62754951f39f7dbac6023399da555b9f4f312 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Tue, 10 Mar 2020 15:31:59 -0700 Subject: [PATCH 1604/1815] Add versioning to example readme (#7152) --- plugins/inputs/EXAMPLE_README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/plugins/inputs/EXAMPLE_README.md b/plugins/inputs/EXAMPLE_README.md index b15b9caf2..ffe1be7cc 100644 --- a/plugins/inputs/EXAMPLE_README.md +++ b/plugins/inputs/EXAMPLE_README.md @@ -4,6 +4,9 @@ The `example` plugin gathers metrics about example things. This description explains at a high level what the plugin does and provides links to where additional information can be found. +Telegraf minimum version: Telegraf x.x +Plugin minimum tested version: x.x + ### Configuration This section contains the default TOML to configure the plugin. You can From c5234b365af63ba726c0fe3ee660af8d214dd2d5 Mon Sep 17 00:00:00 2001 From: alespour <42931850+alespour@users.noreply.github.com> Date: Tue, 10 Mar 2020 23:39:06 +0100 Subject: [PATCH 1605/1815] Add s2_geo processor plugin (#7087) --- go.mod | 1 + go.sum | 2 + plugins/processors/all/all.go | 1 + plugins/processors/geo/README.md | 29 ++++++++++++ plugins/processors/geo/geo.go | 76 ++++++++++++++++++++++++++++++ plugins/processors/geo/geo_test.go | 55 +++++++++++++++++++++ 6 files changed, 164 insertions(+) create mode 100644 plugins/processors/geo/README.md create mode 100644 plugins/processors/geo/geo.go create mode 100644 plugins/processors/geo/geo_test.go diff --git a/go.mod b/go.mod index de62dc620..5b23b61db 100644 --- a/go.mod +++ b/go.mod @@ -49,6 +49,7 @@ require ( github.com/gobwas/glob v0.2.3 github.com/gofrs/uuid v2.1.0+incompatible github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d + github.com/golang/geo v0.0.0-20190916061304-5b978397cfec github.com/golang/mock v1.3.1-0.20190508161146-9fa652df1129 // indirect github.com/golang/protobuf v1.3.2 github.com/google/go-cmp v0.4.0 diff --git a/go.sum b/go.sum index b15ec7343..995bcce10 100644 --- a/go.sum +++ b/go.sum @@ -166,6 +166,8 @@ github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw= +github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= diff --git a/plugins/processors/all/all.go 
b/plugins/processors/all/all.go index ba72ee10e..e47445059 100644
--- a/plugins/processors/all/all.go
+++ b/plugins/processors/all/all.go
@@ -5,6 +5,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/processors/converter"
 	_ "github.com/influxdata/telegraf/plugins/processors/date"
 	_ "github.com/influxdata/telegraf/plugins/processors/enum"
+	_ "github.com/influxdata/telegraf/plugins/processors/geo"
 	_ "github.com/influxdata/telegraf/plugins/processors/override"
 	_ "github.com/influxdata/telegraf/plugins/processors/parser"
 	_ "github.com/influxdata/telegraf/plugins/processors/pivot"
diff --git a/plugins/processors/geo/README.md b/plugins/processors/geo/README.md
new file mode 100644
index 000000000..5a65d5e7d
--- /dev/null
+++ b/plugins/processors/geo/README.md
@@ -0,0 +1,29 @@
+# S2 Geo Processor Plugin
+
+Use the `s2geo` processor to add a tag with the S2 cell ID token of the specified [cell level][cell levels].
+The tag is used in `experimental/geo` Flux package functions.
+The `lat` and `lon` field values should contain WGS-84 coordinates in decimal degrees.
+
+### Configuration
+
+```toml
+[[processors.s2geo]]
+  ## The name of the lat and lon fields containing WGS-84 latitude and longitude in decimal degrees
+  lat_field = "lat"
+  lon_field = "lon"
+
+  ## New tag to create
+  tag_key = "s2_cell_id"
+
+  ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html)
+  cell_level = 11
+```
+
+### Example
+
+```diff
+- mta,area=llir,id=GO505_20_2704,status=1 lat=40.878738,lon=-72.517572 1560540094
++ mta,area=llir,id=GO505_20_2704,status=1,s2_cell_id=89e8ed4 lat=40.878738,lon=-72.517572 1560540094
+```
+
+[cell levels]: https://s2geometry.io/resources/s2cell_statistics.html
diff --git a/plugins/processors/geo/geo.go b/plugins/processors/geo/geo.go
new file mode 100644
index 000000000..85f80c3df
--- /dev/null
+++ b/plugins/processors/geo/geo.go
@@ -0,0 +1,76 @@
+package geo
+
+import (
+	"fmt"
+	"github.com/golang/geo/s2"
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/processors"
+)
+
+type Geo struct {
+	LatField  string `toml:"lat_field"`
+	LonField  string `toml:"lon_field"`
+	TagKey    string `toml:"tag_key"`
+	CellLevel int    `toml:"cell_level"`
+}
+
+var SampleConfig = `
+  ## The name of the lat and lon fields containing WGS-84 latitude and longitude in decimal degrees
+  lat_field = "lat"
+  lon_field = "lon"
+
+  ## New tag to create
+  tag_key = "s2_cell_id"
+
+  ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html)
+  cell_level = 9
+`
+
+func (g *Geo) SampleConfig() string {
+	return SampleConfig
+}
+
+func (g *Geo) Description() string {
+	return "Reads latitude and longitude fields and adds a tag with the S2 cell ID token of the specified level."
+} + +func (g *Geo) Init() error { + if g.CellLevel < 0 || g.CellLevel > 30 { + return fmt.Errorf("invalid cell level %d", g.CellLevel) + } + return nil +} + +func (g *Geo) Apply(in ...telegraf.Metric) []telegraf.Metric { + for _, point := range in { + var latOk, lonOk bool + var lat, lon float64 + for _, field := range point.FieldList() { + switch field.Key { + case g.LatField: + lat, latOk = field.Value.(float64) + case g.LonField: + lon, lonOk = field.Value.(float64) + } + } + if latOk && lonOk { + cellID := s2.CellIDFromLatLng(s2.LatLngFromDegrees(lat, lon)) + if cellID.IsValid() { + value := cellID.Parent(g.CellLevel).ToToken() + point.AddTag(g.TagKey, value) + } + } + } + return in +} + +func init() { + processors.Add("s2geo", func() telegraf.Processor { + return &Geo{ + LatField: "lat", + LonField: "lon", + TagKey: "s2_cell_id", + CellLevel: 9, + } + }) +} diff --git a/plugins/processors/geo/geo_test.go b/plugins/processors/geo/geo_test.go new file mode 100644 index 000000000..b06a1a06d --- /dev/null +++ b/plugins/processors/geo/geo_test.go @@ -0,0 +1,55 @@ +package geo + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestGeo(t *testing.T) { + plugin := &Geo{ + LatField: "lat", + LonField: "lon", + TagKey: "s2_cell_id", + CellLevel: 11, + } + + pluginMostlyDefault := &Geo{ + CellLevel: 11, + } + + err := plugin.Init() + require.NoError(t, err) + + metric := testutil.MustMetric( + "mta", + map[string]string{}, + map[string]interface{}{ + "lat": 40.878738, + "lon": -72.517572, + }, + time.Unix(1578603600, 0), + ) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "mta", + map[string]string{ + "s2_cell_id": "89e8ed4", + }, + map[string]interface{}{ + "lat": 40.878738, + "lon": -72.517572, + }, + time.Unix(1578603600, 0), + ), + } + + actual := plugin.Apply(metric) + testutil.RequireMetricsEqual(t, expected, actual) + actual = pluginMostlyDefault.Apply(metric) + testutil.RequireMetricsEqual(t, expected, actual) +} From fe4ff3a3be066e813ea1f372ef66e129aeb628fa Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 10 Mar 2020 15:46:46 -0700 Subject: [PATCH 1606/1815] Move s2_geo processor to directory with same name --- plugins/processors/all/all.go | 2 +- plugins/processors/{geo => s2_geo}/README.md | 0 plugins/processors/{geo => s2_geo}/geo.go | 0 plugins/processors/{geo => s2_geo}/geo_test.go | 0 4 files changed, 1 insertion(+), 1 deletion(-) rename plugins/processors/{geo => s2_geo}/README.md (100%) rename plugins/processors/{geo => s2_geo}/geo.go (100%) rename plugins/processors/{geo => s2_geo}/geo_test.go (100%) diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go index e47445059..98e9ccbfa 100644 --- a/plugins/processors/all/all.go +++ b/plugins/processors/all/all.go @@ -5,13 +5,13 @@ import ( _ "github.com/influxdata/telegraf/plugins/processors/converter" _ "github.com/influxdata/telegraf/plugins/processors/date" _ "github.com/influxdata/telegraf/plugins/processors/enum" - _ "github.com/influxdata/telegraf/plugins/processors/geo" _ "github.com/influxdata/telegraf/plugins/processors/override" _ "github.com/influxdata/telegraf/plugins/processors/parser" _ "github.com/influxdata/telegraf/plugins/processors/pivot" _ "github.com/influxdata/telegraf/plugins/processors/printer" _ "github.com/influxdata/telegraf/plugins/processors/regex" _ "github.com/influxdata/telegraf/plugins/processors/rename" + _ 
"github.com/influxdata/telegraf/plugins/processors/s2_geo" _ "github.com/influxdata/telegraf/plugins/processors/strings" _ "github.com/influxdata/telegraf/plugins/processors/tag_limit" _ "github.com/influxdata/telegraf/plugins/processors/template" diff --git a/plugins/processors/geo/README.md b/plugins/processors/s2_geo/README.md similarity index 100% rename from plugins/processors/geo/README.md rename to plugins/processors/s2_geo/README.md diff --git a/plugins/processors/geo/geo.go b/plugins/processors/s2_geo/geo.go similarity index 100% rename from plugins/processors/geo/geo.go rename to plugins/processors/s2_geo/geo.go diff --git a/plugins/processors/geo/geo_test.go b/plugins/processors/s2_geo/geo_test.go similarity index 100% rename from plugins/processors/geo/geo_test.go rename to plugins/processors/s2_geo/geo_test.go From 389723c08afb5e852b92defbd8413bef3b900f0c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 10 Mar 2020 15:47:05 -0700 Subject: [PATCH 1607/1815] Update changelog, readme, licenses --- CHANGELOG.md | 1 + README.md | 1 + docs/LICENSE_OF_DEPENDENCIES.md | 1 + 3 files changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6d0517720..d9b502a4d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ #### New Processors - [template](/plugins/processors/template/README.md) - Contributed by @RobMalvern +- [s2_geo](/plugins/processors/s2_geo/README.md) - Contributed by @alespour #### New Outputs diff --git a/README.md b/README.md index 2e84559c3..3fc101154 100644 --- a/README.md +++ b/README.md @@ -365,6 +365,7 @@ For documentation on the latest development code see the [documentation index][d * [printer](/plugins/processors/printer) * [regex](/plugins/processors/regex) * [rename](/plugins/processors/rename) +* [s2_geo](/plugins/processors/s2_geo) * [strings](/plugins/processors/strings) * [tag_limit](/plugins/processors/tag_limit) * [template](/plugins/processors/template) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 2dae78856..b82eb9a17 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -51,6 +51,7 @@ following works: - github.com/gobwas/glob [MIT License](https://github.com/gobwas/glob/blob/master/LICENSE) - github.com/gofrs/uuid [MIT License](https://github.com/gofrs/uuid/blob/master/LICENSE) - github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE) +- github.com/golang/geo [Apache License 2.0](https://github.com/golang/geo/blob/master/LICENSE) - github.com/golang/mock [Apache License 2.0](https://github.com/golang/mock/blob/master/LICENSE) - github.com/golang/protobuf [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/protobuf/blob/master/LICENSE) - github.com/golang/snappy [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/snappy/blob/master/LICENSE) From 42804b7c5629c264c6039db94f44af2f4f7e3409 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 11 Mar 2020 18:03:11 -0400 Subject: [PATCH 1608/1815] fix issue with ping input in windows. 
Closes #7088 (#7157) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5b23b61db..03fb8d9e9 100644 --- a/go.mod +++ b/go.mod @@ -39,7 +39,7 @@ require ( github.com/eclipse/paho.mqtt.golang v1.2.0 github.com/ericchiang/k8s v1.2.0 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 - github.com/glinton/ping v0.1.3 + github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96 github.com/go-logfmt/logfmt v0.4.0 github.com/go-ole/go-ole v1.2.1 // indirect github.com/go-redis/redis v6.12.0+incompatible diff --git a/go.sum b/go.sum index 995bcce10..719043dbd 100644 --- a/go.sum +++ b/go.sum @@ -132,8 +132,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= -github.com/glinton/ping v0.1.3 h1:8/9mj+hCgfba0X25E0Xs7cy+Zg9jGQVyulMVlUBrDDA= -github.com/glinton/ping v0.1.3/go.mod h1:uY+1eqFUyotrQxF1wYFNtMeHp/swbYRsoGzfcPZ8x3o= +github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96 h1:YpooqMW354GG47PXNBiaCv6yCQizyP3MXD9NUPrCEQ8= +github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96/go.mod h1:uY+1eqFUyotrQxF1wYFNtMeHp/swbYRsoGzfcPZ8x3o= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= From e6f06441282ffd994fd1b13b2413c024c34cf90b Mon Sep 17 00:00:00 2001 From: Tim Hughes Date: Thu, 12 Mar 2020 23:45:35 +0000 Subject: [PATCH 1609/1815] Add Arista LANZ consumer input plugin (#4112) --- README.md | 1 + go.mod | 2 + go.sum | 5 ++ plugins/inputs/all/all.go | 1 + plugins/inputs/lanz/README.md | 87 ++++++++++++++++++++ plugins/inputs/lanz/lanz.go | 137 +++++++++++++++++++++++++++++++ plugins/inputs/lanz/lanz_test.go | 137 +++++++++++++++++++++++++++++++ 7 files changed, 370 insertions(+) create mode 100644 plugins/inputs/lanz/README.md create mode 100644 plugins/inputs/lanz/lanz.go create mode 100644 plugins/inputs/lanz/lanz_test.go diff --git a/README.md b/README.md index 3fc101154..787ade0af 100644 --- a/README.md +++ b/README.md @@ -225,6 +225,7 @@ For documentation on the latest development code see the [documentation index][d * [kibana](./plugins/inputs/kibana) * [kubernetes](./plugins/inputs/kubernetes) * [kube_inventory](./plugins/inputs/kube_inventory) +* [lanz](./plugins/inputs/lanz) * [leofs](./plugins/inputs/leofs) * [linux_sysctl_fs](./plugins/inputs/linux_sysctl_fs) * [logparser](./plugins/inputs/logparser) diff --git a/go.mod b/go.mod index 03fb8d9e9..062e70f50 100644 --- a/go.mod +++ b/go.mod @@ -18,6 +18,8 @@ require ( github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 github.com/apache/thrift v0.12.0 + github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect + github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/armon/go-metrics v0.3.0 // indirect github.com/aws/aws-sdk-go v1.19.41 github.com/bitly/go-hostpool v0.1.0 // indirect diff --git a/go.sum b/go.sum index 719043dbd..a0d8fd479 100644 --- a/go.sum +++ b/go.sum @@ 
-60,6 +60,10 @@ github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl
 github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc=
 github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs=
 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos=
+github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA=
+github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 h1:FD4/ikKOFxwP8muWDypbmBWc634+YcAs3eBrYAmRdZY=
+github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU=
 github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs=
@@ -538,6 +542,7 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index ace0d0044..6624053df 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -80,6 +80,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/kinesis_consumer"
 	_ "github.com/influxdata/telegraf/plugins/inputs/kube_inventory"
 	_ "github.com/influxdata/telegraf/plugins/inputs/kubernetes"
+	_ "github.com/influxdata/telegraf/plugins/inputs/lanz"
 	_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
 	_ "github.com/influxdata/telegraf/plugins/inputs/linux_sysctl_fs"
 	_ "github.com/influxdata/telegraf/plugins/inputs/logparser"
diff --git a/plugins/inputs/lanz/README.md b/plugins/inputs/lanz/README.md
new file mode 100644
index 000000000..95fe02a9b
--- /dev/null
+++ b/plugins/inputs/lanz/README.md
@@ -0,0 +1,87 @@
+# Arista LANZ Consumer Input Plugin
+
+This plugin provides a consumer for use with Arista Networks' Latency Analyzer (LANZ).
+
+Metrics are read from a stream of data via TCP through port 50001 on the
+switch's management IP. The data is in protocol buffer format. For more
+information on Arista LANZ, see:
+
+- https://www.arista.com/en/um-eos/eos-latency-analyzer-lanz
+
+This plugin uses Arista's goarista SDK:
+
+- https://github.com/aristanetworks/goarista
+
+### Configuration
+
+You will need to configure LANZ and enable streaming of LANZ data.
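+Enabling LANZ and its streaming agent is usually only a few lines of EOS
+configuration. The exact commands can vary between EOS releases, so treat the
+snippet below as an illustrative sketch and verify it against the Arista
+documentation linked afterwards:
+
+```
+switch(config)# queue-monitor length
+switch(config)# queue-monitor streaming
+switch(config-qm-streaming)# no shutdown
+```
+
+Arista's documentation covers the details: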
+ +- https://www.arista.com/en/um-eos/eos-section-44-3-configuring-lanz +- https://www.arista.com/en/um-eos/eos-section-44-3-configuring-lanz#ww1149292 + +```toml +[[inputs.lanz]] + servers = [ + "tcp://switch1.int.example.com:50001", + "tcp://switch2.int.example.com:50001", + ] +``` + +### Metrics + +For more details on the metrics see https://github.com/aristanetworks/goarista/blob/master/lanz/proto/lanz.proto + +- lanz_congestion_record: + - tags: + - intf_name + - switch_id + - port_id + - entry_type + - traffic_class + - fabric_peer_intf_name + - source + - port + - fields: + - timestamp (integer) + - queue_size (integer) + - time_of_max_qlen (integer) + - tx_latency (integer) + - q_drop_count (integer) + +- lanz_global_buffer_usage_record + - tags: + - entry_type + - source + - port + - fields: + - timestamp (integer) + - buffer_size (integer) + - duration (integer) + + + +### Sample Queries + +Get the max tx_latency for the last hour for all interfaces on all switches. +``` +SELECT max("tx_latency") AS "max_tx_latency" FROM "congestion_record" WHERE time > now() - 1h GROUP BY time(10s), "hostname", "intf_name" +``` + +Get the max tx_latency for the last hour for all interfaces on all switches. +``` +SELECT max("queue_size") AS "max_queue_size" FROM "congestion_record" WHERE time > now() - 1h GROUP BY time(10s), "hostname", "intf_name" +``` + +Get the max buffer_size for over the last hour for all switches. +``` +SELECT max("buffer_size") AS "max_buffer_size" FROM "global_buffer_usage_record" WHERE time > now() - 1h GROUP BY time(10s), "hostname" +``` + +### Example output +``` +lanz_global_buffer_usage_record,entry_type=2,host=telegraf.int.example.com,port=50001,source=switch01.int.example.com timestamp=158334105824919i,buffer_size=505i,duration=0i 1583341058300643815 +lanz_congestion_record,entry_type=2,host=telegraf.int.example.com,intf_name=Ethernet36,port=50001,port_id=61,source=switch01.int.example.com,switch_id=0,traffic_class=1 time_of_max_qlen=0i,tx_latency=564480i,q_drop_count=0i,timestamp=158334105824919i,queue_size=225i 1583341058300636045 +lanz_global_buffer_usage_record,entry_type=2,host=telegraf.int.example.com,port=50001,source=switch01.int.example.com timestamp=158334105824919i,buffer_size=589i,duration=0i 1583341058300457464 +lanz_congestion_record,entry_type=1,host=telegraf.int.example.com,intf_name=Ethernet36,port=50001,port_id=61,source=switch01.int.example.com,switch_id=0,traffic_class=1 q_drop_count=0i,timestamp=158334105824919i,queue_size=232i,time_of_max_qlen=0i,tx_latency=584640i 1583341058300450302 +``` + + diff --git a/plugins/inputs/lanz/lanz.go b/plugins/inputs/lanz/lanz.go new file mode 100644 index 000000000..7553c33c7 --- /dev/null +++ b/plugins/inputs/lanz/lanz.go @@ -0,0 +1,137 @@ +package lanz + +import ( + "net/url" + "strconv" + "sync" + "time" + + "github.com/aristanetworks/goarista/lanz" + pb "github.com/aristanetworks/goarista/lanz/proto" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +var sampleConfig = ` + ## URL to Arista LANZ endpoint + servers = [ + "tcp://127.0.0.1:50001" + ] +` + +func init() { + inputs.Add("lanz", func() telegraf.Input { + return NewLanz() + }) +} + +type Lanz struct { + Servers []string `toml:"servers"` + clients []lanz.Client + wg sync.WaitGroup +} + +func NewLanz() *Lanz { + return &Lanz{} +} + +func (l *Lanz) SampleConfig() string { + return sampleConfig +} + +func (l *Lanz) Description() string { + return "Read metrics off Arista LANZ, via socket" +} + +func (l *Lanz) 
Gather(acc telegraf.Accumulator) error {
+	return nil
+}
+
+func (l *Lanz) Start(acc telegraf.Accumulator) error {
+
+	if len(l.Servers) == 0 {
+		l.Servers = append(l.Servers, "tcp://127.0.0.1:50001")
+	}
+
+	for _, server := range l.Servers {
+		deviceUrl, err := url.Parse(server)
+		if err != nil {
+			return err
+		}
+		client := lanz.New(
+			lanz.WithAddr(deviceUrl.Host),
+			lanz.WithBackoff(1*time.Second),
+			lanz.WithTimeout(10*time.Second),
+		)
+		l.clients = append(l.clients, client)
+
+		in := make(chan *pb.LanzRecord)
+		go func() {
+			client.Run(in)
+		}()
+		l.wg.Add(1)
+		go func() {
+			defer l.wg.Done()
+			receive(acc, in, deviceUrl)
+		}()
+	}
+	return nil
+}
+
+func (l *Lanz) Stop() {
+	for _, client := range l.clients {
+		client.Stop()
+	}
+	l.wg.Wait()
+}
+
+func receive(acc telegraf.Accumulator, in <-chan *pb.LanzRecord, deviceUrl *url.URL) {
+	for {
+		select {
+		case msg, ok := <-in:
+			if !ok {
+				return
+			}
+			msgToAccumulator(acc, msg, deviceUrl)
+		}
+	}
+}
+
+func msgToAccumulator(acc telegraf.Accumulator, msg *pb.LanzRecord, deviceUrl *url.URL) {
+	cr := msg.GetCongestionRecord()
+	if cr != nil {
+		vals := map[string]interface{}{
+			"timestamp":        int64(cr.GetTimestamp()),
+			"queue_size":       int64(cr.GetQueueSize()),
+			"time_of_max_qlen": int64(cr.GetTimeOfMaxQLen()),
+			"tx_latency":       int64(cr.GetTxLatency()),
+			"q_drop_count":     int64(cr.GetQDropCount()),
+		}
+		tags := map[string]string{
+			"intf_name":             cr.GetIntfName(),
+			"switch_id":             strconv.FormatInt(int64(cr.GetSwitchId()), 10),
+			"port_id":               strconv.FormatInt(int64(cr.GetPortId()), 10),
+			"entry_type":            strconv.FormatInt(int64(cr.GetEntryType()), 10),
+			"traffic_class":         strconv.FormatInt(int64(cr.GetTrafficClass()), 10),
+			"fabric_peer_intf_name": cr.GetFabricPeerIntfName(),
+			"source":                deviceUrl.Hostname(),
+			"port":                  deviceUrl.Port(),
+		}
+		acc.AddFields("lanz_congestion_record", vals, tags)
+	}
+
+	gbur := msg.GetGlobalBufferUsageRecord()
+	if gbur != nil {
+		vals := map[string]interface{}{
+			"timestamp":   int64(gbur.GetTimestamp()),
+			"buffer_size": int64(gbur.GetBufferSize()),
+			"duration":    int64(gbur.GetDuration()),
+		}
+		tags := map[string]string{
+			"entry_type": strconv.FormatInt(int64(gbur.GetEntryType()), 10),
+			"source":     deviceUrl.Hostname(),
+			"port":       deviceUrl.Port(),
+		}
+		acc.AddFields("lanz_global_buffer_usage_record", vals, tags)
+	}
+}
diff --git a/plugins/inputs/lanz/lanz_test.go b/plugins/inputs/lanz/lanz_test.go
new file mode 100644
index 000000000..5f9c7ab24
--- /dev/null
+++ b/plugins/inputs/lanz/lanz_test.go
@@ -0,0 +1,137 @@
+package lanz
+
+import (
+	"net/url"
+	"strconv"
+	"testing"
+
+	pb "github.com/aristanetworks/goarista/lanz/proto"
+	"github.com/golang/protobuf/proto"
+	"github.com/influxdata/telegraf/testutil"
+)
+
+var testProtoBufCongestionRecord1 = &pb.LanzRecord{
+	CongestionRecord: &pb.CongestionRecord{
+		Timestamp:          proto.Uint64(100000000000000),
+		IntfName:           proto.String("eth1"),
+		SwitchId:           proto.Uint32(1),
+		PortId:             proto.Uint32(1),
+		QueueSize:          proto.Uint32(1),
+		EntryType:          pb.CongestionRecord_EntryType.Enum(1),
+		TrafficClass:       proto.Uint32(1),
+		TimeOfMaxQLen:      proto.Uint64(100000000000000),
+		TxLatency:          proto.Uint32(100),
+		QDropCount:         proto.Uint32(1),
+		FabricPeerIntfName: proto.String("FabricPeerIntfName1"),
+	},
+}
+var testProtoBufCongestionRecord2 = &pb.LanzRecord{
+	CongestionRecord: &pb.CongestionRecord{
+		Timestamp:          proto.Uint64(200000000000000),
+		IntfName:           proto.String("eth2"),
+		SwitchId:           proto.Uint32(2),
+		PortId:             proto.Uint32(2),
+		QueueSize:          proto.Uint32(2),
+		EntryType:          
pb.CongestionRecord_EntryType.Enum(2), + TrafficClass: proto.Uint32(2), + TimeOfMaxQLen: proto.Uint64(200000000000000), + TxLatency: proto.Uint32(200), + QDropCount: proto.Uint32(2), + FabricPeerIntfName: proto.String("FabricPeerIntfName2"), + }, +} + +var testProtoBufGlobalBufferUsageRecord = &pb.LanzRecord{ + GlobalBufferUsageRecord: &pb.GlobalBufferUsageRecord{ + EntryType: pb.GlobalBufferUsageRecord_EntryType.Enum(1), + Timestamp: proto.Uint64(100000000000000), + BufferSize: proto.Uint32(1), + Duration: proto.Uint32(10), + }, +} + +func TestLanzGeneratesMetrics(t *testing.T) { + + var acc testutil.Accumulator + + l := NewLanz() + + l.Servers = append(l.Servers, "tcp://switch01.int.example.com:50001") + l.Servers = append(l.Servers, "tcp://switch02.int.example.com:50001") + deviceUrl1, err := url.Parse(l.Servers[0]) + if err != nil { + t.Fail() + } + deviceUrl2, err := url.Parse(l.Servers[1]) + if err != nil { + t.Fail() + } + + msgToAccumulator(&acc, testProtoBufCongestionRecord1, deviceUrl1) + acc.Wait(1) + + vals1 := map[string]interface{}{ + "timestamp": int64(100000000000000), + "queue_size": int64(1), + "time_of_max_qlen": int64(100000000000000), + "tx_latency": int64(100), + "q_drop_count": int64(1), + } + tags1 := map[string]string{ + "intf_name": "eth1", + "switch_id": strconv.FormatInt(int64(1), 10), + "port_id": strconv.FormatInt(int64(1), 10), + "entry_type": strconv.FormatInt(int64(1), 10), + "traffic_class": strconv.FormatInt(int64(1), 10), + "fabric_peer_intf_name": "FabricPeerIntfName1", + "source": "switch01.int.example.com", + "port": "50001", + } + + acc.AssertContainsFields(t, "lanz_congestion_record", vals1) + acc.AssertContainsTaggedFields(t, "lanz_congestion_record", vals1, tags1) + + acc.ClearMetrics() + msgToAccumulator(&acc, testProtoBufCongestionRecord2, deviceUrl2) + acc.Wait(1) + + vals2 := map[string]interface{}{ + "timestamp": int64(200000000000000), + "queue_size": int64(2), + "time_of_max_qlen": int64(200000000000000), + "tx_latency": int64(200), + "q_drop_count": int64(2), + } + tags2 := map[string]string{ + "intf_name": "eth2", + "switch_id": strconv.FormatInt(int64(2), 10), + "port_id": strconv.FormatInt(int64(2), 10), + "entry_type": strconv.FormatInt(int64(2), 10), + "traffic_class": strconv.FormatInt(int64(2), 10), + "fabric_peer_intf_name": "FabricPeerIntfName2", + "source": "switch02.int.example.com", + "port": "50001", + } + + acc.AssertContainsFields(t, "lanz_congestion_record", vals2) + acc.AssertContainsTaggedFields(t, "lanz_congestion_record", vals2, tags2) + + acc.ClearMetrics() + msgToAccumulator(&acc, testProtoBufGlobalBufferUsageRecord, deviceUrl1) + acc.Wait(1) + + gburVals1 := map[string]interface{}{ + "timestamp": int64(100000000000000), + "buffer_size": int64(1), + "duration": int64(10), + } + gburTags1 := map[string]string{ + "entry_type": strconv.FormatInt(int64(1), 10), + "source": "switch01.int.example.com", + "port": "50001", + } + + acc.AssertContainsFields(t, "lanz_global_buffer_usage_record", gburVals1) + acc.AssertContainsTaggedFields(t, "lanz_global_buffer_usage_record", gburVals1, gburTags1) + +} From d5df7666d886d0a9d38410f0a46442bd6fed47a3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 12 Mar 2020 17:01:07 -0700 Subject: [PATCH 1610/1815] Update changelog --- CHANGELOG.md | 1 + docs/LICENSE_OF_DEPENDENCIES.md | 3 +++ plugins/inputs/lanz/README.md | 2 +- 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d9b502a4d..4f6a6cb4c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md 
@@ -14,6 +14,7 @@ - [clickhouse](/plugins/inputs/clickhouse/README.md) - Contributed by @kshvakov - [execd](/plugins/inputs/execd/README.md) - Contributed by @jgraichen - [infiniband](/plugins/inputs/infiniband/README.md) - Contributed by @willfurnell +- [lanz](/plugins/inputs/lanz/README.md): Contributed by @timhughes - [modbus](/plugins/inputs/modbus/README.md) - Contributed by @garciaolais - [monit](/plugins/inputs/monit/README.md) - Contributed by @SirishaGopigiri - [wireguard](/plugins/inputs/wireguard/README.md) - Contributed by @LINKIWI diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index b82eb9a17..5dced88b2 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -18,6 +18,8 @@ following works: - github.com/alecthomas/units [MIT License](https://github.com/alecthomas/units/blob/master/COPYING) - github.com/amir/raidman [The Unlicense](https://github.com/amir/raidman/blob/master/UNLICENSE) - github.com/apache/thrift [Apache License 2.0](https://github.com/apache/thrift/blob/master/LICENSE) +- github.com/aristanetworks/glog [Apache License 2.0](https://github.com/aristanetworks/glog/blob/master/LICENSE) +- github.com/aristanetworks/goarista [Apache License 2.0](https://github.com/aristanetworks/goarista/blob/master/COPYING) - github.com/aws/aws-sdk-go [Apache License 2.0](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) - github.com/beorn7/perks [MIT License](https://github.com/beorn7/perks/blob/master/LICENSE) - github.com/caio/go-tdigest [MIT License](https://github.com/caio/go-tdigest/blob/master/LICENSE) @@ -135,6 +137,7 @@ following works: - golang.org/x/sync [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/sync/blob/master/LICENSE) - golang.org/x/sys [BSD 3-Clause Clear License](https://github.com/golang/sys/blob/master/LICENSE) - golang.org/x/text [BSD 3-Clause Clear License](https://github.com/golang/text/blob/master/LICENSE) +- golang.org/x/time [BSD 3-Clause Clear License](https://github.com/golang/time/blob/master/LICENSE) - golang.zx2c4.com/wireguard [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) - golang.zx2c4.com/wireguard/wgctrl [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) - google.golang.org/api [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/google-api-go-client/blob/master/LICENSE) diff --git a/plugins/inputs/lanz/README.md b/plugins/inputs/lanz/README.md index 95fe02a9b..32033d6ab 100644 --- a/plugins/inputs/lanz/README.md +++ b/plugins/inputs/lanz/README.md @@ -47,7 +47,7 @@ For more details on the metrics see https://github.com/aristanetworks/goarista/b - tx_latency (integer) - q_drop_count (integer) -- lanz_global_buffer_usage_record ++ lanz_global_buffer_usage_record - tags: - entry_type - source From d8b66b69d5e68f2d7cabeb9630f4a1528aa80789 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 13 Mar 2020 14:30:27 -0700 Subject: [PATCH 1611/1815] Mention metric filtering in csv parser docs --- plugins/parsers/csv/README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md index 1881248ee..bd5024a1a 100644 --- a/plugins/parsers/csv/README.md +++ b/plugins/parsers/csv/README.md @@ -87,6 +87,9 @@ on how to set the time format. One metric is created for each row with the columns added as fields. The type of the field is automatically determined based on the contents of the value. 
+In addition to the options above, you can use [metric filtering][] to skip over +columns and rows. + ### Examples Config: @@ -109,3 +112,5 @@ Output: ``` cpu cpu=cpu0,time_user=42,time_system=42,time_idle=42 1536869008000000000 ``` + +[metric filtering]: /docs/CONFIGURATION.md#metric-filtering From da9f19ca9d718ed31fe6568dd08d942c2c924c13 Mon Sep 17 00:00:00 2001 From: M0rdecay <50422107+M0rdecay@users.noreply.github.com> Date: Sat, 14 Mar 2020 01:04:23 +0300 Subject: [PATCH 1612/1815] Allow using name_* modificators for output plugins (#7174) --- docs/CONFIGURATION.md | 3 ++ internal/config/config.go | 27 +++++++++++++ internal/models/running_output.go | 16 ++++++++ internal/models/running_output_test.go | 54 ++++++++++++++++++++++++++ 4 files changed, 100 insertions(+) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 428ffeab4..0acbefb48 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -269,6 +269,9 @@ Parameters that can be used with any output plugin: - **metric_buffer_limit**: The maximum number of unsent metrics to buffer. Use this setting to override the agent `metric_buffer_limit` on a per plugin basis. +- **name_override**: Override the original name of the measurement. +- **name_prefix**: Specifies a prefix to attach to the measurement name. +- **name_suffix**: Specifies a suffix to attach to the measurement name. The [metric filtering][] parameters can be used to limit what metrics are emitted from the output plugin. diff --git a/internal/config/config.go b/internal/config/config.go index 6e05ce45b..f72f1ef26 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -2138,11 +2138,38 @@ func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { } } + if node, ok := tbl.Fields["name_override"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + oc.NameOverride = str.Value + } + } + } + + if node, ok := tbl.Fields["name_suffix"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + oc.NameSuffix = str.Value + } + } + } + + if node, ok := tbl.Fields["name_prefix"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + oc.NamePrefix = str.Value + } + } + } + delete(tbl.Fields, "flush_interval") delete(tbl.Fields, "flush_jitter") delete(tbl.Fields, "metric_buffer_limit") delete(tbl.Fields, "metric_batch_size") delete(tbl.Fields, "alias") + delete(tbl.Fields, "name_override") + delete(tbl.Fields, "name_suffix") + delete(tbl.Fields, "name_prefix") return oc, nil } diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 13f2a94d6..256c18715 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -27,6 +27,10 @@ type OutputConfig struct { FlushJitter *time.Duration MetricBufferLimit int MetricBatchSize int + + NameOverride string + NamePrefix string + NameSuffix string } // RunningOutput contains the output configuration @@ -148,6 +152,18 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { return } + if len(ro.Config.NameOverride) > 0 { + metric.SetName(ro.Config.NameOverride) + } + + if len(ro.Config.NamePrefix) > 0 { + metric.AddPrefix(ro.Config.NamePrefix) + } + + if len(ro.Config.NameSuffix) > 0 { + metric.AddSuffix(ro.Config.NameSuffix) + } + dropped := ro.buffer.Add(metric) atomic.AddInt64(&ro.droppedMetrics, int64(dropped)) diff --git a/internal/models/running_output_test.go 
b/internal/models/running_output_test.go
index 5909ec158..89cd3beec 100644
--- a/internal/models/running_output_test.go
+++ b/internal/models/running_output_test.go
@@ -218,6 +218,60 @@ func TestRunningOutput_TagIncludeMatch(t *testing.T) {
 	assert.Len(t, m.Metrics()[0].Tags(), 1)
 }
 
+// Test that the measurement name is overridden correctly
+func TestRunningOutput_NameOverride(t *testing.T) {
+	conf := &OutputConfig{
+		NameOverride: "new_metric_name",
+	}
+
+	m := &mockOutput{}
+	ro := NewRunningOutput("test", m, conf, 1000, 10000)
+
+	ro.AddMetric(testutil.TestMetric(101, "metric1"))
+	assert.Len(t, m.Metrics(), 0)
+
+	err := ro.Write()
+	assert.NoError(t, err)
+	assert.Len(t, m.Metrics(), 1)
+	assert.Equal(t, "new_metric_name", m.Metrics()[0].Name())
+}
+
+// Test that the measurement name prefix is added correctly
+func TestRunningOutput_NamePrefix(t *testing.T) {
+	conf := &OutputConfig{
+		NamePrefix: "prefix_",
+	}
+
+	m := &mockOutput{}
+	ro := NewRunningOutput("test", m, conf, 1000, 10000)
+
+	ro.AddMetric(testutil.TestMetric(101, "metric1"))
+	assert.Len(t, m.Metrics(), 0)
+
+	err := ro.Write()
+	assert.NoError(t, err)
+	assert.Len(t, m.Metrics(), 1)
+	assert.Equal(t, "prefix_metric1", m.Metrics()[0].Name())
+}
+
+// Test that the measurement name suffix is added correctly
+func TestRunningOutput_NameSuffix(t *testing.T) {
+	conf := &OutputConfig{
+		NameSuffix: "_suffix",
+	}
+
+	m := &mockOutput{}
+	ro := NewRunningOutput("test", m, conf, 1000, 10000)
+
+	ro.AddMetric(testutil.TestMetric(101, "metric1"))
+	assert.Len(t, m.Metrics(), 0)
+
+	err := ro.Write()
+	assert.NoError(t, err)
+	assert.Len(t, m.Metrics(), 1)
+	assert.Equal(t, "metric1_suffix", m.Metrics()[0].Name())
+}
+
 // Test that we can write metrics with simple default setup.
 func TestRunningOutputDefault(t *testing.T) {
 	conf := &OutputConfig{
From 2a465cc6872984865381c7c19165e44cba6d5399 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 16 Mar 2020 10:02:49 -0700
Subject: [PATCH 1613/1815] Use merge=union driver for go.sum

---
 .gitattributes | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitattributes b/.gitattributes
index 276cc7709..21bc439bf 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,4 +1,5 @@
 CHANGELOG.md merge=union
 README.md merge=union
+go.sum merge=union
 plugins/inputs/all/all.go merge=union
 plugins/outputs/all/all.go merge=union
From f69b639aa94f92b044301f22c386468ab751fbc0 Mon Sep 17 00:00:00 2001
From: R290 <46033588+R290@users.noreply.github.com>
Date: Mon, 16 Mar 2020 18:54:21 +0100
Subject: [PATCH 1614/1815] Add Azure IoT Hub / Event Hub input plugin (#6928)

---
 README.md                           |   1 +
 docs/LICENSE_OF_DEPENDENCIES.md     |   6 +
 go.mod                              |   2 +-
 go.sum                              |  43 ++-
 plugins/inputs/all/all.go           |   1 +
 plugins/inputs/eventhub/README.md   |  98 +++++++
 plugins/inputs/eventhub/eventhub.go | 422 ++++++++++++++++++++++++++++
 7 files changed, 558 insertions(+), 15 deletions(-)
 create mode 100644 plugins/inputs/eventhub/README.md
 create mode 100644 plugins/inputs/eventhub/eventhub.go

diff --git a/README.md b/README.md
index 787ade0af..ee4744755 100644
--- a/README.md
+++ b/README.md
@@ -185,6 +185,7 @@ For documentation on the latest development code see the [documentation index][d
 * [aws ecs](./plugins/inputs/ecs) (Amazon Elastic Container Service, Fargate)
 * [elasticsearch](./plugins/inputs/elasticsearch)
 * [ethtool](./plugins/inputs/ethtool)
+* [eventhub](./plugins/inputs/eventhub) (Azure Event Hubs & Azure IoT Hub)
 * 
[execd](./plugins/inputs/execd) * [fail2ban](./plugins/inputs/fail2ban) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 5dced88b2..c0ce7aff0 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -6,8 +6,12 @@ following works: - cloud.google.com/go [Apache License 2.0](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/LICENSE) - code.cloudfoundry.org/clock [Apache License 2.0](https://github.com/cloudfoundry/clock/blob/master/LICENSE) - collectd.org [MIT License](https://git.octo.it/?p=collectd.git;a=blob;f=COPYING;hb=HEAD) +- github.com/Azure/azure-amqp-common-go [MIT License](https://github.com/Azure/azure-amqp-common-go/blob/master/LICENSE) +- github.com/Azure/azure-event-hubs-go [MIT License](https://github.com/Azure/azure-event-hubs-go/blob/master/LICENSE) - github.com/Azure/azure-pipeline-go [MIT License](https://github.com/Azure/azure-pipeline-go/blob/master/LICENSE) +- github.com/Azure/azure-sdk-for-go [Apache License 2.0](https://github.com/Azure/azure-sdk-for-go/blob/master/LICENSE) - github.com/Azure/azure-storage-queue-go [MIT License](https://github.com/Azure/azure-storage-queue-go/blob/master/LICENSE) +- github.com/Azure/go-amqp [MIT License](https://github.com/Azure/go-amqp/blob/master/LICENSE) - github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE) - github.com/Mellanox/rdmamap [Apache License 2.0](https://github.com/Mellanox/rdmamap/blob/master/LICENSE) - github.com/Microsoft/ApplicationInsights-Go [MIT License](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE) @@ -30,6 +34,7 @@ following works: - github.com/couchbase/goutils [COUCHBASE INC. COMMUNITY EDITION LICENSE](https://github.com/couchbase/goutils/blob/master/LICENSE.md) - github.com/davecgh/go-spew [ISC License](https://github.com/davecgh/go-spew/blob/master/LICENSE) - github.com/denisenkom/go-mssqldb [BSD 3-Clause "New" or "Revised" License](https://github.com/denisenkom/go-mssqldb/blob/master/LICENSE.txt) +- github.com/devigned/tab [MIT License](https://github.com/devigned/tab/blob/master/LICENSE) - github.com/dgrijalva/jwt-go [MIT License](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE) - github.com/dimchansky/utfbom [Apache License 2.0](https://github.com/dimchansky/utfbom/blob/master/LICENSE) - github.com/docker/distribution [Apache License 2.0](https://github.com/docker/distribution/blob/master/LICENSE) @@ -76,6 +81,7 @@ following works: - github.com/jackc/pgx [MIT License](https://github.com/jackc/pgx/blob/master/LICENSE) - github.com/jcmturner/gofork [BSD 3-Clause "New" or "Revised" License](https://github.com/jcmturner/gofork/blob/master/LICENSE) - github.com/jmespath/go-jmespath [Apache License 2.0](https://github.com/jmespath/go-jmespath/blob/master/LICENSE) +- github.com/jpillora/backoff [MIT License](https://github.com/jpillora/backoff/blob/master/LICENSE) - github.com/kardianos/service [zlib License](https://github.com/kardianos/service/blob/master/LICENSE) - github.com/karrick/godirwalk [BSD 2-Clause "Simplified" License](https://github.com/karrick/godirwalk/blob/master/LICENSE) - github.com/kballard/go-shellquote [MIT License](https://github.com/kballard/go-shellquote/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 062e70f50..6c6cb6cf4 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,7 @@ require ( cloud.google.com/go v0.37.4 code.cloudfoundry.org/clock v1.0.0 // indirect collectd.org v0.3.0 + 
github.com/Azure/azure-event-hubs-go/v3 v3.2.0 github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 github.com/Azure/go-autorest/autorest v0.9.3 github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 @@ -86,7 +87,6 @@ require ( github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe github.com/miekg/dns v1.0.14 github.com/mitchellh/go-testing-interface v1.0.0 // indirect - github.com/mitchellh/mapstructure v0.0.0-20180715050151-f15292f7a699 // indirect github.com/multiplay/go-ts3 v1.0.0 github.com/naoina/go-stringutil v0.1.0 // indirect github.com/nats-io/nats-server/v2 v2.1.4 diff --git a/go.sum b/go.sum index a0d8fd479..78ef01e23 100644 --- a/go.sum +++ b/go.sum @@ -6,10 +6,20 @@ code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2 code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= collectd.org v0.3.0 h1:iNBHGw1VvPJxH2B6RiFWFZ+vsjo1lCdRszBeOuwGi00= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= -github.com/Azure/azure-pipeline-go v0.1.8 h1:KmVRa8oFMaargVesEuuEoiLCQ4zCCwQ8QX/xg++KS20= +github.com/Azure/azure-amqp-common-go/v3 v3.0.0 h1:j9tjcwhypb/jek3raNrwlCIl7iKQYOug7CLpSyBBodc= +github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg= +github.com/Azure/azure-event-hubs-go/v3 v3.2.0 h1:CQlxKH5a4NX1ZmbdqXUPRwuNGh2XvtgmhkZvkEuWzhs= +github.com/Azure/azure-event-hubs-go/v3 v3.2.0/go.mod h1:BPIIJNH/l/fVHYq3Rm6eg4clbrULrQ3q7+icmqHyyLc= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= +github.com/Azure/azure-pipeline-go v0.1.9 h1:u7JFb9fFTE6Y/j8ae2VK33ePrRqJqoCM/IWkQdAZ+rg= +github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= +github.com/Azure/azure-sdk-for-go v37.1.0+incompatible h1:aFlw3lP7ZHQi4m1kWCpcwYtczhDkGhDoRaMTaxcOf68= +github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 h1:7MiZ6Th+YTmwUdrKmFg5OMsGYz7IdQwjqL0RPxkhhOQ= github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= +github.com/Azure/go-amqp v0.12.6 h1:34yItuwhA/nusvq2sPSNPQxZLCf/CtaogYH8n578mnY= +github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3 h1:OZEIaBbMdUE/Js+BQKlpO81XlISgipr6yDJ+PSwsgi4= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= @@ -28,6 +38,10 @@ github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxB github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.2.0 
h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= +github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= @@ -98,6 +112,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 h1:YcpmyvADGYw5LqMnHqSkyIELsHCGF6PkrmM31V8rF7o= github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= +github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= +github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= @@ -165,7 +181,6 @@ github.com/gofrs/uuid v2.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRx github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -180,7 +195,6 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1-0.20190508161146-9fa652df1129 h1:tT8iWCYw4uOem71yYA3htfH+LNopJvcqZQshm56G5L4= github.com/golang/mock v1.3.1-0.20190508161146-9fa652df1129/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -190,10 +204,13 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= 
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= @@ -240,7 +257,6 @@ github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerX github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -267,6 +283,10 @@ github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 h1:K//n/AqR5HjG3qxbrBCL4vJPW0MVFSs9CPK1OOJdRME= +github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4 h1:nwOc1YaOrYJ37sEBrtWZrdqzK22hiJs3GpDmP3sR2Yw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= @@ -326,8 +346,8 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/mapstructure v0.0.0-20180715050151-f15292f7a699 h1:KXZJFdun9knAVAR8tg/aHJEr5DgtcbqyvzacK+CDCaI= -github.com/mitchellh/mapstructure v0.0.0-20180715050151-f15292f7a699/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -356,12 +376,10 @@ github.com/nsqio/go-nsq v1.0.7 h1:O0pIZJYTf+x7cZBA0UMY8WxFG79lYTURmWzAAh48ljY= github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -384,7 +402,6 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw= github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -430,7 +447,6 @@ github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -497,7 +513,7 @@ golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -538,7 +554,6 @@ golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2 golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -556,6 +571,7 @@ golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.zx2c4.com/wireguard v0.0.20200121 h1:vcswa5Q6f+sylDfjqyrVNNrjsFUUbPsgAQTBCAg/Qf8= golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4= golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 h1:KTi97NIQGgSMaN0v/oxniJV0MEzfzmrDUOAWxombQVc= @@ -613,7 +629,6 @@ gopkg.in/olivere/elastic.v5 v5.0.70/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 6624053df..45d3f8d17 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -40,6 +40,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/ecs" _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch" _ "github.com/influxdata/telegraf/plugins/inputs/ethtool" + _ "github.com/influxdata/telegraf/plugins/inputs/eventhub" _ "github.com/influxdata/telegraf/plugins/inputs/exec" _ "github.com/influxdata/telegraf/plugins/inputs/execd" _ "github.com/influxdata/telegraf/plugins/inputs/fail2ban" diff --git a/plugins/inputs/eventhub/README.md b/plugins/inputs/eventhub/README.md new file mode 100644 index 000000000..f9b01d4b3 --- /dev/null +++ b/plugins/inputs/eventhub/README.md @@ -0,0 +1,98 @@ +# Azure Event Hubs input plugin + +This plugin provides a consumer for use with Azure Event Hubs and Azure IoT Hub. 
The implementation is in essence a wrapper for [Microsoft Azure Event Hubs Client for Golang](https://github.com/Azure/azure-event-hubs-go). + +## Configuration + +```toml +[[inputs.eventhub]] + ## The default behavior is to create a new Event Hub client from environment variables. + ## This requires one of the following sets of environment variables to be set: + ## + ## 1) Expected Environment Variables: + ## - "EVENTHUB_NAMESPACE" + ## - "EVENTHUB_NAME" + ## - "EVENTHUB_CONNECTION_STRING" + ## + ## 2) Expected Environment Variables: + ## - "EVENTHUB_NAMESPACE" + ## - "EVENTHUB_NAME" + ## - "EVENTHUB_KEY_NAME" + ## - "EVENTHUB_KEY_VALUE" + + ## Uncommenting the option below will create an Event Hub client based solely on the connection string. + ## This can either be the associated environment variable or hard coded directly. + # connection_string = "$EVENTHUB_CONNECTION_STRING" + + ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister + # persistence_dir = "" + + ## Change the default consumer group + # consumer_group = "" + + ## By default the event hub receives all messages present on the broker, alternative modes can be set below. + ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339). + ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run). + # from_timestamp = + # latest = true + + ## Set a custom prefetch count for the receiver(s) + # prefetch_count = 1000 + + ## Add an epoch to the receiver(s) + # epoch = 0 + + ## Change to set a custom user agent, "telegraf" is used by default + # user_agent = "telegraf" + + ## To consume from a specific partition, set the partition_ids option. + ## An empty array will result in receiving from all partitions. + # partition_ids = ["0","1"] + + ## Max undelivered messages + # max_undelivered_messages = 1000 + + ## Set either option below to true to use a system property as timestamp. + ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime. + ## It is recommended to use this setting when the data itself has no timestamp. + # enqueued_time_as_ts = true + # iot_hub_enqueued_time_as_ts = true + + ## Tags or fields to create from keys present in the application property bag. + ## These could for example be set by message enrichments in Azure IoT Hub. + application_property_tags = [] + application_property_fields = [] + + ## Tag or field name to use for metadata + ## By default all metadata is disabled + # sequence_number_field = "SequenceNumber" + # enqueued_time_field = "EnqueuedTime" + # offset_field = "Offset" + # partition_id_tag = "PartitionID" + # partition_key_tag = "PartitionKey" + # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID" + # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID" + # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod" + # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID" + # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +``` +## Testing + +The main focus for development of this plugin is Azure IoT hub: + +1. Create an Azure IoT Hub by following any of the guides provided here: https://docs.microsoft.com/en-us/azure/iot-hub/ +2. 
Create a device, for example a [simulated Raspberry Pi](https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-raspberry-pi-web-simulator-get-started) +3. The connection string needed for the plugin is located under *Shared access policies*, both the *iothubowner* and *service* policies should work + +## Untested features: + +- Authentication with [AAD TokenProvider environment variables](https://github.com/Azure/azure-event-hubs-go#aad-tokenprovider-environment-variables) + +## Not implemented: +- [Event Processor Host](https://github.com/Azure/azure-event-hubs-go#event-processor-host) (should only be needed when using multiple Telegraf instances consuming the same partition) \ No newline at end of file diff --git a/plugins/inputs/eventhub/eventhub.go b/plugins/inputs/eventhub/eventhub.go new file mode 100644 index 000000000..dbdaeb0ab --- /dev/null +++ b/plugins/inputs/eventhub/eventhub.go @@ -0,0 +1,422 @@ +package eventhub + +import ( + "context" + "fmt" + "sync" + "time" + + eventhub "github.com/Azure/azure-event-hubs-go/v3" + "github.com/Azure/azure-event-hubs-go/v3/persist" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" +) + +const ( + defaultMaxUndeliveredMessages = 1000 +) + +type empty struct{} +type semaphore chan empty + +// EventHub is the top level struct for this plugin +type EventHub struct { + // Configuration + ConnectionString string `toml:"connection_string"` + PersistenceDir string `toml:"persistence_dir"` + ConsumerGroup string `toml:"consumer_group"` + FromTimestamp time.Time `toml:"from_timestamp"` + Latest bool `toml:"latest"` + PrefetchCount uint32 `toml:"prefetch_count"` + Epoch int64 `toml:"epoch"` + UserAgent string `toml:"user_agent"` + PartitionIDs []string `toml:"partition_ids"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + EnqueuedTimeAsTs bool `toml:"enqueued_time_as_ts"` + IotHubEnqueuedTimeAsTs bool `toml:"iot_hub_enqueued_time_as_ts"` + + // Metadata + ApplicationPropertyFields []string `toml:"application_property_fields"` + ApplicationPropertyTags []string `toml:"application_property_tags"` + SequenceNumberField string `toml:"sequence_number_field"` + EnqueuedTimeField string `toml:"enqueued_time_field"` + OffsetField string `toml:"offset_field"` + PartitionIDTag string `toml:"partition_id_tag"` + PartitionKeyTag string `toml:"partition_key_tag"` + IoTHubDeviceConnectionIDTag string `toml:"iot_hub_device_connection_id_tag"` + IoTHubAuthGenerationIDTag string `toml:"iot_hub_auth_generation_id_tag"` + IoTHubConnectionAuthMethodTag string `toml:"iot_hub_connection_auth_method_tag"` + IoTHubConnectionModuleIDTag string `toml:"iot_hub_connection_module_id_tag"` + IoTHubEnqueuedTimeField string `toml:"iot_hub_enqueued_time_field"` + + Log telegraf.Logger `toml:"-"` + + // Azure + hub *eventhub.Hub + cancel context.CancelFunc + wg sync.WaitGroup + + parser parsers.Parser + in chan []telegraf.Metric +} + +// SampleConfig is provided here +func (*EventHub) SampleConfig() string { + return ` + ## The default behavior is to create a new Event Hub client from environment variables. 
+ ## This requires one of the following sets of environment variables to be set: + ## + ## 1) Expected Environment Variables: + ## - "EVENTHUB_NAMESPACE" + ## - "EVENTHUB_NAME" + ## - "EVENTHUB_CONNECTION_STRING" + ## + ## 2) Expected Environment Variables: + ## - "EVENTHUB_NAMESPACE" + ## - "EVENTHUB_NAME" + ## - "EVENTHUB_KEY_NAME" + ## - "EVENTHUB_KEY_VALUE" + + ## Uncommenting the option below will create an Event Hub client based solely on the connection string. + ## This can either be the associated environment variable or hard coded directly. + # connection_string = "$EVENTHUB_CONNECTION_STRING" + + ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister + # persistence_dir = "" + + ## Change the default consumer group + # consumer_group = "" + + ## By default the event hub receives all messages present on the broker, alternative modes can be set below. + ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339). + ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run). + # from_timestamp = + # latest = true + + ## Set a custom prefetch count for the receiver(s) + # prefetch_count = 1000 + + ## Add an epoch to the receiver(s) + # epoch = 0 + + ## Change to set a custom user agent, "telegraf" is used by default + # user_agent = "telegraf" + + ## To consume from a specific partition, set the partition_ids option. + ## An empty array will result in receiving from all partitions. + # partition_ids = ["0","1"] + + ## Max undelivered messages + # max_undelivered_messages = 1000 + + ## Set either option below to true to use a system property as timestamp. + ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime. + ## It is recommended to use this setting when the data itself has no timestamp. + # enqueued_time_as_ts = true + # iot_hub_enqueued_time_as_ts = true + + ## Tags or fields to create from keys present in the application property bag. + ## These could for example be set by message enrichments in Azure IoT Hub. + application_property_tags = [] + application_property_fields = [] + + ## Tag or field name to use for metadata + ## By default all metadata is disabled + # sequence_number_field = "SequenceNumber" + # enqueued_time_field = "EnqueuedTime" + # offset_field = "Offset" + # partition_id_tag = "PartitionID" + # partition_key_tag = "PartitionKey" + # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID" + # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID" + # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod" + # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID" + # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime" + + ## Data format to consume. 
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + ` +} + +// Description of the plugin +func (*EventHub) Description() string { + return "Azure Event Hubs service input plugin" +} + +// SetParser sets the parser +func (e *EventHub) SetParser(parser parsers.Parser) { + e.parser = parser +} + +// Gather function is unused +func (*EventHub) Gather(telegraf.Accumulator) error { + return nil +} + +// Init the EventHub ServiceInput +func (e *EventHub) Init() (err error) { + if e.MaxUndeliveredMessages == 0 { + e.MaxUndeliveredMessages = defaultMaxUndeliveredMessages + } + + // Set hub options + hubOpts := []eventhub.HubOption{} + + if e.PersistenceDir != "" { + persister, err := persist.NewFilePersister(e.PersistenceDir) + if err != nil { + return err + } + + hubOpts = append(hubOpts, eventhub.HubWithOffsetPersistence(persister)) + } + + if e.UserAgent != "" { + hubOpts = append(hubOpts, eventhub.HubWithUserAgent(e.UserAgent)) + } else { + hubOpts = append(hubOpts, eventhub.HubWithUserAgent(internal.ProductToken())) + } + + // Create event hub connection + if e.ConnectionString != "" { + e.hub, err = eventhub.NewHubFromConnectionString(e.ConnectionString, hubOpts...) + } else { + e.hub, err = eventhub.NewHubFromEnvironment(hubOpts...) + } + + return err +} + +// Start the EventHub ServiceInput +func (e *EventHub) Start(acc telegraf.Accumulator) error { + e.in = make(chan []telegraf.Metric) + + var ctx context.Context + ctx, e.cancel = context.WithCancel(context.Background()) + + // Start tracking + e.wg.Add(1) + go func() { + defer e.wg.Done() + e.startTracking(ctx, acc) + }() + + // Configure receiver options + receiveOpts, err := e.configureReceiver() + if err != nil { + return err + } + + partitions := e.PartitionIDs + + if len(e.PartitionIDs) == 0 { + runtimeinfo, err := e.hub.GetRuntimeInformation(ctx) + if err != nil { + return err + } + + partitions = runtimeinfo.PartitionIDs + } + + for _, partitionID := range partitions { + _, err = e.hub.Receive(ctx, partitionID, e.onMessage, receiveOpts...) + if err != nil { + return fmt.Errorf("creating receiver for partition %q: %v", partitionID, err) + } + } + + return nil +} + +func (e *EventHub) configureReceiver() ([]eventhub.ReceiveOption, error) { + receiveOpts := []eventhub.ReceiveOption{} + + if e.ConsumerGroup != "" { + receiveOpts = append(receiveOpts, eventhub.ReceiveWithConsumerGroup(e.ConsumerGroup)) + } + + if !e.FromTimestamp.IsZero() { + receiveOpts = append(receiveOpts, eventhub.ReceiveFromTimestamp(e.FromTimestamp)) + } else if e.Latest { + receiveOpts = append(receiveOpts, eventhub.ReceiveWithLatestOffset()) + } + + if e.PrefetchCount != 0 { + receiveOpts = append(receiveOpts, eventhub.ReceiveWithPrefetchCount(e.PrefetchCount)) + } + + if e.Epoch != 0 { + receiveOpts = append(receiveOpts, eventhub.ReceiveWithEpoch(e.Epoch)) + } + + return receiveOpts, nil +} + +// OnMessage handles an Event. When this function returns without error the +// Event is immediately accepted and the offset is updated. If an error is +// returned the Event is marked for redelivery. 
+func (e *EventHub) onMessage(ctx context.Context, event *eventhub.Event) error {
+	metrics, err := e.createMetrics(event)
+	if err != nil {
+		return err
+	}
+
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case e.in <- metrics:
+		return nil
+	}
+}
+
+// OnDelivery returns true if a new slot has opened up in the TrackingAccumulator.
+func (e *EventHub) onDelivery(
+	acc telegraf.TrackingAccumulator,
+	groups map[telegraf.TrackingID][]telegraf.Metric,
+	track telegraf.DeliveryInfo,
+) bool {
+	if track.Delivered() {
+		delete(groups, track.ID())
+		return true
+	}
+
+	// The metric was already accepted when onMessage completed, so we can't
+	// fall back on redelivery from Event Hub. Add a new copy of the metric for
+	// reprocessing.
+	metrics, ok := groups[track.ID()]
+	delete(groups, track.ID())
+	if !ok {
+		// The metrics should always be found; this message indicates a programming error.
+		e.Log.Errorf("Could not find delivery: %d", track.ID())
+		return true
+	}
+
+	backup := deepCopyMetrics(metrics)
+	id := acc.AddTrackingMetricGroup(metrics)
+	groups[id] = backup
+	return false
+}
+
+func (e *EventHub) startTracking(ctx context.Context, ac telegraf.Accumulator) {
+	acc := ac.WithTracking(e.MaxUndeliveredMessages)
+	sem := make(semaphore, e.MaxUndeliveredMessages)
+	groups := make(map[telegraf.TrackingID][]telegraf.Metric, e.MaxUndeliveredMessages)
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case track := <-acc.Delivered():
+			if e.onDelivery(acc, groups, track) {
+				<-sem
+			}
+		case sem <- empty{}:
+			select {
+			case <-ctx.Done():
+				return
+			case track := <-acc.Delivered():
+				if e.onDelivery(acc, groups, track) {
+					<-sem
+					<-sem
+				}
+			case metrics := <-e.in:
+				backup := deepCopyMetrics(metrics)
+				id := acc.AddTrackingMetricGroup(metrics)
+				groups[id] = backup
+			}
+		}
+	}
+}
+
+func deepCopyMetrics(in []telegraf.Metric) []telegraf.Metric {
+	metrics := make([]telegraf.Metric, 0, len(in))
+	for _, m := range in {
+		metrics = append(metrics, m.Copy())
+	}
+	return metrics
+}
+
+// CreateMetrics returns the Metrics from the Event. 
+func (e *EventHub) createMetrics(event *eventhub.Event) ([]telegraf.Metric, error) { + metrics, err := e.parser.Parse(event.Data) + if err != nil { + return nil, err + } + + for i := range metrics { + for _, field := range e.ApplicationPropertyFields { + if val, ok := event.Get(field); ok { + metrics[i].AddField(field, val) + } + } + + for _, tag := range e.ApplicationPropertyTags { + if val, ok := event.Get(tag); ok { + metrics[i].AddTag(tag, fmt.Sprintf("%v", val)) + } + } + + if e.SequenceNumberField != "" { + metrics[i].AddField(e.SequenceNumberField, *event.SystemProperties.SequenceNumber) + } + + if e.EnqueuedTimeAsTs { + metrics[i].SetTime(*event.SystemProperties.EnqueuedTime) + } else if e.EnqueuedTimeField != "" { + metrics[i].AddField(e.EnqueuedTimeField, (*event.SystemProperties.EnqueuedTime).UnixNano()/int64(time.Millisecond)) + } + + if e.OffsetField != "" { + metrics[i].AddField(e.OffsetField, *event.SystemProperties.Offset) + } + + if event.SystemProperties.PartitionID != nil && e.PartitionIDTag != "" { + metrics[i].AddTag(e.PartitionIDTag, string(*event.SystemProperties.PartitionID)) + } + if event.SystemProperties.PartitionKey != nil && e.PartitionKeyTag != "" { + metrics[i].AddTag(e.PartitionKeyTag, *event.SystemProperties.PartitionKey) + } + if event.SystemProperties.IoTHubDeviceConnectionID != nil && e.IoTHubDeviceConnectionIDTag != "" { + metrics[i].AddTag(e.IoTHubDeviceConnectionIDTag, *event.SystemProperties.IoTHubDeviceConnectionID) + } + if event.SystemProperties.IoTHubAuthGenerationID != nil && e.IoTHubAuthGenerationIDTag != "" { + metrics[i].AddTag(e.IoTHubAuthGenerationIDTag, *event.SystemProperties.IoTHubAuthGenerationID) + } + if event.SystemProperties.IoTHubConnectionAuthMethod != nil && e.IoTHubConnectionAuthMethodTag != "" { + metrics[i].AddTag(e.IoTHubConnectionAuthMethodTag, *event.SystemProperties.IoTHubConnectionAuthMethod) + } + if event.SystemProperties.IoTHubConnectionModuleID != nil && e.IoTHubConnectionModuleIDTag != "" { + metrics[i].AddTag(e.IoTHubConnectionModuleIDTag, *event.SystemProperties.IoTHubConnectionModuleID) + } + if event.SystemProperties.IoTHubEnqueuedTime != nil { + if e.IotHubEnqueuedTimeAsTs { + metrics[i].SetTime(*event.SystemProperties.IoTHubEnqueuedTime) + } else if e.IoTHubEnqueuedTimeField != "" { + metrics[i].AddField(e.IoTHubEnqueuedTimeField, (*event.SystemProperties.IoTHubEnqueuedTime).UnixNano()/int64(time.Millisecond)) + } + } + } + + return metrics, nil +} + +// Stop the EventHub ServiceInput +func (e *EventHub) Stop() { + err := e.hub.Close(context.Background()) + if err != nil { + e.Log.Errorf("Error closing Event Hub connection: %v", err) + } + e.cancel() + e.wg.Wait() +} + +func init() { + inputs.Add("eventhub", func() telegraf.Input { + return &EventHub{} + }) +} From 79aad9f06a295a0e264493338c5d74874c4be6d2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 16 Mar 2020 12:15:18 -0700 Subject: [PATCH 1615/1815] Rename eventhub -> eventhub_consumer --- plugins/inputs/all/all.go | 2 +- plugins/inputs/{eventhub => eventhub_consumer}/README.md | 4 ++-- .../eventhub.go => eventhub_consumer/eventhub_consumer.go} | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) rename plugins/inputs/{eventhub => eventhub_consumer}/README.md (98%) rename plugins/inputs/{eventhub/eventhub.go => eventhub_consumer/eventhub_consumer.go} (99%) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 45d3f8d17..3e1f959fa 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -40,7 +40,7 @@ import 
( _ "github.com/influxdata/telegraf/plugins/inputs/ecs" _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch" _ "github.com/influxdata/telegraf/plugins/inputs/ethtool" - _ "github.com/influxdata/telegraf/plugins/inputs/eventhub" + _ "github.com/influxdata/telegraf/plugins/inputs/eventhub_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/exec" _ "github.com/influxdata/telegraf/plugins/inputs/execd" _ "github.com/influxdata/telegraf/plugins/inputs/fail2ban" diff --git a/plugins/inputs/eventhub/README.md b/plugins/inputs/eventhub_consumer/README.md similarity index 98% rename from plugins/inputs/eventhub/README.md rename to plugins/inputs/eventhub_consumer/README.md index f9b01d4b3..ae0907ca1 100644 --- a/plugins/inputs/eventhub/README.md +++ b/plugins/inputs/eventhub_consumer/README.md @@ -5,7 +5,7 @@ This plugin provides a consumer for use with Azure Event Hubs and Azure IoT Hub. ## Configuration ```toml -[[inputs.eventhub]] +[[inputs.eventhub_consumer]] ## The default behavior is to create a new Event Hub client from environment variables. ## This requires one of the following sets of environment variables to be set: ## @@ -95,4 +95,4 @@ The main focus for development of this plugin is Azure IoT hub: - Authentication with [AAD TokenProvider environment variables](https://github.com/Azure/azure-event-hubs-go#aad-tokenprovider-environment-variables) ## Not implemented: -- [Event Processor Host](https://github.com/Azure/azure-event-hubs-go#event-processor-host) (should only be needed when using multiple Telegraf instances consuming the same partition) \ No newline at end of file +- [Event Processor Host](https://github.com/Azure/azure-event-hubs-go#event-processor-host) (should only be needed when using multiple Telegraf instances consuming the same partition) diff --git a/plugins/inputs/eventhub/eventhub.go b/plugins/inputs/eventhub_consumer/eventhub_consumer.go similarity index 99% rename from plugins/inputs/eventhub/eventhub.go rename to plugins/inputs/eventhub_consumer/eventhub_consumer.go index dbdaeb0ab..d5e2be115 100644 --- a/plugins/inputs/eventhub/eventhub.go +++ b/plugins/inputs/eventhub_consumer/eventhub_consumer.go @@ -416,7 +416,7 @@ func (e *EventHub) Stop() { } func init() { - inputs.Add("eventhub", func() telegraf.Input { + inputs.Add("eventhub_consumer", func() telegraf.Input { return &EventHub{} }) } From 1b187b173d8a4ac14a68c4c96dd4da34141a8e40 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 16 Mar 2020 12:37:44 -0700 Subject: [PATCH 1616/1815] Update changelog --- CHANGELOG.md | 1 + README.md | 2 +- plugins/inputs/eventhub_consumer/README.md | 32 ++++++++++------------ 3 files changed, 17 insertions(+), 18 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f6a6cb4c..c3a03e573 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ - [clickhouse](/plugins/inputs/clickhouse/README.md) - Contributed by @kshvakov - [execd](/plugins/inputs/execd/README.md) - Contributed by @jgraichen +- [eventhub_consumer](/plugins/inputs/eventhub_consumer/README.md) - Contributed by @R290 - [infiniband](/plugins/inputs/infiniband/README.md) - Contributed by @willfurnell - [lanz](/plugins/inputs/lanz/README.md): Contributed by @timhughes - [modbus](/plugins/inputs/modbus/README.md) - Contributed by @garciaolais diff --git a/README.md b/README.md index ee4744755..cb0c2169b 100644 --- a/README.md +++ b/README.md @@ -185,7 +185,7 @@ For documentation on the latest development code see the [documentation index][d * [aws ecs](./plugins/inputs/ecs) (Amazon 
Elastic Container Service, Fargate) * [elasticsearch](./plugins/inputs/elasticsearch) * [ethtool](./plugins/inputs/ethtool) -* [eventhub](./plugins/inputs/eventhub) (Azure Event Hubs \& Azure IoT Hub) +* [eventhub_consumer](./plugins/inputs/eventhub_consumer) (Azure Event Hubs \& Azure IoT Hub) * [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios) * [execd](./plugins/inputs/execd) * [fail2ban](./plugins/inputs/fail2ban) diff --git a/plugins/inputs/eventhub_consumer/README.md b/plugins/inputs/eventhub_consumer/README.md index ae0907ca1..151e0802a 100644 --- a/plugins/inputs/eventhub_consumer/README.md +++ b/plugins/inputs/eventhub_consumer/README.md @@ -1,8 +1,16 @@ -# Azure Event Hubs input plugin +# Event Hub Consumer Input Plugin -This plugin provides a consumer for use with Azure Event Hubs and Azure IoT Hub. The implementation is in essence a wrapper for [Microsoft Azure Event Hubs Client for Golang](https://github.com/Azure/azure-event-hubs-go). +This plugin provides a consumer for use with Azure Event Hubs and Azure IoT Hub. -## Configuration +### IoT Hub Setup + +The main focus for development of this plugin is Azure IoT hub: + +1. Create an Azure IoT Hub by following any of the guides provided here: https://docs.microsoft.com/en-us/azure/iot-hub/ +2. Create a device, for example a [simulated Raspberry Pi](https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-raspberry-pi-web-simulator-get-started) +3. The connection string needed for the plugin is located under *Shared access policies*, both the *iothubowner* and *service* policies should work + +### Configuration ```toml [[inputs.eventhub_consumer]] @@ -30,7 +38,7 @@ This plugin provides a consumer for use with Azure Event Hubs and Azure IoT Hub. ## Change the default consumer group # consumer_group = "" - ## By default the event hub receives all messages present on the broker, alternative modes can be set below. + ## By default the event hub receives all messages present on the broker, alternative modes can be set below. ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339). ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run). # from_timestamp = @@ -44,8 +52,8 @@ This plugin provides a consumer for use with Azure Event Hubs and Azure IoT Hub. ## Change to set a custom user agent, "telegraf" is used by default # user_agent = "telegraf" - - ## To consume from a specific partition, set the partition_ids option. + + ## To consume from a specific partition, set the partition_ids option. ## An empty array will result in receiving from all partitions. # partition_ids = ["0","1"] @@ -82,17 +90,7 @@ This plugin provides a consumer for use with Azure Event Hubs and Azure IoT Hub. ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` -## Testing -The main focus for development of this plugin is Azure IoT hub: - -1. Create an Azure IoT Hub by following any of the guides provided here: https://docs.microsoft.com/en-us/azure/iot-hub/ -2. Create a device, for example a [simulated Raspberry Pi](https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-raspberry-pi-web-simulator-get-started) -3. 
The connection string needed for the plugin is located under *Shared access policies*, both the *iothubowner* and *service* policies should work - -## Untested features: +### Additional Environment - Authentication with [AAD TokenProvider environment variables](https://github.com/Azure/azure-event-hubs-go#aad-tokenprovider-environment-variables) - -## Not implemented: -- [Event Processor Host](https://github.com/Azure/azure-event-hubs-go#event-processor-host) (should only be needed when using multiple Telegraf instances consuming the same partition) From 9508bbdf292b475b92eb85f5e315a7145ab66791 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 16 Mar 2020 13:21:53 -0700 Subject: [PATCH 1617/1815] Update eventhub_consumer README. --- plugins/inputs/eventhub_consumer/README.md | 12 +++++++----- .../inputs/eventhub_consumer/eventhub_consumer.go | 6 +++--- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/eventhub_consumer/README.md b/plugins/inputs/eventhub_consumer/README.md index 151e0802a..06c43cf31 100644 --- a/plugins/inputs/eventhub_consumer/README.md +++ b/plugins/inputs/eventhub_consumer/README.md @@ -30,7 +30,7 @@ The main focus for development of this plugin is Azure IoT hub: ## Uncommenting the option below will create an Event Hub client based solely on the connection string. ## This can either be the associated environment variable or hard coded directly. - # connection_string = "$EVENTHUB_CONNECTION_STRING" + # connection_string = "" ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister # persistence_dir = "" @@ -68,8 +68,8 @@ The main focus for development of this plugin is Azure IoT hub: ## Tags or fields to create from keys present in the application property bag. ## These could for example be set by message enrichments in Azure IoT Hub. - application_property_tags = [] - application_property_fields = [] + # application_property_tags = [] + # application_property_fields = [] ## Tag or field name to use for metadata ## By default all metadata is disabled @@ -91,6 +91,8 @@ The main focus for development of this plugin is Azure IoT hub: data_format = "influx" ``` -### Additional Environment +#### Environment Variables -- Authentication with [AAD TokenProvider environment variables](https://github.com/Azure/azure-event-hubs-go#aad-tokenprovider-environment-variables) +[Full documentation of the available environment variables][envvar]. + +[envvar]: https://github.com/Azure/azure-event-hubs-go#environment-variables diff --git a/plugins/inputs/eventhub_consumer/eventhub_consumer.go b/plugins/inputs/eventhub_consumer/eventhub_consumer.go index d5e2be115..da8a6e5f7 100644 --- a/plugins/inputs/eventhub_consumer/eventhub_consumer.go +++ b/plugins/inputs/eventhub_consumer/eventhub_consumer.go @@ -81,7 +81,7 @@ func (*EventHub) SampleConfig() string { ## Uncommenting the option below will create an Event Hub client based solely on the connection string. ## This can either be the associated environment variable or hard coded directly. - # connection_string = "$EVENTHUB_CONNECTION_STRING" + # connection_string = "" ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister # persistence_dir = "" @@ -119,8 +119,8 @@ func (*EventHub) SampleConfig() string { ## Tags or fields to create from keys present in the application property bag. ## These could for example be set by message enrichments in Azure IoT Hub. 
- application_property_tags = [] - application_property_fields = [] + # application_property_tags = [] + # application_property_fields = [] ## Tag or field name to use for metadata ## By default all metadata is disabled From 0a75dea70df332993813103f8c28342e7bdbd615 Mon Sep 17 00:00:00 2001 From: denzilribeiro Date: Mon, 16 Mar 2020 15:32:30 -0500 Subject: [PATCH 1618/1815] Add Database IO Tempdb per Azure DB and fix PerfmonV2 collection for on-prem (#7163) --- plugins/inputs/sqlserver/sqlserver.go | 275 ++++++++++++++------------ 1 file changed, 153 insertions(+), 122 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 8b22be6ed..a280e3c76 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -359,24 +359,40 @@ BEGIN SELECT ''sqlserver_database_io'' As [measurement] ,REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance] - ,DB_NAME([vfs].[database_id]) AS [database_name] + ,DB_NAME() as database_name + ,vfs.database_id -- /*needed as tempdb is different for each Azure SQL DB as grouping has to be by logical server + db_name + database_id*/ + ,vfs.file_id ,vfs.io_stall_read_ms AS read_latency_ms ,vfs.num_of_reads AS reads ,vfs.num_of_bytes_read AS read_bytes ,vfs.io_stall_write_ms AS write_latency_ms ,vfs.num_of_writes AS writes ,vfs.num_of_bytes_written AS write_bytes - ,vfs.io_stall_queued_read_ms as rg_read_stall_ms - ,ISNULL(b.name ,''RBPEX'') as logical_filename - ,ISNULL(b.physical_name, ''RBPEX'') as physical_filename + ,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms] + ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms] + ,CASE + WHEN (vfs.database_id = 0) THEN ''RBPEX'' + ELSE b.logical_filename + END as logical_filename + ,CASE + WHEN (vfs.database_id = 0) THEN ''RBPEX'' + ELSE b.physical_filename + END as physical_filename ,CASE WHEN vfs.file_id = 2 THEN ''LOG'' ELSE ''DATA'' END AS file_type ,ISNULL(size,0)/128 AS current_size_mb - ,ISNULL(FILEPROPERTY(b.name,''SpaceUsed'')/128,0) as space_used_mb - ,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms] - ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms] + ,ISNULL(FILEPROPERTY(b.logical_filename,''SpaceUsed'')/128,0) as space_used_mb FROM [sys].[dm_io_virtual_file_stats](NULL,NULL) AS vfs - LEFT OUTER join sys.database_files b - ON b.file_id = vfs.file_id + -- needed to get Tempdb file names on Azure SQL DB so you can join appropriately. 
Without this had a bug where join was only on file_id + LEFT OUTER join + ( + SELECT DB_ID() as database_id, file_id, logical_filename=name COLLATE SQL_Latin1_General_CP1_CI_AS + , physical_filename = physical_name COLLATE SQL_Latin1_General_CP1_CI_AS, size from sys.database_files + where type <> 2 + UNION ALL + SELECT 2 as database_id, file_id, logical_filename = name , physical_filename = physical_name, size + from tempdb.sys.database_files + ) b ON b.database_id = vfs.database_id and b.file_id = vfs.file_id + where vfs.database_id IN (DB_ID(),0,2) ' EXEC sp_executesql @SqlStatement @@ -390,7 +406,7 @@ BEGIN ,REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance] ,DB_NAME(vfs.[database_id]) AS [database_name] ,COALESCE(mf.[physical_name],''RBPEX'') AS [physical_filename] --RPBEX = Resilient Buffer Pool Extension - ,COALESCE(mf.[name],''RBPEX'') AS [logical_filename] --RPBEX = Resilient Buffer Pool Extension + ,COALESCE(mf.[name],''RBPEX'') AS [logical_filename] --RPBEX = Resilient Buffer Pool Extension ,mf.[type_desc] AS [file_type] ,IIF( RIGHT(vs.[volume_mount_point],1) = ''\'' /*Tag value cannot end with \ */ ,LEFT(vs.[volume_mount_point],LEN(vs.[volume_mount_point])-1) @@ -536,135 +552,150 @@ DECLARE @PCounters TABLE cntr_type INT, Primary Key(object_name, counter_name, instance_name) ); -INSERT INTO @PCounters -SELECT DISTINCT + +DECLARE @SQL NVARCHAR(MAX) +SET @SQL = N'SELECT DISTINCT RTrim(spi.object_name) object_name, - RTrim(spi.counter_name) counter_name, - CASE WHEN ( - RTRIM(spi.object_name) LIKE '%:Databases' - OR RTRIM(spi.object_name) LIKE '%:Database Replica' - OR RTRIM(spi.object_name) LIKE '%:Catalog Metadata' - OR RTRIM(spi.object_name) LIKE '%:Query Store' - OR RTRIM(spi.object_name) LIKE '%:Columnstore' - OR RTRIM(spi.object_name) LIKE '%:Advanced Analytics') - AND SERVERPROPERTY ('EngineEdition') IN (5,8) - AND TRY_CONVERT(uniqueidentifier, spi.instance_name) IS NOT NULL -- for cloud only + RTrim(spi.counter_name) counter_name,' + + + CASE + WHEN CAST(SERVERPROPERTY('EngineEdition') AS int) IN (5,8) --- needed to get actual DB Name for SQL DB/ Managed instance + THEN N'CASE WHEN ( + RTRIM(spi.object_name) LIKE ''%:Databases'' + OR RTRIM(spi.object_name) LIKE ''%:Database Replica'' + OR RTRIM(spi.object_name) LIKE ''%:Catalog Metadata'' + OR RTRIM(spi.object_name) LIKE ''%:Query Store'' + OR RTRIM(spi.object_name) LIKE ''%:Columnstore'' + OR RTRIM(spi.object_name) LIKE ''%:Advanced Analytics'') + AND TRY_CONVERT(uniqueidentifier, spi.instance_name) + IS NOT NULL -- for cloud only THEN d.name - WHEN RTRIM(object_name) LIKE '%:Availability Replica' - AND SERVERPROPERTY ('EngineEdition') IN (5,8) + WHEN RTRIM(object_name) LIKE ''%:Availability Replica'' AND TRY_CONVERT(uniqueidentifier, spi.instance_name) IS NOT NULL -- for cloud only THEN d.name + RTRIM(SUBSTRING(spi.instance_name, 37, LEN(spi.instance_name))) ELSE spi.instance_name - END AS instance_name, - CAST(spi.cntr_value AS BIGINT) AS cntr_value, + END AS instance_name,' + ELSE 'spi.instance_name as instance_name, ' + END + + + 'CAST(spi.cntr_value AS BIGINT) AS cntr_value, spi.cntr_type -FROM sys.dm_os_performance_counters AS spi -LEFT JOIN sys.databases AS d -ON LEFT(spi.instance_name, 36) -- some instance_name values have an additional identifier appended after the GUID - = CASE WHEN -- in SQL DB standalone, physical_database_name for master is the GUID of the user database - d.name = 'master' AND TRY_CONVERT(uniqueidentifier, d.physical_database_name) IS NOT NULL + FROM sys.dm_os_performance_counters AS spi ' ++ 
+CASE + WHEN CAST(SERVERPROPERTY('EngineEdition') AS int) IN (5,8) --- Join is ONLY for managed instance and SQL DB, not for on-prem + THEN CAST(N'LEFT JOIN sys.databases AS d + ON LEFT(spi.instance_name, 36) -- some instance_name values have an additional identifier appended after the GUID + = CASE WHEN -- in SQL DB standalone, physical_database_name for master is the GUID of the user database + d.name = ''master'' AND TRY_CONVERT(uniqueidentifier, d.physical_database_name) IS NOT NULL THEN d.name ELSE d.physical_database_name - END -WHERE ( + END ' as NVARCHAR(MAX)) + ELSE N' ' +END + +SET @SQL = @SQL + CAST(N' WHERE ( counter_name IN ( - 'SQL Compilations/sec', - 'SQL Re-Compilations/sec', - 'User Connections', - 'Batch Requests/sec', - 'Logouts/sec', - 'Logins/sec', - 'Processes blocked', - 'Latch Waits/sec', - 'Full Scans/sec', - 'Index Searches/sec', - 'Page Splits/sec', - 'Page lookups/sec', - 'Page reads/sec', - 'Page writes/sec', - 'Readahead pages/sec', - 'Lazy writes/sec', - 'Checkpoint pages/sec', - 'Page life expectancy', - 'Log File(s) Size (KB)', - 'Log File(s) Used Size (KB)', - 'Data File(s) Size (KB)', - 'Transactions/sec', - 'Write Transactions/sec', - 'Active Temp Tables', - 'Temp Tables Creation Rate', - 'Temp Tables For Destruction', - 'Free Space in tempdb (KB)', - 'Version Store Size (KB)', - 'Memory Grants Pending', - 'Memory Grants Outstanding', - 'Free list stalls/sec', - 'Buffer cache hit ratio', - 'Buffer cache hit ratio base', - 'Backup/Restore Throughput/sec', - 'Total Server Memory (KB)', - 'Target Server Memory (KB)', - 'Log Flushes/sec', - 'Log Flush Wait Time', - 'Memory broker clerk size', - 'Log Bytes Flushed/sec', - 'Bytes Sent to Replica/sec', - 'Log Send Queue', - 'Bytes Sent to Transport/sec', - 'Sends to Replica/sec', - 'Bytes Sent to Transport/sec', - 'Sends to Transport/sec', - 'Bytes Received from Replica/sec', - 'Receives from Replica/sec', - 'Flow Control Time (ms/sec)', - 'Flow Control/sec', - 'Resent Messages/sec', - 'Redone Bytes/sec', - 'XTP Memory Used (KB)', - 'Transaction Delay', - 'Log Bytes Received/sec', - 'Log Apply Pending Queue', - 'Redone Bytes/sec', - 'Recovery Queue', - 'Log Apply Ready Queue', - 'CPU usage %', - 'CPU usage % base', - 'Queued requests', - 'Requests completed/sec', - 'Blocked tasks', - 'Active memory grant amount (KB)', - 'Disk Read Bytes/sec', - 'Disk Read IO Throttled/sec', - 'Disk Read IO/sec', - 'Disk Write Bytes/sec', - 'Disk Write IO Throttled/sec', - 'Disk Write IO/sec', - 'Used memory (KB)', - 'Forwarded Records/sec', - 'Background Writer pages/sec', - 'Percent Log Used', - 'Log Send Queue KB', - 'Redo Queue KB', - 'Mirrored Write Transactions/sec', - 'Group Commit Time', - 'Group Commits/Sec' + ''SQL Compilations/sec'', + ''SQL Re-Compilations/sec'', + ''User Connections'', + ''Batch Requests/sec'', + ''Logouts/sec'', + ''Logins/sec'', + ''Processes blocked'', + ''Latch Waits/sec'', + ''Full Scans/sec'', + ''Index Searches/sec'', + ''Page Splits/sec'', + ''Page lookups/sec'', + ''Page reads/sec'', + ''Page writes/sec'', + ''Readahead pages/sec'', + ''Lazy writes/sec'', + ''Checkpoint pages/sec'', + ''Page life expectancy'', + ''Log File(s) Size (KB)'', + ''Log File(s) Used Size (KB)'', + ''Data File(s) Size (KB)'', + ''Transactions/sec'', + ''Write Transactions/sec'', + ''Active Temp Tables'', + ''Temp Tables Creation Rate'', + ''Temp Tables For Destruction'', + ''Free Space in tempdb (KB)'', + ''Version Store Size (KB)'', + ''Memory Grants Pending'', + ''Memory Grants Outstanding'', + ''Free list 
stalls/sec'', + ''Buffer cache hit ratio'', + ''Buffer cache hit ratio base'', + ''Backup/Restore Throughput/sec'', + ''Total Server Memory (KB)'', + ''Target Server Memory (KB)'', + ''Log Flushes/sec'', + ''Log Flush Wait Time'', + ''Memory broker clerk size'', + ''Log Bytes Flushed/sec'', + ''Bytes Sent to Replica/sec'', + ''Log Send Queue'', + ''Bytes Sent to Transport/sec'', + ''Sends to Replica/sec'', + ''Bytes Sent to Transport/sec'', + ''Sends to Transport/sec'', + ''Bytes Received from Replica/sec'', + ''Receives from Replica/sec'', + ''Flow Control Time (ms/sec)'', + ''Flow Control/sec'', + ''Resent Messages/sec'', + ''Redone Bytes/sec'', + ''XTP Memory Used (KB)'', + ''Transaction Delay'', + ''Log Bytes Received/sec'', + ''Log Apply Pending Queue'', + ''Redone Bytes/sec'', + ''Recovery Queue'', + ''Log Apply Ready Queue'', + ''CPU usage %'', + ''CPU usage % base'', + ''Queued requests'', + ''Requests completed/sec'', + ''Blocked tasks'', + ''Active memory grant amount (KB)'', + ''Disk Read Bytes/sec'', + ''Disk Read IO Throttled/sec'', + ''Disk Read IO/sec'', + ''Disk Write Bytes/sec'', + ''Disk Write IO Throttled/sec'', + ''Disk Write IO/sec'', + ''Used memory (KB)'', + ''Forwarded Records/sec'', + ''Background Writer pages/sec'', + ''Percent Log Used'', + ''Log Send Queue KB'', + ''Redo Queue KB'', + ''Mirrored Write Transactions/sec'', + ''Group Commit Time'', + ''Group Commits/Sec'' ) ) OR ( - object_name LIKE '%User Settable%' - OR object_name LIKE '%SQL Errors%' + object_name LIKE ''%User Settable%'' + OR object_name LIKE ''%SQL Errors%'' ) OR ( - object_name LIKE '%Batch Resp Statistics%' + object_name LIKE ''%Batch Resp Statistics%'' ) OR ( - instance_name IN ('_Total') + instance_name IN (''_Total'') AND counter_name IN ( - 'Lock Timeouts/sec', - 'Number of Deadlocks/sec', - 'Lock Waits/sec', - 'Latch Waits/sec' + ''Lock Timeouts/sec'', + ''Number of Deadlocks/sec'', + ''Lock Waits/sec'', + ''Latch Waits/sec'' ) ) +' as NVARCHAR(MAX)) +INSERT INTO @PCounters +EXEC (@SQL) + -DECLARE @SQL NVARCHAR(MAX) SET @SQL = REPLACE('SELECT "SQLServer:Workload Group Stats" AS object, counter, From 20d45d651c5e274fbd67a57499431c36106e32e3 Mon Sep 17 00:00:00 2001 From: Giovanni Luisotto Date: Mon, 16 Mar 2020 20:36:31 +0000 Subject: [PATCH 1619/1815] Fix schedulers query compatibility with pre SQL-2016 (#7144) --- plugins/inputs/sqlserver/sqlserver.go | 46 +++++++++++++++++++++++---- 1 file changed, 39 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index a280e3c76..67703d19e 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -533,13 +533,45 @@ FROM ( ` //Recommend disabling this by default, but is useful to detect single CPU spikes/bottlenecks -const sqlServerSchedulersV2 string = `SET DEADLOCK_PRIORITY -10; -SELECT 'sqlserver_schedulers' AS [measurement], - REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], - DB_NAME() as [database_name], - cast(scheduler_id as varchar(4)) as scheduler_id, cast(cpu_id as varchar(4)) as cpu_id,is_online,is_idle,preemptive_switches_count,context_switches_count,current_tasks_count,runnable_tasks_count,current_workers_count - , active_workers_count,work_queue_count, pending_disk_io_count,load_factor,yield_count, total_cpu_usage_ms, total_scheduler_delay_ms -from sys.dm_os_schedulers +const sqlServerSchedulersV2 string = ` + + + + +SET DEADLOCK_PRIORITY - 10; +DECLARE @SqlStatement AS nvarchar(max); +SET @SqlStatement = N' +SELECT + 
''sqlserver_schedulers'' AS [measurement] + ,REPLACE(@@SERVERNAME, ''\'', '':'') AS [sql_instance] + ,DB_NAME() AS [database_name] + ,cast(s.[scheduler_id] AS VARCHAR(4)) AS [scheduler_id] + ,cast(s.[cpu_id] AS VARCHAR(4)) AS [cpu_id] + ,s.[is_online] + ,s.[is_idle] + ,s.[preemptive_switches_count] + ,s.[context_switches_count] + ,s.[current_tasks_count] + ,s.[runnable_tasks_count] + ,s.[current_workers_count] + ,s.[active_workers_count] + ,s.[work_queue_count] + ,s.[pending_disk_io_count] + ,s.[load_factor] + ,s.[yield_count] + ' + + + CASE + WHEN CAST(LEFT(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar) ,2) AS int) >= 13 + /*Only from SQL Server 2016+ (ver 13.x) [total_cpu_usage_ms] and [total_scheduler_delay_ms]*/ + THEN N',s.[total_cpu_usage_ms], s.[total_scheduler_delay_ms]' + ELSE '' + END + + +N' +FROM sys.dm_os_schedulers AS s +' +EXEC sp_executesql @SqlStatement ` const sqlPerformanceCountersV2 string = `SET DEADLOCK_PRIORITY -10; From 87f60ccf877becf3f680c821653d18d243f71c68 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 16 Mar 2020 13:45:34 -0700 Subject: [PATCH 1620/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c3a03e573..d42963f50 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -64,6 +64,7 @@ - [#7103](https://github.com/influxdata/telegraf/pull/7103): Add volume_mount_point to DatabaseIO query in sqlserver input. - [#7142](https://github.com/influxdata/telegraf/pull/7142): Add topic tag options to kafka output. - [#7141](https://github.com/influxdata/telegraf/pull/7141): Add support for setting InfluxDB retention policy using tag. +- [#7163](https://github.com/influxdata/telegraf/pull/7163): Add Database IO Tempdb per Azure DB to sqlserver input. #### Bugfixes @@ -75,6 +76,7 @@ - [#7133](https://github.com/influxdata/telegraf/issues/7133): Fix log rotation to use actual file size instead of bytes written. - [#7103](https://github.com/influxdata/telegraf/pull/7103): Fix several issues with DatabaseIO query in sqlserver input. - [#7119](https://github.com/influxdata/telegraf/pull/7119): Fix internal metrics for output split into multiple lines. +- [#7021](https://github.com/influxdata/telegraf/pull/7021):Fix schedulers query compatibility with pre SQL-2016. ## v1.13.4 [2020-02-25] From a612a4d85fec7b77450f58976d271c5da6ad4702 Mon Sep 17 00:00:00 2001 From: Harshit Bansal Date: Tue, 17 Mar 2020 02:16:42 +0530 Subject: [PATCH 1621/1815] Add option for explicitly including queries in sqlserver input plugin (#7150) --- etc/telegraf.conf | 9 ++++-- plugins/inputs/sqlserver/README.md | 7 +++-- plugins/inputs/sqlserver/sqlserver.go | 30 +++++++++++++++----- plugins/inputs/sqlserver/sqlserver_test.go | 33 +++++++++++++++++++++- 4 files changed, 66 insertions(+), 13 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index a13162b52..9621b59f1 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -4346,8 +4346,7 @@ # ## If you are using AzureDB, setting this to true will gather resource utilization metrics # # azuredb = false # -# ## If you would like to exclude some of the metrics queries, list them here -# ## Possible choices: +# ## Possible queries: # ## - PerformanceCounters # ## - WaitStatsCategorized # ## - DatabaseIO @@ -4363,7 +4362,11 @@ # ## - AzureDBResourceGovernance # ## - SqlRequests # ## - ServerProperties -# exclude_query = [ 'Schedulers' ] +# ## A list of queries to include. If not specified, all the above listed queries are used. 
+# # include_query = [] +# +# ## A list of queries to explicitly ignore. +# exclude_query = [ 'Schedulers' , 'SqlRequests'] # # Gather timeseries from Google Cloud Platform v3 monitoring API diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index 1b71165fb..23922c169 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -54,8 +54,7 @@ GO ## If you are using AzureDB, setting this to true will gather resource utilization metrics # azuredb = true - ## If you would like to exclude some of the metrics queries, list them here - ## Possible choices: + ## Possible queries: ## - PerformanceCounters ## - WaitStatsCategorized ## - DatabaseIO @@ -70,6 +69,10 @@ GO ## - AzureDBResourceGovernance ## - SqlRequests ## - ServerProperties + ## A list of queries to include. If not specified, all the above listed queries are used. + # include_query = [] + + ## A list of queries to explicitly ignore. exclude_query = [ 'Schedulers' , 'SqlRequests'] ``` diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 67703d19e..c69db611a 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -7,6 +7,7 @@ import ( _ "github.com/denisenkom/go-mssqldb" // go-mssqldb initialization "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -15,6 +16,7 @@ type SQLServer struct { Servers []string `toml:"servers"` QueryVersion int `toml:"query_version"` AzureDB bool `toml:"azuredb"` + IncludeQuery []string `toml:"include_query"` ExcludeQuery []string `toml:"exclude_query"` queries MapQuery isInitialized bool @@ -52,8 +54,7 @@ const sampleConfig = ` ## If you are using AzureDB, setting this to true will gather resource utilization metrics # azuredb = false - ## If you would like to exclude some of the metrics queries, list them here - ## Possible choices: + ## Possible queries: ## - PerformanceCounters ## - WaitStatsCategorized ## - DatabaseIO @@ -69,7 +70,11 @@ const sampleConfig = ` ## - AzureDBResourceGovernance ## - SqlRequests ## - ServerProperties - exclude_query = [ 'Schedulers' ] + ## A list of queries to include. If not specified, all the above listed queries are used. + # include_query = [] + + ## A list of queries to explicitly ignore. 
+ exclude_query = [ 'Schedulers' , 'SqlRequests'] ` // SampleConfig return the sample configuration @@ -86,7 +91,7 @@ type scanner interface { Scan(dest ...interface{}) error } -func initQueries(s *SQLServer) { +func initQueries(s *SQLServer) error { s.queries = make(MapQuery) queries := s.queries // If this is an AzureDB instance, grab some extra metrics @@ -117,18 +122,29 @@ func initQueries(s *SQLServer) { queries["PerformanceMetrics"] = Query{Script: sqlPerformanceMetrics, ResultByRow: false} } - for _, query := range s.ExcludeQuery { - delete(queries, query) + filterQueries, err := filter.NewIncludeExcludeFilter(s.IncludeQuery, s.ExcludeQuery) + if err != nil { + return err + } + + for query := range queries { + if !filterQueries.Match(query) { + delete(queries, query) + } } // Set a flag so we know that queries have already been initialized s.isInitialized = true + return nil } // Gather collect data from SQL Server func (s *SQLServer) Gather(acc telegraf.Accumulator) error { if !s.isInitialized { - initQueries(s) + if err := initQueries(s); err != nil { + acc.AddError(err) + return err + } } if len(s.Servers) == 0 { diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go index b493fb13c..8097b070b 100644 --- a/plugins/inputs/sqlserver/sqlserver_test.go +++ b/plugins/inputs/sqlserver/sqlserver_test.go @@ -1,16 +1,47 @@ package sqlserver import ( - "github.com/stretchr/testify/assert" "strconv" "strings" "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) +func TestSqlServer_QueriesInclusionExclusion(t *testing.T) { + cases := []map[string]interface{}{ + { + "IncludeQuery": []string{}, + "ExcludeQuery": []string{"WaitStatsCategorized", "DatabaseIO", "ServerProperties", "MemoryClerk", "Schedulers"}, + "queries": []string{"PerformanceCounters", "SqlRequests"}, + "queriesTotal": 2, + }, + { + "IncludeQuery": []string{"PerformanceCounters", "SqlRequests"}, + "ExcludeQuery": []string{"SqlRequests", "WaitStatsCategorized", "DatabaseIO"}, + "queries": []string{"PerformanceCounters"}, + "queriesTotal": 1, + }, + } + + for _, test := range cases { + s := SQLServer{ + QueryVersion: 2, + IncludeQuery: test["IncludeQuery"].([]string), + ExcludeQuery: test["ExcludeQuery"].([]string), + } + initQueries(&s) + assert.Equal(t, len(s.queries), test["queriesTotal"].(int)) + for _, query := range test["queries"].([]string) { + assert.Contains(t, s.queries, query) + } + } +} + func TestSqlServer_ParseMetrics(t *testing.T) { var acc testutil.Accumulator From 222dce49f08ababea4c84b6d0a1e1439692c03de Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 16 Mar 2020 13:47:58 -0700 Subject: [PATCH 1622/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d42963f50..01172fd35 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -65,6 +65,7 @@ - [#7142](https://github.com/influxdata/telegraf/pull/7142): Add topic tag options to kafka output. - [#7141](https://github.com/influxdata/telegraf/pull/7141): Add support for setting InfluxDB retention policy using tag. - [#7163](https://github.com/influxdata/telegraf/pull/7163): Add Database IO Tempdb per Azure DB to sqlserver input. +- [#7150](https://github.com/influxdata/telegraf/pull/7150): Add option for explicitly including queries in sqlserver input. 
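To illustrate the `include_query` / `exclude_query` semantics introduced above, here is a minimal, self-contained sketch of the selection logic that `initQueries` delegates to `filter.NewIncludeExcludeFilter`: an empty include list admits every query, and an exclude match always wins. The plain string comparison below is a simplification — the real filter package also accepts glob patterns — and the helper names are illustrative, not taken from the plugin.

```go
package main

import "fmt"

// matchAny reports whether name appears in list. (Telegraf's filter
// package also supports glob patterns; exact matching keeps this
// sketch self-contained.)
func matchAny(list []string, name string) bool {
	for _, item := range list {
		if item == name {
			return true
		}
	}
	return false
}

// keep mirrors the include/exclude precedence used by initQueries:
// an empty include list admits everything, and exclude always wins.
func keep(include, exclude []string, name string) bool {
	if matchAny(exclude, name) {
		return false
	}
	return len(include) == 0 || matchAny(include, name)
}

func main() {
	queries := []string{"PerformanceCounters", "WaitStatsCategorized", "Schedulers", "SqlRequests"}
	include := []string{"PerformanceCounters", "SqlRequests"}
	exclude := []string{"SqlRequests"}

	for _, q := range queries {
		fmt.Printf("%-20s kept=%v\n", q, keep(include, exclude, q))
	}
	// Output:
	// PerformanceCounters  kept=true
	// WaitStatsCategorized kept=false
	// Schedulers           kept=false
	// SqlRequests          kept=false
}
```

This matches the behavior pinned down by `TestSqlServer_QueriesInclusionExclusion` above: a query named in both lists ("SqlRequests") is excluded.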
#### Bugfixes From c78e4b449c8bfb2ba6150ca3fda8c0c049df6440 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Mon, 16 Mar 2020 19:07:39 -0400 Subject: [PATCH 1623/1815] fix broken test on mac (#7181) --- plugins/inputs/phpfpm/phpfpm_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index 64e5fbfea..e7e36c360 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -280,7 +280,8 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t err := acc.GatherError(r.Gather) require.Error(t, err) - assert.Contains(t, err.Error(), `Unable to connect to phpfpm status page 'http://aninvalidone': Get http://aninvalidone: dial tcp: lookup aninvalidone`) + assert.Contains(t, err.Error(), `Unable to connect to phpfpm status page 'http://aninvalidone'`) + assert.Contains(t, err.Error(), `lookup aninvalidone`) } func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testing.T) { From dafd08c0ca2545a167ecae1a256d8823aca2c49f Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Mon, 16 Mar 2020 19:17:37 -0400 Subject: [PATCH 1624/1815] add docker to bug report instructions --- .github/ISSUE_TEMPLATE/Bug_report.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/Bug_report.md b/.github/ISSUE_TEMPLATE/Bug_report.md index ee9a35d4f..5a4e810dd 100644 --- a/.github/ISSUE_TEMPLATE/Bug_report.md +++ b/.github/ISSUE_TEMPLATE/Bug_report.md @@ -21,6 +21,10 @@ section if available. +### Docker + + + ### Steps to reproduce: From fcb038d35254f73aabf8f29309fada6e2af0a3a4 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Mon, 16 Mar 2020 19:21:21 -0400 Subject: [PATCH 1625/1815] update docker files for go modules. Closes #7170 (#7179) --- scripts/alpine.docker | 7 ++----- scripts/stretch.docker | 7 ++----- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/scripts/alpine.docker b/scripts/alpine.docker index 0103a16d4..8eb86b39d 100644 --- a/scripts/alpine.docker +++ b/scripts/alpine.docker @@ -1,9 +1,6 @@ -FROM golang:1.11.0 as builder -ENV DEP_VERSION 0.5.0 -RUN curl -fsSL -o /usr/local/bin/dep https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 && chmod +x /usr/local/bin/dep +FROM golang:1.13.8 as builder WORKDIR /go/src/github.com/influxdata/telegraf -COPY Gopkg.toml Gopkg.lock ./ -RUN dep ensure -vendor-only + COPY . /go/src/github.com/influxdata/telegraf RUN CGO_ENABLED=0 make go-install diff --git a/scripts/stretch.docker b/scripts/stretch.docker index 906e0c504..805786075 100644 --- a/scripts/stretch.docker +++ b/scripts/stretch.docker @@ -1,9 +1,6 @@ -FROM golang:1.11.0 as builder -ENV DEP_VERSION 0.5.0 -RUN curl -fsSL -o /usr/local/bin/dep https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 && chmod +x /usr/local/bin/dep +FROM golang:1.13.8 as builder WORKDIR /go/src/github.com/influxdata/telegraf -COPY Gopkg.toml Gopkg.lock ./ -RUN dep ensure -vendor-only + COPY . 
/go/src/github.com/influxdata/telegraf RUN make go-install From 4162599081b2ae02e4a8b50b82f9a5f5f2adc726 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Mon, 16 Mar 2020 19:24:02 -0400 Subject: [PATCH 1626/1815] Remove dep from contribution guidelines (#7180) --- CONTRIBUTING.md | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 71e2b4520..a9a6eb008 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -30,30 +30,24 @@ and the accumulator can be found in the GoDoc: **Adding a dependency:** -Assuming you can already build the project, run these in the telegraf directory: +Telegraf uses Go modules. Assuming you can already build the project, run this in the telegraf directory: -1. `dep ensure -vendor-only` -2. `dep ensure -add github.com/[dependency]/[new-package]` +1. `go get github.com/[dependency]/[new-package]` **Unit Tests:** Before opening a pull request you should run the linter checks and the short tests. -**Run static analysis:** - ``` make check -``` - -**Run short tests:** - -``` make test ``` **Execute integration tests:** +(Optional) + Running the integration tests requires several docker containers to be running. You can start the containers with: ``` From ef2bff291651d71faca396ad671d05969eff66ac Mon Sep 17 00:00:00 2001 From: reimda Date: Tue, 17 Mar 2020 11:03:27 -0600 Subject: [PATCH 1627/1815] Set headers on influxdb_listener ping URL (#7182) --- .../influxdb_listener/influxdb_listener.go | 2 ++ .../influxdb_listener_test.go | 30 +++++++++++++++++-- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/influxdb_listener/influxdb_listener.go b/plugins/inputs/influxdb_listener/influxdb_listener.go index 60033e050..1eac928af 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener.go @@ -226,7 +226,9 @@ func (h *InfluxDBListener) handlePing() http.HandlerFunc { verbose := req.URL.Query().Get("verbose") // respond to ping requests + res.Header().Set("X-Influxdb-Version", "1.0") if verbose != "" && verbose != "0" && verbose != "false" { + res.Header().Set("Content-Type", "application/json") res.WriteHeader(http.StatusOK) b, _ := json.Marshal(map[string]string{"version": "1.0"}) // based on header set above res.Write(b) diff --git a/plugins/inputs/influxdb_listener/influxdb_listener_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_test.go index b8ea2014d..6990f6fc6 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener_test.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener_test.go @@ -477,7 +477,7 @@ func TestWriteEmpty(t *testing.T) { require.EqualValues(t, 204, resp.StatusCode) } -func TestQueryAndPing(t *testing.T) { +func TestQuery(t *testing.T) { listener := newTestListener() acc := &testutil.Accumulator{} @@ -490,14 +490,40 @@ func TestQueryAndPing(t *testing.T) { createURL(listener, "http", "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil) require.NoError(t, err) require.EqualValues(t, 200, resp.StatusCode) +} + +func TestPing(t *testing.T) { + listener := newTestListener() + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() // post ping to listener - resp, err = http.Post(createURL(listener, "http", "/ping", ""), "", nil) + resp, err := http.Post(createURL(listener, "http", "/ping", ""), "", nil) require.NoError(t, err) + require.Equal(t, "1.0", 
resp.Header["X-Influxdb-Version"][0]) + require.Len(t, resp.Header["Content-Type"], 0) resp.Body.Close() require.EqualValues(t, 204, resp.StatusCode) } +func TestPingVerbose(t *testing.T) { + listener := newTestListener() + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post ping to listener + resp, err := http.Post(createURL(listener, "http", "/ping", "verbose=1"), "", nil) + require.NoError(t, err) + require.Equal(t, "1.0", resp.Header["X-Influxdb-Version"][0]) + require.Equal(t, "application/json", resp.Header["Content-Type"][0]) + resp.Body.Close() + require.EqualValues(t, 200, resp.StatusCode) +} + func TestWriteWithPrecision(t *testing.T) { listener := newTestListener() From 6ce0660cee95b918251f3952831177d5f09b2f0d Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 17 Mar 2020 13:22:11 -0400 Subject: [PATCH 1628/1815] Update to the latest gopsutil (#7185) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 6c6cb6cf4..0cf6c2514 100644 --- a/go.mod +++ b/go.mod @@ -105,7 +105,7 @@ require ( github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect - github.com/shirou/gopsutil v2.20.1+incompatible + github.com/shirou/gopsutil v2.20.2+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect github.com/sirupsen/logrus v1.2.0 github.com/soniah/gosnmp v1.22.0 diff --git a/go.sum b/go.sum index 78ef01e23..f3579f14b 100644 --- a/go.sum +++ b/go.sum @@ -430,8 +430,8 @@ github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec h1:6ncX5ko6B9L github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shirou/gopsutil v2.20.1+incompatible h1:oIq9Cq4i84Hk8uQAUOG3eNdI/29hBawGrD5YRl6JRDY= -github.com/shirou/gopsutil v2.20.1+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil v2.20.2+incompatible h1:ucK79BhBpgqQxPASyS2cu9HX8cfDVljBN1WWFvbNvgY= +github.com/shirou/gopsutil v2.20.2+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= From 9d7f56052ec5614674929ce2b88fa9aa0308b824 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Tue, 17 Mar 2020 11:07:24 -0600 Subject: [PATCH 1629/1815] Update changelog --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 01172fd35..07a0a29fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -77,7 +77,8 @@ - [#7133](https://github.com/influxdata/telegraf/issues/7133): Fix log rotation to use actual file size instead of bytes written. - [#7103](https://github.com/influxdata/telegraf/pull/7103): Fix several issues with DatabaseIO query in sqlserver input. 
- [#7119](https://github.com/influxdata/telegraf/pull/7119): Fix internal metrics for output split into multiple lines. -- [#7021](https://github.com/influxdata/telegraf/pull/7021):Fix schedulers query compatibility with pre SQL-2016. +- [#7021](https://github.com/influxdata/telegraf/pull/7021): Fix schedulers query compatibility with pre SQL-2016. +- [#7182](https://github.com/influxdata/telegraf/pull/7182): Set headers on influxdb_listener ping URL. ## v1.13.4 [2020-02-25] From 064247a34560b950268f55b6ecf06429350b8f8a Mon Sep 17 00:00:00 2001 From: Steven Barth Date: Tue, 17 Mar 2020 20:56:51 +0100 Subject: [PATCH 1630/1815] Add support for GNMI DecimalVal type (#7173) --- plugins/inputs/cisco_telemetry_gnmi/README.md | 5 ++--- plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go | 3 ++- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/cisco_telemetry_gnmi/README.md b/plugins/inputs/cisco_telemetry_gnmi/README.md index d70d66157..0b003fdef 100644 --- a/plugins/inputs/cisco_telemetry_gnmi/README.md +++ b/plugins/inputs/cisco_telemetry_gnmi/README.md @@ -1,9 +1,8 @@ # Cisco GNMI Telemetry -Cisco GNMI Telemetry is an input plugin that consumes telemetry data similar to the [GNMI specification](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md). -This GRPC-based protocol can utilize TLS for authentication and encryption. +Cisco GNMI Telemetry is an input plugin that consumes telemetry data based on the [GNMI](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md) Subscribe method. TLS is supported for authentication and encryption. -This plugin has been developed to support GNMI telemetry as produced by Cisco IOS XR (64-bit) version 6.5.1 and later. +It has been optimized to support GNMI telemetry as produced by Cisco IOS XR (64-bit) version 6.5.1, Cisco NX-OS 9.3 and Cisco IOS XE 16.12 and later. ### Configuration diff --git a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go index c8c50e368..562c5effa 100644 --- a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go +++ b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go @@ -7,6 +7,7 @@ import ( "encoding/json" "fmt" "io" + "math" "net" "path" "strings" @@ -332,7 +333,7 @@ func (c *CiscoTelemetryGNMI) handleTelemetryField(update *gnmi.Update, tags map[ case *gnmi.TypedValue_BytesVal: value = val.BytesVal case *gnmi.TypedValue_DecimalVal: - value = val.DecimalVal + value = float64(val.DecimalVal.Digits) / math.Pow(10, float64(val.DecimalVal.Precision)) case *gnmi.TypedValue_FloatVal: value = val.FloatVal case *gnmi.TypedValue_IntVal: From f6ea2598e55306a23d7dad3e333b9243fceed1e4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 17 Mar 2020 12:58:20 -0700 Subject: [PATCH 1631/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 07a0a29fb..e4eaddf20 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -66,6 +66,7 @@ - [#7141](https://github.com/influxdata/telegraf/pull/7141): Add support for setting InfluxDB retention policy using tag. - [#7163](https://github.com/influxdata/telegraf/pull/7163): Add Database IO Tempdb per Azure DB to sqlserver input. - [#7150](https://github.com/influxdata/telegraf/pull/7150): Add option for explicitly including queries in sqlserver input. 
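The DecimalVal change above maps a GNMI Decimal64 value onto a float. Here is a short sketch of the conversion using the same `Digits` and `Precision` fields the patch reads; the concrete values are made up for illustration.

```go
package main

import (
	"fmt"
	"math"
)

// decimalToFloat mirrors the conversion added in handleTelemetryField:
// a Decimal64 carries its digits as a plain integer plus a precision
// giving the number of digits after the decimal point.
func decimalToFloat(digits int64, precision uint32) float64 {
	return float64(digits) / math.Pow(10, float64(precision))
}

func main() {
	fmt.Println(decimalToFloat(4875, 2)) // 48.75
	fmt.Println(decimalToFloat(-125, 1)) // -12.5
}
```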
+- [#7173](https://github.com/influxdata/telegraf/pull/7173): Add support for GNMI DecimalVal type to cisco_telemetry_gnmi. #### Bugfixes From 00382052668c9f4b213f864f001b78a10e80ce48 Mon Sep 17 00:00:00 2001 From: igomura <52306882+igomura@users.noreply.github.com> Date: Tue, 17 Mar 2020 15:53:03 -0700 Subject: [PATCH 1632/1815] Add dedup processor (#6792) --- plugins/processors/all/all.go | 1 + plugins/processors/dedup/README.md | 17 +++ plugins/processors/dedup/dedup.go | 105 +++++++++++++++++ plugins/processors/dedup/dedup_test.go | 154 +++++++++++++++++++++++++ 4 files changed, 277 insertions(+) create mode 100644 plugins/processors/dedup/README.md create mode 100644 plugins/processors/dedup/dedup.go create mode 100644 plugins/processors/dedup/dedup_test.go diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go index 98e9ccbfa..360b37dd0 100644 --- a/plugins/processors/all/all.go +++ b/plugins/processors/all/all.go @@ -4,6 +4,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/processors/clone" _ "github.com/influxdata/telegraf/plugins/processors/converter" _ "github.com/influxdata/telegraf/plugins/processors/date" + _ "github.com/influxdata/telegraf/plugins/processors/dedup" _ "github.com/influxdata/telegraf/plugins/processors/enum" _ "github.com/influxdata/telegraf/plugins/processors/override" _ "github.com/influxdata/telegraf/plugins/processors/parser" diff --git a/plugins/processors/dedup/README.md b/plugins/processors/dedup/README.md new file mode 100644 index 000000000..5e808bcd3 --- /dev/null +++ b/plugins/processors/dedup/README.md @@ -0,0 +1,17 @@ +# Dedup Processor Plugin + +If a metric sends the same value over successive intervals, suppress sending +the same value to the TSD until this many seconds have elapsed. This helps +graphs over narrow time ranges still see timeseries with suppressed datapoints. + +This feature can be used to reduce traffic when metric's value does not change over +time while maintain proper precision when value gets changed rapidly + +### Configuration + +```toml +[[processors.dedup]] + ## Maximum time to suppress output + dedup_interval = "600s" +``` + diff --git a/plugins/processors/dedup/dedup.go b/plugins/processors/dedup/dedup.go new file mode 100644 index 000000000..d3e04e070 --- /dev/null +++ b/plugins/processors/dedup/dedup.go @@ -0,0 +1,105 @@ +package dedup + +import ( + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/processors" +) + +var sampleConfig = ` + ## Maximum time to suppress output + dedup_interval = "600s" +` + +type Dedup struct { + DedupInterval internal.Duration `toml:"dedup_interval"` + FlushTime time.Time + Cache map[uint64]telegraf.Metric +} + +func (d *Dedup) SampleConfig() string { + return sampleConfig +} + +func (d *Dedup) Description() string { + return "Deduplicate repetitive metrics" +} + +// Remove single item from slice +func remove(slice []telegraf.Metric, i int) []telegraf.Metric { + slice[len(slice)-1], slice[i] = slice[i], slice[len(slice)-1] + return slice[:len(slice)-1] +} + +// Remove expired items from cache +func (d *Dedup) cleanup() { + // No need to cleanup cache too often. 
Lets save some CPU + if time.Since(d.FlushTime) < d.DedupInterval.Duration { + return + } + d.FlushTime = time.Now() + keep := make(map[uint64]telegraf.Metric, 0) + for id, metric := range d.Cache { + if time.Since(metric.Time()) < d.DedupInterval.Duration { + keep[id] = metric + } + } + d.Cache = keep +} + +// Save item to cache +func (d *Dedup) save(metric telegraf.Metric, id uint64) { + d.Cache[id] = metric.Copy() + d.Cache[id].Accept() +} + +// main processing method +func (d *Dedup) Apply(metrics ...telegraf.Metric) []telegraf.Metric { + for idx, metric := range metrics { + id := metric.HashID() + m, ok := d.Cache[id] + + // If not in cache then just save it + if !ok { + d.save(metric, id) + continue + } + + // If cache item has expired then refresh it + if time.Since(m.Time()) >= d.DedupInterval.Duration { + d.save(metric, id) + continue + } + + // For each filed compare value with the cached one + changed := false + for _, f := range metric.FieldList() { + if value, ok := m.GetField(f.Key); ok && value != f.Value { + changed = true + continue + } + } + // If any field value has changed then refresh the cache + if changed { + d.save(metric, id) + continue + } + + // In any other case remove metric from the output + metrics = remove(metrics, idx) + } + d.cleanup() + return metrics +} + +func init() { + processors.Add("dedup", func() telegraf.Processor { + return &Dedup{ + DedupInterval: internal.Duration{Duration: 10 * time.Minute}, + FlushTime: time.Now(), + Cache: make(map[uint64]telegraf.Metric), + } + }) +} diff --git a/plugins/processors/dedup/dedup_test.go b/plugins/processors/dedup/dedup_test.go new file mode 100644 index 000000000..20a94ed30 --- /dev/null +++ b/plugins/processors/dedup/dedup_test.go @@ -0,0 +1,154 @@ +package dedup + +import ( + "github.com/stretchr/testify/require" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/metric" +) + +func createMetric(name string, value int64, when time.Time) telegraf.Metric { + m, _ := metric.New(name, + map[string]string{"tag": "tag_value"}, + map[string]interface{}{"value": value}, + when, + ) + return m +} + +func createDedup(initTime time.Time) Dedup { + return Dedup{ + DedupInterval: internal.Duration{Duration: 10 * time.Minute}, + FlushTime: initTime, + Cache: make(map[uint64]telegraf.Metric), + } +} + +func assertCacheRefresh(t *testing.T, proc *Dedup, item telegraf.Metric) { + id := item.HashID() + name := item.Name() + // cache is not empty + require.NotEqual(t, 0, len(proc.Cache)) + // cache has metric with proper id + cache, present := proc.Cache[id] + require.True(t, present) + // cache has metric with proper name + require.Equal(t, name, cache.Name()) + // cached metric has proper field + cValue, present := cache.GetField("value") + require.True(t, present) + iValue, _ := item.GetField("value") + require.Equal(t, cValue, iValue) + // cached metric has proper timestamp + require.Equal(t, cache.Time(), item.Time()) +} + +func assertCacheHit(t *testing.T, proc *Dedup, item telegraf.Metric) { + id := item.HashID() + name := item.Name() + // cache is not empty + require.NotEqual(t, 0, len(proc.Cache)) + // cache has metric with proper id + cache, present := proc.Cache[id] + require.True(t, present) + // cache has metric with proper name + require.Equal(t, name, cache.Name()) + // cached metric has proper field + cValue, present := cache.GetField("value") + require.True(t, present) + iValue, _ := item.GetField("value") + require.Equal(t, 
cValue, iValue) + // cached metric did NOT change timestamp + require.NotEqual(t, cache.Time(), item.Time()) +} + +func assertMetricPassed(t *testing.T, target []telegraf.Metric, source telegraf.Metric) { + // target is not empty + require.NotEqual(t, 0, len(target)) + // target has metric with proper name + require.Equal(t, "m1", target[0].Name()) + // target metric has proper field + tValue, present := target[0].GetField("value") + require.True(t, present) + sValue, present := source.GetField("value") + require.Equal(t, tValue, sValue) + // target metric has proper timestamp + require.Equal(t, target[0].Time(), source.Time()) +} + +func assertMetricSuppressed(t *testing.T, target []telegraf.Metric, source telegraf.Metric) { + // target is empty + require.Equal(t, 0, len(target)) +} + +func TestProcRetainsMetric(t *testing.T) { + deduplicate := createDedup(time.Now()) + source := createMetric("m1", 1, time.Now()) + target := deduplicate.Apply(source) + + assertCacheRefresh(t, &deduplicate, source) + assertMetricPassed(t, target, source) +} + +func TestSuppressRepeatedValue(t *testing.T) { + deduplicate := createDedup(time.Now()) + // Create metric in the past + source := createMetric("m1", 1, time.Now().Add(-1*time.Second)) + target := deduplicate.Apply(source) + source = createMetric("m1", 1, time.Now()) + target = deduplicate.Apply(source) + + assertCacheHit(t, &deduplicate, source) + assertMetricSuppressed(t, target, source) +} + +func TestPassUpdatedValue(t *testing.T) { + deduplicate := createDedup(time.Now()) + // Create metric in the past + source := createMetric("m1", 1, time.Now().Add(-1*time.Second)) + target := deduplicate.Apply(source) + source = createMetric("m1", 2, time.Now()) + target = deduplicate.Apply(source) + + assertCacheRefresh(t, &deduplicate, source) + assertMetricPassed(t, target, source) +} + +func TestPassAfterCacheExpire(t *testing.T) { + deduplicate := createDedup(time.Now()) + // Create metric in the past + source := createMetric("m1", 1, time.Now().Add(-1*time.Hour)) + target := deduplicate.Apply(source) + source = createMetric("m1", 1, time.Now()) + target = deduplicate.Apply(source) + + assertCacheRefresh(t, &deduplicate, source) + assertMetricPassed(t, target, source) +} + +func TestCacheRetainsMetrics(t *testing.T) { + deduplicate := createDedup(time.Now()) + // Create metric in the past 3sec + source := createMetric("m1", 1, time.Now().Add(-3*time.Hour)) + deduplicate.Apply(source) + // Create metric in the past 2sec + source = createMetric("m1", 1, time.Now().Add(-2*time.Hour)) + deduplicate.Apply(source) + source = createMetric("m1", 1, time.Now()) + deduplicate.Apply(source) + + assertCacheRefresh(t, &deduplicate, source) +} + +func TestCacheShrink(t *testing.T) { + // Time offset is more than 2 * DedupInterval + deduplicate := createDedup(time.Now().Add(-2 * time.Hour)) + // Time offset is more than 1 * DedupInterval + source := createMetric("m1", 1, time.Now().Add(-1*time.Hour)) + deduplicate.Apply(source) + + require.Equal(t, 0, len(deduplicate.Cache)) +} From 78fb879aa74637c1d6d000f96e43740b32d6b4f8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 17 Mar 2020 18:39:08 -0700 Subject: [PATCH 1633/1815] Update readme and changelog --- CHANGELOG.md | 1 + README.md | 1 + plugins/processors/dedup/README.md | 19 +++++++++++++------ plugins/processors/dedup/dedup.go | 4 ++-- 4 files changed, 17 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e4eaddf20..5b7828bc4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 
+22,7 @@ #### New Processors +- [dedup](/plugins/processors/dedup/README.md) - Contributed by @igomura - [template](/plugins/processors/template/README.md) - Contributed by @RobMalvern - [s2_geo](/plugins/processors/s2_geo/README.md) - Contributed by @alespour diff --git a/README.md b/README.md index cb0c2169b..d1c43ca47 100644 --- a/README.md +++ b/README.md @@ -360,6 +360,7 @@ For documentation on the latest development code see the [documentation index][d * [clone](/plugins/processors/clone) * [converter](/plugins/processors/converter) * [date](/plugins/processors/date) +* [dedup](/plugins/processors/dedup) * [enum](/plugins/processors/enum) * [override](/plugins/processors/override) * [parser](/plugins/processors/parser) diff --git a/plugins/processors/dedup/README.md b/plugins/processors/dedup/README.md index 5e808bcd3..d0b516c27 100644 --- a/plugins/processors/dedup/README.md +++ b/plugins/processors/dedup/README.md @@ -1,11 +1,6 @@ # Dedup Processor Plugin -If a metric sends the same value over successive intervals, suppress sending -the same value to the TSD until this many seconds have elapsed. This helps -graphs over narrow time ranges still see timeseries with suppressed datapoints. - -This feature can be used to reduce traffic when metric's value does not change over -time while maintain proper precision when value gets changed rapidly +Filter metrics whose field values are exact repetitions of the previous values. ### Configuration @@ -15,3 +10,15 @@ time while maintain proper precision when value gets changed rapidly dedup_interval = "600s" ``` +### Example + +```diff +- cpu,cpu=cpu0 time_idle=42i,time_guest=1i +- cpu,cpu=cpu0 time_idle=42i,time_guest=2i +- cpu,cpu=cpu0 time_idle=42i,time_guest=2i +- cpu,cpu=cpu0 time_idle=44i,time_guest=2i +- cpu,cpu=cpu0 time_idle=44i,time_guest=2i ++ cpu,cpu=cpu0 time_idle=42i,time_guest=1i ++ cpu,cpu=cpu0 time_idle=42i,time_guest=2i ++ cpu,cpu=cpu0 time_idle=44i,time_guest=2i +``` diff --git a/plugins/processors/dedup/dedup.go b/plugins/processors/dedup/dedup.go index d3e04e070..9c737da15 100644 --- a/plugins/processors/dedup/dedup.go +++ b/plugins/processors/dedup/dedup.go @@ -24,7 +24,7 @@ func (d *Dedup) SampleConfig() string { } func (d *Dedup) Description() string { - return "Deduplicate repetitive metrics" + return "Drop metrics with repeating field values" } // Remove single item from slice @@ -73,7 +73,7 @@ func (d *Dedup) Apply(metrics ...telegraf.Metric) []telegraf.Metric { continue } - // For each filed compare value with the cached one + // For each field compare value with the cached one changed := false for _, f := range metric.FieldList() { if value, ok := m.GetField(f.Key); ok && value != f.Value { From 56a7ff574c064b30b3127fbdce5e7dc8896cd89f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 17 Mar 2020 18:46:52 -0700 Subject: [PATCH 1634/1815] Adjust dedup processor description --- plugins/processors/dedup/dedup.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/processors/dedup/dedup.go b/plugins/processors/dedup/dedup.go index 9c737da15..c0d40f434 100644 --- a/plugins/processors/dedup/dedup.go +++ b/plugins/processors/dedup/dedup.go @@ -24,7 +24,7 @@ func (d *Dedup) SampleConfig() string { } func (d *Dedup) Description() string { - return "Drop metrics with repeating field values" + return "Filter metrics with repeating field values" } // Remove single item from slice From cc92d4aba7825f764f9657adb204e27d37d3663e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 18 Mar 2020 
12:12:24 -0700 Subject: [PATCH 1635/1815] Add sFlow input plugin (#7188) --- plugins/inputs/EXAMPLE_README.md | 2 +- plugins/inputs/all/all.go | 1 + plugins/inputs/sflow/README.md | 92 ++ plugins/inputs/sflow/decoder.go | 306 ++++++ plugins/inputs/sflow/decoder/directives.go | 402 ++++++++ .../inputs/sflow/decoder/directives_test.go | 582 +++++++++++ plugins/inputs/sflow/decoder/funcs.go | 216 ++++ plugins/inputs/sflow/decoder/ops.go | 490 +++++++++ plugins/inputs/sflow/decoder/ops_test.go | 383 +++++++ plugins/inputs/sflow/decoder_test.go | 975 ++++++++++++++++++ plugins/inputs/sflow/sflow.go | 154 +++ plugins/inputs/sflow/sflow_test.go | 135 +++ 12 files changed, 3737 insertions(+), 1 deletion(-) create mode 100644 plugins/inputs/sflow/README.md create mode 100644 plugins/inputs/sflow/decoder.go create mode 100644 plugins/inputs/sflow/decoder/directives.go create mode 100644 plugins/inputs/sflow/decoder/directives_test.go create mode 100644 plugins/inputs/sflow/decoder/funcs.go create mode 100644 plugins/inputs/sflow/decoder/ops.go create mode 100644 plugins/inputs/sflow/decoder/ops_test.go create mode 100644 plugins/inputs/sflow/decoder_test.go create mode 100644 plugins/inputs/sflow/sflow.go create mode 100644 plugins/inputs/sflow/sflow_test.go diff --git a/plugins/inputs/EXAMPLE_README.md b/plugins/inputs/EXAMPLE_README.md index ffe1be7cc..6b86615b0 100644 --- a/plugins/inputs/EXAMPLE_README.md +++ b/plugins/inputs/EXAMPLE_README.md @@ -4,7 +4,7 @@ The `example` plugin gathers metrics about example things. This description explains at a high level what the plugin does and provides links to where additional information can be found. -Telegraf minimum version: Telegraf x.x +Telegraf minimum version: Telegraf x.x Plugin minimum tested version: x.x ### Configuration diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 3e1f959fa..33038ab72 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -140,6 +140,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/riak" _ "github.com/influxdata/telegraf/plugins/inputs/salesforce" _ "github.com/influxdata/telegraf/plugins/inputs/sensors" + _ "github.com/influxdata/telegraf/plugins/inputs/sflow" _ "github.com/influxdata/telegraf/plugins/inputs/smart" _ "github.com/influxdata/telegraf/plugins/inputs/snmp" _ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy" diff --git a/plugins/inputs/sflow/README.md b/plugins/inputs/sflow/README.md new file mode 100644 index 000000000..07cd2024b --- /dev/null +++ b/plugins/inputs/sflow/README.md @@ -0,0 +1,92 @@ +# SFlow Input Plugin + +The SFlow Input Plugin provides support for acting as an SFlow V5 collector in +accordance with the specification from [sflow.org](https://sflow.org/). + +Currently only Flow Samples of Ethernet / IPv4 & IPv4 TCP & UDP headers are +turned into metrics. Counters and other header samples are ignored. + +### Configuration + +```toml +[[inputs.sflow]] + ## Address to listen for sFlow packets. + ## example: service_address = "udp://:6343" + ## service_address = "udp4://:6343" + ## service_address = "udp6://:6343" + service_address = "udp://:6343" + + ## Set the size of the operating system's receive buffer. 
+  ## example: read_buffer_size = "64KiB"
+  # read_buffer_size = ""
+```
+
+### Metrics
+
+- sflow
+  - tags:
+    - agent_address (IP address of the agent that obtained the sflow sample and sent it to this collector)
+    - source_id_type (source_id_type field of flow_sample or flow_sample_expanded structures)
+    - source_id_index (source_id_index field of flow_sample or flow_sample_expanded structures)
+    - input_ifindex (value (input) field of flow_sample or flow_sample_expanded structures)
+    - output_ifindex (value (output) field of flow_sample or flow_sample_expanded structures)
+    - sample_direction (derived from source_id_index, netif_index_in and netif_index_out)
+    - header_protocol (header_protocol field of sampled_header structures)
+    - ether_type (eth_type field of an ETHERNET-ISO88023 header)
+    - src_ip (source_ipaddr field of IPv4 or IPv6 structures)
+    - src_port (src_port field of TCP or UDP structures)
+    - src_port_name (src_port)
+    - src_mac (source_mac_addr field of an ETHERNET-ISO88023 header)
+    - src_vlan (src_vlan field of extended_switch structure)
+    - src_priority (src_priority field of extended_switch structure)
+    - src_mask_len (src_mask_len field of extended_router structure)
+    - dst_ip (destination_ipaddr field of IPv4 or IPv6 structures)
+    - dst_port (dst_port field of TCP or UDP structures)
+    - dst_port_name (dst_port)
+    - dst_mac (destination_mac_addr field of an ETHERNET-ISO88023 header)
+    - dst_vlan (dst_vlan field of extended_switch structure)
+    - dst_priority (dst_priority field of extended_switch structure)
+    - dst_mask_len (dst_mask_len field of extended_router structure)
+    - next_hop (next_hop field of extended_router structure)
+    - ip_version (ip_ver field of IPv4 or IPv6 structures)
+    - ip_protocol (ip_protocol field of IPv4 or IPv6 structures)
+    - ip_dscp (ip_dscp field of IPv4 or IPv6 structures)
+    - ip_ecn (ecn field of IPv4 or IPv6 structures)
+  - fields:
+    - bytes (integer, the product of frame_length and sampling_rate)
+    - drops (integer, drops field of flow_sample or flow_sample_expanded structures)
+    - sampling_rate (integer, sampling_rate field of flow_sample or flow_sample_expanded structures)
+    - frame_length (integer, frame_length field of sampled_header structures)
+    - header_length (integer, header_size field of sampled_header structures)
+    - ip_fragment_offset (integer, fragment offset field of IPv4 structures)
+    - ip_header_length (integer, IHL field of IPv4 structures)
+    - ip_total_length (integer, ip_total_len field of IPv4 structures)
+    - ip_ttl (integer, ip_ttl field of IPv4 structures or ip_hop_limit field of IPv6 structures)
+    - tcp_header_length (integer, data offset field of the TCP structure multiplied by 4 to produce a value in bytes)
+    - tcp_window_size (integer, window_size field of TCP structure)
+    - tcp_urgent_pointer (integer, urgent_pointer field of TCP structure)
+    - udp_length (integer, length field of UDP structures)
+    - ip_flags (integer, flags field of IPv4 structures)
+    - tcp_flags (integer, TCP flags of TCP IP header (IPv4 or IPv6))
+
+### Troubleshooting
+
+The [sflowtool][] utility can be used to print sFlow packets so that its output
+can be compared against the metrics produced by Telegraf.
+```
+sflowtool -p 6343
+```
+
+If opening an issue, in addition to the output of sflowtool it will also be
+helpful to collect a packet capture.
Adjust the interface, host and port as +needed: +``` +$ sudo tcpdump -s 0 -i eth0 -w telegraf-sflow.pcap host 127.0.0.1 and port 6343 +``` + +[sflowtool]: https://github.com/sflow/sflowtool + +### Example Output +``` +sflow,agent_address=0.0.0.0,dst_ip=10.0.0.2,dst_mac=ff:ff:ff:ff:ff:ff,dst_port=40042,ether_type=IPv4,header_protocol=ETHERNET-ISO88023,input_ifindex=6,ip_dscp=27,ip_ecn=0,output_ifindex=1073741823,source_id_index=3,source_id_type=0,src_ip=10.0.0.1,src_mac=ff:ff:ff:ff:ff:ff,src_port=443 bytes=1570i,drops=0i,frame_length=157i,header_length=128i,ip_flags=2i,ip_fragment_offset=0i,ip_total_length=139i,ip_ttl=42i,sampling_rate=10i,tcp_header_length=0i,tcp_urgent_pointer=0i,tcp_window_size=14i 1584473704793580447 +``` diff --git a/plugins/inputs/sflow/decoder.go b/plugins/inputs/sflow/decoder.go new file mode 100644 index 000000000..51a534881 --- /dev/null +++ b/plugins/inputs/sflow/decoder.go @@ -0,0 +1,306 @@ +package sflow + +import ( + "fmt" + "math" + "net" + + "github.com/influxdata/telegraf/plugins/inputs/sflow/decoder" +) + +const ( + addressTypeIPv4 = uint32(1) // line: 1383 + addressTypeIPv6 = uint32(2) // line: 1384 + + sampleTypeFlowSample = uint32(1) // line: 1614 + sampleTypeFlowSampleExpanded = uint32(3) // line: 1698 + + flowDataRawPacketHeaderFormat = uint32(1) // line: 1938 + + headerProtocolEthernetIso88023 = uint32(1) // line: 1920 + + ipProtocolTCP = byte(6) + ipProtocolUDP = byte(17) + + metricName = "sflow" +) + +var headerProtocolMap = map[uint32]string{ + headerProtocolEthernetIso88023: "ETHERNET-ISO88023", // line: 1920 +} + +var etypeMap = map[uint16]string{ + 0x0800: "IPv4", + 0x86DD: "IPv6", +} + +func bytesToIPStr(b []byte) string { + return net.IP(b).String() +} + +func bytesToMACStr(b []byte) string { + return fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x", b[0], b[1], b[2], b[3], b[4], b[5]) +} + +var ipvMap = map[uint32]string{ + 1: "IPV4", // line: 1383 + 2: "IPV6", // line: 1384 +} + +// V5FormatOptions captures configuration for controlling the processing of an SFlow V5 packet. 
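+// The limits exist to bound how much work a single datagram can demand;
+// iterations or encapsulated regions that exceed them abort the decode with an
+// error rather than risk runaway allocation on a malformed packet.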
+type V5FormatOptions struct {
+ MaxFlowsPerSample   uint32
+ MaxSamplesPerPacket uint32
+ MaxFlowHeaderLength uint32
+ MaxSampleLength     uint32
+}
+
+// NewDefaultV5FormatOptions answers a new V5FormatOptions with default values initialised
+func NewDefaultV5FormatOptions() V5FormatOptions {
+ return V5FormatOptions{
+  MaxFlowsPerSample:   math.MaxUint32,
+  MaxSamplesPerPacket: math.MaxUint32,
+  MaxFlowHeaderLength: math.MaxUint32,
+  MaxSampleLength:     math.MaxUint32,
+ }
+}
+
+// V5Format answers a decoder.Directive capable of decoding sFlow v5 packets in accordance
+// with the SFlow v5 specification at https://sflow.org/sflow_version_5.txt
+func V5Format(options V5FormatOptions) decoder.Directive {
+ return decoder.Seq( // line: 1823
+  decoder.U32().Do(decoder.U32Assert(func(v uint32) bool { return v == 5 }, "Version %d not supported, only version 5")),
+  decoder.U32().Switch( // agent_address line: 1787
+   decoder.Case(addressTypeIPv4, decoder.Bytes(4).Do(decoder.BytesToStr(4, bytesToIPStr).AsT("agent_address"))),   // line: 1390
+   decoder.Case(addressTypeIPv6, decoder.Bytes(16).Do(decoder.BytesToStr(16, bytesToIPStr).AsT("agent_address"))), // line: 1393
+  ),
+  decoder.U32(), // sub_agent_id line: 1790
+  decoder.U32(), // sequence_number line: 1801
+  decoder.U32(), // uptime line: 1804
+  decoder.U32().Iter(options.MaxSamplesPerPacket, sampleRecord(options)), // samples line: 1812
+ )
+}
+
+func sampleRecord(options V5FormatOptions) decoder.Directive {
+ var sampleType interface{}
+ return decoder.Seq( // line: 1760
+  decoder.U32().Ref(&sampleType), // sample_type line: 1761
+  decoder.U32().Encapsulated(options.MaxSampleLength, // sample_data line: 1762
+   decoder.Ref(sampleType).Switch(
+    decoder.Case(sampleTypeFlowSample, flowSample(sampleType, options)),                 // line: 1614
+    decoder.Case(sampleTypeFlowSampleExpanded, flowSampleExpanded(sampleType, options)), // line: 1698
+    decoder.DefaultCase(nil), // this allows other cases to just be ignored rather than cause an error
+   ),
+  ),
+ )
+}
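+
+// flowSample decodes a compact flow_sample structure (line: 1616). The source_id
+// word packs source_id_type into the top 8 bits and source_id_index into the low
+// 24 bits; sample_direction is inferred by comparing each interface index with
+// source_id_index.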
+ AsT("sample_direction")), + decoder.U32().Iter(options.MaxFlowsPerSample, flowRecord(samplingRate, options)), // line: 1654 + ) +} + +func flowSampleExpanded(sampleType interface{}, options V5FormatOptions) decoder.Directive { + var samplingRate = new(uint32) + var sourceIDIndex = new(uint32) + return decoder.Seq( // line: 1700 + decoder.U32(), // sequence_number line: 1701 + decoder.U32().Do(decoder.AsT("source_id_type")), // line: 1706 + 16878 + decoder.U32().Do(decoder.Set(sourceIDIndex).AsT("source_id_index")), // line 1689 + decoder.U32().Do(decoder.Set(samplingRate).AsF("sampling_rate")), // sample_rate line: 1707 + decoder.U32(), // saple_pool line: 1708 + decoder.U32().Do(decoder.AsF("drops")), // line: 1712 + decoder.U32(), // inputt line: 1727 + decoder.U32(). // input line: 1727 + Do(decoder.AsT("input_ifindex")). // line: 1728 + Do(decoder.U32ToStr(func(v uint32) string { + if v == *sourceIDIndex { + return "ingress" + } + return "" + }). + BreakIf(""). + AsT("sample_direction")), + decoder.U32(), // output line: 1728 + decoder.U32(). // outpuit line: 1728 + Do(decoder.AsT("output_ifindex")). // line: 1729 CHANFE AS FOR NON EXPANDED + Do(decoder.U32ToStr(func(v uint32) string { + if v == *sourceIDIndex { + return "egress" + } + return "" + }). + BreakIf(""). + AsT("sample_direction")), + decoder.U32().Iter(options.MaxFlowsPerSample, flowRecord(samplingRate, options)), // line: 1730 + ) +} + +func flowRecord(samplingRate *uint32, options V5FormatOptions) decoder.Directive { + var flowFormat interface{} + return decoder.Seq( // line: 1597 + decoder.U32().Ref(&flowFormat), // line 1598 + decoder.U32().Encapsulated(options.MaxFlowHeaderLength, // line 1599 + decoder.Ref(flowFormat).Switch( + decoder.Case(flowDataRawPacketHeaderFormat, rawPacketHeaderFlowData(samplingRate, options)), // line: 1938 + decoder.DefaultCase(nil), + ), + ), + ) +} + +func rawPacketHeaderFlowData(samplingRate *uint32, options V5FormatOptions) decoder.Directive { + var protocol interface{} + var headerLength interface{} + return decoder.Seq( // line: 1940 + decoder.U32().Ref(&protocol).Do(decoder.MapU32ToStr(headerProtocolMap).AsT("header_protocol")), // line: 1941 + decoder.U32(). // line: 1942 + Do(decoder.AsF("frame_length")). 
+func rawPacketHeaderFlowData(samplingRate *uint32, options V5FormatOptions) decoder.Directive {
+ var protocol interface{}
+ var headerLength interface{}
+ return decoder.Seq( // line: 1940
+  decoder.U32().Ref(&protocol).Do(decoder.MapU32ToStr(headerProtocolMap).AsT("header_protocol")), // line: 1941
+  decoder.U32(). // line: 1942
+   Do(decoder.AsF("frame_length")).
+   Do(decoder.U32ToU32(func(in uint32) uint32 {
+    return in * (*samplingRate)
+   }).AsF("bytes")),
+  decoder.U32(), // stripped line: 1967
+  decoder.U32().Ref(&headerLength).Do(decoder.AsF("header_length")),
+  decoder.Ref(headerLength).Encapsulated(options.MaxFlowHeaderLength,
+   decoder.Ref(protocol).Switch(
+    decoder.Case(headerProtocolEthernetIso88023, ethHeader(options)),
+    decoder.DefaultCase(nil),
+   )),
+ )
+}
+
+// ethHeader answers a decode Directive that will decode an ethernet frame header
+// according to https://en.wikipedia.org/wiki/Ethernet_frame
+func ethHeader(options V5FormatOptions) decoder.Directive {
+ var tagOrEType interface{}
+ etype := new(uint16)
+ return decoder.Seq(
+  decoder.OpenMetric(metricName),
+  decoder.Bytes(6).Do(decoder.BytesToStr(6, bytesToMACStr).AsT("dst_mac")),
+  decoder.Bytes(6).Do(decoder.BytesToStr(6, bytesToMACStr).AsT("src_mac")),
+  decoder.U16().Ref(&tagOrEType).Switch(
+   decoder.Case(uint16(0x8100),
+    decoder.Seq(
+     decoder.U16(),
+     decoder.U16().Do(decoder.Set(etype)), // just follows on from vlan id
+    ),
+   ),
+   decoder.DefaultCase( // Not an 802.1Q VLAN Tag, just treat as an ether type
+    decoder.Ref(tagOrEType).Do(decoder.Set(etype)),
+   ),
+  ),
+  decoder.U16Value(etype).Do(decoder.MapU16ToStr(etypeMap).AsT("ether_type")),
+  decoder.U16Value(etype).Switch(
+   decoder.Case(uint16(0x0800), ipv4Header(options)),
+   decoder.Case(uint16(0x86DD), ipv6Header(options)),
+   decoder.DefaultCase(nil),
+  ),
+  decoder.CloseMetric(),
+ )
+}
+
+// ipv4Header answers a decode Directive that decodes an IPv4 header according to
+// https://en.wikipedia.org/wiki/IPv4
+func ipv4Header(options V5FormatOptions) decoder.Directive {
+ var proto interface{}
+ return decoder.Seq(
+  decoder.U16().
+   Do(decoder.U16ToU16(func(in uint16) uint16 { return (in & 0xFC) >> 2 }).AsT("ip_dscp")).
+   Do(decoder.U16ToU16(func(in uint16) uint16 { return in & 0x3 }).AsT("ip_ecn")),
+  decoder.U16().Do(decoder.AsF("ip_total_length")),
+  decoder.U16(),
+  decoder.U16().
+   Do(decoder.U16ToU16(func(v uint16) uint16 { return (v & 0xE000) >> 13 }).AsF("ip_flags")).
+   Do(decoder.U16ToU16(func(v uint16) uint16 { return v & 0x1FFF }).AsF("ip_fragment_offset")),
+  decoder.Bytes(1).Do(decoder.BytesTo(1, func(b []byte) interface{} { return uint8(b[0]) }).AsF("ip_ttl")),
+  decoder.Bytes(1).Ref(&proto),
+  decoder.U16(),
+  decoder.Bytes(4).Do(decoder.BytesToStr(4, bytesToIPStr).AsT("src_ip")),
+  decoder.Bytes(4).Do(decoder.BytesToStr(4, bytesToIPStr).AsT("dst_ip")),
+  decoder.Ref(proto).Switch( // Does not consider IHL and Options
+   decoder.Case(ipProtocolTCP, tcpHeader(options)),
+   decoder.Case(ipProtocolUDP, udpHeader(options)),
+   decoder.DefaultCase(nil),
+  ),
+ )
+}
+// ipv6Header answers a decode Directive that decodes an IPv6 header according to
+// https://en.wikipedia.org/wiki/IPv6_packet
+func ipv6Header(options V5FormatOptions) decoder.Directive {
+ nextHeader := new(uint16)
+ return decoder.Seq(
+  decoder.U32().
+   Do(decoder.U32ToU32(func(in uint32) uint32 { return (in & 0xFC00000) >> 22 }).AsT("ip_dscp")).
+   Do(decoder.U32ToU32(func(in uint32) uint32 { return (in & 0x300000) >> 20 }).AsT("ip_ecn")),
+  decoder.U16(),
+  decoder.U16().
+   Do(decoder.U16ToU16(func(in uint16) uint16 { return (in & 0xFF00) >> 8 }).Set(nextHeader)),
+  decoder.Bytes(16).Do(decoder.BytesToStr(16, bytesToIPStr).AsT("src_ip")),
+  decoder.Bytes(16).Do(decoder.BytesToStr(16, bytesToIPStr).AsT("dst_ip")),
+  decoder.U16Value(nextHeader).Switch(
+   decoder.Case(uint16(ipProtocolTCP), tcpHeader(options)),
+   decoder.Case(uint16(ipProtocolUDP), udpHeader(options)),
+   decoder.DefaultCase(nil),
+  ),
+ )
+}
+
+func tcpHeader(options V5FormatOptions) decoder.Directive {
+ return decoder.Seq(
+  decoder.U16().
+   Do(decoder.AsT("src_port")),
+  decoder.U16().
+   Do(decoder.AsT("dst_port")),
+  decoder.U32(), // sequence
+  decoder.U32(), // ack_number
+  decoder.Bytes(2).
+   // data offset is the upper 4 bits, counted in 32-bit words; multiply by 4 for bytes
+   Do(decoder.BytesToU32(2, func(b []byte) uint32 { return uint32(b[0]>>4) * 4 }).AsF("tcp_header_length")),
+  decoder.U16().Do(decoder.AsF("tcp_window_size")),
+  decoder.U16(), // checksum
+  decoder.U16().Do(decoder.AsF("tcp_urgent_pointer")),
+ )
+}
+
+func udpHeader(options V5FormatOptions) decoder.Directive {
+ return decoder.Seq(
+  decoder.U16().
+   Do(decoder.AsT("src_port")),
+  decoder.U16().
+   Do(decoder.AsT("dst_port")),
+  decoder.U16().Do(decoder.AsF("udp_length")),
+ )
+}
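The decoder package that the next files introduce is a small combinator DSL:
directives describe the wire layout, and decode operations attached via Do emit
tags and fields into metrics as bytes are consumed. As a minimal sketch of how
the pieces compose (modeled on the package's own tests; the metric name,
layout and values below are illustrative, not part of the plugin):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/influxdata/telegraf/plugins/inputs/sflow/decoder"
)

func main() {
	// Layout: two big-endian uint32s; the first becomes a tag, the second a field.
	dd := decoder.Seq(
		decoder.OpenMetric("example"),
		decoder.U32().Do(decoder.AsT("kind")),
		decoder.U32().Do(decoder.AsF("value")),
		decoder.CloseMetric(),
	)

	var buf bytes.Buffer
	_ = binary.Write(&buf, binary.BigEndian, uint32(1))  // kind
	_ = binary.Write(&buf, binary.BigEndian, uint32(42)) // value

	dc := decoder.NewDecodeContext()
	if err := dc.Decode(dd, &buf); err != nil {
		panic(err)
	}
	for _, m := range dc.GetMetrics() {
		fmt.Println(m.Name(), m.Tags(), m.Fields())
	}
}
```

Run against that buffer, this would yield one metric named example with tag
kind=1 and field value=42.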
diff --git a/plugins/inputs/sflow/decoder/directives.go b/plugins/inputs/sflow/decoder/directives.go
new file mode 100644
index 000000000..9b20e1c33
--- /dev/null
+++ b/plugins/inputs/sflow/decoder/directives.go
@@ -0,0 +1,402 @@
+package decoder
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "time"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/metric"
+)
+
+// Directive is a Decode Directive, the basic building block of a decoder
+type Directive interface {
+ // Execute performs the function of the decode directive. If DecodeContext is nil then the
+ // ask is to check that a subsequent execution (with a non-nil DecodeContext) is expected to work.
+ Execute(*bytes.Buffer, *DecodeContext) error
+}
+
+type IterOption struct {
+ EOFTerminateIter                   bool
+ RemainingToGreaterEqualOrTerminate uint32
+}
+
+// ValueDirective is a decode directive that extracts some data from the packet, an integer or byte maybe,
+// which it then processes by using it, for example, as the counter for the number of iterations to perform
+// of downstream decode directives.
+//
+// A ValueDirective can be used to either Switch, Iter(ate), Encapsulate or Do mutually exclusively.
+type ValueDirective interface {
+ Directive
+
+ // Switch attaches a set of conditional decode directives downstream of this decode directive
+ Switch(paths ...CaseValueDirective) ValueDirective
+
+ // Iter attaches a single downstream decode directive that will be executed repeatedly according to the iteration count
+ Iter(maxIterations uint32, dd Directive, iterOptions ...IterOption) ValueDirective
+
+ // Encapsulated will form a new buffer of the encapsulated length and pass that buffer on to the downstream decode directive
+ Encapsulated(maxSize uint32, dd Directive) ValueDirective
+
+ // Ref records this decode directive in the passed reference
+ Ref(*interface{}) ValueDirective
+
+ // Do attaches a Decode Operation - these are uses of the decoded information to perform work on, transform, write out etc.
+ Do(ddo DirectiveOp) ValueDirective
+}
+
+type valueDirective struct {
+ reference *valueDirective
+
+ value    interface{}
+ noDecode bool
+
+ cases            []CaseValueDirective
+ iter             Directive
+ maxIterations    uint32
+ encapsulated     Directive
+ maxEncapsulation uint32
+ ops              []DirectiveOp
+ err              error
+
+ iterOption IterOption
+}
+
+func valueToString(in interface{}) string {
+ switch v := in.(type) {
+ case *uint16:
+  return fmt.Sprintf("%d", *v)
+ case uint16:
+  return fmt.Sprintf("%d", v)
+ case *uint32:
+  return fmt.Sprintf("%d", *v)
+ case uint32:
+  return fmt.Sprintf("%d", v)
+ default:
+  return fmt.Sprintf("%v", in)
+ }
+}
+
+func (dd *valueDirective) Execute(buffer *bytes.Buffer, dc *DecodeContext) error {
+ if dd.reference == nil && !dd.noDecode {
+  if e := binary.Read(buffer, binary.BigEndian, dd.value); e != nil {
+   return e
+  }
+ }
+
+ // Switch downstream?
+ if len(dd.cases) > 0 {
+  for _, c := range dd.cases {
+   if c.Equals(dd.value) {
+    return c.Execute(buffer, dc)
+   }
+  }
+  switch v := dd.value.(type) {
+  case *uint32:
+   return fmt.Errorf("(%T).Switch, unmatched case %d", v, *v)
+  case *uint16:
+   return fmt.Errorf("(%T).Switch, unmatched case %d", v, *v)
+  default:
+   return fmt.Errorf("(%T).Switch, unmatched case %v", dd.value, dd.value)
+  }
+ }
+
+ // Iter downstream?
+ if dd.iter != nil {
+  fn := func(id interface{}) error {
+   if dd.iterOption.RemainingToGreaterEqualOrTerminate > 0 && uint32(buffer.Len()) < dd.iterOption.RemainingToGreaterEqualOrTerminate {
+    return nil
+   }
+   if dd.iterOption.EOFTerminateIter && buffer.Len() == 0 {
+    return nil
+   }
+   if e := dd.iter.Execute(buffer, dc); e != nil {
+    return e
+   }
+   return nil
+  }
+  switch v := dd.value.(type) {
+  case *uint32:
+   if *v > dd.maxIterations {
+    return fmt.Errorf("iter exceeds configured max - value %d, limit %d", *v, dd.maxIterations)
+   }
+   for i := uint32(0); i < *v; i++ {
+    if e := fn(i); e != nil {
+     return e
+    }
+   }
+  case *uint16:
+   if *v > uint16(dd.maxIterations) {
+    return fmt.Errorf("iter exceeds configured max - value %d, limit %d", *v, dd.maxIterations)
+   }
+   for i := uint16(0); i < *v; i++ {
+    if e := fn(i); e != nil {
+     return e
+    }
+   }
+  default:
+   // Can't actually get here if the .Iter method checks types (and it does)
+   return fmt.Errorf("(%T).Iter, cannot iterate over this type", dd.value)
+  }
+ }
+
+ // Encapsulated downstream? The inner directive decodes from a copy of only
+ // the next length bytes, so it cannot read beyond its envelope.
+ if dd.encapsulated != nil {
+  switch v := dd.value.(type) {
+  case *uint32:
+   if *v > dd.maxEncapsulation {
+    return fmt.Errorf("encap exceeds configured max - value %d, limit %d", *v, dd.maxEncapsulation)
+   }
+   return dd.encapsulated.Execute(bytes.NewBuffer(buffer.Next(int(*v))), dc)
+  case *uint16:
+   if *v > uint16(dd.maxEncapsulation) {
+    return fmt.Errorf("encap exceeds configured max - value %d, limit %d", *v, dd.maxEncapsulation)
+   }
+   return dd.encapsulated.Execute(bytes.NewBuffer(buffer.Next(int(*v))), dc)
+  }
+ }
+
+ // Perform the attached operations
+ for _, op := range dd.ops {
+  if err := op.process(dc, dd.value); err != nil {
+   return err
+  }
+ }
+
+ return nil
+}
+
+// panicIfNotBlankCanvas checks the state of this value directive to see if it has
+// already been configured in a manner inconsistent with another configuration change
+func (dd *valueDirective) panicIfNotBlankCanvas(change string, checkDOs bool) {
+ if dd.cases != nil {
+  panic(fmt.Sprintf("already have switch cases assigned, cannot assign %s", change))
+ }
+ if dd.iter != nil {
+  panic(fmt.Sprintf("already have iter assigned, cannot assign %s", change))
+ }
+ if dd.encapsulated != nil {
+  panic(fmt.Sprintf("already have encap assigned, cannot assign %s", change))
+ }
+ if checkDOs && len(dd.ops) > 0 {
+  panic(fmt.Sprintf("already have do assigned, cannot assign %s", change))
+ }
+}
+
+func (dd *valueDirective) Switch(paths ...CaseValueDirective) ValueDirective {
+ dd.panicIfNotBlankCanvas("new switch", true)
+ dd.cases = paths
+ return dd
+}
+
+func (dd *valueDirective) Iter(maxIterations uint32, iter Directive, iterOptions ...IterOption) ValueDirective {
+ dd.panicIfNotBlankCanvas("new iter", true)
+ switch dd.value.(type) {
+ case *uint32:
+ case *uint16:
+ default:
+  panic(fmt.Sprintf("cannot iterate a %T", dd.value))
+ }
+
+ dd.iter = iter
+ dd.maxIterations = maxIterations
+ for _, io := range iterOptions {
+  dd.iterOption = io
+ }
+ return dd
+}
+
+func (dd *valueDirective) Encapsulated(maxSize uint32, encapsulated Directive) ValueDirective {
+ dd.panicIfNotBlankCanvas("new encapsulated", true)
+ switch dd.value.(type) {
+ case *uint32:
+ case *uint16:
+ default:
+  panic(fmt.Sprintf("cannot encapsulate a %T", dd.value))
+ }
+
+ dd.encapsulated = encapsulated
+ dd.maxEncapsulation = maxSize
+ return dd
+}
+
+func (dd *valueDirective) Do(ddo DirectiveOp) ValueDirective {
+ dd.panicIfNotBlankCanvas("new do", false)
+ for {
+  if ddo.prev() == nil {
+   break
+  }
+  ddo = ddo.prev()
+ }
+ if err := ddo.process(nil, dd.value); err != nil {
+  panic(fmt.Sprintf("directive operation %T cannot process %T - %s", ddo, dd.value, err))
+ }
+ if dd.ops == nil {
+  dd.ops = make([]DirectiveOp, 0, 5)
+ }
+ dd.ops = append(dd.ops, ddo)
+
+ return dd
+}
+
+func (dd *valueDirective) Ref(ref *interface{}) ValueDirective {
+ if *ref != nil {
+  panic("ref already assigned, not overwriting")
+ }
+ *ref = dd
+ return dd
+}
+
+// errorDirective is a decode directive that reports an error
+type errorDirective struct {
+ Directive
+}
+
+func (dd *errorDirective) Execute(buffer *bytes.Buffer, dc *DecodeContext) error {
+ return fmt.Errorf("Error Directive")
+}
+
+// CaseValueDirective is a decode directive that also has a switch/case test
+type CaseValueDirective interface {
+ Directive
+ Equals(interface{}) bool
+}
+
+type caseValueDirective struct {
+ caseValue interface{}
+ isDefault bool
+ equalsDd  Directive
+}
+
+func (dd *caseValueDirective) Execute(buffer *bytes.Buffer, dc *DecodeContext) error {
+ if dd.equalsDd == nil {
+  return nil
+ }
+ return dd.equalsDd.Execute(buffer, dc)
+}
+
+func (dd *caseValueDirective) Equals(value interface{}) bool {
+ if dd.isDefault {
+  return true
+ }
+ switch ourV := dd.caseValue.(type) {
+ case uint32:
+  ov, ok := value.(*uint32)
+  if ok {
+   return ourV == *ov
+  }
+ case uint16:
+  ov, ok := value.(*uint16)
+  if ok {
+   return ourV == *ov
+  }
+ case byte:
+  ov, ok := value.([]byte)
+  if ok {
+   if len(ov) == 1 {
+    return ourV == ov[0]
+   }
+  }
+ }
+ return false
+}
+
+// sequenceDirective is a decode directive that is a simple sequentially executed list of other decode directives
+type sequenceDirective struct {
+ decoders []Directive
+}
+
+func (di *sequenceDirective) Execute(buffer *bytes.Buffer, dc *DecodeContext) error {
+ for _, innerDD := range di.decoders {
+  if err := innerDD.Execute(buffer, dc); err != nil {
+   return err
+  }
+ }
+ return nil
+}
+
+// openMetric is a decode directive that opens the recording of new fields and tags
+type openMetric struct {
+ name string
+}
+
+func (di *openMetric) Execute(buffer *bytes.Buffer, dc *DecodeContext) error {
+ dc.openMetric(di.name)
+ return nil
+}
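+
+// Metrics are only appended to the decode context when they are closed; a
+// decode error part-way through a metric therefore discards the partially
+// populated metric.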
+// closeMetric is a decode directive that closes the current open metric
+type closeMetric struct {
+}
+
+func (di *closeMetric) Execute(buffer *bytes.Buffer, dc *DecodeContext) error {
+ dc.closeMetric()
+ return nil
+}
+
+// DecodeContext provides context for the decoding of a packet and primarily acts
+// as a repository for metrics that are collected during the packet decode process
+type DecodeContext struct {
+ metrics        []telegraf.Metric
+ timeHasBeenSet bool
+
+ // preMetric is used to capture tags or fields that may be recorded before a metric has been opened;
+ // these fields and tags are then copied into metrics that are subsequently opened
+ preMetric telegraf.Metric
+ current   telegraf.Metric
+ nano      int
+}
+
+func (dc *DecodeContext) openMetric(name string) {
+ t := dc.preMetric.Time()
+ if !dc.timeHasBeenSet {
+  t = time.Now().Add(time.Duration(dc.nano))
+ }
+ m, _ := metric.New(name, make(map[string]string), make(map[string]interface{}), t)
+ dc.nano++
+ // make sure to copy any fields and tags that were captured prior to the metric being opened
+ for t, v := range dc.preMetric.Tags() {
+  m.AddTag(t, v)
+ }
+ for f, v := range dc.preMetric.Fields() {
+  m.AddField(f, v)
+ }
+ dc.current = m
+}
+
+func (dc *DecodeContext) closeMetric() {
+ if dc.current != nil {
+  dc.metrics = append(dc.metrics, dc.current)
+ }
+ dc.current = nil
+}
+
+func (dc *DecodeContext) currentMetric() telegraf.Metric {
+ if dc.current == nil {
+  return dc.preMetric
+ }
+ return dc.current
+}
+
+// Decode initiates the decoding of the supplied buffer according to the root decode directive that is provided
+func (dc *DecodeContext) Decode(dd Directive, buffer *bytes.Buffer) error {
+ return dd.Execute(buffer, dc)
+}
+
+// GetMetrics answers the metrics that have been collected during the packet decode
+func (dc *DecodeContext) GetMetrics() []telegraf.Metric {
+ return dc.metrics
+}
+
+type notifyDirective struct {
+ fn func()
+}
+
+func (nd *notifyDirective) Execute(_ *bytes.Buffer, dc *DecodeContext) error {
+ if dc != nil {
+  nd.fn()
+ }
+ return nil
+}
diff --git a/plugins/inputs/sflow/decoder/directives_test.go b/plugins/inputs/sflow/decoder/directives_test.go
new file mode 100644
index 000000000..0a8d99e7a
--- /dev/null
+++ b/plugins/inputs/sflow/decoder/directives_test.go
@@ -0,0 +1,582 @@
+package decoder
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "testing"
+
+ "github.com/influxdata/telegraf"
+ "github.com/stretchr/testify/require"
+)
+
+// Execute will execute the decode directive relative to the supplied buffer
+func Execute(dd Directive, buffer *bytes.Buffer) error {
+ dc := &DecodeContext{}
+ return dd.Execute(buffer, dc)
+}
+
+func Test_basicUI32NotEnoughBytes(t *testing.T) {
+ dd := U32()
+ value := uint16(1001) // not enough bytes to read a U32 out as only a U16 in
+ var buffer bytes.Buffer
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value))
+ require.Error(t, Execute(dd, &buffer))
+}
+
+func Test_basicUI32(t *testing.T) {
+ dd := U32()
+ value := uint32(1001)
+ var buffer bytes.Buffer
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value))
+ require.NoError(t, Execute(dd, &buffer))
+ require.Equal(t, 0, buffer.Len())
+ x, _ := dd.(*valueDirective)
+ require.Equal(t, &value, x.value)
+}
+
+func Test_basicBytes(t *testing.T) {
+ dd := Bytes(4)
+ value := []byte{0x01, 0x02, 0x03, 0x04}
+ var buffer bytes.Buffer
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value))
+ require.NoError(t, Execute(dd, &buffer))
+ require.Equal(t, 0, buffer.Len())
+ x, _ := dd.(*valueDirective)
+ require.Equal(t, value, x.value)
+}
+
+func Test_basicSeq(t *testing.T) {
+ // A Seq with no members compiles and executes, but the buffer is left untouched
+ dd := Seq()
+ value := uint32(1001)
+ var buffer bytes.Buffer
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value))
+ originalLen := buffer.Len()
+ require.NoError(t, Execute(dd, &buffer))
+ require.Equal(t, originalLen, buffer.Len())
+
+ u := U32()
+ dd = Seq(
+  u,
+ )
+ value = uint32(1001)
+ buffer.Reset()
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value))
+ require.NoError(t, Execute(dd, &buffer))
+ require.Equal(t, 0, buffer.Len())
+ x, _ := u.(*valueDirective)
+ require.Equal(t, &value, x.value)
+}
+
+func Test_basicSeqOf(t *testing.T) {
+ // A SeqOf with no members compiles and executes, but the buffer is left untouched
+ dd := SeqOf([]Directive{})
+ value := uint32(1001)
+ var buffer bytes.Buffer
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value))
+ originalLen := buffer.Len()
+ require.NoError(t, Execute(dd, &buffer))
+ require.Equal(t, originalLen, buffer.Len())
+
+ u := U32()
+ dd = SeqOf(
+  []Directive{u},
+ )
+ value = uint32(1001)
+ buffer.Reset()
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value))
+ require.NoError(t, Execute(dd, &buffer))
+ require.Equal(t, 0, buffer.Len())
+ x, _ := u.(*valueDirective)
+ require.Equal(t, &value, x.value)
+}
+
+func Test_errorInSeq(t *testing.T) {
+ // An error raised by any member directive aborts the whole Seq
+ dd := Seq(U32(), ErrorDirective())
+ value := uint32(1001)
+ var buffer bytes.Buffer
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value))
+ require.Error(t, Execute(dd, &buffer))
+}
+
+func Test_basicU32Switch(t *testing.T) {
+ c1 := U32()
+ c2 := U32()
+ dd := U32().Switch(
+  Case(uint32(1), c1),
+  Case(uint32(2), c2),
+ )
+
+ value1 := uint32(3)
+ var buffer bytes.Buffer
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value1))
+ value2 := uint32(4)
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value2))
+ require.Error(t, Execute(dd, &buffer)) // should error as no path
+
+ value1 = uint32(1)
+ buffer.Reset()
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value1))
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value2))
+ require.NoError(t, Execute(dd, &buffer))
+ x, _ := c1.(*valueDirective)
+ y, _ := c2.(*valueDirective)
+ value0 := uint32(0)
+ require.Equal(t, &value2, x.value)
+ require.Equal(t, &value0, y.value)
+
+ // a bad path should raise an error;
+ // path 1 should be able to find a value in c1 and not in c2,
+ // and then the other way around
+}
+
+func Test_basicBinSwitch(t *testing.T) {
+ c1 := U32()
+ c2 := U32()
+ dd := Bytes(1).Switch(
+  Case(byte(1), c1),
+  Case(byte(2), c2),
+ )
+
+ value1 := byte(3)
+ var buffer bytes.Buffer
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value1))
+ value2 := uint32(4)
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value2))
+ require.Error(t, Execute(dd, &buffer)) // should error as no path
+
+ value1 = byte(1)
+ buffer.Reset()
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value1))
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value2))
+ require.NoError(t, Execute(dd, &buffer))
+ x, _ := c1.(*valueDirective)
+ y, _ := c2.(*valueDirective)
+ value0 := uint32(0)
+ require.Equal(t, &value2, x.value)
+ require.Equal(t, &value0, y.value)
+
+ // a bad path should raise an error;
+ // path 1 should be able to find a value in c1 and not in c2,
+ // and then the other way around
+}
+
+func Test_basicIter(t *testing.T) {
+ innerDD := U32()
+ dd := U32().Iter(math.MaxInt32, innerDD)
+
+ var buffer bytes.Buffer
+ iterations := uint32(2)
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &iterations))
+ it1Val := uint32(3)
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it1Val))
+ it2Val := uint32(4)
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it2Val))
+ require.NoError(t, Execute(dd, &buffer))
+ x, _ := dd.(*valueDirective)
+ require.Equal(t, &iterations, x.value)
+ y, _ := innerDD.(*valueDirective)
+ // we can't test it1Val as it gets overwritten!
+ require.Equal(t, &it2Val, y.value)
+}
+
+func Test_IterLimit(t *testing.T) {
+ innerDD := U32()
+ dd := U32().Iter(1, innerDD) // limit set at 1
+ var buffer bytes.Buffer
+ iterations := uint32(2)
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &iterations))
+ it1Val := uint32(3)
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it1Val))
+ it2Val := uint32(4)
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it2Val))
+ require.Error(t, Execute(dd, &buffer))
+}
+
+func Test_errorWithinIter(t *testing.T) {
+ dd := U32().Iter(math.MaxInt32, ErrorDirective())
+
+ var buffer bytes.Buffer
+ iterations := uint32(1)
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &iterations))
+
+ require.Error(t, Execute(dd, &buffer))
+}
+
+func Test_errorWithinIter2(t *testing.T) {
+ dd := U32().Iter(math.MaxInt32, U32().Do(ErrorOp(false)))
+ var buffer bytes.Buffer
+ iterations := uint32(1)
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &iterations))
+ innerValue := uint32(1)
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &innerValue))
+ require.Error(t, Execute(dd, &buffer))
+}
+
+func Test_errorWithinIter3(t *testing.T) {
+ defer expectPanic(t, "Test_errorWithinIter3")
+ U32().Iter(math.MaxInt32, U32().Do(ErrorOp(true)))
+}
+
+func Test_alreadyEncapsulated(t *testing.T) {
+ defer expectPanic(t, "Test_alreadyEncapsulated")
+ u := U32()
+ inner := U32()
+ u.Encapsulated(math.MaxInt32, inner)
+ u.Encapsulated(math.MaxInt32, inner)
+}
+
+func Test_alreadyDoAssigned(t *testing.T) {
+ defer expectPanic(t, "Test_alreadyDoAssigned")
+ u := U32()
+ u.Do(AsF("foo"))
+ inner := U32()
+ u.Encapsulated(math.MaxInt32, inner)
+}
+
+func Test_cantIterBytes(t *testing.T) {
+ defer expectPanic(t, "Test_cantIterBytes")
+ _ = Bytes(1).Iter(math.MaxInt32, U32())
+}
+
+func Test_OpenMetric(t *testing.T) {
+ innerDD := U32()
+ dd := U32().Iter(math.MaxInt32, Seq(
+  OpenMetric(""),
+  innerDD,
+  CloseMetric(),
+ ))
+
+ var buffer bytes.Buffer
+ iterations := uint32(2)
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &iterations))
+ it1Val := uint32(3)
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it1Val))
+ it2Val := uint32(3)
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it2Val))
+ dc := NewDecodeContext()
+ require.NoError(t, dc.Decode(dd, &buffer))
+ require.Equal(t, 2, len(dc.GetMetrics()))
+}
&it2Val)) + dc := NewDecodeContext() + require.NoError(t, dc.Decode(dd, &buffer)) + require.Equal(t, 2, len(dc.GetMetrics())) + m := dc.GetMetrics() + require.Equal(t, uint64(it1Val), getField(m[0], "foo")) + require.Equal(t, uint64(it2Val), getField(m[1], "foo")) +} + +func Test_AsT(t *testing.T) { + innerDD := U32().Do(AsT("foo")) + dd := U32().Iter(math.MaxInt32, Seq( + OpenMetric(""), + innerDD, + CloseMetric(), + )) + + var buffer bytes.Buffer + iterations := uint32(2) + require.NoError(t, binary.Write(&buffer, binary.BigEndian, &iterations)) + it1Val := uint32(3) + require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it1Val)) + it2Val := uint32(3) + require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it2Val)) + dc := NewDecodeContext() + require.NoError(t, dc.Decode(dd, &buffer)) + require.Equal(t, 2, len(dc.GetMetrics())) + m := dc.GetMetrics() + require.Equal(t, fmt.Sprintf("%d", it1Val), getTag(m[0], "foo")) + require.Equal(t, fmt.Sprintf("%d", it2Val), getTag(m[1], "foo")) +} + +func getField(m telegraf.Metric, name string) interface{} { + v, _ := m.GetField(name) + return v +} + +func getTag(m telegraf.Metric, name string) string { + v, _ := m.GetTag(name) + return v +} + +func Test_preMetricNesting(t *testing.T) { + innerDD := U32().Do(AsF("foo")) + dd := Seq( + U32().Do(AsF("bar")), + U32().Do(AsT("baz")), + U32().Iter(math.MaxInt32, + Seq( + OpenMetric(""), + innerDD, + CloseMetric(), + ), + ), + ) + + var buffer bytes.Buffer + barVal := uint32(55) + require.NoError(t, binary.Write(&buffer, binary.BigEndian, &barVal)) + bazVal := uint32(56) + require.NoError(t, binary.Write(&buffer, binary.BigEndian, &bazVal)) + iterations := uint32(2) + require.NoError(t, binary.Write(&buffer, binary.BigEndian, &iterations)) + it1Val := uint32(3) + require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it1Val)) + it2Val := uint32(3) + require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it2Val)) + dc := NewDecodeContext() + require.NoError(t, dc.Decode(dd, &buffer)) + require.Equal(t, 2, len(dc.GetMetrics())) + m := dc.GetMetrics() + require.Equal(t, uint64(barVal), getField(m[0], "bar")) + require.Equal(t, fmt.Sprintf("%d", bazVal), getTag(m[0], "baz")) + require.Equal(t, uint64(it1Val), getField(m[0], "foo")) + require.Equal(t, uint64(barVal), getField(m[1], "bar")) + require.Equal(t, fmt.Sprintf("%d", bazVal), getTag(m[1], "baz")) + require.Equal(t, uint64(it2Val), getField(m[1], "foo")) +} + +func Test_BasicEncapsulated(t *testing.T) { + + encap1Value := uint32(2) + encap2Value := uint32(3) + var encapBuffer bytes.Buffer + require.NoError(t, binary.Write(&encapBuffer, binary.BigEndian, &encap1Value)) + require.NoError(t, binary.Write(&encapBuffer, binary.BigEndian, &encap2Value)) + + encapSize := uint32(encapBuffer.Len()) + envelopeValue := uint32(4) + var envelopeBuffer bytes.Buffer + + require.NoError(t, binary.Write(&envelopeBuffer, binary.BigEndian, &encapSize)) + l, e := envelopeBuffer.Write(encapBuffer.Bytes()) + require.NoError(t, e) + require.Equal(t, encapSize, uint32(l)) + require.NoError(t, binary.Write(&envelopeBuffer, binary.BigEndian, &envelopeValue)) + + innerDD := U32() + envelopeDD := U32() // the buffer contains another U32 but the encpaultation will ignore it + dd := Seq( + U32().Encapsulated(math.MaxInt32, innerDD), + envelopeDD, + ) + require.NoError(t, Execute(dd, &envelopeBuffer)) + + require.Equal(t, 0, envelopeBuffer.Len()) + x, _ := envelopeDD.(*valueDirective) + require.Equal(t, &envelopeValue, x.value) + y, _ := 
+
+func Test_EncapsulationLimit(t *testing.T) {
+ encap1Value := uint32(2)
+ encap2Value := uint32(3)
+ var encapBuffer bytes.Buffer
+ require.NoError(t, binary.Write(&encapBuffer, binary.BigEndian, &encap1Value))
+ require.NoError(t, binary.Write(&encapBuffer, binary.BigEndian, &encap2Value))
+
+ encapSize := uint32(encapBuffer.Len())
+ envelopeValue := uint32(4)
+ var envelopeBuffer bytes.Buffer
+
+ require.NoError(t, binary.Write(&envelopeBuffer, binary.BigEndian, &encapSize))
+ l, e := envelopeBuffer.Write(encapBuffer.Bytes())
+ require.NoError(t, e)
+ require.Equal(t, encapSize, uint32(l))
+ require.NoError(t, binary.Write(&envelopeBuffer, binary.BigEndian, &envelopeValue))
+
+ innerDD := U32()
+ envelopeDD := U32()
+ dd := Seq(
+  U32().Encapsulated(4, innerDD), // 4 bytes, not 8 bytes or higher as max
+  envelopeDD,
+ )
+ require.Error(t, Execute(dd, &envelopeBuffer))
+}
+
+func Test_cantEncapsulateBytes(t *testing.T) {
+ defer expectPanic(t, "cantEncapsulateBytes")
+ _ = Bytes(1).Encapsulated(math.MaxInt32, U32())
+}
+
+func Test_BasicRef(t *testing.T) {
+ var x interface{}
+ dd1 := U32().Ref(&x)
+ dd2 := Ref(x)
+ dd := Seq(
+  dd1,
+  dd2,
+ )
+ y, ok := dd2.(*valueDirective)
+ require.True(t, ok)
+ require.Equal(t, y.reference, x)
+
+ value := uint32(1001)
+ var buffer bytes.Buffer
+ require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value))
+ require.NoError(t, Execute(dd, &buffer))
+
+ y, _ = dd1.(*valueDirective)
+ require.Equal(t, &value, y.value)
+
+ y, _ = dd2.(*valueDirective)
+ require.Equal(t, &value, y.value)
+}
+
+func Test_RefReassignError(t *testing.T) {
+ defer expectPanic(t, "Test_RefReassignError")
+ var x interface{}
+ U32().Ref(&x)
+ U32().Ref(&x)
+}
&value)) + dc := NewDecodeContext() + require.NoError(t, dc.Decode(s, &buffer)) +} + +func Test_U32BasicSwitchDefault(t *testing.T) { + s := U32().Switch(Case(uint32(0), nil), DefaultCase(nil)) + value := uint32(2) + var buffer bytes.Buffer + require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value)) + dc := NewDecodeContext() + require.NoError(t, dc.Decode(s, &buffer)) +} + +func Test_U16(t *testing.T) { + dd := U16() + value := uint16(1001) + var buffer bytes.Buffer + require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value)) + require.NoError(t, Execute(dd, &buffer)) + require.Equal(t, 0, buffer.Len()) + x, _ := dd.(*valueDirective) + require.Equal(t, &value, x.value) +} + +func Test_U16Value(t *testing.T) { + myU16 := uint16(5) + dd := U16Value(&myU16) + var buffer bytes.Buffer + require.NoError(t, Execute(dd, &buffer)) + x, _ := dd.(*valueDirective) + require.Equal(t, &myU16, x.value) +} + +func Test_Bytes(t *testing.T) { + dd := Bytes(4) + value := []byte{0x01, 0x02, 0x03, 0x04} + var buffer bytes.Buffer + require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value)) + require.NoError(t, Execute(dd, &buffer)) + require.Equal(t, 0, buffer.Len()) + x, _ := dd.(*valueDirective) + require.Equal(t, value, x.value) +} + +func Test_nilRefAnfWongTypeRef(t *testing.T) { + func() { + defer expectPanic(t, "Test_nilRef") + Ref(nil) + }() + + func() { + defer expectPanic(t, "Test_nilRef") + f := new(uint32) + Ref(f) + }() +} diff --git a/plugins/inputs/sflow/decoder/funcs.go b/plugins/inputs/sflow/decoder/funcs.go new file mode 100644 index 000000000..c90e1488f --- /dev/null +++ b/plugins/inputs/sflow/decoder/funcs.go @@ -0,0 +1,216 @@ +package decoder + +import ( + "fmt" + "time" + + "github.com/influxdata/telegraf/metric" +) + +// U32 answers a directive for 32bit Unsigned Integers +func U32() ValueDirective { + return &valueDirective{value: new(uint32)} +} + +// U64 answers a directive for 64bit Unsigned Integers +func U64() ValueDirective { + return &valueDirective{value: new(uint64)} +} + +// U8 answers a directive for 8bit Unsigned Integers +func U8() ValueDirective { + return &valueDirective{value: new(uint8)} +} + +// U16 answers a directive for 32bit Unsigned Integers +func U16() ValueDirective { + return &valueDirective{value: new(uint16)} +} + +// U16Value answers a directive that doesn't actually decode itself but reused a value previously decoded of type uint16 +func U16Value(value *uint16) ValueDirective { + return &valueDirective{value: value, noDecode: true} +} + +// Bytes answers a value directive that will decode the specified number (len) of bytes from the packet +func Bytes(len int) ValueDirective { + return &valueDirective{value: make([]byte, len)} +} + +// Case answers a directive to be used within a Switch clause of a U32 directive +func Case(caseValue interface{}, dd Directive) CaseValueDirective { + return &caseValueDirective{caseValue: caseValue, isDefault: false, equalsDd: dd} +} + +// DefaultCase answers a case decoder directive that can be used as the default, catch all, of a Switch +func DefaultCase(dd Directive) CaseValueDirective { + return &caseValueDirective{caseValue: nil, isDefault: true, equalsDd: dd} +} + +// Ref answers a decoder that reuses, through referal, an existing U32 directive +func Ref(target interface{}) ValueDirective { + if target == nil { + panic("Ref given a nil reference") + } + r, ok := target.(*valueDirective) + if !ok { + panic(fmt.Sprintf("Ref not given a ValueDirective reference but a %T", target)) + } + return 
&valueDirective{reference: r, value: r.value} +} + +// Seq ansers a directive that sequentially executes a list of provided directives +func Seq(decoders ...Directive) Directive { + return &sequenceDirective{decoders: decoders} +} + +func SeqOf(decoders []Directive) Directive { + return &sequenceDirective{decoders: decoders} +} + +// OpenMetric answers a directive that opens a new metrics for collecting tags and fields +func OpenMetric(name string) Directive { + return &openMetric{name: name} +} + +// CloseMetric answers a directive that close the current metrics +func CloseMetric() Directive { + return &closeMetric{} +} + +// NewDecodeContext ansewers a new Decode Contect to support the process of decoding +func NewDecodeContext() *DecodeContext { + m, _ := metric.New("sflow", make(map[string]string), make(map[string]interface{}), time.Now()) + return &DecodeContext{preMetric: m} +} + +// U32ToU32 answers a decode operation that transforms a uint32 to a uint32 via the supplied fn +func U32ToU32(fn func(uint32) uint32) *U32ToU32DOp { + result := &U32ToU32DOp{fn: fn, baseDOp: baseDOp{}} + result.do = result + return result +} + +// U32ToStr answers a decode operation that transforms a uint32 to a string via the supplied fn +func U32ToStr(fn func(uint32) string) *U32ToStrDOp { + result := &U32ToStrDOp{baseDOp: baseDOp{}, fn: fn} + result.do = result + return result +} + +// U16ToStr answers a decode operation that transforms a uint16 to a string via the supplied fn +func U16ToStr(fn func(uint16) string) *U16ToStrDOp { + result := &U16ToStrDOp{baseDOp: baseDOp{}, fn: fn} + result.do = result + return result +} + +// U16ToU16 answers a decode operation that transforms a uint16 to a uint16 via the supplied fn +func U16ToU16(fn func(uint16) uint16) *U16ToU16DOp { + result := &U16ToU16DOp{baseDOp: baseDOp{}, fn: fn} + result.do = result + return result +} + +// AsF answers a decode operation that will output a field into the open metric with the given name +func AsF(name string) *AsFDOp { + result := &AsFDOp{baseDOp: baseDOp{}, name: name} + result.do = result + return result +} + +// AsT answers a decode operation that will output a tag into the open metric with the given name +func AsT(name string) *AsTDOp { + result := &AsTDOp{name: name, baseDOp: baseDOp{}} + result.do = result + return result +} + +// AsTimestamp answers a decode operation that will set the tiemstamp on the metric +func AsTimestamp() *AsTimestampDOp { + result := &AsTimestampDOp{baseDOp: baseDOp{}} + result.do = result + return result +} + +// BytesToStr answers a decode operation that transforms a []bytes to a string via the supplied fn +func BytesToStr(len int, fn func([]byte) string) *BytesToStrDOp { + result := &BytesToStrDOp{baseDOp: baseDOp{}, len: len, fn: fn} + result.do = result + return result +} + +// BytesTo answers a decode operation that transforms a []bytes to a interface{} via the supplied fn +func BytesTo(len int, fn func([]byte) interface{}) *BytesToDOp { + result := &BytesToDOp{baseDOp: baseDOp{}, len: len, fn: fn} + result.do = result + return result +} + +// BytesToU32 answers a decode operation that transforms a []bytes to an uint32 via the supplied fn +func BytesToU32(len int, fn func([]byte) uint32) *BytesToU32DOp { + result := &BytesToU32DOp{baseDOp: baseDOp{}, len: len, fn: fn} + result.do = result + return result +} + +// MapU32ToStr answers a decode operation that maps an uint32 to a string via the supplied map +func MapU32ToStr(m map[uint32]string) *U32ToStrDOp { + result := &U32ToStrDOp{fn: 
+// MapU32ToStr answers a decode operation that maps a uint32 to a string via the supplied map
+func MapU32ToStr(m map[uint32]string) *U32ToStrDOp {
+ result := &U32ToStrDOp{fn: func(in uint32) string {
+  return m[in]
+ }, baseDOp: baseDOp{}}
+ result.do = result
+ return result
+}
+
+// U32Assert answers a decode operation that will assert the uint32 is a particular value or generate an error
+func U32Assert(fn func(v uint32) bool, fmtStr string) *U32AssertDOp {
+ result := &U32AssertDOp{baseDOp: baseDOp{}, fn: fn, fmtStr: fmtStr}
+ result.do = result
+ return result
+}
+
+// U16Assert answers a decode operation that will assert the uint16 is a particular value or generate an error
+func U16Assert(fn func(v uint16) bool, fmtStr string) *U16AssertDOp {
+ result := &U16AssertDOp{baseDOp: baseDOp{}, fn: fn, fmtStr: fmtStr}
+ result.do = result
+ return result
+}
+
+// MapU16ToStr answers a decode operation that maps a uint16 to a string via the supplied map
+func MapU16ToStr(m map[uint16]string) *U16ToStrDOp {
+ result := &U16ToStrDOp{baseDOp: baseDOp{}, fn: func(in uint16) string {
+  return m[in]
+ }}
+ result.do = result
+ return result
+}
+
+// Set answers a decode operation that will set the supplied *value to the value passed through the operation
+func Set(ptr interface{}) *SetDOp {
+ result := &SetDOp{ptr: ptr, baseDOp: baseDOp{}}
+ result.do = result
+ return result
+}
+
+// ErrorDirective answers a decode directive that will generate an error
+func ErrorDirective() Directive {
+ return &errorDirective{}
+}
+
+// ErrorOp answers a decode operation that will generate an error
+func ErrorOp(errorOnTestProcess bool) *ErrorDOp {
+ result := &ErrorDOp{baseDOp: baseDOp{}, errorOnTestProcess: errorOnTestProcess}
+ result.do = result
+ return result
+}
+
+// Notify answers a decode directive that will notify the supplied function upon execution
+func Notify(fn func()) Directive {
+ return &notifyDirective{fn}
+}
+
+// Nop answers a decode directive that is the null, benign, decoder
+func Nop() Directive {
+ return Notify(func() {})
+}
diff --git a/plugins/inputs/sflow/decoder/ops.go b/plugins/inputs/sflow/decoder/ops.go
new file mode 100644
index 000000000..2a1e0c72b
--- /dev/null
+++ b/plugins/inputs/sflow/decoder/ops.go
@@ -0,0 +1,490 @@
+package decoder
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/influxdata/telegraf"
+)
+
+// DirectiveOp are operations that are performed on values that have been decoded.
+// They are expected to be chained together, in a flow programming style, and the
+// Decode Directive that they are assigned to then walks back up the linked list to find the root
+// operation that will then be performed (passing the value down through various transformations)
+type DirectiveOp interface {
+ prev() DirectiveOp
+ // process can be executed in two contexts: one to check that the given type
+ // of upstream value can be processed (not to process it), and one to actually
+ // process the upstream value. The required behaviour is signalled by the
+ // presence of the DecodeContext - if nil, just test; if non-nil, process.
+ process(dc *DecodeContext, upstreamValue interface{}) error
+}
+
+type baseDOp struct {
+ p  DirectiveOp
+ do DirectiveOp
+ n  DirectiveOp
+}
+
+func (op *baseDOp) prev() DirectiveOp {
+ return op.p
+}
+
+func (op *baseDOp) AsF(name string) DirectiveOp {
+ result := &AsFDOp{baseDOp: baseDOp{p: op.do}, name: name}
+ result.do = result
+ op.n = result
+ return result
+}
+
+func (op *baseDOp) AsT(name string) DirectiveOp {
+ result := &AsTDOp{baseDOp: baseDOp{p: op.do}, name: name}
+ result.do = result
+ op.n = result
+ return result
+}
+
+func (op *baseDOp) Set(ptr interface{}) *SetDOp {
+ result := &SetDOp{baseDOp: baseDOp{p: op.do}, ptr: ptr}
+ result.do = result
+ op.n = result
+ return result
+}
+
+// U32ToU32DOp is a decode operation that can process U32 to U32
+type U32ToU32DOp struct {
+ baseDOp
+ fn func(uint32) uint32
+}
+
+func (op *U32ToU32DOp) process(dc *DecodeContext, upstreamValue interface{}) error {
+ var out uint32
+ switch v := upstreamValue.(type) {
+ case *uint32:
+  if dc != nil {
+   out = op.fn(*v)
+  }
+ default:
+  return fmt.Errorf("cannot process %T", v)
+ }
+
+ if dc != nil && op.n != nil {
+  return op.n.process(dc, out)
+ }
+ return nil
+}
+
+// ToString answers a U32ToStr decode operation that will transform the output of this U32ToU32 into a string
+func (op *U32ToU32DOp) ToString(fn func(uint32) string) *U32ToStrDOp {
+ result := &U32ToStrDOp{baseDOp: baseDOp{p: op}, fn: fn}
+ result.do = result
+ op.n = result
+ return result
+}
+
+// AsFDOp is a decode operation that writes fields to metrics
+type AsFDOp struct {
+ baseDOp
+ name string
+}
+
+func (op *AsFDOp) process(dc *DecodeContext, upstreamValue interface{}) error {
+ var m telegraf.Metric
+ if dc != nil {
+  m = dc.currentMetric()
+ }
+ switch v := upstreamValue.(type) {
+ case *uint64:
+  if dc != nil {
+   m.AddField(op.name, *v)
+  }
+ case *uint32:
+  if dc != nil {
+   m.AddField(op.name, *v)
+  }
+ case uint32:
+  if dc != nil {
+   m.AddField(op.name, v)
+  }
+ case *uint16:
+  if dc != nil {
+   m.AddField(op.name, *v)
+  }
+ case uint16:
+  if dc != nil {
+   m.AddField(op.name, v)
+  }
+ case *uint8:
+  if dc != nil {
+   m.AddField(op.name, *v)
+  }
+ case uint8:
+  if dc != nil {
+   m.AddField(op.name, v)
+  }
+ case string:
+  if dc != nil {
+   m.AddField(op.name, v)
+  }
+ default:
+  return fmt.Errorf("AsF cannot process %T", v)
+ }
+ return nil
+}
+
+// AsTimestampDOp is a decode operation that sets the timestamp on the metric
+type AsTimestampDOp struct {
+ baseDOp
+}
+
+func (op *AsTimestampDOp) process(dc *DecodeContext, upstreamValue interface{}) error {
+ var m telegraf.Metric
+ if dc != nil {
+  m = dc.currentMetric()
+ }
+ switch v := upstreamValue.(type) {
+ case *uint32:
+  if dc != nil {
+   m.SetTime(time.Unix(int64(*v), 0))
+   dc.timeHasBeenSet = true
+  }
+ default:
+  return fmt.Errorf("can't process %T", upstreamValue)
+ }
+ return nil
+}
+
+// AsTDOp is a decode operation that writes tags to metrics
+type AsTDOp struct {
+	baseDOp
+	name      string
+	skipEmpty bool
+}
+
+func (op *AsTDOp) process(dc *DecodeContext, upstreamValue interface{}) error {
+	var m telegraf.Metric
+	if dc != nil {
+		m = dc.currentMetric()
+	}
+	switch v := upstreamValue.(type) {
+	case *uint32:
+		if dc != nil {
+			m.AddTag(op.name, fmt.Sprintf("%d", *v))
+		}
+	case uint32:
+		if dc != nil {
+			m.AddTag(op.name, fmt.Sprintf("%d", v))
+		}
+	case *uint16:
+		if dc != nil {
+			m.AddTag(op.name, fmt.Sprintf("%d", *v))
+		}
+	case uint16:
+		if dc != nil {
+			m.AddTag(op.name, fmt.Sprintf("%d", v))
+		}
+	case *uint8:
+		if dc != nil {
+			m.AddTag(op.name, fmt.Sprintf("%d", *v))
+		}
+	case uint8:
+		if dc != nil {
+			m.AddTag(op.name, fmt.Sprintf("%d", v))
+		}
+	case string:
+		if dc != nil {
+			if !op.skipEmpty || v != "" {
+				m.AddTag(op.name, v)
+			}
+		}
+	default:
+		return fmt.Errorf("can't process %T", upstreamValue)
+	}
+	return nil
+}
+
+func (op *AsTDOp) prev() DirectiveOp {
+	return op.p
+}
+
+// BytesToStrDOp is a decode operation that transforms []byte to strings
+type BytesToStrDOp struct {
+	baseDOp
+	len int
+	fn  func([]byte) string
+}
+
+func (op *BytesToStrDOp) process(dc *DecodeContext, upstreamValue interface{}) error {
+	switch v := upstreamValue.(type) {
+	case []byte:
+		if len(v) == op.len {
+			if dc != nil {
+				out := op.fn(v)
+				if op.n != nil {
+					return op.n.process(dc, out)
+				}
+			}
+		} else {
+			return fmt.Errorf("cannot process len(%d), required len %d", len(v), op.len)
+		}
+	default:
+		return fmt.Errorf("cannot process %T", upstreamValue)
+	}
+	return nil
+}
+
+// U32AssertDOp is a decode operation that asserts a particular uint32 value
+type U32AssertDOp struct {
+	baseDOp
+	fn     func(uint32) bool
+	fmtStr string
+}
+
+func (op *U32AssertDOp) process(dc *DecodeContext, upstreamValue interface{}) error {
+	switch v := upstreamValue.(type) {
+	case *uint32:
+		if dc != nil && !op.fn(*v) {
+			return fmt.Errorf(op.fmtStr, *v)
+		}
+	default:
+		return fmt.Errorf("cannot process %T", upstreamValue)
+	}
+	return nil
+}
+
+// U16AssertDOp is a decode operation that asserts a particular uint16 value
+type U16AssertDOp struct {
+	baseDOp
+	fn     func(uint16) bool
+	fmtStr string
+}
+
+func (op *U16AssertDOp) process(dc *DecodeContext, upstreamValue interface{}) error {
+	switch v := upstreamValue.(type) {
+	case *uint16:
+		if dc != nil && !op.fn(*v) {
+			return fmt.Errorf(op.fmtStr, *v)
+		}
+	default:
+		return fmt.Errorf("cannot process %T", upstreamValue)
+	}
+	return nil
+}
+
+// U32ToStrDOp is a decode operation that transforms a uint32 to a string
+type U32ToStrDOp struct {
+	baseDOp
+	fn func(uint32) string
+}
+
+func (op *U32ToStrDOp) process(dc *DecodeContext, upstreamValue interface{}) error {
+	switch v := upstreamValue.(type) {
+	case uint32:
+		if dc != nil && op.n != nil {
+			return op.n.process(dc, op.fn(v))
+		}
+	case *uint32:
+		if dc != nil && op.n != nil {
+			return op.n.process(dc, op.fn(*v))
+		}
+	default:
+		return fmt.Errorf("cannot process %T", upstreamValue)
+	}
+	return nil
+}
+
+// BreakIf answers a BreakIf operation that will break the current decode operation chain,
+// without an error, if the value processed is the supplied value
+func (op *U32ToStrDOp) BreakIf(value string) *BreakIfDOp {
+	result := &BreakIfDOp{baseDOp: baseDOp{p: op}, value: value}
+	result.do = result
+	op.n = result
+	return result
+}
+
+// U16ToStrDOp is a decode operation that transforms a uint16 to a string
+type U16ToStrDOp struct {
+	baseDOp
+	fn func(uint16) string
+}
+
+func (op *U16ToStrDOp) process(dc *DecodeContext, upstreamValue interface{}) error {
+	switch v := upstreamValue.(type) {
+	case *uint16:
+		if dc != nil && op.n != nil {
+			return op.n.process(dc, op.fn(*v))
+		}
+	default:
+		return fmt.Errorf("cannot process %T", upstreamValue)
+	}
+	return nil
+}
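+
+// For example, a directive op that rejects anything other than sFlow version 5
+// might be built as (a sketch in the style of the existing tests; the
+// predicate and message are illustrative):
+//
+//	versionIs5 := U32Assert(func(v uint32) bool { return v == 5 },
+//		"unsupported sflow version %d")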
fmt.Errorf("cannot process %T", upstreamValue) + } + return nil +} + +// U16ToU16DOp is a decode operation that transfirms one uint16 to another uint16 +type U16ToU16DOp struct { + baseDOp + fn func(uint16) uint16 +} + +func (op *U16ToU16DOp) process(dc *DecodeContext, upstreamValue interface{}) error { + var out uint16 + var err error + switch v := upstreamValue.(type) { + case *uint16: + if dc != nil { + out = op.fn(*v) + } + default: + return fmt.Errorf("cannot process %T", upstreamValue) + } + if err != nil { + return err + } + if op.n != nil && dc != nil { + return op.n.process(dc, out) + } + return nil +} + +// BytesToU32DOp is a decode operation that transforms a []byte to a uint32 +type BytesToU32DOp struct { + baseDOp + len int + fn func([]byte) uint32 +} + +func (op *BytesToU32DOp) process(dc *DecodeContext, upstreamValue interface{}) error { + switch v := upstreamValue.(type) { + case []byte: + if len(v) == op.len { + out := op.fn(v) + if op.n != nil { + return op.n.process(dc, out) + } + } else { + return fmt.Errorf("cannot process %T as len(%d) != %d", upstreamValue, v, op.len) + } + default: + return fmt.Errorf("cannot process %T", upstreamValue) + } + return nil +} + +// SetDOp is a decode operation that will Set a pointer to a value to be the value processed +type SetDOp struct { + baseDOp + ptr interface{} +} + +func (op *SetDOp) process(dc *DecodeContext, upstreamValue interface{}) error { + switch v := upstreamValue.(type) { + case *uint32: + ptr, ok := op.ptr.(*uint32) + if ok { + if dc != nil { + *ptr = *v + } + } else { + return fmt.Errorf("cannot process as ptr %T and not *uint32", op.ptr) + } + case uint32: + ptr, ok := op.ptr.(*uint32) + if ok { + if dc != nil { + *ptr = v + } + } else { + return fmt.Errorf("cannot process as ptr %T and not *uint32", op.ptr) + } + case *uint16: + ptr, ok := op.ptr.(*uint16) + if ok { + if dc != nil { + *ptr = *v + } + } else { + return fmt.Errorf("cannot process as ptr %T and not *uint16", op.ptr) + } + case uint16: + ptr, ok := op.ptr.(*uint16) + if ok { + if dc != nil { + *ptr = v + } + } else { + return fmt.Errorf("cannot process as ptr %T and not *uint16", op.ptr) + } + case string: + ptr, ok := op.ptr.(*string) + if ok { + if dc != nil { + *ptr = v + } + } else { + return fmt.Errorf("cannot process as ptr %T and not *string", op.ptr) + } + default: + return fmt.Errorf("cannot process %T", upstreamValue) + } + if op.n != nil && dc != nil { + return op.n.process(dc, upstreamValue) + } + return nil +} + +// BytesToDOp is a decode operation that will transform []byte to interface{} according to a suppied function +type BytesToDOp struct { + baseDOp + len int + fn func([]byte) interface{} +} + +func (op *BytesToDOp) process(dc *DecodeContext, upstreamValue interface{}) error { + switch v := upstreamValue.(type) { + case []byte: + if len(v) == op.len { + if dc != nil { + out := op.fn(v) + return op.n.process(dc, out) + } + } else { + return fmt.Errorf("cannot process as len:%d required %d", len(v), op.len) + } + default: + return fmt.Errorf("cannot process %T", upstreamValue) + } + return nil +} + +// ErrorDOp is a decode operation that will generate an error +type ErrorDOp struct { + baseDOp + errorOnTestProcess bool +} + +func (op *ErrorDOp) process(dc *DecodeContext, upstreamValue interface{}) error { + if dc == nil && !op.errorOnTestProcess { + return nil + } + return fmt.Errorf("Error Op") +} diff --git a/plugins/inputs/sflow/decoder/ops_test.go b/plugins/inputs/sflow/decoder/ops_test.go new file mode 100644 index 
000000000..2b626b55d --- /dev/null +++ b/plugins/inputs/sflow/decoder/ops_test.go @@ -0,0 +1,383 @@ +package decoder + +import ( + "bytes" + "encoding/binary" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func Test_U64AsF(t *testing.T) { + dc := NewDecodeContext() + dc.openMetric("") + ddo := AsF("out") + in := uint64(5) + require.NoError(t, ddo.process(dc, &in)) + m := dc.currentMetric() + require.Equal(t, in, getField(m, "out")) +} + +func Test_U32AsF(t *testing.T) { + dc := NewDecodeContext() + dc.openMetric("") + ddo := AsF("out") + in := uint32(5) + require.NoError(t, ddo.process(dc, &in)) + m := dc.currentMetric() + require.Equal(t, uint64(in), getField(m, "out")) +} + +func Test_U16PtrAsF(t *testing.T) { + dc := NewDecodeContext() + dc.openMetric("") + ddo := AsF("out") + in := uint16(5) + require.NoError(t, ddo.process(dc, &in)) + m := dc.currentMetric() + require.Equal(t, uint64(in), getField(m, "out")) +} + +func Test_U16AsF(t *testing.T) { + dc := NewDecodeContext() + dc.openMetric("") + ddo := AsF("out") + in := uint16(5) + require.NoError(t, ddo.process(dc, in)) + m := dc.currentMetric() + require.Equal(t, uint64(in), getField(m, "out")) +} + +func Test_U8AsF(t *testing.T) { + dc := NewDecodeContext() + dc.openMetric("") + ddo := AsF("out") + in := uint8(5) + require.NoError(t, ddo.process(dc, in)) + m := dc.currentMetric() + require.Equal(t, uint64(in), getField(m, "out")) +} + +func Test_U8PtrAsF(t *testing.T) { + dc := NewDecodeContext() + dc.openMetric("") + ddo := AsF("out") + in := uint8(5) + require.NoError(t, ddo.process(dc, &in)) + m := dc.currentMetric() + require.Equal(t, uint64(in), getField(m, "out")) +} + +func Test_U32AsT(t *testing.T) { + dc := NewDecodeContext() + dc.openMetric("") + ddo := AsT("out") + in := uint32(5) + require.NoError(t, ddo.process(dc, in)) + m := dc.currentMetric() + require.Equal(t, fmt.Sprintf("%d", in), getTag(m, "out")) +} + +func Test_U32PtrAsT(t *testing.T) { + dc := NewDecodeContext() + dc.openMetric("") + ddo := AsT("out") + in := uint32(5) + require.NoError(t, ddo.process(dc, &in)) + m := dc.currentMetric() + require.Equal(t, fmt.Sprintf("%d", in), getTag(m, "out")) +} + +func Test_U16AsT(t *testing.T) { + dc := NewDecodeContext() + dc.openMetric("") + ddo := AsT("out") + in := uint16(5) + require.NoError(t, ddo.process(dc, in)) + m := dc.currentMetric() + require.Equal(t, fmt.Sprintf("%d", in), getTag(m, "out")) +} + +func Test_U16PtrAsT(t *testing.T) { + dc := NewDecodeContext() + dc.openMetric("") + ddo := AsT("out") + in := uint16(5) + require.NoError(t, ddo.process(dc, &in)) + m := dc.currentMetric() + require.Equal(t, fmt.Sprintf("%d", in), getTag(m, "out")) +} + +func Test_U8AsT(t *testing.T) { + dc := NewDecodeContext() + dc.openMetric("") + ddo := AsT("out") + in := uint8(5) + require.NoError(t, ddo.process(dc, in)) + m := dc.currentMetric() + require.Equal(t, fmt.Sprintf("%d", in), getTag(m, "out")) +} + +func Test_U8PtrAsT(t *testing.T) { + dc := NewDecodeContext() + dc.openMetric("") + ddo := AsT("out") + in := uint8(5) + require.NoError(t, ddo.process(dc, &in)) + m := dc.currentMetric() + require.Equal(t, fmt.Sprintf("%d", in), getTag(m, "out")) +} + +func Test_U32ToU32AsF(t *testing.T) { + dc := NewDecodeContext() + dc.openMetric("") + ddo := U32ToU32(func(i uint32) uint32 { return i * 2 }) + ddo2 := ddo.AsF("out") + require.Equal(t, ddo, ddo2.prev()) + in := uint32(5) + require.NoError(t, ddo.process(dc, &in)) + m := dc.currentMetric() + require.Equal(t, uint64(in*2), getField(m, 
"out")) +} + +func Test_U16ToU16AsF(t *testing.T) { + dc := NewDecodeContext() + dc.openMetric("") + ddo := U16ToU16(func(i uint16) uint16 { return i * 2 }) + ddo2 := ddo.AsF("out") + require.Equal(t, ddo, ddo2.prev()) + in := uint16(5) + require.NoError(t, ddo.process(dc, &in)) + m := dc.currentMetric() + require.Equal(t, uint64(in*2), getField(m, "out")) +} + +func Test_U32ToStrAsT(t *testing.T) { + dc := NewDecodeContext() + dc.openMetric("") + ddo := U32ToStr(func(i uint32) string { return fmt.Sprintf("%d", i*2) }) + ddo2 := ddo.AsT("out") + require.Equal(t, ddo, ddo2.prev()) + in := uint32(5) + require.NoError(t, ddo.process(dc, &in)) + m := dc.currentMetric() + require.Equal(t, fmt.Sprintf("%d", (in*2)), getTag(m, "out")) +} + +func Test_U16ToStrAsT(t *testing.T) { + dc := NewDecodeContext() + dc.openMetric("") + ddo := U16ToStr(func(i uint16) string { return fmt.Sprintf("%d", i*2) }) + ddo2 := ddo.AsT("out") + require.Equal(t, ddo, ddo2.prev()) + in := uint16(5) + require.NoError(t, ddo.process(dc, &in)) + m := dc.currentMetric() + require.Equal(t, fmt.Sprintf("%d", (in*2)), getTag(m, "out")) +} + +func Test_MapU32ToStrAsT(t *testing.T) { + dc := NewDecodeContext() + dc.openMetric("") + myMap := map[uint32]string{5: "five"} + ddo := MapU32ToStr(myMap) + ddo2 := ddo.AsT("out") + require.Equal(t, ddo, ddo2.prev()) + in := uint32(5) + require.NoError(t, ddo.process(dc, &in)) + m := dc.currentMetric() + require.Equal(t, "five", getTag(m, "out")) +} + +func Test_MapU16ToStrAsT(t *testing.T) { + dc := NewDecodeContext() + dc.openMetric("") + myMap := map[uint16]string{5: "five"} + ddo := MapU16ToStr(myMap) + ddo2 := ddo.AsT("out") + require.Equal(t, ddo, ddo2.prev()) + in := uint16(5) + require.NoError(t, ddo.process(dc, &in)) + m := dc.currentMetric() + require.Equal(t, "five", getTag(m, "out")) +} + +func Test_DecDir_ToU32(t *testing.T) { + u := U32(). + Do(U32ToU32(func(in uint32) uint32 { return in >> 2 }).AsF("out1")). 
+		Do(U32ToU32(func(in uint32) uint32 { return in * 2 }).AsF("out2"))
+	dd := Seq(OpenMetric(""), u, CloseMetric())
+
+	value := uint32(1001)
+	var buffer bytes.Buffer
+	require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value))
+
+	dc := NewDecodeContext()
+	require.NoError(t, dc.Decode(dd, &buffer))
+
+	x, _ := u.(*valueDirective)
+	require.Equal(t, &value, x.value)
+
+	// require that the fields were emitted onto the metric
+	require.Equal(t, 1, len(dc.GetMetrics()))
+	m := dc.GetMetrics()
+	require.Equal(t, uint64(value>>2), getField(m[0], "out1"))
+	require.Equal(t, uint64(value*2), getField(m[0], "out2"))
+}
+
+func Test_BytesToStrAsT(t *testing.T) {
+	dc := NewDecodeContext()
+	dc.openMetric("")
+	f := func(b []byte) string { return fmt.Sprintf("%d:%d", b[0], b[1]) }
+	ddo := BytesToStr(2, f)
+	ddo2 := ddo.AsT("out")
+	require.Equal(t, ddo, ddo2.prev())
+	in := []byte{0x01, 0x02}
+	require.NoError(t, ddo.process(dc, in))
+	m := dc.currentMetric()
+	require.Equal(t, fmt.Sprintf("%d:%d", in[0], in[1]), getTag(m, "out"))
+}
+
+func Test_BytesToAsT(t *testing.T) {
+	dc := NewDecodeContext()
+	dc.openMetric("")
+	f := func(b []byte) interface{} { return fmt.Sprintf("%d:%d", b[0], b[1]) }
+	ddo := BytesTo(2, f)
+	ddo2 := ddo.AsT("out")
+	require.Equal(t, ddo, ddo2.prev())
+	in := []byte{0x01, 0x02}
+	require.NoError(t, ddo.process(dc, in))
+	m := dc.currentMetric()
+	require.Equal(t, fmt.Sprintf("%d:%d", in[0], in[1]), getTag(m, "out"))
+}
+
+func Test_BytesToU32AsF(t *testing.T) {
+	dc := NewDecodeContext()
+	dc.openMetric("")
+	f := func(b []byte) uint32 { return uint32(b[0] * b[1]) }
+	ddo := BytesToU32(2, f)
+	ddo2 := ddo.AsF("out")
+	require.Equal(t, ddo, ddo2.prev())
+	in := []byte{0x01, 0x02}
+	require.NoError(t, ddo.process(dc, in))
+	m := dc.currentMetric()
+	require.Equal(t, uint64(in[0]*in[1]), getField(m, "out"))
+}
+
+func Test_U32require(t *testing.T) {
+	dc := NewDecodeContext()
+	ddo := U32Assert(func(in uint32) bool { return false }, "bad")
+	in := uint32(5)
+	require.Error(t, ddo.process(dc, &in))
+}
+
+func Test_U16require(t *testing.T) {
+	dc := NewDecodeContext()
+	ddo := U16Assert(func(in uint16) bool { return false }, "bad")
+	in := uint16(5)
+	require.Error(t, ddo.process(dc, &in))
+}
+
+func Test_Set(t *testing.T) {
+	dc := NewDecodeContext()
+	ptr := new(uint32)
+	ddo := Set(ptr)
+	in := uint32(5)
+	require.NoError(t, ddo.process(dc, &in))
+	require.Equal(t, *ptr, in)
+}
+
+func Test_U16Set(t *testing.T) {
+	dc := NewDecodeContext()
+	ptr := new(uint16)
+	ddo := Set(ptr)
+	in := uint16(5)
+	require.NoError(t, ddo.process(dc, in))
+	require.Equal(t, *ptr, in)
+}
+
+func Test_U16PtrSet(t *testing.T) {
+	dc := NewDecodeContext()
+	ptr := new(uint16)
+	ddo := Set(ptr)
+	in := uint16(5)
+	require.NoError(t, ddo.process(dc, &in))
+	require.Equal(t, *ptr, in)
+}
+
+func Test_U32toU32Set(t *testing.T) {
+	dc := NewDecodeContext()
+	ptr := new(uint32)
+	ddo := U32ToU32(func(in uint32) uint32 { return in * 2 }).Set(ptr).prev()
+	in := uint32(5)
+	require.NoError(t, ddo.process(dc, &in))
+	require.Equal(t, *ptr, in*2)
+}
+
+func Test_U32toU32toString(t *testing.T) {
+	dc := NewDecodeContext()
+	ptr := new(string)
+	ddo := U32ToU32(func(in uint32) uint32 { return in * 2 }).ToString(func(in uint32) string { return fmt.Sprintf("%d", in*2) }).Set(ptr).prev().prev()
+	in := uint32(2)
+	require.NoError(t, ddo.process(dc, &in))
+	require.Equal(t, "8", *ptr)
+}
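+
+// Note for the chained tests below: the builders return the tail of the
+// chain, so prev() is used to walk back to the operation that should be
+// handed the upstream value first.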
+
+func Test_U32toU32toStringBreakIf(t *testing.T) {
+	dc := NewDecodeContext()
+	ptr := new(string)
+	ddo := U32ToU32(func(in uint32) uint32 { return in * 2 }).ToString(func(in uint32) string { return fmt.Sprintf("%d", in*2) }).BreakIf("8").Set(ptr).prev().prev().prev()
+	in := uint32(2)
+	require.NoError(t, ddo.process(dc, &in))
+	require.Equal(t, "", *ptr)
+
+	in = uint32(1)
+	require.NoError(t, ddo.process(dc, &in))
+	require.Equal(t, "4", *ptr)
+}
+
+func Test_notify(t *testing.T) {
+	value := uint32(1001)
+	var buffer bytes.Buffer
+	require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value))
+
+	ptr := new(uint32)
+	*ptr = uint32(2002)
+	var notificationOne uint32
+	var notificationTwo uint32
+	dd := Seq(
+		Notify(func() { notificationOne = *ptr }),
+		U32().Do(Set(ptr)),
+		Notify(func() { notificationTwo = *ptr }),
+	)
+
+	require.NoError(t, Execute(dd, &buffer))
+	require.Equal(t, uint32(2002), notificationOne)
+	require.Equal(t, uint32(1001), notificationTwo)
+}
+
+func Test_nop(t *testing.T) {
+	value := uint32(1001)
+	var buffer bytes.Buffer
+	require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value))
+	originalLen := buffer.Len()
+	dd := Seq(
+		Nop(),
+	)
+
+	require.NoError(t, Execute(dd, &buffer))
+	require.Equal(t, originalLen, buffer.Len())
+}
+
+func Test_AsTimestamp(t *testing.T) {
+	dc := NewDecodeContext()
+	dc.openMetric("")
+	ddo := AsTimestamp()
+	now := time.Now()
+	in := uint32(now.Unix()) // AsTimestamp only handles uint32 (not uint64)
+	require.NoError(t, ddo.process(dc, &in))
+	m := dc.currentMetric()
+	require.Equal(t, now.Unix(), m.Time().Unix())
+}
diff --git a/plugins/inputs/sflow/decoder_test.go b/plugins/inputs/sflow/decoder_test.go
new file mode 100644
index 000000000..33db1d1d2
--- /dev/null
+++ b/plugins/inputs/sflow/decoder_test.go
@@ -0,0 +1,975 @@
+package sflow
+
+import (
+	"bytes"
+	"encoding/hex"
+	"testing"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs/sflow/decoder"
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/require"
+)
+
+func TestIPv4SW(t *testing.T) {
+	packet, err := hex.DecodeString("0000000500000001c0a80102000000100000f3d40bfa047f0000000200000001000000d00001210a000001fe000004000484240000000000000001fe00000200000000020000000100000090000000010000010b0000000400000080000c2936d3d694c691aa97600800450000f9f19040004011b4f5c0a80913c0a8090a00a1ba0500e5641f3081da02010104066d6f746f6770a281cc02047b46462e0201000201003081bd3012060d2b06010201190501010281dc710201003013060d2b06010201190501010281e66802025acc3012060d2b0601020119050101000003e9000000100000000900000000000000090000000000000001000000d00000e3cc000002100000400048eb740000000000000002100000020000000002000000010000009000000001000000970000000400000080000c2936d3d6fcecda44008f81000009080045000081186440003f119098c0a80815c0a8090a9a690202006d23083c33303e4170722031312030393a33333a3031206b6e6f64653120736e6d70645b313039385d3a20436f6e6e656374696f6e2066726f6d205544503a205b3139322e3136382e392e31305d3a34393233362d000003e90000001000000009000000000000000900000000")
+	require.NoError(t, err)
+
+	dc := decoder.NewDecodeContext()
+	err = dc.Decode(V5Format(NewDefaultV5FormatOptions()), bytes.NewBuffer(packet))
+	require.NoError(t, err)
+
+	expected := []telegraf.Metric{
+		testutil.MustMetric(
+			"sflow",
+			map[string]string{
+				"agent_address":    "192.168.1.2",
+				"dst_ip":           "192.168.9.10",
+				"dst_mac":          "00:0c:29:36:d3:d6",
+				"dst_port":         "47621",
+				"ether_type":       "IPv4",
+				"header_protocol":  "ETHERNET-ISO88023",
+				"input_ifindex":    "510",
+				"ip_dscp":          "0",
+				"ip_ecn":           "0",
+				"output_ifindex":   "512",
+				"sample_direction": "ingress",
+				"source_id_index":  "510",
+				"source_id_type":
"0", + "src_ip": "192.168.9.19", + "src_mac": "94:c6:91:aa:97:60", + "src_port": "161", + }, + map[string]interface{}{ + "bytes": uint64(0x042c00), + "drops": uint64(0x00), + "frame_length": uint64(0x010b), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0xf9), + "ip_ttl": uint64(0x40), + "sampling_rate": uint64(0x0400), + "udp_length": uint64(0xe5), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "192.168.1.2", + "dst_ip": "192.168.9.10", + "dst_mac": "00:0c:29:36:d3:d6", + "dst_port": "514", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "528", + "ip_dscp": "0", + "ip_ecn": "0", + "output_ifindex": "512", + "sample_direction": "ingress", + "source_id_index": "528", + "source_id_type": "0", + "src_ip": "192.168.8.21", + "src_mac": "fc:ec:da:44:00:8f", + "src_port": "39529", + }, + map[string]interface{}{ + "bytes": uint64(0x25c000), + "drops": uint64(0x00), + "frame_length": uint64(0x97), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x81), + "ip_ttl": uint64(0x3f), + "sampling_rate": uint64(0x4000), + "udp_length": uint64(0x6d), + }, + time.Unix(0, 0), + ), + } + actual := dc.GetMetrics() + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) +} + +func BenchmarkDecodeIPv4SW(b *testing.B) { + packet, err := hex.DecodeString("0000000500000001c0a80102000000100000f3d40bfa047f0000000200000001000000d00001210a000001fe000004000484240000000000000001fe00000200000000020000000100000090000000010000010b0000000400000080000c2936d3d694c691aa97600800450000f9f19040004011b4f5c0a80913c0a8090a00a1ba0500e5641f3081da02010104066d6f746f6770a281cc02047b46462e0201000201003081bd3012060d2b06010201190501010281dc710201003013060d2b06010201190501010281e66802025acc3012060d2b0601020119050101000003e9000000100000000900000000000000090000000000000001000000d00000e3cc000002100000400048eb740000000000000002100000020000000002000000010000009000000001000000970000000400000080000c2936d3d6fcecda44008f81000009080045000081186440003f119098c0a80815c0a8090a9a690202006d23083c33303e4170722031312030393a33333a3031206b6e6f64653120736e6d70645b313039385d3a20436f6e6e656374696f6e2066726f6d205544503a205b3139322e3136382e392e31305d3a34393233362d000003e90000001000000009000000000000000900000000") + require.NoError(b, err) + + dc := decoder.NewDecodeContext() + err = dc.Decode(V5Format(NewDefaultV5FormatOptions()), bytes.NewBuffer(packet)) + require.NoError(b, err) + + format := V5Format(NewDefaultV5FormatOptions()) + b.ResetTimer() + for n := 0; n < b.N; n++ { + err := dc.Decode(format, bytes.NewBuffer(packet)) + if err != nil { + panic(err) + } + + _ = dc.GetMetrics() + } +} + +func BenchmarkNewV5FormatDirective(b *testing.B) { + for n := 0; n < b.N; n++ { + _ = V5Format(NewDefaultV5FormatOptions()) + } +} + +func TestExpandFlow(t *testing.T) { + packet, err := 
hex.DecodeString("00000005000000010a00015000000000000f58998ae119780000000300000003000000c4000b62a90000000000100c840000040024fb7e1e0000000000000000001017840000000000100c8400000001000000010000009000000001000005bc0000000400000080001b17000130001201f58d44810023710800450205a6305440007e06ee92ac100016d94d52f505997e701fa1e17aff62574a50100200355f000000ffff00000b004175746f72697a7a6174610400008040ffff000400008040050031303030320500313030302004000000000868a200000000000000000860a200000000000000000003000000c40003cecf000000000010170400004000a168ac1c000000000000000000101784000000000010170400000001000000010000009000000001000005f200000004000000800024e8324338d4ae52aa0b54810020060800450005dc5420400080061397c0a8060cc0a806080050efcfbb25bad9a21c839a501000fff54000008a55f70975a0ff88b05735597ae274bd81fcba17e6e9206b8ea0fb07d05fc27dad06cfe3fdba5d2fc4d057b0add711e596cbe5e9b4bbe8be59cd77537b7a89f7414a628b736d00000003000000c0000c547a0000000000100c04000004005bc3c3b50000000000000000001017840000000000100c0400000001000000010000008c000000010000007e000000040000007a001b17000130001201f58d448100237108004500006824ea4000ff32c326d94d5105501018f02e88d003000001dd39b1d025d1c68689583b2ab21522d5b5a959642243804f6d51e63323091cc04544285433eb3f6b29e1046a6a2fa7806319d62041d8fa4bd25b7cd85b8db54202054a077ac11de84acbe37a550004") + require.NoError(t, err) + + dc := decoder.NewDecodeContext() + err = dc.Decode(V5Format(NewDefaultV5FormatOptions()), bytes.NewBuffer(packet)) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "10.0.1.80", + "dst_ip": "217.77.82.245", + "dst_mac": "00:1b:17:00:01:30", + "dst_port": "32368", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "1054596", + "ip_dscp": "0", + "ip_ecn": "2", + "output_ifindex": "1051780", + "sample_direction": "egress", + "source_id_index": "1051780", + "source_id_type": "0", + "src_ip": "172.16.0.22", + "src_mac": "00:12:01:f5:8d:44", + "src_port": "1433", + }, + map[string]interface{}{ + "bytes": uint64(0x16f000), + "drops": uint64(0x00), + "frame_length": uint64(0x05bc), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x05a6), + "ip_ttl": uint64(0x7e), + "sampling_rate": uint64(0x0400), + "tcp_header_length": uint64(0x40), + "tcp_urgent_pointer": uint64(0x00), + "tcp_window_size": uint64(0x0200), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "10.0.1.80", + "dst_ip": "192.168.6.8", + "dst_mac": "00:24:e8:32:43:38", + "dst_port": "61391", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "1054596", + "ip_dscp": "0", + "ip_ecn": "0", + "output_ifindex": "1054468", + "sample_direction": "egress", + "source_id_index": "1054468", + "source_id_type": "0", + "src_ip": "192.168.6.12", + "src_mac": "d4:ae:52:aa:0b:54", + "src_port": "80", + }, + map[string]interface{}{ + "bytes": uint64(0x017c8000), + "drops": uint64(0x00), + "frame_length": uint64(0x05f2), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x05dc), + "ip_ttl": uint64(0x80), + "sampling_rate": uint64(0x4000), + "tcp_header_length": uint64(0x40), + "tcp_urgent_pointer": uint64(0x00), + "tcp_window_size": uint64(0xff), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "10.0.1.80", + "dst_ip": "80.16.24.240", + "dst_mac": 
"00:1b:17:00:01:30", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "1054596", + "ip_dscp": "0", + "ip_ecn": "0", + "output_ifindex": "1051652", + "sample_direction": "egress", + "source_id_index": "1051652", + "source_id_type": "0", + "src_ip": "217.77.81.5", + "src_mac": "00:12:01:f5:8d:44", + }, + map[string]interface{}{ + "bytes": uint64(0x01f800), + "drops": uint64(0x00), + "frame_length": uint64(0x7e), + "header_length": uint64(0x7a), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x68), + "ip_ttl": uint64(0xff), + "sampling_rate": uint64(0x0400), + }, + time.Unix(0, 0), + ), + } + actual := dc.GetMetrics() + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) +} + +func TestIPv4SWRT(t *testing.T) { + packet, err := hex.DecodeString("000000050000000189dd4f010000000000003d4f21151ad40000000600000001000000bc354b97090000020c000013b175792bea000000000000028f0000020c0000000300000001000000640000000100000058000000040000005408b2587a57624c16fc0b61a5080045000046c3e440003a1118a0052aada7569e5ab367a6e35b0032d7bbf1f2fb2eb2490a97f87abc31e135834be367000002590000ffffffffffffffff02add830d51e0aec14cf000003e90000001000000000000000000000000000000000000003ea0000001000000001c342e32a000000160000000b00000001000000a88b8ffb57000002a2000013b12e344fd800000000000002a20000028f0000000300000001000000500000000100000042000000040000003e4c16fc0b6202c03e0fdecafe080045000030108000007d11fe45575185a718693996f0570e8c001c20614ad602003fd6d4afa6a6d18207324000271169b00000000003e90000001000000000000000000000000000000000000003ea000000100000000189dd4f210000000f0000001800000001000000e8354b970a0000020c000013b175793f9b000000000000028f0000020c00000003000000010000009000000001000001a500000004000000800231466d0b2c4c16fc0b61a5080045000193198f40003a114b75052aae1f5f94c778678ef24d017f50ea7622287c30799e1f7d45932d01ca92c46d930000927c0000ffffffffffffffff02ad0eea6498953d1c7ebb6dbdf0525c80e1a9a62bacfea92f69b7336c2f2f60eba0593509e14eef167eb37449f05ad70b8241c1a46d000003e90000001000000000000000000000000000000000000003ea0000001000000001c342e1fd000000160000001000000001000000e8354b970b0000020c000013b17579534c000000000000028f0000020c00000003000000010000009000000001000000b500000004000000800231466d0b2c4c16fc0b61a50800450000a327c240003606fd67b93c706a021ff365045fe8a0976d624df8207083501800edb31b0000485454502f312e3120323030204f4b0d0a5365727665723a2050726f746f636f6c20485454500d0a436f6e74656e742d4c656e6774683a20313430340d0a436f6e6e656374696f6e3a20000003e90000001000000000000000000000000000000000000003ea0000001000000001c342e1fd000000170000001000000001000000e8354b970c0000020c000013b1757966fd000000000000028f0000020c000000030000000100000090000000010000018e00000004000000800231466d0b2c4c16fc0b61a508004500017c7d2c40003a116963052abd8d021c940e67e7e0d501682342dbe7936bd47ef487dee5591ec1b24d83622e000072250000ffffffffffffffff02ad0039d8ba86a90017071d76b177de4d8c4e23bcaaaf4d795f77b032f959e0fb70234d4c28922d4e08dd3330c66e34bff51cc8ade5000003e90000001000000000000000000000000000000000000003ea0000001000000001c342e1fd000000160000001000000001000000e80d6146ac000002a1000013b17880b49d00000000000002a10000028f00000003000000010000009000000001000005ee00000004000000804c16fc0b6201d8b122766a2c0800450005dc04574000770623a11fcd80a218691d4cf2fe01bbd4f47482065fd63a5010fabd7987000052a20002c8c43ea91ca1eaa115663f5218a37fbb409dfbbedff54731ef41199b35535905ac2366a05a803146ced544abf45597f3714327d59f99e30c899c39fc5a4b67d12087bf8db2bc000003e90000001000000000000000000000000000000000000003ea000000100000000189d
d4f210000001000000018") + require.NoError(t, err) + + dc := decoder.NewDecodeContext() + err = dc.Decode(V5Format(NewDefaultV5FormatOptions()), bytes.NewBuffer(packet)) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "137.221.79.1", + "dst_ip": "86.158.90.179", + "dst_mac": "08:b2:58:7a:57:62", + "dst_port": "58203", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "655", + "ip_dscp": "0", + "ip_ecn": "0", + "output_ifindex": "524", + "sample_direction": "egress", + "source_id_index": "524", + "source_id_type": "0", + "src_ip": "5.42.173.167", + "src_mac": "4c:16:fc:0b:61:a5", + "src_port": "26534", + }, + map[string]interface{}{ + "bytes": uint64(0x06c4d8), + "drops": uint64(0x00), + "frame_length": uint64(0x58), + "header_length": uint64(0x54), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x46), + "ip_ttl": uint64(0x3a), + "sampling_rate": uint64(0x13b1), + "udp_length": uint64(0x32), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "137.221.79.1", + "dst_ip": "24.105.57.150", + "dst_mac": "4c:16:fc:0b:62:02", + "dst_port": "3724", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "674", + "ip_dscp": "0", + "ip_ecn": "0", + "output_ifindex": "655", + "sample_direction": "ingress", + "source_id_index": "674", + "source_id_type": "0", + "src_ip": "87.81.133.167", + "src_mac": "c0:3e:0f:de:ca:fe", + "src_port": "61527", + }, + map[string]interface{}{ + "bytes": uint64(0x0513a2), + "drops": uint64(0x00), + "frame_length": uint64(0x42), + "header_length": uint64(0x3e), + "ip_flags": uint64(0x00), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x30), + "ip_ttl": uint64(0x7d), + "sampling_rate": uint64(0x13b1), + "udp_length": uint64(0x1c), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "137.221.79.1", + "dst_ip": "95.148.199.120", + "dst_mac": "02:31:46:6d:0b:2c", + "dst_port": "62029", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "655", + "ip_dscp": "0", + "ip_ecn": "0", + "output_ifindex": "524", + "sample_direction": "egress", + "source_id_index": "524", + "source_id_type": "0", + "src_ip": "5.42.174.31", + "src_mac": "4c:16:fc:0b:61:a5", + "src_port": "26510", + }, + map[string]interface{}{ + "bytes": uint64(0x206215), + "drops": uint64(0x00), + "frame_length": uint64(0x01a5), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x0193), + "ip_ttl": uint64(0x3a), + "sampling_rate": uint64(0x13b1), + "udp_length": uint64(0x017f), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "137.221.79.1", + "dst_ip": "2.31.243.101", + "dst_mac": "02:31:46:6d:0b:2c", + "dst_port": "59552", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "655", + "ip_dscp": "0", + "ip_ecn": "0", + "output_ifindex": "524", + "sample_direction": "egress", + "source_id_index": "524", + "source_id_type": "0", + "src_ip": "185.60.112.106", + "src_mac": "4c:16:fc:0b:61:a5", + "src_port": "1119", + }, + map[string]interface{}{ + "bytes": uint64(0x0dec25), + "drops": uint64(0x00), + "frame_length": uint64(0xb5), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": 
uint64(0x00), + "ip_total_length": uint64(0xa3), + "ip_ttl": uint64(0x36), + "sampling_rate": uint64(0x13b1), + "tcp_header_length": uint64(0x40), + "tcp_urgent_pointer": uint64(0x00), + "tcp_window_size": uint64(0xed), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "137.221.79.1", + "dst_ip": "2.28.148.14", + "dst_mac": "02:31:46:6d:0b:2c", + "dst_port": "57557", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "655", + "ip_dscp": "0", + "ip_ecn": "0", + "output_ifindex": "524", + "sample_direction": "egress", + "source_id_index": "524", + "source_id_type": "0", + "src_ip": "5.42.189.141", + "src_mac": "4c:16:fc:0b:61:a5", + "src_port": "26599", + }, + map[string]interface{}{ + "bytes": uint64(0x1e9d2e), + "drops": uint64(0x00), + "frame_length": uint64(0x018e), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x017c), + "ip_ttl": uint64(0x3a), + "sampling_rate": uint64(0x13b1), + "udp_length": uint64(0x0168), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "137.221.79.1", + "dst_ip": "24.105.29.76", + "dst_mac": "4c:16:fc:0b:62:01", + "dst_port": "443", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "673", + "ip_dscp": "0", + "ip_ecn": "0", + "output_ifindex": "655", + "sample_direction": "ingress", + "source_id_index": "673", + "source_id_type": "0", + "src_ip": "31.205.128.162", + "src_mac": "d8:b1:22:76:6a:2c", + "src_port": "62206", + }, + map[string]interface{}{ + "bytes": uint64(0x74c38e), + "drops": uint64(0x00), + "frame_length": uint64(0x05ee), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x05dc), + "ip_ttl": uint64(0x77), + "sampling_rate": uint64(0x13b1), + "tcp_header_length": uint64(0x40), + "tcp_urgent_pointer": uint64(0x00), + "tcp_window_size": uint64(0xfabd), + }, + time.Unix(0, 0), + ), + } + actual := dc.GetMetrics() + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) +} + +func TestIPv6SW(t *testing.T) { + packet, err := hex.DecodeString("00000005000000010ae0648100000002000093d824ac82340000000100000001000000d000019f94000001010000100019f94000000000000000010100000000000000020000000100000090000000010000058c00000008000000800008e3fffc10d4f4be04612486dd60000000054e113a2607f8b0400200140000000000000008262000edc000e804a25e30c581af36fa01bbfa6f054e249810b584bcbf12926c2e29a779c26c72db483e8191524fe2288bfdaceaf9d2e724d04305706efcfdef70db86873bbacf29698affe4e7d6faa21d302f9b4b023291a05a000003e90000001000000001000000000000000100000000") + require.NoError(t, err) + + dc := decoder.NewDecodeContext() + err = dc.Decode(V5Format(NewDefaultV5FormatOptions()), bytes.NewBuffer(packet)) + require.NoError(t, err) + + expected := []telegraf.Metric{ + + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "10.224.100.129", + "dst_ip": "2620:ed:c000:e804:a25e:30c5:81af:36fa", + "dst_mac": "00:08:e3:ff:fc:10", + "dst_port": "64111", + "ether_type": "IPv6", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "257", + "output_ifindex": "0", + "sample_direction": "ingress", + "source_id_index": "257", + "source_id_type": "0", + "src_ip": "2607:f8b0:4002:14::8", + "src_mac": "d4:f4:be:04:61:24", + "src_port": "443", + }, + map[string]interface{}{ + "bytes": uint64(0x58c000), + "drops": uint64(0x00), + 
"frame_length": uint64(0x058c), + "header_length": uint64(0x80), + "ip_dscp": uint64(0x00), + "ip_ecn": uint64(0x00), + "sampling_rate": uint64(0x1000), + "udp_length": uint64(0x054e), + }, + time.Unix(0, 0), + ), + } + actual := dc.GetMetrics() + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) +} + +func TestExpandFlowCounter(t *testing.T) { + packet, err := hex.DecodeString("00000005000000010a00015000000000000f58898ae0fa380000000700000004000000ec00006ece0000000000101784000000030000000200000034000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000058001017840000000600000002540be400000000010000000300007b8ebd37b97e61ff94860803e8e908ffb2b500000000000000000000000000018e7c31ee7ba4195f041874579ff021ba936300000000000000000000000100000007000000380011223344550003f8b15645e7e7d6960000002fe2fc02fc01edbf580000000000000000000000000000000001dcb9cf000000000000000000000004000000ec00006ece0000000000100184000000030000000200000034000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000058001001840000000600000002540be400000000010000000300000841131d1fd9f850bfb103617cb401e6598900000000000000000000000000000bec1902e5da9212e3e96d7996e922513250000000000000000000000001000000070000003800112233445500005c260acbddb3000100000003e2fc02fc01ee414f0000000000000000000000000000000001dccdd30000000000000000000000030000008400004606000000000010030400004000ad9dc19b0000000000000000001017840000000000100304000000010000000100000050000000010000004400000004000000400012815116c4001517cf426d8100200608004500002895da40008006d74bc0a8060ac0a8064f04ef04aab1797122cf7eaf4f5010ffff7727000000000000000000000003000000b0001bd698000000000010148400000400700b180f000000000000000000101504000000000010148400000001000000010000007c000000010000006f000000040000006b001b17000131f0f755b9afc081000439080045000059045340005206920c1f0d4703d94d52e201bbf14977d1e9f15498af36801800417f1100000101080afdf3c70400e043871503010020ff268cfe2e2fd5fffe1d3d704a91d57b895f174c4b4428c66679d80a307294303f00000003000000c40003ceca000000000010170400004000a166aa7a000000000000000000101784000000000010170400000001000000010000009000000001000005f200000004000000800024e8369e2bd4ae52aa0b54810020060800450005dc4c71400080061b45c0a8060cc0a806090050f855692a7a94a1154ae1801001046b6a00000101080a6869a48d151016d046a84a7aa1c6743fa05179f7ecbd4e567150cb6f2077ff89480ae730637d26d2237c08548806f672c7476eb1b5a447b42cb9ce405994d152fa3e000000030000008c001bd699000000000010148400000400700b180f0000000000000000001015040000000000101484000000010000000100000058000000010000004a0000000400000046001b17000131f0f755b9afc0810004390800450000340ce040003a06bea5c1ce8793d94d528f00504c3b08b18f275b83d5df8010054586ad00000101050a5b83d5de5b83d5df11d800000003000000c400004e07000000000010028400004000c7ec97f2000000000000000000100784000000000010028400000001000000010000009000000001000005f2000000040000008000005e0001ff005056800dd18100000a0800450005dc5a42400040066ef70a000ac8c0a8967201bbe17c81597908caf8a05f5010010328610000f172263da0ba5d6223c079b8238bc841256bf17c4ffb08ad11c4fbff6f87ae1624a6b057b8baa9342114e5f5b46179083020cb560c4e9eadcec6dfd83e102ddbc27024803eb5") + require.NoError(t, err) + + dc := decoder.NewDecodeContext() + err = dc.Decode(V5Format(NewDefaultV5FormatOptions()), bytes.NewBuffer(packet)) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "10.0.1.80", + "dst_ip": "192.168.6.79", + "dst_mac": "00:12:81:51:16:c4", 
+ "dst_port": "1194", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "1054596", + "ip_dscp": "0", + "ip_ecn": "0", + "output_ifindex": "1049348", + "sample_direction": "egress", + "source_id_index": "1049348", + "source_id_type": "0", + "src_ip": "192.168.6.10", + "src_mac": "00:15:17:cf:42:6d", + "src_port": "1263", + }, + map[string]interface{}{ + "bytes": uint64(0x110000), + "drops": uint64(0x00), + "frame_length": uint64(0x44), + "header_length": uint64(0x40), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x28), + "ip_ttl": uint64(0x80), + "sampling_rate": uint64(0x4000), + "tcp_header_length": uint64(0x40), + "tcp_urgent_pointer": uint64(0x00), + "tcp_window_size": uint64(0xffff), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "10.0.1.80", + "dst_ip": "217.77.82.226", + "dst_mac": "00:1b:17:00:01:31", + "dst_port": "61769", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "1053956", + "ip_dscp": "0", + "ip_ecn": "0", + "output_ifindex": "1053828", + "sample_direction": "egress", + "source_id_index": "1053828", + "source_id_type": "0", + "src_ip": "31.13.71.3", + "src_mac": "f0:f7:55:b9:af:c0", + "src_port": "443", + }, + map[string]interface{}{ + "bytes": uint64(0x01bc00), + "drops": uint64(0x00), + "frame_length": uint64(0x6f), + "header_length": uint64(0x6b), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x59), + "ip_ttl": uint64(0x52), + "sampling_rate": uint64(0x0400), + "tcp_header_length": uint64(0x00), + "tcp_urgent_pointer": uint64(0x00), + "tcp_window_size": uint64(0x41), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "10.0.1.80", + "dst_ip": "192.168.6.9", + "dst_mac": "00:24:e8:36:9e:2b", + "dst_port": "63573", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "1054596", + "ip_dscp": "0", + "ip_ecn": "0", + "output_ifindex": "1054468", + "sample_direction": "egress", + "source_id_index": "1054468", + "source_id_type": "0", + "src_ip": "192.168.6.12", + "src_mac": "d4:ae:52:aa:0b:54", + "src_port": "80", + }, + map[string]interface{}{ + "bytes": uint64(0x017c8000), + "drops": uint64(0x00), + "frame_length": uint64(0x05f2), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x05dc), + "ip_ttl": uint64(0x80), + "sampling_rate": uint64(0x4000), + "tcp_header_length": uint64(0x00), + "tcp_urgent_pointer": uint64(0x00), + "tcp_window_size": uint64(0x0104), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "10.0.1.80", + "dst_ip": "217.77.82.143", + "dst_mac": "00:1b:17:00:01:31", + "dst_port": "19515", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "1053956", + "ip_dscp": "0", + "ip_ecn": "0", + "output_ifindex": "1053828", + "sample_direction": "egress", + "source_id_index": "1053828", + "source_id_type": "0", + "src_ip": "193.206.135.147", + "src_mac": "f0:f7:55:b9:af:c0", + "src_port": "80", + }, + map[string]interface{}{ + "bytes": uint64(0x012800), + "drops": uint64(0x00), + "frame_length": uint64(0x4a), + "header_length": uint64(0x46), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x34), + "ip_ttl": uint64(0x3a), + "sampling_rate": uint64(0x0400), + 
"tcp_header_length": uint64(0x00), + "tcp_urgent_pointer": uint64(0x00), + "tcp_window_size": uint64(0x0545), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "10.0.1.80", + "dst_ip": "192.168.150.114", + "dst_mac": "00:00:5e:00:01:ff", + "dst_port": "57724", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "1050500", + "ip_dscp": "0", + "ip_ecn": "0", + "output_ifindex": "1049220", + "sample_direction": "egress", + "source_id_index": "1049220", + "source_id_type": "0", + "src_ip": "10.0.10.200", + "src_mac": "00:50:56:80:0d:d1", + "src_port": "443", + }, + map[string]interface{}{ + "bytes": uint64(0x017c8000), + "drops": uint64(0x00), + "frame_length": uint64(0x05f2), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x05dc), + "ip_ttl": uint64(0x40), + "sampling_rate": uint64(0x4000), + "tcp_header_length": uint64(0x40), + "tcp_urgent_pointer": uint64(0x00), + "tcp_window_size": uint64(0x0103), + }, + time.Unix(0, 0), + ), + } + actual := dc.GetMetrics() + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) +} + +func TestFlowExpandCounter(t *testing.T) { + packet, err := hex.DecodeString("00000005000000010a000150000000000006d14d8ae0fe200000000200000004000000ac00006d15000000004b00ca000000000200000002000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000584b00ca0000000001000000000000000000000001000000010000308ae33bb950eb92a8a3004d0bb406899571000000000000000000000000000012f7ed9c9db8c24ed90604eaf0bd04636edb00000000000000000000000100000004000000ac00006d15000000004b0054000000000200000002000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000584b00540000000001000000003b9aca000000000100000003000067ba8e64fd23fa65f26d0215ec4a0021086600000000000000000000000000002002c3b21045c2378ad3001fb2f300061872000000000000000000000001") + require.NoError(t, err) + + dc := decoder.NewDecodeContext() + err = dc.Decode(V5Format(NewDefaultV5FormatOptions()), bytes.NewBuffer(packet)) + require.NoError(t, err) + + // we don't do anything with samples yet + expected := []telegraf.Metric{} + actual := dc.GetMetrics() + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) +} + +func TestUDPHeader(t *testing.T) { + options := NewDefaultV5FormatOptions() + octets := bytes.NewBuffer([]byte{ + 0x00, 0x01, // src_port + 0x00, 0x02, // dst_port + 0x00, 0x03, // udp_length + }) + + directive := decoder.Seq( + decoder.OpenMetric("sflow"), + udpHeader(options), + decoder.CloseMetric(), + ) + dc := decoder.NewDecodeContext() + err := directive.Execute(octets, dc) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "sflow", + map[string]string{ + "src_port": "1", + "dst_port": "2", + }, + map[string]interface{}{ + "udp_length": uint64(3), + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, dc.GetMetrics(), testutil.IgnoreTime()) +} + +func BenchmarkUDPHeader(b *testing.B) { + options := NewDefaultV5FormatOptions() + octets := bytes.NewBuffer([]byte{ + 0x00, 0x01, // src_port + 0x00, 0x02, // dst_port + 0x00, 0x03, // udp_length + }) + + directive := decoder.Seq( + decoder.OpenMetric("sflow"), + udpHeader(options), + decoder.CloseMetric(), + ) + dc := decoder.NewDecodeContext() + + b.ResetTimer() + for n := 0; n < b.N; n++ { + 
_ = directive.Execute(octets, dc) + } +} + +func TestIPv4Header(t *testing.T) { + octets := bytes.NewBuffer( + []byte{ + 0x45, // version + IHL + 0x00, // ip_dscp + ip_ecn + 0x00, 0x00, // total length + 0x00, 0x00, // identification + 0x00, 0x00, // flags + frag offset + 0x00, // ttl + 0x11, // protocol; 0x11 = udp + 0x00, 0x00, // header checksum + 0x7f, 0x00, 0x00, 0x01, // src ip + 0x7f, 0x00, 0x00, 0x02, // dst ip + 0x00, 0x01, // src_port + 0x00, 0x02, // dst_port + 0x00, 0x03, // udp_length + }, + ) + dc := decoder.NewDecodeContext() + + options := NewDefaultV5FormatOptions() + directive := decoder.Seq( + decoder.OpenMetric("sflow"), + ipv4Header(options), + decoder.CloseMetric(), + ) + + err := directive.Execute(octets, dc) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "sflow", + map[string]string{ + "src_ip": "127.0.0.1", + "dst_ip": "127.0.0.2", + "ip_dscp": "0", + "ip_ecn": "0", + "src_port": "1", + "dst_port": "2", + }, + map[string]interface{}{ + "ip_flags": uint64(0), + "ip_fragment_offset": uint64(0), + "ip_total_length": uint64(0), + "ip_ttl": uint64(0), + "udp_length": uint64(3), + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, dc.GetMetrics(), testutil.IgnoreTime()) +} + +// Using the same Directive instance, prior paths through the parse tree should +// not affect the latest parse. +func TestIPv4HeaderSwitch(t *testing.T) { + options := NewDefaultV5FormatOptions() + directive := decoder.Seq( + decoder.OpenMetric("sflow"), + ipv4Header(options), + decoder.CloseMetric(), + ) + + octets := bytes.NewBuffer( + []byte{ + 0x45, // version + IHL + 0x00, // ip_dscp + ip_ecn + 0x00, 0x00, // total length + 0x00, 0x00, // identification + 0x00, 0x00, // flags + frag offset + 0x00, // ttl + 0x11, // protocol; 0x11 = udp + 0x00, 0x00, // header checksum + 0x7f, 0x00, 0x00, 0x01, // src ip + 0x7f, 0x00, 0x00, 0x02, // dst ip + 0x00, 0x01, // src_port + 0x00, 0x02, // dst_port + 0x00, 0x03, // udp_length + }, + ) + dc := decoder.NewDecodeContext() + err := directive.Execute(octets, dc) + require.NoError(t, err) + + octets = bytes.NewBuffer( + []byte{ + 0x45, // version + IHL + 0x00, // ip_dscp + ip_ecn + 0x00, 0x00, // total length + 0x00, 0x00, // identification + 0x00, 0x00, // flags + frag offset + 0x00, // ttl + 0x06, // protocol; 0x06 = tcp + 0x00, 0x00, // header checksum + 0x7f, 0x00, 0x00, 0x01, // src ip + 0x7f, 0x00, 0x00, 0x02, // dst ip + 0x00, 0x01, // src_port + 0x00, 0x02, // dst_port + 0x00, 0x00, 0x00, 0x00, // sequence + 0x00, 0x00, 0x00, 0x00, // ack_number + 0x00, 0x00, // tcp_header_length + 0x00, 0x00, // tcp_window_size + 0x00, 0x00, // checksum + 0x00, 0x00, // tcp_urgent_pointer + }, + ) + dc = decoder.NewDecodeContext() + err = directive.Execute(octets, dc) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "sflow", + map[string]string{ + "src_ip": "127.0.0.1", + "dst_ip": "127.0.0.2", + "ip_dscp": "0", + "ip_ecn": "0", + "src_port": "1", + "dst_port": "2", + }, + map[string]interface{}{ + "ip_flags": uint64(0), + "ip_fragment_offset": uint64(0), + "ip_total_length": uint64(0), + "ip_ttl": uint64(0), + "tcp_header_length": uint64(0), + "tcp_window_size": uint64(0), + "tcp_urgent_pointer": uint64(0), + }, + time.Unix(0, 0), + ), + } + + // check that udp fields are not set on the tcp metric + testutil.RequireMetricsEqual(t, expected, dc.GetMetrics(), testutil.IgnoreTime()) +} + +func TestUnknownProtocol(t *testing.T) { + octets := bytes.NewBuffer( + []byte{ 
+ 0x45, // version + IHL + 0x00, // ip_dscp + ip_ecn + 0x00, 0x00, // total length + 0x00, 0x00, // identification + 0x00, 0x00, // flags + frag offset + 0x00, // ttl + 0x99, // protocol + 0x00, 0x00, // header checksum + 0x7f, 0x00, 0x00, 0x01, // src ip + 0x7f, 0x00, 0x00, 0x02, // dst ip + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + }, + ) + dc := decoder.NewDecodeContext() + + options := NewDefaultV5FormatOptions() + directive := decoder.Seq( + decoder.OpenMetric("sflow"), + ipv4Header(options), + decoder.CloseMetric(), + ) + + err := directive.Execute(octets, dc) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "sflow", + map[string]string{ + "src_ip": "127.0.0.1", + "dst_ip": "127.0.0.2", + "ip_dscp": "0", + "ip_ecn": "0", + }, + map[string]interface{}{ + "ip_flags": uint64(0), + "ip_fragment_offset": uint64(0), + "ip_total_length": uint64(0), + "ip_ttl": uint64(0), + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, dc.GetMetrics(), testutil.IgnoreTime()) +} diff --git a/plugins/inputs/sflow/sflow.go b/plugins/inputs/sflow/sflow.go new file mode 100644 index 000000000..7d113dd1e --- /dev/null +++ b/plugins/inputs/sflow/sflow.go @@ -0,0 +1,154 @@ +package sflow + +import ( + "bytes" + "context" + "fmt" + "io" + "net" + "net/url" + "strings" + "sync" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/sflow/decoder" +) + +const sampleConfig = ` + ## Address to listen for sFlow packets. + ## example: service_address = "udp://:6343" + ## service_address = "udp4://:6343" + ## service_address = "udp6://:6343" + service_address = "udp://:6343" + + ## Set the size of the operating system's receive buffer. 
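+  ## If left unset or zero, the operating system default is kept; the
+  ## listener only adjusts the buffer when a positive size is configured.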
+  ## example: read_buffer_size = "64KiB"
+  # read_buffer_size = ""
+`
+
+const (
+	maxPacketSize = 64 * 1024
+)
+
+type SFlow struct {
+	ServiceAddress string        `toml:"service_address"`
+	ReadBufferSize internal.Size `toml:"read_buffer_size"`
+
+	Log telegraf.Logger `toml:"-"`
+
+	addr        net.Addr
+	decoderOpts decoder.Directive
+	closer      io.Closer
+	cancel      context.CancelFunc
+	wg          sync.WaitGroup
+}
+
+// Description answers a description of this input plugin
+func (s *SFlow) Description() string {
+	return "SFlow V5 Protocol Listener"
+}
+
+// SampleConfig answers a sample configuration
+func (s *SFlow) SampleConfig() string {
+	return sampleConfig
+}
+
+func (s *SFlow) Init() error {
+	config := NewDefaultV5FormatOptions()
+	s.decoderOpts = V5Format(config)
+	return nil
+}
+
+// Start starts this sFlow listener listening on the configured network for sFlow packets
+func (s *SFlow) Start(acc telegraf.Accumulator) error {
+	u, err := url.Parse(s.ServiceAddress)
+	if err != nil {
+		return err
+	}
+
+	conn, err := listenUDP(u.Scheme, u.Host)
+	if err != nil {
+		return err
+	}
+	s.closer = conn
+	s.addr = conn.LocalAddr()
+
+	if s.ReadBufferSize.Size > 0 {
+		if err := conn.SetReadBuffer(int(s.ReadBufferSize.Size)); err != nil {
+			return err
+		}
+	}
+
+	s.Log.Infof("Listening on %s://%s", s.addr.Network(), s.addr.String())
+
+	s.wg.Add(1)
+	go func() {
+		defer s.wg.Done()
+		s.read(acc, conn)
+	}()
+
+	return nil
+}
+
+// Gather is a NOOP for sFlow as it receives, asynchronously, sFlow network packets
+func (s *SFlow) Gather(_ telegraf.Accumulator) error {
+	return nil
+}
+
+func (s *SFlow) Stop() {
+	if s.closer != nil {
+		s.closer.Close()
+	}
+	s.wg.Wait()
+}
+
+func (s *SFlow) Address() net.Addr {
+	return s.addr
+}
+
+func (s *SFlow) read(acc telegraf.Accumulator, conn net.PacketConn) {
+	buf := make([]byte, maxPacketSize)
+	for {
+		n, _, err := conn.ReadFrom(buf)
+		if err != nil {
+			if !strings.HasSuffix(err.Error(), ": use of closed network connection") {
+				acc.AddError(err)
+			}
+			break
+		}
+		s.process(acc, buf[:n])
+	}
+}
+
+func (s *SFlow) process(acc telegraf.Accumulator, buf []byte) {
+	dc := decoder.NewDecodeContext()
+	if err := dc.Decode(s.decoderOpts, bytes.NewBuffer(buf)); err != nil {
+		acc.AddError(fmt.Errorf("unable to parse incoming packet: %v", err))
+	}
+
+	metrics := dc.GetMetrics()
+	for _, m := range metrics {
+		acc.AddMetric(m)
+	}
+}
+
+func listenUDP(network string, address string) (*net.UDPConn, error) {
+	switch network {
+	case "udp", "udp4", "udp6":
+		addr, err := net.ResolveUDPAddr(network, address)
+		if err != nil {
+			return nil, err
+		}
+		return net.ListenUDP(network, addr)
+	default:
+		return nil, fmt.Errorf("unsupported network type: %s", network)
+	}
+}
+
+// init registers this SFlow input plugin with the Telegraf framework
+func init() {
+	inputs.Add("sflow", func() telegraf.Input {
+		return &SFlow{}
+	})
+}
diff --git a/plugins/inputs/sflow/sflow_test.go b/plugins/inputs/sflow/sflow_test.go
new file mode 100644
index 000000000..90f3a7c6d
--- /dev/null
+++ b/plugins/inputs/sflow/sflow_test.go
@@ -0,0 +1,135 @@
+package sflow
+
+import (
+	"encoding/hex"
+	"net"
+	"testing"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/require"
+)
+
+func TestSFlow(t *testing.T) {
+	sflow := &SFlow{
+		ServiceAddress: "udp://127.0.0.1:0",
+		Log:            testutil.Logger{},
+	}
+	err := sflow.Init()
+	require.NoError(t, err)
+
+	var acc testutil.Accumulator
+	err = sflow.Start(&acc)
+	require.NoError(t, err)
+	defer sflow.Stop()
+
+	client,
err := net.Dial(sflow.Address().Network(), sflow.Address().String()) + require.NoError(t, err) + + packetBytes, err := hex.DecodeString("0000000500000001c0a80102000000100000f3d40bfa047f0000000200000001000000d00001210a000001fe000004000484240000000000000001fe00000200000000020000000100000090000000010000010b0000000400000080000c2936d3d694c691aa97600800450000f9f19040004011b4f5c0a80913c0a8090a00a1ba0500e5641f3081da02010104066d6f746f6770a281cc02047b46462e0201000201003081bd3012060d2b06010201190501010281dc710201003013060d2b06010201190501010281e66802025acc3012060d2b0601020119050101000003e9000000100000000900000000000000090000000000000001000000d00000e3cc000002100000400048eb740000000000000002100000020000000002000000010000009000000001000000970000000400000080000c2936d3d6fcecda44008f81000009080045000081186440003f119098c0a80815c0a8090a9a690202006d23083c33303e4170722031312030393a33333a3031206b6e6f64653120736e6d70645b313039385d3a20436f6e6e656374696f6e2066726f6d205544503a205b3139322e3136382e392e31305d3a34393233362d000003e90000001000000009000000000000000900000000") + require.NoError(t, err) + client.Write(packetBytes) + + acc.Wait(2) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "192.168.1.2", + "dst_ip": "192.168.9.10", + "dst_mac": "00:0c:29:36:d3:d6", + "dst_port": "47621", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "510", + "ip_dscp": "0", + "ip_ecn": "0", + "output_ifindex": "512", + "sample_direction": "ingress", + "source_id_index": "510", + "source_id_type": "0", + "src_ip": "192.168.9.19", + "src_mac": "94:c6:91:aa:97:60", + "src_port": "161", + }, + map[string]interface{}{ + "bytes": uint64(273408), + "drops": uint64(0), + "frame_length": uint64(267), + "header_length": uint64(128), + "ip_flags": uint64(2), + "ip_fragment_offset": uint64(0), + "ip_total_length": uint64(249), + "ip_ttl": uint64(64), + "sampling_rate": uint64(1024), + "udp_length": uint64(229), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "192.168.1.2", + "dst_ip": "192.168.9.10", + "dst_mac": "00:0c:29:36:d3:d6", + "dst_port": "514", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "528", + "ip_dscp": "0", + "ip_ecn": "0", + "output_ifindex": "512", + "sample_direction": "ingress", + "source_id_index": "528", + "source_id_type": "0", + "src_ip": "192.168.8.21", + "src_mac": "fc:ec:da:44:00:8f", + "src_port": "39529", + }, + map[string]interface{}{ + "bytes": uint64(2473984), + "drops": uint64(0), + "frame_length": uint64(151), + "header_length": uint64(128), + "ip_flags": uint64(2), + "ip_fragment_offset": uint64(0), + "ip_total_length": uint64(129), + "ip_ttl": uint64(63), + "sampling_rate": uint64(16384), + "udp_length": uint64(109), + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), + testutil.IgnoreTime()) +} + +func BenchmarkSFlow(b *testing.B) { + sflow := &SFlow{ + ServiceAddress: "udp://127.0.0.1:0", + Log: testutil.Logger{}, + } + err := sflow.Init() + require.NoError(b, err) + + var acc testutil.Accumulator + err = sflow.Start(&acc) + require.NoError(b, err) + defer sflow.Stop() + + client, err := net.Dial(sflow.Address().Network(), sflow.Address().String()) + require.NoError(b, err) + + packetBytes, err := 
hex.DecodeString("0000000500000001c0a80102000000100000f3d40bfa047f0000000200000001000000d00001210a000001fe000004000484240000000000000001fe00000200000000020000000100000090000000010000010b0000000400000080000c2936d3d694c691aa97600800450000f9f19040004011b4f5c0a80913c0a8090a00a1ba0500e5641f3081da02010104066d6f746f6770a281cc02047b46462e0201000201003081bd3012060d2b06010201190501010281dc710201003013060d2b06010201190501010281e66802025acc3012060d2b0601020119050101000003e9000000100000000900000000000000090000000000000001000000d00000e3cc000002100000400048eb740000000000000002100000020000000002000000010000009000000001000000970000000400000080000c2936d3d6fcecda44008f81000009080045000081186440003f119098c0a80815c0a8090a9a690202006d23083c33303e4170722031312030393a33333a3031206b6e6f64653120736e6d70645b313039385d3a20436f6e6e656374696f6e2066726f6d205544503a205b3139322e3136382e392e31305d3a34393233362d000003e90000001000000009000000000000000900000000") + require.NoError(b, err) + + b.ResetTimer() + for n := 0; n < b.N; n++ { + client.Write(packetBytes) + acc.Wait(2) + } +} From ffaa9467a51ec96f758c824b7ac4ce45df8f941b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 18 Mar 2020 12:16:05 -0700 Subject: [PATCH 1636/1815] Update changelog and readme --- CHANGELOG.md | 1 + README.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5b7828bc4..39c0c10bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ - [lanz](/plugins/inputs/lanz/README.md): Contributed by @timhughes - [modbus](/plugins/inputs/modbus/README.md) - Contributed by @garciaolais - [monit](/plugins/inputs/monit/README.md) - Contributed by @SirishaGopigiri +- [sflow](/plugins/inputs/sflow/README.md) - Contributed by @influxdata - [wireguard](/plugins/inputs/wireguard/README.md) - Contributed by @LINKIWI #### New Processors diff --git a/README.md b/README.md index d1c43ca47..87d8f9b12 100644 --- a/README.md +++ b/README.md @@ -286,6 +286,7 @@ For documentation on the latest development code see the [documentation index][d * [riak](./plugins/inputs/riak) * [salesforce](./plugins/inputs/salesforce) * [sensors](./plugins/inputs/sensors) +* [sflow](./plugins/inputs/sflow) * [smart](./plugins/inputs/smart) * [snmp_legacy](./plugins/inputs/snmp_legacy) * [snmp](./plugins/inputs/snmp) From 35ef2559d3b418d82d98929d060dc0177b79438c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 18 Mar 2020 12:28:02 -0700 Subject: [PATCH 1637/1815] Fix s2geo documentation and code to reflect current plugin name --- CHANGELOG.md | 2 +- README.md | 2 +- plugins/processors/{s2_geo => s2geo}/README.md | 13 +++++++------ .../processors/{s2_geo/geo.go => s2geo/s2geo.go} | 14 ++++++++------ .../{s2_geo/geo_test.go => s2geo/s2geo_test.go} | 0 5 files changed, 17 insertions(+), 14 deletions(-) rename plugins/processors/{s2_geo => s2geo}/README.md (83%) rename plugins/processors/{s2_geo/geo.go => s2geo/s2geo.go} (86%) rename plugins/processors/{s2_geo/geo_test.go => s2geo/s2geo_test.go} (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 39c0c10bd..2c613a711 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,7 +25,7 @@ - [dedup](/plugins/processors/dedup/README.md) - Contributed by @igomura - [template](/plugins/processors/template/README.md) - Contributed by @RobMalvern -- [s2_geo](/plugins/processors/s2_geo/README.md) - Contributed by @alespour +- [s2geo](/plugins/processors/s2geo/README.md) - Contributed by @alespour #### New Outputs diff --git a/README.md b/README.md index 87d8f9b12..57615f9a8 100644 --- a/README.md +++ 
b/README.md @@ -369,7 +369,7 @@ For documentation on the latest development code see the [documentation index][d * [printer](/plugins/processors/printer) * [regex](/plugins/processors/regex) * [rename](/plugins/processors/rename) -* [s2_geo](/plugins/processors/s2_geo) +* [s2geo](/plugins/processors/s2geo) * [strings](/plugins/processors/strings) * [tag_limit](/plugins/processors/tag_limit) * [template](/plugins/processors/template) diff --git a/plugins/processors/s2_geo/README.md b/plugins/processors/s2geo/README.md similarity index 83% rename from plugins/processors/s2_geo/README.md rename to plugins/processors/s2geo/README.md index 5a65d5e7d..d48947fe6 100644 --- a/plugins/processors/s2_geo/README.md +++ b/plugins/processors/s2geo/README.md @@ -7,16 +7,17 @@ The `lat` and `lon` fields values should contain WGS-84 coordinates in decimal d ### Configuration ```toml -[[processors.geo]] - ## The name of the lat and lon fields containing WGS-84 latitude and longitude in decimal degrees - lat_field = "lat" - lon_field = "lon" +[[processors.s2geo]] + ## The name of the lat and lon fields containing WGS-84 latitude and + ## longitude in decimal degrees. + # lat_field = "lat" + # lon_field = "lon" ## New tag to create - tag_key = "s2_cell_id" + # tag_key = "s2_cell_id" ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html) - cell_level = 11 + # cell_level = 9 ``` ### Example diff --git a/plugins/processors/s2_geo/geo.go b/plugins/processors/s2geo/s2geo.go similarity index 86% rename from plugins/processors/s2_geo/geo.go rename to plugins/processors/s2geo/s2geo.go index 85f80c3df..5376a6657 100644 --- a/plugins/processors/s2_geo/geo.go +++ b/plugins/processors/s2geo/s2geo.go @@ -2,6 +2,7 @@ package geo import ( "fmt" + "github.com/golang/geo/s2" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/processors" @@ -15,15 +16,16 @@ type Geo struct { } var SampleConfig = ` - ## The name of the lat and lon fields containing WGS-84 latitude and longitude in decimal degrees - lat_field = "lat" - lon_field = "lon" + ## The name of the lat and lon fields containing WGS-84 latitude and + ## longitude in decimal degrees. + # lat_field = "lat" + # lon_field = "lon" ## New tag to create - tag_key = "s2_cell_id" + # tag_key = "s2_cell_id" ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html) - cell_level = 9 + # cell_level = 9 ` func (g *Geo) SampleConfig() string { @@ -31,7 +33,7 @@ func (g *Geo) SampleConfig() string { } func (g *Geo) Description() string { - return "Reads latitude and longitude fields and adds tag with with S2 cell ID token of specified level." 
+ return "Add the S2 Cell ID as a tag based on latitude and longitude fields" } func (g *Geo) Init() error { diff --git a/plugins/processors/s2_geo/geo_test.go b/plugins/processors/s2geo/s2geo_test.go similarity index 100% rename from plugins/processors/s2_geo/geo_test.go rename to plugins/processors/s2geo/s2geo_test.go From f11a2f80c4183c7c1e5af6a1c8d44bea131b447b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 18 Mar 2020 13:04:42 -0700 Subject: [PATCH 1638/1815] Fix s2geo import --- plugins/processors/all/all.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go index 360b37dd0..ab6746c62 100644 --- a/plugins/processors/all/all.go +++ b/plugins/processors/all/all.go @@ -12,7 +12,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/processors/printer" _ "github.com/influxdata/telegraf/plugins/processors/regex" _ "github.com/influxdata/telegraf/plugins/processors/rename" - _ "github.com/influxdata/telegraf/plugins/processors/s2_geo" + _ "github.com/influxdata/telegraf/plugins/processors/s2geo" _ "github.com/influxdata/telegraf/plugins/processors/strings" _ "github.com/influxdata/telegraf/plugins/processors/tag_limit" _ "github.com/influxdata/telegraf/plugins/processors/template" From 9fbf28851843dff71e97904135ccafe717969ba2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 18 Mar 2020 13:48:29 -0700 Subject: [PATCH 1639/1815] Remove init checks for valid label and field selectors (#7198) Importing k8s.io/klog starts a long running goroutine at init time. --- docs/LICENSE_OF_DEPENDENCIES.md | 2 -- go.mod | 2 +- plugins/inputs/prometheus/prometheus.go | 16 ---------------- 3 files changed, 1 insertion(+), 19 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index c0ce7aff0..2ca3739b7 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -163,8 +163,6 @@ following works: - gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE) - gopkg.in/tomb.v1 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v1/LICENSE) - gopkg.in/yaml.v2 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v2.2.2/LICENSE) -- k8s.io/apimachinery [Apache License 2.0](https://github.com/kubernetes/apimachinery/blob/master/LICENSE) -- k8s.io/klog [Apache License 2.0](https://github.com/kubernetes/klog/blob/master/LICENSE) ## telegraf used and modified code from these projects - github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE) diff --git a/go.mod b/go.mod index 0cf6c2514..fcae7b96d 100644 --- a/go.mod +++ b/go.mod @@ -138,7 +138,7 @@ require ( gopkg.in/olivere/elastic.v5 v5.0.70 gopkg.in/yaml.v2 v2.2.4 gotest.tools v2.2.0+incompatible // indirect - k8s.io/apimachinery v0.17.1 + k8s.io/apimachinery v0.17.1 // indirect ) // replaced due to https://github.com/satori/go.uuid/issues/73 diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 35c2d3d2c..ad98a1987 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -15,8 +15,6 @@ import ( "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" ) const acceptHeader = 
`application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,*/*;q=0.1` @@ -138,20 +136,6 @@ func (p *Prometheus) Init() error { p.Log.Warnf("Use of deprecated configuration: 'metric_version = 1'; please update to 'metric_version = 2'") } - if len(p.KubernetesLabelSelector) > 0 { - _, err := labels.Parse(p.KubernetesLabelSelector) - if err != nil { - return fmt.Errorf("label selector validation failed %q: %v", p.KubernetesLabelSelector, err) - } - } - - if len(p.KubernetesFieldSelector) > 0 { - _, err := fields.ParseSelector(p.KubernetesFieldSelector) - if err != nil { - return fmt.Errorf("field selector validation failed %s: %v", p.KubernetesFieldSelector, err) - } - } - return nil } From 4e28e5721b3683f99745dd3a18cc44d90d8df598 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 18 Mar 2020 15:30:49 -0700 Subject: [PATCH 1640/1815] Update google.cloud.go to latest (#7199) --- .circleci/config.yml | 2 - docs/LICENSE_OF_DEPENDENCIES.md | 5 +- go.mod | 25 ++-- go.sum | 208 +++++++++++++++++++++++++++++++- scripts/check-deps.sh | 4 + 5 files changed, 229 insertions(+), 15 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index f9176c97a..844ed294b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -70,7 +70,6 @@ jobs: - attach_workspace: at: '/go' - run: 'make' - - run: 'make check' - run: 'make test' test-go-1.12-386: <<: [ *defaults, *go-1_12 ] @@ -78,7 +77,6 @@ jobs: - attach_workspace: at: '/go' - run: 'GOARCH=386 make' - - run: 'GOARCH=386 make check' - run: 'GOARCH=386 make test' test-go-1.13: <<: [ *defaults, *go-1_13 ] diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 2ca3739b7..6df679d58 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -3,7 +3,7 @@ When distributed in a binary form, Telegraf may contain portions of the following works: -- cloud.google.com/go [Apache License 2.0](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/LICENSE) +- cloud.google.com/go [Apache License 2.0](https://github.com/googleapis/google-cloud-go/blob/master/LICENSE) - code.cloudfoundry.org/clock [Apache License 2.0](https://github.com/cloudfoundry/clock/blob/master/LICENSE) - collectd.org [MIT License](https://git.octo.it/?p=collectd.git;a=blob;f=COPYING;hb=HEAD) - github.com/Azure/azure-amqp-common-go [MIT License](https://github.com/Azure/azure-amqp-common-go/blob/master/LICENSE) @@ -59,9 +59,11 @@ following works: - github.com/gofrs/uuid [MIT License](https://github.com/gofrs/uuid/blob/master/LICENSE) - github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE) - github.com/golang/geo [Apache License 2.0](https://github.com/golang/geo/blob/master/LICENSE) +- github.com/golang/groupcache [Apache License 2.0](https://github.com/golang/groupcache/blob/master/LICENSE) - github.com/golang/mock [Apache License 2.0](https://github.com/golang/mock/blob/master/LICENSE) - github.com/golang/protobuf [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/protobuf/blob/master/LICENSE) - github.com/golang/snappy [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/snappy/blob/master/LICENSE) +- github.com/google/go-cmp [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-cmp/blob/master/LICENSE) - github.com/google/go-github [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-github/blob/master/LICENSE) - 
github.com/google/go-querystring [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-querystring/blob/master/LICENSE) - github.com/googleapis/gax-go [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/gax-go/blob/master/LICENSE) @@ -72,7 +74,6 @@ following works: - github.com/hashicorp/go-cleanhttp [Mozilla Public License 2.0](https://github.com/hashicorp/go-cleanhttp/blob/master/LICENSE) - github.com/hashicorp/go-rootcerts [Mozilla Public License 2.0](https://github.com/hashicorp/go-rootcerts/blob/master/LICENSE) - github.com/hashicorp/go-uuid [Mozilla Public License 2.0](https://github.com/hashicorp/go-uuid/LICENSE) -- github.com/hashicorp/golang-lru [Mozilla Public License 2.0](https://github.com/hashicorp/golang-lru/blob/master/LICENSE) - github.com/hashicorp/serf [Mozilla Public License 2.0](https://github.com/hashicorp/serf/blob/master/LICENSE) - github.com/influxdata/go-syslog [MIT License](https://github.com/influxdata/go-syslog/blob/develop/LICENSE) - github.com/influxdata/tail [MIT License](https://github.com/influxdata/tail/blob/master/LICENSE.txt) diff --git a/go.mod b/go.mod index fcae7b96d..ced5b3c67 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,9 @@ module github.com/influxdata/telegraf go 1.12 require ( - cloud.google.com/go v0.37.4 + cloud.google.com/go v0.53.0 + cloud.google.com/go/datastore v1.1.0 // indirect + cloud.google.com/go/pubsub v1.2.0 code.cloudfoundry.org/clock v1.0.0 // indirect collectd.org v0.3.0 github.com/Azure/azure-event-hubs-go/v3 v3.2.0 @@ -53,8 +55,8 @@ require ( github.com/gofrs/uuid v2.1.0+incompatible github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d github.com/golang/geo v0.0.0-20190916061304-5b978397cfec - github.com/golang/mock v1.3.1-0.20190508161146-9fa652df1129 // indirect - github.com/golang/protobuf v1.3.2 + github.com/golang/mock v1.4.3 // indirect + github.com/golang/protobuf v1.3.5 github.com/google/go-cmp v0.4.0 github.com/google/go-github v17.0.0+incompatible github.com/google/go-querystring v1.0.0 // indirect @@ -100,7 +102,7 @@ require ( github.com/openzipkin/zipkin-go-opentracing v0.3.4 github.com/pkg/errors v0.8.1 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 - github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 github.com/prometheus/common v0.2.0 github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect @@ -122,14 +124,18 @@ require ( github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a // indirect github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect - golang.org/x/net v0.0.0-20200202094626-16171245cfb2 - golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 + golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 // indirect + golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect + golang.org/x/net v0.0.0-20200301022130-244492dfa37a + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d + golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a // indirect golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 + golang.org/x/tools v0.0.0-20200317043434-63da46f3035e // indirect golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 gonum.org/v1/gonum v0.6.2 // indirect - google.golang.org/api v0.3.1 - google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 - 
google.golang.org/grpc v1.19.0 + google.golang.org/api v0.20.0 + google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24 + google.golang.org/grpc v1.28.0 gopkg.in/fatih/pool.v2 v2.0.0 // indirect gopkg.in/gorethink/gorethink.v3 v3.0.5 gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect @@ -138,6 +144,7 @@ require ( gopkg.in/olivere/elastic.v5 v5.0.70 gopkg.in/yaml.v2 v2.2.4 gotest.tools v2.2.0+incompatible // indirect + honnef.co/go/tools v0.0.1-2020.1.3 // indirect k8s.io/apimachinery v0.17.1 // indirect ) diff --git a/go.sum b/go.sum index f3579f14b..18ff65cfe 100644 --- a/go.sum +++ b/go.sum @@ -2,10 +2,34 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU= cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0 h1:MZQCQQaRwOrAcuKjiHWHrgKykt4fZyuwF2dtiG3fGW8= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= collectd.org v0.3.0 h1:iNBHGw1VvPJxH2B6RiFWFZ+vsjo1lCdRszBeOuwGi00= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-amqp-common-go/v3 v3.0.0 h1:j9tjcwhypb/jek3raNrwlCIl7iKQYOug7CLpSyBBodc= github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod 
h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg= github.com/Azure/azure-event-hubs-go/v3 v3.2.0 h1:CQlxKH5a4NX1ZmbdqXUPRwuNGh2XvtgmhkZvkEuWzhs= @@ -46,7 +70,9 @@ github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1Gn github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee h1:atI/FFjXh6hIVlPE1Jup9m8N4B9q/OSbMUe2EBahs+w= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM= @@ -93,11 +119,16 @@ github.com/caio/go-tdigest v2.3.0+incompatible h1:zP6nR0nTSUzlSqqr7F/LhslPlSZX/f github.com/caio/go-tdigest v2.3.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= github.com/cenkalti/backoff v2.0.0+incompatible h1:5IIPUHhlnUZbcHQsQou5k1Tn58nJkeJL9U+ig5CHJbY= github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 h1:57RI0wFkG/smvVTcz7F43+R0k+Hvci3jAVQF9lyMoOo= github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 h1:Dbz60fpCq04vRxVVVJLbQuL0G7pRt0Gyo2BkozFc4SQ= @@ -139,6 +170,10 @@ github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane 
v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ericchiang/k8s v1.2.0 h1:vxrMwEzY43oxu8aZyD/7b1s8tsBM+xoUoxjWECWFbPI= github.com/ericchiang/k8s v1.2.0/go.mod h1:/OmBgSq2cd9IANnsGHGlEz27nwMZV2YxlpXuQtU3Bz4= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -154,6 +189,9 @@ github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ER github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96 h1:YpooqMW354GG47PXNBiaCv6yCQizyP3MXD9NUPrCEQ8= github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96/go.mod h1:uY+1eqFUyotrQxF1wYFNtMeHp/swbYRsoGzfcPZ8x3o= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= @@ -190,19 +228,31 @@ github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgR github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1-0.20190508161146-9fa652df1129 h1:tT8iWCYw4uOem71yYA3htfH+LNopJvcqZQshm56G5L4= -github.com/golang/mock v1.3.1-0.20190508161146-9fa652df1129/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= @@ -219,12 +269,19 @@ github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= @@ -266,6 +323,7 @@ github.com/hashicorp/serf v0.8.1 h1:mYs6SMzu72+90OcPa5wr3nfznA4Dw9UyR791ZFNOIf4= github.com/hashicorp/serf v0.8.1/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= 
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/influxdata/go-syslog/v2 v2.0.1 h1:l44S4l4Q8MhGQcoOxJpbo+QQYxJqp0vdgIVHh4+DO0s= github.com/influxdata/go-syslog/v2 v2.0.1/go.mod h1:hjvie1UTaD5E1fTnDmxaCw8RRDrT4Ve+XHr5O2dKSCo= github.com/influxdata/tail v1.0.1-0.20180327235535-c43482518d41 h1:HxQo1NpNXQDpvEBzthbQLmePvTLFTa5GzSFUjL03aEs= @@ -293,6 +351,8 @@ github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGu github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/kardianos/service v1.0.0 h1:HgQS3mFfOlyntWX8Oke98JcJLqt1DBcHR4kxShpYef0= @@ -415,6 +475,8 @@ github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -424,6 +486,7 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec h1:6ncX5ko6B9LntYM0YBRXkiSaZMmLYeZ/NWcmeB43mMY= @@ -477,17 +540,26 @@ github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a h1:ILoU84rj4AQ github.com/wvanbergen/kazoo-go 
v0.0.0-20180202103751-f72d8611297a/go.mod h1:vQQATAGxVK20DC1rRubTJbZDDhhpA4QfU02pMdPxGO4= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk= github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= go.opencensus.io v0.20.1 h1:pMEjRZ1M4ebWGikflH7nQpV6+Zr88KBMA2XJD3sbijw= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 h1:+ELyKg6m8UBf0nPFSqD0mi7zUfwPyXo23HNjMnXPz7w= @@ -497,10 +569,38 @@ golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2 h1:y102fOLFqhV41b+4GPiJoa0k/x+pJcEi2/HB1Y5T6fU= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp 
v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -513,24 +613,40 @@ golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a 
h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -544,21 +660,34 @@ golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -567,8 +696,35 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200317043434-63da46f3035e h1:8ogAbHWoJTPepnVbNRqXLOpzMkl0rtRsM7crbflc4XM= +golang.org/x/tools v0.0.0-20200317043434-63da46f3035e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -584,16 +740,55 @@ gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6d gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto 
v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24 h1:IGPykv426z7LZSVPlaPufOyphngM4at5uZ7x5alaFvE= +google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= @@ -601,6 +796,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg= gopkg.in/fatih/pool.v2 
v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= @@ -637,6 +833,11 @@ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81 honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/apimachinery v0.17.1 h1:zUjS3szTxoUjTDYNvdFkYt2uMEXLcthcbp+7uZvWhYM= k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -644,6 +845,9 @@ k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUc k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/scripts/check-deps.sh b/scripts/check-deps.sh index dcdcec029..793a9b49a 100755 --- a/scripts/check-deps.sh +++ b/scripts/check-deps.sh @@ -36,6 +36,10 @@ for dep in $(LC_ALL=C sort -u "${tmpdir}/golist"); do dep=github.com/Azure/go-autorest;; github.com/Azure/go-autorest/*) continue;; + + # single license for all sub modules + cloud.google.com/go/*) + continue;; esac # Remove single and double digit version from path; these are generally not From e376228a1a17526c8611fcf87c602a8bcff7f85b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 18 Mar 2020 13:49:47 -0700 Subject: [PATCH 1641/1815] Update etc/telegraf.conf --- etc/telegraf.conf | 487 +++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 442 insertions(+), 45 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 9621b59f1..bc012ee30 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -131,6 +131,13 @@ ## the default retention policy. Only takes effect when using HTTP. # retention_policy = "" + ## The value of this tag will be used to determine the retention policy. If this + ## tag is not set the 'retention_policy' option is used as the default. + # retention_policy_tag = "" + + ## If true, the 'retention_policy_tag' will not be removed from the metric. 
+ # exclude_retention_policy_tag = false
+
 ## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
 ## Only takes effect when using HTTP.
 # write_consistency = "any"
@@ -577,10 +584,15 @@
# # insecure_skip_verify = false


-# # Send telegraf metrics to graylog(s)
+# # Send telegraf metrics to graylog
# [[outputs.graylog]]
# ## UDP endpoint for your graylog instance.
-# servers = ["127.0.0.1:12201", "192.168.1.1:12201"]
+# servers = ["127.0.0.1:12201"]
+#
+# ## The field to use as the GELF short_message, if unset the static string
+# ## "telegraf" will be used.
+# ## example: short_message_field = "message"
+# # short_message_field = ""


# # Configurable HTTP health check resource based on metrics
@@ -742,6 +754,13 @@
# ## Kafka topic for producer messages
# topic = "telegraf"
#
+# ## The value of this tag will be used as the topic. If not set the 'topic'
+# ## option is used.
+# # topic_tag = ""
+#
+# ## If true, the 'topic_tag' will be removed from the metric.
+# # exclude_topic_tag = false
+#
# ## Optional Client id
# # client_id = "Telegraf"
#
@@ -778,13 +797,21 @@
# # keys = ["foo", "bar"]
# # separator = "_"
#
-# ## Telegraf tag to use as a routing key
-# ## ie, if this tag exists, its value will be used as the routing key
+# ## The routing tag specifies a tagkey on the metric whose value is used as
+# ## the message key. The message key is used to determine which partition to
+# ## send the message to. This tag is preferred over the routing_key option.
# routing_tag = "host"
#
-# ## Static routing key. Used when no routing_tag is set or as a fallback
-# ## when the tag specified in routing tag is not found. If set to "random",
-# ## a random value will be generated for each message.
+# ## The routing key is set as the message key and used to determine which
+# ## partition to send the message to. This value is only used when no
+# ## routing_tag is set or as a fallback when the tag specified in routing tag
+# ## is not found.
+# ##
+# ## If set to "random", a random value will be generated for each message.
+# ##
+# ## When unset, no message key is added and each message is routed to a random
+# ## partition.
+# ##
# ## ex: routing_key = "random"
# ##     routing_key = "telegraf"
# # routing_key = ""
@@ -980,9 +1007,14 @@
# [[outputs.nats]]
# ## URLs of NATS servers
# servers = ["nats://localhost:4222"]
+#
# ## Optional credentials
# # username = ""
# # password = ""
+#
+# ## Optional NATS 2.0 and NATS NGS compatible user credentials
+# # credentials = "/etc/telegraf/nats.creds"
+#
# ## NATS subject for producer messages
# subject = "telegraf"
#
@@ -1266,6 +1298,34 @@
# # default_appname = "Telegraf"


+# # Write metrics to Warp 10
+# [[outputs.warp10]]
+# # Prefix to add to the measurement.
+# prefix = "telegraf."
+#
+# # URL of the Warp 10 server
+# warp_url = "http://localhost:8080"
+#
+# # Write token to access your app on warp 10
+# token = "Token"
+#
+# # Warp 10 query timeout
+# # timeout = "15s"
+#
+# ## Print Warp 10 error body
+# # print_error_body = false
+#
+# ## Max string error size
+# # max_string_error_size = 511
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
# # Configuration for Wavefront server to send metrics to
# [[outputs.wavefront]]
# ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy
@@ -1341,6 +1401,7 @@
# ## select the keys to convert. 
The array may contain globs. # ## = [...] # [processors.converter.tags] +# measurement = [] # string = [] # integer = [] # unsigned = [] @@ -1353,6 +1414,7 @@ # ## select the keys to convert. The array may contain globs. # ## = [...] # [processors.converter.fields] +# measurement = [] # tag = [] # string = [] # integer = [] @@ -1369,6 +1431,20 @@ # ## Date format string, must be a representation of the Go "reference time" # ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". # date_format = "Jan" +# +# ## Offset duration added to the date string when writing the new tag. +# # date_offset = "0s" +# +# ## Timezone to use when creating the tag. This can be set to one of +# ## "UTC", "Local", or to a location name in the IANA Time Zone database. +# ## example: timezone = "America/Los_Angeles" +# # timezone = "UTC" + + +# # Filter metrics with repeating field values +# [[processors.dedup]] +# ## Maximum time to suppress output +# dedup_interval = "600s" # # Map enum values according to given table. @@ -1475,6 +1551,20 @@ # [[processors.rename]] +# # Add the S2 Cell ID as a tag based on latitude and longitude fields +# [[processors.s2geo]] +# ## The name of the lat and lon fields containing WGS-84 latitude and +# ## longitude in decimal degrees. +# # lat_field = "lat" +# # lon_field = "lon" +# +# ## New tag to create +# # tag_key = "s2_cell_id" +# +# ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html) +# # cell_level = 9 + + # # Perform string processing on tags, fields, and measurements # [[processors.strings]] # ## Convert a tag value to uppercase @@ -1486,6 +1576,10 @@ # # field = "uri_stem" # # dest = "uri_stem_normalised" # +# ## Convert a field value to titlecase +# # [[processors.strings.titlecase]] +# # field = "status" +# # ## Trim leading and trailing whitespace using the default cutset # # [[processors.strings.trim]] # # field = "message" @@ -1535,6 +1629,17 @@ # keep = ["foo", "bar", "baz"] +# # Uses a Go template to create a new tag +# [[processors.template]] +# ## Tag to set with the output of the template. +# tag = "topic" +# +# ## Go template used to create the tag value. In order to ease TOML +# ## escaping requirements, you may wish to use single quotes around the +# ## template string. +# template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}' + + # # Print all metrics that pass through this filter. # [[processors.topk]] # ## How many seconds between aggregations @@ -1636,16 +1741,20 @@ # ## of accumulating the results. # reset = false # +# ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added. +# ## Defaults to true. +# cumulative = true +# # ## Example config that aggregates all fields of the metric. # # [[aggregators.histogram.config]] -# # ## The set of buckets. +# # ## Right borders of buckets (with +Inf implicitly added). # # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] # # ## The name of metric. # # measurement_name = "cpu" # # ## Example config that aggregates only specific fields of the metric. # # [[aggregators.histogram.config]] -# # ## The set of buckets. +# # ## Right borders of buckets (with +Inf implicitly added). # # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] # # ## The name of metric. 
# # measurement_name = "diskio" @@ -2123,7 +2232,7 @@ # # Gather health check statuses from services registered in Consul # [[inputs.consul]] # ## Consul server address -# # address = "localhost" +# # address = "localhost:8500" # # ## URI scheme for the Consul server, one of "http", "https" # # scheme = "http" @@ -2823,6 +2932,11 @@ # # insecure_skip_verify = true +# # Gets counters from all InfiniBand cards and ports installed +# [[inputs.infiniband]] +# # no configuration + + # # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints # [[inputs.influxdb]] # ## Works with InfluxDB debug endpoints out of the box, @@ -3136,7 +3250,7 @@ # # Read status information from one or more Kibana servers # [[inputs.kibana]] -# ## specify a list of one or more Kibana servers +# ## Specify a list of one or more Kibana servers # servers = ["http://localhost:5601"] # # ## Timeout for HTTP requests @@ -3388,6 +3502,80 @@ # # tagdrop = ["server"] +# # Retrieve data from MODBUS slave devices +# [[inputs.modbus]] +# ## Connection Configuration +# ## +# ## The plugin supports connections to PLCs via MODBUS/TCP or +# ## via serial line communication in binary (RTU) or readable (ASCII) encoding +# ## +# ## Device name +# name = "Device" +# +# ## Slave ID - addresses a MODBUS device on the bus +# ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] +# slave_id = 1 +# +# ## Timeout for each request +# timeout = "1s" +# +# # TCP - connect via Modbus/TCP +# controller = "tcp://localhost:502" +# +# # Serial (RS485; RS232) +# #controller = "file:///dev/ttyUSB0" +# #baud_rate = 9600 +# #data_bits = 8 +# #parity = "N" +# #stop_bits = 1 +# #transmission_mode = "RTU" +# +# +# ## Measurements +# ## +# +# ## Digital Variables, Discrete Inputs and Coils +# ## name - the variable name +# ## address - variable address +# +# discrete_inputs = [ +# { name = "start", address = [0]}, +# { name = "stop", address = [1]}, +# { name = "reset", address = [2]}, +# { name = "emergency_stop", address = [3]}, +# ] +# coils = [ +# { name = "motor1_run", address = [0]}, +# { name = "motor1_jog", address = [1]}, +# { name = "motor1_stop", address = [2]}, +# ] +# +# ## Analog Variables, Input Registers and Holding Registers +# ## name - the variable name +# ## byte_order - the ordering of bytes +# ## |---AB, ABCD - Big Endian +# ## |---BA, DCBA - Little Endian +# ## |---BADC - Mid-Big Endian +# ## |---CDAB - Mid-Little Endian +# ## data_type - UINT16, INT16, INT32, UINT32, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation) +# ## scale - the final numeric variable representation +# ## address - variable address +# +# holding_registers = [ +# { name = "power_factor", byte_order = "AB", data_type = "FLOAT32", scale=0.01, address = [8]}, +# { name = "voltage", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [0]}, +# { name = "energy", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [5,6]}, +# { name = "current", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [1,2]}, +# { name = "frequency", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [7]}, +# { name = "power", byte_order = "ABCD", data_type = "FLOAT32", scale=0.1, address = [3,4]}, +# ] +# input_registers = [ +# { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, +# { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, +# { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, +# ] + + # # 
Read metrics from one or many MongoDB servers
# [[inputs.mongodb]]
# ## An array of URLs of the form:
@@ -3415,6 +3603,26 @@
# # insecure_skip_verify = false


+# # Read metrics and status information about processes managed by Monit
+# [[inputs.monit]]
+# ## Monit HTTPD address
+# address = "http://127.0.0.1:2812"
+#
+# ## Username and Password for Monit
+# # username = ""
+# # password = ""
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
# # Aggregates the contents of multiple files into a single point
# [[inputs.multifile]]
# ## Base directory where telegraf will look for files.
@@ -3772,7 +3980,7 @@
# # timeout = "5ms"


-# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver 
+# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver
# [[inputs.opensmtpd]]
# ## If running as a restricted user you can prepend sudo for additional access:
# #use_sudo = false
@@ -4672,6 +4880,10 @@
# ## Timeout for SSL connection
# # timeout = "5s"
#
+# ## Pass a different name into the TLS request (Server Name Indication)
+# ## example: server_name = "myhost.example.org"
+# # server_name = ""
+#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
@@ -4905,6 +5117,73 @@
# ifstats = "ietf-interfaces:interfaces-state/interface/statistics"


+# # Read metrics from one or many ClickHouse servers
+# [[inputs.clickhouse]]
+# ## Username for authorization on ClickHouse server
+# ## example: username = "default"
+# username = "default"
+#
+# ## Password for authorization on ClickHouse server
+# ## example: password = "super_secret"
+#
+# ## HTTP(s) timeout while getting metrics values
+# ## The timeout includes connection time, any redirects, and reading the response body.
+# ## example: timeout = "1s"
+# # timeout = "5s"
+#
+# ## List of servers for metrics scraping
+# ## metrics are scraped via the HTTP(s) ClickHouse interface
+# ## https://clickhouse.tech/docs/en/interfaces/http/
+# ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"]
+# servers = ["http://127.0.0.1:8123"]
+#
+# ## If "auto_discovery" is "true", the plugin tries to connect to all servers available in the cluster
+# ## using the same credentials given in the "username" and "password" parameters,
+# ## taking the server hostname list from the "system.clusters" table
+# ## see
+# ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
+# ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
+# ## example: auto_discovery = false
+# # auto_discovery = true
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter is present, a "WHERE cluster IN (...)" filter will apply
+# ## please use only full cluster names here; regexp and glob filters are not allowed
+# ## for "/etc/clickhouse-server/config.d/remote.xml"
+# ## <remote_servers>
+# ##   <my-own-cluster>
+# ##     <shard>
+# ##       <replica><host>clickhouse-ru-1.local</host><port>9000</port></replica>
+# ##       <replica><host>clickhouse-ru-2.local</host><port>9000</port></replica>
+# ##     </shard>
+# ##     <shard>
+# ##       <replica><host>clickhouse-eu-1.local</host><port>9000</port></replica>
+# ##       <replica><host>clickhouse-eu-2.local</host><port>9000</port></replica>
+# ##     </shard>
+# ##   </my-own-cluster>
+# ## </remote_servers>
+# ##
+# ## example: cluster_include = ["my-own-cluster"]
+# # cluster_include = []
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter is present, a "WHERE cluster NOT IN (...)" filter will apply
+# ## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
+# # cluster_exclude = []
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
# # Read metrics from Google PubSub
# [[inputs.cloud_pubsub]]
# ## Required. Name of Google Cloud Platform (GCP) Project that owns
@@ -5071,9 +5350,113 @@
# # insecure_skip_verify = false


-# # Influx HTTP write listener
+# # Azure Event Hubs service input plugin
+# [[inputs.eventhub_consumer]]
+# ## The default behavior is to create a new Event Hub client from environment variables.
+# ## This requires one of the following sets of environment variables to be set:
+# ##
+# ## 1) Expected Environment Variables:
+# ##    - "EVENTHUB_NAMESPACE"
+# ##    - "EVENTHUB_NAME"
+# ##    - "EVENTHUB_CONNECTION_STRING"
+# ##
+# ## 2) Expected Environment Variables:
+# ##    - "EVENTHUB_NAMESPACE"
+# ##    - "EVENTHUB_NAME"
+# ##    - "EVENTHUB_KEY_NAME"
+# ##    - "EVENTHUB_KEY_VALUE"
+#
+# ## Uncommenting the option below will create an Event Hub client based solely on the connection string.
+# ## This can either be the associated environment variable or hard coded directly.
+# # connection_string = ""
+#
+# ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister
+# # persistence_dir = ""
+#
+# ## Change the default consumer group
+# # consumer_group = ""
+#
+# ## By default the event hub receives all messages present on the broker, alternative modes can be set below.
+# ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339).
+# ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run). +# # from_timestamp = +# # latest = true +# +# ## Set a custom prefetch count for the receiver(s) +# # prefetch_count = 1000 +# +# ## Add an epoch to the receiver(s) +# # epoch = 0 +# +# ## Change to set a custom user agent, "telegraf" is used by default +# # user_agent = "telegraf" +# +# ## To consume from a specific partition, set the partition_ids option. +# ## An empty array will result in receiving from all partitions. +# # partition_ids = ["0","1"] +# +# ## Max undelivered messages +# # max_undelivered_messages = 1000 +# +# ## Set either option below to true to use a system property as timestamp. +# ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime. +# ## It is recommended to use this setting when the data itself has no timestamp. +# # enqueued_time_as_ts = true +# # iot_hub_enqueued_time_as_ts = true +# +# ## Tags or fields to create from keys present in the application property bag. +# ## These could for example be set by message enrichments in Azure IoT Hub. +# # application_property_tags = [] +# # application_property_fields = [] +# +# ## Tag or field name to use for metadata +# ## By default all metadata is disabled +# # sequence_number_field = "SequenceNumber" +# # enqueued_time_field = "EnqueuedTime" +# # offset_field = "Offset" +# # partition_id_tag = "PartitionID" +# # partition_key_tag = "PartitionKey" +# # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID" +# # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID" +# # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod" +# # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID" +# # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Run executable as long-running input plugin +# [[inputs.execd]] +# ## Program to run as daemon +# command = ["telegraf-smartctl", "-d", "/dev/sda"] +# +# ## Define how the process is signaled on each collection interval. +# ## Valid values are: +# ## "none" : Do not signal anything. +# ## The process must output metrics by itself. +# ## "STDIN" : Send a newline on STDIN. +# ## "SIGHUP" : Send a HUP signal. Not available on Windows. +# ## "SIGUSR1" : Send a USR1 signal. Not available on Windows. +# ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. +# signal = "none" +# +# ## Delay before the process is restarted after an unexpected termination +# restart_delay = "10s" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Accept metrics over InfluxDB 1.x HTTP API # [[inputs.http_listener]] -# ## Address and port to host HTTP listener on +# ## Address and port to host InfluxDB listener on # service_address = ":8186" # # ## maximum duration before timing out read of the request @@ -5081,14 +5464,9 @@ # ## maximum duration before timing out write of the response # write_timeout = "10s" # -# ## Maximum allowed http request body size in bytes. 
-# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) -# max_body_size = "500MiB" -# -# ## Maximum line size allowed to be sent in bytes. -# ## 0 means to use the default of 65536 bytes (64 kibibytes) -# max_line_size = "64KiB" -# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# max_body_size = "32MiB" # # ## Optional tag name used to store the database. # ## If the write has a database in the query string then it will be kept in this tag name. @@ -5154,9 +5532,9 @@ # data_format = "influx" -# # Influx HTTP write listener +# # Accept metrics over InfluxDB 1.x HTTP API # [[inputs.influxdb_listener]] -# ## Address and port to host HTTP listener on +# ## Address and port to host InfluxDB listener on # service_address = ":8186" # # ## maximum duration before timing out read of the request @@ -5164,14 +5542,9 @@ # ## maximum duration before timing out write of the response # write_timeout = "10s" # -# ## Maximum allowed http request body size in bytes. -# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) -# max_body_size = "500MiB" -# -# ## Maximum line size allowed to be sent in bytes. -# ## 0 means to use the default of 65536 bytes (64 kibibytes) -# max_line_size = "64KiB" -# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# max_body_size = "32MiB" # # ## Optional tag name used to store the database. # ## If the write has a database in the query string then it will be kept in this tag name. @@ -5393,6 +5766,14 @@ # table_name = "default" +# # Read metrics off Arista LANZ, via socket +# [[inputs.lanz]] +# ## URL to Arista LANZ endpoint +# servers = [ +# "tcp://127.0.0.1:50001" +# ] + + # # Stream and parse log file(s). # [[inputs.logparser]] # ## Log files to parse. @@ -5529,6 +5910,9 @@ # # username = "" # # password = "" # +# ## Optional NATS 2.0 and NATS NGS compatible user credentials +# # credentials = "/etc/telegraf/nats.creds" +# # ## Use Transport Layer Security # # secure = false # @@ -5595,10 +5979,10 @@ # # Read metrics from one or many pgbouncer servers # [[inputs.pgbouncer]] # ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@host:port[/dbname]\ +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ # ## ?sslmode=[disable|verify-ca|verify-full] # ## or a simple string: -# ## host=localhost port=5432 user=pqotest password=... sslmode=... dbname=app_production +# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production # ## # ## All connection parameters are optional. # ## @@ -5608,10 +5992,10 @@ # # Read metrics from one or many postgresql servers # [[inputs.postgresql]] # ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@host:port[/dbname]\ +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ # ## ?sslmode=[disable|verify-ca|verify-full] # ## or a simple string: -# ## host=localhost port=5432 user=pqotest password=... sslmode=... dbname=app_production +# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production # ## # ## All connection parameters are optional. # ## @@ -5620,7 +6004,7 @@ # ## connection with the server and doesn't restrict the databases we are trying # ## to grab metrics for. # ## -# address = "host=localhost port=5432 user=postgres sslmode=disable" +# address = "host=localhost user=postgres sslmode=disable" # ## A custom name for the database that will be used as the "server" tag in the # ## measurement output. 
If not specified, a default one generated from # ## the connection address is used. @@ -5643,10 +6027,10 @@ # # Read metrics from one or many postgresql servers # [[inputs.postgresql_extensible]] # ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@host:port[/dbname]\ +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ # ## ?sslmode=[disable|verify-ca|verify-full] # ## or a simple string: -# ## host=localhost port=5432 user=pqotest password=... sslmode=... dbname=app_production +# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production # # # ## All connection parameters are optional. # # ## Without the dbname parameter, the driver will default to a database @@ -5654,7 +6038,7 @@ # ## connection with the server and doesn't restrict the databases we are trying # ## to grab metrics for. # # -# address = "host=localhost port=5432 user=postgres sslmode=disable" +# address = "host=localhost user=postgres sslmode=disable" # # ## connection configuration. # ## maxlifetime - specify the maximum lifetime of a connection. @@ -5740,6 +6124,11 @@ # ## Restricts Kubernetes monitoring to a single namespace # ## ex: monitor_kubernetes_pods_namespace = "default" # # monitor_kubernetes_pods_namespace = "" +# # label selector to target pods which have the label +# # kubernetes_label_selector = "env=dev,app=nginx" +# # field selector to target pods +# # eg. To scrape pods on a specific node +# # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" # # ## Use bearer token for authorization. ('bearer_token' takes priority) # # bearer_token = "/path/to/bearer/token" @@ -5762,6 +6151,19 @@ # # insecure_skip_verify = false +# # SFlow V5 Protocol Listener +# [[inputs.sflow]] +# ## Address to listen for sFlow packets. +# ## example: service_address = "udp://:6343" +# ## service_address = "udp4://:6343" +# ## service_address = "udp6://:6343" +# service_address = "udp://:6343" +# +# ## Set the size of the operating system's receive buffer. +# ## example: read_buffer_size = "64KiB" +# # read_buffer_size = "" + + # # Receive SNMP traps # [[inputs.snmp_trap]] # ## Transport, local address, and port to listen on. 
Transport must @@ -6135,11 +6537,6 @@ # # collect_concurrency = 1 # # discover_concurrency = 1 # -# ## whether or not to force discovery of new objects on initial gather call before collecting metrics -# ## when true for large environments this may cause errors for time elapsed while collecting metrics -# ## when false (default) the first collection cycle may result in no or limited metrics while objects are discovered -# # force_discover_on_init = false -# # ## the interval before (re)discovering objects subject to metrics collection (default: 300s) # # object_discovery_interval = "300s" # From ccdd3cd24562b57841c4c8211ece32dfda4b4283 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 18 Mar 2020 16:09:53 -0700 Subject: [PATCH 1642/1815] Update github.com/prometheus/client_golang to latest (#7200) --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 12 ++++++------ go.sum | 31 +++++++++++++++++++++++++++++++ 3 files changed, 38 insertions(+), 6 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 6df679d58..0c7436941 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -28,6 +28,7 @@ following works: - github.com/beorn7/perks [MIT License](https://github.com/beorn7/perks/blob/master/LICENSE) - github.com/caio/go-tdigest [MIT License](https://github.com/caio/go-tdigest/blob/master/LICENSE) - github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE) +- github.com/cespare/xxhash [MIT License](https://github.com/cespare/xxhash/blob/master/LICENSE.txt) - github.com/cisco-ie/nx-telemetry-proto [Apache License 2.0](https://github.com/cisco-ie/nx-telemetry-proto/blob/master/LICENSE) - github.com/couchbase/go-couchbase [MIT License](https://github.com/couchbase/go-couchbase/blob/master/LICENSE) - github.com/couchbase/gomemcached [MIT License](https://github.com/couchbase/gomemcached/blob/master/LICENSE) diff --git a/go.mod b/go.mod index ced5b3c67..f2d7c417a 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/Shopify/sarama v1.24.1 github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 github.com/aerospike/aerospike-client-go v1.27.0 - github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf + github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 github.com/apache/thrift v0.12.0 github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect @@ -101,15 +101,15 @@ require ( github.com/opentracing/opentracing-go v1.0.2 // indirect github.com/openzipkin/zipkin-go-opentracing v0.3.4 github.com/pkg/errors v0.8.1 - github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 - github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 - github.com/prometheus/common v0.2.0 + github.com/prometheus/client_golang v1.5.1 + github.com/prometheus/client_model v0.2.0 + github.com/prometheus/common v0.9.1 github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect github.com/shirou/gopsutil v2.20.2+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect - github.com/sirupsen/logrus v1.2.0 + github.com/sirupsen/logrus v1.4.2 github.com/soniah/gosnmp v1.22.0 github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 github.com/stretchr/testify v1.4.0 @@ -142,7 +142,7 
@@ require ( gopkg.in/ldap.v3 v3.1.0 gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce gopkg.in/olivere/elastic.v5 v5.0.70 - gopkg.in/yaml.v2 v2.2.4 + gopkg.in/yaml.v2 v2.2.5 gotest.tools v2.2.0+incompatible // indirect honnef.co/go/tools v0.0.1-2020.1.3 // indirect k8s.io/apimachinery v0.17.1 // indirect diff --git a/go.sum b/go.sum index 18ff65cfe..8f19c9966 100644 --- a/go.sum +++ b/go.sum @@ -94,8 +94,11 @@ github.com/aerospike/aerospike-client-go v1.27.0 h1:VC6/Wqqm3Qlp4/utM7Zts3cv4A2H github.com/aerospike/aerospike-client-go v1.27.0/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= @@ -111,6 +114,9 @@ github.com/aws/aws-sdk-go v1.19.41 h1:veutzvQP/lOmYmtX26S9mTFJLO6sp7/UsxFcCjglu4 github.com/aws/aws-sdk-go v1.19.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= @@ -120,6 +126,8 @@ github.com/caio/go-tdigest v2.3.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8 github.com/cenkalti/backoff v2.0.0+incompatible h1:5IIPUHhlnUZbcHQsQou5k1Tn58nJkeJL9U+ig5CHJbY= github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod 
h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -193,6 +201,7 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -349,7 +358,9 @@ github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+ github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4 h1:nwOc1YaOrYJ37sEBrtWZrdqzK22hiJs3GpDmP3sR2Yw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= @@ -472,18 +483,30 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -499,6 +522,8 @@ github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWW github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/soniah/gosnmp v1.22.0 h1:jVJi8+OGvR+JHIaZKMmnyNP0akJd2vEgNatybwhZvxg= github.com/soniah/gosnmp v1.22.0/go.mod h1:DuEpAS0az51+DyVBQwITDsoq4++e3LTNckp2GoasF2I= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -616,6 +641,7 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -660,6 +686,7 @@ golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -796,6 +823,8 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg= gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY= @@ -828,6 +857,8 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 2a4578169712c0fdc1657d38761e09d05eb809a6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 18 Mar 2020 16:13:14 -0700 Subject: [PATCH 1643/1815] Fix datastore_include option in vsphere input readme --- plugins/inputs/vsphere/README.md | 27 +++++++++++---------------- plugins/inputs/vsphere/vsphere.go | 25 ++++++++++++++++++++----- 2 files changed, 31 insertions(+), 21 deletions(-) diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md index f69bd2862..6f2e35029 100644 --- a/plugins/inputs/vsphere/README.md +++ b/plugins/inputs/vsphere/README.md @@ -8,7 +8,7 @@ The VMware vSphere plugin uses the vSphere API to gather metrics from multiple v * Datastores ## Supported versions of vSphere -This plugin supports vSphere version 5.5 through 6.7. +This plugin supports vSphere version 5.5 through 6.7. ## Configuration @@ -125,7 +125,7 @@ vm_metric_exclude = [ "*" ] ] ## Collect IP addresses? 
Valid values are "ipv4" and "ipv6" # ip_addresses = ["ipv6", "ipv4" ] - + # host_metric_exclude = [] ## Nothing excluded by default # host_instances = true ## true by default @@ -138,8 +138,8 @@ vm_metric_exclude = [ "*" ] # cluster_instances = false ## false by default ## Datastores - # cluster_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected) - # cluster_exclude = [] # Inventory paths to exclude + # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected) + # datastore_exclude = [] # Inventory paths to exclude # datastore_metric_include = [] ## if omitted or empty, all metrics are collected # datastore_metric_exclude = [] ## Nothing excluded by default # datastore_instances = false ## false by default @@ -167,11 +167,6 @@ vm_metric_exclude = [ "*" ] # collect_concurrency = 1 # discover_concurrency = 1 - ## whether or not to force discovery of new objects on initial gather call before collecting metrics - ## when true for large environments this may cause errors for time elapsed while collecting metrics - ## when false (default) the first collection cycle may result in no or limited metrics while objects are discovered - # force_discover_on_init = false - ## the interval before (re)discovering objects subject to metrics collection (default: 300s) # object_discovery_interval = "300s" @@ -185,17 +180,17 @@ vm_metric_exclude = [ "*" ] ## the plugin. Setting this flag to "false" will send values as floats to ## preserve the full precision when averaging takes place. # use_int_samples = true - + ## Custom attributes from vCenter can be very useful for queries in order to slice the ## metrics along different dimension and for forming ad-hoc relationships. They are disabled ## by default, since they can add a considerable amount of tags to the resulting metrics. To ## enable, simply set custom_attribute_exlude to [] (empty set) and use custom_attribute_include - ## to select the attributes you want to include. - # by default, since they can add a considerable amount of tags to the resulting metrics. To - # enable, simply set custom_attribute_exlude to [] (empty set) and use custom_attribute_include - # to select the attributes you want to include. + ## to select the attributes you want to include. + ## By default, since they can add a considerable amount of tags to the resulting metrics. To + ## enable, simply set custom_attribute_exlude to [] (empty set) and use custom_attribute_include + ## to select the attributes you want to include. # custom_attribute_include = [] - # custom_attribute_exclude = ["*"] # Default is to exclude everything + # custom_attribute_exclude = ["*"] ## Optional SSL Config # ssl_ca = "/path/to/cafile" @@ -264,7 +259,7 @@ to a file system. A vSphere inventory has a structure similar to this: #### Using Inventory Paths Using familiar UNIX-style paths, one could select e.g. VM2 with the path ```/DC0/vm/VM2```. -Often, we want to select a group of resource, such as all the VMs in a folder. We could use the path ```/DC0/vm/Folder1/*``` for that. +Often, we want to select a group of resource, such as all the VMs in a folder. We could use the path ```/DC0/vm/Folder1/*``` for that. Another possibility is to select objects using a partial name, such as ```/DC0/vm/Folder1/hadoop*``` yielding all vms in Folder1 with a name starting with "hadoop". 
diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go
index bc4042980..141b25599 100644
--- a/plugins/inputs/vsphere/vsphere.go
+++ b/plugins/inputs/vsphere/vsphere.go
@@ -75,6 +75,8 @@ var sampleConfig = `

  ## VMs
  ## Typical VM metrics (if omitted or empty, all metrics are collected)
+  # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected)
+  # vm_exclude = [] # Inventory paths to exclude
  vm_metric_include = [
    "cpu.demand.average",
    "cpu.idle.summation",
@@ -116,6 +118,8 @@ var sampleConfig = `

  ## Hosts
  ## Typical host metrics (if omitted or empty, all metrics are collected)
+  # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected)
+  # host_exclude = [] # Inventory paths to exclude
  host_metric_include = [
    "cpu.coreUtilization.average",
    "cpu.costop.summation",
@@ -164,27 +168,35 @@ var sampleConfig = `
    "storageAdapter.write.average",
    "sys.uptime.latest",
  ]
-  ## Collect IP addresses? Valid values are "ipv4" and "ipv6"
+  ## Collect IP addresses? Valid values are "ipv4" and "ipv6"
  # ip_addresses = ["ipv6", "ipv4" ]
+
  # host_metric_exclude = [] ## Nothing excluded by default
  # host_instances = true ## true by default

+
  ## Clusters
+  # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
+  # cluster_exclude = [] # Inventory paths to exclude
  # cluster_metric_include = [] ## if omitted or empty, all metrics are collected
  # cluster_metric_exclude = [] ## Nothing excluded by default
  # cluster_instances = false ## false by default

  ## Datastores
+  # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected)
+  # datastore_exclude = [] # Inventory paths to exclude
  # datastore_metric_include = [] ## if omitted or empty, all metrics are collected
  # datastore_metric_exclude = [] ## Nothing excluded by default
-  # datastore_instances = false ## false by default for Datastores only
+  # datastore_instances = false ## false by default

  ## Datacenters
+  # datacenter_include = [ "/*/host/**"] # Inventory path to datacenters to collect (by default all are collected)
+  # datacenter_exclude = [] # Inventory paths to exclude
  datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
  datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
-  # datacenter_instances = false ## false by default for Datastores only
+  # datacenter_instances = false ## false by default

-  ## Plugin Settings
+  ## Plugin Settings
  ## separator character to use for measurement and field names (default: "_")
  # separator = "_"
@@ -219,8 +231,11 @@ var sampleConfig = `
  ## by default, since they can add a considerable amount of tags to the resulting metrics. To
  ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
  ## to select the attributes you want to include.
+  ## By default, since they can add a considerable amount of tags to the resulting metrics. To
+  ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
+  ## to select the attributes you want to include. 
# custom_attribute_include = []
-  # custom_attribute_exclude = ["*"]
+  # custom_attribute_exclude = ["*"]

  ## Optional SSL Config
  # ssl_ca = "/path/to/cafile"

From 6a2d35956c6bc9475c4fe65be9eb7641550fc284 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 18 Mar 2020 16:15:15 -0700
Subject: [PATCH 1644/1815] Update etc/telegraf.conf

---
 etc/telegraf.conf | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/etc/telegraf.conf b/etc/telegraf.conf
index bc012ee30..52f3b7fe7 100644
--- a/etc/telegraf.conf
+++ b/etc/telegraf.conf
@@ -6412,6 +6412,8 @@
#
# ## VMs
# ## Typical VM metrics (if omitted or empty, all metrics are collected)
+# # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected)
+# # vm_exclude = [] # Inventory paths to exclude
# vm_metric_include = [
#   "cpu.demand.average",
#   "cpu.idle.summation",
@@ -6453,6 +6455,8 @@
#
# ## Hosts
# ## Typical host metrics (if omitted or empty, all metrics are collected)
+# # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected)
+# # host_exclude = [] # Inventory paths to exclude
# host_metric_include = [
#   "cpu.coreUtilization.average",
#   "cpu.costop.summation",
@@ -6501,25 +6505,33 @@
#   "storageAdapter.write.average",
#   "sys.uptime.latest",
# ]
-# ## Collect IP addresses? Valid values are "ipv4" and "ipv6"
+# ## Collect IP addresses? Valid values are "ipv4" and "ipv6"
# # ip_addresses = ["ipv6", "ipv4" ]
+#
# # host_metric_exclude = [] ## Nothing excluded by default
# # host_instances = true ## true by default
#
+#
# ## Clusters
+# # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
+# # cluster_exclude = [] # Inventory paths to exclude
# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected
# # cluster_metric_exclude = [] ## Nothing excluded by default
# # cluster_instances = false ## false by default
#
# ## Datastores
+# # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected)
+# # datastore_exclude = [] # Inventory paths to exclude
# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected
# # datastore_metric_exclude = [] ## Nothing excluded by default
-# # datastore_instances = false ## false by default for Datastores only
+# # datastore_instances = false ## false by default
#
# ## Datacenters
+# # datacenter_include = [ "/*/host/**"] # Inventory path to datacenters to collect (by default all are collected)
+# # datacenter_exclude = [] # Inventory paths to exclude
# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
-# # datacenter_instances = false ## false by default for Datastores only
+# # datacenter_instances = false ## false by default
#
# ## Plugin Settings
# ## separator character to use for measurement and field names (default: "_")
@@ -6556,6 +6568,9 @@
# ## by default, since they can add a considerable amount of tags to the resulting metrics. To
# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
# ## to select the attributes you want to include.
+# ## By default, since they can add a considerable amount of tags to the resulting metrics. To
+# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
+# ## to select the attributes you want to include. 
# # custom_attribute_include = [] # # custom_attribute_exclude = ["*"] # From 1d697dd32322f2e51df6aa4ac9d520d63843b57e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 18 Mar 2020 16:21:48 -0700 Subject: [PATCH 1645/1815] Update next_version on master to 1.15.0 --- scripts/build.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/build.py b/scripts/build.py index 2c2d0be76..7b3601bb5 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -101,7 +101,7 @@ supported_packages = { "freebsd": [ "tar" ] } -next_version = '1.14.0' +next_version = '1.15.0' ################ #### Telegraf Functions From a907edc1f3afe920530f0534cd20ffbf9d8cd0ee Mon Sep 17 00:00:00 2001 From: Harshit Bansal Date: Tue, 24 Mar 2020 00:03:06 +0530 Subject: [PATCH 1646/1815] Fix url encoding of job names in jenkins input plugin (#7211) --- plugins/inputs/jenkins/jenkins.go | 14 +++- plugins/inputs/jenkins/jenkins_test.go | 90 ++++++++++++++++++++++++-- 2 files changed, 95 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go index d6d326922..b18dc5430 100644 --- a/plugins/inputs/jenkins/jenkins.go +++ b/plugins/inputs/jenkins/jenkins.go @@ -73,7 +73,7 @@ const sampleConfig = ` ## Optional Sub Job Per Layer ## In workflow-multibranch-plugin, each branch will be created as a sub job. - ## This config will limit to call only the lasted branches in each layer, + ## This config will limit to call only the lasted branches in each layer, ## empty will use default value 10 # max_subjob_per_layer = 10 @@ -442,12 +442,20 @@ func (jr jobRequest) combined() []string { return append(jr.parents, jr.name) } +func (jr jobRequest) combinedEscaped() []string { + jobs := jr.combined() + for index, job := range jobs { + jobs[index] = url.PathEscape(job) + } + return jobs +} + func (jr jobRequest) URL() string { - return "/job/" + strings.Join(jr.combined(), "/job/") + jobPath + return "/job/" + strings.Join(jr.combinedEscaped(), "/job/") + jobPath } func (jr jobRequest) buildURL(number int64) string { - return "/job/" + strings.Join(jr.combined(), "/job/") + "/" + strconv.Itoa(int(number)) + jobPath + return "/job/" + strings.Join(jr.combinedEscaped(), "/job/") + "/" + strconv.Itoa(int(number)) + jobPath } func (jr jobRequest) hierarchyName() string { diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go index 6c281390e..bf8ffb19d 100644 --- a/plugins/inputs/jenkins/jenkins_test.go +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -16,12 +16,14 @@ import ( func TestJobRequest(t *testing.T) { tests := []struct { - input jobRequest - output string + input jobRequest + hierarchyName string + URL string }{ { jobRequest{}, "", + "", }, { jobRequest{ @@ -29,12 +31,26 @@ func TestJobRequest(t *testing.T) { parents: []string{"3", "2"}, }, "3/2/1", + "/job/3/job/2/job/1/api/json", + }, + { + jobRequest{ + name: "job 3", + parents: []string{"job 1", "job 2"}, + }, + "job 1/job 2/job 3", + "/job/job%201/job/job%202/job/job%203/api/json", }, } for _, test := range tests { - output := test.input.hierarchyName() - if output != test.output { - t.Errorf("Expected %s, got %s\n", test.output, output) + hierarchyName := test.input.hierarchyName() + URL := test.input.URL() + if hierarchyName != test.hierarchyName { + t.Errorf("Expected %s, got %s\n", test.hierarchyName, hierarchyName) + } + + if test.URL != "" && URL != test.URL { + t.Errorf("Expected %s, got %s\n", test.URL, URL) } } } @@ -66,7 +82,7 @@ type mockHandler struct { } 
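// A note on the lookup in ServeHTTP below: r.URL.Path holds the *decoded*
// request path (e.g. "/job/job 1/api/json"), whereas r.URL.RequestURI()
// preserves the percent-escaping produced by url.PathEscape
// (e.g. "/job/job%201/api/json"), so the escaped keys in responseMap can be
// matched verbatim. A minimal, hypothetical sketch of the escaping these
// tests exercise (not part of the test suite itself):
//
//	segs := []string{"job 1", "job 2"}
//	for i, s := range segs {
//		segs[i] = url.PathEscape(s)
//	}
//	uri := "/job/" + strings.Join(segs, "/job/") + "/api/json"
//	// uri == "/job/job%201/job/job%202/api/json"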
func (h mockHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - o, ok := h.responseMap[r.URL.Path] + o, ok := h.responseMap[r.URL.RequestURI()] if !ok { w.WriteHeader(http.StatusNotFound) return @@ -549,6 +565,43 @@ func TestGatherJobs(t *testing.T) { }, }, }, + { + name: "gather metrics for jobs with space", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "job 1"}, + }, + }, + "/job/job%201/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 3, + }, + }, + "/job/job%201/3/api/json": &buildResponse{ + Building: false, + Result: "SUCCESS", + Duration: 25558, + Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "name": "job 1", + "result": "SUCCESS", + }, + Fields: map[string]interface{}{ + "duration": int64(25558), + "result_code": 0, + }, + }, + }, + }, + }, { name: "gather sub jobs, jobs filter", input: mockHandler{ @@ -582,6 +635,8 @@ func TestGatherJobs(t *testing.T) { {Name: "PR-100"}, {Name: "PR-101"}, {Name: "PR-ignore2"}, + {Name: "PR 1"}, + {Name: "PR ignore"}, }, }, "/job/apps/job/k8s-cloud/job/PR-100/api/json": &jobResponse{ @@ -594,6 +649,11 @@ func TestGatherJobs(t *testing.T) { Number: 4, }, }, + "/job/apps/job/k8s-cloud/job/PR%201/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 1, + }, + }, "/job/apps/job/chronograf/1/api/json": &buildResponse{ Building: false, Result: "FAILURE", @@ -612,10 +672,27 @@ func TestGatherJobs(t *testing.T) { Duration: 91558, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, + "/job/apps/job/k8s-cloud/job/PR%201/1/api/json": &buildResponse{ + Building: false, + Result: "SUCCESS", + Duration: 87832, + Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, + }, }, }, output: &testutil.Accumulator{ Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "name": "PR 1", + "parents": "apps/k8s-cloud", + "result": "SUCCESS", + }, + Fields: map[string]interface{}{ + "duration": int64(87832), + "result_code": 0, + }, + }, { Tags: map[string]string{ "name": "PR-100", @@ -666,6 +743,7 @@ func TestGatherJobs(t *testing.T) { "ignore-1", "apps/ignore-all/*", "apps/k8s-cloud/PR-ignore2", + "apps/k8s-cloud/PR ignore", }, } te := j.initialize(&http.Client{Transport: &http.Transport{}}) From ba138b8f5b61c918b1d8e0f59cbfae155b6e7fed Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 23 Mar 2020 11:34:39 -0700 Subject: [PATCH 1647/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2c613a711..ab5aa9f0e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -82,6 +82,7 @@ - [#7119](https://github.com/influxdata/telegraf/pull/7119): Fix internal metrics for output split into multiple lines. - [#7021](https://github.com/influxdata/telegraf/pull/7021): Fix schedulers query compatibility with pre SQL-2016. - [#7182](https://github.com/influxdata/telegraf/pull/7182): Set headers on influxdb_listener ping URL. +- [#7165](https://github.com/influxdata/telegraf/issues/7165): Fix url encoding of job names in jenkins input plugin. 
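A standalone sketch of the escaping behavior the fix above relies on: job names containing spaces (or other reserved characters) must be percent-encoded per path segment before being joined into the Jenkins API URL. The folder and job names below are invented; `url.PathEscape` is the same call the new `combinedEscaped` helper uses.

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// Hypothetical multibranch job nested under a folder with a space.
	segments := []string{"apps", "k8s cloud", "PR 1"}
	for i, s := range segments {
		segments[i] = url.PathEscape(s) // "k8s cloud" -> "k8s%20cloud"
	}
	fmt.Println("/job/" + strings.Join(segments, "/job/") + "/api/json")
	// Prints: /job/apps/job/k8s%20cloud/job/PR%201/api/json
}
```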
## v1.13.4 [2020-02-25] From 3e1c7a894870e80b522bd9a2fa13b6a29f2f00fb Mon Sep 17 00:00:00 2001 From: AnastasiyaRagozina Date: Mon, 23 Mar 2020 22:33:10 +0300 Subject: [PATCH 1648/1815] Add commands stats to mongodb input plugin (#6905) --- plugins/inputs/mongodb/README.md | 12 ++ plugins/inputs/mongodb/mongodb_data.go | 17 ++ plugins/inputs/mongodb/mongodb_data_test.go | 173 ++++++++++++-------- plugins/inputs/mongodb/mongostat.go | 51 ++++++ 4 files changed, 187 insertions(+), 66 deletions(-) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index 6f9fa3995..8202eb023 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -69,14 +69,24 @@ by running Telegraf with the `--debug` argument. - cursor_pinned_count (integer) - cursor_total_count (integer) - deletes (integer) + - delete_command_total (integer) + - delete_command_failed (integer) - document_deleted (integer) - document_inserted (integer) - document_returned (integer) - document_updated (integer) + - find_command_total (integer) + - find_command_failed (integer) + - find_and_modify_command_total (integer) + - find_and_modify_command_failed (integer) - flushes (integer) - flushes_total_time_ns (integer) - getmores (integer) + - get_more_command_total (integer) + - get_more_command_failed (integer) - inserts (integer) + - insert_command_total (integer) + - insert_command_failed (integer) - jumbo_chunks (integer) - latency_commands_count (integer) - latency_commands (integer) @@ -110,6 +120,8 @@ by running Telegraf with the `--debug` argument. - ttl_deletes (integer) - ttl_passes (integer) - updates (integer) + - update_command_total (integer) + - update_command_failed (integer) - uptime_ns (integer) - vsize_megabytes (integer) - wtcache_app_threads_page_read_count (integer) diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index fd085665f..888ef6de5 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -86,6 +86,21 @@ var DefaultStats = map[string]string{ "connections_total_created": "TotalCreatedC", } +var DefaultCommandsStats = map[string]string{ + "delete_command_total": "DeleteCommandTotal", + "delete_command_failed": "DeleteCommandFailed", + "find_command_total": "FindCommandTotal", + "find_command_failed": "FindCommandFailed", + "find_and_modify_command_total": "FindAndModifyCommandTotal", + "find_and_modify_command_failed": "FindAndModifyCommandFailed", + "get_more_command_total": "GetMoreCommandTotal", + "get_more_command_failed": "GetMoreCommandFailed", + "insert_command_total": "InsertCommandTotal", + "insert_command_failed": "InsertCommandFailed", + "update_command_total": "UpdateCommandTotal", + "update_command_failed": "UpdateCommandFailed", +} + var DefaultLatencyStats = map[string]string{ "latency_writes_count": "WriteOpsCnt", "latency_writes": "WriteLatency", @@ -253,8 +268,10 @@ func (d *MongodbData) AddDefaultStats() { d.add("repl_oplog_window_sec", d.StatLine.OplogStats.TimeDiff) } + d.addStat(statLine, DefaultCommandsStats) d.addStat(statLine, DefaultClusterStats) d.addStat(statLine, DefaultShardStats) + if d.StatLine.StorageEngine == "mmapv1" || d.StatLine.StorageEngine == "rocksdb" { d.addStat(statLine, MmapStats) } else if d.StatLine.StorageEngine == "wiredTiger" { diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index e643d1820..9dcb20075 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ 
b/plugins/inputs/mongodb/mongodb_data_test.go @@ -165,6 +165,35 @@ func TestAddLatencyStats(t *testing.T) { } } +func TestAddCommandsStats(t *testing.T) { + d := NewMongodbData( + &StatLine{ + DeleteCommandTotal: 73, + DeleteCommandFailed: 364, + FindCommandTotal: 113, + FindCommandFailed: 201, + FindAndModifyCommandTotal: 7, + FindAndModifyCommandFailed: 55, + GetMoreCommandTotal: 4, + GetMoreCommandFailed: 55, + InsertCommandTotal: 34, + InsertCommandFailed: 65, + UpdateCommandTotal: 23, + UpdateCommandFailed: 6, + }, + tags, + ) + + var acc testutil.Accumulator + + d.AddDefaultStats() + d.flush(&acc) + + for key := range DefaultCommandsStats { + assert.True(t, acc.HasInt64Field("mongodb", key)) + } +} + func TestAddShardHostStats(t *testing.T) { expectedHosts := []string{"hostA", "hostB"} hostStatLines := map[string]ShardHostStatLine{} @@ -225,72 +254,84 @@ func TestStateTag(t *testing.T) { d.AddDefaultStats() d.flush(&acc) fields := map[string]interface{}{ - "active_reads": int64(0), - "active_writes": int64(0), - "commands": int64(0), - "commands_per_sec": int64(0), - "deletes": int64(0), - "deletes_per_sec": int64(0), - "flushes": int64(0), - "flushes_per_sec": int64(0), - "flushes_total_time_ns": int64(0), - "getmores": int64(0), - "getmores_per_sec": int64(0), - "inserts": int64(0), - "inserts_per_sec": int64(0), - "member_status": "PRI", - "state": "PRIMARY", - "net_in_bytes_count": int64(0), - "net_in_bytes": int64(0), - "net_out_bytes_count": int64(0), - "net_out_bytes": int64(0), - "open_connections": int64(0), - "queries": int64(0), - "queries_per_sec": int64(0), - "queued_reads": int64(0), - "queued_writes": int64(0), - "repl_commands": int64(0), - "repl_commands_per_sec": int64(0), - "repl_deletes": int64(0), - "repl_deletes_per_sec": int64(0), - "repl_getmores": int64(0), - "repl_getmores_per_sec": int64(0), - "repl_inserts": int64(0), - "repl_inserts_per_sec": int64(0), - "repl_queries": int64(0), - "repl_queries_per_sec": int64(0), - "repl_updates": int64(0), - "repl_updates_per_sec": int64(0), - "repl_lag": int64(0), - "resident_megabytes": int64(0), - "updates": int64(0), - "updates_per_sec": int64(0), - "uptime_ns": int64(0), - "vsize_megabytes": int64(0), - "ttl_deletes": int64(0), - "ttl_deletes_per_sec": int64(0), - "ttl_passes": int64(0), - "ttl_passes_per_sec": int64(0), - "jumbo_chunks": int64(0), - "total_in_use": int64(0), - "total_available": int64(0), - "total_created": int64(0), - "total_refreshing": int64(0), - "cursor_timed_out": int64(0), - "cursor_timed_out_count": int64(0), - "cursor_no_timeout": int64(0), - "cursor_no_timeout_count": int64(0), - "cursor_pinned": int64(0), - "cursor_pinned_count": int64(0), - "cursor_total": int64(0), - "cursor_total_count": int64(0), - "document_deleted": int64(0), - "document_inserted": int64(0), - "document_returned": int64(0), - "document_updated": int64(0), - "connections_current": int64(0), - "connections_available": int64(0), - "connections_total_created": int64(0), + "active_reads": int64(0), + "active_writes": int64(0), + "commands": int64(0), + "commands_per_sec": int64(0), + "deletes": int64(0), + "deletes_per_sec": int64(0), + "flushes": int64(0), + "flushes_per_sec": int64(0), + "flushes_total_time_ns": int64(0), + "getmores": int64(0), + "getmores_per_sec": int64(0), + "inserts": int64(0), + "inserts_per_sec": int64(0), + "member_status": "PRI", + "state": "PRIMARY", + "net_in_bytes_count": int64(0), + "net_in_bytes": int64(0), + "net_out_bytes_count": int64(0), + "net_out_bytes": int64(0), + 
"open_connections": int64(0), + "queries": int64(0), + "queries_per_sec": int64(0), + "queued_reads": int64(0), + "queued_writes": int64(0), + "repl_commands": int64(0), + "repl_commands_per_sec": int64(0), + "repl_deletes": int64(0), + "repl_deletes_per_sec": int64(0), + "repl_getmores": int64(0), + "repl_getmores_per_sec": int64(0), + "repl_inserts": int64(0), + "repl_inserts_per_sec": int64(0), + "repl_queries": int64(0), + "repl_queries_per_sec": int64(0), + "repl_updates": int64(0), + "repl_updates_per_sec": int64(0), + "repl_lag": int64(0), + "resident_megabytes": int64(0), + "updates": int64(0), + "updates_per_sec": int64(0), + "uptime_ns": int64(0), + "vsize_megabytes": int64(0), + "ttl_deletes": int64(0), + "ttl_deletes_per_sec": int64(0), + "ttl_passes": int64(0), + "ttl_passes_per_sec": int64(0), + "jumbo_chunks": int64(0), + "total_in_use": int64(0), + "total_available": int64(0), + "total_created": int64(0), + "total_refreshing": int64(0), + "cursor_timed_out": int64(0), + "cursor_timed_out_count": int64(0), + "cursor_no_timeout": int64(0), + "cursor_no_timeout_count": int64(0), + "cursor_pinned": int64(0), + "cursor_pinned_count": int64(0), + "cursor_total": int64(0), + "cursor_total_count": int64(0), + "document_deleted": int64(0), + "document_inserted": int64(0), + "document_returned": int64(0), + "document_updated": int64(0), + "connections_current": int64(0), + "connections_available": int64(0), + "connections_total_created": int64(0), + "delete_command_total": int64(0), + "delete_command_failed": int64(0), + "find_command_total": int64(0), + "find_command_failed": int64(0), + "find_and_modify_command_total": int64(0), + "find_and_modify_command_failed": int64(0), + "get_more_command_total": int64(0), + "get_more_command_failed": int64(0), + "insert_command_total": int64(0), + "insert_command_failed": int64(0), + "update_command_total": int64(0), + "update_command_failed": int64(0), } acc.AssertContainsTaggedFields(t, "mongodb", fields, stateTags) } diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 985627c87..6a7632260 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -333,6 +333,7 @@ type MetricsStats struct { TTL *TTLStats `bson:"ttl"` Cursor *CursorStats `bson:"cursor"` Document *DocumentStats `bson:"document"` + Commands *CommandsStats `bson:"commands"` } // TTLStats stores information related to documents with a ttl index. @@ -355,6 +356,21 @@ type DocumentStats struct { Updated int64 `bson:"updated"` } +// CommandsStats stores information related to document metrics. 
+type CommandsStats struct { + Delete *CommandsStatsValue `bson:"delete"` + Find *CommandsStatsValue `bson:"find"` + FindAndModify *CommandsStatsValue `bson:"findAndModify"` + GetMore *CommandsStatsValue `bson:"getMore"` + Insert *CommandsStatsValue `bson:"insert"` + Update *CommandsStatsValue `bson:"update"` +} + +type CommandsStatsValue struct { + Failed int64 `bson:"failed"` + Total int64 `bson:"total"` +} + // OpenCursorStats stores information related to open cursor metrics type OpenCursorStats struct { NoTimeout int64 `bson:"noTimeout"` @@ -528,6 +544,14 @@ type StatLine struct { // Document fields DeletedD, InsertedD, ReturnedD, UpdatedD int64 + //Commands fields + DeleteCommandTotal, DeleteCommandFailed int64 + FindCommandTotal, FindCommandFailed int64 + FindAndModifyCommandTotal, FindAndModifyCommandFailed int64 + GetMoreCommandTotal, GetMoreCommandFailed int64 + InsertCommandTotal, InsertCommandFailed int64 + UpdateCommandTotal, UpdateCommandFailed int64 + // Connection fields CurrentC, AvailableC, TotalCreatedC int64 @@ -740,6 +764,33 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.ReturnedD = newStat.Metrics.Document.Returned returnVal.UpdatedD = newStat.Metrics.Document.Updated } + + if newStat.Metrics.Commands != nil { + if newStat.Metrics.Commands.Delete != nil { + returnVal.DeleteCommandTotal = newStat.Metrics.Commands.Delete.Total + returnVal.DeleteCommandFailed = newStat.Metrics.Commands.Delete.Failed + } + if newStat.Metrics.Commands.Find != nil { + returnVal.FindCommandTotal = newStat.Metrics.Commands.Find.Total + returnVal.FindCommandFailed = newStat.Metrics.Commands.Find.Failed + } + if newStat.Metrics.Commands.FindAndModify != nil { + returnVal.FindAndModifyCommandTotal = newStat.Metrics.Commands.FindAndModify.Total + returnVal.FindAndModifyCommandFailed = newStat.Metrics.Commands.FindAndModify.Failed + } + if newStat.Metrics.Commands.GetMore != nil { + returnVal.GetMoreCommandTotal = newStat.Metrics.Commands.GetMore.Total + returnVal.GetMoreCommandFailed = newStat.Metrics.Commands.GetMore.Failed + } + if newStat.Metrics.Commands.Insert != nil { + returnVal.InsertCommandTotal = newStat.Metrics.Commands.Insert.Total + returnVal.InsertCommandFailed = newStat.Metrics.Commands.Insert.Failed + } + if newStat.Metrics.Commands.Update != nil { + returnVal.UpdateCommandTotal = newStat.Metrics.Commands.Update.Total + returnVal.UpdateCommandFailed = newStat.Metrics.Commands.Update.Failed + } + } } if newStat.OpcountersRepl != nil && oldStat.OpcountersRepl != nil { From 4c59de9023aa4de4816eded2cf1e74cb80a421f4 Mon Sep 17 00:00:00 2001 From: Denis Pershin <48222861+denispershin@users.noreply.github.com> Date: Tue, 24 Mar 2020 02:14:56 +0300 Subject: [PATCH 1649/1815] Add additional concurrent transaction information (#7193) --- plugins/inputs/mongodb/README.md | 8 ++- plugins/inputs/mongodb/mongodb_data.go | 4 ++ plugins/inputs/mongodb/mongodb_data_test.go | 76 ++++++++++++--------- plugins/inputs/mongodb/mongostat.go | 52 ++++++++------ 4 files changed, 82 insertions(+), 58 deletions(-) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index 8202eb023..7148c3a67 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -60,6 +60,8 @@ by running Telegraf with the `--debug` argument. 
- fields: - active_reads (integer) - active_writes (integer) + - available_reads (integer) + - available_writes (integer) - commands (integer) - connections_current (integer) - connections_available (integer) @@ -117,6 +119,8 @@ by running Telegraf with the `--debug` argument. - total_created (integer) - total_in_use (integer) - total_refreshing (integer) + - total_tickets_reads (integer) + - total_tickets_writes (integer) - ttl_deletes (integer) - ttl_passes (integer) - updates (integer) @@ -204,8 +208,8 @@ by running Telegraf with the `--debug` argument. ### Example Output: ``` -mongodb,hostname=127.0.0.1:27017 active_reads=0i,active_writes=0i,commands=1335i,commands_per_sec=7i,connections_available=814i,connections_current=5i,connections_total_created=0i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=1i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=1i,deletes=0i,deletes_per_sec=0i,document_deleted=0i,document_inserted=0i,document_returned=13i,document_updated=0i,flushes=5i,flushes_per_sec=0i,getmores=269i,getmores_per_sec=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands_count=0i,latency_commands=0i,latency_reads_count=0i,latency_reads=0i,latency_writes_count=0i,latency_writes=0i,member_status="PRI",net_in_bytes=986i,net_in_bytes_count=358006i,net_out_bytes=23906i,net_out_bytes_count=661507i,open_connections=5i,percent_cache_dirty=0,percent_cache_used=0,queries=18i,queries_per_sec=3i,queued_reads=0i,queued_writes=0i,repl_commands=0i,repl_commands_per_sec=0i,repl_deletes=0i,repl_deletes_per_sec=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=0i,repl_inserts_per_sec=0i,repl_lag=0i,repl_oplog_window_sec=24355215i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=0i,repl_updates_per_sec=0i,resident_megabytes=62i,state="PRIMARY",total_available=0i,total_created=0i,total_in_use=0i,total_refreshing=0i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=23i,ttl_passes_per_sec=0i,updates=0i,updates_per_sec=0i,vsize_megabytes=713i,wtcache_app_threads_page_read_count=13i,wtcache_app_threads_page_read_time=74i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=55271i,wtcache_bytes_written_from=125402i,wtcache_current_bytes=117050i,wtcache_max_bytes_configured=1073741824i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=0i,wtcache_worker_thread_evictingpages=0i 1547159491000000000 -mongodb,hostname=127.0.0.1:27017,node_type=PRI 
active_reads=0i,active_writes=0i,commands=1335i,commands_per_sec=7i,connections_available=814i,connections_current=5i,connections_total_created=0i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=1i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=1i,deletes=0i,deletes_per_sec=0i,document_deleted=0i,document_inserted=0i,document_returned=13i,document_updated=0i,flushes=5i,flushes_per_sec=0i,getmores=269i,getmores_per_sec=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,member_status="PRI",net_in_bytes=986i,net_in_bytes_count=358006i,net_out_bytes=23906i,net_out_bytes_count=661507i,open_connections=5i,percent_cache_dirty=0,percent_cache_used=0,queries=18i,queries_per_sec=3i,queued_reads=0i,queued_writes=0i,repl_commands=0i,repl_commands_per_sec=0i,repl_deletes=0i,repl_deletes_per_sec=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=0i,repl_inserts_per_sec=0i,repl_lag=0i,repl_oplog_window_sec=24355215i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=0i,repl_updates_per_sec=0i,resident_megabytes=62i,state="PRIMARY",total_available=0i,total_created=0i,total_in_use=0i,total_refreshing=0i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=23i,ttl_passes_per_sec=0i,updates=0i,updates_per_sec=0i,vsize_megabytes=713i,wtcache_app_threads_page_read_count=13i,wtcache_app_threads_page_read_time=74i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=55271i,wtcache_bytes_written_from=125402i,wtcache_current_bytes=117050i,wtcache_max_bytes_configured=1073741824i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=0i,wtcache_worker_thread_evictingpages=0i 1547159491000000000 +mongodb,hostname=127.0.0.1:27017 
active_reads=0i,active_writes=0i,available_reads=128i,available_writes=128i,commands=1335i,commands_per_sec=7i,connections_available=814i,connections_current=5i,connections_total_created=0i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=1i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=1i,deletes=0i,deletes_per_sec=0i,document_deleted=0i,document_inserted=0i,document_returned=13i,document_updated=0i,flushes=5i,flushes_per_sec=0i,getmores=269i,getmores_per_sec=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands_count=0i,latency_commands=0i,latency_reads_count=0i,latency_reads=0i,latency_writes_count=0i,latency_writes=0i,member_status="PRI",net_in_bytes=986i,net_in_bytes_count=358006i,net_out_bytes=23906i,net_out_bytes_count=661507i,open_connections=5i,percent_cache_dirty=0,percent_cache_used=0,queries=18i,queries_per_sec=3i,queued_reads=0i,queued_writes=0i,repl_commands=0i,repl_commands_per_sec=0i,repl_deletes=0i,repl_deletes_per_sec=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=0i,repl_inserts_per_sec=0i,repl_lag=0i,repl_oplog_window_sec=24355215i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=0i,repl_updates_per_sec=0i,resident_megabytes=62i,state="PRIMARY",total_available=0i,total_created=0i,total_in_use=0i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=23i,ttl_passes_per_sec=0i,updates=0i,updates_per_sec=0i,vsize_megabytes=713i,wtcache_app_threads_page_read_count=13i,wtcache_app_threads_page_read_time=74i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=55271i,wtcache_bytes_written_from=125402i,wtcache_current_bytes=117050i,wtcache_max_bytes_configured=1073741824i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=0i,wtcache_worker_thread_evictingpages=0i 1547159491000000000 +mongodb,hostname=127.0.0.1:27017,node_type=PRI 
active_reads=0i,active_writes=0i,available_reads=128i,available_writes=128i,commands=1335i,commands_per_sec=7i,connections_available=814i,connections_current=5i,connections_total_created=0i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=1i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=1i,deletes=0i,deletes_per_sec=0i,document_deleted=0i,document_inserted=0i,document_returned=13i,document_updated=0i,flushes=5i,flushes_per_sec=0i,getmores=269i,getmores_per_sec=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,member_status="PRI",net_in_bytes=986i,net_in_bytes_count=358006i,net_out_bytes=23906i,net_out_bytes_count=661507i,open_connections=5i,percent_cache_dirty=0,percent_cache_used=0,queries=18i,queries_per_sec=3i,queued_reads=0i,queued_writes=0i,repl_commands=0i,repl_commands_per_sec=0i,repl_deletes=0i,repl_deletes_per_sec=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=0i,repl_inserts_per_sec=0i,repl_lag=0i,repl_oplog_window_sec=24355215i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=0i,repl_updates_per_sec=0i,resident_megabytes=62i,state="PRIMARY",total_available=0i,total_created=0i,total_in_use=0i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=23i,ttl_passes_per_sec=0i,updates=0i,updates_per_sec=0i,vsize_megabytes=713i,wtcache_app_threads_page_read_count=13i,wtcache_app_threads_page_read_time=74i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=55271i,wtcache_bytes_written_from=125402i,wtcache_current_bytes=117050i,wtcache_max_bytes_configured=1073741824i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=0i,wtcache_worker_thread_evictingpages=0i 1547159491000000000 mongodb_db_stats,db_name=admin,hostname=127.0.0.1:27017 avg_obj_size=241,collections=2i,data_size=723i,index_size=49152i,indexes=3i,num_extents=0i,objects=3i,ok=1i,storage_size=53248i,type="db_stat" 1547159491000000000 mongodb_db_stats,db_name=local,hostname=127.0.0.1:27017 avg_obj_size=813.9705882352941,collections=6i,data_size=55350i,index_size=102400i,indexes=5i,num_extents=0i,objects=68i,ok=1i,storage_size=204800i,type="db_stat" 1547159491000000000 mongodb_col_stats,collection=foo,db_name=local,hostname=127.0.0.1:27017 size=375005928i,avg_obj_size=5494,type="col_stat",storage_size=249307136i,total_index_size=2138112i,ok=1i,count=68251i 1547159491000000000 diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index 888ef6de5..606c2bbdc 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -60,6 +60,10 @@ var DefaultStats = map[string]string{ "queued_writes": "QueuedWriters", "active_reads": "ActiveReaders", "active_writes": "ActiveWriters", + "available_reads": "AvailableReaders", + "available_writes": "AvailableWriters", + "total_tickets_reads": "TotalTicketsReaders", + "total_tickets_writes": "TotalTicketsWriters", "net_in_bytes_count": "NetInCnt", "net_in_bytes": "NetIn", "net_out_bytes_count": "NetOutCnt", diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index 9dcb20075..34b03b464 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -14,40 +14,44 @@ var tags = make(map[string]string) func TestAddNonReplStats(t *testing.T) { d := NewMongodbData( &StatLine{ - 
StorageEngine: "", - Time: time.Now(), - UptimeNanos: 0, - Insert: 0, - Query: 0, - Update: 0, - UpdateCnt: 0, - Delete: 0, - GetMore: 0, - Command: 0, - Flushes: 0, - FlushesCnt: 0, - Virtual: 0, - Resident: 0, - QueuedReaders: 0, - QueuedWriters: 0, - ActiveReaders: 0, - ActiveWriters: 0, - NetIn: 0, - NetOut: 0, - NumConnections: 0, - Passes: 0, - DeletedDocuments: 0, - TimedOutC: 0, - NoTimeoutC: 0, - PinnedC: 0, - TotalC: 0, - DeletedD: 0, - InsertedD: 0, - ReturnedD: 0, - UpdatedD: 0, - CurrentC: 0, - AvailableC: 0, - TotalCreatedC: 0, + StorageEngine: "", + Time: time.Now(), + UptimeNanos: 0, + Insert: 0, + Query: 0, + Update: 0, + UpdateCnt: 0, + Delete: 0, + GetMore: 0, + Command: 0, + Flushes: 0, + FlushesCnt: 0, + Virtual: 0, + Resident: 0, + QueuedReaders: 0, + QueuedWriters: 0, + ActiveReaders: 0, + ActiveWriters: 0, + AvailableReaders: 0, + AvailableWriters: 0, + TotalTicketsReaders: 0, + TotalTicketsWriters: 0, + NetIn: 0, + NetOut: 0, + NumConnections: 0, + Passes: 0, + DeletedDocuments: 0, + TimedOutC: 0, + NoTimeoutC: 0, + PinnedC: 0, + TotalC: 0, + DeletedD: 0, + InsertedD: 0, + ReturnedD: 0, + UpdatedD: 0, + CurrentC: 0, + AvailableC: 0, + TotalCreatedC: 0, }, tags, ) @@ -256,6 +260,10 @@ func TestStateTag(t *testing.T) { fields := map[string]interface{}{ "active_reads": int64(0), "active_writes": int64(0), + "available_reads": int64(0), + "available_writes": int64(0), + "total_tickets_reads": int64(0), + "total_tickets_writes": int64(0), "commands": int64(0), "commands_per_sec": int64(0), "deletes": int64(0), diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 6a7632260..8ec3b3bc8 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -174,7 +174,9 @@ type ConcurrentTransactions struct { } type ConcurrentTransStats struct { - Out int64 `bson:"out"` + Out int64 `bson:"out"` + Available int64 `bson:"available"` + TotalTickets int64 `bson:"totalTickets"` } // CacheStats stores cache statistics for WiredTiger. 
@@ -582,27 +584,29 @@ type StatLine struct { UnmodifiedPagesEvicted int64 // Replicated Opcounter fields - InsertR, InsertRCnt int64 - QueryR, QueryRCnt int64 - UpdateR, UpdateRCnt int64 - DeleteR, DeleteRCnt int64 - GetMoreR, GetMoreRCnt int64 - CommandR, CommandRCnt int64 - ReplLag int64 - OplogStats *OplogStats - Flushes, FlushesCnt int64 - FlushesTotalTime int64 - Mapped, Virtual, Resident, NonMapped int64 - Faults, FaultsCnt int64 - HighestLocked *LockStatus - QueuedReaders, QueuedWriters int64 - ActiveReaders, ActiveWriters int64 - NetIn, NetInCnt int64 - NetOut, NetOutCnt int64 - NumConnections int64 - ReplSetName string - NodeType string - NodeState string + InsertR, InsertRCnt int64 + QueryR, QueryRCnt int64 + UpdateR, UpdateRCnt int64 + DeleteR, DeleteRCnt int64 + GetMoreR, GetMoreRCnt int64 + CommandR, CommandRCnt int64 + ReplLag int64 + OplogStats *OplogStats + Flushes, FlushesCnt int64 + FlushesTotalTime int64 + Mapped, Virtual, Resident, NonMapped int64 + Faults, FaultsCnt int64 + HighestLocked *LockStatus + QueuedReaders, QueuedWriters int64 + ActiveReaders, ActiveWriters int64 + AvailableReaders, AvailableWriters int64 + TotalTicketsReaders, TotalTicketsWriters int64 + NetIn, NetInCnt int64 + NetOut, NetOutCnt int64 + NumConnections int64 + ReplSetName string + NodeType string + NodeState string // Cluster fields JumboChunksCount int64 @@ -967,6 +971,10 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec if hasWT { returnVal.ActiveReaders = newStat.WiredTiger.Concurrent.Read.Out returnVal.ActiveWriters = newStat.WiredTiger.Concurrent.Write.Out + returnVal.AvailableReaders = newStat.WiredTiger.Concurrent.Read.Available + returnVal.AvailableWriters = newStat.WiredTiger.Concurrent.Write.Available + returnVal.TotalTicketsReaders = newStat.WiredTiger.Concurrent.Read.TotalTickets + returnVal.TotalTicketsWriters = newStat.WiredTiger.Concurrent.Write.TotalTickets } else if newStat.GlobalLock.ActiveClients != nil { returnVal.ActiveReaders = newStat.GlobalLock.ActiveClients.Readers returnVal.ActiveWriters = newStat.GlobalLock.ActiveClients.Writers From 9b9a84964b4505f1815604edabd544d8d3ba75b2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 23 Mar 2020 16:17:04 -0700 Subject: [PATCH 1650/1815] Update changelog --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ab5aa9f0e..bcb07da67 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## v1.15 [unreleased] + +#### Features + +- [#6905](https://github.com/influxdata/telegraf/pull/6905): Add commands stats to mongodb input plugin. +- [#7193](https://github.com/influxdata/telegraf/pull/7193): Add additional concurrent transaction information. + ## v1.14 [unreleased] #### Release Notes From c12c55da96eda0611f5f9d323a67cd9aa98c932e Mon Sep 17 00:00:00 2001 From: Asgaut Eng Date: Tue, 24 Mar 2020 22:18:03 +0100 Subject: [PATCH 1651/1815] Fix 'nil' file created by Makefile on Windows (#7224) The /dev/null device on Windows is a virtual file named "nul" not "nil". This fix works under both cmd.exe and Powershell. 
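As a related aside (not part of the patch itself), Go's standard library encodes the same platform difference, which makes for an easy cross-check of the commit message above:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// os.DevNull is "/dev/null" on Unix-like systems and "NUL" on
	// Windows -- the same distinction the Makefile redirect relies on.
	fmt.Println(os.DevNull)
}
```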
--- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 4ec767997..e7889e89d 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ ifeq ($(OS), Windows_NT) - VERSION := $(shell git describe --exact-match --tags 2>nil) + VERSION := $(shell git describe --exact-match --tags 2>nul) HOME := $(HOMEPATH) CGO_ENABLED ?= 0 export CGO_ENABLED From 715e991186f0eb811386d1bfe04462de3c62ffc2 Mon Sep 17 00:00:00 2001 From: Programmer19 Date: Tue, 24 Mar 2020 17:20:46 -0400 Subject: [PATCH 1652/1815] Add ability to specify HTTP Headers in http_listener_v2 which will added as tags (#7223) --- plugins/inputs/http_listener_v2/README.md | 5 ++ .../http_listener_v2/http_listener_v2.go | 13 ++++ .../http_listener_v2/http_listener_v2_test.go | 67 +++++++++++++++++++ 3 files changed, 85 insertions(+) diff --git a/plugins/inputs/http_listener_v2/README.md b/plugins/inputs/http_listener_v2/README.md index b40e3554f..05e480586 100644 --- a/plugins/inputs/http_listener_v2/README.md +++ b/plugins/inputs/http_listener_v2/README.md @@ -48,6 +48,11 @@ This is a sample configuration for the plugin. # basic_username = "foobar" # basic_password = "barfoo" + ## Optional setting to map http headers into tags + ## If the http header is not present on the request, no corresponding tag will be added + ## If multiple instances of the http header are present, only the first value will be used + # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index 21d35fab9..a4237ea2a 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -44,6 +44,7 @@ type HTTPListenerV2 struct { Port int `toml:"port"` BasicUsername string `toml:"basic_username"` BasicPassword string `toml:"basic_password"` + HTTPHeaderTags map[string]string `toml:"http_header_tags"` tlsint.ServerConfig TimeFunc @@ -93,6 +94,11 @@ const sampleConfig = ` # basic_username = "foobar" # basic_password = "barfoo" + ## Optional setting to map http headers into tags + ## If the http header is not present on the request, no corresponding tag will be added + ## If multiple instances of the http header are present, only the first value will be used + # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} + ## Data format to consume. 
## Each data format has its own unique set of configuration options, read ## more about them here: @@ -225,6 +231,13 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) } for _, m := range metrics { + for headerName, measurementName := range h.HTTPHeaderTags { + headerValues, foundHeader := req.Header[headerName] + if foundHeader && len(headerValues) > 0 { + m.AddTag(measurementName, headerValues[0]) + } + } + h.acc.AddMetric(m) } diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go index c9e96b92d..c06b3908d 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2_test.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go @@ -381,6 +381,73 @@ func TestWriteHTTPEmpty(t *testing.T) { require.EqualValues(t, 204, resp.StatusCode) } +func TestWriteHTTPTransformHeaderValuesToTagsSingleWrite(t *testing.T) { + listener := newTestHTTPListenerV2() + listener.HTTPHeaderTags = map[string]string{"Present_http_header_1": "presentMeasurementKey1", "Present_http_header_2": "presentMeasurementKey2", "NOT_PRESENT_HEADER": "notPresentMeasurementKey"} + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + req, err := http.NewRequest("POST", createURL(listener, "http", "/write", "db=mydb"), bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + req.Header.Set("Content-Type", "") + req.Header.Set("Present_http_header_1", "PRESENT_HTTP_VALUE_1") + req.Header.Set("Present_http_header_2", "PRESENT_HTTP_VALUE_2") + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "presentMeasurementKey1": "PRESENT_HTTP_VALUE_1", "presentMeasurementKey2": "PRESENT_HTTP_VALUE_2"}, + ) + + // post single message to listener + resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "presentMeasurementKey1": "PRESENT_HTTP_VALUE_1", "presentMeasurementKey2": "PRESENT_HTTP_VALUE_2"}, + ) +} + +func TestWriteHTTPTransformHeaderValuesToTagsBulkWrite(t *testing.T) { + listener := newTestHTTPListenerV2() + listener.HTTPHeaderTags = map[string]string{"Present_http_header_1": "presentMeasurementKey1", "Present_http_header_2": "presentMeasurementKey2", "NOT_PRESENT_HEADER": "notPresentMeasurementKey"} + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + req, err := http.NewRequest("POST", createURL(listener, "http", "/write", "db=mydb"), bytes.NewBuffer([]byte(testMsgs))) + require.NoError(t, err) + req.Header.Set("Content-Type", "") + req.Header.Set("Present_http_header_1", "PRESENT_HTTP_VALUE_1") + req.Header.Set("Present_http_header_2", "PRESENT_HTTP_VALUE_2") + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(2) + hostTags := []string{"server02", "server03", "server04", "server05", "server06"} + for _, hostTag := range hostTags { + acc.AssertContainsTaggedFields(t, 
"cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": hostTag, "presentMeasurementKey1": "PRESENT_HTTP_VALUE_1", "presentMeasurementKey2": "PRESENT_HTTP_VALUE_2"}, + ) + } +} + func TestWriteHTTPQueryParams(t *testing.T) { parser, _ := parsers.NewFormUrlencodedParser("query_measurement", nil, []string{"tagKey"}) listener := newTestHTTPListenerV2() From 83925c996023a48933f94e789b50ca8c7d937c64 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 24 Mar 2020 14:22:03 -0700 Subject: [PATCH 1653/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bcb07da67..0a2a12ddc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ - [#6905](https://github.com/influxdata/telegraf/pull/6905): Add commands stats to mongodb input plugin. - [#7193](https://github.com/influxdata/telegraf/pull/7193): Add additional concurrent transaction information. +- [#7223](https://github.com/influxdata/telegraf/pull/7223): Add ability to specify HTTP Headers in http_listener_v2 which will added as tags. ## v1.14 [unreleased] From 124735af2e7bf1ca5da8f890e7edc2a99fb8ac07 Mon Sep 17 00:00:00 2001 From: dbutler-starry Date: Tue, 24 Mar 2020 20:02:23 -0400 Subject: [PATCH 1654/1815] Apply ping deadline to dns lookup (#7140) --- plugins/inputs/ping/ping.go | 82 ++++++++++++++++++++++---------- plugins/inputs/ping/ping_test.go | 37 ++++++++++++-- 2 files changed, 92 insertions(+), 27 deletions(-) diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 17767bac3..008cfceac 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -23,6 +23,10 @@ import ( // for unit test purposes (see ping_test.go) type HostPinger func(binary string, timeout float64, args ...string) (string, error) +type HostResolver func(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, error) + +type IsCorrectNetwork func(ip net.IPAddr) bool + type Ping struct { wg sync.WaitGroup @@ -60,6 +64,9 @@ type Ping struct { // host ping function pingHost HostPinger + // resolve host function + resolveHost HostResolver + // listenAddr is the address associated with the interface defined. 
listenAddr string } @@ -123,13 +130,6 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error { } for _, host := range p.Urls { - _, err := net.LookupHost(host) - if err != nil { - acc.AddFields("ping", map[string]interface{}{"result_code": 1}, map[string]string{"url": host}) - acc.AddError(err) - continue - } - p.wg.Add(1) go func(host string) { defer p.wg.Done() @@ -194,25 +194,47 @@ func hostPinger(binary string, timeout float64, args ...string) (string, error) return string(out), err } +func filterIPs(addrs []net.IPAddr, filterFunc IsCorrectNetwork) []net.IPAddr { + n := 0 + for _, x := range addrs { + if filterFunc(x) { + addrs[n] = x + n++ + } + } + return addrs[:n] +} + +func hostResolver(ctx context.Context, ipv6 bool, destination string) (*net.IPAddr, error) { + resolver := &net.Resolver{} + ips, err := resolver.LookupIPAddr(ctx, destination) + + if err != nil { + return nil, err + } + + if ipv6 { + ips = filterIPs(ips, isV6) + } else { + ips = filterIPs(ips, isV4) + } + + if len(ips) == 0 { + return nil, errors.New("Cannot resolve ip address") + } + return &ips[0], err +} + +func isV4(ip net.IPAddr) bool { + return ip.IP.To4() != nil +} + +func isV6(ip net.IPAddr) bool { + return !isV4(ip) +} + func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { ctx := context.Background() - - network := "ip4" - if p.IPv6 { - network = "ip6" - } - - host, err := net.ResolveIPAddr(network, destination) - if err != nil { - acc.AddFields( - "ping", - map[string]interface{}{"result_code": 1}, - map[string]string{"url": destination}, - ) - acc.AddError(err) - return - } - interval := p.PingInterval if interval < 0.2 { interval = 0.2 @@ -232,6 +254,17 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { defer cancel() } + host, err := p.resolveHost(ctx, p.IPv6, destination) + if err != nil { + acc.AddFields( + "ping", + map[string]interface{}{"result_code": 1}, + map[string]string{"url": destination}, + ) + acc.AddError(err) + return + } + resps := make(chan *ping.Response) rsps := []*ping.Response{} @@ -392,6 +425,7 @@ func init() { inputs.Add("ping", func() telegraf.Input { return &Ping{ pingHost: hostPinger, + resolveHost: hostResolver, PingInterval: 1.0, Count: 1, Timeout: 1.0, diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 8a1a0a9e1..d6f78bb79 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -3,7 +3,9 @@ package ping import ( + "context" "errors" + "net" "reflect" "sort" "testing" @@ -340,6 +342,12 @@ func TestPingBinary(t *testing.T) { acc.GatherError(p.Gather) } +func mockHostResolver(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, error) { + ipaddr := net.IPAddr{} + ipaddr.IP = net.IPv4(127, 0, 0, 1) + return &ipaddr, nil +} + // Test that Gather function works using native ping func TestPingGatherNative(t *testing.T) { if testing.Short() { @@ -348,12 +356,35 @@ func TestPingGatherNative(t *testing.T) { var acc testutil.Accumulator p := Ping{ - Urls: []string{"localhost", "127.0.0.2"}, - Method: "native", - Count: 5, + Urls: []string{"localhost", "127.0.0.2"}, + Method: "native", + Count: 5, + resolveHost: mockHostResolver, } assert.NoError(t, acc.GatherError(p.Gather)) assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_transmitted", 5)) assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_received", 5)) } + +func mockHostResolverError(ctx context.Context, ipv6 bool, host string) 
(*net.IPAddr, error) { + return nil, errors.New("myMock error") +} + +// Test failed DNS resolutions +func TestDNSLookupError(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test due to permission requirements.") + } + + var acc testutil.Accumulator + p := Ping{ + Urls: []string{"localhost"}, + Method: "native", + IPv6: false, + resolveHost: mockHostResolverError, + } + + acc.GatherError(p.Gather) + assert.True(t, len(acc.Errors) > 0) +} From cd1cd54e065eaf7773faa63ebee64c12905f0798 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 24 Mar 2020 17:03:31 -0700 Subject: [PATCH 1655/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0a2a12ddc..22b4945b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ - [#6905](https://github.com/influxdata/telegraf/pull/6905): Add commands stats to mongodb input plugin. - [#7193](https://github.com/influxdata/telegraf/pull/7193): Add additional concurrent transaction information. - [#7223](https://github.com/influxdata/telegraf/pull/7223): Add ability to specify HTTP Headers in http_listener_v2 which will added as tags. +- [#7140](https://github.com/influxdata/telegraf/pull/7140): Apply ping deadline to dns lookup. ## v1.14 [unreleased] From cb42f610f4e9df80e948e50f8008a6581c1cb273 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 26 Mar 2020 12:01:44 -0700 Subject: [PATCH 1656/1815] Set 1.14.0 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 22b4945b2..a6507def6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ - [#7223](https://github.com/influxdata/telegraf/pull/7223): Add ability to specify HTTP Headers in http_listener_v2 which will added as tags. - [#7140](https://github.com/influxdata/telegraf/pull/7140): Apply ping deadline to dns lookup. 
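A self-contained sketch of the context-scoped lookup pattern the ping change above introduces, so that the configured deadline bounds name resolution as well as the pings themselves. The host name and timeout are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// LookupIPAddr aborts once the context deadline expires.
	resolver := &net.Resolver{}
	ips, err := resolver.LookupIPAddr(ctx, "example.com")
	if err != nil {
		fmt.Println("lookup failed within the deadline:", err)
		return
	}
	fmt.Println("resolved:", ips[0].IP)
}
```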
-## v1.14 [unreleased] +## v1.14 [2020-03-26] #### Release Notes From f882b8f94f73fb18f26f845bad7f40d81e5c64b8 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Fri, 27 Mar 2020 01:05:43 +0100 Subject: [PATCH 1657/1815] Add support for 64-bit integer types to modbus input (#7225) --- plugins/inputs/modbus/README.md | 2 +- plugins/inputs/modbus/modbus.go | 88 ++++++++++++++++++----- plugins/inputs/modbus/modbus_test.go | 100 +++++++++++++++++++++++++++ 3 files changed, 170 insertions(+), 20 deletions(-) diff --git a/plugins/inputs/modbus/README.md b/plugins/inputs/modbus/README.md index 1e042deba..2c73ba5ec 100644 --- a/plugins/inputs/modbus/README.md +++ b/plugins/inputs/modbus/README.md @@ -58,7 +58,7 @@ The Modbus plugin collects Discrete Inputs, Coils, Input Registers and Holding R ## |---BA, DCBA - Little Endian ## |---BADC - Mid-Big Endian ## |---CDAB - Mid-Little Endian - ## data_type - UINT16, INT16, INT32, UINT32, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation) + ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation) ## scale - the final numeric variable representation ## address - variable address diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index e96b2f117..46dc748c7 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -73,17 +73,17 @@ const sampleConfig = ` ## ## Device name name = "Device" - + ## Slave ID - addresses a MODBUS device on the bus ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] slave_id = 1 - + ## Timeout for each request timeout = "1s" - + # TCP - connect via Modbus/TCP controller = "tcp://localhost:502" - + # Serial (RS485; RS232) #controller = "file:///dev/ttyUSB0" #baud_rate = 9600 @@ -91,15 +91,15 @@ const sampleConfig = ` #parity = "N" #stop_bits = 1 #transmission_mode = "RTU" - - + + ## Measurements ## - + ## Digital Variables, Discrete Inputs and Coils ## name - the variable name ## address - variable address - + discrete_inputs = [ { name = "start", address = [0]}, { name = "stop", address = [1]}, @@ -111,7 +111,7 @@ const sampleConfig = ` { name = "motor1_jog", address = [1]}, { name = "motor1_stop", address = [2]}, ] - + ## Analog Variables, Input Registers and Holding Registers ## name - the variable name ## byte_order - the ordering of bytes @@ -119,10 +119,10 @@ const sampleConfig = ` ## |---BA, DCBA - Little Endian ## |---BADC - Mid-Big Endian ## |---CDAB - Mid-Little Endian - ## data_type - UINT16, INT16, INT32, UINT32, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation) + ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation) ## scale - the final numeric variable representation ## address - variable address - + holding_registers = [ { name = "power_factor", byte_order = "AB", data_type = "FLOAT32", scale=0.01, address = [8]}, { name = "voltage", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [0]}, @@ -328,7 +328,7 @@ func validateFieldContainers(t []fieldContainer, n string) error { if n == cInputRegisters || n == cHoldingRegisters { // search byte order switch item.ByteOrder { - case "AB", "BA", "ABCD", "CDAB", "BADC", "DCBA": + case "AB", "BA", "ABCD", "CDAB", "BADC", "DCBA", "ABCDEFGH", "HGFEDCBA", "BADCFEHG", "GHEFCDAB": break default: return fmt.Errorf("invalid byte order '%s' in '%s' - '%s'", item.ByteOrder, n, item.Name) @@ -336,7 +336,7 @@ func 
validateFieldContainers(t []fieldContainer, n string) error { // search data type switch item.DataType { - case "UINT16", "INT16", "UINT32", "INT32", "FLOAT32-IEEE", "FLOAT32": + case "UINT16", "INT16", "UINT32", "INT32", "UINT64", "INT64", "FLOAT32-IEEE", "FLOAT32": break default: return fmt.Errorf("invalid data type '%s' in '%s' - '%s'", item.DataType, n, item.Name) @@ -349,10 +349,12 @@ func validateFieldContainers(t []fieldContainer, n string) error { } // check address - if len(item.Address) == 0 || len(item.Address) > 2 { + if len(item.Address) != 1 && len(item.Address) != 2 && len(item.Address) != 4 { return fmt.Errorf("invalid address '%v' length '%v' in '%s' - '%s'", item.Address, len(item.Address), n, item.Name) - } else if n == cInputRegisters || n == cHoldingRegisters { - if (len(item.Address) == 1 && len(item.ByteOrder) != 2) || (len(item.Address) == 2 && len(item.ByteOrder) != 4) { + } + + if n == cInputRegisters || n == cHoldingRegisters { + if 2*len(item.Address) != len(item.ByteOrder) { return fmt.Errorf("invalid byte order '%s' and address '%v' in '%s' - '%s'", item.ByteOrder, item.Address, n, item.Name) } @@ -360,8 +362,7 @@ func validateFieldContainers(t []fieldContainer, n string) error { if len(item.Address) > len(removeDuplicates(item.Address)) { return fmt.Errorf("duplicate address '%v' in '%s' - '%s'", item.Address, n, item.Name) } - - } else if len(item.Address) > 1 || (n == cInputRegisters || n == cHoldingRegisters) { + } else if len(item.Address) != 1 { return fmt.Errorf("invalid address'%v' length'%v' in '%s' - '%s'", item.Address, len(item.Address), n, item.Name) } } @@ -480,6 +481,14 @@ func convertDataType(t fieldContainer, bytes []byte) interface{} { e32 := convertEndianness32(t.ByteOrder, bytes) f32 := int32(e32) return scaleInt32(t.Scale, f32) + case "UINT64": + e64 := convertEndianness64(t.ByteOrder, bytes) + f64 := format64(t.DataType, e64).(uint64) + return scaleUint64(t.Scale, f64) + case "INT64": + e64 := convertEndianness64(t.ByteOrder, bytes) + f64 := format64(t.DataType, e64).(int64) + return scaleInt64(t.Scale, f64) case "FLOAT32-IEEE": e32 := convertEndianness32(t.ByteOrder, bytes) f32 := math.Float32frombits(e32) @@ -488,9 +497,12 @@ func convertDataType(t fieldContainer, bytes []byte) interface{} { if len(bytes) == 2 { e16 := convertEndianness16(t.ByteOrder, bytes) return scale16toFloat32(t.Scale, e16) - } else { + } else if len(bytes) == 4 { e32 := convertEndianness32(t.ByteOrder, bytes) return scale32toFloat32(t.Scale, e32) + } else { + e64 := convertEndianness64(t.ByteOrder, bytes) + return scale64toFloat32(t.Scale, e64) } default: return 0 @@ -523,6 +535,21 @@ func convertEndianness32(o string, b []byte) uint32 { } } +func convertEndianness64(o string, b []byte) uint64 { + switch o { + case "ABCDEFGH": + return binary.BigEndian.Uint64(b) + case "HGFEDCBA": + return binary.LittleEndian.Uint64(b) + case "BADCFEHG": + return uint64(binary.LittleEndian.Uint16(b[0:]))<<48 | uint64(binary.LittleEndian.Uint16(b[2:]))<<32 | uint64(binary.LittleEndian.Uint16(b[4:]))<<16 | uint64(binary.LittleEndian.Uint16(b[6:])) + case "GHEFCDAB": + return uint64(binary.BigEndian.Uint16(b[6:]))<<48 | uint64(binary.BigEndian.Uint16(b[4:]))<<32 | uint64(binary.BigEndian.Uint16(b[2:]))<<16 | uint64(binary.BigEndian.Uint16(b[0:])) + default: + return 0 + } +} + func format16(f string, r uint16) interface{} { switch f { case "UINT16": @@ -547,6 +574,17 @@ func format32(f string, r uint32) interface{} { } } +func format64(f string, r uint64) interface{} { + switch f { + 
case "UINT64": + return r + case "INT64": + return int64(r) + default: + return r + } +} + func scale16toFloat32(s float64, v uint16) float64 { return float64(v) * s } @@ -555,6 +593,10 @@ func scale32toFloat32(s float64, v uint32) float64 { return float64(float64(v) * float64(s)) } +func scale64toFloat32(s float64, v uint64) float64 { + return float64(float64(v) * float64(s)) +} + func scaleInt16(s float64, v int16) int16 { return int16(float64(v) * s) } @@ -575,6 +617,14 @@ func scaleFloat32(s float64, v float32) float32 { return float32(float64(v) * s) } +func scaleUint64(s float64, v uint64) uint64 { + return uint64(float64(v) * float64(s)) +} + +func scaleInt64(s float64, v int64) int64 { + return int64(float64(v) * float64(s)) +} + // Gather implements the telegraf plugin interface method for data accumulation func (m *Modbus) Gather(acc telegraf.Accumulator) error { if !m.isConnected { diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go index e346ece17..9a8c46382 100644 --- a/plugins/inputs/modbus/modbus_test.go +++ b/plugins/inputs/modbus/modbus_test.go @@ -349,6 +349,106 @@ func TestHoldingRegisters(t *testing.T) { write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, read: float32(-3.3360025e-12), }, + { + name: "register140_to_register143_abcdefgh_int64_scaled", + address: []uint16{140, 141, 142, 143}, + quantity: 4, + byteOrder: "ABCDEFGH", + dataType: "INT64", + scale: 10, + write: []byte{0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xAB, 0xCD}, + read: int64(10995116717570), + }, + { + name: "register140_to_register143_abcdefgh_int64", + address: []uint16{140, 141, 142, 143}, + quantity: 4, + byteOrder: "ABCDEFGH", + dataType: "INT64", + scale: 1, + write: []byte{0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xAB, 0xCD}, + read: int64(1099511671757), + }, + { + name: "register150_to_register153_hgfedcba_int64", + address: []uint16{150, 151, 152, 153}, + quantity: 4, + byteOrder: "HGFEDCBA", + dataType: "INT64", + scale: 1, + write: []byte{0x84, 0xF6, 0x45, 0xF9, 0xBC, 0xFE, 0xFF, 0xFF}, + read: int64(-1387387292028), + }, + { + name: "register160_to_register163_badcfehg_int64", + address: []uint16{160, 161, 162, 163}, + quantity: 4, + byteOrder: "BADCFEHG", + dataType: "INT64", + scale: 1, + write: []byte{0xFF, 0xFF, 0xBC, 0xFE, 0x45, 0xF9, 0x84, 0xF6}, + read: int64(-1387387292028), + }, + { + name: "register170_to_register173_ghefcdab_int64", + address: []uint16{170, 171, 172, 173}, + quantity: 4, + byteOrder: "GHEFCDAB", + dataType: "INT64", + scale: 1, + write: []byte{0xF6, 0x84, 0xF9, 0x45, 0xFE, 0xBC, 0xFF, 0xFF}, + read: int64(-1387387292028), + }, + { + name: "register180_to_register183_abcdefgh_uint64_scaled", + address: []uint16{180, 181, 182, 183}, + quantity: 4, + byteOrder: "ABCDEFGH", + dataType: "UINT64", + scale: 10, + write: []byte{0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xAB, 0xCD}, + read: uint64(10995116717570), + }, + { + name: "register180_to_register183_abcdefgh_uint64", + address: []uint16{180, 181, 182, 183}, + quantity: 4, + byteOrder: "ABCDEFGH", + dataType: "UINT64", + scale: 1, + write: []byte{0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xAB, 0xCD}, + read: uint64(1099511671757), + }, + { + name: "register190_to_register193_hgfedcba_uint64", + address: []uint16{190, 191, 192, 193}, + quantity: 4, + byteOrder: "HGFEDCBA", + dataType: "UINT64", + scale: 1, + write: []byte{0x84, 0xF6, 0x45, 0xF9, 0xBC, 0xFE, 0xFF, 0xFF}, + read: uint64(18446742686322259968), + }, + { + name: "register200_to_register203_badcfehg_uint64", + address: []uint16{200, 201, 202, 
203}, + quantity: 4, + byteOrder: "BADCFEHG", + dataType: "UINT64", + scale: 1, + write: []byte{0xFF, 0xFF, 0xBC, 0xFE, 0x45, 0xF9, 0x84, 0xF6}, + read: uint64(18446742686322259968), + }, + { + name: "register210_to_register213_ghefcdab_uint64", + address: []uint16{210, 211, 212, 213}, + quantity: 4, + byteOrder: "GHEFCDAB", + dataType: "UINT64", + scale: 1, + write: []byte{0xF6, 0x84, 0xF9, 0x45, 0xFE, 0xBC, 0xFF, 0xFF}, + read: uint64(18446742686322259968), + }, } serv := mbserver.NewServer() From f3ee1d86dfd8b7fe61a47e2ffc2141846fb0c51b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 26 Mar 2020 17:06:27 -0700 Subject: [PATCH 1658/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a6507def6..854b42b83 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ - [#7193](https://github.com/influxdata/telegraf/pull/7193): Add additional concurrent transaction information. - [#7223](https://github.com/influxdata/telegraf/pull/7223): Add ability to specify HTTP Headers in http_listener_v2 which will added as tags. - [#7140](https://github.com/influxdata/telegraf/pull/7140): Apply ping deadline to dns lookup. +- [#7225](https://github.com/influxdata/telegraf/pull/7225): Add support for 64-bit integer types to modbus input. ## v1.14 [2020-03-26] From 608e818645ca574e19203f4e084c23195398f795 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 27 Mar 2020 15:39:32 -0700 Subject: [PATCH 1659/1815] Add docs for how to handle errors in check-deps script (#7243) --- scripts/check-deps.sh | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/scripts/check-deps.sh b/scripts/check-deps.sh index 793a9b49a..2d3c02dad 100755 --- a/scripts/check-deps.sh +++ b/scripts/check-deps.sh @@ -48,8 +48,19 @@ for dep in $(LC_ALL=C sort -u "${tmpdir}/golist"); do dep="${dep%%/v[0-9]}" dep="${dep%%/v[0-9][0-9]}" - echo "${dep}" >> "${tmpdir}/actual" + echo "${dep}" >> "${tmpdir}/HEAD" done -grep '^-' docs/LICENSE_OF_DEPENDENCIES.md | grep -v github.com/DataDog/datadog-agent | cut -f 2 -d' ' > "${tmpdir}/expected" -diff -U0 "${tmpdir}/expected" "${tmpdir}/actual" +grep '^-' docs/LICENSE_OF_DEPENDENCIES.md | grep -v github.com/DataDog/datadog-agent | cut -f 2 -d' ' > "${tmpdir}/LICENSE_OF_DEPENDENCIES.md" + +diff -U0 "${tmpdir}/LICENSE_OF_DEPENDENCIES.md" "${tmpdir}/HEAD" || +cat - < Date: Fri, 27 Mar 2020 15:40:08 -0700 Subject: [PATCH 1660/1815] Add limit to number of undelivered lines to read ahead in tail (#7210) --- plugins/inputs/tail/README.md | 18 ++++-- plugins/inputs/tail/tail.go | 98 ++++++++++++++++++++++---------- plugins/inputs/tail/tail_test.go | 38 +++++++++---- 3 files changed, 108 insertions(+), 46 deletions(-) diff --git a/plugins/inputs/tail/README.md b/plugins/inputs/tail/README.md index 27cb6418e..e9f9cc8cb 100644 --- a/plugins/inputs/tail/README.md +++ b/plugins/inputs/tail/README.md @@ -19,12 +19,11 @@ see http://man7.org/linux/man-pages/man1/tail.1.html for more details. The plugin expects messages in one of the [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). -### Configuration: +### Configuration ```toml -# Stream a log file, like the tail -f command [[inputs.tail]] - ## files to tail. + ## File names or a pattern to tail. ## These accept standard unix glob matching rules, but with the addition of ## ** as a "super asterisk". 
ie:
   ##   "/var/log/**.log"  -> recursively find all .log files in /var/log
@@ -34,14 +33,21 @@ The plugin expects messages in one of the
   ## See https://github.com/gobwas/glob for more examples
   ##
   files = ["/var/mymetrics.out"]
+
   ## Read file from beginning.
-  from_beginning = false
+  # from_beginning = false
+
   ## Whether file is a named pipe
-  pipe = false
+  # pipe = false
 
   ## Method used to watch for file updates.  Can be either "inotify" or "poll".
   # watch_method = "inotify"
 
+  ## Maximum lines of the file to process that have not yet been written by
+  ## the output.  For best throughput set this based on the number of metrics
+  ## on each line and the size of the output's metric_batch_size.
+  # max_undelivered_lines = 1000
+
   ## Data format to consume.
   ## Each data format has its own unique set of configuration options, read
   ## more about them here:
@@ -49,7 +55,7 @@ The plugin expects messages in one of the
   data_format = "influx"
```

-### Metrics:
+### Metrics

Metrics are produced according to the `data_format` option.  Additionally a
tag labeled `path` is added to the metric containing the filename being tailed.

diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go
index db4d56424..9e7d6ecf1 100644
--- a/plugins/inputs/tail/tail.go
+++ b/plugins/inputs/tail/tail.go
@@ -3,6 +3,8 @@
 package tail
 
 import (
+	"context"
+	"errors"
 	"strings"
 	"sync"
 
@@ -15,7 +17,8 @@ import (
 )
 
 const (
-	defaultWatchMethod = "inotify"
+	defaultWatchMethod         = "inotify"
+	defaultMaxUndeliveredLines = 1000
 )
 
 var (
@@ -23,21 +26,25 @@ var (
 	offsetsMutex = new(sync.Mutex)
 )
 
+type empty struct{}
+type semaphore chan empty
+
 type Tail struct {
-	Files         []string
-	FromBeginning bool
-	Pipe          bool
-	WatchMethod   string
-
-	Log telegraf.Logger
+	Files               []string        `toml:"files"`
+	FromBeginning       bool            `toml:"from_beginning"`
+	Pipe                bool            `toml:"pipe"`
+	WatchMethod         string          `toml:"watch_method"`
+	MaxUndeliveredLines int             `toml:"max_undelivered_lines"`
+	Log                 telegraf.Logger `toml:"-"`
 
 	tailers    map[string]*tail.Tail
 	offsets    map[string]int64
 	parserFunc parsers.ParserFunc
 	wg         sync.WaitGroup
-	acc        telegraf.Accumulator
-
-	sync.Mutex
+	ctx        context.Context
+	cancel     context.CancelFunc
+	acc        telegraf.TrackingAccumulator
+	sem        semaphore
 }
 
 func NewTail() *Tail {
@@ -49,13 +56,14 @@ func NewTail() *Tail {
 	offsetsMutex.Unlock()
 
 	return &Tail{
-		FromBeginning: false,
-		offsets:       offsetsCopy,
+		FromBeginning:       false,
+		MaxUndeliveredLines: 1000,
+		offsets:             offsetsCopy,
 	}
 }
 
 const sampleConfig = `
-  ## files to tail.
+  ## File names or a pattern to tail.
   ## These accept standard unix glob matching rules, but with the addition of
   ## ** as a "super asterisk". ie:
   ##   "/var/log/**.log"  -> recursively find all .log files in /var/log
@@ -65,14 +73,21 @@ const sampleConfig = `
   ## See https://github.com/gobwas/glob for more examples
   ##
   files = ["/var/mymetrics.out"]
+
   ## Read file from beginning.
-  from_beginning = false
+  # from_beginning = false
+
   ## Whether file is a named pipe
-  pipe = false
+  # pipe = false
 
   ## Method used to watch for file updates.  Can be either "inotify" or "poll".
   # watch_method = "inotify"
 
+  ## Maximum lines of the file to process that have not yet been written by
+  ## the output.  For best throughput set this based on the number of metrics
+  ## on each line and the size of the output's metric_batch_size.
+  # max_undelivered_lines = 1000
+
   ## Data format to consume.
## Each data format has its own unique set of configuration options, read ## more about them here: @@ -88,18 +103,36 @@ func (t *Tail) Description() string { return "Stream a log file, like the tail -f command" } -func (t *Tail) Gather(acc telegraf.Accumulator) error { - t.Lock() - defer t.Unlock() +func (t *Tail) Init() error { + if t.MaxUndeliveredLines == 0 { + return errors.New("max_undelivered_lines must be positive") + } + t.sem = make(semaphore, t.MaxUndeliveredLines) + return nil +} +func (t *Tail) Gather(acc telegraf.Accumulator) error { return t.tailNewFiles(true) } func (t *Tail) Start(acc telegraf.Accumulator) error { - t.Lock() - defer t.Unlock() + t.acc = acc.WithTracking(t.MaxUndeliveredLines) + + t.ctx, t.cancel = context.WithCancel(context.Background()) + + t.wg.Add(1) + go func() { + defer t.wg.Done() + for { + select { + case <-t.ctx.Done(): + return + case <-t.acc.Delivered(): + <-t.sem + } + } + }() - t.acc = acc t.tailers = make(map[string]*tail.Tail) err := t.tailNewFiles(t.FromBeginning) @@ -175,6 +208,12 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error { go func() { defer t.wg.Done() t.receiver(parser, tailer) + + t.Log.Debugf("Tail removed for %q", tailer.Filename) + + if err := tailer.Err(); err != nil { + t.Log.Errorf("Tailing %q: %s", tailer.Filename, err.Error()) + } }() t.tailers[tailer.Filename] = tailer } @@ -229,21 +268,19 @@ func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) { for _, metric := range metrics { metric.AddTag("path", tailer.Filename) - t.acc.AddMetric(metric) } - } - t.Log.Debugf("Tail removed for %q", tailer.Filename) - - if err := tailer.Err(); err != nil { - t.Log.Errorf("Tailing %q: %s", tailer.Filename, err.Error()) + // Block until plugin is stopping or room is available to add metrics. 
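+		// Sending on t.sem reserves one of the MaxUndeliveredLines slots sized
+		// in Init(); the goroutine started in Start() frees a slot each time the
+		// tracking accumulator reports a metric group as delivered, so a slow
+		// output applies backpressure here and pauses reading of the file.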
+ select { + case <-t.ctx.Done(): + return + case t.sem <- empty{}: + t.acc.AddTrackingMetricGroup(metrics) + } } } func (t *Tail) Stop() { - t.Lock() - defer t.Unlock() - for _, tailer := range t.tailers { if !t.Pipe && !t.FromBeginning { // store offset for resume @@ -260,6 +297,7 @@ func (t *Tail) Stop() { } } + t.cancel() t.wg.Wait() // persist offsets diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index 4b96e092f..88d63f723 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -26,6 +26,7 @@ func TestTailFromBeginning(t *testing.T) { tmpfile, err := ioutil.TempFile("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) + defer tmpfile.Close() _, err = tmpfile.WriteString("cpu,mytag=foo usage_idle=100\n") require.NoError(t, err) @@ -34,11 +35,13 @@ func TestTailFromBeginning(t *testing.T) { tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} tt.SetParserFunc(parsers.NewInfluxParser) - defer tt.Stop() - defer tmpfile.Close() + + err = tt.Init() + require.NoError(t, err) acc := testutil.Accumulator{} require.NoError(t, tt.Start(&acc)) + defer tt.Stop() require.NoError(t, acc.GatherError(tt.Gather)) acc.Wait(1) @@ -60,6 +63,7 @@ func TestTailFromEnd(t *testing.T) { tmpfile, err := ioutil.TempFile("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) + defer tmpfile.Close() _, err = tmpfile.WriteString("cpu,mytag=foo usage_idle=100\n") require.NoError(t, err) @@ -67,11 +71,13 @@ func TestTailFromEnd(t *testing.T) { tt.Log = testutil.Logger{} tt.Files = []string{tmpfile.Name()} tt.SetParserFunc(parsers.NewInfluxParser) - defer tt.Stop() - defer tmpfile.Close() + + err = tt.Init() + require.NoError(t, err) acc := testutil.Accumulator{} require.NoError(t, tt.Start(&acc)) + defer tt.Stop() for _, tailer := range tt.tailers { for n, err := tailer.Tell(); err == nil && n == 0; n, err = tailer.Tell() { // wait for tailer to jump to end @@ -99,17 +105,20 @@ func TestTailBadLine(t *testing.T) { tmpfile, err := ioutil.TempFile("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) + defer tmpfile.Close() tt := NewTail() tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} tt.SetParserFunc(parsers.NewInfluxParser) - defer tt.Stop() - defer tmpfile.Close() + + err = tt.Init() + require.NoError(t, err) acc := testutil.Accumulator{} require.NoError(t, tt.Start(&acc)) + defer tt.Stop() buf := &bytes.Buffer{} log.SetOutput(buf) @@ -127,6 +136,7 @@ func TestTailDosLineendings(t *testing.T) { tmpfile, err := ioutil.TempFile("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) + defer tmpfile.Close() _, err = tmpfile.WriteString("cpu usage_idle=100\r\ncpu2 usage_idle=200\r\n") require.NoError(t, err) @@ -135,11 +145,13 @@ func TestTailDosLineendings(t *testing.T) { tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} tt.SetParserFunc(parsers.NewInfluxParser) - defer tt.Stop() - defer tmpfile.Close() + + err = tt.Init() + require.NoError(t, err) acc := testutil.Accumulator{} require.NoError(t, tt.Start(&acc)) + defer tt.Stop() require.NoError(t, acc.GatherError(tt.Gather)) acc.Wait(2) @@ -180,11 +192,14 @@ cpu,42 TimeFunc: func() time.Time { return time.Unix(0, 0) }, }, nil }) - defer plugin.Stop() + + err = plugin.Init() + require.NoError(t, err) acc := testutil.Accumulator{} err = plugin.Start(&acc) require.NoError(t, err) + defer plugin.Stop() err = plugin.Gather(&acc) require.NoError(t, err) acc.Wait(2) @@ -237,11 +252,14 @@ func 
TestMultipleMetricsOnFirstLine(t *testing.T) { MetricName: "cpu", }) }) - defer plugin.Stop() + + err = plugin.Init() + require.NoError(t, err) acc := testutil.Accumulator{} err = plugin.Start(&acc) require.NoError(t, err) + defer plugin.Stop() err = plugin.Gather(&acc) require.NoError(t, err) acc.Wait(2) From 3650d74de2b4b2ca3638435354b836ae5fd0e970 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Mon, 30 Mar 2020 22:30:42 +0200 Subject: [PATCH 1661/1815] Add possibility to specify measurement per register (#7231) --- plugins/inputs/modbus/README.md | 25 ++++++++-------- plugins/inputs/modbus/modbus.go | 53 ++++++++++++++++++++++----------- 2 files changed, 48 insertions(+), 30 deletions(-) diff --git a/plugins/inputs/modbus/README.md b/plugins/inputs/modbus/README.md index 2c73ba5ec..98661e84d 100644 --- a/plugins/inputs/modbus/README.md +++ b/plugins/inputs/modbus/README.md @@ -12,17 +12,17 @@ The Modbus plugin collects Discrete Inputs, Coils, Input Registers and Holding R ## ## Device name name = "Device" - + ## Slave ID - addresses a MODBUS device on the bus ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] slave_id = 1 - + ## Timeout for each request timeout = "1s" - + # TCP - connect via Modbus/TCP controller = "tcp://localhost:502" - + # Serial (RS485; RS232) #controller = "file:///dev/ttyUSB0" #baud_rate = 9600 @@ -30,15 +30,15 @@ The Modbus plugin collects Discrete Inputs, Coils, Input Registers and Holding R #parity = "N" #stop_bits = 1 #transmission_mode = "RTU" - - + + ## Measurements ## - + ## Digital Variables, Discrete Inputs and Coils ## name - the variable name ## address - variable address - + discrete_inputs = [ { name = "Start", address = [0]}, { name = "Stop", address = [1]}, @@ -49,11 +49,12 @@ The Modbus plugin collects Discrete Inputs, Coils, Input Registers and Holding R { name = "Motor1-Run", address = [0]}, { name = "Motor1-Jog", address = [1]}, { name = "Motor1-Stop", address = [2]}, - ] - + ] + ## Analog Variables, Input Registers and Holding Registers + ## measurement - the (optional) measurement name, defaults to "modbus" ## name - the variable name - ## byte_order - the ordering of bytes + ## byte_order - the ordering of bytes ## |---AB, ABCD - Big Endian ## |---BA, DCBA - Little Endian ## |---BADC - Mid-Big Endian @@ -61,7 +62,7 @@ The Modbus plugin collects Discrete Inputs, Coils, Input Registers and Holding R ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation) ## scale - the final numeric variable representation ## address - variable address - + holding_registers = [ { name = "PowerFactor", byte_order = "AB", data_type = "FLOAT32", scale=0.01, address = [8]}, { name = "Voltage", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [0]}, diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index 46dc748c7..6775465b6 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -7,10 +7,12 @@ import ( "net" "net/url" "sort" + "time" mb "github.com/goburrow/modbus" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -44,12 +46,13 @@ type register struct { } type fieldContainer struct { - Name string `toml:"name"` - ByteOrder string `toml:"byte_order"` - DataType string `toml:"data_type"` - Scale float64 `toml:"scale"` - Address []uint16 `toml:"address"` - value 
interface{} + Measurement string `toml:"measurement"` + Name string `toml:"name"` + ByteOrder string `toml:"byte_order"` + DataType string `toml:"data_type"` + Scale float64 `toml:"scale"` + Address []uint16 `toml:"address"` + value interface{} } type registerRange struct { @@ -97,14 +100,15 @@ const sampleConfig = ` ## ## Digital Variables, Discrete Inputs and Coils - ## name - the variable name - ## address - variable address + ## measurement - the (optional) measurement name, defaults to "modbus" + ## name - the variable name + ## address - variable address discrete_inputs = [ { name = "start", address = [0]}, { name = "stop", address = [1]}, { name = "reset", address = [2]}, - { name = "emergency_stop", address = [3]}, + { name = "emergency_stop", address = [3]}, ] coils = [ { name = "motor1_run", address = [0]}, @@ -113,8 +117,9 @@ const sampleConfig = ` ] ## Analog Variables, Input Registers and Holding Registers - ## name - the variable name - ## byte_order - the ordering of bytes + ## measurement - the (optional) measurement name, defaults to "modbus" + ## name - the variable name + ## byte_order - the ordering of bytes ## |---AB, ABCD - Big Endian ## |---BA, DCBA - Little Endian ## |---BADC - Mid-Big Endian @@ -134,7 +139,7 @@ const sampleConfig = ` input_registers = [ { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, - { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, + { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, ] ` @@ -319,10 +324,11 @@ func validateFieldContainers(t []fieldContainer, n string) error { } //search name duplicate - if nameEncountered[item.Name] { - return fmt.Errorf("name '%s' is duplicated in '%s' - '%s'", item.Name, n, item.Name) + canonical_name := item.Measurement + "." 
+ item.Name + if nameEncountered[canonical_name] { + return fmt.Errorf("name '%s' is duplicated in measurement '%s' '%s' - '%s'", item.Name, item.Measurement, n, item.Name) } else { - nameEncountered[item.Name] = true + nameEncountered[canonical_name] = true } if n == cInputRegisters || n == cHoldingRegisters { @@ -635,6 +641,7 @@ func (m *Modbus) Gather(acc telegraf.Accumulator) error { } } + timestamp := time.Now() err := m.getFields() if err != nil { disconnect(m) @@ -642,18 +649,28 @@ func (m *Modbus) Gather(acc telegraf.Accumulator) error { return err } + grouper := metric.NewSeriesGrouper() for _, reg := range m.registers { - fields := make(map[string]interface{}) tags := map[string]string{ "name": m.Name, "type": reg.Type, } for _, field := range reg.Fields { - fields[field.Name] = field.value + // In case no measurement was specified we use "modbus" as default + measurement := "modbus" + if field.Measurement != "" { + measurement = field.Measurement + } + + // Group the data by series + grouper.Add(measurement, tags, timestamp, field.Name, field.value) } - acc.AddFields("modbus", fields, tags) + // Add the metrics grouped by series to the accumulator + for _, metric := range grouper.Metrics() { + acc.AddMetric(metric) + } } return nil From 218fbc41b92aa2041bf0c86654557a78668ab886 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 30 Mar 2020 13:31:33 -0700 Subject: [PATCH 1662/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 854b42b83..011f060c6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ - [#7223](https://github.com/influxdata/telegraf/pull/7223): Add ability to specify HTTP Headers in http_listener_v2 which will added as tags. - [#7140](https://github.com/influxdata/telegraf/pull/7140): Apply ping deadline to dns lookup. - [#7225](https://github.com/influxdata/telegraf/pull/7225): Add support for 64-bit integer types to modbus input. +- [#7231](https://github.com/influxdata/telegraf/pull/7231): Add possibility to specify measurement per register. 
## v1.14 [2020-03-26] From 0cad343de7e17c2177416433dbbd2753c2275729 Mon Sep 17 00:00:00 2001 From: Ilya Antipov Date: Tue, 31 Mar 2020 21:30:21 +0300 Subject: [PATCH 1663/1815] Support multiple templates for graphite serializers (#7136) --- internal/config/config.go | 13 +++ plugins/outputs/graphite/README.md | 10 ++ plugins/outputs/graphite/graphite.go | 23 +++-- plugins/outputs/graphite/graphite_test.go | 94 +++++++++++++++++++ plugins/outputs/instrumental/instrumental.go | 3 +- plugins/serializers/graphite/README.md | 10 ++ plugins/serializers/graphite/graphite.go | 56 ++++++++++- plugins/serializers/graphite/graphite_test.go | 91 ++++++++++++++++++ plugins/serializers/registry.go | 18 +++- 9 files changed, 308 insertions(+), 10 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index f72f1ef26..c2335fac2 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1891,6 +1891,18 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error } } + if node, ok := tbl.Fields["templates"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + c.Templates = append(c.Templates, str.Value) + } + } + } + } + } + if node, ok := tbl.Fields["influx_max_line_bytes"]; ok { if kv, ok := node.(*ast.KeyValue); ok { if integer, ok := kv.Value.(*ast.Integer); ok { @@ -2046,6 +2058,7 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error delete(tbl.Fields, "data_format") delete(tbl.Fields, "prefix") delete(tbl.Fields, "template") + delete(tbl.Fields, "templates") delete(tbl.Fields, "json_timestamp_units") delete(tbl.Fields, "splunkmetric_hec_routing") delete(tbl.Fields, "splunkmetric_multimetric") diff --git a/plugins/outputs/graphite/README.md b/plugins/outputs/graphite/README.md index 878eb8048..b7ffd361b 100644 --- a/plugins/outputs/graphite/README.md +++ b/plugins/outputs/graphite/README.md @@ -21,6 +21,16 @@ see the [Graphite Data Format](../../../docs/DATA_FORMATS_OUTPUT.md) ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md template = "host.tags.measurement.field" + ## Graphite templates patterns + ## 1. Template for cpu + ## 2. Template for disk* + ## 3. Default template + # templates = [ + # "cpu tags.measurement.host.field", + # "disk* measurement.field", + # "host.measurement.tags.field" + #] + ## Enable Graphite tags support # graphite_tag_support = false diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go index 09cdbe080..e7d192662 100644 --- a/plugins/outputs/graphite/graphite.go +++ b/plugins/outputs/graphite/graphite.go @@ -18,11 +18,12 @@ import ( type Graphite struct { GraphiteTagSupport bool // URL is only for backwards compatibility - Servers []string - Prefix string - Template string - Timeout int - conns []net.Conn + Servers []string + Prefix string + Template string + Templates []string + Timeout int + conns []net.Conn tlsint.ClientConfig } @@ -40,6 +41,16 @@ var sampleConfig = ` ## Enable Graphite tags support # graphite_tag_support = false + ## Graphite templates patterns + ## 1. Template for cpu + ## 2. Template for disk* + ## 3. 
Default template + # templates = [ + # "cpu tags.measurement.host.field", + # "disk* measurement.field", + # "host.measurement.tags.field" + #] + ## timeout in seconds for the write connection to graphite timeout = 2 @@ -134,7 +145,7 @@ func checkEOF(conn net.Conn) { func (g *Graphite) Write(metrics []telegraf.Metric) error { // Prepare data var batch []byte - s, err := serializers.NewGraphiteSerializer(g.Prefix, g.Template, g.GraphiteTagSupport) + s, err := serializers.NewGraphiteSerializer(g.Prefix, g.Template, g.GraphiteTagSupport, g.Templates) if err != nil { return err } diff --git a/plugins/outputs/graphite/graphite_test.go b/plugins/outputs/graphite/graphite_test.go index 3857236e5..ad76d45b5 100644 --- a/plugins/outputs/graphite/graphite_test.go +++ b/plugins/outputs/graphite/graphite_test.go @@ -98,6 +98,70 @@ func TestGraphiteOK(t *testing.T) { g.Close() } +func TestGraphiteOKWithMultipleTemplates(t *testing.T) { + var wg sync.WaitGroup + // Start TCP server + wg.Add(1) + t.Log("Starting server") + TCPServer1WithMultipleTemplates(t, &wg) + + // Init plugin + g := Graphite{ + Prefix: "my.prefix", + Template: "measurement.host.tags.field", + Templates: []string{ + "my_* host.measurement.tags.field", + "measurement.tags.host.field", + }, + } + + // Init metrics + m1, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1", "mytag": "valuetag"}, + map[string]interface{}{"myfield": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m2, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1", "mytag": "valuetag"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m3, _ := metric.New( + "my_measurement", + map[string]string{"host": "192.168.0.1", "mytag": "valuetag"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + // Prepare point list + metrics := []telegraf.Metric{m1} + metrics2 := []telegraf.Metric{m2, m3} + err1 := g.Connect() + require.NoError(t, err1) + // Send Data + t.Log("Send first data") + err2 := g.Write(metrics) + require.NoError(t, err2) + + // Waiting TCPserver, should reconnect and resend + wg.Wait() + t.Log("Finished Waiting for first data") + var wg2 sync.WaitGroup + // Start TCP server + wg2.Add(1) + TCPServer2WithMultipleTemplates(t, &wg2) + //Write but expect an error, but reconnect + err3 := g.Write(metrics2) + t.Log("Finished writing second data, it should have reconnected automatically") + + require.NoError(t, err3) + t.Log("Finished writing third data") + wg2.Wait() + g.Close() +} + func TestGraphiteOkWithTags(t *testing.T) { var wg sync.WaitGroup // Start TCP server @@ -188,6 +252,36 @@ func TCPServer2(t *testing.T, wg *sync.WaitGroup) { }() } +func TCPServer1WithMultipleTemplates(t *testing.T, wg *sync.WaitGroup) { + tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + go func() { + defer wg.Done() + conn, _ := (tcpServer).Accept() + reader := bufio.NewReader(conn) + tp := textproto.NewReader(reader) + data1, _ := tp.ReadLine() + assert.Equal(t, "my.prefix.mymeasurement.valuetag.192_168_0_1.myfield 3.14 1289430000", data1) + conn.Close() + tcpServer.Close() + }() +} + +func TCPServer2WithMultipleTemplates(t *testing.T, wg *sync.WaitGroup) { + tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + go func() { + defer wg.Done() + conn2, _ := (tcpServer).Accept() + reader := bufio.NewReader(conn2) + tp := textproto.NewReader(reader) + data2, _ := 
tp.ReadLine() + assert.Equal(t, "my.prefix.mymeasurement.valuetag.192_168_0_1 3.14 1289430000", data2) + data3, _ := tp.ReadLine() + assert.Equal(t, "my.prefix.192_168_0_1.my_measurement.valuetag 3.14 1289430000", data3) + conn2.Close() + tcpServer.Close() + }() +} + func TCPServer1WithTags(t *testing.T, wg *sync.WaitGroup) { tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") go func() { diff --git a/plugins/outputs/instrumental/instrumental.go b/plugins/outputs/instrumental/instrumental.go index f142705a5..a861ebc28 100644 --- a/plugins/outputs/instrumental/instrumental.go +++ b/plugins/outputs/instrumental/instrumental.go @@ -27,6 +27,7 @@ type Instrumental struct { Prefix string DataFormat string Template string + Templates []string Timeout internal.Duration Debug bool @@ -85,7 +86,7 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error { } } - s, err := serializers.NewGraphiteSerializer(i.Prefix, i.Template, false) + s, err := serializers.NewGraphiteSerializer(i.Prefix, i.Template, false, i.Templates) if err != nil { return err } diff --git a/plugins/serializers/graphite/README.md b/plugins/serializers/graphite/README.md index 6cff2cbe5..74bde2b5d 100644 --- a/plugins/serializers/graphite/README.md +++ b/plugins/serializers/graphite/README.md @@ -22,6 +22,16 @@ method is used, otherwise the [Template Pattern](templates) is used. prefix = "telegraf" ## Graphite template pattern template = "host.tags.measurement.field" + + ## Graphite templates patterns + ## 1. Template for cpu + ## 2. Template for disk* + ## 3. Default template + # templates = [ + # "cpu tags.measurement.host.field", + # "disk* measurement.field", + # "host.measurement.tags.field" + #] ## Support Graphite tags, recommended to enable when using Graphite 1.1 or later. # graphite_tag_support = false diff --git a/plugins/serializers/graphite/graphite.go b/plugins/serializers/graphite/graphite.go index d02b0e26b..590f80b45 100644 --- a/plugins/serializers/graphite/graphite.go +++ b/plugins/serializers/graphite/graphite.go @@ -10,6 +10,7 @@ import ( "strings" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" ) const DEFAULT_TEMPLATE = "host.tags.measurement.field" @@ -29,10 +30,16 @@ var ( fieldDeleter = strings.NewReplacer(".FIELDNAME", "", "FIELDNAME.", "") ) +type GraphiteTemplate struct { + Filter filter.Filter + Value string +} + type GraphiteSerializer struct { Prefix string Template string TagSupport bool + Templates []*GraphiteTemplate } func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) { @@ -59,7 +66,15 @@ func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) { out = append(out, point...) } default: - bucket := SerializeBucketName(metric.Name(), metric.Tags(), s.Template, s.Prefix) + template := s.Template + for _, graphiteTemplate := range s.Templates { + if graphiteTemplate.Filter.Match(metric.Name()) { + template = graphiteTemplate.Value + break + } + } + + bucket := SerializeBucketName(metric.Name(), metric.Tags(), template, s.Prefix) if bucket == "" { return out, nil } @@ -185,6 +200,45 @@ func SerializeBucketName( return prefix + "." 
+ strings.Join(out, ".") } +func InitGraphiteTemplates(templates []string) ([]*GraphiteTemplate, string, error) { + var graphiteTemplates []*GraphiteTemplate + defaultTemplate := "" + + for i, t := range templates { + parts := strings.Fields(t) + + if len(parts) == 0 { + return nil, "", fmt.Errorf("missing template at position: %d", i) + } + if len(parts) == 1 { + if parts[0] == "" { + return nil, "", fmt.Errorf("missing template at position: %d", i) + } else { + // Override default template + defaultTemplate = t + continue + } + } + + if len(parts) > 2 { + return nil, "", fmt.Errorf("invalid template format: '%s'", t) + } + + tFilter, err := filter.Compile([]string{parts[0]}) + + if err != nil { + return nil, "", err + } + + graphiteTemplates = append(graphiteTemplates, &GraphiteTemplate{ + Filter: tFilter, + Value: parts[1], + }) + } + + return graphiteTemplates, defaultTemplate, nil +} + // SerializeBucketNameWithTags will take the given measurement name and tags and // produce a graphite bucket. It will use the Graphite11Serializer. // http://graphite.readthedocs.io/en/latest/tags.html diff --git a/plugins/serializers/graphite/graphite_test.go b/plugins/serializers/graphite/graphite_test.go index e72ed7a30..e50b7292b 100644 --- a/plugins/serializers/graphite/graphite_test.go +++ b/plugins/serializers/graphite/graphite_test.go @@ -144,6 +144,97 @@ func TestSerializeMetricHost(t *testing.T) { assert.Equal(t, expS, mS) } +func TestSerializeMetricHostWithMultipleTemplates(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "host": "localhost", + "cpu": "cpu0", + "datacenter": "us-west-2", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + "usage_busy": float64(8.5), + } + m1, err := metric.New("cpu", tags, fields, now) + m2, err := metric.New("new_cpu", tags, fields, now) + assert.NoError(t, err) + + templates, defaultTemplate, err := InitGraphiteTemplates([]string{ + "cp* tags.measurement.host.field", + "new_cpu tags.host.measurement.field", + }) + assert.NoError(t, err) + assert.Equal(t, defaultTemplate, "") + + s := GraphiteSerializer{ + Templates: templates, + } + + buf, _ := s.Serialize(m1) + buf2, _ := s.Serialize(m2) + + buf = append(buf, buf2...) 
+ + mS := strings.Split(strings.TrimSpace(string(buf)), "\n") + assert.NoError(t, err) + + expS := []string{ + fmt.Sprintf("cpu0.us-west-2.cpu.localhost.usage_idle 91.5 %d", now.Unix()), + fmt.Sprintf("cpu0.us-west-2.cpu.localhost.usage_busy 8.5 %d", now.Unix()), + fmt.Sprintf("cpu0.us-west-2.localhost.new_cpu.usage_idle 91.5 %d", now.Unix()), + fmt.Sprintf("cpu0.us-west-2.localhost.new_cpu.usage_busy 8.5 %d", now.Unix()), + } + sort.Strings(mS) + sort.Strings(expS) + assert.Equal(t, expS, mS) +} + +func TestSerializeMetricHostWithMultipleTemplatesWithDefault(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "host": "localhost", + "cpu": "cpu0", + "datacenter": "us-west-2", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + "usage_busy": float64(8.5), + } + m1, err := metric.New("cpu", tags, fields, now) + m2, err := metric.New("new_cpu", tags, fields, now) + assert.NoError(t, err) + + templates, defaultTemplate, err := InitGraphiteTemplates([]string{ + "cp* tags.measurement.host.field", + "tags.host.measurement.field", + }) + assert.NoError(t, err) + assert.Equal(t, defaultTemplate, "tags.host.measurement.field") + + s := GraphiteSerializer{ + Templates: templates, + Template: defaultTemplate, + } + + buf, _ := s.Serialize(m1) + buf2, _ := s.Serialize(m2) + + buf = append(buf, buf2...) + + mS := strings.Split(strings.TrimSpace(string(buf)), "\n") + assert.NoError(t, err) + + expS := []string{ + fmt.Sprintf("cpu0.us-west-2.cpu.localhost.usage_idle 91.5 %d", now.Unix()), + fmt.Sprintf("cpu0.us-west-2.cpu.localhost.usage_busy 8.5 %d", now.Unix()), + fmt.Sprintf("cpu0.us-west-2.localhost.new_cpu.usage_idle 91.5 %d", now.Unix()), + fmt.Sprintf("cpu0.us-west-2.localhost.new_cpu.usage_busy 8.5 %d", now.Unix()), + } + sort.Strings(mS) + sort.Strings(expS) + assert.Equal(t, expS, mS) +} + func TestSerializeMetricHostWithTagSupport(t *testing.T) { now := time.Now() tags := map[string]string{ diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index dc9859e34..17de980fd 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -68,6 +68,9 @@ type Config struct { // only supports Graphite Template string `toml:"template"` + // Templates same Template, but multiple + Templates []string `toml:"templates"` + // Timestamp units to use for JSON formatted output TimestampUnits time.Duration `toml:"timestamp_units"` @@ -104,7 +107,7 @@ func NewSerializer(config *Config) (Serializer, error) { case "influx": serializer, err = NewInfluxSerializerConfig(config) case "graphite": - serializer, err = NewGraphiteSerializer(config.Prefix, config.Template, config.GraphiteTagSupport) + serializer, err = NewGraphiteSerializer(config.Prefix, config.Template, config.GraphiteTagSupport, config.Templates) case "json": serializer, err = NewJsonSerializer(config.TimestampUnits) case "splunkmetric": @@ -188,10 +191,21 @@ func NewInfluxSerializer() (Serializer, error) { return influx.NewSerializer(), nil } -func NewGraphiteSerializer(prefix, template string, tag_support bool) (Serializer, error) { +func NewGraphiteSerializer(prefix, template string, tag_support bool, templates []string) (Serializer, error) { + graphiteTemplates, defaultTemplate, err := graphite.InitGraphiteTemplates(templates) + + if err != nil { + return nil, err + } + + if defaultTemplate != "" { + template = defaultTemplate + } + return &graphite.GraphiteSerializer{ Prefix: prefix, Template: template, TagSupport: tag_support, + Templates: graphiteTemplates, }, nil 
} From 8ff264555085b989be92368e36c0967e970c7721 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 31 Mar 2020 11:31:09 -0700 Subject: [PATCH 1664/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 011f060c6..5fc504056 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ - [#7140](https://github.com/influxdata/telegraf/pull/7140): Apply ping deadline to dns lookup. - [#7225](https://github.com/influxdata/telegraf/pull/7225): Add support for 64-bit integer types to modbus input. - [#7231](https://github.com/influxdata/telegraf/pull/7231): Add possibility to specify measurement per register. +- [#7136](https://github.com/influxdata/telegraf/pull/7136): Support multiple templates for graphite serializers. ## v1.14 [2020-03-26] From ccefc4271172a79a04cf9c1277e7a4c6e0dd6338 Mon Sep 17 00:00:00 2001 From: denzilribeiro Date: Tue, 31 Mar 2020 14:54:22 -0500 Subject: [PATCH 1665/1815] Add OPTION RECOMPILE for perf reasons due to temp table (#7242) --- plugins/inputs/sqlserver/sqlserver.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index c69db611a..0cd1453ab 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -790,7 +790,8 @@ FROM @PCounters AS pc AND pc.object_name = pc1.object_name AND pc.instance_name = pc1.instance_name AND pc1.counter_name LIKE '%base' -WHERE pc.counter_name NOT LIKE '% base'; +WHERE pc.counter_name NOT LIKE '% base' +OPTION(RECOMPILE); ` // Conditional check based on Azure SQL DB v/s the rest aka (Azure SQL Managed instance OR On-prem SQL Server) From c083c1f64d05d72bdacfe03847f2831d224a4ee0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 31 Mar 2020 12:56:53 -0700 Subject: [PATCH 1666/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5fc504056..838f28075 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,12 @@ - [#7231](https://github.com/influxdata/telegraf/pull/7231): Add possibility to specify measurement per register. - [#7136](https://github.com/influxdata/telegraf/pull/7136): Support multiple templates for graphite serializers. +## v1.14.1 [unreleased] + +#### Bugfixes + +- [#7236](https://github.com/influxdata/telegraf/pull/7236): Fix PerformanceCounter query performance degradation in sqlserver input. + ## v1.14 [2020-03-26] #### Release Notes From 6445e775cfb7f11954cadae12e0b082959d2e295 Mon Sep 17 00:00:00 2001 From: kelseiv <47797004+kelseiv@users.noreply.github.com> Date: Thu, 2 Apr 2020 18:24:10 -0700 Subject: [PATCH 1667/1815] Document kapacitor_alert and kapacitor_cluster measurements (#7278) --- plugins/inputs/kapacitor/README.md | 40 ++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/kapacitor/README.md b/plugins/inputs/kapacitor/README.md index 8a6f3477f..6284e6d77 100644 --- a/plugins/inputs/kapacitor/README.md +++ b/plugins/inputs/kapacitor/README.md @@ -1,6 +1,6 @@ # Kapacitor Plugin -The Kapacitor plugin will collect metrics from the given Kapacitor instances. +The Kapacitor plugin collects metrics from the given Kapacitor instances. ### Configuration: @@ -23,12 +23,20 @@ The Kapacitor plugin will collect metrics from the given Kapacitor instances. 
# insecure_skip_verify = false
```

-### Measurements & Fields
+### Measurements and fields

 - [kapacitor](#kapacitor)
   - [num_enabled_tasks](#num_enabled_tasks) _(integer)_
   - [num_subscriptions](#num_subscriptions) _(integer)_
   - [num_tasks](#num_tasks) _(integer)_
+- [kapacitor_alert](#kapacitor_alert)
+  - [notification-dropped](#notification-dropped) _(integer)_
+  - [primary-handle-count](#primary-handle-count) _(integer)_
+  - [secondary-handle-count](#secondary-handle-count) _(integer)_
+- (Kapacitor Enterprise only) [kapacitor_cluster](#kapacitor_cluster)
+  - [dropped_member_events](#dropped_member_events) _(integer)_
+  - [dropped_user_events](#dropped_user_events) _(integer)_
+  - [query_handler_errors](#query_handler_errors) _(integer)_
 - [kapacitor_edges](#kapacitor_edges)
   - [collected](#collected) _(integer)_
   - [emitted](#emitted) _(integer)_
@@ -96,6 +104,34 @@ The total number of Kapacitor tasks.
 
 ---
 
+### kapacitor_alert
+The `kapacitor_alert` measurement stores fields with information related to
+[Kapacitor alerts](https://docs.influxdata.com/kapacitor/v1.5/working/alerts/).
+
+#### notification-dropped
+The number of internal notifications dropped because they arrive too late from another Kapacitor node.
+If this count is increasing, Kapacitor Enterprise nodes aren't able to communicate fast enough
+to keep up with the volume of alerts.
+
+#### primary-handle-count
+The number of times this node handled an alert as the primary. This count should increase under normal conditions.
+
+#### secondary-handle-count
+The number of times this node handled an alert as the secondary. An increase in this counter indicates that the primary is failing to handle alerts in a timely manner.
+
+---
+
+### kapacitor_cluster
+The `kapacitor_cluster` measurement reflects the ability of [Kapacitor nodes to communicate](https://docs.influxdata.com/enterprise_kapacitor/v1.5/administration/configuration/#cluster-communications) with one another. Specifically, these metrics track the gossip communication between the Kapacitor nodes.
+
+#### dropped_member_events
+The number of gossip member events that were dropped.
+
+#### dropped_user_events
+The number of gossip user events that were dropped.
+
+---
+
 ### kapacitor_edges
 The `kapacitor_edges` measurement stores fields with information related to
 [edges](https://docs.influxdata.com/kapacitor/latest/tick/introduction/#pipelines)
From fb0fee0fbb00583eb5c0b5a8b53ebd9e3f4b276c Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Fri, 3 Apr 2020 10:09:24 -0700
Subject: [PATCH 1668/1815] Update permission docs on postfix input (#7255)

---
 plugins/inputs/postfix/README.md | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/plugins/inputs/postfix/README.md b/plugins/inputs/postfix/README.md
index a8d4a7537..2fdfacd9d 100644
--- a/plugins/inputs/postfix/README.md
+++ b/plugins/inputs/postfix/README.md
@@ -2,7 +2,10 @@

 The postfix plugin reports metrics on the postfix queues.

-For each of the active, hold, incoming, maildrop, and deferred queues (http://www.postfix.org/QSHAPE_README.html#queues), it will report the queue length (number of items), size (bytes used by items), and age (age of oldest item in seconds).
+For each of the active, hold, incoming, maildrop, and deferred queues
+(http://www.postfix.org/QSHAPE_README.html#queues), it will report the queue
+length (number of items), size (bytes used by items), and age (age of oldest
+item in seconds).
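+
+As a rough illustration (the values below are made up), each queue then yields
+one `postfix_queue` metric per gather, for example
+`postfix_queue,queue=active length=3i,size=12301i,age=9i`.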
### Configuration

```toml
[[inputs.postfix]]
   ## Postfix queue directory. If not provided, telegraf will try to use
   ## 'postconf -h queue_directory' to determine it.
   # queue_directory = "/var/spool/postfix"
```

-#### Permissions:
+#### Permissions

 Telegraf will need read access to the files in the queue directory.  You may
 need to alter the permissions of these directories to provide access to the
 telegraf user.

+This can be set up using either standard unix permissions or Posix ACLs; you
+only need to use one method:
+
 Unix permissions:
 ```sh
 $ sudo chgrp -R telegraf /var/spool/postfix/{active,hold,incoming,deferred}
@@ -29,21 +35,20 @@ $ sudo chmod g+r /var/spool/postfix/maildrop

 Posix ACL:
 ```sh
-$ sudo setfacl -m g:telegraf:rX /var/spool/postfix/{,active,hold,incoming,deferred,maildrop}
-$ sudo setfacl -Rdm g:telegraf:rX /var/spool/postfix/{,active,hold,incoming,deferred,maildrop}
+$ sudo setfacl -Rm g:telegraf:rX /var/spool/postfix/
+$ sudo setfacl -dm g:telegraf:rX /var/spool/postfix/
 ```

-### Measurements & Fields:
+### Metrics

 - postfix_queue
+  - tags:
+    - queue
+  - fields:
     - length (integer)
     - size (integer, bytes)
     - age (integer, seconds)

-### Tags:
-
-- postfix_queue
-  - queue

 ### Example Output
From 71a67ef22742e7ad4aacf2a8be20718bc0198355 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Fri, 3 Apr 2020 10:11:41 -0700
Subject: [PATCH 1669/1815] Improve documentation for the Metric interface
 (#7256)

---
 metric.go | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 61 insertions(+), 5 deletions(-)

diff --git a/metric.go b/metric.go
index 396321e6e..1b7dfb6b2 100644
--- a/metric.go
+++ b/metric.go
@@ -17,43 +17,93 @@ const (
 	Histogram
 )
 
+// Tag represents a single tag key and value.
 type Tag struct {
 	Key   string
 	Value string
 }
 
+// Field represents a single field key and value.
 type Field struct {
 	Key   string
 	Value interface{}
 }
 
+// Metric is the type of data that is processed by Telegraf.  Input plugins,
+// and to a lesser degree, Processor and Aggregator plugins create new Metrics
+// and Output plugins write them.
 type Metric interface {
-	// Getting data structure functions
+	// Name is the primary identifier for the Metric and corresponds to the
+	// measurement in the InfluxDB data model.
 	Name() string
+
+	// Tags returns the tags as a map.  This method is deprecated, use TagList instead.
 	Tags() map[string]string
+
+	// TagList returns the tags as a slice ordered by the tag key in lexical
+	// bytewise ascending order.  The returned value should not be modified,
+	// use the AddTag or RemoveTag methods instead.
 	TagList() []*Tag
+
+	// Fields returns the fields as a map.  This method is deprecated, use FieldList instead.
 	Fields() map[string]interface{}
+
+	// FieldList returns the fields as a slice in an undefined order.  The
+	// returned value should not be modified, use the AddField or RemoveField
+	// methods instead.
 	FieldList() []*Field
+
+	// Time returns the timestamp of the metric.
 	Time() time.Time
+
+	// Type returns a general type for the entire metric that describes how you
+	// might interpret or aggregate the values.
+	//
+	// This method may be removed in the future and its use is discouraged.
 	Type() ValueType
 
-	// Name functions
+	// SetName sets the metric name.
 	SetName(name string)
+
+	// AddPrefix adds a string to the front of the metric name.  It is
+	// equivalent to m.SetName(prefix + m.Name()).
+	//
+	// This method is deprecated, use SetName instead.
 	AddPrefix(prefix string)
+
+	// AddSuffix appends a string to the back of the metric name.
It is
+	// equivalent to m.SetName(m.Name() + suffix).
+	//
+	// This method is deprecated, use SetName instead.
 	AddSuffix(suffix string)
 
-	// Tag functions
+	// GetTag returns the value of a tag and a boolean to indicate if it was set.
 	GetTag(key string) (string, bool)
+
+	// HasTag returns true if the tag is set on the Metric.
 	HasTag(key string) bool
+
+	// AddTag sets the tag on the Metric.  If the Metric already has the tag
+	// set then the current value is replaced.
 	AddTag(key, value string)
+
+	// RemoveTag removes the tag if it is set.
 	RemoveTag(key string)
 
-	// Field functions
+	// GetField returns the value of a field and a boolean to indicate if it was set.
 	GetField(key string) (interface{}, bool)
+
+	// HasField returns true if the field is set on the Metric.
 	HasField(key string) bool
+
+	// AddField sets the field on the Metric.  If the Metric already has the field
+	// set then the current value is replaced.
 	AddField(key string, value interface{})
+
+	// RemoveField removes the field if it is set.
 	RemoveField(key string)
 
+	// SetTime sets the timestamp of the Metric.
 	SetTime(t time.Time)
 
 	// HashID returns a unique identifier for the series.
@@ -73,7 +123,13 @@ type Metric interface {
 	// to any output.
 	Drop()
 
-	// Mark Metric as an aggregate
+	// SetAggregate indicates the metric is an aggregated value.
+	//
+	// This method may be removed in the future and its use is discouraged.
 	SetAggregate(bool)
+
+	// IsAggregate returns true if the Metric is an aggregate.
+	//
+	// This method may be removed in the future and its use is discouraged.
 	IsAggregate() bool
 }
From 73911d00f11f22148d7d8bca653bbbfef4b45b67 Mon Sep 17 00:00:00 2001
From: Russ Savage
Date: Fri, 3 Apr 2020 12:52:23 -0700
Subject: [PATCH 1670/1815] Add series cardinality warning to sflow readme
 (#7285)

---
 plugins/inputs/sflow/README.md | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/plugins/inputs/sflow/README.md b/plugins/inputs/sflow/README.md
index 07cd2024b..73bbcb1e0 100644
--- a/plugins/inputs/sflow/README.md
+++ b/plugins/inputs/sflow/README.md
@@ -6,6 +6,20 @@ accordance with the specification from [sflow.org](https://sflow.org/).
 Currently only Flow Samples of Ethernet / IPv4 & IPv4 TCP & UDP headers are
 turned into metrics.  Counters and other header samples are ignored.
 
+#### Series Cardinality Warning
+
+This plugin may produce a high number of series which, when not controlled
+for, will cause high load on your database.  Use the following techniques to
+avoid cardinality issues:
+
+- Use [metric filtering][] options to exclude unneeded measurements and tags
+  (a sketch follows this list).
+- Write to a database with an appropriate [retention policy][].
+- Limit series cardinality in your database using the
+  [max-series-per-database][] and [max-values-per-tag][] settings.
+- Consider using the [Time Series Index][tsi].
+- Monitor your database's [series cardinality][].
+- Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques.
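+
+A minimal sketch of the first technique, assuming a deployment that only
+graphs byte and frame counters (the field and tag names are taken from the
+example output at the end of this document):
+
+```toml
+[[inputs.sflow]]
+  service_address = "udp://:6343"
+  ## Keep only the fields this deployment actually graphs.
+  fieldpass = ["bytes", "frame_length", "sampling_rate"]
+  ## Drop the highest-cardinality tags, such as the per-flow ports.
+  tagexclude = ["src_port", "dst_port"]
+```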
+ ### Configuration ```toml @@ -90,3 +104,11 @@ $ sudo tcpdump -s 0 -i eth0 -w telegraf-sflow.pcap host 127.0.0.1 and port 6343 ``` sflow,agent_address=0.0.0.0,dst_ip=10.0.0.2,dst_mac=ff:ff:ff:ff:ff:ff,dst_port=40042,ether_type=IPv4,header_protocol=ETHERNET-ISO88023,input_ifindex=6,ip_dscp=27,ip_ecn=0,output_ifindex=1073741823,source_id_index=3,source_id_type=0,src_ip=10.0.0.1,src_mac=ff:ff:ff:ff:ff:ff,src_port=443 bytes=1570i,drops=0i,frame_length=157i,header_length=128i,ip_flags=2i,ip_fragment_offset=0i,ip_total_length=139i,ip_ttl=42i,sampling_rate=10i,tcp_header_length=0i,tcp_urgent_pointer=0i,tcp_window_size=14i 1584473704793580447 ``` + +[metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering +[retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ +[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000 +[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000 +[tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ +[series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality +[influx-docs]: https://docs.influxdata.com/influxdb/latest/ From c56596dec2c080dc1ab601a563ed3f470f0615df Mon Sep 17 00:00:00 2001 From: Giovanni Luisotto Date: Mon, 6 Apr 2020 15:53:54 +0000 Subject: [PATCH 1671/1815] Sql Server - Disk Space Measurement (#7214) --- plugins/inputs/sqlserver/README.md | 29 ++++-- plugins/inputs/sqlserver/sqlserver.go | 111 ++++++++++++++------- plugins/inputs/sqlserver/sqlserver_test.go | 4 +- 3 files changed, 96 insertions(+), 48 deletions(-) diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index 23922c169..9d55955d1 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -48,27 +48,35 @@ GO ## Optional parameter, setting this to 2 will use a new version ## of the collection queries that break compatibility with the original - ## dashboards. All new functionality is under V2 + ## dashboards. + ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB query_version = 2 ## If you are using AzureDB, setting this to true will gather resource utilization metrics - # azuredb = true + # azuredb = false - ## Possible queries: + ## Possible queries + ## Version 2: ## - PerformanceCounters ## - WaitStatsCategorized ## - DatabaseIO - ## - DatabaseProperties + ## - ServerProperties + ## - MemoryClerk + ## - Schedulers + ## - SqlRequests + ## - VolumeSpace + ## Version 1: + ## - PerformanceCounters + ## - WaitStatsCategorized ## - CPUHistory + ## - DatabaseIO ## - DatabaseSize ## - DatabaseStats + ## - DatabaseProperties ## - MemoryClerk ## - VolumeSpace - ## - Schedulers - ## - AzureDBResourceStats - ## - AzureDBResourceGovernance - ## - SqlRequests - ## - ServerProperties + ## - PerformanceMetrics + ## A list of queries to include. If not specified, all the above listed queries are used. # include_query = [] @@ -79,7 +87,7 @@ GO ### Metrics: To provide backwards compatibility, this plugin support two versions of metrics queries. -**Note**: Version 2 queries are not backwards compatible with the old queries. Any dashboards or queries based on the old query format will not work with the new format. 
The version 2 queries are written in such a way as to only gather SQL specific metrics (no disk space or overall CPU related metrics) and they only report raw metrics, no math has been done to calculate deltas. To graph this data you must calculate deltas in your dashboarding software. +**Note**: Version 2 queries are not backwards compatible with the old queries. Any dashboards or queries based on the old query format will not work with the new format. The version 2 queries are written in such a way as to only gather SQL specific metrics (no overall CPU related metrics) and they only report raw metrics, no math has been done to calculate deltas. To graph this data you must calculate deltas in your dashboarding software. #### Version 1 (deprecated in 1.6): The original metrics queries provide: @@ -115,6 +123,7 @@ The new (version 2) metrics provide: - *SqlRequests* - This captures a snapshot of dm_exec_requests and dm_exec_sessions that gives you running requests as well as wait types and blocking sessions. +- *VolumeSpace* - uses sys.dm_os_volume_stats to get total, used and occupied space on every disk that contains a data or log file. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). It is pointless to run this with high frequency (ie: every 10s), but it won't cause any problem. In order to allow tracking on a per statement basis this query produces a unique tag for each query. Depending on the database workload, this may diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 0cd1453ab..ec147e4fe 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -35,46 +35,53 @@ type MapQuery map[string]Query const defaultServer = "Server=.;app name=telegraf;log=1;" const sampleConfig = ` - ## Specify instances to monitor with a list of connection strings. - ## All connection parameters are optional. - ## By default, the host is localhost, listening on default port, TCP 1433. - ## for Windows, the user is the currently running AD user (SSO). - ## See https://github.com/denisenkom/go-mssqldb for detailed connection - ## parameters, in particular, tls connections can be created like so: - ## "encrypt=true;certificate=;hostNameInCertificate=" - # servers = [ - # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", - # ] +## Specify instances to monitor with a list of connection strings. +## All connection parameters are optional. +## By default, the host is localhost, listening on default port, TCP 1433. +## for Windows, the user is the currently running AD user (SSO). +## See https://github.com/denisenkom/go-mssqldb for detailed connection +## parameters, in particular, tls connections can be created like so: +## "encrypt=true;certificate=;hostNameInCertificate=" +# servers = [ +# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", +# ] - ## Optional parameter, setting this to 2 will use a new version - ## of the collection queries that break compatibility with the original - ## dashboards. - query_version = 2 +## Optional parameter, setting this to 2 will use a new version +## of the collection queries that break compatibility with the original +## dashboards. 
+## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB +query_version = 2 - ## If you are using AzureDB, setting this to true will gather resource utilization metrics - # azuredb = false +## If you are using AzureDB, setting this to true will gather resource utilization metrics +# azuredb = false - ## Possible queries: - ## - PerformanceCounters - ## - WaitStatsCategorized - ## - DatabaseIO - ## - DatabaseProperties - ## - CPUHistory - ## - DatabaseSize - ## - DatabaseStats - ## - MemoryClerk - ## - VolumeSpace - ## - PerformanceMetrics - ## - Schedulers - ## - AzureDBResourceStats - ## - AzureDBResourceGovernance - ## - SqlRequests - ## - ServerProperties - ## A list of queries to include. If not specified, all the above listed queries are used. - # include_query = [] +## Possible queries +## Version 2: +## - PerformanceCounters +## - WaitStatsCategorized +## - DatabaseIO +## - ServerProperties +## - MemoryClerk +## - Schedulers +## - SqlRequests +## - VolumeSpace +## Version 1: +## - PerformanceCounters +## - WaitStatsCategorized +## - CPUHistory +## - DatabaseIO +## - DatabaseSize +## - DatabaseStats +## - DatabaseProperties +## - MemoryClerk +## - VolumeSpace +## - PerformanceMetrics - ## A list of queries to explicitly ignore. - exclude_query = [ 'Schedulers' , 'SqlRequests'] +## A list of queries to include. If not specified, all the above listed queries are used. +# include_query = [] + +## A list of queries to explicitly ignore. +exclude_query = [ 'Schedulers' , 'SqlRequests'] ` // SampleConfig return the sample configuration @@ -109,6 +116,7 @@ func initQueries(s *SQLServer) error { queries["MemoryClerk"] = Query{Script: sqlMemoryClerkV2, ResultByRow: false} queries["Schedulers"] = Query{Script: sqlServerSchedulersV2, ResultByRow: false} queries["SqlRequests"] = Query{Script: sqlServerRequestsV2, ResultByRow: false} + queries["VolumeSpace"] = Query{Script: sqlServerVolumeSpaceV2, ResultByRow: false} } else { queries["PerformanceCounters"] = Query{Script: sqlPerformanceCounters, ResultByRow: true} queries["WaitStatsCategorized"] = Query{Script: sqlWaitStatsCategorized, ResultByRow: false} @@ -1554,6 +1562,37 @@ SELECT ` +const sqlServerVolumeSpaceV2 string = ` +/* Only for on-prem version of SQL Server +Gets data about disk space, only if the disk is used by SQL Server +EngineEdition: +1 = Personal or Desktop Engine +2 = Standard +3 = Enterprise +4 = Express +5 = SQL Database +6 = SQL Data Warehouse +8 = Managed Instance +*/ +IF SERVERPROPERTY('EngineEdition') NOT IN (5,8) + BEGIN + SELECT DISTINCT + 'sqlserver_disk_space' AS [measurement] + ,SERVERPROPERTY('machinename') AS [server_name] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,IIF( RIGHT(vs.[volume_mount_point],1) = '\' /*Tag value cannot end with \ */ + ,LEFT(vs.[volume_mount_point],LEN(vs.[volume_mount_point])-1) + ,vs.[volume_mount_point] + ) AS [volume_mount_point] + ,vs.[total_bytes] AS [total_space_bytes] + ,vs.[available_bytes] AS [available_space_bytes] + ,vs.[total_bytes] - vs.[available_bytes] AS [used_space_bytes] + FROM + sys.master_files as mf + CROSS APPLY sys.dm_os_volume_stats(mf.database_id, mf.file_id) as vs + END +` + // Queries V1 const sqlPerformanceMetrics string = `SET DEADLOCK_PRIORITY -10; SET NOCOUNT ON; diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go index 8097b070b..ea638ef20 100644 --- a/plugins/inputs/sqlserver/sqlserver_test.go +++ b/plugins/inputs/sqlserver/sqlserver_test.go @@ -16,13 +16,13 
@@ func TestSqlServer_QueriesInclusionExclusion(t *testing.T) { cases := []map[string]interface{}{ { "IncludeQuery": []string{}, - "ExcludeQuery": []string{"WaitStatsCategorized", "DatabaseIO", "ServerProperties", "MemoryClerk", "Schedulers"}, + "ExcludeQuery": []string{"WaitStatsCategorized", "DatabaseIO", "ServerProperties", "MemoryClerk", "Schedulers", "VolumeSpace"}, "queries": []string{"PerformanceCounters", "SqlRequests"}, "queriesTotal": 2, }, { "IncludeQuery": []string{"PerformanceCounters", "SqlRequests"}, - "ExcludeQuery": []string{"SqlRequests", "WaitStatsCategorized", "DatabaseIO"}, + "ExcludeQuery": []string{"SqlRequests", "WaitStatsCategorized", "DatabaseIO", "VolumeSpace"}, "queries": []string{"PerformanceCounters"}, "queriesTotal": 1, }, From df145c7e564c8ba0d4722a7a5ab96eb36834e1e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9s=20=C3=81lvarez?= <1671935+kir4h@users.noreply.github.com> Date: Mon, 6 Apr 2020 22:21:01 +0200 Subject: [PATCH 1672/1815] Fix export timestamp not working for prometheus on v2 (#7289) --- .../prometheus_client/prometheus_client.go | 2 +- .../prometheus_client_v2_test.go | 28 +++++++++++++++++++ .../outputs/prometheus_client/v2/collector.go | 7 ++++- 3 files changed, 35 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index c28ec54ec..57cb1a510 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -139,7 +139,7 @@ func (p *PrometheusClient) Init() error { return err } case 2: - p.collector = v2.NewCollector(p.ExpirationInterval.Duration, p.StringAsLabel) + p.collector = v2.NewCollector(p.ExpirationInterval.Duration, p.StringAsLabel, p.ExportTimestamp) err := registry.Register(p.collector) if err != nil { return err diff --git a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go index 755bd5dc4..3404ab2ed 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go @@ -48,6 +48,34 @@ func TestMetricVersion2(t *testing.T) { # HELP cpu_time_idle Telegraf collected metric # TYPE cpu_time_idle untyped cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "when export timestamp is true timestamp is present in the metric", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 2, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + ExportTimestamp: true, + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 0 `), }, { diff --git a/plugins/outputs/prometheus_client/v2/collector.go b/plugins/outputs/prometheus_client/v2/collector.go index 4f8efd839..b28a4deab 100644 --- a/plugins/outputs/prometheus_client/v2/collector.go +++ b/plugins/outputs/prometheus_client/v2/collector.go @@ -43,11 +43,16 @@ type Collector struct { coll *serializer.Collection } -func NewCollector(expire time.Duration, stringsAsLabel bool) *Collector { +func NewCollector(expire time.Duration, stringsAsLabel bool, exportTimestamp bool) *Collector { config := serializer.FormatConfig{} if stringsAsLabel { 
config.StringHandling = serializer.StringAsLabel } + + if exportTimestamp { + config.TimestampExport = serializer.ExportTimestamp + } + return &Collector{ expireDuration: expire, coll: serializer.NewCollection(config), From cc6c77f3012f2407780bd037a36dbaf9c5cac5a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9s=20=C3=81lvarez?= <1671935+kir4h@users.noreply.github.com> Date: Thu, 9 Apr 2020 20:27:59 +0200 Subject: [PATCH 1673/1815] Deploy telegraf configuration as a "non config" file (#7250) --- etc/telegraf.conf | 2 +- scripts/build.py | 108 +++++++++++++++++++++++++--------------- scripts/post-install.sh | 5 ++ 3 files changed, 74 insertions(+), 41 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 52f3b7fe7..de9dd6f2d 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -3980,7 +3980,7 @@ # # timeout = "5ms" -# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver +# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver # [[inputs.opensmtpd]] # ## If running as a restricted user you can prepend sudo for additional access: # #use_sudo = false diff --git a/scripts/build.py b/scripts/build.py index 7b3601bb5..e3e791a1d 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -3,7 +3,6 @@ import sys import os import subprocess -import time from datetime import datetime import shutil import tempfile @@ -53,9 +52,9 @@ VENDOR = "InfluxData" DESCRIPTION = "Plugin-driven server agent for reporting metrics into InfluxDB." # SCRIPT START -prereqs = [ 'git', 'go' ] +prereqs = ['git', 'go'] go_vet_command = "go tool vet -composites=true ./" -optional_prereqs = [ 'gvm', 'fpm', 'rpmbuild' ] +optional_prereqs = ['gvm', 'fpm', 'rpmbuild'] fpm_common_args = "-f -s dir --log error \ --vendor {} \ @@ -74,7 +73,7 @@ fpm_common_args = "-f -s dir --log error \ PACKAGE_URL, PACKAGE_LICENSE, MAINTAINER, - CONFIG_DIR + '/telegraf.conf', + CONFIG_DIR + '/telegraf.conf.sample', LOGROTATE_DIR + '/telegraf', POSTINST_SCRIPT, PREINST_SCRIPT, @@ -84,21 +83,21 @@ fpm_common_args = "-f -s dir --log error \ DESCRIPTION) targets = { - 'telegraf' : './cmd/telegraf', + 'telegraf': './cmd/telegraf', } supported_builds = { - 'darwin': [ "amd64" ], - "windows": [ "amd64", "i386" ], - "linux": [ "amd64", "i386", "armhf", "armel", "arm64", "static_amd64", "s390x", "mipsel", "mips"], - "freebsd": [ "amd64", "i386" ] + 'darwin': ["amd64"], + "windows": ["amd64", "i386"], + "linux": ["amd64", "i386", "armhf", "armel", "arm64", "static_amd64", "s390x", "mipsel", "mips"], + "freebsd": ["amd64", "i386"] } supported_packages = { - "darwin": [ "tar" ], - "linux": [ "deb", "rpm", "tar" ], - "windows": [ "zip" ], - "freebsd": [ "tar" ] + "darwin": ["tar"], + "linux": ["deb", "rpm", "tar"], + "windows": ["zip"], + "freebsd": ["tar"] } next_version = '1.15.0' @@ -107,6 +106,7 @@ next_version = '1.15.0' #### Telegraf Functions ################ + def print_banner(): logging.info(""" _____ _ __ @@ -118,17 +118,19 @@ def print_banner(): Build Script """) + def create_package_fs(build_root): """Create a filesystem structure to mimic the package filesystem. 
""" logging.debug("Creating a filesystem hierarchy from directory: {}".format(build_root)) # Using [1:] for the path names due to them being absolute # (will overwrite previous paths, per 'os.path.join' documentation) - dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:], CONFIG_DIR_D[1:] ] + dirs = [INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:], CONFIG_DIR_D[1:]] for d in dirs: os.makedirs(os.path.join(build_root, d)) os.chmod(os.path.join(build_root, d), 0o755) + def package_scripts(build_root, config_only=False, windows=False): """Copy the necessary scripts and configuration files to the package filesystem. @@ -136,10 +138,10 @@ def package_scripts(build_root, config_only=False, windows=False): if config_only or windows: logging.info("Copying configuration to build directory") if windows: - shutil.copyfile(DEFAULT_WINDOWS_CONFIG, os.path.join(build_root, "telegraf.conf")) + shutil.copyfile(DEFAULT_WINDOWS_CONFIG, os.path.join(build_root, "telegraf.conf.sample")) else: - shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, "telegraf.conf")) - os.chmod(os.path.join(build_root, "telegraf.conf"), 0o644) + shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, "telegraf.conf.sample")) + os.chmod(os.path.join(build_root, "telegraf.conf.sample"), 0o644) else: logging.info("Copying scripts and configuration to build directory") shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1])) @@ -148,13 +150,15 @@ def package_scripts(build_root, config_only=False, windows=False): os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644) shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf")) os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0o644) - shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf")) - os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644) + shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf.sample")) + os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf.sample"), 0o644) + def run_generate(): # NOOP for Telegraf return True + def go_get(branch, update=False, no_uncommitted=False): """Retrieve build dependencies or restore pinned dependencies. 
""" @@ -165,10 +169,12 @@ def go_get(branch, update=False, no_uncommitted=False): run("go mod download") return True + def run_tests(race, parallel, timeout, no_vet): # Currently a NOOP for Telegraf return True + ################ #### All Telegraf-specific content above this line ################ @@ -187,14 +193,14 @@ def run(command, allow_failure=False, shell=False): # logging.debug("Command output: {}".format(out)) except subprocess.CalledProcessError as e: if allow_failure: - logging.warn("Command '{}' failed with error: {}".format(command, e.output)) + logging.warning("Command '{}' failed with error: {}".format(command, e.output)) return None else: logging.error("Command '{}' failed with error: {}".format(command, e.output)) sys.exit(1) except OSError as e: if allow_failure: - logging.warn("Command '{}' failed with error: {}".format(command, e)) + logging.warning("Command '{}' failed with error: {}".format(command, e)) return out else: logging.error("Command '{}' failed with error: {}".format(command, e)) @@ -202,7 +208,8 @@ def run(command, allow_failure=False, shell=False): else: return out -def create_temp_dir(prefix = None): + +def create_temp_dir(prefix=None): """ Create temporary directory with optional prefix. """ if prefix is None: @@ -210,13 +217,14 @@ def create_temp_dir(prefix = None): else: return tempfile.mkdtemp(prefix=prefix) + def increment_minor_version(version): """Return the version with the minor version incremented and patch version set to zero. """ ver_list = version.split('.') if len(ver_list) != 3: - logging.warn("Could not determine how to increment version '{}', will just use provided version.".format(version)) + logging.warning("Could not determine how to increment version '{}', will just use provided version.".format(version)) return version ver_list[1] = str(int(ver_list[1]) + 1) ver_list[2] = str(0) @@ -224,13 +232,15 @@ def increment_minor_version(version): logging.debug("Incremented version from '{}' to '{}'.".format(version, inc_version)) return inc_version + def get_current_version_tag(): """Retrieve the raw git version tag. """ version = run("git describe --exact-match --tags 2>/dev/null", - allow_failure=True, shell=True) + allow_failure=True, shell=True) return version + def get_current_version(): """Parse version information from git tag output. """ @@ -242,15 +252,15 @@ def get_current_version(): version_tag = version_tag[1:] # Replace any '-'/'_' with '~' if '-' in version_tag: - version_tag = version_tag.replace("-","~") + version_tag = version_tag.replace("-", "~") if '_' in version_tag: - version_tag = version_tag.replace("_","~") + version_tag = version_tag.replace("_", "~") return version_tag + def get_current_commit(short=False): """Retrieve the current git commit. """ - command = None if short: command = "git log --pretty=format:'%h' -n 1" else: @@ -258,6 +268,7 @@ def get_current_commit(short=False): out = run(command) return out.strip('\'\n\r ') + def get_current_branch(): """Retrieve the current git branch. """ @@ -265,6 +276,7 @@ def get_current_branch(): out = run(command) return out.strip() + def local_changes(): """Return True if there are local un-committed changes. """ @@ -273,6 +285,7 @@ def local_changes(): return True return False + def get_system_arch(): """Retrieve current system architecture. """ @@ -288,6 +301,7 @@ def get_system_arch(): arch = "arm" return arch + def get_system_platform(): """Retrieve current system platform. 
""" @@ -296,6 +310,7 @@ def get_system_platform(): else: return sys.platform + def get_go_version(): """Retrieve version information for Go. """ @@ -305,6 +320,7 @@ def get_go_version(): return matches.groups()[0].strip() return None + def check_path_for(b): """Check the the user's path for the provided binary. """ @@ -314,21 +330,23 @@ def check_path_for(b): for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') full_path = os.path.join(path, b) - if os.path.isfile(full_path) and os.access(full_path, os.X_OK): + if is_exe(full_path): return full_path -def check_environ(build_dir = None): + +def check_environ(build_dir=None): """Check environment for common Go variables. """ logging.info("Checking environment...") - for v in [ "GOPATH", "GOBIN", "GOROOT" ]: + for v in ["GOPATH", "GOBIN", "GOROOT"]: logging.debug("Using '{}' for {}".format(os.environ.get(v), v)) cwd = os.getcwd() if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd: - logging.warn("Your current directory is not under your GOPATH. This may lead to build failures.") + logging.warning("Your current directory is not under your GOPATH. This may lead to build failures.") return True + def check_prereqs(): """Check user path for required dependencies. """ @@ -339,6 +357,7 @@ def check_prereqs(): return False return True + def upload_packages(packages, bucket_name=None, overwrite=False): """Upload provided package output to AWS S3. """ @@ -379,9 +398,10 @@ def upload_packages(packages, bucket_name=None, overwrite=False): n = k.set_contents_from_filename(p, replace=False) k.make_public() else: - logging.warn("Not uploading file {}, as it already exists in the target bucket.".format(name)) + logging.warning("Not uploading file {}, as it already exists in the target bucket.".format(name)) return True + def go_list(vendor=False, relative=False): """ Return a list of packages @@ -408,6 +428,7 @@ def go_list(vendor=False, relative=False): packages = relative_pkgs return packages + def build(version=None, platform=None, arch=None, @@ -415,10 +436,12 @@ def build(version=None, race=False, clean=False, outdir=".", - tags=[], + tags=None, static=False): """Build each target for the specified architecture and platform. """ + if tags is None: + tags = [] logging.info("Starting build for {}/{}...".format(platform, arch)) logging.info("Using Go version: {}".format(get_go_version())) logging.info("Using git branch: {}".format(get_current_branch())) @@ -502,6 +525,7 @@ def build(version=None, logging.info("Time taken: {}s".format((end_time - start_time).total_seconds())) return True + def generate_sha256_from_file(path): """Generate SHA256 hash signature based on the contents of the file at path. """ @@ -510,13 +534,14 @@ def generate_sha256_from_file(path): m.update(f.read()) return m.hexdigest() + def generate_sig_from_file(path): """Generate a detached GPG signature from the file at path. """ logging.debug("Generating GPG signature for file: {}".format(path)) gpg_path = check_path_for('gpg') if gpg_path is None: - logging.warn("gpg binary not found on path! Skipping signature creation.") + logging.warning("gpg binary not found on path! 
Skipping signature creation.") return False if os.environ.get("GNUPG_HOME") is not None: run('gpg --homedir {} --armor --yes --detach-sign {}'.format(os.environ.get("GNUPG_HOME"), path)) @@ -524,6 +549,7 @@ def generate_sig_from_file(path): run('gpg --armor --detach-sign --yes {}'.format(path)) return True + def package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False): """Package the output of the build process. """ @@ -659,7 +685,7 @@ def package(build_output, pkg_name, version, nightly=False, iteration=1, static= if matches is not None: outfile = matches.groups()[0] if outfile is None: - logging.warn("Could not determine output from packaging output!") + logging.warning("Could not determine output from packaging output!") else: if nightly: # Strip nightly version from package name @@ -677,6 +703,7 @@ def package(build_output, pkg_name, version, nightly=False, iteration=1, static= # Cleanup shutil.rmtree(tmp_build_dir) + def main(args): global PACKAGE_NAME @@ -736,7 +763,7 @@ def main(args): platforms = [args.platform] for platform in platforms: - build_output.update( { platform : {} } ) + build_output.update({platform: {}}) archs = [] if args.arch == "all": single_build = False @@ -758,7 +785,7 @@ def main(args): tags=args.build_tags, static=args.static): return 1 - build_output.get(platform).update( { arch : od } ) + build_output.get(platform).update({arch: od}) # Build packages if args.package: @@ -774,7 +801,7 @@ def main(args): release=args.release) if args.sign: logging.debug("Generating GPG signatures for packages: {}".format(packages)) - sigs = [] # retain signatures so they can be uploaded with packages + sigs = [] # retain signatures so they can be uploaded with packages for p in packages: if generate_sig_from_file(p): sigs.append(p + '.asc') @@ -799,6 +826,7 @@ def main(args): return 0 + if __name__ == '__main__': LOG_LEVEL = logging.INFO if '--debug' in sys.argv[1:]: @@ -808,7 +836,7 @@ if __name__ == '__main__': format=log_format) parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.') - parser.add_argument('--verbose','-v','--debug', + parser.add_argument('--verbose', '-v', '--debug', action='store_true', help='Use debug output') parser.add_argument('--outdir', '-o', @@ -886,7 +914,7 @@ if __name__ == '__main__': parser.add_argument('--upload', action='store_true', help='Upload output packages to AWS S3') - parser.add_argument('--upload-overwrite','-w', + parser.add_argument('--upload-overwrite', '-w', action='store_true', help='Upload output packages to AWS S3') parser.add_argument('--bucket', diff --git a/scripts/post-install.sh b/scripts/post-install.sh index 50bef1081..0f197467e 100644 --- a/scripts/post-install.sh +++ b/scripts/post-install.sh @@ -43,6 +43,11 @@ if [[ ! -d /etc/telegraf/telegraf.d ]]; then mkdir -p /etc/telegraf/telegraf.d fi +# If 'telegraf.conf' is not present use package's sample (fresh install) +if [[ ! 
-f /etc/telegraf/telegraf.conf ]] && [[ -f /etc/telegraf/telegraf.conf.sample ]]; then + cp /etc/telegraf/telegraf.conf.sample /etc/telegraf/telegraf.conf +fi + # Distribution-specific logic if [[ -f /etc/redhat-release ]] || [[ -f /etc/SuSE-release ]]; then # RHEL-variant logic From 3dab8450405f63c8623c09f16efda24f8c525fb8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 13 Apr 2020 10:57:48 -0700 Subject: [PATCH 1674/1815] Fix Name field in template processor (#7258) --- .../processors/template/template_metric.go | 7 ++-- plugins/processors/template/template_test.go | 37 +++++++++++++++++++ 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/plugins/processors/template/template_metric.go b/plugins/processors/template/template_metric.go index 47d86ec57..e4a81bd1c 100644 --- a/plugins/processors/template/template_metric.go +++ b/plugins/processors/template/template_metric.go @@ -1,16 +1,17 @@ package template import ( - "github.com/influxdata/telegraf" "time" + + "github.com/influxdata/telegraf" ) type TemplateMetric struct { metric telegraf.Metric } -func (m *TemplateMetric) Measurement() string { - return m.Measurement() +func (m *TemplateMetric) Name() string { + return m.metric.Name() } func (m *TemplateMetric) Tag(key string) string { diff --git a/plugins/processors/template/template_test.go b/plugins/processors/template/template_test.go index b8c195cda..f43d69795 100644 --- a/plugins/processors/template/template_test.go +++ b/plugins/processors/template/template_test.go @@ -7,8 +7,45 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) +func TestName(t *testing.T) { + plugin := TemplateProcessor{ + Tag: "measurement", + Template: "{{ .Name }}", + } + + err := plugin.Init() + require.NoError(t, err) + + input := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + } + + actual := plugin.Apply(input...) + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "measurement": "cpu", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, actual) +} + func TestTagTemplateConcatenate(t *testing.T) { now := time.Now() From 0479f399f9e8727f7a943b64bdd1b4d5c302988a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 13 Apr 2020 11:20:23 -0700 Subject: [PATCH 1675/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 838f28075..d488b4574 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ #### Bugfixes - [#7236](https://github.com/influxdata/telegraf/pull/7236): Fix PerformanceCounter query performance degradation in sqlserver input. +- [#7258](https://github.com/influxdata/telegraf/pull/7258): Fix using the Name field in template processor. ## v1.14 [2020-03-26] From f4fa64170e9aa9c80d06405147770da3a9867647 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 13 Apr 2020 11:33:59 -0700 Subject: [PATCH 1676/1815] Update changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d488b4574..eb755f60b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,8 @@ - [#7225](https://github.com/influxdata/telegraf/pull/7225): Add support for 64-bit integer types to modbus input. 
- [#7231](https://github.com/influxdata/telegraf/pull/7231): Add possibility to specify measurement per register. - [#7136](https://github.com/influxdata/telegraf/pull/7136): Support multiple templates for graphite serializers. +- [#7250](https://github.com/influxdata/telegraf/pull/7250): Deploy telegraf configuration as a "non config" file. +- [#7214](https://github.com/influxdata/telegraf/pull/7214): Add VolumeSpace query for sqlserver input with metric_version 2. ## v1.14.1 [unreleased] @@ -16,6 +18,7 @@ - [#7236](https://github.com/influxdata/telegraf/pull/7236): Fix PerformanceCounter query performance degradation in sqlserver input. - [#7258](https://github.com/influxdata/telegraf/pull/7258): Fix using the Name field in template processor. +- [#7289](https://github.com/influxdata/telegraf/pull/7289): Fix export timestamp not working for prometheus on v2. ## v1.14 [2020-03-26] From 94132910641bacf963d48968b7e4738457bbdc31 Mon Sep 17 00:00:00 2001 From: reimda Date: Mon, 13 Apr 2020 13:44:03 -0600 Subject: [PATCH 1677/1815] Fix error in docs about exclude_retention_policy_tag (#7311) --- plugins/outputs/influxdb/README.md | 4 ++-- plugins/outputs/influxdb/influxdb.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md index f82a3b344..a53b7a0f2 100644 --- a/plugins/outputs/influxdb/README.md +++ b/plugins/outputs/influxdb/README.md @@ -23,7 +23,7 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP ser ## tag is not set the 'database' option is used as the default. # database_tag = "" - ## If true, the database tag will not be added to the metric. + ## If true, the 'database_tag' will not be included in the written metric. # exclude_database_tag = false ## If true, no CREATE DATABASE queries will be sent. Set to true when using @@ -39,7 +39,7 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP ser ## tag is not set the 'retention_policy' option is used as the default. # retention_policy_tag = "" - ## If true, the 'retention_policy_tag' will not be removed from the metric. + ## If true, the 'retention_policy_tag' will not be included in the written metric. # exclude_retention_policy_tag = false ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 4306f55c6..1c4af5bca 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -79,7 +79,7 @@ var sampleConfig = ` ## tag is not set the 'database' option is used as the default. # database_tag = "" - ## If true, the database tag will not be added to the metric. + ## If true, the 'database_tag' will not be included in the written metric. # exclude_database_tag = false ## If true, no CREATE DATABASE queries will be sent. Set to true when using @@ -95,7 +95,7 @@ var sampleConfig = ` ## tag is not set the 'retention_policy' option is used as the default. # retention_policy_tag = "" - ## If true, the 'retention_policy_tag' will not be removed from the metric. + ## If true, the 'retention_policy_tag' will not be included in the written metric. # exclude_retention_policy_tag = false ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". 
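To see how the options documented in the patch above fit together, here is a minimal sketch of a config that routes metrics by tag and strips the routing tags before writing. The `database_tag`, `exclude_database_tag`, `retention_policy_tag`, and `exclude_retention_policy_tag` option names come from the sample config in the diff; the `target_db` and `target_rp` tag names are hypothetical, chosen only for illustration.

```toml
[[outputs.influxdb]]
  urls = ["http://localhost:8086"]

  ## Fallback database, used when the routing tag is missing.
  database = "telegraf"
  ## Take the destination database from the (hypothetical) "target_db" tag.
  database_tag = "target_db"
  ## Drop the routing tag so it is not written as part of the series.
  exclude_database_tag = true

  ## The same pattern applies to the retention policy.
  retention_policy_tag = "target_rp"
  exclude_retention_policy_tag = true
```

Under this sketch, a metric tagged `target_db=app1` lands in the `app1` database, untagged metrics fall back to `telegraf`, and neither routing tag ends up in the stored series; a later patch in this series (#7323) is what makes the two `exclude_*` switches independent of each other.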
From aab5800aac7a7f3debc8c16482201b012d60041e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 13 Apr 2020 12:46:03 -0700 Subject: [PATCH 1678/1815] Regenerate telegraf.conf --- etc/telegraf.conf | 4 ++-- etc/telegraf_windows.conf | 9 ++++++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index de9dd6f2d..05d7daadb 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -119,7 +119,7 @@ ## tag is not set the 'database' option is used as the default. # database_tag = "" - ## If true, the database tag will not be added to the metric. + ## If true, the 'database_tag' will not be included in the written metric. # exclude_database_tag = false ## If true, no CREATE DATABASE queries will be sent. Set to true when using @@ -135,7 +135,7 @@ ## tag is not set the 'retention_policy' option is used as the default. # retention_policy_tag = "" - ## If true, the 'retention_policy_tag' will not be removed from the metric. + ## If true, the 'retention_policy_tag' will not be included in the written metric. # exclude_retention_policy_tag = false ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index c3586cafd..5b7092899 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -119,7 +119,7 @@ ## tag is not set the 'database' option is used as the default. # database_tag = "" - ## If true, the database tag will not be added to the metric. + ## If true, the 'database_tag' will not be included in the written metric. # exclude_database_tag = false ## If true, no CREATE DATABASE queries will be sent. Set to true when using @@ -131,6 +131,13 @@ ## the default retention policy. Only takes effect when using HTTP. # retention_policy = "" + ## The value of this tag will be used to determine the retention policy. If this + ## tag is not set the 'retention_policy' option is used as the default. + # retention_policy_tag = "" + + ## If true, the 'retention_policy_tag' will not be included in the written metric. + # exclude_retention_policy_tag = false + ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". ## Only takes effect when using HTTP. 
# write_consistency = "any" From 48f5da7e33b3b9b841326925506a47a7ed7db1c5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 14 Apr 2020 12:14:49 -0700 Subject: [PATCH 1679/1815] Fix status path when using globs in phpfpm (#7324) --- plugins/inputs/phpfpm/phpfpm.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index 75a6aeb17..4bb6443ab 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -312,9 +312,9 @@ func globUnixSocket(url string) ([]string, error) { for _, path := range paths { if status != "" { - status = fmt.Sprintf(":%s", status) + path = path + ":" + status } - addrs = append(addrs, fmt.Sprintf("%s%s", path, status)) + addrs = append(addrs, path) } return addrs, nil From eb8d80537096df23a6656dacea2423b9c5ba9f3f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 14 Apr 2020 12:15:35 -0700 Subject: [PATCH 1680/1815] Fix exclude database and retention policy tags is shared (#7323) --- plugins/outputs/influxdb/http.go | 8 +++++-- plugins/outputs/influxdb/http_test.go | 31 +++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index b663d9198..5f25572d3 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -272,8 +272,12 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error // Avoid modifying the metric in case we need to retry the request. metric = metric.Copy() metric.Accept() - metric.RemoveTag(c.config.DatabaseTag) - metric.RemoveTag(c.config.RetentionPolicyTag) + if c.config.ExcludeDatabaseTag { + metric.RemoveTag(c.config.DatabaseTag) + } + if c.config.ExcludeRetentionPolicyTag { + metric.RemoveTag(c.config.RetentionPolicyTag) + } } batches[dbrp] = append(batches[dbrp], metric) diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index 3f5ef0bc6..a59fadaf4 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -907,6 +907,37 @@ func TestDBRPTags(t *testing.T) { w.WriteHeader(http.StatusNoContent) }, }, + { + name: "exclude database tag keeps retention policy tag", + config: influxdb.HTTPConfig{ + URL: u, + SkipDatabaseCreation: true, + Database: "telegraf", + RetentionPolicyTag: "rp", + ExcludeDatabaseTag: true, + Log: testutil.Logger{}, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "rp": "foo", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.FormValue("db"), "telegraf") + require.Equal(t, r.FormValue("rp"), "foo") + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + require.Contains(t, string(body), "cpu,rp=foo value=42") + w.WriteHeader(http.StatusNoContent) + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From 0fa92a686e17fda92d4a3cfbe719d0228970ce0d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 14 Apr 2020 12:19:43 -0700 Subject: [PATCH 1681/1815] Update changelog --- CHANGELOG.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index eb755f60b..21cc07691 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,9 +16,11 @@ #### Bugfixes -- [#7236](https://github.com/influxdata/telegraf/pull/7236): Fix PerformanceCounter query 
performance degradation in sqlserver input. -- [#7258](https://github.com/influxdata/telegraf/pull/7258): Fix using the Name field in template processor. +- [#7236](https://github.com/influxdata/telegraf/issues/7236): Fix PerformanceCounter query performance degradation in sqlserver input. +- [#7257](https://github.com/influxdata/telegraf/issues/7257): Fix error when using the Name field in template processor. - [#7289](https://github.com/influxdata/telegraf/pull/7289): Fix export timestamp not working for prometheus on v2. +- [#7310](https://github.com/influxdata/telegraf/issues/7310): Fix exclude database and retention policy tags is shared. +- [#7262](https://github.com/influxdata/telegraf/issues/7262): Fix status path when using globs in phpfpm. ## v1.14 [2020-03-26] From 27f09758ba1555967ead775222a24bc1618c32f5 Mon Sep 17 00:00:00 2001 From: M0rdecay <50422107+M0rdecay@users.noreply.github.com> Date: Tue, 14 Apr 2020 22:31:26 +0300 Subject: [PATCH 1682/1815] Add reading bearer token from a file to http input (#7304) --- plugins/inputs/http/README.md | 4 ++++ plugins/inputs/http/http.go | 16 ++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/plugins/inputs/http/README.md b/plugins/inputs/http/README.md index 9cd136bd0..59abd8256 100644 --- a/plugins/inputs/http/README.md +++ b/plugins/inputs/http/README.md @@ -26,6 +26,10 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The ## compress body or "identity" to apply no encoding. # content_encoding = "identity" + ## Optional file with Bearer token + ## file content is added as an Authorization header + # bearer_token = "/path/to/file" + ## Optional HTTP Basic Auth Credentials # username = "username" # password = "pa$$word" diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index 13c9cd170..8290a6f66 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -29,6 +29,9 @@ type HTTP struct { Password string `toml:"password"` tls.ClientConfig + // Absolute path to file with Bearer token + BearerToken string `toml:"bearer_token"` + SuccessStatusCodes []int `toml:"success_status_codes"` Timeout internal.Duration `toml:"timeout"` @@ -52,6 +55,10 @@ var sampleConfig = ` ## Optional HTTP headers # headers = {"X-Special-Header" = "Special-Value"} + ## Optional file with Bearer token + ## file content is added as an Authorization header + # bearer_token = "/path/to/file" + ## Optional HTTP Basic Auth Credentials # username = "username" # password = "pa$$word" @@ -160,6 +167,15 @@ func (h *HTTP) gatherURL( return err } + if h.BearerToken != "" { + token, err := ioutil.ReadFile(h.BearerToken) + if err != nil { + return err + } + bearer := "Bearer " + strings.Trim(string(token), "\n") + request.Header.Set("Authorization", bearer) + } + if h.ContentEncoding == "gzip" { request.Header.Set("Content-Encoding", "gzip") } From 413ec7bbd71a0400532ed22b10741994598a9b79 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 14 Apr 2020 12:32:29 -0700 Subject: [PATCH 1683/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 21cc07691..6d0923714 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ - [#7136](https://github.com/influxdata/telegraf/pull/7136): Support multiple templates for graphite serializers. - [#7250](https://github.com/influxdata/telegraf/pull/7250): Deploy telegraf configuration as a "non config" file. 
- [#7214](https://github.com/influxdata/telegraf/pull/7214): Add VolumeSpace query for sqlserver input with metric_version 2. +- [#7304](https://github.com/influxdata/telegraf/pull/7304): Add reading bearer token from a file to http input. ## v1.14.1 [unreleased] From 03819ba9ec88bf3b2604c697a00768168bc950a1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 14 Apr 2020 13:58:48 -0700 Subject: [PATCH 1684/1815] Set 1.14.1 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6d0923714..4783e88ba 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,7 @@ - [#7214](https://github.com/influxdata/telegraf/pull/7214): Add VolumeSpace query for sqlserver input with metric_version 2. - [#7304](https://github.com/influxdata/telegraf/pull/7304): Add reading bearer token from a file to http input. -## v1.14.1 [unreleased] +## v1.14.1 [2020-04-14] #### Bugfixes From 3380471e186cd3d8a243764d4e875cac4537c347 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 15 Apr 2020 10:51:31 -0700 Subject: [PATCH 1685/1815] Adjust heading level in the filtering examples to allow linking --- docs/CONFIGURATION.md | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 0acbefb48..975c42f14 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -454,9 +454,9 @@ The inverse of `taginclude`. Tags with a tag key matching one of the patterns will be discarded from the metric. Any tag can be filtered including global tags and the agent `host` tag. -##### Filtering Examples +#### Filtering Examples -Using tagpass and tagdrop: +##### Using tagpass and tagdrop: ```toml [[inputs.cpu]] percpu = true @@ -489,7 +489,7 @@ Using tagpass and tagdrop: instance = ["isatap*", "Local*"] ``` -Using fieldpass and fielddrop: +##### Using fieldpass and fielddrop: ```toml # Drop all metrics for guest & steal CPU usage [[inputs.cpu]] @@ -502,7 +502,7 @@ Using fieldpass and fielddrop: fieldpass = ["inodes*"] ``` -Using namepass and namedrop: +##### Using namepass and namedrop: ```toml # Drop all metrics about containers for kubelet [[inputs.prometheus]] @@ -515,7 +515,7 @@ Using namepass and namedrop: namepass = ["rest_client_*"] ``` -Using taginclude and tagexclude: +##### Using taginclude and tagexclude: ```toml # Only include the "cpu" tag in the measurements for the cpu plugin. [[inputs.cpu]] @@ -528,7 +528,7 @@ Using taginclude and tagexclude: tagexclude = ["fstype"] ``` -Metrics can be routed to different outputs using the metric name and tags: +##### Metrics can be routed to different outputs using the metric name and tags: ```toml [[outputs.influxdb]] urls = [ "http://localhost:8086" ] @@ -550,9 +550,11 @@ Metrics can be routed to different outputs using the metric name and tags: cpu = ["cpu0"] ``` -Routing metrics to different outputs based on the input. Metrics are tagged -with `influxdb_database` in the input, which is then used to select the -output. The tag is removed in the outputs before writing. +##### Routing metrics to different outputs based on the input. + +Metrics are tagged with `influxdb_database` in the input, which is then used to +select the output. The tag is removed in the outputs before writing. 
+ ```toml [[outputs.influxdb]] urls = ["http://influxdb.example.com"] From 73ef0bcba25970b064d4963230d5fc01365f2369 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 16 Apr 2020 13:07:03 -0700 Subject: [PATCH 1686/1815] Deprecate logparser input and recommend tail input as replacement (#7352) --- CHANGELOG.md | 5 + README.md | 2 +- plugins/inputs/logparser/README.md | 262 ++++---------------------- plugins/inputs/logparser/logparser.go | 6 +- 4 files changed, 51 insertions(+), 224 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4783e88ba..f9b35941a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ ## v1.15 [unreleased] +#### Release Notes + +- The `logparser` input is deprecated, use the `tail` input with `data_format = + "grok"` as a replacement. + #### Features - [#6905](https://github.com/influxdata/telegraf/pull/6905): Add commands stats to mongodb input plugin. diff --git a/README.md b/README.md index 57615f9a8..571272b32 100644 --- a/README.md +++ b/README.md @@ -229,7 +229,7 @@ For documentation on the latest development code see the [documentation index][d * [lanz](./plugins/inputs/lanz) * [leofs](./plugins/inputs/leofs) * [linux_sysctl_fs](./plugins/inputs/linux_sysctl_fs) -* [logparser](./plugins/inputs/logparser) +* [logparser](./plugins/inputs/logparser) (deprecated, use [tail](/plugins/inputs/tail)) * [logstash](./plugins/inputs/logstash) * [lustre2](./plugins/inputs/lustre2) * [mailchimp](./plugins/inputs/mailchimp) diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index 22250ff45..0abdba2c9 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -4,10 +4,43 @@ The `logparser` plugin streams and parses the given logfiles. Currently it has the capability of parsing "grok" patterns from logfiles, which also supports regex patterns. -### Configuration: +**Deprecated in Telegraf 1.15**: Please use the [tail][] plugin along with the [`grok` data format][grok parser]. + +The `tail` plugin now provides all the functionality of the `logparser` plugin. +Most options can be translated directly to the `tail` plugin: +- For options in the `[inputs.logparser.grok]` section, the equivalent option + will have add the `grok_` prefix when using them in the `tail` input. +- The grok `measurement` option can be replaced using the standard plugin + `name_override` option. + +Migration Example: +```diff +- [[inputs.logparser]] +- files = ["/var/log/apache/access.log"] +- from_beginning = false +- [inputs.logparser.grok] +- patterns = ["%{COMBINED_LOG_FORMAT}"] +- measurement = "apache_access_log" +- custom_pattern_files = [] +- custom_patterns = ''' +- ''' +- timezone = "Canada/Eastern" + ++ [[inputs.tail]] ++ files = ["/var/log/apache/access.log"] ++ from_beginning = false ++ grok_patterns = ["%{COMBINED_LOG_FORMAT}"] ++ name_override = "apache_access_log" ++ grok_custom_pattern_files = [] ++ grok_custom_patterns = ''' ++ ''' ++ grok_timezone = "Canada/Eastern" ++ data_format = "grok" +``` + +### Configuration ```toml -# Stream and parse log file(s). [[inputs.logparser]] ## Log files to parse. ## These accept standard unix glob matching rules, but with the addition of @@ -59,228 +92,13 @@ regex patterns. ### Grok Parser -The best way to get acquainted with grok patterns is to read the logstash docs, -which are available here: - https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html +Reference the [grok parser][] documentation to setup the grok section of the +configuration. 
-The Telegraf grok parser uses a slightly modified version of logstash "grok" -patterns, with the format - -``` -%{[:][:]} -``` - -The `capture_syntax` defines the grok pattern that's used to parse the input -line and the `semantic_name` is used to name the field or tag. The extension -`modifier` controls the data type that the parsed item is converted to or -other special handling. - -By default all named captures are converted into string fields. -If a pattern does not have a semantic name it will not be captured. -Timestamp modifiers can be used to convert captures to the timestamp of the -parsed metric. If no timestamp is parsed the metric will be created using the -current time. - -**Note:** You must capture at least one field per line. -Patterns that convert all captures to tags will result in points that can't be written to InfluxDB. - -- Available modifiers: - - string (default if nothing is specified) - - int - - float - - duration (ie, 5.23ms gets converted to int nanoseconds) - - tag (converts the field into a tag) - - drop (drops the field completely) -- Timestamp modifiers: - - ts (This will auto-learn the timestamp format) - - ts-ansic ("Mon Jan _2 15:04:05 2006") - - ts-unix ("Mon Jan _2 15:04:05 MST 2006") - - ts-ruby ("Mon Jan 02 15:04:05 -0700 2006") - - ts-rfc822 ("02 Jan 06 15:04 MST") - - ts-rfc822z ("02 Jan 06 15:04 -0700") - - ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST") - - ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST") - - ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700") - - ts-rfc3339 ("2006-01-02T15:04:05Z07:00") - - ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00") - - ts-httpd ("02/Jan/2006:15:04:05 -0700") - - ts-epoch (seconds since unix epoch, may contain decimal) - - ts-epochmilli (milliseconds since unix epoch, may contain decimal) - - ts-epochnano (nanoseconds since unix epoch) - - ts-syslog ("Jan 02 15:04:05", parsed time is set to the current year) - - ts-"CUSTOM" - -CUSTOM time layouts must be within quotes and be the representation of the -"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`. -To match a comma decimal point you can use a period. For example `%{TIMESTAMP:timestamp:ts-"2006-01-02 15:04:05.000"}` can be used to match `"2018-01-02 15:04:05,000"` -To match a comma decimal point you can use a period in the pattern string. -See https://golang.org/pkg/time/#Parse for more details. - -Telegraf has many of its own [built-in patterns](/plugins/parsers/grok/influx_patterns.go), -as well as support for most of -[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns). -_Golang regular expressions do not support lookahead or lookbehind. -logstash patterns that depend on these are not supported._ - -If you need help building patterns to match your logs, -you will find the https://grokdebug.herokuapp.com application quite useful! 
- -#### Timestamp Examples - -This example input and config parses a file using a custom timestamp conversion: - -``` -2017-02-21 13:10:34 value=42 -``` - -```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} value=%{NUMBER:value:int}'] -``` - -This example input and config parses a file using a timestamp in unix time: - -``` -1466004605 value=42 -1466004605.123456789 value=42 -``` - -```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = ['%{NUMBER:timestamp:ts-epoch} value=%{NUMBER:value:int}'] -``` - -This example parses a file using a built-in conversion and a custom pattern: - -``` -Wed Apr 12 13:10:34 PST 2017 value=42 -``` - -```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = ["%{TS_UNIX:timestamp:ts-unix} value=%{NUMBER:value:int}"] - custom_patterns = ''' - TS_UNIX %{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ} %{YEAR} - ''' -``` - -For cases where the timestamp itself is without offset, the `timezone` config var is available -to denote an offset. By default (with `timezone` either omit, blank or set to `"UTC"`), the times -are processed as if in the UTC timezone. If specified as `timezone = "Local"`, the timestamp -will be processed based on the current machine timezone configuration. Lastly, if using a -timezone from the list of Unix [timezones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), the logparser grok will attempt to offset -the timestamp accordingly. See test cases for more detailed examples. - -#### TOML Escaping - -When saving patterns to the configuration file, keep in mind the different TOML -[string](https://github.com/toml-lang/toml#string) types and the escaping -rules for each. These escaping rules must be applied in addition to the -escaping required by the grok syntax. Using the Multi-line line literal -syntax with `'''` may be useful. - -The following config examples will parse this input file: - -``` -|42|\uD83D\uDC2F|'telegraf'| -``` - -Since `|` is a special character in the grok language, we must escape it to -get a literal `|`. With a basic TOML string, special characters such as -backslash must be escaped, requiring us to escape the backslash a second time. - -```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"] - custom_patterns = "UNICODE_ESCAPE (?:\\\\u[0-9A-F]{4})+" -``` - -We cannot use a literal TOML string for the pattern, because we cannot match a -`'` within it. However, it works well for the custom pattern. -```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"] - custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+' -``` - -A multi-line literal string allows us to encode the pattern: -```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = [''' - \|%{NUMBER:value:int}\|%{UNICODE_ESCAPE:escape}\|'%{WORD:name}'\| - '''] - custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+' -``` - -#### Parsing Telegraf log file -We can use logparser to convert the log lines generated by Telegraf in metrics. - -To do this we need to configure Telegraf to write logs to a file. -This could be done using the ``agent.logfile`` parameter or configuring syslog. 
-```toml -[agent] - logfile = "/var/log/telegraf/telegraf.log" -``` - -Logparser configuration: -```toml -[[inputs.logparser]] - files = ["/var/log/telegraf/telegraf.log"] - - [inputs.logparser.grok] - measurement = "telegraf_log" - patterns = ['^%{TIMESTAMP_ISO8601:timestamp:ts-rfc3339} %{TELEGRAF_LOG_LEVEL:level:tag}! %{GREEDYDATA:msg}'] - custom_patterns = ''' -TELEGRAF_LOG_LEVEL (?:[DIWE]+) -''' -``` - -Example log lines: -``` -2018-06-14T06:41:35Z I! Starting Telegraf v1.6.4 -2018-06-14T06:41:35Z I! Agent Config: Interval:3s, Quiet:false, Hostname:"archer", Flush Interval:3s -2018-02-20T22:39:20Z E! Error in plugin [inputs.docker]: took longer to collect than collection interval (10s) -2018-06-01T10:34:05Z W! Skipping a scheduled flush because there is already a flush ongoing. -2018-06-14T07:33:33Z D! Output [file] buffer fullness: 0 / 10000 metrics. -``` - -Generated metrics: -``` -telegraf_log,host=somehostname,level=I msg="Starting Telegraf v1.6.4" 1528958495000000000 -telegraf_log,host=somehostname,level=I msg="Agent Config: Interval:3s, Quiet:false, Hostname:\"somehostname\", Flush Interval:3s" 1528958495001000000 -telegraf_log,host=somehostname,level=E msg="Error in plugin [inputs.docker]: took longer to collect than collection interval (10s)" 1519166360000000000 -telegraf_log,host=somehostname,level=W msg="Skipping a scheduled flush because there is already a flush ongoing." 1527849245000000000 -telegraf_log,host=somehostname,level=D msg="Output [file] buffer fullness: 0 / 10000 metrics." 1528961613000000000 -``` - - -### Tips for creating patterns - -Writing complex patterns can be difficult, here is some advice for writing a -new pattern or testing a pattern developed [online](https://grokdebug.herokuapp.com). - -Create a file output that writes to stdout, and disable other outputs while -testing. This will allow you to see the captured metrics. Keep in mind that -the file output will only print once per `flush_interval`. - -```toml -[[outputs.file]] - files = ["stdout"] -``` - -- Start with a file containing only a single line of your input. -- Remove all but the first token or piece of the line. -- Add the section of your pattern to match this piece to your configuration file. -- Verify that the metric is parsed successfully by running Telegraf. -- If successful, add the next token, update the pattern and retest. -- Continue one token at a time until the entire line is successfully parsed. ### Additional Resources - https://www.influxdata.com/telegraf-correlate-log-metrics-data-performance-bottlenecks/ + +[tail]: /plugins/inputs/tail/README.md +[grok parser]: /plugins/parsers/grok/README.md diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index 0ce3ede04..4fbd2e90d 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -8,7 +8,6 @@ import ( "sync" "github.com/influxdata/tail" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/inputs" @@ -138,6 +137,11 @@ func (l *LogParserPlugin) Description() string { return "Stream and parse log file(s)." 
} +func (l *LogParserPlugin) Init() error { + l.Log.Warnf(`The logparser plugin is deprecated; please use the 'tail' input with the 'grok' data_format`) + return nil +} + // Gather is the primary function to collect the metrics for the plugin func (l *LogParserPlugin) Gather(acc telegraf.Accumulator) error { l.Lock() From 52a3f5d4045d32d9b2b0ea72f6037d0c38e77df5 Mon Sep 17 00:00:00 2001 From: Pierrick Brossin Date: Mon, 20 Apr 2020 16:58:50 +0200 Subject: [PATCH 1687/1815] Fibaro input: for battery operated devices, add battery level scraping (#7319) --- plugins/inputs/fibaro/README.md | 2 ++ plugins/inputs/fibaro/fibaro.go | 17 ++++++++++++----- plugins/inputs/fibaro/fibaro_test.go | 3 ++- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/fibaro/README.md b/plugins/inputs/fibaro/README.md index 68fda0586..54c203102 100644 --- a/plugins/inputs/fibaro/README.md +++ b/plugins/inputs/fibaro/README.md @@ -30,6 +30,7 @@ Those values could be true (1) or false (0) for switches, percentage for dimmers - name (device name) - type (device type) - fields: + - batteryLevel (float, when available from device) - energy (float, when available from device) - power (float, when available from device) - value (float) @@ -52,4 +53,5 @@ fibaro,deviceId=220,host=vm1,name=CO2\ (ppm),room=Salon,section=Pièces\ commune fibaro,deviceId=221,host=vm1,name=Humidité\ (%),room=Salon,section=Pièces\ communes,type=com.fibaro.humiditySensor value=61 1529996807000000000 fibaro,deviceId=222,host=vm1,name=Pression\ (mb),room=Salon,section=Pièces\ communes,type=com.fibaro.multilevelSensor value=1013.7 1529996807000000000 fibaro,deviceId=223,host=vm1,name=Bruit\ (db),room=Salon,section=Pièces\ communes,type=com.fibaro.multilevelSensor value=44 1529996807000000000 +fibaro,deviceId=248,host=vm1,name=Température,room=Garage,section=Extérieur,type=com.fibaro.temperatureSensor batteryLevel=85,value=10.8 1529996807000000000 ``` diff --git a/plugins/inputs/fibaro/fibaro.go b/plugins/inputs/fibaro/fibaro.go index 492feaf03..7def0ab09 100644 --- a/plugins/inputs/fibaro/fibaro.go +++ b/plugins/inputs/fibaro/fibaro.go @@ -69,11 +69,12 @@ type Devices struct { Type string `json:"type"` Enabled bool `json:"enabled"` Properties struct { - Dead interface{} `json:"dead"` - Energy interface{} `json:"energy"` - Power interface{} `json:"power"` - Value interface{} `json:"value"` - Value2 interface{} `json:"value2"` + BatteryLevel interface{} `json:"batteryLevel"` + Dead interface{} `json:"dead"` + Energy interface{} `json:"energy"` + Power interface{} `json:"power"` + Value interface{} `json:"value"` + Value2 interface{} `json:"value2"` } `json:"properties"` } @@ -174,6 +175,12 @@ func (f *Fibaro) Gather(acc telegraf.Accumulator) error { } fields := make(map[string]interface{}) + if device.Properties.BatteryLevel != nil { + if fValue, err := strconv.ParseFloat(device.Properties.BatteryLevel.(string), 64); err == nil { + fields["batteryLevel"] = fValue + } + } + if device.Properties.Energy != nil { if fValue, err := strconv.ParseFloat(device.Properties.Energy.(string), 64); err == nil { fields["energy"] = fValue diff --git a/plugins/inputs/fibaro/fibaro_test.go b/plugins/inputs/fibaro/fibaro_test.go index a58ad7c31..32a1447e3 100644 --- a/plugins/inputs/fibaro/fibaro_test.go +++ b/plugins/inputs/fibaro/fibaro_test.go @@ -107,6 +107,7 @@ const devicesJSON = ` "type": "com.fibaro.temperatureSensor", "enabled": true, "properties": { + "batteryLevel": "100", "dead": "false", "value": "22.80" }, @@ -196,7 +197,7 @@ func 
TestJSONSuccess(t *testing.T) { // Ensure fields / values are correct - Device 4 tags = map[string]string{"deviceId": "4", "section": "Section 3", "room": "Room 4", "name": "Device 4", "type": "com.fibaro.temperatureSensor"} - fields = map[string]interface{}{"value": float64(22.8)} + fields = map[string]interface{}{"batteryLevel": float64(100), "value": float64(22.8)} acc.AssertContainsTaggedFields(t, "fibaro", fields, tags) // Ensure fields / values are correct - Device 5 From 6c72c645a24bd098a17a814f89d226f4c7a553ca Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Mon, 20 Apr 2020 13:18:12 -0400 Subject: [PATCH 1688/1815] add another grok example for custom timestamps (#7367) --- plugins/parsers/grok/README.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/plugins/parsers/grok/README.md b/plugins/parsers/grok/README.md index 14c128f16..80936a41d 100644 --- a/plugins/parsers/grok/README.md +++ b/plugins/parsers/grok/README.md @@ -160,6 +160,21 @@ Wed Apr 12 13:10:34 PST 2017 value=42 ''' ``` +This example input and config parses a file using a custom timestamp conversion that doesn't match any specific standard: + +``` +21/02/2017 13:10:34 value=42 +``` + +```toml +[[inputs.file]] + grok_patterns = ['%{MY_TIMESTAMP:timestamp:ts-"02/01/2006 15:04:05"} value=%{NUMBER:value:int}'] + + grok_custom_patterns = ''' + MY_TIMESTAMP (?:\d{2}.\d{2}.\d{4} \d{2}:\d{2}:\d{2}) + ''' +``` + For cases where the timestamp itself is without offset, the `timezone` config var is available to denote an offset. By default (with `timezone` either omit, blank or set to `"UTC"`), the times are processed as if in the UTC timezone. If specified as `timezone = "Local"`, the timestamp From 819481b1955747359f2109794c8df4d9c24c9e60 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Mon, 20 Apr 2020 13:49:10 -0400 Subject: [PATCH 1689/1815] add support for SIGUSR1 to trigger flush (#7366) --- agent/agent.go | 48 ++++++----- agent/agent_notwindows.go | 16 ++++ agent/agent_windows.go | 9 ++ cmd/telegraf/telegraf.go | 116 ++------------------------ cmd/telegraf/telegraf_notwindows.go | 13 +++ cmd/telegraf/telegraf_windows.go | 124 ++++++++++++++++++++++++++++ docs/OUTPUTS.md | 13 +++ internal/usage.go | 5 +- internal/usage_windows.go | 5 +- 9 files changed, 218 insertions(+), 131 deletions(-) create mode 100644 agent/agent_notwindows.go create mode 100644 agent/agent_windows.go create mode 100644 cmd/telegraf/telegraf_notwindows.go create mode 100644 cmd/telegraf/telegraf_windows.go diff --git a/agent/agent.go b/agent/agent.go index 66fc140ae..97c6b01c8 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "log" + "os" "runtime" "sync" "time" @@ -516,16 +517,7 @@ func (a *Agent) runOutputs( wg.Add(1) go func(output *models.RunningOutput) { defer wg.Done() - - if a.Config.Agent.RoundInterval { - err := internal.SleepContext( - ctx, internal.AlignDuration(startTime, interval)) - if err != nil { - return - } - } - - a.flush(ctx, output, interval, jitter) + a.flushLoop(ctx, startTime, output, interval, jitter) }(output) } @@ -546,25 +538,39 @@ func (a *Agent) runOutputs( return nil } -// flush runs an output's flush function periodically until the context is +// flushLoop runs an output's flush function periodically until the context is // done. 
-func (a *Agent) flush( +func (a *Agent) flushLoop( ctx context.Context, + startTime time.Time, output *models.RunningOutput, interval time.Duration, jitter time.Duration, ) { - // since we are watching two channels we need a ticker with the jitter - // integrated. - ticker := NewTicker(interval, jitter) - defer ticker.Stop() - logError := func(err error) { if err != nil { log.Printf("E! [agent] Error writing to %s: %v", output.LogName(), err) } } + // watch for flush requests + flushRequested := make(chan os.Signal, 1) + watchForFlushSignal(flushRequested) + + // align to round interval + if a.Config.Agent.RoundInterval { + err := internal.SleepContext( + ctx, internal.AlignDuration(startTime, interval)) + if err != nil { + return + } + } + + // since we are watching two channels we need a ticker with the jitter + // integrated. + ticker := NewTicker(interval, jitter) + defer ticker.Stop() + for { // Favor shutdown over other methods. select { @@ -575,8 +581,13 @@ func (a *Agent) flush( } select { + case <-ctx.Done(): + logError(a.flushOnce(output, interval, output.Write)) + return case <-ticker.C: logError(a.flushOnce(output, interval, output.Write)) + case <-flushRequested: + logError(a.flushOnce(output, interval, output.Write)) case <-output.BatchReady: // Favor the ticker over batch ready select { @@ -585,9 +596,6 @@ func (a *Agent) flush( default: logError(a.flushOnce(output, interval, output.WriteBatch)) } - case <-ctx.Done(): - logError(a.flushOnce(output, interval, output.Write)) - return } } } diff --git a/agent/agent_notwindows.go b/agent/agent_notwindows.go new file mode 100644 index 000000000..4d1f496a9 --- /dev/null +++ b/agent/agent_notwindows.go @@ -0,0 +1,16 @@ +// +build !windows + +package agent + +import ( + "os" + "os/signal" + "syscall" +) + +const flushSignal = syscall.SIGUSR1 + +func watchForFlushSignal(flushRequested chan os.Signal) { + signal.Notify(flushRequested, flushSignal) + defer signal.Stop(flushRequested) +} diff --git a/agent/agent_windows.go b/agent/agent_windows.go new file mode 100644 index 000000000..7e7a0cabd --- /dev/null +++ b/agent/agent_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package agent + +import "os" + +func watchForFlushSignal(flushRequested chan os.Signal) { + // not implemented +} diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 7b013cc6c..c1f7344da 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -10,7 +10,6 @@ import ( _ "net/http/pprof" // Comment this line to disable pprof endpoint. "os" "os/signal" - "runtime" "sort" "strings" "syscall" @@ -27,16 +26,16 @@ import ( "github.com/influxdata/telegraf/plugins/outputs" _ "github.com/influxdata/telegraf/plugins/outputs/all" _ "github.com/influxdata/telegraf/plugins/processors/all" - "github.com/kardianos/service" ) +// If you update these, update usage.go and usage_windows.go var fDebug = flag.Bool("debug", false, "turn on debug logging") var pprofAddr = flag.String("pprof-addr", "", "pprof address to listen on, not activate pprof if empty") var fQuiet = flag.Bool("quiet", false, "run in quiet mode") -var fTest = flag.Bool("test", false, "enable test mode: gather metrics, print them out, and exit") +var fTest = flag.Bool("test", false, "enable test mode: gather metrics, print them out, and exit. 
Note: Test mode only runs inputs, not processors, aggregators, or outputs") var fTestWait = flag.Int("test-wait", 0, "wait up to this many seconds for service inputs to complete in test mode") var fConfig = flag.String("config", "", "configuration file to load") var fConfigDirectory = flag.String("config-directory", "", @@ -78,7 +77,6 @@ var ( var stop chan struct{} func reloadLoop( - stop chan struct{}, inputFilters []string, outputFilters []string, aggregatorFilters []string, @@ -91,7 +89,7 @@ func reloadLoop( ctx, cancel := context.WithCancel(context.Background()) - signals := make(chan os.Signal) + signals := make(chan os.Signal, 1) signal.Notify(signals, os.Interrupt, syscall.SIGHUP, syscall.SIGTERM, syscall.SIGINT) go func() { @@ -208,32 +206,6 @@ func usageExit(rc int) { os.Exit(rc) } -type program struct { - inputFilters []string - outputFilters []string - aggregatorFilters []string - processorFilters []string -} - -func (p *program) Start(s service.Service) error { - go p.run() - return nil -} -func (p *program) run() { - stop = make(chan struct{}) - reloadLoop( - stop, - p.inputFilters, - p.outputFilters, - p.aggregatorFilters, - p.processorFilters, - ) -} -func (p *program) Stop(s service.Service) error { - close(stop) - return nil -} - func formatFullVersion() string { var parts = []string{"Telegraf"} @@ -380,80 +352,10 @@ func main() { log.Println("Telegraf version already configured to: " + internal.Version()) } - if runtime.GOOS == "windows" && windowsRunAsService() { - programFiles := os.Getenv("ProgramFiles") - if programFiles == "" { // Should never happen - programFiles = "C:\\Program Files" - } - svcConfig := &service.Config{ - Name: *fServiceName, - DisplayName: *fServiceDisplayName, - Description: "Collects data using a series of plugins and publishes it to" + - "another series of plugins.", - Arguments: []string{"--config", programFiles + "\\Telegraf\\telegraf.conf"}, - } - - prg := &program{ - inputFilters: inputFilters, - outputFilters: outputFilters, - aggregatorFilters: aggregatorFilters, - processorFilters: processorFilters, - } - s, err := service.New(prg, svcConfig) - if err != nil { - log.Fatal("E! " + err.Error()) - } - // Handle the --service flag here to prevent any issues with tooling that - // may not have an interactive session, e.g. installing from Ansible. - if *fService != "" { - if *fConfig != "" { - svcConfig.Arguments = []string{"--config", *fConfig} - } - if *fConfigDirectory != "" { - svcConfig.Arguments = append(svcConfig.Arguments, "--config-directory", *fConfigDirectory) - } - //set servicename to service cmd line, to have a custom name after relaunch as a service - svcConfig.Arguments = append(svcConfig.Arguments, "--service-name", *fServiceName) - - err := service.Control(s, *fService) - if err != nil { - log.Fatal("E! " + err.Error()) - } - os.Exit(0) - } else { - winlogger, err := s.Logger(nil) - if err == nil { - //When in service mode, register eventlog target andd setup default logging to eventlog - logger.RegisterEventLogger(winlogger) - logger.SetupLogging(logger.LogConfig{LogTarget: logger.LogTargetEventlog}) - } - err = s.Run() - - if err != nil { - log.Println("E! " + err.Error()) - } - } - } else { - stop = make(chan struct{}) - reloadLoop( - stop, - inputFilters, - outputFilters, - aggregatorFilters, - processorFilters, - ) - } -} - -// Return true if Telegraf should create a Windows service. 
-func windowsRunAsService() bool {
-	if *fService != "" {
-		return true
-	}
-
-	if *fRunAsConsole {
-		return false
-	}
-
-	return !service.Interactive()
+	run(
+		inputFilters,
+		outputFilters,
+		aggregatorFilters,
+		processorFilters,
+	)
 }
diff --git a/cmd/telegraf/telegraf_notwindows.go b/cmd/telegraf/telegraf_notwindows.go
new file mode 100644
index 000000000..ca28622f1
--- /dev/null
+++ b/cmd/telegraf/telegraf_notwindows.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package main
+
+func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) {
+	stop = make(chan struct{})
+	reloadLoop(
+		inputFilters,
+		outputFilters,
+		aggregatorFilters,
+		processorFilters,
+	)
+}
diff --git a/cmd/telegraf/telegraf_windows.go b/cmd/telegraf/telegraf_windows.go
new file mode 100644
index 000000000..eaf700ed0
--- /dev/null
+++ b/cmd/telegraf/telegraf_windows.go
@@ -0,0 +1,124 @@
+// +build windows
+
+package main
+
+import (
+	"log"
+	"os"
+	"runtime"
+
+	"github.com/influxdata/telegraf/logger"
+	"github.com/kardianos/service"
+)
+
+func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) {
+	if runtime.GOOS == "windows" && windowsRunAsService() {
+		runAsWindowsService(
+			inputFilters,
+			outputFilters,
+			aggregatorFilters,
+			processorFilters,
+		)
+	} else {
+		stop = make(chan struct{})
+		reloadLoop(
+			inputFilters,
+			outputFilters,
+			aggregatorFilters,
+			processorFilters,
+		)
+	}
+}
+
+type program struct {
+	inputFilters      []string
+	outputFilters     []string
+	aggregatorFilters []string
+	processorFilters  []string
+}
+
+func (p *program) Start(s service.Service) error {
+	go p.run()
+	return nil
+}
+func (p *program) run() {
+	stop = make(chan struct{})
+	reloadLoop(
+		p.inputFilters,
+		p.outputFilters,
+		p.aggregatorFilters,
+		p.processorFilters,
+	)
+}
+func (p *program) Stop(s service.Service) error {
+	close(stop)
+	return nil
+}
+
+func runAsWindowsService(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) {
+	programFiles := os.Getenv("ProgramFiles")
+	if programFiles == "" { // Should never happen
+		programFiles = "C:\\Program Files"
+	}
+	svcConfig := &service.Config{
+		Name:        *fServiceName,
+		DisplayName: *fServiceDisplayName,
+		Description: "Collects data using a series of plugins and publishes it to" +
+			"another series of plugins.",
+		Arguments: []string{"--config", programFiles + "\\Telegraf\\telegraf.conf"},
+	}
+
+	prg := &program{
+		inputFilters:      inputFilters,
+		outputFilters:     outputFilters,
+		aggregatorFilters: aggregatorFilters,
+		processorFilters:  processorFilters,
+	}
+	s, err := service.New(prg, svcConfig)
+	if err != nil {
+		log.Fatal("E! " + err.Error())
+	}
+	// Handle the --service flag here to prevent any issues with tooling that
+	// may not have an interactive session, e.g. installing from Ansible.
+	if *fService != "" {
+		if *fConfig != "" {
+			svcConfig.Arguments = []string{"--config", *fConfig}
+		}
+		if *fConfigDirectory != "" {
+			svcConfig.Arguments = append(svcConfig.Arguments, "--config-directory", *fConfigDirectory)
+		}
+		//set servicename to service cmd line, to have a custom name after relaunch as a service
+		svcConfig.Arguments = append(svcConfig.Arguments, "--service-name", *fServiceName)

+		err := service.Control(s, *fService)
+		if err != nil {
+			log.Fatal("E! " + err.Error())
+		}
+		os.Exit(0)
+	} else {
+		winlogger, err := s.Logger(nil)
+		if err == nil {
+			//When in service mode, register eventlog target and set up default logging to eventlog
+			logger.RegisterEventLogger(winlogger)
+			logger.SetupLogging(logger.LogConfig{LogTarget: logger.LogTargetEventlog})
+		}
+		err = s.Run()

+		if err != nil {
+			log.Println("E! " + err.Error())
+		}
+	}
+}
+
+// Return true if Telegraf should create a Windows service.
+func windowsRunAsService() bool {
+	if *fService != "" {
+		return true
+	}
+
+	if *fRunAsConsole {
+		return false
+	}
+
+	return !service.Interactive()
+}
diff --git a/docs/OUTPUTS.md b/docs/OUTPUTS.md
index 9d89491cc..c60cd96ba 100644
--- a/docs/OUTPUTS.md
+++ b/docs/OUTPUTS.md
@@ -94,6 +94,19 @@ You should also add the following to your `SampleConfig()`:
   data_format = "influx"
 ```

+## Flushing Metrics to Outputs
+
+Metrics are flushed to outputs when any of the following events happen:
+- `flush_interval + rand(flush_jitter)` has elapsed since start or the last flush interval
+- At least `metric_batch_size` count of metrics are waiting in the buffer
+- The telegraf process has received a SIGUSR1 signal (see the example below)
+
+Note that if the flush takes longer than the `agent.interval` to write the metrics
+to the output, you'll see a message saying the output `did not complete within its
+flush interval`. This may mean your output is not keeping up with the flow of metrics,
+and you may want to look into enabling compression, reducing the size of your metrics,
+or investigating other reasons why the writes might be taking longer than expected.
+
 [file]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/file
 [output data formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
 [SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig
diff --git a/internal/usage.go b/internal/usage.go
index 90ea92986..124087343 100644
--- a/internal/usage.go
+++ b/internal/usage.go
@@ -32,8 +32,9 @@ The commands & flags are:
                            Valid values are 'agent', 'global_tags', 'outputs',
                            'processors', 'aggregators' and 'inputs'
   --sample-config          print out full sample configuration
-  --test                   gather metrics, print them out, and exit;
-                           processors, aggregators, and outputs are not run
+  --test                   enable test mode: gather metrics, print them out,
+                           and exit. Note: Test mode only runs inputs, not
+                           processors, aggregators, or outputs
   --test-wait              wait up to this many seconds for service inputs to complete in test mode
   --usage                  print usage for a plugin, ie, 'telegraf --usage mysql'
diff --git a/internal/usage_windows.go b/internal/usage_windows.go
index af2506ec1..3ee2f7eff 100644
--- a/internal/usage_windows.go
+++ b/internal/usage_windows.go
@@ -29,8 +29,9 @@ The commands & flags are:
   --section-filter         filter config sections to output, separator is :
                            Valid values are 'agent', 'global_tags', 'outputs',
                            'processors', 'aggregators' and 'inputs'
-  --test                   gather metrics, print them out, and exit;
-                           processors, aggregators, and outputs are not run
+  --test                   enable test mode: gather metrics, print them out,
+                           and exit. Note: Test mode only runs inputs, not
+                           processors, aggregators, or outputs
   --test-wait              wait up to this many seconds for service inputs to complete in test mode
   --usage                  print usage for a plugin, ie, 'telegraf --usage mysql'

From 1568932457b4d5800ca76605cd5f90e4739b5727 Mon Sep 17 00:00:00 2001
From: anilkun
Date: Mon, 20 Apr 2020 11:54:27 -0700
Subject: [PATCH 1690/1815] Update github.com/aws/aws-sdk-go (#7373)

Fix cloudwatch metric writing when imdsv1 is disabled.
---
 go.mod |  8 ++++----
 go.sum | 16 ++++++++++------
 2 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/go.mod b/go.mod
index f2d7c417a..61515f70c 100644
--- a/go.mod
+++ b/go.mod
@@ -24,7 +24,7 @@ require (
 	github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect
 	github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740
 	github.com/armon/go-metrics v0.3.0 // indirect
-	github.com/aws/aws-sdk-go v1.19.41
+	github.com/aws/aws-sdk-go v1.30.9
 	github.com/bitly/go-hostpool v0.1.0 // indirect
 	github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
 	github.com/caio/go-tdigest v2.3.0+incompatible // indirect
@@ -48,7 +48,7 @@ require (
 	github.com/go-logfmt/logfmt v0.4.0
 	github.com/go-ole/go-ole v1.2.1 // indirect
 	github.com/go-redis/redis v6.12.0+incompatible
-	github.com/go-sql-driver/mysql v1.4.1
+	github.com/go-sql-driver/mysql v1.5.0
 	github.com/goburrow/modbus v0.1.0
 	github.com/goburrow/serial v0.1.0 // indirect
 	github.com/gobwas/glob v0.2.3
@@ -100,7 +100,7 @@ require (
 	github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect
 	github.com/opentracing/opentracing-go v1.0.2 // indirect
 	github.com/openzipkin/zipkin-go-opentracing v0.3.4
-	github.com/pkg/errors v0.8.1
+	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_golang v1.5.1
 	github.com/prometheus/client_model v0.2.0
 	github.com/prometheus/common v0.9.1
@@ -112,7 +112,7 @@ require (
 	github.com/sirupsen/logrus v1.4.2
 	github.com/soniah/gosnmp v1.22.0
 	github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8
-	github.com/stretchr/testify v1.4.0
+	github.com/stretchr/testify v1.5.1
 	github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62
 	github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 // indirect
 	github.com/tidwall/gjson v1.3.0
diff --git a/go.sum b/go.sum
index 8f19c9966..d0d30aa18 100644
--- a/go.sum
+++ b/go.sum
@@ -110,8 +110,8 @@ github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740/go.mod h1:
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU=
 github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs=
-github.com/aws/aws-sdk-go v1.19.41 h1:veutzvQP/lOmYmtX26S9mTFJLO6sp7/UsxFcCjglu4A=
-github.com/aws/aws-sdk-go v1.19.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.30.9 h1:DntpBUKkchINPDbhEzDRin1eEn1TG9TZFlzWPf0i8to=
+github.com/aws/aws-sdk-go v1.30.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -214,8 +214,8 @@ github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nA
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-redis/redis v6.12.0+incompatible h1:s+64XI+z/RXqGHz2fQSgRJOEwqqSXeX3dliF7iVkMbE= github.com/go-redis/redis v6.12.0+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/goburrow/modbus v0.1.0 h1:DejRZY73nEM6+bt5JSP6IsFolJ9dVcqxsYbpLbeW/ro= github.com/goburrow/modbus v0.1.0/go.mod h1:Kx552D5rLIS8E7TyUwQ/UdHEqvX5T8tyiGBTlzMcZBg= @@ -348,8 +348,8 @@ github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGk github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 h1:K//n/AqR5HjG3qxbrBCL4vJPW0MVFSs9CPK1OOJdRME= @@ -476,6 +476,8 @@ github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -538,6 +540,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOsk3ij21QjjEgAcVSeo9nkp0dI//cD2o= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod 
h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 h1:mujcChM89zOHwgZBBNr5WZ77mBXP1yR+gLThGCYZgAg= From 49c915541e961d3dabb381ae82c52d830e020b53 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 20 Apr 2020 11:56:17 -0700 Subject: [PATCH 1691/1815] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f9b35941a..37141d919 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,10 @@ - [#7214](https://github.com/influxdata/telegraf/pull/7214): Add VolumeSpace query for sqlserver input with metric_version 2. - [#7304](https://github.com/influxdata/telegraf/pull/7304): Add reading bearer token from a file to http input. +#### Bugfixes + +- [#7371](https://github.com/influxdata/telegraf/issues/7371): Fix unable to write metrics to CloudWatch with IMDSv1 disabled. + ## v1.14.1 [2020-04-14] #### Bugfixes From c9a3b697b8f3494aea71f9d3f5f5e59c5dd100ec Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Mon, 20 Apr 2020 14:59:46 -0400 Subject: [PATCH 1692/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 37141d919..3bd63f4fc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ - [#7250](https://github.com/influxdata/telegraf/pull/7250): Deploy telegraf configuration as a "non config" file. - [#7214](https://github.com/influxdata/telegraf/pull/7214): Add VolumeSpace query for sqlserver input with metric_version 2. - [#7304](https://github.com/influxdata/telegraf/pull/7304): Add reading bearer token from a file to http input. +- [#7366](https://github.com/influxdata/telegraf/pull/7366): add support for SIGUSR1 to trigger flush. #### Bugfixes From a4eb9c2205c51502b089423f73decd4d594f7b03 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 21 Apr 2020 10:02:18 -0700 Subject: [PATCH 1693/1815] Use the product token for the user agent in more locations (#7378) --- internal/internal_test.go | 9 +++++++++ plugins/inputs/solr/solr.go | 2 +- plugins/outputs/http/http.go | 2 +- plugins/outputs/http/http_test.go | 4 +--- plugins/outputs/influxdb/http.go | 2 +- plugins/outputs/influxdb/http_test.go | 4 +--- plugins/outputs/influxdb_v2/http.go | 2 +- 7 files changed, 15 insertions(+), 10 deletions(-) diff --git a/internal/internal_test.go b/internal/internal_test.go index bb186f5fc..83a7e88d6 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -8,6 +8,7 @@ import ( "io/ioutil" "log" "os/exec" + "regexp" "testing" "time" @@ -480,3 +481,11 @@ func TestParseTimestamp(t *testing.T) { }) } } + +func TestProductToken(t *testing.T) { + token := ProductToken() + // Telegraf version depends on the call to SetVersion, it cannot be set + // multiple times and is not thread-safe. 
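+	// (Illustrative aside, not in the original change: a returned token looks
+	// like "Telegraf/1.14.3 Go/1.13.8"; those version numbers are made up here,
+	// and only the overall shape is asserted by the regexp below.)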
+ re := regexp.MustCompile(`^Telegraf/[^\s]+ Go/\d+.\d+.\d+$`) + require.True(t, re.MatchString(token), token) +} diff --git a/plugins/inputs/solr/solr.go b/plugins/inputs/solr/solr.go index a9257c987..4629e0246 100644 --- a/plugins/inputs/solr/solr.go +++ b/plugins/inputs/solr/solr.go @@ -486,7 +486,7 @@ func (s *Solr) gatherData(url string, v interface{}) error { req.SetBasicAuth(s.Username, s.Password) } - req.Header.Set("User-Agent", "Telegraf/"+internal.Version()) + req.Header.Set("User-Agent", internal.ProductToken()) r, err := s.client.Do(req) if err != nil { diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 746cba346..66fc0d5f0 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -193,7 +193,7 @@ func (h *HTTP) write(reqBody []byte) error { req.SetBasicAuth(h.Username, h.Password) } - req.Header.Set("User-Agent", "Telegraf/"+internal.Version()) + req.Header.Set("User-Agent", internal.ProductToken()) req.Header.Set("Content-Type", defaultContentType) if h.ContentEncoding == "gzip" { req.Header.Set("Content-Encoding", "gzip") diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index 0decdf024..abcf2db33 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -431,11 +431,9 @@ func TestDefaultUserAgent(t *testing.T) { u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) require.NoError(t, err) - internal.SetVersion("1.2.3") - t.Run("default-user-agent", func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - require.Equal(t, "Telegraf/1.2.3", r.Header.Get("User-Agent")) + require.Equal(t, internal.ProductToken(), r.Header.Get("User-Agent")) w.WriteHeader(http.StatusOK) }) diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 5f25572d3..6656a8ee6 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -129,7 +129,7 @@ func NewHTTPClient(config HTTPConfig) (*httpClient, error) { userAgent := config.UserAgent if userAgent == "" { - userAgent = "Telegraf/" + internal.Version() + userAgent = internal.ProductToken() } if config.Headers == nil { diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index a59fadaf4..4b323c327 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -248,8 +248,6 @@ func TestHTTP_Write(t *testing.T) { u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) require.NoError(t, err) - internal.SetVersion("1.2.3") - tests := []struct { name string config influxdb.HTTPConfig @@ -310,7 +308,7 @@ func TestHTTP_Write(t *testing.T) { Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - require.Equal(t, r.Header.Get("User-Agent"), "Telegraf/1.2.3") + require.Equal(t, internal.ProductToken(), r.Header.Get("User-Agent")) w.WriteHeader(http.StatusNoContent) }, }, diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index b94df889b..3034207dd 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -84,7 +84,7 @@ func NewHTTPClient(config *HTTPConfig) (*httpClient, error) { userAgent := config.UserAgent if userAgent == "" { - userAgent = "Telegraf/" + internal.Version() + userAgent = internal.ProductToken() } var headers = make(map[string]string, len(config.Headers)+2) From 
050ed9e61e9d20531700435adc72ee15efa26c7b Mon Sep 17 00:00:00 2001
From: Steven Soroka
Date: Tue, 21 Apr 2020 13:33:47 -0400
Subject: [PATCH 1694/1815] fix issue with closing flush signal channel (#7384)

---
 agent/agent.go                                |  1 +
 agent/{agent_notwindows.go => agent_posix.go} |  3 +++
 agent/agent_windows.go                        |  6 +++++-
 3 files changed, 9 insertions(+), 1 deletion(-)
 rename agent/{agent_notwindows.go => agent_posix.go} (78%)

diff --git a/agent/agent.go b/agent/agent.go
index 97c6b01c8..863309f28 100644
--- a/agent/agent.go
+++ b/agent/agent.go
@@ -556,6 +556,7 @@ func (a *Agent) flushLoop(
 	// watch for flush requests
 	flushRequested := make(chan os.Signal, 1)
 	watchForFlushSignal(flushRequested)
+	defer stopListeningForFlushSignal(flushRequested)

 	// align to round interval
 	if a.Config.Agent.RoundInterval {
diff --git a/agent/agent_notwindows.go b/agent/agent_posix.go
similarity index 78%
rename from agent/agent_notwindows.go
rename to agent/agent_posix.go
index 4d1f496a9..09552cac0 100644
--- a/agent/agent_notwindows.go
+++ b/agent/agent_posix.go
@@ -12,5 +12,8 @@ const flushSignal = syscall.SIGUSR1

 func watchForFlushSignal(flushRequested chan os.Signal) {
 	signal.Notify(flushRequested, flushSignal)
+}
+
+func stopListeningForFlushSignal(flushRequested chan os.Signal) {
 	defer signal.Stop(flushRequested)
 }
diff --git a/agent/agent_windows.go b/agent/agent_windows.go
index 7e7a0cabd..94ed9d006 100644
--- a/agent/agent_windows.go
+++ b/agent/agent_windows.go
@@ -5,5 +5,9 @@ package agent
 import "os"

 func watchForFlushSignal(flushRequested chan os.Signal) {
-	// not implemented
+	// not supported
+}
+
+func stopListeningForFlushSignal(flushRequested chan os.Signal) {
+	// not supported
 }

From 1006c65587238e5fa1e9e8fbff99cb221bff0f5f Mon Sep 17 00:00:00 2001
From: Sven Rebhan <36194019+srebhan@users.noreply.github.com>
Date: Tue, 21 Apr 2020 20:21:27 +0200
Subject: [PATCH 1695/1815] Add retry when slave is busy to modbus input (#7271)

---
 plugins/inputs/modbus/README.md      |  10 ++-
 plugins/inputs/modbus/modbus.go      |  36 ++++++--
 plugins/inputs/modbus/modbus_test.go | 128 +++++++++++++++++++++++++++
 3 files changed, 168 insertions(+), 6 deletions(-)

diff --git a/plugins/inputs/modbus/README.md b/plugins/inputs/modbus/README.md
index 98661e84d..cac552c81 100644
--- a/plugins/inputs/modbus/README.md
+++ b/plugins/inputs/modbus/README.md
@@ -20,6 +20,14 @@ The Modbus plugin collects Discrete Inputs, Coils, Input Registers and Holding R
   ## Timeout for each request
   timeout = "1s"

+  ## Maximum number of retries and the time to wait between retries
+  ## when a slave-device is busy.
+  ## NOTE: Please make sure that the overall retry time (#retries * wait time)
+  ## is always smaller than the query interval as otherwise you will get
+  ## a "did not complete within its interval" warning.
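+  ## For example (editorial note with illustrative numbers, not part of the
+  ## original patch): with interval = "10s", busy_retries = 3 and
+  ## busy_retries_wait = "100ms", the worst case spent retrying is
+  ## 3 * 100ms = 300ms, comfortably below the 10s query interval.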
+  #busy_retries = 0
+  #busy_retries_wait = "100ms"
+
   # TCP - connect via Modbus/TCP
   controller = "tcp://localhost:502"

@@ -53,7 +61,7 @@ The Modbus plugin collects Discrete Inputs, Coils, Input Registers and Holding R

   ## Analog Variables, Input Registers and Holding Registers
   ## measurement - the (optional) measurement name, defaults to "modbus"
-  ## name - the variable name 
+  ## name - the variable name
   ## byte_order - the ordering of bytes
diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go
index 6775465b6..6dc21afb0 100644
--- a/plugins/inputs/modbus/modbus.go
+++ b/plugins/inputs/modbus/modbus.go
@@ -3,6 +3,7 @@ package modbus
 import (
 	"encoding/binary"
 	"fmt"
+	"log"
 	"math"
 	"net"
 	"net/url"
@@ -27,6 +28,8 @@ type Modbus struct {
 	StopBits         int               `toml:"stop_bits"`
 	SlaveID          int               `toml:"slave_id"`
 	Timeout          internal.Duration `toml:"timeout"`
+	Retries          int               `toml:"busy_retries"`
+	RetriesWaitTime  internal.Duration `toml:"busy_retries_wait"`
 	DiscreteInputs   []fieldContainer  `toml:"discrete_inputs"`
 	Coils            []fieldContainer  `toml:"coils"`
 	HoldingRegisters []fieldContainer  `toml:"holding_registers"`
@@ -84,6 +87,14 @@ const sampleConfig = `
   ## Timeout for each request
   timeout = "1s"

+  ## Maximum number of retries and the time to wait between retries
+  ## when a slave-device is busy.
+  ## NOTE: Please make sure that the overall retry time (#retries * wait time)
+  ## is always smaller than the query interval as otherwise you will get
+  ## a "did not complete within its interval" warning.
+  #busy_retries = 0
+  #busy_retries_wait = "100ms"
+
   # TCP - connect via Modbus/TCP
   controller = "tcp://localhost:502"

@@ -159,6 +170,10 @@ func (m *Modbus) Init() error {
 		return fmt.Errorf("device name is empty")
 	}

+	if m.Retries < 0 {
+		return fmt.Errorf("retries cannot be negative")
+	}
+
 	err := m.InitRegister(m.DiscreteInputs, cDiscreteInputs)
 	if err != nil {
 		return err
@@ -642,11 +657,22 @@ func (m *Modbus) Gather(acc telegraf.Accumulator) error {
 	}
 	timestamp := time.Now()

-	err := m.getFields()
-	if err != nil {
-		disconnect(m)
-		m.isConnected = false
-		return err
+	for retry := 0; retry <= m.Retries; retry += 1 {
+		timestamp = time.Now()
+		err := m.getFields()
+		if err != nil {
+			mberr, ok := err.(*mb.ModbusError)
+			if ok && mberr.ExceptionCode == mb.ExceptionCodeServerDeviceBusy && retry < m.Retries {
+				log.Printf("I! [inputs.modbus] device busy! 
Retrying %d more time(s)...", m.Retries-retry) + time.Sleep(m.RetriesWaitTime.Duration) + continue + } + disconnect(m) + m.isConnected = false + return err + } + // Reading was successful, leave the retry loop + break } grouper := metric.NewSeriesGrouper() diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go index 9a8c46382..97265769d 100644 --- a/plugins/inputs/modbus/modbus_test.go +++ b/plugins/inputs/modbus/modbus_test.go @@ -494,3 +494,131 @@ func TestHoldingRegisters(t *testing.T) { }) } } + +func TestRetrySuccessful(t *testing.T) { + retries := 0 + maxretries := 2 + value := 1 + + serv := mbserver.NewServer() + err := serv.ListenTCP("localhost:1502") + assert.NoError(t, err) + defer serv.Close() + + // Make read on coil-registers fail for some trials by making the device + // to appear busy + serv.RegisterFunctionHandler(1, + func(s *mbserver.Server, frame mbserver.Framer) ([]byte, *mbserver.Exception) { + data := make([]byte, 2) + data[0] = byte(1) + data[1] = byte(value) + + except := &mbserver.SlaveDeviceBusy + if retries >= maxretries { + except = &mbserver.Success + } + retries += 1 + + return data, except + }) + + t.Run("retry_success", func(t *testing.T) { + modbus := Modbus{ + Name: "TestRetry", + Controller: "tcp://localhost:1502", + SlaveID: 1, + Retries: maxretries, + Coils: []fieldContainer{ + { + Name: "retry_success", + Address: []uint16{0}, + }, + }, + } + + err = modbus.Init() + assert.NoError(t, err) + var acc testutil.Accumulator + err = modbus.Gather(&acc) + assert.NoError(t, err) + assert.NotEmpty(t, modbus.registers) + + for _, coil := range modbus.registers { + assert.Equal(t, uint16(value), coil.Fields[0].value) + } + }) +} + +func TestRetryFail(t *testing.T) { + maxretries := 2 + + serv := mbserver.NewServer() + err := serv.ListenTCP("localhost:1502") + assert.NoError(t, err) + defer serv.Close() + + // Make the read on coils fail with busy + serv.RegisterFunctionHandler(1, + func(s *mbserver.Server, frame mbserver.Framer) ([]byte, *mbserver.Exception) { + data := make([]byte, 2) + data[0] = byte(1) + data[1] = byte(0) + + return data, &mbserver.SlaveDeviceBusy + }) + + t.Run("retry_fail", func(t *testing.T) { + modbus := Modbus{ + Name: "TestRetryFail", + Controller: "tcp://localhost:1502", + SlaveID: 1, + Retries: maxretries, + Coils: []fieldContainer{ + { + Name: "retry_fail", + Address: []uint16{0}, + }, + }, + } + + err = modbus.Init() + assert.NoError(t, err) + var acc testutil.Accumulator + err = modbus.Gather(&acc) + assert.Error(t, err) + }) + + // Make the read on coils fail with illegal function preventing retry + counter := 0 + serv.RegisterFunctionHandler(1, + func(s *mbserver.Server, frame mbserver.Framer) ([]byte, *mbserver.Exception) { + counter += 1 + data := make([]byte, 2) + data[0] = byte(1) + data[1] = byte(0) + + return data, &mbserver.IllegalFunction + }) + + t.Run("retry_fail", func(t *testing.T) { + modbus := Modbus{ + Name: "TestRetryFail", + Controller: "tcp://localhost:1502", + SlaveID: 1, + Retries: maxretries, + Coils: []fieldContainer{ + { + Name: "retry_fail", + Address: []uint16{0}, + }, + }, + } + + err = modbus.Init() + assert.NoError(t, err) + var acc testutil.Accumulator + err = modbus.Gather(&acc) + assert.Error(t, err) + assert.Equal(t, counter, 1) + }) +} From c7797cc3c40328807b59b7f820444166dc6768b3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 21 Apr 2020 11:23:19 -0700 Subject: [PATCH 1696/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff 
--git a/CHANGELOG.md b/CHANGELOG.md index 3bd63f4fc..71fd59392 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ - [#7214](https://github.com/influxdata/telegraf/pull/7214): Add VolumeSpace query for sqlserver input with metric_version 2. - [#7304](https://github.com/influxdata/telegraf/pull/7304): Add reading bearer token from a file to http input. - [#7366](https://github.com/influxdata/telegraf/pull/7366): add support for SIGUSR1 to trigger flush. +- [#7271](https://github.com/influxdata/telegraf/pull/7271): Add retry when slave is busy to modbus input. #### Bugfixes From d1f109b316a656e0384dd5d5bfa73103ac260c6c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 21 Apr 2020 11:27:35 -0700 Subject: [PATCH 1697/1815] Update modbus readme --- plugins/inputs/modbus/README.md | 150 +++++++++++++++++--------------- plugins/inputs/modbus/modbus.go | 135 ++++++++++++++-------------- 2 files changed, 144 insertions(+), 141 deletions(-) diff --git a/plugins/inputs/modbus/README.md b/plugins/inputs/modbus/README.md index cac552c81..629c79027 100644 --- a/plugins/inputs/modbus/README.md +++ b/plugins/inputs/modbus/README.md @@ -1,91 +1,97 @@ -# Telegraf Input Plugin: Modbus +# Modbus Input Plugin -The Modbus plugin collects Discrete Inputs, Coils, Input Registers and Holding Registers via Modbus TCP or Modbus RTU/ASCII +The Modbus plugin collects Discrete Inputs, Coils, Input Registers and Holding +Registers via Modbus TCP or Modbus RTU/ASCII. -### Configuration: +### Configuration ```toml - ## Connection Configuration - ## - ## The module supports connections to PLCs via MODBUS/TCP or - ## via serial line communication in binary (RTU) or readable (ASCII) encoding - ## - ## Device name - name = "Device" +[[inputs.modbus]] + ## Connection Configuration + ## + ## The module supports connections to PLCs via MODBUS/TCP or + ## via serial line communication in binary (RTU) or readable (ASCII) encoding + ## + ## Device name + name = "Device" - ## Slave ID - addresses a MODBUS device on the bus - ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] - slave_id = 1 + ## Slave ID - addresses a MODBUS device on the bus + ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] + slave_id = 1 - ## Timeout for each request - timeout = "1s" + ## Timeout for each request + timeout = "1s" - ## Maximum number of retries and the time to wait between retries - ## when a slave-device is busy. - ## NOTE: Please make sure that the overall retry time (#retries * wait time) - ## is always smaller than the query interval as otherwise you will get - ## an "did not complete within its interval" warning. - #busy_retries = 0 - #busy_retries_wait = "100ms" + ## Maximum number of retries and the time to wait between retries + ## when a slave-device is busy. 
+ # busy_retries = 0 + # busy_retries_wait = "100ms" - # TCP - connect via Modbus/TCP - controller = "tcp://localhost:502" + # TCP - connect via Modbus/TCP + controller = "tcp://localhost:502" - # Serial (RS485; RS232) - #controller = "file:///dev/ttyUSB0" - #baud_rate = 9600 - #data_bits = 8 - #parity = "N" - #stop_bits = 1 - #transmission_mode = "RTU" + ## Serial (RS485; RS232) + # controller = "file:///dev/ttyUSB0" + # baud_rate = 9600 + # data_bits = 8 + # parity = "N" + # stop_bits = 1 + # transmission_mode = "RTU" - ## Measurements - ## + ## Measurements + ## - ## Digital Variables, Discrete Inputs and Coils - ## name - the variable name - ## address - variable address + ## Digital Variables, Discrete Inputs and Coils + ## name - the variable name + ## address - variable address - discrete_inputs = [ - { name = "Start", address = [0]}, - { name = "Stop", address = [1]}, - { name = "Reset", address = [2]}, - { name = "EmergencyStop", address = [3]}, - ] - coils = [ - { name = "Motor1-Run", address = [0]}, - { name = "Motor1-Jog", address = [1]}, - { name = "Motor1-Stop", address = [2]}, - ] + discrete_inputs = [ + { name = "Start", address = [0]}, + { name = "Stop", address = [1]}, + { name = "Reset", address = [2]}, + { name = "EmergencyStop", address = [3]}, + ] + coils = [ + { name = "Motor1-Run", address = [0]}, + { name = "Motor1-Jog", address = [1]}, + { name = "Motor1-Stop", address = [2]}, + ] - ## Analog Variables, Input Registers and Holding Registers - ## measurement - the (optional) measurement name, defaults to "modbus" - ## name - the variable name - ## byte_order - the ordering of bytes - ## |---AB, ABCD - Big Endian - ## |---BA, DCBA - Little Endian - ## |---BADC - Mid-Big Endian - ## |---CDAB - Mid-Little Endian - ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation) - ## scale - the final numeric variable representation - ## address - variable address + ## Analog Variables, Input Registers and Holding Registers + ## measurement - the (optional) measurement name, defaults to "modbus" + ## name - the variable name + ## byte_order - the ordering of bytes + ## |---AB, ABCD - Big Endian + ## |---BA, DCBA - Little Endian + ## |---BADC - Mid-Big Endian + ## |---CDAB - Mid-Little Endian + ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation) + ## scale - the final numeric variable representation + ## address - variable address - holding_registers = [ - { name = "PowerFactor", byte_order = "AB", data_type = "FLOAT32", scale=0.01, address = [8]}, - { name = "Voltage", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [0]}, - { name = "Energy", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [5,6]}, - { name = "Current", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [1,2]}, - { name = "Frequency", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [7]}, - { name = "Power", byte_order = "ABCD", data_type = "FLOAT32", scale=0.1, address = [3,4]}, - ] - input_registers = [ - { name = "TankLevel", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, - { name = "TankPH", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, - { name = "Pump1-Speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, - ] + holding_registers = [ + { name = "PowerFactor", byte_order = "AB", data_type = "FLOAT32", scale=0.01, address = [8]}, + { name = "Voltage", 
byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [0]}, + { name = "Energy", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [5,6]}, + { name = "Current", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [1,2]}, + { name = "Frequency", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [7]}, + { name = "Power", byte_order = "ABCD", data_type = "FLOAT32", scale=0.1, address = [3,4]}, + ] + input_registers = [ + { name = "TankLevel", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, + { name = "TankPH", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, + { name = "Pump1-Speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, + ] ``` -### Example Output: + +### Metrics + +Metric are custom and configured using the `discrete_inputs`, `coils`, +`holding_register` and `input_registers` options. + + +### Example Output ``` $ ./telegraf -config telegraf.conf -input-filter modbus -test diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index 6dc21afb0..c1ff56bab 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -72,86 +72,83 @@ const ( const description = `Retrieve data from MODBUS slave devices` const sampleConfig = ` - ## Connection Configuration - ## - ## The plugin supports connections to PLCs via MODBUS/TCP or - ## via serial line communication in binary (RTU) or readable (ASCII) encoding - ## - ## Device name - name = "Device" + ## Connection Configuration + ## + ## The plugin supports connections to PLCs via MODBUS/TCP or + ## via serial line communication in binary (RTU) or readable (ASCII) encoding + ## + ## Device name + name = "Device" - ## Slave ID - addresses a MODBUS device on the bus - ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] - slave_id = 1 + ## Slave ID - addresses a MODBUS device on the bus + ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] + slave_id = 1 - ## Timeout for each request - timeout = "1s" + ## Timeout for each request + timeout = "1s" - ## Maximum number of retries and the time to wait between retries - ## when a slave-device is busy. - ## NOTE: Please make sure that the overall retry time (#retries * wait time) - ## is always smaller than the query interval as otherwise you will get - ## an "did not complete within its interval" warning. - #busy_retries = 0 - #busy_retries_wait = "100ms" + ## Maximum number of retries and the time to wait between retries + ## when a slave-device is busy. 
+ # busy_retries = 0 + # busy_retries_wait = "100ms" - # TCP - connect via Modbus/TCP - controller = "tcp://localhost:502" + # TCP - connect via Modbus/TCP + controller = "tcp://localhost:502" - # Serial (RS485; RS232) - #controller = "file:///dev/ttyUSB0" - #baud_rate = 9600 - #data_bits = 8 - #parity = "N" - #stop_bits = 1 - #transmission_mode = "RTU" + ## Serial (RS485; RS232) + # controller = "file:///dev/ttyUSB0" + # baud_rate = 9600 + # data_bits = 8 + # parity = "N" + # stop_bits = 1 + # transmission_mode = "RTU" - ## Measurements - ## + ## Measurements + ## - ## Digital Variables, Discrete Inputs and Coils - ## measurement - the (optional) measurement name, defaults to "modbus" - ## name - the variable name - ## address - variable address + ## Digital Variables, Discrete Inputs and Coils + ## measurement - the (optional) measurement name, defaults to "modbus" + ## name - the variable name + ## address - variable address - discrete_inputs = [ - { name = "start", address = [0]}, - { name = "stop", address = [1]}, - { name = "reset", address = [2]}, - { name = "emergency_stop", address = [3]}, - ] - coils = [ - { name = "motor1_run", address = [0]}, - { name = "motor1_jog", address = [1]}, - { name = "motor1_stop", address = [2]}, - ] + discrete_inputs = [ + { name = "start", address = [0]}, + { name = "stop", address = [1]}, + { name = "reset", address = [2]}, + { name = "emergency_stop", address = [3]}, + ] + coils = [ + { name = "motor1_run", address = [0]}, + { name = "motor1_jog", address = [1]}, + { name = "motor1_stop", address = [2]}, + ] - ## Analog Variables, Input Registers and Holding Registers - ## measurement - the (optional) measurement name, defaults to "modbus" - ## name - the variable name - ## byte_order - the ordering of bytes - ## |---AB, ABCD - Big Endian - ## |---BA, DCBA - Little Endian - ## |---BADC - Mid-Big Endian - ## |---CDAB - Mid-Little Endian - ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation) - ## scale - the final numeric variable representation - ## address - variable address + ## Analog Variables, Input Registers and Holding Registers + ## measurement - the (optional) measurement name, defaults to "modbus" + ## name - the variable name + ## byte_order - the ordering of bytes + ## |---AB, ABCD - Big Endian + ## |---BA, DCBA - Little Endian + ## |---BADC - Mid-Big Endian + ## |---CDAB - Mid-Little Endian + ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation) + ## scale - the final numeric variable representation + ## address - variable address - holding_registers = [ - { name = "power_factor", byte_order = "AB", data_type = "FLOAT32", scale=0.01, address = [8]}, - { name = "voltage", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [0]}, - { name = "energy", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [5,6]}, - { name = "current", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [1,2]}, - { name = "frequency", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [7]}, - { name = "power", byte_order = "ABCD", data_type = "FLOAT32", scale=0.1, address = [3,4]}, - ] - input_registers = [ - { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, - { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, - { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = 
[3,4]}, - ] + holding_registers = [ + { name = "power_factor", byte_order = "AB", data_type = "FLOAT32", scale=0.01, address = [8]}, + { name = "voltage", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [0]}, + { name = "energy", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [5,6]}, + { name = "current", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [1,2]}, + { name = "frequency", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [7]}, + { name = "power", byte_order = "ABCD", data_type = "FLOAT32", scale=0.1, address = [3,4]}, + ] + input_registers = [ + { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, + { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, + { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, + ] ` // SampleConfig returns a basic configuration for the plugin From d0db0e8f0a67cf071c0a2a62a8b4220e73b3e602 Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Tue, 21 Apr 2020 14:30:29 -0400 Subject: [PATCH 1698/1815] Fix vSphere 6.7 missing data issue (#7233) --- plugins/inputs/vsphere/endpoint.go | 296 +++++++++++++------------ plugins/inputs/vsphere/tscache.go | 18 +- plugins/inputs/vsphere/vsphere.go | 2 +- plugins/inputs/vsphere/vsphere_test.go | 6 +- 4 files changed, 175 insertions(+), 147 deletions(-) diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index c049e495f..a7d4db5ba 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "log" "math" "math/rand" "net/url" @@ -32,10 +31,18 @@ var isIPv6 = regexp.MustCompile("^(?:[A-Fa-f0-9]{0,4}:){1,7}[A-Fa-f0-9]{1,4}$") const metricLookback = 3 // Number of time periods to look back at for non-realtime metrics -const maxSampleConst = 10 // Absolute maximim number of samples regardless of period +const rtMetricLookback = 3 // Number of time periods to look back at for realtime metrics + +const maxSampleConst = 10 // Absolute maximum number of samples regardless of period const maxMetadataSamples = 100 // Number of resources to sample for metric metadata +const hwMarkTTL = time.Duration(4 * time.Hour) + +type queryChunk []types.PerfQuerySpec + +type queryJob func(queryChunk) + // Endpoint is a high-level representation of a connected vCenter endpoint. It is backed by the lower // level Client type. type Endpoint struct { @@ -52,6 +59,9 @@ type Endpoint struct { customFields map[int32]string customAttrFilter filter.Filter customAttrEnabled bool + metricNameLookup map[int32]string + metricNameMux sync.RWMutex + log telegraf.Logger } type resourceKind struct { @@ -107,16 +117,17 @@ func (e *Endpoint) getParent(obj *objectRef, res *resourceKind) (*objectRef, boo // NewEndpoint returns a new connection to a vCenter based on the URL and configuration passed // as parameters. 
-func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint, error) { +func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL, log telegraf.Logger) (*Endpoint, error) { e := Endpoint{ URL: url, Parent: parent, - hwMarks: NewTSCache(1 * time.Hour), + hwMarks: NewTSCache(hwMarkTTL), lun2ds: make(map[string]string), initialized: false, clientFactory: NewClientFactory(ctx, url, parent), customAttrFilter: newFilterOrPanic(parent.CustomAttributeInclude, parent.CustomAttributeExclude), customAttrEnabled: anythingEnabled(parent.CustomAttributeExclude), + log: log, } e.resourceKinds = map[string]*resourceKind{ @@ -254,10 +265,10 @@ func (e *Endpoint) startDiscovery(ctx context.Context) { case <-e.discoveryTicker.C: err := e.discover(ctx) if err != nil && err != context.Canceled { - e.Parent.Log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error()) + e.log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error()) } case <-ctx.Done(): - e.Parent.Log.Debugf("Exiting discovery goroutine for %s", e.URL.Host) + e.log.Debugf("Exiting discovery goroutine for %s", e.URL.Host) e.discoveryTicker.Stop() return } @@ -268,7 +279,7 @@ func (e *Endpoint) startDiscovery(ctx context.Context) { func (e *Endpoint) initalDiscovery(ctx context.Context) { err := e.discover(ctx) if err != nil && err != context.Canceled { - e.Parent.Log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error()) + e.log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error()) } e.startDiscovery(ctx) } @@ -283,7 +294,7 @@ func (e *Endpoint) init(ctx context.Context) error { if e.customAttrEnabled { fields, err := client.GetCustomFields(ctx) if err != nil { - e.Parent.Log.Warn("Could not load custom field metadata") + e.log.Warn("Could not load custom field metadata") } else { e.customFields = fields } @@ -297,21 +308,29 @@ func (e *Endpoint) init(ctx context.Context) error { return nil } -func (e *Endpoint) getMetricNameMap(ctx context.Context) (map[int32]string, error) { +func (e *Endpoint) getMetricNameForId(id int32) string { + e.metricNameMux.RLock() + defer e.metricNameMux.RUnlock() + return e.metricNameLookup[id] +} + +func (e *Endpoint) reloadMetricNameMap(ctx context.Context) error { + e.metricNameMux.Lock() + defer e.metricNameMux.Unlock() client, err := e.clientFactory.GetClient(ctx) if err != nil { - return nil, err + return err } mn, err := client.CounterInfoByName(ctx) if err != nil { - return nil, err + return err } - names := make(map[int32]string) + e.metricNameLookup = make(map[int32]string) for name, m := range mn { - names[m.Key] = name + e.metricNameLookup[m.Key] = name } - return names, nil + return nil } func (e *Endpoint) getMetadata(ctx context.Context, obj *objectRef, sampling int32) (performance.MetricList, error) { @@ -377,7 +396,7 @@ func (e *Endpoint) discover(ctx context.Context) error { return ctx.Err() } - metricNames, err := e.getMetricNameMap(ctx) + err := e.reloadMetricNameMap(ctx) if err != nil { return err } @@ -389,7 +408,7 @@ func (e *Endpoint) discover(ctx context.Context) error { return err } - e.Parent.Log.Debugf("Discover new objects for %s", e.URL.Host) + e.log.Debugf("Discover new objects for %s", e.URL.Host) dcNameCache := make(map[string]string) numRes := int64(0) @@ -397,51 +416,47 @@ func (e *Endpoint) discover(ctx context.Context) error { // Populate resource objects, and endpoint instance info. 
newObjects := make(map[string]objectMap) for k, res := range e.resourceKinds { - err := func() error { - e.Parent.Log.Debugf("Discovering resources for %s", res.name) - // Need to do this for all resource types even if they are not enabled - if res.enabled || k != "vm" { - rf := ResourceFilter{ - finder: &Finder{client}, - resType: res.vcName, - paths: res.paths, - excludePaths: res.excludePaths, - } + e.log.Debugf("Discovering resources for %s", res.name) + // Need to do this for all resource types even if they are not enabled + if res.enabled || k != "vm" { + rf := ResourceFilter{ + finder: &Finder{client}, + resType: res.vcName, + paths: res.paths, + excludePaths: res.excludePaths} - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) - defer cancel1() - objects, err := res.getObjects(ctx1, e, &rf) - if err != nil { - return err - } + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + objects, err := res.getObjects(ctx1, e, &rf) + cancel1() + if err != nil { + return err + } - // Fill in datacenter names where available (no need to do it for Datacenters) - if res.name != "Datacenter" { - for k, obj := range objects { - if obj.parentRef != nil { - obj.dcname = e.getDatacenterName(ctx, client, dcNameCache, *obj.parentRef) - objects[k] = obj - } + // Fill in datacenter names where available (no need to do it for Datacenters) + if res.name != "Datacenter" { + for k, obj := range objects { + if obj.parentRef != nil { + obj.dcname = e.getDatacenterName(ctx, client, dcNameCache, *obj.parentRef) + objects[k] = obj } } + } - // No need to collect metric metadata if resource type is not enabled - if res.enabled { - if res.simple { - e.simpleMetadataSelect(ctx, client, res) - } else { - e.complexMetadataSelect(ctx, res, objects, metricNames) - } + // No need to collect metric metadata if resource type is not enabled + if res.enabled { + if res.simple { + e.simpleMetadataSelect(ctx, client, res) + } else { + e.complexMetadataSelect(ctx, res, objects) } newObjects[k] = objects SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": res.name}, int64(len(objects))) numRes += int64(len(objects)) } - return nil - }() + } if err != nil { - return err + e.log.Error(err) } } @@ -461,7 +476,7 @@ func (e *Endpoint) discover(ctx context.Context) error { if e.customAttrEnabled { fields, err = client.GetCustomFields(ctx) if err != nil { - e.Parent.Log.Warn("Could not load custom field metadata") + e.log.Warn("Could not load custom field metadata") fields = nil } } @@ -485,10 +500,10 @@ func (e *Endpoint) discover(ctx context.Context) error { } func (e *Endpoint) simpleMetadataSelect(ctx context.Context, client *Client, res *resourceKind) { - e.Parent.Log.Debugf("Using fast metric metadata selection for %s", res.name) + e.log.Debugf("Using fast metric metadata selection for %s", res.name) m, err := client.CounterInfoByName(ctx) if err != nil { - e.Parent.Log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error()) + e.log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error()) return } res.metrics = make(performance.MetricList, 0, len(res.include)) @@ -504,12 +519,12 @@ func (e *Endpoint) simpleMetadataSelect(ctx context.Context, client *Client, res } res.metrics = append(res.metrics, cnt) } else { - e.Parent.Log.Warnf("Metric name %s is unknown. Will not be collected", s) + e.log.Warnf("Metric name %s is unknown. 
Will not be collected", s) } } } -func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, objects objectMap, metricNames map[int32]string) { +func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, objects objectMap) { // We're only going to get metadata from maxMetadataSamples resources. If we have // more resources than that, we pick maxMetadataSamples samples at random. sampledObjects := make([]*objectRef, len(objects)) @@ -537,7 +552,7 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, te.Run(ctx, func() { metrics, err := e.getMetadata(ctx, obj, res.sampling) if err != nil { - e.Parent.Log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error()) + e.log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error()) } mMap := make(map[string]types.PerfMetricId) for _, m := range metrics { @@ -546,11 +561,11 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, } else { m.Instance = "" } - if res.filters.Match(metricNames[m.CounterId]) { + if res.filters.Match(e.getMetricNameForId(m.CounterId)) { mMap[strconv.Itoa(int(m.CounterId))+"|"+m.Instance] = m } } - e.Parent.Log.Debugf("Found %d metrics for %s", len(mMap), obj.name) + e.log.Debugf("Found %d metrics for %s", len(mMap), obj.name) instInfoMux.Lock() defer instInfoMux.Unlock() if len(mMap) > len(res.metrics) { @@ -624,12 +639,6 @@ func getClusters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (obje cache[r.Parent.Value] = p } } - m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{ - name: r.Name, - ref: r.ExtensibleManagedObject.Reference(), - parentRef: p, - customValues: e.loadCustomAttributes(&r.ManagedEntity), - } return nil }() if err != nil { @@ -718,6 +727,23 @@ func getVMs(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap guest = cleanGuestID(r.Config.GuestId) uuid = r.Config.Uuid } + cvs := make(map[string]string) + if e.customAttrEnabled { + for _, cv := range r.Summary.CustomValue { + val := cv.(*types.CustomFieldStringValue) + if val.Value == "" { + continue + } + key, ok := e.customFields[val.Key] + if !ok { + e.log.Warnf("Metadata for custom field %d not found. Skipping", val.Key) + continue + } + if e.customAttrFilter.Match(key) { + cvs[key] = val.Value + } + } + } m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{ name: r.Name, ref: r.ExtensibleManagedObject.Reference(), @@ -832,13 +858,13 @@ func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error } // Workaround to make sure pqs is a copy of the loop variable and won't change. 
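 // (Editorial sketch, not part of the patch: the pitfall being avoided is the
 // classic Go closure-over-loop-variable bug. A form such as
 //
 //     for _, pqs := range chunks {
 //         te.Run(ctx, func() { job(pqs) })
 //     }
 //
 // would let every queued closure read the same captured variable, which later
 // iterations may reassign before the goroutine runs; passing pqs as a function
 // parameter, as below, hands each job its own copy.)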
-func submitChunkJob(ctx context.Context, te *ThrottledExecutor, job func([]types.PerfQuerySpec), pqs []types.PerfQuerySpec) {
+func submitChunkJob(ctx context.Context, te *ThrottledExecutor, job queryJob, pqs queryChunk) {
 	te.Run(ctx, func() {
 		job(pqs)
 	})
 }
 
-func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Time, latest time.Time, job func([]types.PerfQuerySpec)) {
+func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Time, latest time.Time, acc telegraf.Accumulator, job queryJob) {
 	te := NewThrottledExecutor(e.Parent.CollectConcurrency)
 	maxMetrics := e.Parent.MaxQueryMetrics
 	if maxMetrics < 1 {
@@ -851,54 +877,48 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim
 	if res.name == "cluster" && maxMetrics > 10 {
 		maxMetrics = 10
 	}
-	pqs := make([]types.PerfQuerySpec, 0, e.Parent.MaxQueryObjects)
-	metrics := 0
-	total := 0
-	nRes := 0
-	for _, resource := range res.objects {
-		mr := len(res.metrics)
-		for mr > 0 {
-			mc := mr
-			headroom := maxMetrics - metrics
-			if !res.realTime && mc > headroom { // Metric query limit only applies to non-realtime metrics
-				mc = headroom
-			}
-			fm := len(res.metrics) - mr
-			pq := types.PerfQuerySpec{
-				Entity:     resource.ref,
-				MaxSample:  maxSampleConst,
-				MetricId:   res.metrics[fm : fm+mc],
-				IntervalId: res.sampling,
-				Format:     "normal",
-			}
-			start, ok := e.hwMarks.Get(resource.ref.Value)
+	pqs := make(queryChunk, 0, e.Parent.MaxQueryObjects)
+
+	for _, object := range res.objects {
+		timeBuckets := make(map[int64]*types.PerfQuerySpec, 0)
+		for metricIdx, metric := range res.metrics {
+
+			// Determine time of last successful collection
+			metricName := e.getMetricNameForId(metric.CounterId)
+			if metricName == "" {
+				e.log.Infof("Unable to find metric name for id %d. Skipping!", metric.CounterId)
+				continue
+			}
+			start, ok := e.hwMarks.Get(object.ref.Value, metricName)
 			if !ok {
-				// Look back 3 sampling periods by default
 				start = latest.Add(time.Duration(-res.sampling) * time.Second * (metricLookback - 1))
 			}
-			pq.StartTime = &start
-			pq.EndTime = &now
+			start = start.Truncate(20 * time.Second) // Truncate to maximum resolution
 
-			// Make sure endtime is always after start time. We may occasionally see samples from the future
-			// returned from vCenter. This is presumably due to time drift between vCenter and EXSi nodes.
-			if pq.StartTime.After(*pq.EndTime) {
-				e.Parent.Log.Debugf("Future sample. Res: %s, StartTime: %s, EndTime: %s, Now: %s", pq.Entity, *pq.StartTime, *pq.EndTime, now)
-				end := start.Add(time.Second)
-				pq.EndTime = &end
+			// Create bucket if we don't already have it
+			bucket, ok := timeBuckets[start.Unix()]
+			if !ok {
+				bucket = &types.PerfQuerySpec{
+					Entity:     object.ref,
+					MaxSample:  maxSampleConst,
+					MetricId:   make([]types.PerfMetricId, 0),
+					IntervalId: res.sampling,
+					Format:     "normal",
+				}
+				bucket.StartTime = &start
+				bucket.EndTime = &now
+				timeBuckets[start.Unix()] = bucket
 			}
-			pqs = append(pqs, pq)
-			mr -= mc
-			metrics += mc
+			// Add this metric to the bucket
+			bucket.MetricId = append(bucket.MetricId, metric)
 
-			// We need to dump the current chunk of metrics for one of two reasons:
-			// 1) We filled up the metric quota while processing the current resource
-			// 2) We are at the last resource and have no more data to process.
-			// 3) The query contains more than 100,000 individual metrics
-			if mr > 0 || nRes >= e.Parent.MaxQueryObjects || len(pqs) > 100000 {
-				e.Parent.Log.Debugf("Queueing query: %d objects, %d metrics (%d remaining) of type %s for %s. 
Processed objects: %d. Total objects %d",
-					len(pqs), metrics, mr, res.name, e.URL.Host, total+1, len(res.objects))
+			// Bucket filled to capacity? (Only applies to non-real-time metrics.)
+			// OR if we're past the absolute maximum limit
+			if (!res.realTime && len(bucket.MetricId) >= maxMetrics) || len(bucket.MetricId) > 100000 {
+				e.log.Debugf("Submitting partial query: %d metrics (%d remaining) of type %s for %s. Total objects %d",
+					len(bucket.MetricId), len(res.metrics)-metricIdx, res.name, e.URL.Host, len(res.objects))
 
 				// Don't send work items if the context has been cancelled.
 				if ctx.Err() == context.Canceled {
@@ -906,20 +926,23 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim
 				}
 
 				// Run collection job
-				submitChunkJob(ctx, te, job, pqs)
-				pqs = make([]types.PerfQuerySpec, 0, e.Parent.MaxQueryObjects)
-				metrics = 0
-				nRes = 0
+				delete(timeBuckets, start.Unix())
+				submitChunkJob(ctx, te, job, queryChunk{*bucket})
+			}
+		}
+		// Handle data in the time buckets and submit a job once we've reached the maximum number of objects.
+		for _, bucket := range timeBuckets {
+			pqs = append(pqs, *bucket)
+			if (!res.realTime && len(pqs) > e.Parent.MaxQueryObjects) || len(pqs) > 100000 {
+				e.log.Debugf("Submitting final bucket job for %s: %d metrics", res.name, len(bucket.MetricId))
+				submitChunkJob(ctx, te, job, pqs)
+				pqs = make(queryChunk, 0, e.Parent.MaxQueryObjects)
+			}
 		}
-		total++
-		nRes++
 	}
-	// Handle final partially filled chunk
+	// Submit any jobs left in the queue
 	if len(pqs) > 0 {
-		// Run collection job
-		e.Parent.Log.Debugf("Queuing query: %d objects, %d metrics (0 remaining) of type %s for %s. Total objects %d (final chunk)",
-			len(pqs), metrics, res.name, e.URL.Host, len(res.objects))
+		e.log.Debugf("Submitting job for %s: %d objects", res.name, len(pqs))
 		submitChunkJob(ctx, te, job, pqs)
 	}
 
@@ -950,18 +973,18 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc
 		if estInterval < s {
 			estInterval = s
 		}
-		e.Parent.Log.Debugf("Raw interval %s, padded: %s, estimated: %s", rawInterval, paddedInterval, estInterval)
+		e.log.Debugf("Raw interval %s, padded: %s, estimated: %s", rawInterval, paddedInterval, estInterval)
 	}
-	e.Parent.Log.Debugf("Interval estimated to %s", estInterval)
+	e.log.Debugf("Interval estimated to %s", estInterval)
 	res.lastColl = localNow
 	latest := res.latestSample
 	if !latest.IsZero() {
 		elapsed := now.Sub(latest).Seconds() + 5.0 // Allow 5 second jitter.
-		e.Parent.Log.Debugf("Latest: %s, elapsed: %f, resource: %s", latest, elapsed, resourceType)
+		e.log.Debugf("Latest: %s, elapsed: %f, resource: %s", latest, elapsed, resourceType)
 		if !res.realTime && elapsed < float64(res.sampling) {
 			// No new data would be available. We're outta here!
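 			// Worked example with illustrative numbers: for a historical resource
 			// with res.sampling = 300 whose latest sample is 200s old, elapsed is
 			// 200 + 5 = 205 < 300, so this round is skipped; vCenter cannot have
 			// produced a new 5-minute rollup yet.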
- e.Parent.Log.Debugf("Sampling period for %s of %d has not elapsed on %s", + e.log.Debugf("Sampling period for %s of %d has not elapsed on %s", resourceType, res.sampling, e.URL.Host) return nil } @@ -972,7 +995,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc internalTags := map[string]string{"resourcetype": resourceType} sw := NewStopwatchWithTags("gather_duration", e.URL.Host, internalTags) - e.Parent.Log.Debugf("Collecting metrics for %d objects of type %s for %s", + e.log.Debugf("Collecting metrics for %d objects of type %s for %s", len(res.objects), resourceType, e.URL.Host) count := int64(0) @@ -981,9 +1004,10 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc latestSample := time.Time{} // Divide workload into chunks and process them concurrently - e.chunkify(ctx, res, now, latest, - func(chunk []types.PerfQuerySpec) { - n, localLatest, err := e.collectChunk(ctx, chunk, res, acc, estInterval) + e.chunkify(ctx, res, now, latest, acc, + func(chunk queryChunk) { + n, localLatest, err := e.collectChunk(ctx, chunk, res, acc, now, estInterval) + e.log.Debugf("CollectChunk for %s returned %d metrics", resourceType, n) if err != nil { acc.AddError(errors.New("while collecting " + res.name + ": " + err.Error())) return @@ -997,7 +1021,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc } }) - e.Parent.Log.Debugf("Latest sample for %s set to %s", resourceType, latestSample) + e.log.Debugf("Latest sample for %s set to %s", resourceType, latestSample) if !latestSample.IsZero() { res.latestSample = latestSample } @@ -1006,7 +1030,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc return nil } -func alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Duration) ([]types.PerfSampleInfo, []float64) { +func (e *Endpoint) alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Duration) ([]types.PerfSampleInfo, []float64) { rInfo := make([]types.PerfSampleInfo, 0, len(info)) rValues := make([]float64, 0, len(values)) bi := 1.0 @@ -1015,7 +1039,7 @@ func alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Dur // According to the docs, SampleInfo and Value should have the same length, but we've seen corrupted // data coming back with missing values. Take care of that gracefully! if idx >= len(values) { - log.Printf("D! 
[inputs.vsphere] len(SampleInfo)>len(Value) %d > %d", len(info), len(values)) + e.log.Debugf("len(SampleInfo)>len(Value) %d > %d during alignment", len(info), len(values)) break } v := float64(values[idx]) @@ -1044,8 +1068,8 @@ func alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Dur return rInfo, rValues } -func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, res *resourceKind, acc telegraf.Accumulator, interval time.Duration) (int, time.Time, error) { - e.Parent.Log.Debugf("Query for %s has %d QuerySpecs", res.name, len(pqs)) +func (e *Endpoint) collectChunk(ctx context.Context, pqs queryChunk, res *resourceKind, acc telegraf.Accumulator, now time.Time, interval time.Duration) (int, time.Time, error) { + e.log.Debugf("Query for %s has %d QuerySpecs", res.name, len(pqs)) latestSample := time.Time{} count := 0 resourceType := res.name @@ -1066,14 +1090,14 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, return count, latestSample, err } - e.Parent.Log.Debugf("Query for %s returned metrics for %d objects", resourceType, len(ems)) + e.log.Debugf("Query for %s returned metrics for %d objects", resourceType, len(ems)) // Iterate through results for _, em := range ems { moid := em.Entity.Reference().Value instInfo, found := res.objects[moid] if !found { - e.Parent.Log.Errorf("MOID %s not found in cache. Skipping! (This should not happen!)", moid) + e.log.Errorf("MOID %s not found in cache. Skipping! (This should not happen!)", moid) continue } buckets := make(map[string]metricEntry) @@ -1088,19 +1112,19 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, // Populate tags objectRef, ok := res.objects[moid] if !ok { - e.Parent.Log.Errorf("MOID %s not found in cache. Skipping", moid) + e.log.Errorf("MOID %s not found in cache. Skipping", moid) continue } e.populateTags(objectRef, resourceType, res, t, &v) nValues := 0 - alignedInfo, alignedValues := alignSamples(em.SampleInfo, v.Value, interval) + alignedInfo, alignedValues := e.alignSamples(em.SampleInfo, v.Value, interval) for idx, sample := range alignedInfo { // According to the docs, SampleInfo and Value should have the same length, but we've seen corrupted // data coming back with missing values. Take care of that gracefully! if idx >= len(alignedValues) { - e.Parent.Log.Debugf("Len(SampleInfo)>len(Value) %d > %d", len(alignedInfo), len(alignedValues)) + e.log.Debugf("Len(SampleInfo)>len(Value) %d > %d", len(alignedInfo), len(alignedValues)) break } ts := sample.Timestamp @@ -1121,7 +1145,7 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, // Percentage values must be scaled down by 100. info, ok := metricInfo[name] if !ok { - e.Parent.Log.Errorf("Could not determine unit for %s. Skipping", name) + e.log.Errorf("Could not determine unit for %s. 
Skipping", name) } v := alignedValues[idx] if info.UnitInfo.GetElementDescription().Key == "percent" { @@ -1136,10 +1160,10 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, count++ // Update highwater marks - e.hwMarks.Put(moid, ts) + e.hwMarks.Put(moid, name, ts) } if nValues == 0 { - e.Parent.Log.Debugf("Missing value for: %s, %s", name, objectRef.name) + e.log.Debugf("Missing value for: %s, %s", name, objectRef.name) continue } } diff --git a/plugins/inputs/vsphere/tscache.go b/plugins/inputs/vsphere/tscache.go index 6e7d00c8b..1be75d760 100644 --- a/plugins/inputs/vsphere/tscache.go +++ b/plugins/inputs/vsphere/tscache.go @@ -10,7 +10,6 @@ import ( type TSCache struct { ttl time.Duration table map[string]time.Time - done chan struct{} mux sync.RWMutex } @@ -19,7 +18,6 @@ func NewTSCache(ttl time.Duration) *TSCache { return &TSCache{ ttl: ttl, table: make(map[string]time.Time), - done: make(chan struct{}), } } @@ -39,10 +37,10 @@ func (t *TSCache) Purge() { // IsNew returns true if the supplied timestamp for the supplied key is more recent than the // timestamp we have on record. -func (t *TSCache) IsNew(key string, tm time.Time) bool { +func (t *TSCache) IsNew(key string, metricName string, tm time.Time) bool { t.mux.RLock() defer t.mux.RUnlock() - v, ok := t.table[key] + v, ok := t.table[makeKey(key, metricName)] if !ok { return true // We've never seen this before, so consider everything a new sample } @@ -50,16 +48,20 @@ func (t *TSCache) IsNew(key string, tm time.Time) bool { } // Get returns a timestamp (if present) -func (t *TSCache) Get(key string) (time.Time, bool) { +func (t *TSCache) Get(key string, metricName string) (time.Time, bool) { t.mux.RLock() defer t.mux.RUnlock() - ts, ok := t.table[key] + ts, ok := t.table[makeKey(key, metricName)] return ts, ok } // Put updates the latest timestamp for the supplied key. 
-func (t *TSCache) Put(key string, time time.Time) { +func (t *TSCache) Put(key string, metricName string, time time.Time) { t.mux.Lock() defer t.mux.Unlock() - t.table[key] = time + t.table[makeKey(key, metricName)] = time +} + +func makeKey(resource string, metric string) string { + return resource + "|" + metric } diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go index 141b25599..098c49334 100644 --- a/plugins/inputs/vsphere/vsphere.go +++ b/plugins/inputs/vsphere/vsphere.go @@ -275,7 +275,7 @@ func (v *VSphere) Start(acc telegraf.Accumulator) error { if err != nil { return err } - ep, err := NewEndpoint(ctx, v, u) + ep, err := NewEndpoint(ctx, v, u, v.Log) if err != nil { return err } diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go index dce21fa78..fe0dfe41e 100644 --- a/plugins/inputs/vsphere/vsphere_test.go +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -182,7 +182,8 @@ func testAlignUniform(t *testing.T, n int) { } values[i] = 1 } - newInfo, newValues := alignSamples(info, values, 60*time.Second) + e := Endpoint{log: testutil.Logger{}} + newInfo, newValues := e.alignSamples(info, values, 60*time.Second) require.Equal(t, n/3, len(newInfo), "Aligned infos have wrong size") require.Equal(t, n/3, len(newValues), "Aligned values have wrong size") for _, v := range newValues { @@ -207,7 +208,8 @@ func TestAlignMetrics(t *testing.T) { } values[i] = int64(i%3 + 1) } - newInfo, newValues := alignSamples(info, values, 60*time.Second) + e := Endpoint{log: testutil.Logger{}} + newInfo, newValues := e.alignSamples(info, values, 60*time.Second) require.Equal(t, n/3, len(newInfo), "Aligned infos have wrong size") require.Equal(t, n/3, len(newValues), "Aligned values have wrong size") for _, v := range newValues { From 2d61669cfde220db0ee55a5b32e8f1b69c58376a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 21 Apr 2020 11:31:45 -0700 Subject: [PATCH 1699/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 71fd59392..dacbebb85 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,8 @@ #### Bugfixes - [#7371](https://github.com/influxdata/telegraf/issues/7371): Fix unable to write metrics to CloudWatch with IMDSv1 disabled. +- [#7233](https://github.com/influxdata/telegraf/issues/7233): Fix vSphere 6.7 missing data issue. 
+ ## v1.14.1 [2020-04-14] From 82c9f0f7f849cce724e1b9fe58c205d3623062cb Mon Sep 17 00:00:00 2001 From: Giovanni Luisotto Date: Tue, 21 Apr 2020 22:42:35 +0200 Subject: [PATCH 1700/1815] Trim instance tag in the sqlserver performance counters query (#7351) --- plugins/inputs/sqlserver/sqlserver.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index ec147e4fe..7c8778e10 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -629,9 +629,9 @@ SET @SQL = N'SELECT DISTINCT WHEN RTRIM(object_name) LIKE ''%:Availability Replica'' AND TRY_CONVERT(uniqueidentifier, spi.instance_name) IS NOT NULL -- for cloud only THEN d.name + RTRIM(SUBSTRING(spi.instance_name, 37, LEN(spi.instance_name))) - ELSE spi.instance_name + ELSE RTRIM(spi.instance_name) END AS instance_name,' - ELSE 'spi.instance_name as instance_name, ' + ELSE 'RTRIM(spi.instance_name) as instance_name, ' END + 'CAST(spi.cntr_value AS BIGINT) AS cntr_value, From b2ec7973dfaf518b29b4cebb16b9b52c56a7d3e2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 21 Apr 2020 13:46:57 -0700 Subject: [PATCH 1701/1815] Update changelog --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index dacbebb85..6522f280f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,11 @@ - [#7371](https://github.com/influxdata/telegraf/issues/7371): Fix unable to write metrics to CloudWatch with IMDSv1 disabled. - [#7233](https://github.com/influxdata/telegraf/issues/7233): Fix vSphere 6.7 missing data issue. +## v1.14.2 [unreleased] + +#### Bugfixes + +- [#7241](https://github.com/influxdata/telegraf/issues/7241): Trim whitespace from instance tag in sqlserver input. ## v1.14.1 [2020-04-14] From 702946b5cfba8f91b3f8fb6602922bd26a4cd5b9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 21 Apr 2020 13:54:39 -0700 Subject: [PATCH 1702/1815] Add option to save retention policy as tag in influxdb_listener (#7356) --- plugins/inputs/influxdb_listener/README.md | 4 +++ .../influxdb_listener/influxdb_listener.go | 26 ++++++++++----- .../influxdb_listener_test.go | 33 ++++++++++++++++++- 3 files changed, 54 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/influxdb_listener/README.md b/plugins/inputs/influxdb_listener/README.md index b93573bf4..aae77fb96 100644 --- a/plugins/inputs/influxdb_listener/README.md +++ b/plugins/inputs/influxdb_listener/README.md @@ -54,6 +54,10 @@ submits data to InfluxDB determines the destination database. ## the tag will be overwritten with the database supplied. # database_tag = "" + ## If set the retention policy specified in the write query will be added as + ## the value of this tag name. + # retention_policy_tag = "" + ## Optional username and password to accept for HTTP basic authentication. ## You probably want to make sure you have TLS configured above for this. 
# basic_username = "foobar" diff --git a/plugins/inputs/influxdb_listener/influxdb_listener.go b/plugins/inputs/influxdb_listener/influxdb_listener.go index 1eac928af..4ba5a8c7c 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener.go @@ -29,13 +29,14 @@ type InfluxDBListener struct { port int tlsint.ServerConfig - ReadTimeout internal.Duration `toml:"read_timeout"` - WriteTimeout internal.Duration `toml:"write_timeout"` - MaxBodySize internal.Size `toml:"max_body_size"` - MaxLineSize internal.Size `toml:"max_line_size"` // deprecated in 1.14; ignored - BasicUsername string `toml:"basic_username"` - BasicPassword string `toml:"basic_password"` - DatabaseTag string `toml:"database_tag"` + ReadTimeout internal.Duration `toml:"read_timeout"` + WriteTimeout internal.Duration `toml:"write_timeout"` + MaxBodySize internal.Size `toml:"max_body_size"` + MaxLineSize internal.Size `toml:"max_line_size"` // deprecated in 1.14; ignored + BasicUsername string `toml:"basic_username"` + BasicPassword string `toml:"basic_password"` + DatabaseTag string `toml:"database_tag"` + RetentionPolicyTag string `toml:"retention_policy_tag"` timeFunc influx.TimeFunc @@ -72,12 +73,16 @@ const sampleConfig = ` ## 0 means to use the default of 32MiB. max_body_size = "32MiB" - ## Optional tag name used to store the database. + ## Optional tag name used to store the database. ## If the write has a database in the query string then it will be kept in this tag name. ## This tag can be used in downstream outputs. ## The default value of nothing means it will be off and the database will not be recorded. # database_tag = "" + ## If set the retention policy specified in the write query will be added as + ## the value of this tag name. 
+ # retention_policy_tag = "" + ## Set one or more allowed client CA certificate file names to ## enable mutually authenticated TLS connections tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] @@ -255,6 +260,7 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc { } db := req.URL.Query().Get("db") + rp := req.URL.Query().Get("rp") body := req.Body body = http.MaxBytesReader(res, body, h.MaxBodySize.Size) @@ -316,6 +322,10 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc { m.AddTag(h.DatabaseTag, db) } + if h.RetentionPolicyTag != "" && rp != "" { + m.AddTag(h.RetentionPolicyTag, rp) + } + h.acc.AddMetric(m) } diff --git a/plugins/inputs/influxdb_listener/influxdb_listener_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_test.go index 6990f6fc6..d0b2913cd 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener_test.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener_test.go @@ -13,9 +13,9 @@ import ( "testing" "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) @@ -207,6 +207,37 @@ func TestWriteKeepDatabase(t *testing.T) { } } +func TestWriteRetentionPolicyTag(t *testing.T) { + listener := newTestListener() + listener.RetentionPolicyTag = "rp" + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + resp, err := http.Post(createURL(listener, "http", "/write", "rp=myrp"), "", bytes.NewBuffer([]byte("cpu time_idle=42"))) + require.NoError(t, err) + resp.Body.Close() + require.Equal(t, 204, resp.StatusCode) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "rp": "myrp", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + } + + acc.Wait(1) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} + // http listener should add a newline at the end of the buffer if it's not there func TestWriteNoNewline(t *testing.T) { listener := newTestListener() From 4903facddbc3be008b5695a17bbc034a76eb0f9d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 21 Apr 2020 13:56:14 -0700 Subject: [PATCH 1703/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6522f280f..716ad24ea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ - [#7304](https://github.com/influxdata/telegraf/pull/7304): Add reading bearer token from a file to http input. - [#7366](https://github.com/influxdata/telegraf/pull/7366): add support for SIGUSR1 to trigger flush. - [#7271](https://github.com/influxdata/telegraf/pull/7271): Add retry when slave is busy to modbus input. +- [#7356](https://github.com/influxdata/telegraf/pull/7356): Add option to save retention policy as tag in influxdb_listener. 
#### Bugfixes From 4db9e8a968d754d4acee050affaa103a1d6012f7 Mon Sep 17 00:00:00 2001 From: hydro-b Date: Wed, 22 Apr 2020 01:29:43 +0200 Subject: [PATCH 1704/1815] Add support for MDS and RGW sockets to ceph input (#6915) --- plugins/inputs/ceph/README.md | 226 +++++-- plugins/inputs/ceph/ceph.go | 29 +- plugins/inputs/ceph/ceph_test.go | 1032 ++++++++++++++++++++++++++++++ 3 files changed, 1230 insertions(+), 57 deletions(-) diff --git a/plugins/inputs/ceph/README.md b/plugins/inputs/ceph/README.md index 33585b079..f20fd18be 100644 --- a/plugins/inputs/ceph/README.md +++ b/plugins/inputs/ceph/README.md @@ -6,7 +6,7 @@ Ceph has introduced a Telegraf and Influx plugin in the 13.x Mimic release. The *Admin Socket Stats* -This gatherer works by scanning the configured SocketDir for OSD and MON socket files. When it finds +This gatherer works by scanning the configured SocketDir for OSD, MON, MDS and RGW socket files. When it finds a MON socket, it runs **ceph --admin-daemon $file perfcounters_dump**. For OSDs it runs **ceph --admin-daemon $file perf dump** The resulting JSON is parsed and grouped into collections, based on top-level key. Top-level keys are @@ -62,6 +62,8 @@ the cluster. The currently supported commands are: ## prefix of MON and OSD socket files, used to determine socket type mon_prefix = "ceph-mon" osd_prefix = "ceph-osd" + mds_prefix = "ceph-mds" + rgw_prefix = "ceph-client" ## suffix used to identify socket files socket_suffix = "asok" @@ -95,7 +97,7 @@ All fields are collected under the **ceph** measurement and stored as float64s. All admin measurements will have the following tags: -- type: either 'osd' or 'mon' to indicate which type of node was queried +- type: either 'osd', 'mon', 'mds' or 'rgw' to indicate which type of node was queried - id: a unique string identifier, parsed from the socket file name for the node - collection: the top-level key under which these fields were reported. 
Possible values are: - for MON nodes: @@ -133,6 +135,37 @@ All admin measurements will have the following tags: - throttle-objecter_ops - throttle-osd_client_bytes - throttle-osd_client_messages + - for MDS nodes: + - AsyncMessenger::Worker-0 + - AsyncMessenger::Worker-1 + - AsyncMessenger::Worker-2 + - finisher-PurgeQueue + - mds + - mds_cache + - mds_log + - mds_mem + - mds_server + - mds_sessions + - objecter + - purge_queue + - throttle-msgr_dispatch_throttler-mds + - throttle-objecter_bytes + - throttle-objecter_ops + - throttle-write_buf_throttle + - for RGW nodes: + - AsyncMessenger::Worker-0 + - AsyncMessenger::Worker-1 + - AsyncMessenger::Worker-2 + - cct + - finisher-radosclient + - mempool + - objecter + - rgw + - simple-throttler + - throttle-msgr_dispatch_throttler-radosclient + - throttle-objecter_bytes + - throttle-objecter_ops + - throttle-rgw_async_rados_ops *Cluster Stats* @@ -209,62 +242,145 @@ All admin measurements will have the following tags: *Cluster Stats* ``` -ceph_pool_stats,name=telegraf recovering_keys_per_sec=0,read_bytes_sec=0,write_bytes_sec=0,read_op_per_sec=0,write_op_per_sec=0,recovering_objects_per_sec=0,recovering_bytes_per_sec=0 1550658911000000000 -ceph_pool_usage,name=telegraf kb_used=0,bytes_used=0,objects=0 1550658911000000000 -ceph_pgmap_state,state=undersized+peered count=30 1550658910000000000 -ceph_pgmap bytes_total=10733223936,read_op_per_sec=0,write_op_per_sec=0,num_pgs=30,data_bytes=0,bytes_avail=9654697984,read_bytes_sec=0,write_bytes_sec=0,version=0,bytes_used=1078525952 1550658910000000000 -ceph_osdmap num_up_osds=1,num_in_osds=1,full=false,nearfull=false,num_remapped_pgs=0,epoch=34,num_osds=1 1550658910000000000 -ceph_health status="HEALTH_WARN",overall_status="HEALTH_WARN" 1550658910000000000 +ceph_health,host=stefanmon1 overall_status="",status="HEALTH_WARN" 1587118504000000000 +ceph_osdmap,host=stefanmon1 epoch=203,full=false,nearfull=false,num_in_osds=8,num_osds=9,num_remapped_pgs=0,num_up_osds=8 1587118504000000000 +ceph_pgmap,host=stefanmon1 bytes_avail=849879302144,bytes_total=858959904768,bytes_used=9080602624,data_bytes=5055,num_pgs=504,read_bytes_sec=0,read_op_per_sec=0,version=0,write_bytes_sec=0,write_op_per_sec=0 1587118504000000000 +ceph_pgmap_state,host=stefanmon1,state=active+clean count=504 1587118504000000000 +ceph_usage,host=stefanmon1 total_avail_bytes=849879302144,total_bytes=858959904768,total_used_bytes=196018176 1587118505000000000 +ceph_pool_usage,host=stefanmon1,name=cephfs_data bytes_used=0,kb_used=0,max_avail=285804986368,objects=0,percent_used=0 1587118505000000000 +ceph_pool_stats,host=stefanmon1,name=cephfs_data read_bytes_sec=0,read_op_per_sec=0,recovering_bytes_per_sec=0,recovering_keys_per_sec=0,recovering_objects_per_sec=0,write_bytes_sec=0,write_op_per_sec=0 1587118506000000000 ``` *Admin Socket Stats* ``` -ceph,collection=recoverystate_perf,id=0,type=osd 
reprecovering_latency.avgtime=0,repwaitrecoveryreserved_latency.avgcount=0,waitlocalbackfillreserved_latency.sum=0,reset_latency.avgtime=0.000090333,peering_latency.avgtime=0.824434333,stray_latency.avgtime=0.000030502,waitlocalrecoveryreserved_latency.sum=0,backfilling_latency.avgtime=0,reprecovering_latency.avgcount=0,incomplete_latency.avgtime=0,down_latency.avgtime=0,recovered_latency.sum=0.009692406,peering_latency.avgcount=40,notrecovering_latency.sum=0,waitremoterecoveryreserved_latency.sum=0,reprecovering_latency.sum=0,waitlocalbackfillreserved_latency.avgtime=0,started_latency.sum=9066.701648888,backfilling_latency.sum=0,waitactingchange_latency.avgcount=0,start_latency.avgtime=0.000030178,recovering_latency.avgtime=0,notbackfilling_latency.avgcount=0,waitremotebackfillreserved_latency.avgtime=0,incomplete_latency.avgcount=0,replicaactive_latency.sum=0,getinfo_latency.avgtime=0.000025945,down_latency.sum=0,recovered_latency.avgcount=40,waitactingchange_latency.avgtime=0,notrecovering_latency.avgcount=0,waitupthru_latency.sum=32.970965509,waitupthru_latency.avgtime=0.824274137,waitlocalrecoveryreserved_latency.avgcount=0,waitremoterecoveryreserved_latency.avgcount=0,activating_latency.avgcount=40,activating_latency.sum=0.83428466,activating_latency.avgtime=0.020857116,start_latency.avgcount=50,waitremotebackfillreserved_latency.avgcount=0,down_latency.avgcount=0,started_latency.avgcount=10,getlog_latency.avgcount=40,stray_latency.avgcount=10,notbackfilling_latency.sum=0,reset_latency.sum=0.00451665,active_latency.avgtime=906.505839265,repwaitbackfillreserved_latency.sum=0,waitactingchange_latency.sum=0,stray_latency.sum=0.000305022,waitremotebackfillreserved_latency.sum=0,repwaitrecoveryreserved_latency.avgtime=0,replicaactive_latency.avgtime=0,clean_latency.avgcount=10,waitremoterecoveryreserved_latency.avgtime=0,active_latency.avgcount=10,primary_latency.sum=9066.700828729,initial_latency.avgtime=0.000379351,waitlocalbackfillreserved_latency.avgcount=0,getinfo_latency.sum=0.001037815,reset_latency.avgcount=50,getlog_latency.sum=0.003079344,getlog_latency.avgtime=0.000076983,primary_latency.avgcount=10,repnotrecovering_latency.avgcount=0,initial_latency.sum=0.015174072,repwaitrecoveryreserved_latency.sum=0,replicaactive_latency.avgcount=0,clean_latency.avgtime=906.495755946,waitupthru_latency.avgcount=40,repnotrecovering_latency.sum=0,incomplete_latency.sum=0,active_latency.sum=9065.058392651,peering_latency.sum=32.977373355,repnotrecovering_latency.avgtime=0,notrecovering_latency.avgtime=0,waitlocalrecoveryreserved_latency.avgtime=0,repwaitbackfillreserved_latency.avgtime=0,recovering_latency.sum=0,getmissing_latency.sum=0.000902014,getmissing_latency.avgtime=0.00002255,clean_latency.sum=9064.957559467,getinfo_latency.avgcount=40,started_latency.avgtime=906.670164888,getmissing_latency.avgcount=40,notbackfilling_latency.avgtime=0,initial_latency.avgcount=40,recovered_latency.avgtime=0.00024231,repwaitbackfillreserved_latency.avgcount=0,backfilling_latency.avgcount=0,start_latency.sum=0.001508937,primary_latency.avgtime=906.670082872,recovering_latency.avgcount=0 1550658950000000000 -ceph,collection=throttle-msgr_dispatch_throttler-hb_back_server,id=0,type=osd put_sum=0,wait.avgtime=0,put=0,get_or_fail_success=0,wait.avgcount=0,val=0,get_sum=0,take=0,take_sum=0,max=104857600,get=0,get_or_fail_fail=0,wait.sum=0,get_started=0 1550658950000000000 -ceph,collection=throttle-msgr_dispatch_throttler-hb_front_client,id=0,type=osd 
wait.sum=0,val=0,take_sum=0,put=0,get_or_fail_success=0,put_sum=0,get=0,get_or_fail_fail=0,get_started=0,get_sum=0,wait.avgcount=0,wait.avgtime=0,max=104857600,take=0 1550658950000000000 -ceph,collection=bluefs,id=0,type=osd slow_used_bytes=0,wal_total_bytes=0,gift_bytes=1048576,log_compactions=0,logged_bytes=221184,files_written_sst=1,slow_total_bytes=0,bytes_written_wal=619403,bytes_written_sst=1517,reclaim_bytes=0,db_total_bytes=1086324736,wal_used_bytes=0,log_bytes=319488,num_files=10,files_written_wal=1,db_used_bytes=12582912 1550658950000000000 -ceph,collection=throttle-msgr_dispatch_throttler-ms_objecter,id=0,type=osd val=0,put=0,get=0,take=0,put_sum=0,get_started=0,take_sum=0,get_sum=0,wait.sum=0,wait.avgtime=0,get_or_fail_fail=0,get_or_fail_success=0,wait.avgcount=0,max=104857600 1550658950000000000 -ceph,collection=throttle-msgr_dispatch_throttler-client,id=0,type=osd put=100,max=104857600,wait.sum=0,wait.avgtime=0,get_or_fail_fail=0,take_sum=0,val=0,wait.avgcount=0,get_sum=48561,get_or_fail_success=100,take=0,put_sum=48561,get_started=0,get=100 1550658950000000000 -ceph,collection=mutex-OSDShard.2::sdata_wait_lock,id=0,type=osd wait.sum=0,wait.avgtime=0,wait.avgcount=0 1550658950000000000 -ceph,collection=throttle-objecter_ops,id=0,type=osd get_or_fail_fail=0,max=1024,get_sum=0,take=0,val=0,wait.avgtime=0,get_or_fail_success=0,wait.sum=0,put_sum=0,get=0,take_sum=0,put=0,wait.avgcount=0,get_started=0 1550658950000000000 -ceph,collection=AsyncMessenger::Worker-1,id=0,type=osd msgr_send_messages=266,msgr_recv_bytes=49074,msgr_active_connections=1,msgr_running_recv_time=0.136317251,msgr_running_fast_dispatch_time=0,msgr_created_connections=5,msgr_send_bytes=41569,msgr_running_send_time=0.514432253,msgr_recv_messages=81,msgr_running_total_time=0.766790051 1550658950000000000 -ceph,collection=throttle-bluestore_throttle_deferred_bytes,id=0,type=osd get_started=0,wait.sum=0,wait.avgcount=0,take_sum=0,val=12134038,max=201326592,take=0,get_or_fail_fail=0,put_sum=0,wait.avgtime=0,get_or_fail_success=18,get=18,get_sum=12134038,put=0 1550658950000000000 -ceph,collection=throttle-msgr_dispatch_throttler-hb_front_server,id=0,type=osd get=0,put_sum=0,val=0,get_or_fail_fail=0,get_or_fail_success=0,take=0,max=104857600,get_started=0,wait.sum=0,wait.avgtime=0,get_sum=0,take_sum=0,put=0,wait.avgcount=0 1550658950000000000 -ceph,collection=mutex-OSDShard.1::sdata_wait_lock,id=0,type=osd wait.avgcount=0,wait.sum=0,wait.avgtime=0 1550658950000000000 -ceph,collection=finisher-defered_finisher,id=0,type=osd queue_len=0,complete_latency.avgcount=0,complete_latency.sum=0,complete_latency.avgtime=0 1550658950000000000 -ceph,collection=mutex-OSDShard.3::shard_lock,id=0,type=osd wait.avgtime=0,wait.avgcount=0,wait.sum=0 1550658950000000000 -ceph,collection=mutex-OSDShard.0::shard_lock,id=0,type=osd wait.avgcount=0,wait.sum=0,wait.avgtime=0 1550658950000000000 -ceph,collection=throttle-osd_client_bytes,id=0,type=osd get_or_fail_fail=0,get=22,get_sum=6262,take=0,max=524288000,put=31,wait.sum=0,val=0,get_started=0,put_sum=6262,get_or_fail_success=22,take_sum=0,wait.avgtime=0,wait.avgcount=0 1550658950000000000 -ceph,collection=rocksdb,id=0,type=osd 
submit_latency.sum=0.019985172,rocksdb_write_pre_and_post_time.sum=0,rocksdb_write_wal_time.avgtime=0,rocksdb_write_delay_time.avgtime=0,rocksdb_write_pre_and_post_time.avgtime=0,rocksdb_write_pre_and_post_time.avgcount=0,submit_sync_latency.sum=0.559604552,compact=0,compact_queue_len=0,get_latency.avgcount=140,submit_latency.avgtime=0.000095622,submit_transaction=209,compact_range=0,rocksdb_write_wal_time.avgcount=0,submit_sync_latency.avgtime=0.011906479,compact_queue_merge=0,rocksdb_write_memtable_time.avgtime=0,get_latency.sum=0.013135139,submit_latency.avgcount=209,submit_sync_latency.avgcount=47,submit_transaction_sync=47,rocksdb_write_wal_time.sum=0,rocksdb_write_delay_time.avgcount=0,rocksdb_write_memtable_time.avgcount=0,rocksdb_write_memtable_time.sum=0,get=140,get_latency.avgtime=0.000093822,rocksdb_write_delay_time.sum=0 1550658950000000000 -ceph,collection=mutex-OSDShard.1::shard_lock,id=0,type=osd wait.avgcount=0,wait.sum=0,wait.avgtime=0 1550658950000000000 -ceph,collection=osd,id=0,type=osd subop_latency.avgtime=0,copyfrom=0,osd_pg_info=140,subop_push_latency.avgtime=0,subop_pull=0,op_rw_process_latency.sum=0,stat_bytes=10733223936,numpg_removing=0,op_latency.avgtime=0,op_w_process_latency.avgtime=0,op_rw_in_bytes=0,osd_map_cache_miss=0,loadavg=144,map_messages=31,op_w_latency.avgtime=0,op_prepare_latency.avgcount=0,op_r=0,op_latency.avgcount=0,osd_map_cache_hit=225,op_w_prepare_latency.sum=0,numpg_primary=30,op_rw_out_bytes=0,subop_w_latency.avgcount=0,subop_push_latency.avgcount=0,op_r_process_latency.avgcount=0,op_w_in_bytes=0,op_rw_latency.avgtime=0,subop_w_latency.avgtime=0,osd_map_cache_miss_low_avg.sum=0,agent_wake=0,op_before_queue_op_lat.avgtime=0.000065043,op_w_prepare_latency.avgcount=0,tier_proxy_write=0,op_rw_prepare_latency.avgtime=0,op_rw_process_latency.avgtime=0,op_in_bytes=0,op_cache_hit=0,tier_whiteout=0,op_w_prepare_latency.avgtime=0,heartbeat_to_peers=0,object_ctx_cache_hit=0,buffer_bytes=0,stat_bytes_avail=9654697984,op_w_latency.avgcount=0,tier_dirty=0,tier_flush_fail=0,op_rw_prepare_latency.avgcount=0,agent_flush=0,osd_tier_promote_lat.sum=0,subop_w_latency.sum=0,tier_promote=0,op_before_dequeue_op_lat.avgcount=22,push=0,tier_flush=0,osd_pg_biginfo=90,tier_try_flush_fail=0,subop_push_in_bytes=0,op_before_dequeue_op_lat.sum=0.00266744,osd_map_cache_miss_low=0,numpg=30,op_prepare_latency.avgtime=0,subop_pull_latency.avgtime=0,op_rw_latency.avgcount=0,subop_latency.avgcount=0,op=0,osd_tier_promote_lat.avgcount=0,cached_crc=0,op_r_prepare_latency.sum=0,subop_pull_latency.sum=0,op_before_dequeue_op_lat.avgtime=0.000121247,history_alloc_Mbytes=0,subop_push_latency.sum=0,subop_in_bytes=0,op_w_process_latency.sum=0,osd_map_cache_miss_low_avg.avgcount=0,subop=0,tier_clean=0,osd_tier_r_lat.avgtime=0,op_r_process_latency.avgtime=0,op_r_prepare_latency.avgcount=0,op_w_process_latency.avgcount=0,numpg_stray=0,op_r_prepare_latency.avgtime=0,object_ctx_cache_total=0,op_process_latency.avgtime=0,op_r_process_latency.sum=0,op_r_latency.sum=0,subop_w_in_bytes=0,op_rw=0,messages_delayed_for_map=4,map_message_epoch_dups=30,osd_map_bl_cache_miss=33,op_r_latency.avgtime=0,op_before_queue_op_lat.sum=0.001430955,map_message_epochs=64,agent_evict=0,op_out_bytes=0,op_process_latency.sum=0,osd_tier_flush_lat.sum=0,stat_bytes_used=1078525952,op_prepare_latency.sum=0,op_wip=0,osd_tier_flush_lat.avgtime=0,missed_crc=0,op_rw_latency.sum=0,op_r_latency.avgcount=0,pull=0,op_w_latency.sum=0,op_before_queue_op_lat.avgcount=22,tier_try_flush=0,numpg_replica=0,subop_push=0,osd_tier_r_lat
.sum=0,op_latency.sum=0,push_out_bytes=0,op_w=0,osd_tier_promote_lat.avgtime=0,subop_latency.sum=0,osd_pg_fastinfo=0,tier_delay=0,op_rw_prepare_latency.sum=0,osd_tier_flush_lat.avgcount=0,osd_map_bl_cache_hit=0,op_r_out_bytes=0,subop_pull_latency.avgcount=0,op_process_latency.avgcount=0,tier_evict=0,tier_proxy_read=0,agent_skip=0,subop_w=0,history_alloc_num=0,osd_tier_r_lat.avgcount=0,recovery_ops=0,cached_crc_adjusted=0,op_rw_process_latency.avgcount=0 1550658950000000000 -ceph,collection=finisher-finisher-0,id=0,type=osd complete_latency.sum=0.015491438,complete_latency.avgtime=0.000174061,complete_latency.avgcount=89,queue_len=0 1550658950000000000 -ceph,collection=throttle-msgr_dispatch_throttler-hb_back_client,id=0,type=osd wait.avgtime=0,wait.avgcount=0,max=104857600,get_sum=0,take=0,get_or_fail_fail=0,val=0,get=0,get_or_fail_success=0,wait.sum=0,put=0,take_sum=0,get_started=0,put_sum=0 1550658950000000000 -ceph,collection=throttle-msgr_dispatch_throttler-cluster,id=0,type=osd get_sum=0,take=0,val=0,max=104857600,get_or_fail_success=0,put=0,put_sum=0,wait.sum=0,wait.avgtime=0,get_started=0,get_or_fail_fail=0,take_sum=0,wait.avgcount=0,get=0 1550658950000000000 -ceph,collection=mutex-OSDShard.0::sdata_wait_lock,id=0,type=osd wait.avgcount=0,wait.sum=0,wait.avgtime=0 1550658950000000000 -ceph,collection=throttle-bluestore_throttle_bytes,id=0,type=osd get_sum=140287253,put_sum=140287253,get=209,put=47,val=0,get_started=209,wait.sum=0,wait.avgcount=0,wait.avgtime=0,max=67108864,get_or_fail_fail=0,take=0,take_sum=0,get_or_fail_success=0 1550658950000000000 -ceph,collection=objecter,id=0,type=osd map_inc=15,op_w=0,osd_session_close=0,op=0,osdop_writefull=0,osdop_tmap_up=0,command_resend=0,poolstat_resend=0,osdop_setxattr=0,osdop_append=0,osdop_delete=0,op_rmw=0,poolstat_send=0,op_active=0,osdop_tmap_put=0,osdop_clonerange=0,osdop_rmxattr=0,op_send=0,op_resend=0,osdop_resetxattrs=0,osdop_call=0,osdop_pgls=0,poolstat_active=0,linger_resend=0,osdop_stat=0,op_reply=0,op_laggy=0,statfs_send=0,osdop_getxattr=0,osdop_pgls_filter=0,osdop_notify=0,linger_active=0,osdop_other=0,poolop_resend=0,statfs_active=0,command_active=0,map_epoch=34,osdop_create=0,osdop_watch=0,op_r=0,map_full=0,osdop_src_cmpxattr=0,omap_rd=0,osd_session_open=0,osdop_sparse_read=0,osdop_truncate=0,linger_ping=0,osdop_mapext=0,poolop_send=0,osdop_cmpxattr=0,osd_laggy=0,osdop_writesame=0,osd_sessions=0,osdop_tmap_get=0,op_pg=0,command_send=0,osdop_read=0,op_send_bytes=0,statfs_resend=0,omap_del=0,poolop_active=0,osdop_write=0,osdop_zero=0,omap_wr=0,linger_send=0 1550658950000000000 -ceph,collection=mutex-OSDShard.4::shard_lock,id=0,type=osd wait.avgtime=0,wait.avgcount=0,wait.sum=0 1550658950000000000 -ceph,collection=AsyncMessenger::Worker-0,id=0,type=osd msgr_recv_messages=112,msgr_recv_bytes=14550,msgr_created_connections=15,msgr_running_recv_time=0.026754699,msgr_active_connections=11,msgr_send_messages=11,msgr_running_fast_dispatch_time=0.003373472,msgr_send_bytes=2090,msgr_running_total_time=0.041323592,msgr_running_send_time=0.000441856 1550658950000000000 -ceph,collection=mutex-OSDShard.2::shard_lock,id=0,type=osd wait.sum=0,wait.avgtime=0,wait.avgcount=0 1550658950000000000 -ceph,collection=bluestore,id=0,type=osd 
submit_lat.avgcount=209,kv_flush_lat.avgtime=0.000002175,bluestore_write_big_bytes=0,bluestore_txc=209,kv_commit_lat.avgcount=47,kv_commit_lat.sum=0.585164754,bluestore_buffer_miss_bytes=511,commit_lat.avgcount=209,bluestore_buffer_bytes=0,bluestore_onodes=102,state_kv_queued_lat.sum=1.439223859,deferred_write_bytes=0,bluestore_write_small_bytes=60279,decompress_lat.sum=0,state_kv_done_lat.avgcount=209,submit_lat.sum=0.055637603,state_prepare_lat.avgcount=209,bluestore_write_big=0,read_wait_aio_lat.avgcount=17,bluestore_write_small_deferred=18,kv_lat.sum=0.585267001,kv_flush_lat.sum=0.000102247,bluestore_buffers=0,state_prepare_lat.sum=0.051411998,bluestore_write_small_pre_read=18,state_deferred_queued_lat.sum=0,decompress_lat.avgtime=0,state_kv_done_lat.avgtime=0.000000629,bluestore_write_small_unused=0,read_lat.avgcount=34,bluestore_onode_shard_misses=0,bluestore_blobs=72,bluestore_read_eio=0,bluestore_blob_split=0,bluestore_onode_shard_hits=0,state_kv_commiting_lat.avgcount=209,bluestore_onode_hits=153,state_kv_commiting_lat.sum=2.477385041,read_onode_meta_lat.avgcount=51,state_finishing_lat.avgtime=0.000000489,bluestore_compressed_original=0,state_kv_queued_lat.avgtime=0.006886238,bluestore_gc_merged=0,throttle_lat.avgtime=0.000001247,state_aio_wait_lat.avgtime=0.000001326,bluestore_onode_reshard=0,state_done_lat.avgcount=191,bluestore_compressed_allocated=0,write_penalty_read_ops=0,bluestore_extents=72,compress_lat.avgtime=0,state_aio_wait_lat.avgcount=209,state_io_done_lat.avgtime=0.000000519,bluestore_write_big_blobs=0,state_kv_queued_lat.avgcount=209,kv_flush_lat.avgcount=47,state_finishing_lat.sum=0.000093565,state_io_done_lat.avgcount=209,kv_lat.avgtime=0.012452489,bluestore_buffer_hit_bytes=20750,read_wait_aio_lat.avgtime=0.000038077,bluestore_allocated=4718592,state_deferred_cleanup_lat.avgtime=0,compress_lat.avgcount=0,write_pad_bytes=304265,throttle_lat.sum=0.000260785,read_onode_meta_lat.avgtime=0.000038702,compress_success_count=0,state_deferred_aio_wait_lat.sum=0,decompress_lat.avgcount=0,state_deferred_aio_wait_lat.avgtime=0,bluestore_stored=51133,state_finishing_lat.avgcount=191,bluestore_onode_misses=132,deferred_write_ops=0,read_wait_aio_lat.sum=0.000647315,csum_lat.avgcount=1,state_kv_done_lat.sum=0.000131531,state_prepare_lat.avgtime=0.00024599,state_deferred_cleanup_lat.avgcount=0,state_deferred_queued_lat.avgcount=0,bluestore_reads_with_retries=0,state_kv_commiting_lat.avgtime=0.011853516,kv_commit_lat.avgtime=0.012450313,read_lat.sum=0.003031418,throttle_lat.avgcount=209,bluestore_write_small_new=71,state_deferred_queued_lat.avgtime=0,bluestore_extent_compress=0,bluestore_write_small=89,state_deferred_cleanup_lat.sum=0,submit_lat.avgtime=0.000266208,bluestore_fragmentation_micros=0,state_aio_wait_lat.sum=0.000277323,commit_lat.avgtime=0.018987901,compress_lat.sum=0,bluestore_compressed=0,state_done_lat.sum=0.000206953,csum_lat.avgtime=0.000023281,state_deferred_aio_wait_lat.avgcount=0,compress_rejected_count=0,kv_lat.avgcount=47,read_onode_meta_lat.sum=0.001973812,read_lat.avgtime=0.000089159,csum_lat.sum=0.000023281,state_io_done_lat.sum=0.00010855,state_done_lat.avgtime=0.000001083,commit_lat.sum=3.96847136 1550658950000000000 -ceph,collection=mutex-OSDShard.3::sdata_wait_lock,id=0,type=osd wait.avgcount=0,wait.sum=0,wait.avgtime=0 1550658950000000000 -ceph,collection=AsyncMessenger::Worker-2,id=0,type=osd 
msgr_running_fast_dispatch_time=0,msgr_recv_bytes=246,msgr_created_connections=5,msgr_active_connections=1,msgr_running_recv_time=0.001392218,msgr_running_total_time=1.934101301,msgr_running_send_time=1.781171967,msgr_recv_messages=3,msgr_send_bytes=26504031,msgr_send_messages=15409 1550658950000000000 -ceph,collection=finisher-objecter-finisher-0,id=0,type=osd complete_latency.avgcount=0,complete_latency.sum=0,complete_latency.avgtime=0,queue_len=0 1550658950000000000 -ceph,collection=mutex-OSDShard.4::sdata_wait_lock,id=0,type=osd wait.avgcount=0,wait.sum=0,wait.avgtime=0 1550658950000000000 -ceph,collection=throttle-objecter_bytes,id=0,type=osd take=0,get_sum=0,put_sum=0,put=0,val=0,get=0,get_or_fail_fail=0,wait.avgcount=0,get_or_fail_success=0,wait.sum=0,wait.avgtime=0,get_started=0,max=104857600,take_sum=0 1550658950000000000 -ceph,collection=throttle-mon_client_bytes,id=test,type=monitor get_or_fail_fail=0,take_sum=0,wait.avgtime=0,wait.avgcount=0,get_sum=64607,take=0,get_started=0,put=950,val=240,wait.sum=0,max=104857600,get_or_fail_success=953,put_sum=64367,get=953 1550658950000000000 -ceph,collection=mon,id=test,type=monitor election_win=1,election_lose=0,num_sessions=3,session_add=199,session_rm=196,session_trim=0,num_elections=1,election_call=0 1550658950000000000 -ceph,collection=cluster,id=test,type=monitor num_pg_active=0,num_mon=1,osd_bytes_avail=9654697984,num_object=0,num_osd_in=1,osd_bytes_used=1078525952,num_bytes=0,num_osd=1,num_pg_peering=0,num_pg_active_clean=0,num_pg=30,num_mon_quorum=1,num_object_degraded=0,osd_bytes=10733223936,num_object_unfound=0,num_osd_up=1,num_pool=1,num_object_misplaced=0,osd_epoch=34 1550658950000000000 -ceph,collection=throttle-msgr_dispatch_throttler-mon-mgrc,id=test,type=monitor get=2,put=2,get_sum=16,take_sum=0,wait.avgtime=0,val=0,wait.avgcount=0,get_or_fail_success=2,put_sum=16,max=104857600,get_started=0,take=0,get_or_fail_fail=0,wait.sum=0 1550658950000000000 -ceph,collection=rocksdb,id=test,type=monitor rocksdb_write_memtable_time.avgtime=0,submit_sync_latency.avgtime=0.013689071,submit_transaction_sync=39173,rocksdb_write_pre_and_post_time.avgtime=0,get_latency.avgcount=724581,submit_latency.avgtime=0,submit_sync_latency.avgcount=39173,rocksdb_write_wal_time.avgtime=0,rocksdb_write_pre_and_post_time.sum=0,compact_range=231,compact_queue_merge=0,rocksdb_write_memtable_time.avgcount=0,submit_sync_latency.sum=536.242007888,compact=0,rocksdb_write_delay_time.sum=0,get_latency.sum=9.578173532,rocksdb_write_delay_time.avgcount=0,rocksdb_write_delay_time.avgtime=0,compact_queue_len=0,get_latency.avgtime=0.000013218,submit_latency.sum=0,get=724581,rocksdb_write_wal_time.avgcount=0,submit_transaction=0,rocksdb_write_wal_time.sum=0,submit_latency.avgcount=0,rocksdb_write_pre_and_post_time.avgcount=0,rocksdb_write_memtable_time.sum=0 1550658950000000000 -ceph,collection=finisher-mon_finisher,id=test,type=monitor complete_latency.avgtime=0,complete_latency.avgcount=0,complete_latency.sum=0,queue_len=0 1550658950000000000 -ceph,collection=paxos,id=test,type=monitor 
share_state_keys.sum=0,collect_keys.avgcount=0,collect=0,store_state_latency.avgtime=0,begin_latency.sum=338.90900364,collect_keys.sum=0,collect_bytes.avgcount=0,accept_timeout=0,new_pn_latency.avgcount=0,new_pn_latency.sum=0,commit_keys.sum=116820,share_state_bytes.sum=0,refresh_latency.avgcount=19576,store_state=0,collect_timeout=0,lease_ack_timeout=0,collect_latency.avgcount=0,store_state_keys.avgcount=0,commit_bytes.sum=38478195,refresh_latency.sum=8.341938952,collect_uncommitted=0,commit_latency.avgcount=19576,share_state=0,begin_latency.avgtime=0.017312474,commit_latency.avgtime=0.009926797,begin_keys.sum=58728,start_peon=0,commit_keys.avgcount=19576,begin_latency.avgcount=19576,store_state_latency.avgcount=0,start_leader=1,begin_keys.avgcount=19576,collect_bytes.sum=0,begin_bytes.avgcount=19576,store_state_bytes.sum=0,commit=19576,begin_bytes.sum=41771257,new_pn_latency.avgtime=0,refresh_latency.avgtime=0.00042613,commit_latency.sum=194.326980684,new_pn=0,refresh=19576,collect_latency.sum=0,collect_latency.avgtime=0,lease_timeout=0,begin=19576,share_state_bytes.avgcount=0,share_state_keys.avgcount=0,store_state_keys.sum=0,store_state_bytes.avgcount=0,store_state_latency.sum=0,commit_bytes.avgcount=19576,restart=2 1550658950000000000 -ceph,collection=finisher-monstore,id=test,type=monitor complete_latency.avgcount=19576,complete_latency.sum=208.300976568,complete_latency.avgtime=0.01064063,queue_len=0 1550658950000000000 -ceph,collection=AsyncMessenger::Worker-2,id=test,type=monitor msgr_created_connections=1,msgr_send_bytes=0,msgr_running_send_time=0,msgr_recv_bytes=0,msgr_send_messages=1,msgr_recv_messages=0,msgr_running_total_time=0.003026541,msgr_running_recv_time=0,msgr_running_fast_dispatch_time=0,msgr_active_connections=1 1550658950000000000 -ceph,collection=throttle-msgr_dispatch_throttler-mon,id=test,type=monitor take=0,take_sum=0,put=39933,get=39933,put_sum=56745184,wait.avgtime=0,get_or_fail_success=39933,wait.sum=0,get_sum=56745184,get_or_fail_fail=0,wait.avgcount=0,val=0,max=104857600,get_started=0 1550658950000000000 -ceph,collection=throttle-mon_daemon_bytes,id=test,type=monitor max=419430400,get_started=0,wait.avgtime=0,take_sum=0,get=262,take=0,put_sum=21212,wait.avgcount=0,get_or_fail_success=262,get_or_fail_fail=0,put=262,wait.sum=0,val=0,get_sum=21212 1550658950000000000 -ceph,collection=AsyncMessenger::Worker-1,id=test,type=monitor msgr_send_messages=1071,msgr_running_total_time=0.703589077,msgr_active_connections=146,msgr_send_bytes=3887863,msgr_running_send_time=0.361602994,msgr_running_recv_time=0.328218119,msgr_running_fast_dispatch_time=0,msgr_recv_messages=978,msgr_recv_bytes=142209,msgr_created_connections=197 1550658950000000000 -ceph,collection=AsyncMessenger::Worker-0,id=test,type=monitor msgr_created_connections=54,msgr_recv_messages=38957,msgr_active_connections=47,msgr_running_fast_dispatch_time=0,msgr_send_bytes=25338946,msgr_running_total_time=9.190267622,msgr_running_send_time=3.124663809,msgr_running_recv_time=13.03937269,msgr_send_messages=15973,msgr_recv_bytes=59558181 1550658950000000000 +> ceph,collection=cct,host=stefanmon1,id=stefanmon1,type=monitor total_workers=0,unhealthy_workers=0 1587117563000000000 +> ceph,collection=mempool,host=stefanmon1,id=stefanmon1,type=monitor 
bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=0,bluefs_items=0,bluestore_alloc_bytes=0,bluestore_alloc_items=0,bluestore_cache_data_bytes=0,bluestore_cache_data_items=0,bluestore_cache_onode_bytes=0,bluestore_cache_onode_items=0,bluestore_cache_other_bytes=0,bluestore_cache_other_items=0,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=0,bluestore_txc_items=0,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=0,bluestore_writing_deferred_items=0,bluestore_writing_items=0,buffer_anon_bytes=719152,buffer_anon_items=192,buffer_meta_bytes=352,buffer_meta_items=4,mds_co_bytes=0,mds_co_items=0,osd_bytes=0,osd_items=0,osd_mapbl_bytes=0,osd_mapbl_items=0,osd_pglog_bytes=0,osd_pglog_items=0,osdmap_bytes=15872,osdmap_items=138,osdmap_mapping_bytes=63112,osdmap_mapping_items=7626,pgmap_bytes=38680,pgmap_items=477,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117563000000000
+> ceph,collection=throttle-mon_client_bytes,host=stefanmon1,id=stefanmon1,type=monitor get=1041157,get_or_fail_fail=0,get_or_fail_success=1041157,get_started=0,get_sum=64928901,max=104857600,put=1041157,put_sum=64928901,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117563000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-mon,host=stefanmon1,id=stefanmon1,type=monitor get=12695426,get_or_fail_fail=0,get_or_fail_success=12695426,get_started=0,get_sum=42542216884,max=104857600,put=12695426,put_sum=42542216884,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117563000000000
+> ceph,collection=finisher-mon_finisher,host=stefanmon1,id=stefanmon1,type=monitor complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117563000000000
+> ceph,collection=finisher-monstore,host=stefanmon1,id=stefanmon1,type=monitor complete_latency.avgcount=1609831,complete_latency.avgtime=0.015857621,complete_latency.sum=25528.09131035,queue_len=0 1587117563000000000
+> ceph,collection=mon,host=stefanmon1,id=stefanmon1,type=monitor election_call=25,election_lose=0,election_win=22,num_elections=94,num_sessions=3,session_add=174679,session_rm=439316,session_trim=137 1587117563000000000
+> ceph,collection=throttle-mon_daemon_bytes,host=stefanmon1,id=stefanmon1,type=monitor get=72697,get_or_fail_fail=0,get_or_fail_success=72697,get_started=0,get_sum=32261199,max=419430400,put=72697,put_sum=32261199,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117563000000000
+> ceph,collection=rocksdb,host=stefanmon1,id=stefanmon1,type=monitor compact=1,compact_queue_len=0,compact_queue_merge=1,compact_range=19126,get=62449211,get_latency.avgcount=62449211,get_latency.avgtime=0.000022216,get_latency.sum=1387.371811726,rocksdb_write_delay_time.avgcount=0,rocksdb_write_delay_time.avgtime=0,rocksdb_write_delay_time.sum=0,rocksdb_write_memtable_time.avgcount=0,rocksdb_write_memtable_time.avgtime=0,rocksdb_write_memtable_time.sum=0,rocksdb_write_pre_and_post_time.avgcount=0,rocksdb_write_pre_and_post_time.avgtime=0,rocksdb_write_pre_and_post_time.sum=0,rocksdb_write_wal_time.avgcount=0,rocksdb_write_wal_time.avgtime=0,rocksdb_write_wal_time.sum=0,submit_latency.avgcount=0,submit_latency.avgtime=0,submit_latency.sum=0,submit_sync_latency.avgcount=3219961,submit_sync_latency.avgtime=0.007532173,submit_sync_latency.sum=24253.303584224,submit_transaction=0,submit_transaction_sync=3219961 1587117563000000000
+> ceph,collection=AsyncMessenger::Worker-0,host=stefanmon1,id=stefanmon1,type=monitor msgr_active_connections=148317,msgr_created_connections=162806,msgr_recv_bytes=11557888328,msgr_recv_messages=5113369,msgr_running_fast_dispatch_time=0,msgr_running_recv_time=868.377161686,msgr_running_send_time=1626.525392721,msgr_running_total_time=4222.235694322,msgr_send_bytes=91516226816,msgr_send_messages=6973706 1587117563000000000
+> ceph,collection=AsyncMessenger::Worker-2,host=stefanmon1,id=stefanmon1,type=monitor msgr_active_connections=146396,msgr_created_connections=159788,msgr_recv_bytes=2162802496,msgr_recv_messages=689168,msgr_running_fast_dispatch_time=0,msgr_running_recv_time=164.148550562,msgr_running_send_time=153.462890368,msgr_running_total_time=644.188791379,msgr_send_bytes=7422484152,msgr_send_messages=749381 1587117563000000000
+> ceph,collection=cluster,host=stefanmon1,id=stefanmon1,type=monitor num_bytes=5055,num_mon=3,num_mon_quorum=3,num_object=245,num_object_degraded=0,num_object_misplaced=0,num_object_unfound=0,num_osd=9,num_osd_in=8,num_osd_up=8,num_pg=504,num_pg_active=504,num_pg_active_clean=504,num_pg_peering=0,num_pool=17,osd_bytes=858959904768,osd_bytes_avail=849889787904,osd_bytes_used=9070116864,osd_epoch=203 1587117563000000000
+> ceph,collection=paxos,host=stefanmon1,id=stefanmon1,type=monitor accept_timeout=1,begin=1609847,begin_bytes.avgcount=1609847,begin_bytes.sum=41408662074,begin_keys.avgcount=1609847,begin_keys.sum=4829541,begin_latency.avgcount=1609847,begin_latency.avgtime=0.007213392,begin_latency.sum=11612.457661116,collect=0,collect_bytes.avgcount=0,collect_bytes.sum=0,collect_keys.avgcount=0,collect_keys.sum=0,collect_latency.avgcount=0,collect_latency.avgtime=0,collect_latency.sum=0,collect_timeout=1,collect_uncommitted=17,commit=1609831,commit_bytes.avgcount=1609831,commit_bytes.sum=41087428442,commit_keys.avgcount=1609831,commit_keys.sum=11637931,commit_latency.avgcount=1609831,commit_latency.avgtime=0.006236333,commit_latency.sum=10039.442388355,lease_ack_timeout=0,lease_timeout=0,new_pn=33,new_pn_latency.avgcount=33,new_pn_latency.avgtime=3.844272773,new_pn_latency.sum=126.86100151,refresh=1609856,refresh_latency.avgcount=1609856,refresh_latency.avgtime=0.005900486,refresh_latency.sum=9498.932866761,restart=109,share_state=2,share_state_bytes.avgcount=2,share_state_bytes.sum=39612,share_state_keys.avgcount=2,share_state_keys.sum=2,start_leader=22,start_peon=0,store_state=14,store_state_bytes.avgcount=14,store_state_bytes.sum=51908281,store_state_keys.avgcount=14,store_state_keys.sum=7016,store_state_latency.avgcount=14,store_state_latency.avgtime=11.668377665,store_state_latency.sum=163.357287311 1587117563000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-mon-mgrc,host=stefanmon1,id=stefanmon1,type=monitor get=13225,get_or_fail_fail=0,get_or_fail_success=13225,get_started=0,get_sum=158700,max=104857600,put=13225,put_sum=158700,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117563000000000
+> ceph,collection=AsyncMessenger::Worker-1,host=stefanmon1,id=stefanmon1,type=monitor msgr_active_connections=147680,msgr_created_connections=162374,msgr_recv_bytes=29781706740,msgr_recv_messages=7170733,msgr_running_fast_dispatch_time=0,msgr_running_recv_time=1728.559151358,msgr_running_send_time=2086.681244508,msgr_running_total_time=6084.532916585,msgr_send_bytes=94062125718,msgr_send_messages=9161564 1587117563000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-cluster,host=stefanosd1,id=0,type=osd get=281745,get_or_fail_fail=0,get_or_fail_success=281745,get_started=0,get_sum=446024457,max=104857600,put=281745,put_sum=446024457,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-bluestore_throttle_bytes,host=stefanosd1,id=0,type=osd get=275707,get_or_fail_fail=0,get_or_fail_success=0,get_started=275707,get_sum=185073179842,max=67108864,put=268870,put_sum=185073179842,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_server,host=stefanosd1,id=0,type=osd get=2606982,get_or_fail_fail=0,get_or_fail_success=2606982,get_started=0,get_sum=5224391928,max=104857600,put=2606982,put_sum=5224391928,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=rocksdb,host=stefanosd1,id=0,type=osd compact=0,compact_queue_len=0,compact_queue_merge=0,compact_range=0,get=1570,get_latency.avgcount=1570,get_latency.avgtime=0.000051233,get_latency.sum=0.080436788,rocksdb_write_delay_time.avgcount=0,rocksdb_write_delay_time.avgtime=0,rocksdb_write_delay_time.sum=0,rocksdb_write_memtable_time.avgcount=0,rocksdb_write_memtable_time.avgtime=0,rocksdb_write_memtable_time.sum=0,rocksdb_write_pre_and_post_time.avgcount=0,rocksdb_write_pre_and_post_time.avgtime=0,rocksdb_write_pre_and_post_time.sum=0,rocksdb_write_wal_time.avgcount=0,rocksdb_write_wal_time.avgtime=0,rocksdb_write_wal_time.sum=0,submit_latency.avgcount=275707,submit_latency.avgtime=0.000174936,submit_latency.sum=48.231345334,submit_sync_latency.avgcount=268870,submit_sync_latency.avgtime=0.006097313,submit_sync_latency.sum=1639.384555624,submit_transaction=275707,submit_transaction_sync=268870 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_server,host=stefanosd1,id=0,type=osd get=2606982,get_or_fail_fail=0,get_or_fail_success=2606982,get_started=0,get_sum=5224391928,max=104857600,put=2606982,put_sum=5224391928,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-objecter_bytes,host=stefanosd1,id=0,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_client,host=stefanosd1,id=0,type=osd get=2610285,get_or_fail_fail=0,get_or_fail_success=2610285,get_started=0,get_sum=5231011140,max=104857600,put=2610285,put_sum=5231011140,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=AsyncMessenger::Worker-1,host=stefanosd1,id=0,type=osd msgr_active_connections=2093,msgr_created_connections=29142,msgr_recv_bytes=7214238199,msgr_recv_messages=3928206,msgr_running_fast_dispatch_time=171.289615064,msgr_running_recv_time=278.531155966,msgr_running_send_time=489.482588813,msgr_running_total_time=1134.004853662,msgr_send_bytes=9814725232,msgr_send_messages=3814927 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-client,host=stefanosd1,id=0,type=osd get=488206,get_or_fail_fail=0,get_or_fail_success=488206,get_started=0,get_sum=104085134,max=104857600,put=488206,put_sum=104085134,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=finisher-defered_finisher,host=stefanosd1,id=0,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000
+> ceph,collection=recoverystate_perf,host=stefanosd1,id=0,type=osd activating_latency.avgcount=87,activating_latency.avgtime=0.114348341,activating_latency.sum=9.948305683,active_latency.avgcount=25,active_latency.avgtime=1790.961574431,active_latency.sum=44774.039360795,backfilling_latency.avgcount=0,backfilling_latency.avgtime=0,backfilling_latency.sum=0,clean_latency.avgcount=25,clean_latency.avgtime=1790.830827794,clean_latency.sum=44770.770694867,down_latency.avgcount=0,down_latency.avgtime=0,down_latency.sum=0,getinfo_latency.avgcount=141,getinfo_latency.avgtime=0.446233476,getinfo_latency.sum=62.918920183,getlog_latency.avgcount=87,getlog_latency.avgtime=0.007708069,getlog_latency.sum=0.670602073,getmissing_latency.avgcount=87,getmissing_latency.avgtime=0.000077594,getmissing_latency.sum=0.006750701,incomplete_latency.avgcount=0,incomplete_latency.avgtime=0,incomplete_latency.sum=0,initial_latency.avgcount=166,initial_latency.avgtime=0.001313715,initial_latency.sum=0.218076764,notbackfilling_latency.avgcount=0,notbackfilling_latency.avgtime=0,notbackfilling_latency.sum=0,notrecovering_latency.avgcount=0,notrecovering_latency.avgtime=0,notrecovering_latency.sum=0,peering_latency.avgcount=141,peering_latency.avgtime=0.948324273,peering_latency.sum=133.713722563,primary_latency.avgcount=79,primary_latency.avgtime=567.706192991,primary_latency.sum=44848.78924634,recovered_latency.avgcount=87,recovered_latency.avgtime=0.000378284,recovered_latency.sum=0.032910791,recovering_latency.avgcount=2,recovering_latency.avgtime=0.338242008,recovering_latency.sum=0.676484017,replicaactive_latency.avgcount=23,replicaactive_latency.avgtime=1790.893991295,replicaactive_latency.sum=41190.561799786,repnotrecovering_latency.avgcount=25,repnotrecovering_latency.avgtime=1647.627024984,repnotrecovering_latency.sum=41190.675624616,reprecovering_latency.avgcount=2,reprecovering_latency.avgtime=0.311884638,reprecovering_latency.sum=0.623769276,repwaitbackfillreserved_latency.avgcount=0,repwaitbackfillreserved_latency.avgtime=0,repwaitbackfillreserved_latency.sum=0,repwaitrecoveryreserved_latency.avgcount=2,repwaitrecoveryreserved_latency.avgtime=0.000462873,repwaitrecoveryreserved_latency.sum=0.000925746,reset_latency.avgcount=372,reset_latency.avgtime=0.125056393,reset_latency.sum=46.520978537,start_latency.avgcount=372,start_latency.avgtime=0.000109397,start_latency.sum=0.040695881,started_latency.avgcount=206,started_latency.avgtime=418.299777245,started_latency.sum=86169.754112641,stray_latency.avgcount=231,stray_latency.avgtime=0.98203205,stray_latency.sum=226.849403565,waitactingchange_latency.avgcount=0,waitactingchange_latency.avgtime=0,waitactingchange_latency.sum=0,waitlocalbackfillreserved_latency.avgcount=0,waitlocalbackfillreserved_latency.avgtime=0,waitlocalbackfillreserved_latency.sum=0,waitlocalrecoveryreserved_latency.avgcount=2,waitlocalrecoveryreserved_latency.avgtime=0.002802377,waitlocalrecoveryreserved_latency.sum=0.005604755,waitremotebackfillreserved_latency.avgcount=0,waitremotebackfillreserved_latency.avgtime=0,waitremotebackfillreserved_latency.sum=0,waitremoterecoveryreserved_latency.avgcount=2,waitremoterecoveryreserved_latency.avgtime=0.012855439,waitremoterecoveryreserved_latency.sum=0.025710878,waitupthru_latency.avgcount=87,waitupthru_latency.avgtime=0.805727895,waitupthru_latency.sum=70.09832695 1587117698000000000
+> ceph,collection=cct,host=stefanosd1,id=0,type=osd total_workers=6,unhealthy_workers=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_client,host=stefanosd1,id=0,type=osd get=2610285,get_or_fail_fail=0,get_or_fail_success=2610285,get_started=0,get_sum=5231011140,max=104857600,put=2610285,put_sum=5231011140,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=bluefs,host=stefanosd1,id=0,type=osd bytes_written_slow=0,bytes_written_sst=9018781,bytes_written_wal=831081573,db_total_bytes=4294967296,db_used_bytes=434110464,files_written_sst=3,files_written_wal=2,gift_bytes=0,log_bytes=134291456,log_compactions=1,logged_bytes=1101668352,max_bytes_db=1234173952,max_bytes_slow=0,max_bytes_wal=0,num_files=11,reclaim_bytes=0,slow_total_bytes=0,slow_used_bytes=0,wal_total_bytes=0,wal_used_bytes=0 1587117698000000000
+> ceph,collection=mempool,host=stefanosd1,id=0,type=osd bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=10600,bluefs_items=458,bluestore_alloc_bytes=230288,bluestore_alloc_items=28786,bluestore_cache_data_bytes=622592,bluestore_cache_data_items=43,bluestore_cache_onode_bytes=249280,bluestore_cache_onode_items=380,bluestore_cache_other_bytes=192678,bluestore_cache_other_items=20199,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=8272,bluestore_txc_items=11,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=670130,bluestore_writing_deferred_items=176,bluestore_writing_items=0,buffer_anon_bytes=2412465,buffer_anon_items=297,buffer_meta_bytes=5896,buffer_meta_items=67,mds_co_bytes=0,mds_co_items=0,osd_bytes=2124800,osd_items=166,osd_mapbl_bytes=155152,osd_mapbl_items=10,osd_pglog_bytes=3214704,osd_pglog_items=6288,osdmap_bytes=710892,osdmap_items=4426,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117698000000000
+> ceph,collection=osd,host=stefanosd1,id=0,type=osd agent_evict=0,agent_flush=0,agent_skip=0,agent_wake=0,cached_crc=0,cached_crc_adjusted=0,copyfrom=0,heartbeat_to_peers=7,loadavg=11,map_message_epoch_dups=21,map_message_epochs=40,map_messages=31,messages_delayed_for_map=0,missed_crc=0,numpg=166,numpg_primary=62,numpg_removing=0,numpg_replica=104,numpg_stray=0,object_ctx_cache_hit=476529,object_ctx_cache_total=476536,op=476525,op_before_dequeue_op_lat.avgcount=755708,op_before_dequeue_op_lat.avgtime=0.000205759,op_before_dequeue_op_lat.sum=155.493843473,op_before_queue_op_lat.avgcount=755702,op_before_queue_op_lat.avgtime=0.000047877,op_before_queue_op_lat.sum=36.181069552,op_cache_hit=0,op_in_bytes=0,op_latency.avgcount=476525,op_latency.avgtime=0.000365956,op_latency.sum=174.387387878,op_out_bytes=10882,op_prepare_latency.avgcount=476527,op_prepare_latency.avgtime=0.000205307,op_prepare_latency.sum=97.834380034,op_process_latency.avgcount=476525,op_process_latency.avgtime=0.000139616,op_process_latency.sum=66.530847665,op_r=476521,op_r_latency.avgcount=476521,op_r_latency.avgtime=0.00036559,op_r_latency.sum=174.21148267,op_r_out_bytes=10882,op_r_prepare_latency.avgcount=476523,op_r_prepare_latency.avgtime=0.000205302,op_r_prepare_latency.sum=97.831473175,op_r_process_latency.avgcount=476521,op_r_process_latency.avgtime=0.000139396,op_r_process_latency.sum=66.425498624,op_rw=2,op_rw_in_bytes=0,op_rw_latency.avgcount=2,op_rw_latency.avgtime=0.048818975,op_rw_latency.sum=0.097637951,op_rw_out_bytes=0,op_rw_prepare_latency.avgcount=2,op_rw_prepare_latency.avgtime=0.000467887,op_rw_prepare_latency.sum=0.000935775,op_rw_process_latency.avgcount=2,op_rw_process_latency.avgtime=0.013741256,op_rw_process_latency.sum=0.027482512,op_w=2,op_w_in_bytes=0,op_w_latency.avgcount=2,op_w_latency.avgtime=0.039133628,op_w_latency.sum=0.078267257,op_w_prepare_latency.avgcount=2,op_w_prepare_latency.avgtime=0.000985542,op_w_prepare_latency.sum=0.001971084,op_w_process_latency.avgcount=2,op_w_process_latency.avgtime=0.038933264,op_w_process_latency.sum=0.077866529,op_wip=0,osd_map_bl_cache_hit=22,osd_map_bl_cache_miss=40,osd_map_cache_hit=4570,osd_map_cache_miss=15,osd_map_cache_miss_low=0,osd_map_cache_miss_low_avg.avgcount=0,osd_map_cache_miss_low_avg.sum=0,osd_pg_biginfo=2050,osd_pg_fastinfo=265780,osd_pg_info=274542,osd_tier_flush_lat.avgcount=0,osd_tier_flush_lat.avgtime=0,osd_tier_flush_lat.sum=0,osd_tier_promote_lat.avgcount=0,osd_tier_promote_lat.avgtime=0,osd_tier_promote_lat.sum=0,osd_tier_r_lat.avgcount=0,osd_tier_r_lat.avgtime=0,osd_tier_r_lat.sum=0,pull=0,push=2,push_out_bytes=10,recovery_bytes=10,recovery_ops=2,stat_bytes=107369988096,stat_bytes_avail=106271539200,stat_bytes_used=1098448896,subop=253554,subop_in_bytes=168644225,subop_latency.avgcount=253554,subop_latency.avgtime=0.0073036,subop_latency.sum=1851.857230388,subop_pull=0,subop_pull_latency.avgcount=0,subop_pull_latency.avgtime=0,subop_pull_latency.sum=0,subop_push=0,subop_push_in_bytes=0,subop_push_latency.avgcount=0,subop_push_latency.avgtime=0,subop_push_latency.sum=0,subop_w=253554,subop_w_in_bytes=168644225,subop_w_latency.avgcount=253554,subop_w_latency.avgtime=0.0073036,subop_w_latency.sum=1851.857230388,tier_clean=0,tier_delay=0,tier_dirty=0,tier_evict=0,tier_flush=0,tier_flush_fail=0,tier_promote=0,tier_proxy_read=0,tier_proxy_write=0,tier_try_flush=0,tier_try_flush_fail=0,tier_whiteout=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-ms_objecter,host=stefanosd1,id=0,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=AsyncMessenger::Worker-2,host=stefanosd1,id=0,type=osd msgr_active_connections=2055,msgr_created_connections=27411,msgr_recv_bytes=6431950009,msgr_recv_messages=3552443,msgr_running_fast_dispatch_time=162.271664213,msgr_running_recv_time=254.307853033,msgr_running_send_time=503.037285799,msgr_running_total_time=1130.21070681,msgr_send_bytes=10865436237,msgr_send_messages=3523374 1587117698000000000
+> ceph,collection=bluestore,host=stefanosd1,id=0,type=osd bluestore_allocated=24641536,bluestore_blob_split=0,bluestore_blobs=88,bluestore_buffer_bytes=622592,bluestore_buffer_hit_bytes=160578,bluestore_buffer_miss_bytes=540236,bluestore_buffers=43,bluestore_compressed=0,bluestore_compressed_allocated=0,bluestore_compressed_original=0,bluestore_extent_compress=0,bluestore_extents=88,bluestore_fragmentation_micros=1,bluestore_gc_merged=0,bluestore_onode_hits=532102,bluestore_onode_misses=388,bluestore_onode_reshard=0,bluestore_onode_shard_hits=0,bluestore_onode_shard_misses=0,bluestore_onodes=380,bluestore_read_eio=0,bluestore_reads_with_retries=0,bluestore_stored=1987856,bluestore_txc=275707,bluestore_write_big=0,bluestore_write_big_blobs=0,bluestore_write_big_bytes=0,bluestore_write_small=60,bluestore_write_small_bytes=343843,bluestore_write_small_deferred=22,bluestore_write_small_new=38,bluestore_write_small_pre_read=22,bluestore_write_small_unused=0,commit_lat.avgcount=275707,commit_lat.avgtime=0.00699778,commit_lat.sum=1929.337103334,compress_lat.avgcount=0,compress_lat.avgtime=0,compress_lat.sum=0,compress_rejected_count=0,compress_success_count=0,csum_lat.avgcount=67,csum_lat.avgtime=0.000032601,csum_lat.sum=0.002184323,decompress_lat.avgcount=0,decompress_lat.avgtime=0,decompress_lat.sum=0,deferred_write_bytes=0,deferred_write_ops=0,kv_commit_lat.avgcount=268870,kv_commit_lat.avgtime=0.006365428,kv_commit_lat.sum=1711.472749866,kv_final_lat.avgcount=268867,kv_final_lat.avgtime=0.000043227,kv_final_lat.sum=11.622427109,kv_flush_lat.avgcount=268870,kv_flush_lat.avgtime=0.000000223,kv_flush_lat.sum=0.060141588,kv_sync_lat.avgcount=268870,kv_sync_lat.avgtime=0.006365652,kv_sync_lat.sum=1711.532891454,omap_lower_bound_lat.avgcount=2,omap_lower_bound_lat.avgtime=0.000006524,omap_lower_bound_lat.sum=0.000013048,omap_next_lat.avgcount=6704,omap_next_lat.avgtime=0.000004721,omap_next_lat.sum=0.031654097,omap_seek_to_first_lat.avgcount=323,omap_seek_to_first_lat.avgtime=0.00000522,omap_seek_to_first_lat.sum=0.00168614,omap_upper_bound_lat.avgcount=4,omap_upper_bound_lat.avgtime=0.000013086,omap_upper_bound_lat.sum=0.000052344,read_lat.avgcount=227,read_lat.avgtime=0.000699457,read_lat.sum=0.158776879,read_onode_meta_lat.avgcount=311,read_onode_meta_lat.avgtime=0.000072207,read_onode_meta_lat.sum=0.022456667,read_wait_aio_lat.avgcount=84,read_wait_aio_lat.avgtime=0.001556141,read_wait_aio_lat.sum=0.130715885,state_aio_wait_lat.avgcount=275707,state_aio_wait_lat.avgtime=0.000000345,state_aio_wait_lat.sum=0.095246457,state_deferred_aio_wait_lat.avgcount=0,state_deferred_aio_wait_lat.avgtime=0,state_deferred_aio_wait_lat.sum=0,state_deferred_cleanup_lat.avgcount=0,state_deferred_cleanup_lat.avgtime=0,state_deferred_cleanup_lat.sum=0,state_deferred_queued_lat.avgcount=0,state_deferred_queued_lat.avgtime=0,state_deferred_queued_lat.sum=0,state_done_lat.avgcount=275696,state_done_lat.avgtime=0.00000286,state_done_lat.sum=0.788700007,state_finishing_lat.avgcount=275696,state_finishing_lat.avgtime=0.000000302,state_finishing_lat.sum=0.083437168,state_io_done_lat.avgcount=275707,state_io_done_lat.avgtime=0.000001041,state_io_done_lat.sum=0.287025147,state_kv_commiting_lat.avgcount=275707,state_kv_commiting_lat.avgtime=0.006424459,state_kv_commiting_lat.sum=1771.268407864,state_kv_done_lat.avgcount=275707,state_kv_done_lat.avgtime=0.000001627,state_kv_done_lat.sum=0.448805853,state_kv_queued_lat.avgcount=275707,state_kv_queued_lat.avgtime=0.000488565,state_kv_queued_lat.sum=134.7009424,state_prepare_lat.avgcount=275707,state_prepare_lat.avgtime=0.000082464,state_prepare_lat.sum=22.736065534,submit_lat.avgcount=275707,submit_lat.avgtime=0.000120236,submit_lat.sum=33.149934412,throttle_lat.avgcount=275707,throttle_lat.avgtime=0.000001571,throttle_lat.sum=0.433185935,write_pad_bytes=151773,write_penalty_read_ops=0 1587117698000000000
+> ceph,collection=finisher-objecter-finisher-0,host=stefanosd1,id=0,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000
+> ceph,collection=objecter,host=stefanosd1,id=0,type=osd command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=19,omap_del=0,omap_rd=0,omap_wr=0,op=0,op_active=0,op_laggy=0,op_pg=0,op_r=0,op_reply=0,op_resend=0,op_rmw=0,op_send=0,op_send_bytes=0,op_w=0,osd_laggy=0,osd_session_close=0,osd_session_open=0,osd_sessions=0,osdop_append=0,osdop_call=0,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=0,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=0,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=0,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117698000000000
+> ceph,collection=finisher-commit_finisher,host=stefanosd1,id=0,type=osd complete_latency.avgcount=11,complete_latency.avgtime=0.003447516,complete_latency.sum=0.037922681,queue_len=0 1587117698000000000
+> ceph,collection=throttle-objecter_ops,host=stefanosd1,id=0,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=1024,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=AsyncMessenger::Worker-0,host=stefanosd1,id=0,type=osd msgr_active_connections=2128,msgr_created_connections=33685,msgr_recv_bytes=8679123051,msgr_recv_messages=4200356,msgr_running_fast_dispatch_time=151.889337454,msgr_running_recv_time=297.632294886,msgr_running_send_time=599.20020523,msgr_running_total_time=1321.361931202,msgr_send_bytes=11716202897,msgr_send_messages=4347418 1587117698000000000
+> ceph,collection=throttle-osd_client_bytes,host=stefanosd1,id=0,type=osd get=476554,get_or_fail_fail=0,get_or_fail_success=476554,get_started=0,get_sum=103413728,max=524288000,put=476587,put_sum=103413728,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-bluestore_throttle_deferred_bytes,host=stefanosd1,id=0,type=osd get=11,get_or_fail_fail=0,get_or_fail_success=11,get_started=0,get_sum=7723117,max=201326592,put=0,put_sum=0,take=0,take_sum=0,val=7723117,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-cluster,host=stefanosd1,id=1,type=osd get=860895,get_or_fail_fail=0,get_or_fail_success=860895,get_started=0,get_sum=596482256,max=104857600,put=860895,put_sum=596482256,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-objecter_ops,host=stefanosd1,id=1,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=1024,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-objecter_bytes,host=stefanosd1,id=1,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=finisher-defered_finisher,host=stefanosd1,id=1,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000
+> ceph,collection=osd,host=stefanosd1,id=1,type=osd agent_evict=0,agent_flush=0,agent_skip=0,agent_wake=0,cached_crc=0,cached_crc_adjusted=0,copyfrom=0,heartbeat_to_peers=7,loadavg=11,map_message_epoch_dups=29,map_message_epochs=50,map_messages=39,messages_delayed_for_map=0,missed_crc=0,numpg=188,numpg_primary=71,numpg_removing=0,numpg_replica=117,numpg_stray=0,object_ctx_cache_hit=1349777,object_ctx_cache_total=2934118,op=1319230,op_before_dequeue_op_lat.avgcount=3792053,op_before_dequeue_op_lat.avgtime=0.000405802,op_before_dequeue_op_lat.sum=1538.826381623,op_before_queue_op_lat.avgcount=3778690,op_before_queue_op_lat.avgtime=0.000033273,op_before_queue_op_lat.sum=125.731131596,op_cache_hit=0,op_in_bytes=0,op_latency.avgcount=1319230,op_latency.avgtime=0.002858138,op_latency.sum=3770.541581676,op_out_bytes=1789210,op_prepare_latency.avgcount=1336472,op_prepare_latency.avgtime=0.000279458,op_prepare_latency.sum=373.488913339,op_process_latency.avgcount=1319230,op_process_latency.avgtime=0.002666408,op_process_latency.sum=3517.606407526,op_r=1075394,op_r_latency.avgcount=1075394,op_r_latency.avgtime=0.000303779,op_r_latency.sum=326.682443032,op_r_out_bytes=1789210,op_r_prepare_latency.avgcount=1075394,op_r_prepare_latency.avgtime=0.000171228,op_r_prepare_latency.sum=184.138580631,op_r_process_latency.avgcount=1075394,op_r_process_latency.avgtime=0.00011609,op_r_process_latency.sum=124.842894319,op_rw=243832,op_rw_in_bytes=0,op_rw_latency.avgcount=243832,op_rw_latency.avgtime=0.014123636,op_rw_latency.sum=3443.79445124,op_rw_out_bytes=0,op_rw_prepare_latency.avgcount=261072,op_rw_prepare_latency.avgtime=0.000725265,op_rw_prepare_latency.sum=189.346543463,op_rw_process_latency.avgcount=243832,op_rw_process_latency.avgtime=0.013914089,op_rw_process_latency.sum=3392.700241086,op_w=4,op_w_in_bytes=0,op_w_latency.avgcount=4,op_w_latency.avgtime=0.016171851,op_w_latency.sum=0.064687404,op_w_prepare_latency.avgcount=6,op_w_prepare_latency.avgtime=0.00063154,op_w_prepare_latency.sum=0.003789245,op_w_process_latency.avgcount=4,op_w_process_latency.avgtime=0.01581803,op_w_process_latency.sum=0.063272121,op_wip=0,osd_map_bl_cache_hit=36,osd_map_bl_cache_miss=40,osd_map_cache_hit=5404,osd_map_cache_miss=14,osd_map_cache_miss_low=0,osd_map_cache_miss_low_avg.avgcount=0,osd_map_cache_miss_low_avg.sum=0,osd_pg_biginfo=2333,osd_pg_fastinfo=576157,osd_pg_info=591751,osd_tier_flush_lat.avgcount=0,osd_tier_flush_lat.avgtime=0,osd_tier_flush_lat.sum=0,osd_tier_promote_lat.avgcount=0,osd_tier_promote_lat.avgtime=0,osd_tier_promote_lat.sum=0,osd_tier_r_lat.avgcount=0,osd_tier_r_lat.avgtime=0,osd_tier_r_lat.sum=0,pull=0,push=22,push_out_bytes=0,recovery_bytes=0,recovery_ops=21,stat_bytes=107369988096,stat_bytes_avail=106271997952,stat_bytes_used=1097990144,subop=306946,subop_in_bytes=204236742,subop_latency.avgcount=306946,subop_latency.avgtime=0.006744881,subop_latency.sum=2070.314452989,subop_pull=0,subop_pull_latency.avgcount=0,subop_pull_latency.avgtime=0,subop_pull_latency.sum=0,subop_push=0,subop_push_in_bytes=0,subop_push_latency.avgcount=0,subop_push_latency.avgtime=0,subop_push_latency.sum=0,subop_w=306946,subop_w_in_bytes=204236742,subop_w_latency.avgcount=306946,subop_w_latency.avgtime=0.006744881,subop_w_latency.sum=2070.314452989,tier_clean=0,tier_delay=0,tier_dirty=8,tier_evict=0,tier_flush=0,tier_flush_fail=0,tier_promote=0,tier_proxy_read=0,tier_proxy_write=0,tier_try_flush=0,tier_try_flush_fail=0,tier_whiteout=0 1587117698000000000
+> ceph,collection=objecter,host=stefanosd1,id=1,type=osd command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=19,omap_del=0,omap_rd=0,omap_wr=0,op=0,op_active=0,op_laggy=0,op_pg=0,op_r=0,op_reply=0,op_resend=0,op_rmw=0,op_send=0,op_send_bytes=0,op_w=0,osd_laggy=0,osd_session_close=0,osd_session_open=0,osd_sessions=0,osdop_append=0,osdop_call=0,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=0,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=0,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=0,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117698000000000
+> ceph,collection=AsyncMessenger::Worker-0,host=stefanosd1,id=1,type=osd msgr_active_connections=1356,msgr_created_connections=12290,msgr_recv_bytes=8577187219,msgr_recv_messages=6387040,msgr_running_fast_dispatch_time=475.903632306,msgr_running_recv_time=425.937196699,msgr_running_send_time=783.676217521,msgr_running_total_time=1989.242459076,msgr_send_bytes=12583034449,msgr_send_messages=6074344 1587117698000000000
+> ceph,collection=bluestore,host=stefanosd1,id=1,type=osd bluestore_allocated=24182784,bluestore_blob_split=0,bluestore_blobs=88,bluestore_buffer_bytes=614400,bluestore_buffer_hit_bytes=142047,bluestore_buffer_miss_bytes=541480,bluestore_buffers=41,bluestore_compressed=0,bluestore_compressed_allocated=0,bluestore_compressed_original=0,bluestore_extent_compress=0,bluestore_extents=88,bluestore_fragmentation_micros=1,bluestore_gc_merged=0,bluestore_onode_hits=1403948,bluestore_onode_misses=1584732,bluestore_onode_reshard=0,bluestore_onode_shard_hits=0,bluestore_onode_shard_misses=0,bluestore_onodes=459,bluestore_read_eio=0,bluestore_reads_with_retries=0,bluestore_stored=1985647,bluestore_txc=593150,bluestore_write_big=0,bluestore_write_big_blobs=0,bluestore_write_big_bytes=0,bluestore_write_small=58,bluestore_write_small_bytes=343091,bluestore_write_small_deferred=20,bluestore_write_small_new=38,bluestore_write_small_pre_read=20,bluestore_write_small_unused=0,commit_lat.avgcount=593150,commit_lat.avgtime=0.006514834,commit_lat.sum=3864.274280733,compress_lat.avgcount=0,compress_lat.avgtime=0,compress_lat.sum=0,compress_rejected_count=0,compress_success_count=0,csum_lat.avgcount=60,csum_lat.avgtime=0.000028258,csum_lat.sum=0.001695512,decompress_lat.avgcount=0,decompress_lat.avgtime=0,decompress_lat.sum=0,deferred_write_bytes=0,deferred_write_ops=0,kv_commit_lat.avgcount=578129,kv_commit_lat.avgtime=0.00570707,kv_commit_lat.sum=3299.423186928,kv_final_lat.avgcount=578124,kv_final_lat.avgtime=0.000042752,kv_final_lat.sum=24.716171934,kv_flush_lat.avgcount=578129,kv_flush_lat.avgtime=0.000000209,kv_flush_lat.sum=0.121169044,kv_sync_lat.avgcount=578129,kv_sync_lat.avgtime=0.00570728,kv_sync_lat.sum=3299.544355972,omap_lower_bound_lat.avgcount=22,omap_lower_bound_lat.avgtime=0.000005979,omap_lower_bound_lat.sum=0.000131539,omap_next_lat.avgcount=13248,omap_next_lat.avgtime=0.000004836,omap_next_lat.sum=0.064077797,omap_seek_to_first_lat.avgcount=525,omap_seek_to_first_lat.avgtime=0.000004906,omap_seek_to_first_lat.sum=0.002575786,omap_upper_bound_lat.avgcount=0,omap_upper_bound_lat.avgtime=0,omap_upper_bound_lat.sum=0,read_lat.avgcount=406,read_lat.avgtime=0.000383254,read_lat.sum=0.155601529,read_onode_meta_lat.avgcount=483,read_onode_meta_lat.avgtime=0.000008805,read_onode_meta_lat.sum=0.004252832,read_wait_aio_lat.avgcount=77,read_wait_aio_lat.avgtime=0.001907361,read_wait_aio_lat.sum=0.146866799,state_aio_wait_lat.avgcount=593150,state_aio_wait_lat.avgtime=0.000000388,state_aio_wait_lat.sum=0.230498048,state_deferred_aio_wait_lat.avgcount=0,state_deferred_aio_wait_lat.avgtime=0,state_deferred_aio_wait_lat.sum=0,state_deferred_cleanup_lat.avgcount=0,state_deferred_cleanup_lat.avgtime=0,state_deferred_cleanup_lat.sum=0,state_deferred_queued_lat.avgcount=0,state_deferred_queued_lat.avgtime=0,state_deferred_queued_lat.sum=0,state_done_lat.avgcount=593140,state_done_lat.avgtime=0.000003048,state_done_lat.sum=1.80789161,state_finishing_lat.avgcount=593140,state_finishing_lat.avgtime=0.000000325,state_finishing_lat.sum=0.192952339,state_io_done_lat.avgcount=593150,state_io_done_lat.avgtime=0.000001202,state_io_done_lat.sum=0.713333116,state_kv_commiting_lat.avgcount=593150,state_kv_commiting_lat.avgtime=0.005788541,state_kv_commiting_lat.sum=3433.473378536,state_kv_done_lat.avgcount=593150,state_kv_done_lat.avgtime=0.000001472,state_kv_done_lat.sum=0.873559611,state_kv_queued_lat.avgcount=593150,state_kv_queued_lat.avgtime=0.000634215,state_kv_queued_lat.sum=376.18491577,state_prepare_lat.avgcount=593150,state_prepare_lat.avgtime=0.000089694,state_prepare_lat.sum=53.202464675,submit_lat.avgcount=593150,submit_lat.avgtime=0.000127856,submit_lat.sum=75.83816759,throttle_lat.avgcount=593150,throttle_lat.avgtime=0.000001726,throttle_lat.sum=1.023832181,write_pad_bytes=144333,write_penalty_read_ops=0 1587117698000000000
+> ceph,collection=throttle-osd_client_bytes,host=stefanosd1,id=1,type=osd get=2920772,get_or_fail_fail=0,get_or_fail_success=2920772,get_started=0,get_sum=739935873,max=524288000,put=4888498,put_sum=739935873,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_client,host=stefanosd1,id=1,type=osd get=2605442,get_or_fail_fail=0,get_or_fail_success=2605442,get_started=0,get_sum=5221305768,max=104857600,put=2605442,put_sum=5221305768,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=AsyncMessenger::Worker-2,host=stefanosd1,id=1,type=osd msgr_active_connections=1375,msgr_created_connections=12689,msgr_recv_bytes=6393440855,msgr_recv_messages=3260458,msgr_running_fast_dispatch_time=120.622437418,msgr_running_recv_time=225.24709441,msgr_running_send_time=499.150587343,msgr_running_total_time=1043.340296846,msgr_send_bytes=11134862571,msgr_send_messages=3450760 1587117698000000000
+> ceph,collection=bluefs,host=stefanosd1,id=1,type=osd bytes_written_slow=0,bytes_written_sst=19824993,bytes_written_wal=1788507023,db_total_bytes=4294967296,db_used_bytes=522190848,files_written_sst=4,files_written_wal=2,gift_bytes=0,log_bytes=1056768,log_compactions=2,logged_bytes=1933271040,max_bytes_db=1483735040,max_bytes_slow=0,max_bytes_wal=0,num_files=12,reclaim_bytes=0,slow_total_bytes=0,slow_used_bytes=0,wal_total_bytes=0,wal_used_bytes=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_client,host=stefanosd1,id=1,type=osd get=2605442,get_or_fail_fail=0,get_or_fail_success=2605442,get_started=0,get_sum=5221305768,max=104857600,put=2605442,put_sum=5221305768,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-bluestore_throttle_deferred_bytes,host=stefanosd1,id=1,type=osd get=10,get_or_fail_fail=0,get_or_fail_success=10,get_started=0,get_sum=7052009,max=201326592,put=0,put_sum=0,take=0,take_sum=0,val=7052009,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=rocksdb,host=stefanosd1,id=1,type=osd compact=0,compact_queue_len=0,compact_queue_merge=0,compact_range=0,get=1586061,get_latency.avgcount=1586061,get_latency.avgtime=0.000083009,get_latency.sum=131.658296684,rocksdb_write_delay_time.avgcount=0,rocksdb_write_delay_time.avgtime=0,rocksdb_write_delay_time.sum=0,rocksdb_write_memtable_time.avgcount=0,rocksdb_write_memtable_time.avgtime=0,rocksdb_write_memtable_time.sum=0,rocksdb_write_pre_and_post_time.avgcount=0,rocksdb_write_pre_and_post_time.avgtime=0,rocksdb_write_pre_and_post_time.sum=0,rocksdb_write_wal_time.avgcount=0,rocksdb_write_wal_time.avgtime=0,rocksdb_write_wal_time.sum=0,submit_latency.avgcount=593150,submit_latency.avgtime=0.000172072,submit_latency.sum=102.064900673,submit_sync_latency.avgcount=578129,submit_sync_latency.avgtime=0.005447017,submit_sync_latency.sum=3149.078822012,submit_transaction=593150,submit_transaction_sync=578129 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_server,host=stefanosd1,id=1,type=osd get=2607669,get_or_fail_fail=0,get_or_fail_success=2607669,get_started=0,get_sum=5225768676,max=104857600,put=2607669,put_sum=5225768676,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=recoverystate_perf,host=stefanosd1,id=1,type=osd activating_latency.avgcount=104,activating_latency.avgtime=0.071646485,activating_latency.sum=7.451234493,active_latency.avgcount=33,active_latency.avgtime=1734.369034268,active_latency.sum=57234.178130859,backfilling_latency.avgcount=1,backfilling_latency.avgtime=2.598401698,backfilling_latency.sum=2.598401698,clean_latency.avgcount=33,clean_latency.avgtime=1734.213467342,clean_latency.sum=57229.044422292,down_latency.avgcount=0,down_latency.avgtime=0,down_latency.sum=0,getinfo_latency.avgcount=167,getinfo_latency.avgtime=0.373444627,getinfo_latency.sum=62.365252849,getlog_latency.avgcount=105,getlog_latency.avgtime=0.003575062,getlog_latency.sum=0.375381569,getmissing_latency.avgcount=104,getmissing_latency.avgtime=0.000157091,getmissing_latency.sum=0.016337565,incomplete_latency.avgcount=0,incomplete_latency.avgtime=0,incomplete_latency.sum=0,initial_latency.avgcount=188,initial_latency.avgtime=0.001833512,initial_latency.sum=0.344700343,notbackfilling_latency.avgcount=0,notbackfilling_latency.avgtime=0,notbackfilling_latency.sum=0,notrecovering_latency.avgcount=0,notrecovering_latency.avgtime=0,notrecovering_latency.sum=0,peering_latency.avgcount=167,peering_latency.avgtime=1.501818082,peering_latency.sum=250.803619796,primary_latency.avgcount=97,primary_latency.avgtime=591.344286378,primary_latency.sum=57360.395778762,recovered_latency.avgcount=104,recovered_latency.avgtime=0.000291138,recovered_latency.sum=0.030278433,recovering_latency.avgcount=2,recovering_latency.avgtime=0.142378096,recovering_latency.sum=0.284756192,replicaactive_latency.avgcount=32,replicaactive_latency.avgtime=1788.474901442,replicaactive_latency.sum=57231.196846165,repnotrecovering_latency.avgcount=34,repnotrecovering_latency.avgtime=1683.273587087,repnotrecovering_latency.sum=57231.301960987,reprecovering_latency.avgcount=2,reprecovering_latency.avgtime=0.418094818,reprecovering_latency.sum=0.836189637,repwaitbackfillreserved_latency.avgcount=0,repwaitbackfillreserved_latency.avgtime=0,repwaitbackfillreserved_latency.sum=0,repwaitrecoveryreserved_latency.avgcount=2,repwaitrecoveryreserved_latency.avgtime=0.000588413,repwaitrecoveryreserved_latency.sum=0.001176827,reset_latency.avgcount=433,reset_latency.avgtime=0.15669689,reset_latency.sum=67.849753631,start_latency.avgcount=433,start_latency.avgtime=0.000412707,start_latency.sum=0.178702508,started_latency.avgcount=245,started_latency.avgtime=468.419544137,started_latency.sum=114762.788313581,stray_latency.avgcount=266,stray_latency.avgtime=1.489291271,stray_latency.sum=396.151478238,waitactingchange_latency.avgcount=1,waitactingchange_latency.avgtime=0.982689906,waitactingchange_latency.sum=0.982689906,waitlocalbackfillreserved_latency.avgcount=1,waitlocalbackfillreserved_latency.avgtime=0.000542092,waitlocalbackfillreserved_latency.sum=0.000542092,waitlocalrecoveryreserved_latency.avgcount=2,waitlocalrecoveryreserved_latency.avgtime=0.00391669,waitlocalrecoveryreserved_latency.sum=0.007833381,waitremotebackfillreserved_latency.avgcount=1,waitremotebackfillreserved_latency.avgtime=0.003110409,waitremotebackfillreserved_latency.sum=0.003110409,waitremoterecoveryreserved_latency.avgcount=2,waitremoterecoveryreserved_latency.avgtime=0.012229338,waitremoterecoveryreserved_latency.sum=0.024458677,waitupthru_latency.avgcount=104,waitupthru_latency.avgtime=1.807608905,waitupthru_latency.sum=187.991326197 1587117698000000000
+> ceph,collection=AsyncMessenger::Worker-1,host=stefanosd1,id=1,type=osd msgr_active_connections=1289,msgr_created_connections=9469,msgr_recv_bytes=8348149800,msgr_recv_messages=5048791,msgr_running_fast_dispatch_time=313.754567889,msgr_running_recv_time=372.054833029,msgr_running_send_time=694.900405016,msgr_running_total_time=1656.294769387,msgr_send_bytes=11550148208,msgr_send_messages=5175962 1587117698000000000
+> ceph,collection=throttle-bluestore_throttle_bytes,host=stefanosd1,id=1,type=osd get=593150,get_or_fail_fail=0,get_or_fail_success=0,get_started=593150,get_sum=398147414260,max=67108864,put=578129,put_sum=398147414260,take=0,take_sum=0,val=0,wait.avgcount=29,wait.avgtime=0.000972655,wait.sum=0.028207005 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-ms_objecter,host=stefanosd1,id=1,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=cct,host=stefanosd1,id=1,type=osd total_workers=6,unhealthy_workers=0 1587117698000000000
+> ceph,collection=mempool,host=stefanosd1,id=1,type=osd bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=13064,bluefs_items=593,bluestore_alloc_bytes=230288,bluestore_alloc_items=28786,bluestore_cache_data_bytes=614400,bluestore_cache_data_items=41,bluestore_cache_onode_bytes=301104,bluestore_cache_onode_items=459,bluestore_cache_other_bytes=230945,bluestore_cache_other_items=26119,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=7520,bluestore_txc_items=10,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=657768,bluestore_writing_deferred_items=172,bluestore_writing_items=0,buffer_anon_bytes=2328515,buffer_anon_items=271,buffer_meta_bytes=5808,buffer_meta_items=66,mds_co_bytes=0,mds_co_items=0,osd_bytes=2406400,osd_items=188,osd_mapbl_bytes=139623,osd_mapbl_items=9,osd_pglog_bytes=6768784,osd_pglog_items=18179,osdmap_bytes=710892,osdmap_items=4426,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-client,host=stefanosd1,id=1,type=osd get=2932513,get_or_fail_fail=0,get_or_fail_success=2932513,get_started=0,get_sum=740620215,max=104857600,put=2932513,put_sum=740620215,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_server,host=stefanosd1,id=1,type=osd get=2607669,get_or_fail_fail=0,get_or_fail_success=2607669,get_started=0,get_sum=5225768676,max=104857600,put=2607669,put_sum=5225768676,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=finisher-commit_finisher,host=stefanosd1,id=1,type=osd complete_latency.avgcount=10,complete_latency.avgtime=0.002884646,complete_latency.sum=0.028846469,queue_len=0 1587117698000000000
+> ceph,collection=finisher-objecter-finisher-0,host=stefanosd1,id=1,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000
+> ceph,collection=throttle-objecter_bytes,host=stefanosd1,id=2,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=finisher-commit_finisher,host=stefanosd1,id=2,type=osd complete_latency.avgcount=11,complete_latency.avgtime=0.002714416,complete_latency.sum=0.029858583,queue_len=0 1587117698000000000
+> ceph,collection=finisher-defered_finisher,host=stefanosd1,id=2,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000
+> ceph,collection=objecter,host=stefanosd1,id=2,type=osd command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=19,omap_del=0,omap_rd=0,omap_wr=0,op=0,op_active=0,op_laggy=0,op_pg=0,op_r=0,op_reply=0,op_resend=0,op_rmw=0,op_send=0,op_send_bytes=0,op_w=0,osd_laggy=0,osd_session_close=0,osd_session_open=0,osd_sessions=0,osdop_append=0,osdop_call=0,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=0,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=0,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=0,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_client,host=stefanosd1,id=2,type=osd get=2607136,get_or_fail_fail=0,get_or_fail_success=2607136,get_started=0,get_sum=5224700544,max=104857600,put=2607136,put_sum=5224700544,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=mempool,host=stefanosd1,id=2,type=osd bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=11624,bluefs_items=522,bluestore_alloc_bytes=230288,bluestore_alloc_items=28786,bluestore_cache_data_bytes=614400,bluestore_cache_data_items=41,bluestore_cache_onode_bytes=228288,bluestore_cache_onode_items=348,bluestore_cache_other_bytes=174158,bluestore_cache_other_items=18527,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=8272,bluestore_txc_items=11,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=670130,bluestore_writing_deferred_items=176,bluestore_writing_items=0,buffer_anon_bytes=2311664,buffer_anon_items=244,buffer_meta_bytes=5456,buffer_meta_items=62,mds_co_bytes=0,mds_co_items=0,osd_bytes=1920000,osd_items=150,osd_mapbl_bytes=155152,osd_mapbl_items=10,osd_pglog_bytes=3393520,osd_pglog_items=9128,osdmap_bytes=710892,osdmap_items=4426,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117698000000000
+> ceph,collection=osd,host=stefanosd1,id=2,type=osd agent_evict=0,agent_flush=0,agent_skip=0,agent_wake=0,cached_crc=0,cached_crc_adjusted=0,copyfrom=0,heartbeat_to_peers=7,loadavg=11,map_message_epoch_dups=37,map_message_epochs=56,map_messages=37,messages_delayed_for_map=0,missed_crc=0,numpg=150,numpg_primary=59,numpg_removing=0,numpg_replica=91,numpg_stray=0,object_ctx_cache_hit=705923,object_ctx_cache_total=705951,op=690584,op_before_dequeue_op_lat.avgcount=1155697,op_before_dequeue_op_lat.avgtime=0.000217926,op_before_dequeue_op_lat.sum=251.856487141,op_before_queue_op_lat.avgcount=1148445,op_before_queue_op_lat.avgtime=0.000039696,op_before_queue_op_lat.sum=45.589516462,op_cache_hit=0,op_in_bytes=0,op_latency.avgcount=690584,op_latency.avgtime=0.002488685,op_latency.sum=1718.646504654,op_out_bytes=1026000,op_prepare_latency.avgcount=698700,op_prepare_latency.avgtime=0.000300375,op_prepare_latency.sum=209.872029659,op_process_latency.avgcount=690584,op_process_latency.avgtime=0.00230742,op_process_latency.sum=1593.46739165,op_r=548020,op_r_latency.avgcount=548020,op_r_latency.avgtime=0.000298287,op_r_latency.sum=163.467760649,op_r_out_bytes=1026000,op_r_prepare_latency.avgcount=548020,op_r_prepare_latency.avgtime=0.000186359,op_r_prepare_latency.sum=102.128629183,op_r_process_latency.avgcount=548020,op_r_process_latency.avgtime=0.00012716,op_r_process_latency.sum=69.686468884,op_rw=142562,op_rw_in_bytes=0,op_rw_latency.avgcount=142562,op_rw_latency.avgtime=0.010908597,op_rw_latency.sum=1555.151525732,op_rw_out_bytes=0,op_rw_prepare_latency.avgcount=150678,op_rw_prepare_latency.avgtime=0.000715043,op_rw_prepare_latency.sum=107.741399304,op_rw_process_latency.avgcount=142562,op_rw_process_latency.avgtime=0.01068836,op_rw_process_latency.sum=1523.754107887,op_w=2,op_w_in_bytes=0,op_w_latency.avgcount=2,op_w_latency.avgtime=0.013609136,op_w_latency.sum=0.027218273,op_w_prepare_latency.avgcount=2,op_w_prepare_latency.avgtime=0.001000586,op_w_prepare_latency.sum=0.002001172,op_w_process_latency.avgcount=2,op_w_process_latency.avgtime=0.013407439,op_w_process_latency.sum=0.026814879,op_wip=0,osd_map_bl_cache_hit=15,osd_map_bl_cache_miss=41,osd_map_cache_hit=4241,osd_map_cache_miss=14,osd_map_cache_miss_low=0,osd_map_cache_miss_low_avg.avgcount=0,osd_map_cache_miss_low_avg.sum=0,osd_pg_biginfo=1824,osd_pg_fastinfo=285998,osd_pg_info=294869,osd_tier_flush_lat.avgcount=0,osd_tier_flush_lat.avgtime=0,osd_tier_flush_lat.sum=0,osd_tier_promote_lat.avgcount=0,osd_tier_promote_lat.avgtime=0,osd_tier_promote_lat.sum=0,osd_tier_r_lat.avgcount=0,osd_tier_r_lat.avgtime=0,osd_tier_r_lat.sum=0,pull=0,push=1,push_out_bytes=0,recovery_bytes=0,recovery_ops=0,stat_bytes=107369988096,stat_bytes_avail=106271932416,stat_bytes_used=1098055680,subop=134165,subop_in_bytes=89501237,subop_latency.avgcount=134165,subop_latency.avgtime=0.007313523,subop_latency.sum=981.218888627,subop_pull=0,subop_pull_latency.avgcount=0,subop_pull_latency.avgtime=0,subop_pull_latency.sum=0,subop_push=0,subop_push_in_bytes=0,subop_push_latency.avgcount=0,subop_push_latency.avgtime=0,subop_push_latency.sum=0,subop_w=134165,subop_w_in_bytes=89501237,subop_w_latency.avgcount=134165,subop_w_latency.avgtime=0.007313523,subop_w_latency.sum=981.218888627,tier_clean=0,tier_delay=0,tier_dirty=4,tier_evict=0,tier_flush=0,tier_flush_fail=0,tier_promote=0,tier_proxy_read=0,tier_proxy_write=0,tier_try_flush=0,tier_try_flush_fail=0,tier_whiteout=0 1587117698000000000
+> ceph,collection=AsyncMessenger::Worker-1,host=stefanosd1,id=2,type=osd msgr_active_connections=746,msgr_created_connections=15212,msgr_recv_bytes=8633229006,msgr_recv_messages=4284202,msgr_running_fast_dispatch_time=153.820479102,msgr_running_recv_time=282.031655658,msgr_running_send_time=585.444749736,msgr_running_total_time=1231.431789242,msgr_send_bytes=11962769351,msgr_send_messages=4440622 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-ms_objecter,host=stefanosd1,id=2,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_client,host=stefanosd1,id=2,type=osd get=2607136,get_or_fail_fail=0,get_or_fail_success=2607136,get_started=0,get_sum=5224700544,max=104857600,put=2607136,put_sum=5224700544,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=bluefs,host=stefanosd1,id=2,type=osd bytes_written_slow=0,bytes_written_sst=9065815,bytes_written_wal=901884611,db_total_bytes=4294967296,db_used_bytes=546308096,files_written_sst=3,files_written_wal=2,gift_bytes=0,log_bytes=225726464,log_compactions=1,logged_bytes=1195945984,max_bytes_db=1234173952,max_bytes_slow=0,max_bytes_wal=0,num_files=11,reclaim_bytes=0,slow_total_bytes=0,slow_used_bytes=0,wal_total_bytes=0,wal_used_bytes=0 1587117698000000000
+> ceph,collection=recoverystate_perf,host=stefanosd1,id=2,type=osd activating_latency.avgcount=88,activating_latency.avgtime=0.086149065,activating_latency.sum=7.581117751,active_latency.avgcount=29,active_latency.avgtime=1790.849396082,active_latency.sum=51934.632486379,backfilling_latency.avgcount=0,backfilling_latency.avgtime=0,backfilling_latency.sum=0,clean_latency.avgcount=29,clean_latency.avgtime=1790.754765195,clean_latency.sum=51931.888190683,down_latency.avgcount=0,down_latency.avgtime=0,down_latency.sum=0,getinfo_latency.avgcount=134,getinfo_latency.avgtime=0.427567953,getinfo_latency.sum=57.294105786,getlog_latency.avgcount=88,getlog_latency.avgtime=0.011810192,getlog_latency.sum=1.03929697,getmissing_latency.avgcount=88,getmissing_latency.avgtime=0.000104598,getmissing_latency.sum=0.009204673,incomplete_latency.avgcount=0,incomplete_latency.avgtime=0,incomplete_latency.sum=0,initial_latency.avgcount=150,initial_latency.avgtime=0.001251361,initial_latency.sum=0.187704197,notbackfilling_latency.avgcount=0,notbackfilling_latency.avgtime=0,notbackfilling_latency.sum=0,notrecovering_latency.avgcount=0,notrecovering_latency.avgtime=0,notrecovering_latency.sum=0,peering_latency.avgcount=134,peering_latency.avgtime=0.998405763,peering_latency.sum=133.786372331,primary_latency.avgcount=75,primary_latency.avgtime=693.473306562,primary_latency.sum=52010.497992212,recovered_latency.avgcount=88,recovered_latency.avgtime=0.000609715,recovered_latency.sum=0.053654964,recovering_latency.avgcount=1,recovering_latency.avgtime=0.100713031,recovering_latency.sum=0.100713031,replicaactive_latency.avgcount=21,replicaactive_latency.avgtime=1790.852354921,replicaactive_latency.sum=37607.89945336,repnotrecovering_latency.avgcount=21,repnotrecovering_latency.avgtime=1790.852315529,repnotrecovering_latency.sum=37607.898626121,reprecovering_latency.avgcount=0,reprecovering_latency.avgtime=0,reprecovering_latency.sum=0,repwaitbackfillreserved_latency.avgcount=0,repwaitbackfillreserved_latency.avgtime=0,repwaitbackfillreserved_latency.sum=0,repwaitrecoveryreserved_latency.avgcount=0,repwaitrecoveryreserved_latency.avgtime=0,repwaitrecoveryreserved_latency.sum=0,reset_latency.avgcount=346,reset_latency.avgtime=0.126826803,reset_latency.sum=43.882073917,start_latency.avgcount=346,start_latency.avgtime=0.000233277,start_latency.sum=0.080713962,started_latency.avgcount=196,started_latency.avgtime=457.885378797,started_latency.sum=89745.534244237,stray_latency.avgcount=212,stray_latency.avgtime=1.013774396,stray_latency.sum=214.920172121,waitactingchange_latency.avgcount=0,waitactingchange_latency.avgtime=0,waitactingchange_latency.sum=0,waitlocalbackfillreserved_latency.avgcount=0,waitlocalbackfillreserved_latency.avgtime=0,waitlocalbackfillreserved_latency.sum=0,waitlocalrecoveryreserved_latency.avgcount=1,waitlocalrecoveryreserved_latency.avgtime=0.001572379,waitlocalrecoveryreserved_latency.sum=0.001572379,waitremotebackfillreserved_latency.avgcount=0,waitremotebackfillreserved_latency.avgtime=0,waitremotebackfillreserved_latency.sum=0,waitremoterecoveryreserved_latency.avgcount=1,waitremoterecoveryreserved_latency.avgtime=0.012729633,waitremoterecoveryreserved_latency.sum=0.012729633,waitupthru_latency.avgcount=88,waitupthru_latency.avgtime=0.857137729,waitupthru_latency.sum=75.428120205 1587117698000000000
+> ceph,collection=throttle-objecter_ops,host=stefanosd1,id=2,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=1024,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=bluestore,host=stefanosd1,id=2,type=osd bluestore_allocated=24248320,bluestore_blob_split=0,bluestore_blobs=83,bluestore_buffer_bytes=614400,bluestore_buffer_hit_bytes=161362,bluestore_buffer_miss_bytes=534799,bluestore_buffers=41,bluestore_compressed=0,bluestore_compressed_allocated=0,bluestore_compressed_original=0,bluestore_extent_compress=0,bluestore_extents=83,bluestore_fragmentation_micros=1,bluestore_gc_merged=0,bluestore_onode_hits=723852,bluestore_onode_misses=364,bluestore_onode_reshard=0,bluestore_onode_shard_hits=0,bluestore_onode_shard_misses=0,bluestore_onodes=348,bluestore_read_eio=0,bluestore_reads_with_retries=0,bluestore_stored=1984402,bluestore_txc=295997,bluestore_write_big=0,bluestore_write_big_blobs=0,bluestore_write_big_bytes=0,bluestore_write_small=60,bluestore_write_small_bytes=343843,bluestore_write_small_deferred=22,bluestore_write_small_new=38,bluestore_write_small_pre_read=22,bluestore_write_small_unused=0,commit_lat.avgcount=295997,commit_lat.avgtime=0.006994931,commit_lat.sum=2070.478673619,compress_lat.avgcount=0,compress_lat.avgtime=0,compress_lat.sum=0,compress_rejected_count=0,compress_success_count=0,csum_lat.avgcount=47,csum_lat.avgtime=0.000034434,csum_lat.sum=0.001618423,decompress_lat.avgcount=0,decompress_lat.avgtime=0,decompress_lat.sum=0,deferred_write_bytes=0,deferred_write_ops=0,kv_commit_lat.avgcount=291889,kv_commit_lat.avgtime=0.006347015,kv_commit_lat.sum=1852.624108527,kv_final_lat.avgcount=291885,kv_final_lat.avgtime=0.00004358,kv_final_lat.sum=12.720529751,kv_flush_lat.avgcount=291889,kv_flush_lat.avgtime=0.000000211,kv_flush_lat.sum=0.061636079,kv_sync_lat.avgcount=291889,kv_sync_lat.avgtime=0.006347227,kv_sync_lat.sum=1852.685744606,omap_lower_bound_lat.avgcount=1,omap_lower_bound_lat.avgtime=0.000004482,omap_lower_bound_lat.sum=0.000004482,omap_next_lat.avgcount=6933,omap_next_lat.avgtime=0.000003956,omap_next_lat.sum=0.027427456,omap_seek_to_first_lat.avgcount=309,omap_seek_to_first_lat.avgtime=0.000005879,omap_seek_to_first_lat.sum=0.001816658,omap_upper_bound_lat.avgcount=0,omap_upper_bound_lat.avgtime=0,omap_upper_bound_lat.sum=0,read_lat.avgcount=229,read_lat.avgtime=0.000394981,read_lat.sum=0.090450704,read_onode_meta_lat.avgcount=295,read_onode_meta_lat.avgtime=0.000016832,read_onode_meta_lat.sum=0.004965516,read_wait_aio_lat.avgcount=66,read_wait_aio_lat.avgtime=0.001237841,read_wait_aio_lat.sum=0.081697561,state_aio_wait_lat.avgcount=295997,state_aio_wait_lat.avgtime=0.000000357,state_aio_wait_lat.sum=0.105827433,state_deferred_aio_wait_lat.avgcount=0,state_deferred_aio_wait_lat.avgtime=0,state_deferred_aio_wait_lat.sum=0,state_deferred_cleanup_lat.avgcount=0,state_deferred_cleanup_lat.avgtime=0,state_deferred_cleanup_lat.sum=0,state_deferred_queued_lat.avgcount=0,state_deferred_queued_lat.avgtime=0,state_deferred_queued_lat.sum=0,state_done_lat.avgcount=295986,state_done_lat.avgtime=0.000003017,state_done_lat.sum=0.893199127,state_finishing_lat.avgcount=295986,state_finishing_lat.avgtime=0.000000306,state_finishing_lat.sum=0.090792683,state_io_done_lat.avgcount=295997,state_io_done_lat.avgtime=0.000001066,state_io_done_lat.sum=0.315577655,state_kv_commiting_lat.avgcount=295997,state_kv_commiting_lat.avgtime=0.006423586,state_kv_commiting_lat.sum=1901.362268572,state_kv_done_lat.avgcount=295997,state_kv_done_lat.avgtime=0.00000155,state_kv_done_lat.sum=0.458963064,state_kv_queued_lat.avgcount=295997,state_kv_queued_lat.avgtime=0.000477234,state_kv_queued_lat.sum=141.260101773,state_prepare_lat.avgcount=295997,state_prepare_lat.avgtime=0.000091806,state_prepare_lat.sum=27.174436583,submit_lat.avgcount=295997,submit_lat.avgtime=0.000135729,submit_lat.sum=40.17557682,throttle_lat.avgcount=295997,throttle_lat.avgtime=0.000002734,throttle_lat.sum=0.809479837,write_pad_bytes=151773,write_penalty_read_ops=0 1587117698000000000
+> ceph,collection=throttle-bluestore_throttle_bytes,host=stefanosd1,id=2,type=osd get=295997,get_or_fail_fail=0,get_or_fail_success=0,get_started=295997,get_sum=198686579299,max=67108864,put=291889,put_sum=198686579299,take=0,take_sum=0,val=0,wait.avgcount=83,wait.avgtime=0.003670612,wait.sum=0.304660858 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-cluster,host=stefanosd1,id=2,type=osd get=452060,get_or_fail_fail=0,get_or_fail_success=452060,get_started=0,get_sum=269934345,max=104857600,put=452060,put_sum=269934345,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-bluestore_throttle_deferred_bytes,host=stefanosd1,id=2,type=osd get=11,get_or_fail_fail=0,get_or_fail_success=11,get_started=0,get_sum=7723117,max=201326592,put=0,put_sum=0,take=0,take_sum=0,val=7723117,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_server,host=stefanosd1,id=2,type=osd get=2607433,get_or_fail_fail=0,get_or_fail_success=2607433,get_started=0,get_sum=5225295732,max=104857600,put=2607433,put_sum=5225295732,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=finisher-objecter-finisher-0,host=stefanosd1,id=2,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000
+> ceph,collection=cct,host=stefanosd1,id=2,type=osd total_workers=6,unhealthy_workers=0 1587117698000000000
+> ceph,collection=AsyncMessenger::Worker-2,host=stefanosd1,id=2,type=osd msgr_active_connections=670,msgr_created_connections=13455,msgr_recv_bytes=6334605563,msgr_recv_messages=3287843,msgr_running_fast_dispatch_time=137.016615819,msgr_running_recv_time=240.687997039,msgr_running_send_time=471.710658466,msgr_running_total_time=1034.029109337,msgr_send_bytes=9753423475,msgr_send_messages=3439611 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-client,host=stefanosd1,id=2,type=osd get=710355,get_or_fail_fail=0,get_or_fail_success=710355,get_started=0,get_sum=166306283,max=104857600,put=710355,put_sum=166306283,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_server,host=stefanosd1,id=2,type=osd get=2607433,get_or_fail_fail=0,get_or_fail_success=2607433,get_started=0,get_sum=5225295732,max=104857600,put=2607433,put_sum=5225295732,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=AsyncMessenger::Worker-0,host=stefanosd1,id=2,type=osd msgr_active_connections=705,msgr_created_connections=17953,msgr_recv_bytes=7261438733,msgr_recv_messages=4496034,msgr_running_fast_dispatch_time=254.716476808,msgr_running_recv_time=272.196741555,msgr_running_send_time=571.102924903,msgr_running_total_time=1338.461077493,msgr_send_bytes=10772250508,msgr_send_messages=4192781 1587117698000000000
+> ceph,collection=rocksdb,host=stefanosd1,id=2,type=osd compact=0,compact_queue_len=0,compact_queue_merge=0,compact_range=0,get=1424,get_latency.avgcount=1424,get_latency.avgtime=0.000030752,get_latency.sum=0.043792142,rocksdb_write_delay_time.avgcount=0,rocksdb_write_delay_time.avgtime=0,rocksdb_write_delay_time.sum=0,rocksdb_write_memtable_time.avgcount=0,rocksdb_write_memtable_time.avgtime=0,rocksdb_write_memtable_time.sum=0,rocksdb_write_pre_and_post_time.avgcount=0,rocksdb_write_pre_and_post_time.avgtime=0,rocksdb_write_pre_and_post_time.sum=0,rocksdb_write_wal_time.avgcount=0,rocksdb_write_wal_time.avgtime=0,rocksdb_write_wal_time.sum=0,submit_latency.avgcount=295997,submit_latency.avgtime=0.000173137,submit_latency.sum=51.248072285,submit_sync_latency.avgcount=291889,submit_sync_latency.avgtime=0.006094397,submit_sync_latency.sum=1778.887521449,submit_transaction=295997,submit_transaction_sync=291889 1587117698000000000
+> ceph,collection=throttle-osd_client_bytes,host=stefanosd1,id=2,type=osd get=698701,get_or_fail_fail=0,get_or_fail_success=698701,get_started=0,get_sum=165630172,max=524288000,put=920880,put_sum=165630172,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
+> ceph,collection=mds_sessions,host=stefanmds1,id=stefanmds1,type=mds average_load=0,avg_session_uptime=0,session_add=0,session_count=0,session_remove=0,sessions_open=0,sessions_stale=0,total_load=0 1587117476000000000
+> ceph,collection=mempool,host=stefanmds1,id=stefanmds1,type=mds bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=0,bluefs_items=0,bluestore_alloc_bytes=0,bluestore_alloc_items=0,bluestore_cache_data_bytes=0,bluestore_cache_data_items=0,bluestore_cache_onode_bytes=0,bluestore_cache_onode_items=0,bluestore_cache_other_bytes=0,bluestore_cache_other_items=0,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=0,bluestore_txc_items=0,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=0,bluestore_writing_deferred_items=0,bluestore_writing_items=0,buffer_anon_bytes=132069,buffer_anon_items=82,buffer_meta_bytes=0,buffer_meta_items=0,mds_co_bytes=44208,mds_co_items=154,osd_bytes=0,osd_items=0,osd_mapbl_bytes=0,osd_mapbl_items=0,osd_pglog_bytes=0,osd_pglog_items=0,osdmap_bytes=16952,osdmap_items=139,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117476000000000
+> ceph,collection=objecter,host=stefanmds1,id=stefanmds1,type=mds command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=1,omap_del=0,omap_rd=28,omap_wr=1,op=33,op_active=0,op_laggy=0,op_pg=0,op_r=26,op_reply=33,op_resend=2,op_rmw=0,op_send=35,op_send_bytes=364,op_w=7,osd_laggy=0,osd_session_close=91462,osd_session_open=91468,osd_sessions=6,osdop_append=0,osdop_call=0,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=5,osdop_getxattr=14,osdop_mapext=0,osdop_notify=0,osdop_other=0,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=8,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=2,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=1,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117476000000000
+> ceph,collection=cct,host=stefanmds1,id=stefanmds1,type=mds total_workers=1,unhealthy_workers=0 1587117476000000000
+> ceph,collection=mds_server,host=stefanmds1,id=stefanmds1,type=mds cap_revoke_eviction=0,dispatch_client_request=0,dispatch_server_request=0,handle_client_request=0,handle_client_session=0,handle_slave_request=0,req_create_latency.avgcount=0,req_create_latency.avgtime=0,req_create_latency.sum=0,req_getattr_latency.avgcount=0,req_getattr_latency.avgtime=0,req_getattr_latency.sum=0,req_getfilelock_latency.avgcount=0,req_getfilelock_latency.avgtime=0,req_getfilelock_latency.sum=0,req_link_latency.avgcount=0,req_link_latency.avgtime=0,req_link_latency.sum=0,req_lookup_latency.avgcount=0,req_lookup_latency.avgtime=0,req_lookup_latency.sum=0,req_lookuphash_latency.avgcount=0,req_lookuphash_latency.avgtime=0,req_lookuphash_latency.sum=0,req_lookupino_latency.avgcount=0,req_lookupino_latency.avgtime=0,req_lookupino_latency.sum=0,req_lookupname_latency.avgcount=0,req_lookupname_latency.avgtime=0,req_lookupname_latency.sum=0,req_lookupparent_latency.avgcount=0,req_lookupparent_latency.avgtime=0,req_lookupparent_latency.sum=0,req_lookupsnap_latency.avgcount=0,req_lookupsnap_latency.avgtime=0,req_lookupsnap_latency.sum=0,req_lssnap_latency.avgcount=0,req_lssnap_latency.avgtime=0,req_lssnap_latency.sum=0,req_mkdir_latency.avgcount=0,req_mkdir_latency.avgtime=0,req_mkdir_latency.sum=0,req_mknod_latency.avgcount=0,req_mknod_latency.avgtime=0,req_mknod_latency.sum=0,req_mksnap_latency.avgcount=0,req_mksnap_latency.avgtime=0,req_mksnap_latency.sum=0,req_open_latency.avgcount=0,req_open_latency.avgtime=0,req_open_latency.sum=0,req_readdir_latency.avgcount=0,req_readdir_latency.avgtime=0,req_readdir_latency.sum=0,req_rename_latency.avgcount=0,req_rename_latency.avgtime=0,req_rename_latency.sum=0,req_renamesnap_latency.avgcount=0,req_renamesnap_latency.avgtime=0,req_renamesnap_latency.sum=0,req_rmdir_latency.avgcount=0,req_rmdir_latency.avgtime=0,req_rmdir_latency.sum=0,req_rmsnap_latency.avgcount=0,req_rmsnap_latency.avgtime=0,req_rmsnap_latency.sum=0,req_rmxattr_latency.avgcount=0,req_rmxattr_latency.avgtime=0,req_rmxattr_latency.sum=0,req_setattr_latency.avgcount=0,req_setattr_latency.avgtime=0,req_setattr_latency.sum=0,req_setdirlayout_latency.avgcount=0,req_setdirlayout_latency.avgtime=0,req_setdirlayout_latency.sum=0,req_setfilelock_latency.avgcount=0,req_setfilelock_latency.avgtime=0,req_setfilelock_latency.sum=0,req_setlayout_latency.avgcount=0,req_setlayout_latency.avgtime=0,req_setlayout_latency.sum=0,req_setxattr_latency.avgcount=0,req_setxattr_latency.avgtime=0,req_setxattr_latency.sum=0,req_symlink_latency.avgcount=0,req_symlink_latency.avgtime=0,req_symlink_latency.sum=0,req_unlink_latency.avgcount=0,req_unlink_latency.avgtime=0,req_unlink_latency.sum=0 1587117476000000000
+> ceph,collection=AsyncMessenger::Worker-2,host=stefanmds1,id=stefanmds1,type=mds msgr_active_connections=84,msgr_created_connections=68511,msgr_recv_bytes=238078,msgr_recv_messages=2655,msgr_running_fast_dispatch_time=0.004247777,msgr_running_recv_time=25.369012545,msgr_running_send_time=3.743427461,msgr_running_total_time=130.277111559,msgr_send_bytes=172767043,msgr_send_messages=18172 1587117476000000000
+> ceph,collection=mds_log,host=stefanmds1,id=stefanmds1,type=mds ev=0,evadd=0,evex=0,evexd=0,evexg=0,evtrm=0,expos=4194304,jlat.avgcount=0,jlat.avgtime=0,jlat.sum=0,rdpos=4194304,replayed=1,seg=1,segadd=0,segex=0,segexd=0,segexg=0,segtrm=0,wrpos=0 1587117476000000000
+> ceph,collection=AsyncMessenger::Worker-0,host=stefanmds1,id=stefanmds1,type=mds 
msgr_active_connections=595,msgr_created_connections=943825,msgr_recv_bytes=78618003,msgr_recv_messages=914080,msgr_running_fast_dispatch_time=0.001544386,msgr_running_recv_time=459.627068807,msgr_running_send_time=469.337032316,msgr_running_total_time=2744.084305898,msgr_send_bytes=61684163658,msgr_send_messages=1858008 1587117476000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-mds,host=stefanmds1,id=stefanmds1,type=mds get=1216458,get_or_fail_fail=0,get_or_fail_success=1216458,get_started=0,get_sum=51976882,max=104857600,put=1216458,put_sum=51976882,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000 +> ceph,collection=AsyncMessenger::Worker-1,host=stefanmds1,id=stefanmds1,type=mds msgr_active_connections=226,msgr_created_connections=42679,msgr_recv_bytes=63140151,msgr_recv_messages=299727,msgr_running_fast_dispatch_time=26.316138629,msgr_running_recv_time=36.969916165,msgr_running_send_time=70.457421128,msgr_running_total_time=226.230019936,msgr_send_bytes=193154464,msgr_send_messages=310481 1587117476000000000 +> ceph,collection=mds,host=stefanmds1,id=stefanmds1,type=mds caps=0,dir_commit=0,dir_fetch=12,dir_merge=0,dir_split=0,exported=0,exported_inodes=0,forward=0,imported=0,imported_inodes=0,inode_max=2147483647,inodes=10,inodes_bottom=3,inodes_expired=0,inodes_pin_tail=0,inodes_pinned=10,inodes_top=7,inodes_with_caps=0,load_cent=0,openino_backtrace_fetch=0,openino_dir_fetch=0,openino_peer_discover=0,q=0,reply=0,reply_latency.avgcount=0,reply_latency.avgtime=0,reply_latency.sum=0,request=0,subtrees=2,traverse=0,traverse_dir_fetch=0,traverse_discover=0,traverse_forward=0,traverse_hit=0,traverse_lock=0,traverse_remote_ino=0 1587117476000000000 +> ceph,collection=purge_queue,host=stefanmds1,id=stefanmds1,type=mds pq_executed=0,pq_executing=0,pq_executing_ops=0 1587117476000000000 +> ceph,collection=throttle-write_buf_throttle,host=stefanmds1,id=stefanmds1,type=mds get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=3758096384,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000 +> ceph,collection=throttle-write_buf_throttle-0x5624e9377f40,host=stefanmds1,id=stefanmds1,type=mds get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=3758096384,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000 +> ceph,collection=mds_cache,host=stefanmds1,id=stefanmds1,type=mds ireq_enqueue_scrub=0,ireq_exportdir=0,ireq_flush=0,ireq_fragmentdir=0,ireq_fragstats=0,ireq_inodestats=0,num_recovering_enqueued=0,num_recovering_prioritized=0,num_recovering_processing=0,num_strays=0,num_strays_delayed=0,num_strays_enqueuing=0,recovery_completed=0,recovery_started=0,strays_created=0,strays_enqueued=0,strays_migrated=0,strays_reintegrated=0 1587117476000000000 +> ceph,collection=throttle-objecter_bytes,host=stefanmds1,id=stefanmds1,type=mds get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=16,put_sum=1016,take=33,take_sum=1016,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000 +> ceph,collection=throttle-objecter_ops,host=stefanmds1,id=stefanmds1,type=mds get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=1024,put=33,put_sum=33,take=33,take_sum=33,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000 +> ceph,collection=mds_mem,host=stefanmds1,id=stefanmds1,type=mds 
cap=0,cap+=0,cap-=0,dir=12,dir+=12,dir-=0,dn=10,dn+=10,dn-=0,heap=322284,ino=13,ino+=13,ino-=0,rss=76032 1587117476000000000 +> ceph,collection=finisher-PurgeQueue,host=stefanmds1,id=stefanmds1,type=mds complete_latency.avgcount=4,complete_latency.avgtime=0.000176985,complete_latency.sum=0.000707941,queue_len=0 1587117476000000000 +> ceph,collection=cct,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw total_workers=0,unhealthy_workers=0 1587117156000000000 +> ceph,collection=throttle-objecter_bytes,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=791732,get_or_fail_fail=0,get_or_fail_success=791732,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000 +> ceph,collection=rgw,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw cache_hit=0,cache_miss=791706,failed_req=0,get=0,get_b=0,get_initial_lat.avgcount=0,get_initial_lat.avgtime=0,get_initial_lat.sum=0,keystone_token_cache_hit=0,keystone_token_cache_miss=0,pubsub_event_lost=0,pubsub_event_triggered=0,pubsub_events=0,pubsub_push_failed=0,pubsub_push_ok=0,pubsub_push_pending=0,pubsub_store_fail=0,pubsub_store_ok=0,put=0,put_b=0,put_initial_lat.avgcount=0,put_initial_lat.avgtime=0,put_initial_lat.sum=0,qactive=0,qlen=0,req=791705 1587117156000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-radosclient,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=2697988,get_or_fail_fail=0,get_or_fail_success=2697988,get_started=0,get_sum=444563051,max=104857600,put=2697988,put_sum=444563051,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000 +> ceph,collection=finisher-radosclient,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw complete_latency.avgcount=2,complete_latency.avgtime=0.003530161,complete_latency.sum=0.007060323,queue_len=0 1587117156000000000 +> ceph,collection=throttle-rgw_async_rados_ops,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=64,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000 +> ceph,collection=throttle-objecter_ops,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=791732,get_or_fail_fail=0,get_or_fail_success=791732,get_started=0,get_sum=791732,max=24576,put=791732,put_sum=791732,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000 +> ceph,collection=throttle-objecter_bytes-0x5598969981c0,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=1637900,get_or_fail_fail=0,get_or_fail_success=1637900,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000 +> ceph,collection=objecter,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw 
command_active=0,command_resend=0,command_send=0,linger_active=8,linger_ping=1905736,linger_resend=4,linger_send=13,map_epoch=203,map_full=0,map_inc=17,omap_del=0,omap_rd=0,omap_wr=0,op=2697488,op_active=0,op_laggy=0,op_pg=0,op_r=791730,op_reply=2697476,op_resend=1,op_rmw=0,op_send=2697490,op_send_bytes=362,op_w=1905758,osd_laggy=5,osd_session_close=59558,osd_session_open=59566,osd_sessions=8,osdop_append=0,osdop_call=1,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=8,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=791714,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=16,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=791706,osdop_truncate=0,osdop_watch=1905750,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117156000000000 +> ceph,collection=AsyncMessenger::Worker-2,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw msgr_active_connections=11,msgr_created_connections=59839,msgr_recv_bytes=342697143,msgr_recv_messages=1441603,msgr_running_fast_dispatch_time=161.807937536,msgr_running_recv_time=118.174064257,msgr_running_send_time=207.679154333,msgr_running_total_time=698.527662129,msgr_send_bytes=530785909,msgr_send_messages=1679950 1587117156000000000 +> ceph,collection=mempool,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=0,bluefs_items=0,bluestore_alloc_bytes=0,bluestore_alloc_items=0,bluestore_cache_data_bytes=0,bluestore_cache_data_items=0,bluestore_cache_onode_bytes=0,bluestore_cache_onode_items=0,bluestore_cache_other_bytes=0,bluestore_cache_other_items=0,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=0,bluestore_txc_items=0,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=0,bluestore_writing_deferred_items=0,bluestore_writing_items=0,buffer_anon_bytes=225471,buffer_anon_items=163,buffer_meta_bytes=0,buffer_meta_items=0,mds_co_bytes=0,mds_co_items=0,osd_bytes=0,osd_items=0,osd_mapbl_bytes=0,osd_mapbl_items=0,osd_pglog_bytes=0,osd_pglog_items=0,osdmap_bytes=33904,osdmap_items=278,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117156000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-radosclient-0x559896998120,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=1652935,get_or_fail_fail=0,get_or_fail_success=1652935,get_started=0,get_sum=276333029,max=104857600,put=1652935,put_sum=276333029,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000 +> ceph,collection=AsyncMessenger::Worker-1,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw msgr_active_connections=17,msgr_created_connections=84859,msgr_recv_bytes=211170759,msgr_recv_messages=922646,msgr_running_fast_dispatch_time=31.487443762,msgr_running_recv_time=83.190789333,msgr_running_send_time=174.670510496,msgr_running_total_time=484.22086275,msgr_send_bytes=1322113179,msgr_send_messages=1636839 1587117156000000000 +> ceph,collection=finisher-radosclient-0x559896998080,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117156000000000 +> 
ceph,collection=throttle-objecter_ops-0x559896997b80,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=1637900,get_or_fail_fail=0,get_or_fail_success=1637900,get_started=0,get_sum=1637900,max=24576,put=1637900,put_sum=1637900,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000 +> ceph,collection=AsyncMessenger::Worker-0,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw msgr_active_connections=18,msgr_created_connections=74757,msgr_recv_bytes=489001094,msgr_recv_messages=1986686,msgr_running_fast_dispatch_time=168.60950961,msgr_running_recv_time=142.903031533,msgr_running_send_time=267.911165712,msgr_running_total_time=824.885614951,msgr_send_bytes=707973504,msgr_send_messages=2463727 1587117156000000000 +> ceph,collection=objecter-0x559896997720,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=8,omap_del=0,omap_rd=0,omap_wr=0,op=1637998,op_active=0,op_laggy=0,op_pg=0,op_r=1062803,op_reply=1637998,op_resend=15,op_rmw=0,op_send=1638013,op_send_bytes=63321099,op_w=575195,osd_laggy=0,osd_session_close=125555,osd_session_open=125563,osd_sessions=8,osdop_append=0,osdop_call=1637886,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=112,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=0,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=0,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117156000000000 ``` diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index 9a2fc47a3..c875de8df 100644 --- a/plugins/inputs/ceph/ceph.go +++ b/plugins/inputs/ceph/ceph.go @@ -18,8 +18,12 @@ const ( measurement = "ceph" typeMon = "monitor" typeOsd = "osd" + typeMds = "mds" + typeRgw = "rgw" osdPrefix = "ceph-osd" monPrefix = "ceph-mon" + mdsPrefix = "ceph-mds" + rgwPrefix = "ceph-client" sockSuffix = "asok" ) @@ -27,6 +31,8 @@ type Ceph struct { CephBinary string OsdPrefix string MonPrefix string + MdsPrefix string + RgwPrefix string SocketDir string SocketSuffix string CephUser string @@ -36,7 +42,7 @@ type Ceph struct { } func (c *Ceph) Description() string { - return "Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster." + return "Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster." 
} var sampleConfig = ` @@ -55,6 +61,8 @@ var sampleConfig = ` ## prefix of MON and OSD socket files, used to determine socket type mon_prefix = "ceph-mon" osd_prefix = "ceph-osd" + mds_prefix = "ceph-mds" + rgw_prefix = "ceph-client" ## suffix used to identify socket files socket_suffix = "asok" @@ -148,6 +156,8 @@ func init() { CephBinary: "/usr/bin/ceph", OsdPrefix: osdPrefix, MonPrefix: monPrefix, + MdsPrefix: mdsPrefix, + RgwPrefix: rgwPrefix, SocketDir: "/var/run/ceph", SocketSuffix: sockSuffix, CephUser: "client.admin", @@ -165,6 +175,10 @@ var perfDump = func(binary string, socket *socket) (string, error) { cmdArgs = append(cmdArgs, "perf", "dump") } else if socket.sockType == typeMon { cmdArgs = append(cmdArgs, "perfcounters_dump") + } else if socket.sockType == typeMds { + cmdArgs = append(cmdArgs, "perf", "dump") + } else if socket.sockType == typeRgw { + cmdArgs = append(cmdArgs, "perf", "dump") } else { return "", fmt.Errorf("ignoring unknown socket type: %s", socket.sockType) } @@ -199,7 +213,18 @@ var findSockets = func(c *Ceph) ([]*socket, error) { sockPrefix = osdPrefix } - if sockType == typeOsd || sockType == typeMon { + if strings.HasPrefix(f, c.MdsPrefix) { + sockType = typeMds + sockPrefix = mdsPrefix + + } + if strings.HasPrefix(f, c.RgwPrefix) { + sockType = typeRgw + sockPrefix = rgwPrefix + + } + + if sockType == typeOsd || sockType == typeMon || sockType == typeMds || sockType == typeRgw { path := filepath.Join(c.SocketDir, f) sockets = append(sockets, &socket{parseSockId(f, sockPrefix, c.SocketSuffix), sockType, path}) } diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index 6403d6994..78da3438d 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -42,6 +42,20 @@ func TestParseOsdDump(t *testing.T) { assert.Equal(t, float64(0), dump["mutex-FileJournal::finisher_lock"]["wait.avgcount"]) } +func TestParseMdsDump(t *testing.T) { + dump, err := parseDump(mdsPerfDump) + assert.NoError(t, err) + assert.InEpsilon(t, 2408386.600934982, dump["mds"]["reply_latency.sum"], epsilon) + assert.Equal(t, float64(0), dump["throttle-write_buf_throttle"]["wait.avgcount"]) +} + +func TestParseRgwDump(t *testing.T) { + dump, err := parseDump(rgwPerfDump) + assert.NoError(t, err) + assert.InEpsilon(t, 0.002219876, dump["rgw"]["get_initial_lat.sum"], epsilon) + assert.Equal(t, float64(0), dump["rgw"]["put_initial_lat.avgcount"]) +} + func TestDecodeStatus(t *testing.T) { acc := &testutil.Accumulator{} err := decodeStatus(acc, clusterStatusDump) @@ -105,6 +119,8 @@ func TestFindSockets(t *testing.T) { CephBinary: "foo", OsdPrefix: "ceph-osd", MonPrefix: "ceph-mon", + MdsPrefix: "ceph-mds", + RgwPrefix: "ceph-client", SocketDir: tmpdir, SocketSuffix: "asok", CephUser: "client.admin", @@ -126,6 +142,12 @@ func TestFindSockets(t *testing.T) { for i := 1; i <= st.mons; i++ { assertFoundSocket(t, tmpdir, typeMon, i, sockets) } + for i := 1; i <= st.mdss; i++ { + assertFoundSocket(t, tmpdir, typeMds, i, sockets) + } + for i := 1; i <= st.rgws; i++ { + assertFoundSocket(t, tmpdir, typeRgw, i, sockets) + } cleanupTestFiles(tmpdir, st) } } @@ -134,6 +156,10 @@ func assertFoundSocket(t *testing.T, dir, sockType string, i int, sockets []*soc var prefix string if sockType == typeOsd { prefix = osdPrefix + } else if sockType == typeMds { + prefix = mdsPrefix + } else if sockType == typeRgw { + prefix = rgwPrefix } else { prefix = monPrefix } @@ -182,17 +208,27 @@ func tstFileApply(st *SockTest, fn func(prefix string, i int)) { for i 
:= 1; i <= st.mons; i++ { fn(monPrefix, i) } + for i := 1; i <= st.mdss; i++ { + fn(mdsPrefix, i) + } + for i := 1; i <= st.rgws; i++ { + fn(rgwPrefix, i) + } } type SockTest struct { osds int mons int + mdss int + rgws int } var sockTestParams = []*SockTest{ { osds: 2, mons: 2, + mdss: 2, + rgws: 2, }, { mons: 1, @@ -200,6 +236,12 @@ var sockTestParams = []*SockTest{ { osds: 1, }, + { + mdss: 1, + }, + { + rgws: 1, + }, {}, } @@ -722,6 +764,996 @@ var osdPerfDump = ` "wait": { "avgcount": 0, "sum": 0.000000000}}} ` +var mdsPerfDump = ` +{ + "AsyncMessenger::Worker-0": { + "msgr_recv_messages": 2723536628, + "msgr_send_messages": 1160771414, + "msgr_recv_bytes": 1112936719134, + "msgr_send_bytes": 1368194904867, + "msgr_created_connections": 18281, + "msgr_active_connections": 83, + "msgr_running_total_time": 109001.938705141, + "msgr_running_send_time": 33686.215323581, + "msgr_running_recv_time": 8374950.111041426, + "msgr_running_fast_dispatch_time": 5828.083761243 + }, + "AsyncMessenger::Worker-1": { + "msgr_recv_messages": 1426105165, + "msgr_send_messages": 783174767, + "msgr_recv_bytes": 800620150187, + "msgr_send_bytes": 1394738277392, + "msgr_created_connections": 17677, + "msgr_active_connections": 100, + "msgr_running_total_time": 70660.929329800, + "msgr_running_send_time": 24190.940207198, + "msgr_running_recv_time": 3920894.209204916, + "msgr_running_fast_dispatch_time": 8206.816536602 + }, + "AsyncMessenger::Worker-2": { + "msgr_recv_messages": 3471200310, + "msgr_send_messages": 2757725529, + "msgr_recv_bytes": 1331676471794, + "msgr_send_bytes": 2593968875674, + "msgr_created_connections": 16714, + "msgr_active_connections": 73, + "msgr_running_total_time": 167020.893916556, + "msgr_running_send_time": 61197.682840176, + "msgr_running_recv_time": 5816036.495319415, + "msgr_running_fast_dispatch_time": 8581.768789481 + }, + "finisher-PurgeQueue": { + "queue_len": 0, + "complete_latency": { + "avgcount": 20170260, + "sum": 70213.859039869, + "avgtime": 0.003481058 + } + }, + "mds": { + "request": 2167457412, + "reply": 2167457403, + "reply_latency": { + "avgcount": 2167457403, + "sum": 2408386.600934982, + "avgtime": 0.001111157 + }, + "forward": 0, + "dir_fetch": 585012985, + "dir_commit": 58926158, + "dir_split": 8, + "dir_merge": 7, + "inode_max": 2147483647, + "inodes": 39604287, + "inodes_top": 9743493, + "inodes_bottom": 29063656, + "inodes_pin_tail": 797138, + "inodes_pinned": 25685011, + "inodes_expired": 1302542128, + "inodes_with_caps": 4517329, + "caps": 6370838, + "subtrees": 2, + "traverse": 2426357623, + "traverse_hit": 2202314009, + "traverse_forward": 0, + "traverse_discover": 0, + "traverse_dir_fetch": 35332112, + "traverse_remote_ino": 0, + "traverse_lock": 4371557, + "load_cent": 1966748, + "q": 976, + "exported": 0, + "exported_inodes": 0, + "imported": 0, + "imported_inodes": 0, + "openino_dir_fetch": 22725418, + "openino_backtrace_fetch": 6, + "openino_peer_discover": 0 + }, + "mds_cache": { + "num_strays": 384, + "num_strays_delayed": 0, + "num_strays_enqueuing": 0, + "strays_created": 29140050, + "strays_enqueued": 29134399, + "strays_reintegrated": 10171, + "strays_migrated": 0, + "num_recovering_processing": 0, + "num_recovering_enqueued": 0, + "num_recovering_prioritized": 0, + "recovery_started": 229, + "recovery_completed": 229, + "ireq_enqueue_scrub": 0, + "ireq_exportdir": 0, + "ireq_flush": 0, + "ireq_fragmentdir": 15, + "ireq_fragstats": 0, + "ireq_inodestats": 0 + }, + "mds_log": { + "evadd": 1920368707, + "evex": 1920372003, + "evtrm": 
1920372003, + "ev": 106627, + "evexg": 0, + "evexd": 4369, + "segadd": 2247990, + "segex": 2247995, + "segtrm": 2247995, + "seg": 123, + "segexg": 0, + "segexd": 5, + "expos": 24852063335817, + "wrpos": 24852205446582, + "rdpos": 22044255640175, + "jlat": { + "avgcount": 182241259, + "sum": 1732094.198366820, + "avgtime": 0.009504402 + }, + "replayed": 109923 + }, + "mds_mem": { + "ino": 39604292, + "ino+": 1307214891, + "ino-": 1267610599, + "dir": 22827008, + "dir+": 591593031, + "dir-": 568766023, + "dn": 39604761, + "dn+": 1376976677, + "dn-": 1337371916, + "cap": 6370838, + "cap+": 1720930015, + "cap-": 1714559177, + "rss": 167723320, + "heap": 322260, + "buf": 0 + }, + "mds_server": { + "dispatch_client_request": 2932764331, + "dispatch_server_request": 0, + "handle_client_request": 2167457412, + "handle_client_session": 10929454, + "handle_slave_request": 0, + "req_create_latency": { + "avgcount": 30590326, + "sum": 23887.274170412, + "avgtime": 0.000780876 + }, + "req_getattr_latency": { + "avgcount": 124767480, + "sum": 718160.497644305, + "avgtime": 0.005755991 + }, + "req_getfilelock_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_link_latency": { + "avgcount": 5636, + "sum": 2.371499732, + "avgtime": 0.000420777 + }, + "req_lookup_latency": { + "avgcount": 474590034, + "sum": 452548.849373476, + "avgtime": 0.000953557 + }, + "req_lookuphash_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_lookupino_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_lookupname_latency": { + "avgcount": 9794, + "sum": 54.118496591, + "avgtime": 0.005525678 + }, + "req_lookupparent_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_lookupsnap_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_lssnap_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_mkdir_latency": { + "avgcount": 13394317, + "sum": 13025.982105531, + "avgtime": 0.000972500 + }, + "req_mknod_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_mksnap_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_open_latency": { + "avgcount": 32849768, + "sum": 12862.382994977, + "avgtime": 0.000391551 + }, + "req_readdir_latency": { + "avgcount": 654394394, + "sum": 715669.609601541, + "avgtime": 0.001093636 + }, + "req_rename_latency": { + "avgcount": 6058807, + "sum": 2126.232719555, + "avgtime": 0.000350932 + }, + "req_renamesnap_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_rmdir_latency": { + "avgcount": 1901530, + "sum": 4064.121157858, + "avgtime": 0.002137290 + }, + "req_rmsnap_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_rmxattr_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_setattr_latency": { + "avgcount": 37051209, + "sum": 171198.037329531, + "avgtime": 0.004620578 + }, + "req_setdirlayout_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_setfilelock_latency": { + "avgcount": 765439143, + "sum": 262660.582883819, + "avgtime": 0.000343150 + }, + "req_setlayout_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_setxattr_latency": { + "avgcount": 41572, + "sum": 7.273371375, + "avgtime": 0.000174958 + }, + "req_symlink_latency": { + 
"avgcount": 329, + "sum": 0.117859965, + "avgtime": 0.000358236 + }, + "req_unlink_latency": { + "avgcount": 26363064, + "sum": 32119.149726314, + "avgtime": 0.001218339 + }, + "cap_revoke_eviction": 0 + }, + "mds_sessions": { + "session_count": 80, + "session_add": 90, + "session_remove": 10, + "sessions_open": 80, + "sessions_stale": 0, + "total_load": 112490, + "average_load": 1406, + "avg_session_uptime": 2221807 + }, + "objecter": { + "op_active": 0, + "op_laggy": 0, + "op_send": 955060080, + "op_send_bytes": 3178832110019, + "op_resend": 67, + "op_reply": 955060013, + "op": 955060013, + "op_r": 585982837, + "op_w": 369077176, + "op_rmw": 0, + "op_pg": 0, + "osdop_stat": 45924375, + "osdop_create": 31162274, + "osdop_read": 969513, + "osdop_write": 183211164, + "osdop_writefull": 1063233, + "osdop_writesame": 0, + "osdop_append": 0, + "osdop_zero": 2, + "osdop_truncate": 8, + "osdop_delete": 60594735, + "osdop_mapext": 0, + "osdop_sparse_read": 0, + "osdop_clonerange": 0, + "osdop_getxattr": 584941886, + "osdop_setxattr": 62324548, + "osdop_cmpxattr": 0, + "osdop_rmxattr": 0, + "osdop_resetxattrs": 0, + "osdop_tmap_up": 0, + "osdop_tmap_put": 0, + "osdop_tmap_get": 0, + "osdop_call": 0, + "osdop_watch": 0, + "osdop_notify": 0, + "osdop_src_cmpxattr": 0, + "osdop_pgls": 0, + "osdop_pgls_filter": 0, + "osdop_other": 32053182, + "linger_active": 0, + "linger_send": 0, + "linger_resend": 0, + "linger_ping": 0, + "poolop_active": 0, + "poolop_send": 0, + "poolop_resend": 0, + "poolstat_active": 0, + "poolstat_send": 0, + "poolstat_resend": 0, + "statfs_active": 0, + "statfs_send": 0, + "statfs_resend": 0, + "command_active": 0, + "command_send": 0, + "command_resend": 0, + "map_epoch": 66793, + "map_full": 0, + "map_inc": 1762, + "osd_sessions": 120, + "osd_session_open": 52554, + "osd_session_close": 52434, + "osd_laggy": 0, + "omap_wr": 106692727, + "omap_rd": 1170026044, + "omap_del": 5674762 + }, + "purge_queue": { + "pq_executing_ops": 0, + "pq_executing": 0, + "pq_executed": 29134399 + }, + "throttle-msgr_dispatch_throttler-mds": { + "val": 0, + "max": 104857600, + "get_started": 0, + "get": 7620842095, + "get_sum": 2681291022887, + "get_or_fail_fail": 53, + "get_or_fail_success": 7620842095, + "take": 0, + "take_sum": 0, + "put": 7620842095, + "put_sum": 2681291022887, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-objecter_bytes": { + "val": 0, + "max": 104857600, + "get_started": 0, + "get": 0, + "get_sum": 0, + "get_or_fail_fail": 0, + "get_or_fail_success": 0, + "take": 955060013, + "take_sum": 3172776432475, + "put": 862340641, + "put_sum": 3172776432475, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-objecter_ops": { + "val": 0, + "max": 1024, + "get_started": 0, + "get": 0, + "get_sum": 0, + "get_or_fail_fail": 0, + "get_or_fail_success": 0, + "take": 955060013, + "take_sum": 955060013, + "put": 955060013, + "put_sum": 955060013, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-write_buf_throttle": { + "val": 0, + "max": 3758096384, + "get_started": 0, + "get": 29134399, + "get_sum": 3160498139, + "get_or_fail_fail": 0, + "get_or_fail_success": 29134399, + "take": 0, + "take_sum": 0, + "put": 969905, + "put_sum": 3160498139, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-write_buf_throttle-0x561894f0b8e0": { + "val": 286270, + "max": 3758096384, + "get_started": 0, + "get": 
1920368707, + "get_sum": 2807949805409, + "get_or_fail_fail": 0, + "get_or_fail_success": 1920368707, + "take": 0, + "take_sum": 0, + "put": 182241259, + "put_sum": 2807949519139, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + } +} +` + +var rgwPerfDump = ` +{ + "AsyncMessenger::Worker-0": { + "msgr_recv_messages": 10684185, + "msgr_send_messages": 13448962, + "msgr_recv_bytes": 2622531258, + "msgr_send_bytes": 4195038384, + "msgr_created_connections": 8029, + "msgr_active_connections": 3, + "msgr_running_total_time": 3249.441108544, + "msgr_running_send_time": 739.821446096, + "msgr_running_recv_time": 310.354319110, + "msgr_running_fast_dispatch_time": 1915.410317430 + }, + "AsyncMessenger::Worker-1": { + "msgr_recv_messages": 2137773, + "msgr_send_messages": 3850070, + "msgr_recv_bytes": 503824366, + "msgr_send_bytes": 1130107261, + "msgr_created_connections": 11030, + "msgr_active_connections": 1, + "msgr_running_total_time": 445.055291782, + "msgr_running_send_time": 227.817750758, + "msgr_running_recv_time": 78.974093226, + "msgr_running_fast_dispatch_time": 47.587740615 + }, + "AsyncMessenger::Worker-2": { + "msgr_recv_messages": 2809014, + "msgr_send_messages": 4126613, + "msgr_recv_bytes": 653093470, + "msgr_send_bytes": 1022041970, + "msgr_created_connections": 14810, + "msgr_active_connections": 5, + "msgr_running_total_time": 453.384703728, + "msgr_running_send_time": 208.580910390, + "msgr_running_recv_time": 80.075306670, + "msgr_running_fast_dispatch_time": 46.854112208 + }, + "cct": { + "total_workers": 0, + "unhealthy_workers": 0 + }, + "finisher-radosclient": { + "queue_len": 0, + "complete_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "finisher-radosclient-0x55994098e460": { + "queue_len": 0, + "complete_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "finisher-radosclient-0x5599409901c0": { + "queue_len": 0, + "complete_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "mempool": { + "bloom_filter_bytes": 0, + "bloom_filter_items": 0, + "bluestore_alloc_bytes": 0, + "bluestore_alloc_items": 0, + "bluestore_cache_data_bytes": 0, + "bluestore_cache_data_items": 0, + "bluestore_cache_onode_bytes": 0, + "bluestore_cache_onode_items": 0, + "bluestore_cache_other_bytes": 0, + "bluestore_cache_other_items": 0, + "bluestore_fsck_bytes": 0, + "bluestore_fsck_items": 0, + "bluestore_txc_bytes": 0, + "bluestore_txc_items": 0, + "bluestore_writing_deferred_bytes": 0, + "bluestore_writing_deferred_items": 0, + "bluestore_writing_bytes": 0, + "bluestore_writing_items": 0, + "bluefs_bytes": 0, + "bluefs_items": 0, + "buffer_anon_bytes": 258469, + "buffer_anon_items": 201, + "buffer_meta_bytes": 0, + "buffer_meta_items": 0, + "osd_bytes": 0, + "osd_items": 0, + "osd_mapbl_bytes": 0, + "osd_mapbl_items": 0, + "osd_pglog_bytes": 0, + "osd_pglog_items": 0, + "osdmap_bytes": 74448, + "osdmap_items": 732, + "osdmap_mapping_bytes": 0, + "osdmap_mapping_items": 0, + "pgmap_bytes": 0, + "pgmap_items": 0, + "mds_co_bytes": 0, + "mds_co_items": 0, + "unittest_1_bytes": 0, + "unittest_1_items": 0, + "unittest_2_bytes": 0, + "unittest_2_items": 0 + }, + "objecter": { + "op_active": 0, + "op_laggy": 0, + "op_send": 9377910, + "op_send_bytes": 312, + "op_resend": 0, + "op_reply": 9377904, + "op": 9377910, + "op_r": 2755291, + "op_w": 6622619, + "op_rmw": 0, + "op_pg": 0, + "osdop_stat": 2755258, + "osdop_create": 8, + "osdop_read": 25, + 
"osdop_write": 0, + "osdop_writefull": 0, + "osdop_writesame": 0, + "osdop_append": 0, + "osdop_zero": 0, + "osdop_truncate": 0, + "osdop_delete": 0, + "osdop_mapext": 0, + "osdop_sparse_read": 0, + "osdop_clonerange": 0, + "osdop_getxattr": 0, + "osdop_setxattr": 0, + "osdop_cmpxattr": 0, + "osdop_rmxattr": 0, + "osdop_resetxattrs": 0, + "osdop_call": 0, + "osdop_watch": 6622611, + "osdop_notify": 0, + "osdop_src_cmpxattr": 0, + "osdop_pgls": 0, + "osdop_pgls_filter": 0, + "osdop_other": 2755266, + "linger_active": 8, + "linger_send": 35, + "linger_resend": 27, + "linger_ping": 6622576, + "poolop_active": 0, + "poolop_send": 0, + "poolop_resend": 0, + "poolstat_active": 0, + "poolstat_send": 0, + "poolstat_resend": 0, + "statfs_active": 0, + "statfs_send": 0, + "statfs_resend": 0, + "command_active": 0, + "command_send": 0, + "command_resend": 0, + "map_epoch": 1064, + "map_full": 0, + "map_inc": 106, + "osd_sessions": 8, + "osd_session_open": 11928, + "osd_session_close": 11920, + "osd_laggy": 5, + "omap_wr": 0, + "omap_rd": 0, + "omap_del": 0 + }, + "objecter-0x55994098e500": { + "op_active": 0, + "op_laggy": 0, + "op_send": 827839, + "op_send_bytes": 0, + "op_resend": 0, + "op_reply": 827839, + "op": 827839, + "op_r": 0, + "op_w": 827839, + "op_rmw": 0, + "op_pg": 0, + "osdop_stat": 0, + "osdop_create": 0, + "osdop_read": 0, + "osdop_write": 0, + "osdop_writefull": 0, + "osdop_writesame": 0, + "osdop_append": 0, + "osdop_zero": 0, + "osdop_truncate": 0, + "osdop_delete": 0, + "osdop_mapext": 0, + "osdop_sparse_read": 0, + "osdop_clonerange": 0, + "osdop_getxattr": 0, + "osdop_setxattr": 0, + "osdop_cmpxattr": 0, + "osdop_rmxattr": 0, + "osdop_resetxattrs": 0, + "osdop_call": 0, + "osdop_watch": 827839, + "osdop_notify": 0, + "osdop_src_cmpxattr": 0, + "osdop_pgls": 0, + "osdop_pgls_filter": 0, + "osdop_other": 0, + "linger_active": 1, + "linger_send": 3, + "linger_resend": 2, + "linger_ping": 827836, + "poolop_active": 0, + "poolop_send": 0, + "poolop_resend": 0, + "poolstat_active": 0, + "poolstat_send": 0, + "poolstat_resend": 0, + "statfs_active": 0, + "statfs_send": 0, + "statfs_resend": 0, + "command_active": 0, + "command_send": 0, + "command_resend": 0, + "map_epoch": 1064, + "map_full": 0, + "map_inc": 106, + "osd_sessions": 1, + "osd_session_open": 1, + "osd_session_close": 0, + "osd_laggy": 1, + "omap_wr": 0, + "omap_rd": 0, + "omap_del": 0 + }, + "objecter-0x55994098f720": { + "op_active": 0, + "op_laggy": 0, + "op_send": 5415951, + "op_send_bytes": 205291238, + "op_resend": 8, + "op_reply": 5415943, + "op": 5415943, + "op_r": 3612105, + "op_w": 1803838, + "op_rmw": 0, + "op_pg": 0, + "osdop_stat": 0, + "osdop_create": 0, + "osdop_read": 0, + "osdop_write": 0, + "osdop_writefull": 0, + "osdop_writesame": 0, + "osdop_append": 0, + "osdop_zero": 0, + "osdop_truncate": 0, + "osdop_delete": 0, + "osdop_mapext": 0, + "osdop_sparse_read": 0, + "osdop_clonerange": 0, + "osdop_getxattr": 0, + "osdop_setxattr": 0, + "osdop_cmpxattr": 0, + "osdop_rmxattr": 0, + "osdop_resetxattrs": 0, + "osdop_call": 5415567, + "osdop_watch": 0, + "osdop_notify": 0, + "osdop_src_cmpxattr": 0, + "osdop_pgls": 0, + "osdop_pgls_filter": 0, + "osdop_other": 376, + "linger_active": 0, + "linger_send": 0, + "linger_resend": 0, + "linger_ping": 0, + "poolop_active": 0, + "poolop_send": 0, + "poolop_resend": 0, + "poolstat_active": 0, + "poolstat_send": 0, + "poolstat_resend": 0, + "statfs_active": 0, + "statfs_send": 0, + "statfs_resend": 0, + "command_active": 0, + "command_send": 0, + "command_resend": 0, 
+ "map_epoch": 1064, + "map_full": 0, + "map_inc": 106, + "osd_sessions": 8, + "osd_session_open": 8834, + "osd_session_close": 8826, + "osd_laggy": 0, + "omap_wr": 0, + "omap_rd": 0, + "omap_del": 0 + }, + "rgw": { + "req": 2755258, + "failed_req": 0, + "get": 0, + "get_b": 0, + "get_initial_lat": { + "avgcount": 0, + "sum": 0.002219876, + "avgtime": 0.000000000 + }, + "put": 0, + "put_b": 0, + "put_initial_lat": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "qlen": 0, + "qactive": 0, + "cache_hit": 0, + "cache_miss": 2755261, + "keystone_token_cache_hit": 0, + "keystone_token_cache_miss": 0, + "gc_retire_object": 0, + "pubsub_event_triggered": 0, + "pubsub_event_lost": 0, + "pubsub_store_ok": 0, + "pubsub_store_fail": 0, + "pubsub_events": 0, + "pubsub_push_ok": 0, + "pubsub_push_failed": 0, + "pubsub_push_pending": 0 + }, + "simple-throttler": { + "throttle": 0 + }, + "throttle-msgr_dispatch_throttler-radosclient": { + "val": 0, + "max": 104857600, + "get_started": 0, + "get": 9379775, + "get_sum": 1545393284, + "get_or_fail_fail": 0, + "get_or_fail_success": 9379775, + "take": 0, + "take_sum": 0, + "put": 9379775, + "put_sum": 1545393284, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-msgr_dispatch_throttler-radosclient-0x55994098e320": { + "val": 0, + "max": 104857600, + "get_started": 0, + "get": 829631, + "get_sum": 162850310, + "get_or_fail_fail": 0, + "get_or_fail_success": 829631, + "take": 0, + "take_sum": 0, + "put": 829631, + "put_sum": 162850310, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-msgr_dispatch_throttler-radosclient-0x55994098fa40": { + "val": 0, + "max": 104857600, + "get_started": 0, + "get": 5421553, + "get_sum": 914508527, + "get_or_fail_fail": 0, + "get_or_fail_success": 5421553, + "take": 0, + "take_sum": 0, + "put": 5421553, + "put_sum": 914508527, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-objecter_bytes": { + "val": 0, + "max": 104857600, + "get_started": 0, + "get": 2755292, + "get_sum": 0, + "get_or_fail_fail": 0, + "get_or_fail_success": 2755292, + "take": 0, + "take_sum": 0, + "put": 0, + "put_sum": 0, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-objecter_bytes-0x55994098e780": { + "val": 0, + "max": 104857600, + "get_started": 0, + "get": 0, + "get_sum": 0, + "get_or_fail_fail": 0, + "get_or_fail_success": 0, + "take": 0, + "take_sum": 0, + "put": 0, + "put_sum": 0, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-objecter_bytes-0x55994098f7c0": { + "val": 0, + "max": 104857600, + "get_started": 0, + "get": 5415614, + "get_sum": 0, + "get_or_fail_fail": 0, + "get_or_fail_success": 5415614, + "take": 0, + "take_sum": 0, + "put": 0, + "put_sum": 0, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-objecter_ops": { + "val": 0, + "max": 24576, + "get_started": 0, + "get": 2755292, + "get_sum": 2755292, + "get_or_fail_fail": 0, + "get_or_fail_success": 2755292, + "take": 0, + "take_sum": 0, + "put": 2755292, + "put_sum": 2755292, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-objecter_ops-0x55994098e640": { + "val": 0, + "max": 24576, + "get_started": 0, + "get": 0, + "get_sum": 0, + "get_or_fail_fail": 0, + "get_or_fail_success": 0, + "take": 0, + "take_sum": 0, + "put": 0, + 
"put_sum": 0, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-objecter_ops-0x55994098f0e0": { + "val": 0, + "max": 24576, + "get_started": 0, + "get": 5415614, + "get_sum": 5415614, + "get_or_fail_fail": 0, + "get_or_fail_success": 5415614, + "take": 0, + "take_sum": 0, + "put": 5415614, + "put_sum": 5415614, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-rgw_async_rados_ops": { + "val": 0, + "max": 64, + "get_started": 0, + "get": 0, + "get_sum": 0, + "get_or_fail_fail": 0, + "get_or_fail_success": 0, + "take": 0, + "take_sum": 0, + "put": 0, + "put_sum": 0, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + } +} +` + var clusterStatusDump = ` { "health": { From 552ead3af5d4209f287924ed844e0707130d470b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 21 Apr 2020 16:30:30 -0700 Subject: [PATCH 1705/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 716ad24ea..2cee2e5c1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ - [#7366](https://github.com/influxdata/telegraf/pull/7366): add support for SIGUSR1 to trigger flush. - [#7271](https://github.com/influxdata/telegraf/pull/7271): Add retry when slave is busy to modbus input. - [#7356](https://github.com/influxdata/telegraf/pull/7356): Add option to save retention policy as tag in influxdb_listener. +- [#6915](https://github.com/influxdata/telegraf/pull/6915): Add support for MDS and RGW sockets to ceph input. #### Bugfixes From 5a30c9816ef0312c628b9aa00844125b945f1664 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 22 Apr 2020 15:53:01 -0700 Subject: [PATCH 1706/1815] Use new higher per request limit for cloudwatch GetMetricData (#7335) --- plugins/inputs/cloudwatch/cloudwatch.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index be4ae3700..3148e2c75 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -212,7 +212,7 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { results := []*cloudwatch.MetricDataResult{} // 100 is the maximum number of metric data queries a `GetMetricData` request can contain. - batchSize := 100 + batchSize := 500 var batches [][]*cloudwatch.MetricDataQuery for batchSize < len(queries) { From 0858b779c6e0ee23df8b056e79cfa43e36b52a77 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 22 Apr 2020 16:00:55 -0700 Subject: [PATCH 1707/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2cee2e5c1..1357e8749 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ #### Bugfixes - [#7241](https://github.com/influxdata/telegraf/issues/7241): Trim whitespace from instance tag in sqlserver input. +- [#7322](https://github.com/influxdata/telegraf/issues/7322): Use increased AWS Cloudwatch GetMetricData limit of 500 metrics per call. 
## v1.14.1 [2020-04-14] From 2799302142d3ebf8967107a8dd45431326895954 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 22 Apr 2020 16:16:22 -0700 Subject: [PATCH 1708/1815] Fix dimension limit on azure_monitor output (#7336) --- plugins/outputs/azure_monitor/azure_monitor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go index 9039d4aa4..f2b1db1dd 100644 --- a/plugins/outputs/azure_monitor/azure_monitor.go +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -392,7 +392,7 @@ func translate(m telegraf.Metric, prefix string) (*azureMonitorMetric, error) { var dimensionValues []string for _, tag := range m.TagList() { // Azure custom metrics service supports up to 10 dimensions - if len(dimensionNames) > 10 { + if len(dimensionNames) >= 10 { continue } From c8b9cb4087bfa653fb509f7be7cbdfb6d01bfc27 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 22 Apr 2020 16:18:00 -0700 Subject: [PATCH 1709/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1357e8749..377ffd83e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,7 @@ - [#7241](https://github.com/influxdata/telegraf/issues/7241): Trim whitespace from instance tag in sqlserver input. - [#7322](https://github.com/influxdata/telegraf/issues/7322): Use increased AWS Cloudwatch GetMetricData limit of 500 metrics per call. +- [#7318](https://github.com/influxdata/telegraf/issues/7318): Fix dimension limit on azure_monitor output. ## v1.14.1 [2020-04-14] From 8ab555129dc6be21581d552af7daf8b4e31b018e Mon Sep 17 00:00:00 2001 From: Jesper Brix Rosenkilde Date: Fri, 24 Apr 2020 00:38:31 +0200 Subject: [PATCH 1710/1815] Extract target as a tag for each rule in iptables input (#7391) --- plugins/inputs/iptables/iptables.go | 11 ++++++----- plugins/inputs/iptables/iptables_test.go | 16 ++++++++-------- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/plugins/inputs/iptables/iptables.go b/plugins/inputs/iptables/iptables.go index d2598cd0d..e56f8b31d 100644 --- a/plugins/inputs/iptables/iptables.go +++ b/plugins/inputs/iptables/iptables.go @@ -102,8 +102,8 @@ const measurement = "iptables" var errParse = errors.New("Cannot parse iptables list information") var chainNameRe = regexp.MustCompile(`^Chain\s+(\S+)`) -var fieldsHeaderRe = regexp.MustCompile(`^\s*pkts\s+bytes\s+`) -var valuesRe = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s+.*?/\*\s*(.+?)\s*\*/\s*`) +var fieldsHeaderRe = regexp.MustCompile(`^\s*pkts\s+bytes\s+target`) +var valuesRe = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s+(\w+).*?/\*\s*(.+?)\s*\*/\s*`) func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error { lines := strings.Split(data, "\n") @@ -119,15 +119,16 @@ func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error } for _, line := range lines[2:] { matches := valuesRe.FindStringSubmatch(line) - if len(matches) != 4 { + if len(matches) != 5 { continue } pkts := matches[1] bytes := matches[2] - comment := matches[3] + target := matches[3] + comment := matches[4] - tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "ruleid": comment} + tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "target": target, "ruleid": comment} fields := make(map[string]interface{}) var err error diff --git a/plugins/inputs/iptables/iptables_test.go b/plugins/inputs/iptables/iptables_test.go index cca41e1f4..681d8bbfc 
100644 --- a/plugins/inputs/iptables/iptables_test.go +++ b/plugins/inputs/iptables/iptables_test.go @@ -42,7 +42,7 @@ func TestIptables_Gather(t *testing.T) { pkts bytes target prot opt in out source destination 57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foobar */ `}, - tags: []map[string]string{{"table": "filter", "chain": "INPUT", "ruleid": "foobar"}}, + tags: []map[string]string{{"table": "filter", "chain": "INPUT", "target": "RETURN", "ruleid": "foobar"}}, fields: [][]map[string]interface{}{ {map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}}, }, @@ -98,9 +98,9 @@ func TestIptables_Gather(t *testing.T) { `, }, tags: []map[string]string{ - {"table": "filter", "chain": "INPUT", "ruleid": "foo"}, - {"table": "filter", "chain": "FORWARD", "ruleid": "bar"}, - {"table": "filter", "chain": "FORWARD", "ruleid": "foobar"}, + {"table": "filter", "chain": "INPUT", "target": "RETURN", "ruleid": "foo"}, + {"table": "filter", "chain": "FORWARD", "target": "RETURN", "ruleid": "bar"}, + {"table": "filter", "chain": "FORWARD", "target": "RETURN", "ruleid": "foobar"}, }, fields: [][]map[string]interface{}{ {map[string]interface{}{"pkts": uint64(200), "bytes": uint64(4520)}}, @@ -118,7 +118,7 @@ func TestIptables_Gather(t *testing.T) { 100 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:80 `}, tags: []map[string]string{ - {"table": "filter", "chain": "INPUT", "ruleid": "foobar"}, + {"table": "filter", "chain": "INPUT", "target": "RETURN", "ruleid": "foobar"}, }, fields: [][]map[string]interface{}{ {map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}}, @@ -134,8 +134,8 @@ func TestIptables_Gather(t *testing.T) { 0 0 CLASSIFY all -- * * 1.3.5.7 0.0.0.0/0 /* test2 */ CLASSIFY set 1:4 `}, tags: []map[string]string{ - {"table": "mangle", "chain": "SHAPER", "ruleid": "test"}, - {"table": "mangle", "chain": "SHAPER", "ruleid": "test2"}, + {"table": "mangle", "chain": "SHAPER", "target": "ACCEPT", "ruleid": "test"}, + {"table": "mangle", "chain": "SHAPER", "target": "CLASSIFY", "ruleid": "test2"}, }, fields: [][]map[string]interface{}{ {map[string]interface{}{"pkts": uint64(0), "bytes": uint64(0)}}, @@ -163,7 +163,7 @@ func TestIptables_Gather(t *testing.T) { 123 456 all -- eth0 * 0.0.0.0/0 0.0.0.0/0 /* all_recv */ `}, tags: []map[string]string{ - {"table": "all_recv", "chain": "accountfwd", "ruleid": "all_recv"}, + {"table": "all_recv", "chain": "accountfwd", "target": "all", "ruleid": "all_recv"}, }, fields: [][]map[string]interface{}{ {map[string]interface{}{"pkts": uint64(123), "bytes": uint64(456)}}, From 277b4d2fc4dbd358d09d3f16e74542febfc106f9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 23 Apr 2020 15:39:34 -0700 Subject: [PATCH 1711/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 377ffd83e..1d6851cc9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ - [#7271](https://github.com/influxdata/telegraf/pull/7271): Add retry when slave is busy to modbus input. - [#7356](https://github.com/influxdata/telegraf/pull/7356): Add option to save retention policy as tag in influxdb_listener. - [#6915](https://github.com/influxdata/telegraf/pull/6915): Add support for MDS and RGW sockets to ceph input. +- [#7391](https://github.com/influxdata/telegraf/pull/7391): Extract target as a tag for each rule in iptables input. 
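For readers skimming the iptables change above: a self-contained sketch (not part of the patch) of what the extended `valuesRe` regex captures, using the sample rule line from the patch's own test case:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the updated valuesRe in the iptables input: pkts, bytes,
// then the new (\w+) group for the target column, then the /* comment */
// that the plugin emits as the ruleid tag.
var valuesRe = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s+(\w+).*?/\*\s*(.+?)\s*\*/\s*`)

func main() {
	line := "   57  4520 RETURN     tcp  --  *      *       0.0.0.0/0    0.0.0.0/0    /* foobar */"
	if m := valuesRe.FindStringSubmatch(line); len(m) == 5 {
		fmt.Printf("pkts=%s bytes=%s target=%s ruleid=%s\n", m[1], m[2], m[3], m[4])
		// Output: pkts=57 bytes=4520 target=RETURN ruleid=foobar
	}
}
```

Rule lines without a `/* comment */` still fail the length check and are skipped, which matches the plugin's existing behavior of only reporting commented rules.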
#### Bugfixes From 8957032790cf26e19a98fd61babec58964d7e0fc Mon Sep 17 00:00:00 2001 From: timhallinflux Date: Thu, 23 Apr 2020 17:20:35 -0700 Subject: [PATCH 1712/1815] Update nvidia-smi README for Windows users (#7399) --- plugins/inputs/nvidia_smi/README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/plugins/inputs/nvidia_smi/README.md b/plugins/inputs/nvidia_smi/README.md index 2173c904e..892381cd5 100644 --- a/plugins/inputs/nvidia_smi/README.md +++ b/plugins/inputs/nvidia_smi/README.md @@ -17,6 +17,9 @@ This plugin uses a query on the [`nvidia-smi`](https://developer.nvidia.com/nvid #### Windows On Windows, `nvidia-smi` is generally located at `C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe` +On Windows 10, you may also find this located here `C:\Windows\System32\nvidia-smi.exe` + +You'll need to escape the `\` within the `telegraf.conf` like this: `C:\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe` ### Metrics - measurement: `nvidia_smi` From 1bb436e8a8df3d34cfac569dfa8a21aadb7ae4fb Mon Sep 17 00:00:00 2001 From: Mark Drayton Date: Fri, 24 Apr 2020 22:35:59 +0200 Subject: [PATCH 1713/1815] Fix string to int64 conversion for SNMP input (#7407) --- plugins/inputs/snmp/snmp.go | 4 ++-- plugins/inputs/snmp/snmp_test.go | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 2fc56ff97..2f8bf6d5b 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -804,9 +804,9 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) { case uint64: v = int64(vt) case []byte: - v, _ = strconv.Atoi(string(vt)) + v, _ = strconv.ParseInt(string(vt), 10, 64) case string: - v, _ = strconv.Atoi(vt) + v, _ = strconv.ParseInt(vt, 10, 64) } return v, nil } diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 25382bd7d..d29b525ad 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -694,6 +694,8 @@ func TestFieldConvert(t *testing.T) { {uint64(123), "float(3)", float64(0.123)}, {"123", "int", int64(123)}, {[]byte("123"), "int", int64(123)}, + {"123123123123", "int", int64(123123123123)}, + {[]byte("123123123123"), "int", int64(123123123123)}, {float32(12.3), "int", int64(12)}, {float64(12.3), "int", int64(12)}, {int(123), "int", int64(123)}, From f89381851ec56b54b747186041aa6ae1d4f5eff6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 24 Apr 2020 13:37:14 -0700 Subject: [PATCH 1714/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1d6851cc9..8838ae0b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ - [#7241](https://github.com/influxdata/telegraf/issues/7241): Trim whitespace from instance tag in sqlserver input. - [#7322](https://github.com/influxdata/telegraf/issues/7322): Use increased AWS Cloudwatch GetMetricData limit of 500 metrics per call. - [#7318](https://github.com/influxdata/telegraf/issues/7318): Fix dimension limit on azure_monitor output. +- [#7407](https://github.com/influxdata/telegraf/pull/7407): Fix 64-bit integer to string conversion in snmp input. 
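To see why the snmp fix above matters: a minimal standard-library sketch (illustrative only, not part of the patch) contrasting `strconv.Atoi` with the `strconv.ParseInt` call the plugin now uses:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	s := "123123123123" // needs more than 32 bits

	// strconv.Atoi yields the platform-sized int type. On 32-bit builds it
	// fails here with "value out of range", and the old fieldConvert code
	// discarded that error, silently producing 0. On 64-bit builds this
	// branch is not taken.
	if _, err := strconv.Atoi(s); err != nil {
		fmt.Println("Atoi:", err)
	}

	// strconv.ParseInt with bitSize 64 returns an int64 on every platform,
	// which is what the fixed conversion relies on.
	v, err := strconv.ParseInt(s, 10, 64)
	fmt.Println(v, err) // 123123123123 <nil>
}
```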
## v1.14.1 [2020-04-14] From d915f7c7d6d7761f17dc7abb81beed5cdb50196a Mon Sep 17 00:00:00 2001 From: Len Smith Date: Fri, 24 Apr 2020 13:38:58 -0700 Subject: [PATCH 1715/1815] Fix shard indices reporting in elasticsearch input (#7332) --- plugins/inputs/elasticsearch/elasticsearch.go | 7 +- .../elasticsearch/elasticsearch_test.go | 19 +++- plugins/inputs/elasticsearch/testdata_test.go | 100 +++++++++++++++++- 3 files changed, 117 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 7cecb2357..65d76c3ae 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -545,11 +545,12 @@ func (e *Elasticsearch) gatherIndicesStats(url string, acc telegraf.Accumulator) } if e.IndicesLevel == "shards" { - for shardNumber, shard := range index.Shards { - if len(shard) > 0 { + for shardNumber, shards := range index.Shards { + for _, shard := range shards { + // Get Shard Stats flattened := jsonparser.JSONFlattener{} - err := flattened.FullFlattenJSON("", shard[0], true, true) + err := flattened.FullFlattenJSON("", shard, true, true) if err != nil { return err } diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go index e70923bc0..ad91c898a 100644 --- a/plugins/inputs/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -327,17 +327,26 @@ func TestGatherClusterIndiceShardsStats(t *testing.T) { clusterIndicesExpected, map[string]string{"index_name": "twitter"}) - tags := map[string]string{ + primaryTags := map[string]string{ + "index_name": "twitter", + "node_id": "oqvR8I1dTpONvwRM30etww", + "shard_name": "0", + "type": "primary", + } + + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_shards", + clusterIndicesPrimaryShardsExpected, + primaryTags) + + replicaTags := map[string]string{ "index_name": "twitter", "node_id": "oqvR8I1dTpONvwRM30etww", "shard_name": "1", "type": "replica", } - acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_shards", - clusterIndicesShardsExpected, - tags) - + clusterIndicesReplicaShardsExpected, + replicaTags) } func newElasticsearchWithClient() *Elasticsearch { diff --git a/plugins/inputs/elasticsearch/testdata_test.go b/plugins/inputs/elasticsearch/testdata_test.go index 63c21a85c..a04fe1521 100644 --- a/plugins/inputs/elasticsearch/testdata_test.go +++ b/plugins/inputs/elasticsearch/testdata_test.go @@ -3650,7 +3650,105 @@ const clusterIndicesShardsResponse = ` } }` -var clusterIndicesShardsExpected = map[string]interface{}{ +var clusterIndicesPrimaryShardsExpected = map[string]interface{}{ + "commit_generation": float64(4), + "commit_num_docs": float64(340), + "completion_size_in_bytes": float64(0), + "docs_count": float64(340), + "docs_deleted": float64(0), + "fielddata_evictions": float64(0), + "fielddata_memory_size_in_bytes": float64(0), + "flush_periodic": float64(0), + "flush_total": float64(1), + "flush_total_time_in_millis": float64(32), + "get_current": float64(0), + "get_exists_time_in_millis": float64(0), + "get_exists_total": float64(0), + "get_missing_time_in_millis": float64(0), + "get_missing_total": float64(0), + "get_time_in_millis": float64(0), + "get_total": float64(0), + "indexing_delete_current": float64(0), + "indexing_delete_time_in_millis": float64(0), + "indexing_delete_total": float64(0), + "indexing_index_current": float64(0), + "indexing_index_failed": float64(0), + 
"indexing_index_time_in_millis": float64(176), + "indexing_index_total": float64(340), + "indexing_noop_update_total": float64(0), + "indexing_throttle_time_in_millis": float64(0), + "merges_current": float64(0), + "merges_current_docs": float64(0), + "merges_current_size_in_bytes": float64(0), + "merges_total": float64(0), + "merges_total_auto_throttle_in_bytes": float64(2.097152e+07), + "merges_total_docs": float64(0), + "merges_total_size_in_bytes": float64(0), + "merges_total_stopped_time_in_millis": float64(0), + "merges_total_throttled_time_in_millis": float64(0), + "merges_total_time_in_millis": float64(0), + "query_cache_cache_count": float64(0), + "query_cache_cache_size": float64(0), + "query_cache_evictions": float64(0), + "query_cache_hit_count": float64(0), + "query_cache_memory_size_in_bytes": float64(0), + "query_cache_miss_count": float64(0), + "query_cache_total_count": float64(0), + "recovery_current_as_source": float64(0), + "recovery_current_as_target": float64(0), + "recovery_throttle_time_in_millis": float64(0), + "refresh_external_total": float64(4), + "refresh_external_total_time_in_millis": float64(105), + "refresh_listeners": float64(0), + "refresh_total": float64(6), + "refresh_total_time_in_millis": float64(103), + "request_cache_evictions": float64(0), + "request_cache_hit_count": float64(0), + "request_cache_memory_size_in_bytes": float64(0), + "request_cache_miss_count": float64(0), + "retention_leases_primary_term": float64(1), + "retention_leases_version": float64(0), + "routing_state": int(3), + "search_fetch_current": float64(0), + "search_fetch_time_in_millis": float64(0), + "search_fetch_total": float64(0), + "search_open_contexts": float64(0), + "search_query_current": float64(0), + "search_query_time_in_millis": float64(0), + "search_query_total": float64(0), + "search_scroll_current": float64(0), + "search_scroll_time_in_millis": float64(0), + "search_scroll_total": float64(0), + "search_suggest_current": float64(0), + "search_suggest_time_in_millis": float64(0), + "search_suggest_total": float64(0), + "segments_count": float64(1), + "segments_doc_values_memory_in_bytes": float64(68), + "segments_fixed_bit_set_memory_in_bytes": float64(0), + "segments_index_writer_memory_in_bytes": float64(0), + "segments_max_unsafe_auto_id_timestamp": float64(-1), + "segments_memory_in_bytes": float64(4301), + "segments_norms_memory_in_bytes": float64(384), + "segments_points_memory_in_bytes": float64(3), + "segments_stored_fields_memory_in_bytes": float64(312), + "segments_term_vectors_memory_in_bytes": float64(0), + "segments_terms_memory_in_bytes": float64(3534), + "segments_version_map_memory_in_bytes": float64(0), + "seq_no_global_checkpoint": float64(339), + "seq_no_local_checkpoint": float64(339), + "seq_no_max_seq_no": float64(339), + "store_size_in_bytes": float64(90564), + "translog_earliest_last_modified_age": float64(936870), + "translog_operations": float64(340), + "translog_size_in_bytes": float64(77158), + "translog_uncommitted_operations": float64(0), + "translog_uncommitted_size_in_bytes": float64(55), + "warmer_current": float64(0), + "warmer_total": float64(3), + "warmer_total_time_in_millis": float64(0), +} + +var clusterIndicesReplicaShardsExpected = map[string]interface{}{ "commit_generation": float64(5), "commit_num_docs": float64(352), "completion_size_in_bytes": float64(0), From 795750a7a091b0011099c904fb0961f7d4d0c3dc Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 24 Apr 2020 13:39:56 -0700 Subject: [PATCH 1716/1815] Update 
changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8838ae0b4..a0b972f58 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ - [#7322](https://github.com/influxdata/telegraf/issues/7322): Use increased AWS Cloudwatch GetMetricData limit of 500 metrics per call. - [#7318](https://github.com/influxdata/telegraf/issues/7318): Fix dimension limit on azure_monitor output. - [#7407](https://github.com/influxdata/telegraf/pull/7407): Fix string to 64-bit integer conversion in snmp input. +- [#7327](https://github.com/influxdata/telegraf/issues/7327): Fix shard indices reporting in elasticsearch input. ## v1.14.1 [2020-04-14] From be1dc49ad9384e28a067a2c9134b9192100eca35 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 24 Apr 2020 16:40:08 -0700 Subject: [PATCH 1717/1815] Document distinction between file and tail inputs (#7353) --- plugins/inputs/file/README.md | 21 ++++++++++----------- plugins/inputs/file/file.go | 12 ++++-------- plugins/inputs/tail/tail.go | 2 +- 3 files changed, 15 insertions(+), 20 deletions(-) diff --git a/plugins/inputs/file/README.md b/plugins/inputs/file/README.md index 24139973b..ef0fb90b0 100644 --- a/plugins/inputs/file/README.md +++ b/plugins/inputs/file/README.md @@ -1,22 +1,18 @@ # File Input Plugin -The file plugin updates a list of files every interval and parses the contents -using the selected [input data format](/docs/DATA_FORMATS_INPUT.md). +The file plugin parses the **complete** contents of a file **every interval** using +the selected [input data format][]. -Files will always be read in their entirety, if you wish to tail/follow a file -use the [tail input plugin](/plugins/inputs/tail) instead. +**Note:** If you wish to parse only newly appended lines, use the [tail][] input +plugin instead. ### Configuration: ```toml [[inputs.file]] - ## Files to parse each interval. - ## These accept standard unix glob matching rules, but with the addition of - ## ** as a "super asterisk". ie: - ## /var/log/**.log -> recursively find all .log files in /var/log - ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log - ## /var/log/apache.log -> only read the apache log file - files = ["/var/log/apache/access.log"] + ## Files to parse each interval. Accepts standard unix glob matching rules, + ## as well as ** to match recursive files and directories. + files = ["/tmp/metrics.out"] ## Data format to consume. ## Each data format has its own unique set of configuration options, read @@ -28,3 +24,6 @@ use the [tail input plugin](/plugins/inputs/tail) instead. ## to disable. # file_tag = "" ``` + +[input data format]: /docs/DATA_FORMATS_INPUT.md +[tail]: /plugins/inputs/tail diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go index 860595283..fe2a840fa 100644 --- a/plugins/inputs/file/file.go +++ b/plugins/inputs/file/file.go @@ -20,13 +20,9 @@ type File struct { } const sampleConfig = ` - ## Files to parse each interval. - ## These accept standard unix glob matching rules, but with the addition of - ## ** as a "super asterisk". ie: - ## /var/log/**.log -> recursively find all .log files in /var/log - ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log - ## /var/log/apache.log -> only read the apache log file - files = ["/var/log/apache/access.log"] + ## Files to parse each interval. Accepts standard unix glob matching rules, + ## as well as ** to match recursive files and directories. 
+ files = ["/tmp/metrics.out"] ## The dataformat to be read from files ## Each data format has its own unique set of configuration options, read @@ -45,7 +41,7 @@ func (f *File) SampleConfig() string { } func (f *File) Description() string { - return "Reload and gather from file[s] on telegraf's interval." + return "Parse a complete file each interval" } func (f *File) Gather(acc telegraf.Accumulator) error { diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index 9e7d6ecf1..02d35c95b 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -100,7 +100,7 @@ func (t *Tail) SampleConfig() string { } func (t *Tail) Description() string { - return "Stream a log file, like the tail -f command" + return "Parse the new lines appended to a file" } func (t *Tail) Init() error { From 7ee776d67a6a5bef7205f04bad90f6cdc79b9884 Mon Sep 17 00:00:00 2001 From: Pierrick Brossin Date: Mon, 27 Apr 2020 17:52:24 +0200 Subject: [PATCH 1718/1815] Fix interfaces with pointers (#7411) --- plugins/inputs/fibaro/fibaro.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/fibaro/fibaro.go b/plugins/inputs/fibaro/fibaro.go index 7def0ab09..62889cc8d 100644 --- a/plugins/inputs/fibaro/fibaro.go +++ b/plugins/inputs/fibaro/fibaro.go @@ -69,12 +69,12 @@ type Devices struct { Type string `json:"type"` Enabled bool `json:"enabled"` Properties struct { - BatteryLevel interface{} `json:"batteryLevel"` - Dead interface{} `json:"dead"` - Energy interface{} `json:"energy"` - Power interface{} `json:"power"` + BatteryLevel *string `json:"batteryLevel"` + Dead string `json:"dead"` + Energy *string `json:"energy"` + Power *string `json:"power"` Value interface{} `json:"value"` - Value2 interface{} `json:"value2"` + Value2 *string `json:"value2"` } `json:"properties"` } @@ -176,19 +176,19 @@ func (f *Fibaro) Gather(acc telegraf.Accumulator) error { fields := make(map[string]interface{}) if device.Properties.BatteryLevel != nil { - if fValue, err := strconv.ParseFloat(device.Properties.BatteryLevel.(string), 64); err == nil { + if fValue, err := strconv.ParseFloat(*device.Properties.BatteryLevel, 64); err == nil { fields["batteryLevel"] = fValue } } if device.Properties.Energy != nil { - if fValue, err := strconv.ParseFloat(device.Properties.Energy.(string), 64); err == nil { + if fValue, err := strconv.ParseFloat(*device.Properties.Energy, 64); err == nil { fields["energy"] = fValue } } if device.Properties.Power != nil { - if fValue, err := strconv.ParseFloat(device.Properties.Power.(string), 64); err == nil { + if fValue, err := strconv.ParseFloat(*device.Properties.Power, 64); err == nil { fields["power"] = fValue } } @@ -208,7 +208,7 @@ func (f *Fibaro) Gather(acc telegraf.Accumulator) error { } if device.Properties.Value2 != nil { - if fValue, err := strconv.ParseFloat(device.Properties.Value2.(string), 64); err == nil { + if fValue, err := strconv.ParseFloat(*device.Properties.Value2, 64); err == nil { fields["value2"] = fValue } } From c4e9f72936d6c6bd3b303aa8f1e2b614c8bdc599 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 28 Apr 2020 13:41:25 -0700 Subject: [PATCH 1719/1815] Ignore fields with NaN or Inf floats in the JSON serializer (#7426) --- plugins/serializers/json/json.go | 23 ++++++++++++-- plugins/serializers/json/json_test.go | 46 +++++++++++++++++++++++++-- 2 files changed, 64 insertions(+), 5 deletions(-) diff --git a/plugins/serializers/json/json.go b/plugins/serializers/json/json.go index bfb84f9a7..e2d7af330 
100644 --- a/plugins/serializers/json/json.go +++ b/plugins/serializers/json/json.go @@ -2,6 +2,7 @@ package json import ( "encoding/json" + "math" "time" "github.com/influxdata/telegraf" @@ -49,8 +50,26 @@ func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { func (s *serializer) createObject(metric telegraf.Metric) map[string]interface{} { m := make(map[string]interface{}, 4) - m["tags"] = metric.Tags() - m["fields"] = metric.Fields() + + tags := make(map[string]string, len(metric.TagList())) + for _, tag := range metric.TagList() { + tags[tag.Key] = tag.Value + } + m["tags"] = tags + + fields := make(map[string]interface{}, len(metric.FieldList())) + for _, field := range metric.FieldList() { + switch fv := field.Value.(type) { + case float64: + // JSON does not support these special values + if math.IsNaN(fv) || math.IsInf(fv, 0) { + continue + } + } + fields[field.Key] = field.Value + } + m["fields"] = fields + m["name"] = metric.Name() m["timestamp"] = metric.Time().UnixNano() / int64(s.TimestampUnits) return m diff --git a/plugins/serializers/json/json_test.go b/plugins/serializers/json/json_test.go index 82990b747..9ea304c88 100644 --- a/plugins/serializers/json/json_test.go +++ b/plugins/serializers/json/json_test.go @@ -2,14 +2,15 @@ package json import ( "fmt" + "math" "testing" "time" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func MustMetric(v telegraf.Metric, err error) telegraf.Metric { @@ -193,3 +194,42 @@ func TestSerializeBatch(t *testing.T) { require.NoError(t, err) require.Equal(t, []byte(`{"metrics":[{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0},{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0}]}`), buf) } + +func TestSerializeBatchSkipInf(t *testing.T) { + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "inf": math.Inf(1), + "time_idle": 42, + }, + time.Unix(0, 0), + ), + } + + s, err := NewSerializer(0) + require.NoError(t, err) + buf, err := s.SerializeBatch(metrics) + require.NoError(t, err) + require.Equal(t, []byte(`{"metrics":[{"fields":{"time_idle":42},"name":"cpu","tags":{},"timestamp":0}]}`), buf) +} + +func TestSerializeBatchSkipInfAllFields(t *testing.T) { + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "inf": math.Inf(1), + }, + time.Unix(0, 0), + ), + } + + s, err := NewSerializer(0) + require.NoError(t, err) + buf, err := s.SerializeBatch(metrics) + require.NoError(t, err) + require.Equal(t, []byte(`{"metrics":[{"fields":{},"name":"cpu","tags":{},"timestamp":0}]}`), buf) +} From b77dac9fdf7cefd114b719140e0055d309489f7a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 28 Apr 2020 13:41:59 -0700 Subject: [PATCH 1720/1815] Run create database query once per database (#7333) --- plugins/outputs/influxdb/http.go | 30 +++-- plugins/outputs/influxdb/http_test.go | 176 ++++++++++++++++++++++++++ 2 files changed, 196 insertions(+), 10 deletions(-) diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 6656a8ee6..2e3599788 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -107,9 +107,13 @@ type HTTPConfig struct { } type httpClient struct { - client *http.Client - config 
HTTPConfig - createdDatabases map[string]bool + client *http.Client + config HTTPConfig + // Tracks whether the `create database` statement was executed for the + // database. An attempt to create the database is made each time a new + // database is encountered in the database_tag and after a "database not + // found" error occurs. + createDatabaseExecuted map[string]bool log telegraf.Logger } @@ -177,9 +181,9 @@ func NewHTTPClient(config HTTPConfig) (*httpClient, error) { Timeout: config.Timeout, Transport: transport, }, - createdDatabases: make(map[string]bool), - config: config, - log: config.Log, + createDatabaseExecuted: make(map[string]bool), + config: config, + log: config.Log, } return client, nil } @@ -215,7 +219,6 @@ func (c *httpClient) CreateDatabase(ctx context.Context, database string) error if err != nil { if resp.StatusCode == 200 { - c.createdDatabases[database] = true return nil } @@ -225,12 +228,19 @@ func (c *httpClient) CreateDatabase(ctx context.Context, database string) error } } - // Even with a 200 response there can be an error + // Even with a 200 status code there can be an error in the response body. + // If there is also no error string then the operation was successful. if resp.StatusCode == http.StatusOK && queryResp.Error() == "" { - c.createdDatabases[database] = true + c.createDatabaseExecuted[database] = true return nil } + // Don't attempt to recreate the database after a 403 Forbidden error. + // This behavior exists only to maintain backwards compatibility. + if resp.StatusCode == http.StatusForbidden { + c.createDatabaseExecuted[database] = true + } + return &APIError{ StatusCode: resp.StatusCode, Title: resp.Status, @@ -284,7 +294,7 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error } for dbrp, batch := range batches { - if !c.config.SkipDatabaseCreation && !c.createdDatabases[dbrp.Database] { + if !c.config.SkipDatabaseCreation && !c.createDatabaseExecuted[dbrp.Database] { err := c.CreateDatabase(ctx, dbrp.Database) if err != nil { c.log.Warnf("When writing to [%s]: database %q creation failed: %v", diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index 4b323c327..1d030d36c 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -959,3 +959,179 @@ func TestDBRPTags(t *testing.T) { }) } } + +type MockHandlerChain struct { + handlers []http.HandlerFunc +} + +func (h *MockHandlerChain) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if len(h.handlers) == 0 { + w.WriteHeader(http.StatusInternalServerError) + return + } + next, rest := h.handlers[0], h.handlers[1:] + h.handlers = rest + next(w, r) +} + +func (h *MockHandlerChain) Done() bool { + return len(h.handlers) == 0 +} + +func TestDBRPTagsCreateDatabaseNotCalledOnRetryAfterForbidden(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + handlers := &MockHandlerChain{ + handlers: []http.HandlerFunc{ + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/query": + if r.FormValue("q") != `CREATE DATABASE "telegraf"` { + w.WriteHeader(http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusForbidden) + w.Write([]byte(`{"results": [{"error": "error authorizing query"}]}`)) + default: + w.WriteHeader(http.StatusInternalServerError) + } + }, + func(w http.ResponseWriter, r *http.Request) { + switch 
r.URL.Path { + case "/write": + w.WriteHeader(http.StatusNoContent) + default: + w.WriteHeader(http.StatusInternalServerError) + } + }, + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + w.WriteHeader(http.StatusNoContent) + default: + w.WriteHeader(http.StatusInternalServerError) + } + }, + }, + } + ts.Config.Handler = handlers + + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + } + + output := influxdb.InfluxDB{ + URL: u.String(), + Database: "telegraf", + DatabaseTag: "database", + Log: testutil.Logger{}, + CreateHTTPClientF: func(config *influxdb.HTTPConfig) (influxdb.Client, error) { + return influxdb.NewHTTPClient(*config) + }, + } + err = output.Connect() + require.NoError(t, err) + err = output.Write(metrics) + require.NoError(t, err) + err = output.Write(metrics) + require.NoError(t, err) + + require.True(t, handlers.Done(), "all handlers not called") +} + +func TestDBRPTagsCreateDatabaseCalledOnDatabaseNotFound(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + handlers := &MockHandlerChain{ + handlers: []http.HandlerFunc{ + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/query": + if r.FormValue("q") != `CREATE DATABASE "telegraf"` { + w.WriteHeader(http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusForbidden) + w.Write([]byte(`{"results": [{"error": "error authorizing query"}]}`)) + default: + w.WriteHeader(http.StatusInternalServerError) + } + }, + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + w.WriteHeader(http.StatusNotFound) + w.Write([]byte(`{"error": "database not found: \"telegraf\""}`)) + default: + w.WriteHeader(http.StatusInternalServerError) + } + }, + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/query": + if r.FormValue("q") != `CREATE DATABASE "telegraf"` { + w.WriteHeader(http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusForbidden) + default: + w.WriteHeader(http.StatusInternalServerError) + } + }, + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + w.WriteHeader(http.StatusNoContent) + default: + w.WriteHeader(http.StatusInternalServerError) + } + }, + }, + } + ts.Config.Handler = handlers + + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + } + + output := influxdb.InfluxDB{ + URL: u.String(), + Database: "telegraf", + DatabaseTag: "database", + Log: testutil.Logger{}, + CreateHTTPClientF: func(config *influxdb.HTTPConfig) (influxdb.Client, error) { + return influxdb.NewHTTPClient(*config) + }, + } + + err = output.Connect() + require.NoError(t, err) + err = output.Write(metrics) + require.Error(t, err) + err = output.Write(metrics) + require.NoError(t, err) + + require.True(t, handlers.Done(), "all handlers not called") +} From 476a899a1a53f2e9742a9d0dcf50dfe36b0bbc88 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 28 Apr 2020 14:41:44 -0700 Subject: [PATCH 1721/1815] Fix typo in name of gc_cpu_fraction field (#7425) --- plugins/inputs/kapacitor/README.md | 6 +++--- plugins/inputs/kapacitor/kapacitor.go | 2 +- plugins/inputs/kapacitor/kapacitor_test.go | 2 +- 3 files 
changed, 5 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/kapacitor/README.md b/plugins/inputs/kapacitor/README.md index 6284e6d77..2328e0904 100644 --- a/plugins/inputs/kapacitor/README.md +++ b/plugins/inputs/kapacitor/README.md @@ -49,7 +49,7 @@ The Kapacitor plugin collects metrics from the given Kapacitor instances. - [buck_hash_sys_bytes](#buck_hash_sys_bytes) _(integer)_ - [frees](#frees) _(integer)_ - [gc_sys_bytes](#gc_sys_bytes) _(integer)_ - - [gcc_pu_fraction](#gcc_pu_fraction) _(float)_ + - [gc_cpu_fraction](#gc_cpu_fraction) _(float)_ - [heap_alloc_bytes](#heap_alloc_bytes) _(integer)_ - [heap_idle_bytes](#heap_idle_bytes) _(integer)_ - [heap_in_use_bytes](#heap_in_use_bytes) _(integer)_ @@ -178,7 +178,7 @@ The number of heap objects freed. #### gc_sys_bytes The number of bytes of memory used for garbage collection system metadata. -#### gcc_pu_fraction +#### gc_cpu_fraction The fraction of Kapacitor's available CPU time used by garbage collection since Kapacitor started. @@ -306,7 +306,7 @@ these values. ``` $ telegraf --config /etc/telegraf.conf --input-filter kapacitor --test * Plugin: inputs.kapacitor, Collection 1 -> kapacitor_memstats,host=hostname.local,kap_version=1.1.0~rc2,url=http://localhost:9092/kapacitor/v1/debug/vars alloc_bytes=6974808i,buck_hash_sys_bytes=1452609i,frees=207281i,gc_sys_bytes=802816i,gcc_pu_fraction=0.00004693548939673313,heap_alloc_bytes=6974808i,heap_idle_bytes=6742016i,heap_in_use_bytes=9183232i,heap_objects=23216i,heap_released_bytes=0i,heap_sys_bytes=15925248i,last_gc_ns=1478791460012676997i,lookups=88i,mallocs=230497i,mcache_in_use_bytes=9600i,mcache_sys_bytes=16384i,mspan_in_use_bytes=98560i,mspan_sys_bytes=131072i,next_gc_ns=11467528i,num_gc=8i,other_sys_bytes=2236087i,pause_total_ns=2994110i,stack_in_use_bytes=1900544i,stack_sys_bytes=1900544i,sys_bytes=22464760i,total_alloc_bytes=35023600i 1478791462000000000 +> kapacitor_memstats,host=hostname.local,kap_version=1.1.0~rc2,url=http://localhost:9092/kapacitor/v1/debug/vars alloc_bytes=6974808i,buck_hash_sys_bytes=1452609i,frees=207281i,gc_sys_bytes=802816i,gc_cpu_fraction=0.00004693548939673313,heap_alloc_bytes=6974808i,heap_idle_bytes=6742016i,heap_in_use_bytes=9183232i,heap_objects=23216i,heap_released_bytes=0i,heap_sys_bytes=15925248i,last_gc_ns=1478791460012676997i,lookups=88i,mallocs=230497i,mcache_in_use_bytes=9600i,mcache_sys_bytes=16384i,mspan_in_use_bytes=98560i,mspan_sys_bytes=131072i,next_gc_ns=11467528i,num_gc=8i,other_sys_bytes=2236087i,pause_total_ns=2994110i,stack_in_use_bytes=1900544i,stack_sys_bytes=1900544i,sys_bytes=22464760i,total_alloc_bytes=35023600i 1478791462000000000 > kapacitor,host=hostname.local,kap_version=1.1.0~rc2,url=http://localhost:9092/kapacitor/v1/debug/vars num_enabled_tasks=5i,num_subscriptions=5i,num_tasks=5i 1478791462000000000 > kapacitor_edges,child=stream0,host=hostname.local,parent=stream,task=deadman-test,type=stream collected=0,emitted=0 1478791462000000000 > kapacitor_ingress,database=_internal,host=hostname.local,measurement=shard,retention_policy=monitor,task_master=main points_received=120 1478791462000000000 diff --git a/plugins/inputs/kapacitor/kapacitor.go b/plugins/inputs/kapacitor/kapacitor.go index f20b98774..71febf307 100644 --- a/plugins/inputs/kapacitor/kapacitor.go +++ b/plugins/inputs/kapacitor/kapacitor.go @@ -171,7 +171,7 @@ func (k *Kapacitor) gatherURL( "alloc_bytes": s.MemStats.Alloc, "buck_hash_sys_bytes": s.MemStats.BuckHashSys, "frees": s.MemStats.Frees, - "gcc_pu_fraction": s.MemStats.GCCPUFraction, +
"gc_cpu_fraction": s.MemStats.GCCPUFraction, "gc_sys_bytes": s.MemStats.GCSys, "heap_alloc_bytes": s.MemStats.HeapAlloc, "heap_idle_bytes": s.MemStats.HeapIdle, diff --git a/plugins/inputs/kapacitor/kapacitor_test.go b/plugins/inputs/kapacitor/kapacitor_test.go index b32aeec24..cae1f9ce3 100644 --- a/plugins/inputs/kapacitor/kapacitor_test.go +++ b/plugins/inputs/kapacitor/kapacitor_test.go @@ -33,7 +33,7 @@ func TestKapacitor(t *testing.T) { "alloc_bytes": int64(6950624), "buck_hash_sys_bytes": int64(1446737), "frees": int64(129656), - "gcc_pu_fraction": float64(0.006757149597237818), + "gc_cpu_fraction": float64(0.006757149597237818), "gc_sys_bytes": int64(575488), "heap_alloc_bytes": int64(6950624), "heap_idle_bytes": int64(499712), From cf3d48bb68197e21919a4439024b3710ed10463b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 28 Apr 2020 14:42:37 -0700 Subject: [PATCH 1722/1815] Allow CR and FF inside of string fields and fix parser panic (#7427) --- plugins/parsers/influx/machine.go | 33482 +++++++++++------------ plugins/parsers/influx/machine.go.rl | 17 +- plugins/parsers/influx/machine_test.go | 21 + plugins/parsers/influx/parser.go | 8 +- plugins/parsers/influx/parser_test.go | 26 + 5 files changed, 16653 insertions(+), 16901 deletions(-) diff --git a/plugins/parsers/influx/machine.go b/plugins/parsers/influx/machine.go index 2a738f669..59bd232dd 100644 --- a/plugins/parsers/influx/machine.go +++ b/plugins/parsers/influx/machine.go @@ -7,6 +7,14 @@ import ( "io" ) +type readErr struct { + Err error +} + +func (e *readErr) Error() string { + return e.Err.Error() +} + var ( ErrNameParse = errors.New("expected measurement name") ErrFieldParse = errors.New("expected field") @@ -17,22 +25,22 @@ var ( ) -//line plugins/parsers/influx/machine.go.rl:310 +//line plugins/parsers/influx/machine.go.rl:318 -//line plugins/parsers/influx/machine.go:25 -const LineProtocol_start int = 270 -const LineProtocol_first_final int = 270 +//line plugins/parsers/influx/machine.go:33 +const LineProtocol_start int = 269 +const LineProtocol_first_final int = 269 const LineProtocol_error int = 0 -const LineProtocol_en_main int = 270 -const LineProtocol_en_discard_line int = 258 -const LineProtocol_en_align int = 740 -const LineProtocol_en_series int = 261 +const LineProtocol_en_main int = 269 +const LineProtocol_en_discard_line int = 257 +const LineProtocol_en_align int = 739 +const LineProtocol_en_series int = 260 -//line plugins/parsers/influx/machine.go.rl:313 +//line plugins/parsers/influx/machine.go.rl:321 type Handler interface { SetMeasurement(name []byte) error @@ -66,24 +74,24 @@ func NewMachine(handler Handler) *machine { } -//line plugins/parsers/influx/machine.go.rl:346 +//line plugins/parsers/influx/machine.go.rl:354 -//line plugins/parsers/influx/machine.go.rl:347 +//line plugins/parsers/influx/machine.go.rl:355 -//line plugins/parsers/influx/machine.go.rl:348 +//line plugins/parsers/influx/machine.go.rl:356 -//line plugins/parsers/influx/machine.go.rl:349 +//line plugins/parsers/influx/machine.go.rl:357 -//line plugins/parsers/influx/machine.go.rl:350 +//line plugins/parsers/influx/machine.go.rl:358 -//line plugins/parsers/influx/machine.go.rl:351 +//line plugins/parsers/influx/machine.go.rl:359 -//line plugins/parsers/influx/machine.go:82 +//line plugins/parsers/influx/machine.go:90 { ( m.cs) = LineProtocol_start } -//line plugins/parsers/influx/machine.go.rl:352 +//line plugins/parsers/influx/machine.go.rl:360 return m } @@ -95,22 +103,22 @@ func NewSeriesMachine(handler Handler) 
*machine { } -//line plugins/parsers/influx/machine.go.rl:363 +//line plugins/parsers/influx/machine.go.rl:371 -//line plugins/parsers/influx/machine.go.rl:364 +//line plugins/parsers/influx/machine.go.rl:372 -//line plugins/parsers/influx/machine.go.rl:365 +//line plugins/parsers/influx/machine.go.rl:373 -//line plugins/parsers/influx/machine.go.rl:366 +//line plugins/parsers/influx/machine.go.rl:374 -//line plugins/parsers/influx/machine.go.rl:367 +//line plugins/parsers/influx/machine.go.rl:375 -//line plugins/parsers/influx/machine.go:109 +//line plugins/parsers/influx/machine.go:117 { ( m.cs) = LineProtocol_start } -//line plugins/parsers/influx/machine.go.rl:368 +//line plugins/parsers/influx/machine.go.rl:376 return m } @@ -128,12 +136,12 @@ func (m *machine) SetData(data []byte) { m.finishMetric = false -//line plugins/parsers/influx/machine.go:132 +//line plugins/parsers/influx/machine.go:140 { ( m.cs) = LineProtocol_start } -//line plugins/parsers/influx/machine.go.rl:385 +//line plugins/parsers/influx/machine.go.rl:393 m.cs = m.initState } @@ -156,7 +164,7 @@ func (m *machine) Next() error { func (m *machine) exec() error { var err error -//line plugins/parsers/influx/machine.go:160 +//line plugins/parsers/influx/machine.go:168 { if ( m.p) == ( m.pe) { goto _test_eof @@ -165,8 +173,8 @@ func (m *machine) exec() error { _again: switch ( m.cs) { - case 270: - goto st270 + case 269: + goto st269 case 1: goto st1 case 2: @@ -181,14 +189,14 @@ _again: goto st5 case 6: goto st6 - case 7: - goto st7 + case 270: + goto st270 case 271: goto st271 case 272: goto st272 - case 273: - goto st273 + case 7: + goto st7 case 8: goto st8 case 9: @@ -237,24 +245,24 @@ _again: goto st30 case 31: goto st31 - case 32: - goto st32 + case 273: + goto st273 case 274: goto st274 - case 275: - goto st275 + case 32: + goto st32 case 33: goto st33 - case 34: - goto st34 + case 275: + goto st275 case 276: goto st276 case 277: goto st277 + case 34: + goto st34 case 278: goto st278 - case 35: - goto st35 case 279: goto st279 case 280: @@ -289,18 +297,18 @@ _again: goto st294 case 295: goto st295 - case 296: - goto st296 + case 35: + goto st35 case 36: goto st36 - case 37: - goto st37 + case 296: + goto st296 case 297: goto st297 case 298: goto st298 - case 299: - goto st299 + case 37: + goto st37 case 38: goto st38 case 39: @@ -309,18 +317,18 @@ _again: goto st40 case 41: goto st41 - case 42: - goto st42 + case 299: + goto st299 case 300: goto st300 case 301: goto st301 case 302: goto st302 + case 42: + goto st42 case 303: goto st303 - case 43: - goto st43 case 304: goto st304 case 305: @@ -363,8 +371,8 @@ _again: goto st323 case 324: goto st324 - case 325: - goto st325 + case 43: + goto st43 case 44: goto st44 case 45: @@ -383,14 +391,14 @@ _again: goto st51 case 52: goto st52 - case 53: - goto st53 + case 325: + goto st325 case 326: goto st326 case 327: goto st327 - case 328: - goto st328 + case 53: + goto st53 case 54: goto st54 case 55: @@ -401,14 +409,14 @@ _again: goto st57 case 58: goto st58 - case 59: - goto st59 + case 328: + goto st328 case 329: goto st329 + case 59: + goto st59 case 330: goto st330 - case 60: - goto st60 case 331: goto st331 case 332: @@ -447,18 +455,18 @@ _again: goto st348 case 349: goto st349 + case 60: + goto st60 case 350: goto st350 - case 61: - goto st61 case 351: goto st351 case 352: goto st352 + case 61: + goto st61 case 353: goto st353 - case 62: - goto st62 case 354: goto st354 case 355: @@ -497,8 +505,8 @@ _again: goto st371 case 372: goto st372 - case 373: - goto st373 
+ case 62: + goto st62 case 63: goto st63 case 64: @@ -507,10 +515,10 @@ _again: goto st65 case 66: goto st66 + case 373: + goto st373 case 67: goto st67 - case 374: - goto st374 case 68: goto st68 case 69: @@ -519,18 +527,20 @@ _again: goto st70 case 71: goto st71 - case 72: - goto st72 + case 374: + goto st374 case 375: goto st375 case 376: goto st376 - case 377: - goto st377 + case 72: + goto st72 case 73: goto st73 case 74: goto st74 + case 377: + goto st377 case 378: goto st378 case 379: @@ -539,8 +549,6 @@ _again: goto st75 case 380: goto st380 - case 76: - goto st76 case 381: goto st381 case 382: @@ -579,8 +587,8 @@ _again: goto st398 case 399: goto st399 - case 400: - goto st400 + case 76: + goto st76 case 77: goto st77 case 78: @@ -607,52 +615,52 @@ _again: goto st88 case 89: goto st89 - case 90: - goto st90 + case 400: + goto st400 case 401: goto st401 case 402: goto st402 case 403: goto st403 - case 404: - goto st404 + case 90: + goto st90 case 91: goto st91 case 92: goto st92 case 93: goto st93 - case 94: - goto st94 + case 404: + goto st404 case 405: goto st405 - case 406: - goto st406 + case 94: + goto st94 case 95: goto st95 + case 406: + goto st406 case 96: goto st96 - case 407: - goto st407 case 97: goto st97 - case 98: - goto st98 + case 407: + goto st407 case 408: goto st408 + case 98: + goto st98 case 409: goto st409 - case 99: - goto st99 case 410: goto st410 - case 411: - goto st411 + case 99: + goto st99 case 100: goto st100 - case 101: - goto st101 + case 411: + goto st411 case 412: goto st412 case 413: @@ -687,28 +695,28 @@ _again: goto st427 case 428: goto st428 + case 101: + goto st101 case 429: goto st429 - case 102: - goto st102 case 430: goto st430 case 431: goto st431 - case 432: - goto st432 + case 102: + goto st102 case 103: goto st103 - case 104: - goto st104 + case 432: + goto st432 case 433: goto st433 case 434: goto st434 + case 104: + goto st104 case 435: goto st435 - case 105: - goto st105 case 436: goto st436 case 437: @@ -747,10 +755,10 @@ _again: goto st453 case 454: goto st454 + case 105: + goto st105 case 455: goto st455 - case 106: - goto st106 case 456: goto st456 case 457: @@ -793,8 +801,8 @@ _again: goto st475 case 476: goto st476 - case 477: - goto st477 + case 106: + goto st106 case 107: goto st107 case 108: @@ -803,18 +811,18 @@ _again: goto st109 case 110: goto st110 + case 477: + goto st477 case 111: goto st111 case 478: goto st478 - case 112: - goto st112 case 479: goto st479 + case 112: + goto st112 case 480: goto st480 - case 113: - goto st113 case 481: goto st481 case 482: @@ -831,48 +839,48 @@ _again: goto st487 case 488: goto st488 - case 489: - goto st489 + case 113: + goto st113 case 114: goto st114 case 115: goto st115 + case 489: + goto st489 case 116: goto st116 - case 490: - goto st490 case 117: goto st117 case 118: goto st118 + case 490: + goto st490 case 119: goto st119 - case 491: - goto st491 case 120: goto st120 - case 121: - goto st121 + case 491: + goto st491 case 492: goto st492 - case 493: - goto st493 + case 121: + goto st121 case 122: goto st122 case 123: goto st123 case 124: goto st124 - case 125: - goto st125 + case 493: + goto st493 case 494: goto st494 case 495: goto st495 + case 125: + goto st125 case 496: goto st496 - case 126: - goto st126 case 497: goto st497 case 498: @@ -911,12 +919,12 @@ _again: goto st514 case 515: goto st515 - case 516: - goto st516 + case 126: + goto st126 case 127: goto st127 - case 128: - goto st128 + case 516: + goto st516 case 517: goto st517 case 518: @@ -933,48 +941,48 @@ 
_again: goto st523 case 524: goto st524 - case 525: - goto st525 + case 128: + goto st128 case 129: goto st129 case 130: goto st130 + case 525: + goto st525 case 131: goto st131 - case 526: - goto st526 case 132: goto st132 case 133: goto st133 + case 526: + goto st526 case 134: goto st134 - case 527: - goto st527 case 135: goto st135 - case 136: - goto st136 + case 527: + goto st527 case 528: goto st528 - case 529: - goto st529 + case 136: + goto st136 case 137: goto st137 case 138: goto st138 - case 139: - goto st139 + case 529: + goto st529 case 530: goto st530 + case 139: + goto st139 case 531: goto st531 case 140: goto st140 case 532: goto st532 - case 141: - goto st141 case 533: goto st533 case 534: @@ -989,28 +997,28 @@ _again: goto st538 case 539: goto st539 - case 540: - goto st540 + case 141: + goto st141 case 142: goto st142 case 143: goto st143 + case 540: + goto st540 case 144: goto st144 - case 541: - goto st541 case 145: goto st145 case 146: goto st146 + case 541: + goto st541 case 147: goto st147 - case 542: - goto st542 case 148: goto st148 - case 149: - goto st149 + case 542: + goto st542 case 543: goto st543 case 544: @@ -1049,26 +1057,26 @@ _again: goto st560 case 561: goto st561 - case 562: - goto st562 + case 149: + goto st149 case 150: goto st150 - case 151: - goto st151 + case 562: + goto st562 case 563: goto st563 case 564: goto st564 + case 151: + goto st151 case 565: goto st565 - case 152: - goto st152 case 566: goto st566 + case 152: + goto st152 case 567: goto st567 - case 153: - goto st153 case 568: goto st568 case 569: @@ -1103,16 +1111,16 @@ _again: goto st583 case 584: goto st584 - case 585: - goto st585 + case 153: + goto st153 case 154: goto st154 + case 585: + goto st585 case 155: goto st155 case 586: goto st586 - case 156: - goto st156 case 587: goto st587 case 588: @@ -1127,32 +1135,32 @@ _again: goto st592 case 593: goto st593 - case 594: - goto st594 + case 156: + goto st156 case 157: goto st157 case 158: goto st158 + case 594: + goto st594 case 159: goto st159 - case 595: - goto st595 case 160: goto st160 case 161: goto st161 + case 595: + goto st595 case 162: goto st162 - case 596: - goto st596 case 163: goto st163 - case 164: - goto st164 + case 596: + goto st596 case 597: goto st597 - case 598: - goto st598 + case 164: + goto st164 case 165: goto st165 case 166: @@ -1163,8 +1171,8 @@ _again: goto st168 case 169: goto st169 - case 170: - goto st170 + case 598: + goto st598 case 599: goto st599 case 600: @@ -1201,28 +1209,28 @@ _again: goto st615 case 616: goto st616 - case 617: - goto st617 + case 170: + goto st170 case 171: goto st171 case 172: goto st172 - case 173: - goto st173 + case 617: + goto st617 case 618: goto st618 case 619: goto st619 + case 173: + goto st173 case 620: goto st620 - case 174: - goto st174 case 621: goto st621 + case 174: + goto st174 case 622: goto st622 - case 175: - goto st175 case 623: goto st623 case 624: @@ -1231,76 +1239,76 @@ _again: goto st625 case 626: goto st626 - case 627: - goto st627 + case 175: + goto st175 case 176: goto st176 case 177: goto st177 + case 627: + goto st627 case 178: goto st178 - case 628: - goto st628 case 179: goto st179 case 180: goto st180 + case 628: + goto st628 case 181: goto st181 - case 629: - goto st629 case 182: goto st182 - case 183: - goto st183 + case 629: + goto st629 case 630: goto st630 + case 183: + goto st183 case 631: goto st631 - case 184: - goto st184 case 632: goto st632 case 633: goto st633 - case 634: - goto st634 + case 184: + goto st184 case 185: goto st185 case 
186: goto st186 + case 634: + goto st634 case 187: goto st187 - case 635: - goto st635 case 188: goto st188 case 189: goto st189 + case 635: + goto st635 case 190: goto st190 - case 636: - goto st636 case 191: goto st191 - case 192: - goto st192 + case 636: + goto st636 case 637: goto st637 - case 638: - goto st638 + case 192: + goto st192 case 193: goto st193 case 194: goto st194 + case 638: + goto st638 case 195: goto st195 - case 639: - goto st639 case 196: goto st196 - case 197: - goto st197 + case 639: + goto st639 case 640: goto st640 case 641: @@ -1315,38 +1323,38 @@ _again: goto st645 case 646: goto st646 - case 647: - goto st647 + case 197: + goto st197 case 198: goto st198 case 199: goto st199 + case 647: + goto st647 case 200: goto st200 - case 648: - goto st648 case 201: goto st201 case 202: goto st202 + case 648: + goto st648 case 203: goto st203 - case 649: - goto st649 case 204: goto st204 - case 205: - goto st205 + case 649: + goto st649 case 650: goto st650 - case 651: - goto st651 + case 205: + goto st205 case 206: goto st206 case 207: goto st207 - case 208: - goto st208 + case 651: + goto st651 case 652: goto st652 case 653: @@ -1383,8 +1391,8 @@ _again: goto st668 case 669: goto st669 - case 670: - goto st670 + case 208: + goto st208 case 209: goto st209 case 210: @@ -1393,14 +1401,14 @@ _again: goto st211 case 212: goto st212 + case 670: + goto st670 case 213: goto st213 - case 671: - goto st671 case 214: goto st214 - case 215: - goto st215 + case 671: + goto st671 case 672: goto st672 case 673: @@ -1417,44 +1425,44 @@ _again: goto st678 case 679: goto st679 - case 680: - goto st680 + case 215: + goto st215 case 216: goto st216 case 217: goto st217 + case 680: + goto st680 case 218: goto st218 - case 681: - goto st681 case 219: goto st219 case 220: goto st220 + case 681: + goto st681 case 221: goto st221 - case 682: - goto st682 case 222: goto st222 - case 223: - goto st223 + case 682: + goto st682 case 683: goto st683 - case 684: - goto st684 + case 223: + goto st223 case 224: goto st224 case 225: goto st225 + case 684: + goto st684 case 226: goto st226 - case 685: - goto st685 case 227: goto st227 - case 228: - goto st228 + case 685: + goto st685 case 686: goto st686 case 687: @@ -1469,20 +1477,20 @@ _again: goto st691 case 692: goto st692 - case 693: - goto st693 + case 228: + goto st228 case 229: goto st229 case 230: goto st230 + case 693: + goto st693 case 231: goto st231 - case 694: - goto st694 case 232: goto st232 - case 233: - goto st233 + case 694: + goto st694 case 695: goto st695 case 696: @@ -1497,38 +1505,38 @@ _again: goto st700 case 701: goto st701 - case 702: - goto st702 + case 233: + goto st233 case 234: goto st234 case 235: goto st235 + case 702: + goto st702 case 236: goto st236 - case 703: - goto st703 case 237: goto st237 case 238: goto st238 + case 703: + goto st703 case 239: goto st239 - case 704: - goto st704 case 240: goto st240 - case 241: - goto st241 + case 704: + goto st704 case 705: goto st705 - case 706: - goto st706 + case 241: + goto st241 case 242: goto st242 case 243: goto st243 - case 244: - goto st244 + case 706: + goto st706 case 707: goto st707 case 708: @@ -1565,18 +1573,18 @@ _again: goto st723 case 724: goto st724 - case 725: - goto st725 + case 244: + goto st244 case 245: goto st245 + case 725: + goto st725 case 246: goto st246 - case 726: - goto st726 case 247: goto st247 - case 248: - goto st248 + case 726: + goto st726 case 727: goto st727 case 728: @@ -1591,70 +1599,68 @@ _again: goto st732 case 733: goto st733 - case 
734: - goto st734 + case 248: + goto st248 case 249: goto st249 case 250: goto st250 + case 734: + goto st734 case 251: goto st251 - case 735: - goto st735 case 252: goto st252 case 253: goto st253 + case 735: + goto st735 case 254: goto st254 - case 736: - goto st736 case 255: goto st255 - case 256: - goto st256 + case 736: + goto st736 case 737: goto st737 - case 738: - goto st738 + case 256: + goto st256 case 257: goto st257 - case 258: - goto st258 - case 739: - goto st739 - case 261: - goto st261 + case 738: + goto st738 + case 260: + goto st260 + case 740: + goto st740 case 741: goto st741 - case 742: - goto st742 + case 261: + goto st261 case 262: goto st262 case 263: goto st263 case 264: goto st264 + case 742: + goto st742 case 265: goto st265 case 743: goto st743 case 266: goto st266 - case 744: - goto st744 case 267: goto st267 case 268: goto st268 - case 269: - goto st269 - case 740: - goto st740 + case 739: + goto st739 + case 258: + goto st258 case 259: goto st259 - case 260: - goto st260 } if ( m.p)++; ( m.p) == ( m.pe) { @@ -1662,8 +1668,8 @@ _again: } _resume: switch ( m.cs) { - case 270: - goto st_case_270 + case 269: + goto st_case_269 case 1: goto st_case_1 case 2: @@ -1678,14 +1684,14 @@ _resume: goto st_case_5 case 6: goto st_case_6 - case 7: - goto st_case_7 + case 270: + goto st_case_270 case 271: goto st_case_271 case 272: goto st_case_272 - case 273: - goto st_case_273 + case 7: + goto st_case_7 case 8: goto st_case_8 case 9: @@ -1734,24 +1740,24 @@ _resume: goto st_case_30 case 31: goto st_case_31 - case 32: - goto st_case_32 + case 273: + goto st_case_273 case 274: goto st_case_274 - case 275: - goto st_case_275 + case 32: + goto st_case_32 case 33: goto st_case_33 - case 34: - goto st_case_34 + case 275: + goto st_case_275 case 276: goto st_case_276 case 277: goto st_case_277 + case 34: + goto st_case_34 case 278: goto st_case_278 - case 35: - goto st_case_35 case 279: goto st_case_279 case 280: @@ -1786,18 +1792,18 @@ _resume: goto st_case_294 case 295: goto st_case_295 - case 296: - goto st_case_296 + case 35: + goto st_case_35 case 36: goto st_case_36 - case 37: - goto st_case_37 + case 296: + goto st_case_296 case 297: goto st_case_297 case 298: goto st_case_298 - case 299: - goto st_case_299 + case 37: + goto st_case_37 case 38: goto st_case_38 case 39: @@ -1806,18 +1812,18 @@ _resume: goto st_case_40 case 41: goto st_case_41 - case 42: - goto st_case_42 + case 299: + goto st_case_299 case 300: goto st_case_300 case 301: goto st_case_301 case 302: goto st_case_302 + case 42: + goto st_case_42 case 303: goto st_case_303 - case 43: - goto st_case_43 case 304: goto st_case_304 case 305: @@ -1860,8 +1866,8 @@ _resume: goto st_case_323 case 324: goto st_case_324 - case 325: - goto st_case_325 + case 43: + goto st_case_43 case 44: goto st_case_44 case 45: @@ -1880,14 +1886,14 @@ _resume: goto st_case_51 case 52: goto st_case_52 - case 53: - goto st_case_53 + case 325: + goto st_case_325 case 326: goto st_case_326 case 327: goto st_case_327 - case 328: - goto st_case_328 + case 53: + goto st_case_53 case 54: goto st_case_54 case 55: @@ -1898,14 +1904,14 @@ _resume: goto st_case_57 case 58: goto st_case_58 - case 59: - goto st_case_59 + case 328: + goto st_case_328 case 329: goto st_case_329 + case 59: + goto st_case_59 case 330: goto st_case_330 - case 60: - goto st_case_60 case 331: goto st_case_331 case 332: @@ -1944,18 +1950,18 @@ _resume: goto st_case_348 case 349: goto st_case_349 + case 60: + goto st_case_60 case 350: goto st_case_350 - case 61: - goto 
st_case_61 case 351: goto st_case_351 case 352: goto st_case_352 + case 61: + goto st_case_61 case 353: goto st_case_353 - case 62: - goto st_case_62 case 354: goto st_case_354 case 355: @@ -1994,8 +2000,8 @@ _resume: goto st_case_371 case 372: goto st_case_372 - case 373: - goto st_case_373 + case 62: + goto st_case_62 case 63: goto st_case_63 case 64: @@ -2004,10 +2010,10 @@ _resume: goto st_case_65 case 66: goto st_case_66 + case 373: + goto st_case_373 case 67: goto st_case_67 - case 374: - goto st_case_374 case 68: goto st_case_68 case 69: @@ -2016,18 +2022,20 @@ _resume: goto st_case_70 case 71: goto st_case_71 - case 72: - goto st_case_72 + case 374: + goto st_case_374 case 375: goto st_case_375 case 376: goto st_case_376 - case 377: - goto st_case_377 + case 72: + goto st_case_72 case 73: goto st_case_73 case 74: goto st_case_74 + case 377: + goto st_case_377 case 378: goto st_case_378 case 379: @@ -2036,8 +2044,6 @@ _resume: goto st_case_75 case 380: goto st_case_380 - case 76: - goto st_case_76 case 381: goto st_case_381 case 382: @@ -2076,8 +2082,8 @@ _resume: goto st_case_398 case 399: goto st_case_399 - case 400: - goto st_case_400 + case 76: + goto st_case_76 case 77: goto st_case_77 case 78: @@ -2104,52 +2110,52 @@ _resume: goto st_case_88 case 89: goto st_case_89 - case 90: - goto st_case_90 + case 400: + goto st_case_400 case 401: goto st_case_401 case 402: goto st_case_402 case 403: goto st_case_403 - case 404: - goto st_case_404 + case 90: + goto st_case_90 case 91: goto st_case_91 case 92: goto st_case_92 case 93: goto st_case_93 - case 94: - goto st_case_94 + case 404: + goto st_case_404 case 405: goto st_case_405 - case 406: - goto st_case_406 + case 94: + goto st_case_94 case 95: goto st_case_95 + case 406: + goto st_case_406 case 96: goto st_case_96 - case 407: - goto st_case_407 case 97: goto st_case_97 - case 98: - goto st_case_98 + case 407: + goto st_case_407 case 408: goto st_case_408 + case 98: + goto st_case_98 case 409: goto st_case_409 - case 99: - goto st_case_99 case 410: goto st_case_410 - case 411: - goto st_case_411 + case 99: + goto st_case_99 case 100: goto st_case_100 - case 101: - goto st_case_101 + case 411: + goto st_case_411 case 412: goto st_case_412 case 413: @@ -2184,28 +2190,28 @@ _resume: goto st_case_427 case 428: goto st_case_428 + case 101: + goto st_case_101 case 429: goto st_case_429 - case 102: - goto st_case_102 case 430: goto st_case_430 case 431: goto st_case_431 - case 432: - goto st_case_432 + case 102: + goto st_case_102 case 103: goto st_case_103 - case 104: - goto st_case_104 + case 432: + goto st_case_432 case 433: goto st_case_433 case 434: goto st_case_434 + case 104: + goto st_case_104 case 435: goto st_case_435 - case 105: - goto st_case_105 case 436: goto st_case_436 case 437: @@ -2244,10 +2250,10 @@ _resume: goto st_case_453 case 454: goto st_case_454 + case 105: + goto st_case_105 case 455: goto st_case_455 - case 106: - goto st_case_106 case 456: goto st_case_456 case 457: @@ -2290,8 +2296,8 @@ _resume: goto st_case_475 case 476: goto st_case_476 - case 477: - goto st_case_477 + case 106: + goto st_case_106 case 107: goto st_case_107 case 108: @@ -2300,18 +2306,18 @@ _resume: goto st_case_109 case 110: goto st_case_110 + case 477: + goto st_case_477 case 111: goto st_case_111 case 478: goto st_case_478 - case 112: - goto st_case_112 case 479: goto st_case_479 + case 112: + goto st_case_112 case 480: goto st_case_480 - case 113: - goto st_case_113 case 481: goto st_case_481 case 482: @@ -2328,48 +2334,48 @@ _resume: 
goto st_case_487 case 488: goto st_case_488 - case 489: - goto st_case_489 + case 113: + goto st_case_113 case 114: goto st_case_114 case 115: goto st_case_115 + case 489: + goto st_case_489 case 116: goto st_case_116 - case 490: - goto st_case_490 case 117: goto st_case_117 case 118: goto st_case_118 + case 490: + goto st_case_490 case 119: goto st_case_119 - case 491: - goto st_case_491 case 120: goto st_case_120 - case 121: - goto st_case_121 + case 491: + goto st_case_491 case 492: goto st_case_492 - case 493: - goto st_case_493 + case 121: + goto st_case_121 case 122: goto st_case_122 case 123: goto st_case_123 case 124: goto st_case_124 - case 125: - goto st_case_125 + case 493: + goto st_case_493 case 494: goto st_case_494 case 495: goto st_case_495 + case 125: + goto st_case_125 case 496: goto st_case_496 - case 126: - goto st_case_126 case 497: goto st_case_497 case 498: @@ -2408,12 +2414,12 @@ _resume: goto st_case_514 case 515: goto st_case_515 - case 516: - goto st_case_516 + case 126: + goto st_case_126 case 127: goto st_case_127 - case 128: - goto st_case_128 + case 516: + goto st_case_516 case 517: goto st_case_517 case 518: @@ -2430,48 +2436,48 @@ _resume: goto st_case_523 case 524: goto st_case_524 - case 525: - goto st_case_525 + case 128: + goto st_case_128 case 129: goto st_case_129 case 130: goto st_case_130 + case 525: + goto st_case_525 case 131: goto st_case_131 - case 526: - goto st_case_526 case 132: goto st_case_132 case 133: goto st_case_133 + case 526: + goto st_case_526 case 134: goto st_case_134 - case 527: - goto st_case_527 case 135: goto st_case_135 - case 136: - goto st_case_136 + case 527: + goto st_case_527 case 528: goto st_case_528 - case 529: - goto st_case_529 + case 136: + goto st_case_136 case 137: goto st_case_137 case 138: goto st_case_138 - case 139: - goto st_case_139 + case 529: + goto st_case_529 case 530: goto st_case_530 + case 139: + goto st_case_139 case 531: goto st_case_531 case 140: goto st_case_140 case 532: goto st_case_532 - case 141: - goto st_case_141 case 533: goto st_case_533 case 534: @@ -2486,28 +2492,28 @@ _resume: goto st_case_538 case 539: goto st_case_539 - case 540: - goto st_case_540 + case 141: + goto st_case_141 case 142: goto st_case_142 case 143: goto st_case_143 + case 540: + goto st_case_540 case 144: goto st_case_144 - case 541: - goto st_case_541 case 145: goto st_case_145 case 146: goto st_case_146 + case 541: + goto st_case_541 case 147: goto st_case_147 - case 542: - goto st_case_542 case 148: goto st_case_148 - case 149: - goto st_case_149 + case 542: + goto st_case_542 case 543: goto st_case_543 case 544: @@ -2546,26 +2552,26 @@ _resume: goto st_case_560 case 561: goto st_case_561 - case 562: - goto st_case_562 + case 149: + goto st_case_149 case 150: goto st_case_150 - case 151: - goto st_case_151 + case 562: + goto st_case_562 case 563: goto st_case_563 case 564: goto st_case_564 + case 151: + goto st_case_151 case 565: goto st_case_565 - case 152: - goto st_case_152 case 566: goto st_case_566 + case 152: + goto st_case_152 case 567: goto st_case_567 - case 153: - goto st_case_153 case 568: goto st_case_568 case 569: @@ -2600,16 +2606,16 @@ _resume: goto st_case_583 case 584: goto st_case_584 - case 585: - goto st_case_585 + case 153: + goto st_case_153 case 154: goto st_case_154 + case 585: + goto st_case_585 case 155: goto st_case_155 case 586: goto st_case_586 - case 156: - goto st_case_156 case 587: goto st_case_587 case 588: @@ -2624,32 +2630,32 @@ _resume: goto st_case_592 case 593: goto st_case_593 
- case 594: - goto st_case_594 + case 156: + goto st_case_156 case 157: goto st_case_157 case 158: goto st_case_158 + case 594: + goto st_case_594 case 159: goto st_case_159 - case 595: - goto st_case_595 case 160: goto st_case_160 case 161: goto st_case_161 + case 595: + goto st_case_595 case 162: goto st_case_162 - case 596: - goto st_case_596 case 163: goto st_case_163 - case 164: - goto st_case_164 + case 596: + goto st_case_596 case 597: goto st_case_597 - case 598: - goto st_case_598 + case 164: + goto st_case_164 case 165: goto st_case_165 case 166: @@ -2660,8 +2666,8 @@ _resume: goto st_case_168 case 169: goto st_case_169 - case 170: - goto st_case_170 + case 598: + goto st_case_598 case 599: goto st_case_599 case 600: @@ -2698,28 +2704,28 @@ _resume: goto st_case_615 case 616: goto st_case_616 - case 617: - goto st_case_617 + case 170: + goto st_case_170 case 171: goto st_case_171 case 172: goto st_case_172 - case 173: - goto st_case_173 + case 617: + goto st_case_617 case 618: goto st_case_618 case 619: goto st_case_619 + case 173: + goto st_case_173 case 620: goto st_case_620 - case 174: - goto st_case_174 case 621: goto st_case_621 + case 174: + goto st_case_174 case 622: goto st_case_622 - case 175: - goto st_case_175 case 623: goto st_case_623 case 624: @@ -2728,76 +2734,76 @@ _resume: goto st_case_625 case 626: goto st_case_626 - case 627: - goto st_case_627 + case 175: + goto st_case_175 case 176: goto st_case_176 case 177: goto st_case_177 + case 627: + goto st_case_627 case 178: goto st_case_178 - case 628: - goto st_case_628 case 179: goto st_case_179 case 180: goto st_case_180 + case 628: + goto st_case_628 case 181: goto st_case_181 - case 629: - goto st_case_629 case 182: goto st_case_182 - case 183: - goto st_case_183 + case 629: + goto st_case_629 case 630: goto st_case_630 + case 183: + goto st_case_183 case 631: goto st_case_631 - case 184: - goto st_case_184 case 632: goto st_case_632 case 633: goto st_case_633 - case 634: - goto st_case_634 + case 184: + goto st_case_184 case 185: goto st_case_185 case 186: goto st_case_186 + case 634: + goto st_case_634 case 187: goto st_case_187 - case 635: - goto st_case_635 case 188: goto st_case_188 case 189: goto st_case_189 + case 635: + goto st_case_635 case 190: goto st_case_190 - case 636: - goto st_case_636 case 191: goto st_case_191 - case 192: - goto st_case_192 + case 636: + goto st_case_636 case 637: goto st_case_637 - case 638: - goto st_case_638 + case 192: + goto st_case_192 case 193: goto st_case_193 case 194: goto st_case_194 + case 638: + goto st_case_638 case 195: goto st_case_195 - case 639: - goto st_case_639 case 196: goto st_case_196 - case 197: - goto st_case_197 + case 639: + goto st_case_639 case 640: goto st_case_640 case 641: @@ -2812,38 +2818,38 @@ _resume: goto st_case_645 case 646: goto st_case_646 - case 647: - goto st_case_647 + case 197: + goto st_case_197 case 198: goto st_case_198 case 199: goto st_case_199 + case 647: + goto st_case_647 case 200: goto st_case_200 - case 648: - goto st_case_648 case 201: goto st_case_201 case 202: goto st_case_202 + case 648: + goto st_case_648 case 203: goto st_case_203 - case 649: - goto st_case_649 case 204: goto st_case_204 - case 205: - goto st_case_205 + case 649: + goto st_case_649 case 650: goto st_case_650 - case 651: - goto st_case_651 + case 205: + goto st_case_205 case 206: goto st_case_206 case 207: goto st_case_207 - case 208: - goto st_case_208 + case 651: + goto st_case_651 case 652: goto st_case_652 case 653: @@ -2880,8 +2886,8 @@ _resume: 
goto st_case_668 case 669: goto st_case_669 - case 670: - goto st_case_670 + case 208: + goto st_case_208 case 209: goto st_case_209 case 210: @@ -2890,14 +2896,14 @@ _resume: goto st_case_211 case 212: goto st_case_212 + case 670: + goto st_case_670 case 213: goto st_case_213 - case 671: - goto st_case_671 case 214: goto st_case_214 - case 215: - goto st_case_215 + case 671: + goto st_case_671 case 672: goto st_case_672 case 673: @@ -2914,44 +2920,44 @@ _resume: goto st_case_678 case 679: goto st_case_679 - case 680: - goto st_case_680 + case 215: + goto st_case_215 case 216: goto st_case_216 case 217: goto st_case_217 + case 680: + goto st_case_680 case 218: goto st_case_218 - case 681: - goto st_case_681 case 219: goto st_case_219 case 220: goto st_case_220 + case 681: + goto st_case_681 case 221: goto st_case_221 - case 682: - goto st_case_682 case 222: goto st_case_222 - case 223: - goto st_case_223 + case 682: + goto st_case_682 case 683: goto st_case_683 - case 684: - goto st_case_684 + case 223: + goto st_case_223 case 224: goto st_case_224 case 225: goto st_case_225 + case 684: + goto st_case_684 case 226: goto st_case_226 - case 685: - goto st_case_685 case 227: goto st_case_227 - case 228: - goto st_case_228 + case 685: + goto st_case_685 case 686: goto st_case_686 case 687: @@ -2966,20 +2972,20 @@ _resume: goto st_case_691 case 692: goto st_case_692 - case 693: - goto st_case_693 + case 228: + goto st_case_228 case 229: goto st_case_229 case 230: goto st_case_230 + case 693: + goto st_case_693 case 231: goto st_case_231 - case 694: - goto st_case_694 case 232: goto st_case_232 - case 233: - goto st_case_233 + case 694: + goto st_case_694 case 695: goto st_case_695 case 696: @@ -2994,38 +3000,38 @@ _resume: goto st_case_700 case 701: goto st_case_701 - case 702: - goto st_case_702 + case 233: + goto st_case_233 case 234: goto st_case_234 case 235: goto st_case_235 + case 702: + goto st_case_702 case 236: goto st_case_236 - case 703: - goto st_case_703 case 237: goto st_case_237 case 238: goto st_case_238 + case 703: + goto st_case_703 case 239: goto st_case_239 - case 704: - goto st_case_704 case 240: goto st_case_240 - case 241: - goto st_case_241 + case 704: + goto st_case_704 case 705: goto st_case_705 - case 706: - goto st_case_706 + case 241: + goto st_case_241 case 242: goto st_case_242 case 243: goto st_case_243 - case 244: - goto st_case_244 + case 706: + goto st_case_706 case 707: goto st_case_707 case 708: @@ -3062,18 +3068,18 @@ _resume: goto st_case_723 case 724: goto st_case_724 - case 725: - goto st_case_725 + case 244: + goto st_case_244 case 245: goto st_case_245 + case 725: + goto st_case_725 case 246: goto st_case_246 - case 726: - goto st_case_726 case 247: goto st_case_247 - case 248: - goto st_case_248 + case 726: + goto st_case_726 case 727: goto st_case_727 case 728: @@ -3088,109 +3094,107 @@ _resume: goto st_case_732 case 733: goto st_case_733 - case 734: - goto st_case_734 + case 248: + goto st_case_248 case 249: goto st_case_249 case 250: goto st_case_250 + case 734: + goto st_case_734 case 251: goto st_case_251 - case 735: - goto st_case_735 case 252: goto st_case_252 case 253: goto st_case_253 + case 735: + goto st_case_735 case 254: goto st_case_254 - case 736: - goto st_case_736 case 255: goto st_case_255 - case 256: - goto st_case_256 + case 736: + goto st_case_736 case 737: goto st_case_737 - case 738: - goto st_case_738 + case 256: + goto st_case_256 case 257: goto st_case_257 - case 258: - goto st_case_258 - case 739: - goto st_case_739 - case 
261: - goto st_case_261 + case 738: + goto st_case_738 + case 260: + goto st_case_260 + case 740: + goto st_case_740 case 741: goto st_case_741 - case 742: - goto st_case_742 + case 261: + goto st_case_261 case 262: goto st_case_262 case 263: goto st_case_263 case 264: goto st_case_264 + case 742: + goto st_case_742 case 265: goto st_case_265 case 743: goto st_case_743 case 266: goto st_case_266 - case 744: - goto st_case_744 case 267: goto st_case_267 case 268: goto st_case_268 - case 269: - goto st_case_269 - case 740: - goto st_case_740 + case 739: + goto st_case_739 + case 258: + goto st_case_258 case 259: goto st_case_259 - case 260: - goto st_case_260 } goto st_out - st270: + st269: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof270 + goto _test_eof269 } - st_case_270: + st_case_269: switch ( m.data)[( m.p)] { case 10: - goto tr35 + goto tr33 case 11: - goto tr459 + goto tr457 case 13: - goto tr35 + goto tr33 case 32: - goto tr458 + goto tr456 case 35: - goto tr35 + goto tr33 case 44: - goto tr35 + goto tr33 case 92: - goto tr460 + goto tr458 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr458 + goto tr456 } - goto tr457 -tr33: -//line plugins/parsers/influx/machine.go.rl:20 + goto tr455 +tr31: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p goto st1 -tr457: -//line plugins/parsers/influx/machine.go.rl:74 +tr455: +//line plugins/parsers/influx/machine.go.rl:82 m.beginMetric = true -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -3200,7 +3204,7 @@ tr457: goto _test_eof1 } st_case_1: -//line plugins/parsers/influx/machine.go:3204 +//line plugins/parsers/influx/machine.go:3208 switch ( m.data)[( m.p)] { case 10: goto tr2 @@ -3213,7 +3217,7 @@ tr457: case 44: goto tr4 case 92: - goto st95 + goto st94 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 @@ -3221,26 +3225,26 @@ tr457: goto st1 tr1: ( m.cs) = 2 -//line plugins/parsers/influx/machine.go.rl:78 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr60: +tr58: ( m.cs) = 2 -//line plugins/parsers/influx/machine.go.rl:91 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } @@ -3250,7 +3254,7 @@ tr60: goto _test_eof2 } st_case_2: -//line plugins/parsers/influx/machine.go:3254 +//line plugins/parsers/influx/machine.go:3258 switch ( m.data)[( m.p)] { case 10: goto tr8 @@ -3272,7 +3276,7 @@ tr60: } goto tr6 tr6: -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -3282,7 +3286,7 @@ tr6: goto _test_eof3 } st_case_3: -//line plugins/parsers/influx/machine.go:3286 +//line plugins/parsers/influx/machine.go:3290 switch ( m.data)[( m.p)] { case 32: goto tr8 @@ -3291,7 +3295,7 @@ tr6: case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( m.data)[( m.p)] > 10: @@ -3304,212 +3308,212 @@ tr6: goto st3 tr2: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:38 +//line plugins/parsers/influx/machine.go.rl:46 err = ErrTagParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } goto _again tr8: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:31 +//line plugins/parsers/influx/machine.go.rl:39 err = ErrFieldParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } goto _again -tr35: 
+tr33: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:24 +//line plugins/parsers/influx/machine.go.rl:32 err = ErrNameParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } goto _again -tr39: +tr37: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:24 +//line plugins/parsers/influx/machine.go.rl:32 err = ErrNameParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } -//line plugins/parsers/influx/machine.go.rl:38 +//line plugins/parsers/influx/machine.go.rl:46 err = ErrTagParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } goto _again -tr43: +tr41: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:24 +//line plugins/parsers/influx/machine.go.rl:32 err = ErrNameParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } -//line plugins/parsers/influx/machine.go.rl:31 +//line plugins/parsers/influx/machine.go.rl:39 err = ErrFieldParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } goto _again -tr47: +tr45: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:38 +//line plugins/parsers/influx/machine.go.rl:46 err = ErrTagParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } -//line plugins/parsers/influx/machine.go.rl:31 +//line plugins/parsers/influx/machine.go.rl:39 err = ErrFieldParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } goto _again -tr105: +tr103: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:31 +//line plugins/parsers/influx/machine.go.rl:39 err = ErrFieldParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } -//line plugins/parsers/influx/machine.go.rl:45 +//line plugins/parsers/influx/machine.go.rl:53 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } goto _again -tr132: +tr130: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:38 +//line plugins/parsers/influx/machine.go.rl:46 err = ErrTagParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } -//line plugins/parsers/influx/machine.go.rl:31 +//line plugins/parsers/influx/machine.go.rl:39 err = ErrFieldParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } -//line plugins/parsers/influx/machine.go.rl:45 +//line plugins/parsers/influx/machine.go.rl:53 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } goto _again -tr198: +tr196: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:38 +//line plugins/parsers/influx/machine.go.rl:46 err = ErrTagParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } -//line plugins/parsers/influx/machine.go.rl:45 +//line plugins/parsers/influx/machine.go.rl:53 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } goto _again -tr423: +tr421: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:24 +//line plugins/parsers/influx/machine.go.rl:32 err = ErrNameParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } -//line plugins/parsers/influx/machine.go.rl:38 +//line plugins/parsers/influx/machine.go.rl:46 err = ErrTagParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } -//line plugins/parsers/influx/machine.go.rl:31 +//line plugins/parsers/influx/machine.go.rl:39 err = ErrFieldParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } goto _again -tr426: +tr424: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:45 +//line plugins/parsers/influx/machine.go.rl:53 err = ErrTimestampParse ( m.p)-- 
- ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } goto _again -tr1055: -//line plugins/parsers/influx/machine.go.rl:65 +tr1053: +//line plugins/parsers/influx/machine.go.rl:73 ( m.p)-- - {goto st270 } + {goto st269 } goto st0 -//line plugins/parsers/influx/machine.go:3507 +//line plugins/parsers/influx/machine.go:3511 st_case_0: st0: ( m.cs) = 0 goto _out tr12: -//line plugins/parsers/influx/machine.go.rl:100 +//line plugins/parsers/influx/machine.go.rl:108 m.key = m.text() @@ -3519,7 +3523,7 @@ tr12: goto _test_eof4 } st_case_4: -//line plugins/parsers/influx/machine.go:3523 +//line plugins/parsers/influx/machine.go:3527 switch ( m.data)[( m.p)] { case 34: goto st5 @@ -3550,36 +3554,32 @@ tr12: switch ( m.data)[( m.p)] { case 10: goto tr24 - case 12: - goto tr8 - case 13: - goto tr25 case 34: - goto tr26 + goto tr25 case 92: - goto tr27 + goto tr26 } goto tr23 tr23: -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p goto st6 tr24: -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:158 +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line goto st6 -tr29: -//line plugins/parsers/influx/machine.go.rl:158 +tr28: +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p @@ -3594,59 +3594,111 @@ tr29: //line plugins/parsers/influx/machine.go:3595 switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } goto st6 tr25: -//line plugins/parsers/influx/machine.go.rl:20 + ( m.cs) = 270 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st7 - st7: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof7 - } - st_case_7: -//line plugins/parsers/influx/machine.go:3620 - if ( m.data)[( m.p)] == 10 { - goto tr29 - } - goto tr8 -tr26: - ( m.cs) = 271 -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:140 +//line plugins/parsers/influx/machine.go.rl:148 err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr31: - ( m.cs) = 271 -//line plugins/parsers/influx/machine.go.rl:140 +tr29: + ( m.cs) = 270 +//line plugins/parsers/influx/machine.go.rl:148 err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st270: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof270 + } + st_case_270: +//line plugins/parsers/influx/machine.go:3640 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 13: + goto st32 + case 32: + goto st271 + case 44: + goto st35 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st271 + } + goto tr103 +tr921: + ( m.cs) = 271 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1041: + ( m.cs) = 271 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1044: + ( m.cs) = 271 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + 
( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1047: + ( m.cs) = 271 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; {( m.p)++; goto _out } } @@ -3656,774 +3708,435 @@ tr31: goto _test_eof271 } st_case_271: -//line plugins/parsers/influx/machine.go:3660 +//line plugins/parsers/influx/machine.go:3712 switch ( m.data)[( m.p)] { case 10: - goto tr103 + goto tr101 case 13: - goto st33 + goto st32 case 32: - goto st272 - case 44: - goto st36 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st272 - } - goto tr105 -tr535: - ( m.cs) = 272 -//line plugins/parsers/influx/machine.go.rl:122 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr932: - ( m.cs) = 272 -//line plugins/parsers/influx/machine.go.rl:104 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr935: - ( m.cs) = 272 -//line plugins/parsers/influx/machine.go.rl:113 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr939: - ( m.cs) = 272 -//line plugins/parsers/influx/machine.go.rl:131 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st272: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof272 - } - st_case_272: -//line plugins/parsers/influx/machine.go:3732 - switch ( m.data)[( m.p)] { - case 10: - goto tr103 - case 13: - goto st33 - case 32: - goto st272 + goto st271 case 45: - goto tr464 + goto tr462 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr465 + goto tr463 } case ( m.data)[( m.p)] >= 9: - goto st272 + goto st271 } - goto tr426 -tr103: -//line plugins/parsers/influx/machine.go.rl:158 + goto tr424 +tr101: +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line - goto st273 -tr470: - ( m.cs) = 273 -//line plugins/parsers/influx/machine.go.rl:149 + goto st272 +tr468: + ( m.cs) = 272 +//line plugins/parsers/influx/machine.go.rl:157 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:158 +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line goto _again -tr734: - ( m.cs) = 273 -//line plugins/parsers/influx/machine.go.rl:122 +tr730: + ( m.cs) = 272 +//line plugins/parsers/influx/machine.go.rl:130 err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:158 +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line goto _again -tr952: - ( m.cs) = 273 -//line plugins/parsers/influx/machine.go.rl:104 +tr942: + ( m.cs) = 272 +//line plugins/parsers/influx/machine.go.rl:112 err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:158 +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in 
the line goto _again -tr957: - ( m.cs) = 273 -//line plugins/parsers/influx/machine.go.rl:113 +tr948: + ( m.cs) = 272 +//line plugins/parsers/influx/machine.go.rl:121 err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:158 +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line goto _again -tr962: - ( m.cs) = 273 -//line plugins/parsers/influx/machine.go.rl:131 +tr954: + ( m.cs) = 272 +//line plugins/parsers/influx/machine.go.rl:139 err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:158 +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line goto _again - st273: -//line plugins/parsers/influx/machine.go.rl:164 + st272: +//line plugins/parsers/influx/machine.go.rl:172 m.finishMetric = true - ( m.cs) = 740; + ( m.cs) = 739; {( m.p)++; goto _out } if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof273 + goto _test_eof272 } - st_case_273: -//line plugins/parsers/influx/machine.go:3866 + st_case_272: +//line plugins/parsers/influx/machine.go:3846 switch ( m.data)[( m.p)] { case 10: - goto tr35 + goto tr33 case 11: - goto tr36 + goto tr34 case 13: - goto tr35 + goto tr33 case 32: - goto st8 + goto st7 case 35: - goto tr35 + goto tr33 case 44: - goto tr35 + goto tr33 case 92: - goto tr37 + goto tr35 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st8 + goto st7 } - goto tr33 -tr458: -//line plugins/parsers/influx/machine.go.rl:74 + goto tr31 +tr456: +//line plugins/parsers/influx/machine.go.rl:82 m.beginMetric = true + goto st7 + st7: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof7 + } + st_case_7: +//line plugins/parsers/influx/machine.go:3878 + switch ( m.data)[( m.p)] { + case 10: + goto tr33 + case 11: + goto tr34 + case 13: + goto tr33 + case 32: + goto st7 + case 35: + goto tr33 + case 44: + goto tr33 + case 92: + goto tr35 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st7 + } + goto tr31 +tr34: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st8 +tr457: +//line plugins/parsers/influx/machine.go.rl:82 + + m.beginMetric = true + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + goto st8 st8: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof8 } st_case_8: -//line plugins/parsers/influx/machine.go:3898 +//line plugins/parsers/influx/machine.go:3920 switch ( m.data)[( m.p)] { case 10: - goto tr35 - case 11: - goto tr36 - case 13: - goto tr35 - case 32: - goto st8 - case 35: - goto tr35 - case 44: - goto tr35 - case 92: goto tr37 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st8 - } - goto tr33 -tr36: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st9 -tr459: -//line plugins/parsers/influx/machine.go.rl:74 - - m.beginMetric = true - -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st9 - st9: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof9 - } - st_case_9: -//line plugins/parsers/influx/machine.go:3940 - switch ( m.data)[( m.p)] { - case 10: - goto tr39 case 11: - goto tr40 - case 13: - goto tr39 - case 32: goto tr38 + case 13: + goto tr37 + case 32: + goto tr36 case 35: goto st1 case 44: goto tr4 case 92: - goto tr37 + goto tr35 } if 9 <= ( m.data)[( m.p)] && ( 
m.data)[( m.p)] <= 12 { - goto tr38 + goto tr36 } - goto tr33 -tr38: - ( m.cs) = 10 -//line plugins/parsers/influx/machine.go.rl:78 + goto tr31 +tr36: + ( m.cs) = 9 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again + st9: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof9 + } + st_case_9: +//line plugins/parsers/influx/machine.go:3959 + switch ( m.data)[( m.p)] { + case 10: + goto tr41 + case 11: + goto tr42 + case 13: + goto tr41 + case 32: + goto st9 + case 35: + goto tr6 + case 44: + goto tr41 + case 61: + goto tr31 + case 92: + goto tr43 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st9 + } + goto tr39 +tr39: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st10 st10: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof10 } st_case_10: -//line plugins/parsers/influx/machine.go:3979 +//line plugins/parsers/influx/machine.go:3993 switch ( m.data)[( m.p)] { case 10: - goto tr43 - case 11: - goto tr44 - case 13: - goto tr43 - case 32: - goto st10 - case 35: - goto tr6 - case 44: - goto tr43 - case 61: - goto tr33 - case 92: goto tr45 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st10 - } - goto tr41 -tr41: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st11 - st11: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof11 - } - st_case_11: -//line plugins/parsers/influx/machine.go:4013 - switch ( m.data)[( m.p)] { - case 10: - goto tr47 case 11: - goto tr48 + goto tr46 case 13: - goto tr47 + goto tr45 case 32: goto tr1 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 + goto st27 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 } - goto st11 -tr48: - ( m.cs) = 12 -//line plugins/parsers/influx/machine.go.rl:78 + goto st10 +tr46: + ( m.cs) = 11 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr51: - ( m.cs) = 12 -//line plugins/parsers/influx/machine.go.rl:78 +tr49: + ( m.cs) = 11 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p + goto _again + st11: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof11 + } + st_case_11: +//line plugins/parsers/influx/machine.go:4049 + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr49 + case 13: + goto tr45 + case 32: + goto tr1 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto tr43 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto tr39 +tr4: + ( m.cs) = 12 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr60: + ( m.cs) = 12 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + goto _again st12: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof12 } st_case_12: -//line plugins/parsers/influx/machine.go:4069 +//line plugins/parsers/influx/machine.go:4101 switch ( m.data)[( m.p)] { - case 10: - goto tr47 - case 11: - goto tr51 
- case 13: - goto tr47 case 32: - goto tr1 + goto tr2 case 44: - goto tr4 + goto tr2 case 61: - goto tr49 + goto tr2 case 92: - goto tr45 + goto tr51 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 } - goto tr41 -tr4: - ( m.cs) = 13 -//line plugins/parsers/influx/machine.go.rl:78 + goto tr50 +tr50: +//line plugins/parsers/influx/machine.go.rl:28 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + m.pb = m.p - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr62: - ( m.cs) = 13 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again + goto st13 st13: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof13 } st_case_13: -//line plugins/parsers/influx/machine.go:4121 +//line plugins/parsers/influx/machine.go:4132 switch ( m.data)[( m.p)] { case 32: goto tr2 case 44: goto tr2 case 61: - goto tr2 - case 92: goto tr53 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 - } - goto tr52 -tr52: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st14 - st14: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof14 - } - st_case_14: -//line plugins/parsers/influx/machine.go:4152 - switch ( m.data)[( m.p)] { - case 32: - goto tr2 - case 44: - goto tr2 - case 61: - goto tr55 case 92: - goto st24 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 - } - goto st14 -tr55: -//line plugins/parsers/influx/machine.go.rl:87 - - m.key = m.text() - - goto st15 - st15: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof15 - } - st_case_15: -//line plugins/parsers/influx/machine.go:4183 - switch ( m.data)[( m.p)] { - case 32: - goto tr2 - case 44: - goto tr2 - case 61: - goto tr2 - case 92: - goto tr58 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 - } - goto tr57 -tr57: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st16 - st16: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof16 - } - st_case_16: -//line plugins/parsers/influx/machine.go:4214 - switch ( m.data)[( m.p)] { - case 10: - goto tr2 - case 11: - goto tr61 - case 13: - goto tr2 - case 32: - goto tr60 - case 44: - goto tr62 - case 61: - goto tr2 - case 92: - goto st22 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st16 -tr61: - ( m.cs) = 17 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st17: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof17 - } - st_case_17: -//line plugins/parsers/influx/machine.go:4253 - switch ( m.data)[( m.p)] { - case 10: - goto tr47 - case 11: - goto tr65 - case 13: - goto tr47 - case 32: - goto tr60 - case 44: - goto tr62 - case 61: - goto tr47 - case 92: - goto tr66 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto tr64 -tr64: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st18 - st18: - if ( 
m.p)++; ( m.p) == ( m.pe) { - goto _test_eof18 - } - st_case_18: -//line plugins/parsers/influx/machine.go:4285 - switch ( m.data)[( m.p)] { - case 10: - goto tr47 - case 11: - goto tr68 - case 13: - goto tr47 - case 32: - goto tr60 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st18 -tr68: - ( m.cs) = 19 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr65: - ( m.cs) = 19 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto _again - st19: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof19 - } - st_case_19: -//line plugins/parsers/influx/machine.go:4341 - switch ( m.data)[( m.p)] { - case 10: - goto tr47 - case 11: - goto tr65 - case 13: - goto tr47 - case 32: - goto tr60 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto tr66 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto tr64 -tr66: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st20 - st20: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof20 - } - st_case_20: -//line plugins/parsers/influx/machine.go:4373 - if ( m.data)[( m.p)] == 92 { - goto st21 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr47 - } - case ( m.data)[( m.p)] >= 9: - goto tr47 - } - goto st18 - st21: -//line plugins/parsers/influx/machine.go.rl:240 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof21 - } - st_case_21: -//line plugins/parsers/influx/machine.go:4394 - switch ( m.data)[( m.p)] { - case 10: - goto tr47 - case 11: - goto tr68 - case 13: - goto tr47 - case 32: - goto tr60 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st18 -tr58: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st22 - st22: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof22 - } - st_case_22: -//line plugins/parsers/influx/machine.go:4426 - if ( m.data)[( m.p)] == 92 { goto st23 } switch { @@ -4434,50 +4147,28 @@ tr58: case ( m.data)[( m.p)] >= 9: goto tr2 } - goto st16 - st23: -//line plugins/parsers/influx/machine.go.rl:240 - ( m.p)-- - + goto st13 +tr53: +//line plugins/parsers/influx/machine.go.rl:95 + + m.key = m.text() + + goto st14 + st14: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof23 + goto _test_eof14 } - st_case_23: -//line plugins/parsers/influx/machine.go:4447 + st_case_14: +//line plugins/parsers/influx/machine.go:4163 switch ( m.data)[( m.p)] { - case 10: - goto tr2 - case 11: - goto tr61 - case 13: - goto tr2 case 32: - goto tr60 + goto tr2 case 44: - goto tr62 + goto tr2 case 61: goto tr2 case 92: - goto st22 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st16 -tr53: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st24 - st24: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof24 - } - st_case_24: -//line plugins/parsers/influx/machine.go:4479 - if ( m.data)[( m.p)] == 92 { - goto st25 + goto tr56 } switch { case ( m.data)[( m.p)] > 10: @@ -4487,24 +4178,285 @@ tr53: case ( m.data)[( m.p)] >= 9: goto 
tr2 } - goto st14 - st25: -//line plugins/parsers/influx/machine.go.rl:240 + goto tr55 +tr55: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st15 + st15: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof15 + } + st_case_15: +//line plugins/parsers/influx/machine.go:4194 + switch ( m.data)[( m.p)] { + case 10: + goto tr2 + case 11: + goto tr59 + case 13: + goto tr2 + case 32: + goto tr58 + case 44: + goto tr60 + case 61: + goto tr2 + case 92: + goto st21 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr58 + } + goto st15 +tr59: + ( m.cs) = 16 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st16: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof16 + } + st_case_16: +//line plugins/parsers/influx/machine.go:4233 + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr63 + case 13: + goto tr45 + case 32: + goto tr58 + case 44: + goto tr60 + case 61: + goto tr45 + case 92: + goto tr64 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr58 + } + goto tr62 +tr62: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st17 + st17: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof17 + } + st_case_17: +//line plugins/parsers/influx/machine.go:4265 + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr66 + case 13: + goto tr45 + case 32: + goto tr58 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr58 + } + goto st17 +tr66: + ( m.cs) = 18 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr63: + ( m.cs) = 18 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again + st18: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof18 + } + st_case_18: +//line plugins/parsers/influx/machine.go:4321 + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr63 + case 13: + goto tr45 + case 32: + goto tr58 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto tr64 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr58 + } + goto tr62 +tr64: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st19 + st19: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof19 + } + st_case_19: +//line plugins/parsers/influx/machine.go:4353 + if ( m.data)[( m.p)] == 92 { + goto st20 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 + } + goto st17 + st20: +//line plugins/parsers/influx/machine.go.rl:248 ( m.p)-- if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof25 + goto _test_eof20 } - st_case_25: -//line plugins/parsers/influx/machine.go:4500 + st_case_20: +//line plugins/parsers/influx/machine.go:4374 switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr66 + case 13: + goto tr45 case 32: - goto tr2 + goto tr58 case 44: - goto tr2 + goto tr60 case 61: - goto tr55 + goto tr12 case 92: + goto st19 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr58 + } + goto 
st17 +tr56: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st21 + st21: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof21 + } + st_case_21: +//line plugins/parsers/influx/machine.go:4406 + if ( m.data)[( m.p)] == 92 { + goto st22 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 + } + goto st15 + st22: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof22 + } + st_case_22: +//line plugins/parsers/influx/machine.go:4427 + switch ( m.data)[( m.p)] { + case 10: + goto tr2 + case 11: + goto tr59 + case 13: + goto tr2 + case 32: + goto tr58 + case 44: + goto tr60 + case 61: + goto tr2 + case 92: + goto st21 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr58 + } + goto st15 +tr51: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st23 + st23: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof23 + } + st_case_23: +//line plugins/parsers/influx/machine.go:4459 + if ( m.data)[( m.p)] == 92 { goto st24 } switch { @@ -4515,94 +4467,122 @@ tr53: case ( m.data)[( m.p)] >= 9: goto tr2 } - goto st14 -tr49: -//line plugins/parsers/influx/machine.go.rl:100 + goto st13 + st24: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof24 + } + st_case_24: +//line plugins/parsers/influx/machine.go:4480 + switch ( m.data)[( m.p)] { + case 32: + goto tr2 + case 44: + goto tr2 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 + } + goto st13 +tr47: +//line plugins/parsers/influx/machine.go.rl:108 m.key = m.text() - goto st26 -tr425: -//line plugins/parsers/influx/machine.go.rl:20 + goto st25 +tr423: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:100 +//line plugins/parsers/influx/machine.go.rl:108 m.key = m.text() - goto st26 - st26: + goto st25 + st25: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof26 + goto _test_eof25 } - st_case_26: -//line plugins/parsers/influx/machine.go:4541 + st_case_25: +//line plugins/parsers/influx/machine.go:4521 switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: goto tr3 case 13: - goto tr47 + goto tr45 case 32: goto tr1 case 34: - goto st29 + goto st28 case 44: goto tr4 case 45: - goto tr74 + goto tr72 case 46: - goto tr75 + goto tr73 case 48: - goto tr76 + goto tr74 case 70: - goto tr78 + goto tr76 case 84: - goto tr79 + goto tr77 case 92: - goto st95 + goto st94 case 102: - goto tr80 + goto tr78 case 116: - goto tr81 + goto tr79 } switch { case ( m.data)[( m.p)] > 12: if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr77 + goto tr75 } case ( m.data)[( m.p)] >= 9: goto tr1 } goto st1 tr3: - ( m.cs) = 27 -//line plugins/parsers/influx/machine.go.rl:78 + ( m.cs) = 26 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again - st27: + st26: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof27 + goto _test_eof26 } - st_case_27: -//line plugins/parsers/influx/machine.go:4599 + st_case_26: +//line plugins/parsers/influx/machine.go:4579 switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: - goto tr51 + goto 
tr49 case 13: - goto tr47 + goto tr45 case 32: goto tr1 case 44: @@ -4610,24 +4590,24 @@ tr3: case 61: goto st1 case 92: - goto tr45 + goto tr43 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 } - goto tr41 -tr45: -//line plugins/parsers/influx/machine.go.rl:20 + goto tr39 +tr43: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st28 - st28: + goto st27 + st27: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof28 + goto _test_eof27 } - st_case_28: -//line plugins/parsers/influx/machine.go:4631 + st_case_27: +//line plugins/parsers/influx/machine.go:4611 switch { case ( m.data)[( m.p)] > 10: if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { @@ -4636,432 +4616,459 @@ tr45: case ( m.data)[( m.p)] >= 9: goto tr8 } - goto st11 + goto st10 + st28: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof28 + } + st_case_28: + switch ( m.data)[( m.p)] { + case 10: + goto tr24 + case 11: + goto tr82 + case 13: + goto tr23 + case 32: + goto tr81 + case 34: + goto tr83 + case 44: + goto tr84 + case 92: + goto tr85 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr81 + } + goto tr80 +tr80: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st29 st29: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof29 } st_case_29: +//line plugins/parsers/influx/machine.go:4657 switch ( m.data)[( m.p)] { - case 9: - goto tr83 case 10: - goto tr24 + goto tr28 case 11: - goto tr84 - case 12: - goto tr1 + goto tr88 case 13: - goto tr25 + goto st6 case 32: - goto tr83 + goto tr87 case 34: - goto tr85 + goto tr89 case 44: - goto tr86 + goto tr90 case 92: + goto st140 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr87 } - goto tr82 -tr82: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st30 - st30: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof30 - } - st_case_30: -//line plugins/parsers/influx/machine.go:4678 - switch ( m.data)[( m.p)] { - case 9: - goto tr89 - case 10: - goto tr29 - case 11: - goto tr90 - case 12: - goto tr1 - case 13: - goto st7 - case 32: - goto tr89 - case 34: - goto tr91 - case 44: - goto tr92 - case 92: - goto st141 - } - goto st30 -tr89: - ( m.cs) = 31 -//line plugins/parsers/influx/machine.go.rl:78 + goto st29 +tr87: + ( m.cs) = 30 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr83: - ( m.cs) = 31 -//line plugins/parsers/influx/machine.go.rl:78 +tr81: + ( m.cs) = 30 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p goto _again -tr231: - ( m.cs) = 31 -//line plugins/parsers/influx/machine.go.rl:91 +tr229: + ( m.cs) = 30 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again + st30: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof30 + } + st_case_30: +//line plugins/parsers/influx/machine.go:4726 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr94 + case 13: + goto st6 + case 32: + goto st30 + case 34: + goto tr95 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr96 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st30 + } + goto 
tr92 +tr92: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st31 st31: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof31 } st_case_31: -//line plugins/parsers/influx/machine.go:4748 +//line plugins/parsers/influx/machine.go:4760 switch ( m.data)[( m.p)] { case 9: - goto st31 + goto st6 case 10: - goto tr29 - case 11: - goto tr96 - case 12: - goto st2 - case 13: - goto st7 + goto tr28 case 32: - goto st31 + goto st6 case 34: - goto tr97 - case 44: - goto st6 - case 61: - goto st6 - case 92: goto tr98 - } - goto tr94 -tr94: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st32 - st32: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof32 - } - st_case_32: -//line plugins/parsers/influx/machine.go:4783 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 - case 32: - goto st6 - case 34: - goto tr100 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - goto st32 -tr97: - ( m.cs) = 274 -//line plugins/parsers/influx/machine.go.rl:20 + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st31 +tr95: + ( m.cs) = 273 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:140 +//line plugins/parsers/influx/machine.go.rl:148 err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr100: - ( m.cs) = 274 -//line plugins/parsers/influx/machine.go.rl:140 +tr98: + ( m.cs) = 273 +//line plugins/parsers/influx/machine.go.rl:148 err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr386: - ( m.cs) = 274 -//line plugins/parsers/influx/machine.go.rl:140 +tr384: + ( m.cs) = 273 +//line plugins/parsers/influx/machine.go.rl:148 err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p goto _again + st273: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof273 + } + st_case_273: +//line plugins/parsers/influx/machine.go:4833 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto st274 + case 13: + goto st32 + case 32: + goto st271 + case 44: + goto st35 + case 61: + goto tr12 + case 92: + goto st34 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st271 + } + goto st3 st274: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof274 } st_case_274: -//line plugins/parsers/influx/machine.go:4857 switch ( m.data)[( m.p)] { case 10: - goto tr103 + goto tr101 case 11: - goto st275 + goto st274 case 13: - goto st33 + goto st32 case 32: - goto st272 + goto st271 case 44: - goto st36 - case 61: - goto tr12 - case 92: - goto st35 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st272 - } - goto st3 - st275: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof275 - } - st_case_275: - switch ( m.data)[( m.p)] { - case 10: goto tr103 - case 11: - goto st275 - case 13: - goto st33 - case 32: - goto st272 - case 44: - goto tr105 case 45: - goto tr467 + goto tr465 case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr468 + goto tr466 } case ( m.data)[( m.p)] >= 9: - goto st272 + goto st271 } goto st3 -tr472: - ( m.cs) = 33 
-//line plugins/parsers/influx/machine.go.rl:149 +tr470: + ( m.cs) = 32 +//line plugins/parsers/influx/machine.go.rl:157 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr736: - ( m.cs) = 33 -//line plugins/parsers/influx/machine.go.rl:122 +tr732: + ( m.cs) = 32 +//line plugins/parsers/influx/machine.go.rl:130 err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr954: - ( m.cs) = 33 -//line plugins/parsers/influx/machine.go.rl:104 +tr944: + ( m.cs) = 32 +//line plugins/parsers/influx/machine.go.rl:112 err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr959: - ( m.cs) = 33 -//line plugins/parsers/influx/machine.go.rl:113 +tr950: + ( m.cs) = 32 +//line plugins/parsers/influx/machine.go.rl:121 err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr964: - ( m.cs) = 33 -//line plugins/parsers/influx/machine.go.rl:131 +tr956: + ( m.cs) = 32 +//line plugins/parsers/influx/machine.go.rl:139 err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again + st32: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof32 + } + st_case_32: +//line plugins/parsers/influx/machine.go:4956 + if ( m.data)[( m.p)] == 10 { + goto tr101 + } + goto st0 +tr465: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st33 st33: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof33 } st_case_33: -//line plugins/parsers/influx/machine.go:4980 - if ( m.data)[( m.p)] == 10 { - goto tr103 - } - goto st0 -tr467: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st34 - st34: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof34 - } - st_case_34: -//line plugins/parsers/influx/machine.go:4996 +//line plugins/parsers/influx/machine.go:4972 switch ( m.data)[( m.p)] { case 32: - goto tr105 + goto tr103 case 44: - goto tr105 + goto tr103 case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( m.data)[( m.p)] < 12: if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr105 + goto tr103 } case ( m.data)[( m.p)] > 13: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st276 + goto st275 } default: - goto tr105 + goto tr103 } goto st3 -tr468: -//line plugins/parsers/influx/machine.go.rl:20 +tr466: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st276 - st276: + goto st275 + st275: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof276 + goto _test_eof275 } - st_case_276: -//line plugins/parsers/influx/machine.go:5031 + st_case_275: +//line plugins/parsers/influx/machine.go:5007 switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr471 - case 13: - goto tr472 - case 32: goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 case 44: - goto tr105 + goto tr103 case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st279 + goto st278 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } goto st3 -tr469: - ( m.cs) = 277 -//line plugins/parsers/influx/machine.go.rl:149 +tr467: + ( m.cs) = 276 +//line plugins/parsers/influx/machine.go.rl:157 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( 
m.cs) = 258; + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st276: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof276 + } + st_case_276: +//line plugins/parsers/influx/machine.go:5051 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 13: + goto st32 + case 32: + goto st276 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st276 + } + goto st0 +tr469: + ( m.cs) = 277 +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; {( m.p)++; goto _out } } @@ -5071,70 +5078,39 @@ tr469: goto _test_eof277 } st_case_277: -//line plugins/parsers/influx/machine.go:5075 +//line plugins/parsers/influx/machine.go:5082 switch ( m.data)[( m.p)] { case 10: - goto tr103 - case 13: - goto st33 - case 32: - goto st277 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st277 - } - goto st0 -tr471: - ( m.cs) = 278 -//line plugins/parsers/influx/machine.go.rl:149 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st278: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof278 - } - st_case_278: -//line plugins/parsers/influx/machine.go:5106 - switch ( m.data)[( m.p)] { - case 10: - goto tr103 + goto tr101 case 11: - goto st278 - case 13: - goto st33 - case 32: goto st277 + case 13: + goto st32 + case 32: + goto st276 case 44: goto tr8 case 61: goto tr12 case 92: - goto st35 + goto st34 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st277 + goto st276 } goto st3 tr10: -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st35 - st35: + goto st34 + st34: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof35 + goto _test_eof34 } - st_case_35: -//line plugins/parsers/influx/machine.go:5138 + st_case_34: +//line plugins/parsers/influx/machine.go:5114 switch { case ( m.data)[( m.p)] > 10: if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { @@ -5144,6 +5120,36 @@ tr10: goto tr8 } goto st3 + st278: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof278 + } + st_case_278: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st279 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st3 st279: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof279 @@ -5151,19 +5157,19 @@ tr10: st_case_279: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr471 - case 13: - goto tr472 - case 32: goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 case 44: - goto tr105 + goto tr103 case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( m.data)[( m.p)] > 12: @@ -5171,7 +5177,7 @@ tr10: goto st280 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } goto st3 st280: @@ -5181,19 +5187,19 @@ tr10: st_case_280: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr471 - case 13: - goto tr472 - case 32: goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 case 44: - goto tr105 + goto tr103 case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( m.data)[( m.p)] > 12: @@ -5201,7 +5207,7 @@ tr10: goto st281 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } goto st3 st281: @@ -5211,19 
+5217,19 @@ tr10: st_case_281: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr471 - case 13: - goto tr472 - case 32: goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 case 44: - goto tr105 + goto tr103 case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( m.data)[( m.p)] > 12: @@ -5231,7 +5237,7 @@ tr10: goto st282 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } goto st3 st282: @@ -5241,19 +5247,19 @@ tr10: st_case_282: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr471 - case 13: - goto tr472 - case 32: goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 case 44: - goto tr105 + goto tr103 case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( m.data)[( m.p)] > 12: @@ -5261,7 +5267,7 @@ tr10: goto st283 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } goto st3 st283: @@ -5271,19 +5277,19 @@ tr10: st_case_283: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr471 - case 13: - goto tr472 - case 32: goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 case 44: - goto tr105 + goto tr103 case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( m.data)[( m.p)] > 12: @@ -5291,7 +5297,7 @@ tr10: goto st284 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } goto st3 st284: @@ -5301,19 +5307,19 @@ tr10: st_case_284: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr471 - case 13: - goto tr472 - case 32: goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 case 44: - goto tr105 + goto tr103 case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( m.data)[( m.p)] > 12: @@ -5321,7 +5327,7 @@ tr10: goto st285 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } goto st3 st285: @@ -5331,19 +5337,19 @@ tr10: st_case_285: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr471 - case 13: - goto tr472 - case 32: goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 case 44: - goto tr105 + goto tr103 case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( m.data)[( m.p)] > 12: @@ -5351,7 +5357,7 @@ tr10: goto st286 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } goto st3 st286: @@ -5361,19 +5367,19 @@ tr10: st_case_286: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr471 - case 13: - goto tr472 - case 32: goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 case 44: - goto tr105 + goto tr103 case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( m.data)[( m.p)] > 12: @@ -5381,7 +5387,7 @@ tr10: goto st287 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } goto st3 st287: @@ -5391,19 +5397,19 @@ tr10: st_case_287: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr471 - case 13: - goto tr472 - case 32: goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 case 44: - goto tr105 + goto tr103 case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( m.data)[( m.p)] > 12: @@ -5411,7 +5417,7 @@ tr10: goto st288 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } goto st3 st288: @@ -5421,19 +5427,19 @@ tr10: st_case_288: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr471 - case 13: - goto tr472 - case 32: goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 case 44: - goto tr105 + goto tr103 case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( 
m.data)[( m.p)] > 12: @@ -5441,7 +5447,7 @@ tr10: goto st289 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } goto st3 st289: @@ -5451,19 +5457,19 @@ tr10: st_case_289: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr471 - case 13: - goto tr472 - case 32: goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 case 44: - goto tr105 + goto tr103 case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( m.data)[( m.p)] > 12: @@ -5471,7 +5477,7 @@ tr10: goto st290 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } goto st3 st290: @@ -5481,19 +5487,19 @@ tr10: st_case_290: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr471 - case 13: - goto tr472 - case 32: goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 case 44: - goto tr105 + goto tr103 case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( m.data)[( m.p)] > 12: @@ -5501,7 +5507,7 @@ tr10: goto st291 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } goto st3 st291: @@ -5511,19 +5517,19 @@ tr10: st_case_291: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr471 - case 13: - goto tr472 - case 32: goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 case 44: - goto tr105 + goto tr103 case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( m.data)[( m.p)] > 12: @@ -5531,7 +5537,7 @@ tr10: goto st292 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } goto st3 st292: @@ -5541,19 +5547,19 @@ tr10: st_case_292: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr471 - case 13: - goto tr472 - case 32: goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 case 44: - goto tr105 + goto tr103 case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( m.data)[( m.p)] > 12: @@ -5561,7 +5567,7 @@ tr10: goto st293 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } goto st3 st293: @@ -5571,19 +5577,19 @@ tr10: st_case_293: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr471 - case 13: - goto tr472 - case 32: goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 case 44: - goto tr105 + goto tr103 case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( m.data)[( m.p)] > 12: @@ -5591,7 +5597,7 @@ tr10: goto st294 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } goto st3 st294: @@ -5601,19 +5607,19 @@ tr10: st_case_294: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr471 - case 13: - goto tr472 - case 32: goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 case 44: - goto tr105 + goto tr103 case 61: goto tr12 case 92: - goto st35 + goto st34 } switch { case ( m.data)[( m.p)] > 12: @@ -5621,7 +5627,7 @@ tr10: goto st295 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } goto st3 st295: @@ -5631,112 +5637,82 @@ tr10: st_case_295: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr471 - case 13: - goto tr472 - case 32: goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 case 44: - goto tr105 + goto tr103 case 61: goto tr12 case 92: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st296 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 - } - goto st3 - st296: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof296 - } - st_case_296: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr471 - case 
13: - goto tr472 - case 32: - goto tr469 - case 44: - goto tr105 - case 61: - goto tr12 - case 92: - goto st35 + goto st34 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr469 + goto tr467 } goto st3 -tr930: - ( m.cs) = 36 -//line plugins/parsers/influx/machine.go.rl:122 +tr922: + ( m.cs) = 35 +//line plugins/parsers/influx/machine.go.rl:130 err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr1046: - ( m.cs) = 36 -//line plugins/parsers/influx/machine.go.rl:104 +tr1042: + ( m.cs) = 35 +//line plugins/parsers/influx/machine.go.rl:112 err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr1048: - ( m.cs) = 36 -//line plugins/parsers/influx/machine.go.rl:113 +tr1045: + ( m.cs) = 35 +//line plugins/parsers/influx/machine.go.rl:121 err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr1050: - ( m.cs) = 36 -//line plugins/parsers/influx/machine.go.rl:131 +tr1048: + ( m.cs) = 35 +//line plugins/parsers/influx/machine.go.rl:139 err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again - st36: + st35: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof36 + goto _test_eof35 } - st_case_36: -//line plugins/parsers/influx/machine.go:5740 + st_case_35: +//line plugins/parsers/influx/machine.go:5716 switch ( m.data)[( m.p)] { case 32: goto tr8 @@ -5756,57 +5732,135 @@ tr1050: goto tr8 } goto tr6 -tr101: -//line plugins/parsers/influx/machine.go.rl:100 +tr99: +//line plugins/parsers/influx/machine.go.rl:108 m.key = m.text() - goto st37 - st37: + goto st36 + st36: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof37 + goto _test_eof36 } - st_case_37: -//line plugins/parsers/influx/machine.go:5771 + st_case_36: +//line plugins/parsers/influx/machine.go:5747 switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr107 + goto tr105 case 45: - goto tr108 + goto tr106 case 46: - goto tr109 + goto tr107 case 48: - goto tr110 + goto tr108 case 70: - goto tr112 + goto tr110 case 84: - goto tr113 + goto tr111 case 92: - goto st75 + goto st73 case 102: - goto tr114 + goto tr112 case 116: - goto tr115 + goto tr113 } if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr111 + goto tr109 } goto st6 -tr107: - ( m.cs) = 297 -//line plugins/parsers/influx/machine.go.rl:140 +tr105: + ( m.cs) = 296 +//line plugins/parsers/influx/machine.go.rl:148 err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st296: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof296 + } + st_case_296: +//line plugins/parsers/influx/machine.go:5792 + switch ( m.data)[( m.p)] { + case 10: + goto tr492 + case 13: + goto tr493 + case 32: + goto tr491 + case 34: + goto tr25 + case 44: + goto tr494 + case 92: + goto tr26 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr491 + } + goto tr23 +tr491: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st297 +tr980: + ( m.cs) = 297 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr985: + ( m.cs) = 297 +//line 
plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr988: + ( m.cs) = 297 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr991: + ( m.cs) = 297 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; {( m.p)++; goto _out } } @@ -5816,550 +5870,694 @@ tr107: goto _test_eof297 } st_case_297: -//line plugins/parsers/influx/machine.go:5820 +//line plugins/parsers/influx/machine.go:5874 switch ( m.data)[( m.p)] { case 10: - goto tr494 - case 12: - goto st272 + goto tr219 case 13: - goto tr495 + goto st72 case 32: - goto tr493 + goto st297 case 34: - goto tr26 - case 44: - goto tr496 - case 92: - goto tr27 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr493 - } - goto tr23 -tr493: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st298 -tr988: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:122 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr993: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:104 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr996: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:113 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr999: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:131 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st298: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof298 - } - st_case_298: -//line plugins/parsers/influx/machine.go:5904 - switch ( m.data)[( m.p)] { - case 10: - goto tr221 - case 12: - goto st272 - case 13: - goto st73 - case 32: - goto st298 - case 34: - goto tr31 + goto tr29 case 45: - goto tr499 + goto tr497 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr500 + goto tr498 } case ( m.data)[( m.p)] >= 9: - goto st298 + goto st297 } goto st6 -tr494: -//line plugins/parsers/influx/machine.go.rl:20 +tr492: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:158 +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line - goto st299 -tr221: -//line plugins/parsers/influx/machine.go.rl:158 + goto st298 +tr219: +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line - goto st299 -tr639: - ( m.cs) = 299 -//line plugins/parsers/influx/machine.go.rl:122 + goto st298 +tr636: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:130 err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:158 +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in 
the line goto _again -tr603: - ( m.cs) = 299 -//line plugins/parsers/influx/machine.go.rl:158 +tr600: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line -//line plugins/parsers/influx/machine.go.rl:149 +//line plugins/parsers/influx/machine.go.rl:157 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr823: - ( m.cs) = 299 -//line plugins/parsers/influx/machine.go.rl:104 +tr817: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:112 err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:158 +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line goto _again -tr829: - ( m.cs) = 299 -//line plugins/parsers/influx/machine.go.rl:113 +tr822: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:121 err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:158 +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line goto _again -tr810: - ( m.cs) = 299 -//line plugins/parsers/influx/machine.go.rl:158 +tr803: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line -//line plugins/parsers/influx/machine.go.rl:131 +//line plugins/parsers/influx/machine.go.rl:139 err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr765: - ( m.cs) = 299 -//line plugins/parsers/influx/machine.go.rl:158 +tr758: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line -//line plugins/parsers/influx/machine.go.rl:122 +//line plugins/parsers/influx/machine.go.rl:130 err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr798: - ( m.cs) = 299 -//line plugins/parsers/influx/machine.go.rl:158 +tr791: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line -//line plugins/parsers/influx/machine.go.rl:104 +//line plugins/parsers/influx/machine.go.rl:112 err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr804: - ( m.cs) = 299 -//line plugins/parsers/influx/machine.go.rl:158 +tr797: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line -//line plugins/parsers/influx/machine.go.rl:113 +//line plugins/parsers/influx/machine.go.rl:121 err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again - st299: -//line plugins/parsers/influx/machine.go.rl:164 + st298: +//line plugins/parsers/influx/machine.go.rl:172 m.finishMetric = true - ( m.cs) = 740; + ( m.cs) = 739; {( m.p)++; goto _out } if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof299 + goto _test_eof298 } - st_case_299: -//line 
plugins/parsers/influx/machine.go:6113 + st_case_298: +//line plugins/parsers/influx/machine.go:6081 switch ( m.data)[( m.p)] { - case 9: - goto st38 case 10: - goto tr29 + goto tr28 case 11: - goto tr117 - case 12: - goto st8 + goto tr115 case 13: - goto st7 + goto st6 case 32: - goto st38 + goto st37 case 34: - goto tr118 + goto tr116 case 35: goto st6 case 44: goto st6 case 92: - goto tr87 + goto tr85 } - goto tr82 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st37 + } + goto tr80 + st37: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof37 + } + st_case_37: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr115 + case 13: + goto st6 + case 32: + goto st37 + case 34: + goto tr116 + case 35: + goto st6 + case 44: + goto st6 + case 92: + goto tr85 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st37 + } + goto tr80 +tr115: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st38 st38: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof38 } st_case_38: +//line plugins/parsers/influx/machine.go:6142 switch ( m.data)[( m.p)] { - case 9: - goto st38 case 10: - goto tr29 + goto tr28 case 11: - goto tr117 - case 12: - goto st8 - case 13: - goto st7 - case 32: - goto st38 - case 34: goto tr118 + case 13: + goto st6 + case 32: + goto tr117 + case 34: + goto tr83 case 35: - goto st6 + goto st29 case 44: - goto st6 + goto tr90 case 92: - goto tr87 + goto tr85 } - goto tr82 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr117 + } + goto tr80 tr117: -//line plugins/parsers/influx/machine.go.rl:20 + ( m.cs) = 39 +//line plugins/parsers/influx/machine.go.rl:86 - m.pb = m.p + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - goto st39 + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st39: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof39 } st_case_39: -//line plugins/parsers/influx/machine.go:6176 +//line plugins/parsers/influx/machine.go:6183 switch ( m.data)[( m.p)] { - case 9: - goto tr119 case 10: - goto tr29 + goto tr28 case 11: - goto tr120 - case 12: - goto tr38 + goto tr121 case 13: - goto st7 + goto st6 case 32: - goto tr119 + goto st39 case 34: - goto tr85 + goto tr122 case 35: - goto st30 - case 44: goto tr92 + case 44: + goto st6 + case 61: + goto tr80 case 92: - goto tr87 + goto tr123 } - goto tr82 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st39 + } + goto tr119 tr119: - ( m.cs) = 40 -//line plugins/parsers/influx/machine.go.rl:78 +//line plugins/parsers/influx/machine.go.rl:28 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + m.pb = m.p - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again + goto st40 st40: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof40 } st_case_40: -//line plugins/parsers/influx/machine.go:6218 +//line plugins/parsers/influx/machine.go:6219 switch ( m.data)[( m.p)] { - case 9: - goto st40 case 10: - goto tr29 + goto tr28 case 11: - goto tr123 - case 12: - goto st10 - case 13: - goto st7 - case 32: - goto st40 - case 34: - goto tr124 - case 35: - goto tr94 - case 44: - goto st6 - case 61: - goto tr82 - case 92: goto tr125 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr126 + case 44: + goto tr90 + case 61: + goto tr127 + case 92: + goto st92 } - goto tr121 -tr121: -//line plugins/parsers/influx/machine.go.rl:20 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + goto st40 +tr125: + ( m.cs) = 41 +//line plugins/parsers/influx/machine.go.rl:86 + + err = 
m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr129: + ( m.cs) = 41 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st41 + goto _again st41: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof41 } st_case_41: -//line plugins/parsers/influx/machine.go:6255 +//line plugins/parsers/influx/machine.go:6277 switch ( m.data)[( m.p)] { - case 9: - goto tr89 case 10: - goto tr29 + goto tr28 case 11: + goto tr129 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr122 + case 44: + goto tr90 + case 61: goto tr127 - case 12: - goto tr1 - case 13: - goto st7 - case 32: - goto tr89 - case 34: - goto tr128 - case 44: - goto tr92 - case 61: - goto tr129 case 92: - goto st93 + goto tr123 } - goto st41 -tr127: - ( m.cs) = 42 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr131: - ( m.cs) = 42 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:20 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + goto tr119 +tr122: + ( m.cs) = 299 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + goto _again - st42: +tr126: + ( m.cs) = 299 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st299: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof42 + goto _test_eof299 } - st_case_42: -//line plugins/parsers/influx/machine.go:6314 + st_case_299: +//line plugins/parsers/influx/machine.go:6335 switch ( m.data)[( m.p)] { - case 9: - goto tr89 case 10: - goto tr29 + goto tr101 case 11: - goto tr131 - case 12: - goto tr1 + goto tr500 case 13: - goto st7 + goto st32 case 32: - goto tr89 - case 34: - goto tr124 + goto tr499 case 44: - goto tr92 + goto tr501 case 61: - goto tr129 + goto tr47 case 92: - goto tr125 + goto st27 } - goto tr121 -tr124: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr499 + } + goto st10 +tr499: ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:86 - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:140 - - err = m.handler.AddString(m.key, m.text()) + err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr128: +tr563: ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:140 +//line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddString(m.key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr811: + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line 
plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr729: + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr941: + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr947: + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr953: + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1005: + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1009: + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1013: + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; {( m.p)++; goto _out } } @@ -6369,295 +6567,59 @@ tr128: goto _test_eof300 } st_case_300: -//line plugins/parsers/influx/machine.go:6373 +//line plugins/parsers/influx/machine.go:6571 switch ( m.data)[( m.p)] { case 10: - goto tr103 + goto tr101 case 11: - goto tr502 - case 13: - goto st33 - case 32: - goto tr501 - case 44: goto tr503 - case 61: - goto tr49 - case 92: - goto st28 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr501 - } - goto st11 -tr501: - ( m.cs) = 301 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr566: - ( m.cs) = 301 -//line 
plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr641: - ( m.cs) = 301 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:122 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr731: - ( m.cs) = 301 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:122 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr743: - ( m.cs) = 301 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:104 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr750: - ( m.cs) = 301 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:113 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr757: - ( m.cs) = 301 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:131 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr825: - ( m.cs) = 301 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:104 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr831: - ( m.cs) = 301 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:113 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr836: - ( m.cs) = 301 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:131 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st301: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof301 - } - st_case_301: -//line plugins/parsers/influx/machine.go:6609 - switch ( m.data)[( m.p)] { - case 10: - goto tr103 - case 11: - goto tr505 case 13: - goto st33 + goto st32 case 32: - goto st301 + goto st300 case 44: - goto tr105 + goto tr103 case 
45: - goto tr467 + goto tr465 case 61: - goto tr105 + goto tr103 case 92: goto tr10 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr468 + goto tr466 } case ( m.data)[( m.p)] >= 9: - goto st301 + goto st300 } goto tr6 -tr505: -//line plugins/parsers/influx/machine.go.rl:20 +tr503: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st302 - st302: + goto st301 + st301: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof302 + goto _test_eof301 } - st_case_302: -//line plugins/parsers/influx/machine.go:6648 + st_case_301: +//line plugins/parsers/influx/machine.go:6610 switch ( m.data)[( m.p)] { case 10: - goto tr103 + goto tr101 case 11: - goto tr505 + goto tr503 case 13: - goto st33 + goto st32 case 32: - goto st301 + goto st300 case 44: - goto tr105 + goto tr103 case 45: - goto tr467 + goto tr465 case 61: goto tr12 case 92: @@ -6666,139 +6628,365 @@ tr505: switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr468 + goto tr466 } case ( m.data)[( m.p)] >= 9: - goto st301 + goto st300 } goto tr6 -tr502: - ( m.cs) = 303 -//line plugins/parsers/influx/machine.go.rl:78 +tr500: + ( m.cs) = 302 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr506: - ( m.cs) = 303 -//line plugins/parsers/influx/machine.go.rl:78 +tr504: + ( m.cs) = 302 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p goto _again + st302: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof302 + } + st_case_302: +//line plugins/parsers/influx/machine.go:6673 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto tr504 + case 13: + goto st32 + case 32: + goto tr499 + case 44: + goto tr4 + case 45: + goto tr505 + case 61: + goto tr47 + case 92: + goto tr43 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr506 + } + case ( m.data)[( m.p)] >= 9: + goto tr499 + } + goto tr39 +tr505: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st42 + st42: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof42 + } + st_case_42: +//line plugins/parsers/influx/machine.go:6712 + switch ( m.data)[( m.p)] { + case 10: + goto tr130 + case 11: + goto tr46 + case 13: + goto tr130 + case 32: + goto tr1 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st303 + } + case ( m.data)[( m.p)] >= 9: + goto tr1 + } + goto st10 +tr506: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st303 st303: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof303 } st_case_303: -//line plugins/parsers/influx/machine.go:6711 +//line plugins/parsers/influx/machine.go:6749 switch ( m.data)[( m.p)] { case 10: - goto tr103 + goto tr468 case 11: - goto tr506 + goto tr508 case 13: - goto st33 + goto tr470 case 32: - goto tr501 - case 44: - goto tr4 - case 45: goto tr507 - case 61: - goto tr49 - case 92: - goto tr45 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr508 - } - case ( m.data)[( m.p)] >= 9: - 
goto tr501 - } - goto tr41 -tr507: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st43 - st43: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof43 - } - st_case_43: -//line plugins/parsers/influx/machine.go:6750 - switch ( m.data)[( m.p)] { - case 10: - goto tr132 - case 11: - goto tr48 - case 13: - goto tr132 - case 32: - goto tr1 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 + goto st27 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st304 + goto st307 } case ( m.data)[( m.p)] >= 9: - goto tr1 + goto tr507 } - goto st11 -tr508: -//line plugins/parsers/influx/machine.go.rl:20 + goto st10 +tr512: + ( m.cs) = 304 +//line plugins/parsers/influx/machine.go.rl:86 - m.pb = m.p + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - goto st304 + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr572: + ( m.cs) = 304 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr507: + ( m.cs) = 304 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr569: + ( m.cs) = 304 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st304: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof304 } st_case_304: -//line plugins/parsers/influx/machine.go:6787 +//line plugins/parsers/influx/machine.go:6852 switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr101 case 11: - goto tr510 + goto tr511 case 13: - goto tr472 + goto st32 case 32: - goto tr509 + goto st304 + case 44: + goto tr8 + case 61: + goto tr8 + case 92: + goto tr10 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st304 + } + goto tr6 +tr511: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st305 + st305: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof305 + } + st_case_305: +//line plugins/parsers/influx/machine.go:6884 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto tr511 + case 13: + goto st32 + case 32: + goto st304 + case 44: + goto tr8 + case 61: + goto tr12 + case 92: + goto tr10 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st304 + } + goto tr6 +tr513: + ( m.cs) = 306 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again +tr508: + ( m.cs) = 306 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + 
st306: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof306 + } + st_case_306: +//line plugins/parsers/influx/machine.go:6950 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto tr513 + case 13: + goto st32 + case 32: + goto tr512 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 + goto tr43 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr512 + } + goto tr39 + st307: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof307 + } + st_case_307: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 } switch { case ( m.data)[( m.p)] > 12: @@ -6806,205 +6994,9 @@ tr508: goto st308 } case ( m.data)[( m.p)] >= 9: - goto tr509 + goto tr507 } - goto st11 -tr514: - ( m.cs) = 305 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr575: - ( m.cs) = 305 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr509: - ( m.cs) = 305 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:149 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr572: - ( m.cs) = 305 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:149 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st305: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof305 - } - st_case_305: -//line plugins/parsers/influx/machine.go:6890 - switch ( m.data)[( m.p)] { - case 10: - goto tr103 - case 11: - goto tr513 - case 13: - goto st33 - case 32: - goto st305 - case 44: - goto tr8 - case 61: - goto tr8 - case 92: - goto tr10 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st305 - } - goto tr6 -tr513: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st306 - st306: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof306 - } - st_case_306: -//line plugins/parsers/influx/machine.go:6922 - switch ( m.data)[( m.p)] { - case 10: - goto tr103 - case 11: - goto tr513 - case 13: - goto st33 - case 32: - goto st305 - case 44: - goto tr8 - case 61: - goto tr12 - case 92: - goto tr10 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st305 - } - goto tr6 -tr515: - ( m.cs) = 307 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto _again -tr510: - ( m.cs) = 307 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:149 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - 
{( m.p)++; goto _out } - } - - goto _again - st307: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof307 - } - st_case_307: -//line plugins/parsers/influx/machine.go:6988 - switch ( m.data)[( m.p)] { - case 10: - goto tr103 - case 11: - goto tr515 - case 13: - goto st33 - case 32: - goto tr514 - case 44: - goto tr4 - case 61: - goto tr49 - case 92: - goto tr45 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr514 - } - goto tr41 + goto st10 st308: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof308 @@ -7012,19 +7004,19 @@ tr510: st_case_308: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr510 + goto tr508 case 13: - goto tr472 + goto tr470 case 32: - goto tr509 + goto tr507 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 + goto st27 } switch { case ( m.data)[( m.p)] > 12: @@ -7032,9 +7024,9 @@ tr510: goto st309 } case ( m.data)[( m.p)] >= 9: - goto tr509 + goto tr507 } - goto st11 + goto st10 st309: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof309 @@ -7042,19 +7034,19 @@ tr510: st_case_309: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr510 + goto tr508 case 13: - goto tr472 + goto tr470 case 32: - goto tr509 + goto tr507 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 + goto st27 } switch { case ( m.data)[( m.p)] > 12: @@ -7062,9 +7054,9 @@ tr510: goto st310 } case ( m.data)[( m.p)] >= 9: - goto tr509 + goto tr507 } - goto st11 + goto st10 st310: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof310 @@ -7072,19 +7064,19 @@ tr510: st_case_310: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr510 + goto tr508 case 13: - goto tr472 + goto tr470 case 32: - goto tr509 + goto tr507 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 + goto st27 } switch { case ( m.data)[( m.p)] > 12: @@ -7092,9 +7084,9 @@ tr510: goto st311 } case ( m.data)[( m.p)] >= 9: - goto tr509 + goto tr507 } - goto st11 + goto st10 st311: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof311 @@ -7102,19 +7094,19 @@ tr510: st_case_311: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr510 + goto tr508 case 13: - goto tr472 + goto tr470 case 32: - goto tr509 + goto tr507 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 + goto st27 } switch { case ( m.data)[( m.p)] > 12: @@ -7122,9 +7114,9 @@ tr510: goto st312 } case ( m.data)[( m.p)] >= 9: - goto tr509 + goto tr507 } - goto st11 + goto st10 st312: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof312 @@ -7132,19 +7124,19 @@ tr510: st_case_312: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr510 + goto tr508 case 13: - goto tr472 + goto tr470 case 32: - goto tr509 + goto tr507 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 + goto st27 } switch { case ( m.data)[( m.p)] > 12: @@ -7152,9 +7144,9 @@ tr510: goto st313 } case ( m.data)[( m.p)] >= 9: - goto tr509 + goto tr507 } - goto st11 + goto st10 st313: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof313 @@ -7162,19 +7154,19 @@ tr510: st_case_313: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr510 + goto tr508 case 13: - goto tr472 + goto tr470 case 32: - goto tr509 + goto tr507 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 + goto st27 } switch { case ( m.data)[( m.p)] > 12: @@ -7182,9 +7174,9 @@ tr510: goto st314 } case ( m.data)[( m.p)] >= 9: - goto tr509 + goto tr507 } - goto st11 + 
goto st10 st314: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof314 @@ -7192,19 +7184,19 @@ tr510: st_case_314: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr510 + goto tr508 case 13: - goto tr472 + goto tr470 case 32: - goto tr509 + goto tr507 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 + goto st27 } switch { case ( m.data)[( m.p)] > 12: @@ -7212,9 +7204,9 @@ tr510: goto st315 } case ( m.data)[( m.p)] >= 9: - goto tr509 + goto tr507 } - goto st11 + goto st10 st315: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof315 @@ -7222,19 +7214,19 @@ tr510: st_case_315: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr510 + goto tr508 case 13: - goto tr472 + goto tr470 case 32: - goto tr509 + goto tr507 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 + goto st27 } switch { case ( m.data)[( m.p)] > 12: @@ -7242,9 +7234,9 @@ tr510: goto st316 } case ( m.data)[( m.p)] >= 9: - goto tr509 + goto tr507 } - goto st11 + goto st10 st316: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof316 @@ -7252,19 +7244,19 @@ tr510: st_case_316: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr510 + goto tr508 case 13: - goto tr472 + goto tr470 case 32: - goto tr509 + goto tr507 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 + goto st27 } switch { case ( m.data)[( m.p)] > 12: @@ -7272,9 +7264,9 @@ tr510: goto st317 } case ( m.data)[( m.p)] >= 9: - goto tr509 + goto tr507 } - goto st11 + goto st10 st317: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof317 @@ -7282,19 +7274,19 @@ tr510: st_case_317: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr510 + goto tr508 case 13: - goto tr472 + goto tr470 case 32: - goto tr509 + goto tr507 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 + goto st27 } switch { case ( m.data)[( m.p)] > 12: @@ -7302,9 +7294,9 @@ tr510: goto st318 } case ( m.data)[( m.p)] >= 9: - goto tr509 + goto tr507 } - goto st11 + goto st10 st318: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof318 @@ -7312,19 +7304,19 @@ tr510: st_case_318: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr510 + goto tr508 case 13: - goto tr472 + goto tr470 case 32: - goto tr509 + goto tr507 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 + goto st27 } switch { case ( m.data)[( m.p)] > 12: @@ -7332,9 +7324,9 @@ tr510: goto st319 } case ( m.data)[( m.p)] >= 9: - goto tr509 + goto tr507 } - goto st11 + goto st10 st319: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof319 @@ -7342,19 +7334,19 @@ tr510: st_case_319: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr510 + goto tr508 case 13: - goto tr472 + goto tr470 case 32: - goto tr509 + goto tr507 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 + goto st27 } switch { case ( m.data)[( m.p)] > 12: @@ -7362,9 +7354,9 @@ tr510: goto st320 } case ( m.data)[( m.p)] >= 9: - goto tr509 + goto tr507 } - goto st11 + goto st10 st320: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof320 @@ -7372,19 +7364,19 @@ tr510: st_case_320: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr510 + goto tr508 case 13: - goto tr472 + goto tr470 case 32: - goto tr509 + goto tr507 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 + goto st27 } switch { case ( m.data)[( m.p)] > 12: @@ -7392,9 +7384,9 @@ tr510: goto st321 } case ( 
m.data)[( m.p)] >= 9: - goto tr509 + goto tr507 } - goto st11 + goto st10 st321: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof321 @@ -7402,19 +7394,19 @@ tr510: st_case_321: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr510 + goto tr508 case 13: - goto tr472 + goto tr470 case 32: - goto tr509 + goto tr507 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 + goto st27 } switch { case ( m.data)[( m.p)] > 12: @@ -7422,9 +7414,9 @@ tr510: goto st322 } case ( m.data)[( m.p)] >= 9: - goto tr509 + goto tr507 } - goto st11 + goto st10 st322: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof322 @@ -7432,19 +7424,19 @@ tr510: st_case_322: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr510 + goto tr508 case 13: - goto tr472 + goto tr470 case 32: - goto tr509 + goto tr507 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 + goto st27 } switch { case ( m.data)[( m.p)] > 12: @@ -7452,9 +7444,9 @@ tr510: goto st323 } case ( m.data)[( m.p)] >= 9: - goto tr509 + goto tr507 } - goto st11 + goto st10 st323: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof323 @@ -7462,19 +7454,19 @@ tr510: st_case_323: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr510 + goto tr508 case 13: - goto tr472 + goto tr470 case 32: - goto tr509 + goto tr507 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 + goto st27 } switch { case ( m.data)[( m.p)] > 12: @@ -7482,9 +7474,9 @@ tr510: goto st324 } case ( m.data)[( m.p)] >= 9: - goto tr509 + goto tr507 } - goto st11 + goto st10 st324: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof324 @@ -7492,293 +7484,298 @@ tr510: st_case_324: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr510 + goto tr508 case 13: - goto tr472 + goto tr470 case 32: - goto tr509 + goto tr507 case 44: goto tr4 case 61: - goto tr49 + goto tr47 case 92: - goto st28 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st325 - } - case ( m.data)[( m.p)] >= 9: - goto tr509 - } - goto st11 - st325: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof325 - } - st_case_325: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr510 - case 13: - goto tr472 - case 32: - goto tr509 - case 44: - goto tr4 - case 61: - goto tr49 - case 92: - goto st28 + goto st27 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr509 + goto tr507 } - goto st11 -tr503: - ( m.cs) = 44 -//line plugins/parsers/influx/machine.go.rl:78 + goto st10 +tr501: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr568: - ( m.cs) = 44 -//line plugins/parsers/influx/machine.go.rl:91 +tr565: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr819: - ( m.cs) = 44 -//line plugins/parsers/influx/machine.go.rl:78 +tr813: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:122 +//line plugins/parsers/influx/machine.go.rl:130 err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( 
m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr737: - ( m.cs) = 44 -//line plugins/parsers/influx/machine.go.rl:91 +tr733: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:122 +//line plugins/parsers/influx/machine.go.rl:130 err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr955: - ( m.cs) = 44 -//line plugins/parsers/influx/machine.go.rl:91 +tr945: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:104 +//line plugins/parsers/influx/machine.go.rl:112 err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr960: - ( m.cs) = 44 -//line plugins/parsers/influx/machine.go.rl:91 +tr951: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:113 +//line plugins/parsers/influx/machine.go.rl:121 err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr965: - ( m.cs) = 44 -//line plugins/parsers/influx/machine.go.rl:91 +tr957: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:131 +//line plugins/parsers/influx/machine.go.rl:139 err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr1014: - ( m.cs) = 44 -//line plugins/parsers/influx/machine.go.rl:78 +tr1007: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:104 +//line plugins/parsers/influx/machine.go.rl:112 err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr1017: - ( m.cs) = 44 -//line plugins/parsers/influx/machine.go.rl:78 +tr1011: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:113 +//line plugins/parsers/influx/machine.go.rl:121 err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr1020: - ( m.cs) = 44 -//line plugins/parsers/influx/machine.go.rl:78 +tr1015: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:131 +//line plugins/parsers/influx/machine.go.rl:139 err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } 
goto _again + st43: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof43 + } + st_case_43: +//line plugins/parsers/influx/machine.go:7721 + switch ( m.data)[( m.p)] { + case 32: + goto tr45 + case 44: + goto tr45 + case 61: + goto tr45 + case 92: + goto tr133 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 + } + goto tr132 +tr132: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st44 st44: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof44 } st_case_44: -//line plugins/parsers/influx/machine.go:7759 +//line plugins/parsers/influx/machine.go:7752 switch ( m.data)[( m.p)] { case 32: - goto tr47 + goto tr45 case 44: - goto tr47 + goto tr45 case 61: - goto tr47 - case 92: goto tr135 + case 92: + goto st99 } switch { case ( m.data)[( m.p)] > 10: if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr47 + goto tr45 } case ( m.data)[( m.p)] >= 9: - goto tr47 + goto tr45 } - goto tr134 -tr134: -//line plugins/parsers/influx/machine.go.rl:20 + goto st44 +tr135: +//line plugins/parsers/influx/machine.go.rl:95 - m.pb = m.p + m.key = m.text() + +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() goto st45 st45: @@ -7786,34 +7783,50 @@ tr134: goto _test_eof45 } st_case_45: -//line plugins/parsers/influx/machine.go:7790 +//line plugins/parsers/influx/machine.go:7787 switch ( m.data)[( m.p)] { case 32: - goto tr47 - case 44: - goto tr47 - case 61: + goto tr45 + case 34: goto tr137 + case 44: + goto tr45 + case 45: + goto tr138 + case 46: + goto tr139 + case 48: + goto tr140 + case 61: + goto tr45 + case 70: + goto tr142 + case 84: + goto tr143 case 92: - goto st100 + goto tr56 + case 102: + goto tr144 + case 116: + goto tr145 } switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr47 + case ( m.data)[( m.p)] < 12: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { + goto tr45 } - case ( m.data)[( m.p)] >= 9: - goto tr47 + case ( m.data)[( m.p)] > 13: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr141 + } + default: + goto tr45 } - goto st45 + goto tr55 tr137: -//line plugins/parsers/influx/machine.go.rl:87 +//line plugins/parsers/influx/machine.go.rl:28 - m.key = m.text() - -//line plugins/parsers/influx/machine.go.rl:100 - - m.key = m.text() + m.pb = m.p goto st46 st46: @@ -7821,48 +7834,31 @@ tr137: goto _test_eof46 } st_case_46: -//line plugins/parsers/influx/machine.go:7825 +//line plugins/parsers/influx/machine.go:7838 switch ( m.data)[( m.p)] { + case 10: + goto tr24 + case 11: + goto tr148 + case 13: + goto tr23 case 32: - goto tr47 + goto tr147 case 34: - goto tr139 + goto tr149 case 44: - goto tr47 - case 45: - goto tr140 - case 46: - goto tr141 - case 48: - goto tr142 + goto tr150 case 61: - goto tr47 - case 70: - goto tr144 - case 84: - goto tr145 + goto tr23 case 92: - goto tr58 - case 102: - goto tr146 - case 116: + goto tr151 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr147 } - switch { - case ( m.data)[( m.p)] < 12: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr47 - } - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr143 - } - default: - goto tr47 - } - goto tr57 -tr139: -//line plugins/parsers/influx/machine.go.rl:20 + goto tr146 +tr146: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -7872,174 +7868,174 @@ tr139: goto _test_eof47 } st_case_47: 
-//line plugins/parsers/influx/machine.go:7876 +//line plugins/parsers/influx/machine.go:7872 switch ( m.data)[( m.p)] { - case 9: - goto tr149 case 10: - goto tr24 + goto tr28 case 11: - goto tr150 - case 12: - goto tr60 + goto tr154 case 13: - goto tr25 + goto st6 case 32: - goto tr149 - case 34: - goto tr151 - case 44: - goto tr152 - case 61: - goto tr23 - case 92: goto tr153 - } - goto tr148 -tr148: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st48 - st48: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof48 - } - st_case_48: -//line plugins/parsers/influx/machine.go:7911 - switch ( m.data)[( m.p)] { - case 9: - goto tr155 - case 10: - goto tr29 - case 11: - goto tr156 - case 12: - goto tr60 - case 13: - goto st7 - case 32: - goto tr155 case 34: - goto tr157 + goto tr155 case 44: - goto tr158 + goto tr156 case 61: goto st6 case 92: - goto st63 + goto st62 } - goto st48 -tr180: - ( m.cs) = 49 -//line plugins/parsers/influx/machine.go.rl:78 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st47 +tr178: + ( m.cs) = 48 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr155: - ( m.cs) = 49 -//line plugins/parsers/influx/machine.go.rl:91 +tr153: + ( m.cs) = 48 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr149: - ( m.cs) = 49 -//line plugins/parsers/influx/machine.go.rl:91 +tr147: + ( m.cs) = 48 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p goto _again - st49: + st48: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof49 + goto _test_eof48 } - st_case_49: -//line plugins/parsers/influx/machine.go:7983 + st_case_48: +//line plugins/parsers/influx/machine.go:7943 switch ( m.data)[( m.p)] { - case 9: - goto st49 case 10: - goto tr29 + goto tr28 case 11: - goto tr162 - case 12: - goto st2 + goto tr160 case 13: - goto st7 + goto st6 case 32: - goto st49 + goto st48 case 34: - goto tr97 + goto tr95 case 44: goto st6 case 61: goto st6 case 92: - goto tr163 + goto tr161 } - goto tr160 -tr160: -//line plugins/parsers/influx/machine.go.rl:20 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st48 + } + goto tr158 +tr158: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p + goto st49 + st49: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof49 + } + st_case_49: +//line plugins/parsers/influx/machine.go:7977 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st49 +tr163: +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() + goto st50 st50: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof50 } st_case_50: -//line plugins/parsers/influx/machine.go:8018 +//line plugins/parsers/influx/machine.go:8009 switch ( m.data)[( m.p)] { - case 9: - goto st6 case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 - case 32: - goto st6 + goto tr28 case 34: - goto tr100 - case 44: - goto st6 - case 
61: + goto tr105 + case 45: goto tr165 + case 46: + goto tr166 + case 48: + goto tr167 + case 70: + goto tr169 + case 84: + goto tr170 case 92: - goto st105 + goto st73 + case 102: + goto tr171 + case 116: + goto tr172 } - goto st50 + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr168 + } + goto st6 tr165: -//line plugins/parsers/influx/machine.go.rl:100 +//line plugins/parsers/influx/machine.go.rl:28 - m.key = m.text() + m.pb = m.p goto st51 st51: @@ -8047,39 +8043,25 @@ tr165: goto _test_eof51 } st_case_51: -//line plugins/parsers/influx/machine.go:8051 +//line plugins/parsers/influx/machine.go:8047 switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr107 - case 45: - goto tr167 + goto tr29 case 46: - goto tr168 + goto st52 case 48: - goto tr169 - case 70: - goto tr171 - case 84: - goto tr172 + goto st631 case 92: - goto st75 - case 102: - goto tr173 - case 116: - goto tr174 + goto st73 } if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr170 + goto st632 } goto st6 -tr167: -//line plugins/parsers/influx/machine.go.rl:20 +tr166: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -8089,29 +8071,308 @@ tr167: goto _test_eof52 } st_case_52: -//line plugins/parsers/influx/machine.go:8093 +//line plugins/parsers/influx/machine.go:8075 switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 - case 46: - goto st53 - case 48: - goto st632 + goto tr29 case 92: - goto st75 + goto st73 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st633 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st325 } goto st6 -tr168: -//line plugins/parsers/influx/machine.go.rl:20 + st325: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof325 + } + st_case_325: + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 13: + goto tr533 + case 32: + goto tr531 + case 34: + goto tr29 + case 44: + goto tr534 + case 69: + goto st173 + case 92: + goto st73 + case 101: + goto st173 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st325 + } + case ( m.data)[( m.p)] >= 9: + goto tr531 + } + goto st6 +tr916: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st326 +tr531: + ( m.cs) = 326 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr923: + ( m.cs) = 326 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr925: + ( m.cs) = 326 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr928: + ( m.cs) = 326 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st326: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof326 + } + st_case_326: +//line plugins/parsers/influx/machine.go:8183 + switch ( m.data)[( m.p)] { + case 10: + goto tr273 + case 13: + goto st102 + case 32: + goto st326 + case 34: + goto tr29 + case 45: + goto tr538 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + 
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr539 + } + case ( m.data)[( m.p)] >= 9: + goto st326 + } + goto st6 +tr665: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto st327 +tr273: +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto st327 +tr532: + ( m.cs) = 327 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr674: + ( m.cs) = 327 +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr737: + ( m.cs) = 327 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr743: + ( m.cs) = 327 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr749: + ( m.cs) = 327 +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr891: + ( m.cs) = 327 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again + st327: +//line plugins/parsers/influx/machine.go.rl:172 + + m.finishMetric = true + ( m.cs) = 739; + {( m.p)++; goto _out } + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof327 + } + st_case_327: +//line plugins/parsers/influx/machine.go:8352 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr337 + case 13: + goto st6 + case 32: + goto st164 + case 34: + goto tr116 + case 35: + goto st6 + case 44: + goto st6 + case 92: + goto tr338 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st164 + } + goto tr335 +tr335: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -8121,451 +8382,199 @@ tr168: goto _test_eof53 } st_case_53: -//line plugins/parsers/influx/machine.go:8125 +//line plugins/parsers/influx/machine.go:8386 switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 - case 34: - goto tr31 - case 92: - goto st75 - } - if 48 <= 
( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st326 - } - goto st6 - st326: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof326 - } - st_case_326: - switch ( m.data)[( m.p)] { - case 10: - goto tr534 - case 12: - goto tr535 - case 13: - goto tr536 - case 32: - goto tr533 - case 34: - goto tr31 - case 44: - goto tr537 - case 69: - goto st174 - case 92: - goto st75 - case 101: - goto st174 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st326 - } - case ( m.data)[( m.p)] >= 9: - goto tr533 - } - goto st6 -tr925: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st327 -tr533: - ( m.cs) = 327 -//line plugins/parsers/influx/machine.go.rl:122 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr931: - ( m.cs) = 327 -//line plugins/parsers/influx/machine.go.rl:104 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr934: - ( m.cs) = 327 -//line plugins/parsers/influx/machine.go.rl:113 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr938: - ( m.cs) = 327 -//line plugins/parsers/influx/machine.go.rl:131 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st327: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof327 - } - st_case_327: -//line plugins/parsers/influx/machine.go:8239 - switch ( m.data)[( m.p)] { - case 10: - goto tr275 - case 12: - goto st272 - case 13: - goto st103 - case 32: - goto st327 - case 34: - goto tr31 - case 45: - goto tr541 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr542 - } - case ( m.data)[( m.p)] >= 9: - goto st327 - } - goto st6 -tr669: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:158 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto st328 -tr275: -//line plugins/parsers/influx/machine.go.rl:158 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto st328 -tr534: - ( m.cs) = 328 -//line plugins/parsers/influx/machine.go.rl:122 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:158 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto _again -tr678: - ( m.cs) = 328 -//line plugins/parsers/influx/machine.go.rl:158 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - -//line plugins/parsers/influx/machine.go.rl:149 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr741: - ( m.cs) = 328 -//line plugins/parsers/influx/machine.go.rl:104 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:158 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto _again -tr748: - ( m.cs) = 328 -//line plugins/parsers/influx/machine.go.rl:113 - - err = 
m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:158 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto _again -tr755: - ( m.cs) = 328 -//line plugins/parsers/influx/machine.go.rl:158 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - -//line plugins/parsers/influx/machine.go.rl:131 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr900: - ( m.cs) = 328 -//line plugins/parsers/influx/machine.go.rl:131 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:158 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto _again - st328: -//line plugins/parsers/influx/machine.go.rl:164 - - m.finishMetric = true - ( m.cs) = 740; - {( m.p)++; goto _out } - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof328 - } - st_case_328: -//line plugins/parsers/influx/machine.go:8410 - switch ( m.data)[( m.p)] { - case 9: - goto st165 - case 10: - goto tr29 + goto tr28 case 11: - goto tr339 - case 12: - goto st8 + goto tr179 case 13: - goto st7 + goto st6 case 32: - goto st165 + goto tr178 case 34: - goto tr118 - case 35: - goto st6 + goto tr89 case 44: - goto st6 + goto tr180 case 92: - goto tr340 + goto st155 } - goto tr337 -tr337: -//line plugins/parsers/influx/machine.go.rl:20 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto st53 +tr179: + ( m.cs) = 54 +//line plugins/parsers/influx/machine.go.rl:86 - m.pb = m.p + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - goto st54 + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st54: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof54 } st_case_54: -//line plugins/parsers/influx/machine.go:8445 +//line plugins/parsers/influx/machine.go:8425 switch ( m.data)[( m.p)] { - case 9: - goto tr180 case 10: - goto tr29 + goto tr28 case 11: - goto tr181 - case 12: - goto tr1 + goto tr183 case 13: - goto st7 + goto st6 case 32: - goto tr180 + goto tr178 case 34: - goto tr91 + goto tr122 case 44: - goto tr182 + goto tr180 + case 61: + goto st53 case 92: - goto st156 + goto tr184 } - goto st54 -tr181: - ( m.cs) = 55 -//line plugins/parsers/influx/machine.go.rl:78 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto tr182 +tr182: +//line plugins/parsers/influx/machine.go.rl:28 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + m.pb = m.p - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again + goto st55 st55: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof55 } st_case_55: -//line plugins/parsers/influx/machine.go:8485 +//line plugins/parsers/influx/machine.go:8459 switch ( m.data)[( m.p)] { - case 9: - goto tr180 case 10: - goto tr29 + goto tr28 case 11: - goto tr185 - case 12: - goto tr1 - case 13: - goto st7 - case 32: - goto tr180 - case 34: - goto tr124 - case 44: - goto tr182 - case 61: - goto st54 - case 92: goto tr186 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr126 + case 44: + goto tr180 + case 61: + goto tr187 + case 92: + goto st152 } - goto tr184 -tr184: -//line plugins/parsers/influx/machine.go.rl:20 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto st55 
+tr186: + ( m.cs) = 56 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr183: + ( m.cs) = 56 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st56 + goto _again st56: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof56 } st_case_56: -//line plugins/parsers/influx/machine.go:8520 +//line plugins/parsers/influx/machine.go:8517 switch ( m.data)[( m.p)] { - case 9: - goto tr180 case 10: - goto tr29 + goto tr28 case 11: - goto tr188 - case 12: - goto tr1 + goto tr183 case 13: - goto st7 + goto st6 case 32: - goto tr180 + goto tr178 case 34: - goto tr128 + goto tr122 case 44: - goto tr182 + goto tr180 case 61: - goto tr189 + goto tr187 case 92: - goto st153 + goto tr184 } - goto st56 -tr188: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto tr182 +tr180: ( m.cs) = 57 -//line plugins/parsers/influx/machine.go.rl:78 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr185: +tr156: ( m.cs) = 57 -//line plugins/parsers/influx/machine.go.rl:78 +//line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.SetMeasurement(m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:20 + goto _again +tr150: + ( m.cs) = 57 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -8575,102 +8584,149 @@ tr185: goto _test_eof57 } st_case_57: -//line plugins/parsers/influx/machine.go:8579 +//line plugins/parsers/influx/machine.go:8588 switch ( m.data)[( m.p)] { case 9: - goto tr180 + goto st6 case 10: - goto tr29 - case 11: - goto tr185 - case 12: - goto tr1 - case 13: - goto st7 + goto tr28 case 32: - goto tr180 + goto st6 case 34: - goto tr124 + goto tr190 case 44: - goto tr182 + goto st6 case 61: - goto tr189 + goto st6 case 92: - goto tr186 + goto tr191 } - goto tr184 -tr182: - ( m.cs) = 58 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr158: - ( m.cs) = 58 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr152: - ( m.cs) = 58 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:20 + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto tr189 +tr189: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto _again + goto st58 st58: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof58 } st_case_58: -//line plugins/parsers/influx/machine.go:8651 +//line plugins/parsers/influx/machine.go:8620 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto tr29 - case 12: - 
goto tr47 - case 13: - goto st7 + goto tr28 case 32: goto st6 case 34: - goto tr192 + goto tr193 case 44: goto st6 case 61: - goto st6 + goto tr194 case 92: - goto tr193 + goto st69 } - goto tr191 -tr191: -//line plugins/parsers/influx/machine.go.rl:20 + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st58 +tr190: + ( m.cs) = 328 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr193: + ( m.cs) = 328 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st328: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof328 + } + st_case_328: +//line plugins/parsers/influx/machine.go:8676 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto st329 + case 13: + goto st32 + case 32: + goto st271 + case 44: + goto st35 + case 61: + goto tr53 + case 92: + goto st23 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st271 + } + goto st13 + st329: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof329 + } + st_case_329: + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto st329 + case 13: + goto st32 + case 32: + goto st271 + case 44: + goto tr196 + case 45: + goto tr541 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr542 + } + case ( m.data)[( m.p)] >= 9: + goto st271 + } + goto st13 +tr541: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -8680,178 +8736,126 @@ tr191: goto _test_eof59 } st_case_59: -//line plugins/parsers/influx/machine.go:8684 +//line plugins/parsers/influx/machine.go:8740 switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr29 - case 12: - goto tr47 - case 13: - goto st7 case 32: - goto st6 - case 34: - goto tr195 - case 44: - goto st6 - case 61: goto tr196 + case 44: + goto tr196 + case 61: + goto tr53 case 92: - goto st70 + goto st23 } - goto st59 -tr192: - ( m.cs) = 329 -//line plugins/parsers/influx/machine.go.rl:20 + switch { + case ( m.data)[( m.p)] < 12: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { + goto tr196 + } + case ( m.data)[( m.p)] > 13: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st330 + } + default: + goto tr196 + } + goto st13 +tr542: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:140 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr195: - ( m.cs) = 329 -//line plugins/parsers/influx/machine.go.rl:140 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st329: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof329 - } - st_case_329: -//line plugins/parsers/influx/machine.go:8741 - switch ( m.data)[( m.p)] { - case 10: - goto tr103 - case 11: - goto st330 - case 13: - goto st33 - case 32: - goto st272 - case 44: - goto st36 - case 61: - goto tr55 - case 92: - goto st24 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st272 - } - goto st14 + goto st330 st330: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof330 } st_case_330: 
+//line plugins/parsers/influx/machine.go:8775 switch ( m.data)[( m.p)] { case 10: - goto tr103 + goto tr468 case 11: - goto st330 + goto tr543 case 13: - goto st33 + goto tr470 case 32: - goto st272 + goto tr467 case 44: - goto tr198 - case 45: - goto tr544 + goto tr196 case 61: - goto tr55 + goto tr53 case 92: - goto st24 + goto st23 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr545 + goto st332 } case ( m.data)[( m.p)] >= 9: - goto st272 + goto tr467 } - goto st14 -tr544: -//line plugins/parsers/influx/machine.go.rl:20 + goto st13 +tr543: + ( m.cs) = 331 +//line plugins/parsers/influx/machine.go.rl:157 - m.pb = m.p + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- - goto st60 - st60: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof60 - } - st_case_60: -//line plugins/parsers/influx/machine.go:8805 - switch ( m.data)[( m.p)] { - case 32: - goto tr198 - case 44: - goto tr198 - case 61: - goto tr55 - case 92: - goto st24 - } - switch { - case ( m.data)[( m.p)] < 12: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr198 - } - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st331 - } - default: - goto tr198 - } - goto st14 -tr545: -//line plugins/parsers/influx/machine.go.rl:20 + ( m.cs) = 257; + {( m.p)++; goto _out } + } - m.pb = m.p - - goto st331 + goto _again st331: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof331 } st_case_331: -//line plugins/parsers/influx/machine.go:8840 +//line plugins/parsers/influx/machine.go:8819 switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr101 case 11: - goto tr546 + goto st331 case 13: - goto tr472 + goto st32 case 32: - goto tr469 + goto st276 case 44: - goto tr198 + goto tr2 case 61: - goto tr55 + goto tr53 case 92: - goto st24 + goto st23 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st276 + } + goto st13 + st332: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof332 + } + st_case_332: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 } switch { case ( m.data)[( m.p)] > 12: @@ -8859,48 +8863,9 @@ tr545: goto st333 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st14 -tr546: - ( m.cs) = 332 -//line plugins/parsers/influx/machine.go.rl:149 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st332: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof332 - } - st_case_332: -//line plugins/parsers/influx/machine.go:8884 - switch ( m.data)[( m.p)] { - case 10: - goto tr103 - case 11: - goto st332 - case 13: - goto st33 - case 32: - goto st277 - case 44: - goto tr2 - case 61: - goto tr55 - case 92: - goto st24 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st277 - } - goto st14 + goto st13 st333: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof333 @@ -8908,19 +8873,19 @@ tr546: st_case_333: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr546 + goto tr543 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr198 + goto tr196 case 61: - goto tr55 + goto tr53 case 92: - goto st24 + goto st23 } switch { case ( m.data)[( m.p)] > 12: @@ -8928,9 +8893,9 @@ tr546: goto st334 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st14 + goto st13 st334: if 
( m.p)++; ( m.p) == ( m.pe) { goto _test_eof334 @@ -8938,19 +8903,19 @@ tr546: st_case_334: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr546 + goto tr543 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr198 + goto tr196 case 61: - goto tr55 + goto tr53 case 92: - goto st24 + goto st23 } switch { case ( m.data)[( m.p)] > 12: @@ -8958,9 +8923,9 @@ tr546: goto st335 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st14 + goto st13 st335: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof335 @@ -8968,19 +8933,19 @@ tr546: st_case_335: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr546 + goto tr543 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr198 + goto tr196 case 61: - goto tr55 + goto tr53 case 92: - goto st24 + goto st23 } switch { case ( m.data)[( m.p)] > 12: @@ -8988,9 +8953,9 @@ tr546: goto st336 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st14 + goto st13 st336: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof336 @@ -8998,19 +8963,19 @@ tr546: st_case_336: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr546 + goto tr543 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr198 + goto tr196 case 61: - goto tr55 + goto tr53 case 92: - goto st24 + goto st23 } switch { case ( m.data)[( m.p)] > 12: @@ -9018,9 +8983,9 @@ tr546: goto st337 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st14 + goto st13 st337: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof337 @@ -9028,19 +8993,19 @@ tr546: st_case_337: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr546 + goto tr543 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr198 + goto tr196 case 61: - goto tr55 + goto tr53 case 92: - goto st24 + goto st23 } switch { case ( m.data)[( m.p)] > 12: @@ -9048,9 +9013,9 @@ tr546: goto st338 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st14 + goto st13 st338: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof338 @@ -9058,19 +9023,19 @@ tr546: st_case_338: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr546 + goto tr543 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr198 + goto tr196 case 61: - goto tr55 + goto tr53 case 92: - goto st24 + goto st23 } switch { case ( m.data)[( m.p)] > 12: @@ -9078,9 +9043,9 @@ tr546: goto st339 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st14 + goto st13 st339: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof339 @@ -9088,19 +9053,19 @@ tr546: st_case_339: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr546 + goto tr543 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr198 + goto tr196 case 61: - goto tr55 + goto tr53 case 92: - goto st24 + goto st23 } switch { case ( m.data)[( m.p)] > 12: @@ -9108,9 +9073,9 @@ tr546: goto st340 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st14 + goto st13 st340: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof340 @@ -9118,19 +9083,19 @@ tr546: st_case_340: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr546 + goto tr543 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr198 + goto tr196 case 61: - goto tr55 + goto tr53 case 92: - goto st24 + goto 
st23 } switch { case ( m.data)[( m.p)] > 12: @@ -9138,9 +9103,9 @@ tr546: goto st341 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st14 + goto st13 st341: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof341 @@ -9148,19 +9113,19 @@ tr546: st_case_341: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr546 + goto tr543 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr198 + goto tr196 case 61: - goto tr55 + goto tr53 case 92: - goto st24 + goto st23 } switch { case ( m.data)[( m.p)] > 12: @@ -9168,9 +9133,9 @@ tr546: goto st342 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st14 + goto st13 st342: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof342 @@ -9178,19 +9143,19 @@ tr546: st_case_342: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr546 + goto tr543 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr198 + goto tr196 case 61: - goto tr55 + goto tr53 case 92: - goto st24 + goto st23 } switch { case ( m.data)[( m.p)] > 12: @@ -9198,9 +9163,9 @@ tr546: goto st343 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st14 + goto st13 st343: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof343 @@ -9208,19 +9173,19 @@ tr546: st_case_343: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr546 + goto tr543 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr198 + goto tr196 case 61: - goto tr55 + goto tr53 case 92: - goto st24 + goto st23 } switch { case ( m.data)[( m.p)] > 12: @@ -9228,9 +9193,9 @@ tr546: goto st344 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st14 + goto st13 st344: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof344 @@ -9238,19 +9203,19 @@ tr546: st_case_344: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr546 + goto tr543 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr198 + goto tr196 case 61: - goto tr55 + goto tr53 case 92: - goto st24 + goto st23 } switch { case ( m.data)[( m.p)] > 12: @@ -9258,9 +9223,9 @@ tr546: goto st345 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st14 + goto st13 st345: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof345 @@ -9268,19 +9233,19 @@ tr546: st_case_345: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr546 + goto tr543 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr198 + goto tr196 case 61: - goto tr55 + goto tr53 case 92: - goto st24 + goto st23 } switch { case ( m.data)[( m.p)] > 12: @@ -9288,9 +9253,9 @@ tr546: goto st346 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st14 + goto st13 st346: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof346 @@ -9298,19 +9263,19 @@ tr546: st_case_346: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr546 + goto tr543 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr198 + goto tr196 case 61: - goto tr55 + goto tr53 case 92: - goto st24 + goto st23 } switch { case ( m.data)[( m.p)] > 12: @@ -9318,9 +9283,9 @@ tr546: goto st347 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st14 + goto st13 st347: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof347 @@ -9328,19 +9293,19 @@ tr546: st_case_347: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr546 + 
goto tr543 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr198 + goto tr196 case 61: - goto tr55 + goto tr53 case 92: - goto st24 + goto st23 } switch { case ( m.data)[( m.p)] > 12: @@ -9348,9 +9313,9 @@ tr546: goto st348 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st14 + goto st13 st348: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof348 @@ -9358,19 +9323,19 @@ tr546: st_case_348: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr546 + goto tr543 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr198 + goto tr196 case 61: - goto tr55 + goto tr53 case 92: - goto st24 + goto st23 } switch { case ( m.data)[( m.p)] > 12: @@ -9378,9 +9343,9 @@ tr546: goto st349 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st14 + goto st13 st349: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof349 @@ -9388,113 +9353,213 @@ tr546: st_case_349: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr546 + goto tr543 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr198 + goto tr196 case 61: - goto tr55 + goto tr53 case 92: - goto st24 + goto st23 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st350 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr467 } - goto st14 + goto st13 +tr194: +//line plugins/parsers/influx/machine.go.rl:95 + + m.key = m.text() + + goto st60 + st60: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof60 + } + st_case_60: +//line plugins/parsers/influx/machine.go:9386 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr149 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr151 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto tr146 +tr149: + ( m.cs) = 350 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr155: + ( m.cs) = 350 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st350: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof350 } st_case_350: +//line plugins/parsers/influx/machine.go:9442 switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr101 case 11: - goto tr546 + goto tr564 case 13: - goto tr472 + goto st32 case 32: - goto tr469 + goto tr563 case 44: - goto tr198 + goto tr565 case 61: - goto tr55 + goto tr130 case 92: - goto st24 + goto st21 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr469 + goto tr563 } - goto st14 -tr196: -//line plugins/parsers/influx/machine.go.rl:87 - - m.key = m.text() - - goto st61 - st61: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof61 - } - st_case_61: -//line plugins/parsers/influx/machine.go:9451 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr29 - case 12: - goto tr47 - case 13: - goto st7 - case 32: - goto st6 - case 34: - goto tr151 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr153 - } - goto tr148 -tr151: + goto st15 +tr564: ( m.cs) = 351 -//line plugins/parsers/influx/machine.go.rl:20 +//line 
plugins/parsers/influx/machine.go.rl:99 - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:140 - - err = m.handler.AddString(m.key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr157: +tr731: ( m.cs) = 351 -//line plugins/parsers/influx/machine.go.rl:140 +//line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddString(m.key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr943: + ( m.cs) = 351 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr949: + ( m.cs) = 351 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr955: + ( m.cs) = 351 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; {( m.p)++; goto _out } } @@ -9504,130 +9569,62 @@ tr157: goto _test_eof351 } st_case_351: -//line plugins/parsers/influx/machine.go:9508 +//line plugins/parsers/influx/machine.go:9573 switch ( m.data)[( m.p)] { case 10: - goto tr103 + goto tr101 case 11: - goto tr567 + goto tr566 case 13: - goto st33 + goto st32 case 32: - goto tr566 + goto tr563 case 44: - goto tr568 + goto tr60 + case 45: + goto tr567 case 61: - goto tr132 + goto tr130 case 92: - goto st22 + goto tr64 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr566 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr568 + } + case ( m.data)[( m.p)] >= 9: + goto tr563 } - goto st16 -tr567: + goto tr62 +tr591: ( m.cs) = 352 -//line plugins/parsers/influx/machine.go.rl:91 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr735: +tr566: ( m.cs) = 352 -//line plugins/parsers/influx/machine.go.rl:91 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:122 +//line plugins/parsers/influx/machine.go.rl:28 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr953: - ( m.cs) = 352 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } 
- -//line plugins/parsers/influx/machine.go.rl:104 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr958: - ( m.cs) = 352 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:113 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr963: - ( m.cs) = 352 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:131 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } + m.pb = m.p goto _again st352: @@ -9635,99 +9632,711 @@ tr963: goto _test_eof352 } st_case_352: -//line plugins/parsers/influx/machine.go:9639 +//line plugins/parsers/influx/machine.go:9636 switch ( m.data)[( m.p)] { case 10: - goto tr103 + goto tr101 case 11: - goto tr569 - case 13: - goto st33 - case 32: goto tr566 + case 13: + goto st32 + case 32: + goto tr563 case 44: - goto tr62 + goto tr60 case 45: - goto tr570 + goto tr567 case 61: - goto tr132 + goto tr12 case 92: - goto tr66 + goto tr64 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr571 + goto tr568 } case ( m.data)[( m.p)] >= 9: - goto tr566 + goto tr563 } - goto tr64 -tr594: - ( m.cs) = 353 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr569: - ( m.cs) = 353 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:20 + goto tr62 +tr567: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto _again + goto st61 + st61: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof61 + } + st_case_61: +//line plugins/parsers/influx/machine.go:9675 + switch ( m.data)[( m.p)] { + case 10: + goto tr130 + case 11: + goto tr66 + case 13: + goto tr130 + case 32: + goto tr58 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st353 + } + case ( m.data)[( m.p)] >= 9: + goto tr58 + } + goto st17 +tr568: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st353 st353: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof353 } st_case_353: -//line plugins/parsers/influx/machine.go:9702 +//line plugins/parsers/influx/machine.go:9712 switch ( m.data)[( m.p)] { case 10: - goto tr103 + goto tr468 case 11: - goto tr569 - case 13: - goto st33 - case 32: - goto tr566 - case 44: - goto tr62 - case 45: goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 case 61: goto tr12 case 92: - goto tr66 + goto st19 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr571 + goto st355 } case ( m.data)[( m.p)] >= 9: - goto tr566 + goto tr569 } - goto tr64 + goto st17 +tr573: + ( m.cs) = 354 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, 
m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again tr570: -//line plugins/parsers/influx/machine.go.rl:20 + ( m.cs) = 354 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st354: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof354 + } + st_case_354: +//line plugins/parsers/influx/machine.go:9783 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto tr573 + case 13: + goto st32 + case 32: + goto tr572 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto tr64 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr572 + } + goto tr62 + st355: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof355 + } + st_case_355: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st356 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st356: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof356 + } + st_case_356: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st357 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st357: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof357 + } + st_case_357: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st358 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st358: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof358 + } + st_case_358: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st359 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st359: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof359 + } + st_case_359: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st360 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st360: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof360 + } + st_case_360: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + 
case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st361 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st361: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof361 + } + st_case_361: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st362 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st362: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof362 + } + st_case_362: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st363 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st363: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof363 + } + st_case_363: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st364 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st364: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof364 + } + st_case_364: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st365 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st365: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof365 + } + st_case_365: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st366 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st366: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof366 + } + st_case_366: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st367 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st367: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof367 + } + st_case_367: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st368 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st368: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof368 + } + st_case_368: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: 
+ goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st369 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st369: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof369 + } + st_case_369: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st370 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st370: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof370 + } + st_case_370: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st371 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st371: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof371 + } + st_case_371: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st372 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st372: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof372 + } + st_case_372: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr569 + } + goto st17 +tr151: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -9737,908 +10346,443 @@ tr570: goto _test_eof62 } st_case_62: -//line plugins/parsers/influx/machine.go:9741 - switch ( m.data)[( m.p)] { - case 10: - goto tr132 - case 11: - goto tr68 - case 13: - goto tr132 - case 32: - goto tr60 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st354 - } - case ( m.data)[( m.p)] >= 9: - goto tr60 - } - goto st18 -tr571: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st354 - st354: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof354 - } - st_case_354: -//line plugins/parsers/influx/machine.go:9778 - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr573 - case 13: - goto tr472 - case 32: - goto tr572 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st356 - } - case ( m.data)[( m.p)] >= 9: - goto tr572 - } - goto st18 -tr576: - ( m.cs) = 355 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto _again -tr573: - ( m.cs) = 355 -//line plugins/parsers/influx/machine.go.rl:91 - - err = 
m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:149 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st355: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof355 - } - st_case_355: -//line plugins/parsers/influx/machine.go:9849 - switch ( m.data)[( m.p)] { - case 10: - goto tr103 - case 11: - goto tr576 - case 13: - goto st33 - case 32: - goto tr575 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto tr66 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr575 - } - goto tr64 - st356: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof356 - } - st_case_356: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr573 - case 13: - goto tr472 - case 32: - goto tr572 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st357 - } - case ( m.data)[( m.p)] >= 9: - goto tr572 - } - goto st18 - st357: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof357 - } - st_case_357: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr573 - case 13: - goto tr472 - case 32: - goto tr572 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st358 - } - case ( m.data)[( m.p)] >= 9: - goto tr572 - } - goto st18 - st358: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof358 - } - st_case_358: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr573 - case 13: - goto tr472 - case 32: - goto tr572 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st359 - } - case ( m.data)[( m.p)] >= 9: - goto tr572 - } - goto st18 - st359: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof359 - } - st_case_359: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr573 - case 13: - goto tr472 - case 32: - goto tr572 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st360 - } - case ( m.data)[( m.p)] >= 9: - goto tr572 - } - goto st18 - st360: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof360 - } - st_case_360: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr573 - case 13: - goto tr472 - case 32: - goto tr572 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st361 - } - case ( m.data)[( m.p)] >= 9: - goto tr572 - } - goto st18 - st361: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof361 - } - st_case_361: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr573 - case 13: - goto tr472 - case 32: - goto tr572 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st362 - } - case ( m.data)[( m.p)] >= 9: - goto tr572 - } - goto st18 - st362: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof362 - } - st_case_362: - switch ( 
m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr573 - case 13: - goto tr472 - case 32: - goto tr572 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st363 - } - case ( m.data)[( m.p)] >= 9: - goto tr572 - } - goto st18 - st363: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof363 - } - st_case_363: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr573 - case 13: - goto tr472 - case 32: - goto tr572 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st364 - } - case ( m.data)[( m.p)] >= 9: - goto tr572 - } - goto st18 - st364: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof364 - } - st_case_364: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr573 - case 13: - goto tr472 - case 32: - goto tr572 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st365 - } - case ( m.data)[( m.p)] >= 9: - goto tr572 - } - goto st18 - st365: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof365 - } - st_case_365: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr573 - case 13: - goto tr472 - case 32: - goto tr572 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st366 - } - case ( m.data)[( m.p)] >= 9: - goto tr572 - } - goto st18 - st366: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof366 - } - st_case_366: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr573 - case 13: - goto tr472 - case 32: - goto tr572 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st367 - } - case ( m.data)[( m.p)] >= 9: - goto tr572 - } - goto st18 - st367: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof367 - } - st_case_367: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr573 - case 13: - goto tr472 - case 32: - goto tr572 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st368 - } - case ( m.data)[( m.p)] >= 9: - goto tr572 - } - goto st18 - st368: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof368 - } - st_case_368: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr573 - case 13: - goto tr472 - case 32: - goto tr572 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st369 - } - case ( m.data)[( m.p)] >= 9: - goto tr572 - } - goto st18 - st369: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof369 - } - st_case_369: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr573 - case 13: - goto tr472 - case 32: - goto tr572 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st370 - } - case ( m.data)[( m.p)] >= 9: - goto tr572 
- } - goto st18 - st370: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof370 - } - st_case_370: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr573 - case 13: - goto tr472 - case 32: - goto tr572 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st371 - } - case ( m.data)[( m.p)] >= 9: - goto tr572 - } - goto st18 - st371: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof371 - } - st_case_371: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr573 - case 13: - goto tr472 - case 32: - goto tr572 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st372 - } - case ( m.data)[( m.p)] >= 9: - goto tr572 - } - goto st18 - st372: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof372 - } - st_case_372: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr573 - case 13: - goto tr472 - case 32: - goto tr572 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st373 - } - case ( m.data)[( m.p)] >= 9: - goto tr572 - } - goto st18 - st373: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof373 - } - st_case_373: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 11: - goto tr573 - case 13: - goto tr472 - case 32: - goto tr572 - case 44: - goto tr62 - case 61: - goto tr12 - case 92: - goto st20 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr572 - } - goto st18 -tr153: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st63 - st63: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof63 - } - st_case_63: -//line plugins/parsers/influx/machine.go:10416 +//line plugins/parsers/influx/machine.go:10350 switch ( m.data)[( m.p)] { case 34: - goto st48 + goto st47 case 92: - goto st64 + goto st63 } switch { case ( m.data)[( m.p)] > 10: if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr47 + goto tr45 } case ( m.data)[( m.p)] >= 9: - goto tr47 + goto tr45 } - goto st16 - st64: -//line plugins/parsers/influx/machine.go.rl:240 + goto st15 + st63: +//line plugins/parsers/influx/machine.go.rl:248 ( m.p)-- if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof64 + goto _test_eof63 } - st_case_64: -//line plugins/parsers/influx/machine.go:10440 + st_case_63: +//line plugins/parsers/influx/machine.go:10374 switch ( m.data)[( m.p)] { - case 9: - goto tr155 case 10: - goto tr29 + goto tr28 case 11: - goto tr156 - case 12: - goto tr60 + goto tr154 case 13: - goto st7 + goto st6 case 32: - goto tr155 + goto tr153 case 34: - goto tr157 + goto tr155 case 44: - goto tr158 + goto tr156 case 61: goto st6 case 92: - goto st63 + goto st62 } - goto st48 -tr156: - ( m.cs) = 65 -//line plugins/parsers/influx/machine.go.rl:91 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st47 +tr154: + ( m.cs) = 64 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr150: - ( m.cs) = 65 -//line plugins/parsers/influx/machine.go.rl:91 +tr148: + ( m.cs) = 64 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) 
= 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p goto _again + st64: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof64 + } + st_case_64: +//line plugins/parsers/influx/machine.go:10432 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr201 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr202 + case 44: + goto tr156 + case 61: + goto st6 + case 92: + goto tr203 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto tr200 +tr200: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st65 st65: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof65 } st_case_65: -//line plugins/parsers/influx/machine.go:10499 +//line plugins/parsers/influx/machine.go:10466 switch ( m.data)[( m.p)] { - case 9: - goto tr155 case 10: - goto tr29 + goto tr28 case 11: - goto tr203 - case 12: - goto tr60 - case 13: - goto st7 - case 32: - goto tr155 - case 34: - goto tr204 - case 44: - goto tr158 - case 61: - goto st6 - case 92: goto tr205 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 } - goto tr202 -tr202: -//line plugins/parsers/influx/machine.go.rl:20 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st65 +tr205: + ( m.cs) = 66 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr201: + ( m.cs) = 66 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st66 + goto _again st66: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof66 } st_case_66: -//line plugins/parsers/influx/machine.go:10534 +//line plugins/parsers/influx/machine.go:10524 switch ( m.data)[( m.p)] { - case 9: - goto tr155 case 10: - goto tr29 + goto tr28 case 11: - goto tr207 - case 12: - goto tr60 + goto tr201 case 13: - goto st7 + goto st6 case 32: - goto tr155 + goto tr153 case 34: - goto tr208 + goto tr202 case 44: - goto tr158 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto st68 + goto tr203 } - goto st66 -tr207: - ( m.cs) = 67 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr203: - ( m.cs) = 67 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:20 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto tr200 +tr202: + ( m.cs) = 373 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + goto _again +tr206: + ( m.cs) = 373 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st373: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof373 + } + st_case_373: +//line 
plugins/parsers/influx/machine.go:10582 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto tr591 + case 13: + goto st32 + case 32: + goto tr563 + case 44: + goto tr565 + case 61: + goto tr12 + case 92: + goto st19 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr563 + } + goto st17 +tr203: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st67 st67: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof67 } st_case_67: -//line plugins/parsers/influx/machine.go:10593 +//line plugins/parsers/influx/machine.go:10614 switch ( m.data)[( m.p)] { - case 9: - goto tr155 - case 10: - goto tr29 - case 11: - goto tr203 - case 12: - goto tr60 - case 13: - goto st7 - case 32: - goto tr155 case 34: - goto tr204 - case 44: - goto tr158 - case 61: - goto tr165 + goto st65 case 92: - goto tr205 + goto st68 } - goto tr202 -tr204: - ( m.cs) = 374 -//line plugins/parsers/influx/machine.go.rl:20 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 + } + goto st17 + st68: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof68 + } + st_case_68: +//line plugins/parsers/influx/machine.go:10638 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr205 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st65 +tr191: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:140 + goto st69 + st69: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof69 + } + st_case_69: +//line plugins/parsers/influx/machine.go:10672 + switch ( m.data)[( m.p)] { + case 34: + goto st58 + case 92: + goto st70 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 + } + goto st13 + st70: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof70 + } + st_case_70: +//line plugins/parsers/influx/machine.go:10696 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr193 + case 44: + goto st6 + case 61: + goto tr194 + case 92: + goto st69 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st58 +tr187: +//line plugins/parsers/influx/machine.go.rl:108 - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- + m.key = m.text() - ( m.cs) = 258; - {( m.p)++; goto _out } - } + goto st71 +tr344: +//line plugins/parsers/influx/machine.go.rl:28 - goto _again -tr208: + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() + + goto st71 + st71: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof71 + } + st_case_71: +//line plugins/parsers/influx/machine.go:10738 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr179 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr210 + case 44: + goto tr180 + case 45: + goto tr211 + case 46: + goto tr212 + case 48: + goto tr213 + case 70: + goto tr215 + case 84: + goto tr216 + case 92: + goto st155 + case 102: + goto tr217 + case 116: + goto tr218 + } + switch { + case ( m.data)[( m.p)] > 12: + if 49 <= ( m.data)[( m.p)] && ( 
m.data)[( m.p)] <= 57 { + goto tr214 + } + case ( m.data)[( m.p)] >= 9: + goto tr178 + } + goto st53 +tr210: ( m.cs) = 374 -//line plugins/parsers/influx/machine.go.rl:140 +//line plugins/parsers/influx/machine.go.rl:148 err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } @@ -10648,212 +10792,250 @@ tr208: goto _test_eof374 } st_case_374: -//line plugins/parsers/influx/machine.go:10652 +//line plugins/parsers/influx/machine.go:10796 switch ( m.data)[( m.p)] { case 10: - goto tr103 + goto tr492 case 11: - goto tr594 + goto tr593 case 13: - goto st33 + goto tr493 case 32: - goto tr566 + goto tr592 + case 34: + goto tr83 case 44: - goto tr568 - case 61: - goto tr12 + goto tr594 case 92: - goto st20 + goto tr85 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr566 + goto tr592 } - goto st18 -tr205: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st68 - st68: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof68 - } - st_case_68: -//line plugins/parsers/influx/machine.go:10684 - switch ( m.data)[( m.p)] { - case 34: - goto st66 - case 92: - goto st69 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr47 - } - case ( m.data)[( m.p)] >= 9: - goto tr47 - } - goto st18 - st69: -//line plugins/parsers/influx/machine.go.rl:240 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof69 - } - st_case_69: -//line plugins/parsers/influx/machine.go:10708 - switch ( m.data)[( m.p)] { - case 9: - goto tr155 - case 10: - goto tr29 - case 11: - goto tr207 - case 12: - goto tr60 - case 13: - goto st7 - case 32: - goto tr155 - case 34: - goto tr208 - case 44: - goto tr158 - case 61: - goto tr165 - case 92: - goto st68 - } - goto st66 -tr193: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st70 - st70: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof70 - } - st_case_70: -//line plugins/parsers/influx/machine.go:10743 - switch ( m.data)[( m.p)] { - case 34: - goto st59 - case 92: - goto st71 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr47 - } - case ( m.data)[( m.p)] >= 9: - goto tr47 - } - goto st14 - st71: -//line plugins/parsers/influx/machine.go.rl:240 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof71 - } - st_case_71: -//line plugins/parsers/influx/machine.go:10767 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr29 - case 12: - goto tr47 - case 13: - goto st7 - case 32: - goto st6 - case 34: - goto tr195 - case 44: - goto st6 - case 61: - goto tr196 - case 92: - goto st70 - } - goto st59 -tr189: -//line plugins/parsers/influx/machine.go.rl:100 - - m.key = m.text() - - goto st72 -tr346: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:100 - - m.key = m.text() - - goto st72 - st72: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof72 - } - st_case_72: -//line plugins/parsers/influx/machine.go:10810 - switch ( m.data)[( m.p)] { - case 9: - goto tr180 - case 10: - goto tr29 - case 11: - goto tr181 - case 12: - goto tr1 - case 13: - goto st7 - case 32: - goto tr180 - case 34: - goto tr212 - case 44: - goto tr182 - case 45: - goto tr213 - case 46: - goto tr214 - case 48: - goto tr215 - case 70: - goto tr217 - case 84: - goto tr218 - case 92: - goto st156 - case 102: - goto tr219 - case 116: - goto tr220 - } - if 49 <= ( m.data)[( m.p)] && ( 
m.data)[( m.p)] <= 57 { - goto tr216 - } - goto st54 -tr212: + goto tr80 +tr623: ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:140 +//line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.AddString(m.key, m.text()) + err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr592: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again +tr762: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr635: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr757: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr790: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr796: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr802: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr816: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr821: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr826: + 
( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; {( m.p)++; goto _out } } @@ -10863,412 +11045,184 @@ tr212: goto _test_eof375 } st_case_375: -//line plugins/parsers/influx/machine.go:10867 +//line plugins/parsers/influx/machine.go:11049 switch ( m.data)[( m.p)] { - case 9: - goto tr595 case 10: - goto tr494 + goto tr219 case 11: goto tr596 - case 12: - goto tr501 case 13: - goto tr495 + goto st72 case 32: - goto tr595 + goto st375 case 34: - goto tr85 + goto tr95 case 44: + goto st6 + case 45: goto tr597 + case 61: + goto st6 case 92: - goto tr87 + goto tr96 } - goto tr82 -tr626: - ( m.cs) = 376 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr595: - ( m.cs) = 376 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:20 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr598 + } + case ( m.data)[( m.p)] >= 9: + goto st375 + } + goto tr92 +tr596: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto _again -tr769: - ( m.cs) = 376 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr638: - ( m.cs) = 376 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:122 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr764: - ( m.cs) = 376 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:122 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr797: - ( m.cs) = 376 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:104 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr803: - ( m.cs) = 376 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:113 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr809: - ( m.cs) = 376 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:131 
- - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr822: - ( m.cs) = 376 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:104 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr828: - ( m.cs) = 376 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:113 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr834: - ( m.cs) = 376 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:131 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again + goto st376 st376: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof376 } st_case_376: -//line plugins/parsers/influx/machine.go:11121 +//line plugins/parsers/influx/machine.go:11090 switch ( m.data)[( m.p)] { - case 9: - goto st376 case 10: - goto tr221 + goto tr219 case 11: - goto tr599 - case 12: - goto st301 + goto tr596 case 13: - goto st73 + goto st72 case 32: - goto st376 + goto st375 case 34: - goto tr97 + goto tr95 case 44: goto st6 case 45: - goto tr600 + goto tr597 case 61: - goto st6 + goto tr99 case 92: - goto tr98 + goto tr96 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr601 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr598 + } + case ( m.data)[( m.p)] >= 9: + goto st375 } - goto tr94 -tr599: -//line plugins/parsers/influx/machine.go.rl:20 + goto tr92 +tr493: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st377 - st377: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof377 - } - st_case_377: -//line plugins/parsers/influx/machine.go:11161 - switch ( m.data)[( m.p)] { - case 9: - goto st376 - case 10: - goto tr221 - case 11: - goto tr599 - case 12: - goto st301 - case 13: - goto st73 - case 32: - goto st376 - case 34: - goto tr97 - case 44: - goto st6 - case 45: - goto tr600 - case 61: - goto tr101 - case 92: - goto tr98 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr601 - } - goto tr94 -tr495: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st73 -tr605: - ( m.cs) = 73 -//line plugins/parsers/influx/machine.go.rl:149 + goto st72 +tr602: + ( m.cs) = 72 +//line plugins/parsers/influx/machine.go.rl:157 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr642: - ( m.cs) = 73 -//line plugins/parsers/influx/machine.go.rl:122 +tr638: + ( m.cs) = 72 +//line plugins/parsers/influx/machine.go.rl:130 err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr800: - ( m.cs) = 73 -//line plugins/parsers/influx/machine.go.rl:104 +tr793: + ( m.cs) = 72 +//line plugins/parsers/influx/machine.go.rl:112 err = 
m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr806: - ( m.cs) = 73 -//line plugins/parsers/influx/machine.go.rl:113 +tr799: + ( m.cs) = 72 +//line plugins/parsers/influx/machine.go.rl:121 err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr812: - ( m.cs) = 73 -//line plugins/parsers/influx/machine.go.rl:131 +tr805: + ( m.cs) = 72 +//line plugins/parsers/influx/machine.go.rl:139 err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again + st72: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof72 + } + st_case_72: +//line plugins/parsers/influx/machine.go:11196 + switch ( m.data)[( m.p)] { + case 10: + goto tr219 + case 34: + goto tr29 + case 92: + goto st73 + } + goto st6 +tr26: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st73 st73: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof73 } st_case_73: -//line plugins/parsers/influx/machine.go:11266 - if ( m.data)[( m.p)] == 10 { - goto tr221 +//line plugins/parsers/influx/machine.go:11217 + switch ( m.data)[( m.p)] { + case 34: + goto st6 + case 92: + goto st6 } goto tr8 -tr600: -//line plugins/parsers/influx/machine.go.rl:20 +tr597: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -11278,78 +11232,115 @@ tr600: goto _test_eof74 } st_case_74: -//line plugins/parsers/influx/machine.go:11282 +//line plugins/parsers/influx/machine.go:11236 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto tr29 - case 12: - goto tr105 - case 13: - goto st7 + goto tr28 case 32: goto st6 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st378 + switch { + case ( m.data)[( m.p)] > 13: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st377 + } + case ( m.data)[( m.p)] >= 12: + goto st6 } - goto st32 -tr601: -//line plugins/parsers/influx/machine.go.rl:20 + goto st31 +tr598: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st378 - st378: + goto st377 + st377: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof378 + goto _test_eof377 } - st_case_378: -//line plugins/parsers/influx/machine.go:11318 + st_case_377: +//line plugins/parsers/influx/machine.go:11273 switch ( m.data)[( m.p)] { - case 9: - goto tr602 case 10: - goto tr603 + goto tr600 case 11: - goto tr604 - case 12: - goto tr469 + goto tr601 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st381 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st380 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } - goto st32 -tr602: - ( m.cs) = 379 -//line plugins/parsers/influx/machine.go.rl:149 + goto st31 +tr599: + ( m.cs) = 378 +//line plugins/parsers/influx/machine.go.rl:157 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st378: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof378 + } + st_case_378: +//line plugins/parsers/influx/machine.go:11319 + switch ( m.data)[( m.p)] { + case 10: + goto tr219 + case 
13: + goto st72 + case 32: + goto st378 + case 34: + goto tr29 + case 92: + goto st73 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st378 + } + goto st6 +tr601: + ( m.cs) = 379 +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; {( m.p)++; goto _out } } @@ -11359,27 +11350,31 @@ tr602: goto _test_eof379 } st_case_379: -//line plugins/parsers/influx/machine.go:11363 +//line plugins/parsers/influx/machine.go:11354 switch ( m.data)[( m.p)] { case 10: - goto tr221 - case 12: - goto st277 - case 13: - goto st73 - case 32: + goto tr219 + case 11: goto st379 + case 13: + goto st72 + case 32: + goto st378 case 34: - goto tr31 + goto tr98 + case 44: + goto st6 + case 61: + goto tr99 case 92: goto st75 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto st379 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st378 } - goto st6 -tr27: -//line plugins/parsers/influx/machine.go.rl:20 + goto st31 +tr96: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -11389,73 +11384,12 @@ tr27: goto _test_eof75 } st_case_75: -//line plugins/parsers/influx/machine.go:11393 +//line plugins/parsers/influx/machine.go:11388 switch ( m.data)[( m.p)] { case 34: - goto st6 + goto st31 case 92: - goto st6 - } - goto tr8 -tr604: - ( m.cs) = 380 -//line plugins/parsers/influx/machine.go.rl:149 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st380: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof380 - } - st_case_380: -//line plugins/parsers/influx/machine.go:11419 - switch ( m.data)[( m.p)] { - case 9: - goto st379 - case 10: - goto tr221 - case 11: - goto st380 - case 12: - goto st277 - case 13: - goto st73 - case 32: - goto st379 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr101 - case 92: - goto st76 - } - goto st32 -tr98: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st76 - st76: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof76 - } - st_case_76: -//line plugins/parsers/influx/machine.go:11454 - switch ( m.data)[( m.p)] { - case 34: - goto st32 - case 92: - goto st32 + goto st31 } switch { case ( m.data)[( m.p)] > 10: @@ -11466,862 +11400,910 @@ tr98: goto tr8 } goto st3 + st380: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof380 + } + st_case_380: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 11: + goto tr601 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr99 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st381 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 + } + goto st31 st381: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof381 } st_case_381: switch ( m.data)[( m.p)] { - case 9: - goto tr602 case 10: - goto tr603 + goto tr600 case 11: - goto tr604 - case 12: - goto tr469 + goto tr601 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st382 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st382 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } - goto st32 + goto st31 st382: if ( m.p)++; ( m.p) == ( m.pe) { goto 
_test_eof382 } st_case_382: switch ( m.data)[( m.p)] { - case 9: - goto tr602 case 10: - goto tr603 + goto tr600 case 11: - goto tr604 - case 12: - goto tr469 + goto tr601 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st383 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st383 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } - goto st32 + goto st31 st383: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof383 } st_case_383: switch ( m.data)[( m.p)] { - case 9: - goto tr602 case 10: - goto tr603 + goto tr600 case 11: - goto tr604 - case 12: - goto tr469 + goto tr601 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st384 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st384 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } - goto st32 + goto st31 st384: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof384 } st_case_384: switch ( m.data)[( m.p)] { - case 9: - goto tr602 case 10: - goto tr603 + goto tr600 case 11: - goto tr604 - case 12: - goto tr469 + goto tr601 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st385 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st385 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } - goto st32 + goto st31 st385: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof385 } st_case_385: switch ( m.data)[( m.p)] { - case 9: - goto tr602 case 10: - goto tr603 + goto tr600 case 11: - goto tr604 - case 12: - goto tr469 + goto tr601 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st386 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st386 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } - goto st32 + goto st31 st386: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof386 } st_case_386: switch ( m.data)[( m.p)] { - case 9: - goto tr602 case 10: - goto tr603 + goto tr600 case 11: - goto tr604 - case 12: - goto tr469 + goto tr601 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st387 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st387 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } - goto st32 + goto st31 st387: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof387 } st_case_387: switch ( m.data)[( m.p)] { - case 9: - goto tr602 case 10: - goto tr603 + goto tr600 case 11: - goto tr604 - case 12: - goto tr469 + goto tr601 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto 
tr599 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st388 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st388 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } - goto st32 + goto st31 st388: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof388 } st_case_388: switch ( m.data)[( m.p)] { - case 9: - goto tr602 case 10: - goto tr603 + goto tr600 case 11: - goto tr604 - case 12: - goto tr469 + goto tr601 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st389 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st389 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } - goto st32 + goto st31 st389: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof389 } st_case_389: switch ( m.data)[( m.p)] { - case 9: - goto tr602 case 10: - goto tr603 + goto tr600 case 11: - goto tr604 - case 12: - goto tr469 + goto tr601 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st390 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st390 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } - goto st32 + goto st31 st390: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof390 } st_case_390: switch ( m.data)[( m.p)] { - case 9: - goto tr602 case 10: - goto tr603 + goto tr600 case 11: - goto tr604 - case 12: - goto tr469 + goto tr601 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st391 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st391 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } - goto st32 + goto st31 st391: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof391 } st_case_391: switch ( m.data)[( m.p)] { - case 9: - goto tr602 case 10: - goto tr603 + goto tr600 case 11: - goto tr604 - case 12: - goto tr469 + goto tr601 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st392 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st392 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } - goto st32 + goto st31 st392: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof392 } st_case_392: switch ( m.data)[( m.p)] { - case 9: - goto tr602 case 10: - goto tr603 + goto tr600 case 11: - goto tr604 - case 12: - goto tr469 + goto tr601 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st393 + switch { + case ( m.data)[( m.p)] 
> 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st393 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } - goto st32 + goto st31 st393: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof393 } st_case_393: switch ( m.data)[( m.p)] { - case 9: - goto tr602 case 10: - goto tr603 + goto tr600 case 11: - goto tr604 - case 12: - goto tr469 + goto tr601 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st394 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st394 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } - goto st32 + goto st31 st394: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof394 } st_case_394: switch ( m.data)[( m.p)] { - case 9: - goto tr602 case 10: - goto tr603 + goto tr600 case 11: - goto tr604 - case 12: - goto tr469 + goto tr601 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st395 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st395 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } - goto st32 + goto st31 st395: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof395 } st_case_395: switch ( m.data)[( m.p)] { - case 9: - goto tr602 case 10: - goto tr603 + goto tr600 case 11: - goto tr604 - case 12: - goto tr469 + goto tr601 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st396 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st396 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } - goto st32 + goto st31 st396: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof396 } st_case_396: switch ( m.data)[( m.p)] { - case 9: - goto tr602 case 10: - goto tr603 + goto tr600 case 11: - goto tr604 - case 12: - goto tr469 + goto tr601 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st397 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st397 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } - goto st32 + goto st31 st397: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof397 } st_case_397: switch ( m.data)[( m.p)] { - case 9: - goto tr602 case 10: - goto tr603 + goto tr600 case 11: - goto tr604 - case 12: - goto tr469 + goto tr601 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr101 + goto tr99 case 92: - goto st76 + goto st75 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st398 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr599 } - goto st32 - st398: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof398 - } - st_case_398: - switch ( m.data)[( m.p)] { - case 9: - goto tr602 - case 10: - goto tr603 - case 11: - goto tr604 - 
case 12: - goto tr469 - case 13: - goto tr605 - case 32: - goto tr602 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr101 - case 92: - goto st76 - } - goto st32 -tr596: - ( m.cs) = 399 -//line plugins/parsers/influx/machine.go.rl:78 + goto st31 +tr593: + ( m.cs) = 398 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p goto _again -tr640: - ( m.cs) = 399 -//line plugins/parsers/influx/machine.go.rl:78 +tr637: + ( m.cs) = 398 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:122 +//line plugins/parsers/influx/machine.go.rl:130 err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr824: - ( m.cs) = 399 -//line plugins/parsers/influx/machine.go.rl:78 +tr818: + ( m.cs) = 398 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:104 +//line plugins/parsers/influx/machine.go.rl:112 err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr830: - ( m.cs) = 399 -//line plugins/parsers/influx/machine.go.rl:78 +tr823: + ( m.cs) = 398 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:113 +//line plugins/parsers/influx/machine.go.rl:121 err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr835: - ( m.cs) = 399 -//line plugins/parsers/influx/machine.go.rl:78 +tr827: + ( m.cs) = 398 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:131 +//line plugins/parsers/influx/machine.go.rl:139 err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } + goto _again + st398: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof398 + } + st_case_398: +//line plugins/parsers/influx/machine.go:12089 + switch ( m.data)[( m.p)] { + case 10: + goto tr219 + case 11: + goto tr624 + case 13: + goto st72 + case 32: + goto tr623 + case 34: + goto tr122 + case 44: + goto tr90 + case 45: + goto tr625 + case 61: + goto st29 + case 92: + goto tr123 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr626 + } + case ( m.data)[( m.p)] >= 9: + goto tr623 + } + goto tr119 +tr624: + ( m.cs) = 399 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + goto _again st399: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof399 } st_case_399: -//line plugins/parsers/influx/machine.go:12139 +//line 
plugins/parsers/influx/machine.go:12141 switch ( m.data)[( m.p)] { - case 9: - goto tr626 case 10: - goto tr221 + goto tr219 case 11: - goto tr627 - case 12: - goto tr501 + goto tr624 case 13: - goto st73 + goto st72 case 32: - goto tr626 + goto tr623 case 34: - goto tr124 + goto tr122 case 44: - goto tr92 + goto tr90 case 45: - goto tr628 + goto tr625 case 61: - goto st30 + goto tr127 case 92: - goto tr125 + goto tr123 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr629 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr626 + } + case ( m.data)[( m.p)] >= 9: + goto tr623 } - goto tr121 -tr627: - ( m.cs) = 400 -//line plugins/parsers/influx/machine.go.rl:78 + goto tr119 +tr90: + ( m.cs) = 76 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:20 + goto _again +tr84: + ( m.cs) = 76 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p goto _again - st400: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof400 - } - st_case_400: -//line plugins/parsers/influx/machine.go:12190 - switch ( m.data)[( m.p)] { - case 9: - goto tr626 - case 10: - goto tr221 - case 11: - goto tr627 - case 12: - goto tr501 - case 13: - goto st73 - case 32: - goto tr626 - case 34: - goto tr124 - case 44: - goto tr92 - case 45: - goto tr628 - case 61: - goto tr129 - case 92: - goto tr125 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr629 - } - goto tr121 -tr92: - ( m.cs) = 77 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr86: - ( m.cs) = 77 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto _again -tr233: - ( m.cs) = 77 -//line plugins/parsers/influx/machine.go.rl:91 +tr231: + ( m.cs) = 76 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again - st77: + st76: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof77 + goto _test_eof76 } - st_case_77: -//line plugins/parsers/influx/machine.go:12267 + st_case_76: +//line plugins/parsers/influx/machine.go:12219 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto tr29 - case 12: - goto tr47 - case 13: - goto st7 + goto tr28 case 32: goto st6 case 34: - goto tr192 + goto tr190 case 44: goto st6 case 61: goto st6 case 92: - goto tr224 + goto tr222 } - goto tr223 -tr223: -//line plugins/parsers/influx/machine.go.rl:20 + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto tr221 +tr221: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p + goto st77 + st77: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof77 + } + st_case_77: +//line plugins/parsers/influx/machine.go:12251 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr193 + case 44: + goto st6 + case 61: + goto tr224 + case 92: + 
goto st87 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st77 +tr224: +//line plugins/parsers/influx/machine.go.rl:95 + + m.key = m.text() + goto st78 st78: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof78 } st_case_78: -//line plugins/parsers/influx/machine.go:12300 +//line plugins/parsers/influx/machine.go:12283 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto tr29 - case 12: - goto tr47 - case 13: - goto st7 + goto tr28 case 32: goto st6 case 34: - goto tr195 + goto tr149 case 44: goto st6 case 61: - goto tr226 + goto st6 case 92: - goto st88 + goto tr227 } - goto st78 + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto tr226 tr226: -//line plugins/parsers/influx/machine.go.rl:87 +//line plugins/parsers/influx/machine.go.rl:28 - m.key = m.text() + m.pb = m.p goto st79 st79: @@ -12329,776 +12311,762 @@ tr226: goto _test_eof79 } st_case_79: -//line plugins/parsers/influx/machine.go:12333 +//line plugins/parsers/influx/machine.go:12315 switch ( m.data)[( m.p)] { - case 9: - goto st6 case 10: - goto tr29 - case 12: - goto tr47 + goto tr28 + case 11: + goto tr230 case 13: - goto st7 + goto st6 case 32: - goto st6 + goto tr229 case 34: - goto tr151 + goto tr155 case 44: - goto st6 + goto tr231 case 61: goto st6 case 92: + goto st85 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr229 } - goto tr228 -tr228: -//line plugins/parsers/influx/machine.go.rl:20 + goto st79 +tr230: + ( m.cs) = 80 +//line plugins/parsers/influx/machine.go.rl:99 - m.pb = m.p + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- - goto st80 + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st80: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof80 } st_case_80: -//line plugins/parsers/influx/machine.go:12366 +//line plugins/parsers/influx/machine.go:12356 switch ( m.data)[( m.p)] { - case 9: - goto tr231 case 10: - goto tr29 + goto tr28 case 11: - goto tr232 - case 12: - goto tr60 + goto tr234 case 13: - goto st7 + goto st6 case 32: - goto tr231 + goto tr229 case 34: - goto tr157 + goto tr202 case 44: - goto tr233 + goto tr231 case 61: goto st6 case 92: - goto st86 + goto tr235 } - goto st80 -tr232: - ( m.cs) = 81 -//line plugins/parsers/influx/machine.go.rl:91 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto tr233 +tr233: +//line plugins/parsers/influx/machine.go.rl:28 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + m.pb = m.p - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again + goto st81 st81: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof81 } st_case_81: -//line plugins/parsers/influx/machine.go:12408 +//line plugins/parsers/influx/machine.go:12390 switch ( m.data)[( m.p)] { - case 9: - goto tr231 case 10: - goto tr29 + goto tr28 case 11: - goto tr236 - case 12: - goto tr60 - case 13: - goto st7 - case 32: - goto tr231 - case 34: - goto tr204 - case 44: - goto tr233 - case 61: - goto st6 - case 92: goto tr237 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr206 + case 44: + goto tr231 + case 61: + goto tr99 + case 92: + goto st83 } - goto tr235 -tr235: -//line plugins/parsers/influx/machine.go.rl:20 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto st81 +tr237: + ( m.cs) = 82 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr234: + ( m.cs) = 82 
+//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st82 + goto _again st82: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof82 } st_case_82: -//line plugins/parsers/influx/machine.go:12443 +//line plugins/parsers/influx/machine.go:12448 switch ( m.data)[( m.p)] { - case 9: - goto tr231 case 10: - goto tr29 + goto tr28 case 11: - goto tr239 - case 12: - goto tr60 + goto tr234 case 13: - goto st7 + goto st6 case 32: - goto tr231 + goto tr229 case 34: - goto tr208 + goto tr202 case 44: - goto tr233 + goto tr231 case 61: - goto tr101 + goto tr99 case 92: - goto st84 + goto tr235 } - goto st82 -tr239: - ( m.cs) = 83 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr236: - ( m.cs) = 83 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:20 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto tr233 +tr235: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto _again + goto st83 st83: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof83 } st_case_83: -//line plugins/parsers/influx/machine.go:12502 +//line plugins/parsers/influx/machine.go:12482 switch ( m.data)[( m.p)] { - case 9: - goto tr231 - case 10: - goto tr29 - case 11: - goto tr236 - case 12: - goto tr60 - case 13: - goto st7 - case 32: - goto tr231 case 34: - goto tr204 - case 44: - goto tr233 - case 61: - goto tr101 + goto st81 case 92: - goto tr237 + goto st84 } - goto tr235 -tr237: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st84 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 + } + goto st17 st84: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof84 } st_case_84: -//line plugins/parsers/influx/machine.go:12537 +//line plugins/parsers/influx/machine.go:12506 switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr237 + case 13: + goto st6 + case 32: + goto tr229 case 34: - goto st82 + goto tr206 + case 44: + goto tr231 + case 61: + goto tr99 case 92: - goto st85 + goto st83 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr47 - } - case ( m.data)[( m.p)] >= 9: - goto tr47 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 } - goto st18 + goto st81 +tr227: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st85 st85: -//line plugins/parsers/influx/machine.go.rl:240 - ( m.p)-- - if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof85 } st_case_85: -//line plugins/parsers/influx/machine.go:12561 +//line plugins/parsers/influx/machine.go:12540 switch ( m.data)[( m.p)] { - case 9: - goto tr231 - case 10: - goto tr29 - case 11: - goto tr239 - case 12: - goto tr60 - case 13: - goto st7 - case 32: - goto tr231 case 34: - goto tr208 - case 44: - goto tr233 - case 61: - goto tr101 + goto st79 case 92: - goto st84 + goto st86 } - goto st82 -tr229: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st86 + switch 
{ + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 + } + goto st15 st86: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof86 } st_case_86: -//line plugins/parsers/influx/machine.go:12596 +//line plugins/parsers/influx/machine.go:12564 switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr230 + case 13: + goto st6 + case 32: + goto tr229 case 34: - goto st80 + goto tr155 + case 44: + goto tr231 + case 61: + goto st6 case 92: - goto st87 + goto st85 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr47 - } - case ( m.data)[( m.p)] >= 9: - goto tr47 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 } - goto st16 + goto st79 +tr222: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st87 st87: -//line plugins/parsers/influx/machine.go.rl:240 - ( m.p)-- - if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof87 } st_case_87: -//line plugins/parsers/influx/machine.go:12620 - switch ( m.data)[( m.p)] { - case 9: - goto tr231 - case 10: - goto tr29 - case 11: - goto tr232 - case 12: - goto tr60 - case 13: - goto st7 - case 32: - goto tr231 - case 34: - goto tr157 - case 44: - goto tr233 - case 61: - goto st6 - case 92: - goto st86 - } - goto st80 -tr224: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st88 - st88: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof88 - } - st_case_88: -//line plugins/parsers/influx/machine.go:12655 +//line plugins/parsers/influx/machine.go:12598 switch ( m.data)[( m.p)] { case 34: - goto st78 + goto st77 case 92: - goto st89 + goto st88 } switch { case ( m.data)[( m.p)] > 10: if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr47 + goto tr45 } case ( m.data)[( m.p)] >= 9: - goto tr47 + goto tr45 } - goto st14 - st89: -//line plugins/parsers/influx/machine.go.rl:240 + goto st13 + st88: +//line plugins/parsers/influx/machine.go.rl:248 ( m.p)-- if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof89 + goto _test_eof88 } - st_case_89: -//line plugins/parsers/influx/machine.go:12679 + st_case_88: +//line plugins/parsers/influx/machine.go:12622 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto tr29 - case 12: - goto tr47 - case 13: - goto st7 + goto tr28 case 32: goto st6 case 34: - goto tr195 + goto tr193 case 44: goto st6 case 61: - goto tr226 + goto tr224 case 92: - goto st88 + goto st87 } - goto st78 -tr628: -//line plugins/parsers/influx/machine.go.rl:20 + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st77 +tr625: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p + goto st89 + st89: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof89 + } + st_case_89: +//line plugins/parsers/influx/machine.go:12654 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr125 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr126 + case 44: + goto tr90 + case 61: + goto tr127 + case 92: + goto st92 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st400 + } + case ( m.data)[( m.p)] >= 9: + goto tr87 + } + goto st40 +tr626: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st400 + st400: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof400 + } + st_case_400: +//line plugins/parsers/influx/machine.go:12693 + switch ( 
m.data)[( m.p)] { + case 10: + goto tr600 + case 11: + goto tr628 + case 13: + goto tr602 + case 32: + goto tr627 + case 34: + goto tr126 + case 44: + goto tr90 + case 61: + goto tr127 + case 92: + goto st92 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st544 + } + case ( m.data)[( m.p)] >= 9: + goto tr627 + } + goto st40 +tr632: + ( m.cs) = 401 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr769: + ( m.cs) = 401 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr627: + ( m.cs) = 401 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr766: + ( m.cs) = 401 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st401: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof401 + } + st_case_401: +//line plugins/parsers/influx/machine.go:12798 + switch ( m.data)[( m.p)] { + case 10: + goto tr219 + case 11: + goto tr631 + case 13: + goto st72 + case 32: + goto st401 + case 34: + goto tr95 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr96 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st401 + } + goto tr92 +tr631: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st402 + st402: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof402 + } + st_case_402: +//line plugins/parsers/influx/machine.go:12832 + switch ( m.data)[( m.p)] { + case 10: + goto tr219 + case 11: + goto tr631 + case 13: + goto st72 + case 32: + goto st401 + case 34: + goto tr95 + case 44: + goto st6 + case 61: + goto tr99 + case 92: + goto tr96 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st401 + } + goto tr92 +tr633: + ( m.cs) = 403 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again +tr628: + ( m.cs) = 403 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st403: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof403 + } + st_case_403: +//line plugins/parsers/influx/machine.go:12900 + switch ( m.data)[( m.p)] { + case 10: + goto tr219 + case 11: + goto tr633 + case 13: + goto st72 + case 32: + goto tr632 + case 34: + goto tr122 + case 44: + goto tr90 + case 61: + goto tr127 + case 92: + goto tr123 + } + if 9 <= ( 
m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr632 + } + goto tr119 +tr127: +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() + + goto st90 +tr381: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() + goto st90 st90: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof90 } st_case_90: -//line plugins/parsers/influx/machine.go:12712 +//line plugins/parsers/influx/machine.go:12944 switch ( m.data)[( m.p)] { - case 9: - goto tr89 case 10: - goto tr29 + goto tr28 case 11: - goto tr127 - case 12: - goto tr1 + goto tr88 case 13: - goto st7 - case 32: - goto tr89 - case 34: - goto tr128 - case 44: - goto tr92 - case 61: - goto tr129 - case 92: - goto st93 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st401 - } - goto st41 -tr629: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st401 - st401: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof401 - } - st_case_401: -//line plugins/parsers/influx/machine.go:12750 - switch ( m.data)[( m.p)] { - case 9: - goto tr630 - case 10: - goto tr603 - case 11: - goto tr631 - case 12: - goto tr509 - case 13: - goto tr605 - case 32: - goto tr630 - case 34: - goto tr128 - case 44: - goto tr92 - case 61: - goto tr129 - case 92: - goto st93 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st545 - } - goto st41 -tr635: - ( m.cs) = 402 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr776: - ( m.cs) = 402 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr630: - ( m.cs) = 402 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:149 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr773: - ( m.cs) = 402 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:149 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st402: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof402 - } - st_case_402: -//line plugins/parsers/influx/machine.go:12854 - switch ( m.data)[( m.p)] { - case 9: - goto st402 - case 10: - goto tr221 - case 11: - goto tr634 - case 12: - goto st305 - case 13: - goto st73 - case 32: - goto st402 - case 34: - goto tr97 - case 44: goto st6 - case 61: - goto st6 - case 92: - goto tr98 - } - goto tr94 -tr634: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st403 - st403: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof403 - } - st_case_403: -//line plugins/parsers/influx/machine.go:12889 - switch ( m.data)[( m.p)] { - case 9: - goto st402 - case 10: - goto tr221 - case 11: - goto tr634 - case 12: - goto st305 - case 13: - goto st73 case 32: - goto st402 + goto tr87 case 34: - goto tr97 + goto tr210 case 44: - goto st6 - case 61: - goto tr101 + goto tr90 + case 45: + goto tr243 + case 46: + goto tr244 + 
case 48: + goto tr245 + case 70: + goto tr247 + case 84: + goto tr248 case 92: - goto tr98 + goto st140 + case 102: + goto tr249 + case 116: + goto tr250 } - goto tr94 -tr636: - ( m.cs) = 404 -//line plugins/parsers/influx/machine.go.rl:78 + switch { + case ( m.data)[( m.p)] > 12: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr246 + } + case ( m.data)[( m.p)] >= 9: + goto tr87 + } + goto st29 +tr88: + ( m.cs) = 91 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - goto _again -tr631: - ( m.cs) = 404 -//line plugins/parsers/influx/machine.go.rl:78 +tr82: + ( m.cs) = 91 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:149 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st404: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof404 - } - st_case_404: -//line plugins/parsers/influx/machine.go:12958 - switch ( m.data)[( m.p)] { - case 9: - goto tr635 - case 10: - goto tr221 - case 11: - goto tr636 - case 12: - goto tr514 - case 13: - goto st73 - case 32: - goto tr635 - case 34: - goto tr124 - case 44: - goto tr92 - case 61: - goto tr129 - case 92: - goto tr125 - } - goto tr121 -tr129: -//line plugins/parsers/influx/machine.go.rl:100 - - m.key = m.text() - - goto st91 -tr383: -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:100 - - m.key = m.text() - - goto st91 + goto _again st91: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof91 } st_case_91: -//line plugins/parsers/influx/machine.go:13003 +//line plugins/parsers/influx/machine.go:13019 switch ( m.data)[( m.p)] { - case 9: - goto tr89 case 10: - goto tr29 + goto tr28 case 11: - goto tr90 - case 12: - goto tr1 + goto tr129 case 13: - goto st7 + goto st6 case 32: - goto tr89 + goto tr87 case 34: - goto tr212 + goto tr122 case 44: - goto tr92 - case 45: - goto tr245 - case 46: - goto tr246 - case 48: - goto tr247 - case 70: - goto tr249 - case 84: - goto tr250 + goto tr90 + case 61: + goto st29 case 92: - goto st141 - case 102: - goto tr251 - case 116: - goto tr252 + goto tr123 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr248 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 } - goto st30 -tr90: - ( m.cs) = 92 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr84: - ( m.cs) = 92 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:20 + goto tr119 +tr123: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto _again + goto st92 st92: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof92 } st_case_92: -//line plugins/parsers/influx/machine.go:13077 +//line plugins/parsers/influx/machine.go:13053 switch ( m.data)[( m.p)] { - case 9: - goto tr89 - case 10: - goto tr29 - case 11: - goto tr131 - case 12: - goto tr1 - case 13: - goto st7 - case 32: - 
goto tr89 case 34: - goto tr124 - case 44: - goto tr92 - case 61: - goto st30 + goto st40 case 92: - goto tr125 + goto st40 } - goto tr121 -tr125: -//line plugins/parsers/influx/machine.go.rl:20 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 + } + case ( m.data)[( m.p)] >= 9: + goto tr8 + } + goto st10 +tr243: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -13108,24 +13076,257 @@ tr125: goto _test_eof93 } st_case_93: -//line plugins/parsers/influx/machine.go:13112 +//line plugins/parsers/influx/machine.go:13080 switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr88 + case 13: + goto st6 + case 32: + goto tr87 case 34: - goto st41 + goto tr89 + case 44: + goto tr90 + case 46: + goto st95 + case 48: + goto st532 case 92: - goto st41 + goto st140 } switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 + case ( m.data)[( m.p)] > 12: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st535 } case ( m.data)[( m.p)] >= 9: - goto tr8 + goto tr87 } - goto st11 -tr245: -//line plugins/parsers/influx/machine.go.rl:20 + goto st29 +tr83: + ( m.cs) = 404 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr89: + ( m.cs) = 404 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr116: + ( m.cs) = 404 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again + st404: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof404 + } + st_case_404: +//line plugins/parsers/influx/machine.go:13162 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto tr634 + case 13: + goto st32 + case 32: + goto tr499 + case 44: + goto tr501 + case 92: + goto st94 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr499 + } + goto st1 +tr634: + ( m.cs) = 405 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr812: + ( m.cs) = 405 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1006: + ( m.cs) = 405 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1010: + ( m.cs) = 405 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line 
plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1014: + ( m.cs) = 405 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st405: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof405 + } + st_case_405: +//line plugins/parsers/influx/machine.go:13291 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto tr504 + case 13: + goto st32 + case 32: + goto tr499 + case 44: + goto tr4 + case 45: + goto tr505 + case 61: + goto st1 + case 92: + goto tr43 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr506 + } + case ( m.data)[( m.p)] >= 9: + goto tr499 + } + goto tr39 +tr35: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st94 +tr458: +//line plugins/parsers/influx/machine.go.rl:82 + + m.beginMetric = true + +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -13135,266 +13336,7 @@ tr245: goto _test_eof94 } st_case_94: -//line plugins/parsers/influx/machine.go:13139 - switch ( m.data)[( m.p)] { - case 9: - goto tr89 - case 10: - goto tr29 - case 11: - goto tr90 - case 12: - goto tr1 - case 13: - goto st7 - case 32: - goto tr89 - case 34: - goto tr91 - case 44: - goto tr92 - case 46: - goto st96 - case 48: - goto st533 - case 92: - goto st141 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st536 - } - goto st30 -tr85: - ( m.cs) = 405 -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:140 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr91: - ( m.cs) = 405 -//line plugins/parsers/influx/machine.go.rl:140 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr118: - ( m.cs) = 405 -//line plugins/parsers/influx/machine.go.rl:140 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto _again - st405: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof405 - } - st_case_405: -//line plugins/parsers/influx/machine.go:13220 - switch ( m.data)[( m.p)] { - case 10: - goto tr103 - case 11: - goto tr637 - case 13: - goto st33 - case 32: - goto tr501 - case 44: - goto tr503 - case 92: - goto st95 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr501 - } - goto st1 -tr637: - ( m.cs) = 406 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr818: - ( m.cs) = 406 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:122 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again 
-tr1013: - ( m.cs) = 406 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:104 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr1016: - ( m.cs) = 406 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:113 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr1019: - ( m.cs) = 406 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:131 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st406: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof406 - } - st_case_406: -//line plugins/parsers/influx/machine.go:13349 - switch ( m.data)[( m.p)] { - case 10: - goto tr103 - case 11: - goto tr506 - case 13: - goto st33 - case 32: - goto tr501 - case 44: - goto tr4 - case 45: - goto tr507 - case 61: - goto st1 - case 92: - goto tr45 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr508 - } - case ( m.data)[( m.p)] >= 9: - goto tr501 - } - goto tr41 -tr37: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st95 -tr460: -//line plugins/parsers/influx/machine.go.rl:74 - - m.beginMetric = true - -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st95 - st95: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof95 - } - st_case_95: -//line plugins/parsers/influx/machine.go:13398 +//line plugins/parsers/influx/machine.go:13340 switch { case ( m.data)[( m.p)] > 10: if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { @@ -13404,305 +13346,426 @@ tr460: goto st0 } goto st1 -tr246: -//line plugins/parsers/influx/machine.go.rl:20 +tr244: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st96 + goto st95 + st95: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof95 + } + st_case_95: +//line plugins/parsers/influx/machine.go:13361 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr88 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr89 + case 44: + goto tr90 + case 92: + goto st140 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st406 + } + case ( m.data)[( m.p)] >= 9: + goto tr87 + } + goto st29 + st406: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof406 + } + st_case_406: + switch ( m.data)[( m.p)] { + case 10: + goto tr636 + case 11: + goto tr637 + case 13: + goto tr638 + case 32: + goto tr635 + case 34: + goto tr89 + case 44: + goto tr639 + case 69: + goto st138 + case 92: + goto st140 + case 101: + goto st138 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st406 + } + case ( m.data)[( m.p)] >= 9: + goto tr635 + } + goto st29 +tr594: + ( m.cs) = 96 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( 
m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again +tr639: + ( m.cs) = 96 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr760: + ( m.cs) = 96 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr794: + ( m.cs) = 96 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr800: + ( m.cs) = 96 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr806: + ( m.cs) = 96 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr819: + ( m.cs) = 96 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr824: + ( m.cs) = 96 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr828: + ( m.cs) = 96 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st96: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof96 } st_case_96: -//line plugins/parsers/influx/machine.go:13419 - switch ( m.data)[( m.p)] { - case 9: - goto tr89 - case 10: - goto tr29 - case 11: - goto tr90 - case 12: - goto tr1 - case 13: - goto st7 - case 32: - goto tr89 - case 34: - goto tr91 - case 44: - goto tr92 - case 92: - goto st141 - } - if 48 <= ( m.data)[( 
m.p)] && ( m.data)[( m.p)] <= 57 { - goto st407 - } - goto st30 - st407: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof407 - } - st_case_407: - switch ( m.data)[( m.p)] { - case 9: - goto tr638 - case 10: - goto tr639 - case 11: - goto tr640 - case 12: - goto tr641 - case 13: - goto tr642 - case 32: - goto tr638 - case 34: - goto tr91 - case 44: - goto tr643 - case 69: - goto st139 - case 92: - goto st141 - case 101: - goto st139 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st407 - } - goto st30 -tr597: - ( m.cs) = 97 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto _again -tr643: - ( m.cs) = 97 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:122 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr767: - ( m.cs) = 97 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:122 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr801: - ( m.cs) = 97 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:104 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr807: - ( m.cs) = 97 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:113 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr813: - ( m.cs) = 97 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:131 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr826: - ( m.cs) = 97 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:104 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr832: - ( m.cs) = 97 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:113 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr837: - ( m.cs) = 97 -//line plugins/parsers/influx/machine.go.rl:78 - - err = 
m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:131 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st97: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof97 - } - st_case_97: -//line plugins/parsers/influx/machine.go:13683 +//line plugins/parsers/influx/machine.go:13627 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto tr29 - case 12: - goto tr47 - case 13: - goto st7 + goto tr28 case 32: goto st6 case 34: - goto tr258 + goto tr256 case 44: goto st6 case 61: goto st6 case 92: - goto tr259 + goto tr257 } - goto tr257 -tr257: -//line plugins/parsers/influx/machine.go.rl:20 + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto tr255 +tr255: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st97 + st97: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof97 + } + st_case_97: +//line plugins/parsers/influx/machine.go:13659 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr259 + case 44: + goto st6 + case 61: + goto tr260 + case 92: + goto st136 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st97 +tr256: + ( m.cs) = 407 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr259: + ( m.cs) = 407 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st407: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof407 + } + st_case_407: +//line plugins/parsers/influx/machine.go:13715 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto st408 + case 13: + goto st32 + case 32: + goto st271 + case 44: + goto st35 + case 61: + goto tr135 + case 92: + goto st99 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st271 + } + goto st44 + st408: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof408 + } + st_case_408: + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto st408 + case 13: + goto st32 + case 32: + goto st271 + case 44: + goto tr130 + case 45: + goto tr642 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr643 + } + case ( m.data)[( m.p)] >= 9: + goto st271 + } + goto st44 +tr642: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -13712,118 +13775,108 @@ tr257: goto _test_eof98 } st_case_98: -//line plugins/parsers/influx/machine.go:13716 +//line plugins/parsers/influx/machine.go:13779 switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr29 - case 12: - goto tr47 - case 13: - goto st7 case 32: - goto st6 - case 34: - goto tr261 + goto tr130 case 44: - goto st6 + goto tr130 case 61: - goto tr262 + goto tr135 case 92: - goto st137 + goto st99 } - goto st98 -tr258: - ( m.cs) = 408 -//line plugins/parsers/influx/machine.go.rl:20 + switch { + case ( m.data)[( m.p)] < 12: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { + goto tr130 + } + case ( m.data)[( m.p)] > 13: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { 
+ goto st409 + } + default: + goto tr130 + } + goto st44 +tr643: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:140 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr261: - ( m.cs) = 408 -//line plugins/parsers/influx/machine.go.rl:140 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st408: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof408 - } - st_case_408: -//line plugins/parsers/influx/machine.go:13773 - switch ( m.data)[( m.p)] { - case 10: - goto tr103 - case 11: - goto st409 - case 13: - goto st33 - case 32: - goto st272 - case 44: - goto st36 - case 61: - goto tr137 - case 92: - goto st100 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st272 - } - goto st45 + goto st409 st409: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof409 } st_case_409: +//line plugins/parsers/influx/machine.go:13814 switch ( m.data)[( m.p)] { case 10: - goto tr103 + goto tr468 case 11: - goto st409 + goto tr644 case 13: - goto st33 + goto tr470 case 32: - goto st272 + goto tr467 case 44: - goto tr132 - case 45: - goto tr646 + goto tr130 case 61: - goto tr137 + goto tr135 case 92: - goto st100 + goto st99 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr647 + goto st411 } case ( m.data)[( m.p)] >= 9: - goto st272 + goto tr467 } - goto st45 -tr646: -//line plugins/parsers/influx/machine.go.rl:20 + goto st44 +tr644: + ( m.cs) = 410 +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st410: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof410 + } + st_case_410: +//line plugins/parsers/influx/machine.go:13858 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto st410 + case 13: + goto st32 + case 32: + goto st276 + case 44: + goto tr45 + case 61: + goto tr135 + case 92: + goto st99 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st276 + } + goto st44 +tr133: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -13833,57 +13886,67 @@ tr646: goto _test_eof99 } st_case_99: -//line plugins/parsers/influx/machine.go:13837 - switch ( m.data)[( m.p)] { - case 32: - goto tr132 - case 44: - goto tr132 - case 61: - goto tr137 - case 92: +//line plugins/parsers/influx/machine.go:13890 + if ( m.data)[( m.p)] == 92 { goto st100 } switch { - case ( m.data)[( m.p)] < 12: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr132 + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 } - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st410 - } - default: - goto tr132 + case ( m.data)[( m.p)] >= 9: + goto tr45 } - goto st45 -tr647: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st410 - st410: + goto st44 + st100: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof410 + goto _test_eof100 } - st_case_410: -//line plugins/parsers/influx/machine.go:13872 + st_case_100: +//line plugins/parsers/influx/machine.go:13911 + switch ( m.data)[( m.p)] { + case 32: + goto tr45 + case 44: + goto tr45 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { 
+ case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 + } + goto st44 + st411: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof411 + } + st_case_411: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr648 + goto tr644 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr132 + goto tr130 case 61: - goto tr137 + goto tr135 case 92: - goto st100 + goto st99 } switch { case ( m.data)[( m.p)] > 12: @@ -13891,100 +13954,9 @@ tr647: goto st412 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st45 -tr648: - ( m.cs) = 411 -//line plugins/parsers/influx/machine.go.rl:149 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st411: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof411 - } - st_case_411: -//line plugins/parsers/influx/machine.go:13916 - switch ( m.data)[( m.p)] { - case 10: - goto tr103 - case 11: - goto st411 - case 13: - goto st33 - case 32: - goto st277 - case 44: - goto tr47 - case 61: - goto tr137 - case 92: - goto st100 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st277 - } - goto st45 -tr135: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st100 - st100: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof100 - } - st_case_100: -//line plugins/parsers/influx/machine.go:13948 - if ( m.data)[( m.p)] == 92 { - goto st101 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr47 - } - case ( m.data)[( m.p)] >= 9: - goto tr47 - } - goto st45 - st101: -//line plugins/parsers/influx/machine.go.rl:240 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof101 - } - st_case_101: -//line plugins/parsers/influx/machine.go:13969 - switch ( m.data)[( m.p)] { - case 32: - goto tr47 - case 44: - goto tr47 - case 61: - goto tr137 - case 92: - goto st100 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr47 - } - case ( m.data)[( m.p)] >= 9: - goto tr47 - } - goto st45 + goto st44 st412: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof412 @@ -13992,19 +13964,19 @@ tr135: st_case_412: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr648 + goto tr644 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr132 + goto tr130 case 61: - goto tr137 + goto tr135 case 92: - goto st100 + goto st99 } switch { case ( m.data)[( m.p)] > 12: @@ -14012,9 +13984,9 @@ tr135: goto st413 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st45 + goto st44 st413: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof413 @@ -14022,19 +13994,19 @@ tr135: st_case_413: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr648 + goto tr644 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr132 + goto tr130 case 61: - goto tr137 + goto tr135 case 92: - goto st100 + goto st99 } switch { case ( m.data)[( m.p)] > 12: @@ -14042,9 +14014,9 @@ tr135: goto st414 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st45 + goto st44 st414: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof414 @@ -14052,19 +14024,19 @@ tr135: st_case_414: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr648 + goto tr644 case 13: - goto tr472 + goto 
tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr132 + goto tr130 case 61: - goto tr137 + goto tr135 case 92: - goto st100 + goto st99 } switch { case ( m.data)[( m.p)] > 12: @@ -14072,9 +14044,9 @@ tr135: goto st415 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st45 + goto st44 st415: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof415 @@ -14082,19 +14054,19 @@ tr135: st_case_415: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr648 + goto tr644 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr132 + goto tr130 case 61: - goto tr137 + goto tr135 case 92: - goto st100 + goto st99 } switch { case ( m.data)[( m.p)] > 12: @@ -14102,9 +14074,9 @@ tr135: goto st416 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st45 + goto st44 st416: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof416 @@ -14112,19 +14084,19 @@ tr135: st_case_416: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr648 + goto tr644 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr132 + goto tr130 case 61: - goto tr137 + goto tr135 case 92: - goto st100 + goto st99 } switch { case ( m.data)[( m.p)] > 12: @@ -14132,9 +14104,9 @@ tr135: goto st417 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st45 + goto st44 st417: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof417 @@ -14142,19 +14114,19 @@ tr135: st_case_417: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr648 + goto tr644 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr132 + goto tr130 case 61: - goto tr137 + goto tr135 case 92: - goto st100 + goto st99 } switch { case ( m.data)[( m.p)] > 12: @@ -14162,9 +14134,9 @@ tr135: goto st418 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st45 + goto st44 st418: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof418 @@ -14172,19 +14144,19 @@ tr135: st_case_418: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr648 + goto tr644 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr132 + goto tr130 case 61: - goto tr137 + goto tr135 case 92: - goto st100 + goto st99 } switch { case ( m.data)[( m.p)] > 12: @@ -14192,9 +14164,9 @@ tr135: goto st419 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st45 + goto st44 st419: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof419 @@ -14202,19 +14174,19 @@ tr135: st_case_419: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr648 + goto tr644 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr132 + goto tr130 case 61: - goto tr137 + goto tr135 case 92: - goto st100 + goto st99 } switch { case ( m.data)[( m.p)] > 12: @@ -14222,9 +14194,9 @@ tr135: goto st420 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st45 + goto st44 st420: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof420 @@ -14232,19 +14204,19 @@ tr135: st_case_420: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr648 + goto tr644 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr132 + goto tr130 case 61: - goto tr137 + goto tr135 case 92: - goto st100 + goto st99 } switch { case ( m.data)[( m.p)] > 12: @@ -14252,9 +14224,9 @@ tr135: goto st421 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st45 + goto st44 
st421: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof421 @@ -14262,19 +14234,19 @@ tr135: st_case_421: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr648 + goto tr644 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr132 + goto tr130 case 61: - goto tr137 + goto tr135 case 92: - goto st100 + goto st99 } switch { case ( m.data)[( m.p)] > 12: @@ -14282,9 +14254,9 @@ tr135: goto st422 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st45 + goto st44 st422: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof422 @@ -14292,19 +14264,19 @@ tr135: st_case_422: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr648 + goto tr644 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr132 + goto tr130 case 61: - goto tr137 + goto tr135 case 92: - goto st100 + goto st99 } switch { case ( m.data)[( m.p)] > 12: @@ -14312,9 +14284,9 @@ tr135: goto st423 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st45 + goto st44 st423: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof423 @@ -14322,19 +14294,19 @@ tr135: st_case_423: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr648 + goto tr644 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr132 + goto tr130 case 61: - goto tr137 + goto tr135 case 92: - goto st100 + goto st99 } switch { case ( m.data)[( m.p)] > 12: @@ -14342,9 +14314,9 @@ tr135: goto st424 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st45 + goto st44 st424: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof424 @@ -14352,19 +14324,19 @@ tr135: st_case_424: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr648 + goto tr644 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr132 + goto tr130 case 61: - goto tr137 + goto tr135 case 92: - goto st100 + goto st99 } switch { case ( m.data)[( m.p)] > 12: @@ -14372,9 +14344,9 @@ tr135: goto st425 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st45 + goto st44 st425: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof425 @@ -14382,19 +14354,19 @@ tr135: st_case_425: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr648 + goto tr644 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr132 + goto tr130 case 61: - goto tr137 + goto tr135 case 92: - goto st100 + goto st99 } switch { case ( m.data)[( m.p)] > 12: @@ -14402,9 +14374,9 @@ tr135: goto st426 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st45 + goto st44 st426: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof426 @@ -14412,19 +14384,19 @@ tr135: st_case_426: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr648 + goto tr644 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr132 + goto tr130 case 61: - goto tr137 + goto tr135 case 92: - goto st100 + goto st99 } switch { case ( m.data)[( m.p)] > 12: @@ -14432,9 +14404,9 @@ tr135: goto st427 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st45 + goto st44 st427: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof427 @@ -14442,19 +14414,19 @@ tr135: st_case_427: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr648 + goto tr644 case 13: - goto tr472 + goto tr470 case 32: - goto tr469 + goto tr467 case 44: - goto tr132 + goto tr130 case 61: 
- goto tr137 + goto tr135 case 92: - goto st100 + goto st99 } switch { case ( m.data)[( m.p)] > 12: @@ -14462,9 +14434,9 @@ tr135: goto st428 } case ( m.data)[( m.p)] >= 9: - goto tr469 + goto tr467 } - goto st45 + goto st44 st428: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof428 @@ -14472,121 +14444,347 @@ tr135: st_case_428: switch ( m.data)[( m.p)] { case 10: - goto tr470 + goto tr468 case 11: - goto tr648 + goto tr644 case 13: - goto tr472 - case 32: - goto tr469 - case 44: - goto tr132 - case 61: - goto tr137 - case 92: - goto st100 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st429 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 - } - goto st45 - st429: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof429 - } - st_case_429: - switch ( m.data)[( m.p)] { - case 10: goto tr470 - case 11: - goto tr648 - case 13: - goto tr472 case 32: - goto tr469 + goto tr467 case 44: - goto tr132 + goto tr130 case 61: - goto tr137 + goto tr135 case 92: - goto st100 + goto st99 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr469 + goto tr467 } - goto st45 -tr262: -//line plugins/parsers/influx/machine.go.rl:87 + goto st44 +tr260: +//line plugins/parsers/influx/machine.go.rl:95 m.key = m.text() -//line plugins/parsers/influx/machine.go.rl:100 +//line plugins/parsers/influx/machine.go.rl:108 m.key = m.text() - goto st102 - st102: + goto st101 + st101: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof102 + goto _test_eof101 } - st_case_102: -//line plugins/parsers/influx/machine.go:14539 + st_case_101: +//line plugins/parsers/influx/machine.go:14481 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto tr29 - case 12: - goto tr47 - case 13: - goto st7 + goto tr28 case 32: goto st6 case 34: - goto tr266 + goto tr264 case 44: goto st6 case 45: - goto tr267 + goto tr265 case 46: - goto tr268 + goto tr266 case 48: - goto tr269 + goto tr267 case 61: goto st6 case 70: - goto tr271 + goto tr269 case 84: - goto tr272 - case 92: - goto tr229 - case 102: - goto tr273 - case 116: - goto tr274 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto tr270 + case 92: + goto tr227 + case 102: + goto tr271 + case 116: + goto tr272 } - goto tr228 -tr266: - ( m.cs) = 430 -//line plugins/parsers/influx/machine.go.rl:20 + switch { + case ( m.data)[( m.p)] > 13: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr268 + } + case ( m.data)[( m.p)] >= 12: + goto st6 + } + goto tr226 +tr264: + ( m.cs) = 429 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:140 +//line plugins/parsers/influx/machine.go.rl:148 err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st429: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof429 + } + st_case_429: +//line plugins/parsers/influx/machine.go:14543 + switch ( m.data)[( m.p)] { + case 10: + goto tr665 + case 11: + goto tr666 + case 13: + goto tr667 + case 32: + goto tr664 + case 34: + goto tr149 + case 44: + goto tr668 + case 61: + goto tr23 + case 92: + goto tr151 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr664 + } + goto tr146 +tr854: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr697: + ( m.cs) = 430 +//line 
plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr664: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again +tr850: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr725: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr736: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr742: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr748: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr882: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr886: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr890: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; {( m.p)++; goto _out } } @@ -14596,293 +14794,202 @@ tr266: goto _test_eof430 } 
st_case_430: -//line plugins/parsers/influx/machine.go:14600 +//line plugins/parsers/influx/machine.go:14798 switch ( m.data)[( m.p)] { - case 9: - goto tr668 case 10: - goto tr669 + goto tr273 case 11: goto tr670 - case 12: - goto tr566 case 13: - goto tr671 + goto st102 case 32: - goto tr668 + goto st430 case 34: - goto tr151 + goto tr95 case 44: - goto tr672 + goto st6 + case 45: + goto tr671 case 61: - goto tr23 + goto st6 case 92: - goto tr153 + goto tr161 } - goto tr148 -tr863: - ( m.cs) = 431 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr701: - ( m.cs) = 431 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr668: - ( m.cs) = 431 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:20 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr672 + } + case ( m.data)[( m.p)] >= 9: + goto st430 + } + goto tr158 +tr670: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto _again -tr859: - ( m.cs) = 431 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:122 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr729: - ( m.cs) = 431 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:122 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr740: - ( m.cs) = 431 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:104 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr747: - ( m.cs) = 431 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:113 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr754: - ( m.cs) = 431 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:131 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr891: - ( m.cs) = 431 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line 
plugins/parsers/influx/machine.go.rl:104 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr895: - ( m.cs) = 431 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:113 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr899: - ( m.cs) = 431 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:131 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again + goto st431 st431: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof431 } st_case_431: -//line plugins/parsers/influx/machine.go:14856 +//line plugins/parsers/influx/machine.go:14839 switch ( m.data)[( m.p)] { - case 9: - goto st431 case 10: - goto tr275 + goto tr273 case 11: - goto tr674 - case 12: - goto st301 + goto tr670 case 13: - goto st103 + goto st102 case 32: - goto st431 + goto st430 case 34: - goto tr97 + goto tr95 case 44: goto st6 case 45: - goto tr675 + goto tr671 case 61: - goto st6 - case 92: goto tr163 + case 92: + goto tr161 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr676 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr672 + } + case ( m.data)[( m.p)] >= 9: + goto st430 } - goto tr160 -tr674: -//line plugins/parsers/influx/machine.go.rl:20 + goto tr158 +tr667: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st102 +tr676: + ( m.cs) = 102 +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr533: + ( m.cs) = 102 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr739: + ( m.cs) = 102 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr745: + ( m.cs) = 102 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr751: + ( m.cs) = 102 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st102: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof102 + } + st_case_102: +//line plugins/parsers/influx/machine.go:14945 + switch ( m.data)[( m.p)] { + case 10: + goto tr273 + case 34: + goto tr29 + case 92: + goto st73 + } + goto st6 +tr671: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st103 + st103: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof103 + } + st_case_103: +//line plugins/parsers/influx/machine.go:14966 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr98 + case 44: + goto st6 + 
case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 13: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st432 + } + case ( m.data)[( m.p)] >= 12: + goto st6 + } + goto st49 +tr672: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -14892,199 +14999,78 @@ tr674: goto _test_eof432 } st_case_432: -//line plugins/parsers/influx/machine.go:14896 +//line plugins/parsers/influx/machine.go:15003 switch ( m.data)[( m.p)] { - case 9: - goto st431 case 10: - goto tr275 - case 11: goto tr674 - case 12: - goto st301 + case 11: + goto tr675 case 13: - goto st103 + goto tr676 case 32: - goto st431 + goto tr673 case 34: - goto tr97 + goto tr98 case 44: goto st6 - case 45: - goto tr675 case 61: - goto tr165 - case 92: goto tr163 + case 92: + goto st104 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr676 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st435 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 } - goto tr160 -tr671: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st103 -tr680: - ( m.cs) = 103 -//line plugins/parsers/influx/machine.go.rl:149 + goto st49 +tr673: + ( m.cs) = 433 +//line plugins/parsers/influx/machine.go.rl:157 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr536: - ( m.cs) = 103 -//line plugins/parsers/influx/machine.go.rl:122 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr744: - ( m.cs) = 103 -//line plugins/parsers/influx/machine.go.rl:104 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr751: - ( m.cs) = 103 -//line plugins/parsers/influx/machine.go.rl:113 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr758: - ( m.cs) = 103 -//line plugins/parsers/influx/machine.go.rl:131 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st103: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof103 - } - st_case_103: -//line plugins/parsers/influx/machine.go:15001 - if ( m.data)[( m.p)] == 10 { - goto tr275 - } - goto tr8 -tr675: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st104 - st104: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof104 - } - st_case_104: -//line plugins/parsers/influx/machine.go:15017 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr29 - case 12: - goto tr105 - case 13: - goto st7 - case 32: - goto st6 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr165 - case 92: - goto st105 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st433 - } - goto st50 -tr676: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st433 st433: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof433 } st_case_433: -//line plugins/parsers/influx/machine.go:15053 +//line plugins/parsers/influx/machine.go:15049 switch ( m.data)[( m.p)] { - case 9: - goto tr677 case 10: - goto tr678 - case 11: - goto tr679 - case 12: - goto tr469 + goto tr273 case 13: - goto tr680 + goto st102 case 32: - goto tr677 + goto st433 case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr165 + goto tr29 
case 92: - goto st105 + goto st73 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st436 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st433 } - goto st50 -tr677: + goto st6 +tr675: ( m.cs) = 434 -//line plugins/parsers/influx/machine.go.rl:149 +//line plugins/parsers/influx/machine.go.rl:157 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } @@ -15094,84 +15080,46 @@ tr677: goto _test_eof434 } st_case_434: -//line plugins/parsers/influx/machine.go:15098 +//line plugins/parsers/influx/machine.go:15084 switch ( m.data)[( m.p)] { case 10: - goto tr275 - case 12: - goto st277 - case 13: - goto st103 - case 32: - goto st434 - case 34: - goto tr31 - case 92: - goto st75 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto st434 - } - goto st6 -tr679: - ( m.cs) = 435 -//line plugins/parsers/influx/machine.go.rl:149 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st435: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof435 - } - st_case_435: -//line plugins/parsers/influx/machine.go:15135 - switch ( m.data)[( m.p)] { - case 9: - goto st434 - case 10: - goto tr275 + goto tr273 case 11: - goto st435 - case 12: - goto st277 - case 13: - goto st103 - case 32: goto st434 + case 13: + goto st102 + case 32: + goto st433 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr165 + goto tr163 case 92: - goto st105 + goto st104 } - goto st50 -tr163: -//line plugins/parsers/influx/machine.go.rl:20 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st433 + } + goto st49 +tr161: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st105 - st105: + goto st104 + st104: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof105 + goto _test_eof104 } - st_case_105: -//line plugins/parsers/influx/machine.go:15170 + st_case_104: +//line plugins/parsers/influx/machine.go:15118 switch ( m.data)[( m.p)] { case 34: - goto st50 + goto st49 case 92: - goto st50 + goto st49 } switch { case ( m.data)[( m.p)] > 10: @@ -15182,1860 +15130,1942 @@ tr163: goto tr8 } goto st3 + st435: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof435 + } + st_case_435: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st436 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st49 st436: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof436 } st_case_436: switch ( m.data)[( m.p)] { - case 9: - goto tr677 case 10: - goto tr678 + goto tr674 case 11: - goto tr679 - case 12: - goto tr469 + goto tr675 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr165 + goto tr163 case 92: - goto st105 + goto st104 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st437 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st437 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 } - goto st50 + goto st49 st437: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof437 } st_case_437: switch ( m.data)[( m.p)] { - case 9: - goto tr677 case 10: - goto tr678 + goto tr674 case 11: - goto tr679 - 
case 12: - goto tr469 + goto tr675 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr165 + goto tr163 case 92: - goto st105 + goto st104 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st438 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st438 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 } - goto st50 + goto st49 st438: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof438 } st_case_438: switch ( m.data)[( m.p)] { - case 9: - goto tr677 case 10: - goto tr678 + goto tr674 case 11: - goto tr679 - case 12: - goto tr469 + goto tr675 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr165 + goto tr163 case 92: - goto st105 + goto st104 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st439 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st439 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 } - goto st50 + goto st49 st439: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof439 } st_case_439: switch ( m.data)[( m.p)] { - case 9: - goto tr677 case 10: - goto tr678 + goto tr674 case 11: - goto tr679 - case 12: - goto tr469 + goto tr675 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr165 + goto tr163 case 92: - goto st105 + goto st104 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st440 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st440 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 } - goto st50 + goto st49 st440: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof440 } st_case_440: switch ( m.data)[( m.p)] { - case 9: - goto tr677 case 10: - goto tr678 + goto tr674 case 11: - goto tr679 - case 12: - goto tr469 + goto tr675 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr165 + goto tr163 case 92: - goto st105 + goto st104 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st441 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st441 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 } - goto st50 + goto st49 st441: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof441 } st_case_441: switch ( m.data)[( m.p)] { - case 9: - goto tr677 case 10: - goto tr678 + goto tr674 case 11: - goto tr679 - case 12: - goto tr469 + goto tr675 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr165 + goto tr163 case 92: - goto st105 + goto st104 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st442 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st442 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 } - goto st50 + goto st49 st442: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof442 } st_case_442: switch ( m.data)[( m.p)] { - case 9: - goto tr677 case 10: - goto tr678 + goto tr674 case 11: - goto tr679 - case 12: - goto tr469 + goto tr675 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr165 + goto tr163 case 92: - goto 
st105 + goto st104 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st443 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st443 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 } - goto st50 + goto st49 st443: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof443 } st_case_443: switch ( m.data)[( m.p)] { - case 9: - goto tr677 case 10: - goto tr678 + goto tr674 case 11: - goto tr679 - case 12: - goto tr469 + goto tr675 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr165 + goto tr163 case 92: - goto st105 + goto st104 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st444 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st444 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 } - goto st50 + goto st49 st444: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof444 } st_case_444: switch ( m.data)[( m.p)] { - case 9: - goto tr677 case 10: - goto tr678 + goto tr674 case 11: - goto tr679 - case 12: - goto tr469 + goto tr675 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr165 + goto tr163 case 92: - goto st105 + goto st104 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st445 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st445 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 } - goto st50 + goto st49 st445: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof445 } st_case_445: switch ( m.data)[( m.p)] { - case 9: - goto tr677 case 10: - goto tr678 + goto tr674 case 11: - goto tr679 - case 12: - goto tr469 + goto tr675 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr165 + goto tr163 case 92: - goto st105 + goto st104 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st446 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st446 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 } - goto st50 + goto st49 st446: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof446 } st_case_446: switch ( m.data)[( m.p)] { - case 9: - goto tr677 case 10: - goto tr678 + goto tr674 case 11: - goto tr679 - case 12: - goto tr469 + goto tr675 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr165 + goto tr163 case 92: - goto st105 + goto st104 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st447 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st447 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 } - goto st50 + goto st49 st447: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof447 } st_case_447: switch ( m.data)[( m.p)] { - case 9: - goto tr677 case 10: - goto tr678 + goto tr674 case 11: - goto tr679 - case 12: - goto tr469 + goto tr675 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr165 + goto tr163 case 92: - goto st105 + goto st104 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st448 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st448 + } 
+ case ( m.data)[( m.p)] >= 9: + goto tr673 } - goto st50 + goto st49 st448: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof448 } st_case_448: switch ( m.data)[( m.p)] { - case 9: - goto tr677 case 10: - goto tr678 + goto tr674 case 11: - goto tr679 - case 12: - goto tr469 + goto tr675 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr165 + goto tr163 case 92: - goto st105 + goto st104 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st449 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st449 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 } - goto st50 + goto st49 st449: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof449 } st_case_449: switch ( m.data)[( m.p)] { - case 9: - goto tr677 case 10: - goto tr678 + goto tr674 case 11: - goto tr679 - case 12: - goto tr469 + goto tr675 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr165 + goto tr163 case 92: - goto st105 + goto st104 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st450 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st450 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 } - goto st50 + goto st49 st450: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof450 } st_case_450: switch ( m.data)[( m.p)] { - case 9: - goto tr677 case 10: - goto tr678 + goto tr674 case 11: - goto tr679 - case 12: - goto tr469 + goto tr675 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr165 + goto tr163 case 92: - goto st105 + goto st104 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st451 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st451 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 } - goto st50 + goto st49 st451: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof451 } st_case_451: switch ( m.data)[( m.p)] { - case 9: - goto tr677 case 10: - goto tr678 + goto tr674 case 11: - goto tr679 - case 12: - goto tr469 + goto tr675 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr165 + goto tr163 case 92: - goto st105 + goto st104 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st452 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st452 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 } - goto st50 + goto st49 st452: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof452 } st_case_452: switch ( m.data)[( m.p)] { - case 9: - goto tr677 case 10: - goto tr678 + goto tr674 case 11: - goto tr679 - case 12: - goto tr469 + goto tr675 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr100 + goto tr98 case 44: goto st6 case 61: - goto tr165 + goto tr163 case 92: - goto st105 + goto st104 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st453 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr673 } - goto st50 - st453: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof453 - } - st_case_453: - switch ( m.data)[( m.p)] { - case 9: - goto tr677 - case 10: - goto tr678 - case 11: - goto tr679 - case 12: - goto tr469 - case 13: - goto tr680 - case 32: 
- goto tr677 - case 34: - goto tr100 - case 44: - goto st6 - case 61: - goto tr165 - case 92: - goto st105 - } - goto st50 -tr670: - ( m.cs) = 454 -//line plugins/parsers/influx/machine.go.rl:91 + goto st49 +tr666: + ( m.cs) = 453 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p goto _again -tr730: - ( m.cs) = 454 -//line plugins/parsers/influx/machine.go.rl:91 +tr726: + ( m.cs) = 453 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:122 +//line plugins/parsers/influx/machine.go.rl:130 err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr742: - ( m.cs) = 454 -//line plugins/parsers/influx/machine.go.rl:91 +tr738: + ( m.cs) = 453 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:104 +//line plugins/parsers/influx/machine.go.rl:112 err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr749: - ( m.cs) = 454 -//line plugins/parsers/influx/machine.go.rl:91 +tr744: + ( m.cs) = 453 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:113 +//line plugins/parsers/influx/machine.go.rl:121 err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr756: - ( m.cs) = 454 -//line plugins/parsers/influx/machine.go.rl:91 +tr750: + ( m.cs) = 453 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:131 +//line plugins/parsers/influx/machine.go.rl:139 err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } + goto _again + st453: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof453 + } + st_case_453: +//line plugins/parsers/influx/machine.go:15819 + switch ( m.data)[( m.p)] { + case 10: + goto tr273 + case 11: + goto tr698 + case 13: + goto st102 + case 32: + goto tr697 + case 34: + goto tr202 + case 44: + goto tr156 + case 45: + goto tr699 + case 61: + goto st6 + case 92: + goto tr203 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr700 + } + case ( m.data)[( m.p)] >= 9: + goto tr697 + } + goto tr200 +tr698: + ( m.cs) = 454 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + goto _again st454: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof454 } st_case_454: -//line plugins/parsers/influx/machine.go:15855 +//line plugins/parsers/influx/machine.go:15871 switch ( m.data)[( 
m.p)] { - case 9: - goto tr701 case 10: - goto tr275 + goto tr273 case 11: - goto tr702 - case 12: - goto tr566 + goto tr698 case 13: - goto st103 + goto st102 case 32: - goto tr701 + goto tr697 case 34: - goto tr204 + goto tr202 case 44: - goto tr158 + goto tr156 case 45: - goto tr703 + goto tr699 case 61: - goto st6 + goto tr163 case 92: - goto tr205 + goto tr203 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr704 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr700 + } + case ( m.data)[( m.p)] >= 9: + goto tr697 } - goto tr202 -tr702: - ( m.cs) = 455 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:20 + goto tr200 +tr699: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto _again + goto st105 + st105: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof105 + } + st_case_105: +//line plugins/parsers/influx/machine.go:15912 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr205 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st455 + } + case ( m.data)[( m.p)] >= 9: + goto tr153 + } + goto st65 +tr700: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st455 st455: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof455 } st_case_455: -//line plugins/parsers/influx/machine.go:15906 +//line plugins/parsers/influx/machine.go:15951 switch ( m.data)[( m.p)] { - case 9: - goto tr701 case 10: - goto tr275 + goto tr674 case 11: goto tr702 - case 12: - goto tr566 case 13: - goto st103 + goto tr676 case 32: goto tr701 case 34: - goto tr204 + goto tr206 case 44: - goto tr158 - case 45: - goto tr703 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto tr205 + goto st67 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr704 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st459 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 } - goto tr202 -tr703: -//line plugins/parsers/influx/machine.go.rl:20 + goto st65 +tr861: + ( m.cs) = 456 +//line plugins/parsers/influx/machine.go.rl:86 - m.pb = m.p + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - goto st106 - st106: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof106 - } - st_case_106: -//line plugins/parsers/influx/machine.go:15946 - switch ( m.data)[( m.p)] { - case 9: - goto tr155 - case 10: - goto tr29 - case 11: - goto tr207 - case 12: - goto tr60 - case 13: - goto st7 - case 32: - goto tr155 - case 34: - goto tr208 - case 44: - goto tr158 - case 61: - goto tr165 - case 92: - goto st68 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st456 - } - goto st66 -tr704: -//line plugins/parsers/influx/machine.go.rl:20 + ( m.cs) = 257; + {( m.p)++; goto _out } + } - m.pb = m.p + goto _again +tr706: + ( m.cs) = 456 +//line plugins/parsers/influx/machine.go.rl:99 - goto st456 + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr858: + ( m.cs) = 456 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { 
+ ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr701: + ( m.cs) = 456 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st456: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof456 } st_case_456: -//line plugins/parsers/influx/machine.go:15984 +//line plugins/parsers/influx/machine.go:16056 switch ( m.data)[( m.p)] { - case 9: - goto tr705 case 10: - goto tr678 + goto tr273 case 11: - goto tr706 - case 12: - goto tr572 - case 13: - goto tr680 - case 32: goto tr705 + case 13: + goto st102 + case 32: + goto st456 case 34: - goto tr208 + goto tr95 case 44: - goto tr158 + goto st6 case 61: - goto tr165 + goto st6 case 92: - goto st68 + goto tr161 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st460 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st456 } - goto st66 -tr870: - ( m.cs) = 457 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr710: - ( m.cs) = 457 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr867: - ( m.cs) = 457 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:149 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again + goto tr158 tr705: - ( m.cs) = 457 -//line plugins/parsers/influx/machine.go.rl:91 +//line plugins/parsers/influx/machine.go.rl:28 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + m.pb = m.p - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:149 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again + goto st457 st457: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof457 } st_case_457: -//line plugins/parsers/influx/machine.go:16088 +//line plugins/parsers/influx/machine.go:16090 switch ( m.data)[( m.p)] { - case 9: - goto st457 case 10: - goto tr275 + goto tr273 case 11: - goto tr709 - case 12: - goto st305 + goto tr705 case 13: - goto st103 + goto st102 case 32: - goto st457 + goto st456 case 34: - goto tr97 + goto tr95 case 44: goto st6 case 61: - goto st6 - case 92: goto tr163 - } - goto tr160 -tr709: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st458 - st458: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof458 - } - st_case_458: -//line plugins/parsers/influx/machine.go:16123 - switch ( m.data)[( m.p)] { - case 9: - goto st457 - case 10: - goto tr275 - case 11: - goto tr709 - case 12: - goto st305 - case 13: - goto st103 - case 32: - goto st457 - case 34: - goto tr97 - case 44: - goto st6 - case 61: - goto tr165 case 92: - goto tr163 + goto 
tr161 } - goto tr160 -tr711: - ( m.cs) = 459 -//line plugins/parsers/influx/machine.go.rl:91 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st456 + } + goto tr158 +tr707: + ( m.cs) = 458 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p goto _again -tr706: - ( m.cs) = 459 -//line plugins/parsers/influx/machine.go.rl:91 +tr702: + ( m.cs) = 458 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:149 +//line plugins/parsers/influx/machine.go.rl:157 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again + st458: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof458 + } + st_case_458: +//line plugins/parsers/influx/machine.go:16158 + switch ( m.data)[( m.p)] { + case 10: + goto tr273 + case 11: + goto tr707 + case 13: + goto st102 + case 32: + goto tr706 + case 34: + goto tr202 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto tr203 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr706 + } + goto tr200 st459: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof459 } st_case_459: -//line plugins/parsers/influx/machine.go:16192 switch ( m.data)[( m.p)] { - case 9: - goto tr710 case 10: - goto tr275 + goto tr674 case 11: - goto tr711 - case 12: - goto tr575 + goto tr702 case 13: - goto st103 + goto tr676 case 32: - goto tr710 + goto tr701 case 34: - goto tr204 + goto tr206 case 44: - goto tr158 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto tr205 + goto st67 } - goto tr202 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st460 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 + } + goto st65 st460: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof460 } st_case_460: switch ( m.data)[( m.p)] { - case 9: - goto tr705 case 10: - goto tr678 + goto tr674 case 11: - goto tr706 - case 12: - goto tr572 + goto tr702 case 13: - goto tr680 + goto tr676 case 32: - goto tr705 + goto tr701 case 34: - goto tr208 + goto tr206 case 44: - goto tr158 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto st68 + goto st67 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st461 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st461 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 } - goto st66 + goto st65 st461: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof461 } st_case_461: switch ( m.data)[( m.p)] { - case 9: - goto tr705 case 10: - goto tr678 + goto tr674 case 11: - goto tr706 - case 12: - goto tr572 + goto tr702 case 13: - goto tr680 + goto tr676 case 32: - goto tr705 + goto tr701 case 34: - goto tr208 + goto tr206 case 44: - goto tr158 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto st68 + goto st67 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st462 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st462 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 } - goto st66 + goto st65 st462: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof462 } st_case_462: switch 
( m.data)[( m.p)] { - case 9: - goto tr705 case 10: - goto tr678 + goto tr674 case 11: - goto tr706 - case 12: - goto tr572 + goto tr702 case 13: - goto tr680 + goto tr676 case 32: - goto tr705 + goto tr701 case 34: - goto tr208 + goto tr206 case 44: - goto tr158 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto st68 + goto st67 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st463 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st463 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 } - goto st66 + goto st65 st463: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof463 } st_case_463: switch ( m.data)[( m.p)] { - case 9: - goto tr705 case 10: - goto tr678 + goto tr674 case 11: - goto tr706 - case 12: - goto tr572 + goto tr702 case 13: - goto tr680 + goto tr676 case 32: - goto tr705 + goto tr701 case 34: - goto tr208 + goto tr206 case 44: - goto tr158 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto st68 + goto st67 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st464 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st464 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 } - goto st66 + goto st65 st464: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof464 } st_case_464: switch ( m.data)[( m.p)] { - case 9: - goto tr705 case 10: - goto tr678 + goto tr674 case 11: - goto tr706 - case 12: - goto tr572 + goto tr702 case 13: - goto tr680 + goto tr676 case 32: - goto tr705 + goto tr701 case 34: - goto tr208 + goto tr206 case 44: - goto tr158 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto st68 + goto st67 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st465 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st465 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 } - goto st66 + goto st65 st465: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof465 } st_case_465: switch ( m.data)[( m.p)] { - case 9: - goto tr705 case 10: - goto tr678 + goto tr674 case 11: - goto tr706 - case 12: - goto tr572 + goto tr702 case 13: - goto tr680 + goto tr676 case 32: - goto tr705 + goto tr701 case 34: - goto tr208 + goto tr206 case 44: - goto tr158 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto st68 + goto st67 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st466 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st466 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 } - goto st66 + goto st65 st466: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof466 } st_case_466: switch ( m.data)[( m.p)] { - case 9: - goto tr705 case 10: - goto tr678 + goto tr674 case 11: - goto tr706 - case 12: - goto tr572 + goto tr702 case 13: - goto tr680 + goto tr676 case 32: - goto tr705 + goto tr701 case 34: - goto tr208 + goto tr206 case 44: - goto tr158 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto st68 + goto st67 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st467 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st467 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 } - goto st66 + goto st65 st467: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof467 } st_case_467: switch ( m.data)[( m.p)] { - case 9: - goto tr705 case 10: - goto tr678 + goto tr674 case 11: - goto tr706 - case 12: - goto tr572 + 
goto tr702 case 13: - goto tr680 + goto tr676 case 32: - goto tr705 + goto tr701 case 34: - goto tr208 + goto tr206 case 44: - goto tr158 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto st68 + goto st67 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st468 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st468 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 } - goto st66 + goto st65 st468: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof468 } st_case_468: switch ( m.data)[( m.p)] { - case 9: - goto tr705 case 10: - goto tr678 + goto tr674 case 11: - goto tr706 - case 12: - goto tr572 + goto tr702 case 13: - goto tr680 + goto tr676 case 32: - goto tr705 + goto tr701 case 34: - goto tr208 + goto tr206 case 44: - goto tr158 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto st68 + goto st67 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st469 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st469 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 } - goto st66 + goto st65 st469: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof469 } st_case_469: switch ( m.data)[( m.p)] { - case 9: - goto tr705 case 10: - goto tr678 + goto tr674 case 11: - goto tr706 - case 12: - goto tr572 + goto tr702 case 13: - goto tr680 + goto tr676 case 32: - goto tr705 + goto tr701 case 34: - goto tr208 + goto tr206 case 44: - goto tr158 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto st68 + goto st67 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st470 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st470 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 } - goto st66 + goto st65 st470: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof470 } st_case_470: switch ( m.data)[( m.p)] { - case 9: - goto tr705 case 10: - goto tr678 + goto tr674 case 11: - goto tr706 - case 12: - goto tr572 + goto tr702 case 13: - goto tr680 + goto tr676 case 32: - goto tr705 + goto tr701 case 34: - goto tr208 + goto tr206 case 44: - goto tr158 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto st68 + goto st67 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st471 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st471 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 } - goto st66 + goto st65 st471: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof471 } st_case_471: switch ( m.data)[( m.p)] { - case 9: - goto tr705 case 10: - goto tr678 + goto tr674 case 11: - goto tr706 - case 12: - goto tr572 + goto tr702 case 13: - goto tr680 + goto tr676 case 32: - goto tr705 + goto tr701 case 34: - goto tr208 + goto tr206 case 44: - goto tr158 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto st68 + goto st67 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st472 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st472 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 } - goto st66 + goto st65 st472: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof472 } st_case_472: switch ( m.data)[( m.p)] { - case 9: - goto tr705 case 10: - goto tr678 + goto tr674 case 11: - goto tr706 - case 12: - goto tr572 + goto tr702 case 13: - goto tr680 + goto tr676 case 32: - goto tr705 + goto tr701 case 34: - goto tr208 + goto tr206 case 44: - 
goto tr158 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto st68 + goto st67 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st473 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st473 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 } - goto st66 + goto st65 st473: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof473 } st_case_473: switch ( m.data)[( m.p)] { - case 9: - goto tr705 case 10: - goto tr678 + goto tr674 case 11: - goto tr706 - case 12: - goto tr572 + goto tr702 case 13: - goto tr680 + goto tr676 case 32: - goto tr705 + goto tr701 case 34: - goto tr208 + goto tr206 case 44: - goto tr158 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto st68 + goto st67 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st474 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st474 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 } - goto st66 + goto st65 st474: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof474 } st_case_474: switch ( m.data)[( m.p)] { - case 9: - goto tr705 case 10: - goto tr678 + goto tr674 case 11: - goto tr706 - case 12: - goto tr572 + goto tr702 case 13: - goto tr680 + goto tr676 case 32: - goto tr705 + goto tr701 case 34: - goto tr208 + goto tr206 case 44: - goto tr158 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto st68 + goto st67 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st475 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st475 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 } - goto st66 + goto st65 st475: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof475 } st_case_475: switch ( m.data)[( m.p)] { - case 9: - goto tr705 case 10: - goto tr678 + goto tr674 case 11: - goto tr706 - case 12: - goto tr572 + goto tr702 case 13: - goto tr680 + goto tr676 case 32: - goto tr705 + goto tr701 case 34: - goto tr208 + goto tr206 case 44: - goto tr158 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto st68 + goto st67 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st476 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st476 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 } - goto st66 + goto st65 st476: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof476 } st_case_476: switch ( m.data)[( m.p)] { - case 9: - goto tr705 case 10: - goto tr678 + goto tr674 case 11: - goto tr706 - case 12: - goto tr572 + goto tr702 case 13: - goto tr680 + goto tr676 case 32: - goto tr705 + goto tr701 case 34: - goto tr208 + goto tr206 case 44: - goto tr158 + goto tr156 case 61: - goto tr165 + goto tr163 case 92: - goto st68 + goto st67 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st477 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr701 } - goto st66 - st477: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof477 - } - st_case_477: - switch ( m.data)[( m.p)] { - case 9: - goto tr705 - case 10: - goto tr678 - case 11: - goto tr706 - case 12: - goto tr572 - case 13: - goto tr680 - case 32: - goto tr705 - case 34: - goto tr208 - case 44: - goto tr158 - case 61: - goto tr165 - case 92: - goto st68 - } - goto st66 -tr672: - ( m.cs) = 107 -//line plugins/parsers/influx/machine.go.rl:91 + goto st65 +tr668: + ( m.cs) = 106 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if 
err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p goto _again -tr861: - ( m.cs) = 107 -//line plugins/parsers/influx/machine.go.rl:78 +tr852: + ( m.cs) = 106 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:122 +//line plugins/parsers/influx/machine.go.rl:130 err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr732: - ( m.cs) = 107 -//line plugins/parsers/influx/machine.go.rl:91 +tr727: + ( m.cs) = 106 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:122 +//line plugins/parsers/influx/machine.go.rl:130 err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr745: - ( m.cs) = 107 -//line plugins/parsers/influx/machine.go.rl:91 +tr740: + ( m.cs) = 106 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:104 +//line plugins/parsers/influx/machine.go.rl:112 err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr746: + ( m.cs) = 106 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again tr752: - ( m.cs) = 107 -//line plugins/parsers/influx/machine.go.rl:91 + ( m.cs) = 106 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:113 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr759: - ( m.cs) = 107 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:131 +//line plugins/parsers/influx/machine.go.rl:139 err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr893: - ( m.cs) = 107 -//line plugins/parsers/influx/machine.go.rl:78 +tr884: + ( m.cs) = 106 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:104 +//line plugins/parsers/influx/machine.go.rl:112 err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr897: - ( m.cs) = 107 -//line plugins/parsers/influx/machine.go.rl:78 +tr888: + 
( m.cs) = 106 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:113 +//line plugins/parsers/influx/machine.go.rl:121 err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr902: - ( m.cs) = 107 -//line plugins/parsers/influx/machine.go.rl:78 +tr893: + ( m.cs) = 106 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:131 +//line plugins/parsers/influx/machine.go.rl:139 err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again - st107: + st106: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof107 + goto _test_eof106 } - st_case_107: -//line plugins/parsers/influx/machine.go:16977 + st_case_106: +//line plugins/parsers/influx/machine.go:16958 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto tr29 - case 12: - goto tr47 - case 13: - goto st7 + goto tr28 case 32: goto st6 case 34: - goto tr258 + goto tr256 case 44: goto st6 case 61: goto st6 case 92: - goto tr279 + goto tr277 } - goto tr278 -tr278: -//line plugins/parsers/influx/machine.go.rl:20 + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto tr276 +tr276: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p + goto st107 + st107: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof107 + } + st_case_107: +//line plugins/parsers/influx/machine.go:16990 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr259 + case 44: + goto st6 + case 61: + goto tr279 + case 92: + goto st121 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st107 +tr279: +//line plugins/parsers/influx/machine.go.rl:95 + + m.key = m.text() + +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() + goto st108 st108: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof108 } st_case_108: -//line plugins/parsers/influx/machine.go:17010 +//line plugins/parsers/influx/machine.go:17026 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto tr29 - case 12: - goto tr47 - case 13: - goto st7 + goto tr28 case 32: goto st6 case 34: - goto tr261 + goto tr264 case 44: goto st6 - case 61: + case 45: goto tr281 + case 46: + goto tr282 + case 48: + goto tr283 + case 61: + goto st6 + case 70: + goto tr285 + case 84: + goto tr286 case 92: - goto st122 + goto tr151 + case 102: + goto tr287 + case 116: + goto tr288 } - goto st108 + switch { + case ( m.data)[( m.p)] > 13: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr284 + } + case ( m.data)[( m.p)] >= 12: + goto st6 + } + goto tr146 tr281: -//line plugins/parsers/influx/machine.go.rl:87 +//line plugins/parsers/influx/machine.go.rl:28 - m.key = m.text() - -//line plugins/parsers/influx/machine.go.rl:100 - - m.key = m.text() + m.pb = m.p goto st109 st109: @@ -17043,47 +17073,40 @@ tr281: goto _test_eof109 } st_case_109: -//line plugins/parsers/influx/machine.go:17047 +//line plugins/parsers/influx/machine.go:17077 switch ( m.data)[( m.p)] { - case 9: - goto st6 case 10: - goto tr29 - case 12: - goto tr47 + goto tr28 + case 11: + goto tr154 case 13: - goto st7 + goto st6 case 32: - goto 
st6 + goto tr153 case 34: - goto tr266 + goto tr155 case 44: - goto st6 - case 45: - goto tr283 + goto tr156 case 46: - goto tr284 + goto st110 case 48: - goto tr285 + goto st481 case 61: goto st6 - case 70: - goto tr287 - case 84: - goto tr288 case 92: + goto st62 + } + switch { + case ( m.data)[( m.p)] > 12: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st484 + } + case ( m.data)[( m.p)] >= 9: goto tr153 - case 102: - goto tr289 - case 116: - goto tr290 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr286 - } - goto tr148 -tr283: -//line plugins/parsers/influx/machine.go.rl:20 + goto st47 +tr282: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -17093,180 +17116,202 @@ tr283: goto _test_eof110 } st_case_110: -//line plugins/parsers/influx/machine.go:17097 +//line plugins/parsers/influx/machine.go:17120 switch ( m.data)[( m.p)] { - case 9: - goto tr155 case 10: - goto tr29 + goto tr28 case 11: - goto tr156 - case 12: - goto tr60 + goto tr154 case 13: - goto st7 + goto st6 case 32: - goto tr155 + goto tr153 case 34: - goto tr157 + goto tr155 case 44: - goto tr158 - case 46: - goto st111 - case 48: - goto st482 + goto tr156 case 61: goto st6 case 92: - goto st63 + goto st62 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st485 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st477 + } + case ( m.data)[( m.p)] >= 9: + goto tr153 } - goto st48 -tr284: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st111 + goto st47 + st477: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof477 + } + st_case_477: + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 11: + goto tr726 + case 13: + goto tr533 + case 32: + goto tr725 + case 34: + goto tr155 + case 44: + goto tr727 + case 61: + goto st6 + case 69: + goto st111 + case 92: + goto st62 + case 101: + goto st111 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st477 + } + case ( m.data)[( m.p)] >= 9: + goto tr725 + } + goto st47 st111: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof111 } st_case_111: -//line plugins/parsers/influx/machine.go:17139 switch ( m.data)[( m.p)] { - case 9: - goto tr155 case 10: - goto tr29 + goto tr28 case 11: - goto tr156 - case 12: - goto tr60 + goto tr154 case 13: - goto st7 + goto st6 case 32: - goto tr155 + goto tr153 case 34: - goto tr157 + goto tr293 case 44: - goto tr158 + goto tr156 case 61: goto st6 case 92: - goto st63 + goto st62 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st478 + switch { + case ( m.data)[( m.p)] < 43: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + case ( m.data)[( m.p)] > 45: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st480 + } + default: + goto st112 } - goto st48 + goto st47 +tr293: + ( m.cs) = 478 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st478: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof478 } st_case_478: +//line plugins/parsers/influx/machine.go:17238 switch ( m.data)[( m.p)] { - case 9: - goto tr729 case 10: - goto tr534 + goto tr101 case 11: + goto tr564 + case 13: + goto st32 + case 32: + goto tr563 + case 44: + goto tr565 + case 61: + goto tr130 + case 92: + goto st21 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( 
m.data)[( m.p)] <= 57 { + goto st479 + } + case ( m.data)[( m.p)] >= 9: + goto tr563 + } + goto st15 + st479: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof479 + } + st_case_479: + switch ( m.data)[( m.p)] { + case 10: goto tr730 - case 12: + case 11: goto tr731 case 13: - goto tr536 + goto tr732 case 32: goto tr729 - case 34: - goto tr157 case 44: - goto tr732 + goto tr733 case 61: - goto st6 - case 69: - goto st112 + goto tr130 case 92: - goto st63 - case 101: - goto st112 + goto st21 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st478 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st479 + } + case ( m.data)[( m.p)] >= 9: + goto tr729 } - goto st48 + goto st15 st112: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof112 } st_case_112: switch ( m.data)[( m.p)] { - case 9: - goto tr155 case 10: - goto tr29 + goto tr28 case 11: - goto tr156 - case 12: - goto tr60 + goto tr154 case 13: - goto st7 + goto st6 case 32: - goto tr155 + goto tr153 case 34: - goto tr295 + goto tr155 case 44: - goto tr158 + goto tr156 case 61: goto st6 case 92: - goto st63 - } - switch { - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st481 - } - case ( m.data)[( m.p)] >= 43: - goto st113 - } - goto st48 -tr295: - ( m.cs) = 479 -//line plugins/parsers/influx/machine.go.rl:140 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st479: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof479 - } - st_case_479: -//line plugins/parsers/influx/machine.go:17255 - switch ( m.data)[( m.p)] { - case 10: - goto tr103 - case 11: - goto tr567 - case 13: - goto st33 - case 32: - goto tr566 - case 44: - goto tr568 - case 61: - goto tr132 - case 92: - goto st22 + goto st62 } switch { case ( m.data)[( m.p)] > 12: @@ -17274,9 +17319,9 @@ tr295: goto st480 } case ( m.data)[( m.p)] >= 9: - goto tr566 + goto tr153 } - goto st16 + goto st47 st480: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof480 @@ -17284,19 +17329,21 @@ tr295: st_case_480: switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr532 case 11: - goto tr735 + goto tr726 case 13: - goto tr736 + goto tr533 case 32: - goto tr731 + goto tr725 + case 34: + goto tr155 case 44: - goto tr737 + goto tr727 case 61: - goto tr132 + goto st6 case 92: - goto st22 + goto st62 } switch { case ( m.data)[( m.p)] > 12: @@ -17304,292 +17351,281 @@ tr295: goto st480 } case ( m.data)[( m.p)] >= 9: - goto tr731 + goto tr725 } - goto st16 - st113: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof113 - } - st_case_113: - switch ( m.data)[( m.p)] { - case 9: - goto tr155 - case 10: - goto tr29 - case 11: - goto tr156 - case 12: - goto tr60 - case 13: - goto st7 - case 32: - goto tr155 - case 34: - goto tr157 - case 44: - goto tr158 - case 61: - goto st6 - case 92: - goto st63 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st481 - } - goto st48 + goto st47 st481: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof481 } st_case_481: switch ( m.data)[( m.p)] { - case 9: - goto tr729 case 10: - goto tr534 + goto tr532 case 11: - goto tr730 - case 12: - goto tr731 + goto tr726 case 13: - goto tr536 + goto tr533 case 32: - goto tr729 + goto tr725 case 34: - goto tr157 + goto tr155 case 44: - goto tr732 + goto tr727 + case 46: + goto st477 case 61: goto st6 + case 69: + goto st111 case 92: - goto st63 + goto st62 + case 101: + goto st111 + case 105: + goto st483 } - 
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st481 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st482 + } + case ( m.data)[( m.p)] >= 9: + goto tr725 } - goto st48 + goto st47 st482: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof482 } st_case_482: switch ( m.data)[( m.p)] { - case 9: - goto tr729 case 10: - goto tr534 + goto tr532 case 11: - goto tr730 - case 12: - goto tr731 + goto tr726 case 13: - goto tr536 + goto tr533 case 32: - goto tr729 + goto tr725 case 34: - goto tr157 + goto tr155 case 44: - goto tr732 + goto tr727 case 46: - goto st478 + goto st477 case 61: goto st6 case 69: - goto st112 + goto st111 case 92: - goto st63 + goto st62 case 101: - goto st112 - case 105: - goto st484 + goto st111 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st483 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st482 + } + case ( m.data)[( m.p)] >= 9: + goto tr725 } - goto st48 + goto st47 st483: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof483 } st_case_483: switch ( m.data)[( m.p)] { - case 9: - goto tr729 case 10: - goto tr534 + goto tr737 case 11: - goto tr730 - case 12: - goto tr731 + goto tr738 case 13: - goto tr536 + goto tr739 case 32: - goto tr729 + goto tr736 case 34: - goto tr157 + goto tr155 case 44: - goto tr732 - case 46: - goto st478 + goto tr740 case 61: goto st6 - case 69: - goto st112 case 92: - goto st63 - case 101: - goto st112 + goto st62 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st483 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr736 } - goto st48 + goto st47 st484: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof484 } st_case_484: switch ( m.data)[( m.p)] { - case 9: - goto tr740 case 10: - goto tr741 + goto tr532 case 11: - goto tr742 - case 12: - goto tr743 + goto tr726 case 13: - goto tr744 + goto tr533 case 32: - goto tr740 + goto tr725 case 34: - goto tr157 + goto tr155 case 44: - goto tr745 + goto tr727 + case 46: + goto st477 case 61: goto st6 + case 69: + goto st111 case 92: - goto st63 + goto st62 + case 101: + goto st111 + case 105: + goto st483 } - goto st48 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st484 + } + case ( m.data)[( m.p)] >= 9: + goto tr725 + } + goto st47 +tr283: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st485 st485: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof485 } st_case_485: +//line plugins/parsers/influx/machine.go:17514 switch ( m.data)[( m.p)] { - case 9: - goto tr729 case 10: - goto tr534 + goto tr532 case 11: - goto tr730 - case 12: - goto tr731 + goto tr726 case 13: - goto tr536 + goto tr533 case 32: - goto tr729 + goto tr725 case 34: - goto tr157 + goto tr155 case 44: - goto tr732 + goto tr727 case 46: - goto st478 + goto st477 case 61: goto st6 case 69: - goto st112 + goto st111 case 92: - goto st63 + goto st62 case 101: - goto st112 + goto st111 case 105: - goto st484 + goto st483 + case 117: + goto st486 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st485 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st482 + } + case ( m.data)[( m.p)] >= 9: + goto tr725 } - goto st48 -tr285: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st486 + goto st47 st486: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof486 } st_case_486: -//line 
plugins/parsers/influx/machine.go:17527 switch ( m.data)[( m.p)] { - case 9: - goto tr729 case 10: - goto tr534 + goto tr743 case 11: - goto tr730 - case 12: - goto tr731 + goto tr744 case 13: - goto tr536 + goto tr745 case 32: - goto tr729 + goto tr742 case 34: - goto tr157 + goto tr155 case 44: - goto tr732 - case 46: - goto st478 + goto tr746 case 61: goto st6 - case 69: - goto st112 case 92: - goto st63 - case 101: - goto st112 - case 105: - goto st484 - case 117: - goto st487 + goto st62 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st483 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr742 } - goto st48 + goto st47 +tr284: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st487 st487: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof487 } st_case_487: +//line plugins/parsers/influx/machine.go:17590 switch ( m.data)[( m.p)] { - case 9: - goto tr747 case 10: - goto tr748 + goto tr532 case 11: - goto tr749 - case 12: - goto tr750 + goto tr726 case 13: - goto tr751 + goto tr533 case 32: - goto tr747 + goto tr725 case 34: - goto tr157 + goto tr155 case 44: - goto tr752 + goto tr727 + case 46: + goto st477 case 61: goto st6 + case 69: + goto st111 case 92: - goto st63 + goto st62 + case 101: + goto st111 + case 105: + goto st483 + case 117: + goto st486 } - goto st48 -tr286: -//line plugins/parsers/influx/machine.go.rl:20 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st487 + } + case ( m.data)[( m.p)] >= 9: + goto tr725 + } + goto st47 +tr285: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -17599,292 +17635,332 @@ tr286: goto _test_eof488 } st_case_488: -//line plugins/parsers/influx/machine.go:17603 +//line plugins/parsers/influx/machine.go:17639 switch ( m.data)[( m.p)] { - case 9: - goto tr729 case 10: - goto tr534 + goto tr749 case 11: - goto tr730 - case 12: - goto tr731 + goto tr750 case 13: - goto tr536 + goto tr751 case 32: - goto tr729 + goto tr748 case 34: - goto tr157 + goto tr155 case 44: - goto tr732 - case 46: - goto st478 - case 61: - goto st6 - case 69: - goto st112 - case 92: - goto st63 - case 101: - goto st112 - case 105: - goto st484 - case 117: - goto st487 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st488 - } - goto st48 -tr287: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st489 - st489: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof489 - } - st_case_489: -//line plugins/parsers/influx/machine.go:17651 - switch ( m.data)[( m.p)] { - case 9: - goto tr754 - case 10: - goto tr755 - case 11: - goto tr756 - case 12: - goto tr757 - case 13: - goto tr758 - case 32: - goto tr754 - case 34: - goto tr157 - case 44: - goto tr759 + goto tr752 case 61: goto st6 case 65: + goto st113 + case 92: + goto st62 + case 97: + goto st116 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr748 + } + goto st47 + st113: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof113 + } + st_case_113: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr154 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr155 + case 44: + goto tr156 + case 61: + goto st6 + case 76: goto st114 case 92: - goto st63 - case 97: - goto st117 + goto st62 } - goto st48 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st47 st114: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof114 } st_case_114: switch ( m.data)[( m.p)] { - case 9: - goto tr155 case 10: - 
goto tr29 + goto tr28 case 11: - goto tr156 - case 12: - goto tr60 + goto tr154 case 13: - goto st7 + goto st6 case 32: - goto tr155 + goto tr153 case 34: - goto tr157 + goto tr155 case 44: - goto tr158 + goto tr156 case 61: goto st6 - case 76: + case 83: goto st115 case 92: - goto st63 + goto st62 } - goto st48 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st47 st115: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof115 } st_case_115: switch ( m.data)[( m.p)] { - case 9: - goto tr155 case 10: - goto tr29 + goto tr28 case 11: - goto tr156 - case 12: - goto tr60 + goto tr154 case 13: - goto st7 + goto st6 case 32: - goto tr155 + goto tr153 case 34: - goto tr157 + goto tr155 case 44: - goto tr158 + goto tr156 case 61: goto st6 - case 83: - goto st116 + case 69: + goto st489 case 92: - goto st63 + goto st62 } - goto st48 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st47 + st489: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof489 + } + st_case_489: + switch ( m.data)[( m.p)] { + case 10: + goto tr749 + case 11: + goto tr750 + case 13: + goto tr751 + case 32: + goto tr748 + case 34: + goto tr155 + case 44: + goto tr752 + case 61: + goto st6 + case 92: + goto st62 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr748 + } + goto st47 st116: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof116 } st_case_116: switch ( m.data)[( m.p)] { - case 9: - goto tr155 case 10: - goto tr29 + goto tr28 case 11: + goto tr154 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr155 + case 44: goto tr156 - case 12: - goto tr60 - case 13: - goto st7 - case 32: - goto tr155 - case 34: - goto tr157 - case 44: - goto tr158 - case 61: - goto st6 - case 69: - goto st490 - case 92: - goto st63 - } - goto st48 - st490: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof490 - } - st_case_490: - switch ( m.data)[( m.p)] { - case 9: - goto tr754 - case 10: - goto tr755 - case 11: - goto tr756 - case 12: - goto tr757 - case 13: - goto tr758 - case 32: - goto tr754 - case 34: - goto tr157 - case 44: - goto tr759 case 61: goto st6 case 92: - goto st63 + goto st62 + case 108: + goto st117 } - goto st48 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st47 st117: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof117 } st_case_117: switch ( m.data)[( m.p)] { - case 9: - goto tr155 case 10: - goto tr29 + goto tr28 case 11: - goto tr156 - case 12: - goto tr60 + goto tr154 case 13: - goto st7 + goto st6 case 32: - goto tr155 + goto tr153 case 34: - goto tr157 + goto tr155 case 44: - goto tr158 + goto tr156 case 61: goto st6 case 92: - goto st63 - case 108: + goto st62 + case 115: goto st118 } - goto st48 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st47 st118: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof118 } st_case_118: switch ( m.data)[( m.p)] { - case 9: - goto tr155 case 10: - goto tr29 + goto tr28 case 11: - goto tr156 - case 12: - goto tr60 + goto tr154 case 13: - goto st7 + goto st6 case 32: - goto tr155 + goto tr153 case 34: - goto tr157 + goto tr155 case 44: - goto tr158 + goto tr156 case 61: goto st6 case 92: - goto st63 - case 115: - goto st119 + goto st62 + case 101: + goto st489 } - goto st48 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st47 +tr286: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st490 + st490: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof490 + } + st_case_490: +//line 
plugins/parsers/influx/machine.go:17878 + switch ( m.data)[( m.p)] { + case 10: + goto tr749 + case 11: + goto tr750 + case 13: + goto tr751 + case 32: + goto tr748 + case 34: + goto tr155 + case 44: + goto tr752 + case 61: + goto st6 + case 82: + goto st119 + case 92: + goto st62 + case 114: + goto st120 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr748 + } + goto st47 st119: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof119 } st_case_119: switch ( m.data)[( m.p)] { - case 9: - goto tr155 case 10: - goto tr29 + goto tr28 case 11: - goto tr156 - case 12: - goto tr60 + goto tr154 case 13: - goto st7 + goto st6 case 32: - goto tr155 + goto tr153 case 34: - goto tr157 + goto tr155 case 44: - goto tr158 + goto tr156 + case 61: + goto st6 + case 85: + goto st115 + case 92: + goto st62 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st47 + st120: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof120 + } + st_case_120: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr154 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr155 + case 44: + goto tr156 case 61: goto st6 case 92: - goto st63 - case 101: - goto st490 + goto st62 + case 117: + goto st118 } - goto st48 -tr288: -//line plugins/parsers/influx/machine.go.rl:20 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st47 +tr287: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -17894,96 +17970,33 @@ tr288: goto _test_eof491 } st_case_491: -//line plugins/parsers/influx/machine.go:17898 +//line plugins/parsers/influx/machine.go:17974 switch ( m.data)[( m.p)] { - case 9: - goto tr754 case 10: - goto tr755 + goto tr749 case 11: - goto tr756 - case 12: - goto tr757 + goto tr750 case 13: - goto tr758 + goto tr751 case 32: - goto tr754 + goto tr748 case 34: - goto tr157 + goto tr155 case 44: - goto tr759 + goto tr752 case 61: goto st6 - case 82: - goto st120 case 92: - goto st63 - case 114: - goto st121 - } - goto st48 - st120: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof120 - } - st_case_120: - switch ( m.data)[( m.p)] { - case 9: - goto tr155 - case 10: - goto tr29 - case 11: - goto tr156 - case 12: - goto tr60 - case 13: - goto st7 - case 32: - goto tr155 - case 34: - goto tr157 - case 44: - goto tr158 - case 61: - goto st6 - case 85: + goto st62 + case 97: goto st116 - case 92: - goto st63 } - goto st48 - st121: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof121 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr748 } - st_case_121: - switch ( m.data)[( m.p)] { - case 9: - goto tr155 - case 10: - goto tr29 - case 11: - goto tr156 - case 12: - goto tr60 - case 13: - goto st7 - case 32: - goto tr155 - case 34: - goto tr157 - case 44: - goto tr158 - case 61: - goto st6 - case 92: - goto st63 - case 117: - goto st119 - } - goto st48 -tr289: -//line plugins/parsers/influx/machine.go.rl:20 + goto st47 +tr288: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -17993,128 +18006,132 @@ tr289: goto _test_eof492 } st_case_492: -//line plugins/parsers/influx/machine.go:17997 +//line plugins/parsers/influx/machine.go:18010 switch ( m.data)[( m.p)] { - case 9: - goto tr754 case 10: - goto tr755 + goto tr749 case 11: - goto tr756 - case 12: - goto tr757 + goto tr750 case 13: - goto tr758 + goto tr751 case 32: - goto tr754 + goto tr748 case 34: - goto tr157 + goto tr155 case 44: - goto tr759 + goto tr752 case 61: goto st6 case 92: - goto st63 - case 97: - goto st117 - } - goto st48 
-tr290: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st493 - st493: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof493 - } - st_case_493: -//line plugins/parsers/influx/machine.go:18034 - switch ( m.data)[( m.p)] { - case 9: - goto tr754 - case 10: - goto tr755 - case 11: - goto tr756 - case 12: - goto tr757 - case 13: - goto tr758 - case 32: - goto tr754 - case 34: - goto tr157 - case 44: - goto tr759 - case 61: - goto st6 - case 92: - goto st63 + goto st62 case 114: - goto st121 + goto st120 } - goto st48 -tr279: -//line plugins/parsers/influx/machine.go.rl:20 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr748 + } + goto st47 +tr277: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st122 - st122: + goto st121 + st121: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof122 + goto _test_eof121 } - st_case_122: -//line plugins/parsers/influx/machine.go:18071 + st_case_121: +//line plugins/parsers/influx/machine.go:18046 switch ( m.data)[( m.p)] { case 34: - goto st108 + goto st107 case 92: - goto st123 + goto st122 } switch { case ( m.data)[( m.p)] > 10: if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr47 + goto tr45 } case ( m.data)[( m.p)] >= 9: - goto tr47 + goto tr45 } - goto st45 - st123: -//line plugins/parsers/influx/machine.go.rl:240 + goto st44 + st122: +//line plugins/parsers/influx/machine.go.rl:248 ( m.p)-- if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof123 + goto _test_eof122 } - st_case_123: -//line plugins/parsers/influx/machine.go:18095 + st_case_122: +//line plugins/parsers/influx/machine.go:18070 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto tr29 - case 12: - goto tr47 - case 13: - goto st7 + goto tr28 case 32: goto st6 case 34: - goto tr261 + goto tr259 case 44: goto st6 case 61: - goto tr281 + goto tr279 case 92: - goto st122 + goto st121 } - goto st108 -tr267: -//line plugins/parsers/influx/machine.go.rl:20 + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st107 +tr265: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st123 + st123: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof123 + } + st_case_123: +//line plugins/parsers/influx/machine.go:18102 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr230 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr155 + case 44: + goto tr231 + case 46: + goto st124 + case 48: + goto st517 + case 61: + goto st6 + case 92: + goto st85 + } + switch { + case ( m.data)[( m.p)] > 12: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st520 + } + case ( m.data)[( m.p)] >= 9: + goto tr229 + } + goto st79 +tr266: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -18124,39 +18141,251 @@ tr267: goto _test_eof124 } st_case_124: -//line plugins/parsers/influx/machine.go:18128 +//line plugins/parsers/influx/machine.go:18145 switch ( m.data)[( m.p)] { - case 9: - goto tr231 case 10: - goto tr29 + goto tr28 case 11: - goto tr232 - case 12: - goto tr60 + goto tr230 case 13: - goto st7 + goto st6 case 32: - goto tr231 + goto tr229 case 34: - goto tr157 + goto tr155 case 44: - goto tr233 - case 46: - goto st125 - case 48: - goto st518 + goto tr231 case 61: goto st6 case 92: - goto st86 + goto st85 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st521 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st493 + } + case ( m.data)[( m.p)] >= 9: + goto tr229 } - goto 
st80 -tr268: -//line plugins/parsers/influx/machine.go.rl:20 + goto st79 + st493: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof493 + } + st_case_493: + switch ( m.data)[( m.p)] { + case 10: + goto tr758 + case 11: + goto tr759 + case 13: + goto tr638 + case 32: + goto tr757 + case 34: + goto tr155 + case 44: + goto tr760 + case 61: + goto st6 + case 69: + goto st126 + case 92: + goto st85 + case 101: + goto st126 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st493 + } + case ( m.data)[( m.p)] >= 9: + goto tr757 + } + goto st79 +tr759: + ( m.cs) = 494 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr792: + ( m.cs) = 494 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr798: + ( m.cs) = 494 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr804: + ( m.cs) = 494 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st494: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof494 + } + st_case_494: +//line plugins/parsers/influx/machine.go:18306 + switch ( m.data)[( m.p)] { + case 10: + goto tr219 + case 11: + goto tr763 + case 13: + goto st72 + case 32: + goto tr762 + case 34: + goto tr202 + case 44: + goto tr231 + case 45: + goto tr764 + case 61: + goto st6 + case 92: + goto tr235 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr765 + } + case ( m.data)[( m.p)] >= 9: + goto tr762 + } + goto tr233 +tr763: + ( m.cs) = 495 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again + st495: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof495 + } + st_case_495: +//line plugins/parsers/influx/machine.go:18358 + switch ( m.data)[( m.p)] { + case 10: + goto tr219 + case 11: + goto tr763 + case 13: + goto st72 + case 32: + goto tr762 + case 34: + goto tr202 + case 44: + goto tr231 + case 45: + goto tr764 + case 61: + goto tr99 + case 92: + goto tr235 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr765 + } + case ( m.data)[( m.p)] >= 9: + goto tr762 + } + goto tr233 +tr764: +//line 
plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -18166,1264 +18395,1084 @@ tr268: goto _test_eof125 } st_case_125: -//line plugins/parsers/influx/machine.go:18170 +//line plugins/parsers/influx/machine.go:18399 switch ( m.data)[( m.p)] { - case 9: - goto tr231 case 10: - goto tr29 + goto tr28 case 11: - goto tr232 - case 12: - goto tr60 - case 13: - goto st7 - case 32: - goto tr231 - case 34: - goto tr157 - case 44: - goto tr233 - case 61: - goto st6 - case 92: - goto st86 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st494 - } - goto st80 - st494: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof494 - } - st_case_494: - switch ( m.data)[( m.p)] { - case 9: - goto tr764 - case 10: - goto tr765 - case 11: - goto tr766 - case 12: - goto tr731 - case 13: - goto tr642 - case 32: - goto tr764 - case 34: - goto tr157 - case 44: - goto tr767 - case 61: - goto st6 - case 69: - goto st127 - case 92: - goto st86 - case 101: - goto st127 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st494 - } - goto st80 -tr766: - ( m.cs) = 495 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:122 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr799: - ( m.cs) = 495 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:104 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr805: - ( m.cs) = 495 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:113 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr811: - ( m.cs) = 495 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:131 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st495: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof495 - } - st_case_495: -//line plugins/parsers/influx/machine.go:18329 - switch ( m.data)[( m.p)] { - case 9: - goto tr769 - case 10: - goto tr221 - case 11: - goto tr770 - case 12: - goto tr566 - case 13: - goto st73 - case 32: - goto tr769 - case 34: - goto tr204 - case 44: - goto tr233 - case 45: - goto tr771 - case 61: - goto st6 - case 92: goto tr237 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr206 + case 44: + goto tr231 + case 61: + goto tr99 + case 92: + goto st83 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr772 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st496 + } + case ( m.data)[( m.p)] >= 9: + goto tr229 } - goto tr235 -tr770: - ( m.cs) = 496 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) 
= 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:20 + goto st81 +tr765: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto _again + goto st496 st496: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof496 } st_case_496: -//line plugins/parsers/influx/machine.go:18380 +//line plugins/parsers/influx/machine.go:18438 switch ( m.data)[( m.p)] { - case 9: - goto tr769 case 10: - goto tr221 + goto tr600 case 11: - goto tr770 - case 12: - goto tr566 + goto tr767 case 13: - goto st73 + goto tr602 case 32: - goto tr769 + goto tr766 case 34: - goto tr204 + goto tr206 case 44: - goto tr233 - case 45: - goto tr771 - case 61: - goto tr101 - case 92: - goto tr237 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr772 - } - goto tr235 -tr771: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st126 - st126: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof126 - } - st_case_126: -//line plugins/parsers/influx/machine.go:18420 - switch ( m.data)[( m.p)] { - case 9: goto tr231 - case 10: - goto tr29 - case 11: - goto tr239 - case 12: - goto tr60 - case 13: - goto st7 - case 32: - goto tr231 - case 34: - goto tr208 - case 44: - goto tr233 case 61: - goto tr101 + goto tr99 case 92: - goto st84 + goto st83 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st497 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st498 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st82 -tr772: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st497 - st497: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof497 - } - st_case_497: -//line plugins/parsers/influx/machine.go:18458 - switch ( m.data)[( m.p)] { - case 9: - goto tr773 - case 10: - goto tr603 - case 11: - goto tr774 - case 12: - goto tr572 - case 13: - goto tr605 - case 32: - goto tr773 - case 34: - goto tr208 - case 44: - goto tr233 - case 61: - goto tr101 - case 92: - goto st84 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st499 - } - goto st82 -tr777: - ( m.cs) = 498 -//line plugins/parsers/influx/machine.go.rl:91 + goto st81 +tr770: + ( m.cs) = 497 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p goto _again -tr774: - ( m.cs) = 498 -//line plugins/parsers/influx/machine.go.rl:91 +tr767: + ( m.cs) = 497 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:149 +//line plugins/parsers/influx/machine.go.rl:157 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again + st497: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof497 + } + st_case_497: +//line plugins/parsers/influx/machine.go:18511 + switch ( m.data)[( m.p)] { + case 10: + goto tr219 + case 11: + goto tr770 + case 13: + goto st72 + case 32: + goto tr769 + case 34: + goto tr202 + case 44: + goto tr231 + case 61: + goto tr99 + case 92: + goto tr235 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr769 + } + goto tr233 st498: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof498 } st_case_498: -//line 
plugins/parsers/influx/machine.go:18530 switch ( m.data)[( m.p)] { - case 9: - goto tr776 case 10: - goto tr221 + goto tr600 case 11: - goto tr777 - case 12: - goto tr575 + goto tr767 case 13: - goto st73 + goto tr602 case 32: - goto tr776 + goto tr766 case 34: - goto tr204 + goto tr206 case 44: - goto tr233 + goto tr231 case 61: - goto tr101 + goto tr99 case 92: - goto tr237 + goto st83 } - goto tr235 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st499 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 + } + goto st81 st499: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof499 } st_case_499: switch ( m.data)[( m.p)] { - case 9: - goto tr773 case 10: - goto tr603 + goto tr600 case 11: - goto tr774 - case 12: - goto tr572 + goto tr767 case 13: - goto tr605 + goto tr602 case 32: - goto tr773 + goto tr766 case 34: - goto tr208 + goto tr206 case 44: - goto tr233 + goto tr231 case 61: - goto tr101 + goto tr99 case 92: - goto st84 + goto st83 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st500 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st500 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st82 + goto st81 st500: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof500 } st_case_500: switch ( m.data)[( m.p)] { - case 9: - goto tr773 case 10: - goto tr603 + goto tr600 case 11: - goto tr774 - case 12: - goto tr572 + goto tr767 case 13: - goto tr605 + goto tr602 case 32: - goto tr773 + goto tr766 case 34: - goto tr208 + goto tr206 case 44: - goto tr233 + goto tr231 case 61: - goto tr101 + goto tr99 case 92: - goto st84 + goto st83 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st501 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st501 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st82 + goto st81 st501: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof501 } st_case_501: switch ( m.data)[( m.p)] { - case 9: - goto tr773 case 10: - goto tr603 + goto tr600 case 11: - goto tr774 - case 12: - goto tr572 + goto tr767 case 13: - goto tr605 + goto tr602 case 32: - goto tr773 + goto tr766 case 34: - goto tr208 + goto tr206 case 44: - goto tr233 + goto tr231 case 61: - goto tr101 + goto tr99 case 92: - goto st84 + goto st83 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st502 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st502 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st82 + goto st81 st502: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof502 } st_case_502: switch ( m.data)[( m.p)] { - case 9: - goto tr773 case 10: - goto tr603 + goto tr600 case 11: - goto tr774 - case 12: - goto tr572 + goto tr767 case 13: - goto tr605 + goto tr602 case 32: - goto tr773 + goto tr766 case 34: - goto tr208 + goto tr206 case 44: - goto tr233 + goto tr231 case 61: - goto tr101 + goto tr99 case 92: - goto st84 + goto st83 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st503 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st503 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st82 + goto st81 st503: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof503 } st_case_503: switch ( m.data)[( m.p)] { - case 9: - goto tr773 case 10: - goto tr603 + goto tr600 case 11: - goto tr774 - case 12: - goto tr572 + goto tr767 case 13: - goto 
tr605 + goto tr602 case 32: - goto tr773 + goto tr766 case 34: - goto tr208 + goto tr206 case 44: - goto tr233 + goto tr231 case 61: - goto tr101 + goto tr99 case 92: - goto st84 + goto st83 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st504 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st504 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st82 + goto st81 st504: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof504 } st_case_504: switch ( m.data)[( m.p)] { - case 9: - goto tr773 case 10: - goto tr603 + goto tr600 case 11: - goto tr774 - case 12: - goto tr572 + goto tr767 case 13: - goto tr605 + goto tr602 case 32: - goto tr773 + goto tr766 case 34: - goto tr208 + goto tr206 case 44: - goto tr233 + goto tr231 case 61: - goto tr101 + goto tr99 case 92: - goto st84 + goto st83 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st505 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st505 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st82 + goto st81 st505: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof505 } st_case_505: switch ( m.data)[( m.p)] { - case 9: - goto tr773 case 10: - goto tr603 + goto tr600 case 11: - goto tr774 - case 12: - goto tr572 + goto tr767 case 13: - goto tr605 + goto tr602 case 32: - goto tr773 + goto tr766 case 34: - goto tr208 + goto tr206 case 44: - goto tr233 + goto tr231 case 61: - goto tr101 + goto tr99 case 92: - goto st84 + goto st83 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st506 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st506 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st82 + goto st81 st506: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof506 } st_case_506: switch ( m.data)[( m.p)] { - case 9: - goto tr773 case 10: - goto tr603 + goto tr600 case 11: - goto tr774 - case 12: - goto tr572 + goto tr767 case 13: - goto tr605 + goto tr602 case 32: - goto tr773 + goto tr766 case 34: - goto tr208 + goto tr206 case 44: - goto tr233 + goto tr231 case 61: - goto tr101 + goto tr99 case 92: - goto st84 + goto st83 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st507 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st507 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st82 + goto st81 st507: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof507 } st_case_507: switch ( m.data)[( m.p)] { - case 9: - goto tr773 case 10: - goto tr603 + goto tr600 case 11: - goto tr774 - case 12: - goto tr572 + goto tr767 case 13: - goto tr605 + goto tr602 case 32: - goto tr773 + goto tr766 case 34: - goto tr208 + goto tr206 case 44: - goto tr233 + goto tr231 case 61: - goto tr101 + goto tr99 case 92: - goto st84 + goto st83 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st508 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st508 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st82 + goto st81 st508: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof508 } st_case_508: switch ( m.data)[( m.p)] { - case 9: - goto tr773 case 10: - goto tr603 + goto tr600 case 11: - goto tr774 - case 12: - goto tr572 + goto tr767 case 13: - goto tr605 + goto tr602 case 32: - goto tr773 + goto tr766 case 34: - goto tr208 + goto tr206 case 44: - goto tr233 + goto tr231 case 61: - 
goto tr101 + goto tr99 case 92: - goto st84 + goto st83 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st509 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st509 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st82 + goto st81 st509: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof509 } st_case_509: switch ( m.data)[( m.p)] { - case 9: - goto tr773 case 10: - goto tr603 + goto tr600 case 11: - goto tr774 - case 12: - goto tr572 + goto tr767 case 13: - goto tr605 + goto tr602 case 32: - goto tr773 + goto tr766 case 34: - goto tr208 + goto tr206 case 44: - goto tr233 + goto tr231 case 61: - goto tr101 + goto tr99 case 92: - goto st84 + goto st83 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st510 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st510 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st82 + goto st81 st510: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof510 } st_case_510: switch ( m.data)[( m.p)] { - case 9: - goto tr773 case 10: - goto tr603 + goto tr600 case 11: - goto tr774 - case 12: - goto tr572 + goto tr767 case 13: - goto tr605 + goto tr602 case 32: - goto tr773 + goto tr766 case 34: - goto tr208 + goto tr206 case 44: - goto tr233 + goto tr231 case 61: - goto tr101 + goto tr99 case 92: - goto st84 + goto st83 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st511 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st511 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st82 + goto st81 st511: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof511 } st_case_511: switch ( m.data)[( m.p)] { - case 9: - goto tr773 case 10: - goto tr603 + goto tr600 case 11: - goto tr774 - case 12: - goto tr572 + goto tr767 case 13: - goto tr605 + goto tr602 case 32: - goto tr773 + goto tr766 case 34: - goto tr208 + goto tr206 case 44: - goto tr233 + goto tr231 case 61: - goto tr101 + goto tr99 case 92: - goto st84 + goto st83 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st512 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st512 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st82 + goto st81 st512: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof512 } st_case_512: switch ( m.data)[( m.p)] { - case 9: - goto tr773 case 10: - goto tr603 + goto tr600 case 11: - goto tr774 - case 12: - goto tr572 + goto tr767 case 13: - goto tr605 + goto tr602 case 32: - goto tr773 + goto tr766 case 34: - goto tr208 + goto tr206 case 44: - goto tr233 + goto tr231 case 61: - goto tr101 + goto tr99 case 92: - goto st84 + goto st83 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st513 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st513 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st82 + goto st81 st513: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof513 } st_case_513: switch ( m.data)[( m.p)] { - case 9: - goto tr773 case 10: - goto tr603 + goto tr600 case 11: - goto tr774 - case 12: - goto tr572 + goto tr767 case 13: - goto tr605 + goto tr602 case 32: - goto tr773 + goto tr766 case 34: - goto tr208 + goto tr206 case 44: - goto tr233 + goto tr231 case 61: - goto tr101 + goto tr99 case 92: - goto st84 + goto st83 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st514 + 
switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st514 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st82 + goto st81 st514: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof514 } st_case_514: switch ( m.data)[( m.p)] { - case 9: - goto tr773 case 10: - goto tr603 + goto tr600 case 11: - goto tr774 - case 12: - goto tr572 + goto tr767 case 13: - goto tr605 + goto tr602 case 32: - goto tr773 + goto tr766 case 34: - goto tr208 + goto tr206 case 44: - goto tr233 + goto tr231 case 61: - goto tr101 + goto tr99 case 92: - goto st84 + goto st83 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st515 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st515 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st82 + goto st81 st515: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof515 } st_case_515: switch ( m.data)[( m.p)] { - case 9: - goto tr773 case 10: - goto tr603 + goto tr600 case 11: - goto tr774 - case 12: - goto tr572 + goto tr767 case 13: - goto tr605 + goto tr602 case 32: - goto tr773 + goto tr766 case 34: - goto tr208 + goto tr206 case 44: - goto tr233 + goto tr231 case 61: - goto tr101 + goto tr99 case 92: - goto st84 + goto st83 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st516 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr766 } - goto st82 - st516: + goto st81 + st126: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof516 + goto _test_eof126 } - st_case_516: + st_case_126: switch ( m.data)[( m.p)] { - case 9: - goto tr773 case 10: - goto tr603 + goto tr28 case 11: - goto tr774 - case 12: - goto tr572 + goto tr230 case 13: - goto tr605 + goto st6 case 32: - goto tr773 + goto tr229 case 34: - goto tr208 + goto tr293 case 44: - goto tr233 + goto tr231 case 61: - goto tr101 + goto st6 case 92: - goto st84 + goto st85 } - goto st82 + switch { + case ( m.data)[( m.p)] < 43: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + case ( m.data)[( m.p)] > 45: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st516 + } + default: + goto st127 + } + goto st79 st127: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof127 } st_case_127: switch ( m.data)[( m.p)] { - case 9: - goto tr231 case 10: - goto tr29 + goto tr28 case 11: - goto tr232 - case 12: - goto tr60 + goto tr230 case 13: - goto st7 + goto st6 case 32: - goto tr231 + goto tr229 case 34: - goto tr295 + goto tr155 case 44: - goto tr233 + goto tr231 case 61: goto st6 case 92: - goto st86 + goto st85 } switch { - case ( m.data)[( m.p)] > 45: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st517 + goto st516 } - case ( m.data)[( m.p)] >= 43: - goto st128 + case ( m.data)[( m.p)] >= 9: + goto tr229 } - goto st80 - st128: + goto st79 + st516: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof128 + goto _test_eof516 } - st_case_128: + st_case_516: switch ( m.data)[( m.p)] { - case 9: - goto tr231 case 10: - goto tr29 + goto tr758 case 11: - goto tr232 - case 12: - goto tr60 + goto tr759 case 13: - goto st7 + goto tr638 case 32: - goto tr231 + goto tr757 case 34: - goto tr157 + goto tr155 case 44: - goto tr233 + goto tr760 case 61: goto st6 case 92: - goto st86 + goto st85 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st517 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st516 + } + case ( m.data)[( m.p)] >= 
9: + goto tr757 } - goto st80 + goto st79 st517: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof517 } st_case_517: switch ( m.data)[( m.p)] { - case 9: - goto tr764 case 10: - goto tr765 + goto tr758 case 11: - goto tr766 - case 12: - goto tr731 + goto tr759 case 13: - goto tr642 + goto tr638 case 32: - goto tr764 + goto tr757 case 34: - goto tr157 + goto tr155 case 44: - goto tr767 + goto tr760 + case 46: + goto st493 case 61: goto st6 + case 69: + goto st126 case 92: - goto st86 + goto st85 + case 101: + goto st126 + case 105: + goto st519 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st517 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st518 + } + case ( m.data)[( m.p)] >= 9: + goto tr757 } - goto st80 + goto st79 st518: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof518 } st_case_518: switch ( m.data)[( m.p)] { - case 9: - goto tr764 case 10: - goto tr765 + goto tr758 case 11: - goto tr766 - case 12: - goto tr731 + goto tr759 case 13: - goto tr642 + goto tr638 case 32: - goto tr764 + goto tr757 case 34: - goto tr157 + goto tr155 case 44: - goto tr767 + goto tr760 case 46: - goto st494 + goto st493 case 61: goto st6 case 69: - goto st127 + goto st126 case 92: - goto st86 + goto st85 case 101: - goto st127 - case 105: - goto st520 + goto st126 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st519 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st518 + } + case ( m.data)[( m.p)] >= 9: + goto tr757 } - goto st80 + goto st79 st519: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof519 } st_case_519: switch ( m.data)[( m.p)] { - case 9: - goto tr764 case 10: - goto tr765 + goto tr791 case 11: - goto tr766 - case 12: - goto tr731 + goto tr792 case 13: - goto tr642 + goto tr793 case 32: - goto tr764 + goto tr790 case 34: - goto tr157 + goto tr155 case 44: - goto tr767 - case 46: - goto st494 + goto tr794 case 61: goto st6 - case 69: - goto st127 case 92: - goto st86 - case 101: - goto st127 + goto st85 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st519 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr790 } - goto st80 + goto st79 st520: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof520 } st_case_520: switch ( m.data)[( m.p)] { - case 9: - goto tr797 case 10: - goto tr798 + goto tr758 case 11: - goto tr799 - case 12: - goto tr743 + goto tr759 case 13: - goto tr800 + goto tr638 case 32: - goto tr797 + goto tr757 case 34: - goto tr157 + goto tr155 case 44: - goto tr801 + goto tr760 + case 46: + goto st493 case 61: goto st6 + case 69: + goto st126 case 92: - goto st86 + goto st85 + case 101: + goto st126 + case 105: + goto st519 } - goto st80 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st520 + } + case ( m.data)[( m.p)] >= 9: + goto tr757 + } + goto st79 +tr267: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st521 st521: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof521 } st_case_521: +//line plugins/parsers/influx/machine.go:19361 switch ( m.data)[( m.p)] { - case 9: - goto tr764 case 10: - goto tr765 + goto tr758 case 11: - goto tr766 - case 12: - goto tr731 + goto tr759 case 13: - goto tr642 + goto tr638 case 32: - goto tr764 + goto tr757 case 34: - goto tr157 + goto tr155 case 44: - goto tr767 + goto tr760 case 46: - goto st494 + goto st493 case 61: goto st6 case 69: - goto st127 + goto st126 case 92: - goto st86 
+ goto st85 case 101: - goto st127 + goto st126 case 105: - goto st520 + goto st519 + case 117: + goto st522 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st521 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st518 + } + case ( m.data)[( m.p)] >= 9: + goto tr757 } - goto st80 -tr269: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st522 + goto st79 st522: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof522 } st_case_522: -//line plugins/parsers/influx/machine.go:19361 switch ( m.data)[( m.p)] { - case 9: - goto tr764 case 10: - goto tr765 + goto tr797 case 11: - goto tr766 - case 12: - goto tr731 + goto tr798 case 13: - goto tr642 + goto tr799 case 32: - goto tr764 + goto tr796 case 34: - goto tr157 + goto tr155 case 44: - goto tr767 - case 46: - goto st494 + goto tr800 case 61: goto st6 - case 69: - goto st127 case 92: - goto st86 - case 101: - goto st127 - case 105: - goto st520 - case 117: - goto st523 + goto st85 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st519 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr796 } - goto st80 + goto st79 +tr268: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st523 st523: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof523 } st_case_523: +//line plugins/parsers/influx/machine.go:19437 switch ( m.data)[( m.p)] { - case 9: - goto tr803 case 10: - goto tr804 + goto tr758 case 11: - goto tr805 - case 12: - goto tr750 + goto tr759 case 13: - goto tr806 + goto tr638 case 32: - goto tr803 + goto tr757 case 34: - goto tr157 + goto tr155 case 44: - goto tr807 + goto tr760 + case 46: + goto st493 case 61: goto st6 + case 69: + goto st126 case 92: - goto st86 + goto st85 + case 101: + goto st126 + case 105: + goto st519 + case 117: + goto st522 } - goto st80 -tr270: -//line plugins/parsers/influx/machine.go.rl:20 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st523 + } + case ( m.data)[( m.p)] >= 9: + goto tr757 + } + goto st79 +tr269: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -19433,292 +19482,332 @@ tr270: goto _test_eof524 } st_case_524: -//line plugins/parsers/influx/machine.go:19437 +//line plugins/parsers/influx/machine.go:19486 switch ( m.data)[( m.p)] { - case 9: - goto tr764 case 10: - goto tr765 + goto tr803 case 11: - goto tr766 - case 12: - goto tr731 + goto tr804 case 13: - goto tr642 + goto tr805 case 32: - goto tr764 + goto tr802 case 34: - goto tr157 + goto tr155 case 44: - goto tr767 - case 46: - goto st494 - case 61: - goto st6 - case 69: - goto st127 - case 92: - goto st86 - case 101: - goto st127 - case 105: - goto st520 - case 117: - goto st523 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st524 - } - goto st80 -tr271: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st525 - st525: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof525 - } - st_case_525: -//line plugins/parsers/influx/machine.go:19485 - switch ( m.data)[( m.p)] { - case 9: - goto tr809 - case 10: - goto tr810 - case 11: - goto tr811 - case 12: - goto tr757 - case 13: - goto tr812 - case 32: - goto tr809 - case 34: - goto tr157 - case 44: - goto tr813 + goto tr806 case 61: goto st6 case 65: + goto st128 + case 92: + goto st85 + case 97: + goto st131 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr802 + } + goto st79 + st128: + if ( m.p)++; ( m.p) == ( m.pe) { + goto 
_test_eof128 + } + st_case_128: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr230 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr155 + case 44: + goto tr231 + case 61: + goto st6 + case 76: goto st129 case 92: - goto st86 - case 97: - goto st132 + goto st85 } - goto st80 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto st79 st129: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof129 } st_case_129: switch ( m.data)[( m.p)] { - case 9: - goto tr231 case 10: - goto tr29 + goto tr28 case 11: - goto tr232 - case 12: - goto tr60 + goto tr230 case 13: - goto st7 + goto st6 case 32: - goto tr231 + goto tr229 case 34: - goto tr157 + goto tr155 case 44: - goto tr233 + goto tr231 case 61: goto st6 - case 76: + case 83: goto st130 case 92: - goto st86 + goto st85 } - goto st80 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto st79 st130: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof130 } st_case_130: switch ( m.data)[( m.p)] { - case 9: - goto tr231 case 10: - goto tr29 + goto tr28 case 11: - goto tr232 - case 12: - goto tr60 + goto tr230 case 13: - goto st7 + goto st6 case 32: - goto tr231 + goto tr229 case 34: - goto tr157 + goto tr155 case 44: - goto tr233 + goto tr231 case 61: goto st6 - case 83: - goto st131 + case 69: + goto st525 case 92: - goto st86 + goto st85 } - goto st80 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto st79 + st525: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof525 + } + st_case_525: + switch ( m.data)[( m.p)] { + case 10: + goto tr803 + case 11: + goto tr804 + case 13: + goto tr805 + case 32: + goto tr802 + case 34: + goto tr155 + case 44: + goto tr806 + case 61: + goto st6 + case 92: + goto st85 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr802 + } + goto st79 st131: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof131 } st_case_131: switch ( m.data)[( m.p)] { - case 9: - goto tr231 case 10: - goto tr29 + goto tr28 case 11: - goto tr232 - case 12: - goto tr60 + goto tr230 case 13: - goto st7 - case 32: - goto tr231 - case 34: - goto tr157 - case 44: - goto tr233 - case 61: goto st6 - case 69: - goto st526 - case 92: - goto st86 - } - goto st80 - st526: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof526 - } - st_case_526: - switch ( m.data)[( m.p)] { - case 9: - goto tr809 - case 10: - goto tr810 - case 11: - goto tr811 - case 12: - goto tr757 - case 13: - goto tr812 case 32: - goto tr809 + goto tr229 case 34: - goto tr157 + goto tr155 case 44: - goto tr813 + goto tr231 case 61: goto st6 case 92: - goto st86 + goto st85 + case 108: + goto st132 } - goto st80 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto st79 st132: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof132 } st_case_132: switch ( m.data)[( m.p)] { - case 9: - goto tr231 case 10: - goto tr29 + goto tr28 case 11: - goto tr232 - case 12: - goto tr60 + goto tr230 case 13: - goto st7 + goto st6 case 32: - goto tr231 + goto tr229 case 34: - goto tr157 + goto tr155 case 44: - goto tr233 + goto tr231 case 61: goto st6 case 92: - goto st86 - case 108: + goto st85 + case 115: goto st133 } - goto st80 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto st79 st133: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof133 } st_case_133: switch ( m.data)[( m.p)] { - case 9: - goto tr231 case 10: - goto tr29 + goto tr28 case 11: - goto tr232 - case 12: - goto tr60 + goto tr230 case 13: - goto st7 + goto 
st6 case 32: - goto tr231 + goto tr229 case 34: - goto tr157 + goto tr155 case 44: - goto tr233 + goto tr231 case 61: goto st6 case 92: - goto st86 - case 115: - goto st134 + goto st85 + case 101: + goto st525 } - goto st80 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto st79 +tr270: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st526 + st526: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof526 + } + st_case_526: +//line plugins/parsers/influx/machine.go:19725 + switch ( m.data)[( m.p)] { + case 10: + goto tr803 + case 11: + goto tr804 + case 13: + goto tr805 + case 32: + goto tr802 + case 34: + goto tr155 + case 44: + goto tr806 + case 61: + goto st6 + case 82: + goto st134 + case 92: + goto st85 + case 114: + goto st135 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr802 + } + goto st79 st134: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof134 } st_case_134: switch ( m.data)[( m.p)] { - case 9: - goto tr231 case 10: - goto tr29 + goto tr28 case 11: - goto tr232 - case 12: - goto tr60 + goto tr230 case 13: - goto st7 + goto st6 case 32: - goto tr231 + goto tr229 case 34: - goto tr157 + goto tr155 case 44: - goto tr233 + goto tr231 + case 61: + goto st6 + case 85: + goto st130 + case 92: + goto st85 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto st79 + st135: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof135 + } + st_case_135: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr230 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr155 + case 44: + goto tr231 case 61: goto st6 case 92: - goto st86 - case 101: - goto st526 + goto st85 + case 117: + goto st133 } - goto st80 -tr272: -//line plugins/parsers/influx/machine.go.rl:20 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto st79 +tr271: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -19728,96 +19817,33 @@ tr272: goto _test_eof527 } st_case_527: -//line plugins/parsers/influx/machine.go:19732 +//line plugins/parsers/influx/machine.go:19821 switch ( m.data)[( m.p)] { - case 9: - goto tr809 case 10: - goto tr810 + goto tr803 case 11: - goto tr811 - case 12: - goto tr757 + goto tr804 case 13: - goto tr812 + goto tr805 case 32: - goto tr809 + goto tr802 case 34: - goto tr157 + goto tr155 case 44: - goto tr813 + goto tr806 case 61: goto st6 - case 82: - goto st135 case 92: - goto st86 - case 114: - goto st136 - } - goto st80 - st135: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof135 - } - st_case_135: - switch ( m.data)[( m.p)] { - case 9: - goto tr231 - case 10: - goto tr29 - case 11: - goto tr232 - case 12: - goto tr60 - case 13: - goto st7 - case 32: - goto tr231 - case 34: - goto tr157 - case 44: - goto tr233 - case 61: - goto st6 - case 85: + goto st85 + case 97: goto st131 - case 92: - goto st86 } - goto st80 - st136: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof136 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr802 } - st_case_136: - switch ( m.data)[( m.p)] { - case 9: - goto tr231 - case 10: - goto tr29 - case 11: - goto tr232 - case 12: - goto tr60 - case 13: - goto st7 - case 32: - goto tr231 - case 34: - goto tr157 - case 44: - goto tr233 - case 61: - goto st6 - case 92: - goto st86 - case 117: - goto st134 - } - goto st80 -tr273: -//line plugins/parsers/influx/machine.go.rl:20 + goto st79 +tr272: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -19827,202 +19853,221 @@ tr273: goto 
_test_eof528 } st_case_528: -//line plugins/parsers/influx/machine.go:19831 +//line plugins/parsers/influx/machine.go:19857 switch ( m.data)[( m.p)] { - case 9: - goto tr809 case 10: - goto tr810 + goto tr803 case 11: - goto tr811 - case 12: - goto tr757 + goto tr804 case 13: - goto tr812 + goto tr805 case 32: - goto tr809 + goto tr802 case 34: - goto tr157 + goto tr155 case 44: - goto tr813 + goto tr806 case 61: goto st6 case 92: - goto st86 - case 97: - goto st132 + goto st85 + case 114: + goto st135 } - goto st80 -tr274: -//line plugins/parsers/influx/machine.go.rl:20 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr802 + } + goto st79 +tr257: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st529 + goto st136 + st136: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof136 + } + st_case_136: +//line plugins/parsers/influx/machine.go:19893 + switch ( m.data)[( m.p)] { + case 34: + goto st97 + case 92: + goto st137 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 + } + goto st44 + st137: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof137 + } + st_case_137: +//line plugins/parsers/influx/machine.go:19917 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr259 + case 44: + goto st6 + case 61: + goto tr260 + case 92: + goto st136 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st97 + st138: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof138 + } + st_case_138: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr88 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr315 + case 44: + goto tr90 + case 92: + goto st140 + } + switch { + case ( m.data)[( m.p)] < 43: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + case ( m.data)[( m.p)] > 45: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st531 + } + default: + goto st139 + } + goto st29 +tr315: + ( m.cs) = 529 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st529: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof529 } st_case_529: -//line plugins/parsers/influx/machine.go:19868 +//line plugins/parsers/influx/machine.go:19990 switch ( m.data)[( m.p)] { - case 9: - goto tr809 case 10: - goto tr810 + goto tr101 case 11: - goto tr811 - case 12: - goto tr757 + goto tr634 case 13: - goto tr812 + goto st32 case 32: - goto tr809 - case 34: - goto tr157 + goto tr499 case 44: - goto tr813 - case 61: - goto st6 + goto tr501 case 92: - goto st86 - case 114: - goto st136 - } - goto st80 -tr259: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st137 - st137: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof137 - } - st_case_137: -//line plugins/parsers/influx/machine.go:19905 - switch ( m.data)[( m.p)] { - case 34: - goto st98 - case 92: - goto st138 + goto st94 } switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr47 + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st530 } case ( m.data)[( m.p)] >= 9: - goto tr47 + goto tr499 } - goto st45 - st138: -//line plugins/parsers/influx/machine.go.rl:240 - ( m.p)-- - + 
goto st1 + st530: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof138 + goto _test_eof530 } - st_case_138: -//line plugins/parsers/influx/machine.go:19929 + st_case_530: switch ( m.data)[( m.p)] { - case 9: - goto st6 case 10: - goto tr29 - case 12: - goto tr47 + goto tr730 + case 11: + goto tr812 case 13: - goto st7 + goto tr732 case 32: - goto st6 - case 34: - goto tr261 + goto tr811 case 44: - goto st6 - case 61: - goto tr262 + goto tr813 case 92: - goto st137 + goto st94 } - goto st98 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st530 + } + case ( m.data)[( m.p)] >= 9: + goto tr811 + } + goto st1 st139: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof139 } st_case_139: switch ( m.data)[( m.p)] { - case 9: - goto tr89 case 10: - goto tr29 + goto tr28 case 11: - goto tr90 - case 12: - goto tr1 + goto tr88 case 13: - goto st7 + goto st6 case 32: - goto tr89 + goto tr87 case 34: - goto tr317 + goto tr89 case 44: - goto tr92 + goto tr90 case 92: - goto st141 - } - switch { - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st532 - } - case ( m.data)[( m.p)] >= 43: goto st140 } - goto st30 -tr317: - ( m.cs) = 530 -//line plugins/parsers/influx/machine.go.rl:140 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st530: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof530 - } - st_case_530: -//line plugins/parsers/influx/machine.go:20003 - switch ( m.data)[( m.p)] { - case 10: - goto tr103 - case 11: - goto tr637 - case 13: - goto st33 - case 32: - goto tr501 - case 44: - goto tr503 - case 92: - goto st95 - } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st531 } case ( m.data)[( m.p)] >= 9: - goto tr501 + goto tr87 } - goto st1 + goto st29 st531: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof531 @@ -20030,17 +20075,19 @@ tr317: st_case_531: switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr636 case 11: - goto tr818 + goto tr637 case 13: - goto tr736 + goto tr638 case 32: - goto tr641 + goto tr635 + case 34: + goto tr89 case 44: - goto tr819 + goto tr639 case 92: - goto st95 + goto st140 } switch { case ( m.data)[( m.p)] > 12: @@ -20048,84 +20095,26 @@ tr317: goto st531 } case ( m.data)[( m.p)] >= 9: - goto tr641 + goto tr635 } - goto st1 + goto st29 +tr85: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st140 st140: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof140 } st_case_140: - switch ( m.data)[( m.p)] { - case 9: - goto tr89 - case 10: - goto tr29 - case 11: - goto tr90 - case 12: - goto tr1 - case 13: - goto st7 - case 32: - goto tr89 - case 34: - goto tr91 - case 44: - goto tr92 - case 92: - goto st141 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st532 - } - goto st30 - st532: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof532 - } - st_case_532: - switch ( m.data)[( m.p)] { - case 9: - goto tr638 - case 10: - goto tr639 - case 11: - goto tr640 - case 12: - goto tr641 - case 13: - goto tr642 - case 32: - goto tr638 - case 34: - goto tr91 - case 44: - goto tr643 - case 92: - goto st141 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st532 - } - goto st30 -tr87: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st141 - st141: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof141 - } - st_case_141: -//line 
plugins/parsers/influx/machine.go:20124 +//line plugins/parsers/influx/machine.go:20113 switch ( m.data)[( m.p)] { case 34: - goto st30 + goto st29 case 92: - goto st30 + goto st29 } switch { case ( m.data)[( m.p)] > 10: @@ -20136,215 +20125,264 @@ tr87: goto tr8 } goto st1 + st532: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof532 + } + st_case_532: + switch ( m.data)[( m.p)] { + case 10: + goto tr636 + case 11: + goto tr637 + case 13: + goto tr638 + case 32: + goto tr635 + case 34: + goto tr89 + case 44: + goto tr639 + case 46: + goto st406 + case 69: + goto st138 + case 92: + goto st140 + case 101: + goto st138 + case 105: + goto st534 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st533 + } + case ( m.data)[( m.p)] >= 9: + goto tr635 + } + goto st29 st533: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof533 } st_case_533: switch ( m.data)[( m.p)] { - case 9: - goto tr638 case 10: - goto tr639 + goto tr636 case 11: - goto tr640 - case 12: - goto tr641 + goto tr637 case 13: - goto tr642 - case 32: goto tr638 + case 32: + goto tr635 case 34: - goto tr91 + goto tr89 case 44: - goto tr643 + goto tr639 case 46: - goto st407 + goto st406 case 69: - goto st139 + goto st138 case 92: - goto st141 + goto st140 case 101: - goto st139 - case 105: - goto st535 + goto st138 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st534 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st533 + } + case ( m.data)[( m.p)] >= 9: + goto tr635 } - goto st30 + goto st29 st534: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof534 } st_case_534: switch ( m.data)[( m.p)] { - case 9: - goto tr638 case 10: - goto tr639 + goto tr817 case 11: - goto tr640 - case 12: - goto tr641 + goto tr818 case 13: - goto tr642 + goto tr793 case 32: - goto tr638 + goto tr816 case 34: - goto tr91 + goto tr89 case 44: - goto tr643 - case 46: - goto st407 - case 69: - goto st139 + goto tr819 case 92: - goto st141 - case 101: - goto st139 + goto st140 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st534 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr816 } - goto st30 + goto st29 st535: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof535 } st_case_535: switch ( m.data)[( m.p)] { - case 9: - goto tr822 case 10: - goto tr823 + goto tr636 case 11: - goto tr824 - case 12: - goto tr825 + goto tr637 case 13: - goto tr800 + goto tr638 case 32: - goto tr822 + goto tr635 case 34: - goto tr91 + goto tr89 case 44: - goto tr826 + goto tr639 + case 46: + goto st406 + case 69: + goto st138 case 92: - goto st141 + goto st140 + case 101: + goto st138 + case 105: + goto st534 } - goto st30 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st535 + } + case ( m.data)[( m.p)] >= 9: + goto tr635 + } + goto st29 +tr245: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st536 st536: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof536 } st_case_536: +//line plugins/parsers/influx/machine.go:20277 switch ( m.data)[( m.p)] { - case 9: - goto tr638 case 10: - goto tr639 + goto tr636 case 11: - goto tr640 - case 12: - goto tr641 + goto tr637 case 13: - goto tr642 - case 32: goto tr638 + case 32: + goto tr635 case 34: - goto tr91 + goto tr89 case 44: - goto tr643 + goto tr639 case 46: - goto st407 + goto st406 case 69: - goto st139 + goto st138 case 92: - goto st141 + goto st140 case 101: - goto st139 + goto st138 
case 105: - goto st535 + goto st534 + case 117: + goto st537 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st536 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st533 + } + case ( m.data)[( m.p)] >= 9: + goto tr635 } - goto st30 -tr247: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st537 + goto st29 st537: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof537 } st_case_537: -//line plugins/parsers/influx/machine.go:20286 switch ( m.data)[( m.p)] { - case 9: - goto tr638 case 10: - goto tr639 + goto tr822 case 11: - goto tr640 - case 12: - goto tr641 + goto tr823 case 13: - goto tr642 + goto tr799 case 32: - goto tr638 + goto tr821 case 34: - goto tr91 + goto tr89 case 44: - goto tr643 - case 46: - goto st407 - case 69: - goto st139 + goto tr824 case 92: - goto st141 - case 101: - goto st139 - case 105: - goto st535 - case 117: - goto st538 + goto st140 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st534 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr821 } - goto st30 + goto st29 +tr246: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st538 st538: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof538 } st_case_538: +//line plugins/parsers/influx/machine.go:20349 switch ( m.data)[( m.p)] { - case 9: - goto tr828 case 10: - goto tr829 + goto tr636 case 11: - goto tr830 - case 12: - goto tr831 + goto tr637 case 13: - goto tr806 + goto tr638 case 32: - goto tr828 + goto tr635 case 34: - goto tr91 + goto tr89 case 44: - goto tr832 + goto tr639 + case 46: + goto st406 + case 69: + goto st138 case 92: - goto st141 + goto st140 + case 101: + goto st138 + case 105: + goto st534 + case 117: + goto st537 } - goto st30 -tr248: -//line plugins/parsers/influx/machine.go.rl:20 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st538 + } + case ( m.data)[( m.p)] >= 9: + goto tr635 + } + goto st29 +tr247: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -20354,274 +20392,310 @@ tr248: goto _test_eof539 } st_case_539: -//line plugins/parsers/influx/machine.go:20358 +//line plugins/parsers/influx/machine.go:20396 switch ( m.data)[( m.p)] { - case 9: - goto tr638 case 10: - goto tr639 + goto tr803 case 11: - goto tr640 - case 12: - goto tr641 + goto tr827 case 13: - goto tr642 + goto tr805 case 32: - goto tr638 + goto tr826 case 34: - goto tr91 + goto tr89 case 44: - goto tr643 - case 46: - goto st407 - case 69: - goto st139 - case 92: - goto st141 - case 101: - goto st139 - case 105: - goto st535 - case 117: - goto st538 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st539 - } - goto st30 -tr249: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st540 - st540: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof540 - } - st_case_540: -//line plugins/parsers/influx/machine.go:20404 - switch ( m.data)[( m.p)] { - case 9: - goto tr834 - case 10: - goto tr810 - case 11: - goto tr835 - case 12: - goto tr836 - case 13: - goto tr812 - case 32: - goto tr834 - case 34: - goto tr91 - case 44: - goto tr837 + goto tr828 case 65: + goto st141 + case 92: + goto st140 + case 97: + goto st144 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr826 + } + goto st29 + st141: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof141 + } + st_case_141: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr88 + case 13: + goto 
st6 + case 32: + goto tr87 + case 34: + goto tr89 + case 44: + goto tr90 + case 76: goto st142 case 92: - goto st141 - case 97: - goto st145 + goto st140 } - goto st30 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + goto st29 st142: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof142 } st_case_142: switch ( m.data)[( m.p)] { - case 9: - goto tr89 case 10: - goto tr29 + goto tr28 case 11: - goto tr90 - case 12: - goto tr1 + goto tr88 case 13: - goto st7 + goto st6 case 32: - goto tr89 + goto tr87 case 34: - goto tr91 + goto tr89 case 44: - goto tr92 - case 76: + goto tr90 + case 83: goto st143 case 92: - goto st141 + goto st140 } - goto st30 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + goto st29 st143: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof143 } st_case_143: switch ( m.data)[( m.p)] { - case 9: - goto tr89 case 10: - goto tr29 + goto tr28 case 11: - goto tr90 - case 12: - goto tr1 + goto tr88 case 13: - goto st7 + goto st6 case 32: - goto tr89 + goto tr87 case 34: - goto tr91 + goto tr89 case 44: - goto tr92 - case 83: - goto st144 + goto tr90 + case 69: + goto st540 case 92: - goto st141 + goto st140 } - goto st30 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + goto st29 + st540: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof540 + } + st_case_540: + switch ( m.data)[( m.p)] { + case 10: + goto tr803 + case 11: + goto tr827 + case 13: + goto tr805 + case 32: + goto tr826 + case 34: + goto tr89 + case 44: + goto tr828 + case 92: + goto st140 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr826 + } + goto st29 st144: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof144 } st_case_144: switch ( m.data)[( m.p)] { - case 9: - goto tr89 case 10: - goto tr29 + goto tr28 case 11: + goto tr88 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr89 + case 44: goto tr90 - case 12: - goto tr1 - case 13: - goto st7 - case 32: - goto tr89 - case 34: - goto tr91 - case 44: - goto tr92 - case 69: - goto st541 case 92: - goto st141 + goto st140 + case 108: + goto st145 } - goto st30 - st541: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof541 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 } - st_case_541: - switch ( m.data)[( m.p)] { - case 9: - goto tr834 - case 10: - goto tr810 - case 11: - goto tr835 - case 12: - goto tr836 - case 13: - goto tr812 - case 32: - goto tr834 - case 34: - goto tr91 - case 44: - goto tr837 - case 92: - goto st141 - } - goto st30 + goto st29 st145: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof145 } st_case_145: switch ( m.data)[( m.p)] { - case 9: - goto tr89 case 10: - goto tr29 + goto tr28 case 11: - goto tr90 - case 12: - goto tr1 + goto tr88 case 13: - goto st7 + goto st6 case 32: - goto tr89 + goto tr87 case 34: - goto tr91 + goto tr89 case 44: - goto tr92 + goto tr90 case 92: - goto st141 - case 108: + goto st140 + case 115: goto st146 } - goto st30 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + goto st29 st146: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof146 } st_case_146: switch ( m.data)[( m.p)] { - case 9: - goto tr89 case 10: - goto tr29 + goto tr28 case 11: - goto tr90 - case 12: - goto tr1 + goto tr88 case 13: - goto st7 + goto st6 case 32: - goto tr89 + goto tr87 case 34: - goto tr91 + goto tr89 case 44: - goto tr92 + goto tr90 case 92: - goto st141 - case 115: - goto st147 + goto st140 + case 101: + goto st540 } - goto st30 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + 
goto tr87 + } + goto st29 +tr248: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st541 + st541: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof541 + } + st_case_541: +//line plugins/parsers/influx/machine.go:20619 + switch ( m.data)[( m.p)] { + case 10: + goto tr803 + case 11: + goto tr827 + case 13: + goto tr805 + case 32: + goto tr826 + case 34: + goto tr89 + case 44: + goto tr828 + case 82: + goto st147 + case 92: + goto st140 + case 114: + goto st148 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr826 + } + goto st29 st147: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof147 } st_case_147: switch ( m.data)[( m.p)] { - case 9: - goto tr89 case 10: - goto tr29 + goto tr28 case 11: - goto tr90 - case 12: - goto tr1 + goto tr88 case 13: - goto st7 + goto st6 case 32: - goto tr89 + goto tr87 case 34: - goto tr91 + goto tr89 case 44: - goto tr92 + goto tr90 + case 85: + goto st143 case 92: - goto st141 - case 101: - goto st541 + goto st140 } - goto st30 -tr250: -//line plugins/parsers/influx/machine.go.rl:20 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + goto st29 + st148: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof148 + } + st_case_148: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr88 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr89 + case 44: + goto tr90 + case 92: + goto st140 + case 117: + goto st146 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + goto st29 +tr249: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -20631,90 +20705,31 @@ tr250: goto _test_eof542 } st_case_542: -//line plugins/parsers/influx/machine.go:20635 +//line plugins/parsers/influx/machine.go:20709 switch ( m.data)[( m.p)] { - case 9: - goto tr834 case 10: - goto tr810 + goto tr803 case 11: - goto tr835 - case 12: - goto tr836 + goto tr827 case 13: - goto tr812 + goto tr805 case 32: - goto tr834 + goto tr826 case 34: - goto tr91 + goto tr89 case 44: - goto tr837 - case 82: - goto st148 + goto tr828 case 92: - goto st141 - case 114: - goto st149 - } - goto st30 - st148: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof148 - } - st_case_148: - switch ( m.data)[( m.p)] { - case 9: - goto tr89 - case 10: - goto tr29 - case 11: - goto tr90 - case 12: - goto tr1 - case 13: - goto st7 - case 32: - goto tr89 - case 34: - goto tr91 - case 44: - goto tr92 - case 85: + goto st140 + case 97: goto st144 - case 92: - goto st141 } - goto st30 - st149: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof149 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr826 } - st_case_149: - switch ( m.data)[( m.p)] { - case 9: - goto tr89 - case 10: - goto tr29 - case 11: - goto tr90 - case 12: - goto tr1 - case 13: - goto st7 - case 32: - goto tr89 - case 34: - goto tr91 - case 44: - goto tr92 - case 92: - goto st141 - case 117: - goto st147 - } - goto st30 -tr251: -//line plugins/parsers/influx/machine.go.rl:20 + goto st29 +tr250: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -20724,622 +20739,643 @@ tr251: goto _test_eof543 } st_case_543: -//line plugins/parsers/influx/machine.go:20728 +//line plugins/parsers/influx/machine.go:20743 switch ( m.data)[( m.p)] { - case 9: - goto tr834 case 10: - goto tr810 + goto tr803 case 11: - goto tr835 - case 12: - goto tr836 + goto tr827 case 13: - goto tr812 + goto tr805 case 32: - goto tr834 + goto tr826 case 34: - goto tr91 + goto tr89 case 44: - goto tr837 + goto tr828 case 92: - goto st141 - case 
97: - goto st145 + goto st140 + case 114: + goto st148 } - goto st30 -tr252: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st544 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr826 + } + goto st29 st544: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof544 } st_case_544: -//line plugins/parsers/influx/machine.go:20763 switch ( m.data)[( m.p)] { - case 9: - goto tr834 case 10: - goto tr810 + goto tr600 case 11: - goto tr835 - case 12: - goto tr836 + goto tr628 case 13: - goto tr812 + goto tr602 case 32: - goto tr834 + goto tr627 case 34: - goto tr91 + goto tr126 case 44: - goto tr837 + goto tr90 + case 61: + goto tr127 case 92: - goto st141 - case 114: - goto st149 + goto st92 } - goto st30 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st545 + } + case ( m.data)[( m.p)] >= 9: + goto tr627 + } + goto st40 st545: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof545 } st_case_545: switch ( m.data)[( m.p)] { - case 9: - goto tr630 case 10: - goto tr603 + goto tr600 case 11: - goto tr631 - case 12: - goto tr509 + goto tr628 case 13: - goto tr605 + goto tr602 case 32: - goto tr630 + goto tr627 case 34: - goto tr128 + goto tr126 case 44: - goto tr92 + goto tr90 case 61: - goto tr129 + goto tr127 case 92: - goto st93 + goto st92 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st546 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st546 + } + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st41 + goto st40 st546: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof546 } st_case_546: switch ( m.data)[( m.p)] { - case 9: - goto tr630 case 10: - goto tr603 + goto tr600 case 11: - goto tr631 - case 12: - goto tr509 + goto tr628 case 13: - goto tr605 + goto tr602 case 32: - goto tr630 + goto tr627 case 34: - goto tr128 + goto tr126 case 44: - goto tr92 + goto tr90 case 61: - goto tr129 + goto tr127 case 92: - goto st93 + goto st92 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st547 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st547 + } + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st41 + goto st40 st547: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof547 } st_case_547: switch ( m.data)[( m.p)] { - case 9: - goto tr630 case 10: - goto tr603 + goto tr600 case 11: - goto tr631 - case 12: - goto tr509 + goto tr628 case 13: - goto tr605 + goto tr602 case 32: - goto tr630 + goto tr627 case 34: - goto tr128 + goto tr126 case 44: - goto tr92 + goto tr90 case 61: - goto tr129 + goto tr127 case 92: - goto st93 + goto st92 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st548 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st548 + } + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st41 + goto st40 st548: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof548 } st_case_548: switch ( m.data)[( m.p)] { - case 9: - goto tr630 case 10: - goto tr603 + goto tr600 case 11: - goto tr631 - case 12: - goto tr509 + goto tr628 case 13: - goto tr605 + goto tr602 case 32: - goto tr630 + goto tr627 case 34: - goto tr128 + goto tr126 case 44: - goto tr92 + goto tr90 case 61: - goto tr129 + goto tr127 case 92: - goto st93 + goto st92 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st549 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] 
<= 57 { + goto st549 + } + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st41 + goto st40 st549: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof549 } st_case_549: switch ( m.data)[( m.p)] { - case 9: - goto tr630 case 10: - goto tr603 + goto tr600 case 11: - goto tr631 - case 12: - goto tr509 + goto tr628 case 13: - goto tr605 + goto tr602 case 32: - goto tr630 + goto tr627 case 34: - goto tr128 + goto tr126 case 44: - goto tr92 + goto tr90 case 61: - goto tr129 + goto tr127 case 92: - goto st93 + goto st92 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st550 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st550 + } + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st41 + goto st40 st550: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof550 } st_case_550: switch ( m.data)[( m.p)] { - case 9: - goto tr630 case 10: - goto tr603 + goto tr600 case 11: - goto tr631 - case 12: - goto tr509 + goto tr628 case 13: - goto tr605 + goto tr602 case 32: - goto tr630 + goto tr627 case 34: - goto tr128 + goto tr126 case 44: - goto tr92 + goto tr90 case 61: - goto tr129 + goto tr127 case 92: - goto st93 + goto st92 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st551 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st551 + } + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st41 + goto st40 st551: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof551 } st_case_551: switch ( m.data)[( m.p)] { - case 9: - goto tr630 case 10: - goto tr603 + goto tr600 case 11: - goto tr631 - case 12: - goto tr509 + goto tr628 case 13: - goto tr605 + goto tr602 case 32: - goto tr630 + goto tr627 case 34: - goto tr128 + goto tr126 case 44: - goto tr92 + goto tr90 case 61: - goto tr129 + goto tr127 case 92: - goto st93 + goto st92 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st552 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st552 + } + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st41 + goto st40 st552: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof552 } st_case_552: switch ( m.data)[( m.p)] { - case 9: - goto tr630 case 10: - goto tr603 + goto tr600 case 11: - goto tr631 - case 12: - goto tr509 + goto tr628 case 13: - goto tr605 + goto tr602 case 32: - goto tr630 + goto tr627 case 34: - goto tr128 + goto tr126 case 44: - goto tr92 + goto tr90 case 61: - goto tr129 + goto tr127 case 92: - goto st93 + goto st92 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st553 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st553 + } + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st41 + goto st40 st553: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof553 } st_case_553: switch ( m.data)[( m.p)] { - case 9: - goto tr630 case 10: - goto tr603 + goto tr600 case 11: - goto tr631 - case 12: - goto tr509 + goto tr628 case 13: - goto tr605 + goto tr602 case 32: - goto tr630 + goto tr627 case 34: - goto tr128 + goto tr126 case 44: - goto tr92 + goto tr90 case 61: - goto tr129 + goto tr127 case 92: - goto st93 + goto st92 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st554 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st554 + } + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st41 + goto st40 st554: if ( m.p)++; ( m.p) == ( m.pe) { goto 
_test_eof554 } st_case_554: switch ( m.data)[( m.p)] { - case 9: - goto tr630 case 10: - goto tr603 + goto tr600 case 11: - goto tr631 - case 12: - goto tr509 + goto tr628 case 13: - goto tr605 + goto tr602 case 32: - goto tr630 + goto tr627 case 34: - goto tr128 + goto tr126 case 44: - goto tr92 + goto tr90 case 61: - goto tr129 + goto tr127 case 92: - goto st93 + goto st92 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st555 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st555 + } + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st41 + goto st40 st555: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof555 } st_case_555: switch ( m.data)[( m.p)] { - case 9: - goto tr630 case 10: - goto tr603 + goto tr600 case 11: - goto tr631 - case 12: - goto tr509 + goto tr628 case 13: - goto tr605 + goto tr602 case 32: - goto tr630 + goto tr627 case 34: - goto tr128 + goto tr126 case 44: - goto tr92 + goto tr90 case 61: - goto tr129 + goto tr127 case 92: - goto st93 + goto st92 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st556 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st556 + } + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st41 + goto st40 st556: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof556 } st_case_556: switch ( m.data)[( m.p)] { - case 9: - goto tr630 case 10: - goto tr603 + goto tr600 case 11: - goto tr631 - case 12: - goto tr509 + goto tr628 case 13: - goto tr605 + goto tr602 case 32: - goto tr630 + goto tr627 case 34: - goto tr128 + goto tr126 case 44: - goto tr92 + goto tr90 case 61: - goto tr129 + goto tr127 case 92: - goto st93 + goto st92 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st557 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st557 + } + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st41 + goto st40 st557: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof557 } st_case_557: switch ( m.data)[( m.p)] { - case 9: - goto tr630 case 10: - goto tr603 + goto tr600 case 11: - goto tr631 - case 12: - goto tr509 + goto tr628 case 13: - goto tr605 + goto tr602 case 32: - goto tr630 + goto tr627 case 34: - goto tr128 + goto tr126 case 44: - goto tr92 + goto tr90 case 61: - goto tr129 + goto tr127 case 92: - goto st93 + goto st92 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st558 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st558 + } + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st41 + goto st40 st558: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof558 } st_case_558: switch ( m.data)[( m.p)] { - case 9: - goto tr630 case 10: - goto tr603 + goto tr600 case 11: - goto tr631 - case 12: - goto tr509 + goto tr628 case 13: - goto tr605 + goto tr602 case 32: - goto tr630 + goto tr627 case 34: - goto tr128 + goto tr126 case 44: - goto tr92 + goto tr90 case 61: - goto tr129 + goto tr127 case 92: - goto st93 + goto st92 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st559 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st559 + } + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st41 + goto st40 st559: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof559 } st_case_559: switch ( m.data)[( m.p)] { - case 9: - goto tr630 case 10: - goto tr603 + goto tr600 case 11: - goto tr631 - 
case 12: - goto tr509 + goto tr628 case 13: - goto tr605 + goto tr602 case 32: - goto tr630 + goto tr627 case 34: - goto tr128 + goto tr126 case 44: - goto tr92 + goto tr90 case 61: - goto tr129 + goto tr127 case 92: - goto st93 + goto st92 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st560 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st560 + } + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st41 + goto st40 st560: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof560 } st_case_560: switch ( m.data)[( m.p)] { - case 9: - goto tr630 case 10: - goto tr603 + goto tr600 case 11: - goto tr631 - case 12: - goto tr509 + goto tr628 case 13: - goto tr605 + goto tr602 case 32: - goto tr630 + goto tr627 case 34: - goto tr128 + goto tr126 case 44: - goto tr92 + goto tr90 case 61: - goto tr129 + goto tr127 case 92: - goto st93 + goto st92 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st561 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st561 + } + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st41 + goto st40 st561: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof561 } st_case_561: switch ( m.data)[( m.p)] { - case 9: - goto tr630 case 10: - goto tr603 + goto tr600 case 11: - goto tr631 - case 12: - goto tr509 + goto tr628 case 13: - goto tr605 + goto tr602 case 32: - goto tr630 + goto tr627 case 34: - goto tr128 + goto tr126 case 44: - goto tr92 + goto tr90 case 61: - goto tr129 + goto tr127 case 92: - goto st93 + goto st92 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st562 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr627 } - goto st41 - st562: + goto st40 +tr211: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st149 + st149: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof562 + goto _test_eof149 } - st_case_562: + st_case_149: +//line plugins/parsers/influx/machine.go:21348 switch ( m.data)[( m.p)] { - case 9: - goto tr630 case 10: - goto tr603 + goto tr28 case 11: - goto tr631 - case 12: - goto tr509 + goto tr179 case 13: - goto tr605 + goto st6 case 32: - goto tr630 + goto tr178 case 34: - goto tr128 + goto tr89 case 44: - goto tr92 - case 61: - goto tr129 + goto tr180 + case 46: + goto st150 + case 48: + goto st586 case 92: - goto st93 + goto st155 } - goto st41 -tr213: -//line plugins/parsers/influx/machine.go.rl:20 + switch { + case ( m.data)[( m.p)] > 12: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st589 + } + case ( m.data)[( m.p)] >= 9: + goto tr178 + } + goto st53 +tr212: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -21349,37 +21385,247 @@ tr213: goto _test_eof150 } st_case_150: -//line plugins/parsers/influx/machine.go:21353 +//line plugins/parsers/influx/machine.go:21389 switch ( m.data)[( m.p)] { - case 9: - goto tr180 case 10: - goto tr29 + goto tr28 case 11: - goto tr181 - case 12: - goto tr1 + goto tr179 case 13: - goto st7 + goto st6 case 32: - goto tr180 + goto tr178 case 34: - goto tr91 + goto tr89 case 44: - goto tr182 - case 46: - goto st151 - case 48: - goto st587 + goto tr180 case 92: - goto st156 + goto st155 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st590 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st562 + } + case ( m.data)[( m.p)] >= 9: + goto tr178 } - goto st54 -tr214: -//line 
plugins/parsers/influx/machine.go.rl:20 + goto st53 + st562: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof562 + } + st_case_562: + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 11: + goto tr851 + case 13: + goto tr533 + case 32: + goto tr850 + case 34: + goto tr89 + case 44: + goto tr852 + case 69: + goto st153 + case 92: + goto st155 + case 101: + goto st153 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st562 + } + case ( m.data)[( m.p)] >= 9: + goto tr850 + } + goto st53 +tr851: + ( m.cs) = 563 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr883: + ( m.cs) = 563 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr887: + ( m.cs) = 563 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr892: + ( m.cs) = 563 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st563: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof563 + } + st_case_563: +//line plugins/parsers/influx/machine.go:21546 + switch ( m.data)[( m.p)] { + case 10: + goto tr273 + case 11: + goto tr855 + case 13: + goto st102 + case 32: + goto tr854 + case 34: + goto tr122 + case 44: + goto tr180 + case 45: + goto tr856 + case 61: + goto st53 + case 92: + goto tr184 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr857 + } + case ( m.data)[( m.p)] >= 9: + goto tr854 + } + goto tr182 +tr855: + ( m.cs) = 564 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again + st564: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof564 + } + st_case_564: +//line plugins/parsers/influx/machine.go:21598 + switch ( m.data)[( m.p)] { + case 10: + goto tr273 + case 11: + goto tr855 + case 13: + goto st102 + case 32: + goto tr854 + case 34: + goto tr122 + case 44: + goto tr180 + case 45: + goto tr856 + case 61: + goto tr187 + case 92: + goto tr184 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr857 + } + case ( m.data)[( m.p)] >= 9: + goto tr854 + } + goto tr182 +tr856: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ 
-21389,243 +21635,143 @@ tr214: goto _test_eof151 } st_case_151: -//line plugins/parsers/influx/machine.go:21393 +//line plugins/parsers/influx/machine.go:21639 switch ( m.data)[( m.p)] { - case 9: - goto tr180 case 10: - goto tr29 + goto tr28 case 11: - goto tr181 - case 12: - goto tr1 - case 13: - goto st7 - case 32: - goto tr180 - case 34: - goto tr91 - case 44: - goto tr182 - case 92: - goto st156 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st563 - } - goto st54 - st563: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof563 - } - st_case_563: - switch ( m.data)[( m.p)] { - case 9: - goto tr859 - case 10: - goto tr534 - case 11: - goto tr860 - case 12: - goto tr641 - case 13: - goto tr536 - case 32: - goto tr859 - case 34: - goto tr91 - case 44: - goto tr861 - case 69: - goto st154 - case 92: - goto st156 - case 101: - goto st154 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st563 - } - goto st54 -tr860: - ( m.cs) = 564 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:122 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr892: - ( m.cs) = 564 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:104 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr896: - ( m.cs) = 564 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:113 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr901: - ( m.cs) = 564 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:131 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st564: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof564 - } - st_case_564: -//line plugins/parsers/influx/machine.go:21548 - switch ( m.data)[( m.p)] { - case 9: - goto tr863 - case 10: - goto tr275 - case 11: - goto tr864 - case 12: - goto tr501 - case 13: - goto st103 - case 32: - goto tr863 - case 34: - goto tr124 - case 44: - goto tr182 - case 45: - goto tr865 - case 61: - goto st54 - case 92: goto tr186 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr126 + case 44: + goto tr180 + case 61: + goto tr187 + case 92: + goto st152 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr866 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st565 + } + case ( m.data)[( m.p)] >= 9: + goto tr178 } - goto tr184 -tr864: - ( m.cs) = 565 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:20 + 
goto st55 +tr857: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto _again + goto st565 st565: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof565 } st_case_565: -//line plugins/parsers/influx/machine.go:21599 +//line plugins/parsers/influx/machine.go:21678 switch ( m.data)[( m.p)] { - case 9: - goto tr863 case 10: - goto tr275 + goto tr674 case 11: - goto tr864 - case 12: - goto tr501 + goto tr859 case 13: - goto st103 + goto tr676 case 32: - goto tr863 + goto tr858 case 34: - goto tr124 + goto tr126 case 44: - goto tr182 - case 45: - goto tr865 + goto tr180 case 61: - goto tr189 + goto tr187 case 92: - goto tr186 + goto st152 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr866 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st567 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto tr184 -tr865: -//line plugins/parsers/influx/machine.go.rl:20 + goto st55 +tr862: + ( m.cs) = 566 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again +tr859: + ( m.cs) = 566 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st566: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof566 + } + st_case_566: +//line plugins/parsers/influx/machine.go:21751 + switch ( m.data)[( m.p)] { + case 10: + goto tr273 + case 11: + goto tr862 + case 13: + goto st102 + case 32: + goto tr861 + case 34: + goto tr122 + case 44: + goto tr180 + case 61: + goto tr187 + case 92: + goto tr184 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr861 + } + goto tr182 +tr184: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -21635,157 +21781,12 @@ tr865: goto _test_eof152 } st_case_152: -//line plugins/parsers/influx/machine.go:21639 - switch ( m.data)[( m.p)] { - case 9: - goto tr180 - case 10: - goto tr29 - case 11: - goto tr188 - case 12: - goto tr1 - case 13: - goto st7 - case 32: - goto tr180 - case 34: - goto tr128 - case 44: - goto tr182 - case 61: - goto tr189 - case 92: - goto st153 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st566 - } - goto st56 -tr866: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st566 - st566: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof566 - } - st_case_566: -//line plugins/parsers/influx/machine.go:21677 - switch ( m.data)[( m.p)] { - case 9: - goto tr867 - case 10: - goto tr678 - case 11: - goto tr868 - case 12: - goto tr509 - case 13: - goto tr680 - case 32: - goto tr867 - case 34: - goto tr128 - case 44: - goto tr182 - case 61: - goto tr189 - case 92: - goto st153 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st568 - } - goto st56 -tr871: - ( m.cs) = 567 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto _again -tr868: - ( m.cs) = 567 -//line plugins/parsers/influx/machine.go.rl:78 - - err = 
m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:149 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st567: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof567 - } - st_case_567: -//line plugins/parsers/influx/machine.go:21749 - switch ( m.data)[( m.p)] { - case 9: - goto tr870 - case 10: - goto tr275 - case 11: - goto tr871 - case 12: - goto tr514 - case 13: - goto st103 - case 32: - goto tr870 - case 34: - goto tr124 - case 44: - goto tr182 - case 61: - goto tr189 - case 92: - goto tr186 - } - goto tr184 -tr186: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st153 - st153: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof153 - } - st_case_153: -//line plugins/parsers/influx/machine.go:21784 +//line plugins/parsers/influx/machine.go:21785 switch ( m.data)[( m.p)] { case 34: - goto st56 + goto st55 case 92: - goto st56 + goto st55 } switch { case ( m.data)[( m.p)] > 10: @@ -21795,671 +21796,689 @@ tr186: case ( m.data)[( m.p)] >= 9: goto tr8 } - goto st11 + goto st10 + st567: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof567 + } + st_case_567: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr859 + case 13: + goto tr676 + case 32: + goto tr858 + case 34: + goto tr126 + case 44: + goto tr180 + case 61: + goto tr187 + case 92: + goto st152 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st568 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 + } + goto st55 st568: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof568 } st_case_568: switch ( m.data)[( m.p)] { - case 9: - goto tr867 case 10: - goto tr678 + goto tr674 case 11: - goto tr868 - case 12: - goto tr509 + goto tr859 case 13: - goto tr680 + goto tr676 case 32: - goto tr867 + goto tr858 case 34: - goto tr128 + goto tr126 case 44: - goto tr182 + goto tr180 case 61: - goto tr189 + goto tr187 case 92: - goto st153 + goto st152 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st569 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st569 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st56 + goto st55 st569: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof569 } st_case_569: switch ( m.data)[( m.p)] { - case 9: - goto tr867 case 10: - goto tr678 + goto tr674 case 11: - goto tr868 - case 12: - goto tr509 + goto tr859 case 13: - goto tr680 + goto tr676 case 32: - goto tr867 + goto tr858 case 34: - goto tr128 + goto tr126 case 44: - goto tr182 + goto tr180 case 61: - goto tr189 + goto tr187 case 92: - goto st153 + goto st152 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st570 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st570 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st56 + goto st55 st570: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof570 } st_case_570: switch ( m.data)[( m.p)] { - case 9: - goto tr867 case 10: - goto tr678 + goto tr674 case 11: - goto tr868 - case 12: - goto tr509 + goto tr859 case 13: - goto tr680 + goto tr676 case 32: - goto tr867 + goto tr858 case 34: - goto tr128 + goto tr126 case 44: - goto tr182 + goto tr180 case 61: - goto tr189 + goto tr187 case 92: - goto st153 + goto st152 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 
57 { - goto st571 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st571 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st56 + goto st55 st571: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof571 } st_case_571: switch ( m.data)[( m.p)] { - case 9: - goto tr867 case 10: - goto tr678 + goto tr674 case 11: - goto tr868 - case 12: - goto tr509 + goto tr859 case 13: - goto tr680 + goto tr676 case 32: - goto tr867 + goto tr858 case 34: - goto tr128 + goto tr126 case 44: - goto tr182 + goto tr180 case 61: - goto tr189 + goto tr187 case 92: - goto st153 + goto st152 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st572 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st572 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st56 + goto st55 st572: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof572 } st_case_572: switch ( m.data)[( m.p)] { - case 9: - goto tr867 case 10: - goto tr678 + goto tr674 case 11: - goto tr868 - case 12: - goto tr509 + goto tr859 case 13: - goto tr680 + goto tr676 case 32: - goto tr867 + goto tr858 case 34: - goto tr128 + goto tr126 case 44: - goto tr182 + goto tr180 case 61: - goto tr189 + goto tr187 case 92: - goto st153 + goto st152 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st573 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st573 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st56 + goto st55 st573: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof573 } st_case_573: switch ( m.data)[( m.p)] { - case 9: - goto tr867 case 10: - goto tr678 + goto tr674 case 11: - goto tr868 - case 12: - goto tr509 + goto tr859 case 13: - goto tr680 + goto tr676 case 32: - goto tr867 + goto tr858 case 34: - goto tr128 + goto tr126 case 44: - goto tr182 + goto tr180 case 61: - goto tr189 + goto tr187 case 92: - goto st153 + goto st152 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st574 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st574 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st56 + goto st55 st574: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof574 } st_case_574: switch ( m.data)[( m.p)] { - case 9: - goto tr867 case 10: - goto tr678 + goto tr674 case 11: - goto tr868 - case 12: - goto tr509 + goto tr859 case 13: - goto tr680 + goto tr676 case 32: - goto tr867 + goto tr858 case 34: - goto tr128 + goto tr126 case 44: - goto tr182 + goto tr180 case 61: - goto tr189 + goto tr187 case 92: - goto st153 + goto st152 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st575 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st575 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st56 + goto st55 st575: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof575 } st_case_575: switch ( m.data)[( m.p)] { - case 9: - goto tr867 case 10: - goto tr678 + goto tr674 case 11: - goto tr868 - case 12: - goto tr509 + goto tr859 case 13: - goto tr680 + goto tr676 case 32: - goto tr867 + goto tr858 case 34: - goto tr128 + goto tr126 case 44: - goto tr182 + goto tr180 case 61: - goto tr189 + goto tr187 case 92: - goto st153 + goto st152 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st576 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + 
goto st576 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st56 + goto st55 st576: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof576 } st_case_576: switch ( m.data)[( m.p)] { - case 9: - goto tr867 case 10: - goto tr678 + goto tr674 case 11: - goto tr868 - case 12: - goto tr509 + goto tr859 case 13: - goto tr680 + goto tr676 case 32: - goto tr867 + goto tr858 case 34: - goto tr128 + goto tr126 case 44: - goto tr182 + goto tr180 case 61: - goto tr189 + goto tr187 case 92: - goto st153 + goto st152 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st577 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st577 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st56 + goto st55 st577: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof577 } st_case_577: switch ( m.data)[( m.p)] { - case 9: - goto tr867 case 10: - goto tr678 + goto tr674 case 11: - goto tr868 - case 12: - goto tr509 + goto tr859 case 13: - goto tr680 + goto tr676 case 32: - goto tr867 + goto tr858 case 34: - goto tr128 + goto tr126 case 44: - goto tr182 + goto tr180 case 61: - goto tr189 + goto tr187 case 92: - goto st153 + goto st152 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st578 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st578 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st56 + goto st55 st578: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof578 } st_case_578: switch ( m.data)[( m.p)] { - case 9: - goto tr867 case 10: - goto tr678 + goto tr674 case 11: - goto tr868 - case 12: - goto tr509 + goto tr859 case 13: - goto tr680 + goto tr676 case 32: - goto tr867 + goto tr858 case 34: - goto tr128 + goto tr126 case 44: - goto tr182 + goto tr180 case 61: - goto tr189 + goto tr187 case 92: - goto st153 + goto st152 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st579 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st579 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st56 + goto st55 st579: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof579 } st_case_579: switch ( m.data)[( m.p)] { - case 9: - goto tr867 case 10: - goto tr678 + goto tr674 case 11: - goto tr868 - case 12: - goto tr509 + goto tr859 case 13: - goto tr680 + goto tr676 case 32: - goto tr867 + goto tr858 case 34: - goto tr128 + goto tr126 case 44: - goto tr182 + goto tr180 case 61: - goto tr189 + goto tr187 case 92: - goto st153 + goto st152 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st580 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st580 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st56 + goto st55 st580: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof580 } st_case_580: switch ( m.data)[( m.p)] { - case 9: - goto tr867 case 10: - goto tr678 + goto tr674 case 11: - goto tr868 - case 12: - goto tr509 + goto tr859 case 13: - goto tr680 + goto tr676 case 32: - goto tr867 + goto tr858 case 34: - goto tr128 + goto tr126 case 44: - goto tr182 + goto tr180 case 61: - goto tr189 + goto tr187 case 92: - goto st153 + goto st152 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st581 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st581 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st56 + goto st55 st581: if ( m.p)++; ( m.p) == ( 
m.pe) { goto _test_eof581 } st_case_581: switch ( m.data)[( m.p)] { - case 9: - goto tr867 case 10: - goto tr678 + goto tr674 case 11: - goto tr868 - case 12: - goto tr509 + goto tr859 case 13: - goto tr680 + goto tr676 case 32: - goto tr867 + goto tr858 case 34: - goto tr128 + goto tr126 case 44: - goto tr182 + goto tr180 case 61: - goto tr189 + goto tr187 case 92: - goto st153 + goto st152 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st582 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st582 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st56 + goto st55 st582: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof582 } st_case_582: switch ( m.data)[( m.p)] { - case 9: - goto tr867 case 10: - goto tr678 + goto tr674 case 11: - goto tr868 - case 12: - goto tr509 + goto tr859 case 13: - goto tr680 + goto tr676 case 32: - goto tr867 + goto tr858 case 34: - goto tr128 + goto tr126 case 44: - goto tr182 + goto tr180 case 61: - goto tr189 + goto tr187 case 92: - goto st153 + goto st152 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st583 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st583 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st56 + goto st55 st583: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof583 } st_case_583: switch ( m.data)[( m.p)] { - case 9: - goto tr867 case 10: - goto tr678 + goto tr674 case 11: - goto tr868 - case 12: - goto tr509 + goto tr859 case 13: - goto tr680 + goto tr676 case 32: - goto tr867 + goto tr858 case 34: - goto tr128 + goto tr126 case 44: - goto tr182 + goto tr180 case 61: - goto tr189 + goto tr187 case 92: - goto st153 + goto st152 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st584 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st584 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st56 + goto st55 st584: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof584 } st_case_584: switch ( m.data)[( m.p)] { - case 9: - goto tr867 case 10: - goto tr678 + goto tr674 case 11: - goto tr868 - case 12: - goto tr509 + goto tr859 case 13: - goto tr680 + goto tr676 case 32: - goto tr867 + goto tr858 case 34: - goto tr128 + goto tr126 case 44: - goto tr182 + goto tr180 case 61: - goto tr189 + goto tr187 case 92: - goto st153 + goto st152 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st585 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr858 } - goto st56 - st585: + goto st55 + st153: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof585 + goto _test_eof153 } - st_case_585: + st_case_153: switch ( m.data)[( m.p)] { - case 9: - goto tr867 case 10: - goto tr678 + goto tr28 case 11: - goto tr868 - case 12: - goto tr509 + goto tr179 case 13: - goto tr680 + goto st6 case 32: - goto tr867 + goto tr178 case 34: - goto tr128 + goto tr315 case 44: - goto tr182 - case 61: - goto tr189 + goto tr180 case 92: - goto st153 + goto st155 } - goto st56 + switch { + case ( m.data)[( m.p)] < 43: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + case ( m.data)[( m.p)] > 45: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st585 + } + default: + goto st154 + } + goto st53 st154: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof154 } st_case_154: switch ( m.data)[( m.p)] { - case 9: - goto tr180 case 10: - goto tr29 + goto tr28 case 11: - goto tr181 - case 12: - 
goto tr1 + goto tr179 case 13: - goto st7 + goto st6 case 32: - goto tr180 + goto tr178 case 34: - goto tr317 + goto tr89 case 44: - goto tr182 + goto tr180 case 92: - goto st156 - } - switch { - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st586 - } - case ( m.data)[( m.p)] >= 43: goto st155 } - goto st54 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st585 + } + case ( m.data)[( m.p)] >= 9: + goto tr178 + } + goto st53 + st585: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof585 + } + st_case_585: + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 11: + goto tr851 + case 13: + goto tr533 + case 32: + goto tr850 + case 34: + goto tr89 + case 44: + goto tr852 + case 92: + goto st155 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st585 + } + case ( m.data)[( m.p)] >= 9: + goto tr850 + } + goto st53 +tr338: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st155 st155: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof155 } st_case_155: - switch ( m.data)[( m.p)] { - case 9: - goto tr180 - case 10: - goto tr29 - case 11: - goto tr181 - case 12: - goto tr1 - case 13: - goto st7 - case 32: - goto tr180 - case 34: - goto tr91 - case 44: - goto tr182 - case 92: - goto st156 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st586 - } - goto st54 - st586: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof586 - } - st_case_586: - switch ( m.data)[( m.p)] { - case 9: - goto tr859 - case 10: - goto tr534 - case 11: - goto tr860 - case 12: - goto tr641 - case 13: - goto tr536 - case 32: - goto tr859 - case 34: - goto tr91 - case 44: - goto tr861 - case 92: - goto st156 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st586 - } - goto st54 -tr340: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st156 - st156: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof156 - } - st_case_156: -//line plugins/parsers/influx/machine.go:22458 +//line plugins/parsers/influx/machine.go:22477 switch ( m.data)[( m.p)] { case 34: - goto st54 + goto st53 case 92: - goto st54 + goto st53 } switch { case ( m.data)[( m.p)] > 10: @@ -22470,215 +22489,264 @@ tr340: goto tr8 } goto st1 + st586: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof586 + } + st_case_586: + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 11: + goto tr851 + case 13: + goto tr533 + case 32: + goto tr850 + case 34: + goto tr89 + case 44: + goto tr852 + case 46: + goto st562 + case 69: + goto st153 + case 92: + goto st155 + case 101: + goto st153 + case 105: + goto st588 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st587 + } + case ( m.data)[( m.p)] >= 9: + goto tr850 + } + goto st53 st587: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof587 } st_case_587: switch ( m.data)[( m.p)] { - case 9: - goto tr859 case 10: - goto tr534 + goto tr532 case 11: - goto tr860 - case 12: - goto tr641 + goto tr851 case 13: - goto tr536 + goto tr533 case 32: - goto tr859 + goto tr850 case 34: - goto tr91 + goto tr89 case 44: - goto tr861 + goto tr852 case 46: - goto st563 + goto st562 case 69: - goto st154 + goto st153 case 92: - goto st156 + goto st155 case 101: - goto st154 - case 105: - goto st589 + goto st153 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st588 + switch { + case ( m.data)[( m.p)] > 12: + if 48 
<= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st587 + } + case ( m.data)[( m.p)] >= 9: + goto tr850 } - goto st54 + goto st53 st588: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof588 } st_case_588: switch ( m.data)[( m.p)] { - case 9: - goto tr859 case 10: - goto tr534 + goto tr737 case 11: - goto tr860 - case 12: - goto tr641 + goto tr883 case 13: - goto tr536 + goto tr739 case 32: - goto tr859 + goto tr882 case 34: - goto tr91 + goto tr89 case 44: - goto tr861 - case 46: - goto st563 - case 69: - goto st154 + goto tr884 case 92: - goto st156 - case 101: - goto st154 + goto st155 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st588 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr882 } - goto st54 + goto st53 st589: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof589 } st_case_589: switch ( m.data)[( m.p)] { - case 9: - goto tr891 case 10: - goto tr741 + goto tr532 case 11: - goto tr892 - case 12: - goto tr825 + goto tr851 case 13: - goto tr744 + goto tr533 case 32: - goto tr891 + goto tr850 case 34: - goto tr91 + goto tr89 case 44: - goto tr893 + goto tr852 + case 46: + goto st562 + case 69: + goto st153 case 92: - goto st156 + goto st155 + case 101: + goto st153 + case 105: + goto st588 } - goto st54 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st589 + } + case ( m.data)[( m.p)] >= 9: + goto tr850 + } + goto st53 +tr213: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st590 st590: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof590 } st_case_590: +//line plugins/parsers/influx/machine.go:22641 switch ( m.data)[( m.p)] { - case 9: - goto tr859 case 10: - goto tr534 + goto tr532 case 11: - goto tr860 - case 12: - goto tr641 + goto tr851 case 13: - goto tr536 + goto tr533 case 32: - goto tr859 + goto tr850 case 34: - goto tr91 + goto tr89 case 44: - goto tr861 + goto tr852 case 46: - goto st563 + goto st562 case 69: - goto st154 + goto st153 case 92: - goto st156 + goto st155 case 101: - goto st154 + goto st153 case 105: - goto st589 + goto st588 + case 117: + goto st591 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st590 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st587 + } + case ( m.data)[( m.p)] >= 9: + goto tr850 } - goto st54 -tr215: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st591 + goto st53 st591: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof591 } st_case_591: -//line plugins/parsers/influx/machine.go:22620 switch ( m.data)[( m.p)] { - case 9: - goto tr859 case 10: - goto tr534 + goto tr743 case 11: - goto tr860 - case 12: - goto tr641 + goto tr887 case 13: - goto tr536 + goto tr745 case 32: - goto tr859 + goto tr886 case 34: - goto tr91 + goto tr89 case 44: - goto tr861 - case 46: - goto st563 - case 69: - goto st154 + goto tr888 case 92: - goto st156 - case 101: - goto st154 - case 105: - goto st589 - case 117: - goto st592 + goto st155 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st588 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr886 } - goto st54 + goto st53 +tr214: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st592 st592: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof592 } st_case_592: +//line plugins/parsers/influx/machine.go:22713 switch ( m.data)[( m.p)] { - case 9: - goto tr895 case 10: - goto tr748 + goto tr532 case 11: - goto tr896 - case 12: - goto tr831 + 
goto tr851 case 13: - goto tr751 + goto tr533 case 32: - goto tr895 + goto tr850 case 34: - goto tr91 + goto tr89 case 44: - goto tr897 + goto tr852 + case 46: + goto st562 + case 69: + goto st153 case 92: - goto st156 + goto st155 + case 101: + goto st153 + case 105: + goto st588 + case 117: + goto st591 } - goto st54 -tr216: -//line plugins/parsers/influx/machine.go.rl:20 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st592 + } + case ( m.data)[( m.p)] >= 9: + goto tr850 + } + goto st53 +tr215: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -22688,274 +22756,310 @@ tr216: goto _test_eof593 } st_case_593: -//line plugins/parsers/influx/machine.go:22692 +//line plugins/parsers/influx/machine.go:22760 switch ( m.data)[( m.p)] { - case 9: - goto tr859 case 10: - goto tr534 + goto tr891 case 11: - goto tr860 - case 12: - goto tr641 + goto tr892 case 13: - goto tr536 + goto tr751 case 32: - goto tr859 + goto tr890 case 34: - goto tr91 + goto tr89 case 44: - goto tr861 - case 46: - goto st563 - case 69: - goto st154 - case 92: - goto st156 - case 101: - goto st154 - case 105: - goto st589 - case 117: - goto st592 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st593 - } - goto st54 -tr217: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st594 - st594: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof594 - } - st_case_594: -//line plugins/parsers/influx/machine.go:22738 - switch ( m.data)[( m.p)] { - case 9: - goto tr899 - case 10: - goto tr900 - case 11: - goto tr901 - case 12: - goto tr836 - case 13: - goto tr758 - case 32: - goto tr899 - case 34: - goto tr91 - case 44: - goto tr902 + goto tr893 case 65: + goto st156 + case 92: + goto st155 + case 97: + goto st159 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr890 + } + goto st53 + st156: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof156 + } + st_case_156: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr179 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr89 + case 44: + goto tr180 + case 76: goto st157 case 92: - goto st156 - case 97: - goto st160 + goto st155 } - goto st54 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto st53 st157: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof157 } st_case_157: switch ( m.data)[( m.p)] { - case 9: - goto tr180 case 10: - goto tr29 + goto tr28 case 11: - goto tr181 - case 12: - goto tr1 + goto tr179 case 13: - goto st7 + goto st6 case 32: - goto tr180 + goto tr178 case 34: - goto tr91 + goto tr89 case 44: - goto tr182 - case 76: + goto tr180 + case 83: goto st158 case 92: - goto st156 + goto st155 } - goto st54 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto st53 st158: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof158 } st_case_158: switch ( m.data)[( m.p)] { - case 9: - goto tr180 case 10: - goto tr29 + goto tr28 case 11: - goto tr181 - case 12: - goto tr1 + goto tr179 case 13: - goto st7 + goto st6 case 32: - goto tr180 + goto tr178 case 34: - goto tr91 + goto tr89 case 44: - goto tr182 - case 83: - goto st159 + goto tr180 + case 69: + goto st594 case 92: - goto st156 + goto st155 } - goto st54 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto st53 + st594: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof594 + } + st_case_594: + switch ( m.data)[( m.p)] { + case 10: + goto tr891 + case 11: + goto tr892 + case 13: + 
goto tr751 + case 32: + goto tr890 + case 34: + goto tr89 + case 44: + goto tr893 + case 92: + goto st155 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr890 + } + goto st53 st159: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof159 } st_case_159: switch ( m.data)[( m.p)] { - case 9: - goto tr180 case 10: - goto tr29 + goto tr28 case 11: - goto tr181 - case 12: - goto tr1 + goto tr179 case 13: - goto st7 + goto st6 case 32: + goto tr178 + case 34: + goto tr89 + case 44: goto tr180 - case 34: - goto tr91 - case 44: - goto tr182 - case 69: - goto st595 case 92: - goto st156 + goto st155 + case 108: + goto st160 } - goto st54 - st595: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof595 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 } - st_case_595: - switch ( m.data)[( m.p)] { - case 9: - goto tr899 - case 10: - goto tr900 - case 11: - goto tr901 - case 12: - goto tr836 - case 13: - goto tr758 - case 32: - goto tr899 - case 34: - goto tr91 - case 44: - goto tr902 - case 92: - goto st156 - } - goto st54 + goto st53 st160: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof160 } st_case_160: switch ( m.data)[( m.p)] { - case 9: - goto tr180 case 10: - goto tr29 + goto tr28 case 11: - goto tr181 - case 12: - goto tr1 + goto tr179 case 13: - goto st7 + goto st6 case 32: - goto tr180 + goto tr178 case 34: - goto tr91 + goto tr89 case 44: - goto tr182 + goto tr180 case 92: - goto st156 - case 108: + goto st155 + case 115: goto st161 } - goto st54 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto st53 st161: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof161 } st_case_161: switch ( m.data)[( m.p)] { - case 9: - goto tr180 case 10: - goto tr29 + goto tr28 case 11: - goto tr181 - case 12: - goto tr1 + goto tr179 case 13: - goto st7 + goto st6 case 32: - goto tr180 + goto tr178 case 34: - goto tr91 + goto tr89 case 44: - goto tr182 + goto tr180 case 92: - goto st156 - case 115: - goto st162 + goto st155 + case 101: + goto st594 } - goto st54 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto st53 +tr216: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st595 + st595: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof595 + } + st_case_595: +//line plugins/parsers/influx/machine.go:22983 + switch ( m.data)[( m.p)] { + case 10: + goto tr891 + case 11: + goto tr892 + case 13: + goto tr751 + case 32: + goto tr890 + case 34: + goto tr89 + case 44: + goto tr893 + case 82: + goto st162 + case 92: + goto st155 + case 114: + goto st163 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr890 + } + goto st53 st162: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof162 } st_case_162: switch ( m.data)[( m.p)] { - case 9: - goto tr180 case 10: - goto tr29 + goto tr28 case 11: - goto tr181 - case 12: - goto tr1 + goto tr179 case 13: - goto st7 + goto st6 case 32: - goto tr180 + goto tr178 case 34: - goto tr91 + goto tr89 case 44: - goto tr182 + goto tr180 + case 85: + goto st158 case 92: - goto st156 - case 101: - goto st595 + goto st155 } - goto st54 -tr218: -//line plugins/parsers/influx/machine.go.rl:20 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto st53 + st163: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof163 + } + st_case_163: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr179 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr89 + case 44: + goto tr180 + case 92: + goto st155 + case 117: + 
goto st161 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto st53 +tr217: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -22965,90 +23069,31 @@ tr218: goto _test_eof596 } st_case_596: -//line plugins/parsers/influx/machine.go:22969 +//line plugins/parsers/influx/machine.go:23073 switch ( m.data)[( m.p)] { - case 9: - goto tr899 case 10: - goto tr900 + goto tr891 case 11: - goto tr901 - case 12: - goto tr836 + goto tr892 case 13: - goto tr758 + goto tr751 case 32: - goto tr899 + goto tr890 case 34: - goto tr91 + goto tr89 case 44: - goto tr902 - case 82: - goto st163 + goto tr893 case 92: - goto st156 - case 114: - goto st164 - } - goto st54 - st163: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof163 - } - st_case_163: - switch ( m.data)[( m.p)] { - case 9: - goto tr180 - case 10: - goto tr29 - case 11: - goto tr181 - case 12: - goto tr1 - case 13: - goto st7 - case 32: - goto tr180 - case 34: - goto tr91 - case 44: - goto tr182 - case 85: + goto st155 + case 97: goto st159 - case 92: - goto st156 } - goto st54 - st164: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof164 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr890 } - st_case_164: - switch ( m.data)[( m.p)] { - case 9: - goto tr180 - case 10: - goto tr29 - case 11: - goto tr181 - case 12: - goto tr1 - case 13: - goto st7 - case 32: - goto tr180 - case 34: - goto tr91 - case 44: - goto tr182 - case 92: - goto st156 - case 117: - goto st162 - } - goto st54 -tr219: -//line plugins/parsers/influx/machine.go.rl:20 + goto st53 +tr218: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -23058,137 +23103,152 @@ tr219: goto _test_eof597 } st_case_597: -//line plugins/parsers/influx/machine.go:23062 +//line plugins/parsers/influx/machine.go:23107 switch ( m.data)[( m.p)] { - case 9: - goto tr899 case 10: - goto tr900 + goto tr891 case 11: - goto tr901 - case 12: - goto tr836 + goto tr892 case 13: - goto tr758 + goto tr751 case 32: - goto tr899 + goto tr890 case 34: - goto tr91 + goto tr89 case 44: - goto tr902 + goto tr893 case 92: - goto st156 - case 97: - goto st160 + goto st155 + case 114: + goto st163 } - goto st54 -tr220: -//line plugins/parsers/influx/machine.go.rl:20 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr890 + } + goto st53 + st164: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof164 + } + st_case_164: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr337 + case 13: + goto st6 + case 32: + goto st164 + case 34: + goto tr116 + case 35: + goto st6 + case 44: + goto st6 + case 92: + goto tr338 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st164 + } + goto tr335 +tr337: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st598 - st598: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof598 - } - st_case_598: -//line plugins/parsers/influx/machine.go:23097 - switch ( m.data)[( m.p)] { - case 9: - goto tr899 - case 10: - goto tr900 - case 11: - goto tr901 - case 12: - goto tr836 - case 13: - goto tr758 - case 32: - goto tr899 - case 34: - goto tr91 - case 44: - goto tr902 - case 92: - goto st156 - case 114: - goto st164 - } - goto st54 + goto st165 st165: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof165 } st_case_165: +//line plugins/parsers/influx/machine.go:23168 switch ( m.data)[( m.p)] { - case 9: - goto st165 case 10: - goto tr29 + goto tr28 case 11: + goto tr340 + case 13: + goto st6 + case 32: goto tr339 - case 12: - goto st8 - case 13: - goto st7 - case 32: - 
goto st165 case 34: - goto tr118 + goto tr83 case 35: - goto st6 + goto st53 case 44: - goto st6 + goto tr180 case 92: - goto tr340 + goto tr338 } - goto tr337 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr339 + } + goto tr335 tr339: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st166 - st166: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof166 - } - st_case_166: -//line plugins/parsers/influx/machine.go:23160 - switch ( m.data)[( m.p)] { - case 9: - goto tr341 - case 10: - goto tr29 - case 11: - goto tr342 - case 12: - goto tr38 - case 13: - goto st7 - case 32: - goto tr341 - case 34: - goto tr85 - case 35: - goto st54 - case 44: - goto tr182 - case 92: - goto tr340 - } - goto tr337 -tr341: - ( m.cs) = 167 -//line plugins/parsers/influx/machine.go.rl:78 + ( m.cs) = 166 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st166: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof166 + } + st_case_166: +//line plugins/parsers/influx/machine.go:23209 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr342 + case 13: + goto st6 + case 32: + goto st166 + case 34: + goto tr122 + case 35: + goto tr158 + case 44: + goto st6 + case 61: + goto tr335 + case 92: + goto tr184 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st166 + } + goto tr182 +tr342: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st167 +tr343: + ( m.cs) = 167 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; {( m.p)++; goto _out } } @@ -23198,51 +23258,42 @@ tr341: goto _test_eof167 } st_case_167: -//line plugins/parsers/influx/machine.go:23202 +//line plugins/parsers/influx/machine.go:23262 switch ( m.data)[( m.p)] { - case 9: - goto st167 case 10: - goto tr29 + goto tr28 case 11: - goto tr344 - case 12: - goto st10 + goto tr343 case 13: - goto st7 - case 32: - goto st167 - case 34: - goto tr124 - case 35: - goto tr160 - case 44: goto st6 + case 32: + goto tr339 + case 34: + goto tr122 + case 44: + goto tr180 case 61: - goto tr337 + goto tr344 case 92: - goto tr186 + goto tr184 } - goto tr184 -tr344: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st168 -tr345: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr339 + } + goto tr182 +tr340: ( m.cs) = 168 -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:78 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } @@ -23252,137 +23303,110 @@ tr345: goto _test_eof168 } st_case_168: -//line plugins/parsers/influx/machine.go:23256 +//line plugins/parsers/influx/machine.go:23307 switch ( m.data)[( m.p)] { - case 9: - goto tr341 case 10: - goto tr29 + goto tr28 case 11: - goto tr345 - case 12: - goto tr38 + goto tr343 case 13: - goto st7 + goto st6 case 32: - goto tr341 + goto tr339 case 34: - goto tr124 + goto tr122 case 44: - goto tr182 + goto tr180 case 61: - goto tr346 + goto tr335 case 92: - goto tr186 + goto tr184 } - goto tr184 -tr342: - ( m.cs) = 169 -//line plugins/parsers/influx/machine.go.rl:20 + if 9 <= ( m.data)[( m.p)] 
&& ( m.data)[( m.p)] <= 12 { + goto tr339 + } + goto tr182 +tr538: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again + goto st169 st169: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof169 } st_case_169: -//line plugins/parsers/influx/machine.go:23302 - switch ( m.data)[( m.p)] { - case 9: - goto tr341 - case 10: - goto tr29 - case 11: - goto tr345 - case 12: - goto tr38 - case 13: - goto st7 - case 32: - goto tr341 - case 34: - goto tr124 - case 44: - goto tr182 - case 61: - goto tr337 - case 92: - goto tr186 - } - goto tr184 -tr541: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st170 - st170: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof170 - } - st_case_170: -//line plugins/parsers/influx/machine.go:23337 +//line plugins/parsers/influx/machine.go:23341 switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr105 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st599 + goto st598 } goto st6 -tr542: -//line plugins/parsers/influx/machine.go.rl:20 +tr539: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st599 + goto st598 + st598: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof598 + } + st_case_598: +//line plugins/parsers/influx/machine.go:23365 + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st599 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st6 st599: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof599 } st_case_599: -//line plugins/parsers/influx/machine.go:23365 switch ( m.data)[( m.p)] { case 10: - goto tr678 - case 12: - goto tr469 + goto tr674 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st600 } case ( m.data)[( m.p)] >= 9: - goto tr677 + goto tr673 } goto st6 st600: @@ -23392,25 +23416,23 @@ tr542: st_case_600: switch ( m.data)[( m.p)] { case 10: - goto tr678 - case 12: - goto tr469 + goto tr674 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st601 } case ( m.data)[( m.p)] >= 9: - goto tr677 + goto tr673 } goto st6 st601: @@ -23420,25 +23442,23 @@ tr542: st_case_601: switch ( m.data)[( m.p)] { case 10: - goto tr678 - case 12: - goto tr469 + goto tr674 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st602 } case ( m.data)[( m.p)] >= 9: - goto tr677 + goto tr673 } goto st6 st602: @@ -23448,25 +23468,23 @@ tr542: st_case_602: switch ( m.data)[( m.p)] { case 10: - goto tr678 - case 12: - goto tr469 + goto tr674 case 13: - goto tr680 + goto 
tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st603 } case ( m.data)[( m.p)] >= 9: - goto tr677 + goto tr673 } goto st6 st603: @@ -23476,25 +23494,23 @@ tr542: st_case_603: switch ( m.data)[( m.p)] { case 10: - goto tr678 - case 12: - goto tr469 + goto tr674 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st604 } case ( m.data)[( m.p)] >= 9: - goto tr677 + goto tr673 } goto st6 st604: @@ -23504,25 +23520,23 @@ tr542: st_case_604: switch ( m.data)[( m.p)] { case 10: - goto tr678 - case 12: - goto tr469 + goto tr674 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st605 } case ( m.data)[( m.p)] >= 9: - goto tr677 + goto tr673 } goto st6 st605: @@ -23532,25 +23546,23 @@ tr542: st_case_605: switch ( m.data)[( m.p)] { case 10: - goto tr678 - case 12: - goto tr469 + goto tr674 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st606 } case ( m.data)[( m.p)] >= 9: - goto tr677 + goto tr673 } goto st6 st606: @@ -23560,25 +23572,23 @@ tr542: st_case_606: switch ( m.data)[( m.p)] { case 10: - goto tr678 - case 12: - goto tr469 + goto tr674 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st607 } case ( m.data)[( m.p)] >= 9: - goto tr677 + goto tr673 } goto st6 st607: @@ -23588,25 +23598,23 @@ tr542: st_case_607: switch ( m.data)[( m.p)] { case 10: - goto tr678 - case 12: - goto tr469 + goto tr674 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st608 } case ( m.data)[( m.p)] >= 9: - goto tr677 + goto tr673 } goto st6 st608: @@ -23616,25 +23624,23 @@ tr542: st_case_608: switch ( m.data)[( m.p)] { case 10: - goto tr678 - case 12: - goto tr469 + goto tr674 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st609 } case ( m.data)[( m.p)] >= 9: - goto tr677 + goto tr673 } goto st6 st609: @@ -23644,25 +23650,23 @@ tr542: st_case_609: switch ( m.data)[( m.p)] { case 10: - goto tr678 - case 12: - goto tr469 + goto tr674 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] 
> 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st610 } case ( m.data)[( m.p)] >= 9: - goto tr677 + goto tr673 } goto st6 st610: @@ -23672,25 +23676,23 @@ tr542: st_case_610: switch ( m.data)[( m.p)] { case 10: - goto tr678 - case 12: - goto tr469 + goto tr674 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st611 } case ( m.data)[( m.p)] >= 9: - goto tr677 + goto tr673 } goto st6 st611: @@ -23700,25 +23702,23 @@ tr542: st_case_611: switch ( m.data)[( m.p)] { case 10: - goto tr678 - case 12: - goto tr469 + goto tr674 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st612 } case ( m.data)[( m.p)] >= 9: - goto tr677 + goto tr673 } goto st6 st612: @@ -23728,25 +23728,23 @@ tr542: st_case_612: switch ( m.data)[( m.p)] { case 10: - goto tr678 - case 12: - goto tr469 + goto tr674 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st613 } case ( m.data)[( m.p)] >= 9: - goto tr677 + goto tr673 } goto st6 st613: @@ -23756,25 +23754,23 @@ tr542: st_case_613: switch ( m.data)[( m.p)] { case 10: - goto tr678 - case 12: - goto tr469 + goto tr674 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st614 } case ( m.data)[( m.p)] >= 9: - goto tr677 + goto tr673 } goto st6 st614: @@ -23784,25 +23780,23 @@ tr542: st_case_614: switch ( m.data)[( m.p)] { case 10: - goto tr678 - case 12: - goto tr469 + goto tr674 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st615 } case ( m.data)[( m.p)] >= 9: - goto tr677 + goto tr673 } goto st6 st615: @@ -23812,25 +23806,23 @@ tr542: st_case_615: switch ( m.data)[( m.p)] { case 10: - goto tr678 - case 12: - goto tr469 + goto tr674 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st616 } case ( m.data)[( m.p)] >= 9: - goto tr677 + goto tr673 } goto st6 st616: @@ -23840,332 +23832,373 @@ tr542: st_case_616: switch ( m.data)[( m.p)] { case 10: - goto tr678 - case 12: - goto tr469 + goto tr674 case 13: - goto tr680 + goto tr676 case 32: - goto tr677 + goto tr673 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st617 - } - case ( m.data)[( m.p)] >= 9: - goto tr677 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr673 } 
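// The long run of near-identical states in this region (st598 onward) is the
// unrolled form of a bounded digit loop: each state consumes one more ASCII
// digit (bytes 48-57) and advances to the next, exiting through
// tr673/tr674/tr676 on space, CR, or LF, and the final state in the chain
// accepts no further digits, which caps the run at a length a 64-bit value
// can hold. A minimal hand-written sketch of the same idea (assumed helper
// name, not part of the generated machine):
//
//	// acceptDigits consumes at most max consecutive ASCII digits starting
//	// at p and returns the new position, mirroring one unrolled state per
//	// digit in the Ragel output.
//	func acceptDigits(data []byte, p, max int) int {
//		for n := 0; p < len(data) && n < max; n++ {
//			if data[p] < '0' || data[p] > '9' {
//				break
//			}
//			p++
//		}
//		return p
//	}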
goto st6 - st617: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof617 - } - st_case_617: - switch ( m.data)[( m.p)] { - case 10: - goto tr678 - case 12: - goto tr469 - case 13: - goto tr680 - case 32: - goto tr677 - case 34: - goto tr31 - case 92: - goto st75 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr677 - } - goto st6 -tr926: -//line plugins/parsers/influx/machine.go.rl:20 +tr917: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st171 -tr537: - ( m.cs) = 171 -//line plugins/parsers/influx/machine.go.rl:122 + goto st170 +tr534: + ( m.cs) = 170 +//line plugins/parsers/influx/machine.go.rl:130 err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr933: - ( m.cs) = 171 -//line plugins/parsers/influx/machine.go.rl:104 +tr924: + ( m.cs) = 170 +//line plugins/parsers/influx/machine.go.rl:112 err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr936: - ( m.cs) = 171 -//line plugins/parsers/influx/machine.go.rl:113 +tr926: + ( m.cs) = 170 +//line plugins/parsers/influx/machine.go.rl:121 err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr940: - ( m.cs) = 171 -//line plugins/parsers/influx/machine.go.rl:131 +tr929: + ( m.cs) = 170 +//line plugins/parsers/influx/machine.go.rl:139 err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again - st171: + st170: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof171 + goto _test_eof170 } - st_case_171: -//line plugins/parsers/influx/machine.go:23951 + st_case_170: +//line plugins/parsers/influx/machine.go:23913 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 32: goto st6 case 34: - goto tr97 + goto tr95 case 44: goto st6 case 61: goto st6 case 92: - goto tr349 + goto tr347 } - goto tr348 -tr348: -//line plugins/parsers/influx/machine.go.rl:20 + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto tr346 +tr346: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p + goto st171 + st171: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof171 + } + st_case_171: +//line plugins/parsers/influx/machine.go:23945 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr349 + case 92: + goto st183 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st171 +tr349: +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() + goto st172 st172: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof172 } st_case_172: -//line plugins/parsers/influx/machine.go:23984 +//line plugins/parsers/influx/machine.go:23977 switch ( m.data)[( m.p)] { - case 9: - goto st6 case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 - case 32: - goto st6 + goto tr28 case 34: - goto tr100 - case 44: - goto st6 - case 61: goto tr351 - case 92: - goto st184 - } - goto st172 -tr351: -//line plugins/parsers/influx/machine.go.rl:100 - - m.key = m.text() - - goto st173 - st173: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof173 - } - st_case_173: -//line plugins/parsers/influx/machine.go:24017 - switch ( m.data)[( m.p)] { - case 10: - goto tr29 - case 12: - goto tr8 - 
case 13: - goto st7 - case 34: - goto tr353 case 45: - goto tr167 + goto tr165 case 46: - goto tr168 + goto tr166 case 48: - goto tr169 + goto tr167 case 70: - goto tr354 + goto tr352 case 84: - goto tr355 + goto tr353 case 92: - goto st75 + goto st73 case 102: - goto tr356 + goto tr354 case 116: - goto tr357 + goto tr355 } if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr170 + goto tr168 } goto st6 -tr353: - ( m.cs) = 618 -//line plugins/parsers/influx/machine.go.rl:140 +tr351: + ( m.cs) = 617 +//line plugins/parsers/influx/machine.go.rl:148 err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again + st617: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof617 + } + st_case_617: +//line plugins/parsers/influx/machine.go:24022 + switch ( m.data)[( m.p)] { + case 10: + goto tr665 + case 13: + goto tr667 + case 32: + goto tr916 + case 34: + goto tr25 + case 44: + goto tr917 + case 92: + goto tr26 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr916 + } + goto tr23 +tr167: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st618 st618: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof618 } st_case_618: -//line plugins/parsers/influx/machine.go:24066 +//line plugins/parsers/influx/machine.go:24052 switch ( m.data)[( m.p)] { case 10: - goto tr669 - case 12: - goto st272 + goto tr532 case 13: - goto tr671 + goto tr533 case 32: - goto tr925 + goto tr531 case 34: - goto tr26 + goto tr29 case 44: - goto tr926 + goto tr534 + case 46: + goto st325 + case 69: + goto st173 case 92: - goto tr27 + goto st73 + case 101: + goto st173 + case 105: + goto st623 + case 117: + goto st624 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr925 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st619 + } + case ( m.data)[( m.p)] >= 9: + goto tr531 } - goto tr23 -tr169: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st619 + goto st6 st619: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof619 } st_case_619: -//line plugins/parsers/influx/machine.go:24098 switch ( m.data)[( m.p)] { case 10: - goto tr534 - case 12: - goto tr535 + goto tr532 case 13: - goto tr536 - case 32: goto tr533 + case 32: + goto tr531 case 34: - goto tr31 + goto tr29 case 44: - goto tr537 + goto tr534 case 46: - goto st326 + goto st325 case 69: - goto st174 + goto st173 case 92: - goto st75 + goto st73 case 101: - goto st174 - case 105: - goto st624 - case 117: - goto st625 + goto st173 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st620 + goto st619 } case ( m.data)[( m.p)] >= 9: - goto tr533 + goto tr531 } goto st6 + st173: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof173 + } + st_case_173: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr356 + case 43: + goto st174 + case 45: + goto st174 + case 92: + goto st73 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st622 + } + goto st6 +tr356: + ( m.cs) = 620 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st620: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof620 } st_case_620: +//line plugins/parsers/influx/machine.go:24159 switch ( m.data)[( m.p)] { case 10: - goto tr534 - case 12: - goto 
tr535 + goto tr101 case 13: - goto tr536 + goto st32 case 32: - goto tr533 - case 34: - goto tr31 + goto st271 case 44: - goto tr537 - case 46: - goto st326 - case 69: - goto st174 - case 92: - goto st75 - case 101: - goto st174 + goto st35 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st620 + goto st621 } case ( m.data)[( m.p)] >= 9: - goto tr533 + goto st271 } - goto st6 + goto tr103 + st621: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof621 + } + st_case_621: + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 13: + goto tr732 + case 32: + goto tr921 + case 44: + goto tr922 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st621 + } + case ( m.data)[( m.p)] >= 9: + goto tr921 + } + goto tr103 st174: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof174 @@ -24173,62 +24206,16 @@ tr169: st_case_174: switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr358 - case 43: - goto st175 - case 45: - goto st175 + goto tr29 case 92: - goto st75 + goto st73 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st623 + goto st622 } goto st6 -tr358: - ( m.cs) = 621 -//line plugins/parsers/influx/machine.go.rl:140 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st621: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof621 - } - st_case_621: -//line plugins/parsers/influx/machine.go:24213 - switch ( m.data)[( m.p)] { - case 10: - goto tr103 - case 13: - goto st33 - case 32: - goto st272 - case 44: - goto st36 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st622 - } - case ( m.data)[( m.p)] >= 9: - goto st272 - } - goto tr105 st622: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof622 @@ -24236,13 +24223,17 @@ tr358: st_case_622: switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr532 case 13: - goto tr736 + goto tr533 case 32: - goto tr535 + goto tr531 + case 34: + goto tr29 case 44: - goto tr930 + goto tr534 + case 92: + goto st73 } switch { case ( m.data)[( m.p)] > 12: @@ -24250,28 +24241,7 @@ tr358: goto st622 } case ( m.data)[( m.p)] >= 9: - goto tr535 - } - goto tr105 - st175: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof175 - } - st_case_175: - switch ( m.data)[( m.p)] { - case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 - case 34: - goto tr31 - case 92: - goto st75 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st623 + goto tr531 } goto st6 st623: @@ -24281,27 +24251,20 @@ tr358: st_case_623: switch ( m.data)[( m.p)] { case 10: - goto tr534 - case 12: - goto tr535 + goto tr737 case 13: - goto tr536 + goto tr739 case 32: - goto tr533 + goto tr923 case 34: - goto tr31 + goto tr29 case 44: - goto tr537 + goto tr924 case 92: - goto st75 + goto st73 } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st623 - } - case ( m.data)[( m.p)] >= 9: - goto tr533 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr923 } goto st6 st624: @@ -24311,51 +24274,69 @@ tr358: st_case_624: switch ( m.data)[( m.p)] { case 10: - goto tr741 - case 12: - goto tr932 + goto tr743 case 13: - goto tr744 + goto tr745 case 32: - goto tr931 + goto tr925 case 34: - goto tr31 + goto tr29 case 44: - goto tr933 + goto tr926 
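// st623 and st624 are the suffix states for integer-typed fields: a digit run
// followed by 'i' (byte 105) exits through tr924, which calls
// m.handler.AddInt, while a 'u' suffix (byte 117) exits through tr926 and
// m.handler.AddUint. A roughly equivalent hand-written dispatch (assumed
// shape, not the generated code):
//
//	switch suffix {
//	case 'i':
//		err = handler.AddInt(key, text) // e.g. value=42i
//	case 'u':
//		err = handler.AddUint(key, text) // e.g. value=42u
//	}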
case 92: - goto st75 + goto st73 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr931 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr925 } goto st6 +tr168: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st625 st625: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof625 } st_case_625: +//line plugins/parsers/influx/machine.go:24305 switch ( m.data)[( m.p)] { case 10: - goto tr748 - case 12: - goto tr935 + goto tr532 case 13: - goto tr751 + goto tr533 case 32: - goto tr934 + goto tr531 case 34: - goto tr31 + goto tr29 case 44: - goto tr936 + goto tr534 + case 46: + goto st325 + case 69: + goto st173 case 92: - goto st75 + goto st73 + case 101: + goto st173 + case 105: + goto st623 + case 117: + goto st624 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr934 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st625 + } + case ( m.data)[( m.p)] >= 9: + goto tr531 } goto st6 -tr170: -//line plugins/parsers/influx/machine.go.rl:20 +tr352: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -24365,76 +24346,43 @@ tr170: goto _test_eof626 } st_case_626: -//line plugins/parsers/influx/machine.go:24369 +//line plugins/parsers/influx/machine.go:24350 switch ( m.data)[( m.p)] { case 10: - goto tr534 - case 12: - goto tr535 + goto tr749 case 13: - goto tr536 + goto tr751 case 32: - goto tr533 + goto tr928 case 34: - goto tr31 + goto tr29 case 44: - goto tr537 - case 46: - goto st326 - case 69: - goto st174 + goto tr929 + case 65: + goto st175 case 92: - goto st75 - case 101: - goto st174 - case 105: - goto st624 - case 117: - goto st625 + goto st73 + case 97: + goto st178 } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st626 - } - case ( m.data)[( m.p)] >= 9: - goto tr533 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 } goto st6 -tr354: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st627 - st627: + st175: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof627 + goto _test_eof175 } - st_case_627: -//line plugins/parsers/influx/machine.go:24416 + st_case_175: switch ( m.data)[( m.p)] { case 10: - goto tr755 - case 12: - goto tr939 - case 13: - goto tr758 - case 32: - goto tr938 + goto tr28 case 34: - goto tr31 - case 44: - goto tr940 - case 65: + goto tr29 + case 76: goto st176 case 92: - goto st75 - case 97: - goto st179 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr938 + goto st73 } goto st6 st176: @@ -24444,17 +24392,13 @@ tr354: st_case_176: switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 - case 76: + goto tr29 + case 83: goto st177 case 92: - goto st75 + goto st73 } goto st6 st177: @@ -24464,17 +24408,36 @@ tr354: st_case_177: switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 - case 83: - goto st178 + goto tr29 + case 69: + goto st627 case 92: - goto st75 + goto st73 + } + goto st6 + st627: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof627 + } + st_case_627: + switch ( m.data)[( m.p)] { + case 10: + goto tr749 + case 13: + goto tr751 + case 32: + goto tr928 + case 34: + goto tr29 + case 44: + goto tr929 + case 92: + goto st73 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 } goto st6 st178: @@ -24484,42 +24447,13 @@ tr354: st_case_178: switch ( 
m.data)[( m.p)] { case 10: + goto tr28 + case 34: goto tr29 - case 12: - goto tr8 - case 13: - goto st7 - case 34: - goto tr31 - case 69: - goto st628 case 92: - goto st75 - } - goto st6 - st628: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof628 - } - st_case_628: - switch ( m.data)[( m.p)] { - case 10: - goto tr755 - case 12: - goto tr939 - case 13: - goto tr758 - case 32: - goto tr938 - case 34: - goto tr31 - case 44: - goto tr940 - case 92: - goto st75 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr938 + goto st73 + case 108: + goto st179 } goto st6 st179: @@ -24529,16 +24463,12 @@ tr354: st_case_179: switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 + goto tr29 case 92: - goto st75 - case 108: + goto st73 + case 115: goto st180 } goto st6 @@ -24549,17 +24479,47 @@ tr354: st_case_180: switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 + goto tr29 case 92: - goto st75 - case 115: + goto st73 + case 101: + goto st627 + } + goto st6 +tr353: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st628 + st628: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof628 + } + st_case_628: +//line plugins/parsers/influx/machine.go:24503 + switch ( m.data)[( m.p)] { + case 10: + goto tr749 + case 13: + goto tr751 + case 32: + goto tr928 + case 34: + goto tr29 + case 44: + goto tr929 + case 82: goto st181 + case 92: + goto st73 + case 114: + goto st182 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 } goto st6 st181: @@ -24569,53 +24529,13 @@ tr354: st_case_181: switch ( m.data)[( m.p)] { case 10: + goto tr28 + case 34: goto tr29 - case 12: - goto tr8 - case 13: - goto st7 - case 34: - goto tr31 + case 85: + goto st177 case 92: - goto st75 - case 101: - goto st628 - } - goto st6 -tr355: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st629 - st629: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof629 - } - st_case_629: -//line plugins/parsers/influx/machine.go:24597 - switch ( m.data)[( m.p)] { - case 10: - goto tr755 - case 12: - goto tr939 - case 13: - goto tr758 - case 32: - goto tr938 - case 34: - goto tr31 - case 44: - goto tr940 - case 82: - goto st182 - case 92: - goto st75 - case 114: - goto st183 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr938 + goto st73 } goto st6 st182: @@ -24625,41 +24545,49 @@ tr355: st_case_182: switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 - case 85: - goto st178 + goto tr29 case 92: - goto st75 + goto st73 + case 117: + goto st180 } goto st6 - st183: +tr354: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st629 + st629: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof183 + goto _test_eof629 } - st_case_183: + st_case_629: +//line plugins/parsers/influx/machine.go:24569 switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 + goto tr749 case 13: - goto st7 + goto tr751 + case 32: + goto tr928 case 34: - goto tr31 + goto tr29 + case 44: + goto tr929 case 92: - goto st75 - case 117: - goto st181 + goto st73 + case 97: + goto st178 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 } goto st6 -tr356: -//line plugins/parsers/influx/machine.go.rl:20 +tr355: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -24669,80 +24597,44 @@ tr356: goto _test_eof630 
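// The single-byte states in this region (st626 through st630, st175 through
// st182) spell out the boolean literals the line protocol accepts, one byte
// per state: t, T, true, True, TRUE and f, F, false, False, FALSE. The
// accepting states exit through tr929, which calls m.handler.AddBool with the
// marked text. The same acceptance set written by hand (a sketch, not the
// generated table):
//
//	var boolLits = map[string]bool{
//		"t": true, "T": true, "true": true, "True": true, "TRUE": true,
//		"f": false, "F": false, "false": false, "False": false, "FALSE": false,
//	}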
} st_case_630: -//line plugins/parsers/influx/machine.go:24673 +//line plugins/parsers/influx/machine.go:24601 switch ( m.data)[( m.p)] { case 10: - goto tr755 - case 12: - goto tr939 + goto tr749 case 13: - goto tr758 + goto tr751 case 32: - goto tr938 + goto tr928 case 34: - goto tr31 + goto tr29 case 44: - goto tr940 + goto tr929 case 92: - goto st75 - case 97: - goto st179 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr938 - } - goto st6 -tr357: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st631 - st631: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof631 - } - st_case_631: -//line plugins/parsers/influx/machine.go:24707 - switch ( m.data)[( m.p)] { - case 10: - goto tr755 - case 12: - goto tr939 - case 13: - goto tr758 - case 32: - goto tr938 - case 34: - goto tr31 - case 44: - goto tr940 - case 92: - goto st75 + goto st73 case 114: - goto st183 + goto st182 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr938 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 } goto st6 -tr349: -//line plugins/parsers/influx/machine.go.rl:20 +tr347: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st184 - st184: + goto st183 + st183: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof184 + goto _test_eof183 } - st_case_184: -//line plugins/parsers/influx/machine.go:24741 + st_case_183: +//line plugins/parsers/influx/machine.go:24633 switch ( m.data)[( m.p)] { case 34: - goto st172 + goto st171 case 92: - goto st172 + goto st171 } switch { case ( m.data)[( m.p)] > 10: @@ -24753,6 +24645,42 @@ tr349: goto tr8 } goto st3 + st631: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof631 + } + st_case_631: + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 13: + goto tr533 + case 32: + goto tr531 + case 34: + goto tr29 + case 44: + goto tr534 + case 46: + goto st325 + case 69: + goto st173 + case 92: + goto st73 + case 101: + goto st173 + case 105: + goto st623 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st619 + } + case ( m.data)[( m.p)] >= 9: + goto tr531 + } + goto st6 st632: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof632 @@ -24760,109 +24688,83 @@ tr349: st_case_632: switch ( m.data)[( m.p)] { case 10: - goto tr534 - case 12: - goto tr535 + goto tr532 case 13: - goto tr536 - case 32: goto tr533 + case 32: + goto tr531 case 34: - goto tr31 + goto tr29 case 44: - goto tr537 + goto tr534 case 46: - goto st326 + goto st325 case 69: - goto st174 + goto st173 case 92: - goto st75 + goto st73 case 101: - goto st174 + goto st173 case 105: - goto st624 + goto st623 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st620 + goto st632 } case ( m.data)[( m.p)] >= 9: - goto tr533 + goto tr531 } goto st6 +tr169: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st633 st633: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof633 } st_case_633: +//line plugins/parsers/influx/machine.go:24732 switch ( m.data)[( m.p)] { case 10: - goto tr534 - case 12: - goto tr535 + goto tr891 case 13: - goto tr536 + goto tr751 case 32: - goto tr533 + goto tr928 case 34: - goto tr31 + goto tr29 case 44: - goto tr537 - case 46: - goto st326 - case 69: - goto st174 + goto tr929 + case 65: + goto st184 case 92: - goto st75 - case 101: - goto st174 - case 105: - goto st624 + goto st73 + case 97: + goto st187 } - switch { - case ( m.data)[( m.p)] > 11: - if 
48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st633 - } - case ( m.data)[( m.p)] >= 9: - goto tr533 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 } goto st6 -tr171: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st634 - st634: + st184: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof634 + goto _test_eof184 } - st_case_634: -//line plugins/parsers/influx/machine.go:24844 + st_case_184: switch ( m.data)[( m.p)] { case 10: - goto tr900 - case 12: - goto tr939 - case 13: - goto tr758 - case 32: - goto tr938 + goto tr28 case 34: - goto tr31 - case 44: - goto tr940 - case 65: + goto tr29 + case 76: goto st185 case 92: - goto st75 - case 97: - goto st188 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr938 + goto st73 } goto st6 st185: @@ -24872,17 +24774,13 @@ tr171: st_case_185: switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 - case 76: + goto tr29 + case 83: goto st186 case 92: - goto st75 + goto st73 } goto st6 st186: @@ -24892,17 +24790,36 @@ tr171: st_case_186: switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 - case 83: - goto st187 + goto tr29 + case 69: + goto st634 case 92: - goto st75 + goto st73 + } + goto st6 + st634: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof634 + } + st_case_634: + switch ( m.data)[( m.p)] { + case 10: + goto tr891 + case 13: + goto tr751 + case 32: + goto tr928 + case 34: + goto tr29 + case 44: + goto tr929 + case 92: + goto st73 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 } goto st6 st187: @@ -24912,42 +24829,13 @@ tr171: st_case_187: switch ( m.data)[( m.p)] { case 10: + goto tr28 + case 34: goto tr29 - case 12: - goto tr8 - case 13: - goto st7 - case 34: - goto tr31 - case 69: - goto st635 case 92: - goto st75 - } - goto st6 - st635: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof635 - } - st_case_635: - switch ( m.data)[( m.p)] { - case 10: - goto tr900 - case 12: - goto tr939 - case 13: - goto tr758 - case 32: - goto tr938 - case 34: - goto tr31 - case 44: - goto tr940 - case 92: - goto st75 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr938 + goto st73 + case 108: + goto st188 } goto st6 st188: @@ -24957,16 +24845,12 @@ tr171: st_case_188: switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 + goto tr29 case 92: - goto st75 - case 108: + goto st73 + case 115: goto st189 } goto st6 @@ -24977,17 +24861,47 @@ tr171: st_case_189: switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 + goto tr29 case 92: - goto st75 - case 115: + goto st73 + case 101: + goto st634 + } + goto st6 +tr170: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st635 + st635: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof635 + } + st_case_635: +//line plugins/parsers/influx/machine.go:24885 + switch ( m.data)[( m.p)] { + case 10: + goto tr891 + case 13: + goto tr751 + case 32: + goto tr928 + case 34: + goto tr29 + case 44: + goto tr929 + case 82: goto st190 + case 92: + goto st73 + case 114: + goto st191 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 } goto st6 st190: @@ -24997,53 +24911,13 @@ tr171: st_case_190: switch ( m.data)[( m.p)] { case 10: + goto tr28 + case 34: goto tr29 - case 12: - goto 
tr8 - case 13: - goto st7 - case 34: - goto tr31 + case 85: + goto st186 case 92: - goto st75 - case 101: - goto st635 - } - goto st6 -tr172: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st636 - st636: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof636 - } - st_case_636: -//line plugins/parsers/influx/machine.go:25025 - switch ( m.data)[( m.p)] { - case 10: - goto tr900 - case 12: - goto tr939 - case 13: - goto tr758 - case 32: - goto tr938 - case 34: - goto tr31 - case 44: - goto tr940 - case 82: - goto st191 - case 92: - goto st75 - case 114: - goto st192 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr938 + goto st73 } goto st6 st191: @@ -25053,41 +24927,49 @@ tr172: st_case_191: switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 - case 85: - goto st187 + goto tr29 case 92: - goto st75 + goto st73 + case 117: + goto st189 } goto st6 - st192: +tr171: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st636 + st636: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof192 + goto _test_eof636 } - st_case_192: + st_case_636: +//line plugins/parsers/influx/machine.go:24951 switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 + goto tr891 case 13: - goto st7 + goto tr751 + case 32: + goto tr928 case 34: - goto tr31 + goto tr29 + case 44: + goto tr929 case 92: - goto st75 - case 117: - goto st190 + goto st73 + case 97: + goto st187 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 } goto st6 -tr173: -//line plugins/parsers/influx/machine.go.rl:20 +tr172: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -25097,65 +24979,63 @@ tr173: goto _test_eof637 } st_case_637: -//line plugins/parsers/influx/machine.go:25101 +//line plugins/parsers/influx/machine.go:24983 switch ( m.data)[( m.p)] { case 10: - goto tr900 - case 12: - goto tr939 + goto tr891 case 13: - goto tr758 + goto tr751 case 32: - goto tr938 + goto tr928 case 34: - goto tr31 + goto tr29 case 44: - goto tr940 + goto tr929 case 92: - goto st75 - case 97: - goto st188 + goto st73 + case 114: + goto st191 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr938 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 } goto st6 -tr174: -//line plugins/parsers/influx/machine.go.rl:20 +tr160: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st638 - st638: + goto st192 + st192: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof638 + goto _test_eof192 } - st_case_638: -//line plugins/parsers/influx/machine.go:25135 + st_case_192: +//line plugins/parsers/influx/machine.go:25015 switch ( m.data)[( m.p)] { case 10: - goto tr900 - case 12: - goto tr939 + goto tr28 + case 11: + goto tr160 case 13: - goto tr758 + goto st6 case 32: - goto tr938 + goto st48 case 34: - goto tr31 + goto tr95 case 44: - goto tr940 + goto st6 + case 61: + goto tr163 case 92: - goto st75 - case 114: - goto st192 + goto tr161 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr938 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st48 } - goto st6 -tr162: -//line plugins/parsers/influx/machine.go.rl:20 + goto tr158 +tr138: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -25165,32 +25045,38 @@ tr162: goto _test_eof193 } st_case_193: -//line plugins/parsers/influx/machine.go:25169 +//line plugins/parsers/influx/machine.go:25049 switch ( m.data)[( m.p)] { - case 9: - goto st49 case 10: - goto tr29 
+ goto tr45 case 11: - goto tr162 - case 12: - goto st2 + goto tr59 case 13: - goto st7 + goto tr45 case 32: - goto st49 - case 34: - goto tr97 + goto tr58 case 44: - goto st6 + goto tr60 + case 46: + goto st194 + case 48: + goto st639 case 61: - goto tr165 + goto tr45 case 92: - goto tr163 + goto st21 } - goto tr160 -tr140: -//line plugins/parsers/influx/machine.go.rl:20 + switch { + case ( m.data)[( m.p)] > 12: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st642 + } + case ( m.data)[( m.p)] >= 9: + goto tr58 + } + goto st15 +tr139: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -25200,107 +25086,102 @@ tr140: goto _test_eof194 } st_case_194: -//line plugins/parsers/influx/machine.go:25204 +//line plugins/parsers/influx/machine.go:25090 switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: - goto tr61 + goto tr59 case 13: - goto tr47 + goto tr45 case 32: - goto tr60 + goto tr58 case 44: - goto tr62 - case 46: - goto st195 - case 48: - goto st640 + goto tr60 case 61: - goto tr47 + goto tr45 case 92: - goto st22 + goto st21 } switch { case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st643 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st638 } case ( m.data)[( m.p)] >= 9: - goto tr60 + goto tr58 } - goto st16 -tr141: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st195 + goto st15 + st638: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof638 + } + st_case_638: + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 11: + goto tr731 + case 13: + goto tr732 + case 32: + goto tr729 + case 44: + goto tr733 + case 61: + goto tr130 + case 69: + goto st195 + case 92: + goto st21 + case 101: + goto st195 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st638 + } + case ( m.data)[( m.p)] >= 9: + goto tr729 + } + goto st15 st195: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof195 } st_case_195: -//line plugins/parsers/influx/machine.go:25245 switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: - goto tr61 + goto tr59 case 13: - goto tr47 + goto tr45 case 32: - goto tr60 + goto tr58 + case 34: + goto st196 case 44: - goto tr62 + goto tr60 case 61: - goto tr47 + goto tr45 case 92: - goto st22 + goto st21 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st639 + case ( m.data)[( m.p)] < 43: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr58 } - case ( m.data)[( m.p)] >= 9: - goto tr60 - } - goto st16 - st639: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof639 - } - st_case_639: - switch ( m.data)[( m.p)] { - case 10: - goto tr734 - case 11: - goto tr735 - case 13: - goto tr736 - case 32: - goto tr731 - case 44: - goto tr737 - case 61: - goto tr132 - case 69: - goto st196 - case 92: - goto st22 - case 101: + case ( m.data)[( m.p)] > 45: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st479 + } + default: goto st196 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st639 - } - case ( m.data)[( m.p)] >= 9: - goto tr731 - } - goto st16 + goto st15 st196: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof196 @@ -25308,65 +25189,67 @@ tr141: st_case_196: switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: - goto tr61 + goto tr59 case 13: - goto tr47 + goto tr45 case 32: - goto tr60 - case 34: - goto st197 + goto tr58 case 44: - 
goto tr62 - case 61: - goto tr47 - case 92: - goto st22 - } - switch { - case ( m.data)[( m.p)] < 43: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st480 - } - default: - goto st197 - } - goto st16 - st197: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof197 - } - st_case_197: - switch ( m.data)[( m.p)] { - case 10: - goto tr47 - case 11: - goto tr61 - case 13: - goto tr47 - case 32: goto tr60 - case 44: - goto tr62 case 61: - goto tr47 + goto tr45 case 92: - goto st22 + goto st21 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st480 + goto st479 } case ( m.data)[( m.p)] >= 9: - goto tr60 + goto tr58 } - goto st16 + goto st15 + st639: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof639 + } + st_case_639: + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 11: + goto tr731 + case 13: + goto tr732 + case 32: + goto tr729 + case 44: + goto tr733 + case 46: + goto st638 + case 61: + goto tr130 + case 69: + goto st195 + case 92: + goto st21 + case 101: + goto st195 + case 105: + goto st641 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st640 + } + case ( m.data)[( m.p)] >= 9: + goto tr729 + } + goto st15 st640: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof640 @@ -25374,37 +25257,35 @@ tr141: st_case_640: switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr730 case 11: - goto tr735 - case 13: - goto tr736 - case 32: goto tr731 + case 13: + goto tr732 + case 32: + goto tr729 case 44: - goto tr737 + goto tr733 case 46: - goto st639 + goto st638 case 61: - goto tr132 + goto tr130 case 69: - goto st196 + goto st195 case 92: - goto st22 + goto st21 case 101: - goto st196 - case 105: - goto st642 + goto st195 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st641 + goto st640 } case ( m.data)[( m.p)] >= 9: - goto tr731 + goto tr729 } - goto st16 + goto st15 st641: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof641 @@ -25412,35 +25293,24 @@ tr141: st_case_641: switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr942 case 11: - goto tr735 + goto tr943 case 13: - goto tr736 + goto tr944 case 32: - goto tr731 + goto tr941 case 44: - goto tr737 - case 46: - goto st639 + goto tr945 case 61: - goto tr132 - case 69: - goto st196 + goto tr130 case 92: - goto st22 - case 101: - goto st196 + goto st21 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st641 - } - case ( m.data)[( m.p)] >= 9: - goto tr731 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr941 } - goto st16 + goto st15 st642: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof642 @@ -25448,136 +25318,158 @@ tr141: st_case_642: switch ( m.data)[( m.p)] { case 10: - goto tr952 + goto tr730 case 11: - goto tr953 + goto tr731 case 13: - goto tr954 + goto tr732 case 32: - goto tr743 + goto tr729 case 44: - goto tr955 + goto tr733 + case 46: + goto st638 case 61: - goto tr132 + goto tr130 + case 69: + goto st195 case 92: - goto st22 + goto st21 + case 101: + goto st195 + case 105: + goto st641 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr743 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st642 + } + case ( m.data)[( m.p)] >= 9: + goto tr729 } - goto st16 + goto st15 +tr140: +//line 
plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st643 st643: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof643 } st_case_643: +//line plugins/parsers/influx/machine.go:25364 switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr730 case 11: - goto tr735 - case 13: - goto tr736 - case 32: goto tr731 + case 13: + goto tr732 + case 32: + goto tr729 case 44: - goto tr737 + goto tr733 case 46: - goto st639 + goto st638 case 61: - goto tr132 + goto tr130 case 69: - goto st196 + goto st195 case 92: - goto st22 + goto st21 case 101: - goto st196 + goto st195 case 105: - goto st642 + goto st641 + case 117: + goto st644 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st643 + goto st640 } case ( m.data)[( m.p)] >= 9: - goto tr731 + goto tr729 } - goto st16 -tr142: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st644 + goto st15 st644: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof644 } st_case_644: -//line plugins/parsers/influx/machine.go:25519 switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr948 case 11: - goto tr735 + goto tr949 case 13: - goto tr736 + goto tr950 case 32: - goto tr731 + goto tr947 case 44: - goto tr737 - case 46: - goto st639 + goto tr951 case 61: - goto tr132 - case 69: - goto st196 + goto tr130 case 92: - goto st22 - case 101: - goto st196 - case 105: - goto st642 - case 117: - goto st645 + goto st21 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st641 - } - case ( m.data)[( m.p)] >= 9: - goto tr731 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr947 } - goto st16 + goto st15 +tr141: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st645 st645: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof645 } st_case_645: +//line plugins/parsers/influx/machine.go:25436 switch ( m.data)[( m.p)] { case 10: - goto tr957 + goto tr730 case 11: - goto tr958 + goto tr731 case 13: - goto tr959 + goto tr732 case 32: - goto tr750 + goto tr729 case 44: - goto tr960 + goto tr733 + case 46: + goto st638 case 61: - goto tr132 + goto tr130 + case 69: + goto st195 case 92: - goto st22 + goto st21 + case 101: + goto st195 + case 105: + goto st641 + case 117: + goto st644 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr750 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st645 + } + case ( m.data)[( m.p)] >= 9: + goto tr729 } - goto st16 -tr143: -//line plugins/parsers/influx/machine.go.rl:20 + goto st15 +tr142: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -25587,78 +25479,58 @@ tr143: goto _test_eof646 } st_case_646: -//line plugins/parsers/influx/machine.go:25591 +//line plugins/parsers/influx/machine.go:25483 switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr954 case 11: - goto tr735 + goto tr955 case 13: - goto tr736 + goto tr956 case 32: - goto tr731 + goto tr953 case 44: - goto tr737 - case 46: - goto st639 + goto tr957 case 61: - goto tr132 - case 69: - goto st196 - case 92: - goto st22 - case 101: - goto st196 - case 105: - goto st642 - case 117: - goto st645 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st646 - } - case ( m.data)[( m.p)] >= 9: - goto tr731 - } - goto st16 -tr144: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st647 - st647: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof647 - } - 
st_case_647: -//line plugins/parsers/influx/machine.go:25638 - switch ( m.data)[( m.p)] { - case 10: - goto tr962 - case 11: - goto tr963 - case 13: - goto tr964 - case 32: - goto tr757 - case 44: - goto tr965 - case 61: - goto tr132 + goto tr130 case 65: - goto st198 + goto st197 case 92: - goto st22 + goto st21 case 97: - goto st201 + goto st200 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr757 + goto tr953 } - goto st16 + goto st15 + st197: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof197 + } + st_case_197: + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr59 + case 13: + goto tr45 + case 32: + goto tr58 + case 44: + goto tr60 + case 61: + goto tr45 + case 76: + goto st198 + case 92: + goto st21 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr58 + } + goto st15 st198: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof198 @@ -25666,26 +25538,26 @@ tr144: st_case_198: switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: - goto tr61 + goto tr59 case 13: - goto tr47 + goto tr45 case 32: - goto tr60 + goto tr58 case 44: - goto tr62 + goto tr60 case 61: - goto tr47 - case 76: + goto tr45 + case 83: goto st199 case 92: - goto st22 + goto st21 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 + goto tr58 } - goto st16 + goto st15 st199: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof199 @@ -25693,26 +25565,51 @@ tr144: st_case_199: switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: - goto tr61 + goto tr59 case 13: - goto tr47 + goto tr45 case 32: - goto tr60 + goto tr58 case 44: - goto tr62 + goto tr60 case 61: - goto tr47 - case 83: - goto st200 + goto tr45 + case 69: + goto st647 case 92: - goto st22 + goto st21 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 + goto tr58 } - goto st16 + goto st15 + st647: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof647 + } + st_case_647: + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 11: + goto tr955 + case 13: + goto tr956 + case 32: + goto tr953 + case 44: + goto tr957 + case 61: + goto tr130 + case 92: + goto st21 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr953 + } + goto st15 st200: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof200 @@ -25720,51 +25617,26 @@ tr144: st_case_200: switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: - goto tr61 + goto tr59 case 13: - goto tr47 + goto tr45 case 32: - goto tr60 + goto tr58 case 44: - goto tr62 + goto tr60 case 61: - goto tr47 - case 69: - goto st648 + goto tr45 case 92: - goto st22 + goto st21 + case 108: + goto st201 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 + goto tr58 } - goto st16 - st648: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof648 - } - st_case_648: - switch ( m.data)[( m.p)] { - case 10: - goto tr962 - case 11: - goto tr963 - case 13: - goto tr964 - case 32: - goto tr757 - case 44: - goto tr965 - case 61: - goto tr132 - case 92: - goto st22 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr757 - } - goto st16 + goto st15 st201: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof201 @@ -25772,26 +25644,26 @@ tr144: st_case_201: switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: - goto tr61 + goto tr59 case 13: - goto tr47 + goto tr45 case 32: - goto tr60 + goto tr58 case 44: - goto tr62 + goto tr60 case 61: - goto tr47 + goto tr45 case 92: - goto st22 - case 108: + goto st21 + case 115: goto st202 } if 9 <= ( m.data)[( m.p)] && ( 
m.data)[( m.p)] <= 12 { - goto tr60 + goto tr58 } - goto st16 + goto st15 st202: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof202 @@ -25799,26 +25671,62 @@ tr144: st_case_202: switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: - goto tr61 + goto tr59 case 13: - goto tr47 + goto tr45 case 32: - goto tr60 + goto tr58 case 44: - goto tr62 + goto tr60 case 61: - goto tr47 + goto tr45 case 92: - goto st22 - case 115: - goto st203 + goto st21 + case 101: + goto st647 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 + goto tr58 } - goto st16 + goto st15 +tr143: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st648 + st648: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof648 + } + st_case_648: +//line plugins/parsers/influx/machine.go:25706 + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 11: + goto tr955 + case 13: + goto tr956 + case 32: + goto tr953 + case 44: + goto tr957 + case 61: + goto tr130 + case 82: + goto st203 + case 92: + goto st21 + case 114: + goto st204 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr953 + } + goto st15 st203: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof203 @@ -25826,28 +25734,55 @@ tr144: st_case_203: switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: - goto tr61 + goto tr59 case 13: - goto tr47 + goto tr45 case 32: - goto tr60 + goto tr58 case 44: - goto tr62 + goto tr60 case 61: - goto tr47 + goto tr45 + case 85: + goto st199 case 92: - goto st22 - case 101: - goto st648 + goto st21 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 + goto tr58 } - goto st16 -tr145: -//line plugins/parsers/influx/machine.go.rl:20 + goto st15 + st204: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof204 + } + st_case_204: + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr59 + case 13: + goto tr45 + case 32: + goto tr58 + case 44: + goto tr60 + case 61: + goto tr45 + case 92: + goto st21 + case 117: + goto st202 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr58 + } + goto st15 +tr144: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -25857,87 +25792,31 @@ tr145: goto _test_eof649 } st_case_649: -//line plugins/parsers/influx/machine.go:25861 +//line plugins/parsers/influx/machine.go:25796 switch ( m.data)[( m.p)] { case 10: - goto tr962 + goto tr954 case 11: - goto tr963 + goto tr955 case 13: - goto tr964 + goto tr956 case 32: - goto tr757 + goto tr953 case 44: - goto tr965 + goto tr957 case 61: - goto tr132 - case 82: - goto st204 + goto tr130 case 92: - goto st22 - case 114: - goto st205 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr757 - } - goto st16 - st204: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof204 - } - st_case_204: - switch ( m.data)[( m.p)] { - case 10: - goto tr47 - case 11: - goto tr61 - case 13: - goto tr47 - case 32: - goto tr60 - case 44: - goto tr62 - case 61: - goto tr47 - case 85: + goto st21 + case 97: goto st200 - case 92: - goto st22 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 + goto tr953 } - goto st16 - st205: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof205 - } - st_case_205: - switch ( m.data)[( m.p)] { - case 10: - goto tr47 - case 11: - goto tr61 - case 13: - goto tr47 - case 32: - goto tr60 - case 44: - goto tr62 - case 61: - goto tr47 - case 92: - goto st22 - case 117: - goto st203 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st16 -tr146: -//line 
plugins/parsers/influx/machine.go.rl:20 + goto st15 +tr145: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -25947,82 +25826,93 @@ tr146: goto _test_eof650 } st_case_650: -//line plugins/parsers/influx/machine.go:25951 +//line plugins/parsers/influx/machine.go:25830 switch ( m.data)[( m.p)] { case 10: - goto tr962 + goto tr954 case 11: - goto tr963 + goto tr955 case 13: - goto tr964 + goto tr956 case 32: - goto tr757 + goto tr953 case 44: - goto tr965 + goto tr957 case 61: - goto tr132 + goto tr130 case 92: - goto st22 - case 97: - goto st201 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr757 - } - goto st16 -tr147: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st651 - st651: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof651 - } - st_case_651: -//line plugins/parsers/influx/machine.go:25985 - switch ( m.data)[( m.p)] { - case 10: - goto tr962 - case 11: - goto tr963 - case 13: - goto tr964 - case 32: - goto tr757 - case 44: - goto tr965 - case 61: - goto tr132 - case 92: - goto st22 + goto st21 case 114: - goto st205 + goto st204 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr757 + goto tr953 } - goto st16 -tr123: -//line plugins/parsers/influx/machine.go.rl:20 + goto st15 +tr121: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st206 -tr382: - ( m.cs) = 206 -//line plugins/parsers/influx/machine.go.rl:20 + goto st205 +tr380: + ( m.cs) = 205 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:78 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st205: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof205 + } + st_case_205: +//line plugins/parsers/influx/machine.go:25881 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr380 + case 13: + goto st6 + case 32: + goto tr117 + case 34: + goto tr122 + case 44: + goto tr90 + case 61: + goto tr381 + case 92: + goto tr123 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr117 + } + goto tr119 +tr118: + ( m.cs) = 206 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; {( m.p)++; goto _out } } @@ -26032,137 +25922,110 @@ tr382: goto _test_eof206 } st_case_206: -//line plugins/parsers/influx/machine.go:26036 +//line plugins/parsers/influx/machine.go:25926 switch ( m.data)[( m.p)] { - case 9: - goto tr119 case 10: - goto tr29 + goto tr28 case 11: - goto tr382 - case 12: - goto tr38 + goto tr380 case 13: - goto st7 + goto st6 case 32: - goto tr119 + goto tr117 case 34: - goto tr124 + goto tr122 case 44: - goto tr92 + goto tr90 case 61: - goto tr383 + goto tr80 case 92: - goto tr125 + goto tr123 } - goto tr121 -tr120: - ( m.cs) = 207 -//line plugins/parsers/influx/machine.go.rl:20 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr117 + } + goto tr119 +tr497: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again + goto st207 st207: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof207 } st_case_207: -//line plugins/parsers/influx/machine.go:26082 - switch ( m.data)[( 
m.p)] { - case 9: - goto tr119 - case 10: - goto tr29 - case 11: - goto tr382 - case 12: - goto tr38 - case 13: - goto st7 - case 32: - goto tr119 - case 34: - goto tr124 - case 44: - goto tr92 - case 61: - goto tr82 - case 92: - goto tr125 - } - goto tr121 -tr499: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st208 - st208: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof208 - } - st_case_208: -//line plugins/parsers/influx/machine.go:26117 +//line plugins/parsers/influx/machine.go:25960 switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr105 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st652 + goto st651 } goto st6 -tr500: -//line plugins/parsers/influx/machine.go.rl:20 +tr498: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st652 + goto st651 + st651: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof651 + } + st_case_651: +//line plugins/parsers/influx/machine.go:25984 + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st652 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 + } + goto st6 st652: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof652 } st_case_652: -//line plugins/parsers/influx/machine.go:26145 switch ( m.data)[( m.p)] { case 10: - goto tr603 - case 12: - goto tr469 + goto tr600 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st653 } case ( m.data)[( m.p)] >= 9: - goto tr602 + goto tr599 } goto st6 st653: @@ -26172,25 +26035,23 @@ tr500: st_case_653: switch ( m.data)[( m.p)] { case 10: - goto tr603 - case 12: - goto tr469 + goto tr600 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st654 } case ( m.data)[( m.p)] >= 9: - goto tr602 + goto tr599 } goto st6 st654: @@ -26200,25 +26061,23 @@ tr500: st_case_654: switch ( m.data)[( m.p)] { case 10: - goto tr603 - case 12: - goto tr469 + goto tr600 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st655 } case ( m.data)[( m.p)] >= 9: - goto tr602 + goto tr599 } goto st6 st655: @@ -26228,25 +26087,23 @@ tr500: st_case_655: switch ( m.data)[( m.p)] { case 10: - goto tr603 - case 12: - goto tr469 + goto tr600 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st656 } case ( m.data)[( m.p)] >= 9: - goto tr602 + goto tr599 } goto st6 st656: @@ -26256,25 +26113,23 @@ tr500: st_case_656: switch ( m.data)[( m.p)] { case 10: - goto tr603 - case 12: - goto tr469 + goto tr600 case 13: - goto tr605 - 
case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st657 } case ( m.data)[( m.p)] >= 9: - goto tr602 + goto tr599 } goto st6 st657: @@ -26284,25 +26139,23 @@ tr500: st_case_657: switch ( m.data)[( m.p)] { case 10: - goto tr603 - case 12: - goto tr469 + goto tr600 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st658 } case ( m.data)[( m.p)] >= 9: - goto tr602 + goto tr599 } goto st6 st658: @@ -26312,25 +26165,23 @@ tr500: st_case_658: switch ( m.data)[( m.p)] { case 10: - goto tr603 - case 12: - goto tr469 + goto tr600 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st659 } case ( m.data)[( m.p)] >= 9: - goto tr602 + goto tr599 } goto st6 st659: @@ -26340,25 +26191,23 @@ tr500: st_case_659: switch ( m.data)[( m.p)] { case 10: - goto tr603 - case 12: - goto tr469 + goto tr600 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st660 } case ( m.data)[( m.p)] >= 9: - goto tr602 + goto tr599 } goto st6 st660: @@ -26368,25 +26217,23 @@ tr500: st_case_660: switch ( m.data)[( m.p)] { case 10: - goto tr603 - case 12: - goto tr469 + goto tr600 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st661 } case ( m.data)[( m.p)] >= 9: - goto tr602 + goto tr599 } goto st6 st661: @@ -26396,25 +26243,23 @@ tr500: st_case_661: switch ( m.data)[( m.p)] { case 10: - goto tr603 - case 12: - goto tr469 + goto tr600 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st662 } case ( m.data)[( m.p)] >= 9: - goto tr602 + goto tr599 } goto st6 st662: @@ -26424,25 +26269,23 @@ tr500: st_case_662: switch ( m.data)[( m.p)] { case 10: - goto tr603 - case 12: - goto tr469 + goto tr600 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st663 } case ( m.data)[( m.p)] >= 9: - goto tr602 + goto tr599 } goto st6 st663: @@ -26452,25 +26295,23 @@ tr500: st_case_663: switch ( m.data)[( m.p)] { case 10: - goto tr603 - case 12: - goto tr469 + goto tr600 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 
48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st664 } case ( m.data)[( m.p)] >= 9: - goto tr602 + goto tr599 } goto st6 st664: @@ -26480,25 +26321,23 @@ tr500: st_case_664: switch ( m.data)[( m.p)] { case 10: - goto tr603 - case 12: - goto tr469 + goto tr600 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st665 } case ( m.data)[( m.p)] >= 9: - goto tr602 + goto tr599 } goto st6 st665: @@ -26508,25 +26347,23 @@ tr500: st_case_665: switch ( m.data)[( m.p)] { case 10: - goto tr603 - case 12: - goto tr469 + goto tr600 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st666 } case ( m.data)[( m.p)] >= 9: - goto tr602 + goto tr599 } goto st6 st666: @@ -26536,25 +26373,23 @@ tr500: st_case_666: switch ( m.data)[( m.p)] { case 10: - goto tr603 - case 12: - goto tr469 + goto tr600 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st667 } case ( m.data)[( m.p)] >= 9: - goto tr602 + goto tr599 } goto st6 st667: @@ -26564,25 +26399,23 @@ tr500: st_case_667: switch ( m.data)[( m.p)] { case 10: - goto tr603 - case 12: - goto tr469 + goto tr600 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st668 } case ( m.data)[( m.p)] >= 9: - goto tr602 + goto tr599 } goto st6 st668: @@ -26592,25 +26425,23 @@ tr500: st_case_668: switch ( m.data)[( m.p)] { case 10: - goto tr603 - case 12: - goto tr469 + goto tr600 case 13: - goto tr605 - case 32: goto tr602 + case 32: + goto tr599 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st669 } case ( m.data)[( m.p)] >= 9: - goto tr602 + goto tr599 } goto st6 st669: @@ -26620,172 +26451,178 @@ tr500: st_case_669: switch ( m.data)[( m.p)] { case 10: - goto tr603 - case 12: - goto tr469 + goto tr600 case 13: - goto tr605 + goto tr602 case 32: - goto tr602 + goto tr599 case 34: - goto tr31 + goto tr29 case 92: - goto st75 + goto st73 } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st670 - } - case ( m.data)[( m.p)] >= 9: - goto tr602 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr599 } goto st6 - st670: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof670 - } - st_case_670: - switch ( m.data)[( m.p)] { - case 10: - goto tr603 - case 12: - goto tr469 - case 13: - goto tr605 - case 32: - goto tr602 - case 34: - goto tr31 - case 92: - goto st75 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr602 - } - goto st6 -tr496: -//line plugins/parsers/influx/machine.go.rl:20 +tr494: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st209 -tr989: - ( m.cs) 
= 209 -//line plugins/parsers/influx/machine.go.rl:122 + goto st208 +tr981: + ( m.cs) = 208 +//line plugins/parsers/influx/machine.go.rl:130 err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr994: - ( m.cs) = 209 -//line plugins/parsers/influx/machine.go.rl:104 +tr986: + ( m.cs) = 208 +//line plugins/parsers/influx/machine.go.rl:112 err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr997: - ( m.cs) = 209 -//line plugins/parsers/influx/machine.go.rl:113 +tr989: + ( m.cs) = 208 +//line plugins/parsers/influx/machine.go.rl:121 err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again -tr1000: - ( m.cs) = 209 -//line plugins/parsers/influx/machine.go.rl:131 +tr992: + ( m.cs) = 208 +//line plugins/parsers/influx/machine.go.rl:139 err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } goto _again - st209: + st208: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof209 + goto _test_eof208 } - st_case_209: -//line plugins/parsers/influx/machine.go:26731 + st_case_208: +//line plugins/parsers/influx/machine.go:26532 switch ( m.data)[( m.p)] { case 9: goto st6 case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 32: goto st6 case 34: - goto tr386 + goto tr384 case 44: goto st6 case 61: goto st6 case 92: - goto tr387 + goto tr385 } - goto tr385 -tr385: -//line plugins/parsers/influx/machine.go.rl:20 + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto tr383 +tr383: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p + goto st209 + st209: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof209 + } + st_case_209: +//line plugins/parsers/influx/machine.go:26564 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr387 + case 92: + goto st223 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st209 +tr387: +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() + goto st210 st210: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof210 } st_case_210: -//line plugins/parsers/influx/machine.go:26764 +//line plugins/parsers/influx/machine.go:26596 switch ( m.data)[( m.p)] { - case 9: - goto st6 case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 - case 32: - goto st6 + goto tr28 case 34: - goto tr100 - case 44: - goto st6 - case 61: + goto tr351 + case 45: goto tr389 + case 46: + goto tr390 + case 48: + goto tr391 + case 70: + goto tr110 + case 84: + goto tr111 case 92: - goto st224 + goto st73 + case 102: + goto tr112 + case 116: + goto tr113 } - goto st210 + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr392 + } + goto st6 tr389: -//line plugins/parsers/influx/machine.go.rl:100 +//line plugins/parsers/influx/machine.go.rl:28 - m.key = m.text() + m.pb = m.p goto st211 st211: @@ -26793,39 +26630,25 @@ tr389: goto _test_eof211 } st_case_211: -//line plugins/parsers/influx/machine.go:26797 +//line plugins/parsers/influx/machine.go:26634 switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr353 - case 45: - goto tr391 + goto tr29 case 46: - goto tr392 + goto st212 case 48: - goto tr393 - 
case 70: - goto tr112 - case 84: - goto tr113 + goto st672 case 92: - goto st75 - case 102: - goto tr114 - case 116: - goto tr115 + goto st73 } if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr394 + goto st675 } goto st6 -tr391: -//line plugins/parsers/influx/machine.go.rl:20 +tr390: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -26835,50 +26658,84 @@ tr391: goto _test_eof212 } st_case_212: -//line plugins/parsers/influx/machine.go:26839 +//line plugins/parsers/influx/machine.go:26662 switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 - case 46: - goto st213 - case 48: - goto st673 + goto tr29 case 92: - goto st75 + goto st73 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st676 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st670 + } + goto st6 + st670: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof670 + } + st_case_670: + switch ( m.data)[( m.p)] { + case 10: + goto tr758 + case 13: + goto tr638 + case 32: + goto tr980 + case 34: + goto tr29 + case 44: + goto tr981 + case 69: + goto st213 + case 92: + goto st73 + case 101: + goto st213 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st670 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 } goto st6 -tr392: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st213 st213: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof213 } st_case_213: -//line plugins/parsers/influx/machine.go:26871 switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 + goto tr356 + case 43: + goto st214 + case 45: + goto st214 case 92: - goto st75 + goto st73 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st671 + } + goto st6 + st214: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof214 + } + st_case_214: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st671 @@ -26891,77 +26748,25 @@ tr392: st_case_671: switch ( m.data)[( m.p)] { case 10: - goto tr765 - case 12: - goto tr535 + goto tr758 case 13: - goto tr642 + goto tr638 case 32: - goto tr988 + goto tr980 case 34: - goto tr31 + goto tr29 case 44: - goto tr989 - case 69: - goto st214 + goto tr981 case 92: - goto st75 - case 101: - goto st214 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st671 } case ( m.data)[( m.p)] >= 9: - goto tr988 - } - goto st6 - st214: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof214 - } - st_case_214: - switch ( m.data)[( m.p)] { - case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 - case 34: - goto tr358 - case 43: - goto st215 - case 45: - goto st215 - case 92: - goto st75 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st672 - } - goto st6 - st215: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof215 - } - st_case_215: - switch ( m.data)[( m.p)] { - case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 - case 34: - goto tr31 - case 92: - goto st75 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st672 + goto tr980 } goto st6 st672: @@ -26971,27 +26776,33 @@ tr392: st_case_672: switch ( m.data)[( m.p)] { case 10: - goto tr765 - case 12: - goto tr535 + goto tr758 case 13: - goto tr642 + goto 
tr638 case 32: - goto tr988 + goto tr980 case 34: - goto tr31 + goto tr29 case 44: - goto tr989 + goto tr981 + case 46: + goto st670 + case 69: + goto st213 case 92: - goto st75 + goto st73 + case 101: + goto st213 + case 105: + goto st674 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st672 + goto st673 } case ( m.data)[( m.p)] >= 9: - goto tr988 + goto tr980 } goto st6 st673: @@ -27001,35 +26812,31 @@ tr392: st_case_673: switch ( m.data)[( m.p)] { case 10: - goto tr765 - case 12: - goto tr535 + goto tr758 case 13: - goto tr642 + goto tr638 case 32: - goto tr988 + goto tr980 case 34: - goto tr31 + goto tr29 case 44: - goto tr989 + goto tr981 case 46: - goto st671 + goto st670 case 69: - goto st214 + goto st213 case 92: - goto st75 + goto st73 case 101: - goto st214 - case 105: - goto st675 + goto st213 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st674 + goto st673 } case ( m.data)[( m.p)] >= 9: - goto tr988 + goto tr980 } goto st6 st674: @@ -27039,33 +26846,20 @@ tr392: st_case_674: switch ( m.data)[( m.p)] { case 10: - goto tr765 - case 12: - goto tr535 + goto tr791 case 13: - goto tr642 + goto tr793 case 32: - goto tr988 + goto tr985 case 34: - goto tr31 + goto tr29 case 44: - goto tr989 - case 46: - goto st671 - case 69: - goto st214 + goto tr986 case 92: - goto st75 - case 101: - goto st214 + goto st73 } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st674 - } - case ( m.data)[( m.p)] >= 9: - goto tr988 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr985 } goto st6 st675: @@ -27075,136 +26869,150 @@ tr392: st_case_675: switch ( m.data)[( m.p)] { case 10: - goto tr798 - case 12: - goto tr932 + goto tr758 case 13: - goto tr800 + goto tr638 case 32: - goto tr993 + goto tr980 case 34: - goto tr31 + goto tr29 case 44: - goto tr994 + goto tr981 + case 46: + goto st670 + case 69: + goto st213 case 92: - goto st75 + goto st73 + case 101: + goto st213 + case 105: + goto st674 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr993 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st675 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 } goto st6 +tr391: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st676 st676: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof676 } st_case_676: +//line plugins/parsers/influx/machine.go:26913 switch ( m.data)[( m.p)] { case 10: - goto tr765 - case 12: - goto tr535 + goto tr758 case 13: - goto tr642 + goto tr638 case 32: - goto tr988 + goto tr980 case 34: - goto tr31 + goto tr29 case 44: - goto tr989 + goto tr981 case 46: - goto st671 + goto st670 case 69: - goto st214 + goto st213 case 92: - goto st75 + goto st73 case 101: - goto st214 + goto st213 case 105: - goto st675 + goto st674 + case 117: + goto st677 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st676 + goto st673 } case ( m.data)[( m.p)] >= 9: - goto tr988 + goto tr980 } goto st6 -tr393: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st677 st677: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof677 } st_case_677: -//line plugins/parsers/influx/machine.go:27146 switch ( m.data)[( m.p)] { case 10: - goto tr765 - case 12: - goto tr535 + goto tr797 
case 13: - goto tr642 + goto tr799 case 32: goto tr988 case 34: - goto tr31 + goto tr29 case 44: goto tr989 - case 46: - goto st671 - case 69: - goto st214 case 92: - goto st75 - case 101: - goto st214 - case 105: - goto st675 - case 117: - goto st678 + goto st73 } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st674 - } - case ( m.data)[( m.p)] >= 9: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr988 } goto st6 +tr392: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st678 st678: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof678 } st_case_678: +//line plugins/parsers/influx/machine.go:26981 switch ( m.data)[( m.p)] { case 10: - goto tr804 - case 12: - goto tr935 + goto tr758 case 13: - goto tr806 + goto tr638 case 32: - goto tr996 + goto tr980 case 34: - goto tr31 + goto tr29 case 44: - goto tr997 + goto tr981 + case 46: + goto st670 + case 69: + goto st213 case 92: - goto st75 + goto st73 + case 101: + goto st213 + case 105: + goto st674 + case 117: + goto st677 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr996 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st678 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 } goto st6 -tr394: -//line plugins/parsers/influx/machine.go.rl:20 +tr110: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -27214,76 +27022,43 @@ tr394: goto _test_eof679 } st_case_679: -//line plugins/parsers/influx/machine.go:27218 +//line plugins/parsers/influx/machine.go:27026 switch ( m.data)[( m.p)] { case 10: - goto tr765 - case 12: - goto tr535 + goto tr803 case 13: - goto tr642 + goto tr805 case 32: - goto tr988 + goto tr991 case 34: - goto tr31 + goto tr29 case 44: - goto tr989 - case 46: - goto st671 - case 69: - goto st214 + goto tr992 + case 65: + goto st215 case 92: - goto st75 - case 101: - goto st214 - case 105: - goto st675 - case 117: - goto st678 + goto st73 + case 97: + goto st218 } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st679 - } - case ( m.data)[( m.p)] >= 9: - goto tr988 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr991 } goto st6 -tr112: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st680 - st680: + st215: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof680 + goto _test_eof215 } - st_case_680: -//line plugins/parsers/influx/machine.go:27265 + st_case_215: switch ( m.data)[( m.p)] { case 10: - goto tr810 - case 12: - goto tr939 - case 13: - goto tr812 - case 32: - goto tr999 + goto tr28 case 34: - goto tr31 - case 44: - goto tr1000 - case 65: + goto tr29 + case 76: goto st216 case 92: - goto st75 - case 97: - goto st219 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr999 + goto st73 } goto st6 st216: @@ -27293,17 +27068,13 @@ tr112: st_case_216: switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 - case 76: + goto tr29 + case 83: goto st217 case 92: - goto st75 + goto st73 } goto st6 st217: @@ -27313,17 +27084,36 @@ tr112: st_case_217: switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 - case 83: - goto st218 + goto tr29 + case 69: + goto st680 case 92: - goto st75 + goto st73 + } + goto st6 + st680: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof680 + } + st_case_680: + switch ( 
m.data)[( m.p)] { + case 10: + goto tr803 + case 13: + goto tr805 + case 32: + goto tr991 + case 34: + goto tr29 + case 44: + goto tr992 + case 92: + goto st73 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr991 } goto st6 st218: @@ -27333,42 +27123,13 @@ tr112: st_case_218: switch ( m.data)[( m.p)] { case 10: + goto tr28 + case 34: goto tr29 - case 12: - goto tr8 - case 13: - goto st7 - case 34: - goto tr31 - case 69: - goto st681 case 92: - goto st75 - } - goto st6 - st681: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof681 - } - st_case_681: - switch ( m.data)[( m.p)] { - case 10: - goto tr810 - case 12: - goto tr939 - case 13: - goto tr812 - case 32: - goto tr999 - case 34: - goto tr31 - case 44: - goto tr1000 - case 92: - goto st75 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr999 + goto st73 + case 108: + goto st219 } goto st6 st219: @@ -27378,16 +27139,12 @@ tr112: st_case_219: switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 + goto tr29 case 92: - goto st75 - case 108: + goto st73 + case 115: goto st220 } goto st6 @@ -27398,17 +27155,47 @@ tr112: st_case_220: switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 + goto tr29 case 92: - goto st75 - case 115: + goto st73 + case 101: + goto st680 + } + goto st6 +tr111: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st681 + st681: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof681 + } + st_case_681: +//line plugins/parsers/influx/machine.go:27179 + switch ( m.data)[( m.p)] { + case 10: + goto tr803 + case 13: + goto tr805 + case 32: + goto tr991 + case 34: + goto tr29 + case 44: + goto tr992 + case 82: goto st221 + case 92: + goto st73 + case 114: + goto st222 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr991 } goto st6 st221: @@ -27418,53 +27205,13 @@ tr112: st_case_221: switch ( m.data)[( m.p)] { case 10: + goto tr28 + case 34: goto tr29 - case 12: - goto tr8 - case 13: - goto st7 - case 34: - goto tr31 + case 85: + goto st217 case 92: - goto st75 - case 101: - goto st681 - } - goto st6 -tr113: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st682 - st682: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof682 - } - st_case_682: -//line plugins/parsers/influx/machine.go:27446 - switch ( m.data)[( m.p)] { - case 10: - goto tr810 - case 12: - goto tr939 - case 13: - goto tr812 - case 32: - goto tr999 - case 34: - goto tr31 - case 44: - goto tr1000 - case 82: - goto st222 - case 92: - goto st75 - case 114: - goto st223 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr999 + goto st73 } goto st6 st222: @@ -27474,41 +27221,49 @@ tr113: st_case_222: switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 - case 85: - goto st218 + goto tr29 case 92: - goto st75 + goto st73 + case 117: + goto st220 } goto st6 - st223: +tr112: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st682 + st682: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof223 + goto _test_eof682 } - st_case_223: + st_case_682: +//line plugins/parsers/influx/machine.go:27245 switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 + goto tr803 case 13: - goto st7 + goto tr805 + case 32: + goto tr991 case 34: - goto tr31 + goto tr29 + case 44: + goto tr992 case 92: - goto st75 - case 117: 
- goto st221 + goto st73 + case 97: + goto st218 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr991 } goto st6 -tr114: -//line plugins/parsers/influx/machine.go.rl:20 +tr113: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -27518,80 +27273,44 @@ tr114: goto _test_eof683 } st_case_683: -//line plugins/parsers/influx/machine.go:27522 +//line plugins/parsers/influx/machine.go:27277 switch ( m.data)[( m.p)] { case 10: - goto tr810 - case 12: - goto tr939 + goto tr803 case 13: - goto tr812 + goto tr805 case 32: - goto tr999 + goto tr991 case 34: - goto tr31 + goto tr29 case 44: - goto tr1000 + goto tr992 case 92: - goto st75 - case 97: - goto st219 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr999 - } - goto st6 -tr115: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st684 - st684: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof684 - } - st_case_684: -//line plugins/parsers/influx/machine.go:27556 - switch ( m.data)[( m.p)] { - case 10: - goto tr810 - case 12: - goto tr939 - case 13: - goto tr812 - case 32: - goto tr999 - case 34: - goto tr31 - case 44: - goto tr1000 - case 92: - goto st75 + goto st73 case 114: - goto st223 + goto st222 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr999 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr991 } goto st6 -tr387: -//line plugins/parsers/influx/machine.go.rl:20 +tr385: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st224 - st224: + goto st223 + st223: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof224 + goto _test_eof223 } - st_case_224: -//line plugins/parsers/influx/machine.go:27590 + st_case_223: +//line plugins/parsers/influx/machine.go:27309 switch ( m.data)[( m.p)] { case 34: - goto st210 + goto st209 case 92: - goto st210 + goto st209 } switch { case ( m.data)[( m.p)] > 10: @@ -27602,8 +27321,36 @@ tr387: goto tr8 } goto st3 -tr108: -//line plugins/parsers/influx/machine.go.rl:20 +tr106: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st224 + st224: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof224 + } + st_case_224: +//line plugins/parsers/influx/machine.go:27336 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 46: + goto st225 + case 48: + goto st686 + case 92: + goto st73 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st689 + } + goto st6 +tr107: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -27613,50 +27360,84 @@ tr108: goto _test_eof225 } st_case_225: -//line plugins/parsers/influx/machine.go:27617 +//line plugins/parsers/influx/machine.go:27364 switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 - case 46: - goto st226 - case 48: - goto st687 + goto tr29 case 92: - goto st75 + goto st73 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st690 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st684 + } + goto st6 + st684: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof684 + } + st_case_684: + switch ( m.data)[( m.p)] { + case 10: + goto tr636 + case 13: + goto tr638 + case 32: + goto tr980 + case 34: + goto tr29 + case 44: + goto tr981 + case 69: + goto st226 + case 92: + goto st73 + case 101: + goto st226 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st684 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 } goto st6 -tr109: 
-//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st226 st226: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof226 } st_case_226: -//line plugins/parsers/influx/machine.go:27649 switch ( m.data)[( m.p)] { case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 + goto tr28 case 34: - goto tr31 + goto tr356 + case 43: + goto st227 + case 45: + goto st227 case 92: - goto st75 + goto st73 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st685 + } + goto st6 + st227: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof227 + } + st_case_227: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st685 @@ -27669,77 +27450,25 @@ tr109: st_case_685: switch ( m.data)[( m.p)] { case 10: - goto tr639 - case 12: - goto tr535 + goto tr636 case 13: - goto tr642 + goto tr638 case 32: - goto tr988 + goto tr980 case 34: - goto tr31 + goto tr29 case 44: - goto tr989 - case 69: - goto st227 + goto tr981 case 92: - goto st75 - case 101: - goto st227 + goto st73 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st685 } case ( m.data)[( m.p)] >= 9: - goto tr988 - } - goto st6 - st227: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof227 - } - st_case_227: - switch ( m.data)[( m.p)] { - case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 - case 34: - goto tr358 - case 43: - goto st228 - case 45: - goto st228 - case 92: - goto st75 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st686 - } - goto st6 - st228: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof228 - } - st_case_228: - switch ( m.data)[( m.p)] { - case 10: - goto tr29 - case 12: - goto tr8 - case 13: - goto st7 - case 34: - goto tr31 - case 92: - goto st75 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st686 + goto tr980 } goto st6 st686: @@ -27749,27 +27478,33 @@ tr109: st_case_686: switch ( m.data)[( m.p)] { case 10: - goto tr639 - case 12: - goto tr535 + goto tr636 case 13: - goto tr642 + goto tr638 case 32: - goto tr988 + goto tr980 case 34: - goto tr31 + goto tr29 case 44: - goto tr989 + goto tr981 + case 46: + goto st684 + case 69: + goto st226 case 92: - goto st75 + goto st73 + case 101: + goto st226 + case 105: + goto st688 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st686 + goto st687 } case ( m.data)[( m.p)] >= 9: - goto tr988 + goto tr980 } goto st6 st687: @@ -27779,35 +27514,31 @@ tr109: st_case_687: switch ( m.data)[( m.p)] { case 10: - goto tr639 - case 12: - goto tr535 + goto tr636 case 13: - goto tr642 + goto tr638 case 32: - goto tr988 + goto tr980 case 34: - goto tr31 + goto tr29 case 44: - goto tr989 + goto tr981 case 46: - goto st685 + goto st684 case 69: - goto st227 + goto st226 case 92: - goto st75 + goto st73 case 101: - goto st227 - case 105: - goto st689 + goto st226 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st688 + goto st687 } case ( m.data)[( m.p)] >= 9: - goto tr988 + goto tr980 } goto st6 st688: @@ -27817,33 +27548,20 @@ tr109: st_case_688: switch ( m.data)[( m.p)] { case 10: - goto tr639 - case 12: - goto tr535 + goto tr817 case 13: - goto tr642 + goto tr793 case 32: - goto tr988 + goto tr985 case 34: - goto tr31 + goto tr29 case 44: - goto 
tr989 - case 46: - goto st685 - case 69: - goto st227 + goto tr986 case 92: - goto st75 - case 101: - goto st227 + goto st73 } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st688 - } - case ( m.data)[( m.p)] >= 9: - goto tr988 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr985 } goto st6 st689: @@ -27853,183 +27571,184 @@ tr109: st_case_689: switch ( m.data)[( m.p)] { case 10: - goto tr823 - case 12: - goto tr932 + goto tr636 case 13: - goto tr800 + goto tr638 case 32: - goto tr993 + goto tr980 case 34: - goto tr31 + goto tr29 case 44: - goto tr994 + goto tr981 + case 46: + goto st684 + case 69: + goto st226 case 92: - goto st75 + goto st73 + case 101: + goto st226 + case 105: + goto st688 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr993 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st689 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 } goto st6 +tr108: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st690 st690: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof690 } st_case_690: +//line plugins/parsers/influx/machine.go:27615 switch ( m.data)[( m.p)] { case 10: - goto tr639 - case 12: - goto tr535 + goto tr636 case 13: - goto tr642 + goto tr638 case 32: - goto tr988 + goto tr980 case 34: - goto tr31 + goto tr29 case 44: - goto tr989 + goto tr981 case 46: - goto st685 + goto st684 case 69: - goto st227 + goto st226 case 92: - goto st75 + goto st73 case 101: - goto st227 + goto st226 case 105: - goto st689 + goto st688 + case 117: + goto st691 } switch { - case ( m.data)[( m.p)] > 11: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st690 + goto st687 } case ( m.data)[( m.p)] >= 9: - goto tr988 + goto tr980 } goto st6 -tr110: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st691 st691: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof691 } st_case_691: -//line plugins/parsers/influx/machine.go:27924 switch ( m.data)[( m.p)] { case 10: - goto tr639 - case 12: - goto tr535 + goto tr822 case 13: - goto tr642 + goto tr799 case 32: goto tr988 case 34: - goto tr31 + goto tr29 case 44: goto tr989 - case 46: - goto st685 - case 69: - goto st227 case 92: - goto st75 - case 101: - goto st227 - case 105: - goto st689 - case 117: - goto st692 + goto st73 } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st688 - } - case ( m.data)[( m.p)] >= 9: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr988 } goto st6 +tr109: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st692 st692: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof692 } st_case_692: +//line plugins/parsers/influx/machine.go:27683 switch ( m.data)[( m.p)] { case 10: - goto tr829 - case 12: - goto tr935 + goto tr636 case 13: - goto tr806 + goto tr638 case 32: - goto tr996 + goto tr980 case 34: - goto tr31 + goto tr29 case 44: - goto tr997 + goto tr981 + case 46: + goto st684 + case 69: + goto st226 case 92: - goto st75 + goto st73 + case 101: + goto st226 + case 105: + goto st688 + case 117: + goto st691 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr996 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st692 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 } goto st6 -tr111: -//line plugins/parsers/influx/machine.go.rl:20 
+tr94: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st693 - st693: + goto st228 + st228: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof693 + goto _test_eof228 } - st_case_693: -//line plugins/parsers/influx/machine.go:27996 + st_case_228: +//line plugins/parsers/influx/machine.go:27728 switch ( m.data)[( m.p)] { case 10: - goto tr639 - case 12: - goto tr535 + goto tr28 + case 11: + goto tr94 case 13: - goto tr642 + goto st6 case 32: - goto tr988 + goto st30 case 34: - goto tr31 + goto tr95 case 44: - goto tr989 - case 46: - goto st685 - case 69: - goto st227 + goto st6 + case 61: + goto tr99 case 92: - goto st75 - case 101: - goto st227 - case 105: - goto st689 - case 117: - goto st692 + goto tr96 } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st693 - } - case ( m.data)[( m.p)] >= 9: - goto tr988 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st30 } - goto st6 -tr96: -//line plugins/parsers/influx/machine.go.rl:20 + goto tr92 +tr72: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -28039,32 +27758,36 @@ tr96: goto _test_eof229 } st_case_229: -//line plugins/parsers/influx/machine.go:28043 +//line plugins/parsers/influx/machine.go:27762 switch ( m.data)[( m.p)] { - case 9: - goto st31 case 10: - goto tr29 + goto tr45 case 11: - goto tr96 - case 12: - goto st2 + goto tr3 case 13: - goto st7 + goto tr45 case 32: - goto st31 - case 34: - goto tr97 + goto tr1 case 44: - goto st6 - case 61: - goto tr101 + goto tr4 + case 46: + goto st230 + case 48: + goto st694 case 92: - goto tr98 + goto st94 } - goto tr94 -tr74: -//line plugins/parsers/influx/machine.go.rl:20 + switch { + case ( m.data)[( m.p)] > 12: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st697 + } + case ( m.data)[( m.p)] >= 9: + goto tr1 + } + goto st1 +tr73: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -28074,64 +27797,119 @@ tr74: goto _test_eof230 } st_case_230: -//line plugins/parsers/influx/machine.go:28078 +//line plugins/parsers/influx/machine.go:27801 switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: goto tr3 case 13: - goto tr47 + goto tr45 case 32: goto tr1 case 44: goto tr4 - case 46: - goto st231 - case 48: - goto st695 case 92: - goto st95 + goto st94 } switch { case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st698 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st693 } case ( m.data)[( m.p)] >= 9: goto tr1 } goto st1 -tr75: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st231 + st693: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof693 + } + st_case_693: + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 11: + goto tr812 + case 13: + goto tr732 + case 32: + goto tr811 + case 44: + goto tr813 + case 69: + goto st231 + case 92: + goto st94 + case 101: + goto st231 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st693 + } + case ( m.data)[( m.p)] >= 9: + goto tr811 + } + goto st1 st231: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof231 } st_case_231: -//line plugins/parsers/influx/machine.go:28117 switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: goto tr3 case 13: - goto tr47 + goto tr45 + case 32: + goto tr1 + case 34: + goto st232 + case 44: + goto tr4 + case 92: + goto st94 + } + switch { + case ( m.data)[( m.p)] < 43: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 
{ + goto tr1 + } + case ( m.data)[( m.p)] > 45: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st530 + } + default: + goto st232 + } + goto st1 + st232: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof232 + } + st_case_232: + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr3 + case 13: + goto tr45 case 32: goto tr1 case 44: goto tr4 case 92: - goto st95 + goto st94 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st694 + goto st530 } case ( m.data)[( m.p)] >= 9: goto tr1 @@ -28144,91 +27922,33 @@ tr75: st_case_694: switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr730 case 11: - goto tr818 + goto tr812 case 13: - goto tr736 + goto tr732 case 32: - goto tr641 + goto tr811 case 44: - goto tr819 + goto tr813 + case 46: + goto st693 case 69: - goto st232 + goto st231 case 92: - goto st95 + goto st94 case 101: - goto st232 + goto st231 + case 105: + goto st696 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st694 + goto st695 } case ( m.data)[( m.p)] >= 9: - goto tr641 - } - goto st1 - st232: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof232 - } - st_case_232: - switch ( m.data)[( m.p)] { - case 10: - goto tr47 - case 11: - goto tr3 - case 13: - goto tr47 - case 32: - goto tr1 - case 34: - goto st233 - case 44: - goto tr4 - case 92: - goto st95 - } - switch { - case ( m.data)[( m.p)] < 43: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st531 - } - default: - goto st233 - } - goto st1 - st233: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof233 - } - st_case_233: - switch ( m.data)[( m.p)] { - case 10: - goto tr47 - case 11: - goto tr3 - case 13: - goto tr47 - case 32: - goto tr1 - case 44: - goto tr4 - case 92: - goto st95 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st531 - } - case ( m.data)[( m.p)] >= 9: - goto tr1 + goto tr811 } goto st1 st695: @@ -28238,33 +27958,31 @@ tr75: st_case_695: switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr730 case 11: - goto tr818 + goto tr812 case 13: - goto tr736 + goto tr732 case 32: - goto tr641 + goto tr811 case 44: - goto tr819 + goto tr813 case 46: - goto st694 + goto st693 case 69: - goto st232 + goto st231 case 92: - goto st95 + goto st94 case 101: - goto st232 - case 105: - goto st697 + goto st231 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st696 + goto st695 } case ( m.data)[( m.p)] >= 9: - goto tr641 + goto tr811 } goto st1 st696: @@ -28274,31 +27992,20 @@ tr75: st_case_696: switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr942 case 11: - goto tr818 + goto tr1006 case 13: - goto tr736 + goto tr944 case 32: - goto tr641 + goto tr1005 case 44: - goto tr819 - case 46: - goto st694 - case 69: - goto st232 + goto tr1007 case 92: - goto st95 - case 101: - goto st232 + goto st94 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st696 - } - case ( m.data)[( m.p)] >= 9: - goto tr641 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1005 } goto st1 st697: @@ -28308,128 +28015,150 @@ tr75: st_case_697: switch ( m.data)[( m.p)] { case 10: - goto tr952 + goto tr730 case 11: - goto tr1013 + goto tr812 case 13: - goto tr954 + goto tr732 case 32: - goto tr825 + goto 
tr811 case 44: - goto tr1014 + goto tr813 + case 46: + goto st693 + case 69: + goto st231 case 92: - goto st95 + goto st94 + case 101: + goto st231 + case 105: + goto st696 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr825 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st697 + } + case ( m.data)[( m.p)] >= 9: + goto tr811 } goto st1 +tr74: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st698 st698: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof698 } st_case_698: +//line plugins/parsers/influx/machine.go:28059 switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr730 case 11: - goto tr818 + goto tr812 case 13: - goto tr736 + goto tr732 case 32: - goto tr641 + goto tr811 case 44: - goto tr819 + goto tr813 case 46: - goto st694 + goto st693 case 69: - goto st232 + goto st231 case 92: - goto st95 + goto st94 case 101: - goto st232 + goto st231 case 105: - goto st697 + goto st696 + case 117: + goto st699 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st698 + goto st695 } case ( m.data)[( m.p)] >= 9: - goto tr641 + goto tr811 } goto st1 -tr76: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st699 st699: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof699 } st_case_699: -//line plugins/parsers/influx/machine.go:28375 switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr948 case 11: - goto tr818 + goto tr1010 case 13: - goto tr736 + goto tr950 case 32: - goto tr641 + goto tr1009 case 44: - goto tr819 - case 46: - goto st694 - case 69: - goto st232 + goto tr1011 case 92: - goto st95 - case 101: - goto st232 - case 105: - goto st697 - case 117: - goto st700 + goto st94 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st696 - } - case ( m.data)[( m.p)] >= 9: - goto tr641 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1009 } goto st1 +tr75: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st700 st700: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof700 } st_case_700: +//line plugins/parsers/influx/machine.go:28127 switch ( m.data)[( m.p)] { case 10: - goto tr957 + goto tr730 case 11: - goto tr1016 + goto tr812 case 13: - goto tr959 + goto tr732 case 32: - goto tr831 + goto tr811 case 44: - goto tr1017 + goto tr813 + case 46: + goto st693 + case 69: + goto st231 case 92: - goto st95 + goto st94 + case 101: + goto st231 + case 105: + goto st696 + case 117: + goto st699 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr831 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st700 + } + case ( m.data)[( m.p)] >= 9: + goto tr811 } goto st1 -tr77: -//line plugins/parsers/influx/machine.go.rl:20 +tr76: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -28439,72 +28168,52 @@ tr77: goto _test_eof701 } st_case_701: -//line plugins/parsers/influx/machine.go:28443 +//line plugins/parsers/influx/machine.go:28172 switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr954 case 11: - goto tr818 + goto tr1014 case 13: - goto tr736 + goto tr956 case 32: - goto tr641 + goto tr1013 case 44: - goto tr819 - case 46: - goto st694 - case 69: - goto st232 - case 92: - goto st95 - case 101: - goto st232 - case 105: - goto st697 - case 117: - goto st700 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 
57 { - goto st701 - } - case ( m.data)[( m.p)] >= 9: - goto tr641 - } - goto st1 -tr78: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st702 - st702: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof702 - } - st_case_702: -//line plugins/parsers/influx/machine.go:28488 - switch ( m.data)[( m.p)] { - case 10: - goto tr962 - case 11: - goto tr1019 - case 13: - goto tr964 - case 32: - goto tr836 - case 44: - goto tr1020 + goto tr1015 case 65: - goto st234 + goto st233 case 92: - goto st95 + goto st94 case 97: - goto st237 + goto st236 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr836 + goto tr1013 + } + goto st1 + st233: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof233 + } + st_case_233: + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr3 + case 13: + goto tr45 + case 32: + goto tr1 + case 44: + goto tr4 + case 76: + goto st234 + case 92: + goto st94 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 } goto st1 st234: @@ -28514,19 +28223,19 @@ tr78: st_case_234: switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: goto tr3 case 13: - goto tr47 + goto tr45 case 32: goto tr1 case 44: goto tr4 - case 76: + case 83: goto st235 case 92: - goto st95 + goto st94 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 @@ -28539,24 +28248,47 @@ tr78: st_case_235: switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: goto tr3 case 13: - goto tr47 + goto tr45 case 32: goto tr1 case 44: goto tr4 - case 83: - goto st236 + case 69: + goto st702 case 92: - goto st95 + goto st94 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 } goto st1 + st702: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof702 + } + st_case_702: + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 11: + goto tr1014 + case 13: + goto tr956 + case 32: + goto tr1013 + case 44: + goto tr1015 + case 92: + goto st94 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1013 + } + goto st1 st236: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof236 @@ -28564,47 +28296,24 @@ tr78: st_case_236: switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: goto tr3 case 13: - goto tr47 + goto tr45 case 32: goto tr1 case 44: goto tr4 - case 69: - goto st703 case 92: - goto st95 + goto st94 + case 108: + goto st237 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 } goto st1 - st703: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof703 - } - st_case_703: - switch ( m.data)[( m.p)] { - case 10: - goto tr962 - case 11: - goto tr1019 - case 13: - goto tr964 - case 32: - goto tr836 - case 44: - goto tr1020 - case 92: - goto st95 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr836 - } - goto st1 st237: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof237 @@ -28612,18 +28321,18 @@ tr78: st_case_237: switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: goto tr3 case 13: - goto tr47 + goto tr45 case 32: goto tr1 case 44: goto tr4 case 92: - goto st95 - case 108: + goto st94 + case 115: goto st238 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { @@ -28637,24 +28346,58 @@ tr78: st_case_238: switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: goto tr3 case 13: - goto tr47 + goto tr45 case 32: goto tr1 case 44: goto tr4 case 92: - goto st95 - case 115: - goto st239 + goto st94 + case 101: + goto st702 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 } goto st1 +tr77: +//line 
plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st703 + st703: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof703 + } + st_case_703: +//line plugins/parsers/influx/machine.go:28379 + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 11: + goto tr1014 + case 13: + goto tr956 + case 32: + goto tr1013 + case 44: + goto tr1015 + case 82: + goto st239 + case 92: + goto st94 + case 114: + goto st240 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1013 + } + goto st1 st239: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof239 @@ -28662,58 +28405,24 @@ tr78: st_case_239: switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: goto tr3 case 13: - goto tr47 + goto tr45 case 32: goto tr1 case 44: goto tr4 + case 85: + goto st235 case 92: - goto st95 - case 101: - goto st703 + goto st94 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 } goto st1 -tr79: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st704 - st704: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof704 - } - st_case_704: -//line plugins/parsers/influx/machine.go:28695 - switch ( m.data)[( m.p)] { - case 10: - goto tr962 - case 11: - goto tr1019 - case 13: - goto tr964 - case 32: - goto tr836 - case 44: - goto tr1020 - case 82: - goto st240 - case 92: - goto st95 - case 114: - goto st241 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr836 - } - goto st1 st240: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof240 @@ -28721,51 +28430,58 @@ tr79: st_case_240: switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr45 case 11: goto tr3 case 13: - goto tr47 + goto tr45 case 32: goto tr1 case 44: goto tr4 - case 85: - goto st236 case 92: - goto st95 + goto st94 + case 117: + goto st238 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 } goto st1 - st241: +tr78: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st704 + st704: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof241 + goto _test_eof704 } - st_case_241: + st_case_704: +//line plugins/parsers/influx/machine.go:28463 switch ( m.data)[( m.p)] { case 10: - goto tr47 + goto tr954 case 11: - goto tr3 + goto tr1014 case 13: - goto tr47 + goto tr956 case 32: - goto tr1 + goto tr1013 case 44: - goto tr4 + goto tr1015 case 92: - goto st95 - case 117: - goto st239 + goto st94 + case 97: + goto st236 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 + goto tr1013 } goto st1 -tr80: -//line plugins/parsers/influx/machine.go.rl:20 +tr79: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -28775,78 +28491,89 @@ tr80: goto _test_eof705 } st_case_705: -//line plugins/parsers/influx/machine.go:28779 +//line plugins/parsers/influx/machine.go:28495 switch ( m.data)[( m.p)] { case 10: - goto tr962 + goto tr954 case 11: - goto tr1019 + goto tr1014 case 13: - goto tr964 + goto tr956 case 32: - goto tr836 + goto tr1013 case 44: - goto tr1020 + goto tr1015 case 92: - goto st95 - case 97: - goto st237 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr836 - } - goto st1 -tr81: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st706 - st706: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof706 - } - st_case_706: -//line plugins/parsers/influx/machine.go:28811 - switch ( m.data)[( m.p)] { - case 10: - goto tr962 - case 11: - goto tr1019 - case 13: - goto tr964 - case 32: - goto tr836 - case 44: - goto tr1020 - case 92: - goto st95 + goto st94 case 114: - goto st241 + goto st240 } if 
9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr836 + goto tr1013 } goto st1 -tr44: -//line plugins/parsers/influx/machine.go.rl:20 +tr42: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st242 -tr424: - ( m.cs) = 242 -//line plugins/parsers/influx/machine.go.rl:20 + goto st241 +tr422: + ( m.cs) = 241 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:78 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st241: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof241 + } + st_case_241: +//line plugins/parsers/influx/machine.go:28544 + switch ( m.data)[( m.p)] { + case 10: + goto tr421 + case 11: + goto tr422 + case 13: + goto tr421 + case 32: + goto tr36 + case 44: + goto tr4 + case 61: + goto tr423 + case 92: + goto tr43 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr36 + } + goto tr39 +tr38: + ( m.cs) = 242 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; {( m.p)++; goto _out } } @@ -28856,72 +28583,465 @@ tr424: goto _test_eof242 } st_case_242: -//line plugins/parsers/influx/machine.go:28860 +//line plugins/parsers/influx/machine.go:28587 switch ( m.data)[( m.p)] { case 10: - goto tr423 + goto tr421 case 11: - goto tr424 + goto tr422 case 13: - goto tr423 + goto tr421 case 32: - goto tr38 + goto tr36 case 44: goto tr4 case 61: - goto tr425 + goto tr31 case 92: - goto tr45 + goto tr43 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr38 + goto tr36 } - goto tr41 -tr40: - ( m.cs) = 243 -//line plugins/parsers/influx/machine.go.rl:20 + goto tr39 +tr462: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again + goto st243 st243: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof243 } st_case_243: -//line plugins/parsers/influx/machine.go:28903 +//line plugins/parsers/influx/machine.go:28619 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st706 + } + goto tr424 +tr463: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st706 + st706: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof706 + } + st_case_706: +//line plugins/parsers/influx/machine.go:28635 switch ( m.data)[( m.p)] { case 10: - goto tr423 - case 11: - goto tr424 + goto tr468 case 13: - goto tr423 + goto tr470 case 32: - goto tr38 - case 44: - goto tr4 - case 61: - goto tr33 - case 92: - goto tr45 + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st707 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st707: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof707 + } + st_case_707: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st708 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st708: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof708 + } + st_case_708: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + 
goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st709 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st709: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof709 + } + st_case_709: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st710 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st710: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof710 + } + st_case_710: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st711 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st711: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof711 + } + st_case_711: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st712 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st712: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof712 + } + st_case_712: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st713 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st713: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof713 + } + st_case_713: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st714 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st714: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof714 + } + st_case_714: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st715 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st715: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof715 + } + st_case_715: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st716 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st716: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof716 + } + st_case_716: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st717 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st717: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof717 + } + st_case_717: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st718 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st718: + if ( m.p)++; ( m.p) == ( m.pe) { 
+ goto _test_eof718 + } + st_case_718: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st719 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st719: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof719 + } + st_case_719: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st720 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st720: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof720 + } + st_case_720: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st721 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st721: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof721 + } + st_case_721: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st722 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st722: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof722 + } + st_case_722: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st723 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st723: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof723 + } + st_case_723: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st724 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st724: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof724 + } + st_case_724: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr38 + goto tr467 } - goto tr41 -tr464: -//line plugins/parsers/influx/machine.go.rl:20 + goto tr424 +tr15: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -28931,433 +29051,19 @@ tr464: goto _test_eof244 } st_case_244: -//line plugins/parsers/influx/machine.go:28935 - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st707 - } - goto tr426 -tr465: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st707 - st707: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof707 - } - st_case_707: -//line plugins/parsers/influx/machine.go:28951 +//line plugins/parsers/influx/machine.go:29055 switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 13: - goto tr472 - case 32: - goto tr469 + case 46: + goto st245 + case 48: + goto st726 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st708 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st729 } - goto tr426 - st708: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof708 - } - st_case_708: - switch ( m.data)[( m.p)] { - case 10: 
- goto tr470 - case 13: - goto tr472 - case 32: - goto tr469 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st709 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 - } - goto tr426 - st709: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof709 - } - st_case_709: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 13: - goto tr472 - case 32: - goto tr469 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st710 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 - } - goto tr426 - st710: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof710 - } - st_case_710: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 13: - goto tr472 - case 32: - goto tr469 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st711 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 - } - goto tr426 - st711: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof711 - } - st_case_711: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 13: - goto tr472 - case 32: - goto tr469 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st712 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 - } - goto tr426 - st712: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof712 - } - st_case_712: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 13: - goto tr472 - case 32: - goto tr469 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st713 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 - } - goto tr426 - st713: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof713 - } - st_case_713: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 13: - goto tr472 - case 32: - goto tr469 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st714 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 - } - goto tr426 - st714: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof714 - } - st_case_714: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 13: - goto tr472 - case 32: - goto tr469 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st715 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 - } - goto tr426 - st715: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof715 - } - st_case_715: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 13: - goto tr472 - case 32: - goto tr469 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st716 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 - } - goto tr426 - st716: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof716 - } - st_case_716: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 13: - goto tr472 - case 32: - goto tr469 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st717 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 - } - goto tr426 - st717: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof717 - } - st_case_717: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 13: - goto tr472 - case 32: - goto tr469 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st718 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 - } - goto tr426 - st718: - if ( 
m.p)++; ( m.p) == ( m.pe) { - goto _test_eof718 - } - st_case_718: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 13: - goto tr472 - case 32: - goto tr469 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st719 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 - } - goto tr426 - st719: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof719 - } - st_case_719: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 13: - goto tr472 - case 32: - goto tr469 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st720 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 - } - goto tr426 - st720: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof720 - } - st_case_720: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 13: - goto tr472 - case 32: - goto tr469 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st721 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 - } - goto tr426 - st721: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof721 - } - st_case_721: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 13: - goto tr472 - case 32: - goto tr469 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st722 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 - } - goto tr426 - st722: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof722 - } - st_case_722: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 13: - goto tr472 - case 32: - goto tr469 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st723 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 - } - goto tr426 - st723: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof723 - } - st_case_723: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 13: - goto tr472 - case 32: - goto tr469 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st724 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 - } - goto tr426 - st724: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof724 - } - st_case_724: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 13: - goto tr472 - case 32: - goto tr469 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st725 - } - case ( m.data)[( m.p)] >= 9: - goto tr469 - } - goto tr426 - st725: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof725 - } - st_case_725: - switch ( m.data)[( m.p)] { - case 10: - goto tr470 - case 13: - goto tr472 - case 32: - goto tr469 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr469 - } - goto tr426 -tr15: -//line plugins/parsers/influx/machine.go.rl:20 + goto tr8 +tr16: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -29367,31 +29073,63 @@ tr15: goto _test_eof245 } st_case_245: -//line plugins/parsers/influx/machine.go:29371 - switch ( m.data)[( m.p)] { - case 46: - goto st246 - case 48: - goto st727 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st730 +//line plugins/parsers/influx/machine.go:29077 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st725 } goto tr8 -tr16: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st246 + st725: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof725 + } + st_case_725: + switch ( m.data)[( m.p)] { + case 
10: + goto tr730 + case 13: + goto tr732 + case 32: + goto tr921 + case 44: + goto tr922 + case 69: + goto st246 + case 101: + goto st246 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st725 + } + case ( m.data)[( m.p)] >= 9: + goto tr921 + } + goto tr103 st246: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof246 } st_case_246: -//line plugins/parsers/influx/machine.go:29393 + switch ( m.data)[( m.p)] { + case 34: + goto st247 + case 43: + goto st247 + case 45: + goto st247 + } if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st726 + goto st621 + } + goto tr8 + st247: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof247 + } + st_case_247: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st621 } goto tr8 st726: @@ -29401,53 +29139,31 @@ tr16: st_case_726: switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr730 case 13: - goto tr736 + goto tr732 case 32: - goto tr535 + goto tr921 case 44: - goto tr930 + goto tr922 + case 46: + goto st725 case 69: - goto st247 + goto st246 case 101: - goto st247 + goto st246 + case 105: + goto st728 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st726 + goto st727 } case ( m.data)[( m.p)] >= 9: - goto tr535 + goto tr921 } - goto tr105 - st247: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof247 - } - st_case_247: - switch ( m.data)[( m.p)] { - case 34: - goto st248 - case 43: - goto st248 - case 45: - goto st248 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st622 - } - goto tr8 - st248: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof248 - } - st_case_248: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st622 - } - goto tr8 + goto tr103 st727: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof727 @@ -29455,31 +29171,29 @@ tr16: st_case_727: switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr730 case 13: - goto tr736 + goto tr732 case 32: - goto tr535 + goto tr921 case 44: - goto tr930 + goto tr922 case 46: - goto st726 + goto st725 case 69: - goto st247 + goto st246 case 101: - goto st247 - case 105: - goto st729 + goto st246 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st728 + goto st727 } case ( m.data)[( m.p)] >= 9: - goto tr535 + goto tr921 } - goto tr105 + goto tr103 st728: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof728 @@ -29487,29 +29201,18 @@ tr16: st_case_728: switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr942 case 13: - goto tr736 + goto tr944 case 32: - goto tr535 + goto tr1041 case 44: - goto tr930 - case 46: - goto st726 - case 69: - goto st247 - case 101: - goto st247 + goto tr1042 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st728 - } - case ( m.data)[( m.p)] >= 9: - goto tr535 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1041 } - goto tr105 + goto tr103 st729: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof729 @@ -29517,112 +29220,134 @@ tr16: st_case_729: switch ( m.data)[( m.p)] { case 10: - goto tr952 + goto tr730 case 13: - goto tr954 + goto tr732 case 32: - goto tr932 + goto tr921 case 44: - goto tr1046 + goto tr922 + case 46: + goto st725 + case 69: + goto st246 + case 101: + goto st246 + case 105: + goto st728 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr932 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { 
+ goto st729 + } + case ( m.data)[( m.p)] >= 9: + goto tr921 } - goto tr105 + goto tr103 +tr17: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st730 st730: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof730 } st_case_730: +//line plugins/parsers/influx/machine.go:29260 switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr730 case 13: - goto tr736 + goto tr732 case 32: - goto tr535 + goto tr921 case 44: - goto tr930 + goto tr922 case 46: - goto st726 + goto st725 case 69: - goto st247 + goto st246 case 101: - goto st247 + goto st246 case 105: - goto st729 + goto st728 + case 117: + goto st731 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st730 + goto st727 } case ( m.data)[( m.p)] >= 9: - goto tr535 + goto tr921 } - goto tr105 -tr17: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st731 + goto tr103 st731: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof731 } st_case_731: -//line plugins/parsers/influx/machine.go:29576 switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr948 case 13: - goto tr736 + goto tr950 case 32: - goto tr535 + goto tr1044 case 44: - goto tr930 - case 46: - goto st726 - case 69: - goto st247 - case 101: - goto st247 - case 105: - goto st729 - case 117: - goto st732 + goto tr1045 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st728 - } - case ( m.data)[( m.p)] >= 9: - goto tr535 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1044 } - goto tr105 + goto tr103 +tr18: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st732 st732: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof732 } st_case_732: +//line plugins/parsers/influx/machine.go:29320 switch ( m.data)[( m.p)] { case 10: - goto tr957 + goto tr730 case 13: - goto tr959 + goto tr732 case 32: - goto tr935 + goto tr921 case 44: - goto tr1048 + goto tr922 + case 46: + goto st725 + case 69: + goto st246 + case 101: + goto st246 + case 105: + goto st728 + case 117: + goto st731 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr935 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st732 + } + case ( m.data)[( m.p)] >= 9: + goto tr921 } - goto tr105 -tr18: -//line plugins/parsers/influx/machine.go.rl:20 + goto tr103 +tr19: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -29632,72 +29357,40 @@ tr18: goto _test_eof733 } st_case_733: -//line plugins/parsers/influx/machine.go:29636 +//line plugins/parsers/influx/machine.go:29361 switch ( m.data)[( m.p)] { case 10: - goto tr734 + goto tr954 case 13: - goto tr736 + goto tr956 case 32: - goto tr535 + goto tr1047 case 44: - goto tr930 - case 46: - goto st726 - case 69: - goto st247 - case 101: - goto st247 - case 105: - goto st729 - case 117: - goto st732 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st733 - } - case ( m.data)[( m.p)] >= 9: - goto tr535 - } - goto tr105 -tr19: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st734 - st734: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof734 - } - st_case_734: -//line plugins/parsers/influx/machine.go:29677 - switch ( m.data)[( m.p)] { - case 10: - goto tr962 - case 13: - goto tr964 - case 32: - goto tr939 - case 44: - goto tr1050 + goto tr1048 case 65: - goto st249 + goto st248 case 97: - goto st252 + goto st251 } if 9 <= ( m.data)[( m.p)] && ( 
m.data)[( m.p)] <= 12 { - goto tr939 + goto tr1047 } - goto tr105 + goto tr103 + st248: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof248 + } + st_case_248: + if ( m.data)[( m.p)] == 76 { + goto st249 + } + goto tr8 st249: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof249 } st_case_249: - if ( m.data)[( m.p)] == 76 { + if ( m.data)[( m.p)] == 83 { goto st250 } goto tr8 @@ -29706,44 +29399,44 @@ tr19: goto _test_eof250 } st_case_250: - if ( m.data)[( m.p)] == 83 { - goto st251 + if ( m.data)[( m.p)] == 69 { + goto st734 } goto tr8 + st734: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof734 + } + st_case_734: + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 13: + goto tr956 + case 32: + goto tr1047 + case 44: + goto tr1048 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1047 + } + goto tr103 st251: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof251 } st_case_251: - if ( m.data)[( m.p)] == 69 { - goto st735 + if ( m.data)[( m.p)] == 108 { + goto st252 } goto tr8 - st735: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof735 - } - st_case_735: - switch ( m.data)[( m.p)] { - case 10: - goto tr962 - case 13: - goto tr964 - case 32: - goto tr939 - case 44: - goto tr1050 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr939 - } - goto tr105 st252: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof252 } st_case_252: - if ( m.data)[( m.p)] == 108 { + if ( m.data)[( m.p)] == 115 { goto st253 } goto tr8 @@ -29752,21 +29445,60 @@ tr19: goto _test_eof253 } st_case_253: - if ( m.data)[( m.p)] == 115 { - goto st254 + if ( m.data)[( m.p)] == 101 { + goto st734 } goto tr8 +tr20: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st735 + st735: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof735 + } + st_case_735: +//line plugins/parsers/influx/machine.go:29464 + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 13: + goto tr956 + case 32: + goto tr1047 + case 44: + goto tr1048 + case 82: + goto st254 + case 114: + goto st255 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1047 + } + goto tr103 st254: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof254 } st_case_254: - if ( m.data)[( m.p)] == 101 { - goto st735 + if ( m.data)[( m.p)] == 85 { + goto st250 } goto tr8 -tr20: -//line plugins/parsers/influx/machine.go.rl:20 + st255: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof255 + } + st_case_255: + if ( m.data)[( m.p)] == 117 { + goto st253 + } + goto tr8 +tr21: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -29776,45 +29508,25 @@ tr20: goto _test_eof736 } st_case_736: -//line plugins/parsers/influx/machine.go:29780 +//line plugins/parsers/influx/machine.go:29512 switch ( m.data)[( m.p)] { case 10: - goto tr962 + goto tr954 case 13: - goto tr964 + goto tr956 case 32: - goto tr939 + goto tr1047 case 44: - goto tr1050 - case 82: - goto st255 - case 114: - goto st256 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr939 - } - goto tr105 - st255: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof255 - } - st_case_255: - if ( m.data)[( m.p)] == 85 { + goto tr1048 + case 97: goto st251 } - goto tr8 - st256: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof256 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1047 } - st_case_256: - if ( m.data)[( m.p)] == 117 { - goto st254 - } - goto tr8 -tr21: -//line plugins/parsers/influx/machine.go.rl:20 + goto tr103 +tr22: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -29824,63 +29536,35 @@ 
tr21: goto _test_eof737 } st_case_737: -//line plugins/parsers/influx/machine.go:29828 +//line plugins/parsers/influx/machine.go:29540 switch ( m.data)[( m.p)] { case 10: - goto tr962 + goto tr954 case 13: - goto tr964 + goto tr956 case 32: - goto tr939 + goto tr1047 case 44: - goto tr1050 - case 97: - goto st252 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr939 - } - goto tr105 -tr22: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st738 - st738: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof738 - } - st_case_738: -//line plugins/parsers/influx/machine.go:29856 - switch ( m.data)[( m.p)] { - case 10: - goto tr962 - case 13: - goto tr964 - case 32: - goto tr939 - case 44: - goto tr1050 + goto tr1048 case 114: - goto st256 + goto st255 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr939 + goto tr1047 } - goto tr105 + goto tr103 tr9: -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st257 - st257: + goto st256 + st256: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof257 + goto _test_eof256 } - st_case_257: -//line plugins/parsers/influx/machine.go:29884 + st_case_256: +//line plugins/parsers/influx/machine.go:29568 switch ( m.data)[( m.p)] { case 10: goto tr8 @@ -29901,172 +29585,208 @@ tr9: goto st2 } goto tr6 - st258: + st257: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof258 + goto _test_eof257 } - st_case_258: + st_case_257: if ( m.data)[( m.p)] == 10 { - goto tr440 + goto tr438 } - goto st258 -tr440: -//line plugins/parsers/influx/machine.go.rl:158 + goto st257 +tr438: +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line -//line plugins/parsers/influx/machine.go.rl:70 +//line plugins/parsers/influx/machine.go.rl:78 - {goto st740 } + {goto st739 } - goto st739 - st739: + goto st738 + st738: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof739 + goto _test_eof738 } - st_case_739: -//line plugins/parsers/influx/machine.go:29931 + st_case_738: +//line plugins/parsers/influx/machine.go:29615 goto st0 + st260: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof260 + } + st_case_260: + switch ( m.data)[( m.p)] { + case 32: + goto tr33 + case 35: + goto tr33 + case 44: + goto tr33 + case 92: + goto tr442 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr33 + } + case ( m.data)[( m.p)] >= 9: + goto tr33 + } + goto tr441 +tr441: +//line plugins/parsers/influx/machine.go.rl:82 + + m.beginMetric = true + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st740 + st740: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof740 + } + st_case_740: +//line plugins/parsers/influx/machine.go:29656 + switch ( m.data)[( m.p)] { + case 9: + goto tr2 + case 10: + goto tr1056 + case 12: + goto tr2 + case 13: + goto tr1057 + case 32: + goto tr2 + case 44: + goto tr1058 + case 92: + goto st268 + } + goto st740 +tr443: +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto st741 +tr1056: + ( m.cs) = 741 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again 
+tr1060: + ( m.cs) = 741 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again + st741: +//line plugins/parsers/influx/machine.go.rl:172 + + m.finishMetric = true + ( m.cs) = 739; + {( m.p)++; goto _out } + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof741 + } + st_case_741: +//line plugins/parsers/influx/machine.go:29731 + goto st0 +tr1057: + ( m.cs) = 261 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1061: + ( m.cs) = 261 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st261: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof261 } st_case_261: - switch ( m.data)[( m.p)] { - case 32: - goto tr35 - case 35: - goto tr35 - case 44: - goto tr35 - case 92: - goto tr444 +//line plugins/parsers/influx/machine.go:29764 + if ( m.data)[( m.p)] == 10 { + goto tr443 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr35 - } - case ( m.data)[( m.p)] >= 9: - goto tr35 - } - goto tr443 -tr443: -//line plugins/parsers/influx/machine.go.rl:74 - - m.beginMetric = true - -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st741 - st741: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof741 - } - st_case_741: -//line plugins/parsers/influx/machine.go:29972 - switch ( m.data)[( m.p)] { - case 9: - goto tr2 - case 10: - goto tr1058 - case 12: - goto tr2 - case 13: - goto tr1059 - case 32: - goto tr2 - case 44: - goto tr1060 - case 92: - goto st269 - } - goto st741 -tr445: -//line plugins/parsers/influx/machine.go.rl:158 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto st742 + goto st0 tr1058: - ( m.cs) = 742 -//line plugins/parsers/influx/machine.go.rl:78 + ( m.cs) = 262 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:158 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - goto _again tr1062: - ( m.cs) = 742 -//line plugins/parsers/influx/machine.go.rl:91 + ( m.cs) = 262 +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:158 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto _again - st742: -//line plugins/parsers/influx/machine.go.rl:164 - - m.finishMetric = true - ( m.cs) = 740; - {( m.p)++; goto _out } - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof742 - } - st_case_742: -//line plugins/parsers/influx/machine.go:30047 - goto st0 -tr1059: - ( m.cs) = 262 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr1063: - ( m.cs) = 262 -//line plugins/parsers/influx/machine.go.rl:91 - - err = 
m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; goto _out } } @@ -30076,43 +29796,7 @@ tr1063: goto _test_eof262 } st_case_262: -//line plugins/parsers/influx/machine.go:30080 - if ( m.data)[( m.p)] == 10 { - goto tr445 - } - goto st0 -tr1060: - ( m.cs) = 263 -//line plugins/parsers/influx/machine.go.rl:78 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again -tr1064: - ( m.cs) = 263 -//line plugins/parsers/influx/machine.go.rl:91 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 258; - {( m.p)++; goto _out } - } - - goto _again - st263: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof263 - } - st_case_263: -//line plugins/parsers/influx/machine.go:30116 +//line plugins/parsers/influx/machine.go:29800 switch ( m.data)[( m.p)] { case 32: goto tr2 @@ -30121,7 +29805,7 @@ tr1064: case 61: goto tr2 case 92: - goto tr447 + goto tr445 } switch { case ( m.data)[( m.p)] > 10: @@ -30131,28 +29815,59 @@ tr1064: case ( m.data)[( m.p)] >= 9: goto tr2 } - goto tr446 -tr446: -//line plugins/parsers/influx/machine.go.rl:20 + goto tr444 +tr444: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p + goto st263 + st263: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof263 + } + st_case_263: +//line plugins/parsers/influx/machine.go:29831 + switch ( m.data)[( m.p)] { + case 32: + goto tr2 + case 44: + goto tr2 + case 61: + goto tr447 + case 92: + goto st266 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 + } + goto st263 +tr447: +//line plugins/parsers/influx/machine.go.rl:95 + + m.key = m.text() + goto st264 st264: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof264 } st_case_264: -//line plugins/parsers/influx/machine.go:30147 +//line plugins/parsers/influx/machine.go:29862 switch ( m.data)[( m.p)] { case 32: goto tr2 case 44: goto tr2 case 61: - goto tr449 + goto tr2 case 92: - goto st267 + goto tr450 } switch { case ( m.data)[( m.p)] > 10: @@ -30162,11 +29877,42 @@ tr446: case ( m.data)[( m.p)] >= 9: goto tr2 } - goto st264 + goto tr449 tr449: -//line plugins/parsers/influx/machine.go.rl:87 +//line plugins/parsers/influx/machine.go.rl:28 - m.key = m.text() + m.pb = m.p + + goto st742 + st742: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof742 + } + st_case_742: +//line plugins/parsers/influx/machine.go:29893 + switch ( m.data)[( m.p)] { + case 9: + goto tr2 + case 10: + goto tr1060 + case 12: + goto tr2 + case 13: + goto tr1061 + case 32: + goto tr2 + case 44: + goto tr1062 + case 61: + goto tr2 + case 92: + goto st265 + } + goto st742 +tr450: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p goto st265 st265: @@ -30174,16 +29920,9 @@ tr449: goto _test_eof265 } st_case_265: -//line plugins/parsers/influx/machine.go:30178 - switch ( m.data)[( m.p)] { - case 32: - goto tr2 - case 44: - goto tr2 - case 61: - goto tr2 - case 92: - goto tr452 +//line plugins/parsers/influx/machine.go:29924 + if ( m.data)[( m.p)] == 92 { + goto st743 } switch { case ( m.data)[( m.p)] > 10: @@ -30193,40 +29932,37 @@ tr449: case ( m.data)[( m.p)] >= 9: goto tr2 } - goto tr451 -tr451: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st743 + goto st742 st743: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof743 } st_case_743: 
-//line plugins/parsers/influx/machine.go:30209 +//line plugins/parsers/influx/machine.go:29945 switch ( m.data)[( m.p)] { case 9: goto tr2 case 10: - goto tr1062 + goto tr1060 case 12: goto tr2 case 13: - goto tr1063 + goto tr1061 case 32: goto tr2 case 44: - goto tr1064 + goto tr1062 case 61: goto tr2 case 92: - goto st266 + goto st265 } - goto st743 -tr452: -//line plugins/parsers/influx/machine.go.rl:20 + goto st742 +tr445: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -30236,88 +29972,8 @@ tr452: goto _test_eof266 } st_case_266: -//line plugins/parsers/influx/machine.go:30240 +//line plugins/parsers/influx/machine.go:29976 if ( m.data)[( m.p)] == 92 { - goto st744 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 - } - goto st743 - st744: -//line plugins/parsers/influx/machine.go.rl:240 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof744 - } - st_case_744: -//line plugins/parsers/influx/machine.go:30261 - switch ( m.data)[( m.p)] { - case 9: - goto tr2 - case 10: - goto tr1062 - case 12: - goto tr2 - case 13: - goto tr1063 - case 32: - goto tr2 - case 44: - goto tr1064 - case 61: - goto tr2 - case 92: - goto st266 - } - goto st743 -tr447: -//line plugins/parsers/influx/machine.go.rl:20 - - m.pb = m.p - - goto st267 - st267: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof267 - } - st_case_267: -//line plugins/parsers/influx/machine.go:30292 - if ( m.data)[( m.p)] == 92 { - goto st268 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 - } - goto st264 - st268: -//line plugins/parsers/influx/machine.go.rl:240 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof268 - } - st_case_268: -//line plugins/parsers/influx/machine.go:30313 - switch ( m.data)[( m.p)] { - case 32: - goto tr2 - case 44: - goto tr2 - case 61: - goto tr449 - case 92: goto st267 } switch { @@ -30328,23 +29984,51 @@ tr447: case ( m.data)[( m.p)] >= 9: goto tr2 } - goto st264 -tr444: -//line plugins/parsers/influx/machine.go.rl:74 + goto st263 + st267: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof267 + } + st_case_267: +//line plugins/parsers/influx/machine.go:29997 + switch ( m.data)[( m.p)] { + case 32: + goto tr2 + case 44: + goto tr2 + case 61: + goto tr447 + case 92: + goto st266 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 + } + goto st263 +tr442: +//line plugins/parsers/influx/machine.go.rl:82 m.beginMetric = true -//line plugins/parsers/influx/machine.go.rl:20 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st269 - st269: + goto st268 + st268: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof269 + goto _test_eof268 } - st_case_269: -//line plugins/parsers/influx/machine.go:30348 + st_case_268: +//line plugins/parsers/influx/machine.go:30032 switch { case ( m.data)[( m.p)] > 10: if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { @@ -30353,65 +30037,65 @@ tr444: case ( m.data)[( m.p)] >= 9: goto st0 } - goto st741 -tr441: -//line plugins/parsers/influx/machine.go.rl:158 + goto st740 +tr439: +//line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line - goto st740 - st740: + goto 
st739 + st739: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof740 + goto _test_eof739 } - st_case_740: -//line plugins/parsers/influx/machine.go:30371 + st_case_739: +//line plugins/parsers/influx/machine.go:30055 switch ( m.data)[( m.p)] { case 10: - goto tr441 + goto tr439 case 13: - goto st259 + goto st258 case 32: - goto st740 + goto st739 case 35: - goto st260 + goto st259 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st740 + goto st739 } - goto tr1055 + goto tr1053 + st258: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof258 + } + st_case_258: + if ( m.data)[( m.p)] == 10 { + goto tr439 + } + goto st0 st259: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof259 } st_case_259: if ( m.data)[( m.p)] == 10 { - goto tr441 + goto tr439 } - goto st0 - st260: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof260 - } - st_case_260: - if ( m.data)[( m.p)] == 10 { - goto tr441 - } - goto st260 + goto st259 st_out: - _test_eof270: ( m.cs) = 270; goto _test_eof + _test_eof269: ( m.cs) = 269; goto _test_eof _test_eof1: ( m.cs) = 1; goto _test_eof _test_eof2: ( m.cs) = 2; goto _test_eof _test_eof3: ( m.cs) = 3; goto _test_eof _test_eof4: ( m.cs) = 4; goto _test_eof _test_eof5: ( m.cs) = 5; goto _test_eof _test_eof6: ( m.cs) = 6; goto _test_eof - _test_eof7: ( m.cs) = 7; goto _test_eof + _test_eof270: ( m.cs) = 270; goto _test_eof _test_eof271: ( m.cs) = 271; goto _test_eof _test_eof272: ( m.cs) = 272; goto _test_eof - _test_eof273: ( m.cs) = 273; goto _test_eof + _test_eof7: ( m.cs) = 7; goto _test_eof _test_eof8: ( m.cs) = 8; goto _test_eof _test_eof9: ( m.cs) = 9; goto _test_eof _test_eof10: ( m.cs) = 10; goto _test_eof @@ -30436,15 +30120,15 @@ tr441: _test_eof29: ( m.cs) = 29; goto _test_eof _test_eof30: ( m.cs) = 30; goto _test_eof _test_eof31: ( m.cs) = 31; goto _test_eof - _test_eof32: ( m.cs) = 32; goto _test_eof + _test_eof273: ( m.cs) = 273; goto _test_eof _test_eof274: ( m.cs) = 274; goto _test_eof - _test_eof275: ( m.cs) = 275; goto _test_eof + _test_eof32: ( m.cs) = 32; goto _test_eof _test_eof33: ( m.cs) = 33; goto _test_eof - _test_eof34: ( m.cs) = 34; goto _test_eof + _test_eof275: ( m.cs) = 275; goto _test_eof _test_eof276: ( m.cs) = 276; goto _test_eof _test_eof277: ( m.cs) = 277; goto _test_eof + _test_eof34: ( m.cs) = 34; goto _test_eof _test_eof278: ( m.cs) = 278; goto _test_eof - _test_eof35: ( m.cs) = 35; goto _test_eof _test_eof279: ( m.cs) = 279; goto _test_eof _test_eof280: ( m.cs) = 280; goto _test_eof _test_eof281: ( m.cs) = 281; goto _test_eof @@ -30462,22 +30146,22 @@ tr441: _test_eof293: ( m.cs) = 293; goto _test_eof _test_eof294: ( m.cs) = 294; goto _test_eof _test_eof295: ( m.cs) = 295; goto _test_eof - _test_eof296: ( m.cs) = 296; goto _test_eof + _test_eof35: ( m.cs) = 35; goto _test_eof _test_eof36: ( m.cs) = 36; goto _test_eof - _test_eof37: ( m.cs) = 37; goto _test_eof + _test_eof296: ( m.cs) = 296; goto _test_eof _test_eof297: ( m.cs) = 297; goto _test_eof _test_eof298: ( m.cs) = 298; goto _test_eof - _test_eof299: ( m.cs) = 299; goto _test_eof + _test_eof37: ( m.cs) = 37; goto _test_eof _test_eof38: ( m.cs) = 38; goto _test_eof _test_eof39: ( m.cs) = 39; goto _test_eof _test_eof40: ( m.cs) = 40; goto _test_eof _test_eof41: ( m.cs) = 41; goto _test_eof - _test_eof42: ( m.cs) = 42; goto _test_eof + _test_eof299: ( m.cs) = 299; goto _test_eof _test_eof300: ( m.cs) = 300; goto _test_eof _test_eof301: ( m.cs) = 301; goto _test_eof _test_eof302: ( m.cs) = 302; goto _test_eof + _test_eof42: ( m.cs) = 42; goto _test_eof _test_eof303: ( m.cs) = 
303; goto _test_eof - _test_eof43: ( m.cs) = 43; goto _test_eof _test_eof304: ( m.cs) = 304; goto _test_eof _test_eof305: ( m.cs) = 305; goto _test_eof _test_eof306: ( m.cs) = 306; goto _test_eof @@ -30499,7 +30183,7 @@ tr441: _test_eof322: ( m.cs) = 322; goto _test_eof _test_eof323: ( m.cs) = 323; goto _test_eof _test_eof324: ( m.cs) = 324; goto _test_eof - _test_eof325: ( m.cs) = 325; goto _test_eof + _test_eof43: ( m.cs) = 43; goto _test_eof _test_eof44: ( m.cs) = 44; goto _test_eof _test_eof45: ( m.cs) = 45; goto _test_eof _test_eof46: ( m.cs) = 46; goto _test_eof @@ -30509,19 +30193,19 @@ tr441: _test_eof50: ( m.cs) = 50; goto _test_eof _test_eof51: ( m.cs) = 51; goto _test_eof _test_eof52: ( m.cs) = 52; goto _test_eof - _test_eof53: ( m.cs) = 53; goto _test_eof + _test_eof325: ( m.cs) = 325; goto _test_eof _test_eof326: ( m.cs) = 326; goto _test_eof _test_eof327: ( m.cs) = 327; goto _test_eof - _test_eof328: ( m.cs) = 328; goto _test_eof + _test_eof53: ( m.cs) = 53; goto _test_eof _test_eof54: ( m.cs) = 54; goto _test_eof _test_eof55: ( m.cs) = 55; goto _test_eof _test_eof56: ( m.cs) = 56; goto _test_eof _test_eof57: ( m.cs) = 57; goto _test_eof _test_eof58: ( m.cs) = 58; goto _test_eof - _test_eof59: ( m.cs) = 59; goto _test_eof + _test_eof328: ( m.cs) = 328; goto _test_eof _test_eof329: ( m.cs) = 329; goto _test_eof + _test_eof59: ( m.cs) = 59; goto _test_eof _test_eof330: ( m.cs) = 330; goto _test_eof - _test_eof60: ( m.cs) = 60; goto _test_eof _test_eof331: ( m.cs) = 331; goto _test_eof _test_eof332: ( m.cs) = 332; goto _test_eof _test_eof333: ( m.cs) = 333; goto _test_eof @@ -30541,12 +30225,12 @@ tr441: _test_eof347: ( m.cs) = 347; goto _test_eof _test_eof348: ( m.cs) = 348; goto _test_eof _test_eof349: ( m.cs) = 349; goto _test_eof + _test_eof60: ( m.cs) = 60; goto _test_eof _test_eof350: ( m.cs) = 350; goto _test_eof - _test_eof61: ( m.cs) = 61; goto _test_eof _test_eof351: ( m.cs) = 351; goto _test_eof _test_eof352: ( m.cs) = 352; goto _test_eof + _test_eof61: ( m.cs) = 61; goto _test_eof _test_eof353: ( m.cs) = 353; goto _test_eof - _test_eof62: ( m.cs) = 62; goto _test_eof _test_eof354: ( m.cs) = 354; goto _test_eof _test_eof355: ( m.cs) = 355; goto _test_eof _test_eof356: ( m.cs) = 356; goto _test_eof @@ -30566,28 +30250,28 @@ tr441: _test_eof370: ( m.cs) = 370; goto _test_eof _test_eof371: ( m.cs) = 371; goto _test_eof _test_eof372: ( m.cs) = 372; goto _test_eof - _test_eof373: ( m.cs) = 373; goto _test_eof + _test_eof62: ( m.cs) = 62; goto _test_eof _test_eof63: ( m.cs) = 63; goto _test_eof _test_eof64: ( m.cs) = 64; goto _test_eof _test_eof65: ( m.cs) = 65; goto _test_eof _test_eof66: ( m.cs) = 66; goto _test_eof + _test_eof373: ( m.cs) = 373; goto _test_eof _test_eof67: ( m.cs) = 67; goto _test_eof - _test_eof374: ( m.cs) = 374; goto _test_eof _test_eof68: ( m.cs) = 68; goto _test_eof _test_eof69: ( m.cs) = 69; goto _test_eof _test_eof70: ( m.cs) = 70; goto _test_eof _test_eof71: ( m.cs) = 71; goto _test_eof - _test_eof72: ( m.cs) = 72; goto _test_eof + _test_eof374: ( m.cs) = 374; goto _test_eof _test_eof375: ( m.cs) = 375; goto _test_eof _test_eof376: ( m.cs) = 376; goto _test_eof - _test_eof377: ( m.cs) = 377; goto _test_eof + _test_eof72: ( m.cs) = 72; goto _test_eof _test_eof73: ( m.cs) = 73; goto _test_eof _test_eof74: ( m.cs) = 74; goto _test_eof + _test_eof377: ( m.cs) = 377; goto _test_eof _test_eof378: ( m.cs) = 378; goto _test_eof _test_eof379: ( m.cs) = 379; goto _test_eof _test_eof75: ( m.cs) = 75; goto _test_eof _test_eof380: ( m.cs) = 380; goto 
_test_eof - _test_eof76: ( m.cs) = 76; goto _test_eof _test_eof381: ( m.cs) = 381; goto _test_eof _test_eof382: ( m.cs) = 382; goto _test_eof _test_eof383: ( m.cs) = 383; goto _test_eof @@ -30607,7 +30291,7 @@ tr441: _test_eof397: ( m.cs) = 397; goto _test_eof _test_eof398: ( m.cs) = 398; goto _test_eof _test_eof399: ( m.cs) = 399; goto _test_eof - _test_eof400: ( m.cs) = 400; goto _test_eof + _test_eof76: ( m.cs) = 76; goto _test_eof _test_eof77: ( m.cs) = 77; goto _test_eof _test_eof78: ( m.cs) = 78; goto _test_eof _test_eof79: ( m.cs) = 79; goto _test_eof @@ -30621,29 +30305,29 @@ tr441: _test_eof87: ( m.cs) = 87; goto _test_eof _test_eof88: ( m.cs) = 88; goto _test_eof _test_eof89: ( m.cs) = 89; goto _test_eof - _test_eof90: ( m.cs) = 90; goto _test_eof + _test_eof400: ( m.cs) = 400; goto _test_eof _test_eof401: ( m.cs) = 401; goto _test_eof _test_eof402: ( m.cs) = 402; goto _test_eof _test_eof403: ( m.cs) = 403; goto _test_eof - _test_eof404: ( m.cs) = 404; goto _test_eof + _test_eof90: ( m.cs) = 90; goto _test_eof _test_eof91: ( m.cs) = 91; goto _test_eof _test_eof92: ( m.cs) = 92; goto _test_eof _test_eof93: ( m.cs) = 93; goto _test_eof - _test_eof94: ( m.cs) = 94; goto _test_eof + _test_eof404: ( m.cs) = 404; goto _test_eof _test_eof405: ( m.cs) = 405; goto _test_eof - _test_eof406: ( m.cs) = 406; goto _test_eof + _test_eof94: ( m.cs) = 94; goto _test_eof _test_eof95: ( m.cs) = 95; goto _test_eof + _test_eof406: ( m.cs) = 406; goto _test_eof _test_eof96: ( m.cs) = 96; goto _test_eof - _test_eof407: ( m.cs) = 407; goto _test_eof _test_eof97: ( m.cs) = 97; goto _test_eof - _test_eof98: ( m.cs) = 98; goto _test_eof + _test_eof407: ( m.cs) = 407; goto _test_eof _test_eof408: ( m.cs) = 408; goto _test_eof + _test_eof98: ( m.cs) = 98; goto _test_eof _test_eof409: ( m.cs) = 409; goto _test_eof - _test_eof99: ( m.cs) = 99; goto _test_eof _test_eof410: ( m.cs) = 410; goto _test_eof - _test_eof411: ( m.cs) = 411; goto _test_eof + _test_eof99: ( m.cs) = 99; goto _test_eof _test_eof100: ( m.cs) = 100; goto _test_eof - _test_eof101: ( m.cs) = 101; goto _test_eof + _test_eof411: ( m.cs) = 411; goto _test_eof _test_eof412: ( m.cs) = 412; goto _test_eof _test_eof413: ( m.cs) = 413; goto _test_eof _test_eof414: ( m.cs) = 414; goto _test_eof @@ -30661,17 +30345,17 @@ tr441: _test_eof426: ( m.cs) = 426; goto _test_eof _test_eof427: ( m.cs) = 427; goto _test_eof _test_eof428: ( m.cs) = 428; goto _test_eof + _test_eof101: ( m.cs) = 101; goto _test_eof _test_eof429: ( m.cs) = 429; goto _test_eof - _test_eof102: ( m.cs) = 102; goto _test_eof _test_eof430: ( m.cs) = 430; goto _test_eof _test_eof431: ( m.cs) = 431; goto _test_eof - _test_eof432: ( m.cs) = 432; goto _test_eof + _test_eof102: ( m.cs) = 102; goto _test_eof _test_eof103: ( m.cs) = 103; goto _test_eof - _test_eof104: ( m.cs) = 104; goto _test_eof + _test_eof432: ( m.cs) = 432; goto _test_eof _test_eof433: ( m.cs) = 433; goto _test_eof _test_eof434: ( m.cs) = 434; goto _test_eof + _test_eof104: ( m.cs) = 104; goto _test_eof _test_eof435: ( m.cs) = 435; goto _test_eof - _test_eof105: ( m.cs) = 105; goto _test_eof _test_eof436: ( m.cs) = 436; goto _test_eof _test_eof437: ( m.cs) = 437; goto _test_eof _test_eof438: ( m.cs) = 438; goto _test_eof @@ -30691,8 +30375,8 @@ tr441: _test_eof452: ( m.cs) = 452; goto _test_eof _test_eof453: ( m.cs) = 453; goto _test_eof _test_eof454: ( m.cs) = 454; goto _test_eof + _test_eof105: ( m.cs) = 105; goto _test_eof _test_eof455: ( m.cs) = 455; goto _test_eof - _test_eof106: ( m.cs) = 106; goto _test_eof 
_test_eof456: ( m.cs) = 456; goto _test_eof _test_eof457: ( m.cs) = 457; goto _test_eof _test_eof458: ( m.cs) = 458; goto _test_eof @@ -30714,17 +30398,17 @@ tr441: _test_eof474: ( m.cs) = 474; goto _test_eof _test_eof475: ( m.cs) = 475; goto _test_eof _test_eof476: ( m.cs) = 476; goto _test_eof - _test_eof477: ( m.cs) = 477; goto _test_eof + _test_eof106: ( m.cs) = 106; goto _test_eof _test_eof107: ( m.cs) = 107; goto _test_eof _test_eof108: ( m.cs) = 108; goto _test_eof _test_eof109: ( m.cs) = 109; goto _test_eof _test_eof110: ( m.cs) = 110; goto _test_eof + _test_eof477: ( m.cs) = 477; goto _test_eof _test_eof111: ( m.cs) = 111; goto _test_eof _test_eof478: ( m.cs) = 478; goto _test_eof - _test_eof112: ( m.cs) = 112; goto _test_eof _test_eof479: ( m.cs) = 479; goto _test_eof + _test_eof112: ( m.cs) = 112; goto _test_eof _test_eof480: ( m.cs) = 480; goto _test_eof - _test_eof113: ( m.cs) = 113; goto _test_eof _test_eof481: ( m.cs) = 481; goto _test_eof _test_eof482: ( m.cs) = 482; goto _test_eof _test_eof483: ( m.cs) = 483; goto _test_eof @@ -30733,27 +30417,27 @@ tr441: _test_eof486: ( m.cs) = 486; goto _test_eof _test_eof487: ( m.cs) = 487; goto _test_eof _test_eof488: ( m.cs) = 488; goto _test_eof - _test_eof489: ( m.cs) = 489; goto _test_eof + _test_eof113: ( m.cs) = 113; goto _test_eof _test_eof114: ( m.cs) = 114; goto _test_eof _test_eof115: ( m.cs) = 115; goto _test_eof + _test_eof489: ( m.cs) = 489; goto _test_eof _test_eof116: ( m.cs) = 116; goto _test_eof - _test_eof490: ( m.cs) = 490; goto _test_eof _test_eof117: ( m.cs) = 117; goto _test_eof _test_eof118: ( m.cs) = 118; goto _test_eof + _test_eof490: ( m.cs) = 490; goto _test_eof _test_eof119: ( m.cs) = 119; goto _test_eof - _test_eof491: ( m.cs) = 491; goto _test_eof _test_eof120: ( m.cs) = 120; goto _test_eof - _test_eof121: ( m.cs) = 121; goto _test_eof + _test_eof491: ( m.cs) = 491; goto _test_eof _test_eof492: ( m.cs) = 492; goto _test_eof - _test_eof493: ( m.cs) = 493; goto _test_eof + _test_eof121: ( m.cs) = 121; goto _test_eof _test_eof122: ( m.cs) = 122; goto _test_eof _test_eof123: ( m.cs) = 123; goto _test_eof _test_eof124: ( m.cs) = 124; goto _test_eof - _test_eof125: ( m.cs) = 125; goto _test_eof + _test_eof493: ( m.cs) = 493; goto _test_eof _test_eof494: ( m.cs) = 494; goto _test_eof _test_eof495: ( m.cs) = 495; goto _test_eof + _test_eof125: ( m.cs) = 125; goto _test_eof _test_eof496: ( m.cs) = 496; goto _test_eof - _test_eof126: ( m.cs) = 126; goto _test_eof _test_eof497: ( m.cs) = 497; goto _test_eof _test_eof498: ( m.cs) = 498; goto _test_eof _test_eof499: ( m.cs) = 499; goto _test_eof @@ -30773,9 +30457,9 @@ tr441: _test_eof513: ( m.cs) = 513; goto _test_eof _test_eof514: ( m.cs) = 514; goto _test_eof _test_eof515: ( m.cs) = 515; goto _test_eof - _test_eof516: ( m.cs) = 516; goto _test_eof + _test_eof126: ( m.cs) = 126; goto _test_eof _test_eof127: ( m.cs) = 127; goto _test_eof - _test_eof128: ( m.cs) = 128; goto _test_eof + _test_eof516: ( m.cs) = 516; goto _test_eof _test_eof517: ( m.cs) = 517; goto _test_eof _test_eof518: ( m.cs) = 518; goto _test_eof _test_eof519: ( m.cs) = 519; goto _test_eof @@ -30784,27 +30468,27 @@ tr441: _test_eof522: ( m.cs) = 522; goto _test_eof _test_eof523: ( m.cs) = 523; goto _test_eof _test_eof524: ( m.cs) = 524; goto _test_eof - _test_eof525: ( m.cs) = 525; goto _test_eof + _test_eof128: ( m.cs) = 128; goto _test_eof _test_eof129: ( m.cs) = 129; goto _test_eof _test_eof130: ( m.cs) = 130; goto _test_eof + _test_eof525: ( m.cs) = 525; goto _test_eof _test_eof131: ( m.cs) = 
131; goto _test_eof - _test_eof526: ( m.cs) = 526; goto _test_eof _test_eof132: ( m.cs) = 132; goto _test_eof _test_eof133: ( m.cs) = 133; goto _test_eof + _test_eof526: ( m.cs) = 526; goto _test_eof _test_eof134: ( m.cs) = 134; goto _test_eof - _test_eof527: ( m.cs) = 527; goto _test_eof _test_eof135: ( m.cs) = 135; goto _test_eof - _test_eof136: ( m.cs) = 136; goto _test_eof + _test_eof527: ( m.cs) = 527; goto _test_eof _test_eof528: ( m.cs) = 528; goto _test_eof - _test_eof529: ( m.cs) = 529; goto _test_eof + _test_eof136: ( m.cs) = 136; goto _test_eof _test_eof137: ( m.cs) = 137; goto _test_eof _test_eof138: ( m.cs) = 138; goto _test_eof - _test_eof139: ( m.cs) = 139; goto _test_eof + _test_eof529: ( m.cs) = 529; goto _test_eof _test_eof530: ( m.cs) = 530; goto _test_eof + _test_eof139: ( m.cs) = 139; goto _test_eof _test_eof531: ( m.cs) = 531; goto _test_eof _test_eof140: ( m.cs) = 140; goto _test_eof _test_eof532: ( m.cs) = 532; goto _test_eof - _test_eof141: ( m.cs) = 141; goto _test_eof _test_eof533: ( m.cs) = 533; goto _test_eof _test_eof534: ( m.cs) = 534; goto _test_eof _test_eof535: ( m.cs) = 535; goto _test_eof @@ -30812,17 +30496,17 @@ tr441: _test_eof537: ( m.cs) = 537; goto _test_eof _test_eof538: ( m.cs) = 538; goto _test_eof _test_eof539: ( m.cs) = 539; goto _test_eof - _test_eof540: ( m.cs) = 540; goto _test_eof + _test_eof141: ( m.cs) = 141; goto _test_eof _test_eof142: ( m.cs) = 142; goto _test_eof _test_eof143: ( m.cs) = 143; goto _test_eof + _test_eof540: ( m.cs) = 540; goto _test_eof _test_eof144: ( m.cs) = 144; goto _test_eof - _test_eof541: ( m.cs) = 541; goto _test_eof _test_eof145: ( m.cs) = 145; goto _test_eof _test_eof146: ( m.cs) = 146; goto _test_eof + _test_eof541: ( m.cs) = 541; goto _test_eof _test_eof147: ( m.cs) = 147; goto _test_eof - _test_eof542: ( m.cs) = 542; goto _test_eof _test_eof148: ( m.cs) = 148; goto _test_eof - _test_eof149: ( m.cs) = 149; goto _test_eof + _test_eof542: ( m.cs) = 542; goto _test_eof _test_eof543: ( m.cs) = 543; goto _test_eof _test_eof544: ( m.cs) = 544; goto _test_eof _test_eof545: ( m.cs) = 545; goto _test_eof @@ -30842,16 +30526,16 @@ tr441: _test_eof559: ( m.cs) = 559; goto _test_eof _test_eof560: ( m.cs) = 560; goto _test_eof _test_eof561: ( m.cs) = 561; goto _test_eof - _test_eof562: ( m.cs) = 562; goto _test_eof + _test_eof149: ( m.cs) = 149; goto _test_eof _test_eof150: ( m.cs) = 150; goto _test_eof - _test_eof151: ( m.cs) = 151; goto _test_eof + _test_eof562: ( m.cs) = 562; goto _test_eof _test_eof563: ( m.cs) = 563; goto _test_eof _test_eof564: ( m.cs) = 564; goto _test_eof + _test_eof151: ( m.cs) = 151; goto _test_eof _test_eof565: ( m.cs) = 565; goto _test_eof - _test_eof152: ( m.cs) = 152; goto _test_eof _test_eof566: ( m.cs) = 566; goto _test_eof + _test_eof152: ( m.cs) = 152; goto _test_eof _test_eof567: ( m.cs) = 567; goto _test_eof - _test_eof153: ( m.cs) = 153; goto _test_eof _test_eof568: ( m.cs) = 568; goto _test_eof _test_eof569: ( m.cs) = 569; goto _test_eof _test_eof570: ( m.cs) = 570; goto _test_eof @@ -30869,11 +30553,11 @@ tr441: _test_eof582: ( m.cs) = 582; goto _test_eof _test_eof583: ( m.cs) = 583; goto _test_eof _test_eof584: ( m.cs) = 584; goto _test_eof - _test_eof585: ( m.cs) = 585; goto _test_eof + _test_eof153: ( m.cs) = 153; goto _test_eof _test_eof154: ( m.cs) = 154; goto _test_eof + _test_eof585: ( m.cs) = 585; goto _test_eof _test_eof155: ( m.cs) = 155; goto _test_eof _test_eof586: ( m.cs) = 586; goto _test_eof - _test_eof156: ( m.cs) = 156; goto _test_eof _test_eof587: ( m.cs) = 587; 
goto _test_eof _test_eof588: ( m.cs) = 588; goto _test_eof _test_eof589: ( m.cs) = 589; goto _test_eof @@ -30881,25 +30565,25 @@ tr441: _test_eof591: ( m.cs) = 591; goto _test_eof _test_eof592: ( m.cs) = 592; goto _test_eof _test_eof593: ( m.cs) = 593; goto _test_eof - _test_eof594: ( m.cs) = 594; goto _test_eof + _test_eof156: ( m.cs) = 156; goto _test_eof _test_eof157: ( m.cs) = 157; goto _test_eof _test_eof158: ( m.cs) = 158; goto _test_eof + _test_eof594: ( m.cs) = 594; goto _test_eof _test_eof159: ( m.cs) = 159; goto _test_eof - _test_eof595: ( m.cs) = 595; goto _test_eof _test_eof160: ( m.cs) = 160; goto _test_eof _test_eof161: ( m.cs) = 161; goto _test_eof + _test_eof595: ( m.cs) = 595; goto _test_eof _test_eof162: ( m.cs) = 162; goto _test_eof - _test_eof596: ( m.cs) = 596; goto _test_eof _test_eof163: ( m.cs) = 163; goto _test_eof - _test_eof164: ( m.cs) = 164; goto _test_eof + _test_eof596: ( m.cs) = 596; goto _test_eof _test_eof597: ( m.cs) = 597; goto _test_eof - _test_eof598: ( m.cs) = 598; goto _test_eof + _test_eof164: ( m.cs) = 164; goto _test_eof _test_eof165: ( m.cs) = 165; goto _test_eof _test_eof166: ( m.cs) = 166; goto _test_eof _test_eof167: ( m.cs) = 167; goto _test_eof _test_eof168: ( m.cs) = 168; goto _test_eof _test_eof169: ( m.cs) = 169; goto _test_eof - _test_eof170: ( m.cs) = 170; goto _test_eof + _test_eof598: ( m.cs) = 598; goto _test_eof _test_eof599: ( m.cs) = 599; goto _test_eof _test_eof600: ( m.cs) = 600; goto _test_eof _test_eof601: ( m.cs) = 601; goto _test_eof @@ -30918,56 +30602,56 @@ tr441: _test_eof614: ( m.cs) = 614; goto _test_eof _test_eof615: ( m.cs) = 615; goto _test_eof _test_eof616: ( m.cs) = 616; goto _test_eof - _test_eof617: ( m.cs) = 617; goto _test_eof + _test_eof170: ( m.cs) = 170; goto _test_eof _test_eof171: ( m.cs) = 171; goto _test_eof _test_eof172: ( m.cs) = 172; goto _test_eof - _test_eof173: ( m.cs) = 173; goto _test_eof + _test_eof617: ( m.cs) = 617; goto _test_eof _test_eof618: ( m.cs) = 618; goto _test_eof _test_eof619: ( m.cs) = 619; goto _test_eof + _test_eof173: ( m.cs) = 173; goto _test_eof _test_eof620: ( m.cs) = 620; goto _test_eof - _test_eof174: ( m.cs) = 174; goto _test_eof _test_eof621: ( m.cs) = 621; goto _test_eof + _test_eof174: ( m.cs) = 174; goto _test_eof _test_eof622: ( m.cs) = 622; goto _test_eof - _test_eof175: ( m.cs) = 175; goto _test_eof _test_eof623: ( m.cs) = 623; goto _test_eof _test_eof624: ( m.cs) = 624; goto _test_eof _test_eof625: ( m.cs) = 625; goto _test_eof _test_eof626: ( m.cs) = 626; goto _test_eof - _test_eof627: ( m.cs) = 627; goto _test_eof + _test_eof175: ( m.cs) = 175; goto _test_eof _test_eof176: ( m.cs) = 176; goto _test_eof _test_eof177: ( m.cs) = 177; goto _test_eof + _test_eof627: ( m.cs) = 627; goto _test_eof _test_eof178: ( m.cs) = 178; goto _test_eof - _test_eof628: ( m.cs) = 628; goto _test_eof _test_eof179: ( m.cs) = 179; goto _test_eof _test_eof180: ( m.cs) = 180; goto _test_eof + _test_eof628: ( m.cs) = 628; goto _test_eof _test_eof181: ( m.cs) = 181; goto _test_eof - _test_eof629: ( m.cs) = 629; goto _test_eof _test_eof182: ( m.cs) = 182; goto _test_eof - _test_eof183: ( m.cs) = 183; goto _test_eof + _test_eof629: ( m.cs) = 629; goto _test_eof _test_eof630: ( m.cs) = 630; goto _test_eof + _test_eof183: ( m.cs) = 183; goto _test_eof _test_eof631: ( m.cs) = 631; goto _test_eof - _test_eof184: ( m.cs) = 184; goto _test_eof _test_eof632: ( m.cs) = 632; goto _test_eof _test_eof633: ( m.cs) = 633; goto _test_eof - _test_eof634: ( m.cs) = 634; goto _test_eof + _test_eof184: ( m.cs) 
= 184; goto _test_eof _test_eof185: ( m.cs) = 185; goto _test_eof _test_eof186: ( m.cs) = 186; goto _test_eof + _test_eof634: ( m.cs) = 634; goto _test_eof _test_eof187: ( m.cs) = 187; goto _test_eof - _test_eof635: ( m.cs) = 635; goto _test_eof _test_eof188: ( m.cs) = 188; goto _test_eof _test_eof189: ( m.cs) = 189; goto _test_eof + _test_eof635: ( m.cs) = 635; goto _test_eof _test_eof190: ( m.cs) = 190; goto _test_eof - _test_eof636: ( m.cs) = 636; goto _test_eof _test_eof191: ( m.cs) = 191; goto _test_eof - _test_eof192: ( m.cs) = 192; goto _test_eof + _test_eof636: ( m.cs) = 636; goto _test_eof _test_eof637: ( m.cs) = 637; goto _test_eof - _test_eof638: ( m.cs) = 638; goto _test_eof + _test_eof192: ( m.cs) = 192; goto _test_eof _test_eof193: ( m.cs) = 193; goto _test_eof _test_eof194: ( m.cs) = 194; goto _test_eof + _test_eof638: ( m.cs) = 638; goto _test_eof _test_eof195: ( m.cs) = 195; goto _test_eof - _test_eof639: ( m.cs) = 639; goto _test_eof _test_eof196: ( m.cs) = 196; goto _test_eof - _test_eof197: ( m.cs) = 197; goto _test_eof + _test_eof639: ( m.cs) = 639; goto _test_eof _test_eof640: ( m.cs) = 640; goto _test_eof _test_eof641: ( m.cs) = 641; goto _test_eof _test_eof642: ( m.cs) = 642; goto _test_eof @@ -30975,22 +30659,22 @@ tr441: _test_eof644: ( m.cs) = 644; goto _test_eof _test_eof645: ( m.cs) = 645; goto _test_eof _test_eof646: ( m.cs) = 646; goto _test_eof - _test_eof647: ( m.cs) = 647; goto _test_eof + _test_eof197: ( m.cs) = 197; goto _test_eof _test_eof198: ( m.cs) = 198; goto _test_eof _test_eof199: ( m.cs) = 199; goto _test_eof + _test_eof647: ( m.cs) = 647; goto _test_eof _test_eof200: ( m.cs) = 200; goto _test_eof - _test_eof648: ( m.cs) = 648; goto _test_eof _test_eof201: ( m.cs) = 201; goto _test_eof _test_eof202: ( m.cs) = 202; goto _test_eof + _test_eof648: ( m.cs) = 648; goto _test_eof _test_eof203: ( m.cs) = 203; goto _test_eof - _test_eof649: ( m.cs) = 649; goto _test_eof _test_eof204: ( m.cs) = 204; goto _test_eof - _test_eof205: ( m.cs) = 205; goto _test_eof + _test_eof649: ( m.cs) = 649; goto _test_eof _test_eof650: ( m.cs) = 650; goto _test_eof - _test_eof651: ( m.cs) = 651; goto _test_eof + _test_eof205: ( m.cs) = 205; goto _test_eof _test_eof206: ( m.cs) = 206; goto _test_eof _test_eof207: ( m.cs) = 207; goto _test_eof - _test_eof208: ( m.cs) = 208; goto _test_eof + _test_eof651: ( m.cs) = 651; goto _test_eof _test_eof652: ( m.cs) = 652; goto _test_eof _test_eof653: ( m.cs) = 653; goto _test_eof _test_eof654: ( m.cs) = 654; goto _test_eof @@ -31009,15 +30693,15 @@ tr441: _test_eof667: ( m.cs) = 667; goto _test_eof _test_eof668: ( m.cs) = 668; goto _test_eof _test_eof669: ( m.cs) = 669; goto _test_eof - _test_eof670: ( m.cs) = 670; goto _test_eof + _test_eof208: ( m.cs) = 208; goto _test_eof _test_eof209: ( m.cs) = 209; goto _test_eof _test_eof210: ( m.cs) = 210; goto _test_eof _test_eof211: ( m.cs) = 211; goto _test_eof _test_eof212: ( m.cs) = 212; goto _test_eof + _test_eof670: ( m.cs) = 670; goto _test_eof _test_eof213: ( m.cs) = 213; goto _test_eof - _test_eof671: ( m.cs) = 671; goto _test_eof _test_eof214: ( m.cs) = 214; goto _test_eof - _test_eof215: ( m.cs) = 215; goto _test_eof + _test_eof671: ( m.cs) = 671; goto _test_eof _test_eof672: ( m.cs) = 672; goto _test_eof _test_eof673: ( m.cs) = 673; goto _test_eof _test_eof674: ( m.cs) = 674; goto _test_eof @@ -31026,25 +30710,25 @@ tr441: _test_eof677: ( m.cs) = 677; goto _test_eof _test_eof678: ( m.cs) = 678; goto _test_eof _test_eof679: ( m.cs) = 679; goto _test_eof - _test_eof680: ( m.cs) = 
680; goto _test_eof + _test_eof215: ( m.cs) = 215; goto _test_eof _test_eof216: ( m.cs) = 216; goto _test_eof _test_eof217: ( m.cs) = 217; goto _test_eof + _test_eof680: ( m.cs) = 680; goto _test_eof _test_eof218: ( m.cs) = 218; goto _test_eof - _test_eof681: ( m.cs) = 681; goto _test_eof _test_eof219: ( m.cs) = 219; goto _test_eof _test_eof220: ( m.cs) = 220; goto _test_eof + _test_eof681: ( m.cs) = 681; goto _test_eof _test_eof221: ( m.cs) = 221; goto _test_eof - _test_eof682: ( m.cs) = 682; goto _test_eof _test_eof222: ( m.cs) = 222; goto _test_eof - _test_eof223: ( m.cs) = 223; goto _test_eof + _test_eof682: ( m.cs) = 682; goto _test_eof _test_eof683: ( m.cs) = 683; goto _test_eof - _test_eof684: ( m.cs) = 684; goto _test_eof + _test_eof223: ( m.cs) = 223; goto _test_eof _test_eof224: ( m.cs) = 224; goto _test_eof _test_eof225: ( m.cs) = 225; goto _test_eof + _test_eof684: ( m.cs) = 684; goto _test_eof _test_eof226: ( m.cs) = 226; goto _test_eof - _test_eof685: ( m.cs) = 685; goto _test_eof _test_eof227: ( m.cs) = 227; goto _test_eof - _test_eof228: ( m.cs) = 228; goto _test_eof + _test_eof685: ( m.cs) = 685; goto _test_eof _test_eof686: ( m.cs) = 686; goto _test_eof _test_eof687: ( m.cs) = 687; goto _test_eof _test_eof688: ( m.cs) = 688; goto _test_eof @@ -31052,13 +30736,13 @@ tr441: _test_eof690: ( m.cs) = 690; goto _test_eof _test_eof691: ( m.cs) = 691; goto _test_eof _test_eof692: ( m.cs) = 692; goto _test_eof - _test_eof693: ( m.cs) = 693; goto _test_eof + _test_eof228: ( m.cs) = 228; goto _test_eof _test_eof229: ( m.cs) = 229; goto _test_eof _test_eof230: ( m.cs) = 230; goto _test_eof + _test_eof693: ( m.cs) = 693; goto _test_eof _test_eof231: ( m.cs) = 231; goto _test_eof - _test_eof694: ( m.cs) = 694; goto _test_eof _test_eof232: ( m.cs) = 232; goto _test_eof - _test_eof233: ( m.cs) = 233; goto _test_eof + _test_eof694: ( m.cs) = 694; goto _test_eof _test_eof695: ( m.cs) = 695; goto _test_eof _test_eof696: ( m.cs) = 696; goto _test_eof _test_eof697: ( m.cs) = 697; goto _test_eof @@ -31066,22 +30750,22 @@ tr441: _test_eof699: ( m.cs) = 699; goto _test_eof _test_eof700: ( m.cs) = 700; goto _test_eof _test_eof701: ( m.cs) = 701; goto _test_eof - _test_eof702: ( m.cs) = 702; goto _test_eof + _test_eof233: ( m.cs) = 233; goto _test_eof _test_eof234: ( m.cs) = 234; goto _test_eof _test_eof235: ( m.cs) = 235; goto _test_eof + _test_eof702: ( m.cs) = 702; goto _test_eof _test_eof236: ( m.cs) = 236; goto _test_eof - _test_eof703: ( m.cs) = 703; goto _test_eof _test_eof237: ( m.cs) = 237; goto _test_eof _test_eof238: ( m.cs) = 238; goto _test_eof + _test_eof703: ( m.cs) = 703; goto _test_eof _test_eof239: ( m.cs) = 239; goto _test_eof - _test_eof704: ( m.cs) = 704; goto _test_eof _test_eof240: ( m.cs) = 240; goto _test_eof - _test_eof241: ( m.cs) = 241; goto _test_eof + _test_eof704: ( m.cs) = 704; goto _test_eof _test_eof705: ( m.cs) = 705; goto _test_eof - _test_eof706: ( m.cs) = 706; goto _test_eof + _test_eof241: ( m.cs) = 241; goto _test_eof _test_eof242: ( m.cs) = 242; goto _test_eof _test_eof243: ( m.cs) = 243; goto _test_eof - _test_eof244: ( m.cs) = 244; goto _test_eof + _test_eof706: ( m.cs) = 706; goto _test_eof _test_eof707: ( m.cs) = 707; goto _test_eof _test_eof708: ( m.cs) = 708; goto _test_eof _test_eof709: ( m.cs) = 709; goto _test_eof @@ -31100,12 +30784,12 @@ tr441: _test_eof722: ( m.cs) = 722; goto _test_eof _test_eof723: ( m.cs) = 723; goto _test_eof _test_eof724: ( m.cs) = 724; goto _test_eof - _test_eof725: ( m.cs) = 725; goto _test_eof + _test_eof244: ( m.cs) = 
244; goto _test_eof _test_eof245: ( m.cs) = 245; goto _test_eof + _test_eof725: ( m.cs) = 725; goto _test_eof _test_eof246: ( m.cs) = 246; goto _test_eof - _test_eof726: ( m.cs) = 726; goto _test_eof _test_eof247: ( m.cs) = 247; goto _test_eof - _test_eof248: ( m.cs) = 248; goto _test_eof + _test_eof726: ( m.cs) = 726; goto _test_eof _test_eof727: ( m.cs) = 727; goto _test_eof _test_eof728: ( m.cs) = 728; goto _test_eof _test_eof729: ( m.cs) = 729; goto _test_eof @@ -31113,794 +30797,793 @@ tr441: _test_eof731: ( m.cs) = 731; goto _test_eof _test_eof732: ( m.cs) = 732; goto _test_eof _test_eof733: ( m.cs) = 733; goto _test_eof - _test_eof734: ( m.cs) = 734; goto _test_eof + _test_eof248: ( m.cs) = 248; goto _test_eof _test_eof249: ( m.cs) = 249; goto _test_eof _test_eof250: ( m.cs) = 250; goto _test_eof + _test_eof734: ( m.cs) = 734; goto _test_eof _test_eof251: ( m.cs) = 251; goto _test_eof - _test_eof735: ( m.cs) = 735; goto _test_eof _test_eof252: ( m.cs) = 252; goto _test_eof _test_eof253: ( m.cs) = 253; goto _test_eof + _test_eof735: ( m.cs) = 735; goto _test_eof _test_eof254: ( m.cs) = 254; goto _test_eof - _test_eof736: ( m.cs) = 736; goto _test_eof _test_eof255: ( m.cs) = 255; goto _test_eof - _test_eof256: ( m.cs) = 256; goto _test_eof + _test_eof736: ( m.cs) = 736; goto _test_eof _test_eof737: ( m.cs) = 737; goto _test_eof - _test_eof738: ( m.cs) = 738; goto _test_eof + _test_eof256: ( m.cs) = 256; goto _test_eof _test_eof257: ( m.cs) = 257; goto _test_eof - _test_eof258: ( m.cs) = 258; goto _test_eof - _test_eof739: ( m.cs) = 739; goto _test_eof - _test_eof261: ( m.cs) = 261; goto _test_eof + _test_eof738: ( m.cs) = 738; goto _test_eof + _test_eof260: ( m.cs) = 260; goto _test_eof + _test_eof740: ( m.cs) = 740; goto _test_eof _test_eof741: ( m.cs) = 741; goto _test_eof - _test_eof742: ( m.cs) = 742; goto _test_eof + _test_eof261: ( m.cs) = 261; goto _test_eof _test_eof262: ( m.cs) = 262; goto _test_eof _test_eof263: ( m.cs) = 263; goto _test_eof _test_eof264: ( m.cs) = 264; goto _test_eof + _test_eof742: ( m.cs) = 742; goto _test_eof _test_eof265: ( m.cs) = 265; goto _test_eof _test_eof743: ( m.cs) = 743; goto _test_eof _test_eof266: ( m.cs) = 266; goto _test_eof - _test_eof744: ( m.cs) = 744; goto _test_eof _test_eof267: ( m.cs) = 267; goto _test_eof _test_eof268: ( m.cs) = 268; goto _test_eof - _test_eof269: ( m.cs) = 269; goto _test_eof - _test_eof740: ( m.cs) = 740; goto _test_eof + _test_eof739: ( m.cs) = 739; goto _test_eof + _test_eof258: ( m.cs) = 258; goto _test_eof _test_eof259: ( m.cs) = 259; goto _test_eof - _test_eof260: ( m.cs) = 260; goto _test_eof _test_eof: {} if ( m.p) == ( m.eof) { switch ( m.cs) { - case 8, 261: -//line plugins/parsers/influx/machine.go.rl:24 + case 7, 260: +//line plugins/parsers/influx/machine.go.rl:32 err = ErrNameParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } - case 2, 3, 4, 5, 6, 7, 28, 31, 32, 35, 36, 37, 49, 50, 51, 52, 53, 73, 75, 76, 93, 103, 105, 141, 153, 156, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257: -//line plugins/parsers/influx/machine.go.rl:31 + case 2, 3, 4, 5, 6, 27, 30, 31, 34, 35, 36, 48, 49, 50, 51, 52, 72, 73, 75, 92, 102, 104, 140, 152, 155, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 
191, 192, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256: +//line plugins/parsers/influx/machine.go.rl:39 err = ErrFieldParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } - case 13, 14, 15, 22, 24, 25, 263, 264, 265, 266, 267, 268: -//line plugins/parsers/influx/machine.go.rl:38 + case 12, 13, 14, 21, 23, 24, 262, 263, 264, 265, 266, 267: +//line plugins/parsers/influx/machine.go.rl:46 err = ErrTagParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } - case 244: -//line plugins/parsers/influx/machine.go.rl:45 + case 243: +//line plugins/parsers/influx/machine.go.rl:53 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } - case 741: -//line plugins/parsers/influx/machine.go.rl:78 + case 740: +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } - case 743, 744: -//line plugins/parsers/influx/machine.go.rl:91 + case 742, 743: +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } - case 271, 272, 273, 274, 275, 277, 278, 297, 298, 299, 301, 302, 305, 306, 327, 328, 329, 330, 332, 376, 377, 379, 380, 402, 403, 408, 409, 411, 431, 432, 434, 435, 457, 458, 618, 621: -//line plugins/parsers/influx/machine.go.rl:170 + case 270, 271, 272, 273, 274, 276, 277, 296, 297, 298, 300, 301, 304, 305, 326, 327, 328, 329, 331, 375, 376, 378, 379, 401, 402, 407, 408, 410, 430, 431, 433, 434, 456, 457, 617, 620: +//line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 10, 38, 40, 165, 167: -//line plugins/parsers/influx/machine.go.rl:24 + case 9, 37, 39, 164, 166: +//line plugins/parsers/influx/machine.go.rl:32 err = ErrNameParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:31 +//line plugins/parsers/influx/machine.go.rl:39 err = ErrFieldParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } - case 34, 74, 104, 170, 208: -//line plugins/parsers/influx/machine.go.rl:31 + case 33, 74, 103, 169, 207: +//line plugins/parsers/influx/machine.go.rl:39 err = ErrFieldParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:45 +//line plugins/parsers/influx/machine.go.rl:53 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } - case 20, 44, 45, 46, 58, 59, 61, 63, 68, 70, 71, 77, 78, 79, 84, 86, 88, 89, 97, 98, 100, 101, 102, 107, 108, 109, 122, 123, 137, 138: -//line plugins/parsers/influx/machine.go.rl:38 + case 19, 43, 44, 45, 57, 58, 60, 62, 67, 69, 70, 76, 77, 78, 83, 85, 87, 88, 96, 97, 99, 100, 101, 106, 107, 108, 121, 122, 136, 137: +//line plugins/parsers/influx/machine.go.rl:46 err = ErrTagParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:31 +//line plugins/parsers/influx/machine.go.rl:39 err = ErrFieldParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } - case 60: -//line plugins/parsers/influx/machine.go.rl:38 + case 59: +//line 
plugins/parsers/influx/machine.go.rl:46 err = ErrTagParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:45 +//line plugins/parsers/influx/machine.go.rl:53 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } - case 270: -//line plugins/parsers/influx/machine.go.rl:74 + case 269: +//line plugins/parsers/influx/machine.go.rl:82 m.beginMetric = true -//line plugins/parsers/influx/machine.go.rl:170 +//line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true case 1: -//line plugins/parsers/influx/machine.go.rl:78 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:38 +//line plugins/parsers/influx/machine.go.rl:46 err = ErrTagParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } - case 300, 303, 307, 375, 399, 400, 404, 405, 406, 530, 564, 565, 567: -//line plugins/parsers/influx/machine.go.rl:78 + case 299, 302, 306, 374, 398, 399, 403, 404, 405, 529, 563, 564, 566: +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:170 +//line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 16, 23: -//line plugins/parsers/influx/machine.go.rl:91 + case 15, 22: +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:38 +//line plugins/parsers/influx/machine.go.rl:46 err = ErrTagParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } - case 351, 352, 353, 355, 374, 430, 454, 455, 459, 479, 495, 496, 498: -//line plugins/parsers/influx/machine.go.rl:91 + case 350, 351, 352, 354, 373, 429, 453, 454, 458, 478, 494, 495, 497: +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:170 +//line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 624, 675, 689, 729: -//line plugins/parsers/influx/machine.go.rl:104 + case 623, 674, 688, 728: +//line plugins/parsers/influx/machine.go.rl:112 err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:170 +//line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 625, 678, 692, 732: -//line plugins/parsers/influx/machine.go.rl:113 + case 624, 677, 691, 731: +//line plugins/parsers/influx/machine.go.rl:121 err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:170 +//line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 326, 619, 620, 622, 623, 626, 632, 633, 671, 672, 673, 674, 676, 677, 679, 685, 686, 687, 688, 690, 691, 693, 726, 727, 728, 730, 731, 733: -//line plugins/parsers/influx/machine.go.rl:122 + case 325, 618, 619, 621, 622, 625, 631, 632, 670, 671, 
672, 673, 675, 676, 678, 684, 685, 686, 687, 689, 690, 692, 725, 726, 727, 729, 730, 732: +//line plugins/parsers/influx/machine.go.rl:130 err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:170 +//line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 627, 628, 629, 630, 631, 634, 635, 636, 637, 638, 680, 681, 682, 683, 684, 734, 735, 736, 737, 738: -//line plugins/parsers/influx/machine.go.rl:131 + case 626, 627, 628, 629, 630, 633, 634, 635, 636, 637, 679, 680, 681, 682, 683, 733, 734, 735, 736, 737: +//line plugins/parsers/influx/machine.go.rl:139 err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:170 +//line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 276, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 331, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 378, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 410, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 433, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 599, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725: -//line plugins/parsers/influx/machine.go.rl:149 + case 275, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 330, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 377, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 409, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 432, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 598, 599, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724: +//line plugins/parsers/influx/machine.go.rl:157 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:170 +//line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 9: -//line plugins/parsers/influx/machine.go.rl:24 + case 8: +//line plugins/parsers/influx/machine.go.rl:32 err = ErrNameParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:78 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:38 +//line plugins/parsers/influx/machine.go.rl:46 err = ErrTagParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } - case 99: -//line 
plugins/parsers/influx/machine.go.rl:38 + case 98: +//line plugins/parsers/influx/machine.go.rl:46 err = ErrTagParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:31 +//line plugins/parsers/influx/machine.go.rl:39 err = ErrFieldParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:45 +//line plugins/parsers/influx/machine.go.rl:53 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } - case 11, 12, 26, 27, 29, 30, 41, 42, 54, 55, 56, 57, 72, 91, 92, 94, 96, 139, 140, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 154, 155, 157, 158, 159, 160, 161, 162, 163, 164, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241: -//line plugins/parsers/influx/machine.go.rl:78 + case 10, 11, 25, 26, 28, 29, 40, 41, 53, 54, 55, 56, 71, 90, 91, 93, 95, 138, 139, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 153, 154, 156, 157, 158, 159, 160, 161, 162, 163, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240: +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:38 +//line plugins/parsers/influx/machine.go.rl:46 err = ErrTagParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:31 +//line plugins/parsers/influx/machine.go.rl:39 err = ErrFieldParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } - case 535, 589, 697: -//line plugins/parsers/influx/machine.go.rl:78 + case 534, 588, 696: +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:104 +//line plugins/parsers/influx/machine.go.rl:112 err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:170 +//line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 538, 592, 700: -//line plugins/parsers/influx/machine.go.rl:78 + case 537, 591, 699: +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:113 +//line plugins/parsers/influx/machine.go.rl:121 err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:170 +//line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 407, 531, 532, 533, 534, 536, 537, 539, 563, 586, 587, 588, 590, 591, 593, 694, 695, 696, 698, 699, 701: -//line plugins/parsers/influx/machine.go.rl:78 + case 406, 530, 531, 532, 533, 535, 536, 538, 562, 585, 586, 587, 589, 590, 592, 693, 694, 695, 697, 698, 700: +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:122 +//line plugins/parsers/influx/machine.go.rl:130 err = 
m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:170 +//line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 540, 541, 542, 543, 544, 594, 595, 596, 597, 598, 702, 703, 704, 705, 706: -//line plugins/parsers/influx/machine.go.rl:78 + case 539, 540, 541, 542, 543, 593, 594, 595, 596, 597, 701, 702, 703, 704, 705: +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:131 +//line plugins/parsers/influx/machine.go.rl:139 err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:170 +//line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 304, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 401, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 566, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585: -//line plugins/parsers/influx/machine.go.rl:78 + case 303, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 400, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 565, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584: +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:149 +//line plugins/parsers/influx/machine.go.rl:157 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:170 +//line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 17, 18, 19, 21, 47, 48, 64, 65, 66, 67, 69, 80, 81, 82, 83, 85, 87, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 124, 125, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205: -//line plugins/parsers/influx/machine.go.rl:91 + case 16, 17, 18, 20, 46, 47, 63, 64, 65, 66, 68, 79, 80, 81, 82, 84, 86, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 123, 124, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204: +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:38 +//line plugins/parsers/influx/machine.go.rl:46 err = ErrTagParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:31 +//line plugins/parsers/influx/machine.go.rl:39 err = ErrFieldParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } - case 484, 520, 642: -//line plugins/parsers/influx/machine.go.rl:91 + case 483, 519, 641: +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( 
m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:104 +//line plugins/parsers/influx/machine.go.rl:112 err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:170 +//line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 487, 523, 645: -//line plugins/parsers/influx/machine.go.rl:91 + case 486, 522, 644: +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:113 +//line plugins/parsers/influx/machine.go.rl:121 err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:170 +//line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 478, 480, 481, 482, 483, 485, 486, 488, 494, 517, 518, 519, 521, 522, 524, 639, 640, 641, 643, 644, 646: -//line plugins/parsers/influx/machine.go.rl:91 + case 477, 479, 480, 481, 482, 484, 485, 487, 493, 516, 517, 518, 520, 521, 523, 638, 639, 640, 642, 643, 645: +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:122 +//line plugins/parsers/influx/machine.go.rl:130 err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:170 +//line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 489, 490, 491, 492, 493, 525, 526, 527, 528, 529, 647, 648, 649, 650, 651: -//line plugins/parsers/influx/machine.go.rl:91 + case 488, 489, 490, 491, 492, 524, 525, 526, 527, 528, 646, 647, 648, 649, 650: +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:131 +//line plugins/parsers/influx/machine.go.rl:139 err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:170 +//line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 354, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 456, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 497, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516: -//line plugins/parsers/influx/machine.go.rl:91 + case 353, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 455, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 496, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515: +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:149 +//line plugins/parsers/influx/machine.go.rl:157 err = m.handler.SetTimestamp(m.text()) if err != 
nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:170 +//line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 39, 166, 168, 169, 206, 207, 242, 243: -//line plugins/parsers/influx/machine.go.rl:24 + case 38, 165, 167, 168, 205, 206, 241, 242: +//line plugins/parsers/influx/machine.go.rl:32 err = ErrNameParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:78 +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:38 +//line plugins/parsers/influx/machine.go.rl:46 err = ErrTagParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:31 +//line plugins/parsers/influx/machine.go.rl:39 err = ErrFieldParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } - case 43, 90, 152: -//line plugins/parsers/influx/machine.go.rl:78 + case 42, 89, 151: +//line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:38 +//line plugins/parsers/influx/machine.go.rl:46 err = ErrTagParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:31 +//line plugins/parsers/influx/machine.go.rl:39 err = ErrFieldParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:45 +//line plugins/parsers/influx/machine.go.rl:53 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } - case 62, 106, 126: -//line plugins/parsers/influx/machine.go.rl:91 + case 61, 105, 125: +//line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:38 +//line plugins/parsers/influx/machine.go.rl:46 err = ErrTagParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:31 +//line plugins/parsers/influx/machine.go.rl:39 err = ErrFieldParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:45 +//line plugins/parsers/influx/machine.go.rl:53 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 258; + ( m.cs) = 257; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go:31897 +//line plugins/parsers/influx/machine.go:31580 } } _out: {} } -//line plugins/parsers/influx/machine.go.rl:407 +//line plugins/parsers/influx/machine.go.rl:415 if err != nil { return err @@ -31999,7 +31682,12 @@ func (m *streamMachine) Next() error { if n == 0 && err == io.EOF { m.machine.eof = m.machine.pe } else if err != nil && err != io.EOF { - return err + // After the reader returns an error this function shouldn't be + // called again. This will cause the machine to return EOF once this // is done.
+ m.machine.p = m.machine.pe + m.machine.eof = m.machine.pe + return &readErr{Err: err} } m.machine.pe += n diff --git a/plugins/parsers/influx/machine.go.rl b/plugins/parsers/influx/machine.go.rl index 54cf67fba..61f49c652 100644 --- a/plugins/parsers/influx/machine.go.rl +++ b/plugins/parsers/influx/machine.go.rl @@ -5,6 +5,14 @@ import ( "io" ) +type readErr struct { + Err error +} + +func (e *readErr) Error() string { + return e.Err.Error() +} + var ( ErrNameParse = errors.New("expected measurement name") ErrFieldParse = errors.New("expected field") @@ -220,7 +228,7 @@ fieldbool = (true | false) >begin %bool; fieldstringchar = - [^\f\r\n\\"] | '\\' [\\"] | newline; + [^\n\\"] | '\\' [\\"] | newline; fieldstring = fieldstringchar* >begin %string; @@ -502,7 +510,12 @@ func (m *streamMachine) Next() error { if n == 0 && err == io.EOF { m.machine.eof = m.machine.pe } else if err != nil && err != io.EOF { - return err + // After the reader returns an error this function shouldn't be + // called again. This will cause the machine to return EOF once this + // is done. + m.machine.p = m.machine.pe + m.machine.eof = m.machine.pe + return &readErr{Err: err} } m.machine.pe += n diff --git a/plugins/parsers/influx/machine_test.go b/plugins/parsers/influx/machine_test.go index 4ba3b8d68..de5353da0 100644 --- a/plugins/parsers/influx/machine_test.go +++ b/plugins/parsers/influx/machine_test.go @@ -873,6 +873,27 @@ var tests = []struct { }, }, }, + { + name: "cr in string field", + input: []byte("cpu value=\"4\r2\""), + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldString, + Value: []byte("4\r2"), + }, + { + Name: Success, + }, + }, + }, { name: "bool field", input: []byte("cpu value=true"), diff --git a/plugins/parsers/influx/parser.go b/plugins/parsers/influx/parser.go index 1723cde33..68db9128f 100644 --- a/plugins/parsers/influx/parser.go +++ b/plugins/parsers/influx/parser.go @@ -174,11 +174,15 @@ func (h *StreamParser) SetTimePrecision(u time.Duration) { } // Next parses the next item from the stream. You can repeat calls to this -// function until it returns EOF. +// function after it returns a ParseError to get the next metric or error.
func (p *StreamParser) Next() (telegraf.Metric, error) { err := p.machine.Next() if err == EOF { - return nil, EOF + return nil, err + } + + if e, ok := err.(*readErr); ok { + return nil, e.Err } if err != nil { diff --git a/plugins/parsers/influx/parser_test.go b/plugins/parsers/influx/parser_test.go index 386b99724..3104c1f3f 100644 --- a/plugins/parsers/influx/parser_test.go +++ b/plugins/parsers/influx/parser_test.go @@ -2,6 +2,7 @@ package influx import ( "bytes" + "errors" "strconv" "strings" "testing" @@ -869,3 +870,28 @@ func TestStreamParserErrorString(t *testing.T) { }) } } + +type MockReader struct { + ReadF func(p []byte) (int, error) +} + +func (r *MockReader) Read(p []byte) (int, error) { + return r.ReadF(p) +} + +// Errors from the Reader are returned from the Parser +func TestStreamParserReaderError(t *testing.T) { + readerErr := errors.New("error but not eof") + + parser := NewStreamParser(&MockReader{ + ReadF: func(p []byte) (int, error) { + return 0, readerErr + }, + }) + _, err := parser.Next() + require.Error(t, err) + require.Equal(t, err, readerErr) + + _, err = parser.Next() + require.Equal(t, err, EOF) +} From 59aa0a3349d0b10a1be0fd31dd4c72c1210be25a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 28 Apr 2020 14:54:18 -0700 Subject: [PATCH 1723/1815] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a0b972f58..fa1f37a75 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,10 @@ - [#7318](https://github.com/influxdata/telegraf/issues/7318): Fix dimension limit on azure_monitor output. - [#7407](https://github.com/influxdata/telegraf/pull/7407): Fix 64-bit integer to string conversion in snmp input. - [#7327](https://github.com/influxdata/telegraf/issues/7327): Fix shard indices reporting in elasticsearch input. +- [#7388](https://github.com/influxdata/telegraf/issues/7388): Ignore fields with NaN or Inf floats in the JSON serializer. +- [#7402](https://github.com/influxdata/telegraf/issues/7402): Fix typo in name of gc_cpu_fraction field of the kapacitor input. +- [#7235](https://github.com/influxdata/telegraf/issues/7235): Don't retry `create database` when using database_tag if forbidden by the server in influxdb output. +- [#7406](https://github.com/influxdata/telegraf/issues/7406): Allow CR and FF inside of string fields in influx parser. ## v1.14.1 [2020-04-14] From 31637717784b6b4fa76b541d9ce32a672373c101 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 28 Apr 2020 14:59:07 -0700 Subject: [PATCH 1724/1815] Set 1.14.2 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fa1f37a75..5af03481e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,7 +28,7 @@ - [#7371](https://github.com/influxdata/telegraf/issues/7371): Fix unable to write metrics to CloudWatch with IMDSv1 disabled. - [#7233](https://github.com/influxdata/telegraf/issues/7233): Fix vSphere 6.7 missing data issue. 
-## v1.14.2 [unreleased] +## v1.14.2 [2020-04-28] #### Bugfixes From e1b2ebe06d96d07d2f2bdce01575885e533f337d Mon Sep 17 00:00:00 2001 From: presslab-us Date: Wed, 29 Apr 2020 14:32:19 -0400 Subject: [PATCH 1725/1815] Use same timestamp for all objects in arrays in the json parser (#7412) --- plugins/parsers/json/parser.go | 22 +++++++++++----------- plugins/parsers/json/parser_test.go | 12 ++++++++++++ 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index bba179e1b..bd9dee869 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -67,13 +67,13 @@ func New(config *Config) (*Parser, error) { }, nil } -func (p *Parser) parseArray(data []interface{}) ([]telegraf.Metric, error) { +func (p *Parser) parseArray(data []interface{}, timestamp time.Time) ([]telegraf.Metric, error) { results := make([]telegraf.Metric, 0) for _, item := range data { switch v := item.(type) { case map[string]interface{}: - metrics, err := p.parseObject(v) + metrics, err := p.parseObject(v, timestamp) if err != nil { if p.strict { return nil, err @@ -90,7 +90,7 @@ func (p *Parser) parseArray(data []interface{}) ([]telegraf.Metric, error) { return results, nil } -func (p *Parser) parseObject(data map[string]interface{}) ([]telegraf.Metric, error) { +func (p *Parser) parseObject(data map[string]interface{}, timestamp time.Time) ([]telegraf.Metric, error) { tags := make(map[string]string) for k, v := range p.defaultTags { tags[k] = v @@ -112,8 +112,7 @@ func (p *Parser) parseObject(data map[string]interface{}) ([]telegraf.Metric, er } } - //if time key is specified, set it to nTime - nTime := time.Now().UTC() + //if time key is specified, set timestamp to it if p.timeKey != "" { if p.timeFormat == "" { err := fmt.Errorf("use of 'json_time_key' requires 'json_time_format'") @@ -125,7 +124,7 @@ func (p *Parser) parseObject(data map[string]interface{}) ([]telegraf.Metric, er return nil, err } - nTime, err = internal.ParseTimestamp(p.timeFormat, f.Fields[p.timeKey], p.timezone) + timestamp, err = internal.ParseTimestamp(p.timeFormat, f.Fields[p.timeKey], p.timezone) if err != nil { return nil, err } @@ -133,13 +132,13 @@ func (p *Parser) parseObject(data map[string]interface{}) ([]telegraf.Metric, er delete(f.Fields, p.timeKey) //if the year is 0, set to current year - if nTime.Year() == 0 { - nTime = nTime.AddDate(time.Now().Year(), 0, 0) + if timestamp.Year() == 0 { + timestamp = timestamp.AddDate(time.Now().Year(), 0, 0) } } tags, nFields := p.switchFieldToTag(tags, f.Fields) - metric, err := metric.New(name, tags, nFields, nTime) + metric, err := metric.New(name, tags, nFields, timestamp) if err != nil { return nil, err } @@ -206,11 +205,12 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { return nil, err } + timestamp := time.Now().UTC() switch v := data.(type) { case map[string]interface{}: - return p.parseObject(v) + return p.parseObject(v, timestamp) case []interface{}: - return p.parseArray(v) + return p.parseArray(v, timestamp) default: return nil, ErrWrongType } diff --git a/plugins/parsers/json/parser_test.go b/plugins/parsers/json/parser_test.go index 4571de63a..31c507e75 100644 --- a/plugins/parsers/json/parser_test.go +++ b/plugins/parsers/json/parser_test.go @@ -794,6 +794,18 @@ func TestTimeErrors(t *testing.T) { require.Equal(t, fmt.Errorf("JSON time key could not be found"), err) } +func TestShareTimestamp(t *testing.T) { + parser, err := New(&Config{ + MetricName: "json_test", + }) + 
require.NoError(t, err) + + metrics, err := parser.Parse([]byte(validJSONArrayMultiple)) + require.NoError(t, err) + require.Equal(t, 2, len(metrics)) + require.Equal(t, true, metrics[0].Time() == metrics[1].Time()) +} + func TestNameKey(t *testing.T) { testString := `{ "a": 5, From a0679c5427d4c565d976eb192021781d5b10ac4a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 29 Apr 2020 11:33:46 -0700 Subject: [PATCH 1726/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5af03481e..6b33b280f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,12 @@ - [#7371](https://github.com/influxdata/telegraf/issues/7371): Fix unable to write metrics to CloudWatch with IMDSv1 disabled. - [#7233](https://github.com/influxdata/telegraf/issues/7233): Fix vSphere 6.7 missing data issue. +## v1.14.3 [unreleased] + +#### Bugfixes + +- [#7412](https://github.com/influxdata/telegraf/pull/7412): Use same timestamp for all objects in arrays in the json parser. + ## v1.14.2 [2020-04-28] #### Bugfixes From 07c6b78c8fd21f62dfbff5f2de496e22c44dca3f Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 29 Apr 2020 17:28:55 -0400 Subject: [PATCH 1727/1815] Sflow rework (#7253) --- plugins/inputs/sflow/README.md | 7 + plugins/inputs/sflow/binaryio/minreader.go | 37 ++ .../inputs/sflow/binaryio/minreader_test.go | 39 ++ plugins/inputs/sflow/decoder.go | 306 --------- plugins/inputs/sflow/decoder/directives.go | 402 ------------ .../inputs/sflow/decoder/directives_test.go | 582 ------------------ plugins/inputs/sflow/decoder/funcs.go | 216 ------- plugins/inputs/sflow/decoder/ops.go | 490 --------------- plugins/inputs/sflow/decoder/ops_test.go | 383 ------------ plugins/inputs/sflow/decoder_test.go | 469 ++++---------- plugins/inputs/sflow/metricencoder.go | 46 ++ plugins/inputs/sflow/packetdecoder.go | 483 +++++++++++++++ plugins/inputs/sflow/packetdecoder_test.go | 207 +++++++ plugins/inputs/sflow/sflow.go | 36 +- plugins/inputs/sflow/sflow_test.go | 8 +- plugins/inputs/sflow/types.go | 285 +++++++++ 16 files changed, 1254 insertions(+), 2742 deletions(-) create mode 100644 plugins/inputs/sflow/binaryio/minreader.go create mode 100644 plugins/inputs/sflow/binaryio/minreader_test.go delete mode 100644 plugins/inputs/sflow/decoder.go delete mode 100644 plugins/inputs/sflow/decoder/directives.go delete mode 100644 plugins/inputs/sflow/decoder/directives_test.go delete mode 100644 plugins/inputs/sflow/decoder/funcs.go delete mode 100644 plugins/inputs/sflow/decoder/ops.go delete mode 100644 plugins/inputs/sflow/decoder/ops_test.go create mode 100644 plugins/inputs/sflow/metricencoder.go create mode 100644 plugins/inputs/sflow/packetdecoder.go create mode 100644 plugins/inputs/sflow/packetdecoder_test.go create mode 100644 plugins/inputs/sflow/types.go diff --git a/plugins/inputs/sflow/README.md b/plugins/inputs/sflow/README.md index 73bbcb1e0..66d556e17 100644 --- a/plugins/inputs/sflow/README.md +++ b/plugins/inputs/sflow/README.md @@ -105,6 +105,12 @@ $ sudo tcpdump -s 0 -i eth0 -w telegraf-sflow.pcap host 127.0.0.1 and port 6343 sflow,agent_address=0.0.0.0,dst_ip=10.0.0.2,dst_mac=ff:ff:ff:ff:ff:ff,dst_port=40042,ether_type=IPv4,header_protocol=ETHERNET-ISO88023,input_ifindex=6,ip_dscp=27,ip_ecn=0,output_ifindex=1073741823,source_id_index=3,source_id_type=0,src_ip=10.0.0.1,src_mac=ff:ff:ff:ff:ff:ff,src_port=443 
bytes=1570i,drops=0i,frame_length=157i,header_length=128i,ip_flags=2i,ip_fragment_offset=0i,ip_total_length=139i,ip_ttl=42i,sampling_rate=10i,tcp_header_length=0i,tcp_urgent_pointer=0i,tcp_window_size=14i 1584473704793580447 ``` +### Reference Documentation + +This sflow implementation was built from the reference document +[sflow.org/sflow_version_5.txt][sflow_version_5]. + + [metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering [retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ [max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000 @@ -112,3 +118,4 @@ sflow,agent_address=0.0.0.0,dst_ip=10.0.0.2,dst_mac=ff:ff:ff:ff:ff:ff,dst_port=4 [tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ [series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality [influx-docs]: https://docs.influxdata.com/influxdb/latest/ +[sflow_version_5]: https://sflow.org/sflow_version_5.txt diff --git a/plugins/inputs/sflow/binaryio/minreader.go b/plugins/inputs/sflow/binaryio/minreader.go new file mode 100644 index 000000000..35ccdbcf2 --- /dev/null +++ b/plugins/inputs/sflow/binaryio/minreader.go @@ -0,0 +1,37 @@ +package binaryio + +import "io" + +// MinimumReader is the implementation for MinReader. +type MinimumReader struct { + R io.Reader + MinNumberOfBytesToRead int64 // Min number of bytes we need to read from the reader +} + +// MinReader reads from R but ensures that at least N bytes are read from the reader. +// The caller should call Close() when done reading. +// Closing the MinReader will read and discard any unread bytes up to MinNumberOfBytesToRead. +// Closing the MinReader does NOT close the underlying reader. +// The underlying implementation is a MinimumReader, which implements ReadCloser.
+func MinReader(r io.Reader, minNumberOfBytesToRead int64) *MinimumReader { + return &MinimumReader{ + R: r, + MinNumberOfBytesToRead: minNumberOfBytesToRead, + } +} + +func (r *MinimumReader) Read(p []byte) (n int, err error) { + n, err = r.R.Read(p) + r.MinNumberOfBytesToRead -= int64(n) + return n, err +} + +// Close does not close the underlying reader, only the MinimumReader +func (r *MinimumReader) Close() error { + if r.MinNumberOfBytesToRead > 0 { + b := make([]byte, r.MinNumberOfBytesToRead) + _, err := r.R.Read(b) + return err + } + return nil +} diff --git a/plugins/inputs/sflow/binaryio/minreader_test.go b/plugins/inputs/sflow/binaryio/minreader_test.go new file mode 100644 index 000000000..081564b3e --- /dev/null +++ b/plugins/inputs/sflow/binaryio/minreader_test.go @@ -0,0 +1,39 @@ +package binaryio + +import ( + "bytes" + "testing" +) + +func TestMinReader(t *testing.T) { + b := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + r := bytes.NewBuffer(b) + + mr := MinReader(r, 10) + + toRead := make([]byte, 5) + n, err := mr.Read(toRead) + if err != nil { + t.Error(err) + } + if n != 5 { + t.Error("Expected n to be 5, but was ", n) + } + if string(toRead) != string([]byte{1, 2, 3, 4, 5}) { + t.Error("expected 5 specific bytes to be read") + } + err = mr.Close() + if err != nil { + t.Error(err) + } + n, err = r.Read(toRead) // read from the outer stream + if err != nil { + t.Error(err) + } + if n != 5 { + t.Error("Expected n to be 5, but was ", n) + } + if string(toRead) != string([]byte{11, 12, 13, 14, 15}) { + t.Error("expected the last 5 bytes to be read") + } +} diff --git a/plugins/inputs/sflow/decoder.go b/plugins/inputs/sflow/decoder.go deleted file mode 100644 index 51a534881..000000000 --- a/plugins/inputs/sflow/decoder.go +++ /dev/null @@ -1,306 +0,0 @@ -package sflow - -import ( - "fmt" - "math" - "net" - - "github.com/influxdata/telegraf/plugins/inputs/sflow/decoder" -) - -const ( - addressTypeIPv4 = uint32(1) // line: 1383 - addressTypeIPv6 = uint32(2) // line: 1384 - - sampleTypeFlowSample = uint32(1) // line: 1614 - sampleTypeFlowSampleExpanded = uint32(3) // line: 1698 - - flowDataRawPacketHeaderFormat = uint32(1) // line: 1938 - - headerProtocolEthernetIso88023 = uint32(1) // line: 1920 - - ipProtocolTCP = byte(6) - ipProtocolUDP = byte(17) - - metricName = "sflow" -) - -var headerProtocolMap = map[uint32]string{ - headerProtocolEthernetIso88023: "ETHERNET-ISO88023", // line: 1920 -} - -var etypeMap = map[uint16]string{ - 0x0800: "IPv4", - 0x86DD: "IPv6", -} - -func bytesToIPStr(b []byte) string { - return net.IP(b).String() -} - -func bytesToMACStr(b []byte) string { - return fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x", b[0], b[1], b[2], b[3], b[4], b[5]) -} - -var ipvMap = map[uint32]string{ - 1: "IPV4", // line: 1383 - 2: "IPV6", // line: 1384 -} - -// V5FormatOptions captures configuration for controlling the processing of an SFlow V5 packet. 
-type V5FormatOptions struct { - MaxFlowsPerSample uint32 - MaxSamplesPerPacket uint32 - MaxFlowHeaderLength uint32 - MaxSampleLength uint32 -} - -// NewDefaultV5FormatOptions answers a new V5FormatOptions with default values initialised -func NewDefaultV5FormatOptions() V5FormatOptions { - return V5FormatOptions{ - MaxFlowsPerSample: math.MaxUint32, - MaxSamplesPerPacket: math.MaxUint32, - MaxFlowHeaderLength: math.MaxUint32, - MaxSampleLength: math.MaxUint32, - } -} - -// V5Format answers and decoder.Directive capable of decoding sFlow v5 packets in accordance -// with SFlow v5 specification at https://sflow.org/sflow_version_5.txt -func V5Format(options V5FormatOptions) decoder.Directive { - return decoder.Seq( // line: 1823 - decoder.U32().Do(decoder.U32Assert(func(v uint32) bool { return v == 5 }, "Version %d not supported, only version 5")), - decoder.U32().Switch( // agent_address line: 1787 - decoder.Case(addressTypeIPv4, decoder.Bytes(4).Do(decoder.BytesToStr(4, bytesToIPStr).AsT("agent_address"))), // line: 1390 - decoder.Case(addressTypeIPv6, decoder.Bytes(16).Do(decoder.BytesToStr(16, bytesToIPStr).AsT("agent_address"))), // line: 1393 - ), - decoder.U32(), // sub_agent_id line: 1790 - decoder.U32(), // sequence_number line: 1801 - decoder.U32(), // uptime line: 1804 - decoder.U32().Iter(options.MaxSamplesPerPacket, sampleRecord(options)), // samples line: 1812 - ) -} - -func sampleRecord(options V5FormatOptions) decoder.Directive { - var sampleType interface{} - return decoder.Seq( // line: 1760 - decoder.U32().Ref(&sampleType), // sample_type line: 1761 - decoder.U32().Encapsulated(options.MaxSampleLength, // sample_data line: 1762 - decoder.Ref(sampleType).Switch( - decoder.Case(sampleTypeFlowSample, flowSample(sampleType, options)), // line: 1614 - decoder.Case(sampleTypeFlowSampleExpanded, flowSampleExpanded(sampleType, options)), // line: 1698 - decoder.DefaultCase(nil), // this allows other cases to just be ignored rather than cause an error - ), - ), - ) -} - -func flowSample(sampleType interface{}, options V5FormatOptions) decoder.Directive { - var samplingRate = new(uint32) - var sourceIDIndex = new(uint32) - return decoder.Seq( // line: 1616 - decoder.U32(), // sequence_number line: 1617 - decoder.U32(). // source_id line: 1622 - Do(decoder.U32ToU32(func(v uint32) uint32 { return v >> 24 }).AsT("source_id_type")). // source_id_type Line 1465 - Do(decoder.U32ToU32(func(v uint32) uint32 { return v & 0x00ffffff }).Set(sourceIDIndex).AsT("source_id_index")), // line: 1468 - decoder.U32().Do(decoder.Set(samplingRate).AsF("sampling_rate")), // line: 1631 - decoder.U32(), // samplePool: Line 1632 - decoder.U32().Do(decoder.AsF("drops")), // Line 1636 - decoder.U32(). // line: 1651 - Do(decoder.U32ToU32(func(v uint32) uint32 { return v & 0x3fffffff }).AsT("input_ifindex")). // line: 1477 - Do(decoder.U32ToU32(func(v uint32) uint32 { return v & 0x3fffffff }). - ToString(func(v uint32) string { - if v == *sourceIDIndex { - return "ingress" - } - return "" - }). - BreakIf(""). - AsT("sample_direction")), - decoder.U32(). // line: 1652 - Do(decoder.U32ToU32(func(v uint32) uint32 { return v & 0x3fffffff }).AsT("output_ifindex")). // line: 1477 - Do(decoder.U32ToU32(func(v uint32) uint32 { return v & 0x3fffffff }). - ToString(func(v uint32) string { - if v == *sourceIDIndex { - return "egress" - } - return "" - }). - BreakIf(""). 
- AsT("sample_direction")), - decoder.U32().Iter(options.MaxFlowsPerSample, flowRecord(samplingRate, options)), // line: 1654 - ) -} - -func flowSampleExpanded(sampleType interface{}, options V5FormatOptions) decoder.Directive { - var samplingRate = new(uint32) - var sourceIDIndex = new(uint32) - return decoder.Seq( // line: 1700 - decoder.U32(), // sequence_number line: 1701 - decoder.U32().Do(decoder.AsT("source_id_type")), // line: 1706 + 16878 - decoder.U32().Do(decoder.Set(sourceIDIndex).AsT("source_id_index")), // line 1689 - decoder.U32().Do(decoder.Set(samplingRate).AsF("sampling_rate")), // sample_rate line: 1707 - decoder.U32(), // saple_pool line: 1708 - decoder.U32().Do(decoder.AsF("drops")), // line: 1712 - decoder.U32(), // inputt line: 1727 - decoder.U32(). // input line: 1727 - Do(decoder.AsT("input_ifindex")). // line: 1728 - Do(decoder.U32ToStr(func(v uint32) string { - if v == *sourceIDIndex { - return "ingress" - } - return "" - }). - BreakIf(""). - AsT("sample_direction")), - decoder.U32(), // output line: 1728 - decoder.U32(). // outpuit line: 1728 - Do(decoder.AsT("output_ifindex")). // line: 1729 CHANFE AS FOR NON EXPANDED - Do(decoder.U32ToStr(func(v uint32) string { - if v == *sourceIDIndex { - return "egress" - } - return "" - }). - BreakIf(""). - AsT("sample_direction")), - decoder.U32().Iter(options.MaxFlowsPerSample, flowRecord(samplingRate, options)), // line: 1730 - ) -} - -func flowRecord(samplingRate *uint32, options V5FormatOptions) decoder.Directive { - var flowFormat interface{} - return decoder.Seq( // line: 1597 - decoder.U32().Ref(&flowFormat), // line 1598 - decoder.U32().Encapsulated(options.MaxFlowHeaderLength, // line 1599 - decoder.Ref(flowFormat).Switch( - decoder.Case(flowDataRawPacketHeaderFormat, rawPacketHeaderFlowData(samplingRate, options)), // line: 1938 - decoder.DefaultCase(nil), - ), - ), - ) -} - -func rawPacketHeaderFlowData(samplingRate *uint32, options V5FormatOptions) decoder.Directive { - var protocol interface{} - var headerLength interface{} - return decoder.Seq( // line: 1940 - decoder.U32().Ref(&protocol).Do(decoder.MapU32ToStr(headerProtocolMap).AsT("header_protocol")), // line: 1941 - decoder.U32(). // line: 1942 - Do(decoder.AsF("frame_length")). 
- Do(decoder.U32ToU32(func(in uint32) uint32 { - return in * (*samplingRate) - }).AsF("bytes")), - decoder.U32(), // stripped line: 1967 - decoder.U32().Ref(&headerLength).Do(decoder.AsF("header_length")), - decoder.Ref(headerLength).Encapsulated(options.MaxFlowHeaderLength, - decoder.Ref(protocol).Switch( - decoder.Case(headerProtocolEthernetIso88023, ethHeader(options)), - decoder.DefaultCase(nil), - )), - ) -} - -// ethHeader answers a decode Directive that will decode an ethernet frame header -// according to https://en.wikipedia.org/wiki/Ethernet_frame -func ethHeader(options V5FormatOptions) decoder.Directive { - var tagOrEType interface{} - etype := new(uint16) - return decoder.Seq( - decoder.OpenMetric(metricName), - decoder.Bytes(6).Do(decoder.BytesToStr(6, bytesToMACStr).AsT("dst_mac")), - decoder.Bytes(6).Do(decoder.BytesToStr(6, bytesToMACStr).AsT("src_mac")), - decoder.U16().Ref(&tagOrEType).Switch( - decoder.Case(uint16(0x8100), - decoder.Seq( - decoder.U16(), - decoder.U16().Do(decoder.Set(etype)), // just follows on from vlan id - ), - ), - decoder.DefaultCase( // Not an 802.1Q VLAN Tag, just treat as an ether type - decoder.Ref(tagOrEType).Do(decoder.Set(etype)), - ), - ), - decoder.U16Value(etype).Do(decoder.MapU16ToStr(etypeMap).AsT("ether_type")), - decoder.U16Value(etype).Switch( - decoder.Case(uint16(0x0800), ipv4Header(options)), - decoder.Case(uint16(0x86DD), ipv6Header(options)), - decoder.DefaultCase(nil), - ), - decoder.CloseMetric(), - ) - -} - -// ipv4Header answers a decode Directive that decode an IPv4 header according to -// https://en.wikipedia.org/wiki/IPv4 -func ipv4Header(options V5FormatOptions) decoder.Directive { - var proto interface{} - return decoder.Seq( - decoder.U16(). - Do(decoder.U16ToU16(func(in uint16) uint16 { return (in & 0xFC) >> 2 }).AsT("ip_dscp")). - Do(decoder.U16ToU16(func(in uint16) uint16 { return in & 0x3 }).AsT("ip_ecn")), - decoder.U16().Do(decoder.AsF("ip_total_length")), - decoder.U16(), - decoder.U16(). - Do(decoder.U16ToU16(func(v uint16) uint16 { return (v & 0xE000) >> 13 }).AsF("ip_flags")). - Do(decoder.U16ToU16(func(v uint16) uint16 { return v & 0x1FFF }).AsF("ip_fragment_offset")), - decoder.Bytes(1).Do(decoder.BytesTo(1, func(b []byte) interface{} { return uint8(b[0]) }).AsF("ip_ttl")), - decoder.Bytes(1).Ref(&proto), - decoder.U16(), - decoder.Bytes(4).Do(decoder.BytesToStr(4, bytesToIPStr).AsT("src_ip")), - decoder.Bytes(4).Do(decoder.BytesToStr(4, bytesToIPStr).AsT("dst_ip")), - decoder.Ref(proto).Switch( // Does not consider IHL and Options - decoder.Case(ipProtocolTCP, tcpHeader(options)), - decoder.Case(ipProtocolUDP, udpHeader(options)), - decoder.DefaultCase(nil), - ), - ) -} - -// ipv6Header answers a decode Directive that decode an IPv6 header according to -// https://en.wikipedia.org/wiki/IPv6_packet -func ipv6Header(options V5FormatOptions) decoder.Directive { - nextHeader := new(uint16) - return decoder.Seq( - decoder.U32(). - Do(decoder.U32ToU32(func(in uint32) uint32 { return (in & 0xFC00000) >> 22 }).AsF("ip_dscp")). - Do(decoder.U32ToU32(func(in uint32) uint32 { return (in & 0x300000) >> 20 }).AsF("ip_ecn")), - decoder.U16(), - decoder.U16(). 
- Do(decoder.U16ToU16(func(in uint16) uint16 { return (in & 0xFF00) >> 8 }).Set(nextHeader)), - decoder.Bytes(16).Do(decoder.BytesToStr(16, bytesToIPStr).AsT("src_ip")), - decoder.Bytes(16).Do(decoder.BytesToStr(16, bytesToIPStr).AsT("dst_ip")), - decoder.U16Value(nextHeader).Switch( - decoder.Case(uint16(ipProtocolTCP), tcpHeader(options)), - decoder.Case(uint16(ipProtocolUDP), udpHeader(options)), - decoder.DefaultCase(nil), - ), - ) -} - -func tcpHeader(options V5FormatOptions) decoder.Directive { - return decoder.Seq( - decoder.U16(). - Do(decoder.AsT("src_port")), - decoder.U16(). - Do(decoder.AsT("dst_port")), - decoder.U32(), //"sequence"), - decoder.U32(), //"ack_number"), - decoder.Bytes(2). - Do(decoder.BytesToU32(2, func(b []byte) uint32 { return uint32((b[0] & 0xF0) * 4) }).AsF("tcp_header_length")), - decoder.U16().Do(decoder.AsF("tcp_window_size")), - decoder.U16(), // "checksum"), - decoder.U16().Do(decoder.AsF("tcp_urgent_pointer")), - ) -} - -func udpHeader(options V5FormatOptions) decoder.Directive { - return decoder.Seq( - decoder.U16(). - Do(decoder.AsT("src_port")), - decoder.U16(). - Do(decoder.AsT("dst_port")), - decoder.U16().Do(decoder.AsF("udp_length")), - ) -} diff --git a/plugins/inputs/sflow/decoder/directives.go b/plugins/inputs/sflow/decoder/directives.go deleted file mode 100644 index 9b20e1c33..000000000 --- a/plugins/inputs/sflow/decoder/directives.go +++ /dev/null @@ -1,402 +0,0 @@ -package decoder - -import ( - "bytes" - "encoding/binary" - "fmt" - "time" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" -) - -// Directive is a Decode Directive, the basic building block of a decoder -type Directive interface { - - // Execute performs the function of the decode directive. If DecodeContext is nil then the - // ask is to check that a subsequent execution (with non nill DecodeContext) is expted to work. - Execute(*bytes.Buffer, *DecodeContext) error -} - -type IterOption struct { - EOFTerminateIter bool - RemainingToGreaterEqualOrTerminate uint32 -} - -// ValueDirective is a decode directive that extracts some data from the packet, an integer or byte maybe, -// which it then processes by using it, for example, as the counter for the number of iterations to perform -// of downstream decode directives. -// -// A ValueDirective can be used to either Switch, Iter(ate), Encapsulate or Do mutually exclusively. -type ValueDirective interface { - Directive - - // Switch attaches a set of conditional decode directives downstream of this decode directive - Switch(paths ...CaseValueDirective) ValueDirective - - // Iter attaches a single downstream decode directive that will be executed repeatedly according to the iteration count - Iter(maxIterations uint32, dd Directive, iterOptions ...IterOption) ValueDirective - - // Encapsulated will form a new buffer of the encapsulated length and pass that buffer on to the downsstream decode directive - Encapsulated(maxSize uint32, dd Directive) ValueDirective - - // Ref records this decode directive in the passed reference - Ref(*interface{}) ValueDirective - - // Do attaches a Decode Operation - these are uses of the decoded information to perform work on, transform, write out etc. 
- Do(ddo DirectiveOp) ValueDirective -} - -type valueDirective struct { - reference *valueDirective - - value interface{} - noDecode bool - - cases []CaseValueDirective - iter Directive - maxIterations uint32 - encapsulated Directive - maxEncapsulation uint32 - ops []DirectiveOp - err error - - iterOption IterOption -} - -func valueToString(in interface{}) string { - switch v := in.(type) { - case *uint16: - return fmt.Sprintf("%d", *v) - case uint16: - return fmt.Sprintf("%d", v) - case *uint32: - return fmt.Sprintf("%d", *v) - case uint32: - return fmt.Sprintf("%d", v) - default: - return fmt.Sprintf("%v", in) - } -} - -func (dd *valueDirective) Execute(buffer *bytes.Buffer, dc *DecodeContext) error { - if dd.reference == nil && !dd.noDecode { - if e := binary.Read(buffer, binary.BigEndian, dd.value); e != nil { - return e - } - } - - // Switch downstream? - if dd.cases != nil && len(dd.cases) > 0 { - for _, c := range dd.cases { - if c.Equals(dd.value) { - return c.Execute(buffer, dc) - } - } - switch v := dd.value.(type) { - case *uint32: - return fmt.Errorf("(%T).Switch,unmatched case %d", v, *v) - case *uint16: - return fmt.Errorf("(%T).Switch,unmatched case %d", v, *v) - default: - return fmt.Errorf("(%T).Switch,unmatched case %v", dd.value, dd.value) - } - } - - // Iter downstream? - if dd.iter != nil { - fn := func(id interface{}) error { - if dd.iterOption.RemainingToGreaterEqualOrTerminate > 0 && uint32(buffer.Len()) < dd.iterOption.RemainingToGreaterEqualOrTerminate { - return nil - } - if dd.iterOption.EOFTerminateIter && buffer.Len() == 0 { - return nil - } - if e := dd.iter.Execute(buffer, dc); e != nil { - return e - } - return nil - } - switch v := dd.value.(type) { - case *uint32: - if *v > dd.maxIterations { - return fmt.Errorf("iter exceeds configured max - value %d, limit %d", *v, dd.maxIterations) - } - for i := uint32(0); i < *v; i++ { - if e := fn(i); e != nil { - return e - } - } - case *uint16: - if *v > uint16(dd.maxIterations) { - return fmt.Errorf("iter exceeds configured max - value %d, limit %d", *v, dd.maxIterations) - } - for i := uint16(0); i < *v; i++ { - if e := fn(i); e != nil { - return e - } - } - default: - // Can't actually get here if .Iter method check types (and it does) - return fmt.Errorf("(%T).Iter, cannot iterator over this type", dd.value) - } - } - - // Encapsualted downstream> - if dd.encapsulated != nil { - switch v := dd.value.(type) { - case *uint32: - if *v > dd.maxEncapsulation { - return fmt.Errorf("encap exceeds configured max - value %d, limit %d", *v, dd.maxEncapsulation) - } - return dd.encapsulated.Execute(bytes.NewBuffer(buffer.Next(int(*v))), dc) - case *uint16: - if *v > uint16(dd.maxEncapsulation) { - return fmt.Errorf("encap exceeds configured max - value %d, limit %d", *v, dd.maxEncapsulation) - } - return dd.encapsulated.Execute(bytes.NewBuffer(buffer.Next(int(*v))), dc) - } - } - - // Perform the attached operations - for _, op := range dd.ops { - if err := op.process(dc, dd.value); err != nil { - return err - } - } - - return nil -} - -// panickIfNotBlackCanvas checks the state of this value directive to see if it is has -// alrady been configured in a manner inconsistent with another configuration change -func (dd *valueDirective) panickIfNotBlackCanvas(change string, checkDOs bool) { - if dd.cases != nil { - panic(fmt.Sprintf("already have switch cases assigned, cannot assign %s", change)) - } - if dd.iter != nil { - panic(fmt.Sprintf("already have iter assigned, cannot assign %s", change)) - } - if dd.encapsulated != 
nil { - panic(fmt.Sprintf("already have encap assigned, cannot assign %s @", change)) - } - if checkDOs && dd.ops != nil && len(dd.ops) > 0 { - panic(fmt.Sprintf("already have do assigned, cannot assign %s", change)) - } -} - -func (dd *valueDirective) Switch(paths ...CaseValueDirective) ValueDirective { - dd.panickIfNotBlackCanvas("new switch", true) - dd.cases = paths - return dd -} - -func (dd *valueDirective) Iter(maxIterations uint32, iter Directive, iterOptions ...IterOption) ValueDirective { - dd.panickIfNotBlackCanvas("new iter", true) - switch dd.value.(type) { - case *uint32: - case *uint16: - default: - panic(fmt.Sprintf("cannot iterate a %T", dd.value)) - } - - dd.iter = iter - dd.maxIterations = maxIterations - for _, io := range iterOptions { - dd.iterOption = io - } - return dd -} - -func (dd *valueDirective) Encapsulated(maxSize uint32, encapsulated Directive) ValueDirective { - dd.panickIfNotBlackCanvas("new encapsulated", true) - switch dd.value.(type) { - case *uint32: - case *uint16: - default: - panic(fmt.Sprintf("cannot encapsulated on a %T", dd.value)) - } - - dd.encapsulated = encapsulated - dd.maxEncapsulation = maxSize - return dd -} - -func (dd *valueDirective) Do(ddo DirectiveOp) ValueDirective { - dd.panickIfNotBlackCanvas("new do", false) - for { - if ddo.prev() == nil { - break - } - ddo = ddo.prev() - } - if err := ddo.process(nil, dd.value); err != nil { - panic(fmt.Sprintf("directive operation %T cannot process %T - %s", ddo, dd.value, err)) - } - if dd.ops == nil { - dd.ops = make([]DirectiveOp, 0, 5) - } - dd.ops = append(dd.ops, ddo) - - return dd -} - -func (dd *valueDirective) Ref(ref *interface{}) ValueDirective { - if *ref != nil { - panic("ref already assigned, not overwritting") - } - *ref = dd - return dd -} - -// errorDirective a decode directive that reports an error -type errorDirective struct { - Directive -} - -func (dd *errorDirective) Execute(buffer *bytes.Buffer, dc *DecodeContext) error { - return fmt.Errorf("Error Directive") -} - -// CaseValueDirective is a decode directive that also has a switch/case test -type CaseValueDirective interface { - Directive - Equals(interface{}) bool -} - -type caseValueDirective struct { - caseValue interface{} - isDefault bool - equalsDd Directive -} - -func (dd *caseValueDirective) Execute(buffer *bytes.Buffer, dc *DecodeContext) error { - if dd.equalsDd == nil { - return nil - } - return dd.equalsDd.Execute(buffer, dc) -} - -func (dd *caseValueDirective) Equals(value interface{}) bool { - if dd.isDefault { - return true - } - switch ourV := dd.caseValue.(type) { - case uint32: - ov, ok := value.(*uint32) - if ok { - return ourV == *ov - } - case uint16: - ov, ok := value.(*uint16) - if ok { - return ourV == *ov - } - case byte: - ov, ok := value.([]byte) - if ok { - if len(ov) == 1 { - return ourV == ov[0] - } - } - } - return false -} - -// sequenceDirective is a decode directive that is a simple sequentially executed list of other decode directives -type sequenceDirective struct { - decoders []Directive -} - -func (di *sequenceDirective) Execute(buffer *bytes.Buffer, dc *DecodeContext) error { - for _, innerDD := range di.decoders { - if err := innerDD.Execute(buffer, dc); err != nil { - return err - } - } - return nil -} - -// openMetric a decode directive that opens the recording of new fields and tags -type openMetric struct { - name string -} - -func (di *openMetric) Execute(buffer *bytes.Buffer, dc *DecodeContext) error { - dc.openMetric(di.name) - return nil -} - -// closeMetric a decode 
directive that closes the current open metric -type closeMetric struct { -} - -func (di *closeMetric) Execute(buffer *bytes.Buffer, dc *DecodeContext) error { - dc.closeMetric() - return nil -} - -// DecodeContext provides context for the decoding of a packet and primarily acts -// as a repository for metrics that are collected during the packet decode process -type DecodeContext struct { - metrics []telegraf.Metric - timeHasBeenSet bool - - // oreMetric is used to capture tags or fields that may be recored before a metric has been openned - // these fields and tags are then copied into metrics that are then subsequently opened - preMetric telegraf.Metric - current telegraf.Metric - nano int -} - -func (dc *DecodeContext) openMetric(name string) { - t := dc.preMetric.Time() - if !dc.timeHasBeenSet { - t = time.Now().Add(time.Duration(dc.nano)) - } - m, _ := metric.New(name, make(map[string]string), make(map[string]interface{}), t) - dc.nano++ - // make sure to copy any fields and tags that were capture prior to the metric being openned - for t, v := range dc.preMetric.Tags() { - m.AddTag(t, v) - } - for f, v := range dc.preMetric.Fields() { - m.AddField(f, v) - } - dc.current = m -} - -func (dc *DecodeContext) closeMetric() { - if dc.current != nil { - dc.metrics = append(dc.metrics, dc.current) - } - dc.current = nil -} - -func (dc *DecodeContext) currentMetric() telegraf.Metric { - if dc.current == nil { - return dc.preMetric - } - return dc.current -} - -// Decode initiates the decoding of the supplied buffer according to the root decode directive that is provided -func (dc *DecodeContext) Decode(dd Directive, buffer *bytes.Buffer) error { - return dd.Execute(buffer, dc) -} - -// GetMetrics answers the metrics that have been collected during the packet decode -func (dc *DecodeContext) GetMetrics() []telegraf.Metric { - return dc.metrics -} - -type notifyDirective struct { - fn func() -} - -func (nd *notifyDirective) Execute(_ *bytes.Buffer, dc *DecodeContext) error { - if dc != nil { - nd.fn() - } - return nil -} diff --git a/plugins/inputs/sflow/decoder/directives_test.go b/plugins/inputs/sflow/decoder/directives_test.go deleted file mode 100644 index 0a8d99e7a..000000000 --- a/plugins/inputs/sflow/decoder/directives_test.go +++ /dev/null @@ -1,582 +0,0 @@ -package decoder - -import ( - "bytes" - "encoding/binary" - "fmt" - "math" - "testing" - - "github.com/influxdata/telegraf" - "github.com/stretchr/testify/require" -) - -// Execute will ececute the decode directive relative to the supplied buffer -func Execute(dd Directive, buffer *bytes.Buffer) error { - dc := &DecodeContext{} - return dd.Execute(buffer, dc) -} - -func Test_basicUI32NotEnoughBytes(t *testing.T) { - dd := U32() - value := uint16(1001) // not enough bytes to read a U32 out as only a U16 in - var buffer bytes.Buffer - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value)) - require.Error(t, Execute(dd, &buffer)) -} - -func Test_basicUI32(t *testing.T) { - dd := U32() - value := uint32(1001) - var buffer bytes.Buffer - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value)) - require.NoError(t, Execute(dd, &buffer)) - require.Equal(t, 0, buffer.Len()) - x, _ := dd.(*valueDirective) - require.Equal(t, &value, x.value) -} - -func Test_basicBytes(t *testing.T) { - dd := Bytes(4) - value := []byte{0x01, 0x02, 0x03, 0x04} - var buffer bytes.Buffer - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value)) - require.NoError(t, Execute(dd, &buffer)) - require.Equal(t, 0, buffer.Len()) - x, 
_ := dd.(*valueDirective) - require.Equal(t, value, x.value) -} - -func Test_basicSeq(t *testing.T) { - - // Seq with no members compiles and executed but buffer is left untouched - dd := Seq() - value := uint32(1001) - var buffer bytes.Buffer - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value)) - originalLen := buffer.Len() - require.NoError(t, Execute(dd, &buffer)) - require.Equal(t, originalLen, buffer.Len()) - - u := U32() - dd = Seq( - u, - ) - value = uint32(1001) - buffer.Reset() - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value)) - require.NoError(t, Execute(dd, &buffer)) - require.Equal(t, 0, buffer.Len()) - x, _ := u.(*valueDirective) - require.Equal(t, &value, x.value) -} - -func Test_basicSeqOf(t *testing.T) { - // SeqOf with no members compiles and executed but buffer is left untouched - dd := SeqOf([]Directive{}) - value := uint32(1001) - var buffer bytes.Buffer - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value)) - originalLen := buffer.Len() - require.NoError(t, Execute(dd, &buffer)) - require.Equal(t, originalLen, buffer.Len()) - - u := U32() - dd = SeqOf( - []Directive{u}, - ) - value = uint32(1001) - buffer.Reset() - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value)) - require.NoError(t, Execute(dd, &buffer)) - require.Equal(t, 0, buffer.Len()) - x, _ := u.(*valueDirective) - require.Equal(t, &value, x.value) -} - -func Test_errorInSeq(t *testing.T) { - // Seq with no members compiles and executed but buffer is left untouched - dd := Seq(U32(), ErrorDirective()) - value := uint32(1001) - var buffer bytes.Buffer - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value)) - require.Error(t, Execute(dd, &buffer)) -} - -func Test_basicU32Switch(t *testing.T) { - c1 := U32() - c2 := U32() - dd := U32().Switch( - Case(uint32(1), c1), - Case(uint32(2), c2), - ) - - value1 := uint32(3) - var buffer bytes.Buffer - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value1)) - value2 := uint32(4) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value2)) - require.Error(t, Execute(dd, &buffer)) // should error as no path - - value1 = uint32(1) - buffer.Reset() - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value1)) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value2)) - require.NoError(t, Execute(dd, &buffer)) - x, _ := c1.(*valueDirective) - y, _ := c2.(*valueDirective) - value0 := uint32(0) - require.Equal(t, &value2, x.value) - require.Equal(t, &value0, y.value) - - // bad path shoudl raise error - // path 1 should be able to fina value in c1 and not in c2 - // then other way around -} - -func Test_basicBinSwitch(t *testing.T) { - c1 := U32() - c2 := U32() - dd := Bytes(1).Switch( - Case(byte(1), c1), - Case(byte(2), c2), - ) - - value1 := byte(3) - var buffer bytes.Buffer - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value1)) - value2 := uint32(4) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value2)) - require.Error(t, Execute(dd, &buffer)) // should error as no path - - value1 = byte(1) - buffer.Reset() - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value1)) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value2)) - require.NoError(t, Execute(dd, &buffer)) - x, _ := c1.(*valueDirective) - y, _ := c2.(*valueDirective) - value0 := uint32(0) - require.Equal(t, &value2, x.value) - require.Equal(t, &value0, y.value) - - // bad path shoudl raise error - // path 1 should be able to fina value 
in c1 and not in c2 - // then other way around -} - -func Test_basicIter(t *testing.T) { - innerDD := U32() - dd := U32().Iter(math.MaxInt32, innerDD) - - var buffer bytes.Buffer - iterations := uint32(2) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &iterations)) - it1Val := uint32(3) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it1Val)) - it2Val := uint32(4) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it2Val)) - require.NoError(t, Execute(dd, &buffer)) - x, _ := dd.(*valueDirective) - require.Equal(t, &iterations, x.value) - y, _ := innerDD.(*valueDirective) - // we can't test it1Val as it gets overwritten! - require.Equal(t, &it2Val, y.value) -} - -func Test_IterLimit(t *testing.T) { - innerDD := U32() - dd := U32().Iter(1, innerDD) // limit set at 1 - var buffer bytes.Buffer - iterations := uint32(2) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &iterations)) - it1Val := uint32(3) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it1Val)) - it2Val := uint32(4) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it2Val)) - require.Error(t, Execute(dd, &buffer)) -} - -func Test_errorWithinIter(t *testing.T) { - dd := U32().Iter(math.MaxInt32, ErrorDirective()) - - var buffer bytes.Buffer - iterations := uint32(1) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &iterations)) - - require.Error(t, Execute(dd, &buffer)) -} - -func Test_errorWithinIter2(t *testing.T) { - dd := U32().Iter(math.MaxInt32, U32().Do(ErrorOp(false))) - var buffer bytes.Buffer - iterations := uint32(1) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &iterations)) - innerValue := uint32(1) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &innerValue)) - require.Error(t, Execute(dd, &buffer)) -} - -func Test_errorWithinIter3(t *testing.T) { - defer expectPanic(t, "Test_cantIterBytes") - U32().Iter(math.MaxInt32, U32().Do(ErrorOp(true))) -} - -func Test_alreadyEncapsulated(t *testing.T) { - defer expectPanic(t, "Test_cantIterBytes") - u := U32() - inner := U32() - u.Encapsulated(math.MaxInt32, inner) - u.Encapsulated(math.MaxInt32, inner) -} - -func Test_alreadyDoAssigned(t *testing.T) { - defer expectPanic(t, "Test_cantIterBytes") - u := U32() - u.Do(AsF("foo")) - inner := U32() - u.Encapsulated(math.MaxInt32, inner) -} - -func Test_cantIterBytes(t *testing.T) { - defer expectPanic(t, "Test_cantIterBytes") - _ = Bytes(1).Iter(math.MaxInt32, U32()) -} - -// then open metric -func Test_OpenMetric(t *testing.T) { - innerDD := U32() - dd := U32().Iter(math.MaxInt32, Seq( - OpenMetric(""), - innerDD, - CloseMetric(), - )) - - var buffer bytes.Buffer - iterations := uint32(2) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &iterations)) - it1Val := uint32(3) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it1Val)) - it2Val := uint32(3) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it2Val)) - dc := NewDecodeContext() - require.NoError(t, dc.Decode(dd, &buffer)) - require.Equal(t, 2, len(dc.GetMetrics())) -} - -func Test_AsF(t *testing.T) { - innerDD := U32().Do(AsF("foo")) - dd := U32().Iter(math.MaxInt32, Seq( - OpenMetric(""), - innerDD, - CloseMetric(), - )) - - var buffer bytes.Buffer - iterations := uint32(2) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &iterations)) - it1Val := uint32(3) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it1Val)) - it2Val := uint32(3) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, 
&it2Val)) - dc := NewDecodeContext() - require.NoError(t, dc.Decode(dd, &buffer)) - require.Equal(t, 2, len(dc.GetMetrics())) - m := dc.GetMetrics() - require.Equal(t, uint64(it1Val), getField(m[0], "foo")) - require.Equal(t, uint64(it2Val), getField(m[1], "foo")) -} - -func Test_AsT(t *testing.T) { - innerDD := U32().Do(AsT("foo")) - dd := U32().Iter(math.MaxInt32, Seq( - OpenMetric(""), - innerDD, - CloseMetric(), - )) - - var buffer bytes.Buffer - iterations := uint32(2) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &iterations)) - it1Val := uint32(3) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it1Val)) - it2Val := uint32(3) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it2Val)) - dc := NewDecodeContext() - require.NoError(t, dc.Decode(dd, &buffer)) - require.Equal(t, 2, len(dc.GetMetrics())) - m := dc.GetMetrics() - require.Equal(t, fmt.Sprintf("%d", it1Val), getTag(m[0], "foo")) - require.Equal(t, fmt.Sprintf("%d", it2Val), getTag(m[1], "foo")) -} - -func getField(m telegraf.Metric, name string) interface{} { - v, _ := m.GetField(name) - return v -} - -func getTag(m telegraf.Metric, name string) string { - v, _ := m.GetTag(name) - return v -} - -func Test_preMetricNesting(t *testing.T) { - innerDD := U32().Do(AsF("foo")) - dd := Seq( - U32().Do(AsF("bar")), - U32().Do(AsT("baz")), - U32().Iter(math.MaxInt32, - Seq( - OpenMetric(""), - innerDD, - CloseMetric(), - ), - ), - ) - - var buffer bytes.Buffer - barVal := uint32(55) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &barVal)) - bazVal := uint32(56) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &bazVal)) - iterations := uint32(2) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &iterations)) - it1Val := uint32(3) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it1Val)) - it2Val := uint32(3) - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &it2Val)) - dc := NewDecodeContext() - require.NoError(t, dc.Decode(dd, &buffer)) - require.Equal(t, 2, len(dc.GetMetrics())) - m := dc.GetMetrics() - require.Equal(t, uint64(barVal), getField(m[0], "bar")) - require.Equal(t, fmt.Sprintf("%d", bazVal), getTag(m[0], "baz")) - require.Equal(t, uint64(it1Val), getField(m[0], "foo")) - require.Equal(t, uint64(barVal), getField(m[1], "bar")) - require.Equal(t, fmt.Sprintf("%d", bazVal), getTag(m[1], "baz")) - require.Equal(t, uint64(it2Val), getField(m[1], "foo")) -} - -func Test_BasicEncapsulated(t *testing.T) { - - encap1Value := uint32(2) - encap2Value := uint32(3) - var encapBuffer bytes.Buffer - require.NoError(t, binary.Write(&encapBuffer, binary.BigEndian, &encap1Value)) - require.NoError(t, binary.Write(&encapBuffer, binary.BigEndian, &encap2Value)) - - encapSize := uint32(encapBuffer.Len()) - envelopeValue := uint32(4) - var envelopeBuffer bytes.Buffer - - require.NoError(t, binary.Write(&envelopeBuffer, binary.BigEndian, &encapSize)) - l, e := envelopeBuffer.Write(encapBuffer.Bytes()) - require.NoError(t, e) - require.Equal(t, encapSize, uint32(l)) - require.NoError(t, binary.Write(&envelopeBuffer, binary.BigEndian, &envelopeValue)) - - innerDD := U32() - envelopeDD := U32() // the buffer contains another U32 but the encpaultation will ignore it - dd := Seq( - U32().Encapsulated(math.MaxInt32, innerDD), - envelopeDD, - ) - require.NoError(t, Execute(dd, &envelopeBuffer)) - - require.Equal(t, 0, envelopeBuffer.Len()) - x, _ := envelopeDD.(*valueDirective) - require.Equal(t, &envelopeValue, x.value) - y, _ := 
innerDD.(*valueDirective) - require.Equal(t, &encap1Value, y.value) -} - -func Test_EncapsulationLimit(t *testing.T) { - - encap1Value := uint32(2) - encap2Value := uint32(3) - var encapBuffer bytes.Buffer - require.NoError(t, binary.Write(&encapBuffer, binary.BigEndian, &encap1Value)) - require.NoError(t, binary.Write(&encapBuffer, binary.BigEndian, &encap2Value)) - - encapSize := uint32(encapBuffer.Len()) - envelopeValue := uint32(4) - var envelopeBuffer bytes.Buffer - - require.NoError(t, binary.Write(&envelopeBuffer, binary.BigEndian, &encapSize)) - l, e := envelopeBuffer.Write(encapBuffer.Bytes()) - require.NoError(t, e) - require.Equal(t, encapSize, uint32(l)) - require.NoError(t, binary.Write(&envelopeBuffer, binary.BigEndian, &envelopeValue)) - - innerDD := U32() - envelopeDD := U32() - dd := Seq( - U32().Encapsulated(4, innerDD), // 4 bytes, not 8 bytes or higher as max - envelopeDD, - ) - require.Error(t, Execute(dd, &envelopeBuffer)) -} - -func Test_cantEncapulatedBytes(t *testing.T) { - defer expectPanic(t, "cantEncapulatedBytes") - _ = Bytes(1).Encapsulated(math.MaxInt32, U32()) -} - -func Test_BasicRef(t *testing.T) { - var x interface{} - dd1 := U32().Ref(&x) - dd2 := Ref(x) - dd := Seq( - dd1, - dd2, - ) - y, ok := dd2.(*valueDirective) - require.True(t, ok) - require.Equal(t, y.reference, x) - - value := uint32(1001) - var buffer bytes.Buffer - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value)) - require.NoError(t, Execute(dd, &buffer)) - - y, _ = dd1.(*valueDirective) - require.Equal(t, &value, y.value) - - y, _ = dd2.(*valueDirective) - require.Equal(t, &value, y.value) -} - -func Test_RefReassignError(t *testing.T) { - defer expectPanic(t, "iter iter") - var x interface{} - U32().Ref(&x) - U32().Ref(&x) -} - -func Test_ToU32(t *testing.T) { - u := U32().Do(U32ToU32(func(in uint32) uint32 { return in >> 2 }).AsF("x")) - dd := Seq(OpenMetric(""), u, CloseMetric()) - - value := uint32(1001) - var buffer bytes.Buffer - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value)) - - dc := NewDecodeContext() - require.NoError(t, dc.Decode(dd, &buffer)) - - // require original value decoded - x, _ := u.(*valueDirective) - require.Equal(t, &value, x.value) - - // require field ejected - require.Equal(t, 1, len(dc.GetMetrics())) - m := dc.GetMetrics() - require.Equal(t, uint64(value>>2), getField(m[0], "x")) -} - -func expectPanic(t *testing.T, msg string) { - if r := recover(); r == nil { - t.Errorf(msg) - } -} - -func Test_U32BlankCanvasIter(t *testing.T) { - u := U32().Iter(math.MaxInt32, U32()) - func() { - defer expectPanic(t, "iter iter") - u.Iter(math.MaxInt32, U32()) - }() - func() { - defer expectPanic(t, "iter switch") - u.Switch(Case(uint32(0), U32())) - }() - func() { - defer expectPanic(t, "iter encap") - u.Encapsulated(math.MaxInt32, U32()) - }() - func() { - defer expectPanic(t, "iter do") - u.Do(AsF("foo")) - }() -} -func Test_U32BlankCanvasSwitch(t *testing.T) { - u := U32().Switch(Case(uint32(0), U32())) - func() { - defer expectPanic(t, "switch iter") - u.Iter(math.MaxInt32, U32()) - }() - func() { - defer expectPanic(t, "switch switch") - u.Switch(Case(uint32(0), U32())) - }() - func() { - defer expectPanic(t, "switch encap") - u.Encapsulated(math.MaxInt32, U32()) - }() - func() { - defer expectPanic(t, "switch do") - u.Do(AsF("foo")) - }() -} - -func Test_U32BasicSwitch(t *testing.T) { - s := U32().Switch(Case(uint32(0), nil)) - value := uint32(0) - var buffer bytes.Buffer - require.NoError(t, binary.Write(&buffer, binary.BigEndian, 
&value)) - dc := NewDecodeContext() - require.NoError(t, dc.Decode(s, &buffer)) -} - -func Test_U32BasicSwitchDefault(t *testing.T) { - s := U32().Switch(Case(uint32(0), nil), DefaultCase(nil)) - value := uint32(2) - var buffer bytes.Buffer - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value)) - dc := NewDecodeContext() - require.NoError(t, dc.Decode(s, &buffer)) -} - -func Test_U16(t *testing.T) { - dd := U16() - value := uint16(1001) - var buffer bytes.Buffer - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value)) - require.NoError(t, Execute(dd, &buffer)) - require.Equal(t, 0, buffer.Len()) - x, _ := dd.(*valueDirective) - require.Equal(t, &value, x.value) -} - -func Test_U16Value(t *testing.T) { - myU16 := uint16(5) - dd := U16Value(&myU16) - var buffer bytes.Buffer - require.NoError(t, Execute(dd, &buffer)) - x, _ := dd.(*valueDirective) - require.Equal(t, &myU16, x.value) -} - -func Test_Bytes(t *testing.T) { - dd := Bytes(4) - value := []byte{0x01, 0x02, 0x03, 0x04} - var buffer bytes.Buffer - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value)) - require.NoError(t, Execute(dd, &buffer)) - require.Equal(t, 0, buffer.Len()) - x, _ := dd.(*valueDirective) - require.Equal(t, value, x.value) -} - -func Test_nilRefAnfWongTypeRef(t *testing.T) { - func() { - defer expectPanic(t, "Test_nilRef") - Ref(nil) - }() - - func() { - defer expectPanic(t, "Test_nilRef") - f := new(uint32) - Ref(f) - }() -} diff --git a/plugins/inputs/sflow/decoder/funcs.go b/plugins/inputs/sflow/decoder/funcs.go deleted file mode 100644 index c90e1488f..000000000 --- a/plugins/inputs/sflow/decoder/funcs.go +++ /dev/null @@ -1,216 +0,0 @@ -package decoder - -import ( - "fmt" - "time" - - "github.com/influxdata/telegraf/metric" -) - -// U32 answers a directive for 32bit Unsigned Integers -func U32() ValueDirective { - return &valueDirective{value: new(uint32)} -} - -// U64 answers a directive for 64bit Unsigned Integers -func U64() ValueDirective { - return &valueDirective{value: new(uint64)} -} - -// U8 answers a directive for 8bit Unsigned Integers -func U8() ValueDirective { - return &valueDirective{value: new(uint8)} -} - -// U16 answers a directive for 32bit Unsigned Integers -func U16() ValueDirective { - return &valueDirective{value: new(uint16)} -} - -// U16Value answers a directive that doesn't actually decode itself but reused a value previously decoded of type uint16 -func U16Value(value *uint16) ValueDirective { - return &valueDirective{value: value, noDecode: true} -} - -// Bytes answers a value directive that will decode the specified number (len) of bytes from the packet -func Bytes(len int) ValueDirective { - return &valueDirective{value: make([]byte, len)} -} - -// Case answers a directive to be used within a Switch clause of a U32 directive -func Case(caseValue interface{}, dd Directive) CaseValueDirective { - return &caseValueDirective{caseValue: caseValue, isDefault: false, equalsDd: dd} -} - -// DefaultCase answers a case decoder directive that can be used as the default, catch all, of a Switch -func DefaultCase(dd Directive) CaseValueDirective { - return &caseValueDirective{caseValue: nil, isDefault: true, equalsDd: dd} -} - -// Ref answers a decoder that reuses, through referal, an existing U32 directive -func Ref(target interface{}) ValueDirective { - if target == nil { - panic("Ref given a nil reference") - } - r, ok := target.(*valueDirective) - if !ok { - panic(fmt.Sprintf("Ref not given a ValueDirective reference but a %T", target)) - } - return 
&valueDirective{reference: r, value: r.value} -} - -// Seq ansers a directive that sequentially executes a list of provided directives -func Seq(decoders ...Directive) Directive { - return &sequenceDirective{decoders: decoders} -} - -func SeqOf(decoders []Directive) Directive { - return &sequenceDirective{decoders: decoders} -} - -// OpenMetric answers a directive that opens a new metrics for collecting tags and fields -func OpenMetric(name string) Directive { - return &openMetric{name: name} -} - -// CloseMetric answers a directive that close the current metrics -func CloseMetric() Directive { - return &closeMetric{} -} - -// NewDecodeContext ansewers a new Decode Contect to support the process of decoding -func NewDecodeContext() *DecodeContext { - m, _ := metric.New("sflow", make(map[string]string), make(map[string]interface{}), time.Now()) - return &DecodeContext{preMetric: m} -} - -// U32ToU32 answers a decode operation that transforms a uint32 to a uint32 via the supplied fn -func U32ToU32(fn func(uint32) uint32) *U32ToU32DOp { - result := &U32ToU32DOp{fn: fn, baseDOp: baseDOp{}} - result.do = result - return result -} - -// U32ToStr answers a decode operation that transforms a uint32 to a string via the supplied fn -func U32ToStr(fn func(uint32) string) *U32ToStrDOp { - result := &U32ToStrDOp{baseDOp: baseDOp{}, fn: fn} - result.do = result - return result -} - -// U16ToStr answers a decode operation that transforms a uint16 to a string via the supplied fn -func U16ToStr(fn func(uint16) string) *U16ToStrDOp { - result := &U16ToStrDOp{baseDOp: baseDOp{}, fn: fn} - result.do = result - return result -} - -// U16ToU16 answers a decode operation that transforms a uint16 to a uint16 via the supplied fn -func U16ToU16(fn func(uint16) uint16) *U16ToU16DOp { - result := &U16ToU16DOp{baseDOp: baseDOp{}, fn: fn} - result.do = result - return result -} - -// AsF answers a decode operation that will output a field into the open metric with the given name -func AsF(name string) *AsFDOp { - result := &AsFDOp{baseDOp: baseDOp{}, name: name} - result.do = result - return result -} - -// AsT answers a decode operation that will output a tag into the open metric with the given name -func AsT(name string) *AsTDOp { - result := &AsTDOp{name: name, baseDOp: baseDOp{}} - result.do = result - return result -} - -// AsTimestamp answers a decode operation that will set the tiemstamp on the metric -func AsTimestamp() *AsTimestampDOp { - result := &AsTimestampDOp{baseDOp: baseDOp{}} - result.do = result - return result -} - -// BytesToStr answers a decode operation that transforms a []bytes to a string via the supplied fn -func BytesToStr(len int, fn func([]byte) string) *BytesToStrDOp { - result := &BytesToStrDOp{baseDOp: baseDOp{}, len: len, fn: fn} - result.do = result - return result -} - -// BytesTo answers a decode operation that transforms a []bytes to a interface{} via the supplied fn -func BytesTo(len int, fn func([]byte) interface{}) *BytesToDOp { - result := &BytesToDOp{baseDOp: baseDOp{}, len: len, fn: fn} - result.do = result - return result -} - -// BytesToU32 answers a decode operation that transforms a []bytes to an uint32 via the supplied fn -func BytesToU32(len int, fn func([]byte) uint32) *BytesToU32DOp { - result := &BytesToU32DOp{baseDOp: baseDOp{}, len: len, fn: fn} - result.do = result - return result -} - -// MapU32ToStr answers a decode operation that maps an uint32 to a string via the supplied map -func MapU32ToStr(m map[uint32]string) *U32ToStrDOp { - result := &U32ToStrDOp{fn: 
func(in uint32) string { - return m[in] - }, baseDOp: baseDOp{}} - result.do = result - return result -} - -// U32Assert answers a decode operation that will assert the uint32 is a particulr value or generate an error -func U32Assert(fn func(v uint32) bool, fmtStr string) *U32AssertDOp { - result := &U32AssertDOp{baseDOp: baseDOp{}, fn: fn, fmtStr: fmtStr} - result.do = result - return result -} - -func U16Assert(fn func(v uint16) bool, fmtStr string) *U16AssertDOp { - result := &U16AssertDOp{baseDOp: baseDOp{}, fn: fn, fmtStr: fmtStr} - result.do = result - return result -} - -// MapU16ToStr answers a decode operation that maps an uint16 to a string via the supplied map -func MapU16ToStr(m map[uint16]string) *U16ToStrDOp { - result := &U16ToStrDOp{baseDOp: baseDOp{}, fn: func(in uint16) string { - return m[in] - }} - result.do = result - return result -} - -// Set answers a decode operation that will set the supplied *value to the value passed through the operation -func Set(ptr interface{}) *SetDOp { - result := &SetDOp{ptr: ptr, baseDOp: baseDOp{}} - result.do = result - return result -} - -// ErrorDirective answers a decode directive that will generate an error -func ErrorDirective() Directive { - return &errorDirective{} -} - -// ErrorOp answers a decode operation that will generate an error -func ErrorOp(errorOnTestProcess bool) *ErrorDOp { - result := &ErrorDOp{baseDOp: baseDOp{}, errorOnTestProcess: errorOnTestProcess} - result.do = result - return result - -} - -// Notify answers a decode directive that will notify the supplied function upon execution -func Notify(fn func()) Directive { - return ¬ifyDirective{fn} -} - -// Nop answer a decode directive that is the null, benign, deocder -func Nop() Directive { - return Notify(func() {}) -} diff --git a/plugins/inputs/sflow/decoder/ops.go b/plugins/inputs/sflow/decoder/ops.go deleted file mode 100644 index 2a1e0c72b..000000000 --- a/plugins/inputs/sflow/decoder/ops.go +++ /dev/null @@ -1,490 +0,0 @@ -package decoder - -import ( - "fmt" - "time" - - "github.com/influxdata/telegraf" -) - -// DirectiveOp are operations that are performed on values that have been decoded. -// They are expected to be chained together, in a flow programming style, and the -// Decode Directive that they are assigned to then walks back up the linked list to find the root -// operation that will then be performed (passing the value down through various transformations) -type DirectiveOp interface { - prev() DirectiveOp - // process method can be executed in two contexts, one to check that the given type - // of upstream value can be processed (not to process it) and then to actually process - // the upstream value. The difference in reqwuired behaviour is signalled by the presence - // of the DecodeContect - if nil. 
just test, if !nil process - process(dc *DecodeContext, upstreamValue interface{}) error -} - -type baseDOp struct { - p DirectiveOp - do DirectiveOp - n DirectiveOp -} - -func (op *baseDOp) prev() DirectiveOp { - return op.p -} - -func (op *baseDOp) AsF(name string) DirectiveOp { - result := &AsFDOp{baseDOp: baseDOp{p: op.do}, name: name} - result.do = result - op.n = result - return result -} - -func (op *baseDOp) AsT(name string) DirectiveOp { - result := &AsTDOp{baseDOp: baseDOp{p: op.do}, name: name} - result.do = result - op.n = result - return result -} - -func (op *baseDOp) Set(ptr interface{}) *SetDOp { - result := &SetDOp{baseDOp: baseDOp{p: op.do}, ptr: ptr} - result.do = result - op.n = result - return result -} - -// U32ToU32DOp is a deode operation that can process U32 to U32 -type U32ToU32DOp struct { - baseDOp - fn func(uint32) uint32 -} - -func (op *U32ToU32DOp) process(dc *DecodeContext, upstreamValue interface{}) error { - var out uint32 - switch v := upstreamValue.(type) { - case *uint32: - if dc != nil { - out = op.fn(*v) - } - default: - return fmt.Errorf("cannot process %T", v) - } - - if dc != nil && op.n != nil { - return op.n.process(dc, out) - } - return nil -} - -// ToString answers a U32ToStr decode operation that will transform this output of thie U32ToU32 into a string -func (op *U32ToU32DOp) ToString(fn func(uint32) string) *U32ToStrDOp { - result := &U32ToStrDOp{baseDOp: baseDOp{p: op}, fn: fn} - result.do = result - op.n = result - return result -} - -// AsFDOp is a deode operation that writes fields to metrics -type AsFDOp struct { - baseDOp - name string -} - -func (op *AsFDOp) process(dc *DecodeContext, upstreamValue interface{}) error { - var m telegraf.Metric - if dc != nil { - m = dc.currentMetric() - } - switch v := upstreamValue.(type) { - case *uint64: - if dc != nil { - m.AddField(op.name, *v) - } - case *uint32: - if dc != nil { - m.AddField(op.name, *v) - } - case uint32: - if dc != nil { - m.AddField(op.name, v) - } - case *uint16: - if dc != nil { - m.AddField(op.name, *v) - } - case uint16: - if dc != nil { - m.AddField(op.name, v) - } - case *uint8: - if dc != nil { - m.AddField(op.name, *v) - } - case uint8: - if dc != nil { - m.AddField(op.name, v) - } - case string: - if dc != nil { - m.AddField(op.name, v) - } - default: - return fmt.Errorf("AsF cannot process %T", v) - } - return nil -} - -// AsTimestampDOp is a deode operation that sets the timestamp on the metric -type AsTimestampDOp struct { - baseDOp -} - -func (op *AsTimestampDOp) process(dc *DecodeContext, upstreamValue interface{}) error { - var m telegraf.Metric - if dc != nil { - m = dc.currentMetric() - } - switch v := upstreamValue.(type) { - case *uint32: - if dc != nil { - m.SetTime(time.Unix(int64(*v), 0)) - dc.timeHasBeenSet = true - } - default: - return fmt.Errorf("can't process %T", upstreamValue) - } - return nil -} - -// AsTDOp is a deode operation that writes tags to metrics -type AsTDOp struct { - baseDOp - name string - skipEmpty bool -} - -func (op *AsTDOp) process(dc *DecodeContext, upstreamValue interface{}) error { - var m telegraf.Metric - if dc != nil { - m = dc.currentMetric() - } - switch v := upstreamValue.(type) { - case *uint32: - if dc != nil { - m.AddTag(op.name, fmt.Sprintf("%d", *v)) - } - case uint32: - if dc != nil { - m.AddTag(op.name, fmt.Sprintf("%d", v)) - } - case *uint16: - if dc != nil { - m.AddTag(op.name, fmt.Sprintf("%d", *v)) - } - case uint16: - if dc != nil { - m.AddTag(op.name, fmt.Sprintf("%d", v)) - } - case *uint8: - if dc != nil 
{ - m.AddTag(op.name, fmt.Sprintf("%d", *v)) - } - case uint8: - if dc != nil { - m.AddTag(op.name, fmt.Sprintf("%d", v)) - } - case string: - if dc != nil { - if !op.skipEmpty || v != "" { - m.AddTag(op.name, v) - } - } - default: - return fmt.Errorf("can't process %T", upstreamValue) - } - return nil -} - -func (op *AsTDOp) prev() DirectiveOp { - return op.p -} - -// BytesToStrDOp is a decode operation that transforms []bytes to strings -type BytesToStrDOp struct { - baseDOp - len int - fn func([]byte) string -} - -func (op *BytesToStrDOp) process(dc *DecodeContext, upstreamValue interface{}) error { - switch v := upstreamValue.(type) { - case []byte: - if len(v) == op.len { - if dc != nil { - out := op.fn(v) - if op.n != nil { - return op.n.process(dc, out) - } - } - } else { - return fmt.Errorf("cannot process len(%d) as requrire %d", len(v), op.len) - } - default: - return fmt.Errorf("cannot process %T", upstreamValue) - } - return nil -} - -// U32AssertDOp is a decode operation that asserts a particular uint32 value -type U32AssertDOp struct { - baseDOp - fn func(uint32) bool - fmtStr string -} - -func (op *U32AssertDOp) process(dc *DecodeContext, upstreamValue interface{}) error { - switch v := upstreamValue.(type) { - case *uint32: - if dc != nil && !op.fn(*v) { - return fmt.Errorf(op.fmtStr, *v) - } - default: - return fmt.Errorf("cannot process %T", upstreamValue) - } - return nil -} - -// U16AssertDOp is a decode operation that asserts a particular uint32 value -type U16AssertDOp struct { - baseDOp - fn func(uint16) bool - fmtStr string -} - -func (op *U16AssertDOp) process(dc *DecodeContext, upstreamValue interface{}) error { - switch v := upstreamValue.(type) { - case *uint16: - if dc != nil && !op.fn(*v) { - return fmt.Errorf(op.fmtStr, *v) - } - default: - return fmt.Errorf("cannot process %T", upstreamValue) - } - return nil -} - -// U32ToStrDOp is a decod eoperation that transforms a uint32 to a string -type U32ToStrDOp struct { - baseDOp - fn func(uint32) string -} - -func (op *U32ToStrDOp) process(dc *DecodeContext, upstreamValue interface{}) error { - switch v := upstreamValue.(type) { - case uint32: - if dc != nil && op.n != nil { - op.n.process(dc, (op.fn(v))) - } - case *uint32: - if dc != nil && op.n != nil { - return op.n.process(dc, (op.fn(*v))) - } - default: - return fmt.Errorf("cannot process %T", upstreamValue) - } - return nil -} - -// BreakIf answers a BreakIf operation that will break the current decode operation chain, without an error, if the value processed -// is the supplied value -func (op *U32ToStrDOp) BreakIf(value string) *BreakIfDOp { - result := &BreakIfDOp{baseDOp: baseDOp{p: op}, value: value} - result.do = result - op.n = result - return result -} - -// U16ToStrDOp is a decode operation that transforms a uint16 to a string -type U16ToStrDOp struct { - baseDOp - fn func(uint16) string -} - -func (op *U16ToStrDOp) process(dc *DecodeContext, upstreamValue interface{}) error { - switch v := upstreamValue.(type) { - case *uint16: - if dc != nil { - return op.n.process(dc, (op.fn(*v))) - } - default: - return fmt.Errorf("cannot process %T", upstreamValue) - } - return nil -} - -// BreakIfDOp is a decode operation that will break the current outer iteration -type BreakIfDOp struct { - baseDOp - value string -} - -func (op *BreakIfDOp) process(dc *DecodeContext, upstreamValue interface{}) error { - switch v := upstreamValue.(type) { - case string: - if dc != nil { - if v != op.value { - op.n.process(dc, v) - } - } - default: - return 
fmt.Errorf("cannot process %T", upstreamValue) - } - return nil -} - -// U16ToU16DOp is a decode operation that transfirms one uint16 to another uint16 -type U16ToU16DOp struct { - baseDOp - fn func(uint16) uint16 -} - -func (op *U16ToU16DOp) process(dc *DecodeContext, upstreamValue interface{}) error { - var out uint16 - var err error - switch v := upstreamValue.(type) { - case *uint16: - if dc != nil { - out = op.fn(*v) - } - default: - return fmt.Errorf("cannot process %T", upstreamValue) - } - if err != nil { - return err - } - if op.n != nil && dc != nil { - return op.n.process(dc, out) - } - return nil -} - -// BytesToU32DOp is a decode operation that transforms a []byte to a uint32 -type BytesToU32DOp struct { - baseDOp - len int - fn func([]byte) uint32 -} - -func (op *BytesToU32DOp) process(dc *DecodeContext, upstreamValue interface{}) error { - switch v := upstreamValue.(type) { - case []byte: - if len(v) == op.len { - out := op.fn(v) - if op.n != nil { - return op.n.process(dc, out) - } - } else { - return fmt.Errorf("cannot process %T as len(%d) != %d", upstreamValue, v, op.len) - } - default: - return fmt.Errorf("cannot process %T", upstreamValue) - } - return nil -} - -// SetDOp is a decode operation that will Set a pointer to a value to be the value processed -type SetDOp struct { - baseDOp - ptr interface{} -} - -func (op *SetDOp) process(dc *DecodeContext, upstreamValue interface{}) error { - switch v := upstreamValue.(type) { - case *uint32: - ptr, ok := op.ptr.(*uint32) - if ok { - if dc != nil { - *ptr = *v - } - } else { - return fmt.Errorf("cannot process as ptr %T and not *uint32", op.ptr) - } - case uint32: - ptr, ok := op.ptr.(*uint32) - if ok { - if dc != nil { - *ptr = v - } - } else { - return fmt.Errorf("cannot process as ptr %T and not *uint32", op.ptr) - } - case *uint16: - ptr, ok := op.ptr.(*uint16) - if ok { - if dc != nil { - *ptr = *v - } - } else { - return fmt.Errorf("cannot process as ptr %T and not *uint16", op.ptr) - } - case uint16: - ptr, ok := op.ptr.(*uint16) - if ok { - if dc != nil { - *ptr = v - } - } else { - return fmt.Errorf("cannot process as ptr %T and not *uint16", op.ptr) - } - case string: - ptr, ok := op.ptr.(*string) - if ok { - if dc != nil { - *ptr = v - } - } else { - return fmt.Errorf("cannot process as ptr %T and not *string", op.ptr) - } - default: - return fmt.Errorf("cannot process %T", upstreamValue) - } - if op.n != nil && dc != nil { - return op.n.process(dc, upstreamValue) - } - return nil -} - -// BytesToDOp is a decode operation that will transform []byte to interface{} according to a suppied function -type BytesToDOp struct { - baseDOp - len int - fn func([]byte) interface{} -} - -func (op *BytesToDOp) process(dc *DecodeContext, upstreamValue interface{}) error { - switch v := upstreamValue.(type) { - case []byte: - if len(v) == op.len { - if dc != nil { - out := op.fn(v) - return op.n.process(dc, out) - } - } else { - return fmt.Errorf("cannot process as len:%d required %d", len(v), op.len) - } - default: - return fmt.Errorf("cannot process %T", upstreamValue) - } - return nil -} - -// ErrorDOp is a decode operation that will generate an error -type ErrorDOp struct { - baseDOp - errorOnTestProcess bool -} - -func (op *ErrorDOp) process(dc *DecodeContext, upstreamValue interface{}) error { - if dc == nil && !op.errorOnTestProcess { - return nil - } - return fmt.Errorf("Error Op") -} diff --git a/plugins/inputs/sflow/decoder/ops_test.go b/plugins/inputs/sflow/decoder/ops_test.go deleted file mode 100644 index 
2b626b55d..000000000 --- a/plugins/inputs/sflow/decoder/ops_test.go +++ /dev/null @@ -1,383 +0,0 @@ -package decoder - -import ( - "bytes" - "encoding/binary" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func Test_U64AsF(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - ddo := AsF("out") - in := uint64(5) - require.NoError(t, ddo.process(dc, &in)) - m := dc.currentMetric() - require.Equal(t, in, getField(m, "out")) -} - -func Test_U32AsF(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - ddo := AsF("out") - in := uint32(5) - require.NoError(t, ddo.process(dc, &in)) - m := dc.currentMetric() - require.Equal(t, uint64(in), getField(m, "out")) -} - -func Test_U16PtrAsF(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - ddo := AsF("out") - in := uint16(5) - require.NoError(t, ddo.process(dc, &in)) - m := dc.currentMetric() - require.Equal(t, uint64(in), getField(m, "out")) -} - -func Test_U16AsF(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - ddo := AsF("out") - in := uint16(5) - require.NoError(t, ddo.process(dc, in)) - m := dc.currentMetric() - require.Equal(t, uint64(in), getField(m, "out")) -} - -func Test_U8AsF(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - ddo := AsF("out") - in := uint8(5) - require.NoError(t, ddo.process(dc, in)) - m := dc.currentMetric() - require.Equal(t, uint64(in), getField(m, "out")) -} - -func Test_U8PtrAsF(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - ddo := AsF("out") - in := uint8(5) - require.NoError(t, ddo.process(dc, &in)) - m := dc.currentMetric() - require.Equal(t, uint64(in), getField(m, "out")) -} - -func Test_U32AsT(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - ddo := AsT("out") - in := uint32(5) - require.NoError(t, ddo.process(dc, in)) - m := dc.currentMetric() - require.Equal(t, fmt.Sprintf("%d", in), getTag(m, "out")) -} - -func Test_U32PtrAsT(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - ddo := AsT("out") - in := uint32(5) - require.NoError(t, ddo.process(dc, &in)) - m := dc.currentMetric() - require.Equal(t, fmt.Sprintf("%d", in), getTag(m, "out")) -} - -func Test_U16AsT(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - ddo := AsT("out") - in := uint16(5) - require.NoError(t, ddo.process(dc, in)) - m := dc.currentMetric() - require.Equal(t, fmt.Sprintf("%d", in), getTag(m, "out")) -} - -func Test_U16PtrAsT(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - ddo := AsT("out") - in := uint16(5) - require.NoError(t, ddo.process(dc, &in)) - m := dc.currentMetric() - require.Equal(t, fmt.Sprintf("%d", in), getTag(m, "out")) -} - -func Test_U8AsT(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - ddo := AsT("out") - in := uint8(5) - require.NoError(t, ddo.process(dc, in)) - m := dc.currentMetric() - require.Equal(t, fmt.Sprintf("%d", in), getTag(m, "out")) -} - -func Test_U8PtrAsT(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - ddo := AsT("out") - in := uint8(5) - require.NoError(t, ddo.process(dc, &in)) - m := dc.currentMetric() - require.Equal(t, fmt.Sprintf("%d", in), getTag(m, "out")) -} - -func Test_U32ToU32AsF(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - ddo := U32ToU32(func(i uint32) uint32 { return i * 2 }) - ddo2 := ddo.AsF("out") - require.Equal(t, ddo, ddo2.prev()) - in := uint32(5) - require.NoError(t, ddo.process(dc, &in)) - m := dc.currentMetric() - require.Equal(t, uint64(in*2), getField(m, 
"out")) -} - -func Test_U16ToU16AsF(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - ddo := U16ToU16(func(i uint16) uint16 { return i * 2 }) - ddo2 := ddo.AsF("out") - require.Equal(t, ddo, ddo2.prev()) - in := uint16(5) - require.NoError(t, ddo.process(dc, &in)) - m := dc.currentMetric() - require.Equal(t, uint64(in*2), getField(m, "out")) -} - -func Test_U32ToStrAsT(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - ddo := U32ToStr(func(i uint32) string { return fmt.Sprintf("%d", i*2) }) - ddo2 := ddo.AsT("out") - require.Equal(t, ddo, ddo2.prev()) - in := uint32(5) - require.NoError(t, ddo.process(dc, &in)) - m := dc.currentMetric() - require.Equal(t, fmt.Sprintf("%d", (in*2)), getTag(m, "out")) -} - -func Test_U16ToStrAsT(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - ddo := U16ToStr(func(i uint16) string { return fmt.Sprintf("%d", i*2) }) - ddo2 := ddo.AsT("out") - require.Equal(t, ddo, ddo2.prev()) - in := uint16(5) - require.NoError(t, ddo.process(dc, &in)) - m := dc.currentMetric() - require.Equal(t, fmt.Sprintf("%d", (in*2)), getTag(m, "out")) -} - -func Test_MapU32ToStrAsT(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - myMap := map[uint32]string{5: "five"} - ddo := MapU32ToStr(myMap) - ddo2 := ddo.AsT("out") - require.Equal(t, ddo, ddo2.prev()) - in := uint32(5) - require.NoError(t, ddo.process(dc, &in)) - m := dc.currentMetric() - require.Equal(t, "five", getTag(m, "out")) -} - -func Test_MapU16ToStrAsT(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - myMap := map[uint16]string{5: "five"} - ddo := MapU16ToStr(myMap) - ddo2 := ddo.AsT("out") - require.Equal(t, ddo, ddo2.prev()) - in := uint16(5) - require.NoError(t, ddo.process(dc, &in)) - m := dc.currentMetric() - require.Equal(t, "five", getTag(m, "out")) -} - -func Test_DecDir_ToU32(t *testing.T) { - u := U32(). - Do(U32ToU32(func(in uint32) uint32 { return in >> 2 }).AsF("out1")). 
- Do(U32ToU32(func(in uint32) uint32 { return in * 2 }).AsF("out2")) - dd := Seq(OpenMetric(""), u, CloseMetric()) - - value := uint32(1001) - var buffer bytes.Buffer - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value)) - - dc := NewDecodeContext() - require.NoError(t, dc.Decode(dd, &buffer)) - - x, _ := u.(*valueDirective) - require.Equal(t, &value, x.value) - - // require field ejected - require.Equal(t, 1, len(dc.GetMetrics())) - m := dc.GetMetrics() - require.Equal(t, uint64(value>>2), getField(m[0], "out1")) - require.Equal(t, uint64(value*2), getField(m[0], "out2")) -} - -func Test_BytesToStrAsT(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - f := func(b []byte) string { return fmt.Sprintf("%d:%d", b[0], b[1]) } - ddo := BytesToStr(2, f) - ddo2 := ddo.AsT("out") - require.Equal(t, ddo, ddo2.prev()) - in := []byte{0x01, 0x02} - require.NoError(t, ddo.process(dc, in)) - m := dc.currentMetric() - require.Equal(t, fmt.Sprintf("%d:%d", in[0], in[1]), getTag(m, "out")) -} - -func Test_BytesToAsT(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - f := func(b []byte) interface{} { return fmt.Sprintf("%d:%d", b[0], b[1]) } - ddo := BytesTo(2, f) - ddo2 := ddo.AsT("out") - require.Equal(t, ddo, ddo2.prev()) - in := []byte{0x01, 0x02} - require.NoError(t, ddo.process(dc, in)) - m := dc.currentMetric() - require.Equal(t, fmt.Sprintf("%d:%d", in[0], in[1]), getTag(m, "out")) -} - -func Test_BytesToU32AsF(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - f := func(b []byte) uint32 { return uint32(b[0] * b[1]) } - ddo := BytesToU32(2, f) - ddo2 := ddo.AsF("out") - require.Equal(t, ddo, ddo2.prev()) - in := []byte{0x01, 0x02} - require.NoError(t, ddo.process(dc, in)) - m := dc.currentMetric() - require.Equal(t, uint64(in[0]*in[1]), getField(m, "out")) -} - -func Test_U32require(t *testing.T) { - dc := NewDecodeContext() - ddo := U32Assert(func(in uint32) bool { return false }, "bad") - in := uint32(5) - require.Error(t, ddo.process(dc, &in)) -} - -func Test_U16require(t *testing.T) { - dc := NewDecodeContext() - ddo := U16Assert(func(in uint16) bool { return false }, "bad") - in := uint16(5) - require.Error(t, ddo.process(dc, &in)) -} - -func Test_Set(t *testing.T) { - dc := NewDecodeContext() - ptr := new(uint32) - ddo := Set(ptr) - in := uint32(5) - require.NoError(t, ddo.process(dc, &in)) - require.Equal(t, *ptr, in) -} - -func Test_U16Set(t *testing.T) { - dc := NewDecodeContext() - ptr := new(uint16) - ddo := Set(ptr) - in := uint16(5) - require.NoError(t, ddo.process(dc, in)) - require.Equal(t, *ptr, in) -} - -func Test_U16PtrSet(t *testing.T) { - dc := NewDecodeContext() - ptr := new(uint16) - ddo := Set(ptr) - in := uint16(5) - require.NoError(t, ddo.process(dc, &in)) - require.Equal(t, *ptr, in) -} - -func Test_U32toU32Set(t *testing.T) { - dc := NewDecodeContext() - ptr := new(uint32) - ddo := U32ToU32(func(in uint32) uint32 { return in * 2 }).Set(ptr).prev() - in := uint32(5) - require.NoError(t, ddo.process(dc, &in)) - require.Equal(t, *ptr, in*2) -} - -func Test_U32toU32toString(t *testing.T) { - dc := NewDecodeContext() - ptr := new(string) - ddo := U32ToU32(func(in uint32) uint32 { return in * 2 }).ToString(func(in uint32) string { return fmt.Sprintf("%d", in*2) }).Set(ptr).prev().prev() - in := uint32(2) - require.NoError(t, ddo.process(dc, &in)) - require.Equal(t, "8", *ptr) -} - -func Test_U32toU32toStringBreakIf(t *testing.T) { - dc := NewDecodeContext() - ptr := new(string) - ddo := U32ToU32(func(in uint32) uint32 { 
return in * 2 }).ToString(func(in uint32) string { return fmt.Sprintf("%d", in*2) }).BreakIf("8").Set(ptr).prev().prev().prev() - in := uint32(2) - require.NoError(t, ddo.process(dc, &in)) - require.Equal(t, "", *ptr) - - in = uint32(1) - require.NoError(t, ddo.process(dc, &in)) - require.Equal(t, "4", *ptr) -} - -func Test_notify(t *testing.T) { - value := uint32(1001) - var buffer bytes.Buffer - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value)) - - ptr := new(uint32) - *ptr = uint32(2002) - var notificationOne uint32 - var notificationTwo uint32 - dd := Seq( - Notify(func() { notificationOne = *ptr }), - U32().Do(Set(ptr)), - Notify(func() { notificationTwo = *ptr }), - ) - - require.NoError(t, Execute(dd, &buffer)) - require.Equal(t, uint32(2002), notificationOne) - require.Equal(t, uint32(1001), notificationTwo) -} - -func Test_nop(t *testing.T) { - value := uint32(1001) - var buffer bytes.Buffer - require.NoError(t, binary.Write(&buffer, binary.BigEndian, &value)) - originalLen := buffer.Len() - dd := Seq( - Nop(), - ) - - require.NoError(t, Execute(dd, &buffer)) - require.Equal(t, originalLen, buffer.Len()) -} - -func Test_AsTimestamp(t *testing.T) { - dc := NewDecodeContext() - dc.openMetric("") - ddo := AsTimestamp() - now := time.Now() - in := uint32(now.Unix()) // only handles as uin32 (not uint64) - require.NoError(t, ddo.process(dc, &in)) - m := dc.currentMetric() - require.Equal(t, now.Unix(), m.Time().Unix()) -} diff --git a/plugins/inputs/sflow/decoder_test.go b/plugins/inputs/sflow/decoder_test.go index 33db1d1d2..c6e3916b8 100644 --- a/plugins/inputs/sflow/decoder_test.go +++ b/plugins/inputs/sflow/decoder_test.go @@ -7,17 +7,67 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs/sflow/decoder" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) func TestIPv4SW(t *testing.T) { - packet, err := hex.DecodeString("0000000500000001c0a80102000000100000f3d40bfa047f0000000200000001000000d00001210a000001fe000004000484240000000000000001fe00000200000000020000000100000090000000010000010b0000000400000080000c2936d3d694c691aa97600800450000f9f19040004011b4f5c0a80913c0a8090a00a1ba0500e5641f3081da02010104066d6f746f6770a281cc02047b46462e0201000201003081bd3012060d2b06010201190501010281dc710201003013060d2b06010201190501010281e66802025acc3012060d2b0601020119050101000003e9000000100000000900000000000000090000000000000001000000d00000e3cc000002100000400048eb740000000000000002100000020000000002000000010000009000000001000000970000000400000080000c2936d3d6fcecda44008f81000009080045000081186440003f119098c0a80815c0a8090a9a690202006d23083c33303e4170722031312030393a33333a3031206b6e6f64653120736e6d70645b313039385d3a20436f6e6e656374696f6e2066726f6d205544503a205b3139322e3136382e392e31305d3a34393233362d000003e90000001000000009000000000000000900000000") + str := `00000005` + // version + `00000001` + //address type + `c0a80102` + // ip address + `00000010` + // sub agent id + `0000f3d4` + // sequence number + `0bfa047f` + // uptime + `00000002` + // sample count + `00000001` + // sample type + `000000d0` + // sample data length + `0001210a` + // sequence number + `000001fe` + // source id 00 = source id type, 0001fe = source id index + `00000400` + // sampling rate.. apparently this should be input if index???? 
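+ // (decodeFlowSample splits the source id a few lines up into a type
+ // (top byte) and an index (low 24 bits): 0x000001fe >> 24 == 0 and
+ // 0x000001fe & 0x00ffffff == 0x1fe == 510, the source_id_index
+ // asserted in the expected metrics below)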
+ `04842400` + // sample pool + `00000000` + // drops + `000001fe` + // input if index + `00000200` + // output if index + `00000002` + // flow records count + `00000001` + // FlowFormat + `00000090` + // flow length + `00000001` + // header protocol + `0000010b` + // Frame length + `00000004` + // stripped octets + `00000080` + // header length + `000c2936d3d6` + // dest mac + `94c691aa9760` + // source mac + `0800` + // etype code: ipv4 + `4500` + // dscp + ecn + `00f9` + // total length + `f190` + // identification + `4000` + // fragment offset + flags + `40` + // ttl + `11` + // protocol + `b4f5` + // header checksum + `c0a80913` + // source ip + `c0a8090a` + // dest ip + `00a1` + // source port + `ba05` + // dest port + `00e5` + // udp length + // rest of header/flowSample we ignore + `641f3081da02010104066d6f746f6770a281cc02047b46462e0201000201003081bd3012060d2b06010201190501010281dc710201003013060d2b06010201190501010281e66802025acc3012060d2b0601020119050101` + + // next flow record - ignored + `000003e90000001000000009000000000000000900000000` + + // next sample + `00000001000000d00000e3cc000002100000400048eb740000000000000002100000020000000002000000010000009000000001000000970000000400000080000c2936d3d6fcecda44008f81000009080045000081186440003f119098c0a80815c0a8090a9a690202006d23083c33303e4170722031312030393a33333a3031206b6e6f64653120736e6d70645b313039385d3a20436f6e6e656374696f6e2066726f6d205544503a205b3139322e3136382e392e31305d3a34393233362d000003e90000001000000009000000000000000900000000` + packet, err := hex.DecodeString(str) require.NoError(t, err) - dc := decoder.NewDecodeContext() - err = dc.Decode(V5Format(NewDefaultV5FormatOptions()), bytes.NewBuffer(packet)) + actual := []telegraf.Metric{} + dc := NewDecoder() + dc.OnPacket(func(p *V5Format) { + metrics, err := makeMetrics(p) + require.NoError(t, err) + actual = append(actual, metrics...) 
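+ // (the OnPacket callback fires once per decoded datagram; collecting
+ // metrics into `actual` here mirrors what SFlow.Start does with the
+ // accumulator)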
+ }) + buf := bytes.NewReader(packet) + err = dc.Decode(buf) require.NoError(t, err) expected := []telegraf.Metric{ @@ -31,8 +81,6 @@ func TestIPv4SW(t *testing.T) { "ether_type": "IPv4", "header_protocol": "ETHERNET-ISO88023", "input_ifindex": "510", - "ip_dscp": "0", - "ip_ecn": "0", "output_ifindex": "512", "sample_direction": "ingress", "source_id_index": "510", @@ -52,6 +100,8 @@ func TestIPv4SW(t *testing.T) { "ip_ttl": uint64(0x40), "sampling_rate": uint64(0x0400), "udp_length": uint64(0xe5), + "ip_dscp": "0", + "ip_ecn": "0", }, time.Unix(0, 0), ), @@ -65,8 +115,6 @@ func TestIPv4SW(t *testing.T) { "ether_type": "IPv4", "header_protocol": "ETHERNET-ISO88023", "input_ifindex": "528", - "ip_dscp": "0", - "ip_ecn": "0", "output_ifindex": "512", "sample_direction": "ingress", "source_id_index": "528", @@ -86,11 +134,12 @@ func TestIPv4SW(t *testing.T) { "ip_ttl": uint64(0x3f), "sampling_rate": uint64(0x4000), "udp_length": uint64(0x6d), + "ip_dscp": "0", + "ip_ecn": "0", }, time.Unix(0, 0), ), } - actual := dc.GetMetrics() testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) } @@ -98,25 +147,15 @@ func BenchmarkDecodeIPv4SW(b *testing.B) { packet, err := hex.DecodeString("0000000500000001c0a80102000000100000f3d40bfa047f0000000200000001000000d00001210a000001fe000004000484240000000000000001fe00000200000000020000000100000090000000010000010b0000000400000080000c2936d3d694c691aa97600800450000f9f19040004011b4f5c0a80913c0a8090a00a1ba0500e5641f3081da02010104066d6f746f6770a281cc02047b46462e0201000201003081bd3012060d2b06010201190501010281dc710201003013060d2b06010201190501010281e66802025acc3012060d2b0601020119050101000003e9000000100000000900000000000000090000000000000001000000d00000e3cc000002100000400048eb740000000000000002100000020000000002000000010000009000000001000000970000000400000080000c2936d3d6fcecda44008f81000009080045000081186440003f119098c0a80815c0a8090a9a690202006d23083c33303e4170722031312030393a33333a3031206b6e6f64653120736e6d70645b313039385d3a20436f6e6e656374696f6e2066726f6d205544503a205b3139322e3136382e392e31305d3a34393233362d000003e90000001000000009000000000000000900000000") require.NoError(b, err) - dc := decoder.NewDecodeContext() - err = dc.Decode(V5Format(NewDefaultV5FormatOptions()), bytes.NewBuffer(packet)) + dc := NewDecoder() require.NoError(b, err) - format := V5Format(NewDefaultV5FormatOptions()) b.ResetTimer() for n := 0; n < b.N; n++ { - err := dc.Decode(format, bytes.NewBuffer(packet)) + _, err = dc.DecodeOnePacket(bytes.NewBuffer(packet)) if err != nil { panic(err) } - - _ = dc.GetMetrics() - } -} - -func BenchmarkNewV5FormatDirective(b *testing.B) { - for n := 0; n < b.N; n++ { - _ = V5Format(NewDefaultV5FormatOptions()) } } @@ -124,8 +163,10 @@ func TestExpandFlow(t *testing.T) { packet, err := 
hex.DecodeString("00000005000000010a00015000000000000f58998ae119780000000300000003000000c4000b62a90000000000100c840000040024fb7e1e0000000000000000001017840000000000100c8400000001000000010000009000000001000005bc0000000400000080001b17000130001201f58d44810023710800450205a6305440007e06ee92ac100016d94d52f505997e701fa1e17aff62574a50100200355f000000ffff00000b004175746f72697a7a6174610400008040ffff000400008040050031303030320500313030302004000000000868a200000000000000000860a200000000000000000003000000c40003cecf000000000010170400004000a168ac1c000000000000000000101784000000000010170400000001000000010000009000000001000005f200000004000000800024e8324338d4ae52aa0b54810020060800450005dc5420400080061397c0a8060cc0a806080050efcfbb25bad9a21c839a501000fff54000008a55f70975a0ff88b05735597ae274bd81fcba17e6e9206b8ea0fb07d05fc27dad06cfe3fdba5d2fc4d057b0add711e596cbe5e9b4bbe8be59cd77537b7a89f7414a628b736d00000003000000c0000c547a0000000000100c04000004005bc3c3b50000000000000000001017840000000000100c0400000001000000010000008c000000010000007e000000040000007a001b17000130001201f58d448100237108004500006824ea4000ff32c326d94d5105501018f02e88d003000001dd39b1d025d1c68689583b2ab21522d5b5a959642243804f6d51e63323091cc04544285433eb3f6b29e1046a6a2fa7806319d62041d8fa4bd25b7cd85b8db54202054a077ac11de84acbe37a550004") require.NoError(t, err) - dc := decoder.NewDecodeContext() - err = dc.Decode(V5Format(NewDefaultV5FormatOptions()), bytes.NewBuffer(packet)) + dc := NewDecoder() + p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet)) + require.NoError(t, err) + actual, err := makeMetrics(p) require.NoError(t, err) expected := []telegraf.Metric{ @@ -139,8 +180,6 @@ func TestExpandFlow(t *testing.T) { "ether_type": "IPv4", "header_protocol": "ETHERNET-ISO88023", "input_ifindex": "1054596", - "ip_dscp": "0", - "ip_ecn": "2", "output_ifindex": "1051780", "sample_direction": "egress", "source_id_index": "1051780", @@ -159,9 +198,11 @@ func TestExpandFlow(t *testing.T) { "ip_total_length": uint64(0x05a6), "ip_ttl": uint64(0x7e), "sampling_rate": uint64(0x0400), - "tcp_header_length": uint64(0x40), + "tcp_header_length": uint64(0x14), "tcp_urgent_pointer": uint64(0x00), "tcp_window_size": uint64(0x0200), + "ip_dscp": "0", + "ip_ecn": "2", }, time.Unix(0, 0), ), @@ -175,8 +216,6 @@ func TestExpandFlow(t *testing.T) { "ether_type": "IPv4", "header_protocol": "ETHERNET-ISO88023", "input_ifindex": "1054596", - "ip_dscp": "0", - "ip_ecn": "0", "output_ifindex": "1054468", "sample_direction": "egress", "source_id_index": "1054468", @@ -195,9 +234,11 @@ func TestExpandFlow(t *testing.T) { "ip_total_length": uint64(0x05dc), "ip_ttl": uint64(0x80), "sampling_rate": uint64(0x4000), - "tcp_header_length": uint64(0x40), + "tcp_header_length": uint64(0x14), "tcp_urgent_pointer": uint64(0x00), "tcp_window_size": uint64(0xff), + "ip_dscp": "0", + "ip_ecn": "0", }, time.Unix(0, 0), ), @@ -210,8 +251,6 @@ func TestExpandFlow(t *testing.T) { "ether_type": "IPv4", "header_protocol": "ETHERNET-ISO88023", "input_ifindex": "1054596", - "ip_dscp": "0", - "ip_ecn": "0", "output_ifindex": "1051652", "sample_direction": "egress", "source_id_index": "1051652", @@ -229,11 +268,12 @@ func TestExpandFlow(t *testing.T) { "ip_total_length": uint64(0x68), "ip_ttl": uint64(0xff), "sampling_rate": uint64(0x0400), + "ip_dscp": "0", + "ip_ecn": "0", }, time.Unix(0, 0), ), } - actual := dc.GetMetrics() testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) } @@ -241,8 +281,10 @@ func TestIPv4SWRT(t *testing.T) { packet, err := 
hex.DecodeString("000000050000000189dd4f010000000000003d4f21151ad40000000600000001000000bc354b97090000020c000013b175792bea000000000000028f0000020c0000000300000001000000640000000100000058000000040000005408b2587a57624c16fc0b61a5080045000046c3e440003a1118a0052aada7569e5ab367a6e35b0032d7bbf1f2fb2eb2490a97f87abc31e135834be367000002590000ffffffffffffffff02add830d51e0aec14cf000003e90000001000000000000000000000000000000000000003ea0000001000000001c342e32a000000160000000b00000001000000a88b8ffb57000002a2000013b12e344fd800000000000002a20000028f0000000300000001000000500000000100000042000000040000003e4c16fc0b6202c03e0fdecafe080045000030108000007d11fe45575185a718693996f0570e8c001c20614ad602003fd6d4afa6a6d18207324000271169b00000000003e90000001000000000000000000000000000000000000003ea000000100000000189dd4f210000000f0000001800000001000000e8354b970a0000020c000013b175793f9b000000000000028f0000020c00000003000000010000009000000001000001a500000004000000800231466d0b2c4c16fc0b61a5080045000193198f40003a114b75052aae1f5f94c778678ef24d017f50ea7622287c30799e1f7d45932d01ca92c46d930000927c0000ffffffffffffffff02ad0eea6498953d1c7ebb6dbdf0525c80e1a9a62bacfea92f69b7336c2f2f60eba0593509e14eef167eb37449f05ad70b8241c1a46d000003e90000001000000000000000000000000000000000000003ea0000001000000001c342e1fd000000160000001000000001000000e8354b970b0000020c000013b17579534c000000000000028f0000020c00000003000000010000009000000001000000b500000004000000800231466d0b2c4c16fc0b61a50800450000a327c240003606fd67b93c706a021ff365045fe8a0976d624df8207083501800edb31b0000485454502f312e3120323030204f4b0d0a5365727665723a2050726f746f636f6c20485454500d0a436f6e74656e742d4c656e6774683a20313430340d0a436f6e6e656374696f6e3a20000003e90000001000000000000000000000000000000000000003ea0000001000000001c342e1fd000000170000001000000001000000e8354b970c0000020c000013b1757966fd000000000000028f0000020c000000030000000100000090000000010000018e00000004000000800231466d0b2c4c16fc0b61a508004500017c7d2c40003a116963052abd8d021c940e67e7e0d501682342dbe7936bd47ef487dee5591ec1b24d83622e000072250000ffffffffffffffff02ad0039d8ba86a90017071d76b177de4d8c4e23bcaaaf4d795f77b032f959e0fb70234d4c28922d4e08dd3330c66e34bff51cc8ade5000003e90000001000000000000000000000000000000000000003ea0000001000000001c342e1fd000000160000001000000001000000e80d6146ac000002a1000013b17880b49d00000000000002a10000028f00000003000000010000009000000001000005ee00000004000000804c16fc0b6201d8b122766a2c0800450005dc04574000770623a11fcd80a218691d4cf2fe01bbd4f47482065fd63a5010fabd7987000052a20002c8c43ea91ca1eaa115663f5218a37fbb409dfbbedff54731ef41199b35535905ac2366a05a803146ced544abf45597f3714327d59f99e30c899c39fc5a4b67d12087bf8db2bc000003e90000001000000000000000000000000000000000000003ea000000100000000189dd4f210000001000000018") require.NoError(t, err) - dc := decoder.NewDecodeContext() - err = dc.Decode(V5Format(NewDefaultV5FormatOptions()), bytes.NewBuffer(packet)) + dc := NewDecoder() + p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet)) + require.NoError(t, err) + actual, err := makeMetrics(p) require.NoError(t, err) expected := []telegraf.Metric{ @@ -256,8 +298,6 @@ func TestIPv4SWRT(t *testing.T) { "ether_type": "IPv4", "header_protocol": "ETHERNET-ISO88023", "input_ifindex": "655", - "ip_dscp": "0", - "ip_ecn": "0", "output_ifindex": "524", "sample_direction": "egress", "source_id_index": "524", @@ -277,6 +317,8 @@ func TestIPv4SWRT(t *testing.T) { "ip_ttl": uint64(0x3a), "sampling_rate": uint64(0x13b1), "udp_length": uint64(0x32), + "ip_dscp": "0", + "ip_ecn": "0", }, time.Unix(0, 0), ), @@ -290,8 +332,6 @@ func 
TestIPv4SWRT(t *testing.T) { "ether_type": "IPv4", "header_protocol": "ETHERNET-ISO88023", "input_ifindex": "674", - "ip_dscp": "0", - "ip_ecn": "0", "output_ifindex": "655", "sample_direction": "ingress", "source_id_index": "674", @@ -311,6 +351,8 @@ func TestIPv4SWRT(t *testing.T) { "ip_ttl": uint64(0x7d), "sampling_rate": uint64(0x13b1), "udp_length": uint64(0x1c), + "ip_dscp": "0", + "ip_ecn": "0", }, time.Unix(0, 0), ), @@ -324,8 +366,6 @@ func TestIPv4SWRT(t *testing.T) { "ether_type": "IPv4", "header_protocol": "ETHERNET-ISO88023", "input_ifindex": "655", - "ip_dscp": "0", - "ip_ecn": "0", "output_ifindex": "524", "sample_direction": "egress", "source_id_index": "524", @@ -345,6 +385,8 @@ func TestIPv4SWRT(t *testing.T) { "ip_ttl": uint64(0x3a), "sampling_rate": uint64(0x13b1), "udp_length": uint64(0x017f), + "ip_dscp": "0", + "ip_ecn": "0", }, time.Unix(0, 0), ), @@ -358,8 +400,6 @@ func TestIPv4SWRT(t *testing.T) { "ether_type": "IPv4", "header_protocol": "ETHERNET-ISO88023", "input_ifindex": "655", - "ip_dscp": "0", - "ip_ecn": "0", "output_ifindex": "524", "sample_direction": "egress", "source_id_index": "524", @@ -378,9 +418,11 @@ func TestIPv4SWRT(t *testing.T) { "ip_total_length": uint64(0xa3), "ip_ttl": uint64(0x36), "sampling_rate": uint64(0x13b1), - "tcp_header_length": uint64(0x40), + "tcp_header_length": uint64(0x14), "tcp_urgent_pointer": uint64(0x00), "tcp_window_size": uint64(0xed), + "ip_dscp": "0", + "ip_ecn": "0", }, time.Unix(0, 0), ), @@ -394,8 +436,6 @@ func TestIPv4SWRT(t *testing.T) { "ether_type": "IPv4", "header_protocol": "ETHERNET-ISO88023", "input_ifindex": "655", - "ip_dscp": "0", - "ip_ecn": "0", "output_ifindex": "524", "sample_direction": "egress", "source_id_index": "524", @@ -415,6 +455,8 @@ func TestIPv4SWRT(t *testing.T) { "ip_ttl": uint64(0x3a), "sampling_rate": uint64(0x13b1), "udp_length": uint64(0x0168), + "ip_dscp": "0", + "ip_ecn": "0", }, time.Unix(0, 0), ), @@ -428,8 +470,6 @@ func TestIPv4SWRT(t *testing.T) { "ether_type": "IPv4", "header_protocol": "ETHERNET-ISO88023", "input_ifindex": "673", - "ip_dscp": "0", - "ip_ecn": "0", "output_ifindex": "655", "sample_direction": "ingress", "source_id_index": "673", @@ -448,14 +488,15 @@ func TestIPv4SWRT(t *testing.T) { "ip_total_length": uint64(0x05dc), "ip_ttl": uint64(0x77), "sampling_rate": uint64(0x13b1), - "tcp_header_length": uint64(0x40), + "tcp_header_length": uint64(0x14), "tcp_urgent_pointer": uint64(0x00), "tcp_window_size": uint64(0xfabd), + "ip_dscp": "0", + "ip_ecn": "0", }, time.Unix(0, 0), ), } - actual := dc.GetMetrics() testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) } @@ -463,8 +504,10 @@ func TestIPv6SW(t *testing.T) { packet, err := hex.DecodeString("00000005000000010ae0648100000002000093d824ac82340000000100000001000000d000019f94000001010000100019f94000000000000000010100000000000000020000000100000090000000010000058c00000008000000800008e3fffc10d4f4be04612486dd60000000054e113a2607f8b0400200140000000000000008262000edc000e804a25e30c581af36fa01bbfa6f054e249810b584bcbf12926c2e29a779c26c72db483e8191524fe2288bfdaceaf9d2e724d04305706efcfdef70db86873bbacf29698affe4e7d6faa21d302f9b4b023291a05a000003e90000001000000001000000000000000100000000") require.NoError(t, err) - dc := decoder.NewDecodeContext() - err = dc.Decode(V5Format(NewDefaultV5FormatOptions()), bytes.NewBuffer(packet)) + dc := NewDecoder() + p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet)) + require.NoError(t, err) + actual, err := makeMetrics(p) require.NoError(t, err) expected := 
[]telegraf.Metric{ @@ -488,19 +531,19 @@ func TestIPv6SW(t *testing.T) { "src_port": "443", }, map[string]interface{}{ - "bytes": uint64(0x58c000), - "drops": uint64(0x00), - "frame_length": uint64(0x058c), - "header_length": uint64(0x80), - "ip_dscp": uint64(0x00), - "ip_ecn": uint64(0x00), - "sampling_rate": uint64(0x1000), - "udp_length": uint64(0x054e), + "bytes": uint64(0x58c000), + "drops": uint64(0x00), + "frame_length": uint64(0x058c), + "header_length": uint64(0x80), + "sampling_rate": uint64(0x1000), + "payload_length": uint64(0x054e), + "udp_length": uint64(0x054e), + "ip_dscp": "0", + "ip_ecn": "0", }, time.Unix(0, 0), ), } - actual := dc.GetMetrics() testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) } @@ -508,8 +551,10 @@ func TestExpandFlowCounter(t *testing.T) { packet, err := hex.DecodeString("00000005000000010a00015000000000000f58898ae0fa380000000700000004000000ec00006ece0000000000101784000000030000000200000034000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000058001017840000000600000002540be400000000010000000300007b8ebd37b97e61ff94860803e8e908ffb2b500000000000000000000000000018e7c31ee7ba4195f041874579ff021ba936300000000000000000000000100000007000000380011223344550003f8b15645e7e7d6960000002fe2fc02fc01edbf580000000000000000000000000000000001dcb9cf000000000000000000000004000000ec00006ece0000000000100184000000030000000200000034000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000058001001840000000600000002540be400000000010000000300000841131d1fd9f850bfb103617cb401e6598900000000000000000000000000000bec1902e5da9212e3e96d7996e922513250000000000000000000000001000000070000003800112233445500005c260acbddb3000100000003e2fc02fc01ee414f0000000000000000000000000000000001dccdd30000000000000000000000030000008400004606000000000010030400004000ad9dc19b0000000000000000001017840000000000100304000000010000000100000050000000010000004400000004000000400012815116c4001517cf426d8100200608004500002895da40008006d74bc0a8060ac0a8064f04ef04aab1797122cf7eaf4f5010ffff7727000000000000000000000003000000b0001bd698000000000010148400000400700b180f000000000000000000101504000000000010148400000001000000010000007c000000010000006f000000040000006b001b17000131f0f755b9afc081000439080045000059045340005206920c1f0d4703d94d52e201bbf14977d1e9f15498af36801800417f1100000101080afdf3c70400e043871503010020ff268cfe2e2fd5fffe1d3d704a91d57b895f174c4b4428c66679d80a307294303f00000003000000c40003ceca000000000010170400004000a166aa7a000000000000000000101784000000000010170400000001000000010000009000000001000005f200000004000000800024e8369e2bd4ae52aa0b54810020060800450005dc4c71400080061b45c0a8060cc0a806090050f855692a7a94a1154ae1801001046b6a00000101080a6869a48d151016d046a84a7aa1c6743fa05179f7ecbd4e567150cb6f2077ff89480ae730637d26d2237c08548806f672c7476eb1b5a447b42cb9ce405994d152fa3e000000030000008c001bd699000000000010148400000400700b180f0000000000000000001015040000000000101484000000010000000100000058000000010000004a0000000400000046001b17000131f0f755b9afc0810004390800450000340ce040003a06bea5c1ce8793d94d528f00504c3b08b18f275b83d5df8010054586ad00000101050a5b83d5de5b83d5df11d800000003000000c400004e07000000000010028400004000c7ec97f2000000000000000000100784000000000010028400000001000000010000009000000001000005f2000000040000008000005e0001ff005056800dd18100000a0800450005dc5a42400040066ef70a000ac8c0a8967201bbe17c81597908caf8a05f5010010328610000f172263da0ba5d6223c079b8238bc841256bf17c4ffb08ad11c4fbff6f8
7ae1624a6b057b8baa9342114e5f5b46179083020cb560c4e9eadcec6dfd83e102ddbc27024803eb5") require.NoError(t, err) - dc := decoder.NewDecodeContext() - err = dc.Decode(V5Format(NewDefaultV5FormatOptions()), bytes.NewBuffer(packet)) + dc := NewDecoder() + p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet)) + require.NoError(t, err) + actual, err := makeMetrics(p) require.NoError(t, err) expected := []telegraf.Metric{ @@ -523,8 +568,6 @@ func TestExpandFlowCounter(t *testing.T) { "ether_type": "IPv4", "header_protocol": "ETHERNET-ISO88023", "input_ifindex": "1054596", - "ip_dscp": "0", - "ip_ecn": "0", "output_ifindex": "1049348", "sample_direction": "egress", "source_id_index": "1049348", @@ -543,9 +586,11 @@ func TestExpandFlowCounter(t *testing.T) { "ip_total_length": uint64(0x28), "ip_ttl": uint64(0x80), "sampling_rate": uint64(0x4000), - "tcp_header_length": uint64(0x40), + "tcp_header_length": uint64(0x14), "tcp_urgent_pointer": uint64(0x00), "tcp_window_size": uint64(0xffff), + "ip_dscp": "0", + "ip_ecn": "0", }, time.Unix(0, 0), ), @@ -559,8 +604,6 @@ func TestExpandFlowCounter(t *testing.T) { "ether_type": "IPv4", "header_protocol": "ETHERNET-ISO88023", "input_ifindex": "1053956", - "ip_dscp": "0", - "ip_ecn": "0", "output_ifindex": "1053828", "sample_direction": "egress", "source_id_index": "1053828", @@ -579,9 +622,11 @@ func TestExpandFlowCounter(t *testing.T) { "ip_total_length": uint64(0x59), "ip_ttl": uint64(0x52), "sampling_rate": uint64(0x0400), - "tcp_header_length": uint64(0x00), + "tcp_header_length": uint64(0x20), "tcp_urgent_pointer": uint64(0x00), "tcp_window_size": uint64(0x41), + "ip_dscp": "0", + "ip_ecn": "0", }, time.Unix(0, 0), ), @@ -595,8 +640,6 @@ func TestExpandFlowCounter(t *testing.T) { "ether_type": "IPv4", "header_protocol": "ETHERNET-ISO88023", "input_ifindex": "1054596", - "ip_dscp": "0", - "ip_ecn": "0", "output_ifindex": "1054468", "sample_direction": "egress", "source_id_index": "1054468", @@ -615,9 +658,11 @@ func TestExpandFlowCounter(t *testing.T) { "ip_total_length": uint64(0x05dc), "ip_ttl": uint64(0x80), "sampling_rate": uint64(0x4000), - "tcp_header_length": uint64(0x00), + "tcp_header_length": uint64(0x20), "tcp_urgent_pointer": uint64(0x00), "tcp_window_size": uint64(0x0104), + "ip_dscp": "0", + "ip_ecn": "0", }, time.Unix(0, 0), ), @@ -631,8 +676,6 @@ func TestExpandFlowCounter(t *testing.T) { "ether_type": "IPv4", "header_protocol": "ETHERNET-ISO88023", "input_ifindex": "1053956", - "ip_dscp": "0", - "ip_ecn": "0", "output_ifindex": "1053828", "sample_direction": "egress", "source_id_index": "1053828", @@ -651,9 +694,11 @@ func TestExpandFlowCounter(t *testing.T) { "ip_total_length": uint64(0x34), "ip_ttl": uint64(0x3a), "sampling_rate": uint64(0x0400), - "tcp_header_length": uint64(0x00), + "tcp_header_length": uint64(0x20), "tcp_urgent_pointer": uint64(0x00), "tcp_window_size": uint64(0x0545), + "ip_dscp": "0", + "ip_ecn": "0", }, time.Unix(0, 0), ), @@ -667,8 +712,6 @@ func TestExpandFlowCounter(t *testing.T) { "ether_type": "IPv4", "header_protocol": "ETHERNET-ISO88023", "input_ifindex": "1050500", - "ip_dscp": "0", - "ip_ecn": "0", "output_ifindex": "1049220", "sample_direction": "egress", "source_id_index": "1049220", @@ -687,14 +730,15 @@ func TestExpandFlowCounter(t *testing.T) { "ip_total_length": uint64(0x05dc), "ip_ttl": uint64(0x40), "sampling_rate": uint64(0x4000), - "tcp_header_length": uint64(0x40), + "tcp_header_length": uint64(0x14), "tcp_urgent_pointer": uint64(0x00), "tcp_window_size": uint64(0x0103), + "ip_dscp": "0", + 
"ip_ecn": "0", }, time.Unix(0, 0), ), } - actual := dc.GetMetrics() testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) } @@ -702,274 +746,13 @@ func TestFlowExpandCounter(t *testing.T) { packet, err := hex.DecodeString("00000005000000010a000150000000000006d14d8ae0fe200000000200000004000000ac00006d15000000004b00ca000000000200000002000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000584b00ca0000000001000000000000000000000001000000010000308ae33bb950eb92a8a3004d0bb406899571000000000000000000000000000012f7ed9c9db8c24ed90604eaf0bd04636edb00000000000000000000000100000004000000ac00006d15000000004b0054000000000200000002000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000584b00540000000001000000003b9aca000000000100000003000067ba8e64fd23fa65f26d0215ec4a0021086600000000000000000000000000002002c3b21045c2378ad3001fb2f300061872000000000000000000000001") require.NoError(t, err) - dc := decoder.NewDecodeContext() - err = dc.Decode(V5Format(NewDefaultV5FormatOptions()), bytes.NewBuffer(packet)) + dc := NewDecoder() + p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet)) + require.NoError(t, err) + actual, err := makeMetrics(p) require.NoError(t, err) // we don't do anything with samples yet expected := []telegraf.Metric{} - actual := dc.GetMetrics() testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) } - -func TestUDPHeader(t *testing.T) { - options := NewDefaultV5FormatOptions() - octets := bytes.NewBuffer([]byte{ - 0x00, 0x01, // src_port - 0x00, 0x02, // dst_port - 0x00, 0x03, // udp_length - }) - - directive := decoder.Seq( - decoder.OpenMetric("sflow"), - udpHeader(options), - decoder.CloseMetric(), - ) - dc := decoder.NewDecodeContext() - err := directive.Execute(octets, dc) - require.NoError(t, err) - - expected := []telegraf.Metric{ - testutil.MustMetric( - "sflow", - map[string]string{ - "src_port": "1", - "dst_port": "2", - }, - map[string]interface{}{ - "udp_length": uint64(3), - }, - time.Unix(0, 0), - ), - } - - testutil.RequireMetricsEqual(t, expected, dc.GetMetrics(), testutil.IgnoreTime()) -} - -func BenchmarkUDPHeader(b *testing.B) { - options := NewDefaultV5FormatOptions() - octets := bytes.NewBuffer([]byte{ - 0x00, 0x01, // src_port - 0x00, 0x02, // dst_port - 0x00, 0x03, // udp_length - }) - - directive := decoder.Seq( - decoder.OpenMetric("sflow"), - udpHeader(options), - decoder.CloseMetric(), - ) - dc := decoder.NewDecodeContext() - - b.ResetTimer() - for n := 0; n < b.N; n++ { - _ = directive.Execute(octets, dc) - } -} - -func TestIPv4Header(t *testing.T) { - octets := bytes.NewBuffer( - []byte{ - 0x45, // version + IHL - 0x00, // ip_dscp + ip_ecn - 0x00, 0x00, // total length - 0x00, 0x00, // identification - 0x00, 0x00, // flags + frag offset - 0x00, // ttl - 0x11, // protocol; 0x11 = udp - 0x00, 0x00, // header checksum - 0x7f, 0x00, 0x00, 0x01, // src ip - 0x7f, 0x00, 0x00, 0x02, // dst ip - 0x00, 0x01, // src_port - 0x00, 0x02, // dst_port - 0x00, 0x03, // udp_length - }, - ) - dc := decoder.NewDecodeContext() - - options := NewDefaultV5FormatOptions() - directive := decoder.Seq( - decoder.OpenMetric("sflow"), - ipv4Header(options), - decoder.CloseMetric(), - ) - - err := directive.Execute(octets, dc) - require.NoError(t, err) - - expected := []telegraf.Metric{ - testutil.MustMetric( - "sflow", - map[string]string{ - "src_ip": "127.0.0.1", - "dst_ip": "127.0.0.2", - "ip_dscp": "0", - 
"ip_ecn": "0", - "src_port": "1", - "dst_port": "2", - }, - map[string]interface{}{ - "ip_flags": uint64(0), - "ip_fragment_offset": uint64(0), - "ip_total_length": uint64(0), - "ip_ttl": uint64(0), - "udp_length": uint64(3), - }, - time.Unix(0, 0), - ), - } - - testutil.RequireMetricsEqual(t, expected, dc.GetMetrics(), testutil.IgnoreTime()) -} - -// Using the same Directive instance, prior paths through the parse tree should -// not affect the latest parse. -func TestIPv4HeaderSwitch(t *testing.T) { - options := NewDefaultV5FormatOptions() - directive := decoder.Seq( - decoder.OpenMetric("sflow"), - ipv4Header(options), - decoder.CloseMetric(), - ) - - octets := bytes.NewBuffer( - []byte{ - 0x45, // version + IHL - 0x00, // ip_dscp + ip_ecn - 0x00, 0x00, // total length - 0x00, 0x00, // identification - 0x00, 0x00, // flags + frag offset - 0x00, // ttl - 0x11, // protocol; 0x11 = udp - 0x00, 0x00, // header checksum - 0x7f, 0x00, 0x00, 0x01, // src ip - 0x7f, 0x00, 0x00, 0x02, // dst ip - 0x00, 0x01, // src_port - 0x00, 0x02, // dst_port - 0x00, 0x03, // udp_length - }, - ) - dc := decoder.NewDecodeContext() - err := directive.Execute(octets, dc) - require.NoError(t, err) - - octets = bytes.NewBuffer( - []byte{ - 0x45, // version + IHL - 0x00, // ip_dscp + ip_ecn - 0x00, 0x00, // total length - 0x00, 0x00, // identification - 0x00, 0x00, // flags + frag offset - 0x00, // ttl - 0x06, // protocol; 0x06 = tcp - 0x00, 0x00, // header checksum - 0x7f, 0x00, 0x00, 0x01, // src ip - 0x7f, 0x00, 0x00, 0x02, // dst ip - 0x00, 0x01, // src_port - 0x00, 0x02, // dst_port - 0x00, 0x00, 0x00, 0x00, // sequence - 0x00, 0x00, 0x00, 0x00, // ack_number - 0x00, 0x00, // tcp_header_length - 0x00, 0x00, // tcp_window_size - 0x00, 0x00, // checksum - 0x00, 0x00, // tcp_urgent_pointer - }, - ) - dc = decoder.NewDecodeContext() - err = directive.Execute(octets, dc) - require.NoError(t, err) - - expected := []telegraf.Metric{ - testutil.MustMetric( - "sflow", - map[string]string{ - "src_ip": "127.0.0.1", - "dst_ip": "127.0.0.2", - "ip_dscp": "0", - "ip_ecn": "0", - "src_port": "1", - "dst_port": "2", - }, - map[string]interface{}{ - "ip_flags": uint64(0), - "ip_fragment_offset": uint64(0), - "ip_total_length": uint64(0), - "ip_ttl": uint64(0), - "tcp_header_length": uint64(0), - "tcp_window_size": uint64(0), - "tcp_urgent_pointer": uint64(0), - }, - time.Unix(0, 0), - ), - } - - // check that udp fields are not set on the tcp metric - testutil.RequireMetricsEqual(t, expected, dc.GetMetrics(), testutil.IgnoreTime()) -} - -func TestUnknownProtocol(t *testing.T) { - octets := bytes.NewBuffer( - []byte{ - 0x45, // version + IHL - 0x00, // ip_dscp + ip_ecn - 0x00, 0x00, // total length - 0x00, 0x00, // identification - 0x00, 0x00, // flags + frag offset - 0x00, // ttl - 0x99, // protocol - 0x00, 0x00, // header checksum - 0x7f, 0x00, 0x00, 0x01, // src ip - 0x7f, 0x00, 0x00, 0x02, // dst ip - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - }, - ) - dc := decoder.NewDecodeContext() - - options := NewDefaultV5FormatOptions() - directive := decoder.Seq( - decoder.OpenMetric("sflow"), - ipv4Header(options), - decoder.CloseMetric(), - ) - - err := directive.Execute(octets, dc) - require.NoError(t, err) - - expected := []telegraf.Metric{ - testutil.MustMetric( - "sflow", - map[string]string{ - "src_ip": "127.0.0.1", - "dst_ip": "127.0.0.2", - "ip_dscp": "0", - "ip_ecn": "0", - }, - map[string]interface{}{ - "ip_flags": 
uint64(0), - "ip_fragment_offset": uint64(0), - "ip_total_length": uint64(0), - "ip_ttl": uint64(0), - }, - time.Unix(0, 0), - ), - } - - testutil.RequireMetricsEqual(t, expected, dc.GetMetrics(), testutil.IgnoreTime()) -} diff --git a/plugins/inputs/sflow/metricencoder.go b/plugins/inputs/sflow/metricencoder.go new file mode 100644 index 000000000..ffc9d8e02 --- /dev/null +++ b/plugins/inputs/sflow/metricencoder.go @@ -0,0 +1,46 @@ +package sflow + +import ( + "strconv" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +func makeMetrics(p *V5Format) ([]telegraf.Metric, error) { + now := time.Now() + metrics := []telegraf.Metric{} + tags := map[string]string{ + "agent_address": p.AgentAddress.String(), + } + fields := map[string]interface{}{} + for _, sample := range p.Samples { + tags["input_ifindex"] = strconv.FormatUint(uint64(sample.SampleData.InputIfIndex), 10) + tags["output_ifindex"] = strconv.FormatUint(uint64(sample.SampleData.OutputIfIndex), 10) + tags["sample_direction"] = sample.SampleData.SampleDirection + tags["source_id_index"] = strconv.FormatUint(uint64(sample.SampleData.SourceIDIndex), 10) + tags["source_id_type"] = strconv.FormatUint(uint64(sample.SampleData.SourceIDType), 10) + fields["drops"] = sample.SampleData.Drops + fields["sampling_rate"] = sample.SampleData.SamplingRate + + for _, flowRecord := range sample.SampleData.FlowRecords { + if flowRecord.FlowData != nil { + tags2 := flowRecord.FlowData.GetTags() + fields2 := flowRecord.FlowData.GetFields() + for k, v := range tags { + tags2[k] = v + } + for k, v := range fields { + fields2[k] = v + } + m, err := metric.New("sflow", tags2, fields2, now) + if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + } + } + return metrics, nil +} diff --git a/plugins/inputs/sflow/packetdecoder.go b/plugins/inputs/sflow/packetdecoder.go new file mode 100644 index 000000000..9e6b2a4fe --- /dev/null +++ b/plugins/inputs/sflow/packetdecoder.go @@ -0,0 +1,483 @@ +package sflow + +import ( + "encoding/binary" + "fmt" + "io" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs/sflow/binaryio" + "github.com/pkg/errors" +) + +type PacketDecoder struct { + onPacket func(p *V5Format) + Log telegraf.Logger +} + +func NewDecoder() *PacketDecoder { + return &PacketDecoder{} +} + +func (d *PacketDecoder) debug(args ...interface{}) { + if d.Log != nil { + d.Log.Debug(args...) 
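+ // (the nil check above keeps debug safe in tests that build the
+ // decoder with NewDecoder() and never attach a telegraf logger)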
+ } +} + +func (d *PacketDecoder) OnPacket(f func(p *V5Format)) { + d.onPacket = f +} + +func (d *PacketDecoder) Decode(r io.Reader) error { + var err error + var packet *V5Format + for err == nil { + packet, err = d.DecodeOnePacket(r) + if err != nil { + break + } + d.onPacket(packet) + } + if err != nil && errors.Cause(err) == io.EOF { + return nil + } + return err +} + +type AddressType uint32 // must be uint32 + +const ( + AddressTypeUnknown AddressType = 0 + AddressTypeIPV4 AddressType = 1 + AddressTypeIPV6 AddressType = 2 +) + +func (d *PacketDecoder) DecodeOnePacket(r io.Reader) (*V5Format, error) { + p := &V5Format{} + err := read(r, &p.Version, "version") + if err != nil { + return nil, err + } + if p.Version != 5 { + return nil, fmt.Errorf("Version %d not supported, only version 5", p.Version) + } + var addressIPType AddressType + if err = read(r, &addressIPType, "address ip type"); err != nil { + return nil, err + } + switch addressIPType { + case AddressTypeUnknown: + p.AgentAddress.IP = make([]byte, 0) + case AddressTypeIPV4: + p.AgentAddress.IP = make([]byte, 4) + case AddressTypeIPV6: + p.AgentAddress.IP = make([]byte, 16) + default: + return nil, fmt.Errorf("Unknown address IP type %d", addressIPType) + } + if err = read(r, &p.AgentAddress.IP, "Agent Address IP"); err != nil { + return nil, err + } + if err = read(r, &p.SubAgentID, "SubAgentID"); err != nil { + return nil, err + } + if err = read(r, &p.SequenceNumber, "SequenceNumber"); err != nil { + return nil, err + } + if err = read(r, &p.Uptime, "Uptime"); err != nil { + return nil, err + } + + p.Samples, err = d.decodeSamples(r) + return p, err +} + +func (d *PacketDecoder) decodeSamples(r io.Reader) ([]Sample, error) { + result := []Sample{} + // # of samples + var numOfSamples uint32 + if err := read(r, &numOfSamples, "sample count"); err != nil { + return nil, err + } + + for i := 0; i < int(numOfSamples); i++ { + sam, err := d.decodeSample(r) + if err != nil { + return result, err + } + result = append(result, sam) + } + + return result, nil +} + +func (d *PacketDecoder) decodeSample(r io.Reader) (Sample, error) { + var err error + sam := Sample{} + if err := read(r, &sam.SampleType, "SampleType"); err != nil { + return sam, err + } + sampleDataLen := uint32(0) + if err := read(r, &sampleDataLen, "Sample data length"); err != nil { + return sam, err + } + mr := binaryio.MinReader(r, int64(sampleDataLen)) + defer mr.Close() + + switch sam.SampleType { + case SampleTypeFlowSample: + sam.SampleData, err = d.decodeFlowSample(mr) + case SampleTypeFlowSampleExpanded: + sam.SampleData, err = d.decodeFlowSampleExpanded(mr) + default: + d.debug("Unknown sample type: ", sam.SampleType) + } + return sam, err +} + +type InterfaceFormatType uint8 // sflow_version_5.txt line 1497 +const ( + InterfaceFormatTypeSingleInterface InterfaceFormatType = 0 + InterfaceFormatTypePacketDiscarded InterfaceFormatType = 1 +) + +func (d *PacketDecoder) decodeFlowSample(r io.Reader) (t SampleDataFlowSampleExpanded, err error) { + if err := read(r, &t.SequenceNumber, "SequenceNumber"); err != nil { + return t, err + } + var sourceID uint32 + if err := read(r, &sourceID, "SourceID"); err != nil { // source_id sflow_version_5.txt line: 1622 + return t, err + } + // split source id to source id type and source id index + t.SourceIDIndex = sourceID & 0x00ffffff // sflow_version_5.txt line: 1468 + t.SourceIDType = sourceID >> 24 // source_id_type sflow_version_5.txt Line 1465 + if err := read(r, &t.SamplingRate, "SamplingRate"); err != nil { + 
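+ // (read wraps failures with the field name, so a truncated packet
+ // surfaces as "failed to read SamplingRate")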
return t, err + } + if err := read(r, &t.SamplePool, "SamplePool"); err != nil { + return t, err + } + if err := read(r, &t.Drops, "Drops"); err != nil { // sflow_version_5.txt line 1636 + return t, err + } + if err := read(r, &t.InputIfIndex, "InputIfIndex"); err != nil { + return t, err + } + t.InputIfFormat = t.InputIfIndex >> 30 + t.InputIfIndex = t.InputIfIndex & 0x3FFFFFFF + + if err := read(r, &t.OutputIfIndex, "OutputIfIndex"); err != nil { + return t, err + } + t.OutputIfFormat = t.OutputIfIndex >> 30 + t.OutputIfIndex = t.OutputIfIndex & 0x3FFFFFFF + + switch t.SourceIDIndex { + case t.OutputIfIndex: + t.SampleDirection = "egress" + case t.InputIfIndex: + t.SampleDirection = "ingress" + } + + t.FlowRecords, err = d.decodeFlowRecords(r, t.SamplingRate) + return t, err +} + +func (d *PacketDecoder) decodeFlowSampleExpanded(r io.Reader) (t SampleDataFlowSampleExpanded, err error) { + if err := read(r, &t.SequenceNumber, "SequenceNumber"); err != nil { // sflow_version_5.txt line 1701 + return t, err + } + if err := read(r, &t.SourceIDType, "SourceIDType"); err != nil { // sflow_version_5.txt line: 1706 + 16878 + return t, err + } + if err := read(r, &t.SourceIDIndex, "SourceIDIndex"); err != nil { // sflow_version_5.txt line: 1689 + return t, err + } + if err := read(r, &t.SamplingRate, "SamplingRate"); err != nil { // sflow_version_5.txt line: 1707 + return t, err + } + if err := read(r, &t.SamplePool, "SamplePool"); err != nil { // sflow_version_5.txt line: 1708 + return t, err + } + if err := read(r, &t.Drops, "Drops"); err != nil { // sflow_version_5.txt line: 1712 + return t, err + } + if err := read(r, &t.InputIfFormat, "InputIfFormat"); err != nil { // sflow_version_5.txt line: 1727 + return t, err + } + if err := read(r, &t.InputIfIndex, "InputIfIndex"); err != nil { + return t, err + } + if err := read(r, &t.OutputIfFormat, "OutputIfFormat"); err != nil { // sflow_version_5.txt line: 1728 + return t, err + } + if err := read(r, &t.OutputIfIndex, "OutputIfIndex"); err != nil { + return t, err + } + + switch t.SourceIDIndex { + case t.OutputIfIndex: + t.SampleDirection = "egress" + case t.InputIfIndex: + t.SampleDirection = "ingress" + } + + t.FlowRecords, err = d.decodeFlowRecords(r, t.SamplingRate) + return t, err +} + +func (d *PacketDecoder) decodeFlowRecords(r io.Reader, samplingRate uint32) (recs []FlowRecord, err error) { + var flowDataLen uint32 + var count uint32 + if err := read(r, &count, "FlowRecord count"); err != nil { + return recs, err + } + for i := uint32(0); i < count; i++ { + fr := FlowRecord{} + if err := read(r, &fr.FlowFormat, "FlowFormat"); err != nil { // sflow_version_5.txt line 1597 + return recs, err + } + if err := read(r, &flowDataLen, "Flow data length"); err != nil { + return recs, err + } + + mr := binaryio.MinReader(r, int64(flowDataLen)) + + switch fr.FlowFormat { + case FlowFormatTypeRawPacketHeader: // sflow_version_5.txt line 1938 + fr.FlowData, err = d.decodeRawPacketHeaderFlowData(mr, samplingRate) + default: + d.debug("Unknown flow format: ", fr.FlowFormat) + } + if err != nil { + mr.Close() + return recs, err + } + + recs = append(recs, fr) + mr.Close() + } + + return recs, err +} + +func (d *PacketDecoder) decodeRawPacketHeaderFlowData(r io.Reader, samplingRate uint32) (h RawPacketHeaderFlowData, err error) { + if err := read(r, &h.HeaderProtocol, "HeaderProtocol"); err != nil { // sflow_version_5.txt line 1940 + return h, err + } + if err := read(r, &h.FrameLength, "FrameLength"); err != nil { // sflow_version_5.txt line 1942 + 
return h, err
+ }
+ h.Bytes = h.FrameLength * samplingRate
+
+ if err := read(r, &h.StrippedOctets, "StrippedOctets"); err != nil { // sflow_version_5.txt line 1967
+ return h, err
+ }
+ if err := read(r, &h.HeaderLength, "HeaderLength"); err != nil {
+ return h, err
+ }
+
+ mr := binaryio.MinReader(r, int64(h.HeaderLength))
+ defer mr.Close()
+
+ switch h.HeaderProtocol {
+ case HeaderProtocolTypeEthernetISO88023:
+ h.Header, err = d.decodeEthHeader(mr)
+ default:
+ d.debug("Unknown header protocol type: ", h.HeaderProtocol)
+ }
+
+ return h, err
+}
+
+// decodeEthHeader decodes an Ethernet frame header
+// according to https://en.wikipedia.org/wiki/Ethernet_frame
+func (d *PacketDecoder) decodeEthHeader(r io.Reader) (h EthHeader, err error) {
+ // we may have to read out StrippedOctets bytes and throw them away first?
+ if err := read(r, &h.DestinationMAC, "DestinationMAC"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.SourceMAC, "SourceMAC"); err != nil {
+ return h, err
+ }
+ var tagOrEType uint16
+ if err := read(r, &tagOrEType, "tagOrEtype"); err != nil {
+ return h, err
+ }
+ switch tagOrEType {
+ case 0x8100: // 802.1Q VLAN tag (TPID 0x8100): skip the 2-byte TCI, then read the real EtherType
+ var discard uint16
+ if err := read(r, &discard, "VLAN TCI"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.EtherTypeCode, "EtherTypeCode"); err != nil {
+ return h, err
+ }
+ default:
+ h.EtherTypeCode = tagOrEType
+ }
+ h.EtherType = ETypeMap[h.EtherTypeCode]
+ switch h.EtherType {
+ case "IPv4":
+ h.IPHeader, err = d.decodeIPv4Header(r)
+ case "IPv6":
+ h.IPHeader, err = d.decodeIPv6Header(r)
+ default: // other EtherTypes are not decoded further
+ }
+ if err != nil {
+ return h, err
+ }
+ return h, err
+}
+
+// https://en.wikipedia.org/wiki/IPv4#Header
+func (d *PacketDecoder) decodeIPv4Header(r io.Reader) (h IPV4Header, err error) {
+ if err := read(r, &h.Version, "Version"); err != nil {
+ return h, err
+ }
+ h.InternetHeaderLength = h.Version & 0x0F
+ h.Version = h.Version & 0xF0 // note: the version is kept in the high nibble (0x45 -> 0x40)
+ if err := read(r, &h.DSCP, "DSCP"); err != nil {
+ return h, err
+ }
+ h.ECN = h.DSCP & 0x03
+ h.DSCP = h.DSCP >> 2
+ if err := read(r, &h.TotalLength, "TotalLength"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.Identification, "Identification"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.FragmentOffset, "FragmentOffset"); err != nil {
+ return h, err
+ }
+ h.Flags = uint8(h.FragmentOffset >> 13)
+ h.FragmentOffset = h.FragmentOffset & 0x1FFF
+ if err := read(r, &h.TTL, "TTL"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.Protocol, "Protocol"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.HeaderChecksum, "HeaderChecksum"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.SourceIP, "SourceIP"); err != nil {
+ return h, err
+ }
+ if err := read(r, &h.DestIP, "DestIP"); err != nil {
+ return h, err
+ }
+ switch h.Protocol {
+ case IPProtocolTCP:
+ h.ProtocolHeader, err = d.decodeTCPHeader(r)
+ case IPProtocolUDP:
+ h.ProtocolHeader, err = d.decodeUDPHeader(r)
+ default:
+ d.debug("Unknown IP protocol: ", h.Protocol)
+ }
+ return h, err
+}
+
+// https://en.wikipedia.org/wiki/IPv6_packet
+func (d *PacketDecoder) decodeIPv6Header(r io.Reader) (h IPV6Header, err error) {
+ var fourByteBlock uint32
+ if err := read(r, &fourByteBlock, "IPv6 header octet 0"); err != nil {
+ return h, err
+ }
+ version := fourByteBlock >> 28
+ if version != 0x6 {
+ return h, fmt.Errorf("Unexpected IPv6 header version 0x%x", version)
+ }
+ h.DSCP = uint8((fourByteBlock & 0xFC00000) >> 22)
+ h.ECN = uint8((fourByteBlock & 0x300000) >> 20)
+
+ // 
flowLabel := fourByteBlock & 0xFFFFF // not currently being used. + if err := read(r, &h.PayloadLength, "PayloadLength"); err != nil { + return h, err + } + if err := read(r, &h.NextHeaderProto, "NextHeaderProto"); err != nil { + return h, err + } + if err := read(r, &h.HopLimit, "HopLimit"); err != nil { + return h, err + } + if err := read(r, &h.SourceIP, "SourceIP"); err != nil { + return h, err + } + if err := read(r, &h.DestIP, "DestIP"); err != nil { + return h, err + } + switch h.NextHeaderProto { + case IPProtocolTCP: + h.ProtocolHeader, err = d.decodeTCPHeader(r) + case IPProtocolUDP: + h.ProtocolHeader, err = d.decodeUDPHeader(r) + default: + // not handled + d.debug("Unknown IP protocol: ", h.NextHeaderProto) + } + return h, err +} + +// https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_segment_structure +func (d *PacketDecoder) decodeTCPHeader(r io.Reader) (h TCPHeader, err error) { + if err := read(r, &h.SourcePort, "SourcePort"); err != nil { + return h, err + } + if err := read(r, &h.DestinationPort, "DestinationPort"); err != nil { + return h, err + } + if err := read(r, &h.Sequence, "Sequence"); err != nil { + return h, err + } + if err := read(r, &h.AckNumber, "AckNumber"); err != nil { + return h, err + } + // Next up: bit reading! + // data offset 4 bits + // reserved 3 bits + // flags 9 bits + var dataOffsetAndReservedAndFlags uint16 + if err := read(r, &dataOffsetAndReservedAndFlags, "TCP Header Octet offset 12"); err != nil { + return h, err + } + h.TCPHeaderLength = uint8((dataOffsetAndReservedAndFlags >> 12) * 4) + h.Flags = dataOffsetAndReservedAndFlags & 0x1FF + // done bit reading + + if err := read(r, &h.TCPWindowSize, "TCPWindowSize"); err != nil { + return h, err + } + if err := read(r, &h.Checksum, "Checksum"); err != nil { + return h, err + } + if err := read(r, &h.TCPUrgentPointer, "TCPUrgentPointer"); err != nil { + return h, err + } + + return h, err +} + +func (d *PacketDecoder) decodeUDPHeader(r io.Reader) (h UDPHeader, err error) { + if err := read(r, &h.SourcePort, "SourcePort"); err != nil { + return h, err + } + if err := read(r, &h.DestinationPort, "DestinationPort"); err != nil { + return h, err + } + if err := read(r, &h.UDPLength, "UDPLength"); err != nil { + return h, err + } + if err := read(r, &h.Checksum, "Checksum"); err != nil { + return h, err + } + return h, err +} + +func read(r io.Reader, data interface{}, name string) error { + err := binary.Read(r, binary.BigEndian, data) + return errors.Wrapf(err, "failed to read %s", name) +} diff --git a/plugins/inputs/sflow/packetdecoder_test.go b/plugins/inputs/sflow/packetdecoder_test.go new file mode 100644 index 000000000..f078eaf31 --- /dev/null +++ b/plugins/inputs/sflow/packetdecoder_test.go @@ -0,0 +1,207 @@ +package sflow + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestUDPHeader(t *testing.T) { + octets := bytes.NewBuffer([]byte{ + 0x00, 0x01, // src_port + 0x00, 0x02, // dst_port + 0x00, 0x03, // udp_length + 0x00, 0x00, // checksum + }) + + dc := NewDecoder() + actual, err := dc.decodeUDPHeader(octets) + require.NoError(t, err) + + expected := UDPHeader{ + SourcePort: 1, + DestinationPort: 2, + UDPLength: 3, + } + + require.Equal(t, expected, actual) +} + +func BenchmarkUDPHeader(b *testing.B) { + octets := bytes.NewBuffer([]byte{ + 0x00, 0x01, // src_port + 0x00, 0x02, // dst_port + 0x00, 0x03, // udp_length + 0x00, 0x00, // checksum + }) + + dc := NewDecoder() + + b.ResetTimer() + for n := 0; n < b.N; n++ { + 
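+ // note: octets is drained on the first iteration, so later passes
+ // mostly measure the error path; both return values are discarded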
dc.decodeUDPHeader(octets) + } +} + +func TestIPv4Header(t *testing.T) { + octets := bytes.NewBuffer( + []byte{ + 0x45, // version + IHL + 0x00, // ip_dscp + ip_ecn + 0x00, 0x00, // total length + 0x00, 0x00, // identification + 0x00, 0x00, // flags + frag offset + 0x00, // ttl + 0x11, // protocol; 0x11 = udp + 0x00, 0x00, // header checksum + 0x7f, 0x00, 0x00, 0x01, // src ip + 0x7f, 0x00, 0x00, 0x02, // dst ip + 0x00, 0x01, // src_port + 0x00, 0x02, // dst_port + 0x00, 0x03, // udp_length + 0x00, 0x00, // checksum + }, + ) + dc := NewDecoder() + actual, err := dc.decodeIPv4Header(octets) + require.NoError(t, err) + + expected := IPV4Header{ + Version: 0x40, + InternetHeaderLength: 0x05, + DSCP: 0, + ECN: 0, + TotalLength: 0, + Identification: 0, + Flags: 0, + FragmentOffset: 0, + TTL: 0, + Protocol: 0x11, + HeaderChecksum: 0, + SourceIP: [4]byte{127, 0, 0, 1}, + DestIP: [4]byte{127, 0, 0, 2}, + ProtocolHeader: UDPHeader{ + SourcePort: 1, + DestinationPort: 2, + UDPLength: 3, + Checksum: 0, + }, + } + + require.Equal(t, expected, actual) +} + +// Using the same Directive instance, prior paths through the parse tree should +// not affect the latest parse. +func TestIPv4HeaderSwitch(t *testing.T) { + octets := bytes.NewBuffer( + []byte{ + 0x45, // version + IHL + 0x00, // ip_dscp + ip_ecn + 0x00, 0x00, // total length + 0x00, 0x00, // identification + 0x00, 0x00, // flags + frag offset + 0x00, // ttl + 0x11, // protocol; 0x11 = udp + 0x00, 0x00, // header checksum + 0x7f, 0x00, 0x00, 0x01, // src ip + 0x7f, 0x00, 0x00, 0x02, // dst ip + 0x00, 0x01, // src_port + 0x00, 0x02, // dst_port + 0x00, 0x03, // udp_length + 0x00, 0x00, // checksum + }, + ) + dc := NewDecoder() + _, err := dc.decodeIPv4Header(octets) + require.NoError(t, err) + + octets = bytes.NewBuffer( + []byte{ + 0x45, // version + IHL + 0x00, // ip_dscp + ip_ecn + 0x00, 0x00, // total length + 0x00, 0x00, // identification + 0x00, 0x00, // flags + frag offset + 0x00, // ttl + 0x06, // protocol; 0x06 = tcp + 0x00, 0x00, // header checksum + 0x7f, 0x00, 0x00, 0x01, // src ip + 0x7f, 0x00, 0x00, 0x02, // dst ip + 0x00, 0x01, // src_port + 0x00, 0x02, // dst_port + 0x00, 0x00, 0x00, 0x00, // sequence + 0x00, 0x00, 0x00, 0x00, // ack_number + 0x00, 0x00, // tcp_header_length + 0x00, 0x00, // tcp_window_size + 0x00, 0x00, // checksum + 0x00, 0x00, // tcp_urgent_pointer + }, + ) + dc = NewDecoder() + actual, err := dc.decodeIPv4Header(octets) + require.NoError(t, err) + + expected := IPV4Header{ + Version: 64, + InternetHeaderLength: 5, + Protocol: 6, + SourceIP: [4]byte{127, 0, 0, 1}, + DestIP: [4]byte{127, 0, 0, 2}, + ProtocolHeader: TCPHeader{ + SourcePort: 1, + DestinationPort: 2, + }, + } + + require.Equal(t, expected, actual) +} + +func TestUnknownProtocol(t *testing.T) { + octets := bytes.NewBuffer( + []byte{ + 0x45, // version + IHL + 0x00, // ip_dscp + ip_ecn + 0x00, 0x00, // total length + 0x00, 0x00, // identification + 0x00, 0x00, // flags + frag offset + 0x00, // ttl + 0x99, // protocol + 0x00, 0x00, // header checksum + 0x7f, 0x00, 0x00, 0x01, // src ip + 0x7f, 0x00, 0x00, 0x02, // dst ip + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + }, + ) + dc := NewDecoder() + actual, err := dc.decodeIPv4Header(octets) + require.NoError(t, err) + + expected := IPV4Header{ + Version: 64, + InternetHeaderLength: 5, + Protocol: 153, + SourceIP: [4]byte{127, 0, 0, 1}, + DestIP: [4]byte{127, 0, 0, 2}, + } + + require.Equal(t, expected, 
actual) +} diff --git a/plugins/inputs/sflow/sflow.go b/plugins/inputs/sflow/sflow.go index 7d113dd1e..2e3fbc0cf 100644 --- a/plugins/inputs/sflow/sflow.go +++ b/plugins/inputs/sflow/sflow.go @@ -13,7 +13,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/inputs/sflow/decoder" ) const sampleConfig = ` @@ -38,11 +37,11 @@ type SFlow struct { Log telegraf.Logger `toml:"-"` - addr net.Addr - decoderOpts decoder.Directive - closer io.Closer - cancel context.CancelFunc - wg sync.WaitGroup + addr net.Addr + decoder *PacketDecoder + closer io.Closer + cancel context.CancelFunc + wg sync.WaitGroup } // Description answers a description of this input plugin @@ -56,14 +55,24 @@ func (s *SFlow) SampleConfig() string { } func (s *SFlow) Init() error { - - config := NewDefaultV5FormatOptions() - s.decoderOpts = V5Format(config) + s.decoder = NewDecoder() + s.decoder.Log = s.Log return nil } // Start starts this sFlow listener listening on the configured network for sFlow packets func (s *SFlow) Start(acc telegraf.Accumulator) error { + s.decoder.OnPacket(func(p *V5Format) { + metrics, err := makeMetrics(p) + if err != nil { + s.Log.Errorf("Failed to make metric from packet: %s", err) + return + } + for _, m := range metrics { + acc.AddMetric(m) + } + }) + u, err := url.Parse(s.ServiceAddress) if err != nil { return err @@ -122,14 +131,9 @@ func (s *SFlow) read(acc telegraf.Accumulator, conn net.PacketConn) { } func (s *SFlow) process(acc telegraf.Accumulator, buf []byte) { - decoder := decoder.NewDecodeContext() - if err := decoder.Decode(s.decoderOpts, bytes.NewBuffer(buf)); err != nil { - acc.AddError(fmt.Errorf("unable to parse incoming packet: %s", err)) - } - metrics := decoder.GetMetrics() - for _, m := range metrics { - acc.AddMetric(m) + if err := s.decoder.Decode(bytes.NewBuffer(buf)); err != nil { + acc.AddError(fmt.Errorf("unable to parse incoming packet: %s", err)) } } diff --git a/plugins/inputs/sflow/sflow_test.go b/plugins/inputs/sflow/sflow_test.go index 90f3a7c6d..2df56c2ae 100644 --- a/plugins/inputs/sflow/sflow_test.go +++ b/plugins/inputs/sflow/sflow_test.go @@ -44,8 +44,6 @@ func TestSFlow(t *testing.T) { "ether_type": "IPv4", "header_protocol": "ETHERNET-ISO88023", "input_ifindex": "510", - "ip_dscp": "0", - "ip_ecn": "0", "output_ifindex": "512", "sample_direction": "ingress", "source_id_index": "510", @@ -65,6 +63,8 @@ func TestSFlow(t *testing.T) { "ip_ttl": uint64(64), "sampling_rate": uint64(1024), "udp_length": uint64(229), + "ip_dscp": "0", + "ip_ecn": "0", }, time.Unix(0, 0), ), @@ -78,8 +78,6 @@ func TestSFlow(t *testing.T) { "ether_type": "IPv4", "header_protocol": "ETHERNET-ISO88023", "input_ifindex": "528", - "ip_dscp": "0", - "ip_ecn": "0", "output_ifindex": "512", "sample_direction": "ingress", "source_id_index": "528", @@ -99,6 +97,8 @@ func TestSFlow(t *testing.T) { "ip_ttl": uint64(63), "sampling_rate": uint64(16384), "udp_length": uint64(109), + "ip_dscp": "0", + "ip_ecn": "0", }, time.Unix(0, 0), ), diff --git a/plugins/inputs/sflow/types.go b/plugins/inputs/sflow/types.go new file mode 100644 index 000000000..a48857803 --- /dev/null +++ b/plugins/inputs/sflow/types.go @@ -0,0 +1,285 @@ +package sflow + +import ( + "net" + "strconv" +) + +const ( + AddressTypeIPv6 uint32 = 2 // sflow_version_5.txt line: 1384 + AddressTypeIPv4 uint32 = 1 // sflow_version_5.txt line: 1383 + + IPProtocolTCP uint8 = 6 + IPProtocolUDP uint8 = 17 + + 
metricName = "sflow"
+)
+
+var ETypeMap = map[uint16]string{
+	0x0800: "IPv4",
+	0x86DD: "IPv6",
+}
+
+var IPvMap = map[uint32]string{
+	1: "IPV4", // sflow_version_5.txt line: 1383
+	2: "IPV6", // sflow_version_5.txt line: 1384
+}
+
+type ContainsMetricData interface {
+	GetTags() map[string]string
+	GetFields() map[string]interface{}
+}
+
+// V5Format represents a decoded sFlow v5 packet, in accordance
+// with the sFlow v5 specification at https://sflow.org/sflow_version_5.txt
+type V5Format struct {
+	Version        uint32
+	AgentAddress   net.IPAddr
+	SubAgentID     uint32
+	SequenceNumber uint32
+	Uptime         uint32
+	Samples        []Sample
+}
+
+type SampleType uint32
+
+const (
+	SampleTypeFlowSample         SampleType = 1 // sflow_version_5.txt line: 1614
+	SampleTypeFlowSampleExpanded SampleType = 3 // sflow_version_5.txt line: 1698
+)
+
+type SampleData interface{}
+
+type Sample struct {
+	SampleType SampleType
+	SampleData SampleDataFlowSampleExpanded
+}
+
+type SampleDataFlowSampleExpanded struct {
+	SequenceNumber  uint32
+	SourceIDType    uint32
+	SourceIDIndex   uint32
+	SamplingRate    uint32
+	SamplePool      uint32
+	Drops           uint32
+	SampleDirection string // ingress/egress
+	InputIfFormat   uint32
+	InputIfIndex    uint32
+	OutputIfFormat  uint32
+	OutputIfIndex   uint32
+	FlowRecords     []FlowRecord
+}
+
+type FlowFormatType uint32
+
+const (
+	FlowFormatTypeRawPacketHeader FlowFormatType = 1 // sflow_version_5.txt line: 1938
+)
+
+type FlowData ContainsMetricData
+
+type FlowRecord struct {
+	FlowFormat FlowFormatType
+	FlowData   FlowData
+}
+
+type HeaderProtocolType uint32
+
+const (
+	HeaderProtocolTypeEthernetISO88023  HeaderProtocolType = 1
+	HeaderProtocolTypeISO88024TokenBus  HeaderProtocolType = 2
+	HeaderProtocolTypeISO88025TokenRing HeaderProtocolType = 3
+	HeaderProtocolTypeFDDI              HeaderProtocolType = 4
+	HeaderProtocolTypeFrameRelay        HeaderProtocolType = 5
+	HeaderProtocolTypeX25               HeaderProtocolType = 6
+	HeaderProtocolTypePPP               HeaderProtocolType = 7
+	HeaderProtocolTypeSMDS              HeaderProtocolType = 8
+	HeaderProtocolTypeAAL5              HeaderProtocolType = 9
+	HeaderProtocolTypeAAL5IP            HeaderProtocolType = 10 /* e.g. 
Cisco AAL5 mux */ + HeaderProtocolTypeIPv4 HeaderProtocolType = 11 + HeaderProtocolTypeIPv6 HeaderProtocolType = 12 + HeaderProtocolTypeMPLS HeaderProtocolType = 13 + HeaderProtocolTypePOS HeaderProtocolType = 14 /* RFC 1662, 2615 */ +) + +var HeaderProtocolMap = map[HeaderProtocolType]string{ + HeaderProtocolTypeEthernetISO88023: "ETHERNET-ISO88023", // sflow_version_5.txt line: 1920 +} + +type Header ContainsMetricData + +type RawPacketHeaderFlowData struct { + HeaderProtocol HeaderProtocolType + FrameLength uint32 + Bytes uint32 + StrippedOctets uint32 + HeaderLength uint32 + Header Header +} + +func (h RawPacketHeaderFlowData) GetTags() map[string]string { + t := h.Header.GetTags() + t["header_protocol"] = HeaderProtocolMap[h.HeaderProtocol] + return t +} +func (h RawPacketHeaderFlowData) GetFields() map[string]interface{} { + f := h.Header.GetFields() + f["bytes"] = h.Bytes + f["frame_length"] = h.FrameLength + f["header_length"] = h.HeaderLength + return f +} + +type IPHeader ContainsMetricData + +type EthHeader struct { + DestinationMAC [6]byte + SourceMAC [6]byte + TagProtocolIdentifier uint16 + TagControlInformation uint16 + EtherTypeCode uint16 + EtherType string + IPHeader IPHeader +} + +func (h EthHeader) GetTags() map[string]string { + t := h.IPHeader.GetTags() + t["src_mac"] = net.HardwareAddr(h.SourceMAC[:]).String() + t["dst_mac"] = net.HardwareAddr(h.DestinationMAC[:]).String() + t["ether_type"] = h.EtherType + return t +} +func (h EthHeader) GetFields() map[string]interface{} { + return h.IPHeader.GetFields() +} + +type ProtocolHeader ContainsMetricData + +// https://en.wikipedia.org/wiki/IPv4#Header +type IPV4Header struct { + Version uint8 // 4 bit + InternetHeaderLength uint8 // 4 bit + DSCP uint8 + ECN uint8 + TotalLength uint16 + Identification uint16 + Flags uint8 + FragmentOffset uint16 + TTL uint8 + Protocol uint8 // https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers + HeaderChecksum uint16 + SourceIP [4]byte + DestIP [4]byte + ProtocolHeader ProtocolHeader +} + +func (h IPV4Header) GetTags() map[string]string { + var t map[string]string + if h.ProtocolHeader != nil { + t = h.ProtocolHeader.GetTags() + } else { + t = map[string]string{} + } + t["src_ip"] = net.IP(h.SourceIP[:]).String() + t["dst_ip"] = net.IP(h.DestIP[:]).String() + return t +} +func (h IPV4Header) GetFields() map[string]interface{} { + var f map[string]interface{} + if h.ProtocolHeader != nil { + f = h.ProtocolHeader.GetFields() + } else { + f = map[string]interface{}{} + } + f["ip_dscp"] = strconv.FormatUint(uint64(h.DSCP), 10) + f["ip_ecn"] = strconv.FormatUint(uint64(h.ECN), 10) + f["ip_flags"] = h.Flags + f["ip_fragment_offset"] = h.FragmentOffset + f["ip_total_length"] = h.TotalLength + f["ip_ttl"] = h.TTL + return f +} + +// https://en.wikipedia.org/wiki/IPv6_packet +type IPV6Header struct { + DSCP uint8 + ECN uint8 + PayloadLength uint16 + NextHeaderProto uint8 // tcp/udp? 
+ HopLimit uint8 + SourceIP [16]byte + DestIP [16]byte + ProtocolHeader ProtocolHeader +} + +func (h IPV6Header) GetTags() map[string]string { + var t map[string]string + if h.ProtocolHeader != nil { + t = h.ProtocolHeader.GetTags() + } else { + t = map[string]string{} + } + t["src_ip"] = net.IP(h.SourceIP[:]).String() + t["dst_ip"] = net.IP(h.DestIP[:]).String() + return t +} +func (h IPV6Header) GetFields() map[string]interface{} { + var f map[string]interface{} + if h.ProtocolHeader != nil { + f = h.ProtocolHeader.GetFields() + } else { + f = map[string]interface{}{} + } + f["ip_dscp"] = strconv.FormatUint(uint64(h.DSCP), 10) + f["ip_ecn"] = strconv.FormatUint(uint64(h.ECN), 10) + f["payload_length"] = h.PayloadLength + return f +} + +// https://en.wikipedia.org/wiki/Transmission_Control_Protocol +type TCPHeader struct { + SourcePort uint16 + DestinationPort uint16 + Sequence uint32 + AckNumber uint32 + TCPHeaderLength uint8 + Flags uint16 + TCPWindowSize uint16 + Checksum uint16 + TCPUrgentPointer uint16 +} + +func (h TCPHeader) GetTags() map[string]string { + t := map[string]string{ + "dst_port": strconv.FormatUint(uint64(h.DestinationPort), 10), + "src_port": strconv.FormatUint(uint64(h.SourcePort), 10), + } + return t +} +func (h TCPHeader) GetFields() map[string]interface{} { + return map[string]interface{}{ + "tcp_header_length": h.TCPHeaderLength, + "tcp_urgent_pointer": h.TCPUrgentPointer, + "tcp_window_size": h.TCPWindowSize, + } +} + +type UDPHeader struct { + SourcePort uint16 + DestinationPort uint16 + UDPLength uint16 + Checksum uint16 +} + +func (h UDPHeader) GetTags() map[string]string { + t := map[string]string{ + "dst_port": strconv.FormatUint(uint64(h.DestinationPort), 10), + "src_port": strconv.FormatUint(uint64(h.SourcePort), 10), + } + return t +} +func (h UDPHeader) GetFields() map[string]interface{} { + return map[string]interface{}{ + "udp_length": h.UDPLength, + } +} From 59acbd4f1305909ac773ab8b4b621413907cb8bc Mon Sep 17 00:00:00 2001 From: William Austin Date: Thu, 30 Apr 2020 13:21:34 -0700 Subject: [PATCH 1728/1815] Add ContentEncoder to socket_writer for datagram sockets (#7417) --- plugins/outputs/socket_writer/README.md | 5 +++ .../outputs/socket_writer/socket_writer.go | 20 ++++++++++++ .../socket_writer/socket_writer_test.go | 32 +++++++++++++------ 3 files changed, 48 insertions(+), 9 deletions(-) diff --git a/plugins/outputs/socket_writer/README.md b/plugins/outputs/socket_writer/README.md index 149cda2a6..5dc9d0246 100644 --- a/plugins/outputs/socket_writer/README.md +++ b/plugins/outputs/socket_writer/README.md @@ -32,6 +32,11 @@ It can output data in any of the [supported output formats](https://github.com/i ## Defaults to the OS configuration. # keep_alive_period = "5m" + ## Content encoding for message payloads, can be set to "gzip" or to + ## "identity" to apply no encoding. + ## + # content_encoding = "identity" + ## Data format to generate. 
## Each data format has its own unique set of configuration options, read ## more about them here: diff --git a/plugins/outputs/socket_writer/socket_writer.go b/plugins/outputs/socket_writer/socket_writer.go index 833122dfc..eb286d919 100644 --- a/plugins/outputs/socket_writer/socket_writer.go +++ b/plugins/outputs/socket_writer/socket_writer.go @@ -15,12 +15,15 @@ import ( ) type SocketWriter struct { + ContentEncoding string `toml:"content_encoding"` Address string KeepAlivePeriod *internal.Duration tlsint.ClientConfig serializers.Serializer + encoder internal.ContentEncoder + net.Conn } @@ -55,6 +58,11 @@ func (sw *SocketWriter) SampleConfig() string { ## Defaults to the OS configuration. # keep_alive_period = "5m" + ## Content encoding for packet-based connections (i.e. UDP, unixgram). + ## Can be set to "gzip" or to "identity" to apply no encoding. + ## + # content_encoding = "identity" + ## Data format to generate. ## Each data format has its own unique set of configuration options, read ## more about them here: @@ -91,6 +99,11 @@ func (sw *SocketWriter) Connect() error { if err := sw.setKeepAlive(c); err != nil { log.Printf("unable to configure keep alive (%s): %s", sw.Address, err) } + //set encoder + sw.encoder, err = internal.NewContentEncoder(sw.ContentEncoding) + if err != nil { + return err + } sw.Conn = c return nil @@ -130,6 +143,13 @@ func (sw *SocketWriter) Write(metrics []telegraf.Metric) error { log.Printf("D! [outputs.socket_writer] Could not serialize metric: %v", err) continue } + + bs, err = sw.encoder.Encode(bs) + if err != nil { + log.Printf("D! [outputs.socket_writer] Could not encode metric: %v", err) + continue + } + if _, err := sw.Conn.Write(bs); err != nil { //TODO log & keep going with remaining strings if err, ok := err.(net.Error); !ok || !err.Temporary() { diff --git a/plugins/outputs/socket_writer/socket_writer_test.go b/plugins/outputs/socket_writer/socket_writer_test.go index f7eb159ea..14b25e6c5 100644 --- a/plugins/outputs/socket_writer/socket_writer_test.go +++ b/plugins/outputs/socket_writer/socket_writer_test.go @@ -2,7 +2,6 @@ package socket_writer import ( "bufio" - "bytes" "io/ioutil" "net" "os" @@ -88,8 +87,10 @@ func testSocketWriter_stream(t *testing.T, sw *SocketWriter, lconn net.Conn) { metrics := []telegraf.Metric{} metrics = append(metrics, testutil.TestMetric(1, "test")) mbs1out, _ := sw.Serialize(metrics[0]) + mbs1out, _ = sw.encoder.Encode(mbs1out) metrics = append(metrics, testutil.TestMetric(2, "test")) mbs2out, _ := sw.Serialize(metrics[1]) + mbs2out, _ = sw.encoder.Encode(mbs2out) err := sw.Write(metrics) require.NoError(t, err) @@ -108,8 +109,12 @@ func testSocketWriter_packet(t *testing.T, sw *SocketWriter, lconn net.PacketCon metrics := []telegraf.Metric{} metrics = append(metrics, testutil.TestMetric(1, "test")) mbs1out, _ := sw.Serialize(metrics[0]) + mbs1out, _ = sw.encoder.Encode(mbs1out) + mbs1str := string(mbs1out) metrics = append(metrics, testutil.TestMetric(2, "test")) mbs2out, _ := sw.Serialize(metrics[1]) + mbs2out, _ = sw.encoder.Encode(mbs2out) + mbs2str := string(mbs2out) err := sw.Write(metrics) require.NoError(t, err) @@ -119,17 +124,12 @@ func testSocketWriter_packet(t *testing.T, sw *SocketWriter, lconn net.PacketCon for len(mstrins) < 2 { n, _, err := lconn.ReadFrom(buf) require.NoError(t, err) - for _, bs := range bytes.Split(buf[:n], []byte{'\n'}) { - if len(bs) == 0 { - continue - } - mstrins = append(mstrins, string(bs)+"\n") - } + mstrins = append(mstrins, string(buf[:n])) } require.Len(t, mstrins, 2) - 
assert.Equal(t, string(mbs1out), mstrins[0])
-	assert.Equal(t, string(mbs2out), mstrins[1])
+	assert.Equal(t, mbs1str, mstrins[0])
+	assert.Equal(t, mbs2str, mstrins[1])
 }
 
 func TestSocketWriter_Write_err(t *testing.T) {
@@ -195,3 +195,17 @@ func TestSocketWriter_Write_reconnect(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, string(mbsout), string(buf[:n]))
 }
+
+func TestSocketWriter_udp_gzip(t *testing.T) {
+	listener, err := net.ListenPacket("udp", "127.0.0.1:0")
+	require.NoError(t, err)
+
+	sw := newSocketWriter()
+	sw.Address = "udp://" + listener.LocalAddr().String()
+	sw.ContentEncoding = "gzip"
+
+	err = sw.Connect()
+	require.NoError(t, err)
+
+	testSocketWriter_packet(t, sw, listener)
+}
From b1ae81bb75464302aa4586ceaded8921ffaa42f1 Mon Sep 17 00:00:00 2001
From: Andrés Álvarez <1671935+kir4h@users.noreply.github.com>
Date: Fri, 1 May 2020 20:21:41 +0200
Subject: [PATCH 1729/1815] Add filepath processor plugin (#7418)

---
 README.md                                    |   1 +
 plugins/processors/all/all.go                |   1 +
 plugins/processors/filepath/README.md        | 207 ++++++++++++++++++
 plugins/processors/filepath/filepath.go      | 150 +++++++++++++
 plugins/processors/filepath/filepath_test.go |  70 ++++++
 .../filepath/filepath_test_helpers.go        | 100 +++++++++
 .../filepath/filepath_windows_test.go        |  43 ++++
 7 files changed, 572 insertions(+)
 create mode 100644 plugins/processors/filepath/README.md
 create mode 100644 plugins/processors/filepath/filepath.go
 create mode 100644 plugins/processors/filepath/filepath_test.go
 create mode 100644 plugins/processors/filepath/filepath_test_helpers.go
 create mode 100644 plugins/processors/filepath/filepath_windows_test.go

diff --git a/README.md b/README.md
index 571272b32..ec203c1f2 100644
--- a/README.md
+++ b/README.md
@@ -363,6 +363,7 @@ For documentation on the latest development code see the [documentation index][d
 * [date](/plugins/processors/date)
 * [dedup](/plugins/processors/dedup)
 * [enum](/plugins/processors/enum)
+* [filepath](/plugins/processors/filepath)
 * [override](/plugins/processors/override)
 * [parser](/plugins/processors/parser)
 * [pivot](/plugins/processors/pivot)
diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go
index ab6746c62..5ff977324 100644
--- a/plugins/processors/all/all.go
+++ b/plugins/processors/all/all.go
@@ -6,6 +6,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/processors/date"
 	_ "github.com/influxdata/telegraf/plugins/processors/dedup"
 	_ "github.com/influxdata/telegraf/plugins/processors/enum"
+	_ "github.com/influxdata/telegraf/plugins/processors/filepath"
 	_ "github.com/influxdata/telegraf/plugins/processors/override"
 	_ "github.com/influxdata/telegraf/plugins/processors/parser"
 	_ "github.com/influxdata/telegraf/plugins/processors/pivot"
diff --git a/plugins/processors/filepath/README.md b/plugins/processors/filepath/README.md
new file mode 100644
index 000000000..f4473ff62
--- /dev/null
+++ b/plugins/processors/filepath/README.md
@@ -0,0 +1,207 @@
+# Filepath Processor Plugin
+
+The `filepath` processor plugin maps certain Go functions from [path/filepath](https://golang.org/pkg/path/filepath/)
+onto tag and field values. Values can be modified in place or stored in another key.
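For orientation, here is a small standalone Go sketch (illustrative only, separate from the plugin source) of what the underlying `path/filepath` calls do to a sample path; the stem line mirrors the plugin's own `stemFilePath` helper shown later in `filepath.go`:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	p := "/var/log/batch/ajob.log"

	fmt.Println(filepath.Base(p)) // ajob.log
	fmt.Println(filepath.Dir(p))  // /var/log/batch
	fmt.Println(filepath.Clean("/var/log//dummy/../batch/ajob.log")) // /var/log/batch/ajob.log

	rel, _ := filepath.Rel("/var/log", p)
	fmt.Println(rel) // batch/ajob.log

	// ToSlash only changes anything on Windows, where the separator is '\'.
	fmt.Println(filepath.ToSlash(p)) // /var/log/batch/ajob.log

	// "stem": the final element without its extension, as the plugin implements it.
	fmt.Println(strings.TrimSuffix(filepath.Base(p), filepath.Ext(p))) // ajob
}
```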
+
+Implemented functions are:
+
+* [Base](https://golang.org/pkg/path/filepath/#Base) (accessible through `[[processors.filepath.basename]]`)
+* [Rel](https://golang.org/pkg/path/filepath/#Rel) (accessible through `[[processors.filepath.rel]]`)
+* [Dir](https://golang.org/pkg/path/filepath/#Dir) (accessible through `[[processors.filepath.dirname]]`)
+* [Clean](https://golang.org/pkg/path/filepath/#Clean) (accessible through `[[processors.filepath.clean]]`)
+* [ToSlash](https://golang.org/pkg/path/filepath/#ToSlash) (accessible through `[[processors.filepath.toslash]]`)
+
+On top of that, the plugin provides an extra function to retrieve the final path component without its extension. This
+function is accessible through the `[[processors.filepath.stem]]` configuration item.
+
+Please note that, in this implementation, these functions are processed in the order in which they appear above
+(except for `stem`, which is applied first).
+
+Specify the `tag` and/or `field` that you want processed in each section and optionally a `dest` if you want the result
+stored in a new tag or field.
+
+If you plan to apply multiple transformations to the same `tag`/`field`, bear in mind the processing order stated above.
+
+## Configuration
+
+```toml
+[[processors.filepath]]
+  ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
+  # [[processors.filepath.basename]]
+  # tag = "path"
+  # dest = "basepath"
+
+  ## Treat the field value as a path and keep all but the last element of path, typically the path's directory
+  # [[processors.filepath.dirname]]
+  # field = "path"
+
+  ## Treat the tag value as a path, converting it to its last element without its suffix
+  # [[processors.filepath.stem]]
+  # tag = "path"
+
+  ## Treat the tag value as a path, converting it to the shortest path name equivalent
+  ## to path by purely lexical processing
+  # [[processors.filepath.clean]]
+  # tag = "path"
+
+  ## Treat the tag value as a path, converting it to a relative path that is lexically
+  ## equivalent to the source path when joined to 'base_path'
+  # [[processors.filepath.rel]]
+  # tag = "path"
+  # base_path = "/var/log"
+
+  ## Treat the tag value as a path, replacing each separator character in path with a '/' character.
+  ## Only has an effect on Windows
+  # [[processors.filepath.toslash]]
+  # tag = "path"
+```
+
+## Considerations
+
+### Clean
+
+Even though `clean` is provided as a standalone function, it is also invoked when using the `rel` and `dirname`
+functions, so there is no need to use it along with them; the sketch at the end of this section illustrates why.
+
+That is:
+
+```toml
+[[processors.filepath]]
+  [[processors.filepath.dirname]]
+    tag = "path"
+  [[processors.filepath.clean]]
+    tag = "path"
+```
+
+Is equivalent to:
+
+```toml
+[[processors.filepath]]
+  [[processors.filepath.dirname]]
+    tag = "path"
+```
+
+### ToSlash
+
+The effects of this function are only noticeable on Windows platforms, because of the underlying Go implementation.
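To make the `Clean` consideration above concrete, a minimal standalone Go sketch (illustrative, not plugin code) showing that `filepath.Dir` and `filepath.Rel` already return lexically cleaned results, so a trailing `clean` step is redundant:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	p := "/var/log/dummy/../batch//ajob.log"

	// Dir cleans its result, so ".." and doubled separators are already resolved.
	fmt.Println(filepath.Dir(p)) // /var/log/batch

	// Rel likewise returns a cleaned relative path.
	rel, err := filepath.Rel("/var/log", p)
	if err != nil {
		panic(err)
	}
	fmt.Println(rel) // batch/ajob.log

	// Applying Clean on top is therefore a no-op.
	fmt.Println(filepath.Clean(filepath.Dir(p))) // /var/log/batch
}
```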
+
+## Examples
+
+### Basename
+
+```toml
+[[processors.filepath]]
+  [[processors.filepath.basename]]
+    tag = "path"
+```
+
+```diff
+- my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
++ my_metric,path="ajob.log" duration_seconds=134 1587920425000000000
+```
+
+### Dirname
+
+```toml
+[[processors.filepath]]
+  [[processors.filepath.dirname]]
+    field = "path"
+    dest = "folder"
+```
+
+```diff
+- my_metric path="/var/log/batch/ajob.log",duration_seconds=134 1587920425000000000
++ my_metric path="/var/log/batch/ajob.log",folder="/var/log/batch",duration_seconds=134 1587920425000000000
+```
+
+### Stem
+
+```toml
+[[processors.filepath]]
+  [[processors.filepath.stem]]
+    tag = "path"
+```
+
+```diff
+- my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
++ my_metric,path="ajob" duration_seconds=134 1587920425000000000
+```
+
+### Clean
+
+```toml
+[[processors.filepath]]
+  [[processors.filepath.clean]]
+    tag = "path"
+```
+
+```diff
+- my_metric,path="/var/log/dummy/../batch//ajob.log" duration_seconds=134 1587920425000000000
++ my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
+```
+
+### Rel
+
+```toml
+[[processors.filepath]]
+  [[processors.filepath.rel]]
+    tag = "path"
+    base_path = "/var/log"
+```
+
+```diff
+- my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
++ my_metric,path="batch/ajob.log" duration_seconds=134 1587920425000000000
+```
+
+### ToSlash
+
+```toml
+[[processors.filepath]]
+  [[processors.filepath.toslash]]
+    tag = "path"
+```
+
+```diff
+- my_metric,path="\var\log\batch\ajob.log" duration_seconds=134 1587920425000000000
++ my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
+```
+
+### Processing paths from tail plugin
+
+This plugin can be used together with the
+[tail input plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tail) to make modifications
+to the `path` tag injected for every file.
+
+Scenario:
+
+* A log file `/var/log/myjobs/mysql_backup.log`, containing logs for a job execution. Whenever the job ends, a line is
+written to the log file following this format: `2020-04-05 11:45:21 total time execution: 70 seconds`
+* We want to generate a measurement that captures the duration of the script as a field and includes the `path` as a
+tag
+  * We are interested in the filename without its extensions, since it might be enough information for plotting our
+  execution times in a dashboard
+  * Just in case, we don't want to override the original path (if for some reason we end up having duplicates we might
+  want this information)
+
+For this purpose, we will use the `tail` input plugin, the `grok` parser plugin and the `filepath` processor.
+ +```toml +[[inputs.tail]] + files = ["/var/log/myjobs/**.log"] + data_format = "grok" + grok_patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} total time execution: %{NUMBER:duration_seconds:int}'] + name_override = "myjobs" + +[[processors.filepath]] + [[processors.filepath.stem]] + tag = "path" + dest = "stempath" + +``` + +The resulting output for a job taking 70 seconds for the mentioned log file would look like: + +```text +myjobs_duration_seconds,host="my-host",path="/var/log/myjobs/mysql_backup.log",stempath="mysql_backup" 70 1587920425000000000 +``` diff --git a/plugins/processors/filepath/filepath.go b/plugins/processors/filepath/filepath.go new file mode 100644 index 000000000..70013de17 --- /dev/null +++ b/plugins/processors/filepath/filepath.go @@ -0,0 +1,150 @@ +package filepath + +import ( + "path/filepath" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" +) + +type Options struct { + BaseName []BaseOpts `toml:"basename"` + DirName []BaseOpts `toml:"dirname"` + Stem []BaseOpts + Clean []BaseOpts + Rel []RelOpts + ToSlash []BaseOpts `toml:"toslash"` +} + +type ProcessorFunc func(s string) string + +// BaseOpts contains options applicable to every function +type BaseOpts struct { + Field string + Tag string + Dest string +} + +type RelOpts struct { + BaseOpts + BasePath string +} + +const sampleConfig = ` + ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag + # [[processors.filepath.basename]] + # tag = "path" + # dest = "basepath" + + ## Treat the field value as a path and keep all but the last element of path, typically the path's directory + # [[processors.filepath.dirname]] + # field = "path" + + ## Treat the tag value as a path, converting it to its the last element without its suffix + # [[processors.filepath.stem]] + # tag = "path" + + ## Treat the tag value as a path, converting it to the shortest path name equivalent + ## to path by purely lexical processing + # [[processors.filepath.clean]] + # tag = "path" + + ## Treat the tag value as a path, converting it to a relative path that is lexically + ## equivalent to the source path when joined to 'base_path' + # [[processors.filepath.rel]] + # tag = "path" + # base_path = "/var/log" + + ## Treat the tag value as a path, replacing each separator character in path with a '/' character. 
Has only + ## effect on Windows + # [[processors.filepath.toslash]] + # tag = "path" +` + +func (o *Options) SampleConfig() string { + return sampleConfig +} + +func (o *Options) Description() string { + return "Performs file path manipulations on tags and fields" +} + +// applyFunc applies the specified function to the metric +func (o *Options) applyFunc(bo BaseOpts, fn ProcessorFunc, metric telegraf.Metric) { + if bo.Tag != "" { + if v, ok := metric.GetTag(bo.Tag); ok { + targetTag := bo.Tag + + if bo.Dest != "" { + targetTag = bo.Dest + } + metric.AddTag(targetTag, fn(v)) + } + } + + if bo.Field != "" { + if v, ok := metric.GetField(bo.Field); ok { + targetField := bo.Field + + if bo.Dest != "" { + targetField = bo.Dest + } + + // Only string fields are considered + if v, ok := v.(string); ok { + metric.AddField(targetField, fn(v)) + } + + } + } +} + +func stemFilePath(path string) string { + return strings.TrimSuffix(filepath.Base(path), filepath.Ext(path)) +} + +// processMetric processes fields and tag values for a given metric applying the selected transformations +func (o *Options) processMetric(metric telegraf.Metric) { + // Stem + for _, v := range o.Stem { + o.applyFunc(v, stemFilePath, metric) + } + // Basename + for _, v := range o.BaseName { + o.applyFunc(v, filepath.Base, metric) + } + // Rel + for _, v := range o.Rel { + o.applyFunc(v.BaseOpts, func(s string) string { + relPath, _ := filepath.Rel(v.BasePath, s) + return relPath + }, metric) + } + // Dirname + for _, v := range o.DirName { + o.applyFunc(v, filepath.Dir, metric) + } + // Clean + for _, v := range o.Clean { + o.applyFunc(v, filepath.Clean, metric) + } + // ToSlash + for _, v := range o.ToSlash { + o.applyFunc(v, filepath.ToSlash, metric) + } +} + +func (o *Options) Apply(in ...telegraf.Metric) []telegraf.Metric { + for _, m := range in { + o.processMetric(m) + } + + return in +} + +func init() { + processors.Add("filepath", func() telegraf.Processor { + return &Options{} + }) +} diff --git a/plugins/processors/filepath/filepath_test.go b/plugins/processors/filepath/filepath_test.go new file mode 100644 index 000000000..a305c4c5c --- /dev/null +++ b/plugins/processors/filepath/filepath_test.go @@ -0,0 +1,70 @@ +// +build !windows + +package filepath + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" +) + +var samplePath = "/my/test//c/../path/file.log" + +func TestOptions_Apply(t *testing.T) { + tests := []testCase{ + { + name: "Smoke Test", + o: newOptions("/my/test/"), + inputMetrics: getSmokeTestInputMetrics(samplePath), + expectedMetrics: []telegraf.Metric{ + testutil.MustMetric( + smokeMetricName, + map[string]string{ + "baseTag": "file.log", + "dirTag": "/my/test/path", + "stemTag": "file", + "cleanTag": "/my/test/path/file.log", + "relTag": "path/file.log", + "slashTag": "/my/test//c/../path/file.log", + }, + map[string]interface{}{ + "baseField": "file.log", + "dirField": "/my/test/path", + "stemField": "file", + "cleanField": "/my/test/path/file.log", + "relField": "path/file.log", + "slashField": "/my/test//c/../path/file.log", + }, + time.Now()), + }, + }, + { + name: "Test Dest Option", + o: &Options{ + BaseName: []BaseOpts{ + { + Field: "sourcePath", + Tag: "sourcePath", + Dest: "basePath", + }, + }}, + inputMetrics: []telegraf.Metric{ + testutil.MustMetric( + "testMetric", + map[string]string{"sourcePath": samplePath}, + map[string]interface{}{"sourcePath": samplePath}, + time.Now()), + }, + expectedMetrics: []telegraf.Metric{ + 
testutil.MustMetric( + "testMetric", + map[string]string{"sourcePath": samplePath, "basePath": "file.log"}, + map[string]interface{}{"sourcePath": samplePath, "basePath": "file.log"}, + time.Now()), + }, + }, + } + runTestOptionsApply(t, tests) +} diff --git a/plugins/processors/filepath/filepath_test_helpers.go b/plugins/processors/filepath/filepath_test_helpers.go new file mode 100644 index 000000000..571730b54 --- /dev/null +++ b/plugins/processors/filepath/filepath_test_helpers.go @@ -0,0 +1,100 @@ +package filepath + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" +) + +const smokeMetricName = "testmetric" + +type testCase struct { + name string + o *Options + inputMetrics []telegraf.Metric + expectedMetrics []telegraf.Metric +} + +func newOptions(basePath string) *Options { + return &Options{ + BaseName: []BaseOpts{ + { + Field: "baseField", + Tag: "baseTag", + }, + }, + DirName: []BaseOpts{ + { + Field: "dirField", + Tag: "dirTag", + }, + }, + Stem: []BaseOpts{ + { + Field: "stemField", + Tag: "stemTag", + }, + }, + Clean: []BaseOpts{ + { + Field: "cleanField", + Tag: "cleanTag", + }, + }, + Rel: []RelOpts{ + { + BaseOpts: BaseOpts{ + Field: "relField", + Tag: "relTag", + }, + BasePath: basePath, + }, + }, + ToSlash: []BaseOpts{ + { + Field: "slashField", + Tag: "slashTag", + }, + }, + } +} + +func getSampleMetricTags(path string) map[string]string { + return map[string]string{ + "baseTag": path, + "dirTag": path, + "stemTag": path, + "cleanTag": path, + "relTag": path, + "slashTag": path, + } +} + +func getSampleMetricFields(path string) map[string]interface{} { + return map[string]interface{}{ + "baseField": path, + "dirField": path, + "stemField": path, + "cleanField": path, + "relField": path, + "slashField": path, + } +} + +func getSmokeTestInputMetrics(path string) []telegraf.Metric { + return []telegraf.Metric{ + testutil.MustMetric(smokeMetricName, getSampleMetricTags(path), getSampleMetricFields(path), + time.Now()), + } +} + +func runTestOptionsApply(t *testing.T, tests []testCase) { + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.o.Apply(tt.inputMetrics...) 
+ testutil.RequireMetricsEqual(t, tt.expectedMetrics, got, testutil.SortMetrics(), testutil.IgnoreTime()) + }) + } +} diff --git a/plugins/processors/filepath/filepath_windows_test.go b/plugins/processors/filepath/filepath_windows_test.go new file mode 100644 index 000000000..daca33d18 --- /dev/null +++ b/plugins/processors/filepath/filepath_windows_test.go @@ -0,0 +1,43 @@ +package filepath + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" +) + +var samplePath = "c:\\my\\test\\\\c\\..\\path\\file.log" + +func TestOptions_Apply(t *testing.T) { + tests := []testCase{ + { + name: "Smoke Test", + o: newOptions("c:\\my\\test\\"), + inputMetrics: getSmokeTestInputMetrics(samplePath), + expectedMetrics: []telegraf.Metric{ + testutil.MustMetric( + smokeMetricName, + map[string]string{ + "baseTag": "file.log", + "dirTag": "c:\\my\\test\\path", + "stemTag": "file", + "cleanTag": "c:\\my\\test\\path\\file.log", + "relTag": "path\\file.log", + "slashTag": "c:/my/test//c/../path/file.log", + }, + map[string]interface{}{ + "baseField": "file.log", + "dirField": "c:\\my\\test\\path", + "stemField": "file", + "cleanField": "c:\\my\\test\\path\\file.log", + "relField": "path\\file.log", + "slashField": "c:/my/test//c/../path/file.log", + }, + time.Now()), + }, + }, + } + runTestOptionsApply(t, tests) +} From 7a5690cd367e6d4fe7d2643fa2983b4196ea3695 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 1 May 2020 11:23:25 -0700 Subject: [PATCH 1730/1815] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b33b280f..8108bfed9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,10 @@ - The `logparser` input is deprecated, use the `tail` input with `data_format = "grok"` as a replacement. +#### New Processors + +- [filepath](/plugins/processors/filepathdedup/README.md) - Contributed by @kir4h + #### Features - [#6905](https://github.com/influxdata/telegraf/pull/6905): Add commands stats to mongodb input plugin. 
From b73a232a6a6a11f41041db6fe2714cfd576027da Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Mon, 4 May 2020 14:09:10 -0400 Subject: [PATCH 1731/1815] Support Go execd plugins with shim (#7283) --- EXTERNAL_PLUGINS.md | 8 + agent/accumulator_test.go | 2 +- agent/agent.go | 4 +- agent/agent_test.go | 26 +- cmd/telegraf/telegraf.go | 2 +- .../config => config}/aws/credentials.go | 0 {internal/config => config}/config.go | 2 +- {internal/config => config}/config_test.go | 2 +- .../testdata/inline_table.toml | 0 .../testdata/invalid_field.toml | 0 .../testdata/non_slice_slice.toml | 0 .../testdata/single_plugin.toml | 0 .../testdata/single_plugin_env_vars.toml | 0 .../testdata/slice_comment.toml | 0 .../testdata/special_types.toml | 0 .../testdata/subconfig/exec.conf | 0 .../testdata/subconfig/memcached.conf | 0 .../testdata/subconfig/procstat.conf | 0 .../testdata/telegraf-agent.toml | 0 .../testdata/wrong_field_type.toml | 0 .../testdata/wrong_field_type2.toml | 0 config/types.go | 88 ++++++ go.mod | 1 + {internal/models => models}/buffer.go | 0 {internal/models => models}/buffer_test.go | 0 {internal/models => models}/filter.go | 0 {internal/models => models}/filter_test.go | 0 {internal/models => models}/log.go | 0 {internal/models => models}/log_test.go | 0 {internal/models => models}/makemetric.go | 0 .../models => models}/running_aggregator.go | 0 .../running_aggregator_test.go | 0 {internal/models => models}/running_input.go | 0 .../models => models}/running_input_test.go | 0 {internal/models => models}/running_output.go | 0 .../models => models}/running_output_test.go | 0 .../models => models}/running_processor.go | 0 .../running_processor_test.go | 0 .../cloud_pubsub_push/pubsub_push_test.go | 2 +- plugins/inputs/cloudwatch/cloudwatch.go | 2 +- plugins/inputs/execd/examples/count.rb | 10 +- plugins/inputs/execd/examples/count.sh | 8 +- plugins/inputs/execd/execd.go | 94 +++--- .../execd/{execd_unix.go => execd_posix.go} | 4 +- plugins/inputs/execd/execd_test.go | 85 ++++++ .../execd/{execd_win.go => execd_windows.go} | 4 +- plugins/inputs/execd/shim/README.md | 48 +++ plugins/inputs/execd/shim/example/cmd/main.go | 60 ++++ .../inputs/execd/shim/example/cmd/plugin.conf | 2 + plugins/inputs/execd/shim/goshim.go | 278 ++++++++++++++++++ .../inputs/execd/shim/goshim_notwindows.go | 14 + plugins/inputs/execd/shim/goshim_windows.go | 13 + plugins/inputs/execd/shim/input.go | 20 ++ plugins/inputs/execd/shim/shim_posix_test.go | 76 +++++ plugins/inputs/execd/shim/shim_test.go | 119 ++++++++ .../kinesis_consumer/kinesis_consumer.go | 2 +- plugins/outputs/cloudwatch/cloudwatch.go | 2 +- plugins/outputs/kinesis/kinesis.go | 2 +- 58 files changed, 915 insertions(+), 65 deletions(-) create mode 100644 EXTERNAL_PLUGINS.md rename {internal/config => config}/aws/credentials.go (100%) rename {internal/config => config}/config.go (99%) rename {internal/config => config}/config_test.go (99%) rename {internal/config => config}/testdata/inline_table.toml (100%) rename {internal/config => config}/testdata/invalid_field.toml (100%) rename {internal/config => config}/testdata/non_slice_slice.toml (100%) rename {internal/config => config}/testdata/single_plugin.toml (100%) rename {internal/config => config}/testdata/single_plugin_env_vars.toml (100%) rename {internal/config => config}/testdata/slice_comment.toml (100%) rename {internal/config => config}/testdata/special_types.toml (100%) rename {internal/config => config}/testdata/subconfig/exec.conf (100%) rename {internal/config => 
config}/testdata/subconfig/memcached.conf (100%) rename {internal/config => config}/testdata/subconfig/procstat.conf (100%) rename {internal/config => config}/testdata/telegraf-agent.toml (100%) rename {internal/config => config}/testdata/wrong_field_type.toml (100%) rename {internal/config => config}/testdata/wrong_field_type2.toml (100%) create mode 100644 config/types.go rename {internal/models => models}/buffer.go (100%) rename {internal/models => models}/buffer_test.go (100%) rename {internal/models => models}/filter.go (100%) rename {internal/models => models}/filter_test.go (100%) rename {internal/models => models}/log.go (100%) rename {internal/models => models}/log_test.go (100%) rename {internal/models => models}/makemetric.go (100%) rename {internal/models => models}/running_aggregator.go (100%) rename {internal/models => models}/running_aggregator_test.go (100%) rename {internal/models => models}/running_input.go (100%) rename {internal/models => models}/running_input_test.go (100%) rename {internal/models => models}/running_output.go (100%) rename {internal/models => models}/running_output_test.go (100%) rename {internal/models => models}/running_processor.go (100%) rename {internal/models => models}/running_processor_test.go (100%) rename plugins/inputs/execd/{execd_unix.go => execd_posix.go} (81%) create mode 100644 plugins/inputs/execd/execd_test.go rename plugins/inputs/execd/{execd_win.go => execd_windows.go} (74%) create mode 100644 plugins/inputs/execd/shim/README.md create mode 100644 plugins/inputs/execd/shim/example/cmd/main.go create mode 100644 plugins/inputs/execd/shim/example/cmd/plugin.conf create mode 100644 plugins/inputs/execd/shim/goshim.go create mode 100644 plugins/inputs/execd/shim/goshim_notwindows.go create mode 100644 plugins/inputs/execd/shim/goshim_windows.go create mode 100644 plugins/inputs/execd/shim/input.go create mode 100644 plugins/inputs/execd/shim/shim_posix_test.go create mode 100644 plugins/inputs/execd/shim/shim_test.go diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md new file mode 100644 index 000000000..273c33fbb --- /dev/null +++ b/EXTERNAL_PLUGINS.md @@ -0,0 +1,8 @@ +# External Plugins + +This is a list of plugins that can be compiled outside of Telegraf and used via the execd input. + +Pull requests welcome. 
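For context, an external input plugin is just a long-running process that prints InfluxDB line protocol to stdout. A minimal illustrative Go sketch in the spirit of the execd `count.sh`/`count.rb` examples (emitting one metric per line read on stdin; the program name and metric are hypothetical) might look like this:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"time"
)

// Minimal execd-style input: each line received on stdin is treated as a
// "collect now" signal, and one metric is written to stdout in InfluxDB
// line protocol with a nanosecond timestamp.
func main() {
	counter := 0
	in := bufio.NewScanner(os.Stdin)
	for in.Scan() {
		fmt.Printf("counter_go count=%di %d\n", counter, time.Now().UnixNano())
		counter++
	}
}
```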
+ +## Inputs +- [rand](https://github.com/ssoroka/rand) - Generate random numbers \ No newline at end of file diff --git a/agent/accumulator_test.go b/agent/accumulator_test.go index 496d131f4..38a7e047c 100644 --- a/agent/accumulator_test.go +++ b/agent/accumulator_test.go @@ -9,7 +9,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal/models" + "github.com/influxdata/telegraf/models" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/agent/agent.go b/agent/agent.go index 863309f28..b68c55d13 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -10,9 +10,9 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/internal/config" - "github.com/influxdata/telegraf/internal/models" + "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/serializers/influx" ) diff --git a/agent/agent_test.go b/agent/agent_test.go index c822a236b..9cc631b17 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal/config" + "github.com/influxdata/telegraf/config" _ "github.com/influxdata/telegraf/plugins/inputs/all" _ "github.com/influxdata/telegraf/plugins/outputs/all" "github.com/stretchr/testify/assert" @@ -22,35 +22,35 @@ func TestAgent_OmitHostname(t *testing.T) { func TestAgent_LoadPlugin(t *testing.T) { c := config.NewConfig() c.InputFilters = []string{"mysql"} - err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err := c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ := NewAgent(c) assert.Equal(t, 1, len(a.Config.Inputs)) c = config.NewConfig() c.InputFilters = []string{"foo"} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 0, len(a.Config.Inputs)) c = config.NewConfig() c.InputFilters = []string{"mysql", "foo"} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 1, len(a.Config.Inputs)) c = config.NewConfig() c.InputFilters = []string{"mysql", "redis"} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 2, len(a.Config.Inputs)) c = config.NewConfig() c.InputFilters = []string{"mysql", "foo", "redis", "bar"} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 2, len(a.Config.Inputs)) @@ -59,42 +59,42 @@ func TestAgent_LoadPlugin(t *testing.T) { func TestAgent_LoadOutput(t *testing.T) { c := config.NewConfig() c.OutputFilters = []string{"influxdb"} - err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err := c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ := NewAgent(c) assert.Equal(t, 2, len(a.Config.Outputs)) c = config.NewConfig() c.OutputFilters = []string{"kafka"} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = 
NewAgent(c) assert.Equal(t, 1, len(a.Config.Outputs)) c = config.NewConfig() c.OutputFilters = []string{} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 3, len(a.Config.Outputs)) c = config.NewConfig() c.OutputFilters = []string{"foo"} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 0, len(a.Config.Outputs)) c = config.NewConfig() c.OutputFilters = []string{"influxdb", "foo"} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 2, len(a.Config.Outputs)) c = config.NewConfig() c.OutputFilters = []string{"influxdb", "kafka"} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) assert.Equal(t, 3, len(c.Outputs)) a, _ = NewAgent(c) @@ -102,7 +102,7 @@ func TestAgent_LoadOutput(t *testing.T) { c = config.NewConfig() c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 3, len(a.Config.Outputs)) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index c1f7344da..4f51bc2e1 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -16,8 +16,8 @@ import ( "time" "github.com/influxdata/telegraf/agent" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/internal/config" "github.com/influxdata/telegraf/internal/goplugin" "github.com/influxdata/telegraf/logger" _ "github.com/influxdata/telegraf/plugins/aggregators/all" diff --git a/internal/config/aws/credentials.go b/config/aws/credentials.go similarity index 100% rename from internal/config/aws/credentials.go rename to config/aws/credentials.go diff --git a/internal/config/config.go b/config/config.go similarity index 99% rename from internal/config/config.go rename to config/config.go index c2335fac2..0ebb9e29b 100644 --- a/internal/config/config.go +++ b/config/config.go @@ -20,7 +20,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/internal/models" + "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/aggregators" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/outputs" diff --git a/internal/config/config_test.go b/config/config_test.go similarity index 99% rename from internal/config/config_test.go rename to config/config_test.go index 9d42177cd..c4a960265 100644 --- a/internal/config/config_test.go +++ b/config/config_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/internal/models" + "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/exec" "github.com/influxdata/telegraf/plugins/inputs/http_listener_v2" diff --git a/internal/config/testdata/inline_table.toml b/config/testdata/inline_table.toml similarity index 100% rename from 
internal/config/testdata/inline_table.toml
rename to config/testdata/inline_table.toml
diff --git a/internal/config/testdata/invalid_field.toml b/config/testdata/invalid_field.toml
similarity index 100%
rename from internal/config/testdata/invalid_field.toml
rename to config/testdata/invalid_field.toml
diff --git a/internal/config/testdata/non_slice_slice.toml b/config/testdata/non_slice_slice.toml
similarity index 100%
rename from internal/config/testdata/non_slice_slice.toml
rename to config/testdata/non_slice_slice.toml
diff --git a/internal/config/testdata/single_plugin.toml b/config/testdata/single_plugin.toml
similarity index 100%
rename from internal/config/testdata/single_plugin.toml
rename to config/testdata/single_plugin.toml
diff --git a/internal/config/testdata/single_plugin_env_vars.toml b/config/testdata/single_plugin_env_vars.toml
similarity index 100%
rename from internal/config/testdata/single_plugin_env_vars.toml
rename to config/testdata/single_plugin_env_vars.toml
diff --git a/internal/config/testdata/slice_comment.toml b/config/testdata/slice_comment.toml
similarity index 100%
rename from internal/config/testdata/slice_comment.toml
rename to config/testdata/slice_comment.toml
diff --git a/internal/config/testdata/special_types.toml b/config/testdata/special_types.toml
similarity index 100%
rename from internal/config/testdata/special_types.toml
rename to config/testdata/special_types.toml
diff --git a/internal/config/testdata/subconfig/exec.conf b/config/testdata/subconfig/exec.conf
similarity index 100%
rename from internal/config/testdata/subconfig/exec.conf
rename to config/testdata/subconfig/exec.conf
diff --git a/internal/config/testdata/subconfig/memcached.conf b/config/testdata/subconfig/memcached.conf
similarity index 100%
rename from internal/config/testdata/subconfig/memcached.conf
rename to config/testdata/subconfig/memcached.conf
diff --git a/internal/config/testdata/subconfig/procstat.conf b/config/testdata/subconfig/procstat.conf
similarity index 100%
rename from internal/config/testdata/subconfig/procstat.conf
rename to config/testdata/subconfig/procstat.conf
diff --git a/internal/config/testdata/telegraf-agent.toml b/config/testdata/telegraf-agent.toml
similarity index 100%
rename from internal/config/testdata/telegraf-agent.toml
rename to config/testdata/telegraf-agent.toml
diff --git a/internal/config/testdata/wrong_field_type.toml b/config/testdata/wrong_field_type.toml
similarity index 100%
rename from internal/config/testdata/wrong_field_type.toml
rename to config/testdata/wrong_field_type.toml
diff --git a/internal/config/testdata/wrong_field_type2.toml b/config/testdata/wrong_field_type2.toml
similarity index 100%
rename from internal/config/testdata/wrong_field_type2.toml
rename to config/testdata/wrong_field_type2.toml
diff --git a/config/types.go b/config/types.go
new file mode 100644
index 000000000..5703c8411
--- /dev/null
+++ b/config/types.go
@@ -0,0 +1,88 @@
+package config
+
+import (
+	"bytes"
+	"strconv"
+	"time"
+
+	"github.com/alecthomas/units"
+)
+
+// Duration is a time.Duration
+type Duration time.Duration
+
+// Size is an int64
+type Size int64
+
+// Number is a float
+type Number float64
+
+// UnmarshalTOML parses the duration from the TOML config file
+func (d *Duration) UnmarshalTOML(b []byte) error {
+	var err error
+	b = bytes.Trim(b, `'`)
+
+	// see if we can directly convert it
+	dur, err := time.ParseDuration(string(b))
+	if err == nil {
+		*d = Duration(dur)
+		return nil
+	}
+
+	// Parse string duration, ie, "1s"
+	if uq, err := strconv.Unquote(string(b)); err == nil && len(uq) > 0 {
+		dur, err := time.ParseDuration(uq)
+		if err == nil {
+			*d = Duration(dur)
+			return nil
+		}
+	}
+
+	// First try parsing as integer seconds
+	sI, err := strconv.ParseInt(string(b), 10, 64)
+	if err == nil {
+		dur := time.Second * time.Duration(sI)
+		*d = Duration(dur)
+		return nil
+	}
+	// Second try parsing as float seconds
+	sF, err := strconv.ParseFloat(string(b), 64)
+	if err == nil {
+		dur := time.Second * time.Duration(sF)
+		*d = Duration(dur)
+		return nil
+	}
+
+	return nil
+}
+
+func (s *Size) UnmarshalTOML(b []byte) error {
+	var err error
+	b = bytes.Trim(b, `'`)
+
+	val, err := strconv.ParseInt(string(b), 10, 64)
+	if err == nil {
+		*s = Size(val)
+		return nil
+	}
+	uq, err := strconv.Unquote(string(b))
+	if err != nil {
+		return err
+	}
+	val, err = units.ParseStrictBytes(uq)
+	if err != nil {
+		return err
+	}
+	*s = Size(val)
+	return nil
+}
+
+func (n *Number) UnmarshalTOML(b []byte) error {
+	value, err := strconv.ParseFloat(string(b), 64)
+	if err != nil {
+		return err
+	}
+
+	*n = Number(value)
+	return nil
+}
diff --git a/go.mod b/go.mod
index 61515f70c..4986adc77 100644
--- a/go.mod
+++ b/go.mod
@@ -12,6 +12,7 @@ require (
 	github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687
 	github.com/Azure/go-autorest/autorest v0.9.3
 	github.com/Azure/go-autorest/autorest/azure/auth v0.4.2
+	github.com/BurntSushi/toml v0.3.1
 	github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee
 	github.com/Microsoft/ApplicationInsights-Go v0.4.2
 	github.com/Microsoft/go-winio v0.4.9 // indirect
diff --git a/internal/models/buffer.go b/models/buffer.go
similarity index 100%
rename from internal/models/buffer.go
rename to models/buffer.go
diff --git a/internal/models/buffer_test.go b/models/buffer_test.go
similarity index 100%
rename from internal/models/buffer_test.go
rename to models/buffer_test.go
diff --git a/internal/models/filter.go b/models/filter.go
similarity index 100%
rename from internal/models/filter.go
rename to models/filter.go
diff --git a/internal/models/filter_test.go b/models/filter_test.go
similarity index 100%
rename from internal/models/filter_test.go
rename to models/filter_test.go
diff --git a/internal/models/log.go b/models/log.go
similarity index 100%
rename from internal/models/log.go
rename to models/log.go
diff --git a/internal/models/log_test.go b/models/log_test.go
similarity index 100%
rename from internal/models/log_test.go
rename to models/log_test.go
diff --git a/internal/models/makemetric.go b/models/makemetric.go
similarity index 100%
rename from internal/models/makemetric.go
rename to models/makemetric.go
diff --git a/internal/models/running_aggregator.go b/models/running_aggregator.go
similarity index 100%
rename from internal/models/running_aggregator.go
rename to models/running_aggregator.go
diff --git a/internal/models/running_aggregator_test.go b/models/running_aggregator_test.go
similarity index 100%
rename from internal/models/running_aggregator_test.go
rename to models/running_aggregator_test.go
diff --git a/internal/models/running_input.go b/models/running_input.go
similarity index 100%
rename from internal/models/running_input.go
rename to models/running_input.go
diff --git a/internal/models/running_input_test.go b/models/running_input_test.go
similarity index 100%
rename from internal/models/running_input_test.go
rename to models/running_input_test.go
diff --git a/internal/models/running_output.go b/models/running_output.go
similarity index 100%
rename from 
internal/models/running_output.go rename to models/running_output.go diff --git a/internal/models/running_output_test.go b/models/running_output_test.go similarity index 100% rename from internal/models/running_output_test.go rename to models/running_output_test.go diff --git a/internal/models/running_processor.go b/models/running_processor.go similarity index 100% rename from internal/models/running_processor.go rename to models/running_processor.go diff --git a/internal/models/running_processor_test.go b/models/running_processor_test.go similarity index 100% rename from internal/models/running_processor_test.go rename to models/running_processor_test.go diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go index 308a8181d..ae7601b20 100644 --- a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go +++ b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go @@ -16,7 +16,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/agent" "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/internal/models" + "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" ) diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index 3148e2c75..cb0e10ac0 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -12,9 +12,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatch" "github.com/influxdata/telegraf" + internalaws "github.com/influxdata/telegraf/config/aws" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" - internalaws "github.com/influxdata/telegraf/internal/config/aws" "github.com/influxdata/telegraf/internal/limiter" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/inputs" diff --git a/plugins/inputs/execd/examples/count.rb b/plugins/inputs/execd/examples/count.rb index 220848d64..6b60fbc17 100755 --- a/plugins/inputs/execd/examples/count.rb +++ b/plugins/inputs/execd/examples/count.rb @@ -4,8 +4,16 @@ counter = 0 +def time_ns_str(t) + ns = t.nsec.to_s + (9 - ns.size).times do + ns = "0" + ns # left pad + end + t.to_i.to_s + ns +end + loop do - puts "counter_ruby count=#{counter}" + puts "counter_ruby count=#{counter} #{time_ns_str(Time.now)}" STDOUT.flush counter += 1 diff --git a/plugins/inputs/execd/examples/count.sh b/plugins/inputs/execd/examples/count.sh index aa6932a80..bbbe8619c 100755 --- a/plugins/inputs/execd/examples/count.sh +++ b/plugins/inputs/execd/examples/count.sh @@ -1,12 +1,12 @@ -#!/bin/bash +#!/bin/sh ## Example in bash using STDIN signaling counter=0 -while read; do +while read LINE; do echo "counter_bash count=${counter}" - let counter=counter+1 + counter=$((counter+1)) done -(>&2 echo "terminate") +trap "echo terminate 1>&2" EXIT \ No newline at end of file diff --git a/plugins/inputs/execd/execd.go b/plugins/inputs/execd/execd.go index e852d045e..90a5ceffb 100644 --- a/plugins/inputs/execd/execd.go +++ b/plugins/inputs/execd/execd.go @@ -11,6 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" @@ -43,12 +44,14 @@ const sampleConfig = ` type Execd struct { Command []string Signal string - RestartDelay internal.Duration 
+ RestartDelay config.Duration acc telegraf.Accumulator cmd *exec.Cmd parser parsers.Parser stdin io.WriteCloser + stdout io.ReadCloser + stderr io.ReadCloser cancel context.CancelFunc wg sync.WaitGroup } @@ -69,13 +72,17 @@ func (e *Execd) Start(acc telegraf.Accumulator) error { e.acc = acc if len(e.Command) == 0 { - return fmt.Errorf("E! [inputs.execd] FATAL no command specified") + return fmt.Errorf("FATAL no command specified") } e.wg.Add(1) - var ctx context.Context - ctx, e.cancel = context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + e.cancel = cancel + + if err := e.cmdStart(); err != nil { + return err + } go func() { e.cmdLoop(ctx) @@ -90,79 +97,98 @@ func (e *Execd) Stop() { e.wg.Wait() } -func (e *Execd) cmdLoop(ctx context.Context) { +// cmdLoop watches an already running process, restarting it when appropriate. +func (e *Execd) cmdLoop(ctx context.Context) error { for { // Use a buffered channel to ensure goroutine below can exit // if `ctx.Done` is selected and nothing reads on `done` anymore done := make(chan error, 1) go func() { - done <- e.cmdRun() + done <- e.cmdWait() }() select { case <-ctx.Done(): - e.stdin.Close() - // Immediately exit process but with a graceful shutdown - // period before killing - internal.WaitTimeout(e.cmd, 200*time.Millisecond) - return + if e.stdin != nil { + e.stdin.Close() + // Immediately exit process but with a graceful shutdown + // period before killing + internal.WaitTimeout(e.cmd, 200*time.Millisecond) + } + return nil case err := <-done: - log.Printf("E! [inputs.execd] Process %s terminated: %s", e.Command, err) + log.Printf("Process %s terminated: %s", e.Command, err) + if isQuitting(ctx) { + return err + } } - log.Printf("E! [inputs.execd] Restarting in %s...", e.RestartDelay.Duration) + log.Printf("Restarting in %s...", time.Duration(e.RestartDelay)) select { case <-ctx.Done(): - return - case <-time.After(e.RestartDelay.Duration): + return nil + case <-time.After(time.Duration(e.RestartDelay)): // Continue the loop and restart the process + if err := e.cmdStart(); err != nil { + return err + } } } } -func (e *Execd) cmdRun() error { - var wg sync.WaitGroup +func isQuitting(ctx context.Context) bool { + select { + case <-ctx.Done(): + return true + default: + return false + } +} +func (e *Execd) cmdStart() (err error) { if len(e.Command) > 1 { e.cmd = exec.Command(e.Command[0], e.Command[1:]...) } else { e.cmd = exec.Command(e.Command[0]) } - stdin, err := e.cmd.StdinPipe() + e.stdin, err = e.cmd.StdinPipe() if err != nil { - return fmt.Errorf("E! [inputs.execd] Error opening stdin pipe: %s", err) + return fmt.Errorf("Error opening stdin pipe: %s", err) } - e.stdin = stdin - - stdout, err := e.cmd.StdoutPipe() + e.stdout, err = e.cmd.StdoutPipe() if err != nil { - return fmt.Errorf("E! [inputs.execd] Error opening stdout pipe: %s", err) + return fmt.Errorf("Error opening stdout pipe: %s", err) } - stderr, err := e.cmd.StderrPipe() + e.stderr, err = e.cmd.StderrPipe() if err != nil { - return fmt.Errorf("E! [inputs.execd] Error opening stderr pipe: %s", err) + return fmt.Errorf("Error opening stderr pipe: %s", err) } - log.Printf("D! [inputs.execd] Starting process: %s", e.Command) + log.Printf("Starting process: %s", e.Command) err = e.cmd.Start() if err != nil { - return fmt.Errorf("E! 
[inputs.execd] Error starting process: %s", err) + return fmt.Errorf("Error starting process: %s", err) } + return nil +} + +func (e *Execd) cmdWait() error { + var wg sync.WaitGroup wg.Add(2) go func() { - e.cmdReadOut(stdout) + e.cmdReadOut(e.stdout) wg.Done() }() go func() { - e.cmdReadErr(stderr) + e.cmdReadErr(e.stderr) wg.Done() }() @@ -176,7 +202,7 @@ func (e *Execd) cmdReadOut(out io.Reader) { for scanner.Scan() { metrics, err := e.parser.Parse(scanner.Bytes()) if err != nil { - e.acc.AddError(fmt.Errorf("E! [inputs.execd] Parse error: %s", err)) + e.acc.AddError(fmt.Errorf("Parse error: %s", err)) } for _, metric := range metrics { @@ -185,7 +211,7 @@ func (e *Execd) cmdReadOut(out io.Reader) { } if err := scanner.Err(); err != nil { - e.acc.AddError(fmt.Errorf("E! [inputs.execd] Error reading stdout: %s", err)) + e.acc.AddError(fmt.Errorf("Error reading stdout: %s", err)) } } @@ -193,11 +219,11 @@ func (e *Execd) cmdReadErr(out io.Reader) { scanner := bufio.NewScanner(out) for scanner.Scan() { - log.Printf("E! [inputs.execd] stderr: %q", scanner.Text()) + log.Printf("stderr: %q", scanner.Text()) } if err := scanner.Err(); err != nil { - e.acc.AddError(fmt.Errorf("E! [inputs.execd] Error reading stderr: %s", err)) + e.acc.AddError(fmt.Errorf("Error reading stderr: %s", err)) } } @@ -205,7 +231,7 @@ func init() { inputs.Add("execd", func() telegraf.Input { return &Execd{ Signal: "none", - RestartDelay: internal.Duration{Duration: 10 * time.Second}, + RestartDelay: config.Duration(10 * time.Second), } }) } diff --git a/plugins/inputs/execd/execd_unix.go b/plugins/inputs/execd/execd_posix.go similarity index 81% rename from plugins/inputs/execd/execd_unix.go rename to plugins/inputs/execd/execd_posix.go index a092cfc80..919447260 100644 --- a/plugins/inputs/execd/execd_unix.go +++ b/plugins/inputs/execd/execd_posix.go @@ -23,7 +23,9 @@ func (e *Execd) Gather(acc telegraf.Accumulator) error { case "SIGUSR2": e.cmd.Process.Signal(syscall.SIGUSR2) case "STDIN": - io.WriteString(e.stdin, "\n") + if _, err := io.WriteString(e.stdin, "\n"); err != nil { + return fmt.Errorf("Error writing to stdin: %s", err) + } case "none": default: return fmt.Errorf("invalid signal: %s", e.Signal) diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go new file mode 100644 index 000000000..b78075e95 --- /dev/null +++ b/plugins/inputs/execd/execd_test.go @@ -0,0 +1,85 @@ +// +build !windows + +package execd + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf/agent" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/models" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/plugins/parsers" + + "github.com/influxdata/telegraf" +) + +func TestExternalInputWorks(t *testing.T) { + jsonParser, err := parsers.NewInfluxParser() + require.NoError(t, err) + + e := &Execd{ + Command: []string{shell(), fileShellScriptPath()}, + RestartDelay: config.Duration(5 * time.Second), + parser: jsonParser, + Signal: "STDIN", + } + + metrics := make(chan telegraf.Metric, 10) + defer close(metrics) + acc := agent.NewAccumulator(&TestMetricMaker{}, metrics) + + require.NoError(t, e.Start(acc)) + require.NoError(t, e.Gather(acc)) + e.Stop() + + // grab a metric and make sure it's a thing + m := readChanWithTimeout(t, metrics, 10*time.Second) + + require.Equal(t, "counter_bash", m.Name()) + val, ok := m.GetField("count") + require.True(t, ok) + require.Equal(t, float64(0), val) + // test that a later gather will not panic + e.Gather(acc) 
+} + +func readChanWithTimeout(t *testing.T, metrics chan telegraf.Metric, timeout time.Duration) telegraf.Metric { + to := time.NewTimer(timeout) + defer to.Stop() + select { + case m := <-metrics: + return m + case <-to.C: + require.FailNow(t, "timeout waiting for metric") + } + return nil +} + +func fileShellScriptPath() string { + return "./examples/count.sh" +} + +func shell() string { + return "sh" +} + +type TestMetricMaker struct{} + +func (tm *TestMetricMaker) Name() string { + return "TestPlugin" +} + +func (tm *TestMetricMaker) LogName() string { + return tm.Name() +} + +func (tm *TestMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric { + return metric +} + +func (tm *TestMetricMaker) Log() telegraf.Logger { + return models.NewLogger("TestPlugin", "test", "") +} diff --git a/plugins/inputs/execd/execd_win.go b/plugins/inputs/execd/execd_windows.go similarity index 74% rename from plugins/inputs/execd/execd_win.go rename to plugins/inputs/execd/execd_windows.go index 85ced4a6a..443d8f686 100644 --- a/plugins/inputs/execd/execd_win.go +++ b/plugins/inputs/execd/execd_windows.go @@ -16,7 +16,9 @@ func (e *Execd) Gather(acc telegraf.Accumulator) error { switch e.Signal { case "STDIN": - io.WriteString(e.stdin, "\n") + if _, err := io.WriteString(e.stdin, "\n"); err != nil { + return fmt.Errorf("Error writing to stdin: %s", err) + } case "none": default: return fmt.Errorf("invalid signal: %s", e.Signal) diff --git a/plugins/inputs/execd/shim/README.md b/plugins/inputs/execd/shim/README.md new file mode 100644 index 000000000..f955ef15f --- /dev/null +++ b/plugins/inputs/execd/shim/README.md @@ -0,0 +1,48 @@ +# Telegraf Execd Go Shim + +The goal of this _shim_ is to make it trivial to extract an internal input plugin +out to a stand-alone repo for the purpose of compiling it as a separate app and +running it from the inputs.execd plugin. + +The execd-shim is still experimental and the interface may change in the future, +especially as the concept expands to processors, aggregators, and outputs. + +## Steps to externalize a plugin + +1. Move the project to an external repo, optionally preserving the + _plugins/inputs/plugin_name_ folder structure. For an example of what this might + look like, take a look at [ssoroka/rand](https://github.com/ssoroka/rand) or + [danielnelson/telegraf-plugins](https://github.com/danielnelson/telegraf-plugins) +1. Copy [main.go](./example/cmd/main.go) into your project under the cmd folder. + This will be the entrypoint to the plugin when run as a stand-alone program, and + it will call the shim code for you to make that happen. +1. Edit the main.go file to import your plugin. Within Telegraf this would have + been done in an all.go file, but here we don't split the two apart, and the change + just goes in the top of main.go. If you skip this step, your plugin will do nothing. +1. Optionally add a [plugin.conf](./example/cmd/plugin.conf) for configuration + specific to your plugin. Note that this config file **must be separate from the + rest of the config for Telegraf, and must not be in a shared directory where + Telegraf is expecting to load all configs**. If Telegraf reads this config file + it will not know which plugin it relates to. + +## Steps to build and run your plugin + +1. Build the cmd/main.go. For my rand project this looks like `go build -o rand cmd/main.go` +1. Test out the binary if you haven't done this yet. eg `./rand -config plugin.conf` + Depending on your polling settings and whether you implemented a service plugin or + an input gathering plugin, you may see data right away, or you may have to hit enter + first, or wait for your poll duration to elapse, but the metrics will be written to + STDOUT. Ctrl-C to end your test. +1. Configure Telegraf to call your new plugin binary. eg: + +``` +[[inputs.execd]] + command = ["/path/to/rand", "-config", "/path/to/plugin.conf"] + signal = "none" +``` + +## Congratulations! + +You've done it! Consider publishing your plugin to github and opening a Pull Request +back to the Telegraf repo letting us know about the availability of your +[external plugin](https://github.com/influxdata/telegraf/blob/master/EXTERNAL_PLUGINS.md). \ No newline at end of file
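Before wiring up main.go, it helps to see what a minimal externalized input can look like. A sketch under assumed names (the package, measurement, and `value_name` option are hypothetical; the methods follow the telegraf.Input contract used elsewhere in this patch):

```
package my_plugin_name

import (
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type MyPlugin struct {
	ValueName string `toml:"value_name"`
}

func (p *MyPlugin) SampleConfig() string {
	return "[[inputs.my_plugin_name]]\n  value_name = \"value\"\n"
}

func (p *MyPlugin) Description() string {
	return "a trivial example input for the execd shim"
}

// Gather emits one metric; when run under the shim it is serialized to STDOUT.
func (p *MyPlugin) Gather(acc telegraf.Accumulator) error {
	acc.AddFields("my_plugin_name",
		map[string]interface{}{p.ValueName: 42},
		map[string]string{},
		time.Now())
	return nil
}

func init() {
	// Registering with the global registry is what lets the shim find this
	// plugin by its [[inputs.my_plugin_name]] table name in plugin.conf.
	inputs.Add("my_plugin_name", func() telegraf.Input { return &MyPlugin{} })
}
```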
diff --git a/plugins/inputs/execd/shim/example/cmd/main.go b/plugins/inputs/execd/shim/example/cmd/main.go new file mode 100644 index 000000000..bf8bd50d8 --- /dev/null +++ b/plugins/inputs/execd/shim/example/cmd/main.go @@ -0,0 +1,60 @@ +package main + +import ( + "flag" + "fmt" + "os" + "time" + + // TODO: import your plugins + // _ "github.com/my_github_user/my_plugin_repo/plugins/inputs/mypluginname" + + "github.com/influxdata/telegraf/plugins/inputs/execd/shim" +) + +var pollInterval = flag.Duration("poll_interval", 1*time.Second, "how often to send metrics") +var pollIntervalDisabled = flag.Bool("poll_interval_disabled", false, "set to true to disable polling and gather only when signaled") +var configFile = flag.String("config", "", "path to the config file for this plugin") +var err error + +// This is designed to be simple; just change the import above and you're good. +// +// However, if you want to do all your config in code, you can do it like so: +// +// // initialize your plugin with any settings you want +// myInput := &mypluginname.MyPlugin{ +// DefaultSettingHere: 3, +// } +// +// shim := shim.New() +// +// shim.AddInput(myInput) +// +// // now the shim.Run() call as below. +// +func main() { + // parse command line options + flag.Parse() + if *pollIntervalDisabled { + *pollInterval = shim.PollIntervalDisabled + } + + // create the shim. This is what will run your plugins. + shim := shim.New() + + // If no config is specified, all imported plugins are loaded. + // otherwise follow what the config asks for. 
+ // Check for settings from a config toml file, + // (or just use whatever plugins were imported above) + err = shim.LoadConfig(configFile) + if err != nil { + fmt.Fprintf(os.Stderr, "Err loading input: %s\n", err) + os.Exit(1) + } + + // run the input plugin(s) until stdin closes or we receive a termination signal + if err := shim.Run(*pollInterval); err != nil { + fmt.Fprintf(os.Stderr, "Err: %s\n", err) + os.Exit(1) + } +} diff --git a/plugins/inputs/execd/shim/example/cmd/plugin.conf b/plugins/inputs/execd/shim/example/cmd/plugin.conf new file mode 100644 index 000000000..53f89a559 --- /dev/null +++ b/plugins/inputs/execd/shim/example/cmd/plugin.conf @@ -0,0 +1,2 @@ +[[inputs.my_plugin_name]] + value_name = "value" diff --git a/plugins/inputs/execd/shim/goshim.go b/plugins/inputs/execd/shim/goshim.go new file mode 100644 index 000000000..cd0c4ddec --- /dev/null +++ b/plugins/inputs/execd/shim/goshim.go @@ -0,0 +1,278 @@ +package shim + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/BurntSushi/toml" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/agent" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/serializers/influx" +) + +type empty struct{} + +var ( + gatherPromptChans []chan empty + stdout io.Writer = os.Stdout + stdin io.Reader = os.Stdin +) + +const ( + // PollIntervalDisabled is used to indicate that you want to disable polling, + // as opposed to duration 0 meaning poll constantly. + PollIntervalDisabled = time.Duration(0) +) + +type Shim struct { + Inputs []telegraf.Input +} + +func New() *Shim { + return &Shim{} +} + +// AddInput adds the input to the shim. Later calls to Run() will run this input. +func (s *Shim) AddInput(input telegraf.Input) error { + if p, ok := input.(telegraf.Initializer); ok { + err := p.Init() + if err != nil { + return fmt.Errorf("failed to init input: %s", err) + } + } + + s.Inputs = append(s.Inputs, input) + return nil +} + +// AddInputs adds multiple inputs to the shim. Later calls to Run() will run these. +func (s *Shim) AddInputs(newInputs []telegraf.Input) error { + for _, inp := range newInputs { + if err := s.AddInput(inp); err != nil { + return err + } + } + return nil +} + +// Run the input plugins.. 
+func (s *Shim) Run(pollInterval time.Duration) error { + wg := sync.WaitGroup{} + quit := make(chan os.Signal, 1) + signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM) + + collectMetricsPrompt := make(chan os.Signal, 1) + listenForCollectMetricsSignals(collectMetricsPrompt) + + wg.Add(1) // wait for the metric channel to close + metricCh := make(chan telegraf.Metric, 1) + + serializer := influx.NewSerializer() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + for _, input := range s.Inputs { + wrappedInput := inputShim{Input: input} + + acc := agent.NewAccumulator(wrappedInput, metricCh) + acc.SetPrecision(time.Nanosecond) + + if serviceInput, ok := input.(telegraf.ServiceInput); ok { + if err := serviceInput.Start(acc); err != nil { + return fmt.Errorf("failed to start input: %s", err) + } + } + gatherPromptCh := make(chan empty, 1) + gatherPromptChans = append(gatherPromptChans, gatherPromptCh) + wg.Add(1) + go func(input telegraf.Input) { + startGathering(ctx, input, acc, gatherPromptCh, pollInterval) + if serviceInput, ok := input.(telegraf.ServiceInput); ok { + serviceInput.Stop() + } + wg.Done() + }(input) + } + + go stdinCollectMetricsPrompt(ctx, collectMetricsPrompt) + +loop: + for { + select { + case <-quit: + // cancel, but keep looping until the metric channel closes. + cancel() + case <-collectMetricsPrompt: + collectMetrics(ctx) + case m, open := <-metricCh: + if !open { + wg.Done() + break loop + } + b, err := serializer.Serialize(m) + if err != nil { + return fmt.Errorf("failed to serialize metric: %s", err) + } + // Write this to stdout + fmt.Fprint(stdout, string(b)) + } + } + + wg.Wait() + return nil +} + +func hasQuit(ctx context.Context) bool { + select { + case <-ctx.Done(): + return true + default: + return false + } +} + +func stdinCollectMetricsPrompt(ctx context.Context, collectMetricsPrompt chan<- os.Signal) { + s := bufio.NewScanner(stdin) + // for every line read from stdin, make sure we're not supposed to quit, + // then push a message on to the collectMetricsPrompt + for s.Scan() { + // first check if we should quit + if hasQuit(ctx) { + return + } + + // now push a non-blocking message to trigger metric collection. + pushCollectMetricsRequest(collectMetricsPrompt) + } +} + +// pushCollectMetricsRequest pushes a non-blocking (nil) message to the +// collectMetricsPrompt channel to trigger metric collection. +// The channel is defined with a buffer of 1, so if it's full, duplicated +// requests are discarded. +func pushCollectMetricsRequest(collectMetricsPrompt chan<- os.Signal) { + select { + case collectMetricsPrompt <- nil: + default: + } +} + +func collectMetrics(ctx context.Context) { + if hasQuit(ctx) { + return + } + for i := 0; i < len(gatherPromptChans); i++ { + // push a message out to each channel to collect metrics. don't block. + select { + case gatherPromptChans[i] <- empty{}: + default: + } + } +} + +func startGathering(ctx context.Context, input telegraf.Input, acc telegraf.Accumulator, gatherPromptCh <-chan empty, pollInterval time.Duration) { + if pollInterval == PollIntervalDisabled { + return // don't poll + } + t := time.NewTicker(pollInterval) + defer t.Stop() + for { + // give priority to stopping. 
+ if hasQuit(ctx) { + return + } + // see what's up + select { + case <-ctx.Done(): + return + case <-gatherPromptCh: + if err := input.Gather(acc); err != nil { + fmt.Fprintf(os.Stderr, "failed to gather metrics: %s", err) + } + case <-t.C: + if err := input.Gather(acc); err != nil { + fmt.Fprintf(os.Stderr, "failed to gather metrics: %s", err) + } + } + } +} + +// LoadConfig loads and adds the inputs to the shim +func (s *Shim) LoadConfig(filePath *string) error { + loadedInputs, err := LoadConfig(filePath) + if err != nil { + return err + } + return s.AddInputs(loadedInputs) +} + +// DefaultImportedPlugins defaults to whatever plugins happen to be loaded and +// have registered themselves with the registry. This makes loading plugins +// without having to define a config dead easy. +func DefaultImportedPlugins() (i []telegraf.Input, e error) { + for _, inputCreatorFunc := range inputs.Inputs { + i = append(i, inputCreatorFunc()) + } + return i, nil +} + +// LoadConfig loads the config and returns inputs that later need to be loaded. +func LoadConfig(filePath *string) ([]telegraf.Input, error) { + if filePath == nil { + return DefaultImportedPlugins() + } + + b, err := ioutil.ReadFile(*filePath) + if err != nil { + return nil, err + } + + conf := struct { + Inputs map[string][]toml.Primitive + }{} + + md, err := toml.Decode(string(b), &conf) + if err != nil { + return nil, err + } + + loadedInputs, err := loadConfigIntoInputs(md, conf.Inputs) + + if len(md.Undecoded()) > 0 { + fmt.Fprintf(stdout, "Some plugins were loaded but not used: %q\n", md.Undecoded()) + } + return loadedInputs, err +} + +func loadConfigIntoInputs(md toml.MetaData, inputConfigs map[string][]toml.Primitive) ([]telegraf.Input, error) { + renderedInputs := []telegraf.Input{} + + for name, primitives := range inputConfigs { + inputCreator, ok := inputs.Inputs[name] + if !ok { + return nil, errors.New("unknown input " + name) + } + + for _, primitive := range primitives { + inp := inputCreator() + // Parse specific configuration + if err := md.PrimitiveDecode(primitive, inp); err != nil { + return nil, err + } + + renderedInputs = append(renderedInputs, inp) + } + } + return renderedInputs, nil +} diff --git a/plugins/inputs/execd/shim/goshim_notwindows.go b/plugins/inputs/execd/shim/goshim_notwindows.go new file mode 100644 index 000000000..67d41884f --- /dev/null +++ b/plugins/inputs/execd/shim/goshim_notwindows.go @@ -0,0 +1,14 @@ +// +build !windows + +package shim + +import ( + "os" + "os/signal" + "syscall" +) + +func listenForCollectMetricsSignals(collectMetricsPrompt chan os.Signal) { + // just listen to all the signals. + signal.Notify(collectMetricsPrompt, syscall.SIGHUP, syscall.SIGUSR1, syscall.SIGUSR2) +} diff --git a/plugins/inputs/execd/shim/goshim_windows.go b/plugins/inputs/execd/shim/goshim_windows.go new file mode 100644 index 000000000..a6bfd1ede --- /dev/null +++ b/plugins/inputs/execd/shim/goshim_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package shim + +import ( + "os" + "os/signal" + "syscall" +) + +func listenForCollectMetricsSignals(collectMetricsPrompt chan os.Signal) { + signal.Notify(collectMetricsPrompt, syscall.SIGHUP) +} diff --git a/plugins/inputs/execd/shim/input.go b/plugins/inputs/execd/shim/input.go new file mode 100644 index 000000000..6dff9cd7f --- /dev/null +++ b/plugins/inputs/execd/shim/input.go @@ -0,0 +1,20 @@ +package shim + +import "github.com/influxdata/telegraf" + +// inputShim implements the MetricMaker interface. 
+type inputShim struct { + Input telegraf.Input +} + +func (i inputShim) LogName() string { + return "" +} + +func (i inputShim) MakeMetric(m telegraf.Metric) telegraf.Metric { + return m // don't need to do anything to it. +} + +func (i inputShim) Log() telegraf.Logger { + return nil +} diff --git a/plugins/inputs/execd/shim/shim_posix_test.go b/plugins/inputs/execd/shim/shim_posix_test.go new file mode 100644 index 000000000..85053130f --- /dev/null +++ b/plugins/inputs/execd/shim/shim_posix_test.go @@ -0,0 +1,76 @@ +// +build !windows + +package shim + +import ( + "bytes" + "context" + "os" + "runtime" + "strings" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestShimUSR1SignalingWorks(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip() + return + } + stdoutBytes := bytes.NewBufferString("") + stdout = stdoutBytes + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + wait := runInputPlugin(t, 40*time.Second) + + // sleep a bit to avoid a race condition where the input hasn't loaded yet. + time.Sleep(10 * time.Millisecond) + + // signal USR1 to yourself. + pid := os.Getpid() + process, err := os.FindProcess(pid) + require.NoError(t, err) + + go func() { + // On slow machines this signal can fire before the service comes up. + // rather than depend on accurate sleep times, we'll just retry sending + // the signal every so often until it goes through. + for { + select { + case <-ctx.Done(): + return // test is done + default: + // test isn't done, keep going. + process.Signal(syscall.SIGUSR1) + time.Sleep(200 * time.Millisecond) + } + } + }() + + timeout := time.NewTimer(10 * time.Second) + + select { + case <-wait: + case <-timeout.C: + require.Fail(t, "Timeout waiting for metric to arrive") + } + + for stdoutBytes.Len() == 0 { + select { + case <-timeout.C: + require.Fail(t, "Timeout waiting to read metric from stdout") + return + default: + time.Sleep(10 * time.Millisecond) + } + } + + out := string(stdoutBytes.Bytes()) + require.Contains(t, out, "\n") + metricLine := strings.Split(out, "\n")[0] + require.Equal(t, "measurement,tag=tag field=1i 1234000005678", metricLine) +} diff --git a/plugins/inputs/execd/shim/shim_test.go b/plugins/inputs/execd/shim/shim_test.go new file mode 100644 index 000000000..9d97bd239 --- /dev/null +++ b/plugins/inputs/execd/shim/shim_test.go @@ -0,0 +1,119 @@ +package shim + +import ( + "bytes" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" +) + +func TestShimWorks(t *testing.T) { + stdoutBytes := bytes.NewBufferString("") + stdout = stdoutBytes + + timeout := time.NewTimer(10 * time.Second) + wait := runInputPlugin(t, 10*time.Millisecond) + + select { + case <-wait: + case <-timeout.C: + require.Fail(t, "Timeout waiting for metric to arrive") + } + for stdoutBytes.Len() == 0 { + select { + case <-timeout.C: + require.Fail(t, "Timeout waiting to read metric from stdout") + return + default: + time.Sleep(10 * time.Millisecond) + } + } + + out := string(stdoutBytes.Bytes()) + require.Contains(t, out, "\n") + metricLine := strings.Split(out, "\n")[0] + require.Equal(t, "measurement,tag=tag field=1i 1234000005678", metricLine) +} + +func TestShimStdinSignalingWorks(t *testing.T) { + stdoutBytes := bytes.NewBufferString("") + stdout = stdoutBytes + stdinBytes := bytes.NewBufferString("") + stdin = stdinBytes + + timeout := time.NewTimer(10 * time.Second) + wait := runInputPlugin(t, 40*time.Second) + + 
stdinBytes.WriteString("\n") + + select { + case <-wait: + case <-timeout.C: + require.Fail(t, "Timeout waiting for metric to arrive") + } + + for stdoutBytes.Len() == 0 { + select { + case <-timeout.C: + require.Fail(t, "Timeout waiting to read metric from stdout") + return + default: + time.Sleep(10 * time.Millisecond) + } + } + + out := string(stdoutBytes.Bytes()) + require.Contains(t, out, "\n") + metricLine := strings.Split(out, "\n")[0] + require.Equal(t, "measurement,tag=tag field=1i 1234000005678", metricLine) +} + +func runInputPlugin(t *testing.T, timeout time.Duration) chan bool { + wait := make(chan bool) + inp := &testInput{ + wait: wait, + } + + shim := New() + shim.AddInput(inp) + go func() { + err := shim.Run(timeout) // we aren't using the timer here + require.NoError(t, err) + }() + return wait +} + +type testInput struct { + wait chan bool +} + +func (i *testInput) SampleConfig() string { + return "" +} + +func (i *testInput) Description() string { + return "" +} + +func (i *testInput) Gather(acc telegraf.Accumulator) error { + acc.AddFields("measurement", + map[string]interface{}{ + "field": 1, + }, + map[string]string{ + "tag": "tag", + }, time.Unix(1234, 5678)) + i.wait <- true + return nil +} + +func (i *testInput) Start(acc telegraf.Accumulator) error { + return nil +} + +func (i *testInput) Stop() { +} diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index aec806da1..b524cf9e4 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -14,7 +14,7 @@ import ( "github.com/harlow/kinesis-consumer/checkpoint/ddb" "github.com/influxdata/telegraf" - internalaws "github.com/influxdata/telegraf/internal/config/aws" + internalaws "github.com/influxdata/telegraf/config/aws" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" ) diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index 1ae8bd4f8..5e59ba2aa 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -11,7 +11,7 @@ import ( "github.com/aws/aws-sdk-go/service/cloudwatch" "github.com/influxdata/telegraf" - internalaws "github.com/influxdata/telegraf/internal/config/aws" + internalaws "github.com/influxdata/telegraf/config/aws" "github.com/influxdata/telegraf/plugins/outputs" ) diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index d2d482ff3..f6b205b1e 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/service/kinesis" "github.com/gofrs/uuid" "github.com/influxdata/telegraf" - internalaws "github.com/influxdata/telegraf/internal/config/aws" + internalaws "github.com/influxdata/telegraf/config/aws" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" ) From 145bad61944ec92979e1abd99dc0ae18fd68e17f Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Mon, 4 May 2020 15:35:10 -0700 Subject: [PATCH 1732/1815] Fix filepath processor link in changelog (#7454) --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8108bfed9..1c810cdea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ #### New Processors -- [filepath](/plugins/processors/filepathdedup/README.md) - 
Contributed by @kir4h +- [filepath](/plugins/processors/filepath/README.md) - Contributed by @kir4h #### Features From 973aec42b36b9b0a6dc0ce6eac6f1c35ab9fc958 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 4 May 2020 16:21:32 -0700 Subject: [PATCH 1733/1815] Remove debug fields from spunkmetric serializer (#7455) --- plugins/serializers/splunkmetric/README.md | 2 -- .../serializers/splunkmetric/splunkmetric.go | 7 +----- .../splunkmetric/splunkmetric_test.go | 25 +++++++++---------- 3 files changed, 13 insertions(+), 21 deletions(-) diff --git a/plugins/serializers/splunkmetric/README.md b/plugins/serializers/splunkmetric/README.md index 47ad8e1bf..96049e45f 100644 --- a/plugins/serializers/splunkmetric/README.md +++ b/plugins/serializers/splunkmetric/README.md @@ -38,8 +38,6 @@ you can send all of your CPU stats in one JSON struct, an example event looks li "event": "metric", "host": "mono.local", "fields": { - "_config_hecRouting": false, - "_config_multiMetric": true, "class": "osx", "cpu": "cpu0", "metric_name:telegraf.cpu.usage_guest": 0, diff --git a/plugins/serializers/splunkmetric/splunkmetric.go b/plugins/serializers/splunkmetric/splunkmetric.go index 77c724aa8..772771a10 100644 --- a/plugins/serializers/splunkmetric/splunkmetric.go +++ b/plugins/serializers/splunkmetric/splunkmetric.go @@ -179,12 +179,7 @@ func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte, e // The tags are common to all events in this timeseries commonTags := CommonTags{} - commonObj := map[string]interface{}{} - - commonObj["config:hecRouting"] = s.HecRouting - commonObj["config:multiMetric"] = s.SplunkmetricMultiMetric - - commonTags.Fields = commonObj + commonTags.Fields = map[string]interface{}{} // Break tags out into key(n)=value(t) pairs for n, t := range metric.Tags() { diff --git a/plugins/serializers/splunkmetric/splunkmetric_test.go b/plugins/serializers/splunkmetric/splunkmetric_test.go index 70037e28a..5ce5265d8 100644 --- a/plugins/serializers/splunkmetric/splunkmetric_test.go +++ b/plugins/serializers/splunkmetric/splunkmetric_test.go @@ -4,10 +4,9 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/assert" ) func MustMetric(v telegraf.Metric, err error) telegraf.Metric { @@ -33,7 +32,7 @@ func TestSerializeMetricFloat(t *testing.T) { var buf []byte buf, err = s.Serialize(m) assert.NoError(t, err) - expS := `{"_value":91.5,"config:hecRouting":false,"config:multiMetric":false,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":1529875740.819}` + expS := `{"_value":91.5,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":1529875740.819}` assert.Equal(t, string(expS), string(buf)) } @@ -53,7 +52,7 @@ func TestSerializeMetricFloatHec(t *testing.T) { var buf []byte buf, err = s.Serialize(m) assert.NoError(t, err) - expS := `{"time":1529875740.819,"event":"metric","fields":{"_value":91.5,"config:hecRouting":true,"config:multiMetric":false,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` + expS := `{"time":1529875740.819,"event":"metric","fields":{"_value":91.5,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` assert.Equal(t, string(expS), string(buf)) } @@ -73,7 +72,7 @@ func TestSerializeMetricInt(t *testing.T) { buf, err = s.Serialize(m) assert.NoError(t, err) - expS := `{"_value":90,"config:hecRouting":false,"config:multiMetric":false,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` + expS := 
`{"_value":90,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` assert.Equal(t, string(expS), string(buf)) } @@ -93,7 +92,7 @@ func TestSerializeMetricIntHec(t *testing.T) { buf, err = s.Serialize(m) assert.NoError(t, err) - expS := `{"time":0,"event":"metric","fields":{"_value":90,"config:hecRouting":true,"config:multiMetric":false,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` + expS := `{"time":0,"event":"metric","fields":{"_value":90,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` assert.Equal(t, string(expS), string(buf)) } @@ -113,7 +112,7 @@ func TestSerializeMetricBool(t *testing.T) { buf, err = s.Serialize(m) assert.NoError(t, err) - expS := `{"_value":1,"config:hecRouting":false,"config:multiMetric":false,"container-name":"telegraf-test","metric_name":"docker.oomkiller","time":0}` + expS := `{"_value":1,"container-name":"telegraf-test","metric_name":"docker.oomkiller","time":0}` assert.Equal(t, string(expS), string(buf)) } @@ -133,7 +132,7 @@ func TestSerializeMetricBoolHec(t *testing.T) { buf, err = s.Serialize(m) assert.NoError(t, err) - expS := `{"time":0,"event":"metric","fields":{"_value":0,"config:hecRouting":true,"config:multiMetric":false,"container-name":"telegraf-test","metric_name":"docker.oomkiller"}}` + expS := `{"time":0,"event":"metric","fields":{"_value":0,"container-name":"telegraf-test","metric_name":"docker.oomkiller"}}` assert.Equal(t, string(expS), string(buf)) } @@ -154,7 +153,7 @@ func TestSerializeMetricString(t *testing.T) { buf, err = s.Serialize(m) assert.NoError(t, err) - expS := `{"_value":5,"config:hecRouting":false,"config:multiMetric":false,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` + expS := `{"_value":5,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` assert.Equal(t, string(expS), string(buf)) assert.NoError(t, err) } @@ -186,7 +185,7 @@ func TestSerializeBatch(t *testing.T) { buf, err := s.SerializeBatch(metrics) assert.NoError(t, err) - expS := `{"_value":42,"config:hecRouting":false,"config:multiMetric":false,"metric_name":"cpu.value","time":0}{"_value":92,"config:hecRouting":false,"config:multiMetric":false,"metric_name":"cpu.value","time":0}` + expS := `{"_value":42,"metric_name":"cpu.value","time":0}{"_value":92,"metric_name":"cpu.value","time":0}` assert.Equal(t, string(expS), string(buf)) } @@ -208,7 +207,7 @@ func TestSerializeMulti(t *testing.T) { buf, err := s.SerializeBatch(metrics) assert.NoError(t, err) - expS := `{"config:hecRouting":false,"config:multiMetric":true,"metric_name:cpu.system":8,"metric_name:cpu.user":42,"time":0}` + expS := `{"metric_name:cpu.system":8,"metric_name:cpu.user":42,"time":0}` assert.Equal(t, string(expS), string(buf)) } @@ -239,7 +238,7 @@ func TestSerializeBatchHec(t *testing.T) { buf, err := s.SerializeBatch(metrics) assert.NoError(t, err) - expS := `{"time":0,"event":"metric","fields":{"_value":42,"config:hecRouting":true,"config:multiMetric":false,"metric_name":"cpu.value"}}{"time":0,"event":"metric","fields":{"_value":92,"config:hecRouting":true,"config:multiMetric":false,"metric_name":"cpu.value"}}` + expS := `{"time":0,"event":"metric","fields":{"_value":42,"metric_name":"cpu.value"}}{"time":0,"event":"metric","fields":{"_value":92,"metric_name":"cpu.value"}}` assert.Equal(t, string(expS), string(buf)) } @@ -261,6 +260,6 @@ func TestSerializeMultiHec(t *testing.T) { buf, err := s.SerializeBatch(metrics) assert.NoError(t, err) - expS := 
`{"time":0,"event":"metric","fields":{"config:hecRouting":true,"config:multiMetric":true,"metric_name:cpu.system":8,"metric_name:cpu.usage":42}}` + expS := `{"time":0,"event":"metric","fields":{"metric_name:cpu.system":8,"metric_name:cpu.usage":42}}` assert.Equal(t, string(expS), string(buf)) } From f351e6a68f5e934c1c55ea80b42f295cf318fafc Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 4 May 2020 16:25:20 -0700 Subject: [PATCH 1734/1815] Update changelog --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1c810cdea..a1a6a722e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,10 @@ - The `logparser` input is deprecated, use the `tail` input with `data_format = "grok"` as a replacement. +- Several fields used primarily for debugging have been removed from the + `splunkmetric` serializer; if you are making use of these fields, they can be + added back with the `tag` option. + #### New Processors - [filepath](/plugins/processors/filepath/README.md) - Contributed by @kir4h #### Features @@ -31,6 +35,7 @@ - [#7371](https://github.com/influxdata/telegraf/issues/7371): Fix unable to write metrics to CloudWatch with IMDSv1 disabled. - [#7233](https://github.com/influxdata/telegraf/issues/7233): Fix vSphere 6.7 missing data issue. +- [#7448](https://github.com/influxdata/telegraf/issues/7448): Remove debug fields from splunkmetric serializer. ## v1.14.3 [unreleased] From d16485e1a39ad530a2698048713229948234e45b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 4 May 2020 16:26:05 -0700 Subject: [PATCH 1735/1815] Fix gzip support in socket_listener with tcp sockets (#7446) --- internal/content_coding.go | 62 ++++++++++++++++++- internal/content_coding_test.go | 36 +++++++++++ .../inputs/socket_listener/socket_listener.go | 33 +++++----- .../socket_listener/socket_listener_test.go | 6 +- 4 files changed, 113 insertions(+), 24 deletions(-) diff --git a/internal/content_coding.go b/internal/content_coding.go index 936dd9562..daefa20ee 100644 --- a/internal/content_coding.go +++ b/internal/content_coding.go @@ -1,18 +1,78 @@ package internal import ( + "bufio" "bytes" "compress/gzip" "errors" "io" ) +// NewStreamContentDecoder returns a reader that will decode the stream +// according to the encoding type. +func NewStreamContentDecoder(encoding string, r io.Reader) (io.Reader, error) { + switch encoding { + case "gzip": + return NewGzipReader(r) + case "identity", "": + return r, nil + default: + return nil, errors.New("invalid value for content_encoding") + } +} + +// GzipReader is similar to gzip.Reader but reads only a single gzip stream per read. +type GzipReader struct { + r io.Reader + z *gzip.Reader + endOfStream bool +} + +func NewGzipReader(r io.Reader) (io.Reader, error) { + // We need a reader that implements ByteReader in order to line up the next + // stream. + br := bufio.NewReader(r) + + // Reads the first gzip stream header. + z, err := gzip.NewReader(br) + if err != nil { + return nil, err + } + + // Prevent future calls to Read from reading the following gzip header. + z.Multistream(false) + + return &GzipReader{r: br, z: z}, nil +} + +func (r *GzipReader) Read(b []byte) (int, error) { + if r.endOfStream { + // Reads the next gzip header and prepares for the next stream. + err := r.z.Reset(r.r) + if err != nil { + return 0, err + } + r.z.Multistream(false) + r.endOfStream = false + } + + n, err := r.z.Read(b) + + // Since multistream is disabled, io.EOF indicates the end of the gzip + // sequence. On the next read we must read the next gzip header. + if err == io.EOF { + r.endOfStream = true + return n, nil + } + return n, err + +} + // NewContentEncoder returns a ContentEncoder for the encoding type. func NewContentEncoder(encoding string) (ContentEncoder, error) { switch encoding { case "gzip": return NewGzipEncoder() - case "identity", "": return NewIdentityEncoder(), nil default:
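The Multistream(false)/Reset pairing above is the heart of this fix; a stand-alone sketch of the underlying compress/gzip behavior it builds on (illustrative only, not part of the patch):

```
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

func main() {
	// Two independent gzip streams back to back, as produced by a client
	// that compresses each write separately.
	var buf bytes.Buffer
	for _, s := range []string{"cpu value=1\n", "cpu value=2\n"} {
		zw := gzip.NewWriter(&buf)
		zw.Write([]byte(s))
		zw.Close()
	}

	// With Multistream(false) the reader stops at the first stream
	// boundary instead of silently decoding the concatenation; Reset then
	// lines the reader up on the next gzip header, which is exactly what
	// GzipReader automates.
	zr, _ := gzip.NewReader(&buf)
	zr.Multistream(false)
	first, _ := ioutil.ReadAll(zr)
	fmt.Printf("%q\n", first) // "cpu value=1\n"

	zr.Reset(&buf)
	zr.Multistream(false)
	second, _ := ioutil.ReadAll(zr)
	fmt.Printf("%q\n", second) // "cpu value=2\n"
}
```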
diff --git a/internal/content_coding_test.go b/internal/content_coding_test.go index 031633112..85496df59 100644 --- a/internal/content_coding_test.go +++ b/internal/content_coding_test.go @@ -1,6 +1,8 @@ package internal import ( + "bytes" + "io/ioutil" "testing" "github.com/stretchr/testify/require" ) @@ -56,3 +58,37 @@ func TestIdentityEncodeDecode(t *testing.T) { require.Equal(t, "howdy", string(actual)) } + +func TestStreamIdentityDecode(t *testing.T) { + var r bytes.Buffer + n, err := r.Write([]byte("howdy")) + require.NoError(t, err) + require.Equal(t, 5, n) + + dec, err := NewStreamContentDecoder("identity", &r) + require.NoError(t, err) + + data, err := ioutil.ReadAll(dec) + require.NoError(t, err) + + require.Equal(t, []byte("howdy"), data) +} + +func TestStreamGzipDecode(t *testing.T) { + enc, err := NewGzipEncoder() + require.NoError(t, err) + written, err := enc.Encode([]byte("howdy")) + require.NoError(t, err) + + w := bytes.NewBuffer(written) + + dec, err := NewStreamContentDecoder("gzip", w) + require.NoError(t, err) + + b := make([]byte, 10) + n, err := dec.Read(b) + require.NoError(t, err) + require.Equal(t, 5, n) + + require.Equal(t, []byte("howdy"), b[:n]) +} diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go index b1e933851..d79030f66 100644 --- a/plugins/inputs/socket_listener/socket_listener.go +++ b/plugins/inputs/socket_listener/socket_listener.go @@ -111,7 +111,12 @@ func (ssl *streamSocketListener) read(c net.Conn) { defer ssl.removeConnection(c) defer c.Close() - scnr := bufio.NewScanner(c) + decoder, err := internal.NewStreamContentDecoder(ssl.ContentEncoding, c) + if err != nil { + ssl.Log.Errorf("Read error: %v", err) + } + + scnr := bufio.NewScanner(decoder) for { if ssl.ReadTimeout != nil && ssl.ReadTimeout.Duration > 0 { c.SetReadDeadline(time.Now().Add(ssl.ReadTimeout.Duration)) @@ -120,11 +125,7 @@ break } - body, err := ssl.decoder.Decode(scnr.Bytes()) - if err != nil { - ssl.Log.Errorf("Unable to decode incoming line: %s", err.Error()) - continue - } + body := scnr.Bytes() metrics, err := ssl.Parse(body) if err != nil { @@ -149,6 +150,7 @@ type packetSocketListener struct { net.PacketConn *SocketListener + decoder internal.ContentDecoder } func (psl *packetSocketListener) listen() { @@ -196,7 +198,6 @@ type SocketListener struct { parsers.Parser telegraf.Accumulator io.Closer - decoder internal.ContentDecoder } func (sl *SocketListener) Description() string { @@ -283,12 +284,6 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { protocol := spl[0] addr := spl[1] - var err error - sl.decoder, err = internal.NewContentDecoder(sl.ContentEncoding) - if err != nil { - return err - } - if protocol == "unix" || protocol == "unixpacket" || protocol == "unixgram" { // no good way of testing for "file does not exist". // Instead just ignore error and blow up when we try to listen, which will @@ -298,16 +293,12 @@ switch protocol { case "tcp", "tcp4", "tcp6", "unix", "unixpacket": - var ( - err error - l net.Listener - ) - tlsCfg, err := sl.ServerConfig.TLSConfig() if err != nil { return err } + var l net.Listener if tlsCfg == nil { l, err = net.Listen(protocol, addr) } else { @@ -344,6 +335,11 @@ ssl.listen() }() case "udp", "udp4", "udp6", "ip", "ip4", "ip6", "unixgram": + decoder, err := internal.NewContentDecoder(sl.ContentEncoding) + if err != nil { + return err + } + pc, err := udpListen(protocol, addr) if err != nil { return err @@ -373,6 +369,7 @@ psl := &packetSocketListener{ PacketConn: pc, SocketListener: sl, + decoder: decoder, } sl.Closer = psl
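To exercise the stream decoder end to end, a hypothetical client for a TCP listener running with gzip encoding could compress each batch as its own stream; the address and the `content_encoding = "gzip"` option name here are assumptions for the sketch:

```
package main

import (
	"compress/gzip"
	"log"
	"net"
)

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:8094")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Each Close finishes one complete gzip stream (it does not close the
	// connection); the listener's GzipReader decodes stream after stream.
	zw := gzip.NewWriter(conn)
	zw.Write([]byte("test,foo=bar v=1i 123456789\n"))
	zw.Close()
}
```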
diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index c6adf4cde..a46add15c 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -222,7 +222,7 @@ func TestSocketListenerDecode_udp(t *testing.T) { func testSocketListener(t *testing.T, sl *SocketListener, client net.Conn) { mstr12 := []byte("test,foo=bar v=1i 123456789\ntest,foo=baz v=2i 123456790\n") - mstr3 := []byte("test,foo=zab v=3i 123456791") + mstr3 := []byte("test,foo=zab v=3i 123456791\n") if sl.ContentEncoding == "gzip" { encoder, err := internal.NewContentEncoder(sl.ContentEncoding) @@ -238,10 +238,6 @@ func testSocketListener(t *testing.T, sl *SocketListener, client net.Conn) { client.Write(mstr12) client.Write(mstr3) - if client.LocalAddr().Network() != "udp" { - // stream connection. needs trailing newline to terminate mstr3 - client.Write([]byte{'\n'}) - } acc := sl.Accumulator.(*testutil.Accumulator) From 8ee12d07a1832b68215410e983cd6675d6785cce Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 4 May 2020 16:27:03 -0700 Subject: [PATCH 1736/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a1a6a722e..991dca8af 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ - [#7371](https://github.com/influxdata/telegraf/issues/7371): Fix unable to write metrics to CloudWatch with IMDSv1 disabled. - [#7233](https://github.com/influxdata/telegraf/issues/7233): Fix vSphere 6.7 missing data issue. - [#7448](https://github.com/influxdata/telegraf/issues/7448): Remove debug fields from splunkmetric serializer. +- [#7446](https://github.com/influxdata/telegraf/issues/7446): Fix gzip support in socket_listener with tcp sockets. 
## v1.14.3 [unreleased] From cc927357a44b3bacab39260625eea405b089b6fc Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 5 May 2020 10:14:57 -0400 Subject: [PATCH 1737/1815] shim improvements for docs, clean quit, and slow readers (#7452) --- ...legraf_notwindows.go => telegraf_posix.go} | 0 plugins/inputs/execd/README.md | 21 +++-- plugins/inputs/execd/execd.go | 2 +- plugins/inputs/execd/execd_posix.go | 5 ++ plugins/inputs/execd/execd_test.go | 3 +- plugins/inputs/execd/execd_windows.go | 5 ++ plugins/inputs/execd/shim/goshim.go | 82 ++++++++++++------- .../inputs/execd/shim/goshim_notwindows.go | 14 ---- plugins/inputs/execd/shim/goshim_posix.go | 23 ++++++ plugins/inputs/execd/shim/goshim_windows.go | 11 ++- plugins/inputs/execd/shim/shim_posix_test.go | 38 ++++----- plugins/inputs/execd/shim/shim_test.go | 59 ++++++------- 12 files changed, 159 insertions(+), 104 deletions(-) rename cmd/telegraf/{telegraf_notwindows.go => telegraf_posix.go} (100%) delete mode 100644 plugins/inputs/execd/shim/goshim_notwindows.go create mode 100644 plugins/inputs/execd/shim/goshim_posix.go diff --git a/cmd/telegraf/telegraf_notwindows.go b/cmd/telegraf/telegraf_posix.go similarity index 100% rename from cmd/telegraf/telegraf_notwindows.go rename to cmd/telegraf/telegraf_posix.go diff --git a/plugins/inputs/execd/README.md b/plugins/inputs/execd/README.md index 022311924..f8709c8be 100644 --- a/plugins/inputs/execd/README.md +++ b/plugins/inputs/execd/README.md @@ -1,9 +1,13 @@ # Execd Input Plugin -The `execd` plugin runs an external program as a daemon. The programs must output metrics in any one of the accepted -[Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md) on its standard output. +The `execd` plugin runs an external program as a long-running daemon. +The program must output metrics in any one of the accepted +[Input Data Formats](input_formats) on the process's STDOUT, and is expected to +stay running. If you'd instead like the process to collect metrics and then exit, +check out the [inputs.exec](exec_plugin) plugin. -The `signal` can be configured to send a signal the running daemon on each collection interval. +The `signal` can be configured to send a signal to the running daemon on each +collection interval. Program output on standard error is mirrored to the telegraf log. @@ -16,10 +20,10 @@ Program output on standard error is mirrored to the telegraf log. ## Define how the process is signaled on each collection interval. ## Valid values are: - ## "none" : Do not signal anything. - ## The process must output metrics by itself. - ## "STDIN" : Send a newline on STDIN. - ## "SIGHUP" : Send a HUP signal. Not available on Windows. + ## "none" : Do not signal anything. (Recommended for service inputs) + ## The process must output metrics by itself. + ## "STDIN" : Send a newline on STDIN. (Recommended for gather inputs) + ## "SIGHUP" : Send a HUP signal. Not available on Windows. (not recommended) ## "SIGUSR1" : Send a USR1 signal. Not available on Windows. ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. 
signal = "none" @@ -110,3 +114,6 @@ end command = ["plugins/inputs/execd/examples/count.rb"] signal = "none" ``` + +[input_formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +[exec_plugin]: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/exec/README.md diff --git a/plugins/inputs/execd/execd.go b/plugins/inputs/execd/execd.go index 90a5ceffb..b162c9776 100644 --- a/plugins/inputs/execd/execd.go +++ b/plugins/inputs/execd/execd.go @@ -75,7 +75,7 @@ func (e *Execd) Start(acc telegraf.Accumulator) error { return fmt.Errorf("FATAL no command specified") } - e.wg.Add(1) + e.wg.Add(1) // for the main loop ctx, cancel := context.WithCancel(context.Background()) e.cancel = cancel diff --git a/plugins/inputs/execd/execd_posix.go b/plugins/inputs/execd/execd_posix.go index 919447260..d2389c52f 100644 --- a/plugins/inputs/execd/execd_posix.go +++ b/plugins/inputs/execd/execd_posix.go @@ -5,7 +5,9 @@ package execd import ( "fmt" "io" + "os" "syscall" + "time" "github.com/influxdata/telegraf" ) @@ -23,6 +25,9 @@ func (e *Execd) Gather(acc telegraf.Accumulator) error { case "SIGUSR2": e.cmd.Process.Signal(syscall.SIGUSR2) case "STDIN": + if osStdin, ok := e.stdin.(*os.File); ok { + osStdin.SetWriteDeadline(time.Now().Add(1 * time.Second)) + } if _, err := io.WriteString(e.stdin, "\n"); err != nil { return fmt.Errorf("Error writing to stdin: %s", err) } diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go index b78075e95..1c687a9df 100644 --- a/plugins/inputs/execd/execd_test.go +++ b/plugins/inputs/execd/execd_test.go @@ -33,11 +33,12 @@ func TestExternalInputWorks(t *testing.T) { require.NoError(t, e.Start(acc)) require.NoError(t, e.Gather(acc)) - e.Stop() // grab a metric and make sure it's a thing m := readChanWithTimeout(t, metrics, 10*time.Second) + e.Stop() + require.Equal(t, "counter_bash", m.Name()) val, ok := m.GetField("count") require.True(t, ok) diff --git a/plugins/inputs/execd/execd_windows.go b/plugins/inputs/execd/execd_windows.go index 443d8f686..c0dc0e846 100644 --- a/plugins/inputs/execd/execd_windows.go +++ b/plugins/inputs/execd/execd_windows.go @@ -5,6 +5,8 @@ package execd import ( "fmt" "io" + "os" + "time" "github.com/influxdata/telegraf" ) @@ -16,6 +18,9 @@ func (e *Execd) Gather(acc telegraf.Accumulator) error { switch e.Signal { case "STDIN": + if osStdin, ok := e.stdin.(*os.File); ok { + osStdin.SetWriteDeadline(time.Now().Add(1 * time.Second)) + } if _, err := io.WriteString(e.stdin, "\n"); err != nil { return fmt.Errorf("Error writing to stdin: %s", err) } diff --git a/plugins/inputs/execd/shim/goshim.go b/plugins/inputs/execd/shim/goshim.go index cd0c4ddec..3741d2b80 100644 --- a/plugins/inputs/execd/shim/goshim.go +++ b/plugins/inputs/execd/shim/goshim.go @@ -23,9 +23,9 @@ import ( type empty struct{} var ( - gatherPromptChans []chan empty - stdout io.Writer = os.Stdout - stdin io.Reader = os.Stdin + stdout io.Writer = os.Stdout + stdin io.Reader = os.Stdin + forever = 100 * 365 * 24 * time.Hour ) const ( @@ -34,10 +34,15 @@ const ( PollIntervalDisabled = time.Duration(0) ) +// Shim allows you to wrap your inputs and run them as if they were part of Telegraf, +// except built externally. 
type Shim struct { - Inputs []telegraf.Input + Inputs []telegraf.Input + gatherPromptChans []chan empty + metricCh chan telegraf.Metric } +// New creates a new shim interface func New() *Shim { return &Shim{} } @@ -67,25 +72,26 @@ func (s *Shim) AddInputs(newInputs []telegraf.Input) error { // Run the input plugins.. func (s *Shim) Run(pollInterval time.Duration) error { + // context is used only to close the stdin reader. everything else cascades + // from that point and closes cleanly when it's done. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s.metricCh = make(chan telegraf.Metric, 1) + wg := sync.WaitGroup{} quit := make(chan os.Signal, 1) signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM) collectMetricsPrompt := make(chan os.Signal, 1) - listenForCollectMetricsSignals(collectMetricsPrompt) - - wg.Add(1) // wait for the metric channel to close - metricCh := make(chan telegraf.Metric, 1) + listenForCollectMetricsSignals(ctx, collectMetricsPrompt) serializer := influx.NewSerializer() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - for _, input := range s.Inputs { wrappedInput := inputShim{Input: input} - acc := agent.NewAccumulator(wrappedInput, metricCh) + acc := agent.NewAccumulator(wrappedInput, s.metricCh) acc.SetPrecision(time.Nanosecond) if serviceInput, ok := input.(telegraf.ServiceInput); ok { @@ -94,30 +100,35 @@ func (s *Shim) Run(pollInterval time.Duration) error { } } gatherPromptCh := make(chan empty, 1) - gatherPromptChans = append(gatherPromptChans, gatherPromptCh) + s.gatherPromptChans = append(s.gatherPromptChans, gatherPromptCh) wg.Add(1) go func(input telegraf.Input) { startGathering(ctx, input, acc, gatherPromptCh, pollInterval) if serviceInput, ok := input.(telegraf.ServiceInput); ok { serviceInput.Stop() } + close(gatherPromptCh) wg.Done() }(input) } - go stdinCollectMetricsPrompt(ctx, collectMetricsPrompt) + go s.stdinCollectMetricsPrompt(ctx, cancel, collectMetricsPrompt) + go s.closeMetricChannelWhenInputsFinish(&wg) loop: for { select { - case <-quit: + case <-quit: // user-triggered quit // cancel, but keep looping until the metric channel closes. cancel() - case <-collectMetricsPrompt: - collectMetrics(ctx) - case m, open := <-metricCh: + case _, open := <-collectMetricsPrompt: + if !open { // stdin-close-triggered quit + cancel() + continue + } + s.collectMetrics(ctx) + case m, open := <-s.metricCh: if !open { - wg.Done() break loop } b, err := serializer.Serialize(m) @@ -129,7 +140,6 @@ loop: } } - wg.Wait() return nil } @@ -142,11 +152,16 @@ func hasQuit(ctx context.Context) bool { } } -func stdinCollectMetricsPrompt(ctx context.Context, collectMetricsPrompt chan<- os.Signal) { - s := bufio.NewScanner(stdin) +func (s *Shim) stdinCollectMetricsPrompt(ctx context.Context, cancel context.CancelFunc, collectMetricsPrompt chan<- os.Signal) { + defer func() { + cancel() + close(collectMetricsPrompt) + }() + + scanner := bufio.NewScanner(stdin) // for every line read from stdin, make sure we're not supposed to quit, // then push a message on to the collectMetricsPrompt - for s.Scan() { + for scanner.Scan() { // first check if we should quit if hasQuit(ctx) { return @@ -159,7 +174,7 @@ func stdinCollectMetricsPrompt(ctx context.Context, collectMetricsPrompt chan<- // pushCollectMetricsRequest pushes a non-blocking (nil) message to the // collectMetricsPrompt channel to trigger metric collection. 
-// The channel is defined with a buffer of 1, so if it's full, duplicated
+// The channel is defined with a buffer of 1, so while it's full, subsequent
 // requests are discarded.
 func pushCollectMetricsRequest(collectMetricsPrompt chan<- os.Signal) {
 	select {
@@ -168,14 +183,14 @@ func pushCollectMetricsRequest(collectMetricsPrompt chan<- os.Signal) {
 	}
 }
 
-func collectMetrics(ctx context.Context) {
+func (s *Shim) collectMetrics(ctx context.Context) {
 	if hasQuit(ctx) {
 		return
 	}
-	for i := 0; i < len(gatherPromptChans); i++ {
+	for i := 0; i < len(s.gatherPromptChans); i++ {
 		// push a message out to each channel to collect metrics. don't block.
 		select {
-		case gatherPromptChans[i] <- empty{}:
+		case s.gatherPromptChans[i] <- empty{}:
 		default:
 		}
 	}
@@ -196,7 +211,11 @@ func startGathering(ctx context.Context, input telegraf.Input, acc telegraf.Accu
 		select {
 		case <-ctx.Done():
 			return
-		case <-gatherPromptCh:
+		case _, open := <-gatherPromptCh:
+			if !open {
+				// stdin has closed.
+				return
+			}
 			if err := input.Gather(acc); err != nil {
 				fmt.Fprintf(os.Stderr, "failed to gather metrics: %s", err)
 			}
@@ -229,7 +248,7 @@ func DefaultImportedPlugins() (i []telegraf.Input, e error) {
 
 // LoadConfig loads the config and returns inputs that later need to be loaded.
 func LoadConfig(filePath *string) ([]telegraf.Input, error) {
-	if filePath == nil {
+	if filePath == nil || *filePath == "" {
 		return DefaultImportedPlugins()
 	}
 
@@ -276,3 +295,8 @@ func loadConfigIntoInputs(md toml.MetaData, inputConfigs map[string][]toml.Primi
 	}
 	return renderedInputs, nil
 }
+
+func (s *Shim) closeMetricChannelWhenInputsFinish(wg *sync.WaitGroup) {
+	wg.Wait()
+	close(s.metricCh)
+}
diff --git a/plugins/inputs/execd/shim/goshim_notwindows.go b/plugins/inputs/execd/shim/goshim_notwindows.go
deleted file mode 100644
index 67d41884f..000000000
--- a/plugins/inputs/execd/shim/goshim_notwindows.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build !windows
-
-package shim
-
-import (
-	"os"
-	"os/signal"
-	"syscall"
-)
-
-func listenForCollectMetricsSignals(collectMetricsPrompt chan os.Signal) {
-	// just listen to all the signals.
-	signal.Notify(collectMetricsPrompt, syscall.SIGHUP, syscall.SIGUSR1, syscall.SIGUSR2)
-}
diff --git a/plugins/inputs/execd/shim/goshim_posix.go b/plugins/inputs/execd/shim/goshim_posix.go
new file mode 100644
index 000000000..4e4a04f14
--- /dev/null
+++ b/plugins/inputs/execd/shim/goshim_posix.go
@@ -0,0 +1,23 @@
+// +build !windows
+
+package shim
+
+import (
+	"context"
+	"os"
+	"os/signal"
+	"syscall"
+)
+
+func listenForCollectMetricsSignals(ctx context.Context, collectMetricsPrompt chan os.Signal) {
+	// just listen to all the signals.
+	signal.Notify(collectMetricsPrompt, syscall.SIGHUP, syscall.SIGUSR1, syscall.SIGUSR2)
+
+	go func() {
+		select {
+		case <-ctx.Done():
+			// context done. stop listening for signals to avoid pushing messages to a closed channel
+			signal.Stop(collectMetricsPrompt)
+		}
+	}()
+}
diff --git a/plugins/inputs/execd/shim/goshim_windows.go b/plugins/inputs/execd/shim/goshim_windows.go
index a6bfd1ede..317f8a2f3 100644
--- a/plugins/inputs/execd/shim/goshim_windows.go
+++ b/plugins/inputs/execd/shim/goshim_windows.go
@@ -3,11 +3,20 @@
 package shim
 
 import (
+	"context"
 	"os"
 	"os/signal"
 	"syscall"
 )
 
-func listenForCollectMetricsSignals(collectMetricsPrompt chan os.Signal) {
+func listenForCollectMetricsSignals(ctx context.Context, collectMetricsPrompt chan os.Signal) {
 	signal.Notify(collectMetricsPrompt, syscall.SIGHUP)
+
+	go func() {
+		select {
+		case <-ctx.Done():
+			// context done. 
stop listening for signals to avoid pushing messages to a closed channel
+			signal.Stop(collectMetricsPrompt)
+		}
+	}()
 }
diff --git a/plugins/inputs/execd/shim/shim_posix_test.go b/plugins/inputs/execd/shim/shim_posix_test.go
index 85053130f..de549cc3c 100644
--- a/plugins/inputs/execd/shim/shim_posix_test.go
+++ b/plugins/inputs/execd/shim/shim_posix_test.go
@@ -3,11 +3,11 @@
 package shim
 
 import (
-	"bytes"
+	"bufio"
 	"context"
+	"io"
 	"os"
 	"runtime"
-	"strings"
 	"syscall"
 	"testing"
 	"time"
@@ -20,15 +20,15 @@ func TestShimUSR1SignalingWorks(t *testing.T) {
 		t.Skip()
 		return
 	}
-	stdoutBytes := bytes.NewBufferString("")
-	stdout = stdoutBytes
+	stdinReader, stdinWriter := io.Pipe()
+	stdoutReader, stdoutWriter := io.Pipe()
+
+	stdin = stdinReader
+	stdout = stdoutWriter
 
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	wait := runInputPlugin(t, 40*time.Second)
-
-	// sleep a bit to avoid a race condition where the input hasn't loaded yet.
-	time.Sleep(10 * time.Millisecond)
+	metricProcessed, exited := runInputPlugin(t, 20*time.Minute)
 
 	// signal USR1 to yourself.
 	pid := os.Getpid()
@@ -54,23 +54,17 @@ func TestShimUSR1SignalingWorks(t *testing.T) {
 
 	timeout := time.NewTimer(10 * time.Second)
 	select {
-	case <-wait:
+	case <-metricProcessed:
 	case <-timeout.C:
 		require.Fail(t, "Timeout waiting for metric to arrive")
 	}
+	cancel()
 
-	for stdoutBytes.Len() == 0 {
-		select {
-		case <-timeout.C:
-			require.Fail(t, "Timeout waiting to read metric from stdout")
-			return
-		default:
-			time.Sleep(10 * time.Millisecond)
-		}
-	}
+	r := bufio.NewReader(stdoutReader)
+	out, err := r.ReadString('\n')
+	require.NoError(t, err)
+	require.Equal(t, "measurement,tag=tag field=1i 1234000005678\n", out)
 
-	out := string(stdoutBytes.Bytes())
-	require.Contains(t, out, "\n")
-	metricLine := strings.Split(out, "\n")[0]
-	require.Equal(t, "measurement,tag=tag field=1i 1234000005678", metricLine)
+	stdinWriter.Close()
+	<-exited
 }
diff --git a/plugins/inputs/execd/shim/shim_test.go b/plugins/inputs/execd/shim/shim_test.go
index 9d97bd239..498ef4ab5 100644
--- a/plugins/inputs/execd/shim/shim_test.go
+++ b/plugins/inputs/execd/shim/shim_test.go
@@ -1,7 +1,9 @@
 package shim
 
 import (
+	"bufio"
 	"bytes"
+	"io"
 	"strings"
 	"testing"
 	"time"
@@ -15,11 +17,13 @@ func TestShimWorks(t *testing.T) {
 	stdoutBytes := bytes.NewBufferString("")
 	stdout = stdoutBytes
 
+	stdin, _ = io.Pipe() // hold the stdin pipe open
+
 	timeout := time.NewTimer(10 * time.Second)
-	wait := runInputPlugin(t, 10*time.Millisecond)
+	metricProcessed, _ := runInputPlugin(t, 10*time.Millisecond)
 
 	select {
-	case <-wait:
+	case <-metricProcessed:
 	case <-timeout.C:
 		require.Fail(t, "Timeout waiting for metric to arrive")
 	}
@@ -40,55 +44,52 @@ func TestShimWorks(t *testing.T) {
 }
 
 func TestShimStdinSignalingWorks(t *testing.T) {
-	stdoutBytes := bytes.NewBufferString("")
-	stdout = stdoutBytes
-	stdinBytes := bytes.NewBufferString("")
-	stdin = stdinBytes
+	stdinReader, stdinWriter := io.Pipe()
+	stdoutReader, stdoutWriter := io.Pipe()
+
+	stdin = stdinReader
+	stdout = stdoutWriter
 
 	timeout := time.NewTimer(10 * time.Second)
-	wait := runInputPlugin(t, 40*time.Second)
+	metricProcessed, exited := runInputPlugin(t, 40*time.Second)
 
-	stdinBytes.WriteString("\n")
+	stdinWriter.Write([]byte("\n"))
 
 	select {
-	case <-wait:
+	case <-metricProcessed:
 	case <-timeout.C:
 		require.Fail(t, "Timeout waiting for metric to arrive")
 	}
 
-	for stdoutBytes.Len() == 0 {
-		select {
-		case <-timeout.C:
-			require.Fail(t, "Timeout waiting to read metric from stdout")
-			return
-		default:
-			
time.Sleep(10 * time.Millisecond)
-		}
-	}
+	r := bufio.NewReader(stdoutReader)
+	out, err := r.ReadString('\n')
+	require.NoError(t, err)
+	require.Equal(t, "measurement,tag=tag field=1i 1234000005678\n", out)
 
-	out := string(stdoutBytes.Bytes())
-	require.Contains(t, out, "\n")
-	metricLine := strings.Split(out, "\n")[0]
-	require.Equal(t, "measurement,tag=tag field=1i 1234000005678", metricLine)
+	stdinWriter.Close()
+	// check that it exits cleanly
+	<-exited
 }
 
-func runInputPlugin(t *testing.T, timeout time.Duration) chan bool {
-	wait := make(chan bool)
+func runInputPlugin(t *testing.T, interval time.Duration) (metricProcessed chan bool, exited chan bool) {
+	metricProcessed = make(chan bool)
+	exited = make(chan bool)
 	inp := &testInput{
-		wait: wait,
+		metricProcessed: metricProcessed,
 	}
 
 	shim := New()
 	shim.AddInput(inp)
 	go func() {
-		err := shim.Run(timeout) // we aren't using the timer here
+		err := shim.Run(interval)
 		require.NoError(t, err)
+		exited <- true
 	}()
 
-	return wait
+	return metricProcessed, exited
 }
 
 type testInput struct {
-	wait chan bool
+	metricProcessed chan bool
 }
 
 func (i *testInput) SampleConfig() string {
@@ -107,7 +108,7 @@ func (i *testInput) Gather(acc telegraf.Accumulator) error {
 		map[string]string{
 			"tag": "tag",
 		}, time.Unix(1234, 5678))
-	i.wait <- true
+	i.metricProcessed <- true
 
 	return nil
 }

From 53c86890afc139fea3fd209c89cbb7891b1710fb Mon Sep 17 00:00:00 2001
From: Giovanni Luisotto
Date: Tue, 5 May 2020 19:56:03 +0200
Subject: [PATCH 1738/1815] Rename measurement to sqlserver_volume_space (#7457)

---
 plugins/inputs/sqlserver/sqlserver.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go
index 7c8778e10..6214ee237 100644
--- a/plugins/inputs/sqlserver/sqlserver.go
+++ b/plugins/inputs/sqlserver/sqlserver.go
@@ -1577,7 +1577,7 @@ EngineEdition:
 IF SERVERPROPERTY('EngineEdition') NOT IN (5,8)
 BEGIN
 SELECT DISTINCT
-	'sqlserver_disk_space' AS [measurement]
+	'sqlserver_volume_space' AS [measurement]
 	,SERVERPROPERTY('machinename') AS [server_name]
 	,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
 	,IIF( RIGHT(vs.[volume_mount_point],1) = '\' /*Tag value cannot end with \ */

From f25936b79630446c48e6fa67ab1a2c2aca0aa44d Mon Sep 17 00:00:00 2001
From: Alex Samorukov
Date: Tue, 5 May 2020 20:12:21 +0200
Subject: [PATCH 1739/1815] Add information about HEC JSON format limitations and workaround (#7459)

---
 plugins/serializers/splunkmetric/README.md | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/plugins/serializers/splunkmetric/README.md b/plugins/serializers/splunkmetric/README.md
index 96049e45f..acd497dbc 100644
--- a/plugins/serializers/splunkmetric/README.md
+++ b/plugins/serializers/splunkmetric/README.md
@@ -165,3 +165,22 @@ An example configuration of a file based output is:
     splunkmetric_hec_routing = false
     splunkmetric_multimetric = true
 ```
+
+## Non-numeric metric values
+
+Splunk supports only numeric field values, so the serializer silently drops metrics with string values. In some cases this can be worked around using the enum processor. 
The example below does this for the `docker_container_health.health_status` metric:
+
+```toml
+# splunkmetric does not support string values
+[[processors.enum]]
+  namepass = ["docker_container_health"]
+  [[processors.enum.mapping]]
+    ## Name of the field to map
+    field = "health_status"
+    [processors.enum.mapping.value_mappings]
+      starting = 0
+      healthy = 1
+      unhealthy = 2
+      none = 3
+```
+

From 022ff63d29c30709c1bcd4efad1db80059775e47 Mon Sep 17 00:00:00 2001
From: Steven Soroka
Date: Tue, 5 May 2020 17:43:45 -0400
Subject: [PATCH 1740/1815] fix issue with execd-multiline influx line protocol (#7463)

---
 plugins/inputs/execd/execd.go      | 30 +++++++++++++++++++
 plugins/inputs/execd/execd_test.go | 47 ++++++++++++++++++++++++++++++
 2 files changed, 77 insertions(+)

diff --git a/plugins/inputs/execd/execd.go b/plugins/inputs/execd/execd.go
index b162c9776..1ea136a3d 100644
--- a/plugins/inputs/execd/execd.go
+++ b/plugins/inputs/execd/execd.go
@@ -15,6 +15,7 @@ import (
 	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/plugins/inputs"
 	"github.com/influxdata/telegraf/plugins/parsers"
+	"github.com/influxdata/telegraf/plugins/parsers/influx"
 )
 
 const sampleConfig = `
@@ -197,6 +198,12 @@ func (e *Execd) cmdWait() error {
 }
 
 func (e *Execd) cmdReadOut(out io.Reader) {
+	if _, isInfluxParser := e.parser.(*influx.Parser); isInfluxParser {
+		// work around the lack of built-in streaming parser. :(
+		e.cmdReadOutStream(out)
+		return
+	}
+
 	scanner := bufio.NewScanner(out)
 
 	for scanner.Scan() {
@@ -215,6 +222,29 @@ func (e *Execd) cmdReadOut(out io.Reader) {
 	}
}

+func (e *Execd) cmdReadOutStream(out io.Reader) {
+	parser := influx.NewStreamParser(out)
+
+	for {
+		metric, err := parser.Next()
+		if err != nil {
+			if err == influx.EOF {
+				break // stream ended
+			}
+			if parseErr, isParseError := err.(*influx.ParseError); isParseError {
+				// parse error.
+				e.acc.AddError(parseErr)
+				continue
+			}
+			// some non-recoverable error? 
+ e.acc.AddError(err) + return + } + + e.acc.AddMetric(metric) + } +} + func (e *Execd) cmdReadErr(out io.Reader) { scanner := bufio.NewScanner(out) diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go index 1c687a9df..52c0a214b 100644 --- a/plugins/inputs/execd/execd_test.go +++ b/plugins/inputs/execd/execd_test.go @@ -3,6 +3,8 @@ package execd import ( + "fmt" + "strings" "testing" "time" @@ -47,6 +49,51 @@ func TestExternalInputWorks(t *testing.T) { e.Gather(acc) } +func TestParsesLinesContainingNewline(t *testing.T) { + parser, err := parsers.NewInfluxParser() + require.NoError(t, err) + + metrics := make(chan telegraf.Metric, 10) + defer close(metrics) + acc := agent.NewAccumulator(&TestMetricMaker{}, metrics) + + e := &Execd{ + Command: []string{shell(), fileShellScriptPath()}, + RestartDelay: config.Duration(5 * time.Second), + parser: parser, + Signal: "STDIN", + acc: acc, + } + + cases := []struct { + Name string + Value string + }{ + { + Name: "no-newline", + Value: "my message", + }, { + Name: "newline", + Value: "my\nmessage", + }, + } + + for _, test := range cases { + t.Run(test.Name, func(t *testing.T) { + line := fmt.Sprintf("event message=\"%v\" 1587128639239000000", test.Value) + + e.cmdReadOut(strings.NewReader(line)) + + m := readChanWithTimeout(t, metrics, 1*time.Second) + + require.Equal(t, "event", m.Name()) + val, ok := m.GetField("message") + require.True(t, ok) + require.Equal(t, test.Value, val) + }) + } +} + func readChanWithTimeout(t *testing.T, metrics chan telegraf.Metric, timeout time.Duration) telegraf.Metric { to := time.NewTimer(timeout) defer to.Stop() From 0924ad2668153c3b1cb28cc90e307669626cc59a Mon Sep 17 00:00:00 2001 From: i-prudnikov Date: Wed, 6 May 2020 21:20:44 +0300 Subject: [PATCH 1741/1815] Use docker log timestamp as metric time (#7434) --- plugins/inputs/docker_log/docker_log.go | 53 ++++++++++++++------ plugins/inputs/docker_log/docker_log_test.go | 20 ++++++-- 2 files changed, 53 insertions(+), 20 deletions(-) diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go index 7cb2d94be..cf5960b81 100644 --- a/plugins/inputs/docker_log/docker_log.go +++ b/plugins/inputs/docker_log/docker_log.go @@ -2,8 +2,10 @@ package docker_log import ( "bufio" + "bytes" "context" "crypto/tls" + "fmt" "io" "strings" "sync" @@ -287,7 +289,7 @@ func (d *DockerLogs) tailContainerLogs( logOptions := types.ContainerLogsOptions{ ShowStdout: true, ShowStderr: true, - Timestamps: false, + Timestamps: true, Details: false, Follow: true, Tail: tail, @@ -311,6 +313,30 @@ func (d *DockerLogs) tailContainerLogs( } } +func parseLine(line []byte) (time.Time, string, error) { + parts := bytes.SplitN(line, []byte(" "), 2) + + switch len(parts) { + case 1: + parts = append(parts, []byte("")) + } + + tsString := string(parts[0]) + + // Keep any leading space, but remove whitespace from end of line. + // This preserves space in, for example, stacktraces, while removing + // annoying end of line characters and is similar to how other logging + // plugins such as syslog behave. 
+ message := bytes.TrimRightFunc(parts[1], unicode.IsSpace) + + ts, err := time.Parse(time.RFC3339Nano, tsString) + if err != nil { + return time.Time{}, "", fmt.Errorf("error parsing timestamp %q: %v", tsString, err) + } + + return ts, string(message), nil +} + func tailStream( acc telegraf.Accumulator, baseTags map[string]string, @@ -328,22 +354,19 @@ func tailStream( r := bufio.NewReaderSize(reader, 64*1024) - var err error - var message string for { - message, err = r.ReadString('\n') + line, err := r.ReadBytes('\n') - // Keep any leading space, but remove whitespace from end of line. - // This preserves space in, for example, stacktraces, while removing - // annoying end of line characters and is similar to how other logging - // plugins such as syslog behave. - message = strings.TrimRightFunc(message, unicode.IsSpace) - - if len(message) != 0 { - acc.AddFields("docker_log", map[string]interface{}{ - "container_id": containerID, - "message": message, - }, tags) + if len(line) != 0 { + ts, message, err := parseLine(line) + if err != nil { + acc.AddError(err) + } else { + acc.AddFields("docker_log", map[string]interface{}{ + "container_id": containerID, + "message": message, + }, tags, ts) + } } if err != nil { diff --git a/plugins/inputs/docker_log/docker_log_test.go b/plugins/inputs/docker_log/docker_log_test.go index 11cf0befd..c8903c9d8 100644 --- a/plugins/inputs/docker_log/docker_log_test.go +++ b/plugins/inputs/docker_log/docker_log_test.go @@ -53,6 +53,14 @@ func (r *Response) Close() error { return nil } +func MustParse(layout, value string) time.Time { + tm, err := time.Parse(layout, value) + if err != nil { + panic(err) + } + return tm +} + func Test(t *testing.T) { tests := []struct { name string @@ -87,7 +95,7 @@ func Test(t *testing.T) { }, nil }, ContainerLogsF: func(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { - return &Response{Reader: bytes.NewBuffer([]byte("hello\n"))}, nil + return &Response{Reader: bytes.NewBuffer([]byte("2020-04-28T18:43:16.432691200Z hello\n"))}, nil }, }, expected: []telegraf.Metric{ @@ -104,7 +112,7 @@ func Test(t *testing.T) { "container_id": "deadbeef", "message": "hello", }, - time.Now(), + MustParse(time.RFC3339Nano, "2020-04-28T18:43:16.432691200Z"), ), }, }, @@ -130,7 +138,7 @@ func Test(t *testing.T) { ContainerLogsF: func(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { var buf bytes.Buffer w := stdcopy.NewStdWriter(&buf, stdcopy.Stdout) - w.Write([]byte("hello from stdout")) + w.Write([]byte("2020-04-28T18:42:16.432691200Z hello from stdout")) return &Response{Reader: &buf}, nil }, }, @@ -148,7 +156,7 @@ func Test(t *testing.T) { "container_id": "deadbeef", "message": "hello from stdout", }, - time.Now(), + MustParse(time.RFC3339Nano, "2020-04-28T18:42:16.432691200Z"), ), }, }, @@ -172,7 +180,9 @@ func Test(t *testing.T) { acc.Wait(len(tt.expected)) plugin.Stop() - testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + require.Nil(t, acc.Errors) // no errors during gathering + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics()) }) } } From 283a16316aea3e4447922bd6cfcef427f0773a56 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 6 May 2020 11:25:21 -0700 Subject: [PATCH 1742/1815] Update datadog output documentation (#7467) --- plugins/outputs/datadog/README.md | 31 +++++++++++++++++++++++++----- plugins/outputs/datadog/datadog.go | 8 ++++---- 2 files changed, 
30 insertions(+), 9 deletions(-) diff --git a/plugins/outputs/datadog/README.md b/plugins/outputs/datadog/README.md index 0563d6444..ad1c7a025 100644 --- a/plugins/outputs/datadog/README.md +++ b/plugins/outputs/datadog/README.md @@ -1,9 +1,30 @@ # Datadog Output Plugin -This plugin writes to the [Datadog Metrics API](http://docs.datadoghq.com/api/#metrics) -and requires an `apikey` which can be obtained [here](https://app.datadoghq.com/account/settings#api) -for the account. +This plugin writes to the [Datadog Metrics API][metrics] and requires an +`apikey` which can be obtained [here][apikey] for the account. -If the point value being sent cannot be converted to a float64, the metric is skipped. -Metrics are grouped by converting any `_` characters to `.` in the Point Name. \ No newline at end of file +### Configuration + +```toml +[[outputs.datadog]] + ## Datadog API key + apikey = "my-secret-key" + + ## Connection timeout. + # timeout = "5s" + + ## Write URL override; useful for debugging. + # url = "https://app.datadoghq.com/api/v1/series" +``` + +### Metrics + +Datadog metric names are formed by joining the Telegraf metric name and the field +key with a `.` character. + +Field values are converted to floating point numbers. Strings and floats that +cannot be sent over JSON, namely NaN and Inf, are ignored. + +[metrics]: https://docs.datadoghq.com/api/v1/metrics/#submit-metrics +[apikey]: https://app.datadoghq.com/account/settings#api diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go index 736570726..2d1a93788 100644 --- a/plugins/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -25,13 +25,13 @@ type Datadog struct { var sampleConfig = ` ## Datadog API key - apikey = "my-secret-key" # required. - - # The base endpoint URL can optionally be specified but it defaults to: - #url = "https://app.datadoghq.com/api/v1/series" + apikey = "my-secret-key" ## Connection timeout. # timeout = "5s" + + ## Write URL override; useful for debugging. + # url = "https://app.datadoghq.com/api/v1/series" ` type TimeSeries struct { From c8dbf13fc1192cfadd07cc71928e54b1fe403b4d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 6 May 2020 11:40:59 -0700 Subject: [PATCH 1743/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 991dca8af..a987c76e3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ - [#7356](https://github.com/influxdata/telegraf/pull/7356): Add option to save retention policy as tag in influxdb_listener. - [#6915](https://github.com/influxdata/telegraf/pull/6915): Add support for MDS and RGW sockets to ceph input. - [#7391](https://github.com/influxdata/telegraf/pull/7391): Extract target as a tag for each rule in iptables input. +- [#7434](https://github.com/influxdata/telegraf/pull/7434): Use docker log timestamp as metric time. 
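As a side note on that last item: the essence of the docker_log change above is that each log line now arrives as `<RFC3339Nano timestamp> <message>`, and the timestamp prefix becomes the metric time. A minimal, self-contained sketch of that parsing step (the `main` wrapper and the sample line are illustrative only, not the plugin's actual code):

```go
package main

import (
	"bytes"
	"fmt"
	"time"
	"unicode"
)

// parseLine splits a timestamped Docker log line into its time and message.
func parseLine(line []byte) (time.Time, string, error) {
	parts := bytes.SplitN(line, []byte(" "), 2)
	if len(parts) == 1 {
		parts = append(parts, []byte(""))
	}

	ts, err := time.Parse(time.RFC3339Nano, string(parts[0]))
	if err != nil {
		return time.Time{}, "", fmt.Errorf("error parsing timestamp %q: %v", parts[0], err)
	}

	// Trim only trailing whitespace so that leading indentation (for example
	// in stacktraces) is preserved.
	message := bytes.TrimRightFunc(parts[1], unicode.IsSpace)
	return ts, string(message), nil
}

func main() {
	ts, msg, err := parseLine([]byte("2020-04-28T18:43:16.432691200Z hello\n"))
	fmt.Println(ts.UTC(), msg, err)
}
```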
#### Bugfixes From fd76c8bf21bc956ba898b8e2dc04771bdfe64e65 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 6 May 2020 11:59:16 -0700 Subject: [PATCH 1744/1815] Rework plugin tickers to prevent drift and spread write ticks (#7390) --- agent/agent.go | 99 ++++-------- agent/tick.go | 265 ++++++++++++++++++++++++++++---- agent/tick_test.go | 251 ++++++++++++++++++++++++++++++ docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 1 + go.sum | 2 + internal/internal.go | 15 +- 7 files changed, 530 insertions(+), 104 deletions(-) create mode 100644 agent/tick_test.go diff --git a/agent/agent.go b/agent/agent.go index b68c55d13..9ac51471a 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -265,57 +265,45 @@ func (a *Agent) runInputs( interval = input.Config.Interval } + var ticker Ticker + if a.Config.Agent.RoundInterval { + ticker = NewAlignedTicker(startTime, interval, jitter) + } else { + ticker = NewUnalignedTicker(interval, jitter) + } + defer ticker.Stop() + acc := NewAccumulator(input, dst) acc.SetPrecision(a.Precision()) wg.Add(1) go func(input *models.RunningInput) { defer wg.Done() - - if a.Config.Agent.RoundInterval { - err := internal.SleepContext( - ctx, internal.AlignDuration(startTime, interval)) - if err != nil { - return - } - } - - a.gatherOnInterval(ctx, acc, input, interval, jitter) + a.gatherLoop(ctx, acc, input, ticker) }(input) } - wg.Wait() + wg.Wait() return nil } // gather runs an input's gather function periodically until the context is // done. -func (a *Agent) gatherOnInterval( +func (a *Agent) gatherLoop( ctx context.Context, acc telegraf.Accumulator, input *models.RunningInput, - interval time.Duration, - jitter time.Duration, + ticker Ticker, ) { defer panicRecover(input) - ticker := time.NewTicker(interval) - defer ticker.Stop() - for { - err := internal.SleepContext(ctx, internal.RandomDuration(jitter)) - if err != nil { - return - } - - err = a.gatherOnce(acc, input, interval) - if err != nil { - acc.AddError(err) - } - select { - case <-ticker.C: - continue + case <-ticker.Elapsed(): + err := a.gatherOnce(acc, input, ticker) + if err != nil { + acc.AddError(err) + } case <-ctx.Done(): return } @@ -327,11 +315,8 @@ func (a *Agent) gatherOnInterval( func (a *Agent) gatherOnce( acc telegraf.Accumulator, input *models.RunningInput, - timeout time.Duration, + ticker Ticker, ) error { - ticker := time.NewTicker(timeout) - defer ticker.Stop() - done := make(chan error) go func() { done <- input.Gather(acc) @@ -341,7 +326,7 @@ func (a *Agent) gatherOnce( select { case err := <-done: return err - case <-ticker.C: + case <-ticker.Elapsed(): log.Printf("W! [agent] [%s] did not complete within its interval", input.LogName()) } @@ -514,10 +499,13 @@ func (a *Agent) runOutputs( jitter = *output.Config.FlushJitter } + ticker := NewRollingTicker(interval, jitter) + defer ticker.Stop() + wg.Add(1) go func(output *models.RunningOutput) { defer wg.Done() - a.flushLoop(ctx, startTime, output, interval, jitter) + a.flushLoop(ctx, output, ticker) }(output) } @@ -542,10 +530,8 @@ func (a *Agent) runOutputs( // done. 
func (a *Agent) flushLoop(
 	ctx context.Context,
-	startTime time.Time,
 	output *models.RunningOutput,
-	interval time.Duration,
-	jitter time.Duration,
+	ticker Ticker,
 ) {
 	logError := func(err error) {
 		if err != nil {
@@ -558,44 +544,30 @@ func (a *Agent) flushLoop(
 	watchForFlushSignal(flushRequested)
 	defer stopListeningForFlushSignal(flushRequested)
 
-	// align to round interval
-	if a.Config.Agent.RoundInterval {
-		err := internal.SleepContext(
-			ctx, internal.AlignDuration(startTime, interval))
-		if err != nil {
-			return
-		}
-	}
-
-	// since we are watching two channels we need a ticker with the jitter
-	// integrated.
-	ticker := NewTicker(interval, jitter)
-	defer ticker.Stop()
-
 	for {
 		// Favor shutdown over other methods.
 		select {
 		case <-ctx.Done():
-			logError(a.flushOnce(output, interval, output.Write))
+			logError(a.flushOnce(output, ticker, output.Write))
 			return
 		default:
 		}
 
 		select {
 		case <-ctx.Done():
-			logError(a.flushOnce(output, interval, output.Write))
+			logError(a.flushOnce(output, ticker, output.Write))
 			return
-		case <-ticker.C:
-			logError(a.flushOnce(output, interval, output.Write))
+		case <-ticker.Elapsed():
+			logError(a.flushOnce(output, ticker, output.Write))
 		case <-flushRequested:
-			logError(a.flushOnce(output, interval, output.Write))
+			logError(a.flushOnce(output, ticker, output.Write))
 		case <-output.BatchReady:
 			// Favor the ticker over batch ready
 			select {
-			case <-ticker.C:
-				logError(a.flushOnce(output, interval, output.Write))
+			case <-ticker.Elapsed():
+				logError(a.flushOnce(output, ticker, output.Write))
 			default:
-				logError(a.flushOnce(output, interval, output.WriteBatch))
+				logError(a.flushOnce(output, ticker, output.WriteBatch))
 			}
 		}
 	}
@@ -605,12 +577,9 @@
 // interval it fails to complete before.
 func (a *Agent) flushOnce(
 	output *models.RunningOutput,
-	timeout time.Duration,
+	ticker Ticker,
 	writeFunc func() error,
 ) error {
-	ticker := time.NewTicker(timeout)
-	defer ticker.Stop()
-
 	done := make(chan error)
 	go func() {
 		done <- writeFunc()
@@ -621,7 +590,7 @@
 		case err := <-done:
 			output.LogBufferStatus()
 			return err
-		case <-ticker.C:
+		case <-ticker.Elapsed():
 			log.Printf("W! [agent] [%q] did not complete within its flush interval",
 				output.LogName())
 			output.LogBufferStatus()
diff --git a/agent/tick.go b/agent/tick.go
index 64dbff50b..93e3a3d76 100644
--- a/agent/tick.go
+++ b/agent/tick.go
@@ -5,53 +5,264 @@ import (
 	"sync"
 	"time"
 
+	"github.com/benbjohnson/clock"
 	"github.com/influxdata/telegraf/internal"
 )
 
-type Ticker struct {
-	C          chan time.Time
-	ticker     *time.Ticker
-	jitter     time.Duration
-	wg         sync.WaitGroup
-	cancelFunc context.CancelFunc
+type empty struct{}
+
+type Ticker interface {
+	Elapsed() <-chan time.Time
+	Stop()
 }
 
-func NewTicker(
-	interval time.Duration,
-	jitter time.Duration,
-) *Ticker {
-	ctx, cancel := context.WithCancel(context.Background())
+// AlignedTicker delivers ticks at aligned times plus an optional jitter. Each
+// tick is realigned to avoid drift and handle changes to the system clock.
+//
+// The ticks may have a jitter duration applied to them as a random offset to
+// the interval. However the overall pace is that of the interval, so on
+// average you will have one collection each interval.
+//
+// The first tick is emitted at the next alignment.
+//
+// Ticks are dropped for slow consumers. 
+//
+// The implementation currently does not recalculate until the next tick with
+// no maximum sleep; when using large intervals, alignment is not corrected
+// until the next tick.
+type AlignedTicker struct {
+	interval time.Duration
+	jitter   time.Duration
+	ch       chan time.Time
+	cancel   context.CancelFunc
+	wg       sync.WaitGroup
+}
+
+func NewAlignedTicker(now time.Time, interval, jitter time.Duration) *AlignedTicker {
+	return newAlignedTicker(now, interval, jitter, clock.New())
+}
+
+func newAlignedTicker(now time.Time, interval, jitter time.Duration, clock clock.Clock) *AlignedTicker {
+	ctx, cancel := context.WithCancel(context.Background())
+	t := &AlignedTicker{
+		interval: interval,
+		jitter:   jitter,
+		ch:       make(chan time.Time, 1),
+		cancel:   cancel,
+	}
+
+	d := t.next(now)
+	timer := clock.Timer(d)
+
+	t.wg.Add(1)
+	go func() {
+		defer t.wg.Done()
+		t.run(ctx, timer)
+	}()
+
+	return t
+}
+
+func (t *AlignedTicker) next(now time.Time) time.Duration {
+	next := internal.AlignTime(now, t.interval)
+	d := next.Sub(now)
+	if d == 0 {
+		d = t.interval
+	}
+	d += internal.RandomDuration(t.jitter)
+	return d
+}
+
+func (t *AlignedTicker) run(ctx context.Context, timer *clock.Timer) {
+	for {
+		select {
+		case <-ctx.Done():
+			timer.Stop()
+			return
+		case now := <-timer.C:
+			select {
+			case t.ch <- now:
+			default:
+			}
+
+			d := t.next(now)
+			timer.Reset(d)
+		}
+	}
+}
+
+func (t *AlignedTicker) Elapsed() <-chan time.Time {
+	return t.ch
+}
+
+func (t *AlignedTicker) Stop() {
+	t.cancel()
+	t.wg.Wait()
+}
+
+// UnalignedTicker delivers ticks at regular but unaligned intervals. No
+// effort is made to avoid drift.
+//
+// The ticks may have a jitter duration applied to them as a random offset to
+// the interval. However the overall pace is that of the interval, so on
+// average you will have one collection each interval.
+//
+// The first tick is emitted immediately.
+//
+// Ticks are dropped for slow consumers. 
+type UnalignedTicker struct { + interval time.Duration + jitter time.Duration + ch chan time.Time + cancel context.CancelFunc + wg sync.WaitGroup +} + +func NewUnalignedTicker(interval, jitter time.Duration) *UnalignedTicker { + return newUnalignedTicker(interval, jitter, clock.New()) +} + +func newUnalignedTicker(interval, jitter time.Duration, clock clock.Clock) *UnalignedTicker { + ctx, cancel := context.WithCancel(context.Background()) + t := &UnalignedTicker{ + interval: interval, + jitter: jitter, + ch: make(chan time.Time, 1), + cancel: cancel, + } + + ticker := clock.Ticker(t.interval) + t.ch <- clock.Now() + + t.wg.Add(1) + go func() { + defer t.wg.Done() + t.run(ctx, ticker, clock) + }() + + return t +} + +func sleep(ctx context.Context, duration time.Duration, clock clock.Clock) error { + if duration == 0 { + return nil + } + + t := clock.Timer(duration) + select { + case <-t.C: + return nil + case <-ctx.Done(): + t.Stop() + return ctx.Err() + } +} + +func (t *UnalignedTicker) run(ctx context.Context, ticker *clock.Ticker, clock clock.Clock) { + for { + select { + case <-ctx.Done(): + ticker.Stop() + return + case <-ticker.C: + jitter := internal.RandomDuration(t.jitter) + err := sleep(ctx, jitter, clock) + if err != nil { + ticker.Stop() + return + } + select { + case t.ch <- clock.Now(): + default: + } + } + } +} + +func (t *UnalignedTicker) InjectTick() { + t.ch <- time.Now() +} + +func (t *UnalignedTicker) Elapsed() <-chan time.Time { + return t.ch +} + +func (t *UnalignedTicker) Stop() { + t.cancel() + t.wg.Wait() +} + +// RollingTicker delivers ticks at regular but unaligned intervals. +// +// Because the next interval is scheduled based on the interval + jitter, you +// are guaranteed at least interval seconds without missing a tick and ticks +// will be evenly scheduled over time. +// +// On average you will have one collection each interval + (jitter/2). +// +// The first tick is emitted after interval+jitter seconds. +// +// Ticks are dropped for slow consumers. 
+type RollingTicker struct { + interval time.Duration + jitter time.Duration + ch chan time.Time + cancel context.CancelFunc + wg sync.WaitGroup +} + +func NewRollingTicker(interval, jitter time.Duration) *RollingTicker { + return newRollingTicker(interval, jitter, clock.New()) +} + +func newRollingTicker(interval, jitter time.Duration, clock clock.Clock) *RollingTicker { + ctx, cancel := context.WithCancel(context.Background()) + t := &RollingTicker{ + interval: interval, + jitter: jitter, + ch: make(chan time.Time, 1), + cancel: cancel, + } + + d := t.next() + timer := clock.Timer(d) + + t.wg.Add(1) + go func() { + defer t.wg.Done() + t.run(ctx, timer) + }() + + return t +} + +func (t *RollingTicker) next() time.Duration { + return t.interval + internal.RandomDuration(t.jitter) +} + +func (t *RollingTicker) run(ctx context.Context, timer *clock.Timer) { + for { + select { + case <-ctx.Done(): + timer.Stop() + return + case now := <-timer.C: + select { + case t.ch <- now: + default: + } + + d := t.next() + timer.Reset(d) + } + } +} + +func (t *RollingTicker) Elapsed() <-chan time.Time { + return t.ch +} + +func (t *RollingTicker) Stop() { + t.cancel() + t.wg.Wait() +} diff --git a/agent/tick_test.go b/agent/tick_test.go new file mode 100644 index 000000000..6e9755ceb --- /dev/null +++ b/agent/tick_test.go @@ -0,0 +1,251 @@ +package agent + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/benbjohnson/clock" + "github.com/stretchr/testify/require" +) + +var format = "2006-01-02T15:04:05.999Z07:00" + +func TestAlignedTicker(t *testing.T) { + interval := 10 * time.Second + jitter := 0 * time.Second + + clock := clock.NewMock() + since := clock.Now() + until := since.Add(60 * time.Second) + + ticker := newAlignedTicker(since, interval, jitter, clock) + + expected := []time.Time{ + time.Unix(10, 0).UTC(), + time.Unix(20, 0).UTC(), + time.Unix(30, 0).UTC(), + time.Unix(40, 0).UTC(), + time.Unix(50, 0).UTC(), + time.Unix(60, 0).UTC(), + } + + actual := []time.Time{} + for !clock.Now().After(until) { + select { + case tm := <-ticker.Elapsed(): + actual = append(actual, tm.UTC()) + default: + } + clock.Add(10 * time.Second) + } + + require.Equal(t, expected, actual) +} + +func TestAlignedTickerJitter(t *testing.T) { + interval := 10 * time.Second + jitter := 5 * time.Second + + clock := clock.NewMock() + since := clock.Now() + until := since.Add(60 * time.Second) + + ticker := newAlignedTicker(since, interval, jitter, clock) + + last := since + for !clock.Now().After(until) { + select { + case tm := <-ticker.Elapsed(): + require.True(t, tm.Sub(last) <= 15*time.Second) + require.True(t, tm.Sub(last) >= 5*time.Second) + last = last.Add(interval) + default: + } + clock.Add(5 * time.Second) + } +} + +func TestAlignedTickerMissedTick(t *testing.T) { + interval := 10 * time.Second + jitter := 0 * time.Second + + clock := clock.NewMock() + since := clock.Now() + + ticker := newAlignedTicker(since, interval, jitter, clock) + + clock.Add(25 * time.Second) + tm := <-ticker.Elapsed() + require.Equal(t, time.Unix(10, 0).UTC(), tm.UTC()) + clock.Add(5 * time.Second) + tm = <-ticker.Elapsed() + require.Equal(t, time.Unix(30, 0).UTC(), tm.UTC()) +} + +func TestUnalignedTicker(t *testing.T) { + interval := 10 * time.Second + jitter := 0 * time.Second + + clock := clock.NewMock() + clock.Add(1 * time.Second) + since := clock.Now() + until := since.Add(60 * time.Second) + + ticker := newUnalignedTicker(interval, jitter, clock) + + expected := []time.Time{ + time.Unix(1, 0).UTC(), + time.Unix(11, 
0).UTC(), + time.Unix(21, 0).UTC(), + time.Unix(31, 0).UTC(), + time.Unix(41, 0).UTC(), + time.Unix(51, 0).UTC(), + time.Unix(61, 0).UTC(), + } + + actual := []time.Time{} + for !clock.Now().After(until) { + select { + case tm := <-ticker.Elapsed(): + actual = append(actual, tm.UTC()) + default: + } + clock.Add(10 * time.Second) + } + + require.Equal(t, expected, actual) +} + +func TestRollingTicker(t *testing.T) { + interval := 10 * time.Second + jitter := 0 * time.Second + + clock := clock.NewMock() + clock.Add(1 * time.Second) + since := clock.Now() + until := since.Add(60 * time.Second) + + ticker := newUnalignedTicker(interval, jitter, clock) + + expected := []time.Time{ + time.Unix(1, 0).UTC(), + time.Unix(11, 0).UTC(), + time.Unix(21, 0).UTC(), + time.Unix(31, 0).UTC(), + time.Unix(41, 0).UTC(), + time.Unix(51, 0).UTC(), + time.Unix(61, 0).UTC(), + } + + actual := []time.Time{} + for !clock.Now().After(until) { + select { + case tm := <-ticker.Elapsed(): + actual = append(actual, tm.UTC()) + default: + } + clock.Add(10 * time.Second) + } + + require.Equal(t, expected, actual) +} + +// Simulates running the Ticker for an hour and displays stats about the +// operation. +func TestAlignedTickerDistribution(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + interval := 10 * time.Second + jitter := 5 * time.Second + + clock := clock.NewMock() + since := clock.Now() + + ticker := newAlignedTicker(since, interval, jitter, clock) + dist := simulatedDist(ticker, clock) + printDist(dist) + require.True(t, 350 < dist.Count) + require.True(t, 9 < dist.Mean() && dist.Mean() < 11) +} + +// Simulates running the Ticker for an hour and displays stats about the +// operation. +func TestUnalignedTickerDistribution(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + interval := 10 * time.Second + jitter := 5 * time.Second + + clock := clock.NewMock() + + ticker := newUnalignedTicker(interval, jitter, clock) + dist := simulatedDist(ticker, clock) + printDist(dist) + require.True(t, 350 < dist.Count) + require.True(t, 9 < dist.Mean() && dist.Mean() < 11) +} + +// Simulates running the Ticker for an hour and displays stats about the +// operation. 
+func TestRollingTickerDistribution(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + interval := 10 * time.Second + jitter := 5 * time.Second + + clock := clock.NewMock() + + ticker := newRollingTicker(interval, jitter, clock) + dist := simulatedDist(ticker, clock) + printDist(dist) + require.True(t, 275 < dist.Count) + require.True(t, 12 < dist.Mean() && 13 > dist.Mean()) +} + +type Distribution struct { + Buckets [60]int + Count int + Waittime float64 +} + +func (d *Distribution) Mean() float64 { + return d.Waittime / float64(d.Count) +} + +func printDist(dist Distribution) { + for i, count := range dist.Buckets { + fmt.Printf("%2d %s\n", i, strings.Repeat("x", count)) + } + fmt.Printf("Average interval: %f\n", dist.Mean()) + fmt.Printf("Count: %d\n", dist.Count) +} + +func simulatedDist(ticker Ticker, clock *clock.Mock) Distribution { + since := clock.Now() + until := since.Add(1 * time.Hour) + + var dist Distribution + + last := clock.Now() + for !clock.Now().After(until) { + select { + case tm := <-ticker.Elapsed(): + dist.Buckets[tm.Second()] += 1 + dist.Count++ + dist.Waittime += tm.Sub(last).Seconds() + last = tm + default: + clock.Add(1 * time.Second) + } + } + + return dist +} diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 0c7436941..4b811d8b7 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -25,6 +25,7 @@ following works: - github.com/aristanetworks/glog [Apache License 2.0](https://github.com/aristanetworks/glog/blob/master/LICENSE) - github.com/aristanetworks/goarista [Apache License 2.0](https://github.com/aristanetworks/goarista/blob/master/COPYING) - github.com/aws/aws-sdk-go [Apache License 2.0](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) +- github.com/benbjohnson/clock [MIT License](https://github.com/benbjohnson/clock/blob/master/LICENSE) - github.com/beorn7/perks [MIT License](https://github.com/beorn7/perks/blob/master/LICENSE) - github.com/caio/go-tdigest [MIT License](https://github.com/caio/go-tdigest/blob/master/LICENSE) - github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 4986adc77..9645c925f 100644 --- a/go.mod +++ b/go.mod @@ -26,6 +26,7 @@ require ( github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/armon/go-metrics v0.3.0 // indirect github.com/aws/aws-sdk-go v1.30.9 + github.com/benbjohnson/clock v1.0.0 github.com/bitly/go-hostpool v0.1.0 // indirect github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect github.com/caio/go-tdigest v2.3.0+incompatible // indirect diff --git a/go.sum b/go.sum index d0d30aa18..53073401d 100644 --- a/go.sum +++ b/go.sum @@ -112,6 +112,8 @@ github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUq github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= github.com/aws/aws-sdk-go v1.30.9 h1:DntpBUKkchINPDbhEzDRin1eEn1TG9TZFlzWPf0i8to= github.com/aws/aws-sdk-go v1.30.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/benbjohnson/clock v1.0.0 h1:78Jk/r6m4wCi6sndMpty7A//t4dw/RW5fV4ZgDVfX1w= +github.com/benbjohnson/clock v1.0.0/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= diff --git a/internal/internal.go b/internal/internal.go index 12e4b3af2..777128f66 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -5,12 +5,11 @@ import ( "bytes" "compress/gzip" "context" - "crypto/rand" "errors" "fmt" "io" "math" - "math/big" + "math/rand" "os" "os/exec" "runtime" @@ -211,12 +210,8 @@ func RandomSleep(max time.Duration, shutdown chan struct{}) { if max == 0 { return } - maxSleep := big.NewInt(max.Nanoseconds()) - var sleepns int64 - if j, err := rand.Int(rand.Reader, maxSleep); err == nil { - sleepns = j.Int64() - } + sleepns := rand.Int63n(max.Nanoseconds()) t := time.NewTimer(time.Nanosecond * time.Duration(sleepns)) select { @@ -234,11 +229,7 @@ func RandomDuration(max time.Duration) time.Duration { return 0 } - var sleepns int64 - maxSleep := big.NewInt(max.Nanoseconds()) - if j, err := rand.Int(rand.Reader, maxSleep); err == nil { - sleepns = j.Int64() - } + sleepns := rand.Int63n(max.Nanoseconds()) return time.Duration(sleepns) } From 607038c6528abf26f39c7fa681c4965fb319c824 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 6 May 2020 12:03:16 -0700 Subject: [PATCH 1745/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a987c76e3..75235bec1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ - [#7233](https://github.com/influxdata/telegraf/issues/7233): Fix vSphere 6.7 missing data issue. - [#7448](https://github.com/influxdata/telegraf/issues/7448): Remove debug fields from spunkmetric serializer. - [#7446](https://github.com/influxdata/telegraf/issues/7446): Fix gzip support in socket_listener with tcp sockets. +- [#7390](https://github.com/influxdata/telegraf/issues/7390): Fix interval drift when round_interval is set in agent. ## v1.14.3 [unreleased] From db0d950b3ad6dcedee4eec975a14d215a5295076 Mon Sep 17 00:00:00 2001 From: Giovanni Luisotto Date: Thu, 7 May 2020 20:25:03 +0200 Subject: [PATCH 1746/1815] Add cpu query to sqlserver input (#7359) --- plugins/inputs/sqlserver/README.md | 6 ++-- plugins/inputs/sqlserver/sqlserver.go | 36 ++++++++++++++++++++++ plugins/inputs/sqlserver/sqlserver_test.go | 4 +-- 3 files changed, 42 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index 9d55955d1..320fee275 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -65,6 +65,7 @@ GO ## - Schedulers ## - SqlRequests ## - VolumeSpace + ## - Cpu ## Version 1: ## - PerformanceCounters ## - WaitStatsCategorized @@ -81,13 +82,13 @@ GO # include_query = [] ## A list of queries to explicitly ignore. - exclude_query = [ 'Schedulers' , 'SqlRequests'] + exclude_query = [ 'Schedulers' , 'SqlRequests' ] ``` ### Metrics: To provide backwards compatibility, this plugin support two versions of metrics queries. -**Note**: Version 2 queries are not backwards compatible with the old queries. Any dashboards or queries based on the old query format will not work with the new format. The version 2 queries are written in such a way as to only gather SQL specific metrics (no overall CPU related metrics) and they only report raw metrics, no math has been done to calculate deltas. To graph this data you must calculate deltas in your dashboarding software. +**Note**: Version 2 queries are not backwards compatible with the old queries. 
Any dashboards or queries based on the old query format will not work with the new format. The version 2 queries only report raw metrics, no math has been done to calculate deltas. To graph this data you must calculate deltas in your dashboarding software. #### Version 1 (deprecated in 1.6): The original metrics queries provide: @@ -124,6 +125,7 @@ The new (version 2) metrics provide: dm_exec_sessions that gives you running requests as well as wait types and blocking sessions. - *VolumeSpace* - uses sys.dm_os_volume_stats to get total, used and occupied space on every disk that contains a data or log file. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). It is pointless to run this with high frequency (ie: every 10s), but it won't cause any problem. +- *Cpu* - uses the buffer ring (sys.dm_os_ring_buffers) to get CPU data, the table is updated once per minute. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). In order to allow tracking on a per statement basis this query produces a unique tag for each query. Depending on the database workload, this may diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 6214ee237..cb70686e2 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -117,6 +117,7 @@ func initQueries(s *SQLServer) error { queries["Schedulers"] = Query{Script: sqlServerSchedulersV2, ResultByRow: false} queries["SqlRequests"] = Query{Script: sqlServerRequestsV2, ResultByRow: false} queries["VolumeSpace"] = Query{Script: sqlServerVolumeSpaceV2, ResultByRow: false} + queries["Cpu"] = Query{Script: sqlServerCpuV2, ResultByRow: false} } else { queries["PerformanceCounters"] = Query{Script: sqlPerformanceCounters, ResultByRow: true} queries["WaitStatsCategorized"] = Query{Script: sqlWaitStatsCategorized, ResultByRow: false} @@ -1593,6 +1594,41 @@ IF SERVERPROPERTY('EngineEdition') NOT IN (5,8) END ` +const sqlServerCpuV2 string = ` +/*The ring buffer has a new value every minute*/ +IF SERVERPROPERTY('EngineEdition') NOT IN (5,8) /*No azure DB and managed instance*/ +BEGIN +SELECT + 'sqlserver_cpu' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,[SQLProcessUtilization] AS [sqlserver_process_cpu] + ,[SystemIdle] AS [system_idle_cpu] + ,100 - [SystemIdle] - [SQLProcessUtilization] AS [other_process_cpu] +FROM ( + SELECT TOP 1 + [record_id] + /*,dateadd(ms, (y.[timestamp] - (SELECT CAST([ms_ticks] AS BIGINT) FROM sys.dm_os_sys_info)), GETDATE()) AS [EventTime] --use for check/debug purpose*/ + ,[SQLProcessUtilization] + ,[SystemIdle] + FROM ( + SELECT record.value('(./Record/@id)[1]', 'int') AS [record_id] + ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') AS [SystemIdle] + ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') AS [SQLProcessUtilization] + ,[TIMESTAMP] + FROM ( + SELECT [TIMESTAMP] + ,convert(XML, [record]) AS [record] + FROM sys.dm_os_ring_buffers + WHERE [ring_buffer_type] = N'RING_BUFFER_SCHEDULER_MONITOR' + AND [record] LIKE '%%' + ) AS x + ) AS y + ORDER BY record_id DESC +) as z + +END +` + // Queries V1 const sqlPerformanceMetrics string = `SET DEADLOCK_PRIORITY -10; SET NOCOUNT ON; diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go index ea638ef20..c92353783 100644 --- a/plugins/inputs/sqlserver/sqlserver_test.go +++ 
b/plugins/inputs/sqlserver/sqlserver_test.go @@ -16,13 +16,13 @@ func TestSqlServer_QueriesInclusionExclusion(t *testing.T) { cases := []map[string]interface{}{ { "IncludeQuery": []string{}, - "ExcludeQuery": []string{"WaitStatsCategorized", "DatabaseIO", "ServerProperties", "MemoryClerk", "Schedulers", "VolumeSpace"}, + "ExcludeQuery": []string{"WaitStatsCategorized", "DatabaseIO", "ServerProperties", "MemoryClerk", "Schedulers", "VolumeSpace", "Cpu"}, "queries": []string{"PerformanceCounters", "SqlRequests"}, "queriesTotal": 2, }, { "IncludeQuery": []string{"PerformanceCounters", "SqlRequests"}, - "ExcludeQuery": []string{"SqlRequests", "WaitStatsCategorized", "DatabaseIO", "VolumeSpace"}, + "ExcludeQuery": []string{"SqlRequests", "WaitStatsCategorized", "DatabaseIO", "VolumeSpace", "Cpu"}, "queries": []string{"PerformanceCounters"}, "queriesTotal": 1, }, From f14b3759d4943d006c11ee08e24b239338f7e33e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 7 May 2020 11:26:42 -0700 Subject: [PATCH 1747/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 75235bec1..1125c4f5c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ - [#6915](https://github.com/influxdata/telegraf/pull/6915): Add support for MDS and RGW sockets to ceph input. - [#7391](https://github.com/influxdata/telegraf/pull/7391): Extract target as a tag for each rule in iptables input. - [#7434](https://github.com/influxdata/telegraf/pull/7434): Use docker log timestamp as metric time. +- [#7359](https://github.com/influxdata/telegraf/pull/7359): Add cpu query to sqlserver input. #### Bugfixes From f076b6c115e4cd94d38e2dbe8b7b32b68deb07ee Mon Sep 17 00:00:00 2001 From: Rich Y Date: Fri, 8 May 2020 01:19:03 +0100 Subject: [PATCH 1748/1815] Add field creation to date processor and integer unix time support (#7464) --- plugins/processors/date/README.md | 12 ++- plugins/processors/date/date.go | 56 +++++++++++--- plugins/processors/date/date_test.go | 110 +++++++++++++++++++++++---- 3 files changed, 151 insertions(+), 27 deletions(-) diff --git a/plugins/processors/date/README.md b/plugins/processors/date/README.md index b04964b4a..215cd83e3 100644 --- a/plugins/processors/date/README.md +++ b/plugins/processors/date/README.md @@ -16,15 +16,23 @@ A few example usecases include: ## New tag to create tag_key = "month" + ## New field to create (cannot set both field_key and tag_key) + # field_key = "month" + ## Date format string, must be a representation of the Go "reference time" ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". date_format = "Jan" + ## If destination is a field, date format can also be one of + ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field. + # date_format = "unix" + ## Offset duration added to the date string when writing the new tag. # date_offset = "0s" - ## Timezone to use when generating the date. This can be set to one of - ## "Local", "UTC", or to a location name in the IANA Time Zone database. + ## Timezone to use when creating the tag or field using a reference time + ## string. This can be set to one of "UTC", "Local", or to a location name + ## in the IANA Time Zone database. 
## example: timezone = "America/Los_Angeles" # timezone = "UTC" ``` diff --git a/plugins/processors/date/date.go b/plugins/processors/date/date.go index c8007323f..ef8609811 100644 --- a/plugins/processors/date/date.go +++ b/plugins/processors/date/date.go @@ -1,6 +1,7 @@ package date import ( + "errors" "time" "github.com/influxdata/telegraf" @@ -9,26 +10,35 @@ import ( ) const sampleConfig = ` - ## New tag to create - tag_key = "month" + ## New tag to create + tag_key = "month" - ## Date format string, must be a representation of the Go "reference time" - ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". - date_format = "Jan" + ## New field to create (cannot set both field_key and tag_key) + # field_key = "month" - ## Offset duration added to the date string when writing the new tag. - # date_offset = "0s" + ## Date format string, must be a representation of the Go "reference time" + ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". + date_format = "Jan" - ## Timezone to use when creating the tag. This can be set to one of - ## "UTC", "Local", or to a location name in the IANA Time Zone database. - ## example: timezone = "America/Los_Angeles" - # timezone = "UTC" + ## If destination is a field, date format can also be one of + ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field. + # date_format = "unix" + + ## Offset duration added to the date string when writing the new tag. + # date_offset = "0s" + + ## Timezone to use when creating the tag or field using a reference time + ## string. This can be set to one of "UTC", "Local", or to a location name + ## in the IANA Time Zone database. + ## example: timezone = "America/Los_Angeles" + # timezone = "UTC" ` const defaultTimezone = "UTC" type Date struct { TagKey string `toml:"tag_key"` + FieldKey string `toml:"field_key"` DateFormat string `toml:"date_format"` DateOffset internal.Duration `toml:"date_offset"` Timezone string `toml:"timezone"` @@ -45,6 +55,13 @@ func (d *Date) Description() string { } func (d *Date) Init() error { + // Check either TagKey or FieldKey specified + if len(d.FieldKey) > 0 && len(d.TagKey) > 0 { + return errors.New("Only one of field_key or tag_key can be specified") + } else if len(d.FieldKey) == 0 && len(d.TagKey) == 0 { + return errors.New("One of field_key or tag_key must be specified") + } + var err error // LoadLocation returns UTC if timezone is the empty string. 
d.location, err = time.LoadLocation(d.Timezone) @@ -54,7 +71,22 @@ func (d *Date) Init() error { func (d *Date) Apply(in ...telegraf.Metric) []telegraf.Metric { for _, point := range in { tm := point.Time().In(d.location).Add(d.DateOffset.Duration) - point.AddTag(d.TagKey, tm.Format(d.DateFormat)) + if len(d.TagKey) > 0 { + point.AddTag(d.TagKey, tm.Format(d.DateFormat)) + } else if len(d.FieldKey) > 0 { + switch d.DateFormat { + case "unix": + point.AddField(d.FieldKey, tm.Unix()) + case "unix_ms": + point.AddField(d.FieldKey, tm.UnixNano()/1000000) + case "unix_us": + point.AddField(d.FieldKey, tm.UnixNano()/1000) + case "unix_ns": + point.AddField(d.FieldKey, tm.UnixNano()) + default: + point.AddField(d.FieldKey, tm.Format(d.DateFormat)) + } + } } return in diff --git a/plugins/processors/date/date_test.go b/plugins/processors/date/date_test.go index d97cc2a9c..42e094c93 100644 --- a/plugins/processors/date/date_test.go +++ b/plugins/processors/date/date_test.go @@ -23,6 +23,22 @@ func MustMetric(name string, tags map[string]string, fields map[string]interface return m } +func TestTagAndField(t *testing.T) { + dateFormatTagAndField := Date{ + TagKey: "month", + FieldKey: "month", + } + err := dateFormatTagAndField.Init() + require.Error(t, err) + +} + +func TestNoOutputSpecified(t *testing.T) { + dateFormatNoOutput := Date{} + err := dateFormatNoOutput.Init() + require.Error(t, err) +} + func TestMonthTag(t *testing.T) { dateFormatMonth := Date{ TagKey: "month", @@ -43,25 +59,25 @@ func TestMonthTag(t *testing.T) { assert.Equal(t, map[string]string{"month": month}, monthApply[2].Tags(), "should add tag 'month'") } -func TestYearTag(t *testing.T) { - dateFormatYear := Date{ - TagKey: "year", - DateFormat: "2006", +func TestMonthField(t *testing.T) { + dateFormatMonth := Date{ + FieldKey: "month", + DateFormat: "Jan", } - err := dateFormatYear.Init() + err := dateFormatMonth.Init() require.NoError(t, err) currentTime := time.Now() - year := currentTime.Format("2006") + month := currentTime.Format("Jan") - m4 := MustMetric("foo", nil, nil, currentTime) - m5 := MustMetric("bar", nil, nil, currentTime) - m6 := MustMetric("baz", nil, nil, currentTime) - yearApply := dateFormatYear.Apply(m4, m5, m6) - assert.Equal(t, map[string]string{"year": year}, yearApply[0].Tags(), "should add tag 'year'") - assert.Equal(t, map[string]string{"year": year}, yearApply[1].Tags(), "should add tag 'year'") - assert.Equal(t, map[string]string{"year": year}, yearApply[2].Tags(), "should add tag 'year'") + m1 := MustMetric("foo", nil, nil, currentTime) + m2 := MustMetric("bar", nil, nil, currentTime) + m3 := MustMetric("baz", nil, nil, currentTime) + monthApply := dateFormatMonth.Apply(m1, m2, m3) + assert.Equal(t, map[string]interface{}{"month": month}, monthApply[0].Fields(), "should add field 'month'") + assert.Equal(t, map[string]interface{}{"month": month}, monthApply[1].Fields(), "should add field 'month'") + assert.Equal(t, map[string]interface{}{"month": month}, monthApply[2].Fields(), "should add field 'month'") } func TestOldDateTag(t *testing.T) { @@ -78,6 +94,74 @@ func TestOldDateTag(t *testing.T) { assert.Equal(t, map[string]string{"year": "1993"}, customDateApply[0].Tags(), "should add tag 'year'") } +func TestFieldUnix(t *testing.T) { + dateFormatUnix := Date{ + FieldKey: "unix", + DateFormat: "unix", + } + + err := dateFormatUnix.Init() + require.NoError(t, err) + + currentTime := time.Now() + unixTime := currentTime.Unix() + + m8 := MustMetric("foo", nil, nil, currentTime) + unixApply := 
dateFormatUnix.Apply(m8) + assert.Equal(t, map[string]interface{}{"unix": unixTime}, unixApply[0].Fields(), "should add unix time in s as field 'unix'") +} + +func TestFieldUnixNano(t *testing.T) { + dateFormatUnixNano := Date{ + FieldKey: "unix_ns", + DateFormat: "unix_ns", + } + + err := dateFormatUnixNano.Init() + require.NoError(t, err) + + currentTime := time.Now() + unixNanoTime := currentTime.UnixNano() + + m9 := MustMetric("foo", nil, nil, currentTime) + unixNanoApply := dateFormatUnixNano.Apply(m9) + assert.Equal(t, map[string]interface{}{"unix_ns": unixNanoTime}, unixNanoApply[0].Fields(), "should add unix time in ns as field 'unix_ns'") +} + +func TestFieldUnixMillis(t *testing.T) { + dateFormatUnixMillis := Date{ + FieldKey: "unix_ms", + DateFormat: "unix_ms", + } + + err := dateFormatUnixMillis.Init() + require.NoError(t, err) + + currentTime := time.Now() + unixMillisTime := currentTime.UnixNano() / 1000000 + + m10 := MustMetric("foo", nil, nil, currentTime) + unixMillisApply := dateFormatUnixMillis.Apply(m10) + assert.Equal(t, map[string]interface{}{"unix_ms": unixMillisTime}, unixMillisApply[0].Fields(), "should add unix time in ms as field 'unix_ms'") +} + +func TestFieldUnixMicros(t *testing.T) { + dateFormatUnixMicros := Date{ + FieldKey: "unix_us", + DateFormat: "unix_us", + } + + err := dateFormatUnixMicros.Init() + require.NoError(t, err) + + currentTime := time.Now() + unixMicrosTime := currentTime.UnixNano() / 1000 + + m11 := MustMetric("foo", nil, nil, currentTime) + unixMicrosApply := dateFormatUnixMicros.Apply(m11) + assert.Equal(t, map[string]interface{}{"unix_us": unixMicrosTime}, unixMicrosApply[0].Fields(), "should add unix time in us as field 'unix_us'") +} + func TestDateOffset(t *testing.T) { plugin := &Date{ TagKey: "hour", From 48b8357f016e39724bff00899276199cd6d3f643 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 7 May 2020 17:19:46 -0700 Subject: [PATCH 1749/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1125c4f5c..b4ab2f519 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ - [#7391](https://github.com/influxdata/telegraf/pull/7391): Extract target as a tag for each rule in iptables input. - [#7434](https://github.com/influxdata/telegraf/pull/7434): Use docker log timestamp as metric time. - [#7359](https://github.com/influxdata/telegraf/pull/7359): Add cpu query to sqlserver input. +- [#7464](https://github.com/influxdata/telegraf/pull/7464): Add field creation to date processor and integer unix time support. 
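Taken together, the patch lets the date processor write either a formatted tag or a field, with the four `unix*` formats producing integer fields. A minimal configuration sketch exercising the new options (the field name `month_unix_ms` is illustrative, not part of the patch):

```toml
[[processors.date]]
  ## Write into a field instead of a tag (only one of the two may be set)
  field_key = "month_unix_ms"

  ## With a field destination, "unix", "unix_ms", "unix_us", or "unix_ns"
  ## insert the metric's own timestamp as an integer epoch value
  date_format = "unix_ms"
```

Because `Apply` derives the value from `point.Time()` plus any configured `date_offset`, the inserted integer is deterministic for a given metric, which is what the tests above rely on.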
#### Bugfixes From 134c84f45d090f73c1d2141bf7d6a5259eac0294 Mon Sep 17 00:00:00 2001 From: Alex Heylin Date: Mon, 11 May 2020 17:20:02 +0100 Subject: [PATCH 1750/1815] Fix typo in Windows service description (#7486) Co-authored-by: Alex Heylin --- cmd/telegraf/telegraf_windows.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/telegraf/telegraf_windows.go b/cmd/telegraf/telegraf_windows.go index eaf700ed0..830e6eaa4 100644 --- a/cmd/telegraf/telegraf_windows.go +++ b/cmd/telegraf/telegraf_windows.go @@ -63,7 +63,7 @@ func runAsWindowsService(inputFilters, outputFilters, aggregatorFilters, process svcConfig := &service.Config{ Name: *fServiceName, DisplayName: *fServiceDisplayName, - Description: "Collects data using a series of plugins and publishes it to" + + Description: "Collects data using a series of plugins and publishes it to " + "another series of plugins.", Arguments: []string{"--config", programFiles + "\\Telegraf\\telegraf.conf"}, } From 568cb8e64c42952eee41a6473a852b45c7d856d5 Mon Sep 17 00:00:00 2001 From: Harshit Bansal Date: Tue, 12 May 2020 00:06:21 +0530 Subject: [PATCH 1751/1815] Add integer support to enum processor (#7483) --- plugins/processors/enum/README.md | 2 +- plugins/processors/enum/enum.go | 16 +++++--- plugins/processors/enum/enum_test.go | 61 ++++++++++++++++++++-------- 3 files changed, 55 insertions(+), 24 deletions(-) diff --git a/plugins/processors/enum/README.md b/plugins/processors/enum/README.md index 0f2a6135d..72a055625 100644 --- a/plugins/processors/enum/README.md +++ b/plugins/processors/enum/README.md @@ -2,7 +2,7 @@ The Enum Processor allows the configuration of value mappings for metric tags or fields. The main use-case for this is to rewrite status codes such as _red_, _amber_ and -_green_ by numeric values such as 0, 1, 2. The plugin supports string and bool +_green_ by numeric values such as 0, 1, 2. The plugin supports string, int and bool types for the field values. Multiple tags or fields can be configured with separate value mappings for each. Default mapping values can be configured to be used for all values, which are not contained in the value_mappings. 
The diff --git a/plugins/processors/enum/enum.go b/plugins/processors/enum/enum.go index 427b7fb43..a96e7d509 100644 --- a/plugins/processors/enum/enum.go +++ b/plugins/processors/enum/enum.go @@ -63,7 +63,7 @@ func (mapper *EnumMapper) applyMappings(metric telegraf.Metric) telegraf.Metric for _, mapping := range mapper.Mappings { if mapping.Field != "" { if originalValue, isPresent := metric.GetField(mapping.Field); isPresent { - if adjustedValue, isString := adjustBoolValue(originalValue).(string); isString { + if adjustedValue, isString := adjustValue(originalValue).(string); isString { if mappedValue, isMappedValuePresent := mapping.mapValue(adjustedValue); isMappedValuePresent { writeField(metric, mapping.getDestination(), mappedValue) } @@ -86,11 +86,17 @@ func (mapper *EnumMapper) applyMappings(metric telegraf.Metric) telegraf.Metric return metric } -func adjustBoolValue(in interface{}) interface{} { - if mappedBool, isBool := in.(bool); isBool == true { - return strconv.FormatBool(mappedBool) +func adjustValue(in interface{}) interface{} { + switch val := in.(type) { + case bool: + return strconv.FormatBool(val) + case int64: + return strconv.FormatInt(val, 10) + case uint64: + return strconv.FormatUint(val, 10) + default: + return in } - return in } func (mapping *Mapping) mapValue(original string) (interface{}, bool) { diff --git a/plugins/processors/enum/enum_test.go b/plugins/processors/enum/enum_test.go index 5f89510ca..de13aad15 100644 --- a/plugins/processors/enum/enum_test.go +++ b/plugins/processors/enum/enum_test.go @@ -14,7 +14,9 @@ func createTestMetric() telegraf.Metric { map[string]string{"tag": "tag_value"}, map[string]interface{}{ "string_value": "test", - "int_value": int(13), + "int_value": int(200), + "uint_value": uint(500), + "float_value": float64(3.14), "true_value": true, }, time.Now(), @@ -52,21 +54,14 @@ func TestRetainsMetric(t *testing.T) { fields := target.Fields() assertFieldValue(t, "test", "string_value", fields) - assertFieldValue(t, 13, "int_value", fields) + assertFieldValue(t, 200, "int_value", fields) + assertFieldValue(t, 500, "uint_value", fields) assertFieldValue(t, true, "true_value", fields) assert.Equal(t, "m1", target.Name()) assert.Equal(t, source.Tags(), target.Tags()) assert.Equal(t, source.Time(), target.Time()) } -func TestMapsSingleStringValue(t *testing.T) { - mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", ValueMappings: map[string]interface{}{"test": int64(1)}}}} - - fields := calculateProcessedValues(mapper, createTestMetric()) - - assertFieldValue(t, 1, "string_value", fields) -} - func TestMapsSingleStringValueTag(t *testing.T) { mapper := EnumMapper{Mappings: []Mapping{{Tag: "tag", ValueMappings: map[string]interface{}{"tag_value": "valuable"}}}} @@ -75,20 +70,50 @@ func TestMapsSingleStringValueTag(t *testing.T) { assertTagValue(t, "valuable", "tag", tags) } -func TestNoFailureOnMappingsOnNonStringValuedFields(t *testing.T) { - mapper := EnumMapper{Mappings: []Mapping{{Field: "int_value", ValueMappings: map[string]interface{}{"13i": int64(7)}}}} +func TestNoFailureOnMappingsOnNonSupportedValuedFields(t *testing.T) { + mapper := EnumMapper{Mappings: []Mapping{{Field: "float_value", ValueMappings: map[string]interface{}{"3.14": "pi"}}}} fields := calculateProcessedValues(mapper, createTestMetric()) - assertFieldValue(t, 13, "int_value", fields) + assertFieldValue(t, float64(3.14), "float_value", fields) } -func TestMapSingleBoolValue(t *testing.T) { - mapper := EnumMapper{Mappings: []Mapping{{Field: 
"true_value", ValueMappings: map[string]interface{}{"true": int64(1)}}}} +func TestMappings(t *testing.T) { + mappings := []map[string][]interface{}{ + { + "field_name": []interface{}{"string_value"}, + "target_values": []interface{}{"test", "test", "test", "not_test", "50", "true"}, + "mapped_values": []interface{}{"test_1", 5, true, "test_1", 10, false}, + "expected_values": []interface{}{"test_1", 5, true, "test", "test", "test"}, + }, + { + "field_name": []interface{}{"true_value"}, + "target_value": []interface{}{"true", "true", "true", "false", "test", "5"}, + "mapped_value": []interface{}{false, 1, "false", false, false, false}, + "expected_value": []interface{}{false, 1, "false", true, true, true}, + }, + { + "field_name": []interface{}{"int_value"}, + "target_value": []interface{}{"200", "200", "200", "200", "test", "5"}, + "mapped_value": []interface{}{"http_ok", true, 1, float64(200.001), false, false}, + "expected_value": []interface{}{"http_ok", true, 1, float64(200.001), 200, 200}, + }, + { + "field_name": []interface{}{"uint_value"}, + "target_value": []interface{}{"500", "500", "500", "test", "false", "5"}, + "mapped_value": []interface{}{"internal_error", 1, false, false, false, false}, + "expected_value": []interface{}{"internal_error", 1, false, 500, 500, 500}, + }, + } - fields := calculateProcessedValues(mapper, createTestMetric()) - - assertFieldValue(t, 1, "true_value", fields) + for _, mapping := range mappings { + field_name := mapping["field_name"][0].(string) + for index := range mapping["target_value"] { + mapper := EnumMapper{Mappings: []Mapping{{Field: field_name, ValueMappings: map[string]interface{}{mapping["target_value"][index].(string): mapping["mapped_value"][index]}}}} + fields := calculateProcessedValues(mapper, createTestMetric()) + assertFieldValue(t, mapping["expected_value"][index], field_name, fields) + } + } } func TestMapsToDefaultValueOnUnknownSourceValue(t *testing.T) { From 6a9e879d7100a3a9a8fbc151b3ce85788e17f650 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 11 May 2020 11:37:20 -0700 Subject: [PATCH 1752/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b4ab2f519..a28edff05 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,7 @@ - [#7434](https://github.com/influxdata/telegraf/pull/7434): Use docker log timestamp as metric time. - [#7359](https://github.com/influxdata/telegraf/pull/7359): Add cpu query to sqlserver input. - [#7464](https://github.com/influxdata/telegraf/pull/7464): Add field creation to date processor and integer unix time support. +- [#7483](https://github.com/influxdata/telegraf/pull/7483): Add integer mapping support to enum processor. 
#### Bugfixes From 00ad5baa197ac89e5aec6bb8ba7fdac4a213989e Mon Sep 17 00:00:00 2001 From: Kostiantyn Nemchenko Date: Tue, 12 May 2020 21:31:54 +0300 Subject: [PATCH 1753/1815] Add additional fields to mongodb input (#7321) New metrics include: - assertions - database operations - documents/index keys usage - replication - tcmalloc memory management - storage - version --- plugins/inputs/mongodb/README.md | 82 +++++- plugins/inputs/mongodb/mongodb_data.go | 98 +++++-- plugins/inputs/mongodb/mongodb_data_test.go | 304 ++++++++++++++------ plugins/inputs/mongodb/mongostat.go | 266 ++++++++++++++++- 4 files changed, 632 insertions(+), 118 deletions(-) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index 7148c3a67..ba2e9148e 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -60,35 +60,46 @@ by running Telegraf with the `--debug` argument. - fields: - active_reads (integer) - active_writes (integer) + - aggregate_command_failed (integer) + - aggregate_command_total (integer) + - assert_msg (integer) + - assert_regular (integer) + - assert_rollovers (integer) + - assert_user (integer) + - assert_warning (integer) - available_reads (integer) - available_writes (integer) - commands (integer) - - connections_current (integer) - connections_available (integer) + - connections_current (integer) - connections_total_created (integer) - - cursor_timed_out_count (integer) + - count_command_failed (integer) + - count_command_total (integer) - cursor_no_timeout_count (integer) - cursor_pinned_count (integer) + - cursor_timed_out_count (integer) - cursor_total_count (integer) - - deletes (integer) - - delete_command_total (integer) - delete_command_failed (integer) + - delete_command_total (integer) + - deletes (integer) + - distinct_command_failed (integer) + - distinct_command_total (integer) - document_deleted (integer) - document_inserted (integer) - document_returned (integer) - document_updated (integer) - - find_command_total (integer) - - find_command_failed (integer) - - find_and_modify_command_total (integer) - find_and_modify_command_failed (integer) + - find_and_modify_command_total (integer) + - find_command_failed (integer) + - find_command_total (integer) - flushes (integer) - flushes_total_time_ns (integer) - - getmores (integer) - - get_more_command_total (integer) - get_more_command_failed (integer) - - inserts (integer) - - insert_command_total (integer) + - get_more_command_total (integer) + - getmores (integer) - insert_command_failed (integer) + - insert_command_total (integer) + - inserts (integer) - jumbo_chunks (integer) - latency_commands_count (integer) - latency_commands (integer) @@ -100,33 +111,74 @@ by running Telegraf with the `--debug` argument. 
- net_in_bytes_count (integer) - net_out_bytes_count (integer) - open_connections (integer) + - operation_scan_and_order (integer) + - operation_write_conflicts (integer) + - page_faults (integer) - percent_cache_dirty (float) - percent_cache_used (float) - queries (integer) - queued_reads (integer) - queued_writes (integer) + - repl_apply_batches_num (integer) + - repl_apply_batches_total_millis (integer) + - repl_apply_ops (integer) + - repl_buffer_count (integer) + - repl_buffer_size_bytes (integer) - repl_commands (integer) - repl_deletes (integer) + - repl_executor_pool_in_progress_count (integer) + - repl_executor_queues_network_in_progress (integer) + - repl_executor_queues_sleepers (integer) + - repl_executor_unsignaled_events (integer) - repl_getmores (integer) - repl_inserts (integer) - repl_lag (integer) + - repl_network_bytes (integer) + - repl_network_getmores_num (integer) + - repl_network_getmores_total_millis (integer) + - repl_network_ops (integer) - repl_queries (integer) - repl_updates (integer) - repl_oplog_window_sec (integer) - resident_megabytes (integer) - state (string) + - storage_freelist_search_bucket_exhausted (integer) + - storage_freelist_search_requests (integer) + - storage_freelist_search_scanned (integer) + - tcmalloc_central_cache_free_bytes (integer) + - tcmalloc_current_allocated_bytes (integer) + - tcmalloc_current_total_thread_cache_bytes (integer) + - tcmalloc_heap_size (integer) + - tcmalloc_max_total_thread_cache_bytes (integer) + - tcmalloc_pageheap_commit_count (integer) + - tcmalloc_pageheap_committed_bytes (integer) + - tcmalloc_pageheap_decommit_count (integer) + - tcmalloc_pageheap_free_bytes (integer) + - tcmalloc_pageheap_reserve_count (integer) + - tcmalloc_pageheap_scavenge_count (integer) + - tcmalloc_pageheap_total_commit_bytes (integer) + - tcmalloc_pageheap_total_decommit_bytes (integer) + - tcmalloc_pageheap_total_reserve_bytes (integer) + - tcmalloc_pageheap_unmapped_bytes (integer) + - tcmalloc_spinlock_total_delay_ns (integer) + - tcmalloc_thread_cache_free_bytes (integer) + - tcmalloc_total_free_bytes (integer) + - tcmalloc_transfer_cache_free_bytes (integer) - total_available (integer) - total_created (integer) + - total_docs_scanned (integer) - total_in_use (integer) + - total_keys_scanned (integer) - total_refreshing (integer) - total_tickets_reads (integer) - total_tickets_writes (integer) - ttl_deletes (integer) - ttl_passes (integer) - - updates (integer) - - update_command_total (integer) - update_command_failed (integer) + - update_command_total (integer) + - updates (integer) - uptime_ns (integer) + - version (string) - vsize_megabytes (integer) - wtcache_app_threads_page_read_count (integer) - wtcache_app_threads_page_read_time (integer) @@ -208,8 +260,8 @@ by running Telegraf with the `--debug` argument. 
### Example Output: ``` -mongodb,hostname=127.0.0.1:27017 active_reads=0i,active_writes=0i,available_reads=128i,available_writes=128i,commands=1335i,commands_per_sec=7i,connections_available=814i,connections_current=5i,connections_total_created=0i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=1i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=1i,deletes=0i,deletes_per_sec=0i,document_deleted=0i,document_inserted=0i,document_returned=13i,document_updated=0i,flushes=5i,flushes_per_sec=0i,getmores=269i,getmores_per_sec=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands_count=0i,latency_commands=0i,latency_reads_count=0i,latency_reads=0i,latency_writes_count=0i,latency_writes=0i,member_status="PRI",net_in_bytes=986i,net_in_bytes_count=358006i,net_out_bytes=23906i,net_out_bytes_count=661507i,open_connections=5i,percent_cache_dirty=0,percent_cache_used=0,queries=18i,queries_per_sec=3i,queued_reads=0i,queued_writes=0i,repl_commands=0i,repl_commands_per_sec=0i,repl_deletes=0i,repl_deletes_per_sec=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=0i,repl_inserts_per_sec=0i,repl_lag=0i,repl_oplog_window_sec=24355215i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=0i,repl_updates_per_sec=0i,resident_megabytes=62i,state="PRIMARY",total_available=0i,total_created=0i,total_in_use=0i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=23i,ttl_passes_per_sec=0i,updates=0i,updates_per_sec=0i,vsize_megabytes=713i,wtcache_app_threads_page_read_count=13i,wtcache_app_threads_page_read_time=74i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=55271i,wtcache_bytes_written_from=125402i,wtcache_current_bytes=117050i,wtcache_max_bytes_configured=1073741824i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=0i,wtcache_worker_thread_evictingpages=0i 1547159491000000000 -mongodb,hostname=127.0.0.1:27017,node_type=PRI 
active_reads=0i,active_writes=0i,available_reads=128i,available_writes=128i,commands=1335i,commands_per_sec=7i,connections_available=814i,connections_current=5i,connections_total_created=0i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=1i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=1i,deletes=0i,deletes_per_sec=0i,document_deleted=0i,document_inserted=0i,document_returned=13i,document_updated=0i,flushes=5i,flushes_per_sec=0i,getmores=269i,getmores_per_sec=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,member_status="PRI",net_in_bytes=986i,net_in_bytes_count=358006i,net_out_bytes=23906i,net_out_bytes_count=661507i,open_connections=5i,percent_cache_dirty=0,percent_cache_used=0,queries=18i,queries_per_sec=3i,queued_reads=0i,queued_writes=0i,repl_commands=0i,repl_commands_per_sec=0i,repl_deletes=0i,repl_deletes_per_sec=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=0i,repl_inserts_per_sec=0i,repl_lag=0i,repl_oplog_window_sec=24355215i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=0i,repl_updates_per_sec=0i,resident_megabytes=62i,state="PRIMARY",total_available=0i,total_created=0i,total_in_use=0i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=23i,ttl_passes_per_sec=0i,updates=0i,updates_per_sec=0i,vsize_megabytes=713i,wtcache_app_threads_page_read_count=13i,wtcache_app_threads_page_read_time=74i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=55271i,wtcache_bytes_written_from=125402i,wtcache_current_bytes=117050i,wtcache_max_bytes_configured=1073741824i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=0i,wtcache_worker_thread_evictingpages=0i 1547159491000000000 +mongodb,hostname=127.0.0.1:27017 
active_reads=3i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=87210i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=0i,assert_warning=0i,available_reads=125i,available_writes=128i,commands=218126i,commands_per_sec=1876i,connections_available=838853i,connections_current=7i,connections_total_created=8i,count_command_failed=0i,count_command_total=7i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=87190i,document_deleted=0i,document_inserted=0i,document_returned=7i,document_updated=43595i,find_and_modify_command_failed=0i,find_and_modify_command_total=43595i,find_command_failed=0i,find_command_total=348819i,flushes=1i,flushes_per_sec=0i,flushes_total_time_ns=5000000i,get_more_command_failed=0i,get_more_command_total=0i,getmores=7i,getmores_per_sec=1i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=44179i,latency_commands_count=122i,latency_reads=36662189i,latency_reads_count=523229i,latency_writes=6768713i,latency_writes_count=87190i,net_in_bytes=837378i,net_in_bytes_count=97692502i,net_out_bytes=690836i,net_out_bytes_count=75377383i,open_connections=7i,operation_scan_and_order=87193i,operation_write_conflicts=7i,page_faults=0i,percent_cache_dirty=0.9,percent_cache_used=1,queries=348816i,queries_per_sec=2988i,queued_reads=0i,queued_writes=0i,resident_megabytes=77i,storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=280136i,tcmalloc_current_allocated_bytes=77677288i,tcmalloc_current_total_thread_cache_bytes=1222608i,tcmalloc_heap_size=142659584i,tcmalloc_max_total_thread_cache_bytes=260046848i,tcmalloc_pageheap_commit_count=1898i,tcmalloc_pageheap_committed_bytes=130084864i,tcmalloc_pageheap_decommit_count=889i,tcmalloc_pageheap_free_bytes=50610176i,tcmalloc_pageheap_reserve_count=50i,tcmalloc_pageheap_scavenge_count=884i,tcmalloc_pageheap_total_commit_bytes=13021937664i,tcmalloc_pageheap_total_decommit_bytes=12891852800i,tcmalloc_pageheap_total_reserve_bytes=142659584i,tcmalloc_pageheap_unmapped_bytes=12574720i,tcmalloc_spinlock_total_delay_ns=9767500i,tcmalloc_thread_cache_free_bytes=1222608i,tcmalloc_total_free_bytes=1797400i,tcmalloc_transfer_cache_free_bytes=294656i,total_available=0i,total_created=0i,total_docs_scanned=43595i,total_in_use=0i,total_keys_scanned=130805i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=0i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=43595i,updates=43595i,updates_per_sec=372i,uptime_ns=60023000000i,version="3.6.17",vsize_megabytes=1048i,wtcache_app_threads_page_read_count=108i,wtcache_app_threads_page_read_time=25995i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=2487250i,wtcache_bytes_written_from=74i,wtcache_current_bytes=5014530i,wtcache_internal_pages_evicted=0i,wtcache_max_bytes_configured=505413632i,wtcache_modified_pages_evicted=0i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_pages_read_into=139i,wtcache_pages_requested_from=699135i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=4797426i,wtcache_unmodified_pages_evicted=0i,wtcache_worker_thread_e
victingpages=0i 1586379818000000000 +mongodb,hostname=127.0.0.1:27017,node_type=SEC,rs_name=rs0 active_reads=1i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=1i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=79i,assert_warning=0i,available_reads=127i,available_writes=128i,commands=1121855i,commands_per_sec=10i,connections_available=51183i,connections_current=17i,connections_total_created=557i,count_command_failed=0i,count_command_total=46307i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=28i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=0i,document_deleted=0i,document_inserted=0i,document_returned=2248129i,document_updated=0i,find_and_modify_command_failed=0i,find_and_modify_command_total=0i,find_command_failed=2i,find_command_total=8764i,flushes=7850i,flushes_per_sec=0i,flushes_total_time_ns=4535446000000i,get_more_command_failed=0i,get_more_command_total=1993i,getmores=2018i,getmores_per_sec=0i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=112011949i,latency_commands_count=1072472i,latency_reads=1877142443i,latency_reads_count=57086i,latency_writes=0i,latency_writes_count=0i,member_status="SEC",net_in_bytes=1212i,net_in_bytes_count=263928689i,net_out_bytes=41051i,net_out_bytes_count=2475389483i,open_connections=17i,operation_scan_and_order=34i,operation_write_conflicts=0i,page_faults=317i,percent_cache_dirty=1.6,percent_cache_used=73,queries=8764i,queries_per_sec=0i,queued_reads=0i,queued_writes=0i,repl_apply_batches_num=17839419i,repl_apply_batches_total_millis=399929i,repl_apply_ops=23355263i,repl_buffer_count=0i,repl_buffer_size_bytes=0i,repl_commands=11i,repl_commands_per_sec=0i,repl_deletes=440608i,repl_deletes_per_sec=0i,repl_executor_pool_in_progress_count=0i,repl_executor_queues_network_in_progress=0i,repl_executor_queues_sleepers=4i,repl_executor_unsignaled_events=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=1875729i,repl_inserts_per_sec=0i,repl_lag=0i,repl_network_bytes=39122199371i,repl_network_getmores_num=34908797i,repl_network_getmores_total_millis=434805356i,repl_network_ops=23199086i,repl_oplog_window_sec=619292i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=21034729i,repl_updates_per_sec=38i,resident_megabytes=6721i,state="SECONDARY",storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=358512400i,tcmalloc_current_allocated_bytes=5427379424i,tcmalloc_current_total_thread_cache_bytes=70349552i,tcmalloc_heap_size=10199310336i,tcmalloc_max_total_thread_cache_bytes=1073741824i,tcmalloc_pageheap_commit_count=790819i,tcmalloc_pageheap_committed_bytes=7064821760i,tcmalloc_pageheap_decommit_count=533347i,tcmalloc_pageheap_free_bytes=1207816192i,tcmalloc_pageheap_reserve_count=7706i,tcmalloc_pageheap_scavenge_count=426235i,tcmalloc_pageheap_total_commit_bytes=116127649792i,tcmalloc_pageheap_total_decommit_bytes=109062828032i,tcmalloc_pageheap_total_reserve_bytes=10199310336i,tcmalloc_pageheap_unmapped_bytes=3134488576i,tcmalloc_spinlock_total_delay_ns=2518474348i,tcmalloc_thread_cache_free_bytes=70349552i,tcmalloc_total_free_bytes=429626144i,tcmalloc_transfer_cache_free_bytes=764192i,total_available=0i,total_created=0i,total_docs_scanned=735004782i,total_in_use=
0i,total_keys_scanned=6188216i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=7892i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=0i,updates=0i,updates_per_sec=0i,uptime_ns=473590288000000i,version="3.6.17",vsize_megabytes=11136i,wtcache_app_threads_page_read_count=11467625i,wtcache_app_threads_page_read_time=1700336840i,wtcache_app_threads_page_write_count=13268184i,wtcache_bytes_read_into=348022587843i,wtcache_bytes_written_from=322571702254i,wtcache_current_bytes=5509459274i,wtcache_internal_pages_evicted=109108i,wtcache_max_bytes_configured=7547650048i,wtcache_modified_pages_evicted=911196i,wtcache_pages_evicted_by_app_thread=17366i,wtcache_pages_queued_for_eviction=16572754i,wtcache_pages_read_into=11689764i,wtcache_pages_requested_from=499825861i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=117487510i,wtcache_unmodified_pages_evicted=11058458i,wtcache_worker_thread_evictingpages=11907226i 1586379707000000000 mongodb_db_stats,db_name=admin,hostname=127.0.0.1:27017 avg_obj_size=241,collections=2i,data_size=723i,index_size=49152i,indexes=3i,num_extents=0i,objects=3i,ok=1i,storage_size=53248i,type="db_stat" 1547159491000000000 mongodb_db_stats,db_name=local,hostname=127.0.0.1:27017 avg_obj_size=813.9705882352941,collections=6i,data_size=55350i,index_size=102400i,indexes=5i,num_extents=0i,objects=68i,ok=1i,storage_size=204800i,type="db_stat" 1547159491000000000 mongodb_col_stats,collection=foo,db_name=local,hostname=127.0.0.1:27017 size=375005928i,avg_obj_size=5494,type="col_stat",storage_size=249307136i,total_index_size=2138112i,ok=1i,count=68251i 1547159491000000000 diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index 606c2bbdc..6c0884a46 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -88,11 +88,29 @@ var DefaultStats = map[string]string{ "connections_current": "CurrentC", "connections_available": "AvailableC", "connections_total_created": "TotalCreatedC", + "operation_scan_and_order": "ScanAndOrderOp", + "operation_write_conflicts": "WriteConflictsOp", + "total_keys_scanned": "TotalKeysScanned", + "total_docs_scanned": "TotalObjectsScanned", +} + +var DefaultAssertsStats = map[string]string{ + "assert_regular": "Regular", + "assert_warning": "Warning", + "assert_msg": "Msg", + "assert_user": "User", + "assert_rollovers": "Rollovers", } var DefaultCommandsStats = map[string]string{ + "aggregate_command_total": "AggregateCommandTotal", + "aggregate_command_failed": "AggregateCommandFailed", + "count_command_total": "CountCommandTotal", + "count_command_failed": "CountCommandFailed", "delete_command_total": "DeleteCommandTotal", "delete_command_failed": "DeleteCommandFailed", + "distinct_command_total": "DistinctCommandTotal", + "distinct_command_failed": "DistinctCommandFailed", "find_command_total": "FindCommandTotal", "find_command_failed": "FindCommandFailed", "find_and_modify_command_total": "FindAndModifyCommandTotal", @@ -115,21 +133,34 @@ var DefaultLatencyStats = map[string]string{ } var DefaultReplStats = map[string]string{ - "repl_inserts": "InsertRCnt", - "repl_inserts_per_sec": "InsertR", - "repl_queries": "QueryRCnt", - "repl_queries_per_sec": "QueryR", - "repl_updates": "UpdateRCnt", - "repl_updates_per_sec": "UpdateR", - "repl_deletes": "DeleteRCnt", - "repl_deletes_per_sec": "DeleteR", - "repl_getmores": "GetMoreRCnt", - "repl_getmores_per_sec": "GetMoreR", - 
"repl_commands": "CommandRCnt", - "repl_commands_per_sec": "CommandR", - "member_status": "NodeType", - "state": "NodeState", - "repl_lag": "ReplLag", + "repl_inserts": "InsertRCnt", + "repl_inserts_per_sec": "InsertR", + "repl_queries": "QueryRCnt", + "repl_queries_per_sec": "QueryR", + "repl_updates": "UpdateRCnt", + "repl_updates_per_sec": "UpdateR", + "repl_deletes": "DeleteRCnt", + "repl_deletes_per_sec": "DeleteR", + "repl_getmores": "GetMoreRCnt", + "repl_getmores_per_sec": "GetMoreR", + "repl_commands": "CommandRCnt", + "repl_commands_per_sec": "CommandR", + "member_status": "NodeType", + "state": "NodeState", + "repl_lag": "ReplLag", + "repl_network_bytes": "ReplNetworkBytes", + "repl_network_getmores_num": "ReplNetworkGetmoresNum", + "repl_network_getmores_total_millis": "ReplNetworkGetmoresTotalMillis", + "repl_network_ops": "ReplNetworkOps", + "repl_buffer_count": "ReplBufferCount", + "repl_buffer_size_bytes": "ReplBufferSizeBytes", + "repl_apply_batches_num": "ReplApplyBatchesNum", + "repl_apply_batches_total_millis": "ReplApplyBatchesTotalMillis", + "repl_apply_ops": "ReplApplyOps", + "repl_executor_pool_in_progress_count": "ReplExecutorPoolInProgressCount", + "repl_executor_queues_network_in_progress": "ReplExecutorQueuesNetworkInProgress", + "repl_executor_queues_sleepers": "ReplExecutorQueuesSleepers", + "repl_executor_unsignaled_events": "ReplExecutorUnsignaledEvents", } var DefaultClusterStats = map[string]string{ @@ -182,6 +213,34 @@ var WiredTigerExtStats = map[string]string{ "wtcache_unmodified_pages_evicted": "UnmodifiedPagesEvicted", } +var DefaultTCMallocStats = map[string]string{ + "tcmalloc_current_allocated_bytes": "TCMallocCurrentAllocatedBytes", + "tcmalloc_heap_size": "TCMallocHeapSize", + "tcmalloc_central_cache_free_bytes": "TCMallocCentralCacheFreeBytes", + "tcmalloc_current_total_thread_cache_bytes": "TCMallocCurrentTotalThreadCacheBytes", + "tcmalloc_max_total_thread_cache_bytes": "TCMallocMaxTotalThreadCacheBytes", + "tcmalloc_total_free_bytes": "TCMallocTotalFreeBytes", + "tcmalloc_transfer_cache_free_bytes": "TCMallocTransferCacheFreeBytes", + "tcmalloc_thread_cache_free_bytes": "TCMallocThreadCacheFreeBytes", + "tcmalloc_spinlock_total_delay_ns": "TCMallocSpinLockTotalDelayNanos", + "tcmalloc_pageheap_free_bytes": "TCMallocPageheapFreeBytes", + "tcmalloc_pageheap_unmapped_bytes": "TCMallocPageheapUnmappedBytes", + "tcmalloc_pageheap_committed_bytes": "TCMallocPageheapComittedBytes", + "tcmalloc_pageheap_scavenge_count": "TCMallocPageheapScavengeCount", + "tcmalloc_pageheap_commit_count": "TCMallocPageheapCommitCount", + "tcmalloc_pageheap_total_commit_bytes": "TCMallocPageheapTotalCommitBytes", + "tcmalloc_pageheap_decommit_count": "TCMallocPageheapDecommitCount", + "tcmalloc_pageheap_total_decommit_bytes": "TCMallocPageheapTotalDecommitBytes", + "tcmalloc_pageheap_reserve_count": "TCMallocPageheapReserveCount", + "tcmalloc_pageheap_total_reserve_bytes": "TCMallocPageheapTotalReserveBytes", +} + +var DefaultStorageStats = map[string]string{ + "storage_freelist_search_bucket_exhausted": "StorageFreelistSearchBucketExhausted", + "storage_freelist_search_requests": "StorageFreelistSearchRequests", + "storage_freelist_search_scanned": "StorageFreelistSearchScanned", +} + var DbDataStats = map[string]string{ "collections": "Collections", "objects": "Objects", @@ -272,9 +331,16 @@ func (d *MongodbData) AddDefaultStats() { d.add("repl_oplog_window_sec", d.StatLine.OplogStats.TimeDiff) } - d.addStat(statLine, DefaultCommandsStats) + if d.StatLine.Version != 
"" { + d.add("version", d.StatLine.Version) + } + + d.addStat(statLine, DefaultAssertsStats) d.addStat(statLine, DefaultClusterStats) + d.addStat(statLine, DefaultCommandsStats) d.addStat(statLine, DefaultShardStats) + d.addStat(statLine, DefaultStorageStats) + d.addStat(statLine, DefaultTCMallocStats) if d.StatLine.StorageEngine == "mmapv1" || d.StatLine.StorageEngine == "rocksdb" { d.addStat(statLine, MmapStats) diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index 34b03b464..706cc7e6f 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -52,6 +52,10 @@ func TestAddNonReplStats(t *testing.T) { CurrentC: 0, AvailableC: 0, TotalCreatedC: 0, + ScanAndOrderOp: 0, + WriteConflictsOp: 0, + TotalKeysScanned: 0, + TotalObjectsScanned: 0, }, tags, ) @@ -169,11 +173,39 @@ func TestAddLatencyStats(t *testing.T) { } } +func TestAddAssertsStats(t *testing.T) { + d := NewMongodbData( + &StatLine{ + Regular: 3, + Warning: 9, + Msg: 2, + User: 34, + Rollovers: 0, + }, + tags, + ) + + var acc testutil.Accumulator + + d.AddDefaultStats() + d.flush(&acc) + + for key := range DefaultAssertsStats { + assert.True(t, acc.HasInt64Field("mongodb", key)) + } +} + func TestAddCommandsStats(t *testing.T) { d := NewMongodbData( &StatLine{ + AggregateCommandTotal: 12, + AggregateCommandFailed: 2, + CountCommandTotal: 18, + CountCommandFailed: 5, DeleteCommandTotal: 73, DeleteCommandFailed: 364, + DistinctCommandTotal: 87, + DistinctCommandFailed: 19, FindCommandTotal: 113, FindCommandFailed: 201, FindAndModifyCommandTotal: 7, @@ -198,6 +230,62 @@ func TestAddCommandsStats(t *testing.T) { } } +func TestAddTCMallocStats(t *testing.T) { + d := NewMongodbData( + &StatLine{ + TCMallocCurrentAllocatedBytes: 5877253096, + TCMallocHeapSize: 8067108864, + TCMallocPageheapFreeBytes: 1054994432, + TCMallocPageheapUnmappedBytes: 677859328, + TCMallocMaxTotalThreadCacheBytes: 1073741824, + TCMallocCurrentTotalThreadCacheBytes: 80405312, + TCMallocTotalFreeBytes: 457002008, + TCMallocCentralCacheFreeBytes: 375131800, + TCMallocTransferCacheFreeBytes: 1464896, + TCMallocThreadCacheFreeBytes: 80405312, + TCMallocPageheapComittedBytes: 7389249536, + TCMallocPageheapScavengeCount: 396394, + TCMallocPageheapCommitCount: 641765, + TCMallocPageheapTotalCommitBytes: 102248751104, + TCMallocPageheapDecommitCount: 396394, + TCMallocPageheapTotalDecommitBytes: 94859501568, + TCMallocPageheapReserveCount: 6179, + TCMallocPageheapTotalReserveBytes: 8067108864, + TCMallocSpinLockTotalDelayNanos: 2344453860, + }, + tags, + ) + + var acc testutil.Accumulator + + d.AddDefaultStats() + d.flush(&acc) + + for key := range DefaultTCMallocStats { + assert.True(t, acc.HasInt64Field("mongodb", key)) + } +} + +func TestAddStorageStats(t *testing.T) { + d := NewMongodbData( + &StatLine{ + StorageFreelistSearchBucketExhausted: 0, + StorageFreelistSearchRequests: 0, + StorageFreelistSearchScanned: 0, + }, + tags, + ) + + var acc testutil.Accumulator + + d.AddDefaultStats() + d.flush(&acc) + + for key := range DefaultStorageStats { + assert.True(t, acc.HasInt64Field("mongodb", key)) + } +} + func TestAddShardHostStats(t *testing.T) { expectedHosts := []string{"hostA", "hostB"} hostStatLines := map[string]ShardHostStatLine{} @@ -245,6 +333,7 @@ func TestStateTag(t *testing.T) { NodeType: "PRI", NodeState: "PRIMARY", ReplSetName: "rs1", + Version: "3.6.17", }, tags, ) @@ -258,88 +347,139 @@ func TestStateTag(t *testing.T) { d.AddDefaultStats() 
d.flush(&acc) fields := map[string]interface{}{ - "active_reads": int64(0), - "active_writes": int64(0), - "available_reads": int64(0), - "available_writes": int64(0), - "total_tickets_reads": int64(0), - "total_tickets_writes": int64(0), - "commands": int64(0), - "commands_per_sec": int64(0), - "deletes": int64(0), - "deletes_per_sec": int64(0), - "flushes": int64(0), - "flushes_per_sec": int64(0), - "flushes_total_time_ns": int64(0), - "getmores": int64(0), - "getmores_per_sec": int64(0), - "inserts": int64(0), - "inserts_per_sec": int64(0), - "member_status": "PRI", - "state": "PRIMARY", - "net_in_bytes_count": int64(0), - "net_in_bytes": int64(0), - "net_out_bytes_count": int64(0), - "net_out_bytes": int64(0), - "open_connections": int64(0), - "queries": int64(0), - "queries_per_sec": int64(0), - "queued_reads": int64(0), - "queued_writes": int64(0), - "repl_commands": int64(0), - "repl_commands_per_sec": int64(0), - "repl_deletes": int64(0), - "repl_deletes_per_sec": int64(0), - "repl_getmores": int64(0), - "repl_getmores_per_sec": int64(0), - "repl_inserts": int64(0), - "repl_inserts_per_sec": int64(0), - "repl_queries": int64(0), - "repl_queries_per_sec": int64(0), - "repl_updates": int64(0), - "repl_updates_per_sec": int64(0), - "repl_lag": int64(0), - "resident_megabytes": int64(0), - "updates": int64(0), - "updates_per_sec": int64(0), - "uptime_ns": int64(0), - "vsize_megabytes": int64(0), - "ttl_deletes": int64(0), - "ttl_deletes_per_sec": int64(0), - "ttl_passes": int64(0), - "ttl_passes_per_sec": int64(0), - "jumbo_chunks": int64(0), - "total_in_use": int64(0), - "total_available": int64(0), - "total_created": int64(0), - "total_refreshing": int64(0), - "cursor_timed_out": int64(0), - "cursor_timed_out_count": int64(0), - "cursor_no_timeout": int64(0), - "cursor_no_timeout_count": int64(0), - "cursor_pinned": int64(0), - "cursor_pinned_count": int64(0), - "cursor_total": int64(0), - "cursor_total_count": int64(0), - "document_deleted": int64(0), - "document_inserted": int64(0), - "document_returned": int64(0), - "document_updated": int64(0), - "connections_current": int64(0), - "connections_available": int64(0), - "connections_total_created": int64(0), - "delete_command_total": int64(0), - "delete_command_failed": int64(0), - "find_command_total": int64(0), - "find_command_failed": int64(0), - "find_and_modify_command_total": int64(0), - "find_and_modify_command_failed": int64(0), - "get_more_command_total": int64(0), - "get_more_command_failed": int64(0), - "insert_command_total": int64(0), - "insert_command_failed": int64(0), - "update_command_total": int64(0), - "update_command_failed": int64(0), + "active_reads": int64(0), + "active_writes": int64(0), + "aggregate_command_failed": int64(0), + "aggregate_command_total": int64(0), + "assert_msg": int64(0), + "assert_regular": int64(0), + "assert_rollovers": int64(0), + "assert_user": int64(0), + "assert_warning": int64(0), + "available_reads": int64(0), + "available_writes": int64(0), + "commands": int64(0), + "commands_per_sec": int64(0), + "connections_available": int64(0), + "connections_current": int64(0), + "connections_total_created": int64(0), + "count_command_failed": int64(0), + "count_command_total": int64(0), + "cursor_no_timeout": int64(0), + "cursor_no_timeout_count": int64(0), + "cursor_pinned": int64(0), + "cursor_pinned_count": int64(0), + "cursor_timed_out": int64(0), + "cursor_timed_out_count": int64(0), + "cursor_total": int64(0), + "cursor_total_count": int64(0), + "delete_command_failed": int64(0), + 
"delete_command_total": int64(0), + "deletes": int64(0), + "deletes_per_sec": int64(0), + "distinct_command_failed": int64(0), + "distinct_command_total": int64(0), + "document_deleted": int64(0), + "document_inserted": int64(0), + "document_returned": int64(0), + "document_updated": int64(0), + "find_and_modify_command_failed": int64(0), + "find_and_modify_command_total": int64(0), + "find_command_failed": int64(0), + "find_command_total": int64(0), + "flushes": int64(0), + "flushes_per_sec": int64(0), + "flushes_total_time_ns": int64(0), + "get_more_command_failed": int64(0), + "get_more_command_total": int64(0), + "getmores": int64(0), + "getmores_per_sec": int64(0), + "insert_command_failed": int64(0), + "insert_command_total": int64(0), + "inserts": int64(0), + "inserts_per_sec": int64(0), + "jumbo_chunks": int64(0), + "member_status": "PRI", + "net_in_bytes": int64(0), + "net_in_bytes_count": int64(0), + "net_out_bytes": int64(0), + "net_out_bytes_count": int64(0), + "open_connections": int64(0), + "operation_scan_and_order": int64(0), + "operation_write_conflicts": int64(0), + "queries": int64(0), + "queries_per_sec": int64(0), + "queued_reads": int64(0), + "queued_writes": int64(0), + "repl_apply_batches_num": int64(0), + "repl_apply_batches_total_millis": int64(0), + "repl_apply_ops": int64(0), + "repl_buffer_count": int64(0), + "repl_buffer_size_bytes": int64(0), + "repl_commands": int64(0), + "repl_commands_per_sec": int64(0), + "repl_deletes": int64(0), + "repl_deletes_per_sec": int64(0), + "repl_executor_pool_in_progress_count": int64(0), + "repl_executor_queues_network_in_progress": int64(0), + "repl_executor_queues_sleepers": int64(0), + "repl_executor_unsignaled_events": int64(0), + "repl_getmores": int64(0), + "repl_getmores_per_sec": int64(0), + "repl_inserts": int64(0), + "repl_inserts_per_sec": int64(0), + "repl_lag": int64(0), + "repl_network_bytes": int64(0), + "repl_network_getmores_num": int64(0), + "repl_network_getmores_total_millis": int64(0), + "repl_network_ops": int64(0), + "repl_queries": int64(0), + "repl_queries_per_sec": int64(0), + "repl_updates": int64(0), + "repl_updates_per_sec": int64(0), + "resident_megabytes": int64(0), + "state": "PRIMARY", + "storage_freelist_search_bucket_exhausted": int64(0), + "storage_freelist_search_requests": int64(0), + "storage_freelist_search_scanned": int64(0), + "tcmalloc_central_cache_free_bytes": int64(0), + "tcmalloc_current_allocated_bytes": int64(0), + "tcmalloc_current_total_thread_cache_bytes": int64(0), + "tcmalloc_heap_size": int64(0), + "tcmalloc_max_total_thread_cache_bytes": int64(0), + "tcmalloc_pageheap_commit_count": int64(0), + "tcmalloc_pageheap_committed_bytes": int64(0), + "tcmalloc_pageheap_decommit_count": int64(0), + "tcmalloc_pageheap_free_bytes": int64(0), + "tcmalloc_pageheap_reserve_count": int64(0), + "tcmalloc_pageheap_scavenge_count": int64(0), + "tcmalloc_pageheap_total_commit_bytes": int64(0), + "tcmalloc_pageheap_total_decommit_bytes": int64(0), + "tcmalloc_pageheap_total_reserve_bytes": int64(0), + "tcmalloc_pageheap_unmapped_bytes": int64(0), + "tcmalloc_spinlock_total_delay_ns": int64(0), + "tcmalloc_thread_cache_free_bytes": int64(0), + "tcmalloc_total_free_bytes": int64(0), + "tcmalloc_transfer_cache_free_bytes": int64(0), + "total_available": int64(0), + "total_created": int64(0), + "total_docs_scanned": int64(0), + "total_in_use": int64(0), + "total_keys_scanned": int64(0), + "total_refreshing": int64(0), + "total_tickets_reads": int64(0), + "total_tickets_writes": int64(0), + 
"ttl_deletes": int64(0), + "ttl_deletes_per_sec": int64(0), + "ttl_passes": int64(0), + "ttl_passes_per_sec": int64(0), + "update_command_failed": int64(0), + "update_command_total": int64(0), + "updates": int64(0), + "updates_per_sec": int64(0), + "uptime_ns": int64(0), + "version": "3.6.17", + "vsize_megabytes": int64(0), } acc.AssertContainsTaggedFields(t, "mongodb", fields, stateTags) } diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 8ec3b3bc8..820ea7bd3 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -48,7 +48,7 @@ type ServerStatus struct { UptimeMillis int64 `bson:"uptimeMillis"` UptimeEstimate int64 `bson:"uptimeEstimate"` LocalTime time.Time `bson:"localTime"` - Asserts map[string]int64 `bson:"asserts"` + Asserts *AssertsStats `bson:"asserts"` BackgroundFlushing *FlushStats `bson:"backgroundFlushing"` ExtraInfo *ExtraInfo `bson:"extra_info"` Connections *ConnectionStats `bson:"connections"` @@ -66,6 +66,7 @@ type ServerStatus struct { StorageEngine map[string]string `bson:"storageEngine"` WiredTiger *WiredTiger `bson:"wiredTiger"` Metrics *MetricsStats `bson:"metrics"` + TCMallocStats *TCMallocStats `bson:"tcmalloc"` } // DbStats stores stats from all dbs @@ -179,6 +180,15 @@ type ConcurrentTransStats struct { TotalTickets int64 `bson:"totalTickets"` } +// AssertsStats stores information related to assertions raised since the MongoDB process started +type AssertsStats struct { + Regular int64 `bson:"regular"` + Warning int64 `bson:"warning"` + Msg int64 `bson:"msg"` + User int64 `bson:"user"` + Rollovers int64 `bson:"rollovers"` +} + // CacheStats stores cache statistics for WiredTiger. type CacheStats struct { TrackedDirtyBytes int64 `bson:"tracked dirty bytes in the cache"` @@ -332,10 +342,14 @@ type LatencyStats struct { // MetricsStats stores information related to metrics type MetricsStats struct { - TTL *TTLStats `bson:"ttl"` - Cursor *CursorStats `bson:"cursor"` - Document *DocumentStats `bson:"document"` - Commands *CommandsStats `bson:"commands"` + TTL *TTLStats `bson:"ttl"` + Cursor *CursorStats `bson:"cursor"` + Document *DocumentStats `bson:"document"` + Commands *CommandsStats `bson:"commands"` + Operation *OperationStats `bson:"operation"` + QueryExecutor *QueryExecutorStats `bson:"queryExecutor"` + Repl *ReplStats `bson:"repl"` + Storage *StorageStats `bson:"storage"` } // TTLStats stores information related to documents with a ttl index. @@ -360,7 +374,10 @@ type DocumentStats struct { // CommandsStats stores information related to document metrics. 
type CommandsStats struct { + Aggregate *CommandsStatsValue `bson:"aggregate"` + Count *CommandsStatsValue `bson:"count"` Delete *CommandsStatsValue `bson:"delete"` + Distinct *CommandsStatsValue `bson:"distinct"` Find *CommandsStatsValue `bson:"find"` FindAndModify *CommandsStatsValue `bson:"findAndModify"` GetMore *CommandsStatsValue `bson:"getMore"` @@ -380,6 +397,59 @@ type OpenCursorStats struct { Total int64 `bson:"total"` } +// OperationStats stores information related to query operations +// using special operation types +type OperationStats struct { + ScanAndOrder int64 `bson:"scanAndOrder"` + WriteConflicts int64 `bson:"writeConflicts"` +} + +// QueryExecutorStats stores information related to query execution +type QueryExecutorStats struct { + Scanned int64 `bson:"scanned"` + ScannedObjects int64 `bson:"scannedObjects"` +} + +// ReplStats stores information related to replication process +type ReplStats struct { + Apply *ReplApplyStats `bson:"apply"` + Buffer *ReplBufferStats `bson:"buffer"` + Executor *ReplExecutorStats `bson:"executor,omitempty"` + Network *ReplNetworkStats `bson:"network"` +} + +// ReplApplyStats stores information related to oplog application process +type ReplApplyStats struct { + Batches *BasicStats `bson:"batches"` + Ops int64 `bson:"ops"` +} + +// ReplBufferStats stores information related to oplog buffer +type ReplBufferStats struct { + Count int64 `bson:"count"` + SizeBytes int64 `bson:"sizeBytes"` +} + +// ReplExecutorStats stores information related to replication executor +type ReplExecutorStats struct { + Pool map[string]int64 `bson:"pool"` + Queues map[string]int64 `bson:"queues"` + UnsignaledEvents int64 `bson:"unsignaledEvents"` +} + +// ReplNetworkStats stores information related to network usage by replication process +type ReplNetworkStats struct { + Bytes int64 `bson:"bytes"` + GetMores *BasicStats `bson:"getmores"` + Ops int64 `bson:"ops"` +} + +// BasicStats stores information about an operation +type BasicStats struct { + Num int64 `bson:"num"` + TotalMillis int64 `bson:"totalMillis"` +} + // ReadWriteLockTimes stores time spent holding read/write locks. 
type ReadWriteLockTimes struct { Read int64 `bson:"R"` @@ -406,6 +476,46 @@ type ExtraInfo struct { PageFaults *int64 `bson:"page_faults"` } +// TCMallocStats stores information related to TCMalloc memory allocator metrics +type TCMallocStats struct { + Generic *GenericTCMAllocStats `bson:"generic"` + TCMalloc *DetailedTCMallocStats `bson:"tcmalloc"` +} + +// GenericTCMAllocStats stores generic TCMalloc memory allocator metrics +type GenericTCMAllocStats struct { + CurrentAllocatedBytes int64 `bson:"current_allocated_bytes"` + HeapSize int64 `bson:"heap_size"` +} + +// DetailedTCMallocStats stores detailed TCMalloc memory allocator metrics +type DetailedTCMallocStats struct { + PageheapFreeBytes int64 `bson:"pageheap_free_bytes"` + PageheapUnmappedBytes int64 `bson:"pageheap_unmapped_bytes"` + MaxTotalThreadCacheBytes int64 `bson:"max_total_thread_cache_bytes"` + CurrentTotalThreadCacheBytes int64 `bson:"current_total_thread_cache_bytes"` + TotalFreeBytes int64 `bson:"total_free_bytes"` + CentralCacheFreeBytes int64 `bson:"central_cache_free_bytes"` + TransferCacheFreeBytes int64 `bson:"transfer_cache_free_bytes"` + ThreadCacheFreeBytes int64 `bson:"thread_cache_free_bytes"` + PageheapComittedBytes int64 `bson:"pageheap_committed_bytes"` + PageheapScavengeCount int64 `bson:"pageheap_scavenge_count"` + PageheapCommitCount int64 `bson:"pageheap_commit_count"` + PageheapTotalCommitBytes int64 `bson:"pageheap_total_commit_bytes"` + PageheapDecommitCount int64 `bson:"pageheap_decommit_count"` + PageheapTotalDecommitBytes int64 `bson:"pageheap_total_decommit_bytes"` + PageheapReserveCount int64 `bson:"pageheap_reserve_count"` + PageheapTotalReserveBytes int64 `bson:"pageheap_total_reserve_bytes"` + SpinLockTotalDelayNanos int64 `bson:"spinlock_total_delay_ns"` +} + +// StorageStats stores information related to record allocations +type StorageStats struct { + FreelistSearchBucketExhausted int64 `bson:"freelist.search.bucketExhausted"` + FreelistSearchRequests int64 `bson:"freelist.search.requests"` + FreelistSearchScanned int64 `bson:"freelist.search.scanned"` +} + // StatHeader describes a single column for mongostat's terminal output, // its formatting, and in which modes it should be displayed. 
type StatHeader struct { @@ -508,6 +618,7 @@ type StatLine struct { Error error IsMongos bool Host string + Version string UptimeNanos int64 @@ -525,6 +636,13 @@ type StatLine struct { GetMore, GetMoreCnt int64 Command, CommandCnt int64 + // Asserts fields + Regular int64 + Warning int64 + Msg int64 + User int64 + Rollovers int64 + // OpLatency fields WriteOpsCnt int64 WriteLatency int64 @@ -547,13 +665,22 @@ type StatLine struct { DeletedD, InsertedD, ReturnedD, UpdatedD int64 //Commands fields + AggregateCommandTotal, AggregateCommandFailed int64 + CountCommandTotal, CountCommandFailed int64 DeleteCommandTotal, DeleteCommandFailed int64 + DistinctCommandTotal, DistinctCommandFailed int64 FindCommandTotal, FindCommandFailed int64 FindAndModifyCommandTotal, FindAndModifyCommandFailed int64 GetMoreCommandTotal, GetMoreCommandFailed int64 InsertCommandTotal, InsertCommandFailed int64 UpdateCommandTotal, UpdateCommandFailed int64 + // Operation fields + ScanAndOrderOp, WriteConflictsOp int64 + + // Query Executor fields + TotalKeysScanned, TotalObjectsScanned int64 + // Connection fields CurrentC, AvailableC, TotalCreatedC int64 @@ -608,6 +735,21 @@ type StatLine struct { NodeType string NodeState string + // Replicated Metrics fields + ReplNetworkBytes int64 + ReplNetworkGetmoresNum int64 + ReplNetworkGetmoresTotalMillis int64 + ReplNetworkOps int64 + ReplBufferCount int64 + ReplBufferSizeBytes int64 + ReplApplyBatchesNum int64 + ReplApplyBatchesTotalMillis int64 + ReplApplyOps int64 + ReplExecutorPoolInProgressCount int64 + ReplExecutorQueuesNetworkInProgress int64 + ReplExecutorQueuesSleepers int64 + ReplExecutorUnsignaledEvents int64 + // Cluster fields JumboChunksCount int64 @@ -622,6 +764,32 @@ type StatLine struct { // Shard Hosts stats field ShardHostStatsLines map[string]ShardHostStatLine + + // TCMalloc stats field + TCMallocCurrentAllocatedBytes int64 + TCMallocHeapSize int64 + TCMallocCentralCacheFreeBytes int64 + TCMallocCurrentTotalThreadCacheBytes int64 + TCMallocMaxTotalThreadCacheBytes int64 + TCMallocTotalFreeBytes int64 + TCMallocTransferCacheFreeBytes int64 + TCMallocThreadCacheFreeBytes int64 + TCMallocSpinLockTotalDelayNanos int64 + TCMallocPageheapFreeBytes int64 + TCMallocPageheapUnmappedBytes int64 + TCMallocPageheapComittedBytes int64 + TCMallocPageheapScavengeCount int64 + TCMallocPageheapCommitCount int64 + TCMallocPageheapTotalCommitBytes int64 + TCMallocPageheapDecommitCount int64 + TCMallocPageheapTotalDecommitBytes int64 + TCMallocPageheapReserveCount int64 + TCMallocPageheapTotalReserveBytes int64 + + // Storage stats field + StorageFreelistSearchBucketExhausted int64 + StorageFreelistSearchRequests int64 + StorageFreelistSearchScanned int64 } type DbStatLine struct { @@ -704,6 +872,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal := &StatLine{ Key: key, Host: newStat.Host, + Version: newStat.Version, Mapped: -1, Virtual: -1, Resident: -1, @@ -749,6 +918,41 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } } + if newStat.Asserts != nil { + returnVal.Regular = newStat.Asserts.Regular + returnVal.Warning = newStat.Asserts.Warning + returnVal.Msg = newStat.Asserts.Msg + returnVal.User = newStat.Asserts.User + returnVal.Rollovers = newStat.Asserts.Rollovers + } + + if newStat.TCMallocStats != nil { + if newStat.TCMallocStats.Generic != nil { + returnVal.TCMallocCurrentAllocatedBytes = newStat.TCMallocStats.Generic.CurrentAllocatedBytes + returnVal.TCMallocHeapSize = 
newStat.TCMallocStats.Generic.HeapSize + } + if newStat.TCMallocStats.TCMalloc != nil { + returnVal.TCMallocCentralCacheFreeBytes = newStat.TCMallocStats.TCMalloc.CentralCacheFreeBytes + returnVal.TCMallocCurrentTotalThreadCacheBytes = newStat.TCMallocStats.TCMalloc.CurrentTotalThreadCacheBytes + returnVal.TCMallocMaxTotalThreadCacheBytes = newStat.TCMallocStats.TCMalloc.MaxTotalThreadCacheBytes + returnVal.TCMallocTransferCacheFreeBytes = newStat.TCMallocStats.TCMalloc.TransferCacheFreeBytes + returnVal.TCMallocThreadCacheFreeBytes = newStat.TCMallocStats.TCMalloc.ThreadCacheFreeBytes + returnVal.TCMallocTotalFreeBytes = newStat.TCMallocStats.TCMalloc.TotalFreeBytes + returnVal.TCMallocSpinLockTotalDelayNanos = newStat.TCMallocStats.TCMalloc.SpinLockTotalDelayNanos + + returnVal.TCMallocPageheapFreeBytes = newStat.TCMallocStats.TCMalloc.PageheapFreeBytes + returnVal.TCMallocPageheapUnmappedBytes = newStat.TCMallocStats.TCMalloc.PageheapUnmappedBytes + returnVal.TCMallocPageheapComittedBytes = newStat.TCMallocStats.TCMalloc.PageheapComittedBytes + returnVal.TCMallocPageheapScavengeCount = newStat.TCMallocStats.TCMalloc.PageheapScavengeCount + returnVal.TCMallocPageheapCommitCount = newStat.TCMallocStats.TCMalloc.PageheapCommitCount + returnVal.TCMallocPageheapTotalCommitBytes = newStat.TCMallocStats.TCMalloc.PageheapTotalCommitBytes + returnVal.TCMallocPageheapDecommitCount = newStat.TCMallocStats.TCMalloc.PageheapDecommitCount + returnVal.TCMallocPageheapTotalDecommitBytes = newStat.TCMallocStats.TCMalloc.PageheapTotalDecommitBytes + returnVal.TCMallocPageheapReserveCount = newStat.TCMallocStats.TCMalloc.PageheapReserveCount + returnVal.TCMallocPageheapTotalReserveBytes = newStat.TCMallocStats.TCMalloc.PageheapTotalReserveBytes + } + } + if newStat.Metrics != nil && oldStat.Metrics != nil { if newStat.Metrics.TTL != nil && oldStat.Metrics.TTL != nil { returnVal.Passes, returnVal.PassesCnt = diff(newStat.Metrics.TTL.Passes, oldStat.Metrics.TTL.Passes, sampleSecs) @@ -770,10 +974,22 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } if newStat.Metrics.Commands != nil { + if newStat.Metrics.Commands.Aggregate != nil { + returnVal.AggregateCommandTotal = newStat.Metrics.Commands.Aggregate.Total + returnVal.AggregateCommandFailed = newStat.Metrics.Commands.Aggregate.Failed + } + if newStat.Metrics.Commands.Count != nil { + returnVal.CountCommandTotal = newStat.Metrics.Commands.Count.Total + returnVal.CountCommandFailed = newStat.Metrics.Commands.Count.Failed + } if newStat.Metrics.Commands.Delete != nil { returnVal.DeleteCommandTotal = newStat.Metrics.Commands.Delete.Total returnVal.DeleteCommandFailed = newStat.Metrics.Commands.Delete.Failed } + if newStat.Metrics.Commands.Distinct != nil { + returnVal.DistinctCommandTotal = newStat.Metrics.Commands.Distinct.Total + returnVal.DistinctCommandFailed = newStat.Metrics.Commands.Distinct.Failed + } if newStat.Metrics.Commands.Find != nil { returnVal.FindCommandTotal = newStat.Metrics.Commands.Find.Total returnVal.FindCommandFailed = newStat.Metrics.Commands.Find.Failed @@ -795,6 +1011,46 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.UpdateCommandFailed = newStat.Metrics.Commands.Update.Failed } } + + if newStat.Metrics.Operation != nil { + returnVal.ScanAndOrderOp = newStat.Metrics.Operation.ScanAndOrder + returnVal.WriteConflictsOp = newStat.Metrics.Operation.WriteConflicts + } + + if newStat.Metrics.QueryExecutor != nil { + returnVal.TotalKeysScanned = 
newStat.Metrics.QueryExecutor.Scanned + returnVal.TotalObjectsScanned = newStat.Metrics.QueryExecutor.ScannedObjects + } + + if newStat.Metrics.Repl != nil { + if newStat.Metrics.Repl.Apply != nil { + returnVal.ReplApplyBatchesNum = newStat.Metrics.Repl.Apply.Batches.Num + returnVal.ReplApplyBatchesTotalMillis = newStat.Metrics.Repl.Apply.Batches.TotalMillis + returnVal.ReplApplyOps = newStat.Metrics.Repl.Apply.Ops + } + if newStat.Metrics.Repl.Buffer != nil { + returnVal.ReplBufferCount = newStat.Metrics.Repl.Buffer.Count + returnVal.ReplBufferSizeBytes = newStat.Metrics.Repl.Buffer.SizeBytes + } + if newStat.Metrics.Repl.Executor != nil { + returnVal.ReplExecutorPoolInProgressCount = newStat.Metrics.Repl.Executor.Pool["inProgressCount"] + returnVal.ReplExecutorQueuesNetworkInProgress = newStat.Metrics.Repl.Executor.Queues["networkInProgress"] + returnVal.ReplExecutorQueuesSleepers = newStat.Metrics.Repl.Executor.Queues["sleepers"] + returnVal.ReplExecutorUnsignaledEvents = newStat.Metrics.Repl.Executor.UnsignaledEvents + } + if newStat.Metrics.Repl.Network != nil { + returnVal.ReplNetworkBytes = newStat.Metrics.Repl.Network.Bytes + returnVal.ReplNetworkGetmoresNum = newStat.Metrics.Repl.Network.GetMores.Num + returnVal.ReplNetworkGetmoresTotalMillis = newStat.Metrics.Repl.Network.GetMores.TotalMillis + returnVal.ReplNetworkOps = newStat.Metrics.Repl.Network.Ops + } + } + + if newStat.Metrics.Storage != nil { + returnVal.StorageFreelistSearchBucketExhausted = newStat.Metrics.Storage.FreelistSearchBucketExhausted + returnVal.StorageFreelistSearchRequests = newStat.Metrics.Storage.FreelistSearchRequests + returnVal.StorageFreelistSearchScanned = newStat.Metrics.Storage.FreelistSearchScanned + } } if newStat.OpcountersRepl != nil && oldStat.OpcountersRepl != nil { From 47e1cdc0ee5398f3a3857644a0ddc88696f967a9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 12 May 2020 12:24:31 -0700 Subject: [PATCH 1754/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a28edff05..a99cd388c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,7 @@ - [#7359](https://github.com/influxdata/telegraf/pull/7359): Add cpu query to sqlserver input. - [#7464](https://github.com/influxdata/telegraf/pull/7464): Add field creation to date processor and integer unix time support. - [#7483](https://github.com/influxdata/telegraf/pull/7483): Add integer mapping support to enum processor. +- [#7321](https://github.com/influxdata/telegraf/pull/7321): Add additional fields to mongodb input. 
#### Bugfixes

From 934f6af99fac2b71b0f12ee8c102732b21572ef6 Mon Sep 17 00:00:00 2001
From: reimda
Date: Tue, 12 May 2020 13:56:35 -0600
Subject: [PATCH 1755/1815] Handle multiple metrics with the same timestamp in
 dedup processor (#7439)

---
 plugins/processors/dedup/dedup.go      | 29 ++++++++++++++++--
 plugins/processors/dedup/dedup_test.go | 42 +++++++++++++++++++++++++-
 2 files changed, 67 insertions(+), 4 deletions(-)

diff --git a/plugins/processors/dedup/dedup.go b/plugins/processors/dedup/dedup.go
index c0d40f434..3dd7516a6 100644
--- a/plugins/processors/dedup/dedup.go
+++ b/plugins/processors/dedup/dedup.go
@@ -75,10 +75,29 @@ func (d *Dedup) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
 
 		// For each field compare value with the cached one
 		changed := false
+		added := false
+		sametime := metric.Time() == m.Time()
 		for _, f := range metric.FieldList() {
-			if value, ok := m.GetField(f.Key); ok && value != f.Value {
-				changed = true
-				continue
+			if value, ok := m.GetField(f.Key); ok {
+				if value != f.Value {
+					changed = true
+					break
+				}
+			} else if sametime {
+				// This field isn't in the cached metric but it's the
+				// same series and timestamp. Merge it into the cached
+				// metric.
+
+				// Metrics have a ValueType that applies to all values
+				// in the metric. If an input needs to produce values
+				// with different ValueTypes but the same timestamp,
+				// they have to produce multiple metrics. (See the
+				// system input for an example.) In this case, dedup
+				// ignores the ValueTypes of the metrics and merges
+				// the fields into one metric for the dup check.
+
+				m.AddField(f.Key, f.Value)
+				added = true
 			}
 		}
 		// If any field value has changed then refresh the cache
@@ -87,6 +106,10 @@ func (d *Dedup) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
 			continue
 		}
 
+		if sametime && added {
+			continue
+		}
+
 		// In any other case remove metric from the output
 		metrics = remove(metrics, idx)
 	}
diff --git a/plugins/processors/dedup/dedup_test.go b/plugins/processors/dedup/dedup_test.go
index 20a94ed30..cae2bf1a5 100644
--- a/plugins/processors/dedup/dedup_test.go
+++ b/plugins/processors/dedup/dedup_test.go
@@ -1,10 +1,11 @@
 package dedup
 
 import (
-	"github.com/stretchr/testify/require"
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/require"
+
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/metric"
@@ -152,3 +153,42 @@ func TestCacheShrink(t *testing.T) {
 
 	require.Equal(t, 0, len(deduplicate.Cache))
 }
+
+func TestSameTimestamp(t *testing.T) {
+	now := time.Now()
+	dedup := createDedup(now)
+	var in telegraf.Metric
+	var out []telegraf.Metric
+
+	in, _ = metric.New("metric",
+		map[string]string{"tag": "value"},
+		map[string]interface{}{"foo": 1}, // field
+		now,
+	)
+	out = dedup.Apply(in)
+	require.Equal(t, []telegraf.Metric{in}, out) // pass
+
+	in, _ = metric.New("metric",
+		map[string]string{"tag": "value"},
+		map[string]interface{}{"bar": 1}, // different field
+		now,
+	)
+	out = dedup.Apply(in)
+	require.Equal(t, []telegraf.Metric{in}, out) // pass
+
+	in, _ = metric.New("metric",
+		map[string]string{"tag": "value"},
+		map[string]interface{}{"bar": 2}, // same field different value
+		now,
+	)
+	out = dedup.Apply(in)
+	require.Equal(t, []telegraf.Metric{in}, out) // pass
+
+	in, _ = metric.New("metric",
+		map[string]string{"tag": "value"},
+		map[string]interface{}{"bar": 2}, // same field same value
+		now,
+	)
+	out = dedup.Apply(in)
+	require.Equal(t, []telegraf.Metric{}, out) // drop
+}
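The merge rule this patch adds is easy to misread, so here is the same decision distilled into a standalone sketch. The `cache` type and `keep` helper below are illustrative only (they are not part of the patch or of Telegraf): an incoming metric that hits a cache entry survives when a field value changed, or when it contributes a brand-new field at the same timestamp, and is dropped otherwise.

```go
package main

import (
	"fmt"
	"time"
)

// cache models the dedup processor's per-series cache entry.
type cache struct {
	t      time.Time
	fields map[string]interface{}
}

// keep reports whether an incoming metric with timestamp t and the given
// fields should stay in the output: a changed value keeps it, and a field
// missing from the cache at the same timestamp is merged in and keeps it.
func (c *cache) keep(t time.Time, fields map[string]interface{}) bool {
	changed, added := false, false
	sametime := t.Equal(c.t)
	for k, v := range fields {
		if old, ok := c.fields[k]; ok {
			if old != v {
				changed = true
				break
			}
		} else if sametime {
			c.fields[k] = v // merge the new field into the cached metric
			added = true
		}
	}
	return changed || (sametime && added)
}

func main() {
	now := time.Now()
	c := &cache{t: now, fields: map[string]interface{}{"foo": 1}}
	fmt.Println(c.keep(now, map[string]interface{}{"bar": 1})) // true: new field, same timestamp
	fmt.Println(c.keep(now, map[string]interface{}{"bar": 1})) // false: exact duplicate
	fmt.Println(c.keep(now, map[string]interface{}{"bar": 2})) // true: value changed
}
```

The real processor additionally refreshes the cache entry when a value changes; that bookkeeping is omitted from the sketch.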
From a78de9c5f0f3cbef3556d6ab0fa026c6af00320d Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 12 May 2020 12:57:41 -0700
Subject: [PATCH 1756/1815] Update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a99cd388c..1c014eec2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -49,6 +49,7 @@
 #### Bugfixes
 
 - [#7412](https://github.com/influxdata/telegraf/pull/7412): Use same timestamp for all objects in arrays in the json parser.
+- [#7439](https://github.com/influxdata/telegraf/pull/7439): Handle multiple metrics with the same timestamp in dedup processor.
 
 ## v1.14.2 [2020-04-28]
 
From 670277f7854a7c5bd6b530f2ca5cfa42067dac94 Mon Sep 17 00:00:00 2001
From: Darin Fisher
Date: Tue, 12 May 2020 15:01:17 -0600
Subject: [PATCH 1757/1815] Add authentication support to the http_response
 input plugin (#7491)

---
 plugins/inputs/http_response/http_response.go | 42 +++++++++++++++----
 .../http_response/http_response_test.go       | 42 +++++++++++++++++++
 2 files changed, 76 insertions(+), 8 deletions(-)

diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go
index 24c22f72f..7ba141c23 100644
--- a/plugins/inputs/http_response/http_response.go
+++ b/plugins/inputs/http_response/http_response.go
@@ -21,16 +21,21 @@ import (
 
 // HTTPResponse struct
 type HTTPResponse struct {
-	Address         string // deprecated in 1.12
-	URLs            []string `toml:"urls"`
-	HTTPProxy       string `toml:"http_proxy"`
-	Body            string
-	Method          string
-	ResponseTimeout internal.Duration
-	Headers         map[string]string
-	FollowRedirects bool
+	Address         string   // deprecated in 1.12
+	URLs            []string `toml:"urls"`
+	HTTPProxy       string   `toml:"http_proxy"`
+	Body            string
+	Method          string
+	ResponseTimeout internal.Duration
+	Headers         map[string]string
+	FollowRedirects bool
+	// Absolute path to file with Bearer token
+	BearerToken         string `toml:"bearer_token"`
 	ResponseStringMatch string
 	Interface           string
+	// HTTP Basic Auth Credentials
+	Username string `toml:"username"`
+	Password string `toml:"password"`
 	tls.ClientConfig
 
 	Log telegraf.Logger
@@ -64,6 +69,14 @@ var sampleConfig = `
   ## Whether to follow redirects from the server (defaults to false)
   # follow_redirects = false
 
+  ## Optional file with Bearer token
+  ## file content is added as an Authorization header
+  # bearer_token = "/path/to/file"
+
+  ## Optional HTTP Basic Auth Credentials
+  # username = "username"
+  # password = "pa$$word"
+
   ## Optional HTTP Request Body
   # body = '''
   # {'fake':'data'}
@@ -227,6 +240,15 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string]
 		return nil, nil, err
 	}
 
+	if h.BearerToken != "" {
+		token, err := ioutil.ReadFile(h.BearerToken)
+		if err != nil {
+			return nil, nil, err
+		}
+		bearer := "Bearer " + strings.Trim(string(token), "\n")
+		request.Header.Add("Authorization", bearer)
+	}
+
 	for key, val := range h.Headers {
 		request.Header.Add(key, val)
 		if key == "Host" {
@@ -234,6 +256,10 @@
 		}
 	}
 
+	if h.Username != "" || h.Password != "" {
+		request.SetBasicAuth(h.Username, h.Password)
+	}
+
 	// Start Timer
 	start := time.Now()
 	resp, err := h.client.Do(request)
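Before the tests, it may help to see what the two new options put on the wire. The snippet below is illustrative only and uses nothing beyond the standard library calls the patch itself makes: `SetBasicAuth` base64-encodes `username:password`, which is exactly the header constant asserted in `TestBasicAuth` below.

```go
package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "http://example.com", nil)
	if err != nil {
		panic(err)
	}

	// This is the call the patch makes when username/password are configured.
	req.SetBasicAuth("me", "mypassword")
	fmt.Println(req.Header.Get("Authorization"))
	// Basic bWU6bXlwYXNzd29yZA==

	// SetBasicAuth is just base64 over "username:password":
	fmt.Println("Basic " + base64.StdEncoding.EncodeToString([]byte("me:mypassword")))
}
```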
diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go
index 530c81901..947fde5c8 100644
--- a/plugins/inputs/http_response/http_response_test.go
+++ b/plugins/inputs/http_response/http_response_test.go
@@ -864,3 +864,45 @@ func TestRedirect(t *testing.T) {
 
 	testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime())
 }
+
+func TestBasicAuth(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		aHeader := r.Header.Get("Authorization")
+		assert.Equal(t, "Basic bWU6bXlwYXNzd29yZA==", aHeader)
+		w.WriteHeader(http.StatusOK)
+	}))
+	defer ts.Close()
+
+	h := &HTTPResponse{
+		Log:             testutil.Logger{},
+		Address:         ts.URL + "/good",
+		Body:            "{ 'test': 'data'}",
+		Method:          "GET",
+		ResponseTimeout: internal.Duration{Duration: time.Second * 20},
+		Username:        "me",
+		Password:        "mypassword",
+		Headers: map[string]string{
+			"Content-Type": "application/json",
+		},
+	}
+
+	var acc testutil.Accumulator
+	err := h.Gather(&acc)
+	require.NoError(t, err)
+
+	expectedFields := map[string]interface{}{
+		"http_response_code": http.StatusOK,
+		"result_type":        "success",
+		"result_code":        0,
+		"response_time":      nil,
+		"content_length":     nil,
+	}
+	expectedTags := map[string]interface{}{
+		"server":      nil,
+		"method":      "GET",
+		"status_code": "200",
+		"result":      "success",
+	}
+	absentFields := []string{"response_string_match"}
+	checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
+}

From bb86ee008590c2fa0456f3c18345447042b5f78c Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Tue, 12 May 2020 14:02:21 -0700
Subject: [PATCH 1758/1815] Update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1c014eec2..ae9a231ed 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -35,6 +35,7 @@
 - [#7464](https://github.com/influxdata/telegraf/pull/7464): Add field creation to date processor and integer unix time support.
 - [#7483](https://github.com/influxdata/telegraf/pull/7483): Add integer mapping support to enum processor.
 - [#7321](https://github.com/influxdata/telegraf/pull/7321): Add additional fields to mongodb input.
+- [#7491](https://github.com/influxdata/telegraf/pull/7491): Add authentication support to the http_response input plugin.
 
 #### Bugfixes
 
From 23756077a4769cb63729e28f8d55631331bdbc44 Mon Sep 17 00:00:00 2001
From: Pontus Rydin
Date: Wed, 13 May 2020 15:02:39 -0400
Subject: [PATCH 1759/1815] Add truncate_tags setting to wavefront output
 (#7503)

---
 plugins/outputs/wavefront/README.md         |  4 ++
 plugins/outputs/wavefront/wavefront.go      | 38 +++++++++++-----
 plugins/outputs/wavefront/wavefront_test.go | 48 ++++++++++++++++++---
 3 files changed, 74 insertions(+), 16 deletions(-)

diff --git a/plugins/outputs/wavefront/README.md b/plugins/outputs/wavefront/README.md
index 71a760900..231e1057d 100644
--- a/plugins/outputs/wavefront/README.md
+++ b/plugins/outputs/wavefront/README.md
@@ -45,6 +45,10 @@ This plugin writes to a [Wavefront](https://www.wavefront.com) proxy, in Wavefro
 
   ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true
   #convert_bool = true
+
+  ## Truncate metric tags to a combined total of 254 characters for the tag key and value. Wavefront
+  ## will reject any data point exceeding this limit if not truncated. Defaults to 'false' for backwards compatibility.
+  #truncate_tags = false
 ```
diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go
index 65666d627..c455b6fa6 100644
--- a/plugins/outputs/wavefront/wavefront.go
+++ b/plugins/outputs/wavefront/wavefront.go
@@ -2,7 +2,6 @@ package wavefront
 
 import (
 	"fmt"
-	"log"
 	"regexp"
 	"strings"
 
@@ -11,6 +10,8 @@ import (
 	wavefront "github.com/wavefronthq/wavefront-sdk-go/senders"
 )
 
+const maxTagLength = 254
+
 type Wavefront struct {
 	Url             string
 	Token           string
@@ -23,10 +24,12 @@ type Wavefront struct {
 	ConvertBool     bool
 	UseRegex        bool
 	UseStrict       bool
+	TruncateTags    bool
 	SourceOverride  []string
 	StringToNumber  map[string][]map[string]float64
 
 	sender wavefront.Sender
+	Log    telegraf.Logger
 }
 
 // catch many of the invalid chars that could appear in a metric or tag name
@@ -94,6 +97,10 @@ var sampleConfig = `
   ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true
   #convert_bool = true
 
+  ## Truncate metric tags to a combined total of 254 characters for the tag key and value. Wavefront
+  ## will reject any data point exceeding this limit if not truncated. Defaults to 'false' for backwards compatibility.
+  #truncate_tags = false
+
   ## Define a mapping, namespaced by metric prefix, from string values to numeric values
   ##   deprecated in 1.9; use the enum processor plugin
   #[[outputs.wavefront.string_to_number.elasticsearch]]
@@ -113,11 +120,11 @@ type MetricPoint struct {
 
 func (w *Wavefront) Connect() error {
 	if len(w.StringToNumber) > 0 {
-		log.Print("W! [outputs.wavefront] The string_to_number option is deprecated; please use the enum processor instead")
+		w.Log.Warn("The string_to_number option is deprecated; please use the enum processor instead")
 	}
 
 	if w.Url != "" {
-		log.Printf("D! [outputs.wavefront] connecting over http/https using Url: %s", w.Url)
+		w.Log.Debugf("connecting over http/https using Url: %s", w.Url)
 		sender, err := wavefront.NewDirectSender(&wavefront.DirectConfiguration{
 			Server: w.Url,
 			Token:  w.Token,
@@ -128,7 +135,7 @@ func (w *Wavefront) Connect() error {
 		}
 		w.sender = sender
 	} else {
-		log.Printf("D! Output [wavefront] connecting over tcp using Host: %s and Port: %d", w.Host, w.Port)
+		w.Log.Debugf("connecting over tcp using Host: %s and Port: %d", w.Host, w.Port)
 		sender, err := wavefront.NewProxySender(&wavefront.ProxyConfiguration{
 			Host:        w.Host,
 			MetricsPort: w.Port,
@@ -152,18 +159,17 @@ func (w *Wavefront) Connect() error {
 
 func (w *Wavefront) Write(metrics []telegraf.Metric) error {
 	for _, m := range metrics {
-		for _, point := range buildMetrics(m, w) {
+		for _, point := range w.buildMetrics(m) {
 			err := w.sender.SendMetric(point.Metric, point.Value, point.Timestamp, point.Source, point.Tags)
 			if err != nil {
 				return fmt.Errorf("Wavefront sending error: %s", err.Error())
 			}
 		}
 	}
-
 	return nil
 }
 
-func buildMetrics(m telegraf.Metric, w *Wavefront) []*MetricPoint {
+func (w *Wavefront) buildMetrics(m telegraf.Metric) []*MetricPoint {
 	ret := []*MetricPoint{}
 
 	for fieldName, value := range m.Fields() {
@@ -193,12 +199,12 @@
 
 		metricValue, buildError := buildValue(value, metric.Metric, w)
 		if buildError != nil {
-			log.Printf("D! [outputs.wavefront] %s\n", buildError.Error())
+			w.Log.Debugf("error building value: %s", buildError.Error())
 			continue
 		}
 		metric.Value = metricValue
 
-		source, tags := buildTags(m.Tags(), w)
+		source, tags := w.buildTags(m.Tags())
 		metric.Source = source
 		metric.Tags = tags
 
@@ -207,7 +213,7 @@
 	return ret
 }
 
-func buildTags(mTags map[string]string, w *Wavefront) (string, map[string]string) {
+func (w *Wavefront) buildTags(mTags map[string]string) (string, map[string]string) {
 
 	// Remove all empty tags.
 	for k, v := range mTags {
@@ -259,6 +265,16 @@
 			key = sanitizedChars.Replace(k)
 		}
 		val := tagValueReplacer.Replace(v)
+		if w.TruncateTags {
+			if len(key) > maxTagLength {
+				w.Log.Warnf("Tag key length > 254. Skipping tag: %s", key)
+				continue
+			}
+			if len(key)+len(val) > maxTagLength {
+				w.Log.Debugf("Key+value length > 254: %s", key)
+				val = val[:maxTagLength-len(key)]
+			}
+		}
 		tags[key] = val
 	}
 
@@ -296,7 +312,6 @@ func buildValue(v interface{}, name string, w *Wavefront) (float64, error) {
 	default:
 		return 0, fmt.Errorf("unexpected type: %T, with value: %v, for: %s", v, v, name)
 	}
-	return 0, fmt.Errorf("unexpected type: %T, with value: %v, for: %s", v, v, name)
 }
 
@@ -320,6 +335,7 @@ func init() {
 			MetricSeparator: ".",
 			ConvertPaths:    true,
 			ConvertBool:     true,
+			TruncateTags:    false,
 		}
 	})
 }
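The truncation policy above is small enough to restate on its own. The sketch below uses a hypothetical `truncateTag` helper that is not part of the patch; only the two threshold checks mirror the `buildTags` logic, and `TestTagLimits` further below exercises the same regimes.

```go
package main

import (
	"fmt"
	"strings"
)

const maxTagLength = 254 // Wavefront's combined key+value limit per point tag

// truncateTag mirrors the patched buildTags policy: a key that alone
// exceeds the limit drops the whole tag; otherwise the value is trimmed
// so that len(key)+len(val) fits within the limit.
func truncateTag(key, val string) (string, bool) {
	if len(key) > maxTagLength {
		return "", false // tag skipped entirely
	}
	if len(key)+len(val) > maxTagLength {
		val = val[:maxTagLength-len(key)]
	}
	return val, true
}

func main() {
	key := strings.Repeat("x", 253)
	val, ok := truncateTag(key, "whatever")
	fmt.Println(ok, val) // true "w": value trimmed so key+value totals 254

	_, ok = truncateTag(strings.Repeat("x", 255), "whatever")
	fmt.Println(ok) // false: key alone is over the limit
}
```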
diff --git a/plugins/outputs/wavefront/wavefront_test.go b/plugins/outputs/wavefront/wavefront_test.go
index 776c3698f..40707e6d6 100644
--- a/plugins/outputs/wavefront/wavefront_test.go
+++ b/plugins/outputs/wavefront/wavefront_test.go
@@ -4,6 +4,7 @@ import (
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/metric"
 	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/require"
 	"reflect"
 	"strings"
 	"testing"
@@ -21,6 +22,7 @@ func defaultWavefront() *Wavefront {
 		ConvertPaths:    true,
 		ConvertBool:     true,
 		UseRegex:        false,
+		Log:             testutil.Logger{},
 	}
 }
 
@@ -64,7 +66,7 @@ func TestBuildMetrics(t *testing.T) {
 	}
 
 	for _, mt := range metricTests {
-		ml := buildMetrics(mt.metric, w)
+		ml := w.buildMetrics(mt.metric)
 		for i, line := range ml {
 			if mt.metricPoints[i].Metric != line.Metric || mt.metricPoints[i].Value != line.Value {
 				t.Errorf("\nexpected\t%+v %+v\nreceived\t%+v %+v\n", mt.metricPoints[i].Metric, mt.metricPoints[i].Value, line.Metric, line.Value)
@@ -104,7 +106,7 @@ func TestBuildMetricsStrict(t *testing.T) {
 	}
 
 	for _, mt := range metricTests {
-		ml := buildMetrics(mt.metric, w)
+		ml := w.buildMetrics(mt.metric)
 		for i, line := range ml {
 			if mt.metricPoints[i].Metric != line.Metric || mt.metricPoints[i].Value != line.Value {
 				t.Errorf("\nexpected\t%+v %+v\nreceived\t%+v %+v\n", mt.metricPoints[i].Metric, mt.metricPoints[i].Value, line.Metric, line.Value)
@@ -143,7 +145,7 @@ func TestBuildMetricsWithSimpleFields(t *testing.T) {
 	}
 
 	for _, mt := range metricTests {
-		ml := buildMetrics(mt.metric, w)
+		ml := w.buildMetrics(mt.metric)
 		for i, line := range ml {
 			if mt.metricLines[i].Metric != line.Metric || mt.metricLines[i].Value != line.Value {
 				t.Errorf("\nexpected\t%+v %+v\nreceived\t%+v %+v\n", mt.metricLines[i].Metric, mt.metricLines[i].Value, line.Metric, line.Value)
@@ -195,7 +197,7 @@ func TestBuildTags(t *testing.T) {
 	}
 
 	for _, tt := range tagtests {
-		source, tags := buildTags(tt.ptIn, w)
+		source, tags := w.buildTags(tt.ptIn)
 		if source != tt.outSource {
 			t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", tt.outSource, source)
 		}
@@ -247,7 +249,7 @@
func TestBuildTagsWithSource(t *testing.T) { } for _, tt := range tagtests { - source, tags := buildTags(tt.ptIn, w) + source, tags := w.buildTags(tt.ptIn) if source != tt.outSource { t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", tt.outSource, source) } @@ -316,6 +318,42 @@ func TestBuildValueString(t *testing.T) { } +func TestTagLimits(t *testing.T) { + w := defaultWavefront() + w.TruncateTags = true + + // Should fail (all tags skipped) + template := make(map[string]string) + template[strings.Repeat("x", 255)] = "whatever" + _, tags := w.buildTags(template) + require.Empty(t, tags, "All tags should have been skipped") + + // Should truncate value + template = make(map[string]string) + longKey := strings.Repeat("x", 253) + template[longKey] = "whatever" + _, tags = w.buildTags(template) + require.Contains(t, tags, longKey, "Should contain truncated long key") + require.Equal(t, "w", tags[longKey]) + + // Should not truncate + template = make(map[string]string) + longKey = strings.Repeat("x", 251) + template[longKey] = "Hi!" + _, tags = w.buildTags(template) + require.Contains(t, tags, longKey, "Should contain non truncated long key") + require.Equal(t, "Hi!", tags[longKey]) + + // Turn off truncating and make sure it leaves the tags intact + w.TruncateTags = false + template = make(map[string]string) + longKey = strings.Repeat("x", 255) + template[longKey] = longKey + _, tags = w.buildTags(template) + require.Contains(t, tags, longKey, "Should contain non truncated long key") + require.Equal(t, longKey, tags[longKey]) +} + // Benchmarks to test performance of string replacement via Regex and Replacer var testString = "this_is*my!test/string\\for=replacement" From c78045c13f8964489d963c32efae1349a66f1ba6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 13 May 2020 12:09:37 -0700 Subject: [PATCH 1760/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ae9a231ed..aee24fb4b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ - [#7483](https://github.com/influxdata/telegraf/pull/7483): Add integer mapping support to enum processor. - [#7321](https://github.com/influxdata/telegraf/pull/7321): Add additional fields to mongodb input. - [#7491](https://github.com/influxdata/telegraf/pull/7491): Add authentication support to the http_response input plugin. +- [#7503](https://github.com/influxdata/telegraf/pull/7503): Add truncate_tags setting to wavefront output. 
#### Bugfixes From 2c56d6de81f5f9369d24c548f489f7543383b2d3 Mon Sep 17 00:00:00 2001 From: Josh Soref Date: Thu, 14 May 2020 03:41:58 -0400 Subject: [PATCH 1761/1815] Fix spelling errors in comments and documentation (#7492) --- .github/ISSUE_TEMPLATE/Bug_report.md | 2 +- CHANGELOG.md | 26 +++++++++---------- README.md | 2 +- config/testdata/telegraf-agent.toml | 2 +- docs/CONFIGURATION.md | 2 +- docs/METRICS.md | 2 +- internal/templating/node.go | 4 +-- internal/tls/utils.go | 2 +- internal/usage.go | 2 +- internal/usage_windows.go | 2 +- metric.go | 2 +- models/running_processor_test.go | 2 +- plugins/inputs/amqp_consumer/README.md | 4 +-- plugins/inputs/amqp_consumer/amqp_consumer.go | 2 +- plugins/inputs/cisco_telemetry_gnmi/README.md | 2 +- .../cisco_telemetry_gnmi.go | 2 +- plugins/inputs/conntrack/README.md | 2 +- plugins/inputs/conntrack/conntrack.go | 2 +- plugins/inputs/consul/README.md | 2 +- plugins/inputs/couchbase/README.md | 2 +- plugins/inputs/cpu/cpu_test.go | 8 +++--- plugins/inputs/dns_query/README.md | 2 +- plugins/inputs/dns_query/dns_query.go | 2 +- plugins/inputs/docker/README.md | 2 +- plugins/inputs/docker_log/docker_log.go | 2 +- plugins/inputs/elasticsearch/README.md | 2 +- plugins/inputs/exec/README.md | 2 +- plugins/inputs/execd/shim/README.md | 2 +- plugins/inputs/filecount/filecount_test.go | 2 +- .../filecount/filesystem_helpers_test.go | 2 +- plugins/inputs/fluentd/fluentd.go | 4 +-- plugins/inputs/github/README.md | 2 +- plugins/inputs/graylog/README.md | 2 +- plugins/inputs/graylog/graylog.go | 2 +- plugins/inputs/http_response/http_response.go | 2 +- .../http_response/http_response_test.go | 2 +- plugins/inputs/httpjson/httpjson.go | 2 +- plugins/inputs/ipmi_sensor/README.md | 2 +- plugins/inputs/jenkins/jenkins.go | 2 +- plugins/inputs/jenkins/jenkins_test.go | 4 +-- plugins/inputs/jolokia2/gatherer.go | 4 +-- .../jti_openconfig_telemetry/oc/oc.pb.go | 4 +-- .../jti_openconfig_telemetry/oc/oc.proto | 2 +- plugins/inputs/kinesis_consumer/README.md | 2 +- plugins/inputs/kubernetes/README.md | 4 +-- .../inputs/kubernetes/kubernetes_metrics.go | 2 +- plugins/inputs/lustre2/lustre2.go | 2 +- plugins/inputs/mesos/mesos.go | 2 +- plugins/inputs/minecraft/README.md | 2 +- .../inputs/minecraft/internal/rcon/rcon.go | 6 ++--- plugins/inputs/mongodb/README.md | 4 +-- plugins/inputs/mongodb/mongostat.go | 6 ++--- plugins/inputs/monit/README.md | 22 ++++++++-------- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 4 +-- plugins/inputs/multifile/README.md | 6 ++--- plugins/inputs/mysql/README.md | 6 ++--- plugins/inputs/mysql/mysql.go | 2 +- plugins/inputs/nsq/nsq_test.go | 2 +- plugins/inputs/opensmtpd/README.md | 2 +- plugins/inputs/opensmtpd/opensmtpd.go | 2 +- plugins/inputs/pf/README.md | 4 +-- plugins/inputs/pgbouncer/README.md | 2 +- plugins/inputs/pgbouncer/pgbouncer.go | 2 +- plugins/inputs/phpfpm/fcgi_client.go | 2 +- plugins/inputs/postgresql/postgresql.go | 2 +- .../inputs/postgresql_extensible/README.md | 4 +-- .../postgresql_extensible.go | 4 +-- plugins/inputs/procstat/pgrep.go | 2 +- plugins/inputs/rabbitmq/README.md | 2 +- plugins/inputs/rabbitmq/rabbitmq.go | 6 ++--- plugins/inputs/salesforce/README.md | 2 +- plugins/inputs/salesforce/salesforce.go | 2 +- plugins/inputs/sensors/README.md | 2 +- plugins/inputs/snmp/snmp.go | 4 +-- plugins/inputs/snmp_trap/snmp_trap.go | 2 +- plugins/inputs/snmp_trap/snmp_trap_test.go | 2 +- plugins/inputs/socket_listener/README.md | 2 +- plugins/inputs/stackdriver/stackdriver.go | 2 +- 
plugins/inputs/statsd/statsd_test.go | 6 ++--- plugins/inputs/syslog/README.md | 2 +- plugins/inputs/syslog/octetcounting_test.go | 2 +- plugins/inputs/syslog/syslog.go | 4 +-- .../inputs/tcp_listener/tcp_listener_test.go | 6 ++--- plugins/inputs/unbound/README.md | 2 +- plugins/inputs/unbound/unbound.go | 4 +-- plugins/inputs/uwsgi/README.md | 2 +- plugins/inputs/uwsgi/uwsgi.go | 2 +- plugins/inputs/varnish/README.md | 4 +-- plugins/inputs/varnish/varnish.go | 2 +- plugins/inputs/vsphere/README.md | 14 +++++----- plugins/inputs/vsphere/client.go | 2 +- plugins/inputs/vsphere/endpoint.go | 4 +-- plugins/inputs/vsphere/vsphere.go | 8 +++--- plugins/inputs/webhooks/github/README.md | 4 +-- plugins/inputs/webhooks/particle/README.md | 2 +- plugins/inputs/win_perf_counters/pdh.go | 2 +- plugins/inputs/zfs/README.md | 2 +- plugins/inputs/zfs/zfs_freebsd_test.go | 2 +- .../cmd/thrift_serialize/thrift_serialize.go | 2 +- plugins/outputs/amqp/README.md | 4 +-- plugins/outputs/amqp/amqp.go | 2 +- .../application_insights_test.go | 2 +- plugins/outputs/exec/exec.go | 4 +-- plugins/outputs/file/README.md | 2 +- plugins/outputs/file/file.go | 2 +- plugins/outputs/influxdb/http.go | 2 +- plugins/outputs/instrumental/README.md | 2 +- plugins/outputs/instrumental/instrumental.go | 2 +- plugins/outputs/kinesis/README.md | 4 +-- plugins/outputs/kinesis/kinesis.go | 2 +- plugins/outputs/librato/librato.go | 2 +- plugins/outputs/mqtt/README.md | 2 +- plugins/outputs/stackdriver/README.md | 2 +- plugins/outputs/stackdriver/stackdriver.go | 2 +- plugins/outputs/syslog/README.md | 8 +++--- plugins/outputs/syslog/syslog.go | 8 +++--- plugins/outputs/wavefront/README.md | 4 +-- plugins/outputs/wavefront/wavefront.go | 2 +- plugins/parsers/csv/README.md | 2 +- plugins/parsers/dropwizard/README.md | 2 +- plugins/parsers/graphite/config.go | 2 +- plugins/parsers/influx/handler.go | 2 +- plugins/parsers/json/README.md | 2 +- plugins/parsers/wavefront/element.go | 2 +- plugins/processors/date/README.md | 2 +- plugins/processors/template/README.md | 2 +- plugins/processors/topk/README.md | 2 +- plugins/processors/topk/topk.go | 2 +- plugins/processors/topk/topk_test.go | 4 +-- plugins/serializers/splunkmetric/README.md | 4 +-- .../serializers/splunkmetric/splunkmetric.go | 8 +++--- plugins/serializers/wavefront/README.md | 2 +- scripts/build.py | 2 +- testutil/accumulator.go | 2 +- 134 files changed, 215 insertions(+), 215 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/Bug_report.md b/.github/ISSUE_TEMPLATE/Bug_report.md index 5a4e810dd..e03395f6c 100644 --- a/.github/ISSUE_TEMPLATE/Bug_report.md +++ b/.github/ISSUE_TEMPLATE/Bug_report.md @@ -23,7 +23,7 @@ section if available. ### Docker - + ### Steps to reproduce: diff --git a/CHANGELOG.md b/CHANGELOG.md index aee24fb4b..3bb7b2daa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,7 +42,7 @@ - [#7371](https://github.com/influxdata/telegraf/issues/7371): Fix unable to write metrics to CloudWatch with IMDSv1 disabled. - [#7233](https://github.com/influxdata/telegraf/issues/7233): Fix vSphere 6.7 missing data issue. -- [#7448](https://github.com/influxdata/telegraf/issues/7448): Remove debug fields from spunkmetric serializer. +- [#7448](https://github.com/influxdata/telegraf/issues/7448): Remove debug fields from splunkmetric serializer. - [#7446](https://github.com/influxdata/telegraf/issues/7446): Fix gzip support in socket_listener with tcp sockets. 
- [#7390](https://github.com/influxdata/telegraf/issues/7390): Fix interval drift when round_interval is set in agent. @@ -280,7 +280,7 @@ - [#6695](https://github.com/influxdata/telegraf/pull/6695): Allow multiple certificates per file in x509_cert input. - [#6686](https://github.com/influxdata/telegraf/pull/6686): Add additional tags to the x509 input. - [#6703](https://github.com/influxdata/telegraf/pull/6703): Add batch data format support to file output. -- [#6688](https://github.com/influxdata/telegraf/pull/6688): Support partition assignement strategy configuration in kafka_consumer. +- [#6688](https://github.com/influxdata/telegraf/pull/6688): Support partition assignment strategy configuration in kafka_consumer. - [#6731](https://github.com/influxdata/telegraf/pull/6731): Add node type tag to mongodb input. - [#6669](https://github.com/influxdata/telegraf/pull/6669): Add uptime_ns field to mongodb input. - [#6735](https://github.com/influxdata/telegraf/pull/6735): Support resolution of symlinks in filecount input. @@ -344,7 +344,7 @@ - [#6445](https://github.com/influxdata/telegraf/issues/6445): Use batch serialization format in exec output. - [#6455](https://github.com/influxdata/telegraf/issues/6455): Build official packages with Go 1.12.10. -- [#6464](https://github.com/influxdata/telegraf/pull/6464): Use case insensitive serial numer match in smart input. +- [#6464](https://github.com/influxdata/telegraf/pull/6464): Use case insensitive serial number match in smart input. - [#6469](https://github.com/influxdata/telegraf/pull/6469): Add auth header only when env var is set. - [#6468](https://github.com/influxdata/telegraf/pull/6468): Fix running multiple mysql and sqlserver plugin instances. - [#6471](https://github.com/influxdata/telegraf/issues/6471): Fix database routing on retry with exclude_database_tag. @@ -378,7 +378,7 @@ #### Release Notes - The cluster health related fields in the elasticsearch input have been split - out from the `elasticsearch_indices` mesasurement into the new + out from the `elasticsearch_indices` measurement into the new `elasticsearch_cluster_health_indices` measurement as they were originally combined by error. @@ -416,7 +416,7 @@ - [#6006](https://github.com/influxdata/telegraf/pull/6006): Add support for interface field in http_response input plugin. - [#5996](https://github.com/influxdata/telegraf/pull/5996): Add container uptime_ns in docker input plugin. - [#6016](https://github.com/influxdata/telegraf/pull/6016): Add better user-facing errors for API timeouts in docker input. -- [#6027](https://github.com/influxdata/telegraf/pull/6027): Add TLS mutal auth support to jti_openconfig_telemetry input. +- [#6027](https://github.com/influxdata/telegraf/pull/6027): Add TLS mutual auth support to jti_openconfig_telemetry input. - [#6053](https://github.com/influxdata/telegraf/pull/6053): Add support for ES 7.x to elasticsearch output. - [#6062](https://github.com/influxdata/telegraf/pull/6062): Add basic auth to prometheus input plugin. - [#6064](https://github.com/influxdata/telegraf/pull/6064): Add node roles tag to elasticsearch input. @@ -784,7 +784,7 @@ - [#5261](https://github.com/influxdata/telegraf/pull/5261): Fix arithmetic overflow in sqlserver input. - [#5194](https://github.com/influxdata/telegraf/issues/5194): Fix latest metrics not sent first when output fails. -- [#5285](https://github.com/influxdata/telegraf/issues/5285): Fix amqp_consumer stops consuming when it receives unparsable messages. 
+- [#5285](https://github.com/influxdata/telegraf/issues/5285): Fix amqp_consumer stops consuming when it receives unparseable messages. - [#5281](https://github.com/influxdata/telegraf/issues/5281): Fix prometheus input not detecting added and removed pods. - [#5215](https://github.com/influxdata/telegraf/issues/5215): Remove userinfo from cluster tag in couchbase. - [#5298](https://github.com/influxdata/telegraf/issues/5298): Fix internal_write buffer_size not reset on timed writes. @@ -1235,7 +1235,7 @@ ### Release Notes -- The `mysql` input plugin has been updated fix a number of type convertion +- The `mysql` input plugin has been updated fix a number of type conversion issues. This may cause a `field type error` when inserting into InfluxDB due the change of types. @@ -1637,7 +1637,7 @@ - [#3058](https://github.com/influxdata/telegraf/issues/3058): Allow iptable entries with trailing text. - [#1680](https://github.com/influxdata/telegraf/issues/1680): Sanitize password from couchbase metric. - [#3104](https://github.com/influxdata/telegraf/issues/3104): Converge to typed value in prometheus output. -- [#2899](https://github.com/influxdata/telegraf/issues/2899): Skip compilcation of logparser and tail on solaris. +- [#2899](https://github.com/influxdata/telegraf/issues/2899): Skip compilation of logparser and tail on solaris. - [#2951](https://github.com/influxdata/telegraf/issues/2951): Discard logging from tail library. - [#3126](https://github.com/influxdata/telegraf/pull/3126): Remove log message on ping timeout. - [#3144](https://github.com/influxdata/telegraf/issues/3144): Don't retry points beyond retention policy. @@ -2084,7 +2084,7 @@ consistent with the behavior of `collection_jitter`. - [#1390](https://github.com/influxdata/telegraf/pull/1390): Add support for Tengine - [#1320](https://github.com/influxdata/telegraf/pull/1320): Logparser input plugin for parsing grok-style log patterns. - [#1397](https://github.com/influxdata/telegraf/issues/1397): ElasticSearch: now supports connecting to ElasticSearch via SSL -- [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input pluging. +- [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input plugin. - [#1294](https://github.com/influxdata/telegraf/pull/1294): consul input plugin. Thanks @harnash - [#1164](https://github.com/influxdata/telegraf/pull/1164): conntrack input plugin. Thanks @robinpercy! - [#1165](https://github.com/influxdata/telegraf/pull/1165): vmstat input plugin. Thanks @jshim-xm! @@ -2263,7 +2263,7 @@ It is not included on the report path. This is necessary for reporting host disk - [#1041](https://github.com/influxdata/telegraf/issues/1041): Add `n_cpus` field to the system plugin. - [#1072](https://github.com/influxdata/telegraf/pull/1072): New Input Plugin: filestat. - [#1066](https://github.com/influxdata/telegraf/pull/1066): Replication lag metrics for MongoDB input plugin -- [#1086](https://github.com/influxdata/telegraf/pull/1086): Ability to specify AWS keys in config file. Thanks @johnrengleman! +- [#1086](https://github.com/influxdata/telegraf/pull/1086): Ability to specify AWS keys in config file. Thanks @johnrengelman! - [#1096](https://github.com/influxdata/telegraf/pull/1096): Performance refactor of running output buffers. - [#967](https://github.com/influxdata/telegraf/issues/967): Buffer logging improvements. - [#1107](https://github.com/influxdata/telegraf/issues/1107): Support lustre2 job stats. Thanks @hanleyja! 
@@ -2351,7 +2351,7 @@ because the `value` field is redundant in the graphite/librato context. - [#656](https://github.com/influxdata/telegraf/issues/656): No longer run `lsof` on linux to get netstat data, fixes permissions issue. - [#907](https://github.com/influxdata/telegraf/issues/907): Fix prometheus invalid label/measurement name key. - [#841](https://github.com/influxdata/telegraf/issues/841): Fix memcached unix socket panic. -- [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titiliambert! +- [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titilambert! - [#934](https://github.com/influxdata/telegraf/pull/934): phpfpm: Fix fcgi uri path. Thanks @rudenkovk! - [#805](https://github.com/influxdata/telegraf/issues/805): Kafka consumer stops gathering after i/o timeout. - [#959](https://github.com/influxdata/telegraf/pull/959): reduce mongodb & prometheus collection timeouts. Thanks @PierreF! @@ -2362,7 +2362,7 @@ because the `value` field is redundant in the graphite/librato context. - Primarily this release was cut to fix [#859](https://github.com/influxdata/telegraf/issues/859) ### Features -- [#747](https://github.com/influxdata/telegraf/pull/747): Start telegraf on install & remove on uninstall. Thanks @pierref! +- [#747](https://github.com/influxdata/telegraf/pull/747): Start telegraf on install & remove on uninstall. Thanks @PierreF! - [#794](https://github.com/influxdata/telegraf/pull/794): Add service reload ability. Thanks @entertainyou! ### Bugfixes @@ -2850,7 +2850,7 @@ and filtering when specifying a config file. - [#98](https://github.com/influxdata/telegraf/pull/98): LeoFS plugin. Thanks @mocchira! - [#103](https://github.com/influxdata/telegraf/pull/103): Filter by metric tags. Thanks @srfraser! - [#106](https://github.com/influxdata/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet! -- [#107](https://github.com/influxdata/telegraf/pull/107): Multiple outputs beyong influxdb. Thanks @jipperinbham! +- [#107](https://github.com/influxdata/telegraf/pull/107): Multiple outputs beyond influxdb. Thanks @jipperinbham! - [#108](https://github.com/influxdata/telegraf/issues/108): Support setting per-CPU and total-CPU gathering. - [#111](https://github.com/influxdata/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay! diff --git a/README.md b/README.md index ec203c1f2..d29ea7df7 100644 --- a/README.md +++ b/README.md @@ -117,7 +117,7 @@ telegraf config > telegraf.conf telegraf --section-filter agent:inputs:outputs --input-filter cpu --output-filter influxdb config ``` -#### Run a single telegraf collection, outputing metrics to stdout: +#### Run a single telegraf collection, outputting metrics to stdout: ``` telegraf --config telegraf.conf --test diff --git a/config/testdata/telegraf-agent.toml b/config/testdata/telegraf-agent.toml index 9da79605f..f71b98206 100644 --- a/config/testdata/telegraf-agent.toml +++ b/config/testdata/telegraf-agent.toml @@ -256,7 +256,7 @@ # specify address via a url matching: # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] # or a simple string: - # host=localhost user=pqotest password=... sslmode=... dbname=app_production + # host=localhost user=pqgotest password=... sslmode=... dbname=app_production # # All connection parameters are optional. By default, the host is localhost # and the user is the currently running user. 
For localhost, we default diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 975c42f14..ca0b3946d 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -178,7 +178,7 @@ Telegraf plugins are divided into 4 types: [inputs][], [outputs][], [processors][], and [aggregators][]. Unlike the `global_tags` and `agent` tables, any plugin can be defined -multiple times and each instance will run independantly. This allows you to +multiple times and each instance will run independently. This allows you to have plugins defined with differing configurations as needed within a single Telegraf process. diff --git a/docs/METRICS.md b/docs/METRICS.md index 1c238e30a..f903dcad4 100644 --- a/docs/METRICS.md +++ b/docs/METRICS.md @@ -12,7 +12,7 @@ four main components: - **Timestamp**: Date and time associated with the fields. This metric type exists only in memory and must be converted to a concrete -representation in order to be transmitted or viewed. To acheive this we +representation in order to be transmitted or viewed. To achieve this we provide several [output data formats][] sometimes referred to as *serializers*. Our default serializer converts to [InfluxDB Line Protocol][line protocol] which provides a high performance and one-to-one diff --git a/internal/templating/node.go b/internal/templating/node.go index 53d028fd0..bf68509a0 100644 --- a/internal/templating/node.go +++ b/internal/templating/node.go @@ -68,7 +68,7 @@ func (n *node) recursiveSearch(lineParts []string) *Template { // exclude last child from search if it is a wildcard. sort.Search expects // a lexicographically sorted set of children and we have artificially sorted // wildcards to the end of the child set - // wildcards will be searched seperately if no exact match is found + // wildcards will be searched separately if no exact match is found if hasWildcard = n.children[length-1].value == "*"; hasWildcard { length-- } @@ -79,7 +79,7 @@ func (n *node) recursiveSearch(lineParts []string) *Template { // given an exact match is found within children set if i < length && n.children[i].value == lineParts[0] { - // decend into the matching node + // descend into the matching node if tmpl := n.children[i].recursiveSearch(lineParts[1:]); tmpl != nil { // given a template is found return it return tmpl diff --git a/internal/tls/utils.go b/internal/tls/utils.go index 560d07ee2..ddc12d2c1 100644 --- a/internal/tls/utils.go +++ b/internal/tls/utils.go @@ -21,7 +21,7 @@ func ParseCiphers(ciphers []string) ([]uint16, error) { } // ParseTLSVersion returns a `uint16` by received version string key that represents tls version from crypto/tls. 
-// If version isn't supportes ParseTLSVersion returns 0 with error +// If version isn't supported ParseTLSVersion returns 0 with error func ParseTLSVersion(version string) (uint16, error) { if v, ok := tlsVersionMap[version]; ok { return v, nil diff --git a/internal/usage.go b/internal/usage.go index 124087343..b0df62a6f 100644 --- a/internal/usage.go +++ b/internal/usage.go @@ -48,7 +48,7 @@ Examples: # generate config with only cpu input & influxdb output plugins defined telegraf --input-filter cpu --output-filter influxdb config - # run a single telegraf collection, outputing metrics to stdout + # run a single telegraf collection, outputting metrics to stdout telegraf --config telegraf.conf --test # run telegraf with all plugins defined in config file diff --git a/internal/usage_windows.go b/internal/usage_windows.go index 3ee2f7eff..e205d6c1f 100644 --- a/internal/usage_windows.go +++ b/internal/usage_windows.go @@ -50,7 +50,7 @@ Examples: # generate config with only cpu input & influxdb output plugins defined telegraf --input-filter cpu --output-filter influxdb config - # run a single telegraf collection, outputing metrics to stdout + # run a single telegraf collection, outputting metrics to stdout telegraf --config telegraf.conf --test # run telegraf with all plugins defined in config file diff --git a/metric.go b/metric.go index 1b7dfb6b2..6c7b1c6c5 100644 --- a/metric.go +++ b/metric.go @@ -57,7 +57,7 @@ type Metric interface { Time() time.Time // Type returns a general type for the entire metric that describes how you - // might interprete, aggregate the values. + // might interpret, aggregate the values. // // This method may be removed in the future and its use is discouraged. Type() ValueType diff --git a/models/running_processor_test.go b/models/running_processor_test.go index c24347b8e..4ac4743a7 100644 --- a/models/running_processor_test.go +++ b/models/running_processor_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/require" ) -// MockProcessor is a Processor with an overrideable Apply implementation. +// MockProcessor is a Processor with an overridable Apply implementation. type MockProcessor struct { ApplyF func(in ...telegraf.Metric) []telegraf.Metric } diff --git a/plugins/inputs/amqp_consumer/README.md b/plugins/inputs/amqp_consumer/README.md index 53fca513d..8ef6d6fe2 100644 --- a/plugins/inputs/amqp_consumer/README.md +++ b/plugins/inputs/amqp_consumer/README.md @@ -1,6 +1,6 @@ # AMQP Consumer Input Plugin -This plugin provides a consumer for use with AMQP 0-9-1, a promenent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/). +This plugin provides a consumer for use with AMQP 0-9-1, a prominent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/). Metrics are read from a topic exchange using the configured queue and binding_key. @@ -41,7 +41,7 @@ The following defaults are known to work with RabbitMQ: ## Additional exchange arguments. # exchange_arguments = { } - # exchange_arguments = {"hash_propery" = "timestamp"} + # exchange_arguments = {"hash_property" = "timestamp"} ## AMQP queue name queue = "telegraf" diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go index cee425f60..f3ee235e7 100644 --- a/plugins/inputs/amqp_consumer/amqp_consumer.go +++ b/plugins/inputs/amqp_consumer/amqp_consumer.go @@ -116,7 +116,7 @@ func (a *AMQPConsumer) SampleConfig() string { ## Additional exchange arguments. 
# exchange_arguments = { } - # exchange_arguments = {"hash_propery" = "timestamp"} + # exchange_arguments = {"hash_property" = "timestamp"} ## AMQP queue name. queue = "telegraf" diff --git a/plugins/inputs/cisco_telemetry_gnmi/README.md b/plugins/inputs/cisco_telemetry_gnmi/README.md index 0b003fdef..d12817da1 100644 --- a/plugins/inputs/cisco_telemetry_gnmi/README.md +++ b/plugins/inputs/cisco_telemetry_gnmi/README.md @@ -49,7 +49,7 @@ It has been optimized to support GNMI telemetry as produced by Cisco IOS XR (64- ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths ## ## origin usually refers to a (YANG) data model implemented by the device - ## and path to a specific substructe inside it that should be subscribed to (similar to an XPath) + ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath) ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr origin = "openconfig-interfaces" path = "/interfaces/interface/state/counters" diff --git a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go index 562c5effa..894b7feb0 100644 --- a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go +++ b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go @@ -515,7 +515,7 @@ const sampleConfig = ` ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths ## ## origin usually refers to a (YANG) data model implemented by the device - ## and path to a specific substructe inside it that should be subscribed to (similar to an XPath) + ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath) ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr origin = "openconfig-interfaces" path = "/interfaces/interface/state/counters" diff --git a/plugins/inputs/conntrack/README.md b/plugins/inputs/conntrack/README.md index 0eae4b3c3..813bc4861 100644 --- a/plugins/inputs/conntrack/README.md +++ b/plugins/inputs/conntrack/README.md @@ -34,7 +34,7 @@ For more information on conntrack-tools, see the "nf_conntrack_count","nf_conntrack_max"] ## Directories to search within for the conntrack files above. - ## Missing directrories will be ignored. + ## Missing directories will be ignored. dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"] ``` diff --git a/plugins/inputs/conntrack/conntrack.go b/plugins/inputs/conntrack/conntrack.go index 4df01a31f..bf6c021c8 100644 --- a/plugins/inputs/conntrack/conntrack.go +++ b/plugins/inputs/conntrack/conntrack.go @@ -61,7 +61,7 @@ var sampleConfig = ` "nf_conntrack_count","nf_conntrack_max"] ## Directories to search within for the conntrack files above. - ## Missing directrories will be ignored. + ## Missing directories will be ignored. dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"] ` diff --git a/plugins/inputs/consul/README.md b/plugins/inputs/consul/README.md index 72bdeb231..8e1ecc094 100644 --- a/plugins/inputs/consul/README.md +++ b/plugins/inputs/consul/README.md @@ -44,7 +44,7 @@ report those stats already using StatsD protocol if needed. 
- consul_health_checks - tags: - - node (node that check/service is registred on) + - node (node that check/service is registered on) - service_name - check_id - fields: diff --git a/plugins/inputs/couchbase/README.md b/plugins/inputs/couchbase/README.md index 13eaa02c8..6db7d3db9 100644 --- a/plugins/inputs/couchbase/README.md +++ b/plugins/inputs/couchbase/README.md @@ -12,7 +12,7 @@ ## http://admin:secret@couchbase-0.example.com:8091/ ## ## If no servers are specified, then localhost is used as the host. - ## If no protocol is specifed, HTTP is used. + ## If no protocol is specified, HTTP is used. ## If no port is specified, 8091 is used. servers = ["http://localhost:8091"] ``` diff --git a/plugins/inputs/cpu/cpu_test.go b/plugins/inputs/cpu/cpu_test.go index 34d785350..bf356ec7b 100644 --- a/plugins/inputs/cpu/cpu_test.go +++ b/plugins/inputs/cpu/cpu_test.go @@ -55,7 +55,7 @@ func TestCPUStats(t *testing.T) { err := cs.Gather(&acc) require.NoError(t, err) - // Computed values are checked with delta > 0 because of floating point arithmatic + // Computed values are checked with delta > 0 because of floating point arithmetic // imprecision assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 8.8, 0, cputags) assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 8.2, 0, cputags) @@ -102,7 +102,7 @@ func TestCPUStats(t *testing.T) { assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest_nice", 2.2, 0.0005, cputags) } -// Asserts that a given accumulator contains a measurment of type float64 with +// Asserts that a given accumulator contains a measurement of type float64 with // specific tags within a certain distance of a given expected value. Asserts a failure // if the measurement is of the wrong type, or if no matching measurements are found // @@ -113,7 +113,7 @@ func TestCPUStats(t *testing.T) { // expectedValue float64 : Value to search for within the measurement // delta float64 : Maximum acceptable distance of an accumulated value // from the expectedValue parameter. Useful when -// floating-point arithmatic imprecision makes looking +// floating-point arithmetic imprecision makes looking // for an exact match impractical // tags map[string]string : Tag set the found measurement must have. Set to nil to // ignore the tag set. @@ -225,7 +225,7 @@ func TestCPUTimesDecrease(t *testing.T) { err := cs.Gather(&acc) require.NoError(t, err) - // Computed values are checked with delta > 0 because of floating point arithmatic + // Computed values are checked with delta > 0 because of floating point arithmetic // imprecision assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 18, 0, cputags) assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80, 0, cputags) diff --git a/plugins/inputs/dns_query/README.md b/plugins/inputs/dns_query/README.md index 51152a367..dc8ddd903 100644 --- a/plugins/inputs/dns_query/README.md +++ b/plugins/inputs/dns_query/README.md @@ -16,7 +16,7 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi # domains = ["."] ## Query record type. - ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. + ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. # record_type = "A" ## Dns server port. diff --git a/plugins/inputs/dns_query/dns_query.go b/plugins/inputs/dns_query/dns_query.go index b33e508ea..c56572770 100644 --- a/plugins/inputs/dns_query/dns_query.go +++ b/plugins/inputs/dns_query/dns_query.go @@ -52,7 +52,7 @@ var sampleConfig = ` # domains = ["."] ## Query record type. 
- ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. + ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. # record_type = "A" ## Dns server port. diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index 6ec95b64f..95394c94e 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -184,7 +184,7 @@ The above measurements for the devicemapper storage driver can now be found in t - container_status - container_version + fields: - - total_pgmafault + - total_pgmajfault - cache - mapped_file - total_inactive_file diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go index cf5960b81..bf29ede43 100644 --- a/plugins/inputs/docker_log/docker_log.go +++ b/plugins/inputs/docker_log/docker_log.go @@ -73,7 +73,7 @@ const ( var ( containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"} - // ensure *DockerLogs implements telegaf.ServiceInput + // ensure *DockerLogs implements telegraf.ServiceInput _ telegraf.ServiceInput = (*DockerLogs)(nil) ) diff --git a/plugins/inputs/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md index 57c107cc2..36fd15fe8 100644 --- a/plugins/inputs/elasticsearch/README.md +++ b/plugins/inputs/elasticsearch/README.md @@ -18,7 +18,7 @@ Specific Elasticsearch endpoints that are queried: - Indices Stats: /_all/_stats - Shard Stats: /_all/_stats?level=shards -Note that specific statistics information can change between Elassticsearch versions. In general, this plugin attempts to stay as version-generic as possible by tagging high-level categories only and using a generic json parser to make unique field names of whatever statistics names are provided at the mid-low level. +Note that specific statistics information can change between Elasticsearch versions. In general, this plugin attempts to stay as version-generic as possible by tagging high-level categories only and using a generic json parser to make unique field names of whatever statistics names are provided at the mid-low level. ### Configuration diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md index a8544e1d1..8ed0b5111 100644 --- a/plugins/inputs/exec/README.md +++ b/plugins/inputs/exec/README.md @@ -58,7 +58,7 @@ systems. #### With a PowerShell on Windows, the output of the script appears to be truncated. -You may need to set a variable in your script to increase the numer of columns +You may need to set a variable in your script to increase the number of columns available for output: ``` $host.UI.RawUI.BufferSize = new-object System.Management.Automation.Host.Size(1024,50) diff --git a/plugins/inputs/execd/shim/README.md b/plugins/inputs/execd/shim/README.md index f955ef15f..3bdb69f92 100644 --- a/plugins/inputs/execd/shim/README.md +++ b/plugins/inputs/execd/shim/README.md @@ -5,7 +5,7 @@ out to a stand-alone repo for the purpose of compiling it as a separate app and running it from the inputs.execd plugin. The execd-shim is still experimental and the interface may change in the future. -Especially as the concept expands to prcoessors, aggregators, and outputs. +Especially as the concept expands to processors, aggregators, and outputs. 
## Steps to externalize a plugin diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go index 96d8f0c3b..568ee07b5 100644 --- a/plugins/inputs/filecount/filecount_test.go +++ b/plugins/inputs/filecount/filecount_test.go @@ -203,7 +203,7 @@ func getFakeFileSystem(basePath string) fakeFileSystem { mtime := time.Date(2015, time.December, 14, 18, 25, 5, 0, time.UTC) olderMtime := time.Date(2010, time.December, 14, 18, 25, 5, 0, time.UTC) - // set file permisions + // set file permissions var fmask uint32 = 0666 var dmask uint32 = 0666 diff --git a/plugins/inputs/filecount/filesystem_helpers_test.go b/plugins/inputs/filecount/filesystem_helpers_test.go index de028dcab..4e7d16e16 100644 --- a/plugins/inputs/filecount/filesystem_helpers_test.go +++ b/plugins/inputs/filecount/filesystem_helpers_test.go @@ -72,7 +72,7 @@ func getTestFileSystem() fakeFileSystem { mtime := time.Date(2015, time.December, 14, 18, 25, 5, 0, time.UTC) - // set file permisions + // set file permissions var fmask uint32 = 0666 var dmask uint32 = 0666 diff --git a/plugins/inputs/fluentd/fluentd.go b/plugins/inputs/fluentd/fluentd.go index c99960740..7d4a0cd5e 100644 --- a/plugins/inputs/fluentd/fluentd.go +++ b/plugins/inputs/fluentd/fluentd.go @@ -53,7 +53,7 @@ type pluginData struct { // parse JSON from fluentd Endpoint // Parameters: -// data: unprocessed json recivied from endpoint +// data: unprocessed json received from endpoint // // Returns: // pluginData: slice that contains parsed plugins @@ -76,7 +76,7 @@ func parse(data []byte) (datapointArray []pluginData, err error) { // Description - display description func (h *Fluentd) Description() string { return description } -// SampleConfig - generate configuretion +// SampleConfig - generate configuration func (h *Fluentd) SampleConfig() string { return sampleConfig } // Gather - Main code responsible for gathering, processing and creating metrics diff --git a/plugins/inputs/github/README.md b/plugins/inputs/github/README.md index 65fda0301..46127082e 100644 --- a/plugins/inputs/github/README.md +++ b/plugins/inputs/github/README.md @@ -46,7 +46,7 @@ When the [internal][] input is enabled: + internal_github - tags: - - access_token - An obfusticated reference to the configured access token or "Unauthenticated" + - access_token - An obfuscated reference to the configured access token or "Unauthenticated" - fields: - limit - How many requests you are limited to (per hour) - remaining - How many requests you have remaining (per hour) diff --git a/plugins/inputs/graylog/README.md b/plugins/inputs/graylog/README.md index 46712ea1e..acb191f8b 100644 --- a/plugins/inputs/graylog/README.md +++ b/plugins/inputs/graylog/README.md @@ -7,7 +7,7 @@ Plugin currently support two type of end points:- - multiple (Ex http://[graylog-server-ip]:12900/system/metrics/multiple) - namespace (Ex http://[graylog-server-ip]:12900/system/metrics/namespace/{namespace}) -End Point can be a mixe of one multiple end point and several namespaces end points +End Point can be a mix of one multiple end point and several namespaces end points Note: if namespace end point specified metrics array will be ignored for that call. 
diff --git a/plugins/inputs/graylog/graylog.go b/plugins/inputs/graylog/graylog.go index 1e0439a42..4309c6481 100644 --- a/plugins/inputs/graylog/graylog.go +++ b/plugins/inputs/graylog/graylog.go @@ -47,7 +47,7 @@ type HTTPClient interface { // req: HTTP request object // // Returns: - // http.Response: HTTP respons object + // http.Response: HTTP response object // error : Any error that may have occurred MakeRequest(req *http.Request) (*http.Response, error) diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index 7ba141c23..a6e1d74b5 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -274,7 +274,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] // Get error details netErr := setError(err, fields, tags) - // If recognize the returnded error, get out + // If we recognize the returned error, get out if netErr != nil { return fields, tags, nil } diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 947fde5c8..ac483127d 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -722,7 +722,7 @@ func TestNetworkErrors(t *testing.T) { absentTags := []string{"status_code"} checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags) - // Connecton failed + // Connection failed h = &HTTPResponse{ Log: testutil.Logger{}, Address: "https:/nonexistent.nonexistent", // Any non-routable IP works here diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index e09eafc94..7feff1a84 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -42,7 +42,7 @@ type HTTPClient interface { // req: HTTP request object // // Returns: - // http.Response: HTTP respons object + // http.Response: HTTP response object // error : Any error that may have occurred MakeRequest(req *http.Request) (*http.Response, error) diff --git a/plugins/inputs/ipmi_sensor/README.md b/plugins/inputs/ipmi_sensor/README.md index 6c93bd15e..2fd7cc707 100644 --- a/plugins/inputs/ipmi_sensor/README.md +++ b/plugins/inputs/ipmi_sensor/README.md @@ -44,7 +44,7 @@ ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr ## # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] - ## Recomended: use metric 'interval' that is a multiple of 'timeout' to avoid + ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid ## gaps or overlap in pulled data interval = "30s" diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go index b18dc5430..a909f5ea4 100644 --- a/plugins/inputs/jenkins/jenkins.go +++ b/plugins/inputs/jenkins/jenkins.go @@ -137,7 +137,7 @@ func (j *Jenkins) newHTTPClient() (*http.Client, error) { }, nil } -// seperate the client as dependency to use httptest Client for mocking +// separate the client as dependency to use httptest Client for mocking func (j *Jenkins) initialize(client *http.Client) error { var err error diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go index bf8ffb19d..b8284fc0d 100644 --- a/plugins/inputs/jenkins/jenkins_test.go +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -75,8 +75,8 @@ func TestResultCode(t *testing.T) { } type mockHandler struct { - // responseMap is the path to repsonse interface - // we will ouput the serialized response in json when serving http +
// responseMap is the path to response interface + // we will output the serialized response in json when serving http // example '/computer/api/json': *gojenkins. responseMap map[string]interface{} } diff --git a/plugins/inputs/jolokia2/gatherer.go b/plugins/inputs/jolokia2/gatherer.go index 5005e8225..5b2aa00d8 100644 --- a/plugins/inputs/jolokia2/gatherer.go +++ b/plugins/inputs/jolokia2/gatherer.go @@ -43,7 +43,7 @@ func (g *Gatherer) Gather(client *Client, acc telegraf.Accumulator) error { return nil } -// gatherReponses adds points to an accumulator from the ReadResponse objects +// gatherResponses adds points to an accumulator from the ReadResponse objects // returned by a Jolokia agent. func (g *Gatherer) gatherResponses(responses []ReadResponse, tags map[string]string, acc telegraf.Accumulator) { series := make(map[string][]point, 0) @@ -144,7 +144,7 @@ func metricMatchesResponse(metric Metric, response ReadResponse) bool { return false } -// compactPoints attepts to remove points by compacting points +// compactPoints attempts to remove points by compacting points // with matching tag sets. When a match is found, the fields from // one point are moved to another, and the empty point is removed. func compactPoints(points []point) []point { diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go b/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go index a4cd76cc4..bc7c78045 100644 --- a/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go @@ -980,7 +980,7 @@ type OpenConfigTelemetryClient interface { // The device should send telemetry data back on the same // connection as the subscription request. TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) - // Terminates and removes an exisiting telemetry subscription + // Terminates and removes an existing telemetry subscription CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) // Get the list of current telemetry subscriptions from the // target. This command returns a list of existing subscriptions @@ -1076,7 +1076,7 @@ type OpenConfigTelemetryServer interface { // The device should send telemetry data back on the same // connection as the subscription request. TelemetrySubscribe(*SubscriptionRequest, OpenConfigTelemetry_TelemetrySubscribeServer) error - // Terminates and removes an exisiting telemetry subscription + // Terminates and removes an existing telemetry subscription CancelTelemetrySubscription(context.Context, *CancelSubscriptionRequest) (*CancelSubscriptionReply, error) // Get the list of current telemetry subscriptions from the // target. This command returns a list of existing subscriptions diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto b/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto index 38ce9b422..cf4aa145e 100644 --- a/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto @@ -44,7 +44,7 @@ service OpenConfigTelemetry { // connection as the subscription request. 
rpc telemetrySubscribe(SubscriptionRequest) returns (stream OpenConfigData) {} - // Terminates and removes an exisiting telemetry subscription + // Terminates and removes an existing telemetry subscription rpc cancelTelemetrySubscription(CancelSubscriptionRequest) returns (CancelSubscriptionReply) {} // Get the list of current telemetry subscriptions from the diff --git a/plugins/inputs/kinesis_consumer/README.md b/plugins/inputs/kinesis_consumer/README.md index d6f3a707b..7896557ac 100644 --- a/plugins/inputs/kinesis_consumer/README.md +++ b/plugins/inputs/kinesis_consumer/README.md @@ -78,7 +78,7 @@ DynamoDB: #### DynamoDB Checkpoint The DynamoDB checkpoint stores the last processed record in a DynamoDB. To leverage -this functionality, create a table with the folowing string type keys: +this functionality, create a table with the following string type keys: ``` Partition key: namespace diff --git a/plugins/inputs/kubernetes/README.md b/plugins/inputs/kubernetes/README.md index 2d38f23d9..a574bed06 100644 --- a/plugins/inputs/kubernetes/README.md +++ b/plugins/inputs/kubernetes/README.md @@ -116,7 +116,7 @@ Architecture][k8s-telegraf] or view the Helm charts: - rootfs_available_bytes - rootfs_capacity_bytes - rootfs_used_bytes - - logsfs_avaialble_bytes + - logsfs_available_bytes - logsfs_capacity_bytes - logsfs_used_bytes @@ -146,7 +146,7 @@ Architecture][k8s-telegraf] or view the Helm charts: ``` kubernetes_node -kubernetes_pod_container,container_name=deis-controller,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr cpu_usage_core_nanoseconds=2432835i,cpu_usage_nanocores=0i,logsfs_avaialble_bytes=121128271872i,logsfs_capacity_bytes=153567944704i,logsfs_used_bytes=20787200i,memory_major_page_faults=0i,memory_page_faults=175i,memory_rss_bytes=0i,memory_usage_bytes=0i,memory_working_set_bytes=0i,rootfs_available_bytes=121128271872i,rootfs_capacity_bytes=153567944704i,rootfs_used_bytes=1110016i 1476477530000000000 +kubernetes_pod_container,container_name=deis-controller,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr cpu_usage_core_nanoseconds=2432835i,cpu_usage_nanocores=0i,logsfs_available_bytes=121128271872i,logsfs_capacity_bytes=153567944704i,logsfs_used_bytes=20787200i,memory_major_page_faults=0i,memory_page_faults=175i,memory_rss_bytes=0i,memory_usage_bytes=0i,memory_working_set_bytes=0i,rootfs_available_bytes=121128271872i,rootfs_capacity_bytes=153567944704i,rootfs_used_bytes=1110016i 1476477530000000000 kubernetes_pod_network,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr rx_bytes=120671099i,rx_errors=0i,tx_bytes=102451983i,tx_errors=0i 1476477530000000000 kubernetes_pod_volume,volume_name=default-token-f7wts,namespace=default,node_name=ip-172-17-0-1.internal,pod_name=storage-7 available_bytes=8415240192i,capacity_bytes=8415252480i,used_bytes=12288i 1546910783000000000 kubernetes_system_container diff --git a/plugins/inputs/kubernetes/kubernetes_metrics.go b/plugins/inputs/kubernetes/kubernetes_metrics.go index 96814bcbe..d45d4b5f1 100644 --- a/plugins/inputs/kubernetes/kubernetes_metrics.go +++ b/plugins/inputs/kubernetes/kubernetes_metrics.go @@ -2,7 +2,7 @@ package kubernetes import "time" -// SummaryMetrics represents all the summary data about a paritcular node retrieved from a kubelet +// SummaryMetrics represents all the summary data about a particular node retrieved from a kubelet type SummaryMetrics struct { Node NodeMetrics `json:"node"` Pods 
[]PodMetrics `json:"pods"` diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index 4af999b71..611ba294d 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -366,7 +366,7 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, a for _, file := range files { /* Turn /proc/fs/lustre/obdfilter//stats and similar * into just the object store target name - * Assumpion: the target name is always second to last, + * Assumption: the target name is always second to last, * which is true in Lustre 2.1->2.8 */ path := strings.Split(file, "/") diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 741dd73dc..4ce68e604 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -242,7 +242,7 @@ func metricsDiff(role Role, w []string) []string { return b } -// masterBlocks serves as kind of metrics registry groupping them in sets +// masterBlocks serves as kind of metrics registry grouping them in sets func getMetrics(role Role, group string) []string { var m map[string][]string diff --git a/plugins/inputs/minecraft/README.md b/plugins/inputs/minecraft/README.md index 933d8bb05..e27fca9ba 100644 --- a/plugins/inputs/minecraft/README.md +++ b/plugins/inputs/minecraft/README.md @@ -78,7 +78,7 @@ minecraft,player=dinnerbone,source=127.0.0.1,port=25575 deaths=1i,jumps=1999i,co minecraft,player=jeb,source=127.0.0.1,port=25575 d_pickaxe=1i,damage_dealt=80i,d_sword=2i,hunger=20i,health=20i,kills=1i,level=33i,jumps=264i,armor=15i 1498261397000000000 ``` -[server.properies]: https://minecraft.gamepedia.com/Server.properties +[server.properties]: https://minecraft.gamepedia.com/Server.properties [scoreboard]: http://minecraft.gamepedia.com/Scoreboard [objectives]: https://minecraft.gamepedia.com/Scoreboard#Objectives [rcon]: http://wiki.vg/RCON diff --git a/plugins/inputs/minecraft/internal/rcon/rcon.go b/plugins/inputs/minecraft/internal/rcon/rcon.go index a57d75629..345583a06 100644 --- a/plugins/inputs/minecraft/internal/rcon/rcon.go +++ b/plugins/inputs/minecraft/internal/rcon/rcon.go @@ -57,7 +57,7 @@ type Packet struct { Body string // Body of packet. } -// Compile converts a packets header and body into its approriate +// Compile converts a packets header and body into its appropriate // byte array payload, returning an error if the binary packages // Write method fails to write the header bytes in their little // endian byte order. @@ -112,7 +112,7 @@ func (c *Client) Execute(command string) (response *Packet, err error) { // Sends accepts the commands type and its string to execute to the clients server, // creating a packet with a random challenge id for the server to mirror, -// and compiling its payload bytes in the appropriate order. The resonse is +// and compiling its payload bytes in the appropriate order. The response is // decompiled from its bytes into a Packet type for return. An error is returned // if send fails. func (c *Client) Send(typ int32, command string) (response *Packet, err error) { @@ -152,7 +152,7 @@ func (c *Client) Send(typ int32, command string) (response *Packet, err error) { } if packet.Header.Type == Auth && header.Type == ResponseValue { - // Discard, empty SERVERDATA_RESPOSE_VALUE from authorization. + // Discard, empty SERVERDATA_RESPONSE_VALUE from authorization. c.Connection.Read(make([]byte, header.Size-int32(PacketHeaderSize))) // Reread the packet header. 
diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index ba2e9148e..1bbc05847 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -215,7 +215,7 @@ by running Telegraf with the `--debug` argument. - repl_inserts_per_sec (integer, deprecated in 1.10; use `repl_inserts`)) - repl_queries_per_sec (integer, deprecated in 1.10; use `repl_queries`)) - repl_updates_per_sec (integer, deprecated in 1.10; use `repl_updates`)) - - ttl_deletes_per_sec (integer, deprecated in 1.10; use `ttl_deltes`)) + - ttl_deletes_per_sec (integer, deprecated in 1.10; use `ttl_deletes`)) - ttl_passes_per_sec (integer, deprecated in 1.10; use `ttl_passes`)) - updates_per_sec (integer, deprecated in 1.10; use `updates`)) @@ -247,7 +247,7 @@ by running Telegraf with the `--debug` argument. - total_index_size (integer) - ok (integer) - count (integer) - - type (tring) + - type (string) - mongodb_shard_stats - tags: diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 820ea7bd3..5d64d7ab4 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -1,7 +1,7 @@ /*** The code contained here came from https://github.com/mongodb/mongo-tools/blob/master/mongostat/stat_types.go and contains modifications so that no other dependency from that project is needed. Other modifications included -removing uneccessary code specific to formatting the output and determine the current state of the database. It +removing unnecessary code specific to formatting the output and determining the current state of the database. It is licensed under Apache Version 2.0, http://www.apache.org/licenses/LICENSE-2.0.html ***/ @@ -317,7 +317,7 @@ type NetworkStats struct { NumRequests int64 `bson:"numRequests"` } -// OpcountStats stores information related to comamnds and basic CRUD operations. +// OpcountStats stores information related to commands and basic CRUD operations. type OpcountStats struct { Insert int64 `bson:"insert"` Query int64 `bson:"query"` @@ -691,7 +691,7 @@ type StatLine struct { CacheDirtyPercent float64 CacheUsedPercent float64 - // Cache ultilization extended (wiredtiger only) + // Cache utilization extended (wiredtiger only) TrackedDirtyBytes int64 CurrentCachedBytes int64 MaxBytesConfigured int64 diff --git a/plugins/inputs/monit/README.md b/plugins/inputs/monit/README.md index 9075bda72..be116394d 100644 --- a/plugins/inputs/monit/README.md +++ b/plugins/inputs/monit/README.md @@ -41,7 +41,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -62,7 +62,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -77,7 +77,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -93,7 +93,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -117,7 +117,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -136,7 +136,7 @@ Minimum Version of Monit tested with is 5.16.
- address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -160,7 +160,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -175,7 +175,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -189,7 +189,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -203,7 +203,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -217,7 +217,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 5f54f4bb4..9e0ba371f 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -194,7 +194,7 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { // AddRoute sets up the function for handling messages. These need to be // added in case we find a persistent session containing subscriptions so we - // know where to dispatch presisted and new messages to. In the alternate + // know where to dispatch persisted and new messages to. In the alternate // case that we need to create the subscriptions these will be replaced. for _, topic := range m.Topics { m.client.AddRoute(topic, m.recvMessage) @@ -218,7 +218,7 @@ func (m *MQTTConsumer) connect() error { m.state = Connected m.messages = make(map[telegraf.TrackingID]bool) - // Presistent sessions should skip subscription if a session is present, as + // Persistent sessions should skip subscription if a session is present, as // the subscriptions are stored by the server. type sessionPresent interface { SessionPresent() bool diff --git a/plugins/inputs/multifile/README.md b/plugins/inputs/multifile/README.md index 558d4e442..2d71ac159 100644 --- a/plugins/inputs/multifile/README.md +++ b/plugins/inputs/multifile/README.md @@ -40,11 +40,11 @@ Path of the file to be parsed, relative to the `base_dir`. Name of the field/tag key, defaults to `$(basename file)`. * `conversion`: Data format used to parse the file contents: - * `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Efficively just moves the decimal left X places. For example a value of `123` with `float(2)` will result in `1.23`. + * `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Effectively just moves the decimal left X places. For example a value of `123` with `float(2)` will result in `1.23`. * `float`: Converts the value into a float with no adjustment. Same as `float(0)`. - * `int`: Convertes the value into an integer. + * `int`: Converts the value into an integer. * `string`, `""`: No conversion. - * `bool`: Convertes the value into a boolean. + * `bool`: Converts the value into a boolean. * `tag`: File content is used as a tag. 
### Example Output diff --git a/plugins/inputs/mysql/README.md b/plugins/inputs/mysql/README.md index 3e07229da..8b4717168 100644 --- a/plugins/inputs/mysql/README.md +++ b/plugins/inputs/mysql/README.md @@ -45,7 +45,7 @@ This plugin gathers the statistic data from MySQL server ## <1.6: metric_version = 1 (or unset) metric_version = 2 - ## if the list is empty, then metrics are gathered from all databasee tables + ## if the list is empty, then metrics are gathered from all database tables # table_schema_databases = [] ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list @@ -153,7 +153,7 @@ If you wish to remove the `name_suffix` you may use Kapacitor to copy the historical data to the default name. Do this only after retiring the old measurement name. -1. Use the techinique described above to write to multiple locations: +1. Use the technique described above to write to multiple locations: ```toml [[inputs.mysql]] servers = ["tcp(127.0.0.1:3306)/"] @@ -283,7 +283,7 @@ The unit of fields varies by the tags. * events_statements_rows_examined_total(float, number) * events_statements_tmp_tables_total(float, number) * events_statements_tmp_disk_tables_total(float, number) - * events_statements_sort_merge_passes_totales(float, number) + * events_statements_sort_merge_passes_totals(float, number) * events_statements_sort_rows_total(float, number) * events_statements_no_index_used_total(float, number) * Table schema - gathers statistics of each schema. It has following measurements diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index a2dc56505..81db026ec 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -71,7 +71,7 @@ const sampleConfig = ` ## <1.6: metric_version = 1 (or unset) metric_version = 2 - ## if the list is empty, then metrics are gathered from all databasee tables + ## if the list is empty, then metrics are gathered from all database tables # table_schema_databases = [] ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list diff --git a/plugins/inputs/nsq/nsq_test.go b/plugins/inputs/nsq/nsq_test.go index 1d3b541e5..23af13a4c 100644 --- a/plugins/inputs/nsq/nsq_test.go +++ b/plugins/inputs/nsq/nsq_test.go @@ -151,7 +151,7 @@ func TestNSQStatsV1(t *testing.T) { } } -// v1 version of localhost/stats?format=json reesponse body +// v1 version of localhost/stats?format=json response body var responseV1 = ` { "version": "1.0.0-compat", diff --git a/plugins/inputs/opensmtpd/README.md b/plugins/inputs/opensmtpd/README.md index 4c1949869..5bbd4be89 100644 --- a/plugins/inputs/opensmtpd/README.md +++ b/plugins/inputs/opensmtpd/README.md @@ -12,7 +12,7 @@ This plugin gathers stats from [OpenSMTPD - a FREE implementation of the server- ## The default location of the smtpctl binary can be overridden with: binary = "/usr/sbin/smtpctl" - # The default timeout of 1s can be overriden with: + # The default timeout of 1s can be overridden with: #timeout = "1s" ``` diff --git a/plugins/inputs/opensmtpd/opensmtpd.go b/plugins/inputs/opensmtpd/opensmtpd.go index 1c0e5690d..c3f76f2ef 100644 --- a/plugins/inputs/opensmtpd/opensmtpd.go +++ b/plugins/inputs/opensmtpd/opensmtpd.go @@ -37,7 +37,7 @@ var sampleConfig = ` ## The default location of the smtpctl binary can be overridden with: binary = "/usr/sbin/smtpctl" - ## The default timeout of 1000ms can be overriden with (in milliseconds): + ## The default timeout of 1000ms can be overridden with (in milliseconds): timeout = 1000 ` diff --git 
a/plugins/inputs/pf/README.md b/plugins/inputs/pf/README.md index 2e70de5b7..96a5ed488 100644 --- a/plugins/inputs/pf/README.md +++ b/plugins/inputs/pf/README.md @@ -1,8 +1,8 @@ # PF Plugin -The pf plugin gathers information from the FreeBSD/OpenBSD pf firewall. Currently it can retrive information about the state table: the number of current entries in the table, and counters for the number of searches, inserts, and removals to the table. +The pf plugin gathers information from the FreeBSD/OpenBSD pf firewall. Currently it can retrieve information about the state table: the number of current entries in the table, and counters for the number of searches, inserts, and removals to the table. -The pf plugin retrives this information by invoking the `pfstat` command. The `pfstat` command requires read access to the device file `/dev/pf`. You have several options to permit telegraf to run `pfctl`: +The pf plugin retrieves this information by invoking the `pfstat` command. The `pfstat` command requires read access to the device file `/dev/pf`. You have several options to permit telegraf to run `pfctl`: * Run telegraf as root. This is strongly discouraged. * Change the ownership and permissions for /dev/pf such that the user telegraf runs at can read the /dev/pf device file. This is probably not that good of an idea either. diff --git a/plugins/inputs/pgbouncer/README.md b/plugins/inputs/pgbouncer/README.md index 987b6a382..53737a81a 100644 --- a/plugins/inputs/pgbouncer/README.md +++ b/plugins/inputs/pgbouncer/README.md @@ -15,7 +15,7 @@ More information about the meaning of these metrics can be found in the ## postgres://[pqgotest[:password]]@host:port[/dbname]\ ## ?sslmode=[disable|verify-ca|verify-full] ## or a simple string: - ## host=localhost port=5432 user=pqotest password=... sslmode=... dbname=app_production + ## host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production ## ## All connection parameters are optional. ## diff --git a/plugins/inputs/pgbouncer/pgbouncer.go b/plugins/inputs/pgbouncer/pgbouncer.go index cbc38c869..0b8c8c16a 100644 --- a/plugins/inputs/pgbouncer/pgbouncer.go +++ b/plugins/inputs/pgbouncer/pgbouncer.go @@ -24,7 +24,7 @@ var sampleConfig = ` ## postgres://[pqgotest[:password]]@localhost[/dbname]\ ## ?sslmode=[disable|verify-ca|verify-full] ## or a simple string: - ## host=localhost user=pqotest password=... sslmode=... dbname=app_production + ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production ## ## All connection parameters are optional. ## diff --git a/plugins/inputs/phpfpm/fcgi_client.go b/plugins/inputs/phpfpm/fcgi_client.go index 5a4d20019..9b42d91bd 100644 --- a/plugins/inputs/phpfpm/fcgi_client.go +++ b/plugins/inputs/phpfpm/fcgi_client.go @@ -59,7 +59,7 @@ func (client *conn) Request( rec := &record{} var err1 error - // recive until EOF or FCGI_END_REQUEST + // receive until EOF or FCGI_END_REQUEST READ_LOOP: for { err1 = rec.read(client.rwc) diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index 452c7fa2b..0911b20ce 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -26,7 +26,7 @@ var sampleConfig = ` ## postgres://[pqgotest[:password]]@localhost[/dbname]\ ## ?sslmode=[disable|verify-ca|verify-full] ## or a simple string: - ## host=localhost user=pqotest password=... sslmode=... dbname=app_production + ## host=localhost user=pqgotest password=... sslmode=... 
dbname=app_production ## ## All connection parameters are optional. ## diff --git a/plugins/inputs/postgresql_extensible/README.md b/plugins/inputs/postgresql_extensible/README.md index 337b13d1b..5b121b66b 100644 --- a/plugins/inputs/postgresql_extensible/README.md +++ b/plugins/inputs/postgresql_extensible/README.md @@ -16,7 +16,7 @@ The example below has two queries are specified, with the following parameters: # specify address via a url matching: # postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=... # or a simple string: - # host=localhost port=5432 user=pqotest password=... sslmode=... dbname=app_production + # host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production # # All connection parameters are optional. # Without the dbname parameter, the driver will default to a database @@ -71,7 +71,7 @@ The example below has two queries are specified, with the following parameters: ``` The system can be easily extended using homemade metrics collection tools or -using postgreql extensions ([pg_stat_statements](http://www.postgresql.org/docs/current/static/pgstatstatements.html), [pg_proctab](https://github.com/markwkm/pg_proctab) or [powa](http://dalibo.github.io/powa/)) +using postgresql extensions ([pg_stat_statements](http://www.postgresql.org/docs/current/static/pgstatstatements.html), [pg_proctab](https://github.com/markwkm/pg_proctab) or [powa](http://dalibo.github.io/powa/)) # Sample Queries : - telegraf.conf postgresql_extensible queries (assuming that you have configured diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 9a3457228..f91feaf40 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -41,7 +41,7 @@ var sampleConfig = ` ## postgres://[pqgotest[:password]]@localhost[/dbname]\ ## ?sslmode=[disable|verify-ca|verify-full] ## or a simple string: - ## host=localhost user=pqotest password=... sslmode=... dbname=app_production + ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production # ## All connection parameters are optional. 
# ## Without the dbname parameter, the driver will default to a database @@ -153,7 +153,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { columns []string ) - // Retreiving the database version + // Retrieving the database version query = `SELECT setting::integer / 100 AS version FROM pg_settings WHERE name = 'server_version_num'` if err = p.DB.QueryRow(query).Scan(&db_version); err != nil { db_version = 0 diff --git a/plugins/inputs/procstat/pgrep.go b/plugins/inputs/procstat/pgrep.go index 703febaa9..48bf76ed6 100644 --- a/plugins/inputs/procstat/pgrep.go +++ b/plugins/inputs/procstat/pgrep.go @@ -10,7 +10,7 @@ import ( "github.com/influxdata/telegraf/internal" ) -// Implemention of PIDGatherer that execs pgrep to find processes +// Implementation of PIDGatherer that execs pgrep to find processes type Pgrep struct { path string } diff --git a/plugins/inputs/rabbitmq/README.md b/plugins/inputs/rabbitmq/README.md index 1bdd553de..4a53ddc6c 100644 --- a/plugins/inputs/rabbitmq/README.md +++ b/plugins/inputs/rabbitmq/README.md @@ -53,7 +53,7 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management # queue_name_include = [] # queue_name_exclude = [] - ## Federation upstreams to include and exlude specified as an array of glob + ## Federation upstreams to include and exclude specified as an array of glob ## pattern strings. Federation links can also be limited by the queue and ## exchange filters. # federation_upstream_include = [] diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 68652ca36..cb8fbb1aa 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -15,15 +15,15 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -// DefaultUsername will set a default value that corrasponds to the default +// DefaultUsername will set a default value that corresponds to the default // value used by Rabbitmq const DefaultUsername = "guest" -// DefaultPassword will set a default value that corrasponds to the default +// DefaultPassword will set a default value that corresponds to the default // value used by Rabbitmq const DefaultPassword = "guest" -// DefaultURL will set a default value that corrasponds to the default value +// DefaultURL will set a default value that corresponds to the default value // used by Rabbitmq const DefaultURL = "http://localhost:15672" diff --git a/plugins/inputs/salesforce/README.md b/plugins/inputs/salesforce/README.md index 526f14a07..6883f3a90 100644 --- a/plugins/inputs/salesforce/README.md +++ b/plugins/inputs/salesforce/README.md @@ -21,7 +21,7 @@ It fetches its data from the [limits endpoint](https://developer.salesforce.com/ ### Measurements & Fields: -Salesforce provide one measurment named "salesforce". +Salesforce provides one measurement named "salesforce". Each entry is converted to snake\_case and 2 fields are created.
- \_max represents the limit threshold diff --git a/plugins/inputs/salesforce/salesforce.go b/plugins/inputs/salesforce/salesforce.go index ad40ec566..b66266d3f 100644 --- a/plugins/inputs/salesforce/salesforce.go +++ b/plugins/inputs/salesforce/salesforce.go @@ -166,7 +166,7 @@ func (s *Salesforce) getLoginEndpoint() (string, error) { } } -// Authenticate with Salesfroce +// Authenticate with Salesforce func (s *Salesforce) login() error { if s.Username == "" || s.Password == "" { return errors.New("missing username or password") diff --git a/plugins/inputs/sensors/README.md b/plugins/inputs/sensors/README.md index 9075bda72..19952fd82 100644 --- a/plugins/inputs/sensors/README.md +++ b/plugins/inputs/sensors/README.md @@ -18,7 +18,7 @@ This plugin collects sensor metrics with the `sensors` executable from the lm-se ``` ### Measurements & Fields: -Fields are created dynamicaly depending on the sensors. All fields are float. +Fields are created dynamically depending on the sensors. All fields are float. ### Tags: diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 2f8bf6d5b..57f29bfb0 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -962,9 +962,9 @@ func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, c // We could speed it up by putting a lock in snmpTranslateCache and then // returning it immediately, and multiple callers would then release the // snmpTranslateCachesLock and instead wait on the individual - // snmpTranlsation.Lock to release. But I don't know that the extra complexity + // snmpTranslation.Lock to release. But I don't know that the extra complexity // is worth it. Especially when it would slam the system pretty hard if lots - // of lookups are being perfomed. + // of lookups are being performed. stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err = snmpTranslateCall(oid) snmpTranslateCaches[oid] = stc diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go index 80fc28f7c..cb253a7d3 100644 --- a/plugins/inputs/snmp_trap/snmp_trap.go +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -254,7 +254,7 @@ func (s *SnmpTrap) lookup(oid string) (e mibEntry, err error) { defer s.cacheLock.Unlock() var ok bool if e, ok = s.cache[oid]; !ok { - // cache miss. exec snmptranlate + // cache miss. exec snmptranslate e, err = s.snmptranslate(oid) if err == nil { s.cache[oid] = e diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go index 34dd6cde0..94781cf91 100644 --- a/plugins/inputs/snmp_trap/snmp_trap_test.go +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -84,7 +84,7 @@ func TestReceiveTrap(t *testing.T) { version gosnmp.SnmpVersion trap gosnmp.SnmpTrap // include pdus - // recieve + // receive entries []entry metrics []telegraf.Metric }{ diff --git a/plugins/inputs/socket_listener/README.md b/plugins/inputs/socket_listener/README.md index ec1aa0bef..840b92709 100644 --- a/plugins/inputs/socket_listener/README.md +++ b/plugins/inputs/socket_listener/README.md @@ -82,7 +82,7 @@ setting. Instructions on how to adjust these OS settings are available below. -Some OSes (most notably, Linux) place very restricive limits on the performance +Some OSes (most notably, Linux) place very restrictive limits on the performance of UDP protocols. It is _highly_ recommended that you increase these OS limits to at least 8MB before trying to run large amounts of UDP traffic to your instance. 
8MB is just a recommendation, and can be adjusted higher. diff --git a/plugins/inputs/stackdriver/stackdriver.go b/plugins/inputs/stackdriver/stackdriver.go index f8b4294b7..dc692a480 100644 --- a/plugins/inputs/stackdriver/stackdriver.go +++ b/plugins/inputs/stackdriver/stackdriver.go @@ -544,7 +544,7 @@ func (s *Stackdriver) generatetimeSeriesConfs( for _, filter := range filters { // Add filter for list metric descriptors if // includeMetricTypePrefixes is specified, - // this is more effecient than iterating over + // this is more efficient than iterating over // all metric descriptors req.Filter = filter mdRespChan, err := s.client.ListMetricDescriptors(ctx, req) diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index f3daa117b..f76681134 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -35,7 +35,7 @@ func NewTestStatsd() *Statsd { return &s } -// Test that MaxTCPConections is respected +// Test that MaxTCPConnections is respected func TestConcurrentConns(t *testing.T) { listener := Statsd{ Log: testutil.Logger{}, @@ -66,7 +66,7 @@ func TestConcurrentConns(t *testing.T) { assert.Zero(t, acc.NFields()) } -// Test that MaxTCPConections is respected when max==1 +// Test that MaxTCPConnections is respected when max==1 func TestConcurrentConns1(t *testing.T) { listener := Statsd{ Log: testutil.Logger{}, @@ -95,7 +95,7 @@ func TestConcurrentConns1(t *testing.T) { assert.Zero(t, acc.NFields()) } -// Test that MaxTCPConections is respected +// Test that MaxTCPConnections is respected func TestCloseConcurrentConns(t *testing.T) { listener := Statsd{ Log: testutil.Logger{}, diff --git a/plugins/inputs/syslog/README.md b/plugins/inputs/syslog/README.md index dca51bd97..32c5f2717 100644 --- a/plugins/inputs/syslog/README.md +++ b/plugins/inputs/syslog/README.md @@ -47,7 +47,7 @@ Syslog messages should be formatted according to ## Must be one of "octect-counting", "non-transparent". # framing = "octet-counting" - ## The trailer to be expected in case of non-trasparent framing (default = "LF"). + ## The trailer to be expected in case of non-transparent framing (default = "LF"). ## Must be one of "LF", or "NUL". # trailer = "LF" diff --git a/plugins/inputs/syslog/octetcounting_test.go b/plugins/inputs/syslog/octetcounting_test.go index ea86b808d..210b64dbe 100644 --- a/plugins/inputs/syslog/octetcounting_test.go +++ b/plugins/inputs/syslog/octetcounting_test.go @@ -280,7 +280,7 @@ func getTestCasesForOctetCounting() []testCaseStream { werr: 1, }, // { - // name: "1st/of/ko", // overflow (msglen greather then max allowed octets) + // name: "1st/of/ko", // overflow (msglen greater than max allowed octets) // data: []byte(fmt.Sprintf("8193 <%d>%d %s %s %s %s %s 12 %s", maxP, maxV, maxTS, maxH, maxA, maxPID, maxMID, message7681)), // want: []testutil.Metric{}, // }, diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go index 92d134092..ecf190e47 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -87,7 +87,7 @@ var sampleConfig = ` ## Must be one of "octet-counting", "non-transparent". # framing = "octet-counting" - ## The trailer to be expected in case of non-trasparent framing (default = "LF"). + ## The trailer to be expected in case of non-transparent framing (default = "LF"). ## Must be one of "LF", or "NUL". 
# trailer = "LF" @@ -313,7 +313,7 @@ func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) { opts = append(opts, syslog.WithBestEffort()) } - // Select the parser to use depeding on transport framing + // Select the parser to use depending on transport framing if s.Framing == framing.OctetCounting { // Octet counting transparent framing p = octetcounting.NewParser(opts...) diff --git a/plugins/inputs/tcp_listener/tcp_listener_test.go b/plugins/inputs/tcp_listener/tcp_listener_test.go index 7c04ecaba..16895d674 100644 --- a/plugins/inputs/tcp_listener/tcp_listener_test.go +++ b/plugins/inputs/tcp_listener/tcp_listener_test.go @@ -141,7 +141,7 @@ func TestConnectTCP(t *testing.T) { } } -// Test that MaxTCPConections is respected +// Test that MaxTCPConnections is respected func TestConcurrentConns(t *testing.T) { listener := TcpListener{ Log: testutil.Logger{}, @@ -177,7 +177,7 @@ func TestConcurrentConns(t *testing.T) { assert.Equal(t, io.EOF, err) } -// Test that MaxTCPConections is respected when max==1 +// Test that MaxTCPConnections is respected when max==1 func TestConcurrentConns1(t *testing.T) { listener := TcpListener{ Log: testutil.Logger{}, @@ -211,7 +211,7 @@ func TestConcurrentConns1(t *testing.T) { assert.Equal(t, io.EOF, err) } -// Test that MaxTCPConections is respected +// Test that MaxTCPConnections is respected func TestCloseConcurrentConns(t *testing.T) { listener := TcpListener{ Log: testutil.Logger{}, diff --git a/plugins/inputs/unbound/README.md b/plugins/inputs/unbound/README.md index d7d5c8ba9..1ccd183bc 100644 --- a/plugins/inputs/unbound/README.md +++ b/plugins/inputs/unbound/README.md @@ -21,7 +21,7 @@ a validating, recursive, and caching DNS resolver. ## The default location of the unbound config file can be overridden with: # config_file = "/etc/unbound/unbound.conf" - ## The default timeout of 1s can be overriden with: + ## The default timeout of 1s can be overridden with: # timeout = "1s" ## When set to true, thread metrics are tagged with the thread id. diff --git a/plugins/inputs/unbound/unbound.go b/plugins/inputs/unbound/unbound.go index c8247d0cf..bb4ecde58 100644 --- a/plugins/inputs/unbound/unbound.go +++ b/plugins/inputs/unbound/unbound.go @@ -49,7 +49,7 @@ var sampleConfig = ` ## The default location of the unbound config file can be overridden with: # config_file = "/etc/unbound/unbound.conf" - ## The default timeout of 1s can be overriden with: + ## The default timeout of 1s can be overridden with: # timeout = "1s" ## When set to true, thread metrics are tagged with the thread id. @@ -126,7 +126,7 @@ func unboundRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Serv // All the dots in stat name will replaced by underscores. Histogram statistics will not be collected. 
func (s *Unbound) Gather(acc telegraf.Accumulator) error { - // Always exclude histrogram statistics + // Always exclude histogram statistics statExcluded := []string{"histogram.*"} filterExcluded, err := filter.Compile(statExcluded) if err != nil { diff --git a/plugins/inputs/uwsgi/README.md b/plugins/inputs/uwsgi/README.md index 8053676c0..c4d41a02d 100644 --- a/plugins/inputs/uwsgi/README.md +++ b/plugins/inputs/uwsgi/README.md @@ -13,7 +13,7 @@ The uWSGI input plugin gathers metrics about uWSGI using its [Stats Server](http ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"] servers = ["tcp://127.0.0.1:1717"] - ## General connection timout + ## General connection timeout # timeout = "5s" ``` diff --git a/plugins/inputs/uwsgi/uwsgi.go b/plugins/inputs/uwsgi/uwsgi.go index a20f3b2bf..b13a7b3e6 100644 --- a/plugins/inputs/uwsgi/uwsgi.go +++ b/plugins/inputs/uwsgi/uwsgi.go @@ -42,7 +42,7 @@ func (u *Uwsgi) SampleConfig() string { ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"] servers = ["tcp://127.0.0.1:1717"] - ## General connection timout + ## General connection timeout # timeout = "5s" ` } diff --git a/plugins/inputs/varnish/README.md b/plugins/inputs/varnish/README.md index 3609b12e7..2db149804 100644 --- a/plugins/inputs/varnish/README.md +++ b/plugins/inputs/varnish/README.md @@ -19,7 +19,7 @@ This plugin gathers stats from [Varnish HTTP Cache](https://varnish-cache.org/) stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] ## Optional name for the varnish instance (or working directory) to query - ## Usually appened after -n in varnish cli + ## Usually appended after -n in varnish cli # instance_name = instanceName ## Timeout for varnishstat command @@ -92,7 +92,7 @@ MEMPOOL, etc).
In the output, the prefix will be used as a tag, and removed from - MAIN.s_pipe (uint64, count, Total pipe sessions) - MAIN.s_pass (uint64, count, Total pass- ed requests) - MAIN.s_fetch (uint64, count, Total backend fetches) - - MAIN.s_synth (uint64, count, Total synthethic responses) + - MAIN.s_synth (uint64, count, Total synthetic responses) - MAIN.s_req_hdrbytes (uint64, count, Request header bytes) - MAIN.s_req_bodybytes (uint64, count, Request body bytes) - MAIN.s_resp_hdrbytes (uint64, count, Response header bytes) diff --git a/plugins/inputs/varnish/varnish.go b/plugins/inputs/varnish/varnish.go index 3a18deb6c..893f00c0a 100644 --- a/plugins/inputs/varnish/varnish.go +++ b/plugins/inputs/varnish/varnish.go @@ -49,7 +49,7 @@ var sampleConfig = ` stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] ## Optional name for the varnish instance (or working directory) to query - ## Usually appened after -n in varnish cli + ## Usually appended after -n in varnish cli # instance_name = instanceName ## Timeout for varnishstat command diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md index 6f2e35029..ef9e610fd 100644 --- a/plugins/inputs/vsphere/README.md +++ b/plugins/inputs/vsphere/README.md @@ -155,11 +155,11 @@ vm_metric_exclude = [ "*" ] ## separator character to use for measurement and field names (default: "_") # separator = "_" - ## number of objects to retreive per query for realtime resources (vms and hosts) + ## number of objects to retrieve per query for realtime resources (vms and hosts) ## set to 64 for vCenter 5.5 and 6.0 (default: 256) # max_query_objects = 256 - ## number of metrics to retreive per query for non-realtime resources (clusters and datastores) + ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores) ## set to 64 for vCenter 5.5 and 6.0 (default: 256) # max_query_metrics = 256 @@ -184,10 +184,10 @@ vm_metric_exclude = [ "*" ] ## Custom attributes from vCenter can be very useful for queries in order to slice the ## metrics along different dimension and for forming ad-hoc relationships. They are disabled ## by default, since they can add a considerable amount of tags to the resulting metrics. To - ## enable, simply set custom_attribute_exlude to [] (empty set) and use custom_attribute_include + ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include ## to select the attributes you want to include. ## By default, since they can add a considerable amount of tags to the resulting metrics. To - ## enable, simply set custom_attribute_exlude to [] (empty set) and use custom_attribute_include + ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include ## to select the attributes you want to include. # custom_attribute_include = [] # custom_attribute_exclude = ["*"] @@ -208,7 +208,7 @@ A vCenter administrator can change this setting, see this [VMware KB article](ht Any modification should be reflected in this plugin by modifying the parameter `max_query_objects` ``` - ## number of objects to retreive per query for realtime resources (vms and hosts) + ## number of objects to retrieve per query for realtime resources (vms and hosts) ## set to 64 for vCenter 5.5 and 6.0 (default: 256) # max_query_objects = 256 ``` @@ -275,12 +275,12 @@ We can extend this to looking at a cluster level: ```/DC0/host/Cluster1/*/hadoop vCenter keeps two different kinds of metrics, known as realtime and historical metrics.
-* Realtime metrics: Avaialable at a 20 second granularity. These metrics are stored in memory and are very fast and cheap to query. Our tests have shown that a complete set of realtime metrics for 7000 virtual machines can be obtained in less than 20 seconds. Realtime metrics are only available on **ESXi hosts** and **virtual machine** resources. Realtime metrics are only stored for 1 hour in vCenter. +* Realtime metrics: Available at a 20 second granularity. These metrics are stored in memory and are very fast and cheap to query. Our tests have shown that a complete set of realtime metrics for 7000 virtual machines can be obtained in less than 20 seconds. Realtime metrics are only available on **ESXi hosts** and **virtual machine** resources. Realtime metrics are only stored for 1 hour in vCenter. * Historical metrics: Available at a 5 minute, 30 minutes, 2 hours and 24 hours rollup levels. The vSphere Telegraf plugin only uses the 5 minute rollup. These metrics are stored in the vCenter database and can be expensive and slow to query. Historical metrics are the only type of metrics available for **clusters**, **datastores** and **datacenters**. For more information, refer to the vSphere documentation here: https://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch16_Performance.18.2.html -This distinction has an impact on how Telegraf collects metrics. A single instance of an input plugin can have one and only one collection interval, which means that you typically set the collection interval based on the most frequently collected metric. Let's assume you set the collection interval to 1 minute. All realtime metrics will be collected every minute. Since the historical metrics are only available on a 5 minute interval, the vSphere Telegraf plugin automatically skips four out of five collection cycles for these metrics. This works fine in many cases. Problems arise when the collection of historical metrics takes longer than the collecition interval. This will cause error messages similar to this to appear in the Telegraf logs: +This distinction has an impact on how Telegraf collects metrics. A single instance of an input plugin can have one and only one collection interval, which means that you typically set the collection interval based on the most frequently collected metric. Let's assume you set the collection interval to 1 minute. All realtime metrics will be collected every minute. Since the historical metrics are only available on a 5 minute interval, the vSphere Telegraf plugin automatically skips four out of five collection cycles for these metrics. This works fine in many cases. Problems arise when the collection of historical metrics takes longer than the collection interval. This will cause error messages similar to this to appear in the Telegraf logs: ```2019-01-16T13:41:10Z W! 
[agent] input "inputs.vsphere" did not complete within its interval``` diff --git a/plugins/inputs/vsphere/client.go b/plugins/inputs/vsphere/client.go index 176f48323..b3096f7be 100644 --- a/plugins/inputs/vsphere/client.go +++ b/plugins/inputs/vsphere/client.go @@ -36,7 +36,7 @@ type ClientFactory struct { parent *VSphere } -// Client represents a connection to vSphere and is backed by a govmoni connection +// Client represents a connection to vSphere and is backed by a govmomi connection type Client struct { Client *govmomi.Client Views *view.Manager diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index a7d4db5ba..93d74e63f 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -535,7 +535,7 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, } n := len(sampledObjects) if n > maxMetadataSamples { - // Shuffle samples into the maxMetadatSamples positions + // Shuffle samples into the maxMetadataSamples positions for i := 0; i < maxMetadataSamples; i++ { j := int(rand.Int31n(int32(i + 1))) t := sampledObjects[i] @@ -1159,7 +1159,7 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs queryChunk, res *resour } count++ - // Update highwater marks + // Update hiwater marks e.hwMarks.Put(moid, name, ts) } if nValues == 0 { diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go index 098c49334..e9a75510f 100644 --- a/plugins/inputs/vsphere/vsphere.go +++ b/plugins/inputs/vsphere/vsphere.go @@ -200,11 +200,11 @@ var sampleConfig = ` ## separator character to use for measurement and field names (default: "_") # separator = "_" - ## number of objects to retreive per query for realtime resources (vms and hosts) + ## number of objects to retrieve per query for realtime resources (vms and hosts) ## set to 64 for vCenter 5.5 and 6.0 (default: 256) # max_query_objects = 256 - ## number of metrics to retreive per query for non-realtime resources (clusters and datastores) + ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores) ## set to 64 for vCenter 5.5 and 6.0 (default: 256) # max_query_metrics = 256 @@ -229,10 +229,10 @@ var sampleConfig = ` ## Custom attributes from vCenter can be very useful for queries in order to slice the ## metrics along different dimension and for forming ad-hoc relationships. They are disabled ## by default, since they can add a considerable amount of tags to the resulting metrics. To - ## enable, simply set custom_attribute_exlude to [] (empty set) and use custom_attribute_include + ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include ## to select the attributes you want to include. ## By default, since they can add a considerable amount of tags to the resulting metrics. To - ## enable, simply set custom_attribute_exlude to [] (empty set) and use custom_attribute_include + ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include ## to select the attributes you want to include. 
# custom_attribute_include = [] # custom_attribute_exclude = ["*"] diff --git a/plugins/inputs/webhooks/github/README.md b/plugins/inputs/webhooks/github/README.md index 5115d287c..4a4e64c73 100644 --- a/plugins/inputs/webhooks/github/README.md +++ b/plugins/inputs/webhooks/github/README.md @@ -78,7 +78,7 @@ The tag values and field values show the place on the incoming JSON object where * 'issues' = `event.repository.open_issues_count` int * 'commit' = `event.deployment.sha` string * 'task' = `event.deployment.task` string -* 'environment' = `event.deployment.evnironment` string +* 'environment' = `event.deployment.environment` string * 'description' = `event.deployment.description` string #### [`deployment_status` event](https://developer.github.com/v3/activity/events/types/#deploymentstatusevent) @@ -96,7 +96,7 @@ The tag values and field values show the place on the incoming JSON object where * 'issues' = `event.repository.open_issues_count` int * 'commit' = `event.deployment.sha` string * 'task' = `event.deployment.task` string -* 'environment' = `event.deployment.evnironment` string +* 'environment' = `event.deployment.environment` string * 'description' = `event.deployment.description` string * 'depState' = `event.deployment_status.state` string * 'depDescription' = `event.deployment_status.description` string diff --git a/plugins/inputs/webhooks/particle/README.md b/plugins/inputs/webhooks/particle/README.md index 688898db0..4dc83b347 100644 --- a/plugins/inputs/webhooks/particle/README.md +++ b/plugins/inputs/webhooks/particle/README.md @@ -31,7 +31,7 @@ String data = String::format("{ \"tags\" : { ``` Escaping the "" is required in the source file. -The number of tag values and field values is not restrictied so you can send as many values per webhook call as you'd like. +The number of tag values and field values is not restricted so you can send as many values per webhook call as you'd like. You will need to enable JSON messages in the Webhooks setup of Particle.io, and make sure to check the "include default data" box as well. diff --git a/plugins/inputs/win_perf_counters/pdh.go b/plugins/inputs/win_perf_counters/pdh.go index 6a8dff10b..3a24761b9 100644 --- a/plugins/inputs/win_perf_counters/pdh.go +++ b/plugins/inputs/win_perf_counters/pdh.go @@ -214,7 +214,7 @@ func init() { // // To view all (internationalized...) counters on a system, there are three non-programmatic ways: perfmon utility, // the typeperf command, and the the registry editor. perfmon.exe is perhaps the easiest way, because it's basically a -// full implemention of the pdh.dll API, except with a GUI and all that. The registry setting also provides an +// full implementation of the pdh.dll API, except with a GUI and all that. The registry setting also provides an // interface to the available counters, and can be found at the following key: // // HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Perflib\CurrentLanguage diff --git a/plugins/inputs/zfs/README.md b/plugins/inputs/zfs/README.md index b60711e30..e22156bc6 100644 --- a/plugins/inputs/zfs/README.md +++ b/plugins/inputs/zfs/README.md @@ -268,7 +268,7 @@ A short description for some of the metrics. `arcstats_evict_l2_ineligible` We evicted something which cannot be stored in the l2. Reasons could be: - - We have multiple pools, we evicted something from a pool whithout an l2 device. + - We have multiple pools, we evicted something from a pool without an l2 device. - The zfs property secondary cache. 
`arcstats_c` Arc target size, this is the size the system thinks the arc should have. diff --git a/plugins/inputs/zfs/zfs_freebsd_test.go b/plugins/inputs/zfs/zfs_freebsd_test.go index dba135cfd..87f21f672 100644 --- a/plugins/inputs/zfs/zfs_freebsd_test.go +++ b/plugins/inputs/zfs/zfs_freebsd_test.go @@ -155,7 +155,7 @@ func TestZfsGeneratesMetrics(t *testing.T) { err = z.Gather(&acc) require.NoError(t, err) - //four pool, vdev_cache_stats and zfetchstatus metrics + //four pool, vdev_cache_stats and zfetchstats metrics intMetrics = getKstatMetricsVdevAndZfetch() acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags) diff --git a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go index 60bf1b51a..dde89570b 100644 --- a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go +++ b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go @@ -5,7 +5,7 @@ vice versa. To convert from json to thrift, the json is unmarshalled, converted to zipkincore.Span structures, and marshalled into thrift binary protocol. The json must be in an array format (even if it only has one object), -because the tool automatically tries to unmarshall the json into an array of structs. +because the tool automatically tries to unmarshal the json into an array of structs. To convert from thrift to json, the opposite process must happen. The thrift binary data must be read into an array of diff --git a/plugins/outputs/amqp/README.md b/plugins/outputs/amqp/README.md index f810a0a7b..04715f8e3 100644 --- a/plugins/outputs/amqp/README.md +++ b/plugins/outputs/amqp/README.md @@ -1,6 +1,6 @@ # AMQP Output Plugin -This plugin writes to a AMQP 0-9-1 Exchange, a promenent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/). +This plugin writes to a AMQP 0-9-1 Exchange, a prominent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/). This plugin does not bind the exchange to a queue. @@ -40,7 +40,7 @@ For an introduction to AMQP see: ## Additional exchange arguments. # exchange_arguments = { } - # exchange_arguments = {"hash_propery" = "timestamp"} + # exchange_arguments = {"hash_property" = "timestamp"} ## Authentication credentials for the PLAIN auth_method. # username = "" diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index cb4cc4501..b00480d5a 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -99,7 +99,7 @@ var sampleConfig = ` ## Additional exchange arguments. # exchange_arguments = { } - # exchange_arguments = {"hash_propery" = "timestamp"} + # exchange_arguments = {"hash_property" = "timestamp"} ## Authentication credentials for the PLAIN auth_method. 
# username = "" diff --git a/plugins/outputs/application_insights/application_insights_test.go b/plugins/outputs/application_insights/application_insights_test.go index 7255ad068..5a017823c 100644 --- a/plugins/outputs/application_insights/application_insights_test.go +++ b/plugins/outputs/application_insights/application_insights_test.go @@ -288,7 +288,7 @@ func TestTagsAppliedToTelemetry(t *testing.T) { transmitter.AssertNumberOfCalls(t, "Track", len(tt.metricValueFields)) transmitter.AssertCalled(t, "Track", mock.AnythingOfType("*appinsights.MetricTelemetry")) - // Will verify that all original tags are present in telemetry.Properies map + // Will verify that all original tags are present in telemetry.Properties map verifyAdditionalTelemetry(assert, m, transmitter, tt.metricValueFields, metricName) } diff --git a/plugins/outputs/exec/exec.go b/plugins/outputs/exec/exec.go index 5995d4bca..d3697627e 100644 --- a/plugins/outputs/exec/exec.go +++ b/plugins/outputs/exec/exec.go @@ -44,12 +44,12 @@ func (e *Exec) SetSerializer(serializer serializers.Serializer) { e.serializer = serializer } -// Connect satisfies the Ouput interface. +// Connect satisfies the Output interface. func (e *Exec) Connect() error { return nil } -// Close satisfies the Ouput interface. +// Close satisfies the Output interface. func (e *Exec) Close() error { return nil } diff --git a/plugins/outputs/file/README.md b/plugins/outputs/file/README.md index 350633c56..45d0ac155 100644 --- a/plugins/outputs/file/README.md +++ b/plugins/outputs/file/README.md @@ -11,7 +11,7 @@ This plugin writes telegraf metrics to files ## Use batch serialization format instead of line based delimiting. The ## batch format allows for the production of non line based output formats and - ## may more effiently encode and write metrics. + ## may more efficiently encode and write metrics. # use_batch_format = false ## The file will be rotated after the time interval specified. When set diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go index 12d70d8f3..3798f107a 100644 --- a/plugins/outputs/file/file.go +++ b/plugins/outputs/file/file.go @@ -31,7 +31,7 @@ var sampleConfig = ` ## Use batch serialization format instead of line based delimiting. The ## batch format allows for the production of non line based output formats and - ## may more effiently encode metric groups. + ## may more efficiently encode metric groups. # use_batch_format = false ## The file will be rotated after the time interval specified. When set diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 2e3599788..92498f022 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -236,7 +236,7 @@ func (c *httpClient) CreateDatabase(ctx context.Context, database string) error } // Don't attempt to recreate the database after a 403 Forbidden error. - // This behavior exists only to maintain backwards compatiblity. + // This behavior exists only to maintain backwards compatibility. if resp.StatusCode == http.StatusForbidden { c.createDatabaseExecuted[database] = true } diff --git a/plugins/outputs/instrumental/README.md b/plugins/outputs/instrumental/README.md index 128599ee8..f8b48fd1e 100644 --- a/plugins/outputs/instrumental/README.md +++ b/plugins/outputs/instrumental/README.md @@ -20,6 +20,6 @@ by whitespace. 
The `increment` type is only used if the metric comes in as a cou template = "host.tags.measurement.field" ## Timeout in seconds to connect timeout = "2s" - ## Debug true - Print communcation to Instrumental + ## Debug true - Print communication to Instrumental debug = false ``` diff --git a/plugins/outputs/instrumental/instrumental.go b/plugins/outputs/instrumental/instrumental.go index a861ebc28..7284c0ca1 100644 --- a/plugins/outputs/instrumental/instrumental.go +++ b/plugins/outputs/instrumental/instrumental.go @@ -51,7 +51,7 @@ var sampleConfig = ` template = "host.tags.measurement.field" ## Timeout in seconds to connect timeout = "2s" - ## Display Communcation to Instrumental + ## Display Communication to Instrumental debug = false ` diff --git a/plugins/outputs/kinesis/README.md b/plugins/outputs/kinesis/README.md index 12b6178fd..1931dacb9 100644 --- a/plugins/outputs/kinesis/README.md +++ b/plugins/outputs/kinesis/README.md @@ -51,7 +51,7 @@ solution to scale out. ### use_random_partitionkey [DEPRECATED] -When true a random UUID will be generated and used as the partitionkey when sending data to Kinesis. This allows data to evenly spread across multiple shards in the stream. Due to using a random paritionKey there can be no guarantee of ordering when consuming the data off the shards. +When true a random UUID will be generated and used as the partitionkey when sending data to Kinesis. This allows data to evenly spread across multiple shards in the stream. Due to using a random partitionKey there can be no guarantee of ordering when consuming the data off the shards. If true then the partitionkey option will be ignored. ### partition @@ -70,7 +70,7 @@ All metrics will be mapped to the same shard which may limit throughput. #### tag -This will take the value of the specified tag from each metric as the paritionKey. +This will take the value of the specified tag from each metric as the partitionKey. If the tag is not found the `default` value will be used or `telegraf` if unspecified #### measurement diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index f6b205b1e..1aa840974 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -70,7 +70,7 @@ var sampleConfig = ` streamname = "StreamName" ## DEPRECATED: PartitionKey as used for sharding data. partitionkey = "PartitionKey" - ## DEPRECATED: If set the paritionKey will be a random UUID on every put. + ## DEPRECATED: If set the partitionKey will be a random UUID on every put. ## This allows for scaling across multiple shards in a stream. ## This will cause issues with ordering. use_random_partitionkey = false diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index 0603394ec..53bb8c124 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -32,7 +32,7 @@ type Librato struct { var reUnacceptedChar = regexp.MustCompile("[^.a-zA-Z0-9_-]") var sampleConfig = ` - ## Librator API Docs + ## Librato API Docs ## http://dev.librato.com/v1/metrics-authentication ## Librato API user api_user = "telegraf@influxdb.com" # required. diff --git a/plugins/outputs/mqtt/README.md b/plugins/outputs/mqtt/README.md index 38eec7c3b..aa028e056 100644 --- a/plugins/outputs/mqtt/README.md +++ b/plugins/outputs/mqtt/README.md @@ -53,7 +53,7 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt ### Optional parameters: * `username`: The username to connect MQTT server. 
* `password`: The password to connect MQTT server. -* `client_id`: The unique client id to connect MQTT server. If this paramater is not set then a random ID is generated. +* `client_id`: The unique client id to connect MQTT server. If this parameter is not set then a random ID is generated. * `timeout`: Timeout for write operations. default: 5s * `tls_ca`: TLS CA * `tls_cert`: TLS CERT diff --git a/plugins/outputs/stackdriver/README.md b/plugins/outputs/stackdriver/README.md index 142d1efa0..27ef3a09f 100644 --- a/plugins/outputs/stackdriver/README.md +++ b/plugins/outputs/stackdriver/README.md @@ -28,7 +28,7 @@ Additional resource labels can be configured by `resource_labels`. By default th ## Custom resource type # resource_type = "generic_node" - ## Additonal resource labels + ## Additional resource labels # [outputs.stackdriver.resource_labels] # node_id = "$HOSTNAME" # namespace = "myapp" diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index fbb946fbd..3bd38614b 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -61,7 +61,7 @@ var sampleConfig = ` ## Custom resource type # resource_type = "generic_node" - ## Additonal resource labels + ## Additional resource labels # [outputs.stackdriver.resource_labels] # node_id = "$HOSTNAME" # namespace = "myapp" diff --git a/plugins/outputs/syslog/README.md b/plugins/outputs/syslog/README.md index 65f038f57..cb9bc8965 100644 --- a/plugins/outputs/syslog/README.md +++ b/plugins/outputs/syslog/README.md @@ -42,13 +42,13 @@ Syslog messages are formatted according to ## be one of "octet-counting", "non-transparent". # framing = "octet-counting" - ## The trailer to be expected in case of non-trasparent framing (default = "LF"). + ## The trailer to be expected in case of non-transparent framing (default = "LF"). ## Must be one of "LF", or "NUL". # trailer = "LF" ## SD-PARAMs settings ## Syslog messages can contain key/value pairs within zero or more - ## structured data sections. For each unrecognised metric tag/field a + ## structured data sections. For each unrecognized metric tag/field a ## SD-PARAMS is created. ## ## Example: @@ -64,8 +64,8 @@ Syslog messages are formatted according to # sdparam_separator = "_" ## Default sdid used for tags/fields that don't contain a prefix defined in - ## the explict sdids setting below If no default is specified, no SD-PARAMs - ## will be used for unrecognised field. + ## the explicit sdids setting below If no default is specified, no SD-PARAMs + ## will be used for unrecognized field. # default_sdid = "default@32473" ## List of explicit prefixes to extract from tag/field keys and use as the diff --git a/plugins/outputs/syslog/syslog.go b/plugins/outputs/syslog/syslog.go index 582e8e920..41833f464 100644 --- a/plugins/outputs/syslog/syslog.go +++ b/plugins/outputs/syslog/syslog.go @@ -64,13 +64,13 @@ var sampleConfig = ` ## be one of "octet-counting", "non-transparent". # framing = "octet-counting" - ## The trailer to be expected in case of non-trasparent framing (default = "LF"). + ## The trailer to be expected in case of non-transparent framing (default = "LF"). ## Must be one of "LF", or "NUL". # trailer = "LF" ## SD-PARAMs settings ## Syslog messages can contain key/value pairs within zero or more - ## structured data sections. For each unrecognised metric tag/field a + ## structured data sections. For each unrecognized metric tag/field a ## SD-PARAMS is created. 
  ##
  ## Example:
@@ -86,8 +86,8 @@ var sampleConfig = `
   # sdparam_separator = "_"

   ## Default sdid used for tags/fields that don't contain a prefix defined in
-  ## the explict sdids setting below If no default is specified, no SD-PARAMs
-  ## will be used for unrecognised field.
+  ## the explicit sdids setting below If no default is specified, no SD-PARAMs
+  ## will be used for unrecognized field.
   # default_sdid = "default@32473"

   ## List of explicit prefixes to extract from tag/field keys and use as the
diff --git a/plugins/outputs/wavefront/README.md b/plugins/outputs/wavefront/README.md
index 231e1057d..2daca328c 100644
--- a/plugins/outputs/wavefront/README.md
+++ b/plugins/outputs/wavefront/README.md
@@ -33,7 +33,7 @@ This plugin writes to a [Wavefront](https://www.wavefront.com) proxy, in Wavefro
   #convert_paths = true

   ## Use Strict rules to sanitize metric and tag names from invalid characters
-  ## When enabled forward slash (/) and comma (,) will be accpeted
+  ## When enabled forward slash (/) and comma (,) will be accepted
   #use_strict = false

   ## Use Regex to sanitize metric and tag names from invalid characters
@@ -75,7 +75,7 @@ source of the metric.
 ### Wavefront Data format
 The expected input for Wavefront is specified in the following way:
 ```
- <metric> <value> [<timestamp>] <source|host>=<sourceTagValue> [tagk1=tagv1 ...tagkN=tagvN]
+ <metric> <value> [<timestamp>] <source|host>=<sourceTagValue> [tagk1=tagv1 ...tagkN=tagvN]
 ```
 More information about the Wavefront data format is available [here](https://community.wavefront.com/docs/DOC-1031)
diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go
index c455b6fa6..79c998e25 100644
--- a/plugins/outputs/wavefront/wavefront.go
+++ b/plugins/outputs/wavefront/wavefront.go
@@ -84,7 +84,7 @@ var sampleConfig = `
   #convert_paths = true

   ## Use Strict rules to sanitize metric and tag names from invalid characters
-  ## When enabled forward slash (/) and comma (,) will be accpeted
+  ## When enabled forward slash (/) and comma (,) will be accepted
   #use_strict = false

   ## Use Regex to sanitize metric and tag names from invalid characters
diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md
index bd5024a1a..9ca34d288 100644
--- a/plugins/parsers/csv/README.md
+++ b/plugins/parsers/csv/README.md
@@ -40,7 +40,7 @@ values.
   ## These columns will be skipped in the header as well.
   csv_skip_columns = 0

-  ## The seperator between csv fields
+  ## The separator between csv fields
   ## By default, the parser assumes a comma (",")
   csv_delimiter = ","

diff --git a/plugins/parsers/dropwizard/README.md b/plugins/parsers/dropwizard/README.md
index f0ff6d15c..436518a67 100644
--- a/plugins/parsers/dropwizard/README.md
+++ b/plugins/parsers/dropwizard/README.md
@@ -1,6 +1,6 @@
 # Dropwizard

-The `dropwizard` data format can parse the [JSON Dropwizard][dropwizard] representation of a single dropwizard metric registry. By default, tags are parsed from metric names as if they were actual influxdb line protocol keys (`measurement<,tag_set>`) which can be overriden by defining a custom [template pattern][templates]. All field value types are supported, `string`, `number` and `boolean`.
+The `dropwizard` data format can parse the [JSON Dropwizard][dropwizard] representation of a single dropwizard metric registry. By default, tags are parsed from metric names as if they were actual influxdb line protocol keys (`measurement<,tag_set>`) which can be overridden by defining a custom [template pattern][templates]. All field value types are supported, `string`, `number` and `boolean`.
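To make the default dropwizard name handling concrete, here is a minimal sketch, assuming plain comma-separated `key=value` tags with no escaping and no template pattern (the real parser in `plugins/parsers/dropwizard` handles both):

```go
package main

import (
	"fmt"
	"strings"
)

// splitMetricName reads a registry key such as "requests,host=web01"
// as measurement<,tag_set>: everything before the first comma is the
// measurement, the rest is parsed as key=value tags. Malformed pairs
// are silently dropped in this sketch.
func splitMetricName(name string) (string, map[string]string) {
	tags := map[string]string{}
	parts := strings.Split(name, ",")
	for _, p := range parts[1:] {
		if kv := strings.SplitN(p, "=", 2); len(kv) == 2 {
			tags[kv[0]] = kv[1]
		}
	}
	return parts[0], tags
}

func main() {
	m, tags := splitMetricName("requests,host=web01,region=us-east")
	fmt.Println(m, tags) // requests map[host:web01 region:us-east]
}
```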
[templates]: /docs/TEMPLATE_PATTERN.md [dropwizard]: http://metrics.dropwizard.io/3.1.0/manual/json/ diff --git a/plugins/parsers/graphite/config.go b/plugins/parsers/graphite/config.go index 7a5c759e7..915077c06 100644 --- a/plugins/parsers/graphite/config.go +++ b/plugins/parsers/graphite/config.go @@ -7,7 +7,7 @@ import ( const ( // DefaultSeparator is the default join character to use when joining multiple - // measurment parts in a template. + // measurement parts in a template. DefaultSeparator = "." ) diff --git a/plugins/parsers/influx/handler.go b/plugins/parsers/influx/handler.go index 2f088d19d..ae08d5a7c 100644 --- a/plugins/parsers/influx/handler.go +++ b/plugins/parsers/influx/handler.go @@ -31,7 +31,7 @@ func (h *MetricHandler) SetTimePrecision(p time.Duration) { // comes from the server clock, truncated to the nearest unit of // measurement provided in precision. // - // When a timestamp is provided in the metric, precsision is + // When a timestamp is provided in the metric, precision is // overloaded to hold the unit of measurement of the timestamp. } diff --git a/plugins/parsers/json/README.md b/plugins/parsers/json/README.md index b318f32e0..3bfa60044 100644 --- a/plugins/parsers/json/README.md +++ b/plugins/parsers/json/README.md @@ -46,7 +46,7 @@ ignored unless specified in the `tag_key` or `json_string_fields` options. ## metric. json_time_key = "" - ## Time format is the time layout that should be used to interprete the json_time_key. + ## Time format is the time layout that should be used to interpret the json_time_key. ## The time must be `unix`, `unix_ms`, `unix_us`, `unix_ns`, or a time in the ## "reference time". To define a different format, arrange the values from ## the "reference time" in the example to match the format you will be diff --git a/plugins/parsers/wavefront/element.go b/plugins/parsers/wavefront/element.go index 3b7c875a2..56a8d870b 100644 --- a/plugins/parsers/wavefront/element.go +++ b/plugins/parsers/wavefront/element.go @@ -37,7 +37,7 @@ type LiteralParser struct { func (ep *NameParser) parse(p *PointParser, pt *Point) error { //Valid characters are: a-z, A-Z, 0-9, hyphen ("-"), underscore ("_"), dot ("."). // Forward slash ("/") and comma (",") are allowed if metricName is enclosed in double quotes. - // Delta (U+2206) is allowed as the first characeter of the + // Delta (U+2206) is allowed as the first character of the // metricName name, err := parseLiteral(p) diff --git a/plugins/processors/date/README.md b/plugins/processors/date/README.md index 215cd83e3..9a093fe0e 100644 --- a/plugins/processors/date/README.md +++ b/plugins/processors/date/README.md @@ -6,7 +6,7 @@ A common use is to add a tag that can be used to group by month or year. 
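A rough sketch of the month/year grouping idea follows; the `month` tag key and Go's reference-time layout `"Jan"` are illustrative assumptions, since the processor's actual tag key and format come from its configuration:

```go
package main

import (
	"fmt"
	"time"
)

// monthTag derives a month tag value from a metric timestamp, so that
// downstream queries can group by month.
func monthTag(t time.Time) string {
	return t.Format("Jan")
}

func main() {
	ts := time.Date(2020, time.May, 14, 0, 0, 0, 0, time.UTC)
	fmt.Println("month =", monthTag(ts)) // month = May
}
```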
A few example usecases include: 1) consumption data for utilities on per month basis -2) bandwith capacity per month +2) bandwidth capacity per month 3) compare energy production or sales on a yearly or monthly basis ### Configuration diff --git a/plugins/processors/template/README.md b/plugins/processors/template/README.md index f08a96c6b..348dae096 100644 --- a/plugins/processors/template/README.md +++ b/plugins/processors/template/README.md @@ -46,7 +46,7 @@ Add measurement name as a tag: ```diff - cpu,hostname=localhost time_idle=42 -+ cpu,hostname=localhost,meaurement=cpu time_idle=42 ++ cpu,hostname=localhost,measurement=cpu time_idle=42 ``` Add the year as a tag, similar to the date processor: diff --git a/plugins/processors/topk/README.md b/plugins/processors/topk/README.md index 15046991d..308d4f9f8 100644 --- a/plugins/processors/topk/README.md +++ b/plugins/processors/topk/README.md @@ -53,7 +53,7 @@ Note that depending on the amount of metrics on each computed bucket, more than # add_rank_fields = [] ## These settings provide a way to know what values the plugin is generating - ## when aggregating metrics. The 'add_agregate_field' setting allows to + ## when aggregating metrics. The 'add_aggregate_field' setting allows to ## specify for which fields the final aggregation value is required. If the ## list is non empty, then a field will be added to each every metric for ## each field present in this setting. This field will contain diff --git a/plugins/processors/topk/topk.go b/plugins/processors/topk/topk.go index c2244c6e3..907ec1cc4 100644 --- a/plugins/processors/topk/topk.go +++ b/plugins/processors/topk/topk.go @@ -90,7 +90,7 @@ var sampleConfig = ` # add_rank_fields = [] ## These settings provide a way to know what values the plugin is generating - ## when aggregating metrics. The 'add_agregate_field' setting allows to + ## when aggregating metrics. The 'add_aggregate_field' setting allows to ## specify for which fields the final aggregation value is required. If the ## list is non empty, then a field will be added to each every metric for ## each field present in this setting. 
This field will contain diff --git a/plugins/processors/topk/topk_test.go b/plugins/processors/topk/topk_test.go index ff0eb4d8b..928111b29 100644 --- a/plugins/processors/topk/topk_test.go +++ b/plugins/processors/topk/topk_test.go @@ -35,7 +35,7 @@ type metricChange struct { newTags []tag // Tags that should be added to the metric runHash bool // Sometimes the metrics' HashID must be run so reflect.DeepEqual works - // This happens because telegraf.Metric mantains an internal cache of + // This happens because telegraf.Metric maintains an internal cache of // its hash value that is set when HashID() is called for the first time } @@ -149,7 +149,7 @@ func TestTopkAggregatorsSmokeTests(t *testing.T) { aggregators := []string{"mean", "sum", "max", "min"} - //The answer is equal to the original set for these particual scenarios + //The answer is equal to the original set for these particular scenarios input := MetricsSet1 answer := MetricsSet1 diff --git a/plugins/serializers/splunkmetric/README.md b/plugins/serializers/splunkmetric/README.md index acd497dbc..ba2170d9c 100644 --- a/plugins/serializers/splunkmetric/README.md +++ b/plugins/serializers/splunkmetric/README.md @@ -89,7 +89,7 @@ to manage the HEC authorization, here's a sample config for an HTTP output: data_format = "splunkmetric" ## Provides time, index, source overrides for the HEC splunkmetric_hec_routing = true - # splunkmentric_multimetric = true + # splunkmetric_multimetric = true ## Additional HTTP headers [outputs.http.headers] @@ -102,7 +102,7 @@ to manage the HEC authorization, here's a sample config for an HTTP output: ## Overrides You can override the default values for the HEC token you are using by adding additional tags to the config file. -The following aspects of the token can be overriden with tags: +The following aspects of the token can be overridden with tags: * index * source diff --git a/plugins/serializers/splunkmetric/splunkmetric.go b/plugins/serializers/splunkmetric/splunkmetric.go index 772771a10..801d0d69e 100644 --- a/plugins/serializers/splunkmetric/splunkmetric.go +++ b/plugins/serializers/splunkmetric/splunkmetric.go @@ -83,7 +83,7 @@ func (s *serializer) createMulti(metric telegraf.Metric, dataGroup HECTimeSeries dataGroup.Source = commonTags.Source dataGroup.Fields = commonTags.Fields - // Stuff the metrid data into the structure. + // Stuff the metric data into the structure. for _, field := range metric.FieldList() { value, valid := verifyValue(field.Value) @@ -101,7 +101,7 @@ func (s *serializer) createMulti(metric telegraf.Metric, dataGroup HECTimeSeries // Output the data as a fields array and host,index,time,source overrides for the HEC. 
metricJSON, err = json.Marshal(dataGroup) default: - // Just output the data and the time, useful for file based outuputs + // Just output the data and the time, useful for file based outputs dataGroup.Fields["time"] = dataGroup.Time metricJSON, err = json.Marshal(dataGroup.Fields) } @@ -115,7 +115,7 @@ func (s *serializer) createMulti(metric telegraf.Metric, dataGroup HECTimeSeries } func (s *serializer) createSingle(metric telegraf.Metric, dataGroup HECTimeSeries, commonTags CommonTags) (metricGroup []byte, err error) { - /* The default mode is to generate one JSON entitiy per metric (required for pre-8.0 Splunks) + /* The default mode is to generate one JSON entity per metric (required for pre-8.0 Splunks) ** ** The format for single metric is 'nameOfMetric = valueOfMetric' */ @@ -149,7 +149,7 @@ func (s *serializer) createSingle(metric telegraf.Metric, dataGroup HECTimeSerie // Output the data as a fields array and host,index,time,source overrides for the HEC. metricJSON, err = json.Marshal(dataGroup) default: - // Just output the data and the time, useful for file based outuputs + // Just output the data and the time, useful for file based outputs dataGroup.Fields["time"] = dataGroup.Time metricJSON, err = json.Marshal(dataGroup.Fields) } diff --git a/plugins/serializers/wavefront/README.md b/plugins/serializers/wavefront/README.md index 8ab77148d..3b72d95b4 100644 --- a/plugins/serializers/wavefront/README.md +++ b/plugins/serializers/wavefront/README.md @@ -9,7 +9,7 @@ The `wavefront` serializer translates the Telegraf metric format to the [Wavefro files = ["stdout"] ## Use Strict rules to sanitize metric and tag names from invalid characters - ## When enabled forward slash (/) and comma (,) will be accpeted + ## When enabled forward slash (/) and comma (,) will be accepted # wavefront_use_strict = false ## point tags to use as the source name for Wavefront (if none found, host will be used) diff --git a/scripts/build.py b/scripts/build.py index e3e791a1d..e30f44258 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -666,7 +666,7 @@ def package(build_output, pkg_name, version, nightly=False, iteration=1, static= else: if package_type == 'rpm' and release and '~' in package_version: package_version, suffix = package_version.split('~', 1) - # The ~ indicatees that this is a prerelease so we give it a leading 0. + # The ~ indicates that this is a prerelease so we give it a leading 0. package_iteration = "0.%s" % suffix fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format( fpm_common_args, diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 5716d3518..6e5148ef7 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -717,7 +717,7 @@ func (a *Accumulator) BoolField(measurement string, field string) (bool, bool) { } // NopAccumulator is used for benchmarking to isolate the plugin from the internal -// telegraf accumulator machinary. +// telegraf accumulator machinery. 
type NopAccumulator struct{} func (n *NopAccumulator) AddFields(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) { From 68b936c9f0c12a4c2abfb27bc37be461d0df577d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 14 May 2020 00:48:05 -0700 Subject: [PATCH 1762/1815] Update procstat pid_tag documentation --- plugins/inputs/procstat/README.md | 11 +++++++---- plugins/inputs/procstat/procstat.go | 9 ++++++--- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index 9ecc3d367..380321569 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -1,7 +1,7 @@ # Procstat Input Plugin The procstat plugin can be used to monitor the system resource usage of one or more processes. -The procstat_lookup metric displays the query information, +The procstat_lookup metric displays the query information, specifically the number of PIDs returned on a search Processes can be selected for monitoring using one of several methods: @@ -44,9 +44,12 @@ Processes can be selected for monitoring using one of several methods: ## When true add the full cmdline as a tag. # cmdline_tag = false - ## Add PID as a tag instead of a field; useful to differentiate between - ## processes whose tags are otherwise the same. Can create a large number - ## of series, use judiciously. + ## Add the PID as a tag instead of as a field. When collecting multiple + ## processes with otherwise matching tags this setting should be enabled to + ## ensure each process has a unique identity. + ## + ## Enabling this option may result in a large number of series, especially + ## when processes have a short lifetime. # pid_tag = false ## Method to use when finding process IDs. Can be one of 'pgrep', or diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 8e56e4bf7..61e575370 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -69,9 +69,12 @@ var sampleConfig = ` ## When true add the full cmdline as a tag. # cmdline_tag = false - ## Add PID as a tag instead of a field; useful to differentiate between - ## processes whose tags are otherwise the same. Can create a large number - ## of series, use judiciously. + ## Add the PID as a tag instead of as a field. When collecting multiple + ## processes with otherwise matching tags this setting should be enabled to + ## ensure each process has a unique identity. + ## + ## Enabling this option may result in a large number of series, especially + ## when processes have a short lifetime. # pid_tag = false ## Method to use when finding process IDs. Can be one of 'pgrep', or From d2831d73ab4ab21175f5a7e0fd446b083c5ccd87 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Thu, 14 May 2020 08:19:19 -0700 Subject: [PATCH 1763/1815] Update docs for execd plugins (#7465) --- CONTRIBUTING.md | 5 +++++ README.md | 1 + 2 files changed, 6 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a9a6eb008..d68d726dc 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -13,6 +13,11 @@ 1. Ensure you have added proper unit tests and documentation. 1. Open a new [pull request][]. 
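The procstat `pid_tag` guidance above comes down to series cardinality, which is easiest to see with a sketch; the series keys here are illustrative assumptions, not plugin internals:

```go
package main

import "fmt"

// Tags are part of a series key, fields are not: putting the pid in
// the tag set yields one series per process, while keeping it as a
// field leaves all processes on a single series.
func main() {
	pids := []int{101, 202, 303}

	asTag := map[string]struct{}{}
	for _, pid := range pids {
		asTag[fmt.Sprintf("procstat,exe=app,pid=%d", pid)] = struct{}{}
	}

	asField := map[string]struct{}{}
	for range pids {
		asField["procstat,exe=app"] = struct{}{}
	}

	fmt.Println("series with pid_tag=true: ", len(asTag))   // 3
	fmt.Println("series with pid_tag=false:", len(asField)) // 1
}
```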
+#### Contributing an External Plugin *(experimental)* +Input plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/execd) without having to change the plugin code. + +Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/execd/shim) to easily compile it as a separate app and run it from the inputs.execd plugin. + #### Security Vulnerability Reporting InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our open source projects, please responsibly disclose it by contacting security@influxdata.com. More details about diff --git a/README.md b/README.md index d29ea7df7..fed4c06cd 100644 --- a/README.md +++ b/README.md @@ -28,6 +28,7 @@ There are many ways to contribute: - [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls) - Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/) - [Contribute plugins](CONTRIBUTING.md) +- [Contribute external plugins](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/execd/shim) *(experimental)* ## Minimum Requirements From f74824eecb9f593c4190ae0f07f7e4983f158c04 Mon Sep 17 00:00:00 2001 From: Max Greenwald Date: Thu, 14 May 2020 16:39:00 -0400 Subject: [PATCH 1764/1815] Fix documentation of percent_packet_loss field (#7510) --- plugins/inputs/ping/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 4376c7a19..91af1b2ae 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -145,7 +145,7 @@ sockets and the `ping_group_range` setting. 
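The renamed `percent_packet_loss` field follows the usual loss formula; a minimal sketch of the arithmetic, assumed rather than taken from the plugin's own code:

```go
package main

import "fmt"

// percentPacketLoss computes the conventional loss percentage from
// transmitted and received packet counts.
func percentPacketLoss(transmitted, received int) float64 {
	if transmitted == 0 {
		return 0
	}
	return float64(transmitted-received) / float64(transmitted) * 100
}

func main() {
	fmt.Println(percentPacketLoss(5, 4)) // 20
}
```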
- fields: - packets_transmitted (integer) - packets_received (integer) - - percent_packets_loss (float) + - percent_packet_loss (float) - ttl (integer, Not available on Windows) - average_response_ms (integer) - minimum_response_ms (integer) From bf1eb291f250d944930832d059020ba8b2be4f95 Mon Sep 17 00:00:00 2001 From: Josh Soref Date: Fri, 15 May 2020 18:43:32 -0400 Subject: [PATCH 1765/1815] Fix assorted spelling mistakes (#7507) --- internal/tls/config.go | 2 +- internal/tls/config_test.go | 2 +- metric/metric_test.go | 2 +- models/filter_test.go | 2 +- plugins/inputs/cloudwatch/cloudwatch.go | 4 ++-- .../inputs/eventhub_consumer/eventhub_consumer.go | 2 +- plugins/inputs/fireboard/fireboard.go | 2 +- plugins/inputs/github/github.go | 8 ++++---- .../inputs/kinesis_consumer/kinesis_consumer.go | 2 +- plugins/inputs/mailchimp/chimp_api.go | 4 ++-- plugins/inputs/mailchimp/mailchimp_test.go | 2 +- plugins/inputs/marklogic/marklogic.go | 2 +- plugins/inputs/minecraft/internal/rcon/rcon.go | 4 ++-- plugins/inputs/monit/monit_test.go | 4 ++-- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 2 +- .../nginx_plus_api/nginx_plus_api_metrics_test.go | 2 +- plugins/inputs/openntpd/openntpd_test.go | 2 +- plugins/inputs/puppetagent/puppetagent.go | 2 +- plugins/inputs/riak/riak.go | 2 +- plugins/inputs/smart/smart.go | 6 +++--- plugins/inputs/smart/smart_test.go | 2 +- plugins/inputs/solr/solr.go | 8 ++++---- plugins/inputs/stackdriver/stackdriver.go | 2 +- plugins/inputs/tengine/tengine.go | 4 ++-- plugins/inputs/varnish/varnish_test.go | 2 +- plugins/inputs/vsphere/vsphere_test.go | 2 +- .../inputs/win_perf_counters/performance_query.go | 10 +++++----- .../win_perf_counters_integration_test.go | 6 +++--- .../win_perf_counters/win_perf_counters_test.go | 14 +++++++------- plugins/inputs/x509_cert/x509_cert.go | 2 +- .../cmd/stress_test_write/stress_test_write.go | 2 +- plugins/outputs/kinesis/kinesis.go | 2 +- plugins/outputs/mqtt/mqtt.go | 2 +- plugins/outputs/opentsdb/opentsdb.go | 10 +++++----- plugins/parsers/csv/parser_test.go | 2 +- plugins/parsers/wavefront/element.go | 4 ++-- plugins/parsers/wavefront/parser.go | 2 +- plugins/serializers/graphite/graphite.go | 6 +++--- scripts/build.py | 2 +- selfstat/selfstat.go | 14 +++++++------- selfstat/selfstat_test.go | 2 +- 41 files changed, 79 insertions(+), 79 deletions(-) diff --git a/internal/tls/config.go b/internal/tls/config.go index 185c92cd0..59fbc4952 100644 --- a/internal/tls/config.go +++ b/internal/tls/config.go @@ -130,7 +130,7 @@ func (c *ServerConfig) TLSConfig() (*tls.Config, error) { if tlsConfig.MinVersion != 0 && tlsConfig.MaxVersion != 0 && tlsConfig.MinVersion > tlsConfig.MaxVersion { return nil, fmt.Errorf( - "tls min version %q can't be greater then tls max version %q", tlsConfig.MinVersion, tlsConfig.MaxVersion) + "tls min version %q can't be greater than tls max version %q", tlsConfig.MinVersion, tlsConfig.MaxVersion) } return tlsConfig, nil diff --git a/internal/tls/config_test.go b/internal/tls/config_test.go index d7d75236e..66ccad70d 100644 --- a/internal/tls/config_test.go +++ b/internal/tls/config_test.go @@ -225,7 +225,7 @@ func TestServerConfig(t *testing.T) { expErr: true, }, { - name: "TLS Max Version less then TLS Min version", + name: "TLS Max Version less than TLS Min version", server: tls.ServerConfig{ TLSCert: pki.ServerCertPath(), TLSKey: pki.ServerKeyPath(), diff --git a/metric/metric_test.go b/metric/metric_test.go index 004fa5915..7033d3230 100644 --- a/metric/metric_test.go +++ b/metric/metric_test.go @@ 
-334,7 +334,7 @@ func TestValueType(t *testing.T) { assert.Equal(t, telegraf.Gauge, m.Type()) } -func TestCopyAggreate(t *testing.T) { +func TestCopyAggregate(t *testing.T) { m1 := baseMetric() m1.SetAggregate(true) m2 := m1.Copy() diff --git a/models/filter_test.go b/models/filter_test.go index 84cd1d397..d241244b9 100644 --- a/models/filter_test.go +++ b/models/filter_test.go @@ -97,7 +97,7 @@ func TestFilter_Empty(t *testing.T) { "foo_bar", "foo.bar", "foo-bar", - "supercalifradjulisticexpialidocious", + "supercalifragilisticexpialidocious", } for _, measurement := range measurements { diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index cb0e10ac0..9a728d989 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -295,7 +295,7 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) { if c.Metrics != nil { for _, m := range c.Metrics { metrics := []*cloudwatch.Metric{} - if !hasWilcard(m.Dimensions) { + if !hasWildcard(m.Dimensions) { dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions)) for k, d := range m.Dimensions { dimensions[k] = &cloudwatch.Dimension{ @@ -603,7 +603,7 @@ func (f *metricCache) isValid() bool { return f.metrics != nil && time.Since(f.built) < f.ttl } -func hasWilcard(dimensions []*Dimension) bool { +func hasWildcard(dimensions []*Dimension) bool { for _, d := range dimensions { if d.Value == "" || d.Value == "*" { return true diff --git a/plugins/inputs/eventhub_consumer/eventhub_consumer.go b/plugins/inputs/eventhub_consumer/eventhub_consumer.go index da8a6e5f7..72cc4c25f 100644 --- a/plugins/inputs/eventhub_consumer/eventhub_consumer.go +++ b/plugins/inputs/eventhub_consumer/eventhub_consumer.go @@ -292,7 +292,7 @@ func (e *EventHub) onDelivery( delete(groups, track.ID()) if !ok { // The metrics should always be found, this message indicates a programming error. 
- e.Log.Errorf("Could not find delievery: %d", track.ID()) + e.Log.Errorf("Could not find delivery: %d", track.ID()) return true } diff --git a/plugins/inputs/fireboard/fireboard.go b/plugins/inputs/fireboard/fireboard.go index 2e9c7b025..a92930aae 100644 --- a/plugins/inputs/fireboard/fireboard.go +++ b/plugins/inputs/fireboard/fireboard.go @@ -106,7 +106,7 @@ func (r *Fireboard) Gather(acc telegraf.Accumulator) error { if resp.StatusCode == http.StatusForbidden { return fmt.Errorf("fireboard server responded with %d [Forbidden], verify your authToken", resp.StatusCode) } - return fmt.Errorf("fireboard responded with unexepcted status code %d", resp.StatusCode) + return fmt.Errorf("fireboard responded with unexpected status code %d", resp.StatusCode) } // Decode the response JSON into a new stats struct var stats []fireboardStats diff --git a/plugins/inputs/github/github.go b/plugins/inputs/github/github.go index 2f31a7268..3e5597707 100644 --- a/plugins/inputs/github/github.go +++ b/plugins/inputs/github/github.go @@ -24,7 +24,7 @@ type GitHub struct { HTTPTimeout internal.Duration `toml:"http_timeout"` githubClient *github.Client - obfusticatedToken string + obfuscatedToken string RateLimit selfstat.Stat RateLimitErrors selfstat.Stat @@ -67,7 +67,7 @@ func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error) Timeout: g.HTTPTimeout.Duration, } - g.obfusticatedToken = "Unauthenticated" + g.obfuscatedToken = "Unauthenticated" if g.AccessToken != "" { tokenSource := oauth2.StaticTokenSource( @@ -76,7 +76,7 @@ func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error) oauthClient := oauth2.NewClient(ctx, tokenSource) ctx = context.WithValue(ctx, oauth2.HTTPClient, oauthClient) - g.obfusticatedToken = g.AccessToken[0:4] + "..." + g.AccessToken[len(g.AccessToken)-3:] + g.obfuscatedToken = g.AccessToken[0:4] + "..." 
+ g.AccessToken[len(g.AccessToken)-3:] return g.newGithubClient(oauthClient) } @@ -105,7 +105,7 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error { g.githubClient = githubClient tokenTags := map[string]string{ - "access_token": g.obfusticatedToken, + "access_token": g.obfuscatedToken, } g.RateLimitErrors = selfstat.Register("github", "rate_limit_blocks", tokenTags) diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index b524cf9e4..6a3b1c830 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -221,7 +221,7 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { }) if err != nil { k.cancel() - k.Log.Errorf("Scan encounterred an error: %s", err.Error()) + k.Log.Errorf("Scan encountered an error: %s", err.Error()) k.cons = nil } }() diff --git a/plugins/inputs/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go index 066ffb4e7..a40614b1d 100644 --- a/plugins/inputs/mailchimp/chimp_api.go +++ b/plugins/inputs/mailchimp/chimp_api.go @@ -178,7 +178,7 @@ type Report struct { Unsubscribed int `json:"unsubscribed"` SendTime string `json:"send_time"` - TimeSeries []TimeSerie + TimeSeries []TimeSeries Bounces Bounces `json:"bounces"` Forwards Forwards `json:"forwards"` Opens Opens `json:"opens"` @@ -237,7 +237,7 @@ type ListStats struct { ClickRate float64 `json:"click_rate"` } -type TimeSerie struct { +type TimeSeries struct { TimeStamp string `json:"timestamp"` EmailsSent int `json:"emails_sent"` UniqueOpens int `json:"unique_opens"` diff --git a/plugins/inputs/mailchimp/mailchimp_test.go b/plugins/inputs/mailchimp/mailchimp_test.go index ed6898e60..0c4dab56d 100644 --- a/plugins/inputs/mailchimp/mailchimp_test.go +++ b/plugins/inputs/mailchimp/mailchimp_test.go @@ -140,7 +140,7 @@ func TestMailChimpGatherReport(t *testing.T) { } -func TestMailChimpGatherErroror(t *testing.T) { +func TestMailChimpGatherError(t *testing.T) { ts := httptest.NewServer( http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { diff --git a/plugins/inputs/marklogic/marklogic.go b/plugins/inputs/marklogic/marklogic.go index b62d017de..699541d14 100644 --- a/plugins/inputs/marklogic/marklogic.go +++ b/plugins/inputs/marklogic/marklogic.go @@ -84,7 +84,7 @@ type MlHost struct { // Description of plugin returned func (c *Marklogic) Description() string { - return "Retrives information on a specific host in a MarkLogic Cluster" + return "Retrieves information on a specific host in a MarkLogic Cluster" } var sampleConfig = ` diff --git a/plugins/inputs/minecraft/internal/rcon/rcon.go b/plugins/inputs/minecraft/internal/rcon/rcon.go index 345583a06..f9e49e6e6 100644 --- a/plugins/inputs/minecraft/internal/rcon/rcon.go +++ b/plugins/inputs/minecraft/internal/rcon/rcon.go @@ -32,8 +32,8 @@ const ( // Rcon package errors. 
var ( - ErrInvalidWrite = errors.New("Failed to write the payload corretly to remote connection.") - ErrInvalidRead = errors.New("Failed to read the response corretly from remote connection.") + ErrInvalidWrite = errors.New("Failed to write the payload correctly to remote connection.") + ErrInvalidRead = errors.New("Failed to read the response correctly from remote connection.") ErrInvalidChallenge = errors.New("Server failed to mirror request challenge.") ErrUnauthorizedRequest = errors.New("Client not authorized to remote server.") ErrFailedAuthorization = errors.New("Failed to authorize to the remote server.") diff --git a/plugins/inputs/monit/monit_test.go b/plugins/inputs/monit/monit_test.go index 1f7e671f4..1d95b45a5 100644 --- a/plugins/inputs/monit/monit_test.go +++ b/plugins/inputs/monit/monit_test.go @@ -590,7 +590,7 @@ func TestConnection(t *testing.T) { } } -func TestInvalidUsernameorPassword(t *testing.T) { +func TestInvalidUsernameOrPassword(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -624,7 +624,7 @@ func TestInvalidUsernameorPassword(t *testing.T) { assert.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") } -func TestNoUsernameorPasswordConfiguration(t *testing.T) { +func TestNoUsernameOrPasswordConfiguration(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 9e0ba371f..2f07b3aff 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -341,7 +341,7 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { } if len(m.Servers) == 0 { - return opts, fmt.Errorf("could not get host infomations") + return opts, fmt.Errorf("could not get host informations") } for _, server := range m.Servers { diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go index 584816fe7..f309886cf 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go @@ -715,7 +715,7 @@ func TestGatherHttpLocationZonesMetrics(t *testing.T) { }) } -func TestHatherHttpUpstreamsMetrics(t *testing.T) { +func TestGatherHttpUpstreamsMetrics(t *testing.T) { ts, n := prepareEndpoint(t, httpUpstreamsPath, defaultApiVersion, httpUpstreamsPayload) defer ts.Close() diff --git a/plugins/inputs/openntpd/openntpd_test.go b/plugins/inputs/openntpd/openntpd_test.go index 0c2d20142..d629949a5 100644 --- a/plugins/inputs/openntpd/openntpd_test.go +++ b/plugins/inputs/openntpd/openntpd_test.go @@ -81,7 +81,7 @@ func TestParseSimpleOutputwithStatePrefix(t *testing.T) { acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags) } -func TestParseSimpleOutputInavlidPeer(t *testing.T) { +func TestParseSimpleOutputInvalidPeer(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ run: OpenntpdCTL(simpleOutputInvalidPeer, TestTimeout, false), diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go index c8a265bb8..1d0e30aa8 100644 --- a/plugins/inputs/puppetagent/puppetagent.go +++ b/plugins/inputs/puppetagent/puppetagent.go @@ -79,7 +79,7 @@ func (pa *PuppetAgent) SampleConfig() string { // Description returns description of PuppetAgent plugin func (pa *PuppetAgent) Description() string { - 
return `Reads last_run_summary.yaml file and converts to measurments` + return `Reads last_run_summary.yaml file and converts to measurements` } // Gather reads stats from all configured servers accumulates stats diff --git a/plugins/inputs/riak/riak.go b/plugins/inputs/riak/riak.go index 9ddbbfa65..19f622289 100644 --- a/plugins/inputs/riak/riak.go +++ b/plugins/inputs/riak/riak.go @@ -127,7 +127,7 @@ func (r *Riak) gatherServer(s string, acc telegraf.Accumulator) error { // Successful responses will always return status code 200 if resp.StatusCode != http.StatusOK { - return fmt.Errorf("riak responded with unexepcted status code %d", resp.StatusCode) + return fmt.Errorf("riak responded with unexpected status code %d", resp.StatusCode) } // Decode the response JSON into a new stats struct diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go index 6c83e9890..b34174a33 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -105,7 +105,7 @@ var ( "Available Spare": { Name: "Available_Spare", Parse: func(fields, deviceFields map[string]interface{}, str string) error { - return parseCommaSeperatedInt(fields, deviceFields, strings.TrimSuffix(str, "%")) + return parseCommaSeparatedInt(fields, deviceFields, strings.TrimSuffix(str, "%")) }, }, } @@ -360,7 +360,7 @@ func gatherDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo, co tags["id"] = attr.ID } - parse := parseCommaSeperatedInt + parse := parseCommaSeparatedInt if attr.Parse != nil { parse = attr.Parse } @@ -421,7 +421,7 @@ func parseInt(str string) int64 { return 0 } -func parseCommaSeperatedInt(fields, _ map[string]interface{}, str string) error { +func parseCommaSeparatedInt(fields, _ map[string]interface{}, str string) error { i, err := strconv.ParseInt(strings.Replace(str, ",", "", -1), 10, 64) if err != nil { return err diff --git a/plugins/inputs/smart/smart_test.go b/plugins/inputs/smart/smart_test.go index 615ea9ba6..3ea6e309f 100644 --- a/plugins/inputs/smart/smart_test.go +++ b/plugins/inputs/smart/smart_test.go @@ -714,7 +714,7 @@ Transport protocol: SAS (SPL-3) Local Time is: Wed Apr 17 15:01:28 2019 PDT SMART support is: Available - device has SMART capability. 
SMART support is: Enabled -Temp$rature Warning: Disabled or Not Supported +Temperature Warning: Disabled or Not Supported === START OF READ SMART DATA SECTION === SMART Health Status: OK diff --git a/plugins/inputs/solr/solr.go b/plugins/inputs/solr/solr.go index 4629e0246..ce44fa086 100644 --- a/plugins/inputs/solr/solr.go +++ b/plugins/inputs/solr/solr.go @@ -226,7 +226,7 @@ func addAdminCoresStatusToAcc(acc telegraf.Accumulator, adminCoreStatus *AdminCo func addCoreMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error { var coreMetrics map[string]Core if len(mBeansData.SolrMbeans) < 2 { - return fmt.Errorf("no core metric data to unmarshall") + return fmt.Errorf("no core metric data to unmarshal") } if err := json.Unmarshal(mBeansData.SolrMbeans[1], &coreMetrics); err != nil { return err @@ -257,7 +257,7 @@ func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansDa var queryMetrics map[string]QueryHandler if len(mBeansData.SolrMbeans) < 4 { - return fmt.Errorf("no query handler metric data to unmarshall") + return fmt.Errorf("no query handler metric data to unmarshal") } if err := json.Unmarshal(mBeansData.SolrMbeans[3], &queryMetrics); err != nil { @@ -332,7 +332,7 @@ func addUpdateHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansD var updateMetrics map[string]UpdateHandler if len(mBeansData.SolrMbeans) < 6 { - return fmt.Errorf("no update handler metric data to unmarshall") + return fmt.Errorf("no update handler metric data to unmarshal") } if err := json.Unmarshal(mBeansData.SolrMbeans[5], &updateMetrics); err != nil { return err @@ -410,7 +410,7 @@ func getInt(unk interface{}) int64 { // Add cache metrics section to accumulator func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error { if len(mBeansData.SolrMbeans) < 8 { - return fmt.Errorf("no cache metric data to unmarshall") + return fmt.Errorf("no cache metric data to unmarshal") } var cacheMetrics map[string]Cache if err := json.Unmarshal(mBeansData.SolrMbeans[7], &cacheMetrics); err != nil { diff --git a/plugins/inputs/stackdriver/stackdriver.go b/plugins/inputs/stackdriver/stackdriver.go index dc692a480..431076743 100644 --- a/plugins/inputs/stackdriver/stackdriver.go +++ b/plugins/inputs/stackdriver/stackdriver.go @@ -218,7 +218,7 @@ func (c *stackdriverMetricClient) ListMetricDescriptors( mdDesc, mdErr := mdResp.Next() if mdErr != nil { if mdErr != iterator.Done { - c.log.Errorf("Failed iterating metric desciptor responses: %q: %v", req.String(), mdErr) + c.log.Errorf("Failed iterating metric descriptor responses: %q: %v", req.String(), mdErr) } break } diff --git a/plugins/inputs/tengine/tengine.go b/plugins/inputs/tengine/tengine.go index 1ee63740f..245e0a3a2 100644 --- a/plugins/inputs/tengine/tengine.go +++ b/plugins/inputs/tengine/tengine.go @@ -101,7 +101,7 @@ func (n *Tengine) createHttpClient() (*http.Client, error) { return client, nil } -type TengineSatus struct { +type TengineStatus struct { host string bytes_in uint64 bytes_out uint64 @@ -135,7 +135,7 @@ type TengineSatus struct { } func (n *Tengine) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { - var tenginestatus TengineSatus + var tenginestatus TengineStatus resp, err := n.client.Get(addr.String()) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) diff --git a/plugins/inputs/varnish/varnish_test.go b/plugins/inputs/varnish/varnish_test.go index e8ca94e3c..96e5c3556 
100644 --- a/plugins/inputs/varnish/varnish_test.go +++ b/plugins/inputs/varnish/varnish_test.go @@ -192,7 +192,7 @@ MAIN.s_req 0 0.00 Total requests seen MAIN.s_pipe 0 0.00 Total pipe sessions seen MAIN.s_pass 0 0.00 Total pass-ed requests seen MAIN.s_fetch 0 0.00 Total backend fetches initiated -MAIN.s_synth 0 0.00 Total synthethic responses made +MAIN.s_synth 0 0.00 Total synthetic responses made MAIN.s_req_hdrbytes 0 0.00 Request header bytes MAIN.s_req_bodybytes 0 0.00 Request body bytes MAIN.s_resp_hdrbytes 0 0.00 Response header bytes diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go index fe0dfe41e..3c0a31312 100644 --- a/plugins/inputs/vsphere/vsphere_test.go +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -135,7 +135,7 @@ func defaultVSphere() *VSphere { VMInclude: []string{"/**"}, DatastoreMetricInclude: []string{ "disk.used.*", - "disk.provsioned.*"}, + "disk.provisioned.*"}, DatastoreMetricExclude: nil, DatastoreInclude: []string{"/**"}, DatacenterMetricInclude: nil, diff --git a/plugins/inputs/win_perf_counters/performance_query.go b/plugins/inputs/win_perf_counters/performance_query.go index ce247a495..a59f96b84 100644 --- a/plugins/inputs/win_perf_counters/performance_query.go +++ b/plugins/inputs/win_perf_counters/performance_query.go @@ -74,7 +74,7 @@ func (m *PerformanceQueryImpl) Open() error { // Close closes the counterPath, releases associated counter handles and frees resources func (m *PerformanceQueryImpl) Close() error { if m.query == 0 { - return errors.New("uninitialised query") + return errors.New("uninitialized query") } if ret := PdhCloseQuery(m.query); ret != ERROR_SUCCESS { @@ -87,7 +87,7 @@ func (m *PerformanceQueryImpl) Close() error { func (m *PerformanceQueryImpl) AddCounterToQuery(counterPath string) (PDH_HCOUNTER, error) { var counterHandle PDH_HCOUNTER if m.query == 0 { - return 0, errors.New("uninitialised query") + return 0, errors.New("uninitialized query") } if ret := PdhAddCounter(m.query, counterPath, 0, &counterHandle); ret != ERROR_SUCCESS { @@ -99,7 +99,7 @@ func (m *PerformanceQueryImpl) AddCounterToQuery(counterPath string) (PDH_HCOUNT func (m *PerformanceQueryImpl) AddEnglishCounterToQuery(counterPath string) (PDH_HCOUNTER, error) { var counterHandle PDH_HCOUNTER if m.query == 0 { - return 0, errors.New("uninitialised query") + return 0, errors.New("uninitialized query") } if ret := PdhAddEnglishCounter(m.query, counterPath, 0, &counterHandle); ret != ERROR_SUCCESS { return 0, NewPdhError(ret) @@ -184,7 +184,7 @@ func (m *PerformanceQueryImpl) GetFormattedCounterArrayDouble(hCounter PDH_HCOUN func (m *PerformanceQueryImpl) CollectData() error { var ret uint32 if m.query == 0 { - return errors.New("uninitialised query") + return errors.New("uninitialized query") } if ret = PdhCollectQueryData(m.query); ret != ERROR_SUCCESS { @@ -195,7 +195,7 @@ func (m *PerformanceQueryImpl) CollectData() error { func (m *PerformanceQueryImpl) CollectDataWithTime() (time.Time, error) { if m.query == 0 { - return time.Now(), errors.New("uninitialised query") + return time.Now(), errors.New("uninitialized query") } ret, mtime := PdhCollectQueryDataWithTime(m.query) if ret != ERROR_SUCCESS { diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go index 546dfa143..822943949 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go +++ 
b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go @@ -27,15 +27,15 @@ func TestWinPerformanceQueryImpl(t *testing.T) { _, err = query.AddCounterToQuery("") require.Error(t, err, "uninitialized query must return errors") - assert.True(t, strings.Contains(err.Error(), "uninitialised")) + assert.True(t, strings.Contains(err.Error(), "uninitialized")) _, err = query.AddEnglishCounterToQuery("") require.Error(t, err, "uninitialized query must return errors") - assert.True(t, strings.Contains(err.Error(), "uninitialised")) + assert.True(t, strings.Contains(err.Error(), "uninitialized")) err = query.CollectData() require.Error(t, err, "uninitialized query must return errors") - assert.True(t, strings.Contains(err.Error(), "uninitialised")) + assert.True(t, strings.Contains(err.Error(), "uninitialized")) err = query.Open() require.NoError(t, err) diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_test.go index 13eebdc95..a11f0ace8 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_test.go @@ -50,7 +50,7 @@ func (m *FakePerformanceQuery) Open() error { func (m *FakePerformanceQuery) Close() error { if !m.openCalled { - return errors.New("CloSe: uninitialised query") + return errors.New("CloSe: uninitialized query") } m.openCalled = false return nil @@ -58,7 +58,7 @@ func (m *FakePerformanceQuery) Close() error { func (m *FakePerformanceQuery) AddCounterToQuery(counterPath string) (PDH_HCOUNTER, error) { if !m.openCalled { - return 0, errors.New("AddCounterToQuery: uninitialised query") + return 0, errors.New("AddCounterToQuery: uninitialized query") } if c, ok := m.counters[counterPath]; ok { return c.handle, nil @@ -69,7 +69,7 @@ func (m *FakePerformanceQuery) AddCounterToQuery(counterPath string) (PDH_HCOUNT func (m *FakePerformanceQuery) AddEnglishCounterToQuery(counterPath string) (PDH_HCOUNTER, error) { if !m.openCalled { - return 0, errors.New("AddEnglishCounterToQuery: uninitialised query") + return 0, errors.New("AddEnglishCounterToQuery: uninitialized query") } if c, ok := m.counters[counterPath]; ok { return c.handle, nil @@ -97,7 +97,7 @@ func (m *FakePerformanceQuery) ExpandWildCardPath(counterPath string) ([]string, func (m *FakePerformanceQuery) GetFormattedCounterValueDouble(counterHandle PDH_HCOUNTER) (float64, error) { if !m.openCalled { - return 0, errors.New("GetFormattedCounterValueDouble: uninitialised query") + return 0, errors.New("GetFormattedCounterValueDouble: uninitialized query") } for _, counter := range m.counters { if counter.handle == counterHandle { @@ -129,7 +129,7 @@ func (m *FakePerformanceQuery) findCounterByHandle(counterHandle PDH_HCOUNTER) * func (m *FakePerformanceQuery) GetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER) ([]CounterValue, error) { if !m.openCalled { - return nil, errors.New("GetFormattedCounterArrayDouble: uninitialised query") + return nil, errors.New("GetFormattedCounterArrayDouble: uninitialized query") } for _, c := range m.counters { if c.handle == hCounter { @@ -157,14 +157,14 @@ func (m *FakePerformanceQuery) GetFormattedCounterArrayDouble(hCounter PDH_HCOUN func (m *FakePerformanceQuery) CollectData() error { if !m.openCalled { - return errors.New("CollectData: uninitialised query") + return errors.New("CollectData: uninitialized query") } return nil } func (m *FakePerformanceQuery) CollectDataWithTime() (time.Time, error) { if !m.openCalled { - return 
time.Now(), errors.New("CollectData: uninitialised query") + return time.Now(), errors.New("CollectData: uninitialized query") } return MetricTime, nil } diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index 49f5fc88e..89744351f 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -127,7 +127,7 @@ func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certifica } return certs, nil default: - return nil, fmt.Errorf("unsuported scheme '%s' in location %s", u.Scheme, u.String()) + return nil, fmt.Errorf("unsupported scheme '%s' in location %s", u.Scheme, u.String()) } } diff --git a/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go b/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go index ddc0d4918..ea25b49a0 100644 --- a/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go +++ b/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go @@ -55,7 +55,7 @@ func main() { zipkin.HTTPBatchInterval(time.Duration(BatchTimeInterval)*time.Second)) defer collector.Close() if err != nil { - log.Fatalf("Error intializing zipkin http collector: %v\n", err) + log.Fatalf("Error initializing zipkin http collector: %v\n", err) } tracer, err := zipkin.NewTracer( diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index 1aa840974..88620fa70 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -117,7 +117,7 @@ func (k *KinesisOutput) Description() string { func (k *KinesisOutput) Connect() error { if k.Partition == nil { - log.Print("E! kinesis : Deprecated paritionkey configuration in use, please consider using outputs.kinesis.partition") + log.Print("E! 
kinesis : Deprecated partitionkey configuration in use, please consider using outputs.kinesis.partition") } // We attempt first to create a session to Kinesis using an IAMS role, if that fails it will fall through to using diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index f6fba5501..13785cd68 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -223,7 +223,7 @@ func (m *MQTT) createOpts() (*paho.ClientOptions, error) { } if len(m.Servers) == 0 { - return opts, fmt.Errorf("could not get host infomations") + return opts, fmt.Errorf("could not get host information") } for _, host := range m.Servers { server := fmt.Sprintf("%s://%s", scheme, host) diff --git a/plugins/outputs/opentsdb/opentsdb.go b/plugins/outputs/opentsdb/opentsdb.go index 1dfd2ce38..766c7a304 100644 --- a/plugins/outputs/opentsdb/opentsdb.go +++ b/plugins/outputs/opentsdb/opentsdb.go @@ -16,14 +16,14 @@ import ( var ( allowedChars = regexp.MustCompile(`[^a-zA-Z0-9-_./\p{L}]`) - hypenChars = strings.NewReplacer( + hyphenChars = strings.NewReplacer( "@", "-", "*", "-", `%`, "-", "#", "-", "$", "-") defaultHttpPath = "/api/put" - defaultSeperator = "_" + defaultSeparator = "_" ) type OpenTSDB struct { @@ -261,8 +261,8 @@ func (o *OpenTSDB) Close() error { } func sanitize(value string) string { - // Apply special hypenation rules to preserve backwards compatibility - value = hypenChars.Replace(value) + // Apply special hyphenation rules to preserve backwards compatibility + value = hyphenChars.Replace(value) // Replace any remaining illegal chars return allowedChars.ReplaceAllLiteralString(value, "_") } @@ -271,7 +271,7 @@ func init() { outputs.Add("opentsdb", func() telegraf.Output { return &OpenTSDB{ HttpPath: defaultHttpPath, - Separator: defaultSeperator, + Separator: defaultSeparator, } }) } diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go index 1b6fb8f3b..e39a5df70 100644 --- a/plugins/parsers/csv/parser_test.go +++ b/plugins/parsers/csv/parser_test.go @@ -243,7 +243,7 @@ func TestTrimSpace(t *testing.T) { require.Equal(t, expectedFields, metrics[0].Fields()) } -func TestTrimSpaceDelimetedBySpace(t *testing.T) { +func TestTrimSpaceDelimitedBySpace(t *testing.T) { p := Parser{ Delimiter: " ", HeaderRowCount: 1, diff --git a/plugins/parsers/wavefront/element.go b/plugins/parsers/wavefront/element.go index 56a8d870b..5ed37645c 100644 --- a/plugins/parsers/wavefront/element.go +++ b/plugins/parsers/wavefront/element.go @@ -28,7 +28,7 @@ type WhiteSpaceParser struct { type TagParser struct{} type LoopedParser struct { wrappedParser ElementParser - wsPaser *WhiteSpaceParser + wsParser *WhiteSpaceParser } type LiteralParser struct { literal string @@ -136,7 +136,7 @@ func (ep *LoopedParser) parse(p *PointParser, pt *Point) error { if err != nil { return err } - err = ep.wsPaser.parse(p, pt) + err = ep.wsParser.parse(p, pt) if err == ErrEOF { break } diff --git a/plugins/parsers/wavefront/parser.go b/plugins/parsers/wavefront/parser.go index 62fe8f5ef..7ae455d47 100644 --- a/plugins/parsers/wavefront/parser.go +++ b/plugins/parsers/wavefront/parser.go @@ -47,7 +47,7 @@ func NewWavefrontElements() []ElementParser { var elements []ElementParser wsParser := WhiteSpaceParser{} wsParserNextOpt := WhiteSpaceParser{nextOptional: true} - repeatParser := LoopedParser{wrappedParser: &TagParser{}, wsPaser: &wsParser} + repeatParser := LoopedParser{wrappedParser: &TagParser{}, wsParser: &wsParser} elements = append(elements, &NameParser{}, &wsParser, 
&ValueParser{}, &wsParserNextOpt, &TimestampParser{optional: true}, &wsParserNextOpt, &repeatParser) return elements diff --git a/plugins/serializers/graphite/graphite.go b/plugins/serializers/graphite/graphite.go index 590f80b45..2f6cd8da5 100644 --- a/plugins/serializers/graphite/graphite.go +++ b/plugins/serializers/graphite/graphite.go @@ -17,7 +17,7 @@ const DEFAULT_TEMPLATE = "host.tags.measurement.field" var ( allowedChars = regexp.MustCompile(`[^a-zA-Z0-9-:._=\p{L}]`) - hypenChars = strings.NewReplacer( + hyphenChars = strings.NewReplacer( "/", "-", "@", "-", "*", "-", @@ -308,8 +308,8 @@ func buildTags(tags map[string]string) string { } func sanitize(value string) string { - // Apply special hypenation rules to preserve backwards compatibility - value = hypenChars.Replace(value) + // Apply special hyphenation rules to preserve backwards compatibility + value = hyphenChars.Replace(value) // Apply rule to drop some chars to preserve backwards compatibility value = dropChars.Replace(value) // Replace any remaining illegal chars diff --git a/scripts/build.py b/scripts/build.py index e30f44258..b309f5095 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -892,7 +892,7 @@ if __name__ == '__main__': help='Send build stats to InfluxDB using provided database name') parser.add_argument('--nightly', action='store_true', - help='Mark build output as nightly build (will incremement the minor version)') + help='Mark build output as nightly build (will increment the minor version)') parser.add_argument('--update', action='store_true', help='Update build dependencies prior to building') diff --git a/selfstat/selfstat.go b/selfstat/selfstat.go index 821db1c94..a60ee099e 100644 --- a/selfstat/selfstat.go +++ b/selfstat/selfstat.go @@ -17,7 +17,7 @@ import ( ) var ( - registry *rgstry + registry *Registry ) // Stat is an interface for dealing with telegraf statistics collected @@ -109,12 +109,12 @@ func Metrics() []telegraf.Metric { return metrics } -type rgstry struct { +type Registry struct { stats map[uint64]map[string]Stat mu sync.Mutex } -func (r *rgstry) register(measurement, field string, tags map[string]string) Stat { +func (r *Registry) register(measurement, field string, tags map[string]string) Stat { r.mu.Lock() defer r.mu.Unlock() @@ -137,7 +137,7 @@ func (r *rgstry) register(measurement, field string, tags map[string]string) Sta return s } -func (r *rgstry) registerTiming(measurement, field string, tags map[string]string) Stat { +func (r *Registry) registerTiming(measurement, field string, tags map[string]string) Stat { r.mu.Lock() defer r.mu.Unlock() @@ -160,7 +160,7 @@ func (r *rgstry) registerTiming(measurement, field string, tags map[string]strin return s } -func (r *rgstry) get(key uint64, field string) (Stat, bool) { +func (r *Registry) get(key uint64, field string) (Stat, bool) { if _, ok := r.stats[key]; !ok { return nil, false } @@ -172,7 +172,7 @@ func (r *rgstry) get(key uint64, field string) (Stat, bool) { return nil, false } -func (r *rgstry) set(key uint64, s Stat) { +func (r *Registry) set(key uint64, s Stat) { if _, ok := r.stats[key]; !ok { r.stats[key] = make(map[string]Stat) } @@ -201,7 +201,7 @@ func key(measurement string, tags map[string]string) uint64 { } func init() { - registry = &rgstry{ + registry = &Registry{ stats: make(map[uint64]map[string]Stat), } } diff --git a/selfstat/selfstat_test.go b/selfstat/selfstat_test.go index 10ce32728..3d590bb96 100644 --- a/selfstat/selfstat_test.go +++ b/selfstat/selfstat_test.go @@ -18,7 +18,7 @@ var ( // testCleanup 
resets the global registry for test cleanup & unlocks the test lock func testCleanup() { - registry = &rgstry{ + registry = &Registry{ stats: make(map[uint64]map[string]Stat), } testLock.Unlock() From 443ac6df23578b183e40abbe124a67dca45e4cc3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 19 May 2020 08:20:29 -0700 Subject: [PATCH 1766/1815] Fix negative value parsing in ipmi_sensor input (#7541) --- plugins/inputs/ipmi_sensor/ipmi.go | 2 +- plugins/inputs/ipmi_sensor/ipmi_test.go | 78 +++++++++++++++++-------- 2 files changed, 55 insertions(+), 25 deletions(-) diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go index 9ac842b89..fb53e1bc7 100644 --- a/plugins/inputs/ipmi_sensor/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi.go @@ -21,7 +21,7 @@ var ( execCommand = exec.Command // execCommand is used to mock commands in tests. re_v1_parse_line = regexp.MustCompile(`^(?P[^|]*)\|(?P[^|]*)\|(?P.*)`) re_v2_parse_line = regexp.MustCompile(`^(?P[^|]*)\|[^|]+\|(?P[^|]*)\|(?P[^|]*)\|(?:(?P[^|]+))?`) - re_v2_parse_description = regexp.MustCompile(`^(?P[0-9.]+)\s(?P.*)|(?P.+)|^$`) + re_v2_parse_description = regexp.MustCompile(`^(?P-?[0-9.]+)\s(?P.*)|(?P.+)|^$`) re_v2_parse_unit = regexp.MustCompile(`^(?P[^,]+)(?:,\s*(?P.*))?`) ) diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go index 9d448435d..bd5e02c19 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" @@ -664,11 +665,10 @@ func Test_parseV2(t *testing.T) { measuredAt time.Time } tests := []struct { - name string - args args - wantFields map[string]interface{} - wantTags map[string]string - wantErr bool + name string + args args + expected []telegraf.Metric + wantErr bool }{ { name: "Test correct V2 parsing with analog value with unit", @@ -677,14 +677,19 @@ func Test_parseV2(t *testing.T) { cmdOut: []byte("Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected"), measuredAt: time.Now(), }, - wantFields: map[string]interface{}{"value": float64(110)}, - wantTags: map[string]string{ - "name": "power_supply_1", - "status_code": "ok", - "server": "host", - "entity_id": "10.1", - "unit": "watts", - "status_desc": "presence_detected", + expected: []telegraf.Metric{ + testutil.MustMetric("ipmi_sensor", + map[string]string{ + "name": "power_supply_1", + "status_code": "ok", + "server": "host", + "entity_id": "10.1", + "unit": "watts", + "status_desc": "presence_detected", + }, + map[string]interface{}{"value": 110.0}, + time.Unix(0, 0), + ), }, wantErr: false, }, @@ -695,26 +700,51 @@ { name: "Test correct V2 parsing with analog value without unit", args: args{ hostname: "host", cmdOut: []byte("Intrusion | 73h | ok | 7.1 |"), measuredAt: time.Now(), }, - wantFields: map[string]interface{}{"value": float64(0)}, - wantTags: map[string]string{ - "name": "intrusion", - "status_code": "ok", - "server": "host", - "entity_id": "7.1", - "status_desc": "ok", + expected: []telegraf.Metric{ + testutil.MustMetric("ipmi_sensor", + map[string]string{ + "name": "intrusion", + "status_code": "ok", + "server": "host", + "entity_id": "7.1", + "status_desc": "ok", + }, + map[string]interface{}{"value": 0.0}, + time.Unix(0, 0), + ), + }, + wantErr: false, + }, + { + name: "parse negative value", + args: args{ + hostname: "host", + cmdOut: []byte("DIMM Thrm Mrgn 1 | B0h | ok | 8.1 | -55 degrees C"), + 
measuredAt: time.Now(), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("ipmi_sensor", + map[string]string{ + "name": "dimm_thrm_mrgn_1", + "status_code": "ok", + "server": "host", + "entity_id": "8.1", + "unit": "degrees_c", + }, + map[string]interface{}{"value": -55.0}, + time.Unix(0, 0), + ), }, wantErr: false, }, } for _, tt := range tests { - var acc testutil.Accumulator - t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator if err := parseV2(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { t.Errorf("parseV2() error = %v, wantErr %v", err, tt.wantErr) } + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) - - acc.AssertContainsTaggedFields(t, "ipmi_sensor", tt.wantFields, tt.wantTags) } } From edd83381803119836ad678f0335c2692e4cfab32 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 19 May 2020 11:53:10 -0700 Subject: [PATCH 1767/1815] Close HTTP2 connections on timeout in influxdb outputs (#7517) --- internal/http.go | 11 +++++++++++ internal/http_go1.11.go | 15 --------------- internal/http_go1.12.go | 9 --------- plugins/outputs/influxdb/http.go | 8 +++++--- plugins/outputs/influxdb_v2/http.go | 7 ++++--- 5 files changed, 20 insertions(+), 30 deletions(-) delete mode 100644 internal/http_go1.11.go delete mode 100644 internal/http_go1.12.go diff --git a/internal/http.go b/internal/http.go index a44506719..04b8a9368 100644 --- a/internal/http.go +++ b/internal/http.go @@ -4,6 +4,7 @@ import ( "crypto/subtle" "net" "net/http" + "net/url" ) type BasicAuthErrorFunc func(rw http.ResponseWriter) @@ -95,3 +96,13 @@ func (h *ipRangeHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { h.onError(rw, http.StatusForbidden) } + +func OnClientError(client *http.Client, err error) { + // Close connection after a timeout error. If this is a HTTP2 + // connection this ensures that next interval a new connection will be + // used and name lookup will be performed. 
+ // https://github.com/golang/go/issues/36026 + if err, ok := err.(*url.Error); ok && err.Timeout() { + client.CloseIdleConnections() + } +} diff --git a/internal/http_go1.11.go b/internal/http_go1.11.go deleted file mode 100644 index d1a1ae31a..000000000 --- a/internal/http_go1.11.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !go1.12 - -package internal - -import "net/http" - -func CloseIdleConnections(c *http.Client) { - type closeIdler interface { - CloseIdleConnections() - } - - if tr, ok := c.Transport.(closeIdler); ok { - tr.CloseIdleConnections() - } -} diff --git a/internal/http_go1.12.go b/internal/http_go1.12.go deleted file mode 100644 index d5b1a847f..000000000 --- a/internal/http_go1.12.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.12 - -package internal - -import "net/http" - -func CloseIdleConnections(c *http.Client) { - c.CloseIdleConnections() -} diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 92498f022..19ae6f31f 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -209,6 +209,7 @@ func (c *httpClient) CreateDatabase(ctx context.Context, database string) error resp, err := c.client.Do(req.WithContext(ctx)) if err != nil { + internal.OnClientError(c.client, err) return err } defer resp.Body.Close() @@ -311,7 +312,7 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error } func (c *httpClient) writeBatch(ctx context.Context, db, rp string, metrics []telegraf.Metric) error { - url, err := makeWriteURL(c.config.URL, db, rp, c.config.Consistency) + loc, err := makeWriteURL(c.config.URL, db, rp, c.config.Consistency) if err != nil { return err } @@ -322,13 +323,14 @@ func (c *httpClient) writeBatch(ctx context.Context, db, rp string, metrics []te } defer reader.Close() - req, err := c.makeWriteRequest(url, reader) + req, err := c.makeWriteRequest(loc, reader) if err != nil { return err } resp, err := c.client.Do(req.WithContext(ctx)) if err != nil { + internal.OnClientError(c.client, err) return err } defer resp.Body.Close() @@ -505,5 +507,5 @@ func makeQueryURL(loc *url.URL) (string, error) { } func (c *httpClient) Close() { - internal.CloseIdleConnections(c.client) + c.client.CloseIdleConnections() } diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index 3034207dd..2a32c5f4c 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -210,7 +210,7 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error } func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []telegraf.Metric) error { - url, err := makeWriteURL(*c.url, c.Organization, bucket) + loc, err := makeWriteURL(*c.url, c.Organization, bucket) if err != nil { return err } @@ -221,13 +221,14 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te } defer reader.Close() - req, err := c.makeWriteRequest(url, reader) + req, err := c.makeWriteRequest(loc, reader) if err != nil { return err } resp, err := c.client.Do(req.WithContext(ctx)) if err != nil { + internal.OnClientError(c.client, err) return err } defer resp.Body.Close() @@ -347,5 +348,5 @@ func makeWriteURL(loc url.URL, org, bucket string) (string, error) { } func (c *httpClient) Close() { - internal.CloseIdleConnections(c.client) + c.client.CloseIdleConnections() } From 7681469cd548c09b5352821f99d5330c7c0714ed Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 19 May 2020 11:59:00 -0700 Subject: [PATCH 
1768/1815] Update changelog --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3bb7b2daa..000b02599 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,7 +51,9 @@ #### Bugfixes - [#7412](https://github.com/influxdata/telegraf/pull/7412): Use same timestamp for all objects in arrays in the json parser. -- [#7439](https://github.com/influxdata/telegraf/pull/7439): Handle multiple metrics with the same timestamp in dedup processor. +- [#7343](https://github.com/influxdata/telegraf/issues/7343): Handle multiple metrics with the same timestamp in dedup processor. +- [#5905](https://github.com/influxdata/telegraf/issues/5905): Fix reconnection of timed out HTTP2 connections in influxdb outputs. +- [#7468](https://github.com/influxdata/telegraf/issues/7468): Fix negative value parsing in ipmi_sensor input. ## v1.14.2 [2020-04-28] From 89f924639c8d8993aeabdb2e118f9990e0ed2edd Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 19 May 2020 12:08:13 -0700 Subject: [PATCH 1769/1815] Set 1.14.3 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 000b02599..e2abe34e7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,7 +46,7 @@ - [#7446](https://github.com/influxdata/telegraf/issues/7446): Fix gzip support in socket_listener with tcp sockets. - [#7390](https://github.com/influxdata/telegraf/issues/7390): Fix interval drift when round_interval is set in agent. -## v1.14.3 [unreleased] +## v1.14.3 [2020-05-19] #### Bugfixes From 5280023abe36348fcb9c49034fe65fb454d27c2c Mon Sep 17 00:00:00 2001 From: denzilribeiro Date: Wed, 20 May 2020 18:55:37 -0500 Subject: [PATCH 1770/1815] Fix instance name resolution in performance counter query (#7526) --- plugins/inputs/sqlserver/sqlserver.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index cb70686e2..085ff2986 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -626,10 +626,10 @@ SET @SQL = N'SELECT DISTINCT OR RTRIM(spi.object_name) LIKE ''%:Advanced Analytics'') AND TRY_CONVERT(uniqueidentifier, spi.instance_name) IS NOT NULL -- for cloud only - THEN d.name - WHEN RTRIM(object_name) LIKE ''%:Availability Replica'' + THEN ISNULL(d.name,RTRIM(spi.instance_name)) -- Elastic Pools counters exist for all databases but sys.databases only has current DB value + WHEN RTRIM(object_name) LIKE ''%:Availability Replica'' AND TRY_CONVERT(uniqueidentifier, spi.instance_name) IS NOT NULL -- for cloud only - THEN d.name + RTRIM(SUBSTRING(spi.instance_name, 37, LEN(spi.instance_name))) + THEN ISNULL(d.name,RTRIM(spi.instance_name)) + RTRIM(SUBSTRING(spi.instance_name, 37, LEN(spi.instance_name))) ELSE RTRIM(spi.instance_name) END AS instance_name,' ELSE 'RTRIM(spi.instance_name) as instance_name, ' From 10560e5a10c4b686d586f830c8ea870a9ee0fb2e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 20 May 2020 17:00:11 -0700 Subject: [PATCH 1771/1815] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e2abe34e7..6b6d4fbc9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,12 @@ - [#7446](https://github.com/influxdata/telegraf/issues/7446): Fix gzip support in socket_listener with tcp sockets. 
- [#7390](https://github.com/influxdata/telegraf/issues/7390): Fix interval drift when round_interval is set in agent. +## v1.14.4 [unreleased] + +#### Bugfixes + +- [#7325](https://github.com/influxdata/telegraf/issues/7325): Fix "cannot insert the value NULL error" with PerformanceCounters query. + ## v1.14.3 [2020-05-19] #### Bugfixes From 94c75b51a8c920e208ef34ee177ab0b7b750a998 Mon Sep 17 00:00:00 2001 From: ihard <3078342+ihard@users.noreply.github.com> Date: Thu, 21 May 2020 03:15:18 +0300 Subject: [PATCH 1772/1815] Add configurable separator graphite serializer and output (#7545) --- config/config.go | 9 + etc/telegraf.conf | 2 + plugins/outputs/graphite/README.md | 3 + plugins/outputs/graphite/graphite.go | 6 +- plugins/outputs/graphite/graphite_test.go | 272 ++++++++++++++++++ plugins/outputs/instrumental/instrumental.go | 2 +- plugins/serializers/graphite/README.md | 11 +- plugins/serializers/graphite/graphite.go | 8 +- plugins/serializers/graphite/graphite_test.go | 10 + plugins/serializers/registry.go | 12 +- 10 files changed, 327 insertions(+), 8 deletions(-) diff --git a/config/config.go b/config/config.go index 0ebb9e29b..23ba1b5b3 100644 --- a/config/config.go +++ b/config/config.go @@ -1951,6 +1951,14 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error } } + if node, ok := tbl.Fields["graphite_separator"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.GraphiteSeparator = str.Value + } + } + } + if node, ok := tbl.Fields["json_timestamp_units"]; ok { if kv, ok := node.(*ast.KeyValue); ok { if str, ok := kv.Value.(*ast.String); ok { @@ -2055,6 +2063,7 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error delete(tbl.Fields, "influx_sort_fields") delete(tbl.Fields, "influx_uint_support") delete(tbl.Fields, "graphite_tag_support") + delete(tbl.Fields, "graphite_separator") delete(tbl.Fields, "data_format") delete(tbl.Fields, "prefix") delete(tbl.Fields, "template") diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 05d7daadb..239f77c60 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -572,6 +572,8 @@ # # ## Enable Graphite tags support # # graphite_tag_support = false +# ## Character for separating metric name and field for Graphite tags +# # graphite_separator = "." # # ## timeout in seconds for the write connection to graphite # timeout = 2 diff --git a/plugins/outputs/graphite/README.md b/plugins/outputs/graphite/README.md index b7ffd361b..b6b36cfca 100644 --- a/plugins/outputs/graphite/README.md +++ b/plugins/outputs/graphite/README.md @@ -34,6 +34,9 @@ see the [Graphite Data Format](../../../docs/DATA_FORMATS_OUTPUT.md) ## Enable Graphite tags support # graphite_tag_support = false + ## Character for separating metric name and field for Graphite tags + # graphite_separator = "." + ## timeout in seconds for the write connection to graphite timeout = 2 diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go index e7d192662..4e284609d 100644 --- a/plugins/outputs/graphite/graphite.go +++ b/plugins/outputs/graphite/graphite.go @@ -17,6 +17,7 @@ import ( type Graphite struct { GraphiteTagSupport bool + GraphiteSeparator string // URL is only for backwards compatibility Servers []string Prefix string @@ -41,6 +42,9 @@ var sampleConfig = ` ## Enable Graphite tags support # graphite_tag_support = false + ## Character for separating metric name and field for Graphite tags + # graphite_separator = "." 
+ ## Graphite templates patterns ## 1. Template for cpu ## 2. Template for disk* @@ -145,7 +149,7 @@ func checkEOF(conn net.Conn) { func (g *Graphite) Write(metrics []telegraf.Metric) error { // Prepare data var batch []byte - s, err := serializers.NewGraphiteSerializer(g.Prefix, g.Template, g.GraphiteTagSupport, g.Templates) + s, err := serializers.NewGraphiteSerializer(g.Prefix, g.Template, g.GraphiteTagSupport, g.GraphiteSeparator, g.Templates) if err != nil { return err } diff --git a/plugins/outputs/graphite/graphite_test.go b/plugins/outputs/graphite/graphite_test.go index ad76d45b5..82aad0d7d 100644 --- a/plugins/outputs/graphite/graphite_test.go +++ b/plugins/outputs/graphite/graphite_test.go @@ -98,6 +98,126 @@ func TestGraphiteOK(t *testing.T) { g.Close() } +func TestGraphiteOkWithSeparatorDot(t *testing.T) { + var wg sync.WaitGroup + // Start TCP server + wg.Add(1) + t.Log("Starting server") + TCPServer1(t, &wg) + + // Init plugin + g := Graphite{ + Prefix: "my.prefix", + GraphiteSeparator: ".", + } + + // Init metrics + m1, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"myfield": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m2, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m3, _ := metric.New( + "my_measurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + // Prepare point list + metrics := []telegraf.Metric{m1} + metrics2 := []telegraf.Metric{m2, m3} + err1 := g.Connect() + require.NoError(t, err1) + // Send Data + t.Log("Send first data") + err2 := g.Write(metrics) + require.NoError(t, err2) + + // Waiting TCPserver, should reconnect and resend + wg.Wait() + t.Log("Finished Waiting for first data") + var wg2 sync.WaitGroup + // Start TCP server + wg2.Add(1) + TCPServer2(t, &wg2) + //Write but expect an error, but reconnect + err3 := g.Write(metrics2) + t.Log("Finished writing second data, it should have reconnected automatically") + + require.NoError(t, err3) + t.Log("Finished writing third data") + wg2.Wait() + g.Close() +} + +func TestGraphiteOkWithSeparatorUnderscore(t *testing.T) { + var wg sync.WaitGroup + // Start TCP server + wg.Add(1) + t.Log("Starting server") + TCPServer1(t, &wg) + + // Init plugin + g := Graphite{ + Prefix: "my.prefix", + GraphiteSeparator: "_", + } + + // Init metrics + m1, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"myfield": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m2, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m3, _ := metric.New( + "my_measurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + // Prepare point list + metrics := []telegraf.Metric{m1} + metrics2 := []telegraf.Metric{m2, m3} + err1 := g.Connect() + require.NoError(t, err1) + // Send Data + t.Log("Send first data") + err2 := g.Write(metrics) + require.NoError(t, err2) + + // Waiting TCPserver, should reconnect and resend + wg.Wait() + t.Log("Finished 
Waiting for first data") + var wg2 sync.WaitGroup + // Start TCP server + wg2.Add(1) + TCPServer2(t, &wg2) + //Write but expect an error, but reconnect + err3 := g.Write(metrics2) + t.Log("Finished writing second data, it should have reconnected automatically") + + require.NoError(t, err3) + t.Log("Finished writing third data") + wg2.Wait() + g.Close() +} + func TestGraphiteOKWithMultipleTemplates(t *testing.T) { var wg sync.WaitGroup // Start TCP server @@ -222,6 +342,128 @@ func TestGraphiteOkWithTags(t *testing.T) { g.Close() } +func TestGraphiteOkWithTagsAndSeparatorDot(t *testing.T) { + var wg sync.WaitGroup + // Start TCP server + wg.Add(1) + t.Log("Starting server") + TCPServer1WithTags(t, &wg) + + // Init plugin + g := Graphite{ + Prefix: "my.prefix", + GraphiteTagSupport: true, + GraphiteSeparator: ".", + } + + // Init metrics + m1, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"myfield": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m2, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m3, _ := metric.New( + "my_measurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + // Prepare point list + metrics := []telegraf.Metric{m1} + metrics2 := []telegraf.Metric{m2, m3} + err1 := g.Connect() + require.NoError(t, err1) + // Send Data + t.Log("Send first data") + err2 := g.Write(metrics) + require.NoError(t, err2) + + // Waiting TCPserver, should reconnect and resend + wg.Wait() + t.Log("Finished Waiting for first data") + var wg2 sync.WaitGroup + // Start TCP server + wg2.Add(1) + TCPServer2WithTags(t, &wg2) + //Write but expect an error, but reconnect + err3 := g.Write(metrics2) + t.Log("Finished writing second data, it should have reconnected automatically") + + require.NoError(t, err3) + t.Log("Finished writing third data") + wg2.Wait() + g.Close() +} + +func TestGraphiteOkWithTagsAndSeparatorUnderscore(t *testing.T) { + var wg sync.WaitGroup + // Start TCP server + wg.Add(1) + t.Log("Starting server") + TCPServer1WithTagsSeparatorUnderscore(t, &wg) + + // Init plugin + g := Graphite{ + Prefix: "my_prefix", + GraphiteTagSupport: true, + GraphiteSeparator: "_", + } + + // Init metrics + m1, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"myfield": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m2, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m3, _ := metric.New( + "my_measurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + // Prepare point list + metrics := []telegraf.Metric{m1} + metrics2 := []telegraf.Metric{m2, m3} + err1 := g.Connect() + require.NoError(t, err1) + // Send Data + t.Log("Send first data") + err2 := g.Write(metrics) + require.NoError(t, err2) + + // Waiting TCPserver, should reconnect and resend + wg.Wait() + t.Log("Finished Waiting for first data") + var wg2 sync.WaitGroup + // Start TCP server + wg2.Add(1) + TCPServer2WithTagsSeparatorUnderscore(t, 
&wg2) + //Write but expect an error, but reconnect + err3 := g.Write(metrics2) + t.Log("Finished writing second data, it should have reconnected automatically") + + require.NoError(t, err3) + t.Log("Finished writing third data") + wg2.Wait() + g.Close() +} + func TCPServer1(t *testing.T, wg *sync.WaitGroup) { tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") go func() { @@ -311,3 +553,33 @@ func TCPServer2WithTags(t *testing.T, wg *sync.WaitGroup) { tcpServer.Close() }() } + +func TCPServer1WithTagsSeparatorUnderscore(t *testing.T, wg *sync.WaitGroup) { + tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + go func() { + defer wg.Done() + conn, _ := (tcpServer).Accept() + reader := bufio.NewReader(conn) + tp := textproto.NewReader(reader) + data1, _ := tp.ReadLine() + assert.Equal(t, "my_prefix_mymeasurement_myfield;host=192.168.0.1 3.14 1289430000", data1) + conn.Close() + tcpServer.Close() + }() +} + +func TCPServer2WithTagsSeparatorUnderscore(t *testing.T, wg *sync.WaitGroup) { + tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + go func() { + defer wg.Done() + conn2, _ := (tcpServer).Accept() + reader := bufio.NewReader(conn2) + tp := textproto.NewReader(reader) + data2, _ := tp.ReadLine() + assert.Equal(t, "my_prefix_mymeasurement;host=192.168.0.1 3.14 1289430000", data2) + data3, _ := tp.ReadLine() + assert.Equal(t, "my_prefix_my_measurement;host=192.168.0.1 3.14 1289430000", data3) + conn2.Close() + tcpServer.Close() + }() +} diff --git a/plugins/outputs/instrumental/instrumental.go b/plugins/outputs/instrumental/instrumental.go index 7284c0ca1..e5decbf7f 100644 --- a/plugins/outputs/instrumental/instrumental.go +++ b/plugins/outputs/instrumental/instrumental.go @@ -86,7 +86,7 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error { } } - s, err := serializers.NewGraphiteSerializer(i.Prefix, i.Template, false, i.Templates) + s, err := serializers.NewGraphiteSerializer(i.Prefix, i.Template, false, ".", i.Templates) if err != nil { return err } diff --git a/plugins/serializers/graphite/README.md b/plugins/serializers/graphite/README.md index 74bde2b5d..f6fd0c2cc 100644 --- a/plugins/serializers/graphite/README.md +++ b/plugins/serializers/graphite/README.md @@ -22,7 +22,7 @@ method is used, otherwise the [Template Pattern](templates) is used. prefix = "telegraf" ## Graphite template pattern template = "host.tags.measurement.field" - + ## Graphite templates patterns ## 1. Template for cpu ## 2. Template for disk* @@ -35,6 +35,8 @@ method is used, otherwise the [Template Pattern](templates) is used. ## Support Graphite tags, recommended to enable when using Graphite 1.1 or later. # graphite_tag_support = false + ## Character for separating metric name and field for Graphite tags + # graphite_separator = "." 
``` #### graphite_tag_support @@ -54,5 +56,12 @@ cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 145532 cpu.usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690 cpu.usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690 ``` +With the option `graphite_separator` set to "_" +``` +cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758 +=> +cpu_usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690 +cpu_usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690 +``` [templates]: /docs/TEMPLATE_PATTERN.md diff --git a/plugins/serializers/graphite/graphite.go b/plugins/serializers/graphite/graphite.go index 2f6cd8da5..e580409fe 100644 --- a/plugins/serializers/graphite/graphite.go +++ b/plugins/serializers/graphite/graphite.go @@ -39,6 +39,7 @@ type GraphiteSerializer struct { Prefix string Template string TagSupport bool + Separator string Templates []*GraphiteTemplate } @@ -55,7 +56,7 @@ func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) { if fieldValue == "" { continue } - bucket := SerializeBucketNameWithTags(metric.Name(), metric.Tags(), s.Prefix, fieldName) + bucket := SerializeBucketNameWithTags(metric.Name(), metric.Tags(), s.Prefix, s.Separator, fieldName) metricString := fmt.Sprintf("%s %s %d\n", // insert "field" section of template bucket, @@ -246,6 +247,7 @@ func SerializeBucketNameWithTags( measurement string, tags map[string]string, prefix string, + separator string, field string, ) string { var out string @@ -259,13 +261,13 @@ func SerializeBucketNameWithTags( sort.Strings(tagsCopy) if prefix != "" { - out = prefix + "." + out = prefix + separator } out += measurement if field != "value" { - out += "." + field + out += separator + field } out = sanitize(out) diff --git a/plugins/serializers/graphite/graphite_test.go b/plugins/serializers/graphite/graphite_test.go index e50b7292b..b6fcad696 100644 --- a/plugins/serializers/graphite/graphite_test.go +++ b/plugins/serializers/graphite/graphite_test.go @@ -102,6 +102,7 @@ func TestSerializeMetricNoHostWithTagSupport(t *testing.T) { s := GraphiteSerializer{ TagSupport: true, + Separator: ".", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") @@ -251,6 +252,7 @@ func TestSerializeMetricHostWithTagSupport(t *testing.T) { s := GraphiteSerializer{ TagSupport: true, + Separator: ".", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") @@ -305,6 +307,7 @@ func TestSerializeValueFieldWithTagSupport(t *testing.T) { s := GraphiteSerializer{ TagSupport: true, + Separator: ".", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") @@ -380,6 +383,7 @@ func TestSerializeValueStringWithTagSupport(t *testing.T) { s := GraphiteSerializer{ TagSupport: true, + Separator: ".", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") @@ -433,6 +437,7 @@ func TestSerializeValueBooleanWithTagSupport(t *testing.T) { s := GraphiteSerializer{ TagSupport: true, + Separator: ".", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") @@ -505,6 +510,7 @@ func TestSerializeFieldWithSpacesWithTagSupport(t *testing.T) { s := GraphiteSerializer{ TagSupport: true, + Separator: ".", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") @@ -558,6 +564,7 @@ func TestSerializeTagWithSpacesWithTagSupport(t *testing.T) { s := GraphiteSerializer{ TagSupport: true, + 
Separator: ".", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") @@ -668,6 +675,7 @@ func TestSerializeMetricPrefixWithTagSupport(t *testing.T) { s := GraphiteSerializer{ Prefix: "prefix", TagSupport: true, + Separator: ".", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") @@ -973,6 +981,7 @@ func TestCleanWithTagsSupport(t *testing.T) { s := GraphiteSerializer{ TagSupport: true, + Separator: ".", } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -1033,6 +1042,7 @@ func TestSerializeBatchWithTagsSupport(t *testing.T) { s := GraphiteSerializer{ TagSupport: true, + Separator: ".", } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index 17de980fd..e5065a93c 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -51,6 +51,9 @@ type Config struct { // Support tags in graphite protocol GraphiteTagSupport bool `toml:"graphite_tag_support"` + // Character for separating metric name and field for Graphite tags + GraphiteSeparator string `toml:"graphite_separator"` + // Maximum line length in bytes; influx format only InfluxMaxLineBytes int `toml:"influx_max_line_bytes"` @@ -107,7 +110,7 @@ func NewSerializer(config *Config) (Serializer, error) { case "influx": serializer, err = NewInfluxSerializerConfig(config) case "graphite": - serializer, err = NewGraphiteSerializer(config.Prefix, config.Template, config.GraphiteTagSupport, config.Templates) + serializer, err = NewGraphiteSerializer(config.Prefix, config.Template, config.GraphiteTagSupport, config.GraphiteSeparator, config.Templates) case "json": serializer, err = NewJsonSerializer(config.TimestampUnits) case "splunkmetric": @@ -191,7 +194,7 @@ func NewInfluxSerializer() (Serializer, error) { return influx.NewSerializer(), nil } -func NewGraphiteSerializer(prefix, template string, tag_support bool, templates []string) (Serializer, error) { +func NewGraphiteSerializer(prefix, template string, tag_support bool, separator string, templates []string) (Serializer, error) { graphiteTemplates, defaultTemplate, err := graphite.InitGraphiteTemplates(templates) if err != nil { @@ -202,10 +205,15 @@ func NewGraphiteSerializer(prefix, template string, tag_support bool, templates template = defaultTemplate } + if separator == "" { + separator = "." + } + return &graphite.GraphiteSerializer{ Prefix: prefix, Template: template, TagSupport: tag_support, + Separator: separator, Templates: graphiteTemplates, }, nil } From c3a54f4fb8fa76fff57dfacf0298c2e274c59cf9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 20 May 2020 17:16:01 -0700 Subject: [PATCH 1773/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b6d4fbc9..544c17d93 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,7 @@ - [#7321](https://github.com/influxdata/telegraf/pull/7321): Add additional fields to mongodb input. - [#7491](https://github.com/influxdata/telegraf/pull/7491): Add authentication support to the http_response input plugin. - [#7503](https://github.com/influxdata/telegraf/pull/7503): Add truncate_tags setting to wavefront output. +- [#7545](https://github.com/influxdata/telegraf/pull/7545): Add configurable separator graphite serializer and output. 
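The [#7545] entry above corresponds to the `graphite_separator` option introduced in patch 1772. As a quick illustration only, here is a minimal sketch of the resulting behaviour, assuming the `GraphiteSerializer` struct fields, the `metric.New` helper, and the package paths exactly as they appear in the diffs above (this sketch is not part of any patch in this series):

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/plugins/serializers/graphite"
)

func main() {
	// Build a sample metric the same way the graphite output tests above do.
	m, err := metric.New(
		"mymeasurement",
		map[string]string{"host": "192.168.0.1"},
		map[string]interface{}{"myfield": 3.14},
		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
	)
	if err != nil {
		panic(err)
	}

	// With tag support enabled, Separator joins the prefix, measurement
	// name, and field name when building the Graphite bucket.
	s := graphite.GraphiteSerializer{
		Prefix:     "my_prefix",
		TagSupport: true,
		Separator:  "_",
	}
	buf, err := s.Serialize(m)
	if err != nil {
		panic(err)
	}
	// Expected output, per the TCP server assertions in graphite_test.go:
	// my_prefix_mymeasurement_myfield;host=192.168.0.1 3.14 1289430000
	fmt.Print(string(buf))
}
```

With the default separator "." the same metric would serialize as `my_prefix.mymeasurement.myfield;host=192.168.0.1 ...`, which matches the backwards-compatible behaviour the registry code above falls back to when `graphite_separator` is unset.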
#### Bugfixes From f10c8ff92d6e4aa1a97534d768e0370c3b176f2a Mon Sep 17 00:00:00 2001 From: debu99 Date: Thu, 21 May 2020 08:34:50 +0800 Subject: [PATCH 1774/1815] Add cluster state integer to mongodb input (#7489) --- plugins/inputs/mongodb/README.md | 3 ++- plugins/inputs/mongodb/mongodb_data.go | 1 + plugins/inputs/mongodb/mongodb_data_test.go | 1 + plugins/inputs/mongodb/mongostat.go | 4 ++++ 4 files changed, 8 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index 1bbc05847..5e0f304d6 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -140,6 +140,7 @@ by running Telegraf with the `--debug` argument. - repl_queries (integer) - repl_updates (integer) - repl_oplog_window_sec (integer) + - repl_state (integer) - resident_megabytes (integer) - state (string) - storage_freelist_search_bucket_exhausted (integer) @@ -261,7 +262,7 @@ by running Telegraf with the `--debug` argument. ### Example Output: ``` mongodb,hostname=127.0.0.1:27017 active_reads=3i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=87210i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=0i,assert_warning=0i,available_reads=125i,available_writes=128i,commands=218126i,commands_per_sec=1876i,connections_available=838853i,connections_current=7i,connections_total_created=8i,count_command_failed=0i,count_command_total=7i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=87190i,document_deleted=0i,document_inserted=0i,document_returned=7i,document_updated=43595i,find_and_modify_command_failed=0i,find_and_modify_command_total=43595i,find_command_failed=0i,find_command_total=348819i,flushes=1i,flushes_per_sec=0i,flushes_total_time_ns=5000000i,get_more_command_failed=0i,get_more_command_total=0i,getmores=7i,getmores_per_sec=1i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=44179i,latency_commands_count=122i,latency_reads=36662189i,latency_reads_count=523229i,latency_writes=6768713i,latency_writes_count=87190i,net_in_bytes=837378i,net_in_bytes_count=97692502i,net_out_bytes=690836i,net_out_bytes_count=75377383i,open_connections=7i,operation_scan_and_order=87193i,operation_write_conflicts=7i,page_faults=0i,percent_cache_dirty=0.9,percent_cache_used=1,queries=348816i,queries_per_sec=2988i,queued_reads=0i,queued_writes=0i,resident_megabytes=77i,storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=280136i,tcmalloc_current_allocated_bytes=77677288i,tcmalloc_current_total_thread_cache_bytes=1222608i,tcmalloc_heap_size=142659584i,tcmalloc_max_total_thread_cache_bytes=260046848i,tcmalloc_pageheap_commit_count=1898i,tcmalloc_pageheap_committed_bytes=130084864i,tcmalloc_pageheap_decommit_count=889i,tcmalloc_pageheap_free_bytes=50610176i,tcmalloc_pageheap_reserve_count=50i,tcmalloc_pageheap_scavenge_count=884i,tcmalloc_pageheap_total_commit_bytes=13021937664i,tcmalloc_pageheap_total_decommit_bytes=12891852800i,tcmalloc_pageheap_total_reserve_bytes=142659584i,tcmalloc_pageheap_unmapped_bytes=12574720i,tcmalloc_spinlock_total_delay_ns=9767500i,tcmalloc_thread_cache_free_bytes=1222608i,tcmalloc_total_free_bytes=17974
00i,tcmalloc_transfer_cache_free_bytes=294656i,total_available=0i,total_created=0i,total_docs_scanned=43595i,total_in_use=0i,total_keys_scanned=130805i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=0i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=43595i,updates=43595i,updates_per_sec=372i,uptime_ns=60023000000i,version="3.6.17",vsize_megabytes=1048i,wtcache_app_threads_page_read_count=108i,wtcache_app_threads_page_read_time=25995i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=2487250i,wtcache_bytes_written_from=74i,wtcache_current_bytes=5014530i,wtcache_internal_pages_evicted=0i,wtcache_max_bytes_configured=505413632i,wtcache_modified_pages_evicted=0i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_pages_read_into=139i,wtcache_pages_requested_from=699135i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=4797426i,wtcache_unmodified_pages_evicted=0i,wtcache_worker_thread_evictingpages=0i 1586379818000000000 -mongodb,hostname=127.0.0.1:27017,node_type=SEC,rs_name=rs0 active_reads=1i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=1i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=79i,assert_warning=0i,available_reads=127i,available_writes=128i,commands=1121855i,commands_per_sec=10i,connections_available=51183i,connections_current=17i,connections_total_created=557i,count_command_failed=0i,count_command_total=46307i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=28i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=0i,document_deleted=0i,document_inserted=0i,document_returned=2248129i,document_updated=0i,find_and_modify_command_failed=0i,find_and_modify_command_total=0i,find_command_failed=2i,find_command_total=8764i,flushes=7850i,flushes_per_sec=0i,flushes_total_time_ns=4535446000000i,get_more_command_failed=0i,get_more_command_total=1993i,getmores=2018i,getmores_per_sec=0i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=112011949i,latency_commands_count=1072472i,latency_reads=1877142443i,latency_reads_count=57086i,latency_writes=0i,latency_writes_count=0i,member_status="SEC",net_in_bytes=1212i,net_in_bytes_count=263928689i,net_out_bytes=41051i,net_out_bytes_count=2475389483i,open_connections=17i,operation_scan_and_order=34i,operation_write_conflicts=0i,page_faults=317i,percent_cache_dirty=1.6,percent_cache_used=73,queries=8764i,queries_per_sec=0i,queued_reads=0i,queued_writes=0i,repl_apply_batches_num=17839419i,repl_apply_batches_total_millis=399929i,repl_apply_ops=23355263i,repl_buffer_count=0i,repl_buffer_size_bytes=0i,repl_commands=11i,repl_commands_per_sec=0i,repl_deletes=440608i,repl_deletes_per_sec=0i,repl_executor_pool_in_progress_count=0i,repl_executor_queues_network_in_progress=0i,repl_executor_queues_sleepers=4i,repl_executor_unsignaled_events=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=1875729i,repl_inserts_per_sec=0i,repl_lag=0i,repl_network_bytes=39122199371i,repl_network_getmores_num=34908797i,repl_network_getmores_total_millis=434805356i,repl_network_ops=23199086i,repl_oplog_window_sec=619292i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=21034729i,repl_updates_per_sec=38i,resident_megabytes=6721i,state
="SECONDARY",storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=358512400i,tcmalloc_current_allocated_bytes=5427379424i,tcmalloc_current_total_thread_cache_bytes=70349552i,tcmalloc_heap_size=10199310336i,tcmalloc_max_total_thread_cache_bytes=1073741824i,tcmalloc_pageheap_commit_count=790819i,tcmalloc_pageheap_committed_bytes=7064821760i,tcmalloc_pageheap_decommit_count=533347i,tcmalloc_pageheap_free_bytes=1207816192i,tcmalloc_pageheap_reserve_count=7706i,tcmalloc_pageheap_scavenge_count=426235i,tcmalloc_pageheap_total_commit_bytes=116127649792i,tcmalloc_pageheap_total_decommit_bytes=109062828032i,tcmalloc_pageheap_total_reserve_bytes=10199310336i,tcmalloc_pageheap_unmapped_bytes=3134488576i,tcmalloc_spinlock_total_delay_ns=2518474348i,tcmalloc_thread_cache_free_bytes=70349552i,tcmalloc_total_free_bytes=429626144i,tcmalloc_transfer_cache_free_bytes=764192i,total_available=0i,total_created=0i,total_docs_scanned=735004782i,total_in_use=0i,total_keys_scanned=6188216i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=7892i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=0i,updates=0i,updates_per_sec=0i,uptime_ns=473590288000000i,version="3.6.17",vsize_megabytes=11136i,wtcache_app_threads_page_read_count=11467625i,wtcache_app_threads_page_read_time=1700336840i,wtcache_app_threads_page_write_count=13268184i,wtcache_bytes_read_into=348022587843i,wtcache_bytes_written_from=322571702254i,wtcache_current_bytes=5509459274i,wtcache_internal_pages_evicted=109108i,wtcache_max_bytes_configured=7547650048i,wtcache_modified_pages_evicted=911196i,wtcache_pages_evicted_by_app_thread=17366i,wtcache_pages_queued_for_eviction=16572754i,wtcache_pages_read_into=11689764i,wtcache_pages_requested_from=499825861i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=117487510i,wtcache_unmodified_pages_evicted=11058458i,wtcache_worker_thread_evictingpages=11907226i 1586379707000000000 +mongodb,hostname=127.0.0.1:27017,node_type=SEC,rs_name=rs0 
active_reads=1i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=1i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=79i,assert_warning=0i,available_reads=127i,available_writes=128i,commands=1121855i,commands_per_sec=10i,connections_available=51183i,connections_current=17i,connections_total_created=557i,count_command_failed=0i,count_command_total=46307i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=28i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=0i,document_deleted=0i,document_inserted=0i,document_returned=2248129i,document_updated=0i,find_and_modify_command_failed=0i,find_and_modify_command_total=0i,find_command_failed=2i,find_command_total=8764i,flushes=7850i,flushes_per_sec=0i,flushes_total_time_ns=4535446000000i,get_more_command_failed=0i,get_more_command_total=1993i,getmores=2018i,getmores_per_sec=0i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=112011949i,latency_commands_count=1072472i,latency_reads=1877142443i,latency_reads_count=57086i,latency_writes=0i,latency_writes_count=0i,member_status="SEC",net_in_bytes=1212i,net_in_bytes_count=263928689i,net_out_bytes=41051i,net_out_bytes_count=2475389483i,open_connections=17i,operation_scan_and_order=34i,operation_write_conflicts=0i,page_faults=317i,percent_cache_dirty=1.6,percent_cache_used=73,queries=8764i,queries_per_sec=0i,queued_reads=0i,queued_writes=0i,repl_apply_batches_num=17839419i,repl_apply_batches_total_millis=399929i,repl_apply_ops=23355263i,repl_buffer_count=0i,repl_buffer_size_bytes=0i,repl_commands=11i,repl_commands_per_sec=0i,repl_deletes=440608i,repl_deletes_per_sec=0i,repl_executor_pool_in_progress_count=0i,repl_executor_queues_network_in_progress=0i,repl_executor_queues_sleepers=4i,repl_executor_unsignaled_events=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=1875729i,repl_inserts_per_sec=0i,repl_lag=0i,repl_network_bytes=39122199371i,repl_network_getmores_num=34908797i,repl_network_getmores_total_millis=434805356i,repl_network_ops=23199086i,repl_oplog_window_sec=619292i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=21034729i,repl_updates_per_sec=38i,repl_state=2,resident_megabytes=6721i,state="SECONDARY",storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=358512400i,tcmalloc_current_allocated_bytes=5427379424i,tcmalloc_current_total_thread_cache_bytes=70349552i,tcmalloc_heap_size=10199310336i,tcmalloc_max_total_thread_cache_bytes=1073741824i,tcmalloc_pageheap_commit_count=790819i,tcmalloc_pageheap_committed_bytes=7064821760i,tcmalloc_pageheap_decommit_count=533347i,tcmalloc_pageheap_free_bytes=1207816192i,tcmalloc_pageheap_reserve_count=7706i,tcmalloc_pageheap_scavenge_count=426235i,tcmalloc_pageheap_total_commit_bytes=116127649792i,tcmalloc_pageheap_total_decommit_bytes=109062828032i,tcmalloc_pageheap_total_reserve_bytes=10199310336i,tcmalloc_pageheap_unmapped_bytes=3134488576i,tcmalloc_spinlock_total_delay_ns=2518474348i,tcmalloc_thread_cache_free_bytes=70349552i,tcmalloc_total_free_bytes=429626144i,tcmalloc_transfer_cache_free_bytes=764192i,total_available=0i,total_created=0i,total_docs_scanned=735004782i,total_in_use=0i,total_keys_scanned=6188216i,total_refreshing=0i,total_tickets_reads=128i,total_t
ickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=7892i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=0i,updates=0i,updates_per_sec=0i,uptime_ns=473590288000000i,version="3.6.17",vsize_megabytes=11136i,wtcache_app_threads_page_read_count=11467625i,wtcache_app_threads_page_read_time=1700336840i,wtcache_app_threads_page_write_count=13268184i,wtcache_bytes_read_into=348022587843i,wtcache_bytes_written_from=322571702254i,wtcache_current_bytes=5509459274i,wtcache_internal_pages_evicted=109108i,wtcache_max_bytes_configured=7547650048i,wtcache_modified_pages_evicted=911196i,wtcache_pages_evicted_by_app_thread=17366i,wtcache_pages_queued_for_eviction=16572754i,wtcache_pages_read_into=11689764i,wtcache_pages_requested_from=499825861i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=117487510i,wtcache_unmodified_pages_evicted=11058458i,wtcache_worker_thread_evictingpages=11907226i 1586379707000000000 mongodb_db_stats,db_name=admin,hostname=127.0.0.1:27017 avg_obj_size=241,collections=2i,data_size=723i,index_size=49152i,indexes=3i,num_extents=0i,objects=3i,ok=1i,storage_size=53248i,type="db_stat" 1547159491000000000 mongodb_db_stats,db_name=local,hostname=127.0.0.1:27017 avg_obj_size=813.9705882352941,collections=6i,data_size=55350i,index_size=102400i,indexes=5i,num_extents=0i,objects=68i,ok=1i,storage_size=204800i,type="db_stat" 1547159491000000000 mongodb_col_stats,collection=foo,db_name=local,hostname=127.0.0.1:27017 size=375005928i,avg_obj_size=5494,type="col_stat",storage_size=249307136i,total_index_size=2138112i,ok=1i,count=68251i 1547159491000000000 diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index 6c0884a46..7659a1a35 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -147,6 +147,7 @@ var DefaultReplStats = map[string]string{ "repl_commands_per_sec": "CommandR", "member_status": "NodeType", "state": "NodeState", + "repl_state": "NodeStateInt", "repl_lag": "ReplLag", "repl_network_bytes": "ReplNetworkBytes", "repl_network_getmores_num": "ReplNetworkGetmoresNum", diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index 706cc7e6f..38e6cd6ad 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -437,6 +437,7 @@ func TestStateTag(t *testing.T) { "repl_queries_per_sec": int64(0), "repl_updates": int64(0), "repl_updates_per_sec": int64(0), + "repl_state": int64(0), "resident_megabytes": int64(0), "state": "PRIMARY", "storage_freelist_search_bucket_exhausted": int64(0), diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 5d64d7ab4..70a0edf09 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -734,6 +734,7 @@ type StatLine struct { ReplSetName string NodeType string NodeState string + NodeStateInt int64 // Replicated Metrics fields ReplNetworkBytes int64 @@ -1258,6 +1259,9 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec if member.Name == myName { // Store my state string returnVal.NodeState = member.StateStr + // Store my state integer + returnVal.NodeStateInt = member.State + if member.State == 1 { // I'm the master returnVal.ReplLag = 0 From 58ad64a43bd15da9d78bfd3ac3ffab605c4d7459 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 20 May 2020 17:35:36 -0700 Subject: [PATCH 1775/1815] Update changelog --- CHANGELOG.md | 1 + 
1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 544c17d93..6fd5ce580 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ - [#7491](https://github.com/influxdata/telegraf/pull/7491): Add authentication support to the http_response input plugin. - [#7503](https://github.com/influxdata/telegraf/pull/7503): Add truncate_tags setting to wavefront output. - [#7545](https://github.com/influxdata/telegraf/pull/7545): Add configurable separator graphite serializer and output. +- [#7489](https://github.com/influxdata/telegraf/pull/7489): Add cluster state integer to mongodb input. #### Bugfixes From d4e9fd15ce97467ca1ee9bac6e77d5df95e8628d Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Thu, 21 May 2020 16:53:07 -0400 Subject: [PATCH 1776/1815] fix randomly failing CI test (#7514) --- plugins/inputs/execd/execd.go | 32 +++++++++++++-------------- plugins/inputs/execd/execd_posix.go | 9 ++++++++ plugins/inputs/execd/execd_windows.go | 5 +++++ 3 files changed, 30 insertions(+), 16 deletions(-) diff --git a/plugins/inputs/execd/execd.go b/plugins/inputs/execd/execd.go index 1ea136a3d..ca9e589d9 100644 --- a/plugins/inputs/execd/execd.go +++ b/plugins/inputs/execd/execd.go @@ -12,7 +12,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers/influx" @@ -47,14 +46,14 @@ type Execd struct { Signal string RestartDelay config.Duration - acc telegraf.Accumulator - cmd *exec.Cmd - parser parsers.Parser - stdin io.WriteCloser - stdout io.ReadCloser - stderr io.ReadCloser - cancel context.CancelFunc - wg sync.WaitGroup + acc telegraf.Accumulator + cmd *exec.Cmd + parser parsers.Parser + stdin io.WriteCloser + stdout io.ReadCloser + stderr io.ReadCloser + cancel context.CancelFunc + mainLoopWg sync.WaitGroup } func (e *Execd) SampleConfig() string { @@ -76,7 +75,7 @@ func (e *Execd) Start(acc telegraf.Accumulator) error { return fmt.Errorf("FATAL no command specified") } - e.wg.Add(1) // for the main loop + e.mainLoopWg.Add(1) ctx, cancel := context.WithCancel(context.Background()) e.cancel = cancel @@ -86,16 +85,19 @@ func (e *Execd) Start(acc telegraf.Accumulator) error { } go func() { - e.cmdLoop(ctx) - e.wg.Done() + if err := e.cmdLoop(ctx); err != nil { + log.Printf("Process quit with message: %s", err.Error()) + } + e.mainLoopWg.Done() }() return nil } func (e *Execd) Stop() { + // don't try to stop before all stream readers have started. e.cancel() - e.wg.Wait() + e.mainLoopWg.Wait() } // cmdLoop watches an already running process, restarting it when appropriate. 
@@ -112,9 +114,7 @@ func (e *Execd) cmdLoop(ctx context.Context) error { case <-ctx.Done(): if e.stdin != nil { e.stdin.Close() - // Immediately exit process but with a graceful shutdown - // period before killing - internal.WaitTimeout(e.cmd, 200*time.Millisecond) + gracefulStop(e.cmd, 5*time.Second) } return nil case err := <-done: diff --git a/plugins/inputs/execd/execd_posix.go b/plugins/inputs/execd/execd_posix.go index d2389c52f..cc3a8e8bb 100644 --- a/plugins/inputs/execd/execd_posix.go +++ b/plugins/inputs/execd/execd_posix.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "os" + "os/exec" "syscall" "time" @@ -38,3 +39,11 @@ func (e *Execd) Gather(acc telegraf.Accumulator) error { return nil } + +func gracefulStop(cmd *exec.Cmd, timeout time.Duration) { + cmd.Process.Signal(syscall.SIGTERM) + go func() { + <-time.NewTimer(timeout).C + cmd.Process.Kill() + }() +} diff --git a/plugins/inputs/execd/execd_windows.go b/plugins/inputs/execd/execd_windows.go index c0dc0e846..82935d4ac 100644 --- a/plugins/inputs/execd/execd_windows.go +++ b/plugins/inputs/execd/execd_windows.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "os" + "os/exec" "time" "github.com/influxdata/telegraf" @@ -31,3 +32,7 @@ func (e *Execd) Gather(acc telegraf.Accumulator) error { return nil } + +func gracefulStop(cmd *exec.Cmd, timeout time.Duration) { + cmd.Process.Kill() +} From a4459bd57a9d9096eb6ba3db175578d619dbfd0e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 21 May 2020 23:39:37 -0700 Subject: [PATCH 1777/1815] Use updated clock package to resolve test failures (#7516) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9645c925f..29427b02f 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/armon/go-metrics v0.3.0 // indirect github.com/aws/aws-sdk-go v1.30.9 - github.com/benbjohnson/clock v1.0.0 + github.com/benbjohnson/clock v1.0.2 github.com/bitly/go-hostpool v0.1.0 // indirect github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect github.com/caio/go-tdigest v2.3.0+incompatible // indirect diff --git a/go.sum b/go.sum index 53073401d..76862fcfe 100644 --- a/go.sum +++ b/go.sum @@ -112,8 +112,8 @@ github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUq github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= github.com/aws/aws-sdk-go v1.30.9 h1:DntpBUKkchINPDbhEzDRin1eEn1TG9TZFlzWPf0i8to= github.com/aws/aws-sdk-go v1.30.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/benbjohnson/clock v1.0.0 h1:78Jk/r6m4wCi6sndMpty7A//t4dw/RW5fV4ZgDVfX1w= -github.com/benbjohnson/clock v1.0.0/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.0.2 h1:Z0CN0Yb4ig9sGPXkvAQcGJfnrrMQ5QYLCMPRi9iD7YE= +github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= From f975b944041337d864d1c147ef8523f3a5c1cf1d Mon Sep 17 00:00:00 2001 From: Josh Soref Date: Fri, 22 May 2020 03:05:10 -0400 Subject: [PATCH 1778/1815] Fix typos in sqlserver input (#7524) --- plugins/inputs/sqlserver/sqlserver.go | 10 +++++----- 1 file changed, 5 
insertions(+), 5 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 085ff2986..c69a0fb7c 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -767,7 +767,7 @@ FROM rgwg.total_queued_request_count AS "Queued Request Count", rgwg.total_cpu_limit_violation_count AS "CPU Limit Violation Count", rgwg.total_cpu_usage_ms AS "CPU Usage (time)", - ' + CASE WHEN SERVERPROPERTY('ProductMajorVersion') > 10 THEN 'rgwg.total_cpu_usage_preemptive_ms AS "Premptive CPU Usage (time)",' ELSE '' END + ' + ' + CASE WHEN SERVERPROPERTY('ProductMajorVersion') > 10 THEN 'rgwg.total_cpu_usage_preemptive_ms AS "Preemptive CPU Usage (time)",' ELSE '' END + ' rgwg.total_lock_wait_count AS "Lock Wait Count", rgwg.total_lock_wait_time_ms AS "Lock Wait Time", rgwg.total_reduced_memgrant_count AS "Reduced Memory Grant Count" @@ -776,7 +776,7 @@ FROM ON rgwg.pool_id = rgrp.pool_id ) AS rg UNPIVOT ( - value FOR counter IN ( [Request Count], [Queued Request Count], [CPU Limit Violation Count], [CPU Usage (time)], ' + CASE WHEN SERVERPROPERTY('ProductMajorVersion') > 10 THEN '[Premptive CPU Usage (time)], ' ELSE '' END + '[Lock Wait Count], [Lock Wait Time], [Reduced Memory Grant Count] ) + value FOR counter IN ( [Request Count], [Queued Request Count], [CPU Limit Violation Count], [CPU Usage (time)], ' + CASE WHEN SERVERPROPERTY('ProductMajorVersion') > 10 THEN '[Preemptive CPU Usage (time)], ' ELSE '' END + '[Lock Wait Count], [Lock Wait Time], [Reduced Memory Grant Count] ) ) AS vs' ,'"','''') @@ -1518,7 +1518,7 @@ SELECT , DB_NAME(s.database_id) as session_db_name , r.status , r.cpu_time as cpu_time_ms - , r.total_elapsed_time as total_elasped_time_ms + , r.total_elapsed_time as total_elapsed_time_ms , r.logical_reads , r.writes , r.command @@ -2182,7 +2182,7 @@ SELECT database_name, num_of_writes_persec FROM #baselinewritten WHERE datafile_type = ''ROWS'' ) as V -PIVOT(SUM(num_of_writes_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTabl +PIVOT(SUM(num_of_writes_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable UNION ALL SELECT measurement = ''Log (reads/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO'' , ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM @@ -2606,7 +2606,7 @@ VALUES (N'QDS_SHUTDOWN_QUEUE'), (N'HADR_FILESTREAM_IOMGR_IOCOMPLETION'), (N'DIRTY_PAGE_POLL'), (N'DISPATCHER_QUEUE_SEMAPHORE'), (N'EXECSYNC'), (N'FSAGENT'), (N'FT_IFTS_SCHEDULER_IDLE_WAIT'), (N'FT_IFTSHC_MUTEX'), - (N'HADR_CLUSAPI_CALL'), (N'HADR_FILESTREAM_IOMGR_IOCOMPLETIO(N'), + (N'HADR_CLUSAPI_CALL'), (N'HADR_FILESTREAM_IOMGR_IOCOMPLETION'), (N'HADR_LOGCAPTURE_WAIT'), (N'HADR_NOTIFICATION_DEQUEUE'), (N'HADR_TIMER_TASK'), (N'HADR_WORK_QUEUE'), (N'KSOURCE_WAKEUP'), (N'LAZYWRITER_SLEEP'), From 130403c0c9308493c17133a61ab57e54f2cf81c7 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 22 May 2020 00:07:27 -0700 Subject: [PATCH 1779/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6fd5ce580..903dfa592 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,7 @@ - [#7448](https://github.com/influxdata/telegraf/issues/7448): Remove debug fields from splunkmetric serializer. - [#7446](https://github.com/influxdata/telegraf/issues/7446): Fix gzip support in socket_listener with tcp sockets. - [#7390](https://github.com/influxdata/telegraf/issues/7390): Fix interval drift when round_interval is set in agent. 
+- [#7524](https://github.com/influxdata/telegraf/pull/7524): Fix typo in total_elapsed_time_ms field of sqlserver input. ## v1.14.4 [unreleased] From 4d071bed24c185daa0da1ee319256019fff75e3f Mon Sep 17 00:00:00 2001 From: raul-te <46945030+raul-te@users.noreply.github.com> Date: Fri, 22 May 2020 00:11:00 -0700 Subject: [PATCH 1780/1815] Add option to disable mongodb cluster status (#7515) It can be expensive to compute these metrics. In particular, when retrieving the amount of jumbo chunks, an index is not being used and consequently the query triggers an expensive COLLSCAN. For big databases, this query has a negative impact on the cluster performance. --- plugins/inputs/mongodb/README.md | 5 +++++ plugins/inputs/mongodb/mongodb.go | 27 ++++++++++++++++-------- plugins/inputs/mongodb/mongodb_server.go | 12 +++++++---- 3 files changed, 31 insertions(+), 13 deletions(-) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index 5e0f304d6..cce93dc07 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -11,6 +11,11 @@ ## mongodb://10.10.3.33:18832, servers = ["mongodb://127.0.0.1:27017"] + ## When true, collect cluster status. + ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which + ## may have an impact on performance. + # gather_cluster_status = true + ## When true, collect per database stats # gather_perdb_stats = false diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 967ccbe5f..016515ea9 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -17,12 +17,13 @@ import ( ) type MongoDB struct { - Servers []string - Ssl Ssl - mongos map[string]*Server - GatherPerdbStats bool - GatherColStats bool - ColStatsDbs []string + Servers []string + Ssl Ssl + mongos map[string]*Server + GatherClusterStatus bool + GatherPerdbStats bool + GatherColStats bool + ColStatsDbs []string tlsint.ClientConfig Log telegraf.Logger @@ -41,6 +42,11 @@ var sampleConfig = ` ## mongodb://10.10.3.33:18832, servers = ["mongodb://127.0.0.1:27017"] + ## When true, collect cluster status + ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which + ## may have an impact on performance. 
+ # gather_cluster_status = true + ## When true, collect per database stats # gather_perdb_stats = false @@ -177,14 +183,17 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error { } server.Session = sess } - return server.gatherData(acc, m.GatherPerdbStats, m.GatherColStats, m.ColStatsDbs) + return server.gatherData(acc, m.GatherClusterStatus, m.GatherPerdbStats, m.GatherColStats, m.ColStatsDbs) } func init() { inputs.Add("mongodb", func() telegraf.Input { return &MongoDB{ - ColStatsDbs: []string{"local"}, - mongos: make(map[string]*Server), + mongos: make(map[string]*Server), + GatherClusterStatus: true, + GatherPerdbStats: false, + GatherColStats: false, + ColStatsDbs: []string{"local"}, } }) } diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index be3916b5e..5af48c10a 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -192,7 +192,7 @@ func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error) return results, nil } -func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool, gatherColStats bool, colStatsDbs []string) error { +func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, gatherDbStats bool, gatherColStats bool, colStatsDbs []string) error { s.Session.SetMode(mgo.Eventual, true) s.Session.SetSocketTimeout(0) @@ -218,9 +218,13 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool, gather } } - clusterStatus, err := s.gatherClusterStatus() - if err != nil { - s.Log.Debugf("Unable to gather cluster status: %s", err.Error()) + var clusterStatus *ClusterStatus + if gatherClusterStatus { + status, err := s.gatherClusterStatus() + if err != nil { + s.Log.Debugf("Unable to gather cluster status: %s", err.Error()) + } + clusterStatus = status } shardStats, err := s.gatherShardConnPoolStats() From a6010b8276105667910cfff00fc57eb37fb065a4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 22 May 2020 00:12:03 -0700 Subject: [PATCH 1781/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 903dfa592..862d1e799 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ - [#7503](https://github.com/influxdata/telegraf/pull/7503): Add truncate_tags setting to wavefront output. - [#7545](https://github.com/influxdata/telegraf/pull/7545): Add configurable separator graphite serializer and output. - [#7489](https://github.com/influxdata/telegraf/pull/7489): Add cluster state integer to mongodb input. +- [#7515](https://github.com/influxdata/telegraf/pull/7515): Add option to disable mongodb cluster status. #### Bugfixes From e6f06457ce17d9f25d29ad0c70e30a143a1ddadd Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 22 May 2020 11:09:22 -0700 Subject: [PATCH 1782/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 862d1e799..d0b77b388 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ - [#7545](https://github.com/influxdata/telegraf/pull/7545): Add configurable separator graphite serializer and output. - [#7489](https://github.com/influxdata/telegraf/pull/7489): Add cluster state integer to mongodb input. - [#7515](https://github.com/influxdata/telegraf/pull/7515): Add option to disable mongodb cluster status. 
+- [#7319](https://github.com/influxdata/telegraf/pull/7319): Add support for battery level monitoring to the fibaro input. #### Bugfixes From a7674b707b0f98536360f856192bc5ef2efbb216 Mon Sep 17 00:00:00 2001 From: Jeff Registre Date: Fri, 22 May 2020 14:44:13 -0400 Subject: [PATCH 1783/1815] Add defaults processor to set default field values (#7370) --- plugins/processors/all/all.go | 1 + plugins/processors/defaults/README.md | 42 ++++++ plugins/processors/defaults/defaults.go | 72 ++++++++++ plugins/processors/defaults/defaults_test.go | 131 +++++++++++++++++++ 4 files changed, 246 insertions(+) create mode 100644 plugins/processors/defaults/README.md create mode 100644 plugins/processors/defaults/defaults.go create mode 100644 plugins/processors/defaults/defaults_test.go diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go index 5ff977324..f917bf6a6 100644 --- a/plugins/processors/all/all.go +++ b/plugins/processors/all/all.go @@ -5,6 +5,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/processors/converter" _ "github.com/influxdata/telegraf/plugins/processors/date" _ "github.com/influxdata/telegraf/plugins/processors/dedup" + _ "github.com/influxdata/telegraf/plugins/processors/defaults" _ "github.com/influxdata/telegraf/plugins/processors/enum" _ "github.com/influxdata/telegraf/plugins/processors/filepath" _ "github.com/influxdata/telegraf/plugins/processors/override" diff --git a/plugins/processors/defaults/README.md b/plugins/processors/defaults/README.md new file mode 100644 index 000000000..638a3dac7 --- /dev/null +++ b/plugins/processors/defaults/README.md @@ -0,0 +1,42 @@ +# Defaults Processor + +The *Defaults* processor allows you to ensure certain fields will always exist with a specified default value on your metric(s). + +There are three cases where this processor will insert a configured default field. + +1. The field is nil on the incoming metric +1. The field is not nil, but its value is an empty string. +1. The field is not nil, but its value is a string of one or more empty spaces. + +### Configuration +```toml +## Set default fields on your metric(s) when they are nil or empty +[[processors.defaults]] + +## This table determines what fields will be inserted in your metric(s) + [processors.defaults.fields] + field_1 = "bar" + time_idle = 0 + is_error = true +``` + +### Example +Ensure a _status\_code_ field with _N/A_ is inserted in the metric when one is not already set: + +```toml +[[processors.defaults]] + [processors.defaults.fields] + status_code = "N/A" +``` + +```diff
- lb,http_method=GET cache_status=HIT,latency=230 ++ lb,http_method=GET cache_status=HIT,latency=230,status_code="N/A" +``` + +Ensure an empty string gets replaced by a default: + +```diff
- lb,http_method=GET cache_status=HIT,latency=230,status_code="" ++ lb,http_method=GET cache_status=HIT,latency=230,status_code="N/A" +``` diff --git a/plugins/processors/defaults/defaults.go b/plugins/processors/defaults/defaults.go new file mode 100644 index 000000000..eaffdf81a --- /dev/null +++ b/plugins/processors/defaults/defaults.go @@ -0,0 +1,72 @@ +package defaults + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" + "strings" +) + +const sampleConfig = ` + ## Ensures a set of fields always exists on your metric(s) with their + ## respective default value. + ## For any given field pair (key = default), if it's not set, a field + ## is set on the metric with the specified default. 
+ ## + ## A field is considered not set if it is nil on the incoming metric; + ## or it is not nil but its value is an empty string or is a string + ## of one or more spaces. + ## <target-field> = <value> + # [processors.defaults.fields] + # field_1 = "bar" + # time_idle = 0 + # is_error = true +` + +// Defaults is a processor for ensuring certain fields always exist +// on your Metrics with at least a default value. +type Defaults struct { + DefaultFieldsSets map[string]interface{} `toml:"fields"` +} + +// SampleConfig represents a sample toml config for this plugin. +func (def *Defaults) SampleConfig() string { + return sampleConfig +} + +// Description is a brief description of this processor plugin's behaviour. +func (def *Defaults) Description() string { + return "Defaults sets default value(s) for specified fields that are not set on incoming metrics." +} + +// Apply contains the main implementation of this processor. +// For each metric in 'inputMetrics', it goes over each default pair. +// If the field in the pair does not exist on the metric, the associated default is added. +// If the field was found, then, if its value is the empty string or one or more spaces, it is replaced +// by the associated default. +func (def *Defaults) Apply(inputMetrics ...telegraf.Metric) []telegraf.Metric { + for _, metric := range inputMetrics { + for defField, defValue := range def.DefaultFieldsSets { + if maybeCurrent, isSet := metric.GetField(defField); !isSet { + metric.AddField(defField, defValue) + } else if trimmed, isStr := maybeTrimmedString(maybeCurrent); isStr && trimmed == "" { + metric.RemoveField(defField) + metric.AddField(defField, defValue) + } + } + } + return inputMetrics +} + +func maybeTrimmedString(v interface{}) (string, bool) { + switch value := v.(type) { + case string: + return strings.TrimSpace(value), true + } + return "", false +} + +func init() { + processors.Add("defaults", func() telegraf.Processor { + return &Defaults{} + }) +} diff --git a/plugins/processors/defaults/defaults_test.go b/plugins/processors/defaults/defaults_test.go new file mode 100644 index 000000000..c0e930fc6 --- /dev/null +++ b/plugins/processors/defaults/defaults_test.go @@ -0,0 +1,131 @@ +package defaults + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +func TestDefaults(t *testing.T) { + scenarios := []struct { + name string + defaults *Defaults + input telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "Test that no values are changed since they are not nil or empty", + defaults: &Defaults{ + DefaultFieldsSets: map[string]interface{}{ + "usage": 30, + "wind_feel": "very chill", + "is_dead": true, + }, + }, + input: testutil.MustMetric( + "CPU metrics", + map[string]string{}, + map[string]interface{}{ + "usage": 45, + "wind_feel": "a dragon's breath", + "is_dead": false, + }, + time.Unix(0, 0), + ), + expected: []telegraf.Metric{ + testutil.MustMetric( + "CPU metrics", + map[string]string{}, + map[string]interface{}{ + "usage": 45, + "wind_feel": "a dragon's breath", + "is_dead": false, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "Tests that the missing fields are set on the metric", + defaults: &Defaults{ + DefaultFieldsSets: map[string]interface{}{ + "max_clock_gz": 6, + "wind_feel": "Unknown", + "boost_enabled": false, + "variance": 1.2, + }, + }, + input: testutil.MustMetric( + "CPU metrics", + map[string]string{}, + map[string]interface{}{ + "usage": 45, + "temperature": 64, + }, + 
time.Unix(0, 0), + ), + expected: []telegraf.Metric{ + testutil.MustMetric( + "CPU metrics", + map[string]string{}, + map[string]interface{}{ + "usage": 45, + "temperature": 64, + "max_clock_gz": 6, + "wind_feel": "Unknown", + "boost_enabled": false, + "variance": 1.2, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "Tests that set but empty fields are replaced by specified defaults", + defaults: &Defaults{ + DefaultFieldsSets: map[string]interface{}{ + "max_clock_gz": 6, + "wind_feel": "Unknown", + "fan_loudness": "Inaudible", + "boost_enabled": false, + }, + }, + input: testutil.MustMetric( + "CPU metrics", + map[string]string{}, + map[string]interface{}{ + "max_clock_gz": "", + "wind_feel": " ", + "fan_loudness": " ", + }, + time.Unix(0, 0), + ), + expected: []telegraf.Metric{ + testutil.MustMetric( + "CPU metrics", + map[string]string{}, + map[string]interface{}{ + "max_clock_gz": 6, + "wind_feel": "Unknown", + "fan_loudness": "Inaudible", + "boost_enabled": false, + }, + time.Unix(0, 0), + ), + }, + }, + } + + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + defaults := scenario.defaults + + resultMetrics := defaults.Apply(scenario.input) + assert.Len(t, resultMetrics, 1) + testutil.RequireMetricsEqual(t, scenario.expected, resultMetrics) + }) + } +} From f91d0833fb0c9240ec0d0049625f7ab7d27d7376 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 22 May 2020 11:48:49 -0700 Subject: [PATCH 1784/1815] Add defaults processor to readme/changelog --- CHANGELOG.md | 1 + README.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d0b77b388..c4cc59612 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ #### New Processors +- [defaults](/plugins/processors/defaults/README.md) - Contributed by @jregistr - [filepath](/plugins/processors/filepath/README.md) - Contributed by @kir4h #### Features diff --git a/README.md b/README.md index fed4c06cd..3391a038e 100644 --- a/README.md +++ b/README.md @@ -363,6 +363,7 @@ For documentation on the latest development code see the [documentation index][d * [converter](/plugins/processors/converter) * [date](/plugins/processors/date) * [dedup](/plugins/processors/dedup) +* [defaults](/plugins/processors/defaults) * [enum](/plugins/processors/enum) * [filepath](/plugins/processors/filepath) * [override](/plugins/processors/override) From ad97b744a3d9d40ec930334f9ff1b63c6479632d Mon Sep 17 00:00:00 2001 From: Anton Aksola Date: Tue, 26 May 2020 21:07:24 +0200 Subject: [PATCH 1785/1815] Fix numeric to bool conversion in converter (#7579) A type switch case with multiple conditions causes the value to remain as interface which causes toBool to always return true for any numeric values. 
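To make the failure mode concrete, here is a minimal, self-contained sketch of the pitfall (illustrative only, not part of the patch; the variable names are hypothetical): when several types share one case clause, the switch variable keeps the static type interface{}, so comparing it against the untyped constant 0 boxes the 0 as an int, and the dynamic types never match.

```go
package main

import "fmt"

func main() {
	var v interface{} = int64(0)

	switch value := v.(type) {
	case int64, uint64, float64:
		// With multiple types in one case, `value` is still interface{}.
		// The constant 0 is boxed as int, so this compares interface
		// values with different dynamic types (int64 vs int) and reports
		// "not equal" even though v holds a numeric zero.
		fmt.Println(value != 0) // prints: true
	case bool:
		// With a single type in the case, `value` has that concrete type.
		fmt.Println(value)
	}
}
```

Splitting the numeric types into separate case clauses, as this patch does, gives `value` a concrete numeric type in each branch so that `value != 0` behaves as intended.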
--- plugins/processors/converter/converter.go | 12 ++++++------ plugins/processors/converter/converter_test.go | 12 +++++++++--- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/plugins/processors/converter/converter.go b/plugins/processors/converter/converter.go index 33f2e43c0..55a2a2d09 100644 --- a/plugins/processors/converter/converter.go +++ b/plugins/processors/converter/converter.go @@ -327,12 +327,12 @@ func (p *Converter) convertFields(metric telegraf.Metric) { func toBool(v interface{}) (bool, bool) { switch value := v.(type) { - case int64, uint64, float64: - if value != 0 { - return true, true - } else { - return false, false - } + case int64: + return value != 0, true + case uint64: + return value != 0, true + case float64: + return value != 0, true case bool: return value, true case string: diff --git a/plugins/processors/converter/converter_test.go b/plugins/processors/converter/converter_test.go index 1310e698a..efde0bcd9 100644 --- a/plugins/processors/converter/converter_test.go +++ b/plugins/processors/converter/converter_test.go @@ -180,7 +180,7 @@ func TestConverter(t *testing.T) { String: []string{"a"}, Integer: []string{"b"}, Unsigned: []string{"c", "negative_uint"}, - Boolean: []string{"d"}, + Boolean: []string{"d", "bool_zero"}, Float: []string{"e"}, Tag: []string{"f"}, }, @@ -196,6 +196,7 @@ func TestConverter(t *testing.T) { "e": int64(42), "f": int64(42), "negative_uint": int64(-42), + "bool_zero": int64(0), }, time.Unix(0, 0), ), @@ -212,6 +213,7 @@ func TestConverter(t *testing.T) { "d": true, "e": 42.0, "negative_uint": uint64(0), + "bool_zero": false, }, time.Unix(0, 0), ), @@ -224,7 +226,7 @@ func TestConverter(t *testing.T) { String: []string{"a"}, Integer: []string{"b", "overflow_int"}, Unsigned: []string{"c"}, - Boolean: []string{"d"}, + Boolean: []string{"d", "bool_zero"}, Float: []string{"e"}, Tag: []string{"f"}, }, @@ -240,6 +242,7 @@ func TestConverter(t *testing.T) { "e": uint64(42), "f": uint64(42), "overflow_int": uint64(math.MaxUint64), + "bool_zero": uint64(0), }, time.Unix(0, 0), ), @@ -256,6 +259,7 @@ func TestConverter(t *testing.T) { "d": true, "e": 42.0, "overflow_int": int64(math.MaxInt64), + "bool_zero": false, }, time.Unix(0, 0), ), @@ -350,7 +354,7 @@ func TestConverter(t *testing.T) { String: []string{"a"}, Integer: []string{"b", "too_large_int", "too_small_int"}, Unsigned: []string{"c", "negative_uint", "too_large_uint", "too_small_uint"}, - Boolean: []string{"d"}, + Boolean: []string{"d", "bool_zero"}, Float: []string{"e"}, Tag: []string{"f"}, }, @@ -370,6 +374,7 @@ func TestConverter(t *testing.T) { "too_small_int": -math.MaxFloat64, "too_small_uint": -math.MaxFloat64, "negative_uint": -42.0, + "bool_zero": 0.0, }, time.Unix(0, 0), ), @@ -390,6 +395,7 @@ func TestConverter(t *testing.T) { "too_small_int": int64(math.MinInt64), "too_small_uint": uint64(0), "negative_uint": uint64(0), + "bool_zero": false, }, time.Unix(0, 0), ), From f9a110f40796169f0db8462f4bfedae9e8f9fdf0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 May 2020 12:08:31 -0700 Subject: [PATCH 1786/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c4cc59612..dc3f7651b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,6 +57,7 @@ #### Bugfixes - [#7325](https://github.com/influxdata/telegraf/issues/7325): Fix "cannot insert the value NULL error" with PerformanceCounters query. 
+- [#7579](https://github.com/influxdata/telegraf/issues/7579): Fix numeric to bool conversion in converter processor. ## v1.14.3 [2020-05-19] From 22c0bdb38c4dd93302271ab061e2a57267c6e3eb Mon Sep 17 00:00:00 2001 From: Harshit Bansal Date: Wed, 27 May 2020 00:48:24 +0530 Subject: [PATCH 1787/1815] Fix the typo in `gcc_pu_fraction` to `gc_cpu_fraction` (#7573) --- plugins/inputs/influxdb/README.md | 4 ++-- plugins/inputs/influxdb/influxdb.go | 2 +- plugins/inputs/influxdb/influxdb_test.go | 2 +- plugins/inputs/kapacitor/README.md | 12 ++++++------ 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/plugins/inputs/influxdb/README.md b/plugins/inputs/influxdb/README.md index 711503245..8787c6a0e 100644 --- a/plugins/inputs/influxdb/README.md +++ b/plugins/inputs/influxdb/README.md @@ -59,7 +59,7 @@ and may vary between versions. - heap_sys - mcache_sys - next_gc - - gcc_pu_fraction + - gc_cpu_fraction - other_sys - alloc - stack_inuse @@ -95,7 +95,7 @@ telegraf --config ~/ws/telegraf.conf --input-filter influxdb --test > influxdb_measurement,database=_internal,host=tyrion,measurement=tsm1_filestore,url=http://localhost:8086/debug/vars numSeries=2 1463590500247354636 > influxdb_measurement,database=_internal,host=tyrion,measurement=tsm1_wal,url=http://localhost:8086/debug/vars numSeries=4 1463590500247354636 > influxdb_measurement,database=_internal,host=tyrion,measurement=write,url=http://localhost:8086/debug/vars numSeries=1 1463590500247354636 -> influxdb_memstats,host=tyrion,url=http://localhost:8086/debug/vars alloc=7642384i,buck_hash_sys=1463471i,frees=1169558i,gc_sys=653312i,gcc_pu_fraction=0.00003825652361068311,heap_alloc=7642384i,heap_idle=9912320i,heap_inuse=9125888i,heap_objects=48276i,heap_released=0i,heap_sys=19038208i,last_gc=1463590480877651621i,lookups=90i,mallocs=1217834i,mcache_inuse=4800i,mcache_sys=16384i,mspan_inuse=70920i,mspan_sys=81920i,next_gc=11679787i,num_gc=141i,other_sys=1244233i,pause_total_ns=24034027i,stack_inuse=884736i,stack_sys=884736i,sys=23382264i,total_alloc=679012200i 1463590500277918755 +> influxdb_memstats,host=tyrion,url=http://localhost:8086/debug/vars alloc=7642384i,buck_hash_sys=1463471i,frees=1169558i,gc_sys=653312i,gc_cpu_fraction=0.00003825652361068311,heap_alloc=7642384i,heap_idle=9912320i,heap_inuse=9125888i,heap_objects=48276i,heap_released=0i,heap_sys=19038208i,last_gc=1463590480877651621i,lookups=90i,mallocs=1217834i,mcache_inuse=4800i,mcache_sys=16384i,mspan_inuse=70920i,mspan_sys=81920i,next_gc=11679787i,num_gc=141i,other_sys=1244233i,pause_total_ns=24034027i,stack_inuse=884736i,stack_sys=884736i,sys=23382264i,total_alloc=679012200i 1463590500277918755 > influxdb_shard,database=_internal,engine=tsm1,host=tyrion,id=4,path=/Users/sparrc/.influxdb/data/_internal/monitor/4,retentionPolicy=monitor,url=http://localhost:8086/debug/vars fieldsCreate=65,seriesCreate=26,writePointsOk=7274,writeReq=280 1463590500247354636 > influxdb_subscriber,host=tyrion,url=http://localhost:8086/debug/vars pointsWritten=7274 1463590500247354636 > influxdb_tsm1_cache,database=_internal,host=tyrion,path=/Users/sparrc/.influxdb/data/_internal/monitor/1,retentionPolicy=monitor,url=http://localhost:8086/debug/vars WALCompactionTimeMs=0,cacheAgeMs=2809192,cachedBytes=0,diskBytes=0,memBytes=0,snapshotCount=0 1463590500247354636 diff --git a/plugins/inputs/influxdb/influxdb.go b/plugins/inputs/influxdb/influxdb.go index 96389a013..23fa9fdc4 100644 --- a/plugins/inputs/influxdb/influxdb.go +++ b/plugins/inputs/influxdb/influxdb.go @@ -242,7 +242,7 @@ func (i 
*InfluxDB) gatherURL( "pause_total_ns": m.PauseTotalNs, "pause_ns": m.PauseNs[(m.NumGC+255)%256], "num_gc": m.NumGC, - "gcc_pu_fraction": m.GCCPUFraction, + "gc_cpu_fraction": m.GCCPUFraction, }, map[string]string{ "url": url, diff --git a/plugins/inputs/influxdb/influxdb_test.go b/plugins/inputs/influxdb/influxdb_test.go index 9225c45b0..27ea81b6d 100644 --- a/plugins/inputs/influxdb/influxdb_test.go +++ b/plugins/inputs/influxdb/influxdb_test.go @@ -92,7 +92,7 @@ func TestInfluxDB(t *testing.T) { "heap_sys": int64(33849344), "mcache_sys": int64(16384), "next_gc": int64(20843042), - "gcc_pu_fraction": float64(4.287178819113636e-05), + "gc_cpu_fraction": float64(4.287178819113636e-05), "other_sys": int64(1229737), "alloc": int64(17034016), "stack_inuse": int64(753664), diff --git a/plugins/inputs/kapacitor/README.md b/plugins/inputs/kapacitor/README.md index 2328e0904..ace4f18ff 100644 --- a/plugins/inputs/kapacitor/README.md +++ b/plugins/inputs/kapacitor/README.md @@ -33,7 +33,7 @@ The Kapacitor plugin collects metrics from the given Kapacitor instances. - [notification_dropped](#notification_dropped) _(integer)_ - [primary-handle-count](#primary-handle-count) _(integer)_ - [secondary-handle-count](#secondary-handle-count) _(integer)_ -- (Kapacitor Enterprise only) [kapacitor_cluster](#kapacitor_cluster) _(integer)_ +- (Kapacitor Enterprise only) [kapacitor_cluster](#kapacitor_cluster) _(integer)_ - [dropped_member_events](#dropped_member_events) _(integer)_ - [dropped_user_events](#dropped_user_events) _(integer)_ - [query_handler_errors](#query_handler_errors) _(integer)_ @@ -49,7 +49,7 @@ The Kapacitor plugin collects metrics from the given Kapacitor instances. - [buck_hash_sys_bytes](#buck_hash_sys_bytes) _(integer)_ - [frees](#frees) _(integer)_ - [gc_sys_bytes](#gc_sys_bytes) _(integer)_ - - [gc_cpu_fraction](#gcc_pu_fraction) _(float)_ + - [gc_cpu_fraction](#gc_cpu_fraction) _(float)_ - [heap_alloc_bytes](#heap_alloc_bytes) _(integer)_ - [heap_idle_bytes](#heap_idle_bytes) _(integer)_ - [heap_in_use_bytes](#heap_in_use_bytes) _(integer)_ @@ -109,8 +109,8 @@ The `kapacitor_alert` measurement stores fields with information related to [Kapacitor alerts](https://docs.influxdata.com/kapacitor/v1.5/working/alerts/). #### notification-dropped -The number of internal notifications dropped because they arrive too late from another Kapacitor node. -If this count is increasing, Kapacitor Enterprise nodes aren't able to communicate fast enough +The number of internal notifications dropped because they arrive too late from another Kapacitor node. +If this count is increasing, Kapacitor Enterprise nodes aren't able to communicate fast enough to keep up with the volume of alerts. #### primary-handle-count @@ -199,7 +199,7 @@ The number of allocated objects. The number of heap bytes released to the operating system. #### heap_sys_bytes -The number of heap bytes obtained from `system`. +The number of heap bytes obtained from `system`. #### last_gc_ns The nanosecond epoch time of the last garbage collection. @@ -293,7 +293,7 @@ The `kapacitor_topics` measurement stores fields related to Kapacitor topics](https://docs.influxdata.com/kapacitor/latest/working/using_alert_topics/). #### collected -The number of events collected by Kapacitor topics. +The number of events collected by Kapacitor topics. 
--- From 5345ad529dc3ff6fe0c5048ecc12c9717dba46f8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 May 2020 12:20:08 -0700 Subject: [PATCH 1788/1815] Update changelog --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dc3f7651b..557e21597 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,7 +57,8 @@ #### Bugfixes - [#7325](https://github.com/influxdata/telegraf/issues/7325): Fix "cannot insert the value NULL error" with PerformanceCounters query. -- [#7579](https://github.com/influxdata/telegraf/issues/7579): Fix numeric to bool conversion in converter processor. +- [#7579](https://github.com/influxdata/telegraf/pull/7579): Fix numeric to bool conversion in converter processor. +- [#7551](https://github.com/influxdata/telegraf/issues/7551): Fix typo in name of gc_cpu_fraction field of the influxdb input. ## v1.14.3 [2020-05-19] From 2561aac6efe97152920fb000d77d6471e571b9fa Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 26 May 2020 16:11:02 -0400 Subject: [PATCH 1789/1815] fix go version check (#7562) --- internal/internal_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/internal_test.go b/internal/internal_test.go index 83a7e88d6..cbfbabb22 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -486,6 +486,6 @@ func TestProductToken(t *testing.T) { token := ProductToken() // Telegraf version depends on the call to SetVersion, it cannot be set // multiple times and is not thread-safe. - re := regexp.MustCompile(`^Telegraf/[^\s]+ Go/\d+.\d+.\d+$`) + re := regexp.MustCompile(`^Telegraf/[^\s]+ Go/\d+.\d+(.\d+)?$`) require.True(t, re.MatchString(token), token) } From 092059c06657d44d3b44d49d051bf3568ca95e14 Mon Sep 17 00:00:00 2001 From: Harshit Bansal Date: Wed, 27 May 2020 02:46:48 +0530 Subject: [PATCH 1790/1815] Exclude csv_timestamp_column and csv_measurement_column from fields (#7572) --- plugins/inputs/tail/tail_test.go | 6 +-- plugins/parsers/csv/README.md | 6 ++- plugins/parsers/csv/parser.go | 4 ++ plugins/parsers/csv/parser_test.go | 61 +++++++++++++++++++++++++++++- 4 files changed, 69 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index 88d63f723..3b12ae080 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -211,8 +211,7 @@ cpu,42 "path": tmpfile.Name(), }, map[string]interface{}{ - "time_idle": 42, - "measurement": "cpu", + "time_idle": 42, }, time.Unix(0, 0)), testutil.MustMetric("cpu", @@ -220,8 +219,7 @@ cpu,42 "path": tmpfile.Name(), }, map[string]interface{}{ - "time_idle": 42, - "measurement": "cpu", + "time_idle": 42, }, time.Unix(0, 0)), } diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md index 9ca34d288..2189c8ce7 100644 --- a/plugins/parsers/csv/README.md +++ b/plugins/parsers/csv/README.md @@ -56,11 +56,13 @@ values. ## will be added as fields. csv_tag_columns = [] - ## The column to extract the name of the metric from + ## The column to extract the name of the metric from. Will not be + ## included as field in metric. csv_measurement_column = "" ## The column to extract time information for the metric - ## `csv_timestamp_format` must be specified if this is used + ## `csv_timestamp_format` must be specified if this is used. + ## Will not be included as field in metric. 
csv_timestamp_column = "" ## The format of time data extracted from `csv_timestamp_column` diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go index 848a51699..7f8076917 100644 --- a/plugins/parsers/csv/parser.go +++ b/plugins/parsers/csv/parser.go @@ -216,6 +216,10 @@ outer: return nil, err } + // Exclude `TimestampColumn` and `MeasurementColumn` + delete(recordFields, p.TimestampColumn) + delete(recordFields, p.MeasurementColumn) + m, err := metric.New(measurementName, tags, recordFields, metricTime) if err != nil { return nil, err diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go index e39a5df70..c0ef5f1cb 100644 --- a/plugins/parsers/csv/parser_test.go +++ b/plugins/parsers/csv/parser_test.go @@ -281,10 +281,10 @@ hello,80,test_name2` expectedFields := map[string]interface{}{ "line2": int64(80), - "line3": "test_name2", } metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) + require.Equal(t, "test_name2", metrics[0].Name()) require.Equal(t, expectedFields, metrics[0].Fields()) } @@ -364,7 +364,64 @@ func TestTimestampUnixFloatPrecision(t *testing.T) { map[string]string{}, map[string]interface{}{ "value": 42, - "time": 1551129661.954561233, + }, + time.Unix(1551129661, 954561233), + ), + } + + metrics, err := p.Parse([]byte(data)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, metrics) +} + +func TestSkipMeasurementColumn(t *testing.T) { + p := Parser{ + MetricName: "csv", + HeaderRowCount: 1, + TimestampColumn: "timestamp", + TimestampFormat: "unix", + TimeFunc: DefaultTime, + TrimSpace: true, + } + data := `id,value,timestamp + 1,5,1551129661.954561233` + + expected := []telegraf.Metric{ + testutil.MustMetric( + "csv", + map[string]string{}, + map[string]interface{}{ + "id": 1, + "value": 5, + }, + time.Unix(1551129661, 954561233), + ), + } + + metrics, err := p.Parse([]byte(data)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, metrics) +} + +func TestSkipTimestampColumn(t *testing.T) { + p := Parser{ + MetricName: "csv", + HeaderRowCount: 1, + TimestampColumn: "timestamp", + TimestampFormat: "unix", + TimeFunc: DefaultTime, + TrimSpace: true, + } + data := `id,value,timestamp + 1,5,1551129661.954561233` + + expected := []telegraf.Metric{ + testutil.MustMetric( + "csv", + map[string]string{}, + map[string]interface{}{ + "id": 1, + "value": 5, }, time.Unix(1551129661, 954561233), ), From bdbf57576a188645f20f716c6ca7052d20382421 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 May 2020 14:18:53 -0700 Subject: [PATCH 1791/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 557e21597..83c3206b8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,6 +51,7 @@ - [#7446](https://github.com/influxdata/telegraf/issues/7446): Fix gzip support in socket_listener with tcp sockets. - [#7390](https://github.com/influxdata/telegraf/issues/7390): Fix interval drift when round_interval is set in agent. - [#7524](https://github.com/influxdata/telegraf/pull/7524): Fix typo in total_elapsed_time_ms field of sqlserver input. +- [#7203](https://github.com/influxdata/telegraf/issues/7203): Exclude csv_timestamp_column and csv_measurement_column from fields. 
## v1.14.4 [unreleased] From d27f6760916bd1f77ecad4c31c2ead956005f71b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 May 2020 15:40:25 -0700 Subject: [PATCH 1792/1815] Update to Go 1.14.3 with testing using 1.13.11 (#7564) --- .circleci/config.yml | 82 +++++++++---------- README.md | 4 +- appveyor.yml | 6 +- docs/TLS.md | 44 ++++------ internal/tls/common.go | 4 + internal/tls/common_go112.go | 12 --- plugins/inputs/apcupsd/apcupsd_test.go | 2 - plugins/inputs/clickhouse/clickhouse.go | 4 + .../inputs/clickhouse/clickhouse_go1.11.go | 6 -- .../inputs/clickhouse/clickhouse_go1.12.go | 8 -- scripts/ci-1.12.docker | 28 ------- scripts/ci-1.13.docker | 2 +- scripts/{ci-1.9.docker => ci-1.14.docker} | 7 +- 13 files changed, 74 insertions(+), 135 deletions(-) delete mode 100644 internal/tls/common_go112.go delete mode 100644 plugins/inputs/clickhouse/clickhouse_go1.11.go delete mode 100644 plugins/inputs/clickhouse/clickhouse_go1.12.go delete mode 100644 scripts/ci-1.12.docker rename scripts/{ci-1.9.docker => ci-1.14.docker} (66%) diff --git a/.circleci/config.yml b/.circleci/config.yml index 844ed294b..fb9159ff3 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -4,14 +4,12 @@ defaults: working_directory: '/go/src/github.com/influxdata/telegraf' environment: GOFLAGS: -p=8 - go-1_12: &go-1_12 - docker: - - image: 'quay.io/influxdb/telegraf-ci:1.12.17' - environment: - GO111MODULE: 'on' go-1_13: &go-1_13 docker: - - image: 'quay.io/influxdb/telegraf-ci:1.13.8' + - image: 'quay.io/influxdb/telegraf-ci:1.13.11' + go-1_14: &go-1_14 + docker: + - image: 'quay.io/influxdb/telegraf-ci:1.14.3' mac: &mac macos: xcode: 11.3.1 @@ -23,7 +21,7 @@ defaults: version: 2 jobs: deps: - <<: [ *defaults, *go-1_13 ] + <<: [ *defaults, *go-1_14 ] steps: - checkout - restore_cache: @@ -64,22 +62,22 @@ jobs: - 'usr/local/bin/gofmt' - 'Users/distiller/go' - test-go-1.12: - <<: [ *defaults, *go-1_12 ] + test-go-1.13: + <<: [ *defaults, *go-1_13 ] steps: - attach_workspace: at: '/go' - run: 'make' - run: 'make test' - test-go-1.12-386: - <<: [ *defaults, *go-1_12 ] + test-go-1.13-386: + <<: [ *defaults, *go-1_13 ] steps: - attach_workspace: at: '/go' - run: 'GOARCH=386 make' - run: 'GOARCH=386 make test' - test-go-1.13: - <<: [ *defaults, *go-1_13 ] + test-go-1.14: + <<: [ *defaults, *go-1_14 ] steps: - attach_workspace: at: '/go' @@ -87,8 +85,8 @@ jobs: - run: 'make check' - run: 'make check-deps' - run: 'make test' - test-go-1.13-386: - <<: [ *defaults, *go-1_13 ] + test-go-1.14-386: + <<: [ *defaults, *go-1_14 ] steps: - attach_workspace: at: '/go' @@ -105,7 +103,7 @@ jobs: - run: 'make test' package: - <<: [ *defaults, *go-1_13 ] + <<: [ *defaults, *go-1_14 ] steps: - attach_workspace: at: '/go' @@ -114,7 +112,7 @@ jobs: path: './build' destination: 'build' release: - <<: [ *defaults, *go-1_13 ] + <<: [ *defaults, *go-1_14 ] steps: - attach_workspace: at: '/go' @@ -123,7 +121,7 @@ jobs: path: './build' destination: 'build' nightly: - <<: [ *defaults, *go-1_13 ] + <<: [ *defaults, *go-1_14 ] steps: - attach_workspace: at: '/go' @@ -144,18 +142,6 @@ workflows: filters: tags: only: /.*/ - - 'test-go-1.12': - requires: - - 'deps' - filters: - tags: - only: /.*/ - - 'test-go-1.12-386': - requires: - - 'deps' - filters: - tags: - only: /.*/ - 'test-go-1.13': requires: - 'deps' @@ -168,6 +154,18 @@ workflows: filters: tags: only: /.*/ + - 'test-go-1.14': + requires: + - 'deps' + filters: + tags: + only: /.*/ + - 'test-go-1.14-386': + requires: + - 'deps' + filters: + tags: + only: /.*/ - 
'test-go-1.13-darwin': requires: - 'macdeps' @@ -176,16 +174,16 @@ workflows: only: /.*/ - 'package': requires: - - 'test-go-1.12' - - 'test-go-1.12-386' - 'test-go-1.13' - 'test-go-1.13-386' + - 'test-go-1.14' + - 'test-go-1.14-386' - 'release': requires: - - 'test-go-1.12' - - 'test-go-1.12-386' - 'test-go-1.13' - 'test-go-1.13-386' + - 'test-go-1.14' + - 'test-go-1.14-386' filters: tags: only: /.*/ @@ -194,24 +192,24 @@ workflows: nightly: jobs: - 'deps' - - 'test-go-1.12': - requires: - - 'deps' - - 'test-go-1.12-386': - requires: - - 'deps' - 'test-go-1.13': requires: - 'deps' - 'test-go-1.13-386': requires: - 'deps' + - 'test-go-1.14': + requires: + - 'deps' + - 'test-go-1.14-386': + requires: + - 'deps' - 'nightly': requires: - - 'test-go-1.12' - - 'test-go-1.12-386' - 'test-go-1.13' - 'test-go-1.13-386' + - 'test-go-1.14' + - 'test-go-1.14-386' triggers: - schedule: cron: "0 7 * * *" diff --git a/README.md b/README.md index 3391a038e..c749811de 100644 --- a/README.md +++ b/README.md @@ -51,9 +51,9 @@ Ansible role: https://github.com/rossmcdonald/telegraf ### From Source: -Telegraf requires Go version 1.12 or newer, the Makefile requires GNU make. +Telegraf requires Go version 1.13 or newer, the Makefile requires GNU make. -1. [Install Go](https://golang.org/doc/install) >=1.12 (1.13 recommended) +1. [Install Go](https://golang.org/doc/install) >=1.13 (1.14 recommended) 2. Clone the Telegraf repository: ``` cd ~/src diff --git a/appveyor.yml b/appveyor.yml index bff7dc0cb..b454c8dc8 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,5 +1,7 @@ version: "{build}" +image: Visual Studio 2019 + cache: - C:\gopath\pkg\mod -> go.sum - C:\ProgramData\chocolatey\bin -> appveyor.yml @@ -8,13 +10,13 @@ cache: clone_folder: C:\gopath\src\github.com\influxdata\telegraf environment: - GOVERSION: 1.13.8 GOPATH: C:\gopath +stack: go 1.14 + platform: x64 install: - - choco install golang --version "%GOVERSION%" - choco install make - cd "%GOPATH%\src\github.com\influxdata\telegraf" - git config --system core.longpaths true diff --git a/docs/TLS.md b/docs/TLS.md index 363b0d968..3cd6a1025 100644 --- a/docs/TLS.md +++ b/docs/TLS.md @@ -20,9 +20,23 @@ For client TLS support we have the following options: # insecure_skip_verify = false ``` +### Server Configuration + +The server TLS configuration provides support for TLS mutual authentication: + +```toml +## Set one or more allowed client CA certificate file names to +## enable mutually authenticated TLS connections. +# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + +## Add service certificate and key. +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +``` + #### Advanced Configuration -For plugins using the standard client configuration you can also set several +For plugins using the standard server configuration you can also set several advanced settings. These options are not included in the sample configuration for the interest of brevity. @@ -47,14 +61,14 @@ for the interest of brevity. ## "TLS_RSA_WITH_AES_128_CBC_SHA256", ## "TLS_RSA_WITH_AES_128_CBC_SHA", ## "TLS_RSA_WITH_AES_256_CBC_SHA" -# ] +## ] # tls_cipher_suites = [] ## Minimum TLS version that is acceptable. # tls_min_version = "TLS10" ## Maximum SSL/TLS version that is acceptable. 
-# tls_max_version = "TLS12" +# tls_max_version = "TLS13" ``` Cipher suites for use with `tls_cipher_suites`: @@ -80,8 +94,6 @@ Cipher suites for use with `tls_cipher_suites`: - `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` - `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` - `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` - -TLS 1.3 cipher suites require Telegraf 1.12 and Go 1.12 or later: - `TLS_AES_128_GCM_SHA256` - `TLS_AES_256_GCM_SHA384` - `TLS_CHACHA20_POLY1305_SHA256` @@ -90,24 +102,4 @@ TLS versions for use with `tls_min_version` or `tls_max_version`: - `TLS10` - `TLS11` - `TLS12` -- `TLS13` (Telegraf 1.12 and Go 1.12 required, must enable TLS 1.3 using environment variables) - -### TLS 1.3 - -TLS 1.3 is available only on an opt-in basis in Go 1.12. To enable it, set the -GODEBUG environment variable (comma-separated key=value options) such that it -includes "tls13=1". - -### Server Configuration - -The server TLS configuration provides support for TLS mutual authentication: - -```toml -## Set one or more allowed client CA certificate file names to -## enable mutually authenticated TLS connections. -# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - -## Add service certificate and key. -# tls_cert = "/etc/telegraf/cert.pem" -# tls_key = "/etc/telegraf/key.pem" -``` +- `TLS13` diff --git a/internal/tls/common.go b/internal/tls/common.go index 3100a73a1..1ceb20c3f 100644 --- a/internal/tls/common.go +++ b/internal/tls/common.go @@ -6,6 +6,7 @@ var tlsVersionMap = map[string]uint16{ "TLS10": tls.VersionTLS10, "TLS11": tls.VersionTLS11, "TLS12": tls.VersionTLS12, + "TLS13": tls.VersionTLS13, } var tlsCipherMap = map[string]uint16{ @@ -31,4 +32,7 @@ var tlsCipherMap = map[string]uint16{ "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + "TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256, + "TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384, + "TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256, } diff --git a/internal/tls/common_go112.go b/internal/tls/common_go112.go deleted file mode 100644 index 988d6f936..000000000 --- a/internal/tls/common_go112.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build go1.12 - -package tls - -import "crypto/tls" - -func init() { - tlsVersionMap["TLS13"] = tls.VersionTLS13 - tlsCipherMap["TLS_AES_128_GCM_SHA256"] = tls.TLS_AES_128_GCM_SHA256 - tlsCipherMap["TLS_AES_256_GCM_SHA384"] = tls.TLS_AES_256_GCM_SHA384 - tlsCipherMap["TLS_CHACHA20_POLY1305_SHA256"] = tls.TLS_CHACHA20_POLY1305_SHA256 -} diff --git a/plugins/inputs/apcupsd/apcupsd_test.go b/plugins/inputs/apcupsd/apcupsd_test.go index dfad765b3..e749d5137 100644 --- a/plugins/inputs/apcupsd/apcupsd_test.go +++ b/plugins/inputs/apcupsd/apcupsd_test.go @@ -1,5 +1,3 @@ -// +build go1.11 - package apcupsd import ( diff --git a/plugins/inputs/clickhouse/clickhouse.go b/plugins/inputs/clickhouse/clickhouse.go index cf28def66..4336444eb 100644 --- a/plugins/inputs/clickhouse/clickhouse.go +++ b/plugins/inputs/clickhouse/clickhouse.go @@ -205,6 +205,10 @@ func (ch *ClickHouse) Gather(acc telegraf.Accumulator) (err error) { return nil } +func (ch *ClickHouse) Stop() { + ch.client.CloseIdleConnections() +} + func (ch *ClickHouse) clusterIncludeExcludeFilter() string { if len(ch.ClusterInclude) == 0 && len(ch.ClusterExclude) == 0 { return "" diff --git a/plugins/inputs/clickhouse/clickhouse_go1.11.go b/plugins/inputs/clickhouse/clickhouse_go1.11.go deleted file 
mode 100644 index e043dd492..000000000 --- a/plugins/inputs/clickhouse/clickhouse_go1.11.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !go1.12 - -package clickhouse - -// Stop ClickHouse input service -func (ch *ClickHouse) Stop() {} diff --git a/plugins/inputs/clickhouse/clickhouse_go1.12.go b/plugins/inputs/clickhouse/clickhouse_go1.12.go deleted file mode 100644 index 86bb69e2b..000000000 --- a/plugins/inputs/clickhouse/clickhouse_go1.12.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build go1.12 - -package clickhouse - -// Stop ClickHouse input service -func (ch *ClickHouse) Stop() { - ch.client.CloseIdleConnections() -} diff --git a/scripts/ci-1.12.docker b/scripts/ci-1.12.docker deleted file mode 100644 index f3f59349a..000000000 --- a/scripts/ci-1.12.docker +++ /dev/null @@ -1,28 +0,0 @@ -FROM golang:1.12.17 - -RUN chmod -R 755 "$GOPATH" - -RUN DEBIAN_FRONTEND=noninteractive \ - apt update && apt install -y --no-install-recommends \ - autoconf \ - git \ - libtool \ - locales \ - make \ - python-boto \ - rpm \ - ruby \ - ruby-dev \ - zip && \ - rm -rf /var/lib/apt/lists/* - -RUN ln -sf /usr/share/zoneinfo/Etc/UTC /etc/localtime -RUN locale-gen C.UTF-8 || true -ENV LANG=C.UTF-8 - -RUN gem install fpm - -RUN go get -d github.com/golang/dep && \ - cd src/github.com/golang/dep && \ - git checkout -q v0.5.0 && \ - go install -ldflags="-X main.version=v0.5.0" ./cmd/dep diff --git a/scripts/ci-1.13.docker b/scripts/ci-1.13.docker index 9ee601ee1..0f312bc21 100644 --- a/scripts/ci-1.13.docker +++ b/scripts/ci-1.13.docker @@ -1,4 +1,4 @@ -FROM golang:1.13.8 +FROM golang:1.13.11 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/ci-1.9.docker b/scripts/ci-1.14.docker similarity index 66% rename from scripts/ci-1.9.docker rename to scripts/ci-1.14.docker index 0a931c817..64bf18d9e 100644 --- a/scripts/ci-1.9.docker +++ b/scripts/ci-1.14.docker @@ -1,4 +1,4 @@ -FROM golang:1.9.7 +FROM golang:1.14.3 RUN chmod -R 755 "$GOPATH" @@ -21,8 +21,3 @@ RUN locale-gen C.UTF-8 || true ENV LANG=C.UTF-8 RUN gem install fpm - -RUN go get -d github.com/golang/dep && \ - cd src/github.com/golang/dep && \ - git checkout -q v0.5.0 && \ - go install -ldflags="-X main.version=v0.5.0" ./cmd/dep From 40fdafe1937d464a59a9f8db6189d922d9076fc9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 May 2020 15:49:36 -0700 Subject: [PATCH 1793/1815] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 83c3206b8..76c71c0d2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,8 @@ `splunkmetric` serializer, if you are making use of these fields they can be added back with the `tag` option. +- Official packages now built with Go 1.14.3. + #### New Processors - [defaults](/plugins/processors/defaults/README.md) - Contributed by @jregistr From 7ef1d5355181d6f911cf644c84b924abb0c78716 Mon Sep 17 00:00:00 2001 From: Nicolas Filotto Date: Wed, 27 May 2020 00:52:13 +0200 Subject: [PATCH 1794/1815] Allow collection of HTTP Headers in http_response input (#7405) --- plugins/inputs/http_response/README.md | 5 ++ plugins/inputs/http_response/http_response.go | 14 ++++ .../http_response/http_response_test.go | 71 +++++++++++++++++++ 3 files changed, 90 insertions(+) diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md index 2307461ca..f1d1ab2d5 100644 --- a/plugins/inputs/http_response/README.md +++ b/plugins/inputs/http_response/README.md @@ -47,6 +47,11 @@ This input plugin checks HTTP/HTTPS connections. 
# [inputs.http_response.headers] # Host = "github.com" + ## Optional setting to map response http headers into tags + ## If the http header is not present on the request, no corresponding tag will be added + ## If multiple instances of the http header are present, only the first value will be used + # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} + ## Interface to use when dialing an address # interface = "eth0" ``` diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index a6e1d74b5..8382d28ae 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -27,6 +27,7 @@ type HTTPResponse struct { Body string Method string ResponseTimeout internal.Duration + HTTPHeaderTags map[string]string `toml:"http_header_tags"` Headers map[string]string FollowRedirects bool // Absolute path to file with Bearer token @@ -98,6 +99,11 @@ var sampleConfig = ` # [inputs.http_response.headers] # Host = "github.com" + ## Optional setting to map response http headers into tags + ## If the http header is not present on the request, no corresponding tag will be added + ## If multiple instances of the http header are present, only the first value will be used + # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} + ## Interface to use when dialing an address # interface = "eth0" ` @@ -265,6 +271,14 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] resp, err := h.client.Do(request) response_time := time.Since(start).Seconds() + // Add the response headers + for headerName, tag := range h.HTTPHeaderTags { + headerValues, foundHeader := resp.Header[headerName] + if foundHeader && len(headerValues) > 0 { + tags[tag] = headerValues[0] + } + } + // If an error is returned, it means we are dealing with a network error, as // HTTP error codes do not generate errors in the net/http library if err != nil { diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index ac483127d..4a92f805c 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -86,6 +86,11 @@ func setUpTestMux() http.Handler { http.Redirect(w, req, "/good", http.StatusMovedPermanently) }) mux.HandleFunc("/good", func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Server", "MyTestServer") + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "hit the good page!") + }) + mux.HandleFunc("/noheader", func(w http.ResponseWriter, req *http.Request) { fmt.Fprintf(w, "hit the good page!") }) mux.HandleFunc("/jsonresponse", func(w http.ResponseWriter, req *http.Request) { @@ -218,6 +223,72 @@ func TestFields(t *testing.T) { checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) } +func TestHTTPHeaderTags(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + Log: testutil.Logger{}, + Address: ts.URL + "/good", + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + HTTPHeaderTags: map[string]string{"Server": "my_server", "Content-Type": "content_type"}, + Headers: map[string]string{ + "Content-Type": "application/json", + }, + FollowRedirects: true, + } + + var acc testutil.Accumulator + err := h.Gather(&acc) + require.NoError(t, err) + + expectedFields := map[string]interface{}{ + "http_response_code": http.StatusOK, 
"result_type": "success", + "result_code": 0, + "response_time": nil, + "content_length": nil, + } + expectedTags := map[string]interface{}{ + "server": nil, + "method": "GET", + "status_code": "200", + "result": "success", + "my_server": "MyTestServer", + "content_type": "application/json; charset=utf-8", + } + absentFields := []string{"response_string_match"} + checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) + + h = &HTTPResponse{ + Log: testutil.Logger{}, + Address: ts.URL + "/noheader", + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + HTTPHeaderTags: map[string]string{"Server": "my_server", "Content-Type": "content_type"}, + Headers: map[string]string{ + "Content-Type": "application/json", + }, + FollowRedirects: true, + } + + acc = testutil.Accumulator{} + err = h.Gather(&acc) + require.NoError(t, err) + + expectedTags = map[string]interface{}{ + "server": nil, + "method": "GET", + "status_code": "200", + "result": "success", + } + checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) +} + func findInterface() (net.Interface, error) { potential, _ := net.Interfaces() From 7b33ef011a796f2998b78fcdd436931a10d2f0da Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 26 May 2020 15:52:53 -0700 Subject: [PATCH 1795/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 76c71c0d2..74d092c40 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ - [#7489](https://github.com/influxdata/telegraf/pull/7489): Add cluster state integer to mongodb input. - [#7515](https://github.com/influxdata/telegraf/pull/7515): Add option to disable mongodb cluster status. - [#7319](https://github.com/influxdata/telegraf/pull/7319): Add support for battery level monitoring to the fibaro input. +- [#7405](https://github.com/influxdata/telegraf/pull/7405): Allow collection of HTTP Headers in http_response input. 
#### Bugfixes

From 580ac61cf754b960499d83edbc1a73cdd143a097 Mon Sep 17 00:00:00 2001
From: hsinghkalsi <41585091+hsinghkalsi@users.noreply.github.com>
Date: Wed, 27 May 2020 14:24:49 -0400
Subject: [PATCH 1796/1815] Add newrelic output plugin (#7019)

---
 README.md                                 |   1 +
 go.mod                                    |   1 +
 go.sum                                    |   2 +
 plugins/outputs/all/all.go                |   1 +
 plugins/outputs/newrelic/README.md        |  22 +++
 plugins/outputs/newrelic/newrelic.go      | 159 +++++++++++++++++++
 plugins/outputs/newrelic/newrelic_test.go | 180 ++++++++++++++++++++++
 7 files changed, 366 insertions(+)
 create mode 100644 plugins/outputs/newrelic/README.md
 create mode 100644 plugins/outputs/newrelic/newrelic.go
 create mode 100644 plugins/outputs/newrelic/newrelic_test.go

diff --git a/README.md b/README.md
index c749811de..32ed21edb 100644
--- a/README.md
+++ b/README.md
@@ -414,6 +414,7 @@ For documentation on the latest development code see the [documentation index][d
 * [librato](./plugins/outputs/librato)
 * [mqtt](./plugins/outputs/mqtt)
 * [nats](./plugins/outputs/nats)
+* [newrelic](./plugins/outputs/newrelic)
 * [nsq](./plugins/outputs/nsq)
 * [opentsdb](./plugins/outputs/opentsdb)
 * [prometheus](./plugins/outputs/prometheus_client)
diff --git a/go.mod b/go.mod
index 29427b02f..ff764b4d9 100644
--- a/go.mod
+++ b/go.mod
@@ -95,6 +95,7 @@ require (
 	github.com/naoina/go-stringutil v0.1.0 // indirect
 	github.com/nats-io/nats-server/v2 v2.1.4
 	github.com/nats-io/nats.go v1.9.1
+	github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0
 	github.com/nsqio/go-nsq v1.0.7
 	github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029
 	github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
diff --git a/go.sum b/go.sum
index 76862fcfe..aca1b99a8 100644
--- a/go.sum
+++ b/go.sum
@@ -445,6 +445,8 @@ github.com/nats-io/nkeys v0.1.3 h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k=
 github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
 github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
 github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0 h1:W8+lNIfAldCScGiikToSprbf3DCaMXk0VIM9l73BIpY=
+github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0/go.mod h1:G9MqE/cHGv3Hx3qpYhfuyFUsGx2DpVcGi1iJIqTg+JQ=
 github.com/nsqio/go-nsq v1.0.7 h1:O0pIZJYTf+x7cZBA0UMY8WxFG79lYTURmWzAAh48ljY=
 github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito=
 github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go
index 35e0393de..7d37c2208 100644
--- a/plugins/outputs/all/all.go
+++ b/plugins/outputs/all/all.go
@@ -25,6 +25,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/outputs/librato"
 	_ "github.com/influxdata/telegraf/plugins/outputs/mqtt"
 	_ "github.com/influxdata/telegraf/plugins/outputs/nats"
+	_ "github.com/influxdata/telegraf/plugins/outputs/newrelic"
 	_ "github.com/influxdata/telegraf/plugins/outputs/nsq"
 	_ "github.com/influxdata/telegraf/plugins/outputs/opentsdb"
 	_ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client"
diff --git a/plugins/outputs/newrelic/README.md b/plugins/outputs/newrelic/README.md
new file mode 100644
index 000000000..323595711
--- /dev/null
+++ b/plugins/outputs/newrelic/README.md
@@ -0,0 +1,22 @@
+# New Relic output plugin
+
+This plugin writes to New Relic Insights.
+
+```
+[[outputs.newrelic]]
+## New Relic Insights API key
+insights_key = "insights api key"
+
+# metric_prefix if defined, prefixes metric names for easy identification
+# metric_prefix = ""
+
+# harvest timeout, default is 15 seconds
+# timeout = "15s"
+```
+#### Parameters
+
+|Parameter Name|Type|Description|
+|:-|:-|:-|
+| insights_key | Required | Insights API Insert key |
+| metric_prefix | Optional | If defined, prefixes metric names for easy identification |
+| timeout | Optional | If defined, changes harvest timeout |
diff --git a/plugins/outputs/newrelic/newrelic.go b/plugins/outputs/newrelic/newrelic.go
new file mode 100644
index 000000000..07e2569c3
--- /dev/null
+++ b/plugins/outputs/newrelic/newrelic.go
@@ -0,0 +1,159 @@
+package newrelic
+
+// newrelic.go
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/plugins/outputs"
+	"github.com/newrelic/newrelic-telemetry-sdk-go/cumulative"
+	"github.com/newrelic/newrelic-telemetry-sdk-go/telemetry"
+)
+
+// NewRelic nr structure
+type NewRelic struct {
+	harvestor   *telemetry.Harvester
+	dc          *cumulative.DeltaCalculator
+	InsightsKey  string            `toml:"insights_key"`
+	MetricPrefix string            `toml:"metric_prefix"`
+	Timeout      internal.Duration `toml:"timeout"`
+	savedErrors map[int]interface{}
+	errorCount  int
+	Client      http.Client
+}
+
+// Description returns a one-sentence description on the Output
+func (nr *NewRelic) Description() string {
+	return "Send metrics to New Relic metrics endpoint"
+}
+
+// SampleConfig : return default configuration of the Output
+func (nr *NewRelic) SampleConfig() string {
+	return `
+  ## New Relic Insights API key (required)
+  insights_key = "insights api key"
+
+  # metric_prefix if defined, prefixes metric names for easy identification (optional)
+  # metric_prefix = ""
+
+  # harvest timeout, default is 15 seconds
+  # timeout = "15s"
+`
+}
+
+// Connect to the Output
+func (nr *NewRelic) Connect() error {
+	if nr.InsightsKey == "" {
+		return fmt.Errorf("InsightsKey is required for newrelic")
+	}
+	var err error
+	nr.harvestor, err = telemetry.NewHarvester(telemetry.ConfigAPIKey(nr.InsightsKey),
+		telemetry.ConfigHarvestPeriod(0),
+		func(cfg *telemetry.Config) {
+			cfg.Product = "NewRelic-Telegraf-Plugin"
+			cfg.ProductVersion = "1.0"
+			cfg.HarvestTimeout = nr.Timeout.Duration
+			cfg.Client = &nr.Client
+			cfg.ErrorLogger = func(e map[string]interface{}) {
+				var errorString string
+				for k, v := range e {
+					errorString += fmt.Sprintf("%s = %s ", k, v)
+				}
+				nr.errorCount++
+				nr.savedErrors[nr.errorCount] = errorString
+			}
+		})
+	if err != nil {
+		return fmt.Errorf("unable to connect to newrelic %v", err)
+	}
+
+	nr.dc = cumulative.NewDeltaCalculator()
+	return nil
+}
+
+// Close any connections to the Output
+func (nr *NewRelic) Close() error {
+
+	nr.errorCount = 0
+	nr.Client.CloseIdleConnections()
+	return nil
+}
+
+// Write takes in group of points to be written to the Output
+func (nr *NewRelic) Write(metrics []telegraf.Metric) error {
+
+	nr.errorCount = 0
+	nr.savedErrors = make(map[int]interface{})
+
+	for _, metric := range metrics {
+		// create tag map
+		tags := make(map[string]interface{})
+		for _, tag := range metric.TagList() {
+			tags[tag.Key] = tag.Value
+		}
+		for _, field := range metric.FieldList() {
+			var mvalue float64
+			var mname string
+			if nr.MetricPrefix != "" {
+				mname = nr.MetricPrefix + "." + metric.Name() + "." + field.Key
+			} else {
+				mname = metric.Name() + "." + field.Key
+			}
+			switch n := field.Value.(type) {
+			case int64:
+				mvalue = float64(n)
+			case uint64:
+				mvalue = float64(n)
+			case float64:
+				mvalue = float64(n)
+			case bool:
+				mvalue = float64(0)
+				if n {
+					mvalue = float64(1)
+				}
+			case string:
+				// Do not log every time we encounter a string,
+				// we just skip it
+				continue
+			default:
+				return fmt.Errorf("undefined field type: %T", field.Value)
+			}
+
+			switch metric.Type() {
+			case telegraf.Counter:
+				if counter, ok := nr.dc.CountMetric(mname, tags, mvalue, metric.Time()); ok {
+					nr.harvestor.RecordMetric(counter)
+				}
+			default:
+				nr.harvestor.RecordMetric(telemetry.Gauge{
+					Timestamp:  metric.Time(),
+					Value:      mvalue,
+					Name:       mname,
+					Attributes: tags})
+			}
+		}
+	}
+	// By default, the Harvester sends metrics and spans to the New Relic
+	// backend every 5 seconds. You can force data to be sent at any time
+	// using HarvestNow.
+	nr.harvestor.HarvestNow(context.Background())
+
+	// Check if we encountered errors
+	if nr.errorCount != 0 {
+		return fmt.Errorf("unable to harvest metrics %s ", nr.savedErrors[nr.errorCount])
+	}
+	return nil
+}
+
+func init() {
+	outputs.Add("newrelic", func() telegraf.Output {
+		return &NewRelic{
+			Timeout: internal.Duration{Duration: time.Second * 15},
+			Client:  http.Client{},
+		}
+	})
+}
diff --git a/plugins/outputs/newrelic/newrelic_test.go b/plugins/outputs/newrelic/newrelic_test.go
new file mode 100644
index 000000000..aa23950c7
--- /dev/null
+++ b/plugins/outputs/newrelic/newrelic_test.go
@@ -0,0 +1,180 @@
+package newrelic
+
+import (
+	"math"
+	"testing"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/newrelic/newrelic-telemetry-sdk-go/telemetry"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestBasic(t *testing.T) {
+	nr := &NewRelic{
+		MetricPrefix: "Test",
+		InsightsKey:  "12345",
+		Timeout:      internal.Duration{Duration: time.Second * 5},
+	}
+	if testing.Short() {
+		t.Skip("skipping test in short mode.")
+	}
+
+	err := nr.Connect()
+	require.NoError(t, err)
+
+	err = nr.Write(testutil.MockMetrics())
+	assert.Contains(t, err.Error(), "unable to harvest metrics")
+}
+
+func TestNewRelic_Write(t *testing.T) {
+	type args struct {
+		metrics []telegraf.Metric
+	}
+	tests := []struct {
+		name         string
+		metrics      []telegraf.Metric
+		auditMessage string
+		wantErr      bool
+	}{
+		{
+			name:         "Test: Basic mock metric write",
+			metrics:      testutil.MockMetrics(),
+			wantErr:      false,
+			auditMessage: `"metrics":[{"name":"test1.value","type":"gauge","value":1,"timestamp":1257894000000,"attributes":{"tag1":"value1"}}]`,
+		},
+		{
+			name: "Test: Test string ",
+			metrics: []telegraf.Metric{
+				testutil.TestMetric("value1", "test_String"),
+			},
+			wantErr:      false,
+			auditMessage: "",
+		},
+		{
+			name: "Test: Test int64 ",
+			metrics: []telegraf.Metric{
+				testutil.TestMetric(int64(15), "test_int64"),
+			},
+			wantErr:      false,
+			auditMessage: `"metrics":[{"name":"test_int64.value","type":"gauge","value":15,"timestamp":1257894000000,"attributes":{"tag1":"value1"}}]`,
+		},
+		{
+			name: "Test: Test uint64 ",
+			metrics: []telegraf.Metric{
+				testutil.TestMetric(uint64(20), "test_uint64"),
+			},
+			wantErr:      false,
+			auditMessage: `"metrics":[{"name":"test_uint64.value","type":"gauge","value":20,"timestamp":1257894000000,"attributes":{"tag1":"value1"}}]`,
+		},
+		{
+			name: "Test: Test bool true ",
+			metrics: []telegraf.Metric{
+				testutil.TestMetric(bool(true), "test_bool_true"),
+			},
+			wantErr:      false,
+			auditMessage:
`"metrics":[{"name":"test_bool_true.value","type":"gauge","value":1,"timestamp":1257894000000,"attributes":{"tag1":"value1"}}]`, + }, + { + name: "Test: Test bool false ", + metrics: []telegraf.Metric{ + testutil.TestMetric(bool(false), "test_bool_false"), + }, + wantErr: false, + auditMessage: `"metrics":[{"name":"test_bool_false.value","type":"gauge","value":0,"timestamp":1257894000000,"attributes":{"tag1":"value1"}}]`, + }, + { + name: "Test: Test max float64 ", + metrics: []telegraf.Metric{ + testutil.TestMetric(math.MaxFloat64, "test_maxfloat64"), + }, + wantErr: false, + auditMessage: `"metrics":[{"name":"test_maxfloat64.value","type":"gauge","value":1.7976931348623157e+308,"timestamp":1257894000000,"attributes":{"tag1":"value1"}}]`, + }, + { + name: "Test: Test NAN ", + metrics: []telegraf.Metric{ + testutil.TestMetric(math.NaN, "test_NaN"), + }, + wantErr: false, + auditMessage: ``, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var auditLog map[string]interface{} + nr := &NewRelic{} + nr.harvestor, _ = telemetry.NewHarvester( + telemetry.ConfigHarvestPeriod(0), + func(cfg *telemetry.Config) { + cfg.APIKey = "dummyTestKey" + cfg.HarvestPeriod = 0 + cfg.HarvestTimeout = 0 + cfg.AuditLogger = func(e map[string]interface{}) { + auditLog = e + } + }) + err := nr.Write(tt.metrics) + assert.NoError(t, err) + if auditLog["data"] != nil { + assert.Contains(t, auditLog["data"], tt.auditMessage) + } else { + assert.Contains(t, "", tt.auditMessage) + } + + if (err != nil) != tt.wantErr { + t.Errorf("NewRelic.Write() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestNewRelic_Connect(t *testing.T) { + tests := []struct { + name string + newrelic *NewRelic + wantErr bool + }{ + { + name: "Test: No Insights key", + newrelic: &NewRelic{ + MetricPrefix: "prefix", + }, + wantErr: true, + }, + { + name: "Test: Insights key", + newrelic: &NewRelic{ + InsightsKey: "12312133", + MetricPrefix: "prefix", + }, + wantErr: false, + }, + { + name: "Test: Only Insights key", + newrelic: &NewRelic{ + InsightsKey: "12312133", + }, + wantErr: false, + }, + { + name: "Test: Insights key and Timeout", + newrelic: &NewRelic{ + InsightsKey: "12312133", + Timeout: internal.Duration{Duration: time.Second * 5}, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + nr := tt.newrelic + if err := nr.Connect(); (err != nil) != tt.wantErr { + t.Errorf("NewRelic.Connect() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} From 7b0662488543446e51bd2fc3d04d20bfb66f3782 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 27 May 2020 11:52:21 -0700 Subject: [PATCH 1797/1815] Update docs for newrelic output --- CHANGELOG.md | 4 ++++ docs/LICENSE_OF_DEPENDENCIES.md | 1 + plugins/outputs/newrelic/README.md | 29 ++++++++++++++-------------- plugins/outputs/newrelic/newrelic.go | 27 +++++++++++++------------- 4 files changed, 32 insertions(+), 29 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 74d092c40..04e602708 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,10 @@ - [defaults](/plugins/processors/defaults/README.md) - Contributed by @jregistr - [filepath](/plugins/processors/filepath/README.md) - Contributed by @kir4h +#### New Outputs + +- [newrelic](/plugins/outputs/newrelic/README.md) - Contributed by @hsinghkalsi + #### Features - [#6905](https://github.com/influxdata/telegraf/pull/6905): Add commands stats to mongodb input plugin. 
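As a reading aid for the `Write` implementation introduced in the previous patch: every field is flattened into a New Relic metric named `measurement.field`, with `metric_prefix` prepended when set, and tags become attributes. A hypothetical illustration (metric and prefix invented for this example):

```
cpu,host=a usage_idle=98.7     ->  gauge "cpu.usage_idle" = 98.7, attributes {host: "a"}
with metric_prefix = "dc1"     ->  gauge "dc1.cpu.usage_idle" = 98.7, attributes {host: "a"}
```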
diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md
index 4b811d8b7..9e19d74d7 100644
--- a/docs/LICENSE_OF_DEPENDENCIES.md
+++ b/docs/LICENSE_OF_DEPENDENCIES.md
@@ -107,6 +107,7 @@ following works:
 - github.com/nats-io/nats.go [Apache License 2.0](https://github.com/nats-io/nats.go/blob/master/LICENSE)
 - github.com/nats-io/nkeys [Apache License 2.0](https://github.com/nats-io/nkeys/blob/master/LICENSE)
 - github.com/nats-io/nuid [Apache License 2.0](https://github.com/nats-io/nuid/blob/master/LICENSE)
+- github.com/newrelic/newrelic-telemetry-sdk-go [Apache License 2.0](https://github.com/newrelic/newrelic-telemetry-sdk-go/blob/master/LICENSE.md)
 - github.com/nsqio/go-nsq [MIT License](https://github.com/nsqio/go-nsq/blob/master/LICENSE)
 - github.com/openconfig/gnmi [Apache License 2.0](https://github.com/openconfig/gnmi/blob/master/LICENSE)
 - github.com/opencontainers/go-digest [Apache License 2.0](https://github.com/opencontainers/go-digest/blob/master/LICENSE)
diff --git a/plugins/outputs/newrelic/README.md b/plugins/outputs/newrelic/README.md
index 323595711..ae056ed2f 100644
--- a/plugins/outputs/newrelic/README.md
+++ b/plugins/outputs/newrelic/README.md
@@ -1,22 +1,21 @@
 # New Relic output plugin
-
-This plugin writes to New Relic Insights.
-```
+This plugin writes to New Relic Insights using the [Metrics API][].
+
+To use this plugin you must first obtain an [Insights API Key][].
+
+### Configuration
+```toml
 [[outputs.newrelic]]
-## New Relic Insights API key
-insights_key = "insights api key"
+  ## New Relic Insights API key
+  insights_key = "insights api key"
 
-# metric_prefix if defined, prefixes metric names for easy identification
-# metric_prefix = ""
+  ## Prefix to add to metric names for easy identification.
+  # metric_prefix = ""
 
-# harvest timeout, default is 15 seconds
-# timeout = "15s"
+  ## Timeout for writes to the New Relic API.
+  # timeout = "15s"
 ```
-#### Parameters
-
-|Parameter Name|Type|Description|
-|:-|:-|:-|
-| insights_key | Required | Insights API Insert key |
-| metric_prefix | Optional | If defined, prefixes metric names for easy identification |
-| timeout | Optional | If defined, changes harvest timeout |
+[Metrics API]: https://docs.newrelic.com/docs/data-ingest-apis/get-data-new-relic/metric-api/introduction-metric-api
+[Insights API Key]: https://docs.newrelic.com/docs/apis/get-started/intro-apis/types-new-relic-api-keys#user-api-key
diff --git a/plugins/outputs/newrelic/newrelic.go b/plugins/outputs/newrelic/newrelic.go
index 07e2569c3..da000c222 100644
--- a/plugins/outputs/newrelic/newrelic.go
+++ b/plugins/outputs/newrelic/newrelic.go
@@ -16,14 +16,15 @@ import (
 
 // NewRelic nr structure
 type NewRelic struct {
-	harvestor   *telemetry.Harvester
-	dc          *cumulative.DeltaCalculator
 	InsightsKey  string            `toml:"insights_key"`
 	MetricPrefix string            `toml:"metric_prefix"`
 	Timeout      internal.Duration `toml:"timeout"`
-	savedErrors map[int]interface{}
-	errorCount  int
-	Client      http.Client
+
+	harvestor   *telemetry.Harvester
+	dc          *cumulative.DeltaCalculator
+	savedErrors map[int]interface{}
+	errorCount  int
+	Client      http.Client `toml:"-"`
 }
 
 // Description returns a one-sentence description on the Output
@@ -34,14 +35,14 @@ func (nr *NewRelic) Description() string {
 // SampleConfig : return default configuration of the Output
 func (nr *NewRelic) SampleConfig() string {
 	return `
-  ## New Relic Insights API key (required)
-  insights_key = "insights api key"
+  ## New Relic Insights API key
+  insights_key = "insights api key"
 
-  # metric_prefix if defined, prefixes metric names for easy identification (optional)
-  # metric_prefix = ""
-
-  # harvest timeout, default is 15 seconds
-  # timeout = "15s"
+  ## Prefix to add to metric names for easy identification.
+  # metric_prefix = ""
+
+  ## Timeout for writes to the New Relic API.
+  # timeout = "15s"
 `
 }
 
@@ -77,7 +78,6 @@ func (nr *NewRelic) Connect() error {
 
 // Close any connections to the Output
 func (nr *NewRelic) Close() error {
-
 	nr.errorCount = 0
 	nr.Client.CloseIdleConnections()
 	return nil
@@ -85,7 +85,6 @@ func (nr *NewRelic) Close() error {
 
 // Write takes in group of points to be written to the Output
 func (nr *NewRelic) Write(metrics []telegraf.Metric) error {
-
 	nr.errorCount = 0
 	nr.savedErrors = make(map[int]interface{})
 
From a438678d5bbcc124b065006c407fbd47fb5efb6d Mon Sep 17 00:00:00 2001
From: Steven Soroka
Date: Wed, 27 May 2020 14:58:58 -0400
Subject: [PATCH 1798/1815] make sure parse error includes offending text
 (#7561)

---
 plugins/parsers/influx/parser.go      | 15 ++++++++++++++-
 plugins/parsers/influx/parser_test.go |  4 ++--
 2 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/plugins/parsers/influx/parser.go b/plugins/parsers/influx/parser.go
index 68db9128f..620104ac6 100644
--- a/plugins/parsers/influx/parser.go
+++ b/plugins/parsers/influx/parser.go
@@ -38,7 +38,20 @@ func (e *ParseError) Error() string {
 		buffer = buffer[:eol]
 	}
 	if len(buffer) > maxErrorBufferSize {
-		buffer = buffer[:maxErrorBufferSize] + "..."
+		startEllipsis := true
+		offset := e.Offset - e.LineOffset
+		start := offset - maxErrorBufferSize
+		if start < 0 {
+			startEllipsis = false
+			start = 0
+		}
+		// if we trimmed it the column won't line up. it'll always be the last character,
+		// because the parser doesn't continue past it, but point it out anyway so
+		// it's obvious where the issue is.
+		buffer = buffer[start:offset] + "<-- here"
+		if startEllipsis {
+			buffer = "..."
+ buffer + } } return fmt.Sprintf("metric parse error: %s at %d:%d: %q", e.msg, e.LineNumber, e.Column, buffer) } diff --git a/plugins/parsers/influx/parser_test.go b/plugins/parsers/influx/parser_test.go index 3104c1f3f..368ad277d 100644 --- a/plugins/parsers/influx/parser_test.go +++ b/plugins/parsers/influx/parser_test.go @@ -790,7 +790,7 @@ func TestParserErrorString(t *testing.T) { { name: "buffer too long", input: []byte("cpu " + strings.Repeat("ab", maxErrorBufferSize) + "=invalid\ncpu value=42"), - errString: "metric parse error: expected field at 1:2054: \"cpu " + strings.Repeat("ab", maxErrorBufferSize)[:maxErrorBufferSize-4] + "...\"", + errString: "metric parse error: expected field at 1:2054: \"...b" + strings.Repeat("ab", maxErrorBufferSize/2-1) + "=<-- here\"", }, { name: "multiple line error", @@ -834,7 +834,7 @@ func TestStreamParserErrorString(t *testing.T) { name: "buffer too long", input: []byte("cpu " + strings.Repeat("ab", maxErrorBufferSize) + "=invalid\ncpu value=42"), errs: []string{ - "metric parse error: expected field at 1:2054: \"cpu " + strings.Repeat("ab", maxErrorBufferSize)[:maxErrorBufferSize-4] + "...\"", + "metric parse error: expected field at 1:2054: \"...b" + strings.Repeat("ab", maxErrorBufferSize/2-1) + "=<-- here\"", }, }, { From 71b0b962412d0e82fe3ac9101812d53e6227d11d Mon Sep 17 00:00:00 2001 From: reimda Date: Wed, 27 May 2020 15:42:59 -0600 Subject: [PATCH 1799/1815] Add processor to look up service name by port (#7540) --- Makefile | 1 + plugins/processors/all/all.go | 1 + plugins/processors/port_name/README.md | 26 ++ plugins/processors/port_name/port_name.go | 174 ++++++++++++ .../processors/port_name/port_name_test.go | 261 ++++++++++++++++++ plugins/processors/port_name/services_path.go | 12 + .../port_name/services_path_notwindows.go | 7 + 7 files changed, 482 insertions(+) create mode 100644 plugins/processors/port_name/README.md create mode 100644 plugins/processors/port_name/port_name.go create mode 100644 plugins/processors/port_name/port_name_test.go create mode 100644 plugins/processors/port_name/services_path.go create mode 100644 plugins/processors/port_name/services_path_notwindows.go diff --git a/Makefile b/Makefile index e7889e89d..2b2e9668e 100644 --- a/Makefile +++ b/Makefile @@ -72,6 +72,7 @@ test-windows: go test -short ./plugins/inputs/win_services/... go test -short ./plugins/inputs/procstat/... go test -short ./plugins/inputs/ntpq/... + go test -short ./plugins/processors/port_name/... .PHONY: vet vet: diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go index f917bf6a6..dbf8a12e5 100644 --- a/plugins/processors/all/all.go +++ b/plugins/processors/all/all.go @@ -11,6 +11,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/processors/override" _ "github.com/influxdata/telegraf/plugins/processors/parser" _ "github.com/influxdata/telegraf/plugins/processors/pivot" + _ "github.com/influxdata/telegraf/plugins/processors/port_name" _ "github.com/influxdata/telegraf/plugins/processors/printer" _ "github.com/influxdata/telegraf/plugins/processors/regex" _ "github.com/influxdata/telegraf/plugins/processors/rename" diff --git a/plugins/processors/port_name/README.md b/plugins/processors/port_name/README.md new file mode 100644 index 000000000..c078fe1c4 --- /dev/null +++ b/plugins/processors/port_name/README.md @@ -0,0 +1,26 @@ +# Port Name Lookup Processor Plugin + +Use the `port_name` processor to convert a tag containing a well-known port number to the registered service name. 
+ +Tag can contain a number ("80") or number and protocol separated by slash ("443/tcp"). If protocol is not provided it defaults to tcp but can be changed with the default_protocol setting. + +### Configuration + +```toml +[[processors.port_name]] + ## Name of tag holding the port number + # tag = "port" + + ## Name of output tag where service name will be added + # dest = "service" + + ## Default tcp or udp + # default_protocol = "tcp" +``` + +### Example + +```diff +- measurement,port=80 field=123 1560540094000000000 ++ measurement,port=80,service=http field=123 1560540094000000000 +``` diff --git a/plugins/processors/port_name/port_name.go b/plugins/processors/port_name/port_name.go new file mode 100644 index 000000000..50c893e60 --- /dev/null +++ b/plugins/processors/port_name/port_name.go @@ -0,0 +1,174 @@ +package portname + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" +) + +var sampleConfig = ` +[[processors.port_name]] + ## Name of tag holding the port number + # tag = "port" + + ## Name of output tag where service name will be added + # dest = "service" + + ## Default tcp or udp + # default_protocol = "tcp" +` + +type sMap map[string]map[int]string // "https" == services["tcp"][443] + +var services sMap + +type PortName struct { + SourceTag string `toml:"tag"` + DestTag string `toml:"dest"` + DefaultProtocol string `toml:"default_protocol"` + + Log telegraf.Logger `toml:"-"` +} + +func (d *PortName) SampleConfig() string { + return sampleConfig +} + +func (d *PortName) Description() string { + return "Given a tag of a TCP or UDP port number, add a tag of the service name looked up in the system services file" +} + +func readServicesFile() { + file, err := os.Open(servicesPath()) + if err != nil { + return + } + defer file.Close() + + services = readServices(file) +} + +// Read the services file into a map. +// +// This function takes a similar approach to parsing as the go +// standard library (see src/net/port_unix.go in golang source) but +// maps protocol and port number to service name, not protocol and +// service to port number. 
+func readServices(r io.Reader) sMap { + services = make(sMap) + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + // "http 80/tcp www www-http # World Wide Web HTTP" + if i := strings.Index(line, "#"); i >= 0 { + line = line[:i] + } + f := strings.Fields(line) + if len(f) < 2 { + continue + } + service := f[0] // "http" + portProto := f[1] // "80/tcp" + portProtoSlice := strings.SplitN(portProto, "/", 2) + if len(portProtoSlice) < 2 { + continue + } + port, err := strconv.Atoi(portProtoSlice[0]) // "80" + if err != nil || port <= 0 { + continue + } + proto := portProtoSlice[1] // "tcp" + proto = strings.ToLower(proto) + + protoMap, ok := services[proto] + if !ok { + protoMap = make(map[int]string) + services[proto] = protoMap + } + protoMap[port] = service + } + return services +} + +func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric { + for _, m := range metrics { + portProto, ok := m.GetTag(d.SourceTag) + if !ok { + // Nonexistent tag + continue + } + portProtoSlice := strings.SplitN(portProto, "/", 2) + l := len(portProtoSlice) + + if l == 0 { + // Empty tag + d.Log.Errorf("empty port tag: %v", d.SourceTag) + continue + } + + var port int + if l > 0 { + var err error + val := portProtoSlice[0] + port, err = strconv.Atoi(val) + if err != nil { + // Can't convert port to string + d.Log.Errorf("error converting port to integer: %v", val) + continue + } + } + + proto := d.DefaultProtocol + if l > 1 && len(portProtoSlice[1]) > 0 { + proto = portProtoSlice[1] + } + proto = strings.ToLower(proto) + + protoMap, ok := services[proto] + if !ok { + // Unknown protocol + // + // Protocol is normally tcp or udp. The services file + // normally has entries for both, so our map does too. If + // not, it's very likely the source tag or the services + // file doesn't make sense. + d.Log.Errorf("protocol not found in services map: %v", proto) + continue + } + + service, ok := protoMap[port] + if !ok { + // Unknown port + // + // Not all ports are named so this isn't an error, but + // it's helpful to know when debugging. 
+ d.Log.Debugf("port not found in services map: %v", port) + continue + } + + m.AddTag(d.DestTag, service) + } + + return metrics +} + +func (h *PortName) Init() error { + services = make(sMap) + readServicesFile() + return nil +} + +func init() { + processors.Add("port_name", func() telegraf.Processor { + return &PortName{ + SourceTag: "port", + DestTag: "service", + DefaultProtocol: "tcp", + } + }) +} diff --git a/plugins/processors/port_name/port_name_test.go b/plugins/processors/port_name/port_name_test.go new file mode 100644 index 000000000..b58f95a9e --- /dev/null +++ b/plugins/processors/port_name/port_name_test.go @@ -0,0 +1,261 @@ +package portname + +import ( + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +var fakeServices = ` +http 80/tcp www # WorldWideWeb HTTP +https 443/tcp # http protocol over TLS/SSL +tftp 69/udp` + +func TestReadServicesFile(t *testing.T) { + readServicesFile() + require.NotZero(t, len(services)) +} + +func TestFakeServices(t *testing.T) { + r := strings.NewReader(fakeServices) + m := readServices(r) + require.Equal(t, sMap{"tcp": {80: "http", 443: "https"}, "udp": {69: "tftp"}}, m) +} + +func TestTable(t *testing.T) { + var tests = []struct { + name string + tag string + dest string + prot string + input []telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "ordinary tcp default", + tag: "port", + dest: "service", + prot: "tcp", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "443", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "443", + "service": "https", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + }, + { + name: "force udp default", + tag: "port", + dest: "service", + prot: "udp", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "69", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "69", + "service": "tftp", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + }, + { + name: "override default protocol", + tag: "port", + dest: "service", + prot: "foobar", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "80/tcp", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "80/tcp", + "service": "http", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + }, + { + name: "multiple metrics, multiple protocols", + tag: "port", + dest: "service", + prot: "tcp", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "80", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + testutil.MustMetric( + "meas", + map[string]string{ + "port": "69/udp", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "80", + "service": "http", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + testutil.MustMetric( + "meas", + map[string]string{ + "port": "69/udp", + "service": "tftp", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + }, + { + name: "rename source and destination tags", + tag: "foo", + dest: "bar", 
+ prot: "tcp", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "foo": "80", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "foo": "80", + "bar": "http", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + }, + { + name: "unknown port", + tag: "port", + dest: "service", + prot: "tcp", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "9999", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "9999", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + }, + { + name: "don't mix up protocols", + tag: "port", + dest: "service", + prot: "udp", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "80", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "80", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + }, + } + + r := strings.NewReader(fakeServices) + services = readServices(r) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := PortName{ + SourceTag: tt.tag, + DestTag: tt.dest, + DefaultProtocol: tt.prot, + Log: testutil.Logger{}, + } + + actual := p.Apply(tt.input...) + + testutil.RequireMetricsEqual(t, tt.expected, actual) + }) + } +} diff --git a/plugins/processors/port_name/services_path.go b/plugins/processors/port_name/services_path.go new file mode 100644 index 000000000..c8cf73d14 --- /dev/null +++ b/plugins/processors/port_name/services_path.go @@ -0,0 +1,12 @@ +// +build windows + +package portname + +import ( + "os" + "path/filepath" +) + +func servicesPath() string { + return filepath.Join(os.Getenv("WINDIR"), `system32\drivers\etc\services`) +} diff --git a/plugins/processors/port_name/services_path_notwindows.go b/plugins/processors/port_name/services_path_notwindows.go new file mode 100644 index 000000000..5097bfa9c --- /dev/null +++ b/plugins/processors/port_name/services_path_notwindows.go @@ -0,0 +1,7 @@ +// +build !windows + +package portname + +func servicesPath() string { + return "/etc/services" +} From d7bb4a931b84e972601c5c4fa72ee841b4b80104 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Wed, 27 May 2020 15:42:33 -0600 Subject: [PATCH 1800/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 04e602708..681353dd3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,7 @@ - [#7515](https://github.com/influxdata/telegraf/pull/7515): Add option to disable mongodb cluster status. - [#7319](https://github.com/influxdata/telegraf/pull/7319): Add support for battery level monitoring to the fibaro input. - [#7405](https://github.com/influxdata/telegraf/pull/7405): Allow collection of HTTP Headers in http_response input. +- [#7540](https://github.com/influxdata/telegraf/pull/7540): Add processor to look up service name by port. 
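To make the #7540 entry concrete: the processor resolves a port tag against the system services database, honoring an optional `/protocol` suffix on the tag value. A small usage sketch based on the plugin's own tests (the measurement name is invented for this example):

```toml
[[processors.port_name]]
  tag = "port"
  dest = "service"
  default_protocol = "udp"
```

```diff
- netstat,port=69 connections=3i 1560540094000000000
+ netstat,port=69,service=tftp connections=3i 1560540094000000000
```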
#### Bugfixes From 430854f6dec770817f0c4ec688df37f7bb6f470e Mon Sep 17 00:00:00 2001 From: Nicolas Filotto Date: Thu, 28 May 2020 20:05:57 +0200 Subject: [PATCH 1801/1815] Fix segmentation violation on connection failed (#7593) --- plugins/inputs/http_response/http_response.go | 16 +++++------ .../http_response/http_response_test.go | 27 +++++++++++++++++++ 2 files changed, 35 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index 8382d28ae..bc9452efc 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -271,14 +271,6 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] resp, err := h.client.Do(request) response_time := time.Since(start).Seconds() - // Add the response headers - for headerName, tag := range h.HTTPHeaderTags { - headerValues, foundHeader := resp.Header[headerName] - if foundHeader && len(headerValues) > 0 { - tags[tag] = headerValues[0] - } - } - // If an error in returned, it means we are dealing with a network error, as // HTTP error codes do not generate errors in the net/http library if err != nil { @@ -306,6 +298,14 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] // required by the net/http library defer resp.Body.Close() + // Add the response headers + for headerName, tag := range h.HTTPHeaderTags { + headerValues, foundHeader := resp.Header[headerName] + if foundHeader && len(headerValues) > 0 { + tags[tag] = headerValues[0] + } + } + // Set log the HTTP response code tags["status_code"] = strconv.Itoa(resp.StatusCode) fields["http_response_code"] = resp.StatusCode diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 4a92f805c..9986ddefc 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -287,6 +287,33 @@ func TestHTTPHeaderTags(t *testing.T) { "result": "success", } checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) + + // Connection failed + h = &HTTPResponse{ + Log: testutil.Logger{}, + Address: "https:/nonexistent.nonexistent", // Any non-routable IP works here + Body: "", + Method: "GET", + ResponseTimeout: internal.Duration{Duration: time.Second * 5}, + HTTPHeaderTags: map[string]string{"Server": "my_server", "Content-Type": "content_type"}, + FollowRedirects: false, + } + + acc = testutil.Accumulator{} + err = h.Gather(&acc) + require.NoError(t, err) + + expectedFields = map[string]interface{}{ + "result_type": "connection_failed", + "result_code": 3, + } + expectedTags = map[string]interface{}{ + "server": nil, + "method": "GET", + "result": "connection_failed", + } + absentFields = []string{"http_response_code", "response_time", "content_length", "response_string_match"} + checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) } func findInterface() (net.Interface, error) { From 13da5e0802ae282e7c7864b7852ef1a9f3157708 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 28 May 2020 15:45:08 -0700 Subject: [PATCH 1802/1815] Add github.com/inabagumi/twitter-telegraf-plugin to list of external plugins --- EXTERNAL_PLUGINS.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index 273c33fbb..cba70f9c2 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -4,5 +4,6 @@ This is a list of plugins that can 
be compiled outside of Telegraf and used via Pull requests welcome. -## Inputs -- [rand](https://github.com/ssoroka/rand) - Generate random numbers \ No newline at end of file +## Inputs +- [rand](https://github.com/ssoroka/rand) - Generate random numbers +- [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts From 573f14460740327a129b28cd0d0298dda370673c Mon Sep 17 00:00:00 2001 From: kelseiv <47797004+kelseiv@users.noreply.github.com> Date: Fri, 29 May 2020 15:32:46 -0700 Subject: [PATCH 1803/1815] Update AGGREGATORS_AND_PROCESSORS.md (#7599) --- docs/AGGREGATORS_AND_PROCESSORS.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/docs/AGGREGATORS_AND_PROCESSORS.md b/docs/AGGREGATORS_AND_PROCESSORS.md index 4b5c9f1a6..7be34aed5 100644 --- a/docs/AGGREGATORS_AND_PROCESSORS.md +++ b/docs/AGGREGATORS_AND_PROCESSORS.md @@ -64,7 +64,4 @@ Since aggregates are created for each measurement, field, and unique tag combina the plugin receives, you can make use of `taginclude` to group aggregates by specific tags only. -**NOTE** That since aggregators only aggregate metrics within their period, that -historical data is not supported. In other words, if your metric timestamp is more -than `now() - period` in the past, it will not be aggregated. If this is a feature -that you need, please comment on this [github issue](https://github.com/influxdata/telegraf/issues/1992) +**Note:** Aggregator plugins only aggregate metrics within their periods (`now() - period`). Data with a timestamp earlier than `now() - period` cannot be included. From 4e93b8708555ccbe534f8c781c3b7cbfc9c81bb5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 1 Jun 2020 15:26:20 -0700 Subject: [PATCH 1804/1815] Add support for once mode; run processors and aggregators during test (#7474) --- agent/agent.go | 292 ++++++++++++++++++++++++++++---------- cmd/telegraf/telegraf.go | 10 +- internal/usage.go | 7 +- internal/usage_windows.go | 7 +- models/running_output.go | 4 + 5 files changed, 235 insertions(+), 85 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 9ac51471a..5795eb0d4 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -142,110 +142,252 @@ func (a *Agent) Run(ctx context.Context) error { return nil } -// Test runs the inputs once and prints the output to stdout in line protocol. -func (a *Agent) Test(ctx context.Context, waitDuration time.Duration) error { - var wg sync.WaitGroup - metricC := make(chan telegraf.Metric) - nulC := make(chan telegraf.Metric) - defer func() { - close(metricC) - close(nulC) - wg.Wait() - }() - - wg.Add(1) - go func() { - defer wg.Done() - +// Test runs the inputs, processors and aggregators for a single gather and +// writes the metrics to stdout. 
+func (a *Agent) Test(ctx context.Context, wait time.Duration) error { + outputF := func(src <-chan telegraf.Metric) { s := influx.NewSerializer() s.SetFieldSortOrder(influx.SortFields) - for metric := range metricC { + + for metric := range src { octets, err := s.Serialize(metric) if err == nil { fmt.Print("> ", string(octets)) } metric.Reject() } - }() - - wg.Add(1) - go func() { - defer wg.Done() - for range nulC { - } - }() - - hasServiceInputs := false - for _, input := range a.Config.Inputs { - if _, ok := input.Input.(telegraf.ServiceInput); ok { - hasServiceInputs = true - break - } } + err := a.test(ctx, wait, outputF) + if err != nil { + return err + } + + if models.GlobalGatherErrors.Get() != 0 { + return fmt.Errorf("input plugins recorded %d errors", models.GlobalGatherErrors.Get()) + } + return nil + +} + +// Once runs the full agent for a single gather. +func (a *Agent) Once(ctx context.Context, wait time.Duration) error { + outputF := func(src <-chan telegraf.Metric) { + interval := a.Config.Agent.FlushInterval.Duration + + ctx, cancel := context.WithCancel(context.Background()) + + var wg sync.WaitGroup + for _, output := range a.Config.Outputs { + interval := interval + // Overwrite agent flush_interval if this plugin has its own. + if output.Config.FlushInterval != 0 { + interval = output.Config.FlushInterval + } + + jitter := 0 * time.Second + + ticker := NewRollingTicker(interval, jitter) + defer ticker.Stop() + + wg.Add(1) + go func(output *models.RunningOutput) { + defer wg.Done() + a.flushLoop(ctx, output, ticker) + }(output) + } + + for metric := range src { + for i, output := range a.Config.Outputs { + if i == len(a.Config.Outputs)-1 { + output.AddMetric(metric) + } else { + output.AddMetric(metric.Copy()) + } + } + } + + cancel() + wg.Wait() + } + + err := a.test(ctx, wait, outputF) + if err != nil { + return err + } + + if models.GlobalGatherErrors.Get() != 0 { + return fmt.Errorf("input plugins recorded %d errors", models.GlobalGatherErrors.Get()) + } + + unsent := 0 + for _, output := range a.Config.Outputs { + unsent += output.BufferLength() + } + if unsent != 0 { + return fmt.Errorf("output plugins unable to send %d metrics", unsent) + } + return nil +} + +// Test runs the agent and performs a single gather sending output to the +// outputF. After gathering pauses for the wait duration to allow service +// inputs to run. +func (a *Agent) test(ctx context.Context, wait time.Duration, outputF func(<-chan telegraf.Metric)) error { log.Printf("D! [agent] Initializing plugins") err := a.initPlugins() if err != nil { return err } - if hasServiceInputs { - log.Printf("D! [agent] Starting service inputs") - err := a.startServiceInputs(ctx, metricC) - if err != nil { - return err - } + log.Printf("D! [agent] Connecting outputs") + err = a.connectOutputs(ctx) + if err != nil { + return err } - hasErrors := false - for _, input := range a.Config.Inputs { - select { - case <-ctx.Done(): - return nil - default: - break - } + inputC := make(chan telegraf.Metric, 100) + procC := make(chan telegraf.Metric, 100) + outputC := make(chan telegraf.Metric, 100) - acc := NewAccumulator(input, metricC) - acc.SetPrecision(a.Precision()) + startTime := time.Now() - // Special instructions for some inputs. cpu, for example, needs to be - // run twice in order to return cpu usage percentages. 
- switch input.Config.Name { - case "cpu", "mongodb", "procstat": - nulAcc := NewAccumulator(input, nulC) - nulAcc.SetPrecision(a.Precision()) - if err := input.Input.Gather(nulAcc); err != nil { - acc.AddError(err) - hasErrors = true + var wg sync.WaitGroup + + src := inputC + dst := inputC + + wg.Add(1) + go func(dst chan telegraf.Metric) { + defer wg.Done() + + a.testRunInputs(ctx, wait, dst) + + close(dst) + log.Printf("D! [agent] Input channel closed") + }(dst) + + src = dst + + if len(a.Config.Processors) > 0 { + dst = procC + + wg.Add(1) + go func(src, dst chan telegraf.Metric) { + defer wg.Done() + + err := a.runProcessors(src, dst) + if err != nil { + log.Printf("E! [agent] Error running processors: %v", err) } + close(dst) + log.Printf("D! [agent] Processor channel closed") + }(src, dst) - time.Sleep(500 * time.Millisecond) - if err := input.Input.Gather(acc); err != nil { - acc.AddError(err) - hasErrors = true + src = dst + } + + if len(a.Config.Aggregators) > 0 { + dst = outputC + + wg.Add(1) + go func(src, dst chan telegraf.Metric) { + defer wg.Done() + + err := a.runAggregators(startTime, src, dst) + if err != nil { + log.Printf("E! [agent] Error running aggregators: %v", err) } - default: - if err := input.Input.Gather(acc); err != nil { - acc.AddError(err) - hasErrors = true - } - } + close(dst) + log.Printf("D! [agent] Output channel closed") + }(src, dst) + + src = dst } - if hasServiceInputs { - log.Printf("D! [agent] Waiting for service inputs") - internal.SleepContext(ctx, waitDuration) - log.Printf("D! [agent] Stopping service inputs") - a.stopServiceInputs() - } + wg.Add(1) + go func(src <-chan telegraf.Metric) { + defer wg.Done() + outputF(src) + }(src) + + wg.Wait() + + log.Printf("D! [agent] Closing outputs") + a.closeOutputs() + + log.Printf("D! [agent] Stopped Successfully") - if hasErrors { - return fmt.Errorf("One or more input plugins had an error") - } return nil } +func (a *Agent) testRunInputs( + ctx context.Context, + wait time.Duration, + dst chan<- telegraf.Metric, +) { + log.Printf("D! [agent] Starting service inputs") + for _, input := range a.Config.Inputs { + if si, ok := input.Input.(telegraf.ServiceInput); ok { + // Service input plugins are not subject to timestamp rounding. + // This only applies to the accumulator passed to Start(), the + // Gather() accumulator does apply rounding according to the + // precision agent setting. + acc := NewAccumulator(input, dst) + acc.SetPrecision(time.Nanosecond) + + err := si.Start(acc) + if err != nil { + acc.AddError(err) + si.Stop() + continue + } + } + } + + nul := make(chan telegraf.Metric) + go func() { + for range nul { + } + }() + + var wg sync.WaitGroup + for _, input := range a.Config.Inputs { + wg.Add(1) + go func(input *models.RunningInput) { + defer wg.Done() + + // Run plugins that require multiple gathers to calculate rate + // and delta metrics twice. + switch input.Config.Name { + case "cpu", "mongodb", "procstat": + nulAcc := NewAccumulator(input, nul) + nulAcc.SetPrecision(a.Precision()) + if err := input.Input.Gather(nulAcc); err != nil { + nulAcc.AddError(err) + } + + time.Sleep(500 * time.Millisecond) + } + + acc := NewAccumulator(input, dst) + acc.SetPrecision(a.Precision()) + + if err := input.Input.Gather(acc); err != nil { + acc.AddError(err) + } + }(input) + } + wg.Wait() + close(nul) + + internal.SleepContext(ctx, wait) + + log.Printf("D! [agent] Stopping service inputs") + a.stopServiceInputs() + +} + // runInputs starts and triggers the periodic gather for Inputs. 
// // When the context is done the timers are stopped and this function returns diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 4f51bc2e1..7e0b4ec1c 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -67,6 +67,7 @@ var fServiceDisplayName = flag.String("service-display-name", "Telegraf Data Col var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)") var fPlugins = flag.String("plugin-directory", "", "path to directory containing external plugins") +var fRunOnce = flag.Bool("once", false, "run one gather and exit") var ( version string @@ -169,9 +170,14 @@ func runAgent(ctx context.Context, logger.SetupLogging(logConfig) + if *fRunOnce { + wait := time.Duration(*fTestWait) * time.Second + return ag.Once(ctx, wait) + } + if *fTest || *fTestWait != 0 { - testWaitDuration := time.Duration(*fTestWait) * time.Second - return ag.Test(ctx, testWaitDuration) + wait := time.Duration(*fTestWait) * time.Second + return ag.Test(ctx, wait) } log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " ")) diff --git a/internal/usage.go b/internal/usage.go index b0df62a6f..6eff30e6b 100644 --- a/internal/usage.go +++ b/internal/usage.go @@ -32,11 +32,10 @@ The commands & flags are: Valid values are 'agent', 'global_tags', 'outputs', 'processors', 'aggregators' and 'inputs' --sample-config print out full sample configuration - --test enable test mode: gather metrics, print them out, - and exit. Note: Test mode only runs inputs, not - processors, aggregators, or outputs + --once enable once mode: gather metrics once, write them, and exit + --test enable test mode: gather metrics once and print them --test-wait wait up to this many seconds for service - inputs to complete in test mode + inputs to complete in test or once mode --usage print usage for a plugin, ie, 'telegraf --usage mysql' --version display the version and exit diff --git a/internal/usage_windows.go b/internal/usage_windows.go index e205d6c1f..7fee6a1f1 100644 --- a/internal/usage_windows.go +++ b/internal/usage_windows.go @@ -29,11 +29,10 @@ The commands & flags are: --section-filter filter config sections to output, separator is : Valid values are 'agent', 'global_tags', 'outputs', 'processors', 'aggregators' and 'inputs' - --test enable test mode: gather metrics, print them out, - and exit. 
Note: Test mode only runs inputs, not - processors, aggregators, or outputs + --once enable once mode: gather metrics once, write them, and exit + --test enable test mode: gather metrics once and print them --test-wait wait up to this many seconds for service - inputs to complete in test mode + inputs to complete in test or once mode --usage print usage for a plugin, ie, 'telegraf --usage mysql' --version display the version and exit diff --git a/models/running_output.go b/models/running_output.go index 256c18715..452ab796b 100644 --- a/models/running_output.go +++ b/models/running_output.go @@ -261,3 +261,7 @@ func (r *RunningOutput) LogBufferStatus() { func (r *RunningOutput) Log() telegraf.Logger { return r.log } + +func (r *RunningOutput) BufferLength() int { + return r.buffer.Len() +} From f27b709efa07c5890cdf88d7470b7522bc5faa10 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 1 Jun 2020 17:12:22 -0700 Subject: [PATCH 1805/1815] Update changelog --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 681353dd3..4672c81de 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,9 @@ `splunkmetric` serializer, if you are making use of these fields they can be added back with the `tag` option. +- Telegraf's `--test` mode now runs processors and aggregators before printing + metrics. + - Official packages now built with Go 1.14.3. #### New Processors @@ -50,6 +53,8 @@ - [#7319](https://github.com/influxdata/telegraf/pull/7319): Add support for battery level monitoring to the fibaro input. - [#7405](https://github.com/influxdata/telegraf/pull/7405): Allow collection of HTTP Headers in http_response input. - [#7540](https://github.com/influxdata/telegraf/pull/7540): Add processor to look up service name by port. +- [#7474](https://github.com/influxdata/telegraf/pull/7474): Add new once mode that write to outputs and exits. +- [#7474](https://github.com/influxdata/telegraf/pull/7474): Run processors and aggregators during test mode. 
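For reference, combining the new flag with the existing wait option gives a one-shot invocation like this sketch (the config path is a placeholder):

```
telegraf --config /etc/telegraf/telegraf.conf --once --test-wait 10
```

As implemented in `Agent.Once` above, this gathers once, runs processors and aggregators, flushes to the configured outputs, and exits with an error if any input recorded gather errors or any output was left with unsent metrics.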
#### Bugfixes From 1e7f714b2b708610e86a3756cf56026563fd9b0a Mon Sep 17 00:00:00 2001 From: Yamagishi Kazutoshi Date: Tue, 2 Jun 2020 22:29:57 +0900 Subject: [PATCH 1806/1815] Add support for env variables to shim config (#7603) --- plugins/inputs/execd/shim/goshim.go | 25 +++++++-- plugins/inputs/execd/shim/shim_test.go | 54 +++++++++++++++++++ .../inputs/execd/shim/testdata/plugin.conf | 4 ++ 3 files changed, 79 insertions(+), 4 deletions(-) create mode 100644 plugins/inputs/execd/shim/testdata/plugin.conf diff --git a/plugins/inputs/execd/shim/goshim.go b/plugins/inputs/execd/shim/goshim.go index 3741d2b80..4c1589b48 100644 --- a/plugins/inputs/execd/shim/goshim.go +++ b/plugins/inputs/execd/shim/goshim.go @@ -9,6 +9,7 @@ import ( "io/ioutil" "os" "os/signal" + "strings" "sync" "syscall" "time" @@ -23,9 +24,13 @@ import ( type empty struct{} var ( - stdout io.Writer = os.Stdout - stdin io.Reader = os.Stdin - forever = 100 * 365 * 24 * time.Hour + stdout io.Writer = os.Stdout + stdin io.Reader = os.Stdin + forever = 100 * 365 * 24 * time.Hour + envVarEscaper = strings.NewReplacer( + `"`, `\"`, + `\`, `\\`, + ) ) const ( @@ -257,11 +262,13 @@ func LoadConfig(filePath *string) ([]telegraf.Input, error) { return nil, err } + s := expandEnvVars(b) + conf := struct { Inputs map[string][]toml.Primitive }{} - md, err := toml.Decode(string(b), &conf) + md, err := toml.Decode(s, &conf) if err != nil { return nil, err } @@ -274,6 +281,16 @@ func LoadConfig(filePath *string) ([]telegraf.Input, error) { return loadedInputs, err } +func expandEnvVars(contents []byte) string { + return os.Expand(string(contents), getEnv) +} + +func getEnv(key string) string { + v := os.Getenv(key) + + return envVarEscaper.Replace(v) +} + func loadConfigIntoInputs(md toml.MetaData, inputConfigs map[string][]toml.Primitive) ([]telegraf.Input, error) { renderedInputs := []telegraf.Input{} diff --git a/plugins/inputs/execd/shim/shim_test.go b/plugins/inputs/execd/shim/shim_test.go index 498ef4ab5..5fd79895f 100644 --- a/plugins/inputs/execd/shim/shim_test.go +++ b/plugins/inputs/execd/shim/shim_test.go @@ -4,6 +4,7 @@ import ( "bufio" "bytes" "io" + "os" "strings" "testing" "time" @@ -11,6 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" ) func TestShimWorks(t *testing.T) { @@ -118,3 +120,55 @@ func (i *testInput) Start(acc telegraf.Accumulator) error { func (i *testInput) Stop() { } + +func TestLoadConfig(t *testing.T) { + os.Setenv("SECRET_TOKEN", "xxxxxxxxxx") + os.Setenv("SECRET_VALUE", `test"\test`) + + inputs.Add("test", func() telegraf.Input { + return &serviceInput{} + }) + + c := "./testdata/plugin.conf" + inputs, err := LoadConfig(&c) + require.NoError(t, err) + + inp := inputs[0].(*serviceInput) + + require.Equal(t, "awesome name", inp.ServiceName) + require.Equal(t, "xxxxxxxxxx", inp.SecretToken) + require.Equal(t, `test"\test`, inp.SecretValue) +} + +type serviceInput struct { + ServiceName string `toml:"service_name"` + SecretToken string `toml:"secret_token"` + SecretValue string `toml:"secret_value"` +} + +func (i *serviceInput) SampleConfig() string { + return "" +} + +func (i *serviceInput) Description() string { + return "" +} + +func (i *serviceInput) Gather(acc telegraf.Accumulator) error { + acc.AddFields("measurement", + map[string]interface{}{ + "field": 1, + }, + map[string]string{ + "tag": "tag", + }, time.Unix(1234, 5678)) + + return nil +} + +func (i *serviceInput) Start(acc telegraf.Accumulator) error { + 
return nil +} + +func (i *serviceInput) Stop() { } diff --git a/plugins/inputs/execd/shim/testdata/plugin.conf b/plugins/inputs/execd/shim/testdata/plugin.conf new file mode 100644 index 000000000..78dbb33a9 --- /dev/null +++ b/plugins/inputs/execd/shim/testdata/plugin.conf @@ -0,0 +1,4 @@ +[[inputs.test]] + service_name = "awesome name" + secret_token = "${SECRET_TOKEN}" + secret_value = "$SECRET_VALUE" From af8093e00e07092ca7a3586b497f10362fc62fbc Mon Sep 17 00:00:00 2001 From: Simon Knittel Date: Wed, 3 Jun 2020 08:11:13 +0200 Subject: [PATCH 1807/1815] Fix typo in queue depth example of diskio plugin (#7613) --- plugins/inputs/diskio/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/diskio/README.md b/plugins/inputs/diskio/README.md index ce2acda55..11e68d696 100644 --- a/plugins/inputs/diskio/README.md +++ b/plugins/inputs/diskio/README.md @@ -124,7 +124,7 @@ SELECT non_negative_derivative(last("io_time"),1ms) FROM "diskio" WHERE time > n #### Calculate average queue depth: `iops_in_progress` will give you an instantaneous value. This will give you the average between polling intervals. ``` -SELECT non_negative_derivative(last("weighted_io_time",1ms)) from "diskio" WHERE time > now() - 30m GROUP BY "host","name",time(60s) +SELECT non_negative_derivative(last("weighted_io_time"),1ms) from "diskio" WHERE time > now() - 30m GROUP BY "host","name",time(60s) ``` ### Example Output: From aa8cefeddae0f83230b5b332b2a4f9c5a6ddb0fa Mon Sep 17 00:00:00 2001 From: Yarmo Mackenbach Date: Wed, 3 Jun 2020 06:20:45 +0000 Subject: [PATCH 1808/1815] Add support for Solus distribution to maintainer scripts (#7585) --- scripts/post-install.sh | 3 +++ scripts/post-remove.sh | 3 +++ 2 files changed, 6 insertions(+) diff --git a/scripts/post-install.sh b/scripts/post-install.sh index 0f197467e..f37265593 100644 --- a/scripts/post-install.sh +++ b/scripts/post-install.sh @@ -101,5 +101,8 @@ elif [[ -f /etc/os-release ]]; then else install_chkconfig fi + elif [[ "$NAME" = "Solus" ]]; then + # Solus logic + install_systemd /usr/lib/systemd/system/telegraf.service fi fi diff --git a/scripts/post-remove.sh b/scripts/post-remove.sh index 533a4fec1..bda08e2cb 100644 --- a/scripts/post-remove.sh +++ b/scripts/post-remove.sh @@ -59,5 +59,8 @@ elif [[ -f /etc/os-release ]]; then # Amazon Linux logic disable_chkconfig fi + elif [[ "$NAME" = "Solus" ]]; then + rm -f /etc/default/telegraf + disable_systemd /usr/lib/systemd/system/telegraf.service fi fi From 36316ee8f23521a93d2fff69022c5848eef7cd50 Mon Sep 17 00:00:00 2001 From: kauppine <24810630+kauppine@users.noreply.github.com> Date: Fri, 5 Jun 2020 00:19:47 +0300 Subject: [PATCH 1809/1815] Add SNMPv3 trap support to snmp_trap input plugin (#7294) Extend the snmp_trap input plugin to support SNMPv3 traps. The MD5 and SHA1 authentication protocols are supported, along with the DES, AES, AES192, AES192C, AES256 and AES256C privacy protocols.
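For context, a minimal receiver configuration exercising the new options might look like the sketch below. It uses only the option names introduced by this patch (see the README diff that follows); the user name and passphrases are placeholders that must match whatever the trap sender is provisioned with, not defaults shipped by the plugin:

```toml
[[inputs.snmp_trap]]
  service_address = "udp://:162"
  ## Listen for SNMPv3 traps
  version = "3"
  ## USM user name (placeholder)
  sec_name = "myuser"
  ## One of "noAuthNoPriv", "authNoPriv", "authPriv"
  sec_level = "authPriv"
  auth_protocol = "SHA"
  ## Placeholder passphrases
  auth_password = "authpass"
  priv_protocol = "AES"
  priv_password = "privpass"
```

With sec_level = "authPriv" both an authentication and a privacy protocol must be set; "authNoPriv" uses only the auth_* options, and "noAuthNoPriv" uses neither.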
--- go.mod | 3 +- go.sum | 6 +- plugins/inputs/snmp_trap/README.md | 18 + plugins/inputs/snmp_trap/snmp_trap.go | 114 +++ plugins/inputs/snmp_trap/snmp_trap_test.go | 996 ++++++++++++++++++++- 5 files changed, 1120 insertions(+), 17 deletions(-) diff --git a/go.mod b/go.mod index ff764b4d9..77c448d27 100644 --- a/go.mod +++ b/go.mod @@ -57,7 +57,6 @@ require ( github.com/gofrs/uuid v2.1.0+incompatible github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d github.com/golang/geo v0.0.0-20190916061304-5b978397cfec - github.com/golang/mock v1.4.3 // indirect github.com/golang/protobuf v1.3.5 github.com/google/go-cmp v0.4.0 github.com/google/go-github v17.0.0+incompatible @@ -113,7 +112,7 @@ require ( github.com/shirou/gopsutil v2.20.2+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect github.com/sirupsen/logrus v1.4.2 - github.com/soniah/gosnmp v1.22.0 + github.com/soniah/gosnmp v1.25.0 github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 github.com/stretchr/testify v1.5.1 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 diff --git a/go.sum b/go.sum index aca1b99a8..a3d70f21d 100644 --- a/go.sum +++ b/go.sum @@ -247,8 +247,6 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -530,8 +528,8 @@ github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/soniah/gosnmp v1.22.0 h1:jVJi8+OGvR+JHIaZKMmnyNP0akJd2vEgNatybwhZvxg= -github.com/soniah/gosnmp v1.22.0/go.mod h1:DuEpAS0az51+DyVBQwITDsoq4++e3LTNckp2GoasF2I= +github.com/soniah/gosnmp v1.25.0 h1:0y8vpjD07NPmnT+wojnUrKkYLX9Fxw1jI4cGTumWugQ= +github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 h1:l6epF6yBwuejBfhGkM5m8VSNM/QAm7ApGyH35ehA7eQ= diff --git a/plugins/inputs/snmp_trap/README.md b/plugins/inputs/snmp_trap/README.md index ceb370d8f..630566518 100644 --- a/plugins/inputs/snmp_trap/README.md +++ b/plugins/inputs/snmp_trap/README.md @@ -33,6 +33,24 @@ information. # service_address = "udp://:162" ## Timeout running snmptranslate command # timeout = "5s" + ## Snmp version + # version = "2c" + ## SNMPv3 authentication and encryption options. + ## + ## Security Name. + # sec_name = "myuser" + ## Authentication protocol; one of "MD5", "SHA" or "". 
+ # auth_protocol = "MD5" + ## Authentication password. + # auth_password = "pass" + ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". + # sec_level = "authNoPriv" + ## Context Name. + # context_name = "" + ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "". + # priv_protocol = "" + ## Privacy password used for encrypted messages. + # priv_password = "" ``` #### Using a Privileged Port diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go index cb253a7d3..9db058955 100644 --- a/plugins/inputs/snmp_trap/snmp_trap.go +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -31,6 +31,22 @@ type mibEntry struct { type SnmpTrap struct { ServiceAddress string `toml:"service_address"` Timeout internal.Duration `toml:"timeout"` + Version string `toml:"version"` + + // Settings for version 3 + ContextName string `toml:"context_name"` + // Values: "noAuthNoPriv", "authNoPriv", "authPriv" + SecLevel string `toml:"sec_level"` + SecName string `toml:"sec_name"` + // Values: "MD5", "SHA", "". Default: "" + AuthProtocol string `toml:"auth_protocol"` + AuthPassword string `toml:"auth_password"` + // Values: "DES", "AES", "". Default: "" + PrivProtocol string `toml:"priv_protocol"` + PrivPassword string `toml:"priv_password"` + EngineID string `toml:"-"` + EngineBoots uint32 `toml:"-"` + EngineTime uint32 `toml:"-"` acc telegraf.Accumulator listener *gosnmp.TrapListener @@ -58,6 +74,24 @@ var sampleConfig = ` # service_address = "udp://:162" ## Timeout running snmptranslate command # timeout = "5s" + ## Snmp version, defaults to 2c + # version = "2c" + ## SNMPv3 authentication and encryption options. + ## + ## Security Name. + # sec_name = "myuser" + ## Authentication protocol; one of "MD5", "SHA" or "". + # auth_protocol = "MD5" + ## Authentication password. + # auth_password = "pass" + ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". + # sec_level = "authNoPriv" + ## Context Name. + # context_name = "" + ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "". + # priv_protocol = "" + ## Privacy password used for encrypted messages. 
+ # priv_password = "" ` func (s *SnmpTrap) SampleConfig() string { @@ -78,6 +112,7 @@ func init() { timeFunc: time.Now, ServiceAddress: "udp://:162", Timeout: defaultTimeout, + Version: "2c", } }) } @@ -105,6 +140,85 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error { s.listener.OnNewTrap = makeTrapHandler(s) s.listener.Params = gosnmp.Default + switch s.Version { + case "3": + s.listener.Params.Version = gosnmp.Version3 + case "2c": + s.listener.Params.Version = gosnmp.Version2c + case "1": + s.listener.Params.Version = gosnmp.Version1 + default: + s.listener.Params.Version = gosnmp.Version2c + } + + if s.listener.Params.Version == gosnmp.Version3 { + s.listener.Params.ContextName = s.ContextName + s.listener.Params.SecurityModel = gosnmp.UserSecurityModel + + switch strings.ToLower(s.SecLevel) { + case "noauthnopriv", "": + s.listener.Params.MsgFlags = gosnmp.NoAuthNoPriv + case "authnopriv": + s.listener.Params.MsgFlags = gosnmp.AuthNoPriv + case "authpriv": + s.listener.Params.MsgFlags = gosnmp.AuthPriv + default: + return fmt.Errorf("unknown security level '%s'", s.SecLevel) + } + + var authenticationProtocol gosnmp.SnmpV3AuthProtocol + switch strings.ToLower(s.AuthProtocol) { + case "md5": + authenticationProtocol = gosnmp.MD5 + case "sha": + authenticationProtocol = gosnmp.SHA + //case "sha224": + // authenticationProtocol = gosnmp.SHA224 + //case "sha256": + // authenticationProtocol = gosnmp.SHA256 + //case "sha384": + // authenticationProtocol = gosnmp.SHA384 + //case "sha512": + // authenticationProtocol = gosnmp.SHA512 + case "": + authenticationProtocol = gosnmp.NoAuth + default: + return fmt.Errorf("unknown authentication protocol '%s'", s.AuthProtocol) + } + + var privacyProtocol gosnmp.SnmpV3PrivProtocol + switch strings.ToLower(s.PrivProtocol) { + case "aes": + privacyProtocol = gosnmp.AES + case "des": + privacyProtocol = gosnmp.DES + case "aes192": + privacyProtocol = gosnmp.AES192 + case "aes192c": + privacyProtocol = gosnmp.AES192C + case "aes256": + privacyProtocol = gosnmp.AES256 + case "aes256c": + privacyProtocol = gosnmp.AES256C + case "": + privacyProtocol = gosnmp.NoPriv + default: + return fmt.Errorf("unknown privacy protocol '%s'", s.PrivProtocol) + } + + s.listener.Params.SecurityParameters = &gosnmp.UsmSecurityParameters{ + AuthoritativeEngineID: s.EngineID, + AuthoritativeEngineBoots: s.EngineBoots, + AuthoritativeEngineTime: s.EngineTime, + UserName: s.SecName, + PrivacyProtocol: privacyProtocol, + PrivacyPassphrase: s.PrivPassword, + AuthenticationPassphrase: s.AuthPassword, + AuthenticationProtocol: authenticationProtocol, + } + + } + // wrap the handler, used in unit tests if nil != s.makeHandlerWrapper { s.listener.OnNewTrap = s.makeHandlerWrapper(s.listener.OnNewTrap) diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go index 94781cf91..ccd723399 100644 --- a/plugins/inputs/snmp_trap/snmp_trap_test.go +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -40,15 +40,93 @@ func fakeExecCmd(_ internal.Duration, x string, y ...string) ([]byte, error) { return nil, fmt.Errorf("mock " + x + " " + strings.Join(y, " ")) } -func sendTrap(t *testing.T, port uint16, now uint32, trap gosnmp.SnmpTrap, version gosnmp.SnmpVersion) { - s := &gosnmp.GoSNMP{ - Port: port, - Community: "public", - Version: version, - Timeout: time.Duration(2) * time.Second, - Retries: 3, - MaxOids: gosnmp.MaxOids, - Target: "127.0.0.1", +func sendTrap(t *testing.T, port uint16, now uint32, trap gosnmp.SnmpTrap, version 
gosnmp.SnmpVersion, seclevel string, username string, authproto string, authpass string, privproto string, privpass string) { + var s gosnmp.GoSNMP + + if version == gosnmp.Version3 { + var msgFlags gosnmp.SnmpV3MsgFlags + switch strings.ToLower(seclevel) { + case "noauthnopriv", "": + msgFlags = gosnmp.NoAuthNoPriv + case "authnopriv": + msgFlags = gosnmp.AuthNoPriv + case "authpriv": + msgFlags = gosnmp.AuthPriv + default: + msgFlags = gosnmp.NoAuthNoPriv + } + + var authenticationProtocol gosnmp.SnmpV3AuthProtocol + switch strings.ToLower(authproto) { + case "md5": + authenticationProtocol = gosnmp.MD5 + case "sha": + authenticationProtocol = gosnmp.SHA + //case "sha224": + // authenticationProtocol = gosnmp.SHA224 + //case "sha256": + // authenticationProtocol = gosnmp.SHA256 + //case "sha384": + // authenticationProtocol = gosnmp.SHA384 + //case "sha512": + // authenticationProtocol = gosnmp.SHA512 + case "": + authenticationProtocol = gosnmp.NoAuth + default: + authenticationProtocol = gosnmp.NoAuth + } + + var privacyProtocol gosnmp.SnmpV3PrivProtocol + switch strings.ToLower(privproto) { + case "aes": + privacyProtocol = gosnmp.AES + case "des": + privacyProtocol = gosnmp.DES + case "aes192": + privacyProtocol = gosnmp.AES192 + case "aes192c": + privacyProtocol = gosnmp.AES192C + case "aes256": + privacyProtocol = gosnmp.AES256 + case "aes256c": + privacyProtocol = gosnmp.AES256C + case "": + privacyProtocol = gosnmp.NoPriv + default: + privacyProtocol = gosnmp.NoPriv + } + + sp := &gosnmp.UsmSecurityParameters{ + AuthoritativeEngineID: "1", + AuthoritativeEngineBoots: 1, + AuthoritativeEngineTime: 1, + UserName: username, + PrivacyProtocol: privacyProtocol, + PrivacyPassphrase: privpass, + AuthenticationPassphrase: authpass, + AuthenticationProtocol: authenticationProtocol, + } + s = gosnmp.GoSNMP{ + Port: port, + Version: version, + Timeout: time.Duration(2) * time.Second, + Retries: 1, + MaxOids: gosnmp.MaxOids, + Target: "127.0.0.1", + SecurityParameters: sp, + SecurityModel: gosnmp.UserSecurityModel, + MsgFlags: msgFlags, + } + } else { + s = gosnmp.GoSNMP{ + Port: port, + Version: version, + Timeout: time.Duration(2) * time.Second, + Retries: 1, + MaxOids: gosnmp.MaxOids, + Target: "127.0.0.1", + Community: "public", + } } err := s.Connect() @@ -83,6 +161,13 @@ func TestReceiveTrap(t *testing.T) { // send version gosnmp.SnmpVersion trap gosnmp.SnmpTrap // include pdus + // V3 auth and priv parameters + secname string // v3 username + seclevel string // v3 security level + authproto string // Auth protocol: "", MD5 or SHA + authpass string // Auth passphrase + privproto string // Priv protocol: "", DES or AES + privpass string // Priv passphrase // receive entries []entry @@ -276,6 +361,885 @@ func TestReceiveTrap(t *testing.T) { ), }, }, + //ordinary v3 coldStart trap no auth and no priv + { + name: "v3 coldStart noAuthNoPriv", + version: gosnmp.Version3, + secname: "noAuthNoPriv", + seclevel: "noAuthNoPriv", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + 
"sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldstart trap SHA auth and no priv + { + name: "v3 coldStart authShaNoPriv", + version: gosnmp.Version3, + secname: "authShaNoPriv", + seclevel: "authNoPriv", + authproto: "SHA", + authpass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + /* + //ordinary v3 coldstart trap SHA224 auth and no priv + { + name: "v3 coldStart authShaNoPriv", + version: gosnmp.Version3, + secname: "authSha224NoPriv", + seclevel: "authNoPriv", + authproto: "SHA224", + authpass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldstart trap SHA256 auth and no priv + { + name: "v3 coldStart authSha256NoPriv", + version: gosnmp.Version3, + secname: "authSha256NoPriv", + seclevel: "authNoPriv", + authproto: "SHA256", + authpass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: 
[]telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldstart trap SHA384 auth and no priv + { + name: "v3 coldStart authSha384NoPriv", + version: gosnmp.Version3, + secname: "authSha384NoPriv", + seclevel: "authNoPriv", + authproto: "SHA384", + authpass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldstart trap SHA512 auth and no priv + { + name: "v3 coldStart authShaNoPriv", + version: gosnmp.Version3, + secname: "authSha512NoPriv", + seclevel: "authNoPriv", + authproto: "SHA512", + authpass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + },*/ + //ordinary v3 coldstart trap SHA auth and no priv + { + name: "v3 coldStart authShaNoPriv", + version: gosnmp.Version3, + secname: "authShaNoPriv", + seclevel: "authNoPriv", + authproto: "SHA", + authpass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + 
"snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldstart trap MD5 auth and no priv + { + name: "v3 coldStart authMD5NoPriv", + version: gosnmp.Version3, + secname: "authMD5NoPriv", + seclevel: "authNoPriv", + authproto: "MD5", + authpass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldStart SHA trap auth and AES priv + { + name: "v3 coldStart authSHAPrivAES", + version: gosnmp.Version3, + secname: "authSHAPrivAES", + seclevel: "authPriv", + authproto: "SHA", + authpass: "passpass", + privproto: "AES", + privpass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldStart SHA trap auth and DES priv + { + name: "v3 coldStart authSHAPrivDES", + version: gosnmp.Version3, + secname: "authSHAPrivDES", + seclevel: "authPriv", + authproto: "SHA", + authpass: "passpass", + privproto: "DES", + privpass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + 
testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldStart SHA trap auth and AES192 priv + { + name: "v3 coldStart authSHAPrivAES192", + version: gosnmp.Version3, + secname: "authSHAPrivAES192", + seclevel: "authPriv", + authproto: "SHA", + authpass: "passpass", + privproto: "AES192", + privpass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldStart SHA trap auth and AES192C priv + { + name: "v3 coldStart authSHAPrivAES192C", + version: gosnmp.Version3, + secname: "authSHAPrivAES192C", + seclevel: "authPriv", + authproto: "SHA", + authpass: "passpass", + privproto: "AES192C", + privpass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldStart SHA trap auth and AES256 priv + { + name: "v3 coldStart authSHAPrivAES256", + version: gosnmp.Version3, + secname: "authSHAPrivAES256", + seclevel: "authPriv", + authproto: "SHA", + authpass: "passpass", + privproto: "AES256", + privpass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: 
mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldStart SHA trap auth and AES256C priv + { + name: "v3 coldStart authSHAPrivAES256C", + version: gosnmp.Version3, + secname: "authSHAPrivAES256C", + seclevel: "authPriv", + authproto: "SHA", + authpass: "passpass", + privproto: "AES256C", + privpass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -303,7 +1267,17 @@ func TestReceiveTrap(t *testing.T) { timeFunc: func() time.Time { return fakeTime }, - Log: testutil.Logger{}, + Log: testutil.Logger{}, + Version: tt.version.String(), + SecName: tt.secname, + SecLevel: tt.seclevel, + AuthProtocol: tt.authproto, + AuthPassword: tt.authpass, + PrivProtocol: tt.privproto, + PrivPassword: tt.privpass, + EngineID: "80001f8880031dd407f608905e00000000", + EngineBoots: 1, + EngineTime: 1, } require.Nil(t, s.Init()) var acc testutil.Accumulator @@ -320,7 +1294,7 @@ func TestReceiveTrap(t *testing.T) { s.execCmd = fakeExecCmd // Send the trap - sendTrap(t, port, now, tt.trap, tt.version) + sendTrap(t, port, now, tt.trap, tt.version, tt.seclevel, tt.secname, tt.authproto, tt.authpass, tt.privproto, tt.privpass) // Wait for trap to be received select { From 6f931c98348adb327948d62198fdb79109675f07 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 4 Jun 2020 14:43:05 -0700 Subject: [PATCH 1810/1815] Clarify use of multiple mqtt broker servers --- plugins/inputs/mqtt_consumer/README.md | 7 +++++-- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 7 +++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md index ddb5a073b..a9e8236ee 100644 --- a/plugins/inputs/mqtt_consumer/README.md +++ b/plugins/inputs/mqtt_consumer/README.md @@ -7,8 +7,11 @@ and creates metrics using one of the supported [input data formats][]. ```toml [[inputs.mqtt_consumer]] - ## MQTT broker URLs to be used. The format should be scheme://host:port, - ## schema can be tcp, ssl, or ws. + ## Broker URLs for the MQTT server or cluster. To connect to multiple + ## clusters or standalone servers, use a separate plugin instance.
+ ## example: servers = ["tcp://localhost:1883"] + ## servers = ["ssl://localhost:1883"] + ## servers = ["ws://localhost:1883"] servers = ["tcp://127.0.0.1:1883"] ## Topics that will be subscribed to. diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 2f07b3aff..9ceeb1389 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -76,8 +76,11 @@ type MQTTConsumer struct { } var sampleConfig = ` - ## MQTT broker URLs to be used. The format should be scheme://host:port, - ## schema can be tcp, ssl, or ws. + ## Broker URLs for the MQTT server or cluster. To connect to multiple + ## clusters or standalone servers, use a separate plugin instance. + ## example: servers = ["tcp://localhost:1883"] + ## servers = ["ssl://localhost:1883"] + ## servers = ["ws://localhost:1883"] servers = ["tcp://127.0.0.1:1883"] ## Topics that will be subscribed to. From 49caba9b2f0f61f06ba4ab4020c864970e952351 Mon Sep 17 00:00:00 2001 From: reimda Date: Fri, 5 Jun 2020 08:35:14 -0600 Subject: [PATCH 1811/1815] Add tags to snmp_trap input for context name and engine ID (#7633) Add tags for the context name and engine ID --- plugins/inputs/snmp_trap/README.md | 4 +- plugins/inputs/snmp_trap/snmp_trap.go | 20 +-- plugins/inputs/snmp_trap/snmp_trap_test.go | 197 +++++++++++---------- 3 files changed, 114 insertions(+), 107 deletions(-) diff --git a/plugins/inputs/snmp_trap/README.md b/plugins/inputs/snmp_trap/README.md index 630566518..046f18e49 100644 --- a/plugins/inputs/snmp_trap/README.md +++ b/plugins/inputs/snmp_trap/README.md @@ -45,8 +45,6 @@ information. # auth_password = "pass" ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". # sec_level = "authNoPriv" - ## Context Name.
- # context_name = "" ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "". # priv_protocol = "" ## Privacy password used for encrypted messages. @@ -152,7 +146,6 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error { } if s.listener.Params.Version == gosnmp.Version3 { - s.listener.Params.ContextName = s.ContextName s.listener.Params.SecurityModel = gosnmp.UserSecurityModel switch strings.ToLower(s.SecLevel) { @@ -207,9 +200,6 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error { } s.listener.Params.SecurityParameters = &gosnmp.UsmSecurityParameters{ - AuthoritativeEngineID: s.EngineID, - AuthoritativeEngineBoots: s.EngineBoots, - AuthoritativeEngineTime: s.EngineTime, UserName: s.SecName, PrivacyProtocol: privacyProtocol, PrivacyPassphrase: s.PrivPassword, @@ -359,6 +349,16 @@ func makeTrapHandler(s *SnmpTrap) handler { fields[name] = value } + if packet.Version == gosnmp.Version3 { + if packet.ContextName != "" { + tags["context_name"] = packet.ContextName + } + if packet.ContextEngineID != "" { + // SNMP RFCs like 3411 and 5343 show engine ID as a hex string + tags["engine_id"] = fmt.Sprintf("%x", packet.ContextEngineID) + } + } + s.acc.AddFields("snmp_trap", fields, tags, tm) } } diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go index ccd723399..dcc9b5d68 100644 --- a/plugins/inputs/snmp_trap/snmp_trap_test.go +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -40,12 +40,12 @@ func fakeExecCmd(_ internal.Duration, x string, y ...string) ([]byte, error) { return nil, fmt.Errorf("mock " + x + " " + strings.Join(y, " ")) } -func sendTrap(t *testing.T, port uint16, now uint32, trap gosnmp.SnmpTrap, version gosnmp.SnmpVersion, seclevel string, username string, authproto string, authpass string, privproto string, privpass string) { +func sendTrap(t *testing.T, port uint16, now uint32, trap gosnmp.SnmpTrap, version gosnmp.SnmpVersion, secLevel string, username string, authProto string, authPass string, privProto string, privPass string, contextName string, engineID string) { var s gosnmp.GoSNMP if version == gosnmp.Version3 { var msgFlags gosnmp.SnmpV3MsgFlags - switch strings.ToLower(seclevel) { + switch strings.ToLower(secLevel) { case "noauthnopriv", "": msgFlags = gosnmp.NoAuthNoPriv case "authnopriv": @@ -57,7 +57,7 @@ func sendTrap(t *testing.T, port uint16, now uint32, trap gosnmp.SnmpTrap, versi } var authenticationProtocol gosnmp.SnmpV3AuthProtocol - switch strings.ToLower(authproto) { + switch strings.ToLower(authProto) { case "md5": authenticationProtocol = gosnmp.MD5 case "sha": @@ -77,7 +77,7 @@ func sendTrap(t *testing.T, port uint16, now uint32, trap gosnmp.SnmpTrap, versi } var privacyProtocol gosnmp.SnmpV3PrivProtocol - switch strings.ToLower(privproto) { + switch strings.ToLower(privProto) { case "aes": privacyProtocol = gosnmp.AES case "des": @@ -102,8 +102,8 @@ func sendTrap(t *testing.T, port uint16, now uint32, trap gosnmp.SnmpTrap, versi AuthoritativeEngineTime: 1, UserName: username, PrivacyProtocol: privacyProtocol, - PrivacyPassphrase: privpass, - AuthenticationPassphrase: authpass, + PrivacyPassphrase: privPass, + AuthenticationPassphrase: authPass, AuthenticationProtocol: authenticationProtocol, } s = gosnmp.GoSNMP{ @@ -116,6 +116,8 @@ func sendTrap(t *testing.T, port uint16, now uint32, trap gosnmp.SnmpTrap, versi SecurityParameters: sp, SecurityModel: gosnmp.UserSecurityModel, MsgFlags: msgFlags, + ContextName: contextName, + 
ContextEngineID: engineID, } } else { s = gosnmp.GoSNMP{ @@ -162,12 +164,16 @@ func TestReceiveTrap(t *testing.T) { version gosnmp.SnmpVersion trap gosnmp.SnmpTrap // include pdus // V3 auth and priv parameters - secname string // v3 username - seclevel string // v3 security level - authproto string // Auth protocol: "", MD5 or SHA - authpass string // Auth passphrase - privproto string // Priv protocol: "", DES or AES - privpass string // Priv passphrase + secName string // v3 username + secLevel string // v3 security level + authProto string // Auth protocol: "", MD5 or SHA + authPass string // Auth passphrase + privProto string // Priv protocol: "", DES or AES + privPass string // Priv passphrase + + // V3 sender context + contextName string + engineID string // receive entries []entry @@ -363,10 +369,12 @@ func TestReceiveTrap(t *testing.T) { }, //ordinary v3 coldStart trap no auth and no priv { - name: "v3 coldStart noAuthNoPriv", - version: gosnmp.Version3, - secname: "noAuthNoPriv", - seclevel: "noAuthNoPriv", + name: "v3 coldStart noAuthNoPriv", + version: gosnmp.Version3, + secName: "noAuthNoPriv", + secLevel: "noAuthNoPriv", + contextName: "foo_context_name", + engineID: "bar_engine_id", trap: gosnmp.SnmpTrap{ Variables: []gosnmp.SnmpPDU{ { @@ -408,11 +416,13 @@ func TestReceiveTrap(t *testing.T) { testutil.MustMetric( "snmp_trap", // name map[string]string{ // tags - "oid": ".1.3.6.1.6.3.1.1.5.1", - "name": "coldStart", - "mib": "SNMPv2-MIB", - "version": "3", - "source": "127.0.0.1", + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + "context_name": "foo_context_name", + "engine_id": "6261725f656e67696e655f6964", }, map[string]interface{}{ // fields "sysUpTimeInstance": now, @@ -425,10 +435,10 @@ func TestReceiveTrap(t *testing.T) { { name: "v3 coldStart authShaNoPriv", version: gosnmp.Version3, - secname: "authShaNoPriv", - seclevel: "authNoPriv", - authproto: "SHA", - authpass: "passpass", + secName: "authShaNoPriv", + secLevel: "authNoPriv", + authProto: "SHA", + authPass: "passpass", trap: gosnmp.SnmpTrap{ Variables: []gosnmp.SnmpPDU{ { @@ -488,10 +498,10 @@ func TestReceiveTrap(t *testing.T) { { name: "v3 coldStart authShaNoPriv", version: gosnmp.Version3, - secname: "authSha224NoPriv", - seclevel: "authNoPriv", - authproto: "SHA224", - authpass: "passpass", + secName: "authSha224NoPriv", + secLevel: "authNoPriv", + authProto: "SHA224", + authPass: "passpass", trap: gosnmp.SnmpTrap{ Variables: []gosnmp.SnmpPDU{ { @@ -550,10 +560,10 @@ func TestReceiveTrap(t *testing.T) { { name: "v3 coldStart authSha256NoPriv", version: gosnmp.Version3, - secname: "authSha256NoPriv", - seclevel: "authNoPriv", - authproto: "SHA256", - authpass: "passpass", + secName: "authSha256NoPriv", + secLevel: "authNoPriv", + authProto: "SHA256", + authPass: "passpass", trap: gosnmp.SnmpTrap{ Variables: []gosnmp.SnmpPDU{ { @@ -612,10 +622,10 @@ func TestReceiveTrap(t *testing.T) { { name: "v3 coldStart authSha384NoPriv", version: gosnmp.Version3, - secname: "authSha384NoPriv", - seclevel: "authNoPriv", - authproto: "SHA384", - authpass: "passpass", + secName: "authSha384NoPriv", + secLevel: "authNoPriv", + authProto: "SHA384", + authPass: "passpass", trap: gosnmp.SnmpTrap{ Variables: []gosnmp.SnmpPDU{ { @@ -674,10 +684,10 @@ func TestReceiveTrap(t *testing.T) { { name: "v3 coldStart authShaNoPriv", version: gosnmp.Version3, - secname: "authSha512NoPriv", - seclevel: "authNoPriv", - authproto: "SHA512", - authpass: "passpass", + 
secName: "authSha512NoPriv", + secLevel: "authNoPriv", + authProto: "SHA512", + authPass: "passpass", trap: gosnmp.SnmpTrap{ Variables: []gosnmp.SnmpPDU{ { @@ -736,10 +746,10 @@ func TestReceiveTrap(t *testing.T) { { name: "v3 coldStart authShaNoPriv", version: gosnmp.Version3, - secname: "authShaNoPriv", - seclevel: "authNoPriv", - authproto: "SHA", - authpass: "passpass", + secName: "authShaNoPriv", + secLevel: "authNoPriv", + authProto: "SHA", + authPass: "passpass", trap: gosnmp.SnmpTrap{ Variables: []gosnmp.SnmpPDU{ { @@ -798,10 +808,10 @@ func TestReceiveTrap(t *testing.T) { { name: "v3 coldStart authMD5NoPriv", version: gosnmp.Version3, - secname: "authMD5NoPriv", - seclevel: "authNoPriv", - authproto: "MD5", - authpass: "passpass", + secName: "authMD5NoPriv", + secLevel: "authNoPriv", + authProto: "MD5", + authPass: "passpass", trap: gosnmp.SnmpTrap{ Variables: []gosnmp.SnmpPDU{ { @@ -860,12 +870,12 @@ func TestReceiveTrap(t *testing.T) { { name: "v3 coldStart authSHAPrivAES", version: gosnmp.Version3, - secname: "authSHAPrivAES", - seclevel: "authPriv", - authproto: "SHA", - authpass: "passpass", - privproto: "AES", - privpass: "passpass", + secName: "authSHAPrivAES", + secLevel: "authPriv", + authProto: "SHA", + authPass: "passpass", + privProto: "AES", + privPass: "passpass", trap: gosnmp.SnmpTrap{ Variables: []gosnmp.SnmpPDU{ { @@ -924,12 +934,12 @@ func TestReceiveTrap(t *testing.T) { { name: "v3 coldStart authSHAPrivDES", version: gosnmp.Version3, - secname: "authSHAPrivDES", - seclevel: "authPriv", - authproto: "SHA", - authpass: "passpass", - privproto: "DES", - privpass: "passpass", + secName: "authSHAPrivDES", + secLevel: "authPriv", + authProto: "SHA", + authPass: "passpass", + privProto: "DES", + privPass: "passpass", trap: gosnmp.SnmpTrap{ Variables: []gosnmp.SnmpPDU{ { @@ -988,12 +998,12 @@ func TestReceiveTrap(t *testing.T) { { name: "v3 coldStart authSHAPrivAES192", version: gosnmp.Version3, - secname: "authSHAPrivAES192", - seclevel: "authPriv", - authproto: "SHA", - authpass: "passpass", - privproto: "AES192", - privpass: "passpass", + secName: "authSHAPrivAES192", + secLevel: "authPriv", + authProto: "SHA", + authPass: "passpass", + privProto: "AES192", + privPass: "passpass", trap: gosnmp.SnmpTrap{ Variables: []gosnmp.SnmpPDU{ { @@ -1052,12 +1062,12 @@ func TestReceiveTrap(t *testing.T) { { name: "v3 coldStart authSHAPrivAES192C", version: gosnmp.Version3, - secname: "authSHAPrivAES192C", - seclevel: "authPriv", - authproto: "SHA", - authpass: "passpass", - privproto: "AES192C", - privpass: "passpass", + secName: "authSHAPrivAES192C", + secLevel: "authPriv", + authProto: "SHA", + authPass: "passpass", + privProto: "AES192C", + privPass: "passpass", trap: gosnmp.SnmpTrap{ Variables: []gosnmp.SnmpPDU{ { @@ -1116,12 +1126,12 @@ func TestReceiveTrap(t *testing.T) { { name: "v3 coldStart authSHAPrivAES256", version: gosnmp.Version3, - secname: "authSHAPrivAES256", - seclevel: "authPriv", - authproto: "SHA", - authpass: "passpass", - privproto: "AES256", - privpass: "passpass", + secName: "authSHAPrivAES256", + secLevel: "authPriv", + authProto: "SHA", + authPass: "passpass", + privProto: "AES256", + privPass: "passpass", trap: gosnmp.SnmpTrap{ Variables: []gosnmp.SnmpPDU{ { @@ -1180,12 +1190,12 @@ func TestReceiveTrap(t *testing.T) { { name: "v3 coldStart authSHAPrivAES256C", version: gosnmp.Version3, - secname: "authSHAPrivAES256C", - seclevel: "authPriv", - authproto: "SHA", - authpass: "passpass", - privproto: "AES256C", - privpass: "passpass", + secName: 
"authSHAPrivAES256C", + secLevel: "authPriv", + authProto: "SHA", + authPass: "passpass", + privProto: "AES256C", + privPass: "passpass", trap: gosnmp.SnmpTrap{ Variables: []gosnmp.SnmpPDU{ { @@ -1269,15 +1279,12 @@ func TestReceiveTrap(t *testing.T) { }, Log: testutil.Logger{}, Version: tt.version.String(), - SecName: tt.secname, - SecLevel: tt.seclevel, - AuthProtocol: tt.authproto, - AuthPassword: tt.authpass, - PrivProtocol: tt.privproto, - PrivPassword: tt.privpass, - EngineID: "80001f8880031dd407f608905e00000000", - EngineBoots: 1, - EngineTime: 1, + SecName: tt.secName, + SecLevel: tt.secLevel, + AuthProtocol: tt.authProto, + AuthPassword: tt.authPass, + PrivProtocol: tt.privProto, + PrivPassword: tt.privPass, } require.Nil(t, s.Init()) var acc testutil.Accumulator @@ -1294,7 +1301,7 @@ func TestReceiveTrap(t *testing.T) { s.execCmd = fakeExecCmd // Send the trap - sendTrap(t, port, now, tt.trap, tt.version, tt.seclevel, tt.secname, tt.authproto, tt.authpass, tt.privproto, tt.privpass) + sendTrap(t, port, now, tt.trap, tt.version, tt.secLevel, tt.secName, tt.authProto, tt.authPass, tt.privProto, tt.privPass, tt.contextName, tt.engineID) // Wait for trap to be received select { From b99e3bc63d7f4f452985281ce84bdd71754c1e2e Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Fri, 5 Jun 2020 08:42:13 -0600 Subject: [PATCH 1812/1815] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4672c81de..cad19b9ab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -55,6 +55,7 @@ - [#7540](https://github.com/influxdata/telegraf/pull/7540): Add processor to look up service name by port. - [#7474](https://github.com/influxdata/telegraf/pull/7474): Add new once mode that write to outputs and exits. - [#7474](https://github.com/influxdata/telegraf/pull/7474): Run processors and aggregators during test mode. +- [#7294](https://github.com/influxdata/telegraf/pull/7294): Add SNMPv3 trap support to snmp_trap input. #### Bugfixes From 741ea839d2cd014d91c18412bd5f6d2d3e25f4ec Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 5 Jun 2020 10:43:43 -0400 Subject: [PATCH 1813/1815] add support for streaming processors (#7634) --- agent/agent.go | 1158 +++++++++++++--------- aggregator.go | 6 +- config/config.go | 109 +- config/config_test.go | 8 +- input.go | 6 +- models/running_processor.go | 74 +- models/running_processor_test.go | 20 +- output.go | 6 +- plugin.go | 10 + plugins/processors/registry.go | 17 +- plugins/processors/streamingprocessor.go | 49 + processor.go | 29 +- 12 files changed, 913 insertions(+), 579 deletions(-) create mode 100644 plugins/processors/streamingprocessor.go diff --git a/agent/agent.go b/agent/agent.go index 5795eb0d4..72e906a59 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -6,6 +6,7 @@ import ( "log" "os" "runtime" + "sort" "sync" "time" @@ -29,6 +30,70 @@ func NewAgent(config *config.Config) (*Agent, error) { return a, nil } +// inputUnit is a group of input plugins and the shared channel they write to. 
+// +// ┌───────┐ +// │ Input │───┐ +// └───────┘ │ +// ┌───────┐ │ ______ +// │ Input │───┼──▶ ()_____) +// └───────┘ │ +// ┌───────┐ │ +// │ Input │───┘ +// └───────┘ +type inputUnit struct { + dst chan<- telegraf.Metric + inputs []*models.RunningInput +} + +// ______ ┌───────────┐ ______ +// ()_____)──▶ │ Processor │──▶ ()_____) +// └───────────┘ +type processorUnit struct { + src <-chan telegraf.Metric + dst chan<- telegraf.Metric + processor *models.RunningProcessor +} + +// aggregatorUnit is a group of Aggregators and their source and sink channels. +// Typically the aggregators write to a processor channel and pass the original +// metrics to the output channel. The sink channels may be the same channel. +// +// ┌────────────┐ +// ┌──▶ │ Aggregator │───┐ +// │ └────────────┘ │ +// ______ │ ┌────────────┐ │ ______ +// ()_____)───┼──▶ │ Aggregator │───┼──▶ ()_____) +// │ └────────────┘ │ +// │ ┌────────────┐ │ +// ├──▶ │ Aggregator │───┘ +// │ └────────────┘ +// │ ______ +// └────────────────────────▶ ()_____) +type aggregatorUnit struct { + src <-chan telegraf.Metric + aggC chan<- telegraf.Metric + outputC chan<- telegraf.Metric + aggregators []*models.RunningAggregator +} + +// outputUnit is a group of Outputs and their source channel. Metrics on the +// channel are written to all outputs. +// +// ┌────────┐ +// ┌──▶ │ Output │ +// │ └────────┘ +// ______ ┌─────┐ │ ┌────────┐ +// ()_____)──▶ │ Fan │───┼──▶ │ Output │ +// └─────┘ │ └────────┘ +// │ ┌────────┐ +// └──▶ │ Output │ +// └────────┘ +type outputUnit struct { + src <-chan telegraf.Metric + outputs []*models.RunningOutput +} + // Run starts and runs the Agent until the context is done. func (a *Agent) Run(ctx context.Context) error { log.Printf("I! [agent] Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+ @@ -36,298 +101,150 @@ func (a *Agent) Run(ctx context.Context) error { a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet, a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration) - if ctx.Err() != nil { - return ctx.Err() - } - log.Printf("D! [agent] Initializing plugins") err := a.initPlugins() if err != nil { return err } + startTime := time.Now() + log.Printf("D! [agent] Connecting outputs") - err = a.connectOutputs(ctx) + next, ou, err := a.startOutputs(ctx, a.Config.Outputs) if err != nil { return err } - inputC := make(chan telegraf.Metric, 100) - procC := make(chan telegraf.Metric, 100) - outputC := make(chan telegraf.Metric, 100) + var apu []*processorUnit + var au *aggregatorUnit + if len(a.Config.Aggregators) != 0 { + aggC := next + if len(a.Config.AggProcessors) != 0 { + aggC, apu, err = a.startProcessors(next, a.Config.AggProcessors) + if err != nil { + return err + } + } - startTime := time.Now() + next, au, err = a.startAggregators(aggC, next, a.Config.Aggregators) + if err != nil { + return err + } + } - log.Printf("D! [agent] Starting service inputs") - err = a.startServiceInputs(ctx, inputC) + var pu []*processorUnit + if len(a.Config.Processors) != 0 { + next, pu, err = a.startProcessors(next, a.Config.Processors) + if err != nil { + return err + } + } + + iu, err := a.startInputs(next, a.Config.Inputs) if err != nil { return err } var wg sync.WaitGroup - - src := inputC - dst := inputC - wg.Add(1) - go func(dst chan telegraf.Metric) { + go func() { defer wg.Done() - - err := a.runInputs(ctx, startTime, dst) - if err != nil { - log.Printf("E! [agent] Error running inputs: %v", err) - } - - log.Printf("D! [agent] Stopping service inputs") - a.stopServiceInputs() - - close(dst) - log.Printf("D! 
[agent] Input channel closed") - }(dst) - - src = dst - - if len(a.Config.Processors) > 0 { - dst = procC - - wg.Add(1) - go func(src, dst chan telegraf.Metric) { - defer wg.Done() - - err := a.runProcessors(src, dst) - if err != nil { - log.Printf("E! [agent] Error running processors: %v", err) - } - close(dst) - log.Printf("D! [agent] Processor channel closed") - }(src, dst) - - src = dst - } - - if len(a.Config.Aggregators) > 0 { - dst = outputC - - wg.Add(1) - go func(src, dst chan telegraf.Metric) { - defer wg.Done() - - err := a.runAggregators(startTime, src, dst) - if err != nil { - log.Printf("E! [agent] Error running aggregators: %v", err) - } - close(dst) - log.Printf("D! [agent] Output channel closed") - }(src, dst) - - src = dst - } - - wg.Add(1) - go func(src chan telegraf.Metric) { - defer wg.Done() - - err := a.runOutputs(startTime, src) + err = a.runOutputs(ou) if err != nil { log.Printf("E! [agent] Error running outputs: %v", err) } - }(src) - - wg.Wait() - - log.Printf("D! [agent] Closing outputs") - a.closeOutputs() - - log.Printf("D! [agent] Stopped Successfully") - return nil -} - -// Test runs the inputs, processors and aggregators for a single gather and -// writes the metrics to stdout. -func (a *Agent) Test(ctx context.Context, wait time.Duration) error { - outputF := func(src <-chan telegraf.Metric) { - s := influx.NewSerializer() - s.SetFieldSortOrder(influx.SortFields) - - for metric := range src { - octets, err := s.Serialize(metric) - if err == nil { - fmt.Print("> ", string(octets)) - } - metric.Reject() - } - } - - err := a.test(ctx, wait, outputF) - if err != nil { - return err - } - - if models.GlobalGatherErrors.Get() != 0 { - return fmt.Errorf("input plugins recorded %d errors", models.GlobalGatherErrors.Get()) - } - return nil - -} - -// Once runs the full agent for a single gather. -func (a *Agent) Once(ctx context.Context, wait time.Duration) error { - outputF := func(src <-chan telegraf.Metric) { - interval := a.Config.Agent.FlushInterval.Duration - - ctx, cancel := context.WithCancel(context.Background()) - - var wg sync.WaitGroup - for _, output := range a.Config.Outputs { - interval := interval - // Overwrite agent flush_interval if this plugin has its own. - if output.Config.FlushInterval != 0 { - interval = output.Config.FlushInterval - } - - jitter := 0 * time.Second - - ticker := NewRollingTicker(interval, jitter) - defer ticker.Stop() - - wg.Add(1) - go func(output *models.RunningOutput) { - defer wg.Done() - a.flushLoop(ctx, output, ticker) - }(output) - } - - for metric := range src { - for i, output := range a.Config.Outputs { - if i == len(a.Config.Outputs)-1 { - output.AddMetric(metric) - } else { - output.AddMetric(metric.Copy()) - } - } - } - - cancel() - wg.Wait() - } - - err := a.test(ctx, wait, outputF) - if err != nil { - return err - } - - if models.GlobalGatherErrors.Get() != 0 { - return fmt.Errorf("input plugins recorded %d errors", models.GlobalGatherErrors.Get()) - } - - unsent := 0 - for _, output := range a.Config.Outputs { - unsent += output.BufferLength() - } - if unsent != 0 { - return fmt.Errorf("output plugins unable to send %d metrics", unsent) - } - return nil -} - -// Test runs the agent and performs a single gather sending output to the -// outputF. After gathering pauses for the wait duration to allow service -// inputs to run. -func (a *Agent) test(ctx context.Context, wait time.Duration, outputF func(<-chan telegraf.Metric)) error { - log.Printf("D! 
[agent] Initializing plugins") - err := a.initPlugins() - if err != nil { - return err - } - - log.Printf("D! [agent] Connecting outputs") - err = a.connectOutputs(ctx) - if err != nil { - return err - } - - inputC := make(chan telegraf.Metric, 100) - procC := make(chan telegraf.Metric, 100) - outputC := make(chan telegraf.Metric, 100) - - startTime := time.Now() - - var wg sync.WaitGroup - - src := inputC - dst := inputC - - wg.Add(1) - go func(dst chan telegraf.Metric) { - defer wg.Done() - - a.testRunInputs(ctx, wait, dst) - - close(dst) - log.Printf("D! [agent] Input channel closed") - }(dst) - - src = dst - - if len(a.Config.Processors) > 0 { - dst = procC + }() + if au != nil { wg.Add(1) - go func(src, dst chan telegraf.Metric) { + go func() { defer wg.Done() - - err := a.runProcessors(src, dst) + err = a.runProcessors(apu) if err != nil { log.Printf("E! [agent] Error running processors: %v", err) } - close(dst) - log.Printf("D! [agent] Processor channel closed") - }(src, dst) - - src = dst - } - - if len(a.Config.Aggregators) > 0 { - dst = outputC + }() wg.Add(1) - go func(src, dst chan telegraf.Metric) { + go func() { defer wg.Done() - - err := a.runAggregators(startTime, src, dst) + err = a.runAggregators(startTime, au) if err != nil { log.Printf("E! [agent] Error running aggregators: %v", err) } - close(dst) - log.Printf("D! [agent] Output channel closed") - }(src, dst) + }() + } - src = dst + if pu != nil { + wg.Add(1) + go func() { + defer wg.Done() + err = a.runProcessors(pu) + if err != nil { + log.Printf("E! [agent] Error running processors: %v", err) + } + }() } wg.Add(1) - go func(src <-chan telegraf.Metric) { + go func() { defer wg.Done() - outputF(src) - }(src) + err = a.runInputs(ctx, startTime, iu) + if err != nil { + log.Printf("E! [agent] Error running inputs: %v", err) + } + }() wg.Wait() - log.Printf("D! [agent] Closing outputs") - a.closeOutputs() - log.Printf("D! [agent] Stopped Successfully") + return err +} +// initPlugins runs the Init function on plugins. +func (a *Agent) initPlugins() error { + for _, input := range a.Config.Inputs { + err := input.Init() + if err != nil { + return fmt.Errorf("could not initialize input %s: %v", + input.LogName(), err) + } + } + for _, processor := range a.Config.Processors { + err := processor.Init() + if err != nil { + return fmt.Errorf("could not initialize processor %s: %v", + processor.Config.Name, err) + } + } + for _, aggregator := range a.Config.Aggregators { + err := aggregator.Init() + if err != nil { + return fmt.Errorf("could not initialize aggregator %s: %v", + aggregator.Config.Name, err) + } + } + for _, output := range a.Config.Outputs { + err := output.Init() + if err != nil { + return fmt.Errorf("could not initialize output %s: %v", + output.Config.Name, err) + } + } return nil } -func (a *Agent) testRunInputs( - ctx context.Context, - wait time.Duration, +func (a *Agent) startInputs( dst chan<- telegraf.Metric, -) { + inputs []*models.RunningInput, +) (*inputUnit, error) { log.Printf("D! [agent] Starting service inputs") - for _, input := range a.Config.Inputs { + + unit := &inputUnit{ + dst: dst, + } + + for _, input := range inputs { if si, ok := input.Input.(telegraf.ServiceInput); ok { // Service input plugins are not subject to timestamp rounding. 
// This only applies to the accumulator passed to Start(), the @@ -338,21 +255,115 @@ func (a *Agent) testRunInputs( err := si.Start(acc) if err != nil { - acc.AddError(err) - si.Stop() - continue + stopServiceInputs(unit.inputs) + return nil, fmt.Errorf("starting input %s: %w", input.LogName(), err) } } + unit.inputs = append(unit.inputs, input) } + return unit, nil +} + +// runInputs starts and triggers the periodic gather for Inputs. +// +// When the context is done the timers are stopped and this function returns +// after all ongoing Gather calls complete. +func (a *Agent) runInputs( + ctx context.Context, + startTime time.Time, + unit *inputUnit, +) error { + var wg sync.WaitGroup + for _, input := range unit.inputs { + interval := a.Config.Agent.Interval.Duration + jitter := a.Config.Agent.CollectionJitter.Duration + + // Overwrite agent interval if this plugin has its own. + if input.Config.Interval != 0 { + interval = input.Config.Interval + } + + var ticker Ticker + if a.Config.Agent.RoundInterval { + ticker = NewAlignedTicker(startTime, interval, jitter) + } else { + ticker = NewUnalignedTicker(interval, jitter) + } + defer ticker.Stop() + + acc := NewAccumulator(input, unit.dst) + acc.SetPrecision(a.Precision()) + + wg.Add(1) + go func(input *models.RunningInput) { + defer wg.Done() + a.gatherLoop(ctx, acc, input, ticker) + }(input) + } + + wg.Wait() + + log.Printf("D! [agent] Stopping service inputs") + stopServiceInputs(unit.inputs) + + close(unit.dst) + log.Printf("D! [agent] Input channel closed") + + return nil +} + +// testStartInputs is a variation of startInputs for use in --test and --once +// mode. It differs by logging Start errors instead of returning them, so a +// service input that fails to start does not abort the run. +func (a *Agent) testStartInputs( + dst chan<- telegraf.Metric, + inputs []*models.RunningInput, +) (*inputUnit, error) { + log.Printf("D! [agent] Starting service inputs") + + unit := &inputUnit{ + dst: dst, + } + + for _, input := range inputs { + if si, ok := input.Input.(telegraf.ServiceInput); ok { + // Service input plugins are not subject to timestamp rounding. + // This only applies to the accumulator passed to Start(), the + // Gather() accumulator does apply rounding according to the + // precision agent setting. + acc := NewAccumulator(input, dst) + acc.SetPrecision(time.Nanosecond) + + err := si.Start(acc) + if err != nil { + log.Printf("E! [agent] Starting input %s: %v", input.LogName(), err) + } + + } + + unit.inputs = append(unit.inputs, input) + } + + return unit, nil +} + +// testRunInputs is a variation of runInputs for use in --test and --once mode. +// Instead of using a ticker to run the inputs they are called once immediately. +func (a *Agent) testRunInputs( + ctx context.Context, + wait time.Duration, + unit *inputUnit, +) error { + var wg sync.WaitGroup + nul := make(chan telegraf.Metric) go func() { for range nul { } }() - var wg sync.WaitGroup - for _, input := range a.Config.Inputs { + for _, input := range unit.inputs { wg.Add(1) go func(input *models.RunningInput) { defer wg.Done() @@ -370,7 +381,7 @@ func (a *Agent) testRunInputs( time.Sleep(500 * time.Millisecond) } - acc := NewAccumulator(input, dst) + acc := NewAccumulator(input, unit.dst) acc.SetPrecision(a.Precision()) if err := input.Input.Gather(acc); err != nil { @@ -379,54 +390,24 @@ func (a *Agent) testRunInputs( }(input) } wg.Wait() - close(nul) internal.SleepContext(ctx, wait) log.Printf("D! 
[agent] Stopping service inputs") - a.stopServiceInputs() + stopServiceInputs(unit.inputs) + close(unit.dst) + log.Printf("D! [agent] Input channel closed") + return nil } -// runInputs starts and triggers the periodic gather for Inputs. -// -// When the context is done the timers are stopped and this function returns -// after all ongoing Gather calls complete. -func (a *Agent) runInputs( - ctx context.Context, - startTime time.Time, - dst chan<- telegraf.Metric, -) error { - var wg sync.WaitGroup - for _, input := range a.Config.Inputs { - interval := a.Config.Agent.Interval.Duration - jitter := a.Config.Agent.CollectionJitter.Duration - - // Overwrite agent interval if this plugin has its own. - if input.Config.Interval != 0 { - interval = input.Config.Interval +// stopServiceInputs stops all service inputs. +func stopServiceInputs(inputs []*models.RunningInput) { + for _, input := range inputs { + if si, ok := input.Input.(telegraf.ServiceInput); ok { + si.Stop() } - - var ticker Ticker - if a.Config.Agent.RoundInterval { - ticker = NewAlignedTicker(startTime, interval, jitter) - } else { - ticker = NewUnalignedTicker(interval, jitter) - } - defer ticker.Stop() - - acc := NewAccumulator(input, dst) - acc.SetPrecision(a.Precision()) - - wg.Add(1) - go func(input *models.RunningInput) { - defer wg.Done() - a.gatherLoop(ctx, acc, input, ticker) - }(input) } - - wg.Wait() - return nil } // gather runs an input's gather function periodically until the context is @@ -475,30 +456,142 @@ func (a *Agent) gatherOnce( } } -// runProcessors applies processors to metrics. -func (a *Agent) runProcessors( - src <-chan telegraf.Metric, - agg chan<- telegraf.Metric, -) error { - for metric := range src { - metrics := a.applyProcessors(metric) +// startProcessors sets up the processor chain and calls Start on all +// processors. If an error occurs any started processors are Stopped. +func (a *Agent) startProcessors( + dst chan<- telegraf.Metric, + processors models.RunningProcessors, +) (chan<- telegraf.Metric, []*processorUnit, error) { + var units []*processorUnit - for _, metric := range metrics { - agg <- metric + // Sort from last to first + sort.SliceStable(processors, func(i, j int) bool { + return processors[i].Config.Order > processors[j].Config.Order + }) + + var src chan telegraf.Metric + for _, processor := range processors { + src = make(chan telegraf.Metric, 100) + acc := NewAccumulator(processor, dst) + + err := processor.Start(acc) + if err != nil { + for _, u := range units { + u.processor.Stop() + close(u.dst) + } + return nil, nil, fmt.Errorf("starting processor %s: %w", processor.LogName(), err) } + + units = append(units, &processorUnit{ + src: src, + dst: dst, + processor: processor, + }) + + dst = src } + return src, units, nil +} + +// runProcessors begins processing metrics and runs until the source channel is +// closed and all metrics have been written. +func (a *Agent) runProcessors( + units []*processorUnit, +) error { + var wg sync.WaitGroup + for _, unit := range units { + wg.Add(1) + go func(unit *processorUnit) { + defer wg.Done() + + acc := NewAccumulator(unit.processor, unit.dst) + for m := range unit.src { + unit.processor.Add(m, acc) + } + unit.processor.Stop() + close(unit.dst) + log.Printf("D! [agent] Processor channel closed") + }(unit) + } + wg.Wait() + return nil } -// applyProcessors applies all processors to a metric. 
-func (a *Agent) applyProcessors(m telegraf.Metric) []telegraf.Metric { - metrics := []telegraf.Metric{m} - for _, processor := range a.Config.Processors { - metrics = processor.Apply(metrics...) +// startAggregators sets up the aggregator unit and returns the source channel. +func (a *Agent) startAggregators( + aggC chan<- telegraf.Metric, + outputC chan<- telegraf.Metric, + aggregators []*models.RunningAggregator, +) (chan<- telegraf.Metric, *aggregatorUnit, error) { + src := make(chan telegraf.Metric, 100) + unit := &aggregatorUnit{ + src: src, + aggC: aggC, + outputC: outputC, + aggregators: aggregators, + } + return src, unit, nil +} + +// runAggregators begins aggregating metrics and runs until the source channel +// is closed and all metrics have been written. +func (a *Agent) runAggregators( + startTime time.Time, + unit *aggregatorUnit, +) error { + ctx, cancel := context.WithCancel(context.Background()) + + // Before calling Add, initialize the aggregation window. This ensures + // that any metric created after start time will be aggregated. + for _, agg := range a.Config.Aggregators { + since, until := updateWindow(startTime, a.Config.Agent.RoundInterval, agg.Period()) + agg.UpdateWindow(since, until) } - return metrics + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for metric := range unit.src { + var dropOriginal bool + for _, agg := range a.Config.Aggregators { + if ok := agg.Add(metric); ok { + dropOriginal = true + } + } + + if !dropOriginal { + unit.outputC <- metric // keep original. + } else { + metric.Drop() + } + } + cancel() + }() + + for _, agg := range a.Config.Aggregators { + wg.Add(1) + go func(agg *models.RunningAggregator) { + defer wg.Done() + + acc := NewAccumulator(agg, unit.aggC) + acc.SetPrecision(a.Precision()) + a.push(ctx, agg, acc) + }(agg) + } + + wg.Wait() + + // In the case that there are no processors, both aggC and outputC are the + // same channel. If there are processors, we close the aggC and the + // processor chain will close the outputC when it finishes processing. + close(unit.aggC) + log.Printf("D! [agent] Aggregator channel closed") + + return nil } func updateWindow(start time.Time, roundInterval bool, period time.Duration) (time.Time, time.Time) { @@ -517,78 +610,6 @@ func updateWindow(start time.Time, roundInterval bool, period time.Duration) (ti return since, until } -// runAggregators adds metrics to the aggregators and triggers their periodic -// push call. -// -// Runs until src is closed and all metrics have been processed. Will call -// push one final time before returning. -func (a *Agent) runAggregators( - startTime time.Time, - src <-chan telegraf.Metric, - dst chan<- telegraf.Metric, -) error { - ctx, cancel := context.WithCancel(context.Background()) - - // Before calling Add, initialize the aggregation window. This ensures - // that any metric created after start time will be aggregated. 
- for _, agg := range a.Config.Aggregators { - since, until := updateWindow(startTime, a.Config.Agent.RoundInterval, agg.Period()) - agg.UpdateWindow(since, until) - } - - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - for metric := range src { - var dropOriginal bool - for _, agg := range a.Config.Aggregators { - if ok := agg.Add(metric); ok { - dropOriginal = true - } - } - - if !dropOriginal { - dst <- metric - } else { - metric.Drop() - } - } - cancel() - }() - - aggregations := make(chan telegraf.Metric, 100) - wg.Add(1) - go func() { - defer wg.Done() - - var aggWg sync.WaitGroup - for _, agg := range a.Config.Aggregators { - aggWg.Add(1) - go func(agg *models.RunningAggregator) { - defer aggWg.Done() - - acc := NewAccumulator(agg, aggregations) - acc.SetPrecision(a.Precision()) - a.push(ctx, agg, acc) - }(agg) - } - - aggWg.Wait() - close(aggregations) - }() - - for metric := range aggregations { - metrics := a.applyProcessors(metric) - for _, metric := range metrics { - dst <- metric - } - } - - wg.Wait() - return nil -} - // push runs the push for a single aggregator every period. func (a *Agent) push( ctx context.Context, @@ -613,22 +634,66 @@ func (a *Agent) push( } } -// runOutputs triggers the periodic write for Outputs. -// +// startOutputs calls Connect on all outputs and returns the source channel. +// If an error occurs calling Connect all started plugins have Close called. +func (a *Agent) startOutputs( + ctx context.Context, + outputs []*models.RunningOutput, +) (chan<- telegraf.Metric, *outputUnit, error) { + src := make(chan telegraf.Metric, 100) + unit := &outputUnit{src: src} + for _, output := range outputs { + err := a.connectOutput(ctx, output) + if err != nil { + for _, output := range unit.outputs { + output.Close() + } + return nil, nil, fmt.Errorf("connecting output %s: %w", output.LogName(), err) + } -// Runs until src is closed and all metrics have been processed. Will call -// Write one final time before returning. + unit.outputs = append(unit.outputs, output) + } + + return src, unit, nil +} + +// connectOutput connects to a single output, retrying once after 15 seconds +// if the first attempt fails. +func (a *Agent) connectOutput(ctx context.Context, output *models.RunningOutput) error { + log.Printf("D! [agent] Attempting connection to [%s]", output.LogName()) + err := output.Output.Connect() + if err != nil { + log.Printf("E! [agent] Failed to connect to [%s], retrying in 15s, "+ + "error was '%s'", output.LogName(), err) + + err := internal.SleepContext(ctx, 15*time.Second) + if err != nil { + return err + } + + err = output.Output.Connect() + if err != nil { + return fmt.Errorf("Error connecting to output %q: %w", output.LogName(), err) + } + } + log.Printf("D! [agent] Successfully connected to %s", output.LogName()) + return nil +} + +// runOutputs begins processing metrics and runs until the source channel is +// closed and all metrics have been written. On shutdown metrics will be +// written one last time and dropped if unsuccessful. func (a *Agent) runOutputs( - startTime time.Time, - src <-chan telegraf.Metric, + unit *outputUnit, ) error { + var wg sync.WaitGroup + + // Start flush loop interval := a.Config.Agent.FlushInterval.Duration jitter := a.Config.Agent.FlushJitter.Duration ctx, cancel := context.WithCancel(context.Background()) - var wg sync.WaitGroup - for _, output := range a.Config.Outputs { interval := interval // Overwrite agent flush_interval if this plugin has its own. 
if output.Config.FlushInterval != 0 { @@ -641,18 +706,19 @@ func (a *Agent) runOutputs( jitter = *output.Config.FlushJitter } - ticker := NewRollingTicker(interval, jitter) - defer ticker.Stop() - wg.Add(1) go func(output *models.RunningOutput) { defer wg.Done() + + ticker := NewRollingTicker(interval, jitter) + defer ticker.Stop() + a.flushLoop(ctx, output, ticker) }(output) } - for metric := range src { - for i, output := range a.Config.Outputs { + for metric := range unit.src { + for i, output := range unit.outputs { if i == len(a.Config.Outputs)-1 { output.AddMetric(metric) } else { @@ -738,115 +804,259 @@ func (a *Agent) flushOnce( output.LogBufferStatus() } } - } -// initPlugins runs the Init function on plugins. -func (a *Agent) initPlugins() error { - for _, input := range a.Config.Inputs { - err := input.Init() - if err != nil { - return fmt.Errorf("could not initialize input %s: %v", - input.LogName(), err) +// Test runs the inputs, processors and aggregators for a single gather and +// writes the metrics to stdout. +func (a *Agent) Test(ctx context.Context, wait time.Duration) error { + src := make(chan telegraf.Metric, 100) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + s := influx.NewSerializer() + s.SetFieldSortOrder(influx.SortFields) + + for metric := range src { + octets, err := s.Serialize(metric) + if err == nil { + fmt.Print("> ", string(octets)) + } + metric.Reject() } + }() + + err := a.test(ctx, wait, src) + if err != nil { + return err } - for _, processor := range a.Config.Processors { - err := processor.Init() - if err != nil { - return fmt.Errorf("could not initialize processor %s: %v", - processor.Config.Name, err) - } - } - for _, aggregator := range a.Config.Aggregators { - err := aggregator.Init() - if err != nil { - return fmt.Errorf("could not initialize aggregator %s: %v", - aggregator.Config.Name, err) - } - } - for _, output := range a.Config.Outputs { - err := output.Init() - if err != nil { - return fmt.Errorf("could not initialize output %s: %v", - output.Config.Name, err) - } + + wg.Wait() + + if models.GlobalGatherErrors.Get() != 0 { + return fmt.Errorf("input plugins recorded %d errors", models.GlobalGatherErrors.Get()) } return nil } -// connectOutputs connects to all outputs. -func (a *Agent) connectOutputs(ctx context.Context) error { - for _, output := range a.Config.Outputs { - log.Printf("D! [agent] Attempting connection to [%s]", output.LogName()) - err := output.Output.Connect() +// test runs the agent and performs a single gather, sending the metrics to +// outputC. After gathering it pauses for the wait duration to allow service +// inputs to run. +func (a *Agent) test(ctx context.Context, wait time.Duration, outputC chan<- telegraf.Metric) error { + log.Printf("D! [agent] Initializing plugins") + err := a.initPlugins() + if err != nil { + return err + } + + startTime := time.Now() + + next := outputC + + var apu []*processorUnit + var au *aggregatorUnit + if len(a.Config.Aggregators) != 0 { + procC := next + if len(a.Config.AggProcessors) != 0 { + procC, apu, err = a.startProcessors(next, a.Config.AggProcessors) + if err != nil { + return err + } + } + + next, au, err = a.startAggregators(procC, next, a.Config.Aggregators) if err != nil { - log.Printf("E! 
[agent] Failed to connect to [%s], retrying in 15s, "+ - "error was '%s'", output.LogName(), err) - - err := internal.SleepContext(ctx, 15*time.Second) - if err != nil { - return err - } - - err = output.Output.Connect() - if err != nil { - return err - } + return err } - log.Printf("D! [agent] Successfully connected to %s", output.LogName()) } + + var pu []*processorUnit + if len(a.Config.Processors) != 0 { + next, pu, err = a.startProcessors(next, a.Config.Processors) + if err != nil { + return err + } + } + + iu, err := a.testStartInputs(next, a.Config.Inputs) + if err != nil { + return err + } + + var wg sync.WaitGroup + + if au != nil { + wg.Add(1) + go func() { + defer wg.Done() + err = a.runProcessors(apu) + if err != nil { + log.Printf("E! [agent] Error running processors: %v", err) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + err = a.runAggregators(startTime, au) + if err != nil { + log.Printf("E! [agent] Error running aggregators: %v", err) + } + }() + } + + if pu != nil { + wg.Add(1) + go func() { + defer wg.Done() + err = a.runProcessors(pu) + if err != nil { + log.Printf("E! [agent] Error running processors: %v", err) + } + }() + } + + wg.Add(1) + go func() { + defer wg.Done() + err = a.testRunInputs(ctx, wait, iu) + if err != nil { + log.Printf("E! [agent] Error running inputs: %v", err) + } + }() + + wg.Wait() + + log.Printf("D! [agent] Stopped Successfully") + return nil } -// closeOutputs closes all outputs. -func (a *Agent) closeOutputs() { +// Once runs the full agent for a single gather. +func (a *Agent) Once(ctx context.Context, wait time.Duration) error { + err := a.once(ctx, wait) + if err != nil { + return err + } + + if models.GlobalGatherErrors.Get() != 0 { + return fmt.Errorf("input plugins recorded %d errors", models.GlobalGatherErrors.Get()) + } + + unsent := 0 for _, output := range a.Config.Outputs { - output.Close() + unsent += output.BufferLength() } -} - -// startServiceInputs starts all service inputs. -func (a *Agent) startServiceInputs( - ctx context.Context, - dst chan<- telegraf.Metric, -) error { - started := []telegraf.ServiceInput{} - - for _, input := range a.Config.Inputs { - if si, ok := input.Input.(telegraf.ServiceInput); ok { - // Service input plugins are not subject to timestamp rounding. - // This only applies to the accumulator passed to Start(), the - // Gather() accumulator does apply rounding according to the - // precision agent setting. - acc := NewAccumulator(input, dst) - acc.SetPrecision(time.Nanosecond) - - err := si.Start(acc) - if err != nil { - log.Printf("E! [agent] Service for [%s] failed to start: %v", - input.LogName(), err) - - for _, si := range started { - si.Stop() - } - - return err - } - - started = append(started, si) - } + if unsent != 0 { + return fmt.Errorf("output plugins unable to send %d metrics", unsent) } - return nil } -// stopServiceInputs stops all service inputs. -func (a *Agent) stopServiceInputs() { - for _, input := range a.Config.Inputs { - if si, ok := input.Input.(telegraf.ServiceInput); ok { - si.Stop() +// once runs the agent and performs a single gather, writing the metrics to +// the configured outputs. After gathering it pauses for the wait duration to +// allow service inputs to run. +func (a *Agent) once(ctx context.Context, wait time.Duration) error { + log.Printf("D! [agent] Initializing plugins") + err := a.initPlugins() + if err != nil { + return err + } + + startTime := time.Now() + + log.Printf("D! 
[agent] Connecting outputs") + next, ou, err := a.startOutputs(ctx, a.Config.Outputs) + if err != nil { + return err + } + + var apu []*processorUnit + var au *aggregatorUnit + if len(a.Config.Aggregators) != 0 { + procC := next + if len(a.Config.AggProcessors) != 0 { + procC, apu, err = a.startProcessors(next, a.Config.AggProcessors) + if err != nil { + return err + } + } + + next, au, err = a.startAggregators(procC, next, a.Config.Aggregators) + if err != nil { + return err } } + + var pu []*processorUnit + if len(a.Config.Processors) != 0 { + next, pu, err = a.startProcessors(next, a.Config.Processors) + if err != nil { + return err + } + } + + iu, err := a.testStartInputs(next, a.Config.Inputs) + if err != nil { + return err + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + err = a.runOutputs(ou) + if err != nil { + log.Printf("E! [agent] Error running outputs: %v", err) + } + }() + + if au != nil { + wg.Add(1) + go func() { + defer wg.Done() + err = a.runProcessors(apu) + if err != nil { + log.Printf("E! [agent] Error running processors: %v", err) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + err = a.runAggregators(startTime, au) + if err != nil { + log.Printf("E! [agent] Error running aggregators: %v", err) + } + }() + } + + if pu != nil { + wg.Add(1) + go func() { + defer wg.Done() + err = a.runProcessors(pu) + if err != nil { + log.Printf("E! [agent] Error running processors: %v", err) + } + }() + } + + wg.Add(1) + go func() { + defer wg.Done() + err = a.testRunInputs(ctx, wait, iu) + if err != nil { + log.Printf("E! [agent] Error running inputs: %v", err) + } + }() + + wg.Wait() + + log.Printf("D! [agent] Stopped Successfully") + + return nil } // Returns the rounding precision for metrics. diff --git a/aggregator.go b/aggregator.go index 48aa8e4bf..f168b04d0 100644 --- a/aggregator.go +++ b/aggregator.go @@ -5,11 +5,7 @@ package telegraf // Add, Push, and Reset can not be called concurrently, so locking is not // required when implementing an Aggregator plugin. type Aggregator interface { - // SampleConfig returns the default configuration of the Input. - SampleConfig() string - - // Description returns a one-sentence description on the Input. - Description() string + PluginDescriber // Add the metric to the aggregator. 
Add(in Metric) diff --git a/config/config.go b/config/config.go index 23ba1b5b3..bca178cb0 100644 --- a/config/config.go +++ b/config/config.go @@ -65,7 +65,8 @@ type Config struct { Outputs []*models.RunningOutput Aggregators []*models.RunningAggregator // Processors have a slice wrapper type because they need to be sorted - Processors models.RunningProcessors + Processors models.RunningProcessors + AggProcessors models.RunningProcessors } func NewConfig() *Config { @@ -83,6 +84,7 @@ func NewConfig() *Config { Inputs: make([]*models.RunningInput, 0), Outputs: make([]*models.RunningOutput, 0), Processors: make([]*models.RunningProcessor, 0), + AggProcessors: make([]*models.RunningProcessor, 0), InputFilters: make([]string, 0), OutputFilters: make([]string, 0), } @@ -561,12 +563,7 @@ func printFilteredGlobalSections(sectionFilters []string) { } } -type printer interface { - Description() string - SampleConfig() string -} - -func printConfig(name string, p printer, op string, commented bool) { +func printConfig(name string, p telegraf.PluginDescriber, op string, commented bool) { comment := "" if commented { comment = "# " @@ -684,12 +681,20 @@ func (c *Config) LoadConfig(path string) error { } data, err := loadConfig(path) if err != nil { - return fmt.Errorf("Error loading %s, %s", path, err) + return fmt.Errorf("Error loading config file %s: %w", path, err) } + if err = c.LoadConfigData(data); err != nil { + return fmt.Errorf("Error loading config file %s: %w", path, err) + } + return nil +} + +// LoadConfigData loads TOML-formatted config data +func (c *Config) LoadConfigData(data []byte) error { tbl, err := parseConfig(data) if err != nil { - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("Error parsing data: %s", err) } // Parse tags tables first: @@ -697,11 +702,10 @@ func (c *Config) LoadConfig(path string) error { if val, ok := tbl.Fields[tableName]; ok { subTable, ok := val.(*ast.Table) if !ok { - return fmt.Errorf("%s: invalid configuration", path) + return fmt.Errorf("invalid configuration, bad table name %q", tableName) } if err = toml.UnmarshalTable(subTable, c.Tags); err != nil { - log.Printf("E! Could not parse [global_tags] config\n") - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("error parsing table name %q: %w", tableName, err) } } } @@ -710,11 +714,10 @@ func (c *Config) LoadConfig(path string) error { if val, ok := tbl.Fields["agent"]; ok { subTable, ok := val.(*ast.Table) if !ok { - return fmt.Errorf("%s: invalid configuration", path) + return fmt.Errorf("invalid configuration, error parsing agent table") } if err = toml.UnmarshalTable(subTable, c.Agent); err != nil { - log.Printf("E! 
Could not parse [agent] config\n") - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("error parsing agent table: %w", err) } } @@ -735,7 +738,7 @@ func (c *Config) LoadConfig(path string) error { for name, val := range tbl.Fields { subTable, ok := val.(*ast.Table) if !ok { - return fmt.Errorf("%s: invalid configuration", path) + return fmt.Errorf("invalid configuration, error parsing field %q as table", name) } switch name { @@ -746,17 +749,17 @@ func (c *Config) LoadConfig(path string) error { // legacy [outputs.influxdb] support case *ast.Table: if err = c.addOutput(pluginName, pluginSubTable); err != nil { - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("Error parsing %s, %s", pluginName, err) } case []*ast.Table: for _, t := range pluginSubTable { if err = c.addOutput(pluginName, t); err != nil { - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("Error parsing %s array, %s", pluginName, err) } } default: - return fmt.Errorf("Unsupported config format: %s, file %s", - pluginName, path) + return fmt.Errorf("Unsupported config format: %s", + pluginName) } } case "inputs", "plugins": @@ -765,17 +768,17 @@ func (c *Config) LoadConfig(path string) error { // legacy [inputs.cpu] support case *ast.Table: if err = c.addInput(pluginName, pluginSubTable); err != nil { - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("Error parsing %s, %s", pluginName, err) } case []*ast.Table: for _, t := range pluginSubTable { if err = c.addInput(pluginName, t); err != nil { - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("Error parsing %s, %s", pluginName, err) } } default: - return fmt.Errorf("Unsupported config format: %s, file %s", - pluginName, path) + return fmt.Errorf("Unsupported config format: %s", + pluginName) } } case "processors": @@ -784,12 +787,12 @@ func (c *Config) LoadConfig(path string) error { case []*ast.Table: for _, t := range pluginSubTable { if err = c.addProcessor(pluginName, t); err != nil { - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("Error parsing %s, %s", pluginName, err) } } default: - return fmt.Errorf("Unsupported config format: %s, file %s", - pluginName, path) + return fmt.Errorf("Unsupported config format: %s", + pluginName) } } case "aggregators": @@ -798,19 +801,19 @@ func (c *Config) LoadConfig(path string) error { case []*ast.Table: for _, t := range pluginSubTable { if err = c.addAggregator(pluginName, t); err != nil { - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("Error parsing %s, %s", pluginName, err) } } default: - return fmt.Errorf("Unsupported config format: %s, file %s", - pluginName, path) + return fmt.Errorf("Unsupported config format: %s", + pluginName) } } // Assume it's an input input for legacy config file support if no other // identifiers are present default: if err = c.addInput(name, subTable); err != nil { - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("Error parsing %s, %s", name, err) } } } @@ -929,21 +932,48 @@ func (c *Config) addProcessor(name string, table *ast.Table) error { if !ok { return fmt.Errorf("Undefined but requested processor: %s", name) } - processor := creator() processorConfig, err := buildProcessor(name, table) if err != nil { return err } - if err := toml.UnmarshalTable(table, processor); err != nil { + rf, err := c.newRunningProcessor(creator, processorConfig, name, table) + if err != nil { return err } + c.Processors 
= append(c.Processors, rf) + + // save a copy for the aggregator + rf, err = c.newRunningProcessor(creator, processorConfig, name, table) + if err != nil { + return err + } + c.AggProcessors = append(c.AggProcessors, rf) + + return nil +} + +func (c *Config) newRunningProcessor( + creator processors.StreamingCreator, + processorConfig *models.ProcessorConfig, + name string, + table *ast.Table, +) (*models.RunningProcessor, error) { + processor := creator() + + if p, ok := processor.(unwrappable); ok { + if err := toml.UnmarshalTable(table, p.Unwrap()); err != nil { + return nil, err + } + } else { + if err := toml.UnmarshalTable(table, processor); err != nil { + return nil, err + } + } rf := models.NewRunningProcessor(processor, processorConfig) - - c.Processors = append(c.Processors, rf) - return nil + return rf, nil } func (c *Config) addOutput(name string, table *ast.Table) error { @@ -2195,3 +2225,10 @@ func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { return oc, nil } + +// unwrappable lets you retrieve the original telegraf.Processor from the +// StreamingProcessor. This is necessary because the toml Unmarshaller won't +// look inside composed types. +type unwrappable interface { + Unwrap() telegraf.Processor +} diff --git a/config/config_test.go b/config/config_test.go index c4a960265..6c5e3662a 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -207,7 +207,7 @@ func TestConfig_FieldNotDefined(t *testing.T) { c := NewConfig() err := c.LoadConfig("./testdata/invalid_field.toml") require.Error(t, err, "invalid field name") - assert.Equal(t, "Error parsing ./testdata/invalid_field.toml, line 2: field corresponding to `not_a_field' is not defined in http_listener_v2.HTTPListenerV2", err.Error()) + assert.Equal(t, "Error loading config file ./testdata/invalid_field.toml: Error parsing http_listener_v2, line 2: field corresponding to `not_a_field' is not defined in http_listener_v2.HTTPListenerV2", err.Error()) } @@ -215,12 +215,12 @@ func TestConfig_WrongFieldType(t *testing.T) { c := NewConfig() err := c.LoadConfig("./testdata/wrong_field_type.toml") require.Error(t, err, "invalid field type") - assert.Equal(t, "Error parsing ./testdata/wrong_field_type.toml, line 2: (http_listener_v2.HTTPListenerV2.Port) cannot unmarshal TOML string into int", err.Error()) + assert.Equal(t, "Error loading config file ./testdata/wrong_field_type.toml: Error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Port) cannot unmarshal TOML string into int", err.Error()) c = NewConfig() err = c.LoadConfig("./testdata/wrong_field_type2.toml") require.Error(t, err, "invalid field type2") - assert.Equal(t, "Error parsing ./testdata/wrong_field_type2.toml, line 2: (http_listener_v2.HTTPListenerV2.Methods) cannot unmarshal TOML string into []string", err.Error()) + assert.Equal(t, "Error loading config file ./testdata/wrong_field_type2.toml: Error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Methods) cannot unmarshal TOML string into []string", err.Error()) } func TestConfig_InlineTables(t *testing.T) { @@ -255,5 +255,5 @@ func TestConfig_BadOrdering(t *testing.T) { c := NewConfig() err := c.LoadConfig("./testdata/non_slice_slice.toml") require.Error(t, err, "bad ordering") - assert.Equal(t, "Error parsing ./testdata/non_slice_slice.toml, line 4: cannot unmarshal TOML array into string (need slice)", err.Error()) + assert.Equal(t, "Error loading config file ./testdata/non_slice_slice.toml: Error parsing http array, line 4: cannot 
unmarshal TOML array into string (need slice)", err.Error()) } diff --git a/input.go b/input.go index 071ab7d9d..08cfd75b9 100644 --- a/input.go +++ b/input.go @@ -1,11 +1,7 @@ package telegraf type Input interface { - // SampleConfig returns the default configuration of the Input - SampleConfig() string - - // Description returns a one-sentence description on the Input - Description() string + PluginDescriber // Gather takes in an accumulator and adds the metrics that the Input // gathers. This is called every "interval" diff --git a/models/running_processor.go b/models/running_processor.go index a7871b3e8..86b1887a1 100644 --- a/models/running_processor.go +++ b/models/running_processor.go @@ -10,7 +10,7 @@ import ( type RunningProcessor struct { sync.Mutex log telegraf.Logger - Processor telegraf.Processor + Processor telegraf.StreamingProcessor Config *ProcessorConfig } @@ -28,7 +28,7 @@ type ProcessorConfig struct { Filter Filter } -func NewRunningProcessor(processor telegraf.Processor, config *ProcessorConfig) *RunningProcessor { +func NewRunningProcessor(processor telegraf.StreamingProcessor, config *ProcessorConfig) *RunningProcessor { tags := map[string]string{"processor": config.Name} if config.Alias != "" { tags["alias"] = config.Alias @@ -52,15 +52,6 @@ func (rp *RunningProcessor) metricFiltered(metric telegraf.Metric) { metric.Drop() } -func containsMetric(item telegraf.Metric, metrics []telegraf.Metric) bool { - for _, m := range metrics { - if item == m { - return true - } - } - return false -} - func (r *RunningProcessor) Init() error { if p, ok := r.Processor.(telegraf.Initializer); ok { err := p.Init() @@ -71,34 +62,39 @@ func (r *RunningProcessor) Init() error { return nil } -func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric { - rp.Lock() - defer rp.Unlock() - - ret := []telegraf.Metric{} - - for _, metric := range in { - // In processors when a filter selects a metric it is sent through the - // processor. Otherwise the metric continues downstream unmodified. - if ok := rp.Config.Filter.Select(metric); !ok { - ret = append(ret, metric) - continue - } - - rp.Config.Filter.Modify(metric) - if len(metric.FieldList()) == 0 { - rp.metricFiltered(metric) - continue - } - - // This metric should pass through the filter, so call the filter Apply - // function and append results to the output slice. - ret = append(ret, rp.Processor.Apply(metric)...) 
- } - - return ret -} - func (r *RunningProcessor) Log() telegraf.Logger { return r.log } + +func (r *RunningProcessor) LogName() string { + return logName("processors", r.Config.Name, r.Config.Alias) +} + +func (r *RunningProcessor) MakeMetric(metric telegraf.Metric) telegraf.Metric { + return metric +} + +func (r *RunningProcessor) Start(acc telegraf.Accumulator) error { + return r.Processor.Start(acc) +} + +func (r *RunningProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) { + if ok := r.Config.Filter.Select(m); !ok { + // pass downstream + acc.AddMetric(m) + return + } + + r.Config.Filter.Modify(m) + if len(m.FieldList()) == 0 { + // drop metric + r.metricFiltered(m) + return + } + + r.Processor.Add(m, acc) +} + +func (r *RunningProcessor) Stop() { + r.Processor.Stop() +} diff --git a/models/running_processor_test.go b/models/running_processor_test.go index 4ac4743a7..ee1d50ef2 100644 --- a/models/running_processor_test.go +++ b/models/running_processor_test.go @@ -6,8 +6,8 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) @@ -43,7 +43,7 @@ func TagProcessor(key, value string) *MockProcessor { func TestRunningProcessor_Apply(t *testing.T) { type args struct { - Processor telegraf.Processor + Processor telegraf.StreamingProcessor Config *ProcessorConfig } @@ -56,7 +56,7 @@ func TestRunningProcessor_Apply(t *testing.T) { { name: "inactive filter applies metrics", args: args{ - Processor: TagProcessor("apply", "true"), + Processor: processors.NewStreamingProcessorFromProcessor(TagProcessor("apply", "true")), Config: &ProcessorConfig{ Filter: Filter{}, }, @@ -87,7 +87,7 @@ func TestRunningProcessor_Apply(t *testing.T) { { name: "filter applies", args: args{ - Processor: TagProcessor("apply", "true"), + Processor: processors.NewStreamingProcessorFromProcessor(TagProcessor("apply", "true")), Config: &ProcessorConfig{ Filter: Filter{ NamePass: []string{"cpu"}, @@ -120,7 +120,7 @@ func TestRunningProcessor_Apply(t *testing.T) { { name: "filter doesn't apply", args: args{ - Processor: TagProcessor("apply", "true"), + Processor: processors.NewStreamingProcessorFromProcessor(TagProcessor("apply", "true")), Config: &ProcessorConfig{ Filter: Filter{ NameDrop: []string{"cpu"}, @@ -158,7 +158,15 @@ func TestRunningProcessor_Apply(t *testing.T) { } rp.Config.Filter.Compile() - actual := rp.Apply(tt.input...) 
+ acc := testutil.Accumulator{} + err := rp.Start(&acc) + require.NoError(t, err) + for _, m := range tt.input { + rp.Add(m, &acc) + } + rp.Stop() + + actual := acc.GetTelegrafMetrics() require.Equal(t, tt.expected, actual) }) } diff --git a/output.go b/output.go index 3c4a85ddb..0045b2ca6 100644 --- a/output.go +++ b/output.go @@ -1,14 +1,12 @@ package telegraf type Output interface { + PluginDescriber + // Connect to the Output Connect() error // Close any connections to the Output Close() error - // Description returns a one-sentence description on the Output - Description() string - // SampleConfig returns the default configuration of the Output - SampleConfig() string // Write takes in group of points to be written to the Output Write(metrics []Metric) error } diff --git a/plugin.go b/plugin.go index f79721958..29e8bb683 100644 --- a/plugin.go +++ b/plugin.go @@ -9,6 +9,16 @@ type Initializer interface { Init() error } +// PluginDescriber contains the functions all plugins must implement to describe +// themselves to Telegraf. +type PluginDescriber interface { + // SampleConfig returns the default configuration of the plugin + SampleConfig() string + + // Description returns a one-sentence description of the plugin + Description() string +} + // Logger defines an interface for logging. type Logger interface { // Errorf logs an error message, patterned after log.Printf. diff --git a/plugins/processors/registry.go b/plugins/processors/registry.go index 592c688f3..efade2966 100644 --- a/plugins/processors/registry.go +++ b/plugins/processors/registry.go @@ -3,9 +3,24 @@ package processors import "github.com/influxdata/telegraf" type Creator func() telegraf.Processor +type StreamingCreator func() telegraf.StreamingProcessor -var Processors = map[string]Creator{} +// All processors are streaming processors. 
+// telegraf.Processor processors are upgraded to telegraf.StreamingProcessor +var Processors = map[string]StreamingCreator{} +// Add adds a telegraf.Processor processor func Add(name string, creator Creator) { + Processors[name] = upgradeToStreamingProcessor(creator) +} + +// AddStreaming adds a telegraf.StreamingProcessor streaming processor +func AddStreaming(name string, creator StreamingCreator) { Processors[name] = creator } + +func upgradeToStreamingProcessor(oldCreator Creator) StreamingCreator { + return func() telegraf.StreamingProcessor { + return NewStreamingProcessorFromProcessor(oldCreator()) + } +} diff --git a/plugins/processors/streamingprocessor.go b/plugins/processors/streamingprocessor.go new file mode 100644 index 000000000..4078ac26c --- /dev/null +++ b/plugins/processors/streamingprocessor.go @@ -0,0 +1,49 @@ +package processors + +import ( + "github.com/influxdata/telegraf" +) + +// NewStreamingProcessorFromProcessor is a converter that turns a standard +// processor into a streaming processor. +func NewStreamingProcessorFromProcessor(p telegraf.Processor) telegraf.StreamingProcessor { + sp := &streamingProcessor{ + processor: p, + } + return sp +} + +type streamingProcessor struct { + processor telegraf.Processor + acc telegraf.Accumulator +} + +func (sp *streamingProcessor) SampleConfig() string { + return sp.processor.SampleConfig() +} + +func (sp *streamingProcessor) Description() string { + return sp.processor.Description() +} + +func (sp *streamingProcessor) Start(acc telegraf.Accumulator) error { + sp.acc = acc + return nil +} + +func (sp *streamingProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) { + for _, m := range sp.processor.Apply(m) { + acc.AddMetric(m) + } +} + +func (sp *streamingProcessor) Stop() error { + return nil +} + +// Unwrap lets you retrieve the original telegraf.Processor from the +// StreamingProcessor. This is necessary because the toml Unmarshaller won't +// look inside composed types. +func (sp *streamingProcessor) Unwrap() telegraf.Processor { + return sp.processor +} diff --git a/processor.go b/processor.go index e084adab7..5e2d46914 100644 --- a/processor.go +++ b/processor.go @@ -1,12 +1,31 @@ package telegraf +// Processor is a processor plugin interface for defining new inline processors. +// These are extremely efficient and should be used over StreamingProcessor if +// you do not need asynchronous metric writes. type Processor interface { - // SampleConfig returns the default configuration of the Input - SampleConfig() string - - // Description returns a one-sentence description on the Input - Description() string + PluginDescriber // Apply the filter to the given metric. Apply(in ...Metric) []Metric } + +// StreamingProcessor is a processor that can take in a stream of metrics. +type StreamingProcessor interface { + PluginDescriber + + // Start is the initializer for the processor. + // Start is only called once per plugin instance, and never in parallel. + // Start should exit immediately after setup. + Start(acc Accumulator) error + + // Add is called for each metric to be processed. + Add(metric Metric, acc Accumulator) + + // Stop gives you a callback to free resources. + // By the time Stop is called, the input stream will have already been closed + // and Add will not be called anymore. + // When Stop returns, you should no longer be writing metrics to the + // accumulator. 
+ Stop() error +} From 7ba226a00b11fa78bb7201cc723f1bd7c6c0c435 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 5 Jun 2020 18:30:25 -0400 Subject: [PATCH 1814/1815] fix issue with stream parser blocking when data is in buffer (#7631) --- plugins/parsers/influx/machine.go | 20 ++++++++++---------- plugins/parsers/influx/machine.go.rl | 20 ++++++++++---------- plugins/parsers/influx/parser_test.go | 17 +++++++++++++++++ 3 files changed, 37 insertions(+), 20 deletions(-) diff --git a/plugins/parsers/influx/machine.go b/plugins/parsers/influx/machine.go index 59bd232dd..332b73592 100644 --- a/plugins/parsers/influx/machine.go +++ b/plugins/parsers/influx/machine.go @@ -31678,6 +31678,16 @@ func (m *streamMachine) Next() error { m.machine.data = expanded } + err := m.machine.exec() + if err != nil { + return err + } + + // If we have successfully parsed a full metric line break out + if m.machine.finishMetric { + break + } + n, err := m.reader.Read(m.machine.data[m.machine.pe:]) if n == 0 && err == io.EOF { m.machine.eof = m.machine.pe @@ -31692,16 +31702,6 @@ func (m *streamMachine) Next() error { m.machine.pe += n - err = m.machine.exec() - if err != nil { - return err - } - - // If we have successfully parsed a full metric line break out - if m.machine.finishMetric { - break - } - } return nil diff --git a/plugins/parsers/influx/machine.go.rl b/plugins/parsers/influx/machine.go.rl index 61f49c652..f8f40cd7c 100644 --- a/plugins/parsers/influx/machine.go.rl +++ b/plugins/parsers/influx/machine.go.rl @@ -506,6 +506,16 @@ func (m *streamMachine) Next() error { m.machine.data = expanded } + err := m.machine.exec() + if err != nil { + return err + } + + // If we have successfully parsed a full metric line break out + if m.machine.finishMetric { + break + } + n, err := m.reader.Read(m.machine.data[m.machine.pe:]) if n == 0 && err == io.EOF { m.machine.eof = m.machine.pe @@ -520,16 +530,6 @@ func (m *streamMachine) Next() error { m.machine.pe += n - err = m.machine.exec() - if err != nil { - return err - } - - // If we have successfully parsed a full metric line break out - if m.machine.finishMetric { - break - } - } return nil diff --git a/plugins/parsers/influx/parser_test.go b/plugins/parsers/influx/parser_test.go index 368ad277d..569eb3a22 100644 --- a/plugins/parsers/influx/parser_test.go +++ b/plugins/parsers/influx/parser_test.go @@ -3,6 +3,7 @@ package influx import ( "bytes" "errors" + "io" "strconv" "strings" "testing" @@ -895,3 +896,19 @@ func TestStreamParserReaderError(t *testing.T) { _, err = parser.Next() require.Equal(t, err, EOF) } + +func TestStreamParserProducesAllAvailableMetrics(t *testing.T) { + r, w := io.Pipe() + + parser := NewStreamParser(r) + parser.SetTimeFunc(DefaultTime) + + go w.Write([]byte("metric value=1\nmetric2 value=1\n")) + + _, err := parser.Next() + require.NoError(t, err) + + // should not block on second read + _, err = parser.Next() + require.NoError(t, err) +} From 62302a0f37ea0204de08a7827187b69882b0b686 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 5 Jun 2020 18:46:58 -0400 Subject: [PATCH 1815/1815] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cad19b9ab..d458426df 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ #### Features +- [#7634](https://github.com/influxdata/telegraf/pull/7634): Add support for streaming processors. - [#6905](https://github.com/influxdata/telegraf/pull/6905): Add commands stats to mongodb input plugin. 
- [#7193](https://github.com/influxdata/telegraf/pull/7193): Add additional concurrent transaction information. - [#7223](https://github.com/influxdata/telegraf/pull/7223): Add ability to specify HTTP Headers in http_listener_v2 which will be added as tags. @@ -59,6 +60,7 @@ #### Bugfixes +- [#7617](https://github.com/influxdata/telegraf/issues/7617): Fix issue with influx stream parser blocking when data is in buffer. - [#7371](https://github.com/influxdata/telegraf/issues/7371): Fix unable to write metrics to CloudWatch with IMDSv1 disabled. - [#7233](https://github.com/influxdata/telegraf/issues/7233): Fix vSphere 6.7 missing data issue. - [#7448](https://github.com/influxdata/telegraf/issues/7448): Remove debug fields from splunkmetric serializer.
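
The streaming processor interface introduced in the first patch above is easiest to see with a complete plugin. The following sketch is illustrative only and not part of the patch series: the plugin name "example" and its tagging behavior are invented for this note, but the telegraf.StreamingProcessor methods and the processors.AddStreaming registration it uses are exactly the ones added in processor.go and plugins/processors/registry.go above.

package example

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/processors"
)

// Example is a hypothetical streaming processor that tags each metric as it
// passes through. A plugin that only transforms metrics inline like this
// could remain a plain telegraf.Processor; the streaming form is used here
// only to illustrate the new interface.
type Example struct {
	TagValue string `toml:"tag_value"`
}

func (e *Example) SampleConfig() string {
	return `
[[processors.example]]
  ## Value of the tag added to every metric.
  tag_value = "processed"
`
}

func (e *Example) Description() string {
	return "Tag every metric that streams through this processor."
}

// Start is called once before any metrics arrive; nothing to set up here.
func (e *Example) Start(acc telegraf.Accumulator) error {
	return nil
}

// Add is called for each metric; writing to acc passes metrics downstream.
// Metrics may also be held and written to acc later, asynchronously, which
// is what the streaming interface makes possible.
func (e *Example) Add(m telegraf.Metric, acc telegraf.Accumulator) {
	m.AddTag("example", e.TagValue)
	acc.AddMetric(m)
}

// Stop is called after the input stream is closed; once it returns, no more
// metrics may be written to the accumulator.
func (e *Example) Stop() error {
	return nil
}

func init() {
	processors.AddStreaming("example", func() telegraf.StreamingProcessor {
		return &Example{}
	})
}

Note that metric filtering does not need to be handled inside the plugin: as shown in models/running_processor.go above, RunningProcessor.Add applies the configured filter and forwards unselected metrics downstream before the plugin's Add is ever called.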